diff --git a/data/data/ibmcloud/bootstrap/ignition.tf b/data/data/ibmcloud/bootstrap/ignition.tf
new file mode 100644
index 00000000000..7eb8fb41a17
--- /dev/null
+++ b/data/data/ibmcloud/bootstrap/ignition.tf
@@ -0,0 +1,56 @@
+############################################
+# COS bucket
+############################################
+
+resource "ibm_cos_bucket" "bootstrap_ignition" {
+  bucket_name          = "${local.prefix}-bootstrap-ignition"
+  resource_instance_id = var.cos_resource_instance_id
+  region_location      = var.cos_bucket_region
+  storage_class        = "smart"
+}
+
+############################################
+# COS object
+############################################
+
+resource "ibm_cos_bucket_object" "bootstrap_ignition" {
+  bucket_crn      = ibm_cos_bucket.bootstrap_ignition.crn
+  bucket_location = ibm_cos_bucket.bootstrap_ignition.region_location
+  key             = "bootstrap.ign"
+  content_file    = var.ignition_file
+  etag            = filemd5(var.ignition_file)
+}
+
+############################################
+# IAM service credentials
+############################################
+
+# NOTE/TODO: Get IAM token for created Service ID, not supported in provider
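+# The data source below returns the IAM access token of the installer's own
+# provider session; main.tf renders it into the bootstrap node's stub Ignition
+# config so the node can authorize its fetch of bootstrap.ign from the COS
+# bucket above.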
+data "ibm_iam_auth_token" "iam_token" {}
+
+# NOTE: Not used at the moment
+# resource "ibm_iam_service_id" "cos" {
+#   name = "${local.prefix}-cos-service-id"
+# }
+
+# NOTE: Not used at the moment
+# resource "ibm_resource_key" "cos_reader" {
+#   name                 = "${local.prefix}-cos-reader"
+#   role                 = "Reader"
+#   resource_instance_id = ibm_resource_instance.cos.id
+#   parameters = {
+#     HMAC          = true
+#     serviceid_crn = ibm_iam_service_id.cos.crn
+#   }
+# }
+
+# NOTE: Not used at the moment
+# resource "ibm_resource_key" "cos_writer" {
+#   name                 = "${local.prefix}-cos-writer"
+#   role                 = "Writer"
+#   resource_instance_id = ibm_resource_instance.cos.id
+#   parameters = {
+#     HMAC          = true
+#     serviceid_crn = ibm_iam_service_id.cos.crn
+#   }
+# }
diff --git a/data/data/ibmcloud/bootstrap/main.tf b/data/data/ibmcloud/bootstrap/main.tf
new file mode 100644
index 00000000000..98ce89ff4ef
--- /dev/null
+++ b/data/data/ibmcloud/bootstrap/main.tf
@@ -0,0 +1,78 @@
+locals {
+  prefix              = var.cluster_id
+  port_kubernetes_api = 6443
+  port_machine_config = 22623
+}
+
+############################################
+# Bootstrap node
+############################################
+
+resource "ibm_is_instance" "bootstrap_node" {
+  name           = "${local.prefix}-bootstrap"
+  image          = var.vsi_image_id
+  profile        = var.vsi_profile
+  resource_group = var.resource_group_id
+  tags           = var.tags
+
+  primary_network_interface {
+    name            = "eth0"
+    subnet          = var.subnet_id
+    security_groups = [var.security_group_id]
+  }
+
+  vpc  = var.vpc_id
+  zone = var.zone
+  keys = []
+
+  # Use custom ignition config that pulls content from COS bucket
+  # TODO: Once support for the httpHeaders field is added to
+  # terraform-provider-ignition, we should use it instead of this template.
+  # https://github.com/community-terraform-providers/terraform-provider-ignition/issues/16
+  user_data = templatefile("${path.module}/templates/bootstrap.ign", {
+    HOSTNAME    = ibm_cos_bucket.bootstrap_ignition.s3_endpoint_public
+    BUCKET_NAME = ibm_cos_bucket.bootstrap_ignition.bucket_name
+    OBJECT_NAME = ibm_cos_bucket_object.bootstrap_ignition.key
+    IAM_TOKEN   = data.ibm_iam_auth_token.iam_token.iam_access_token
+  })
+}
+
+############################################
+# Floating IP
+############################################
+
+resource "ibm_is_floating_ip" "bootstrap_floatingip" {
+  count = var.public_endpoints ? 1 : 0
+
+  name           = "${local.prefix}-bootstrap-node-ip"
+  resource_group = var.resource_group_id
+  target         = ibm_is_instance.bootstrap_node.primary_network_interface.0.id
+  tags           = var.tags
+}
+
+############################################
+# Load balancer backend pool members
+############################################
+
+resource "ibm_is_lb_pool_member" "kubernetes_api_public" {
+  count = var.public_endpoints ? 1 : 0
+
+  lb             = var.lb_kubernetes_api_public_id
+  pool           = var.lb_pool_kubernetes_api_public_id
+  port           = local.port_kubernetes_api
+  target_address = ibm_is_instance.bootstrap_node.primary_network_interface.0.primary_ipv4_address
+}
+
+resource "ibm_is_lb_pool_member" "kubernetes_api_private" {
+  lb             = var.lb_kubernetes_api_private_id
+  pool           = var.lb_pool_kubernetes_api_private_id
+  port           = local.port_kubernetes_api
+  target_address = ibm_is_instance.bootstrap_node.primary_network_interface.0.primary_ipv4_address
+}
+
+resource "ibm_is_lb_pool_member" "machine_config" {
+  lb             = var.lb_kubernetes_api_private_id
+  pool           = var.lb_pool_machine_config_id
+  port           = local.port_machine_config
+  target_address = ibm_is_instance.bootstrap_node.primary_network_interface.0.primary_ipv4_address
+}
diff --git a/data/data/ibmcloud/bootstrap/outputs.tf b/data/data/ibmcloud/bootstrap/outputs.tf
new file mode 100644
index 00000000000..f9ce9961a07
--- /dev/null
+++ b/data/data/ibmcloud/bootstrap/outputs.tf
@@ -0,0 +1,11 @@
+#######################################
+# Bootstrap module outputs
+#######################################
+
+output "name" {
+  value = ibm_is_instance.bootstrap_node.name
+}
+
+output "primary_ipv4_address" {
+  value = ibm_is_instance.bootstrap_node.primary_network_interface.0.primary_ipv4_address
+}
diff --git a/data/data/ibmcloud/bootstrap/templates/bootstrap.ign b/data/data/ibmcloud/bootstrap/templates/bootstrap.ign
new file mode 100644
index 00000000000..cf824f9c531
--- /dev/null
+++ b/data/data/ibmcloud/bootstrap/templates/bootstrap.ign
@@ -0,0 +1,16 @@
+{
+  "ignition": {
+    "version": "3.2.0",
+    "config": {
+      "replace": {
+        "source": "https://${HOSTNAME}/${BUCKET_NAME}/${OBJECT_NAME}",
+        "httpHeaders": [
+          {
+            "name": "Authorization",
+            "value": "${IAM_TOKEN}"
+          }
+        ]
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/data/data/ibmcloud/bootstrap/variables.tf b/data/data/ibmcloud/bootstrap/variables.tf
new file mode 100644
index 00000000000..628abf79882
--- /dev/null
+++ b/data/data/ibmcloud/bootstrap/variables.tf
@@ -0,0 +1,75 @@
+#######################################
+# Bootstrap module variables
+#######################################
+
+variable "cluster_id" {
+  type = string
+}
+
+variable "cos_resource_instance_id" {
+  type = string
+}
+
+variable "cos_bucket_region" {
+  type = string
+}
+
+variable "ignition_file" {
+  type = string
+}
+
+variable "lb_kubernetes_api_public_id" {
+  type = string
+}
+
+variable "lb_kubernetes_api_private_id" {
+  type = string
+}
+
+variable "lb_pool_kubernetes_api_public_id" {
+  type = string
+}
+
+variable "lb_pool_kubernetes_api_private_id" {
+  type = string
+}
+
+variable "lb_pool_machine_config_id" {
+  type = string
+}
+
+variable "public_endpoints" {
+  type = bool
+}
+
+variable "resource_group_id" {
+  type = string
+}
+
+variable "security_group_id" {
+  type = string
+}
+
+variable "subnet_id" {
+  type = string
+}
+
+variable "tags" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "vsi_image_id" {
+  type = string
+}
+
+variable "vsi_profile" {
+  type = string
+}
+
+variable "zone" {
+  type = string
+}
\ No newline at end of file
diff --git a/data/data/ibmcloud/cis/main.tf b/data/data/ibmcloud/cis/main.tf
new file mode 100644
index 00000000000..a57d1f33a83
--- /dev/null
+++ b/data/data/ibmcloud/cis/main.tf
@@ -0,0 +1,78 @@
+############################################
+# Datasources
+############################################
+
+data "ibm_cis_domain" "base_domain" {
+  cis_id = var.cis_id
+  domain = var.base_domain
+}
+
+############################################
+# CIS DNS records (CNAME)
+############################################
+
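+# "api" points at the public load balancer when one was created, and falls
+# back to the private load balancer for internal-only clusters.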
+resource "ibm_cis_dns_record" "kubernetes_api" {
+  cis_id    = var.cis_id
+  domain_id = data.ibm_cis_domain.base_domain.id
+  type      = "CNAME"
+  name      = "api.${var.cluster_domain}"
+  content   = var.lb_kubernetes_api_public_hostname != "" ? var.lb_kubernetes_api_public_hostname : var.lb_kubernetes_api_private_hostname
+  ttl       = 60
+}
+
+resource "ibm_cis_dns_record" "kubernetes_api_internal" {
+  cis_id    = var.cis_id
+  domain_id = data.ibm_cis_domain.base_domain.id
+  type      = "CNAME"
+  name      = "api-int.${var.cluster_domain}"
+  content   = var.lb_kubernetes_api_private_hostname
+  ttl       = 60
+}
+
+############################################
+# CIS DNS records (A)
+############################################
+
+resource "ibm_cis_dns_record" "bootstrap_node" {
+  cis_id    = var.cis_id
+  domain_id = data.ibm_cis_domain.base_domain.id
+  type      = "A"
+  name      = "${var.bootstrap_name}.${var.cluster_domain}"
+  content   = var.bootstrap_ipv4_address
+  ttl       = 60
+}
+
+resource "ibm_cis_dns_record" "master_node" {
+  count = var.master_count
+
+  cis_id    = var.cis_id
+  domain_id = data.ibm_cis_domain.base_domain.id
+  type      = "A"
+  name      = "${var.master_name_list[count.index]}.${var.cluster_domain}"
+  content   = var.master_ipv4_address_list[count.index]
+  ttl       = 60
+}
+
+############################################
+# CIS DNS records (PTR)
+############################################
+
+resource "ibm_cis_dns_record" "bootstrap_node_ptr" {
+  cis_id    = var.cis_id
+  domain_id = data.ibm_cis_domain.base_domain.id
+  type      = "PTR"
+  name      = var.bootstrap_ipv4_address
+  content   = "${var.bootstrap_name}.${var.cluster_domain}"
+  ttl       = 60
+}
+
+resource "ibm_cis_dns_record" "master_node_ptr" {
+  count = var.master_count
+
+  cis_id    = var.cis_id
+  domain_id = data.ibm_cis_domain.base_domain.id
+  type      = "PTR"
+  name      = var.master_ipv4_address_list[count.index]
+  content   = "${var.master_name_list[count.index]}.${var.cluster_domain}"
+  ttl       = 60
+}
diff --git a/data/data/ibmcloud/cis/variables.tf b/data/data/ibmcloud/cis/variables.tf
new file mode 100644
index 00000000000..ac17b1b237a
--- /dev/null
+++ b/data/data/ibmcloud/cis/variables.tf
@@ -0,0 +1,43 @@
+############################################
+# CIS module variables
+############################################
+
+variable "cis_id" {
+  type = string
+}
+
+variable "base_domain" {
+  type = string
+}
+
+variable "cluster_domain" {
+  type = string
+}
+
+variable "bootstrap_name" {
+  type = string
+}
+
+variable "bootstrap_ipv4_address" {
+  type = string
+}
+
+variable "master_count" {
+  type = string
+}
+
+variable "master_name_list" {
+  type = list(string)
+}
+
+variable "master_ipv4_address_list" {
+  type = list(string)
+}
+
+variable "lb_kubernetes_api_public_hostname" {
+  type = string
+}
+
+variable "lb_kubernetes_api_private_hostname" {
+  type = string
+}
diff --git a/data/data/ibmcloud/image/main.tf b/data/data/ibmcloud/image/main.tf
new file mode 100644
index 00000000000..a8502d6c618
--- /dev/null
+++ b/data/data/ibmcloud/image/main.tf
@@ -0,0 +1,38 @@
+locals {
+  prefix = var.cluster_id
+}
+
+resource "ibm_cos_bucket" "images" {
+  bucket_name          = "${local.prefix}-vsi-image"
+  resource_instance_id = var.cos_resource_instance_id
+  region_location      = var.region
+  storage_class        = "smart"
+}
+
+resource "ibm_cos_bucket_object" "file" {
+  bucket_crn      = ibm_cos_bucket.images.crn
+  bucket_location = ibm_cos_bucket.images.region_location
+  content_file    = var.image_filepath
+  key             = basename(var.image_filepath)
+  etag            = filemd5(var.image_filepath)
+}
+
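+# Allow the VPC image service ("is") to read from the COS instance; the
+# custom image import below waits on this policy via depends_on.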
+resource "ibm_iam_authorization_policy" "policy" {
+  source_service_name         = "is"
+  source_resource_type        = "image"
+  target_service_name         = "cloud-object-storage"
+  target_resource_instance_id = var.cos_resource_instance_id
+  roles                       = ["Reader"]
+}
+
+resource "ibm_is_image" "image" {
+  depends_on = [
+    ibm_iam_authorization_policy.policy
+  ]
+
+  name             = var.name
+  href             = "cos://${ibm_cos_bucket.images.region_location}/${ibm_cos_bucket.images.bucket_name}/${ibm_cos_bucket_object.file.key}"
+  operating_system = "centos-8-amd64"
+  resource_group   = var.resource_group_id
+  tags             = var.tags
+}
diff --git a/data/data/ibmcloud/image/outputs.tf b/data/data/ibmcloud/image/outputs.tf
new file mode 100644
index 00000000000..e14c4d02e36
--- /dev/null
+++ b/data/data/ibmcloud/image/outputs.tf
@@ -0,0 +1,3 @@
+output "vsi_image_id" {
+  value = ibm_is_image.image.id
+}
diff --git a/data/data/ibmcloud/image/variables.tf b/data/data/ibmcloud/image/variables.tf
new file mode 100644
index 00000000000..a149cef502c
--- /dev/null
+++ b/data/data/ibmcloud/image/variables.tf
@@ -0,0 +1,27 @@
+variable "name" {
+  type = string
+}
+
+variable "image_filepath" {
+  type = string
+}
+
+variable "cluster_id" {
+  type = string
+}
+
+variable "resource_group_id" {
+  type = string
+}
+
+variable "region" {
+  type = string
+}
+
+variable "tags" {
+  type = list(string)
+}
+
+variable "cos_resource_instance_id" {
+  type = string
+}
\ No newline at end of file
diff --git a/data/data/ibmcloud/main.tf b/data/data/ibmcloud/main.tf
new file mode 100644
index 00000000000..6f38d98f199
--- /dev/null
+++ b/data/data/ibmcloud/main.tf
@@ -0,0 +1,152 @@
+locals {
+  description       = "Created By OpenShift Installer"
+  public_endpoints  = var.ibmcloud_publish_strategy == "External" ? true : false
+  resource_group_id = var.ibmcloud_resource_group_name == "" ? ibm_resource_group.group.0.id : data.ibm_resource_group.group.0.id
+  tags = concat(
+    ["kubernetes.io_cluster_${var.cluster_id}:owned"],
+    var.ibmcloud_extra_tags
+  )
+}
+
+############################################
+# IBM Cloud provider
+############################################
+
+provider "ibm" {
+  ibmcloud_api_key = var.ibmcloud_api_key
+  region           = var.ibmcloud_region
+}
+
+############################################
+# Resource group
+############################################
+
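+# Create a resource group named after the cluster when none was supplied;
+# otherwise look up the existing group by name. Exactly one of the two
+# blocks below is active, and local.resource_group_id picks its id.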
+resource "ibm_resource_group" "group" {
+  count = var.ibmcloud_resource_group_name == "" ? 1 : 0
+  name  = var.cluster_id
+}
+
+data "ibm_resource_group" "group" {
+  count = var.ibmcloud_resource_group_name == "" ? 0 : 1
+  name  = var.ibmcloud_resource_group_name
+}
+
+############################################
+# Shared COS Instance
+############################################
+resource "ibm_resource_instance" "cos" {
+  name              = "${var.cluster_id}-cos"
+  service           = "cloud-object-storage"
+  plan              = "standard"
+  location          = "global"
+  resource_group_id = local.resource_group_id
+  tags              = local.tags
+}
+
+############################################
+# Import VPC Custom Image
+############################################
+
+module "image" {
+  source = "./image"
+
+  name                     = "${var.cluster_id}-rhcos"
+  image_filepath           = var.ibmcloud_image_filepath
+  cluster_id               = var.cluster_id
+  region                   = var.ibmcloud_region
+  resource_group_id        = local.resource_group_id
+  tags                     = local.tags
+  cos_resource_instance_id = ibm_resource_instance.cos.id
+}
+
+############################################
+# Bootstrap module
+############################################
+
+module "bootstrap" {
+  source = "./bootstrap"
+
+  cluster_id               = var.cluster_id
+  cos_resource_instance_id = ibm_resource_instance.cos.id
+  cos_bucket_region        = var.ibmcloud_region
+  ignition_file            = var.ignition_bootstrap_file
+  public_endpoints         = local.public_endpoints
+  resource_group_id        = local.resource_group_id
+  security_group_id        = module.vpc.control_plane_security_group_id
+  subnet_id                = module.vpc.control_plane_subnet_id_list[0]
+  tags                     = local.tags
+  vpc_id                   = module.vpc.vpc_id
+  vsi_image_id             = module.image.vsi_image_id
+  vsi_profile              = var.ibmcloud_bootstrap_instance_type
+  zone                     = module.vpc.control_plane_subnet_zone_list[0]
+
+  lb_kubernetes_api_public_id       = module.vpc.lb_kubernetes_api_public_id
+  lb_kubernetes_api_private_id      = module.vpc.lb_kubernetes_api_private_id
+  lb_pool_kubernetes_api_public_id  = module.vpc.lb_pool_kubernetes_api_public_id
+  lb_pool_kubernetes_api_private_id = module.vpc.lb_pool_kubernetes_api_private_id
+  lb_pool_machine_config_id         = module.vpc.lb_pool_machine_config_id
+}
+
+############################################
+# Master module
+############################################
+
+module "master" {
+  source = "./master"
+
+  cluster_id        = var.cluster_id
+  instance_count    = var.master_count
+  ignition          = var.ignition_master
+  public_endpoints  = local.public_endpoints
+  resource_group_id = local.resource_group_id
+  security_group_id = module.vpc.control_plane_security_group_id
+  subnet_id_list    = module.vpc.control_plane_subnet_id_list
+  tags              = local.tags
+  vpc_id            = module.vpc.vpc_id
+  vsi_image_id      = module.image.vsi_image_id
+  vsi_profile       = var.ibmcloud_master_instance_type
+  zone_list         = module.vpc.control_plane_subnet_zone_list
+
+  lb_kubernetes_api_public_id       = module.vpc.lb_kubernetes_api_public_id
+  lb_kubernetes_api_private_id      = module.vpc.lb_kubernetes_api_private_id
+  lb_pool_kubernetes_api_public_id  = module.vpc.lb_pool_kubernetes_api_public_id
+  lb_pool_kubernetes_api_private_id = module.vpc.lb_pool_kubernetes_api_private_id
+  lb_pool_machine_config_id         = module.vpc.lb_pool_machine_config_id
+}
+
+############################################
+# CIS module
+############################################
+
+module "cis" {
+  source = "./cis"
+
+  cis_id         = var.ibmcloud_cis_crn
+  base_domain    = var.base_domain
+  cluster_domain = var.cluster_domain
+
+  bootstrap_name         = module.bootstrap.name
+  bootstrap_ipv4_address = module.bootstrap.primary_ipv4_address
+
+  master_count             = var.master_count
+  master_name_list         = module.master.name_list
+  master_ipv4_address_list = module.master.primary_ipv4_address_list
+
+  lb_kubernetes_api_public_hostname  = module.vpc.lb_kubernetes_api_public_hostname
+  lb_kubernetes_api_private_hostname = module.vpc.lb_kubernetes_api_private_hostname
+}
+
+############################################
+# VPC module
+############################################
+
+module "vpc" {
+  source = "./vpc"
+
+  cluster_id        = var.cluster_id
+  public_endpoints  = local.public_endpoints
+  resource_group_id = local.resource_group_id
+  region            = var.ibmcloud_region
+  tags              = local.tags
+  zone_list         = distinct(var.ibmcloud_master_availability_zones)
+}
diff --git a/data/data/ibmcloud/master/main.tf b/data/data/ibmcloud/master/main.tf
new file mode 100644
index 00000000000..de58e5f8744
--- /dev/null
+++ b/data/data/ibmcloud/master/main.tf
@@ -0,0 +1,68 @@
+locals {
+  prefix              = var.cluster_id
+  port_kubernetes_api = 6443
+  port_machine_config = 22623
+  subnet_count        = length(var.subnet_id_list)
+  zone_count          = length(var.zone_list)
+}
+
+############################################
+# Master nodes
+############################################
+
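+# Masters wait for the load balancers to exist (depends_on) and are spread
+# round-robin across the configured subnets and zones via count.index.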
+resource "ibm_is_instance" "master_node" {
+  count = var.instance_count
+  depends_on = [
+    var.lb_kubernetes_api_private_id,
+    var.lb_kubernetes_api_public_id
+  ]
+
+  name           = "${local.prefix}-master-${count.index}"
+  image          = var.vsi_image_id
+  profile        = var.vsi_profile
+  resource_group = var.resource_group_id
+  tags           = var.tags
+
+  primary_network_interface {
+    name            = "eth0"
+    subnet          = var.subnet_id_list[count.index % local.subnet_count]
+    security_groups = [var.security_group_id]
+  }
+
+  vpc  = var.vpc_id
+  zone = var.zone_list[count.index % local.zone_count]
+  keys = []
+
+  user_data = var.ignition
+}
+
+############################################
+# Load balancer backend pool members
+############################################
+
+resource "ibm_is_lb_pool_member" "kubernetes_api_public" {
+  count = var.public_endpoints ? var.instance_count : 0
+
+  lb             = var.lb_kubernetes_api_public_id
+  pool           = var.lb_pool_kubernetes_api_public_id
+  port           = local.port_kubernetes_api
+  target_address = ibm_is_instance.master_node[count.index].primary_network_interface.0.primary_ipv4_address
+}
+
+resource "ibm_is_lb_pool_member" "kubernetes_api_private" {
+  count = var.instance_count
+
+  lb             = var.lb_kubernetes_api_private_id
+  pool           = var.lb_pool_kubernetes_api_private_id
+  port           = local.port_kubernetes_api
+  target_address = ibm_is_instance.master_node[count.index].primary_network_interface.0.primary_ipv4_address
+}
+
+resource "ibm_is_lb_pool_member" "machine_config" {
+  count = var.instance_count
+
+  lb             = var.lb_kubernetes_api_private_id
+  pool           = var.lb_pool_machine_config_id
+  port           = local.port_machine_config
+  target_address = ibm_is_instance.master_node[count.index].primary_network_interface.0.primary_ipv4_address
+}
\ No newline at end of file
diff --git a/data/data/ibmcloud/master/outputs.tf b/data/data/ibmcloud/master/outputs.tf
new file mode 100644
index 00000000000..5598d8c5f05
--- /dev/null
+++ b/data/data/ibmcloud/master/outputs.tf
@@ -0,0 +1,11 @@
+#######################################
+# Master module outputs
+#######################################
+
+output "name_list" {
+  value = ibm_is_instance.master_node.*.name
+}
+
+output "primary_ipv4_address_list" {
+  value = ibm_is_instance.master_node.*.primary_network_interface.0.primary_ipv4_address
+}
\ No newline at end of file
diff --git a/data/data/ibmcloud/master/variables.tf b/data/data/ibmcloud/master/variables.tf
new file mode 100644
index 00000000000..499a4da8b28
--- /dev/null
+++ b/data/data/ibmcloud/master/variables.tf
@@ -0,0 +1,71 @@
+#######################################
+# Master module variables
+#######################################
+
+variable "cluster_id" {
+  type = string
+}
+
+variable "instance_count" {
+  type = string
+}
+
+variable "ignition" {
+  type = string
+}
+
+variable "lb_kubernetes_api_public_id" {
+  type = string
+}
+
+variable "lb_kubernetes_api_private_id" {
+  type = string
+}
+
+variable "lb_pool_kubernetes_api_public_id" {
+  type = string
+}
+
+variable "lb_pool_kubernetes_api_private_id" {
+  type = string
+}
+
+variable "lb_pool_machine_config_id" {
+  type = string
+}
+
+variable "public_endpoints" {
+  type = bool
+}
+
+variable "resource_group_id" {
+  type = string
+}
+
+variable "security_group_id" {
+  type = string
+}
+
+variable "subnet_id_list" {
+  type = list(string)
+}
+
+variable "tags" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "vsi_image_id" {
+  type = string
+}
+
+variable "vsi_profile" {
+  type = string
+}
+
+variable "zone_list" {
+  type = list(string)
+}
\ No newline at end of file
diff --git a/data/data/ibmcloud/variables-ibmcloud.tf b/data/data/ibmcloud/variables-ibmcloud.tf
new file mode 100644
index 00000000000..ded1ef02659
--- /dev/null
+++ b/data/data/ibmcloud/variables-ibmcloud.tf
@@ -0,0 +1,73 @@
+#######################################
+# Top-level module variables (required)
+#######################################
+
+variable "ibmcloud_api_key" {
+  type = string
+  # TODO: Supported on tf 0.14
+  # sensitive = true
+  description = "The IAM API key for authenticating with IBM Cloud APIs."
+}
+
+variable "ibmcloud_bootstrap_instance_type" {
+  type        = string
+  description = "Instance type for the bootstrap node. Example: `bx2d-4x16`"
+}
+
+variable "ibmcloud_cis_crn" {
+  type        = string
+  description = "The CRN of the CIS instance to use."
+}
+
+variable "ibmcloud_region" {
+  type        = string
+  description = "The target IBM Cloud region for the cluster."
+}
+
+variable "ibmcloud_master_instance_type" {
+  type        = string
+  description = "Instance type for the master node(s). Example: `bx2d-4x16`"
+}
+
+variable "ibmcloud_master_availability_zones" {
+  type        = list(string)
+  description = "The availability zones in which to create the masters. The length of this list must match master_count."
+}
+
+variable "ibmcloud_image_filepath" {
+  type        = string
+  description = "The file path to the RHCOS image."
+}
+
+#######################################
+# Top-level module variables (optional)
+#######################################
+
+variable "ibmcloud_extra_tags" {
+  type        = list(string)
+  description = <
diff --git a/go.mod b/go.mod
--- a/go.mod
+++ b/go.mod
 	cloud.google.com/go => cloud.google.com/go v0.57.0
+	github.com/IBM-Cloud/terraform-provider-ibm => github.com/openshift/terraform-provider-ibm v1.26.2-openshift
 	github.com/go-log/log => github.com/go-log/log v0.1.1-0.20181211034820-a514cf01a3eb // Pinned by MCO
 	github.com/hashicorp/terraform => github.com/openshift/terraform v0.12.20-openshift-4 // Pin to fork with deduplicated rpc types
 	github.com/hashicorp/terraform-plugin-sdk => github.com/openshift/hashicorp-terraform-plugin-sdk v1.14.0-openshift // Pin to fork with public rpc types
@@ -144,11 +148,14 @@ replace (
 )
 
 // Prevent the following modules from upgrading version as result of terraform-provider-kubernetes module
-// The following modules need to be locked to compile correctly with terraform-provider-azure and terraform-provider-google
+// The following modules need to be locked to compile correctly with terraform-provider-azure, terraform-provider-google, and terraform-provider-ibm
 replace (
+	github.com/IBM/vpc-go-sdk => github.com/IBM/vpc-go-sdk v0.7.0
 	github.com/apparentlymart/go-cidr => github.com/apparentlymart/go-cidr v1.0.1
 	github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.32.3
+	github.com/go-openapi/errors => github.com/go-openapi/errors v0.19.2
 	github.com/go-openapi/spec => github.com/go-openapi/spec v0.19.4
+	github.com/go-openapi/validate => github.com/go-openapi/validate v0.19.8
 	github.com/hashicorp/go-plugin => github.com/hashicorp/go-plugin v1.2.2
 	github.com/ulikunitz/xz => github.com/ulikunitz/xz v0.5.7
 	google.golang.org/api => google.golang.org/api v0.25.0
diff --git a/go.sum b/go.sum
index a87c39da6ba..2f70be4490a 100644
--- a/go.sum
+++ b/go.sum
@@ -101,6 +101,7 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp
 github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/Azure/go-ntlmssp v0.0.0-20191115210519-2b2be6cc8ed4 h1:jxtswewdgihgXM6ayHYtISwzkAOaRzyXpgUMamb8mHw=
 github.com/Azure/go-ntlmssp v0.0.0-20191115210519-2b2be6cc8ed4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/Bowery/prompt v0.0.0-20190916142128-fa8279994f75/go.mod h1:4/6eNcqZ09BZ9wLK3tZOjBA1nDj+B0728nlX5YRlSmQ=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -114,20 +115,67 @@ github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5/go.mod h1:4UJr5H
 github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
 github.com/IBM-Cloud/bluemix-go v0.0.0-20210513052039-9ddb92888817 h1:9jsTasFEbVSViXb/sU159PBK8KplKb6DeDyGxN/9htk=
 github.com/IBM-Cloud/bluemix-go v0.0.0-20210513052039-9ddb92888817/go.mod h1:kqTYO0mts71aa8PVwviaKlCKYud/NbEkFIqU8aHH3/g=
+github.com/IBM-Cloud/bluemix-go v0.0.0-20210611051827-cdc80c935c05 h1:b/epmuvf99xhUlf81l0r/ciONJSjoUs78t+BC7lCcvI=
+github.com/IBM-Cloud/bluemix-go v0.0.0-20210611051827-cdc80c935c05/go.mod h1:kqTYO0mts71aa8PVwviaKlCKYud/NbEkFIqU8aHH3/g=
+github.com/IBM-Cloud/ibm-cloud-cli-sdk v0.6.7/go.mod h1:RiUvKuHKTBmBApDMUQzBL14pQUGKcx/IioKQPIcRQjs=
+github.com/IBM-Cloud/power-go-client v1.0.55 h1:XoRU8FWYY8NUKau1nkErkf2FHAUbFEbbXtbLTvaGp9c=
+github.com/IBM-Cloud/power-go-client v1.0.55/go.mod h1:I4r5tCrA8mV5GFqGAJG4/Tn+/JpR+XLnDCLLNVKJxuI=
+github.com/IBM/apigateway-go-sdk v0.0.0-20200414212859-416e5948678a h1:lX3vP+9Y5gTP0w6l+4oL0mvNfgXZYJsMdlsiUG43GBw=
+github.com/IBM/apigateway-go-sdk v0.0.0-20200414212859-416e5948678a/go.mod h1:sNVpGpUv3jvA2dQbRPFjVrRFNdmnQrf2QTtKa/c2XrI=
+github.com/IBM/appconfiguration-go-admin-sdk v0.1.0 h1:9rdOk32VQFnMqsBB7cTpkZbD7/b0EnwrU3VNN8vuUYc=
+github.com/IBM/appconfiguration-go-admin-sdk v0.1.0/go.mod h1:6x6KbqIwrEi07OvEM1+EnU4Lyk+JFm2O0vrSPPGTleU=
+github.com/IBM/container-registry-go-sdk v0.0.12 h1:UEHMFVF+uGEcoLsTu3Ow3db0cl2fEXtXKevgt5QiaSY=
+github.com/IBM/container-registry-go-sdk v0.0.12/go.mod h1:GYi1VN59VaJWWq2xP06o9Vpi6+K8V5vtmji6WjMJf0w=
+github.com/IBM/container-registry-go-sdk v0.0.13 h1:nifb9L0dpMaECkZy7MsNvXaH8vxRW2ARBRbF8cV7S5g=
+github.com/IBM/container-registry-go-sdk v0.0.13/go.mod h1:GYi1VN59VaJWWq2xP06o9Vpi6+K8V5vtmji6WjMJf0w=
 github.com/IBM/go-sdk-core v1.1.0 h1:pV73lZqr9r1xKb3h08c1uNG3AphwoV5KzUzhS+pfEqY=
 github.com/IBM/go-sdk-core v1.1.0/go.mod h1:2pcx9YWsIsZ3I7kH+1amiAkXvLTZtAq9kbxsfXilSoY=
+github.com/IBM/go-sdk-core/v3 v3.0.0/go.mod h1:JI5NS2+iCoY/D8Oq3JNEZNA7qO42agu6fnaUmDsRcJA=
+github.com/IBM/go-sdk-core/v3 v3.2.4/go.mod h1:lk9eOzNbNltPf3CBpcg1Ewkhw4qC3u2QCCKDRsUA2M0=
+github.com/IBM/go-sdk-core/v3 v3.3.1 h1:DoXjP1+Wm8Yd4XJsvBMRcYLvQwSLFnzKlMjSrg3Rzpw=
+github.com/IBM/go-sdk-core/v3 v3.3.1/go.mod h1:lk9eOzNbNltPf3CBpcg1Ewkhw4qC3u2QCCKDRsUA2M0=
 github.com/IBM/go-sdk-core/v4 v4.5.1/go.mod h1:lTUXbqIX6/aAbSCkP6q59+dyFsTwZAc0ewRS2vJWVbg=
+github.com/IBM/go-sdk-core/v4 v4.8.1/go.mod h1:GECJ/p0r9Hs0XcOnCiGjZrt6M/rQc+gW8YkCsGJ+j5U=
 github.com/IBM/go-sdk-core/v4 v4.9.0 h1:OkSg5kaEfVoNuBA4IsIOz8Ur5rbGHbWxmWCZ7nK/oc0=
 github.com/IBM/go-sdk-core/v4 v4.9.0/go.mod h1:DbQ+3pFoIjxGGTEiA9zQ2V0cemMNmFMkLBBnR729HKg=
+github.com/IBM/go-sdk-core/v4 v4.10.0 h1:aLoKusSFVsxMJeKHf8csj9tBWt4Y50kVvfxoKh6scN0=
+github.com/IBM/go-sdk-core/v4 v4.10.0/go.mod h1:0uz2ca0MZ2DwsBRGl9Jp3EaCTqxmKZTdvV/CkCB7JnI=
+github.com/IBM/go-sdk-core/v5 v5.0.0/go.mod h1:vyNdbFujJtdTj9HbihtvKwwS3k/GKSKpOx9ZIQ6MWDY=
+github.com/IBM/go-sdk-core/v5 v5.0.3/go.mod h1:vyNdbFujJtdTj9HbihtvKwwS3k/GKSKpOx9ZIQ6MWDY=
+github.com/IBM/go-sdk-core/v5 v5.1.0/go.mod h1:vyNdbFujJtdTj9HbihtvKwwS3k/GKSKpOx9ZIQ6MWDY=
 github.com/IBM/go-sdk-core/v5 v5.2.0/go.mod h1:vyNdbFujJtdTj9HbihtvKwwS3k/GKSKpOx9ZIQ6MWDY=
+github.com/IBM/go-sdk-core/v5 v5.3.0/go.mod h1:+MNa5Jbqb9FO7KEevo982Pb/YXr4adkyEffJlPs2TGc=
 github.com/IBM/go-sdk-core/v5 v5.4.0/go.mod h1:+MNa5Jbqb9FO7KEevo982Pb/YXr4adkyEffJlPs2TGc=
 github.com/IBM/go-sdk-core/v5 v5.4.2/go.mod h1:Sn+z+qTDREQvCr+UFa22TqqfXNxx3o723y8GsfLV8e0=
 github.com/IBM/go-sdk-core/v5 v5.4.3 h1:g3HUKD2wyptGTAGOjS4IkYWmLFgGsioFCnBUXz5NB4g=
 github.com/IBM/go-sdk-core/v5 v5.4.3/go.mod h1:Sn+z+qTDREQvCr+UFa22TqqfXNxx3o723y8GsfLV8e0=
+github.com/IBM/ibm-cos-sdk-go v1.3.1/go.mod h1:YLBAYobEA8bD27P7xpMwSQeNQu6W3DNBtBComXrRzRY=
+github.com/IBM/ibm-cos-sdk-go v1.6.1 h1:2XG/fsXno8228gBEwxf0u2AFI/Nii3wpk17lkpF0IvA=
+github.com/IBM/ibm-cos-sdk-go v1.6.1/go.mod h1:BOqDAOxuJTamCSdAKx2XStknDaeB99nXWaf1PtvW0iY=
+github.com/IBM/ibm-cos-sdk-go v1.7.0 h1:3DZULY/D5WzjlIm+Iaj6h0surEjQs65EZk1YAe8+rj0=
+github.com/IBM/ibm-cos-sdk-go v1.7.0/go.mod h1:Oi8AC5WNDhmUJgbo1GL2FtBdo0nRgbzE/1HmCL1SERU=
+github.com/IBM/ibm-cos-sdk-go-config v1.1.0 h1:udFHLvw1o1mtLdX3t1hTaPSdLAUPnkc2qCngNhubR1o=
+github.com/IBM/ibm-cos-sdk-go-config v1.1.0/go.mod h1:BAbdv1Zf8mRP6rj40Cem7KgBp+UQn9Fe2EWxIBrp5sM=
+github.com/IBM/ibm-cos-sdk-go-config v1.2.0 h1:1E93234yZgVS0ntm7eUwVb3h0AAayPGcxEhhizEN1LE=
+github.com/IBM/ibm-cos-sdk-go-config v1.2.0/go.mod h1:Wetfgv6m1xyuzpZLQTTLIBsWstxjYa15h+Utj7x53Dk=
+github.com/IBM/keyprotect-go-client v0.7.0 h1:JstSHD14Lp6ihwQseyPuGcs1AjOBjAmcisP0dTBA6A0=
+github.com/IBM/keyprotect-go-client v0.7.0/go.mod h1:SVr2ylV/fhSQPDiUjWirN9fsyWFCNNbt8GIT8hPJVjE=
 github.com/IBM/networking-go-sdk v0.14.0 h1:CWQufnSxynqxYORGbkSqePPSZ33fUijiwmcuZsMRv/Q=
 github.com/IBM/networking-go-sdk v0.14.0/go.mod h1:8f3hEoWVUSYKbaIj7WZhdeJaseYGDSY85Iz+PqxLEbQ=
+github.com/IBM/platform-services-go-sdk v0.18.11/go.mod h1:yaE+2oxhno9RhKYyvzeektKCajakHkM2R2/gWSJIqfA=
+github.com/IBM/platform-services-go-sdk v0.18.12/go.mod h1:awc7TZUeGMlToSeMSaWEz34Knf0lQnuGWumcI4pcuoM=
 github.com/IBM/platform-services-go-sdk v0.18.13 h1:RrqgUtX56XWIuZS85xxB7b2uZLs5AbTbOwhl1PAcn5E=
 github.com/IBM/platform-services-go-sdk v0.18.13/go.mod h1:awc7TZUeGMlToSeMSaWEz34Knf0lQnuGWumcI4pcuoM=
+github.com/IBM/push-notifications-go-sdk v0.0.0-20210310100607-5790b96c47f5 h1:NPUhkoOCRuv3OFWt19PmwjXGGTKlvmbuPg9fUrBUNe4=
+github.com/IBM/push-notifications-go-sdk v0.0.0-20210310100607-5790b96c47f5/go.mod h1:b07XHUVh0XYnQE9s2mqgjYST1h9buaQNqN4EcKhOsX0=
+github.com/IBM/schematics-go-sdk v0.0.2 h1:IFdM73VL3xwf/KaTh1IY99hkiTfFRYg5F1JNj69FOEg=
+github.com/IBM/schematics-go-sdk v0.0.2/go.mod h1:ymN1+3uEaWNT0RthwHzExxMiN0AnTh6W3piSY8canjs=
+github.com/IBM/secrets-manager-go-sdk v0.1.19 h1:0GPs5EoTaWNsjo4QPj64GNxlWfN8VHJy4RDFLqddSe8=
+github.com/IBM/secrets-manager-go-sdk v0.1.19/go.mod h1:eO3dBhzPrHkkt+yPex/jB2xD6qHZxBko+Aw+0tfqHeA=
+github.com/IBM/vpc-go-sdk v0.6.0 h1:acJIYIsdjasCabSQlDJ+LZz62R4Q9WDqPIlwWec3u9Y=
+github.com/IBM/vpc-go-sdk v0.6.0/go.mod h1:wxicPDnSTPXt1eNxSO/9KNGqOW9RMgxPoSh4gd8KJY4=
+github.com/IBM/vpc-go-sdk v0.7.0 h1:LNAnzcDLD2Bf2UbiYfFFsWGlHQf2N/Qc6S5iM7qln1o=
+github.com/IBM/vpc-go-sdk v0.7.0/go.mod h1:wxicPDnSTPXt1eNxSO/9KNGqOW9RMgxPoSh4gd8KJY4=
 github.com/IBM/vpc-go-sdk v1.0.1 h1:D2cu4KRsM8Q8bLWz/uxp8m7nzUm33mcgDv1sD0w/E8M=
 github.com/IBM/vpc-go-sdk v1.0.1/go.mod h1:bhd7r482lV30UJz46r2oRgYGawGEo+TuS41ZLIY65y0=
 github.com/InVisionApp/go-health v2.1.0+incompatible/go.mod h1:/+Gv1o8JUsrjC6pi6MN6/CgKJo4OqZ6x77XAnImrzhg=
@@ -162,7 +210,11 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
+github.com/ScaleFT/sshkeys v0.0.0-20200327173127-6142f742bca5 h1:VauE2GcJNZFun2Och6tIT2zJZK1v6jxALQDA9BIji/E=
+github.com/ScaleFT/sshkeys v0.0.0-20200327173127-6142f742bca5/go.mod h1:gxOHeajFfvGQh/fxlC8oOKBe23xnnJTif00IFFbiT+o=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc=
+github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
 github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
@@ -198,6 +250,9 @@ github.com/antchfx/xpath v1.1.2 h1:YziPrtM0gEJBnhdUGxYcIVYXZ8FXbtbovxOi+UW/yWQ=
 github.com/antchfx/xpath v1.1.2/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
 github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0 h1:JaCC8jz0zdMLk2m+qCCVLLLM/PL93p84w4pK3aJWj60=
 github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
+github.com/apache/incubator-openwhisk-client-go v0.0.0-20171128215515-ad814bc98c32/go.mod h1:tkMtcI5DHxdNk03R1YVLF66VhkF8uJkZs7pgHE9Mgqk=
+github.com/apache/openwhisk-client-go v0.0.0-20200201143223-a804fb82d105 h1:k1wP1gZMrNJeXTz6a+3010NKC/ZvSffk07BzrLmYrmc=
+github.com/apache/openwhisk-client-go v0.0.0-20200201143223-a804fb82d105/go.mod h1:jLLKYP7+1+LFlIJW1n9U1gqeveLM1HIwa4ZHNOFxjPw=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U=
@@ -227,6 +282,8 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
+github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/ashcrow/osrelease v0.0.0-20180626175927-9b292693c55c/go.mod h1:BRljTyotlu+6N+Qlu5MhjxpdmccCnp9lDvZjNNV8qr4=
 github.com/awalterschulze/gographviz v0.0.0-20190522210029-fa59802746ab h1:+cdNqtOJWjvepyhxy23G7z7vmpYCoC65AP0nqi1f53s=
 github.com/awalterschulze/gographviz v0.0.0-20190522210029-fa59802746ab/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs=
@@ -301,6 +358,8 @@ github.com/clarketm/json v1.14.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQ
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14=
+github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 h1:tuijfIjZyjZaHq9xDUh0tNitwXshJpbLkqMOJv4H3do=
+github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21/go.mod h1:po7NpZ/QiTKzBKyrsEAxwnTamCoh8uDk/egRpQ7siIc=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
@@ -388,6 +447,9 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE=
 github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
+github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a h1:saTgr5tMLFnmy/yg3qDTft4rE5DY2uJ/cCxCe3q0XTU=
+github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a/go.mod h1:Bw9BbhOJVNR+t0jCqx2GC6zv0TGBsShs56Y3gfSCvl0=
+github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185/go.mod h1:cFRxtTwTOJkz2x3rQUNCYKWC93yP1VKjR8NUhqFxZNU=
 github.com/denis-tingajkin/go-header v0.3.1/go.mod h1:sq/2IxMhaZX+RRcgHfCRx/m0M5na0fBt4/CRe7Lrji0=
 github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
@@ -430,7 +492,11 @@ github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCf
 github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1 h1:r1oACdS2XYiAWcfF8BJXkoU8l1J71KehGR+d99yWEDA=
 github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
+github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
@@ -475,6 +541,7 @@ github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60
 github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
 github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
 github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
+github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -532,9 +599,11 @@ github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70t
 github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
 github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
 github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=
 github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
 github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
 github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=
 github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
 github.com/go-openapi/errors v0.19.8 h1:doM+tQdZbUm9gydV9yR+iQNmztbjj7I3sW4sIcAwIzc=
 github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
@@ -555,18 +624,25 @@ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf
 github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
+github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
+github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=
 github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
 github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
 github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
 github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/runtime v0.19.11 h1:6J11dQiIV+BOLlMbk2YmM8RvGaOU38syeqy62qhh3W8=
+github.com/go-openapi/runtime v0.19.11/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
 github.com/go-openapi/spec v0.19.4 h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo=
 github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
 github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
 github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
 github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
 github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
 github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.8/go.mod h1:qBBipho+3EoIqn6YDI+4RnQEtj6jT/IdKm+PAlXxSUc=
 github.com/go-openapi/strfmt v0.19.10/go.mod h1:qBBipho+3EoIqn6YDI+4RnQEtj6jT/IdKm+PAlXxSUc=
+github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
 github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
 github.com/go-openapi/strfmt v0.20.1 h1:1VgxvehFne1mbChGeCmZ5pc0LxUf6yaACVSIYAR91Xc=
 github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
@@ -576,11 +652,13 @@ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
 github.com/go-openapi/swag v0.19.9 h1:1IxuqvBUU3S2Bi4YC7tlP9SJF1gVpCvqN0T2Qof4azE=
 github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
 github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
 github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
 github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-openapi/validate v0.19.8 h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMwei1ys=
 github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
 github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
@@ -628,6 +706,8 @@ github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj
 github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
 github.com/gobuffalo/flect v0.2.0 h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM=
 github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gobuffalo/flect v0.2.2 h1:PAVD7sp0KOdfswjAw9BpLCU9hXo7wFSzgpQ+zNeks/A=
+github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
 github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
 github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
 github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
@@ -753,6 +833,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a/go.mod h1:o93WzqysX0jP/10Y13hfL6aq9RoUvGaVdkrH5awMksE=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
 github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -836,6 +918,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/h2non/filetype v1.0.12 h1:yHCsIe0y2cvbDARtJhGBTD2ecvqMSTvlIcph9En/Zao=
 github.com/h2non/filetype v1.0.12/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
+github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hashicorp/aws-sdk-go-base v0.4.0 h1:zH9hNUdsS+2G0zJaU85ul8D59BGnZBaKM+KMNPAHGwk=
 github.com/hashicorp/aws-sdk-go-base v0.4.0/go.mod h1:eRhlz3c4nhqxFZJAahJEFL7gh6Jyj5rQmQc7F9eHFyQ=
@@ -904,6 +987,8 @@ github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
 github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw=
+github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -923,6 +1008,8 @@ github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yI
 github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE=
 github.com/hashicorp/hil v0.0.0-20190212132231-97b3a9cdfa93 h1:T1Q6ag9tCwun16AW+XK3tAql24P4uTGUMIn1/92WsQQ=
 github.com/hashicorp/hil v0.0.0-20190212132231-97b3a9cdfa93/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE=
+github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 h1:n9J0rwVWXDpNd5iZnwY7w4WZyq53/rROeI7OVvLW8Ok=
+github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE=
 github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
@@ -997,9 +1084,19 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe
 github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce h1:7UnVY3T/ZnHUrfviiAgIUjg2PXxsQfs5bphsG8F7Keo=
 github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hasueki/kubernetesservice-go-sdk v0.0.1 h1:xYg/MbKNzqWl999NwVAYZBFtt/SSyzygZs60bPzo5wk=
+github.com/hasueki/kubernetesservice-go-sdk v0.0.1/go.mod h1:bSupIRftcyigc4l3zliWD/8etjg0qsSLeFgRTiCGMnc=
+github.com/hasueki/softlayer-go v1.0.3-terraform h1:nL5nzOseGQx6lQPZ7/ghraHFOeJSin6wlHY3CoMVLCo=
+github.com/hasueki/softlayer-go v1.0.3-terraform/go.mod h1:6HepcfAXROz0Rf63krk5hPZyHT6qyx2MNvYyHof7ik4=
+github.com/hasueki/terraform-provider-ibm v1.25.0-openshift h1:EUpPoXQribOrTTXgYHWlePIj7M+b7GW5AC40ZmE5/98=
+github.com/hasueki/terraform-provider-ibm v1.25.0-openshift/go.mod h1:6JYimy9RaQNFhBfZaHDuFWqmdXp+uDK3kVBlOGi22io=
+github.com/hasueki/terraform-provider-ibm v1.26.2-openshift h1:VKC3LOQJ9shlYSeXACLmEFEryXERFHapCdPmCvBU9EI=
+github.com/hasueki/terraform-provider-ibm v1.26.2-openshift/go.mod h1:V9U5EhKeOWSLAtVtGO3JfWPr/e6uBW9woaQ/1yjw1dc=
 github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
 github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c h1:kp3AxgXgDOmIJFR7bIwqFhwJ2qWar8tEQSE5XXhCfVk=
 github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
+github.com/hokaccha/go-prettyjson v0.0.0-20170213120834-e6b9231a2b1c h1:vlXZsaTgJ55QZrAkOrpq0tsJmuuM4ky5OMZOvXnhvqE=
+github.com/hokaccha/go-prettyjson v0.0.0-20170213120834-e6b9231a2b1c/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI=
 github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 h1:WgfvpuKg42WVLkxNwzfFraXkTXPK36bMqXvMFN67clI=
 github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214/go.mod h1:kj6hFWqfwSjFjLnYW5PK1DoxZ4O0uapwHRmd9jhln4E=
 github.com/hooklift/iso9660 v1.0.0 h1:GYN0ejrqTl1qtB+g+ics7xxWHp7J2B1zmr25O9EyG3c=
@@ -1021,6 +1118,9 @@ github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna
 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
 github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
 github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
+github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
 github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2/go.mod h1:xkfESuHriIekR+4RoV+fu91j/CfnYM29Zi2tMFw5iD4=
 github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f/go.mod h1:3J2qVK16Lq8V+wfiL2lPeDZ7UWMxk5LemerHa1p6N00=
 github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ=
@@ -1036,6 +1136,9 @@ github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
 github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
 github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
 github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
@@ -1065,6 +1168,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/k8snetworkplumbingwg/network-attachment-definition-client v0.0.0-20191119172530-79f836b90111 h1:Lq6HJa0JqSg5ko/mkizFWlpIrY7845g9Dzz9qeD5aXI=
 github.com/k8snetworkplumbingwg/network-attachment-definition-client v0.0.0-20191119172530-79f836b90111/go.mod h1:MP2HbArq3QT+oVp8pmtHNZnSnkhdkHtDnc7h6nJXmBU=
+github.com/kardianos/govendor v1.0.9/go.mod h1:yvmR6q9ZZ7nSF5Wvh40v0wfP+3TwwL8zYQp+itoZSVM=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
 github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
@@ -1087,6 +1191,7 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
 github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
 github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -1118,6 +1223,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
 github.com/kyoh86/exportloopref v0.1.7/go.mod h1:h1rDl2Kdj97+Kwh4gdz3ujE7XHmH51Q0lUiZ1z4NLj8=
 github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/libvirt/libvirt-go v4.10.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE=
@@ -1179,6 +1286,7 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
@@ -1196,6 +1304,8 @@ github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1
 github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod h1:+pmbihVqjC3GPdfWv1V2TnRSuVvwrWLKfEP/MZVB/Wc=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/minsikl/netscaler-nitro-go v0.0.0-20170827154432-5b14ce3643e3 h1:PHPBYVeLuR7/2XSOfVwDpW+70KNuxMWygsyOZSKK15Y=
+github.com/minsikl/netscaler-nitro-go v0.0.0-20170827154432-5b14ce3643e3/go.mod h1:jh28TRFZwBumf7OjMQbRb8TNtDuuX7QNAGRjFEt+h6I=
 github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w=
@@ -1232,6 +1342,8 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
 github.com/mitchellh/mapstructure v1.3.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
 github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/panicwrap v1.0.0 h1:67zIyVakCIvcs69A0FGfZjBdPleaonSgGlXRSRlb6fE=
 github.com/mitchellh/panicwrap v1.0.0/go.mod h1:pKvZHwWrZowLUzftuFq7coarnxbBXU4aQh3N0BJOeeA=
 github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8=
@@ -1276,10 +1388,13 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
 github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
 github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
 github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
 github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/nicksnyder/go-i18n v1.10.0 h1:5AzlPKvXBH4qBzmZ09Ua9Gipyruv6uApMcrNZdo96+Q=
+github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nirarg/terraform-provider-kubevirt v0.0.0-20201222125919-101cee051ed3 h1:X+6Iial/2VHkDJvtdJiSdJq/RnTDlvdX7WscLXS+z0U=
@@ -1304,6 +1419,7 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
@@ -1324,6 +1440,7 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
 github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
 github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
@@ -1352,6 +1469,9 @@ github.com/openshift/api v0.0.0-20200929171550-c99a4deebbe5/go.mod h1:Si/I9UGeRR
 github.com/openshift/api v0.0.0-20201019163320-c6a5ec25f267/go.mod h1:RDvBcRQMGLa3aNuDuejVBbTEQj/2i14NXdpOLqbNBvM=
 github.com/openshift/api v0.0.0-20201214114959-164a2fb63b5f/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
 github.com/openshift/api v0.0.0-20201216151826-78a19e96f9eb/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
+github.com/openshift/api v0.0.0-20210331193751-3acddb19d360/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio=
+github.com/openshift/api v0.0.0-20210412212256-79bd8cfbbd59/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio=
+github.com/openshift/api v0.0.0-20210416115537-a60c0dc032fd/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio=
 github.com/openshift/api v0.0.0-20210622200201-7740bfb97397 h1:wp5My+8d2K2HIZw2yZblsIZawma5UrNQGPVcTev/HSA=
 github.com/openshift/api v0.0.0-20210622200201-7740bfb97397/go.mod h1:izBmoXbUu3z5kUa4FjZhvekTsyzIWiOoaIgJiZBBMQs=
 github.com/openshift/baremetal-operator v0.0.0-20210527161605-4e331bfd4b1d h1:S/ucduf5Mu7ktSj/d21079Ye4h7wC8blFFTRUfs03CU=
@@ -1360,6 +1480,7 @@ github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160/go.mo
 github.com/openshift/build-machinery-go v0.0.0-20200424080330-082bf86082cc/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc=
 github.com/openshift/build-machinery-go v0.0.0-20200819073603-48aa266c95f7/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
 github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
+github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
 github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
 github.com/openshift/client-go v0.0.0-20200929181438-91d71ef2122c h1:DQTWW8DGRN7fu5qwEPcbdP9hAxXi7dm5cvi0hrdR3UE=
 github.com/openshift/client-go v0.0.0-20200929181438-91d71ef2122c/go.mod h1:MwESrlhzumQGcGtPCpz/WjDrlvhu1fMNlLBcNYjO0fY=
@@ -1383,6 +1504,8 @@ github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201027164920-70f2f92e64
 github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201201000827-1117a4fc438c/go.mod h1:21N0wWjiTQypZ7WosEYhcGJHr9JoDR1RBFztE0NvdYM=
 github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201203141909-4dc702fd57a5 h1:75U75i/GfStAartlsP/F9v3Gv3MwzuLwqdLTjP1vPeE=
 github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201203141909-4dc702fd57a5/go.mod h1:/XjFaKnqBc8K/jcRXHO7tau39CmzNinqmpxYaQGRvnE=
+github.com/openshift/cluster-api-provider-ibmcloud v0.0.0-20210608192136-4b79b3b71eec h1:Um4LLVvJ3FK3CXLgye4c1r3FsTGGaumuYNriucJKnu0=
+github.com/openshift/cluster-api-provider-ibmcloud v0.0.0-20210608192136-4b79b3b71eec/go.mod h1:4PlLZBW38lwAUTzV61pDWQ72ikepGW8oywll89tss0M=
 github.com/openshift/cluster-api-provider-kubevirt v0.0.0-20201214114543-e5aed9c73f1f h1:GLvV9l0Qtk8NfqrfDuanHPUyZB413vxp9nUUX1+oyfg=
 github.com/openshift/cluster-api-provider-kubevirt v0.0.0-20201214114543-e5aed9c73f1f/go.mod h1:Moiq8vUJ4IdTaJBxIA756FFJ4GgVXZAiOds7lTpZ1kQ=
 github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 h1:MC6BSZYxFPoqqKj9PdlGjHGVKcMsvn6Kv1NiVzQErZ8=
@@ -1403,6 +1526,8 @@ github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916/go.mod h1:6vw
 github.com/openshift/library-go v0.0.0-20201109112824-093ad3cf6600/go.mod h1:1xYaYQcQsn+AyCRsvOU+Qn5z6GGiCmcblXkT/RZLVfo=
 github.com/openshift/library-go v0.0.0-20201215165635-4ee79b1caed5 h1:u3whBOCmgIDS+nWnoN6JZmfjeISZK2ILuBEnFy6ivpw=
 github.com/openshift/library-go
v0.0.0-20201215165635-4ee79b1caed5/go.mod h1:udseDnqxn5ON8i+NBjDp00fBTK0JRu1/6Y6tf6EivDE= +github.com/openshift/library-go v0.0.0-20210408164723-7a65fdb398e2 h1:eYdrmOSwRqHhfuPK8bhCSkBRUmCNYkgkOLgnImnz3Rs= +github.com/openshift/library-go v0.0.0-20210408164723-7a65fdb398e2/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU= github.com/openshift/machine-api-operator v0.0.0-20190312153711-9650e16c9880/go.mod h1:7HeAh0v04zQn1L+4ItUjvpBQYsm2Nf81WaZLiXTcnkc= github.com/openshift/machine-api-operator v0.2.1-0.20200611014855-9a69f85c32dd/go.mod h1:6vMi+R3xqznBdq5rgeal9N3ak3sOpy50t0fdRCcQXjE= github.com/openshift/machine-api-operator v0.2.1-0.20200701225707-950912b03628/go.mod h1:cxjy/RUzv5C2T5FNl1KKXUgtakWsezWQ642B/CD9VQA= @@ -1413,6 +1538,8 @@ github.com/openshift/machine-api-operator v0.2.1-0.20201111151924-77300d0c997a/g github.com/openshift/machine-api-operator v0.2.1-0.20201203125141-79567cb3368e/go.mod h1:Vxdx8K+8sbdcGozW86hSvcVl5JgJOqNFYhLRRhEM9HY= github.com/openshift/machine-api-operator v0.2.1-0.20210104142355-8e6ae0acdfcf h1:+/Lqs2LFqB0X38Kwakqq5qWy/1YBstY/vuNJcYwqJ3A= github.com/openshift/machine-api-operator v0.2.1-0.20210104142355-8e6ae0acdfcf/go.mod h1:U5eAHChde1XvtQy3s1Zcr7ll4X7heb0SzYpaiAwxmQc= +github.com/openshift/machine-api-operator v0.2.1-0.20210504014029-a132ec00f7dd h1:8gwpgdXv0TPrvPRxms3CNqDwtktmOYxTWD4MrW1TVJk= +github.com/openshift/machine-api-operator v0.2.1-0.20210504014029-a132ec00f7dd/go.mod h1:DFZBMPtC2TYZH5NE9+2JQIpbZAnruqc9F26QmbOm9pw= github.com/openshift/machine-config-operator v0.0.1-0.20201009041932-4fe8559913b8 h1:C4gCipkWTDp0B9jb0wZdLgB+HWC7EzVVwQOeNaKnTRA= github.com/openshift/machine-config-operator v0.0.1-0.20201009041932-4fe8559913b8/go.mod h1:fjKreLaKEeUKsyIkT4wlzIQwUVJ2ZKDUh3CI73ckYIY= github.com/openshift/prom-label-proxy v0.1.1-0.20191016113035-b8153a7f39f1/go.mod h1:p5MuxzsYP1JPsNGwtjtcgRHHlGziCJJfztff91nNixw= @@ -1423,6 +1550,8 @@ github.com/openshift/terraform-provider-aws v1.60.1-0.20210622193531-7d13cfbb1a8 github.com/openshift/terraform-provider-aws v1.60.1-0.20210622193531-7d13cfbb1a8c/go.mod h1:0U3OgA2uDYSc7gNkdWA92+/BxWXwuYhWqqZ4UhM1RCw= github.com/openshift/terraform-provider-azurerm v1.44.1-0.20210224232508-7509319df0f4 h1:pwWMhAha70saUe66//ErWm39hek9FUijl67WfxaqXxY= github.com/openshift/terraform-provider-azurerm v1.44.1-0.20210224232508-7509319df0f4/go.mod h1:MohWawSEwkGnsTTEW/U/sPVxsgQVHBNg2LsqQPbMa/U= +github.com/openshift/terraform-provider-ibm v1.26.2-openshift h1:CARSTy7N23M2Ul4tcC8IhSKL0nlM7WojUpeSaIq2dHY= +github.com/openshift/terraform-provider-ibm v1.26.2-openshift/go.mod h1:V9U5EhKeOWSLAtVtGO3JfWPr/e6uBW9woaQ/1yjw1dc= github.com/openshift/terraform-provider-vsphere v1.24.3-openshift h1:tG83XgfFwH4OLONUeEsxh8JPG9QRrqKLoPzkPmLye/Y= github.com/openshift/terraform-provider-vsphere v1.24.3-openshift/go.mod h1:FgcsrcPpnjLUO4XWpudYiBho9ETIXYRxVXWV7R3Iz6k= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -1455,7 +1584,9 @@ github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= 
+github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw= github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -1466,6 +1597,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= @@ -1547,7 +1680,10 @@ github.com/quasilyte/go-ruleguard v0.2.0/go.mod h1:2RT/tf0Ce0UDj5y243iWKosQogJd8 github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/regex/syntax v0.0.0-20200805063351-8f842688393c/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= github.com/rickb777/date v1.12.5-0.20200422084442-6300e543c4d9 h1:czJCcoUR3FMpHnRQow2E84H/0CPrX1fMAGn9HugzyI4= github.com/rickb777/date v1.12.5-0.20200422084442-6300e543c4d9/go.mod h1:L8WrssTzvgYw34/Ppa0JpJfI7KKXZ2cVGI6Djt0brUU= github.com/rickb777/plural v1.2.0 h1:5tvEc7UBCZ7l8h/2UeybSkt/uu1DQsZFOFdNevmUhlE= @@ -1616,6 +1752,10 @@ github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/softlayer/softlayer-go v1.0.3 h1:9FONm5xzQ9belQtbdryR6gBg4EF6hX6lrjNKi0IvZkU= +github.com/softlayer/softlayer-go v1.0.3/go.mod h1:6HepcfAXROz0Rf63krk5hPZyHT6qyx2MNvYyHof7ik4= +github.com/softlayer/xmlrpc v0.0.0-20200409220501-5f089df7cb7e h1:3OgWYFw7jxCZPcvAg+4R8A50GZ+CCkARF10lxu2qDsQ= +github.com/softlayer/xmlrpc v0.0.0-20200409220501-5f089df7cb7e/go.mod h1:fKZCUVdirrxrBpwd9wb+lSoVixvpwAu8eHzbQB2tums= github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= 
github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= @@ -1675,6 +1815,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1727,6 +1869,7 @@ github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89 github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= @@ -1767,6 +1910,7 @@ github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+ github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1813,6 +1957,8 @@ go.mongodb.org/mongo-driver v1.4.2/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI= go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.5.2 h1:AsxOLoJTgP6YNM0fXWw4OjdluYmWzQYp+lFJL7xu9fU= +go.mongodb.org/mongo-driver v1.5.2/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1877,7 +2023,9 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto 
v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1887,6 +2035,8 @@ golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1978,6 +2128,8 @@ golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2059,6 +2211,7 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2212,6 +2365,7 @@ golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752/go.mod h1:z6u4i615ZeAfBE4X golang.org/x/tools v0.0.0-20201020123448-f5c826d1900e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210107193943-4ed967dd8eff/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2258,6 +2412,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -2269,6 +2424,7 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8 gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/go-playground/validator.v9 v9.31.0 h1:bmXmP2RSNtFES+bn4uYuHT7iJFJv7Vj+an+ZQdDaD1M= gopkg.in/go-playground/validator.v9 v9.31.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -2276,6 +2432,15 @@ gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.61.0 h1:LBCdW4FmFYL4s/vDZD1RQYX7oAR6IjujCYgMdbHBR10= gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= 
+gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -2336,6 +2501,7 @@ k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.21.0-beta.1/go.mod h1:8A+GKfJYDnFlmsIqnwi7z2l5+GwI3fbIdAkPu3xiZKA= k8s.io/api v0.21.0-rc.0/go.mod h1:Dkc/ZauWJrgZhjOjeBgW89xZQiTBJA2RaBKYHXPsi2Y= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= k8s.io/api v0.21.1 h1:94bbZ5NTjdINJEdzOkpS4vdPhkb1VFpTYC9zh43f75c= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= @@ -2366,6 +2532,7 @@ k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.21.0-beta.1/go.mod h1:ZaN7d/yx5I8h2mk8Nu08sdLigsmkt4flkTxCTc9LElI= k8s.io/apimachinery v0.21.0-rc.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= @@ -2378,6 +2545,7 @@ k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8= k8s.io/apiserver v0.21.0-beta.1/go.mod h1:nl/H4DPS1abtRhCj8bhosbyU9XOgnMt0QFK3fAFEhSE= k8s.io/apiserver v0.21.0-rc.0/go.mod h1:QlW7+1CZTZtAcKvJ34/n4DIb8sC93FeQpkd1KSU+Sok= +k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= k8s.io/cli-runtime v0.21.0-rc.0 h1:M2cLtWR/LvuSSK8uhdTppcjyd/tqi3Dz5ZiOVfrhDHE= k8s.io/cli-runtime v0.21.0-rc.0/go.mod h1:Gp9Njd5Z9IayPHKL8AKVVMrdzHrMHN+WnGMHYC7Lsds= k8s.io/client-go v0.21.0-rc.0 h1:lsPZHT1ZniXJcwg2udlaTOhAT8wf7BE0rn9Vj0+LWMA= @@ -2397,6 +2565,7 @@ k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZ k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.21.0-beta.1/go.mod h1:IpCUojpiKp25KNB3/UbEeElznqpQUMvhAOUoC7AbISY= k8s.io/code-generator v0.21.0-rc.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/component-base v0.16.4/go.mod h1:GYQ+4hlkEwdlpAp59Ztc4gYuFhdoZqiAJD1unYDJ3FM= @@ -2409,6 +2578,8 @@ k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoR k8s.io/component-base v0.21.0-beta.1/go.mod h1:WPMZyV0sNk3ruzA8cWt1EO2KWAnLDK2docEC14JWbTM= k8s.io/component-base v0.21.0-rc.0 h1:8YgFPDsIhRx7zCOxikZn77nYRnwxrc9aMiuQDJtK1+g= k8s.io/component-base v0.21.0-rc.0/go.mod h1:XlP0bM7QJFWRGZYPc5NmphkvsYQ+o7804HWH3GTGjDY= 
+k8s.io/component-base v0.21.0 h1:tLLGp4BBjQaCpS/KiuWh7m2xqvAdsxLm4ATxHSe5Zpg= +k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= k8s.io/component-helpers v0.21.0-rc.0/go.mod h1:A8RfzdPEZoysfI5C0RExdjy7Zy/RZET3tcUmt7BMWdc= k8s.io/gengo v0.0.0-20181106084056-51747d6e00da/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -2440,6 +2611,8 @@ k8s.io/kube-aggregator v0.19.1/go.mod h1:oAj1kWeSDCh7sdzUOs6XXPn/jbzJY+yGGxDd0Qy k8s.io/kube-aggregator v0.19.2/go.mod h1:wVsjy6OTeUrWkgG9WVsGftnjpm8JIY0vJV7LH2j4nhM= k8s.io/kube-aggregator v0.20.0 h1:hXjICaaB1d1vRFGTNbOd0Agdn56rihxeGvS8zpuoWuE= k8s.io/kube-aggregator v0.20.0/go.mod h1:3Is/gzzWmhhG/rA3CpA1+eVye87lreBQDFGcAGT7gzo= +k8s.io/kube-aggregator v0.21.0-rc.0 h1:PxnBqTgEQHCOhWl3J6EX2OKbfx0epwgKF4phlhgNyFA= +k8s.io/kube-aggregator v0.21.0-rc.0/go.mod h1:M+whOmsAeQf8ObJ0/eO9Af1Dz2UQEB9OW9BWmt9b2sU= k8s.io/kube-openapi v0.0.0-20181114233023-0317810137be/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= @@ -2518,6 +2691,8 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.1 h1:nYqY2A6oy37sKLYuSBXuQhbj4JVclzJK13BOIvJG5XU= +sigs.k8s.io/structured-merge-diff/v4 v4.1.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index 1b6232f5c7e..448a963f771 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -41,6 +41,7 @@ import ( azuretfvars "github.com/openshift/installer/pkg/tfvars/azure" baremetaltfvars "github.com/openshift/installer/pkg/tfvars/baremetal" gcptfvars "github.com/openshift/installer/pkg/tfvars/gcp" + ibmcloudtfvars "github.com/openshift/installer/pkg/tfvars/ibmcloud" kubevirttfvars "github.com/openshift/installer/pkg/tfvars/kubevirt" libvirttfvars "github.com/openshift/installer/pkg/tfvars/libvirt" openstacktfvars "github.com/openshift/installer/pkg/tfvars/openstack" @@ -51,6 +52,7 @@ import ( "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/types/gcp" + "github.com/openshift/installer/pkg/types/ibmcloud" "github.com/openshift/installer/pkg/types/kubevirt" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -400,6 +402,66 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { Filename: fmt.Sprintf(TfPlatformVarsFileName, platform), Data: data, }) + case ibmcloud.Name: + client, err := installConfig.IBMCloud.Client() + if err != nil { + return err + } + auth := ibmcloudtfvars.Auth{ + APIKey: client.Authenticator.ApiKey, + } + + // TODO: IBM: Get 
master and worker machine info + // masters, err := mastersAsset.Machines() + // if err != nil { + // return err + // } + // masterConfigs := make([]*ibmcloudprovider.IBMCloudMachineProviderSpec, len(masters)) + // for i, m := range masters { + // masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*ibmcloudprovider.IBMCloudMachineProviderSpec) + // } + // workers, err := workersAsset.MachineSets() + // if err != nil { + // return err + // } + // workerConfigs := make([]*ibmcloudprovider.IBMCloudMachineProviderSpec, len(workers)) + // for i, w := range workers { + // workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*ibmcloudprovider.IBMCloudMachineProviderSpec) + // } + + // TODO: IBM: Fetch config from masterConfig instead + zones, err := client.GetVPCZonesForRegion(ctx, installConfig.Config.Platform.IBMCloud.Region) + if err != nil { + return err + } + + // Get CISInstanceCRN from InstallConfig metadata + crn, err := installConfig.IBMCloud.CISInstanceCRN(ctx) + if err != nil { + return err + } + + data, err = ibmcloudtfvars.TFVars( + ibmcloudtfvars.TFVarsSources{ + Auth: auth, + CISInstanceCRN: crn, + PublishStrategy: installConfig.Config.Publish, + ResourceGroupName: installConfig.Config.Platform.IBMCloud.ResourceGroupName, + + // TODO: IBM: Fetch config from masterConfig instead + Region: installConfig.Config.Platform.IBMCloud.Region, + MachineType: "bx2d-4x16", + MasterAvailabilityZones: zones, + ImageURL: string(*rhcosImage), + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: fmt.Sprintf(TfPlatformVarsFileName, platform), + Data: data, + }) case libvirt.Name: masters, err := mastersAsset.Machines() if err != nil { diff --git a/pkg/asset/installconfig/ibmcloud/client.go b/pkg/asset/installconfig/ibmcloud/client.go index 174e4a1cabb..8fd125aad28 100644 --- a/pkg/asset/installconfig/ibmcloud/client.go +++ b/pkg/asset/installconfig/ibmcloud/client.go @@ -9,6 +9,7 @@ import ( "github.com/IBM/go-sdk-core/v5/core" "github.com/IBM/networking-go-sdk/zonesv1" + "github.com/IBM/platform-services-go-sdk/iamidentityv1" "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" "github.com/IBM/platform-services-go-sdk/resourcemanagerv2" "github.com/IBM/vpc-go-sdk/vpcv1" @@ -19,9 +20,8 @@ import ( // API represents the calls made to the API. type API interface { + GetAuthenticatorAPIKeyDetails(ctx context.Context) (*iamidentityv1.APIKey, error) GetCISInstance(ctx context.Context, crnstr string) (*resourcecontrollerv2.ResourceInstance, error) - GetCustomImageByName(ctx context.Context, imageName string, region string) (*vpcv1.Image, error) - GetCustomImages(ctx context.Context, region string) ([]vpcv1.Image, error) GetDNSZones(ctx context.Context) ([]DNSZoneResponse, error) GetEncryptionKey(ctx context.Context, keyCRN string) (*EncryptionKeyResponse, error) GetResourceGroups(ctx context.Context) ([]resourcemanagerv2.ResourceGroup, error) @@ -38,7 +38,7 @@ type Client struct { managementAPI *resourcemanagerv2.ResourceManagerV2 controllerAPI *resourcecontrollerv2.ResourceControllerV2 vpcAPI *vpcv1.VpcV1 - authenticator *core.IamAuthenticator + Authenticator *core.IamAuthenticator } // cisServiceID is the Cloud Internet Services' catalog service ID. 
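Exporting the client's authenticator field (authenticator renamed to Authenticator in the hunk above) is what lets code outside this package read the raw API key, as the ibmcloud tfvars case earlier in this diff does. A minimal sketch of that call pattern, assuming the installconfig ibmcloud package is imported as ibmcloud; the error handling and variable names here are illustrative, not lines from the diff:

	// Construct the IBM Cloud client and hand its API key to the Terraform variables.
	client, err := ibmcloud.NewClient()
	if err != nil {
		return err
	}
	auth := ibmcloudtfvars.Auth{
		APIKey: client.Authenticator.ApiKey, // readable only because the field is now exported
	}
	_ = auth
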
@@ -83,7 +83,7 @@ func NewClient() (*Client, error) { } client := &Client{ - authenticator: authenticator, + Authenticator: authenticator, } if err := client.loadSDKServices(); err != nil { @@ -110,6 +110,25 @@ func (c *Client) loadSDKServices() error { return nil } +// GetAuthenticatorAPIKeyDetails gets detailed information on the API key used +// for authentication to the IBM Cloud APIs. +func (c *Client) GetAuthenticatorAPIKeyDetails(ctx context.Context) (*iamidentityv1.APIKey, error) { + iamIdentityService, err := iamidentityv1.NewIamIdentityV1(&iamidentityv1.IamIdentityV1Options{ + Authenticator: c.Authenticator, + }) + if err != nil { + return nil, err + } + + options := iamIdentityService.NewGetAPIKeysDetailsOptions() + options.SetIamAPIKey(c.Authenticator.ApiKey) + details, _, err := iamIdentityService.GetAPIKeysDetailsWithContext(ctx, options) + if err != nil { + return nil, err + } + return details, nil +} + // GetCISInstance gets a specific Cloud Internet Services instance by its CRN. func (c *Client) GetCISInstance(ctx context.Context, crnstr string) (*resourcecontrollerv2.ResourceInstance, error) { _, cancel := context.WithTimeout(ctx, 1*time.Minute) @@ -141,7 +157,7 @@ func (c *Client) GetDNSZones(ctx context.Context) ([]DNSZoneResponse, error) { for _, instance := range listResourceInstancesResponse.Resources { crnstr := instance.CRN zonesService, err := zonesv1.NewZonesV1(&zonesv1.ZonesV1Options{ - Authenticator: c.authenticator, + Authenticator: c.Authenticator, Crn: crnstr, }) if err != nil { @@ -149,7 +165,11 @@ } options := zonesService.NewListZonesOptions() - listZonesResponse, _, _ := zonesService.ListZones(options) + listZonesResponse, _, err := zonesService.ListZones(options) + + if err != nil { + return nil, err + } for _, zone := range listZonesResponse.Result { if *zone.Status == "active" { @@ -236,48 +256,6 @@ func (c *Client) GetSubnet(ctx context.Context, subnetID string) (*vpcv1.Subnet, return subnet, err } -// GetCustomImages gets a list of custom images within a region. If the image -// status is not "available" it is omitted. -func (c *Client) GetCustomImages(ctx context.Context, region string) ([]vpcv1.Image, error) { - _, cancel := context.WithTimeout(ctx, 1*time.Minute) - defer cancel() - - vpcRegion, err := c.getVPCRegionByName(ctx, region) - if err != nil { - return nil, err - } - - var images []vpcv1.Image - privateImages, err := c.listPrivateImagesForRegion(ctx, *vpcRegion) - if err != nil { - return nil, err - } - for _, image := range privateImages { - if *image.Status == vpcv1.ImageStatusAvailableConst { - images = append(images, image) - } - } - return images, nil -} - -// GetCustomImageByName gets a custom image using its name and region. -func (c *Client) GetCustomImageByName(ctx context.Context, imageName string, region string) (*vpcv1.Image, error) { - _, cancel := context.WithTimeout(ctx, 1*time.Minute) - defer cancel() - - customImages, err := c.GetCustomImages(ctx, region) - if err != nil { - return nil, err - } - - for _, image := range customImages { - if *image.Name == imageName && *image.Status == vpcv1.ImageStatusAvailableConst { - return &image, nil - } - } - return nil, fmt.Errorf("image %q not found", imageName) -} - // GetVSIProfiles gets a list of all VSI profiles.
func (c *Client) GetVSIProfiles(ctx context.Context) ([]vpcv1.InstanceProfile, error) { listInstanceProfilesOptions := c.vpcAPI.NewListInstanceProfilesOptions() @@ -331,28 +309,6 @@ func (c *Client) GetVPCZonesForRegion(ctx context.Context, region string) ([]str return response, err } -func (c *Client) getVPCRegionByName(ctx context.Context, regionName string) (*vpcv1.Region, error) { - region, _, err := c.vpcAPI.GetRegionWithContext(ctx, c.vpcAPI.NewGetRegionOptions(regionName)) - return region, err -} - -func (c *Client) listPrivateImagesForRegion(ctx context.Context, region vpcv1.Region) ([]vpcv1.Image, error) { - listImageOptions := c.vpcAPI.NewListImagesOptions() - listImageOptions.SetVisibility(vpcv1.ImageVisibilityPrivateConst) - - err := c.vpcAPI.SetServiceURL(fmt.Sprintf("%s/v1", *region.Endpoint)) - if err != nil { - return nil, errors.Wrap(err, "failed to set vpc api service url") - } - - listImagesResponse, _, err := c.vpcAPI.ListImagesWithContext(ctx, listImageOptions) - if err != nil { - return nil, errors.Wrap(err, "failed to list vpc images") - } - - return listImagesResponse.Images, nil -} - func (c *Client) getVPCRegions(ctx context.Context) ([]vpcv1.Region, error) { listRegionsOptions := c.vpcAPI.NewListRegionsOptions() listRegionsResponse, _, err := c.vpcAPI.ListRegionsWithContext(ctx, listRegionsOptions) @@ -365,7 +321,7 @@ func (c *Client) getVPCRegions(ctx context.Context) ([]vpcv1.Region, error) { func (c *Client) loadResourceManagementAPI() error { options := &resourcemanagerv2.ResourceManagerV2Options{ - Authenticator: c.authenticator, + Authenticator: c.Authenticator, } resourceManagerV2Service, err := resourcemanagerv2.NewResourceManagerV2(options) if err != nil { @@ -377,7 +333,7 @@ func (c *Client) loadResourceManagementAPI() error { func (c *Client) loadResourceControllerAPI() error { options := &resourcecontrollerv2.ResourceControllerV2Options{ - Authenticator: c.authenticator, + Authenticator: c.Authenticator, } resourceControllerV2Service, err := resourcecontrollerv2.NewResourceControllerV2(options) if err != nil { @@ -389,7 +345,7 @@ func (c *Client) loadResourceControllerAPI() error { func (c *Client) loadVPCV1API() error { vpcService, err := vpcv1.NewVpcV1(&vpcv1.VpcV1Options{ - Authenticator: c.authenticator, + Authenticator: c.Authenticator, }) if err != nil { return err diff --git a/pkg/asset/installconfig/ibmcloud/ibmcloud.go b/pkg/asset/installconfig/ibmcloud/ibmcloud.go index 7b59c27bf89..be5019a32b6 100644 --- a/pkg/asset/installconfig/ibmcloud/ibmcloud.go +++ b/pkg/asset/installconfig/ibmcloud/ibmcloud.go @@ -34,15 +34,9 @@ func Platform() (*ibmcloud.Platform, error) { return nil, err } - clusterOSImage, err := selectClusterOSImage(context.TODO(), client, region) - if err != nil { - return nil, err - } - return &ibmcloud.Platform{ ResourceGroupName: resourceGroup, Region: region, - ClusterOSImage: clusterOSImage, }, nil } @@ -126,46 +120,3 @@ func selectRegion() (string, error) { } return selectedRegion, nil } - -func selectClusterOSImage(ctx context.Context, client *Client, region string) (string, error) { - customImages, err := client.GetCustomImages(ctx, region) - if err != nil { - return "", err - } - - if len(customImages) == 0 { - return "", errors.New("could not find custom RHCOS image") - } - - var customImageOptions []string - for _, image := range customImages { - customImageOptions = append(customImageOptions, *image.Name) - } - - sort.Strings(customImageOptions) - - var selectedImage string - err = survey.Ask([]*survey.Question{ - { 
- Prompt: &survey.Select{ - Message: "RHCOS Custom Image", - Help: "The custom RHCOS image to use for machines.", - Options: customImageOptions, - Default: customImageOptions[0], - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - choice := ans.(core.OptionAnswer).Value - i := sort.SearchStrings(customImageOptions, choice) - if i == len(customImageOptions) || customImageOptions[i] != choice { - return errors.Errorf("invalid image %q", choice) - } - return nil - }), - }, - }, &selectedImage) - if err != nil { - return "", err - } - - return selectedImage, nil -} diff --git a/pkg/asset/installconfig/ibmcloud/metadata.go b/pkg/asset/installconfig/ibmcloud/metadata.go new file mode 100644 index 00000000000..df0f9514c6d --- /dev/null +++ b/pkg/asset/installconfig/ibmcloud/metadata.go @@ -0,0 +1,87 @@ +package ibmcloud + +import ( + "context" + "fmt" + "sync" +) + +// Metadata holds additional metadata for InstallConfig resources that +// does not need to be user-supplied (e.g. because it can be retrieved +// from external APIs). +type Metadata struct { + BaseDomain string + + accountID string + cisInstanceCRN string + client *Client + + mutex sync.Mutex +} + +// NewMetadata initializes a new Metadata object. +func NewMetadata(baseDomain string) *Metadata { + return &Metadata{BaseDomain: baseDomain} +} + +// AccountID returns the IBM Cloud account ID associated with the authentication +// credentials. +func (m *Metadata) AccountID(ctx context.Context) (string, error) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.accountID == "" { + client, err := m.Client() + if err != nil { + return "", err + } + + apiKeyDetails, err := client.GetAuthenticatorAPIKeyDetails(ctx) + if err != nil { + return "", err + } + + m.accountID = *apiKeyDetails.AccountID + } + return m.accountID, nil +} + +// CISInstanceCRN returns the Cloud Internet Services instance CRN that is +// managing the DNS zone for the base domain. +func (m *Metadata) CISInstanceCRN(ctx context.Context) (string, error) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.cisInstanceCRN == "" { + client, err := m.Client() + if err != nil { + return "", err + } + + zones, err := client.GetDNSZones(ctx) + if err != nil { + return "", err + } + + for _, z := range zones { + if z.Name == m.BaseDomain { + m.cisInstanceCRN = z.CISInstanceCRN + return m.cisInstanceCRN, nil + } + } + return "", fmt.Errorf("cisInstanceCRN unknown: DNS zone %q not found", m.BaseDomain) + } + return m.cisInstanceCRN, nil +} +
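The two accessors above resolve their values lazily: the first call goes out to the IBM Cloud APIs, every later call serves the cached field under the mutex. A usage sketch (names and context are illustrative, not part of the diff):

	meta := ibmcloud.NewMetadata("example.com") // hypothetical base domain
	accountID, err := meta.AccountID(ctx)       // first call queries IAM for the key's account
	if err != nil {
		return err
	}
	crn, err := meta.CISInstanceCRN(ctx) // first call scans CIS DNS zones for the base domain
	if err != nil {
		return err
	}
	_, _ = accountID, crn

+// Client returns a client used for making API calls to IBM Cloud services.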
+func (m *Metadata) Client() (*Client, error) { + if m.client == nil { + client, err := NewClient() + if err != nil { + return nil, err + } + m.client = client + } + return m.client, nil +} diff --git a/pkg/asset/installconfig/ibmcloud/mock/ibmcloudclient_generated.go b/pkg/asset/installconfig/ibmcloud/mock/ibmcloudclient_generated.go index f9910272544..d4ee750a194 100644 --- a/pkg/asset/installconfig/ibmcloud/mock/ibmcloudclient_generated.go +++ b/pkg/asset/installconfig/ibmcloud/mock/ibmcloudclient_generated.go @@ -6,6 +6,7 @@ package mock import ( context "context" + iamidentityv1 "github.com/IBM/platform-services-go-sdk/iamidentityv1" resourcecontrollerv2 "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" resourcemanagerv2 "github.com/IBM/platform-services-go-sdk/resourcemanagerv2" vpcv1 "github.com/IBM/vpc-go-sdk/vpcv1" @@ -37,49 +38,34 @@ func (m *MockAPI) EXPECT() *MockAPIMockRecorder { return m.recorder } -// GetCISInstance mocks base method -func (m *MockAPI) GetCISInstance(ctx context.Context, crnstr string) (*resourcecontrollerv2.ResourceInstance, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCISInstance", ctx, crnstr) - ret0, _ := ret[0].(*resourcecontrollerv2.ResourceInstance) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCISInstance indicates an expected call of GetCISInstance -func (mr *MockAPIMockRecorder) GetCISInstance(ctx, crnstr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCISInstance", reflect.TypeOf((*MockAPI)(nil).GetCISInstance), ctx, crnstr) -} - -// GetCustomImageByName mocks base method -func (m *MockAPI) GetCustomImageByName(ctx context.Context, imageName, region string) (*vpcv1.Image, error) { +// GetAuthenticatorAPIKeyDetails mocks base method +func (m *MockAPI) GetAuthenticatorAPIKeyDetails(ctx context.Context) (*iamidentityv1.APIKey, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCustomImageByName", ctx, imageName, region) - ret0, _ := ret[0].(*vpcv1.Image) + ret := m.ctrl.Call(m, "GetAuthenticatorAPIKeyDetails", ctx) + ret0, _ := ret[0].(*iamidentityv1.APIKey) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetCustomImageByName indicates an expected call of GetCustomImageByName -func (mr *MockAPIMockRecorder) GetCustomImageByName(ctx, imageName, region interface{}) *gomock.Call { +// GetAuthenticatorAPIKeyDetails indicates an expected call of GetAuthenticatorAPIKeyDetails +func (mr *MockAPIMockRecorder) GetAuthenticatorAPIKeyDetails(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomImageByName", reflect.TypeOf((*MockAPI)(nil).GetCustomImageByName), ctx, imageName, region) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthenticatorAPIKeyDetails", reflect.TypeOf((*MockAPI)(nil).GetAuthenticatorAPIKeyDetails), ctx) } -// GetCustomImages mocks base method -func (m *MockAPI) GetCustomImages(ctx context.Context, region string) ([]vpcv1.Image, error) { +// GetCISInstance mocks base method +func (m *MockAPI) GetCISInstance(ctx context.Context, crnstr string) (*resourcecontrollerv2.ResourceInstance, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCustomImages", ctx, region) - ret0, _ := ret[0].([]vpcv1.Image) + ret := m.ctrl.Call(m, "GetCISInstance", ctx, crnstr) + ret0, _ := ret[0].(*resourcecontrollerv2.ResourceInstance) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetCustomImages indicates an expected call of GetCustomImages -func (mr 
*MockAPIMockRecorder) GetCustomImages(ctx, region interface{}) *gomock.Call { +// GetCISInstance indicates an expected call of GetCISInstance +func (mr *MockAPIMockRecorder) GetCISInstance(ctx, crnstr interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomImages", reflect.TypeOf((*MockAPI)(nil).GetCustomImages), ctx, region) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCISInstance", reflect.TypeOf((*MockAPI)(nil).GetCISInstance), ctx, crnstr) } // GetDNSZones mocks base method diff --git a/pkg/asset/installconfig/ibmcloud/validation.go b/pkg/asset/installconfig/ibmcloud/validation.go index 09d862ac061..06b5d2e620a 100644 --- a/pkg/asset/installconfig/ibmcloud/validation.go +++ b/pkg/asset/installconfig/ibmcloud/validation.go @@ -37,9 +37,6 @@ func Validate(client API, ic *types.InstallConfig) error { func validatePlatform(client API, ic *types.InstallConfig, path *field.Path) field.ErrorList { allErrs := field.ErrorList{} - allErrs = append(allErrs, validateClusterOSImage(client, ic.Platform.IBMCloud.ClusterOSImage, ic.Platform.IBMCloud.Region, path)...) - allErrs = append(allErrs, validateResourceGroup(client, ic, path)...) - if ic.Platform.IBMCloud.ResourceGroupName != "" { allErrs = append(allErrs, validateResourceGroup(client, ic, path)...) } @@ -148,15 +145,6 @@ func validateResourceGroup(client API, ic *types.InstallConfig, path *field.Path return allErrs } -func validateClusterOSImage(client API, imageName string, region string, path *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - customImage, _ := client.GetCustomImageByName(context.TODO(), imageName, region) - if customImage == nil { - allErrs = append(allErrs, field.NotFound(path.Child("clusterOSImage"), imageName)) - } - return allErrs -} - func validateNetworking(client API, ic *types.InstallConfig, path *field.Path) field.ErrorList { allErrs := field.ErrorList{} platform := ic.Platform.IBMCloud @@ -187,7 +175,7 @@ func validateSubnets(client API, ic *types.InstallConfig, subnets []string, path allErrs = append(allErrs, validateSubnetZone(client, subnet, validZones, subnetPath)...) 
} -	// TODO: IBM: additional subnet validation +	// TODO: IBM[#80]: additional subnet validation return allErrs } diff --git a/pkg/asset/installconfig/ibmcloud/validation_test.go b/pkg/asset/installconfig/ibmcloud/validation_test.go index defcbf15013..76f60ba7880 100644 --- a/pkg/asset/installconfig/ibmcloud/validation_test.go +++ b/pkg/asset/installconfig/ibmcloud/validation_test.go @@ -19,7 +19,6 @@ type editFunctions []func(ic *types.InstallConfig) var ( validRegion = "us-south" validCIDR = "10.0.0.0/16" - validClusterOSImage = "valid-rhcos-image" validDNSZoneID = "valid-zone-id" validBaseDomain = "valid.base.domain" validVPC = "valid-vpc" @@ -37,10 +36,8 @@ var ( validInstanceProfies = []vpcv1.InstanceProfile{{Name: &[]string{"type-a"}[0]}, {Name: &[]string{"type-b"}[0]}} - notFoundBaseDomain = func(ic *types.InstallConfig) { ic.BaseDomain = "notfound.base.domain" } - notFoundInRegionClusterOSImage = func(ic *types.InstallConfig) { ic.IBMCloud.Region = "us-east" } - notFoundClusterOSImage = func(ic *types.InstallConfig) { ic.IBMCloud.ClusterOSImage = "not-found" } - validVPCConfig = func(ic *types.InstallConfig) { + notFoundBaseDomain = func(ic *types.InstallConfig) { ic.BaseDomain = "notfound.base.domain" } + validVPCConfig = func(ic *types.InstallConfig) { ic.IBMCloud.VPC = validVPC ic.IBMCloud.Subnets = validSubnets } @@ -92,8 +89,7 @@ func validInstallConfig() *types.InstallConfig { func validMinimalPlatform() *ibmcloudtypes.Platform { return &ibmcloudtypes.Platform{ - Region: validRegion, - ClusterOSImage: validClusterOSImage, + Region: validRegion, } } @@ -112,16 +108,6 @@ func TestValidate(t *testing.T) { edits: editFunctions{}, errorMsg: "", }, - { - name: "not found clusterOSImage in region", - edits: editFunctions{notFoundInRegionClusterOSImage}, - errorMsg: `^platform\.ibmcloud\.clusterOSImage: Not found: "valid-rhcos-image"$`, - }, - { - name: "not found clusterOSImage", - edits: editFunctions{notFoundClusterOSImage}, - errorMsg: `^platform\.ibmcloud\.clusterOSImage: Not found: "not-found"$`, - }, { name: "valid vpc config", edits: editFunctions{validVPCConfig}, @@ -154,10 +140,6 @@ mockCtrl := gomock.NewController(t) ibmcloudClient := mock.NewMockAPI(mockCtrl) - ibmcloudClient.EXPECT().GetCustomImageByName(gomock.Any(), validClusterOSImage, validRegion).Return(&vpcv1.Image{}, nil).AnyTimes() - ibmcloudClient.EXPECT().GetCustomImageByName(gomock.Any(), validClusterOSImage, gomock.Not(validRegion)).Return(nil, fmt.Errorf("")).AnyTimes() - ibmcloudClient.EXPECT().GetCustomImageByName(gomock.Any(), gomock.Not(validClusterOSImage), validRegion).Return(nil, fmt.Errorf("")).AnyTimes() - ibmcloudClient.EXPECT().GetVPC(gomock.Any(), validVPC).Return(&vpcv1.VPC{}, nil).AnyTimes() ibmcloudClient.EXPECT().GetVPC(gomock.Any(), "not-found").Return(nil, &ibmcloud.VPCResourceNotFoundError{}) ibmcloudClient.EXPECT().GetVPC(gomock.Any(), "internal-error-vpc").Return(nil, fmt.Errorf("")) diff --git a/pkg/asset/installconfig/installconfig.go b/pkg/asset/installconfig/installconfig.go index d9c9461b01b..100862595cc 100644 --- a/pkg/asset/installconfig/installconfig.go
+++ b/pkg/asset/installconfig/installconfig.go @@ -30,10 +30,11 @@ const ( // InstallConfig generates the install-config.yaml file. type InstallConfig struct { - Config *types.InstallConfig `json:"config"` - File *asset.File `json:"file"` - AWS *aws.Metadata `json:"aws,omitempty"` - Azure *icazure.Metadata `json:"azure,omitempty"` + Config *types.InstallConfig `json:"config"` + File *asset.File `json:"file"` + AWS *aws.Metadata `json:"aws,omitempty"` + Azure *icazure.Metadata `json:"azure,omitempty"` + IBMCloud *icibmcloud.Metadata `json:"ibmcloud,omitempty"` } var _ asset.WritableAsset = (*InstallConfig)(nil) @@ -148,6 +149,9 @@ func (a *InstallConfig) finish(filename string) error { if a.Config.Azure != nil { a.Azure = icazure.NewMetadata(a.Config.Azure.CloudName, a.Config.Azure.ARMEndpoint) } + if a.Config.IBMCloud != nil { + a.IBMCloud = icibmcloud.NewMetadata(a.Config.BaseDomain) + } if err := validation.ValidateInstallConfig(a.Config).ToAggregate(); err != nil { if filename == "" { return errors.Wrap(err, "invalid install config") diff --git a/pkg/asset/installconfig/platformpermscheck.go b/pkg/asset/installconfig/platformpermscheck.go index aeaed949212..8939158bd67 100644 --- a/pkg/asset/installconfig/platformpermscheck.go +++ b/pkg/asset/installconfig/platformpermscheck.go @@ -91,8 +91,7 @@ func (a *PlatformPermsCheck) Generate(dependencies asset.Parents) error { return errors.Wrap(err, "failed to validate services in this project") } case ibmcloud.Name: - // no permissions to check - // TODO: IBM: verify there are none + // TODO: IBM[#90]: platformpermscheck case kubevirt.Name: client, err := kubevirtconfig.NewClient() if err != nil { diff --git a/pkg/asset/installconfig/platformprovisioncheck.go b/pkg/asset/installconfig/platformprovisioncheck.go index ee5fa5c84a1..29a028035d9 100644 --- a/pkg/asset/installconfig/platformprovisioncheck.go +++ b/pkg/asset/installconfig/platformprovisioncheck.go @@ -82,7 +82,7 @@ func (a *PlatformProvisionCheck) Generate(dependencies asset.Parents) error { return err } case ibmcloud.Name: - // TODO: IBM: add validation for provisioning + // TODO: IBM[#91]: add validation for provisioning case openstack.Name: err = osconfig.ValidateForProvisioning(ic.Config) if err != nil { diff --git a/pkg/asset/machines/ibmcloud/machines.go b/pkg/asset/machines/ibmcloud/machines.go new file mode 100644 index 00000000000..f64113b7acf --- /dev/null +++ b/pkg/asset/machines/ibmcloud/machines.go @@ -0,0 +1,143 @@ +package ibmcloud + +import ( + "fmt" + + ibmcloudprovider "github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1" + machineapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/ibmcloud" +) + +// Machines returns a list of machines for a machinepool. 
+func Machines(clusterID string, config *types.InstallConfig, pool *types.MachinePool, role, userDataSecret string) ([]machineapi.Machine, error) { + if configPlatform := config.Platform.Name(); configPlatform != ibmcloud.Name { + return nil, fmt.Errorf("non-IBMCloud configuration: %q", configPlatform) + } + if poolPlatform := pool.Platform.Name(); poolPlatform != ibmcloud.Name { + return nil, fmt.Errorf("non-IBMCloud machine-pool: %q", poolPlatform) + } + platform := config.Platform.IBMCloud + mpool := pool.Platform.IBMCloud + azs := mpool.Zones + + total := int64(1) + if pool.Replicas != nil { + total = *pool.Replicas + } + + var machines []machineapi.Machine + for idx := int64(0); idx < total; idx++ { + azIndex := int(idx) % len(azs) + provider, err := provider(clusterID, platform, mpool, azIndex, role, userDataSecret) + if err != nil { + return nil, errors.Wrap(err, "failed to create provider") + } + machine := machineapi.Machine{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "machine.openshift.io/v1beta1", + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "openshift-machine-api", + Name: fmt.Sprintf("%s-%s-%d", clusterID, pool.Name, idx), + Labels: map[string]string{ + "machine.openshift.io/cluster-api-cluster": clusterID, + "machine.openshift.io/cluster-api-machine-role": role, + "machine.openshift.io/cluster-api-machine-type": role, + }, + }, + Spec: machineapi.MachineSpec{ + ProviderSpec: machineapi.ProviderSpec{ + Value: &runtime.RawExtension{Object: provider}, + }, + }, + } + + machines = append(machines, machine) + } + + return machines, nil +} + +func provider(clusterID string, + platform *ibmcloud.Platform, + mpool *ibmcloud.MachinePool, + azIdx int, + role string, + userDataSecret string, +) (*ibmcloudprovider.IBMCloudMachineProviderSpec, error) { + az := mpool.Zones[azIdx] + + var vpc string + if platform.VPC != "" { + vpc = platform.VPC + } else { + vpc = fmt.Sprintf("%s-vpc", clusterID) + } + + var resourceGroup string + if platform.ResourceGroupName != "" { + resourceGroup = platform.ResourceGroupName + } else { + resourceGroup = clusterID + } + + subnet, err := getSubnetName(clusterID, role, az) + if err != nil { + return nil, err + } + + securityGroup, err := getSecurityGroupName(clusterID, role) + if err != nil { + return nil, err + } + + return &ibmcloudprovider.IBMCloudMachineProviderSpec{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ibmcloudproviderconfig.openshift.io/v1beta1", + Kind: "IBMCloudMachineProviderSpec", + }, + VPC: vpc, + Tags: []ibmcloudprovider.TagSpecs{}, + Image: fmt.Sprintf("%s-rhcos", clusterID), + Profile: mpool.InstanceType, + Region: platform.Region, + ResourceGroup: resourceGroup, + Zone: az, + PrimaryNetworkInterface: ibmcloudprovider.NetworkInterface{ + Subnet: subnet, + SecurityGroups: []string{securityGroup}, + }, + UserDataSecret: &corev1.LocalObjectReference{Name: userDataSecret}, + CredentialsSecret: &corev1.LocalObjectReference{Name: "ibmcloud-credentials"}, + // TODO: IBM: Boot volume encryption key + }, nil +} + +func getSubnetName(clusterID string, role string, zone string) (string, error) { + switch role { + case "master": + return fmt.Sprintf("%s-subnet-control-plane-%s", clusterID, zone), nil + case "worker": + return fmt.Sprintf("%s-subnet-compute-%s", clusterID, zone), nil + default: + return "", fmt.Errorf("invalid machine role %v", role) + } +} + +func getSecurityGroupName(clusterID string, role string) (string, error) { + switch role { + case "master": + return fmt.Sprintf("%s-security-group-control-plane", 
clusterID), nil
+ case "worker":
+ return fmt.Sprintf("%s-security-group-compute", clusterID), nil
+ default:
+ return "", fmt.Errorf("invalid machine role %v", role)
+ }
+}
diff --git a/pkg/asset/machines/ibmcloud/machinesets.go b/pkg/asset/machines/ibmcloud/machinesets.go
new file mode 100644
index 00000000000..555eeacb6dd
--- /dev/null
+++ b/pkg/asset/machines/ibmcloud/machinesets.go
@@ -0,0 +1,85 @@
+package ibmcloud
+
+import (
+ "fmt"
+ "strings"
+
+ machineapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1"
+ "github.com/pkg/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "github.com/openshift/installer/pkg/types"
+ "github.com/openshift/installer/pkg/types/ibmcloud"
+)
+
+// MachineSets returns a list of machinesets for a machinepool.
+func MachineSets(clusterID string, config *types.InstallConfig, pool *types.MachinePool, role, userDataSecret string) ([]*machineapi.MachineSet, error) {
+ if configPlatform := config.Platform.Name(); configPlatform != ibmcloud.Name {
+ return nil, fmt.Errorf("non-IBMCloud configuration: %q", configPlatform)
+ }
+ if poolPlatform := pool.Platform.Name(); poolPlatform != ibmcloud.Name {
+ return nil, fmt.Errorf("non-IBMCloud machine-pool: %q", poolPlatform)
+ }
+ platform := config.Platform.IBMCloud
+ mpool := pool.Platform.IBMCloud
+ azs := mpool.Zones
+
+ total := int64(0)
+ if pool.Replicas != nil {
+ total = *pool.Replicas
+ }
+ numOfAZs := int64(len(azs))
+ var machinesets []*machineapi.MachineSet
+ for idx, az := range azs {
+ // Spread replicas as evenly as possible across the zones: each zone
+ // gets total/numOfAZs machines, and the first total%numOfAZs zones
+ // take one extra.
+ replicas := int32(total / numOfAZs)
+ if int64(idx) < total%numOfAZs {
+ replicas++
+ }
+
+ provider, err := provider(clusterID, platform, mpool, idx, role, userDataSecret)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create provider")
+ }
+ name := fmt.Sprintf("%s-%s-%s", clusterID, pool.Name, strings.TrimPrefix(az, fmt.Sprintf("%s-", platform.Region)))
+ mset := &machineapi.MachineSet{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "machine.openshift.io/v1beta1",
+ Kind: "MachineSet",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "openshift-machine-api",
+ Name: name,
+ Labels: map[string]string{
+ "machine.openshift.io/cluster-api-cluster": clusterID,
+ },
+ },
+ Spec: machineapi.MachineSetSpec{
+ Replicas: &replicas,
+ Selector: metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "machine.openshift.io/cluster-api-machineset": name,
+ "machine.openshift.io/cluster-api-cluster": clusterID,
+ },
+ },
+ Template: machineapi.MachineTemplateSpec{
+ ObjectMeta: machineapi.ObjectMeta{
+ Labels: map[string]string{
+ "machine.openshift.io/cluster-api-machineset": name,
+ "machine.openshift.io/cluster-api-cluster": clusterID,
+ "machine.openshift.io/cluster-api-machine-role": role,
+ "machine.openshift.io/cluster-api-machine-type": role,
+ },
+ },
+ Spec: machineapi.MachineSpec{
+ ProviderSpec: machineapi.ProviderSpec{
+ Value: &runtime.RawExtension{Object: provider},
+ },
+ },
+ },
+ },
+ }
+ machinesets = append(machinesets, mset)
+ }
+ return machinesets, nil
+}
diff --git a/pkg/asset/machines/ibmcloud/zones.go b/pkg/asset/machines/ibmcloud/zones.go
new file mode 100644
index 00000000000..d6baafbe657
--- /dev/null
+++ b/pkg/asset/machines/ibmcloud/zones.go
@@ -0,0 +1,19 @@
+package ibmcloud
+
+import (
+ "context"
+
+ "github.com/openshift/installer/pkg/asset/installconfig/ibmcloud"
+)
+
+// AvailabilityZones returns a list of supported zones for the specified region.
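+// Zone names include the region prefix, for example us-south-1, us-south-2,
+// and us-south-3 in region us-south.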
+func AvailabilityZones(region string) ([]string, error) { + ctx := context.TODO() + + client, err := ibmcloud.NewClient() + if err != nil { + return nil, err + } + + return client.GetVPCZonesForRegion(ctx, region) +} diff --git a/pkg/asset/machines/master.go b/pkg/asset/machines/master.go index eb144a1b5b8..81e8a76ef7d 100644 --- a/pkg/asset/machines/master.go +++ b/pkg/asset/machines/master.go @@ -40,6 +40,7 @@ import ( "github.com/openshift/installer/pkg/asset/machines/azure" "github.com/openshift/installer/pkg/asset/machines/baremetal" "github.com/openshift/installer/pkg/asset/machines/gcp" + "github.com/openshift/installer/pkg/asset/machines/ibmcloud" "github.com/openshift/installer/pkg/asset/machines/kubevirt" "github.com/openshift/installer/pkg/asset/machines/libvirt" "github.com/openshift/installer/pkg/asset/machines/machineconfig" @@ -55,6 +56,7 @@ import ( azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" + ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud" kubevirttypes "github.com/openshift/installer/pkg/types/kubevirt" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -236,6 +238,24 @@ func (m *Master) Generate(dependencies asset.Parents) error { return errors.Wrap(err, "failed to create master machine objects") } gcp.ConfigMasters(machines, clusterID.InfraID, ic.Publish) + case ibmcloudtypes.Name: + mpool := defaultIBMCloudMachinePoolPlatform() + mpool.Set(ic.Platform.IBMCloud.DefaultMachinePlatform) + mpool.Set(pool.Platform.IBMCloud) + if len(mpool.Zones) == 0 { + azs, err := ibmcloud.AvailabilityZones(ic.Platform.IBMCloud.Region) + if err != nil { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + } + pool.Platform.IBMCloud = &mpool + machines, err = ibmcloud.Machines(clusterID.InfraID, ic, &pool, "master", "master-user-data") + if err != nil { + return errors.Wrap(err, "failed to create master machine objects") + } + // TODO: IBM: implement ConfigMasters() if needed + // ibmcloud.ConfigMasters(machines, clusterID.InfraID, ic.Publish) case libvirttypes.Name: mpool := defaultLibvirtMachinePoolPlatform() mpool.Set(ic.Platform.Libvirt.DefaultMachinePlatform) diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go index e76436e2cd8..e537d768df5 100644 --- a/pkg/asset/machines/worker.go +++ b/pkg/asset/machines/worker.go @@ -40,6 +40,7 @@ import ( "github.com/openshift/installer/pkg/asset/machines/azure" "github.com/openshift/installer/pkg/asset/machines/baremetal" "github.com/openshift/installer/pkg/asset/machines/gcp" + "github.com/openshift/installer/pkg/asset/machines/ibmcloud" "github.com/openshift/installer/pkg/asset/machines/kubevirt" "github.com/openshift/installer/pkg/asset/machines/libvirt" "github.com/openshift/installer/pkg/asset/machines/machineconfig" @@ -55,6 +56,7 @@ import ( azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" + ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud" kubevirttypes "github.com/openshift/installer/pkg/types/kubevirt" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -109,6 +111,12 @@ func defaultGCPMachinePoolPlatform() 
gcptypes.MachinePool { } } +func defaultIBMCloudMachinePoolPlatform() ibmcloudtypes.MachinePool { + return ibmcloudtypes.MachinePool{ + InstanceType: "bx2d-4x16", + } +} + func defaultOpenStackMachinePoolPlatform() openstacktypes.MachinePool { return openstacktypes.MachinePool{ Zones: []string{""}, @@ -356,6 +364,25 @@ func (w *Worker) Generate(dependencies asset.Parents) error { for _, set := range sets { machineSets = append(machineSets, set) } + case ibmcloudtypes.Name: + mpool := defaultIBMCloudMachinePoolPlatform() + mpool.Set(ic.Platform.IBMCloud.DefaultMachinePlatform) + mpool.Set(pool.Platform.IBMCloud) + if len(mpool.Zones) == 0 { + azs, err := ibmcloud.AvailabilityZones(ic.Platform.IBMCloud.Region) + if err != nil { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + } + pool.Platform.IBMCloud = &mpool + sets, err := ibmcloud.MachineSets(clusterID.InfraID, ic, &pool, "worker", "worker-user-data") + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } case libvirttypes.Name: mpool := defaultLibvirtMachinePoolPlatform() mpool.Set(ic.Platform.Libvirt.DefaultMachinePlatform) diff --git a/pkg/asset/manifests/cloudproviderconfig.go b/pkg/asset/manifests/cloudproviderconfig.go index f4b3cc9bdc6..909cd9c7892 100644 --- a/pkg/asset/manifests/cloudproviderconfig.go +++ b/pkg/asset/manifests/cloudproviderconfig.go @@ -1,6 +1,7 @@ package manifests import ( + "context" "fmt" "path/filepath" @@ -14,6 +15,7 @@ import ( "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/manifests/azure" gcpmanifests "github.com/openshift/installer/pkg/asset/manifests/gcp" + ibmcloudmanifests "github.com/openshift/installer/pkg/asset/manifests/ibmcloud" kubevirtmanifests "github.com/openshift/installer/pkg/asset/manifests/kubevirt" openstackmanifests "github.com/openshift/installer/pkg/asset/manifests/openstack" vspheremanifests "github.com/openshift/installer/pkg/asset/manifests/vsphere" @@ -21,6 +23,7 @@ import ( azuretypes "github.com/openshift/installer/pkg/types/azure" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" + ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud" kubevirttypes "github.com/openshift/installer/pkg/types/kubevirt" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -150,6 +153,16 @@ func (cpc *CloudProviderConfig) Generate(dependencies asset.Parents) error { return errors.Wrap(err, "could not create cloud provider config") } cm.Data[cloudProviderConfigDataKey] = gcpConfig + case ibmcloudtypes.Name: + accountID, err := installConfig.IBMCloud.AccountID(context.TODO()) + if err != nil { + return err + } + ibmcloudConfig, err := ibmcloudmanifests.CloudProviderConfig(clusterID.InfraID, accountID) + if err != nil { + return errors.Wrap(err, "could not create cloud provider config") + } + cm.Data[cloudProviderConfigDataKey] = ibmcloudConfig case vspheretypes.Name: folderPath := installConfig.Config.Platform.VSphere.Folder if len(folderPath) == 0 { diff --git a/pkg/asset/manifests/dns.go b/pkg/asset/manifests/dns.go index 1fa6a68e6f0..40d5b91e955 100644 --- a/pkg/asset/manifests/dns.go +++ b/pkg/asset/manifests/dns.go @@ -16,11 +16,13 @@ import ( "github.com/openshift/installer/pkg/asset/installconfig" icaws 
"github.com/openshift/installer/pkg/asset/installconfig/aws" icgcp "github.com/openshift/installer/pkg/asset/installconfig/gcp" + icibmcloud "github.com/openshift/installer/pkg/asset/installconfig/ibmcloud" "github.com/openshift/installer/pkg/types" awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" + ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud" kubevirttypes "github.com/openshift/installer/pkg/types/kubevirt" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -123,6 +125,25 @@ func (d *DNS) Generate(dependencies asset.Parents) error { config.Spec.PublicZone = &configv1.DNSZone{ID: zone.Name} } config.Spec.PrivateZone = &configv1.DNSZone{ID: fmt.Sprintf("%s-private-zone", clusterID.InfraID)} + case ibmcloudtypes.Name: + client, err := icibmcloud.NewClient() + if err != nil { + return errors.Wrap(err, "failed to get IBM Cloud client") + } + + zoneID, err := client.GetZoneIDByName(context.TODO(), installConfig.Config.BaseDomain) + if err != nil { + return errors.Wrap(err, "failed ot get DNS zone ID") + } + + if installConfig.Config.Publish == types.ExternalPublishingStrategy { + config.Spec.PublicZone = &configv1.DNSZone{ + ID: zoneID, + } + } + config.Spec.PrivateZone = &configv1.DNSZone{ + ID: zoneID, + } case libvirttypes.Name, openstacktypes.Name, baremetaltypes.Name, nonetypes.Name, vspheretypes.Name, ovirttypes.Name, kubevirttypes.Name: default: return errors.New("invalid Platform") diff --git a/pkg/asset/manifests/ibmcloud/OWNERS b/pkg/asset/manifests/ibmcloud/OWNERS new file mode 100644 index 00000000000..7a92d60f198 --- /dev/null +++ b/pkg/asset/manifests/ibmcloud/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - ibmcloud-approvers +reviewers: + - ibmcloud-reviewers diff --git a/pkg/asset/manifests/ibmcloud/cloudproviderconfig.go b/pkg/asset/manifests/ibmcloud/cloudproviderconfig.go new file mode 100644 index 00000000000..28433f6a389 --- /dev/null +++ b/pkg/asset/manifests/ibmcloud/cloudproviderconfig.go @@ -0,0 +1,74 @@ +package ibmcloud + +import ( + "bytes" + "text/template" +) + +// https://github.com/kubernetes/kubernetes/blob/368ee4bb8ee7a0c18431cd87ee49f0c890aa53e5/staging/src/k8s.io/legacy-cloud-providers/gce/gce.go#L188 +type config struct { + Global global `gcfg:"global"` + Kubernetes kubernetes `gcfg:"kubernetes"` + LoadBalancerDeployment loadBalancerDeployment `gcfg:"load-balancer-deployment"` + Provider provider `gcfg:"provider"` +} + +type global struct { + Version string `gcfg:"version"` +} + +type kubernetes struct { + ConfigFile string `gcfg:"config-file"` +} + +type loadBalancerDeployment struct { + Image string `gcfg:"image"` + Application string `gcfg:"application"` + VLANIPConfigMap string `gcfg:"vlan-ip-config-map"` +} + +type provider struct { + AccountID string `gcfg:"accountID"` + ClusterID string `gcfg:"clusterID"` +} + +// CloudProviderConfig generates the cloud provider config for the IBMCloud platform. 
+func CloudProviderConfig(infraID string, accountID string) (string, error) { + config := &config{ + Global: global{ + Version: "1.1.0", + }, + Kubernetes: kubernetes{ + ConfigFile: "/mnt/etc/kubernetes/controller-manager-kubeconfig", + }, + LoadBalancerDeployment: loadBalancerDeployment{ + Image: "[REGISTRY]/[NAMESPACE]/keepalived:[TAG]", + Application: "keepalived", + VLANIPConfigMap: "ibm-cloud-provider-vlan-ip-config", + }, + Provider: provider{ + AccountID: accountID, + ClusterID: infraID, + }, + } + buf := &bytes.Buffer{} + template := template.Must(template.New("ibmcloud cloudproviderconfig").Parse(configTmpl)) + if err := template.Execute(buf, config); err != nil { + return "", err + } + return buf.String(), nil +} + +var configTmpl = `[global] +version = {{.Global.Version}} +[kubernetes] +config-file = {{.Kubernetes.ConfigFile}} +[load-balancer-deployment] +image = {{.LoadBalancerDeployment.Image}} +application = {{.LoadBalancerDeployment.Application}} +vlan-ip-config-map = {{.LoadBalancerDeployment.VLANIPConfigMap}} +[provider] +accountID = {{.Provider.AccountID}} +clusterID = {{.Provider.ClusterID}} + +` diff --git a/pkg/asset/manifests/ibmcloud/cloudproviderconfig_test.go b/pkg/asset/manifests/ibmcloud/cloudproviderconfig_test.go new file mode 100644 index 00000000000..4fa496cbfa6 --- /dev/null +++ b/pkg/asset/manifests/ibmcloud/cloudproviderconfig_test.go @@ -0,0 +1,27 @@ +package ibmcloud + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCloudProviderConfig(t *testing.T) { + expectedConfig := `[global] +version = 1.1.0 +[kubernetes] +config-file = /mnt/etc/kubernetes/controller-manager-kubeconfig +[load-balancer-deployment] +image = [REGISTRY]/[NAMESPACE]/keepalived:[TAG] +application = keepalived +vlan-ip-config-map = ibm-cloud-provider-vlan-ip-config +[provider] +accountID = 1e1f75646aef447814a6d907cc83fb3c +clusterID = ocp4-8pxks + +` + + actualConfig, err := CloudProviderConfig("ocp4-8pxks", "1e1f75646aef447814a6d907cc83fb3c") + assert.NoError(t, err, "failed to create cloud provider config") + assert.Equal(t, expectedConfig, actualConfig, "unexpected cloud provider config") +} diff --git a/pkg/asset/manifests/infrastructure.go b/pkg/asset/manifests/infrastructure.go index 2abe16b3fc5..b63d2ef2252 100644 --- a/pkg/asset/manifests/infrastructure.go +++ b/pkg/asset/manifests/infrastructure.go @@ -16,6 +16,7 @@ import ( "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/types/gcp" + "github.com/openshift/installer/pkg/types/ibmcloud" "github.com/openshift/installer/pkg/types/kubevirt" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -168,6 +169,12 @@ func (i *Infrastructure) Generate(dependencies asset.Parents) error { Filename: cloudControllerUIDFilename, Data: content, }) + case ibmcloud.Name: + config.Spec.PlatformSpec.Type = configv1.IBMCloudPlatformType + config.Status.PlatformStatus.IBMCloud = &configv1.IBMCloudPlatformStatus{ + Location: installConfig.Config.Platform.IBMCloud.Region, + ResourceGroupName: installConfig.Config.Platform.IBMCloud.ClusterResourceGroupName(clusterID.InfraID), + } case libvirt.Name: config.Spec.PlatformSpec.Type = configv1.LibvirtPlatformType case none.Name: diff --git a/pkg/asset/manifests/openshift.go b/pkg/asset/manifests/openshift.go index afb1303674e..032f66fe0fa 100644 --- a/pkg/asset/manifests/openshift.go +++ b/pkg/asset/manifests/openshift.go @@ -16,6 +16,7 @@ 
import ( "github.com/openshift/installer/pkg/asset/installconfig" installconfigaws "github.com/openshift/installer/pkg/asset/installconfig/aws" "github.com/openshift/installer/pkg/asset/installconfig/gcp" + "github.com/openshift/installer/pkg/asset/installconfig/ibmcloud" kubeconfig "github.com/openshift/installer/pkg/asset/installconfig/kubevirt" "github.com/openshift/installer/pkg/asset/installconfig/ovirt" "github.com/openshift/installer/pkg/asset/machines" @@ -30,6 +31,7 @@ import ( azuretypes "github.com/openshift/installer/pkg/types/azure" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" + ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud" kubevirttypes "github.com/openshift/installer/pkg/types/kubevirt" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" @@ -135,6 +137,16 @@ func (o *Openshift) Generate(dependencies asset.Parents) error { Base64encodeServiceAccount: base64.StdEncoding.EncodeToString(creds), }, } + case ibmcloudtypes.Name: + client, err := ibmcloud.NewClient() + if err != nil { + return err + } + cloudCreds = cloudCredsSecretData{ + IBMCloud: &IBMCloudCredsSecretData{ + Base64encodeAPIKey: base64.StdEncoding.EncodeToString([]byte(client.Authenticator.ApiKey)), + }, + } case openstacktypes.Name: opts := new(clientconfig.ClientOpts) opts.Cloud = installConfig.Config.Platform.OpenStack.Cloud @@ -237,7 +249,7 @@ func (o *Openshift) Generate(dependencies asset.Parents) error { } switch platform { - case awstypes.Name, openstacktypes.Name, vspheretypes.Name, azuretypes.Name, gcptypes.Name, ovirttypes.Name, kubevirttypes.Name: + case awstypes.Name, openstacktypes.Name, vspheretypes.Name, azuretypes.Name, gcptypes.Name, ibmcloudtypes.Name, ovirttypes.Name, kubevirttypes.Name: if installConfig.Config.CredentialsMode != types.ManualCredentialsMode { assetData["99_cloud-creds-secret.yaml"] = applyTemplateData(cloudCredsSecret.Files()[0].Data, templateData) } diff --git a/pkg/asset/manifests/proxy.go b/pkg/asset/manifests/proxy.go index 17b2db4394a..9f38bad72a9 100644 --- a/pkg/asset/manifests/proxy.go +++ b/pkg/asset/manifests/proxy.go @@ -149,6 +149,8 @@ func createNoProxy(installConfig *installconfig.InstallConfig, network *Networki } } + // TODO: IBM[#95]: proxy + // From https://cloud.google.com/vpc/docs/special-configurations add GCP metadata. // "metadata.google.internal." 
added due to https://bugzilla.redhat.com/show_bug.cgi?id=1754049 if platform == gcp.Name { diff --git a/pkg/asset/manifests/template.go b/pkg/asset/manifests/template.go index 8102132c222..356a1693859 100644 --- a/pkg/asset/manifests/template.go +++ b/pkg/asset/manifests/template.go @@ -24,6 +24,11 @@ type GCPCredsSecretData struct { Base64encodeServiceAccount string } +// IBMCloudCredsSecretData holds encoded credentials and is used to generate cloud-creds secret +type IBMCloudCredsSecretData struct { + Base64encodeAPIKey string +} + // OpenStackCredsSecretData holds encoded credentials and is used to generate cloud-creds secret type OpenStackCredsSecretData struct { Base64encodeCloudCreds string @@ -56,6 +61,7 @@ type cloudCredsSecretData struct { AWS *AwsCredsSecretData Azure *AzureCredsSecretData GCP *GCPCredsSecretData + IBMCloud *IBMCloudCredsSecretData OpenStack *OpenStackCredsSecretData VSphere *VSphereCredsSecretData Ovirt *OvirtCredsSecretData diff --git a/pkg/asset/quota/quota.go b/pkg/asset/quota/quota.go index f7d61ce7219..411df04ff3d 100644 --- a/pkg/asset/quota/quota.go +++ b/pkg/asset/quota/quota.go @@ -24,6 +24,7 @@ import ( "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" typesgcp "github.com/openshift/installer/pkg/types/gcp" + typesibmcloud "github.com/openshift/installer/pkg/types/ibmcloud" "github.com/openshift/installer/pkg/types/kubevirt" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -55,6 +56,11 @@ func (a *PlatformQuotaCheck) Generate(dependencies asset.Parents) error { workersAsset := &machines.Worker{} dependencies.Get(ic, mastersAsset, workersAsset) + // TODO: IBM[#87]: Add quota checks + if ic.Config.Platform.Name() == typesibmcloud.Name { + return nil + } + masters, err := mastersAsset.Machines() if err != nil { return err @@ -121,6 +127,9 @@ func (a *PlatformQuotaCheck) Generate(dependencies asset.Parents) error { return summarizeFailingReport(reports) } summarizeReport(reports) + case typesibmcloud.Name: + // TODO: IBM[#87]: Add quota checks + return nil case typesopenstack.Name: ci, err := openstackvalidation.GetCloudInfo(ic.Config) if err != nil { diff --git a/pkg/asset/rhcos/image.go b/pkg/asset/rhcos/image.go index 2b7e34462bc..2f339ac1bd5 100644 --- a/pkg/asset/rhcos/image.go +++ b/pkg/asset/rhcos/image.go @@ -19,6 +19,7 @@ import ( "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/types/gcp" + "github.com/openshift/installer/pkg/types/ibmcloud" "github.com/openshift/installer/pkg/types/kubevirt" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -102,6 +103,11 @@ func osImage(config *types.InstallConfig) (string, error) { return fmt.Sprintf("projects/%s/global/images/%s", img.Project, img.Name), nil } return "", fmt.Errorf("%s: No GCP build found", st.FormatPrefix(archName)) + case ibmcloud.Name: + if a, ok := streamArch.Artifacts["ibmcloud"]; ok { + return rhcos.FindArtifactURL(a) + } + return "", fmt.Errorf("%s: No ibmcloud build found", st.FormatPrefix(archName)) case libvirt.Name: // 𝅘𝅥𝅮 Everything's going to be a-ok 𝅘𝅥𝅮 if a, ok := streamArch.Artifacts["qemu"]; ok { diff --git a/pkg/explain/printer_test.go b/pkg/explain/printer_test.go index e56d084c5cd..2d1018bf796 100644 --- a/pkg/explain/printer_test.go +++ b/pkg/explain/printer_test.go @@ -43,7 +43,7 @@ func Test_PrintFields(t *testing.T) { 
CredentialsMode is used to explicitly set the mode with which CredentialRequests are satisfied.

 If this field is set, then the installer will not attempt to query the cloud permissions before attempting installation. If the field is not set or empty, then the installer will perform its normal verification that the credentials provided are sufficient to perform an installation.

 There are three possible values for this field, but the valid values are dependent upon the platform being used. "Mint": create new credentials with a subset of the overall permissions for each CredentialsRequest "Passthrough": copy the credentials with all of the overall permissions for each CredentialsRequest "Manual": CredentialsRequests must be handled manually by the user

- For each of the following platforms, the field can set to the specified values. For all other platforms, the field must not be set. AWS: "Mint", "Passthrough", "Manual" Azure: "Mint", "Passthrough", "Manual" GCP: "Mint", "Passthrough", "Manual"
+ For each of the following platforms, the field can set to the specified values. For all other platforms, the field must not be set. AWS: "Mint", "Passthrough", "Manual" Azure: "Mint", "Passthrough", "Manual" GCP: "Mint", "Passthrough", "Manual" IBMCloud: "Manual"

 fips

 Default: false
diff --git a/pkg/terraform/exec/plugins/ibm.go b/pkg/terraform/exec/plugins/ibm.go
new file mode 100644
index 00000000000..a95775a0ddb
--- /dev/null
+++ b/pkg/terraform/exec/plugins/ibm.go
@@ -0,0 +1,15 @@
+package plugins
+
+import (
+ "github.com/IBM-Cloud/terraform-provider-ibm/ibm"
+ "github.com/hashicorp/terraform-plugin-sdk/plugin"
+)
+
+func init() {
+ ibmProvider := func() {
+ plugin.Serve(&plugin.ServeOpts{
+ ProviderFunc: ibm.Provider,
+ })
+ }
+ KnownPlugins["terraform-provider-ibm"] = ibmProvider
+}
diff --git a/pkg/tfvars/ibmcloud/ibmcloud.go b/pkg/tfvars/ibmcloud/ibmcloud.go
new file mode 100644
index 00000000000..725c1678783
--- /dev/null
+++ b/pkg/tfvars/ibmcloud/ibmcloud.go
@@ -0,0 +1,84 @@
+package ibmcloud
+
+import (
+ "encoding/json"
+
+ "github.com/openshift/installer/pkg/tfvars/internal/cache"
+ "github.com/openshift/installer/pkg/types"
+ "github.com/pkg/errors"
+)
+
+// Auth is the collection of credentials that will be used by terraform.
+type Auth struct {
+ APIKey string `json:"ibmcloud_api_key,omitempty"`
+}
+
+type config struct {
+ Auth `json:",inline"`
+ Region string `json:"ibmcloud_region,omitempty"`
+ BootstrapInstanceType string `json:"ibmcloud_bootstrap_instance_type,omitempty"`
+ CISInstanceCRN string `json:"ibmcloud_cis_crn,omitempty"`
+ ExtraTags []string `json:"ibmcloud_extra_tags,omitempty"`
+ MasterAvailabilityZones []string `json:"ibmcloud_master_availability_zones"`
+ MasterInstanceType string `json:"ibmcloud_master_instance_type,omitempty"`
+ PublishStrategy string `json:"ibmcloud_publish_strategy,omitempty"`
+ ResourceGroupName string `json:"ibmcloud_resource_group_name,omitempty"`
+ ImageFilePath string `json:"ibmcloud_image_filepath,omitempty"`
+}
+
+// TFVarsSources contains the parameters to be converted into Terraform variables
+type TFVarsSources struct {
+ Auth Auth
+ CISInstanceCRN string
+ PublishStrategy types.PublishingStrategy
+ ResourceGroupName string
+
+ // TODO: IBM: Fetch config from masterConfig instead
+ MachineType string
+ MasterAvailabilityZones []string
+ Region string
+ ImageURL string
+
+ // TODO: IBM: Future support
+ // MasterConfigs []*ibmcloudprovider.IBMCloudMachineProviderSpec
+ // WorkerConfigs []*ibmcloudprovider.IBMCloudMachineProviderSpec
+}
+
+// TFVars generates ibmcloud-specific Terraform variables launching the cluster.
+func TFVars(sources TFVarsSources) ([]byte, error) {
+ cachedImage, err := cache.DownloadImageFile(sources.ImageURL)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to use cached ibmcloud image")
+ }
+
+ // TODO: IBM: Future support
+ // masterConfig := sources.MasterConfigs[0]
+ // workerConfig := sources.WorkerConfigs[0]
+ // masterAvailabilityZones := make([]string, len(sources.MasterConfigs))
+ // for i, c := range sources.MasterConfigs {
+ // masterAvailabilityZones[i] = c.Zone
+ // }
+
+ cfg := &config{
+ Auth: sources.Auth,
+ CISInstanceCRN: sources.CISInstanceCRN,
+ PublishStrategy: string(sources.PublishStrategy),
+ ResourceGroupName: sources.ResourceGroupName,
+
+ // TODO: IBM: Fetch config from masterConfig instead
+ BootstrapInstanceType: sources.MachineType,
+ MasterAvailabilityZones: sources.MasterAvailabilityZones,
+ MasterInstanceType: sources.MachineType,
+ Region: sources.Region,
+ ImageFilePath: cachedImage,
+
+ // TODO: IBM: Future support
+ // ExtraTags: masterConfig.Tags,
+ // Region: masterConfig.Region,
+ // BootstrapInstanceType: masterConfig.MachineType,
+ // MasterInstanceType: masterConfig.MachineType,
+ // MasterAvailabilityZones: masterAvailabilityZones,
+ }
+
+ return json.MarshalIndent(cfg, "", "  ")
+}
diff --git a/pkg/types/ibmcloud/platform.go b/pkg/types/ibmcloud/platform.go
index e2e58ed88c6..33de12af7a4 100644
--- a/pkg/types/ibmcloud/platform.go
+++ b/pkg/types/ibmcloud/platform.go
@@ -6,9 +6,6 @@ type Platform struct {
 // created.
 Region string `json:"region"`

- // ClusterOSImage is the name of the custom RHCOS image.
- ClusterOSImage string `json:"clusterOSImage"`
-
 // ResourceGroupName is the name of an already existing resource group where the
 // cluster should be installed.
This resource group should only be used for // this specific cluster and the cluster components will assume ownership of diff --git a/pkg/types/ibmcloud/validation/platform.go b/pkg/types/ibmcloud/validation/platform.go index bfe4e402497..593fc89ff96 100644 --- a/pkg/types/ibmcloud/validation/platform.go +++ b/pkg/types/ibmcloud/validation/platform.go @@ -42,10 +42,6 @@ func ValidatePlatform(p *ibmcloud.Platform, fldPath *field.Path) field.ErrorList allErrs = append(allErrs, field.NotSupported(fldPath.Child("region"), p.Region, regionShortNames)) } - if p.ClusterOSImage == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("clusterOSImage"), "clusterOSImage must be specified")) - } - allErrs = append(allErrs, validateVPCConfig(p, fldPath)...) if p.DefaultMachinePlatform != nil { diff --git a/pkg/types/ibmcloud/validation/platform_test.go b/pkg/types/ibmcloud/validation/platform_test.go index 889604b2129..a14cb3bb5cf 100644 --- a/pkg/types/ibmcloud/validation/platform_test.go +++ b/pkg/types/ibmcloud/validation/platform_test.go @@ -10,20 +10,16 @@ import ( ) var ( - validCRN = "crn:v1:bluemix:public:internet-svcs:us-south:a/account:instance::" - validRegion = "us-south" - validClusterOSImage = "valid-rhcos-image-name" + validRegion = "us-south" ) func validMinimalPlatform() *ibmcloud.Platform { return &ibmcloud.Platform{ - Region: validRegion, - ClusterOSImage: validClusterOSImage, + Region: validRegion, } } func validMachinePool() *ibmcloud.MachinePool { - // TODO: IBM: Update this once the MachinePool type is done. return &ibmcloud.MachinePool{} } @@ -56,15 +52,6 @@ func TestValidatePlatform(t *testing.T) { }(), valid: false, }, - { - name: "missing clusterOSImage", - platform: func() *ibmcloud.Platform { - p := validMinimalPlatform() - p.ClusterOSImage = "" - return p - }(), - valid: false, - }, { name: "valid machine pool", platform: func() *ibmcloud.Platform { @@ -124,6 +111,25 @@ func TestValidatePlatform(t *testing.T) { }(), valid: false, }, + { + name: "invalid vpc config missing vpcResourceGroupName", + platform: func() *ibmcloud.Platform { + p := validMinimalPlatform() + p.VPC = "valid-vpc-name" + p.Subnets = []string{"valid-compute-subnet-id", "valid-control-subnet-id"} + return p + }(), + valid: false, + }, + { + name: "invalid vpc config missing vpc and subnets", + platform: func() *ibmcloud.Platform { + p := validMinimalPlatform() + p.VPCResourceGroupName = "vpc-rg-name" + return p + }(), + valid: false, + }, { name: "invalid vpc config missing vpc and vpcResourceGroupName", platform: func() *ibmcloud.Platform { diff --git a/pkg/types/installconfig.go b/pkg/types/installconfig.go index ba1ce5c8119..2896e62a57c 100644 --- a/pkg/types/installconfig.go +++ b/pkg/types/installconfig.go @@ -142,6 +142,7 @@ type InstallConfig struct { // AWS: "Mint", "Passthrough", "Manual" // Azure: "Mint", "Passthrough", "Manual" // GCP: "Mint", "Passthrough", "Manual" + // IBMCloud: "Manual" // +optional CredentialsMode CredentialsMode `json:"credentialsMode,omitempty"` diff --git a/pkg/types/validation/installconfig.go b/pkg/types/validation/installconfig.go index 819193ba8ef..3c59ab05e62 100644 --- a/pkg/types/validation/installconfig.go +++ b/pkg/types/validation/installconfig.go @@ -557,9 +557,10 @@ func validateCloudCredentialsMode(mode types.CredentialsMode, fldPath *field.Pat // validPlatformCredentialsModes is a map from the platform name to a slice of credentials modes that are valid // for the platform. 
If a platform name is not in the map, then the credentials mode cannot be set for that platform. validPlatformCredentialsModes := map[string][]types.CredentialsMode{ - aws.Name: {types.MintCredentialsMode, types.PassthroughCredentialsMode, types.ManualCredentialsMode}, - azure.Name: {types.MintCredentialsMode, types.PassthroughCredentialsMode, types.ManualCredentialsMode}, - gcp.Name: {types.MintCredentialsMode, types.PassthroughCredentialsMode, types.ManualCredentialsMode}, + aws.Name: {types.MintCredentialsMode, types.PassthroughCredentialsMode, types.ManualCredentialsMode}, + azure.Name: {types.MintCredentialsMode, types.PassthroughCredentialsMode, types.ManualCredentialsMode}, + gcp.Name: {types.MintCredentialsMode, types.PassthroughCredentialsMode, types.ManualCredentialsMode}, + ibmcloud.Name: {types.ManualCredentialsMode}, } if validModes, ok := validPlatformCredentialsModes[platform]; ok { validModesSet := sets.NewString() diff --git a/pkg/types/validation/installconfig_test.go b/pkg/types/validation/installconfig_test.go index 0856fbb04a9..5eb80bc079c 100644 --- a/pkg/types/validation/installconfig_test.go +++ b/pkg/types/validation/installconfig_test.go @@ -63,8 +63,7 @@ func validGCPPlatform() *gcp.Platform { func validIBMCloudPlatform() *ibmcloud.Platform { return &ibmcloud.Platform{ - Region: "us-south", - ClusterOSImage: "custom-rhcos-image", + Region: "us-south", } } @@ -1003,7 +1002,7 @@ func TestValidateInstallConfig(t *testing.T) { } return c }(), - expectedError: `^\Q[platform.ibmcloud.region: Required value: region must be specified, platform.ibmcloud.clusterOSImage: Required value: clusterOSImage must be specified]\E$`, + expectedError: `^\Qplatform.ibmcloud.region: Required value: region must be specified\E$`, }, { name: "release image source is not canonical", diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/.gitignore b/vendor/github.com/IBM-Cloud/bluemix-go/.gitignore new file mode 100644 index 00000000000..8db59fead02 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/.gitignore @@ -0,0 +1,4 @@ +*.log +.DS_Store +*.exe + diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/.travis.yml b/vendor/github.com/IBM-Cloud/bluemix-go/.travis.yml new file mode 100644 index 00000000000..c8c63feb546 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/.travis.yml @@ -0,0 +1,10 @@ +language: go + +env: +- MAX_RETRIES=0 + +go: +- 1.x + +script: +- make test diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/Makefile b/vendor/github.com/IBM-Cloud/bluemix-go/Makefile new file mode 100644 index 00000000000..255d59fa152 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/Makefile @@ -0,0 +1,18 @@ +.PHONY : test +test: test_deps vet + go test ./... -timeout 120m + +.PHONY : test_deps +test_deps: + go get -t ./... + +.PHONY : vet + +vet: + @echo 'go vet $$(go list ./... | grep -v vendor)' + @go vet $$(go list ./... | grep -v vendor) ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. 
Please check the reported constructs"; \
+ echo "and fix them if necessary before submitting the code for review."; \
+ exit 1; \
+ fi
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/README.md b/vendor/github.com/IBM-Cloud/bluemix-go/README.md
new file mode 100644
index 00000000000..14e89f4e51b
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/README.md
@@ -0,0 +1,84 @@
+# IBM Cloud SDK for Go
+
+[![Build Status](https://travis-ci.org/IBM-Cloud/bluemix-go.svg?branch=master)](https://travis-ci.org/IBM-Cloud/bluemix-go) [![GoDoc](https://godoc.org/github.com/IBM-Cloud/bluemix-go?status.svg)](https://godoc.org/github.com/IBM-Cloud/bluemix-go)
+
+bluemix-go provides the Go implementation for operating the IBM Cloud platform; it is built on the [Cloud Foundry API][cloudfoundry_api].
+
+## Installing
+
+1. Install the SDK using the following command:
+
+```bash
+go get github.com/IBM-Cloud/bluemix-go
+```
+
+2. Update the SDK to the latest version using the following command:
+
+```bash
+go get -u github.com/IBM-Cloud/bluemix-go
+```
+
+
+## Using the SDK
+
+You must have a working IBM Cloud account to use the APIs. [Sign up][ibmcloud_signup] if you don't have one.
+
+The SDK has an ```examples``` folder with a few examples of how to use the SDK.
+First, you need to create a session.
+
+```go
+import "github.com/IBM-Cloud/bluemix-go/session"
+
+func main(){
+
+	s, err := session.New()
+	.....
+}
+```
+
+Creating a session this way builds a default configuration that reads its values from environment variables.
+You must export the following environment variables.
+* IBMID - This is the IBM ID
+* IBMID_PASSWORD - This is the password for the above ID
+
+OR
+
+* IC_API_KEY/IBMCLOUD_API_KEY - This is the Bluemix API Key. Log in to [IBMCloud][ibmcloud_login] to create one if you don't already have one. See instructions below for creating an API Key.
+
+The default region is _us-south_. You can override it in the [Config struct][ibmcloud_go_config]. You can also provide the value via the _IC_REGION_ or _IBMCLOUD_REGION_ environment variables. Valid regions are:
+* us-south
+* us-east
+* eu-gb
+* eu-de
+* au-syd
+* jp-tok
+
+The maximum number of retries is 3. You can override it in the [Config struct][ibmcloud_go_config], or via the _MAX_RETRIES_ environment variable. A programmatic override of these settings is sketched at the end of this README.
+
+## Creating an IBM Cloud API Key
+
+First, navigate to the IBM Cloud console and use the Manage toolbar to access IAM.
+
+![Access IAM from the Manage toolbar](.screenshots/screenshot_api_keys_iam.png)
+
+On the left, click "IBM Cloud API Keys"
+
+![Click IBM Cloud API Keys](.screenshots/screenshot_api_keys_iam_left.png)
+
+Press "Create API Key"
+
+![Press Create API Key](.screenshots/screenshot_api_keys_create_button.png)
+
+Pick a name and description for your key
+
+![Set name and description](.screenshots/screenshot_api_keys_create.png)
+
+You have created a key! Press the eye icon to show the key. Copy or save it because keys can't be displayed or downloaded twice.
+
+![Your key is now created](.screenshots/screenshot_api_keys_create_successful.png)
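+
+## Configuring the session in code
+
+The same settings can be provided programmatically instead of via environment
+variables. The following is a minimal sketch, assuming the `Region`,
+`MaxRetries`, and `BluemixAPIKey` fields shown in the
+[Config struct][ibmcloud_go_config] godoc:
+
+```go
+package main
+
+import (
+	"log"
+	"os"
+
+	bluemix "github.com/IBM-Cloud/bluemix-go"
+	"github.com/IBM-Cloud/bluemix-go/session"
+)
+
+func main() {
+	retries := 2
+	// Explicit values take the place of IC_REGION / MAX_RETRIES / IC_API_KEY.
+	s, err := session.New(&bluemix.Config{
+		BluemixAPIKey: os.Getenv("IC_API_KEY"), // e.g. the key created above
+		Region:        "eu-de",
+		MaxRetries:    &retries,
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = s // hand the session to a service client, e.g. accountv2.New(s)
+}
+```
+
+Service clients such as `accountv2.New` validate this configuration and
+populate the IAM or UAA tokens when they are constructed.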
+
+[ibmcloud_signup]: https://console.ng.bluemix.net/registration/?target=%2Fdashboard%2Fapps
+[ibmcloud_login]: https://console.ng.bluemix.net
+[ibmcloud_go_config]: https://godoc.org/github.com/IBM-Cloud/bluemix-go#Config
+[cloudfoundry_api]: https://apidocs.cloudfoundry.org/264/
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv1/accounts.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv1/accounts.go
new file mode 100644
index 00000000000..cc643f75967
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv1/accounts.go
@@ -0,0 +1,188 @@
+package accountv1
+
+import (
+ "fmt"
+
+ "github.com/IBM-Cloud/bluemix-go/api/account/accountv2"
+ "github.com/IBM-Cloud/bluemix-go/bmxerror"
+ "github.com/IBM-Cloud/bluemix-go/client"
+ "github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+type AccountUser struct {
+ UserId string `json:"userId"`
+ FirstName string `json:"firstname"`
+ LastName string `json:"lastname"`
+ State string `json:"state"`
+ IbmUniqueId string `json:"ibmUniqueId"`
+ Email string `json:"email"`
+ Phonenumber string `json:"phonenumber"`
+ CreatedOn string `json:"createdOn"`
+ VerifiedOn string `json:"verifiedOn"`
+ Id string `json:"id"`
+ UaaGuid string `json:"uaaGuid"`
+ AccountId string `json:"accountId"`
+ Role string `json:"role"`
+ InvitedOn string `json:"invitedOn"`
+ Photo string `json:"photo"`
+}
+
+//Accounts ...
+type Accounts interface {
+ GetAccountUsers(accountGuid string) ([]AccountUser, error)
+ InviteAccountUser(accountGuid string, userEmail string) (AccountInviteResponse, error)
+ DeleteAccountUser(accountGuid string, userGuid string) error
+ FindAccountUserByUserId(accountGuid string, userId string) (*AccountUser, error)
+}
+
+type account struct {
+ client *client.Client
+}
+
+type AccountUserResource struct {
+ Metadata AccountUserMetadata
+ Entity AccountUserEntity
+}
+
+type Metadata struct {
+ Guid string `json:"guid"`
+ Url string `json:"url"`
+ CreatedAt string `json:"created_at"`
+ UpdatedAt string `json:"updated_at"`
+ VerifiedAt string `json:"verified_at"`
+ Identity Identity `json:"identity"`
+}
+
+type AccountUserEntity struct {
+ AccountId string `json:"account_id"`
+ FirstName string `json:"first_name"`
+ LastName string `json:"last_name"`
+ State string `json:"state"`
+ Email string `json:"email"`
+ PhoneNumber string `json:"phonenumber"`
+ Role string `json:"role"`
+ Photo string `json:"photo"`
+}
+
+type AccountUserMetadata Metadata
+
+type Identity struct {
+ Id string `json:"id"`
+ UserName string `json:"username"`
+ Realmid string `json:"realmid"`
+ Identifier string `json:"identifier"`
+}
+
+// Account Invites ...
+type AccountInviteResponse struct { + Id string `json:"id"` + Email string `json:"email"` + State string `json:"state"` +} + +func (resource AccountUserResource) ToModel() AccountUser { + m := resource.Metadata + e := resource.Entity + + return AccountUser{ + UserId: m.Identity.UserName, + CreatedOn: m.CreatedAt, + VerifiedOn: m.VerifiedAt, + FirstName: e.FirstName, + LastName: e.LastName, + IbmUniqueId: m.Identity.Id, + State: e.State, + Email: e.Email, + Phonenumber: e.PhoneNumber, + Id: m.Guid, + AccountId: e.AccountId, + Role: e.Role, + Photo: e.Photo, + } +} + +type AccountUserQueryResponse struct { + Metadata Metadata + AccountUsers []AccountUserResource `json:"resources"` +} + +func newAccountAPI(c *client.Client) Accounts { + return &account{ + client: c, + } +} + +//GetAccountUser ... +func (a *account) GetAccountUsers(accountGuid string) ([]AccountUser, error) { + var users []AccountUser + + resp, err := a.client.GetPaginated(fmt.Sprintf("/v1/accounts/%s/users", accountGuid), + accountv2.NewAccountPaginatedResources(AccountUserResource{}), + func(resource interface{}) bool { + if accountUser, ok := resource.(AccountUserResource); ok { + users = append(users, accountUser.ToModel()) + return true + } + return false + }) + + if resp.StatusCode == 404 { + return []AccountUser{}, bmxerror.New(ErrCodeNoAccountExists, + fmt.Sprintf("No Account exists with account id:%q", accountGuid)) + } + + return users, err +} + +func (a *account) InviteAccountUser(accountGuid string, userEmail string) (AccountInviteResponse, error) { + type userEntity struct { + Email string `json:"email"` + AccountRole string `json:"account_role"` + } + + payload := struct { + Users []userEntity `json:"users"` + }{ + Users: []userEntity{ + { + Email: userEmail, + AccountRole: "MEMBER", + }, + }, + } + + resp := AccountInviteResponse{} + + _, err := a.client.Post(fmt.Sprintf("/v1/accounts/%s/users", accountGuid), payload, &resp) + return resp, err +} + +func (a *account) DeleteAccountUser(accountGuid string, userGuid string) error { + _, err := a.client.Delete(fmt.Sprintf("/v1/accounts/%s/users/%s", accountGuid, userGuid)) + + return err +} + +func (a *account) FindAccountUserByUserId(accountGuid string, userId string) (*AccountUser, error) { + queryResp := AccountUserQueryResponse{} + + req := rest.GetRequest(*a.client.Config.Endpoint+fmt.Sprintf("/v1/accounts/%s/users", accountGuid)). + Query("user_id", userId) + + response, err := a.client.SendRequest(req, + &queryResp) + + if err != nil { + switch response.StatusCode { + case 404: + return nil, nil + default: + return nil, err + } + } else if len(queryResp.AccountUsers) == 0 { + return nil, nil + } else { + accountUser := queryResp.AccountUsers[0].ToModel() + return &accountUser, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv1/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv1/api_service.go new file mode 100644 index 00000000000..a1eaa7d0a51 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv1/api_service.go @@ -0,0 +1,67 @@ +package accountv1 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//AccountServiceAPI is the accountv2 client ... 
+type AccountServiceAPI interface { + Accounts() Accounts +} + +//ErrCodeNoAccountExists ... +const ErrCodeNoAccountExists = "NoAccountExists" + +//CfService holds the client +type accountService struct { + *client.Client +} + +//New ... +func New(sess *session.Session) (AccountServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.AccountServicev1) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.AccountManagementEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + return &accountService{ + Client: client.New(config, bluemix.AccountServicev1, tokenRefreher), + }, nil +} + +//Accounts API +func (a *accountService) Accounts() Accounts { + return newAccountAPI(a.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv2/accounts.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv2/accounts.go new file mode 100644 index 00000000000..7f08105f1a2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv2/accounts.go @@ -0,0 +1,231 @@ +package accountv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" +) + +//Metadata ... +type Metadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//Resource ... +type Resource struct { + Metadata Metadata +} + +//Account Model ... +type Account struct { + GUID string + Name string + Type string + State string + OwnerGUID string + OwnerUserID string + OwnerUniqueID string + CustomerID string + CountryCode string + CurrencyCode string + Organizations []AccountOrganization + Members []AccountMember `json:"members"` +} + +//AccountOrganization ... +type AccountOrganization struct { + GUID string `json:"guid"` + Region string `json:"region"` +} + +//AccountMember ... +type AccountMember struct { + GUID string `json:"guid"` + UserID string `json:"user_id"` + UniqueID string `json:"unique_id"` +} + +//AccountResource ... +type AccountResource struct { + Resource + Entity AccountEntity +} + +//AccountEntity ... +type AccountEntity struct { + Name string `json:"name"` + Type string `json:"type"` + State string `json:"state"` + OwnerGUID string `json:"owner"` + OwnerUserID string `json:"owner_userid"` + OwnerUniqueID string `json:"owner_unique_id"` + CustomerID string `json:"customer_id"` + CountryCode string `json:"country_code"` + CurrencyCode string `json:"currency_code"` + Organizations []AccountOrganization `json:"organizations_region"` + Members []AccountMember `json:"members"` +} + +//ToModel ... 
+func (resource AccountResource) ToModel() Account { + entity := resource.Entity + + return Account{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + Type: entity.Type, + State: entity.State, + OwnerGUID: entity.OwnerGUID, + OwnerUserID: entity.OwnerUserID, + OwnerUniqueID: entity.OwnerUniqueID, + CustomerID: entity.CustomerID, + CountryCode: entity.CountryCode, + CurrencyCode: entity.CurrencyCode, + Organizations: entity.Organizations, + Members: entity.Members, + } +} + +func (nameQueryResponse AccountNameQueryResponse) ToModel() Account { + entity := nameQueryResponse.Entity + guid := nameQueryResponse.Metadata.GUID + + return Account{ + GUID: guid, + Name: entity.Name, + Type: entity.Type, + State: entity.State, + OwnerGUID: entity.OwnerGUID, + OwnerUserID: entity.OwnerUserID, + OwnerUniqueID: entity.OwnerUniqueID, + CustomerID: entity.CustomerID, + CountryCode: entity.CountryCode, + CurrencyCode: entity.CurrencyCode, + Organizations: entity.Organizations, + Members: entity.Members, + } +} + +//AccountQueryResponse ... +type AccountQueryResponse struct { + Metadata Metadata + Accounts []AccountResource `json:"resources"` +} + +//AccountQueryResponse ... +type AccountNameQueryResponse struct { + Metadata Metadata + Entity AccountEntity +} + +//Accounts ... +type Accounts interface { + List() ([]Account, error) + FindByOrg(orgGUID string, region string) (*Account, error) + FindByOwner(userID string) (*Account, error) + Get(accountId string) (*Account, error) +} + +type account struct { + client *client.Client +} + +func newAccountAPI(c *client.Client) Accounts { + return &account{ + client: c, + } +} + +//FindByOrg ... +func (a *account) FindByOrg(orgGUID, region string) (*Account, error) { + type organizationRegion struct { + GUID string `json:"guid"` + Region string `json:"region"` + } + + payLoad := struct { + OrganizationsRegion []organizationRegion `json:"organizations_region"` + }{ + OrganizationsRegion: []organizationRegion{ + { + GUID: orgGUID, + Region: region, + }, + }, + } + + queryResp := AccountQueryResponse{} + response, err := a.client.Post("/coe/v2/getaccounts", payLoad, &queryResp) + if err != nil { + + if response.StatusCode == 404 { + return nil, bmxerror.New(ErrCodeNoAccountExists, + fmt.Sprintf("No account exists in the given region: %q and the given org: %q", region, orgGUID)) + } + return nil, err + + } + + if len(queryResp.Accounts) > 0 { + account := queryResp.Accounts[0].ToModel() + return &account, nil + } + + return nil, bmxerror.New(ErrCodeNoAccountExists, + fmt.Sprintf("No account exists in the given region: %q and the given org: %q", region, orgGUID)) +} + +func (a *account) List() ([]Account, error) { + var accounts []Account + resp, err := a.client.GetPaginated("/coe/v2/accounts", NewAccountPaginatedResources(AccountResource{}), func(resource interface{}) bool { + if accountResource, ok := resource.(AccountResource); ok { + accounts = append(accounts, accountResource.ToModel()) + return true + } + return false + }) + + if resp.StatusCode == 404 || len(accounts) == 0 { + return nil, bmxerror.New(ErrCodeNoAccountExists, + fmt.Sprintf("No Account exists")) + } + + return accounts, err +} + +//FindByOwner ... +func (a *account) FindByOwner(userID string) (*Account, error) { + accounts, err := a.List() + if err != nil { + return nil, err + } + + for _, a := range accounts { + if a.OwnerUserID == userID { + return &a, nil + } + } + return nil, bmxerror.New(ErrCodeNoAccountExists, + fmt.Sprintf("No account exists for the user %q", userID)) +} + +//Get ... 
+//Get ...
+func (a *account) Get(accountId string) (*Account, error) {
+	queryResp := AccountNameQueryResponse{}
+	response, err := a.client.Get(fmt.Sprintf("/coe/v2/accounts/%s", accountId), &queryResp)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			return nil, bmxerror.New(ErrCodeNoAccountExists,
+				fmt.Sprintf("Account %q does not exist", accountId))
+		}
+		return nil, err
+	}
+
+	account := queryResp.ToModel()
+	return &account, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv2/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv2/api_service.go
new file mode 100644
index 00000000000..200a2842691
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv2/api_service.go
@@ -0,0 +1,67 @@
+package accountv2
+
+import (
+	gohttp "net/http"
+
+	bluemix "github.com/IBM-Cloud/bluemix-go"
+	"github.com/IBM-Cloud/bluemix-go/authentication"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/http"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+	"github.com/IBM-Cloud/bluemix-go/session"
+)
+
+//AccountServiceAPI is the accountv2 client ...
+type AccountServiceAPI interface {
+	Accounts() Accounts
+}
+
+//ErrCodeNoAccountExists ...
+const ErrCodeNoAccountExists = "NoAccountExists"
+
+//accountService holds the client
+type accountService struct {
+	*client.Client
+}
+
+//New ...
+func New(sess *session.Session) (AccountServiceAPI, error) {
+	config := sess.Config.Copy()
+	err := config.ValidateConfigForService(bluemix.AccountService)
+	if err != nil {
+		return nil, err
+	}
+	if config.HTTPClient == nil {
+		config.HTTPClient = http.NewHTTPClient(config)
+	}
+	tokenRefresher, err := authentication.NewUAARepository(config, &rest.Client{
+		DefaultHeader: gohttp.Header{
+			"User-Agent": []string{http.UserAgent()},
+		},
+		HTTPClient: config.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if config.UAAAccessToken == "" {
+		err := authentication.PopulateTokens(tokenRefresher, config)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if config.Endpoint == nil {
+		ep, err := config.EndpointLocator.AccountManagementEndpoint()
+		if err != nil {
+			return nil, err
+		}
+		config.Endpoint = &ep
+	}
+	return &accountService{
+		Client: client.New(config, bluemix.AccountService, tokenRefresher),
+	}, nil
+}
+
+//Accounts API
+func (a *accountService) Accounts() Accounts {
+	return newAccountAPI(a.Client)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv2/paginate.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv2/paginate.go
new file mode 100644
index 00000000000..48cccb99776
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/account/accountv2/paginate.go
@@ -0,0 +1,39 @@
+package accountv2
+
+import (
+	"bytes"
+	"encoding/json"
+	"reflect"
+)
+
+type GenericPaginatedResourcesHandler struct {
+	resourceType reflect.Type
+}
+
+func NewAccountPaginatedResources(resource interface{}) GenericPaginatedResourcesHandler {
+	return GenericPaginatedResourcesHandler{
+		resourceType: reflect.TypeOf(resource),
+	}
+}
+
+func (pr GenericPaginatedResourcesHandler) Resources(data []byte, curURL string) ([]interface{}, string, error) {
+	var paginatedResources = struct {
+		NextUrl        string          `json:"next_url"`
+		ResourcesBytes json.RawMessage `json:"resources"`
+	}{}
+
+	// Surface the unmarshal error instead of silently overwriting it below.
+	if err := json.Unmarshal(data, &paginatedResources); err != nil {
+		return nil, "", err
+	}
+
+	slicePtr := reflect.New(reflect.SliceOf(pr.resourceType))
+	dc := json.NewDecoder(bytes.NewBuffer(paginatedResources.ResourcesBytes))
+	dc.UseNumber()
+	err := dc.Decode(slicePtr.Interface())
+	slice := reflect.Indirect(slicePtr)
+
+	contents := make([]interface{}, 0, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		contents = append(contents, slice.Index(i).Interface())
+	}
+
+	return contents, paginatedResources.NextUrl, err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/certificatemanager/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/certificatemanager/api_service.go
new file mode 100644
index 00000000000..53f0507de1d
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/certificatemanager/api_service.go
@@ -0,0 +1,67 @@
+package certificatemanager
+
+import (
+	gohttp "net/http"
+
+	bluemix "github.com/IBM-Cloud/bluemix-go"
+	"github.com/IBM-Cloud/bluemix-go/authentication"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/http"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+	"github.com/IBM-Cloud/bluemix-go/session"
+)
+
+//ErrCodeAPICreation ...
+const ErrCodeAPICreation = "APICreationError"
+
+//CertificateManagerServiceAPI is the Certificate Manager client ...
+type CertificateManagerServiceAPI interface {
+	Certificate() Certificate
+}
+
+//cmService holds the client
+type cmService struct {
+	*client.Client
+}
+
+//New ...
+func New(sess *session.Session) (CertificateManagerServiceAPI, error) {
+	config := sess.Config.Copy()
+	err := config.ValidateConfigForService(bluemix.CertificateManager)
+	if err != nil {
+		return nil, err
+	}
+	if config.HTTPClient == nil {
+		config.HTTPClient = http.NewHTTPClient(config)
+	}
+	tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{
+		DefaultHeader: gohttp.Header{
+			"User-Agent": []string{http.UserAgent()},
+		},
+		HTTPClient: config.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if config.IAMAccessToken == "" {
+		err := authentication.PopulateTokens(tokenRefresher, config)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if config.Endpoint == nil {
+		ep, err := config.EndpointLocator.CertificateManagerEndpoint()
+		if err != nil {
+			return nil, err
+		}
+		config.Endpoint = &ep
+	}
+
+	return &cmService{
+		Client: client.New(config, bluemix.CertificateManager, tokenRefresher),
+	}, nil
+}
+
+func (c *cmService) Certificate() Certificate {
+	return newCertificateAPI(c.Client)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/certificatemanager/certificate.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/certificatemanager/certificate.go
new file mode 100644
index 00000000000..c48bf7856e1
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/certificatemanager/certificate.go
@@ -0,0 +1,132 @@
+package certificatemanager
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/models"
+)
+
+//Certificate Interface
+type Certificate interface {
+	ImportCertificate(InstanceID string, importData models.CertificateImportData) (models.CertificateInfo, error)
+	OrderCertificate(InstanceID string, orderData models.CertificateOrderData) (models.CertificateInfo, error)
+	RenewCertificate(CertID string, RenewData models.CertificateRenewData) (models.CertificateInfo, error)
+	GetMetaData(CertID string) (models.CertificateInfo, error)
+	GetCertData(CertID string) (models.CertificateGetData, error)
+	DeleteCertificate(CertID string) error
+	UpdateCertificateMetaData(CertID string, updateData models.CertificateMetadataUpdate) error
+	ReimportCertificate(CertID string, reimportData models.CertificateReimportData) (models.CertificateInfo, error)
+	
ListCertificates(InstanceID string) ([]models.CertificateInfo, error) + UpdateOrderPolicy(CertID string, autoRenew models.OrderPolicy) (models.OrderPolicy, error) +} + +//Certificates client struct +type Certificates struct { + client *client.Client +} + +func newCertificateAPI(c *client.Client) Certificate { + return &Certificates{ + client: c, + } +} + +//ImportCertificate .. +func (r *Certificates) ImportCertificate(InstanceID string, importData models.CertificateImportData) (models.CertificateInfo, error) { + certInfo := models.CertificateInfo{} + _, err := r.client.Post(fmt.Sprintf("/api/v3/%s/certificates/import", url.QueryEscape(InstanceID)), importData, &certInfo) + if err != nil { + return certInfo, err + } + return certInfo, err +} + +//OrderCertificate ... +func (r *Certificates) OrderCertificate(InstanceID string, orderdata models.CertificateOrderData) (models.CertificateInfo, error) { + certInfo := models.CertificateInfo{} + _, err := r.client.Post(fmt.Sprintf("/api/v1/%s/certificates/order", url.QueryEscape(InstanceID)), orderdata, &certInfo) + if err != nil { + return certInfo, err + } + return certInfo, err +} + +//RenewCertificate ... +func (r *Certificates) RenewCertificate(CertID string, renewdata models.CertificateRenewData) (models.CertificateInfo, error) { + certInfo := models.CertificateInfo{} + _, err := r.client.Post(fmt.Sprintf("/api/v1/certificate/%s/renew", url.QueryEscape(CertID)), renewdata, &certInfo) + if err != nil { + return certInfo, err + } + return certInfo, err +} + +//GetMetaData ... +func (r *Certificates) GetMetaData(CertID string) (models.CertificateInfo, error) { + certInfo := models.CertificateInfo{} + _, err := r.client.Get(fmt.Sprintf("/api/v1/certificate/%s/metadata", url.QueryEscape(CertID)), &certInfo) + if err != nil { + return certInfo, err + } + return certInfo, err +} + +//GetCertData ... +func (r *Certificates) GetCertData(CertID string) (models.CertificateGetData, error) { + certInfo := models.CertificateGetData{} + _, err := r.client.Get(fmt.Sprintf("/api/v2/certificate/%s", url.QueryEscape(CertID)), &certInfo) + if err != nil { + return certInfo, err + } + return certInfo, err +} + +// DeleteCertificate ... +func (r *Certificates) DeleteCertificate(CertID string) error { + _, err := r.client.Delete(fmt.Sprintf("/api/v2/certificate/%s", url.QueryEscape(CertID))) + return err +} + +// UpdateCertificateMetaData ... +func (r *Certificates) UpdateCertificateMetaData(CertID string, updatemetaData models.CertificateMetadataUpdate) error { + _, err := r.client.Post(fmt.Sprintf("/api/v3/certificate/%s", url.QueryEscape(CertID)), updatemetaData, nil) + return err +} + +// ReimportCertificate ... +func (r *Certificates) ReimportCertificate(CertID string, reimportData models.CertificateReimportData) (models.CertificateInfo, error) { + certInfo := models.CertificateInfo{} + _, err := r.client.Put(fmt.Sprintf("/api/v1/certificate/%s", url.QueryEscape(CertID)), reimportData, &certInfo) + if err != nil { + return certInfo, err + } + return certInfo, err +} + +//ListCertificates ... 
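Before the ListCertificates implementation below, a brief editorial sketch of how the Certificate interface might be exercised end to end. Not part of the vendored file; the instance CRN is a placeholder and session.New is assumed to read credentials from the environment.

```go
package main

import (
	"fmt"
	"log"

	"github.com/IBM-Cloud/bluemix-go/api/certificatemanager"
	"github.com/IBM-Cloud/bluemix-go/session"
)

func main() {
	sess, err := session.New()
	if err != nil {
		log.Fatal(err)
	}
	cms, err := certificatemanager.New(sess)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder Certificate Manager instance CRN; ListCertificates pages
	// through /api/v3/{instanceID}/certificates 200 entries at a time.
	instanceID := "crn:v1:bluemix:public:cloudcerts:us-south:a/account-id:instance-id::"
	certs, err := cms.Certificate().ListCertificates(instanceID)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range certs {
		fmt.Printf("%+v\n", c)
	}
}
```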
+func (r *Certificates) ListCertificates(InstanceID string) ([]models.CertificateInfo, error) { + certificatesInfo := models.CertificatesInfo{} + rawURL := fmt.Sprintf("/api/v3/%s/certificates?page_size=200", url.QueryEscape(InstanceID)) + if _, err := r.client.GetPaginated(rawURL, NewCMSPaginatedResources(models.CertificateInfo{}), func(resource interface{}) bool { + if certificate, ok := resource.(models.CertificateInfo); ok { + certificatesInfo.CertificateList = append(certificatesInfo.CertificateList, certificate) + return true + } + return false + }); err != nil { + return nil, fmt.Errorf("failed to list paginated Certificates: %s", err) + } + return certificatesInfo.CertificateList, nil +} + +//UpdateOrderPolicy .. +func (r *Certificates) UpdateOrderPolicy(CertID string, autoRenew models.OrderPolicy) (models.OrderPolicy, error) { + orderPolicyInfo := models.OrderPolicy{} + _, err := r.client.Put(fmt.Sprintf("/api/v1/certificate/%s/order/policy", url.QueryEscape(CertID)), autoRenew, &orderPolicyInfo) + if err != nil { + return orderPolicyInfo, err + } + return orderPolicyInfo, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/certificatemanager/paginate.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/certificatemanager/paginate.go new file mode 100644 index 00000000000..1836bea148c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/certificatemanager/paginate.go @@ -0,0 +1,51 @@ +package certificatemanager + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +type GenericPaginatedResourcesHandler struct { + resourceType reflect.Type +} + +func NewCMSPaginatedResources(resource interface{}) GenericPaginatedResourcesHandler { + return GenericPaginatedResourcesHandler{ + resourceType: reflect.TypeOf(resource), + } +} + +func (pr GenericPaginatedResourcesHandler) Resources(bytes []byte, curURL string) ([]interface{}, string, error) { + var paginatedResources = struct { + NextPageInfo struct { + StartDocId string `json:"startWithDocId"` + StartOrderByValue string `json:"startWithOrderByValue"` + } `json:"nextPageInfo"` + Certificates json.RawMessage `json:"certificates"` + TotalDocs int `json:"totalScannedDocs"` + }{} + + if err := json.Unmarshal(bytes, &paginatedResources); err != nil { + return nil, "", fmt.Errorf("failed to unmarshal paginated response as json: %s", err) + } + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + dc := json.NewDecoder(strings.NewReader(string(paginatedResources.Certificates))) + dc.UseNumber() + if err := dc.Decode(slicePtr.Interface()); err != nil { + return nil, "", fmt.Errorf("failed to decode paginated objects as %T: %s", pr.resourceType, err) + } + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + if paginatedResources.NextPageInfo.StartDocId == "" && paginatedResources.NextPageInfo.StartOrderByValue == "" { + return contents, "", nil + } + urlprefix := strings.Split(curURL, "?")[0] + nextURL := fmt.Sprintf("%s?page_number=1&&page_size=200&&start_from_document_id=%s&&start_from_orderby_value=%s", urlprefix, strings.Replace(paginatedResources.NextPageInfo.StartDocId, "/", "%2F", -1), paginatedResources.NextPageInfo.StartOrderByValue) + return contents, nextURL, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/api_service.go new file mode 100644 index 00000000000..87512182532 
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/api_service.go
@@ -0,0 +1,125 @@
+package cisv1
+
+import (
+	gohttp "net/http"
+	"strconv"
+
+	bluemix "github.com/IBM-Cloud/bluemix-go"
+	"github.com/IBM-Cloud/bluemix-go/authentication"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/http"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+	"github.com/IBM-Cloud/bluemix-go/session"
+)
+
+//ErrCodeAPICreation ...
+const ErrCodeAPICreation = "APICreationError"
+
+//CisServiceAPI is the Cloud Internet Services API ...
+type CisServiceAPI interface {
+	Zones() Zones
+	Monitors() Monitors
+	Pools() Pools
+	Glbs() Glbs
+	Settings() Settings
+	Ips() Ips
+	Dns() Dns
+	Firewall() Firewall
+	RateLimit() RateLimit
+}
+
+//cisService holds the client
+type cisService struct {
+	*client.Client
+}
+
+//New ...
+func New(sess *session.Session) (CisServiceAPI, error) {
+	config := sess.Config.Copy()
+	err := config.ValidateConfigForService(bluemix.CisService)
+	if err != nil {
+		return nil, err
+	}
+	if config.HTTPClient == nil {
+		config.HTTPClient = http.NewHTTPClient(config)
+	}
+	tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{
+		DefaultHeader: gohttp.Header{
+			"User-Agent": []string{http.UserAgent()},
+		},
+		HTTPClient: config.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if config.IAMAccessToken == "" {
+		err := authentication.PopulateTokens(tokenRefresher, config)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if config.Endpoint == nil {
+		ep, err := config.EndpointLocator.CisEndpoint()
+		if err != nil {
+			return nil, err
+		}
+		config.Endpoint = &ep
+	}
+
+	return &cisService{
+		Client: client.New(config, bluemix.CisService, tokenRefresher),
+	}, nil
+}
+
+//Zones implements Zones API
+func (c *cisService) Zones() Zones {
+	return newZoneAPI(c.Client)
+}
+
+//Monitors implements Monitors API
+func (c *cisService) Monitors() Monitors {
+	return newMonitorAPI(c.Client)
+}
+
+//Pools implements Pools API
+func (c *cisService) Pools() Pools {
+	return newPoolAPI(c.Client)
+}
+
+//Glbs implements Glbs API
+func (c *cisService) Glbs() Glbs {
+	return newGlbAPI(c.Client)
+}
+
+//Settings implements Settings API
+func (c *cisService) Settings() Settings {
+	return newSettingsAPI(c.Client)
+}
+
+//Ips implements Ips API
+func (c *cisService) Ips() Ips {
+	return newIpsAPI(c.Client)
+}
+
+//Dns implements DNS records API
+func (c *cisService) Dns() Dns {
+	return newDnsAPI(c.Client)
+}
+
+func (c *cisService) Firewall() Firewall {
+	return newFirewallAPI(c.Client)
+}
+
+func (c *cisService) RateLimit() RateLimit {
+	return newRateLimitAPI(c.Client)
+}
+
+func errorsToString(e []Error) string {
+	var errMsg string
+	for _, err := range e {
+		errFrag := "Code: " + strconv.Itoa(err.Code) + " " + err.Msg
+		errMsg = errMsg + errFrag
+	}
+	return errMsg
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/dns.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/dns.go
new file mode 100644
index 00000000000..cf6893b8b0f
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/dns.go
@@ -0,0 +1,120 @@
+package cisv1
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
+)
+
+type DnsRecord struct {
+	Id         string     `json:"id"`
+	Name       string     `json:"name,omitempty"`
+	DnsType    string     `json:"type"`
+	Content    string     `json:"content"`
+	ZoneId     string     `json:"zone_id"`
+	ZoneName   string     `json:"zone_name"`
+	CreatedOn  *time.Time `json:"created_on,omitempty"`
+	ModifiedOn 
*time.Time `json:"modified_on,omitempty"` + Proxiable bool `json:"proxiable"` + Proxied bool `json:"proxied"` + Ttl int `json:"ttl"` + Priority int `json:"priority,omitempty"` + Data interface{} `json:"data,omitempty"` +} + +type DnsResults struct { + DnsList []DnsRecord `json:"result"` + ResultsInfo ResultsCount `json:"result_info"` + Success bool `json:"success"` + Errors []Error `json:"errors"` +} + +type DnsResult struct { + Dns DnsRecord `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type DnsBody struct { + Name string `json:"name,omitempty"` + DnsType string `json:"type"` + Content string `json:"content,omitempty"` + Priority int `json:"priority,omitempty"` + Data interface{} `json:"data,omitempty"` + Proxied bool `json:"proxied,omitempty"` + Ttl int `json:"ttl,omitempty"` +} + +type Dns interface { + ListDns(cisId string, zoneId string) ([]DnsRecord, error) + GetDns(cisId string, zoneId string, dnsId string) (*DnsRecord, error) + CreateDns(cisId string, zoneId string, dnsBody DnsBody) (*DnsRecord, error) + DeleteDns(cisId string, zoneId string, dnsId string) error + UpdateDns(cisId string, zoneId string, dnsId string, dnsBody DnsBody) (*DnsRecord, error) +} + +type dns struct { + client *client.Client +} + +func newDnsAPI(c *client.Client) Dns { + return &dns{ + client: c, + } +} + +func (r *dns) ListDns(cisId string, zoneId string) ([]DnsRecord, error) { + var records []DnsRecord + rawURL := fmt.Sprintf("/v1/%s/zones/%s/dns_records?page=1", cisId, zoneId) + if _, err := r.client.GetPaginated(rawURL, NewDNSPaginatedResources(DnsRecord{}), func(resource interface{}) bool { + if dns, ok := resource.(DnsRecord); ok { + records = append(records, dns) + return true + } + return false + }); err != nil { + return nil, fmt.Errorf("failed to list paginated dns records: %s", err) + } + return records, nil +} + +func (r *dns) GetDns(cisId string, zoneId string, dnsId string) (*DnsRecord, error) { + dnsResult := DnsResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s/dns_records/%s", cisId, zoneId, dnsId) + _, err := r.client.Get(rawURL, &dnsResult, nil) + if err != nil { + return nil, err + } + return &dnsResult.Dns, nil +} + +func (r *dns) DeleteDns(cisId string, zoneId string, dnsId string) error { + rawURL := fmt.Sprintf("/v1/%s/zones/%s/dns_records/%s", cisId, zoneId, dnsId) + _, err := r.client.Delete(rawURL) + if err != nil { + return err + } + return nil +} + +func (r *dns) CreateDns(cisId string, zoneId string, dnsBody DnsBody) (*DnsRecord, error) { + dnsResult := DnsResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s/dns_records", cisId, zoneId) + _, err := r.client.Post(rawURL, &dnsBody, &dnsResult) + if err != nil { + return nil, err + } + return &dnsResult.Dns, nil +} + +func (r *dns) UpdateDns(cisId string, zoneId string, dnsId string, dnsBody DnsBody) (*DnsRecord, error) { + dnsResult := DnsResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s/dns_records/%s", cisId, zoneId, dnsId) + _, err := r.client.Put(rawURL, &dnsBody, &dnsResult) + if err != nil { + return nil, err + } + return &dnsResult.Dns, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/firewalls.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/firewalls.go new file mode 100644 index 00000000000..9c9371a35ab --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/firewalls.go @@ -0,0 +1,150 @@ +package cisv1 + +import ( + "fmt" + "log" + "time" + + "github.com/IBM-Cloud/bluemix-go/client" +) + 
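Editorial aside, not part of the vendored files: a minimal sketch of chaining the dns.go CRUD methods above. It assumes session.New reads credentials from the environment; cisID and zoneID are placeholder identifiers.

```go
package main

import (
	"fmt"
	"log"

	"github.com/IBM-Cloud/bluemix-go/api/cis/cisv1"
	"github.com/IBM-Cloud/bluemix-go/session"
)

func main() {
	sess, err := session.New()
	if err != nil {
		log.Fatal(err)
	}
	cisAPI, err := cisv1.New(sess)
	if err != nil {
		log.Fatal(err)
	}
	dnsAPI := cisAPI.Dns()
	cisID, zoneID := "cis-instance-crn", "zone-id" // placeholders

	// Create an A record, then list records to confirm it exists.
	rec, err := dnsAPI.CreateDns(cisID, zoneID, cisv1.DnsBody{
		Name:    "www.example.com",
		DnsType: "A",
		Content: "192.0.2.1",
		Ttl:     900,
	})
	if err != nil {
		log.Fatal(err)
	}
	records, err := dnsAPI.ListDns(cisID, zoneID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", rec.Id, "of", len(records), "records")
}
```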
+//FirewallRecord ... +type FirewallRecord struct { + ID string `json:"id"` + Description string `json:"description,omitempty"` + Urls []string `json:"urls,omitempty"` + Configurations []Configuration `json:"configurations,omitempty"` + Paused bool `json:"paused,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + Mode string `json:"mode,omitempty"` + Notes string `json:"notes,omitempty"` + Configuration *Configuration `json:"configuration,omitempty"` + Priority int `json:"priority,omitempty"` +} + +//Configuration ... +type Configuration struct { + Target string `json:"target,omitempty"` + Value string `json:"value,omitempty"` +} + +//FirewallResults ... +type FirewallResults struct { + FirewallList []FirewallRecord `json:"result"` + ResultsInfo ResultsCount `json:"result_info"` + Success bool `json:"success"` + Errors []Error `json:"errors"` +} + +//FirewallResult ... +type FirewallResult struct { + Firewall FirewallRecord `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +//FirewallBody ... +type FirewallBody struct { + Description string `json:"description,omitempty"` + Urls []string `json:"urls,omitempty"` + Configurations []Configuration `json:"configurations,omitempty"` + Paused bool `json:"paused,omitempty"` + Mode string `json:"mode,omitempty"` + Notes string `json:"notes,omitempty"` + Configuration *Configuration `json:"configuration,omitempty"` + Priority int `json:"priority,omitempty"` +} + +//Firewall ... +type Firewall interface { + ListFirewall(cisID string, zoneID string, firewallType string) ([]FirewallRecord, error) + GetFirewall(cisID string, zoneID string, firewallType string, firewallID string) (*FirewallRecord, error) + CreateFirewall(cisID string, zoneID string, firewallType string, firewallBody FirewallBody) (*FirewallRecord, error) + DeleteFirewall(cisID string, zoneID string, firewallType string, firewallID string) error + UpdateFirewall(cisID string, zoneID string, firewallType string, firewallID string, firewallBody FirewallBody) (*FirewallRecord, error) +} + +//firewall ... 
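An aside before the firewall implementation below: the firewallType argument selects the rule family (for example "lockdowns" or "access_rules"), and the methods switch to a ".../rules" URL for access rules. A hedged fragment, reusing sess, cisAPI, cisID and zoneID from the DNS sketch earlier; not a complete program.

```go
// Fragment: cisAPI, cisID and zoneID are assumed to be set up as in the
// earlier cisv1 DNS example. Creates a lockdown rule for one source IP.
lockdown, err := cisAPI.Firewall().CreateFirewall(cisID, zoneID, "lockdowns", cisv1.FirewallBody{
	Description: "restrict /admin to one address",
	Urls:        []string{"www.example.com/admin/*"},
	Configurations: []cisv1.Configuration{
		{Target: "ip", Value: "198.51.100.4"},
	},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("created lockdown rule", lockdown.ID)
```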
+type firewall struct { + client *client.Client +} + +func newFirewallAPI(c *client.Client) Firewall { + return &firewall{ + client: c, + } +} + +func (r *firewall) ListFirewall(cisID string, zoneID string, firewallType string) ([]FirewallRecord, error) { + firewallResults := FirewallResults{} + + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s", cisID, zoneID, firewallType) + if firewallType == "access_rules" { + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s/rules", cisID, zoneID, firewallType) + } + + _, err := r.client.Get(rawURL, &firewallResults, nil) + if err != nil { + return nil, err + } + return firewallResults.FirewallList, err +} + +func (r *firewall) GetFirewall(cisID string, zoneID string, firewallType string, firewallID string) (*FirewallRecord, error) { + firewallResult := FirewallResult{} + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s/%s", cisID, zoneID, firewallType, firewallID) + if firewallType == "access_rules" { + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s/rules/%s", cisID, zoneID, firewallType, firewallID) + } + _, err := r.client.Get(rawURL, &firewallResult, nil) + if err != nil { + return nil, err + } + return &firewallResult.Firewall, nil +} + +func (r *firewall) DeleteFirewall(cisID string, zoneID string, firewallType string, firewallID string) error { + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s/%s", cisID, zoneID, firewallType, firewallID) + if firewallType == "access_rules" { + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s/rules/%s", cisID, zoneID, firewallType, firewallID) + } + _, err := r.client.Delete(rawURL) + if err != nil { + return err + } + return nil +} + +func (r *firewall) CreateFirewall(cisID string, zoneID string, firewallType string, firewallBody FirewallBody) (*FirewallRecord, error) { + firewallResult := FirewallResult{} + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s", cisID, zoneID, firewallType) + if firewallType == "access_rules" { + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s/rules", cisID, zoneID, firewallType) + } + log.Printf(">>>> rawURL : %s\n", rawURL) + _, err := r.client.Post(rawURL, &firewallBody, &firewallResult) + if err != nil { + return nil, err + } + return &firewallResult.Firewall, nil +} + +func (r *firewall) UpdateFirewall(cisID string, zoneID string, firewallType string, firewallID string, firewallBody FirewallBody) (*FirewallRecord, error) { + firewallResult := FirewallResult{} + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s/%s", cisID, zoneID, firewallType, firewallID) + if firewallType == "access_rules" { + rawURL = fmt.Sprintf("/v1/%s/zones/%s/firewall/%s/rules/%s", cisID, zoneID, firewallType, firewallID) + } + _, err := r.client.Put(rawURL, &firewallBody, &firewallResult) + if err != nil { + return nil, err + } + return &firewallResult.Firewall, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/glbs.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/glbs.go new file mode 100644 index 00000000000..9b468f4c5cd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/glbs.go @@ -0,0 +1,127 @@ +package cisv1 + +import ( + "fmt" + "time" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +type Glb struct { + Id string `json:"id"` + Name string `json:"name"` + Desc string `json:"description"` + FallbackPool string `json:"fallback_pool"` + DefaultPools []string `json:"default_pools"` + Ttl int `json:"ttl"` + Proxied bool `json:"proxied"` 
+ CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + SessionAffinity string `json:"session_affinity"` + Enabled bool `json:"enabled,omitempty"` + RegionPools map[string][]string `json:"region_pools,omitempty"` + PopPools map[string][]string `json:"pop_pools,omitempty"` +} + +type GlbResults struct { + GlbList []Glb `json:"result"` + ResultsInfo ResultsCount `json:"result_info"` + Success bool `json:"success"` + Errors []Error `json:"errors"` +} + +type GlbResult struct { + Glb Glb `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type GlbBody struct { + Desc string `json:"description,omitempty"` + Proxied bool `json:"proxied,omitempty"` + Name string `json:"name"` + FallbackPool string `json:"fallback_pool"` + DefaultPools []string `json:"default_pools"` + SessionAffinity string `json:"session_affinity,omitempty"` + Ttl int `json:"ttl,omitempty"` + Enabled bool `json:"enabled,omitempty"` + RegionPools map[string][]string `json:"region_pools,omitempty"` + PopPools map[string][]string `json:"pop_pools,omitempty"` +} + +type GlbDelete struct { + Result struct { + GlbId string + } `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type Glbs interface { + ListGlbs(cisId string, zoneId string) ([]Glb, error) + GetGlb(cisId string, zoneId string, glbId string) (*Glb, error) + CreateGlb(cisId string, zoneId string, glbBody GlbBody) (*Glb, error) + DeleteGlb(cisId string, zoneId string, glbId string) error + UpdateGlb(cisId string, zoneId string, glbId string, glbBody GlbBody) (*Glb, error) +} + +type glbs struct { + client *client.Client +} + +func newGlbAPI(c *client.Client) Glbs { + return &glbs{ + client: c, + } +} + +func (r *glbs) ListGlbs(cisId string, zoneId string) ([]Glb, error) { + glbResults := GlbResults{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers", cisId, zoneId) + _, err := r.client.Get(rawURL, &glbResults) + if err != nil { + return nil, err + } + return glbResults.GlbList, err +} + +func (r *glbs) GetGlb(cisId string, zoneId string, glbId string) (*Glb, error) { + glbResult := GlbResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers/%s", cisId, zoneId, glbId) + _, err := r.client.Get(rawURL, &glbResult, nil) + if err != nil { + return nil, err + } + return &glbResult.Glb, nil +} + +func (r *glbs) DeleteGlb(cisId string, zoneId string, glbId string) error { + rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers/%s", cisId, zoneId, glbId) + _, err := r.client.Delete(rawURL) + if err != nil { + return err + } + return nil +} + +func (r *glbs) CreateGlb(cisId string, zoneId string, glbBody GlbBody) (*Glb, error) { + glbResult := GlbResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers", cisId, zoneId) + _, err := r.client.Post(rawURL, &glbBody, &glbResult) + if err != nil { + return nil, err + } + return &glbResult.Glb, nil +} + +func (r *glbs) UpdateGlb(cisId string, zoneId string, glbId string, glbBody GlbBody) (*Glb, error) { + glbResult := GlbResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers/%s", cisId, zoneId, glbId) + _, err := r.client.Put(rawURL, &glbBody, &glbResult) + if err != nil { + return nil, err + } + return &glbResult.Glb, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/ips.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/ips.go new file mode 100644 index 00000000000..af4dcd91034 
--- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/ips.go @@ -0,0 +1,42 @@ +package cisv1 + +import ( + "fmt" + "github.com/IBM-Cloud/bluemix-go/client" +) + +type IpsList struct { + Ipv4 []string `json:"ipv4_cidrs"` + Ipv6 []string `json:"ipv6_cidrs"` +} + +type IpsResults struct { + IpList IpsList `json:"result"` + ResultsInfo ResultsCount `json:"result_info"` + Success bool `json:"success"` + Errors []Error `json:"errors"` +} + +type Ips interface { + ListIps() (*IpsList, error) +} + +type ips struct { + client *client.Client +} + +func newIpsAPI(c *client.Client) Ips { + return &ips{ + client: c, + } +} + +func (r *ips) ListIps() (*IpsList, error) { + ipsResults := IpsResults{} + rawURL := fmt.Sprintf("/v1/ips") + _, err := r.client.Get(rawURL, &ipsResults) + if err != nil { + return nil, err + } + return &ipsResults.IpList, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/monitors.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/monitors.go new file mode 100644 index 00000000000..4b3277988ba --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/monitors.go @@ -0,0 +1,128 @@ +package cisv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +type Monitor struct { + Id string `json:"id"` + Path string `json:"path,omitempty"` + Description string `json:"description"` + ExpBody string `json:"expected_body,omitempty"` + ExpCodes string `json:"expected_codes,omitempty"` + MonType string `json:"type,omitempty"` + Method string `json:"method,omitempty"` + Timeout int `json:"timeout,omitempty"` + Retries int `json:"retries,omitempty"` + Interval int `json:"interval,omitempty"` + FollowRedirects bool `json:"follow_redirects,omitempty"` + AllowInsecure bool `json:"allow_insecure,omitempty"` + Port int `json:"port,omitempty"` +} + +type MonitorResults struct { + MonitorList []Monitor `json:"result"` + ResultsInfo ResultsCount `json:"result_info"` + Success bool `json:"success"` + Errors []Error `json:"errors"` +} + +type MonitorResult struct { + Monitor Monitor `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type MonitorBody struct { + Description string `json:"description"` + ExpCodes string `json:"expected_codes,omitempty"` + ExpBody string `json:"expected_body,omitempty"` + Path string `json:"path,omitempty"` + MonType string `json:"type,omitempty"` + Method string `json:"method,omitempty"` + Timeout int `json:"timeout,omitempty"` + Retries int `json:"retries,omitempty"` + Interval int `json:"interval,omitempty"` + FollowRedirects bool `json:"follow_redirects,omitempty"` + AllowInsecure bool `json:"allow_insecure,omitempty"` + Port int `json:"port,omitempty"` +} + +type MonitorDelete struct { + Result struct { + MonitorId string + } `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type Monitors interface { + ListMonitors(cisId string) ([]Monitor, error) + GetMonitor(cisId string, monitorId string) (*Monitor, error) + CreateMonitor(cisId string, monitorBody MonitorBody) (*Monitor, error) + DeleteMonitor(cisId string, monitorId string) error + UpdateMonitor(cisId string, monitorId string, monitorBody MonitorBody) (*Monitor, error) +} + +type monitors struct { + client *client.Client +} + +func newMonitorAPI(c *client.Client) Monitors { + return &monitors{ + client: c, + } +} + +func (r *monitors) ListMonitors(cisId string) ([]Monitor, error) { + 
monitorResults := MonitorResults{} + rawURL := fmt.Sprintf("/v1/%s/load_balancers/monitors/", cisId) + _, err := r.client.Get(rawURL, &monitorResults) + if err != nil { + return nil, err + } + return monitorResults.MonitorList, err +} + +func (r *monitors) GetMonitor(cisId string, monitorId string) (*Monitor, error) { + monitorResult := MonitorResult{} + rawURL := fmt.Sprintf("/v1/%s/load_balancers/monitors/%s", cisId, monitorId) + _, err := r.client.Get(rawURL, &monitorResult, nil) + if err != nil { + return nil, err + } + return &monitorResult.Monitor, nil +} + +func (r *monitors) DeleteMonitor(cisId string, monitorId string) error { + rawURL := fmt.Sprintf("/v1/%s/load_balancers/monitors/%s", cisId, monitorId) + _, err := r.client.Delete(rawURL) + if err != nil { + return err + } + return nil +} + +func (r *monitors) CreateMonitor(cisId string, monitorBody MonitorBody) (*Monitor, error) { + monitorResult := MonitorResult{} + rawURL := fmt.Sprintf("/v1/%s/load_balancers/monitors/", cisId) + _, err := r.client.Post(rawURL, &monitorBody, &monitorResult) + if err != nil { + return nil, err + } + return &monitorResult.Monitor, nil +} + +func (r *monitors) UpdateMonitor(cisId string, monitorId string, monitorBody MonitorBody) (*Monitor, error) { + monitorResult := MonitorResult{} + rawURL := fmt.Sprintf("/v1/%s/load_balancers/monitors/%s", cisId, monitorId) + _, err := r.client.Put(rawURL, &monitorBody, &monitorResult) + if err != nil { + return nil, err + } + return &monitorResult.Monitor, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/paginate.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/paginate.go new file mode 100644 index 00000000000..26e0eb07a3c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/paginate.go @@ -0,0 +1,51 @@ +package cisv1 + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +type GenericPaginatedResourcesHandler struct { + resourceType reflect.Type +} + +func NewDNSPaginatedResources(resource interface{}) GenericPaginatedResourcesHandler { + return GenericPaginatedResourcesHandler{ + resourceType: reflect.TypeOf(resource), + } +} + +func (pr GenericPaginatedResourcesHandler) Resources(bytes []byte, curURL string) ([]interface{}, string, error) { + var paginatedResources = struct { + ResultInfo struct { + Page int `json:"page"` + TotalPages int `json:"total_pages"` + } `json:"result_info"` + Result json.RawMessage `json:"result"` + }{} + + if err := json.Unmarshal(bytes, &paginatedResources); err != nil { + return nil, "", fmt.Errorf("failed to unmarshal paginated response as json: %s", err) + } + + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + dc := json.NewDecoder(strings.NewReader(string(paginatedResources.Result))) + dc.UseNumber() + if err := dc.Decode(slicePtr.Interface()); err != nil { + return nil, "", fmt.Errorf("failed to decode paginated objects as %T: %s", pr.resourceType, err) + } + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + + if paginatedResources.ResultInfo.Page >= paginatedResources.ResultInfo.TotalPages { + return contents, "", nil + } + + return contents, strings.Replace(curURL, fmt.Sprintf("page=%d", paginatedResources.ResultInfo.Page), fmt.Sprintf("page=%d", paginatedResources.ResultInfo.Page+1), 1), nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/pools.go 
b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/pools.go new file mode 100644 index 00000000000..84d2e186d97 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/pools.go @@ -0,0 +1,135 @@ +package cisv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +type Pool struct { + Id string `json:"id"` + Description string `json:"description"` + Name string `json:"name"` + CheckRegions []string `json:"check_regions"` + Enabled bool `json:"enabled"` + MinOrigins int `json:"minimum_origins"` + Monitor string `json:"monitor"` + NotEmail string `json:"notification_email"` + Origins []Origin `json:"origins"` + Health string `json:"health"` + CreatedOn string `json:"created_on"` + ModifiedOn string `json:"modified_on"` +} + +type CheckRegion struct { + Region string `json:"0"` +} + +type Origin struct { + Name string `json:"name"` + Address string `json:"address"` + Enabled bool `json:"enabled"` + Weight int `json:"weight"` + Healthy bool `json:"healthy"` +} + +type PoolResults struct { + PoolList []Pool `json:"result"` + ResultsInfo ResultsCount `json:"result_info"` + Success bool `json:"success"` + Errors []Error `json:"errors"` +} + +type PoolResult struct { + Pool Pool `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type PoolBody struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Origins []Origin `json:"origins"` + CheckRegions []string `json:"check_regions"` + Enabled bool `json:"enabled"` + MinOrigins int `json:"minimum_origins,omitempty"` + Monitor string `json:"monitor,omitempty"` + NotEmail string `json:"notification_email,omitempty"` +} + +type PoolDelete struct { + Result struct { + PoolId string + } `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type Pools interface { + ListPools(cisId string) ([]Pool, error) + GetPool(cisId string, poolId string) (*Pool, error) + CreatePool(cisId string, poolBody PoolBody) (*Pool, error) + DeletePool(cisId string, poolId string) error + UpdatePool(cisId string, poolId string, poolBody PoolBody) (*Pool, error) +} + +type pools struct { + client *client.Client +} + +func newPoolAPI(c *client.Client) Pools { + return &pools{ + client: c, + } +} + +func (r *pools) ListPools(cisId string) ([]Pool, error) { + poolResults := PoolResults{} + rawURL := fmt.Sprintf("/v1/%s/load_balancers/pools/", cisId) + _, err := r.client.Get(rawURL, &poolResults) + if err != nil { + return nil, err + } + return poolResults.PoolList, err +} + +func (r *pools) GetPool(cisId string, poolId string) (*Pool, error) { + poolResult := PoolResult{} + rawURL := fmt.Sprintf("/v1/%s/load_balancers/pools/%s", cisId, poolId) + _, err := r.client.Get(rawURL, &poolResult, nil) + if err != nil { + return nil, err + } + return &poolResult.Pool, nil +} + +func (r *pools) DeletePool(cisId string, poolId string) error { + rawURL := fmt.Sprintf("/v1/%s/load_balancers/pools/%s", cisId, poolId) + _, err := r.client.Delete(rawURL) + if err != nil { + return err + } + return nil +} + +func (r *pools) CreatePool(cisId string, poolBody PoolBody) (*Pool, error) { + poolResult := PoolResult{} + rawURL := fmt.Sprintf("/v1/%s/load_balancers/pools/", cisId) + _, err := r.client.Post(rawURL, &poolBody, &poolResult) + if err != nil { + return nil, err + } + return &poolResult.Pool, nil +} + +func (r *pools) UpdatePool(cisId string, poolId string, poolBody PoolBody) (*Pool, 
error) { + poolResult := PoolResult{} + rawURL := fmt.Sprintf("/v1/%s/load_balancers/pools/%s", cisId, poolId) + _, err := r.client.Put(rawURL, &poolBody, &poolResult) + if err != nil { + return nil, err + } + return &poolResult.Pool, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/ratelimit.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/ratelimit.go new file mode 100644 index 00000000000..b91a5a7a543 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/ratelimit.go @@ -0,0 +1,162 @@ +package cisv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +// RateLimitRecord is a policy than can be applied to limit traffic within a customer domain +type RateLimitRecord struct { + ID string `json:"id,omitempty"` + Disabled bool `json:"disabled,omitempty"` + Description string `json:"description,omitempty"` + Bypass []RateLimitByPass `json:"bypass,omitempty"` + Threshold int `json:"threshold"` + Period int `json:"period"` + Correlate *RateLimitCorrelate `json:"correlate,omitempty"` + Action RateLimitAction `json:"action"` + Match RateLimitMatch `json:"match"` +} + +// RateLimitByPass ... +type RateLimitByPass struct { + Name string `json:"name"` + Value string `json:"value"` +} + +// RateLimitCorrelate ... +type RateLimitCorrelate struct { + By string `json:"by"` +} + +// RateLimitAction ... +type RateLimitAction struct { + Mode string `json:"mode"` + Timeout int `json:"timeout,omitempty"` + Response *ActionResponse `json:"response,omitempty"` +} + +// ActionResponse ... +type ActionResponse struct { + ContentType string `json:"content_type,omitempty"` + Body string `json:"body,omitempty"` +} + +// RateLimitMatch ... +type RateLimitMatch struct { + Request MatchRequest `json:"request"` + Response MatchResponse `json:"response"` +} + +// MatchRequest ... +type MatchRequest struct { + Methods []string `json:"methods,omitempty"` + Schemes []string `json:"schemes,omitempty"` + URL string `json:"url,omitempty"` +} + +// MatchResponse ... +type MatchResponse struct { + Statuses []int `json:"status,omitempty"` + OriginTraffic *bool `json:"origin_traffic,omitempty"` // api defaults to true so we need an explicit empty value + Headers []MatchResponseHeader `json:"headers,omitempty"` +} + +// MatchResponseHeader ... +type MatchResponseHeader struct { + Name string `json:"name,omitempty"` + Op string `json:"op,omitempty"` + Value string `json:"value,omitempty"` +} + +//RateLimitResult ... +type RateLimitResult struct { + RateLimit RateLimitRecord `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +//RateLimitResults ... +type RateLimitResults struct { + RateLimitList []RateLimitRecord `json:"result"` + ResultsInfo ResultsCount `json:"result_info"` + Success bool `json:"success"` + Errors []Error `json:"errors"` +} + +//RateLimit ... +type RateLimit interface { + ListRateLimit(cisID string, zoneID string) ([]RateLimitRecord, error) + GetRateLimit(cisID string, zoneID string, rateLimitID string) (*RateLimitRecord, error) + CreateRateLimit(cisID string, zoneID string, rateLimitBody RateLimitRecord) (*RateLimitRecord, error) + DeleteRateLimit(cisID string, zoneID string, rateLimitID string) error + UpdateRateLimit(cisID string, zoneID string, rateLimitID string, rateLimitBody RateLimitRecord) (*RateLimitRecord, error) +} + +//RateLimit ... 
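Before the ratelimit implementation below, a hedged illustration of the record shape these types describe: threshold and period together mean "at most 1000 requests per 60-second window", and an action mode of "simulate" logs matches without blocking. A fragment reusing the cisv1 client from the DNS sketch earlier, not a complete program.

```go
// Fragment: cisAPI, cisID and zoneID as in the earlier cisv1 example.
rl, err := cisAPI.RateLimit().CreateRateLimit(cisID, zoneID, cisv1.RateLimitRecord{
	Threshold: 1000, // max requests per period
	Period:    60,   // seconds
	Action: cisv1.RateLimitAction{
		Mode:    "simulate", // log matches without blocking
		Timeout: 60,
	},
	Match: cisv1.RateLimitMatch{
		Request: cisv1.MatchRequest{
			Methods: []string{"GET", "POST"},
			Schemes: []string{"HTTPS"},
			URL:     "*.example.com/api/*",
		},
	},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("rate limit", rl.ID)
```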
+type ratelimit struct { + client *client.Client +} + +func newRateLimitAPI(c *client.Client) RateLimit { + return &ratelimit{ + client: c, + } +} + +func (r *ratelimit) ListRateLimit(cisID string, zoneID string) ([]RateLimitRecord, error) { + rateLimitResults := RateLimitResults{} + + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/rate_limits", cisID, zoneID) + _, err := r.client.Get(rawURL, &rateLimitResults, nil) + if err != nil { + return nil, err + } + return rateLimitResults.RateLimitList, err +} + +func (r *ratelimit) GetRateLimit(cisID string, zoneID string, rateLimitID string) (*RateLimitRecord, error) { + rateLimitResult := RateLimitResult{} + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/rate_limits/%s", cisID, zoneID, rateLimitID) + _, err := r.client.Get(rawURL, &rateLimitResult, nil) + if err != nil { + return nil, err + } + return &rateLimitResult.RateLimit, nil +} + +func (r *ratelimit) DeleteRateLimit(cisID string, zoneID string, rateLimitID string) error { + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/rate_limits/%s", cisID, zoneID, rateLimitID) + _, err := r.client.Delete(rawURL) + if err != nil { + return err + } + return nil +} + +func (r *ratelimit) CreateRateLimit(cisID string, zoneID string, rateLimitBody RateLimitRecord) (*RateLimitRecord, error) { + rateLimitResult := RateLimitResult{} + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/rate_limits", cisID, zoneID) + _, err := r.client.Post(rawURL, &rateLimitBody, &rateLimitResult) + if err != nil { + return nil, err + } + return &rateLimitResult.RateLimit, nil +} + +func (r *ratelimit) UpdateRateLimit(cisID string, zoneID string, rateLimitID string, rateLimitBody RateLimitRecord) (*RateLimitRecord, error) { + rateLimitResult := RateLimitResult{} + var rawURL string + rawURL = fmt.Sprintf("/v1/%s/zones/%s/rate_limits/%s", cisID, zoneID, rateLimitID) + _, err := r.client.Put(rawURL, &rateLimitBody, &rateLimitResult) + if err != nil { + return nil, err + } + return &rateLimitResult.RateLimit, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/settings.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/settings.go new file mode 100644 index 00000000000..2b17298f1e1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/settings.go @@ -0,0 +1,60 @@ +package cisv1 + +import ( + "fmt" + "github.com/IBM-Cloud/bluemix-go/client" +) + +type SettingsResult struct { + Result SettingsResObj `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type SettingsResObj struct { + Id string `json:"id"` + Value string `json:"value"` + Editable bool `json:"editable"` + ModifiedDate string `json:"modified_on"` + CertificateStatus string `json:"certificate_status"` +} + +type SettingsBody struct { + Value string `json:"value"` +} + +type Settings interface { + GetSetting(cisId string, zoneId string, setting string) (*SettingsResObj, error) + UpdateSetting(cisId string, zoneId string, setting string, settingsBody SettingsBody) (*SettingsResObj, error) +} + +type settings struct { + client *client.Client +} + +func newSettingsAPI(c *client.Client) Settings { + return &settings{ + client: c, + } +} + +func (r *settings) GetSetting(cisId string, zoneId string, setting string) (*SettingsResObj, error) { + settingsResult := SettingsResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s/settings/%s", cisId, zoneId, setting) + _, err := r.client.Get(rawURL, &settingsResult) + if err != nil 
{ + return nil, err + } + return &settingsResult.Result, nil +} + +func (r *settings) UpdateSetting(cisId string, zoneId string, setting string, settingsBody SettingsBody) (*SettingsResObj, error) { + settingsResult := SettingsResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s/settings/%s", cisId, zoneId, setting) + _, err := r.client.Patch(rawURL, &settingsBody, &settingsResult) + if err != nil { + return nil, err + } + return &settingsResult.Result, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/zones.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/zones.go new file mode 100644 index 00000000000..3515d9c053c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/cis/cisv1/zones.go @@ -0,0 +1,116 @@ +package cisv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +type ResultsCount struct { + Count int `json:"count"` +} + +type Error struct { + Code int `json:"code"` + Msg string `json:"message"` +} + +type NameServer struct { + NameS int64 `json:"0"` +} + +type Zone struct { + Id string `json:"id"` + Name string `json:"name"` + Status string `json:"status"` + Paused bool `json:"paused"` + NameServers []string `json:"name_servers"` + OriginalNameServer []string `json:"original_name_servers"` +} + +type ZoneResults struct { + ZoneList []Zone `json:"result"` + ResultsInfo ResultsCount `json:"result_info"` + Success bool `json:"success"` + Errors []Error `json:"errors"` +} + +type ZoneResult struct { + Zone Zone `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type ZoneBody struct { + Name string `json:"name"` +} + +type ZoneDelete struct { + Result struct { + ZoneId string + } `json:"result"` + Success bool `json:"success"` + Errors []Error `json:"errors"` + Messages []string `json:"messages"` +} + +type Zones interface { + ListZones(cisId string) ([]Zone, error) + GetZone(cisId string, zoneId string) (*Zone, error) + CreateZone(cisId string, zoneBody ZoneBody) (*Zone, error) + DeleteZone(cisId string, zoneId string) error +} + +type zones struct { + client *client.Client +} + +func newZoneAPI(c *client.Client) Zones { + return &zones{ + client: c, + } +} + +func (r *zones) ListZones(cisId string) ([]Zone, error) { + zoneResults := ZoneResults{} + rawURL := fmt.Sprintf("/v1/%s/zones?page=1", cisId) + if _, err := r.client.GetPaginated(rawURL, NewDNSPaginatedResources(Zone{}), func(resource interface{}) bool { + if zone, ok := resource.(Zone); ok { + zoneResults.ZoneList = append(zoneResults.ZoneList, zone) + return true + } + return false + }); err != nil { + return nil, fmt.Errorf("failed to list paginated dns records: %s", err) + } + return zoneResults.ZoneList, nil +} +func (r *zones) GetZone(cisId string, zoneId string) (*Zone, error) { + zoneResult := ZoneResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/%s", cisId, zoneId) + _, err := r.client.Get(rawURL, &zoneResult, nil) + if err != nil { + return nil, err + } + return &zoneResult.Zone, nil +} + +func (r *zones) DeleteZone(cisId string, zoneId string) error { + rawURL := fmt.Sprintf("/v1/%s/zones/%s", cisId, zoneId) + _, err := r.client.Delete(rawURL) + if err != nil { + return err + } + return nil +} + +func (r *zones) CreateZone(cisId string, zoneBody ZoneBody) (*Zone, error) { + zoneResult := ZoneResult{} + rawURL := fmt.Sprintf("/v1/%s/zones/", cisId) + _, err := r.client.Post(rawURL, &zoneBody, &zoneResult) + if err != nil { + return nil, err + } + return &zoneResult.Zone, nil +} diff --git 
a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/addons.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/addons.go new file mode 100644 index 00000000000..e97c9a16fe8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/addons.go @@ -0,0 +1,77 @@ +package containerv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +//AddOn ... +type AddOn struct { + AllowedUpgradeVersion []string `json:"allowed_upgrade_versions,omitempty"` + Deprecated bool `json:"deprecated"` + HealthState string `json:"healthState,omitempty"` + HealthStatus string `json:"healthStatus,omitempty"` + MinKubeVersion string `json:"minKubeVersion,omitempty"` + MinOCPVersion string `json:"minOCPVersion,omitempty"` + Name string `json:"name"` + Options interface{} `json:"options,omitempty"` + SupportedKubeRange string `json:"supportedKubeRange,omitempty"` + TargetVersion string `json:"targetVersion,omitempty"` + Version string `json:"version,omitempty"` + VlanSpanningRequired bool `json:"vlan_spanning_required"` +} + +//GetAddOns ... +type GetAddOns struct { + AddonsList []AddOn `json:"addons"` +} + +//ConfigureAddOns ... +type ConfigureAddOns struct { + AddonsList []AddOn `json:"addons"` + Enable bool `json:"enable"` + Update bool `json:"update"` +} + +// AddOnsResponse ... +type AddOnsResponse struct { + MissingDeps interface{} `json:"missingDeps,omitempty"` + OrphanedAddons interface{} `json:"orphanedAddons,omitempty"` +} + +//AddOns ... +type AddOns interface { + GetAddons(clusterName string, target ClusterTargetHeader) ([]AddOn, error) + ConfigureAddons(clusterName string, params *ConfigureAddOns, target ClusterTargetHeader) (AddOnsResponse, error) +} + +type addons struct { + client *client.Client +} + +func newAddOnsAPI(c *client.Client) AddOns { + return &addons{ + client: c, + } +} + +//GetAddon ... +func (r *addons) GetAddons(clusterName string, target ClusterTargetHeader) ([]AddOn, error) { + rawURL := fmt.Sprintf("/v1/clusters/%s/addons", clusterName) + addonsList := GetAddOns{} + _, err := r.client.Get(rawURL, &addonsList.AddonsList, target.ToMap()) + if err != nil { + return addonsList.AddonsList, err + } + + return addonsList.AddonsList, err +} + +// ConfigureAddon ... 
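Ahead of the ConfigureAddons implementation below, an editorial sketch of the containerv1 add-ons flow. It assumes a cluster name and that ClusterTargetHeader's zero value is acceptable for the target account and region (populate its fields as your deployment requires); not part of the vendored file.

```go
package main

import (
	"fmt"
	"log"

	"github.com/IBM-Cloud/bluemix-go/api/container/containerv1"
	"github.com/IBM-Cloud/bluemix-go/session"
)

func main() {
	sess, err := session.New()
	if err != nil {
		log.Fatal(err)
	}
	csAPI, err := containerv1.New(sess)
	if err != nil {
		log.Fatal(err)
	}
	// Zero-value target; set region/resource group fields as needed.
	target := containerv1.ClusterTargetHeader{}
	addons, err := csAPI.AddOns().GetAddons("my-cluster", target)
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range addons {
		fmt.Printf("%s %s healthStatus=%s\n", a.Name, a.Version, a.HealthStatus)
	}
}
```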
+func (r *addons) ConfigureAddons(clusterName string, params *ConfigureAddOns, target ClusterTargetHeader) (AddOnsResponse, error) {
+	rawURL := fmt.Sprintf("/v1/clusters/%s/addons", clusterName)
+	resp := AddOnsResponse{}
+	_, err := r.client.Patch(rawURL, params, &resp, target.ToMap())
+	return resp, err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/alb.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/alb.go
new file mode 100644
index 00000000000..abe8662ffae
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/alb.go
@@ -0,0 +1,173 @@
+package containerv1
+
+import (
+	"fmt"
+	"github.com/IBM-Cloud/bluemix-go/client"
+)
+
+type ClusterALB struct {
+	ID                string      `json:"id"`
+	Region            string      `json:"region"`
+	DataCenter        string      `json:"dataCenter"`
+	IsPaid            bool        `json:"isPaid"`
+	IngressHostname   string      `json:"ingressHostname"`
+	IngressSecretName string      `json:"ingressSecretName"`
+	ALBs              []ALBConfig `json:"alb"`
+}
+
+// ALBConfig config for alb configuration
+// swagger:model
+type ALBConfig struct {
+	ALBID             string `json:"albID" description:"The ALB id"`
+	ClusterID         string `json:"clusterID"`
+	Name              string `json:"name"`
+	ALBType           string `json:"albType"`
+	Enable            bool   `json:"enable" description:"Enable (true) or disable(false) ALB"`
+	State             string `json:"state"`
+	CreatedDate       string `json:"createdDate"`
+	NumOfInstances    string `json:"numOfInstances" description:"Desired number of ALB replicas"`
+	Resize            bool   `json:"resize" description:"Indicate whether resizing should be done"`
+	ALBIP             string `json:"albip" description:"BYOIP VIP to use for ALB. Currently supported only for private ALB"`
+	Zone              string `json:"zone" description:"Zone to use for adding ALB. This is indicative of the AZ in which ALB will be deployed"`
+	DisableDeployment bool   `json:"disableDeployment" description:"Indicate whether to disable deployment only on disable alb"`
+}
+
+// ClusterALBSecret albsecret related information for cluster
+// swagger:model
+type ClusterALBSecret struct {
+	ID         string            `json:"id"`
+	Region     string            `json:"region"`
+	DataCenter string            `json:"dataCenter"`
+	IsPaid     bool              `json:"isPaid"`
+	ALBSecrets []ALBSecretConfig `json:"albSecrets" description:"All the ALB secrets created in this cluster"`
+}
+
+// ALBSecretConfig config for alb-secret configuration
+// swagger:model
+type ALBSecretConfig struct {
+	SecretName          string `json:"secretName" description:"Name of the ALB secret"`
+	ClusterID           string `json:"clusterID"`
+	DomainName          string `json:"domainName" description:"Domain name of the certificate"`
+	CloudCertInstanceID string `json:"cloudCertInstanceID" description:"Cloud Cert instance ID from which certificate is downloaded"`
+	ClusterCrn          string `json:"clusterCrn"`
+	CertCrn             string `json:"certCrn" description:"Unique CRN of the certificate which can be located in cloud cert instance"`
+	IssuerName          string `json:"issuerName" description:"Issuer name of the certificate"`
+	ExpiresOn           string `json:"expiresOn" description:"Expiry date of the certificate"`
+	State               string `json:"state" description:"State of ALB secret"`
+}
+
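Continuing the containerv1 sketch from the add-ons aside: a hedged example of deploying an ALB TLS secret with the ALBSecretConfig shape above (DeployALBCert, defined below, posts it to /v1/alb/albsecrets). The CRN is a placeholder; csAPI and target are assumed from the earlier example.

```go
// Fragment: csAPI and target as in the earlier containerv1 example.
cfg := containerv1.ALBSecretConfig{
	SecretName: "my-tls-secret",
	ClusterID:  "my-cluster",
	DomainName: "www.example.com",
	CertCrn:    "crn:v1:bluemix:public:cloudcerts:us-south:a/account-id:instance-id:certificate:cert-id", // placeholder
}
if err := csAPI.Albs().DeployALBCert(cfg, target); err != nil {
	log.Fatal(err)
}
```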
+// ALBSecretsPerCRN ...
+type ALBSecretsPerCRN struct {
+	ALBSecrets []string `json:"albsecrets" description:"ALB secrets corresponding to a CRN"`
+}
+
+//Albs interface
+type Albs interface {
+	ListClusterALBs(clusterNameOrID string, target ClusterTargetHeader) ([]ALBConfig, error)
+	GetALB(albID string, target ClusterTargetHeader) (ALBConfig, error)
+	ConfigureALB(albID string, config ALBConfig, disableDeployment bool, target ClusterTargetHeader) error
+	RemoveALB(albID string, target ClusterTargetHeader) error
+	DeployALBCert(config ALBSecretConfig, target ClusterTargetHeader) error
+	UpdateALBCert(config ALBSecretConfig, target ClusterTargetHeader) error
+	RemoveALBCertBySecretName(clusterID string, secretName string, target ClusterTargetHeader) error
+	RemoveALBCertByCertCRN(clusterID string, certCRN string, target ClusterTargetHeader) error
+	GetClusterALBCertBySecretName(clusterID string, secretName string, target ClusterTargetHeader) (ALBSecretConfig, error)
+	GetClusterALBCertByCertCRN(clusterID string, certCRN string, target ClusterTargetHeader) (ALBSecretConfig, error)
+	ListALBCerts(clusterID string, target ClusterTargetHeader) ([]ALBSecretConfig, error)
+	GetALBTypes(target ClusterTargetHeader) ([]string, error)
+}
+
+type alb struct {
+	client *client.Client
+}
+
+func newAlbAPI(c *client.Client) Albs {
+	return &alb{
+		client: c,
+	}
+}
+
+// ListClusterALBs returns the list of albs available for cluster
+func (r *alb) ListClusterALBs(clusterNameOrID string, target ClusterTargetHeader) ([]ALBConfig, error) {
+	var successV ClusterALB
+	rawURL := fmt.Sprintf("/v1/alb/clusters/%s", clusterNameOrID)
+	_, err := r.client.Get(rawURL, &successV, target.ToMap())
+	return successV.ALBs, err
+}
+
+// GetALB returns details about particular alb
+func (r *alb) GetALB(albID string, target ClusterTargetHeader) (ALBConfig, error) {
+	var successV ALBConfig
+	_, err := r.client.Get(fmt.Sprintf("/v1/alb/albs/%s", albID), &successV, target.ToMap())
+	return successV, err
+}
+
+// ConfigureALB enables or disables alb for cluster
+func (r *alb) ConfigureALB(albID string, config ALBConfig, disableDeployment bool, target ClusterTargetHeader) error {
+	var successV interface{}
+	if config.Enable {
+		_, err := r.client.Post("/v1/alb/albs", config, &successV, target.ToMap())
+		return err
+	}
+	_, err := r.client.Delete(fmt.Sprintf("/v1/alb/albs/%s?disableDeployment=%t", albID, disableDeployment), target.ToMap())
+	return err
+}
+
+// RemoveALB removes the alb
+func (r *alb) RemoveALB(albID string, target ClusterTargetHeader) error {
+	_, err := r.client.Delete(fmt.Sprintf("/v1/alb/albs/%s", albID), target.ToMap())
+	return err
+}
+
+// DeployALBCert deploys alb-cert
+func (r *alb) DeployALBCert(config ALBSecretConfig, target ClusterTargetHeader) error {
+	var successV interface{}
+	_, err := r.client.Post("/v1/alb/albsecrets", config, &successV, target.ToMap())
+	return err
+}
+
+// UpdateALBCert updates alb-cert
+func (r *alb) UpdateALBCert(config ALBSecretConfig, target ClusterTargetHeader) error {
+	_, err := r.client.Put("/v1/alb/albsecrets", config, nil, target.ToMap())
+	return err
+}
+
+// RemoveALBCertBySecretName removes the alb-cert
+func (r *alb) RemoveALBCertBySecretName(clusterID string, secretName string, target ClusterTargetHeader) error {
+	_, err := r.client.Delete(fmt.Sprintf("/v1/alb/clusters/%s/albsecrets?albSecretName=%s", clusterID, secretName), target.ToMap())
+	return err
+}
+
+// RemoveALBCertByCertCRN removes the alb-cert
+func (r *alb) RemoveALBCertByCertCRN(clusterID string, certCRN string, 
target ClusterTargetHeader) error { + _, err := r.client.Delete(fmt.Sprintf("/v1/alb/clusters/%s/albsecrets?certCrn=%s", clusterID, certCRN), target.ToMap()) + return err +} + +// GetClusterALBCertBySecretName returns details about specified alb cert for given secretName +func (r *alb) GetClusterALBCertBySecretName(clusterID string, secretName string, target ClusterTargetHeader) (ALBSecretConfig, error) { + var successV ALBSecretConfig + _, err := r.client.Get(fmt.Sprintf("/v1/alb/clusters/%s/albsecrets?albSecretName=%s", clusterID, secretName), &successV, target.ToMap()) + return successV, err +} + +// GetClusterALBCertByCertCrn returns details about specified alb cert for given certCRN +func (r *alb) GetClusterALBCertByCertCRN(clusterID string, certCRN string, target ClusterTargetHeader) (ALBSecretConfig, error) { + var successV ALBSecretConfig + _, err := r.client.Get(fmt.Sprintf("/v1/alb/clusters/%s/albsecrets?certCrn=%s", clusterID, certCRN), &successV, target.ToMap()) + return successV, err +} + +// ListALBCerts for cluster... +func (r *alb) ListALBCerts(clusterID string, target ClusterTargetHeader) ([]ALBSecretConfig, error) { + var successV ClusterALBSecret + _, err := r.client.Get(fmt.Sprintf("/v1/alb/clusters/%s/albsecrets", clusterID), &successV, target.ToMap()) + return successV.ALBSecrets, err +} + +// GetALBTypes returns list of available alb types +func (r *alb) GetALBTypes(target ClusterTargetHeader) ([]string, error) { + var successV []string + _, err := r.client.Get("/v1/alb/albtypes", &successV, target.ToMap()) + return successV, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/api_key.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/api_key.go new file mode 100644 index 00000000000..710f25cd283 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/api_key.go @@ -0,0 +1,62 @@ +package containerv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +type ApiKeyInfo struct { + ID string + Name string + Email string +} + +// Apikeys ... +type Apikeys interface { + GetApiKeyInfo(clusterID string, target ClusterTargetHeader) (ApiKeyInfo, error) + ResetApiKey(target ClusterTargetHeader) error +} + +type apikeys struct { + client *client.Client +} + +func newApiKeyAPI(c *client.Client) Apikeys { + return &apikeys{ + client: c, + } +} + +//GetApiKeyInfo ... +func (r *apikeys) GetApiKeyInfo(cluster string, target ClusterTargetHeader) (ApiKeyInfo, error) { + retVal := ApiKeyInfo{} + req := rest.GetRequest(helpers.GetFullURL(*r.client.Config.Endpoint, fmt.Sprintf("/v1/logging/%s/clusterkeyowner", cluster))) + + for key, value := range target.ToMap() { + req.Set(key, value) + } + + _, err := r.client.SendRequest(req, &retVal) + if err != nil { + return retVal, err + } + return retVal, err +} + +//ResetApiKey ... 
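One more containerv1 aside, ahead of the ResetApiKey implementation below: fetching the API-key owner for a cluster via the Apikeys interface above. A fragment reusing csAPI and target from the earlier containerv1 example; the cluster name is a placeholder.

```go
// Fragment: csAPI and target as in the earlier containerv1 example.
owner, err := csAPI.Apikeys().GetApiKeyInfo("my-cluster", target)
if err != nil {
	log.Fatal(err)
}
fmt.Println("cluster key owner:", owner.Email)
```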
+//ResetApiKey ...
+func (r *apikeys) ResetApiKey(target ClusterTargetHeader) error {
+    req := rest.PostRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/keys"))
+
+    for key, value := range target.ToMap() {
+        req.Set(key, value)
+    }
+
+    _, err := r.client.SendRequest(req, nil)
+    return err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/api_service.go
new file mode 100644
index 00000000000..5b457c46c8f
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/api_service.go
@@ -0,0 +1,128 @@
+package containerv1
+
+import (
+    gohttp "net/http"
+
+    bluemix "github.com/IBM-Cloud/bluemix-go"
+    "github.com/IBM-Cloud/bluemix-go/authentication"
+    "github.com/IBM-Cloud/bluemix-go/client"
+    "github.com/IBM-Cloud/bluemix-go/http"
+    "github.com/IBM-Cloud/bluemix-go/rest"
+    "github.com/IBM-Cloud/bluemix-go/session"
+)
+
+//ErrCodeAPICreation ...
+const ErrCodeAPICreation = "APICreationError"
+
+//ContainerServiceAPI is the Armada K8s client ...
+type ContainerServiceAPI interface {
+    Albs() Albs
+    Clusters() Clusters
+    Workers() Workers
+    WorkerPools() WorkerPool
+    WebHooks() Webhooks
+    Subnets() Subnets
+    KubeVersions() KubeVersions
+    Vlans() Vlans
+    Kms() Kms
+    AddOns() AddOns
+    Apikeys() Apikeys
+}
+
+//csService holds the client
+type csService struct {
+    *client.Client
+}
+
+//New ...
+func New(sess *session.Session) (ContainerServiceAPI, error) {
+    config := sess.Config.Copy()
+    err := config.ValidateConfigForService(bluemix.ContainerService)
+    if err != nil {
+        return nil, err
+    }
+    if config.HTTPClient == nil {
+        config.HTTPClient = http.NewHTTPClient(config)
+    }
+    tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{
+        DefaultHeader: gohttp.Header{
+            "User-Agent": []string{http.UserAgent()},
+        },
+        HTTPClient: config.HTTPClient,
+    })
+    if err != nil {
+        return nil, err
+    }
+    if config.IAMAccessToken == "" {
+        err := authentication.PopulateTokens(tokenRefresher, config)
+        if err != nil {
+            return nil, err
+        }
+    }
+    if config.Endpoint == nil {
+        ep, err := config.EndpointLocator.ContainerEndpoint()
+        if err != nil {
+            return nil, err
+        }
+        config.Endpoint = &ep
+    }
+
+    return &csService{
+        Client: client.New(config, bluemix.ContainerService, tokenRefresher),
+    }, nil
+}
+
+//Albs implements Albs API
+func (c *csService) Albs() Albs {
+    return newAlbAPI(c.Client)
+}
+
+//Clusters implements Clusters API
+func (c *csService) Clusters() Clusters {
+    return newClusterAPI(c.Client)
+}
+
+//Workers implements Cluster Workers API
+func (c *csService) Workers() Workers {
+    return newWorkerAPI(c.Client)
+}
+
+//WorkerPools implements Cluster WorkerPools API
+func (c *csService) WorkerPools() WorkerPool {
+    return newWorkerPoolAPI(c.Client)
+}
+
+//Subnets implements Cluster Subnets API
+func (c *csService) Subnets() Subnets {
+    return newSubnetAPI(c.Client)
+}
+
+//WebHooks implements Cluster WebHooks API
+func (c *csService) WebHooks() Webhooks {
+    return newWebhookAPI(c.Client)
+}
+
+//KubeVersions implements Cluster KubeVersions API
+func (c *csService) KubeVersions() KubeVersions {
+    return newKubeVersionAPI(c.Client)
+}
+
+//Vlans implements DC Cluster Vlan API
+func (c *csService) Vlans() Vlans {
+    return newVlanAPI(c.Client)
+}
+
+//Kms implements Cluster Kms API
+func (c *csService) Kms() Kms {
+    return newKmsAPI(c.Client)
+}
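
// --- Editor's note: illustrative usage sketch, not part of the vendored source ---
// Constructing the service handle defined above. That session.New picks its
// credentials up from the environment is an assumption about bluemix-go's
// session package, not something this diff shows:
//
//	sess, err := session.New()
//	if err != nil {
//		log.Fatal(err)
//	}
//	csAPI, err := containerv1.New(sess)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = csAPI.Clusters() // per-resource APIs hang off the service handle
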
+//AddOns implements Cluster AddOns API
+func (c *csService) AddOns() AddOns {
+    return newAddOnsAPI(c.Client)
+}
+
+//Apikeys implements Cluster Apikeys API
+func (c *csService) Apikeys() Apikeys {
+    return newApiKeyAPI(c.Client)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/clusters.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/clusters.go
new file mode 100644
index 00000000000..0a43042c423
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/clusters.go
@@ -0,0 +1,1007 @@
+package containerv1
+
+import (
+    "crypto/sha256"
+    "errors"
+    "fmt"
+    "html/template"
+    "io/ioutil"
+    "os"
+    "path"
+    "path/filepath"
+    "strings"
+
+    "gopkg.in/yaml.v2"
+
+    "github.com/IBM-Cloud/bluemix-go/client"
+    "github.com/IBM-Cloud/bluemix-go/helpers"
+    "github.com/IBM-Cloud/bluemix-go/trace"
+)
+
+//ClusterInfo ...
+type ClusterInfo struct {
+    CreatedDate                   string   `json:"createdDate"`
+    DataCenter                    string   `json:"dataCenter"`
+    ID                            string   `json:"id"`
+    IngressHostname               string   `json:"ingressHostname"`
+    IngressSecretName             string   `json:"ingressSecretName"`
+    Location                      string   `json:"location"`
+    MasterKubeVersion             string   `json:"masterKubeVersion"`
+    ModifiedDate                  string   `json:"modifiedDate"`
+    Name                          string   `json:"name"`
+    Region                        string   `json:"region"`
+    ResourceGroupID               string   `json:"resourceGroup"`
+    ResourceGroupName             string   `json:"resourceGroupName"`
+    ServerURL                     string   `json:"serverURL"`
+    MasterURL                     string   `json:"masterURL"` // vpc cluster serverURL is empty
+    State                         string   `json:"state"`
+    OrgID                         string   `json:"logOrg"`
+    OrgName                       string   `json:"logOrgName"`
+    SpaceID                       string   `json:"logSpace"`
+    SpaceName                     string   `json:"logSpaceName"`
+    IsPaid                        bool     `json:"isPaid"`
+    IsTrusted                     bool     `json:"isTrusted"`
+    WorkerCount                   int      `json:"workerCount"`
+    Vlans                         []Vlan   `json:"vlans"`
+    Addons                        []Addon  `json:"addons"`
+    OwnerEmail                    string   `json:"ownerEmail"`
+    APIUser                       string   `json:"apiUser"`
+    MonitoringURL                 string   `json:"monitoringURL"`
+    DisableAutoUpdate             bool     `json:"disableAutoUpdate"`
+    EtcdPort                      string   `json:"etcdPort"`
+    MasterStatus                  string   `json:"masterStatus"`
+    MasterStatusModifiedDate      string   `json:"masterStatusModifiedDate"`
+    KeyProtectEnabled             bool     `json:"keyProtectEnabled"`
+    WorkerZones                   []string `json:"workerZones"`
+    PullSecretApplied             bool     `json:"pullSecretApplied"`
+    CRN                           string   `json:"crn"`
+    PrivateServiceEndpointEnabled bool     `json:"privateServiceEndpointEnabled"`
+    PrivateServiceEndpointURL     string   `json:"privateServiceEndpointURL"`
+    PublicServiceEndpointEnabled  bool     `json:"publicServiceEndpointEnabled"`
+    PublicServiceEndpointURL      string   `json:"publicServiceEndpointURL"`
+    Type                          string   `json:"type"`
+    Provider                      string   `json:"provider"`
+    PodSubnet                     string   `json:"podSubnet"`
+    ServiceSubnet                 string   `json:"serviceSubnet"`
+}
+
+// ClusterUpdateParam ...
+type ClusterUpdateParam struct {
+    Action  string `json:"action"`
+    Force   bool   `json:"force"`
+    Version string `json:"version"`
+}
+
+//ClusterKeyInfo ...
+type ClusterKeyInfo struct {
+    AdminKey             string `json:"admin-key"`
+    Admin                string `json:"admin"`
+    ClusterCACertificate string `json:"cluster-ca-certificate"`
+    Host                 string `json:"host"`
+    Token                string `json:"idtoken"`
+    FilePath             string `json:"filepath"`
+}
+
+//ConfigFileOpenshift models the OpenShift kubeconfig .yml structure
+type ConfigFileOpenshift struct {
+    Clusters []struct {
+        Name    string `yaml:"name"`
+        Cluster struct {
+            Server string `yaml:"server"`
+        } `yaml:"cluster"`
+    } `yaml:"clusters"`
+    Users []struct {
+        Name string `yaml:"name"`
+        User struct {
+            Token string `yaml:"token"`
+        }
+    }
+}
+
+// ConfigFile ...
+type ConfigFile struct { + Clusters []struct { + Name string `yaml:"name"` + Cluster struct { + Server string `yaml:"server"` + } `yaml:"cluster"` + } `yaml:"clusters"` + Users []struct { + Name string `yaml:"name"` + User struct { + AuthProvider struct { + Config struct { + IDToken string `yaml:"id-token"` + } `yaml:"config"` + } `yaml:"auth-provider"` + } `yaml:"user"` + } `yaml:"users"` +} + +//Vlan ... +type Vlan struct { + ID string `json:"id"` + Subnets []struct { + Cidr string `json:"cidr"` + ID string `json:"id"` + Ips []string `json:"ips"` + IsByOIP bool `json:"is_byoip"` + IsPublic bool `json:"is_public"` + } + Zone string `json:"zone"` + Region string `json:"region"` +} + +//Addon ... +type Addon struct { + Name string `json:"name"` + Enabled bool `json:"enabled"` +} + +//ClusterCreateResponse ... +type ClusterCreateResponse struct { + ID string +} + +// MasterAPIServer describes the state to put the Master API server into +// swagger:model +type MasterAPIServer struct { + Action string `json:"action" binding:"required" description:"The action to perform on the API Server"` +} + +//ClusterTargetHeader ... +type ClusterTargetHeader struct { + OrgID string + SpaceID string + AccountID string + Region string + ResourceGroup string +} + +const ( + orgIDHeader = "X-Auth-Resource-Org" + spaceIDHeader = "X-Auth-Resource-Space" + accountIDHeader = "X-Auth-Resource-Account" + slUserNameHeader = "X-Auth-Softlayer-Username" + slAPIKeyHeader = "X-Auth-Softlayer-APIKey" + regionHeader = "X-Region" + resourceGroupHeader = "X-Auth-Resource-Group" +) + +//ToMap ... +func (c ClusterTargetHeader) ToMap() map[string]string { + m := make(map[string]string, 3) + m[orgIDHeader] = c.OrgID + m[spaceIDHeader] = c.SpaceID + m[accountIDHeader] = c.AccountID + m[regionHeader] = c.Region + m[resourceGroupHeader] = c.ResourceGroup + return m +} + +//ClusterSoftlayerHeader ... +type ClusterSoftlayerHeader struct { + SoftLayerUsername string + SoftLayerAPIKey string +} + +//ToMap ... +func (c ClusterSoftlayerHeader) ToMap() map[string]string { + m := make(map[string]string, 2) + m[slAPIKeyHeader] = c.SoftLayerAPIKey + m[slUserNameHeader] = c.SoftLayerUsername + return m +} + +//ClusterCreateRequest ... 
+type ClusterCreateRequest struct { + GatewayEnabled bool `json:"GatewayEnabled" description:"true for gateway enabled cluster"` + Datacenter string `json:"dataCenter" description:"The worker's data center"` + Isolation string `json:"isolation" description:"Can be 'public' or 'private'"` + MachineType string `json:"machineType" description:"The worker's machine type"` + Name string `json:"name" binding:"required" description:"The cluster's name"` + PrivateVlan string `json:"privateVlan" description:"The worker's private vlan"` + PublicVlan string `json:"publicVlan" description:"The worker's public vlan"` + WorkerNum int `json:"workerNum,omitempty" binding:"required" description:"The number of workers"` + NoSubnet bool `json:"noSubnet" description:"Indicate whether portable subnet should be ordered for user"` + MasterVersion string `json:"masterVersion,omitempty" description:"Desired version of the requested master"` + Prefix string `json:"prefix,omitempty" description:"hostname prefix for new workers"` + DiskEncryption bool `json:"diskEncryption" description:"disable encryption on a worker"` + PrivateEndpointEnabled bool `json:"privateSeviceEndpoint"` + PublicEndpointEnabled bool `json:"publicServiceEndpoint"` + DisableAutoUpdate bool `json:"disableAutoUpdate"` + DefaultWorkerPoolName string `json:"defaultWorkerPoolName" description:"The name of default workerpool"` + PodSubnet string `json:"podSubnet"` + ServiceSubnet string `json:"serviceSubnet"` + DefaultWorkerPoolEntitlement string `json:"defaultWorkerPoolEntitlement" description:"Additional licence/entitlement for the default worker pool"` +} + +// ServiceBindRequest ... +type ServiceBindRequest struct { + ClusterNameOrID string + ServiceInstanceNameOrID string `json:"serviceInstanceGUID" binding:"required"` + NamespaceID string `json:"namespaceID" binding:"required"` + Role string `json:"role"` + ServiceKeyJSON string `json:"serviceKeyJSON"` + ServiceKeyGUID string `json:"serviceKeyGUID"` +} + +// ServiceBindResponse ... +type ServiceBindResponse struct { + ServiceInstanceGUID string `json:"serviceInstanceGUID" binding:"required"` + NamespaceID string `json:"namespaceID" binding:"required"` + SecretName string `json:"secretName"` + Binding string `json:"binding"` +} + +//BoundService ... +type BoundService struct { + ServiceName string `json:"servicename"` + ServiceID string `json:"serviceid"` + ServiceKeyName string `json:"servicekeyname"` + Namespace string `json:"namespace"` +} + +// UpdateWorkerCommand .... +// swagger:model +type UpdateWorkerCommand struct { + Action string `json:"action" binding:"required" description:"Action to perform of the worker"` + // Setting force flag to true will ignore if the master is unavailable during 'os_reboot" and 'reload' action + Force bool `json:"force,omitempty"` +} + +//BoundServices .. 
+type BoundServices []BoundService + +//Clusters interface +type Clusters interface { + Create(params ClusterCreateRequest, target ClusterTargetHeader) (ClusterCreateResponse, error) + List(target ClusterTargetHeader) ([]ClusterInfo, error) + Update(name string, params ClusterUpdateParam, target ClusterTargetHeader) error + UpdateClusterWorker(clusterNameOrID string, workerID string, params UpdateWorkerCommand, target ClusterTargetHeader) error + UpdateClusterWorkers(clusterNameOrID string, workerIDs []string, params UpdateWorkerCommand, target ClusterTargetHeader) error + Delete(name string, target ClusterTargetHeader, deleteDependencies ...bool) error + Find(name string, target ClusterTargetHeader) (ClusterInfo, error) + FindWithOutShowResources(name string, target ClusterTargetHeader) (ClusterInfo, error) + FindWithOutShowResourcesCompatible(name string, target ClusterTargetHeader) (ClusterInfo, error) + GetClusterConfig(name, homeDir string, admin bool, target ClusterTargetHeader) (string, error) + GetClusterConfigDetail(name, homeDir string, admin bool, target ClusterTargetHeader) (ClusterKeyInfo, error) + StoreConfig(name, baseDir string, admin bool, createCalicoConfig bool, target ClusterTargetHeader) (string, string, error) + StoreConfigDetail(name, baseDir string, admin bool, createCalicoConfig bool, target ClusterTargetHeader) (string, ClusterKeyInfo, error) + UnsetCredentials(target ClusterTargetHeader) error + SetCredentials(slUsername, slAPIKey string, target ClusterTargetHeader) error + BindService(params ServiceBindRequest, target ClusterTargetHeader) (ServiceBindResponse, error) + UnBindService(clusterNameOrID, namespaceID, serviceInstanceGUID string, target ClusterTargetHeader) error + ListServicesBoundToCluster(clusterNameOrID, namespace string, target ClusterTargetHeader) (BoundServices, error) + FindServiceBoundToCluster(clusterNameOrID, serviceName, namespace string, target ClusterTargetHeader) (BoundService, error) + RefreshAPIServers(clusterNameOrID string, target ClusterTargetHeader) error + FetchOCTokenForKubeConfig(kubeConfig []byte, clusterInfo *ClusterInfo, skipSSLVerification bool) ([]byte, error) +} + +type clusters struct { + client *client.Client +} + +func newClusterAPI(c *client.Client) Clusters { + return &clusters{ + client: c, + } +} + +func (r *ClusterInfo) IsStagingSatelliteCluster() bool { + + return strings.Index(r.ServerURL, "stg") > 0 && r.Provider == "satellite" +} + +//Create ... +func (r *clusters) Create(params ClusterCreateRequest, target ClusterTargetHeader) (ClusterCreateResponse, error) { + var cluster ClusterCreateResponse + _, err := r.client.Post("/v1/clusters", params, &cluster, target.ToMap()) + return cluster, err +} + +//Update ... +func (r *clusters) Update(name string, params ClusterUpdateParam, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s", name) + _, err := r.client.Put(rawURL, params, nil, target.ToMap()) + return err +} + +// UpdateClusterWorker ... 
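
// --- Editor's note: illustrative usage sketch, not part of the vendored source ---
// A minimal create-then-update round trip through the Clusters interface
// above; every literal is a stand-in value:
//
//	req := containerv1.ClusterCreateRequest{
//		Name:        "demo",
//		Datacenter:  "dal10",
//		MachineType: "u2c.2x4",
//		WorkerNum:   1,
//	}
//	resp, err := csAPI.Clusters().Create(req, target)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("created cluster", resp.ID)
//
// UpdateClusterWorker (below) patches a single worker; UpdateClusterWorkers
// simply loops it over a list of worker IDs.
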
+func (r *clusters) UpdateClusterWorker(clusterNameOrID string, workerID string, params UpdateWorkerCommand, target ClusterTargetHeader) error {
+    rawURL := fmt.Sprintf("/v1/clusters/%s/workers/%s", clusterNameOrID, workerID)
+    // Make the request
+    _, err := r.client.Put(rawURL, params, nil, target.ToMap())
+    return err
+}
+
+// UpdateClusterWorkers updates a batch of workers one at a time, stopping at the first error
+func (r *clusters) UpdateClusterWorkers(clusterNameOrID string, workerIDs []string, params UpdateWorkerCommand, target ClusterTargetHeader) error {
+    for _, workerID := range workerIDs {
+        if workerID == "" {
+            return errors.New("worker IDs cannot be empty")
+        }
+        err := r.UpdateClusterWorker(clusterNameOrID, workerID, params, target)
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+//Delete ...
+func (r *clusters) Delete(name string, target ClusterTargetHeader, deleteDependencies ...bool) error {
+    var rawURL string
+    if len(deleteDependencies) != 0 {
+        rawURL = fmt.Sprintf("/v1/clusters/%s?deleteResources=%t", name, deleteDependencies[0])
+    } else {
+        rawURL = fmt.Sprintf("/v1/clusters/%s", name)
+    }
+    _, err := r.client.Delete(rawURL, target.ToMap())
+    return err
+}
+
+//List ...
+func (r *clusters) List(target ClusterTargetHeader) ([]ClusterInfo, error) {
+    clusters := []ClusterInfo{}
+    _, err := r.client.Get("/v1/clusters", &clusters, target.ToMap())
+    if err != nil {
+        return nil, err
+    }
+
+    return clusters, err
+}
+
+//Find ...
+func (r *clusters) Find(name string, target ClusterTargetHeader) (ClusterInfo, error) {
+    rawURL := fmt.Sprintf("/v1/clusters/%s?showResources=true", name)
+    cluster := ClusterInfo{}
+    _, err := r.client.Get(rawURL, &cluster, target.ToMap())
+    if err != nil {
+        return cluster, err
+    }
+
+    return cluster, err
+}
+
+//FindWithOutShowResources ...
+func (r *clusters) FindWithOutShowResources(name string, target ClusterTargetHeader) (ClusterInfo, error) {
+    rawURL := fmt.Sprintf("/v1/clusters/%s", name)
+    cluster := ClusterInfo{}
+    _, err := r.client.Get(rawURL, &cluster, target.ToMap())
+    if err != nil {
+        return cluster, err
+    }
+
+    return cluster, err
+}
+
+//FindWithOutShowResourcesCompatible ...
+func (r *clusters) FindWithOutShowResourcesCompatible(name string, target ClusterTargetHeader) (ClusterInfo, error) {
+    rawURL := fmt.Sprintf("/v2/getCluster?v1-compatible&cluster=%s", name)
+    cluster := ClusterInfo{}
+    _, err := r.client.Get(rawURL, &cluster, target.ToMap())
+    if err != nil {
+        return cluster, err
+    }
+    // Handle VPC cluster. ServerURL is blank for v2/vpc clusters
+    if cluster.ServerURL == "" {
+        cluster.ServerURL = cluster.MasterURL
+    }
+    return cluster, err
+}
+
+//GetClusterConfig ...
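
// --- Editor's note: illustrative usage sketch, not part of the vendored source ---
// GetClusterConfig (below) downloads the cluster's kubeconfig zip, unpacks it
// next to the download location, and returns the absolute path of the
// extracted YAML. Sketch, assuming a writable directory:
//
//	cfgPath, err := csAPI.Clusters().GetClusterConfig("mycluster", os.TempDir(), false, target)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("kubeconfig written to", cfgPath)
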
+func (r *clusters) GetClusterConfig(name, dir string, admin bool, target ClusterTargetHeader) (string, error) { + if !helpers.FileExists(dir) { + return "", fmt.Errorf("Path: %q, to download the config doesn't exist", dir) + } + rawURL := fmt.Sprintf("/v1/clusters/%s/config", name) + if admin { + rawURL += "/admin" + } + resultDir := ComputeClusterConfigDir(dir, name, admin) + const kubeConfigName = "config.yml" + err := os.MkdirAll(resultDir, 0755) + if err != nil { + return "", fmt.Errorf("Error creating directory to download the cluster config") + } + downloadPath := filepath.Join(resultDir, "config.zip") + trace.Logger.Println("Will download the kubeconfig at", downloadPath) + + var out *os.File + if out, err = os.Create(downloadPath); err != nil { + return "", err + } + defer out.Close() + defer helpers.RemoveFile(downloadPath) + _, err = r.client.Get(rawURL, out, target.ToMap()) + if err != nil { + return "", err + } + trace.Logger.Println("Downloaded the kubeconfig at", downloadPath) + if err = helpers.Unzip(downloadPath, resultDir); err != nil { + return "", err + } + defer helpers.RemoveFilesWithPattern(resultDir, "[^(.yml)|(.pem)]$") + var kubedir, kubeyml string + files, _ := ioutil.ReadDir(resultDir) + for _, f := range files { + if f.IsDir() && strings.HasPrefix(f.Name(), "kube") { + kubedir = filepath.Join(resultDir, f.Name()) + files, _ := ioutil.ReadDir(kubedir) + for _, f := range files { + old := filepath.Join(kubedir, f.Name()) + new := filepath.Join(kubedir, "../", f.Name()) + if strings.HasSuffix(f.Name(), ".yml") { + new = filepath.Join(path.Clean(kubedir), "../", path.Clean(kubeConfigName)) + kubeyml = new + } + err := os.Rename(old, new) + if err != nil { + return "", fmt.Errorf("Couldn't rename: %q", err) + } + } + break + } + } + if kubedir == "" { + return "", errors.New("Unable to locate kube config in zip archive") + } + + // Block to add token for openshift clusters (This can be temporary until iks team handles openshift clusters) + clusterInfo, err := r.FindWithOutShowResourcesCompatible(name, target) + if err != nil { + // Assuming an error means that this is a vpc cluster, and we're returning existing kubeconfig + // When we add support for vpcs on openshift clusters, we may want revisit this + return filepath.Abs(kubeyml) + } + + if clusterInfo.Type == "openshift" { + trace.Logger.Println("Debug: type is openshift trying login to get token") + var yamlConfig []byte + if yamlConfig, err = ioutil.ReadFile(kubeyml); err != nil { + return "", err + } + yamlConfig, err = r.FetchOCTokenForKubeConfig(yamlConfig, &clusterInfo, clusterInfo.IsStagingSatelliteCluster()) + if err != nil { + return "", err + } + err = ioutil.WriteFile(kubeyml, yamlConfig, 0644) // 0644 is irrelevant here, since file already exists. + if err != nil { + return "", err + } + } + + return filepath.Abs(kubeyml) +} + +//GetClusterConfigDetail ... 
+func (r *clusters) GetClusterConfigDetail(name, dir string, admin bool, target ClusterTargetHeader) (ClusterKeyInfo, error) { + clusterkey := ClusterKeyInfo{} + if !helpers.FileExists(dir) { + return clusterkey, fmt.Errorf("Path: %q, to download the config doesn't exist", dir) + } + rawURL := fmt.Sprintf("/v1/clusters/%s/config", name) + if admin { + rawURL += "/admin" + } + resultDir := ComputeClusterConfigDir(dir, name, admin) + const kubeConfigName = "config.yml" + err := os.MkdirAll(resultDir, 0755) + if err != nil { + return clusterkey, fmt.Errorf("Error creating directory to download the cluster config") + } + downloadPath := filepath.Join(resultDir, "config.zip") + trace.Logger.Println("Will download the kubeconfig at", downloadPath) + + var out *os.File + if out, err = os.Create(downloadPath); err != nil { + return clusterkey, err + } + defer out.Close() + defer helpers.RemoveFile(downloadPath) + _, err = r.client.Get(rawURL, out, target.ToMap()) + if err != nil { + return clusterkey, err + } + trace.Logger.Println("Downloaded the kubeconfig at", downloadPath) + if err = helpers.Unzip(downloadPath, resultDir); err != nil { + return clusterkey, err + } + defer helpers.RemoveFilesWithPattern(resultDir, "[^(.yml)|(.pem)]$") + var kubedir, kubeyml string + files, _ := ioutil.ReadDir(resultDir) + for _, f := range files { + if f.IsDir() && strings.HasPrefix(f.Name(), "kube") { + kubedir = filepath.Join(resultDir, f.Name()) + files, _ := ioutil.ReadDir(kubedir) + for _, f := range files { + fileContent, _ := ioutil.ReadFile(kubedir + "/" + f.Name()) + if f.Name() == "admin-key.pem" { + clusterkey.AdminKey = string(fileContent) + } + if f.Name() == "admin.pem" { + clusterkey.Admin = string(fileContent) + } + if strings.HasPrefix(f.Name(), "ca-") && strings.HasSuffix(f.Name(), ".pem") { + clusterkey.ClusterCACertificate = string(fileContent) + } + old := filepath.Join(kubedir, f.Name()) + new := filepath.Join(kubedir, "../", f.Name()) + if strings.HasSuffix(f.Name(), ".yml") { + new = filepath.Join(path.Clean(kubedir), "../", path.Clean(kubeConfigName)) + kubeyml = new + } + err := os.Rename(old, new) + if err != nil { + return clusterkey, fmt.Errorf("Couldn't rename: %q", err) + } + } + break + } + } + if kubedir == "" { + return clusterkey, errors.New("Unable to locate kube config in zip archive") + } + + kubefile, _ := ioutil.ReadFile(kubeyml) + var yamlConfig ConfigFile + err = yaml.Unmarshal(kubefile, &yamlConfig) + if err != nil { + fmt.Printf("Error parsing YAML file: %s\n", err) + } + if len(yamlConfig.Clusters) != 0 { + clusterkey.Host = yamlConfig.Clusters[0].Cluster.Server + } + if len(yamlConfig.Users) != 0 { + clusterkey.Token = yamlConfig.Users[0].User.AuthProvider.Config.IDToken + } + + // Block to add token for openshift clusters (This can be temporary until iks team handles openshift clusters) + clusterInfo, err := r.FindWithOutShowResourcesCompatible(name, target) + if err != nil { + // Assuming an error means that this is a vpc cluster, and we're returning existing kubeconfig + // When we add support for vpcs on openshift clusters, we may want revisit this + clusterkey.FilePath, _ = filepath.Abs(kubeyml) + return clusterkey, err + } + + if clusterInfo.Type == "openshift" { + trace.Logger.Println("Debug: type is openshift trying login to get token") + var yamlConfig []byte + if yamlConfig, err = ioutil.ReadFile(kubeyml); err != nil { + return clusterkey, err + } + yamlConfig, err = r.FetchOCTokenForKubeConfig(yamlConfig, &clusterInfo, 
clusterInfo.IsStagingSatelliteCluster()) + if err != nil { + return clusterkey, err + } + err = ioutil.WriteFile(kubeyml, yamlConfig, 0644) // 0644 is irrelevant here, since file already exists. + if err != nil { + return clusterkey, err + } + openshiftyml, _ := ioutil.ReadFile(kubeyml) + var openshiftyaml ConfigFileOpenshift + err = yaml.Unmarshal(openshiftyml, &openshiftyaml) + if err != nil { + fmt.Printf("Error parsing YAML file: %s\n", err) + } + openshiftusers := openshiftyaml.Users + for _, usr := range openshiftusers { + if strings.HasPrefix(usr.Name, "IAM") { + clusterkey.Token = usr.User.Token + } + } + if len(openshiftyaml.Clusters) != 0 { + clusterkey.Host = openshiftyaml.Clusters[0].Cluster.Server + } + clusterkey.ClusterCACertificate = "" + + } + clusterkey.FilePath, _ = filepath.Abs(kubeyml) + return clusterkey, err +} + +// StoreConfig ... +func (r *clusters) StoreConfig(name, dir string, admin, createCalicoConfig bool, target ClusterTargetHeader) (string, string, error) { + var calicoConfig string + if !helpers.FileExists(dir) { + return "", "", fmt.Errorf("Path: %q, to download the config doesn't exist", dir) + } + rawURL := fmt.Sprintf("/v1/clusters/%s/config", name) + if admin { + rawURL += "/admin" + } + if createCalicoConfig { + rawURL += "?createNetworkConfig=true" + } + resultDir := ComputeClusterConfigDir(dir, name, admin) + err := os.MkdirAll(resultDir, 0755) + if err != nil { + return "", "", fmt.Errorf("Error creating directory to download the cluster config") + } + downloadPath := filepath.Join(resultDir, "config.zip") + trace.Logger.Println("Will download the kubeconfig at", downloadPath) + + var out *os.File + if out, err = os.Create(downloadPath); err != nil { + return "", "", err + } + defer out.Close() + defer helpers.RemoveFile(downloadPath) + _, err = r.client.Get(rawURL, out, target.ToMap()) + if err != nil { + return "", "", err + } + trace.Logger.Println("Downloaded the kubeconfig at", downloadPath) + if err = helpers.Unzip(downloadPath, resultDir); err != nil { + return "", "", err + } + trace.Logger.Println("Downloaded the kubec", resultDir) + + unzipConfigPath, err := kubeConfigDir(resultDir) + if err != nil { + return "", "", err + } + trace.Logger.Println("Located unzipped directory: ", unzipConfigPath) + files, _ := ioutil.ReadDir(unzipConfigPath) + for _, f := range files { + old := filepath.Join(unzipConfigPath, f.Name()) + new := filepath.Join(unzipConfigPath, "../", f.Name()) + err := os.Rename(old, new) + if err != nil { + return "", "", fmt.Errorf("Couldn't rename: %q", err) + } + } + err = os.RemoveAll(unzipConfigPath) + if err != nil { + return "", "", err + } + // Locate the yaml file and return the new path + baseDirFiles, err := ioutil.ReadDir(resultDir) + if err != nil { + return "", "", err + } + + if createCalicoConfig { + // Proccess calico golang template file if it exists + calicoConfig, err = GenerateCalicoConfig(resultDir) + if err != nil { + return "", "", err + } + } + var kubeconfigFileName string + for _, baseDirFile := range baseDirFiles { + if strings.Contains(baseDirFile.Name(), ".yml") { + kubeconfigFileName = fmt.Sprintf("%s/%s", resultDir, baseDirFile.Name()) + break + } + } + if kubeconfigFileName == "" { + return "", "", errors.New("Unable to locate kube config in zip archive") + } + + // Block to add token for openshift clusters (This can be temporary until iks team handles openshift clusters) + clusterInfo, err := r.FindWithOutShowResourcesCompatible(name, target) + if err != nil { + // Assuming an error means 
that this is a vpc cluster, and we're returning existing kubeconfig + // When we add support for vpcs on openshift clusters, we may want revisit this + return kubeconfigFileName, calicoConfig, nil + } + + if clusterInfo.Type == "openshift" { + trace.Logger.Println("Cluster Type is openshift trying login to get token") + var yamlConfig []byte + if yamlConfig, err = ioutil.ReadFile(kubeconfigFileName); err != nil { + return "", "", err + } + yamlConfig, err = r.FetchOCTokenForKubeConfig(yamlConfig, &clusterInfo, clusterInfo.IsStagingSatelliteCluster()) + if err != nil { + return "", "", err + } + err = ioutil.WriteFile(kubeconfigFileName, yamlConfig, 0644) // check about permissions and truncate + if err != nil { + return "", "", err + } + } + return kubeconfigFileName, calicoConfig, nil +} + +//StoreConfigDetail ... +func (r *clusters) StoreConfigDetail(name, dir string, admin, createCalicoConfig bool, target ClusterTargetHeader) (string, ClusterKeyInfo, error) { + clusterkey := ClusterKeyInfo{} + var calicoConfig string + if !helpers.FileExists(dir) { + return "", clusterkey, fmt.Errorf("Path: %q, to download the config doesn't exist", dir) + } + rawURL := fmt.Sprintf("/v1/clusters/%s/config", name) + if admin { + rawURL += "/admin" + } + if createCalicoConfig { + rawURL += "?createNetworkConfig=true" + } + resultDir := ComputeClusterConfigDir(dir, name, admin) + err := os.MkdirAll(resultDir, 0755) + if err != nil { + return "", clusterkey, fmt.Errorf("Error creating directory to download the cluster config") + } + downloadPath := filepath.Join(resultDir, "config.zip") + trace.Logger.Println("Will download the kubeconfig at", downloadPath) + + var out *os.File + if out, err = os.Create(downloadPath); err != nil { + return "", clusterkey, err + } + defer out.Close() + defer helpers.RemoveFile(downloadPath) + _, err = r.client.Get(rawURL, out, target.ToMap()) + if err != nil { + return "", clusterkey, err + } + trace.Logger.Println("Downloaded the kubeconfig at", downloadPath) + if err = helpers.Unzip(downloadPath, resultDir); err != nil { + return "", clusterkey, err + } + trace.Logger.Println("Downloaded the kubec", resultDir) + + unzipConfigPath, err := kubeConfigDir(resultDir) + if err != nil { + return "", clusterkey, err + } + trace.Logger.Println("Located unzipped directory: ", unzipConfigPath) + files, _ := ioutil.ReadDir(unzipConfigPath) + for _, f := range files { + fileContent, _ := ioutil.ReadFile(unzipConfigPath + "/" + f.Name()) + if f.Name() == "admin-key.pem" { + clusterkey.AdminKey = string(fileContent) + } + if f.Name() == "admin.pem" { + clusterkey.Admin = string(fileContent) + } + if strings.HasPrefix(f.Name(), "ca-") && strings.HasSuffix(f.Name(), ".pem") { + clusterkey.ClusterCACertificate = string(fileContent) + } + old := filepath.Join(unzipConfigPath, f.Name()) + new := filepath.Join(unzipConfigPath, "../", f.Name()) + err := os.Rename(old, new) + if err != nil { + return "", clusterkey, fmt.Errorf("Couldn't rename: %q", err) + } + } + err = os.RemoveAll(unzipConfigPath) + if err != nil { + return "", clusterkey, err + } + // Locate the yaml file and return the new path + baseDirFiles, err := ioutil.ReadDir(resultDir) + if err != nil { + return "", clusterkey, err + } + + if createCalicoConfig { + // Proccess calico golang template file if it exists + calicoConfig, err = GenerateCalicoConfig(resultDir) + if err != nil { + return "", clusterkey, err + } + } + var kubeconfigFileName string + for _, baseDirFile := range baseDirFiles { + if 
strings.Contains(baseDirFile.Name(), ".yml") { + kubeconfigFileName = fmt.Sprintf("%s/%s", resultDir, baseDirFile.Name()) + break + } + } + if kubeconfigFileName == "" { + return "", clusterkey, errors.New("Unable to locate kube config in zip archive") + } + kubefile, _ := ioutil.ReadFile(kubeconfigFileName) + var yamlConfig ConfigFile + err = yaml.Unmarshal(kubefile, &yamlConfig) + if err != nil { + fmt.Printf("Error parsing YAML file: %s\n", err) + } + if len(yamlConfig.Clusters) != 0 { + clusterkey.Host = yamlConfig.Clusters[0].Cluster.Server + } + if len(yamlConfig.Users) != 0 { + clusterkey.Token = yamlConfig.Users[0].User.AuthProvider.Config.IDToken + } + + // Block to add token for openshift clusters (This can be temporary until iks team handles openshift clusters) + clusterInfo, err := r.FindWithOutShowResourcesCompatible(name, target) + if err != nil { + // Assuming an error means that this is a vpc cluster, and we're returning existing kubeconfig + // When we add support for vpcs on openshift clusters, we may want revisit this + clusterkey.FilePath = kubeconfigFileName + return calicoConfig, clusterkey, nil + } + + if clusterInfo.Type == "openshift" { + trace.Logger.Println("Cluster Type is openshift trying login to get token") + var yamlConfig []byte + if yamlConfig, err = ioutil.ReadFile(kubeconfigFileName); err != nil { + return "", clusterkey, err + } + yamlConfig, err = r.FetchOCTokenForKubeConfig(yamlConfig, &clusterInfo, clusterInfo.IsStagingSatelliteCluster()) + if err != nil { + return "", clusterkey, err + } + err = ioutil.WriteFile(kubeconfigFileName, yamlConfig, 0644) // check about permissions and truncate + if err != nil { + return "", clusterkey, err + } + openshiftyml, _ := ioutil.ReadFile(kubeconfigFileName) + var openshiftyaml ConfigFileOpenshift + err = yaml.Unmarshal(openshiftyml, &openshiftyaml) + if err != nil { + fmt.Printf("Error parsing YAML file: %s\n", err) + } + openshiftusers := openshiftyaml.Users + for _, usr := range openshiftusers { + if strings.HasPrefix(usr.Name, "IAM") { + clusterkey.Token = usr.User.Token + } + } + if len(openshiftyaml.Clusters) != 0 { + clusterkey.Host = openshiftyaml.Clusters[0].Cluster.Server + } + clusterkey.ClusterCACertificate = "" + + } + clusterkey.FilePath = kubeconfigFileName + return calicoConfig, clusterkey, nil +} + +//kubeConfigDir ... +func kubeConfigDir(baseDir string) (string, error) { + baseDirFiles, err := ioutil.ReadDir(baseDir) + if err != nil { + return "", err + } + + // Locate the new directory in form "kubeConfigxxx" stored in the base directory + for _, baseDirFile := range baseDirFiles { + if baseDirFile.IsDir() && strings.Index(baseDirFile.Name(), "kubeConfig") == 0 { + return filepath.Join(path.Clean(baseDir), path.Clean(baseDirFile.Name())), nil + } + } + + return "", errors.New("Unable to locate extracted configuration directory") +} + +//GenerateCalicoConfig ... 
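
// --- Editor's note: illustrative usage sketch, not part of the vendored source ---
// GenerateCalicoConfig (below) renders calicoctl.cfg.template, if present in
// the config directory, into calicoctl.cfg, filling in the certDir template
// variable. The path literal is a placeholder:
//
//	calicoCfg, err := containerv1.GenerateCalicoConfig("/tmp/demo_k8sconfig")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if calicoCfg == "" {
//		fmt.Println("no calico template shipped with this cluster config")
//	}
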
+func GenerateCalicoConfig(desiredConfigPath string) (string, error) { + // Proccess calico golang template file if it exists + calicoConfigFile := fmt.Sprintf("%s/%s", desiredConfigPath, "calicoctl.cfg.template") + newCalicoConfigFile := fmt.Sprintf("%s/%s", desiredConfigPath, "calicoctl.cfg") + if _, err := os.Stat(calicoConfigFile); !os.IsNotExist(err) { + tmpl, err := template.ParseFiles(calicoConfigFile) + if err != nil { + return "", fmt.Errorf("Unable to parse network config file: %v", err) + } + + newCaliFile, err := os.Create(newCalicoConfigFile) + if err != nil { + return "", fmt.Errorf("Failed to create network config file: %v", err) + } + defer newCaliFile.Close() + + templateVars := map[string]string{ + "certDir": desiredConfigPath, + } + if err := tmpl.Execute(newCaliFile, templateVars); err != nil { + return "", fmt.Errorf("Failed to execute template: %v", err) + } + return newCalicoConfigFile, nil + } + // Return an empty file path if the calico config doesn't exist + return "", nil +} + +//UnsetCredentials ... +func (r *clusters) UnsetCredentials(target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/credentials") + _, err := r.client.Delete(rawURL, target.ToMap()) + return err +} + +//SetCredentials ... +func (r *clusters) SetCredentials(slUsername, slAPIKey string, target ClusterTargetHeader) error { + slHeader := &ClusterSoftlayerHeader{ + SoftLayerAPIKey: slAPIKey, + SoftLayerUsername: slUsername, + } + _, err := r.client.Post("/v1/credentials", nil, nil, target.ToMap(), slHeader.ToMap()) + return err +} + +//BindService ... +func (r *clusters) BindService(params ServiceBindRequest, target ClusterTargetHeader) (ServiceBindResponse, error) { + rawURL := fmt.Sprintf("/v1/clusters/%s/services", params.ClusterNameOrID) + payLoad := struct { + ServiceInstanceNameOrID string `json:"serviceInstanceGUID" binding:"required"` + NamespaceID string `json:"namespaceID" binding:"required"` + Role string `json:"role"` + ServiceKeyJSON string `json:"serviceKeyJSON"` + ServiceKeyGUID string `json:"serviceKeyGUID"` + }{ + ServiceInstanceNameOrID: params.ServiceInstanceNameOrID, + NamespaceID: params.NamespaceID, + Role: params.Role, + ServiceKeyGUID: params.ServiceKeyGUID, + } + var cluster ServiceBindResponse + _, err := r.client.Post(rawURL, payLoad, &cluster, target.ToMap()) + return cluster, err +} + +//UnBindService ... +func (r *clusters) UnBindService(clusterNameOrID, namespaceID, serviceInstanceGUID string, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s/services/%s/%s", clusterNameOrID, namespaceID, serviceInstanceGUID) + _, err := r.client.Delete(rawURL, target.ToMap()) + return err +} + +//ComputeClusterConfigDir ... +func ComputeClusterConfigDir(dir, name string, admin bool) string { + resultDirPrefix := name + resultDirSuffix := "_k8sconfig" + if len(name) < 30 { + //Make it longer for uniqueness + h := sha256.New() + h.Write([]byte(name)) + resultDirPrefix = fmt.Sprintf("%x_%s", h.Sum(nil), name) + } + if admin { + resultDirPrefix = fmt.Sprintf("%s_admin", resultDirPrefix) + } + resultDir := filepath.Join(dir, fmt.Sprintf("%s%s", path.Clean(resultDirPrefix), path.Clean(resultDirSuffix))) + return resultDir +} + +//ListServicesBoundToCluster ... 
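
// --- Editor's note: illustrative usage sketch, not part of the vendored source ---
// ListServicesBoundToCluster (below) hits /v1/clusters/<cluster>/services, or
// the namespace-scoped variant when a namespace is passed:
//
//	bound, err := csAPI.Clusters().ListServicesBoundToCluster("mycluster", "default", target)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, b := range bound {
//		fmt.Println(b.ServiceName, b.ServiceKeyName, b.Namespace)
//	}
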
+func (r *clusters) ListServicesBoundToCluster(clusterNameOrID, namespace string, target ClusterTargetHeader) (BoundServices, error) { + var boundServices BoundServices + var path string + + if namespace == "" { + path = fmt.Sprintf("/v1/clusters/%s/services", clusterNameOrID) + + } else { + path = fmt.Sprintf("/v1/clusters/%s/services/%s", clusterNameOrID, namespace) + } + _, err := r.client.Get(path, &boundServices, target.ToMap()) + if err != nil { + return boundServices, err + } + + return boundServices, err +} + +//FindServiceBoundToCluster... +func (r *clusters) FindServiceBoundToCluster(clusterNameOrID, serviceNameOrId, namespace string, target ClusterTargetHeader) (BoundService, error) { + var boundService BoundService + boundServices, err := r.ListServicesBoundToCluster(clusterNameOrID, namespace, target) + if err != nil { + return boundService, err + } + for _, boundService := range boundServices { + if strings.Compare(boundService.ServiceName, serviceNameOrId) == 0 || strings.Compare(boundService.ServiceID, serviceNameOrId) == 0 { + return boundService, nil + } + } + + return boundService, err +} + +//RefreshAPIServers requests a refresh of a cluster's API server(s) +func (r *clusters) RefreshAPIServers(clusterNameOrID string, target ClusterTargetHeader) error { + params := MasterAPIServer{Action: "refresh"} + rawURL := fmt.Sprintf("/v1/clusters/%s/masters", clusterNameOrID) + _, err := r.client.Put(rawURL, params, nil, target.ToMap()) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/kms.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/kms.go new file mode 100644 index 00000000000..4cdf675c265 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/kms.go @@ -0,0 +1,52 @@ +package containerv1 + +import ( + "github.com/IBM-Cloud/bluemix-go/client" +) + +const ( + account = "X-Auth-Resource-Account" + resourceGroup = "X-Auth-Resource-Group" +) + +//Request body to attach a KMS to a cluster +type KmsEnableReq struct { + Cluster string `json:"cluster"` + Kms string `json:"instance_id"` + Crk string `json:"crk_id"` + PrivateEndpoint bool `json:"private_endpoint"` +} + +//ClusterHeader ... +type ClusterHeader struct { + AccountID string + ResourceGroup string +} + +//CreateMap ... +func (c ClusterHeader) CreateMap() map[string]string { + m := make(map[string]string, 3) + m[account] = c.AccountID + m[resourceGroup] = c.ResourceGroup + return m +} + +type kms struct { + client *client.Client +} + +//Kms interface +type Kms interface { + EnableKms(enableKmsReq KmsEnableReq, target ClusterHeader) error +} + +func newKmsAPI(c *client.Client) Kms { + return &kms{ + client: c, + } +} + +func (r *kms) EnableKms(enableKmsReq KmsEnableReq, target ClusterHeader) error { + _, err := r.client.Post("/v2/enableKMS", enableKmsReq, nil, target.CreateMap()) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/openshift.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/openshift.go new file mode 100644 index 00000000000..06c87856f0b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/openshift.go @@ -0,0 +1,267 @@ +package containerv1 + +/******************************************************************************* + * IBM Confidential + * OCO Source Materials + * IBM Cloud Schematics + * (C) Copyright IBM Corp. 2017 All Rights Reserved. 
+ * The source code for this program is not published or otherwise divested of
+ * its trade secrets, irrespective of what has been deposited with
+ * the U.S. Copyright Office.
+ ******************************************************************************/
+
+/*******************************************************************************
+ * A file for openshift related utility functions, like getting kube
+ * config
+ ******************************************************************************/
+
+import (
+    "encoding/base64"
+    "errors"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "regexp"
+    "runtime/debug"
+    "strings"
+    "time"
+
+    yaml "github.com/ghodss/yaml"
+
+    bxhttp "github.com/IBM-Cloud/bluemix-go/http"
+    "github.com/IBM-Cloud/bluemix-go/rest"
+    "github.com/IBM-Cloud/bluemix-go/trace"
+)
+
+const (
+    // IAMHTTPtimeout is the timeout used for IAM HTTP requests
+    IAMHTTPtimeout = 10 * time.Second
+)
+
+// Frame represents a program counter inside a stack frame
+type Frame uintptr
+
+// StackTrace is a stack of Frames from innermost to outermost
+type StackTrace []Frame
+type stackTracer interface {
+    StackTrace() StackTrace
+}
+
+type openShiftUser struct {
+    Kind       string `json:"kind"`
+    APIVersion string `json:"apiVersion"`
+    Metadata   struct {
+        Name              string    `json:"name"`
+        SelfLink          string    `json:"selfLink"`
+        UID               string    `json:"uid"`
+        ResourceVersion   string    `json:"resourceVersion"`
+        CreationTimestamp time.Time `json:"creationTimestamp"`
+    } `json:"metadata"`
+    Identities []string `json:"identities"`
+    Groups     []string `json:"groups"`
+}
+
+type authEndpoints struct {
+    Issuer                string `json:"issuer"`
+    AuthorizationEndpoint string `json:"authorization_endpoint"`
+    TokenEndpoint         string `json:"token_endpoint"`
+    ServerURL             string `json:"server_endpoint,omitempty"`
+}
+
+// PanicCatch recovers a panic value and converts it to an error
+func PanicCatch(r interface{}) error {
+    if r != nil {
+        var e error
+        switch x := r.(type) {
+        case string:
+            e = errors.New(x)
+        case error:
+            e = x
+        default:
+            e = errors.New("Unknown panic")
+        }
+        fmt.Printf("Panic error %v", e)
+        if err, ok := e.(stackTracer); ok {
+            fmt.Printf("Panic stack trace %v", err.StackTrace())
+        } else {
+            debug.PrintStack()
+        }
+        return e
+    }
+    return nil
+}
+
+// NormalizeName lower-cases name and replaces each run of characters outside [A-Za-z0-9:] with "-"
+func NormalizeName(name string) (string, error) {
+    name = strings.ToLower(name)
+    reg, err := regexp.Compile("[^A-Za-z0-9:]+")
+    if err != nil {
+        return "", err
+    }
+    return reg.ReplaceAllString(name, "-"), nil
+}
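
// --- Editor's note: illustrative example, not part of the vendored source ---
// NormalizeName lower-cases its input and collapses every run of characters
// outside [A-Za-z0-9:] into a single '-':
//
//	s, _ := containerv1.NormalizeName("My Cluster.example.com")
//	// s == "my-cluster-example-com"
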
+// FetchOCTokenForKubeConfig will update kubeConfig with an OpenShift token, if one is not there
+func (r *clusters) FetchOCTokenForKubeConfig(kubecfg []byte, cMeta *ClusterInfo, skipSSLVerification bool) (kubecfgEdited []byte, rerr error) {
+    // TODO: this is not a standard manner to login ... using proprietary OC CLI reverse engineering
+    defer func() {
+        err := PanicCatch(recover())
+        if err != nil {
+            rerr = fmt.Errorf("Could not login to openshift account %s", err)
+        }
+    }()
+
+    var cfg map[string]interface{}
+    err := yaml.Unmarshal(kubecfg, &cfg)
+    if err != nil {
+        return kubecfg, err
+    }
+
+    var token string
+    trace.Logger.Println("Creating user passcode to login for getting oc token")
+    passcode, err := r.client.TokenRefresher.GetPasscode()
+    // Editor's note: the error from GetPasscode was silently dropped in the
+    // original; checking it here avoids attempting a login with an empty passcode.
+    if err != nil {
+        return kubecfg, err
+    }
+
+    authEP, err := func(meta *ClusterInfo) (*authEndpoints, error) {
+        request := rest.GetRequest(meta.ServerURL + "/.well-known/oauth-authorization-server")
+        var auth authEndpoints
+        tempVar := r.client.ServiceName
+        r.client.ServiceName = ""
+
+        tempSSL := r.client.Config.SSLDisable
+        tempClient := r.client.Config.HTTPClient
+        r.client.Config.SSLDisable = skipSSLVerification
+        r.client.Config.HTTPClient = bxhttp.NewHTTPClient(r.client.Config)
+
+        defer func() {
+            r.client.ServiceName = tempVar
+            r.client.Config.SSLDisable = tempSSL
+            r.client.Config.HTTPClient = tempClient
+        }()
+        resp, err := r.client.SendRequest(request, &auth)
+        if err != nil {
+            return &auth, err
+        }
+        defer resp.Body.Close()
+        if resp.StatusCode > 299 {
+            msg, _ := ioutil.ReadAll(resp.Body)
+            return nil, fmt.Errorf("Bad status code [%d] returned when fetching Cluster authentication endpoints: %s", resp.StatusCode, msg)
+        }
+        auth.ServerURL = meta.ServerURL
+        return &auth, nil
+    }(cMeta)
+    // Editor's note: added check; authEP may be nil when the closure errors out.
+    if err != nil {
+        return kubecfg, err
+    }
+
+    trace.Logger.Println("Got authentication end points for getting oc token")
+    token, uname, err := r.openShiftAuthorizePasscode(authEP, passcode, cMeta.IsStagingSatelliteCluster())
+    if err != nil {
+        return kubecfg, err
+    }
+    trace.Logger.Println("Got the token and user ", uname)
+    clusterName, _ := NormalizeName(authEP.ServerURL[len("https://"):len(authEP.ServerURL)]) //TODO deal with http
+    ccontext := "default/" + clusterName + "/" + uname
+    uname = uname + "/" + clusterName
+    clusters := cfg["clusters"].([]interface{})
+    newCluster := map[string]interface{}{"name": clusterName, "cluster": map[string]interface{}{"server": authEP.ServerURL}}
+    if skipSSLVerification {
+        newCluster["cluster"].(map[string]interface{})["insecure-skip-tls-verify"] = true
+    }
+    clusters = append(clusters, newCluster)
+    cfg["clusters"] = clusters
+
+    contexts := cfg["contexts"].([]interface{})
+    newContext := map[string]interface{}{"name": ccontext, "context": map[string]interface{}{"cluster": clusterName, "namespace": "default", "user": uname}}
+    contexts = append(contexts, newContext)
+    cfg["contexts"] = contexts
+
+    users := cfg["users"].([]interface{})
+    newUser := map[string]interface{}{"name": uname, "user": map[string]interface{}{"token": token}}
+    users = append(users, newUser)
+    cfg["users"] = users
+
+    cfg["current-context"] = ccontext
+
+    bytes, err := yaml.Marshal(cfg)
+    if err != nil {
+        return kubecfg, err
+    }
+    kubecfg = bytes
+    return kubecfg, nil
+}
+
+// Never redirect. Let caller handle. This is an http.Client callback method (CheckRedirect)
+func neverRedirect(req *http.Request, via []*http.Request) error {
+    return http.ErrUseLastResponse
+}
+
+func (r *clusters) openShiftAuthorizePasscode(authEP *authEndpoints, passcode string, skipSSLVerification bool) (string, string, error) {
+    request := rest.GetRequest(authEP.AuthorizationEndpoint + "?response_type=token&client_id=openshift-challenging-client").
+ Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("passcode:%s", passcode)))) + + tempSSL := r.client.Config.SSLDisable + tempClient := r.client.Config.HTTPClient + r.client.Config.SSLDisable = skipSSLVerification + r.client.Config.HTTPClient = bxhttp.NewHTTPClient(r.client.Config) + + // To never redirect for this call + tempVar := r.client.Config.HTTPClient.CheckRedirect + r.client.Config.HTTPClient.CheckRedirect = neverRedirect + defer func() { + r.client.Config.HTTPClient.CheckRedirect = tempVar + r.client.Config.SSLDisable = tempSSL + r.client.Config.HTTPClient = tempClient + }() + + var respInterface interface{} + var resp *http.Response + var err error + for try := 1; try <= 3; try++ { + // bmxerror.NewRequestFailure("ServerErrorResponse", string(raw), resp.StatusCode) + resp, err = r.client.SendRequest(request, respInterface) + if err != nil { + if resp.StatusCode != 302 { + return "", "", err + } + } + defer resp.Body.Close() + if resp.StatusCode > 399 { + if try >= 3 { + msg, _ := ioutil.ReadAll(resp.Body) + return "", "", fmt.Errorf("Bad status code [%d] returned when openshift login: %s", resp.StatusCode, string(msg)) + } + time.Sleep(200 * time.Millisecond) + } else { + break + } + } + + loc, err := resp.Location() + if err != nil { + return "", "", err + } + val, err := url.ParseQuery(loc.Fragment) + if err != nil { + return "", "", err + } + token := val.Get("access_token") + trace.Logger.Println("Getting username after getting the token") + name, err := r.getOpenShiftUser(authEP, token) + if err != nil { + return "", "", err + } + return token, name, nil +} + +func (r *clusters) getOpenShiftUser(authEP *authEndpoints, token string) (string, error) { + request := rest.GetRequest(authEP.ServerURL+"/apis/user.openshift.io/v1/users/~"). + Set("Authorization", "Bearer "+token) + + var user openShiftUser + resp, err := r.client.SendRequest(request, &user) + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode > 299 { + msg, _ := ioutil.ReadAll(resp.Body) + return "", fmt.Errorf("Bad status code [%d] returned when fetching OpenShift user Details: %s", resp.StatusCode, string(msg)) + } + + return user.Metadata.Name, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/properties.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/properties.go new file mode 100644 index 00000000000..f1817519599 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/properties.go @@ -0,0 +1,53 @@ +package containerv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +//Vlan ... +type DCVlan struct { + ID string `json:"id"` + Properties DCVlanProperties `json:"properties"` + Type string `json:"type"` +} + +//VlanProperties ... +type DCVlanProperties struct { + LocalDiskStorageCapability string `json:"local_disk_storage_capability"` + Location string `json:"location"` + Name string `json:"name"` + Note string `json:"note"` + PrimaryRouter string `json:"primary_router"` + SANStorageCapability string `json:"san_storage_capability"` + VlanNumber string `json:"vlan_number"` + VlanType string `json:"vlan_type"` +} + +//Subnets interface +type Vlans interface { + List(datacenter string, target ClusterTargetHeader) ([]DCVlan, error) +} + +type vlan struct { + client *client.Client +} + +func newVlanAPI(c *client.Client) Vlans { + return &vlan{ + client: c, + } +} + +//GetVlans ... 
+func (r *vlan) List(datacenter string, target ClusterTargetHeader) ([]DCVlan, error) {
+    vlans := []DCVlan{}
+    rawURL := fmt.Sprintf("/v1/datacenters/%s/vlans", datacenter)
+    _, err := r.client.Get(rawURL, &vlans, target.ToMap())
+    if err != nil {
+        return nil, err
+    }
+
+    return vlans, err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/subnets.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/subnets.go
new file mode 100644
index 00000000000..880bada6bdd
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/subnets.go
@@ -0,0 +1,98 @@
+package containerv1
+
+import (
+    "fmt"
+
+    "github.com/IBM-Cloud/bluemix-go/client"
+)
+
+//Subnet ...
+type Subnet struct {
+    ID          string           `json:"id"`
+    Type        string           `json:"type"`
+    VlanID      string           `json:"vlan_id"`
+    IPAddresses []string         `json:"ip_addresses"`
+    Properties  SubnetProperties `json:"properties"`
+}
+
+//SubnetProperties ...
+type SubnetProperties struct {
+    CIDR              string `json:"cidr"`
+    NetworkIdentifier string `json:"network_identifier"`
+    Note              string `json:"note"`
+    SubnetType        string `json:"subnet_type"`
+    DisplayLabel      string `json:"display_label"`
+    Gateway           string `json:"gateway"`
+}
+
+//UserSubnet ...
+type UserSubnet struct {
+    CIDR   string `json:"cidr" binding:"required" description:"The CIDR of the subnet that will be bound to the cluster. Eg.format: 12.34.56.78/90"`
+    VLANID string `json:"vlan_id" binding:"required" description:"The private VLAN where the CIDR exists'"`
+}
+
+//Subnets interface
+type Subnets interface {
+    AddSubnet(clusterName string, subnetID string, target ClusterTargetHeader) error
+    List(target ClusterTargetHeader, opts ...string) ([]Subnet, error)
+    AddClusterUserSubnet(clusterID string, userSubnet UserSubnet, target ClusterTargetHeader) error
+    ListClusterUserSubnets(clusterID string, target ClusterTargetHeader) ([]Vlan, error)
+    DeleteClusterUserSubnet(clusterID string, subnetID string, vlanID string, target ClusterTargetHeader) error
+}
+
+type subnet struct {
+    client *client.Client
+}
+
+func newSubnetAPI(c *client.Client) Subnets {
+    return &subnet{
+        client: c,
+    }
+}
+
+//List returns the subnets, optionally filtered by location
+func (r *subnet) List(target ClusterTargetHeader, opts ...string) ([]Subnet, error) {
+    subnets := []Subnet{}
+    rawURL := "/v1/subnets"
+    if len(opts) > 0 {
+        rawURL = fmt.Sprintf("/v1/subnets?location=%s", opts[0])
+    }
+    _, err := r.client.Get(rawURL, &subnets, target.ToMap())
+    if err != nil {
+        return nil, err
+    }
+
+    return subnets, err
+}
+
+//AddSubnet adds a subnet to a cluster
+func (r *subnet) AddSubnet(name string, subnetID string, target ClusterTargetHeader) error {
+    rawURL := fmt.Sprintf("/v1/clusters/%s/subnets/%s", name, subnetID)
+    _, err := r.client.Put(rawURL, nil, nil, target.ToMap())
+    return err
+}
+
+//AddClusterUserSubnet binds a user-managed subnet to a cluster
+func (r *subnet) AddClusterUserSubnet(clusterID string, userSubnet UserSubnet, target ClusterTargetHeader) error {
+    rawURL := fmt.Sprintf("/v1/clusters/%s/usersubnets", clusterID)
+    // Editor's note: the original posted nil and silently dropped the userSubnet
+    // argument; the request body is the subnet being bound.
+    _, err := r.client.Post(rawURL, userSubnet, nil, target.ToMap())
+    return err
+}
+
+//DeleteClusterUserSubnet ...
+func (r *subnet) DeleteClusterUserSubnet(clusterID string, subnetID string, vlanID string, target ClusterTargetHeader) error {
+    rawURL := fmt.Sprintf("/v1/clusters/%s/usersubnets/%s/vlans/%s", clusterID, subnetID, vlanID)
+    _, err := r.client.Delete(rawURL, target.ToMap())
+    return err
+}
+
+//ListClusterUserSubnets ...
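
// --- Editor's note: illustrative usage sketch, not part of the vendored source ---
// Binding a user-managed subnet and reading the cluster's user subnets back;
// the CIDR and VLAN ID are placeholders:
//
//	sub := containerv1.UserSubnet{CIDR: "10.10.10.0/24", VLANID: "1764905"}
//	if err := csAPI.Subnets().AddClusterUserSubnet("mycluster", sub, target); err != nil {
//		log.Fatal(err)
//	}
//	vlans, err := csAPI.Subnets().ListClusterUserSubnets("mycluster", target)
//	if err == nil {
//		fmt.Println("user subnets on", len(vlans), "vlans")
//	}
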
+func (r *subnet) ListClusterUserSubnets(clusterID string, target ClusterTargetHeader) ([]Vlan, error) { + vlans := []Vlan{} + rawURL := fmt.Sprintf("/v1/clusters/%s/usersubnets", clusterID) + _, err := r.client.Get(rawURL, &vlans, target.ToMap()) + if err != nil { + return nil, err + } + + return vlans, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/versions.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/versions.go new file mode 100644 index 00000000000..7fb9c9c56fb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/versions.go @@ -0,0 +1,51 @@ +package containerv1 + +import ( + "github.com/IBM-Cloud/bluemix-go/client" +) + +//KubeVersion ... +type KubeVersion struct { + Major int + Minor int + Patch int + Default bool +} + +type V1Version map[string][]KubeVersion + +//KubeVersions interface +type KubeVersions interface { + List(target ClusterTargetHeader) ([]KubeVersion, error) + ListV1(target ClusterTargetHeader) (V1Version, error) +} + +type version struct { + client *client.Client +} + +func newKubeVersionAPI(c *client.Client) KubeVersions { + return &version{ + client: c, + } +} + +//List ... +func (v *version) List(target ClusterTargetHeader) ([]KubeVersion, error) { + versions := []KubeVersion{} + _, err := v.client.Get("/v1/kube-versions", &versions, target.ToMap()) + if err != nil { + return nil, err + } + return versions, err +} + +func (v *version) ListV1(target ClusterTargetHeader) (V1Version, error) { + v1ver := V1Version{} + _, err := v.client.Get("/v1/versions", &v1ver, target.ToMap()) + if err != nil { + return nil, err + } + + return v1ver, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/webhooks.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/webhooks.go new file mode 100644 index 00000000000..be8f5f8c549 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/webhooks.go @@ -0,0 +1,49 @@ +package containerv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +//WebHook is the web hook +type WebHook struct { + Level string + Type string + URL string +} + +//Webhooks interface +type Webhooks interface { + List(clusterName string, target ClusterTargetHeader) ([]WebHook, error) + Add(clusterName string, params WebHook, target ClusterTargetHeader) error +} + +type webhook struct { + client *client.Client +} + +func newWebhookAPI(c *client.Client) Webhooks { + return &webhook{ + client: c, + } +} + +//List ... +func (r *webhook) List(name string, target ClusterTargetHeader) ([]WebHook, error) { + rawURL := fmt.Sprintf("/v1/clusters/%s/webhooks", name) + webhooks := []WebHook{} + _, err := r.client.Get(rawURL, &webhooks, target.ToMap()) + if err != nil { + return nil, err + } + + return webhooks, err +} + +//Add ... 
+func (r *webhook) Add(name string, params WebHook, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s/webhooks", name) + _, err := r.client.Post(rawURL, params, nil, target.ToMap()) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/worker_pool.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/worker_pool.go new file mode 100644 index 00000000000..b03b7475ceb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/worker_pool.go @@ -0,0 +1,184 @@ +package containerv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +// WorkerPoolConfig common worker pool data +type WorkerPoolConfig struct { + Name string `json:"name" binding:"required"` + Size int `json:"sizePerZone" binding:"required"` + MachineType string `json:"machineType" binding:"required"` + Isolation string `json:"isolation"` + Labels map[string]string `json:"labels"` + Entitlement string `json:"entitlement"` +} + +// WorkerPoolRequest provides worker pool data +// swagger:model +type WorkerPoolRequest struct { + WorkerPoolConfig + DiskEncryption bool `json:"diskEncryption" description:"true or false to use encryption for the secondary disk"` + Zones []WorkerPoolZone `json:"zones"` +} + +// WorkerPoolPatchRequest provides attributes to patch update worker pool +// swagger:model +type WorkerPoolPatchRequest struct { + Size int `json:"sizePerZone"` + Labels map[string]string `json:"labels"` + ReasonForResize string `json:"reasonForResize"` + State string `json:"state"` +} + +// WorkerPoolResponse provides worker pool data +// swagger:model +type WorkerPoolResponse struct { + WorkerPoolConfig + ID string `json:"id" binding:"required"` + Region string `json:"region" binding:"required"` + State string `json:"state"` + ReasonForDelete string `json:"reasonForDelete"` + IsBalanced bool `json:"isBalanced"` + Zones WorkerPoolZoneResponses `json:"zones"` +} + +// WorkerPoolResponses sorts WorkerPoolResponse by ID. +// swagger:model +type WorkerPoolResponses []WorkerPoolResponse + +// WorkerPoolZoneNetwork holds network configuration for a zone +type WorkerPoolZoneNetwork struct { + PrivateVLAN string `json:"privateVlan" binding:"required"` + PublicVLAN string `json:"publicVlan"` +} + +// WorkerPoolZone provides zone data +// swagger:model +type WorkerPoolZone struct { + WorkerPoolZoneNetwork + ID string `json:"id" binding:"required"` +} + +// WorkerPoolZonePatchRequest updates worker pool zone data +// swagger:model +type WorkerPoolZonePatchRequest struct { + WorkerPoolZoneNetwork +} + +// WorkerPoolZoneResponse response contents for zone +// swagger:model +type WorkerPoolZoneResponse struct { + WorkerPoolZone + WorkerCount int `json:"workerCount"` +} + +// WorkerPoolZoneResponses sorts WorkerPoolZoneResponse by ID. +// swagger:model +type WorkerPoolZoneResponses []WorkerPoolZoneResponse + +//Workers ... 
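
// --- Editor's note: illustrative usage sketch, not part of the vendored source ---
// The WorkerPool interface (below) manages worker pools per cluster. A
// create-then-resize sketch with placeholder values:
//
//	pool := containerv1.WorkerPoolRequest{
//		WorkerPoolConfig: containerv1.WorkerPoolConfig{
//			Name:        "pool1",
//			Size:        2,
//			MachineType: "u2c.2x4",
//		},
//	}
//	created, err := csAPI.WorkerPools().CreateWorkerPool("mycluster", pool, target)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := csAPI.WorkerPools().ResizeWorkerPool("mycluster", created.ID, 3, target); err != nil {
//		log.Fatal(err)
//	}
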
+type WorkerPool interface {
+	CreateWorkerPool(clusterNameOrID string, workerPoolReq WorkerPoolRequest, target ClusterTargetHeader) (WorkerPoolResponse, error)
+	ResizeWorkerPool(clusterNameOrID, workerPoolNameOrID string, size int, target ClusterTargetHeader) error
+	UpdateLabelsWorkerPool(clusterNameOrID, workerPoolNameOrID string, labels map[string]string, target ClusterTargetHeader) error
+	PatchWorkerPool(clusterNameOrID, workerPoolNameOrID, state string, target ClusterTargetHeader) error
+	DeleteWorkerPool(clusterNameOrID string, workerPoolNameOrID string, target ClusterTargetHeader) error
+	ListWorkerPools(clusterNameOrID string, target ClusterTargetHeader) ([]WorkerPoolResponse, error)
+	GetWorkerPool(clusterNameOrID, workerPoolNameOrID string, target ClusterTargetHeader) (WorkerPoolResponse, error)
+	AddZone(clusterNameOrID string, poolID string, workerPoolZone WorkerPoolZone, target ClusterTargetHeader) error
+	RemoveZone(clusterNameOrID, zone, poolID string, target ClusterTargetHeader) error
+	UpdateZoneNetwork(clusterNameOrID, zone, poolID, privateVlan, publicVlan string, target ClusterTargetHeader) error
+}
+
+type workerpool struct {
+	client *client.Client
+}
+
+func newWorkerPoolAPI(c *client.Client) WorkerPool {
+	return &workerpool{
+		client: c,
+	}
+}
+
+// CreateWorkerPool calls the API to create a worker pool
+func (w *workerpool) CreateWorkerPool(clusterNameOrID string, workerPoolReq WorkerPoolRequest, target ClusterTargetHeader) (WorkerPoolResponse, error) {
+	var successV WorkerPoolResponse
+	_, err := w.client.Post(fmt.Sprintf("/v1/clusters/%s/workerpools", clusterNameOrID), workerPoolReq, &successV, target.ToMap())
+	return successV, err
+}
+
+// PatchWorkerPool calls the API to patch a worker pool's state
+func (w *workerpool) PatchWorkerPool(clusterNameOrID, workerPoolNameOrID, state string, target ClusterTargetHeader) error {
+	requestBody := WorkerPoolPatchRequest{
+		State: state,
+	}
+	_, err := w.client.Patch(fmt.Sprintf("/v1/clusters/%s/workerpools/%s", clusterNameOrID, workerPoolNameOrID), requestBody, nil, target.ToMap())
+	return err
+}
+
+// ResizeWorkerPool calls the API to resize a worker pool
+func (w *workerpool) ResizeWorkerPool(clusterNameOrID, workerPoolNameOrID string, size int, target ClusterTargetHeader) error {
+	requestBody := WorkerPoolPatchRequest{
+		State: "resizing",
+		Size:  size,
+	}
+	_, err := w.client.Patch(fmt.Sprintf("/v1/clusters/%s/workerpools/%s", clusterNameOrID, workerPoolNameOrID), requestBody, nil, target.ToMap())
+	return err
+}
+
+// UpdateLabelsWorkerPool calls the API to update the labels on a worker pool
+func (w *workerpool) UpdateLabelsWorkerPool(clusterNameOrID, workerPoolNameOrID string, labels map[string]string, target ClusterTargetHeader) error {
+	requestBody := WorkerPoolPatchRequest{
+		State:  "labels",
+		Labels: labels,
+	}
+	_, err := w.client.Patch(fmt.Sprintf("/v1/clusters/%s/workerpools/%s", clusterNameOrID, workerPoolNameOrID), requestBody, nil, target.ToMap())
+	return err
+}
+
+// DeleteWorkerPool calls the API to remove a worker pool
+func (w *workerpool) DeleteWorkerPool(clusterNameOrID string, workerPoolNameOrID string, target ClusterTargetHeader) error {
+	// Make the request, don't care about return value
+	_, err := w.client.Delete(fmt.Sprintf("/v1/clusters/%s/workerpools/%s", clusterNameOrID, workerPoolNameOrID), target.ToMap())
+	return err
+}
+
+// ListWorkerPools calls the API to list all worker pools for a cluster
+func (w *workerpool) ListWorkerPools(clusterNameOrID string, target ClusterTargetHeader)
([]WorkerPoolResponse, error) { + var successV []WorkerPoolResponse + _, err := w.client.Get(fmt.Sprintf("/v1/clusters/%s/workerpools", clusterNameOrID), &successV, target.ToMap()) + return successV, err +} + +// GetWorkerPool calls the API to get a worker pool +func (w *workerpool) GetWorkerPool(clusterNameOrID, workerPoolNameOrID string, target ClusterTargetHeader) (WorkerPoolResponse, error) { + var successV WorkerPoolResponse + _, err := w.client.Get(fmt.Sprintf("/v1/clusters/%s/workerpools/%s", clusterNameOrID, workerPoolNameOrID), &successV, target.ToMap()) + return successV, err +} + +// AddZone calls the API to add a zone to a cluster and worker pool +func (w *workerpool) AddZone(clusterNameOrID string, poolID string, workerPoolZone WorkerPoolZone, target ClusterTargetHeader) error { + // Make the request, don't care about return value + _, err := w.client.Post(fmt.Sprintf("/v1/clusters/%s/workerpools/%s/zones", clusterNameOrID, poolID), workerPoolZone, nil, target.ToMap()) + return err +} + +// RemoveZone calls the API to remove a zone from a worker pool in a cluster +func (w *workerpool) RemoveZone(clusterNameOrID, zone, poolID string, target ClusterTargetHeader) error { + _, err := w.client.Delete(fmt.Sprintf("/v1/clusters/%s/workerpools/%s/zones/%s", clusterNameOrID, poolID, zone), target.ToMap()) + return err +} + +// UpdateZoneNetwork calls the API to update a zone's network +func (w *workerpool) UpdateZoneNetwork(clusterNameOrID, zone, poolID, privateVlan, publicVlan string, target ClusterTargetHeader) error { + body := WorkerPoolZoneNetwork{ + PrivateVLAN: privateVlan, + PublicVLAN: publicVlan, + } + _, err := w.client.Patch(fmt.Sprintf("/v1/clusters/%s/workerpools/%s/zones/%s", clusterNameOrID, poolID, zone), body, nil, target.ToMap()) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/workers.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/workers.go new file mode 100644 index 00000000000..21ae421fc99 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv1/workers.go @@ -0,0 +1,125 @@ +package containerv1 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +//Worker ... +type Worker struct { + Billing string `json:"billing,omitempty"` + ErrorMessage string `json:"errorMessage"` + ID string `json:"id"` + Isolation string `json:"isolation"` + KubeVersion string `json:"kubeVersion"` + MachineType string `json:"machineType"` + PrivateIP string `json:"privateIP"` + PrivateVlan string `json:"privateVlan"` + PublicIP string `json:"publicIP"` + PublicVlan string `json:"publicVlan"` + Location string `json:"location"` + PoolID string `json:"poolid"` + PoolName string `json:"poolName"` + TrustedStatus string `json:"trustedStatus"` + ReasonForDelete string `json:"reasonForDelete"` + VersionEOS string `json:"versionEOS"` + MasterVersionEOS string `json:"masterVersionEOS"` + State string `json:"state"` + Status string `json:"status"` + TargetVersion string `json:"targetVersion"` +} + +//WorkerParam ... 
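// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored source: resizing a pool through
// the WorkerPool interface above. ResizeWorkerPool issues the same PATCH as
// PatchWorkerPool, with state "resizing" and the new per-zone size.
package example

import "github.com/IBM-Cloud/bluemix-go/api/container/containerv1"

// ScaleWorkerPool sets the per-zone worker count of an existing pool.
func ScaleWorkerPool(api containerv1.WorkerPool, clusterID, poolName string, sizePerZone int, target containerv1.ClusterTargetHeader) error {
	return api.ResizeWorkerPool(clusterID, poolName, sizePerZone, target)
}
// ---------------------------------------------------------------------------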
+type WorkerParam struct {
+	MachineType string `json:"machineType,omitempty" description:"The worker's machine type"`
+	PrivateVlan string `json:"privateVlan,omitempty" description:"The worker's private vlan"`
+	PublicVlan  string `json:"publicVlan,omitempty" description:"The worker's public vlan"`
+	Isolation   string `json:"isolation,omitempty" description:"Can be 'public' or 'private'"`
+	WorkerNum   int    `json:"workerNum,omitempty" binding:"required" description:"The number of workers"`
+	Prefix      string `json:"prefix,omitempty" description:"hostname prefix for new workers"`
+	Action      string `json:"action,omitempty"`
+	Count       int    `json:"count,omitempty"`
+}
+
+//WorkerUpdateParam ...
+type WorkerUpdateParam struct {
+	Action string `json:"action" binding:"required" description:"Action to perform on the worker"`
+}
+
+//Workers ...
+type Workers interface {
+	List(clusterName string, target ClusterTargetHeader) ([]Worker, error)
+	ListByWorkerPool(clusterIDOrName, workerPoolIDOrName string, showDeleted bool, target ClusterTargetHeader) ([]Worker, error)
+	Get(workerID string, target ClusterTargetHeader) (Worker, error)
+	Add(clusterName string, params WorkerParam, target ClusterTargetHeader) error
+	Delete(clusterName string, workerID string, target ClusterTargetHeader) error
+	Update(clusterName string, workerID string, params WorkerUpdateParam, target ClusterTargetHeader) error
+}
+
+type worker struct {
+	client *client.Client
+}
+
+func newWorkerAPI(c *client.Client) Workers {
+	return &worker{
+		client: c,
+	}
+}
+
+//Get ...
+func (r *worker) Get(id string, target ClusterTargetHeader) (Worker, error) {
+	rawURL := fmt.Sprintf("/v1/workers/%s", id)
+	worker := Worker{}
+	_, err := r.client.Get(rawURL, &worker, target.ToMap())
+	if err != nil {
+		return worker, err
+	}
+
+	return worker, err
+}
+
+func (r *worker) Add(name string, params WorkerParam, target ClusterTargetHeader) error {
+	rawURL := fmt.Sprintf("/v1/clusters/%s/workers", name)
+	_, err := r.client.Post(rawURL, params, nil, target.ToMap())
+	return err
+}
+
+//Delete ...
+func (r *worker) Delete(name string, workerID string, target ClusterTargetHeader) error {
+	rawURL := fmt.Sprintf("/v1/clusters/%s/workers/%s", name, workerID)
+	_, err := r.client.Delete(rawURL, target.ToMap())
+	return err
+}
+
+//Update ...
+func (r *worker) Update(name string, workerID string, params WorkerUpdateParam, target ClusterTargetHeader) error {
+	rawURL := fmt.Sprintf("/v1/clusters/%s/workers/%s", name, workerID)
+	_, err := r.client.Put(rawURL, params, nil, target.ToMap())
+	return err
+}
+
+//List ...
+func (r *worker) List(name string, target ClusterTargetHeader) ([]Worker, error) {
+	rawURL := fmt.Sprintf("/v1/clusters/%s/workers", name)
+	workers := []Worker{}
+	_, err := r.client.Get(rawURL, &workers, target.ToMap())
+	if err != nil {
+		return nil, err
+	}
+	return workers, err
+}
+
+//ListByWorkerPool ...
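// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored source: listing the workers of
// a cluster with the Workers interface above; the tab layout is arbitrary.
package example

import (
	"fmt"

	"github.com/IBM-Cloud/bluemix-go/api/container/containerv1"
)

// PrintWorkers prints one line per worker with its ID, private IP, and state.
func PrintWorkers(api containerv1.Workers, clusterID string, target containerv1.ClusterTargetHeader) error {
	workers, err := api.List(clusterID, target)
	if err != nil {
		return err
	}
	for _, w := range workers {
		fmt.Printf("%s\t%s\t%s/%s\n", w.ID, w.PrivateIP, w.State, w.Status)
	}
	return nil
}
// ---------------------------------------------------------------------------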
+func (r *worker) ListByWorkerPool(clusterIDOrName, workerPoolIDOrName string, showDeleted bool, target ClusterTargetHeader) ([]Worker, error) { + rawURL := fmt.Sprintf("/v1/clusters/%s/workers?showDeleted=%t", clusterIDOrName, showDeleted) + if len(workerPoolIDOrName) > 0 { + rawURL += "&pool=" + workerPoolIDOrName + } + workers := []Worker{} + _, err := r.client.Get(rawURL, &workers, target.ToMap()) + if err != nil { + return nil, err + } + return workers, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/alb.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/alb.go new file mode 100644 index 00000000000..b7381f4f22d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/alb.go @@ -0,0 +1,92 @@ +package containerv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +type AlbCreateReq struct { + Cluster string `json:"cluster"` + EnableByDefault bool `json:"enableByDefault"` + Type string `json:"type"` + ZoneAlb string `json:"zone"` +} + +type ClusterALB struct { + ID string `json:"id"` + Region string `json:"region"` + DataCenter string `json:"dataCenter"` + IsPaid bool `json:"isPaid"` + PublicIngressHostname string `json:"publicIngressHostname"` + PublicIngressSecretName string `json:"publicIngressSecretName"` + ALBs []AlbConfig `json:"alb"` +} +type AlbConfig struct { + AlbBuild string `json:"albBuild"` + AlbID string `json:"albID"` + AlbType string `json:"albType"` + AuthBuild string `json:"authBuild"` + Cluster string `json:"cluster"` + CreatedDate string `json:"createdDate"` + DisableDeployment bool `json:"disableDeployment"` + Enable bool `json:"enable"` + LoadBalancerHostname string `json:"loadBalancerHostname"` + Name string `json:"name"` + NumOfInstances string `json:"numOfInstances"` + Resize bool `json:"resize"` + State string `json:"state"` + Status string `json:"status"` + ZoneAlb string `json:"zone"` +} + +type alb struct { + client *client.Client +} + +//Clusters interface +type Alb interface { + CreateAlb(albCreateReq AlbCreateReq, target ClusterTargetHeader) error + DisableAlb(disableAlbReq AlbConfig, target ClusterTargetHeader) error + EnableAlb(enableAlbReq AlbConfig, target ClusterTargetHeader) error + GetAlb(albid string, target ClusterTargetHeader) (AlbConfig, error) + ListClusterAlbs(clusterNameOrID string, target ClusterTargetHeader) ([]AlbConfig, error) +} + +func newAlbAPI(c *client.Client) Alb { + return &alb{ + client: c, + } +} + +func (r *alb) CreateAlb(albCreateReq AlbCreateReq, target ClusterTargetHeader) error { + // Make the request, don't care about return value + _, err := r.client.Post("/v2/alb/vpc/createAlb", albCreateReq, nil, target.ToMap()) + return err +} + +func (r *alb) DisableAlb(disableAlbReq AlbConfig, target ClusterTargetHeader) error { + // Make the request, don't care about return value + _, err := r.client.Post("/v2/alb/vpc/disableAlb", disableAlbReq, nil, target.ToMap()) + return err +} + +func (r *alb) EnableAlb(enableAlbReq AlbConfig, target ClusterTargetHeader) error { + // Make the request, don't care about return value + _, err := r.client.Post("/v2/alb/vpc/enableAlb", enableAlbReq, nil, target.ToMap()) + return err +} + +func (r *alb) GetAlb(albID string, target ClusterTargetHeader) (AlbConfig, error) { + var successV AlbConfig + _, err := r.client.Get(fmt.Sprintf("/v2/alb/getAlb?albID=%s", albID), &successV, target.ToMap()) + return successV, err +} + +// ListClusterALBs returns the list of albs available for cluster +func (r 
*alb) ListClusterAlbs(clusterNameOrID string, target ClusterTargetHeader) ([]AlbConfig, error) {
+	var successV ClusterALB
+	rawURL := fmt.Sprintf("v2/alb/getClusterAlbs?cluster=%s", clusterNameOrID)
+	_, err := r.client.Get(rawURL, &successV, target.ToMap())
+	return successV.ALBs, err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/api_service.go
new file mode 100644
index 00000000000..7f24035ec83
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/api_service.go
@@ -0,0 +1,108 @@
+package containerv2
+
+import (
+	gohttp "net/http"
+
+	bluemix "github.com/IBM-Cloud/bluemix-go"
+	"github.com/IBM-Cloud/bluemix-go/authentication"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/http"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+	"github.com/IBM-Cloud/bluemix-go/session"
+)
+
+//ErrCodeAPICreation ...
+const ErrCodeAPICreation = "APICreationError"
+
+//ContainerServiceAPI is the Armada K8s client ...
+type ContainerServiceAPI interface {
+	Monitoring() Monitoring
+	Logging() Logging
+	Clusters() Clusters
+	WorkerPools() WorkerPool
+	Albs() Alb
+	Workers() Workers
+	Kms() Kms
+	Ingresses() Ingress
+
+	//TODO Add other services
+}
+
+//csService holds the client
+type csService struct {
+	*client.Client
+}
+
+//New ...
+func New(sess *session.Session) (ContainerServiceAPI, error) {
+	config := sess.Config.Copy()
+	err := config.ValidateConfigForService(bluemix.VpcContainerService)
+	if err != nil {
+		return nil, err
+	}
+	if config.HTTPClient == nil {
+		config.HTTPClient = http.NewHTTPClient(config)
+	}
+	tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{
+		DefaultHeader: gohttp.Header{
+			"User-Agent": []string{http.UserAgent()},
+		},
+		HTTPClient: config.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if config.IAMAccessToken == "" {
+		err := authentication.PopulateTokens(tokenRefresher, config)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if config.Endpoint == nil {
+		ep, err := config.EndpointLocator.ContainerEndpoint()
+		if err != nil {
+			return nil, err
+		}
+		config.Endpoint = &ep
+	}
+
+	return &csService{
+		Client: client.New(config, bluemix.VpcContainerService, tokenRefresher),
+	}, nil
+}
+
+//Clusters implements Clusters API
+func (c *csService) Clusters() Clusters {
+	return newClusterAPI(c.Client)
+}
+
+//Monitoring implements Monitoring API
+func (c *csService) Monitoring() Monitoring {
+	return newMonitoringAPI(c.Client)
+}
+
+//Logging implements Logging API
+func (c *csService) Logging() Logging {
+	return newLoggingAPI(c.Client)
+}
+
+//WorkerPools implements Cluster WorkerPools API
+func (c *csService) WorkerPools() WorkerPool {
+	return newWorkerPoolAPI(c.Client)
+}
+
+//Albs implements Cluster Albs API
+func (c *csService) Albs() Alb {
+	return newAlbAPI(c.Client)
+}
+
+//Ingresses implements Cluster Ingress API
+func (c *csService) Ingresses() Ingress {
+	return newIngressAPI(c.Client)
+}
+
+//Kms implements Cluster Kms API
+func (c *csService) Kms() Kms {
+	return newKmsAPI(c.Client)
+}
+
+//Workers implements Cluster Workers API
+func (c *csService) Workers() Workers {
+	return newWorkerAPI(c.Client)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/clusters.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/clusters.go
new file mode 100644
index 00000000000..880c33bb376
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/clusters.go
@@ -0,0 +1,535 @@
+package containerv2 + +import ( + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "strings" + + "gopkg.in/yaml.v2" + + "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/trace" +) + +//ClusterCreateRequest ... +type ClusterCreateRequest struct { + DisablePublicServiceEndpoint bool `json:"disablePublicServiceEndpoint"` + KubeVersion string `json:"kubeVersion" description:"kubeversion of cluster"` + Billing string `json:"billing,omitempty"` + PodSubnet string `json:"podSubnet"` + Provider string `json:"provider"` + ServiceSubnet string `json:"serviceSubnet"` + Name string `json:"name" binding:"required" description:"The cluster's name"` + DefaultWorkerPoolEntitlement string `json:"defaultWorkerPoolEntitlement"` + CosInstanceCRN string `json:"cosInstanceCRN"` + WorkerPools WorkerPoolConfig `json:"workerPool"` +} + +type WorkerPoolConfig struct { + DiskEncryption bool `json:"diskEncryption,omitempty"` + Entitlement string `json:"entitlement"` + Flavor string `json:"flavor"` + Isolation string `json:"isolation,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Name string `json:"name" binding:"required" description:"The workerpool's name"` + VpcID string `json:"vpcID"` + WorkerCount int `json:"workerCount"` + Zones []Zone `json:"zones"` +} + +// type Label struct { +// AdditionalProp1 string `json:"additionalProp1,omitempty"` +// AdditionalProp2 string `json:"additionalProp2,omitempty"` +// AdditionalProp3 string `json:"additionalProp3,omitempty"` +// } + +type Zone struct { + ID string `json:"id,omitempty" description:"The id"` + SubnetID string `json:"subnetID,omitempty"` +} + +//ClusterInfo ... 
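// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored source: filling in the create
// request defined above for a VPC Gen2 cluster. The flavor, version, and all
// IDs are placeholders; note that WorkerPools is a single pool config, not a
// slice.
package example

import "github.com/IBM-Cloud/bluemix-go/api/container/containerv2"

// NewExampleCreateRequest builds a minimal VPC Gen2 cluster request.
func NewExampleCreateRequest() containerv2.ClusterCreateRequest {
	return containerv2.ClusterCreateRequest{
		Name:        "my-vpc-cluster",
		Provider:    "vpc-gen2",
		KubeVersion: "1.19.9",
		WorkerPools: containerv2.WorkerPoolConfig{
			Name:        "default",
			Flavor:      "bx2.4x16",
			VpcID:       "example-vpc-id",
			WorkerCount: 2,
			Zones: []containerv2.Zone{
				{ID: "us-south-1", SubnetID: "example-subnet-id"},
			},
		},
	}
}
// ---------------------------------------------------------------------------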
+type ClusterInfo struct { + CreatedDate string `json:"createdDate"` + DataCenter string `json:"dataCenter"` + ID string `json:"id"` + Location string `json:"location"` + Entitlement string `json:"entitlement"` + MasterKubeVersion string `json:"masterKubeVersion"` + Name string `json:"name"` + Region string `json:"region"` + ResourceGroupID string `json:"resourceGroup"` + State string `json:"state"` + IsPaid bool `json:"isPaid"` + Addons []Addon `json:"addons"` + OwnerEmail string `json:"ownerEmail"` + Type string `json:"type"` + TargetVersion string `json:"targetVersion"` + ServiceSubnet string `json:"serviceSubnet"` + ResourceGroupName string `json:"resourceGroupName"` + Provider string `json:"provider"` + PodSubnet string `json:"podSubnet"` + MultiAzCapable bool `json:"multiAzCapable"` + APIUser string `json:"apiUser"` + ServerURL string `json:"serverURL"` + MasterURL string `json:"masterURL"` + DisableAutoUpdate bool `json:"disableAutoUpdate"` + WorkerZones []string `json:"workerZones"` + Vpcs []string `json:"vpcs"` + CRN string `json:"crn"` + VersionEOS string `json:"versionEOS"` + ServiceEndpoints Endpoints `json:"serviceEndpoints"` + Lifecycle LifeCycleInfo `json:"lifecycle"` + WorkerCount int `json:"workerCount"` + Ingress IngresInfo `json:"ingress"` + Features Feat `json:"features"` +} +type Feat struct { + KeyProtectEnabled bool `json:"keyProtectEnabled"` + PullSecretApplied bool `json:"pullSecretApplied"` +} +type IngresInfo struct { + HostName string `json:"hostname"` + SecretName string `json:"secretName"` +} +type LifeCycleInfo struct { + ModifiedDate string `json:"modifiedDate"` + MasterStatus string `json:"masterStatus"` + MasterStatusModifiedDate string `json:"masterStatusModifiedDate"` + MasterHealth string `json:"masterHealth"` + MasterState string `json:"masterState"` +} + +//ClusterTargetHeader ... +type ClusterTargetHeader struct { + AccountID string + ResourceGroup string + Provider string // supported providers e.g vpc-classic , vpc-gen2, satellite +} +type Endpoints struct { + PrivateServiceEndpointEnabled bool `json:"privateServiceEndpointEnabled"` + PrivateServiceEndpointURL string `json:"privateServiceEndpointURL"` + PublicServiceEndpointEnabled bool `json:"publicServiceEndpointEnabled"` + PublicServiceEndpointURL string `json:"publicServiceEndpointURL"` +} + +type Addon struct { + Name string `json:"name"` + Version string `json:"version"` +} + +//ClusterCreateResponse ... +type ClusterCreateResponse struct { + ID string `json:"clusterID"` +} + +//Clusters interface +type Clusters interface { + Create(params ClusterCreateRequest, target ClusterTargetHeader) (ClusterCreateResponse, error) + List(target ClusterTargetHeader) ([]ClusterInfo, error) + Delete(name string, target ClusterTargetHeader, deleteDependencies ...bool) error + GetCluster(name string, target ClusterTargetHeader) (*ClusterInfo, error) + GetClusterConfigDetail(name, homeDir string, admin bool, target ClusterTargetHeader) (containerv1.ClusterKeyInfo, error) + StoreConfigDetail(name, baseDir string, admin bool, createCalicoConfig bool, target ClusterTargetHeader) (string, containerv1.ClusterKeyInfo, error) + + //TODO Add other opertaions +} +type clusters struct { + client *client.Client + pathPrefix string +} + +const ( + accountIDHeader = "X-Auth-Resource-Account" + resourceGroupHeader = "X-Auth-Resource-Group" +) + +//ToMap ... 
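// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored source: wiring a v2 client and
// fetching one cluster. session.New is assumed to pick up credentials from
// the environment; the Provider filter in the target header is optional.
package example

import (
	"github.com/IBM-Cloud/bluemix-go/api/container/containerv2"
	"github.com/IBM-Cloud/bluemix-go/session"
)

// DescribeCluster looks up a cluster by name or ID.
func DescribeCluster(name string) (*containerv2.ClusterInfo, error) {
	sess, err := session.New()
	if err != nil {
		return nil, err
	}
	api, err := containerv2.New(sess)
	if err != nil {
		return nil, err
	}
	target := containerv2.ClusterTargetHeader{Provider: "vpc-gen2"}
	return api.Clusters().GetCluster(name, target)
}
// ---------------------------------------------------------------------------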
+func (c ClusterTargetHeader) ToMap() map[string]string { + m := make(map[string]string, 3) + m[accountIDHeader] = c.AccountID + m[resourceGroupHeader] = c.ResourceGroup + return m +} + +func newClusterAPI(c *client.Client) Clusters { + return &clusters{ + client: c, + //pathPrefix: "/v2/vpc/", + } +} + +//List ... +func (r *clusters) List(target ClusterTargetHeader) ([]ClusterInfo, error) { + clusters := []ClusterInfo{} + var err error + if target.Provider != "satellite" { + getClustersPath := "/v2/vpc/getClusters" + if len(target.Provider) > 0 { + getClustersPath = fmt.Sprintf(getClustersPath+"?provider=%s", url.QueryEscape(target.Provider)) + } + _, err := r.client.Get(getClustersPath, &clusters, target.ToMap()) + if err != nil { + return nil, err + } + } + if len(target.Provider) == 0 || target.Provider == "satellite" { + // get satellite clusters + satelliteClusters := []ClusterInfo{} + _, err = r.client.Get("/v2/satellite/getClusters", &satelliteClusters, target.ToMap()) + if err != nil && target.Provider == "satellite" { + // return error only when provider is satellite. Else ignore error and return VPC clusters + trace.Logger.Println("Unable to get the satellite clusters ", err) + return nil, err + } + clusters = append(clusters, satelliteClusters...) + } + return clusters, nil +} + +//Create ... +func (r *clusters) Create(params ClusterCreateRequest, target ClusterTargetHeader) (ClusterCreateResponse, error) { + var cluster ClusterCreateResponse + _, err := r.client.Post("/v2/vpc/createCluster", params, &cluster, target.ToMap()) + return cluster, err +} + +//Delete ... +func (r *clusters) Delete(name string, target ClusterTargetHeader, deleteDependencies ...bool) error { + var rawURL string + if len(deleteDependencies) != 0 { + rawURL = fmt.Sprintf("/v1/clusters/%s?deleteResources=%t", name, deleteDependencies[0]) + } else { + rawURL = fmt.Sprintf("/v1/clusters/%s", name) + } + _, err := r.client.Delete(rawURL, target.ToMap()) + return err +} + +//GetClusterByIDorName +func (r *clusters) GetCluster(name string, target ClusterTargetHeader) (*ClusterInfo, error) { + ClusterInfo := &ClusterInfo{} + rawURL := fmt.Sprintf("/v2/vpc/getCluster?cluster=%s", name) + _, err := r.client.Get(rawURL, &ClusterInfo, target.ToMap()) + if err != nil { + return nil, err + } + return ClusterInfo, err +} +func (r *ClusterInfo) IsStagingSatelliteCluster() bool { + return strings.Index(r.ServerURL, "stg") > 0 && r.Provider == "satellite" +} + +//FindWithOutShowResourcesCompatible ... +func (r *clusters) FindWithOutShowResourcesCompatible(name string, target ClusterTargetHeader) (ClusterInfo, error) { + rawURL := fmt.Sprintf("/v2/getCluster?v1-compatible&cluster=%s", name) + cluster := ClusterInfo{} + _, err := r.client.Get(rawURL, &cluster, target.ToMap()) + if err != nil { + return cluster, err + } + // Handle VPC cluster. ServerURL is blank for v2/vpc clusters + if cluster.ServerURL == "" { + cluster.ServerURL = cluster.MasterURL + } + return cluster, err +} + +//GetClusterConfigDetail ... 
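// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored source: the variadic
// deleteDependencies argument on Clusters.Delete above is easy to miss;
// passing true maps to ?deleteResources=true so dependent resources are
// removed along with the cluster.
package example

import "github.com/IBM-Cloud/bluemix-go/api/container/containerv2"

// DeleteClusterWithResources removes a cluster and its dependent resources.
func DeleteClusterWithResources(api containerv2.Clusters, name string, target containerv2.ClusterTargetHeader) error {
	return api.Delete(name, target, true)
}
// ---------------------------------------------------------------------------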
+func (r *clusters) GetClusterConfigDetail(name, dir string, admin bool, target ClusterTargetHeader) (containerv1.ClusterKeyInfo, error) { + clusterkey := containerv1.ClusterKeyInfo{} + // Block to add token for openshift clusters (This can be temporary until iks team handles openshift clusters) + clusterInfo, err := r.FindWithOutShowResourcesCompatible(name, target) + if err != nil { + // Assuming an error means that this is a vpc cluster, and we're returning existing kubeconfig + // When we add support for vpcs on openshift clusters, we may want revisit this + return clusterkey, err + } + + if !helpers.FileExists(dir) { + return clusterkey, fmt.Errorf("Path: %q, to download the config doesn't exist", dir) + } + postBody := map[string]interface{}{ + "cluster": name, + "format": "zip", + } + rawURL := fmt.Sprintf("/v2/applyRBACAndGetKubeconfig") + if admin { + postBody["admin"] = true + } + if clusterInfo.Provider == "satellite" { + postBody["endpointType"] = "link" + postBody["admin"] = true + } + resultDir := containerv1.ComputeClusterConfigDir(dir, name, admin) + const kubeConfigName = "config.yml" + err = os.MkdirAll(resultDir, 0755) + if err != nil { + return clusterkey, fmt.Errorf("Error creating directory to download the cluster config") + } + downloadPath := filepath.Join(resultDir, "config.zip") + trace.Logger.Println("Will download the kubeconfig at", downloadPath) + + var out *os.File + if out, err = os.Create(downloadPath); err != nil { + return clusterkey, err + } + defer out.Close() + defer helpers.RemoveFile(downloadPath) + _, err = r.client.Post(rawURL, postBody, out, target.ToMap()) + if err != nil { + return clusterkey, err + } + trace.Logger.Println("Downloaded the kubeconfig at", downloadPath) + if err = helpers.Unzip(downloadPath, resultDir); err != nil { + return clusterkey, err + } + defer helpers.RemoveFilesWithPattern(resultDir, "[^(.yml)|(.pem)]$") + var kubeyml string + files, _ := ioutil.ReadDir(resultDir) + + for _, f := range files { + if !strings.HasSuffix(f.Name(), ".zip") { + fileContent, _ := ioutil.ReadFile(resultDir + "/" + f.Name()) + if f.Name() == "admin-key.pem" { + clusterkey.AdminKey = string(fileContent) + } + if f.Name() == "admin.pem" { + clusterkey.Admin = string(fileContent) + } + if strings.HasPrefix(f.Name(), "ca") && strings.HasSuffix(f.Name(), ".pem") { + clusterkey.ClusterCACertificate = string(fileContent) + } + old := filepath.Join(resultDir, f.Name()) + new := filepath.Join(resultDir, f.Name()) + if strings.HasSuffix(f.Name(), ".yaml") { + new = filepath.Join(path.Clean(resultDir), "/", path.Clean(kubeConfigName)) + kubeyml = new + } + err := os.Rename(old, new) + if err != nil { + return clusterkey, fmt.Errorf("Couldn't rename: %q", err) + } + } + } + if resultDir == "" { + return clusterkey, errors.New("Unable to locate kube config in zip archive") + } + + kubefile, _ := ioutil.ReadFile(kubeyml) + var yamlConfig containerv1.ConfigFile + err = yaml.Unmarshal(kubefile, &yamlConfig) + if err != nil { + fmt.Printf("Error parsing YAML file: %s\n", err) + } + if len(yamlConfig.Clusters) != 0 { + clusterkey.Host = yamlConfig.Clusters[0].Cluster.Server + } + if len(yamlConfig.Users) != 0 { + clusterkey.Token = yamlConfig.Users[0].User.AuthProvider.Config.IDToken + } + + // Block to add token for openshift clusters (This can be temporary until iks team handles openshift clusters) + clusterInfo, err = r.FindWithOutShowResourcesCompatible(name, target) + if err != nil { + // Assuming an error means that this is a vpc cluster, and we're 
returning existing kubeconfig + // When we add support for vpcs on openshift clusters, we may want revisit this + clusterkey.FilePath, _ = filepath.Abs(kubeyml) + return clusterkey, err + } + if clusterInfo.Type == "openshift" && clusterInfo.Provider != "satellite" { + trace.Logger.Println("Debug: type is openshift trying login to get token") + var yamlConfig []byte + if yamlConfig, err = ioutil.ReadFile(kubeyml); err != nil { + return clusterkey, err + } + yamlConfig, err = r.FetchOCTokenForKubeConfig(yamlConfig, &clusterInfo, clusterInfo.IsStagingSatelliteCluster()) + if err != nil { + return clusterkey, err + } + err = ioutil.WriteFile(kubeyml, yamlConfig, 0644) // 0644 is irrelevant here, since file already exists. + if err != nil { + return clusterkey, err + } + openshiftyml, _ := ioutil.ReadFile(kubeyml) + var openshiftyaml containerv1.ConfigFileOpenshift + err = yaml.Unmarshal(openshiftyml, &openshiftyaml) + if err != nil { + fmt.Printf("Error parsing YAML file: %s\n", err) + } + openshiftusers := openshiftyaml.Users + for _, usr := range openshiftusers { + if strings.HasPrefix(usr.Name, "IAM") { + clusterkey.Token = usr.User.Token + } + } + if len(openshiftyaml.Clusters) != 0 { + clusterkey.Host = openshiftyaml.Clusters[0].Cluster.Server + } + clusterkey.ClusterCACertificate = "" + + } + clusterkey.FilePath, _ = filepath.Abs(kubeyml) + return clusterkey, err +} + +//StoreConfigDetail ... +func (r *clusters) StoreConfigDetail(name, dir string, admin, createCalicoConfig bool, target ClusterTargetHeader) (string, containerv1.ClusterKeyInfo, error) { + clusterkey := containerv1.ClusterKeyInfo{} + clusterInfo, err := r.FindWithOutShowResourcesCompatible(name, target) + if err != nil { + return "", clusterkey, err + } + postBody := map[string]interface{}{ + "cluster": name, + "format": "zip", + } + + var calicoConfig string + if !helpers.FileExists(dir) { + return "", clusterkey, fmt.Errorf("Path: %q, to download the config doesn't exist", dir) + } + rawURL := fmt.Sprintf("/v2/applyRBACAndGetKubeconfig") + if admin { + postBody["admin"] = true + } + if clusterInfo.Provider == "satellite" { + postBody["endpointType"] = "link" + postBody["admin"] = true + } + if createCalicoConfig { + postBody["network"] = true + } + resultDir := containerv1.ComputeClusterConfigDir(dir, name, admin) + err = os.MkdirAll(resultDir, 0755) + if err != nil { + return "", clusterkey, fmt.Errorf("Error creating directory to download the cluster config") + } + downloadPath := filepath.Join(resultDir, "config.zip") + trace.Logger.Println("Will download the kubeconfig at", downloadPath) + + var out *os.File + if out, err = os.Create(downloadPath); err != nil { + return "", clusterkey, err + } + defer out.Close() + defer helpers.RemoveFile(downloadPath) + _, err = r.client.Post(rawURL, postBody, out, target.ToMap()) + if err != nil { + return "", clusterkey, err + } + trace.Logger.Println("Downloaded the kubeconfig at", downloadPath) + if err = helpers.Unzip(downloadPath, resultDir); err != nil { + return "", clusterkey, err + } + trace.Logger.Println("Downloaded the kubec", resultDir) + + unzipConfigPath := resultDir + trace.Logger.Println("Located unzipped directory: ", unzipConfigPath) + files, _ := ioutil.ReadDir(unzipConfigPath) + for _, f := range files { + if !strings.HasSuffix(f.Name(), ".zip") { + fileContent, _ := ioutil.ReadFile(unzipConfigPath + "/" + f.Name()) + if f.Name() == "admin-key.pem" { + clusterkey.AdminKey = string(fileContent) + } + if f.Name() == "admin.pem" { + clusterkey.Admin = 
string(fileContent) + } + if strings.HasPrefix(f.Name(), "ca") && strings.HasSuffix(f.Name(), ".pem") { + clusterkey.ClusterCACertificate = string(fileContent) + } + old := filepath.Join(unzipConfigPath, f.Name()) + new := filepath.Join(unzipConfigPath, f.Name()) + err := os.Rename(old, new) + if err != nil { + return "", clusterkey, fmt.Errorf("Couldn't rename: %q", err) + } + } + } + baseDirFiles, err := ioutil.ReadDir(resultDir) + if err != nil { + return "", clusterkey, err + } + + if createCalicoConfig { + // Proccess calico golang template file if it exists + calicoConfig, err = containerv1.GenerateCalicoConfig(resultDir) + if err != nil { + return "", clusterkey, err + } + } + var kubeconfigFileName string + for _, baseDirFile := range baseDirFiles { + if strings.Contains(baseDirFile.Name(), ".yaml") { + kubeconfigFileName = fmt.Sprintf("%s/%s", resultDir, baseDirFile.Name()) + break + } + } + if kubeconfigFileName == "" { + return "", clusterkey, errors.New("Unable to locate kube config in zip archive") + } + kubefile, _ := ioutil.ReadFile(kubeconfigFileName) + var yamlConfig containerv1.ConfigFile + err = yaml.Unmarshal(kubefile, &yamlConfig) + if err != nil { + fmt.Printf("Error parsing YAML file: %s\n", err) + } + if len(yamlConfig.Clusters) != 0 { + clusterkey.Host = yamlConfig.Clusters[0].Cluster.Server + } + if len(yamlConfig.Users) != 0 { + clusterkey.Token = yamlConfig.Users[0].User.AuthProvider.Config.IDToken + } + + // Block to add token for openshift clusters (This can be temporary until iks team handles openshift clusters) + clusterInfo, err = r.FindWithOutShowResourcesCompatible(name, target) + if err != nil { + // Assuming an error means that this is a vpc cluster, and we're returning existing kubeconfig + // When we add support for vpcs on openshift clusters, we may want revisit this + clusterkey.FilePath = kubeconfigFileName + return calicoConfig, clusterkey, nil + } + + if clusterInfo.Type == "openshift" && clusterInfo.Provider != "satellite" { + trace.Logger.Println("Cluster Type is openshift trying login to get token") + var yamlConfig []byte + if yamlConfig, err = ioutil.ReadFile(kubeconfigFileName); err != nil { + return "", clusterkey, err + } + yamlConfig, err = r.FetchOCTokenForKubeConfig(yamlConfig, &clusterInfo, clusterInfo.IsStagingSatelliteCluster()) + if err != nil { + return "", clusterkey, err + } + err = ioutil.WriteFile(kubeconfigFileName, yamlConfig, 0644) // check about permissions and truncate + if err != nil { + return "", clusterkey, err + } + openshiftyml, _ := ioutil.ReadFile(kubeconfigFileName) + var openshiftyaml containerv1.ConfigFileOpenshift + err = yaml.Unmarshal(openshiftyml, &openshiftyaml) + if err != nil { + fmt.Printf("Error parsing YAML file: %s\n", err) + } + openshiftusers := openshiftyaml.Users + for _, usr := range openshiftusers { + if strings.HasPrefix(usr.Name, "IAM") { + clusterkey.Token = usr.User.Token + } + } + if len(openshiftyaml.Clusters) != 0 { + clusterkey.Host = openshiftyaml.Clusters[0].Cluster.Server + } + clusterkey.ClusterCACertificate = "" + + } + clusterkey.FilePath = kubeconfigFileName + return calicoConfig, clusterkey, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/ingress.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/ingress.go new file mode 100644 index 00000000000..a4263535993 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/ingress.go @@ -0,0 +1,98 @@ +package containerv2 + +import ( + "fmt" + "strconv" + 
+ "github.com/IBM-Cloud/bluemix-go/client" +) + +// Secret struct holding details for a single secret +type Secret struct { + Cluster string `json:"cluster" description:"name of secret"` + Name string `json:"name" description:"name of secret"` + Namespace string `json:"namespace" description:"namespace of secret"` + Domain string `json:"domain" description:"domain the cert belongs to"` + CRN string `json:"crn" description:"crn of the certificate in certificate manager"` + ExpiresOn string `json:"expiresOn" description:"expiration date of the certificate"` + Status string `json:"status" description:"status of Will be used for displaying callback operations to user"` + UserManaged bool `json:"userManaged" description:"true or false. Used to show which certs and secrets are system generated and which are not"` + Persistence bool `json:"persistence" description:"true or false. Persist the secret even if a user attempts to delete it"` +} + +// Secrets struct for a secret array +type Secrets []Secret + +// SecretCreateConfig the secret create request +type SecretCreateConfig struct { + Cluster string `json:"cluster" description:"name of secret" binding:"required"` + Name string `json:"name" description:"name of secret" binding:"required"` + Namespace string `json:"namespace" description:"namespace of Optional, if none specified it will be placed in the ibm-cert-store namespace"` + CRN string `json:"crn" description:"crn of the certificate in certificate manager"` + Persistence bool `json:"persistence" description:"true or false. Persist the secret even if a user attempts to delete it"` +} + +// SecretDeleteConfig the secret delete request +type SecretDeleteConfig struct { + Cluster string `json:"cluster" description:"name of secret" binding:"required"` + Name string `json:"name" description:"name of secret" binding:"required"` + Namespace string `json:"namespace" description:"namespace of secret" binding:"required"` +} + +// SecretUpdateConfig secret update request +type SecretUpdateConfig struct { + Cluster string `json:"cluster" description:"name of secret" binding:"required"` + Name string `json:"name" description:"name of secret" binding:"required"` + Namespace string `json:"namespace" description:"namespace of secret" binding:"required"` + CRN string `json:"crn" description:"crn of the certificate in certificate manager"` +} + +type ingress struct { + client *client.Client +} + +//Ingress interface +type Ingress interface { + CreateIngressSecret(req SecretCreateConfig) (response Secret, err error) + UpdateIngressSecret(req SecretUpdateConfig) (response Secret, err error) + DeleteIngressSecret(req SecretDeleteConfig) (err error) + GetIngressSecretList(clusterNameOrID string, showDeleted bool) (response Secrets, err error) + GetIngressSecret(clusterNameOrID, secretName, secretNamespace string) (response Secret, err error) +} + +func newIngressAPI(c *client.Client) Ingress { + return &ingress{ + client: c, + } +} + +// GetIngressSecretList returns a list of ingress secrets for a given cluster +func (r *ingress) GetIngressSecretList(clusterNameOrID string, showDeleted bool) (response Secrets, err error) { + deleted := strconv.FormatBool(showDeleted) + _, err = r.client.Get(fmt.Sprintf("/ingress/v2/secret/getSecrets?cluster=%s&showDeleted=%s", clusterNameOrID, deleted), &response) + return +} + +// GetIngressSecret returns a single ingress secret in a given cluster +func (r *ingress) GetIngressSecret(clusterNameOrID, secretName, secretNamespace string) (response Secret, err error) { + _, err = 
r.client.Get(fmt.Sprintf("/ingress/v2/secret/getSecret?cluster=%s&name=%s&namespace=%s", clusterNameOrID, secretName, secretNamespace), &response) + return +} + +// CreateIngressSecret creates an ingress secret with the given name in the given namespace +func (r *ingress) CreateIngressSecret(req SecretCreateConfig) (response Secret, err error) { + _, err = r.client.Post("/ingress/v2/secret/createSecret", req, &response) + return +} + +// UpdateIngressSecret updates an existing secret with new cert values +func (r *ingress) UpdateIngressSecret(req SecretUpdateConfig) (response Secret, err error) { + _, err = r.client.Post("/ingress/v2/secret/updateSecret", req, &response) + return +} + +// DeleteIngressSecret deletes the ingress secret from the cluster +func (r *ingress) DeleteIngressSecret(req SecretDeleteConfig) (err error) { + _, err = r.client.Post("/ingress/v2/secret/deleteSecret", req, nil) + return +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/kms.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/kms.go new file mode 100644 index 00000000000..2940d10b33f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/kms.go @@ -0,0 +1,52 @@ +package containerv2 + +import ( + "github.com/IBM-Cloud/bluemix-go/client" +) + +const ( + account = "X-Auth-Resource-Account" + resourceGroup = "X-Auth-Resource-Group" +) + +//Request body to attach a KMS to a cluster +type KmsEnableReq struct { + Cluster string `json:"cluster"` + Kms string `json:"instance_id"` + Crk string `json:"crk_id"` + PrivateEndpoint bool `json:"private_endpoint"` +} + +//ClusterHeader ... +type ClusterHeader struct { + AccountID string + ResourceGroup string +} + +//CreateMap ... +func (c ClusterHeader) CreateMap() map[string]string { + m := make(map[string]string, 3) + m[account] = c.AccountID + m[resourceGroup] = c.ResourceGroup + return m +} + +type kms struct { + client *client.Client +} + +//Kms interface +type Kms interface { + EnableKms(enableKmsReq KmsEnableReq, target ClusterHeader) error +} + +func newKmsAPI(c *client.Client) Kms { + return &kms{ + client: c, + } +} + +func (r *kms) EnableKms(enableKmsReq KmsEnableReq, target ClusterHeader) error { + _, err := r.client.Post("/v2/enableKMS", enableKmsReq, nil, target.CreateMap()) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/ob_logging.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/ob_logging.go new file mode 100644 index 00000000000..66a5f8ccd50 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/ob_logging.go @@ -0,0 +1,150 @@ +package containerv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +const ( + authResourceAccount = "X-Auth-Resource-Account" +) + +//LoggingTargetHeader ... +type LoggingTargetHeader struct { + AccountID string +} + +//ToMap ... +func (c LoggingTargetHeader) ToMap() map[string]string { + m := make(map[string]string, 2) + m[authResourceAccount] = c.AccountID + return m +} + +//LoggingCreateRequest ... +type LoggingCreateRequest struct { + Cluster string `json:"cluster"` + IngestionKey string `json:"ingestionKey,omitempty"` + LoggingInstance string `json:"instance"` + PrivateEndpoint bool `json:"privateEndpoint,omitempty"` +} + +//LoggingCreateResponse ... 
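// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored source: attaching a KMS (Key
// Protect) instance and root key to a cluster via the Kms interface above.
// All identifiers are placeholders; note that Kms takes ClusterHeader rather
// than ClusterTargetHeader.
package example

import "github.com/IBM-Cloud/bluemix-go/api/container/containerv2"

// EnableKeyProtect turns on KMS encryption for a cluster.
func EnableKeyProtect(api containerv2.Kms, clusterID, kmsInstanceID, rootKeyID string, target containerv2.ClusterHeader) error {
	req := containerv2.KmsEnableReq{
		Cluster:         clusterID,
		Kms:             kmsInstanceID,
		Crk:             rootKeyID,
		PrivateEndpoint: false,
	}
	return api.EnableKms(req, target)
}
// ---------------------------------------------------------------------------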
+type LoggingCreateResponse struct { + DaemonsetName string `json:"daemonsetName"` + IngestionKey string `json:"ingestionKey"` + InstanceID string `json:"instanceId"` + InstanceName string `json:"instanceName"` + PrivateEndpoint bool `json:"privateEndpoint"` +} + +//LoggingUpdateRequest ... +type LoggingUpdateRequest struct { + Cluster string `json:"cluster"` + IngestionKey string `json:"ingestionKey"` + Instance string `json:"instance"` + NewInstance string `json:"newInstance"` + PrivateEndpoint bool `json:"privateEndpoint"` +} + +//LoggingUpdateResponse ... +type LoggingUpdateResponse struct { + AgentKey string `json:"agentKey"` + AgentNamespace string `json:"agentNamespace"` + CRN string `json:"crn"` + DaemonsetName string `json:"daemonsetName"` + DiscoveredAgent bool `json:"discoveredAgent"` + InstanceID string `json:"instanceId"` + InstanceName string `json:"instanceName"` + Namespace string `json:"namespace"` + PrivateEndpoint bool `json:"privateEndpoint"` +} + +//LoggingInfo ... +type LoggingInfo struct { + AgentKey string `json:"agentKey"` + AgentNamespace string `json:"agentNamespace"` + CRN string `json:"crn"` + DaemonsetName string `json:"daemonsetName"` + DiscoveredAgent bool `json:"discoveredAgent"` + InstanceID string `json:"instanceId"` + InstanceName string `json:"instanceName"` + Namespace string `json:"namespace"` + PrivateEndpoint bool `json:"privateEndpoint"` +} + +//LoggingDeleteRequest ... +type LoggingDeleteRequest struct { + Cluster string `json:"cluster"` + Instance string `json:"instance"` +} + +//Logging interface +type Logging interface { + CreateLoggingConfig(params LoggingCreateRequest, target LoggingTargetHeader) (LoggingCreateResponse, error) + GetLoggingConfig(clusterName string, LoggingInstance string, target LoggingTargetHeader) (*LoggingInfo, error) + ListLoggingInstances(clusterName string, target LoggingTargetHeader) ([]LoggingInfo, error) + UpdateLoggingConfig(params LoggingUpdateRequest, target LoggingTargetHeader) (LoggingUpdateResponse, error) + DeleteLoggingConfig(params LoggingDeleteRequest, target LoggingTargetHeader) (interface{}, error) +} +type logging struct { + client *client.Client +} + +func newLoggingAPI(c *client.Client) Logging { + return &logging{ + client: c, + } +} + +//CreateLoggingConfig ... +//Create a Logging configuration for a cluster. +func (r *logging) CreateLoggingConfig(params LoggingCreateRequest, target LoggingTargetHeader) (LoggingCreateResponse, error) { + var resp LoggingCreateResponse + _, err := r.client.Post("/v2/observe/logging/createConfig", params, &resp, target.ToMap()) + return resp, err +} + +//GetLoggingConfig ... +//Show the details of an existing Logging configuration. +func (r *logging) GetLoggingConfig(clusterName, loggingInstance string, target LoggingTargetHeader) (*LoggingInfo, error) { + loggingInfo := &LoggingInfo{} + rawURL := fmt.Sprintf("/v2/observe/logging/getConfig?cluster=%s&instance=%s", clusterName, loggingInstance) + _, err := r.client.Get(rawURL, &loggingInfo, target.ToMap()) + if err != nil { + return nil, err + } + return loggingInfo, err +} + +//ListLoggingInstances... +//List all logging configurations for a cluster. 
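// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored source: creating a logging
// configuration with the Logging interface above. The instance identifier is
// a placeholder; the optional ingestion key is omitted here.
package example

import "github.com/IBM-Cloud/bluemix-go/api/container/containerv2"

// AttachLogging connects a cluster to a logging instance.
func AttachLogging(api containerv2.Logging, cluster, instanceID, accountID string) (containerv2.LoggingCreateResponse, error) {
	req := containerv2.LoggingCreateRequest{
		Cluster:         cluster,
		LoggingInstance: instanceID,
	}
	target := containerv2.LoggingTargetHeader{AccountID: accountID}
	return api.CreateLoggingConfig(req, target)
}
// ---------------------------------------------------------------------------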
+func (r *logging) ListLoggingInstances(clusterName string, target LoggingTargetHeader) ([]LoggingInfo, error) { + logging := []LoggingInfo{} + rawURL := fmt.Sprintf("/v2/observe/logging/getConfigs?cluster=%s", clusterName) + _, err := r.client.Get(rawURL, &logging, target.ToMap()) + if err != nil { + return nil, err + } + return logging, nil +} + +//UpdateLoggingConfig ... +//Update a Logging configuration in the cluster. +func (r *logging) UpdateLoggingConfig(params LoggingUpdateRequest, target LoggingTargetHeader) (LoggingUpdateResponse, error) { + var logging LoggingUpdateResponse + _, err := r.client.Post("/v2/observe/logging/modifyConfig", params, &logging, target.ToMap()) + return logging, err +} + +//DeleteLoggingConfig ... +//Remove a Logging configuration from a cluster. +func (r *logging) DeleteLoggingConfig(params LoggingDeleteRequest, target LoggingTargetHeader) (interface{}, error) { + var response interface{} + _, err := r.client.Post("/v2/observe/logging/removeConfig", params, &response, target.ToMap()) + if err != nil { + return response, err + } + return response, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/ob_monitoring.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/ob_monitoring.go new file mode 100644 index 00000000000..c866098df8f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/ob_monitoring.go @@ -0,0 +1,151 @@ +package containerv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +const ( + resourceAccount = "X-Auth-Resource-Account" +) + +//MonitoringTargetHeader ... +type MonitoringTargetHeader struct { + AccountID string +} + +//ToMap ... +func (c MonitoringTargetHeader) ToMap() map[string]string { + m := make(map[string]string, 2) + m[resourceAccount] = c.AccountID + return m +} + +//MonitoringCreateRequest ... +type MonitoringCreateRequest struct { + Cluster string `json:"cluster"` + IngestionKey string `json:"ingestionKey,omitempty"` + SysidigInstance string `json:"instance"` + PrivateEndpoint bool `json:"privateEndpoint,omitempty"` +} + +//MonitoringCreateResponse ... +type MonitoringCreateResponse struct { + DaemonsetName string `json:"daemonsetName"` + IngestionKey string `json:"ingestionKey"` + InstanceID string `json:"instanceId"` + InstanceName string `json:"instanceName"` + PrivateEndpoint bool `json:"privateEndpoint"` +} + +//MonitoringUpdateRequest ... +type MonitoringUpdateRequest struct { + Cluster string `json:"cluster"` + IngestionKey string `json:"ingestionKey"` + Instance string `json:"instance"` + NewInstance string `json:"newInstance"` + PrivateEndpoint bool `json:"privateEndpoint"` +} + +//MonitoringUpdateResponse ... +type MonitoringUpdateResponse struct { + AgentKey string `json:"agentKey"` + AgentNamespace string `json:"agentNamespace"` + CRN string `json:"crn"` + DaemonsetName string `json:"daemonsetName"` + DiscoveredAgent bool `json:"discoveredAgent"` + InstanceID string `json:"instanceId"` + InstanceName string `json:"instanceName"` + Namespace string `json:"namespace"` + PrivateEndpoint bool `json:"privateEndpoint"` +} + +//MonitoringInfo ... 
+type MonitoringInfo struct { + AgentKey string `json:"agentKey"` + AgentNamespace string `json:"agentNamespace"` + CRN string `json:"crn"` + DaemonsetName string `json:"daemonsetName"` + DiscoveredAgent bool `json:"discoveredAgent"` + InstanceID string `json:"instanceId"` + InstanceName string `json:"instanceName"` + Namespace string `json:"namespace"` + PrivateEndpoint bool `json:"privateEndpoint"` +} + +//MonitoringDeleteRequest ... +type MonitoringDeleteRequest struct { + Cluster string `json:"cluster"` + Instance string `json:"instance"` +} + +//Monitoring interface +type Monitoring interface { + CreateMonitoringConfig(params MonitoringCreateRequest, target MonitoringTargetHeader) (MonitoringCreateResponse, error) + GetMonitoringConfig(clusterName string, monitoringInstance string, target MonitoringTargetHeader) (*MonitoringInfo, error) + ListAllMonitors(clusterName string, target MonitoringTargetHeader) ([]MonitoringInfo, error) + UpdateMonitoringConfig(params MonitoringUpdateRequest, target MonitoringTargetHeader) (MonitoringUpdateResponse, error) + DeleteMonitoringConfig(params MonitoringDeleteRequest, target MonitoringTargetHeader) (interface{}, error) +} +type monitoring struct { + client *client.Client +} + +func newMonitoringAPI(c *client.Client) Monitoring { + return &monitoring{ + client: c, + } +} + +//CreateMonitoringConfig ... +//Create a Sysdig monitoring configuration for a cluster. +func (r *monitoring) CreateMonitoringConfig(params MonitoringCreateRequest, target MonitoringTargetHeader) (MonitoringCreateResponse, error) { + var monitoring MonitoringCreateResponse + _, err := r.client.Post("/v2/observe/monitoring/createConfig", params, &monitoring, target.ToMap()) + return monitoring, err +} + +//GetMonitoringConfig ... +//Show the details of an existing Sysdig monitoring configuration. +func (r *monitoring) GetMonitoringConfig(clusterName, monitoringInstance string, target MonitoringTargetHeader) (*MonitoringInfo, error) { + monitoringInfo := &MonitoringInfo{} + rawURL := fmt.Sprintf("/v2/observe/monitoring/getConfig?cluster=%s&instance=%s", clusterName, monitoringInstance) + _, err := r.client.Get(rawURL, &monitoringInfo, target.ToMap()) + if err != nil { + return nil, err + } + return monitoringInfo, err +} + +//ListAllMonitors ... +//List all Sysdig monitoring configurations for a cluster. +func (r *monitoring) ListAllMonitors(clusterName string, target MonitoringTargetHeader) ([]MonitoringInfo, error) { + monitors := []MonitoringInfo{} + rawURL := fmt.Sprintf("v2/observe/monitoring/getConfigs?cluster=%s", clusterName) + _, err := r.client.Get(rawURL, &monitors, target.ToMap()) + if err != nil { + return nil, err + } + return monitors, nil +} + +//UpdateMonitoringConfig ... +//Update a Sysdig monitoring configuration in the cluster. +func (r *monitoring) UpdateMonitoringConfig(params MonitoringUpdateRequest, target MonitoringTargetHeader) (MonitoringUpdateResponse, error) { + var monitoring MonitoringUpdateResponse + _, err := r.client.Post("/v2/observe/monitoring/modifyConfig", params, &monitoring, target.ToMap()) + return monitoring, err +} + +//DeleteMonitoringConfig ... +//Remove a Sysdig monitoring configuration from a cluster. 
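// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored source: enumerating the Sysdig
// monitoring configurations of a cluster with the Monitoring interface above.
package example

import (
	"fmt"

	"github.com/IBM-Cloud/bluemix-go/api/container/containerv2"
)

// PrintMonitors prints each monitoring instance attached to the cluster.
func PrintMonitors(api containerv2.Monitoring, cluster, accountID string) error {
	monitors, err := api.ListAllMonitors(cluster, containerv2.MonitoringTargetHeader{AccountID: accountID})
	if err != nil {
		return err
	}
	for _, m := range monitors {
		fmt.Printf("%s (%s) daemonset=%s\n", m.InstanceName, m.InstanceID, m.DaemonsetName)
	}
	return nil
}
// ---------------------------------------------------------------------------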
+func (r *monitoring) DeleteMonitoringConfig(params MonitoringDeleteRequest, target MonitoringTargetHeader) (interface{}, error) { + var response interface{} + _, err := r.client.Post("/v2/observe/monitoring/removeConfig", params, &response, target.ToMap()) + if err != nil { + return response, err + } + return response, err + +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/openshift.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/openshift.go new file mode 100644 index 00000000000..9781c8c0f61 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/openshift.go @@ -0,0 +1,267 @@ +package containerv2 + +/******************************************************************************* + * IBM Confidential + * OCO Source Materials + * IBM Cloud Schematics + * (C) Copyright IBM Corp. 2017 All Rights Reserved. + * The source code for this program is not published or otherwise divested of + * its trade secrets, irrespective of what has been deposited with + * the U.S. Copyright Office. + ******************************************************************************/ + +/******************************************************************************* + * A file for openshift related utility functions, like getting kube + * config + ******************************************************************************/ + +import ( + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime/debug" + "strings" + "time" + + yaml "github.com/ghodss/yaml" + + bxhttp "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/trace" +) + +const ( + // IAMHTTPtimeout - + IAMHTTPtimeout = 10 * time.Second +) + +// Frame - +type Frame uintptr + +// StackTrace - +type StackTrace []Frame +type stackTracer interface { + StackTrace() StackTrace +} + +type openShiftUser struct { + Kind string `json:"kind"` + APIVersion string `json:"apiVersion"` + Metadata struct { + Name string `json:"name"` + SelfLink string `json:"selfLink"` + UID string `json:"uid"` + ResourceVersion string `json:"resourceVersion"` + CreationTimestamp time.Time `json:"creationTimestamp"` + } `json:"metadata"` + Identities []string `json:"identities"` + Groups []string `json:"groups"` +} + +type authEndpoints struct { + Issuer string `json:"issuer"` + AuthorizationEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` + ServerURL string `json:"server_endpoint,omitempty"` +} + +// PanicCatch - Catch panic and give error +func PanicCatch(r interface{}) error { + if r != nil { + var e error + switch x := r.(type) { + case string: + e = errors.New(x) + case error: + e = x + default: + e = errors.New("Unknown panic") + } + fmt.Printf("Panic error %v", e) + if err, ok := e.(stackTracer); ok { + fmt.Printf("Panic stack trace %v", err.StackTrace()) + } else { + debug.PrintStack() + } + return e + } + return nil +} + +// NormalizeName - +func NormalizeName(name string) (string, error) { + name = strings.ToLower(name) + reg, err := regexp.Compile("[^A-Za-z0-9:]+") + if err != nil { + return "", err + } + return reg.ReplaceAllString(name, "-"), nil +} + +// logInAndFillOCToken will update kubeConfig with an Openshift token, if one is not there +func (r *clusters) FetchOCTokenForKubeConfig(kubecfg []byte, cMeta *ClusterInfo, skipSSLVerification bool) (kubecfgEdited []byte, rerr error) { + // TODO: this is not a a standard manner to login ... 
using propriatary OC cli reverse engineering + defer func() { + err := PanicCatch(recover()) + if err != nil { + rerr = fmt.Errorf("Could not login to openshift account %s", err) + } + }() + + var cfg map[string]interface{} + err := yaml.Unmarshal(kubecfg, &cfg) + if err != nil { + return kubecfg, err + } + + var token string + trace.Logger.Println("Creating user passcode to login for getting oc token") + passcode, err := r.client.TokenRefresher.GetPasscode() + + authEP, err := func(meta *ClusterInfo) (*authEndpoints, error) { + request := rest.GetRequest(meta.ServerURL + "/.well-known/oauth-authorization-server") + var auth authEndpoints + tempVar := r.client.ServiceName + r.client.ServiceName = "" + + tempSSL := r.client.Config.SSLDisable + tempClient := r.client.Config.HTTPClient + r.client.Config.SSLDisable = skipSSLVerification + r.client.Config.HTTPClient = bxhttp.NewHTTPClient(r.client.Config) + + defer func() { + r.client.ServiceName = tempVar + r.client.Config.SSLDisable = tempSSL + r.client.Config.HTTPClient = tempClient + }() + resp, err := r.client.SendRequest(request, &auth) + if err != nil { + return &auth, err + } + defer resp.Body.Close() + if resp.StatusCode > 299 { + msg, _ := ioutil.ReadAll(resp.Body) + return nil, fmt.Errorf("Bad status code [%d] returned when fetching Cluster authentication endpoints: %s", resp.StatusCode, msg) + } + auth.ServerURL = meta.ServerURL + return &auth, nil + }(cMeta) + + trace.Logger.Println("Got authentication end points for getting oc token") + token, uname, err := r.openShiftAuthorizePasscode(authEP, passcode, cMeta.IsStagingSatelliteCluster()) + trace.Logger.Println("Got the token and user ", uname) + clusterName, _ := NormalizeName(authEP.ServerURL[len("https://"):len(authEP.ServerURL)]) //TODO deal with http + ccontext := "default/" + clusterName + "/" + uname + uname = uname + "/" + clusterName + clusters := cfg["clusters"].([]interface{}) + newCluster := map[string]interface{}{"name": clusterName, "cluster": map[string]interface{}{"server": authEP.ServerURL}} + if skipSSLVerification { + newCluster["cluster"].(map[string]interface{})["insecure-skip-tls-verify"] = true + } + clusters = append(clusters, newCluster) + cfg["clusters"] = clusters + + contexts := cfg["contexts"].([]interface{}) + newContext := map[string]interface{}{"name": ccontext, "context": map[string]interface{}{"cluster": clusterName, "namespace": "default", "user": uname}} + contexts = append(contexts, newContext) + cfg["contexts"] = contexts + + users := cfg["users"].([]interface{}) + newUser := map[string]interface{}{"name": uname, "user": map[string]interface{}{"token": token}} + users = append(users, newUser) + cfg["users"] = users + + cfg["current-context"] = ccontext + + bytes, err := yaml.Marshal(cfg) + if err != nil { + return kubecfg, err + } + kubecfg = bytes + return kubecfg, nil +} + +// Never redirect. Let caller handle. This is an http.Client callback method (CheckRedirect) +func neverRedirect(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse +} + +func (r *clusters) openShiftAuthorizePasscode(authEP *authEndpoints, passcode string, skipSSLVerification bool) (string, string, error) { + request := rest.GetRequest(authEP.AuthorizationEndpoint+"?response_type=token&client_id=openshift-challenging-client"). 
+ Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("passcode:%s", passcode)))) + + tempSSL := r.client.Config.SSLDisable + tempClient := r.client.Config.HTTPClient + r.client.Config.SSLDisable = skipSSLVerification + r.client.Config.HTTPClient = bxhttp.NewHTTPClient(r.client.Config) + + // To never redirect for this call + tempVar := r.client.Config.HTTPClient.CheckRedirect + r.client.Config.HTTPClient.CheckRedirect = neverRedirect + defer func() { + r.client.Config.HTTPClient.CheckRedirect = tempVar + r.client.Config.SSLDisable = tempSSL + r.client.Config.HTTPClient = tempClient + }() + + var respInterface interface{} + var resp *http.Response + var err error + for try := 1; try <= 3; try++ { + // bmxerror.NewRequestFailure("ServerErrorResponse", string(raw), resp.StatusCode) + resp, err = r.client.SendRequest(request, respInterface) + if err != nil { + if resp.StatusCode != 302 { + return "", "", err + } + } + defer resp.Body.Close() + if resp.StatusCode > 399 { + if try >= 3 { + msg, _ := ioutil.ReadAll(resp.Body) + return "", "", fmt.Errorf("Bad status code [%d] returned when openshift login: %s", resp.StatusCode, string(msg)) + } + time.Sleep(200 * time.Millisecond) + } else { + break + } + } + + loc, err := resp.Location() + if err != nil { + return "", "", err + } + val, err := url.ParseQuery(loc.Fragment) + if err != nil { + return "", "", err + } + token := val.Get("access_token") + trace.Logger.Println("Getting username after getting the token") + name, err := r.getOpenShiftUser(authEP, token) + if err != nil { + return "", "", err + } + return token, name, nil +} + +func (r *clusters) getOpenShiftUser(authEP *authEndpoints, token string) (string, error) { + request := rest.GetRequest(authEP.ServerURL+"/apis/user.openshift.io/v1/users/~"). 
+ Set("Authorization", "Bearer "+token) + + var user openShiftUser + resp, err := r.client.SendRequest(request, &user) + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode > 299 { + msg, _ := ioutil.ReadAll(resp.Body) + return "", fmt.Errorf("Bad status code [%d] returned when fetching OpenShift user Details: %s", resp.StatusCode, string(msg)) + } + + return user.Metadata.Name, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/worker_pool.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/worker_pool.go new file mode 100644 index 00000000000..a1e1850c868 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/worker_pool.go @@ -0,0 +1,110 @@ +package containerv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +// WorkerPoolRequest provides worker pool data +// swagger:model +type WorkerPoolRequest struct { + Cluster string `json:"cluster" description:"cluster name where the worker pool will be created"` + WorkerPoolConfig +} + +// WorkerPoolResponse provides worker pool data +// swagger:model +type WorkerPoolResponse struct { + ID string `json:"workerPoolID"` +} + +type WorkerPoolZone struct { + Cluster string `json:"cluster"` + Id string `json:"id"` + SubnetID string `json:"subnetID"` + WorkerPoolID string `json:"workerPoolID"` +} + +type GetWorkerPoolResponse struct { + Flavor string `json:"flavor"` + ID string `json:"id"` + Isolation string `json:"isolation"` + Labels map[string]string `json:"labels,omitempty"` + Lifecycle `json:"lifecycle"` + VpcID string `json:"vpcID"` + WorkerCount int `json:"workerCount"` + PoolName string `json:"poolName"` + Provider string `json:"provider"` + Zones []ZoneResp `json:"zones"` +} + +type Lifecycle struct { + ActualState string `json:"actualState"` + DesiredState string `json:"desiredState"` +} + +type ZoneResp struct { + ID string `json:"id"` + WorkerCount int `json:"workerCount"` + Subnets []Subnet `json:"subnets"` +} + +type Subnet struct { + ID string `json:"id"` + Primary bool `json:"primary"` +} + +//Workers ... 
+type WorkerPool interface { + CreateWorkerPool(workerPoolReq WorkerPoolRequest, target ClusterTargetHeader) (WorkerPoolResponse, error) + GetWorkerPool(clusterNameOrID, workerPoolNameOrID string, target ClusterTargetHeader) (GetWorkerPoolResponse, error) + ListWorkerPools(clusterNameOrID string, target ClusterTargetHeader) ([]GetWorkerPoolResponse, error) + CreateWorkerPoolZone(workerPoolZone WorkerPoolZone, target ClusterTargetHeader) error + DeleteWorkerPool(clusterNameOrID string, workerPoolNameOrID string, target ClusterTargetHeader) error +} + +type workerpool struct { + client *client.Client +} + +func newWorkerPoolAPI(c *client.Client) WorkerPool { + return &workerpool{ + client: c, + } +} + +// GetWorkerPool calls the API to get a worker pool +func (w *workerpool) ListWorkerPools(clusterNameOrID string, target ClusterTargetHeader) ([]GetWorkerPoolResponse, error) { + successV := []GetWorkerPoolResponse{} + _, err := w.client.Get(fmt.Sprintf("/v2/vpc/getWorkerPools?cluster=%s", clusterNameOrID), &successV, target.ToMap()) + return successV, err +} + +// GetWorkerPool calls the API to get a worker pool +func (w *workerpool) GetWorkerPool(clusterNameOrID, workerPoolNameOrID string, target ClusterTargetHeader) (GetWorkerPoolResponse, error) { + var successV GetWorkerPoolResponse + _, err := w.client.Get(fmt.Sprintf("/v2/vpc/getWorkerPool?cluster=%s&workerpool=%s", clusterNameOrID, workerPoolNameOrID), &successV, target.ToMap()) + return successV, err +} + +// CreateWorkerPool calls the API to create a worker pool +func (w *workerpool) CreateWorkerPool(workerPoolReq WorkerPoolRequest, target ClusterTargetHeader) (WorkerPoolResponse, error) { + var successV WorkerPoolResponse + _, err := w.client.Post("/v2/vpc/createWorkerPool", workerPoolReq, &successV, target.ToMap()) + return successV, err +} + +// DeleteWorkerPool calls the API to remove a worker pool +func (w *workerpool) DeleteWorkerPool(clusterNameOrID string, workerPoolNameOrID string, target ClusterTargetHeader) error { + // Make the request, don't care about return value + _, err := w.client.Delete(fmt.Sprintf("/v1/clusters/%s/workerpools/%s", clusterNameOrID, workerPoolNameOrID), target.ToMap()) + return err +} + +// CreateWorkerPoolZone calls the API to add a zone to a cluster and worker pool +func (w *workerpool) CreateWorkerPoolZone(workerPoolZone WorkerPoolZone, target ClusterTargetHeader) error { + // Make the request, don't care about return value + _, err := w.client.Post("/v2/vpc/createWorkerPoolZone", workerPoolZone, nil, target.ToMap()) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/workers.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/workers.go new file mode 100644 index 00000000000..60df887c141 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/container/containerv2/workers.go @@ -0,0 +1,198 @@ +package containerv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +//Worker ... 
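+// Worker describes a single VPC worker node as returned by the v2 API,
+// including its kube version, pool membership, lifecycle and health state,
+// and attached network interfaces.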
+type Worker struct { + Billing string `json:"billing,omitempty"` + Flavor string `json:"flavor"` + ID string `json:"id"` + KubeVersion KubeDetails + Location string `json:"location"` + PoolID string `json:"poolid"` + PoolName string `json:"poolName"` + LifeCycle WorkerLifeCycle `json:"lifecycle"` + Health HealthStatus `json:"health"` + NetworkInterfaces []Network `json:"networkInterfaces"` +} + +type KubeDetails struct { + Actual string `json:"actual"` + Desired string `json:"desired"` + Eos string `json:"eos"` + MasterEOS string `json:"masterEos"` + Target string `json:"target"` +} +type HealthStatus struct { + Message string `json:"message"` + State string `json:"state"` +} +type WorkerLifeCycle struct { + ReasonForDelete string `json:"reasonForDelete"` + ActualState string `json:"actualState"` + DesiredState string `json:"desiredState"` + Message string `json:"message"` + MessageDate string `json:"messageDate"` + MessageDetails string `json:"messageDetails"` + MessageDetailsDate string `json:"messageDetailsDate"` + PendingOperation string `json:"pendingOperation"` +} + +type Network struct { + Cidr string `json:"cidr"` + IpAddress string `json:"ipAddress"` + Primary bool `json:"primary"` + SubnetID string `json:"subnetID"` +} + +type ReplaceWorker struct { + ClusterIDOrName string `json:"cluster"` + Update bool `json:"update"` + WorkerID string `json:"workerID"` +} + +type VoulemeAttachments struct { + VolumeAttachments []VoulemeAttachment `json:"volume_attachments"` +} + +type VoulemeAttachment struct { + Id string `json:"id"` + Volume Volume `json:"volume"` + Device DeviceInfo `json:"device"` + Name string `json:"name"` + Status string `json:"status"` + Type string `json:"type"` +} + +type Volume struct { + Name string `json:"name"` + Id string `json:"id"` +} + +type DeviceInfo struct { + Id string `json:"id"` +} + +type VolumeRequest struct { + Cluster string `json:"cluster"` + VolumeAttachmentID string `json:"volumeAttachmentID,omitempty"` + VolumeID string `json:"volumeID,omitempty"` + Worker string `json:"worker"` +} + +//Workers ... +type Workers interface { + ListByWorkerPool(clusterIDOrName, workerPoolIDOrName string, showDeleted bool, target ClusterTargetHeader) ([]Worker, error) + ListWorkers(clusterIDOrName string, showDeleted bool, target ClusterTargetHeader) ([]Worker, error) + Get(clusterIDOrName, workerID string, target ClusterTargetHeader) (Worker, error) + ReplaceWokerNode(clusterIDOrName, workerID string, target ClusterTargetHeader) (string, error) + ListStorageAttachemnts(clusterIDOrName, workerID string, target ClusterTargetHeader) (VoulemeAttachments, error) + GetStorageAttachment(clusterIDOrName, workerID, volumeAttachmentID string, target ClusterTargetHeader) (VoulemeAttachment, error) + CreateStorageAttachment(payload VolumeRequest, target ClusterTargetHeader) (VoulemeAttachment, error) + DeleteStorageAttachment(payload VolumeRequest, target ClusterTargetHeader) (string, error) +} + +type worker struct { + client *client.Client +} + +func newWorkerAPI(c *client.Client) Workers { + return &worker{ + client: c, + } +} + +//ListByWorkerPool ... 
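+// Illustrative usage sketch (not part of upstream), assuming a configured
+// *client.Client c; lists the workers of one pool, excluding deleted nodes:
+//
+//   w := newWorkerAPI(c)
+//   nodes, err := w.ListByWorkerPool("my-cluster", "default", false, ClusterTargetHeader{})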
+func (r *worker) ListByWorkerPool(clusterIDOrName, workerPoolIDOrName string, showDeleted bool, target ClusterTargetHeader) ([]Worker, error) { + rawURL := fmt.Sprintf("/v2/vpc/getWorkers?cluster=%s&showDeleted=%t", clusterIDOrName, showDeleted) + if len(workerPoolIDOrName) > 0 { + rawURL += "&pool=" + workerPoolIDOrName + } + workers := []Worker{} + _, err := r.client.Get(rawURL, &workers, target.ToMap()) + if err != nil { + return nil, err + } + return workers, err +} + +//ListWorkers ... +func (r *worker) ListWorkers(clusterIDOrName string, showDeleted bool, target ClusterTargetHeader) ([]Worker, error) { + rawURL := fmt.Sprintf("/v2/vpc/getWorkers?cluster=%s&showDeleted=%t", clusterIDOrName, showDeleted) + workers := []Worker{} + _, err := r.client.Get(rawURL, &workers, target.ToMap()) + if err != nil { + return nil, err + } + return workers, err +} + +//Get ... +func (r *worker) Get(clusterIDOrName, workerID string, target ClusterTargetHeader) (Worker, error) { + rawURL := fmt.Sprintf("/v2/vpc/getWorker?cluster=%s&worker=%s", clusterIDOrName, workerID) + worker := Worker{} + _, err := r.client.Get(rawURL, &worker, target.ToMap()) + if err != nil { + return worker, err + } + return worker, err +} + +func (r *worker) ReplaceWokerNode(clusterIDOrName, workerID string, target ClusterTargetHeader) (string, error) { + payload := ReplaceWorker{ + ClusterIDOrName: clusterIDOrName, + WorkerID: workerID, + Update: true, + } + var response string + _, err := r.client.Post("/v2/vpc/replaceWorker", payload, &response, target.ToMap()) + if err != nil { + return response, err + } + return response, err +} + +// ListStorageAttachemnts returns list of attached storage blaocks to a worker node +func (r *worker) ListStorageAttachemnts(clusterIDOrName, workerID string, target ClusterTargetHeader) (VoulemeAttachments, error) { + rawURL := fmt.Sprintf("/v2/storage/getAttachments?cluster=%s&worker=%s", clusterIDOrName, workerID) + workerAttachements := VoulemeAttachments{} + _, err := r.client.Get(rawURL, &workerAttachements, target.ToMap()) + if err != nil { + return workerAttachements, err + } + return workerAttachements, err + +} + +func (r *worker) GetStorageAttachment(clusterIDOrName, workerID, volumeAttachmentID string, target ClusterTargetHeader) (VoulemeAttachment, error) { + rawURL := fmt.Sprintf("/v2/storage/getAttachment?cluster=%s&worker=%s&volumeAttachmentID=%s", clusterIDOrName, workerID, volumeAttachmentID) + workerAttachement := VoulemeAttachment{} + _, err := r.client.Get(rawURL, &workerAttachement, target.ToMap()) + if err != nil { + return workerAttachement, err + } + return workerAttachement, err + +} + +func (r *worker) CreateStorageAttachment(payload VolumeRequest, target ClusterTargetHeader) (VoulemeAttachment, error) { + response := VoulemeAttachment{} + _, err := r.client.Post("/v2/storage/createAttachment", payload, &response, target.ToMap()) + if err != nil { + return response, err + } + return response, err +} + +func (r *worker) DeleteStorageAttachment(payload VolumeRequest, target ClusterTargetHeader) (string, error) { + var response string + _, err := r.client.Post("/v2/storage/deleteAttachment", payload, &response, target.ToMap()) + if err != nil { + return response, err + } + return response, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/functions/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/functions/api_service.go new file mode 100644 index 00000000000..8f879daac80 --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/bluemix-go/api/functions/api_service.go @@ -0,0 +1,109 @@ +package functions + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//FunctionServiceAPI .. +type FunctionServiceAPI interface { + Namespaces() Functions +} + +//fnService holds the client +type fnService struct { + *client.Client +} + +//New ... +func New(sess *session.Session) (FunctionServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.FunctionsService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + + if config.Endpoint == nil { + ep, err := config.EndpointLocator.FunctionsEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &fnService{ + Client: client.New(config, bluemix.FunctionsService, tokenRefreher), + }, nil +} + +//NewCF ... +func NewCF(sess *session.Session) (FunctionServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.FunctionsService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewUAARepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + + if config.UAAAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + + if config.Endpoint == nil { + ep, err := config.EndpointLocator.FunctionsEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &fnService{ + Client: client.New(config, bluemix.FunctionsService, tokenRefreher), + }, nil +} + +//Namespaces .. +func (ns *fnService) Namespaces() Functions { + return newFunctionsAPI(ns.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/functions/functions.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/functions/functions.go new file mode 100644 index 00000000000..54c59460b7f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/functions/functions.go @@ -0,0 +1,77 @@ +package functions + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +// const .. +const ( + NamespaceTypeCFBased = 1 + NamespaceTypeIamMigrated = 2 + NamespaceTypeIamBased = 3 + + DefaultServiceURL = "https://gateway.watsonplatform.net/servicebroker/API/v1" + DefaultServiceName = "ibm_cloud_functions_namespace_API" +) + +// functions +type functions struct { + client *client.Client +} + +// Functions .. 
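+// Functions manages IBM Cloud Functions namespaces (list, create, update,
+// delete). Illustrative usage sketch (not part of upstream), assuming a
+// configured *client.Client c:
+//
+//   fns := newFunctionsAPI(c)
+//   namespaces, err := fns.GetNamespaces()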
+type Functions interface { + GetCloudFoundaryNamespaces() (NamespaceResponseList, error) + DeleteNamespace(namespaceID string) (NamespaceResponse, error) + CreateNamespace(CreateNamespaceOptions) (NamespaceResponse, error) + GetNamespaces() (NamespaceResponseList, error) + GetNamespace(payload GetNamespaceOptions) (NamespaceResponse, error) + UpdateNamespace(payload UpdateNamespaceOptions) (NamespaceResponse, error) +} + +func newFunctionsAPI(c *client.Client) Functions { + return &functions{ + client: c, + } +} + +func (r *functions) GetCloudFoundaryNamespaces() (NamespaceResponseList, error) { + var successV NamespaceResponseList + formData := make(map[string]string) + formData["accessToken"] = r.client.Config.UAAAccessToken[7:len(r.client.Config.UAAAccessToken)] + formData["refreshToken"] = r.client.Config.UAARefreshToken + _, err := r.client.PostWithForm("/bluemix/v2/authenticate", formData, &successV) + return successV, err +} + +func (r *functions) GetNamespaces() (NamespaceResponseList, error) { + var successV NamespaceResponseList + _, err := r.client.Get("/api/v1/namespaces", &successV) + return successV, err +} + +func (r *functions) CreateNamespace(payload CreateNamespaceOptions) (NamespaceResponse, error) { + var successV NamespaceResponse + _, err := r.client.Post("/api/v1/namespaces", payload, &successV) + return successV, err +} + +func (r *functions) GetNamespace(payload GetNamespaceOptions) (NamespaceResponse, error) { + var successV NamespaceResponse + _, err := r.client.Get(fmt.Sprintf("/api/v1/namespaces/%s", *payload.ID), &successV) + return successV, err +} + +func (r *functions) DeleteNamespace(namespaceID string) (NamespaceResponse, error) { + var successV NamespaceResponse + _, err := r.client.Delete(fmt.Sprintf("/api/v1/namespaces/%s", namespaceID)) + return successV, err +} + +func (r *functions) UpdateNamespace(payload UpdateNamespaceOptions) (NamespaceResponse, error) { + var successV NamespaceResponse + _, err := r.client.Patch(fmt.Sprintf("/api/v1/namespaces/%s", *payload.ID), payload, &successV) + return successV, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/functions/functions_util.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/functions/functions_util.go new file mode 100644 index 00000000000..0c3f514e559 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/functions/functions_util.go @@ -0,0 +1,205 @@ +package functions + +import "github.com/go-openapi/strfmt" + +// GetNamespacesOptions : The GetNamespaces options. +type GetNamespacesOptions struct { + + // The maximum number of namespaces to return. Default 100. Maximum 200. + Limit *int64 `json:"limit,omitempty"` + + // The number of namespaces to skip. Default 0. + Offset *int64 `json:"offset,omitempty"` + + // Allows users to set headers to be GDPR compliant + Headers map[string]string +} + +// NamespaceResponse : NamespaceResponse - create/get response. +type NamespaceResponse struct { + + // Time the API key was activated. + APIKeyCreated *strfmt.DateTime `json:"API_key_created,omitempty"` + + // ID of API key used by the namespace. + APIKeyID *string `json:"API_key_id,omitempty"` + + // CF space GUID of classic namespace - present if it is or was a classic namespace. + ClassicSpaceguid *string `json:"classic_spaceguid,omitempty"` + + // ClassicType
+ // This attribute will be absent for an IAM namespace, a namespace which is IAM-enabled and not
+ // associated with any CF space.
+ // 1 : Classic - A namespace which is associated with a CF space. Such a
+ // namespace is NOT IAM-enabled and can only be used with the legacy API key ('entitlement key').
+ // 2 : Classic IAM enabled - A namespace which is associated with a CF space and which is IAM-enabled.
+ // It accepts an IAM token and the legacy API key ('entitlement key') for authorization.
+ // 3 : IAM migration complete - A namespace which was/is associated with a CF space and which is IAM-enabled.
+ // It accepts only an IAM token for authorization.
. + ClassicType *int64 `json:"classic_type,omitempty"` + + // CRN of namespace - absent if namespace is NOT IAM-enabled. + Crn *string `json:"crn,omitempty"` + + // Description - absent if namespace is NOT IAM-enabled. + Description *string `json:"description,omitempty"` + + // UUID of namespace. + ID *string `json:"id" validate:"required"` + + // Location of the resource. + Location *string `json:"location" validate:"required"` + + // Name - absent if namespace is NOT IAM-enabled. + Name *string `json:"name,omitempty"` + + // Resourceplanid used - absent if namespace is NOT IAM-enabled. + ResourcePlanID *string `json:"resource_plan_id,omitempty"` + + // Resourcegrpid used - absent if namespace is NOT IAM-enabled. + ResourceGroupID *string `json:"resource_group_id,omitempty"` + + // Serviceid used by the namespace - absent if namespace is NOT IAM-enabled. + ServiceID *string `json:"service_id,omitempty"` + + // Key used by the cf based namespace. + Key string `json:"key,omitempty"` + + // UUID used by the cf based namespace. + UUID string `json:"uuid,omitempty"` +} + +// NamespaceResponseList : NamespaceResponseList -. +type NamespaceResponseList struct { + + // Maximum number of namespaces to return. + Limit *int64 `json:"limit" validate:"required"` + + // List of namespaces. + Namespaces []NamespaceResponse `json:"namespaces" validate:"required"` + + // Number of namespaces to skip. + Offset *int64 `json:"offset" validate:"required"` + + // Total number of namespaces available. + TotalCount *int64 `json:"total_count" validate:"required"` +} + +// CreateNamespaceOptions : The CreateNamespace options. +type CreateNamespaceOptions struct { + + // Name. + Name *string `json:"name" validate:"required"` + + // Resourcegroupid of resource group the namespace resource should be placed in. Use 'ibmcloud resource groups' to + // query your resources groups and their ids. + ResourceGroupID *string `json:"resource_group_id" validate:"required"` + + // Resourceplanid to use, e.g. 'functions-base-plan'. + ResourcePlanID *string `json:"resource_plan_id" validate:"required"` + + // Description. + Description *string `json:"description,omitempty"` + + // Allows users to set headers to be GDPR compliant + Headers map[string]string +} + +// GetNamespaceOptions : The GetNamespace options. +type GetNamespaceOptions struct { + + // The id of the namespace to retrieve. + ID *string `json:"id" validate:"required"` + + // Allows users to set headers to be GDPR compliant + Headers map[string]string +} + +// DeleteNamespaceOptions : The DeleteNamespace options. +type DeleteNamespaceOptions struct { + + // The id of the namespace to delete. + ID *string `json:"id" validate:"required"` + + // Allows users to set headers to be GDPR compliant + Headers map[string]string +} + +// UpdateNamespaceOptions : The UpdateNamespace options. +type UpdateNamespaceOptions struct { + + // The id of the namespace to update. + ID *string `json:"id" validate:"required"` + + // New description. + Description *string `json:"description,omitempty"` + + // New name. + Name *string `json:"name,omitempty"` + + // Allows users to set headers to be GDPR compliant + Headers map[string]string +} + +//NamespaceResource .. +type NamespaceResource interface { + GetID() string + GetLocation() string + GetName() string + GetUUID() string + GetKey() string + IsIamEnabled() bool + IsCf() bool +} + +//GetID .. +func (ns *NamespaceResponse) GetID() string { + return *ns.ID +} + +//GetName .. 
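+// GetName returns the IAM name when one is set and otherwise falls back to
+// the namespace ID, which serves as the visible name for classic (CF-based)
+// namespaces.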
+func (ns *NamespaceResponse) GetName() string { + // Classic support - if no name included in namespace obj return the ID (classic namespace name) + if ns.Name != nil { + return *ns.Name + } + return *ns.ID +} + +//GetKey .. +func (ns *NamespaceResponse) GetKey() string { + return ns.Key +} + +//GetUUID .. +func (ns *NamespaceResponse) GetUUID() string { + return ns.UUID +} + +//GetLocation .. +func (ns *NamespaceResponse) GetLocation() string { + return *ns.Location +} + +//IsCf .. +func (ns *NamespaceResponse) IsCf() bool { + var iscf bool = false + if ns.ClassicType != nil { + iscf = (*ns.ClassicType == NamespaceTypeCFBased) + } + return iscf +} + +//IsIamEnabled .. +func (ns *NamespaceResponse) IsIamEnabled() bool { + // IAM support - classic_type field is not included for new IAM namespaces so always return true if nil + if ns.ClassicType == nil { + return true + } + return false +} + +//IsMigrated .. +func (ns *NamespaceResponse) IsMigrated() bool { + if *ns.ClassicType == NamespaceTypeIamMigrated { + return true + } + return false +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/globalsearch/globalsearchv2/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/globalsearch/globalsearchv2/api_service.go new file mode 100644 index 00000000000..ed3b4248bfb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/globalsearch/globalsearchv2/api_service.go @@ -0,0 +1,68 @@ +package globalsearchv2 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//ICDServiceAPI is the Cloud Internet Services API ... +type GlobalSearchServiceAPI interface { + Searches() Searches +} + +//ICDService holds the client +type globalSearchService struct { + *client.Client +} + +//New ... 
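+// Illustrative usage sketch (not part of upstream), assuming an initialized
+// session.Session sess and an example query string:
+//
+//   svc, err := New(sess)
+//   if err != nil { /* handle error */ }
+//   result, err := svc.Searches().PostQuery(SearchBody{Query: "name:my-resource"})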
+func New(sess *session.Session) (GlobalSearchServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.GlobalSearchService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.GlobalSearchEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &globalSearchService{ + Client: client.New(config, bluemix.GlobalSearchService, tokenRefreher), + }, nil +} + +//Search implements the global search API +func (c *globalSearchService) Searches() Searches { + return newSearchAPI(c.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/globalsearch/globalsearchv2/search.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/globalsearch/globalsearchv2/search.go new file mode 100644 index 00000000000..47029b2a76c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/globalsearch/globalsearchv2/search.go @@ -0,0 +1,51 @@ +package globalsearchv2 + +import ( + "fmt" + "github.com/IBM-Cloud/bluemix-go/client" +) + +type SearchResult struct { + Items []Item `json:"items"` + MoreData bool `json:"more_data"` + Token string `json:"token"` + FilterError bool `json:"filter_error"` + PartialData int `json:"partial_data"` +} + +type Item struct { + Name string `json:"name,omitempty"` + CRN string `json:"crn,omitempty"` + ServiceName string `json:"service_name,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +type SearchBody struct { + Query string `json:"query"` + Fields []string `json:"fields,omitempty"` + Token string `json:"token,omitempty"` +} + +type Searches interface { + PostQuery(searchBody SearchBody) (SearchResult, error) +} + +type searches struct { + client *client.Client +} + +func newSearchAPI(c *client.Client) Searches { + return &searches{ + client: c, + } +} + +func (r *searches) PostQuery(searchBody SearchBody) (SearchResult, error) { + searchResult := SearchResult{} + rawURL := fmt.Sprintf("/v2/resources/search") + _, err := r.client.Post(rawURL, &searchBody, &searchResult) + if err != nil { + return searchResult, err + } + return searchResult, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/globaltagging/globaltaggingv3/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/globaltagging/globaltaggingv3/api_service.go new file mode 100644 index 00000000000..e2dc34706db --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/globaltagging/globaltaggingv3/api_service.go @@ -0,0 +1,68 @@ +package globaltaggingv3 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//ICDServiceAPI is the Cloud Internet Services API ... 
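+// GlobalTaggingServiceAPI exposes the IBM Cloud Global Tagging API.
+// Illustrative usage sketch (not part of upstream), assuming an initialized
+// session and a resource CRN in crn:
+//
+//   svc, err := New(sess)
+//   result, err := svc.Tags().AttachTags(crn, []string{"env:prod"})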
+type GlobalTaggingServiceAPI interface { + Tags() Tags +} + +//ICDService holds the client +type globalTaggingService struct { + *client.Client +} + +//New ... +func New(sess *session.Session) (GlobalTaggingServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.GlobalTaggingService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.GlobalTaggingEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &globalTaggingService{ + Client: client.New(config, bluemix.GlobalTaggingService, tokenRefreher), + }, nil +} + +//Tagging implements the global tagging API +func (c *globalTaggingService) Tags() Tags { + return newTaggingAPI(c.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/globaltagging/globaltaggingv3/tagging.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/globaltagging/globaltaggingv3/tagging.go new file mode 100644 index 00000000000..7d877ef9ab1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/globaltagging/globaltaggingv3/tagging.go @@ -0,0 +1,114 @@ +package globaltaggingv3 + +import ( + "fmt" + "github.com/IBM-Cloud/bluemix-go/client" +) + +type TaggingResult struct { + Items []Item `json:"items"` +} + +type Item struct { + Name string `json:"name"` +} + +type TagUpdateResult struct { + Results []TagResult `json:"results"` +} + +type TagResult struct { + ResourceID string `json:"resource_id"` + IsError string `json:"isError"` + Response string `json:"response"` + Message string `json:"message"` + Code string `json:"code"` + Level string `json:"level"` + HttpCode int `json:"httpCode"` + Description string `json:"description"` + MoreInfo string `json:"more_info"` +} + +type TaggingBody struct { + TagResources []TagResource `json:"resources"` + TagName string `json:"tag_name,omitempty"` + TagNames []string `json:"tag_names,omitempty"` +} + +type TagResource struct { + ResourceID string `json:"resource_id"` + ResourceType string `json:"resource_type,omitempty"` +} + +type Tags interface { + GetTags(resourceID string) (TaggingResult, error) + AttachTags(resourceID string, taglist []string) (TagUpdateResult, error) + DetachTags(resourceID string, taglist []string) (TagUpdateResult, error) + DeleteTag(tag string) (TagUpdateResult, error) +} + +type tags struct { + client *client.Client +} + +func newTaggingAPI(c *client.Client) Tags { + return &tags{ + client: c, + } +} + +func (r *tags) GetTags(resourceID string) (TaggingResult, error) { + taggingResult := TaggingResult{} + query := fmt.Sprintf("?attached_to=%v", resourceID) + rawURL := fmt.Sprintf("/v3/tags" + query) + _, err := r.client.Get(rawURL, &taggingResult) + if err != nil { + return taggingResult, err + } + return taggingResult, nil +} + +func (r *tags) AttachTags(resourceID string, taglist []string) (TagUpdateResult, error) { + tagUpdateResult := TagUpdateResult{} + taggingBody := TaggingBody{ + TagResources: []TagResource{ + {ResourceID: resourceID}, + }, + TagNames: taglist, + } + rawURL := 
fmt.Sprintf("/v3/tags/attach") + _, err := r.client.Post(rawURL, &taggingBody, &tagUpdateResult) + if err != nil { + return tagUpdateResult, err + } + return tagUpdateResult, nil + +} + +func (r *tags) DetachTags(resourceID string, taglist []string) (TagUpdateResult, error) { + tagUpdateResult := TagUpdateResult{} + taggingBody := TaggingBody{ + TagResources: []TagResource{ + {ResourceID: resourceID}, + }, + TagNames: taglist, + } + rawURL := fmt.Sprintf("/v3/tags/detach") + _, err := r.client.Post(rawURL, &taggingBody, &tagUpdateResult) + if err != nil { + return tagUpdateResult, err + } + return tagUpdateResult, nil + +} + +func (r *tags) DeleteTag(tagin string) (TagUpdateResult, error) { + tagUpdateResult := TagUpdateResult{} + rawURL := fmt.Sprintf("/v3/tags/%s", tagin) + _, err := r.client.Delete(rawURL, &tagUpdateResult) + if err != nil { + return tagUpdateResult, err + } + return tagUpdateResult, nil + +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/hpcs/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/hpcs/api_service.go new file mode 100644 index 00000000000..37bb0c262f1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/hpcs/api_service.go @@ -0,0 +1,68 @@ +package hpcs + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//HPCSV2 is the resource client ... +type HPCSV2 interface { + Endpoint() EndpointRepository +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//hpcsService holds the client +type hpcsService struct { + *client.Client +} + +//New ... 
+func New(sess *session.Session) (HPCSV2, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.HPCService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.HpcsEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &hpcsService{ + Client: client.New(config, bluemix.HPCService, tokenRefreher), + }, nil +} + +//Hpcs API +func (a *hpcsService) Endpoint() EndpointRepository { + return NewHpcsEndpointRepository(a.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/hpcs/endpoint.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/hpcs/endpoint.go new file mode 100644 index 00000000000..dde0e181851 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/hpcs/endpoint.go @@ -0,0 +1,40 @@ +package hpcs + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +type EndpointResp struct { + InstanceID string `json:"instance_id"` + Kms Endpoints `json:"kms"` + Ep11 Endpoints `json:"ep11"` +} +type Endpoints struct { + Public string `json:"public"` + Private string `json:"private"` +} + +type EndpointRepository interface { + GetAPIEndpoint(instanceID string) (EndpointResp, error) +} + +type hpcsRepository struct { + client *client.Client +} + +func NewHpcsEndpointRepository(c *client.Client) EndpointRepository { + return &hpcsRepository{ + client: c, + } +} + +func (r *hpcsRepository) GetAPIEndpoint(instanceID string) (EndpointResp, error) { + res := EndpointResp{} + _, err := r.client.Get(fmt.Sprintf("/instances/%s", instanceID), &res) + if err != nil { + return EndpointResp{}, err + } + return res, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/api_keys.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/api_keys.go new file mode 100644 index 00000000000..e57666b611c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/api_keys.go @@ -0,0 +1,151 @@ +package iamv1 + +import ( + "net/http" + "net/url" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +type APIKeyResource struct { + Metadata APIKeyMetadata `json:"metadata"` + Entity APIKeyEntity `json:"entity"` +} + +type APIKeyMetadata struct { + UUID string `json:"uuid"` + Version string `json:"version"` + Crn string `json:"crn"` + CreatedAt string `json:"createdAt"` + ModifiedAt string `json:"modifiedAt"` +} + +type APIKeyEntity struct { + Name string `json:"name"` + Description string `json:"description"` + BoundTo string `json:"boundTo"` + Format string `json:"format"` + APIKey string `json:"apiKey"` + APIKeyID string `json:"apiKeyId"` + APIKeySecret string `json:"apiKeySecret"` +} + +func (r APIKeyResource) ToModel() models.APIKey { + meta := r.Metadata + entity := r.Entity + + return models.APIKey{ + UUID: meta.UUID, + Version: meta.Version, + Crn: meta.Crn, + CreatedAt: meta.CreatedAt, + ModifiedAt: meta.ModifiedAt, + + Name: entity.Name, + Description: entity.Description, + BoundTo: entity.BoundTo, 
+ Format: entity.Format, + APIKey: entity.APIKey, + APIKeyID: entity.APIKeyID, + APIKeySecret: entity.APIKeySecret, + } +} + +const ( + _API_Key_Operation_Path_Root = "/apikeys/" +) + +type APIKeyRepository interface { + Get(uuid string) (*models.APIKey, error) + List(boundTo string) ([]models.APIKey, error) + FindByName(name string, boundTo string) ([]models.APIKey, error) + Create(key models.APIKey) (*models.APIKey, error) + Delete(uuid string) error + Update(uuid string, version string, key models.APIKey) (*models.APIKey, error) +} + +type apiKeyRepository struct { + client *client.Client +} + +func NewAPIKeyRepository(c *client.Client) APIKeyRepository { + return &apiKeyRepository{ + client: c, + } +} + +func (r *apiKeyRepository) Get(uuid string) (*models.APIKey, error) { + key := APIKeyResource{} + _, err := r.client.Get(_API_Key_Operation_Path_Root+uuid, &key) + if err != nil { + return nil, err + } + result := key.ToModel() + return &result, nil +} + +func (r *apiKeyRepository) List(boundTo string) ([]models.APIKey, error) { + var keys []models.APIKey + resp, err := r.client.GetPaginated("/apikeys?boundTo="+url.QueryEscape(boundTo), NewIAMPaginatedResources(APIKeyResource{}), func(resource interface{}) bool { + if apiKeyResource, ok := resource.(APIKeyResource); ok { + keys = append(keys, apiKeyResource.ToModel()) + return true + } + return false + }) + + if resp.StatusCode == http.StatusNotFound { + return []models.APIKey{}, nil + } + + return keys, err +} + +func (r *apiKeyRepository) FindByName(name string, boundTo string) ([]models.APIKey, error) { + var keys []models.APIKey + resp, err := r.client.GetPaginated("/apikeys?boundTo="+url.QueryEscape(boundTo), NewIAMPaginatedResources(APIKeyResource{}), func(resource interface{}) bool { + if apiKeyResource, ok := resource.(APIKeyResource); ok { + if apiKeyResource.Entity.Name == name { + keys = append(keys, apiKeyResource.ToModel()) + } + return true + } + return false + }) + + if resp.StatusCode == http.StatusNotFound { + return []models.APIKey{}, nil + } + + return keys, err +} + +func (r *apiKeyRepository) Create(key models.APIKey) (*models.APIKey, error) { + var keyCreated APIKeyResource + _, err := r.client.Post("/apikeys", &key, &keyCreated) + if err != nil { + return nil, err + } + keyToReturn := keyCreated.ToModel() + return &keyToReturn, err +} + +func (r *apiKeyRepository) Delete(uuid string) error { + _, err := r.client.Delete("/apikeys/" + uuid) + return err +} + +func (r *apiKeyRepository) Update(uuid string, version string, key models.APIKey) (*models.APIKey, error) { + req := rest.PutRequest(*r.client.Config.Endpoint + "/apikeys/" + uuid).Body(&key) + req.Set("If-Match", version) + + var keyUpdated APIKeyResource + _, err := r.client.SendRequest(req, &keyUpdated) + if err != nil { + return nil, err + } + keyToReturn := keyUpdated.ToModel() + return &keyToReturn, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/api_service.go new file mode 100644 index 00000000000..0953f831e4b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/api_service.go @@ -0,0 +1,98 @@ +package iamv1 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//IAMServiceAPI is the resource 
client ... +type IAMServiceAPI interface { + ServiceRoles() ServiceRoleRepository + ServiceIds() ServiceIDRepository + APIKeys() APIKeyRepository + ServicePolicies() ServicePolicyRepository + UserPolicies() UserPolicyRepository + Identity() Identity +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//iamService holds the client +type iamService struct { + *client.Client +} + +//New ... +func New(sess *session.Session) (IAMServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.IAMService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.IAMEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &iamService{ + Client: client.New(config, bluemix.IAMService, tokenRefreher), + }, nil +} + +//ServiceRoles API +func (a *iamService) ServiceRoles() ServiceRoleRepository { + return NewServiceRoleRepository(a.Client) +} + +//ServiceIdsAPI +func (a *iamService) ServiceIds() ServiceIDRepository { + return NewServiceIDRepository(a.Client) +} + +//APIkeys +func (a *iamService) APIKeys() APIKeyRepository { + return NewAPIKeyRepository(a.Client) +} + +//ServicePolicyAPI +func (a *iamService) ServicePolicies() ServicePolicyRepository { + return NewServicePolicyRepository(a.Client) +} + +//UserPoliciesAPI +func (a *iamService) UserPolicies() UserPolicyRepository { + return NewUserPolicyRepository(a.Client) +} + +//IdentityAPI +func (a *iamService) Identity() Identity { + return NewIdentity(a.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/identity.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/identity.go new file mode 100644 index 00000000000..1a466b695c0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/identity.go @@ -0,0 +1,54 @@ +package iamv1 + +import ( + "github.com/IBM-Cloud/bluemix-go/client" +) + +type AccountInfo struct { + Bss string `json:"bss"` + Ims string `json:"ims"` +} + +type UserInfo struct { + Active bool `json:"active"` + RealmID string `json:"realmId"` + Identifier string `json:"identifier"` + IamID string `json:"iam_id"` + GivenName string `json:"given_name"` + FamilyName string `json:"family_name"` + Name string `json:"name"` + Email string `json:"email"` + Sub string `json:"sub"` + Account AccountInfo `json:"account"` + Iat int `json:"iat"` + Exp int `json:"exp"` + Iss string `json:"iss"` + GrantType string `json:"grant_type"` + ClientID string `json:"client_id"` + Scope string `json:"scope"` + Acr int `json:"acr"` + Amr []string `json:"amr"` +} + +type Identity interface { + UserInfo() (*UserInfo, error) +} + +type identity struct { + client *client.Client +} + +func NewIdentity(c *client.Client) Identity { + return &identity{ + client: c, + } +} + +func (r *identity) UserInfo() (*UserInfo, error) { + userInfo := UserInfo{} + _, err := r.client.Get("/identity/userinfo", &userInfo) + if err != nil { + return nil, err + } + return &userInfo, nil +} diff --git 
a/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/pagination.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/pagination.go new file mode 100644 index 00000000000..7af0f8c355b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/pagination.go @@ -0,0 +1,52 @@ +package iamv1 + +import ( + "encoding/json" + "net/url" + "reflect" + "strings" +) + +const _PageTokenQuery = "pagetoken" + +type IAMPaginatedResourcesHandler struct { + resourceType reflect.Type +} + +func NewIAMPaginatedResources(resource interface{}) IAMPaginatedResourcesHandler { + return IAMPaginatedResourcesHandler{ + resourceType: reflect.TypeOf(resource), + } +} + +func (pr IAMPaginatedResourcesHandler) Resources(bytes []byte, curPath string) ([]interface{}, string, error) { + var paginatedResources = struct { + NextPageToken string `json:"nextPageToken"` + ResourcesBytes json.RawMessage `json:"items"` + }{} + + err := json.Unmarshal(bytes, &paginatedResources) + + var nextPath string + if paginatedResources.NextPageToken != "" { + u, err := url.Parse(curPath) + if err == nil { + q := u.Query() + q.Set(_PageTokenQuery, paginatedResources.NextPageToken) + u.RawQuery = q.Encode() + nextPath = u.String() + } + } + + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + dc := json.NewDecoder(strings.NewReader(string(paginatedResources.ResourcesBytes))) + dc.UseNumber() + err = dc.Decode(slicePtr.Interface()) + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + return contents, nextPath, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/service_ids.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/service_ids.go new file mode 100644 index 00000000000..627e7251749 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/service_ids.go @@ -0,0 +1,165 @@ +package iamv1 + +import ( + "net/http" + "net/url" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +type ServiceIDResource struct { + Metadata IAMMetadata `json:"metadata"` + Entity ServiceIDEntity `json:"entity"` +} + +type ServiceIDEntity struct { + BoundTo string `json:"boundTo"` + Name string `json:"name"` + Description string `json:"description"` +} + +func (r *ServiceIDResource) ToModel() models.ServiceID { + return models.ServiceID{ + UUID: r.Metadata.UUID, + IAMID: r.Metadata.IAMID, + CRN: r.Metadata.CRN, + BoundTo: r.Entity.BoundTo, + Name: r.Entity.Name, + Description: r.Entity.Description, + Version: r.Metadata.Version, + CreatedAt: r.Metadata.CreatedAt, + ModifiedAt: r.Metadata.ModifiedAt, + } +} + +type IAMMetadata struct { + UUID string `json:"uuid"` + IAMID string `json:"iam_id"` + Version string `json:"version"` + CRN string `json:"crn"` + CreatedAt string `json:"createdAt"` + ModifiedAt string `json:"modifiedAt"` +} + +const ( + _IAM_App = "iam" + _IAM_ENDPOINT_ENV = "IAM_ENDPOINT" + _SERVICE_ID_PATH = "/serviceids/" + _BoundToQuery = "boundTo" +) + +//go:generate counterfeiter . 
ServiceIDRepository +type ServiceIDRepository interface { + Get(uuid string) (models.ServiceID, error) + List(boundTo string) ([]models.ServiceID, error) + FindByName(boundTo string, name string) ([]models.ServiceID, error) + Create(serviceId models.ServiceID) (models.ServiceID, error) + Update(uuid string, serviceId models.ServiceID, version string) (models.ServiceID, error) + Delete(uuid string) error +} + +type serviceIDRepository struct { + client *client.Client +} + +func NewServiceIDRepository(c *client.Client) ServiceIDRepository { + return &serviceIDRepository{ + client: c, + } +} + +type IAMResponseContext struct { + RequestID string `json:"requestId"` + RequestType string `json:"requestType"` + UserAgent string `json:"userAgent"` + ClientIP string `json:"clientIp"` + InstanceID string `json:"instanceId"` + ThreadID string `json:"threadId"` + Host string `json:"host"` + StartTime string `json:"startTime"` + EndTime string `json:"endTime"` + ElapsedTime string `json:"elapsedTime"` + Locale string `json:"locale"` +} + +type ServiceIDSearchResults struct { + Context IAMResponseContext `json:"context"` + ServiceIDs []ServiceIDResource `json:"items"` +} + +func (r *serviceIDRepository) List(boundTo string) ([]models.ServiceID, error) { + var serviceIDs []models.ServiceID + _, err := r.client.GetPaginated("/serviceids?boundTo="+url.QueryEscape(boundTo), NewIAMPaginatedResources(ServiceIDResource{}), func(r interface{}) bool { + if idResource, ok := r.(ServiceIDResource); ok { + serviceIDs = append(serviceIDs, idResource.ToModel()) + return true + } + return false + }) + + if err != nil { + return []models.ServiceID{}, err + } + + return serviceIDs, nil +} + +func (r *serviceIDRepository) FindByName(boundTo string, name string) ([]models.ServiceID, error) { + var serviceIDs []models.ServiceID + resp, err := r.client.GetPaginated("/serviceids?boundTo="+url.QueryEscape(boundTo), NewIAMPaginatedResources(ServiceIDResource{}), func(r interface{}) bool { + if idResource, ok := r.(ServiceIDResource); ok { + if idResource.Entity.Name == name { + serviceIDs = append(serviceIDs, idResource.ToModel()) + } + return true + } + return false + }) + + if resp.StatusCode == http.StatusNotFound { + return []models.ServiceID{}, nil + } + + return serviceIDs, err +} + +type ServiceIDResponse struct { + IAMResponseContext + ServiceIDResource +} + +func (r *serviceIDRepository) Create(serviceId models.ServiceID) (models.ServiceID, error) { + createdId := ServiceIDResponse{} + _, err := r.client.Post(_SERVICE_ID_PATH, &serviceId, &createdId) + if err != nil { + return models.ServiceID{}, err + } + return createdId.ToModel(), err +} + +func (r *serviceIDRepository) Update(uuid string, serviceId models.ServiceID, version string) (models.ServiceID, error) { + updatedId := ServiceIDResponse{} + request := rest.PutRequest(helpers.GetFullURL(*r.client.Config.Endpoint, _SERVICE_ID_PATH+uuid)).Add("If-Match", version).Body(&serviceId) + _, err := r.client.SendRequest(request, &updatedId) + if err != nil { + return models.ServiceID{}, err + } + return updatedId.ToModel(), err +} + +func (r *serviceIDRepository) Delete(uuid string) error { + _, err := r.client.Delete(_SERVICE_ID_PATH + uuid) + return err +} + +func (r *serviceIDRepository) Get(uuid string) (models.ServiceID, error) { + serviceID := ServiceIDResponse{} + _, err := r.client.Get(_SERVICE_ID_PATH+uuid, &serviceID) + if err != nil { + return models.ServiceID{}, err + } + return serviceID.ToModel(), nil +} diff --git 
a/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/service_policies.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/service_policies.go new file mode 100644 index 00000000000..810e5ad00c7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/service_policies.go @@ -0,0 +1,102 @@ +package iamv1 + +import ( + "fmt" + "net/url" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +const ( + _ServicePoliciesEndpointTemplate = "/acms/v1/scopes/%s/service_ids/%s/policies" + _ServicePolicyEndpointTemplate = "/acms/v1/scopes/%s/service_ids/%s/policies/%s" +) + +// identifier to specify the exact service policy +// following hierarchy "scope/service ID/policy ID" +type ServicePolicyIdentifier struct { + Scope string + IAMID string + PolicyID string +} + +//go:generate counterfeiter . ServicePolicyRepository +type ServicePolicyRepository interface { + List(scope string, serviceID string) ([]models.Policy, error) + Get(scope string, serviceID string, policyID string) (models.Policy, error) + Create(scope string, serviceID string, policy models.Policy) (models.Policy, error) + Update(identifier ServicePolicyIdentifier, policy models.Policy, version string) (models.Policy, error) + Delete(identifier ServicePolicyIdentifier) error +} + +type servicePolicyRepository struct { + client *client.Client +} + +func NewServicePolicyRepository(c *client.Client) ServicePolicyRepository { + return &servicePolicyRepository{ + client: c, + } +} + +type ServicePolicyQueryResult struct { + Policies []models.Policy `json:"policies"` +} + +func (r *servicePolicyRepository) List(scope string, serviceID string) ([]models.Policy, error) { + response := ServicePolicyQueryResult{} + _, err := r.client.Get(r.generateURLPath(_ServicePoliciesEndpointTemplate, scope, serviceID), &response) + if err != nil { + return []models.Policy{}, err + } + return response.Policies, nil +} + +func (r *servicePolicyRepository) Get(scope string, serviceID string, policyID string) (models.Policy, error) { + response := models.Policy{} + resp, err := r.client.Get(r.generateURLPath(_ServicePolicyEndpointTemplate, scope, serviceID, policyID), &response) + if err != nil { + return models.Policy{}, err + } + response.Version = resp.Header.Get("Etag") + return response, nil +} + +func (r *servicePolicyRepository) Create(scope string, serviceID string, policy models.Policy) (models.Policy, error) { + policyCreated := models.Policy{} + resp, err := r.client.Post(r.generateURLPath(_ServicePoliciesEndpointTemplate, scope, serviceID), &policy, &policyCreated) + if err != nil { + return models.Policy{}, err + } + policyCreated.Version = resp.Header.Get("Etag") + return policyCreated, nil +} + +func (r *servicePolicyRepository) Update(identifier ServicePolicyIdentifier, policy models.Policy, version string) (models.Policy, error) { + policyUpdated := models.Policy{} + request := rest.PutRequest(helpers.GetFullURL(*r.client.Config.Endpoint, + r.generateURLPath(_ServicePolicyEndpointTemplate, identifier.Scope, identifier.IAMID, identifier.PolicyID))).Body(&policy).Set("If-Match", version) + resp, err := r.client.SendRequest(request, &policyUpdated) + if err != nil { + return models.Policy{}, err + } + policyUpdated.Version = resp.Header.Get("Etag") + return policyUpdated, nil +} + +func (r *servicePolicyRepository) Delete(identifier ServicePolicyIdentifier) error { + _, err := 
r.client.Delete(r.generateURLPath(_ServicePolicyEndpointTemplate, identifier.Scope, identifier.IAMID, identifier.PolicyID)) + return err +} + +func (r *servicePolicyRepository) generateURLPath(template string, parameters ...string) string { + // TODO: need a URL generator to auto escape parameters + escaped := []interface{}{} + for _, parameter := range parameters { + escaped = append(escaped, url.PathEscape(parameter)) + } + return fmt.Sprintf(template, escaped...) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/service_roles.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/service_roles.go new file mode 100644 index 00000000000..afb6e65fe8b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/service_roles.go @@ -0,0 +1,111 @@ +package iamv1 + +import ( + "net/url" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +type serviceRoleQueryResponse struct { + ServiceSpecificRoles []models.PolicyRole `json:"supportedRoles"` + PlatformExtensions struct { + Roles []models.PolicyRole `json:"supportedRoles"` + } `json:"platformExtensions"` +} + +type ServiceRoleRepository interface { + // List all roles of a given service, including those supported system defined roles + ListServiceRoles(serviceName string) ([]models.PolicyRole, error) + // List all system defined roles + ListSystemDefinedRoles() ([]models.PolicyRole, error) + // List servie specific roles + ListServiceSpecificRoles(serviceName string) ([]models.PolicyRole, error) + // List authorization roles + ListAuthorizationRoles(sourceServiceName string, targetServiceName string) ([]models.PolicyRole, error) +} + +type serviceRoleRepository struct { + client *client.Client +} + +func NewServiceRoleRepository(c *client.Client) ServiceRoleRepository { + return &serviceRoleRepository{ + client: c, + } +} + +func (r *serviceRoleRepository) ListServiceRoles(serviceName string) ([]models.PolicyRole, error) { + response := struct { + ServiceSpecificRoles []models.PolicyRole `json:"supportedRoles"` + PlatformExtensions struct { + Roles []models.PolicyRole `json:"supportedRoles"` + } `json:"platformExtensions"` + }{} + + _, err := r.client.Get("/acms/v1/roles?serviceName="+url.QueryEscape(serviceName), &response) + if err != nil { + return []models.PolicyRole{}, err + } + + roles := append(response.ServiceSpecificRoles, response.PlatformExtensions.Roles...) 
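+ // Merge the service-specific roles with the platform-extension roles so the
+ // caller receives a single flat list.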
+
+	return roles, nil
+}
+
+func (r *serviceRoleRepository) ListSystemDefinedRoles() ([]models.PolicyRole, error) {
+	response := struct {
+		Roles []models.Role `json:"systemDefinedRoles"`
+	}{}
+
+	_, err := r.client.Get("/acms/v1/roles", &response)
+	if err != nil {
+		return []models.PolicyRole{}, err
+	}
+
+	// system-defined roles use `crn` instead of `id`, so convert them
+	// TODO: remove this if IAM PAP unifies the data model
+	roles := []models.PolicyRole{}
+	for _, role := range response.Roles {
+		roles = append(roles, role.ToPolicyRole())
+	}
+	return roles, nil
+}
+
+func (r *serviceRoleRepository) ListServiceSpecificRoles(serviceName string) ([]models.PolicyRole, error) {
+	var response serviceRoleQueryResponse
+	var err error
+	if response, err = r.queryServiceRoles(serviceName); err != nil {
+		return []models.PolicyRole{}, err
+	}
+	return response.ServiceSpecificRoles, nil
+}
+
+func (r *serviceRoleRepository) queryServiceRoles(name string) (serviceRoleQueryResponse, error) {
+	response := serviceRoleQueryResponse{}
+
+	_, err := r.client.Get("/acms/v1/roles?serviceName="+url.QueryEscape(name), &response)
+	if err != nil {
+		return serviceRoleQueryResponse{}, err
+	}
+
+	return response, nil
+}
+
+func (r *serviceRoleRepository) ListAuthorizationRoles(sourceServiceName string, targetServiceName string) ([]models.PolicyRole, error) {
+	req := rest.GetRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/acms/v1/roles"))
+	req.Query("sourceServiceName", sourceServiceName)
+	req.Query("serviceName", targetServiceName)
+	req.Query("policyType", "authorization")
+
+	var response serviceRoleQueryResponse
+	_, err := r.client.SendRequest(req, &response)
+	if err != nil {
+		return []models.PolicyRole{}, err
+	}
+
+	roles := append(response.ServiceSpecificRoles, response.PlatformExtensions.Roles...)
+	return roles, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/user_policies.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/user_policies.go
new file mode 100644
index 00000000000..fc8c0795729
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iam/iamv1/user_policies.go
@@ -0,0 +1,101 @@
+package iamv1
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/models"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+const (
+	_UserPoliciesPathTemplate = "/acms/v1/scopes/%s/users/%s/policies"
+	_UserPolicyPathTemplate   = "/acms/v1/scopes/%s/users/%s/policies/%s"
+)
+
+//go:generate counterfeiter . UserPolicyRepository
+type UserPolicyRepository interface {
+	List(scope string, ibmUniqueID string) ([]models.Policy, error)
+	Get(scope string, ibmUniqueID string, policyID string) (models.Policy, error)
+	Create(scope string, ibmUniqueID string, policy models.Policy) (models.Policy, error)
+	Update(scope string, ibmUniqueID string, policyID string, policy models.Policy, version string) (models.Policy, error)
+	Delete(scope string, ibmUniqueID string, policyID string) error
+}
+
+type userPolicyRepository struct {
+	client *client.Client
+}
+
+func NewUserPolicyRepository(c *client.Client) UserPolicyRepository {
+	return &userPolicyRepository{
+		client: c,
+	}
+}
+
+type PoliciesQueryResult struct {
+	Policies []models.Policy `json:"policies"`
+}
+
+func (r *userPolicyRepository) List(scope string, ibmUniqueID string) ([]models.Policy, error) {
+	result := PoliciesQueryResult{}
+	resp, err := r.client.Get(r.generateURLPath(_UserPoliciesPathTemplate, scope, ibmUniqueID), &result)
+
+	// A 404 means the user has no policies in this scope; guard against a nil
+	// response (e.g. transport errors) before reading the status code.
+	if resp != nil && resp.StatusCode == http.StatusNotFound {
+		return []models.Policy{}, nil
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return result.Policies, nil
+}
+
+func (r *userPolicyRepository) Get(scope string, ibmUniqueID string, policyID string) (models.Policy, error) {
+	policy := models.Policy{}
+	resp, err := r.client.Get(r.generateURLPath(_UserPolicyPathTemplate, scope, ibmUniqueID, policyID), &policy)
+	if err != nil {
+		return models.Policy{}, err
+	}
+	policy.Version = resp.Header.Get("Etag")
+	return policy, nil
+}
+
+func (r *userPolicyRepository) Create(scope string, ibmUniqueID string, policy models.Policy) (models.Policy, error) {
+	policyCreated := models.Policy{}
+	resp, err := r.client.Post(r.generateURLPath(_UserPoliciesPathTemplate, scope, ibmUniqueID), &policy, &policyCreated)
+	if err != nil {
+		return models.Policy{}, err
+	}
+	policyCreated.Version = resp.Header.Get("Etag")
+	return policyCreated, nil
+}
+
+func (r *userPolicyRepository) Update(scope string, ibmUniqueID string, policyID string, policy models.Policy, version string) (models.Policy, error) {
+	policyUpdated := models.Policy{}
+
+	request := rest.PutRequest(*r.client.Config.Endpoint + r.generateURLPath(_UserPolicyPathTemplate, scope, ibmUniqueID, policyID))
+	request = request.Set("If-Match", version).Body(&policy)
+
+	resp, err := r.client.SendRequest(request, &policyUpdated)
+	if err != nil {
+		return models.Policy{}, err
+	}
+	policyUpdated.Version = resp.Header.Get("Etag")
+	return policyUpdated, nil
+}
+
+func (r *userPolicyRepository) Delete(scope string, ibmUniqueID string, policyID string) error {
+	_, err := r.client.Delete(r.generateURLPath(_UserPolicyPathTemplate, scope, ibmUniqueID, policyID))
+	return err
+}
+
+func (r *userPolicyRepository) generateURLPath(template string, parameters ...string) string {
+	escaped := []interface{}{}
+	for _, parameter := range parameters {
+		escaped = append(escaped, url.QueryEscape(parameter))
+	}
+	return fmt.Sprintf(template, escaped...)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/api_service.go
new file mode 100644
index 00000000000..106ce5dfdc8
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/api_service.go
@@ -0,0 +1,84 @@
+package iampapv1
+
+import (
+	gohttp "net/http"
+
+	bluemix "github.com/IBM-Cloud/bluemix-go"
+	"github.com/IBM-Cloud/bluemix-go/authentication"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/http"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+	"github.com/IBM-Cloud/bluemix-go/session"
+)
+
+//IAMPAPAPI is the IAM PAP v1 client ...
+type IAMPAPAPI interface {
+	IAMPolicy() IAMPolicy
+	IAMService() IAMService
+	AuthorizationPolicies() AuthorizationPolicyRepository
+	V1Policy() V1PolicyRepository
+}
+
+//ErrCodeAPICreation ...
+const ErrCodeAPICreation = "APICreationError"
+
+//iampapService holds the client
+type iampapService struct {
+	*client.Client
+}
+
+//New ...
+func New(sess *session.Session) (IAMPAPAPI, error) {
+	config := sess.Config.Copy()
+	err := config.ValidateConfigForService(bluemix.IAMPAPService)
+	if err != nil {
+		return nil, err
+	}
+	if config.HTTPClient == nil {
+		config.HTTPClient = http.NewHTTPClient(config)
+	}
+	tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{
+		DefaultHeader: gohttp.Header{
+			"User-Agent": []string{http.UserAgent()},
+		},
+		HTTPClient: config.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if config.IAMAccessToken == "" {
+		err := authentication.PopulateTokens(tokenRefresher, config)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if config.Endpoint == nil {
+		ep, err := config.EndpointLocator.IAMPAPEndpoint()
+		if err != nil {
+			return nil, err
+		}
+		config.Endpoint = &ep
+	}
+	return &iampapService{
+		Client: client.New(config, bluemix.IAMPAPService, tokenRefresher),
+	}, nil
+}
+
+//IAMPolicy API
+func (a *iampapService) IAMPolicy() IAMPolicy {
+	return newIAMPolicyAPI(a.Client)
+}
+
+//IAMService API
+func (a *iampapService) IAMService() IAMService {
+	return newIAMServiceAPI(a.Client)
+}
+
+//AuthorizationPolicies API
+func (a *iampapService) AuthorizationPolicies() AuthorizationPolicyRepository {
+	return NewAuthorizationPolicyRepository(a.Client)
+}
+
+func (a *iampapService) V1Policy() V1PolicyRepository {
+	return NewV1PolicyRepository(a.Client)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/authorization_policies.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/authorization_policies.go
new file mode 100644
index 00000000000..d8246b99e23
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/authorization_policies.go
@@ -0,0 +1,125 @@
+package iampapv1
+
+import (
+	"fmt"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/models"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+type AuthorizationPolicy struct {
+	ID        string                  `json:"id,omitempty"`
+	Roles     []models.PolicyRole     `json:"roles"`
+	Resources []models.PolicyResource `json:"resources"`
+	Subjects  []models.PolicyResource `json:"subjects"`
+	Type      string                  `json:"type,omitempty"`
+	Version   string                  `json:"-"`
+}
+
+type AuthorizationPolicySearchQuery struct {
+	SubjectID     string
+	Type          string
+	AccessGroupID string
+}
+
+func (q *AuthorizationPolicySearchQuery) setQuery(r *rest.Request) {
+	if q.SubjectID != "" {
+		r.Query("subjectId", q.SubjectID)
+	}
+	if q.Type != "" {
+		r.Query("type", q.Type)
+	}
q.AccessGroupID != "" { + r.Query("accessGroupId", q.AccessGroupID) + } +} + +const ( + AuthorizationPolicyType = "authorization" + AccessPolicyType = "access" +) + +type AuthorizationPolicyRepository interface { + List(accountID string, query *AuthorizationPolicySearchQuery) ([]AuthorizationPolicy, error) + Get(accountID string, policyID string) (AuthorizationPolicy, error) + Create(accountID string, policy AuthorizationPolicy) (AuthorizationPolicy, error) + Update(accountID string, policyID string, policy AuthorizationPolicy, version string) (AuthorizationPolicy, error) + Delete(accountID string, policyID string) error + // Purge(accountID string, request DeleteAuthorizationPolicyRequest) (error) +} + +type authorizationPolicyRepository struct { + client *client.Client +} + +func NewAuthorizationPolicyRepository(c *client.Client) AuthorizationPolicyRepository { + return &authorizationPolicyRepository{ + client: c, + } +} + +type listAuthorizationPolicyResponse struct { + Policies []AuthorizationPolicy `json:"policies"` +} + +func (r *authorizationPolicyRepository) List(accountID string, query *AuthorizationPolicySearchQuery) ([]AuthorizationPolicy, error) { + request := rest.GetRequest(*r.client.Config.Endpoint + fmt.Sprintf("/acms/v2/accounts/%s/policies", accountID)) + + if query != nil { + query.setQuery(request) + } + + var response listAuthorizationPolicyResponse + _, err := r.client.SendRequest(request, &response) + if err != nil { + return []AuthorizationPolicy{}, err + } + return response.Policies, nil +} + +func (r *authorizationPolicyRepository) Get(accountID string, policyID string) (AuthorizationPolicy, error) { + var policy AuthorizationPolicy + + resp, err := r.client.Get(fmt.Sprintf("/acms/v2/accounts/%s/policies/%s", accountID, policyID), &policy) + if err != nil { + return AuthorizationPolicy{}, err + } + policy.Version = resp.Header.Get("Etag") + return policy, nil +} + +func (r *authorizationPolicyRepository) Create(accountID string, policy AuthorizationPolicy) (AuthorizationPolicy, error) { + var policyCreated AuthorizationPolicy + + resp, err := r.client.Post(fmt.Sprintf("/acms/v2/accounts/%s/policies", accountID), &policy, &policyCreated) + if err != nil { + return AuthorizationPolicy{}, err + } + policyCreated.Version = resp.Header.Get("Etag") + return policyCreated, nil +} + +func (r *authorizationPolicyRepository) Update(accountID string, policyID string, policy AuthorizationPolicy, version string) (AuthorizationPolicy, error) { + var policyUpdated AuthorizationPolicy + request := rest.PutRequest(*r.client.Config.Endpoint + fmt.Sprintf("/acms/v2/accounts/%s/policies/%s", accountID, policyID)).Body(policy) + if version != "" { + request = request.Set("If-Match", version) + } + + resp, err := r.client.SendRequest(request, &policyUpdated) + if err != nil { + return AuthorizationPolicy{}, err + } + policyUpdated.Version = resp.Header.Get("Etag") + + return policyUpdated, nil +} + +func (r *authorizationPolicyRepository) Delete(accountID string, policyID string) error { + _, err := r.client.Delete(fmt.Sprintf("/acms/v1/policies/%s?scope=%s", policyID, "a/"+accountID)) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/iam_policy.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/iam_policy.go new file mode 100644 index 00000000000..61f9f6680be --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/iam_policy.go @@ -0,0 +1,103 @@ +package iampapv1 + +import ( + 
"fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +type AccessPolicyRequest struct { + Roles []Roles `json:"roles" binding:"required"` + Resources []Resources `json:"resources" binding:"required"` +} + +type AccessPolicyResponse struct { + ID string + Roles []Roles + Resources []Resources +} + +type AccessPolicyListResponse struct { + Policies []AccessPolicyResponse +} + +type Roles struct { + ID string `json:"id" binding:"required"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` +} + +type Resources struct { + ServiceName string `json:"serviceName,omitempty"` + ServiceInstance string `json:"serviceInstance,omitempty"` + Region string `json:"region,omitempty"` + ResourceType string `json:"resourceType,omitempty"` + Resource string `json:"resource,omitempty"` + SpaceId string `json:"spaceId,omitempty"` + AccountId string `json:"accountId,omitempty"` + OrganizationId string `json:"organizationId,omitempty"` +} + +type IAMPolicy interface { + Create(scope, userId string, params AccessPolicyRequest) (AccessPolicyResponse, string, error) + List(scope, userId string) (AccessPolicyListResponse, error) + Delete(scope, userId, policyId string) error + Get(scope, userId, policyId string) (AccessPolicyResponse, error) + Update(scope, userId, policyId, etag string, params AccessPolicyRequest) (AccessPolicyResponse, string, error) +} + +type iampolicy struct { + client *client.Client +} + +const IAM_ACCOUNT_ESCAPE = "a%2f" + +func newIAMPolicyAPI(c *client.Client) IAMPolicy { + return &iampolicy{ + client: c, + } +} + +//Create ... +func (r *iampolicy) Create(scope, userId string, params AccessPolicyRequest) (AccessPolicyResponse, string, error) { + var accessPolicy AccessPolicyResponse + rawURL := fmt.Sprintf("/acms/v1/scopes/%s/users/%s/policies", IAM_ACCOUNT_ESCAPE+scope, userId) + resp, err := r.client.Post(rawURL, params, &accessPolicy) + eTag := resp.Header.Get("etag") + return accessPolicy, eTag, err +} + +//List ... +func (r *iampolicy) List(scope, userId string) (AccessPolicyListResponse, error) { + var accessPolicyListResponse AccessPolicyListResponse + rawURL := fmt.Sprintf("/acms/v1/scopes/%s/users/%s/policies", IAM_ACCOUNT_ESCAPE+scope, userId) + _, err := r.client.Get(rawURL, &accessPolicyListResponse) + return accessPolicyListResponse, err +} + +//Find ... +func (r *iampolicy) Get(scope, userId, policyId string) (AccessPolicyResponse, error) { + var accessPolicyResponse AccessPolicyResponse + rawURL := fmt.Sprintf("/acms/v1/scopes/%s/users/%s/policies/%s", IAM_ACCOUNT_ESCAPE+scope, userId, policyId) + _, err := r.client.Get(rawURL, &accessPolicyResponse) + return accessPolicyResponse, err +} + +//Update ... +func (r *iampolicy) Update(scope, userId, policyId, etag string, params AccessPolicyRequest) (AccessPolicyResponse, string, error) { + var accessPolicy AccessPolicyResponse + rawURL := fmt.Sprintf("/acms/v1/scopes/%s/users/%s/policies/%s", IAM_ACCOUNT_ESCAPE+scope, userId, policyId) + header := make(map[string]string) + + header["IF-Match"] = etag + accessPolicyResp, err := r.client.Put(rawURL, params, &accessPolicy, header) + eTag := accessPolicyResp.Header.Get("etag") + return accessPolicy, eTag, err +} + +//Delete ... 
+func (r *iampolicy) Delete(scope, userId, policyId string) error {
+	rawURL := fmt.Sprintf("/acms/v1/scopes/%s/users/%s/policies/%s", IAM_ACCOUNT_ESCAPE+scope, userId, policyId)
+	_, err := r.client.Delete(rawURL)
+	return err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/iam_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/iam_service.go
new file mode 100644
index 00000000000..5cca1b84a4e
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/iam_service.go
@@ -0,0 +1,40 @@
+package iampapv1
+
+import (
+	"github.com/IBM-Cloud/bluemix-go/client"
+)
+
+type IAMService interface {
+	GetServiceName(serviceDispName string) (string, error)
+	GetServiceDispalyName(serviceName string) (string, error)
+}
+
+type iamservice struct {
+	client *client.Client
+}
+
+func newIAMServiceAPI(c *client.Client) IAMService {
+	return &iamservice{
+		client: c,
+	}
+}
+
+//GetServiceName ...
+func (r *iamservice) GetServiceName(serviceDispName string) (string, error) {
+	serviceMap := make(map[string]string)
+	serviceMap["IBM Bluemix Container Service"] = "containers-kubernetes"
+	serviceMap["All Identity and Access enabled services"] = "All Identity and Access enabled services"
+	//rawURL := "/acms/v1/services"
+	//resp, err := r.client.Get(rawURL, &services)
+	return serviceMap[serviceDispName], nil
+}
+
+//GetServiceDisplayName ...
+func (r *iamservice) GetServiceDispalyName(serviceName string) (string, error) {
+	serviceMap := make(map[string]string)
+	serviceMap["containers-kubernetes"] = "IBM Bluemix Container Service"
+	serviceMap["All Identity and Access enabled services"] = "All Identity and Access enabled services"
+	//rawURL := "/acms/v1/services"
+	//resp, err := r.client.Get(rawURL, &services)
+	return serviceMap[serviceName], nil
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/models.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/models.go
new file mode 100644
index 00000000000..8a4683da2d9
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/models.go
@@ -0,0 +1,339 @@
+package iampapv1
+
+import (
+	"github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv2"
+	"github.com/IBM-Cloud/bluemix-go/models"
+)
+
+// Policy is the model of IAM PAP policy
+type Policy struct {
+	ID               string     `json:"id,omitempty"`
+	Type             string     `json:"type"`
+	Subjects         []Subject  `json:"subjects"`
+	Roles            []Role     `json:"roles"`
+	Resources        []Resource `json:"resources"`
+	Href             string     `json:"href,omitempty"`
+	CreatedAt        string     `json:"created_at,omitempty"`
+	CreatedByID      string     `json:"created_by_id,omitempty"`
+	LastModifiedAt   string     `json:"last_modified_at,omitempty"`
+	LastModifiedByID string     `json:"last_modified_by_id,omitempty"`
+	Version          string     `json:"-"`
+}
+
+// Role is the role model used by policy
+type Role struct {
+	RoleID      string `json:"role_id"`
+	Name        string `json:"display_name,omitempty"`
+	Description string `json:"description,omitempty"`
+}
+
+func fromModel(role models.PolicyRole) Role {
+	return Role{
+		RoleID: role.ID.String(),
+		// On create/update, "name" and "description" are not allowed
+		// Name: role.Name,
+		// Description: role.Description,
+	}
+}
+
+// ConvertRoleModels will transform role models returned from "/v1/roles" to the model used by policy
+func ConvertRoleModels(roles []models.PolicyRole) []Role {
+	results := make([]Role, len(roles))
+	for i, r := range roles {
+		results[i] = fromModel(r)
+	}
+	return results
+}
+
+// ConvertV2RoleModels will transform role models returned from "/v2/roles" to the model used by policy
+func ConvertV2RoleModels(roles []iampapv2.Role) []Role {
+	results := make([]Role, len(roles))
+	for i, r := range roles {
+		results[i] = Role{
+			RoleID: r.Crn,
+		}
+	}
+	return results
+}
+
+// Subject is the target to which the policy is assigned
+type Subject struct {
+	Attributes []Attribute `json:"attributes"`
+}
+
+const (
+	AccessGroupIDAttribute   = "accesGroupId"
+	AccountIDAttribute       = "accountId"
+	OrganizationIDAttribute  = "organizationId"
+	SpaceIDAttribute         = "spaceId"
+	RegionAttribute          = "region"
+	ServiceTypeAttribute     = "serviceType"
+	ServiceNameAttribute     = "serviceName"
+	ServiceInstanceAttribute = "serviceInstance"
+	ResourceTypeAttribute    = "resourceType"
+	ResourceAttribute        = "resource"
+	ResourceGroupIDAttribute = "resourceGroupId"
+)
+
+// GetAttribute returns an attribute of policy subject
+func (s *Subject) GetAttribute(name string) string {
+	for _, a := range s.Attributes {
+		if a.Name == name {
+			return a.Value
+		}
+	}
+	return ""
+}
+
+// SetAttribute sets value of an attribute of policy subject
+func (s *Subject) SetAttribute(name string, value string) {
+	// iterate by index so the assignment mutates the slice element,
+	// not a copy of it
+	for i := range s.Attributes {
+		if s.Attributes[i].Name == name {
+			s.Attributes[i].Value = value
+			return
+		}
+	}
+	s.Attributes = append(s.Attributes, Attribute{
+		Name:  name,
+		Value: value,
+	})
+}
+
+// AccessGroupID returns access group ID attribute of policy subject if exists
+func (s *Subject) AccessGroupID() string {
+	return s.GetAttribute("access_group_id")
+}
+
+// AccountID returns account ID attribute of policy subject if exists
+func (s *Subject) AccountID() string {
+	return s.GetAttribute("accountId")
+}
+
+// IAMID returns IAM ID attribute of policy subject if exists
+func (s *Subject) IAMID() string {
+	return s.GetAttribute("iam_id")
+}
+
+// ServiceName returns service name attribute of policy subject if exists
+func (s *Subject) ServiceName() string {
+	return s.GetAttribute("serviceName")
+}
+
+// ServiceInstance returns service instance attribute of policy subject if exists
+func (s *Subject) ServiceInstance() string {
+	return s.GetAttribute("serviceInstance")
+}
+
+// ResourceType returns resource type of the policy subject if exists
+func (s *Subject) ResourceType() string {
+	return s.GetAttribute("resourceType")
+}
+
+// ResourceGroupID returns resource group ID attribute of policy resource if exists
+func (s *Subject) ResourceGroupID() string {
+	return s.GetAttribute(ResourceGroupIDAttribute)
+}
+
+// SetAccessGroupID sets value of access group ID attribute of policy subject
+func (s *Subject) SetAccessGroupID(value string) {
+	s.SetAttribute("access_group_id", value)
+}
+
+// SetAccountID sets value of account ID attribute of policy subject
+func (s *Subject) SetAccountID(value string) {
+	s.SetAttribute("accountId", value)
+}
+
+// SetIAMID sets value of IAM ID attribute of policy subject
+func (s *Subject) SetIAMID(value string) {
+	s.SetAttribute("iam_id", value)
+}
+
+// SetServiceName sets value of service name attribute of policy subject
+func (s *Subject) SetServiceName(value string) {
+	s.SetAttribute("serviceName", value)
+}
+
+// SetServiceInstance sets value of service instance attribute of policy subject
+func (s *Subject) SetServiceInstance(value string) {
+	s.SetAttribute("serviceInstance", value)
+}
+
+// SetResourceType sets value of resource type attribute of policy subject
+func (s *Subject) SetResourceType(value string) {
+	s.SetAttribute("resourceType", value)
+}
+
+// SetResourceGroupID sets value of resource group ID attribute of policy resource
+func (s *Subject) SetResourceGroupID(value string) {
+	s.SetAttribute(ResourceGroupIDAttribute, value)
+}
+
+// Resource is the object controlled by the policy
+type Resource struct {
+	Attributes []Attribute `json:"attributes"`
+}
+
+// GetAttribute returns an attribute of policy resource
+func (r *Resource) GetAttribute(name string) string {
+	for _, a := range r.Attributes {
+		if a.Name == name {
+			return a.Value
+		}
+	}
+	return ""
+}
+
+// SetAttribute sets value of an attribute of policy resource
+func (r *Resource) SetAttribute(name string, value string) {
+	// iterate by index so the assignment mutates the slice element,
+	// not a copy of it
+	for i := range r.Attributes {
+		if r.Attributes[i].Name == name {
+			r.Attributes[i].Value = value
+			return
+		}
+	}
+	r.Attributes = append(r.Attributes, Attribute{
+		Name:  name,
+		Value: value,
+	})
+}
+
+// AccessGroupID returns access group ID attribute of policy resource if exists
+func (r *Resource) AccessGroupID() string {
+	return r.GetAttribute(AccessGroupIDAttribute)
+}
+
+// AccountID returns account ID attribute of policy resource if exists
+func (r *Resource) AccountID() string {
+	return r.GetAttribute(AccountIDAttribute)
+}
+
+// OrganizationID returns organization ID attribute of policy resource if exists
+func (r *Resource) OrganizationID() string {
+	return r.GetAttribute(OrganizationIDAttribute)
+}
+
+// Region returns region attribute of policy resource if exists
+func (r *Resource) Region() string {
+	return r.GetAttribute(RegionAttribute)
+}
+
+// Resource returns resource attribute of policy resource if exists
+func (r *Resource) Resource() string {
+	return r.GetAttribute(ResourceAttribute)
+}
+
+// ResourceType returns resource type attribute of policy resource if exists
+func (r *Resource) ResourceType() string {
+	return r.GetAttribute(ResourceTypeAttribute)
+}
+
+// ResourceGroupID returns resource group ID attribute of policy resource if exists
+func (r *Resource) ResourceGroupID() string {
+	return r.GetAttribute(ResourceGroupIDAttribute)
+}
+
+// ServiceName returns service name attribute of policy resource if exists
+func (r *Resource) ServiceName() string {
+	return r.GetAttribute(ServiceNameAttribute)
+}
+
+// ServiceInstance returns service instance attribute of policy resource if exists
+func (r *Resource) ServiceInstance() string {
+	return r.GetAttribute(ServiceInstanceAttribute)
+}
+
+// SpaceID returns space ID attribute of policy resource if exists
+func (r *Resource) SpaceID() string {
+	return r.GetAttribute(SpaceIDAttribute)
+}
+
+// ServiceType returns service type attribute of policy resource if exists
+func (r *Resource) ServiceType() string {
+	return r.GetAttribute(ServiceTypeAttribute)
+}
+
+// CustomAttributes will return all attributes which are not system defined
+func (r *Resource) CustomAttributes() []Attribute {
+	attributes := []Attribute{}
+	for _, a := range r.Attributes {
+		switch a.Name {
+		case AccessGroupIDAttribute:
+		case AccountIDAttribute:
+		case OrganizationIDAttribute:
+		case SpaceIDAttribute:
+		case RegionAttribute:
+		case ResourceAttribute:
+		case ResourceTypeAttribute:
+		case ResourceGroupIDAttribute:
+		case ServiceTypeAttribute:
+		case ServiceNameAttribute:
+		case ServiceInstanceAttribute:
+		default:
+			attributes = append(attributes, a)
+		}
+	}
+	return attributes
+}
+
+// SetAccessGroupID sets value of access group ID attribute of policy resource
+func (r *Resource) SetAccessGroupID(value string) {
+	r.SetAttribute(AccessGroupIDAttribute, value)
+}
+
+// SetAccountID sets value of account ID attribute of policy resource
+func (r *Resource) SetAccountID(value string) {
+	r.SetAttribute(AccountIDAttribute, value)
+}
+
+// SetOrganizationID sets value of organization ID attribute of policy resource
+func (r *Resource) SetOrganizationID(value string) {
+	r.SetAttribute(OrganizationIDAttribute, value)
+}
+
+// SetRegion sets value of region attribute of policy resource
+func (r *Resource) SetRegion(value string) {
+	r.SetAttribute(RegionAttribute, value)
+}
+
+// SetResource sets value of resource attribute of policy resource
+func (r *Resource) SetResource(value string) {
+	r.SetAttribute(ResourceAttribute, value)
+}
+
+// SetResourceType sets value of resource type attribute of policy resource
+func (r *Resource) SetResourceType(value string) {
+	r.SetAttribute(ResourceTypeAttribute, value)
+}
+
+// SetResourceGroupID sets value of resource group ID attribute of policy resource
+func (r *Resource) SetResourceGroupID(value string) {
+	r.SetAttribute(ResourceGroupIDAttribute, value)
+}
+
+// SetServiceName sets value of service name attribute of policy resource
+func (r *Resource) SetServiceName(value string) {
+	r.SetAttribute(ServiceNameAttribute, value)
+}
+
+// SetServiceInstance sets value of service instance attribute of policy resource
+func (r *Resource) SetServiceInstance(value string) {
+	r.SetAttribute(ServiceInstanceAttribute, value)
+}
+
+// SetSpaceID sets value of space ID attribute of policy resource
+func (r *Resource) SetSpaceID(value string) {
+	r.SetAttribute(SpaceIDAttribute, value)
+}
+
+// SetServiceType sets value of service type attribute of policy resource
+func (r *Resource) SetServiceType(value string) {
+	r.SetAttribute(ServiceTypeAttribute, value)
+}
+
+// Attribute is part of policy subject and resource
+type Attribute struct {
+	Name     string `json:"name"`
+	Value    string `json:"value"`
+	Operator string `json:"operator,omitempty"`
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/v1_policies.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/v1_policies.go
new file mode 100644
index 00000000000..ea0528ed96a
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1/v1_policies.go
@@ -0,0 +1,109 @@
+package iampapv1
+
+import (
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+type SearchParams struct {
+	AccountID     string
+	IAMID         string
+	AccessGroupID string
+	Type          string
+	ServiceType   string
+	Sort          string
+}
+
+func (p SearchParams) buildRequest(r *rest.Request) {
+	if p.AccountID != "" {
+		r.Query("account_id", p.AccountID)
+	}
+	if p.IAMID != "" {
+		r.Query("iam_id", p.IAMID)
+	}
+	if p.AccessGroupID != "" {
+		r.Query("access_group_id", p.AccessGroupID)
+	}
+	if p.Type != "" {
+		r.Query("type", p.Type)
+	}
+	if p.ServiceType != "" {
+		r.Query("service_type", p.ServiceType)
+	}
+	if p.Sort != "" {
+		r.Query("sort", p.Sort)
+	}
+}
+
+type V1PolicyRepository interface {
+	List(params SearchParams) ([]Policy, error)
+	Get(policyID string) (Policy, error)
+	Create(policy Policy) (Policy, error)
+	Update(policyID string, policy Policy, version string) (Policy, error)
+	Delete(policyID string) error
+}
+
+type v1PolicyRepository struct {
+	client *client.Client
+}
+
+func NewV1PolicyRepository(c *client.Client) V1PolicyRepository {
+	return &v1PolicyRepository{
+		client: c,
+	}
+}
+
+func (r *v1PolicyRepository) List(params SearchParams) ([]Policy, error) {
+	request := rest.GetRequest(*r.client.Config.Endpoint + "/v1/policies")
+	params.buildRequest(request)
+
+	response := struct {
`json:"policies"` + }{} + _, err := r.client.SendRequest(request, &response) + if err != nil { + return []Policy{}, err + } + return response.Policies, nil +} + +func (r *v1PolicyRepository) Get(policyID string) (Policy, error) { + var response Policy + resp, err := r.client.Get("/v1/policies/"+policyID, &response) + if err != nil { + return Policy{}, err + } + response.Version = resp.Header.Get("ETag") + return response, nil +} + +func (r *v1PolicyRepository) Create(policy Policy) (Policy, error) { + var response Policy + resp, err := r.client.Post("/v1/policies", &policy, &response) + if err != nil { + return Policy{}, err + } + response.Version = resp.Header.Get("ETag") + return response, nil +} + +func (r *v1PolicyRepository) Update(policyID string, policy Policy, version string) (Policy, error) { + var response Policy + request := rest.PutRequest(*r.client.Config.Endpoint + "/v1/policies/" + policyID) + request = request.Set("If-Match", version).Body(&policy) + + resp, err := r.client.SendRequest(request, &response) + if err != nil { + return Policy{}, err + } + response.Version = resp.Header.Get("Etag") + return response, nil +} + +func (r *v1PolicyRepository) Delete(policyID string) error { + _, err := r.client.Delete("/v1/policies/" + policyID) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv2/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv2/api_service.go new file mode 100644 index 00000000000..383a826a0f9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv2/api_service.go @@ -0,0 +1,68 @@ +package iampapv2 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//IAMPAPAPIV2 is the resource client ... +type IAMPAPAPIV2 interface { + IAMRoles() RoleRepository +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//iamService holds the client +type roleService struct { + *client.Client +} + +//New ... 
+func New(sess *session.Session) (IAMPAPAPIV2, error) {
+	config := sess.Config.Copy()
+	err := config.ValidateConfigForService(bluemix.IAMPAPServicev2)
+	if err != nil {
+		return nil, err
+	}
+	if config.HTTPClient == nil {
+		config.HTTPClient = http.NewHTTPClient(config)
+	}
+	tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{
+		DefaultHeader: gohttp.Header{
+			"User-Agent": []string{http.UserAgent()},
+		},
+		HTTPClient: config.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if config.IAMAccessToken == "" {
+		err := authentication.PopulateTokens(tokenRefresher, config)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if config.Endpoint == nil {
+		ep, err := config.EndpointLocator.IAMEndpoint()
+		if err != nil {
+			return nil, err
+		}
+		config.Endpoint = &ep
+	}
+
+	return &roleService{
+		Client: client.New(config, bluemix.IAMPAPServicev2, tokenRefresher),
+	}, nil
+}
+
+//IAMRoles API
+func (a *roleService) IAMRoles() RoleRepository {
+	return NewRoleRepository(a.Client)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv2/roles.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv2/roles.go
new file mode 100644
index 00000000000..b2d02f1aed5
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv2/roles.go
@@ -0,0 +1,180 @@
+package iampapv2
+
+import (
+	"fmt"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+type CreateRoleRequest struct {
+	Name        string   `json:"name"`
+	ServiceName string   `json:"service_name"`
+	AccountID   string   `json:"account_id"`
+	DisplayName string   `json:"display_name"`
+	Description string   `json:"description"`
+	Actions     []string `json:"actions,omitempty"`
+}
+
+type UpdateRoleRequest struct {
+	DisplayName string   `json:"display_name"`
+	Description string   `json:"description"`
+	Actions     []string `json:"actions,omitempty"`
+}
+
+type Role struct {
+	CreateRoleRequest
+	ID               string `json:"id"`
+	Crn              string `json:"crn"`
+	CreatedAt        string `json:"created_at"`
+	CreatedByID      string `json:"created_by_id"`
+	LastModifiedAt   string `json:"last_modified_at"`
+	LastModifiedByID string `json:"last_modified_by_id"`
+}
+
+type ListResponse struct {
+	CustomRoles  []Role `json:"custom_roles"`
+	ServiceRoles []Role `json:"service_roles"`
+	SystemRoles  []Role `json:"system_roles"`
+}
+
+type RoleRepository interface {
+	Get(roleID string) (Role, string, error)
+	Create(request CreateRoleRequest) (Role, error)
+	Update(request UpdateRoleRequest, roleID, etag string) (Role, error)
+	Delete(roleID string) error
+	ListCustomRoles(accountID, serviceName string) ([]Role, error)
+	ListSystemDefinedRoles() ([]Role, error)
+	ListServiceRoles(serviceName string) ([]Role, error)
+	ListAll(query RoleQuery) ([]Role, error)
+}
+
+type roleRepository struct {
+	client *client.Client
+}
+
+func NewRoleRepository(c *client.Client) RoleRepository {
+	return &roleRepository{
+		client: c,
+	}
+}
+
+type RoleQueryFormatParameter string
+
+type RoleQuery struct {
+	AccountID   string
+	ServiceName string
+	Format      RoleQueryFormatParameter
+}
+
+// SetQuery sets the query parameters on the passed-in request
+func (q RoleQuery) SetQuery(req *rest.Request) {
+	if q.AccountID != "" {
+		req.Query("account_id", q.AccountID)
+	}
+	if q.ServiceName != "" {
+		req.Query("service_name", q.ServiceName)
+	}
+	if q.Format != "" {
+		req.Query("format", string(q.Format))
+	}
+}
+
+func (r *roleRepository) Create(request CreateRoleRequest) (Role, error) {
+	res := Role{}
+	_, err := r.client.Post("/v2/roles", &request, &res)
+	if err != nil {
+		return Role{}, err
+	}
+	return res, nil
+}
+
+func (r *roleRepository) Get(roleID string) (Role, string, error) {
+	res := Role{}
+	response, err := r.client.Get(fmt.Sprintf("/v2/roles/%s", roleID), &res)
+	if err != nil {
+		return Role{}, "", err
+	}
+	return res, response.Header.Get("Etag"), nil
+}
+
+func (r *roleRepository) Update(request UpdateRoleRequest, roleID, etag string) (Role, error) {
+	res := Role{}
+	header := make(map[string]string)
+
+	header["IF-Match"] = etag
+	_, err := r.client.Put(fmt.Sprintf("/v2/roles/%s", roleID), &request, &res, header)
+	if err != nil {
+		return Role{}, err
+	}
+	return res, nil
+}
+
+//Delete removes the custom role with the given ID
+func (r *roleRepository) Delete(roleID string) error {
+	_, err := r.client.Delete(fmt.Sprintf("/v2/roles/%s", roleID))
+	return err
+}
+
+func (r *roleRepository) ListCustomRoles(accountID, serviceName string) ([]Role, error) {
+	res := ListResponse{}
+	requestpath := fmt.Sprintf("/v2/roles?account_id=%s", accountID)
+
+	_, err := r.client.Get(requestpath, &res)
+	if err != nil {
+		return []Role{}, err
+	}
+	if serviceName == "" {
+		return res.CustomRoles, nil
+	} else {
+		var matchingRoles []Role
+		for _, role := range res.CustomRoles {
+			if role.ServiceName == serviceName {
+				matchingRoles = append(matchingRoles, role)
+			}
+		}
+		return matchingRoles, nil
+	}
+}
+
+func (r *roleRepository) ListSystemDefinedRoles() ([]Role, error) {
+	res := ListResponse{}
+	requestpath := "/v2/roles"
+	_, err := r.client.Get(requestpath, &res)
+	if err != nil {
+		return []Role{}, err
+	}
+	return res.SystemRoles, nil
+}
+
+func (r *roleRepository) ListServiceRoles(serviceName string) ([]Role, error) {
+	res := ListResponse{}
+	requestpath := fmt.Sprintf("/v2/roles?service_name=%s", serviceName)
+	_, err := r.client.Get(requestpath, &res)
+	if err != nil {
+		return []Role{}, err
+	}
+	return res.ServiceRoles, nil
+}
+
+func (r *roleRepository) ListAll(query RoleQuery) ([]Role, error) {
+	response, err := r.query(query)
+	if err != nil {
+		return []Role{}, err
+	}
+	return append(response.CustomRoles, append(response.ServiceRoles, response.SystemRoles...)...), nil
+}
+
+func (r *roleRepository) query(query RoleQuery) (ListResponse, error) {
+	req := rest.GetRequest(*r.client.Config.Endpoint + "/v2/roles")
+	query.SetQuery(req)
+
+	var response ListResponse
+	_, err := r.client.SendRequest(req, &response)
+
+	return response, err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/access_group.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/access_group.go
new file mode 100644
index 00000000000..6521f837bc3
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/access_group.go
@@ -0,0 +1,114 @@
+package iamuumv1
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/helpers"
+	"github.com/IBM-Cloud/bluemix-go/models"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+type Groups struct {
+	PaginationFields
+	Groups []models.AccessGroup `json:"groups"`
+}
+
+func (g *Groups) Resources() []interface{} {
+	r := make([]interface{}, len(g.Groups))
+	for i := range g.Groups {
+		r[i] = g.Groups[i]
+	}
+	return r
+}
+
+type AccessGroupRepository interface {
+	List(accountID string) ([]models.AccessGroup, error)
+	Create(group models.AccessGroup, accountID string) (*models.AccessGroup, error)
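+	// FindByName pages through the account's access groups client-side and
+	// returns only exact name matches.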
+	FindByName(name string, accountID string) ([]models.AccessGroup, error)
+	Delete(accessGroupID string, recursive bool) error
+	Update(accessGroupID string, group AccessGroupUpdateRequest, revision string) (models.AccessGroup, error)
+	Get(accessGroupID string) (group *models.AccessGroup, revision string, err error)
+}
+
+type accessGroupRepository struct {
+	client *client.Client
+}
+
+type AccessGroupUpdateRequest struct {
+	Name        string `json:"name,omitempty"`
+	Description string `json:"description,omitempty"`
+}
+
+func NewAccessGroupRepository(c *client.Client) AccessGroupRepository {
+	return &accessGroupRepository{
+		client: c,
+	}
+}
+
+func (r *accessGroupRepository) List(accountID string) ([]models.AccessGroup, error) {
+	var groups []models.AccessGroup
+	_, err := r.client.GetPaginated(fmt.Sprintf("/v1/groups?account=%s", url.QueryEscape(accountID)), NewPaginatedResourcesHandler(&Groups{}), func(v interface{}) bool {
+		groups = append(groups, v.(models.AccessGroup))
+		return true
+	})
+	if err != nil {
+		return []models.AccessGroup{}, err
+	}
+	return groups, err
+}
+
+func (r *accessGroupRepository) Create(accessGroup models.AccessGroup, accountID string) (*models.AccessGroup, error) {
+	req := rest.PostRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/groups")).Query("account", accountID).Body(accessGroup)
+
+	newAccessGroup := models.AccessGroup{}
+	_, err := r.client.SendRequest(req, &newAccessGroup)
+	if err != nil {
+		return nil, err
+	}
+	return &newAccessGroup, nil
+}
+
+func (r *accessGroupRepository) FindByName(name string, accountID string) ([]models.AccessGroup, error) {
+	var groups []models.AccessGroup
+	_, err := r.client.GetPaginated(fmt.Sprintf("/v1/groups?account=%s", url.QueryEscape(accountID)), NewPaginatedResourcesHandler(&Groups{}), func(v interface{}) bool {
+		if v.(models.AccessGroup).Name == name {
+			groups = append(groups, v.(models.AccessGroup))
+		}
+		return true
+	})
+	if err != nil {
+		return []models.AccessGroup{}, err
+	}
+	return groups, err
+}
+
+func (r *accessGroupRepository) Delete(accessGroupID string, recursive bool) error {
+	req := rest.DeleteRequest((helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/groups/"+accessGroupID)))
+
+	if recursive {
+		req = req.Query("force", "true")
+	}
+	_, err := r.client.SendRequest(req, nil)
+	return err
+}
+
+func (r *accessGroupRepository) Update(accessGroupID string, group AccessGroupUpdateRequest, revision string) (models.AccessGroup, error) {
+	req := rest.PatchRequest((helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/groups/"+accessGroupID))).Body(group).Add("If-Match", revision)
+	resp := models.AccessGroup{}
+	_, err := r.client.SendRequest(req, &resp)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+func (r *accessGroupRepository) Get(accessGroupID string) (*models.AccessGroup, string, error) {
+	group := models.AccessGroup{}
+	response, err := r.client.Get("/v1/groups/"+url.PathEscape(accessGroupID), &group)
+	if err != nil {
+		return &group, "", err
+	}
+	return &group, response.Header.Get("Etag"), nil
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/access_group_members.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/access_group_members.go
new file mode 100644
index 00000000000..77454946a87
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/access_group_members.go
@@ -0,0 +1,95 @@
+package iamuumv1
+
+import (
+	"fmt"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
"github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/models" +) + +const ( + AccessGroupMemberUser = "user" + AccessGroupMemberService = "service" +) + +type AddGroupMemberRequest struct { + Members []models.AccessGroupMember `json:"members"` +} + +type AddGroupMemberResponse struct { + Members []AddedGroupMember `json:"members"` +} + +type AddedGroupMember struct { + ID string `json:"id"` + + Msg string `json:"msg,omitempty"` + + Type string `json:"type"` + + OK bool `json:"ok,omitempty"` +} + +type GroupMembers struct { + PaginationFields + Members []models.AccessGroupMember `json:"members"` +} + +func (gm *GroupMembers) Resources() []interface{} { + r := make([]interface{}, len(gm.Members)) + for i := range gm.Members { + r[i] = gm.Members[i] + } + return r +} + +type AccessGroupMemberRepository interface { + List(groupID string) ([]models.AccessGroupMember, error) + Add(groupID string, request AddGroupMemberRequest) (AddGroupMemberResponse, error) + Remove(groupID string, memberID string) error +} + +type accessGroupMemberRepository struct { + client *client.Client +} + +func NewAccessGroupMemberRepository(c *client.Client) AccessGroupMemberRepository { + return &accessGroupMemberRepository{ + client: c, + } + +} + +func (r *accessGroupMemberRepository) List(groupID string) ([]models.AccessGroupMember, error) { + members := []models.AccessGroupMember{} + _, err := r.client.GetPaginated(fmt.Sprintf("/v1/groups/%s/members", groupID), + NewPaginatedResourcesHandler(&GroupMembers{}), func(resource interface{}) bool { + if member, ok := resource.(models.AccessGroupMember); ok { + members = append(members, member) + return true + } + return false + }) + if err != nil { + return []models.AccessGroupMember{}, err + } + return members, nil +} + +func (r *accessGroupMemberRepository) Add(groupID string, request AddGroupMemberRequest) (AddGroupMemberResponse, error) { + res := AddGroupMemberResponse{} + _, err := r.client.Put(fmt.Sprintf("/v1/groups/%s/members", groupID), &request, &res) + if err != nil { + return AddGroupMemberResponse{}, err + } + return res, nil +} + +func (r *accessGroupMemberRepository) Remove(groupID string, memberID string) error { + _, err := r.client.Delete(helpers.Tprintf("/v1/groups/{{.GroupID}}/members/{{.MemberID}}", map[string]interface{}{ + "GroupID": groupID, + "MemberID": memberID, + })) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/api_service.go new file mode 100644 index 00000000000..e81956a93aa --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/api_service.go @@ -0,0 +1,74 @@ +package iamuumv1 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//IAMUUMServiceAPI is the resource client ... +type IAMUUMServiceAPI interface { + AccessGroup() AccessGroupRepository + AccessGroupMember() AccessGroupMemberRepository +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//iamService holds the client +type iamuumService struct { + *client.Client +} + +//New ... 
+func New(sess *session.Session) (IAMUUMServiceAPI, error) {
+	config := sess.Config.Copy()
+	err := config.ValidateConfigForService(bluemix.IAMUUMService)
+	if err != nil {
+		return nil, err
+	}
+	if config.HTTPClient == nil {
+		config.HTTPClient = http.NewHTTPClient(config)
+	}
+	tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{
+		DefaultHeader: gohttp.Header{
+			"User-Agent": []string{http.UserAgent()},
+		},
+		HTTPClient: config.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if config.IAMAccessToken == "" {
+		err := authentication.PopulateTokens(tokenRefresher, config)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if config.Endpoint == nil {
+		ep, err := config.EndpointLocator.IAMEndpoint()
+		if err != nil {
+			return nil, err
+		}
+		config.Endpoint = &ep
+	}
+
+	return &iamuumService{
+		Client: client.New(config, bluemix.IAMUUMService, tokenRefresher),
+	}, nil
+}
+
+//AccessGroup API
+func (a *iamuumService) AccessGroup() AccessGroupRepository {
+	return NewAccessGroupRepository(a.Client)
+}
+
+//AccessGroupMember API
+func (a *iamuumService) AccessGroupMember() AccessGroupMemberRepository {
+	return NewAccessGroupMemberRepository(a.Client)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/pagination.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/pagination.go
new file mode 100644
index 00000000000..4e45313c109
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1/pagination.go
@@ -0,0 +1,70 @@
+package iamuumv1
+
+import (
+	"encoding/json"
+	"net/url"
+	"reflect"
+)
+
+type PaginatedResourcesHandler struct {
+	resourcesType reflect.Type
+}
+
+func NewPaginatedResourcesHandler(resources PaginatedResources) PaginatedResourcesHandler {
+	return PaginatedResourcesHandler{
+		resourcesType: reflect.TypeOf(resources).Elem(),
+	}
+}
+
+func (pr PaginatedResourcesHandler) Resources(bytes []byte, curPath string) ([]interface{}, string, error) {
+	paginatedResources := reflect.New(pr.resourcesType).Interface().(PaginatedResources)
+	err := json.Unmarshal(bytes, paginatedResources)
+
+	if err != nil {
+		return []interface{}{}, "", err
+	}
+
+	nextPath, err := paginatedResources.NextPath()
+
+	if err != nil {
+		return []interface{}{}, "", err
+	}
+
+	return paginatedResources.Resources(), nextPath, nil
+}
+
+type PaginationHref struct {
+	Href string `json:"href"`
+}
+
+type PaginationFields struct {
+	First    PaginationHref `json:"first"`
+	Last     PaginationHref `json:"last"`
+	Next     PaginationHref `json:"next"`
+	Previous PaginationHref `json:"previous"`
+
+	Offset     int `json:"offset"`
+	Limit      int `json:"limit"`
+	TotalCount int `json:"total_count"`
+}
+
+func (p *PaginationFields) NextPath() (string, error) {
+	if p.Next.Href == "" {
+		return "", nil
+	}
+
+	u, err := url.Parse(p.Next.Href)
+	if err == nil {
+		u.Scheme = ""
+		u.Opaque = ""
+		u.Host = ""
+		u.User = nil
+		return u.String(), nil
+	}
+	return "", err
+}
+
+type PaginatedResources interface {
+	NextPath() (string, error)
+	Resources() []interface{}
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/access_group.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/access_group.go
new file mode 100644
index 00000000000..abc79d834c4
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/access_group.go
@@ -0,0 +1,122 @@
+package iamuumv2
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/helpers"
"github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +type Groups struct { + PaginationFields + Groups []models.AccessGroupV2 `json:"groups"` +} + +func (g *Groups) Resources() []interface{} { + r := make([]interface{}, len(g.Groups)) + for i := range g.Groups { + r[i] = g.Groups[i] + } + return r +} + +type AccessGroupRepository interface { + List(accountID string, queryParams ...string) ([]models.AccessGroupV2, error) + Create(group models.AccessGroupV2, accountID string) (*models.AccessGroupV2, error) + FindByName(name string, accountID string) ([]models.AccessGroupV2, error) + Delete(accessGroupID string, recursive bool) error + Update(accessGroupID string, group AccessGroupUpdateRequest, revision string) (models.AccessGroupV2, error) + Get(accessGroupID string) (group *models.AccessGroupV2, revision string, err error) +} + +type accessGroupRepository struct { + client *client.Client +} + +type AccessGroupUpdateRequest struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` +} + +func NewAccessGroupRepository(c *client.Client) AccessGroupRepository { + return &accessGroupRepository{ + client: c, + } +} + +func (r *accessGroupRepository) List(accountID string, queryParams ...string) ([]models.AccessGroupV2, error) { + var groups []models.AccessGroupV2 + var err error + if len(queryParams) != 0 { + _, err = r.client.GetPaginated(fmt.Sprintf("/v2/groups?account_id=%s&iam_id=%s", url.QueryEscape(accountID), queryParams[0]), NewPaginatedResourcesHandler(&Groups{}), func(v interface{}) bool { + groups = append(groups, v.(models.AccessGroupV2)) + return true + }) + } else { + _, err = r.client.GetPaginated(fmt.Sprintf("/v2/groups?account_id=%s", url.QueryEscape(accountID)), NewPaginatedResourcesHandler(&Groups{}), func(v interface{}) bool { + groups = append(groups, v.(models.AccessGroupV2)) + return true + }) + } + if err != nil { + return []models.AccessGroupV2{}, err + } + return groups, err +} + +func (r *accessGroupRepository) Create(accessGroup models.AccessGroupV2, accountID string) (*models.AccessGroupV2, error) { + req := rest.PostRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v2/groups")).Query("account_id", accountID).Body(accessGroup) + + newAccessGroup := models.AccessGroupV2{} + _, err := r.client.SendRequest(req, &newAccessGroup) + if err != nil { + return nil, err + } + return &newAccessGroup, nil +} + +func (r *accessGroupRepository) FindByName(name string, accountID string) ([]models.AccessGroupV2, error) { + var groups []models.AccessGroupV2 + _, err := r.client.GetPaginated(fmt.Sprintf("/v2/groups?account=%s", url.QueryEscape(accountID)), NewPaginatedResourcesHandler(&Groups{}), func(v interface{}) bool { + if v.(models.AccessGroupV2).AccessGroup.Name == name { + groups = append(groups, v.(models.AccessGroupV2)) + } + return true + }) + if err != nil { + return []models.AccessGroupV2{}, err + } + return groups, err +} + +func (r *accessGroupRepository) Delete(accessGroupID string, recursive bool) error { + req := rest.DeleteRequest((helpers.GetFullURL(*r.client.Config.Endpoint, "/v2/groups/"+accessGroupID))) + + if recursive { + req = req.Query("force", "true") + } + _, err := r.client.SendRequest(req, nil) + return err +} + +func (r *accessGroupRepository) Update(accessGroupID string, group AccessGroupUpdateRequest, revision string) (models.AccessGroupV2, error) { + req := rest.PatchRequest((helpers.GetFullURL(*r.client.Config.Endpoint, 
"/v2/groups/"+accessGroupID))).Body(group).Add("If-Match", revision) + resp := models.AccessGroupV2{} + _, err := r.client.SendRequest(req, &resp) + if err != nil { + return resp, err + } + return resp, nil +} + +func (r *accessGroupRepository) Get(accessGroupID string) (*models.AccessGroupV2, string, error) { + group := models.AccessGroupV2{} + response, err := r.client.Get("/v2/groups/"+url.PathEscape(accessGroupID), &group) + if err != nil { + return &group, "", err + } + return &group, response.Header.Get("Etag"), nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/access_group_members.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/access_group_members.go new file mode 100644 index 00000000000..ac348b2ab1a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/access_group_members.go @@ -0,0 +1,99 @@ +package iamuumv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/models" +) + +const ( + AccessGroupMemberUser = "user" + AccessGroupMemberService = "service" +) + +type AddGroupMemberRequestV2 struct { + Members []models.AccessGroupMemberV2 `json:"members"` +} + +type AddGroupMemberResponseV2 struct { + Members []AddedGroupMemberV2 `json:"members"` +} + +type AddedGroupMemberV2 struct { + ID string `json:"iam_id"` + Type string `json:"type"` + Href string `json:"href,omitempty"` + StatusCode int `json:"status_code,omitempty"` + Trace string `json:"trace,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + CreatedByID string `json:"created_by_id,omitempty"` + Errors []Error `json:"errors,omitempty"` +} +type Error struct { + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} +type GroupMembers struct { + PaginationFields + Members []models.AccessGroupMemberV2 `json:"members"` +} + +func (gm *GroupMembers) Resources() []interface{} { + r := make([]interface{}, len(gm.Members)) + for i := range gm.Members { + r[i] = gm.Members[i] + } + return r +} + +type AccessGroupMemberRepositoryV2 interface { + List(groupID string) ([]models.AccessGroupMemberV2, error) + Add(groupID string, request AddGroupMemberRequestV2) (AddGroupMemberResponseV2, error) + Remove(groupID string, memberID string) error +} + +type accessGroupMemberRepository struct { + client *client.Client +} + +func NewAccessGroupMemberRepository(c *client.Client) AccessGroupMemberRepositoryV2 { + return &accessGroupMemberRepository{ + client: c, + } + +} + +func (r *accessGroupMemberRepository) List(groupID string) ([]models.AccessGroupMemberV2, error) { + members := []models.AccessGroupMemberV2{} + _, err := r.client.GetPaginated(fmt.Sprintf("/v2/groups/%s/members", groupID), + NewPaginatedResourcesHandler(&GroupMembers{}), func(resource interface{}) bool { + if member, ok := resource.(models.AccessGroupMemberV2); ok { + members = append(members, member) + return true + } + return false + }) + if err != nil { + return []models.AccessGroupMemberV2{}, err + } + return members, nil +} + +func (r *accessGroupMemberRepository) Add(groupID string, request AddGroupMemberRequestV2) (AddGroupMemberResponseV2, error) { + res := AddGroupMemberResponseV2{} + _, err := r.client.Put(fmt.Sprintf("/v2/groups/%s/members", groupID), &request, &res) + if err != nil { + return AddGroupMemberResponseV2{}, err + } + return res, nil +} + +func (r *accessGroupMemberRepository) Remove(groupID string, memberID string) error { + _, err := 
r.client.Delete(helpers.Tprintf("/v2/groups/{{.GroupID}}/members/{{.MemberID}}", map[string]interface{}{ + "GroupID": groupID, + "MemberID": memberID, + })) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/api_service.go new file mode 100644 index 00000000000..8fb37972278 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/api_service.go @@ -0,0 +1,80 @@ +package iamuumv2 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//IAMUUMServiceAPIv2 is the resource client ... +type IAMUUMServiceAPIv2 interface { + AccessGroup() AccessGroupRepository + AccessGroupMember() AccessGroupMemberRepositoryV2 + DynamicRule() DynamicRuleRepository +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//iamService holds the client +type iamuumService struct { + *client.Client +} + +//New ... +func New(sess *session.Session) (IAMUUMServiceAPIv2, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.IAMUUMServicev2) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.IAMEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &iamuumService{ + Client: client.New(config, bluemix.IAMUUMServicev2, tokenRefreher), + }, nil +} + +//AccessGroup API +func (a *iamuumService) AccessGroup() AccessGroupRepository { + return NewAccessGroupRepository(a.Client) +} + +// AccessGroupMember API +func (a *iamuumService) AccessGroupMember() AccessGroupMemberRepositoryV2 { + return NewAccessGroupMemberRepository(a.Client) +} + +// Dynamic Rule API +func (a *iamuumService) DynamicRule() DynamicRuleRepository { + return NewDynamicRuleRepository(a.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/dynamic_rules.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/dynamic_rules.go new file mode 100644 index 00000000000..94c09aa7399 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/dynamic_rules.go @@ -0,0 +1,98 @@ +package iamuumv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +type CreateRuleRequest struct { + Name string `json:"name"` + Expiration int `json:"expiration"` + RealmName string `json:"realm_name,omitempty"` + Conditions []Condition `json:"conditions,omitempty"` +} + +type Condition struct { + Claim string `json:"claim"` + Operator string `json:"operator"` + Value string `json:"value,omitempty"` +} + +type CreateRuleResponse struct { + CreateRuleRequest + RuleID string `json:"id"` + AccessGroupID string `json:"access_group_id"` + AccountID string `json:"account_id"` + CreatedAt string `json:"created_at"` + CreatedByID string 
`json:"created_by_id"` + LastModifiedAt string `json:"last_modified_at"` + LastModifiedByID string `json:"last_modified_by_id"` +} + +type GetResponse struct { + Rules []CreateRuleResponse `json:"rules"` +} + +type DynamicRuleRepository interface { + Create(groupID string, request CreateRuleRequest) (CreateRuleResponse, error) + List(groupID string) ([]CreateRuleResponse, error) + Get(groupID, ruleID string) (CreateRuleResponse, string, error) + Replace(groupID, ruleID string, request CreateRuleRequest, etag string) (CreateRuleResponse, error) + Delete(groupID, ruleID string) error +} + +type dynamicRuleRepository struct { + client *client.Client +} + +func NewDynamicRuleRepository(c *client.Client) DynamicRuleRepository { + return &dynamicRuleRepository{ + client: c, + } +} + +func (r *dynamicRuleRepository) Create(groupID string, request CreateRuleRequest) (CreateRuleResponse, error) { + res := CreateRuleResponse{} + _, err := r.client.Post(fmt.Sprintf("/v2/groups/%s/rules", groupID), &request, &res) + if err != nil { + return CreateRuleResponse{}, err + } + return res, nil +} + +func (r *dynamicRuleRepository) List(groupID string) ([]CreateRuleResponse, error) { + res := GetResponse{} + _, err := r.client.Get(fmt.Sprintf("/v2/groups/%s/rules", groupID), &res) + if err != nil { + return []CreateRuleResponse{}, err + } + return res.Rules, nil +} + +func (r *dynamicRuleRepository) Get(groupID, ruleID string) (CreateRuleResponse, string, error) { + res := CreateRuleResponse{} + response, err := r.client.Get(fmt.Sprintf("/v2/groups/%s/rules/%s", groupID, ruleID), &res) + if err != nil { + return CreateRuleResponse{}, "", err + } + return res, response.Header.Get("Etag"), nil +} + +func (r *dynamicRuleRepository) Replace(groupID, ruleID string, request CreateRuleRequest, etag string) (CreateRuleResponse, error) { + res := CreateRuleResponse{} + header := make(map[string]string) + + header["IF-Match"] = etag + _, err := r.client.Put(fmt.Sprintf("/v2/groups/%s/rules/%s", groupID, ruleID), &request, &res, header) + if err != nil { + return CreateRuleResponse{}, err + } + return res, nil +} + +//Delete Function +func (r *dynamicRuleRepository) Delete(groupID, ruleID string) error { + _, err := r.client.Delete(fmt.Sprintf("/v2/groups/%s/rules/%s", groupID, ruleID)) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/pagination.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/pagination.go new file mode 100644 index 00000000000..d8900e7775a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2/pagination.go @@ -0,0 +1,70 @@ +package iamuumv2 + +import ( + "encoding/json" + "net/url" + "reflect" +) + +type PaginatedResourcesHandler struct { + resourcesType reflect.Type +} + +func NewPaginatedResourcesHandler(resources PaginatedResources) PaginatedResourcesHandler { + return PaginatedResourcesHandler{ + resourcesType: reflect.TypeOf(resources).Elem(), + } +} + +func (pr PaginatedResourcesHandler) Resources(bytes []byte, curPath string) ([]interface{}, string, error) { + paginatedResources := reflect.New(pr.resourcesType).Interface().(PaginatedResources) + err := json.Unmarshal(bytes, paginatedResources) + + if err != nil { + return []interface{}{}, "", err + } + + nextPath, err := paginatedResources.NextPath() + + if err != nil { + return []interface{}{}, "", err + } + + return paginatedResources.Resources(), nextPath, nil +} + +type PaginationHref struct { + Href string `json:"href"` +} + +type PaginationFields struct { + 
+	First    PaginationHref `json:"first"`
+	Last     PaginationHref `json:"last"`
+	Next     PaginationHref `json:"next"`
+	Previous PaginationHref `json:"previous"`
+
+	Offset     int `json:"offset"`
+	Limit      int `json:"limit"`
+	TotalCount int `json:"total_count"`
+}
+
+func (p *PaginationFields) NextPath() (string, error) {
+	if p.Next.Href == "" {
+		return "", nil
+	}
+
+	u, err := url.Parse(p.Next.Href)
+	if err == nil {
+		u.Scheme = ""
+		u.Opaque = ""
+		u.Host = ""
+		u.User = nil
+		return u.String(), nil
+	}
+	return "", err
+}
+
+type PaginatedResources interface {
+	NextPath() (string, error)
+	Resources() []interface{}
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/api_service.go
new file mode 100644
index 00000000000..b3d992ab05d
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/api_service.go
@@ -0,0 +1,104 @@
+package icdv4
+
+import (
+	gohttp "net/http"
+
+	bluemix "github.com/IBM-Cloud/bluemix-go"
+	"github.com/IBM-Cloud/bluemix-go/authentication"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/http"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+	"github.com/IBM-Cloud/bluemix-go/session"
+)
+
+//ErrCodeAPICreation ...
+const ErrCodeAPICreation = "APICreationError"
+
+//ICDServiceAPI is the IBM Cloud Databases (ICD) API client ...
+type ICDServiceAPI interface {
+	Cdbs() Cdbs
+	Users() Users
+	Whitelists() Whitelists
+	Groups() Groups
+	Tasks() Tasks
+	Connections() Connections
+	AutoScaling() AutoScaling
+}
+
+//icdService holds the client
+type icdService struct {
+	*client.Client
+}
+
+//New ...
+func New(sess *session.Session) (ICDServiceAPI, error) {
+	config := sess.Config.Copy()
+	err := config.ValidateConfigForService(bluemix.ICDService)
+	if err != nil {
+		return nil, err
+	}
+	if config.HTTPClient == nil {
+		config.HTTPClient = http.NewHTTPClient(config)
+	}
+	tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{
+		DefaultHeader: gohttp.Header{
+			"User-Agent": []string{http.UserAgent()},
+		},
+		HTTPClient: config.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if config.IAMAccessToken == "" {
+		err := authentication.PopulateTokens(tokenRefresher, config)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if config.Endpoint == nil {
+		ep, err := config.EndpointLocator.ICDEndpoint()
+		if err != nil {
+			return nil, err
+		}
+		config.Endpoint = &ep
+	}
+
+	return &icdService{
+		Client: client.New(config, bluemix.ICDService, tokenRefresher),
+	}, nil
+}
+
+//Cdbs implements deployments API
+func (c *icdService) Cdbs() Cdbs {
+	return newCdbAPI(c.Client)
+}
+
+//Users implements users API
+func (c *icdService) Users() Users {
+	return newUsersAPI(c.Client)
+}
+
+//Whitelists implements whitelists API
+func (c *icdService) Whitelists() Whitelists {
+	return newWhitelistAPI(c.Client)
+}
+
+//Groups implements groups API
+func (c *icdService) Groups() Groups {
+	return newGroupAPI(c.Client)
+}
+
+//Tasks implements tasks API
+func (c *icdService) Tasks() Tasks {
+	return newTaskAPI(c.Client)
+}
+
+//Connections implements connections API
+func (c *icdService) Connections() Connections {
+	return newConnectionAPI(c.Client)
+}
+
+//AutoScaling implements AutoScaling API
+func (c *icdService) AutoScaling() AutoScaling {
+	return newAutoScalingAPI(c.Client)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/auto_scaling.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/auto_scaling.go
new file mode 100644
index
00000000000..0d56395ed0f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/auto_scaling.go @@ -0,0 +1,117 @@ +package icdv4 + +import ( + "encoding/json" + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/utils" +) + +// AutoscalingSetGroup ... +type AutoscalingSetGroup struct { + Autoscaling AutoscalingGroup `json:"autoscaling,omitempty"` +} + +// AutoscalingGroup ... +type AutoscalingGroup struct { + Memory *ASGBody `json:"memory,omitempty"` + CPU *ASGBody `json:"cpu,omitempty"` + Disk *ASGBody `json:"disk,omitempty"` +} + +// ASGBody ... +type ASGBody struct { + Scalers ScalersBody `json:"scalers,omitempty"` + Rate RateBody `json:"rate,omitempty"` +} + +// RateBody ... +type RateBody struct { + IncreasePercent int `json:"increase_percent,omitempty"` + PeriodSeconds int `json:"period_seconds,omitempty"` + LimitCountPerMember int `json:"limit_count_per_member,omitempty"` + LimitMBPerMember int `json:"limit_mb_per_member,omitempty"` + Units string `json:"units,omitempty"` +} + +// ScalersBody ... +type ScalersBody struct { + Capacity *CapacityBody `json:"capacity,omitempty"` + IO *IOBody `json:"io_utilization,omitempty"` +} + +// CapacityBody ... +type CapacityBody struct { + Enabled bool `json:"enabled"` + FreeSpaceRemainingPercent int `json:"free_space_remaining_percent,omitempty"` + FreeSpaceLessThanPercent int `json:"free_space_less_than_percent,omitempty"` +} + +// IOBody ... +type IOBody struct { + Enabled bool `json:"enabled"` + AbovePercent int `json:"above_percent,omitempty"` + OverPeriod string `json:"over_period,omitempty"` +} + +// AutoscalingGetGroup ... +type AutoscalingGetGroup struct { + Autoscaling AutoscalingGet `json:"autoscaling,omitempty"` +} + +// AutoscalingGet ... +type AutoscalingGet struct { + Memory ASGGet `json:"memory,omitempty"` + CPU ASGGet `json:"cpu,omitempty"` + Disk ASGGet `json:"disk,omitempty"` +} + +// ASGGet ... +type ASGGet struct { + Scalers ScalersBody `json:"scalers,omitempty"` + Rate Rate `json:"rate,omitempty"` +} + +// Rate ... +type Rate struct { + IncreasePercent json.Number `json:"increase_percent,omitempty"` + PeriodSeconds int `json:"period_seconds,omitempty"` + LimitCountPerMember int `json:"limit_count_per_member,omitempty"` + LimitMBPerMember json.Number `json:"limit_mb_per_member,omitempty"` + Units string `json:"units,omitempty"` +} + +type autoScaling struct { + client *client.Client +} + +// AutoScaling ... 
+type AutoScaling interface { + GetAutoScaling(icdID string, groupID string) (AutoscalingGetGroup, error) + SetAutoScaling(icdID string, groupID string, AutoScaleReq AutoscalingSetGroup) (Task, error) +} + +func newAutoScalingAPI(c *client.Client) AutoScaling { + return &autoScaling{ + client: c, + } +} +func (r *autoScaling) GetAutoScaling(icdID string, groupID string) (AutoscalingGetGroup, error) { + autoscalingGroupResult := AutoscalingGetGroup{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/groups/%s/autoscaling", utils.EscapeUrlParm(icdID), groupID) + _, err := r.client.Get(rawURL, &autoscalingGroupResult) + if err != nil { + return autoscalingGroupResult, err + } + return autoscalingGroupResult, nil +} +func (r *autoScaling) SetAutoScaling(icdID string, groupID string, AutoScaleReq AutoscalingSetGroup) (Task, error) { + taskResult := TaskResult{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/groups/%s/autoscaling", utils.EscapeUrlParm(icdID), groupID) + _, err := r.client.Patch(rawURL, &AutoScaleReq, &taskResult) + if err != nil { + return taskResult.Task, err + } + return taskResult.Task, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/cdb.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/cdb.go new file mode 100644 index 00000000000..293dd7d3a14 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/cdb.go @@ -0,0 +1,51 @@ +package icdv4 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/utils" +) + +type CdbResult struct { + Cdb Cdb `json:"deployment"` +} + +type Cdb struct { + Id string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + PlatformOptions PlatformOptions `json:"platform_options"` + Version string `json:"version"` + AdminUser string `json:"admin_username"` +} + +type PlatformOptions struct { + KeyProtectKey string `json:"key_protect_key_id"` + DiskENcryptionKeyCrn string `json:"disk_encryption_key_crn"` + BackUpEncryptionKeyCrn string `json:"backup_encryption_key_crn"` +} + +type Cdbs interface { + GetCdb(icdId string) (Cdb, error) +} + +type cdbs struct { + client *client.Client +} + +func newCdbAPI(c *client.Client) Cdbs { + return &cdbs{ + client: c, + } +} + +func (r *cdbs) GetCdb(icdId string) (Cdb, error) { + cdbResult := CdbResult{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s", utils.EscapeUrlParm(icdId)) + _, err := r.client.Get(rawURL, &cdbResult) + if err != nil { + return cdbResult.Cdb, err + } + return cdbResult.Cdb, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/connection.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/connection.go new file mode 100644 index 00000000000..5f3f7dfdbaa --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/connection.go @@ -0,0 +1,116 @@ +package icdv4 + +import ( + "fmt" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/utils" +) + +type ConnectionReq struct { + Password string `json:"password,omitempty"` + CertificateRoot string `json:"certificate_root,omitempty"` +} + +type ConnectionRes struct { + Connection Connection `json:"connection"` +} + +type Connection struct { + Rediss Uri `json:"rediss"` + Grpc Uri `json:"grpc"` + Postgres Uri `json:"postgres"` + Https Uri `json:"https"` + Amqps Uri `json:"amqps"` + Cli CliConn `json:"cli"` + Mongo Uri `json:"mongodb"` + Secure CassandraUri `json:"secure"` +} + +type CassandraUri struct { + Hosts []struct { + HostName string `json:"hostname"` + Port int `json:"port"` + 
} `json:"hosts"` + Authentication struct { + Method string `json:"method"` + UserName string `json:"username"` + Password string `json:"password"` + } + Bundle struct { + Name string `json:"name"` + BundleBase64 string `json:"bundle_base64"` + } `json:"bundle"` +} + +type Uri struct { + Type string `json:"type"` + Composed []string `json:"composed"` + Scheme string `json:"scheme"` + Hosts []struct { + HostName string `json:"hostname"` + Port int `json:"port"` + } `json:"hosts"` + Path string `json:"path"` + QueryOptions interface{} `json:"query_options"` + Authentication struct { + Method string `json:"method"` + UserName string `json:"username"` + Password string `json:"password"` + } + Certificate struct { + Name string `json:"name"` + CertificateBase64 string `json:"certificate_base64"` + } `json:"certificate"` + Database interface{} `json:"database"` +} + +type CliConn struct { + Type string `json:"type"` + Composed []string `json:"composed"` + Environment interface{} `json:"environment"` + Bin string `json:"bin"` + Arguments [][]string `json:"arguments"` + Certificate struct { + Name string `json:"name"` + CertificateBase64 string `json:"certificate_base64"` + } `json:"certificate"` +} + +type Connections interface { + GetConnection(icdId string, userId string, endpoint ...string) (Connection, error) + GetConnectionSubstitution(icdId string, userID string, connectionReq ConnectionReq) (Connection, error) +} + +type connections struct { + client *client.Client +} + +func newConnectionAPI(c *client.Client) Connections { + return &connections{ + client: c, + } +} + +func (r *connections) GetConnection(icdId string, userId string, endpoint ...string) (Connection, error) { + connectionRes := ConnectionRes{} + connectionEndpoint := "public" + if len(endpoint) > 0 { + connectionEndpoint = endpoint[0] + } + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/users/%s/connections/%s", utils.EscapeUrlParm(icdId), userId, connectionEndpoint) + _, err := r.client.Get(rawURL, &connectionRes) + if err != nil { + return connectionRes.Connection, err + } + return connectionRes.Connection, nil +} + +func (r *connections) GetConnectionSubstitution(icdId string, userID string, connectionReq ConnectionReq) (Connection, error) { + connectionResSub := ConnectionRes{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/connections", utils.EscapeUrlParm(icdId)) + _, err := r.client.Post(rawURL, &connectionReq, &connectionResSub) + if err != nil { + return connectionResSub.Connection, err + } + return connectionResSub.Connection, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/scaling.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/scaling.go new file mode 100644 index 00000000000..97dcee4eaa5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/scaling.go @@ -0,0 +1,132 @@ +package icdv4 + +import ( + "fmt" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/utils" +) + +type GroupList struct { + Groups []Group `json:"groups"` +} + +type Group struct { + Id string `json:"id"` + Count int `json:"count"` + Members Members `json:"members"` + Memory Memory `json:"memory"` + Cpu Cpu `json:"cpu"` + Disk Disk `json:"disk"` +} + +type Members struct { + Units string `json:"units"` + AllocationCount int `json:"allocation_count"` + MinimumCount int `json:"minimum_count"` + MaximumCount int `json:"maximum_count"` + StepSizeCount int `json:"step_size_count"` + IsAdjustable bool `json:"is_adjustable"` + CanScaleDown bool `json:"can_scale_down"` +} + 
+type Memory struct { + Units string `json:"units"` + AllocationMb int `json:"allocation_mb"` + MinimumMb int `json:"minimum_mb"` + MaximumMb int `json:"maximum_mb"` + StepSizeMb int `json:"step_size_mb"` + IsAdjustable bool `json:"is_adjustable"` + CanScaleDown bool `json:"can_scale_down"` +} + +type Cpu struct { + Units string `json:"units"` + AllocationCount int `json:"allocation_count"` + MinimumCount int `json:"minimum_count"` + MaximumCount int `json:"maximum_count"` + StepSizeCount int `json:"step_size_count"` + IsAdjustable bool `json:"is_adjustable"` + CanScaleDown bool `json:"can_scale_down"` +} + +type Disk struct { + Units string `json:"units"` + AllocationMb int `json:"allocation_mb"` + MinimumMb int `json:"minimum_mb"` + MaximumMb int `json:"maximum_mb"` + StepSizeMb int `json:"step_size_mb"` + IsAdjustable bool `json:"is_adjustable"` + CanScaleDown bool `json:"can_scale_down"` +} + +type GroupReq struct { + GroupBdy GroupBdy `json:"group"` +} + +type GroupBdy struct { + Members *MembersReq `json:"members,omitempty"` + Memory *MemoryReq `json:"memory,omitempty"` + Cpu *CpuReq `json:"cpu,omitempty"` + Disk *DiskReq `json:"disk,omitempty"` +} + +type MembersReq struct { + AllocationCount int `json:"allocation_count,omitempty"` +} +type MemoryReq struct { + AllocationMb int `json:"allocation_mb,omitempty"` +} +type CpuReq struct { + AllocationCount int `json:"allocation_count,omitempty"` +} +type DiskReq struct { + AllocationMb int `json:"allocation_mb,omitempty"` +} + +type Groups interface { + GetDefaultGroups(groupType string) (GroupList, error) + GetGroups(icdId string) (GroupList, error) + UpdateGroup(icdId string, groupId string, groupReq GroupReq) (Task, error) +} + +type groups struct { + client *client.Client +} + +func newGroupAPI(c *client.Client) Groups { + return &groups{ + client: c, + } +} + +func (r *groups) GetDefaultGroups(groupType string) (GroupList, error) { + groupList := GroupList{} + rawURL := fmt.Sprintf("/v4/ibm/deployables/%s/groups", groupType) + _, err := r.client.Get(rawURL, &groupList) + if err != nil { + return groupList, err + } + return groupList, nil +} + +func (r *groups) GetGroups(icdId string) (GroupList, error) { + groupList := GroupList{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/groups", utils.EscapeUrlParm(icdId)) + _, err := r.client.Get(rawURL, &groupList) + if err != nil { + return groupList, err + } + return groupList, nil +} + +func (r *groups) UpdateGroup(icdId string, groupId string, groupReq GroupReq) (Task, error) { + taskResult := TaskResult{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/groups/%s", utils.EscapeUrlParm(icdId), groupId) + _, err := r.client.Patch(rawURL, &groupReq, &taskResult) + if err != nil { + return taskResult.Task, err + } + return taskResult.Task, nil +} + + diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/task.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/task.go new file mode 100644 index 00000000000..2005d1da448 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/task.go @@ -0,0 +1,45 @@ +package icdv4 + +import ( + "fmt" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/utils" +) + +// type TaskResult struct { +// Task Task `json:"task"` +// } + +// type Task struct { +// Id string `json:"id"` +// Description string `json:"description"` +// Status string `json:"status"` +// DeploymentId string `json:"deployment_id"` +// ProgressPercent int `json:"progress_percent"` +// CreatedAt string `json:"created_at"` + +// } + 
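The scaling and task APIs here are meant to be used together: Groups.UpdateGroup issues the PATCH and returns a Task, which callers poll through the Tasks interface below until it settles. A minimal editorial sketch (not part of the vendored source), assuming a configured *session.Session; the "completed"/"failed" status strings and the poll interval are illustrative, not values confirmed by this diff:

	icdClient, err := icdv4.New(sess)
	if err != nil {
		return err
	}
	task, err := icdClient.Groups().UpdateGroup(deploymentID, "member", icdv4.GroupReq{
		GroupBdy: icdv4.GroupBdy{Memory: &icdv4.MemoryReq{AllocationMb: 2048}},
	})
	if err != nil {
		return err
	}
	// Poll until the task leaves its in-progress state (status strings assumed).
	for task.Status != "completed" && task.Status != "failed" {
		time.Sleep(10 * time.Second)
		if task, err = icdClient.Tasks().GetTask(task.Id); err != nil {
			return err
		}
	}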
+type Tasks interface { + GetTask(taskId string) (Task, error) +} + +type tasks struct { + client *client.Client +} + +func newTaskAPI(c *client.Client) Tasks { + return &tasks{ + client: c, + } +} + +func (r *tasks) GetTask(taskId string) (Task, error) { + taskResult := TaskResult{} + rawURL := fmt.Sprintf("/v4/ibm/tasks/%s", utils.EscapeUrlParm(taskId)) + _, err := r.client.Get(rawURL, &taskResult) + if err != nil { + return taskResult.Task, err + } + return taskResult.Task, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/user.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/user.go new file mode 100644 index 00000000000..7f699e9e628 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/user.go @@ -0,0 +1,75 @@ +package icdv4 + +import ( + "fmt" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/utils" +) + +type UserReq struct { + User User `json:"user"` +} + +type User struct { + UserName string `json:"username,omitempty"` + Password string `json:"password,omitempty"` +} + +type TaskResult struct { + Task Task `json:"task"` +} + +type Task struct { + Id string `json:"id"` + Description string `json:"description"` + Status string `json:"status"` + DeploymentId string `json:"deployment_id"` + ProgressPercent int `json:"progress_percent"` + CreatedAt string `json:"created_at"` +} + +type Users interface { + CreateUser(icdId string, userReq UserReq) (Task, error) + UpdateUser(icdId string, userName string, userReq UserReq) (Task, error) + DeleteUser(icdId string, userName string) (Task, error) +} + +type users struct { + client *client.Client +} + +func newUsersAPI(c *client.Client) Users { + return &users{ + client: c, + } +} + +func (r *users) CreateUser(icdId string, userReq UserReq) (Task, error) { + taskResult := TaskResult{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/users", utils.EscapeUrlParm(icdId)) + _, err := r.client.Post(rawURL, &userReq, &taskResult) + if err != nil { + return taskResult.Task, err + } + return taskResult.Task, nil +} + +func (r *users) UpdateUser(icdId string, userName string, userReq UserReq) (Task, error) { + taskResult := TaskResult{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/users/%s", utils.EscapeUrlParm(icdId), userName) + _, err := r.client.Patch(rawURL, &userReq, &taskResult) + if err != nil { + return taskResult.Task, err + } + return taskResult.Task, nil +} + +func (r *users) DeleteUser(icdId string, userName string) (Task, error) { + taskResult := TaskResult{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/users/%s", utils.EscapeUrlParm(icdId), userName) + _, err := r.client.DeleteWithResp(rawURL, &taskResult) + if err != nil { + return taskResult.Task, err + } + return taskResult.Task, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/whitelist.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/whitelist.go new file mode 100644 index 00000000000..84db6902ec2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/icd/icdv4/whitelist.go @@ -0,0 +1,66 @@ +package icdv4 + +import ( + "fmt" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/utils" +) + +type Whitelist struct { + WhitelistEntrys []WhitelistEntry `json:"ip_addresses"` +} + +type WhitelistEntry struct { + Address string `json:"address,omitempty"` + Description string `json:"description,omitempty"` +} + +type WhitelistReq struct { + WhitelistEntry WhitelistEntry `json:"ip_address"` +} + +type Whitelists interface { + 
CreateWhitelist(icdId string, whitelistReq WhitelistReq) (Task, error) + GetWhitelist(icdId string) (Whitelist, error) + DeleteWhitelist(icdId string, ipAddress string) (Task, error) +} + +type whitelists struct { + client *client.Client +} + +func newWhitelistAPI(c *client.Client) Whitelists { + return &whitelists{ + client: c, + } +} + +func (r *whitelists) CreateWhitelist(icdId string, whitelistReq WhitelistReq) (Task, error) { + taskResult := TaskResult{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/whitelists/ip_addresses", utils.EscapeUrlParm(icdId)) + _, err := r.client.Post(rawURL, &whitelistReq, &taskResult) + if err != nil { + return taskResult.Task, err + } + return taskResult.Task, nil +} + +func (r *whitelists) GetWhitelist(icdId string) (Whitelist, error) { + whitelist := Whitelist{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/whitelists/ip_addresses", utils.EscapeUrlParm(icdId)) + _, err := r.client.Get(rawURL, &whitelist) + if err != nil { + return whitelist, err + } + return whitelist, nil +} + +func (r *whitelists) DeleteWhitelist(icdId string, ipAddress string) (Task, error) { + taskResult := TaskResult{} + rawURL := fmt.Sprintf("/v4/ibm/deployments/%s/whitelists/ip_addresses/%s", utils.EscapeUrlParm(icdId), utils.EscapeUrlParm(ipAddress)) + _, err := r.client.DeleteWithResp(rawURL, &taskResult) + if err != nil { + return taskResult.Task, err + } + return taskResult.Task, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/api_service.go new file mode 100644 index 00000000000..c4944ea1a5b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/api_service.go @@ -0,0 +1,151 @@ +package mccpv2 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//MccpServiceAPI is the mccpv2 client ... +type MccpServiceAPI interface { + Organizations() Organizations + Spaces() Spaces + ServiceInstances() ServiceInstances + ServiceKeys() ServiceKeys + ServicePlans() ServicePlans + ServiceOfferings() ServiceOfferings + SpaceQuotas() SpaceQuotas + OrgQuotas() OrgQuotas + Apps() Apps + Routes() Routes + SharedDomains() SharedDomains + PrivateDomains() PrivateDomains + ServiceBindings() ServiceBindings + Regions() RegionRepository +} + +//MccpService holds the client +type mccpService struct { + *client.Client +} + +//New ... 
+func New(sess *session.Session) (MccpServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.MccpService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewUAARepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.UAAAccessToken == "" || config.UAARefreshToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.MCCPAPIEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &mccpService{ + Client: client.New(config, bluemix.MccpService, tokenRefreher), + }, nil +} + +//Organizations implements Organizations APIs +func (c *mccpService) Organizations() Organizations { + return newOrganizationAPI(c.Client) +} + +//Spaces implements Spaces APIs +func (c *mccpService) Spaces() Spaces { + return newSpacesAPI(c.Client) +} + +//ServicePlans implements ServicePlans APIs +func (c *mccpService) ServicePlans() ServicePlans { + return newServicePlanAPI(c.Client) +} + +//ServiceOfferings implements ServiceOfferings APIs +func (c *mccpService) ServiceOfferings() ServiceOfferings { + return newServiceOfferingAPI(c.Client) +} + +//ServiceInstances implements ServiceInstances APIs +func (c *mccpService) ServiceInstances() ServiceInstances { + return newServiceInstanceAPI(c.Client) +} + +//ServiceKeys implements ServiceKey APIs +func (c *mccpService) ServiceKeys() ServiceKeys { + return newServiceKeyAPI(c.Client) +} + +//SpaceQuotas implements SpaceQuota APIs +func (c *mccpService) SpaceQuotas() SpaceQuotas { + return newSpaceQuotasAPI(c.Client) +} + +//OrgQuotas implements OrgQuota APIs +func (c *mccpService) OrgQuotas() OrgQuotas { + return newOrgQuotasAPI(c.Client) +} + +//ServiceBindings implements ServiceBindings APIs +func (c *mccpService) ServiceBindings() ServiceBindings { + return newServiceBindingAPI(c.Client) +} + +//Apps implements Apps APIs + +func (c *mccpService) Apps() Apps { + return newAppAPI(c.Client) +} + +//Routes implements Route APIs + +func (c *mccpService) Routes() Routes { + return newRouteAPI(c.Client) +} + +//SharedDomains implements SharedDomian APIs + +func (c *mccpService) SharedDomains() SharedDomains { + return newSharedDomainAPI(c.Client) +} + +//PrivateDomains implements PrivateDomains APIs + +func (c *mccpService) PrivateDomains() PrivateDomains { + return newPrivateDomainAPI(c.Client) +} + +//Regions implements Regions APIs + +func (c *mccpService) Regions() RegionRepository { + return newRegionRepositoryAPI(c.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/apps.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/apps.go new file mode 100644 index 00000000000..4d2b3fd522a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/apps.go @@ -0,0 +1,575 @@ +package mccpv2 + +import ( + "fmt" + "os" + "strconv" + "time" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/trace" +) + +//AppState ... 
+type AppState struct { + PackageState string + InstanceState string +} + +const ( + //ErrCodeAppDoesnotExist ... + ErrCodeAppDoesnotExist = "AppADoesnotExist" + + //AppRunningState ... + AppRunningState = "RUNNING" + + //AppStartedState ... + AppStartedState = "STARTED" + + //AppStagedState ... + AppStagedState = "STAGED" + + //AppPendingState ... + AppPendingState = "PENDING" + + //AppStoppedState ... + AppStoppedState = "STOPPED" + + //AppFailedState ... + AppFailedState = "FAILED" + + //AppUnKnownState ... + AppUnKnownState = "UNKNOWN" + + //DefaultRetryDelayForStatusCheck ... + DefaultRetryDelayForStatusCheck = 10 * time.Second +) + +//AppRequest ... +type AppRequest struct { + Name *string `json:"name,omitempty"` + Memory int `json:"memory,omitempty"` + Instances int `json:"instances,omitempty"` + DiskQuota int `json:"disk_quota,omitempty"` + SpaceGUID *string `json:"space_guid,omitempty"` + StackGUID *string `json:"stack_guid,omitempty"` + State *string `json:"state,omitempty"` + DetectedStartCommand *string `json:"detected_start_command,omitempty"` + Command *string `json:"command,omitempty"` + BuildPack *string `json:"buildpack,omitempty"` + HealthCheckType *string `json:"health_check_type,omitempty"` + HealthCheckTimeout int `json:"health_check_timeout,omitempty"` + HealthCheckHTTPEndpoint *string `json:"health_check_http_endpoint,omitempty"` + Diego bool `json:"diego,omitempty"` + EnableSSH bool `json:"enable_ssh,omitempty"` + DockerImage *string `json:"docker_image,omitempty"` + StagingFailedReason *string `json:"staging_failed_reason,omitempty"` + StagingFailedDescription *string `json:"staging_failed_description,omitempty"` + Ports []int `json:"ports,omitempty"` + DockerCredentialsJSON *map[string]interface{} `json:"docker_credentials_json,omitempty"` + EnvironmentJSON *map[string]interface{} `json:"environment_json,omitempty"` +} + +//AppEntity ... +type AppEntity struct { + Name string `json:"name"` + SpaceGUID string `json:"space_guid"` + StackGUID string `json:"stack_guid"` + State string `json:"state"` + PackageState string `json:"package_state"` + Memory int `json:"memory"` + Instances int `json:"instances"` + DiskQuota int `json:"disk_quota"` + Version string `json:"version"` + BuildPack *string `json:"buildpack"` + Command *string `json:"command"` + Console bool `json:"console"` + Debug *string `json:"debug"` + StagingTaskID string `json:"staging_task_id"` + HealthCheckType string `json:"health_check_type"` + HealthCheckTimeout *int `json:"health_check_timeout"` + HealthCheckHTTPEndpoint string `json:"health_check_http_endpoint"` + StagingFailedReason string `json:"staging_failed_reason"` + StagingFailedDescription string `json:"staging_failed_description"` + Diego bool `json:"diego"` + DockerImage *string `json:"docker_image"` + EnableSSH bool `json:"enable_ssh"` + Ports []int `json:"ports"` + DockerCredentialsJSON map[string]interface{} `json:"docker_credentials_json"` + EnvironmentJSON map[string]interface{} `json:"environment_json"` +} + +//AppResource ... +type AppResource struct { + Resource + Entity AppEntity +} + +//AppFields ... +type AppFields struct { + Metadata Metadata + Entity AppEntity +} + +//UploadBitsEntity ... +type UploadBitsEntity struct { + GUID string `json:"guid"` + Status string `json:"status"` +} + +//UploadBitFields ... +type UploadBitFields struct { + Metadata Metadata + Entity UploadBitsEntity +} + +//AppSummaryFields ... 
+type AppSummaryFields struct {
+	GUID             string `json:"guid"`
+	Name             string `json:"name"`
+	State            string `json:"state"`
+	PackageState     string `json:"package_state"`
+	RunningInstances int    `json:"running_instances"`
+}
+
+//AppStats ...
+type AppStats struct {
+	State string `json:"state"`
+}
+
+//ToFields ..
+func (resource AppResource) ToFields() App {
+	entity := resource.Entity
+
+	return App{
+		GUID:                    resource.Metadata.GUID,
+		Name:                    entity.Name,
+		SpaceGUID:               entity.SpaceGUID,
+		StackGUID:               entity.StackGUID,
+		State:                   entity.State,
+		PackageState:            entity.PackageState,
+		Memory:                  entity.Memory,
+		Instances:               entity.Instances,
+		DiskQuota:               entity.DiskQuota,
+		Version:                 entity.Version,
+		BuildPack:               entity.BuildPack,
+		Command:                 entity.Command,
+		Console:                 entity.Console,
+		Debug:                   entity.Debug,
+		StagingTaskID:           entity.StagingTaskID,
+		HealthCheckType:         entity.HealthCheckType,
+		HealthCheckTimeout:      entity.HealthCheckTimeout,
+		HealthCheckHTTPEndpoint: entity.HealthCheckHTTPEndpoint,
+		Diego:                   entity.Diego,
+		DockerImage:             entity.DockerImage,
+		EnableSSH:               entity.EnableSSH,
+		Ports:                   entity.Ports,
+		DockerCredentialsJSON:   entity.DockerCredentialsJSON,
+		EnvironmentJSON:         entity.EnvironmentJSON,
+	}
+}
+
+//App model
+type App struct {
+	Name                    string
+	SpaceGUID               string
+	GUID                    string
+	StackGUID               string
+	State                   string
+	PackageState            string
+	Memory                  int
+	Instances               int
+	DiskQuota               int
+	Version                 string
+	BuildPack               *string
+	Command                 *string
+	Console                 bool
+	Debug                   *string
+	StagingTaskID           string
+	HealthCheckType         string
+	HealthCheckTimeout      *int
+	HealthCheckHTTPEndpoint string
+	Diego                   bool
+	DockerImage             *string
+	EnableSSH               bool
+	Ports                   []int
+	DockerCredentialsJSON   map[string]interface{}
+	EnvironmentJSON         map[string]interface{}
+}
+
+//Apps ...
+type Apps interface {
+	Create(appPayload AppRequest, opts ...bool) (*AppFields, error)
+	List() ([]App, error)
+	Get(appGUID string) (*AppFields, error)
+	Update(appGUID string, appPayload AppRequest, opts ...bool) (*AppFields, error)
+	Delete(appGUID string, opts ...bool) error
+	FindByName(spaceGUID, name string) (*App, error)
+	Start(appGUID string, timeout time.Duration) (*AppState, error)
+	Upload(appGUID string, zipPath string, opts ...bool) (*UploadBitFields, error)
+	Summary(appGUID string) (*AppSummaryFields, error)
+	Stat(appGUID string) (map[string]AppStats, error)
+	WaitForAppStatus(waitForThisState, appGUID string, timeout time.Duration) (string, error)
+	WaitForInstanceStatus(waitForThisState, appGUID string, timeout time.Duration) (string, error)
+	Instances(appGUID string) (map[string]AppStats, error)
+	Restage(appGUID string, timeout time.Duration) (*AppState, error)
+	WaitForStatus(appGUID string, maxWaitTime time.Duration) (*AppState, error)
+
+	//Routes related
+	BindRoute(appGUID, routeGUID string) (*AppFields, error)
+	ListRoutes(appGUID string) ([]Route, error)
+	UnBindRoute(appGUID, routeGUID string) error
+
+	//Service bindings
+	ListServiceBindings(appGUID string) ([]ServiceBinding, error)
+	DeleteServiceBindings(appGUID string, bindingGUIDs ...string) error
+}
+
+type app struct {
+	client *client.Client
+}
+
+func newAppAPI(c *client.Client) Apps {
+	return &app{
+		client: c,
+	}
+}
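Taken together, this repository covers the usual app lifecycle. A usage sketch (editorial, not part of the vendored source), assuming a configured *session.Session and illustrative names:

	mccpClient, err := mccpv2.New(sess)
	if err != nil {
		return err
	}
	apps := mccpClient.Apps()
	found, err := apps.FindByName(spaceGUID, "my-app")
	if err != nil {
		return err
	}
	// Start the app and block for up to ten minutes while it stages and comes up;
	// state.InstanceState reports RUNNING once instances are up.
	state, err := apps.Start(found.GUID, 10*time.Minute)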
+func (r *app) FindByName(spaceGUID string, name string) (*App, error) {
+	rawURL := fmt.Sprintf("/v2/spaces/%s/apps", spaceGUID)
+	req := rest.GetRequest(rawURL).Query("q", "name:"+name)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	apps, err := r.listAppWithPath(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(apps) == 0 {
+		return nil, bmxerror.New(ErrCodeAppDoesnotExist,
+			fmt.Sprintf("Given app: %q doesn't exist in given space: %q", name, spaceGUID))
+	}
+	return &apps[0], nil
+}
+
+// opts is a list of boolean parameters
+// opts[0] - async - Will run the create request in a background job. Recommended: 'true'. Defaults to 'true'.
+func (r *app) Create(appPayload AppRequest, opts ...bool) (*AppFields, error) {
+	async := true
+	if len(opts) > 0 {
+		async = opts[0]
+	}
+	rawURL := fmt.Sprintf("/v2/apps?async=%t", async)
+	appFields := AppFields{}
+	_, err := r.client.Post(rawURL, appPayload, &appFields)
+	if err != nil {
+		return nil, err
+	}
+	return &appFields, nil
+}
+
+func (r *app) BindRoute(appGUID, routeGUID string) (*AppFields, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/routes/%s", appGUID, routeGUID)
+	appFields := AppFields{}
+	_, err := r.client.Put(rawURL, nil, &appFields)
+	if err != nil {
+		return nil, err
+	}
+	return &appFields, nil
+}
+
+func (r *app) ListRoutes(appGUID string) ([]Route, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/routes", appGUID)
+	req := rest.GetRequest(rawURL)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	route, err := listRouteWithPath(r.client, path)
+	if err != nil {
+		return nil, err
+	}
+	return route, nil
+}
+
+func (r *app) UnBindRoute(appGUID, routeGUID string) error {
+	rawURL := fmt.Sprintf("/v2/apps/%s/routes/%s", appGUID, routeGUID)
+	_, err := r.client.Delete(rawURL)
+	return err
+}
+
+func (r *app) DeleteServiceBindings(appGUID string, sbGUIDs ...string) error {
+	for _, g := range sbGUIDs {
+		rawURL := fmt.Sprintf("/v2/apps/%s/service_bindings/%s", appGUID, g)
+		if _, err := r.client.Delete(rawURL); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *app) listAppWithPath(path string) ([]App, error) {
+	var apps []App
+	_, err := r.client.GetPaginated(path, NewCCPaginatedResources(AppResource{}), func(resource interface{}) bool {
+		if appResource, ok := resource.(AppResource); ok {
+			apps = append(apps, appResource.ToFields())
+			return true
+		}
+		return false
+	})
+	return apps, err
+}
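listAppWithPath above is this package's pagination idiom: client.GetPaginated walks successive pages and hands each deserialized resource to the callback. A sketch (editorial) of finding a first match without collecting every page; that a false return stops further paging is an assumption about the bluemix-go client, not something this diff confirms:

	func firstAppNamed(c *client.Client, path, name string) (*App, error) {
		var found *App
		_, err := c.GetPaginated(path, NewCCPaginatedResources(AppResource{}), func(resource interface{}) bool {
			if appResource, ok := resource.(AppResource); ok && appResource.Entity.Name == name {
				app := appResource.ToFields()
				found = &app
				return false // assumed to halt pagination early
			}
			return true
		})
		return found, err
	}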
+// opts is a list of boolean parameters
+// opts[0] - async - If true, a new asynchronous job is submitted to persist the bits and the job id is included in the response.
+// The client will need to poll the job's status until persistence is completed successfully.
+// If false, the request will block until the bits are persisted synchronously. Defaults to 'false'.
+func (r *app) Upload(appGUID string, zipPath string, opts ...bool) (*UploadBitFields, error) {
+	async := false
+	if len(opts) > 0 {
+		async = opts[0]
+	}
+	req := rest.PutRequest(r.client.URL("/v2/apps/"+appGUID+"/bits")).Query("async", strconv.FormatBool(async))
+	file, err := os.Open(zipPath)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	f := rest.File{
+		Name:    file.Name(),
+		Content: file,
+	}
+	req.File("application", f)
+	req.Field("resources", "[]")
+	uploadBitResponse := &UploadBitFields{}
+	_, err = r.client.SendRequest(req, uploadBitResponse)
+	return uploadBitResponse, err
+}
+
+func (r *app) Start(appGUID string, maxWaitTime time.Duration) (*AppState, error) {
+	payload := AppRequest{
+		State: helpers.String(AppStartedState),
+	}
+	rawURL := fmt.Sprintf("/v2/apps/%s", appGUID)
+	appFields := AppFields{}
+	_, err := r.client.Put(rawURL, payload, &appFields)
+	if err != nil {
+		return nil, err
+	}
+	appState := &AppState{
+		PackageState:  AppPendingState,
+		InstanceState: AppUnKnownState,
+	}
+	if maxWaitTime == 0 {
+		appState.PackageState = appFields.Entity.PackageState
+		appState.InstanceState = appFields.Entity.State
+		return appState, nil
+	}
+	return r.WaitForStatus(appGUID, maxWaitTime)
+}
+
+func (r *app) Get(appGUID string) (*AppFields, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s", appGUID)
+	appFields := AppFields{}
+	_, err := r.client.Get(rawURL, &appFields, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &appFields, nil
+}
+
+func (r *app) Summary(appGUID string) (*AppSummaryFields, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/summary", appGUID)
+	appFields := AppSummaryFields{}
+	_, err := r.client.Get(rawURL, &appFields, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &appFields, nil
+}
+
+func (r *app) Stat(appGUID string) (map[string]AppStats, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/stats", appGUID)
+	appStats := map[string]AppStats{}
+	_, err := r.client.Get(rawURL, &appStats, nil)
+	if err != nil {
+		return nil, err
+	}
+	return appStats, nil
+}
+
+func (r *app) Instances(appGUID string) (map[string]AppStats, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/instances", appGUID)
+	appInstances := map[string]AppStats{}
+	_, err := r.client.Get(rawURL, &appInstances, nil)
+	if err != nil {
+		return nil, err
+	}
+	return appInstances, nil
+}
+
+func (r *app) List() ([]App, error) {
+	rawURL := "/v2/apps"
+	req := rest.GetRequest(rawURL)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	apps, err := r.listAppWithPath(path)
+	if err != nil {
+		return nil, err
+	}
+	return apps, nil
+}
+
+// opts is a list of boolean parameters
+// opts[0] - async - Will run the update request in a background job. Recommended: 'true'. Defaults to 'true'.
+func (r *app) Update(appGUID string, appPayload AppRequest, opts ...bool) (*AppFields, error) {
+	async := true
+	if len(opts) > 0 {
+		async = opts[0]
+	}
+	rawURL := fmt.Sprintf("/v2/apps/%s?async=%t", appGUID, async)
+	appFields := AppFields{}
+	_, err := r.client.Put(rawURL, appPayload, &appFields)
+	if err != nil {
+		return nil, err
+	}
+	return &appFields, nil
+}
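The async/recursive switches across these methods ride on a variadic bool rather than an options struct, so call sites read positionally. Illustrative calls (editorial sketch; apps is the Apps repository from mccpClient.Apps(), and Delete is defined just below):

	// opts[0] = async; pass false to block on the Cloud Controller response.
	fields, err := apps.Update(appGUID, mccpv2.AppRequest{Instances: 3}, false)

	// opts[0] = async, opts[1] = recursive.
	err = apps.Delete(appGUID, true, true)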
+// opts is a list of boolean parameters
+// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. Defaults to 'true'.
+// opts[1] - recursive - Will delete service bindings and routes associated with the app. Defaults to 'false'.
+func (r *app) Delete(appGUID string, opts ...bool) error {
+	async := true
+	recursive := false
+	if len(opts) > 0 {
+		async = opts[0]
+	}
+	if len(opts) > 1 {
+		recursive = opts[1]
+	}
+	rawURL := fmt.Sprintf("/v2/apps/%s?async=%t&recursive=%t", appGUID, async, recursive)
+	_, err := r.client.Delete(rawURL)
+	return err
+}
+
+func (r *app) Restage(appGUID string, maxWaitTime time.Duration) (*AppState, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/restage", appGUID)
+	appFields := AppFields{}
+	_, err := r.client.Post(rawURL, nil, &appFields)
+	if err != nil {
+		return nil, err
+	}
+	appState := &AppState{
+		PackageState:  AppPendingState,
+		InstanceState: AppUnKnownState,
+	}
+	if maxWaitTime == 0 {
+		appState.PackageState = appFields.Entity.PackageState
+		appState.InstanceState = appFields.Entity.State
+		return appState, nil
+	}
+	return r.WaitForStatus(appGUID, maxWaitTime)
+}
+
+func (r *app) WaitForAppStatus(waitForThisState, appGUID string, maxWaitTime time.Duration) (string, error) {
+	timeout := time.After(maxWaitTime)
+	tick := time.NewTicker(DefaultRetryDelayForStatusCheck)
+	defer tick.Stop()
+	status := AppPendingState
+	for {
+		select {
+		case <-timeout:
+			trace.Logger.Printf("Timed out while checking the app status for %q. Waited for %q for the state to be %q", appGUID, maxWaitTime, waitForThisState)
+			return status, nil
+		case <-tick.C:
+			appFields, err := r.Get(appGUID)
+			if err != nil {
+				return "", err
+			}
+			status = appFields.Entity.PackageState
+			trace.Logger.Println("apps.Entity.PackageState ===>>> ", status)
+			if status == waitForThisState || status == AppFailedState {
+				return status, nil
+			}
+		}
+	}
+}
+
+func (r *app) WaitForInstanceStatus(waitForThisState, appGUID string, maxWaitTime time.Duration) (string, error) {
+	timeout := time.After(maxWaitTime)
+	tick := time.NewTicker(DefaultRetryDelayForStatusCheck)
+	defer tick.Stop()
+	status := AppStartedState
+	for {
+		select {
+		case <-timeout:
+			trace.Logger.Printf("Timed out while checking the app status for %q. Waited for %q for the state to be %q", appGUID, maxWaitTime, waitForThisState)
+			return status, nil
+		case <-tick.C:
+			appStat, err := r.Stat(appGUID)
+			if err != nil {
+				return status, err
+			}
+			stateCount := 0
+			for k, v := range appStat {
+				fmt.Printf("Instance[%s] State is %s\n", k, v.State)
+				if v.State == waitForThisState {
+					stateCount++
+				}
+			}
+			if stateCount == len(appStat) {
+				return waitForThisState, nil
+			}
+		}
+	}
+}
+
+func (r *app) WaitForStatus(appGUID string, maxWaitTime time.Duration) (*AppState, error) {
+	appState := &AppState{
+		PackageState:  AppPendingState,
+		InstanceState: AppUnKnownState,
+	}
+	status, err := r.WaitForAppStatus(AppStagedState, appGUID, maxWaitTime/2)
+	appState.PackageState = status
+	if err != nil || status == AppFailedState {
+		return appState, err
+	}
+	status, err = r.WaitForInstanceStatus(AppRunningState, appGUID, maxWaitTime/2)
+	appState.InstanceState = status
+	return appState, err
+}
+
+//TODO pull the wait logic into an auxiliary function which can be used by all
+
+func (r *app) ListServiceBindings(appGUID string) ([]ServiceBinding, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/service_bindings", appGUID)
+	req := rest.GetRequest(rawURL)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	sb, err := listServiceBindingWithPath(r.client, path)
+	if err != nil {
+		return nil, err
+	}
+	return sb, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/filter.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/filter.go
new file mode 100644
index 00000000000..d63838d93db
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/filter.go
@@ -0,0 +1,89 @@
+package mccpv2
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+var (
+	//ErrFilterNameMissing ...
+	ErrFilterNameMissing = errors.New("Filter must have a name")
+
+	//ErrFilterMissingOp ..
+	ErrFilterMissingOp = errors.New("Filter must have an operator")
+)
+
+//Filter ...
+type Filter struct {
+	name  string
+	op    string
+	value string
+}
+
+//Name ...
+func (f Filter) Name(name string) Filter {
+	f.name = name
+	return f
+}
+
+//Eq ...
+func (f Filter) Eq(target string) Filter {
+	f.op = ":"
+	f.value = target
+	return f
+}
+
+//In ...
+func (f Filter) In(targets ...string) Filter {
+	f.op = " IN "
+	f.value = strings.Join(targets, ",")
+	return f
+}
+
+//Ge ...
+func (f Filter) Ge(target string) Filter {
+	f.op = ">="
+	f.value = target
+	return f
+}
+
+//Le ...
+func (f Filter) Le(target string) Filter {
+	f.op = "<="
+	f.value = target
+	return f
+}
+
+//Gt ...
+func (f Filter) Gt(target string) Filter {
+	f.op = ">"
+	f.value = target
+	return f
+}
+
+//Lt ...
+func (f Filter) Lt(target string) Filter {
+	f.op = "<"
+	f.value = target
+	return f
+}
+
+func (f Filter) validate() error {
+	if f.name == "" {
+		return ErrFilterNameMissing
+	}
+	if f.op == "" {
+		return ErrFilterMissingOp
+	}
+	return nil
+}
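Each terminal operator fills in op and value, and validate guards the two required pieces; Build (below) then renders name, operator, and value with a trailing semicolon so several filters can be concatenated into a single Cloud Controller `q` parameter. An editorial sketch of the round trip:

	q1, _ := mccpv2.Filter{}.Name("name").Eq("my-app").Build()        // "name:my-app;"
	q2, _ := mccpv2.Filter{}.Name("space_guid").Eq(spaceGUID).Build() // filters concatenate
	req := rest.GetRequest("/v2/apps").Query("q", q1+q2)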
+//Build ...
+func (f Filter) Build() (string, error) {
+	err := f.validate()
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s%s%s;", f.name, f.op, f.value), nil
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/organization_quota.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/organization_quota.go
new file mode 100644
index 00000000000..2aeeec97f77
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/organization_quota.go
@@ -0,0 +1,166 @@
+package mccpv2
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+//OrgQuota ...
+type OrgQuota struct {
+	GUID                    string
+	Name                    string
+	NonBasicServicesAllowed bool
+	ServicesLimit           int
+	RoutesLimit             int
+	MemoryLimitInMB         int64
+	InstanceMemoryLimitInMB int64
+	TrialDBAllowed          bool
+	AppInstanceLimit        int
+	PrivateDomainsLimit     int
+	AppTasksLimit           int
+	ServiceKeysLimit        int
+	RoutePortsLimit         int
+}
+
+//OrgQuotaFields ...
+type OrgQuotaFields struct {
+	Metadata OrgQuotaMetadata
+	Entity   OrgQuotaEntity
+}
+
+//OrgQuotaMetadata ...
+type OrgQuotaMetadata struct {
+	GUID string `json:"guid"`
+	URL  string `json:"url"`
+}
+
+//ErrCodeOrgQuotaDoesnotExist ...
+const ErrCodeOrgQuotaDoesnotExist = "OrgQuotaDoesnotExist"
+
+//OrgQuotaResource ...
+type OrgQuotaResource struct {
+	Resource
+	Entity OrgQuotaEntity
+}
+
+//OrgQuotaEntity ...
+type OrgQuotaEntity struct {
+	Name                    string      `json:"name"`
+	NonBasicServicesAllowed bool        `json:"non_basic_services_allowed"`
+	ServicesLimit           int         `json:"total_services"`
+	RoutesLimit             int         `json:"total_routes"`
+	MemoryLimitInMB         int64       `json:"memory_limit"`
+	InstanceMemoryLimitInMB int64       `json:"instance_memory_limit"`
+	TrialDBAllowed          bool        `json:"trial_db_allowed"`
+	AppInstanceLimit        json.Number `json:"app_instance_limit"`
+	PrivateDomainsLimit     json.Number `json:"total_private_domains"`
+	AppTasksLimit           json.Number `json:"app_tasks_limit"`
+	ServiceKeysLimit        json.Number `json:"total_service_keys"`
+	RoutePortsLimit         int         `json:"total_reserved_route_ports"`
+}
+
+//ToFields ...
+func (resource OrgQuotaResource) ToFields() OrgQuota {
+	entity := resource.Entity
+
+	return OrgQuota{
+		GUID:                    resource.Metadata.GUID,
+		Name:                    entity.Name,
+		NonBasicServicesAllowed: entity.NonBasicServicesAllowed,
+		ServicesLimit:           entity.ServicesLimit,
+		RoutesLimit:             entity.RoutesLimit,
+		MemoryLimitInMB:         entity.MemoryLimitInMB,
+		InstanceMemoryLimitInMB: entity.InstanceMemoryLimitInMB,
+		TrialDBAllowed:          entity.TrialDBAllowed,
+		AppInstanceLimit:        NumberToInt(entity.AppInstanceLimit, -1),
+		PrivateDomainsLimit:     NumberToInt(entity.PrivateDomainsLimit, -1),
+		AppTasksLimit:           NumberToInt(entity.AppTasksLimit, -1),
+		ServiceKeysLimit:        NumberToInt(entity.ServiceKeysLimit, -1),
+		RoutePortsLimit:         entity.RoutePortsLimit,
+	}
+}
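ToFields funnels the json.Number limits through NumberToInt, a helper defined elsewhere in this package, presumably so that values the API omits or returns as null fall back to -1 (the Cloud Controller convention for "unlimited"). A self-contained editorial sketch of that conversion under those assumptions; the real helper may differ:

	func numberToInt(n json.Number, defaultValue int) int {
		if n == "" {
			return defaultValue // field absent or null in the API response
		}
		if i, err := n.Int64(); err == nil {
			return int(i)
		}
		return defaultValue
	}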
+//OrgQuotas ...
+type OrgQuotas interface {
+	FindByName(name string) (*OrgQuota, error)
+	Get(orgQuotaGUID string) (*OrgQuotaFields, error)
+	List() ([]OrgQuota, error)
+}
+
+type orgQuota struct {
+	client *client.Client
+}
+
+func newOrgQuotasAPI(c *client.Client) OrgQuotas {
+	return &orgQuota{
+		client: c,
+	}
+}
+
+func (r *orgQuota) FindByName(name string) (*OrgQuota, error) {
+	rawURL := "/v2/quota_definitions"
+	req := rest.GetRequest(rawURL).Query("q", "name:"+name)
+
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+
+	orgQuotas, err := r.listOrgQuotaWithPath(path)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(orgQuotas) == 0 {
+		return nil, bmxerror.New(ErrCodeOrgQuotaDoesnotExist,
+			fmt.Sprintf("Given quota definition: %q doesn't exist", name))
+	}
+	return &orgQuotas[0], nil
+}
+
+func (r *orgQuota) List() ([]OrgQuota, error) {
+	rawURL := "/v2/quota_definitions"
+	req := rest.GetRequest(rawURL)
+
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+
+	orgQuotas, err := r.listOrgQuotaWithPath(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return orgQuotas, nil
+}
+
+func (r *orgQuota) listOrgQuotaWithPath(path string) ([]OrgQuota, error) {
+	var orgQuotas []OrgQuota
+	_, err := r.client.GetPaginated(path, NewCCPaginatedResources(OrgQuotaResource{}), func(resource interface{}) bool {
+		if orgQuotaResource, ok := resource.(OrgQuotaResource); ok {
+			orgQuotas = append(orgQuotas, orgQuotaResource.ToFields())
+			return true
+		}
+		return false
+	})
+	return orgQuotas, err
+}
+
+func (r *orgQuota) Get(quotaGUID string) (*OrgQuotaFields, error) {
+	rawURL := fmt.Sprintf("/v2/quota_definitions/%s", quotaGUID)
+	orgQuotaFields := OrgQuotaFields{}
+	_, err := r.client.Get(rawURL, &orgQuotaFields)
+	if err != nil {
+		return nil, err
+	}
+
+	return &orgQuotaFields, err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/organizations.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/organizations.go
new file mode 100644
index 00000000000..0080e4e02a3
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/organizations.go
@@ -0,0 +1,453 @@
+package mccpv2
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+//ErrCodeOrgDoesnotExist ...
+var ErrCodeOrgDoesnotExist = "OrgDoesnotExist"
+
+//Metadata ...
+type Metadata struct {
+	GUID string `json:"guid"`
+	URL  string `json:"url"`
+}
+
+//Resource ...
+type Resource struct {
+	Metadata Metadata
+}
+
+//OrgResource ...
+type OrgResource struct {
+	Resource
+	Entity OrgEntity
+}
+
+//OrgEntity ...
+type OrgEntity struct {
+	Name                   string `json:"name"`
+	Region                 string `json:"region"`
+	BillingEnabled         bool   `json:"billing_enabled"`
+	Status                 string `json:"status"`
+	OrgQuotaDefinitionGUID string `json:"quota_definition_guid"`
+}
+
+//ToFields ..
+func (resource OrgResource) ToFields() Organization {
+	entity := resource.Entity
+
+	return Organization{
+		GUID:                   resource.Metadata.GUID,
+		Name:                   entity.Name,
+		Region:                 entity.Region,
+		BillingEnabled:         entity.BillingEnabled,
+		Status:                 entity.Status,
+		OrgQuotaDefinitionGUID: entity.OrgQuotaDefinitionGUID,
+	}
+}
+
+//OrgCreateRequest ...
+type OrgCreateRequest struct {
+	Name                   string `json:"name"`
+	OrgQuotaDefinitionGUID string `json:"quota_definition_guid,omitempty"`
+	Status                 string `json:"status,omitempty"`
+}
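An editorial sketch tying the two repositories together: resolve a quota definition by name, then create an organization bound to it through the Organizations interface defined further below (quota name and org name are illustrative):

	quota, err := mccpClient.OrgQuotas().FindByName("default")
	if err != nil {
		return err
	}
	org, err := mccpClient.Organizations().Create(mccpv2.OrgCreateRequest{
		Name:                   "my-org",
		OrgQuotaDefinitionGUID: quota.GUID,
	})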
+type OrgUpdateRequest struct { + Name *string `json:"name,omitempty"` + OrgQuotaDefinitionGUID string `json:"quota_definition_guid,omitempty"` +} + +//Organization model +type Organization struct { + GUID string + Name string + Region string + BillingEnabled bool + Status string + OrgQuotaDefinitionGUID string +} + +//OrganizationFields ... +type OrganizationFields struct { + Metadata Metadata + Entity OrgEntity +} + +//OrgRole ... +type OrgRole struct { + UserGUID string + Admin bool + UserName string +} + +//OrgRoleResource ... +type OrgRoleResource struct { + Resource + Entity OrgRoleEntity +} + +//OrgRoleEntity ... +type OrgRoleEntity struct { + UserGUID string `json:"guid"` + Admin bool `json:"bool"` + UserName string `json:"username"` +} + +//ToFields ... +func (resource *OrgRoleResource) ToFields() OrgRole { + entity := resource.Entity + + return OrgRole{ + UserGUID: resource.Metadata.GUID, + Admin: entity.Admin, + UserName: entity.UserName, + } +} + +// OrgRegionInformation is the region information associated with an org +type OrgRegionInformation struct { + ID string `json:"id"` + Domain string `json:"domain"` + Name string `json:"name"` + Region string `json:"region"` + DisplayName string `json:"display_name"` + Customer struct { + Name string `json:"name"` + DisplayName string `json:"display_name"` + } `json:"customer"` + Deployment struct { + Name string `json:"name"` + DisplayName string `json:"display_name"` + } `json:"deployment"` + Geo struct { + Name string `json:"name"` + DisplayName string `json:"display_name"` + } `json:"geo"` + Account struct { + GUID string `json:"guid"` + OwnerGUIDs []string `json:"owner_guids"` + } `json:"account"` + PublicRegionsByProximity []string `json:"public_regions_by_proximity"` + ConsoleURL string `json:"console_url"` + CFAPI string `json:"cf_api"` + MCCPAPI string `json:"mccp_api"` + Type string `json:"type"` + Home bool `json:"home"` + Stealth string `json:"stealth"` + Aliases []string `json:"aliases"` + Settings struct { + Devops struct { + Enabled bool `json:"enabled"` + } `json:"devops"` + EnhancedAutoFix bool `json:"enhancedAutofix"` + } `json:"settings"` + OrgName string `json:"org_name"` + OrgGUID string `json:"org_guid"` +} + +//Organizations ... 
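+// Editorial usage sketch (assumes an initialized *client.Client named c,
+// which is hypothetical wiring, plus the package-internal constructor below):
+//
+//	orgAPI := newOrganizationAPI(c)
+//	org, err := orgAPI.FindByName("my-org", "us-south")
+//	if err == nil {
+//		fields, _ := orgAPI.Get(org.GUID)
+//		fmt.Println(fields.Entity.Name)
+//	}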
+type Organizations interface { + Create(req OrgCreateRequest, opts ...bool) (*OrganizationFields, error) + Get(orgGUID string) (*OrganizationFields, error) + List(region string) ([]Organization, error) + FindByName(orgName, region string) (*Organization, error) + DeleteByRegion(guid string, region string, opts ...bool) error + Delete(guid string, opts ...bool) error + Update(guid string, req OrgUpdateRequest, opts ...bool) (*OrganizationFields, error) + GetRegionInformation(orgGUID string) ([]OrgRegionInformation, error) + + AssociateBillingManager(orgGUID string, userMail string) (*OrganizationFields, error) + AssociateAuditor(orgGUID string, userMail string) (*OrganizationFields, error) + AssociateManager(orgGUID string, userMail string) (*OrganizationFields, error) + AssociateUser(orgGUID string, userMail string) (*OrganizationFields, error) + + ListBillingManager(orgGUID string, filters ...string) ([]OrgRole, error) + ListAuditors(orgGUID string, filters ...string) ([]OrgRole, error) + ListManager(orgGUID string, filters ...string) ([]OrgRole, error) + ListUsers(orgGUID string, filters ...string) ([]OrgRole, error) + + DisassociateBillingManager(orgGUID string, userMail string) error + DisassociateManager(orgGUID string, userMail string) error + DisassociateAuditor(orgGUID string, userMail string) error + DisassociateUser(orgGUID string, userMail string) error +} + +type organization struct { + client *client.Client +} + +func newOrganizationAPI(c *client.Client) Organizations { + return &organization{ + client: c, + } +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the create request in a background job. Recommended: 'true'. Default to 'true'. + +func (o *organization) Create(req OrgCreateRequest, opts ...bool) (*OrganizationFields, error) { + async := true + orgFields := OrganizationFields{} + if len(opts) > 0 { + async = opts[0] + } + rawURL := fmt.Sprintf("/v2/organizations?async=%t", async) + _, err := o.client.Post(rawURL, req, &orgFields) + if err != nil { + return nil, err + } + return &orgFields, err +} + +func (o *organization) Get(orgGUID string) (*OrganizationFields, error) { + rawURL := fmt.Sprintf("/v2/organizations/%s", orgGUID) + orgFields := OrganizationFields{} + _, err := o.client.Get(rawURL, &orgFields) + if err != nil { + return nil, err + } + return &orgFields, err +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the update request in a background job. Recommended: 'true'. Default to 'true'. + +func (o *organization) Update(guid string, req OrgUpdateRequest, opts ...bool) (*OrganizationFields, error) { + async := true + if len(opts) > 0 { + async = opts[0] + } + orgFields := OrganizationFields{} + rawURL := fmt.Sprintf("/v2/organizations/%s?async=%t", guid, async) + + _, err := o.client.Put(rawURL, req, &orgFields) + return &orgFields, err +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. Default to 'true'. +// opts[1] - recursive - Will delete all spaces, apps, services, routes, and private domains associated with the org. Default to 'false'. +// Deprecated: Use DeleteByRegion instead. 
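+// Editorial call sketch: both booleans are optional; the first maps to async
+// and the second to recursive, so a foreground, recursive delete would be:
+//
+//	err := orgAPI.Delete(orgGUID, false, true)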
+func (o *organization) Delete(guid string, opts ...bool) error {
+	async := true
+	recursive := false
+	if len(opts) > 0 {
+		async = opts[0]
+	}
+	if len(opts) > 1 {
+		recursive = opts[1]
+	}
+	rawURL := fmt.Sprintf("/v2/organizations/%s?async=%t&recursive=%t", guid, async, recursive)
+	_, err := o.client.Delete(rawURL)
+	return err
+}
+
+// opts is a list of boolean parameters
+// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. Default to 'true'.
+// opts[1] - recursive - Will delete all spaces, apps, services, routes, and private domains associated with the org. Default to 'false'.
+// region - specify the region where the org is to be deleted. To delete the org across all regions, pass 'all' as the region.
+func (o *organization) DeleteByRegion(guid string, region string, opts ...bool) error {
+	async := true
+	recursive := false
+	if len(opts) > 0 {
+		async = opts[0]
+	}
+	if len(opts) > 1 {
+		recursive = opts[1]
+	}
+
+	rawURL := fmt.Sprintf("/v2/organizations/%s?async=%t&recursive=%t&region=%s", guid, async, recursive, region)
+	_, err := o.client.Delete(rawURL)
+	return err
+}
+
+func (o *organization) List(region string) ([]Organization, error) {
+	req := rest.GetRequest("/v2/organizations")
+	if region != "" {
+		req.Query("region", region)
+	}
+	path, err := o.url(req)
+	if err != nil {
+		return []Organization{}, err
+	}
+
+	var orgs []Organization
+	err = o.listOrgResourcesWithPath(path, func(orgResource OrgResource) bool {
+		orgs = append(orgs, orgResource.ToFields())
+		return true
+	})
+	return orgs, err
+}
+
+//FindByName ...
+func (o *organization) FindByName(name string, region string) (*Organization, error) {
+	path, err := o.urlOfOrgWithName(name, region, false)
+	if err != nil {
+		return nil, err
+	}
+
+	var org Organization
+	var found bool
+	err = o.listOrgResourcesWithPath(path, func(orgResource OrgResource) bool {
+		org = orgResource.ToFields()
+		found = true
+		return false
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if found {
+		return &org, err
+	}
+
+	//May not be found and no error
+	return nil, bmxerror.New(ErrCodeOrgDoesnotExist,
+		fmt.Sprintf("Given org %q doesn't exist in the given region %q", name, region))
+
+}
+
+// GetRegionInformation gets the region information associated with this org.
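+// Editorial usage sketch:
+//
+//	regions, err := orgAPI.GetRegionInformation(orgGUID)
+//	if err == nil {
+//		for _, ri := range regions {
+//			fmt.Println(ri.Region, ri.ConsoleURL)
+//		}
+//	}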
+func (o *organization) GetRegionInformation(orgGUID string) ([]OrgRegionInformation, error) {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/regions", orgGUID)
+	var regionOrgInfo []OrgRegionInformation
+	_, err := o.client.Get(rawURL, &regionOrgInfo)
+	if err != nil {
+		return nil, err
+	}
+	return regionOrgInfo, nil
+}
+
+func (o *organization) listOrgResourcesWithPath(path string, cb func(OrgResource) bool) error {
+	_, err := o.client.GetPaginated(path, NewCCPaginatedResources(OrgResource{}), func(resource interface{}) bool {
+		if orgResource, ok := resource.(OrgResource); ok {
+			return cb(orgResource)
+		}
+		return false
+	})
+	return err
+}
+
+func (o *organization) urlOfOrgWithName(name string, region string, inline bool) (string, error) {
+	req := rest.GetRequest("/v2/organizations").Query("q", fmt.Sprintf("name:%s", name))
+	if region != "" {
+		req.Query("region", region)
+	}
+	if inline {
+		req.Query("inline-relations-depth", "1")
+	}
+	return o.url(req)
+}
+
+func (o *organization) url(req *rest.Request) (string, error) {
+	httpReq, err := req.Build()
+	if err != nil {
+		return "", err
+	}
+	return httpReq.URL.String(), nil
+}
+
+func (o *organization) associateOrgRole(url, userMail string) (*OrganizationFields, error) {
+	orgFields := OrganizationFields{}
+	_, err := o.client.Put(url, map[string]string{"username": userMail}, &orgFields)
+	if err != nil {
+		return nil, err
+	}
+	return &orgFields, nil
+}
+
+func (o *organization) removeOrgRole(url, userMail string) error {
+	orgFields := OrganizationFields{}
+	_, err := o.client.DeleteWithBody(url, map[string]string{"username": userMail}, &orgFields)
+	return err
+}
+func (o *organization) AssociateBillingManager(orgGUID string, userMail string) (*OrganizationFields, error) {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/billing_managers", orgGUID)
+	return o.associateOrgRole(rawURL, userMail)
+
+}
+func (o *organization) AssociateAuditor(orgGUID string, userMail string) (*OrganizationFields, error) {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/auditors", orgGUID)
+	return o.associateOrgRole(rawURL, userMail)
+}
+func (o *organization) AssociateManager(orgGUID string, userMail string) (*OrganizationFields, error) {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/managers", orgGUID)
+	return o.associateOrgRole(rawURL, userMail)
+}
+
+func (o *organization) AssociateUser(orgGUID string, userMail string) (*OrganizationFields, error) {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/users", orgGUID)
+	return o.associateOrgRole(rawURL, userMail)
+}
+
+func (o *organization) DisassociateBillingManager(orgGUID string, userMail string) error {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/billing_managers", orgGUID)
+	return o.removeOrgRole(rawURL, userMail)
+
+}
+func (o *organization) DisassociateAuditor(orgGUID string, userMail string) error {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/auditors", orgGUID)
+	return o.removeOrgRole(rawURL, userMail)
+}
+func (o *organization) DisassociateManager(orgGUID string, userMail string) error {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/managers", orgGUID)
+	return o.removeOrgRole(rawURL, userMail)
+}
+
+func (o *organization) DisassociateUser(orgGUID string, userMail string) error {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/users", orgGUID)
+	return o.removeOrgRole(rawURL, userMail)
+}
+
+func (o *organization) listOrgRolesWithPath(path string) ([]OrgRole, error) {
+	var orgRoles []OrgRole
+	_, err := o.client.GetPaginated(path, NewCCPaginatedResources(OrgRoleResource{}), func(resource
interface{}) bool { + if orgRoleResource, ok := resource.(OrgRoleResource); ok { + orgRoles = append(orgRoles, orgRoleResource.ToFields()) + return true + } + return false + }) + return orgRoles, err +} +func (o *organization) listOrgRoles(rawURL string, filters ...string) ([]OrgRole, error) { + req := rest.GetRequest(rawURL) + if len(filters) > 0 { + req.Query("q", strings.Join(filters, "")) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + return o.listOrgRolesWithPath(path) +} + +func (o *organization) ListBillingManager(orgGUID string, filters ...string) ([]OrgRole, error) { + rawURL := fmt.Sprintf("/v2/organizations/%s/billing_managers", orgGUID) + return o.listOrgRoles(rawURL, filters...) +} + +func (o *organization) ListManager(orgGUID string, filters ...string) ([]OrgRole, error) { + rawURL := fmt.Sprintf("/v2/organizations/%s/managers", orgGUID) + return o.listOrgRoles(rawURL, filters...) +} + +func (o *organization) ListAuditors(orgGUID string, filters ...string) ([]OrgRole, error) { + rawURL := fmt.Sprintf("/v2/organizations/%s/auditors", orgGUID) + return o.listOrgRoles(rawURL, filters...) +} + +func (o *organization) ListUsers(orgGUID string, filters ...string) ([]OrgRole, error) { + rawURL := fmt.Sprintf("/v2/organizations/%s/users", orgGUID) + return o.listOrgRoles(rawURL, filters...) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/paginate.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/paginate.go new file mode 100644 index 00000000000..da0c21e4db3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/paginate.go @@ -0,0 +1,39 @@ +package mccpv2 + +import ( + "encoding/json" + "reflect" + "strings" +) + +type GenericPaginatedResourcesHandler struct { + resourceType reflect.Type +} + +func NewCCPaginatedResources(resource interface{}) GenericPaginatedResourcesHandler { + return GenericPaginatedResourcesHandler{ + resourceType: reflect.TypeOf(resource), + } +} + +func (pr GenericPaginatedResourcesHandler) Resources(bytes []byte, curURL string) ([]interface{}, string, error) { + var paginatedResources = struct { + NextUrl string `json:"next_url"` + ResourcesBytes json.RawMessage `json:"resources"` + }{} + + err := json.Unmarshal(bytes, &paginatedResources) + + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + dc := json.NewDecoder(strings.NewReader(string(paginatedResources.ResourcesBytes))) + dc.UseNumber() + err = dc.Decode(slicePtr.Interface()) + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + + return contents, paginatedResources.NextUrl, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/private_domain.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/private_domain.go new file mode 100644 index 00000000000..4ed1bae2568 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/private_domain.go @@ -0,0 +1,173 @@ +package mccpv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +//ErrCodePrivateDomainDoesnotExist ... +var ErrCodePrivateDomainDoesnotExist = "PrivateDomainDoesnotExist" + +//PrivateDomainRequest ... 
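+// Editorial request sketch: both fields carry omitempty, so a typical create
+// call sets the domain name plus the owning org ("example.internal" is a
+// placeholder, not from upstream):
+//
+//	req := PrivateDomainRequest{Name: "example.internal", OrgGUID: orgGUID}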
+type PrivateDomainRequest struct {
+	Name    string `json:"name,omitempty"`
+	OrgGUID string `json:"owning_organization_guid,omitempty"`
+}
+
+//PrivateDomainMetadata ...
+type PrivateDomainMetadata struct {
+	GUID string `json:"guid"`
+	URL  string `json:"url"`
+}
+
+//PrivateDomainEntity ...
+type PrivateDomainEntity struct {
+	Name                   string `json:"name"`
+	OwningOrganizationGUID string `json:"owning_organization_guid"`
+	OwningOrganizationURL  string `json:"owning_organization_url"`
+	SharedOrganizationURL  string `json:"shared_organizations_url"`
+}
+
+//PrivateDomainResource ...
+type PrivateDomainResource struct {
+	Resource
+	Entity PrivateDomainEntity
+}
+
+//PrivateDomainFields ...
+type PrivateDomainFields struct {
+	Metadata PrivateDomainMetadata
+	Entity   PrivateDomainEntity
+}
+
+//ToFields ..
+func (resource PrivateDomainResource) ToFields() PrivateDomain {
+	entity := resource.Entity
+
+	return PrivateDomain{
+		GUID:                   resource.Metadata.GUID,
+		Name:                   entity.Name,
+		OwningOrganizationGUID: entity.OwningOrganizationGUID,
+		OwningOrganizationURL:  entity.OwningOrganizationURL,
+		SharedOrganizationURL:  entity.SharedOrganizationURL,
+	}
+}
+
+//PrivateDomain model
+type PrivateDomain struct {
+	GUID                   string
+	Name                   string
+	OwningOrganizationGUID string
+	OwningOrganizationURL  string
+	SharedOrganizationURL  string
+}
+
+//PrivateDomains ...
+type PrivateDomains interface {
+	FindByNameInOrg(orgGUID, domainName string) (*PrivateDomain, error)
+	FindByName(domainName string) (*PrivateDomain, error)
+	Create(req PrivateDomainRequest, opts ...bool) (*PrivateDomainFields, error)
+	Get(privateDomainGUID string) (*PrivateDomainFields, error)
+	Delete(privateDomainGUID string, opts ...bool) error
+}
+
+type privateDomain struct {
+	client *client.Client
+}
+
+func newPrivateDomainAPI(c *client.Client) PrivateDomains {
+	return &privateDomain{
+		client: c,
+	}
+}
+
+func (d *privateDomain) FindByNameInOrg(orgGUID, domainName string) (*PrivateDomain, error) {
+	rawURL := fmt.Sprintf("/v2/organizations/%s/private_domains", orgGUID)
+	req := rest.GetRequest(rawURL).Query("q", "name:"+domainName)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	domain, err := listPrivateDomainWithPath(d.client, path)
+	if err != nil {
+		return nil, err
+	}
+	if len(domain) == 0 {
+		return nil, bmxerror.New(ErrCodePrivateDomainDoesnotExist, fmt.Sprintf("Private Domain: %q doesn't exist", domainName))
+	}
+	return &domain[0], nil
+}
+
+func (d *privateDomain) FindByName(domainName string) (*PrivateDomain, error) {
+	rawURL := "/v2/private_domains"
+	req := rest.GetRequest(rawURL).Query("q", "name:"+domainName)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	domain, err := listPrivateDomainWithPath(d.client, path)
+	if err != nil {
+		return nil, err
+	}
+	if len(domain) == 0 {
+		return nil, bmxerror.New(ErrCodePrivateDomainDoesnotExist, fmt.Sprintf("Private Domain: %q doesn't exist", domainName))
+	}
+	return &domain[0], nil
+}
+
+func listPrivateDomainWithPath(c *client.Client, path string) ([]PrivateDomain, error) {
+	var privateDomain []PrivateDomain
+	_, err := c.GetPaginated(path, NewCCPaginatedResources(PrivateDomainResource{}), func(resource interface{}) bool {
+		if privateDomainResource, ok := resource.(PrivateDomainResource); ok {
+			privateDomain = append(privateDomain, privateDomainResource.ToFields())
+			return true
+		}
+		return false
+	})
+	return privateDomain, err
+}
+
+/* opts is a list of
boolean parameters
+opts[0] - async - Will run the create request in a background job. Recommended: 'true'. Default to 'true'.
+*/
+func (d *privateDomain) Create(req PrivateDomainRequest, opts ...bool) (*PrivateDomainFields, error) {
+	async := true
+	if len(opts) > 0 {
+		async = opts[0]
+	}
+	rawURL := fmt.Sprintf("/v2/private_domains?async=%t", async)
+	privateDomainFields := PrivateDomainFields{}
+	_, err := d.client.Post(rawURL, req, &privateDomainFields)
+	if err != nil {
+		return nil, err
+	}
+	return &privateDomainFields, nil
+}
+
+func (d *privateDomain) Get(privateDomainGUID string) (*PrivateDomainFields, error) {
+	rawURL := fmt.Sprintf("/v2/private_domains/%s", privateDomainGUID)
+	privateDomainFields := PrivateDomainFields{}
+	_, err := d.client.Get(rawURL, &privateDomainFields, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &privateDomainFields, nil
+}
+
+// opts is a list of boolean parameters
+// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. Default to 'true'.
+
+func (d *privateDomain) Delete(privateDomainGUID string, opts ...bool) error {
+	async := true
+	if len(opts) > 0 {
+		async = opts[0]
+	}
+	rawURL := fmt.Sprintf("/v2/private_domains/%s?async=%t", privateDomainGUID, async)
+	_, err := d.client.Delete(rawURL)
+	return err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/region.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/region.go
new file mode 100644
index 00000000000..ee6185e76bf
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/region.go
@@ -0,0 +1,75 @@
+package mccpv2
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/models"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+const mccpEndpointOfPublicBluemix = "https://mccp.ng.bluemix.net"
+
+//go:generate counterfeiter . 
RegionRepository
+type RegionRepository interface {
+	PublicRegions() ([]models.Region, error)
+	Regions() ([]models.Region, error)
+	FindRegionByName(name string) (*models.Region, error)
+	FindRegionById(id string) (*models.Region, error)
+}
+
+type region struct {
+	client *client.Client
+}
+
+func newRegionRepositoryAPI(c *client.Client) RegionRepository {
+	return &region{
+		client: c,
+	}
+}
+
+func (r *region) PublicRegions() ([]models.Region, error) {
+	return r.regions(mccpEndpointOfPublicBluemix)
+}
+
+func (r *region) Regions() ([]models.Region, error) {
+	return r.regions(*r.client.Config.Endpoint)
+}
+
+func (r *region) regions(endpoint string) ([]models.Region, error) {
+	var result []models.Region
+	resp, err := r.client.SendRequest(rest.GetRequest(endpoint+"/v2/regions"), &result)
+	if resp != nil && resp.StatusCode == http.StatusNotFound {
+		return []models.Region{}, nil
+	}
+	if err != nil {
+		return []models.Region{}, err
+	}
+	return result, nil
+}
+
+func (r *region) FindRegionByName(name string) (*models.Region, error) {
+	regions, err := r.Regions()
+	if err != nil {
+		return nil, err
+	}
+	for _, region := range regions {
+		if strings.EqualFold(region.Name, name) {
+			return &region, nil
+		}
+	}
+	return nil, nil
+}
+func (r *region) FindRegionById(id string) (*models.Region, error) {
+	regions, err := r.Regions()
+	if err != nil {
+		return nil, err
+	}
+	for _, region := range regions {
+		if strings.EqualFold(region.ID, id) {
+			return &region, nil
+		}
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/routes.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/routes.go
new file mode 100644
index 00000000000..e6fdc8aa9d3
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/routes.go
@@ -0,0 +1,201 @@
+package mccpv2
+
+import (
+	"fmt"
+
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+//ErrCodeRouteDoesnotExist ...
+var ErrCodeRouteDoesnotExist = "RouteDoesnotExist"
+
+//RouteRequest ...
+type RouteRequest struct {
+	Host       string `json:"host,omitempty"`
+	SpaceGUID  string `json:"space_guid"`
+	DomainGUID string `json:"domain_guid,omitempty"`
+	Path       string `json:"path,omitempty"`
+	Port       *int   `json:"port,omitempty"`
+}
+
+//RouteUpdateRequest ...
+type RouteUpdateRequest struct {
+	Host *string `json:"host,omitempty"`
+	Path *string `json:"path,omitempty"`
+	Port *int    `json:"port,omitempty"`
+}
+
+//RouteMetadata ...
+type RouteMetadata struct {
+	GUID string `json:"guid"`
+	URL  string `json:"url"`
+}
+
+//RouteEntity ...
+type RouteEntity struct {
+	Host                string `json:"host"`
+	Path                string `json:"path"`
+	DomainGUID          string `json:"domain_guid"`
+	SpaceGUID           string `json:"space_guid"`
+	ServiceInstanceGUID string `json:"service_instance_guid"`
+	Port                *int   `json:"port"`
+	DomainURL           string `json:"domain_url"`
+	SpaceURL            string `json:"space_url"`
+	AppsURL             string `json:"apps_url"`
+	RouteMappingURL     string `json:"route_mapping_url"`
+}
+
+//RouteResource ...
+type RouteResource struct {
+	Resource
+	Entity RouteEntity
+}
+
+//RouteFields ...
+type RouteFields struct {
+	Metadata RouteMetadata
+	Entity   RouteEntity
+}
+
+//ToFields ..
+func (resource RouteResource) ToFields() Route { + entity := resource.Entity + + return Route{ + GUID: resource.Metadata.GUID, + Host: entity.Host, + Path: entity.Path, + DomainGUID: entity.DomainGUID, + SpaceGUID: entity.SpaceGUID, + ServiceInstanceGUID: entity.ServiceInstanceGUID, + Port: entity.Port, + DomainURL: entity.DomainURL, + SpaceURL: entity.SpaceURL, + AppsURL: entity.AppsURL, + RouteMappingURL: entity.RouteMappingURL, + } +} + +//Route model +type Route struct { + GUID string + Host string + Path string + DomainGUID string + SpaceGUID string + ServiceInstanceGUID string + Port *int + DomainURL string + SpaceURL string + AppsURL string + RouteMappingURL string +} + +//Routes ... +type Routes interface { + Find(hostname, domainGUID string) ([]Route, error) + Create(req RouteRequest, opts ...bool) (*RouteFields, error) + Get(routeGUID string) (*RouteFields, error) + Update(routeGUID string, req RouteUpdateRequest, opts ...bool) (*RouteFields, error) + Delete(routeGUID string, opts ...bool) error +} + +type route struct { + client *client.Client +} + +func newRouteAPI(c *client.Client) Routes { + return &route{ + client: c, + } +} + +func (r *route) Get(routeGUID string) (*RouteFields, error) { + rawURL := fmt.Sprintf("/v2/routes/%s", routeGUID) + routeFields := RouteFields{} + _, err := r.client.Get(rawURL, &routeFields, nil) + if err != nil { + return nil, err + } + return &routeFields, nil +} + +func (r *route) Find(hostname, domainGUID string) ([]Route, error) { + rawURL := "/v2/routes?inline-relations-depth=1" + req := rest.GetRequest(rawURL).Query("q", "host:"+hostname+";domain_guid:"+domainGUID) + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + route, err := listRouteWithPath(r.client, path) + if err != nil { + return nil, err + } + return route, nil +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the create request in a background job. Recommended: 'true'. Default to 'true'. + +func (r *route) Create(req RouteRequest, opts ...bool) (*RouteFields, error) { + async := true + if len(opts) > 0 { + async = opts[0] + } + rawURL := fmt.Sprintf("/v2/routes?async=%t&inline-relations-depth=1", async) + routeFields := RouteFields{} + _, err := r.client.Post(rawURL, req, &routeFields) + if err != nil { + return nil, err + } + return &routeFields, nil +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the update request in a background job. Recommended: 'true'. Default to 'true'. + +func (r *route) Update(routeGUID string, req RouteUpdateRequest, opts ...bool) (*RouteFields, error) { + async := true + if len(opts) > 0 { + async = opts[0] + } + rawURL := fmt.Sprintf("/v2/routes/%s?async=%t", routeGUID, async) + routeFields := RouteFields{} + _, err := r.client.Put(rawURL, req, &routeFields) + if err != nil { + return nil, err + } + return &routeFields, nil +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. Default to 'true'. +// opts[1] - recursive - Will delete route service bindings and route mappings associated with the route. Default to 'false'. 
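+// Editorial call sketch (routeAPI is assumed to come from newRouteAPI(c)):
+// delete a route together with its service bindings and route mappings, as a
+// background job:
+//
+//	err := routeAPI.Delete(routeGUID, true, true)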
+ +func (r *route) Delete(routeGUID string, opts ...bool) error { + async := true + recursive := false + if len(opts) > 0 { + async = opts[0] + } + if len(opts) > 1 { + recursive = opts[1] + } + rawURL := fmt.Sprintf("/v2/routes/%s?async=%t&recursive=%t", routeGUID, async, recursive) + _, err := r.client.Delete(rawURL) + return err +} + +func listRouteWithPath(c *client.Client, path string) ([]Route, error) { + var route []Route + _, err := c.GetPaginated(path, NewCCPaginatedResources(RouteResource{}), func(resource interface{}) bool { + if routeResource, ok := resource.(RouteResource); ok { + route = append(route, routeResource.ToFields()) + return true + } + return false + }) + return route, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_bindings.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_bindings.go new file mode 100644 index 00000000000..e0ea0de91a0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_bindings.go @@ -0,0 +1,142 @@ +package mccpv2 + +import ( + "fmt" + "strings" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +//ServiceBindingRequest ... +type ServiceBindingRequest struct { + ServiceInstanceGUID string `json:"service_instance_guid"` + AppGUID string `json:"app_guid"` + Parameters string `json:"parameters,omitempty"` +} + +//ServiceBindingMetadata ... +type ServiceBindingMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//ServiceBindingEntity ... +type ServiceBindingEntity struct { + ServiceInstanceGUID string `json:"service_instance_guid"` + AppGUID string `json:"app_guid"` + Credentials map[string]interface{} `json:"credentials"` +} + +//ServiceBindingResource ... +type ServiceBindingResource struct { + Resource + Entity ServiceBindingEntity +} + +//ServiceBindingFields ... +type ServiceBindingFields struct { + Metadata ServiceBindingMetadata + Entity ServiceBindingEntity +} + +//ServiceBinding model +type ServiceBinding struct { + GUID string + ServiceInstanceGUID string + AppGUID string + Credentials map[string]interface{} +} + +//ToFields .. +func (resource ServiceBindingResource) ToFields() ServiceBinding { + entity := resource.Entity + + return ServiceBinding{ + GUID: resource.Metadata.GUID, + ServiceInstanceGUID: entity.ServiceInstanceGUID, + AppGUID: entity.AppGUID, + Credentials: entity.Credentials, + } +} + +//ServiceBindings ... 
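+// Editorial usage sketch (instanceGUID and appGUID are assumed to come from
+// earlier service-instance and app lookups):
+//
+//	sbAPI := newServiceBindingAPI(c)
+//	binding, err := sbAPI.Create(ServiceBindingRequest{
+//		ServiceInstanceGUID: instanceGUID,
+//		AppGUID:             appGUID,
+//	})
+//	if err == nil {
+//		fmt.Println(binding.Metadata.GUID)
+//	}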
+type ServiceBindings interface { + Create(req ServiceBindingRequest) (*ServiceBindingFields, error) + Get(guid string) (*ServiceBindingFields, error) + Delete(guid string, opts ...bool) error + List(filters ...string) ([]ServiceBinding, error) +} + +type serviceBinding struct { + client *client.Client +} + +func newServiceBindingAPI(c *client.Client) ServiceBindings { + return &serviceBinding{ + client: c, + } +} + +func (r *serviceBinding) Get(sbGUID string) (*ServiceBindingFields, error) { + rawURL := fmt.Sprintf("/v2/service_bindings/%s", sbGUID) + sbFields := ServiceBindingFields{} + _, err := r.client.Get(rawURL, &sbFields, nil) + if err != nil { + return nil, err + } + return &sbFields, nil +} + +func (r *serviceBinding) Create(req ServiceBindingRequest) (*ServiceBindingFields, error) { + rawURL := "/v2/service_bindings" + sbFields := ServiceBindingFields{} + _, err := r.client.Post(rawURL, req, &sbFields) + if err != nil { + return nil, err + } + return &sbFields, nil +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. Default to 'true'. + +func (r *serviceBinding) Delete(guid string, opts ...bool) error { + async := true + if len(opts) > 0 { + async = opts[0] + } + rawURL := fmt.Sprintf("/v2/service_bindings/%s?async=%t", guid, async) + _, err := r.client.Delete(rawURL) + return err +} + +func (r *serviceBinding) List(filters ...string) ([]ServiceBinding, error) { + rawURL := "/v2/service_bindings" + req := rest.GetRequest(rawURL) + if len(filters) > 0 { + req.Query("q", strings.Join(filters, "")) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + bindings, err := listServiceBindingWithPath(r.client, path) + if err != nil { + return nil, err + } + return bindings, nil +} + +func listServiceBindingWithPath(c *client.Client, path string) ([]ServiceBinding, error) { + var sb []ServiceBinding + _, err := c.GetPaginated(path, NewCCPaginatedResources(ServiceBindingResource{}), func(resource interface{}) bool { + if sbResource, ok := resource.(ServiceBindingResource); ok { + sb = append(sb, sbResource.ToFields()) + return true + } + return false + }) + return sb, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_instances.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_instances.go new file mode 100644 index 00000000000..5facad4c019 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_instances.go @@ -0,0 +1,266 @@ +package mccpv2 + +import ( + "fmt" + "strconv" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +//ServiceInstanceCreateRequest ... +type ServiceInstanceCreateRequest struct { + Name string `json:"name"` + SpaceGUID string `json:"space_guid"` + PlanGUID string `json:"service_plan_guid"` + Params map[string]interface{} `json:"parameters,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +//ServiceInstanceUpdateRequest ... +type ServiceInstanceUpdateRequest struct { + Name *string `json:"name,omitempty"` + PlanGUID *string `json:"service_plan_guid,omitempty"` + Params map[string]interface{} `json:"parameters,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +//ServiceInstance ... 
+type ServiceInstance struct { + GUID string + Name string `json:"name"` + Credentials map[string]interface{} `json:"credentials"` + ServicePlanGUID string `json:"service_plan_guid"` + SpaceGUID string `json:"space_guid"` + GatewayData string `json:"gateway_data"` + Type string `json:"type"` + DashboardURL string `json:"dashboard_url"` + LastOperation LastOperationFields `json:"last_operation"` + RouteServiceURL string `json:"routes_url"` + Tags []string `json:"tags"` + SpaceURL string `json:"space_url"` + ServicePlanURL string `json:"service_plan_url"` + ServiceBindingURL string `json:"service_bindings_url"` + ServiceKeysURL string `json:"service_keys_url"` + ServiceKeys []ServiceKeyFields `json:"service_keys"` + ServicePlan ServicePlanFields `json:"service_plan"` +} + +//ServiceInstanceFields ... +type ServiceInstanceFields struct { + Metadata ServiceInstanceMetadata + Entity ServiceInstance +} + +//ServiceInstanceMetadata ... +type ServiceInstanceMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//LastOperationFields ... +type LastOperationFields struct { + Type string `json:"type"` + State string `json:"state"` + Description string `json:"description"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +//ServiceInstanceResource ... +type ServiceInstanceResource struct { + Resource + Entity ServiceInstanceEntity +} + +//ServiceInstanceEntity ... +type ServiceInstanceEntity struct { + Name string `json:"name"` + Credentials map[string]interface{} `json:"credentials"` + ServicePlanGUID string `json:"service_plan_guid"` + SpaceGUID string `json:"space_guid"` + GatewayData string `json:"gateway_data"` + Type string `json:"type"` + DashboardURL string `json:"dashboard_url"` + LastOperation LastOperationFields `json:"last_operation"` + RouteServiceURL string `json:"routes_url"` + Tags []string `json:"tags"` + SpaceURL string `json:"space_url"` + ServicePlanURL string `json:"service_plan_url"` + ServiceBindingURL string `json:"service_bindings_url"` + ServiceKeysURL string `json:"service_keys_url"` + ServiceKeys []ServiceKeyFields `json:"service_keys"` + ServicePlan ServicePlanFields `json:"service_plan"` +} + +//ToModel ... +func (resource ServiceInstanceResource) ToModel() ServiceInstance { + + entity := resource.Entity + + return ServiceInstance{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + Credentials: entity.Credentials, + ServicePlanGUID: entity.ServicePlanGUID, + SpaceGUID: entity.SpaceGUID, + GatewayData: entity.GatewayData, + Type: entity.Type, + LastOperation: entity.LastOperation, + RouteServiceURL: entity.RouteServiceURL, + DashboardURL: entity.DashboardURL, + Tags: entity.Tags, + SpaceURL: entity.SpaceURL, + ServicePlanURL: entity.ServicePlanURL, + ServiceBindingURL: entity.ServiceBindingURL, + ServiceKeysURL: entity.ServiceKeysURL, + } +} + +//ServiceInstances ... 
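+// Editorial usage sketch:
+//
+//	siAPI := newServiceInstanceAPI(c)
+//	inst, err := siAPI.FindByNameInSpace(spaceGUID, "my-db")
+//	if err == nil {
+//		fields, _ := siAPI.Get(inst.GUID, 1) // depth 1 inlines relations
+//		fmt.Println(fields.Entity.DashboardURL)
+//	}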
+type ServiceInstances interface { + Create(req ServiceInstanceCreateRequest) (*ServiceInstanceFields, error) + Update(instanceGUID string, req ServiceInstanceUpdateRequest) (*ServiceInstanceFields, error) + Delete(instanceGUID string, opts ...bool) error + FindByName(instanceName string) (*ServiceInstance, error) + FindByNameInSpace(spaceGUID string, instanceName string) (*ServiceInstance, error) + Get(instanceGUID string, depth ...int) (*ServiceInstanceFields, error) + ListServiceBindings(instanceGUID string) ([]ServiceBinding, error) +} + +type serviceInstance struct { + client *client.Client +} + +func newServiceInstanceAPI(c *client.Client) ServiceInstances { + return &serviceInstance{ + client: c, + } +} + +func (s *serviceInstance) Create(req ServiceInstanceCreateRequest) (*ServiceInstanceFields, error) { + rawURL := "/v2/service_instances?accepts_incomplete=true" + serviceFields := ServiceInstanceFields{} + _, err := s.client.Post(rawURL, req, &serviceFields) + if err != nil { + return nil, err + } + return &serviceFields, nil +} + +func (s *serviceInstance) Get(instanceGUID string, depth ...int) (*ServiceInstanceFields, error) { + rawURL := fmt.Sprintf("/v2/service_instances/%s", instanceGUID) + req := rest.GetRequest(rawURL) + if len(depth) > 0 { + req.Query("inline-relations-depth", strconv.Itoa(depth[0])) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + + serviceFields := ServiceInstanceFields{} + _, err = s.client.Get(path, &serviceFields) + if err != nil { + return nil, err + } + return &serviceFields, err +} + +func (s *serviceInstance) FindByName(instanceName string) (*ServiceInstance, error) { + req := rest.GetRequest("/v2/service_instances") + req.Query("return_user_provided_service_instances", "true") + if instanceName != "" { + req.Query("q", "name:"+instanceName) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + services, err := listServicesWithPath(s.client, path) + if err != nil { + return nil, err + } + if len(services) == 0 { + return nil, fmt.Errorf("Service instance: %q doesn't exist", instanceName) + } + return &services[0], nil +} + +func (s *serviceInstance) FindByNameInSpace(spaceGUID string, instanceName string) (*ServiceInstance, error) { + req := rest.GetRequest(fmt.Sprintf("/v2/spaces/%s/service_instances", spaceGUID)) + req.Query("return_user_provided_service_instances", "true") + if instanceName != "" { + req.Query("q", "name:"+instanceName) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + services, err := listServicesWithPath(s.client, path) + if err != nil { + return nil, err + } + if len(services) == 0 { + return nil, fmt.Errorf("Service instance: %q doesn't exist in the space %s", instanceName, spaceGUID) + } + return &services[0], nil +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. Default to 'true'. +// opts[1] - recursive - Will delete service bindings, service keys, and routes associated with the service instance. Default to 'false'. 
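+// Editorial call sketch: background-delete an instance along with its
+// bindings, keys, and routes:
+//
+//	err := siAPI.Delete(instanceGUID, true, true)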
+
+func (s *serviceInstance) Delete(instanceGUID string, opts ...bool) error {
+	async := true
+	recursive := false
+	if len(opts) > 0 {
+		async = opts[0]
+	}
+	if len(opts) > 1 {
+		recursive = opts[1]
+	}
+	rawURL := fmt.Sprintf("/v2/service_instances/%s?accepts_incomplete=true&async=%t&recursive=%t", instanceGUID, async, recursive)
+	_, err := s.client.Delete(rawURL)
+	return err
+}
+
+func (s *serviceInstance) Update(instanceGUID string, req ServiceInstanceUpdateRequest) (*ServiceInstanceFields, error) {
+	rawURL := fmt.Sprintf("/v2/service_instances/%s?accepts_incomplete=true", instanceGUID)
+	serviceFields := ServiceInstanceFields{}
+	_, err := s.client.Put(rawURL, req, &serviceFields)
+	if err != nil {
+		return nil, err
+	}
+	return &serviceFields, nil
+}
+
+func (s *serviceInstance) ListServiceBindings(instanceGUID string) ([]ServiceBinding, error) {
+	rawURL := fmt.Sprintf("/v2/service_instances/%s/service_bindings", instanceGUID)
+	req := rest.GetRequest(rawURL)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	sb, err := listServiceBindingWithPath(s.client, path)
+	if err != nil {
+		return nil, err
+	}
+	return sb, nil
+}
+
+func listServicesWithPath(client *client.Client, path string) ([]ServiceInstance, error) {
+	var services []ServiceInstance
+	_, err := client.GetPaginated(path, NewCCPaginatedResources(ServiceInstanceResource{}), func(resource interface{}) bool {
+		if serviceInstanceResource, ok := resource.(ServiceInstanceResource); ok {
+			services = append(services, serviceInstanceResource.ToModel())
+			return true
+		}
+		return false
+	})
+	return services, err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_keys.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_keys.go
new file mode 100644
index 00000000000..60875a682dc
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_keys.go
@@ -0,0 +1,173 @@
+package mccpv2
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+//ErrCodeServiceKeyDoesNotExist ...
+const ErrCodeServiceKeyDoesNotExist = "ServiceKeyDoesNotExist"
+
+//ServiceKeyRequest ...
+type ServiceKeyRequest struct {
+	Name                string                 `json:"name"`
+	ServiceInstanceGUID string                 `json:"service_instance_guid"`
+	Params              map[string]interface{} `json:"parameters,omitempty"`
+}
+
+//ServiceKey model...
+type ServiceKey struct {
+	GUID                string
+	Name                string                 `json:"name"`
+	ServiceInstanceGUID string                 `json:"service_instance_guid"`
+	ServiceInstanceURL  string                 `json:"service_instance_url"`
+	Credentials         map[string]interface{} `json:"credentials"`
+}
+
+//ServiceKeyFields ...
+type ServiceKeyFields struct {
+	Metadata ServiceKeyMetadata
+	Entity   ServiceKey
+}
+
+//ServiceKeyMetadata ...
+type ServiceKeyMetadata struct {
+	GUID      string `json:"guid"`
+	URL       string `json:"url"`
+	CreatedAt string `json:"created_at"`
+	UpdatedAt string `json:"updated_at"`
+}
+
+//ServiceKeyResource ...
+type ServiceKeyResource struct {
+	Resource
+	Entity ServiceKeyEntity
+}
+
+//ServiceKeyEntity ...
+type ServiceKeyEntity struct {
+	Name                string                 `json:"name"`
+	ServiceInstanceGUID string                 `json:"service_instance_guid"`
+	ServiceInstanceURL  string                 `json:"service_instance_url"`
+	Credentials         map[string]interface{} `json:"credentials"`
+}
+
+//ToModel ...
+func (resource ServiceKeyResource) ToModel() ServiceKey { + + entity := resource.Entity + + return ServiceKey{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + ServiceInstanceGUID: entity.ServiceInstanceGUID, + ServiceInstanceURL: entity.ServiceInstanceURL, + Credentials: entity.Credentials, + } +} + +//ServiceKeys ... +type ServiceKeys interface { + Create(serviceInstanceGUID string, keyName string, params map[string]interface{}) (*ServiceKeyFields, error) + FindByName(serviceInstanceGUID string, keyName string) (*ServiceKey, error) + Get(serviceKeyGUID string) (*ServiceKeyFields, error) + Delete(serviceKeyGUID string) error + List(filters ...string) ([]ServiceKey, error) +} + +type serviceKey struct { + client *client.Client +} + +func newServiceKeyAPI(c *client.Client) ServiceKeys { + return &serviceKey{ + client: c, + } +} + +func (r *serviceKey) Create(serviceInstanceGUID string, keyName string, params map[string]interface{}) (*ServiceKeyFields, error) { + serviceKeyFields := ServiceKeyFields{} + reqParam := ServiceKeyRequest{ + ServiceInstanceGUID: serviceInstanceGUID, + Name: keyName, + Params: params, + } + _, err := r.client.Post("/v2/service_keys", reqParam, &serviceKeyFields) + if err != nil { + return nil, err + } + return &serviceKeyFields, nil +} + +func (r *serviceKey) Delete(serviceKeyGUID string) error { + rawURL := fmt.Sprintf("/v2/service_keys/%s", serviceKeyGUID) + _, err := r.client.Delete(rawURL) + return err +} + +func (r *serviceKey) Get(guid string) (*ServiceKeyFields, error) { + rawURL := fmt.Sprintf("/v2/service_keys/%s", guid) + serviceKeyFields := ServiceKeyFields{} + _, err := r.client.Get(rawURL, &serviceKeyFields) + if err != nil { + return nil, err + } + + return &serviceKeyFields, err +} + +func (r *serviceKey) FindByName(serviceInstanceGUID string, keyName string) (*ServiceKey, error) { + rawURL := fmt.Sprintf("/v2/service_instances/%s/service_keys", serviceInstanceGUID) + req := rest.GetRequest(rawURL) + if keyName != "" { + req.Query("q", "name:"+keyName) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + serviceKeys, err := r.listServiceKeysWithPath(path) + if err != nil { + return nil, err + } + if len(serviceKeys) == 0 { + return nil, bmxerror.New(ErrCodeServiceKeyDoesNotExist, + fmt.Sprintf("Given service key %q doesn't exist for the given service instance %q", keyName, serviceInstanceGUID)) + } + return &serviceKeys[0], nil +} + +func (r *serviceKey) List(filters ...string) ([]ServiceKey, error) { + rawURL := "/v2/service_keys" + req := rest.GetRequest(rawURL) + if len(filters) > 0 { + req.Query("q", strings.Join(filters, "")) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + keys, err := r.listServiceKeysWithPath(path) + if err != nil { + return nil, err + } + return keys, nil +} + +func (r *serviceKey) listServiceKeysWithPath(path string) ([]ServiceKey, error) { + var serviceKeys []ServiceKey + _, err := r.client.GetPaginated(path, NewCCPaginatedResources(ServiceKeyResource{}), func(resource interface{}) bool { + if serviceKeyResource, ok := resource.(ServiceKeyResource); ok { + serviceKeys = append(serviceKeys, serviceKeyResource.ToModel()) + return true + } + return false + }) + return serviceKeys, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_offerings.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_offerings.go new file mode 100644 index 00000000000..59a0edfca68 --- 
/dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_offerings.go @@ -0,0 +1,168 @@ +package mccpv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +//ErrCodeServiceDoesnotExist ... +const ErrCodeServiceDoesnotExist = "ServiceDoesnotExist" + +//ServiceOffering model +type ServiceOffering struct { + GUID string + Label string `json:"label"` + Provider string `json:"provider"` + Description string `json:"description"` + LongDescription string `json:"long_description"` + Version string `json:"version"` + URL string `json:"url"` + InfoURL string `json:"info_url"` + DocumentURL string `json:"documentation_url"` + Timeout string `json:"timeout"` + UniqueID string `json:"unique_id"` + ServiceBrokerGUID string `json:"service_broker_guid"` + ServicePlansURL string `json:"service_plans_url"` + Tags []string `json:"tags"` + Requires []string `json:"requires"` + IsActive bool `json:"active"` + IsBindable bool `json:"bindable"` + IsPlanUpdateable bool `json:"plan_updateable"` +} + +//ServiceOfferingResource ... +type ServiceOfferingResource struct { + Resource + Entity ServiceOfferingEntity +} + +//ServiceOfferingEntity ... +type ServiceOfferingEntity struct { + Label string `json:"label"` + Provider string `json:"provider"` + Description string `json:"description"` + LongDescription string `json:"long_description"` + Version string `json:"version"` + URL string `json:"url"` + InfoURL string `json:"info_url"` + DocumentURL string `json:"documentation_url"` + Timeout string `json:"timeout"` + UniqueID string `json:"unique_id"` + ServiceBrokerGUID string `json:"service_broker_guid"` + ServicePlansURL string `json:"service_plans_url"` + Tags []string `json:"tags"` + Requires []string `json:"requires"` + IsActive bool `json:"active"` + IsBindable bool `json:"bindable"` + IsPlanUpdateable bool `json:"plan_updateable"` +} + +//ToFields ... +func (resource ServiceOfferingResource) ToFields() ServiceOffering { + entity := resource.Entity + + return ServiceOffering{ + GUID: resource.Metadata.GUID, + Label: entity.Label, + Provider: entity.Provider, + Description: entity.Description, + LongDescription: entity.LongDescription, + Version: entity.Version, + URL: entity.URL, + InfoURL: entity.InfoURL, + DocumentURL: entity.DocumentURL, + Timeout: entity.Timeout, + UniqueID: entity.UniqueID, + ServiceBrokerGUID: entity.ServiceBrokerGUID, + ServicePlansURL: entity.ServicePlansURL, + Tags: entity.Tags, + Requires: entity.Requires, + IsActive: entity.IsActive, + IsBindable: entity.IsBindable, + IsPlanUpdateable: entity.IsPlanUpdateable, + } +} + +//ServiceOfferingFields ... +type ServiceOfferingFields struct { + Metadata ServiceOfferingMetadata + Entity ServiceOffering +} + +//ServiceOfferingMetadata ... +type ServiceOfferingMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//ServiceOfferings ... 
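+// Editorial usage sketch (offeringsAPI and plansAPI are assumed to come from
+// this package's newServiceOfferingAPI and newServicePlanAPI constructors;
+// the label and plan names are placeholders): a label lookup is typically
+// chained with a plan lookup from service_plans.go:
+//
+//	offering, err := offeringsAPI.FindByLabel("cloudantNoSQLDB")
+//	if err == nil {
+//		plan, _ := plansAPI.FindPlanInServiceOffering(offering.GUID, "Lite")
+//		fmt.Println(plan.GUID)
+//	}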
+type ServiceOfferings interface {
+	FindByLabel(serviceName string) (*ServiceOffering, error)
+	Get(svcOfferingGUID string) (*ServiceOfferingFields, error)
+}
+
+type serviceOfferings struct {
+	client *client.Client
+}
+
+func newServiceOfferingAPI(c *client.Client) ServiceOfferings {
+	return &serviceOfferings{
+		client: c,
+	}
+}
+
+func (s *serviceOfferings) Get(svcGUID string) (*ServiceOfferingFields, error) {
+	rawURL := fmt.Sprintf("/v2/services/%s", svcGUID)
+	svcFields := ServiceOfferingFields{}
+	_, err := s.client.Get(rawURL, &svcFields)
+	if err != nil {
+		return nil, err
+	}
+	return &svcFields, err
+}
+
+func (s *serviceOfferings) FindByLabel(serviceName string) (*ServiceOffering, error) {
+	req := rest.GetRequest("/v2/services")
+	if serviceName != "" {
+		req.Query("q", "label:"+serviceName)
+	}
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	var services ServiceOffering
+	var found bool
+	err = s.listServicesOfferingWithPath(path, func(serviceOfferingResource ServiceOfferingResource) bool {
+		services = serviceOfferingResource.ToFields()
+		found = true
+		return false
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if found {
+		return &services, err
+	}
+	//May not be found and no error
+
+	return nil, bmxerror.New(ErrCodeServiceDoesnotExist,
+		fmt.Sprintf("Given service %q doesn't exist", serviceName))
+
+}
+
+func (s *serviceOfferings) listServicesOfferingWithPath(path string, cb func(ServiceOfferingResource) bool) error {
+	_, err := s.client.GetPaginated(path, NewCCPaginatedResources(ServiceOfferingResource{}), func(resource interface{}) bool {
+		if serviceOfferingResource, ok := resource.(ServiceOfferingResource); ok {
+			return cb(serviceOfferingResource)
+		}
+		return false
+	})
+	return err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_plans.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_plans.go
new file mode 100644
index 00000000000..e4b477c8837
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/service_plans.go
@@ -0,0 +1,139 @@
+package mccpv2
+
+import (
+	"fmt"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/IBM-Cloud/bluemix-go/client"
+	"github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+//ErrCodeServicePlanDoesNotExist ...
+const ErrCodeServicePlanDoesNotExist = "ServicePlanDoesNotExist"
+
+//ServicePlan ...
+type ServicePlan struct {
+	GUID                string
+	Name                string `json:"name"`
+	Description         string `json:"description"`
+	IsFree              bool   `json:"free"`
+	IsPublic            bool   `json:"public"`
+	IsActive            bool   `json:"active"`
+	ServiceGUID         string `json:"service_guid"`
+	UniqueID            string `json:"unique_id"`
+	ServiceInstancesURL string `json:"service_instances_url"`
+}
+
+//ServicePlanResource ...
+type ServicePlanResource struct {
+	Resource
+	Entity ServicePlanEntity
+}
+
+//ServicePlanEntity ...
+type ServicePlanEntity struct {
+	Name                string `json:"name"`
+	Description         string `json:"description"`
+	IsFree              bool   `json:"free"`
+	IsPublic            bool   `json:"public"`
+	IsActive            bool   `json:"active"`
+	ServiceGUID         string `json:"service_guid"`
+	UniqueID            string `json:"unique_id"`
+	ServiceInstancesURL string `json:"service_instances_url"`
+}
+
+//ToFields ...
+func (resource ServicePlanResource) ToFields() ServicePlan { + entity := resource.Entity + + return ServicePlan{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + Description: entity.Description, + IsFree: entity.IsFree, + IsPublic: entity.IsPublic, + IsActive: entity.IsActive, + ServiceGUID: entity.ServiceGUID, + UniqueID: entity.UniqueID, + ServiceInstancesURL: entity.ServiceInstancesURL, + } +} + +//ServicePlanFields ... +type ServicePlanFields struct { + Metadata ServicePlanMetadata + Entity ServicePlan +} + +//ServicePlanMetadata ... +type ServicePlanMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//ServicePlans ... +type ServicePlans interface { + FindPlanInServiceOffering(serviceOfferingGUID string, planType string) (*ServicePlan, error) + Get(planGUID string) (*ServicePlanFields, error) +} + +type servicePlan struct { + client *client.Client +} + +func newServicePlanAPI(c *client.Client) ServicePlans { + return &servicePlan{ + client: c, + } +} + +func (s *servicePlan) Get(planGUID string) (*ServicePlanFields, error) { + rawURL := fmt.Sprintf("/v2/service_plans/%s", planGUID) + planFields := ServicePlanFields{} + _, err := s.client.Get(rawURL, &planFields) + if err != nil { + return nil, err + } + return &planFields, err +} + +func (s *servicePlan) FindPlanInServiceOffering(serviceOfferingGUID string, planType string) (*ServicePlan, error) { + req := rest.GetRequest("/v2/service_plans") + if serviceOfferingGUID != "" { + req.Query("q", "service_guid:"+serviceOfferingGUID) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + plans, err := s.listServicesPlanWithPath(path) + if err != nil { + return nil, err + } + if len(plans) == 0 { + return nil, bmxerror.New(ErrCodeServicePlanDoesNotExist, + fmt.Sprintf("Given plan %q doesn't exist for the service %q", planType, serviceOfferingGUID)) + } + for _, p := range plans { + if p.Name == planType { + return &p, nil + } + + } + return nil, bmxerror.New(ErrCodeServicePlanDoesNotExist, + fmt.Sprintf("Given plan %q doesn't exist for the service %q", planType, serviceOfferingGUID)) + +} + +func (s *servicePlan) listServicesPlanWithPath(path string) ([]ServicePlan, error) { + var servicePlans []ServicePlan + _, err := s.client.GetPaginated(path, NewCCPaginatedResources(ServicePlanResource{}), func(resource interface{}) bool { + if servicePlanResource, ok := resource.(ServicePlanResource); ok { + servicePlans = append(servicePlans, servicePlanResource.ToFields()) + return true + } + return false + }) + return servicePlans, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/shared_domain.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/shared_domain.go new file mode 100644 index 00000000000..b5fb3f94bf5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/shared_domain.go @@ -0,0 +1,151 @@ +package mccpv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +//ErrCodeSharedDomainDoesnotExist ... +var ErrCodeSharedDomainDoesnotExist = "SharedDomainDoesnotExist" + +//SharedDomainRequest ... +type SharedDomainRequest struct { + Name string `json:"name"` + RouterGroupGUID string `json:"router_group_guid,omitempty"` +} + +//SharedDomaineMetadata ... +type SharedDomainMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//SharedDomainEntity ... 
+type SharedDomainEntity struct { + Name string `json:"name"` + RouterGroupGUID string `json:"router_group_guid"` + RouterGroupType string `json:"router_group_type"` +} + +//SharedDomainResource ... +type SharedDomainResource struct { + Resource + Entity SharedDomainEntity +} + +//SharedDomainFields ... +type SharedDomainFields struct { + Metadata SharedDomainMetadata + Entity SharedDomainEntity +} + +//ToFields .. +func (resource SharedDomainResource) ToFields() SharedDomain { + entity := resource.Entity + + return SharedDomain{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + RouterGroupGUID: entity.RouterGroupGUID, + RouterGroupType: entity.RouterGroupType, + } +} + +//SharedDomain model +type SharedDomain struct { + GUID string + Name string + RouterGroupGUID string + RouterGroupType string +} + +//SharedDomains ... +type SharedDomains interface { + FindByName(domainName string) (*SharedDomain, error) + Create(req SharedDomainRequest, opts ...bool) (*SharedDomainFields, error) + Get(sharedDomainGUID string) (*SharedDomainFields, error) + Delete(sharedDomainGUID string, opts ...bool) error +} + +type sharedDomain struct { + client *client.Client +} + +func newSharedDomainAPI(c *client.Client) SharedDomains { + return &sharedDomain{ + client: c, + } +} + +func (d *sharedDomain) FindByName(domainName string) (*SharedDomain, error) { + rawURL := "/v2/shared_domains" + req := rest.GetRequest(rawURL).Query("q", "name:"+domainName) + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + domain, err := listSharedDomainWithPath(d.client, path) + if err != nil { + return nil, err + } + if len(domain) == 0 { + return nil, bmxerror.New(ErrCodeSharedDomainDoesnotExist, fmt.Sprintf("Shared Domain: %q doesn't exist", domainName)) + } + return &domain[0], nil +} + +func listSharedDomainWithPath(c *client.Client, path string) ([]SharedDomain, error) { + var sharedDomain []SharedDomain + _, err := c.GetPaginated(path, NewCCPaginatedResources(SharedDomainResource{}), func(resource interface{}) bool { + if sharedDomainResource, ok := resource.(SharedDomainResource); ok { + sharedDomain = append(sharedDomain, sharedDomainResource.ToFields()) + return true + } + return false + }) + return sharedDomain, err +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the create request in a background job. Recommended: 'true'. Default to 'true' + +func (d *sharedDomain) Create(req SharedDomainRequest, opts ...bool) (*SharedDomainFields, error) { + async := true + if len(opts) > 0 { + async = opts[0] + } + rawURL := fmt.Sprintf("/v2/shared_domains?async=%t", async) + sharedDomainFields := SharedDomainFields{} + _, err := d.client.Post(rawURL, req, &sharedDomainFields) + if err != nil { + return nil, err + } + return &sharedDomainFields, nil +} + +func (d *sharedDomain) Get(sharedDomainGUID string) (*SharedDomainFields, error) { + rawURL := fmt.Sprintf("/v2/shared_domains/%s", sharedDomainGUID) + sharedDomainFields := SharedDomainFields{} + _, err := d.client.Get(rawURL, &sharedDomainFields, nil) + if err != nil { + return nil, err + } + return &sharedDomainFields, nil +} + +// opts is list of boolean parametes +// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. 
Default to 'true' + +func (d *sharedDomain) Delete(sharedDomainGUID string, opts ...bool) error { + async := true + if len(opts) > 0 { + async = opts[0] + } + rawURL := fmt.Sprintf("/v2/shared_domains/%s?async=%t", sharedDomainGUID, async) + _, err := d.client.Delete(rawURL) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/space_quota.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/space_quota.go new file mode 100644 index 00000000000..555ba3ece87 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/space_quota.go @@ -0,0 +1,212 @@ +package mccpv2 + +import ( + "encoding/json" + "fmt" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +//SpaceQuotaCreateRequest ... +type SpaceQuotaCreateRequest struct { + Name string `json:"name"` + OrgGUID string `json:"organization_guid"` + MemoryLimitInMB int64 `json:"memory_limit,omitempty"` + InstanceMemoryLimitInMB int64 `json:"instance_memory_limit,omitempty"` + RoutesLimit int `json:"total_routes,omitempty"` + ServicesLimit int `json:"total_services,omitempty"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` +} + +//SpaceQuotaUpdateRequest ... +type SpaceQuotaUpdateRequest struct { + Name string `json:"name"` + OrgGUID string `json:"organization_guid,omitempty"` + MemoryLimitInMB int64 `json:"memory_limit,omitempty"` + InstanceMemoryLimitInMB int64 `json:"instance_memory_limit,omitempty"` + RoutesLimit int `json:"total_routes,omitempty"` + ServicesLimit int `json:"total_services,omitempty"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` +} + +type SpaceQuota struct { + GUID string + Name string + NonBasicServicesAllowed bool + ServicesLimit int + RoutesLimit int + MemoryLimitInMB int64 + InstanceMemoryLimitInMB int64 + TrialDBAllowed bool + AppInstanceLimit int + PrivateDomainsLimit int + AppTaskLimit int +} + +//SpaceQuotaFields ... +type SpaceQuotaFields struct { + Metadata SpaceQuotaMetadata + Entity SpaceQuotaEntity +} + +//SpaceQuotaMetadata ... +type SpaceQuotaMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//ErrCodeSpaceQuotaDoesnotExist ... 
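+//
+// Usage sketch (illustrative only; the SpaceQuotas repository defined below comes
+// from the mccpv2 client API, and the GUID values are hypothetical):
+//
+//	var quotas SpaceQuotas
+//	created, err := quotas.Create(SpaceQuotaCreateRequest{
+//		Name:            "dev-quota",
+//		OrgGUID:         "org-guid",
+//		MemoryLimitInMB: 2048,
+//	})
+//	if err == nil {
+//		_ = created.Metadata.GUID // usable as a space quota GUID when creating spaces
+//	}
+//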
+const ErrCodeSpaceQuotaDoesnotExist = "SpaceQuotaDoesnotExist" + +type SpaceQuotaResource struct { + Resource + Entity SpaceQuotaEntity +} + +type SpaceQuotaEntity struct { + Name string `json:"name"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` + ServicesLimit int `json:"total_services"` + RoutesLimit int `json:"total_routes"` + MemoryLimitInMB int64 `json:"memory_limit"` + InstanceMemoryLimitInMB int64 `json:"instance_memory_limit"` + TrialDBAllowed bool `json:"trial_db_allowed"` + AppInstanceLimit json.Number `json:"app_instance_limit"` + PrivateDomainsLimit json.Number `json:"total_private_domains"` + AppTaskLimit json.Number `json:"app_task_limit"` +} + +func (resource SpaceQuotaResource) ToFields() SpaceQuota { + entity := resource.Entity + + return SpaceQuota{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + NonBasicServicesAllowed: entity.NonBasicServicesAllowed, + ServicesLimit: entity.ServicesLimit, + RoutesLimit: entity.RoutesLimit, + MemoryLimitInMB: entity.MemoryLimitInMB, + InstanceMemoryLimitInMB: entity.InstanceMemoryLimitInMB, + TrialDBAllowed: entity.TrialDBAllowed, + AppInstanceLimit: NumberToInt(entity.AppInstanceLimit, -1), + PrivateDomainsLimit: NumberToInt(entity.PrivateDomainsLimit, -1), + AppTaskLimit: NumberToInt(entity.AppTaskLimit, -1), + } +} + +//SpaceQuotas ... +type SpaceQuotas interface { + FindByName(name, orgGUID string) (*SpaceQuota, error) + Create(createRequest SpaceQuotaCreateRequest) (*SpaceQuotaFields, error) + Update(updateRequest SpaceQuotaUpdateRequest, spaceQuotaGUID string) (*SpaceQuotaFields, error) + Delete(spaceQuotaGUID string, opts ...bool) error + Get(spaceQuotaGUID string) (*SpaceQuotaFields, error) +} + +type spaceQuota struct { + client *client.Client +} + +func newSpaceQuotasAPI(c *client.Client) SpaceQuotas { + return &spaceQuota{ + client: c, + } +} + +func (r *spaceQuota) FindByName(name, orgGUID string) (*SpaceQuota, error) { + rawURL := fmt.Sprintf("/v2/organizations/%s/space_quota_definitions", orgGUID) + req := rest.GetRequest(rawURL) + + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + + spaceQuotas, err := r.listSpaceQuotaWithPath(path) + if err != nil { + return nil, err + } + + if len(spaceQuotas) == 0 { + return nil, bmxerror.New(ErrCodeSpaceQuotaDoesnotExist, + fmt.Sprintf("Given space quota %q doesn't exist for the organization %q", name, orgGUID)) + } + + for _, q := range spaceQuotas { + if q.Name == name { + return &q, nil + } + + } + return nil, bmxerror.New(ErrCodeSpaceQuotaDoesnotExist, + fmt.Sprintf("Given space quota %q doesn't exist for the organization %q", name, orgGUID)) +} + +func (r *spaceQuota) listSpaceQuotaWithPath(path string) ([]SpaceQuota, error) { + var spaceQuota []SpaceQuota + _, err := r.client.GetPaginated(path, NewCCPaginatedResources(SpaceQuotaResource{}), func(resource interface{}) bool { + if spaceQuotaResource, ok := resource.(SpaceQuotaResource); ok { + spaceQuota = append(spaceQuota, spaceQuotaResource.ToFields()) + return true + } + return false + }) + return spaceQuota, err +} + +func (r *spaceQuota) Create(createRequest SpaceQuotaCreateRequest) (*SpaceQuotaFields, error) { + rawURL := "/v2/space_quota_definitions" + spaceQuotaFields := SpaceQuotaFields{} + _, err := r.client.Post(rawURL, createRequest, &spaceQuotaFields) + if err != nil { + return nil, err + } + return &spaceQuotaFields, nil +} + +func (r *spaceQuota) Get(spaceQuotaGUID string) (*SpaceQuotaFields, error) { + rawURL := 
fmt.Sprintf("/v2/space_quota_definitions/%s", spaceQuotaGUID)
+ spaceQuotaFields := SpaceQuotaFields{}
+ _, err := r.client.Get(rawURL, &spaceQuotaFields)
+ if err != nil {
+ return nil, err
+ }
+
+ return &spaceQuotaFields, err
+}
+
+func (r *spaceQuota) Update(updateRequest SpaceQuotaUpdateRequest, spaceQuotaGUID string) (*SpaceQuotaFields, error) {
+ rawURL := fmt.Sprintf("/v2/space_quota_definitions/%s", spaceQuotaGUID)
+ spaceQuotaFields := SpaceQuotaFields{}
+ _, err := r.client.Put(rawURL, updateRequest, &spaceQuotaFields)
+ if err != nil {
+ return nil, err
+ }
+ return &spaceQuotaFields, nil
+}
+
+// opts is a list of boolean parameters
+// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. Default to 'true'.
+
+func (r *spaceQuota) Delete(spaceQuotaGUID string, opts ...bool) error {
+ async := true
+ if len(opts) > 0 {
+ async = opts[0]
+ }
+ rawURL := fmt.Sprintf("/v2/space_quota_definitions/%s?async=%t", spaceQuotaGUID, async)
+ _, err := r.client.Delete(rawURL)
+ return err
+}
+
+func NumberToInt(number json.Number, defaultValue int) int {
+ if number != "" {
+ i, err := number.Int64()
+ if err == nil {
+ return int(i)
+ }
+ }
+ return defaultValue
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/spaces.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/spaces.go
new file mode 100644
index 00000000000..4629496ba24
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2/spaces.go
@@ -0,0 +1,374 @@
+package mccpv2
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/IBM-Cloud/bluemix-go/bmxerror"
+ "github.com/IBM-Cloud/bluemix-go/client"
+ "github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+//SpaceCreateRequest ...
+type SpaceCreateRequest struct {
+ Name string `json:"name"`
+ OrgGUID string `json:"organization_guid"`
+ SpaceQuotaGUID string `json:"space_quota_definition_guid,omitempty"`
+}
+
+//SpaceUpdateRequest ...
+type SpaceUpdateRequest struct {
+ Name *string `json:"name,omitempty"`
+}
+
+//Space ...
+type Space struct {
+ GUID string
+ Name string
+ OrgGUID string
+ SpaceQuotaGUID string
+ AllowSSH bool
+}
+
+//SpaceRole ...
+type SpaceRole struct {
+ UserGUID string
+ Admin bool
+ UserName string
+}
+
+//SpaceFields ...
+type SpaceFields struct {
+ Metadata SpaceMetadata
+ Entity SpaceEntity
+}
+
+//SpaceMetadata ...
+type SpaceMetadata struct {
+ GUID string `json:"guid"`
+ URL string `json:"url"`
+}
+
+//ErrCodeSpaceDoesnotExist ...
+const ErrCodeSpaceDoesnotExist = "SpaceDoesnotExist"
+
+//SpaceResource ...
+type SpaceResource struct {
+ Resource
+ Entity SpaceEntity
+}
+
+//SpaceRoleResource ...
+type SpaceRoleResource struct {
+ Resource
+ Entity SpaceRoleEntity
+}
+
+//SpaceRoleEntity ...
+type SpaceRoleEntity struct {
+ UserGUID string `json:"guid"`
+ Admin bool `json:"bool"`
+ UserName string `json:"username"`
+}
+
+//SpaceEntity ...
+type SpaceEntity struct {
+ Name string `json:"name"`
+ OrgGUID string `json:"organization_guid"`
+ SpaceQuotaGUID string `json:"space_quota_definition_guid"`
+ AllowSSH bool `json:"allow_ssh"`
+}
+
+//ToFields ...
+func (resource *SpaceResource) ToFields() Space {
+ entity := resource.Entity
+
+ return Space{
+ GUID: resource.Metadata.GUID,
+ Name: entity.Name,
+ OrgGUID: entity.OrgGUID,
+ SpaceQuotaGUID: entity.SpaceQuotaGUID,
+ AllowSSH: entity.AllowSSH,
+ }
+}
+
+//ToFields ... 
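+//
+// The role listings below reuse this conversion; e.g. (illustrative sketch only,
+// hypothetical repository variable and GUID):
+//
+//	devs, err := spacesRepo.ListDevelopers("space-guid")
+//	if err == nil {
+//		for _, d := range devs {
+//			fmt.Println(d.UserName)
+//		}
+//	}
+//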
+func (resource *SpaceRoleResource) ToFields() SpaceRole {
+ entity := resource.Entity
+
+ return SpaceRole{
+ UserGUID: resource.Metadata.GUID,
+ Admin: entity.Admin,
+ UserName: entity.UserName,
+ }
+}
+
+//RouteFilter ...
+type RouteFilter struct {
+ DomainGUID string
+ Host *string
+ Path *string
+ Port *int
+}
+
+//Spaces ...
+type Spaces interface {
+ ListSpacesInOrg(orgGUID, region string) ([]Space, error)
+ FindByNameInOrg(orgGUID, name, region string) (*Space, error)
+ Create(req SpaceCreateRequest, opts ...bool) (*SpaceFields, error)
+ Update(spaceGUID string, req SpaceUpdateRequest, opts ...bool) (*SpaceFields, error)
+ Delete(spaceGUID string, opts ...bool) error
+ Get(spaceGUID string) (*SpaceFields, error)
+ ListRoutes(spaceGUID string, req RouteFilter) ([]Route, error)
+ AssociateAuditor(spaceGUID, userMail string) (*SpaceFields, error)
+ AssociateDeveloper(spaceGUID, userMail string) (*SpaceFields, error)
+ AssociateManager(spaceGUID, userMail string) (*SpaceFields, error)
+
+ DisassociateAuditor(spaceGUID, userMail string) error
+ DisassociateDeveloper(spaceGUID, userMail string) error
+ DisassociateManager(spaceGUID, userMail string) error
+
+ ListAuditors(spaceGUID string, filters ...string) ([]SpaceRole, error)
+ ListDevelopers(spaceGUID string, filters ...string) ([]SpaceRole, error)
+ ListManagers(spaceGUID string, filters ...string) ([]SpaceRole, error)
+}
+
+type spaces struct {
+ client *client.Client
+}
+
+func newSpacesAPI(c *client.Client) Spaces {
+ return &spaces{
+ client: c,
+ }
+}
+
+func (r *spaces) FindByNameInOrg(orgGUID string, name string, region string) (*Space, error) {
+ rawURL := fmt.Sprintf("/v2/organizations/%s/spaces", orgGUID)
+ req := rest.GetRequest(rawURL).Query("q", "name:"+name)
+ if region != "" {
+ req.Query("region", region)
+ }
+ httpReq, err := req.Build()
+ if err != nil {
+ return nil, err
+ }
+ path := httpReq.URL.String()
+
+ spaces, err := r.listSpacesWithPath(path)
+
+ if err != nil {
+ return nil, err
+ }
+ if len(spaces) == 0 {
+ return nil, bmxerror.New(ErrCodeSpaceDoesnotExist,
+ fmt.Sprintf("Given space: %q doesn't exist in given org: %q in the given region %q", name, orgGUID, region))
+
+ }
+ return &spaces[0], nil
+}
+
+func (r *spaces) ListSpacesInOrg(orgGUID string, region string) ([]Space, error) {
+ rawURL := fmt.Sprintf("v2/organizations/%s/spaces", orgGUID)
+ req := rest.GetRequest(rawURL)
+ if region != "" {
+ req.Query("region", region)
+ }
+ httpReq, err := req.Build()
+ if err != nil {
+ return nil, err
+ }
+ path := httpReq.URL.String()
+
+ return r.listSpacesWithPath(path)
+}
+
+func (r *spaces) listSpacesWithPath(path string) ([]Space, error) {
+ var spaces []Space
+ _, err := r.client.GetPaginated(path, NewCCPaginatedResources(SpaceResource{}), func(resource interface{}) bool {
+ if spaceResource, ok := resource.(SpaceResource); ok {
+ spaces = append(spaces, spaceResource.ToFields())
+ return true
+ }
+ return false
+ })
+ return spaces, err
+}
+
+func (r *spaces) listSpaceRolesWithPath(path string) ([]SpaceRole, error) {
+ var spaceRoles []SpaceRole
+ _, err := r.client.GetPaginated(path, NewCCPaginatedResources(SpaceRoleResource{}), func(resource interface{}) bool {
+ if spaceRoleResource, ok := resource.(SpaceRoleResource); ok {
+ spaceRoles = append(spaceRoles, spaceRoleResource.ToFields())
+ return true
+ }
+ return false
+ })
+ return spaceRoles, err
+}
+
+// opts is a list of boolean parameters
+// opts[0] - async - Will run the create request in a background job. Recommended: 'true'. Default to 'true'. 
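+//
+// Illustrative sketch (hypothetical repository variable and values): creating a
+// space synchronously and then granting a developer role:
+//
+//	sp, err := spacesRepo.Create(SpaceCreateRequest{Name: "dev", OrgGUID: "org-guid"}, false)
+//	if err == nil {
+//		_, err = spacesRepo.AssociateDeveloper(sp.Metadata.GUID, "user@example.com")
+//	}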
+
+func (r *spaces) Create(req SpaceCreateRequest, opts ...bool) (*SpaceFields, error) {
+ async := true
+ if len(opts) > 0 {
+ async = opts[0]
+ }
+ rawURL := fmt.Sprintf("/v2/spaces?async=%t", async)
+ spaceFields := SpaceFields{}
+ _, err := r.client.Post(rawURL, req, &spaceFields)
+ if err != nil {
+ return nil, err
+ }
+ return &spaceFields, nil
+}
+
+func (r *spaces) Get(spaceGUID string) (*SpaceFields, error) {
+ rawURL := fmt.Sprintf("/v2/spaces/%s", spaceGUID)
+ spaceFields := SpaceFields{}
+ _, err := r.client.Get(rawURL, &spaceFields)
+ if err != nil {
+ return nil, err
+ }
+
+ return &spaceFields, err
+}
+
+// opts is a list of boolean parameters
+// opts[0] - async - Will run the update request in a background job. Recommended: 'true'. Default to 'true'.
+
+func (r *spaces) Update(spaceGUID string, req SpaceUpdateRequest, opts ...bool) (*SpaceFields, error) {
+ async := true
+ if len(opts) > 0 {
+ async = opts[0]
+ }
+ rawURL := fmt.Sprintf("/v2/spaces/%s?async=%t", spaceGUID, async)
+ spaceFields := SpaceFields{}
+ _, err := r.client.Put(rawURL, req, &spaceFields)
+ if err != nil {
+ return nil, err
+ }
+ return &spaceFields, nil
+}
+
+// opts is a list of boolean parameters
+// opts[0] - async - Will run the delete request in a background job. Recommended: 'true'. Default to 'true'.
+// opts[1] - recursive - Will delete all apps, services, routes, and service brokers associated with the space. Default to 'false'.
+
+func (r *spaces) Delete(spaceGUID string, opts ...bool) error {
+ async := true
+ recursive := false
+ if len(opts) > 0 {
+ async = opts[0]
+ }
+ if len(opts) > 1 {
+ recursive = opts[1]
+ }
+ rawURL := fmt.Sprintf("/v2/spaces/%s?async=%t&recursive=%t", spaceGUID, async, recursive)
+ _, err := r.client.Delete(rawURL)
+ return err
+}
+
+func (r *spaces) associateRole(url, userMail string) (*SpaceFields, error) {
+ spaceFields := SpaceFields{}
+ _, err := r.client.Put(url, map[string]string{"username": userMail}, &spaceFields)
+ if err != nil {
+ return nil, err
+ }
+ return &spaceFields, nil
+}
+
+func (r *spaces) removeRole(url, userMail string) error {
+ spaceFields := SpaceFields{}
+ _, err := r.client.DeleteWithBody(url, map[string]string{"username": userMail}, &spaceFields)
+ return err
+}
+
+func (r *spaces) AssociateManager(spaceGUID string, userMail string) (*SpaceFields, error) {
+ rawURL := fmt.Sprintf("/v2/spaces/%s/managers", spaceGUID)
+ return r.associateRole(rawURL, userMail)
+}
+func (r *spaces) AssociateDeveloper(spaceGUID string, userMail string) (*SpaceFields, error) {
+ rawURL := fmt.Sprintf("/v2/spaces/%s/developers", spaceGUID)
+ return r.associateRole(rawURL, userMail)
+}
+func (r *spaces) AssociateAuditor(spaceGUID string, userMail string) (*SpaceFields, error) {
+ rawURL := fmt.Sprintf("/v2/spaces/%s/auditors", spaceGUID)
+ return r.associateRole(rawURL, userMail)
+}
+
+func (r *spaces) DisassociateManager(spaceGUID string, userMail string) error {
+ rawURL := fmt.Sprintf("/v2/spaces/%s/managers", spaceGUID)
+ return r.removeRole(rawURL, userMail)
+}
+
+func (r *spaces) DisassociateDeveloper(spaceGUID string, userMail string) error {
+ rawURL := fmt.Sprintf("/v2/spaces/%s/developers", spaceGUID)
+ return r.removeRole(rawURL, userMail)
+}
+func (r *spaces) DisassociateAuditor(spaceGUID string, userMail string) error {
+ rawURL := fmt.Sprintf("/v2/spaces/%s/auditors", spaceGUID)
+ return r.removeRole(rawURL, userMail)
+}
+
+func (r *spaces) listSpaceRoles(rawURL string, filters ...string) ([]SpaceRole, error) {
+ req := rest.GetRequest(rawURL)
+ 
if len(filters) > 0 { + req.Query("q", strings.Join(filters, "")) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + return r.listSpaceRolesWithPath(path) +} + +func (r *spaces) ListAuditors(spaceGUID string, filters ...string) ([]SpaceRole, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/auditors", spaceGUID) + return r.listSpaceRoles(rawURL, filters...) +} + +func (r *spaces) ListManagers(spaceGUID string, filters ...string) ([]SpaceRole, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/managers", spaceGUID) + return r.listSpaceRoles(rawURL, filters...) +} +func (r *spaces) ListDevelopers(spaceGUID string, filters ...string) ([]SpaceRole, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/developers", spaceGUID) + return r.listSpaceRoles(rawURL, filters...) +} + +func (r *spaces) ListRoutes(spaceGUID string, routeFilter RouteFilter) ([]Route, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/routes", spaceGUID) + req := rest.GetRequest(rawURL) + var query string + if routeFilter.DomainGUID != "" { + query = "domain_guid:" + routeFilter.DomainGUID + ";" + } + if routeFilter.Host != nil { + query += "host:" + *routeFilter.Host + ";" + } + if routeFilter.Path != nil { + query += "path:" + *routeFilter.Path + ";" + } + if routeFilter.Port != nil { + query += "port:" + strconv.Itoa(*routeFilter.Port) + ";" + } + + if len(query) > 0 { + req.Query("q", query) + } + + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + route, err := listRouteWithPath(r.client, path) + if err != nil { + return nil, err + } + return route, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog/api_service.go new file mode 100644 index 00000000000..83f3bfe38e6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog/api_service.go @@ -0,0 +1,67 @@ +package catalog + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//ResourceCatalogAPI is the resource client ... +type ResourceCatalogAPI interface { + ResourceCatalog() ResourceCatalogRepository +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//resourceControllerService holds the client +type resourceControllerService struct { + *client.Client +} + +//New ... 
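+// Illustrative sketch (assumes a configured *session.Session named sess; the
+// offering name is hypothetical):
+//
+//	catalogAPI, err := New(sess)
+//	if err == nil {
+//		serviceID, _ := catalogAPI.ResourceCatalog().GetServiceID("cloud-object-storage")
+//		fmt.Println(serviceID)
+//	}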
+func New(sess *session.Session) (ResourceCatalogAPI, error) {
+ config := sess.Config.Copy()
+ err := config.ValidateConfigForService(bluemix.ResourceCatalogrService)
+ if err != nil {
+ return nil, err
+ }
+ if config.HTTPClient == nil {
+ config.HTTPClient = http.NewHTTPClient(config)
+ }
+ tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{
+ DefaultHeader: gohttp.Header{
+ "User-Agent": []string{http.UserAgent()},
+ },
+ HTTPClient: config.HTTPClient,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if config.IAMAccessToken == "" {
+ err := authentication.PopulateTokens(tokenRefreher, config)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if config.Endpoint == nil {
+ ep, err := config.EndpointLocator.ResourceCatalogEndpoint()
+ if err != nil {
+ return nil, err
+ }
+ config.Endpoint = &ep
+ }
+ return &resourceControllerService{
+ Client: client.New(config, bluemix.ResourceControllerService, tokenRefreher),
+ }, nil
+}
+
+//ResourceCatalog API
+func (a *resourceControllerService) ResourceCatalog() ResourceCatalogRepository {
+ return newResourceCatalogAPI(a.Client)
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog/pagination.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog/pagination.go
new file mode 100644
index 00000000000..ad3e9cc79b0
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog/pagination.go
@@ -0,0 +1,48 @@
+package catalog
+
+import (
+ "encoding/json"
+ "reflect"
+ "strings"
+)
+
+type ResourceCatalogPaginatedResourcesHandler struct {
+ resourceType reflect.Type
+ baseURL string
+}
+
+func NewResourceCatalogPaginatedResources(resource interface{}, baseURL string) ResourceCatalogPaginatedResourcesHandler {
+ return ResourceCatalogPaginatedResourcesHandler{
+ resourceType: reflect.TypeOf(resource),
+ baseURL: baseURL,
+ }
+}
+
+func (pr ResourceCatalogPaginatedResourcesHandler) Resources(bytes []byte, curPath string) ([]interface{}, string, error) {
+ var paginatedResources = struct {
+ NextUrl string `json:"next"`
+ ResourcesBytes json.RawMessage `json:"resources"`
+ }{}
+
+ err := json.Unmarshal(bytes, &paginatedResources)
+
+ slicePtr := reflect.New(reflect.SliceOf(pr.resourceType))
+ dc := json.NewDecoder(strings.NewReader(string(paginatedResources.ResourcesBytes)))
+ dc.UseNumber()
+ err = dc.Decode(slicePtr.Interface())
+ slice := reflect.Indirect(slicePtr)
+
+ contents := make([]interface{}, 0, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ contents = append(contents, slice.Index(i).Interface())
+ }
+ //The next URL in the response is a fully qualified URL like https://resource-catalog.stage1.ng.bluemix.net/api/v1?_offset=50&languages=en_US%2Cen
+ //So we need to trim the baseURL from it. 
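+ //For example (illustrative values only):
+ //  next    = "https://resource-catalog.example.test/api/v1?_offset=50"
+ //  baseURL = "https://resource-catalog.example.test"
+ //  url     = "/api/v1?_offset=50"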
+ index := strings.Index(paginatedResources.NextUrl, pr.baseURL) + //NextUrl contains baseURL, means need to cut + if index != -1 { + url := paginatedResources.NextUrl[index+len(pr.baseURL):] + return contents, url, err + } + return contents, paginatedResources.NextUrl, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog/resource_catalog.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog/resource_catalog.go new file mode 100644 index 00000000000..a29db126216 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog/resource_catalog.go @@ -0,0 +1,358 @@ +package catalog + +import ( + "fmt" + "net/http" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/utils" +) + +//ErrCodeServiceDoesnotExist ... +const ErrCodeServicePlanDoesnotExist = "ServicePlanDoesnotExist" +const ErrCodeServiceDoesnotExist = "ServiceDoesnotExist" +const ErrCodeServiceDeploymentNotFound = "ServiceDeploymentNotFound" + +func newResourceCatalogAPI(c *client.Client) ResourceCatalogRepository { + return &resourceCatalog{ + client: c, + } +} + +type resourceCatalog struct { + client *client.Client +} + +type ResourceCatalogRepository interface { + Get(serviceID string, indepth bool) (models.Service, error) + FindByName(name string, indepth bool) ([]models.Service, error) + ListServices(cb func(service models.Service) bool) error + ListServicePlans(cb func(servicePlan models.ServicePlan) bool, service models.Service) error + GetServiceID(serviceName string) (string, error) + GetServicePlanID(service models.Service, planName string) (string, error) + GetServiceName(serviceID string) (string, error) + GetServicePlanName(servicePlanID string) (string, error) + ListDeployments(servicePlanID string) ([]models.ServiceDeployment, error) + GetServicePlan(servicePlanID string) (models.ServicePlan, error) + ListDeploymentAliases(servicePlanID string) ([]models.ServiceDeploymentAlias, error) + GetDeploymentAlias(servicePlanID string, instanceTarget string, regionID string) (*models.ServiceDeploymentAlias, error) + GetServices() ([]models.Service, error) + GetServicePlans(service models.Service) ([]models.ServicePlan, error) +} + +func (r *resourceCatalog) GetServicePlanID(service models.Service, planName string) (string, error) { + var servicePlanID string + err := r.ListServicePlans(func(servicePlan models.ServicePlan) bool { + if servicePlan.Name == planName { + servicePlanID = servicePlan.ID + return false + } + return true + }, service) + if err != nil { + return "", err + } + return servicePlanID, nil +} + +func (r *resourceCatalog) GetServiceID(serviceName string) (string, error) { + var serviceID string + err := r.ListServices(func(service models.Service) bool { + if service.Name == serviceName { + serviceID = service.ID + return false + } + return true + }) + if err != nil { + return "", err + } + return serviceID, nil +} + +func (r *resourceCatalog) ListServices(cb func(service models.Service) bool) error { + listRequest := rest.GetRequest("/api/v1/") + req, err := listRequest.Build() + if err != nil { + return err + } + + _, err = r.client.GetPaginated( + req.URL.String(), + NewResourceCatalogPaginatedResources(models.Service{}, *r.client.Config.Endpoint), + func(resource interface{}) bool { + if catalogResource, ok := 
resource.(models.Service); ok { + return cb(catalogResource) + } + return false + }) + + return err +} +func (r *resourceCatalog) ListServicePlans(cb func(service models.ServicePlan) bool, service models.Service) error { + var urlSuffix string + if service.Kind == "iaas" { + urlSuffix = "/flavor" + } else { + urlSuffix = "/plan" + } + listRequest := rest.GetRequest("/api/v1/" + service.ID + urlSuffix) + req, err := listRequest.Build() + if err != nil { + return err + } + _, err = r.client.GetPaginated( + req.URL.String(), + NewResourceCatalogPaginatedResources(models.ServicePlan{}, *r.client.Config.Endpoint), + func(resource interface{}) bool { + if resourcePlan, ok := resource.(models.ServicePlan); ok { + return cb(resourcePlan) + } + return false + }) + + return err +} + +func (r *resourceCatalog) GetServiceName(serviceID string) (string, error) { + request := rest.GetRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/api/v1/"+serviceID)) + resp := map[string]interface{}{} + _, err := r.client.SendRequest(request, &resp) + if err != nil { + return "", err + } + if resp["kind"] == "runtime" || resp["kind"] == "service" || resp["kind"] == "iaas" || resp["kind"] == "platform_service" || resp["kind"] == "template" { + if name, ok := resp["name"].(string); ok { + return name, nil + } + return "", nil + } + return "", bmxerror.New(ErrCodeServiceDoesnotExist, + fmt.Sprintf("Given service : %q doesn't exist", serviceID)) +} + +func (r *resourceCatalog) GetServicePlanName(servicePlanID string) (string, error) { + request := rest.GetRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/api/v1/"+servicePlanID)) + resp := map[string]interface{}{} + _, err := r.client.SendRequest(request, &resp) + if err != nil { + return "", err + } + if resp["kind"] == "flavor" || resp["kind"] == "plan" { + if name, ok := resp["name"].(string); ok { + return name, nil + } + return "", nil + } + return "", bmxerror.New(ErrCodeServicePlanDoesnotExist, + fmt.Sprintf("Given service plan : %q doesn't exist", servicePlanID)) +} + +func (r *resourceCatalog) ListDeployments(servicePlanID string) ([]models.ServiceDeployment, error) { + deployments := []models.ServiceDeployment{} + listRequest := rest.GetRequest("/api/v1/" + servicePlanID + "/deployment?include=*") + req, err := listRequest.Build() + if err != nil { + return deployments, err + } + _, err = r.client.GetPaginated( + req.URL.String(), + NewResourceCatalogPaginatedResources(models.ServiceDeployment{}, *r.client.Config.Endpoint), + func(resource interface{}) bool { + if catalogDeployment, ok := resource.(models.ServiceDeployment); ok { + deployments = append(deployments, catalogDeployment) + } + return true + }) + return deployments, err +} + +func (r *resourceCatalog) Get(serviceID string, indepth bool) (models.Service, error) { + request := rest.GetRequest(helpers.GetFullURL(*r.client.Config.Endpoint, fmt.Sprintf("/api/v1/%s", serviceID))) + if indepth { + request = request.Query("include", "*") + } + service := models.Service{} + resp, err := r.client.SendRequest(request, &service) + if err != nil { + if resp.StatusCode == http.StatusNotFound { + return models.Service{}, bmxerror.New(ErrCodeServicePlanDoesnotExist, + fmt.Sprintf("Given service : %q doesn't exist", serviceID)) + } + return models.Service{}, err + } + return service, nil +} + +func (r *resourceCatalog) FindByName(name string, indepth bool) ([]models.Service, error) { + services := []models.Service{} + request := rest.GetRequest("/api/v1/").Query("q", name) + if indepth { + request = 
request.Query("include", "*")
+ }
+ req, err := request.Build()
+ if err != nil {
+ return services, err
+ }
+ _, err = r.client.GetPaginated(
+ req.URL.String(),
+ NewResourceCatalogPaginatedResources(models.Service{}, *r.client.Config.Endpoint),
+ func(rb interface{}) bool {
+ if r, ok := rb.(models.Service); ok {
+ services = append(services, visitServiceTree(r, name)...)
+ }
+ return true
+ })
+ if err != nil {
+ return []models.Service{}, err
+ }
+ if len(services) == 0 {
+ return services, bmxerror.New(ErrCodeServiceDoesnotExist,
+ fmt.Sprintf("Given service : %q doesn't exist", name))
+ }
+ return services, err
+}
+
+func (r *resourceCatalog) GetServicePlan(servicePlanID string) (models.ServicePlan, error) {
+ request := rest.GetRequest(helpers.GetFullURL(*r.client.Config.Endpoint, fmt.Sprintf("/api/v1/%s", servicePlanID)))
+ servicePlan := models.ServicePlan{}
+ resp, err := r.client.SendRequest(request, &servicePlan)
+ if err != nil {
+ if resp.StatusCode == http.StatusNotFound {
+ return models.ServicePlan{}, bmxerror.New(ErrCodeServicePlanDoesnotExist,
+ fmt.Sprintf("Given service plan : %q doesn't exist", servicePlanID))
+ }
+ return models.ServicePlan{}, err
+ }
+ return servicePlan, nil
+}
+
+func (r *resourceCatalog) ListDeploymentAliases(serviceDeploymentID string) ([]models.ServiceDeploymentAlias, error) {
+ aliases := []models.ServiceDeploymentAlias{}
+ listRequest := rest.GetRequest("/api/v1/" + serviceDeploymentID + "/alias?include=*")
+ req, err := listRequest.Build()
+ if err != nil {
+ return aliases, err
+ }
+ _, err = r.client.GetPaginated(
+ req.URL.String(),
+ NewResourceCatalogPaginatedResources(models.ServiceDeploymentAlias{}, *r.client.Config.Endpoint),
+ func(resource interface{}) bool {
+ if deploymentAlias, ok := resource.(models.ServiceDeploymentAlias); ok {
+ aliases = append(aliases, deploymentAlias)
+ }
+ return true
+ })
+ return aliases, err
+}
+
+func (r *resourceCatalog) GetDeploymentAlias(servicePlanID string, instanceTarget string, currentRegion string) (*models.ServiceDeploymentAlias, error) {
+ deployments, err := r.ListDeployments(servicePlanID)
+ if err != nil {
+ return nil, err
+ }
+ found := false
+ var deploymentID string
+ for _, deployment := range deployments {
+ deploymentLocation := utils.GetLocationFromTargetCRN(deployment.Metadata.Deployment.TargetCrn.Resource)
+ if deploymentLocation == instanceTarget {
+ deploymentID = deployment.ID
+ found = true
+ break
+ }
+ }
+ if !found {
+ //Should not get here, since instanceTarget comes from the deployments when the service instance is created
+ return nil, bmxerror.New(ErrCodeServiceDeploymentNotFound,
+ fmt.Sprintf("Service alias Deployment doesn't exist for %q", instanceTarget))
+ }
+ aliases, err := r.ListDeploymentAliases(deploymentID)
+ if err != nil {
+ return nil, err
+ }
+ for _, alias := range aliases {
+ if alias.Metadata.Deployment.Location == currentRegion {
+ return &alias, nil
+ }
+ }
+ return nil, nil
+}
+
+func visitServiceTree(rootService models.Service, name string) []models.Service {
+ services := []models.Service{}
+ if rootService.Name == name && isService(rootService) {
+ services = append(services, rootService)
+ }
+ for _, child := range rootService.Children {
+ services = append(services, visitServiceTree(child, name)...) 
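+ //each child subtree is visited recursively, so matches at any depth of the offering tree are collected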
+ } + return services +} + +func (r *resourceCatalog) GetServices() ([]models.Service, error) { + listRequest := rest.GetRequest("/api/v1/") + var services []models.Service + req, err := listRequest.Build() + if err != nil { + return nil, err + } + + _, err = r.client.GetPaginated( + req.URL.String(), + NewResourceCatalogPaginatedResources(models.Service{}, *r.client.Config.Endpoint), + func(resource interface{}) bool { + if catalogResource, ok := resource.(models.Service); ok { + services = append(services, catalogResource) + return true + } + return false + }) + + if err != nil { + return []models.Service{}, err + } + + return services, nil +} + +func (r *resourceCatalog) GetServicePlans(service models.Service) ([]models.ServicePlan, error) { + var urlSuffix string + if service.Kind == "iaas" { + urlSuffix = "/flavor" + } else { + urlSuffix = "/plan" + } + listRequest := rest.GetRequest("/api/v1/" + service.ID + urlSuffix) + req, err := listRequest.Build() + if err != nil { + return nil, err + } + var servicePlans []models.ServicePlan + _, err = r.client.GetPaginated( + req.URL.String(), + NewResourceCatalogPaginatedResources(models.ServicePlan{}, *r.client.Config.Endpoint), + func(resource interface{}) bool { + if resourcePlan, ok := resource.(models.ServicePlan); ok { + servicePlans = append(servicePlans, resourcePlan) + } + return false + }) + + if err != nil { + return []models.ServicePlan{}, err + } + + return servicePlans, nil +} + +func isService(e models.Service) bool { + // TODO: COS is 'iaas' kind, but considered to be a service + if e.Kind == "service" || e.Kind == "iaas" { + return true + } + return false +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/api_service.go new file mode 100644 index 00000000000..fd66d17521c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/api_service.go @@ -0,0 +1,79 @@ +package controller + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//ResourceControllerAPI is the resource client ... +type ResourceControllerAPI interface { + ResourceServiceInstance() ResourceServiceInstanceRepository + ResourceServiceAlias() ResourceServiceAliasRepository + ResourceServiceKey() ResourceServiceKeyRepository +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//resourceControllerService holds the client +type resourceControllerService struct { + *client.Client +} + +//New ... 
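+// Illustrative sketch (assumes a configured *session.Session named sess; the
+// instance name is hypothetical):
+//
+//	controllerAPI, err := New(sess)
+//	if err == nil {
+//		instances, _ := controllerAPI.ResourceServiceInstance().ListInstances(ServiceInstanceQuery{Name: "my-instance"})
+//		fmt.Println(len(instances))
+//	}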
+func New(sess *session.Session) (ResourceControllerAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.ResourceControllerService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.ResourceControllerEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + return &resourceControllerService{ + Client: client.New(config, bluemix.ResourceManagementService, tokenRefreher), + }, nil +} + +//ResourceController API +func (a *resourceControllerService) ResourceServiceInstance() ResourceServiceInstanceRepository { + return newResourceServiceInstanceAPI(a.Client) +} + +//ResourceController API +func (a *resourceControllerService) ResourceServiceKey() ResourceServiceKeyRepository { + return newResourceServiceKeyAPI(a.Client) +} + +//ResourceController API +func (a *resourceControllerService) ResourceServiceAlias() ResourceServiceAliasRepository { + return newResourceServiceAliasRepository(a.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/pagination.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/pagination.go new file mode 100644 index 00000000000..287fbcbdded --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/pagination.go @@ -0,0 +1,39 @@ +package controller + +import ( + "encoding/json" + "reflect" + "strings" +) + +type GenericPaginatedResourcesHandler struct { + resourceType reflect.Type +} + +func NewRCPaginatedResources(resource interface{}) GenericPaginatedResourcesHandler { + return GenericPaginatedResourcesHandler{ + resourceType: reflect.TypeOf(resource), + } +} + +func (pr GenericPaginatedResourcesHandler) Resources(bytes []byte, curURL string) ([]interface{}, string, error) { + var paginatedResources = struct { + NextUrl string `json:"next_url"` + ResourcesBytes json.RawMessage `json:"resources"` + }{} + + err := json.Unmarshal(bytes, &paginatedResources) + + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + dc := json.NewDecoder(strings.NewReader(string(paginatedResources.ResourcesBytes))) + dc.UseNumber() + err = dc.Decode(slicePtr.Interface()) + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + + return contents, paginatedResources.NextUrl, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_alias.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_alias.go new file mode 100644 index 00000000000..ccc822d2785 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_alias.go @@ -0,0 +1,148 @@ +package controller + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" + 
"github.com/IBM-Cloud/bluemix-go/crn" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +type ServiceAliasQueryFilter struct { + AccountID string + ServiceInstanceID string + Name string // TODO: RC API currently not support name filtering +} + +type CreateServiceAliasParams struct { + Name string `json:"name"` + ServiceInstanceID string `json:"resource_instance_id"` + ScopeCRN crn.CRN `json:"scope_crn"` + Tags []string `json:"tags,omitempty"` + Parameters map[string]interface{} `json:"parameters,omitempty"` +} + +type UpdateServiceAliasParams struct { + Name string `json:"name,omitempty"` + Tags []string `json:"tags,omitempty"` + Parameters map[string]interface{} `json:"parameters,omitempty"` +} + +//ErrCodeResourceServiceAliasDoesnotExist ... +const ErrCodeResourceServiceAliasDoesnotExist = "ResourceServiceAliasDoesnotExist" + +type ResourceServiceAliasRepository interface { + Alias(aliasID string) (models.ServiceAlias, error) + Aliases(*ServiceAliasQueryFilter) ([]models.ServiceAlias, error) + AliasesWithCallback(*ServiceAliasQueryFilter, func(models.ServiceAlias) bool) error + + InstanceAliases(serviceInstanceID string) ([]models.ServiceAlias, error) + InstanceAliasByName(serviceInstanceID string, name string) ([]models.ServiceAlias, error) + + CreateAlias(params CreateServiceAliasParams) (models.ServiceAlias, error) + UpdateAlias(aliasID string, params UpdateServiceAliasParams) (models.ServiceAlias, error) + DeleteAlias(aliasID string) error +} + +type serviceAliasRepository struct { + client *client.Client +} + +func newResourceServiceAliasRepository(c *client.Client) ResourceServiceAliasRepository { + return &serviceAliasRepository{ + client: c, + } +} + +func (r *serviceAliasRepository) InstanceAliases(serviceInstanceID string) ([]models.ServiceAlias, error) { + return r.Aliases(&ServiceAliasQueryFilter{ServiceInstanceID: serviceInstanceID}) +} + +func (r *serviceAliasRepository) InstanceAliasByName(serviceInstanceID string, name string) ([]models.ServiceAlias, error) { + return r.Aliases(&ServiceAliasQueryFilter{ServiceInstanceID: serviceInstanceID, Name: name}) +} + +func (r *serviceAliasRepository) Alias(aliasID string) (models.ServiceAlias, error) { + var alias models.ServiceAlias + resp, err := r.client.Get("/v1/resource_aliases/"+url.PathEscape(aliasID), &alias) + if resp.StatusCode == http.StatusNotFound { + return alias, bmxerror.New(ErrCodeResourceServiceAliasDoesnotExist, + fmt.Sprintf("Given service alias : %q doesn't exist", aliasID)) + } + return alias, err +} +func (r *serviceAliasRepository) Aliases(filter *ServiceAliasQueryFilter) ([]models.ServiceAlias, error) { + var aliases []models.ServiceAlias + err := r.AliasesWithCallback(filter, func(a models.ServiceAlias) bool { + aliases = append(aliases, a) + return true + }) + return aliases, err +} + +func (r *serviceAliasRepository) AliasesWithCallback(filter *ServiceAliasQueryFilter, cb func(models.ServiceAlias) bool) error { + listRequest := rest.GetRequest("/v1/resource_aliases") + if filter != nil { + if filter.AccountID != "" { + listRequest.Query("account_id", filter.AccountID) + } + if filter.ServiceInstanceID != "" { + listRequest.Query("resource_instance_id", url.PathEscape(filter.ServiceInstanceID)) + } + } + + req, err := listRequest.Build() + if err != nil { + return err + } + + // TODO: GetPaginated's first argument should be a request instead if url path + _, err = r.client.GetPaginated( + req.URL.String(), + NewRCPaginatedResources(models.ServiceAlias{}), + 
func(resource interface{}) bool {
+ // if alias, ok := resource.(models.ServiceAlias); ok {
+ // return cb(alias)
+ // }
+ // TODO: once the RC API supports name filtering, remove the name check in cb
+ if alias, ok := resource.(models.ServiceAlias); ok {
+ if filter.Name == "" || strings.EqualFold(filter.Name, alias.Name) {
+ return cb(alias)
+ }
+ return true
+ }
+ return false
+ })
+
+ return err
+}
+
+func (r *serviceAliasRepository) CreateAlias(params CreateServiceAliasParams) (models.ServiceAlias, error) {
+ alias := models.ServiceAlias{}
+ _, err := r.client.Post("/v1/resource_aliases", params, &alias)
+ return alias, err
+}
+
+func (r *serviceAliasRepository) UpdateAlias(aliasID string, params UpdateServiceAliasParams) (models.ServiceAlias, error) {
+ alias := models.ServiceAlias{}
+ resp, err := r.client.Patch("/v1/resource_aliases/"+url.PathEscape(aliasID), params, &alias)
+ if resp.StatusCode == http.StatusNotFound {
+ return alias, bmxerror.New(ErrCodeResourceServiceAliasDoesnotExist,
+ fmt.Sprintf("Given service alias : %q doesn't exist", aliasID))
+ }
+ return alias, err
+}
+
+func (r *serviceAliasRepository) DeleteAlias(aliasID string) error {
+ resp, err := r.client.Delete("/v1/resource_aliases/" + url.PathEscape(aliasID))
+ if resp.StatusCode == http.StatusNotFound {
+ return bmxerror.New(ErrCodeResourceServiceAliasDoesnotExist,
+ fmt.Sprintf("Given service alias : %q doesn't exist", aliasID))
+ }
+ return err
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_binding.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_binding.go
new file mode 100644
index 00000000000..a9226dd71d3
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_binding.go
@@ -0,0 +1,80 @@
+package controller
+
+import (
+ "net/url"
+
+ "github.com/IBM-Cloud/bluemix-go/client"
+ "github.com/IBM-Cloud/bluemix-go/crn"
+ "github.com/IBM-Cloud/bluemix-go/helpers"
+ "github.com/IBM-Cloud/bluemix-go/models"
+ "github.com/IBM-Cloud/bluemix-go/rest"
+)
+
+type CreateServiceBindingRequest struct {
+ SourceCRN crn.CRN `json:"source_crn"`
+ TargetCRN crn.CRN `json:"target_crn"`
+ Parameters map[string]interface{} `json:"parameters,omitempty"`
+}
+
+type ResourceServiceBindingRepository interface {
+ ListBindings(cb func(models.ServiceBinding) bool) error
+ GetBinding(bindingID string) (models.ServiceBinding, error)
+ CreateBinding(CreateServiceBindingRequest) (models.ServiceBinding, error)
+ DeleteBinding(bindingID string) error
+}
+
+type serviceBindingRepository struct {
+ client *client.Client
+}
+
+func newServiceBindingRepository(c *client.Client) ResourceServiceBindingRepository {
+ return &serviceBindingRepository{
+ client: c,
+ }
+}
+
+func (r *serviceBindingRepository) ListBindings(cb func(models.ServiceBinding) bool) error {
+ listRequest := rest.GetRequest("/v1/resource_bindings")
+ req, err := listRequest.Build()
+ if err != nil {
+ return err
+ }
+
+ _, err = r.client.GetPaginated(
+ req.URL.String(),
+ NewRCPaginatedResources(models.ServiceBinding{}),
+ func(resource interface{}) bool {
+ if binding, ok := resource.(models.ServiceBinding); ok {
+ return cb(binding)
+ }
+ return false
+ })
+
+ return err
+}
+
+func (r *serviceBindingRepository) CreateBinding(createBindingRequest CreateServiceBindingRequest) (models.ServiceBinding, error) {
+ resp := models.ServiceBinding{}
+ _, err := r.client.Post("/v1/resource_bindings", createBindingRequest, &resp)
+ 
if err != nil { + return resp, err + } + return resp, nil +} + +func (r *serviceBindingRepository) GetBinding(bindingID string) (models.ServiceBinding, error) { + resp := models.ServiceBinding{} + request := rest.GetRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/resource_bindings/"+url.PathEscape(bindingID))) + _, err := r.client.SendRequest(request, &resp) + if err != nil { + return resp, err + } + return resp, nil + +} + +func (r *serviceBindingRepository) DeleteBinding(bindingID string) error { + request := rest.DeleteRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/resource_bindings/"+url.PathEscape(bindingID))).Query("id", url.PathEscape(bindingID)) + _, err := r.client.SendRequest(request, nil) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_instance.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_instance.go new file mode 100644 index 00000000000..05c0e7c37e5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_instance.go @@ -0,0 +1,166 @@ +package controller + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +type CreateServiceInstanceRequest struct { + Name string `json:"name"` + ServicePlanID string `json:"resource_plan_id"` + ResourceGroupID string `json:"resource_group_id"` + Crn string `json:"crn,omitempty"` + Tags []string `json:"tags,omitempty"` + Parameters map[string]interface{} `json:"parameters,omitempty"` + TargetCrn string `json:"target_crn"` +} + +type UpdateServiceInstanceRequest struct { + Name string `json:"name,omitempty"` + ServicePlanID string `json:"resource_plan_id,omitempty"` + Tags []string `json:"tags,omitempty"` + Parameters map[string]interface{} `json:"parameters,omitempty"` + UpdateTime int64 `json:"update_time,omitempty"` +} + +type ServiceInstanceQuery struct { + ResourceGroupID string + ServiceID string + ServicePlanID string + Name string +} + +//ErrCodeResourceServiceInstanceDoesnotExist ... +const ErrCodeResourceServiceInstanceDoesnotExist = "ResourceServiceInstanceDoesnotExist" + +//ResourceServiceInstanceQuery ... +type ResourceServiceInstanceRepository interface { + ListInstances(query ServiceInstanceQuery) ([]models.ServiceInstance, error) + GetInstance(serviceInstanceID string) (models.ServiceInstance, error) + CreateInstance(serviceInstanceRequest CreateServiceInstanceRequest) (models.ServiceInstance, error) + UpdateInstance(serviceInstanceID string, updateInstanceRequest UpdateServiceInstanceRequest) (models.ServiceInstance, error) + DeleteInstance(serviceInstanceID string, recursive bool) error + //GetBindings(serviceInstanceID string) ([]ServiceBinding, error) +} + +type resourceServiceInstance struct { + client *client.Client +} + +func newResourceServiceInstanceAPI(c *client.Client) ResourceServiceInstanceRepository { + return &resourceServiceInstance{ + client: c, + } +} + +func (r *resourceServiceInstance) ListInstances(query ServiceInstanceQuery) ([]models.ServiceInstance, error) { + listRequest := rest.GetRequest("/v1/resource_instances"). + Query("resource_group_id", query.ResourceGroupID). + Query("resource_id", query.ServiceID). 
+ Query("resource_plan_id", query.ServicePlanID) + + req, err := listRequest.Build() + if err != nil { + return nil, err + } + + var instances []models.ServiceInstance + _, err = r.client.GetPaginated( + req.URL.String(), + NewRCPaginatedResources(models.ServiceInstance{}), + func(resource interface{}) bool { + if instance, ok := resource.(models.ServiceInstance); ok { + instances = append(instances, instance) + return true + } + return false + }, + ) + if err != nil { + return []models.ServiceInstance{}, err + } + + if query.Name != "" { + instances = filterInstancesByName(instances, query.Name) + } + return instances, nil +} + +func (r *resourceServiceInstance) CreateInstance(serviceInstanceRequest CreateServiceInstanceRequest) (models.ServiceInstance, error) { + resp := models.ServiceInstance{} + request := rest.PostRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/resource_instances")) + _, err := r.client.SendRequest(request.Body(serviceInstanceRequest), &resp) + if err != nil { + return resp, err + } + return resp, nil +} +func (r *resourceServiceInstance) UpdateInstance(serviceInstanceID string, updateInstanceRequest UpdateServiceInstanceRequest) (models.ServiceInstance, error) { + resp := models.ServiceInstance{} + request := rest.PatchRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/resource_instances/"+url.PathEscape(serviceInstanceID))) + _, err := r.client.SendRequest(request.Body(updateInstanceRequest), &resp) + if err != nil { + return resp, err + } + return resp, nil +} + +func (r *resourceServiceInstance) GetInstance(serviceInstanceID string) (models.ServiceInstance, error) { + var instance models.ServiceInstance + resp, err := r.client.Get("/v1/resource_instances/"+url.PathEscape(serviceInstanceID), &instance) + if resp.StatusCode == http.StatusNotFound { + return models.ServiceInstance{}, bmxerror.New(ErrCodeResourceServiceInstanceDoesnotExist, + fmt.Sprintf("Given service instance : %q doesn't exist", serviceInstanceID)) + } + return instance, err +} + +func (r *resourceServiceInstance) DeleteInstance(resourceInstanceID string, recursive bool) error { + request := rest.DeleteRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/resource_instances/"+url.PathEscape(resourceInstanceID))) + if recursive { + request = request.Query("recursive", "true") + } + _, err := r.client.SendRequest(request, nil) + return err +} + +/*func (r *resourceServiceInstance) GetBindings(serviceInstanceID string) ([]ServiceBinding, error) { + listRequest := rest.GetRequest("/v1/resource_instances/" + url.PathEscape(serviceInstanceID) + "/resource_bindings") + req, err := listRequest.Build() + if err != nil { + return nil, err + } + + var bindings []ServiceBinding + _, err = r.client.GetPaginated( + req.URL.String(), + ServiceBinding{}, + func(resource interface{}) bool { + if binding, ok := resource.(ServiceBinding); ok { + bindings = append(bindings, binding) + return true + } + return false + }) + if err != nil { + return []ServiceBinding{}, err + } + return bindings, nil +}*/ + +func filterInstancesByName(instances []models.ServiceInstance, name string) []models.ServiceInstance { + ret := []models.ServiceInstance{} + for _, instance := range instances { + if instance.Name == name { + ret = append(ret, instance) + } + } + return ret +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_key.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_key.go new file mode 100644 index 
00000000000..7332590935b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller/resource_service_key.go @@ -0,0 +1,103 @@ +package controller + +import ( + "net/url" + "strings" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/crn" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +const ( + _Role_Crn = "role_crn" + _Service_ID_Crn = "serviceid_crn" +) + +type CreateServiceKeyRequest struct { + Name string `json:"name"` + SourceCRN crn.CRN `json:"source_crn"` + Parameters map[string]interface{} `json:"parameters,omitempty"` +} + +//ErrCodeResourceServiceInstanceDoesnotExist ... +const ErrCodeResourceServiceKeyDoesnotExist = "ResourceServiceInstanceDoesnotExist" + +//ResourceServiceInstanceQuery ... +type ResourceServiceKeyRepository interface { + GetKey(keyID string) (models.ServiceKey, error) + GetKeys(keyName string) ([]models.ServiceKey, error) + CreateKey(CreateServiceKeyRequest) (models.ServiceKey, error) + DeleteKey(keyID string) error +} + +type resourceServiceKey struct { + client *client.Client +} + +func newResourceServiceKeyAPI(c *client.Client) ResourceServiceKeyRepository { + return &resourceServiceKey{ + client: c, + } +} + +func (r *resourceServiceKey) GetKeys(keyName string) ([]models.ServiceKey, error) { + var keys []models.ServiceKey + _, err := r.client.GetPaginated( + "/v1/resource_keys", + NewRCPaginatedResources(models.ServiceKey{}), + func(resource interface{}) bool { + if key, ok := resource.(models.ServiceKey); ok { + keys = append(keys, key) + return true + } + return false + }, + ) + if err != nil { + return []models.ServiceKey{}, err + } + + if keyName != "" { + keys = filterKeysByName(keys, keyName) + } + + return keys, nil +} + +func filterKeysByName(keys []models.ServiceKey, name string) []models.ServiceKey { + ret := []models.ServiceKey{} + for _, k := range keys { + if strings.EqualFold(k.Name, name) { + ret = append(ret, k) + } + } + return ret +} + +func (r *resourceServiceKey) GetKey(keyID string) (models.ServiceKey, error) { + resp := models.ServiceKey{} + request := rest.GetRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/resource_keys/"+url.PathEscape(keyID))) + _, err := r.client.SendRequest(request, &resp) + if err != nil { + return resp, err + } + return resp, nil +} + +func (r *resourceServiceKey) CreateKey(request CreateServiceKeyRequest) (models.ServiceKey, error) { + resp := models.ServiceKey{} + _, err := r.client.Post("/v1/resource_keys", request, &resp) + if err != nil { + return resp, err + } + return resp, nil +} + +func (r *resourceServiceKey) DeleteKey(keyID string) error { + request := rest.DeleteRequest(helpers.GetFullURL(*r.client.Config.Endpoint, "/v1/resource_keys/"+url.PathEscape(keyID))).Query("id", url.PathEscape(keyID)) + _, err := r.client.SendRequest(request, nil) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2/api_service.go new file mode 100644 index 00000000000..6f7248a1f4a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2/api_service.go @@ -0,0 +1,67 @@ +package controllerv2 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + 
"github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//ResourceControllerAPIV2 is the resource client ... +type ResourceControllerAPIV2 interface { + ResourceServiceInstanceV2() ResourceServiceInstanceRepository +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//resourceControllerService holds the client +type resourceControllerService struct { + *client.Client +} + +//New ... +func New(sess *session.Session) (ResourceControllerAPIV2, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.ResourceControllerServicev2) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.ResourceControllerEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + return &resourceControllerService{ + Client: client.New(config, bluemix.ResourceControllerServicev2, tokenRefreher), + }, nil +} + +//ResourceController API +func (a *resourceControllerService) ResourceServiceInstanceV2() ResourceServiceInstanceRepository { + return newResourceServiceInstanceAPI(a.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2/pagination.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2/pagination.go new file mode 100644 index 00000000000..8e7abc51748 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2/pagination.go @@ -0,0 +1,39 @@ +package controllerv2 + +import ( + "encoding/json" + "reflect" + "strings" +) + +type GenericPaginatedResourcesHandler struct { + resourceType reflect.Type +} + +func NewRCPaginatedResources(resource interface{}) GenericPaginatedResourcesHandler { + return GenericPaginatedResourcesHandler{ + resourceType: reflect.TypeOf(resource), + } +} + +func (pr GenericPaginatedResourcesHandler) Resources(bytes []byte, curURL string) ([]interface{}, string, error) { + var paginatedResources = struct { + NextUrl string `json:"next_url"` + ResourcesBytes json.RawMessage `json:"resources"` + }{} + + err := json.Unmarshal(bytes, &paginatedResources) + + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + dc := json.NewDecoder(strings.NewReader(string(paginatedResources.ResourcesBytes))) + dc.UseNumber() + err = dc.Decode(slicePtr.Interface()) + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + + return contents, paginatedResources.NextUrl, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2/resource_service_instance.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2/resource_service_instance.go new file mode 100644 index 00000000000..505dd38916f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2/resource_service_instance.go @@ -0,0 +1,121 @@ +package 
controllerv2 + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +type CreateServiceInstanceRequest struct { + Name string `json:"name"` + ServicePlanID string `json:"resource_plan_id"` + ResourceGroupID string `json:"resource_group_id"` + Crn string `json:"crn,omitempty"` + Tags []string `json:"tags,omitempty"` + Parameters map[string]interface{} `json:"parameters,omitempty"` + TargetCrn string `json:"target_crn"` +} + +type UpdateServiceInstanceRequest struct { + Name string `json:"name,omitempty"` + ServicePlanID string `json:"resource_plan_id,omitempty"` + Tags []string `json:"tags,omitempty"` + Parameters map[string]interface{} `json:"parameters,omitempty"` + UpdateTime int64 `json:"update_time,omitempty"` +} + +type ServiceInstanceQuery struct { + ResourceGroupID string + ServiceID string + ServicePlanID string + Name string + Type string + SubType string + Limit string + UpdatedFrom string + UpdatedTo string + Guid string +} + +//ErrCodeResourceServiceInstanceDoesnotExist ... +const ErrCodeResourceServiceInstanceDoesnotExist = "ResourceServiceInstanceDoesnotExist" + +//ResourceServiceInstanceQuery ... +type ResourceServiceInstanceRepository interface { + ListInstances(query ServiceInstanceQuery) ([]models.ServiceInstanceV2, error) + GetInstance(serviceInstanceID string) (models.ServiceInstanceV2, error) +} + +type resourceServiceInstance struct { + client *client.Client +} + +func newResourceServiceInstanceAPI(c *client.Client) ResourceServiceInstanceRepository { + return &resourceServiceInstance{ + client: c, + } +} + +func (r *resourceServiceInstance) ListInstances(query ServiceInstanceQuery) ([]models.ServiceInstanceV2, error) { + listRequest := rest.GetRequest("/v2/resource_instances"). + Query("resource_group_id", query.ResourceGroupID). + Query("resource_id", query.ServiceID). + Query("resource_plan_id", query.ServicePlanID). + Query("type", query.Type). + Query("sub_type", query.SubType). + Query("limit", query.Limit). + Query("updated_from", query.UpdatedFrom). + Query("updated_to", query.UpdatedTo). 
+ Query("guid", query.Guid) + + req, err := listRequest.Build() + if err != nil { + return nil, err + } + + var instances []models.ServiceInstanceV2 + _, err = r.client.GetPaginated( + req.URL.String(), + NewRCPaginatedResources(models.ServiceInstanceV2{}), + func(resource interface{}) bool { + if instance, ok := resource.(models.ServiceInstanceV2); ok { + instances = append(instances, instance) + return true + } + return false + }, + ) + if err != nil { + return []models.ServiceInstanceV2{}, err + } + + if query.Name != "" { + instances = filterInstancesByName(instances, query.Name) + } + return instances, nil +} + +func (r *resourceServiceInstance) GetInstance(serviceInstanceID string) (models.ServiceInstanceV2, error) { + var instance models.ServiceInstanceV2 + resp, err := r.client.Get("/v2/resource_instances/"+url.PathEscape(serviceInstanceID), &instance) + if resp.StatusCode == http.StatusNotFound { + return models.ServiceInstanceV2{}, bmxerror.New(ErrCodeResourceServiceInstanceDoesnotExist, + fmt.Sprintf("Given service instance : %q doesn't exist", serviceInstanceID)) + } + return instance, err +} + +func filterInstancesByName(instances []models.ServiceInstanceV2, name string) []models.ServiceInstanceV2 { + ret := []models.ServiceInstanceV2{} + for _, instance := range instances { + if instance.Name == name { + ret = append(ret, instance) + } + } + return ret +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/api_service.go new file mode 100644 index 00000000000..a9abcb55d25 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/api_service.go @@ -0,0 +1,73 @@ +package managementv2 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//ResourceManagementAPI is the resource client ... +type ResourceManagementAPIv2 interface { + ResourceQuota() ResourceQuotaRepository + ResourceGroup() ResourceGroupRepository +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//resourceManagementService holds the client +type resourceManagementService struct { + *client.Client +} + +//New ... 
+func New(sess *session.Session) (ResourceManagementAPIv2, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.ResourceManagementService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.ResourceManagementEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + return &resourceManagementService{ + Client: client.New(config, bluemix.ResourceManagementService, tokenRefreher), + }, nil +} + +//ResourceQuota API +func (a *resourceManagementService) ResourceQuota() ResourceQuotaRepository { + return newResourceQuotaAPI(a.Client) +} + +//ResourceGroup API +func (a *resourceManagementService) ResourceGroup() ResourceGroupRepository { + return newResourceGroupAPI(a.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/pagination.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/pagination.go new file mode 100644 index 00000000000..87f5a7a8f6e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/pagination.go @@ -0,0 +1,39 @@ +package managementv2 + +import ( + "encoding/json" + "reflect" + "strings" +) + +type GenericPaginatedResourcesHandler struct { + resourceType reflect.Type +} + +func NewRCPaginatedResources(resource interface{}) GenericPaginatedResourcesHandler { + return GenericPaginatedResourcesHandler{ + resourceType: reflect.TypeOf(resource), + } +} + +func (pr GenericPaginatedResourcesHandler) Resources(bytes []byte, curURL string) ([]interface{}, string, error) { + var paginatedResources = struct { + NextUrl string `json:"next_url"` + ResourcesBytes json.RawMessage `json:"resources"` + }{} + + err := json.Unmarshal(bytes, &paginatedResources) + + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + dc := json.NewDecoder(strings.NewReader(string(paginatedResources.ResourcesBytes))) + dc.UseNumber() + err = dc.Decode(slicePtr.Interface()) + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + + return contents, paginatedResources.NextUrl, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/resource_group.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/resource_group.go new file mode 100644 index 00000000000..07ed8fcee0a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/resource_group.go @@ -0,0 +1,160 @@ +package managementv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +type ResourceOrigin string + +func (o ResourceOrigin) String() string { + return string(o) +} + +//ResourceGroupUpdateRequest ... 
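Each package in this vendor drop carries an identical GenericPaginatedResourcesHandler: it unmarshals one page's "resources" array into a slice of the registered type via reflection and returns "next_url" so client.GetPaginated can keep walking. A hedged sketch of the calling convention, mirroring how GetKeys and ListInstances use it above (c stands in for a *client.Client):

var groups []models.ResourceGroupv2
_, err := c.GetPaginated(
    "/v2/resource_groups",
    NewRCPaginatedResources(models.ResourceGroupv2{}), // registers the page's element type
    func(resource interface{}) bool {
        if g, ok := resource.(models.ResourceGroupv2); ok {
            groups = append(groups, g)
        }
        return true // return false to stop paging early
    },
)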
+type ResourceGroupUpdateRequest struct { + Name string `json:"name,omitempty"` + QuotaID string `json:"quota_id,omitempty"` + Default *bool `json:"default,omitempty"` +} + +// ResourceGroupQuery is the query filters to get resource groups +type ResourceGroupQuery struct { + AccountID string + Default bool + ResourceID string + ResourceOrigin models.ResourceOrigin +} + +//ErrCodeResourceGroupDoesnotExist ... +const ErrCodeResourceGroupDoesnotExist = "ResourceGroupDoesnotExist" + +//ResourceGroupQuery ... +type ResourceGroupRepository interface { + // List all available resource groups + List(*ResourceGroupQuery) ([]models.ResourceGroupv2, error) + // Get resource group by ID + Get(id string) (*models.ResourceGroupv2, error) + // Find resource groups having the specific name + FindByName(*ResourceGroupQuery, string) ([]models.ResourceGroupv2, error) + // Create a new resource group + Create(models.ResourceGroupv2) (*models.ResourceGroupv2, error) + // Delete an existing resource group + Delete(id string) error + // Update an existing resource group + Update(id string, request *ResourceGroupUpdateRequest) (*models.ResourceGroupv2, error) +} + +type resourceGroup struct { + client *client.Client +} + +func newResourceGroupAPI(c *client.Client) ResourceGroupRepository { + return &resourceGroup{ + client: c, + } +} + +// populate query part of HTTP requests +func (q ResourceGroupQuery) MakeRequest(r *rest.Request) *rest.Request { + if q.AccountID != "" { + r.Query("account_id", q.AccountID) + } + if q.Default { + r.Query("default", "true") + } + if q.ResourceID != "" { + r.Query("resource_id", q.ResourceID) + } + if q.ResourceOrigin != "" { + r.Query("resource_origin", q.ResourceOrigin.String()) + } + return r +} + +func (r *resourceGroup) List(query *ResourceGroupQuery) ([]models.ResourceGroupv2, error) { + listRequest := rest.GetRequest("/v2/resource_groups") + if query != nil { + query.MakeRequest(listRequest) + } + req, err := listRequest.Build() + if err != nil { + return []models.ResourceGroupv2{}, err + } + + var groups []models.ResourceGroupv2 + _, err = r.client.GetPaginated( + req.URL.String(), + NewRCPaginatedResources(models.ResourceGroupv2{}), + func(resource interface{}) bool { + if group, ok := resource.(models.ResourceGroupv2); ok { + groups = append(groups, group) + return true + } + return false + }, + ) + + if err != nil { + return []models.ResourceGroupv2{}, err + } + + return groups, nil +} + +func (r *resourceGroup) FindByName(query *ResourceGroupQuery, name string) ([]models.ResourceGroupv2, error) { + groups, err := r.List(query) + if err != nil { + return []models.ResourceGroupv2{}, err + } + + filteredGroups := []models.ResourceGroupv2{} + for _, group := range groups { + if group.Name == name { + filteredGroups = append(filteredGroups, group) + } + } + + if len(filteredGroups) == 0 { + return filteredGroups, bmxerror.New(ErrCodeResourceGroupDoesnotExist, + fmt.Sprintf("Given resource Group : %q doesn't exist", name)) + + } + return filteredGroups, nil +} + +func (r *resourceGroup) Create(group models.ResourceGroupv2) (*models.ResourceGroupv2, error) { + newGroup := models.ResourceGroupv2{} + _, err := r.client.Post("/v2/resource_groups", &group, &newGroup) + if err != nil { + return nil, err + } + return &newGroup, nil +} + +func (r *resourceGroup) Delete(id string) error { + _, err := r.client.Delete("/v2/resource_groups/" + id) + return err +} + +func (r *resourceGroup) Update(id string, request *ResourceGroupUpdateRequest) (*models.ResourceGroupv2, error) { + 
updatedGroup := models.ResourceGroupv2{} + _, err := r.client.Patch("/v2/resource_groups/"+id, request, &updatedGroup) + if err != nil { + return nil, err + } + return &updatedGroup, nil +} + +func (r *resourceGroup) Get(id string) (*models.ResourceGroupv2, error) { + group := models.ResourceGroupv2{} + _, err := r.client.Get("/v2/resource_groups/"+id, &group) + if err != nil { + return nil, err + } + return &group, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/resource_quota.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/resource_quota.go new file mode 100644 index 00000000000..27f15273d94 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2/resource_quota.go @@ -0,0 +1,96 @@ +package managementv2 + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/client" +) + +type quotaDefinitionQueryResult struct { + QuotaDefinitions []QuotaDefinition `json:"resources"` +} + +type QuotaDefinition struct { + ID string `json:"_id,omitempty"` + Revision string `json:"_rev,omitempty"` + Name string `json:"name,omitmempty"` + Type string `json:"type,omitempty"` + ServiceInstanceCountLimit int `json:"number_of_service_instances,omitempty"` + AppCountLimit int `json:"number_of_apps,omitempty"` + AppInstanceCountLimit int `json:"instances_per_app,omitempty"` + AppInstanceMemoryLimit string `json:"instance_memory,omitempty"` + TotalAppMemoryLimit string `json:"total_app_memory,omitempty"` + VSICountLimit int `json:"vsi_limit,omitempty"` + ResourceQuotas []ResourceQuota `json:"resource_quotas,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + UpdatedAt string `json:"updated_at,omitempty"` +} + +type ResourceQuota struct { + ID string `json:"_id,omitempty"` + ResourceID string `json:"resource_id,omitempty"` + Limit int `json:"limit,omitempty"` +} + +//ErrCodeResourceQuotaDoesnotExist ... 
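The entry point the installer mainly needs here is FindByName, which lists every group and filters client-side, surfacing ErrCodeResourceGroupDoesnotExist when nothing matches. A hypothetical end-to-end sketch, again not part of the vendored files (session credentials from the environment; the group name is a placeholder):

package main

import (
    "fmt"
    "log"

    "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2"
    "github.com/IBM-Cloud/bluemix-go/session"
)

func main() {
    sess, err := session.New()
    if err != nil {
        log.Fatal(err)
    }
    mgmt, err := managementv2.New(sess)
    if err != nil {
        log.Fatal(err)
    }
    // Group names are not unique, so a slice comes back; taking the first hit is typical.
    groups, err := mgmt.ResourceGroup().FindByName(&managementv2.ResourceGroupQuery{}, "my-resource-group")
    if err != nil {
        log.Fatal(err) // includes the does-not-exist case
    }
    fmt.Println(groups[0].ID) // ID field assumed on models.ResourceGroupv2 (defined outside this diff)
}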
+const ErrCodeResourceQuotaDoesnotExist = "ResourceQuotaDoesnotExist" + +type ResourceQuotaRepository interface { + // List all quota definitions + List() ([]QuotaDefinition, error) + // Query quota definitions having specific name + FindByName(name string) ([]QuotaDefinition, error) + // Get quota definition by ID + Get(id string) (*QuotaDefinition, error) +} + +type resourceQuota struct { + client *client.Client +} + +func newResourceQuotaAPI(c *client.Client) ResourceQuotaRepository { + return &resourceQuota{ + client: c, + } +} + +func (r *resourceQuota) List() ([]QuotaDefinition, error) { + resp := quotaDefinitionQueryResult{} + // TODO: change to use pagination if it's available on backend + _, err := r.client.Get("/v2/quota_definitions", &resp) + if err != nil { + return []QuotaDefinition{}, err + } + return resp.QuotaDefinitions, nil +} + +func (r *resourceQuota) FindByName(name string) ([]QuotaDefinition, error) { + allQuotas, err := r.List() + if err != nil { + return []QuotaDefinition{}, err + } + + quotas := []QuotaDefinition{} + for _, quota := range allQuotas { + if quota.Name == name { + quotas = append(quotas, quota) + } + } + + if len(quotas) == 0 { + return quotas, bmxerror.New(ErrCodeResourceQuotaDoesnotExist, + fmt.Sprintf("Given quota : %q doesn't exist", name)) + } + + return quotas, nil +} + +func (r *resourceQuota) Get(id string) (*QuotaDefinition, error) { + quota := QuotaDefinition{} + _, err := r.client.Get("/v2/quota_definitions/"+id, &quota) + if err != nil { + return nil, err + } + return &quota, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/schematics/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/schematics/api_service.go new file mode 100644 index 00000000000..3e08ea411e7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/schematics/api_service.go @@ -0,0 +1,70 @@ +package schematics + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//SchematicsServiceAPI is the Aramda K8s client ... +type SchematicsServiceAPI interface { + Workspaces() Workspaces + + //TODO Add other services +} + +//VpcContainerService holds the client +type scService struct { + *client.Client +} + +//New ...
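Quota definitions follow the same list-then-filter shape, only without pagination (note the TODO in List above). A short, hypothetical continuation of the previous sketch, reusing its mgmt client; "free" is a placeholder quota name:

quotas, err := mgmt.ResourceQuota().FindByName("free")
if err != nil {
    log.Fatal(err)
}
for _, q := range quotas {
    fmt.Println(q.ID, q.VSICountLimit)
}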
+func New(sess *session.Session) (SchematicsServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.SchematicsService) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.SchematicsEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &scService{ + Client: client.New(config, bluemix.SchematicsService, tokenRefreher), + }, nil +} + +//Clusters implements Clusters API +func (c scService) Workspaces() Workspaces { + return newWorkspaceAPI(c.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/schematics/workspace.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/schematics/workspace.go new file mode 100644 index 00000000000..591774be4ba --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/schematics/workspace.go @@ -0,0 +1,188 @@ +package schematics + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/client" +) + +type WorkspaceConfig struct { + ID string `json:"id"` + Name string `json:"name"` + Type []string `json:"type"` + Description string `json:"description"` + ResourceGroup string `json:"resource_group"` + Location string `json:"location"` + Tags []string `json:"tags"` + CreatedAt string `json:"created_at"` + CreatedBy string `json:"created_by"` + Status string `json:"status"` + WorkspaceStatusMsg StatusMsgInfo `json:"workspace_status_msg"` + WorkspaceStatus StatusInfo `json:"workspace_status"` + TemplateRepo RepoInfo `json:"template_repo"` + TemplateData []TemplateDataInfo `json:"template_data"` + RuntimeData []RuntimeDataInfo `json:"runtime_data"` + SharedData SharedDataInfo `json:"shared_data"` + UpdatedAt string `json:"updated_at"` + LastHealthCheckAt string `json:"last_health_check_at"` + CatalogRef CatalogInfo `json:"catalog_ref"` + CRN string `json:"crn"` +} + +type StatusMsgInfo struct { + StatusCode string `json:"status_code"` + StatusMsg string `json:"status_msg"` +} +type StatusInfo struct { + Frozen bool `json:"frozen"` + FrozenAt string `json:"status_msg"` + LockedBy string `json:"locked_by"` + FrozenBy string `json:"frozen_by"` + Locked bool `json:"locked"` + LockedTime string `json:"locked_time"` +} + +type TemplateDataInfo struct { + Env []EnvValues `json:"env_values"` + Folder string `json:"folder"` + TemplateID string `json:"id"` + Type string `json:"type"` + Locked bool `json:"locked"` + UninstallScriptName string `json:"uninstall_script_name"` + Values string `json:"values"` + ValuesMetadata interface{} `json:"values_metadata"` + ValuesURL string `json:"values_url"` + Variablestore []Variablestore `json:"variablestore"` +} + +type RuntimeDataInfo struct { + EngineCmd string `json:"engine_cmd"` + EngineName string `json:"engine_name"` + TemplateID string `json:"id"` + EngineVersion string `json:"engine_version"` + LogStoreURL string `json:"log_store_url"` + OutputValues map[string]string `json:"output_values"` + StateStoreURL string `json:"state_store_url"` + Resources [][]map[string]string `json:"resources"` +} +type RepoInfo struct { + Branch 
string `json:"branch"` + Release string `json:"release"` + RepoURL string `json:"repo_url"` + URL string `json:"url"` +} +type SharedDataInfo struct { + ClusterID string `json:"cluster_id"` + ClusterName string `json:"cluster_name"` + EntitlementKeys []map[string]string `json:"entitlement_keys"` + Namespace string `json:"namespace"` + Region string `json:"region"` + ResourceGroupID string `json:"resource_group_id"` +} + +type EnvValues struct { + Hidden bool `json:"hidden"` + Name string `json:"name"` + // Secure bool `json:"secure"` + Value string `json:"value"` +} +type OutputResponse struct { + Folder string `json:"folder"` + TemplateID string `json:"id"` + Type string `json:"type"` + Output []map[string]OutputValues `json:"output_values"` +} + +type OutputValues struct { + Sensitive bool `json:"sensitive"` + Value interface{} `json:"value"` + Type interface{} `json:"type"` +} + +type CreateWorkspaceConfig struct { + Name string `json:"name"` + Type []string `json:"type"` + Description string `json:"description"` + Tags []string `json:"tags"` + WorkspaceStatus StatusInfo `json:"workspace_status"` + TemplateRepo RepoInfo `json:"template_repo"` + TemplateRef string `json:"template_ref"` + TemplateData []TemplateDataInfo `json:"template_data"` +} + +type Payload struct { + Name string `json:"name"` + Type []string `json:"type"` + Description string `json:"description"` + Tags []string `json:"tags"` + TemplateRef string `json:"template_ref"` + TemplateRepo TemplateRepo `json:"template_repo"` + WorkspaceStatus WorkspaceStatus `json:"workspace_status"` + TemplateData []TemplateData `json:"template_data"` +} +type TemplateRepo struct { + URL string `json:"url"` +} +type WorkspaceStatus struct { + Frozen bool `json:"frozen"` +} +type Variablestore struct { + Name string `json:"name"` + Secure bool `json:"secure,omitempty"` + Value string `json:"value"` + Description string `json:"description,omitempty"` +} +type TemplateData struct { + Folder string `json:"folder"` + Type string `json:"type"` + Variablestore []Variablestore `json:"variablestore"` +} + +type CatalogInfo struct { + ItemID string `json:"item_id"` + ItemName string `json:"item_name"` + ItemURL string `json:"item_url"` + ItemReadmeURL string `json:"item_readme_url"` + ItemIconURL string `json:"item_icon_url"` + OfferingVersion string `json:"offering_version"` +} +type workspace struct { + client *client.Client +} + +type Workspaces interface { + GetWorkspaceByID(WorskpaceID string) (WorkspaceConfig, error) + GetOutputValues(WorskpaceID string) ([]OutputResponse, error) + GetStateStore(WorskpaceID, TemplateID string) (interface{}, error) + CreateWorkspace(createReq Payload) (WorkspaceConfig, error) +} + +func newWorkspaceAPI(c *client.Client) Workspaces { + return &workspace{ + client: c, + } +} + +func (r *workspace) GetWorkspaceByID(WorskpaceID string) (WorkspaceConfig, error) { + var successV WorkspaceConfig + _, err := r.client.Get(fmt.Sprintf("/v1/workspaces/%s", WorskpaceID), &successV) + return successV, err +} +func (r *workspace) GetStateStore(WorskpaceID, TemplateID string) (interface{}, error) { + var successV interface{} + _, err := r.client.Get(fmt.Sprintf("/v1/workspaces/%s/runtime_data/%s/state_store", WorskpaceID, TemplateID), &successV) + return successV, err +} +func (r *workspace) GetOutputValues(WorskpaceID string) ([]OutputResponse, error) { + outputs := []OutputResponse{} + _, err := r.client.Get(fmt.Sprintf("/v1/workspaces/%s/output_values", WorskpaceID), &outputs) + if err != nil { + return nil, err + } + 
return outputs, err +} +func (r *workspace) CreateWorkspace(createReq Payload) (WorkspaceConfig, error) { + var successV WorkspaceConfig + _, err := r.client.Post("/v1/workspaces", createReq, &successV) + return successV, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/api_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/api_service.go new file mode 100644 index 00000000000..92a5244b514 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/api_service.go @@ -0,0 +1,68 @@ +package usermanagementv2 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + "github.com/IBM-Cloud/bluemix-go/session" +) + +//UserManagementAPI is the resource client ... +type UserManagementAPI interface { + UserInvite() Users +} + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//userManagement holds the client +type userManagement struct { + *client.Client +} + +//New ... +func New(sess *session.Session) (UserManagementAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.UserManagement) + if err != nil { + return nil, err + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + tokenRefreher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + HTTPClient: config.HTTPClient, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefreher, config) + if err != nil { + return nil, err + } + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.UserManagementEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &userManagement{ + Client: client.New(config, bluemix.UserManagement, tokenRefreher), + }, nil +} + +// UserInvite API +func (a *userManagement) UserInvite() Users { + return NewUserInviteHandler(a.Client) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/models.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/models.go new file mode 100644 index 00000000000..2324f5e0f47 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/models.go @@ -0,0 +1,89 @@ +package usermanagementv2 + +import "github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1" + +// User ... +type User struct { + Email string `json:"email"` + AccountRole string `json:"account_role"` +} + +// UserInfo contains user info +type UserInfo struct { + ID string `json:"id"` + IamID string `json:"iam_id"` + Realm string `json:"realm"` + UserID string `json:"user_id"` + Firstname string `json:"firstname"` + Lastname string `json:"lastname"` + State string `json:"state"` + Email string `json:"email"` + Phonenumber string `json:"phonenumber"` + Altphonenumber string `json:"altphonenumber"` + Photo string `json:"photo"` + AccountID string `json:"account_id"` +} + +// UserSettings ... +type UserSettingOptions struct { + + //The console UI language + Language string `json:"language"` + + //The language for email and phone notifications. 
+ NotificationLanguage string `json:"notification_language"` + + //The IP addresses listed here are the only ones from which this user can log in to IBM Cloud. + AllowedIPAddresses string `json:"allowed_ip_addresses"` + + //Whether user-managed login is enabled. + SelfManage bool `json:"self_manage"` +} + +// UserInvite ... +type UserInvite struct { + Users []User `json:"users"` + IAMPolicy []UserPolicy `json:"iam_policy,omitempty"` + AccessGroup []string `json:"access_groups,omitempty"` + InfrastructureRoles *InfraPermissions `json:"infrastructure_roles,omitempty"` + OrganizationRoles []OrgRole `json:"organization_roles,omitempty"` +} + +// UsersList to get list of users +type UsersList struct { + TotalUsers int `json:"total_results"` + Limit int `json:"limit"` + FistURL string `json:"fist_url"` + Resources []UserInfo `json:"resources"` +} + +// UserPolicy ... +type UserPolicy struct { + Type string `json:"type"` + Roles []iampapv1.Role `json:"roles"` + Resources []iampapv1.Resource `json:"resources"` +} + +//InfraPermissions ... +type InfraPermissions struct { + Permissions []string `json:"permissions"` +} + +//OrgRole ... +type OrgRole struct { + Users []string `json:"users"` + Region string `json:"region"` + Auditors []string `json:"auditors,omitempty"` + Managers []string `json:"managers,omitempty"` + BillingManagers []string `json:"billing_managers,omitempty"` + ID string `json:"id"` + Spaces []Space `json:"spaces"` +} + +//Space ... +type Space struct { + ID string `json:"id"` + Managers []string `json:"managers,omitempty"` + Developers []string `json:"developers,omitempty"` + Auditors []string `json:"auditors,omitempty" ` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/pagination.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/pagination.go new file mode 100644 index 00000000000..62f4c700b90 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/pagination.go @@ -0,0 +1,39 @@ +package usermanagementv2 + +import ( + "encoding/json" + "reflect" + "strings" +) + +type GenericPaginatedResourcesHandler struct { + resourceType reflect.Type +} + +func NewRCPaginatedResources(resource interface{}) GenericPaginatedResourcesHandler { + return GenericPaginatedResourcesHandler{ + resourceType: reflect.TypeOf(resource), + } +} + +func (pr GenericPaginatedResourcesHandler) Resources(bytes []byte, curURL string) ([]interface{}, string, error) { + var paginatedResources = struct { + NextUrl string `json:"next_url"` + ResourcesBytes json.RawMessage `json:"resources"` + }{} + + err := json.Unmarshal(bytes, &paginatedResources) + + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + dc := json.NewDecoder(strings.NewReader(string(paginatedResources.ResourcesBytes))) + dc.UseNumber() + err = dc.Decode(slicePtr.Interface()) + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + + return contents, paginatedResources.NextUrl, err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/user_invite.go b/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/user_invite.go new file mode 100644 index 00000000000..a9cdf9e9c01 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2/user_invite.go @@ -0,0 +1,143 @@ +package usermanagementv2 + +import ( + "fmt" + 
"net/http" + + "github.com/IBM-Cloud/bluemix-go/client" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +const ( + _UsersIDPath = "/v2/accounts/%s/users/%s" + _UsersURL = "/v2/accounts/%s/users" + _UserSettingsPath = "/v2/accounts/%s/users/%s/settings" +) + +// Users ... +type Users interface { + //GetUsers returns users in the first page alone + GetUsers(ibmUniqueID string) (UsersList, error) + //ListUsers returns all the users in the account + ListUsers(ibmUniqueID string) ([]UserInfo, error) + GetUserProfile(ibmUniqueID string, userID string) (UserInfo, error) + InviteUsers(ibmUniqueID string, users UserInvite) (UserInvite, error) + UpdateUserProfile(ibmUniqueID string, userID string, user UserInfo) error + RemoveUsers(ibmUniqueID string, userID string) error + GetUserSettings(accountID string, iamID string) (UserSettingOptions, error) + //Same patch request is being used to create, update and delete + ManageUserSettings(accountID string, iamID string, userSettings UserSettingOptions) (UserSettingOptions, error) +} + +type inviteUsersHandler struct { + client *client.Client +} + +// NewUsers +func NewUserInviteHandler(c *client.Client) Users { + return &inviteUsersHandler{ + client: c, + } +} + +//GetUsers returns users in the first page alone +func (r *inviteUsersHandler) GetUsers(ibmUniqueID string) (UsersList, error) { + result := UsersList{} + URL := fmt.Sprintf(_UsersURL, ibmUniqueID) + resp, err := r.client.Get(URL, &result) + + if resp.StatusCode == http.StatusNotFound { + return UsersList{}, nil + } + + if err != nil { + return result, err + } + + return result, nil +} + +//ListUsers returns all the users in the account +func (r *inviteUsersHandler) ListUsers(ibmUniqueID string) ([]UserInfo, error) { + URL := fmt.Sprintf(_UsersURL, ibmUniqueID) + + var users []UserInfo + _, err := r.client.GetPaginated(URL, + NewRCPaginatedResources(UserInfo{}), + func(resource interface{}) bool { + if user, ok := resource.(UserInfo); ok { + users = append(users, user) + return true + } + return false + }, + ) + if err != nil { + return users, err + } + return users, nil +} + +func (r *inviteUsersHandler) GetUserProfile(ibmUniqueID string, userID string) (UserInfo, error) { + user := UserInfo{} + URL := fmt.Sprintf(_UsersIDPath, ibmUniqueID, userID) + _, err := r.client.Get(URL, &user) + if err != nil { + return UserInfo{}, err + } + + return user, nil +} + +func (r *inviteUsersHandler) InviteUsers(ibmUniqueID string, users UserInvite) (UserInvite, error) { + usersInvited := UserInvite{} + URL := fmt.Sprintf(_UsersURL, ibmUniqueID) + _, err := r.client.Post(URL, &users, &usersInvited) + if err != nil { + return UserInvite{}, err + } + + return usersInvited, nil +} + +func (r *inviteUsersHandler) UpdateUserProfile(ibmUniqueID string, userID string, user UserInfo) error { + URL := fmt.Sprintf(_UsersIDPath, ibmUniqueID, userID) + request := rest.PutRequest(*r.client.Config.Endpoint + URL) + request = request.Body(&user) + + _, err := r.client.SendRequest(request, nil) + if err != nil { + return err + } + + return nil +} + +func (r *inviteUsersHandler) RemoveUsers(ibmUniqueID string, userID string) error { + URL := fmt.Sprintf(_UsersIDPath, ibmUniqueID, userID) + _, err := r.client.Delete(URL) + return err +} + +func (r *inviteUsersHandler) GetUserSettings(accountID string, iamID string) (UserSettingOptions, error) { + settings := UserSettingOptions{} + URL := fmt.Sprintf(_UserSettingsPath, accountID, iamID) + _, err := r.client.Get(URL, &settings) + if err != nil { + return UserSettingOptions{}, err 
+ } + + return settings, nil +} + +func (r *inviteUsersHandler) ManageUserSettings(accountID string, iamID string, userSettings UserSettingOptions) (UserSettingOptions, error) { + resp := UserSettingOptions{} + URL := fmt.Sprintf(_UserSettingsPath, accountID, iamID) + _, err := r.client.Patch(URL, &userSettings, &resp) + + if err != nil { + return resp, err + } + + return resp, nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/authentication/auth.go b/vendor/github.com/IBM-Cloud/bluemix-go/authentication/auth.go new file mode 100644 index 00000000000..5110a41c6db --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/authentication/auth.go @@ -0,0 +1,26 @@ +package authentication + +import ( + "errors" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/client" +) + +const ( + //ErrCodeInvalidToken ... + ErrCodeInvalidToken = "InvalidToken" +) + +//PopulateTokens populate the relevant tokens in the bluemix Config using the token provider +func PopulateTokens(tokenProvider client.TokenProvider, c *bluemix.Config) error { + if c.IBMID != "" && c.IBMIDPassword != "" { + err := tokenProvider.AuthenticatePassword(c.IBMID, c.IBMIDPassword) + return err + } + if c.BluemixAPIKey != "" { + err := tokenProvider.AuthenticateAPIKey(c.BluemixAPIKey) + return err + } + return errors.New("Insufficient credentials, need IBMID/IBMIDPassword or IBM Cloud API Key or IAM/IAM refresh tokens") +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/authentication/iam.go b/vendor/github.com/IBM-Cloud/bluemix-go/authentication/iam.go new file mode 100644 index 00000000000..e366a85d087 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/authentication/iam.go @@ -0,0 +1,164 @@ +package authentication + +import ( + "encoding/base64" + "fmt" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +//IAMError ... +type IAMError struct { + ErrorCode string `json:"errorCode"` + ErrorMessage string `json:"errorMessage"` + ErrorDetails string `json:"errorDetails"` +} + +//Description ... +func (e IAMError) Description() string { + if e.ErrorDetails != "" { + return e.ErrorDetails + } + return e.ErrorMessage +} + +//IAMTokenResponse ... +type IAMTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + UAAAccessToken string `json:"uaa_token"` + UAARefreshToken string `json:"uaa_refresh_token"` + TokenType string `json:"token_type"` +} + +//IAMAuthRepository ... +type IAMAuthRepository struct { + config *bluemix.Config + client *rest.Client + endpoint string +} + +//NewIAMAuthRepository ... +func NewIAMAuthRepository(config *bluemix.Config, client *rest.Client) (*IAMAuthRepository, error) { + var endpoint string + + if config.TokenProviderEndpoint != nil { + endpoint = *config.TokenProviderEndpoint + } else { + var err error + endpoint, err = config.EndpointLocator.IAMEndpoint() + if err != nil { + return nil, err + } + } + + return &IAMAuthRepository{ + config: config, + client: client, + endpoint: endpoint, + }, nil +} + +//AuthenticatePassword ... +func (auth *IAMAuthRepository) AuthenticatePassword(username string, password string) error { + return auth.getToken(map[string]string{ + "grant_type": "password", + "username": username, + "password": password, + }) +} + +//AuthenticateAPIKey ... 
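auth.go plus the IAMAuthRepository above make up the token bootstrap that every api_service.go constructor in this diff repeats: build the repository against the IAM endpoint, then let PopulateTokens choose password or API-key authentication from whichever credentials the config carries. Condensed into one sketch, with config and restClient standing in for the values each constructor already holds:

tokenRefresher, err := authentication.NewIAMAuthRepository(config, restClient)
if err != nil {
    return nil, err
}
if config.IAMAccessToken == "" {
    // Selects AuthenticatePassword or AuthenticateAPIKey based on the populated fields.
    if err := authentication.PopulateTokens(tokenRefresher, config); err != nil {
        return nil, err
    }
}
// config.IAMAccessToken and config.IAMRefreshToken are now set for the service client.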
+func (auth *IAMAuthRepository) AuthenticateAPIKey(apiKey string) error { + return auth.getToken(map[string]string{ + "grant_type": "urn:ibm:params:oauth:grant-type:apikey", + "apikey": apiKey, + }) +} + +//AuthenticateSSO ... +func (auth *IAMAuthRepository) AuthenticateSSO(passcode string) error { + return auth.getToken(map[string]string{ + "grant_type": "urn:ibm:params:oauth:grant-type:passcode", + "passcode": passcode, + }) +} + +//RefreshToken ... +func (auth *IAMAuthRepository) RefreshToken() (string, error) { + data := map[string]string{ + "grant_type": "refresh_token", + "refresh_token": auth.config.IAMRefreshToken, + } + + err := auth.getToken(data) + if err != nil { + return "", err + } + + return auth.config.IAMAccessToken, nil +} + +//GetPasscode ... +func (auth *IAMAuthRepository) GetPasscode() (string, error) { + request := rest.PostRequest(auth.endpoint+"/identity/passcode"). + Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("bx:bx"))). + Field("grant_type", "refresh_token"). + Field("refresh_token", auth.config.IAMRefreshToken). + Field("response_type", "cloud_iam") + + res := make(map[string]string, 0) + var apiErr IAMError + + resp, err := auth.client.Do(request, &res, &apiErr) + if err != nil { + return "", err + } + + if apiErr.ErrorCode != "" { + if apiErr.ErrorCode == "BXNIM0407E" { + return "", bmxerror.New(ErrCodeInvalidToken, apiErr.Description()) + } + return "", bmxerror.NewRequestFailure(apiErr.ErrorCode, apiErr.Description(), resp.StatusCode) + } + + return res["passcode"], nil +} + +func (auth *IAMAuthRepository) getToken(data map[string]string) error { + request := rest.PostRequest(auth.endpoint+"/identity/token"). + Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("bx:bx"))). + Field("response_type", "cloud_iam") + + for k, v := range data { + request.Field(k, v) + } + + var tokens IAMTokenResponse + var apiErr IAMError + + resp, err := auth.client.Do(request, &tokens, &apiErr) + if err != nil { + return err + } + + if apiErr.ErrorCode != "" { + if apiErr.ErrorCode == "BXNIM0407E" { + if resp != nil && resp.Header != nil { + return bmxerror.New(ErrCodeInvalidToken, fmt.Sprintf("Transaction-Id:%s %s", resp.Header["Transaction-Id"], apiErr.Description())) + } + return bmxerror.New(ErrCodeInvalidToken, apiErr.Description()) + } + if resp != nil && resp.Header != nil { + return bmxerror.NewRequestFailure(apiErr.ErrorCode, fmt.Sprintf("Transaction-Id:%s %s", resp.Header["Transaction-Id"], apiErr.Description()), resp.StatusCode) + } + return bmxerror.NewRequestFailure(apiErr.ErrorCode, apiErr.Description(), resp.StatusCode) + } + + auth.config.IAMAccessToken = fmt.Sprintf("%s %s", tokens.TokenType, tokens.AccessToken) + auth.config.IAMRefreshToken = tokens.RefreshToken + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/authentication/uaa.go b/vendor/github.com/IBM-Cloud/bluemix-go/authentication/uaa.go new file mode 100644 index 00000000000..b27670ecb1d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/authentication/uaa.go @@ -0,0 +1,118 @@ +package authentication + +import ( + "encoding/base64" + "fmt" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +//UAAError ... +type UAAError struct { + ErrorCode string `json:"error"` + Description string `json:"error_description"` +} + +//UAATokenResponse ... 
+type UAATokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` +} + +//UAARepository ... +type UAARepository struct { + config *bluemix.Config + client *rest.Client + endpoint string +} + +//NewUAARepository ... +func NewUAARepository(config *bluemix.Config, client *rest.Client) (*UAARepository, error) { + var endpoint string + + if config.TokenProviderEndpoint != nil { + endpoint = *config.TokenProviderEndpoint + } else { + var err error + endpoint, err = config.EndpointLocator.UAAEndpoint() + if err != nil { + return nil, err + } + } + return &UAARepository{ + config: config, + client: client, + endpoint: endpoint, + }, nil +} + +//AuthenticatePassword ... +func (auth *UAARepository) AuthenticatePassword(username string, password string) error { + return auth.getToken(map[string]string{ + "grant_type": "password", + "username": username, + "password": password, + }) +} + +//AuthenticateSSO ... +func (auth *UAARepository) AuthenticateSSO(passcode string) error { + return auth.getToken(map[string]string{ + "grant_type": "password", + "passcode": passcode, + }) +} + +//AuthenticateAPIKey ... +func (auth *UAARepository) AuthenticateAPIKey(apiKey string) error { + return auth.AuthenticatePassword("apikey", apiKey) +} + +//RefreshToken ... +func (auth *UAARepository) RefreshToken() (string, error) { + err := auth.getToken(map[string]string{ + "grant_type": "refresh_token", + "refresh_token": auth.config.UAARefreshToken, + }) + if err != nil { + return "", err + } + + return auth.config.UAAAccessToken, nil +} + +//GetPasscode ... +func (auth *UAARepository) GetPasscode() (string, error) { + return "", nil +} + +func (auth *UAARepository) getToken(data map[string]string) error { + request := rest.PostRequest(auth.endpoint+"/oauth/token"). + Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("cf:"))). + Field("scope", "") + + for k, v := range data { + request.Field(k, v) + } + + var tokens UAATokenResponse + var apiErr UAAError + + resp, err := auth.client.Do(request, &tokens, &apiErr) + if err != nil { + return err + } + if apiErr.ErrorCode != "" { + if apiErr.ErrorCode == "invalid-token" { + return bmxerror.NewInvalidTokenError(apiErr.Description) + } + return bmxerror.NewRequestFailure(apiErr.ErrorCode, apiErr.Description, resp.StatusCode) + } + + auth.config.UAAAccessToken = fmt.Sprintf("%s %s", tokens.TokenType, tokens.AccessToken) + auth.config.UAARefreshToken = tokens.RefreshToken + return nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/cert.go b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/cert.go new file mode 100644 index 00000000000..a8bf0233212 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/cert.go @@ -0,0 +1,23 @@ +package bmxerror + +//InvalidSSLCert ... +type InvalidSSLCert struct { + URL string + Reason string +} + +//NewInvalidSSLCert ... 
+func NewInvalidSSLCert(url, reason string) *InvalidSSLCert { + return &InvalidSSLCert{ + URL: url, + Reason: reason, + } +} + +func (err *InvalidSSLCert) Error() string { + message := "Received invalid SSL certificate from " + err.URL + if err.Reason != "" { + message += " - " + err.Reason + } + return message +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/error.go b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/error.go new file mode 100644 index 00000000000..76888939307 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/error.go @@ -0,0 +1,25 @@ +package bmxerror + +//Error interface +type Error interface { + Error() string + Code() string + Description() string +} + +//RequestFailure interface +type RequestFailure interface { + Error + // The status code of the HTTP response. + StatusCode() int +} + +//New creates a new Error object +func New(code, description string) Error { + return newGenericError(code, description) +} + +//NewRequestFailure creates a new Error object wrapping the server error +func NewRequestFailure(code, description string, statusCode int) Error { + return newRequestError(code, description, statusCode) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/network.go b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/network.go new file mode 100644 index 00000000000..17da7dd0885 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/network.go @@ -0,0 +1,38 @@ +package bmxerror + +import ( + "crypto/x509" + "fmt" + "net" + "net/url" + + "golang.org/x/net/websocket" +) + +//WrapNetworkErrors ... +func WrapNetworkErrors(host string, err error) error { + var innerErr error + switch typedErr := err.(type) { + case *url.Error: + innerErr = typedErr.Err + case *websocket.DialError: + innerErr = typedErr.Err + } + + if innerErr != nil { + switch typedInnerErr := innerErr.(type) { + case x509.UnknownAuthorityError: + return NewInvalidSSLCert(host, "unknown authority") + case x509.HostnameError: + return NewInvalidSSLCert(host, "not valid for the requested host") + case x509.CertificateInvalidError: + return NewInvalidSSLCert(host, "") + case *net.OpError: + if typedInnerErr.Op == "dial" { + return fmt.Errorf("%s\n%s", err.Error(), "TIP: If you are behind a firewall and require an HTTP proxy, verify the https_proxy environment variable is correctly set. Else, check your network connection.") + } + } + } + + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/token.go b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/token.go new file mode 100644 index 00000000000..d7193174b2b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/token.go @@ -0,0 +1,17 @@ +package bmxerror + +//InvalidTokenError ... +type InvalidTokenError struct { + Message string +} + +//NewInvalidTokenError ... 
+func NewInvalidTokenError(message string) *InvalidTokenError { + return &InvalidTokenError{ + Message: message, + } +} + +func (e *InvalidTokenError) Error() string { + return ("Invalid auth token: ") + e.Message +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/types.go b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/types.go new file mode 100644 index 00000000000..1c5323fbe7f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/bmxerror/types.go @@ -0,0 +1,57 @@ +package bmxerror + +import "fmt" + +type genericError struct { + code string + description string +} + +func newGenericError(code, description string) *genericError { + return &genericError{code, description} +} + +func (g genericError) Error() string { + return fmt.Sprintf("%s: %s", g.code, g.description) +} + +func (g genericError) String() string { + return g.Error() +} + +func (g genericError) Code() string { + return g.code +} + +func (g genericError) Description() string { + return g.description +} + +type requestError struct { + genericError + statusCode int +} + +func newRequestError(code, description string, statusCode int) *requestError { + return &requestError{ + genericError: genericError{ + code: code, + description: description, + }, + statusCode: statusCode, + } +} + +func (r requestError) Error() string { + return fmt.Sprintf("Request failed with status code: %d, %s: %s", r.statusCode, r.code, r.description) +} + +func (r requestError) Code() string { + return r.code +} +func (r requestError) Description() string { + return r.description +} +func (r requestError) StatusCode() int { + return r.statusCode +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/client/client.go b/vendor/github.com/IBM-Cloud/bluemix-go/client/client.go new file mode 100644 index 00000000000..532a1228830 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/client/client.go @@ -0,0 +1,377 @@ +//Package client provides a generic client to be used by all services +package client + +import ( + "encoding/json" + "fmt" + "log" + "net" + gohttp "net/http" + "path" + "strings" + "sync" + "time" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" +) + +//TokenProvider ... +type TokenProvider interface { + RefreshToken() (string, error) + GetPasscode() (string, error) + AuthenticatePassword(string, string) error + AuthenticateAPIKey(string) error +} + +/*type PaginatedResourcesHandler interface { + Resources(rawResponse []byte, curPath string) (resources []interface{}, nextPath string, err error) +} + +//HandlePagination ... +type HandlePagination func(c *Client, path string, paginated PaginatedResourcesHandler, cb func(interface{}) bool) (resp *gohttp.Response, err error) +*/ + +//Client is the base client for all service api client +type Client struct { + Config *bluemix.Config + DefaultHeader gohttp.Header + ServiceName bluemix.ServiceName + TokenRefresher TokenProvider + //HandlePagination HandlePagination + + headerLock sync.Mutex +} + +//Config stores any generic service client configurations +type Config struct { + Config *bluemix.Config + Endpoint string +} + +//New ... +func New(c *bluemix.Config, serviceName bluemix.ServiceName, refresher TokenProvider) *Client { + return &Client{ + Config: c, + ServiceName: serviceName, + TokenRefresher: refresher, + //HandlePagination: pagination, + DefaultHeader: getDefaultAuthHeaders(serviceName, c), + } +} + +//SendRequest ... 
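Because every service call funnels errors through these bmxerror types, callers branch with type assertions rather than string matching. A brief, hypothetical sketch:

if reqErr, ok := err.(bmxerror.RequestFailure); ok {
    // RequestFailure adds the HTTP status to the service's code/description pair.
    if reqErr.StatusCode() == 404 {
        // treat as not-found and move on
    }
    log.Println(reqErr.Code(), reqErr.Description())
}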
+func (c *Client) SendRequest(r *rest.Request, respV interface{}) (*gohttp.Response, error) { + + retries := *c.Config.MaxRetries + if retries < 1 { + return c.MakeRequest(r, respV) + } + wait := *c.Config.RetryDelay + + return c.tryHTTPRequest(retries, wait, r, respV) +} + +// MakeRequest ... +func (c *Client) MakeRequest(r *rest.Request, respV interface{}) (*gohttp.Response, error) { + httpClient := c.Config.HTTPClient + if httpClient == nil { + httpClient = gohttp.DefaultClient + } + restClient := &rest.Client{ + DefaultHeader: c.DefaultHeader, + HTTPClient: httpClient, + } + resp, err := restClient.Do(r, respV, nil) + // The response returned by go HTTP client.Do() could be nil if request timeout. + // For convenience, we ensure that response returned by this method is always not nil. + if resp == nil { + return new(gohttp.Response), err + } + if err != nil { + if resp.StatusCode == 401 && c.TokenRefresher != nil { + log.Println("Authentication failed. Trying token refresh") + c.headerLock.Lock() + defer c.headerLock.Unlock() + _, err := c.TokenRefresher.RefreshToken() + switch err.(type) { + case nil: + restClient.DefaultHeader = getDefaultAuthHeaders(c.ServiceName, c.Config) + for k := range c.DefaultHeader { + r.Del(k) + } + c.DefaultHeader = restClient.DefaultHeader + resp, err := restClient.Do(r, respV, nil) + if resp == nil { + return new(gohttp.Response), err + } + if err != nil { + err = bmxerror.WrapNetworkErrors(resp.Request.URL.Host, err) + } + return resp, err + case *bmxerror.InvalidTokenError: + return resp, bmxerror.NewRequestFailure("InvalidToken", fmt.Sprintf("%v", err), 401) + default: + return resp, fmt.Errorf("Authentication failed, Unable to refresh auth token: %v. Try again later", err) + } + } + + } + return resp, err +} + +func (c *Client) tryHTTPRequest(retries int, wait time.Duration, r *rest.Request, respV interface{}) (*gohttp.Response, error) { + + resp, err := c.MakeRequest(r, respV) + if err != nil { + if !isRetryable(err) { + if resp == nil { + return new(gohttp.Response), err + } + return resp, err + } + if retries--; retries >= 0 { + time.Sleep(wait) + return c.tryHTTPRequest( + retries, wait, r, respV) + } + } + if resp == nil { + return new(gohttp.Response), err + } + return resp, err +} + +//Get ... +func (c *Client) Get(path string, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) { + r := rest.GetRequest(c.URL(path)) + for _, t := range extraHeader { + addToRequestHeader(t, r) + } + return c.SendRequest(r, respV) +} + +//Put ... +func (c *Client) Put(path string, data interface{}, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) { + r := rest.PutRequest(c.URL(path)).Body(data) + for _, t := range extraHeader { + addToRequestHeader(t, r) + } + return c.SendRequest(r, respV) +} + +//Patch ... +func (c *Client) Patch(path string, data interface{}, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) { + r := rest.PatchRequest(c.URL(path)).Body(data) + for _, t := range extraHeader { + addToRequestHeader(t, r) + } + return c.SendRequest(r, respV) +} + +//Post ... +func (c *Client) Post(path string, data interface{}, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) { + r := rest.PostRequest(c.URL(path)).Body(data) + for _, t := range extraHeader { + addToRequestHeader(t, r) + } + + return c.SendRequest(r, respV) +} + +//PostWithForm ... 
+func (c *Client) PostWithForm(path string, form interface{}, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) { + r := rest.PostRequest(c.URL(path)) + for _, t := range extraHeader { + addToRequestHeader(t, r) + } + addToRequestForm(form, r) + + return c.SendRequest(r, respV) +} + +//Delete ... +func (c *Client) Delete(path string, extraHeader ...interface{}) (*gohttp.Response, error) { + r := rest.DeleteRequest(c.URL(path)) + for _, t := range extraHeader { + addToRequestHeader(t, r) + } + return c.SendRequest(r, nil) +} + +//DeleteWithResp ... +func (c *Client) DeleteWithResp(path string, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) { + r := rest.DeleteRequest(c.URL(path)) + for _, t := range extraHeader { + addToRequestHeader(t, r) + } + return c.SendRequest(r, respV) +} + +//DeleteWithBody ... +func (c *Client) DeleteWithBody(path string, data interface{}, extraHeader ...interface{}) (*gohttp.Response, error) { + r := rest.DeleteRequest(c.URL(path)).Body(data) + for _, t := range extraHeader { + addToRequestHeader(t, r) + } + return c.SendRequest(r, nil) +} + +func addToRequestHeader(h interface{}, r *rest.Request) { + switch v := h.(type) { + case map[string]string: + for key, value := range v { + r.Set(key, value) + } + } +} + +func addToRequestForm(h interface{}, r *rest.Request) { + switch v := h.(type) { + case map[string]string: + for key, value := range v { + r.Field(key, value) + } + } +} + +/*//GetPaginated ... +func (c *Client) GetPaginated(path string, paginated PaginatedResourcesHandler, cb func(interface{}) bool) (resp *gohttp.Response, err error) { + return c.HandlePagination(c, path, paginated, cb) +}*/ + +type PaginatedResourcesHandler interface { + Resources(rawResponse []byte, curPath string) (resources []interface{}, nextPath string, err error) +} + +func (c *Client) GetPaginated(path string, paginated PaginatedResourcesHandler, cb func(interface{}) bool) (resp *gohttp.Response, err error) { + for path != "" { + var raw json.RawMessage + resp, err = c.Get(path, &raw) + if err != nil { + return + } + + var resources []interface{} + var nextPath string + resources, nextPath, err = paginated.Resources([]byte(raw), path) + if err != nil { + err = fmt.Errorf("%s: Error parsing JSON", err.Error()) + return + } + + for _, resource := range resources { + if !cb(resource) { + return + } + } + + path = nextPath + } + return +} + +//URL ... 
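SendRequest above retries timeouts and a fixed list of retryable HTTP statuses, with both knobs exposed as pointers on bluemix.Config. A hedged configuration sketch (apiKey is assumed to be supplied by the caller; whether the session layer defaults these fields when unset is not shown in this diff):

retries := 3
delay := 30 * time.Second
config := &bluemix.Config{
    BluemixAPIKey: apiKey, // assumed available to the caller
    Region:        "us-south",
    MaxRetries:    &retries, // dereferenced by SendRequest, so it must be non-nil
    RetryDelay:    &delay,
}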
+func (c *Client) URL(path string) string { + return *c.Config.Endpoint + cleanPath(path) +} + +func cleanPath(p string) string { + if p == "" { + return "/" + } + if !strings.HasPrefix(p, "/") { + p = "/" + p + } + return path.Clean(p) +} + +const ( + userAgentHeader = "User-Agent" + authorizationHeader = "Authorization" + uaaAccessTokenHeader = "X-Auth-Uaa-Token" + userAccessTokenHeader = "X-Auth-User-Token" + iamRefreshTokenHeader = "X-Auth-Refresh-Token" + crRefreshTokenHeader = "RefreshToken" +) + +func getDefaultAuthHeaders(serviceName bluemix.ServiceName, c *bluemix.Config) gohttp.Header { + h := gohttp.Header{} + switch serviceName { + case bluemix.MccpService, bluemix.AccountService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.UAAAccessToken) + case bluemix.ContainerService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.IAMAccessToken) + h.Set(iamRefreshTokenHeader, c.IAMRefreshToken) + h.Set(uaaAccessTokenHeader, c.UAAAccessToken) + case bluemix.VpcContainerService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.IAMAccessToken) + h.Set(iamRefreshTokenHeader, c.IAMRefreshToken) + case bluemix.SchematicsService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.IAMAccessToken) + h.Set(iamRefreshTokenHeader, c.IAMRefreshToken) + case bluemix.ContainerRegistryService: + h.Set(authorizationHeader, c.IAMAccessToken) + h.Set(crRefreshTokenHeader, c.IAMRefreshToken) + case bluemix.IAMPAPService, bluemix.AccountServicev1, bluemix.ResourceCatalogrService, bluemix.ResourceControllerService, bluemix.ResourceControllerServicev2, bluemix.ResourceManagementService, bluemix.ResourceManagementServicev2, bluemix.IAMService, bluemix.IAMUUMService, bluemix.IAMUUMServicev2, bluemix.IAMPAPServicev2, bluemix.CseService: + h.Set(authorizationHeader, c.IAMAccessToken) + case bluemix.UserManagement: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.IAMAccessToken) + case bluemix.CisService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(userAccessTokenHeader, c.IAMAccessToken) + case bluemix.GlobalSearchService, bluemix.GlobalTaggingService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.IAMAccessToken) + h.Set(iamRefreshTokenHeader, c.IAMRefreshToken) + case bluemix.ICDService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.IAMAccessToken) + case bluemix.CertificateManager: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.IAMAccessToken) + case bluemix.HPCService: + h.Set(authorizationHeader, c.IAMAccessToken) + case bluemix.FunctionsService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.IAMAccessToken) + + default: + log.Println("Unknown service - No auth headers set") + } + return h +} + +func isTimeout(err error) bool { + if bmErr, ok := err.(bmxerror.RequestFailure); ok { + switch bmErr.StatusCode() { + case 408, 504, 599, 429, 500, 502, 520, 503, 403: + return true + } + } + + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + return true + } + + if netErr, ok := err.(*net.OpError); ok && netErr.Timeout() { + return true + } + + if netErr, ok := err.(net.UnknownNetworkError); ok && netErr.Timeout() { + return true + } + + return false +} + +func isRetryable(err error) bool { + return isTimeout(err) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/config.go b/vendor/github.com/IBM-Cloud/bluemix-go/config.go new file mode 100644 index 
00000000000..366a3d70417 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/config.go @@ -0,0 +1,132 @@ +package bluemix + +import ( + "net/http" + "time" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/endpoints" +) + +//ServiceName .. +type ServiceName string + +const ( + //AccountService ... + AccountService ServiceName = ServiceName("account") + //AccountServicev1 ... + AccountServicev1 ServiceName = ServiceName("accountv1") + //CertificateManager ... + CertificateManager ServiceName = ServiceName("certificate-manager") + //CisService ... + CisService ServiceName = ServiceName("cis") + //ContainerService ... + ContainerService ServiceName = ServiceName("container") + //ContainerService ... + VpcContainerService ServiceName = ServiceName("containerv2") + //RegistryService ... + ContainerRegistryService ServiceName = ServiceName("container-registry") + //GlobalSearchService ... + GlobalSearchService ServiceName = ServiceName("global-search") + //GlobalTaggingService ... + GlobalTaggingService ServiceName = ServiceName("global-tagging") + //IAMService ... + IAMService ServiceName = ServiceName("iam") + //IAMPAPService + IAMPAPService ServiceName = ServiceName("iampap") + //IAMUUMService ... + IAMUUMService ServiceName = ServiceName("iamuum") + //IAMUUMServicev2 ... + IAMUUMServicev2 ServiceName = ServiceName("iamuumv2") + //IAMPAPServicev2 ... + IAMPAPServicev2 ServiceName = ServiceName("iampapv2") + //ICDService ... + ICDService ServiceName = ServiceName("icd") + //MccpService ... + MccpService ServiceName = ServiceName("mccp") + //resourceManagementService + ResourceManagementService ServiceName = ServiceName("resource-management") + //resourceManagementService + ResourceManagementServicev2 ServiceName = ServiceName("resource-managementv2") + //resourceControllerService + ResourceControllerService ServiceName = ServiceName("resource-controller") + //resourceControllerServicev2 + ResourceControllerServicev2 ServiceName = ServiceName("resource-controllerv2") + //resourceCatalogService + ResourceCatalogrService ServiceName = ServiceName("resource-catalog ") + //UAAService ... + UAAService ServiceName = ServiceName("uaa") + //CSEService + CseService ServiceName = ServiceName("cse") + //SchematicsService ... + SchematicsService ServiceName = ServiceName("schematics") + //UserManagement ... + UserManagement ServiceName = ServiceName("user-management") + //HPCService ... + HPCService ServiceName = ServiceName("hpcs") + //FunctionsService ... + FunctionsService ServiceName = ServiceName("functions") +) + +//Config ... +type Config struct { + IBMID string + + IBMIDPassword string + + BluemixAPIKey string + + IAMAccessToken string + IAMRefreshToken string + UAAAccessToken string + UAARefreshToken string + + //Region is optional. If region is not provided then endpoint must be provided + Region string + //ResourceGroupID + ResourceGroup string + //Endpoint is optional. If endpoint is not provided then endpoint must be obtained from region via EndpointLocator + Endpoint *string + //TokenProviderEndpoint is optional. 
If endpoint is not provided then endpoint must be obtained from region via EndpointLocator + TokenProviderEndpoint *string + EndpointLocator endpoints.EndpointLocator + MaxRetries *int + RetryDelay *time.Duration + + HTTPTimeout time.Duration + + Debug bool + + HTTPClient *http.Client + + SSLDisable bool + Visibility string +} + +//Copy allows the configuration to be overriden or added +//Typically the endpoints etc +func (c *Config) Copy(mccpgs ...*Config) *Config { + out := new(Config) + *out = *c + if len(mccpgs) == 0 { + return out + } + for _, mergeInput := range mccpgs { + if mergeInput.Endpoint != nil { + out.Endpoint = mergeInput.Endpoint + } + } + return out +} + +//ValidateConfigForService ... +func (c *Config) ValidateConfigForService(svc ServiceName) error { + if (c.IBMID == "" || c.IBMIDPassword == "") && c.BluemixAPIKey == "" && (c.IAMAccessToken == "" || c.IAMRefreshToken == "") { + return bmxerror.New(ErrInsufficientCredentials, "Please check the documentation on how to configure the IBM Cloud credentials") + } + + if c.Region == "" && (c.Endpoint == nil || *c.Endpoint == "") { + return bmxerror.New(ErrInvalidConfigurationCode, "Please provide region or endpoint") + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/endpoints/endpoints.go b/vendor/github.com/IBM-Cloud/bluemix-go/endpoints/endpoints.go new file mode 100644 index 00000000000..69c9088b282 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/endpoints/endpoints.go @@ -0,0 +1,455 @@ +package endpoints + +import ( + "fmt" + "log" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/helpers" +) + +//EndpointLocator ... +type EndpointLocator interface { + AccountManagementEndpoint() (string, error) + CertificateManagerEndpoint() (string, error) + CFAPIEndpoint() (string, error) + ContainerEndpoint() (string, error) + ContainerRegistryEndpoint() (string, error) + CisEndpoint() (string, error) + GlobalSearchEndpoint() (string, error) + GlobalTaggingEndpoint() (string, error) + IAMEndpoint() (string, error) + IAMPAPEndpoint() (string, error) + ICDEndpoint() (string, error) + MCCPAPIEndpoint() (string, error) + ResourceManagementEndpoint() (string, error) + ResourceControllerEndpoint() (string, error) + ResourceCatalogEndpoint() (string, error) + UAAEndpoint() (string, error) + CseEndpoint() (string, error) + SchematicsEndpoint() (string, error) + UserManagementEndpoint() (string, error) + HpcsEndpoint() (string, error) + FunctionsEndpoint() (string, error) +} + +const ( + //ErrCodeServiceEndpoint ... 
+ ErrCodeServiceEndpoint = "ServiceEndpointDoesnotExist" +) + +var regionToEndpoint = map[string]map[string]string{ + "cf": { + "us-south": "https://api.ng.bluemix.net", + "us-east": "https://api.us-east.bluemix.net", + "eu-gb": "https://api.eu-gb.bluemix.net", + "au-syd": "https://api.au-syd.bluemix.net", + "eu-de": "https://api.eu-de.bluemix.net", + "jp-tok": "https://api.jp-tok.bluemix.net", + }, + "cr": { + "us-south": "us.icr.io", + "us-east": "us.icr.io", + "eu-de": "de.icr.io", + "au-syd": "au.icr.io", + "eu-gb": "uk.icr.io", + "jp-tok": "jp.icr.io", + "jp-osa": "jp2.icr.io", + }, + "uaa": { + "us-south": "https://iam.cloud.ibm.com/cloudfoundry/login/us-south", + "us-east": "https://iam.cloud.ibm.com/cloudfoundry/login/us-east", + "eu-gb": "https://iam.cloud.ibm.com/cloudfoundry/login/uk-south", + "au-syd": "https://iam.cloud.ibm.com/cloudfoundry/login/ap-south", + "eu-de": "https://iam.cloud.ibm.com/cloudfoundry/login/eu-central", + }, +} +var privateRegions = map[string][]string{ + "accounts": {"us-south", "us-east"}, + "certificate-manager": {"us-south", "us-east", "eu-gb", "eu-de", "jp-tok", "au-syd", "jp-osa"}, + "icd": {"us-south", "us-east", "eu-gb", "eu-de", "jp-tok", "au-syd", "osl01", "seo01", "che01", "ca-tor"}, + "schematics": {"us-south", "us-east", "eu-de", "eu-gb"}, + "global-search-tagging": {"us-south", "us-east"}, + "container": {"us-south", "us-east", "eu-gb", "eu-de", "jp-tok", "au-syd", "jp-osa", "ca-tor"}, + "iam": {"us-south", "us-east"}, + "resource": {"us-south", "us-east"}, +} +var cloudEndpoint = "cloud.ibm.com" + +func contructEndpoint(subdomain, domain string) string { + endpoint := fmt.Sprintf("https://%s.%s", subdomain, domain) + return endpoint +} + +func validateRegion(region string, regionList []string) (string, error) { + for _, a := range regionList { + if a == region { + return a, nil + } + } + return "", fmt.Errorf("The given region %s does not support private endpoints", region) +} + +func init() { + //TODO populate the endpoints which can be retrieved from given endpoints dynamically + //Example - UAA can be found from the CF endpoint +} + +type endpointLocator struct { + region string + visibility string +} + +//NewEndpointLocator ... 
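+// NewEndpointLocator returns a locator that resolves service endpoints
+// for the given region and visibility ("public", "private", or
+// "public-and-private"). A brief, illustrative sketch (the region and
+// service choice are arbitrary examples):
+//
+//   locator := endpoints.NewEndpointLocator("us-south", "private")
+//   iamEP, err := locator.IAMEndpoint()
+//   // iamEP is "https://private.us-south.iam.cloud.ibm.com" because
+//   // us-south is in the private-region list for IAM above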
+func NewEndpointLocator(region, visibility string) EndpointLocator { + return &endpointLocator{region: region, visibility: visibility} +} + +func (e *endpointLocator) AccountManagementEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_ACCOUNT_MANAGEMENT_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" || e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["accounts"]) + if err != nil { + r = "us-south" // As there is no global private endpoint making default region to us-south + log.Printf("[ WARN ] There is no private endpoint support for this region %s, Defaulting to us-south", e.region) + } + return contructEndpoint(fmt.Sprintf("private.%s.accounts", r), cloudEndpoint), nil + } + return contructEndpoint("accounts", cloudEndpoint), nil +} + +func (e *endpointLocator) CertificateManagerEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_CERTIFICATE_MANAGER_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + return contructEndpoint(fmt.Sprintf("private.%s.certificate-manager", e.region), cloudEndpoint), nil + } + if e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["certificate-manager"]) + if err != nil { + return contructEndpoint(fmt.Sprintf("%s.certificate-manager", e.region), cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("private.%s.certificate-manager", r), cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("%s.certificate-manager", e.region), cloudEndpoint), nil +} + +func (e *endpointLocator) CFAPIEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_CF_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Private Endpoints is not supported by this service")) + } + if ep, ok := regionToEndpoint["cf"][e.region]; ok { + return ep, nil + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Cloud Foundry endpoint doesn't exist for region: %q", e.region)) +} + +func (e *endpointLocator) ContainerEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_CS_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + return contructEndpoint(fmt.Sprintf("private.%s.containers", e.region), fmt.Sprintf("%s/global", cloudEndpoint)), nil + } + if e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["container"]) + if err != nil { + return contructEndpoint("containers", fmt.Sprintf("%s/global", cloudEndpoint)), nil + } + return contructEndpoint(fmt.Sprintf("private.%s.containers", r), fmt.Sprintf("%s/global", cloudEndpoint)), nil + } + return contructEndpoint("containers", fmt.Sprintf("%s/global", cloudEndpoint)), nil +} + +func (e *endpointLocator) SchematicsEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive 
we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_SCHEMATICS_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" || e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["schematics"]) + if err != nil { + r = "us-south" + log.Printf("[ WARN ] There is no private endpoint support for this region %s, Defaulting to us-south", e.region) + } + if r == "us-south" || r == "us-east" { + return contructEndpoint("private-us.schematics", cloudEndpoint), nil + } + if r == "eu-gb" || r == "eu-de" { + return contructEndpoint("private-eu.schematics", cloudEndpoint), nil + } + } + return contructEndpoint(fmt.Sprintf("%s.schematics", e.region), cloudEndpoint), nil +} + +func (e *endpointLocator) ContainerRegistryEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_CR_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if ep, ok := regionToEndpoint["cr"][e.region]; ok { + return fmt.Sprintf("https://%s", ep), nil + } + if e.visibility == "private" { + if ep, ok := regionToEndpoint["cr"][e.region]; ok { + return contructEndpoint("private", ep), nil + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Container Registry private endpoint doesn't exist for region: %q", e.region)) + } + if e.visibility == "public-and-private" { + if ep, ok := regionToEndpoint["cr"][e.region]; ok { + return contructEndpoint("private", ep), nil + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Container Registry endpoint doesn't exist for region: %q", e.region)) + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Container Registry endpoint doesn't exist for region: %q", e.region)) +} + +// Not used in Provider as we have migrated to go-sdk +func (e *endpointLocator) CisEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_CIS_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" || e.visibility == "public-and-private" { + return contructEndpoint("api.private.cis", cloudEndpoint), nil + } + return contructEndpoint("api.cis", cloudEndpoint), nil +} + +func (e *endpointLocator) GlobalSearchEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_GS_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" || e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["global-search-tagging"]) + if err != nil { + r = "us-south" // As there is no global private endpoint making default region to us-south + log.Printf("[ WARN ] There is no private endpoint support for this region %s, Defaulting to us-south", e.region) + } + return contructEndpoint(fmt.Sprintf("api.private.%s.global-search-tagging", r), cloudEndpoint), nil + } + return contructEndpoint("api.global-search-tagging", cloudEndpoint), nil +} + +func (e *endpointLocator) GlobalTaggingEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := 
helpers.EnvFallBack([]string{"IBMCLOUD_GT_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" || e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["global-search-tagging"]) + if err != nil { + r = "us-south" // As there is no global private endpoint making default region to us-south + log.Printf("[ WARN ] There is no private endpoint support for this region %s, Defaulting to us-south", e.region) + } + return contructEndpoint(fmt.Sprintf("tags.private.%s.global-search-tagging", r), cloudEndpoint), nil + } + return contructEndpoint("tags.global-search-tagging", cloudEndpoint), nil +} + +func (e *endpointLocator) IAMEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_IAM_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" || e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["iam"]) + if err != nil { + return contructEndpoint("private.iam", cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("private.%s.iam", r), cloudEndpoint), nil + } + return contructEndpoint("iam", cloudEndpoint), nil +} + +func (e *endpointLocator) IAMPAPEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_IAMPAP_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" || e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["iam"]) + if err != nil { + return contructEndpoint("private.iam", cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("private.%s.iam", r), cloudEndpoint), nil + } + return contructEndpoint("iam", cloudEndpoint), nil +} + +func (e *endpointLocator) ICDEndpoint() (string, error) { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_ICD_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + return contructEndpoint(fmt.Sprintf("api.%s.private.databases", e.region), cloudEndpoint), nil + } + if e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["icd"]) + if err != nil { + return contructEndpoint(fmt.Sprintf("api.%s.databases", e.region), cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("api.%s.private.databases", r), cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("api.%s.databases", e.region), cloudEndpoint), nil +} + +func (e *endpointLocator) MCCPAPIEndpoint() (string, error) { + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_MCCP_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Private Endpoints is not supported by this service for the region %s", e.region)) + } + return contructEndpoint(fmt.Sprintf("mccp.%s.cf", e.region), cloudEndpoint), nil +} + +func (e *endpointLocator) ResourceManagementEndpoint() (string, error) { + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_RESOURCE_MANAGEMENT_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + r, err := 
validateRegion(e.region, privateRegions["resource"]) + if err != nil { + fmt.Println("Private endpoint supports only us-south and us-east region-specific endpoints") + return contructEndpoint("private.resource-controller", cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("private.%s.resource-controller", r), cloudEndpoint), nil + } + if e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["resource"]) + if err != nil { + return contructEndpoint("resource-controller", cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("private.%s.resource-controller", r), cloudEndpoint), nil + } + return contructEndpoint("resource-controller", cloudEndpoint), nil +} + +func (e *endpointLocator) ResourceControllerEndpoint() (string, error) { + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_RESOURCE_CONTROLLER_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + r, err := validateRegion(e.region, privateRegions["resource"]) + if err != nil { + fmt.Println("Private endpoint supports only us-south and us-east region-specific endpoints") + return contructEndpoint("private.resource-controller", cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("private.%s.resource-controller", r), cloudEndpoint), nil + } + if e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["resource"]) + if err != nil { + return contructEndpoint("resource-controller", cloudEndpoint), nil + } + return contructEndpoint(fmt.Sprintf("private.%s.resource-controller", r), cloudEndpoint), nil + } + return contructEndpoint("resource-controller", cloudEndpoint), nil +} + +func (e *endpointLocator) ResourceCatalogEndpoint() (string, error) { + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_RESOURCE_CATALOG_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" || e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["resource"]) + if err != nil { + r = "us-south" + } + return contructEndpoint(fmt.Sprintf("private.%s.globalcatalog", r), cloudEndpoint), nil + } + return contructEndpoint("globalcatalog", cloudEndpoint), nil +} + +func (e *endpointLocator) UAAEndpoint() (string, error) { + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_UAA_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Private Endpoints is not supported by this service for the region %s", e.region)) + } + if ep, ok := regionToEndpoint["uaa"][e.region]; ok { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + return ep, nil + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("UAA endpoint doesn't exist for region: %q", e.region)) +} + +func (e *endpointLocator) CseEndpoint() (string, error) { + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_CSE_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Private Endpoints is not supported by this service")) + } + return contructEndpoint("api.serviceendpoint", cloudEndpoint), nil +} + +func (e *endpointLocator) UserManagementEndpoint() (string, error) { + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_USER_MANAGEMENT_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == 
"private" || e.visibility == "public-and-private" { + r, err := validateRegion(e.region, privateRegions["resource"]) + if err != nil { + r = "us-south" + } + return contructEndpoint(fmt.Sprintf("private.%s.user-management", r), cloudEndpoint), nil + } + return contructEndpoint("user-management", cloudEndpoint), nil +} + +func (e *endpointLocator) HpcsEndpoint() (string, error) { + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_HPCS_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Private Endpoints is not supported by this service for the region %s", e.region)) + } + return fmt.Sprintf("https://%s.broker.hs-crypto.cloud.ibm.com/crypto_v2/", e.region), nil +} + +func (e *endpointLocator) FunctionsEndpoint() (string, error) { + endpoint := helpers.EnvFallBack([]string{"IBMCLOUD_FUNCTIONS_API_ENDPOINT"}, "") + if endpoint != "" { + return endpoint, nil + } + if e.visibility == "private" { + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Private Endpoints is not supported by this service for the region %s", e.region)) + } + return contructEndpoint(fmt.Sprintf("%s.functions", e.region), cloudEndpoint), nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/errors.go b/vendor/github.com/IBM-Cloud/bluemix-go/errors.go new file mode 100644 index 00000000000..e260fc44535 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/errors.go @@ -0,0 +1,12 @@ +package bluemix + +const ( + //ErrMissingRegionCode .. + ErrMissingRegionCode = "MissingRegion" + + //ErrInvalidConfigurationCode .. + ErrInvalidConfigurationCode = "InvalidConfiguration" + + //ErrInsufficientCredentials .. + ErrInsufficientCredentials = "InsufficientCredentials" +) diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/go.mod b/vendor/github.com/IBM-Cloud/bluemix-go/go.mod new file mode 100644 index 00000000000..0747b77dd47 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/go.mod @@ -0,0 +1,12 @@ +module github.com/IBM-Cloud/bluemix-go + +go 1.13 + +require ( + github.com/ghodss/yaml v1.0.0 + github.com/go-openapi/strfmt v0.20.0 + github.com/onsi/ginkgo v1.15.0 + github.com/onsi/gomega v1.10.5 + golang.org/x/net v0.0.0-20210119194325-5f4716e94777 + gopkg.in/yaml.v2 v2.4.0 +) diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/go.sum b/vendor/github.com/IBM-Cloud/bluemix-go/go.sum new file mode 100644 index 00000000000..52e4f69dc27 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/go.sum @@ -0,0 +1,186 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0 
h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-openapi/errors v0.19.8 h1:doM+tQdZbUm9gydV9yR+iQNmztbjj7I3sW4sIcAwIzc= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM= +github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= +github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod 
h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.mongodb.org/mongo-driver v1.4.3 h1:moga+uhicpVshTyaqY9L23E6QqwcHRUv1sqyOsoyOO8= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/helpers/conversion.go b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/conversion.go new file mode 100644 index 00000000000..2249b21245a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/conversion.go @@ -0,0 +1,33 @@ +package helpers + +import "time" + +// Bool returns a pointer to the bool value +func Bool(v bool) *bool { + return &v +} + +// 
Int returns a pointer to the int value +func Int(v int) *int { + return &v +} + +// String returns a pointer to the string value +func String(v string) *string { + return &v +} + +// Map returns a pointer to the map value +func Map(v map[string]interface{}) *map[string]interface{} { + return &v +} + +// IntSlice returns a pointer to the IntSlice value +func IntSlice(v []int) *[]int { + return &v +} + +// Duration returns a pointer to the time.Duration +func Duration(v time.Duration) *time.Duration { + return &v +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/helpers/env.go b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/env.go new file mode 100644 index 00000000000..3b65d26bb4a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/env.go @@ -0,0 +1,13 @@ +package helpers + +import "os" + +//EnvFallBack ... +func EnvFallBack(envs []string, defaultValue string) string { + for _, k := range envs { + if v := os.Getenv(k); v != "" { + return v + } + } + return defaultValue +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/helpers/file.go b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/file.go new file mode 100644 index 00000000000..4ad260fb92a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/file.go @@ -0,0 +1,117 @@ +package helpers + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + if err == nil { + return true + } + if os.IsNotExist(err) { + return false + } + return false +} + +func RemoveFile(path string) error { + if FileExists(path) { + return os.Remove(path) + } + return nil +} + +func CopyFile(src string, dest string) (err error) { + srcFile, err := os.Open(src) + if err != nil { + return + } + defer srcFile.Close() + + srcStat, err := srcFile.Stat() + if err != nil { + return + } + + if !srcStat.Mode().IsRegular() { + return fmt.Errorf("%s is not a regular file.", src) + } + + destFile, err := os.Create(dest) + if err != nil { + return + } + defer destFile.Close() + + _, err = io.Copy(destFile, srcFile) + return +} + +func CopyDir(src string, dest string) (err error) { + srcStat, err := os.Stat(src) + if err != nil { + return + } + + if !srcStat.Mode().IsDir() { + return fmt.Errorf("%s is not a directory.", src) + } + + _, err = os.Stat(dest) + if !os.IsNotExist(err) { + return fmt.Errorf("Destination %s already exists.", dest) + } + + entries, err := ioutil.ReadDir(src) + if err != nil { + return + } + + err = os.MkdirAll(dest, srcStat.Mode()) + if err != nil { + return + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + destPath := filepath.Join(dest, entry.Name()) + + if entry.Mode().IsDir() { + err = CopyDir(srcPath, destPath) + } else { + err = CopyFile(srcPath, destPath) + } + if err != nil { + return + } + } + + return +} + +//RemoveFilesWithPattern ... 
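+// RemoveFilesWithPattern deletes every entry in targetDir whose name
+// matches the given regular expression. Illustrative call (the
+// directory and pattern are hypothetical):
+//
+//   // remove stale downloaded archives such as "plugin-v1.zip"
+//   err := helpers.RemoveFilesWithPattern("/tmp/plugins", `plugin-.*\.zip$`)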
+func RemoveFilesWithPattern(targetDir, pattern string) error { + r, err := regexp.Compile(pattern) + if err != nil { + return err + } + files, err := ioutil.ReadDir(targetDir) + if err != nil { + return err + } + for _, f := range files { + if r.MatchString(f.Name()) { + err := os.RemoveAll(filepath.Join(targetDir, f.Name())) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/helpers/template.go b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/template.go new file mode 100644 index 00000000000..22f218bd641 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/template.go @@ -0,0 +1,15 @@ +package helpers + +import ( + "bytes" + "html/template" +) + +func Tprintf(tmpl string, param map[string]interface{}) string { + t := template.Must(template.New("Tprintf").Parse(tmpl)) + buf := &bytes.Buffer{} + if err := t.Execute(buf, param); err != nil { + return tmpl + } + return buf.String() +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/helpers/utils.go b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/utils.go new file mode 100644 index 00000000000..096f1f592c5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/utils.go @@ -0,0 +1,24 @@ +package helpers + +import ( + "path" + "strings" +) + +func GetFullURL(base string, path string) string { + if base == "" { + return path + } + + return base + CleanPath(path) +} + +func CleanPath(p string) string { + if p == "" { + return "/" + } + if !strings.HasPrefix(p, "/") { + p = "/" + p + } + return path.Clean(p) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/helpers/zip.go b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/zip.go new file mode 100644 index 00000000000..39f85921c82 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/helpers/zip.go @@ -0,0 +1,57 @@ +package helpers + +import ( + "archive/zip" + "io" + "os" + "path" + "path/filepath" +) + +//Unzip src to dest +func Unzip(src, dest string) error { + r, err := zip.OpenReader(src) + if err != nil { + return err + } + defer r.Close() + + err = os.MkdirAll(dest, 0755) + if err != nil { + return err + } + + for _, f := range r.File { + err := extractFileInZipArchive(dest, f) + if err != nil { + return err + } + } + + return nil +} + +func extractFileInZipArchive(dest string, f *zip.File) error { + rc, err := f.Open() + if err != nil { + return err + } + defer rc.Close() + + path := filepath.Join(path.Clean(dest), path.Clean(f.Name)) + + if f.FileInfo().IsDir() { + return os.MkdirAll(path, f.Mode()) + } + err = os.MkdirAll(filepath.Dir(path), f.Mode()) + if err != nil { + return err + } + zf, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) + if err != nil { + return err + } + defer zf.Close() + _, err = io.Copy(zf, rc) + return err +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/http/http.go b/vendor/github.com/IBM-Cloud/bluemix-go/http/http.go new file mode 100644 index 00000000000..cef1eade825 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/http/http.go @@ -0,0 +1,40 @@ +package http + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "runtime" + "time" + + "github.com/IBM-Cloud/bluemix-go" +) + +//NewHTTPClient ... 
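+// NewHTTPClient builds an *http.Client from a bluemix.Config: it wires
+// in the trace-logging transport defined in transport.go, honors
+// config.HTTPTimeout, and skips TLS verification only when
+// config.SSLDisable is set. Hypothetical usage (the request URL is just
+// an example):
+//
+//   cfg := &bluemix.Config{HTTPTimeout: 30 * time.Second}
+//   client := NewHTTPClient(cfg)
+//   resp, err := client.Get("https://example.cloud.ibm.com/v1/ping")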
+func NewHTTPClient(config *bluemix.Config) *http.Client { + return &http.Client{ + Transport: makeTransport(config), + Timeout: config.HTTPTimeout, + } +} + +func makeTransport(config *bluemix.Config) http.RoundTripper { + return NewTraceLoggingTransport(&http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 50 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 20 * time.Second, + DisableCompression: true, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: config.SSLDisable, + }, + }) +} + +//UserAgent ... +func UserAgent() string { + return fmt.Sprintf("Bluemix-go SDK %s / %s ", bluemix.Version, runtime.GOOS) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/http/transport.go b/vendor/github.com/IBM-Cloud/bluemix-go/http/transport.go new file mode 100644 index 00000000000..7a76f3c8cff --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/http/transport.go @@ -0,0 +1,81 @@ +package http + +import ( + "net/http" + "net/http/httputil" + "strings" + "time" + + "github.com/IBM-Cloud/bluemix-go/trace" +) + +// TraceLoggingTransport is a thin wrapper around Transport. It dumps HTTP +// request and response using trace logger, based on the "BLUEMIX_TRACE" +// environment variable. Sensitive user data will be replaced by text +// "[PRIVATE DATA HIDDEN]". +type TraceLoggingTransport struct { + rt http.RoundTripper +} + +// NewTraceLoggingTransport returns a TraceLoggingTransport wrapping around +// the passed RoundTripper. If the passed RoundTripper is nil, HTTP +// DefaultTransport is used. +func NewTraceLoggingTransport(rt http.RoundTripper) *TraceLoggingTransport { + if rt == nil { + return &TraceLoggingTransport{ + rt: http.DefaultTransport, + } + } + return &TraceLoggingTransport{ + rt: rt, + } +} + +//RoundTrip ... 
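+// RoundTrip dumps the outgoing request, delegates to the wrapped
+// RoundTripper, and, on success, dumps the response together with the
+// elapsed time; both dumps pass through trace.Sanitize so that
+// sensitive values are redacted before logging.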
+func (r *TraceLoggingTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + start := time.Now() + r.dumpRequest(req, start) + resp, err = r.rt.RoundTrip(req) + if err != nil { + return + } + r.dumpResponse(resp, start) + return +} + +func (r *TraceLoggingTransport) dumpRequest(req *http.Request, start time.Time) { + shouldDisplayBody := !strings.Contains(req.Header.Get("Content-Type"), "multipart/form-data") + + dumpedRequest, err := httputil.DumpRequest(req, shouldDisplayBody) + if err != nil { + trace.Logger.Printf("An error occurred while dumping request:\n%v\n", err) + return + } + + trace.Logger.Printf("\n%s [%s]\n%s\n", + "REQUEST:", + start.Format(time.RFC3339), + trace.Sanitize(string(dumpedRequest))) + + if !shouldDisplayBody { + trace.Logger.Println("[MULTIPART/FORM-DATA CONTENT HIDDEN]") + } +} + +func (r *TraceLoggingTransport) dumpResponse(res *http.Response, start time.Time) { + end := time.Now() + + shouldDisplayBody := !strings.Contains(res.Header.Get("Content-Type"), "application/zip") + dumpedResponse, err := httputil.DumpResponse(res, shouldDisplayBody) + if err != nil { + trace.Logger.Printf("An error occurred while dumping response:\n%v\n", err) + return + } + + trace.Logger.Printf("\n%s [%s] %s %.0fms\n%s\n", + "RESPONSE:", + end.Format(time.RFC3339), + "Elapsed:", + end.Sub(start).Seconds()*1000, + trace.Sanitize(string(dumpedResponse))) +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/access_group.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/access_group.go new file mode 100644 index 00000000000..15ecca8a0ef --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/access_group.go @@ -0,0 +1,17 @@ +package models + +// AccessGroup represents the access group of IAM UUM +type AccessGroup struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` +} + +type AccessGroupV2 struct { + AccessGroup + AccountID string `json:"account_id,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + CreatedByID string `json:"created_by_id,omitempty"` + LastModifiedAt string `json:"last_modified_at,omitempty"` + LastModifiedByID string `json:"last_modified_by_id,omitempty"` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/access_group_member.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/access_group_member.go new file mode 100644 index 00000000000..b173e1f8b6e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/access_group_member.go @@ -0,0 +1,15 @@ +package models + +type AccessGroupMember struct { + ID string `json:"id"` + Type string `json:"type"` +} +type AccessGroupMemberV2 struct { + ID string `json:"iam_id,omitempty"` + Type string `json:"type,omitempty"` + Href string `json:"href,omitempty"` + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + CreatedByID string `json:"created_by_id,omitempty"` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/api_key.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/api_key.go new file mode 100644 index 00000000000..c41503d97de --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/api_key.go @@ -0,0 +1,18 @@ +package models + +type APIKey struct { + UUID string `json:"uuid,omitempty"` + Version string `json:"version,omitempty"` + Crn string `json:"crn,omitempty"` + CreatedAt string `json:"createdAt,omitempty"` + ModifiedAt string `json:"modifiedAt,omitempty"` + + Name string 
`json:"name,omitempty"` + Description string `json:"description,omitempty"` + Format string `json:"format,omitempty"` + BoundTo string `json:"boundTo,omitempty"` + APIKey string `json:"apiKey,omitempty"` + APIKeyID string `json:"apiKeyId,omitempty"` + APIKeySecret string `json:"apiKeySecret,omitempty"` + Locked bool `json:"locked,omitempty"` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/certificate_manager.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/certificate_manager.go new file mode 100644 index 00000000000..44ac302195e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/certificate_manager.go @@ -0,0 +1,110 @@ +package models + +// CertificateInfo struct for cert-import & cert-reimport success response. +type CertificateInfo struct { + ID string `json:"_id"` + Name string `json:"name"` + Description string `json:"description"` + Domains []string `json:"domains"` + RotateKeys bool `json:"rotate_keys"` + Status string `json:"status"` + Issuer string `json:"issuer"` + BeginsOn int64 `json:"begins_on"` + ExpiresOn int64 `json:"expires_on"` + Algorithm string `json:"algorithm"` + KeyAlgorithm string `json:"key_algorithm"` + Imported bool `json:"imported"` + HasPrevious bool `json:"has_previous"` + IssuanceInfo *CertificateIssuanceInfo `json:"issuance_info"` + SerialNumber string `json:"serial_number,omitempty"` + OrderPolicy OrderPolicy `json:"order_policy,omitempty"` +} + +//CertificateIssuanceInfo struct +type CertificateIssuanceInfo struct { + Status string `json:"status"` + Code string `json:"code"` + AdditionalInfo string `json:"additional_info"` + Auto bool `json:"auto"` + OrderedOn int64 `json:"ordered_on"` +} + +// CertificateImportData struct for holding user-provided certificates and keys for cert-import. +type CertificateImportData struct { + Name string `json:"name"` + Description string `json:"description"` + Data Data `json:"data"` +} + +//Data of Imported Certificate +type Data struct { + Content string `json:"content"` + Privatekey string `json:"priv_key,omitempty"` + IntermediateCertificate string `json:"intermediate,omitempty"` +} + +// CertificateDelete struct for cert-delete success response. +type CertificateDelete struct { + Message string +} + +// CertificateMetadataUpdate struct for cert-metadata-update's request body. +type CertificateMetadataUpdate struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` +} + +// CertificateReimportData struct for holding user-provided certificates and keys for cert-reimport. +type CertificateReimportData struct { + Content string `json:"content"` + Privatekey string `json:"priv_key,omitempty"` + IntermediateCertificate string `json:"intermediate,omitempty"` +} + +//CertificateGetData ... +type CertificateGetData struct { + ID string `json:"_id"` + Name string `json:"name"` + Description string `json:"description"` + Domains []string `json:"domains"` + Status string `json:"status"` + Issuer string `json:"issuer"` + BeginsOn int64 `json:"begins_on"` + ExpiresOn int64 `json:"expires_on"` + Algorithm string `json:"algorithm"` + KeyAlgorithm string `json:"key_algorithm"` + Imported bool `json:"imported"` + HasPrevious bool `json:"has_previous"` + IssuanceInfo CertificateIssuanceInfo `json:"issuance_info"` + Data *Data `json:"data"` + DataKeyID string `json:"data_key_id"` +} + +// CertificateOrderData struct for holding user-provided order data for cert-order. 
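+// An illustrative JSON body for such an order (all values are
+// hypothetical):
+//
+//   {
+//     "name": "my-cert",
+//     "domains": ["example.com", "*.example.com"],
+//     "domain_validation_method": "dns-01",
+//     "auto_renew_enabled": true
+//   }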
+type CertificateOrderData struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Domains []string `json:"domains"` + DomainValidationMethod string `json:"domain_validation_method"` + DNSProviderInstanceCrn string `json:"dns_provider_instance_crn,omitempty"` + Issuer string `json:"issuer,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + KeyAlgorithm string `json:"key_algorithm,omitempty"` + AutoRenewEnabled bool `json:"auto_renew_enabled,omitempty"` +} + +// CertificateRenewData struct for holding user-provided renew data for cert-renew. +type CertificateRenewData struct { + RotateKeys bool `json:"rotate_keys"` +} + +//CertificatesInfo List of certificates +type CertificatesInfo struct { + CertificateList []CertificateInfo `json:"certificates"` +} + +//OrderPolicy ... +type OrderPolicy struct { + Name string `json:"name,omitempty"` + AutoRenewEnabled bool `json:"auto_renew_enabled,omitempty"` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/policy.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/policy.go new file mode 100644 index 00000000000..4f9fd9678fa --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/policy.go @@ -0,0 +1,30 @@ +package models + +import "github.com/IBM-Cloud/bluemix-go/crn" + +type Policy struct { + ID string `json:"id,omitempty"` + Roles []PolicyRole `json:"roles"` + Resources []PolicyResource `json:"resources"` + Version string `json:"-"` +} + +type PolicyRole struct { + ID crn.CRN `json:"id"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Actions []RoleAction `json:"actions,omitempty"` +} + +type PolicyResource struct { + ServiceName string `json:"serviceName,omitempty"` + ServiceInstance string `json:"serviceInstance,omitempty"` + Region string `json:"region,omitempty"` + ResourceType string `json:"resourceType,omitempty"` + Resource string `json:"resource,omitempty"` + SpaceID string `json:"spaceId,omitempty"` + AccountID string `json:"accountId,omitempty"` + OrganizationID string `json:"organizationId,omitempty"` + ResourceGroupID string `json:"resourceGroupId,omitempty"` + AccessGroupID string `json:"accessGroupId,omitempty"` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/region.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/region.go new file mode 100644 index 00000000000..eaa8f012b8a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/region.go @@ -0,0 +1,31 @@ +package models + +type Region struct { + ID string `json:"id"` + Name string `json:"name"` + DisplayName string `json:"display_name"` + Domain string `json:"domain"` + APIEndpoint string `json:"cf_api"` + ConsoleEndpoint string `json:"console_url"` + MCCPEndpoint string `json:"mccp_api"` + Type string `json:"type"` + Geolocation `json:"geo"` + Customer `json:"customer"` + Deployment `json:"deployment"` + IsHome bool `json:"home"` +} + +type Geolocation struct { + Name string + DisplayName string `json:"display_name"` +} + +type Customer struct { + Name string + DisplayName string `json:"display_name"` +} + +type Deployment struct { + Name string + DisplayName string `json:"display_name"` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_group.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_group.go new file mode 100644 index 00000000000..5ad5bdeec4d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_group.go @@ -0,0 +1,33 @@ +package models + +type ResourceOrigin string + +func (o ResourceOrigin) 
String() string { + return string(o) +} + +type ResourceGroup struct { + ID string `json:"id,omitempty"` + AccountID string `json:"account_id,omitempty"` + Name string `json:"name,omitempty"` + Default bool `json:"default,omitempty"` + State string `json:"state,omitempty"` + QuotaID string `json:"quota_id,omitempty"` + PaymentMethodID string `json:"payment_method_id,omitempty"` + Linkages []Linkage `json:"resource_linkages,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + UpdatedAt string `json:"updated_at,omitempty"` +} + +type Linkage struct { + ResourceID string `json:"resource_id"` + ResourceOrigin ResourceOrigin `json:"resource_origin"` +} + +type ResourceGroupv2 struct { + ResourceGroup + CRN string `json:"crn,omitempty"` + QuotaURL string `json:"quota_url,omitempty"` + PaymentMethodsUrl string `json:"payment_methods_url,omitempty"` + TeamsURL string `json:"teams_url,omitempty"` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_quota.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_quota.go new file mode 100644 index 00000000000..cc9ae43cd32 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_quota.go @@ -0,0 +1,23 @@ +package models + +type QuotaDefinition struct { + ID string `json:"_id,omitempty"` + Revision string `json:"_rev,omitempty"` + Name string `json:"name,omitmempty"` + Type string `json:"type,omitempty"` + ServiceInstanceCountLimit int `json:"number_of_service_instances,omitempty"` + AppCountLimit int `json:"number_of_apps,omitempty"` + AppInstanceCountLimit int `json:"instances_per_app,omitempty"` + AppInstanceMemoryLimit string `json:"instance_memory,omitempty"` + TotalAppMemoryLimit string `json:"total_app_memory,omitempty"` + VSICountLimit int `json:"vsi_limit,omitempty"` + ServiceQuotas []ServiceQuota `json:"service_quotas,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + UpdatedAt string `json:"updated_at,omitempty"` +} + +type ServiceQuota struct { + ID string `json:"_id,omitempty"` + ServiceID string `json:"service_id,omitempty"` + Limit int `json:"limit,omitempty"` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service.go new file mode 100644 index 00000000000..404b8abff65 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service.go @@ -0,0 +1,122 @@ +package models + +import ( + "encoding/json" + + "github.com/IBM-Cloud/bluemix-go/crn" +) + +type Service struct { + ID string `json:"id"` + Name string `json:"name"` + CatalogCRN string `json:"catalog_crn"` + URL string `json:"url"` + Kind string `json:"kind"` + + Metadata ServiceMetadata `json:"-"` + Children []Service `json:"children"` + Active bool `json:"active"` +} + +type ServicePlan struct { + ID string `json:"id"` + Name string `json:"name"` + CatalogCRN string `json:"catalog_crn"` + URL string `json:"url"` + Kind string `json:"kind"` +} + +type ServiceDeployment struct { + ID string `json:"id"` + Name string `json:"name"` + CatalogCRN string `json:"catalog_crn"` + Metadata DeploymentMetaData `json:"metadata,omitempty"` +} + +type ServiceDeploymentAlias struct { + Metadata DeploymentMetaData `json:"metadata,omitempty"` +} + +type DeploymentMetaData struct { + RCCompatible bool `json:"rc_compatible"` + IAMCompatible bool `json:"iam_compatible"` + Deployment MetadataDeploymentFragment `json:"deployment,omitempty"` + Service MetadataServiceFragment `json:"service,omitempty"` +} + +type MetadataDeploymentFragment 
struct { + DeploymentID string `json:"deployment_id,omitempty"` + TargetCrn crn.CRN `json:"target_crn"` + Location string `json:"location"` +} + +type ServiceMetadata interface{} + +type ServiceResourceMetadata struct { + Service MetadataServiceFragment `json:"service"` +} + +type MetadataServiceFragment struct { + Bindable bool `json:"bindable"` + IAMCompatible bool `json:"iam_compatible"` + RCProvisionable bool `json:"rc_provisionable"` + PlanUpdateable bool `json:"plan_updateable"` + ServiceCheckEnabled bool `json:"service_check_enabled"` + ServiceKeySupported bool `json:"service_key_supported"` + State string `json:"state"` + TestCheckInterval int `json:"test_check_interval"` + UniqueAPIKey bool `json:"unique_api_key"` + + // CF properties + ServiceBrokerGUID string `json:"service_broker_guid"` +} + +type PlatformServiceResourceMetadata struct { +} + +type TemplateResourceMetadata struct { +} + +type RuntimeResourceMetadata struct { +} + +// UnmarshalJSON provide custom JSON unmarshal behavior to support multiple types +// of `metadata` +func (s *Service) UnmarshalJSON(data []byte) error { + type Copy Service + + trial := &struct { + *Copy + Metadata json.RawMessage `json:"metadata"` + }{ + Copy: (*Copy)(s), + } + + if err := json.Unmarshal(data, trial); err != nil { + return err + } + + if len(trial.Metadata) == 0 { + s.Metadata = nil + return nil + } + + switch s.Kind { + case "runtime": + s.Metadata = &RuntimeResourceMetadata{} + case "service", "iaas": + s.Metadata = &ServiceResourceMetadata{} + case "platform_service": + s.Metadata = &PlatformServiceResourceMetadata{} + case "template": + s.Metadata = &TemplateResourceMetadata{} + default: + s.Metadata = nil + return nil + } + + if err := json.Unmarshal(trial.Metadata, s.Metadata); err != nil { + return err + } + return nil +} \ No newline at end of file diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_alias.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_alias.go new file mode 100644 index 00000000000..b4022efd0ec --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_alias.go @@ -0,0 +1,23 @@ +package models + +import ( + "github.com/IBM-Cloud/bluemix-go/crn" +) + +type ServiceAlias struct { + ID string `json:"id"` + Name string `json:"name"` + ServiceInstanceID string `json:"resource_instance_id"` + ScopeCRN crn.CRN `json:"scope_crn"` + CRN crn.CRN `json:"crn"` + Tags []string `json:"tags,omitempty"` + Parameters map[string]interface{} `json:"parameters,omitempty"` // TODO: check whether the response contains the field + State string `json:"state"` +} + +func (a ServiceAlias) ScopeSpaceID() string { + if a.ScopeCRN.ResourceType == crn.ResourceTypeCFSpace { + return a.ScopeCRN.Resource + } + return "" +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_binding.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_binding.go new file mode 100644 index 00000000000..d120b8cc311 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_binding.go @@ -0,0 +1,17 @@ +package models + +import "github.com/IBM-Cloud/bluemix-go/crn" + +type ServiceBinding struct { + *MetadataType + SourceCrn crn.CRN `json:"source_crn"` + TargetCrn crn.CRN `json:"target_crn"` + Parameters map[string]interface{} `json:"parameters,omitempty"` + Crn crn.CRN `json:"crn"` + RegionBindingID string `json:"region_binding_id"` + AccountID string `json:"account_id"` + State string `json:"state"` + Credentials 
map[string]interface{} `json:"credentials"` + ServiceAliasesUrl string `json:"resource_aliases_url"` + TargetName string +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_id.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_id.go new file mode 100644 index 00000000000..e04c0d2f2b6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_id.go @@ -0,0 +1,14 @@ +package models + +type ServiceID struct { + UUID string `json:"uuid,omitempty"` + IAMID string `json:"iam_id,omitempty"` + CRN string `json:"crn,omitempty"` + Version string `json:"version,omitempty"` + BoundTo string `json:"boundTo,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + CreatedAt string `json:"createdAt,omitempty"` + ModifiedAt string `json:"modifiedAt,omitempty"` + Locked bool `json:"locked,omitempty"` +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_instance.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_instance.go new file mode 100644 index 00000000000..184dab58dd8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_instance.go @@ -0,0 +1,72 @@ +package models + +import ( + "time" + + "github.com/IBM-Cloud/bluemix-go/crn" +) + +type MetadataType struct { + ID string `json:"id"` + Guid string `json:"guid"` + Url string `json:"url"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + DeletedAt *time.Time `json:"deleted_at"` +} +type ServiceInstance struct { + *MetadataType + Name string `json:"name"` + RegionID string `json:"region_id"` + AccountID string `json:"account_id"` + ServicePlanID string `json:"resource_plan_id"` + ServicePlanName string + ResourceGroupID string `json:"resource_group_id"` + ResourceGroupName string + Crn crn.CRN `json:"crn,omitempty"` + Tags []string `json:"tags,omitempty"` + Parameters map[string]interface{} `json:"parameters,omitempty"` + Extensions map[string]interface{} `json:"extensions,omitempty"` + CreateTime int64 `json:"create_time"` + State string `json:"state"` + Type string `json:"type"` + ServiceID string `json:"resource_id"` + ServiceName string + DashboardUrl *string `json:"dashboard_url"` + LastOperation *LastOperationType `json:"last_operation"` + AccountUrl string `json:"account_url"` + ResourcePlanUrl string `json:"resource_plan_url"` + ResourceBindingsUrl string `json:"resource_bindings_url"` + ResourceAliasesUrl string `json:"resource_aliases_url"` + SiblingsUrl string `json:"siblings_url"` + TargetCrn crn.CRN `json:"target_crn"` +} + +type LastOperationType struct { + Type string `json:"type"` + State string `json:"state"` + Description *string `json:"description"` + UpdatedAt *time.Time `json:"updated_at"` +} + +type ServiceInstanceV2 struct { + ServiceInstance + ScheduledReclaimAt interface{} `json:"scheduled_reclaim_at"` + RestoredAt interface{} `json:"restored_at"` + ScheduledReclaimBy string `json:"scheduled_reclaim_by"` + RestoredBy string `json:"restored_by"` + ResourcePlanID string `json:"resource_plan_id"` + ResourceGroupCrn string `json:"resource_group_crn"` + AllowCleanup bool `json:"allow_cleanup"` + ResourceKeysURL string `json:"resource_keys_url"` + PlanHistory []PlanHistoryData `json:"plan_history"` +} + +type PlanHistoryData struct { + ResourcePlanID string `json:"resource_plan_id"` + StartDate time.Time `json:"start_date"` + RequestorID string `json:"requestor_id"` + Migrated bool `json:"migrated"` + ControlledBy 
string `json:"controlled_by"`
+	Locked         bool   `json:"locked"`
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_key.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_key.go
new file mode 100644
index 00000000000..02c8b2f8547
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/resource_service_key.go
@@ -0,0 +1,14 @@
+package models
+
+import "github.com/IBM-Cloud/bluemix-go/crn"
+
+type ServiceKey struct {
+	MetadataType
+	Name        string                 `json:"name"`
+	SourceCrn   crn.CRN                `json:"source_crn"`
+	Parameters  map[string]interface{} `json:"parameters,omitempty"`
+	Crn         crn.CRN                `json:"crn"`
+	State       string                 `json:"state"`
+	AccountID   string                 `json:"account_id"`
+	Credentials map[string]interface{} `json:"credentials"`
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/models/role.go b/vendor/github.com/IBM-Cloud/bluemix-go/models/role.go
new file mode 100644
index 00000000000..eacb8b5939b
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/models/role.go
@@ -0,0 +1,23 @@
+package models
+
+import "github.com/IBM-Cloud/bluemix-go/crn"
+
+type Role struct {
+	CRN         crn.CRN `json:"crn"`
+	Name        string  `json:"displayName"`
+	Description string  `json:"description"`
+}
+
+type RoleAction struct {
+	ID          string `json:"id"`
+	Name        string `json:"displayName"`
+	Description string `json:"description"`
+}
+
+func (r Role) ToPolicyRole() PolicyRole {
+	return PolicyRole{
+		ID:          r.CRN,
+		DisplayName: r.Name,
+		Description: r.Description,
+	}
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/rest/client.go b/vendor/github.com/IBM-Cloud/bluemix-go/rest/client.go
new file mode 100644
index 00000000000..aee550dc231
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/rest/client.go
@@ -0,0 +1,192 @@
+// Package rest provides a simple REST client for creating and sending
+// API requests.

+// Examples:
+// Creating a request:
+// // GET request
+// GetRequest("http://www.example.com").
+//	Set("Accept", "application/json").
+//	Query("foo1", "bar1").
+//	Query("foo2", "bar2")
+//
+// // JSON body
+// foo = Foo{Bar: "val"}
+// PostRequest("http://www.example.com").
+//	Body(foo)

+// // String body
+// PostRequest("http://www.example.com").
+//	Body("{\"bar\": \"val\"}")

+// // Stream body
+// PostRequest("http://www.example.com").
+//	Body(strings.NewReader("abcde"))

+// // Multipart POST request
+// var f *os.File
+// PostRequest("http://www.example.com").
+//	Field("foo", "bar").
+//	File("file1", File{Name: f.Name(), Content: f}).
+//	File("file2", File{Name: "1.txt", Content: strings.NewReader("abcde"), Type: "text/plain"})

+// // Build into an HTTP request
+// GetRequest("http://www.example.com").Build()

+// Sending a request:
+// client := NewClient()
+// var foo = struct {
+//	Bar string
+// }{}
+// var apiErr = struct {
+//	Message string
+// }{}
+// resp, err := client.Do(request, &foo, &apiErr)
+package rest
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+)
+
+const (
+	//ErrCodeEmptyResponse ...
+	ErrCodeEmptyResponse = "EmptyResponseBody"
+)
+
+//ErrEmptyResponseBody ...
+var ErrEmptyResponseBody = bmxerror.New(ErrCodeEmptyResponse, "empty response body")
+
+// Client is a REST client. It's recommended that a client be created with the
+// NewClient() method.
+type Client struct {
+	// The HTTP client to be used. Default is HTTP's defaultClient.
+	HTTPClient *http.Client
+	// Default header for all outgoing HTTP requests.
+	DefaultHeader http.Header
+}
+
+// NewClient creates a new REST client.
+func NewClient() *Client {
+	return &Client{
+		HTTPClient: http.DefaultClient,
+	}
+}
+
+// Do sends a request and returns an HTTP response. The resp.Body will be
+// consumed and closed in the method.
+//
+// For a 2XX response, the body will be JSON decoded into the value pointed to
+// by respV.
+//
+// For a non-2XX response, an attempt will be made to unmarshal the response
+// into the value pointed to by errV. If unmarshaling fails, a request-failure
+// error with the status code and response text is returned.
+func (c *Client) Do(r *Request, respV interface{}, errV interface{}) (*http.Response, error) {
+	req, err := c.makeRequest(r)
+	if err != nil {
+		return nil, err
+	}
+
+	client := c.HTTPClient
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return resp, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		raw, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return resp, fmt.Errorf("Error reading response: %v", err)
+		}
+
+		if len(raw) > 0 && errV != nil {
+			if json.Unmarshal(raw, errV) == nil {
+				return resp, nil
+			}
+		}
+
+		return resp, bmxerror.NewRequestFailure("ServerErrorResponse", string(raw), resp.StatusCode)
+	}
+
+	if respV != nil {
+		// Callback function with expected JSON type
+		if funcType := reflect.TypeOf(respV); funcType.Kind() == reflect.Func {
+			if funcType.NumIn() != 1 || funcType.NumOut() != 1 {
+				// Bail out early: the callback cannot be invoked safely.
+				err = fmt.Errorf("Callback function has unexpected signature, expected: func(interface{}) bool")
+				return resp, err
+			}
+			paramType := funcType.In(0)
+			dc := json.NewDecoder(resp.Body)
+			dc.UseNumber()
+			for {
+				typedInterface := reflect.New(paramType).Interface()
+				if err = dc.Decode(typedInterface); err == io.EOF {
+					err = nil
+					break
+				} else if err != nil {
+					break
+				}
+				resv := reflect.ValueOf(respV).Call([]reflect.Value{reflect.ValueOf(typedInterface).Elem()})[0]
+				if !resv.Bool() {
+					break
+				}
+			}
+		} else {
+			switch respV.(type) {
+			case io.Writer:
+				_, err = io.Copy(respV.(io.Writer), resp.Body)
+			default:
+				dc := json.NewDecoder(resp.Body)
+				dc.UseNumber()
+				err = dc.Decode(respV)
+				if err == io.EOF {
+					err = ErrEmptyResponseBody
+				}
+			}
+		}
+	}
+
+	return resp, err
+}
+
+func (c *Client) makeRequest(r *Request) (*http.Request, error) {
+	req, err := r.Build()
+	if err != nil {
+		return nil, err
+	}
+
+	c.applyDefaultHeader(req)
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/json")
+	}
+	if req.Header.Get("Content-Type") == "" {
+		req.Header.Set("Content-Type", "application/json")
+	}
+	if req.Header.Get("Accept-Language") == "" {
+		req.Header.Set("Accept-Language", "en")
+	}
+
+	return req, nil
+}
+
+func (c *Client) applyDefaultHeader(req *http.Request) {
+	for k, vs := range c.DefaultHeader {
+		if req.Header.Get(k) != "" {
+			continue
+		}
+		for _, v := range vs {
+			req.Header.Add(k, v)
+		}
+	}
+}
diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/rest/request.go b/vendor/github.com/IBM-Cloud/bluemix-go/rest/request.go
new file mode 100644
index 00000000000..6ab57ee1608
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/bluemix-go/rest/request.go
@@ -0,0 +1,285 @@
+package rest
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"mime/multipart"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"strings"
+)
+
+const (
+	contentType               = "Content-Type"
+	jsonContentType           = "application/json"
+	formUrlEncodedContentType = "application/x-www-form-urlencoded"
+)
+
+// File represents a file upload in
the POST request +type File struct { + // File name + Name string + // File content + Content io.Reader + // Mime type, defaults to "application/octet-stream" + Type string +} + +// Request is a REST request. It also acts like a HTTP request builder. +type Request struct { + method string + rawUrl string + header http.Header + + queryParams url.Values + formParams url.Values + + // files to upload + files map[string][]File + + // custom request body + body interface{} +} + +// NewRequest creates a new REST request with the given rawUrl. +func NewRequest(rawUrl string) *Request { + return &Request{ + rawUrl: rawUrl, + header: http.Header{}, + queryParams: url.Values{}, + formParams: url.Values{}, + files: make(map[string][]File), + } +} + +// Method sets HTTP method of the request. +func (r *Request) Method(method string) *Request { + r.method = method + return r +} + +// GetRequest creates a REST request with GET method and the given rawUrl. +func GetRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("GET") +} + +// HeadRequest creates a REST request with HEAD method and the given rawUrl. +func HeadRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("HEAD") +} + +// PostRequest creates a REST request with POST method and the given rawUrl. +func PostRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("POST") +} + +// PutRequest creates a REST request with PUT method and the given rawUrl. +func PutRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("PUT") +} + +// DeleteRequest creates a REST request with DELETE method and the given +// rawUrl. +func DeleteRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("DELETE") +} + +// PatchRequest creates a REST request with PATCH method and the given +// rawUrl. +func PatchRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("PATCH") +} + +// Creates a request with HTTP OPTIONS. +func OptionsRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("OPTIONS") +} + +// Add adds the key, value pair to the request header. It appends to any +// existing values associated with key. +func (r *Request) Add(key string, value string) *Request { + r.header.Add(http.CanonicalHeaderKey(key), value) + return r +} + +// Del deletes the header as specified by the key. +func (r *Request) Del(key string) *Request { + r.header.Del(http.CanonicalHeaderKey(key)) + return r +} + +// Set sets the header entries associated with key to the single element value. +// It replaces any existing values associated with key. +func (r *Request) Set(key string, value string) *Request { + r.header.Set(http.CanonicalHeaderKey(key), value) + return r +} + +// Query appends the key, value pair to the request query which will be +// encoded as url query parameters on HTTP request's url. +func (r *Request) Query(key string, value string) *Request { + r.queryParams.Add(key, value) + return r +} + +// Field appends the key, value pair to the form fields in the POST request. +func (r *Request) Field(key string, value string) *Request { + r.formParams.Add(key, value) + return r +} + +// File appends a file upload item in the POST request. The file content will +// be consumed when building HTTP request (see Build()) and closed if it's +// also a ReadCloser type. +func (r *Request) File(name string, file File) *Request { + r.files[name] = append(r.files[name], file) + return r +} + +// Body sets the request body. 
Accepted types are string, []byte, io.Reader, +// or structs to be JSON encodeded. +func (r *Request) Body(body interface{}) *Request { + r.body = body + return r +} + +// Build builds a HTTP request according to the settings in the REST request. +func (r *Request) Build() (*http.Request, error) { + url, err := r.buildURL() + if err != nil { + return nil, err + } + + body, err := r.buildBody() + if err != nil { + return nil, err + } + + req, err := http.NewRequest(r.method, url, body) + if err != nil { + return req, err + } + + for k, vs := range r.header { + for _, v := range vs { + req.Header.Add(k, v) + } + } + + return req, nil +} + +func (r *Request) buildURL() (string, error) { + if r.rawUrl == "" || len(r.queryParams) == 0 { + return r.rawUrl, nil + } + u, err := url.Parse(r.rawUrl) + if err != nil { + return "", err + } + q := u.Query() + for k, vs := range r.queryParams { + for _, v := range vs { + q.Add(k, v) + } + } + u.RawQuery = q.Encode() + return u.String(), nil +} + +func (r *Request) buildBody() (io.Reader, error) { + if len(r.files) > 0 { + return r.buildFormMultipart() + } + + if len(r.formParams) > 0 { + return r.buildFormFields() + } + + return r.buildCustomBody() +} + +func (r *Request) buildFormMultipart() (io.Reader, error) { + b := new(bytes.Buffer) + w := multipart.NewWriter(b) + defer w.Close() + + for k, files := range r.files { + for _, f := range files { + defer func() { + if f, ok := f.Content.(io.ReadCloser); ok { + f.Close() + } + }() + + p, err := createPartWriter(w, k, f) + if err != nil { + return nil, err + } + _, err = io.Copy(p, f.Content) + if err != nil { + return nil, err + } + } + } + + for k, vs := range r.formParams { + for _, v := range vs { + err := w.WriteField(k, v) + if err != nil { + return nil, err + } + } + } + + r.header.Set(contentType, w.FormDataContentType()) + return b, nil +} + +func createPartWriter(w *multipart.Writer, fieldName string, f File) (io.Writer, error) { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + escapeQuotes(fieldName), escapeQuotes(f.Name))) + if f.Type != "" { + h.Set("Content-Type", f.Type) + } else { + h.Set("Content-Type", "application/octet-stream") + } + return w.CreatePart(h) +} + +var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + +func escapeQuotes(s string) string { + return quoteEscaper.Replace(s) +} + +func (r *Request) buildFormFields() (io.Reader, error) { + r.header.Set(contentType, formUrlEncodedContentType) + return strings.NewReader(r.formParams.Encode()), nil +} + +func (r *Request) buildCustomBody() (io.Reader, error) { + if r.body == nil { + return nil, nil + } + + switch b := r.body; b.(type) { + case string: + return strings.NewReader(b.(string)), nil + case []byte: + return bytes.NewReader(b.([]byte)), nil + case io.Reader: + return b.(io.Reader), nil + default: + raw, err := json.Marshal(b) + if err != nil { + return nil, fmt.Errorf("Invalid JSON request: %v", err) + } + return bytes.NewReader(raw), nil + } +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/session/session.go b/vendor/github.com/IBM-Cloud/bluemix-go/session/session.go new file mode 100644 index 00000000000..acf757c9cb0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/session/session.go @@ -0,0 +1,102 @@ +package session + +import ( + "fmt" + "strconv" + "time" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/endpoints" + "github.com/IBM-Cloud/bluemix-go/helpers" + 
"github.com/IBM-Cloud/bluemix-go/trace" +) + +//Session ... +type Session struct { + Config *bluemix.Config +} + +//New ... +func New(configs ...*bluemix.Config) (*Session, error) { + var c *bluemix.Config + + if len(configs) == 0 { + c = &bluemix.Config{} + } else { + c = configs[0] + } + sess := &Session{ + Config: c, + } + + if len(c.IBMID) == 0 { + c.IBMID = helpers.EnvFallBack([]string{"IBMID"}, "") + } + + if len(c.IBMIDPassword) == 0 { + c.IBMIDPassword = helpers.EnvFallBack([]string{"IBMID_PASSWORD"}, "") + } + + if len(c.BluemixAPIKey) == 0 { + c.BluemixAPIKey = helpers.EnvFallBack([]string{"IC_API_KEY", "IBMCLOUD_API_KEY", "BM_API_KEY", "BLUEMIX_API_KEY"}, "") + } + + if len(c.IAMAccessToken) == 0 { + c.IAMAccessToken = helpers.EnvFallBack([]string{"IC_IAM_TOKEN", "IBMCLOUD_IAM_TOKEN"}, "") + } + + if len(c.IAMRefreshToken) == 0 { + c.IAMRefreshToken = helpers.EnvFallBack([]string{"IC_IAM_REFRESH_TOKEN", "IBMCLOUD_IAM_REFRESH_TOKEN"}, "") + } + + if len(c.Region) == 0 { + c.Region = helpers.EnvFallBack([]string{"IC_REGION", "IBMCLOUD_REGION", "BM_REGION", "BLUEMIX_REGION"}, "us-south") + } + if c.MaxRetries == nil { + c.MaxRetries = helpers.Int(3) + retries := helpers.EnvFallBack([]string{"MAX_RETRIES"}, "3") + i, err := strconv.Atoi(retries) + if err != nil { + fmt.Printf("MAX_RETRIES has invalid retries format. Default retries will be set to %q", *c.MaxRetries) + } + if i < 0 { + fmt.Printf("MAX_RETRIES has invalid retries format. Default retries will be set to %q", *c.MaxRetries) + } + if err == nil && i >= 0 { + c.MaxRetries = &i + } + } + if c.HTTPTimeout == 0 { + c.HTTPTimeout = 180 * time.Second + timeout := helpers.EnvFallBack([]string{"IC_TIMEOUT", "IBMCLOUD_TIMEOUT", "BM_TIMEOUT", "BLUEMIX_TIMEOUT"}, "180") + timeoutDuration, err := time.ParseDuration(fmt.Sprintf("%ss", timeout)) + if err != nil { + fmt.Printf("IC_TIMEOUT or IBMCLOUD_TIMEOUT has invalid time format. Default timeout will be set to %q", c.HTTPTimeout) + } + if err == nil { + c.HTTPTimeout = timeoutDuration + } + } + if len(c.Visibility) == 0 { + c.Visibility = helpers.EnvFallBack([]string{"IC_VISIBILITY", "IBMCLOUD_VISIBILITY"}, "public") + } + if c.RetryDelay == nil { + c.RetryDelay = helpers.Duration(30 * time.Second) + } + if c.EndpointLocator == nil { + c.EndpointLocator = endpoints.NewEndpointLocator(c.Region, c.Visibility) + } + + if c.Debug { + trace.Logger = trace.NewLogger("true") + } + + return sess, nil +} + +//Copy allows sessions to create a copy of it and optionally override any defaults via the config +func (s *Session) Copy(mccpgs ...*bluemix.Config) *Session { + return &Session{ + Config: s.Config.Copy(mccpgs...), + } +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/trace/trace.go b/vendor/github.com/IBM-Cloud/bluemix-go/trace/trace.go new file mode 100644 index 00000000000..87c08987ecb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/trace/trace.go @@ -0,0 +1,132 @@ +package trace + +import ( + "fmt" + "io" + "log" + "os" + "regexp" + "strings" +) + +//Printer ... +type Printer interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +//Closer ... +type Closer interface { + Close() error +} + +//PrinterCloser ... +type PrinterCloser interface { + Printer + Closer +} + +//NullLogger ... 
+type NullLogger struct{}
+
+func (l *NullLogger) Print(v ...interface{})                 {}
+func (l *NullLogger) Printf(format string, v ...interface{}) {}
+func (l *NullLogger) Println(v ...interface{})               {}
+
+type loggerImpl struct {
+	*log.Logger
+	c io.WriteCloser
+}
+
+func (loggerImpl *loggerImpl) Close() error {
+	if loggerImpl.c != nil {
+		return loggerImpl.c.Close()
+	}
+	return nil
+}
+
+func newLoggerImpl(out io.Writer, prefix string, flag int) *loggerImpl {
+	l := log.New(out, prefix, flag)
+	// nil when out is not closable; Close() then becomes a no-op
+	c, _ := out.(io.WriteCloser)
+	return &loggerImpl{
+		Logger: l,
+		c:      c,
+	}
+}
+
+//Logger is the global logger
+var Logger Printer = NewLogger("")
+
+// NewLogger returns a printer for the given trace setting.
+func NewLogger(bluemix_trace string) Printer {
+	switch strings.ToLower(bluemix_trace) {
+	case "", "false":
+		return new(NullLogger)
+	case "true":
+		return NewStdLogger()
+	default:
+		return NewFileLogger(bluemix_trace)
+	}
+}
+
+// NewStdLogger returns a printer that writes to standard error.
+func NewStdLogger() PrinterCloser {
+	return newLoggerImpl(os.Stderr, "", 0)
+}
+
+// NewFileLogger returns a printer that appends to the file at the given path.
+func NewFileLogger(path string) PrinterCloser {
+	file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
+	if err != nil {
+		logger := NewStdLogger()
+		logger.Printf("An error occurred when creating log file '%s':\n%v\n\n", path, err)
+		return logger
+	}
+	return newLoggerImpl(file, "", 0)
+}
+
+// Sanitize returns a clean string with sensitive user data in the input
+// replaced by PRIVATE_DATA_PLACEHOLDER.
+func Sanitize(input string) string {
+	re := regexp.MustCompile(`(?m)^Authorization: .*`)
+	sanitized := re.ReplaceAllString(input, "Authorization: "+privateDataPlaceholder())
+
+	re = regexp.MustCompile(`(?m)^X-Auth-Token: .*`)
+	sanitized = re.ReplaceAllString(sanitized, "X-Auth-Token: "+privateDataPlaceholder())
+
+	re = regexp.MustCompile(`(?m)^X-Auth-Refresh-Token: .*`)
+	sanitized = re.ReplaceAllString(sanitized, "X-Auth-Refresh-Token: "+privateDataPlaceholder())
+
+	re = regexp.MustCompile(`(?m)^X-Auth-Uaa-Token: .*`)
+	sanitized = re.ReplaceAllString(sanitized, "X-Auth-Uaa-Token: "+privateDataPlaceholder())
+
+	re = regexp.MustCompile(`(?m)^X-Auth-User-Token: .*`)
+	sanitized = re.ReplaceAllString(sanitized, "X-Auth-User-Token: "+privateDataPlaceholder())
+
+	re = regexp.MustCompile(`password=[^&]*&`)
+	sanitized = re.ReplaceAllString(sanitized, "password="+privateDataPlaceholder()+"&")
+
+	re = regexp.MustCompile(`refresh_token=[^&]*&`)
+	sanitized = re.ReplaceAllString(sanitized, "refresh_token="+privateDataPlaceholder()+"&")
+
+	re = regexp.MustCompile(`apikey=[^&]*&`)
+	sanitized = re.ReplaceAllString(sanitized, "apikey="+privateDataPlaceholder()+"&")
+
+	sanitized = sanitizeJSON("token", sanitized)
+	sanitized = sanitizeJSON("password", sanitized)
+	sanitized = sanitizeJSON("apikey", sanitized)
+	sanitized = sanitizeJSON("passcode", sanitized)
+
+	return sanitized
+}
+
+func sanitizeJSON(propertySubstring string, json string) string {
+	regex := regexp.MustCompile(fmt.Sprintf(`(?i)"([^"]*%s[^"]*)":\s*"[^\,]*"`, propertySubstring))
+	return regex.ReplaceAllString(json, fmt.Sprintf(`"$1":"%s"`, privateDataPlaceholder()))
+}
+
+// privateDataPlaceholder returns the text used to replace sensitive data.
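+// Editor's note (illustrative sketch, not part of the upstream package):
+// callers enable this tracing by swapping the package-level Logger, e.g.
+//
+//	trace.Logger = trace.NewLogger("true")           // dump to standard error
+//	trace.Logger = trace.NewLogger("/tmp/trace.log") // dump to a file; the path is hypothetical
+//
+// and Sanitize above then scrubs the dumps: a captured header line such as
+//
+//	Authorization: Bearer abc123
+//
+// is rewritten to
+//
+//	Authorization: [PRIVATE DATA HIDDEN]
+//
+// using the placeholder produced by privateDataPlaceholder below.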
+func privateDataPlaceholder() string { + return "[PRIVATE DATA HIDDEN]" +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/utils/utils.go b/vendor/github.com/IBM-Cloud/bluemix-go/utils/utils.go new file mode 100644 index 00000000000..ecd265ecedf --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/utils/utils.go @@ -0,0 +1,160 @@ +package utils + +import ( + "fmt" + "net/url" + "strings" + + "github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv2" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/models" + + "github.com/IBM-Cloud/bluemix-go/crn" +) + +func GetLocationFromTargetCRN(crnResource string) string { + if strings.HasPrefix(crnResource, "bluemix-") { + return crnResource[len("bluemix-"):] + } else if strings.HasPrefix(crnResource, "staging-") { + return crnResource[len("staging-"):] + } else { + return crnResource + } +} + +func GenerateSpaceCRN(region models.Region, orgID string, spaceID string) crn.CRN { + spaceCRN := crn.New(CloudName(region), CloudType(region)) + spaceCRN.ServiceName = crn.ServiceBluemix + spaceCRN.Region = region.Name + spaceCRN.ScopeType = crn.ScopeOrganization + spaceCRN.Scope = orgID + spaceCRN.ResourceType = crn.ResourceTypeCFSpace + spaceCRN.Resource = spaceID + return spaceCRN +} + +func CloudName(region models.Region) string { + regionID := region.ID + if regionID == "" { + return "" + } + + splits := strings.Split(regionID, ":") + if len(splits) != 3 { + return "" + } + + customer := splits[0] + if customer != "ibm" { + return customer + } + + deployment := splits[1] + switch { + case deployment == "yp": + return "bluemix" + case strings.HasPrefix(deployment, "ys"): + return "staging" + default: + return "" + } +} + +func CloudType(region models.Region) string { + return region.Type +} + +func GenerateBoundToCRN(region models.Region, accountID string) crn.CRN { + var boundTo crn.CRN + if region.Type == "dedicated" { + // cname and ctype are hard coded for dedicated + boundTo = crn.New("bluemix", "public") + } else { + boundTo = crn.New(CloudName(region), CloudType(region)) + } + + boundTo.ScopeType = crn.ScopeAccount + boundTo.Scope = accountID + return boundTo +} + +func GetRolesFromRoleNamesV2(roleNames []string, roles []iampapv2.Role) ([]iampapv2.Role, error) { + + filteredRoles := []iampapv2.Role{} + for _, roleName := range roleNames { + role, err := FindRoleByNameV2(roles, roleName) + if err != nil { + return []iampapv2.Role{}, err + } + filteredRoles = append(filteredRoles, role) + } + return filteredRoles, nil +} +func FindRoleByNameV2(supported []iampapv2.Role, name string) (iampapv2.Role, error) { + for _, role := range supported { + if role.DisplayName == name { + return role, nil + } + } + supportedRoles := getSupportedRolesStringV2(supported) + return iampapv2.Role{}, bmxerror.New(ErrCodeRRoleDoesnotExist, + fmt.Sprintf("%s was not found. 
Valid roles are %s", name, supportedRoles)) + +} + +func getSupportedRolesStringV2(supported []iampapv2.Role) string { + rolesStr := "" + for index, role := range supported { + if index != 0 { + rolesStr += ", " + } + rolesStr += role.DisplayName + } + return rolesStr +} + +func GetRolesFromRoleNames(roleNames []string, roles []models.PolicyRole) ([]models.PolicyRole, error) { + + filteredRoles := []models.PolicyRole{} + for _, roleName := range roleNames { + role, err := FindRoleByName(roles, roleName) + if err != nil { + return []models.PolicyRole{}, err + } + filteredRoles = append(filteredRoles, role) + } + return filteredRoles, nil +} + +const ErrCodeRRoleDoesnotExist = "RoleDoesnotExist" + +func FindRoleByName(supported []models.PolicyRole, name string) (models.PolicyRole, error) { + for _, role := range supported { + if role.DisplayName == name { + return role, nil + } + } + supportedRoles := getSupportedRolesString(supported) + return models.PolicyRole{}, bmxerror.New(ErrCodeRRoleDoesnotExist, + fmt.Sprintf("%s was not found. Valid roles are %s", name, supportedRoles)) + +} + +func getSupportedRolesString(supported []models.PolicyRole) string { + rolesStr := "" + for index, role := range supported { + if index != 0 { + rolesStr += ", " + } + rolesStr += role.DisplayName + } + return rolesStr +} + +func EscapeUrlParm(urlParm string) string { + if strings.Contains(urlParm, "/") { + newUrlParm := url.PathEscape(urlParm) + return newUrlParm + } + return urlParm +} diff --git a/vendor/github.com/IBM-Cloud/bluemix-go/version.go b/vendor/github.com/IBM-Cloud/bluemix-go/version.go new file mode 100644 index 00000000000..ef121571c07 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/bluemix-go/version.go @@ -0,0 +1,4 @@ +package bluemix + +//Version is the SDK version +const Version = "0.1" diff --git a/vendor/github.com/IBM-Cloud/power-go-client/LICENSE b/vendor/github.com/IBM-Cloud/power-go-client/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-clonevolumes.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-clonevolumes.go new file mode 100644 index 00000000000..7f875026779 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-clonevolumes.go @@ -0,0 +1,69 @@ +package instance + +import ( + "fmt" + "time" + + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPICloneVolumeClient ... +type IBMPICloneVolumeClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPICloneVolumeClient ... 
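+// Illustrative usage (editor's sketch; sess, powerInstanceID and cloneTaskID
+// are hypothetical values):
+//
+//	client := NewIBMPICloneVolumeClient(sess, powerInstanceID)
+//	status, err := client.Get(powerInstanceID, cloneTaskID, time.Minute)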
+func NewIBMPICloneVolumeClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPICloneVolumeClient { + return &IBMPICloneVolumeClient{ + sess, powerinstanceid, + } +} + +//Create a clone volume using V2 of the API - This creates a clone +func (f *IBMPICloneVolumeClient) Create(cloneParams *p_cloud_volumes.PcloudV2VolumesClonePostParams, timeout time.Duration) (*models.CloneTaskReference, error) { + params := p_cloud_volumes.NewPcloudV2VolumesClonePostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(cloneParams.CloudInstanceID).WithBody(cloneParams.Body) + resp, err := f.session.Power.PCloudVolumes.PcloudV2VolumesClonePost(params, ibmpisession.NewAuth(f.session, cloneParams.CloudInstanceID)) + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to perform Create operation... %s", err) + } + return resp.Payload, nil +} + +//DeleteClone Deletes a clone +func (f *IBMPICloneVolumeClient) DeleteClone(cloneParams *p_cloud_volumes.PcloudV2VolumescloneDeleteParams, id, cloudinstance string, timeout time.Duration) (models.Object, error) { + params := p_cloud_volumes.NewPcloudV2VolumescloneDeleteParamsWithTimeout(helpers.PIDeleteTimeOut).WithCloudInstanceID(cloudinstance).WithVolumesCloneID(id) + + resp, err := f.session.Power.PCloudVolumes.PcloudV2VolumescloneDelete(params, ibmpisession.NewAuth(f.session, cloudinstance)) + + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to perform Delete operation... %s", err) + } + return resp.Payload, nil +} + +// Cancel a Clone + +// Get status of a clone request +func (f *IBMPICloneVolumeClient) Get(powerinstanceid, clonetaskid string, timeout time.Duration) (*models.CloneTaskStatus, error) { + params := p_cloud_volumes.NewPcloudV2VolumesClonetasksGetParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid).WithCloneTaskID(clonetaskid) + resp, err := f.session.Power.PCloudVolumes.PcloudV2VolumesClonetasksGet(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to perform the get operation for clones... %s", err) + } + return resp.Payload, nil +} + +//StartClone ... +func (f *IBMPICloneVolumeClient) StartClone(powerinstanceid, volumeCloneID string, timeout time.Duration) (*models.VolumesClone, error) { + params := p_cloud_volumes.NewPcloudV2VolumescloneStartPostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithVolumesCloneID(volumeCloneID) + resp, err := f.session.Power.PCloudVolumes.PcloudV2VolumescloneStartPost(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to perform the start operation for clones... %s", err) + } + return resp.Payload, nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-cloud-connection.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-cloud-connection.go new file mode 100644 index 00000000000..8366ea42f95 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-cloud-connection.go @@ -0,0 +1,126 @@ +package instance + +import ( + "fmt" + "time" + + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPICloudConnectionClient ... 
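+// Like the other clients in this package it pairs an IBMPISession with the
+// target power instance ID. Illustrative construction (editor's sketch; sess
+// and powerInstanceID are hypothetical):
+//
+//	cc := NewIBMPICloudConnectionClient(sess, powerInstanceID)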
+type IBMPICloudConnectionClient struct {
+	session         *ibmpisession.IBMPISession
+	powerinstanceid string
+}
+
+// NewIBMPICloudConnectionClient ...
+func NewIBMPICloudConnectionClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPICloudConnectionClient {
+	return &IBMPICloudConnectionClient{
+		session:         sess,
+		powerinstanceid: powerinstanceid,
+	}
+}
+
+// Create a Cloud Connection
+func (f *IBMPICloudConnectionClient) Create(pclouddef *p_cloud_cloud_connections.PcloudCloudconnectionsPostParams, powerinstanceid string) (*models.CloudConnection, error) {
+
+	params := p_cloud_cloud_connections.NewPcloudCloudconnectionsPostParamsWithTimeout(postTimeOut).WithCloudInstanceID(powerinstanceid).WithBody(pclouddef.Body)
+	postok, postcreated, err, _ := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+	if err != nil {
+		return nil, fmt.Errorf("Failed to create cloud connection %s", err)
+	}
+	if postok != nil {
+		return postok.Payload, nil
+	}
+	if postcreated != nil {
+		return postcreated.Payload, nil
+	}
+	return nil, nil
+}
+
+// Get returns a cloud connection's state information.
+func (f *IBMPICloudConnectionClient) Get(pclouddef *p_cloud_cloud_connections.PcloudCloudconnectionsGetParams) (*models.CloudConnection, error) {
+
+	params := p_cloud_cloud_connections.NewPcloudCloudconnectionsGetParams().WithCloudInstanceID(pclouddef.CloudInstanceID).WithCloudConnectionID(pclouddef.CloudConnectionID)
+	resp, err := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsGet(params, ibmpisession.NewAuth(f.session, pclouddef.CloudInstanceID))
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get cloud connection %s", err)
+	}
+	return resp.Payload, nil
+}
+
+// GetAll returns state information for all cloud connections in a cloud instance.
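+// For example (editor's sketch; cc and powerInstanceID are hypothetical):
+//
+//	conns, err := cc.GetAll(powerInstanceID, 2*time.Minute)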
+func (f *IBMPICloudConnectionClient) GetAll(powerinstanceid string, timeout time.Duration) (*models.CloudConnections, error) {
+
+	params := p_cloud_cloud_connections.NewPcloudCloudconnectionsGetallParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid)
+	resp, err := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get all cloud connections %s", err)
+	}
+	return resp.Payload, nil
+}
+
+// Update a cloud connection
+func (f *IBMPICloudConnectionClient) Update(updateparams *p_cloud_cloud_connections.PcloudCloudconnectionsPutParams) (*models.CloudConnection, error) {
+
+	params := p_cloud_cloud_connections.NewPcloudCloudconnectionsPutParams().WithCloudInstanceID(updateparams.CloudInstanceID).WithCloudConnectionID(updateparams.CloudConnectionID).WithBody(updateparams.Body)
+	resp, err, _ := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsPut(params, ibmpisession.NewAuth(f.session, updateparams.CloudInstanceID))
+	if err != nil {
+		return nil, fmt.Errorf("Failed to update cloud connection %s", err)
+	}
+	return resp.Payload, nil
+}
+
+// Delete a Cloud Connection
+func (f *IBMPICloudConnectionClient) Delete(pclouddef *p_cloud_cloud_connections.PcloudCloudconnectionsDeleteParams) (models.Object, error) {
+	params := p_cloud_cloud_connections.NewPcloudCloudconnectionsDeleteParams().WithCloudInstanceID(pclouddef.CloudInstanceID).WithCloudConnectionID(pclouddef.CloudConnectionID)
+	respok, _, err := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsDelete(params, ibmpisession.NewAuth(f.session, pclouddef.CloudInstanceID))
+
+	if err != nil || respok.Payload == nil {
+		return nil, fmt.Errorf("Failed to delete cloud connection %s", err)
+	}
+	return respok.Payload, nil
+}
+
+// AddNetwork adds a network to a cloud connection
+func (f *IBMPICloudConnectionClient) AddNetwork(pcloudnetworkdef *p_cloud_cloud_connections.PcloudCloudconnectionsNetworksPutParams) (*models.CloudConnection, error) {
+	params := p_cloud_cloud_connections.NewPcloudCloudconnectionsNetworksPutParams().WithCloudInstanceID(pcloudnetworkdef.CloudInstanceID).WithCloudConnectionID(pcloudnetworkdef.CloudConnectionID).WithNetworkID(pcloudnetworkdef.NetworkID)
+	resp, err := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsNetworksPut(params, ibmpisession.NewAuth(f.session, pcloudnetworkdef.CloudInstanceID))
+	if err != nil || resp.Payload == nil {
+		return nil, fmt.Errorf("Failed to add the network to the cloud connection %s", err)
+	}
+	return resp.Payload, nil
+}
+
+// DeleteNetwork deletes a network from a cloud connection
+func (f *IBMPICloudConnectionClient) DeleteNetwork(pcloudnetworkdef *p_cloud_cloud_connections.PcloudCloudconnectionsNetworksDeleteParams) (*models.CloudConnection, error) {
+
+	params := p_cloud_cloud_connections.NewPcloudCloudconnectionsNetworksDeleteParams().WithCloudInstanceID(pcloudnetworkdef.CloudInstanceID).WithCloudConnectionID(pcloudnetworkdef.CloudConnectionID).WithNetworkID(pcloudnetworkdef.NetworkID)
+	resp, err := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsNetworksDelete(params, ibmpisession.NewAuth(f.session, pcloudnetworkdef.CloudInstanceID))
+
+	if err != nil || resp.Payload == nil {
+		return nil, fmt.Errorf("Failed to perform the delete operation... 
%s", err) + } + return resp.Payload, nil +} + +// UpdateNetwork Update a network from a cloud connection +func (f *IBMPICloudConnectionClient) UpdateNetwork(pcloudnetworkdef *p_cloud_cloud_connections.PcloudCloudconnectionsNetworksPutParams) (*models.CloudConnection, error) { + params := p_cloud_cloud_connections.NewPcloudCloudconnectionsNetworksPutParams().WithCloudInstanceID(pcloudnetworkdef.CloudInstanceID).WithCloudConnectionID(pcloudnetworkdef.CloudConnectionID).WithNetworkID(pcloudnetworkdef.NetworkID) + resp, err := f.session.Power.PCloudCloudConnections.PcloudCloudconnectionsNetworksPut(params, ibmpisession.NewAuth(f.session, pcloudnetworkdef.CloudInstanceID)) + + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to perform the update operation... %s", err) + } + return resp.Payload, nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-cloud-instance.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-cloud-instance.go new file mode 100644 index 00000000000..19068084f73 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-cloud-instance.go @@ -0,0 +1,54 @@ +package instance + +import ( + "fmt" + + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPICloudInstanceClient ... +type IBMPICloudInstanceClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPICloudInstanceClient ... +func NewIBMPICloudInstanceClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPICloudInstanceClient { + return &IBMPICloudInstanceClient{ + session: sess, + powerinstanceid: powerinstanceid, + } +} + +// Get information about a cloud instance +func (f *IBMPICloudInstanceClient) Get(powerinstanceid string) (*models.CloudInstance, error) { + params := p_cloud_instances.NewPcloudCloudinstancesGetParams().WithCloudInstanceID(powerinstanceid) + resp, err := f.session.Power.PCloudInstances.PcloudCloudinstancesGet(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil { + return nil, fmt.Errorf("Failed to Get Cloud Instance %s: %s", powerinstanceid, err) + } + return resp.Payload, nil +} + +// Update a cloud instance +func (f *IBMPICloudInstanceClient) Update(powerinstanceid string, updateparams *p_cloud_instances.PcloudCloudinstancesPutParams) (*models.CloudInstance, error) { + params := p_cloud_instances.NewPcloudCloudinstancesPutParamsWithTimeout(f.session.Timeout).WithCloudInstanceID(powerinstanceid).WithBody(updateparams.Body) + resp, err := f.session.Power.PCloudInstances.PcloudCloudinstancesPut(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil { + return nil, fmt.Errorf("Failed to Update Cloud Instance %s: %s", powerinstanceid, err) + + } + return resp.Payload, nil +} + +// Delete a Cloud instance +func (f *IBMPICloudInstanceClient) Delete(powerinstanceid string) (models.Object, error) { + params := p_cloud_instances.NewPcloudCloudinstancesDeleteParams().WithCloudInstanceID(powerinstanceid) + resp, err := f.session.Power.PCloudInstances.PcloudCloudinstancesDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Delete Cloud Instance %s: %s", powerinstanceid, err) + } + return resp.Payload, nil +} diff --git 
a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-helper.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-helper.go new file mode 100644 index 00000000000..d338ecce141 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-helper.go @@ -0,0 +1,23 @@ +package instance + +import ( + "github.com/IBM-Cloud/power-go-client/ibmpisession" +) + +/* +Helper methods that will be used by the client classes +*/ + +// IBMPIHelperClient ... +type IBMPIHelperClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPIHelperClient ... +func NewIBMPIHelperClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPIHelperClient { + return &IBMPIHelperClient{ + session: sess, + powerinstanceid: powerinstanceid, + } +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-image.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-image.go new file mode 100644 index 00000000000..3026d906c01 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-image.go @@ -0,0 +1,115 @@ +package instance + +import ( + "fmt" + + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +//IBMPIImageClient ... +type IBMPIImageClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPIImageClient ... +func NewIBMPIImageClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPIImageClient { + return &IBMPIImageClient{ + session: sess, + powerinstanceid: powerinstanceid, + } +} + +// Get PI Image +func (f *IBMPIImageClient) Get(id, powerinstanceid string) (*models.Image, error) { + + params := p_cloud_images.NewPcloudCloudinstancesImagesGetParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid).WithImageID(id) + resp, err := f.session.Power.PCloudImages.PcloudCloudinstancesImagesGet(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil || resp == nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Get PI Image %s :%s", id, err) + } + return resp.Payload, nil +} + +//GetAll Images that are imported into Power Instance +func (f *IBMPIImageClient) GetAll(powerinstanceid string) (*models.Images, error) { + + params := p_cloud_images.NewPcloudCloudinstancesImagesGetallParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid) + resp, err := f.session.Power.PCloudImages.PcloudCloudinstancesImagesGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || resp == nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Get all PI Images of the PVM instance %s : %s", powerinstanceid, err) + } + return resp.Payload, nil + +} + +//Create the stock image +func (f *IBMPIImageClient) Create(name, imageid string, powerinstanceid string) (*models.Image, error) { + + var source = "root-project" + var body = models.CreateImage{ + ImageName: name, + ImageID: imageid, + Source: &source, + } + params := p_cloud_images.NewPcloudCloudinstancesImagesPostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithBody(&body) + _, result, err := f.session.Power.PCloudImages.PcloudCloudinstancesImagesPost(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || result == nil || result.Payload == 
nil {
+        return nil, fmt.Errorf("Failed to Create Image of the PVM instance %s : %s", powerinstanceid, err)
+    }
+    return result.Payload, nil
+
+}
+
+// Delete ...
+func (f *IBMPIImageClient) Delete(id string, powerinstanceid string) error {
+    params := p_cloud_images.NewPcloudCloudinstancesImagesDeleteParamsWithTimeout(helpers.PIDeleteTimeOut).WithCloudInstanceID(powerinstanceid).WithImageID(id)
+    _, err := f.session.Power.PCloudImages.PcloudCloudinstancesImagesDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return fmt.Errorf("Failed to Delete PI Image %s :%s", id, err)
+    }
+    return nil
+}
+
+// GetStockImage gets a single stock image
+func (f *IBMPIImageClient) GetStockImage(id, powerinstanceid string) (*models.Image, error) {
+
+    params := p_cloud_images.NewPcloudCloudinstancesStockimagesGetParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithImageID(id)
+    resp, err := f.session.Power.PCloudImages.PcloudCloudinstancesStockimagesGet(params, ibmpisession.NewAuth(f.session, f.powerinstanceid))
+
+    if err != nil || resp == nil {
+        return nil, fmt.Errorf("Failed to Get PI Stock Image %s of the power instance %s : %s", id, powerinstanceid, err)
+    }
+    return resp.Payload, nil
+}
+
+// GetStockImages lists all stock images of the power instance
+func (f *IBMPIImageClient) GetStockImages(powerinstanceid string) (*models.Images, error) {
+
+    params := p_cloud_images.NewPcloudCloudinstancesStockimagesGetallParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid)
+    resp, err := f.session.Power.PCloudImages.PcloudCloudinstancesStockimagesGetall(params, ibmpisession.NewAuth(f.session, f.powerinstanceid))
+
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Get all PI Stock Images of the PVM instance %s : %s", powerinstanceid, err)
+    }
+    return resp.Payload, nil
+}
+
+// GetSAPImages ...
+func (f *IBMPIImageClient) GetSAPImages(powerinstanceid string, sapimage bool) (*models.Images, error) {
+
+    params := p_cloud_images.NewPcloudImagesGetallParams()
+    params.Sap = &sapimage
+
+    resp, err := f.session.Power.PCloudImages.PcloudImagesGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Get all PI SAP Images of the PVM instance %s : %s", powerinstanceid, err)
+    }
+    return resp.Payload, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-instance.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-instance.go
new file mode 100644
index 00000000000..10b6276a5d6
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-instance.go
@@ -0,0 +1,240 @@
+package instance
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/IBM-Cloud/power-go-client/helpers"
+    "github.com/IBM-Cloud/power-go-client/ibmpisession"
+    "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances"
+    "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p"
+    "github.com/IBM-Cloud/power-go-client/power/models"
+)
+
+/* ChangeLog
+
+2020-June-05 : Added the timeout variable to the clients since a lot of the SB / PowerVC calls are timing out.
+
+*/
+
+// IBMPIInstanceClient ...
+type IBMPIInstanceClient struct {
+    session *ibmpisession.IBMPISession
+    powerinstanceid string
+}
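+// Example (illustrative): a minimal sketch of constructing the client and
+// reading a PVM instance, assuming an authenticated session in sess; the IDs
+// are placeholders.
+//
+//     piClient := NewIBMPIInstanceClient(sess, cloudInstanceID)
+//     pvm, err := piClient.Get("<pvm-instance-id>", cloudInstanceID, helpers.PIGetTimeOut)
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+
+// NewIBMPIInstanceClient ...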
+func NewIBMPIInstanceClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPIInstanceClient { + return &IBMPIInstanceClient{ + session: sess, + powerinstanceid: powerinstanceid, + } +} + +//Get information about a single pvm only +func (f *IBMPIInstanceClient) Get(id, powerinstanceid string, timeout time.Duration) (*models.PVMInstance, error) { + + params := p_cloud_p_vm_instances.NewPcloudPvminstancesGetParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(id) + resp, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesGet(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Get PVM Instance %s :%s", id, err) + } + return resp.Payload, nil +} + +// GetAll Information about all the PVM Instances for a Client +func (f *IBMPIInstanceClient) GetAll(powerinstanceid string, timeout time.Duration) (*models.PVMInstances, error) { + + params := p_cloud_p_vm_instances.NewPcloudPvminstancesGetallParamsWithTimeout(getTimeOut).WithCloudInstanceID(powerinstanceid) + resp, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Get all PVM Instances of Power Instance %s :%s", powerinstanceid, err) + } + return resp.Payload, nil +} + +//Create ... +func (f *IBMPIInstanceClient) Create(powerdef *p_cloud_p_vm_instances.PcloudPvminstancesPostParams, powerinstanceid string, timeout time.Duration) (*models.PVMInstanceList, error) { + + params := p_cloud_p_vm_instances.NewPcloudPvminstancesPostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithBody(powerdef.Body) + postok, postcreated, postAccepted, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesPost(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil { + return nil, fmt.Errorf("Failed to Create PVM Instance :%s", err) + } + + if postok != nil && len(postok.Payload) > 0 { + return &postok.Payload, nil + } + if postcreated != nil && len(postcreated.Payload) > 0 { + return &postcreated.Payload, nil + } + if postAccepted != nil && len(postAccepted.Payload) > 0 { + return &postAccepted.Payload, nil + } + return nil, nil +} + +// Delete PVM Instances +func (f *IBMPIInstanceClient) Delete(id, powerinstanceid string, timeout time.Duration) error { + + params := p_cloud_p_vm_instances.NewPcloudPvminstancesDeleteParamsWithTimeout(helpers.PIDeleteTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(id) + _, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil { + return fmt.Errorf("Failed to Delete PVM Instance %s :%s", id, err) + } + + return nil +} + +// Update PVM Instances +func (f *IBMPIInstanceClient) Update(id, powerinstanceid string, powerupdateparams *p_cloud_p_vm_instances.PcloudPvminstancesPutParams, timeout time.Duration) (*models.PVMInstanceUpdateResponse, error) { + + params := p_cloud_p_vm_instances.NewPcloudPvminstancesPutParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(id).WithBody(powerupdateparams.Body) + resp, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesPut(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || resp == nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Update PVM Instance %s :%s", 
id, err)
+    }
+    return resp.Payload, nil
+}
+
+// Action performs an operation on a PVM Instance
+func (f *IBMPIInstanceClient) Action(poweractionparams *p_cloud_p_vm_instances.PcloudPvminstancesActionPostParams, id, powerinstanceid string, timeout time.Duration) (models.Object, error) {
+
+    params := p_cloud_p_vm_instances.NewPcloudPvminstancesActionPostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(id).WithBody(poweractionparams.Body)
+    postok, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesActionPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return nil, fmt.Errorf("Failed to perform the action on the PVM Instance: %s", err)
+    }
+
+    return postok.Payload, nil
+
+}
+
+// PostConsoleURL Generate the Console URL
+func (f *IBMPIInstanceClient) PostConsoleURL(id, powerinstanceid string, timeout time.Duration) (models.Object, error) {
+
+    params := p_cloud_p_vm_instances.NewPcloudPvminstancesConsolePostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(id)
+    postok, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesConsolePost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return nil, fmt.Errorf("Failed to Generate the Console URL of the PVM Instance: %s", err)
+    }
+    return postok.Payload, nil
+}
+
+// CaptureInstanceToImageCatalog Captures an instance
+func (f *IBMPIInstanceClient) CaptureInstanceToImageCatalog(id, powerinstanceid string, picaptureparams *p_cloud_p_vm_instances.PcloudPvminstancesCapturePostParams, timeout time.Duration) (models.Object, error) {
+
+    params := p_cloud_p_vm_instances.NewPcloudPvminstancesCapturePostParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(id).WithBody(picaptureparams.Body)
+    postok, _, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesCapturePost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return nil, fmt.Errorf("Failed to Capture the PVM Instance: %s", err)
+    }
+    return postok.Payload, nil
+
+}
+
+// CreatePvmSnapShot Create a snapshot of the instance
+func (f *IBMPIInstanceClient) CreatePvmSnapShot(snapshotdef *p_cloud_p_vm_instances.PcloudPvminstancesSnapshotsPostParams, pvminstanceid, powerinstanceid string, timeout time.Duration) (*models.SnapshotCreateResponse, error) {
+
+    params := p_cloud_p_vm_instances.NewPcloudPvminstancesSnapshotsPostParamsWithTimeout(helpers.PICreateTimeOut).WithPvmInstanceID(pvminstanceid).WithCloudInstanceID(powerinstanceid).WithBody(snapshotdef.Body)
+    snapshotpostaccepted, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesSnapshotsPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || snapshotpostaccepted == nil {
+        return nil, fmt.Errorf("Failed to Create a snapshot of the pvminstance %s : %s", pvminstanceid, err)
+    }
+    return snapshotpostaccepted.Payload, nil
+}
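+// Example (illustrative): a minimal sketch of stopping a PVM instance through
+// Action, assuming models.PVMInstanceAction is the body type carrying the
+// operation name; the session and IDs are placeholders.
+//
+//     action := "stop"
+//     actionParams := p_cloud_p_vm_instances.NewPcloudPvminstancesActionPostParams().
+//         WithBody(&models.PVMInstanceAction{Action: &action})
+//     _, err := piClient.Action(actionParams, "<pvm-instance-id>", cloudInstanceID, helpers.PICreateTimeOut)
+
+// CreateClone ...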
+func (f *IBMPIInstanceClient) CreateClone(clonedef *p_cloud_p_vm_instances.PcloudPvminstancesClonePostParams, pvminstanceid, powerinstanceid string) (*models.PVMInstance, error) {
+
+    params := p_cloud_p_vm_instances.NewPcloudPvminstancesClonePostParamsWithTimeout(helpers.PICreateTimeOut).WithPvmInstanceID(pvminstanceid).WithCloudInstanceID(powerinstanceid).WithBody(clonedef.Body)
+    clonePost, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesClonePost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return nil, fmt.Errorf("Failed to create the clone of the pvm instance: %s", err)
+    }
+    return clonePost.Payload, nil
+}
+
+// GetSnapShotVM Get information about the snapshots of a vm
+func (f *IBMPIInstanceClient) GetSnapShotVM(powerinstanceid, pvminstanceid string, timeout time.Duration) (*models.Snapshots, error) {
+
+    params := p_cloud_p_vm_instances.NewPcloudPvminstancesSnapshotsGetallParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(pvminstanceid)
+    resp, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesSnapshotsGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Get the snapshots for the pvminstance [%s]: %s", pvminstanceid, err)
+    }
+    return resp.Payload, nil
+
+}
+
+// RestoreSnapShotVM Restore a snapshot
+func (f *IBMPIInstanceClient) RestoreSnapShotVM(powerinstanceid, pvminstanceid, snapshotid, restoreAction string, restoreparams *p_cloud_p_vm_instances.PcloudPvminstancesSnapshotsRestorePostParams, timeout time.Duration) (*models.Snapshot, error) {
+    params := p_cloud_p_vm_instances.NewPcloudPvminstancesSnapshotsRestorePostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(pvminstanceid).WithSnapshotID(snapshotid).WithRestoreFailAction(&restoreAction).WithBody(restoreparams.Body)
+    resp, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesSnapshotsRestorePost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to restore the snapshot for the pvminstance [%s]: %s", pvminstanceid, err)
+    }
+    return resp.Payload, nil
+}
+
+// AddNetwork Add a network to the instance
+func (f *IBMPIInstanceClient) AddNetwork(powerinstanceid, pvminstanceid string, networkdef *p_cloud_p_vm_instances.PcloudPvminstancesNetworksPostParams, timeout time.Duration) (*models.PVMInstanceNetwork, error) {
+
+    params := p_cloud_p_vm_instances.NewPcloudPvminstancesNetworksPostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(pvminstanceid).WithBody(networkdef.Body)
+    resp, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesNetworksPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+
+    if err != nil || resp.Payload.NetworkID == "" {
+        return nil, fmt.Errorf("Failed to attach the network to the pvminstanceid %s : %s", pvminstanceid, err)
+    }
+    return resp.Payload, nil
+}
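+// Example (illustrative): a minimal sketch of attaching an existing network
+// to a PVM instance via AddNetwork, assuming models.PVMInstanceAddNetwork is
+// the body type with a NetworkID field; the IDs are placeholders.
+//
+//     netID := "<network-id>"
+//     netParams := p_cloud_p_vm_instances.NewPcloudPvminstancesNetworksPostParams().
+//         WithBody(&models.PVMInstanceAddNetwork{NetworkID: &netID})
+//     _, err := piClient.AddNetwork(cloudInstanceID, "<pvm-instance-id>", netParams, helpers.PICreateTimeOut)
+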
+// CreateSAP Create SAP Systems
+func (f *IBMPIInstanceClient) CreateSAP(powerdef *p_cloud_s_a_p.PcloudSapPostParams, powerinstanceid string, timeout time.Duration) (*models.PVMInstanceList, error) {
+
+    params := p_cloud_s_a_p.NewPcloudSapPostParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithBody(powerdef.Body)
+    postok, postcreated, postAccepted, err := f.session.Power.PCloudSAP.PcloudSapPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+
+    if err != nil {
+        return nil, fmt.Errorf("Failed to create the SAP instance in the power instance %s : %s", powerinstanceid, err)
+    }
+
+    if postok != nil && len(postok.Payload) > 0 {
+        return &postok.Payload, nil
+    }
+    if postcreated != nil && len(postcreated.Payload) > 0 {
+        return &postcreated.Payload, nil
+    }
+    if postAccepted != nil && len(postAccepted.Payload) > 0 {
+        return &postAccepted.Payload, nil
+    }
+
+    return nil, nil
+}
+
+// GetSAPProfiles Get All SAP Profiles
+func (f *IBMPIInstanceClient) GetSAPProfiles(powerinstanceid string) (*models.SAPProfiles, error) {
+
+    params := p_cloud_s_a_p.NewPcloudSapGetallParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid)
+    resp, err := f.session.Power.PCloudSAP.PcloudSapGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to get the SAP profiles of the power instance %s : %s", powerinstanceid, err)
+    }
+    return resp.Payload, nil
+}
+
+// GetSap Get an SAP profile
+func (f *IBMPIInstanceClient) GetSap(powerinstanceid, sapprofileID string) (*models.SAPProfile, error) {
+    params := p_cloud_s_a_p.NewPcloudSapGetParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid).WithSapProfileID(sapprofileID)
+    resp, err := f.session.Power.PCloudSAP.PcloudSapGet(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return nil, fmt.Errorf("Failed to get the SAP profile %s of the power instance %s : %s", sapprofileID, powerinstanceid, err)
+    }
+    return resp.Payload, nil
+
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-key.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-key.go
new file mode 100644
index 00000000000..fc96fd39483
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-key.go
@@ -0,0 +1,64 @@
+package instance
+
+import (
+    "fmt"
+
+    "github.com/IBM-Cloud/power-go-client/ibmpisession"
+    "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys"
+    "github.com/IBM-Cloud/power-go-client/power/models"
+)
+
+// IBMPIKeyClient ...
+type IBMPIKeyClient struct {
+    session *ibmpisession.IBMPISession
+    powerinstanceid string
+}
+
+// NewIBMPIKeyClient ...
+func NewIBMPIKeyClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPIKeyClient {
+    return &IBMPIKeyClient{sess, powerinstanceid}
+}
+
+/*
+This was a change requested by the IBM Cloud team: the powerinstanceid is no
+longer stored in the provider and is instead passed into each call, because
+the Power-IaaS API requires the CRN to be passed in the header.
+*/
+
+// Get Key ...
+func (f *IBMPIKeyClient) Get(id, powerinstanceid string) (*models.SSHKey, error) {
+
+    var tenantid = f.session.UserAccount
+    params := p_cloud_tenants_ssh_keys.NewPcloudTenantsSshkeysGetParams().WithTenantID(tenantid).WithSshkeyName(id)
+    resp, err := f.session.Power.PCloudTenantsSSHKeys.PcloudTenantsSshkeysGet(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Get PI Key %s :%s", id, err)
+    }
+    return resp.Payload, nil
+}
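+// Example (illustrative): a minimal sketch of uploading an SSH key for the
+// tenant, assuming an authenticated session in sess; the key material is a
+// placeholder.
+//
+//     keyClient := NewIBMPIKeyClient(sess, cloudInstanceID)
+//     _, key, err := keyClient.Create("my-key", "ssh-rsa AAAA... user@host", cloudInstanceID)
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+
+// Create PI Key ...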
+func (f *IBMPIKeyClient) Create(name string, sshkey, powerinstanceid string) (*models.SSHKey, *models.SSHKey, error) { + var body = models.SSHKey{ + Name: &name, + SSHKey: &sshkey, + } + params := p_cloud_tenants_ssh_keys.NewPcloudTenantsSshkeysPostParamsWithTimeout(f.session.Timeout).WithTenantID(f.session.UserAccount).WithBody(&body) + _, postok, err := f.session.Power.PCloudTenantsSSHKeys.PcloudTenantsSshkeysPost(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || postok == nil { + return nil, nil, fmt.Errorf("Failed to Create PI Key %s :%s", name, err) + } + return nil, postok.Payload, nil + +} + +// Delete ... +func (f *IBMPIKeyClient) Delete(id string, powerinstanceid string) error { + var tenantid = f.session.UserAccount + params := p_cloud_tenants_ssh_keys.NewPcloudTenantsSshkeysDeleteParamsWithTimeout(f.session.Timeout).WithTenantID(tenantid).WithSshkeyName(id) + _, err := f.session.Power.PCloudTenantsSSHKeys.PcloudTenantsSshkeysDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil { + return fmt.Errorf("Failed to Delete PI Key %s :%s", id, err) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-network.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-network.go new file mode 100644 index 00000000000..3a86e52b2d0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-network.go @@ -0,0 +1,172 @@ +package instance + +import ( + "fmt" + "time" + + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPINetworkClient ... +type IBMPINetworkClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPINetworkClient ... +func NewIBMPINetworkClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPINetworkClient { + return &IBMPINetworkClient{ + session: sess, + powerinstanceid: powerinstanceid, + } +} + +// Get ... +func (f *IBMPINetworkClient) Get(id, powerinstanceid string, timeout time.Duration) (*models.Network, error) { + params := p_cloud_networks.NewPcloudNetworksGetParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithNetworkID(id) + resp, err := f.session.Power.PCloudNetworks.PcloudNetworksGet(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil || resp == nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Get PI Network %s :%s", id, err) + } + return resp.Payload, nil +} + +// Create ... 
+func (f *IBMPINetworkClient) Create(name string, networktype string, cidr string, dnsservers []string, gateway string, startip string, endip string, powerinstanceid string, timeout time.Duration) (*models.Network, *models.Network, error) {
+
+    var body = models.NetworkCreate{
+        Type: &networktype,
+        Name: name,
+    }
+    // vlan networks carry an explicit address range, gateway and CIDR
+    if networktype == "vlan" {
+        body.IPAddressRanges = []*models.IPAddressRange{
+            {EndingIPAddress: &endip, StartingIPAddress: &startip},
+        }
+        body.Gateway = gateway
+        body.Cidr = cidr
+    }
+    if dnsservers != nil {
+        body.DNSServers = dnsservers
+    }
+    params := p_cloud_networks.NewPcloudNetworksPostParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithBody(&body)
+    _, resp, err := f.session.Power.PCloudNetworks.PcloudNetworksPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, nil, fmt.Errorf("Failed to Create PI Network %s :%s", name, err)
+    }
+
+    return resp.Payload, nil, nil
+}
+
+// GetPublic ...
+func (f *IBMPINetworkClient) GetPublic(powerinstanceid string, timeout time.Duration) (*models.Networks, error) {
+
+    filterQuery := "type=\"pub-vlan\""
+    params := p_cloud_networks.NewPcloudNetworksGetallParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithFilter(&filterQuery)
+
+    resp, err := f.session.Power.PCloudNetworks.PcloudNetworksGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Get all PI Networks in a power instance %s :%s", powerinstanceid, err)
+    }
+    return resp.Payload, nil
+}
+
+// Delete ...
+func (f *IBMPINetworkClient) Delete(id string, powerinstanceid string, timeout time.Duration) error {
+    params := p_cloud_networks.NewPcloudNetworksDeleteParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithNetworkID(id)
+    _, err := f.session.Power.PCloudNetworks.PcloudNetworksDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return fmt.Errorf("Failed to Delete PI Network %s :%s", id, err)
+    }
+    return nil
+}
+
+// Network port operations
+
+// GetAllPort ...
+func (f *IBMPINetworkClient) GetAllPort(id string, powerinstanceid string, timeout time.Duration) (*models.NetworkPorts, error) {
+
+    params := p_cloud_networks.NewPcloudNetworksPortsGetallParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithNetworkID(id)
+    resp, err := f.session.Power.PCloudNetworks.PcloudNetworksPortsGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Get all PI Network Ports of network %s :%s", id, err)
+    }
+    return resp.Payload, nil
+
+}
+
+// GetPort ...
+func (f *IBMPINetworkClient) GetPort(id string, powerinstanceid string, networkPortID string, timeout time.Duration) (*models.NetworkPort, error) {
+    params := p_cloud_networks.NewPcloudNetworksPortsGetParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithNetworkID(id).WithPortID(networkPortID)
+    resp, err := f.session.Power.PCloudNetworks.PcloudNetworksPortsGet(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Get PI Network Port %s :%s", networkPortID, err)
+    }
+    return resp.Payload, nil
+
+}
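+// Example (illustrative): a minimal sketch of creating a private vlan network
+// with Create, which posts a models.NetworkCreate built from these arguments;
+// the addresses and the name are placeholders.
+//
+//     netClient := NewIBMPINetworkClient(sess, cloudInstanceID)
+//     dns := []string{"9.9.9.9"}
+//     net, _, err := netClient.Create("ocp-net", "vlan", "192.168.100.0/24", dns,
+//         "192.168.100.1", "192.168.100.10", "192.168.100.250", cloudInstanceID, getTimeOut)
+
+// CreatePort ...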
+func (f *IBMPINetworkClient) CreatePort(id string, powerinstanceid string, networkportdef *p_cloud_networks.PcloudNetworksPortsPostParams, timeout time.Duration) (*models.NetworkPort, error) {
+    params := p_cloud_networks.NewPcloudNetworksPortsPostParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithNetworkID(id).WithBody(networkportdef.Body)
+    resp, err := f.session.Power.PCloudNetworks.PcloudNetworksPortsPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to create the network port for network %s cloudinstance id [%s]", id, powerinstanceid)
+    }
+    return resp.Payload, nil
+}
+
+// DeletePort ...
+func (f *IBMPINetworkClient) DeletePort(networkid string, powerinstanceid string, portid string, timeout time.Duration) (*models.Object, error) {
+    params := p_cloud_networks.NewPcloudNetworksPortsDeleteParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithNetworkID(networkid).WithPortID(portid)
+    resp, err := f.session.Power.PCloudNetworks.PcloudNetworksPortsDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to delete the network port %s for network %s cloudinstance id [%s]", portid, networkid, powerinstanceid)
+    }
+    return &resp.Payload, nil
+}
+
+// AttachPort to the PVM Instance
+func (f *IBMPINetworkClient) AttachPort(powerinstanceid, networkID, portID, description, pvminstanceid string, timeout time.Duration) (*models.NetworkPort, error) {
+
+    // both fields come straight from the parameters, so they are always set
+    body := models.NetworkPortUpdate{
+        Description:   &description,
+        PvmInstanceID: &pvminstanceid,
+    }
+
+    params := p_cloud_networks.NewPcloudNetworksPortsPutParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithNetworkID(networkID).WithPortID(portID).WithBody(&body)
+    resp, err := f.session.Power.PCloudNetworks.PcloudNetworksPortsPut(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to attach the port [%s] of network %s to the pvminstance [%s]", portID, networkID, pvminstanceid)
+    }
+    return resp.Payload, nil
+}
+
+// DetachPort from the PVM Instance
+func (f *IBMPINetworkClient) DetachPort(powerinstanceid, networkID, portID string, timeout time.Duration) (*models.NetworkPort, error) {
+    emptyPVM := ""
+    body := &models.NetworkPortUpdate{
+        PvmInstanceID: &emptyPVM,
+    }
+    params := p_cloud_networks.NewPcloudNetworksPortsPutParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid).WithNetworkID(networkID).WithPortID(portID).WithBody(body)
+    resp, err := f.session.Power.PCloudNetworks.PcloudNetworksPortsPut(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to detach the port [%s] from network %s ", portID, networkID)
+    }
+
+    return resp.Payload, nil
+}
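+// Example (illustrative): a minimal sketch of moving a reserved port between
+// instances with AttachPort and DetachPort; the IDs are placeholders.
+//
+//     _, err := netClient.AttachPort(cloudInstanceID, "<network-id>", "<port-id>",
+//         "bootstrap port", "<pvm-instance-id>", getTimeOut)
+//     ...
+//     _, err = netClient.DetachPort(cloudInstanceID, "<network-id>", "<port-id>", getTimeOut)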
"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPISAPInstanceClient ... +type IBMPISAPInstanceClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPISAPInstanceClient ... +func NewIBMPISAPInstanceClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPISAPInstanceClient { + return &IBMPISAPInstanceClient{ + session: sess, + powerinstanceid: powerinstanceid, + } +} + +//Create SAP System +func (f *IBMPISAPInstanceClient) Create(sapdef *p_cloud_s_a_p.PcloudSapPostParams, id, powerinstanceid string) (*models.PVMInstanceList, error) { + + params := p_cloud_s_a_p.NewPcloudSapPostParamsWithTimeout(f.session.Timeout).WithCloudInstanceID(powerinstanceid).WithBody(sapdef.Body) + sapok, sapcreated, sapaccepted, err := f.session.Power.PCloudSAP.PcloudSapPost(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil { + return nil, fmt.Errorf("Failed to Create Sap Instance %s", err) + } + + if sapok != nil && len(sapok.Payload) > 0 { + return &sapok.Payload, nil + } + if sapcreated != nil && len(sapcreated.Payload) > 0 { + return &sapcreated.Payload, nil + } + if sapaccepted != nil && len(sapaccepted.Payload) > 0 { + return &sapaccepted.Payload, nil + } + + //return &postok.Payload, nil + return nil, fmt.Errorf("No response Returned ") +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-snapshot.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-snapshot.go new file mode 100644 index 00000000000..01f1824e2f6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-snapshot.go @@ -0,0 +1,80 @@ +package instance + +import ( + "fmt" + "time" + + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPISnapshotClient ... +type IBMPISnapshotClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPISnapshotClient ... +func NewIBMPISnapshotClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPISnapshotClient { + return &IBMPISnapshotClient{ + sess, powerinstanceid, + } +} + +//Get information about a single snapshot only +func (f *IBMPISnapshotClient) Get(id, powerinstanceid string, timeout time.Duration) (*models.Snapshot, error) { + params := p_cloud_snapshots.NewPcloudCloudinstancesSnapshotsGetParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid).WithSnapshotID(id) + resp, err := f.session.Power.PCloudSnapshots.PcloudCloudinstancesSnapshotsGet(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Get PI Snapshot %s :%s", id, err) + } + return resp.Payload, nil +} + +// Delete ... 
+func (f *IBMPISnapshotClient) Delete(id string, powerinstanceid string, timeout time.Duration) error {
+    params := p_cloud_snapshots.NewPcloudCloudinstancesSnapshotsDeleteParamsWithTimeout(helpers.PIDeleteTimeOut).WithCloudInstanceID(powerinstanceid).WithSnapshotID(id)
+    _, err := f.session.Power.PCloudSnapshots.PcloudCloudinstancesSnapshotsDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return fmt.Errorf("Failed to Delete PI Snapshot %s :%s", id, err)
+    }
+    return nil
+}
+
+// Update ...
+func (f *IBMPISnapshotClient) Update(id, powerinstanceid string, snapshotdef *models.SnapshotUpdate, timeout time.Duration) (models.Object, error) {
+
+    params := p_cloud_snapshots.NewPcloudCloudinstancesSnapshotsPutParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithSnapshotID(id).WithBody(snapshotdef)
+    resp, err := f.session.Power.PCloudSnapshots.PcloudCloudinstancesSnapshotsPut(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+
+    if err != nil {
+        return nil, fmt.Errorf("Failed to Update PI Snapshot %s :%s", id, err)
+    }
+    return resp.Payload, nil
+}
+
+// GetAll snapshots of a power instance
+func (f *IBMPISnapshotClient) GetAll(id, powerinstanceid string, timeout time.Duration) (*models.Snapshots, error) {
+    params := p_cloud_snapshots.NewPcloudCloudinstancesSnapshotsGetallParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid)
+    resp, err := f.session.Power.PCloudSnapshots.PcloudCloudinstancesSnapshotsGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return nil, fmt.Errorf("Failed to Get all PI Snapshots %s :%s", id, err)
+    }
+    return resp.Payload, nil
+
+}
+
+// Create restores a snapshot of an instance
+func (f *IBMPISnapshotClient) Create(pvminstanceid, powerinstanceid, snapshotid, restorefailAction string, timeout time.Duration) (*models.Snapshot, error) {
+    params := p_cloud_p_vm_instances.NewPcloudPvminstancesSnapshotsRestorePostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(pvminstanceid).WithSnapshotID(snapshotid).WithRestoreFailAction(&restorefailAction)
+    resp, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesSnapshotsRestorePost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return nil, fmt.Errorf("Failed to restore PI Snapshot %s of the instance %s :%s", snapshotid, pvminstanceid, err)
+    }
+    return resp.Payload, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-storage-capacity.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-storage-capacity.go
new file mode 100644
index 00000000000..5b8cb269be9
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-storage-capacity.go
@@ -0,0 +1,33 @@
+package instance
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/IBM-Cloud/power-go-client/ibmpisession"
+    "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity"
+    "github.com/IBM-Cloud/power-go-client/power/models"
+)
+
+// IBMPIStorageCapacityClient ...
+type IBMPIStorageCapacityClient struct {
+    session *ibmpisession.IBMPISession
+    powerinstanceid string
+}
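+// Example (illustrative): a minimal sketch of listing the storage pool
+// capacity of a power instance; the session and ID are placeholders.
+//
+//     scClient := NewIBMPIStorageCapacityClient(sess, cloudInstanceID)
+//     pools, err := scClient.GetAll(cloudInstanceID, getTimeOut)
+
+// NewIBMPIStorageCapacityClient ...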
+func NewIBMPIStorageCapacityClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPIStorageCapacityClient { + return &IBMPIStorageCapacityClient{ + sess, powerinstanceid, + } +} + +//GetAll information about all the storage pools +func (f *IBMPIStorageCapacityClient) GetAll(powerinstanceid string, timeout time.Duration) (*models.StoragePoolsCapacity, error) { + params := p_cloud_storage_capacity.NewPcloudStoragecapacityPoolsGetallParamsWithTimeout(timeout).WithCloudInstanceID(powerinstanceid) + resp, err := f.session.Power.PCloudStorageCapacity.PcloudStoragecapacityPoolsGetall(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to get all storage pools %s", err) + } + return resp.Payload, nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-system-pools.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-system-pools.go new file mode 100644 index 00000000000..4b5ab04d988 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-system-pools.go @@ -0,0 +1,33 @@ +package instance + +import ( + "fmt" + + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPISystemPoolClient ... +type IBMPISystemPoolClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPISystemPoolClient ... +func NewIBMPISystemPoolClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPISystemPoolClient { + return &IBMPISystemPoolClient{ + sess, powerinstanceid, + } +} + +//Get the System Pools +func (f *IBMPISystemPoolClient) Get(powerinstanceid string) (models.SystemPools, error) { + params := p_cloud_system_pools.NewPcloudSystempoolsGetParamsWithTimeout(f.session.Timeout).WithCloudInstanceID(powerinstanceid) + resp, err := f.session.Power.PCloudSystemPools.PcloudSystempoolsGet(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to perform get operation... %s", err) + } + return resp.Payload, nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-tasks.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-tasks.go new file mode 100644 index 00000000000..67261bf87f2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-tasks.go @@ -0,0 +1,46 @@ +package instance + +import ( + "fmt" + + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPITaskClient ... +type IBMPITaskClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPITaskClient ... +func NewIBMPITaskClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPITaskClient { + return &IBMPITaskClient{ + session: sess, + powerinstanceid: powerinstanceid, + } +} + +// Get ... +func (f *IBMPITaskClient) Get(id, powerinstanceid string) (*models.Task, error) { + params := p_cloud_tasks.NewPcloudTasksGetParamsWithTimeout(postTimeOut).WithTaskID(id) + resp, err := f.session.Power.PCloudTasks.PcloudTasksGet(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to get the task id ... 
%s", err) + } + return resp.Payload, nil +} + +// Delete ... +func (f *IBMPITaskClient) Delete(id, powerinstanceid string) (models.Object, error) { + + params := p_cloud_tasks.NewPcloudTasksDeleteParamsWithTimeout(postTimeOut).WithTaskID(id) + resp, err := f.session.Power.PCloudTasks.PcloudTasksDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to delete the task id ... %s", err) + } + return resp.Payload, nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-tenant.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-tenant.go new file mode 100644 index 00000000000..f0012a1c821 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-tenant.go @@ -0,0 +1,34 @@ +package instance + +import ( + "fmt" + + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPITenantClient ... +type IBMPITenantClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +// NewIBMPITenantClient ... +func NewIBMPITenantClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPITenantClient { + return &IBMPITenantClient{ + session: sess, + powerinstanceid: powerinstanceid, + } +} + +// Get .. +func (f *IBMPITenantClient) Get(tenantid string) (*models.Tenant, error) { + params := p_cloud_tenants.NewPcloudTenantsGetParams().WithTenantID(f.session.UserAccount).WithTenantID(tenantid) + resp, err := f.session.Power.PCloudTenants.PcloudTenantsGet(params, ibmpisession.NewAuth(f.session, tenantid)) + + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to perform get operation... %s", err) + } + return resp.Payload, nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-volume.go b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-volume.go new file mode 100644 index 00000000000..d9e8c58b05e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/clients/instance/ibm-pi-volume.go @@ -0,0 +1,191 @@ +package instance + +import ( + "fmt" + "time" + + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +// IBMPIVolumeClient .. +type IBMPIVolumeClient struct { + session *ibmpisession.IBMPISession + powerinstanceid string +} + +const ( + + // Timeouts for power + postTimeOut = 30 * time.Second + getTimeOut = 60 * time.Second + deleteTimeOut = 30 * time.Second +) + +// NewIBMPIVolumeClient ... 
+func NewIBMPIVolumeClient(sess *ibmpisession.IBMPISession, powerinstanceid string) *IBMPIVolumeClient { + return &IBMPIVolumeClient{ + sess, powerinstanceid, + } +} + +//Get information about a single volume only +func (f *IBMPIVolumeClient) Get(id, powerinstanceid string, timeout time.Duration) (*models.Volume, error) { + params := p_cloud_volumes.NewPcloudCloudinstancesVolumesGetParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(powerinstanceid).WithVolumeID(id) + resp, err := f.session.Power.PCloudVolumes.PcloudCloudinstancesVolumesGet(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + + if err != nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Get PI Volume %s :%s", id, err) + } + return resp.Payload, nil +} + +//CreateVolumeV2 ... +func (f *IBMPIVolumeClient) CreateVolumeV2(createVolDefs *p_cloud_volumes.PcloudV2VolumesPostParams, powerinstanceid string, timeout time.Duration) (*models.Volumes, error) { + params := p_cloud_volumes.NewPcloudV2VolumesPostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithBody(createVolDefs.Body) + resp, err := f.session.Power.PCloudVolumes.PcloudV2VolumesPost(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil { + return nil, fmt.Errorf("Failed to Create PI Volume %s :%s", *createVolDefs.Body.Name, err) + } + return resp.Payload, nil +} + +// CreateVolume ... +func (f *IBMPIVolumeClient) CreateVolume(createVolDefs *p_cloud_volumes.PcloudCloudinstancesVolumesPostParams, powerinstanceid string, timeout time.Duration) (*models.Volume, error) { + params := p_cloud_volumes.NewPcloudCloudinstancesVolumesPostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithBody(createVolDefs.Body) + resp, err := f.session.Power.PCloudVolumes.PcloudCloudinstancesVolumesPost(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || resp == nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Create PI Instance Volume %s :%s", *createVolDefs.Body.Name, err) + } + return resp.Payload, nil +} + +// UpdateVolume ... +func (f *IBMPIVolumeClient) UpdateVolume(updateVolDefs *p_cloud_volumes.PcloudCloudinstancesVolumesPutParams, volumeid, powerinstanceid string, timeout time.Duration) (*models.Volume, error) { + params := p_cloud_volumes.NewPcloudCloudinstancesVolumesPutParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithBody(updateVolDefs.Body).WithVolumeID(volumeid) + resp, err := f.session.Power.PCloudVolumes.PcloudCloudinstancesVolumesPut(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil || resp == nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Update PI Instance Volume %s :%s", volumeid, err) + } + return resp.Payload, nil +} + +// DeleteVolume ... +func (f *IBMPIVolumeClient) DeleteVolume(id string, powerinstanceid string, timeout time.Duration) error { + params := p_cloud_volumes.NewPcloudCloudinstancesVolumesDeleteParamsWithTimeout(helpers.PIDeleteTimeOut).WithCloudInstanceID(powerinstanceid).WithVolumeID(id) + _, err := f.session.Power.PCloudVolumes.PcloudCloudinstancesVolumesDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid)) + if err != nil { + return fmt.Errorf("Failed to Delete PI Instance Volume %s :%s", id, err) + } + return nil +} + +//Create .. 
+// To be deprecated
+func (f *IBMPIVolumeClient) Create(volumename string, volumesize float64, volumetype string, volumeshareable bool, powerinstanceid string, timeout time.Duration) (*models.Volume, error) {
+
+    var body = models.CreateDataVolume{
+        Name:      &volumename,
+        Size:      &volumesize,
+        DiskType:  volumetype,
+        Shareable: &volumeshareable,
+    }
+
+    params := p_cloud_volumes.NewPcloudCloudinstancesVolumesPostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithBody(&body)
+    resp, err := f.session.Power.PCloudVolumes.PcloudCloudinstancesVolumesPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Create PI Instance Volume %s :%s", volumename, err)
+    }
+    return resp.Payload, nil
+}
+
+// Delete ...
+func (f *IBMPIVolumeClient) Delete(id string, powerinstanceid string, timeout time.Duration) error {
+    params := p_cloud_volumes.NewPcloudCloudinstancesVolumesDeleteParamsWithTimeout(helpers.PIDeleteTimeOut).WithCloudInstanceID(powerinstanceid).WithVolumeID(id)
+    _, err := f.session.Power.PCloudVolumes.PcloudCloudinstancesVolumesDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil {
+        return fmt.Errorf("Failed to Delete PI Instance Volume %s :%s", id, err)
+    }
+    return nil
+}
+
+// Update ...
+func (f *IBMPIVolumeClient) Update(id, volumename string, volumesize float64, volumeshare bool, powerinstanceid string, timeout time.Duration) (*models.Volume, error) {
+
+    // all fields come straight from the parameters, so they are always set
+    patchbody := models.UpdateVolume{
+        Name:      &volumename,
+        Size:      volumesize,
+        Shareable: &volumeshare,
+    }
+
+    params := p_cloud_volumes.NewPcloudCloudinstancesVolumesPutParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithVolumeID(id).WithBody(&patchbody)
+    resp, err := f.session.Power.PCloudVolumes.PcloudCloudinstancesVolumesPut(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Update PI Instance Volume %s :%s", id, err)
+    }
+    return resp.Payload, nil
+}
+
+// Attach a volume
+func (f *IBMPIVolumeClient) Attach(id, volumename string, powerinstanceid string, timeout time.Duration) (models.Object, error) {
+    params := p_cloud_volumes.NewPcloudPvminstancesVolumesPostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(id).WithVolumeID(volumename)
+    resp, err := f.session.Power.PCloudVolumes.PcloudPvminstancesVolumesPost(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, fmt.Errorf("Failed to Attach PI Instance Volume %s :%s", id, err)
+    }
+    return resp.Payload, nil
+
+}
+
+// Detach a volume
+func (f *IBMPIVolumeClient) Detach(id, volumename string, powerinstanceid string, timeout time.Duration) (models.Object, error) {
+    params := p_cloud_volumes.NewPcloudPvminstancesVolumesDeleteParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(id).WithVolumeID(volumename)
+    resp, err := f.session.Power.PCloudVolumes.PcloudPvminstancesVolumesDelete(params, ibmpisession.NewAuth(f.session, powerinstanceid))
+
+    if err != nil || resp == nil || resp.Payload == nil {
+        return nil, 
fmt.Errorf("Failed to detach the volume [%s ] for pvm instance with id [%s]: %s", volumename, id, err) + } + return resp.Payload, nil + +} + +// GetAll volumes part of an instance +func (f *IBMPIVolumeClient) GetAll(id, cloudInstanceID string, timeout time.Duration) (*models.Volumes, error) { + params := p_cloud_volumes.NewPcloudPvminstancesVolumesGetallParamsWithTimeout(helpers.PIGetTimeOut).WithPvmInstanceID(id).WithCloudInstanceID(cloudInstanceID) + resp, err := f.session.Power.PCloudVolumes.PcloudPvminstancesVolumesGetall(params, ibmpisession.NewAuth(f.session, cloudInstanceID)) + if err != nil || resp == nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to Get all PI Instance Volumes %s :%s", id, err) + } + return resp.Payload, nil + +} + +// SetBootVolume as the boot volume - PUT Operation +func (f *IBMPIVolumeClient) SetBootVolume(id, volumename, cloudInstanceID string, timeout time.Duration) (models.Object, error) { + params := p_cloud_volumes.NewPcloudPvminstancesVolumesSetbootPutParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(cloudInstanceID).WithPvmInstanceID(id).WithVolumeID(volumename) + resp, err := f.session.Power.PCloudVolumes.PcloudPvminstancesVolumesSetbootPut(params, ibmpisession.NewAuth(f.session, cloudInstanceID)) + if err != nil || resp == nil || resp.Payload == nil { + //return nil, errors.ToError(err) + return nil, fmt.Errorf("Failed to set the boot volume %s for cloud instance id [%s] ", volumename, cloudInstanceID) + } + return resp.Payload, nil +} + +// CheckVolumeAttach if the volume is attached to the instance +func (f *IBMPIVolumeClient) CheckVolumeAttach(cloudInstanceID, pvmInstanceID, volumeID string, timeout time.Duration) (*models.Volume, error) { + params := p_cloud_volumes.NewPcloudPvminstancesVolumesGetParamsWithTimeout(helpers.PIGetTimeOut).WithCloudInstanceID(cloudInstanceID).WithPvmInstanceID(pvmInstanceID).WithVolumeID(volumeID) + resp, err := f.session.Power.PCloudVolumes.PcloudPvminstancesVolumesGet(params, ibmpisession.NewAuth(f.session, cloudInstanceID)) + if err != nil || resp == nil || resp.Payload == nil { + return nil, fmt.Errorf("Failed to validate that the volume [%s] is attached to the pvminstance [%s]: %s", volumeID, pvmInstanceID, err) + } + return resp.Payload, nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/helpers/constants.go b/vendor/github.com/IBM-Cloud/power-go-client/helpers/constants.go new file mode 100644 index 00000000000..000f47b8c71 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/helpers/constants.go @@ -0,0 +1,148 @@ +package helpers + +import "time" + +const ( + // IBM PI Instance + + PIInstanceName = "pi_instance_name" + PIInstanceDate = "pi_creation_date" + PIInstanceSSHKeyName = "pi_key_pair_name" + PIInstanceImageName = "pi_image_id" + PIInstanceProcessors = "pi_processors" + PIInstanceProcType = "pi_proc_type" + PIInstanceMemory = "pi_memory" + PIInstanceSystemType = "pi_sys_type" + PIInstanceId = "pi_instance_id" + PIInstanceDiskSize = "pi_disk_size" + PIInstanceStatus = "pi_instance_status" + PIInstanceMinProc = "pi_minproc" + PIInstanceVolumeIds = "pi_volume_ids" + PIInstanceNetworkIds = "pi_network_ids" + PIInstancePublicNetwork = "pi_public_network" + PIInstanceMigratable = "pi_migratable" + PICloudInstanceId = "pi_cloud_instance_id" + PICloudInstanceSubnetName = "pi_cloud_instance_subnet_name" + PIInstanceMimMem = "pi_minmem" + PIInstanceMaxProc = "pi_maxproc" + PIInstanceMaxMem = "pi_maxmem" + PIInstanceReboot = "pi_reboot" + PITenantId = 
"pi_tenant_id" + PIVirtualCoresAssigned = "pi_virtual_cores_assigned" + PIVirtualCoresMax = "pi_virtual_cores_max" + PIVirtualCoresMin = "pi_virutal_cores_min" + + PIInstanceHealthStatus = "pi_health_status" + PIInstanceReplicants = "pi_replicants" + PIInstanceReplicationPolicy = "pi_replication_policy" + PIInstanceReplicationScheme = "pi_replication_scheme" + PIInstanceProgress = "pi_progress" + PIInstanceUserData = "pi_user_data" + PIInstancePinPolicy = "pi_pin_policy" + + // IBM PI Volume + PIVolumeName = "pi_volume_name" + PIVolumeSize = "pi_volume_size" + PIVolumeType = "pi_volume_type" + PIVolumeShareable = "pi_volume_shareable" + PIVolumeId = "pi_volume_id" + PIVolumeStatus = "pi_volume_status" + PIVolumeWWN = "pi_volume_wwn" + PIVolumeDeleteOnTerminate = "pi_volume_delete_on_terminate" + PIVolumeCreateDate = "pi_volume_create_date" + PIVolumeLastUpdate = "pi_last_updated_date" + PIVolumePool = "pi_volume_pool" + PIAffinityPolicy = "pi_volume_affinity_policy" + PIAffinityVolume = "pi_volume_affinity" + + // IBM PI Snapshots + + PISnapshot = "pi_snap_shot_id" + PISnapshotName = "pi_snap_shot_name" + PISnapshotStatus = "pi_snap_shot_status" + PISnapshotAction = "pi_snap_shot_action" + PISnapshotComplete = "pi_snap_shot_complete" + + // IBM PI Image + + PIImageName = "pi_image_name" + PIImageAccessKey = "pi_image_access_key" + PIImageSecretKey = "pi_image_secret_key" + PIImageSource = "pi_image_source" + PIImageBucketName = "pi_image_bucket_name" + PIImageFileName = "pi_image_file_name" + PIImageRegion = "pi_image_region" + PIImageDisk = "pi_image_disk" + PIImageCopyID = "pi_image_copy_id" + PIImagePath = "pi_image_path" + PIImageOsType = "pi_image_os_type" + + // IBM PI Key + + PIKeyName = "pi_key_name" + PIKey = "pi_ssh_key" + PIKeyDate = "pi_creation_date" + PIKeyId = "pi_key_id" + + // IBM PI Network + + PINetworkReady = "ready" + PINetworkID = "pi_networkid" + PINetworkName = "pi_network_name" + PINetworkCidr = "pi_cidr" + PINetworkDNS = "pi_dns" + PINetworkType = "pi_network_type" + PINetworkGateway = "pi_gateway" + PINetworkIPAddressRange = "pi_ipaddress_range" + PINetworkVlanId = "pi_vlan_id" + PINetworkProvisioning = "build" + PINetworkPortDescription = "pi_network_port_description" + PINetworkPortIPAddress = "pi_network_port_ipaddress" + PINetworkPortMacAddress = "pi_network_port_macaddress" + PINetworkPortStatus = "pi_network_port_status" + PINetworkPortPortID = "pi_network_port_portid" + + // IBM PI Operations + PIInstanceOperationType = "pi_operation" + PIInstanceOperationProgress = "pi_progress" + PIInstanceOperationStatus = "pi_status" + PIInstanceOperationServerName = "pi_instance_name" + + // IBM PI Volume Attach + PIVolumeAttachName = "pi_volume_attach_name" + PIVolumeAllowableAttachStatus = "in-use" + PIVolumeAttachStatus = "status" + PowerVolumeAttachDeleting = "deleting" + PowerVolumeAttachProvisioning = "creating" + PowerVolumeAttachProvisioningDone = "available" + + // IBM PI Instance Capture + PIInstanceCaptureName = "pi_capture_name" + PIInstanceCaptureDestination = "pi_capture_destination" + PIInstanceCaptureVolumeIds = "pi_capture_volume_ids" + PIInstanceCaptureCloudStorageImagePath = "pi_capture_storage_image_path" + PIInstanceCaptureCloudStorageRegion = "pi_capture_cloud_storage_region" + PIInstanceCaptureCloudStorageAccessKey = "pi_capture_cloud_storage_access_key" + PIInstanceCaptureCloudStorageSecretKey = "pi_capture_cloud_storage_secret_key" + + // Status For all the resources + + PIVolumeDeleting = "deleting" + PIVolumeDeleted = "done" + 
PIVolumeProvisioning = "creating"
+	PIVolumeProvisioningDone = "available"
+	PIInstanceAvailable      = "ACTIVE"
+	PIInstanceHealthOk       = "OK"
+	PIInstanceHealthWarning  = "WARNING"
+	PIInstanceBuilding       = "BUILD"
+	PIInstanceDeleting       = "DELETING"
+	PIInstanceNotFound       = "Not Found"
+	PIImageQueStatus         = "queued"
+	PIImageActiveStatus      = "active"
+
+	// Timeout values for Power VS
+
+	PICreateTimeOut = 5 * time.Minute
+	PIDeleteTimeOut = 3 * time.Minute
+	PIGetTimeOut    = 2 * time.Minute
+)
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/helpers/env.go b/vendor/github.com/IBM-Cloud/power-go-client/helpers/env.go
new file mode 100644
index 00000000000..3b65d26bb4a
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/helpers/env.go
@@ -0,0 +1,13 @@
+package helpers
+
+// EnvFallBack returns the value of the first environment variable in envs that
+// is set and non-empty, falling back to defaultValue otherwise.
+import "os"
+
+func EnvFallBack(envs []string, defaultValue string) string {
+	for _, k := range envs {
+		if v := os.Getenv(k); v != "" {
+			return v
+		}
+	}
+	return defaultValue
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/ibmpisession/ibmpowersession.go b/vendor/github.com/IBM-Cloud/power-go-client/ibmpisession/ibmpowersession.go
new file mode 100644
index 00000000000..4b7d4a54fea
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/ibmpisession/ibmpowersession.go
@@ -0,0 +1,130 @@
+/*
+Package ibmpisession calls the IBM IAM services and builds the session object
+that the Power Colo (Power VS) clients use for authenticated API calls.
+*/
+package ibmpisession
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"io"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/runtime"
+	httptransport "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/IBM-Cloud/power-go-client/power/client"
+	"github.com/IBM-Cloud/power-go-client/power/models"
+	"github.com/IBM-Cloud/power-go-client/utils"
+)
+
+const (
+	offering                 = "power-iaas"
+	crnString                = "crn"
+	version                  = "v1"
+	service                  = "bluemix"
+	serviceType              = "public"
+	serviceInstanceSeparator = "/"
+	separator                = ":"
+)
+
+// IBMPISession holds the IAM credentials and the generated Power IaaS client.
+type IBMPISession struct {
+	IAMToken    string
+	IMSToken    string
+	Power       *client.PowerIaas
+	Timeout     time.Duration
+	UserAccount string
+	Region      string
+	Zone        string
+}
+
+// powerJSONConsumer decodes JSON response bodies while preserving number
+// formats; a literal "null" body or a decode failure is logged and swallowed.
+func powerJSONConsumer() runtime.Consumer {
+	return runtime.ConsumerFunc(func(reader io.Reader, data interface{}) error {
+		buf := new(bytes.Buffer)
+		_, err := buf.ReadFrom(reader)
+		if err != nil {
+			return err
+		}
+		b := buf.Bytes()
+		if b != nil {
+			dec := json.NewDecoder(bytes.NewReader(b))
+			dec.UseNumber() // preserve number formats
+			err = dec.Decode(data)
+		}
+		if string(b) == "null" || err != nil {
+			// Guard the type assertion so a non-Error target cannot panic.
+			if errorRecord, ok := data.(*models.Error); ok && errorRecord != nil {
+				log.Printf("the error record is %s", errorRecord.Error)
+			}
+			return nil
+		}
+		return err
+	})
+}
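+
+// Usage sketch (illustrative only; iamToken, accountID, and powerInstanceID
+// are placeholders rather than values defined in this package):
+//
+//	session, err := ibmpisession.New(iamToken, "us-south", false, 30*time.Minute, accountID, "")
+//	if err != nil {
+//		// handle error
+//	}
+//	auth := ibmpisession.NewAuth(session, powerInstanceID)
+//	// auth is then passed along with each generated Power API operation.
+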
+// New builds an IBMPISession.
+/*
+Parameters:
+iamtoken    : the IAM bearer token passed in from the client
+region      : obtained from the Terraform template; every template/resource is required to carry this information
+debug       : enables debug output on the underlying transport
+timeout     : the default timeout applied to client operations
+useraccount : the IBM Cloud account ID used when building the service CRN
+zone        : optional zone; when set, it takes the region's place in the CRN
+*/
+func New(iamtoken, region string, debug bool, timeout time.Duration, useraccount string, zone string) (*IBMPISession, error) {
+	session := &IBMPISession{
+		IAMToken:    iamtoken,
+		UserAccount: useraccount,
+		Region:      region,
+		Zone:        zone,
+	}
+
+	// Keep TLS certificate verification enabled on the default transport.
+	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: false}
+	apiEndpointURL := utils.GetPowerEndPoint(region)
+	transport := httptransport.New(apiEndpointURL, "/", []string{"https"})
+	if debug {
+		transport.Debug = debug
+	}
+	transport.Consumers[runtime.JSONMime] = powerJSONConsumer()
+	session.Power = client.New(transport, nil)
+	session.Timeout = timeout
+	return session, nil
+}
+
+// NewAuth returns a ClientAuthInfoWriter that sets the Authorization and CRN
+// headers for the given Power service instance.
+func NewAuth(sess *IBMPISession, PowerInstanceID string) runtime.ClientAuthInfoWriter {
+	var crndata = crnBuilder(PowerInstanceID, sess.UserAccount, sess.Region, sess.Zone)
+	return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
+		if err := r.SetHeaderParam("Authorization", sess.IAMToken); err != nil {
+			return err
+		}
+		return r.SetHeaderParam("CRN", crndata)
+	})
+}
+
+// BearerTokenAndCRN returns a ClientAuthInfoWriter for a caller-supplied CRN.
+func BearerTokenAndCRN(session *IBMPISession, crn string) runtime.ClientAuthInfoWriter {
+	return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
+		if err := r.SetHeaderParam("Authorization", session.IAMToken); err != nil {
+			return err
+		}
+		return r.SetHeaderParam("CRN", crn)
+	})
+}
+
+// crnBuilder assembles the service instance CRN, for example:
+// crn:v1:bluemix:public:power-iaas:us-south:a/<account-id>:<instance-id>::
+func crnBuilder(powerinstance, useraccount, region string, zone string) string {
+	var crnData string
+	if zone == "" {
+		crnData = crnString + separator + version + separator + service + separator + serviceType + separator + offering + separator + region + separator + "a" + serviceInstanceSeparator + useraccount + separator + powerinstance + separator + separator
+	} else {
+		crnData = crnString + separator + version + separator + service + separator + serviceType + separator + offering + separator + zone + separator + "a" + serviceInstanceSeparator + useraccount + separator + powerinstance + separator + separator
+	}
+	return crnData
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/authentication_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/authentication_client.go
new file mode 100644
index 00000000000..d55f6b5f5e8
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/authentication_client.go
@@ -0,0 +1,313 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package authentication
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"github.com/go-openapi/runtime"
+
+	strfmt "github.com/go-openapi/strfmt"
+)
+
+// New creates a new authentication API client.
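+// The transport and formats registry are normally supplied by the generated
+// Power facade client (see ibmpisession.New above), though any
+// runtime.ClientTransport works.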
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for authentication API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +ServiceBrokerAuthCallback returns an access token and set cookie +*/ +func (a *Client) ServiceBrokerAuthCallback(params *ServiceBrokerAuthCallbackParams) (*ServiceBrokerAuthCallbackOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthCallbackParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.callback", + Method: "GET", + PathPattern: "/auth/v1/callback", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthCallbackReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthCallbackOK), nil + +} + +/* +ServiceBrokerAuthDeviceCodePost requests a authorization device code +*/ +func (a *Client) ServiceBrokerAuthDeviceCodePost(params *ServiceBrokerAuthDeviceCodePostParams) (*ServiceBrokerAuthDeviceCodePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthDeviceCodePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.device.code.post", + Method: "POST", + PathPattern: "/auth/v1/device/code", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthDeviceCodePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthDeviceCodePostOK), nil + +} + +/* +ServiceBrokerAuthDeviceTokenPost polls for authorization device token +*/ +func (a *Client) ServiceBrokerAuthDeviceTokenPost(params *ServiceBrokerAuthDeviceTokenPostParams) (*ServiceBrokerAuthDeviceTokenPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthDeviceTokenPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.device.token.post", + Method: "POST", + PathPattern: "/auth/v1/device/token", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthDeviceTokenPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthDeviceTokenPostOK), nil + +} + +/* +ServiceBrokerAuthInfoToken information about current access token +*/ +func (a *Client) ServiceBrokerAuthInfoToken(params *ServiceBrokerAuthInfoTokenParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBrokerAuthInfoTokenOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthInfoTokenParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.info.token", + Method: "GET", + PathPattern: "/auth/v1/info/token", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + 
Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthInfoTokenReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthInfoTokenOK), nil + +} + +/* +ServiceBrokerAuthInfoUser information about current user +*/ +func (a *Client) ServiceBrokerAuthInfoUser(params *ServiceBrokerAuthInfoUserParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBrokerAuthInfoUserOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthInfoUserParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.info.user", + Method: "GET", + PathPattern: "/auth/v1/info/user", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthInfoUserReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthInfoUserOK), nil + +} + +/* +ServiceBrokerAuthLogin logins +*/ +func (a *Client) ServiceBrokerAuthLogin(params *ServiceBrokerAuthLoginParams) (*ServiceBrokerAuthLoginOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthLoginParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.login", + Method: "GET", + PathPattern: "/auth/v1/login", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthLoginReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthLoginOK), nil + +} + +/* +ServiceBrokerAuthLogout logouts +*/ +func (a *Client) ServiceBrokerAuthLogout(params *ServiceBrokerAuthLogoutParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBrokerAuthLogoutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthLogoutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.logout", + Method: "GET", + PathPattern: "/auth/v1/logout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthLogoutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthLogoutOK), nil + +} + +/* +ServiceBrokerAuthRegistration registrations of a new tenant and login +*/ +func (a *Client) ServiceBrokerAuthRegistration(params *ServiceBrokerAuthRegistrationParams) (*ServiceBrokerAuthRegistrationOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthRegistrationParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.registration", + Method: "GET", + PathPattern: "/auth/v1/registration", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthRegistrationReader{formats: 
a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthRegistrationOK), nil + +} + +/* +ServiceBrokerAuthRegistrationCallback associates the user with a tenant and returns an access token +*/ +func (a *Client) ServiceBrokerAuthRegistrationCallback(params *ServiceBrokerAuthRegistrationCallbackParams) (*ServiceBrokerAuthRegistrationCallbackOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthRegistrationCallbackParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.registration.callback", + Method: "GET", + PathPattern: "/auth/v1/callback-registration", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthRegistrationCallbackReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthRegistrationCallbackOK), nil + +} + +/* +ServiceBrokerAuthTokenPost requests a new token from a refresh token +*/ +func (a *Client) ServiceBrokerAuthTokenPost(params *ServiceBrokerAuthTokenPostParams) (*ServiceBrokerAuthTokenPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerAuthTokenPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.auth.token.post", + Method: "POST", + PathPattern: "/auth/v1/token", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerAuthTokenPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerAuthTokenPostOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_callback_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_callback_parameters.go new file mode 100644 index 00000000000..01246a8f5db --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_callback_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerAuthCallbackParams creates a new ServiceBrokerAuthCallbackParams object +// with the default values initialized. 
+func NewServiceBrokerAuthCallbackParams() *ServiceBrokerAuthCallbackParams { + + return &ServiceBrokerAuthCallbackParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthCallbackParamsWithTimeout creates a new ServiceBrokerAuthCallbackParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthCallbackParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthCallbackParams { + + return &ServiceBrokerAuthCallbackParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerAuthCallbackParamsWithContext creates a new ServiceBrokerAuthCallbackParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthCallbackParamsWithContext(ctx context.Context) *ServiceBrokerAuthCallbackParams { + + return &ServiceBrokerAuthCallbackParams{ + + Context: ctx, + } +} + +// NewServiceBrokerAuthCallbackParamsWithHTTPClient creates a new ServiceBrokerAuthCallbackParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthCallbackParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthCallbackParams { + + return &ServiceBrokerAuthCallbackParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerAuthCallbackParams contains all the parameters to send to the API endpoint +for the service broker auth callback operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthCallbackParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth callback params +func (o *ServiceBrokerAuthCallbackParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthCallbackParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker auth callback params +func (o *ServiceBrokerAuthCallbackParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth callback params +func (o *ServiceBrokerAuthCallbackParams) WithContext(ctx context.Context) *ServiceBrokerAuthCallbackParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth callback params +func (o *ServiceBrokerAuthCallbackParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth callback params +func (o *ServiceBrokerAuthCallbackParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthCallbackParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth callback params +func (o *ServiceBrokerAuthCallbackParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthCallbackParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_callback_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_callback_responses.go new file mode 100644 index 00000000000..5142d6352f2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_callback_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthCallbackReader is a Reader for the ServiceBrokerAuthCallback structure. +type ServiceBrokerAuthCallbackReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerAuthCallbackReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthCallbackOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 401: + result := NewServiceBrokerAuthCallbackUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerAuthCallbackInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthCallbackOK creates a ServiceBrokerAuthCallbackOK with default headers values +func NewServiceBrokerAuthCallbackOK() *ServiceBrokerAuthCallbackOK { + return &ServiceBrokerAuthCallbackOK{} +} + +/*ServiceBrokerAuthCallbackOK handles this case with default header values. + +OK +*/ +type ServiceBrokerAuthCallbackOK struct { + Payload *models.AccessToken +} + +func (o *ServiceBrokerAuthCallbackOK) Error() string { + return fmt.Sprintf("[GET /auth/v1/callback][%d] serviceBrokerAuthCallbackOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthCallbackOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.AccessToken) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthCallbackUnauthorized creates a ServiceBrokerAuthCallbackUnauthorized with default headers values +func NewServiceBrokerAuthCallbackUnauthorized() *ServiceBrokerAuthCallbackUnauthorized { + return &ServiceBrokerAuthCallbackUnauthorized{} +} + +/*ServiceBrokerAuthCallbackUnauthorized handles this case with default header values. 
+ +Unauthorized +*/ +type ServiceBrokerAuthCallbackUnauthorized struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthCallbackUnauthorized) Error() string { + return fmt.Sprintf("[GET /auth/v1/callback][%d] serviceBrokerAuthCallbackUnauthorized %+v", 401, o.Payload) +} + +func (o *ServiceBrokerAuthCallbackUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthCallbackInternalServerError creates a ServiceBrokerAuthCallbackInternalServerError with default headers values +func NewServiceBrokerAuthCallbackInternalServerError() *ServiceBrokerAuthCallbackInternalServerError { + return &ServiceBrokerAuthCallbackInternalServerError{} +} + +/*ServiceBrokerAuthCallbackInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerAuthCallbackInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthCallbackInternalServerError) Error() string { + return fmt.Sprintf("[GET /auth/v1/callback][%d] serviceBrokerAuthCallbackInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthCallbackInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_code_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_code_post_parameters.go new file mode 100644 index 00000000000..e58444e88c0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_code_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerAuthDeviceCodePostParams creates a new ServiceBrokerAuthDeviceCodePostParams object +// with the default values initialized. 
+func NewServiceBrokerAuthDeviceCodePostParams() *ServiceBrokerAuthDeviceCodePostParams { + + return &ServiceBrokerAuthDeviceCodePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthDeviceCodePostParamsWithTimeout creates a new ServiceBrokerAuthDeviceCodePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthDeviceCodePostParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthDeviceCodePostParams { + + return &ServiceBrokerAuthDeviceCodePostParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerAuthDeviceCodePostParamsWithContext creates a new ServiceBrokerAuthDeviceCodePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthDeviceCodePostParamsWithContext(ctx context.Context) *ServiceBrokerAuthDeviceCodePostParams { + + return &ServiceBrokerAuthDeviceCodePostParams{ + + Context: ctx, + } +} + +// NewServiceBrokerAuthDeviceCodePostParamsWithHTTPClient creates a new ServiceBrokerAuthDeviceCodePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthDeviceCodePostParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthDeviceCodePostParams { + + return &ServiceBrokerAuthDeviceCodePostParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerAuthDeviceCodePostParams contains all the parameters to send to the API endpoint +for the service broker auth device code post operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthDeviceCodePostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth device code post params +func (o *ServiceBrokerAuthDeviceCodePostParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthDeviceCodePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker auth device code post params +func (o *ServiceBrokerAuthDeviceCodePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth device code post params +func (o *ServiceBrokerAuthDeviceCodePostParams) WithContext(ctx context.Context) *ServiceBrokerAuthDeviceCodePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth device code post params +func (o *ServiceBrokerAuthDeviceCodePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth device code post params +func (o *ServiceBrokerAuthDeviceCodePostParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthDeviceCodePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth device code post params +func (o *ServiceBrokerAuthDeviceCodePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthDeviceCodePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_code_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_code_post_responses.go new file mode 100644 index 00000000000..b16dc9de93c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_code_post_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthDeviceCodePostReader is a Reader for the ServiceBrokerAuthDeviceCodePost structure. +type ServiceBrokerAuthDeviceCodePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerAuthDeviceCodePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthDeviceCodePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 403: + result := NewServiceBrokerAuthDeviceCodePostForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerAuthDeviceCodePostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthDeviceCodePostOK creates a ServiceBrokerAuthDeviceCodePostOK with default headers values +func NewServiceBrokerAuthDeviceCodePostOK() *ServiceBrokerAuthDeviceCodePostOK { + return &ServiceBrokerAuthDeviceCodePostOK{} +} + +/*ServiceBrokerAuthDeviceCodePostOK handles this case with default header values. + +OK +*/ +type ServiceBrokerAuthDeviceCodePostOK struct { + Payload *models.DeviceCode +} + +func (o *ServiceBrokerAuthDeviceCodePostOK) Error() string { + return fmt.Sprintf("[POST /auth/v1/device/code][%d] serviceBrokerAuthDeviceCodePostOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthDeviceCodePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.DeviceCode) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthDeviceCodePostForbidden creates a ServiceBrokerAuthDeviceCodePostForbidden with default headers values +func NewServiceBrokerAuthDeviceCodePostForbidden() *ServiceBrokerAuthDeviceCodePostForbidden { + return &ServiceBrokerAuthDeviceCodePostForbidden{} +} + +/*ServiceBrokerAuthDeviceCodePostForbidden handles this case with default header values. 
+ +Quota exceeded +*/ +type ServiceBrokerAuthDeviceCodePostForbidden struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthDeviceCodePostForbidden) Error() string { + return fmt.Sprintf("[POST /auth/v1/device/code][%d] serviceBrokerAuthDeviceCodePostForbidden %+v", 403, o.Payload) +} + +func (o *ServiceBrokerAuthDeviceCodePostForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthDeviceCodePostInternalServerError creates a ServiceBrokerAuthDeviceCodePostInternalServerError with default headers values +func NewServiceBrokerAuthDeviceCodePostInternalServerError() *ServiceBrokerAuthDeviceCodePostInternalServerError { + return &ServiceBrokerAuthDeviceCodePostInternalServerError{} +} + +/*ServiceBrokerAuthDeviceCodePostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerAuthDeviceCodePostInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthDeviceCodePostInternalServerError) Error() string { + return fmt.Sprintf("[POST /auth/v1/device/code][%d] serviceBrokerAuthDeviceCodePostInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthDeviceCodePostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_token_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_token_post_parameters.go new file mode 100644 index 00000000000..c8dce41e672 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_token_post_parameters.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerAuthDeviceTokenPostParams creates a new ServiceBrokerAuthDeviceTokenPostParams object +// with the default values initialized. 
+func NewServiceBrokerAuthDeviceTokenPostParams() *ServiceBrokerAuthDeviceTokenPostParams { + var () + return &ServiceBrokerAuthDeviceTokenPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthDeviceTokenPostParamsWithTimeout creates a new ServiceBrokerAuthDeviceTokenPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthDeviceTokenPostParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthDeviceTokenPostParams { + var () + return &ServiceBrokerAuthDeviceTokenPostParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerAuthDeviceTokenPostParamsWithContext creates a new ServiceBrokerAuthDeviceTokenPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthDeviceTokenPostParamsWithContext(ctx context.Context) *ServiceBrokerAuthDeviceTokenPostParams { + var () + return &ServiceBrokerAuthDeviceTokenPostParams{ + + Context: ctx, + } +} + +// NewServiceBrokerAuthDeviceTokenPostParamsWithHTTPClient creates a new ServiceBrokerAuthDeviceTokenPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthDeviceTokenPostParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthDeviceTokenPostParams { + var () + return &ServiceBrokerAuthDeviceTokenPostParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerAuthDeviceTokenPostParams contains all the parameters to send to the API endpoint +for the service broker auth device token post operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthDeviceTokenPostParams struct { + + /*Body + Parameters for polling authorization device code + + */ + Body ServiceBrokerAuthDeviceTokenPostBody + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth device token post params +func (o *ServiceBrokerAuthDeviceTokenPostParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthDeviceTokenPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker auth device token post params +func (o *ServiceBrokerAuthDeviceTokenPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth device token post params +func (o *ServiceBrokerAuthDeviceTokenPostParams) WithContext(ctx context.Context) *ServiceBrokerAuthDeviceTokenPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth device token post params +func (o *ServiceBrokerAuthDeviceTokenPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth device token post params +func (o *ServiceBrokerAuthDeviceTokenPostParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthDeviceTokenPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth device token post params +func (o *ServiceBrokerAuthDeviceTokenPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the service broker auth device token post params +func (o *ServiceBrokerAuthDeviceTokenPostParams) WithBody(body ServiceBrokerAuthDeviceTokenPostBody) *ServiceBrokerAuthDeviceTokenPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the 
service broker auth device token post params +func (o *ServiceBrokerAuthDeviceTokenPostParams) SetBody(body ServiceBrokerAuthDeviceTokenPostBody) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthDeviceTokenPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_token_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_token_post_responses.go new file mode 100644 index 00000000000..72c28f8be72 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_device_token_post_responses.go @@ -0,0 +1,244 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthDeviceTokenPostReader is a Reader for the ServiceBrokerAuthDeviceTokenPost structure. +type ServiceBrokerAuthDeviceTokenPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerAuthDeviceTokenPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthDeviceTokenPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerAuthDeviceTokenPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 403: + result := NewServiceBrokerAuthDeviceTokenPostForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 429: + result := NewServiceBrokerAuthDeviceTokenPostTooManyRequests() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerAuthDeviceTokenPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthDeviceTokenPostOK creates a ServiceBrokerAuthDeviceTokenPostOK with default headers values +func NewServiceBrokerAuthDeviceTokenPostOK() *ServiceBrokerAuthDeviceTokenPostOK { + return &ServiceBrokerAuthDeviceTokenPostOK{} +} + +/*ServiceBrokerAuthDeviceTokenPostOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerAuthDeviceTokenPostOK struct { + Payload *models.Token +} + +func (o *ServiceBrokerAuthDeviceTokenPostOK) Error() string { + return fmt.Sprintf("[POST /auth/v1/device/token][%d] serviceBrokerAuthDeviceTokenPostOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthDeviceTokenPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Token) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthDeviceTokenPostBadRequest creates a ServiceBrokerAuthDeviceTokenPostBadRequest with default headers values +func NewServiceBrokerAuthDeviceTokenPostBadRequest() *ServiceBrokerAuthDeviceTokenPostBadRequest { + return &ServiceBrokerAuthDeviceTokenPostBadRequest{} +} + +/*ServiceBrokerAuthDeviceTokenPostBadRequest handles this case with default header values. + +Authorization pending +*/ +type ServiceBrokerAuthDeviceTokenPostBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthDeviceTokenPostBadRequest) Error() string { + return fmt.Sprintf("[POST /auth/v1/device/token][%d] serviceBrokerAuthDeviceTokenPostBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerAuthDeviceTokenPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthDeviceTokenPostForbidden creates a ServiceBrokerAuthDeviceTokenPostForbidden with default headers values +func NewServiceBrokerAuthDeviceTokenPostForbidden() *ServiceBrokerAuthDeviceTokenPostForbidden { + return &ServiceBrokerAuthDeviceTokenPostForbidden{} +} + +/*ServiceBrokerAuthDeviceTokenPostForbidden handles this case with default header values. + +User refused grant +*/ +type ServiceBrokerAuthDeviceTokenPostForbidden struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthDeviceTokenPostForbidden) Error() string { + return fmt.Sprintf("[POST /auth/v1/device/token][%d] serviceBrokerAuthDeviceTokenPostForbidden %+v", 403, o.Payload) +} + +func (o *ServiceBrokerAuthDeviceTokenPostForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthDeviceTokenPostTooManyRequests creates a ServiceBrokerAuthDeviceTokenPostTooManyRequests with default headers values +func NewServiceBrokerAuthDeviceTokenPostTooManyRequests() *ServiceBrokerAuthDeviceTokenPostTooManyRequests { + return &ServiceBrokerAuthDeviceTokenPostTooManyRequests{} +} + +/*ServiceBrokerAuthDeviceTokenPostTooManyRequests handles this case with default header values. 
+ +Polling too frequently +*/ +type ServiceBrokerAuthDeviceTokenPostTooManyRequests struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthDeviceTokenPostTooManyRequests) Error() string { + return fmt.Sprintf("[POST /auth/v1/device/token][%d] serviceBrokerAuthDeviceTokenPostTooManyRequests %+v", 429, o.Payload) +} + +func (o *ServiceBrokerAuthDeviceTokenPostTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthDeviceTokenPostInternalServerError creates a ServiceBrokerAuthDeviceTokenPostInternalServerError with default headers values +func NewServiceBrokerAuthDeviceTokenPostInternalServerError() *ServiceBrokerAuthDeviceTokenPostInternalServerError { + return &ServiceBrokerAuthDeviceTokenPostInternalServerError{} +} + +/*ServiceBrokerAuthDeviceTokenPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerAuthDeviceTokenPostInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthDeviceTokenPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /auth/v1/device/token][%d] serviceBrokerAuthDeviceTokenPostInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthDeviceTokenPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/*ServiceBrokerAuthDeviceTokenPostBody service broker auth device token post body +swagger:model ServiceBrokerAuthDeviceTokenPostBody +*/ +type ServiceBrokerAuthDeviceTokenPostBody struct { + + // The deviceCode that the authorization server returned + DeviceCode string `json:"deviceCode,omitempty"` +} + +// Validate validates this service broker auth device token post body +func (o *ServiceBrokerAuthDeviceTokenPostBody) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ServiceBrokerAuthDeviceTokenPostBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ServiceBrokerAuthDeviceTokenPostBody) UnmarshalBinary(b []byte) error { + var res ServiceBrokerAuthDeviceTokenPostBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_token_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_token_parameters.go new file mode 100644 index 00000000000..95609b0f2a6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_token_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerAuthInfoTokenParams creates a new ServiceBrokerAuthInfoTokenParams object +// with the default values initialized. +func NewServiceBrokerAuthInfoTokenParams() *ServiceBrokerAuthInfoTokenParams { + + return &ServiceBrokerAuthInfoTokenParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthInfoTokenParamsWithTimeout creates a new ServiceBrokerAuthInfoTokenParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthInfoTokenParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthInfoTokenParams { + + return &ServiceBrokerAuthInfoTokenParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerAuthInfoTokenParamsWithContext creates a new ServiceBrokerAuthInfoTokenParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthInfoTokenParamsWithContext(ctx context.Context) *ServiceBrokerAuthInfoTokenParams { + + return &ServiceBrokerAuthInfoTokenParams{ + + Context: ctx, + } +} + +// NewServiceBrokerAuthInfoTokenParamsWithHTTPClient creates a new ServiceBrokerAuthInfoTokenParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthInfoTokenParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthInfoTokenParams { + + return &ServiceBrokerAuthInfoTokenParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerAuthInfoTokenParams contains all the parameters to send to the API endpoint +for the service broker auth info token operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthInfoTokenParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth info token params +func (o *ServiceBrokerAuthInfoTokenParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthInfoTokenParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker auth info token params +func (o *ServiceBrokerAuthInfoTokenParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth info token params +func (o *ServiceBrokerAuthInfoTokenParams) WithContext(ctx context.Context) *ServiceBrokerAuthInfoTokenParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth info token params +func (o *ServiceBrokerAuthInfoTokenParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth info token params +func (o *ServiceBrokerAuthInfoTokenParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthInfoTokenParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth info token params +func (o *ServiceBrokerAuthInfoTokenParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthInfoTokenParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + 
return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_token_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_token_responses.go new file mode 100644 index 00000000000..0846f83f191 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_token_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthInfoTokenReader is a Reader for the ServiceBrokerAuthInfoToken structure. +type ServiceBrokerAuthInfoTokenReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerAuthInfoTokenReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthInfoTokenOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 500: + result := NewServiceBrokerAuthInfoTokenInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthInfoTokenOK creates a ServiceBrokerAuthInfoTokenOK with default headers values +func NewServiceBrokerAuthInfoTokenOK() *ServiceBrokerAuthInfoTokenOK { + return &ServiceBrokerAuthInfoTokenOK{} +} + +/*ServiceBrokerAuthInfoTokenOK handles this case with default header values. + +OK +*/ +type ServiceBrokerAuthInfoTokenOK struct { + Payload *models.TokenExtra +} + +func (o *ServiceBrokerAuthInfoTokenOK) Error() string { + return fmt.Sprintf("[GET /auth/v1/info/token][%d] serviceBrokerAuthInfoTokenOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthInfoTokenOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.TokenExtra) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthInfoTokenInternalServerError creates a ServiceBrokerAuthInfoTokenInternalServerError with default headers values +func NewServiceBrokerAuthInfoTokenInternalServerError() *ServiceBrokerAuthInfoTokenInternalServerError { + return &ServiceBrokerAuthInfoTokenInternalServerError{} +} + +/*ServiceBrokerAuthInfoTokenInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type ServiceBrokerAuthInfoTokenInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthInfoTokenInternalServerError) Error() string { + return fmt.Sprintf("[GET /auth/v1/info/token][%d] serviceBrokerAuthInfoTokenInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthInfoTokenInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_user_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_user_parameters.go new file mode 100644 index 00000000000..166a37bacb7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_user_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerAuthInfoUserParams creates a new ServiceBrokerAuthInfoUserParams object +// with the default values initialized. +func NewServiceBrokerAuthInfoUserParams() *ServiceBrokerAuthInfoUserParams { + + return &ServiceBrokerAuthInfoUserParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthInfoUserParamsWithTimeout creates a new ServiceBrokerAuthInfoUserParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthInfoUserParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthInfoUserParams { + + return &ServiceBrokerAuthInfoUserParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerAuthInfoUserParamsWithContext creates a new ServiceBrokerAuthInfoUserParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthInfoUserParamsWithContext(ctx context.Context) *ServiceBrokerAuthInfoUserParams { + + return &ServiceBrokerAuthInfoUserParams{ + + Context: ctx, + } +} + +// NewServiceBrokerAuthInfoUserParamsWithHTTPClient creates a new ServiceBrokerAuthInfoUserParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthInfoUserParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthInfoUserParams { + + return &ServiceBrokerAuthInfoUserParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerAuthInfoUserParams contains all the parameters to send to the API endpoint +for the service broker auth info user operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthInfoUserParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth info user params +func (o *ServiceBrokerAuthInfoUserParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthInfoUserParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the 
service broker auth info user params +func (o *ServiceBrokerAuthInfoUserParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth info user params +func (o *ServiceBrokerAuthInfoUserParams) WithContext(ctx context.Context) *ServiceBrokerAuthInfoUserParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth info user params +func (o *ServiceBrokerAuthInfoUserParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth info user params +func (o *ServiceBrokerAuthInfoUserParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthInfoUserParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth info user params +func (o *ServiceBrokerAuthInfoUserParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthInfoUserParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_user_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_user_responses.go new file mode 100644 index 00000000000..2e474c2a291 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_info_user_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthInfoUserReader is a Reader for the ServiceBrokerAuthInfoUser structure. +type ServiceBrokerAuthInfoUserReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerAuthInfoUserReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthInfoUserOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 500: + result := NewServiceBrokerAuthInfoUserInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthInfoUserOK creates a ServiceBrokerAuthInfoUserOK with default headers values +func NewServiceBrokerAuthInfoUserOK() *ServiceBrokerAuthInfoUserOK { + return &ServiceBrokerAuthInfoUserOK{} +} + +/*ServiceBrokerAuthInfoUserOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerAuthInfoUserOK struct { + Payload *models.UserInfo +} + +func (o *ServiceBrokerAuthInfoUserOK) Error() string { + return fmt.Sprintf("[GET /auth/v1/info/user][%d] serviceBrokerAuthInfoUserOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthInfoUserOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.UserInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthInfoUserInternalServerError creates a ServiceBrokerAuthInfoUserInternalServerError with default headers values +func NewServiceBrokerAuthInfoUserInternalServerError() *ServiceBrokerAuthInfoUserInternalServerError { + return &ServiceBrokerAuthInfoUserInternalServerError{} +} + +/*ServiceBrokerAuthInfoUserInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerAuthInfoUserInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthInfoUserInternalServerError) Error() string { + return fmt.Sprintf("[GET /auth/v1/info/user][%d] serviceBrokerAuthInfoUserInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthInfoUserInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_login_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_login_parameters.go new file mode 100644 index 00000000000..ee6f6cc20a4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_login_parameters.go @@ -0,0 +1,223 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerAuthLoginParams creates a new ServiceBrokerAuthLoginParams object +// with the default values initialized. 
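For reference, the constructor and With* setters that follow compose in the usual go-swagger builder style. A minimal sketch, assuming the generated Client facade from this package (which is not part of this hunk) eventually consumes the params; the access-type values and user id are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/IBM-Cloud/power-go-client/power/client/authentication"
)

func main() {
	// "online" is the generated default for AccessType; by OAuth convention
	// "offline" asks the broker to also return a refresh token. Both values
	// here are assumptions, as is the user id.
	accessType := "offline"
	userID := "user@example.com"

	params := authentication.NewServiceBrokerAuthLoginParams().
		WithTimeout(30 * time.Second).
		WithAccessType(&accessType).
		WithUserID(&userID)

	fmt.Println(*params.AccessType, *params.UserID)
}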
+func NewServiceBrokerAuthLoginParams() *ServiceBrokerAuthLoginParams { + var ( + accessTypeDefault = string("online") + ) + return &ServiceBrokerAuthLoginParams{ + AccessType: &accessTypeDefault, + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthLoginParamsWithTimeout creates a new ServiceBrokerAuthLoginParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthLoginParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthLoginParams { + var ( + accessTypeDefault = string("online") + ) + return &ServiceBrokerAuthLoginParams{ + AccessType: &accessTypeDefault, + + timeout: timeout, + } +} + +// NewServiceBrokerAuthLoginParamsWithContext creates a new ServiceBrokerAuthLoginParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthLoginParamsWithContext(ctx context.Context) *ServiceBrokerAuthLoginParams { + var ( + accessTypeDefault = string("online") + ) + return &ServiceBrokerAuthLoginParams{ + AccessType: &accessTypeDefault, + + Context: ctx, + } +} + +// NewServiceBrokerAuthLoginParamsWithHTTPClient creates a new ServiceBrokerAuthLoginParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthLoginParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthLoginParams { + var ( + accessTypeDefault = string("online") + ) + return &ServiceBrokerAuthLoginParams{ + AccessType: &accessTypeDefault, + HTTPClient: client, + } +} + +/*ServiceBrokerAuthLoginParams contains all the parameters to send to the API endpoint +for the service broker auth login operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthLoginParams struct { + + /*AccessType + Determines if a refresh token is returned + + */ + AccessType *string + /*RedirectURL + The URL to redirect to after login/registration + + */ + RedirectURL *string + /*UserID + The user id of the user + + */ + UserID *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthLoginParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) WithContext(ctx context.Context) *ServiceBrokerAuthLoginParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthLoginParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAccessType adds the accessType to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) WithAccessType(accessType *string) *ServiceBrokerAuthLoginParams { + o.SetAccessType(accessType) + return o 
+} + +// SetAccessType adds the accessType to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) SetAccessType(accessType *string) { + o.AccessType = accessType +} + +// WithRedirectURL adds the redirectURL to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) WithRedirectURL(redirectURL *string) *ServiceBrokerAuthLoginParams { + o.SetRedirectURL(redirectURL) + return o +} + +// SetRedirectURL adds the redirectUrl to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) SetRedirectURL(redirectURL *string) { + o.RedirectURL = redirectURL +} + +// WithUserID adds the userID to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) WithUserID(userID *string) *ServiceBrokerAuthLoginParams { + o.SetUserID(userID) + return o +} + +// SetUserID adds the userId to the service broker auth login params +func (o *ServiceBrokerAuthLoginParams) SetUserID(userID *string) { + o.UserID = userID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthLoginParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.AccessType != nil { + + // query param access_type + var qrAccessType string + if o.AccessType != nil { + qrAccessType = *o.AccessType + } + qAccessType := qrAccessType + if qAccessType != "" { + if err := r.SetQueryParam("access_type", qAccessType); err != nil { + return err + } + } + + } + + if o.RedirectURL != nil { + + // query param redirect_url + var qrRedirectURL string + if o.RedirectURL != nil { + qrRedirectURL = *o.RedirectURL + } + qRedirectURL := qrRedirectURL + if qRedirectURL != "" { + if err := r.SetQueryParam("redirect_url", qRedirectURL); err != nil { + return err + } + } + + } + + if o.UserID != nil { + + // query param user_id + var qrUserID string + if o.UserID != nil { + qrUserID = *o.UserID + } + qUserID := qrUserID + if qUserID != "" { + if err := r.SetQueryParam("user_id", qUserID); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_login_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_login_responses.go new file mode 100644 index 00000000000..cd304ee0e7b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_login_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthLoginReader is a Reader for the ServiceBrokerAuthLogin structure. +type ServiceBrokerAuthLoginReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
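The reader below follows the standard go-swagger contract: the 200 result is returned as the value, while 401 and 500 come back through the error path as the typed wrappers defined later in this file (each satisfies error via its Error() method). A hedged sketch of caller-side handling, with doLogin standing in for the generated Client method that is not part of this hunk:

package main

import (
	"fmt"

	"github.com/IBM-Cloud/power-go-client/power/client/authentication"
)

// handleLogin shows how the values produced by ServiceBrokerAuthLoginReader
// are typically consumed. doLogin is a hypothetical stand-in for the
// generated Client method.
func handleLogin(doLogin func(*authentication.ServiceBrokerAuthLoginParams) (*authentication.ServiceBrokerAuthLoginOK, error)) {
	ok, err := doLogin(authentication.NewServiceBrokerAuthLoginParams())
	if err != nil {
		// Non-2xx statuses arrive as typed errors, so the decoded payload
		// stays reachable through a type switch.
		switch e := err.(type) {
		case *authentication.ServiceBrokerAuthLoginUnauthorized:
			fmt.Println("401:", e.Payload)
		case *authentication.ServiceBrokerAuthLoginInternalServerError:
			fmt.Println("500:", e.Payload)
		default:
			// Transport failure, or an undocumented status wrapped in
			// runtime.NewAPIError by the default branch of ReadResponse.
			fmt.Println("unexpected:", err)
		}
		return
	}
	fmt.Println("token payload:", ok.Payload) // *models.AccessToken on 200
}

func main() {
	// Wire in a trivial fake so the sketch runs standalone.
	handleLogin(func(*authentication.ServiceBrokerAuthLoginParams) (*authentication.ServiceBrokerAuthLoginOK, error) {
		return &authentication.ServiceBrokerAuthLoginOK{}, nil
	})
}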
+func (o *ServiceBrokerAuthLoginReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthLoginOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 401: + result := NewServiceBrokerAuthLoginUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerAuthLoginInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthLoginOK creates a ServiceBrokerAuthLoginOK with default headers values +func NewServiceBrokerAuthLoginOK() *ServiceBrokerAuthLoginOK { + return &ServiceBrokerAuthLoginOK{} +} + +/*ServiceBrokerAuthLoginOK handles this case with default header values. + +OK +*/ +type ServiceBrokerAuthLoginOK struct { + Payload *models.AccessToken +} + +func (o *ServiceBrokerAuthLoginOK) Error() string { + return fmt.Sprintf("[GET /auth/v1/login][%d] serviceBrokerAuthLoginOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthLoginOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.AccessToken) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthLoginUnauthorized creates a ServiceBrokerAuthLoginUnauthorized with default headers values +func NewServiceBrokerAuthLoginUnauthorized() *ServiceBrokerAuthLoginUnauthorized { + return &ServiceBrokerAuthLoginUnauthorized{} +} + +/*ServiceBrokerAuthLoginUnauthorized handles this case with default header values. + +Unauthorized +*/ +type ServiceBrokerAuthLoginUnauthorized struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthLoginUnauthorized) Error() string { + return fmt.Sprintf("[GET /auth/v1/login][%d] serviceBrokerAuthLoginUnauthorized %+v", 401, o.Payload) +} + +func (o *ServiceBrokerAuthLoginUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthLoginInternalServerError creates a ServiceBrokerAuthLoginInternalServerError with default headers values +func NewServiceBrokerAuthLoginInternalServerError() *ServiceBrokerAuthLoginInternalServerError { + return &ServiceBrokerAuthLoginInternalServerError{} +} + +/*ServiceBrokerAuthLoginInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type ServiceBrokerAuthLoginInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthLoginInternalServerError) Error() string { + return fmt.Sprintf("[GET /auth/v1/login][%d] serviceBrokerAuthLoginInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthLoginInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_logout_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_logout_parameters.go new file mode 100644 index 00000000000..b3af618851e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_logout_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerAuthLogoutParams creates a new ServiceBrokerAuthLogoutParams object +// with the default values initialized. +func NewServiceBrokerAuthLogoutParams() *ServiceBrokerAuthLogoutParams { + + return &ServiceBrokerAuthLogoutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthLogoutParamsWithTimeout creates a new ServiceBrokerAuthLogoutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthLogoutParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthLogoutParams { + + return &ServiceBrokerAuthLogoutParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerAuthLogoutParamsWithContext creates a new ServiceBrokerAuthLogoutParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthLogoutParamsWithContext(ctx context.Context) *ServiceBrokerAuthLogoutParams { + + return &ServiceBrokerAuthLogoutParams{ + + Context: ctx, + } +} + +// NewServiceBrokerAuthLogoutParamsWithHTTPClient creates a new ServiceBrokerAuthLogoutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthLogoutParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthLogoutParams { + + return &ServiceBrokerAuthLogoutParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerAuthLogoutParams contains all the parameters to send to the API endpoint +for the service broker auth logout operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthLogoutParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth logout params +func (o *ServiceBrokerAuthLogoutParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthLogoutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker auth logout params +func (o *ServiceBrokerAuthLogoutParams) 
SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth logout params +func (o *ServiceBrokerAuthLogoutParams) WithContext(ctx context.Context) *ServiceBrokerAuthLogoutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth logout params +func (o *ServiceBrokerAuthLogoutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth logout params +func (o *ServiceBrokerAuthLogoutParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthLogoutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth logout params +func (o *ServiceBrokerAuthLogoutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthLogoutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_logout_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_logout_responses.go new file mode 100644 index 00000000000..819f363d380 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_logout_responses.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthLogoutReader is a Reader for the ServiceBrokerAuthLogout structure. +type ServiceBrokerAuthLogoutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerAuthLogoutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthLogoutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 500: + result := NewServiceBrokerAuthLogoutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthLogoutOK creates a ServiceBrokerAuthLogoutOK with default headers values +func NewServiceBrokerAuthLogoutOK() *ServiceBrokerAuthLogoutOK { + return &ServiceBrokerAuthLogoutOK{} +} + +/*ServiceBrokerAuthLogoutOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerAuthLogoutOK struct { + Payload models.Object +} + +func (o *ServiceBrokerAuthLogoutOK) Error() string { + return fmt.Sprintf("[GET /auth/v1/logout][%d] serviceBrokerAuthLogoutOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthLogoutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthLogoutInternalServerError creates a ServiceBrokerAuthLogoutInternalServerError with default headers values +func NewServiceBrokerAuthLogoutInternalServerError() *ServiceBrokerAuthLogoutInternalServerError { + return &ServiceBrokerAuthLogoutInternalServerError{} +} + +/*ServiceBrokerAuthLogoutInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerAuthLogoutInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthLogoutInternalServerError) Error() string { + return fmt.Sprintf("[GET /auth/v1/logout][%d] serviceBrokerAuthLogoutInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthLogoutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_callback_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_callback_parameters.go new file mode 100644 index 00000000000..f741a44bd01 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_callback_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerAuthRegistrationCallbackParams creates a new ServiceBrokerAuthRegistrationCallbackParams object +// with the default values initialized. 
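The registration-callback operation that follows carries no operation-specific parameters; its params object holds only transport plumbing (timeout, context, HTTP client). A small sketch of threading a context deadline through, using only constructors defined below (the 15-second bound is arbitrary):

package main

import (
	"context"
	"time"

	"github.com/IBM-Cloud/power-go-client/power/client/authentication"
)

func main() {
	// Bound the whole callback round-trip with a context deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	params := authentication.NewServiceBrokerAuthRegistrationCallbackParamsWithContext(ctx)
	_ = params // handed to the generated Client method, which is not in this hunk
}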
+func NewServiceBrokerAuthRegistrationCallbackParams() *ServiceBrokerAuthRegistrationCallbackParams { + + return &ServiceBrokerAuthRegistrationCallbackParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthRegistrationCallbackParamsWithTimeout creates a new ServiceBrokerAuthRegistrationCallbackParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthRegistrationCallbackParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthRegistrationCallbackParams { + + return &ServiceBrokerAuthRegistrationCallbackParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerAuthRegistrationCallbackParamsWithContext creates a new ServiceBrokerAuthRegistrationCallbackParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthRegistrationCallbackParamsWithContext(ctx context.Context) *ServiceBrokerAuthRegistrationCallbackParams { + + return &ServiceBrokerAuthRegistrationCallbackParams{ + + Context: ctx, + } +} + +// NewServiceBrokerAuthRegistrationCallbackParamsWithHTTPClient creates a new ServiceBrokerAuthRegistrationCallbackParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthRegistrationCallbackParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthRegistrationCallbackParams { + + return &ServiceBrokerAuthRegistrationCallbackParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerAuthRegistrationCallbackParams contains all the parameters to send to the API endpoint +for the service broker auth registration callback operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthRegistrationCallbackParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth registration callback params +func (o *ServiceBrokerAuthRegistrationCallbackParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthRegistrationCallbackParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker auth registration callback params +func (o *ServiceBrokerAuthRegistrationCallbackParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth registration callback params +func (o *ServiceBrokerAuthRegistrationCallbackParams) WithContext(ctx context.Context) *ServiceBrokerAuthRegistrationCallbackParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth registration callback params +func (o *ServiceBrokerAuthRegistrationCallbackParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth registration callback params +func (o *ServiceBrokerAuthRegistrationCallbackParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthRegistrationCallbackParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth registration callback params +func (o *ServiceBrokerAuthRegistrationCallbackParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthRegistrationCallbackParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var 
res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_callback_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_callback_responses.go new file mode 100644 index 00000000000..5c08aab1579 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_callback_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthRegistrationCallbackReader is a Reader for the ServiceBrokerAuthRegistrationCallback structure. +type ServiceBrokerAuthRegistrationCallbackReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerAuthRegistrationCallbackReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthRegistrationCallbackOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 401: + result := NewServiceBrokerAuthRegistrationCallbackUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerAuthRegistrationCallbackInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthRegistrationCallbackOK creates a ServiceBrokerAuthRegistrationCallbackOK with default headers values +func NewServiceBrokerAuthRegistrationCallbackOK() *ServiceBrokerAuthRegistrationCallbackOK { + return &ServiceBrokerAuthRegistrationCallbackOK{} +} + +/*ServiceBrokerAuthRegistrationCallbackOK handles this case with default header values. + +OK +*/ +type ServiceBrokerAuthRegistrationCallbackOK struct { + Payload *models.AccessToken +} + +func (o *ServiceBrokerAuthRegistrationCallbackOK) Error() string { + return fmt.Sprintf("[GET /auth/v1/callback-registration][%d] serviceBrokerAuthRegistrationCallbackOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthRegistrationCallbackOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.AccessToken) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthRegistrationCallbackUnauthorized creates a ServiceBrokerAuthRegistrationCallbackUnauthorized with default headers values +func NewServiceBrokerAuthRegistrationCallbackUnauthorized() *ServiceBrokerAuthRegistrationCallbackUnauthorized { + return &ServiceBrokerAuthRegistrationCallbackUnauthorized{} +} + +/*ServiceBrokerAuthRegistrationCallbackUnauthorized handles this case with default header values. 
+ +Unauthorized +*/ +type ServiceBrokerAuthRegistrationCallbackUnauthorized struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthRegistrationCallbackUnauthorized) Error() string { + return fmt.Sprintf("[GET /auth/v1/callback-registration][%d] serviceBrokerAuthRegistrationCallbackUnauthorized %+v", 401, o.Payload) +} + +func (o *ServiceBrokerAuthRegistrationCallbackUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthRegistrationCallbackInternalServerError creates a ServiceBrokerAuthRegistrationCallbackInternalServerError with default headers values +func NewServiceBrokerAuthRegistrationCallbackInternalServerError() *ServiceBrokerAuthRegistrationCallbackInternalServerError { + return &ServiceBrokerAuthRegistrationCallbackInternalServerError{} +} + +/*ServiceBrokerAuthRegistrationCallbackInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerAuthRegistrationCallbackInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthRegistrationCallbackInternalServerError) Error() string { + return fmt.Sprintf("[GET /auth/v1/callback-registration][%d] serviceBrokerAuthRegistrationCallbackInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthRegistrationCallbackInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_parameters.go new file mode 100644 index 00000000000..dfacdda9485 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_parameters.go @@ -0,0 +1,272 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerAuthRegistrationParams creates a new ServiceBrokerAuthRegistrationParams object +// with the default values initialized. 
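The registration operation below has the richest parameter surface in this package: four required query parameters (the non-pointer fields), an optional redirect URL, and a regions array that WriteToRequest serializes into a single comma-separated query value via swag.JoinByFormat with the empty (csv) format. A sketch with placeholder values; real IDs would come from the tenant's account and entitlement:

package main

import (
	"fmt"

	"github.com/IBM-Cloud/power-go-client/power/client/authentication"
)

func main() {
	redirect := "https://example.com/registered" // optional, hence a pointer

	params := authentication.NewServiceBrokerAuthRegistrationParams().
		WithTenantID("tenant-0000").
		WithEntitlementID("entitlement-0000").
		WithIcn("1234567").
		WithPlan("standard").
		WithRegions([]string{"us-east", "us-south"}).
		WithRedirectURL(&redirect)

	// Regions reach the wire as ?regions=us-east,us-south
	fmt.Println(params.Regions)
}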
+func NewServiceBrokerAuthRegistrationParams() *ServiceBrokerAuthRegistrationParams { + var () + return &ServiceBrokerAuthRegistrationParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthRegistrationParamsWithTimeout creates a new ServiceBrokerAuthRegistrationParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthRegistrationParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthRegistrationParams { + var () + return &ServiceBrokerAuthRegistrationParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerAuthRegistrationParamsWithContext creates a new ServiceBrokerAuthRegistrationParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthRegistrationParamsWithContext(ctx context.Context) *ServiceBrokerAuthRegistrationParams { + var () + return &ServiceBrokerAuthRegistrationParams{ + + Context: ctx, + } +} + +// NewServiceBrokerAuthRegistrationParamsWithHTTPClient creates a new ServiceBrokerAuthRegistrationParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthRegistrationParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthRegistrationParams { + var () + return &ServiceBrokerAuthRegistrationParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerAuthRegistrationParams contains all the parameters to send to the API endpoint +for the service broker auth registration operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthRegistrationParams struct { + + /*EntitlementID + Entitlement ID of for this tenant + + */ + EntitlementID string + /*Icn + IBM Customer Number (ICN) for this tenant + + */ + Icn string + /*Plan + Plan for this tenant and entitlement + + */ + Plan string + /*RedirectURL + The URL to redirect to after login/registration + + */ + RedirectURL *string + /*Regions + An array of regions matching the number of cloud-instances in the plan + + */ + Regions []string + /*TenantID + Tenant ID of a pcloud tenant + + */ + TenantID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthRegistrationParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) WithContext(ctx context.Context) *ServiceBrokerAuthRegistrationParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthRegistrationParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEntitlementID 
adds the entitlementID to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) WithEntitlementID(entitlementID string) *ServiceBrokerAuthRegistrationParams { + o.SetEntitlementID(entitlementID) + return o +} + +// SetEntitlementID adds the entitlementId to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) SetEntitlementID(entitlementID string) { + o.EntitlementID = entitlementID +} + +// WithIcn adds the icn to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) WithIcn(icn string) *ServiceBrokerAuthRegistrationParams { + o.SetIcn(icn) + return o +} + +// SetIcn adds the icn to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) SetIcn(icn string) { + o.Icn = icn +} + +// WithPlan adds the plan to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) WithPlan(plan string) *ServiceBrokerAuthRegistrationParams { + o.SetPlan(plan) + return o +} + +// SetPlan adds the plan to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) SetPlan(plan string) { + o.Plan = plan +} + +// WithRedirectURL adds the redirectURL to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) WithRedirectURL(redirectURL *string) *ServiceBrokerAuthRegistrationParams { + o.SetRedirectURL(redirectURL) + return o +} + +// SetRedirectURL adds the redirectUrl to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) SetRedirectURL(redirectURL *string) { + o.RedirectURL = redirectURL +} + +// WithRegions adds the regions to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) WithRegions(regions []string) *ServiceBrokerAuthRegistrationParams { + o.SetRegions(regions) + return o +} + +// SetRegions adds the regions to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) SetRegions(regions []string) { + o.Regions = regions +} + +// WithTenantID adds the tenantID to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) WithTenantID(tenantID string) *ServiceBrokerAuthRegistrationParams { + o.SetTenantID(tenantID) + return o +} + +// SetTenantID adds the tenantId to the service broker auth registration params +func (o *ServiceBrokerAuthRegistrationParams) SetTenantID(tenantID string) { + o.TenantID = tenantID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthRegistrationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param entitlement_id + qrEntitlementID := o.EntitlementID + qEntitlementID := qrEntitlementID + if qEntitlementID != "" { + if err := r.SetQueryParam("entitlement_id", qEntitlementID); err != nil { + return err + } + } + + // query param icn + qrIcn := o.Icn + qIcn := qrIcn + if qIcn != "" { + if err := r.SetQueryParam("icn", qIcn); err != nil { + return err + } + } + + // query param plan + qrPlan := o.Plan + qPlan := qrPlan + if qPlan != "" { + if err := r.SetQueryParam("plan", qPlan); err != nil { + return err + } + } + + if o.RedirectURL != nil { + + // query param redirect_url + var qrRedirectURL string + if o.RedirectURL != nil { + qrRedirectURL = *o.RedirectURL + } + qRedirectURL := qrRedirectURL + if qRedirectURL != "" { + if err := 
r.SetQueryParam("redirect_url", qRedirectURL); err != nil { + return err + } + } + + } + + valuesRegions := o.Regions + + joinedRegions := swag.JoinByFormat(valuesRegions, "") + // query array param regions + if err := r.SetQueryParam("regions", joinedRegions...); err != nil { + return err + } + + // query param tenant_id + qrTenantID := o.TenantID + qTenantID := qrTenantID + if qTenantID != "" { + if err := r.SetQueryParam("tenant_id", qTenantID); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_responses.go new file mode 100644 index 00000000000..ab0ea712122 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_registration_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthRegistrationReader is a Reader for the ServiceBrokerAuthRegistration structure. +type ServiceBrokerAuthRegistrationReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerAuthRegistrationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthRegistrationOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 401: + result := NewServiceBrokerAuthRegistrationUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerAuthRegistrationInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthRegistrationOK creates a ServiceBrokerAuthRegistrationOK with default headers values +func NewServiceBrokerAuthRegistrationOK() *ServiceBrokerAuthRegistrationOK { + return &ServiceBrokerAuthRegistrationOK{} +} + +/*ServiceBrokerAuthRegistrationOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerAuthRegistrationOK struct { + Payload *models.AccessToken +} + +func (o *ServiceBrokerAuthRegistrationOK) Error() string { + return fmt.Sprintf("[GET /auth/v1/registration][%d] serviceBrokerAuthRegistrationOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthRegistrationOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.AccessToken) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthRegistrationUnauthorized creates a ServiceBrokerAuthRegistrationUnauthorized with default headers values +func NewServiceBrokerAuthRegistrationUnauthorized() *ServiceBrokerAuthRegistrationUnauthorized { + return &ServiceBrokerAuthRegistrationUnauthorized{} +} + +/*ServiceBrokerAuthRegistrationUnauthorized handles this case with default header values. + +Unauthorized +*/ +type ServiceBrokerAuthRegistrationUnauthorized struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthRegistrationUnauthorized) Error() string { + return fmt.Sprintf("[GET /auth/v1/registration][%d] serviceBrokerAuthRegistrationUnauthorized %+v", 401, o.Payload) +} + +func (o *ServiceBrokerAuthRegistrationUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthRegistrationInternalServerError creates a ServiceBrokerAuthRegistrationInternalServerError with default headers values +func NewServiceBrokerAuthRegistrationInternalServerError() *ServiceBrokerAuthRegistrationInternalServerError { + return &ServiceBrokerAuthRegistrationInternalServerError{} +} + +/*ServiceBrokerAuthRegistrationInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerAuthRegistrationInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthRegistrationInternalServerError) Error() string { + return fmt.Sprintf("[GET /auth/v1/registration][%d] serviceBrokerAuthRegistrationInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthRegistrationInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_token_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_token_post_parameters.go new file mode 100644 index 00000000000..00a99209bd5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_token_post_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewServiceBrokerAuthTokenPostParams creates a new ServiceBrokerAuthTokenPostParams object +// with the default values initialized. +func NewServiceBrokerAuthTokenPostParams() *ServiceBrokerAuthTokenPostParams { + var () + return &ServiceBrokerAuthTokenPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerAuthTokenPostParamsWithTimeout creates a new ServiceBrokerAuthTokenPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerAuthTokenPostParamsWithTimeout(timeout time.Duration) *ServiceBrokerAuthTokenPostParams { + var () + return &ServiceBrokerAuthTokenPostParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerAuthTokenPostParamsWithContext creates a new ServiceBrokerAuthTokenPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerAuthTokenPostParamsWithContext(ctx context.Context) *ServiceBrokerAuthTokenPostParams { + var () + return &ServiceBrokerAuthTokenPostParams{ + + Context: ctx, + } +} + +// NewServiceBrokerAuthTokenPostParamsWithHTTPClient creates a new ServiceBrokerAuthTokenPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerAuthTokenPostParamsWithHTTPClient(client *http.Client) *ServiceBrokerAuthTokenPostParams { + var () + return &ServiceBrokerAuthTokenPostParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerAuthTokenPostParams contains all the parameters to send to the API endpoint +for the service broker auth token post operation typically these are written to a http.Request +*/ +type ServiceBrokerAuthTokenPostParams struct { + + /*Body + Parameters for requesting a new Token from a Refresh Token + + */ + Body *models.TokenRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker auth token post params +func (o *ServiceBrokerAuthTokenPostParams) WithTimeout(timeout time.Duration) *ServiceBrokerAuthTokenPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker auth token post params +func (o *ServiceBrokerAuthTokenPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker auth token post params +func (o *ServiceBrokerAuthTokenPostParams) WithContext(ctx context.Context) *ServiceBrokerAuthTokenPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker auth token post params +func (o *ServiceBrokerAuthTokenPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker auth token post params +func (o *ServiceBrokerAuthTokenPostParams) WithHTTPClient(client *http.Client) *ServiceBrokerAuthTokenPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker auth token post params +func (o *ServiceBrokerAuthTokenPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to 
the service broker auth token post params +func (o *ServiceBrokerAuthTokenPostParams) WithBody(body *models.TokenRequest) *ServiceBrokerAuthTokenPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the service broker auth token post params +func (o *ServiceBrokerAuthTokenPostParams) SetBody(body *models.TokenRequest) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerAuthTokenPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_token_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_token_post_responses.go new file mode 100644 index 00000000000..e73831d7028 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/authentication/service_broker_auth_token_post_responses.go @@ -0,0 +1,211 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package authentication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerAuthTokenPostReader is a Reader for the ServiceBrokerAuthTokenPost structure. +type ServiceBrokerAuthTokenPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerAuthTokenPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerAuthTokenPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerAuthTokenPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 403: + result := NewServiceBrokerAuthTokenPostForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 429: + result := NewServiceBrokerAuthTokenPostTooManyRequests() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerAuthTokenPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerAuthTokenPostOK creates a ServiceBrokerAuthTokenPostOK with default headers values +func NewServiceBrokerAuthTokenPostOK() *ServiceBrokerAuthTokenPostOK { + return &ServiceBrokerAuthTokenPostOK{} +} + +/*ServiceBrokerAuthTokenPostOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerAuthTokenPostOK struct { + Payload *models.Token +} + +func (o *ServiceBrokerAuthTokenPostOK) Error() string { + return fmt.Sprintf("[POST /auth/v1/token][%d] serviceBrokerAuthTokenPostOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerAuthTokenPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Token) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthTokenPostBadRequest creates a ServiceBrokerAuthTokenPostBadRequest with default headers values +func NewServiceBrokerAuthTokenPostBadRequest() *ServiceBrokerAuthTokenPostBadRequest { + return &ServiceBrokerAuthTokenPostBadRequest{} +} + +/*ServiceBrokerAuthTokenPostBadRequest handles this case with default header values. + +Authorization pending +*/ +type ServiceBrokerAuthTokenPostBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthTokenPostBadRequest) Error() string { + return fmt.Sprintf("[POST /auth/v1/token][%d] serviceBrokerAuthTokenPostBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerAuthTokenPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthTokenPostForbidden creates a ServiceBrokerAuthTokenPostForbidden with default headers values +func NewServiceBrokerAuthTokenPostForbidden() *ServiceBrokerAuthTokenPostForbidden { + return &ServiceBrokerAuthTokenPostForbidden{} +} + +/*ServiceBrokerAuthTokenPostForbidden handles this case with default header values. + +User refused grant +*/ +type ServiceBrokerAuthTokenPostForbidden struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthTokenPostForbidden) Error() string { + return fmt.Sprintf("[POST /auth/v1/token][%d] serviceBrokerAuthTokenPostForbidden %+v", 403, o.Payload) +} + +func (o *ServiceBrokerAuthTokenPostForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthTokenPostTooManyRequests creates a ServiceBrokerAuthTokenPostTooManyRequests with default headers values +func NewServiceBrokerAuthTokenPostTooManyRequests() *ServiceBrokerAuthTokenPostTooManyRequests { + return &ServiceBrokerAuthTokenPostTooManyRequests{} +} + +/*ServiceBrokerAuthTokenPostTooManyRequests handles this case with default header values. 
+ +Polling too frequently +*/ +type ServiceBrokerAuthTokenPostTooManyRequests struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthTokenPostTooManyRequests) Error() string { + return fmt.Sprintf("[POST /auth/v1/token][%d] serviceBrokerAuthTokenPostTooManyRequests %+v", 429, o.Payload) +} + +func (o *ServiceBrokerAuthTokenPostTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerAuthTokenPostInternalServerError creates a ServiceBrokerAuthTokenPostInternalServerError with default headers values +func NewServiceBrokerAuthTokenPostInternalServerError() *ServiceBrokerAuthTokenPostInternalServerError { + return &ServiceBrokerAuthTokenPostInternalServerError{} +} + +/*ServiceBrokerAuthTokenPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerAuthTokenPostInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerAuthTokenPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /auth/v1/token][%d] serviceBrokerAuthTokenPostInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerAuthTokenPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_get_parameters.go new file mode 100644 index 00000000000..fc39f69c1db --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package bluemix_service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewBluemixServiceInstanceGetParams creates a new BluemixServiceInstanceGetParams object +// with the default values initialized. 
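// --- Editorial note (illustrative only, not part of the vendored diff) ---
// The authentication responses above (400 "Authorization pending",
// 403 "User refused grant", 429 "Polling too frequently") describe an
// OAuth-style device-authorization poll. A minimal polling sketch follows;
// it assumes the package's generated Client exposes a
// ServiceBrokerAuthTokenPost method (shown here without an authInfo
// argument; adjust to the generated signature), and that c, req, and
// interval are caller-supplied:
//
//	func pollForToken(c *authentication.Client, req *models.TokenRequest, interval time.Duration) (*models.Token, error) {
//		for {
//			ok, err := c.ServiceBrokerAuthTokenPost(
//				authentication.NewServiceBrokerAuthTokenPostParams().WithBody(req))
//			if err == nil {
//				return ok.Payload, nil // *models.Token
//			}
//			switch err.(type) {
//			case *authentication.ServiceBrokerAuthTokenPostBadRequest:
//				time.Sleep(interval) // authorization still pending; keep polling
//			case *authentication.ServiceBrokerAuthTokenPostTooManyRequests:
//				time.Sleep(2 * interval) // polling too fast; back off
//			default:
//				return nil, err // grant refused, server error, or transport failure
//			}
//		}
//	}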
+func NewBluemixServiceInstanceGetParams() *BluemixServiceInstanceGetParams { + var () + return &BluemixServiceInstanceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewBluemixServiceInstanceGetParamsWithTimeout creates a new BluemixServiceInstanceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewBluemixServiceInstanceGetParamsWithTimeout(timeout time.Duration) *BluemixServiceInstanceGetParams { + var () + return &BluemixServiceInstanceGetParams{ + + timeout: timeout, + } +} + +// NewBluemixServiceInstanceGetParamsWithContext creates a new BluemixServiceInstanceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewBluemixServiceInstanceGetParamsWithContext(ctx context.Context) *BluemixServiceInstanceGetParams { + var () + return &BluemixServiceInstanceGetParams{ + + Context: ctx, + } +} + +// NewBluemixServiceInstanceGetParamsWithHTTPClient creates a new BluemixServiceInstanceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewBluemixServiceInstanceGetParamsWithHTTPClient(client *http.Client) *BluemixServiceInstanceGetParams { + var () + return &BluemixServiceInstanceGetParams{ + HTTPClient: client, + } +} + +/*BluemixServiceInstanceGetParams contains all the parameters to send to the API endpoint +for the bluemix service instance get operation typically these are written to a http.Request +*/ +type BluemixServiceInstanceGetParams struct { + + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the bluemix service instance get params +func (o *BluemixServiceInstanceGetParams) WithTimeout(timeout time.Duration) *BluemixServiceInstanceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the bluemix service instance get params +func (o *BluemixServiceInstanceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the bluemix service instance get params +func (o *BluemixServiceInstanceGetParams) WithContext(ctx context.Context) *BluemixServiceInstanceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the bluemix service instance get params +func (o *BluemixServiceInstanceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the bluemix service instance get params +func (o *BluemixServiceInstanceGetParams) WithHTTPClient(client *http.Client) *BluemixServiceInstanceGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the bluemix service instance get params +func (o *BluemixServiceInstanceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithInstanceID adds the instanceID to the bluemix service instance get params +func (o *BluemixServiceInstanceGetParams) WithInstanceID(instanceID string) *BluemixServiceInstanceGetParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the bluemix service instance get params +func (o *BluemixServiceInstanceGetParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *BluemixServiceInstanceGetParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_get_responses.go new file mode 100644 index 00000000000..928de7fe2d0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_get_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package bluemix_service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// BluemixServiceInstanceGetReader is a Reader for the BluemixServiceInstanceGet structure. +type BluemixServiceInstanceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *BluemixServiceInstanceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewBluemixServiceInstanceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewBluemixServiceInstanceGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewBluemixServiceInstanceGetOK creates a BluemixServiceInstanceGetOK with default headers values +func NewBluemixServiceInstanceGetOK() *BluemixServiceInstanceGetOK { + return &BluemixServiceInstanceGetOK{} +} + +/*BluemixServiceInstanceGetOK handles this case with default header values. + +OK +*/ +type BluemixServiceInstanceGetOK struct { + Payload *models.ServiceInstance +} + +func (o *BluemixServiceInstanceGetOK) Error() string { + return fmt.Sprintf("[GET /bluemix_v1/service_instances/{instance_id}][%d] bluemixServiceInstanceGetOK %+v", 200, o.Payload) +} + +func (o *BluemixServiceInstanceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceInstance) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBluemixServiceInstanceGetBadRequest creates a BluemixServiceInstanceGetBadRequest with default headers values +func NewBluemixServiceInstanceGetBadRequest() *BluemixServiceInstanceGetBadRequest { + return &BluemixServiceInstanceGetBadRequest{} +} + +/*BluemixServiceInstanceGetBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type BluemixServiceInstanceGetBadRequest struct { + Payload *models.Error +} + +func (o *BluemixServiceInstanceGetBadRequest) Error() string { + return fmt.Sprintf("[GET /bluemix_v1/service_instances/{instance_id}][%d] bluemixServiceInstanceGetBadRequest %+v", 400, o.Payload) +} + +func (o *BluemixServiceInstanceGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_put_parameters.go new file mode 100644 index 00000000000..0b0b7b0987e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_put_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package bluemix_service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewBluemixServiceInstancePutParams creates a new BluemixServiceInstancePutParams object +// with the default values initialized. +func NewBluemixServiceInstancePutParams() *BluemixServiceInstancePutParams { + var () + return &BluemixServiceInstancePutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewBluemixServiceInstancePutParamsWithTimeout creates a new BluemixServiceInstancePutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewBluemixServiceInstancePutParamsWithTimeout(timeout time.Duration) *BluemixServiceInstancePutParams { + var () + return &BluemixServiceInstancePutParams{ + + timeout: timeout, + } +} + +// NewBluemixServiceInstancePutParamsWithContext creates a new BluemixServiceInstancePutParams object +// with the default values initialized, and the ability to set a context for a request +func NewBluemixServiceInstancePutParamsWithContext(ctx context.Context) *BluemixServiceInstancePutParams { + var () + return &BluemixServiceInstancePutParams{ + + Context: ctx, + } +} + +// NewBluemixServiceInstancePutParamsWithHTTPClient creates a new BluemixServiceInstancePutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewBluemixServiceInstancePutParamsWithHTTPClient(client *http.Client) *BluemixServiceInstancePutParams { + var () + return &BluemixServiceInstancePutParams{ + HTTPClient: client, + } +} + +/*BluemixServiceInstancePutParams contains all the parameters to send to the API endpoint +for the bluemix service instance put operation typically these are written to a http.Request +*/ +type BluemixServiceInstancePutParams struct { + + /*Body + parameters for the requested state of a provisioned service + + */ + Body *models.ServiceInstanceRequest + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + + timeout time.Duration + Context 
context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) WithTimeout(timeout time.Duration) *BluemixServiceInstancePutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) WithContext(ctx context.Context) *BluemixServiceInstancePutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) WithHTTPClient(client *http.Client) *BluemixServiceInstancePutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) WithBody(body *models.ServiceInstanceRequest) *BluemixServiceInstancePutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) SetBody(body *models.ServiceInstanceRequest) { + o.Body = body +} + +// WithInstanceID adds the instanceID to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) WithInstanceID(instanceID string) *BluemixServiceInstancePutParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the bluemix service instance put params +func (o *BluemixServiceInstancePutParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *BluemixServiceInstancePutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_put_responses.go new file mode 100644 index 00000000000..63f74946062 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instance_put_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package bluemix_service_instances + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// BluemixServiceInstancePutReader is a Reader for the BluemixServiceInstancePut structure. +type BluemixServiceInstancePutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *BluemixServiceInstancePutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewBluemixServiceInstancePutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewBluemixServiceInstancePutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewBluemixServiceInstancePutOK creates a BluemixServiceInstancePutOK with default headers values +func NewBluemixServiceInstancePutOK() *BluemixServiceInstancePutOK { + return &BluemixServiceInstancePutOK{} +} + +/*BluemixServiceInstancePutOK handles this case with default header values. + +OK +*/ +type BluemixServiceInstancePutOK struct { + Payload *models.ServiceInstance +} + +func (o *BluemixServiceInstancePutOK) Error() string { + return fmt.Sprintf("[PUT /bluemix_v1/service_instances/{instance_id}][%d] bluemixServiceInstancePutOK %+v", 200, o.Payload) +} + +func (o *BluemixServiceInstancePutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceInstance) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBluemixServiceInstancePutBadRequest creates a BluemixServiceInstancePutBadRequest with default headers values +func NewBluemixServiceInstancePutBadRequest() *BluemixServiceInstancePutBadRequest { + return &BluemixServiceInstancePutBadRequest{} +} + +/*BluemixServiceInstancePutBadRequest handles this case with default header values. + +Bad Request +*/ +type BluemixServiceInstancePutBadRequest struct { + Payload *models.Error +} + +func (o *BluemixServiceInstancePutBadRequest) Error() string { + return fmt.Sprintf("[PUT /bluemix_v1/service_instances/{instance_id}][%d] bluemixServiceInstancePutBadRequest %+v", 400, o.Payload) +} + +func (o *BluemixServiceInstancePutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instances_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instances_client.go new file mode 100644 index 00000000000..6e0787f7353 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances/bluemix_service_instances_client.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. 
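// --- Editorial note (illustrative only, not part of the vendored diff) ---
// A minimal end-to-end sketch of driving the bluemix_service_instances
// client defined below. The host, base path, scheme, and bearer-token auth
// are assumptions for the example, not values taken from this repository:
//
//	import (
//		httptransport "github.com/go-openapi/runtime/client"
//		"github.com/go-openapi/strfmt"
//
//		"github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances"
//		"github.com/IBM-Cloud/power-go-client/power/models"
//	)
//
//	func getServiceInstance(host, instanceID, bearer string) (*models.ServiceInstance, error) {
//		transport := httptransport.New(host, "/", []string{"http"})
//		c := bluemix_service_instances.New(transport, strfmt.Default)
//		auth := httptransport.BearerToken(bearer)
//		ok, err := c.BluemixServiceInstanceGet(
//			bluemix_service_instances.NewBluemixServiceInstanceGetParams().
//				WithInstanceID(instanceID), auth)
//		if err != nil {
//			return nil, err
//		}
//		return ok.Payload, nil
//	}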
+ +package bluemix_service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new bluemix service instances API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for bluemix service instances API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +BluemixServiceInstanceGet gets the current state information associated with the service instance +*/ +func (a *Client) BluemixServiceInstanceGet(params *BluemixServiceInstanceGetParams, authInfo runtime.ClientAuthInfoWriter) (*BluemixServiceInstanceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBluemixServiceInstanceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "bluemix.serviceInstance.get", + Method: "GET", + PathPattern: "/bluemix_v1/service_instances/{instance_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &BluemixServiceInstanceGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*BluemixServiceInstanceGetOK), nil + +} + +/* +BluemixServiceInstancePut updates disable or enable the state of a provisioned service instance +*/ +func (a *Client) BluemixServiceInstancePut(params *BluemixServiceInstancePutParams, authInfo runtime.ClientAuthInfoWriter) (*BluemixServiceInstancePutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBluemixServiceInstancePutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "bluemix.serviceInstance.put", + Method: "PUT", + PathPattern: "/bluemix_v1/service_instances/{instance_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &BluemixServiceInstancePutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*BluemixServiceInstancePutOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/catalog/catalog_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/catalog/catalog_client.go new file mode 100644 index 00000000000..efc5a54d44c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/catalog/catalog_client.go @@ -0,0 +1,59 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package catalog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new catalog API client. 
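// --- Editorial note (illustrative only, not part of the vendored diff) ---
// CatalogGet carries the mandatory X-Broker-API-Version header through the
// params builder defined later in this package. The version string below is
// an assumption (Open Service Broker API clients commonly send "2.12" or
// later); catalogClient and authInfo are caller-supplied:
//
//	ok, err := catalogClient.CatalogGet(
//		catalog.NewCatalogGetParams().WithXBrokerAPIVersion("2.12"), authInfo)
//	if err == nil {
//		_ = ok.Payload // *models.Catalog
//	}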
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for catalog API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +CatalogGet gets the catalog of services that the service broker offers +*/ +func (a *Client) CatalogGet(params *CatalogGetParams, authInfo runtime.ClientAuthInfoWriter) (*CatalogGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCatalogGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "catalog.get", + Method: "GET", + PathPattern: "/v2/catalog", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CatalogGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*CatalogGetOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/catalog/catalog_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/catalog/catalog_get_parameters.go new file mode 100644 index 00000000000..3a9f01f5a5c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/catalog/catalog_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package catalog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewCatalogGetParams creates a new CatalogGetParams object +// with the default values initialized. 
+func NewCatalogGetParams() *CatalogGetParams { + var () + return &CatalogGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCatalogGetParamsWithTimeout creates a new CatalogGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCatalogGetParamsWithTimeout(timeout time.Duration) *CatalogGetParams { + var () + return &CatalogGetParams{ + + timeout: timeout, + } +} + +// NewCatalogGetParamsWithContext creates a new CatalogGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCatalogGetParamsWithContext(ctx context.Context) *CatalogGetParams { + var () + return &CatalogGetParams{ + + Context: ctx, + } +} + +// NewCatalogGetParamsWithHTTPClient creates a new CatalogGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCatalogGetParamsWithHTTPClient(client *http.Client) *CatalogGetParams { + var () + return &CatalogGetParams{ + HTTPClient: client, + } +} + +/*CatalogGetParams contains all the parameters to send to the API endpoint +for the catalog get operation typically these are written to a http.Request +*/ +type CatalogGetParams struct { + + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the catalog get params +func (o *CatalogGetParams) WithTimeout(timeout time.Duration) *CatalogGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the catalog get params +func (o *CatalogGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the catalog get params +func (o *CatalogGetParams) WithContext(ctx context.Context) *CatalogGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the catalog get params +func (o *CatalogGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the catalog get params +func (o *CatalogGetParams) WithHTTPClient(client *http.Client) *CatalogGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the catalog get params +func (o *CatalogGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the catalog get params +func (o *CatalogGetParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *CatalogGetParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the catalog get params +func (o *CatalogGetParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WriteToRequest writes these params to a swagger request +func (o *CatalogGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // header param X-Broker-API-Version + if err := r.SetHeaderParam("X-Broker-API-Version", o.XBrokerAPIVersion); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/catalog/catalog_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/catalog/catalog_get_responses.go new file mode 100644 index 00000000000..5a03deb63d6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/catalog/catalog_get_responses.go @@ -0,0 +1,67 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package catalog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// CatalogGetReader is a Reader for the CatalogGet structure. +type CatalogGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CatalogGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewCatalogGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewCatalogGetOK creates a CatalogGetOK with default headers values +func NewCatalogGetOK() *CatalogGetOK { + return &CatalogGetOK{} +} + +/*CatalogGetOK handles this case with default header values. + +catalog response +*/ +type CatalogGetOK struct { + Payload *models.Catalog +} + +func (o *CatalogGetOK) Error() string { + return fmt.Sprintf("[GET /v2/catalog][%d] catalogGetOK %+v", 200, o.Payload) +} + +func (o *CatalogGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Catalog) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms/hardware_platforms_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms/hardware_platforms_client.go new file mode 100644 index 00000000000..d58ff86ae61 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms/hardware_platforms_client.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package hardware_platforms + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new hardware platforms API client. 
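// --- Editorial note (illustrative only, not part of the vendored diff) ---
// RegionZone is an optional query parameter, so the generated builder takes
// a *string; passing nil omits regionZone from the request entirely. Also
// note two quirks visible in this package: the operation is generated
// without an authInfo argument, and the OK payload is a value-typed
// models.HardwarePlatforms rather than a pointer. hwClient and the zone
// name are assumptions:
//
//	zone := "us-east"
//	ok, err := hwClient.ServiceBrokerHardwareplatformsGet(
//		hardware_platforms.NewServiceBrokerHardwareplatformsGetParams().
//			WithRegionZone(&zone))
//	if err == nil {
//		_ = ok.Payload // models.HardwarePlatforms
//	}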
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for hardware platforms API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +ServiceBrokerHardwareplatformsGet availables hardware statistics and limits +*/ +func (a *Client) ServiceBrokerHardwareplatformsGet(params *ServiceBrokerHardwareplatformsGetParams) (*ServiceBrokerHardwareplatformsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerHardwareplatformsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.hardwareplatforms.get", + Method: "GET", + PathPattern: "/broker/v1/hardware-platforms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerHardwareplatformsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerHardwareplatformsGetOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms/service_broker_hardwareplatforms_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms/service_broker_hardwareplatforms_get_parameters.go new file mode 100644 index 00000000000..37a62680d30 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms/service_broker_hardwareplatforms_get_parameters.go @@ -0,0 +1,147 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package hardware_platforms + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerHardwareplatformsGetParams creates a new ServiceBrokerHardwareplatformsGetParams object +// with the default values initialized. 
+func NewServiceBrokerHardwareplatformsGetParams() *ServiceBrokerHardwareplatformsGetParams { + var () + return &ServiceBrokerHardwareplatformsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerHardwareplatformsGetParamsWithTimeout creates a new ServiceBrokerHardwareplatformsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerHardwareplatformsGetParamsWithTimeout(timeout time.Duration) *ServiceBrokerHardwareplatformsGetParams { + var () + return &ServiceBrokerHardwareplatformsGetParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerHardwareplatformsGetParamsWithContext creates a new ServiceBrokerHardwareplatformsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerHardwareplatformsGetParamsWithContext(ctx context.Context) *ServiceBrokerHardwareplatformsGetParams { + var () + return &ServiceBrokerHardwareplatformsGetParams{ + + Context: ctx, + } +} + +// NewServiceBrokerHardwareplatformsGetParamsWithHTTPClient creates a new ServiceBrokerHardwareplatformsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerHardwareplatformsGetParamsWithHTTPClient(client *http.Client) *ServiceBrokerHardwareplatformsGetParams { + var () + return &ServiceBrokerHardwareplatformsGetParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerHardwareplatformsGetParams contains all the parameters to send to the API endpoint +for the service broker hardwareplatforms get operation typically these are written to a http.Request +*/ +type ServiceBrokerHardwareplatformsGetParams struct { + + /*RegionZone + The region zone of the cloud instance + + */ + RegionZone *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker hardwareplatforms get params +func (o *ServiceBrokerHardwareplatformsGetParams) WithTimeout(timeout time.Duration) *ServiceBrokerHardwareplatformsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker hardwareplatforms get params +func (o *ServiceBrokerHardwareplatformsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker hardwareplatforms get params +func (o *ServiceBrokerHardwareplatformsGetParams) WithContext(ctx context.Context) *ServiceBrokerHardwareplatformsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker hardwareplatforms get params +func (o *ServiceBrokerHardwareplatformsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker hardwareplatforms get params +func (o *ServiceBrokerHardwareplatformsGetParams) WithHTTPClient(client *http.Client) *ServiceBrokerHardwareplatformsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker hardwareplatforms get params +func (o *ServiceBrokerHardwareplatformsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRegionZone adds the regionZone to the service broker hardwareplatforms get params +func (o *ServiceBrokerHardwareplatformsGetParams) WithRegionZone(regionZone *string) *ServiceBrokerHardwareplatformsGetParams { + o.SetRegionZone(regionZone) + return o +} + +// SetRegionZone adds the 
regionZone to the service broker hardwareplatforms get params +func (o *ServiceBrokerHardwareplatformsGetParams) SetRegionZone(regionZone *string) { + o.RegionZone = regionZone +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerHardwareplatformsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.RegionZone != nil { + + // query param regionZone + var qrRegionZone string + if o.RegionZone != nil { + qrRegionZone = *o.RegionZone + } + qRegionZone := qrRegionZone + if qRegionZone != "" { + if err := r.SetQueryParam("regionZone", qRegionZone); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms/service_broker_hardwareplatforms_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms/service_broker_hardwareplatforms_get_responses.go new file mode 100644 index 00000000000..f981e3ee879 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms/service_broker_hardwareplatforms_get_responses.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package hardware_platforms + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerHardwareplatformsGetReader is a Reader for the ServiceBrokerHardwareplatformsGet structure. +type ServiceBrokerHardwareplatformsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerHardwareplatformsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerHardwareplatformsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 500: + result := NewServiceBrokerHardwareplatformsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerHardwareplatformsGetOK creates a ServiceBrokerHardwareplatformsGetOK with default headers values +func NewServiceBrokerHardwareplatformsGetOK() *ServiceBrokerHardwareplatformsGetOK { + return &ServiceBrokerHardwareplatformsGetOK{} +} + +/*ServiceBrokerHardwareplatformsGetOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerHardwareplatformsGetOK struct { + Payload models.HardwarePlatforms +} + +func (o *ServiceBrokerHardwareplatformsGetOK) Error() string { + return fmt.Sprintf("[GET /broker/v1/hardware-platforms][%d] serviceBrokerHardwareplatformsGetOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerHardwareplatformsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerHardwareplatformsGetInternalServerError creates a ServiceBrokerHardwareplatformsGetInternalServerError with default headers values +func NewServiceBrokerHardwareplatformsGetInternalServerError() *ServiceBrokerHardwareplatformsGetInternalServerError { + return &ServiceBrokerHardwareplatformsGetInternalServerError{} +} + +/*ServiceBrokerHardwareplatformsGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerHardwareplatformsGetInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerHardwareplatformsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /broker/v1/hardware-platforms][%d] serviceBrokerHardwareplatformsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerHardwareplatformsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/iaas_service_broker_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/iaas_service_broker_client.go new file mode 100644 index 00000000000..9fe0ae3a0ae --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/iaas_service_broker_client.go @@ -0,0 +1,143 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package iaas_service_broker + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new iaas service broker API client. 
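// --- Editorial note (illustrative only, not part of the vendored diff) ---
// Of the four operations on the iaas_service_broker client defined below,
// only ServiceBrokerVersion takes an authInfo writer; Health, HealthHead,
// and TestTimeout are generated as unauthenticated calls. A minimal
// liveness-probe sketch, assuming c was built with New(transport,
// strfmt.Default):
//
//	if _, err := c.ServiceBrokerHealth(
//		iaas_service_broker.NewServiceBrokerHealthParams().
//			WithTimeout(10 * time.Second)); err != nil {
//		log.Fatalf("broker unhealthy: %v", err)
//	}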
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for iaas service broker API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +ServiceBrokerHealth gets current server health +*/ +func (a *Client) ServiceBrokerHealth(params *ServiceBrokerHealthParams) (*ServiceBrokerHealthOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerHealthParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.health", + Method: "GET", + PathPattern: "/broker/v1/health", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerHealthReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerHealthOK), nil + +} + +/* +ServiceBrokerHealthHead gets current server health +*/ +func (a *Client) ServiceBrokerHealthHead(params *ServiceBrokerHealthHeadParams) (*ServiceBrokerHealthHeadOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerHealthHeadParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.health.head", + Method: "HEAD", + PathPattern: "/broker/v1/health", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerHealthHeadReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerHealthHeadOK), nil + +} + +/* +ServiceBrokerTestTimeout gets current server version +*/ +func (a *Client) ServiceBrokerTestTimeout(params *ServiceBrokerTestTimeoutParams) (*ServiceBrokerTestTimeoutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerTestTimeoutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.test.timeout", + Method: "GET", + PathPattern: "/broker/v1/test/timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerTestTimeoutReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerTestTimeoutOK), nil + +} + +/* +ServiceBrokerVersion gets current server version +*/ +func (a *Client) ServiceBrokerVersion(params *ServiceBrokerVersionParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBrokerVersionOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerVersionParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.version", + Method: "GET", + PathPattern: "/broker/v1/version", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerVersionReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return 
result.(*ServiceBrokerVersionOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_head_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_head_parameters.go new file mode 100644 index 00000000000..211325ce0b8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_head_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package iaas_service_broker + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerHealthHeadParams creates a new ServiceBrokerHealthHeadParams object +// with the default values initialized. +func NewServiceBrokerHealthHeadParams() *ServiceBrokerHealthHeadParams { + + return &ServiceBrokerHealthHeadParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerHealthHeadParamsWithTimeout creates a new ServiceBrokerHealthHeadParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerHealthHeadParamsWithTimeout(timeout time.Duration) *ServiceBrokerHealthHeadParams { + + return &ServiceBrokerHealthHeadParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerHealthHeadParamsWithContext creates a new ServiceBrokerHealthHeadParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerHealthHeadParamsWithContext(ctx context.Context) *ServiceBrokerHealthHeadParams { + + return &ServiceBrokerHealthHeadParams{ + + Context: ctx, + } +} + +// NewServiceBrokerHealthHeadParamsWithHTTPClient creates a new ServiceBrokerHealthHeadParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerHealthHeadParamsWithHTTPClient(client *http.Client) *ServiceBrokerHealthHeadParams { + + return &ServiceBrokerHealthHeadParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerHealthHeadParams contains all the parameters to send to the API endpoint +for the service broker health head operation typically these are written to a http.Request +*/ +type ServiceBrokerHealthHeadParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker health head params +func (o *ServiceBrokerHealthHeadParams) WithTimeout(timeout time.Duration) *ServiceBrokerHealthHeadParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker health head params +func (o *ServiceBrokerHealthHeadParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker health head params +func (o *ServiceBrokerHealthHeadParams) WithContext(ctx context.Context) *ServiceBrokerHealthHeadParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker health head params +func (o *ServiceBrokerHealthHeadParams) SetContext(ctx context.Context) { + 
o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker health head params +func (o *ServiceBrokerHealthHeadParams) WithHTTPClient(client *http.Client) *ServiceBrokerHealthHeadParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker health head params +func (o *ServiceBrokerHealthHeadParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerHealthHeadParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_head_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_head_responses.go new file mode 100644 index 00000000000..880cac0b587 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_head_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package iaas_service_broker + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerHealthHeadReader is a Reader for the ServiceBrokerHealthHead structure. +type ServiceBrokerHealthHeadReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerHealthHeadReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerHealthHeadOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerHealthHeadBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerHealthHeadOK creates a ServiceBrokerHealthHeadOK with default headers values +func NewServiceBrokerHealthHeadOK() *ServiceBrokerHealthHeadOK { + return &ServiceBrokerHealthHeadOK{} +} + +/*ServiceBrokerHealthHeadOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerHealthHeadOK struct { + Payload *models.Health +} + +func (o *ServiceBrokerHealthHeadOK) Error() string { + return fmt.Sprintf("[HEAD /broker/v1/health][%d] serviceBrokerHealthHeadOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerHealthHeadOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Health) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerHealthHeadBadRequest creates a ServiceBrokerHealthHeadBadRequest with default headers values +func NewServiceBrokerHealthHeadBadRequest() *ServiceBrokerHealthHeadBadRequest { + return &ServiceBrokerHealthHeadBadRequest{} +} + +/*ServiceBrokerHealthHeadBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceBrokerHealthHeadBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerHealthHeadBadRequest) Error() string { + return fmt.Sprintf("[HEAD /broker/v1/health][%d] serviceBrokerHealthHeadBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerHealthHeadBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_parameters.go new file mode 100644 index 00000000000..f35798159d6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package iaas_service_broker + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerHealthParams creates a new ServiceBrokerHealthParams object +// with the default values initialized. 
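// --- Editorial note (illustrative only, not part of the vendored diff) ---
// Non-2xx responses surface as typed errors (the readers return them via
// `return nil, result`), so callers can recover the decoded payload with
// errors.As. A sketch using the HEAD health variant defined above; c is a
// caller-supplied client:
//
//	_, err := c.ServiceBrokerHealthHead(
//		iaas_service_broker.NewServiceBrokerHealthHeadParams())
//	var bad *iaas_service_broker.ServiceBrokerHealthHeadBadRequest
//	if errors.As(err, &bad) {
//		fmt.Printf("bad request: %+v\n", bad.Payload) // *models.Error
//	}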
+func NewServiceBrokerHealthParams() *ServiceBrokerHealthParams { + + return &ServiceBrokerHealthParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerHealthParamsWithTimeout creates a new ServiceBrokerHealthParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerHealthParamsWithTimeout(timeout time.Duration) *ServiceBrokerHealthParams { + + return &ServiceBrokerHealthParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerHealthParamsWithContext creates a new ServiceBrokerHealthParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerHealthParamsWithContext(ctx context.Context) *ServiceBrokerHealthParams { + + return &ServiceBrokerHealthParams{ + + Context: ctx, + } +} + +// NewServiceBrokerHealthParamsWithHTTPClient creates a new ServiceBrokerHealthParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerHealthParamsWithHTTPClient(client *http.Client) *ServiceBrokerHealthParams { + + return &ServiceBrokerHealthParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerHealthParams contains all the parameters to send to the API endpoint +for the service broker health operation typically these are written to a http.Request +*/ +type ServiceBrokerHealthParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker health params +func (o *ServiceBrokerHealthParams) WithTimeout(timeout time.Duration) *ServiceBrokerHealthParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker health params +func (o *ServiceBrokerHealthParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker health params +func (o *ServiceBrokerHealthParams) WithContext(ctx context.Context) *ServiceBrokerHealthParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker health params +func (o *ServiceBrokerHealthParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker health params +func (o *ServiceBrokerHealthParams) WithHTTPClient(client *http.Client) *ServiceBrokerHealthParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker health params +func (o *ServiceBrokerHealthParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerHealthParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_responses.go new file mode 100644 index 00000000000..9d9fd8cc382 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_health_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package iaas_service_broker + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerHealthReader is a Reader for the ServiceBrokerHealth structure. +type ServiceBrokerHealthReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerHealthReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerHealthOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerHealthBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerHealthOK creates a ServiceBrokerHealthOK with default headers values +func NewServiceBrokerHealthOK() *ServiceBrokerHealthOK { + return &ServiceBrokerHealthOK{} +} + +/*ServiceBrokerHealthOK handles this case with default header values. + +OK +*/ +type ServiceBrokerHealthOK struct { + Payload *models.Health +} + +func (o *ServiceBrokerHealthOK) Error() string { + return fmt.Sprintf("[GET /broker/v1/health][%d] serviceBrokerHealthOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerHealthOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Health) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerHealthBadRequest creates a ServiceBrokerHealthBadRequest with default headers values +func NewServiceBrokerHealthBadRequest() *ServiceBrokerHealthBadRequest { + return &ServiceBrokerHealthBadRequest{} +} + +/*ServiceBrokerHealthBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceBrokerHealthBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerHealthBadRequest) Error() string { + return fmt.Sprintf("[GET /broker/v1/health][%d] serviceBrokerHealthBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerHealthBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_test_timeout_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_test_timeout_parameters.go new file mode 100644 index 00000000000..e655336e7c3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_test_timeout_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package iaas_service_broker + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerTestTimeoutParams creates a new ServiceBrokerTestTimeoutParams object +// with the default values initialized. +func NewServiceBrokerTestTimeoutParams() *ServiceBrokerTestTimeoutParams { + var () + return &ServiceBrokerTestTimeoutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerTestTimeoutParamsWithTimeout creates a new ServiceBrokerTestTimeoutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerTestTimeoutParamsWithTimeout(timeout time.Duration) *ServiceBrokerTestTimeoutParams { + var () + return &ServiceBrokerTestTimeoutParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerTestTimeoutParamsWithContext creates a new ServiceBrokerTestTimeoutParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerTestTimeoutParamsWithContext(ctx context.Context) *ServiceBrokerTestTimeoutParams { + var () + return &ServiceBrokerTestTimeoutParams{ + + Context: ctx, + } +} + +// NewServiceBrokerTestTimeoutParamsWithHTTPClient creates a new ServiceBrokerTestTimeoutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerTestTimeoutParamsWithHTTPClient(client *http.Client) *ServiceBrokerTestTimeoutParams { + var () + return &ServiceBrokerTestTimeoutParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerTestTimeoutParams contains all the parameters to send to the API endpoint +for the service broker test timeout operation typically these are written to a http.Request +*/ +type ServiceBrokerTestTimeoutParams struct { + + /*T + seconds + + */ + T int64 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker test timeout params +func (o *ServiceBrokerTestTimeoutParams) WithTimeout(timeout time.Duration) *ServiceBrokerTestTimeoutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker test timeout params +func (o *ServiceBrokerTestTimeoutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker test timeout params +func (o *ServiceBrokerTestTimeoutParams) WithContext(ctx context.Context) *ServiceBrokerTestTimeoutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker test timeout params +func (o *ServiceBrokerTestTimeoutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker test timeout params +func (o *ServiceBrokerTestTimeoutParams) WithHTTPClient(client *http.Client) *ServiceBrokerTestTimeoutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker test timeout params +func (o *ServiceBrokerTestTimeoutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithT adds the t to the service broker test timeout params +func (o *ServiceBrokerTestTimeoutParams) WithT(t int64) *ServiceBrokerTestTimeoutParams { + o.SetT(t) + return o +} + +// SetT adds the t to the service 
broker test timeout params +func (o *ServiceBrokerTestTimeoutParams) SetT(t int64) { + o.T = t +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerTestTimeoutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param t + qrT := o.T + qT := swag.FormatInt64(qrT) + if qT != "" { + if err := r.SetQueryParam("t", qT); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_test_timeout_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_test_timeout_responses.go new file mode 100644 index 00000000000..5aa692909b4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_test_timeout_responses.go @@ -0,0 +1,65 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package iaas_service_broker + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerTestTimeoutReader is a Reader for the ServiceBrokerTestTimeout structure. +type ServiceBrokerTestTimeoutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerTestTimeoutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerTestTimeoutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerTestTimeoutOK creates a ServiceBrokerTestTimeoutOK with default headers values +func NewServiceBrokerTestTimeoutOK() *ServiceBrokerTestTimeoutOK { + return &ServiceBrokerTestTimeoutOK{} +} + +/*ServiceBrokerTestTimeoutOK handles this case with default header values. + +OK +*/ +type ServiceBrokerTestTimeoutOK struct { + Payload models.Object +} + +func (o *ServiceBrokerTestTimeoutOK) Error() string { + return fmt.Sprintf("[GET /broker/v1/test/timeout][%d] serviceBrokerTestTimeoutOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerTestTimeoutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_version_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_version_parameters.go new file mode 100644 index 00000000000..61d0bcb56e2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_version_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package iaas_service_broker + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerVersionParams creates a new ServiceBrokerVersionParams object +// with the default values initialized. +func NewServiceBrokerVersionParams() *ServiceBrokerVersionParams { + + return &ServiceBrokerVersionParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerVersionParamsWithTimeout creates a new ServiceBrokerVersionParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerVersionParamsWithTimeout(timeout time.Duration) *ServiceBrokerVersionParams { + + return &ServiceBrokerVersionParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerVersionParamsWithContext creates a new ServiceBrokerVersionParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerVersionParamsWithContext(ctx context.Context) *ServiceBrokerVersionParams { + + return &ServiceBrokerVersionParams{ + + Context: ctx, + } +} + +// NewServiceBrokerVersionParamsWithHTTPClient creates a new ServiceBrokerVersionParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerVersionParamsWithHTTPClient(client *http.Client) *ServiceBrokerVersionParams { + + return &ServiceBrokerVersionParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerVersionParams contains all the parameters to send to the API endpoint +for the service broker version operation typically these are written to a http.Request +*/ +type ServiceBrokerVersionParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker version params +func (o *ServiceBrokerVersionParams) WithTimeout(timeout time.Duration) *ServiceBrokerVersionParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker version params +func (o *ServiceBrokerVersionParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker version params +func (o *ServiceBrokerVersionParams) WithContext(ctx context.Context) *ServiceBrokerVersionParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker version params +func (o *ServiceBrokerVersionParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker version params +func (o *ServiceBrokerVersionParams) WithHTTPClient(client *http.Client) *ServiceBrokerVersionParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker version params +func (o *ServiceBrokerVersionParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerVersionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_version_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_version_responses.go new file mode 100644 index 00000000000..452ac817dc5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker/service_broker_version_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package iaas_service_broker + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerVersionReader is a Reader for the ServiceBrokerVersion structure. +type ServiceBrokerVersionReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerVersionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerVersionOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerVersionBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerVersionOK creates a ServiceBrokerVersionOK with default headers values +func NewServiceBrokerVersionOK() *ServiceBrokerVersionOK { + return &ServiceBrokerVersionOK{} +} + +/*ServiceBrokerVersionOK handles this case with default header values. + +OK +*/ +type ServiceBrokerVersionOK struct { + Payload *models.Version +} + +func (o *ServiceBrokerVersionOK) Error() string { + return fmt.Sprintf("[GET /broker/v1/version][%d] serviceBrokerVersionOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerVersionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Version) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerVersionBadRequest creates a ServiceBrokerVersionBadRequest with default headers values +func NewServiceBrokerVersionBadRequest() *ServiceBrokerVersionBadRequest { + return &ServiceBrokerVersionBadRequest{} +} + +/*ServiceBrokerVersionBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type ServiceBrokerVersionBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerVersionBadRequest) Error() string { + return fmt.Sprintf("[GET /broker/v1/version][%d] serviceBrokerVersionBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerVersionBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/open_stacks_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/open_stacks_client.go new file mode 100644 index 00000000000..bf6c4edc5e3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/open_stacks_client.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new open stacks API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for open stacks API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +ServiceBrokerOpenstacksGet lists all open stack instances being managed +*/ +func (a *Client) ServiceBrokerOpenstacksGet(params *ServiceBrokerOpenstacksGetParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBrokerOpenstacksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerOpenstacksGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.openstacks.get", + Method: "GET", + PathPattern: "/broker/v1/openstacks", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerOpenstacksGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerOpenstacksGetOK), nil + +} + +/* +ServiceBrokerOpenstacksHostsGet lists account information for all pvm instances on hostname +*/ +func (a *Client) ServiceBrokerOpenstacksHostsGet(params *ServiceBrokerOpenstacksHostsGetParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBrokerOpenstacksHostsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerOpenstacksHostsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.openstacks.hosts.get", + Method: "GET", + PathPattern: "/broker/v1/openstacks/{openstack_id}/hosts/{hostname}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerOpenstacksHostsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerOpenstacksHostsGetOK), nil + +} + +/* 
+ServiceBrokerOpenstacksOpenstackGet gets account information for an open stack instance +*/ +func (a *Client) ServiceBrokerOpenstacksOpenstackGet(params *ServiceBrokerOpenstacksOpenstackGetParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBrokerOpenstacksOpenstackGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerOpenstacksOpenstackGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.openstacks.openstack.get", + Method: "GET", + PathPattern: "/broker/v1/openstacks/{openstack_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerOpenstacksOpenstackGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerOpenstacksOpenstackGetOK), nil + +} + +/* +ServiceBrokerOpenstacksPost creates a new open stack instance to be managed +*/ +func (a *Client) ServiceBrokerOpenstacksPost(params *ServiceBrokerOpenstacksPostParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBrokerOpenstacksPostOK, *ServiceBrokerOpenstacksPostCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerOpenstacksPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.openstacks.post", + Method: "POST", + PathPattern: "/broker/v1/openstacks", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerOpenstacksPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *ServiceBrokerOpenstacksPostOK: + return value, nil, nil + case *ServiceBrokerOpenstacksPostCreated: + return nil, value, nil + } + return nil, nil, nil + +} + +/* +ServiceBrokerOpenstacksServersGet lists account information for a pvm instance +*/ +func (a *Client) ServiceBrokerOpenstacksServersGet(params *ServiceBrokerOpenstacksServersGetParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBrokerOpenstacksServersGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerOpenstacksServersGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.openstacks.servers.get", + Method: "GET", + PathPattern: "/broker/v1/openstacks/{openstack_id}/servers/{pvm_instance_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerOpenstacksServersGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerOpenstacksServersGetOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_get_parameters.go
b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_get_parameters.go new file mode 100644 index 00000000000..6e9fa9b973e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerOpenstacksGetParams creates a new ServiceBrokerOpenstacksGetParams object +// with the default values initialized. +func NewServiceBrokerOpenstacksGetParams() *ServiceBrokerOpenstacksGetParams { + + return &ServiceBrokerOpenstacksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerOpenstacksGetParamsWithTimeout creates a new ServiceBrokerOpenstacksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerOpenstacksGetParamsWithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksGetParams { + + return &ServiceBrokerOpenstacksGetParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerOpenstacksGetParamsWithContext creates a new ServiceBrokerOpenstacksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerOpenstacksGetParamsWithContext(ctx context.Context) *ServiceBrokerOpenstacksGetParams { + + return &ServiceBrokerOpenstacksGetParams{ + + Context: ctx, + } +} + +// NewServiceBrokerOpenstacksGetParamsWithHTTPClient creates a new ServiceBrokerOpenstacksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerOpenstacksGetParamsWithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksGetParams { + + return &ServiceBrokerOpenstacksGetParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerOpenstacksGetParams contains all the parameters to send to the API endpoint +for the service broker openstacks get operation typically these are written to a http.Request +*/ +type ServiceBrokerOpenstacksGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker openstacks get params +func (o *ServiceBrokerOpenstacksGetParams) WithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker openstacks get params +func (o *ServiceBrokerOpenstacksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker openstacks get params +func (o *ServiceBrokerOpenstacksGetParams) WithContext(ctx context.Context) *ServiceBrokerOpenstacksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker openstacks get params +func (o *ServiceBrokerOpenstacksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker openstacks get params +func (o *ServiceBrokerOpenstacksGetParams) WithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksGetParams { + o.SetHTTPClient(client) + 
return o +} + +// SetHTTPClient adds the HTTPClient to the service broker openstacks get params +func (o *ServiceBrokerOpenstacksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerOpenstacksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_get_responses.go new file mode 100644 index 00000000000..d37d1514cd6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerOpenstacksGetReader is a Reader for the ServiceBrokerOpenstacksGet structure. +type ServiceBrokerOpenstacksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerOpenstacksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerOpenstacksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerOpenstacksGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 403: + result := NewServiceBrokerOpenstacksGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerOpenstacksGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerOpenstacksGetOK creates a ServiceBrokerOpenstacksGetOK with default headers values +func NewServiceBrokerOpenstacksGetOK() *ServiceBrokerOpenstacksGetOK { + return &ServiceBrokerOpenstacksGetOK{} +} + +/*ServiceBrokerOpenstacksGetOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerOpenstacksGetOK struct { + Payload *models.OpenStacks +} + +func (o *ServiceBrokerOpenstacksGetOK) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks][%d] serviceBrokerOpenstacksGetOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerOpenstacksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.OpenStacks) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksGetBadRequest creates a ServiceBrokerOpenstacksGetBadRequest with default headers values +func NewServiceBrokerOpenstacksGetBadRequest() *ServiceBrokerOpenstacksGetBadRequest { + return &ServiceBrokerOpenstacksGetBadRequest{} +} + +/*ServiceBrokerOpenstacksGetBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceBrokerOpenstacksGetBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksGetBadRequest) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks][%d] serviceBrokerOpenstacksGetBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerOpenstacksGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksGetForbidden creates a ServiceBrokerOpenstacksGetForbidden with default headers values +func NewServiceBrokerOpenstacksGetForbidden() *ServiceBrokerOpenstacksGetForbidden { + return &ServiceBrokerOpenstacksGetForbidden{} +} + +/*ServiceBrokerOpenstacksGetForbidden handles this case with default header values. + +Unauthorized +*/ +type ServiceBrokerOpenstacksGetForbidden struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksGetForbidden) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks][%d] serviceBrokerOpenstacksGetForbidden %+v", 403, o.Payload) +} + +func (o *ServiceBrokerOpenstacksGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksGetInternalServerError creates a ServiceBrokerOpenstacksGetInternalServerError with default headers values +func NewServiceBrokerOpenstacksGetInternalServerError() *ServiceBrokerOpenstacksGetInternalServerError { + return &ServiceBrokerOpenstacksGetInternalServerError{} +} + +/*ServiceBrokerOpenstacksGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type ServiceBrokerOpenstacksGetInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks][%d] serviceBrokerOpenstacksGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerOpenstacksGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_hosts_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_hosts_get_parameters.go new file mode 100644 index 00000000000..fdacc2ced6b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_hosts_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerOpenstacksHostsGetParams creates a new ServiceBrokerOpenstacksHostsGetParams object +// with the default values initialized. +func NewServiceBrokerOpenstacksHostsGetParams() *ServiceBrokerOpenstacksHostsGetParams { + var () + return &ServiceBrokerOpenstacksHostsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerOpenstacksHostsGetParamsWithTimeout creates a new ServiceBrokerOpenstacksHostsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerOpenstacksHostsGetParamsWithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksHostsGetParams { + var () + return &ServiceBrokerOpenstacksHostsGetParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerOpenstacksHostsGetParamsWithContext creates a new ServiceBrokerOpenstacksHostsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerOpenstacksHostsGetParamsWithContext(ctx context.Context) *ServiceBrokerOpenstacksHostsGetParams { + var () + return &ServiceBrokerOpenstacksHostsGetParams{ + + Context: ctx, + } +} + +// NewServiceBrokerOpenstacksHostsGetParamsWithHTTPClient creates a new ServiceBrokerOpenstacksHostsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerOpenstacksHostsGetParamsWithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksHostsGetParams { + var () + return &ServiceBrokerOpenstacksHostsGetParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerOpenstacksHostsGetParams contains all the parameters to send to the API endpoint +for the service broker openstacks hosts get operation typically these are written to a http.Request +*/ +type ServiceBrokerOpenstacksHostsGetParams struct { + + /*Hostname + Hostname + + */ + Hostname string + /*OpenstackID + Openstack ID + + */ + OpenstackID string + + timeout time.Duration + Context context.Context + HTTPClient 
*http.Client +} + +// WithTimeout adds the timeout to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) WithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksHostsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) WithContext(ctx context.Context) *ServiceBrokerOpenstacksHostsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) WithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksHostsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithHostname adds the hostname to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) WithHostname(hostname string) *ServiceBrokerOpenstacksHostsGetParams { + o.SetHostname(hostname) + return o +} + +// SetHostname adds the hostname to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) SetHostname(hostname string) { + o.Hostname = hostname +} + +// WithOpenstackID adds the openstackID to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) WithOpenstackID(openstackID string) *ServiceBrokerOpenstacksHostsGetParams { + o.SetOpenstackID(openstackID) + return o +} + +// SetOpenstackID adds the openstackId to the service broker openstacks hosts get params +func (o *ServiceBrokerOpenstacksHostsGetParams) SetOpenstackID(openstackID string) { + o.OpenstackID = openstackID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerOpenstacksHostsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param hostname + if err := r.SetPathParam("hostname", o.Hostname); err != nil { + return err + } + + // path param openstack_id + if err := r.SetPathParam("openstack_id", o.OpenstackID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_hosts_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_hosts_get_responses.go new file mode 100644 index 00000000000..e747dff7b8e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_hosts_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerOpenstacksHostsGetReader is a Reader for the ServiceBrokerOpenstacksHostsGet structure. +type ServiceBrokerOpenstacksHostsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerOpenstacksHostsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerOpenstacksHostsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerOpenstacksHostsGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewServiceBrokerOpenstacksHostsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerOpenstacksHostsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerOpenstacksHostsGetOK creates a ServiceBrokerOpenstacksHostsGetOK with default headers values +func NewServiceBrokerOpenstacksHostsGetOK() *ServiceBrokerOpenstacksHostsGetOK { + return &ServiceBrokerOpenstacksHostsGetOK{} +} + +/*ServiceBrokerOpenstacksHostsGetOK handles this case with default header values. + +OK +*/ +type ServiceBrokerOpenstacksHostsGetOK struct { + Payload *models.HostInfo +} + +func (o *ServiceBrokerOpenstacksHostsGetOK) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}/hosts/{hostname}][%d] serviceBrokerOpenstacksHostsGetOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerOpenstacksHostsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.HostInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksHostsGetBadRequest creates a ServiceBrokerOpenstacksHostsGetBadRequest with default headers values +func NewServiceBrokerOpenstacksHostsGetBadRequest() *ServiceBrokerOpenstacksHostsGetBadRequest { + return &ServiceBrokerOpenstacksHostsGetBadRequest{} +} + +/*ServiceBrokerOpenstacksHostsGetBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type ServiceBrokerOpenstacksHostsGetBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksHostsGetBadRequest) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}/hosts/{hostname}][%d] serviceBrokerOpenstacksHostsGetBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerOpenstacksHostsGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksHostsGetNotFound creates a ServiceBrokerOpenstacksHostsGetNotFound with default headers values +func NewServiceBrokerOpenstacksHostsGetNotFound() *ServiceBrokerOpenstacksHostsGetNotFound { + return &ServiceBrokerOpenstacksHostsGetNotFound{} +} + +/*ServiceBrokerOpenstacksHostsGetNotFound handles this case with default header values. + +Not Found +*/ +type ServiceBrokerOpenstacksHostsGetNotFound struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksHostsGetNotFound) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}/hosts/{hostname}][%d] serviceBrokerOpenstacksHostsGetNotFound %+v", 404, o.Payload) +} + +func (o *ServiceBrokerOpenstacksHostsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksHostsGetInternalServerError creates a ServiceBrokerOpenstacksHostsGetInternalServerError with default headers values +func NewServiceBrokerOpenstacksHostsGetInternalServerError() *ServiceBrokerOpenstacksHostsGetInternalServerError { + return &ServiceBrokerOpenstacksHostsGetInternalServerError{} +} + +/*ServiceBrokerOpenstacksHostsGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerOpenstacksHostsGetInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksHostsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}/hosts/{hostname}][%d] serviceBrokerOpenstacksHostsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerOpenstacksHostsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_openstack_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_openstack_get_parameters.go new file mode 100644 index 00000000000..4bcf4ee0745 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_openstack_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerOpenstacksOpenstackGetParams creates a new ServiceBrokerOpenstacksOpenstackGetParams object +// with the default values initialized. +func NewServiceBrokerOpenstacksOpenstackGetParams() *ServiceBrokerOpenstacksOpenstackGetParams { + var () + return &ServiceBrokerOpenstacksOpenstackGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerOpenstacksOpenstackGetParamsWithTimeout creates a new ServiceBrokerOpenstacksOpenstackGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerOpenstacksOpenstackGetParamsWithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksOpenstackGetParams { + var () + return &ServiceBrokerOpenstacksOpenstackGetParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerOpenstacksOpenstackGetParamsWithContext creates a new ServiceBrokerOpenstacksOpenstackGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerOpenstacksOpenstackGetParamsWithContext(ctx context.Context) *ServiceBrokerOpenstacksOpenstackGetParams { + var () + return &ServiceBrokerOpenstacksOpenstackGetParams{ + + Context: ctx, + } +} + +// NewServiceBrokerOpenstacksOpenstackGetParamsWithHTTPClient creates a new ServiceBrokerOpenstacksOpenstackGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerOpenstacksOpenstackGetParamsWithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksOpenstackGetParams { + var () + return &ServiceBrokerOpenstacksOpenstackGetParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerOpenstacksOpenstackGetParams contains all the parameters to send to the API endpoint +for the service broker openstacks openstack get operation typically these are written to a http.Request +*/ +type ServiceBrokerOpenstacksOpenstackGetParams struct { + + /*OpenstackID + Openstack ID + + */ + OpenstackID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker openstacks openstack get params +func (o *ServiceBrokerOpenstacksOpenstackGetParams) WithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksOpenstackGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker openstacks openstack get params +func (o *ServiceBrokerOpenstacksOpenstackGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker openstacks openstack get params +func (o *ServiceBrokerOpenstacksOpenstackGetParams) WithContext(ctx context.Context) *ServiceBrokerOpenstacksOpenstackGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker openstacks openstack get params +func (o *ServiceBrokerOpenstacksOpenstackGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker openstacks openstack get params +func (o *ServiceBrokerOpenstacksOpenstackGetParams) WithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksOpenstackGetParams { + o.SetHTTPClient(client) + return o +} + +// 
SetHTTPClient adds the HTTPClient to the service broker openstacks openstack get params +func (o *ServiceBrokerOpenstacksOpenstackGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithOpenstackID adds the openstackID to the service broker openstacks openstack get params +func (o *ServiceBrokerOpenstacksOpenstackGetParams) WithOpenstackID(openstackID string) *ServiceBrokerOpenstacksOpenstackGetParams { + o.SetOpenstackID(openstackID) + return o +} + +// SetOpenstackID adds the openstackId to the service broker openstacks openstack get params +func (o *ServiceBrokerOpenstacksOpenstackGetParams) SetOpenstackID(openstackID string) { + o.OpenstackID = openstackID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerOpenstacksOpenstackGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param openstack_id + if err := r.SetPathParam("openstack_id", o.OpenstackID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_openstack_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_openstack_get_responses.go new file mode 100644 index 00000000000..b427be39a8a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_openstack_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerOpenstacksOpenstackGetReader is a Reader for the ServiceBrokerOpenstacksOpenstackGet structure. +type ServiceBrokerOpenstacksOpenstackGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ServiceBrokerOpenstacksOpenstackGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerOpenstacksOpenstackGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerOpenstacksOpenstackGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewServiceBrokerOpenstacksOpenstackGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerOpenstacksOpenstackGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerOpenstacksOpenstackGetOK creates a ServiceBrokerOpenstacksOpenstackGetOK with default headers values +func NewServiceBrokerOpenstacksOpenstackGetOK() *ServiceBrokerOpenstacksOpenstackGetOK { + return &ServiceBrokerOpenstacksOpenstackGetOK{} +} + +/*ServiceBrokerOpenstacksOpenstackGetOK handles this case with default header values. + +OK +*/ +type ServiceBrokerOpenstacksOpenstackGetOK struct { + Payload *models.OpenStackInfo +} + +func (o *ServiceBrokerOpenstacksOpenstackGetOK) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}][%d] serviceBrokerOpenstacksOpenstackGetOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerOpenstacksOpenstackGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.OpenStackInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksOpenstackGetBadRequest creates a ServiceBrokerOpenstacksOpenstackGetBadRequest with default headers values +func NewServiceBrokerOpenstacksOpenstackGetBadRequest() *ServiceBrokerOpenstacksOpenstackGetBadRequest { + return &ServiceBrokerOpenstacksOpenstackGetBadRequest{} +} + +/*ServiceBrokerOpenstacksOpenstackGetBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceBrokerOpenstacksOpenstackGetBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksOpenstackGetBadRequest) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}][%d] serviceBrokerOpenstacksOpenstackGetBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerOpenstacksOpenstackGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksOpenstackGetNotFound creates a ServiceBrokerOpenstacksOpenstackGetNotFound with default headers values +func NewServiceBrokerOpenstacksOpenstackGetNotFound() *ServiceBrokerOpenstacksOpenstackGetNotFound { + return &ServiceBrokerOpenstacksOpenstackGetNotFound{} +} + +/*ServiceBrokerOpenstacksOpenstackGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type ServiceBrokerOpenstacksOpenstackGetNotFound struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksOpenstackGetNotFound) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}][%d] serviceBrokerOpenstacksOpenstackGetNotFound %+v", 404, o.Payload) +} + +func (o *ServiceBrokerOpenstacksOpenstackGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksOpenstackGetInternalServerError creates a ServiceBrokerOpenstacksOpenstackGetInternalServerError with default headers values +func NewServiceBrokerOpenstacksOpenstackGetInternalServerError() *ServiceBrokerOpenstacksOpenstackGetInternalServerError { + return &ServiceBrokerOpenstacksOpenstackGetInternalServerError{} +} + +/*ServiceBrokerOpenstacksOpenstackGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerOpenstacksOpenstackGetInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksOpenstackGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}][%d] serviceBrokerOpenstacksOpenstackGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerOpenstacksOpenstackGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_post_parameters.go new file mode 100644 index 00000000000..b505aa4bc66 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_post_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewServiceBrokerOpenstacksPostParams creates a new ServiceBrokerOpenstacksPostParams object +// with the default values initialized. 
+func NewServiceBrokerOpenstacksPostParams() *ServiceBrokerOpenstacksPostParams { + var () + return &ServiceBrokerOpenstacksPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerOpenstacksPostParamsWithTimeout creates a new ServiceBrokerOpenstacksPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerOpenstacksPostParamsWithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksPostParams { + var () + return &ServiceBrokerOpenstacksPostParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerOpenstacksPostParamsWithContext creates a new ServiceBrokerOpenstacksPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerOpenstacksPostParamsWithContext(ctx context.Context) *ServiceBrokerOpenstacksPostParams { + var () + return &ServiceBrokerOpenstacksPostParams{ + + Context: ctx, + } +} + +// NewServiceBrokerOpenstacksPostParamsWithHTTPClient creates a new ServiceBrokerOpenstacksPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerOpenstacksPostParamsWithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksPostParams { + var () + return &ServiceBrokerOpenstacksPostParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerOpenstacksPostParams contains all the parameters to send to the API endpoint +for the service broker openstacks post operation typically these are written to a http.Request +*/ +type ServiceBrokerOpenstacksPostParams struct { + + /*Body + Parameters for the creation of a new Open Stack Instance + + */ + Body *models.OpenStackCreate + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker openstacks post params +func (o *ServiceBrokerOpenstacksPostParams) WithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker openstacks post params +func (o *ServiceBrokerOpenstacksPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker openstacks post params +func (o *ServiceBrokerOpenstacksPostParams) WithContext(ctx context.Context) *ServiceBrokerOpenstacksPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker openstacks post params +func (o *ServiceBrokerOpenstacksPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker openstacks post params +func (o *ServiceBrokerOpenstacksPostParams) WithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker openstacks post params +func (o *ServiceBrokerOpenstacksPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the service broker openstacks post params +func (o *ServiceBrokerOpenstacksPostParams) WithBody(body *models.OpenStackCreate) *ServiceBrokerOpenstacksPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the service broker openstacks post params +func (o *ServiceBrokerOpenstacksPostParams) SetBody(body *models.OpenStackCreate) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o 
*ServiceBrokerOpenstacksPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_post_responses.go new file mode 100644 index 00000000000..99a18430182 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_post_responses.go @@ -0,0 +1,247 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerOpenstacksPostReader is a Reader for the ServiceBrokerOpenstacksPost structure. +type ServiceBrokerOpenstacksPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerOpenstacksPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerOpenstacksPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 201: + result := NewServiceBrokerOpenstacksPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerOpenstacksPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewServiceBrokerOpenstacksPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewServiceBrokerOpenstacksPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerOpenstacksPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerOpenstacksPostOK creates a ServiceBrokerOpenstacksPostOK with default headers values +func NewServiceBrokerOpenstacksPostOK() *ServiceBrokerOpenstacksPostOK { + return &ServiceBrokerOpenstacksPostOK{} +} + +/*ServiceBrokerOpenstacksPostOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerOpenstacksPostOK struct { + Payload *models.OpenStack +} + +func (o *ServiceBrokerOpenstacksPostOK) Error() string { + return fmt.Sprintf("[POST /broker/v1/openstacks][%d] serviceBrokerOpenstacksPostOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerOpenstacksPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.OpenStack) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksPostCreated creates a ServiceBrokerOpenstacksPostCreated with default headers values +func NewServiceBrokerOpenstacksPostCreated() *ServiceBrokerOpenstacksPostCreated { + return &ServiceBrokerOpenstacksPostCreated{} +} + +/*ServiceBrokerOpenstacksPostCreated handles this case with default header values. + +Created +*/ +type ServiceBrokerOpenstacksPostCreated struct { + Payload *models.OpenStack +} + +func (o *ServiceBrokerOpenstacksPostCreated) Error() string { + return fmt.Sprintf("[POST /broker/v1/openstacks][%d] serviceBrokerOpenstacksPostCreated %+v", 201, o.Payload) +} + +func (o *ServiceBrokerOpenstacksPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.OpenStack) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksPostBadRequest creates a ServiceBrokerOpenstacksPostBadRequest with default headers values +func NewServiceBrokerOpenstacksPostBadRequest() *ServiceBrokerOpenstacksPostBadRequest { + return &ServiceBrokerOpenstacksPostBadRequest{} +} + +/*ServiceBrokerOpenstacksPostBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceBrokerOpenstacksPostBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksPostBadRequest) Error() string { + return fmt.Sprintf("[POST /broker/v1/openstacks][%d] serviceBrokerOpenstacksPostBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerOpenstacksPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksPostConflict creates a ServiceBrokerOpenstacksPostConflict with default headers values +func NewServiceBrokerOpenstacksPostConflict() *ServiceBrokerOpenstacksPostConflict { + return &ServiceBrokerOpenstacksPostConflict{} +} + +/*ServiceBrokerOpenstacksPostConflict handles this case with default header values. 
+ +Conflict +*/ +type ServiceBrokerOpenstacksPostConflict struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksPostConflict) Error() string { + return fmt.Sprintf("[POST /broker/v1/openstacks][%d] serviceBrokerOpenstacksPostConflict %+v", 409, o.Payload) +} + +func (o *ServiceBrokerOpenstacksPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksPostUnprocessableEntity creates a ServiceBrokerOpenstacksPostUnprocessableEntity with default headers values +func NewServiceBrokerOpenstacksPostUnprocessableEntity() *ServiceBrokerOpenstacksPostUnprocessableEntity { + return &ServiceBrokerOpenstacksPostUnprocessableEntity{} +} + +/*ServiceBrokerOpenstacksPostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type ServiceBrokerOpenstacksPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /broker/v1/openstacks][%d] serviceBrokerOpenstacksPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ServiceBrokerOpenstacksPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksPostInternalServerError creates a ServiceBrokerOpenstacksPostInternalServerError with default headers values +func NewServiceBrokerOpenstacksPostInternalServerError() *ServiceBrokerOpenstacksPostInternalServerError { + return &ServiceBrokerOpenstacksPostInternalServerError{} +} + +/*ServiceBrokerOpenstacksPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerOpenstacksPostInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /broker/v1/openstacks][%d] serviceBrokerOpenstacksPostInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerOpenstacksPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_servers_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_servers_get_parameters.go new file mode 100644 index 00000000000..9143ff59ab2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_servers_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerOpenstacksServersGetParams creates a new ServiceBrokerOpenstacksServersGetParams object +// with the default values initialized. +func NewServiceBrokerOpenstacksServersGetParams() *ServiceBrokerOpenstacksServersGetParams { + var () + return &ServiceBrokerOpenstacksServersGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerOpenstacksServersGetParamsWithTimeout creates a new ServiceBrokerOpenstacksServersGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerOpenstacksServersGetParamsWithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksServersGetParams { + var () + return &ServiceBrokerOpenstacksServersGetParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerOpenstacksServersGetParamsWithContext creates a new ServiceBrokerOpenstacksServersGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerOpenstacksServersGetParamsWithContext(ctx context.Context) *ServiceBrokerOpenstacksServersGetParams { + var () + return &ServiceBrokerOpenstacksServersGetParams{ + + Context: ctx, + } +} + +// NewServiceBrokerOpenstacksServersGetParamsWithHTTPClient creates a new ServiceBrokerOpenstacksServersGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerOpenstacksServersGetParamsWithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksServersGetParams { + var () + return &ServiceBrokerOpenstacksServersGetParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerOpenstacksServersGetParams contains all the parameters to send to the API endpoint +for the service broker openstacks servers get operation typically these are written to a http.Request +*/ +type ServiceBrokerOpenstacksServersGetParams struct { + + /*OpenstackID + Openstack ID + + */ + OpenstackID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) WithTimeout(timeout time.Duration) *ServiceBrokerOpenstacksServersGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) WithContext(ctx context.Context) *ServiceBrokerOpenstacksServersGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) WithHTTPClient(client *http.Client) *ServiceBrokerOpenstacksServersGetParams { + o.SetHTTPClient(client) + return o +} + +// 
SetHTTPClient adds the HTTPClient to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithOpenstackID adds the openstackID to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) WithOpenstackID(openstackID string) *ServiceBrokerOpenstacksServersGetParams { + o.SetOpenstackID(openstackID) + return o +} + +// SetOpenstackID adds the openstackId to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) SetOpenstackID(openstackID string) { + o.OpenstackID = openstackID +} + +// WithPvmInstanceID adds the pvmInstanceID to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) WithPvmInstanceID(pvmInstanceID string) *ServiceBrokerOpenstacksServersGetParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the service broker openstacks servers get params +func (o *ServiceBrokerOpenstacksServersGetParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerOpenstacksServersGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param openstack_id + if err := r.SetPathParam("openstack_id", o.OpenstackID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_servers_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_servers_get_responses.go new file mode 100644 index 00000000000..901260334f2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/open_stacks/service_broker_openstacks_servers_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package open_stacks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerOpenstacksServersGetReader is a Reader for the ServiceBrokerOpenstacksServersGet structure. +type ServiceBrokerOpenstacksServersGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
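+// Non-2xx responses come back through the error value, so callers can
+// recover the decoded payload by type assertion. An illustrative sketch
+// (editor's addition, not generated; resp and reader are assumed in scope):
+//
+//	res, err := reader.ReadResponse(resp, runtime.JSONConsumer())
+//	if nf, ok := err.(*ServiceBrokerOpenstacksServersGetNotFound); ok {
+//		fmt.Printf("not found: %+v\n", nf.Payload)
+//	}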
+func (o *ServiceBrokerOpenstacksServersGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerOpenstacksServersGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBrokerOpenstacksServersGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewServiceBrokerOpenstacksServersGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewServiceBrokerOpenstacksServersGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerOpenstacksServersGetOK creates a ServiceBrokerOpenstacksServersGetOK with default headers values +func NewServiceBrokerOpenstacksServersGetOK() *ServiceBrokerOpenstacksServersGetOK { + return &ServiceBrokerOpenstacksServersGetOK{} +} + +/*ServiceBrokerOpenstacksServersGetOK handles this case with default header values. + +OK +*/ +type ServiceBrokerOpenstacksServersGetOK struct { + Payload *models.HostPVMInstance +} + +func (o *ServiceBrokerOpenstacksServersGetOK) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}/servers/{pvm_instance_id}][%d] serviceBrokerOpenstacksServersGetOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerOpenstacksServersGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.HostPVMInstance) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksServersGetBadRequest creates a ServiceBrokerOpenstacksServersGetBadRequest with default headers values +func NewServiceBrokerOpenstacksServersGetBadRequest() *ServiceBrokerOpenstacksServersGetBadRequest { + return &ServiceBrokerOpenstacksServersGetBadRequest{} +} + +/*ServiceBrokerOpenstacksServersGetBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceBrokerOpenstacksServersGetBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksServersGetBadRequest) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}/servers/{pvm_instance_id}][%d] serviceBrokerOpenstacksServersGetBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBrokerOpenstacksServersGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksServersGetNotFound creates a ServiceBrokerOpenstacksServersGetNotFound with default headers values +func NewServiceBrokerOpenstacksServersGetNotFound() *ServiceBrokerOpenstacksServersGetNotFound { + return &ServiceBrokerOpenstacksServersGetNotFound{} +} + +/*ServiceBrokerOpenstacksServersGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type ServiceBrokerOpenstacksServersGetNotFound struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksServersGetNotFound) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}/servers/{pvm_instance_id}][%d] serviceBrokerOpenstacksServersGetNotFound %+v", 404, o.Payload) +} + +func (o *ServiceBrokerOpenstacksServersGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerOpenstacksServersGetInternalServerError creates a ServiceBrokerOpenstacksServersGetInternalServerError with default headers values +func NewServiceBrokerOpenstacksServersGetInternalServerError() *ServiceBrokerOpenstacksServersGetInternalServerError { + return &ServiceBrokerOpenstacksServersGetInternalServerError{} +} + +/*ServiceBrokerOpenstacksServersGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerOpenstacksServersGetInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerOpenstacksServersGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /broker/v1/openstacks/{openstack_id}/servers/{pvm_instance_id}][%d] serviceBrokerOpenstacksServersGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerOpenstacksServersGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/p_cloud_cloud_connections_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/p_cloud_cloud_connections_client.go new file mode 100644 index 00000000000..dbb88b41b57 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/p_cloud_cloud_connections_client.go @@ -0,0 +1,311 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud cloud connections API client. 
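+//
+// Editor's note, an illustrative sketch rather than generated code
+// (httptransport is github.com/go-openapi/runtime/client; the host and
+// schemes are placeholders):
+//
+//	transport := httptransport.New("power-iaas.example.com", "/", []string{"https"})
+//	client := New(transport, strfmt.Default)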
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud cloud connections API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudCloudconnectionsDelete deletes a cloud connection +*/ +func (a *Client) PcloudCloudconnectionsDelete(params *PcloudCloudconnectionsDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudconnectionsDeleteOK, *PcloudCloudconnectionsDeleteAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudconnectionsDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudconnections.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudconnectionsDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *PcloudCloudconnectionsDeleteOK: + return value, nil, nil + case *PcloudCloudconnectionsDeleteAccepted: + return nil, value, nil + } + return nil, nil, nil + +} + +/* +PcloudCloudconnectionsGet gets a cloud connection s state information +*/ +func (a *Client) PcloudCloudconnectionsGet(params *PcloudCloudconnectionsGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudconnectionsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudconnectionsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudconnections.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudconnectionsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudconnectionsGetOK), nil + +} + +/* +PcloudCloudconnectionsGetall gets all cloud connections in this cloud instance +*/ +func (a *Client) PcloudCloudconnectionsGetall(params *PcloudCloudconnectionsGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudconnectionsGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudconnectionsGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudconnections.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudconnectionsGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudconnectionsGetallOK), nil + +} + +/* +PcloudCloudconnectionsNetworksDelete deletes a network from a cloud connection +*/ +func (a *Client) 
PcloudCloudconnectionsNetworksDelete(params *PcloudCloudconnectionsNetworksDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudconnectionsNetworksDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudconnectionsNetworksDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudconnections.networks.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudconnectionsNetworksDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudconnectionsNetworksDeleteOK), nil + +} + +/* +PcloudCloudconnectionsNetworksGet gets information about a cloud connections attached network +*/ +func (a *Client) PcloudCloudconnectionsNetworksGet(params *PcloudCloudconnectionsNetworksGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudconnectionsNetworksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudconnectionsNetworksGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudconnections.networks.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudconnectionsNetworksGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudconnectionsNetworksGetOK), nil + +} + +/* +PcloudCloudconnectionsNetworksPut adds a network to the cloud connection +*/ +func (a *Client) PcloudCloudconnectionsNetworksPut(params *PcloudCloudconnectionsNetworksPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudconnectionsNetworksPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudconnectionsNetworksPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudconnections.networks.put", + Method: "PUT", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudconnectionsNetworksPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudconnectionsNetworksPutOK), nil + +} + +/* +PcloudCloudconnectionsPost creates a new cloud connection +*/ +func (a *Client) PcloudCloudconnectionsPost(params *PcloudCloudconnectionsPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudconnectionsPostOK, *PcloudCloudconnectionsPostCreated, *PcloudCloudconnectionsPostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = 
NewPcloudCloudconnectionsPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudconnections.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudconnectionsPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, nil, err + } + switch value := result.(type) { + case *PcloudCloudconnectionsPostOK: + return value, nil, nil, nil + case *PcloudCloudconnectionsPostCreated: + return nil, value, nil, nil + case *PcloudCloudconnectionsPostAccepted: + return nil, nil, value, nil + } + return nil, nil, nil, nil + +} + +/* +PcloudCloudconnectionsPut updates a cloud connection +*/ +func (a *Client) PcloudCloudconnectionsPut(params *PcloudCloudconnectionsPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudconnectionsPutOK, *PcloudCloudconnectionsPutAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudconnectionsPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudconnections.put", + Method: "PUT", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudconnectionsPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *PcloudCloudconnectionsPutOK: + return value, nil, nil + case *PcloudCloudconnectionsPutAccepted: + return nil, value, nil + } + return nil, nil, nil + +} + +/* +PcloudCloudconnectionsVirtualprivatecloudsGetall gets all cloud connections in this cloud instance +*/ +func (a *Client) PcloudCloudconnectionsVirtualprivatecloudsGetall(params *PcloudCloudconnectionsVirtualprivatecloudsGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudconnectionsVirtualprivatecloudsGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudconnectionsVirtualprivatecloudsGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudconnections.virtualprivateclouds.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections-virtual-private-clouds", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudconnectionsVirtualprivatecloudsGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudconnectionsVirtualprivatecloudsGetallOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_delete_parameters.go 
b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_delete_parameters.go new file mode 100644 index 00000000000..8e2157b5e87 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_delete_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudconnectionsDeleteParams creates a new PcloudCloudconnectionsDeleteParams object +// with the default values initialized. +func NewPcloudCloudconnectionsDeleteParams() *PcloudCloudconnectionsDeleteParams { + var () + return &PcloudCloudconnectionsDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudconnectionsDeleteParamsWithTimeout creates a new PcloudCloudconnectionsDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudconnectionsDeleteParamsWithTimeout(timeout time.Duration) *PcloudCloudconnectionsDeleteParams { + var () + return &PcloudCloudconnectionsDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudconnectionsDeleteParamsWithContext creates a new PcloudCloudconnectionsDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudconnectionsDeleteParamsWithContext(ctx context.Context) *PcloudCloudconnectionsDeleteParams { + var () + return &PcloudCloudconnectionsDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudCloudconnectionsDeleteParamsWithHTTPClient creates a new PcloudCloudconnectionsDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudconnectionsDeleteParamsWithHTTPClient(client *http.Client) *PcloudCloudconnectionsDeleteParams { + var () + return &PcloudCloudconnectionsDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudCloudconnectionsDeleteParams contains all the parameters to send to the API endpoint +for the pcloud cloudconnections delete operation typically these are written to a http.Request +*/ +type PcloudCloudconnectionsDeleteParams struct { + + /*CloudConnectionID + Cloud Connection ID + + */ + CloudConnectionID string + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudconnections delete params +func (o *PcloudCloudconnectionsDeleteParams) WithTimeout(timeout time.Duration) *PcloudCloudconnectionsDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudconnections delete params +func (o *PcloudCloudconnectionsDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudconnections delete params +func (o *PcloudCloudconnectionsDeleteParams) WithContext(ctx context.Context) *PcloudCloudconnectionsDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudconnections delete params +func (o 
*PcloudCloudconnectionsDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudconnections delete params +func (o *PcloudCloudconnectionsDeleteParams) WithHTTPClient(client *http.Client) *PcloudCloudconnectionsDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudconnections delete params +func (o *PcloudCloudconnectionsDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudConnectionID adds the cloudConnectionID to the pcloud cloudconnections delete params +func (o *PcloudCloudconnectionsDeleteParams) WithCloudConnectionID(cloudConnectionID string) *PcloudCloudconnectionsDeleteParams { + o.SetCloudConnectionID(cloudConnectionID) + return o +} + +// SetCloudConnectionID adds the cloudConnectionId to the pcloud cloudconnections delete params +func (o *PcloudCloudconnectionsDeleteParams) SetCloudConnectionID(cloudConnectionID string) { + o.CloudConnectionID = cloudConnectionID +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudconnections delete params +func (o *PcloudCloudconnectionsDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudconnectionsDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudconnections delete params +func (o *PcloudCloudconnectionsDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudconnectionsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_connection_id + if err := r.SetPathParam("cloud_connection_id", o.CloudConnectionID); err != nil { + return err + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_delete_responses.go new file mode 100644 index 00000000000..392d4aa5a2a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_delete_responses.go @@ -0,0 +1,207 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudconnectionsDeleteReader is a Reader for the PcloudCloudconnectionsDelete structure. +type PcloudCloudconnectionsDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
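+// Both 200 and 202 decode as successes here, which is why the client's
+// PcloudCloudconnectionsDelete returns two result pointers; at most one is
+// non-nil. A sketch (editor's addition; ccClient and params are assumed):
+//
+//	ok, accepted, err := ccClient.PcloudCloudconnectionsDelete(params, authInfo)
+//	switch {
+//	case err != nil:
+//		// 400/410/500 arrive as typed errors carrying *models.Error
+//	case accepted != nil:
+//		// 202: deletion accepted and still in progress
+//	default:
+//		// 200: deletion completed, payload in ok.Payload
+//	}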
+func (o *PcloudCloudconnectionsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudconnectionsDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewPcloudCloudconnectionsDeleteAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudconnectionsDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudCloudconnectionsDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudconnectionsDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudconnectionsDeleteOK creates a PcloudCloudconnectionsDeleteOK with default headers values +func NewPcloudCloudconnectionsDeleteOK() *PcloudCloudconnectionsDeleteOK { + return &PcloudCloudconnectionsDeleteOK{} +} + +/*PcloudCloudconnectionsDeleteOK handles this case with default header values. + +OK +*/ +type PcloudCloudconnectionsDeleteOK struct { + Payload models.Object +} + +func (o *PcloudCloudconnectionsDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudconnectionsDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsDeleteAccepted creates a PcloudCloudconnectionsDeleteAccepted with default headers values +func NewPcloudCloudconnectionsDeleteAccepted() *PcloudCloudconnectionsDeleteAccepted { + return &PcloudCloudconnectionsDeleteAccepted{} +} + +/*PcloudCloudconnectionsDeleteAccepted handles this case with default header values. + +Accepted +*/ +type PcloudCloudconnectionsDeleteAccepted struct { + Payload models.Object +} + +func (o *PcloudCloudconnectionsDeleteAccepted) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsDeleteAccepted %+v", 202, o.Payload) +} + +func (o *PcloudCloudconnectionsDeleteAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsDeleteBadRequest creates a PcloudCloudconnectionsDeleteBadRequest with default headers values +func NewPcloudCloudconnectionsDeleteBadRequest() *PcloudCloudconnectionsDeleteBadRequest { + return &PcloudCloudconnectionsDeleteBadRequest{} +} + +/*PcloudCloudconnectionsDeleteBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudconnectionsDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudconnectionsDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsDeleteGone creates a PcloudCloudconnectionsDeleteGone with default headers values +func NewPcloudCloudconnectionsDeleteGone() *PcloudCloudconnectionsDeleteGone { + return &PcloudCloudconnectionsDeleteGone{} +} + +/*PcloudCloudconnectionsDeleteGone handles this case with default header values. + +Gone +*/ +type PcloudCloudconnectionsDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudCloudconnectionsDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsDeleteInternalServerError creates a PcloudCloudconnectionsDeleteInternalServerError with default headers values +func NewPcloudCloudconnectionsDeleteInternalServerError() *PcloudCloudconnectionsDeleteInternalServerError { + return &PcloudCloudconnectionsDeleteInternalServerError{} +} + +/*PcloudCloudconnectionsDeleteInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudconnectionsDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudconnectionsDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_get_parameters.go new file mode 100644 index 00000000000..8a8f4d40f98 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudconnectionsGetParams creates a new PcloudCloudconnectionsGetParams object +// with the default values initialized. +func NewPcloudCloudconnectionsGetParams() *PcloudCloudconnectionsGetParams { + var () + return &PcloudCloudconnectionsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudconnectionsGetParamsWithTimeout creates a new PcloudCloudconnectionsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudconnectionsGetParamsWithTimeout(timeout time.Duration) *PcloudCloudconnectionsGetParams { + var () + return &PcloudCloudconnectionsGetParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudconnectionsGetParamsWithContext creates a new PcloudCloudconnectionsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudconnectionsGetParamsWithContext(ctx context.Context) *PcloudCloudconnectionsGetParams { + var () + return &PcloudCloudconnectionsGetParams{ + + Context: ctx, + } +} + +// NewPcloudCloudconnectionsGetParamsWithHTTPClient creates a new PcloudCloudconnectionsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudconnectionsGetParamsWithHTTPClient(client *http.Client) *PcloudCloudconnectionsGetParams { + var () + return &PcloudCloudconnectionsGetParams{ + HTTPClient: client, + } +} + +/*PcloudCloudconnectionsGetParams contains all the parameters to send to the API endpoint +for the pcloud cloudconnections get operation typically these are written to a http.Request +*/ +type PcloudCloudconnectionsGetParams struct { + + /*CloudConnectionID + Cloud Connection ID + + */ + CloudConnectionID string + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) WithTimeout(timeout time.Duration) *PcloudCloudconnectionsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) WithContext(ctx context.Context) *PcloudCloudconnectionsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) WithHTTPClient(client *http.Client) *PcloudCloudconnectionsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudConnectionID adds the cloudConnectionID to the pcloud 
cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) WithCloudConnectionID(cloudConnectionID string) *PcloudCloudconnectionsGetParams { + o.SetCloudConnectionID(cloudConnectionID) + return o +} + +// SetCloudConnectionID adds the cloudConnectionId to the pcloud cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) SetCloudConnectionID(cloudConnectionID string) { + o.CloudConnectionID = cloudConnectionID +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudconnectionsGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudconnections get params +func (o *PcloudCloudconnectionsGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudconnectionsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_connection_id + if err := r.SetPathParam("cloud_connection_id", o.CloudConnectionID); err != nil { + return err + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_get_responses.go new file mode 100644 index 00000000000..df1a91e221e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudconnectionsGetReader is a Reader for the PcloudCloudconnectionsGet structure. +type PcloudCloudconnectionsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
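+// A sketch of error handling (editor's addition, not generated; the exact
+// fields of models.Error are not shown in this diff, so %+v is used):
+//
+//	if _, err := reader.ReadResponse(resp, consumer); err != nil {
+//		if bad, ok := err.(*PcloudCloudconnectionsGetBadRequest); ok {
+//			fmt.Printf("bad request: %+v\n", bad.Payload)
+//		}
+//	}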
+func (o *PcloudCloudconnectionsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudconnectionsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudconnectionsGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudconnectionsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudconnectionsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudconnectionsGetOK creates a PcloudCloudconnectionsGetOK with default headers values +func NewPcloudCloudconnectionsGetOK() *PcloudCloudconnectionsGetOK { + return &PcloudCloudconnectionsGetOK{} +} + +/*PcloudCloudconnectionsGetOK handles this case with default header values. + +OK +*/ +type PcloudCloudconnectionsGetOK struct { + Payload *models.CloudConnection +} + +func (o *PcloudCloudconnectionsGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsGetOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudconnectionsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnection) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsGetBadRequest creates a PcloudCloudconnectionsGetBadRequest with default headers values +func NewPcloudCloudconnectionsGetBadRequest() *PcloudCloudconnectionsGetBadRequest { + return &PcloudCloudconnectionsGetBadRequest{} +} + +/*PcloudCloudconnectionsGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudconnectionsGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudconnectionsGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsGetNotFound creates a PcloudCloudconnectionsGetNotFound with default headers values +func NewPcloudCloudconnectionsGetNotFound() *PcloudCloudconnectionsGetNotFound { + return &PcloudCloudconnectionsGetNotFound{} +} + +/*PcloudCloudconnectionsGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudCloudconnectionsGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudconnectionsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsGetInternalServerError creates a PcloudCloudconnectionsGetInternalServerError with default headers values +func NewPcloudCloudconnectionsGetInternalServerError() *PcloudCloudconnectionsGetInternalServerError { + return &PcloudCloudconnectionsGetInternalServerError{} +} + +/*PcloudCloudconnectionsGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudconnectionsGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudconnectionsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_getall_parameters.go new file mode 100644 index 00000000000..69487e593e9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_getall_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudconnectionsGetallParams creates a new PcloudCloudconnectionsGetallParams object +// with the default values initialized. 
+func NewPcloudCloudconnectionsGetallParams() *PcloudCloudconnectionsGetallParams { + var () + return &PcloudCloudconnectionsGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudconnectionsGetallParamsWithTimeout creates a new PcloudCloudconnectionsGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudconnectionsGetallParamsWithTimeout(timeout time.Duration) *PcloudCloudconnectionsGetallParams { + var () + return &PcloudCloudconnectionsGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudconnectionsGetallParamsWithContext creates a new PcloudCloudconnectionsGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudconnectionsGetallParamsWithContext(ctx context.Context) *PcloudCloudconnectionsGetallParams { + var () + return &PcloudCloudconnectionsGetallParams{ + + Context: ctx, + } +} + +// NewPcloudCloudconnectionsGetallParamsWithHTTPClient creates a new PcloudCloudconnectionsGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudconnectionsGetallParamsWithHTTPClient(client *http.Client) *PcloudCloudconnectionsGetallParams { + var () + return &PcloudCloudconnectionsGetallParams{ + HTTPClient: client, + } +} + +/*PcloudCloudconnectionsGetallParams contains all the parameters to send to the API endpoint +for the pcloud cloudconnections getall operation typically these are written to a http.Request +*/ +type PcloudCloudconnectionsGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudconnections getall params +func (o *PcloudCloudconnectionsGetallParams) WithTimeout(timeout time.Duration) *PcloudCloudconnectionsGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudconnections getall params +func (o *PcloudCloudconnectionsGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudconnections getall params +func (o *PcloudCloudconnectionsGetallParams) WithContext(ctx context.Context) *PcloudCloudconnectionsGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudconnections getall params +func (o *PcloudCloudconnectionsGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudconnections getall params +func (o *PcloudCloudconnectionsGetallParams) WithHTTPClient(client *http.Client) *PcloudCloudconnectionsGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudconnections getall params +func (o *PcloudCloudconnectionsGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudconnections getall params +func (o *PcloudCloudconnectionsGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudconnectionsGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudconnections getall params +func (o *PcloudCloudconnectionsGetallParams) SetCloudInstanceID(cloudInstanceID string) { + 
o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudconnectionsGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_getall_responses.go new file mode 100644 index 00000000000..bf3befbd472 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_getall_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudconnectionsGetallReader is a Reader for the PcloudCloudconnectionsGetall structure. +type PcloudCloudconnectionsGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudCloudconnectionsGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudconnectionsGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudconnectionsGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudconnectionsGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudconnectionsGetallOK creates a PcloudCloudconnectionsGetallOK with default headers values +func NewPcloudCloudconnectionsGetallOK() *PcloudCloudconnectionsGetallOK { + return &PcloudCloudconnectionsGetallOK{} +} + +/*PcloudCloudconnectionsGetallOK handles this case with default header values. 
+ +OK +*/ +type PcloudCloudconnectionsGetallOK struct { + Payload *models.CloudConnections +} + +func (o *PcloudCloudconnectionsGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudconnectionsGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnections) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsGetallBadRequest creates a PcloudCloudconnectionsGetallBadRequest with default headers values +func NewPcloudCloudconnectionsGetallBadRequest() *PcloudCloudconnectionsGetallBadRequest { + return &PcloudCloudconnectionsGetallBadRequest{} +} + +/*PcloudCloudconnectionsGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudconnectionsGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudconnectionsGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsGetallInternalServerError creates a PcloudCloudconnectionsGetallInternalServerError with default headers values +func NewPcloudCloudconnectionsGetallInternalServerError() *PcloudCloudconnectionsGetallInternalServerError { + return &PcloudCloudconnectionsGetallInternalServerError{} +} + +/*PcloudCloudconnectionsGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudconnectionsGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudconnectionsGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_delete_parameters.go new file mode 100644 index 00000000000..e28101d3dd4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_delete_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudconnectionsNetworksDeleteParams creates a new PcloudCloudconnectionsNetworksDeleteParams object +// with the default values initialized. +func NewPcloudCloudconnectionsNetworksDeleteParams() *PcloudCloudconnectionsNetworksDeleteParams { + var () + return &PcloudCloudconnectionsNetworksDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudconnectionsNetworksDeleteParamsWithTimeout creates a new PcloudCloudconnectionsNetworksDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudconnectionsNetworksDeleteParamsWithTimeout(timeout time.Duration) *PcloudCloudconnectionsNetworksDeleteParams { + var () + return &PcloudCloudconnectionsNetworksDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudconnectionsNetworksDeleteParamsWithContext creates a new PcloudCloudconnectionsNetworksDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudconnectionsNetworksDeleteParamsWithContext(ctx context.Context) *PcloudCloudconnectionsNetworksDeleteParams { + var () + return &PcloudCloudconnectionsNetworksDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudCloudconnectionsNetworksDeleteParamsWithHTTPClient creates a new PcloudCloudconnectionsNetworksDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudconnectionsNetworksDeleteParamsWithHTTPClient(client *http.Client) *PcloudCloudconnectionsNetworksDeleteParams { + var () + return &PcloudCloudconnectionsNetworksDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudCloudconnectionsNetworksDeleteParams contains all the parameters to send to the API endpoint +for the pcloud cloudconnections networks delete operation typically these are written to a http.Request +*/ +type PcloudCloudconnectionsNetworksDeleteParams struct { + + /*CloudConnectionID + Cloud Connection ID + + */ + CloudConnectionID string + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) WithTimeout(timeout time.Duration) *PcloudCloudconnectionsNetworksDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) WithContext(ctx context.Context) *PcloudCloudconnectionsNetworksDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudconnections networks 
delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) WithHTTPClient(client *http.Client) *PcloudCloudconnectionsNetworksDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudConnectionID adds the cloudConnectionID to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) WithCloudConnectionID(cloudConnectionID string) *PcloudCloudconnectionsNetworksDeleteParams { + o.SetCloudConnectionID(cloudConnectionID) + return o +} + +// SetCloudConnectionID adds the cloudConnectionId to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) SetCloudConnectionID(cloudConnectionID string) { + o.CloudConnectionID = cloudConnectionID +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudconnectionsNetworksDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) WithNetworkID(networkID string) *PcloudCloudconnectionsNetworksDeleteParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud cloudconnections networks delete params +func (o *PcloudCloudconnectionsNetworksDeleteParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudconnectionsNetworksDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_connection_id + if err := r.SetPathParam("cloud_connection_id", o.CloudConnectionID); err != nil { + return err + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_delete_responses.go new file mode 100644 index 00000000000..88cef00aa28 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_delete_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudconnectionsNetworksDeleteReader is a Reader for the PcloudCloudconnectionsNetworksDelete structure. +type PcloudCloudconnectionsNetworksDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudCloudconnectionsNetworksDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudconnectionsNetworksDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudconnectionsNetworksDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudCloudconnectionsNetworksDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudconnectionsNetworksDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudconnectionsNetworksDeleteOK creates a PcloudCloudconnectionsNetworksDeleteOK with default headers values +func NewPcloudCloudconnectionsNetworksDeleteOK() *PcloudCloudconnectionsNetworksDeleteOK { + return &PcloudCloudconnectionsNetworksDeleteOK{} +} + +/*PcloudCloudconnectionsNetworksDeleteOK handles this case with default header values. + +OK +*/ +type PcloudCloudconnectionsNetworksDeleteOK struct { + Payload *models.CloudConnection +} + +func (o *PcloudCloudconnectionsNetworksDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnection) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsNetworksDeleteBadRequest creates a PcloudCloudconnectionsNetworksDeleteBadRequest with default headers values +func NewPcloudCloudconnectionsNetworksDeleteBadRequest() *PcloudCloudconnectionsNetworksDeleteBadRequest { + return &PcloudCloudconnectionsNetworksDeleteBadRequest{} +} + +/*PcloudCloudconnectionsNetworksDeleteBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudconnectionsNetworksDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsNetworksDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsNetworksDeleteGone creates a PcloudCloudconnectionsNetworksDeleteGone with default headers values +func NewPcloudCloudconnectionsNetworksDeleteGone() *PcloudCloudconnectionsNetworksDeleteGone { + return &PcloudCloudconnectionsNetworksDeleteGone{} +} + +/*PcloudCloudconnectionsNetworksDeleteGone handles this case with default header values. + +Gone +*/ +type PcloudCloudconnectionsNetworksDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsNetworksDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsNetworksDeleteInternalServerError creates a PcloudCloudconnectionsNetworksDeleteInternalServerError with default headers values +func NewPcloudCloudconnectionsNetworksDeleteInternalServerError() *PcloudCloudconnectionsNetworksDeleteInternalServerError { + return &PcloudCloudconnectionsNetworksDeleteInternalServerError{} +} + +/*PcloudCloudconnectionsNetworksDeleteInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudCloudconnectionsNetworksDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsNetworksDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_get_parameters.go new file mode 100644 index 00000000000..bf4756c494b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_get_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudconnectionsNetworksGetParams creates a new PcloudCloudconnectionsNetworksGetParams object +// with the default values initialized. 
+func NewPcloudCloudconnectionsNetworksGetParams() *PcloudCloudconnectionsNetworksGetParams { + var () + return &PcloudCloudconnectionsNetworksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudconnectionsNetworksGetParamsWithTimeout creates a new PcloudCloudconnectionsNetworksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudconnectionsNetworksGetParamsWithTimeout(timeout time.Duration) *PcloudCloudconnectionsNetworksGetParams { + var () + return &PcloudCloudconnectionsNetworksGetParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudconnectionsNetworksGetParamsWithContext creates a new PcloudCloudconnectionsNetworksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudconnectionsNetworksGetParamsWithContext(ctx context.Context) *PcloudCloudconnectionsNetworksGetParams { + var () + return &PcloudCloudconnectionsNetworksGetParams{ + + Context: ctx, + } +} + +// NewPcloudCloudconnectionsNetworksGetParamsWithHTTPClient creates a new PcloudCloudconnectionsNetworksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudconnectionsNetworksGetParamsWithHTTPClient(client *http.Client) *PcloudCloudconnectionsNetworksGetParams { + var () + return &PcloudCloudconnectionsNetworksGetParams{ + HTTPClient: client, + } +} + +/*PcloudCloudconnectionsNetworksGetParams contains all the parameters to send to the API endpoint +for the pcloud cloudconnections networks get operation typically these are written to a http.Request +*/ +type PcloudCloudconnectionsNetworksGetParams struct { + + /*CloudConnectionID + Cloud Connection ID + + */ + CloudConnectionID string + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) WithTimeout(timeout time.Duration) *PcloudCloudconnectionsNetworksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) WithContext(ctx context.Context) *PcloudCloudconnectionsNetworksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) WithHTTPClient(client *http.Client) *PcloudCloudconnectionsNetworksGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudConnectionID adds the cloudConnectionID to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) 
WithCloudConnectionID(cloudConnectionID string) *PcloudCloudconnectionsNetworksGetParams { + o.SetCloudConnectionID(cloudConnectionID) + return o +} + +// SetCloudConnectionID adds the cloudConnectionId to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) SetCloudConnectionID(cloudConnectionID string) { + o.CloudConnectionID = cloudConnectionID +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudconnectionsNetworksGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) WithNetworkID(networkID string) *PcloudCloudconnectionsNetworksGetParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud cloudconnections networks get params +func (o *PcloudCloudconnectionsNetworksGetParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudconnectionsNetworksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_connection_id + if err := r.SetPathParam("cloud_connection_id", o.CloudConnectionID); err != nil { + return err + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_get_responses.go new file mode 100644 index 00000000000..b39dcbfc610 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudconnectionsNetworksGetReader is a Reader for the PcloudCloudconnectionsNetworksGet structure. +type PcloudCloudconnectionsNetworksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudCloudconnectionsNetworksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudconnectionsNetworksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudconnectionsNetworksGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudconnectionsNetworksGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudconnectionsNetworksGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudconnectionsNetworksGetOK creates a PcloudCloudconnectionsNetworksGetOK with default headers values +func NewPcloudCloudconnectionsNetworksGetOK() *PcloudCloudconnectionsNetworksGetOK { + return &PcloudCloudconnectionsNetworksGetOK{} +} + +/*PcloudCloudconnectionsNetworksGetOK handles this case with default header values. + +OK +*/ +type PcloudCloudconnectionsNetworksGetOK struct { + Payload *models.Network +} + +func (o *PcloudCloudconnectionsNetworksGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksGetOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Network) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsNetworksGetBadRequest creates a PcloudCloudconnectionsNetworksGetBadRequest with default headers values +func NewPcloudCloudconnectionsNetworksGetBadRequest() *PcloudCloudconnectionsNetworksGetBadRequest { + return &PcloudCloudconnectionsNetworksGetBadRequest{} +} + +/*PcloudCloudconnectionsNetworksGetBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudconnectionsNetworksGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsNetworksGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsNetworksGetNotFound creates a PcloudCloudconnectionsNetworksGetNotFound with default headers values +func NewPcloudCloudconnectionsNetworksGetNotFound() *PcloudCloudconnectionsNetworksGetNotFound { + return &PcloudCloudconnectionsNetworksGetNotFound{} +} + +/*PcloudCloudconnectionsNetworksGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudCloudconnectionsNetworksGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsNetworksGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsNetworksGetInternalServerError creates a PcloudCloudconnectionsNetworksGetInternalServerError with default headers values +func NewPcloudCloudconnectionsNetworksGetInternalServerError() *PcloudCloudconnectionsNetworksGetInternalServerError { + return &PcloudCloudconnectionsNetworksGetInternalServerError{} +} + +/*PcloudCloudconnectionsNetworksGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudCloudconnectionsNetworksGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsNetworksGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_put_parameters.go new file mode 100644 index 00000000000..fae9bbd573e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_put_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudconnectionsNetworksPutParams creates a new PcloudCloudconnectionsNetworksPutParams object +// with the default values initialized. 
+func NewPcloudCloudconnectionsNetworksPutParams() *PcloudCloudconnectionsNetworksPutParams { + var () + return &PcloudCloudconnectionsNetworksPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudconnectionsNetworksPutParamsWithTimeout creates a new PcloudCloudconnectionsNetworksPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudconnectionsNetworksPutParamsWithTimeout(timeout time.Duration) *PcloudCloudconnectionsNetworksPutParams { + var () + return &PcloudCloudconnectionsNetworksPutParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudconnectionsNetworksPutParamsWithContext creates a new PcloudCloudconnectionsNetworksPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudconnectionsNetworksPutParamsWithContext(ctx context.Context) *PcloudCloudconnectionsNetworksPutParams { + var () + return &PcloudCloudconnectionsNetworksPutParams{ + + Context: ctx, + } +} + +// NewPcloudCloudconnectionsNetworksPutParamsWithHTTPClient creates a new PcloudCloudconnectionsNetworksPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudconnectionsNetworksPutParamsWithHTTPClient(client *http.Client) *PcloudCloudconnectionsNetworksPutParams { + var () + return &PcloudCloudconnectionsNetworksPutParams{ + HTTPClient: client, + } +} + +/*PcloudCloudconnectionsNetworksPutParams contains all the parameters to send to the API endpoint +for the pcloud cloudconnections networks put operation typically these are written to a http.Request +*/ +type PcloudCloudconnectionsNetworksPutParams struct { + + /*CloudConnectionID + Cloud Connection ID + + */ + CloudConnectionID string + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) WithTimeout(timeout time.Duration) *PcloudCloudconnectionsNetworksPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) WithContext(ctx context.Context) *PcloudCloudconnectionsNetworksPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) WithHTTPClient(client *http.Client) *PcloudCloudconnectionsNetworksPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudConnectionID adds the cloudConnectionID to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) 
WithCloudConnectionID(cloudConnectionID string) *PcloudCloudconnectionsNetworksPutParams { + o.SetCloudConnectionID(cloudConnectionID) + return o +} + +// SetCloudConnectionID adds the cloudConnectionId to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) SetCloudConnectionID(cloudConnectionID string) { + o.CloudConnectionID = cloudConnectionID +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudconnectionsNetworksPutParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) WithNetworkID(networkID string) *PcloudCloudconnectionsNetworksPutParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud cloudconnections networks put params +func (o *PcloudCloudconnectionsNetworksPutParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudconnectionsNetworksPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_connection_id + if err := r.SetPathParam("cloud_connection_id", o.CloudConnectionID); err != nil { + return err + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_put_responses.go new file mode 100644 index 00000000000..8406a663c4f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_networks_put_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudconnectionsNetworksPutReader is a Reader for the PcloudCloudconnectionsNetworksPut structure. +type PcloudCloudconnectionsNetworksPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudCloudconnectionsNetworksPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudconnectionsNetworksPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudconnectionsNetworksPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudCloudconnectionsNetworksPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudconnectionsNetworksPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudconnectionsNetworksPutOK creates a PcloudCloudconnectionsNetworksPutOK with default headers values +func NewPcloudCloudconnectionsNetworksPutOK() *PcloudCloudconnectionsNetworksPutOK { + return &PcloudCloudconnectionsNetworksPutOK{} +} + +/*PcloudCloudconnectionsNetworksPutOK handles this case with default header values. + +OK +*/ +type PcloudCloudconnectionsNetworksPutOK struct { + Payload *models.CloudConnection +} + +func (o *PcloudCloudconnectionsNetworksPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksPutOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnection) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsNetworksPutBadRequest creates a PcloudCloudconnectionsNetworksPutBadRequest with default headers values +func NewPcloudCloudconnectionsNetworksPutBadRequest() *PcloudCloudconnectionsNetworksPutBadRequest { + return &PcloudCloudconnectionsNetworksPutBadRequest{} +} + +/*PcloudCloudconnectionsNetworksPutBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudconnectionsNetworksPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsNetworksPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsNetworksPutUnprocessableEntity creates a PcloudCloudconnectionsNetworksPutUnprocessableEntity with default headers values +func NewPcloudCloudconnectionsNetworksPutUnprocessableEntity() *PcloudCloudconnectionsNetworksPutUnprocessableEntity { + return &PcloudCloudconnectionsNetworksPutUnprocessableEntity{} +} + +/*PcloudCloudconnectionsNetworksPutUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudCloudconnectionsNetworksPutUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsNetworksPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsNetworksPutInternalServerError creates a PcloudCloudconnectionsNetworksPutInternalServerError with default headers values +func NewPcloudCloudconnectionsNetworksPutInternalServerError() *PcloudCloudconnectionsNetworksPutInternalServerError { + return &PcloudCloudconnectionsNetworksPutInternalServerError{} +} + +/*PcloudCloudconnectionsNetworksPutInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudCloudconnectionsNetworksPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsNetworksPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}/networks/{network_id}][%d] pcloudCloudconnectionsNetworksPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudconnectionsNetworksPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_post_parameters.go new file mode 100644 index 00000000000..7e0d595df46 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudCloudconnectionsPostParams creates a new PcloudCloudconnectionsPostParams object +// with the default values initialized. 
+func NewPcloudCloudconnectionsPostParams() *PcloudCloudconnectionsPostParams {
+	var ()
+	return &PcloudCloudconnectionsPostParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewPcloudCloudconnectionsPostParamsWithTimeout creates a new PcloudCloudconnectionsPostParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewPcloudCloudconnectionsPostParamsWithTimeout(timeout time.Duration) *PcloudCloudconnectionsPostParams {
+	var ()
+	return &PcloudCloudconnectionsPostParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewPcloudCloudconnectionsPostParamsWithContext creates a new PcloudCloudconnectionsPostParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewPcloudCloudconnectionsPostParamsWithContext(ctx context.Context) *PcloudCloudconnectionsPostParams {
+	var ()
+	return &PcloudCloudconnectionsPostParams{
+
+		Context: ctx,
+	}
+}
+
+// NewPcloudCloudconnectionsPostParamsWithHTTPClient creates a new PcloudCloudconnectionsPostParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewPcloudCloudconnectionsPostParamsWithHTTPClient(client *http.Client) *PcloudCloudconnectionsPostParams {
+	var ()
+	return &PcloudCloudconnectionsPostParams{
+		HTTPClient: client,
+	}
+}
+
+/*PcloudCloudconnectionsPostParams contains all the parameters to send to the API endpoint
+for the pcloud cloudconnections post operation typically these are written to a http.Request
+*/
+type PcloudCloudconnectionsPostParams struct {
+
+	/*Body
+	  Parameters for the creation of a new cloud connection
+
+	*/
+	Body *models.CloudConnectionCreate
+	/*CloudInstanceID
+	  Cloud Instance ID of a PCloud Instance
+
+	*/
+	CloudInstanceID string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) WithTimeout(timeout time.Duration) *PcloudCloudconnectionsPostParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) WithContext(ctx context.Context) *PcloudCloudconnectionsPostParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) WithHTTPClient(client *http.Client) *PcloudCloudconnectionsPostParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithBody adds the body to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) WithBody(body *models.CloudConnectionCreate) *PcloudCloudconnectionsPostParams {
+	o.SetBody(body)
+	return o
+}
+
+// SetBody adds the body to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) SetBody(body *models.CloudConnectionCreate) {
+	o.Body = body
+}
+
+// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudconnectionsPostParams {
+	o.SetCloudInstanceID(cloudInstanceID)
+	return o
+}
+
+// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudconnections post params
+func (o *PcloudCloudconnectionsPostParams) SetCloudInstanceID(cloudInstanceID string) {
+	o.CloudInstanceID = cloudInstanceID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PcloudCloudconnectionsPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if o.Body != nil {
+		if err := r.SetBodyParam(o.Body); err != nil {
+			return err
+		}
+	}
+
+	// path param cloud_instance_id
+	if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_post_responses.go
new file mode 100644
index 00000000000..61bb04049a8
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_post_responses.go
@@ -0,0 +1,283 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package p_cloud_cloud_connections
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/go-openapi/runtime"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	models "github.com/IBM-Cloud/power-go-client/power/models"
+)
+
+// PcloudCloudconnectionsPostReader is a Reader for the PcloudCloudconnectionsPost structure.
+type PcloudCloudconnectionsPostReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PcloudCloudconnectionsPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudconnectionsPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 201: + result := NewPcloudCloudconnectionsPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewPcloudCloudconnectionsPostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudconnectionsPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudCloudconnectionsPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudCloudconnectionsPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudconnectionsPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudconnectionsPostOK creates a PcloudCloudconnectionsPostOK with default headers values +func NewPcloudCloudconnectionsPostOK() *PcloudCloudconnectionsPostOK { + return &PcloudCloudconnectionsPostOK{} +} + +/*PcloudCloudconnectionsPostOK handles this case with default header values. + +OK +*/ +type PcloudCloudconnectionsPostOK struct { + Payload *models.CloudConnection +} + +func (o *PcloudCloudconnectionsPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsPostOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudconnectionsPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnection) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPostCreated creates a PcloudCloudconnectionsPostCreated with default headers values +func NewPcloudCloudconnectionsPostCreated() *PcloudCloudconnectionsPostCreated { + return &PcloudCloudconnectionsPostCreated{} +} + +/*PcloudCloudconnectionsPostCreated handles this case with default header values. 
+ +Created +*/ +type PcloudCloudconnectionsPostCreated struct { + Payload *models.CloudConnection +} + +func (o *PcloudCloudconnectionsPostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsPostCreated %+v", 201, o.Payload) +} + +func (o *PcloudCloudconnectionsPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnection) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPostAccepted creates a PcloudCloudconnectionsPostAccepted with default headers values +func NewPcloudCloudconnectionsPostAccepted() *PcloudCloudconnectionsPostAccepted { + return &PcloudCloudconnectionsPostAccepted{} +} + +/*PcloudCloudconnectionsPostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudCloudconnectionsPostAccepted struct { + Payload *models.CloudConnection +} + +func (o *PcloudCloudconnectionsPostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsPostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudCloudconnectionsPostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnection) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPostBadRequest creates a PcloudCloudconnectionsPostBadRequest with default headers values +func NewPcloudCloudconnectionsPostBadRequest() *PcloudCloudconnectionsPostBadRequest { + return &PcloudCloudconnectionsPostBadRequest{} +} + +/*PcloudCloudconnectionsPostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudconnectionsPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudconnectionsPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPostConflict creates a PcloudCloudconnectionsPostConflict with default headers values +func NewPcloudCloudconnectionsPostConflict() *PcloudCloudconnectionsPostConflict { + return &PcloudCloudconnectionsPostConflict{} +} + +/*PcloudCloudconnectionsPostConflict handles this case with default header values. 
+ +Conflict +*/ +type PcloudCloudconnectionsPostConflict struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudCloudconnectionsPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPostUnprocessableEntity creates a PcloudCloudconnectionsPostUnprocessableEntity with default headers values +func NewPcloudCloudconnectionsPostUnprocessableEntity() *PcloudCloudconnectionsPostUnprocessableEntity { + return &PcloudCloudconnectionsPostUnprocessableEntity{} +} + +/*PcloudCloudconnectionsPostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudCloudconnectionsPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudCloudconnectionsPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPostInternalServerError creates a PcloudCloudconnectionsPostInternalServerError with default headers values +func NewPcloudCloudconnectionsPostInternalServerError() *PcloudCloudconnectionsPostInternalServerError { + return &PcloudCloudconnectionsPostInternalServerError{} +} + +/*PcloudCloudconnectionsPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudconnectionsPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections][%d] pcloudCloudconnectionsPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudconnectionsPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_put_parameters.go new file mode 100644 index 00000000000..902efdd5cd8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_put_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_cloud_connections + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	models "github.com/IBM-Cloud/power-go-client/power/models"
+)
+
+// NewPcloudCloudconnectionsPutParams creates a new PcloudCloudconnectionsPutParams object
+// with the default values initialized.
+func NewPcloudCloudconnectionsPutParams() *PcloudCloudconnectionsPutParams {
+	var ()
+	return &PcloudCloudconnectionsPutParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewPcloudCloudconnectionsPutParamsWithTimeout creates a new PcloudCloudconnectionsPutParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewPcloudCloudconnectionsPutParamsWithTimeout(timeout time.Duration) *PcloudCloudconnectionsPutParams {
+	var ()
+	return &PcloudCloudconnectionsPutParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewPcloudCloudconnectionsPutParamsWithContext creates a new PcloudCloudconnectionsPutParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewPcloudCloudconnectionsPutParamsWithContext(ctx context.Context) *PcloudCloudconnectionsPutParams {
+	var ()
+	return &PcloudCloudconnectionsPutParams{
+
+		Context: ctx,
+	}
+}
+
+// NewPcloudCloudconnectionsPutParamsWithHTTPClient creates a new PcloudCloudconnectionsPutParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewPcloudCloudconnectionsPutParamsWithHTTPClient(client *http.Client) *PcloudCloudconnectionsPutParams {
+	var ()
+	return &PcloudCloudconnectionsPutParams{
+		HTTPClient: client,
+	}
+}
+
+/*PcloudCloudconnectionsPutParams contains all the parameters to send to the API endpoint
+for the pcloud cloudconnections put operation typically these are written to a http.Request
+*/
+type PcloudCloudconnectionsPutParams struct {
+
+	/*Body
+	  Parameters to update a Cloud Connection
+
+	*/
+	Body *models.CloudConnectionUpdate
+	/*CloudConnectionID
+	  Cloud Connection ID
+
+	*/
+	CloudConnectionID string
+	/*CloudInstanceID
+	  Cloud Instance ID of a PCloud Instance
+
+	*/
+	CloudInstanceID string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) WithTimeout(timeout time.Duration) *PcloudCloudconnectionsPutParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) WithContext(ctx context.Context) *PcloudCloudconnectionsPutParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) WithHTTPClient(client *http.Client) *PcloudCloudconnectionsPutParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithBody adds the body to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) WithBody(body *models.CloudConnectionUpdate) *PcloudCloudconnectionsPutParams {
+	o.SetBody(body)
+	return o
+}
+
+// SetBody adds the body to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) SetBody(body *models.CloudConnectionUpdate) {
+	o.Body = body
+}
+
+// WithCloudConnectionID adds the cloudConnectionID to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) WithCloudConnectionID(cloudConnectionID string) *PcloudCloudconnectionsPutParams {
+	o.SetCloudConnectionID(cloudConnectionID)
+	return o
+}
+
+// SetCloudConnectionID adds the cloudConnectionId to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) SetCloudConnectionID(cloudConnectionID string) {
+	o.CloudConnectionID = cloudConnectionID
+}
+
+// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudconnectionsPutParams {
+	o.SetCloudInstanceID(cloudInstanceID)
+	return o
+}
+
+// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudconnections put params
+func (o *PcloudCloudconnectionsPutParams) SetCloudInstanceID(cloudInstanceID string) {
+	o.CloudInstanceID = cloudInstanceID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PcloudCloudconnectionsPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if o.Body != nil {
+		if err := r.SetBodyParam(o.Body); err != nil {
+			return err
+		}
+	}
+
+	// path param cloud_connection_id
+	if err := r.SetPathParam("cloud_connection_id", o.CloudConnectionID); err != nil {
+		return err
+	}
+
+	// path param cloud_instance_id
+	if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_put_responses.go
new file mode 100644
index 00000000000..c7ec47ff39d
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_put_responses.go
@@ -0,0 +1,211 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package p_cloud_cloud_connections
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/go-openapi/runtime"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	models "github.com/IBM-Cloud/power-go-client/power/models"
+)
+
+// PcloudCloudconnectionsPutReader is a Reader for the PcloudCloudconnectionsPut structure.
+type PcloudCloudconnectionsPutReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PcloudCloudconnectionsPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudconnectionsPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewPcloudCloudconnectionsPutAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudconnectionsPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudCloudconnectionsPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudconnectionsPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudconnectionsPutOK creates a PcloudCloudconnectionsPutOK with default headers values +func NewPcloudCloudconnectionsPutOK() *PcloudCloudconnectionsPutOK { + return &PcloudCloudconnectionsPutOK{} +} + +/*PcloudCloudconnectionsPutOK handles this case with default header values. + +OK +*/ +type PcloudCloudconnectionsPutOK struct { + Payload *models.CloudConnection +} + +func (o *PcloudCloudconnectionsPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsPutOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudconnectionsPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnection) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPutAccepted creates a PcloudCloudconnectionsPutAccepted with default headers values +func NewPcloudCloudconnectionsPutAccepted() *PcloudCloudconnectionsPutAccepted { + return &PcloudCloudconnectionsPutAccepted{} +} + +/*PcloudCloudconnectionsPutAccepted handles this case with default header values. + +Accepted +*/ +type PcloudCloudconnectionsPutAccepted struct { + Payload *models.CloudConnection +} + +func (o *PcloudCloudconnectionsPutAccepted) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsPutAccepted %+v", 202, o.Payload) +} + +func (o *PcloudCloudconnectionsPutAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnection) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPutBadRequest creates a PcloudCloudconnectionsPutBadRequest with default headers values +func NewPcloudCloudconnectionsPutBadRequest() *PcloudCloudconnectionsPutBadRequest { + return &PcloudCloudconnectionsPutBadRequest{} +} + +/*PcloudCloudconnectionsPutBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudconnectionsPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudconnectionsPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPutUnprocessableEntity creates a PcloudCloudconnectionsPutUnprocessableEntity with default headers values +func NewPcloudCloudconnectionsPutUnprocessableEntity() *PcloudCloudconnectionsPutUnprocessableEntity { + return &PcloudCloudconnectionsPutUnprocessableEntity{} +} + +/*PcloudCloudconnectionsPutUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudCloudconnectionsPutUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudCloudconnectionsPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsPutInternalServerError creates a PcloudCloudconnectionsPutInternalServerError with default headers values +func NewPcloudCloudconnectionsPutInternalServerError() *PcloudCloudconnectionsPutInternalServerError { + return &PcloudCloudconnectionsPutInternalServerError{} +} + +/*PcloudCloudconnectionsPutInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudconnectionsPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections/{cloud_connection_id}][%d] pcloudCloudconnectionsPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudconnectionsPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_virtualprivateclouds_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_virtualprivateclouds_getall_parameters.go new file mode 100644 index 00000000000..540fd66631e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_virtualprivateclouds_getall_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+
+package p_cloud_cloud_connections
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+
+	strfmt "github.com/go-openapi/strfmt"
+)
+
+// NewPcloudCloudconnectionsVirtualprivatecloudsGetallParams creates a new PcloudCloudconnectionsVirtualprivatecloudsGetallParams object
+// with the default values initialized.
+func NewPcloudCloudconnectionsVirtualprivatecloudsGetallParams() *PcloudCloudconnectionsVirtualprivatecloudsGetallParams {
+	var ()
+	return &PcloudCloudconnectionsVirtualprivatecloudsGetallParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewPcloudCloudconnectionsVirtualprivatecloudsGetallParamsWithTimeout creates a new PcloudCloudconnectionsVirtualprivatecloudsGetallParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewPcloudCloudconnectionsVirtualprivatecloudsGetallParamsWithTimeout(timeout time.Duration) *PcloudCloudconnectionsVirtualprivatecloudsGetallParams {
+	var ()
+	return &PcloudCloudconnectionsVirtualprivatecloudsGetallParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewPcloudCloudconnectionsVirtualprivatecloudsGetallParamsWithContext creates a new PcloudCloudconnectionsVirtualprivatecloudsGetallParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewPcloudCloudconnectionsVirtualprivatecloudsGetallParamsWithContext(ctx context.Context) *PcloudCloudconnectionsVirtualprivatecloudsGetallParams {
+	var ()
+	return &PcloudCloudconnectionsVirtualprivatecloudsGetallParams{
+
+		Context: ctx,
+	}
+}
+
+// NewPcloudCloudconnectionsVirtualprivatecloudsGetallParamsWithHTTPClient creates a new PcloudCloudconnectionsVirtualprivatecloudsGetallParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewPcloudCloudconnectionsVirtualprivatecloudsGetallParamsWithHTTPClient(client *http.Client) *PcloudCloudconnectionsVirtualprivatecloudsGetallParams {
+	var ()
+	return &PcloudCloudconnectionsVirtualprivatecloudsGetallParams{
+		HTTPClient: client,
+	}
+}
+
+/*PcloudCloudconnectionsVirtualprivatecloudsGetallParams contains all the parameters to send to the API endpoint
+for the pcloud cloudconnections virtualprivateclouds getall operation typically these are written to a http.Request
+*/
+type PcloudCloudconnectionsVirtualprivatecloudsGetallParams struct {
+
+	/*CloudInstanceID
+	  Cloud Instance ID of a PCloud Instance
+
+	*/
+	CloudInstanceID string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the pcloud cloudconnections virtualprivateclouds getall params
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallParams) WithTimeout(timeout time.Duration) *PcloudCloudconnectionsVirtualprivatecloudsGetallParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the pcloud cloudconnections virtualprivateclouds getall params
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the pcloud cloudconnections virtualprivateclouds getall params
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallParams) WithContext(ctx context.Context) *PcloudCloudconnectionsVirtualprivatecloudsGetallParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the pcloud cloudconnections virtualprivateclouds getall params
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the pcloud cloudconnections virtualprivateclouds getall params
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallParams) WithHTTPClient(client *http.Client) *PcloudCloudconnectionsVirtualprivatecloudsGetallParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the pcloud cloudconnections virtualprivateclouds getall params
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudconnections virtualprivateclouds getall params
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudconnectionsVirtualprivatecloudsGetallParams {
+	o.SetCloudInstanceID(cloudInstanceID)
+	return o
+}
+
+// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudconnections virtualprivateclouds getall params
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallParams) SetCloudInstanceID(cloudInstanceID string) {
+	o.CloudInstanceID = cloudInstanceID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// path param cloud_instance_id
+	if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_virtualprivateclouds_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_virtualprivateclouds_getall_responses.go
new file mode 100644
index 00000000000..41e0bfca491
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections/pcloud_cloudconnections_virtualprivateclouds_getall_responses.go
@@ -0,0 +1,139 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package p_cloud_cloud_connections
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/go-openapi/runtime"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	models "github.com/IBM-Cloud/power-go-client/power/models"
+)
+
+// PcloudCloudconnectionsVirtualprivatecloudsGetallReader is a Reader for the PcloudCloudconnectionsVirtualprivatecloudsGetall structure.
+type PcloudCloudconnectionsVirtualprivatecloudsGetallReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudconnectionsVirtualprivatecloudsGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudconnectionsVirtualprivatecloudsGetallOK creates a PcloudCloudconnectionsVirtualprivatecloudsGetallOK with default headers values +func NewPcloudCloudconnectionsVirtualprivatecloudsGetallOK() *PcloudCloudconnectionsVirtualprivatecloudsGetallOK { + return &PcloudCloudconnectionsVirtualprivatecloudsGetallOK{} +} + +/*PcloudCloudconnectionsVirtualprivatecloudsGetallOK handles this case with default header values. + +OK +*/ +type PcloudCloudconnectionsVirtualprivatecloudsGetallOK struct { + Payload *models.CloudConnectionVirtualPrivateClouds +} + +func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections-virtual-private-clouds][%d] pcloudCloudconnectionsVirtualprivatecloudsGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudConnectionVirtualPrivateClouds) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest creates a PcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest with default headers values +func NewPcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest() *PcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest { + return &PcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest{} +} + +/*PcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections-virtual-private-clouds][%d] pcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError creates a PcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError with default headers values +func NewPcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError() *PcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError { + return &PcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError{} +} + +/*PcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/cloud-connections-virtual-private-clouds][%d] pcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudconnectionsVirtualprivatecloudsGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/p_cloud_events_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/p_cloud_events_client.go new file mode 100644 index 00000000000..41cd2f13cff --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/p_cloud_events_client.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_events + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud events API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud events API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudEventsGet gets a single event +*/ +func (a *Client) PcloudEventsGet(params *PcloudEventsGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudEventsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudEventsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.events.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/events/{event_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudEventsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudEventsGetOK), nil + +} + +/* +PcloudEventsGetquery gets events from this cloud instance since a specific timestamp +*/ +func (a *Client) PcloudEventsGetquery(params *PcloudEventsGetqueryParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudEventsGetqueryOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudEventsGetqueryParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.events.getquery", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/events", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudEventsGetqueryReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudEventsGetqueryOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_get_parameters.go new file mode 100644 index 00000000000..459c4f0ef8f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_get_parameters.go @@ -0,0 +1,182 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_events + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudEventsGetParams creates a new PcloudEventsGetParams object +// with the default values initialized. 
+func NewPcloudEventsGetParams() *PcloudEventsGetParams {
+	var ()
+	return &PcloudEventsGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewPcloudEventsGetParamsWithTimeout creates a new PcloudEventsGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewPcloudEventsGetParamsWithTimeout(timeout time.Duration) *PcloudEventsGetParams {
+	var ()
+	return &PcloudEventsGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewPcloudEventsGetParamsWithContext creates a new PcloudEventsGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewPcloudEventsGetParamsWithContext(ctx context.Context) *PcloudEventsGetParams {
+	var ()
+	return &PcloudEventsGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewPcloudEventsGetParamsWithHTTPClient creates a new PcloudEventsGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewPcloudEventsGetParamsWithHTTPClient(client *http.Client) *PcloudEventsGetParams {
+	var ()
+	return &PcloudEventsGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*PcloudEventsGetParams contains all the parameters to send to the API endpoint
+for the pcloud events get operation typically these are written to a http.Request
+*/
+type PcloudEventsGetParams struct {
+
+	/*AcceptLanguage
+	  The language requested for the return document
+
+	*/
+	AcceptLanguage *string
+	/*CloudInstanceID
+	  Cloud Instance ID of a PCloud Instance
+
+	*/
+	CloudInstanceID string
+	/*EventID
+	  Event ID
+
+	*/
+	EventID string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the pcloud events get params
+func (o *PcloudEventsGetParams) WithTimeout(timeout time.Duration) *PcloudEventsGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the pcloud events get params
+func (o *PcloudEventsGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the pcloud events get params
+func (o *PcloudEventsGetParams) WithContext(ctx context.Context) *PcloudEventsGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the pcloud events get params
+func (o *PcloudEventsGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the pcloud events get params
+func (o *PcloudEventsGetParams) WithHTTPClient(client *http.Client) *PcloudEventsGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the pcloud events get params
+func (o *PcloudEventsGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithAcceptLanguage adds the acceptLanguage to the pcloud events get params
+func (o *PcloudEventsGetParams) WithAcceptLanguage(acceptLanguage *string) *PcloudEventsGetParams {
+	o.SetAcceptLanguage(acceptLanguage)
+	return o
+}
+
+// SetAcceptLanguage adds the acceptLanguage to the pcloud events get params
+func (o *PcloudEventsGetParams) SetAcceptLanguage(acceptLanguage *string) {
+	o.AcceptLanguage = acceptLanguage
+}
+
+// WithCloudInstanceID adds the cloudInstanceID to the pcloud events get params
+func (o *PcloudEventsGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudEventsGetParams {
+	o.SetCloudInstanceID(cloudInstanceID)
+	return o
+}
+
+// SetCloudInstanceID adds the cloudInstanceId to the pcloud events get params
+func (o *PcloudEventsGetParams) SetCloudInstanceID(cloudInstanceID string) {
+	o.CloudInstanceID = cloudInstanceID
+}
+
+// WithEventID adds the eventID to the pcloud events get params
+func (o *PcloudEventsGetParams) WithEventID(eventID string) *PcloudEventsGetParams {
+	o.SetEventID(eventID)
+	return o
+}
+
+// SetEventID adds the eventId to the pcloud events get params
+func (o *PcloudEventsGetParams) SetEventID(eventID string) {
+	o.EventID = eventID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PcloudEventsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if o.AcceptLanguage != nil {
+
+		// header param Accept-Language
+		if err := r.SetHeaderParam("Accept-Language", *o.AcceptLanguage); err != nil {
+			return err
+		}
+
+	}
+
+	// path param cloud_instance_id
+	if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil {
+		return err
+	}
+
+	// path param event_id
+	if err := r.SetPathParam("event_id", o.EventID); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_get_responses.go
new file mode 100644
index 00000000000..795d78c34f4
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_get_responses.go
@@ -0,0 +1,175 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package p_cloud_events
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/go-openapi/runtime"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	models "github.com/IBM-Cloud/power-go-client/power/models"
+)
+
+// PcloudEventsGetReader is a Reader for the PcloudEventsGet structure.
+type PcloudEventsGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PcloudEventsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+
+	case 200:
+		result := NewPcloudEventsGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+
+	case 400:
+		result := NewPcloudEventsGetBadRequest()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return nil, result
+
+	case 404:
+		result := NewPcloudEventsGetNotFound()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return nil, result
+
+	case 500:
+		result := NewPcloudEventsGetInternalServerError()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return nil, result
+
+	default:
+		return nil, runtime.NewAPIError("unknown error", response, response.Code())
+	}
+}
+
+// NewPcloudEventsGetOK creates a PcloudEventsGetOK with default headers values
+func NewPcloudEventsGetOK() *PcloudEventsGetOK {
+	return &PcloudEventsGetOK{}
+}
+
+/*PcloudEventsGetOK handles this case with default header values.
+ +OK +*/ +type PcloudEventsGetOK struct { + Payload *models.Event +} + +func (o *PcloudEventsGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/events/{event_id}][%d] pcloudEventsGetOK %+v", 200, o.Payload) +} + +func (o *PcloudEventsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Event) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudEventsGetBadRequest creates a PcloudEventsGetBadRequest with default headers values +func NewPcloudEventsGetBadRequest() *PcloudEventsGetBadRequest { + return &PcloudEventsGetBadRequest{} +} + +/*PcloudEventsGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudEventsGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudEventsGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/events/{event_id}][%d] pcloudEventsGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudEventsGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudEventsGetNotFound creates a PcloudEventsGetNotFound with default headers values +func NewPcloudEventsGetNotFound() *PcloudEventsGetNotFound { + return &PcloudEventsGetNotFound{} +} + +/*PcloudEventsGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudEventsGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudEventsGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/events/{event_id}][%d] pcloudEventsGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudEventsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudEventsGetInternalServerError creates a PcloudEventsGetInternalServerError with default headers values +func NewPcloudEventsGetInternalServerError() *PcloudEventsGetInternalServerError { + return &PcloudEventsGetInternalServerError{} +} + +/*PcloudEventsGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudEventsGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudEventsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/events/{event_id}][%d] pcloudEventsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudEventsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_getquery_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_getquery_parameters.go new file mode 100644 index 00000000000..83d27dcfdee --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_getquery_parameters.go @@ -0,0 +1,257 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_events + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudEventsGetqueryParams creates a new PcloudEventsGetqueryParams object +// with the default values initialized. +func NewPcloudEventsGetqueryParams() *PcloudEventsGetqueryParams { + var () + return &PcloudEventsGetqueryParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudEventsGetqueryParamsWithTimeout creates a new PcloudEventsGetqueryParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudEventsGetqueryParamsWithTimeout(timeout time.Duration) *PcloudEventsGetqueryParams { + var () + return &PcloudEventsGetqueryParams{ + + timeout: timeout, + } +} + +// NewPcloudEventsGetqueryParamsWithContext creates a new PcloudEventsGetqueryParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudEventsGetqueryParamsWithContext(ctx context.Context) *PcloudEventsGetqueryParams { + var () + return &PcloudEventsGetqueryParams{ + + Context: ctx, + } +} + +// NewPcloudEventsGetqueryParamsWithHTTPClient creates a new PcloudEventsGetqueryParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudEventsGetqueryParamsWithHTTPClient(client *http.Client) *PcloudEventsGetqueryParams { + var () + return &PcloudEventsGetqueryParams{ + HTTPClient: client, + } +} + +/*PcloudEventsGetqueryParams contains all the parameters to send to the API endpoint +for the pcloud events getquery operation typically these are written to a http.Request +*/ +type PcloudEventsGetqueryParams struct { + + /*AcceptLanguage + The language requested for the return document + + */ + AcceptLanguage *string + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*FromTime + A from query time in either ISO 8601 or unix epoch format + + */ + FromTime *string + /*Time + (deprecated - use from_time) A time in either ISO 8601 or unix epoch format + + */ + Time *string + /*ToTime + A to query time in either ISO 8601 or unix 
epoch format + + */ + ToTime *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) WithTimeout(timeout time.Duration) *PcloudEventsGetqueryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) WithContext(ctx context.Context) *PcloudEventsGetqueryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) WithHTTPClient(client *http.Client) *PcloudEventsGetqueryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAcceptLanguage adds the acceptLanguage to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) WithAcceptLanguage(acceptLanguage *string) *PcloudEventsGetqueryParams { + o.SetAcceptLanguage(acceptLanguage) + return o +} + +// SetAcceptLanguage adds the acceptLanguage to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) SetAcceptLanguage(acceptLanguage *string) { + o.AcceptLanguage = acceptLanguage +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) WithCloudInstanceID(cloudInstanceID string) *PcloudEventsGetqueryParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithFromTime adds the fromTime to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) WithFromTime(fromTime *string) *PcloudEventsGetqueryParams { + o.SetFromTime(fromTime) + return o +} + +// SetFromTime adds the fromTime to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) SetFromTime(fromTime *string) { + o.FromTime = fromTime +} + +// WithTime adds the time to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) WithTime(time *string) *PcloudEventsGetqueryParams { + o.SetTime(time) + return o +} + +// SetTime adds the time to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) SetTime(time *string) { + o.Time = time +} + +// WithToTime adds the toTime to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) WithToTime(toTime *string) *PcloudEventsGetqueryParams { + o.SetToTime(toTime) + return o +} + +// SetToTime adds the toTime to the pcloud events getquery params +func (o *PcloudEventsGetqueryParams) SetToTime(toTime *string) { + o.ToTime = toTime +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudEventsGetqueryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.AcceptLanguage != 
nil { + + // header param Accept-Language + if err := r.SetHeaderParam("Accept-Language", *o.AcceptLanguage); err != nil { + return err + } + + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if o.FromTime != nil { + + // query param from_time + var qrFromTime string + if o.FromTime != nil { + qrFromTime = *o.FromTime + } + qFromTime := qrFromTime + if qFromTime != "" { + if err := r.SetQueryParam("from_time", qFromTime); err != nil { + return err + } + } + + } + + if o.Time != nil { + + // query param time + var qrTime string + if o.Time != nil { + qrTime = *o.Time + } + qTime := qrTime + if qTime != "" { + if err := r.SetQueryParam("time", qTime); err != nil { + return err + } + } + + } + + if o.ToTime != nil { + + // query param to_time + var qrToTime string + if o.ToTime != nil { + qrToTime = *o.ToTime + } + qToTime := qrToTime + if qToTime != "" { + if err := r.SetQueryParam("to_time", qToTime); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_getquery_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_getquery_responses.go new file mode 100644 index 00000000000..40cfbd3423e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events/pcloud_events_getquery_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_events + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudEventsGetqueryReader is a Reader for the PcloudEventsGetquery structure. +type PcloudEventsGetqueryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudEventsGetqueryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudEventsGetqueryOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudEventsGetqueryBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudEventsGetqueryInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudEventsGetqueryOK creates a PcloudEventsGetqueryOK with default headers values +func NewPcloudEventsGetqueryOK() *PcloudEventsGetqueryOK { + return &PcloudEventsGetqueryOK{} +} + +/*PcloudEventsGetqueryOK handles this case with default header values. 
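+The 200 payload decodes into *models.Events; note that readResponse below
+tolerates io.EOF, so an empty response body is not treated as an error.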
+ +OK +*/ +type PcloudEventsGetqueryOK struct { + Payload *models.Events +} + +func (o *PcloudEventsGetqueryOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/events][%d] pcloudEventsGetqueryOK %+v", 200, o.Payload) +} + +func (o *PcloudEventsGetqueryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Events) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudEventsGetqueryBadRequest creates a PcloudEventsGetqueryBadRequest with default headers values +func NewPcloudEventsGetqueryBadRequest() *PcloudEventsGetqueryBadRequest { + return &PcloudEventsGetqueryBadRequest{} +} + +/*PcloudEventsGetqueryBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudEventsGetqueryBadRequest struct { + Payload *models.Error +} + +func (o *PcloudEventsGetqueryBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/events][%d] pcloudEventsGetqueryBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudEventsGetqueryBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudEventsGetqueryInternalServerError creates a PcloudEventsGetqueryInternalServerError with default headers values +func NewPcloudEventsGetqueryInternalServerError() *PcloudEventsGetqueryInternalServerError { + return &PcloudEventsGetqueryInternalServerError{} +} + +/*PcloudEventsGetqueryInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudEventsGetqueryInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudEventsGetqueryInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/events][%d] pcloudEventsGetqueryInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudEventsGetqueryInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/p_cloud_images_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/p_cloud_images_client.go new file mode 100644 index 00000000000..d87f3de717f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/p_cloud_images_client.go @@ -0,0 +1,297 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud images API client. 
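+//
+// A minimal usage sketch; the host, base path, and schemes below are
+// placeholders, and httptransport refers to github.com/go-openapi/runtime/client:
+//
+//	transport := httptransport.New("power-api.example.com", "/", []string{"https"})
+//	imagesClient := New(transport, strfmt.Default)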
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud images API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudCloudinstancesImagesDelete deletes an image from a cloud instance +*/ +func (a *Client) PcloudCloudinstancesImagesDelete(params *PcloudCloudinstancesImagesDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesImagesDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesImagesDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.images.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesImagesDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesImagesDeleteOK), nil + +} + +/* +PcloudCloudinstancesImagesExportPost exports an image +*/ +func (a *Client) PcloudCloudinstancesImagesExportPost(params *PcloudCloudinstancesImagesExportPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesImagesExportPostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesImagesExportPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.images.export.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}/export", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesImagesExportPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesImagesExportPostAccepted), nil + +} + +/* +PcloudCloudinstancesImagesGet detaileds info of an image +*/ +func (a *Client) PcloudCloudinstancesImagesGet(params *PcloudCloudinstancesImagesGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesImagesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesImagesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.images.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesImagesGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesImagesGetOK), nil + +} + +/* +PcloudCloudinstancesImagesGetall lists all images for this cloud instance +*/ +func (a *Client) PcloudCloudinstancesImagesGetall(params *PcloudCloudinstancesImagesGetallParams, authInfo runtime.ClientAuthInfoWriter) 
(*PcloudCloudinstancesImagesGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesImagesGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.images.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/images", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesImagesGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesImagesGetallOK), nil + +} + +/* +PcloudCloudinstancesImagesPost creates a new image from available images +*/ +func (a *Client) PcloudCloudinstancesImagesPost(params *PcloudCloudinstancesImagesPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesImagesPostOK, *PcloudCloudinstancesImagesPostCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesImagesPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.images.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/images", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesImagesPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *PcloudCloudinstancesImagesPostOK: + return value, nil, nil + case *PcloudCloudinstancesImagesPostCreated: + return nil, value, nil + } + return nil, nil, nil + +} + +/* +PcloudCloudinstancesStockimagesGet detaileds info of an available stock image +*/ +func (a *Client) PcloudCloudinstancesStockimagesGet(params *PcloudCloudinstancesStockimagesGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesStockimagesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesStockimagesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.stockimages.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images/{image_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesStockimagesGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesStockimagesGetOK), nil + +} + +/* +PcloudCloudinstancesStockimagesGetall lists all available stock images +*/ +func (a *Client) PcloudCloudinstancesStockimagesGetall(params *PcloudCloudinstancesStockimagesGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesStockimagesGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesStockimagesGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.stockimages.getall", + Method: "GET", + PathPattern: 
"/pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesStockimagesGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesStockimagesGetallOK), nil + +} + +/* +PcloudImagesGet deprecateds for pcloud v1 cloud instances cloud instance id stock images image id detailed info of an available stock image +*/ +func (a *Client) PcloudImagesGet(params *PcloudImagesGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudImagesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudImagesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.images.get", + Method: "GET", + PathPattern: "/pcloud/v1/images/{image_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudImagesGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudImagesGetOK), nil + +} + +/* +PcloudImagesGetall deprecateds for pcloud v1 cloud instances cloud instance id stock images list all available stock images +*/ +func (a *Client) PcloudImagesGetall(params *PcloudImagesGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudImagesGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudImagesGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.images.getall", + Method: "GET", + PathPattern: "/pcloud/v1/images", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudImagesGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudImagesGetallOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_delete_parameters.go new file mode 100644 index 00000000000..aa70c0530d3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_delete_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesImagesDeleteParams creates a new PcloudCloudinstancesImagesDeleteParams object +// with the default values initialized. 
+func NewPcloudCloudinstancesImagesDeleteParams() *PcloudCloudinstancesImagesDeleteParams { + var () + return &PcloudCloudinstancesImagesDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesImagesDeleteParamsWithTimeout creates a new PcloudCloudinstancesImagesDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesImagesDeleteParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesDeleteParams { + var () + return &PcloudCloudinstancesImagesDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesImagesDeleteParamsWithContext creates a new PcloudCloudinstancesImagesDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesImagesDeleteParamsWithContext(ctx context.Context) *PcloudCloudinstancesImagesDeleteParams { + var () + return &PcloudCloudinstancesImagesDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesImagesDeleteParamsWithHTTPClient creates a new PcloudCloudinstancesImagesDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesImagesDeleteParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesDeleteParams { + var () + return &PcloudCloudinstancesImagesDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesImagesDeleteParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances images delete operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesImagesDeleteParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*ImageID + Image ID of a image + + */ + ImageID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) WithContext(ctx context.Context) *PcloudCloudinstancesImagesDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesImagesDeleteParams { + 
o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithImageID adds the imageID to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) WithImageID(imageID string) *PcloudCloudinstancesImagesDeleteParams { + o.SetImageID(imageID) + return o +} + +// SetImageID adds the imageId to the pcloud cloudinstances images delete params +func (o *PcloudCloudinstancesImagesDeleteParams) SetImageID(imageID string) { + o.ImageID = imageID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesImagesDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param image_id + if err := r.SetPathParam("image_id", o.ImageID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_delete_responses.go new file mode 100644 index 00000000000..0be0ba32059 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesImagesDeleteReader is a Reader for the PcloudCloudinstancesImagesDelete structure. +type PcloudCloudinstancesImagesDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
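+// The switch below treats 200 as the success case and returns it as the
+// result; 400, 410, and 500 are decoded into their typed payloads and
+// returned as the error value (each generated response type implements
+// Error() and so satisfies the error interface).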
+func (o *PcloudCloudinstancesImagesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesImagesDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesImagesDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudCloudinstancesImagesDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesImagesDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesImagesDeleteOK creates a PcloudCloudinstancesImagesDeleteOK with default headers values +func NewPcloudCloudinstancesImagesDeleteOK() *PcloudCloudinstancesImagesDeleteOK { + return &PcloudCloudinstancesImagesDeleteOK{} +} + +/*PcloudCloudinstancesImagesDeleteOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesImagesDeleteOK struct { + Payload models.Object +} + +func (o *PcloudCloudinstancesImagesDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}][%d] pcloudCloudinstancesImagesDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesImagesDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesDeleteBadRequest creates a PcloudCloudinstancesImagesDeleteBadRequest with default headers values +func NewPcloudCloudinstancesImagesDeleteBadRequest() *PcloudCloudinstancesImagesDeleteBadRequest { + return &PcloudCloudinstancesImagesDeleteBadRequest{} +} + +/*PcloudCloudinstancesImagesDeleteBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesImagesDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}][%d] pcloudCloudinstancesImagesDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesImagesDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesDeleteGone creates a PcloudCloudinstancesImagesDeleteGone with default headers values +func NewPcloudCloudinstancesImagesDeleteGone() *PcloudCloudinstancesImagesDeleteGone { + return &PcloudCloudinstancesImagesDeleteGone{} +} + +/*PcloudCloudinstancesImagesDeleteGone handles this case with default header values. 
+ +Gone +*/ +type PcloudCloudinstancesImagesDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}][%d] pcloudCloudinstancesImagesDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudCloudinstancesImagesDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesDeleteInternalServerError creates a PcloudCloudinstancesImagesDeleteInternalServerError with default headers values +func NewPcloudCloudinstancesImagesDeleteInternalServerError() *PcloudCloudinstancesImagesDeleteInternalServerError { + return &PcloudCloudinstancesImagesDeleteInternalServerError{} +} + +/*PcloudCloudinstancesImagesDeleteInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesImagesDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}][%d] pcloudCloudinstancesImagesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesImagesDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_export_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_export_post_parameters.go new file mode 100644 index 00000000000..afc5ec09572 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_export_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudCloudinstancesImagesExportPostParams creates a new PcloudCloudinstancesImagesExportPostParams object +// with the default values initialized. 
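+//
+// A hypothetical construction sketch (IDs are placeholders; the
+// models.ExportImage fields are omitted here, so consult that model for
+// what an actual export request must carry):
+//
+//	params := NewPcloudCloudinstancesImagesExportPostParams().
+//		WithCloudInstanceID("cloud-instance-id").
+//		WithImageID("image-id").
+//		WithBody(&models.ExportImage{})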
+func NewPcloudCloudinstancesImagesExportPostParams() *PcloudCloudinstancesImagesExportPostParams { + var () + return &PcloudCloudinstancesImagesExportPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesImagesExportPostParamsWithTimeout creates a new PcloudCloudinstancesImagesExportPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesImagesExportPostParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesExportPostParams { + var () + return &PcloudCloudinstancesImagesExportPostParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesImagesExportPostParamsWithContext creates a new PcloudCloudinstancesImagesExportPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesImagesExportPostParamsWithContext(ctx context.Context) *PcloudCloudinstancesImagesExportPostParams { + var () + return &PcloudCloudinstancesImagesExportPostParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesImagesExportPostParamsWithHTTPClient creates a new PcloudCloudinstancesImagesExportPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesImagesExportPostParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesExportPostParams { + var () + return &PcloudCloudinstancesImagesExportPostParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesImagesExportPostParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances images export post operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesImagesExportPostParams struct { + + /*Body + Parameters for exporting an image + + */ + Body *models.ExportImage + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*ImageID + Image ID of a image + + */ + ImageID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesExportPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) WithContext(ctx context.Context) *PcloudCloudinstancesImagesExportPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesExportPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the 
pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) WithBody(body *models.ExportImage) *PcloudCloudinstancesImagesExportPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) SetBody(body *models.ExportImage) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesImagesExportPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithImageID adds the imageID to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) WithImageID(imageID string) *PcloudCloudinstancesImagesExportPostParams { + o.SetImageID(imageID) + return o +} + +// SetImageID adds the imageId to the pcloud cloudinstances images export post params +func (o *PcloudCloudinstancesImagesExportPostParams) SetImageID(imageID string) { + o.ImageID = imageID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesImagesExportPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param image_id + if err := r.SetPathParam("image_id", o.ImageID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_export_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_export_post_responses.go new file mode 100644 index 00000000000..215635c358b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_export_post_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesImagesExportPostReader is a Reader for the PcloudCloudinstancesImagesExportPost structure. +type PcloudCloudinstancesImagesExportPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
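+// The switch below treats 202 Accepted as the success case, which suggests
+// the export itself completes asynchronously; 400, 404, and 500 are decoded
+// into *models.Error payloads and returned as the error value.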
+func (o *PcloudCloudinstancesImagesExportPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudCloudinstancesImagesExportPostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesImagesExportPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesImagesExportPostNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesImagesExportPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesImagesExportPostAccepted creates a PcloudCloudinstancesImagesExportPostAccepted with default headers values +func NewPcloudCloudinstancesImagesExportPostAccepted() *PcloudCloudinstancesImagesExportPostAccepted { + return &PcloudCloudinstancesImagesExportPostAccepted{} +} + +/*PcloudCloudinstancesImagesExportPostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudCloudinstancesImagesExportPostAccepted struct { + Payload models.Object +} + +func (o *PcloudCloudinstancesImagesExportPostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}/export][%d] pcloudCloudinstancesImagesExportPostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudCloudinstancesImagesExportPostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesExportPostBadRequest creates a PcloudCloudinstancesImagesExportPostBadRequest with default headers values +func NewPcloudCloudinstancesImagesExportPostBadRequest() *PcloudCloudinstancesImagesExportPostBadRequest { + return &PcloudCloudinstancesImagesExportPostBadRequest{} +} + +/*PcloudCloudinstancesImagesExportPostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudinstancesImagesExportPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesExportPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}/export][%d] pcloudCloudinstancesImagesExportPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesImagesExportPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesExportPostNotFound creates a PcloudCloudinstancesImagesExportPostNotFound with default headers values +func NewPcloudCloudinstancesImagesExportPostNotFound() *PcloudCloudinstancesImagesExportPostNotFound { + return &PcloudCloudinstancesImagesExportPostNotFound{} +} + +/*PcloudCloudinstancesImagesExportPostNotFound handles this case with default header values. + +Not Found +*/ +type PcloudCloudinstancesImagesExportPostNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesExportPostNotFound) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}/export][%d] pcloudCloudinstancesImagesExportPostNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesImagesExportPostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesExportPostInternalServerError creates a PcloudCloudinstancesImagesExportPostInternalServerError with default headers values +func NewPcloudCloudinstancesImagesExportPostInternalServerError() *PcloudCloudinstancesImagesExportPostInternalServerError { + return &PcloudCloudinstancesImagesExportPostInternalServerError{} +} + +/*PcloudCloudinstancesImagesExportPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesImagesExportPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesExportPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}/export][%d] pcloudCloudinstancesImagesExportPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesImagesExportPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_get_parameters.go new file mode 100644 index 00000000000..9f5ba65ea84 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesImagesGetParams creates a new PcloudCloudinstancesImagesGetParams object +// with the default values initialized. +func NewPcloudCloudinstancesImagesGetParams() *PcloudCloudinstancesImagesGetParams { + var () + return &PcloudCloudinstancesImagesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesImagesGetParamsWithTimeout creates a new PcloudCloudinstancesImagesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesImagesGetParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesGetParams { + var () + return &PcloudCloudinstancesImagesGetParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesImagesGetParamsWithContext creates a new PcloudCloudinstancesImagesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesImagesGetParamsWithContext(ctx context.Context) *PcloudCloudinstancesImagesGetParams { + var () + return &PcloudCloudinstancesImagesGetParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesImagesGetParamsWithHTTPClient creates a new PcloudCloudinstancesImagesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesImagesGetParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesGetParams { + var () + return &PcloudCloudinstancesImagesGetParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesImagesGetParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances images get operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesImagesGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*ImageID + Image ID of a image + + */ + ImageID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) WithContext(ctx context.Context) *PcloudCloudinstancesImagesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud 
cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesImagesGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithImageID adds the imageID to the pcloud cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) WithImageID(imageID string) *PcloudCloudinstancesImagesGetParams { + o.SetImageID(imageID) + return o +} + +// SetImageID adds the imageId to the pcloud cloudinstances images get params +func (o *PcloudCloudinstancesImagesGetParams) SetImageID(imageID string) { + o.ImageID = imageID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesImagesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param image_id + if err := r.SetPathParam("image_id", o.ImageID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_get_responses.go new file mode 100644 index 00000000000..da20793f5fb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesImagesGetReader is a Reader for the PcloudCloudinstancesImagesGet structure. +type PcloudCloudinstancesImagesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
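+// As with the other generated readers, the switch below returns the 200
+// response as the result, while 400, 404, and 500 are decoded into
+// *models.Error payloads and returned as the error value.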
+func (o *PcloudCloudinstancesImagesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesImagesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesImagesGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesImagesGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesImagesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesImagesGetOK creates a PcloudCloudinstancesImagesGetOK with default headers values +func NewPcloudCloudinstancesImagesGetOK() *PcloudCloudinstancesImagesGetOK { + return &PcloudCloudinstancesImagesGetOK{} +} + +/*PcloudCloudinstancesImagesGetOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesImagesGetOK struct { + Payload *models.Image +} + +func (o *PcloudCloudinstancesImagesGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}][%d] pcloudCloudinstancesImagesGetOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesImagesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Image) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesGetBadRequest creates a PcloudCloudinstancesImagesGetBadRequest with default headers values +func NewPcloudCloudinstancesImagesGetBadRequest() *PcloudCloudinstancesImagesGetBadRequest { + return &PcloudCloudinstancesImagesGetBadRequest{} +} + +/*PcloudCloudinstancesImagesGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesImagesGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}][%d] pcloudCloudinstancesImagesGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesImagesGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesGetNotFound creates a PcloudCloudinstancesImagesGetNotFound with default headers values +func NewPcloudCloudinstancesImagesGetNotFound() *PcloudCloudinstancesImagesGetNotFound { + return &PcloudCloudinstancesImagesGetNotFound{} +} + +/*PcloudCloudinstancesImagesGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudCloudinstancesImagesGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}][%d] pcloudCloudinstancesImagesGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesImagesGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesGetInternalServerError creates a PcloudCloudinstancesImagesGetInternalServerError with default headers values +func NewPcloudCloudinstancesImagesGetInternalServerError() *PcloudCloudinstancesImagesGetInternalServerError { + return &PcloudCloudinstancesImagesGetInternalServerError{} +} + +/*PcloudCloudinstancesImagesGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesImagesGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/images/{image_id}][%d] pcloudCloudinstancesImagesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesImagesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_getall_parameters.go new file mode 100644 index 00000000000..6bcc6651ee2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_getall_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesImagesGetallParams creates a new PcloudCloudinstancesImagesGetallParams object +// with the default values initialized. 
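+//
+// A hypothetical construction sketch (the cloud instance ID is a placeholder):
+//
+//	params := NewPcloudCloudinstancesImagesGetallParams().
+//		WithCloudInstanceID("cloud-instance-id")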
+func NewPcloudCloudinstancesImagesGetallParams() *PcloudCloudinstancesImagesGetallParams { + var () + return &PcloudCloudinstancesImagesGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesImagesGetallParamsWithTimeout creates a new PcloudCloudinstancesImagesGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesImagesGetallParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesGetallParams { + var () + return &PcloudCloudinstancesImagesGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesImagesGetallParamsWithContext creates a new PcloudCloudinstancesImagesGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesImagesGetallParamsWithContext(ctx context.Context) *PcloudCloudinstancesImagesGetallParams { + var () + return &PcloudCloudinstancesImagesGetallParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesImagesGetallParamsWithHTTPClient creates a new PcloudCloudinstancesImagesGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesImagesGetallParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesGetallParams { + var () + return &PcloudCloudinstancesImagesGetallParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesImagesGetallParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances images getall operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesImagesGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances images getall params +func (o *PcloudCloudinstancesImagesGetallParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances images getall params +func (o *PcloudCloudinstancesImagesGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances images getall params +func (o *PcloudCloudinstancesImagesGetallParams) WithContext(ctx context.Context) *PcloudCloudinstancesImagesGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances images getall params +func (o *PcloudCloudinstancesImagesGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances images getall params +func (o *PcloudCloudinstancesImagesGetallParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances images getall params +func (o *PcloudCloudinstancesImagesGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances images getall params +func (o *PcloudCloudinstancesImagesGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesImagesGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the 
cloudInstanceId to the pcloud cloudinstances images getall params +func (o *PcloudCloudinstancesImagesGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesImagesGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_getall_responses.go new file mode 100644 index 00000000000..fffd794fee8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_getall_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesImagesGetallReader is a Reader for the PcloudCloudinstancesImagesGetall structure. +type PcloudCloudinstancesImagesGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudCloudinstancesImagesGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesImagesGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesImagesGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesImagesGetallNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesImagesGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesImagesGetallOK creates a PcloudCloudinstancesImagesGetallOK with default headers values +func NewPcloudCloudinstancesImagesGetallOK() *PcloudCloudinstancesImagesGetallOK { + return &PcloudCloudinstancesImagesGetallOK{} +} + +/*PcloudCloudinstancesImagesGetallOK handles this case with default header values. 
+ +OK +*/ +type PcloudCloudinstancesImagesGetallOK struct { + Payload *models.Images +} + +func (o *PcloudCloudinstancesImagesGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesImagesGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Images) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesGetallBadRequest creates a PcloudCloudinstancesImagesGetallBadRequest with default headers values +func NewPcloudCloudinstancesImagesGetallBadRequest() *PcloudCloudinstancesImagesGetallBadRequest { + return &PcloudCloudinstancesImagesGetallBadRequest{} +} + +/*PcloudCloudinstancesImagesGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesImagesGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesImagesGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesGetallNotFound creates a PcloudCloudinstancesImagesGetallNotFound with default headers values +func NewPcloudCloudinstancesImagesGetallNotFound() *PcloudCloudinstancesImagesGetallNotFound { + return &PcloudCloudinstancesImagesGetallNotFound{} +} + +/*PcloudCloudinstancesImagesGetallNotFound handles this case with default header values. + +Not Found +*/ +type PcloudCloudinstancesImagesGetallNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesGetallNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesGetallNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesImagesGetallNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesGetallInternalServerError creates a PcloudCloudinstancesImagesGetallInternalServerError with default headers values +func NewPcloudCloudinstancesImagesGetallInternalServerError() *PcloudCloudinstancesImagesGetallInternalServerError { + return &PcloudCloudinstancesImagesGetallInternalServerError{} +} + +/*PcloudCloudinstancesImagesGetallInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudCloudinstancesImagesGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesImagesGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_post_parameters.go new file mode 100644 index 00000000000..d45fd822534 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudCloudinstancesImagesPostParams creates a new PcloudCloudinstancesImagesPostParams object +// with the default values initialized. 
+func NewPcloudCloudinstancesImagesPostParams() *PcloudCloudinstancesImagesPostParams { + var () + return &PcloudCloudinstancesImagesPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesImagesPostParamsWithTimeout creates a new PcloudCloudinstancesImagesPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesImagesPostParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesPostParams { + var () + return &PcloudCloudinstancesImagesPostParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesImagesPostParamsWithContext creates a new PcloudCloudinstancesImagesPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesImagesPostParamsWithContext(ctx context.Context) *PcloudCloudinstancesImagesPostParams { + var () + return &PcloudCloudinstancesImagesPostParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesImagesPostParamsWithHTTPClient creates a new PcloudCloudinstancesImagesPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesImagesPostParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesPostParams { + var () + return &PcloudCloudinstancesImagesPostParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesImagesPostParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances images post operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesImagesPostParams struct { + + /*Body + Parameters for the creation of a new image from available images + + */ + Body *models.CreateImage + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesImagesPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) WithContext(ctx context.Context) *PcloudCloudinstancesImagesPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesImagesPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) WithBody(body *models.CreateImage) *PcloudCloudinstancesImagesPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud 
cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) SetBody(body *models.CreateImage) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesImagesPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances images post params +func (o *PcloudCloudinstancesImagesPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesImagesPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_post_responses.go new file mode 100644 index 00000000000..9ec57afbacf --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_images_post_responses.go @@ -0,0 +1,247 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesImagesPostReader is a Reader for the PcloudCloudinstancesImagesPost structure. +type PcloudCloudinstancesImagesPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudCloudinstancesImagesPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesImagesPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 201: + result := NewPcloudCloudinstancesImagesPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesImagesPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudCloudinstancesImagesPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudCloudinstancesImagesPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesImagesPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesImagesPostOK creates a PcloudCloudinstancesImagesPostOK with default headers values +func NewPcloudCloudinstancesImagesPostOK() *PcloudCloudinstancesImagesPostOK { + return &PcloudCloudinstancesImagesPostOK{} +} + +/*PcloudCloudinstancesImagesPostOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesImagesPostOK struct { + Payload *models.Image +} + +func (o *PcloudCloudinstancesImagesPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesPostOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesImagesPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Image) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesPostCreated creates a PcloudCloudinstancesImagesPostCreated with default headers values +func NewPcloudCloudinstancesImagesPostCreated() *PcloudCloudinstancesImagesPostCreated { + return &PcloudCloudinstancesImagesPostCreated{} +} + +/*PcloudCloudinstancesImagesPostCreated handles this case with default header values. 
+ +Created +*/ +type PcloudCloudinstancesImagesPostCreated struct { + Payload *models.Image +} + +func (o *PcloudCloudinstancesImagesPostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesPostCreated %+v", 201, o.Payload) +} + +func (o *PcloudCloudinstancesImagesPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Image) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesPostBadRequest creates a PcloudCloudinstancesImagesPostBadRequest with default headers values +func NewPcloudCloudinstancesImagesPostBadRequest() *PcloudCloudinstancesImagesPostBadRequest { + return &PcloudCloudinstancesImagesPostBadRequest{} +} + +/*PcloudCloudinstancesImagesPostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesImagesPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesImagesPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesPostConflict creates a PcloudCloudinstancesImagesPostConflict with default headers values +func NewPcloudCloudinstancesImagesPostConflict() *PcloudCloudinstancesImagesPostConflict { + return &PcloudCloudinstancesImagesPostConflict{} +} + +/*PcloudCloudinstancesImagesPostConflict handles this case with default header values. + +Conflict +*/ +type PcloudCloudinstancesImagesPostConflict struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudCloudinstancesImagesPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesPostUnprocessableEntity creates a PcloudCloudinstancesImagesPostUnprocessableEntity with default headers values +func NewPcloudCloudinstancesImagesPostUnprocessableEntity() *PcloudCloudinstancesImagesPostUnprocessableEntity { + return &PcloudCloudinstancesImagesPostUnprocessableEntity{} +} + +/*PcloudCloudinstancesImagesPostUnprocessableEntity handles this case with default header values. 
+ +Unprocessable Entity +*/ +type PcloudCloudinstancesImagesPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudCloudinstancesImagesPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesImagesPostInternalServerError creates a PcloudCloudinstancesImagesPostInternalServerError with default headers values +func NewPcloudCloudinstancesImagesPostInternalServerError() *PcloudCloudinstancesImagesPostInternalServerError { + return &PcloudCloudinstancesImagesPostInternalServerError{} +} + +/*PcloudCloudinstancesImagesPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesImagesPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesImagesPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/images][%d] pcloudCloudinstancesImagesPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesImagesPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_get_parameters.go new file mode 100644 index 00000000000..30f2ff8d9e7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesStockimagesGetParams creates a new PcloudCloudinstancesStockimagesGetParams object +// with the default values initialized. 
+func NewPcloudCloudinstancesStockimagesGetParams() *PcloudCloudinstancesStockimagesGetParams { + var () + return &PcloudCloudinstancesStockimagesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesStockimagesGetParamsWithTimeout creates a new PcloudCloudinstancesStockimagesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesStockimagesGetParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesStockimagesGetParams { + var () + return &PcloudCloudinstancesStockimagesGetParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesStockimagesGetParamsWithContext creates a new PcloudCloudinstancesStockimagesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesStockimagesGetParamsWithContext(ctx context.Context) *PcloudCloudinstancesStockimagesGetParams { + var () + return &PcloudCloudinstancesStockimagesGetParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesStockimagesGetParamsWithHTTPClient creates a new PcloudCloudinstancesStockimagesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesStockimagesGetParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesStockimagesGetParams { + var () + return &PcloudCloudinstancesStockimagesGetParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesStockimagesGetParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances stockimages get operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesStockimagesGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*ImageID + Image ID of a image + + */ + ImageID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesStockimagesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) WithContext(ctx context.Context) *PcloudCloudinstancesStockimagesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesStockimagesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) WithCloudInstanceID(cloudInstanceID 
string) *PcloudCloudinstancesStockimagesGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithImageID adds the imageID to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) WithImageID(imageID string) *PcloudCloudinstancesStockimagesGetParams { + o.SetImageID(imageID) + return o +} + +// SetImageID adds the imageId to the pcloud cloudinstances stockimages get params +func (o *PcloudCloudinstancesStockimagesGetParams) SetImageID(imageID string) { + o.ImageID = imageID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesStockimagesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param image_id + if err := r.SetPathParam("image_id", o.ImageID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_get_responses.go new file mode 100644 index 00000000000..564aad9675f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesStockimagesGetReader is a Reader for the PcloudCloudinstancesStockimagesGet structure. +type PcloudCloudinstancesStockimagesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudCloudinstancesStockimagesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesStockimagesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesStockimagesGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesStockimagesGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesStockimagesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesStockimagesGetOK creates a PcloudCloudinstancesStockimagesGetOK with default headers values +func NewPcloudCloudinstancesStockimagesGetOK() *PcloudCloudinstancesStockimagesGetOK { + return &PcloudCloudinstancesStockimagesGetOK{} +} + +/*PcloudCloudinstancesStockimagesGetOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesStockimagesGetOK struct { + Payload *models.Image +} + +func (o *PcloudCloudinstancesStockimagesGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images/{image_id}][%d] pcloudCloudinstancesStockimagesGetOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesStockimagesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Image) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesStockimagesGetBadRequest creates a PcloudCloudinstancesStockimagesGetBadRequest with default headers values +func NewPcloudCloudinstancesStockimagesGetBadRequest() *PcloudCloudinstancesStockimagesGetBadRequest { + return &PcloudCloudinstancesStockimagesGetBadRequest{} +} + +/*PcloudCloudinstancesStockimagesGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesStockimagesGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesStockimagesGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images/{image_id}][%d] pcloudCloudinstancesStockimagesGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesStockimagesGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesStockimagesGetNotFound creates a PcloudCloudinstancesStockimagesGetNotFound with default headers values +func NewPcloudCloudinstancesStockimagesGetNotFound() *PcloudCloudinstancesStockimagesGetNotFound { + return &PcloudCloudinstancesStockimagesGetNotFound{} +} + +/*PcloudCloudinstancesStockimagesGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudCloudinstancesStockimagesGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesStockimagesGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images/{image_id}][%d] pcloudCloudinstancesStockimagesGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesStockimagesGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesStockimagesGetInternalServerError creates a PcloudCloudinstancesStockimagesGetInternalServerError with default headers values +func NewPcloudCloudinstancesStockimagesGetInternalServerError() *PcloudCloudinstancesStockimagesGetInternalServerError { + return &PcloudCloudinstancesStockimagesGetInternalServerError{} +} + +/*PcloudCloudinstancesStockimagesGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesStockimagesGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesStockimagesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images/{image_id}][%d] pcloudCloudinstancesStockimagesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesStockimagesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_getall_parameters.go new file mode 100644 index 00000000000..ea73e175e52 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_getall_parameters.go @@ -0,0 +1,169 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesStockimagesGetallParams creates a new PcloudCloudinstancesStockimagesGetallParams object +// with the default values initialized. 
+func NewPcloudCloudinstancesStockimagesGetallParams() *PcloudCloudinstancesStockimagesGetallParams { + var () + return &PcloudCloudinstancesStockimagesGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesStockimagesGetallParamsWithTimeout creates a new PcloudCloudinstancesStockimagesGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesStockimagesGetallParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesStockimagesGetallParams { + var () + return &PcloudCloudinstancesStockimagesGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesStockimagesGetallParamsWithContext creates a new PcloudCloudinstancesStockimagesGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesStockimagesGetallParamsWithContext(ctx context.Context) *PcloudCloudinstancesStockimagesGetallParams { + var () + return &PcloudCloudinstancesStockimagesGetallParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesStockimagesGetallParamsWithHTTPClient creates a new PcloudCloudinstancesStockimagesGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesStockimagesGetallParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesStockimagesGetallParams { + var () + return &PcloudCloudinstancesStockimagesGetallParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesStockimagesGetallParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances stockimages getall operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesStockimagesGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*Sap + Include SAP images with get available stock images + + */ + Sap *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesStockimagesGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) WithContext(ctx context.Context) *PcloudCloudinstancesStockimagesGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesStockimagesGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud 
cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesStockimagesGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithSap adds the sap to the pcloud cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) WithSap(sap *bool) *PcloudCloudinstancesStockimagesGetallParams { + o.SetSap(sap) + return o +} + +// SetSap adds the sap to the pcloud cloudinstances stockimages getall params +func (o *PcloudCloudinstancesStockimagesGetallParams) SetSap(sap *bool) { + o.Sap = sap +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesStockimagesGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if o.Sap != nil { + + // query param sap + var qrSap bool + if o.Sap != nil { + qrSap = *o.Sap + } + qSap := swag.FormatBool(qrSap) + if qSap != "" { + if err := r.SetQueryParam("sap", qSap); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_getall_responses.go new file mode 100644 index 00000000000..58dee5af645 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_cloudinstances_stockimages_getall_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesStockimagesGetallReader is a Reader for the PcloudCloudinstancesStockimagesGetall structure. +type PcloudCloudinstancesStockimagesGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudCloudinstancesStockimagesGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesStockimagesGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesStockimagesGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesStockimagesGetallNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesStockimagesGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesStockimagesGetallOK creates a PcloudCloudinstancesStockimagesGetallOK with default headers values +func NewPcloudCloudinstancesStockimagesGetallOK() *PcloudCloudinstancesStockimagesGetallOK { + return &PcloudCloudinstancesStockimagesGetallOK{} +} + +/*PcloudCloudinstancesStockimagesGetallOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesStockimagesGetallOK struct { + Payload *models.Images +} + +func (o *PcloudCloudinstancesStockimagesGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images][%d] pcloudCloudinstancesStockimagesGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesStockimagesGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Images) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesStockimagesGetallBadRequest creates a PcloudCloudinstancesStockimagesGetallBadRequest with default headers values +func NewPcloudCloudinstancesStockimagesGetallBadRequest() *PcloudCloudinstancesStockimagesGetallBadRequest { + return &PcloudCloudinstancesStockimagesGetallBadRequest{} +} + +/*PcloudCloudinstancesStockimagesGetallBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudinstancesStockimagesGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesStockimagesGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images][%d] pcloudCloudinstancesStockimagesGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesStockimagesGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesStockimagesGetallNotFound creates a PcloudCloudinstancesStockimagesGetallNotFound with default headers values +func NewPcloudCloudinstancesStockimagesGetallNotFound() *PcloudCloudinstancesStockimagesGetallNotFound { + return &PcloudCloudinstancesStockimagesGetallNotFound{} +} + +/*PcloudCloudinstancesStockimagesGetallNotFound handles this case with default header values. + +Not Found +*/ +type PcloudCloudinstancesStockimagesGetallNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesStockimagesGetallNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images][%d] pcloudCloudinstancesStockimagesGetallNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesStockimagesGetallNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesStockimagesGetallInternalServerError creates a PcloudCloudinstancesStockimagesGetallInternalServerError with default headers values +func NewPcloudCloudinstancesStockimagesGetallInternalServerError() *PcloudCloudinstancesStockimagesGetallInternalServerError { + return &PcloudCloudinstancesStockimagesGetallInternalServerError{} +} + +/*PcloudCloudinstancesStockimagesGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesStockimagesGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesStockimagesGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/stock-images][%d] pcloudCloudinstancesStockimagesGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesStockimagesGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_get_parameters.go new file mode 100644 index 00000000000..2052bc828ba --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudImagesGetParams creates a new PcloudImagesGetParams object +// with the default values initialized. +func NewPcloudImagesGetParams() *PcloudImagesGetParams { + var () + return &PcloudImagesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudImagesGetParamsWithTimeout creates a new PcloudImagesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudImagesGetParamsWithTimeout(timeout time.Duration) *PcloudImagesGetParams { + var () + return &PcloudImagesGetParams{ + + timeout: timeout, + } +} + +// NewPcloudImagesGetParamsWithContext creates a new PcloudImagesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudImagesGetParamsWithContext(ctx context.Context) *PcloudImagesGetParams { + var () + return &PcloudImagesGetParams{ + + Context: ctx, + } +} + +// NewPcloudImagesGetParamsWithHTTPClient creates a new PcloudImagesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudImagesGetParamsWithHTTPClient(client *http.Client) *PcloudImagesGetParams { + var () + return &PcloudImagesGetParams{ + HTTPClient: client, + } +} + +/*PcloudImagesGetParams contains all the parameters to send to the API endpoint +for the pcloud images get operation typically these are written to a http.Request +*/ +type PcloudImagesGetParams struct { + + /*ImageID + Image ID of a image + + */ + ImageID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud images get params +func (o *PcloudImagesGetParams) WithTimeout(timeout time.Duration) *PcloudImagesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud images get params +func (o *PcloudImagesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud images get params +func (o *PcloudImagesGetParams) WithContext(ctx context.Context) *PcloudImagesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud images get params +func (o *PcloudImagesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud images get params +func (o *PcloudImagesGetParams) WithHTTPClient(client *http.Client) *PcloudImagesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud images get params +func (o *PcloudImagesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithImageID adds the imageID to the pcloud images get params +func (o *PcloudImagesGetParams) WithImageID(imageID string) *PcloudImagesGetParams { + o.SetImageID(imageID) + return o +} + +// SetImageID adds the imageId to the pcloud images get params +func (o *PcloudImagesGetParams) SetImageID(imageID string) { + o.ImageID = imageID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudImagesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err 
+ } + var res []error + + // path param image_id + if err := r.SetPathParam("image_id", o.ImageID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_get_responses.go new file mode 100644 index 00000000000..1e32b1daeb9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudImagesGetReader is a Reader for the PcloudImagesGet structure. +type PcloudImagesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudImagesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudImagesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudImagesGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudImagesGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudImagesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudImagesGetOK creates a PcloudImagesGetOK with default headers values +func NewPcloudImagesGetOK() *PcloudImagesGetOK { + return &PcloudImagesGetOK{} +} + +/*PcloudImagesGetOK handles this case with default header values. + +OK +*/ +type PcloudImagesGetOK struct { + Payload *models.Image +} + +func (o *PcloudImagesGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/images/{image_id}][%d] pcloudImagesGetOK %+v", 200, o.Payload) +} + +func (o *PcloudImagesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Image) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudImagesGetBadRequest creates a PcloudImagesGetBadRequest with default headers values +func NewPcloudImagesGetBadRequest() *PcloudImagesGetBadRequest { + return &PcloudImagesGetBadRequest{} +} + +/*PcloudImagesGetBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudImagesGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudImagesGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/images/{image_id}][%d] pcloudImagesGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudImagesGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudImagesGetNotFound creates a PcloudImagesGetNotFound with default headers values +func NewPcloudImagesGetNotFound() *PcloudImagesGetNotFound { + return &PcloudImagesGetNotFound{} +} + +/*PcloudImagesGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudImagesGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudImagesGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/images/{image_id}][%d] pcloudImagesGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudImagesGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudImagesGetInternalServerError creates a PcloudImagesGetInternalServerError with default headers values +func NewPcloudImagesGetInternalServerError() *PcloudImagesGetInternalServerError { + return &PcloudImagesGetInternalServerError{} +} + +/*PcloudImagesGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudImagesGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudImagesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/images/{image_id}][%d] pcloudImagesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudImagesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_getall_parameters.go new file mode 100644 index 00000000000..b9a55a57994 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_getall_parameters.go @@ -0,0 +1,148 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudImagesGetallParams creates a new PcloudImagesGetallParams object +// with the default values initialized. 
+func NewPcloudImagesGetallParams() *PcloudImagesGetallParams { + var () + return &PcloudImagesGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudImagesGetallParamsWithTimeout creates a new PcloudImagesGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudImagesGetallParamsWithTimeout(timeout time.Duration) *PcloudImagesGetallParams { + var () + return &PcloudImagesGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudImagesGetallParamsWithContext creates a new PcloudImagesGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudImagesGetallParamsWithContext(ctx context.Context) *PcloudImagesGetallParams { + var () + return &PcloudImagesGetallParams{ + + Context: ctx, + } +} + +// NewPcloudImagesGetallParamsWithHTTPClient creates a new PcloudImagesGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudImagesGetallParamsWithHTTPClient(client *http.Client) *PcloudImagesGetallParams { + var () + return &PcloudImagesGetallParams{ + HTTPClient: client, + } +} + +/*PcloudImagesGetallParams contains all the parameters to send to the API endpoint +for the pcloud images getall operation typically these are written to a http.Request +*/ +type PcloudImagesGetallParams struct { + + /*Sap + Include SAP images with get available stock images + + */ + Sap *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud images getall params +func (o *PcloudImagesGetallParams) WithTimeout(timeout time.Duration) *PcloudImagesGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud images getall params +func (o *PcloudImagesGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud images getall params +func (o *PcloudImagesGetallParams) WithContext(ctx context.Context) *PcloudImagesGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud images getall params +func (o *PcloudImagesGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud images getall params +func (o *PcloudImagesGetallParams) WithHTTPClient(client *http.Client) *PcloudImagesGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud images getall params +func (o *PcloudImagesGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithSap adds the sap to the pcloud images getall params +func (o *PcloudImagesGetallParams) WithSap(sap *bool) *PcloudImagesGetallParams { + o.SetSap(sap) + return o +} + +// SetSap adds the sap to the pcloud images getall params +func (o *PcloudImagesGetallParams) SetSap(sap *bool) { + o.Sap = sap +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudImagesGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Sap != nil { + + // query param sap + var qrSap bool + if o.Sap != nil { + qrSap = *o.Sap + } + qSap := swag.FormatBool(qrSap) + if qSap != "" { + if err := r.SetQueryParam("sap", qSap); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_getall_responses.go new file mode 100644 index 00000000000..9b5b595a587 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images/pcloud_images_getall_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_images + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudImagesGetallReader is a Reader for the PcloudImagesGetall structure. +type PcloudImagesGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudImagesGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudImagesGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudImagesGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudImagesGetallNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudImagesGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudImagesGetallOK creates a PcloudImagesGetallOK with default headers values +func NewPcloudImagesGetallOK() *PcloudImagesGetallOK { + return &PcloudImagesGetallOK{} +} + +/*PcloudImagesGetallOK handles this case with default header values. + +OK +*/ +type PcloudImagesGetallOK struct { + Payload *models.Images +} + +func (o *PcloudImagesGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/images][%d] pcloudImagesGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudImagesGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Images) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudImagesGetallBadRequest creates a PcloudImagesGetallBadRequest with default headers values +func NewPcloudImagesGetallBadRequest() *PcloudImagesGetallBadRequest { + return &PcloudImagesGetallBadRequest{} +} + +/*PcloudImagesGetallBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudImagesGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudImagesGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/images][%d] pcloudImagesGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudImagesGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudImagesGetallNotFound creates a PcloudImagesGetallNotFound with default headers values +func NewPcloudImagesGetallNotFound() *PcloudImagesGetallNotFound { + return &PcloudImagesGetallNotFound{} +} + +/*PcloudImagesGetallNotFound handles this case with default header values. + +Not Found +*/ +type PcloudImagesGetallNotFound struct { + Payload *models.Error +} + +func (o *PcloudImagesGetallNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/images][%d] pcloudImagesGetallNotFound %+v", 404, o.Payload) +} + +func (o *PcloudImagesGetallNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudImagesGetallInternalServerError creates a PcloudImagesGetallInternalServerError with default headers values +func NewPcloudImagesGetallInternalServerError() *PcloudImagesGetallInternalServerError { + return &PcloudImagesGetallInternalServerError{} +} + +/*PcloudImagesGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudImagesGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudImagesGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/images][%d] pcloudImagesGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudImagesGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/p_cloud_instances_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/p_cloud_instances_client.go new file mode 100644 index 00000000000..512cca71e88 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/p_cloud_instances_client.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud instances API client. 
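+
+// NOTE (editorial aside, not part of the generated source): New expects an
+// already-configured go-openapi runtime transport. A minimal sketch, with a
+// placeholder host and the "http" scheme these operations declare:
+//
+//	import (
+//		httptransport "github.com/go-openapi/runtime/client"
+//		"github.com/go-openapi/strfmt"
+//	)
+//
+//	transport := httptransport.New("power-iaas.example.test", "/", []string{"http"})
+//	instances := New(transport, strfmt.Default)
+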
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud instances API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudCloudinstancesDelete deletes a power cloud instance +*/ +func (a *Client) PcloudCloudinstancesDelete(params *PcloudCloudinstancesDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesDeleteOK), nil + +} + +/* +PcloudCloudinstancesGet gets a cloud instance s current state information +*/ +func (a *Client) PcloudCloudinstancesGet(params *PcloudCloudinstancesGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesGetOK), nil + +} + +/* +PcloudCloudinstancesPut updates upgrade a cloud instance +*/ +func (a *Client) PcloudCloudinstancesPut(params *PcloudCloudinstancesPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.put", + Method: "PUT", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesPutOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_delete_parameters.go new file mode 100644 index 00000000000..a55acf93e26 --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_delete_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesDeleteParams creates a new PcloudCloudinstancesDeleteParams object +// with the default values initialized. +func NewPcloudCloudinstancesDeleteParams() *PcloudCloudinstancesDeleteParams { + var () + return &PcloudCloudinstancesDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesDeleteParamsWithTimeout creates a new PcloudCloudinstancesDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesDeleteParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesDeleteParams { + var () + return &PcloudCloudinstancesDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesDeleteParamsWithContext creates a new PcloudCloudinstancesDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesDeleteParamsWithContext(ctx context.Context) *PcloudCloudinstancesDeleteParams { + var () + return &PcloudCloudinstancesDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesDeleteParamsWithHTTPClient creates a new PcloudCloudinstancesDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesDeleteParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesDeleteParams { + var () + return &PcloudCloudinstancesDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesDeleteParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances delete operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesDeleteParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances delete params +func (o *PcloudCloudinstancesDeleteParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances delete params +func (o *PcloudCloudinstancesDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances delete params +func (o *PcloudCloudinstancesDeleteParams) WithContext(ctx context.Context) *PcloudCloudinstancesDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances delete params +func (o *PcloudCloudinstancesDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances delete params +func (o *PcloudCloudinstancesDeleteParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the 
pcloud cloudinstances delete params +func (o *PcloudCloudinstancesDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances delete params +func (o *PcloudCloudinstancesDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances delete params +func (o *PcloudCloudinstancesDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_delete_responses.go new file mode 100644 index 00000000000..83d4b62ad5b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesDeleteReader is a Reader for the PcloudCloudinstancesDelete structure. +type PcloudCloudinstancesDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudCloudinstancesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudCloudinstancesDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesDeleteOK creates a PcloudCloudinstancesDeleteOK with default headers values +func NewPcloudCloudinstancesDeleteOK() *PcloudCloudinstancesDeleteOK { + return &PcloudCloudinstancesDeleteOK{} +} + +/*PcloudCloudinstancesDeleteOK handles this case with default header values. 
+ +OK +*/ +type PcloudCloudinstancesDeleteOK struct { + Payload models.Object +} + +func (o *PcloudCloudinstancesDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesDeleteBadRequest creates a PcloudCloudinstancesDeleteBadRequest with default headers values +func NewPcloudCloudinstancesDeleteBadRequest() *PcloudCloudinstancesDeleteBadRequest { + return &PcloudCloudinstancesDeleteBadRequest{} +} + +/*PcloudCloudinstancesDeleteBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesDeleteGone creates a PcloudCloudinstancesDeleteGone with default headers values +func NewPcloudCloudinstancesDeleteGone() *PcloudCloudinstancesDeleteGone { + return &PcloudCloudinstancesDeleteGone{} +} + +/*PcloudCloudinstancesDeleteGone handles this case with default header values. + +Gone +*/ +type PcloudCloudinstancesDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudCloudinstancesDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesDeleteInternalServerError creates a PcloudCloudinstancesDeleteInternalServerError with default headers values +func NewPcloudCloudinstancesDeleteInternalServerError() *PcloudCloudinstancesDeleteInternalServerError { + return &PcloudCloudinstancesDeleteInternalServerError{} +} + +/*PcloudCloudinstancesDeleteInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudCloudinstancesDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_get_parameters.go new file mode 100644 index 00000000000..2ee7da64cde --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesGetParams creates a new PcloudCloudinstancesGetParams object +// with the default values initialized. +func NewPcloudCloudinstancesGetParams() *PcloudCloudinstancesGetParams { + var () + return &PcloudCloudinstancesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesGetParamsWithTimeout creates a new PcloudCloudinstancesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesGetParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesGetParams { + var () + return &PcloudCloudinstancesGetParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesGetParamsWithContext creates a new PcloudCloudinstancesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesGetParamsWithContext(ctx context.Context) *PcloudCloudinstancesGetParams { + var () + return &PcloudCloudinstancesGetParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesGetParamsWithHTTPClient creates a new PcloudCloudinstancesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesGetParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesGetParams { + var () + return &PcloudCloudinstancesGetParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesGetParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances get operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances get params +func (o *PcloudCloudinstancesGetParams) WithTimeout(timeout time.Duration) 
*PcloudCloudinstancesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances get params +func (o *PcloudCloudinstancesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances get params +func (o *PcloudCloudinstancesGetParams) WithContext(ctx context.Context) *PcloudCloudinstancesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances get params +func (o *PcloudCloudinstancesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances get params +func (o *PcloudCloudinstancesGetParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances get params +func (o *PcloudCloudinstancesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances get params +func (o *PcloudCloudinstancesGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances get params +func (o *PcloudCloudinstancesGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_get_responses.go new file mode 100644 index 00000000000..a485801dfcb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesGetReader is a Reader for the PcloudCloudinstancesGet structure. +type PcloudCloudinstancesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
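+
+// NOTE (editorial aside, not part of the generated source): ReadResponse
+// hands back the 200 payload as the result and every non-2xx response as
+// the error value; each typed response below satisfies the error interface
+// through its Error() method, so callers can type-assert on failures:
+//
+//	ok, err := instances.PcloudCloudinstancesGet(params, authInfo)
+//	if nf, isNotFound := err.(*PcloudCloudinstancesGetNotFound); isNotFound {
+//		_ = nf.Payload // *models.Error carrying the service's 404 details
+//	}
+//	_ = ok
+//
+// (`instances`, `params`, and `authInfo` are assumed to be wired up as in
+// the construction sketch earlier in this diff.)
+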
+func (o *PcloudCloudinstancesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesGetOK creates a PcloudCloudinstancesGetOK with default headers values +func NewPcloudCloudinstancesGetOK() *PcloudCloudinstancesGetOK { + return &PcloudCloudinstancesGetOK{} +} + +/*PcloudCloudinstancesGetOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesGetOK struct { + Payload *models.CloudInstance +} + +func (o *PcloudCloudinstancesGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesGetOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudInstance) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesGetBadRequest creates a PcloudCloudinstancesGetBadRequest with default headers values +func NewPcloudCloudinstancesGetBadRequest() *PcloudCloudinstancesGetBadRequest { + return &PcloudCloudinstancesGetBadRequest{} +} + +/*PcloudCloudinstancesGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesGetNotFound creates a PcloudCloudinstancesGetNotFound with default headers values +func NewPcloudCloudinstancesGetNotFound() *PcloudCloudinstancesGetNotFound { + return &PcloudCloudinstancesGetNotFound{} +} + +/*PcloudCloudinstancesGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudCloudinstancesGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesGetInternalServerError creates a PcloudCloudinstancesGetInternalServerError with default headers values +func NewPcloudCloudinstancesGetInternalServerError() *PcloudCloudinstancesGetInternalServerError { + return &PcloudCloudinstancesGetInternalServerError{} +} + +/*PcloudCloudinstancesGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_put_parameters.go new file mode 100644 index 00000000000..04a998b458c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_put_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudCloudinstancesPutParams creates a new PcloudCloudinstancesPutParams object +// with the default values initialized. 
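+
+// NOTE (editorial aside, not part of the generated source): unlike the GET
+// and DELETE parameter types, the PUT parameters also carry a request body.
+// A minimal sketch (the field values are placeholders; see
+// models.CloudInstanceUpdate for the actual schema):
+//
+//	update := &models.CloudInstanceUpdate{ /* fields to change */ }
+//	params := NewPcloudCloudinstancesPutParams().
+//		WithCloudInstanceID(cloudInstanceID).
+//		WithBody(update)
+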
+func NewPcloudCloudinstancesPutParams() *PcloudCloudinstancesPutParams { + var () + return &PcloudCloudinstancesPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesPutParamsWithTimeout creates a new PcloudCloudinstancesPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesPutParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesPutParams { + var () + return &PcloudCloudinstancesPutParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesPutParamsWithContext creates a new PcloudCloudinstancesPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesPutParamsWithContext(ctx context.Context) *PcloudCloudinstancesPutParams { + var () + return &PcloudCloudinstancesPutParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesPutParamsWithHTTPClient creates a new PcloudCloudinstancesPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesPutParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesPutParams { + var () + return &PcloudCloudinstancesPutParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesPutParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances put operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesPutParams struct { + + /*Body + Parameters for updating a Power Cloud Instance + + */ + Body *models.CloudInstanceUpdate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) WithContext(ctx context.Context) *PcloudCloudinstancesPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) WithBody(body *models.CloudInstanceUpdate) *PcloudCloudinstancesPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) SetBody(body *models.CloudInstanceUpdate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) 
WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesPutParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances put params +func (o *PcloudCloudinstancesPutParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_put_responses.go new file mode 100644 index 00000000000..bbe76c87699 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances/pcloud_cloudinstances_put_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesPutReader is a Reader for the PcloudCloudinstancesPut structure. +type PcloudCloudinstancesPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudCloudinstancesPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudCloudinstancesPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesPutOK creates a PcloudCloudinstancesPutOK with default headers values +func NewPcloudCloudinstancesPutOK() *PcloudCloudinstancesPutOK { + return &PcloudCloudinstancesPutOK{} +} + +/*PcloudCloudinstancesPutOK handles this case with default header values. 
+ +OK +*/ +type PcloudCloudinstancesPutOK struct { + Payload *models.CloudInstance +} + +func (o *PcloudCloudinstancesPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesPutOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloudInstance) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesPutBadRequest creates a PcloudCloudinstancesPutBadRequest with default headers values +func NewPcloudCloudinstancesPutBadRequest() *PcloudCloudinstancesPutBadRequest { + return &PcloudCloudinstancesPutBadRequest{} +} + +/*PcloudCloudinstancesPutBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesPutUnprocessableEntity creates a PcloudCloudinstancesPutUnprocessableEntity with default headers values +func NewPcloudCloudinstancesPutUnprocessableEntity() *PcloudCloudinstancesPutUnprocessableEntity { + return &PcloudCloudinstancesPutUnprocessableEntity{} +} + +/*PcloudCloudinstancesPutUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudCloudinstancesPutUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudCloudinstancesPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesPutInternalServerError creates a PcloudCloudinstancesPutInternalServerError with default headers values +func NewPcloudCloudinstancesPutInternalServerError() *PcloudCloudinstancesPutInternalServerError { + return &PcloudCloudinstancesPutInternalServerError{} +} + +/*PcloudCloudinstancesPutInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudCloudinstancesPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}][%d] pcloudCloudinstancesPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/p_cloud_networks_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/p_cloud_networks_client.go new file mode 100644 index 00000000000..a264c421698 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/p_cloud_networks_client.go @@ -0,0 +1,326 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud networks API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud networks API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudNetworksDelete deletes a network +*/ +func (a *Client) PcloudNetworksDelete(params *PcloudNetworksDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudNetworksDeleteOK), nil + +} + +/* +PcloudNetworksGet gets a network s current state information +*/ +func (a *Client) PcloudNetworksGet(params *PcloudNetworksGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudNetworksGetOK), nil + +} + +/* +PcloudNetworksGetall gets all 
networks in this cloud instance +*/ +func (a *Client) PcloudNetworksGetall(params *PcloudNetworksGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/networks", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudNetworksGetallOK), nil + +} + +/* +PcloudNetworksPortsDelete deletes a network port +*/ +func (a *Client) PcloudNetworksPortsDelete(params *PcloudNetworksPortsDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksPortsDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksPortsDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.ports.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksPortsDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudNetworksPortsDeleteOK), nil + +} + +/* +PcloudNetworksPortsGet gets a port s information +*/ +func (a *Client) PcloudNetworksPortsGet(params *PcloudNetworksPortsGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksPortsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksPortsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.ports.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksPortsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudNetworksPortsGetOK), nil + +} + +/* +PcloudNetworksPortsGetall gets all ports for this network +*/ +func (a *Client) PcloudNetworksPortsGetall(params *PcloudNetworksPortsGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksPortsGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksPortsGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.ports.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksPortsGetallReader{formats: a.formats}, + 
AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudNetworksPortsGetallOK), nil + +} + +/* +PcloudNetworksPortsPost performs port addition deletion and listing +*/ +func (a *Client) PcloudNetworksPortsPost(params *PcloudNetworksPortsPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksPortsPostCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksPortsPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.ports.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksPortsPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudNetworksPortsPostCreated), nil + +} + +/* +PcloudNetworksPortsPut updates a port s information +*/ +func (a *Client) PcloudNetworksPortsPut(params *PcloudNetworksPortsPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksPortsPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksPortsPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.ports.put", + Method: "PUT", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksPortsPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudNetworksPortsPutOK), nil + +} + +/* +PcloudNetworksPost creates a new network +*/ +func (a *Client) PcloudNetworksPost(params *PcloudNetworksPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksPostOK, *PcloudNetworksPostCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/networks", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *PcloudNetworksPostOK: + return value, nil, nil + case *PcloudNetworksPostCreated: + return nil, value, nil + } + return nil, nil, nil + +} + +/* +PcloudNetworksPut updates a network +*/ +func (a *Client) PcloudNetworksPut(params *PcloudNetworksPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudNetworksPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudNetworksPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.networks.put", + Method: "PUT", + PathPattern: 
"/pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudNetworksPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudNetworksPutOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_delete_parameters.go new file mode 100644 index 00000000000..c823a42771c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_delete_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudNetworksDeleteParams creates a new PcloudNetworksDeleteParams object +// with the default values initialized. +func NewPcloudNetworksDeleteParams() *PcloudNetworksDeleteParams { + var () + return &PcloudNetworksDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksDeleteParamsWithTimeout creates a new PcloudNetworksDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksDeleteParamsWithTimeout(timeout time.Duration) *PcloudNetworksDeleteParams { + var () + return &PcloudNetworksDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksDeleteParamsWithContext creates a new PcloudNetworksDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksDeleteParamsWithContext(ctx context.Context) *PcloudNetworksDeleteParams { + var () + return &PcloudNetworksDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksDeleteParamsWithHTTPClient creates a new PcloudNetworksDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksDeleteParamsWithHTTPClient(client *http.Client) *PcloudNetworksDeleteParams { + var () + return &PcloudNetworksDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksDeleteParams contains all the parameters to send to the API endpoint +for the pcloud networks delete operation typically these are written to a http.Request +*/ +type PcloudNetworksDeleteParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud networks delete params +func (o *PcloudNetworksDeleteParams) WithTimeout(timeout time.Duration) *PcloudNetworksDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks delete params 
+func (o *PcloudNetworksDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks delete params +func (o *PcloudNetworksDeleteParams) WithContext(ctx context.Context) *PcloudNetworksDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks delete params +func (o *PcloudNetworksDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks delete params +func (o *PcloudNetworksDeleteParams) WithHTTPClient(client *http.Client) *PcloudNetworksDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks delete params +func (o *PcloudNetworksDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks delete params +func (o *PcloudNetworksDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudNetworksDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks delete params +func (o *PcloudNetworksDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud networks delete params +func (o *PcloudNetworksDeleteParams) WithNetworkID(networkID string) *PcloudNetworksDeleteParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud networks delete params +func (o *PcloudNetworksDeleteParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_delete_responses.go new file mode 100644 index 00000000000..cdd74b11a60 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksDeleteReader is a Reader for the PcloudNetworksDelete structure. +type PcloudNetworksDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
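Reviewer note, not part of the vendored files: the generated parameter types above follow go-swagger's fluent With*/Set* pattern. The sketch below shows how a caller would typically build delete parameters; the Client.PcloudNetworksDelete method and the authInfo writer are assumed from the pattern of the generated client (only the Put method is visible in this diff), so treat the call as illustrative.

package example

import (
	"fmt"
	"time"

	"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks"
	"github.com/go-openapi/runtime"
)

func deleteNetwork(networkClient *p_cloud_networks.Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID, networkID string) error {
	// Chain the fluent builders to fill both path params and override the
	// default request timeout (cr.DefaultTimeout) set by the constructor.
	params := p_cloud_networks.NewPcloudNetworksDeleteParams().
		WithCloudInstanceID(cloudInstanceID).
		WithNetworkID(networkID).
		WithTimeout(90 * time.Second)

	// Assumed signature, mirroring the generated Put method shown earlier
	// in this client package.
	resp, err := networkClient.PcloudNetworksDelete(params, authInfo)
	if err != nil {
		return err
	}
	fmt.Printf("network deleted: %+v\n", resp.Payload)
	return nil
}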
+func (o *PcloudNetworksDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudNetworksDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudNetworksDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudNetworksDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksDeleteOK creates a PcloudNetworksDeleteOK with default headers values +func NewPcloudNetworksDeleteOK() *PcloudNetworksDeleteOK { + return &PcloudNetworksDeleteOK{} +} + +/*PcloudNetworksDeleteOK handles this case with default header values. + +OK +*/ +type PcloudNetworksDeleteOK struct { + Payload models.Object +} + +func (o *PcloudNetworksDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudNetworksDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksDeleteBadRequest creates a PcloudNetworksDeleteBadRequest with default headers values +func NewPcloudNetworksDeleteBadRequest() *PcloudNetworksDeleteBadRequest { + return &PcloudNetworksDeleteBadRequest{} +} + +/*PcloudNetworksDeleteBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudNetworksDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudNetworksDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudNetworksDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksDeleteGone creates a PcloudNetworksDeleteGone with default headers values +func NewPcloudNetworksDeleteGone() *PcloudNetworksDeleteGone { + return &PcloudNetworksDeleteGone{} +} + +/*PcloudNetworksDeleteGone handles this case with default header values. 
+ +Gone +*/ +type PcloudNetworksDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudNetworksDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudNetworksDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksDeleteInternalServerError creates a PcloudNetworksDeleteInternalServerError with default headers values +func NewPcloudNetworksDeleteInternalServerError() *PcloudNetworksDeleteInternalServerError { + return &PcloudNetworksDeleteInternalServerError{} +} + +/*PcloudNetworksDeleteInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudNetworksDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_get_parameters.go new file mode 100644 index 00000000000..ff53360917e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudNetworksGetParams creates a new PcloudNetworksGetParams object +// with the default values initialized. 
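Reviewer note: the ReadResponse switch above returns the 400/410/500 results as error values, each carrying the decoded models.Error payload. A caller can therefore distinguish outcomes with a type switch; this sketch uses only the concrete types defined in the responses file above (the hypothetical caller is the deleteNetwork helper from the previous sketch).

package example

import (
	"log"

	"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks"
)

func classifyDeleteError(err error) {
	// Each non-2xx branch of the reader is returned as a typed error;
	// the Payload field holds the decoded models.Error body.
	switch e := err.(type) {
	case *p_cloud_networks.PcloudNetworksDeleteBadRequest:
		log.Printf("bad request: %+v", e.Payload)
	case *p_cloud_networks.PcloudNetworksDeleteGone:
		log.Printf("network already gone: %+v", e.Payload)
	case *p_cloud_networks.PcloudNetworksDeleteInternalServerError:
		log.Printf("server error: %+v", e.Payload)
	default:
		log.Printf("unexpected error: %v", err)
	}
}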
+func NewPcloudNetworksGetParams() *PcloudNetworksGetParams { + var () + return &PcloudNetworksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksGetParamsWithTimeout creates a new PcloudNetworksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksGetParamsWithTimeout(timeout time.Duration) *PcloudNetworksGetParams { + var () + return &PcloudNetworksGetParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksGetParamsWithContext creates a new PcloudNetworksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksGetParamsWithContext(ctx context.Context) *PcloudNetworksGetParams { + var () + return &PcloudNetworksGetParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksGetParamsWithHTTPClient creates a new PcloudNetworksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksGetParamsWithHTTPClient(client *http.Client) *PcloudNetworksGetParams { + var () + return &PcloudNetworksGetParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksGetParams contains all the parameters to send to the API endpoint +for the pcloud networks get operation typically these are written to a http.Request +*/ +type PcloudNetworksGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud networks get params +func (o *PcloudNetworksGetParams) WithTimeout(timeout time.Duration) *PcloudNetworksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks get params +func (o *PcloudNetworksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks get params +func (o *PcloudNetworksGetParams) WithContext(ctx context.Context) *PcloudNetworksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks get params +func (o *PcloudNetworksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks get params +func (o *PcloudNetworksGetParams) WithHTTPClient(client *http.Client) *PcloudNetworksGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks get params +func (o *PcloudNetworksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks get params +func (o *PcloudNetworksGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudNetworksGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks get params +func (o *PcloudNetworksGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud networks get params +func (o *PcloudNetworksGetParams) WithNetworkID(networkID string) *PcloudNetworksGetParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud networks get params +func (o *PcloudNetworksGetParams) SetNetworkID(networkID string) { + o.NetworkID = 
networkID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_get_responses.go new file mode 100644 index 00000000000..c7e324eefe4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksGetReader is a Reader for the PcloudNetworksGet structure. +type PcloudNetworksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudNetworksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudNetworksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudNetworksGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudNetworksGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksGetOK creates a PcloudNetworksGetOK with default headers values +func NewPcloudNetworksGetOK() *PcloudNetworksGetOK { + return &PcloudNetworksGetOK{} +} + +/*PcloudNetworksGetOK handles this case with default header values. 
+ +OK +*/ +type PcloudNetworksGetOK struct { + Payload *models.Network +} + +func (o *PcloudNetworksGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksGetOK %+v", 200, o.Payload) +} + +func (o *PcloudNetworksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Network) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksGetBadRequest creates a PcloudNetworksGetBadRequest with default headers values +func NewPcloudNetworksGetBadRequest() *PcloudNetworksGetBadRequest { + return &PcloudNetworksGetBadRequest{} +} + +/*PcloudNetworksGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudNetworksGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudNetworksGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudNetworksGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksGetNotFound creates a PcloudNetworksGetNotFound with default headers values +func NewPcloudNetworksGetNotFound() *PcloudNetworksGetNotFound { + return &PcloudNetworksGetNotFound{} +} + +/*PcloudNetworksGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudNetworksGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudNetworksGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudNetworksGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksGetInternalServerError creates a PcloudNetworksGetInternalServerError with default headers values +func NewPcloudNetworksGetInternalServerError() *PcloudNetworksGetInternalServerError { + return &PcloudNetworksGetInternalServerError{} +} + +/*PcloudNetworksGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudNetworksGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_getall_parameters.go new file mode 100644 index 00000000000..f7313c8ee49 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_getall_parameters.go @@ -0,0 +1,168 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudNetworksGetallParams creates a new PcloudNetworksGetallParams object +// with the default values initialized. +func NewPcloudNetworksGetallParams() *PcloudNetworksGetallParams { + var () + return &PcloudNetworksGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksGetallParamsWithTimeout creates a new PcloudNetworksGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksGetallParamsWithTimeout(timeout time.Duration) *PcloudNetworksGetallParams { + var () + return &PcloudNetworksGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksGetallParamsWithContext creates a new PcloudNetworksGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksGetallParamsWithContext(ctx context.Context) *PcloudNetworksGetallParams { + var () + return &PcloudNetworksGetallParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksGetallParamsWithHTTPClient creates a new PcloudNetworksGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksGetallParamsWithHTTPClient(client *http.Client) *PcloudNetworksGetallParams { + var () + return &PcloudNetworksGetallParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksGetallParams contains all the parameters to send to the API endpoint +for the pcloud networks getall operation typically these are written to a http.Request +*/ +type PcloudNetworksGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*Filter + A filter expression that filters resources listed in the response + + */ + Filter *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) WithTimeout(timeout time.Duration) 
*PcloudNetworksGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) WithContext(ctx context.Context) *PcloudNetworksGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) WithHTTPClient(client *http.Client) *PcloudNetworksGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudNetworksGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithFilter adds the filter to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) WithFilter(filter *string) *PcloudNetworksGetallParams { + o.SetFilter(filter) + return o +} + +// SetFilter adds the filter to the pcloud networks getall params +func (o *PcloudNetworksGetallParams) SetFilter(filter *string) { + o.Filter = filter +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if o.Filter != nil { + + // query param filter + var qrFilter string + if o.Filter != nil { + qrFilter = *o.Filter + } + qFilter := qrFilter + if qFilter != "" { + if err := r.SetQueryParam("filter", qFilter); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_getall_responses.go new file mode 100644 index 00000000000..937d60735c1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_getall_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksGetallReader is a Reader for the PcloudNetworksGetall structure. 
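Reviewer note: unlike the path params, Filter is a *string, so WriteToRequest above only emits the "filter" query param when the pointer is non-nil and the value non-empty. A minimal sketch of list-with-filter usage follows; the PcloudNetworksGetall client method is assumed from the generated client's pattern, and the filter expression itself is a hypothetical placeholder, not taken from this diff.

package example

import (
	"fmt"

	"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks"
	"github.com/go-openapi/runtime"
)

func listNetworks(networkClient *p_cloud_networks.Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID string) error {
	filterExpr := "name=mynet" // hypothetical filter expression
	params := p_cloud_networks.NewPcloudNetworksGetallParams().
		WithCloudInstanceID(cloudInstanceID).
		WithFilter(&filterExpr) // nil pointer would omit the query param

	resp, err := networkClient.PcloudNetworksGetall(params, authInfo)
	if err != nil {
		return err
	}
	fmt.Printf("networks: %+v\n", resp.Payload)
	return nil
}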
+type PcloudNetworksGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudNetworksGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudNetworksGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudNetworksGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksGetallOK creates a PcloudNetworksGetallOK with default headers values +func NewPcloudNetworksGetallOK() *PcloudNetworksGetallOK { + return &PcloudNetworksGetallOK{} +} + +/*PcloudNetworksGetallOK handles this case with default header values. + +OK +*/ +type PcloudNetworksGetallOK struct { + Payload *models.Networks +} + +func (o *PcloudNetworksGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks][%d] pcloudNetworksGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudNetworksGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Networks) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksGetallBadRequest creates a PcloudNetworksGetallBadRequest with default headers values +func NewPcloudNetworksGetallBadRequest() *PcloudNetworksGetallBadRequest { + return &PcloudNetworksGetallBadRequest{} +} + +/*PcloudNetworksGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudNetworksGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudNetworksGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks][%d] pcloudNetworksGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudNetworksGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksGetallInternalServerError creates a PcloudNetworksGetallInternalServerError with default headers values +func NewPcloudNetworksGetallInternalServerError() *PcloudNetworksGetallInternalServerError { + return &PcloudNetworksGetallInternalServerError{} +} + +/*PcloudNetworksGetallInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudNetworksGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks][%d] pcloudNetworksGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_delete_parameters.go new file mode 100644 index 00000000000..01c40eeb064 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_delete_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudNetworksPortsDeleteParams creates a new PcloudNetworksPortsDeleteParams object +// with the default values initialized. +func NewPcloudNetworksPortsDeleteParams() *PcloudNetworksPortsDeleteParams { + var () + return &PcloudNetworksPortsDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksPortsDeleteParamsWithTimeout creates a new PcloudNetworksPortsDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksPortsDeleteParamsWithTimeout(timeout time.Duration) *PcloudNetworksPortsDeleteParams { + var () + return &PcloudNetworksPortsDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksPortsDeleteParamsWithContext creates a new PcloudNetworksPortsDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksPortsDeleteParamsWithContext(ctx context.Context) *PcloudNetworksPortsDeleteParams { + var () + return &PcloudNetworksPortsDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksPortsDeleteParamsWithHTTPClient creates a new PcloudNetworksPortsDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksPortsDeleteParamsWithHTTPClient(client *http.Client) *PcloudNetworksPortsDeleteParams { + var () + return &PcloudNetworksPortsDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksPortsDeleteParams contains all the parameters to send to the API endpoint +for the pcloud networks ports delete operation typically these are written to a http.Request +*/ +type PcloudNetworksPortsDeleteParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + /*PortID + Port ID + + */ + PortID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout 
to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) WithTimeout(timeout time.Duration) *PcloudNetworksPortsDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) WithContext(ctx context.Context) *PcloudNetworksPortsDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) WithHTTPClient(client *http.Client) *PcloudNetworksPortsDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudNetworksPortsDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) WithNetworkID(networkID string) *PcloudNetworksPortsDeleteParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WithPortID adds the portID to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) WithPortID(portID string) *PcloudNetworksPortsDeleteParams { + o.SetPortID(portID) + return o +} + +// SetPortID adds the portId to the pcloud networks ports delete params +func (o *PcloudNetworksPortsDeleteParams) SetPortID(portID string) { + o.PortID = portID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksPortsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + // path param port_id + if err := r.SetPathParam("port_id", o.PortID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_delete_responses.go new file mode 100644 index 00000000000..33c7d083f1e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksPortsDeleteReader is a Reader for the PcloudNetworksPortsDelete structure. +type PcloudNetworksPortsDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudNetworksPortsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudNetworksPortsDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudNetworksPortsDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudNetworksPortsDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksPortsDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksPortsDeleteOK creates a PcloudNetworksPortsDeleteOK with default headers values +func NewPcloudNetworksPortsDeleteOK() *PcloudNetworksPortsDeleteOK { + return &PcloudNetworksPortsDeleteOK{} +} + +/*PcloudNetworksPortsDeleteOK handles this case with default header values. + +OK +*/ +type PcloudNetworksPortsDeleteOK struct { + Payload models.Object +} + +func (o *PcloudNetworksPortsDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudNetworksPortsDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsDeleteBadRequest creates a PcloudNetworksPortsDeleteBadRequest with default headers values +func NewPcloudNetworksPortsDeleteBadRequest() *PcloudNetworksPortsDeleteBadRequest { + return &PcloudNetworksPortsDeleteBadRequest{} +} + +/*PcloudNetworksPortsDeleteBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudNetworksPortsDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudNetworksPortsDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsDeleteGone creates a PcloudNetworksPortsDeleteGone with default headers values +func NewPcloudNetworksPortsDeleteGone() *PcloudNetworksPortsDeleteGone { + return &PcloudNetworksPortsDeleteGone{} +} + +/*PcloudNetworksPortsDeleteGone handles this case with default header values. + +Gone +*/ +type PcloudNetworksPortsDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudNetworksPortsDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsDeleteInternalServerError creates a PcloudNetworksPortsDeleteInternalServerError with default headers values +func NewPcloudNetworksPortsDeleteInternalServerError() *PcloudNetworksPortsDeleteInternalServerError { + return &PcloudNetworksPortsDeleteInternalServerError{} +} + +/*PcloudNetworksPortsDeleteInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudNetworksPortsDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksPortsDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_get_parameters.go new file mode 100644 index 00000000000..c3ba79d1af8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_get_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. 
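Reviewer note: the ports operations add a third path parameter, PortID, and the generated constructors also let the caller wire in a context.Context directly. A sketch under the same assumptions as above (the PcloudNetworksPortsDelete client method is inferred from the generated pattern):

package example

import (
	"context"

	"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks"
	"github.com/go-openapi/runtime"
)

func deletePort(ctx context.Context, networkClient *p_cloud_networks.Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID, networkID, portID string) error {
	// The WithContext constructor attaches the caller's context; the three
	// With* builders fill the DELETE route's path params shown above.
	params := p_cloud_networks.NewPcloudNetworksPortsDeleteParamsWithContext(ctx).
		WithCloudInstanceID(cloudInstanceID).
		WithNetworkID(networkID).
		WithPortID(portID)

	_, err := networkClient.PcloudNetworksPortsDelete(params, authInfo)
	return err
}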
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudNetworksPortsGetParams creates a new PcloudNetworksPortsGetParams object +// with the default values initialized. +func NewPcloudNetworksPortsGetParams() *PcloudNetworksPortsGetParams { + var () + return &PcloudNetworksPortsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksPortsGetParamsWithTimeout creates a new PcloudNetworksPortsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksPortsGetParamsWithTimeout(timeout time.Duration) *PcloudNetworksPortsGetParams { + var () + return &PcloudNetworksPortsGetParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksPortsGetParamsWithContext creates a new PcloudNetworksPortsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksPortsGetParamsWithContext(ctx context.Context) *PcloudNetworksPortsGetParams { + var () + return &PcloudNetworksPortsGetParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksPortsGetParamsWithHTTPClient creates a new PcloudNetworksPortsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksPortsGetParamsWithHTTPClient(client *http.Client) *PcloudNetworksPortsGetParams { + var () + return &PcloudNetworksPortsGetParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksPortsGetParams contains all the parameters to send to the API endpoint +for the pcloud networks ports get operation typically these are written to a http.Request +*/ +type PcloudNetworksPortsGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + /*PortID + Port ID + + */ + PortID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) WithTimeout(timeout time.Duration) *PcloudNetworksPortsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) WithContext(ctx context.Context) *PcloudNetworksPortsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) WithHTTPClient(client *http.Client) *PcloudNetworksPortsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) WithCloudInstanceID(cloudInstanceID 
string) *PcloudNetworksPortsGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) WithNetworkID(networkID string) *PcloudNetworksPortsGetParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WithPortID adds the portID to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) WithPortID(portID string) *PcloudNetworksPortsGetParams { + o.SetPortID(portID) + return o +} + +// SetPortID adds the portId to the pcloud networks ports get params +func (o *PcloudNetworksPortsGetParams) SetPortID(portID string) { + o.PortID = portID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksPortsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + // path param port_id + if err := r.SetPathParam("port_id", o.PortID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_get_responses.go new file mode 100644 index 00000000000..db7482dd4f1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_get_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksPortsGetReader is a Reader for the PcloudNetworksPortsGet structure. +type PcloudNetworksPortsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
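Reviewer note: fetching a single port follows the same shape; the OK response defined just below carries a *models.NetworkPort payload. The PcloudNetworksPortsGet client method is again assumed from the generated pattern.

package example

import (
	"time"

	"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks"
	"github.com/IBM-Cloud/power-go-client/power/models"
	"github.com/go-openapi/runtime"
)

func getPort(networkClient *p_cloud_networks.Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID, networkID, portID string) (*models.NetworkPort, error) {
	params := p_cloud_networks.NewPcloudNetworksPortsGetParamsWithTimeout(30 * time.Second).
		WithCloudInstanceID(cloudInstanceID).
		WithNetworkID(networkID).
		WithPortID(portID)

	resp, err := networkClient.PcloudNetworksPortsGet(params, authInfo)
	if err != nil {
		return nil, err
	}
	// Payload is the decoded *models.NetworkPort from the 200 branch.
	return resp.Payload, nil
}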
+func (o *PcloudNetworksPortsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudNetworksPortsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewPcloudNetworksPortsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksPortsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksPortsGetOK creates a PcloudNetworksPortsGetOK with default headers values +func NewPcloudNetworksPortsGetOK() *PcloudNetworksPortsGetOK { + return &PcloudNetworksPortsGetOK{} +} + +/*PcloudNetworksPortsGetOK handles this case with default header values. + +OK +*/ +type PcloudNetworksPortsGetOK struct { + Payload *models.NetworkPort +} + +func (o *PcloudNetworksPortsGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsGetOK %+v", 200, o.Payload) +} + +func (o *PcloudNetworksPortsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.NetworkPort) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsGetNotFound creates a PcloudNetworksPortsGetNotFound with default headers values +func NewPcloudNetworksPortsGetNotFound() *PcloudNetworksPortsGetNotFound { + return &PcloudNetworksPortsGetNotFound{} +} + +/*PcloudNetworksPortsGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudNetworksPortsGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudNetworksPortsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsGetInternalServerError creates a PcloudNetworksPortsGetInternalServerError with default headers values +func NewPcloudNetworksPortsGetInternalServerError() *PcloudNetworksPortsGetInternalServerError { + return &PcloudNetworksPortsGetInternalServerError{} +} + +/*PcloudNetworksPortsGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudNetworksPortsGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksPortsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_getall_parameters.go new file mode 100644 index 00000000000..5dbb41a1a42 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_getall_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudNetworksPortsGetallParams creates a new PcloudNetworksPortsGetallParams object +// with the default values initialized. +func NewPcloudNetworksPortsGetallParams() *PcloudNetworksPortsGetallParams { + var () + return &PcloudNetworksPortsGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksPortsGetallParamsWithTimeout creates a new PcloudNetworksPortsGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksPortsGetallParamsWithTimeout(timeout time.Duration) *PcloudNetworksPortsGetallParams { + var () + return &PcloudNetworksPortsGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksPortsGetallParamsWithContext creates a new PcloudNetworksPortsGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksPortsGetallParamsWithContext(ctx context.Context) *PcloudNetworksPortsGetallParams { + var () + return &PcloudNetworksPortsGetallParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksPortsGetallParamsWithHTTPClient creates a new PcloudNetworksPortsGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksPortsGetallParamsWithHTTPClient(client *http.Client) *PcloudNetworksPortsGetallParams { + var () + return &PcloudNetworksPortsGetallParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksPortsGetallParams contains all the parameters to send to the API endpoint +for the pcloud networks ports getall operation typically these are written to a http.Request +*/ +type PcloudNetworksPortsGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the 
pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) WithTimeout(timeout time.Duration) *PcloudNetworksPortsGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) WithContext(ctx context.Context) *PcloudNetworksPortsGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) WithHTTPClient(client *http.Client) *PcloudNetworksPortsGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudNetworksPortsGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) WithNetworkID(networkID string) *PcloudNetworksPortsGetallParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud networks ports getall params +func (o *PcloudNetworksPortsGetallParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksPortsGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_getall_responses.go new file mode 100644 index 00000000000..0b290baf809 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_getall_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. 
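Reviewer note: the WithHTTPClient constructor above lets a caller substitute its own *http.Client, e.g. to control transport-level timeouts independently of the go-swagger request timeout. A sketch, with the PcloudNetworksPortsGetall client method assumed as before:

package example

import (
	"fmt"
	"net/http"
	"time"

	"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks"
	"github.com/go-openapi/runtime"
)

func listPorts(networkClient *p_cloud_networks.Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID, networkID string) error {
	// Supply a custom HTTP client instead of the default transport client.
	httpClient := &http.Client{Timeout: 2 * time.Minute}
	params := p_cloud_networks.NewPcloudNetworksPortsGetallParamsWithHTTPClient(httpClient).
		WithCloudInstanceID(cloudInstanceID).
		WithNetworkID(networkID)

	resp, err := networkClient.PcloudNetworksPortsGetall(params, authInfo)
	if err != nil {
		return err
	}
	fmt.Printf("ports: %+v\n", resp.Payload)
	return nil
}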
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksPortsGetallReader is a Reader for the PcloudNetworksPortsGetall structure. +type PcloudNetworksPortsGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudNetworksPortsGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudNetworksPortsGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudNetworksPortsGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksPortsGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksPortsGetallOK creates a PcloudNetworksPortsGetallOK with default headers values +func NewPcloudNetworksPortsGetallOK() *PcloudNetworksPortsGetallOK { + return &PcloudNetworksPortsGetallOK{} +} + +/*PcloudNetworksPortsGetallOK handles this case with default header values. + +OK +*/ +type PcloudNetworksPortsGetallOK struct { + Payload *models.NetworkPorts +} + +func (o *PcloudNetworksPortsGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports][%d] pcloudNetworksPortsGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudNetworksPortsGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.NetworkPorts) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsGetallBadRequest creates a PcloudNetworksPortsGetallBadRequest with default headers values +func NewPcloudNetworksPortsGetallBadRequest() *PcloudNetworksPortsGetallBadRequest { + return &PcloudNetworksPortsGetallBadRequest{} +} + +/*PcloudNetworksPortsGetallBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudNetworksPortsGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports][%d] pcloudNetworksPortsGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudNetworksPortsGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsGetallInternalServerError creates a PcloudNetworksPortsGetallInternalServerError with default headers values +func NewPcloudNetworksPortsGetallInternalServerError() *PcloudNetworksPortsGetallInternalServerError { + return &PcloudNetworksPortsGetallInternalServerError{} +} + +/*PcloudNetworksPortsGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudNetworksPortsGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports][%d] pcloudNetworksPortsGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksPortsGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_post_parameters.go new file mode 100644 index 00000000000..37863da8e9d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudNetworksPortsPostParams creates a new PcloudNetworksPortsPostParams object +// with the default values initialized. 
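+//
+// Editorial sketch (not part of the generated output): the params objects
+// are designed to be built fluently and then handed to the corresponding
+// client method, along the lines of
+//
+//	params := NewPcloudNetworksPortsPostParams().
+//		WithCloudInstanceID(cloudInstanceID).
+//		WithNetworkID(networkID).
+//		WithBody(&models.NetworkPortCreate{})
+//
+// where cloudInstanceID and networkID are assumed caller-supplied strings.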
+func NewPcloudNetworksPortsPostParams() *PcloudNetworksPortsPostParams { + var () + return &PcloudNetworksPortsPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksPortsPostParamsWithTimeout creates a new PcloudNetworksPortsPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksPortsPostParamsWithTimeout(timeout time.Duration) *PcloudNetworksPortsPostParams { + var () + return &PcloudNetworksPortsPostParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksPortsPostParamsWithContext creates a new PcloudNetworksPortsPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksPortsPostParamsWithContext(ctx context.Context) *PcloudNetworksPortsPostParams { + var () + return &PcloudNetworksPortsPostParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksPortsPostParamsWithHTTPClient creates a new PcloudNetworksPortsPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksPortsPostParamsWithHTTPClient(client *http.Client) *PcloudNetworksPortsPostParams { + var () + return &PcloudNetworksPortsPostParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksPortsPostParams contains all the parameters to send to the API endpoint +for the pcloud networks ports post operation typically these are written to a http.Request +*/ +type PcloudNetworksPortsPostParams struct { + + /*Body + Create a Network Port + + */ + Body *models.NetworkPortCreate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) WithTimeout(timeout time.Duration) *PcloudNetworksPortsPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) WithContext(ctx context.Context) *PcloudNetworksPortsPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) WithHTTPClient(client *http.Client) *PcloudNetworksPortsPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) WithBody(body *models.NetworkPortCreate) *PcloudNetworksPortsPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) SetBody(body *models.NetworkPortCreate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) 
WithCloudInstanceID(cloudInstanceID string) *PcloudNetworksPortsPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) WithNetworkID(networkID string) *PcloudNetworksPortsPostParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud networks ports post params +func (o *PcloudNetworksPortsPostParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksPortsPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_post_responses.go new file mode 100644 index 00000000000..cba7f8237d1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_post_responses.go @@ -0,0 +1,211 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksPortsPostReader is a Reader for the PcloudNetworksPortsPost structure. +type PcloudNetworksPortsPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
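+//
+// Editorial note (not part of the generated output): only the 201 Created
+// case below is returned as a value; the 400, 409, 422, and 500 cases are
+// returned as the error, which works because each typed response
+// implements the error interface through its Error() method.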
+func (o *PcloudNetworksPortsPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 201: + result := NewPcloudNetworksPortsPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudNetworksPortsPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudNetworksPortsPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudNetworksPortsPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksPortsPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksPortsPostCreated creates a PcloudNetworksPortsPostCreated with default headers values +func NewPcloudNetworksPortsPostCreated() *PcloudNetworksPortsPostCreated { + return &PcloudNetworksPortsPostCreated{} +} + +/*PcloudNetworksPortsPostCreated handles this case with default header values. + +Created +*/ +type PcloudNetworksPortsPostCreated struct { + Payload *models.NetworkPort +} + +func (o *PcloudNetworksPortsPostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports][%d] pcloudNetworksPortsPostCreated %+v", 201, o.Payload) +} + +func (o *PcloudNetworksPortsPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.NetworkPort) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsPostBadRequest creates a PcloudNetworksPortsPostBadRequest with default headers values +func NewPcloudNetworksPortsPostBadRequest() *PcloudNetworksPortsPostBadRequest { + return &PcloudNetworksPortsPostBadRequest{} +} + +/*PcloudNetworksPortsPostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudNetworksPortsPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports][%d] pcloudNetworksPortsPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudNetworksPortsPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsPostConflict creates a PcloudNetworksPortsPostConflict with default headers values +func NewPcloudNetworksPortsPostConflict() *PcloudNetworksPortsPostConflict { + return &PcloudNetworksPortsPostConflict{} +} + +/*PcloudNetworksPortsPostConflict handles this case with default header values. 
+ +Conflict +*/ +type PcloudNetworksPortsPostConflict struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports][%d] pcloudNetworksPortsPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudNetworksPortsPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsPostUnprocessableEntity creates a PcloudNetworksPortsPostUnprocessableEntity with default headers values +func NewPcloudNetworksPortsPostUnprocessableEntity() *PcloudNetworksPortsPostUnprocessableEntity { + return &PcloudNetworksPortsPostUnprocessableEntity{} +} + +/*PcloudNetworksPortsPostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudNetworksPortsPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports][%d] pcloudNetworksPortsPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudNetworksPortsPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsPostInternalServerError creates a PcloudNetworksPortsPostInternalServerError with default headers values +func NewPcloudNetworksPortsPostInternalServerError() *PcloudNetworksPortsPostInternalServerError { + return &PcloudNetworksPortsPostInternalServerError{} +} + +/*PcloudNetworksPortsPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudNetworksPortsPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports][%d] pcloudNetworksPortsPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksPortsPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_put_parameters.go new file mode 100644 index 00000000000..460a6538624 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_put_parameters.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudNetworksPortsPutParams creates a new PcloudNetworksPortsPutParams object +// with the default values initialized. +func NewPcloudNetworksPortsPutParams() *PcloudNetworksPortsPutParams { + var () + return &PcloudNetworksPortsPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksPortsPutParamsWithTimeout creates a new PcloudNetworksPortsPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksPortsPutParamsWithTimeout(timeout time.Duration) *PcloudNetworksPortsPutParams { + var () + return &PcloudNetworksPortsPutParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksPortsPutParamsWithContext creates a new PcloudNetworksPortsPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksPortsPutParamsWithContext(ctx context.Context) *PcloudNetworksPortsPutParams { + var () + return &PcloudNetworksPortsPutParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksPortsPutParamsWithHTTPClient creates a new PcloudNetworksPortsPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksPortsPutParamsWithHTTPClient(client *http.Client) *PcloudNetworksPortsPutParams { + var () + return &PcloudNetworksPortsPutParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksPortsPutParams contains all the parameters to send to the API endpoint +for the pcloud networks ports put operation typically these are written to a http.Request +*/ +type PcloudNetworksPortsPutParams struct { + + /*Body + Parameters for updating a Port + + */ + Body *models.NetworkPortUpdate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + /*PortID + Port ID + + */ + PortID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) WithTimeout(timeout time.Duration) *PcloudNetworksPortsPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) WithContext(ctx context.Context) *PcloudNetworksPortsPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) WithHTTPClient(client *http.Client) *PcloudNetworksPortsPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the 
body to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) WithBody(body *models.NetworkPortUpdate) *PcloudNetworksPortsPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) SetBody(body *models.NetworkPortUpdate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) WithCloudInstanceID(cloudInstanceID string) *PcloudNetworksPortsPutParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) WithNetworkID(networkID string) *PcloudNetworksPortsPutParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WithPortID adds the portID to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) WithPortID(portID string) *PcloudNetworksPortsPutParams { + o.SetPortID(portID) + return o +} + +// SetPortID adds the portId to the pcloud networks ports put params +func (o *PcloudNetworksPortsPutParams) SetPortID(portID string) { + o.PortID = portID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksPortsPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + // path param port_id + if err := r.SetPathParam("port_id", o.PortID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_put_responses.go new file mode 100644 index 00000000000..05d0c3325d4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_ports_put_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksPortsPutReader is a Reader for the PcloudNetworksPortsPut structure. +type PcloudNetworksPortsPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
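+//
+// Editorial sketch (not part of the generated output): since non-2xx
+// responses surface as the returned error, a caller can recover the typed
+// payload with a type assertion, for example (assuming a configured
+// p_cloud_networks client and auth writer):
+//
+//	if _, err := client.PcloudNetworksPortsPut(params, authInfo); err != nil {
+//		if badReq, ok := err.(*PcloudNetworksPortsPutBadRequest); ok {
+//			fmt.Printf("bad request: %+v\n", badReq.Payload)
+//		}
+//	}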
+func (o *PcloudNetworksPortsPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudNetworksPortsPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudNetworksPortsPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudNetworksPortsPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksPortsPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksPortsPutOK creates a PcloudNetworksPortsPutOK with default headers values +func NewPcloudNetworksPortsPutOK() *PcloudNetworksPortsPutOK { + return &PcloudNetworksPortsPutOK{} +} + +/*PcloudNetworksPortsPutOK handles this case with default header values. + +OK +*/ +type PcloudNetworksPortsPutOK struct { + Payload *models.NetworkPort +} + +func (o *PcloudNetworksPortsPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsPutOK %+v", 200, o.Payload) +} + +func (o *PcloudNetworksPortsPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.NetworkPort) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsPutBadRequest creates a PcloudNetworksPortsPutBadRequest with default headers values +func NewPcloudNetworksPortsPutBadRequest() *PcloudNetworksPortsPutBadRequest { + return &PcloudNetworksPortsPutBadRequest{} +} + +/*PcloudNetworksPortsPutBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudNetworksPortsPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudNetworksPortsPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsPutUnprocessableEntity creates a PcloudNetworksPortsPutUnprocessableEntity with default headers values +func NewPcloudNetworksPortsPutUnprocessableEntity() *PcloudNetworksPortsPutUnprocessableEntity { + return &PcloudNetworksPortsPutUnprocessableEntity{} +} + +/*PcloudNetworksPortsPutUnprocessableEntity handles this case with default header values. 
+ +Unprocessable Entity +*/ +type PcloudNetworksPortsPutUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudNetworksPortsPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPortsPutInternalServerError creates a PcloudNetworksPortsPutInternalServerError with default headers values +func NewPcloudNetworksPortsPutInternalServerError() *PcloudNetworksPortsPutInternalServerError { + return &PcloudNetworksPortsPutInternalServerError{} +} + +/*PcloudNetworksPortsPutInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudNetworksPortsPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksPortsPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}/ports/{port_id}][%d] pcloudNetworksPortsPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksPortsPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_post_parameters.go new file mode 100644 index 00000000000..69c238877c2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudNetworksPostParams creates a new PcloudNetworksPostParams object +// with the default values initialized. 
+func NewPcloudNetworksPostParams() *PcloudNetworksPostParams { + var () + return &PcloudNetworksPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksPostParamsWithTimeout creates a new PcloudNetworksPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksPostParamsWithTimeout(timeout time.Duration) *PcloudNetworksPostParams { + var () + return &PcloudNetworksPostParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksPostParamsWithContext creates a new PcloudNetworksPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksPostParamsWithContext(ctx context.Context) *PcloudNetworksPostParams { + var () + return &PcloudNetworksPostParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksPostParamsWithHTTPClient creates a new PcloudNetworksPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksPostParamsWithHTTPClient(client *http.Client) *PcloudNetworksPostParams { + var () + return &PcloudNetworksPostParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksPostParams contains all the parameters to send to the API endpoint +for the pcloud networks post operation typically these are written to a http.Request +*/ +type PcloudNetworksPostParams struct { + + /*Body + Parameters for the creation of a new network + + */ + Body *models.NetworkCreate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud networks post params +func (o *PcloudNetworksPostParams) WithTimeout(timeout time.Duration) *PcloudNetworksPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks post params +func (o *PcloudNetworksPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks post params +func (o *PcloudNetworksPostParams) WithContext(ctx context.Context) *PcloudNetworksPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks post params +func (o *PcloudNetworksPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks post params +func (o *PcloudNetworksPostParams) WithHTTPClient(client *http.Client) *PcloudNetworksPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks post params +func (o *PcloudNetworksPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud networks post params +func (o *PcloudNetworksPostParams) WithBody(body *models.NetworkCreate) *PcloudNetworksPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud networks post params +func (o *PcloudNetworksPostParams) SetBody(body *models.NetworkCreate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks post params +func (o *PcloudNetworksPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudNetworksPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks post params +func (o *PcloudNetworksPostParams) 
SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_post_responses.go new file mode 100644 index 00000000000..3179a066029 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_post_responses.go @@ -0,0 +1,247 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksPostReader is a Reader for the PcloudNetworksPost structure. +type PcloudNetworksPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudNetworksPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudNetworksPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 201: + result := NewPcloudNetworksPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudNetworksPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudNetworksPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudNetworksPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksPostOK creates a PcloudNetworksPostOK with default headers values +func NewPcloudNetworksPostOK() *PcloudNetworksPostOK { + return &PcloudNetworksPostOK{} +} + +/*PcloudNetworksPostOK handles this case with default header values. 
+ +OK +*/ +type PcloudNetworksPostOK struct { + Payload *models.Network +} + +func (o *PcloudNetworksPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks][%d] pcloudNetworksPostOK %+v", 200, o.Payload) +} + +func (o *PcloudNetworksPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Network) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPostCreated creates a PcloudNetworksPostCreated with default headers values +func NewPcloudNetworksPostCreated() *PcloudNetworksPostCreated { + return &PcloudNetworksPostCreated{} +} + +/*PcloudNetworksPostCreated handles this case with default header values. + +Created +*/ +type PcloudNetworksPostCreated struct { + Payload *models.Network +} + +func (o *PcloudNetworksPostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks][%d] pcloudNetworksPostCreated %+v", 201, o.Payload) +} + +func (o *PcloudNetworksPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Network) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPostBadRequest creates a PcloudNetworksPostBadRequest with default headers values +func NewPcloudNetworksPostBadRequest() *PcloudNetworksPostBadRequest { + return &PcloudNetworksPostBadRequest{} +} + +/*PcloudNetworksPostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudNetworksPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudNetworksPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks][%d] pcloudNetworksPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudNetworksPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPostConflict creates a PcloudNetworksPostConflict with default headers values +func NewPcloudNetworksPostConflict() *PcloudNetworksPostConflict { + return &PcloudNetworksPostConflict{} +} + +/*PcloudNetworksPostConflict handles this case with default header values. 
+ +Conflict +*/ +type PcloudNetworksPostConflict struct { + Payload *models.Error +} + +func (o *PcloudNetworksPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks][%d] pcloudNetworksPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudNetworksPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPostUnprocessableEntity creates a PcloudNetworksPostUnprocessableEntity with default headers values +func NewPcloudNetworksPostUnprocessableEntity() *PcloudNetworksPostUnprocessableEntity { + return &PcloudNetworksPostUnprocessableEntity{} +} + +/*PcloudNetworksPostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudNetworksPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudNetworksPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks][%d] pcloudNetworksPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudNetworksPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPostInternalServerError creates a PcloudNetworksPostInternalServerError with default headers values +func NewPcloudNetworksPostInternalServerError() *PcloudNetworksPostInternalServerError { + return &PcloudNetworksPostInternalServerError{} +} + +/*PcloudNetworksPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudNetworksPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/networks][%d] pcloudNetworksPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_put_parameters.go new file mode 100644 index 00000000000..4c5ff09c4bb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_put_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudNetworksPutParams creates a new PcloudNetworksPutParams object +// with the default values initialized. +func NewPcloudNetworksPutParams() *PcloudNetworksPutParams { + var () + return &PcloudNetworksPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudNetworksPutParamsWithTimeout creates a new PcloudNetworksPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudNetworksPutParamsWithTimeout(timeout time.Duration) *PcloudNetworksPutParams { + var () + return &PcloudNetworksPutParams{ + + timeout: timeout, + } +} + +// NewPcloudNetworksPutParamsWithContext creates a new PcloudNetworksPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudNetworksPutParamsWithContext(ctx context.Context) *PcloudNetworksPutParams { + var () + return &PcloudNetworksPutParams{ + + Context: ctx, + } +} + +// NewPcloudNetworksPutParamsWithHTTPClient creates a new PcloudNetworksPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudNetworksPutParamsWithHTTPClient(client *http.Client) *PcloudNetworksPutParams { + var () + return &PcloudNetworksPutParams{ + HTTPClient: client, + } +} + +/*PcloudNetworksPutParams contains all the parameters to send to the API endpoint +for the pcloud networks put operation typically these are written to a http.Request +*/ +type PcloudNetworksPutParams struct { + + /*Body + Parameters to update a Network + + */ + Body *models.NetworkUpdate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud networks put params +func (o *PcloudNetworksPutParams) WithTimeout(timeout time.Duration) *PcloudNetworksPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud networks put params +func (o *PcloudNetworksPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud networks put params +func (o *PcloudNetworksPutParams) WithContext(ctx context.Context) *PcloudNetworksPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud networks put params +func (o *PcloudNetworksPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud networks put params +func (o *PcloudNetworksPutParams) WithHTTPClient(client *http.Client) *PcloudNetworksPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud networks put params +func (o *PcloudNetworksPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud networks put params +func (o *PcloudNetworksPutParams) WithBody(body *models.NetworkUpdate) *PcloudNetworksPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud networks put params +func (o 
*PcloudNetworksPutParams) SetBody(body *models.NetworkUpdate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud networks put params +func (o *PcloudNetworksPutParams) WithCloudInstanceID(cloudInstanceID string) *PcloudNetworksPutParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud networks put params +func (o *PcloudNetworksPutParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud networks put params +func (o *PcloudNetworksPutParams) WithNetworkID(networkID string) *PcloudNetworksPutParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud networks put params +func (o *PcloudNetworksPutParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudNetworksPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_put_responses.go new file mode 100644 index 00000000000..e4412da0f58 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks/pcloud_networks_put_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_networks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudNetworksPutReader is a Reader for the PcloudNetworksPut structure. +type PcloudNetworksPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
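+//
+// Editorial note (not part of the generated output): status codes that the
+// swagger spec does not enumerate fall through to the default branch below
+// and are reported as a generic *runtime.APIError rather than a typed
+// response.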
+func (o *PcloudNetworksPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudNetworksPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudNetworksPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudNetworksPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudNetworksPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudNetworksPutOK creates a PcloudNetworksPutOK with default headers values +func NewPcloudNetworksPutOK() *PcloudNetworksPutOK { + return &PcloudNetworksPutOK{} +} + +/*PcloudNetworksPutOK handles this case with default header values. + +OK +*/ +type PcloudNetworksPutOK struct { + Payload *models.Network +} + +func (o *PcloudNetworksPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksPutOK %+v", 200, o.Payload) +} + +func (o *PcloudNetworksPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Network) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPutBadRequest creates a PcloudNetworksPutBadRequest with default headers values +func NewPcloudNetworksPutBadRequest() *PcloudNetworksPutBadRequest { + return &PcloudNetworksPutBadRequest{} +} + +/*PcloudNetworksPutBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudNetworksPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudNetworksPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudNetworksPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPutUnprocessableEntity creates a PcloudNetworksPutUnprocessableEntity with default headers values +func NewPcloudNetworksPutUnprocessableEntity() *PcloudNetworksPutUnprocessableEntity { + return &PcloudNetworksPutUnprocessableEntity{} +} + +/*PcloudNetworksPutUnprocessableEntity handles this case with default header values. 
+ +Unprocessable Entity +*/ +type PcloudNetworksPutUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudNetworksPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudNetworksPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudNetworksPutInternalServerError creates a PcloudNetworksPutInternalServerError with default headers values +func NewPcloudNetworksPutInternalServerError() *PcloudNetworksPutInternalServerError { + return &PcloudNetworksPutInternalServerError{} +} + +/*PcloudNetworksPutInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudNetworksPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudNetworksPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/networks/{network_id}][%d] pcloudNetworksPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudNetworksPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/p_cloud_p_vm_instances_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/p_cloud_p_vm_instances_client.go new file mode 100644 index 00000000000..22edc1d3a08 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/p_cloud_p_vm_instances_client.go @@ -0,0 +1,537 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud p vm instances API client. 
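+//
+// Editorial sketch (not part of the generated output): a client is
+// typically constructed from a go-openapi transport and called with a
+// bearer-token auth writer, roughly:
+//
+//	transport := httptransport.New("power-api.example.com", "/", []string{"https"})
+//	client := New(transport, strfmt.Default)
+//	ok, err := client.PcloudPvminstancesGet(
+//		NewPcloudPvminstancesGetParams().
+//			WithCloudInstanceID(cloudInstanceID).
+//			WithPvmInstanceID(pvmInstanceID),
+//		httptransport.BearerToken(token))
+//
+// Here httptransport is "github.com/go-openapi/runtime/client", and the
+// host, IDs, and token are assumed placeholders.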
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client {
+	return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for p cloud p vm instances API
+*/
+type Client struct {
+	transport runtime.ClientTransport
+	formats   strfmt.Registry
+}
+
+/*
+PcloudPvminstancesActionPost performs an action (start, stop, reboot, immediate-shutdown, reset) on a p VM instance
+*/
+func (a *Client) PcloudPvminstancesActionPost(params *PcloudPvminstancesActionPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesActionPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesActionPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.action.post",
+		Method:             "POST",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/action",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesActionPostReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*PcloudPvminstancesActionPostOK), nil
+
+}
+
+/*
+PcloudPvminstancesCapturePost captures a p VM instance and creates a deployable image
+*/
+func (a *Client) PcloudPvminstancesCapturePost(params *PcloudPvminstancesCapturePostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesCapturePostOK, *PcloudPvminstancesCapturePostAccepted, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesCapturePostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.capture.post",
+		Method:             "POST",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/capture",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesCapturePostReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+	switch value := result.(type) {
+	case *PcloudPvminstancesCapturePostOK:
+		return value, nil, nil
+	case *PcloudPvminstancesCapturePostAccepted:
+		return nil, value, nil
+	}
+	return nil, nil, nil
+
+}
+
+/*
+PcloudPvminstancesClonePost clones a p VM instance
+*/
+func (a *Client) PcloudPvminstancesClonePost(params *PcloudPvminstancesClonePostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesClonePostAccepted, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesClonePostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.clone.post",
+		Method:             "POST",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/clone",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesClonePostReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*PcloudPvminstancesClonePostAccepted), nil
+
+}
+
+/*
+PcloudPvminstancesConsolePost generates the noVNC console URL
+*/
+func (a *Client) PcloudPvminstancesConsolePost(params *PcloudPvminstancesConsolePostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesConsolePostCreated, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesConsolePostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.console.post",
+		Method:             "POST",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/console",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesConsolePostReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*PcloudPvminstancesConsolePostCreated), nil
+
+}
+
+/*
+PcloudPvminstancesDelete deletes a p cloud p VM instance
+*/
+func (a *Client) PcloudPvminstancesDelete(params *PcloudPvminstancesDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesDeleteOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesDeleteParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.delete",
+		Method:             "DELETE",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesDeleteReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*PcloudPvminstancesDeleteOK), nil
+
+}
+
+/*
+PcloudPvminstancesGet gets a p VM instance's current state information
+*/
+func (a *Client) PcloudPvminstancesGet(params *PcloudPvminstancesGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.get",
+		Method:             "GET",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesGetReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*PcloudPvminstancesGetOK), nil
+
+}
+
+/*
+PcloudPvminstancesGetall gets all the pvm instances for this cloud instance
+*/
+func (a *Client) PcloudPvminstancesGetall(params *PcloudPvminstancesGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesGetallOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesGetallParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.getall",
+		Method:             "GET",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesGetallReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*PcloudPvminstancesGetallOK), nil
+
+}
+
+/*
+PcloudPvminstancesNetworksDelete removes all addresses of a network from a p VM instance
+*/
+func (a *Client) PcloudPvminstancesNetworksDelete(params *PcloudPvminstancesNetworksDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesNetworksDeleteOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesNetworksDeleteParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.networks.delete",
+		Method:             "DELETE",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks/{network_id}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesNetworksDeleteReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*PcloudPvminstancesNetworksDeleteOK), nil
+
+}
+
+/*
+PcloudPvminstancesNetworksGet gets a p VM instance's network information
+*/
+func (a *Client) PcloudPvminstancesNetworksGet(params *PcloudPvminstancesNetworksGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesNetworksGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesNetworksGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.networks.get",
+		Method:             "GET",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks/{network_id}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesNetworksGetReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*PcloudPvminstancesNetworksGetOK), nil
+
+}
+
+/*
+PcloudPvminstancesNetworksGetall gets all networks for this p VM instance
+*/
+func (a *Client) PcloudPvminstancesNetworksGetall(params *PcloudPvminstancesNetworksGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesNetworksGetallOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewPcloudPvminstancesNetworksGetallParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "pcloud.pvminstances.networks.getall",
+		Method:             "GET",
+		PathPattern:        "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &PcloudPvminstancesNetworksGetallReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*PcloudPvminstancesNetworksGetallOK), nil
+
+}
+
+/*
+PcloudPvminstancesNetworksPost performs network addition, deletion, and listing
+*/
+func (a *Client)
PcloudPvminstancesNetworksPost(params *PcloudPvminstancesNetworksPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesNetworksPostCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesNetworksPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.networks.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesNetworksPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesNetworksPostCreated), nil + +} + +/* +PcloudPvminstancesOperationsPost performs an operation on a p VM instance +*/ +func (a *Client) PcloudPvminstancesOperationsPost(params *PcloudPvminstancesOperationsPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesOperationsPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesOperationsPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.operations.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/operations", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesOperationsPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesOperationsPostOK), nil + +} + +/* +PcloudPvminstancesPost creates a new power VM instance +*/ +func (a *Client) PcloudPvminstancesPost(params *PcloudPvminstancesPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesPostOK, *PcloudPvminstancesPostCreated, *PcloudPvminstancesPostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, nil, err + } + switch value := result.(type) { + case *PcloudPvminstancesPostOK: + return value, nil, nil, nil + case *PcloudPvminstancesPostCreated: + return nil, value, nil, nil + case *PcloudPvminstancesPostAccepted: + return nil, nil, value, nil + } + return nil, nil, nil, nil + +} + +/* +PcloudPvminstancesPut updates a p cloud p VM instance +*/ +func (a *Client) PcloudPvminstancesPut(params *PcloudPvminstancesPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesPutAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesPutParams() + } + + result, err := 
a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.put", + Method: "PUT", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesPutAccepted), nil + +} + +/* +PcloudPvminstancesSnapshotsGetall gets all snapshots for this p VM instance +*/ +func (a *Client) PcloudPvminstancesSnapshotsGetall(params *PcloudPvminstancesSnapshotsGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesSnapshotsGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesSnapshotsGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.snapshots.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesSnapshotsGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesSnapshotsGetallOK), nil + +} + +/* +PcloudPvminstancesSnapshotsPost creates a p VM instance snapshot +*/ +func (a *Client) PcloudPvminstancesSnapshotsPost(params *PcloudPvminstancesSnapshotsPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesSnapshotsPostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesSnapshotsPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.snapshots.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesSnapshotsPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesSnapshotsPostAccepted), nil + +} + +/* +PcloudPvminstancesSnapshotsRestorePost restores a p VM instance snapshot +*/ +func (a *Client) PcloudPvminstancesSnapshotsRestorePost(params *PcloudPvminstancesSnapshotsRestorePostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesSnapshotsRestorePostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesSnapshotsRestorePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.snapshots.restore.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots/{snapshot_id}/restore", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: 
&PcloudPvminstancesSnapshotsRestorePostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesSnapshotsRestorePostAccepted), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_action_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_action_post_parameters.go new file mode 100644 index 00000000000..bcff916925f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_action_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesActionPostParams creates a new PcloudPvminstancesActionPostParams object +// with the default values initialized. +func NewPcloudPvminstancesActionPostParams() *PcloudPvminstancesActionPostParams { + var () + return &PcloudPvminstancesActionPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesActionPostParamsWithTimeout creates a new PcloudPvminstancesActionPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesActionPostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesActionPostParams { + var () + return &PcloudPvminstancesActionPostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesActionPostParamsWithContext creates a new PcloudPvminstancesActionPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesActionPostParamsWithContext(ctx context.Context) *PcloudPvminstancesActionPostParams { + var () + return &PcloudPvminstancesActionPostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesActionPostParamsWithHTTPClient creates a new PcloudPvminstancesActionPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesActionPostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesActionPostParams { + var () + return &PcloudPvminstancesActionPostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesActionPostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances action post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesActionPostParams struct { + + /*Body + Parameters for the desired action + + */ + Body *models.PVMInstanceAction + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances 
action post params +func (o *PcloudPvminstancesActionPostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesActionPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) WithContext(ctx context.Context) *PcloudPvminstancesActionPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesActionPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) WithBody(body *models.PVMInstanceAction) *PcloudPvminstancesActionPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) SetBody(body *models.PVMInstanceAction) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesActionPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesActionPostParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances action post params +func (o *PcloudPvminstancesActionPostParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesActionPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_action_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_action_post_responses.go new file mode 100644 index 00000000000..45896fbfb3d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_action_post_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesActionPostReader is a Reader for the PcloudPvminstancesActionPost structure. +type PcloudPvminstancesActionPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudPvminstancesActionPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesActionPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesActionPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudPvminstancesActionPostNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesActionPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesActionPostOK creates a PcloudPvminstancesActionPostOK with default headers values +func NewPcloudPvminstancesActionPostOK() *PcloudPvminstancesActionPostOK { + return &PcloudPvminstancesActionPostOK{} +} + +/*PcloudPvminstancesActionPostOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesActionPostOK struct { + Payload models.Object +} + +func (o *PcloudPvminstancesActionPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/action][%d] pcloudPvminstancesActionPostOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesActionPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesActionPostBadRequest creates a PcloudPvminstancesActionPostBadRequest with default headers values +func NewPcloudPvminstancesActionPostBadRequest() *PcloudPvminstancesActionPostBadRequest { + return &PcloudPvminstancesActionPostBadRequest{} +} + +/*PcloudPvminstancesActionPostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesActionPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesActionPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/action][%d] pcloudPvminstancesActionPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesActionPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesActionPostNotFound creates a PcloudPvminstancesActionPostNotFound with default headers values +func NewPcloudPvminstancesActionPostNotFound() *PcloudPvminstancesActionPostNotFound { + return &PcloudPvminstancesActionPostNotFound{} +} + +/*PcloudPvminstancesActionPostNotFound handles this case with default header values. + +Not Found +*/ +type PcloudPvminstancesActionPostNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesActionPostNotFound) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/action][%d] pcloudPvminstancesActionPostNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesActionPostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesActionPostInternalServerError creates a PcloudPvminstancesActionPostInternalServerError with default headers values +func NewPcloudPvminstancesActionPostInternalServerError() *PcloudPvminstancesActionPostInternalServerError { + return &PcloudPvminstancesActionPostInternalServerError{} +} + +/*PcloudPvminstancesActionPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesActionPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesActionPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/action][%d] pcloudPvminstancesActionPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesActionPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_capture_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_capture_post_parameters.go new file mode 100644 index 00000000000..1a73c2090a0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_capture_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesCapturePostParams creates a new PcloudPvminstancesCapturePostParams object +// with the default values initialized. +func NewPcloudPvminstancesCapturePostParams() *PcloudPvminstancesCapturePostParams { + var () + return &PcloudPvminstancesCapturePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesCapturePostParamsWithTimeout creates a new PcloudPvminstancesCapturePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesCapturePostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesCapturePostParams { + var () + return &PcloudPvminstancesCapturePostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesCapturePostParamsWithContext creates a new PcloudPvminstancesCapturePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesCapturePostParamsWithContext(ctx context.Context) *PcloudPvminstancesCapturePostParams { + var () + return &PcloudPvminstancesCapturePostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesCapturePostParamsWithHTTPClient creates a new PcloudPvminstancesCapturePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesCapturePostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesCapturePostParams { + var () + return &PcloudPvminstancesCapturePostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesCapturePostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances capture post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesCapturePostParams struct { + + /*Body + Parameters for the capture PVMInstance + + */ + Body *models.PVMInstanceCapture + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesCapturePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) WithContext(ctx context.Context) *PcloudPvminstancesCapturePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesCapturePostParams { + 
o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) WithBody(body *models.PVMInstanceCapture) *PcloudPvminstancesCapturePostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) SetBody(body *models.PVMInstanceCapture) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesCapturePostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesCapturePostParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances capture post params +func (o *PcloudPvminstancesCapturePostParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesCapturePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_capture_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_capture_post_responses.go new file mode 100644 index 00000000000..8974e1f5f32 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_capture_post_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesCapturePostReader is a Reader for the PcloudPvminstancesCapturePost structure. +type PcloudPvminstancesCapturePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesCapturePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesCapturePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewPcloudPvminstancesCapturePostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 500: + result := NewPcloudPvminstancesCapturePostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesCapturePostOK creates a PcloudPvminstancesCapturePostOK with default headers values +func NewPcloudPvminstancesCapturePostOK() *PcloudPvminstancesCapturePostOK { + return &PcloudPvminstancesCapturePostOK{} +} + +/*PcloudPvminstancesCapturePostOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesCapturePostOK struct { + Payload models.Object +} + +func (o *PcloudPvminstancesCapturePostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/capture][%d] pcloudPvminstancesCapturePostOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesCapturePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesCapturePostAccepted creates a PcloudPvminstancesCapturePostAccepted with default headers values +func NewPcloudPvminstancesCapturePostAccepted() *PcloudPvminstancesCapturePostAccepted { + return &PcloudPvminstancesCapturePostAccepted{} +} + +/*PcloudPvminstancesCapturePostAccepted handles this case with default header values. + +Accepted, upload to cloud storage in progress +*/ +type PcloudPvminstancesCapturePostAccepted struct { + Payload models.Object +} + +func (o *PcloudPvminstancesCapturePostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/capture][%d] pcloudPvminstancesCapturePostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudPvminstancesCapturePostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesCapturePostInternalServerError creates a PcloudPvminstancesCapturePostInternalServerError with default headers values +func NewPcloudPvminstancesCapturePostInternalServerError() *PcloudPvminstancesCapturePostInternalServerError { + return &PcloudPvminstancesCapturePostInternalServerError{} +} + +/*PcloudPvminstancesCapturePostInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesCapturePostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesCapturePostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/capture][%d] pcloudPvminstancesCapturePostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesCapturePostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_clone_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_clone_post_parameters.go new file mode 100644 index 00000000000..9c5e86bf2f7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_clone_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesClonePostParams creates a new PcloudPvminstancesClonePostParams object +// with the default values initialized. 
+func NewPcloudPvminstancesClonePostParams() *PcloudPvminstancesClonePostParams { + var () + return &PcloudPvminstancesClonePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesClonePostParamsWithTimeout creates a new PcloudPvminstancesClonePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesClonePostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesClonePostParams { + var () + return &PcloudPvminstancesClonePostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesClonePostParamsWithContext creates a new PcloudPvminstancesClonePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesClonePostParamsWithContext(ctx context.Context) *PcloudPvminstancesClonePostParams { + var () + return &PcloudPvminstancesClonePostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesClonePostParamsWithHTTPClient creates a new PcloudPvminstancesClonePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesClonePostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesClonePostParams { + var () + return &PcloudPvminstancesClonePostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesClonePostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances clone post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesClonePostParams struct { + + /*Body + Clone PVM Instance parameters + + */ + Body *models.PVMInstanceClone + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesClonePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) WithContext(ctx context.Context) *PcloudPvminstancesClonePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesClonePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) WithBody(body *models.PVMInstanceClone) *PcloudPvminstancesClonePostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) 
SetBody(body *models.PVMInstanceClone) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesClonePostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesClonePostParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances clone post params +func (o *PcloudPvminstancesClonePostParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesClonePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_clone_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_clone_post_responses.go new file mode 100644 index 00000000000..52773217fef --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_clone_post_responses.go @@ -0,0 +1,211 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesClonePostReader is a Reader for the PcloudPvminstancesClonePost structure. +type PcloudPvminstancesClonePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesClonePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudPvminstancesClonePostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesClonePostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudPvminstancesClonePostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudPvminstancesClonePostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesClonePostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesClonePostAccepted creates a PcloudPvminstancesClonePostAccepted with default headers values +func NewPcloudPvminstancesClonePostAccepted() *PcloudPvminstancesClonePostAccepted { + return &PcloudPvminstancesClonePostAccepted{} +} + +/*PcloudPvminstancesClonePostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudPvminstancesClonePostAccepted struct { + Payload *models.PVMInstance +} + +func (o *PcloudPvminstancesClonePostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/clone][%d] pcloudPvminstancesClonePostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudPvminstancesClonePostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.PVMInstance) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesClonePostBadRequest creates a PcloudPvminstancesClonePostBadRequest with default headers values +func NewPcloudPvminstancesClonePostBadRequest() *PcloudPvminstancesClonePostBadRequest { + return &PcloudPvminstancesClonePostBadRequest{} +} + +/*PcloudPvminstancesClonePostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesClonePostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesClonePostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/clone][%d] pcloudPvminstancesClonePostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesClonePostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesClonePostConflict creates a PcloudPvminstancesClonePostConflict with default headers values +func NewPcloudPvminstancesClonePostConflict() *PcloudPvminstancesClonePostConflict { + return &PcloudPvminstancesClonePostConflict{} +} + +/*PcloudPvminstancesClonePostConflict handles this case with default header values. + +Conflict +*/ +type PcloudPvminstancesClonePostConflict struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesClonePostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/clone][%d] pcloudPvminstancesClonePostConflict %+v", 409, o.Payload) +} + +func (o *PcloudPvminstancesClonePostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesClonePostUnprocessableEntity creates a PcloudPvminstancesClonePostUnprocessableEntity with default headers values +func NewPcloudPvminstancesClonePostUnprocessableEntity() *PcloudPvminstancesClonePostUnprocessableEntity { + return &PcloudPvminstancesClonePostUnprocessableEntity{} +} + +/*PcloudPvminstancesClonePostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudPvminstancesClonePostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesClonePostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/clone][%d] pcloudPvminstancesClonePostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudPvminstancesClonePostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesClonePostInternalServerError creates a PcloudPvminstancesClonePostInternalServerError with default headers values +func NewPcloudPvminstancesClonePostInternalServerError() *PcloudPvminstancesClonePostInternalServerError { + return &PcloudPvminstancesClonePostInternalServerError{} +} + +/*PcloudPvminstancesClonePostInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesClonePostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesClonePostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/clone][%d] pcloudPvminstancesClonePostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesClonePostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_console_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_console_post_parameters.go new file mode 100644 index 00000000000..d96110748ca --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_console_post_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesConsolePostParams creates a new PcloudPvminstancesConsolePostParams object +// with the default values initialized. 
+func NewPcloudPvminstancesConsolePostParams() *PcloudPvminstancesConsolePostParams { + var () + return &PcloudPvminstancesConsolePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesConsolePostParamsWithTimeout creates a new PcloudPvminstancesConsolePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesConsolePostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesConsolePostParams { + var () + return &PcloudPvminstancesConsolePostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesConsolePostParamsWithContext creates a new PcloudPvminstancesConsolePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesConsolePostParamsWithContext(ctx context.Context) *PcloudPvminstancesConsolePostParams { + var () + return &PcloudPvminstancesConsolePostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesConsolePostParamsWithHTTPClient creates a new PcloudPvminstancesConsolePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesConsolePostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesConsolePostParams { + var () + return &PcloudPvminstancesConsolePostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesConsolePostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances console post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesConsolePostParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances console post params +func (o *PcloudPvminstancesConsolePostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesConsolePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances console post params +func (o *PcloudPvminstancesConsolePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances console post params +func (o *PcloudPvminstancesConsolePostParams) WithContext(ctx context.Context) *PcloudPvminstancesConsolePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances console post params +func (o *PcloudPvminstancesConsolePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances console post params +func (o *PcloudPvminstancesConsolePostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesConsolePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances console post params +func (o *PcloudPvminstancesConsolePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances console post params +func (o *PcloudPvminstancesConsolePostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesConsolePostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances 
console post params +func (o *PcloudPvminstancesConsolePostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances console post params +func (o *PcloudPvminstancesConsolePostParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesConsolePostParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances console post params +func (o *PcloudPvminstancesConsolePostParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesConsolePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_console_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_console_post_responses.go new file mode 100644 index 00000000000..e5fca8af213 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_console_post_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesConsolePostReader is a Reader for the PcloudPvminstancesConsolePost structure. +type PcloudPvminstancesConsolePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
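+// A 201 response is decoded into *PcloudPvminstancesConsolePostCreated and
+// returned as the result; 422 and 500 responses are decoded into their
+// respective error types and returned as the error value.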
+func (o *PcloudPvminstancesConsolePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 201: + result := NewPcloudPvminstancesConsolePostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 422: + result := NewPcloudPvminstancesConsolePostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesConsolePostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesConsolePostCreated creates a PcloudPvminstancesConsolePostCreated with default headers values +func NewPcloudPvminstancesConsolePostCreated() *PcloudPvminstancesConsolePostCreated { + return &PcloudPvminstancesConsolePostCreated{} +} + +/*PcloudPvminstancesConsolePostCreated handles this case with default header values. + +Created +*/ +type PcloudPvminstancesConsolePostCreated struct { + Payload *models.PVMInstanceConsole +} + +func (o *PcloudPvminstancesConsolePostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/console][%d] pcloudPvminstancesConsolePostCreated %+v", 201, o.Payload) +} + +func (o *PcloudPvminstancesConsolePostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.PVMInstanceConsole) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesConsolePostUnprocessableEntity creates a PcloudPvminstancesConsolePostUnprocessableEntity with default headers values +func NewPcloudPvminstancesConsolePostUnprocessableEntity() *PcloudPvminstancesConsolePostUnprocessableEntity { + return &PcloudPvminstancesConsolePostUnprocessableEntity{} +} + +/*PcloudPvminstancesConsolePostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudPvminstancesConsolePostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesConsolePostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/console][%d] pcloudPvminstancesConsolePostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudPvminstancesConsolePostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesConsolePostInternalServerError creates a PcloudPvminstancesConsolePostInternalServerError with default headers values +func NewPcloudPvminstancesConsolePostInternalServerError() *PcloudPvminstancesConsolePostInternalServerError { + return &PcloudPvminstancesConsolePostInternalServerError{} +} + +/*PcloudPvminstancesConsolePostInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesConsolePostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesConsolePostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/console][%d] pcloudPvminstancesConsolePostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesConsolePostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_delete_parameters.go new file mode 100644 index 00000000000..4f6a9c5a403 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_delete_parameters.go @@ -0,0 +1,190 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesDeleteParams creates a new PcloudPvminstancesDeleteParams object +// with the default values initialized. 
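+//
+// Illustrative usage sketch (editorial addition, not go-swagger output; the
+// IDs below are placeholder values): each With* setter returns the receiver,
+// so a delete request can be built fluently:
+//
+//	params := NewPcloudPvminstancesDeleteParams().
+//		WithCloudInstanceID("example-cloud-instance-id").
+//		WithPvmInstanceID("example-pvm-instance-id").
+//		WithDeleteDataVolumes(swag.Bool(true))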
+func NewPcloudPvminstancesDeleteParams() *PcloudPvminstancesDeleteParams { + var () + return &PcloudPvminstancesDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesDeleteParamsWithTimeout creates a new PcloudPvminstancesDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesDeleteParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesDeleteParams { + var () + return &PcloudPvminstancesDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesDeleteParamsWithContext creates a new PcloudPvminstancesDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesDeleteParamsWithContext(ctx context.Context) *PcloudPvminstancesDeleteParams { + var () + return &PcloudPvminstancesDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesDeleteParamsWithHTTPClient creates a new PcloudPvminstancesDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesDeleteParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesDeleteParams { + var () + return &PcloudPvminstancesDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesDeleteParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances delete operation typically these are written to a http.Request +*/ +type PcloudPvminstancesDeleteParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*DeleteDataVolumes + Indicates if all data volumes attached to the PVMInstance should be deleted when deleting the PVMInstance. Shared data volumes will be deleted if there are no other PVMInstances attached. 
+ + */ + DeleteDataVolumes *bool + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) WithContext(ctx context.Context) *PcloudPvminstancesDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithDeleteDataVolumes adds the deleteDataVolumes to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) WithDeleteDataVolumes(deleteDataVolumes *bool) *PcloudPvminstancesDeleteParams { + o.SetDeleteDataVolumes(deleteDataVolumes) + return o +} + +// SetDeleteDataVolumes adds the deleteDataVolumes to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) SetDeleteDataVolumes(deleteDataVolumes *bool) { + o.DeleteDataVolumes = deleteDataVolumes +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesDeleteParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances delete params +func (o *PcloudPvminstancesDeleteParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if o.DeleteDataVolumes != nil { + + // query param delete_data_volumes + var qrDeleteDataVolumes bool + if o.DeleteDataVolumes != nil { + qrDeleteDataVolumes = *o.DeleteDataVolumes + } + qDeleteDataVolumes := swag.FormatBool(qrDeleteDataVolumes) + if qDeleteDataVolumes != "" { + if err := 
r.SetQueryParam("delete_data_volumes", qDeleteDataVolumes); err != nil { + return err + } + } + + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_delete_responses.go new file mode 100644 index 00000000000..fa64c53c133 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_delete_responses.go @@ -0,0 +1,209 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesDeleteReader is a Reader for the PcloudPvminstancesDelete structure. +type PcloudPvminstancesDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudPvminstancesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudPvminstancesDeleteNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudPvminstancesDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesDeleteOK creates a PcloudPvminstancesDeleteOK with default headers values +func NewPcloudPvminstancesDeleteOK() *PcloudPvminstancesDeleteOK { + return &PcloudPvminstancesDeleteOK{} +} + +/*PcloudPvminstancesDeleteOK handles this case with default header values. 
+ +OK +*/ +type PcloudPvminstancesDeleteOK struct { + Payload models.Object +} + +func (o *PcloudPvminstancesDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesDeleteBadRequest creates a PcloudPvminstancesDeleteBadRequest with default headers values +func NewPcloudPvminstancesDeleteBadRequest() *PcloudPvminstancesDeleteBadRequest { + return &PcloudPvminstancesDeleteBadRequest{} +} + +/*PcloudPvminstancesDeleteBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudPvminstancesDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesDeleteNotFound creates a PcloudPvminstancesDeleteNotFound with default headers values +func NewPcloudPvminstancesDeleteNotFound() *PcloudPvminstancesDeleteNotFound { + return &PcloudPvminstancesDeleteNotFound{} +} + +/*PcloudPvminstancesDeleteNotFound handles this case with default header values. + +Not Found +*/ +type PcloudPvminstancesDeleteNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesDeleteNotFound) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesDeleteNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesDeleteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesDeleteGone creates a PcloudPvminstancesDeleteGone with default headers values +func NewPcloudPvminstancesDeleteGone() *PcloudPvminstancesDeleteGone { + return &PcloudPvminstancesDeleteGone{} +} + +/*PcloudPvminstancesDeleteGone handles this case with default header values. 
+ +Gone +*/ +type PcloudPvminstancesDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudPvminstancesDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesDeleteInternalServerError creates a PcloudPvminstancesDeleteInternalServerError with default headers values +func NewPcloudPvminstancesDeleteInternalServerError() *PcloudPvminstancesDeleteInternalServerError { + return &PcloudPvminstancesDeleteInternalServerError{} +} + +/*PcloudPvminstancesDeleteInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_get_parameters.go new file mode 100644 index 00000000000..5badd59cab6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesGetParams creates a new PcloudPvminstancesGetParams object +// with the default values initialized. 
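+//
+// Illustrative usage sketch (editorial addition; the IDs are placeholders):
+//
+//	params := NewPcloudPvminstancesGetParams().
+//		WithCloudInstanceID("example-cloud-instance-id").
+//		WithPvmInstanceID("example-pvm-instance-id")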
+func NewPcloudPvminstancesGetParams() *PcloudPvminstancesGetParams { + var () + return &PcloudPvminstancesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesGetParamsWithTimeout creates a new PcloudPvminstancesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesGetParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesGetParams { + var () + return &PcloudPvminstancesGetParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesGetParamsWithContext creates a new PcloudPvminstancesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesGetParamsWithContext(ctx context.Context) *PcloudPvminstancesGetParams { + var () + return &PcloudPvminstancesGetParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesGetParamsWithHTTPClient creates a new PcloudPvminstancesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesGetParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesGetParams { + var () + return &PcloudPvminstancesGetParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesGetParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances get operation typically these are written to a http.Request +*/ +type PcloudPvminstancesGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) WithContext(ctx context.Context) *PcloudPvminstancesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) WithPvmInstanceID(pvmInstanceID string) 
*PcloudPvminstancesGetParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances get params +func (o *PcloudPvminstancesGetParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_get_responses.go new file mode 100644 index 00000000000..4b7d77d8684 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesGetReader is a Reader for the PcloudPvminstancesGet structure. +type PcloudPvminstancesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudPvminstancesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudPvminstancesGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesGetOK creates a PcloudPvminstancesGetOK with default headers values +func NewPcloudPvminstancesGetOK() *PcloudPvminstancesGetOK { + return &PcloudPvminstancesGetOK{} +} + +/*PcloudPvminstancesGetOK handles this case with default header values. 
+ +OK +*/ +type PcloudPvminstancesGetOK struct { + Payload *models.PVMInstance +} + +func (o *PcloudPvminstancesGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesGetOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.PVMInstance) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesGetBadRequest creates a PcloudPvminstancesGetBadRequest with default headers values +func NewPcloudPvminstancesGetBadRequest() *PcloudPvminstancesGetBadRequest { + return &PcloudPvminstancesGetBadRequest{} +} + +/*PcloudPvminstancesGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudPvminstancesGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesGetNotFound creates a PcloudPvminstancesGetNotFound with default headers values +func NewPcloudPvminstancesGetNotFound() *PcloudPvminstancesGetNotFound { + return &PcloudPvminstancesGetNotFound{} +} + +/*PcloudPvminstancesGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudPvminstancesGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesGetInternalServerError creates a PcloudPvminstancesGetInternalServerError with default headers values +func NewPcloudPvminstancesGetInternalServerError() *PcloudPvminstancesGetInternalServerError { + return &PcloudPvminstancesGetInternalServerError{} +} + +/*PcloudPvminstancesGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_getall_parameters.go new file mode 100644 index 00000000000..3e530984189 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_getall_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesGetallParams creates a new PcloudPvminstancesGetallParams object +// with the default values initialized. +func NewPcloudPvminstancesGetallParams() *PcloudPvminstancesGetallParams { + var () + return &PcloudPvminstancesGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesGetallParamsWithTimeout creates a new PcloudPvminstancesGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesGetallParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesGetallParams { + var () + return &PcloudPvminstancesGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesGetallParamsWithContext creates a new PcloudPvminstancesGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesGetallParamsWithContext(ctx context.Context) *PcloudPvminstancesGetallParams { + var () + return &PcloudPvminstancesGetallParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesGetallParamsWithHTTPClient creates a new PcloudPvminstancesGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesGetallParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesGetallParams { + var () + return &PcloudPvminstancesGetallParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesGetallParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances getall operation typically these are written to a http.Request +*/ +type PcloudPvminstancesGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances getall params +func (o 
*PcloudPvminstancesGetallParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances getall params +func (o *PcloudPvminstancesGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances getall params +func (o *PcloudPvminstancesGetallParams) WithContext(ctx context.Context) *PcloudPvminstancesGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances getall params +func (o *PcloudPvminstancesGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances getall params +func (o *PcloudPvminstancesGetallParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances getall params +func (o *PcloudPvminstancesGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances getall params +func (o *PcloudPvminstancesGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances getall params +func (o *PcloudPvminstancesGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_getall_responses.go new file mode 100644 index 00000000000..334d18daf3f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_getall_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesGetallReader is a Reader for the PcloudPvminstancesGetall structure. +type PcloudPvminstancesGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
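+// A 200 response is decoded into *PcloudPvminstancesGetallOK and returned as
+// the result; 400 and 500 responses are returned as the error value.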
+func (o *PcloudPvminstancesGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesGetallOK creates a PcloudPvminstancesGetallOK with default headers values +func NewPcloudPvminstancesGetallOK() *PcloudPvminstancesGetallOK { + return &PcloudPvminstancesGetallOK{} +} + +/*PcloudPvminstancesGetallOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesGetallOK struct { + Payload *models.PVMInstances +} + +func (o *PcloudPvminstancesGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.PVMInstances) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesGetallBadRequest creates a PcloudPvminstancesGetallBadRequest with default headers values +func NewPcloudPvminstancesGetallBadRequest() *PcloudPvminstancesGetallBadRequest { + return &PcloudPvminstancesGetallBadRequest{} +} + +/*PcloudPvminstancesGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudPvminstancesGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesGetallInternalServerError creates a PcloudPvminstancesGetallInternalServerError with default headers values +func NewPcloudPvminstancesGetallInternalServerError() *PcloudPvminstancesGetallInternalServerError { + return &PcloudPvminstancesGetallInternalServerError{} +} + +/*PcloudPvminstancesGetallInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_delete_parameters.go new file mode 100644 index 00000000000..8cc10093c8a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_delete_parameters.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesNetworksDeleteParams creates a new PcloudPvminstancesNetworksDeleteParams object +// with the default values initialized. 
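+//
+// Illustrative usage sketch (editorial addition; the IDs are placeholders and
+// the request body fields are omitted here):
+//
+//	params := NewPcloudPvminstancesNetworksDeleteParams().
+//		WithCloudInstanceID("example-cloud-instance-id").
+//		WithPvmInstanceID("example-pvm-instance-id").
+//		WithNetworkID("example-network-id").
+//		WithBody(&models.PVMInstanceRemoveNetwork{})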
+func NewPcloudPvminstancesNetworksDeleteParams() *PcloudPvminstancesNetworksDeleteParams { + var () + return &PcloudPvminstancesNetworksDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesNetworksDeleteParamsWithTimeout creates a new PcloudPvminstancesNetworksDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesNetworksDeleteParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesNetworksDeleteParams { + var () + return &PcloudPvminstancesNetworksDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesNetworksDeleteParamsWithContext creates a new PcloudPvminstancesNetworksDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesNetworksDeleteParamsWithContext(ctx context.Context) *PcloudPvminstancesNetworksDeleteParams { + var () + return &PcloudPvminstancesNetworksDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesNetworksDeleteParamsWithHTTPClient creates a new PcloudPvminstancesNetworksDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesNetworksDeleteParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksDeleteParams { + var () + return &PcloudPvminstancesNetworksDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesNetworksDeleteParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances networks delete operation typically these are written to a http.Request +*/ +type PcloudPvminstancesNetworksDeleteParams struct { + + /*Body + Remove a network from PVM Instance parameters + + */ + Body *models.PVMInstanceRemoveNetwork + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesNetworksDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) WithContext(ctx context.Context) *PcloudPvminstancesNetworksDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances networks delete params +func (o 
*PcloudPvminstancesNetworksDeleteParams) WithBody(body *models.PVMInstanceRemoveNetwork) *PcloudPvminstancesNetworksDeleteParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) SetBody(body *models.PVMInstanceRemoveNetwork) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesNetworksDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) WithNetworkID(networkID string) *PcloudPvminstancesNetworksDeleteParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesNetworksDeleteParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances networks delete params +func (o *PcloudPvminstancesNetworksDeleteParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesNetworksDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_delete_responses.go new file mode 100644 index 00000000000..06916c3b8ce --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesNetworksDeleteReader is a Reader for the PcloudPvminstancesNetworksDelete structure. +type PcloudPvminstancesNetworksDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudPvminstancesNetworksDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesNetworksDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesNetworksDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudPvminstancesNetworksDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesNetworksDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesNetworksDeleteOK creates a PcloudPvminstancesNetworksDeleteOK with default headers values +func NewPcloudPvminstancesNetworksDeleteOK() *PcloudPvminstancesNetworksDeleteOK { + return &PcloudPvminstancesNetworksDeleteOK{} +} + +/*PcloudPvminstancesNetworksDeleteOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesNetworksDeleteOK struct { + Payload models.Object +} + +func (o *PcloudPvminstancesNetworksDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks/{network_id}][%d] pcloudPvminstancesNetworksDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesNetworksDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksDeleteBadRequest creates a PcloudPvminstancesNetworksDeleteBadRequest with default headers values +func NewPcloudPvminstancesNetworksDeleteBadRequest() *PcloudPvminstancesNetworksDeleteBadRequest { + return &PcloudPvminstancesNetworksDeleteBadRequest{} +} + +/*PcloudPvminstancesNetworksDeleteBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesNetworksDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks/{network_id}][%d] pcloudPvminstancesNetworksDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesNetworksDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksDeleteGone creates a PcloudPvminstancesNetworksDeleteGone with default headers values +func NewPcloudPvminstancesNetworksDeleteGone() *PcloudPvminstancesNetworksDeleteGone { + return &PcloudPvminstancesNetworksDeleteGone{} +} + +/*PcloudPvminstancesNetworksDeleteGone handles this case with default header values. + +Gone +*/ +type PcloudPvminstancesNetworksDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks/{network_id}][%d] pcloudPvminstancesNetworksDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudPvminstancesNetworksDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksDeleteInternalServerError creates a PcloudPvminstancesNetworksDeleteInternalServerError with default headers values +func NewPcloudPvminstancesNetworksDeleteInternalServerError() *PcloudPvminstancesNetworksDeleteInternalServerError { + return &PcloudPvminstancesNetworksDeleteInternalServerError{} +} + +/*PcloudPvminstancesNetworksDeleteInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesNetworksDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks/{network_id}][%d] pcloudPvminstancesNetworksDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesNetworksDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_get_parameters.go new file mode 100644 index 00000000000..98888850bb3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_get_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesNetworksGetParams creates a new PcloudPvminstancesNetworksGetParams object +// with the default values initialized. +func NewPcloudPvminstancesNetworksGetParams() *PcloudPvminstancesNetworksGetParams { + var () + return &PcloudPvminstancesNetworksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesNetworksGetParamsWithTimeout creates a new PcloudPvminstancesNetworksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesNetworksGetParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesNetworksGetParams { + var () + return &PcloudPvminstancesNetworksGetParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesNetworksGetParamsWithContext creates a new PcloudPvminstancesNetworksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesNetworksGetParamsWithContext(ctx context.Context) *PcloudPvminstancesNetworksGetParams { + var () + return &PcloudPvminstancesNetworksGetParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesNetworksGetParamsWithHTTPClient creates a new PcloudPvminstancesNetworksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesNetworksGetParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksGetParams { + var () + return &PcloudPvminstancesNetworksGetParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesNetworksGetParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances networks get operation typically these are written to a http.Request +*/ +type PcloudPvminstancesNetworksGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*NetworkID + Network ID + + */ + NetworkID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesNetworksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) WithContext(ctx context.Context) *PcloudPvminstancesNetworksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksGetParams { + o.SetHTTPClient(client) 
+ return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesNetworksGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithNetworkID adds the networkID to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) WithNetworkID(networkID string) *PcloudPvminstancesNetworksGetParams { + o.SetNetworkID(networkID) + return o +} + +// SetNetworkID adds the networkId to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) SetNetworkID(networkID string) { + o.NetworkID = networkID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesNetworksGetParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances networks get params +func (o *PcloudPvminstancesNetworksGetParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesNetworksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param network_id + if err := r.SetPathParam("network_id", o.NetworkID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_get_responses.go new file mode 100644 index 00000000000..c5f0e555f09 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_get_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesNetworksGetReader is a Reader for the PcloudPvminstancesNetworksGet structure. +type PcloudPvminstancesNetworksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
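// NOTE: Editorial usage sketch only; not part of the vendored, generated file.
// It assumes the package's generated Client (added elsewhere in this change)
// exposes the usual go-swagger method PcloudPvminstancesNetworksGet(params,
// authInfo). The parameter constructor applies cr.DefaultTimeout, so no
// explicit timeout is set here.
func exampleNetworksGet(c *Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID, pvmInstanceID, networkID string) (*models.PVMInstanceNetworks, error) {
	params := NewPcloudPvminstancesNetworksGetParams().
		WithCloudInstanceID(cloudInstanceID).
		WithPvmInstanceID(pvmInstanceID).
		WithNetworkID(networkID)
	ok, err := c.PcloudPvminstancesNetworksGet(params, authInfo)
	if err != nil {
		// Non-2xx responses (404, 500) are handed back by the Reader below as
		// typed errors, so they surface here rather than in the payload.
		return nil, err
	}
	return ok.Payload, nil
}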
+func (o *PcloudPvminstancesNetworksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesNetworksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewPcloudPvminstancesNetworksGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesNetworksGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesNetworksGetOK creates a PcloudPvminstancesNetworksGetOK with default headers values +func NewPcloudPvminstancesNetworksGetOK() *PcloudPvminstancesNetworksGetOK { + return &PcloudPvminstancesNetworksGetOK{} +} + +/*PcloudPvminstancesNetworksGetOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesNetworksGetOK struct { + Payload *models.PVMInstanceNetworks +} + +func (o *PcloudPvminstancesNetworksGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks/{network_id}][%d] pcloudPvminstancesNetworksGetOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesNetworksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.PVMInstanceNetworks) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksGetNotFound creates a PcloudPvminstancesNetworksGetNotFound with default headers values +func NewPcloudPvminstancesNetworksGetNotFound() *PcloudPvminstancesNetworksGetNotFound { + return &PcloudPvminstancesNetworksGetNotFound{} +} + +/*PcloudPvminstancesNetworksGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudPvminstancesNetworksGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks/{network_id}][%d] pcloudPvminstancesNetworksGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesNetworksGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksGetInternalServerError creates a PcloudPvminstancesNetworksGetInternalServerError with default headers values +func NewPcloudPvminstancesNetworksGetInternalServerError() *PcloudPvminstancesNetworksGetInternalServerError { + return &PcloudPvminstancesNetworksGetInternalServerError{} +} + +/*PcloudPvminstancesNetworksGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesNetworksGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks/{network_id}][%d] pcloudPvminstancesNetworksGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesNetworksGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_getall_parameters.go new file mode 100644 index 00000000000..c3d716a349e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_getall_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesNetworksGetallParams creates a new PcloudPvminstancesNetworksGetallParams object +// with the default values initialized. 
+func NewPcloudPvminstancesNetworksGetallParams() *PcloudPvminstancesNetworksGetallParams { + var () + return &PcloudPvminstancesNetworksGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesNetworksGetallParamsWithTimeout creates a new PcloudPvminstancesNetworksGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesNetworksGetallParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesNetworksGetallParams { + var () + return &PcloudPvminstancesNetworksGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesNetworksGetallParamsWithContext creates a new PcloudPvminstancesNetworksGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesNetworksGetallParamsWithContext(ctx context.Context) *PcloudPvminstancesNetworksGetallParams { + var () + return &PcloudPvminstancesNetworksGetallParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesNetworksGetallParamsWithHTTPClient creates a new PcloudPvminstancesNetworksGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesNetworksGetallParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksGetallParams { + var () + return &PcloudPvminstancesNetworksGetallParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesNetworksGetallParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances networks getall operation typically these are written to a http.Request +*/ +type PcloudPvminstancesNetworksGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesNetworksGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) WithContext(ctx context.Context) *PcloudPvminstancesNetworksGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesNetworksGetallParams { + 
o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesNetworksGetallParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances networks getall params +func (o *PcloudPvminstancesNetworksGetallParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesNetworksGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_getall_responses.go new file mode 100644 index 00000000000..df7783e3046 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_getall_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesNetworksGetallReader is a Reader for the PcloudPvminstancesNetworksGetall structure. +type PcloudPvminstancesNetworksGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
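// NOTE: Editorial usage sketch only; not part of the vendored, generated file.
// It assumes the generated Client exposes PcloudPvminstancesNetworksGetall(params,
// authInfo). Because the Reader below returns non-2xx responses as the error
// value, callers can type-assert the error to recover the structured payload.
func exampleNetworksGetall(c *Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID, pvmInstanceID string) (*models.PVMInstanceNetworks, error) {
	params := NewPcloudPvminstancesNetworksGetallParams().
		WithCloudInstanceID(cloudInstanceID).
		WithPvmInstanceID(pvmInstanceID)
	ok, err := c.PcloudPvminstancesNetworksGetall(params, authInfo)
	if err != nil {
		if bad, isBad := err.(*PcloudPvminstancesNetworksGetallBadRequest); isBad {
			// bad.Payload is a *models.Error carrying the service's structured error.
			return nil, fmt.Errorf("bad request listing networks: %+v", bad.Payload)
		}
		return nil, err
	}
	return ok.Payload, nil
}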
+func (o *PcloudPvminstancesNetworksGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesNetworksGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesNetworksGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesNetworksGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesNetworksGetallOK creates a PcloudPvminstancesNetworksGetallOK with default headers values +func NewPcloudPvminstancesNetworksGetallOK() *PcloudPvminstancesNetworksGetallOK { + return &PcloudPvminstancesNetworksGetallOK{} +} + +/*PcloudPvminstancesNetworksGetallOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesNetworksGetallOK struct { + Payload *models.PVMInstanceNetworks +} + +func (o *PcloudPvminstancesNetworksGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks][%d] pcloudPvminstancesNetworksGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesNetworksGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.PVMInstanceNetworks) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksGetallBadRequest creates a PcloudPvminstancesNetworksGetallBadRequest with default headers values +func NewPcloudPvminstancesNetworksGetallBadRequest() *PcloudPvminstancesNetworksGetallBadRequest { + return &PcloudPvminstancesNetworksGetallBadRequest{} +} + +/*PcloudPvminstancesNetworksGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudPvminstancesNetworksGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks][%d] pcloudPvminstancesNetworksGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesNetworksGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksGetallInternalServerError creates a PcloudPvminstancesNetworksGetallInternalServerError with default headers values +func NewPcloudPvminstancesNetworksGetallInternalServerError() *PcloudPvminstancesNetworksGetallInternalServerError { + return &PcloudPvminstancesNetworksGetallInternalServerError{} +} + +/*PcloudPvminstancesNetworksGetallInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesNetworksGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks][%d] pcloudPvminstancesNetworksGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesNetworksGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_post_parameters.go new file mode 100644 index 00000000000..21e0976059d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesNetworksPostParams creates a new PcloudPvminstancesNetworksPostParams object +// with the default values initialized. 
+func NewPcloudPvminstancesNetworksPostParams() *PcloudPvminstancesNetworksPostParams { + var () + return &PcloudPvminstancesNetworksPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesNetworksPostParamsWithTimeout creates a new PcloudPvminstancesNetworksPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesNetworksPostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesNetworksPostParams { + var () + return &PcloudPvminstancesNetworksPostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesNetworksPostParamsWithContext creates a new PcloudPvminstancesNetworksPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesNetworksPostParamsWithContext(ctx context.Context) *PcloudPvminstancesNetworksPostParams { + var () + return &PcloudPvminstancesNetworksPostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesNetworksPostParamsWithHTTPClient creates a new PcloudPvminstancesNetworksPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesNetworksPostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksPostParams { + var () + return &PcloudPvminstancesNetworksPostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesNetworksPostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances networks post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesNetworksPostParams struct { + + /*Body + Add network to PVM Instance parameters + + */ + Body *models.PVMInstanceAddNetwork + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesNetworksPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) WithContext(ctx context.Context) *PcloudPvminstancesNetworksPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) WithBody(body *models.PVMInstanceAddNetwork) *PcloudPvminstancesNetworksPostParams { + o.SetBody(body) + 
return o +} + +// SetBody adds the body to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) SetBody(body *models.PVMInstanceAddNetwork) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesNetworksPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesNetworksPostParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances networks post params +func (o *PcloudPvminstancesNetworksPostParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesNetworksPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_post_responses.go new file mode 100644 index 00000000000..afc778fd122 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_networks_post_responses.go @@ -0,0 +1,211 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesNetworksPostReader is a Reader for the PcloudPvminstancesNetworksPost structure. +type PcloudPvminstancesNetworksPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
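// NOTE: Editorial usage sketch only; not part of the vendored, generated file.
// It assumes the generated Client exposes PcloudPvminstancesNetworksPost(params,
// authInfo), and the models.PVMInstanceAddNetwork field name used below is an
// assumption to be checked against the models package.
func exampleNetworksPost(c *Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID, pvmInstanceID, networkID string) (*models.PVMInstanceNetwork, error) {
	params := NewPcloudPvminstancesNetworksPostParams().
		WithCloudInstanceID(cloudInstanceID).
		WithPvmInstanceID(pvmInstanceID).
		WithBody(&models.PVMInstanceAddNetwork{
			NetworkID: &networkID, // assumed required field; IP assignment left to the service
		})
	created, err := c.PcloudPvminstancesNetworksPost(params, authInfo)
	if err != nil {
		// 400, 409, 422 and 500 arrive as typed errors from the Reader below.
		return nil, err
	}
	return created.Payload, nil
}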
+func (o *PcloudPvminstancesNetworksPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 201: + result := NewPcloudPvminstancesNetworksPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesNetworksPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudPvminstancesNetworksPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudPvminstancesNetworksPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesNetworksPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesNetworksPostCreated creates a PcloudPvminstancesNetworksPostCreated with default headers values +func NewPcloudPvminstancesNetworksPostCreated() *PcloudPvminstancesNetworksPostCreated { + return &PcloudPvminstancesNetworksPostCreated{} +} + +/*PcloudPvminstancesNetworksPostCreated handles this case with default header values. + +Created +*/ +type PcloudPvminstancesNetworksPostCreated struct { + Payload *models.PVMInstanceNetwork +} + +func (o *PcloudPvminstancesNetworksPostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks][%d] pcloudPvminstancesNetworksPostCreated %+v", 201, o.Payload) +} + +func (o *PcloudPvminstancesNetworksPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.PVMInstanceNetwork) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksPostBadRequest creates a PcloudPvminstancesNetworksPostBadRequest with default headers values +func NewPcloudPvminstancesNetworksPostBadRequest() *PcloudPvminstancesNetworksPostBadRequest { + return &PcloudPvminstancesNetworksPostBadRequest{} +} + +/*PcloudPvminstancesNetworksPostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesNetworksPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks][%d] pcloudPvminstancesNetworksPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesNetworksPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksPostConflict creates a PcloudPvminstancesNetworksPostConflict with default headers values +func NewPcloudPvminstancesNetworksPostConflict() *PcloudPvminstancesNetworksPostConflict { + return &PcloudPvminstancesNetworksPostConflict{} +} + +/*PcloudPvminstancesNetworksPostConflict handles this case with default header values. + +Conflict +*/ +type PcloudPvminstancesNetworksPostConflict struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks][%d] pcloudPvminstancesNetworksPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudPvminstancesNetworksPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksPostUnprocessableEntity creates a PcloudPvminstancesNetworksPostUnprocessableEntity with default headers values +func NewPcloudPvminstancesNetworksPostUnprocessableEntity() *PcloudPvminstancesNetworksPostUnprocessableEntity { + return &PcloudPvminstancesNetworksPostUnprocessableEntity{} +} + +/*PcloudPvminstancesNetworksPostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudPvminstancesNetworksPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks][%d] pcloudPvminstancesNetworksPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudPvminstancesNetworksPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesNetworksPostInternalServerError creates a PcloudPvminstancesNetworksPostInternalServerError with default headers values +func NewPcloudPvminstancesNetworksPostInternalServerError() *PcloudPvminstancesNetworksPostInternalServerError { + return &PcloudPvminstancesNetworksPostInternalServerError{} +} + +/*PcloudPvminstancesNetworksPostInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesNetworksPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesNetworksPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/networks][%d] pcloudPvminstancesNetworksPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesNetworksPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_operations_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_operations_post_parameters.go new file mode 100644 index 00000000000..28041b31952 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_operations_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesOperationsPostParams creates a new PcloudPvminstancesOperationsPostParams object +// with the default values initialized. 
+func NewPcloudPvminstancesOperationsPostParams() *PcloudPvminstancesOperationsPostParams { + var () + return &PcloudPvminstancesOperationsPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesOperationsPostParamsWithTimeout creates a new PcloudPvminstancesOperationsPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesOperationsPostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesOperationsPostParams { + var () + return &PcloudPvminstancesOperationsPostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesOperationsPostParamsWithContext creates a new PcloudPvminstancesOperationsPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesOperationsPostParamsWithContext(ctx context.Context) *PcloudPvminstancesOperationsPostParams { + var () + return &PcloudPvminstancesOperationsPostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesOperationsPostParamsWithHTTPClient creates a new PcloudPvminstancesOperationsPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesOperationsPostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesOperationsPostParams { + var () + return &PcloudPvminstancesOperationsPostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesOperationsPostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances operations post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesOperationsPostParams struct { + + /*Body + Parameters for the desired operations + + */ + Body *models.PVMInstanceOperation + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesOperationsPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) WithContext(ctx context.Context) *PcloudPvminstancesOperationsPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesOperationsPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) WithBody(body 
*models.PVMInstanceOperation) *PcloudPvminstancesOperationsPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) SetBody(body *models.PVMInstanceOperation) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesOperationsPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesOperationsPostParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances operations post params +func (o *PcloudPvminstancesOperationsPostParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesOperationsPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_operations_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_operations_post_responses.go new file mode 100644 index 00000000000..dfa55ca6b69 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_operations_post_responses.go @@ -0,0 +1,209 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesOperationsPostReader is a Reader for the PcloudPvminstancesOperationsPost structure. +type PcloudPvminstancesOperationsPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
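// NOTE: Editorial usage sketch only; not part of the vendored, generated file.
// It assumes the generated Client exposes PcloudPvminstancesOperationsPost(params,
// authInfo). The operation body is taken as an argument because the exact
// models.PVMInstanceOperation fields are defined in the models package, not here.
func exampleOperationsPost(c *Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID, pvmInstanceID string, body *models.PVMInstanceOperation) (models.Object, error) {
	params := NewPcloudPvminstancesOperationsPostParams().
		WithCloudInstanceID(cloudInstanceID).
		WithPvmInstanceID(pvmInstanceID).
		WithBody(body)
	ok, err := c.PcloudPvminstancesOperationsPost(params, authInfo)
	if err != nil {
		// 400, 404, 422 and 500 arrive as typed errors from the Reader below.
		return nil, err
	}
	// On success (200) the payload is an untyped models.Object.
	return ok.Payload, nil
}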
+func (o *PcloudPvminstancesOperationsPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesOperationsPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesOperationsPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudPvminstancesOperationsPostNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudPvminstancesOperationsPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesOperationsPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesOperationsPostOK creates a PcloudPvminstancesOperationsPostOK with default headers values +func NewPcloudPvminstancesOperationsPostOK() *PcloudPvminstancesOperationsPostOK { + return &PcloudPvminstancesOperationsPostOK{} +} + +/*PcloudPvminstancesOperationsPostOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesOperationsPostOK struct { + Payload models.Object +} + +func (o *PcloudPvminstancesOperationsPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/operations][%d] pcloudPvminstancesOperationsPostOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesOperationsPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesOperationsPostBadRequest creates a PcloudPvminstancesOperationsPostBadRequest with default headers values +func NewPcloudPvminstancesOperationsPostBadRequest() *PcloudPvminstancesOperationsPostBadRequest { + return &PcloudPvminstancesOperationsPostBadRequest{} +} + +/*PcloudPvminstancesOperationsPostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesOperationsPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesOperationsPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/operations][%d] pcloudPvminstancesOperationsPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesOperationsPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesOperationsPostNotFound creates a PcloudPvminstancesOperationsPostNotFound with default headers values +func NewPcloudPvminstancesOperationsPostNotFound() *PcloudPvminstancesOperationsPostNotFound { + return &PcloudPvminstancesOperationsPostNotFound{} +} + +/*PcloudPvminstancesOperationsPostNotFound handles this case with default header values. + +Not Found +*/ +type PcloudPvminstancesOperationsPostNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesOperationsPostNotFound) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/operations][%d] pcloudPvminstancesOperationsPostNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesOperationsPostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesOperationsPostUnprocessableEntity creates a PcloudPvminstancesOperationsPostUnprocessableEntity with default headers values +func NewPcloudPvminstancesOperationsPostUnprocessableEntity() *PcloudPvminstancesOperationsPostUnprocessableEntity { + return &PcloudPvminstancesOperationsPostUnprocessableEntity{} +} + +/*PcloudPvminstancesOperationsPostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudPvminstancesOperationsPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesOperationsPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/operations][%d] pcloudPvminstancesOperationsPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudPvminstancesOperationsPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesOperationsPostInternalServerError creates a PcloudPvminstancesOperationsPostInternalServerError with default headers values +func NewPcloudPvminstancesOperationsPostInternalServerError() *PcloudPvminstancesOperationsPostInternalServerError { + return &PcloudPvminstancesOperationsPostInternalServerError{} +} + +/*PcloudPvminstancesOperationsPostInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesOperationsPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesOperationsPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/operations][%d] pcloudPvminstancesOperationsPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesOperationsPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_post_parameters.go new file mode 100644 index 00000000000..3501f48eb05 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_post_parameters.go @@ -0,0 +1,193 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesPostParams creates a new PcloudPvminstancesPostParams object +// with the default values initialized. 
+func NewPcloudPvminstancesPostParams() *PcloudPvminstancesPostParams { + var () + return &PcloudPvminstancesPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesPostParamsWithTimeout creates a new PcloudPvminstancesPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesPostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesPostParams { + var () + return &PcloudPvminstancesPostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesPostParamsWithContext creates a new PcloudPvminstancesPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesPostParamsWithContext(ctx context.Context) *PcloudPvminstancesPostParams { + var () + return &PcloudPvminstancesPostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesPostParamsWithHTTPClient creates a new PcloudPvminstancesPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesPostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesPostParams { + var () + return &PcloudPvminstancesPostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesPostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesPostParams struct { + + /*Body + Parameters for the creation of a new tenant + + */ + Body *models.PVMInstanceCreate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*SkipHostValidation + Option to skip host validation on PVMInstance Create API + + */ + SkipHostValidation *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) WithContext(ctx context.Context) *PcloudPvminstancesPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) WithBody(body *models.PVMInstanceCreate) *PcloudPvminstancesPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) SetBody(body *models.PVMInstanceCreate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances post params +func (o 
*PcloudPvminstancesPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithSkipHostValidation adds the skipHostValidation to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) WithSkipHostValidation(skipHostValidation *bool) *PcloudPvminstancesPostParams { + o.SetSkipHostValidation(skipHostValidation) + return o +} + +// SetSkipHostValidation adds the skipHostValidation to the pcloud pvminstances post params +func (o *PcloudPvminstancesPostParams) SetSkipHostValidation(skipHostValidation *bool) { + o.SkipHostValidation = skipHostValidation +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if o.SkipHostValidation != nil { + + // query param skipHostValidation + var qrSkipHostValidation bool + if o.SkipHostValidation != nil { + qrSkipHostValidation = *o.SkipHostValidation + } + qSkipHostValidation := swag.FormatBool(qrSkipHostValidation) + if qSkipHostValidation != "" { + if err := r.SetQueryParam("skipHostValidation", qSkipHostValidation); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_post_responses.go new file mode 100644 index 00000000000..aba6a62e65a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_post_responses.go @@ -0,0 +1,277 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesPostReader is a Reader for the PcloudPvminstancesPost structure. +type PcloudPvminstancesPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
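// NOTE: Editorial usage sketch only; not part of the vendored, generated file.
// It assumes the generated Client follows go-swagger's convention for
// multi-status operations and returns one value per success code, i.e.
// PcloudPvminstancesPost(params, authInfo) (*PcloudPvminstancesPostOK,
// *PcloudPvminstancesPostCreated, *PcloudPvminstancesPostAccepted, error);
// verify against the client file elsewhere in this change.
func examplePvminstancesPost(c *Client, authInfo runtime.ClientAuthInfoWriter, cloudInstanceID string, body *models.PVMInstanceCreate) (models.PVMInstanceList, error) {
	skipHostValidation := true // sent as the skipHostValidation query parameter
	params := NewPcloudPvminstancesPostParams().
		WithCloudInstanceID(cloudInstanceID).
		WithBody(body).
		WithSkipHostValidation(&skipHostValidation)
	ok, created, accepted, err := c.PcloudPvminstancesPost(params, authInfo)
	if err != nil {
		return nil, err
	}
	// The service may answer 200, 201 or 202; all three carry a PVMInstanceList.
	switch {
	case ok != nil:
		return ok.Payload, nil
	case created != nil:
		return created.Payload, nil
	default:
		return accepted.Payload, nil
	}
}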
+func (o *PcloudPvminstancesPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 201: + result := NewPcloudPvminstancesPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewPcloudPvminstancesPostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudPvminstancesPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudPvminstancesPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesPostOK creates a PcloudPvminstancesPostOK with default headers values +func NewPcloudPvminstancesPostOK() *PcloudPvminstancesPostOK { + return &PcloudPvminstancesPostOK{} +} + +/*PcloudPvminstancesPostOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesPostOK struct { + Payload models.PVMInstanceList +} + +func (o *PcloudPvminstancesPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesPostOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesPostCreated creates a PcloudPvminstancesPostCreated with default headers values +func NewPcloudPvminstancesPostCreated() *PcloudPvminstancesPostCreated { + return &PcloudPvminstancesPostCreated{} +} + +/*PcloudPvminstancesPostCreated handles this case with default header values. 
+ +Created +*/ +type PcloudPvminstancesPostCreated struct { + Payload models.PVMInstanceList +} + +func (o *PcloudPvminstancesPostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesPostCreated %+v", 201, o.Payload) +} + +func (o *PcloudPvminstancesPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesPostAccepted creates a PcloudPvminstancesPostAccepted with default headers values +func NewPcloudPvminstancesPostAccepted() *PcloudPvminstancesPostAccepted { + return &PcloudPvminstancesPostAccepted{} +} + +/*PcloudPvminstancesPostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudPvminstancesPostAccepted struct { + Payload models.PVMInstanceList +} + +func (o *PcloudPvminstancesPostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesPostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudPvminstancesPostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesPostBadRequest creates a PcloudPvminstancesPostBadRequest with default headers values +func NewPcloudPvminstancesPostBadRequest() *PcloudPvminstancesPostBadRequest { + return &PcloudPvminstancesPostBadRequest{} +} + +/*PcloudPvminstancesPostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudPvminstancesPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesPostConflict creates a PcloudPvminstancesPostConflict with default headers values +func NewPcloudPvminstancesPostConflict() *PcloudPvminstancesPostConflict { + return &PcloudPvminstancesPostConflict{} +} + +/*PcloudPvminstancesPostConflict handles this case with default header values. 
+ +Conflict +*/ +type PcloudPvminstancesPostConflict struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudPvminstancesPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesPostUnprocessableEntity creates a PcloudPvminstancesPostUnprocessableEntity with default headers values +func NewPcloudPvminstancesPostUnprocessableEntity() *PcloudPvminstancesPostUnprocessableEntity { + return &PcloudPvminstancesPostUnprocessableEntity{} +} + +/*PcloudPvminstancesPostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudPvminstancesPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudPvminstancesPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesPostInternalServerError creates a PcloudPvminstancesPostInternalServerError with default headers values +func NewPcloudPvminstancesPostInternalServerError() *PcloudPvminstancesPostInternalServerError { + return &PcloudPvminstancesPostInternalServerError{} +} + +/*PcloudPvminstancesPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances][%d] pcloudPvminstancesPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_put_parameters.go new file mode 100644 index 00000000000..cf5ee58bed3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_put_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesPutParams creates a new PcloudPvminstancesPutParams object +// with the default values initialized. +func NewPcloudPvminstancesPutParams() *PcloudPvminstancesPutParams { + var () + return &PcloudPvminstancesPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesPutParamsWithTimeout creates a new PcloudPvminstancesPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesPutParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesPutParams { + var () + return &PcloudPvminstancesPutParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesPutParamsWithContext creates a new PcloudPvminstancesPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesPutParamsWithContext(ctx context.Context) *PcloudPvminstancesPutParams { + var () + return &PcloudPvminstancesPutParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesPutParamsWithHTTPClient creates a new PcloudPvminstancesPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesPutParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesPutParams { + var () + return &PcloudPvminstancesPutParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesPutParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances put operation typically these are written to a http.Request +*/ +type PcloudPvminstancesPutParams struct { + + /*Body + Parameters to update a PCloud PVM Instance + + */ + Body *models.PVMInstanceUpdate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) WithContext(ctx context.Context) *PcloudPvminstancesPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances put params +func (o 
*PcloudPvminstancesPutParams) WithBody(body *models.PVMInstanceUpdate) *PcloudPvminstancesPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) SetBody(body *models.PVMInstanceUpdate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesPutParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesPutParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances put params +func (o *PcloudPvminstancesPutParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_put_responses.go new file mode 100644 index 00000000000..31aefdf0e6d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_put_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesPutReader is a Reader for the PcloudPvminstancesPut structure. +type PcloudPvminstancesPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudPvminstancesPutAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudPvminstancesPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesPutAccepted creates a PcloudPvminstancesPutAccepted with default headers values +func NewPcloudPvminstancesPutAccepted() *PcloudPvminstancesPutAccepted { + return &PcloudPvminstancesPutAccepted{} +} + +/*PcloudPvminstancesPutAccepted handles this case with default header values. + +Accepted (this is a long running operation) +*/ +type PcloudPvminstancesPutAccepted struct { + Payload *models.PVMInstanceUpdateResponse +} + +func (o *PcloudPvminstancesPutAccepted) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesPutAccepted %+v", 202, o.Payload) +} + +func (o *PcloudPvminstancesPutAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.PVMInstanceUpdateResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesPutBadRequest creates a PcloudPvminstancesPutBadRequest with default headers values +func NewPcloudPvminstancesPutBadRequest() *PcloudPvminstancesPutBadRequest { + return &PcloudPvminstancesPutBadRequest{} +} + +/*PcloudPvminstancesPutBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudPvminstancesPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesPutUnprocessableEntity creates a PcloudPvminstancesPutUnprocessableEntity with default headers values +func NewPcloudPvminstancesPutUnprocessableEntity() *PcloudPvminstancesPutUnprocessableEntity { + return &PcloudPvminstancesPutUnprocessableEntity{} +} + +/*PcloudPvminstancesPutUnprocessableEntity handles this case with default header values. 
+ +Unprocessable Entity +*/ +type PcloudPvminstancesPutUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudPvminstancesPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesPutInternalServerError creates a PcloudPvminstancesPutInternalServerError with default headers values +func NewPcloudPvminstancesPutInternalServerError() *PcloudPvminstancesPutInternalServerError { + return &PcloudPvminstancesPutInternalServerError{} +} + +/*PcloudPvminstancesPutInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}][%d] pcloudPvminstancesPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_getall_parameters.go new file mode 100644 index 00000000000..de156083025 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_getall_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesSnapshotsGetallParams creates a new PcloudPvminstancesSnapshotsGetallParams object +// with the default values initialized. 
+func NewPcloudPvminstancesSnapshotsGetallParams() *PcloudPvminstancesSnapshotsGetallParams { + var () + return &PcloudPvminstancesSnapshotsGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesSnapshotsGetallParamsWithTimeout creates a new PcloudPvminstancesSnapshotsGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesSnapshotsGetallParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesSnapshotsGetallParams { + var () + return &PcloudPvminstancesSnapshotsGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesSnapshotsGetallParamsWithContext creates a new PcloudPvminstancesSnapshotsGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesSnapshotsGetallParamsWithContext(ctx context.Context) *PcloudPvminstancesSnapshotsGetallParams { + var () + return &PcloudPvminstancesSnapshotsGetallParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesSnapshotsGetallParamsWithHTTPClient creates a new PcloudPvminstancesSnapshotsGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesSnapshotsGetallParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesSnapshotsGetallParams { + var () + return &PcloudPvminstancesSnapshotsGetallParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesSnapshotsGetallParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances snapshots getall operation typically these are written to a http.Request +*/ +type PcloudPvminstancesSnapshotsGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesSnapshotsGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) WithContext(ctx context.Context) *PcloudPvminstancesSnapshotsGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesSnapshotsGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) WithCloudInstanceID(cloudInstanceID string) 
*PcloudPvminstancesSnapshotsGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesSnapshotsGetallParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances snapshots getall params +func (o *PcloudPvminstancesSnapshotsGetallParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesSnapshotsGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_getall_responses.go new file mode 100644 index 00000000000..5b3a92355a0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_getall_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesSnapshotsGetallReader is a Reader for the PcloudPvminstancesSnapshotsGetall structure. +type PcloudPvminstancesSnapshotsGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesSnapshotsGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesSnapshotsGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesSnapshotsGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesSnapshotsGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesSnapshotsGetallOK creates a PcloudPvminstancesSnapshotsGetallOK with default headers values +func NewPcloudPvminstancesSnapshotsGetallOK() *PcloudPvminstancesSnapshotsGetallOK { + return &PcloudPvminstancesSnapshotsGetallOK{} +} + +/*PcloudPvminstancesSnapshotsGetallOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesSnapshotsGetallOK struct { + Payload *models.Snapshots +} + +func (o *PcloudPvminstancesSnapshotsGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots][%d] pcloudPvminstancesSnapshotsGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Snapshots) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesSnapshotsGetallBadRequest creates a PcloudPvminstancesSnapshotsGetallBadRequest with default headers values +func NewPcloudPvminstancesSnapshotsGetallBadRequest() *PcloudPvminstancesSnapshotsGetallBadRequest { + return &PcloudPvminstancesSnapshotsGetallBadRequest{} +} + +/*PcloudPvminstancesSnapshotsGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudPvminstancesSnapshotsGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesSnapshotsGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots][%d] pcloudPvminstancesSnapshotsGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesSnapshotsGetallInternalServerError creates a PcloudPvminstancesSnapshotsGetallInternalServerError with default headers values +func NewPcloudPvminstancesSnapshotsGetallInternalServerError() *PcloudPvminstancesSnapshotsGetallInternalServerError { + return &PcloudPvminstancesSnapshotsGetallInternalServerError{} +} + +/*PcloudPvminstancesSnapshotsGetallInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesSnapshotsGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesSnapshotsGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots][%d] pcloudPvminstancesSnapshotsGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_post_parameters.go new file mode 100644 index 00000000000..512b492bc96 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesSnapshotsPostParams creates a new PcloudPvminstancesSnapshotsPostParams object +// with the default values initialized. 
+func NewPcloudPvminstancesSnapshotsPostParams() *PcloudPvminstancesSnapshotsPostParams { + var () + return &PcloudPvminstancesSnapshotsPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesSnapshotsPostParamsWithTimeout creates a new PcloudPvminstancesSnapshotsPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesSnapshotsPostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesSnapshotsPostParams { + var () + return &PcloudPvminstancesSnapshotsPostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesSnapshotsPostParamsWithContext creates a new PcloudPvminstancesSnapshotsPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesSnapshotsPostParamsWithContext(ctx context.Context) *PcloudPvminstancesSnapshotsPostParams { + var () + return &PcloudPvminstancesSnapshotsPostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesSnapshotsPostParamsWithHTTPClient creates a new PcloudPvminstancesSnapshotsPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesSnapshotsPostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesSnapshotsPostParams { + var () + return &PcloudPvminstancesSnapshotsPostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesSnapshotsPostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances snapshots post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesSnapshotsPostParams struct { + + /*Body + PVM Instance snapshot create parameters + + */ + Body *models.SnapshotCreate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesSnapshotsPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) WithContext(ctx context.Context) *PcloudPvminstancesSnapshotsPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesSnapshotsPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) WithBody(body *models.SnapshotCreate) 
*PcloudPvminstancesSnapshotsPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) SetBody(body *models.SnapshotCreate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesSnapshotsPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesSnapshotsPostParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances snapshots post params +func (o *PcloudPvminstancesSnapshotsPostParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesSnapshotsPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_post_responses.go new file mode 100644 index 00000000000..3adea85a34b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_post_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesSnapshotsPostReader is a Reader for the PcloudPvminstancesSnapshotsPost structure. +type PcloudPvminstancesSnapshotsPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesSnapshotsPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudPvminstancesSnapshotsPostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesSnapshotsPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudPvminstancesSnapshotsPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesSnapshotsPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesSnapshotsPostAccepted creates a PcloudPvminstancesSnapshotsPostAccepted with default headers values +func NewPcloudPvminstancesSnapshotsPostAccepted() *PcloudPvminstancesSnapshotsPostAccepted { + return &PcloudPvminstancesSnapshotsPostAccepted{} +} + +/*PcloudPvminstancesSnapshotsPostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudPvminstancesSnapshotsPostAccepted struct { + Payload *models.SnapshotCreateResponse +} + +func (o *PcloudPvminstancesSnapshotsPostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots][%d] pcloudPvminstancesSnapshotsPostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsPostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.SnapshotCreateResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesSnapshotsPostBadRequest creates a PcloudPvminstancesSnapshotsPostBadRequest with default headers values +func NewPcloudPvminstancesSnapshotsPostBadRequest() *PcloudPvminstancesSnapshotsPostBadRequest { + return &PcloudPvminstancesSnapshotsPostBadRequest{} +} + +/*PcloudPvminstancesSnapshotsPostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesSnapshotsPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesSnapshotsPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots][%d] pcloudPvminstancesSnapshotsPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesSnapshotsPostConflict creates a PcloudPvminstancesSnapshotsPostConflict with default headers values +func NewPcloudPvminstancesSnapshotsPostConflict() *PcloudPvminstancesSnapshotsPostConflict { + return &PcloudPvminstancesSnapshotsPostConflict{} +} + +/*PcloudPvminstancesSnapshotsPostConflict handles this case with default header values. + +Conflict +*/ +type PcloudPvminstancesSnapshotsPostConflict struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesSnapshotsPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots][%d] pcloudPvminstancesSnapshotsPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesSnapshotsPostInternalServerError creates a PcloudPvminstancesSnapshotsPostInternalServerError with default headers values +func NewPcloudPvminstancesSnapshotsPostInternalServerError() *PcloudPvminstancesSnapshotsPostInternalServerError { + return &PcloudPvminstancesSnapshotsPostInternalServerError{} +} + +/*PcloudPvminstancesSnapshotsPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesSnapshotsPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesSnapshotsPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots][%d] pcloudPvminstancesSnapshotsPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_restore_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_restore_post_parameters.go new file mode 100644 index 00000000000..bbf792fb7c4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_restore_post_parameters.go @@ -0,0 +1,234 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesSnapshotsRestorePostParams creates a new PcloudPvminstancesSnapshotsRestorePostParams object +// with the default values initialized. +func NewPcloudPvminstancesSnapshotsRestorePostParams() *PcloudPvminstancesSnapshotsRestorePostParams { + var () + return &PcloudPvminstancesSnapshotsRestorePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesSnapshotsRestorePostParamsWithTimeout creates a new PcloudPvminstancesSnapshotsRestorePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesSnapshotsRestorePostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesSnapshotsRestorePostParams { + var () + return &PcloudPvminstancesSnapshotsRestorePostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesSnapshotsRestorePostParamsWithContext creates a new PcloudPvminstancesSnapshotsRestorePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesSnapshotsRestorePostParamsWithContext(ctx context.Context) *PcloudPvminstancesSnapshotsRestorePostParams { + var () + return &PcloudPvminstancesSnapshotsRestorePostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesSnapshotsRestorePostParamsWithHTTPClient creates a new PcloudPvminstancesSnapshotsRestorePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesSnapshotsRestorePostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesSnapshotsRestorePostParams { + var () + return &PcloudPvminstancesSnapshotsRestorePostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesSnapshotsRestorePostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances snapshots restore post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesSnapshotsRestorePostParams struct { + + /*Body + PVM Instance snapshot restore parameters + + */ + Body *models.SnapshotRestore + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + /*RestoreFailAction + Action to take on a failed snapshot restore + + */ + RestoreFailAction *string + /*SnapshotID + PVM Instance snapshot id + + */ + SnapshotID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesSnapshotsRestorePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances snapshots restore post params +func (o 
*PcloudPvminstancesSnapshotsRestorePostParams) WithContext(ctx context.Context) *PcloudPvminstancesSnapshotsRestorePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesSnapshotsRestorePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) WithBody(body *models.SnapshotRestore) *PcloudPvminstancesSnapshotsRestorePostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetBody(body *models.SnapshotRestore) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesSnapshotsRestorePostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesSnapshotsRestorePostParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WithRestoreFailAction adds the restoreFailAction to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) WithRestoreFailAction(restoreFailAction *string) *PcloudPvminstancesSnapshotsRestorePostParams { + o.SetRestoreFailAction(restoreFailAction) + return o +} + +// SetRestoreFailAction adds the restoreFailAction to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetRestoreFailAction(restoreFailAction *string) { + o.RestoreFailAction = restoreFailAction +} + +// WithSnapshotID adds the snapshotID to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) WithSnapshotID(snapshotID string) *PcloudPvminstancesSnapshotsRestorePostParams { + o.SetSnapshotID(snapshotID) + return o +} + +// SetSnapshotID adds the snapshotId to the pcloud pvminstances snapshots restore post params +func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetSnapshotID(snapshotID string) { + o.SnapshotID = snapshotID +} + +// WriteToRequest writes these params to a swagger 
request +func (o *PcloudPvminstancesSnapshotsRestorePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if o.RestoreFailAction != nil { + + // query param restore_fail_action + var qrRestoreFailAction string + if o.RestoreFailAction != nil { + qrRestoreFailAction = *o.RestoreFailAction + } + qRestoreFailAction := qrRestoreFailAction + if qRestoreFailAction != "" { + if err := r.SetQueryParam("restore_fail_action", qRestoreFailAction); err != nil { + return err + } + } + + } + + // path param snapshot_id + if err := r.SetPathParam("snapshot_id", o.SnapshotID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_restore_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_restore_post_responses.go new file mode 100644 index 00000000000..91029793160 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances/pcloud_pvminstances_snapshots_restore_post_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_p_vm_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesSnapshotsRestorePostReader is a Reader for the PcloudPvminstancesSnapshotsRestorePost structure. +type PcloudPvminstancesSnapshotsRestorePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesSnapshotsRestorePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudPvminstancesSnapshotsRestorePostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesSnapshotsRestorePostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudPvminstancesSnapshotsRestorePostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesSnapshotsRestorePostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesSnapshotsRestorePostAccepted creates a PcloudPvminstancesSnapshotsRestorePostAccepted with default headers values +func NewPcloudPvminstancesSnapshotsRestorePostAccepted() *PcloudPvminstancesSnapshotsRestorePostAccepted { + return &PcloudPvminstancesSnapshotsRestorePostAccepted{} +} + +/*PcloudPvminstancesSnapshotsRestorePostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudPvminstancesSnapshotsRestorePostAccepted struct { + Payload *models.Snapshot +} + +func (o *PcloudPvminstancesSnapshotsRestorePostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots/{snapshot_id}/restore][%d] pcloudPvminstancesSnapshotsRestorePostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsRestorePostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Snapshot) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesSnapshotsRestorePostBadRequest creates a PcloudPvminstancesSnapshotsRestorePostBadRequest with default headers values +func NewPcloudPvminstancesSnapshotsRestorePostBadRequest() *PcloudPvminstancesSnapshotsRestorePostBadRequest { + return &PcloudPvminstancesSnapshotsRestorePostBadRequest{} +} + +/*PcloudPvminstancesSnapshotsRestorePostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesSnapshotsRestorePostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesSnapshotsRestorePostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots/{snapshot_id}/restore][%d] pcloudPvminstancesSnapshotsRestorePostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsRestorePostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesSnapshotsRestorePostConflict creates a PcloudPvminstancesSnapshotsRestorePostConflict with default headers values +func NewPcloudPvminstancesSnapshotsRestorePostConflict() *PcloudPvminstancesSnapshotsRestorePostConflict { + return &PcloudPvminstancesSnapshotsRestorePostConflict{} +} + +/*PcloudPvminstancesSnapshotsRestorePostConflict handles this case with default header values. + +Conflict +*/ +type PcloudPvminstancesSnapshotsRestorePostConflict struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesSnapshotsRestorePostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots/{snapshot_id}/restore][%d] pcloudPvminstancesSnapshotsRestorePostConflict %+v", 409, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsRestorePostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesSnapshotsRestorePostInternalServerError creates a PcloudPvminstancesSnapshotsRestorePostInternalServerError with default headers values +func NewPcloudPvminstancesSnapshotsRestorePostInternalServerError() *PcloudPvminstancesSnapshotsRestorePostInternalServerError { + return &PcloudPvminstancesSnapshotsRestorePostInternalServerError{} +} + +/*PcloudPvminstancesSnapshotsRestorePostInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesSnapshotsRestorePostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesSnapshotsRestorePostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/snapshots/{snapshot_id}/restore][%d] pcloudPvminstancesSnapshotsRestorePostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesSnapshotsRestorePostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/p_cloud_s_a_p_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/p_cloud_s_a_p_client.go new file mode 100644 index 00000000000..a6a75e1440b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/p_cloud_s_a_p_client.go @@ -0,0 +1,125 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_s_a_p + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud s a p API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud s a p API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudSapGet gets the information on an s a p profile +*/ +func (a *Client) PcloudSapGet(params *PcloudSapGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudSapGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudSapGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.sap.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/sap/{sap_profile_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudSapGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudSapGetOK), nil + +} + +/* +PcloudSapGetall gets list of s a p profiles +*/ +func (a *Client) PcloudSapGetall(params *PcloudSapGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudSapGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudSapGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.sap.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/sap", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudSapGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudSapGetallOK), nil + +} + +/* +PcloudSapPost creates a new s a 
p p VM instance +*/ +func (a *Client) PcloudSapPost(params *PcloudSapPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudSapPostOK, *PcloudSapPostCreated, *PcloudSapPostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudSapPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.sap.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/sap", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudSapPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, nil, err + } + switch value := result.(type) { + case *PcloudSapPostOK: + return value, nil, nil, nil + case *PcloudSapPostCreated: + return nil, value, nil, nil + case *PcloudSapPostAccepted: + return nil, nil, value, nil + } + return nil, nil, nil, nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_get_parameters.go new file mode 100644 index 00000000000..72ed1f22556 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_s_a_p + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudSapGetParams creates a new PcloudSapGetParams object +// with the default values initialized. 
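+//
+// Editorial sketch (not part of the generated file): one plausible way to wire
+// up the p cloud s a p client above and fetch a profile. Assumes a go-openapi
+// transport and an IAM bearer token already in hand; the host and IDs are
+// placeholders. (httptransport is github.com/go-openapi/runtime/client,
+// strfmt is github.com/go-openapi/strfmt.)
+//
+//	transport := httptransport.New("power-iaas.example.com", "/", []string{"https"})
+//	sapClient := p_cloud_s_a_p.New(transport, strfmt.Default)
+//	params := NewPcloudSapGetParams().
+//		WithCloudInstanceID("cloud-instance-id").
+//		WithSapProfileID("sap-profile-id")
+//	auth := httptransport.BearerToken(token) // token: assumed IAM bearer token
+//	ok, err := sapClient.PcloudSapGet(params, auth)
+//	if err == nil {
+//		_ = ok.Payload // *models.SAPProfile
+//	}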
+func NewPcloudSapGetParams() *PcloudSapGetParams { + var () + return &PcloudSapGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudSapGetParamsWithTimeout creates a new PcloudSapGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudSapGetParamsWithTimeout(timeout time.Duration) *PcloudSapGetParams { + var () + return &PcloudSapGetParams{ + + timeout: timeout, + } +} + +// NewPcloudSapGetParamsWithContext creates a new PcloudSapGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudSapGetParamsWithContext(ctx context.Context) *PcloudSapGetParams { + var () + return &PcloudSapGetParams{ + + Context: ctx, + } +} + +// NewPcloudSapGetParamsWithHTTPClient creates a new PcloudSapGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudSapGetParamsWithHTTPClient(client *http.Client) *PcloudSapGetParams { + var () + return &PcloudSapGetParams{ + HTTPClient: client, + } +} + +/*PcloudSapGetParams contains all the parameters to send to the API endpoint +for the pcloud sap get operation typically these are written to a http.Request +*/ +type PcloudSapGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*SapProfileID + SAP Profile ID + + */ + SapProfileID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud sap get params +func (o *PcloudSapGetParams) WithTimeout(timeout time.Duration) *PcloudSapGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud sap get params +func (o *PcloudSapGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud sap get params +func (o *PcloudSapGetParams) WithContext(ctx context.Context) *PcloudSapGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud sap get params +func (o *PcloudSapGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud sap get params +func (o *PcloudSapGetParams) WithHTTPClient(client *http.Client) *PcloudSapGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud sap get params +func (o *PcloudSapGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud sap get params +func (o *PcloudSapGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudSapGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud sap get params +func (o *PcloudSapGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithSapProfileID adds the sapProfileID to the pcloud sap get params +func (o *PcloudSapGetParams) WithSapProfileID(sapProfileID string) *PcloudSapGetParams { + o.SetSapProfileID(sapProfileID) + return o +} + +// SetSapProfileID adds the sapProfileId to the pcloud sap get params +func (o *PcloudSapGetParams) SetSapProfileID(sapProfileID string) { + o.SapProfileID = sapProfileID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudSapGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param sap_profile_id + if err := r.SetPathParam("sap_profile_id", o.SapProfileID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_get_responses.go new file mode 100644 index 00000000000..d6362f7d357 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_s_a_p + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudSapGetReader is a Reader for the PcloudSapGet structure. +type PcloudSapGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudSapGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudSapGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudSapGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudSapGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudSapGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudSapGetOK creates a PcloudSapGetOK with default headers values +func NewPcloudSapGetOK() *PcloudSapGetOK { + return &PcloudSapGetOK{} +} + +/*PcloudSapGetOK handles this case with default header values. + +OK +*/ +type PcloudSapGetOK struct { + Payload *models.SAPProfile +} + +func (o *PcloudSapGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/sap/{sap_profile_id}][%d] pcloudSapGetOK %+v", 200, o.Payload) +} + +func (o *PcloudSapGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.SAPProfile) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapGetBadRequest creates a PcloudSapGetBadRequest with default headers values +func NewPcloudSapGetBadRequest() *PcloudSapGetBadRequest { + return &PcloudSapGetBadRequest{} +} + +/*PcloudSapGetBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudSapGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudSapGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/sap/{sap_profile_id}][%d] pcloudSapGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudSapGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapGetNotFound creates a PcloudSapGetNotFound with default headers values +func NewPcloudSapGetNotFound() *PcloudSapGetNotFound { + return &PcloudSapGetNotFound{} +} + +/*PcloudSapGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudSapGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudSapGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/sap/{sap_profile_id}][%d] pcloudSapGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudSapGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapGetInternalServerError creates a PcloudSapGetInternalServerError with default headers values +func NewPcloudSapGetInternalServerError() *PcloudSapGetInternalServerError { + return &PcloudSapGetInternalServerError{} +} + +/*PcloudSapGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudSapGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudSapGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/sap/{sap_profile_id}][%d] pcloudSapGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudSapGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_getall_parameters.go new file mode 100644 index 00000000000..f68f39dd0e2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_getall_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_s_a_p + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudSapGetallParams creates a new PcloudSapGetallParams object +// with the default values initialized. 
+func NewPcloudSapGetallParams() *PcloudSapGetallParams { + var () + return &PcloudSapGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudSapGetallParamsWithTimeout creates a new PcloudSapGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudSapGetallParamsWithTimeout(timeout time.Duration) *PcloudSapGetallParams { + var () + return &PcloudSapGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudSapGetallParamsWithContext creates a new PcloudSapGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudSapGetallParamsWithContext(ctx context.Context) *PcloudSapGetallParams { + var () + return &PcloudSapGetallParams{ + + Context: ctx, + } +} + +// NewPcloudSapGetallParamsWithHTTPClient creates a new PcloudSapGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudSapGetallParamsWithHTTPClient(client *http.Client) *PcloudSapGetallParams { + var () + return &PcloudSapGetallParams{ + HTTPClient: client, + } +} + +/*PcloudSapGetallParams contains all the parameters to send to the API endpoint +for the pcloud sap getall operation typically these are written to a http.Request +*/ +type PcloudSapGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud sap getall params +func (o *PcloudSapGetallParams) WithTimeout(timeout time.Duration) *PcloudSapGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud sap getall params +func (o *PcloudSapGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud sap getall params +func (o *PcloudSapGetallParams) WithContext(ctx context.Context) *PcloudSapGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud sap getall params +func (o *PcloudSapGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud sap getall params +func (o *PcloudSapGetallParams) WithHTTPClient(client *http.Client) *PcloudSapGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud sap getall params +func (o *PcloudSapGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud sap getall params +func (o *PcloudSapGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudSapGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud sap getall params +func (o *PcloudSapGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudSapGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_getall_responses.go new file mode 100644 index 00000000000..e2cf74fc59f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_getall_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_s_a_p + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudSapGetallReader is a Reader for the PcloudSapGetall structure. +type PcloudSapGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudSapGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudSapGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudSapGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudSapGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudSapGetallOK creates a PcloudSapGetallOK with default headers values +func NewPcloudSapGetallOK() *PcloudSapGetallOK { + return &PcloudSapGetallOK{} +} + +/*PcloudSapGetallOK handles this case with default header values. + +OK +*/ +type PcloudSapGetallOK struct { + Payload *models.SAPProfiles +} + +func (o *PcloudSapGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudSapGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.SAPProfiles) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapGetallBadRequest creates a PcloudSapGetallBadRequest with default headers values +func NewPcloudSapGetallBadRequest() *PcloudSapGetallBadRequest { + return &PcloudSapGetallBadRequest{} +} + +/*PcloudSapGetallBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudSapGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudSapGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudSapGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapGetallInternalServerError creates a PcloudSapGetallInternalServerError with default headers values +func NewPcloudSapGetallInternalServerError() *PcloudSapGetallInternalServerError { + return &PcloudSapGetallInternalServerError{} +} + +/*PcloudSapGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudSapGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudSapGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudSapGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_post_parameters.go new file mode 100644 index 00000000000..91eea914882 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_s_a_p + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudSapPostParams creates a new PcloudSapPostParams object +// with the default values initialized. 
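+//
+// Editorial sketch (not part of the generated file): PcloudSapPost can answer
+// 200, 201, or 202, so the client returns three typed results; a caller might
+// handle it as below, with sapClient and auth as in the earlier sketch and the
+// request body fields left as placeholders:
+//
+//	ok, created, accepted, err := sapClient.PcloudSapPost(
+//		NewPcloudSapPostParams().
+//			WithCloudInstanceID("cloud-instance-id").
+//			WithBody(&models.SAPCreate{}),
+//		auth)
+//	switch {
+//	case err != nil:
+//		// transport failure or a typed non-2xx response
+//	case accepted != nil:
+//		_ = accepted.Payload // models.PVMInstanceList, creation still in progress
+//	case created != nil, ok != nil:
+//		// instance list returned synchronously
+//	}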
+func NewPcloudSapPostParams() *PcloudSapPostParams { + var () + return &PcloudSapPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudSapPostParamsWithTimeout creates a new PcloudSapPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudSapPostParamsWithTimeout(timeout time.Duration) *PcloudSapPostParams { + var () + return &PcloudSapPostParams{ + + timeout: timeout, + } +} + +// NewPcloudSapPostParamsWithContext creates a new PcloudSapPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudSapPostParamsWithContext(ctx context.Context) *PcloudSapPostParams { + var () + return &PcloudSapPostParams{ + + Context: ctx, + } +} + +// NewPcloudSapPostParamsWithHTTPClient creates a new PcloudSapPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudSapPostParamsWithHTTPClient(client *http.Client) *PcloudSapPostParams { + var () + return &PcloudSapPostParams{ + HTTPClient: client, + } +} + +/*PcloudSapPostParams contains all the parameters to send to the API endpoint +for the pcloud sap post operation typically these are written to a http.Request +*/ +type PcloudSapPostParams struct { + + /*Body + Parameters for the creation of a new SAP PVM Instance + + */ + Body *models.SAPCreate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud sap post params +func (o *PcloudSapPostParams) WithTimeout(timeout time.Duration) *PcloudSapPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud sap post params +func (o *PcloudSapPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud sap post params +func (o *PcloudSapPostParams) WithContext(ctx context.Context) *PcloudSapPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud sap post params +func (o *PcloudSapPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud sap post params +func (o *PcloudSapPostParams) WithHTTPClient(client *http.Client) *PcloudSapPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud sap post params +func (o *PcloudSapPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud sap post params +func (o *PcloudSapPostParams) WithBody(body *models.SAPCreate) *PcloudSapPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud sap post params +func (o *PcloudSapPostParams) SetBody(body *models.SAPCreate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud sap post params +func (o *PcloudSapPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudSapPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud sap post params +func (o *PcloudSapPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudSapPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if 
err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_post_responses.go new file mode 100644 index 00000000000..d3951686b07 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p/pcloud_sap_post_responses.go @@ -0,0 +1,277 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_s_a_p + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudSapPostReader is a Reader for the PcloudSapPost structure. +type PcloudSapPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudSapPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudSapPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 201: + result := NewPcloudSapPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewPcloudSapPostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudSapPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudSapPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudSapPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudSapPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudSapPostOK creates a PcloudSapPostOK with default headers values +func NewPcloudSapPostOK() *PcloudSapPostOK { + return &PcloudSapPostOK{} +} + +/*PcloudSapPostOK handles this case with default header values. 
+ +OK +*/ +type PcloudSapPostOK struct { + Payload models.PVMInstanceList +} + +func (o *PcloudSapPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapPostOK %+v", 200, o.Payload) +} + +func (o *PcloudSapPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapPostCreated creates a PcloudSapPostCreated with default headers values +func NewPcloudSapPostCreated() *PcloudSapPostCreated { + return &PcloudSapPostCreated{} +} + +/*PcloudSapPostCreated handles this case with default header values. + +Created +*/ +type PcloudSapPostCreated struct { + Payload models.PVMInstanceList +} + +func (o *PcloudSapPostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapPostCreated %+v", 201, o.Payload) +} + +func (o *PcloudSapPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapPostAccepted creates a PcloudSapPostAccepted with default headers values +func NewPcloudSapPostAccepted() *PcloudSapPostAccepted { + return &PcloudSapPostAccepted{} +} + +/*PcloudSapPostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudSapPostAccepted struct { + Payload models.PVMInstanceList +} + +func (o *PcloudSapPostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapPostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudSapPostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapPostBadRequest creates a PcloudSapPostBadRequest with default headers values +func NewPcloudSapPostBadRequest() *PcloudSapPostBadRequest { + return &PcloudSapPostBadRequest{} +} + +/*PcloudSapPostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudSapPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudSapPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudSapPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapPostConflict creates a PcloudSapPostConflict with default headers values +func NewPcloudSapPostConflict() *PcloudSapPostConflict { + return &PcloudSapPostConflict{} +} + +/*PcloudSapPostConflict handles this case with default header values. 
+ +Conflict +*/ +type PcloudSapPostConflict struct { + Payload *models.Error +} + +func (o *PcloudSapPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudSapPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapPostUnprocessableEntity creates a PcloudSapPostUnprocessableEntity with default headers values +func NewPcloudSapPostUnprocessableEntity() *PcloudSapPostUnprocessableEntity { + return &PcloudSapPostUnprocessableEntity{} +} + +/*PcloudSapPostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudSapPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudSapPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudSapPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSapPostInternalServerError creates a PcloudSapPostInternalServerError with default headers values +func NewPcloudSapPostInternalServerError() *PcloudSapPostInternalServerError { + return &PcloudSapPostInternalServerError{} +} + +/*PcloudSapPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudSapPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudSapPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/sap][%d] pcloudSapPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudSapPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/p_cloud_snapshots_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/p_cloud_snapshots_client.go new file mode 100644 index 00000000000..654163d37be --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/p_cloud_snapshots_client.go @@ -0,0 +1,146 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_snapshots + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud snapshots API client. 
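+//
+// Editorial sketch (not part of the generated file): constructed like the
+// other clients, e.g. reusing the transport and auth from the earlier
+// sketches, and then used to delete a PVM instance snapshot (IDs are
+// placeholders):
+//
+//	snapClient := p_cloud_snapshots.New(transport, strfmt.Default)
+//	del := NewPcloudCloudinstancesSnapshotsDeleteParams().
+//		WithCloudInstanceID("cloud-instance-id").
+//		WithSnapshotID("snapshot-id")
+//	accepted, err := snapClient.PcloudCloudinstancesSnapshotsDelete(del, auth)
+//	_ = accepted // 202 payload; err carries any typed non-2xx response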
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud snapshots API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudCloudinstancesSnapshotsDelete deletes a p VM instance snapshot of a cloud instance +*/ +func (a *Client) PcloudCloudinstancesSnapshotsDelete(params *PcloudCloudinstancesSnapshotsDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesSnapshotsDeleteAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesSnapshotsDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.snapshots.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesSnapshotsDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesSnapshotsDeleteAccepted), nil + +} + +/* +PcloudCloudinstancesSnapshotsGet gets the detail of a snapshot +*/ +func (a *Client) PcloudCloudinstancesSnapshotsGet(params *PcloudCloudinstancesSnapshotsGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesSnapshotsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesSnapshotsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.snapshots.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesSnapshotsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesSnapshotsGetOK), nil + +} + +/* +PcloudCloudinstancesSnapshotsGetall lists all p VM instance snapshots for this cloud instance +*/ +func (a *Client) PcloudCloudinstancesSnapshotsGetall(params *PcloudCloudinstancesSnapshotsGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesSnapshotsGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesSnapshotsGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.snapshots.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesSnapshotsGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesSnapshotsGetallOK), nil + +} + +/* +PcloudCloudinstancesSnapshotsPut updates a p VM instance snapshot +*/ +func (a *Client) PcloudCloudinstancesSnapshotsPut(params 
*PcloudCloudinstancesSnapshotsPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesSnapshotsPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesSnapshotsPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.snapshots.put", + Method: "PUT", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesSnapshotsPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesSnapshotsPutOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_delete_parameters.go new file mode 100644 index 00000000000..3845c354978 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_delete_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_snapshots + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesSnapshotsDeleteParams creates a new PcloudCloudinstancesSnapshotsDeleteParams object +// with the default values initialized. 
+func NewPcloudCloudinstancesSnapshotsDeleteParams() *PcloudCloudinstancesSnapshotsDeleteParams { + var () + return &PcloudCloudinstancesSnapshotsDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesSnapshotsDeleteParamsWithTimeout creates a new PcloudCloudinstancesSnapshotsDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesSnapshotsDeleteParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesSnapshotsDeleteParams { + var () + return &PcloudCloudinstancesSnapshotsDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesSnapshotsDeleteParamsWithContext creates a new PcloudCloudinstancesSnapshotsDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesSnapshotsDeleteParamsWithContext(ctx context.Context) *PcloudCloudinstancesSnapshotsDeleteParams { + var () + return &PcloudCloudinstancesSnapshotsDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesSnapshotsDeleteParamsWithHTTPClient creates a new PcloudCloudinstancesSnapshotsDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesSnapshotsDeleteParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesSnapshotsDeleteParams { + var () + return &PcloudCloudinstancesSnapshotsDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesSnapshotsDeleteParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances snapshots delete operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesSnapshotsDeleteParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*SnapshotID + PVM Instance snapshot id + + */ + SnapshotID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances snapshots delete params +func (o *PcloudCloudinstancesSnapshotsDeleteParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesSnapshotsDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances snapshots delete params +func (o *PcloudCloudinstancesSnapshotsDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances snapshots delete params +func (o *PcloudCloudinstancesSnapshotsDeleteParams) WithContext(ctx context.Context) *PcloudCloudinstancesSnapshotsDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances snapshots delete params +func (o *PcloudCloudinstancesSnapshotsDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances snapshots delete params +func (o *PcloudCloudinstancesSnapshotsDeleteParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesSnapshotsDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances snapshots delete params +func (o *PcloudCloudinstancesSnapshotsDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances snapshots delete params +func (o 
*PcloudCloudinstancesSnapshotsDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesSnapshotsDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances snapshots delete params +func (o *PcloudCloudinstancesSnapshotsDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithSnapshotID adds the snapshotID to the pcloud cloudinstances snapshots delete params +func (o *PcloudCloudinstancesSnapshotsDeleteParams) WithSnapshotID(snapshotID string) *PcloudCloudinstancesSnapshotsDeleteParams { + o.SetSnapshotID(snapshotID) + return o +} + +// SetSnapshotID adds the snapshotId to the pcloud cloudinstances snapshots delete params +func (o *PcloudCloudinstancesSnapshotsDeleteParams) SetSnapshotID(snapshotID string) { + o.SnapshotID = snapshotID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesSnapshotsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param snapshot_id + if err := r.SetPathParam("snapshot_id", o.SnapshotID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_delete_responses.go new file mode 100644 index 00000000000..d87794e51ec --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_snapshots + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesSnapshotsDeleteReader is a Reader for the PcloudCloudinstancesSnapshotsDelete structure. +type PcloudCloudinstancesSnapshotsDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
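+//
+// Editorial note (not part of the generated file): the non-2xx cases below are
+// returned as the error value, so callers can type-assert err to the concrete
+// response, e.g.:
+//
+//	if _, err := snapClient.PcloudCloudinstancesSnapshotsDelete(del, auth); err != nil {
+//		if gone, ok := err.(*PcloudCloudinstancesSnapshotsDeleteGone); ok {
+//			_ = gone.Payload // 410: snapshot no longer exists
+//		}
+//	}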
+func (o *PcloudCloudinstancesSnapshotsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudCloudinstancesSnapshotsDeleteAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesSnapshotsDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudCloudinstancesSnapshotsDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesSnapshotsDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesSnapshotsDeleteAccepted creates a PcloudCloudinstancesSnapshotsDeleteAccepted with default headers values +func NewPcloudCloudinstancesSnapshotsDeleteAccepted() *PcloudCloudinstancesSnapshotsDeleteAccepted { + return &PcloudCloudinstancesSnapshotsDeleteAccepted{} +} + +/*PcloudCloudinstancesSnapshotsDeleteAccepted handles this case with default header values. + +Accepted +*/ +type PcloudCloudinstancesSnapshotsDeleteAccepted struct { + Payload models.Object +} + +func (o *PcloudCloudinstancesSnapshotsDeleteAccepted) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsDeleteAccepted %+v", 202, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsDeleteAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsDeleteBadRequest creates a PcloudCloudinstancesSnapshotsDeleteBadRequest with default headers values +func NewPcloudCloudinstancesSnapshotsDeleteBadRequest() *PcloudCloudinstancesSnapshotsDeleteBadRequest { + return &PcloudCloudinstancesSnapshotsDeleteBadRequest{} +} + +/*PcloudCloudinstancesSnapshotsDeleteBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudinstancesSnapshotsDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsDeleteGone creates a PcloudCloudinstancesSnapshotsDeleteGone with default headers values +func NewPcloudCloudinstancesSnapshotsDeleteGone() *PcloudCloudinstancesSnapshotsDeleteGone { + return &PcloudCloudinstancesSnapshotsDeleteGone{} +} + +/*PcloudCloudinstancesSnapshotsDeleteGone handles this case with default header values. + +Gone +*/ +type PcloudCloudinstancesSnapshotsDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsDeleteInternalServerError creates a PcloudCloudinstancesSnapshotsDeleteInternalServerError with default headers values +func NewPcloudCloudinstancesSnapshotsDeleteInternalServerError() *PcloudCloudinstancesSnapshotsDeleteInternalServerError { + return &PcloudCloudinstancesSnapshotsDeleteInternalServerError{} +} + +/*PcloudCloudinstancesSnapshotsDeleteInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesSnapshotsDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_get_parameters.go new file mode 100644 index 00000000000..d4805a80e27 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package p_cloud_snapshots + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesSnapshotsGetParams creates a new PcloudCloudinstancesSnapshotsGetParams object +// with the default values initialized. +func NewPcloudCloudinstancesSnapshotsGetParams() *PcloudCloudinstancesSnapshotsGetParams { + var () + return &PcloudCloudinstancesSnapshotsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesSnapshotsGetParamsWithTimeout creates a new PcloudCloudinstancesSnapshotsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesSnapshotsGetParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesSnapshotsGetParams { + var () + return &PcloudCloudinstancesSnapshotsGetParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesSnapshotsGetParamsWithContext creates a new PcloudCloudinstancesSnapshotsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesSnapshotsGetParamsWithContext(ctx context.Context) *PcloudCloudinstancesSnapshotsGetParams { + var () + return &PcloudCloudinstancesSnapshotsGetParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesSnapshotsGetParamsWithHTTPClient creates a new PcloudCloudinstancesSnapshotsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesSnapshotsGetParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesSnapshotsGetParams { + var () + return &PcloudCloudinstancesSnapshotsGetParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesSnapshotsGetParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances snapshots get operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesSnapshotsGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*SnapshotID + PVM Instance snapshot id + + */ + SnapshotID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesSnapshotsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) WithContext(ctx context.Context) *PcloudCloudinstancesSnapshotsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) WithHTTPClient(client *http.Client) 
*PcloudCloudinstancesSnapshotsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesSnapshotsGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithSnapshotID adds the snapshotID to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) WithSnapshotID(snapshotID string) *PcloudCloudinstancesSnapshotsGetParams { + o.SetSnapshotID(snapshotID) + return o +} + +// SetSnapshotID adds the snapshotId to the pcloud cloudinstances snapshots get params +func (o *PcloudCloudinstancesSnapshotsGetParams) SetSnapshotID(snapshotID string) { + o.SnapshotID = snapshotID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesSnapshotsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param snapshot_id + if err := r.SetPathParam("snapshot_id", o.SnapshotID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_get_responses.go new file mode 100644 index 00000000000..57240c0b985 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_snapshots + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesSnapshotsGetReader is a Reader for the PcloudCloudinstancesSnapshotsGet structure. +type PcloudCloudinstancesSnapshotsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
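Both path parameters are effectively required: WriteToRequest interpolates cloud_instance_id and snapshot_id unconditionally and never appends to res, so an empty value yields a malformed request path rather than a client-side validation error. The fluent constructors compose as in this fragment (p_cloud_snapshots imported as above, plus "time"):

// Fetch one snapshot with an explicit per-request timeout.
params := p_cloud_snapshots.NewPcloudCloudinstancesSnapshotsGetParamsWithTimeout(30 * time.Second).
	WithCloudInstanceID(cloudInstanceID).
	WithSnapshotID(snapshotID)

The ...WithContext constructor composes the same way when cancellation matters more than a fixed timeout.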
+func (o *PcloudCloudinstancesSnapshotsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesSnapshotsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesSnapshotsGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesSnapshotsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesSnapshotsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesSnapshotsGetOK creates a PcloudCloudinstancesSnapshotsGetOK with default headers values +func NewPcloudCloudinstancesSnapshotsGetOK() *PcloudCloudinstancesSnapshotsGetOK { + return &PcloudCloudinstancesSnapshotsGetOK{} +} + +/*PcloudCloudinstancesSnapshotsGetOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesSnapshotsGetOK struct { + Payload *models.Snapshot +} + +func (o *PcloudCloudinstancesSnapshotsGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsGetOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Snapshot) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsGetBadRequest creates a PcloudCloudinstancesSnapshotsGetBadRequest with default headers values +func NewPcloudCloudinstancesSnapshotsGetBadRequest() *PcloudCloudinstancesSnapshotsGetBadRequest { + return &PcloudCloudinstancesSnapshotsGetBadRequest{} +} + +/*PcloudCloudinstancesSnapshotsGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesSnapshotsGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsGetNotFound creates a PcloudCloudinstancesSnapshotsGetNotFound with default headers values +func NewPcloudCloudinstancesSnapshotsGetNotFound() *PcloudCloudinstancesSnapshotsGetNotFound { + return &PcloudCloudinstancesSnapshotsGetNotFound{} +} + +/*PcloudCloudinstancesSnapshotsGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudCloudinstancesSnapshotsGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsGetInternalServerError creates a PcloudCloudinstancesSnapshotsGetInternalServerError with default headers values +func NewPcloudCloudinstancesSnapshotsGetInternalServerError() *PcloudCloudinstancesSnapshotsGetInternalServerError { + return &PcloudCloudinstancesSnapshotsGetInternalServerError{} +} + +/*PcloudCloudinstancesSnapshotsGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesSnapshotsGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_getall_parameters.go new file mode 100644 index 00000000000..49294074411 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_getall_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_snapshots + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesSnapshotsGetallParams creates a new PcloudCloudinstancesSnapshotsGetallParams object +// with the default values initialized. 
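Because the 404 case is returned as an error value, "snapshot not found" is distinguishable from transport failures without string matching. A fragment under the same assumption as above about the generated client method:

ok, err := client.PcloudCloudinstancesSnapshotsGet(params, authInfo)
if err != nil {
	if _, notFound := err.(*p_cloud_snapshots.PcloudCloudinstancesSnapshotsGetNotFound); notFound {
		return nil, nil // absent, not fatal for this caller
	}
	return nil, err
}
return ok.Payload, nil // *models.Snapshot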
+func NewPcloudCloudinstancesSnapshotsGetallParams() *PcloudCloudinstancesSnapshotsGetallParams { + var () + return &PcloudCloudinstancesSnapshotsGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesSnapshotsGetallParamsWithTimeout creates a new PcloudCloudinstancesSnapshotsGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesSnapshotsGetallParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesSnapshotsGetallParams { + var () + return &PcloudCloudinstancesSnapshotsGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesSnapshotsGetallParamsWithContext creates a new PcloudCloudinstancesSnapshotsGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesSnapshotsGetallParamsWithContext(ctx context.Context) *PcloudCloudinstancesSnapshotsGetallParams { + var () + return &PcloudCloudinstancesSnapshotsGetallParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesSnapshotsGetallParamsWithHTTPClient creates a new PcloudCloudinstancesSnapshotsGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesSnapshotsGetallParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesSnapshotsGetallParams { + var () + return &PcloudCloudinstancesSnapshotsGetallParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesSnapshotsGetallParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances snapshots getall operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesSnapshotsGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances snapshots getall params +func (o *PcloudCloudinstancesSnapshotsGetallParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesSnapshotsGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances snapshots getall params +func (o *PcloudCloudinstancesSnapshotsGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances snapshots getall params +func (o *PcloudCloudinstancesSnapshotsGetallParams) WithContext(ctx context.Context) *PcloudCloudinstancesSnapshotsGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances snapshots getall params +func (o *PcloudCloudinstancesSnapshotsGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances snapshots getall params +func (o *PcloudCloudinstancesSnapshotsGetallParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesSnapshotsGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances snapshots getall params +func (o *PcloudCloudinstancesSnapshotsGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances snapshots getall params +func (o *PcloudCloudinstancesSnapshotsGetallParams) WithCloudInstanceID(cloudInstanceID string) 
*PcloudCloudinstancesSnapshotsGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances snapshots getall params +func (o *PcloudCloudinstancesSnapshotsGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesSnapshotsGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_getall_responses.go new file mode 100644 index 00000000000..c557417dca8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_getall_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_snapshots + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesSnapshotsGetallReader is a Reader for the PcloudCloudinstancesSnapshotsGetall structure. +type PcloudCloudinstancesSnapshotsGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudCloudinstancesSnapshotsGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesSnapshotsGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesSnapshotsGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesSnapshotsGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesSnapshotsGetallOK creates a PcloudCloudinstancesSnapshotsGetallOK with default headers values +func NewPcloudCloudinstancesSnapshotsGetallOK() *PcloudCloudinstancesSnapshotsGetallOK { + return &PcloudCloudinstancesSnapshotsGetallOK{} +} + +/*PcloudCloudinstancesSnapshotsGetallOK handles this case with default header values. 
+ +OK +*/ +type PcloudCloudinstancesSnapshotsGetallOK struct { + Payload *models.Snapshots +} + +func (o *PcloudCloudinstancesSnapshotsGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots][%d] pcloudCloudinstancesSnapshotsGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Snapshots) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsGetallBadRequest creates a PcloudCloudinstancesSnapshotsGetallBadRequest with default headers values +func NewPcloudCloudinstancesSnapshotsGetallBadRequest() *PcloudCloudinstancesSnapshotsGetallBadRequest { + return &PcloudCloudinstancesSnapshotsGetallBadRequest{} +} + +/*PcloudCloudinstancesSnapshotsGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesSnapshotsGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots][%d] pcloudCloudinstancesSnapshotsGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsGetallInternalServerError creates a PcloudCloudinstancesSnapshotsGetallInternalServerError with default headers values +func NewPcloudCloudinstancesSnapshotsGetallInternalServerError() *PcloudCloudinstancesSnapshotsGetallInternalServerError { + return &PcloudCloudinstancesSnapshotsGetallInternalServerError{} +} + +/*PcloudCloudinstancesSnapshotsGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesSnapshotsGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots][%d] pcloudCloudinstancesSnapshotsGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_put_parameters.go new file mode 100644 index 00000000000..81c6dd13625 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_put_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_snapshots + +// This file was generated by the swagger tool. 
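Note that the list reader just shown maps only 200, 400 and 500; there is no typed NotFound arm, so an unknown cloud_instance_id cannot be detected the way it can on the by-id lookup. A fragment, again assuming the generated client method and leaving the field layout of *models.Snapshots to the models package (not part of this diff):

ok, err := client.PcloudCloudinstancesSnapshotsGetall(
	p_cloud_snapshots.NewPcloudCloudinstancesSnapshotsGetallParams().
		WithCloudInstanceID(cloudInstanceID),
	authInfo)
if err != nil {
	return err
}
fmt.Printf("snapshots: %+v\n", ok.Payload) // *models.Snapshots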
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudCloudinstancesSnapshotsPutParams creates a new PcloudCloudinstancesSnapshotsPutParams object +// with the default values initialized. +func NewPcloudCloudinstancesSnapshotsPutParams() *PcloudCloudinstancesSnapshotsPutParams { + var () + return &PcloudCloudinstancesSnapshotsPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesSnapshotsPutParamsWithTimeout creates a new PcloudCloudinstancesSnapshotsPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesSnapshotsPutParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesSnapshotsPutParams { + var () + return &PcloudCloudinstancesSnapshotsPutParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesSnapshotsPutParamsWithContext creates a new PcloudCloudinstancesSnapshotsPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesSnapshotsPutParamsWithContext(ctx context.Context) *PcloudCloudinstancesSnapshotsPutParams { + var () + return &PcloudCloudinstancesSnapshotsPutParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesSnapshotsPutParamsWithHTTPClient creates a new PcloudCloudinstancesSnapshotsPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesSnapshotsPutParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesSnapshotsPutParams { + var () + return &PcloudCloudinstancesSnapshotsPutParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesSnapshotsPutParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances snapshots put operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesSnapshotsPutParams struct { + + /*Body + Parameters for the update of a PVM instance snapshot + + */ + Body *models.SnapshotUpdate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*SnapshotID + PVM Instance snapshot id + + */ + SnapshotID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesSnapshotsPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) WithContext(ctx context.Context) *PcloudCloudinstancesSnapshotsPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances snapshots put params +func (o 
*PcloudCloudinstancesSnapshotsPutParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesSnapshotsPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) WithBody(body *models.SnapshotUpdate) *PcloudCloudinstancesSnapshotsPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) SetBody(body *models.SnapshotUpdate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesSnapshotsPutParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithSnapshotID adds the snapshotID to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) WithSnapshotID(snapshotID string) *PcloudCloudinstancesSnapshotsPutParams { + o.SetSnapshotID(snapshotID) + return o +} + +// SetSnapshotID adds the snapshotId to the pcloud cloudinstances snapshots put params +func (o *PcloudCloudinstancesSnapshotsPutParams) SetSnapshotID(snapshotID string) { + o.SnapshotID = snapshotID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesSnapshotsPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param snapshot_id + if err := r.SetPathParam("snapshot_id", o.SnapshotID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_put_responses.go new file mode 100644 index 00000000000..6c8ca7a5ace --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots/pcloud_cloudinstances_snapshots_put_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_snapshots + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesSnapshotsPutReader is a Reader for the PcloudCloudinstancesSnapshotsPut structure. 
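The PUT parameters carry a JSON body alongside the two path parameters, and WriteToRequest serializes the body only when it is non-nil, so omitting WithBody sends an update with no payload rather than failing client-side. A fragment, assuming a generated PcloudCloudinstancesSnapshotsPut client method and whatever fields models.SnapshotUpdate actually defines:

upd := &models.SnapshotUpdate{
	// Fields come from the models package, which this diff does not show.
}
params := p_cloud_snapshots.NewPcloudCloudinstancesSnapshotsPutParams().
	WithCloudInstanceID(cloudInstanceID).
	WithSnapshotID(snapshotID).
	WithBody(upd)
_, err := client.PcloudCloudinstancesSnapshotsPut(params, authInfo)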
+type PcloudCloudinstancesSnapshotsPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudCloudinstancesSnapshotsPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesSnapshotsPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesSnapshotsPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesSnapshotsPutNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesSnapshotsPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesSnapshotsPutOK creates a PcloudCloudinstancesSnapshotsPutOK with default headers values +func NewPcloudCloudinstancesSnapshotsPutOK() *PcloudCloudinstancesSnapshotsPutOK { + return &PcloudCloudinstancesSnapshotsPutOK{} +} + +/*PcloudCloudinstancesSnapshotsPutOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesSnapshotsPutOK struct { + Payload models.Object +} + +func (o *PcloudCloudinstancesSnapshotsPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsPutOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsPutBadRequest creates a PcloudCloudinstancesSnapshotsPutBadRequest with default headers values +func NewPcloudCloudinstancesSnapshotsPutBadRequest() *PcloudCloudinstancesSnapshotsPutBadRequest { + return &PcloudCloudinstancesSnapshotsPutBadRequest{} +} + +/*PcloudCloudinstancesSnapshotsPutBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudinstancesSnapshotsPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsPutNotFound creates a PcloudCloudinstancesSnapshotsPutNotFound with default headers values +func NewPcloudCloudinstancesSnapshotsPutNotFound() *PcloudCloudinstancesSnapshotsPutNotFound { + return &PcloudCloudinstancesSnapshotsPutNotFound{} +} + +/*PcloudCloudinstancesSnapshotsPutNotFound handles this case with default header values. + +Not Found +*/ +type PcloudCloudinstancesSnapshotsPutNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsPutNotFound) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsPutNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsPutNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesSnapshotsPutInternalServerError creates a PcloudCloudinstancesSnapshotsPutInternalServerError with default headers values +func NewPcloudCloudinstancesSnapshotsPutInternalServerError() *PcloudCloudinstancesSnapshotsPutInternalServerError { + return &PcloudCloudinstancesSnapshotsPutInternalServerError{} +} + +/*PcloudCloudinstancesSnapshotsPutInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesSnapshotsPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesSnapshotsPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/snapshots/{snapshot_id}][%d] pcloudCloudinstancesSnapshotsPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesSnapshotsPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/p_cloud_storage_capacity_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/p_cloud_storage_capacity_client.go new file mode 100644 index 00000000000..018a381a100 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/p_cloud_storage_capacity_client.go @@ -0,0 +1,146 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_storage_capacity + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud storage capacity API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud storage capacity API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudStoragecapacityPoolsGet storages capacity for a storage pool in a region +*/ +func (a *Client) PcloudStoragecapacityPoolsGet(params *PcloudStoragecapacityPoolsGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudStoragecapacityPoolsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudStoragecapacityPoolsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.storagecapacity.pools.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-pools/{storage_pool_name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudStoragecapacityPoolsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudStoragecapacityPoolsGetOK), nil + +} + +/* +PcloudStoragecapacityPoolsGetall storages capacity for all available storage pools in a region +*/ +func (a *Client) PcloudStoragecapacityPoolsGetall(params *PcloudStoragecapacityPoolsGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudStoragecapacityPoolsGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudStoragecapacityPoolsGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.storagecapacity.pools.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-pools", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudStoragecapacityPoolsGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudStoragecapacityPoolsGetallOK), nil + +} + +/* +PcloudStoragecapacityTypesGet storages capacity for a storage type in a region +*/ +func (a *Client) PcloudStoragecapacityTypesGet(params *PcloudStoragecapacityTypesGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudStoragecapacityTypesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudStoragecapacityTypesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.storagecapacity.types.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-types/{storage_type_name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudStoragecapacityTypesGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return 
nil, err + } + return result.(*PcloudStoragecapacityTypesGetOK), nil + +} + +/* +PcloudStoragecapacityTypesGetall storages capacity for all available storage types in a region +*/ +func (a *Client) PcloudStoragecapacityTypesGetall(params *PcloudStoragecapacityTypesGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudStoragecapacityTypesGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudStoragecapacityTypesGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.storagecapacity.types.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-types", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudStoragecapacityTypesGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudStoragecapacityTypesGetallOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_get_parameters.go new file mode 100644 index 00000000000..76ee684ff23 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_storage_capacity + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudStoragecapacityPoolsGetParams creates a new PcloudStoragecapacityPoolsGetParams object +// with the default values initialized. 
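All four storage-capacity operations share one Client and the same transport.Submit pattern, so wiring the client up once is enough for pools and types alike. A runnable sketch; host, base path and token are placeholders rather than values from this diff:

package main

import (
	"fmt"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity"
)

func main() {
	// Placeholder endpoint and IAM bearer token.
	transport := httptransport.New("power-iaas.cloud.ibm.com", "/", []string{"https"})
	authInfo := httptransport.BearerToken("IAM_TOKEN")

	client := p_cloud_storage_capacity.New(transport, strfmt.Default)

	params := p_cloud_storage_capacity.NewPcloudStoragecapacityPoolsGetallParams().
		WithCloudInstanceID("CLOUD_INSTANCE_ID")

	ok, err := client.PcloudStoragecapacityPoolsGetall(params, authInfo)
	if err != nil {
		fmt.Println("getall failed:", err)
		return
	}
	fmt.Printf("storage pools capacity: %+v\n", ok.Payload) // *models.StoragePoolsCapacity
}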
+func NewPcloudStoragecapacityPoolsGetParams() *PcloudStoragecapacityPoolsGetParams { + var () + return &PcloudStoragecapacityPoolsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudStoragecapacityPoolsGetParamsWithTimeout creates a new PcloudStoragecapacityPoolsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudStoragecapacityPoolsGetParamsWithTimeout(timeout time.Duration) *PcloudStoragecapacityPoolsGetParams { + var () + return &PcloudStoragecapacityPoolsGetParams{ + + timeout: timeout, + } +} + +// NewPcloudStoragecapacityPoolsGetParamsWithContext creates a new PcloudStoragecapacityPoolsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudStoragecapacityPoolsGetParamsWithContext(ctx context.Context) *PcloudStoragecapacityPoolsGetParams { + var () + return &PcloudStoragecapacityPoolsGetParams{ + + Context: ctx, + } +} + +// NewPcloudStoragecapacityPoolsGetParamsWithHTTPClient creates a new PcloudStoragecapacityPoolsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudStoragecapacityPoolsGetParamsWithHTTPClient(client *http.Client) *PcloudStoragecapacityPoolsGetParams { + var () + return &PcloudStoragecapacityPoolsGetParams{ + HTTPClient: client, + } +} + +/*PcloudStoragecapacityPoolsGetParams contains all the parameters to send to the API endpoint +for the pcloud storagecapacity pools get operation typically these are written to a http.Request +*/ +type PcloudStoragecapacityPoolsGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*StoragePoolName + Storage pool name + + */ + StoragePoolName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) WithTimeout(timeout time.Duration) *PcloudStoragecapacityPoolsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) WithContext(ctx context.Context) *PcloudStoragecapacityPoolsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) WithHTTPClient(client *http.Client) *PcloudStoragecapacityPoolsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudStoragecapacityPoolsGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud 
storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithStoragePoolName adds the storagePoolName to the pcloud storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) WithStoragePoolName(storagePoolName string) *PcloudStoragecapacityPoolsGetParams { + o.SetStoragePoolName(storagePoolName) + return o +} + +// SetStoragePoolName adds the storagePoolName to the pcloud storagecapacity pools get params +func (o *PcloudStoragecapacityPoolsGetParams) SetStoragePoolName(storagePoolName string) { + o.StoragePoolName = storagePoolName +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudStoragecapacityPoolsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param storage_pool_name + if err := r.SetPathParam("storage_pool_name", o.StoragePoolName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_get_responses.go new file mode 100644 index 00000000000..44ca460be50 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_get_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_storage_capacity + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudStoragecapacityPoolsGetReader is a Reader for the PcloudStoragecapacityPoolsGet structure. +type PcloudStoragecapacityPoolsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudStoragecapacityPoolsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudStoragecapacityPoolsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewPcloudStoragecapacityPoolsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudStoragecapacityPoolsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudStoragecapacityPoolsGetOK creates a PcloudStoragecapacityPoolsGetOK with default headers values +func NewPcloudStoragecapacityPoolsGetOK() *PcloudStoragecapacityPoolsGetOK { + return &PcloudStoragecapacityPoolsGetOK{} +} + +/*PcloudStoragecapacityPoolsGetOK handles this case with default header values. + +OK +*/ +type PcloudStoragecapacityPoolsGetOK struct { + Payload *models.StoragePoolCapacity +} + +func (o *PcloudStoragecapacityPoolsGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-pools/{storage_pool_name}][%d] pcloudStoragecapacityPoolsGetOK %+v", 200, o.Payload) +} + +func (o *PcloudStoragecapacityPoolsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.StoragePoolCapacity) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudStoragecapacityPoolsGetNotFound creates a PcloudStoragecapacityPoolsGetNotFound with default headers values +func NewPcloudStoragecapacityPoolsGetNotFound() *PcloudStoragecapacityPoolsGetNotFound { + return &PcloudStoragecapacityPoolsGetNotFound{} +} + +/*PcloudStoragecapacityPoolsGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudStoragecapacityPoolsGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudStoragecapacityPoolsGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-pools/{storage_pool_name}][%d] pcloudStoragecapacityPoolsGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudStoragecapacityPoolsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudStoragecapacityPoolsGetInternalServerError creates a PcloudStoragecapacityPoolsGetInternalServerError with default headers values +func NewPcloudStoragecapacityPoolsGetInternalServerError() *PcloudStoragecapacityPoolsGetInternalServerError { + return &PcloudStoragecapacityPoolsGetInternalServerError{} +} + +/*PcloudStoragecapacityPoolsGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudStoragecapacityPoolsGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudStoragecapacityPoolsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-pools/{storage_pool_name}][%d] pcloudStoragecapacityPoolsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudStoragecapacityPoolsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_getall_parameters.go new file mode 100644 index 00000000000..0bb1b7640d2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_getall_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_storage_capacity + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudStoragecapacityPoolsGetallParams creates a new PcloudStoragecapacityPoolsGetallParams object +// with the default values initialized. 
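The by-name pool lookup maps 404 to a typed NotFound result, so capacity probes can treat an unknown pool explicitly. A fragment built from identifiers shown above (the pool name value itself is a caller-supplied placeholder):

params := p_cloud_storage_capacity.NewPcloudStoragecapacityPoolsGetParams().
	WithCloudInstanceID(cloudInstanceID).
	WithStoragePoolName(poolName)
ok, err := client.PcloudStoragecapacityPoolsGet(params, authInfo)
if err != nil {
	if _, notFound := err.(*p_cloud_storage_capacity.PcloudStoragecapacityPoolsGetNotFound); notFound {
		return nil, fmt.Errorf("storage pool %q not found", poolName)
	}
	return nil, err
}
return ok.Payload, nil // *models.StoragePoolCapacity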
+func NewPcloudStoragecapacityPoolsGetallParams() *PcloudStoragecapacityPoolsGetallParams { + var () + return &PcloudStoragecapacityPoolsGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudStoragecapacityPoolsGetallParamsWithTimeout creates a new PcloudStoragecapacityPoolsGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudStoragecapacityPoolsGetallParamsWithTimeout(timeout time.Duration) *PcloudStoragecapacityPoolsGetallParams { + var () + return &PcloudStoragecapacityPoolsGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudStoragecapacityPoolsGetallParamsWithContext creates a new PcloudStoragecapacityPoolsGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudStoragecapacityPoolsGetallParamsWithContext(ctx context.Context) *PcloudStoragecapacityPoolsGetallParams { + var () + return &PcloudStoragecapacityPoolsGetallParams{ + + Context: ctx, + } +} + +// NewPcloudStoragecapacityPoolsGetallParamsWithHTTPClient creates a new PcloudStoragecapacityPoolsGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudStoragecapacityPoolsGetallParamsWithHTTPClient(client *http.Client) *PcloudStoragecapacityPoolsGetallParams { + var () + return &PcloudStoragecapacityPoolsGetallParams{ + HTTPClient: client, + } +} + +/*PcloudStoragecapacityPoolsGetallParams contains all the parameters to send to the API endpoint +for the pcloud storagecapacity pools getall operation typically these are written to a http.Request +*/ +type PcloudStoragecapacityPoolsGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud storagecapacity pools getall params +func (o *PcloudStoragecapacityPoolsGetallParams) WithTimeout(timeout time.Duration) *PcloudStoragecapacityPoolsGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud storagecapacity pools getall params +func (o *PcloudStoragecapacityPoolsGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud storagecapacity pools getall params +func (o *PcloudStoragecapacityPoolsGetallParams) WithContext(ctx context.Context) *PcloudStoragecapacityPoolsGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud storagecapacity pools getall params +func (o *PcloudStoragecapacityPoolsGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud storagecapacity pools getall params +func (o *PcloudStoragecapacityPoolsGetallParams) WithHTTPClient(client *http.Client) *PcloudStoragecapacityPoolsGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud storagecapacity pools getall params +func (o *PcloudStoragecapacityPoolsGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud storagecapacity pools getall params +func (o *PcloudStoragecapacityPoolsGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudStoragecapacityPoolsGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the 
cloudInstanceId to the pcloud storagecapacity pools getall params +func (o *PcloudStoragecapacityPoolsGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudStoragecapacityPoolsGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_getall_responses.go new file mode 100644 index 00000000000..b97fe55711e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_pools_getall_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_storage_capacity + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudStoragecapacityPoolsGetallReader is a Reader for the PcloudStoragecapacityPoolsGetall structure. +type PcloudStoragecapacityPoolsGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudStoragecapacityPoolsGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudStoragecapacityPoolsGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 500: + result := NewPcloudStoragecapacityPoolsGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudStoragecapacityPoolsGetallOK creates a PcloudStoragecapacityPoolsGetallOK with default headers values +func NewPcloudStoragecapacityPoolsGetallOK() *PcloudStoragecapacityPoolsGetallOK { + return &PcloudStoragecapacityPoolsGetallOK{} +} + +/*PcloudStoragecapacityPoolsGetallOK handles this case with default header values. 
+ +OK +*/ +type PcloudStoragecapacityPoolsGetallOK struct { + Payload *models.StoragePoolsCapacity +} + +func (o *PcloudStoragecapacityPoolsGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-pools][%d] pcloudStoragecapacityPoolsGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudStoragecapacityPoolsGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.StoragePoolsCapacity) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudStoragecapacityPoolsGetallInternalServerError creates a PcloudStoragecapacityPoolsGetallInternalServerError with default headers values +func NewPcloudStoragecapacityPoolsGetallInternalServerError() *PcloudStoragecapacityPoolsGetallInternalServerError { + return &PcloudStoragecapacityPoolsGetallInternalServerError{} +} + +/*PcloudStoragecapacityPoolsGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudStoragecapacityPoolsGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudStoragecapacityPoolsGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-pools][%d] pcloudStoragecapacityPoolsGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudStoragecapacityPoolsGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_get_parameters.go new file mode 100644 index 00000000000..372a17a7053 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_storage_capacity + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudStoragecapacityTypesGetParams creates a new PcloudStoragecapacityTypesGetParams object +// with the default values initialized. 
+func NewPcloudStoragecapacityTypesGetParams() *PcloudStoragecapacityTypesGetParams { + var () + return &PcloudStoragecapacityTypesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudStoragecapacityTypesGetParamsWithTimeout creates a new PcloudStoragecapacityTypesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudStoragecapacityTypesGetParamsWithTimeout(timeout time.Duration) *PcloudStoragecapacityTypesGetParams { + var () + return &PcloudStoragecapacityTypesGetParams{ + + timeout: timeout, + } +} + +// NewPcloudStoragecapacityTypesGetParamsWithContext creates a new PcloudStoragecapacityTypesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudStoragecapacityTypesGetParamsWithContext(ctx context.Context) *PcloudStoragecapacityTypesGetParams { + var () + return &PcloudStoragecapacityTypesGetParams{ + + Context: ctx, + } +} + +// NewPcloudStoragecapacityTypesGetParamsWithHTTPClient creates a new PcloudStoragecapacityTypesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudStoragecapacityTypesGetParamsWithHTTPClient(client *http.Client) *PcloudStoragecapacityTypesGetParams { + var () + return &PcloudStoragecapacityTypesGetParams{ + HTTPClient: client, + } +} + +/*PcloudStoragecapacityTypesGetParams contains all the parameters to send to the API endpoint +for the pcloud storagecapacity types get operation typically these are written to a http.Request +*/ +type PcloudStoragecapacityTypesGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*StorageTypeName + Storage type name + + */ + StorageTypeName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) WithTimeout(timeout time.Duration) *PcloudStoragecapacityTypesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) WithContext(ctx context.Context) *PcloudStoragecapacityTypesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) WithHTTPClient(client *http.Client) *PcloudStoragecapacityTypesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudStoragecapacityTypesGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud 
storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithStorageTypeName adds the storageTypeName to the pcloud storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) WithStorageTypeName(storageTypeName string) *PcloudStoragecapacityTypesGetParams { + o.SetStorageTypeName(storageTypeName) + return o +} + +// SetStorageTypeName adds the storageTypeName to the pcloud storagecapacity types get params +func (o *PcloudStoragecapacityTypesGetParams) SetStorageTypeName(storageTypeName string) { + o.StorageTypeName = storageTypeName +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudStoragecapacityTypesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param storage_type_name + if err := r.SetPathParam("storage_type_name", o.StorageTypeName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_get_responses.go new file mode 100644 index 00000000000..9adf16720c8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_get_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_storage_capacity + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudStoragecapacityTypesGetReader is a Reader for the PcloudStoragecapacityTypesGet structure. +type PcloudStoragecapacityTypesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudStoragecapacityTypesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudStoragecapacityTypesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewPcloudStoragecapacityTypesGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudStoragecapacityTypesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudStoragecapacityTypesGetOK creates a PcloudStoragecapacityTypesGetOK with default headers values +func NewPcloudStoragecapacityTypesGetOK() *PcloudStoragecapacityTypesGetOK { + return &PcloudStoragecapacityTypesGetOK{} +} + +/*PcloudStoragecapacityTypesGetOK handles this case with default header values. + +OK +*/ +type PcloudStoragecapacityTypesGetOK struct { + Payload *models.StorageTypeCapacity +} + +func (o *PcloudStoragecapacityTypesGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-types/{storage_type_name}][%d] pcloudStoragecapacityTypesGetOK %+v", 200, o.Payload) +} + +func (o *PcloudStoragecapacityTypesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.StorageTypeCapacity) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudStoragecapacityTypesGetNotFound creates a PcloudStoragecapacityTypesGetNotFound with default headers values +func NewPcloudStoragecapacityTypesGetNotFound() *PcloudStoragecapacityTypesGetNotFound { + return &PcloudStoragecapacityTypesGetNotFound{} +} + +/*PcloudStoragecapacityTypesGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudStoragecapacityTypesGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudStoragecapacityTypesGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-types/{storage_type_name}][%d] pcloudStoragecapacityTypesGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudStoragecapacityTypesGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudStoragecapacityTypesGetInternalServerError creates a PcloudStoragecapacityTypesGetInternalServerError with default headers values +func NewPcloudStoragecapacityTypesGetInternalServerError() *PcloudStoragecapacityTypesGetInternalServerError { + return &PcloudStoragecapacityTypesGetInternalServerError{} +} + +/*PcloudStoragecapacityTypesGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudStoragecapacityTypesGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudStoragecapacityTypesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-types/{storage_type_name}][%d] pcloudStoragecapacityTypesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudStoragecapacityTypesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_getall_parameters.go new file mode 100644 index 00000000000..155f47581f2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_getall_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_storage_capacity + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudStoragecapacityTypesGetallParams creates a new PcloudStoragecapacityTypesGetallParams object +// with the default values initialized. 
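
Note how the generated readers surface non-2xx responses as Go errors: ReadResponse hands the typed 404/500 structs back via "return nil, result", and each struct satisfies the error interface through its Error() method. A caller can therefore recover the structured payload with a type switch; a sketch continuing the example above, using only the response types defined in this diff:

import (
	"fmt"

	"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity"
)

// handleTypesGetErr distinguishes the generated error responses returned
// by the storage-type lookup; anything else (e.g. a transport failure)
// falls through to the default case.
func handleTypesGetErr(err error) {
	switch e := err.(type) {
	case *p_cloud_storage_capacity.PcloudStoragecapacityTypesGetNotFound:
		fmt.Printf("storage type not found: %+v\n", e.Payload)
	case *p_cloud_storage_capacity.PcloudStoragecapacityTypesGetInternalServerError:
		fmt.Printf("server-side failure: %+v\n", e.Payload)
	default:
		fmt.Println("transport or unknown error:", err)
	}
}
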
+func NewPcloudStoragecapacityTypesGetallParams() *PcloudStoragecapacityTypesGetallParams { + var () + return &PcloudStoragecapacityTypesGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudStoragecapacityTypesGetallParamsWithTimeout creates a new PcloudStoragecapacityTypesGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudStoragecapacityTypesGetallParamsWithTimeout(timeout time.Duration) *PcloudStoragecapacityTypesGetallParams { + var () + return &PcloudStoragecapacityTypesGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudStoragecapacityTypesGetallParamsWithContext creates a new PcloudStoragecapacityTypesGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudStoragecapacityTypesGetallParamsWithContext(ctx context.Context) *PcloudStoragecapacityTypesGetallParams { + var () + return &PcloudStoragecapacityTypesGetallParams{ + + Context: ctx, + } +} + +// NewPcloudStoragecapacityTypesGetallParamsWithHTTPClient creates a new PcloudStoragecapacityTypesGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudStoragecapacityTypesGetallParamsWithHTTPClient(client *http.Client) *PcloudStoragecapacityTypesGetallParams { + var () + return &PcloudStoragecapacityTypesGetallParams{ + HTTPClient: client, + } +} + +/*PcloudStoragecapacityTypesGetallParams contains all the parameters to send to the API endpoint +for the pcloud storagecapacity types getall operation typically these are written to a http.Request +*/ +type PcloudStoragecapacityTypesGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud storagecapacity types getall params +func (o *PcloudStoragecapacityTypesGetallParams) WithTimeout(timeout time.Duration) *PcloudStoragecapacityTypesGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud storagecapacity types getall params +func (o *PcloudStoragecapacityTypesGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud storagecapacity types getall params +func (o *PcloudStoragecapacityTypesGetallParams) WithContext(ctx context.Context) *PcloudStoragecapacityTypesGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud storagecapacity types getall params +func (o *PcloudStoragecapacityTypesGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud storagecapacity types getall params +func (o *PcloudStoragecapacityTypesGetallParams) WithHTTPClient(client *http.Client) *PcloudStoragecapacityTypesGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud storagecapacity types getall params +func (o *PcloudStoragecapacityTypesGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud storagecapacity types getall params +func (o *PcloudStoragecapacityTypesGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudStoragecapacityTypesGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the 
cloudInstanceId to the pcloud storagecapacity types getall params +func (o *PcloudStoragecapacityTypesGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudStoragecapacityTypesGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_getall_responses.go new file mode 100644 index 00000000000..9f6f19e0d5d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity/pcloud_storagecapacity_types_getall_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_storage_capacity + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudStoragecapacityTypesGetallReader is a Reader for the PcloudStoragecapacityTypesGetall structure. +type PcloudStoragecapacityTypesGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudStoragecapacityTypesGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudStoragecapacityTypesGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 500: + result := NewPcloudStoragecapacityTypesGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudStoragecapacityTypesGetallOK creates a PcloudStoragecapacityTypesGetallOK with default headers values +func NewPcloudStoragecapacityTypesGetallOK() *PcloudStoragecapacityTypesGetallOK { + return &PcloudStoragecapacityTypesGetallOK{} +} + +/*PcloudStoragecapacityTypesGetallOK handles this case with default header values. 
+ +OK +*/ +type PcloudStoragecapacityTypesGetallOK struct { + Payload *models.StorageTypesCapacity +} + +func (o *PcloudStoragecapacityTypesGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-types][%d] pcloudStoragecapacityTypesGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudStoragecapacityTypesGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.StorageTypesCapacity) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudStoragecapacityTypesGetallInternalServerError creates a PcloudStoragecapacityTypesGetallInternalServerError with default headers values +func NewPcloudStoragecapacityTypesGetallInternalServerError() *PcloudStoragecapacityTypesGetallInternalServerError { + return &PcloudStoragecapacityTypesGetallInternalServerError{} +} + +/*PcloudStoragecapacityTypesGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudStoragecapacityTypesGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudStoragecapacityTypesGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/storage-capacity/storage-types][%d] pcloudStoragecapacityTypesGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudStoragecapacityTypesGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools/p_cloud_system_pools_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools/p_cloud_system_pools_client.go new file mode 100644 index 00000000000..6a3909dc0fd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools/p_cloud_system_pools_client.go @@ -0,0 +1,59 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_system_pools + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud system pools API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud system pools API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudSystempoolsGet lists of available system pools within a particular data center +*/ +func (a *Client) PcloudSystempoolsGet(params *PcloudSystempoolsGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudSystempoolsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudSystempoolsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.systempools.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/system-pools", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudSystempoolsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudSystempoolsGetOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools/pcloud_systempools_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools/pcloud_systempools_get_parameters.go new file mode 100644 index 00000000000..842b8b6c9f5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools/pcloud_systempools_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_system_pools + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudSystempoolsGetParams creates a new PcloudSystempoolsGetParams object +// with the default values initialized. 
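
The system-pools client above is self-contained: New wires a transport and format registry into the Client, and PcloudSystempoolsGet submits the GET with the params object and auth writer. A runnable sketch against a hypothetical endpoint (host, instance ID, and token are placeholders):

package main

import (
	"fmt"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools"
)

func main() {
	// Placeholder host and IAM token, as in the earlier sketch.
	transport := httptransport.New("power-iaas.cloud.ibm.com", "/", []string{"https"})
	auth := httptransport.BearerToken("IAM_TOKEN")

	poolsClient := p_cloud_system_pools.New(transport, strfmt.Default)

	params := p_cloud_system_pools.NewPcloudSystempoolsGetParams().
		WithCloudInstanceID("CLOUD_INSTANCE_ID")

	ok, err := poolsClient.PcloudSystempoolsGet(params, auth)
	if err != nil {
		fmt.Println("system pools lookup failed:", err)
		return
	}
	// Payload is models.SystemPools; printed generically here rather than
	// assuming its concrete shape.
	fmt.Printf("available system pools: %+v\n", ok.Payload)
}
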
+func NewPcloudSystempoolsGetParams() *PcloudSystempoolsGetParams { + var () + return &PcloudSystempoolsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudSystempoolsGetParamsWithTimeout creates a new PcloudSystempoolsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudSystempoolsGetParamsWithTimeout(timeout time.Duration) *PcloudSystempoolsGetParams { + var () + return &PcloudSystempoolsGetParams{ + + timeout: timeout, + } +} + +// NewPcloudSystempoolsGetParamsWithContext creates a new PcloudSystempoolsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudSystempoolsGetParamsWithContext(ctx context.Context) *PcloudSystempoolsGetParams { + var () + return &PcloudSystempoolsGetParams{ + + Context: ctx, + } +} + +// NewPcloudSystempoolsGetParamsWithHTTPClient creates a new PcloudSystempoolsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudSystempoolsGetParamsWithHTTPClient(client *http.Client) *PcloudSystempoolsGetParams { + var () + return &PcloudSystempoolsGetParams{ + HTTPClient: client, + } +} + +/*PcloudSystempoolsGetParams contains all the parameters to send to the API endpoint +for the pcloud systempools get operation typically these are written to a http.Request +*/ +type PcloudSystempoolsGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud systempools get params +func (o *PcloudSystempoolsGetParams) WithTimeout(timeout time.Duration) *PcloudSystempoolsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud systempools get params +func (o *PcloudSystempoolsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud systempools get params +func (o *PcloudSystempoolsGetParams) WithContext(ctx context.Context) *PcloudSystempoolsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud systempools get params +func (o *PcloudSystempoolsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud systempools get params +func (o *PcloudSystempoolsGetParams) WithHTTPClient(client *http.Client) *PcloudSystempoolsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud systempools get params +func (o *PcloudSystempoolsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud systempools get params +func (o *PcloudSystempoolsGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudSystempoolsGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud systempools get params +func (o *PcloudSystempoolsGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudSystempoolsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if 
err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools/pcloud_systempools_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools/pcloud_systempools_get_responses.go new file mode 100644 index 00000000000..47d0b86c372 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools/pcloud_systempools_get_responses.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_system_pools + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudSystempoolsGetReader is a Reader for the PcloudSystempoolsGet structure. +type PcloudSystempoolsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudSystempoolsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudSystempoolsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 500: + result := NewPcloudSystempoolsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudSystempoolsGetOK creates a PcloudSystempoolsGetOK with default headers values +func NewPcloudSystempoolsGetOK() *PcloudSystempoolsGetOK { + return &PcloudSystempoolsGetOK{} +} + +/*PcloudSystempoolsGetOK handles this case with default header values. + +OK +*/ +type PcloudSystempoolsGetOK struct { + Payload models.SystemPools +} + +func (o *PcloudSystempoolsGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/system-pools][%d] pcloudSystempoolsGetOK %+v", 200, o.Payload) +} + +func (o *PcloudSystempoolsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudSystempoolsGetInternalServerError creates a PcloudSystempoolsGetInternalServerError with default headers values +func NewPcloudSystempoolsGetInternalServerError() *PcloudSystempoolsGetInternalServerError { + return &PcloudSystempoolsGetInternalServerError{} +} + +/*PcloudSystempoolsGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudSystempoolsGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudSystempoolsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/system-pools][%d] pcloudSystempoolsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudSystempoolsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/p_cloud_tasks_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/p_cloud_tasks_client.go new file mode 100644 index 00000000000..a9696ec2bc4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/p_cloud_tasks_client.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud tasks API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud tasks API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudTasksDelete deletes a task +*/ +func (a *Client) PcloudTasksDelete(params *PcloudTasksDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudTasksDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudTasksDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.tasks.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/tasks/{task_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudTasksDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudTasksDeleteOK), nil + +} + +/* +PcloudTasksGet gets a task +*/ +func (a *Client) PcloudTasksGet(params *PcloudTasksGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudTasksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudTasksGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.tasks.get", + Method: "GET", + PathPattern: "/pcloud/v1/tasks/{task_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudTasksGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudTasksGetOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git 
a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_delete_parameters.go new file mode 100644 index 00000000000..8fda12b0e7a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_delete_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudTasksDeleteParams creates a new PcloudTasksDeleteParams object +// with the default values initialized. +func NewPcloudTasksDeleteParams() *PcloudTasksDeleteParams { + var () + return &PcloudTasksDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudTasksDeleteParamsWithTimeout creates a new PcloudTasksDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudTasksDeleteParamsWithTimeout(timeout time.Duration) *PcloudTasksDeleteParams { + var () + return &PcloudTasksDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudTasksDeleteParamsWithContext creates a new PcloudTasksDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudTasksDeleteParamsWithContext(ctx context.Context) *PcloudTasksDeleteParams { + var () + return &PcloudTasksDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudTasksDeleteParamsWithHTTPClient creates a new PcloudTasksDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudTasksDeleteParamsWithHTTPClient(client *http.Client) *PcloudTasksDeleteParams { + var () + return &PcloudTasksDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudTasksDeleteParams contains all the parameters to send to the API endpoint +for the pcloud tasks delete operation typically these are written to a http.Request +*/ +type PcloudTasksDeleteParams struct { + + /*TaskID + PCloud Task ID + + */ + TaskID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud tasks delete params +func (o *PcloudTasksDeleteParams) WithTimeout(timeout time.Duration) *PcloudTasksDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud tasks delete params +func (o *PcloudTasksDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud tasks delete params +func (o *PcloudTasksDeleteParams) WithContext(ctx context.Context) *PcloudTasksDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud tasks delete params +func (o *PcloudTasksDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud tasks delete params +func (o *PcloudTasksDeleteParams) WithHTTPClient(client *http.Client) *PcloudTasksDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud tasks delete params +func (o *PcloudTasksDeleteParams) SetHTTPClient(client *http.Client) { 
+ o.HTTPClient = client +} + +// WithTaskID adds the taskID to the pcloud tasks delete params +func (o *PcloudTasksDeleteParams) WithTaskID(taskID string) *PcloudTasksDeleteParams { + o.SetTaskID(taskID) + return o +} + +// SetTaskID adds the taskId to the pcloud tasks delete params +func (o *PcloudTasksDeleteParams) SetTaskID(taskID string) { + o.TaskID = taskID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudTasksDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param task_id + if err := r.SetPathParam("task_id", o.TaskID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_delete_responses.go new file mode 100644 index 00000000000..44d5342e9bd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudTasksDeleteReader is a Reader for the PcloudTasksDelete structure. +type PcloudTasksDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudTasksDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudTasksDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudTasksDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudTasksDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudTasksDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudTasksDeleteOK creates a PcloudTasksDeleteOK with default headers values +func NewPcloudTasksDeleteOK() *PcloudTasksDeleteOK { + return &PcloudTasksDeleteOK{} +} + +/*PcloudTasksDeleteOK handles this case with default header values. 
+ +OK +*/ +type PcloudTasksDeleteOK struct { + Payload models.Object +} + +func (o *PcloudTasksDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/tasks/{task_id}][%d] pcloudTasksDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudTasksDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTasksDeleteBadRequest creates a PcloudTasksDeleteBadRequest with default headers values +func NewPcloudTasksDeleteBadRequest() *PcloudTasksDeleteBadRequest { + return &PcloudTasksDeleteBadRequest{} +} + +/*PcloudTasksDeleteBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudTasksDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudTasksDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/tasks/{task_id}][%d] pcloudTasksDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudTasksDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTasksDeleteGone creates a PcloudTasksDeleteGone with default headers values +func NewPcloudTasksDeleteGone() *PcloudTasksDeleteGone { + return &PcloudTasksDeleteGone{} +} + +/*PcloudTasksDeleteGone handles this case with default header values. + +Gone +*/ +type PcloudTasksDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudTasksDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/tasks/{task_id}][%d] pcloudTasksDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudTasksDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTasksDeleteInternalServerError creates a PcloudTasksDeleteInternalServerError with default headers values +func NewPcloudTasksDeleteInternalServerError() *PcloudTasksDeleteInternalServerError { + return &PcloudTasksDeleteInternalServerError{} +} + +/*PcloudTasksDeleteInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudTasksDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudTasksDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/tasks/{task_id}][%d] pcloudTasksDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudTasksDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_get_parameters.go new file mode 100644 index 00000000000..23eee0bb16f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudTasksGetParams creates a new PcloudTasksGetParams object +// with the default values initialized. +func NewPcloudTasksGetParams() *PcloudTasksGetParams { + var () + return &PcloudTasksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudTasksGetParamsWithTimeout creates a new PcloudTasksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudTasksGetParamsWithTimeout(timeout time.Duration) *PcloudTasksGetParams { + var () + return &PcloudTasksGetParams{ + + timeout: timeout, + } +} + +// NewPcloudTasksGetParamsWithContext creates a new PcloudTasksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudTasksGetParamsWithContext(ctx context.Context) *PcloudTasksGetParams { + var () + return &PcloudTasksGetParams{ + + Context: ctx, + } +} + +// NewPcloudTasksGetParamsWithHTTPClient creates a new PcloudTasksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudTasksGetParamsWithHTTPClient(client *http.Client) *PcloudTasksGetParams { + var () + return &PcloudTasksGetParams{ + HTTPClient: client, + } +} + +/*PcloudTasksGetParams contains all the parameters to send to the API endpoint +for the pcloud tasks get operation typically these are written to a http.Request +*/ +type PcloudTasksGetParams struct { + + /*TaskID + PCloud Task ID + + */ + TaskID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud tasks get params +func (o *PcloudTasksGetParams) WithTimeout(timeout time.Duration) *PcloudTasksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud tasks get params +func (o *PcloudTasksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud tasks get params +func (o *PcloudTasksGetParams) WithContext(ctx context.Context) *PcloudTasksGetParams { + 
o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud tasks get params +func (o *PcloudTasksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud tasks get params +func (o *PcloudTasksGetParams) WithHTTPClient(client *http.Client) *PcloudTasksGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud tasks get params +func (o *PcloudTasksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTaskID adds the taskID to the pcloud tasks get params +func (o *PcloudTasksGetParams) WithTaskID(taskID string) *PcloudTasksGetParams { + o.SetTaskID(taskID) + return o +} + +// SetTaskID adds the taskId to the pcloud tasks get params +func (o *PcloudTasksGetParams) SetTaskID(taskID string) { + o.TaskID = taskID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudTasksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param task_id + if err := r.SetPathParam("task_id", o.TaskID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_get_responses.go new file mode 100644 index 00000000000..f1312b393c6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks/pcloud_tasks_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudTasksGetReader is a Reader for the PcloudTasksGet structure. +type PcloudTasksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudTasksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudTasksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudTasksGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudTasksGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudTasksGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudTasksGetOK creates a PcloudTasksGetOK with default headers values +func NewPcloudTasksGetOK() *PcloudTasksGetOK { + return &PcloudTasksGetOK{} +} + +/*PcloudTasksGetOK handles this case with default header values. 
+ +OK +*/ +type PcloudTasksGetOK struct { + Payload *models.Task +} + +func (o *PcloudTasksGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tasks/{task_id}][%d] pcloudTasksGetOK %+v", 200, o.Payload) +} + +func (o *PcloudTasksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Task) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTasksGetBadRequest creates a PcloudTasksGetBadRequest with default headers values +func NewPcloudTasksGetBadRequest() *PcloudTasksGetBadRequest { + return &PcloudTasksGetBadRequest{} +} + +/*PcloudTasksGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudTasksGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudTasksGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tasks/{task_id}][%d] pcloudTasksGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudTasksGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTasksGetNotFound creates a PcloudTasksGetNotFound with default headers values +func NewPcloudTasksGetNotFound() *PcloudTasksGetNotFound { + return &PcloudTasksGetNotFound{} +} + +/*PcloudTasksGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudTasksGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudTasksGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tasks/{task_id}][%d] pcloudTasksGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudTasksGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTasksGetInternalServerError creates a PcloudTasksGetInternalServerError with default headers values +func NewPcloudTasksGetInternalServerError() *PcloudTasksGetInternalServerError { + return &PcloudTasksGetInternalServerError{} +} + +/*PcloudTasksGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudTasksGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudTasksGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tasks/{task_id}][%d] pcloudTasksGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudTasksGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/p_cloud_tenants_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/p_cloud_tenants_client.go new file mode 100644 index 00000000000..cdb69dcd822 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/p_cloud_tenants_client.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud tenants API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud tenants API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudTenantsGet gets a tenant s current state information +*/ +func (a *Client) PcloudTenantsGet(params *PcloudTenantsGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudTenantsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudTenantsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.tenants.get", + Method: "GET", + PathPattern: "/pcloud/v1/tenants/{tenant_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudTenantsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudTenantsGetOK), nil + +} + +/* +PcloudTenantsPut updates a tenant +*/ +func (a *Client) PcloudTenantsPut(params *PcloudTenantsPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudTenantsPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudTenantsPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.tenants.put", + Method: "PUT", + PathPattern: "/pcloud/v1/tenants/{tenant_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudTenantsPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudTenantsPutOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git 
a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_get_parameters.go new file mode 100644 index 00000000000..d307eed31bb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudTenantsGetParams creates a new PcloudTenantsGetParams object +// with the default values initialized. +func NewPcloudTenantsGetParams() *PcloudTenantsGetParams { + var () + return &PcloudTenantsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudTenantsGetParamsWithTimeout creates a new PcloudTenantsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudTenantsGetParamsWithTimeout(timeout time.Duration) *PcloudTenantsGetParams { + var () + return &PcloudTenantsGetParams{ + + timeout: timeout, + } +} + +// NewPcloudTenantsGetParamsWithContext creates a new PcloudTenantsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudTenantsGetParamsWithContext(ctx context.Context) *PcloudTenantsGetParams { + var () + return &PcloudTenantsGetParams{ + + Context: ctx, + } +} + +// NewPcloudTenantsGetParamsWithHTTPClient creates a new PcloudTenantsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudTenantsGetParamsWithHTTPClient(client *http.Client) *PcloudTenantsGetParams { + var () + return &PcloudTenantsGetParams{ + HTTPClient: client, + } +} + +/*PcloudTenantsGetParams contains all the parameters to send to the API endpoint +for the pcloud tenants get operation typically these are written to a http.Request +*/ +type PcloudTenantsGetParams struct { + + /*TenantID + Tenant ID of a pcloud tenant + + */ + TenantID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud tenants get params +func (o *PcloudTenantsGetParams) WithTimeout(timeout time.Duration) *PcloudTenantsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud tenants get params +func (o *PcloudTenantsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud tenants get params +func (o *PcloudTenantsGetParams) WithContext(ctx context.Context) *PcloudTenantsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud tenants get params +func (o *PcloudTenantsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud tenants get params +func (o *PcloudTenantsGetParams) WithHTTPClient(client *http.Client) *PcloudTenantsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud tenants get params +func (o *PcloudTenantsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient 
= client +} + +// WithTenantID adds the tenantID to the pcloud tenants get params +func (o *PcloudTenantsGetParams) WithTenantID(tenantID string) *PcloudTenantsGetParams { + o.SetTenantID(tenantID) + return o +} + +// SetTenantID adds the tenantId to the pcloud tenants get params +func (o *PcloudTenantsGetParams) SetTenantID(tenantID string) { + o.TenantID = tenantID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudTenantsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param tenant_id + if err := r.SetPathParam("tenant_id", o.TenantID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_get_responses.go new file mode 100644 index 00000000000..04ab304d1c3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudTenantsGetReader is a Reader for the PcloudTenantsGet structure. +type PcloudTenantsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudTenantsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudTenantsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudTenantsGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudTenantsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudTenantsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudTenantsGetOK creates a PcloudTenantsGetOK with default headers values +func NewPcloudTenantsGetOK() *PcloudTenantsGetOK { + return &PcloudTenantsGetOK{} +} + +/*PcloudTenantsGetOK handles this case with default header values. 
+ +OK +*/ +type PcloudTenantsGetOK struct { + Payload *models.Tenant +} + +func (o *PcloudTenantsGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}][%d] pcloudTenantsGetOK %+v", 200, o.Payload) +} + +func (o *PcloudTenantsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Tenant) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsGetBadRequest creates a PcloudTenantsGetBadRequest with default headers values +func NewPcloudTenantsGetBadRequest() *PcloudTenantsGetBadRequest { + return &PcloudTenantsGetBadRequest{} +} + +/*PcloudTenantsGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudTenantsGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudTenantsGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}][%d] pcloudTenantsGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudTenantsGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsGetNotFound creates a PcloudTenantsGetNotFound with default headers values +func NewPcloudTenantsGetNotFound() *PcloudTenantsGetNotFound { + return &PcloudTenantsGetNotFound{} +} + +/*PcloudTenantsGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudTenantsGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudTenantsGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}][%d] pcloudTenantsGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudTenantsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsGetInternalServerError creates a PcloudTenantsGetInternalServerError with default headers values +func NewPcloudTenantsGetInternalServerError() *PcloudTenantsGetInternalServerError { + return &PcloudTenantsGetInternalServerError{} +} + +/*PcloudTenantsGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudTenantsGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudTenantsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}][%d] pcloudTenantsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudTenantsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_put_parameters.go new file mode 100644 index 00000000000..f481bbbcd46 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_put_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudTenantsPutParams creates a new PcloudTenantsPutParams object +// with the default values initialized. +func NewPcloudTenantsPutParams() *PcloudTenantsPutParams { + var () + return &PcloudTenantsPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudTenantsPutParamsWithTimeout creates a new PcloudTenantsPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudTenantsPutParamsWithTimeout(timeout time.Duration) *PcloudTenantsPutParams { + var () + return &PcloudTenantsPutParams{ + + timeout: timeout, + } +} + +// NewPcloudTenantsPutParamsWithContext creates a new PcloudTenantsPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudTenantsPutParamsWithContext(ctx context.Context) *PcloudTenantsPutParams { + var () + return &PcloudTenantsPutParams{ + + Context: ctx, + } +} + +// NewPcloudTenantsPutParamsWithHTTPClient creates a new PcloudTenantsPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudTenantsPutParamsWithHTTPClient(client *http.Client) *PcloudTenantsPutParams { + var () + return &PcloudTenantsPutParams{ + HTTPClient: client, + } +} + +/*PcloudTenantsPutParams contains all the parameters to send to the API endpoint +for the pcloud tenants put operation typically these are written to a http.Request +*/ +type PcloudTenantsPutParams struct { + + /*Body + Parameters for updating a Tenant + + */ + Body *models.TenantUpdate + /*TenantID + Tenant ID of a pcloud tenant + + */ + TenantID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud tenants put params +func (o *PcloudTenantsPutParams) WithTimeout(timeout time.Duration) *PcloudTenantsPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud tenants put params +func (o 
*PcloudTenantsPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud tenants put params +func (o *PcloudTenantsPutParams) WithContext(ctx context.Context) *PcloudTenantsPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud tenants put params +func (o *PcloudTenantsPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud tenants put params +func (o *PcloudTenantsPutParams) WithHTTPClient(client *http.Client) *PcloudTenantsPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud tenants put params +func (o *PcloudTenantsPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud tenants put params +func (o *PcloudTenantsPutParams) WithBody(body *models.TenantUpdate) *PcloudTenantsPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud tenants put params +func (o *PcloudTenantsPutParams) SetBody(body *models.TenantUpdate) { + o.Body = body +} + +// WithTenantID adds the tenantID to the pcloud tenants put params +func (o *PcloudTenantsPutParams) WithTenantID(tenantID string) *PcloudTenantsPutParams { + o.SetTenantID(tenantID) + return o +} + +// SetTenantID adds the tenantId to the pcloud tenants put params +func (o *PcloudTenantsPutParams) SetTenantID(tenantID string) { + o.TenantID = tenantID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudTenantsPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param tenant_id + if err := r.SetPathParam("tenant_id", o.TenantID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_put_responses.go new file mode 100644 index 00000000000..8fdd1ba1630 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants/pcloud_tenants_put_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudTenantsPutReader is a Reader for the PcloudTenantsPut structure. +type PcloudTenantsPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudTenantsPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudTenantsPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudTenantsPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudTenantsPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudTenantsPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudTenantsPutOK creates a PcloudTenantsPutOK with default headers values +func NewPcloudTenantsPutOK() *PcloudTenantsPutOK { + return &PcloudTenantsPutOK{} +} + +/*PcloudTenantsPutOK handles this case with default header values. + +OK +*/ +type PcloudTenantsPutOK struct { + Payload *models.Tenant +} + +func (o *PcloudTenantsPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/tenants/{tenant_id}][%d] pcloudTenantsPutOK %+v", 200, o.Payload) +} + +func (o *PcloudTenantsPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Tenant) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsPutBadRequest creates a PcloudTenantsPutBadRequest with default headers values +func NewPcloudTenantsPutBadRequest() *PcloudTenantsPutBadRequest { + return &PcloudTenantsPutBadRequest{} +} + +/*PcloudTenantsPutBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudTenantsPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudTenantsPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/tenants/{tenant_id}][%d] pcloudTenantsPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudTenantsPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsPutUnprocessableEntity creates a PcloudTenantsPutUnprocessableEntity with default headers values +func NewPcloudTenantsPutUnprocessableEntity() *PcloudTenantsPutUnprocessableEntity { + return &PcloudTenantsPutUnprocessableEntity{} +} + +/*PcloudTenantsPutUnprocessableEntity handles this case with default header values. 
+ +Unprocessable Entity +*/ +type PcloudTenantsPutUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudTenantsPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/tenants/{tenant_id}][%d] pcloudTenantsPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudTenantsPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsPutInternalServerError creates a PcloudTenantsPutInternalServerError with default headers values +func NewPcloudTenantsPutInternalServerError() *PcloudTenantsPutInternalServerError { + return &PcloudTenantsPutInternalServerError{} +} + +/*PcloudTenantsPutInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudTenantsPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudTenantsPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/tenants/{tenant_id}][%d] pcloudTenantsPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudTenantsPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/p_cloud_tenants_ssh_keys_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/p_cloud_tenants_ssh_keys_client.go new file mode 100644 index 00000000000..4f74cbbe319 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/p_cloud_tenants_ssh_keys_client.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud tenants ssh keys API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud tenants ssh keys API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudTenantsSshkeysDelete deletes a tenant s SSH key +*/ +func (a *Client) PcloudTenantsSshkeysDelete(params *PcloudTenantsSshkeysDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudTenantsSshkeysDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudTenantsSshkeysDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.tenants.sshkeys.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudTenantsSshkeysDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudTenantsSshkeysDeleteOK), nil + +} + +/* +PcloudTenantsSshkeysGet gets a tenant s SSH key by name +*/ +func (a *Client) PcloudTenantsSshkeysGet(params *PcloudTenantsSshkeysGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudTenantsSshkeysGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudTenantsSshkeysGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.tenants.sshkeys.get", + Method: "GET", + PathPattern: "/pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudTenantsSshkeysGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudTenantsSshkeysGetOK), nil + +} + +/* +PcloudTenantsSshkeysGetall lists a tenant s SSH keys +*/ +func (a *Client) PcloudTenantsSshkeysGetall(params *PcloudTenantsSshkeysGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudTenantsSshkeysGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudTenantsSshkeysGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.tenants.sshkeys.getall", + Method: "GET", + PathPattern: "/pcloud/v1/tenants/{tenant_id}/sshkeys", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudTenantsSshkeysGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudTenantsSshkeysGetallOK), nil + +} + +/* +PcloudTenantsSshkeysPost adds a new SSH key to the tenant +*/ +func (a *Client) PcloudTenantsSshkeysPost(params *PcloudTenantsSshkeysPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudTenantsSshkeysPostOK, *PcloudTenantsSshkeysPostCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudTenantsSshkeysPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.tenants.sshkeys.post", + 
Method: "POST", + PathPattern: "/pcloud/v1/tenants/{tenant_id}/sshkeys", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudTenantsSshkeysPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *PcloudTenantsSshkeysPostOK: + return value, nil, nil + case *PcloudTenantsSshkeysPostCreated: + return nil, value, nil + } + return nil, nil, nil + +} + +/* +PcloudTenantsSshkeysPut updates an SSH key +*/ +func (a *Client) PcloudTenantsSshkeysPut(params *PcloudTenantsSshkeysPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudTenantsSshkeysPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudTenantsSshkeysPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.tenants.sshkeys.put", + Method: "PUT", + PathPattern: "/pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudTenantsSshkeysPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudTenantsSshkeysPutOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_delete_parameters.go new file mode 100644 index 00000000000..dce57f3816b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_delete_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudTenantsSshkeysDeleteParams creates a new PcloudTenantsSshkeysDeleteParams object +// with the default values initialized. 
+func NewPcloudTenantsSshkeysDeleteParams() *PcloudTenantsSshkeysDeleteParams { + var () + return &PcloudTenantsSshkeysDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudTenantsSshkeysDeleteParamsWithTimeout creates a new PcloudTenantsSshkeysDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudTenantsSshkeysDeleteParamsWithTimeout(timeout time.Duration) *PcloudTenantsSshkeysDeleteParams { + var () + return &PcloudTenantsSshkeysDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudTenantsSshkeysDeleteParamsWithContext creates a new PcloudTenantsSshkeysDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudTenantsSshkeysDeleteParamsWithContext(ctx context.Context) *PcloudTenantsSshkeysDeleteParams { + var () + return &PcloudTenantsSshkeysDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudTenantsSshkeysDeleteParamsWithHTTPClient creates a new PcloudTenantsSshkeysDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudTenantsSshkeysDeleteParamsWithHTTPClient(client *http.Client) *PcloudTenantsSshkeysDeleteParams { + var () + return &PcloudTenantsSshkeysDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudTenantsSshkeysDeleteParams contains all the parameters to send to the API endpoint +for the pcloud tenants sshkeys delete operation typically these are written to a http.Request +*/ +type PcloudTenantsSshkeysDeleteParams struct { + + /*SshkeyName + SSH key name for a pcloud tenant + + */ + SshkeyName string + /*TenantID + Tenant ID of a pcloud tenant + + */ + TenantID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) WithTimeout(timeout time.Duration) *PcloudTenantsSshkeysDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) WithContext(ctx context.Context) *PcloudTenantsSshkeysDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) WithHTTPClient(client *http.Client) *PcloudTenantsSshkeysDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithSshkeyName adds the sshkeyName to the pcloud tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) WithSshkeyName(sshkeyName string) *PcloudTenantsSshkeysDeleteParams { + o.SetSshkeyName(sshkeyName) + return o +} + +// SetSshkeyName adds the sshkeyName to the pcloud tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) SetSshkeyName(sshkeyName string) { + o.SshkeyName = sshkeyName +} + +// WithTenantID adds the tenantID to the pcloud 
tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) WithTenantID(tenantID string) *PcloudTenantsSshkeysDeleteParams { + o.SetTenantID(tenantID) + return o +} + +// SetTenantID adds the tenantId to the pcloud tenants sshkeys delete params +func (o *PcloudTenantsSshkeysDeleteParams) SetTenantID(tenantID string) { + o.TenantID = tenantID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudTenantsSshkeysDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param sshkey_name + if err := r.SetPathParam("sshkey_name", o.SshkeyName); err != nil { + return err + } + + // path param tenant_id + if err := r.SetPathParam("tenant_id", o.TenantID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_delete_responses.go new file mode 100644 index 00000000000..49583089700 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudTenantsSshkeysDeleteReader is a Reader for the PcloudTenantsSshkeysDelete structure. +type PcloudTenantsSshkeysDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudTenantsSshkeysDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudTenantsSshkeysDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudTenantsSshkeysDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudTenantsSshkeysDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudTenantsSshkeysDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudTenantsSshkeysDeleteOK creates a PcloudTenantsSshkeysDeleteOK with default headers values +func NewPcloudTenantsSshkeysDeleteOK() *PcloudTenantsSshkeysDeleteOK { + return &PcloudTenantsSshkeysDeleteOK{} +} + +/*PcloudTenantsSshkeysDeleteOK handles this case with default header values. 
+ +OK +*/ +type PcloudTenantsSshkeysDeleteOK struct { + Payload models.Object +} + +func (o *PcloudTenantsSshkeysDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudTenantsSshkeysDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysDeleteBadRequest creates a PcloudTenantsSshkeysDeleteBadRequest with default headers values +func NewPcloudTenantsSshkeysDeleteBadRequest() *PcloudTenantsSshkeysDeleteBadRequest { + return &PcloudTenantsSshkeysDeleteBadRequest{} +} + +/*PcloudTenantsSshkeysDeleteBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudTenantsSshkeysDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudTenantsSshkeysDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysDeleteGone creates a PcloudTenantsSshkeysDeleteGone with default headers values +func NewPcloudTenantsSshkeysDeleteGone() *PcloudTenantsSshkeysDeleteGone { + return &PcloudTenantsSshkeysDeleteGone{} +} + +/*PcloudTenantsSshkeysDeleteGone handles this case with default header values. + +Gone +*/ +type PcloudTenantsSshkeysDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudTenantsSshkeysDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysDeleteInternalServerError creates a PcloudTenantsSshkeysDeleteInternalServerError with default headers values +func NewPcloudTenantsSshkeysDeleteInternalServerError() *PcloudTenantsSshkeysDeleteInternalServerError { + return &PcloudTenantsSshkeysDeleteInternalServerError{} +} + +/*PcloudTenantsSshkeysDeleteInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudTenantsSshkeysDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudTenantsSshkeysDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_get_parameters.go new file mode 100644 index 00000000000..052ac742c1f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudTenantsSshkeysGetParams creates a new PcloudTenantsSshkeysGetParams object +// with the default values initialized. +func NewPcloudTenantsSshkeysGetParams() *PcloudTenantsSshkeysGetParams { + var () + return &PcloudTenantsSshkeysGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudTenantsSshkeysGetParamsWithTimeout creates a new PcloudTenantsSshkeysGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudTenantsSshkeysGetParamsWithTimeout(timeout time.Duration) *PcloudTenantsSshkeysGetParams { + var () + return &PcloudTenantsSshkeysGetParams{ + + timeout: timeout, + } +} + +// NewPcloudTenantsSshkeysGetParamsWithContext creates a new PcloudTenantsSshkeysGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudTenantsSshkeysGetParamsWithContext(ctx context.Context) *PcloudTenantsSshkeysGetParams { + var () + return &PcloudTenantsSshkeysGetParams{ + + Context: ctx, + } +} + +// NewPcloudTenantsSshkeysGetParamsWithHTTPClient creates a new PcloudTenantsSshkeysGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudTenantsSshkeysGetParamsWithHTTPClient(client *http.Client) *PcloudTenantsSshkeysGetParams { + var () + return &PcloudTenantsSshkeysGetParams{ + HTTPClient: client, + } +} + +/*PcloudTenantsSshkeysGetParams contains all the parameters to send to the API endpoint +for the pcloud tenants sshkeys get operation typically these are written to a http.Request +*/ +type PcloudTenantsSshkeysGetParams struct { + + /*SshkeyName + SSH key name for a pcloud tenant + + */ + SshkeyName string + /*TenantID + Tenant ID of a pcloud tenant + + */ + TenantID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud tenants sshkeys get 
params +func (o *PcloudTenantsSshkeysGetParams) WithTimeout(timeout time.Duration) *PcloudTenantsSshkeysGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud tenants sshkeys get params +func (o *PcloudTenantsSshkeysGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud tenants sshkeys get params +func (o *PcloudTenantsSshkeysGetParams) WithContext(ctx context.Context) *PcloudTenantsSshkeysGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud tenants sshkeys get params +func (o *PcloudTenantsSshkeysGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud tenants sshkeys get params +func (o *PcloudTenantsSshkeysGetParams) WithHTTPClient(client *http.Client) *PcloudTenantsSshkeysGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud tenants sshkeys get params +func (o *PcloudTenantsSshkeysGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithSshkeyName adds the sshkeyName to the pcloud tenants sshkeys get params +func (o *PcloudTenantsSshkeysGetParams) WithSshkeyName(sshkeyName string) *PcloudTenantsSshkeysGetParams { + o.SetSshkeyName(sshkeyName) + return o +} + +// SetSshkeyName adds the sshkeyName to the pcloud tenants sshkeys get params +func (o *PcloudTenantsSshkeysGetParams) SetSshkeyName(sshkeyName string) { + o.SshkeyName = sshkeyName +} + +// WithTenantID adds the tenantID to the pcloud tenants sshkeys get params +func (o *PcloudTenantsSshkeysGetParams) WithTenantID(tenantID string) *PcloudTenantsSshkeysGetParams { + o.SetTenantID(tenantID) + return o +} + +// SetTenantID adds the tenantId to the pcloud tenants sshkeys get params +func (o *PcloudTenantsSshkeysGetParams) SetTenantID(tenantID string) { + o.TenantID = tenantID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudTenantsSshkeysGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param sshkey_name + if err := r.SetPathParam("sshkey_name", o.SshkeyName); err != nil { + return err + } + + // path param tenant_id + if err := r.SetPathParam("tenant_id", o.TenantID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_get_responses.go new file mode 100644 index 00000000000..f49279efa7a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudTenantsSshkeysGetReader is a Reader for the PcloudTenantsSshkeysGet structure. 
+type PcloudTenantsSshkeysGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudTenantsSshkeysGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudTenantsSshkeysGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudTenantsSshkeysGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudTenantsSshkeysGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudTenantsSshkeysGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudTenantsSshkeysGetOK creates a PcloudTenantsSshkeysGetOK with default headers values +func NewPcloudTenantsSshkeysGetOK() *PcloudTenantsSshkeysGetOK { + return &PcloudTenantsSshkeysGetOK{} +} + +/*PcloudTenantsSshkeysGetOK handles this case with default header values. + +OK +*/ +type PcloudTenantsSshkeysGetOK struct { + Payload *models.SSHKey +} + +func (o *PcloudTenantsSshkeysGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysGetOK %+v", 200, o.Payload) +} + +func (o *PcloudTenantsSshkeysGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.SSHKey) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysGetBadRequest creates a PcloudTenantsSshkeysGetBadRequest with default headers values +func NewPcloudTenantsSshkeysGetBadRequest() *PcloudTenantsSshkeysGetBadRequest { + return &PcloudTenantsSshkeysGetBadRequest{} +} + +/*PcloudTenantsSshkeysGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudTenantsSshkeysGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudTenantsSshkeysGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysGetNotFound creates a PcloudTenantsSshkeysGetNotFound with default headers values +func NewPcloudTenantsSshkeysGetNotFound() *PcloudTenantsSshkeysGetNotFound { + return &PcloudTenantsSshkeysGetNotFound{} +} + +/*PcloudTenantsSshkeysGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudTenantsSshkeysGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudTenantsSshkeysGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysGetInternalServerError creates a PcloudTenantsSshkeysGetInternalServerError with default headers values +func NewPcloudTenantsSshkeysGetInternalServerError() *PcloudTenantsSshkeysGetInternalServerError { + return &PcloudTenantsSshkeysGetInternalServerError{} +} + +/*PcloudTenantsSshkeysGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudTenantsSshkeysGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudTenantsSshkeysGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_getall_parameters.go new file mode 100644 index 00000000000..66bbb51fe6f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_getall_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudTenantsSshkeysGetallParams creates a new PcloudTenantsSshkeysGetallParams object +// with the default values initialized. 
+func NewPcloudTenantsSshkeysGetallParams() *PcloudTenantsSshkeysGetallParams { + var () + return &PcloudTenantsSshkeysGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudTenantsSshkeysGetallParamsWithTimeout creates a new PcloudTenantsSshkeysGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudTenantsSshkeysGetallParamsWithTimeout(timeout time.Duration) *PcloudTenantsSshkeysGetallParams { + var () + return &PcloudTenantsSshkeysGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudTenantsSshkeysGetallParamsWithContext creates a new PcloudTenantsSshkeysGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudTenantsSshkeysGetallParamsWithContext(ctx context.Context) *PcloudTenantsSshkeysGetallParams { + var () + return &PcloudTenantsSshkeysGetallParams{ + + Context: ctx, + } +} + +// NewPcloudTenantsSshkeysGetallParamsWithHTTPClient creates a new PcloudTenantsSshkeysGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudTenantsSshkeysGetallParamsWithHTTPClient(client *http.Client) *PcloudTenantsSshkeysGetallParams { + var () + return &PcloudTenantsSshkeysGetallParams{ + HTTPClient: client, + } +} + +/*PcloudTenantsSshkeysGetallParams contains all the parameters to send to the API endpoint +for the pcloud tenants sshkeys getall operation typically these are written to a http.Request +*/ +type PcloudTenantsSshkeysGetallParams struct { + + /*TenantID + Tenant ID of a pcloud tenant + + */ + TenantID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud tenants sshkeys getall params +func (o *PcloudTenantsSshkeysGetallParams) WithTimeout(timeout time.Duration) *PcloudTenantsSshkeysGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud tenants sshkeys getall params +func (o *PcloudTenantsSshkeysGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud tenants sshkeys getall params +func (o *PcloudTenantsSshkeysGetallParams) WithContext(ctx context.Context) *PcloudTenantsSshkeysGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud tenants sshkeys getall params +func (o *PcloudTenantsSshkeysGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud tenants sshkeys getall params +func (o *PcloudTenantsSshkeysGetallParams) WithHTTPClient(client *http.Client) *PcloudTenantsSshkeysGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud tenants sshkeys getall params +func (o *PcloudTenantsSshkeysGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTenantID adds the tenantID to the pcloud tenants sshkeys getall params +func (o *PcloudTenantsSshkeysGetallParams) WithTenantID(tenantID string) *PcloudTenantsSshkeysGetallParams { + o.SetTenantID(tenantID) + return o +} + +// SetTenantID adds the tenantId to the pcloud tenants sshkeys getall params +func (o *PcloudTenantsSshkeysGetallParams) SetTenantID(tenantID string) { + o.TenantID = tenantID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudTenantsSshkeysGetallParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param tenant_id + if err := r.SetPathParam("tenant_id", o.TenantID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_getall_responses.go new file mode 100644 index 00000000000..06b31465164 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_getall_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudTenantsSshkeysGetallReader is a Reader for the PcloudTenantsSshkeysGetall structure. +type PcloudTenantsSshkeysGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudTenantsSshkeysGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudTenantsSshkeysGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudTenantsSshkeysGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudTenantsSshkeysGetallNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudTenantsSshkeysGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudTenantsSshkeysGetallOK creates a PcloudTenantsSshkeysGetallOK with default headers values +func NewPcloudTenantsSshkeysGetallOK() *PcloudTenantsSshkeysGetallOK { + return &PcloudTenantsSshkeysGetallOK{} +} + +/*PcloudTenantsSshkeysGetallOK handles this case with default header values. 
+ +OK +*/ +type PcloudTenantsSshkeysGetallOK struct { + Payload *models.SSHKeys +} + +func (o *PcloudTenantsSshkeysGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudTenantsSshkeysGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.SSHKeys) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysGetallBadRequest creates a PcloudTenantsSshkeysGetallBadRequest with default headers values +func NewPcloudTenantsSshkeysGetallBadRequest() *PcloudTenantsSshkeysGetallBadRequest { + return &PcloudTenantsSshkeysGetallBadRequest{} +} + +/*PcloudTenantsSshkeysGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudTenantsSshkeysGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudTenantsSshkeysGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysGetallNotFound creates a PcloudTenantsSshkeysGetallNotFound with default headers values +func NewPcloudTenantsSshkeysGetallNotFound() *PcloudTenantsSshkeysGetallNotFound { + return &PcloudTenantsSshkeysGetallNotFound{} +} + +/*PcloudTenantsSshkeysGetallNotFound handles this case with default header values. + +Not Found +*/ +type PcloudTenantsSshkeysGetallNotFound struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysGetallNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysGetallNotFound %+v", 404, o.Payload) +} + +func (o *PcloudTenantsSshkeysGetallNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysGetallInternalServerError creates a PcloudTenantsSshkeysGetallInternalServerError with default headers values +func NewPcloudTenantsSshkeysGetallInternalServerError() *PcloudTenantsSshkeysGetallInternalServerError { + return &PcloudTenantsSshkeysGetallInternalServerError{} +} + +/*PcloudTenantsSshkeysGetallInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudTenantsSshkeysGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudTenantsSshkeysGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_post_parameters.go new file mode 100644 index 00000000000..cdc2326209c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudTenantsSshkeysPostParams creates a new PcloudTenantsSshkeysPostParams object +// with the default values initialized. +func NewPcloudTenantsSshkeysPostParams() *PcloudTenantsSshkeysPostParams { + var () + return &PcloudTenantsSshkeysPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudTenantsSshkeysPostParamsWithTimeout creates a new PcloudTenantsSshkeysPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudTenantsSshkeysPostParamsWithTimeout(timeout time.Duration) *PcloudTenantsSshkeysPostParams { + var () + return &PcloudTenantsSshkeysPostParams{ + + timeout: timeout, + } +} + +// NewPcloudTenantsSshkeysPostParamsWithContext creates a new PcloudTenantsSshkeysPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudTenantsSshkeysPostParamsWithContext(ctx context.Context) *PcloudTenantsSshkeysPostParams { + var () + return &PcloudTenantsSshkeysPostParams{ + + Context: ctx, + } +} + +// NewPcloudTenantsSshkeysPostParamsWithHTTPClient creates a new PcloudTenantsSshkeysPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudTenantsSshkeysPostParamsWithHTTPClient(client *http.Client) *PcloudTenantsSshkeysPostParams { + var () + return &PcloudTenantsSshkeysPostParams{ + HTTPClient: client, + } +} + +/*PcloudTenantsSshkeysPostParams contains all the parameters to send to the API endpoint +for the pcloud tenants sshkeys post operation typically these are written to a http.Request +*/ +type PcloudTenantsSshkeysPostParams struct { + + /*Body + Parameters for the creation of a new SSH key + + */ + Body *models.SSHKey + /*TenantID + Tenant ID of a pcloud tenant + + */ + TenantID string + + timeout time.Duration + Context context.Context + HTTPClient 
*http.Client +} + +// WithTimeout adds the timeout to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) WithTimeout(timeout time.Duration) *PcloudTenantsSshkeysPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) WithContext(ctx context.Context) *PcloudTenantsSshkeysPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) WithHTTPClient(client *http.Client) *PcloudTenantsSshkeysPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) WithBody(body *models.SSHKey) *PcloudTenantsSshkeysPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) SetBody(body *models.SSHKey) { + o.Body = body +} + +// WithTenantID adds the tenantID to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) WithTenantID(tenantID string) *PcloudTenantsSshkeysPostParams { + o.SetTenantID(tenantID) + return o +} + +// SetTenantID adds the tenantId to the pcloud tenants sshkeys post params +func (o *PcloudTenantsSshkeysPostParams) SetTenantID(tenantID string) { + o.TenantID = tenantID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudTenantsSshkeysPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param tenant_id + if err := r.SetPathParam("tenant_id", o.TenantID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_post_responses.go new file mode 100644 index 00000000000..dbe77406527 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_post_responses.go @@ -0,0 +1,247 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudTenantsSshkeysPostReader is a Reader for the PcloudTenantsSshkeysPost structure. 
+type PcloudTenantsSshkeysPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudTenantsSshkeysPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudTenantsSshkeysPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 201: + result := NewPcloudTenantsSshkeysPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudTenantsSshkeysPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudTenantsSshkeysPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudTenantsSshkeysPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudTenantsSshkeysPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudTenantsSshkeysPostOK creates a PcloudTenantsSshkeysPostOK with default headers values +func NewPcloudTenantsSshkeysPostOK() *PcloudTenantsSshkeysPostOK { + return &PcloudTenantsSshkeysPostOK{} +} + +/*PcloudTenantsSshkeysPostOK handles this case with default header values. + +OK +*/ +type PcloudTenantsSshkeysPostOK struct { + Payload *models.SSHKey +} + +func (o *PcloudTenantsSshkeysPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysPostOK %+v", 200, o.Payload) +} + +func (o *PcloudTenantsSshkeysPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.SSHKey) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysPostCreated creates a PcloudTenantsSshkeysPostCreated with default headers values +func NewPcloudTenantsSshkeysPostCreated() *PcloudTenantsSshkeysPostCreated { + return &PcloudTenantsSshkeysPostCreated{} +} + +/*PcloudTenantsSshkeysPostCreated handles this case with default header values. 
+ +Created +*/ +type PcloudTenantsSshkeysPostCreated struct { + Payload *models.SSHKey +} + +func (o *PcloudTenantsSshkeysPostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysPostCreated %+v", 201, o.Payload) +} + +func (o *PcloudTenantsSshkeysPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.SSHKey) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysPostBadRequest creates a PcloudTenantsSshkeysPostBadRequest with default headers values +func NewPcloudTenantsSshkeysPostBadRequest() *PcloudTenantsSshkeysPostBadRequest { + return &PcloudTenantsSshkeysPostBadRequest{} +} + +/*PcloudTenantsSshkeysPostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudTenantsSshkeysPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudTenantsSshkeysPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysPostConflict creates a PcloudTenantsSshkeysPostConflict with default headers values +func NewPcloudTenantsSshkeysPostConflict() *PcloudTenantsSshkeysPostConflict { + return &PcloudTenantsSshkeysPostConflict{} +} + +/*PcloudTenantsSshkeysPostConflict handles this case with default header values. + +Conflict +*/ +type PcloudTenantsSshkeysPostConflict struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudTenantsSshkeysPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysPostUnprocessableEntity creates a PcloudTenantsSshkeysPostUnprocessableEntity with default headers values +func NewPcloudTenantsSshkeysPostUnprocessableEntity() *PcloudTenantsSshkeysPostUnprocessableEntity { + return &PcloudTenantsSshkeysPostUnprocessableEntity{} +} + +/*PcloudTenantsSshkeysPostUnprocessableEntity handles this case with default header values. 
+ +Unprocessable Entity +*/ +type PcloudTenantsSshkeysPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudTenantsSshkeysPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysPostInternalServerError creates a PcloudTenantsSshkeysPostInternalServerError with default headers values +func NewPcloudTenantsSshkeysPostInternalServerError() *PcloudTenantsSshkeysPostInternalServerError { + return &PcloudTenantsSshkeysPostInternalServerError{} +} + +/*PcloudTenantsSshkeysPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudTenantsSshkeysPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/tenants/{tenant_id}/sshkeys][%d] pcloudTenantsSshkeysPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudTenantsSshkeysPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_put_parameters.go new file mode 100644 index 00000000000..79f482c3e60 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_put_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudTenantsSshkeysPutParams creates a new PcloudTenantsSshkeysPutParams object +// with the default values initialized. 
+func NewPcloudTenantsSshkeysPutParams() *PcloudTenantsSshkeysPutParams { + var () + return &PcloudTenantsSshkeysPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudTenantsSshkeysPutParamsWithTimeout creates a new PcloudTenantsSshkeysPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudTenantsSshkeysPutParamsWithTimeout(timeout time.Duration) *PcloudTenantsSshkeysPutParams { + var () + return &PcloudTenantsSshkeysPutParams{ + + timeout: timeout, + } +} + +// NewPcloudTenantsSshkeysPutParamsWithContext creates a new PcloudTenantsSshkeysPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudTenantsSshkeysPutParamsWithContext(ctx context.Context) *PcloudTenantsSshkeysPutParams { + var () + return &PcloudTenantsSshkeysPutParams{ + + Context: ctx, + } +} + +// NewPcloudTenantsSshkeysPutParamsWithHTTPClient creates a new PcloudTenantsSshkeysPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudTenantsSshkeysPutParamsWithHTTPClient(client *http.Client) *PcloudTenantsSshkeysPutParams { + var () + return &PcloudTenantsSshkeysPutParams{ + HTTPClient: client, + } +} + +/*PcloudTenantsSshkeysPutParams contains all the parameters to send to the API endpoint +for the pcloud tenants sshkeys put operation typically these are written to a http.Request +*/ +type PcloudTenantsSshkeysPutParams struct { + + /*Body + Parameters for updating a Tenant's SSH Key + + */ + Body *models.SSHKey + /*SshkeyName + SSH key name for a pcloud tenant + + */ + SshkeyName string + /*TenantID + Tenant ID of a pcloud tenant + + */ + TenantID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) WithTimeout(timeout time.Duration) *PcloudTenantsSshkeysPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) WithContext(ctx context.Context) *PcloudTenantsSshkeysPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) WithHTTPClient(client *http.Client) *PcloudTenantsSshkeysPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) WithBody(body *models.SSHKey) *PcloudTenantsSshkeysPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) SetBody(body *models.SSHKey) { + o.Body = body +} + +// WithSshkeyName adds the sshkeyName to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) 
WithSshkeyName(sshkeyName string) *PcloudTenantsSshkeysPutParams { + o.SetSshkeyName(sshkeyName) + return o +} + +// SetSshkeyName adds the sshkeyName to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) SetSshkeyName(sshkeyName string) { + o.SshkeyName = sshkeyName +} + +// WithTenantID adds the tenantID to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) WithTenantID(tenantID string) *PcloudTenantsSshkeysPutParams { + o.SetTenantID(tenantID) + return o +} + +// SetTenantID adds the tenantId to the pcloud tenants sshkeys put params +func (o *PcloudTenantsSshkeysPutParams) SetTenantID(tenantID string) { + o.TenantID = tenantID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudTenantsSshkeysPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param sshkey_name + if err := r.SetPathParam("sshkey_name", o.SshkeyName); err != nil { + return err + } + + // path param tenant_id + if err := r.SetPathParam("tenant_id", o.TenantID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_put_responses.go new file mode 100644 index 00000000000..02a1b32b701 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys/pcloud_tenants_sshkeys_put_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_tenants_ssh_keys + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudTenantsSshkeysPutReader is a Reader for the PcloudTenantsSshkeysPut structure. +type PcloudTenantsSshkeysPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudTenantsSshkeysPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudTenantsSshkeysPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudTenantsSshkeysPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudTenantsSshkeysPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudTenantsSshkeysPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudTenantsSshkeysPutOK creates a PcloudTenantsSshkeysPutOK with default headers values +func NewPcloudTenantsSshkeysPutOK() *PcloudTenantsSshkeysPutOK { + return &PcloudTenantsSshkeysPutOK{} +} + +/*PcloudTenantsSshkeysPutOK handles this case with default header values. + +OK +*/ +type PcloudTenantsSshkeysPutOK struct { + Payload *models.SSHKey +} + +func (o *PcloudTenantsSshkeysPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysPutOK %+v", 200, o.Payload) +} + +func (o *PcloudTenantsSshkeysPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.SSHKey) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysPutBadRequest creates a PcloudTenantsSshkeysPutBadRequest with default headers values +func NewPcloudTenantsSshkeysPutBadRequest() *PcloudTenantsSshkeysPutBadRequest { + return &PcloudTenantsSshkeysPutBadRequest{} +} + +/*PcloudTenantsSshkeysPutBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudTenantsSshkeysPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudTenantsSshkeysPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysPutUnprocessableEntity creates a PcloudTenantsSshkeysPutUnprocessableEntity with default headers values +func NewPcloudTenantsSshkeysPutUnprocessableEntity() *PcloudTenantsSshkeysPutUnprocessableEntity { + return &PcloudTenantsSshkeysPutUnprocessableEntity{} +} + +/*PcloudTenantsSshkeysPutUnprocessableEntity handles this case with default header values. 
+ +Unprocessable Entity +*/ +type PcloudTenantsSshkeysPutUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudTenantsSshkeysPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudTenantsSshkeysPutInternalServerError creates a PcloudTenantsSshkeysPutInternalServerError with default headers values +func NewPcloudTenantsSshkeysPutInternalServerError() *PcloudTenantsSshkeysPutInternalServerError { + return &PcloudTenantsSshkeysPutInternalServerError{} +} + +/*PcloudTenantsSshkeysPutInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudTenantsSshkeysPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudTenantsSshkeysPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/tenants/{tenant_id}/sshkeys/{sshkey_name}][%d] pcloudTenantsSshkeysPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudTenantsSshkeysPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/p_cloud_volumes_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/p_cloud_volumes_client.go new file mode 100644 index 00000000000..1c6a777f64a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/p_cloud_volumes_client.go @@ -0,0 +1,668 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new p cloud volumes API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for p cloud volumes API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +PcloudCloudinstancesVolumesDelete deletes a cloud instance volume +*/ +func (a *Client) PcloudCloudinstancesVolumesDelete(params *PcloudCloudinstancesVolumesDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesVolumesDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesVolumesDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.volumes.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesVolumesDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesVolumesDeleteOK), nil + +} + +/* +PcloudCloudinstancesVolumesGet gets detailed info of a volume +*/ +func (a *Client) PcloudCloudinstancesVolumesGet(params *PcloudCloudinstancesVolumesGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesVolumesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesVolumesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.volumes.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesVolumesGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesVolumesGetOK), nil + +} + +/* +PcloudCloudinstancesVolumesGetall lists all volumes for this cloud instance +*/ +func (a *Client) PcloudCloudinstancesVolumesGetall(params *PcloudCloudinstancesVolumesGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesVolumesGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesVolumesGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.volumes.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/volumes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesVolumesGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesVolumesGetallOK), nil + +} + +/* +PcloudCloudinstancesVolumesPost creates a new data volume +*/ +func (a *Client) PcloudCloudinstancesVolumesPost(params *PcloudCloudinstancesVolumesPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesVolumesPostAccepted, error) { + // TODO:
Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesVolumesPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.volumes.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/volumes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesVolumesPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesVolumesPostAccepted), nil + +} + +/* +PcloudCloudinstancesVolumesPut updates a cloud instance volume +*/ +func (a *Client) PcloudCloudinstancesVolumesPut(params *PcloudCloudinstancesVolumesPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudCloudinstancesVolumesPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudCloudinstancesVolumesPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.cloudinstances.volumes.put", + Method: "PUT", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudCloudinstancesVolumesPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudCloudinstancesVolumesPutOK), nil + +} + +/* +PcloudPvminstancesVolumesDelete detaches a volume from a p VM instance +*/ +func (a *Client) PcloudPvminstancesVolumesDelete(params *PcloudPvminstancesVolumesDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesVolumesDeleteAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesVolumesDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.volumes.delete", + Method: "DELETE", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesVolumesDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesVolumesDeleteAccepted), nil + +} + +/* +PcloudPvminstancesVolumesGet gets detailed info of a volume attached to a p VM instance +*/ +func (a *Client) PcloudPvminstancesVolumesGet(params *PcloudPvminstancesVolumesGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesVolumesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesVolumesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.volumes.get", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, +
Reader: &PcloudPvminstancesVolumesGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesVolumesGetOK), nil + +} + +/* +PcloudPvminstancesVolumesGetall lists all volumes attached to a p VM instance +*/ +func (a *Client) PcloudPvminstancesVolumesGetall(params *PcloudPvminstancesVolumesGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesVolumesGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesVolumesGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.volumes.getall", + Method: "GET", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesVolumesGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesVolumesGetallOK), nil + +} + +/* +PcloudPvminstancesVolumesPost attaches a volume to a p VM instance +*/ +func (a *Client) PcloudPvminstancesVolumesPost(params *PcloudPvminstancesVolumesPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesVolumesPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesVolumesPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.volumes.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesVolumesPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesVolumesPostOK), nil + +} + +/* +PcloudPvminstancesVolumesPut updates a volume attached to a p VM instance +*/ +func (a *Client) PcloudPvminstancesVolumesPut(params *PcloudPvminstancesVolumesPutParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudPvminstancesVolumesPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesVolumesPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.volumes.put", + Method: "PUT", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesVolumesPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesVolumesPutOK), nil + +} + +/* +PcloudPvminstancesVolumesSetbootPut sets the p VM instance volume as the boot volume +*/ +func (a *Client) PcloudPvminstancesVolumesSetbootPut(params *PcloudPvminstancesVolumesSetbootPutParams, authInfo 
runtime.ClientAuthInfoWriter) (*PcloudPvminstancesVolumesSetbootPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudPvminstancesVolumesSetbootPutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.pvminstances.volumes.setboot.put", + Method: "PUT", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}/setboot", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudPvminstancesVolumesSetbootPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudPvminstancesVolumesSetbootPutOK), nil + +} + +/* +PcloudV2VolumesClonePost creates a volume clone for specified volumes +*/ +func (a *Client) PcloudV2VolumesClonePost(params *PcloudV2VolumesClonePostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumesClonePostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumesClonePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumes.clone.post", + Method: "POST", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudV2VolumesClonePostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumesClonePostAccepted), nil + +} + +/* +PcloudV2VolumesClonetasksGet gets the status of a volumes clone request for the specified clone task ID +*/ +func (a *Client) PcloudV2VolumesClonetasksGet(params *PcloudV2VolumesClonetasksGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumesClonetasksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumesClonetasksGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumes.clonetasks.get", + Method: "GET", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone-tasks/{clone_task_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudV2VolumesClonetasksGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumesClonetasksGetOK), nil + +} + +/* +PcloudV2VolumesPost creates multiple data volumes from a single definition +*/ +func (a *Client) PcloudV2VolumesPost(params *PcloudV2VolumesPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumesPostCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumesPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumes.post", + Method: "POST", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + 
Params: params, + Reader: &PcloudV2VolumesPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumesPostCreated), nil + +} + +/* +PcloudV2VolumescloneCancelPost cancels a volumes clone request and initiates the cleanup action; the cleanup action performs the cleanup of the preparatory clones and snapshot volumes +*/ +func (a *Client) PcloudV2VolumescloneCancelPost(params *PcloudV2VolumescloneCancelPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumescloneCancelPostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumescloneCancelPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumesclone.cancel.post", + Method: "POST", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/cancel", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudV2VolumescloneCancelPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumescloneCancelPostAccepted), nil + +} + +/* +PcloudV2VolumescloneDelete deletes a volumes clone request +*/ +func (a *Client) PcloudV2VolumescloneDelete(params *PcloudV2VolumescloneDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumescloneDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumescloneDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumesclone.delete", + Method: "DELETE", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudV2VolumescloneDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumescloneDeleteOK), nil + +} + +/* +PcloudV2VolumescloneExecutePost initiates the execute action for a volumes clone request; the execute action creates the cloned volumes using the volume snapshots +*/ +func (a *Client) PcloudV2VolumescloneExecutePost(params *PcloudV2VolumescloneExecutePostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumescloneExecutePostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumescloneExecutePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumesclone.execute.post", + Method: "POST", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/execute", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudV2VolumescloneExecutePostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumescloneExecutePostAccepted), nil + +} + +/* +PcloudV2VolumescloneGet gets the details for a volumes
clone request +*/ +func (a *Client) PcloudV2VolumescloneGet(params *PcloudV2VolumescloneGetParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumescloneGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumescloneGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumesclone.get", + Method: "GET", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudV2VolumescloneGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumescloneGetOK), nil + +} + +/* +PcloudV2VolumescloneGetall gets the list of volumes clone requests for a cloud instance +*/ +func (a *Client) PcloudV2VolumescloneGetall(params *PcloudV2VolumescloneGetallParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumescloneGetallOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumescloneGetallParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumesclone.getall", + Method: "GET", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudV2VolumescloneGetallReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumescloneGetallOK), nil + +} + +/* +PcloudV2VolumesclonePost creates a new volumes clone request and initiates the prepare action; requires a minimum of two volumes, a minimum of one volume in the in-use state, and a unique volumes clone name; the prepare action does the preparatory work for creating the snapshot volumes +*/ +func (a *Client) PcloudV2VolumesclonePost(params *PcloudV2VolumesclonePostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumesclonePostAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumesclonePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumesclone.post", + Method: "POST", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudV2VolumesclonePostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumesclonePostAccepted), nil + +} + +/* +PcloudV2VolumescloneStartPost initiates the start action for a volumes clone request; the start action starts the consistency group to initiate the flash copy +*/ +func (a *Client) PcloudV2VolumescloneStartPost(params *PcloudV2VolumescloneStartPostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudV2VolumescloneStartPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudV2VolumescloneStartPostParams() + } + + result, err :=
a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.v2.volumesclone.start.post", + Method: "POST", + PathPattern: "/pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/start", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudV2VolumescloneStartPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudV2VolumescloneStartPostOK), nil + +} + +/* +PcloudVolumesClonePost creates a volume clone for specified volumes +*/ +func (a *Client) PcloudVolumesClonePost(params *PcloudVolumesClonePostParams, authInfo runtime.ClientAuthInfoWriter) (*PcloudVolumesClonePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPcloudVolumesClonePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "pcloud.volumes.clone.post", + Method: "POST", + PathPattern: "/pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/clone", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PcloudVolumesClonePostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PcloudVolumesClonePostOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_delete_parameters.go new file mode 100644 index 00000000000..9eda652b430 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_delete_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesVolumesDeleteParams creates a new PcloudCloudinstancesVolumesDeleteParams object +// with the default values initialized. 
+func NewPcloudCloudinstancesVolumesDeleteParams() *PcloudCloudinstancesVolumesDeleteParams { + var () + return &PcloudCloudinstancesVolumesDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesVolumesDeleteParamsWithTimeout creates a new PcloudCloudinstancesVolumesDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesVolumesDeleteParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesDeleteParams { + var () + return &PcloudCloudinstancesVolumesDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesVolumesDeleteParamsWithContext creates a new PcloudCloudinstancesVolumesDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesVolumesDeleteParamsWithContext(ctx context.Context) *PcloudCloudinstancesVolumesDeleteParams { + var () + return &PcloudCloudinstancesVolumesDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesVolumesDeleteParamsWithHTTPClient creates a new PcloudCloudinstancesVolumesDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesVolumesDeleteParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesDeleteParams { + var () + return &PcloudCloudinstancesVolumesDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesVolumesDeleteParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances volumes delete operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesVolumesDeleteParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*VolumeID + Volume ID + + */ + VolumeID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) WithContext(ctx context.Context) *PcloudCloudinstancesVolumesDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesVolumesDeleteParams 
{ + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithVolumeID adds the volumeID to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) WithVolumeID(volumeID string) *PcloudCloudinstancesVolumesDeleteParams { + o.SetVolumeID(volumeID) + return o +} + +// SetVolumeID adds the volumeId to the pcloud cloudinstances volumes delete params +func (o *PcloudCloudinstancesVolumesDeleteParams) SetVolumeID(volumeID string) { + o.VolumeID = volumeID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesVolumesDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param volume_id + if err := r.SetPathParam("volume_id", o.VolumeID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_delete_responses.go new file mode 100644 index 00000000000..6978a005a79 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesVolumesDeleteReader is a Reader for the PcloudCloudinstancesVolumesDelete structure. +type PcloudCloudinstancesVolumesDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudCloudinstancesVolumesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesVolumesDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesVolumesDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewPcloudCloudinstancesVolumesDeleteGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesVolumesDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesVolumesDeleteOK creates a PcloudCloudinstancesVolumesDeleteOK with default headers values +func NewPcloudCloudinstancesVolumesDeleteOK() *PcloudCloudinstancesVolumesDeleteOK { + return &PcloudCloudinstancesVolumesDeleteOK{} +} + +/*PcloudCloudinstancesVolumesDeleteOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesVolumesDeleteOK struct { + Payload models.Object +} + +func (o *PcloudCloudinstancesVolumesDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesDeleteBadRequest creates a PcloudCloudinstancesVolumesDeleteBadRequest with default headers values +func NewPcloudCloudinstancesVolumesDeleteBadRequest() *PcloudCloudinstancesVolumesDeleteBadRequest { + return &PcloudCloudinstancesVolumesDeleteBadRequest{} +} + +/*PcloudCloudinstancesVolumesDeleteBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesVolumesDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesDeleteGone creates a PcloudCloudinstancesVolumesDeleteGone with default headers values +func NewPcloudCloudinstancesVolumesDeleteGone() *PcloudCloudinstancesVolumesDeleteGone { + return &PcloudCloudinstancesVolumesDeleteGone{} +} + +/*PcloudCloudinstancesVolumesDeleteGone handles this case with default header values. 
+ +Gone +*/ +type PcloudCloudinstancesVolumesDeleteGone struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesDeleteGone) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesDeleteGone %+v", 410, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesDeleteGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesDeleteInternalServerError creates a PcloudCloudinstancesVolumesDeleteInternalServerError with default headers values +func NewPcloudCloudinstancesVolumesDeleteInternalServerError() *PcloudCloudinstancesVolumesDeleteInternalServerError { + return &PcloudCloudinstancesVolumesDeleteInternalServerError{} +} + +/*PcloudCloudinstancesVolumesDeleteInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesVolumesDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_get_parameters.go new file mode 100644 index 00000000000..a922bbac5f5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesVolumesGetParams creates a new PcloudCloudinstancesVolumesGetParams object +// with the default values initialized. 
+func NewPcloudCloudinstancesVolumesGetParams() *PcloudCloudinstancesVolumesGetParams { + var () + return &PcloudCloudinstancesVolumesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesVolumesGetParamsWithTimeout creates a new PcloudCloudinstancesVolumesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesVolumesGetParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesGetParams { + var () + return &PcloudCloudinstancesVolumesGetParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesVolumesGetParamsWithContext creates a new PcloudCloudinstancesVolumesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesVolumesGetParamsWithContext(ctx context.Context) *PcloudCloudinstancesVolumesGetParams { + var () + return &PcloudCloudinstancesVolumesGetParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesVolumesGetParamsWithHTTPClient creates a new PcloudCloudinstancesVolumesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesVolumesGetParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesGetParams { + var () + return &PcloudCloudinstancesVolumesGetParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesVolumesGetParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances volumes get operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesVolumesGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*VolumeID + Volume ID + + */ + VolumeID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) WithContext(ctx context.Context) *PcloudCloudinstancesVolumesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesVolumesGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the 
pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithVolumeID adds the volumeID to the pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) WithVolumeID(volumeID string) *PcloudCloudinstancesVolumesGetParams { + o.SetVolumeID(volumeID) + return o +} + +// SetVolumeID adds the volumeId to the pcloud cloudinstances volumes get params +func (o *PcloudCloudinstancesVolumesGetParams) SetVolumeID(volumeID string) { + o.VolumeID = volumeID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesVolumesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param volume_id + if err := r.SetPathParam("volume_id", o.VolumeID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_get_responses.go new file mode 100644 index 00000000000..7bc1023ea50 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesVolumesGetReader is a Reader for the PcloudCloudinstancesVolumesGet structure. +type PcloudCloudinstancesVolumesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
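+//
+// NOTE: usage sketch, not generated code. A caller polling a volume can
+// tolerate 404 by asserting on the typed error below (client and authInfo
+// names are assumptions):
+//
+//	resp, err := client.PcloudCloudinstancesVolumesGet(params, authInfo)
+//	if err != nil {
+//		if _, notFound := err.(*PcloudCloudinstancesVolumesGetNotFound); notFound {
+//			return nil // volume no longer exists
+//		}
+//		return err
+//	}
+//	volume := resp.Payload // *models.Volume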
+func (o *PcloudCloudinstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesVolumesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesVolumesGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesVolumesGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesVolumesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesVolumesGetOK creates a PcloudCloudinstancesVolumesGetOK with default headers values +func NewPcloudCloudinstancesVolumesGetOK() *PcloudCloudinstancesVolumesGetOK { + return &PcloudCloudinstancesVolumesGetOK{} +} + +/*PcloudCloudinstancesVolumesGetOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesVolumesGetOK struct { + Payload *models.Volume +} + +func (o *PcloudCloudinstancesVolumesGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesGetOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Volume) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesGetBadRequest creates a PcloudCloudinstancesVolumesGetBadRequest with default headers values +func NewPcloudCloudinstancesVolumesGetBadRequest() *PcloudCloudinstancesVolumesGetBadRequest { + return &PcloudCloudinstancesVolumesGetBadRequest{} +} + +/*PcloudCloudinstancesVolumesGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesVolumesGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesGetNotFound creates a PcloudCloudinstancesVolumesGetNotFound with default headers values +func NewPcloudCloudinstancesVolumesGetNotFound() *PcloudCloudinstancesVolumesGetNotFound { + return &PcloudCloudinstancesVolumesGetNotFound{} +} + +/*PcloudCloudinstancesVolumesGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudCloudinstancesVolumesGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesGetInternalServerError creates a PcloudCloudinstancesVolumesGetInternalServerError with default headers values +func NewPcloudCloudinstancesVolumesGetInternalServerError() *PcloudCloudinstancesVolumesGetInternalServerError { + return &PcloudCloudinstancesVolumesGetInternalServerError{} +} + +/*PcloudCloudinstancesVolumesGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesVolumesGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_getall_parameters.go new file mode 100644 index 00000000000..eadb96744bc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_getall_parameters.go @@ -0,0 +1,168 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudCloudinstancesVolumesGetallParams creates a new PcloudCloudinstancesVolumesGetallParams object +// with the default values initialized. 
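+//
+// NOTE: usage sketch, not generated code. Affinity is optional, which is
+// why it is declared as *string below; nil means "no affinity filter".
+// For example, to list only volumes with affinity to one pvmInstance:
+//
+//	affinity := pvmInstanceID // id or name of a pvmInstance
+//	params := NewPcloudCloudinstancesVolumesGetallParams().
+//		WithCloudInstanceID(cloudInstanceID).
+//		WithAffinity(&affinity)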
+func NewPcloudCloudinstancesVolumesGetallParams() *PcloudCloudinstancesVolumesGetallParams { + var () + return &PcloudCloudinstancesVolumesGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesVolumesGetallParamsWithTimeout creates a new PcloudCloudinstancesVolumesGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesVolumesGetallParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesGetallParams { + var () + return &PcloudCloudinstancesVolumesGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesVolumesGetallParamsWithContext creates a new PcloudCloudinstancesVolumesGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesVolumesGetallParamsWithContext(ctx context.Context) *PcloudCloudinstancesVolumesGetallParams { + var () + return &PcloudCloudinstancesVolumesGetallParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesVolumesGetallParamsWithHTTPClient creates a new PcloudCloudinstancesVolumesGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesVolumesGetallParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesGetallParams { + var () + return &PcloudCloudinstancesVolumesGetallParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesVolumesGetallParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances volumes getall operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesVolumesGetallParams struct { + + /*Affinity + A pvmInstance (id or name), limits a volumes list response to only volumes that have affinity to the pvmInstance + + */ + Affinity *string + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances volumes getall params +func (o *PcloudCloudinstancesVolumesGetallParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances volumes getall params +func (o *PcloudCloudinstancesVolumesGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances volumes getall params +func (o *PcloudCloudinstancesVolumesGetallParams) WithContext(ctx context.Context) *PcloudCloudinstancesVolumesGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances volumes getall params +func (o *PcloudCloudinstancesVolumesGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes getall params +func (o *PcloudCloudinstancesVolumesGetallParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes getall params +func (o *PcloudCloudinstancesVolumesGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAffinity adds the affinity to the pcloud cloudinstances volumes getall params +func (o 
*PcloudCloudinstancesVolumesGetallParams) WithAffinity(affinity *string) *PcloudCloudinstancesVolumesGetallParams { + o.SetAffinity(affinity) + return o +} + +// SetAffinity adds the affinity to the pcloud cloudinstances volumes getall params +func (o *PcloudCloudinstancesVolumesGetallParams) SetAffinity(affinity *string) { + o.Affinity = affinity +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances volumes getall params +func (o *PcloudCloudinstancesVolumesGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesVolumesGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances volumes getall params +func (o *PcloudCloudinstancesVolumesGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesVolumesGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Affinity != nil { + + // query param affinity + var qrAffinity string + if o.Affinity != nil { + qrAffinity = *o.Affinity + } + qAffinity := qrAffinity + if qAffinity != "" { + if err := r.SetQueryParam("affinity", qAffinity); err != nil { + return err + } + } + + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_getall_responses.go new file mode 100644 index 00000000000..ade48e1cc1b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_getall_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesVolumesGetallReader is a Reader for the PcloudCloudinstancesVolumesGetall structure. +type PcloudCloudinstancesVolumesGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
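+//
+// NOTE: usage sketch, not generated code. The 200 payload is
+// *models.Volumes, a wrapper whose slice field (assumed to be named
+// Volumes, per the generated models) can be ranged over:
+//
+//	resp, err := client.PcloudCloudinstancesVolumesGetall(params, authInfo)
+//	if err != nil {
+//		return err
+//	}
+//	for _, v := range resp.Payload.Volumes {
+//		// inspect each volume reference
+//	}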
+func (o *PcloudCloudinstancesVolumesGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesVolumesGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesVolumesGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudCloudinstancesVolumesGetallNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesVolumesGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesVolumesGetallOK creates a PcloudCloudinstancesVolumesGetallOK with default headers values +func NewPcloudCloudinstancesVolumesGetallOK() *PcloudCloudinstancesVolumesGetallOK { + return &PcloudCloudinstancesVolumesGetallOK{} +} + +/*PcloudCloudinstancesVolumesGetallOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesVolumesGetallOK struct { + Payload *models.Volumes +} + +func (o *PcloudCloudinstancesVolumesGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudCloudinstancesVolumesGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Volumes) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesGetallBadRequest creates a PcloudCloudinstancesVolumesGetallBadRequest with default headers values +func NewPcloudCloudinstancesVolumesGetallBadRequest() *PcloudCloudinstancesVolumesGetallBadRequest { + return &PcloudCloudinstancesVolumesGetallBadRequest{} +} + +/*PcloudCloudinstancesVolumesGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudCloudinstancesVolumesGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudCloudinstancesVolumesGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesGetallNotFound creates a PcloudCloudinstancesVolumesGetallNotFound with default headers values +func NewPcloudCloudinstancesVolumesGetallNotFound() *PcloudCloudinstancesVolumesGetallNotFound { + return &PcloudCloudinstancesVolumesGetallNotFound{} +} + +/*PcloudCloudinstancesVolumesGetallNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudCloudinstancesVolumesGetallNotFound struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesGetallNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudCloudinstancesVolumesGetallNotFound %+v", 404, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesGetallNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesGetallInternalServerError creates a PcloudCloudinstancesVolumesGetallInternalServerError with default headers values +func NewPcloudCloudinstancesVolumesGetallInternalServerError() *PcloudCloudinstancesVolumesGetallInternalServerError { + return &PcloudCloudinstancesVolumesGetallInternalServerError{} +} + +/*PcloudCloudinstancesVolumesGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudCloudinstancesVolumesGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudCloudinstancesVolumesGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_post_parameters.go new file mode 100644 index 00000000000..55a0685ec0c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudCloudinstancesVolumesPostParams creates a new PcloudCloudinstancesVolumesPostParams object +// with the default values initialized. 
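+//
+// NOTE: usage sketch, not generated code. The request body is a
+// *models.CreateDataVolume; the field names used here are assumptions
+// based on the generated models:
+//
+//	name := "data-vol-1"
+//	size := float64(20) // GB
+//	params := NewPcloudCloudinstancesVolumesPostParams().
+//		WithCloudInstanceID(cloudInstanceID).
+//		WithBody(&models.CreateDataVolume{Name: &name, Size: &size})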
+func NewPcloudCloudinstancesVolumesPostParams() *PcloudCloudinstancesVolumesPostParams { + var () + return &PcloudCloudinstancesVolumesPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesVolumesPostParamsWithTimeout creates a new PcloudCloudinstancesVolumesPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesVolumesPostParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesPostParams { + var () + return &PcloudCloudinstancesVolumesPostParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesVolumesPostParamsWithContext creates a new PcloudCloudinstancesVolumesPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesVolumesPostParamsWithContext(ctx context.Context) *PcloudCloudinstancesVolumesPostParams { + var () + return &PcloudCloudinstancesVolumesPostParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesVolumesPostParamsWithHTTPClient creates a new PcloudCloudinstancesVolumesPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesVolumesPostParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesPostParams { + var () + return &PcloudCloudinstancesVolumesPostParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesVolumesPostParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances volumes post operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesVolumesPostParams struct { + + /*Body + Parameters for the creation of a new data volume + + */ + Body *models.CreateDataVolume + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) WithContext(ctx context.Context) *PcloudCloudinstancesVolumesPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) WithBody(body *models.CreateDataVolume) *PcloudCloudinstancesVolumesPostParams { + o.SetBody(body) + return o +} + +// SetBody adds 
the body to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) SetBody(body *models.CreateDataVolume) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesVolumesPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances volumes post params +func (o *PcloudCloudinstancesVolumesPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesVolumesPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_post_responses.go new file mode 100644 index 00000000000..ace84485189 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_post_responses.go @@ -0,0 +1,211 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesVolumesPostReader is a Reader for the PcloudCloudinstancesVolumesPost structure. +type PcloudCloudinstancesVolumesPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
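+//
+// NOTE: usage sketch, not generated code. Creation is asynchronous, so the
+// only success status is 202 Accepted, and its Payload (*models.Volume) may
+// describe a volume that is still provisioning; callers typically poll the
+// get operation until the volume reports an available state:
+//
+//	accepted, err := client.PcloudCloudinstancesVolumesPost(params, authInfo)
+//	if err != nil {
+//		return err
+//	}
+//	volumeID := accepted.Payload.VolumeID // field name assumed from models.Volume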
+func (o *PcloudCloudinstancesVolumesPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudCloudinstancesVolumesPostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesVolumesPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudCloudinstancesVolumesPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudCloudinstancesVolumesPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesVolumesPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesVolumesPostAccepted creates a PcloudCloudinstancesVolumesPostAccepted with default headers values +func NewPcloudCloudinstancesVolumesPostAccepted() *PcloudCloudinstancesVolumesPostAccepted { + return &PcloudCloudinstancesVolumesPostAccepted{} +} + +/*PcloudCloudinstancesVolumesPostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudCloudinstancesVolumesPostAccepted struct { + Payload *models.Volume +} + +func (o *PcloudCloudinstancesVolumesPostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudCloudinstancesVolumesPostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Volume) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesPostBadRequest creates a PcloudCloudinstancesVolumesPostBadRequest with default headers values +func NewPcloudCloudinstancesVolumesPostBadRequest() *PcloudCloudinstancesVolumesPostBadRequest { + return &PcloudCloudinstancesVolumesPostBadRequest{} +} + +/*PcloudCloudinstancesVolumesPostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudinstancesVolumesPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudCloudinstancesVolumesPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesPostConflict creates a PcloudCloudinstancesVolumesPostConflict with default headers values +func NewPcloudCloudinstancesVolumesPostConflict() *PcloudCloudinstancesVolumesPostConflict { + return &PcloudCloudinstancesVolumesPostConflict{} +} + +/*PcloudCloudinstancesVolumesPostConflict handles this case with default header values. + +Conflict +*/ +type PcloudCloudinstancesVolumesPostConflict struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudCloudinstancesVolumesPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesPostUnprocessableEntity creates a PcloudCloudinstancesVolumesPostUnprocessableEntity with default headers values +func NewPcloudCloudinstancesVolumesPostUnprocessableEntity() *PcloudCloudinstancesVolumesPostUnprocessableEntity { + return &PcloudCloudinstancesVolumesPostUnprocessableEntity{} +} + +/*PcloudCloudinstancesVolumesPostUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudCloudinstancesVolumesPostUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudCloudinstancesVolumesPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesPostInternalServerError creates a PcloudCloudinstancesVolumesPostInternalServerError with default headers values +func NewPcloudCloudinstancesVolumesPostInternalServerError() *PcloudCloudinstancesVolumesPostInternalServerError { + return &PcloudCloudinstancesVolumesPostInternalServerError{} +} + +/*PcloudCloudinstancesVolumesPostInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudCloudinstancesVolumesPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudCloudinstancesVolumesPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_put_parameters.go new file mode 100644 index 00000000000..05c98f1a904 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_put_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudCloudinstancesVolumesPutParams creates a new PcloudCloudinstancesVolumesPutParams object +// with the default values initialized. 
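+//
+// NOTE: usage sketch, not generated code. An update request wires the body
+// and both path params together; the models.UpdateVolume field name is an
+// assumption based on the generated models:
+//
+//	newName := "data-vol-1-renamed"
+//	params := NewPcloudCloudinstancesVolumesPutParams().
+//		WithCloudInstanceID(cloudInstanceID).
+//		WithVolumeID(volumeID).
+//		WithBody(&models.UpdateVolume{Name: &newName})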
+func NewPcloudCloudinstancesVolumesPutParams() *PcloudCloudinstancesVolumesPutParams { + var () + return &PcloudCloudinstancesVolumesPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudCloudinstancesVolumesPutParamsWithTimeout creates a new PcloudCloudinstancesVolumesPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudCloudinstancesVolumesPutParamsWithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesPutParams { + var () + return &PcloudCloudinstancesVolumesPutParams{ + + timeout: timeout, + } +} + +// NewPcloudCloudinstancesVolumesPutParamsWithContext creates a new PcloudCloudinstancesVolumesPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudCloudinstancesVolumesPutParamsWithContext(ctx context.Context) *PcloudCloudinstancesVolumesPutParams { + var () + return &PcloudCloudinstancesVolumesPutParams{ + + Context: ctx, + } +} + +// NewPcloudCloudinstancesVolumesPutParamsWithHTTPClient creates a new PcloudCloudinstancesVolumesPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudCloudinstancesVolumesPutParamsWithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesPutParams { + var () + return &PcloudCloudinstancesVolumesPutParams{ + HTTPClient: client, + } +} + +/*PcloudCloudinstancesVolumesPutParams contains all the parameters to send to the API endpoint +for the pcloud cloudinstances volumes put operation typically these are written to a http.Request +*/ +type PcloudCloudinstancesVolumesPutParams struct { + + /*Body + Parameters to update a cloud instance volume + + */ + Body *models.UpdateVolume + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*VolumeID + Volume ID + + */ + VolumeID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) WithTimeout(timeout time.Duration) *PcloudCloudinstancesVolumesPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) WithContext(ctx context.Context) *PcloudCloudinstancesVolumesPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) WithHTTPClient(client *http.Client) *PcloudCloudinstancesVolumesPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) WithBody(body *models.UpdateVolume) *PcloudCloudinstancesVolumesPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the 
body to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) SetBody(body *models.UpdateVolume) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) WithCloudInstanceID(cloudInstanceID string) *PcloudCloudinstancesVolumesPutParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithVolumeID adds the volumeID to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) WithVolumeID(volumeID string) *PcloudCloudinstancesVolumesPutParams { + o.SetVolumeID(volumeID) + return o +} + +// SetVolumeID adds the volumeId to the pcloud cloudinstances volumes put params +func (o *PcloudCloudinstancesVolumesPutParams) SetVolumeID(volumeID string) { + o.VolumeID = volumeID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudCloudinstancesVolumesPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param volume_id + if err := r.SetPathParam("volume_id", o.VolumeID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_put_responses.go new file mode 100644 index 00000000000..bcabbc44b8f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_cloudinstances_volumes_put_responses.go @@ -0,0 +1,211 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudCloudinstancesVolumesPutReader is a Reader for the PcloudCloudinstancesVolumesPut structure. +type PcloudCloudinstancesVolumesPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
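+//
+// NOTE: usage sketch, not generated code. 409 and 422 are both returned as
+// error values; distinguishing them lets a caller retry conflicts while
+// surfacing validation failures immediately:
+//
+//	if _, conflict := err.(*PcloudCloudinstancesVolumesPutConflict); conflict {
+//		// another operation is in flight against this volume; retry later
+//	}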
+func (o *PcloudCloudinstancesVolumesPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudCloudinstancesVolumesPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudCloudinstancesVolumesPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudCloudinstancesVolumesPutConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewPcloudCloudinstancesVolumesPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudCloudinstancesVolumesPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudCloudinstancesVolumesPutOK creates a PcloudCloudinstancesVolumesPutOK with default headers values +func NewPcloudCloudinstancesVolumesPutOK() *PcloudCloudinstancesVolumesPutOK { + return &PcloudCloudinstancesVolumesPutOK{} +} + +/*PcloudCloudinstancesVolumesPutOK handles this case with default header values. + +OK +*/ +type PcloudCloudinstancesVolumesPutOK struct { + Payload *models.Volume +} + +func (o *PcloudCloudinstancesVolumesPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesPutOK %+v", 200, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Volume) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesPutBadRequest creates a PcloudCloudinstancesVolumesPutBadRequest with default headers values +func NewPcloudCloudinstancesVolumesPutBadRequest() *PcloudCloudinstancesVolumesPutBadRequest { + return &PcloudCloudinstancesVolumesPutBadRequest{} +} + +/*PcloudCloudinstancesVolumesPutBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudCloudinstancesVolumesPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesPutConflict creates a PcloudCloudinstancesVolumesPutConflict with default headers values +func NewPcloudCloudinstancesVolumesPutConflict() *PcloudCloudinstancesVolumesPutConflict { + return &PcloudCloudinstancesVolumesPutConflict{} +} + +/*PcloudCloudinstancesVolumesPutConflict handles this case with default header values. + +Conflict +*/ +type PcloudCloudinstancesVolumesPutConflict struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesPutConflict) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesPutConflict %+v", 409, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPutConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesPutUnprocessableEntity creates a PcloudCloudinstancesVolumesPutUnprocessableEntity with default headers values +func NewPcloudCloudinstancesVolumesPutUnprocessableEntity() *PcloudCloudinstancesVolumesPutUnprocessableEntity { + return &PcloudCloudinstancesVolumesPutUnprocessableEntity{} +} + +/*PcloudCloudinstancesVolumesPutUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type PcloudCloudinstancesVolumesPutUnprocessableEntity struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudCloudinstancesVolumesPutInternalServerError creates a PcloudCloudinstancesVolumesPutInternalServerError with default headers values +func NewPcloudCloudinstancesVolumesPutInternalServerError() *PcloudCloudinstancesVolumesPutInternalServerError { + return &PcloudCloudinstancesVolumesPutInternalServerError{} +} + +/*PcloudCloudinstancesVolumesPutInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudCloudinstancesVolumesPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudCloudinstancesVolumesPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/{volume_id}][%d] pcloudCloudinstancesVolumesPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudCloudinstancesVolumesPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_delete_parameters.go new file mode 100644 index 00000000000..d4a1b2e8b6a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_delete_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesVolumesDeleteParams creates a new PcloudPvminstancesVolumesDeleteParams object +// with the default values initialized. +func NewPcloudPvminstancesVolumesDeleteParams() *PcloudPvminstancesVolumesDeleteParams { + var () + return &PcloudPvminstancesVolumesDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesVolumesDeleteParamsWithTimeout creates a new PcloudPvminstancesVolumesDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesVolumesDeleteParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesDeleteParams { + var () + return &PcloudPvminstancesVolumesDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesVolumesDeleteParamsWithContext creates a new PcloudPvminstancesVolumesDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesVolumesDeleteParamsWithContext(ctx context.Context) *PcloudPvminstancesVolumesDeleteParams { + var () + return &PcloudPvminstancesVolumesDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesVolumesDeleteParamsWithHTTPClient creates a new PcloudPvminstancesVolumesDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesVolumesDeleteParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesDeleteParams { + var () + return &PcloudPvminstancesVolumesDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesVolumesDeleteParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances volumes delete operation typically these are written to a http.Request +*/ +type PcloudPvminstancesVolumesDeleteParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + 
/*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + /*VolumeID + Volume ID + + */ + VolumeID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) WithContext(ctx context.Context) *PcloudPvminstancesVolumesDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesVolumesDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesVolumesDeleteParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WithVolumeID adds the volumeID to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) WithVolumeID(volumeID string) *PcloudPvminstancesVolumesDeleteParams { + o.SetVolumeID(volumeID) + return o +} + +// SetVolumeID adds the volumeId to the pcloud pvminstances volumes delete params +func (o *PcloudPvminstancesVolumesDeleteParams) SetVolumeID(volumeID string) { + o.VolumeID = volumeID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesVolumesDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + // path param volume_id + if err := 
r.SetPathParam("volume_id", o.VolumeID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_delete_responses.go new file mode 100644 index 00000000000..1a039e67381 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_delete_responses.go @@ -0,0 +1,281 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesVolumesDeleteReader is a Reader for the PcloudPvminstancesVolumesDelete structure. +type PcloudPvminstancesVolumesDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudPvminstancesVolumesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudPvminstancesVolumesDeleteAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesVolumesDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 401: + result := NewPcloudPvminstancesVolumesDeleteUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 403: + result := NewPcloudPvminstancesVolumesDeleteForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudPvminstancesVolumesDeleteNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudPvminstancesVolumesDeleteConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesVolumesDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesVolumesDeleteAccepted creates a PcloudPvminstancesVolumesDeleteAccepted with default headers values +func NewPcloudPvminstancesVolumesDeleteAccepted() *PcloudPvminstancesVolumesDeleteAccepted { + return &PcloudPvminstancesVolumesDeleteAccepted{} +} + +/*PcloudPvminstancesVolumesDeleteAccepted handles this case with default header values. 
+
+Accepted
+*/
+type PcloudPvminstancesVolumesDeleteAccepted struct {
+	Payload models.Object
+}
+
+func (o *PcloudPvminstancesVolumesDeleteAccepted) Error() string {
+	return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesDeleteAccepted %+v", 202, o.Payload)
+}
+
+func (o *PcloudPvminstancesVolumesDeleteAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewPcloudPvminstancesVolumesDeleteBadRequest creates a PcloudPvminstancesVolumesDeleteBadRequest with default headers values
+func NewPcloudPvminstancesVolumesDeleteBadRequest() *PcloudPvminstancesVolumesDeleteBadRequest {
+	return &PcloudPvminstancesVolumesDeleteBadRequest{}
+}
+
+/*PcloudPvminstancesVolumesDeleteBadRequest handles this case with default header values.
+
+Bad Request
+*/
+type PcloudPvminstancesVolumesDeleteBadRequest struct {
+	Payload *models.Error
+}
+
+func (o *PcloudPvminstancesVolumesDeleteBadRequest) Error() string {
+	return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesDeleteBadRequest %+v", 400, o.Payload)
+}
+
+func (o *PcloudPvminstancesVolumesDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.Error)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewPcloudPvminstancesVolumesDeleteUnauthorized creates a PcloudPvminstancesVolumesDeleteUnauthorized with default headers values
+func NewPcloudPvminstancesVolumesDeleteUnauthorized() *PcloudPvminstancesVolumesDeleteUnauthorized {
+	return &PcloudPvminstancesVolumesDeleteUnauthorized{}
+}
+
+/*PcloudPvminstancesVolumesDeleteUnauthorized handles this case with default header values.
+
+Unauthorized
+*/
+type PcloudPvminstancesVolumesDeleteUnauthorized struct {
+	Payload *models.Error
+}
+
+func (o *PcloudPvminstancesVolumesDeleteUnauthorized) Error() string {
+	return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesDeleteUnauthorized %+v", 401, o.Payload)
+}
+
+func (o *PcloudPvminstancesVolumesDeleteUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.Error)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewPcloudPvminstancesVolumesDeleteForbidden creates a PcloudPvminstancesVolumesDeleteForbidden with default headers values
+func NewPcloudPvminstancesVolumesDeleteForbidden() *PcloudPvminstancesVolumesDeleteForbidden {
+	return &PcloudPvminstancesVolumesDeleteForbidden{}
+}
+
+/*PcloudPvminstancesVolumesDeleteForbidden handles this case with default header values.
+ +Forbidden +*/ +type PcloudPvminstancesVolumesDeleteForbidden struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesDeleteForbidden) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesDeleteForbidden %+v", 403, o.Payload) +} + +func (o *PcloudPvminstancesVolumesDeleteForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesDeleteNotFound creates a PcloudPvminstancesVolumesDeleteNotFound with default headers values +func NewPcloudPvminstancesVolumesDeleteNotFound() *PcloudPvminstancesVolumesDeleteNotFound { + return &PcloudPvminstancesVolumesDeleteNotFound{} +} + +/*PcloudPvminstancesVolumesDeleteNotFound handles this case with default header values. + +Not Found +*/ +type PcloudPvminstancesVolumesDeleteNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesDeleteNotFound) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesDeleteNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesVolumesDeleteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesDeleteConflict creates a PcloudPvminstancesVolumesDeleteConflict with default headers values +func NewPcloudPvminstancesVolumesDeleteConflict() *PcloudPvminstancesVolumesDeleteConflict { + return &PcloudPvminstancesVolumesDeleteConflict{} +} + +/*PcloudPvminstancesVolumesDeleteConflict handles this case with default header values. + +Conflict +*/ +type PcloudPvminstancesVolumesDeleteConflict struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesDeleteConflict) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesDeleteConflict %+v", 409, o.Payload) +} + +func (o *PcloudPvminstancesVolumesDeleteConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesDeleteInternalServerError creates a PcloudPvminstancesVolumesDeleteInternalServerError with default headers values +func NewPcloudPvminstancesVolumesDeleteInternalServerError() *PcloudPvminstancesVolumesDeleteInternalServerError { + return &PcloudPvminstancesVolumesDeleteInternalServerError{} +} + +/*PcloudPvminstancesVolumesDeleteInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesVolumesDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesVolumesDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_get_parameters.go new file mode 100644 index 00000000000..ecd4ea666d2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_get_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesVolumesGetParams creates a new PcloudPvminstancesVolumesGetParams object +// with the default values initialized. 
+func NewPcloudPvminstancesVolumesGetParams() *PcloudPvminstancesVolumesGetParams { + var () + return &PcloudPvminstancesVolumesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesVolumesGetParamsWithTimeout creates a new PcloudPvminstancesVolumesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesVolumesGetParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesGetParams { + var () + return &PcloudPvminstancesVolumesGetParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesVolumesGetParamsWithContext creates a new PcloudPvminstancesVolumesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesVolumesGetParamsWithContext(ctx context.Context) *PcloudPvminstancesVolumesGetParams { + var () + return &PcloudPvminstancesVolumesGetParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesVolumesGetParamsWithHTTPClient creates a new PcloudPvminstancesVolumesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesVolumesGetParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesGetParams { + var () + return &PcloudPvminstancesVolumesGetParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesVolumesGetParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances volumes get operation typically these are written to a http.Request +*/ +type PcloudPvminstancesVolumesGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + /*VolumeID + Volume ID + + */ + VolumeID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) WithContext(ctx context.Context) *PcloudPvminstancesVolumesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesVolumesGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud 
pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesVolumesGetParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WithVolumeID adds the volumeID to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) WithVolumeID(volumeID string) *PcloudPvminstancesVolumesGetParams { + o.SetVolumeID(volumeID) + return o +} + +// SetVolumeID adds the volumeId to the pcloud pvminstances volumes get params +func (o *PcloudPvminstancesVolumesGetParams) SetVolumeID(volumeID string) { + o.VolumeID = volumeID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesVolumesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + // path param volume_id + if err := r.SetPathParam("volume_id", o.VolumeID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_get_responses.go new file mode 100644 index 00000000000..520403da998 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesVolumesGetReader is a Reader for the PcloudPvminstancesVolumesGet structure. +type PcloudPvminstancesVolumesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesVolumesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesVolumesGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudPvminstancesVolumesGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesVolumesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesVolumesGetOK creates a PcloudPvminstancesVolumesGetOK with default headers values +func NewPcloudPvminstancesVolumesGetOK() *PcloudPvminstancesVolumesGetOK { + return &PcloudPvminstancesVolumesGetOK{} +} + +/*PcloudPvminstancesVolumesGetOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesVolumesGetOK struct { + Payload *models.Volume +} + +func (o *PcloudPvminstancesVolumesGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesGetOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesVolumesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Volume) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesGetBadRequest creates a PcloudPvminstancesVolumesGetBadRequest with default headers values +func NewPcloudPvminstancesVolumesGetBadRequest() *PcloudPvminstancesVolumesGetBadRequest { + return &PcloudPvminstancesVolumesGetBadRequest{} +} + +/*PcloudPvminstancesVolumesGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudPvminstancesVolumesGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesVolumesGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesGetNotFound creates a PcloudPvminstancesVolumesGetNotFound with default headers values +func NewPcloudPvminstancesVolumesGetNotFound() *PcloudPvminstancesVolumesGetNotFound { + return &PcloudPvminstancesVolumesGetNotFound{} +} + +/*PcloudPvminstancesVolumesGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudPvminstancesVolumesGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesVolumesGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesGetInternalServerError creates a PcloudPvminstancesVolumesGetInternalServerError with default headers values +func NewPcloudPvminstancesVolumesGetInternalServerError() *PcloudPvminstancesVolumesGetInternalServerError { + return &PcloudPvminstancesVolumesGetInternalServerError{} +} + +/*PcloudPvminstancesVolumesGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesVolumesGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesVolumesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_getall_parameters.go new file mode 100644 index 00000000000..f07597d38dc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_getall_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesVolumesGetallParams creates a new PcloudPvminstancesVolumesGetallParams object +// with the default values initialized. 
+func NewPcloudPvminstancesVolumesGetallParams() *PcloudPvminstancesVolumesGetallParams { + var () + return &PcloudPvminstancesVolumesGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesVolumesGetallParamsWithTimeout creates a new PcloudPvminstancesVolumesGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesVolumesGetallParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesGetallParams { + var () + return &PcloudPvminstancesVolumesGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesVolumesGetallParamsWithContext creates a new PcloudPvminstancesVolumesGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesVolumesGetallParamsWithContext(ctx context.Context) *PcloudPvminstancesVolumesGetallParams { + var () + return &PcloudPvminstancesVolumesGetallParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesVolumesGetallParamsWithHTTPClient creates a new PcloudPvminstancesVolumesGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesVolumesGetallParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesGetallParams { + var () + return &PcloudPvminstancesVolumesGetallParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesVolumesGetallParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances volumes getall operation typically these are written to a http.Request +*/ +type PcloudPvminstancesVolumesGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) WithContext(ctx context.Context) *PcloudPvminstancesVolumesGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesVolumesGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} 
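+
+// NOTE (editorial sketch, not generated output): callers typically chain
+// these fluent setters to build the params, then hand them to the generated
+// p_cloud_volumes client operation, which invokes WriteToRequest to fill in
+// the path parameters. Assuming a caller that already holds a context and
+// both IDs, usage would look roughly like:
+//
+//	params := NewPcloudPvminstancesVolumesGetallParamsWithContext(ctx).
+//		WithCloudInstanceID(cloudInstanceID).
+//		WithPvmInstanceID(pvmInstanceID)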
+ +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesVolumesGetallParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances volumes getall params +func (o *PcloudPvminstancesVolumesGetallParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesVolumesGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_getall_responses.go new file mode 100644 index 00000000000..9fb49afeb85 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_getall_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesVolumesGetallReader is a Reader for the PcloudPvminstancesVolumesGetall structure. +type PcloudPvminstancesVolumesGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesVolumesGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesVolumesGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesVolumesGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudPvminstancesVolumesGetallNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesVolumesGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesVolumesGetallOK creates a PcloudPvminstancesVolumesGetallOK with default headers values +func NewPcloudPvminstancesVolumesGetallOK() *PcloudPvminstancesVolumesGetallOK { + return &PcloudPvminstancesVolumesGetallOK{} +} + +/*PcloudPvminstancesVolumesGetallOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesVolumesGetallOK struct { + Payload *models.Volumes +} + +func (o *PcloudPvminstancesVolumesGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes][%d] pcloudPvminstancesVolumesGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesVolumesGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Volumes) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesGetallBadRequest creates a PcloudPvminstancesVolumesGetallBadRequest with default headers values +func NewPcloudPvminstancesVolumesGetallBadRequest() *PcloudPvminstancesVolumesGetallBadRequest { + return &PcloudPvminstancesVolumesGetallBadRequest{} +} + +/*PcloudPvminstancesVolumesGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudPvminstancesVolumesGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes][%d] pcloudPvminstancesVolumesGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesVolumesGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesGetallNotFound creates a PcloudPvminstancesVolumesGetallNotFound with default headers values +func NewPcloudPvminstancesVolumesGetallNotFound() *PcloudPvminstancesVolumesGetallNotFound { + return &PcloudPvminstancesVolumesGetallNotFound{} +} + +/*PcloudPvminstancesVolumesGetallNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudPvminstancesVolumesGetallNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesGetallNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes][%d] pcloudPvminstancesVolumesGetallNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesVolumesGetallNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesGetallInternalServerError creates a PcloudPvminstancesVolumesGetallInternalServerError with default headers values +func NewPcloudPvminstancesVolumesGetallInternalServerError() *PcloudPvminstancesVolumesGetallInternalServerError { + return &PcloudPvminstancesVolumesGetallInternalServerError{} +} + +/*PcloudPvminstancesVolumesGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesVolumesGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes][%d] pcloudPvminstancesVolumesGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesVolumesGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_post_parameters.go new file mode 100644 index 00000000000..45688ac115a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_post_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesVolumesPostParams creates a new PcloudPvminstancesVolumesPostParams object +// with the default values initialized. 
+func NewPcloudPvminstancesVolumesPostParams() *PcloudPvminstancesVolumesPostParams { + var () + return &PcloudPvminstancesVolumesPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesVolumesPostParamsWithTimeout creates a new PcloudPvminstancesVolumesPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesVolumesPostParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesPostParams { + var () + return &PcloudPvminstancesVolumesPostParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesVolumesPostParamsWithContext creates a new PcloudPvminstancesVolumesPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesVolumesPostParamsWithContext(ctx context.Context) *PcloudPvminstancesVolumesPostParams { + var () + return &PcloudPvminstancesVolumesPostParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesVolumesPostParamsWithHTTPClient creates a new PcloudPvminstancesVolumesPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesVolumesPostParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesPostParams { + var () + return &PcloudPvminstancesVolumesPostParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesVolumesPostParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances volumes post operation typically these are written to a http.Request +*/ +type PcloudPvminstancesVolumesPostParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + /*VolumeID + Volume ID + + */ + VolumeID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) WithContext(ctx context.Context) *PcloudPvminstancesVolumesPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesVolumesPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID 
adds the cloudInstanceId to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesVolumesPostParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WithVolumeID adds the volumeID to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) WithVolumeID(volumeID string) *PcloudPvminstancesVolumesPostParams { + o.SetVolumeID(volumeID) + return o +} + +// SetVolumeID adds the volumeId to the pcloud pvminstances volumes post params +func (o *PcloudPvminstancesVolumesPostParams) SetVolumeID(volumeID string) { + o.VolumeID = volumeID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesVolumesPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + // path param volume_id + if err := r.SetPathParam("volume_id", o.VolumeID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_post_responses.go new file mode 100644 index 00000000000..fada4f68500 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_post_responses.go @@ -0,0 +1,281 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesVolumesPostReader is a Reader for the PcloudPvminstancesVolumesPost structure. +type PcloudPvminstancesVolumesPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesVolumesPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesVolumesPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesVolumesPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 401: + result := NewPcloudPvminstancesVolumesPostUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 403: + result := NewPcloudPvminstancesVolumesPostForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudPvminstancesVolumesPostNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudPvminstancesVolumesPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesVolumesPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesVolumesPostOK creates a PcloudPvminstancesVolumesPostOK with default headers values +func NewPcloudPvminstancesVolumesPostOK() *PcloudPvminstancesVolumesPostOK { + return &PcloudPvminstancesVolumesPostOK{} +} + +/*PcloudPvminstancesVolumesPostOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesVolumesPostOK struct { + Payload models.Object +} + +func (o *PcloudPvminstancesVolumesPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPostOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesPostBadRequest creates a PcloudPvminstancesVolumesPostBadRequest with default headers values +func NewPcloudPvminstancesVolumesPostBadRequest() *PcloudPvminstancesVolumesPostBadRequest { + return &PcloudPvminstancesVolumesPostBadRequest{} +} + +/*PcloudPvminstancesVolumesPostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesVolumesPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesPostUnauthorized creates a PcloudPvminstancesVolumesPostUnauthorized with default headers values +func NewPcloudPvminstancesVolumesPostUnauthorized() *PcloudPvminstancesVolumesPostUnauthorized { + return &PcloudPvminstancesVolumesPostUnauthorized{} +} + +/*PcloudPvminstancesVolumesPostUnauthorized handles this case with default header values. + +Unauthorized +*/ +type PcloudPvminstancesVolumesPostUnauthorized struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesPostUnauthorized) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPostUnauthorized %+v", 401, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPostUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesPostForbidden creates a PcloudPvminstancesVolumesPostForbidden with default headers values +func NewPcloudPvminstancesVolumesPostForbidden() *PcloudPvminstancesVolumesPostForbidden { + return &PcloudPvminstancesVolumesPostForbidden{} +} + +/*PcloudPvminstancesVolumesPostForbidden handles this case with default header values. + +Forbidden +*/ +type PcloudPvminstancesVolumesPostForbidden struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesPostForbidden) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPostForbidden %+v", 403, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPostForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesPostNotFound creates a PcloudPvminstancesVolumesPostNotFound with default headers values +func NewPcloudPvminstancesVolumesPostNotFound() *PcloudPvminstancesVolumesPostNotFound { + return &PcloudPvminstancesVolumesPostNotFound{} +} + +/*PcloudPvminstancesVolumesPostNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudPvminstancesVolumesPostNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesPostNotFound) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPostNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesPostConflict creates a PcloudPvminstancesVolumesPostConflict with default headers values +func NewPcloudPvminstancesVolumesPostConflict() *PcloudPvminstancesVolumesPostConflict { + return &PcloudPvminstancesVolumesPostConflict{} +} + +/*PcloudPvminstancesVolumesPostConflict handles this case with default header values. + +Conflict +*/ +type PcloudPvminstancesVolumesPostConflict struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesPostInternalServerError creates a PcloudPvminstancesVolumesPostInternalServerError with default headers values +func NewPcloudPvminstancesVolumesPostInternalServerError() *PcloudPvminstancesVolumesPostInternalServerError { + return &PcloudPvminstancesVolumesPostInternalServerError{} +} + +/*PcloudPvminstancesVolumesPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesVolumesPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_put_parameters.go new file mode 100644 index 00000000000..3927ed0216b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_put_parameters.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudPvminstancesVolumesPutParams creates a new PcloudPvminstancesVolumesPutParams object +// with the default values initialized. +func NewPcloudPvminstancesVolumesPutParams() *PcloudPvminstancesVolumesPutParams { + var () + return &PcloudPvminstancesVolumesPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesVolumesPutParamsWithTimeout creates a new PcloudPvminstancesVolumesPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesVolumesPutParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesPutParams { + var () + return &PcloudPvminstancesVolumesPutParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesVolumesPutParamsWithContext creates a new PcloudPvminstancesVolumesPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesVolumesPutParamsWithContext(ctx context.Context) *PcloudPvminstancesVolumesPutParams { + var () + return &PcloudPvminstancesVolumesPutParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesVolumesPutParamsWithHTTPClient creates a new PcloudPvminstancesVolumesPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesVolumesPutParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesPutParams { + var () + return &PcloudPvminstancesVolumesPutParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesVolumesPutParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances volumes put operation typically these are written to a http.Request +*/ +type PcloudPvminstancesVolumesPutParams struct { + + /*Body + Parameters to update a volume attached to a PVMInstance + + */ + Body *models.PVMInstanceVolumeUpdate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + /*VolumeID + Volume ID + + */ + VolumeID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) WithContext(ctx context.Context) *PcloudPvminstancesVolumesPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) WithHTTPClient(client *http.Client) 
*PcloudPvminstancesVolumesPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) WithBody(body *models.PVMInstanceVolumeUpdate) *PcloudPvminstancesVolumesPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) SetBody(body *models.PVMInstanceVolumeUpdate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesVolumesPutParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesVolumesPutParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WithVolumeID adds the volumeID to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) WithVolumeID(volumeID string) *PcloudPvminstancesVolumesPutParams { + o.SetVolumeID(volumeID) + return o +} + +// SetVolumeID adds the volumeId to the pcloud pvminstances volumes put params +func (o *PcloudPvminstancesVolumesPutParams) SetVolumeID(volumeID string) { + o.VolumeID = volumeID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesVolumesPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + // path param volume_id + if err := r.SetPathParam("volume_id", o.VolumeID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_put_responses.go new file mode 100644 index 00000000000..1bf3e1b960c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_put_responses.go @@ -0,0 +1,137 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesVolumesPutReader is a Reader for the PcloudPvminstancesVolumesPut structure. +type PcloudPvminstancesVolumesPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudPvminstancesVolumesPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesVolumesPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesVolumesPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesVolumesPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesVolumesPutOK creates a PcloudPvminstancesVolumesPutOK with default headers values +func NewPcloudPvminstancesVolumesPutOK() *PcloudPvminstancesVolumesPutOK { + return &PcloudPvminstancesVolumesPutOK{} +} + +/*PcloudPvminstancesVolumesPutOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesVolumesPutOK struct { + Payload models.Object +} + +func (o *PcloudPvminstancesVolumesPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPutOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesPutBadRequest creates a PcloudPvminstancesVolumesPutBadRequest with default headers values +func NewPcloudPvminstancesVolumesPutBadRequest() *PcloudPvminstancesVolumesPutBadRequest { + return &PcloudPvminstancesVolumesPutBadRequest{} +} + +/*PcloudPvminstancesVolumesPutBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesVolumesPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesPutInternalServerError creates a PcloudPvminstancesVolumesPutInternalServerError with default headers values +func NewPcloudPvminstancesVolumesPutInternalServerError() *PcloudPvminstancesVolumesPutInternalServerError { + return &PcloudPvminstancesVolumesPutInternalServerError{} +} + +/*PcloudPvminstancesVolumesPutInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudPvminstancesVolumesPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}][%d] pcloudPvminstancesVolumesPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesVolumesPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_setboot_put_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_setboot_put_parameters.go new file mode 100644 index 00000000000..00d6420123e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_setboot_put_parameters.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudPvminstancesVolumesSetbootPutParams creates a new PcloudPvminstancesVolumesSetbootPutParams object +// with the default values initialized. 
+func NewPcloudPvminstancesVolumesSetbootPutParams() *PcloudPvminstancesVolumesSetbootPutParams { + var () + return &PcloudPvminstancesVolumesSetbootPutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudPvminstancesVolumesSetbootPutParamsWithTimeout creates a new PcloudPvminstancesVolumesSetbootPutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudPvminstancesVolumesSetbootPutParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesSetbootPutParams { + var () + return &PcloudPvminstancesVolumesSetbootPutParams{ + + timeout: timeout, + } +} + +// NewPcloudPvminstancesVolumesSetbootPutParamsWithContext creates a new PcloudPvminstancesVolumesSetbootPutParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudPvminstancesVolumesSetbootPutParamsWithContext(ctx context.Context) *PcloudPvminstancesVolumesSetbootPutParams { + var () + return &PcloudPvminstancesVolumesSetbootPutParams{ + + Context: ctx, + } +} + +// NewPcloudPvminstancesVolumesSetbootPutParamsWithHTTPClient creates a new PcloudPvminstancesVolumesSetbootPutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudPvminstancesVolumesSetbootPutParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesSetbootPutParams { + var () + return &PcloudPvminstancesVolumesSetbootPutParams{ + HTTPClient: client, + } +} + +/*PcloudPvminstancesVolumesSetbootPutParams contains all the parameters to send to the API endpoint +for the pcloud pvminstances volumes setboot put operation typically these are written to a http.Request +*/ +type PcloudPvminstancesVolumesSetbootPutParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*PvmInstanceID + PCloud PVM Instance ID + + */ + PvmInstanceID string + /*VolumeID + Volume ID + + */ + VolumeID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) WithTimeout(timeout time.Duration) *PcloudPvminstancesVolumesSetbootPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) WithContext(ctx context.Context) *PcloudPvminstancesVolumesSetbootPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesVolumesSetbootPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud pvminstances volumes 
setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) WithCloudInstanceID(cloudInstanceID string) *PcloudPvminstancesVolumesSetbootPutParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithPvmInstanceID adds the pvmInstanceID to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) WithPvmInstanceID(pvmInstanceID string) *PcloudPvminstancesVolumesSetbootPutParams { + o.SetPvmInstanceID(pvmInstanceID) + return o +} + +// SetPvmInstanceID adds the pvmInstanceId to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) SetPvmInstanceID(pvmInstanceID string) { + o.PvmInstanceID = pvmInstanceID +} + +// WithVolumeID adds the volumeID to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) WithVolumeID(volumeID string) *PcloudPvminstancesVolumesSetbootPutParams { + o.SetVolumeID(volumeID) + return o +} + +// SetVolumeID adds the volumeId to the pcloud pvminstances volumes setboot put params +func (o *PcloudPvminstancesVolumesSetbootPutParams) SetVolumeID(volumeID string) { + o.VolumeID = volumeID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudPvminstancesVolumesSetbootPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param pvm_instance_id + if err := r.SetPathParam("pvm_instance_id", o.PvmInstanceID); err != nil { + return err + } + + // path param volume_id + if err := r.SetPathParam("volume_id", o.VolumeID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_setboot_put_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_setboot_put_responses.go new file mode 100644 index 00000000000..03d1c0e5166 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_pvminstances_volumes_setboot_put_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudPvminstancesVolumesSetbootPutReader is a Reader for the PcloudPvminstancesVolumesSetbootPut structure. +type PcloudPvminstancesVolumesSetbootPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudPvminstancesVolumesSetbootPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudPvminstancesVolumesSetbootPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudPvminstancesVolumesSetbootPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudPvminstancesVolumesSetbootPutNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudPvminstancesVolumesSetbootPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudPvminstancesVolumesSetbootPutOK creates a PcloudPvminstancesVolumesSetbootPutOK with default headers values +func NewPcloudPvminstancesVolumesSetbootPutOK() *PcloudPvminstancesVolumesSetbootPutOK { + return &PcloudPvminstancesVolumesSetbootPutOK{} +} + +/*PcloudPvminstancesVolumesSetbootPutOK handles this case with default header values. + +OK +*/ +type PcloudPvminstancesVolumesSetbootPutOK struct { + Payload models.Object +} + +func (o *PcloudPvminstancesVolumesSetbootPutOK) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}/setboot][%d] pcloudPvminstancesVolumesSetbootPutOK %+v", 200, o.Payload) +} + +func (o *PcloudPvminstancesVolumesSetbootPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesSetbootPutBadRequest creates a PcloudPvminstancesVolumesSetbootPutBadRequest with default headers values +func NewPcloudPvminstancesVolumesSetbootPutBadRequest() *PcloudPvminstancesVolumesSetbootPutBadRequest { + return &PcloudPvminstancesVolumesSetbootPutBadRequest{} +} + +/*PcloudPvminstancesVolumesSetbootPutBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudPvminstancesVolumesSetbootPutBadRequest struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesSetbootPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}/setboot][%d] pcloudPvminstancesVolumesSetbootPutBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudPvminstancesVolumesSetbootPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesSetbootPutNotFound creates a PcloudPvminstancesVolumesSetbootPutNotFound with default headers values +func NewPcloudPvminstancesVolumesSetbootPutNotFound() *PcloudPvminstancesVolumesSetbootPutNotFound { + return &PcloudPvminstancesVolumesSetbootPutNotFound{} +} + +/*PcloudPvminstancesVolumesSetbootPutNotFound handles this case with default header values. + +Not Found +*/ +type PcloudPvminstancesVolumesSetbootPutNotFound struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesSetbootPutNotFound) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}/setboot][%d] pcloudPvminstancesVolumesSetbootPutNotFound %+v", 404, o.Payload) +} + +func (o *PcloudPvminstancesVolumesSetbootPutNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudPvminstancesVolumesSetbootPutInternalServerError creates a PcloudPvminstancesVolumesSetbootPutInternalServerError with default headers values +func NewPcloudPvminstancesVolumesSetbootPutInternalServerError() *PcloudPvminstancesVolumesSetbootPutInternalServerError { + return &PcloudPvminstancesVolumesSetbootPutInternalServerError{} +} + +/*PcloudPvminstancesVolumesSetbootPutInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudPvminstancesVolumesSetbootPutInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudPvminstancesVolumesSetbootPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /pcloud/v1/cloud-instances/{cloud_instance_id}/pvm-instances/{pvm_instance_id}/volumes/{volume_id}/setboot][%d] pcloudPvminstancesVolumesSetbootPutInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudPvminstancesVolumesSetbootPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clone_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clone_post_parameters.go new file mode 100644 index 00000000000..57f303cd0d9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clone_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudV2VolumesClonePostParams creates a new PcloudV2VolumesClonePostParams object +// with the default values initialized. 
+func NewPcloudV2VolumesClonePostParams() *PcloudV2VolumesClonePostParams { + var () + return &PcloudV2VolumesClonePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumesClonePostParamsWithTimeout creates a new PcloudV2VolumesClonePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumesClonePostParamsWithTimeout(timeout time.Duration) *PcloudV2VolumesClonePostParams { + var () + return &PcloudV2VolumesClonePostParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumesClonePostParamsWithContext creates a new PcloudV2VolumesClonePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumesClonePostParamsWithContext(ctx context.Context) *PcloudV2VolumesClonePostParams { + var () + return &PcloudV2VolumesClonePostParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumesClonePostParamsWithHTTPClient creates a new PcloudV2VolumesClonePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumesClonePostParamsWithHTTPClient(client *http.Client) *PcloudV2VolumesClonePostParams { + var () + return &PcloudV2VolumesClonePostParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumesClonePostParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumes clone post operation typically these are written to a http.Request +*/ +type PcloudV2VolumesClonePostParams struct { + + /*Body + Parameters for the cloning of volumes + + */ + Body *models.VolumesCloneAsyncRequest + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumes clone post params +func (o *PcloudV2VolumesClonePostParams) WithTimeout(timeout time.Duration) *PcloudV2VolumesClonePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumes clone post params +func (o *PcloudV2VolumesClonePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumes clone post params +func (o *PcloudV2VolumesClonePostParams) WithContext(ctx context.Context) *PcloudV2VolumesClonePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumes clone post params +func (o *PcloudV2VolumesClonePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumes clone post params +func (o *PcloudV2VolumesClonePostParams) WithHTTPClient(client *http.Client) *PcloudV2VolumesClonePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumes clone post params +func (o *PcloudV2VolumesClonePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud v2 volumes clone post params +func (o *PcloudV2VolumesClonePostParams) WithBody(body *models.VolumesCloneAsyncRequest) *PcloudV2VolumesClonePostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud v2 volumes clone post params +func (o *PcloudV2VolumesClonePostParams) SetBody(body *models.VolumesCloneAsyncRequest) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumes clone post params +func 
(o *PcloudV2VolumesClonePostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumesClonePostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 volumes clone post params +func (o *PcloudV2VolumesClonePostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumesClonePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clone_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clone_post_responses.go new file mode 100644 index 00000000000..ea8769645e4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clone_post_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumesClonePostReader is a Reader for the PcloudV2VolumesClonePost structure. +type PcloudV2VolumesClonePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudV2VolumesClonePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudV2VolumesClonePostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudV2VolumesClonePostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudV2VolumesClonePostNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumesClonePostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumesClonePostAccepted creates a PcloudV2VolumesClonePostAccepted with default headers values +func NewPcloudV2VolumesClonePostAccepted() *PcloudV2VolumesClonePostAccepted { + return &PcloudV2VolumesClonePostAccepted{} +} + +/*PcloudV2VolumesClonePostAccepted handles this case with default header values. 
+ +Accepted +*/ +type PcloudV2VolumesClonePostAccepted struct { + Payload *models.CloneTaskReference +} + +func (o *PcloudV2VolumesClonePostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone][%d] pcloudV2VolumesClonePostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudV2VolumesClonePostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloneTaskReference) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesClonePostBadRequest creates a PcloudV2VolumesClonePostBadRequest with default headers values +func NewPcloudV2VolumesClonePostBadRequest() *PcloudV2VolumesClonePostBadRequest { + return &PcloudV2VolumesClonePostBadRequest{} +} + +/*PcloudV2VolumesClonePostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudV2VolumesClonePostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesClonePostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone][%d] pcloudV2VolumesClonePostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudV2VolumesClonePostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesClonePostNotFound creates a PcloudV2VolumesClonePostNotFound with default headers values +func NewPcloudV2VolumesClonePostNotFound() *PcloudV2VolumesClonePostNotFound { + return &PcloudV2VolumesClonePostNotFound{} +} + +/*PcloudV2VolumesClonePostNotFound handles this case with default header values. + +Not Found +*/ +type PcloudV2VolumesClonePostNotFound struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesClonePostNotFound) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone][%d] pcloudV2VolumesClonePostNotFound %+v", 404, o.Payload) +} + +func (o *PcloudV2VolumesClonePostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesClonePostInternalServerError creates a PcloudV2VolumesClonePostInternalServerError with default headers values +func NewPcloudV2VolumesClonePostInternalServerError() *PcloudV2VolumesClonePostInternalServerError { + return &PcloudV2VolumesClonePostInternalServerError{} +} + +/*PcloudV2VolumesClonePostInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudV2VolumesClonePostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesClonePostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone][%d] pcloudV2VolumesClonePostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumesClonePostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clonetasks_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clonetasks_get_parameters.go new file mode 100644 index 00000000000..93b3aa56f83 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clonetasks_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudV2VolumesClonetasksGetParams creates a new PcloudV2VolumesClonetasksGetParams object +// with the default values initialized. +func NewPcloudV2VolumesClonetasksGetParams() *PcloudV2VolumesClonetasksGetParams { + var () + return &PcloudV2VolumesClonetasksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumesClonetasksGetParamsWithTimeout creates a new PcloudV2VolumesClonetasksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumesClonetasksGetParamsWithTimeout(timeout time.Duration) *PcloudV2VolumesClonetasksGetParams { + var () + return &PcloudV2VolumesClonetasksGetParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumesClonetasksGetParamsWithContext creates a new PcloudV2VolumesClonetasksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumesClonetasksGetParamsWithContext(ctx context.Context) *PcloudV2VolumesClonetasksGetParams { + var () + return &PcloudV2VolumesClonetasksGetParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumesClonetasksGetParamsWithHTTPClient creates a new PcloudV2VolumesClonetasksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumesClonetasksGetParamsWithHTTPClient(client *http.Client) *PcloudV2VolumesClonetasksGetParams { + var () + return &PcloudV2VolumesClonetasksGetParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumesClonetasksGetParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumes clonetasks get operation typically these are written to a http.Request +*/ +type PcloudV2VolumesClonetasksGetParams struct { + + /*CloneTaskID + Volumes Clone Task ID + + */ + CloneTaskID string + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context 
context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) WithTimeout(timeout time.Duration) *PcloudV2VolumesClonetasksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) WithContext(ctx context.Context) *PcloudV2VolumesClonetasksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) WithHTTPClient(client *http.Client) *PcloudV2VolumesClonetasksGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloneTaskID adds the cloneTaskID to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) WithCloneTaskID(cloneTaskID string) *PcloudV2VolumesClonetasksGetParams { + o.SetCloneTaskID(cloneTaskID) + return o +} + +// SetCloneTaskID adds the cloneTaskId to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) SetCloneTaskID(cloneTaskID string) { + o.CloneTaskID = cloneTaskID +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumesClonetasksGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 volumes clonetasks get params +func (o *PcloudV2VolumesClonetasksGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumesClonetasksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param clone_task_id + if err := r.SetPathParam("clone_task_id", o.CloneTaskID); err != nil { + return err + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clonetasks_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clonetasks_get_responses.go new file mode 100644 index 00000000000..ecb1be0fccd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_clonetasks_get_responses.go @@ -0,0 +1,211 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumesClonetasksGetReader is a Reader for the PcloudV2VolumesClonetasksGet structure. +type PcloudV2VolumesClonetasksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudV2VolumesClonetasksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudV2VolumesClonetasksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudV2VolumesClonetasksGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudV2VolumesClonetasksGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudV2VolumesClonetasksGetConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumesClonetasksGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumesClonetasksGetOK creates a PcloudV2VolumesClonetasksGetOK with default headers values +func NewPcloudV2VolumesClonetasksGetOK() *PcloudV2VolumesClonetasksGetOK { + return &PcloudV2VolumesClonetasksGetOK{} +} + +/*PcloudV2VolumesClonetasksGetOK handles this case with default header values. + +OK +*/ +type PcloudV2VolumesClonetasksGetOK struct { + Payload *models.CloneTaskStatus +} + +func (o *PcloudV2VolumesClonetasksGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone-tasks/{clone_task_id}][%d] pcloudV2VolumesClonetasksGetOK %+v", 200, o.Payload) +} + +func (o *PcloudV2VolumesClonetasksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CloneTaskStatus) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesClonetasksGetBadRequest creates a PcloudV2VolumesClonetasksGetBadRequest with default headers values +func NewPcloudV2VolumesClonetasksGetBadRequest() *PcloudV2VolumesClonetasksGetBadRequest { + return &PcloudV2VolumesClonetasksGetBadRequest{} +} + +/*PcloudV2VolumesClonetasksGetBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudV2VolumesClonetasksGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesClonetasksGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone-tasks/{clone_task_id}][%d] pcloudV2VolumesClonetasksGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudV2VolumesClonetasksGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesClonetasksGetNotFound creates a PcloudV2VolumesClonetasksGetNotFound with default headers values +func NewPcloudV2VolumesClonetasksGetNotFound() *PcloudV2VolumesClonetasksGetNotFound { + return &PcloudV2VolumesClonetasksGetNotFound{} +} + +/*PcloudV2VolumesClonetasksGetNotFound handles this case with default header values. + +Not Found +*/ +type PcloudV2VolumesClonetasksGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesClonetasksGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone-tasks/{clone_task_id}][%d] pcloudV2VolumesClonetasksGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudV2VolumesClonetasksGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesClonetasksGetConflict creates a PcloudV2VolumesClonetasksGetConflict with default headers values +func NewPcloudV2VolumesClonetasksGetConflict() *PcloudV2VolumesClonetasksGetConflict { + return &PcloudV2VolumesClonetasksGetConflict{} +} + +/*PcloudV2VolumesClonetasksGetConflict handles this case with default header values. + +Conflict +*/ +type PcloudV2VolumesClonetasksGetConflict struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesClonetasksGetConflict) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone-tasks/{clone_task_id}][%d] pcloudV2VolumesClonetasksGetConflict %+v", 409, o.Payload) +} + +func (o *PcloudV2VolumesClonetasksGetConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesClonetasksGetInternalServerError creates a PcloudV2VolumesClonetasksGetInternalServerError with default headers values +func NewPcloudV2VolumesClonetasksGetInternalServerError() *PcloudV2VolumesClonetasksGetInternalServerError { + return &PcloudV2VolumesClonetasksGetInternalServerError{} +} + +/*PcloudV2VolumesClonetasksGetInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudV2VolumesClonetasksGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesClonetasksGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes/clone-tasks/{clone_task_id}][%d] pcloudV2VolumesClonetasksGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumesClonetasksGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_post_parameters.go new file mode 100644 index 00000000000..8b845c01d70 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudV2VolumesPostParams creates a new PcloudV2VolumesPostParams object +// with the default values initialized. +func NewPcloudV2VolumesPostParams() *PcloudV2VolumesPostParams { + var () + return &PcloudV2VolumesPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumesPostParamsWithTimeout creates a new PcloudV2VolumesPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumesPostParamsWithTimeout(timeout time.Duration) *PcloudV2VolumesPostParams { + var () + return &PcloudV2VolumesPostParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumesPostParamsWithContext creates a new PcloudV2VolumesPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumesPostParamsWithContext(ctx context.Context) *PcloudV2VolumesPostParams { + var () + return &PcloudV2VolumesPostParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumesPostParamsWithHTTPClient creates a new PcloudV2VolumesPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumesPostParamsWithHTTPClient(client *http.Client) *PcloudV2VolumesPostParams { + var () + return &PcloudV2VolumesPostParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumesPostParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumes post operation typically these are written to a http.Request +*/ +type PcloudV2VolumesPostParams struct { + + /*Body + Parameters for creating multiple volumes + + */ + Body *models.MultiVolumesCreate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumes post params 
+func (o *PcloudV2VolumesPostParams) WithTimeout(timeout time.Duration) *PcloudV2VolumesPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumes post params +func (o *PcloudV2VolumesPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumes post params +func (o *PcloudV2VolumesPostParams) WithContext(ctx context.Context) *PcloudV2VolumesPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumes post params +func (o *PcloudV2VolumesPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumes post params +func (o *PcloudV2VolumesPostParams) WithHTTPClient(client *http.Client) *PcloudV2VolumesPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumes post params +func (o *PcloudV2VolumesPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud v2 volumes post params +func (o *PcloudV2VolumesPostParams) WithBody(body *models.MultiVolumesCreate) *PcloudV2VolumesPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud v2 volumes post params +func (o *PcloudV2VolumesPostParams) SetBody(body *models.MultiVolumesCreate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumes post params +func (o *PcloudV2VolumesPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumesPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 volumes post params +func (o *PcloudV2VolumesPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumesPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_post_responses.go new file mode 100644 index 00000000000..a63631c660d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumes_post_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumesPostReader is a Reader for the PcloudV2VolumesPost structure. +type PcloudV2VolumesPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *PcloudV2VolumesPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 201: + result := NewPcloudV2VolumesPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudV2VolumesPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudV2VolumesPostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumesPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumesPostCreated creates a PcloudV2VolumesPostCreated with default headers values +func NewPcloudV2VolumesPostCreated() *PcloudV2VolumesPostCreated { + return &PcloudV2VolumesPostCreated{} +} + +/*PcloudV2VolumesPostCreated handles this case with default header values. + +Created +*/ +type PcloudV2VolumesPostCreated struct { + Payload *models.Volumes +} + +func (o *PcloudV2VolumesPostCreated) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudV2VolumesPostCreated %+v", 201, o.Payload) +} + +func (o *PcloudV2VolumesPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Volumes) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesPostBadRequest creates a PcloudV2VolumesPostBadRequest with default headers values +func NewPcloudV2VolumesPostBadRequest() *PcloudV2VolumesPostBadRequest { + return &PcloudV2VolumesPostBadRequest{} +} + +/*PcloudV2VolumesPostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudV2VolumesPostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesPostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudV2VolumesPostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudV2VolumesPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesPostConflict creates a PcloudV2VolumesPostConflict with default headers values +func NewPcloudV2VolumesPostConflict() *PcloudV2VolumesPostConflict { + return &PcloudV2VolumesPostConflict{} +} + +/*PcloudV2VolumesPostConflict handles this case with default header values. 
+ +Conflict +*/ +type PcloudV2VolumesPostConflict struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesPostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudV2VolumesPostConflict %+v", 409, o.Payload) +} + +func (o *PcloudV2VolumesPostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesPostInternalServerError creates a PcloudV2VolumesPostInternalServerError with default headers values +func NewPcloudV2VolumesPostInternalServerError() *PcloudV2VolumesPostInternalServerError { + return &PcloudV2VolumesPostInternalServerError{} +} + +/*PcloudV2VolumesPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudV2VolumesPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes][%d] pcloudV2VolumesPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumesPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_cancel_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_cancel_post_parameters.go new file mode 100644 index 00000000000..c9c6ef715dc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_cancel_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudV2VolumescloneCancelPostParams creates a new PcloudV2VolumescloneCancelPostParams object +// with the default values initialized. 
+func NewPcloudV2VolumescloneCancelPostParams() *PcloudV2VolumescloneCancelPostParams { + var () + return &PcloudV2VolumescloneCancelPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumescloneCancelPostParamsWithTimeout creates a new PcloudV2VolumescloneCancelPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumescloneCancelPostParamsWithTimeout(timeout time.Duration) *PcloudV2VolumescloneCancelPostParams { + var () + return &PcloudV2VolumescloneCancelPostParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumescloneCancelPostParamsWithContext creates a new PcloudV2VolumescloneCancelPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumescloneCancelPostParamsWithContext(ctx context.Context) *PcloudV2VolumescloneCancelPostParams { + var () + return &PcloudV2VolumescloneCancelPostParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumescloneCancelPostParamsWithHTTPClient creates a new PcloudV2VolumescloneCancelPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumescloneCancelPostParamsWithHTTPClient(client *http.Client) *PcloudV2VolumescloneCancelPostParams { + var () + return &PcloudV2VolumescloneCancelPostParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumescloneCancelPostParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumesclone cancel post operation typically these are written to a http.Request +*/ +type PcloudV2VolumescloneCancelPostParams struct { + + /*Body + Parameters for cancelling a volumes-clone request + + */ + Body *models.VolumesCloneCancel + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*VolumesCloneID + Volumes Clone ID + + */ + VolumesCloneID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) WithTimeout(timeout time.Duration) *PcloudV2VolumescloneCancelPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) WithContext(ctx context.Context) *PcloudV2VolumescloneCancelPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) WithHTTPClient(client *http.Client) *PcloudV2VolumescloneCancelPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) WithBody(body *models.VolumesCloneCancel) *PcloudV2VolumescloneCancelPostParams { + 
o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) SetBody(body *models.VolumesCloneCancel) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumescloneCancelPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithVolumesCloneID adds the volumesCloneID to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) WithVolumesCloneID(volumesCloneID string) *PcloudV2VolumescloneCancelPostParams { + o.SetVolumesCloneID(volumesCloneID) + return o +} + +// SetVolumesCloneID adds the volumesCloneId to the pcloud v2 volumesclone cancel post params +func (o *PcloudV2VolumescloneCancelPostParams) SetVolumesCloneID(volumesCloneID string) { + o.VolumesCloneID = volumesCloneID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumescloneCancelPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param volumes_clone_id + if err := r.SetPathParam("volumes_clone_id", o.VolumesCloneID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_cancel_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_cancel_post_responses.go new file mode 100644 index 00000000000..258338e7507 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_cancel_post_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumescloneCancelPostReader is a Reader for the PcloudV2VolumescloneCancelPost structure. +type PcloudV2VolumescloneCancelPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
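+// The generated client returns the 202 case below as its result and the 4xx
+// and 5xx cases as the error (each response type implements error through its
+// Error() method; undocumented status codes surface as *runtime.APIError). A
+// caller-side sketch, assuming a service client wrapper of the usual
+// go-swagger shape (the wrapper itself is not part of this file):
+//
+//	resp, err := client.PcloudV2VolumescloneCancelPost(params)
+//	if err != nil {
+//		switch e := err.(type) {
+//		case *PcloudV2VolumescloneCancelPostNotFound: // 404
+//			log.Printf("volumes-clone request not found: %+v", e.Payload)
+//		case *PcloudV2VolumescloneCancelPostInternalServerError: // 500
+//			log.Printf("server-side failure: %+v", e.Payload)
+//		default:
+//			log.Printf("unexpected error: %v", err)
+//		}
+//		return err
+//	}
+//	log.Printf("cancel accepted; clone state: %+v", resp.Payload)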
+func (o *PcloudV2VolumescloneCancelPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudV2VolumescloneCancelPostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewPcloudV2VolumescloneCancelPostNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumescloneCancelPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumescloneCancelPostAccepted creates a PcloudV2VolumescloneCancelPostAccepted with default headers values +func NewPcloudV2VolumescloneCancelPostAccepted() *PcloudV2VolumescloneCancelPostAccepted { + return &PcloudV2VolumescloneCancelPostAccepted{} +} + +/*PcloudV2VolumescloneCancelPostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudV2VolumescloneCancelPostAccepted struct { + Payload *models.VolumesClone +} + +func (o *PcloudV2VolumescloneCancelPostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/cancel][%d] pcloudV2VolumescloneCancelPostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudV2VolumescloneCancelPostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.VolumesClone) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneCancelPostNotFound creates a PcloudV2VolumescloneCancelPostNotFound with default headers values +func NewPcloudV2VolumescloneCancelPostNotFound() *PcloudV2VolumescloneCancelPostNotFound { + return &PcloudV2VolumescloneCancelPostNotFound{} +} + +/*PcloudV2VolumescloneCancelPostNotFound handles this case with default header values. + +Not Found +*/ +type PcloudV2VolumescloneCancelPostNotFound struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneCancelPostNotFound) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/cancel][%d] pcloudV2VolumescloneCancelPostNotFound %+v", 404, o.Payload) +} + +func (o *PcloudV2VolumescloneCancelPostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneCancelPostInternalServerError creates a PcloudV2VolumescloneCancelPostInternalServerError with default headers values +func NewPcloudV2VolumescloneCancelPostInternalServerError() *PcloudV2VolumescloneCancelPostInternalServerError { + return &PcloudV2VolumescloneCancelPostInternalServerError{} +} + +/*PcloudV2VolumescloneCancelPostInternalServerError handles this case with default header values. 
+ +Internal Server Error +*/ +type PcloudV2VolumescloneCancelPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneCancelPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/cancel][%d] pcloudV2VolumescloneCancelPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumescloneCancelPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_delete_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_delete_parameters.go new file mode 100644 index 00000000000..49a56a1fa1b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_delete_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudV2VolumescloneDeleteParams creates a new PcloudV2VolumescloneDeleteParams object +// with the default values initialized. +func NewPcloudV2VolumescloneDeleteParams() *PcloudV2VolumescloneDeleteParams { + var () + return &PcloudV2VolumescloneDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumescloneDeleteParamsWithTimeout creates a new PcloudV2VolumescloneDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumescloneDeleteParamsWithTimeout(timeout time.Duration) *PcloudV2VolumescloneDeleteParams { + var () + return &PcloudV2VolumescloneDeleteParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumescloneDeleteParamsWithContext creates a new PcloudV2VolumescloneDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumescloneDeleteParamsWithContext(ctx context.Context) *PcloudV2VolumescloneDeleteParams { + var () + return &PcloudV2VolumescloneDeleteParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumescloneDeleteParamsWithHTTPClient creates a new PcloudV2VolumescloneDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumescloneDeleteParamsWithHTTPClient(client *http.Client) *PcloudV2VolumescloneDeleteParams { + var () + return &PcloudV2VolumescloneDeleteParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumescloneDeleteParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumesclone delete operation typically these are written to a http.Request +*/ +type PcloudV2VolumescloneDeleteParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*VolumesCloneID + Volumes Clone ID + + */ + VolumesCloneID string + + timeout time.Duration + Context 
context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) WithTimeout(timeout time.Duration) *PcloudV2VolumescloneDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) WithContext(ctx context.Context) *PcloudV2VolumescloneDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) WithHTTPClient(client *http.Client) *PcloudV2VolumescloneDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumescloneDeleteParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithVolumesCloneID adds the volumesCloneID to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) WithVolumesCloneID(volumesCloneID string) *PcloudV2VolumescloneDeleteParams { + o.SetVolumesCloneID(volumesCloneID) + return o +} + +// SetVolumesCloneID adds the volumesCloneId to the pcloud v2 volumesclone delete params +func (o *PcloudV2VolumescloneDeleteParams) SetVolumesCloneID(volumesCloneID string) { + o.VolumesCloneID = volumesCloneID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumescloneDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param volumes_clone_id + if err := r.SetPathParam("volumes_clone_id", o.VolumesCloneID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_delete_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_delete_responses.go new file mode 100644 index 00000000000..f16230f29a7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_delete_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumescloneDeleteReader is a Reader for the PcloudV2VolumescloneDelete structure. +type PcloudV2VolumescloneDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudV2VolumescloneDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudV2VolumescloneDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudV2VolumescloneDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudV2VolumescloneDeleteNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumescloneDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumescloneDeleteOK creates a PcloudV2VolumescloneDeleteOK with default headers values +func NewPcloudV2VolumescloneDeleteOK() *PcloudV2VolumescloneDeleteOK { + return &PcloudV2VolumescloneDeleteOK{} +} + +/*PcloudV2VolumescloneDeleteOK handles this case with default header values. + +OK +*/ +type PcloudV2VolumescloneDeleteOK struct { + Payload models.Object +} + +func (o *PcloudV2VolumescloneDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}][%d] pcloudV2VolumescloneDeleteOK %+v", 200, o.Payload) +} + +func (o *PcloudV2VolumescloneDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneDeleteBadRequest creates a PcloudV2VolumescloneDeleteBadRequest with default headers values +func NewPcloudV2VolumescloneDeleteBadRequest() *PcloudV2VolumescloneDeleteBadRequest { + return &PcloudV2VolumescloneDeleteBadRequest{} +} + +/*PcloudV2VolumescloneDeleteBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudV2VolumescloneDeleteBadRequest struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}][%d] pcloudV2VolumescloneDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudV2VolumescloneDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneDeleteNotFound creates a PcloudV2VolumescloneDeleteNotFound with default headers values +func NewPcloudV2VolumescloneDeleteNotFound() *PcloudV2VolumescloneDeleteNotFound { + return &PcloudV2VolumescloneDeleteNotFound{} +} + +/*PcloudV2VolumescloneDeleteNotFound handles this case with default header values. + +Not Found +*/ +type PcloudV2VolumescloneDeleteNotFound struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneDeleteNotFound) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}][%d] pcloudV2VolumescloneDeleteNotFound %+v", 404, o.Payload) +} + +func (o *PcloudV2VolumescloneDeleteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneDeleteInternalServerError creates a PcloudV2VolumescloneDeleteInternalServerError with default headers values +func NewPcloudV2VolumescloneDeleteInternalServerError() *PcloudV2VolumescloneDeleteInternalServerError { + return &PcloudV2VolumescloneDeleteInternalServerError{} +} + +/*PcloudV2VolumescloneDeleteInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudV2VolumescloneDeleteInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}][%d] pcloudV2VolumescloneDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumescloneDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_execute_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_execute_post_parameters.go new file mode 100644 index 00000000000..222da1c3d72 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_execute_post_parameters.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudV2VolumescloneExecutePostParams creates a new PcloudV2VolumescloneExecutePostParams object +// with the default values initialized. +func NewPcloudV2VolumescloneExecutePostParams() *PcloudV2VolumescloneExecutePostParams { + var () + return &PcloudV2VolumescloneExecutePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumescloneExecutePostParamsWithTimeout creates a new PcloudV2VolumescloneExecutePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumescloneExecutePostParamsWithTimeout(timeout time.Duration) *PcloudV2VolumescloneExecutePostParams { + var () + return &PcloudV2VolumescloneExecutePostParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumescloneExecutePostParamsWithContext creates a new PcloudV2VolumescloneExecutePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumescloneExecutePostParamsWithContext(ctx context.Context) *PcloudV2VolumescloneExecutePostParams { + var () + return &PcloudV2VolumescloneExecutePostParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumescloneExecutePostParamsWithHTTPClient creates a new PcloudV2VolumescloneExecutePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumescloneExecutePostParamsWithHTTPClient(client *http.Client) *PcloudV2VolumescloneExecutePostParams { + var () + return &PcloudV2VolumescloneExecutePostParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumescloneExecutePostParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumesclone execute post operation typically these are written to a http.Request +*/ +type PcloudV2VolumescloneExecutePostParams struct { + + /*Body + Parameters for the cloning of volumes + + */ + Body *models.VolumesCloneExecute + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*VolumesCloneID + Volumes Clone ID + + */ + VolumesCloneID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) WithTimeout(timeout time.Duration) *PcloudV2VolumescloneExecutePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) WithContext(ctx context.Context) *PcloudV2VolumescloneExecutePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) 
WithHTTPClient(client *http.Client) *PcloudV2VolumescloneExecutePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) WithBody(body *models.VolumesCloneExecute) *PcloudV2VolumescloneExecutePostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) SetBody(body *models.VolumesCloneExecute) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumescloneExecutePostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithVolumesCloneID adds the volumesCloneID to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) WithVolumesCloneID(volumesCloneID string) *PcloudV2VolumescloneExecutePostParams { + o.SetVolumesCloneID(volumesCloneID) + return o +} + +// SetVolumesCloneID adds the volumesCloneId to the pcloud v2 volumesclone execute post params +func (o *PcloudV2VolumescloneExecutePostParams) SetVolumesCloneID(volumesCloneID string) { + o.VolumesCloneID = volumesCloneID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumescloneExecutePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param volumes_clone_id + if err := r.SetPathParam("volumes_clone_id", o.VolumesCloneID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_execute_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_execute_post_responses.go new file mode 100644 index 00000000000..54efc768ad3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_execute_post_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumescloneExecutePostReader is a Reader for the PcloudV2VolumescloneExecutePost structure. 
+type PcloudV2VolumescloneExecutePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudV2VolumescloneExecutePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudV2VolumescloneExecutePostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudV2VolumescloneExecutePostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudV2VolumescloneExecutePostNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumescloneExecutePostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumescloneExecutePostAccepted creates a PcloudV2VolumescloneExecutePostAccepted with default headers values +func NewPcloudV2VolumescloneExecutePostAccepted() *PcloudV2VolumescloneExecutePostAccepted { + return &PcloudV2VolumescloneExecutePostAccepted{} +} + +/*PcloudV2VolumescloneExecutePostAccepted handles this case with default header values. + +Accepted +*/ +type PcloudV2VolumescloneExecutePostAccepted struct { + Payload *models.VolumesClone +} + +func (o *PcloudV2VolumescloneExecutePostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/execute][%d] pcloudV2VolumescloneExecutePostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudV2VolumescloneExecutePostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.VolumesClone) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneExecutePostBadRequest creates a PcloudV2VolumescloneExecutePostBadRequest with default headers values +func NewPcloudV2VolumescloneExecutePostBadRequest() *PcloudV2VolumescloneExecutePostBadRequest { + return &PcloudV2VolumescloneExecutePostBadRequest{} +} + +/*PcloudV2VolumescloneExecutePostBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type PcloudV2VolumescloneExecutePostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneExecutePostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/execute][%d] pcloudV2VolumescloneExecutePostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudV2VolumescloneExecutePostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneExecutePostNotFound creates a PcloudV2VolumescloneExecutePostNotFound with default headers values +func NewPcloudV2VolumescloneExecutePostNotFound() *PcloudV2VolumescloneExecutePostNotFound { + return &PcloudV2VolumescloneExecutePostNotFound{} +} + +/*PcloudV2VolumescloneExecutePostNotFound handles this case with default header values. + +Not Found +*/ +type PcloudV2VolumescloneExecutePostNotFound struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneExecutePostNotFound) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/execute][%d] pcloudV2VolumescloneExecutePostNotFound %+v", 404, o.Payload) +} + +func (o *PcloudV2VolumescloneExecutePostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneExecutePostInternalServerError creates a PcloudV2VolumescloneExecutePostInternalServerError with default headers values +func NewPcloudV2VolumescloneExecutePostInternalServerError() *PcloudV2VolumescloneExecutePostInternalServerError { + return &PcloudV2VolumescloneExecutePostInternalServerError{} +} + +/*PcloudV2VolumescloneExecutePostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudV2VolumescloneExecutePostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneExecutePostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/execute][%d] pcloudV2VolumescloneExecutePostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumescloneExecutePostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_get_parameters.go new file mode 100644 index 00000000000..47fc5eeb8e1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudV2VolumescloneGetParams creates a new PcloudV2VolumescloneGetParams object +// with the default values initialized. +func NewPcloudV2VolumescloneGetParams() *PcloudV2VolumescloneGetParams { + var () + return &PcloudV2VolumescloneGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumescloneGetParamsWithTimeout creates a new PcloudV2VolumescloneGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumescloneGetParamsWithTimeout(timeout time.Duration) *PcloudV2VolumescloneGetParams { + var () + return &PcloudV2VolumescloneGetParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumescloneGetParamsWithContext creates a new PcloudV2VolumescloneGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumescloneGetParamsWithContext(ctx context.Context) *PcloudV2VolumescloneGetParams { + var () + return &PcloudV2VolumescloneGetParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumescloneGetParamsWithHTTPClient creates a new PcloudV2VolumescloneGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumescloneGetParamsWithHTTPClient(client *http.Client) *PcloudV2VolumescloneGetParams { + var () + return &PcloudV2VolumescloneGetParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumescloneGetParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumesclone get operation typically these are written to a http.Request +*/ +type PcloudV2VolumescloneGetParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*VolumesCloneID + Volumes Clone ID + + */ + VolumesCloneID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) WithTimeout(timeout time.Duration) *PcloudV2VolumescloneGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) WithContext(ctx context.Context) *PcloudV2VolumescloneGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) WithHTTPClient(client *http.Client) *PcloudV2VolumescloneGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) 
WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumescloneGetParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithVolumesCloneID adds the volumesCloneID to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) WithVolumesCloneID(volumesCloneID string) *PcloudV2VolumescloneGetParams { + o.SetVolumesCloneID(volumesCloneID) + return o +} + +// SetVolumesCloneID adds the volumesCloneId to the pcloud v2 volumesclone get params +func (o *PcloudV2VolumescloneGetParams) SetVolumesCloneID(volumesCloneID string) { + o.VolumesCloneID = volumesCloneID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumescloneGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param volumes_clone_id + if err := r.SetPathParam("volumes_clone_id", o.VolumesCloneID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_get_responses.go new file mode 100644 index 00000000000..8122b8b9d79 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_get_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumescloneGetReader is a Reader for the PcloudV2VolumescloneGet structure. +type PcloudV2VolumescloneGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
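+// Usage sketch (illustrative): fetching the detail record for a single
+// volumes-clone request; cloudInstanceID and volumesCloneID stand in for
+// caller-supplied values. On success the client yields
+// *PcloudV2VolumescloneGetOK (defined below), whose Payload is a
+// *models.VolumesCloneDetail.
+//
+//	params := NewPcloudV2VolumescloneGetParamsWithTimeout(time.Minute).
+//		WithCloudInstanceID(cloudInstanceID).
+//		WithVolumesCloneID(volumesCloneID)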
+func (o *PcloudV2VolumescloneGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudV2VolumescloneGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudV2VolumescloneGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudV2VolumescloneGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumescloneGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumescloneGetOK creates a PcloudV2VolumescloneGetOK with default headers values +func NewPcloudV2VolumescloneGetOK() *PcloudV2VolumescloneGetOK { + return &PcloudV2VolumescloneGetOK{} +} + +/*PcloudV2VolumescloneGetOK handles this case with default header values. + +OK +*/ +type PcloudV2VolumescloneGetOK struct { + Payload *models.VolumesCloneDetail +} + +func (o *PcloudV2VolumescloneGetOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}][%d] pcloudV2VolumescloneGetOK %+v", 200, o.Payload) +} + +func (o *PcloudV2VolumescloneGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.VolumesCloneDetail) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneGetBadRequest creates a PcloudV2VolumescloneGetBadRequest with default headers values +func NewPcloudV2VolumescloneGetBadRequest() *PcloudV2VolumescloneGetBadRequest { + return &PcloudV2VolumescloneGetBadRequest{} +} + +/*PcloudV2VolumescloneGetBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudV2VolumescloneGetBadRequest struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneGetBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}][%d] pcloudV2VolumescloneGetBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudV2VolumescloneGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneGetNotFound creates a PcloudV2VolumescloneGetNotFound with default headers values +func NewPcloudV2VolumescloneGetNotFound() *PcloudV2VolumescloneGetNotFound { + return &PcloudV2VolumescloneGetNotFound{} +} + +/*PcloudV2VolumescloneGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudV2VolumescloneGetNotFound struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneGetNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}][%d] pcloudV2VolumescloneGetNotFound %+v", 404, o.Payload) +} + +func (o *PcloudV2VolumescloneGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneGetInternalServerError creates a PcloudV2VolumescloneGetInternalServerError with default headers values +func NewPcloudV2VolumescloneGetInternalServerError() *PcloudV2VolumescloneGetInternalServerError { + return &PcloudV2VolumescloneGetInternalServerError{} +} + +/*PcloudV2VolumescloneGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudV2VolumescloneGetInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}][%d] pcloudV2VolumescloneGetInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumescloneGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_getall_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_getall_parameters.go new file mode 100644 index 00000000000..6e231a9a546 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_getall_parameters.go @@ -0,0 +1,177 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudV2VolumescloneGetallParams creates a new PcloudV2VolumescloneGetallParams object +// with the default values initialized. 
+func NewPcloudV2VolumescloneGetallParams() *PcloudV2VolumescloneGetallParams { + var () + return &PcloudV2VolumescloneGetallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumescloneGetallParamsWithTimeout creates a new PcloudV2VolumescloneGetallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumescloneGetallParamsWithTimeout(timeout time.Duration) *PcloudV2VolumescloneGetallParams { + var () + return &PcloudV2VolumescloneGetallParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumescloneGetallParamsWithContext creates a new PcloudV2VolumescloneGetallParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumescloneGetallParamsWithContext(ctx context.Context) *PcloudV2VolumescloneGetallParams { + var () + return &PcloudV2VolumescloneGetallParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumescloneGetallParamsWithHTTPClient creates a new PcloudV2VolumescloneGetallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumescloneGetallParamsWithHTTPClient(client *http.Client) *PcloudV2VolumescloneGetallParams { + var () + return &PcloudV2VolumescloneGetallParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumescloneGetallParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumesclone getall operation typically these are written to a http.Request +*/ +type PcloudV2VolumescloneGetallParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*Filter + volumes-clone filter to limit list items: + prepare - includes status values (preparing, prepared) + start - includes status values (starting, available) + execute - includes status values (executing, available-rollback) + cancel - includes status values (cancelling) + completed - includes status values (completed) + failed - includes status values (failed) + cancelled - includes status values (cancelled) + finalized - included status values (completed, failed, cancelled) + + + */ + Filter *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) WithTimeout(timeout time.Duration) *PcloudV2VolumescloneGetallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) WithContext(ctx context.Context) *PcloudV2VolumescloneGetallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) WithHTTPClient(client *http.Client) *PcloudV2VolumescloneGetallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// 
WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumescloneGetallParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithFilter adds the filter to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) WithFilter(filter *string) *PcloudV2VolumescloneGetallParams { + o.SetFilter(filter) + return o +} + +// SetFilter adds the filter to the pcloud v2 volumesclone getall params +func (o *PcloudV2VolumescloneGetallParams) SetFilter(filter *string) { + o.Filter = filter +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumescloneGetallParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if o.Filter != nil { + + // query param filter + var qrFilter string + if o.Filter != nil { + qrFilter = *o.Filter + } + qFilter := qrFilter + if qFilter != "" { + if err := r.SetQueryParam("filter", qFilter); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_getall_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_getall_responses.go new file mode 100644 index 00000000000..dcf41fb26ac --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_getall_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumescloneGetallReader is a Reader for the PcloudV2VolumescloneGetall structure. +type PcloudV2VolumescloneGetallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
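+// Usage sketch (illustrative): listing only finished volumes-clone requests
+// via the filter values documented on PcloudV2VolumescloneGetallParams.
+// Note that WriteToRequest deliberately drops an empty filter string, so an
+// unset or empty filter returns the unfiltered list.
+//
+//	filter := "finalized" // completed, failed, or cancelled requests
+//	params := NewPcloudV2VolumescloneGetallParams().
+//		WithCloudInstanceID(cloudInstanceID).
+//		WithFilter(&filter)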
+func (o *PcloudV2VolumescloneGetallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudV2VolumescloneGetallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudV2VolumescloneGetallBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudV2VolumescloneGetallNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumescloneGetallInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumescloneGetallOK creates a PcloudV2VolumescloneGetallOK with default headers values +func NewPcloudV2VolumescloneGetallOK() *PcloudV2VolumescloneGetallOK { + return &PcloudV2VolumescloneGetallOK{} +} + +/*PcloudV2VolumescloneGetallOK handles this case with default header values. + +OK +*/ +type PcloudV2VolumescloneGetallOK struct { + Payload *models.VolumesClones +} + +func (o *PcloudV2VolumescloneGetallOK) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone][%d] pcloudV2VolumescloneGetallOK %+v", 200, o.Payload) +} + +func (o *PcloudV2VolumescloneGetallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.VolumesClones) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneGetallBadRequest creates a PcloudV2VolumescloneGetallBadRequest with default headers values +func NewPcloudV2VolumescloneGetallBadRequest() *PcloudV2VolumescloneGetallBadRequest { + return &PcloudV2VolumescloneGetallBadRequest{} +} + +/*PcloudV2VolumescloneGetallBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudV2VolumescloneGetallBadRequest struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneGetallBadRequest) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone][%d] pcloudV2VolumescloneGetallBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudV2VolumescloneGetallBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneGetallNotFound creates a PcloudV2VolumescloneGetallNotFound with default headers values +func NewPcloudV2VolumescloneGetallNotFound() *PcloudV2VolumescloneGetallNotFound { + return &PcloudV2VolumescloneGetallNotFound{} +} + +/*PcloudV2VolumescloneGetallNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudV2VolumescloneGetallNotFound struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneGetallNotFound) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone][%d] pcloudV2VolumescloneGetallNotFound %+v", 404, o.Payload) +} + +func (o *PcloudV2VolumescloneGetallNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneGetallInternalServerError creates a PcloudV2VolumescloneGetallInternalServerError with default headers values +func NewPcloudV2VolumescloneGetallInternalServerError() *PcloudV2VolumescloneGetallInternalServerError { + return &PcloudV2VolumescloneGetallInternalServerError{} +} + +/*PcloudV2VolumescloneGetallInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudV2VolumescloneGetallInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneGetallInternalServerError) Error() string { + return fmt.Sprintf("[GET /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone][%d] pcloudV2VolumescloneGetallInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumescloneGetallInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_post_parameters.go new file mode 100644 index 00000000000..46423f79ccd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudV2VolumesclonePostParams creates a new PcloudV2VolumesclonePostParams object +// with the default values initialized. 
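Because ReadResponse above hands every non-2xx result back as the error value (each generated response struct implements Error()), a caller can branch on the concrete type. A short sketch, reusing the hypothetical call from the previous example:

	ok, err := api.PCloudVolumes.PcloudV2VolumescloneGetall(params, auth)
	switch e := err.(type) {
	case nil:
		fmt.Printf("payload: %+v\n", ok.Payload) // 200 OK
	case *p_cloud_volumes.PcloudV2VolumescloneGetallNotFound:
		fmt.Printf("cloud instance not found: %+v\n", e.Payload) // 404
	case *p_cloud_volumes.PcloudV2VolumescloneGetallBadRequest:
		fmt.Printf("bad request: %+v\n", e.Payload) // 400
	default:
		log.Fatal(err) // 500 or a transport-level failure
	}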
+func NewPcloudV2VolumesclonePostParams() *PcloudV2VolumesclonePostParams { + var () + return &PcloudV2VolumesclonePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumesclonePostParamsWithTimeout creates a new PcloudV2VolumesclonePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumesclonePostParamsWithTimeout(timeout time.Duration) *PcloudV2VolumesclonePostParams { + var () + return &PcloudV2VolumesclonePostParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumesclonePostParamsWithContext creates a new PcloudV2VolumesclonePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumesclonePostParamsWithContext(ctx context.Context) *PcloudV2VolumesclonePostParams { + var () + return &PcloudV2VolumesclonePostParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumesclonePostParamsWithHTTPClient creates a new PcloudV2VolumesclonePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumesclonePostParamsWithHTTPClient(client *http.Client) *PcloudV2VolumesclonePostParams { + var () + return &PcloudV2VolumesclonePostParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumesclonePostParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumesclone post operation typically these are written to a http.Request +*/ +type PcloudV2VolumesclonePostParams struct { + + /*Body + Parameters for preparing a set of volumes to be cloned + + */ + Body *models.VolumesCloneCreate + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumesclone post params +func (o *PcloudV2VolumesclonePostParams) WithTimeout(timeout time.Duration) *PcloudV2VolumesclonePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumesclone post params +func (o *PcloudV2VolumesclonePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumesclone post params +func (o *PcloudV2VolumesclonePostParams) WithContext(ctx context.Context) *PcloudV2VolumesclonePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumesclone post params +func (o *PcloudV2VolumesclonePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumesclone post params +func (o *PcloudV2VolumesclonePostParams) WithHTTPClient(client *http.Client) *PcloudV2VolumesclonePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumesclone post params +func (o *PcloudV2VolumesclonePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud v2 volumesclone post params +func (o *PcloudV2VolumesclonePostParams) WithBody(body *models.VolumesCloneCreate) *PcloudV2VolumesclonePostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud v2 volumesclone post params +func (o *PcloudV2VolumesclonePostParams) SetBody(body *models.VolumesCloneCreate) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumesclone post params +func (o 
*PcloudV2VolumesclonePostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumesclonePostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 volumesclone post params +func (o *PcloudV2VolumesclonePostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumesclonePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_post_responses.go new file mode 100644 index 00000000000..0a26fb0797a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_post_responses.go @@ -0,0 +1,211 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumesclonePostReader is a Reader for the PcloudV2VolumesclonePost structure. +type PcloudV2VolumesclonePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudV2VolumesclonePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewPcloudV2VolumesclonePostAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudV2VolumesclonePostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 403: + result := NewPcloudV2VolumesclonePostForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 404: + result := NewPcloudV2VolumesclonePostNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumesclonePostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumesclonePostAccepted creates a PcloudV2VolumesclonePostAccepted with default headers values +func NewPcloudV2VolumesclonePostAccepted() *PcloudV2VolumesclonePostAccepted { + return &PcloudV2VolumesclonePostAccepted{} +} + +/*PcloudV2VolumesclonePostAccepted handles this case with default header values. 
+ +Accepted +*/ +type PcloudV2VolumesclonePostAccepted struct { + Payload *models.VolumesClone +} + +func (o *PcloudV2VolumesclonePostAccepted) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone][%d] pcloudV2VolumesclonePostAccepted %+v", 202, o.Payload) +} + +func (o *PcloudV2VolumesclonePostAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.VolumesClone) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesclonePostBadRequest creates a PcloudV2VolumesclonePostBadRequest with default headers values +func NewPcloudV2VolumesclonePostBadRequest() *PcloudV2VolumesclonePostBadRequest { + return &PcloudV2VolumesclonePostBadRequest{} +} + +/*PcloudV2VolumesclonePostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudV2VolumesclonePostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesclonePostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone][%d] pcloudV2VolumesclonePostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudV2VolumesclonePostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesclonePostForbidden creates a PcloudV2VolumesclonePostForbidden with default headers values +func NewPcloudV2VolumesclonePostForbidden() *PcloudV2VolumesclonePostForbidden { + return &PcloudV2VolumesclonePostForbidden{} +} + +/*PcloudV2VolumesclonePostForbidden handles this case with default header values. + +Forbidden +*/ +type PcloudV2VolumesclonePostForbidden struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesclonePostForbidden) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone][%d] pcloudV2VolumesclonePostForbidden %+v", 403, o.Payload) +} + +func (o *PcloudV2VolumesclonePostForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesclonePostNotFound creates a PcloudV2VolumesclonePostNotFound with default headers values +func NewPcloudV2VolumesclonePostNotFound() *PcloudV2VolumesclonePostNotFound { + return &PcloudV2VolumesclonePostNotFound{} +} + +/*PcloudV2VolumesclonePostNotFound handles this case with default header values. 
+ +Not Found +*/ +type PcloudV2VolumesclonePostNotFound struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesclonePostNotFound) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone][%d] pcloudV2VolumesclonePostNotFound %+v", 404, o.Payload) +} + +func (o *PcloudV2VolumesclonePostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumesclonePostInternalServerError creates a PcloudV2VolumesclonePostInternalServerError with default headers values +func NewPcloudV2VolumesclonePostInternalServerError() *PcloudV2VolumesclonePostInternalServerError { + return &PcloudV2VolumesclonePostInternalServerError{} +} + +/*PcloudV2VolumesclonePostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudV2VolumesclonePostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumesclonePostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone][%d] pcloudV2VolumesclonePostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumesclonePostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_start_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_start_post_parameters.go new file mode 100644 index 00000000000..240393ba15e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_start_post_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewPcloudV2VolumescloneStartPostParams creates a new PcloudV2VolumescloneStartPostParams object +// with the default values initialized. 
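The v2 create operation answers 202 Accepted with a models.VolumesClone describing the pending clone request. A sketch of preparing a clone of two volumes, continuing the earlier example; it additionally imports github.com/go-openapi/swag and the power models package, and the VolumesCloneCreate field names are assumptions taken from the service spec, which is not part of this hunk:

	// Field names on models.VolumesCloneCreate are assumed from the spec.
	body := &models.VolumesCloneCreate{
		Name:      swag.String("pre-upgrade-clone"),
		VolumeIDs: []string{"VOLUME_ID_1", "VOLUME_ID_2"},
	}
	create := p_cloud_volumes.NewPcloudV2VolumesclonePostParams().
		WithCloudInstanceID("CLOUD_INSTANCE_ID").
		WithBody(body)
	accepted, err := api.PCloudVolumes.PcloudV2VolumesclonePost(create, auth)
	if err != nil {
		log.Fatal(err)
	}
	// 202 Accepted: Payload is a *models.VolumesClone describing the request.
	fmt.Printf("clone request accepted: %+v\n", accepted.Payload)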
+func NewPcloudV2VolumescloneStartPostParams() *PcloudV2VolumescloneStartPostParams { + var () + return &PcloudV2VolumescloneStartPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudV2VolumescloneStartPostParamsWithTimeout creates a new PcloudV2VolumescloneStartPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudV2VolumescloneStartPostParamsWithTimeout(timeout time.Duration) *PcloudV2VolumescloneStartPostParams { + var () + return &PcloudV2VolumescloneStartPostParams{ + + timeout: timeout, + } +} + +// NewPcloudV2VolumescloneStartPostParamsWithContext creates a new PcloudV2VolumescloneStartPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudV2VolumescloneStartPostParamsWithContext(ctx context.Context) *PcloudV2VolumescloneStartPostParams { + var () + return &PcloudV2VolumescloneStartPostParams{ + + Context: ctx, + } +} + +// NewPcloudV2VolumescloneStartPostParamsWithHTTPClient creates a new PcloudV2VolumescloneStartPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudV2VolumescloneStartPostParamsWithHTTPClient(client *http.Client) *PcloudV2VolumescloneStartPostParams { + var () + return &PcloudV2VolumescloneStartPostParams{ + HTTPClient: client, + } +} + +/*PcloudV2VolumescloneStartPostParams contains all the parameters to send to the API endpoint +for the pcloud v2 volumesclone start post operation typically these are written to a http.Request +*/ +type PcloudV2VolumescloneStartPostParams struct { + + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + /*VolumesCloneID + Volumes Clone ID + + */ + VolumesCloneID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud v2 volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) WithTimeout(timeout time.Duration) *PcloudV2VolumescloneStartPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud v2 volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud v2 volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) WithContext(ctx context.Context) *PcloudV2VolumescloneStartPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud v2 volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud v2 volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) WithHTTPClient(client *http.Client) *PcloudV2VolumescloneStartPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud v2 volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud v2 volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudV2VolumescloneStartPostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud v2 
volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WithVolumesCloneID adds the volumesCloneID to the pcloud v2 volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) WithVolumesCloneID(volumesCloneID string) *PcloudV2VolumescloneStartPostParams { + o.SetVolumesCloneID(volumesCloneID) + return o +} + +// SetVolumesCloneID adds the volumesCloneId to the pcloud v2 volumesclone start post params +func (o *PcloudV2VolumescloneStartPostParams) SetVolumesCloneID(volumesCloneID string) { + o.VolumesCloneID = volumesCloneID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudV2VolumescloneStartPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + // path param volumes_clone_id + if err := r.SetPathParam("volumes_clone_id", o.VolumesCloneID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_start_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_start_post_responses.go new file mode 100644 index 00000000000..4848db18761 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_v2_volumesclone_start_post_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudV2VolumescloneStartPostReader is a Reader for the PcloudV2VolumescloneStartPost structure. +type PcloudV2VolumescloneStartPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PcloudV2VolumescloneStartPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudV2VolumescloneStartPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewPcloudV2VolumescloneStartPostNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudV2VolumescloneStartPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudV2VolumescloneStartPostOK creates a PcloudV2VolumescloneStartPostOK with default headers values +func NewPcloudV2VolumescloneStartPostOK() *PcloudV2VolumescloneStartPostOK { + return &PcloudV2VolumescloneStartPostOK{} +} + +/*PcloudV2VolumescloneStartPostOK handles this case with default header values. 
+ +OK +*/ +type PcloudV2VolumescloneStartPostOK struct { + Payload *models.VolumesClone +} + +func (o *PcloudV2VolumescloneStartPostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/start][%d] pcloudV2VolumescloneStartPostOK %+v", 200, o.Payload) +} + +func (o *PcloudV2VolumescloneStartPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.VolumesClone) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneStartPostNotFound creates a PcloudV2VolumescloneStartPostNotFound with default headers values +func NewPcloudV2VolumescloneStartPostNotFound() *PcloudV2VolumescloneStartPostNotFound { + return &PcloudV2VolumescloneStartPostNotFound{} +} + +/*PcloudV2VolumescloneStartPostNotFound handles this case with default header values. + +Not Found +*/ +type PcloudV2VolumescloneStartPostNotFound struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneStartPostNotFound) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/start][%d] pcloudV2VolumescloneStartPostNotFound %+v", 404, o.Payload) +} + +func (o *PcloudV2VolumescloneStartPostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudV2VolumescloneStartPostInternalServerError creates a PcloudV2VolumescloneStartPostInternalServerError with default headers values +func NewPcloudV2VolumescloneStartPostInternalServerError() *PcloudV2VolumescloneStartPostInternalServerError { + return &PcloudV2VolumescloneStartPostInternalServerError{} +} + +/*PcloudV2VolumescloneStartPostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudV2VolumescloneStartPostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudV2VolumescloneStartPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v2/cloud-instances/{cloud_instance_id}/volumes-clone/{volumes_clone_id}/start][%d] pcloudV2VolumescloneStartPostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudV2VolumescloneStartPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_volumes_clone_post_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_volumes_clone_post_parameters.go new file mode 100644 index 00000000000..7aba811656d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_volumes_clone_post_parameters.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. 
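The prepare/start pair is completed by the start operation above, which takes the clone-request ID as a second path parameter. Continuing the sketch, with a placeholder ID rather than guessing its field name on models.VolumesClone:

	start := p_cloud_volumes.NewPcloudV2VolumescloneStartPostParams().
		WithCloudInstanceID("CLOUD_INSTANCE_ID").
		WithVolumesCloneID("VOLUMES_CLONE_ID")
	started, err := api.PCloudVolumes.PcloudV2VolumescloneStartPost(start, auth)
	if err != nil {
		log.Fatal(err) // 404 and 500 arrive as the typed errors defined above
	}
	fmt.Printf("clone started: %+v\n", started.Payload)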
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewPcloudVolumesClonePostParams creates a new PcloudVolumesClonePostParams object +// with the default values initialized. +func NewPcloudVolumesClonePostParams() *PcloudVolumesClonePostParams { + var () + return &PcloudVolumesClonePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewPcloudVolumesClonePostParamsWithTimeout creates a new PcloudVolumesClonePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewPcloudVolumesClonePostParamsWithTimeout(timeout time.Duration) *PcloudVolumesClonePostParams { + var () + return &PcloudVolumesClonePostParams{ + + timeout: timeout, + } +} + +// NewPcloudVolumesClonePostParamsWithContext creates a new PcloudVolumesClonePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewPcloudVolumesClonePostParamsWithContext(ctx context.Context) *PcloudVolumesClonePostParams { + var () + return &PcloudVolumesClonePostParams{ + + Context: ctx, + } +} + +// NewPcloudVolumesClonePostParamsWithHTTPClient creates a new PcloudVolumesClonePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPcloudVolumesClonePostParamsWithHTTPClient(client *http.Client) *PcloudVolumesClonePostParams { + var () + return &PcloudVolumesClonePostParams{ + HTTPClient: client, + } +} + +/*PcloudVolumesClonePostParams contains all the parameters to send to the API endpoint +for the pcloud volumes clone post operation typically these are written to a http.Request +*/ +type PcloudVolumesClonePostParams struct { + + /*Body + Parameters for the cloning of volumes + + */ + Body *models.VolumesCloneRequest + /*CloudInstanceID + Cloud Instance ID of a PCloud Instance + + */ + CloudInstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) WithTimeout(timeout time.Duration) *PcloudVolumesClonePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) WithContext(ctx context.Context) *PcloudVolumesClonePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) WithHTTPClient(client *http.Client) *PcloudVolumesClonePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) 
WithBody(body *models.VolumesCloneRequest) *PcloudVolumesClonePostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) SetBody(body *models.VolumesCloneRequest) { + o.Body = body +} + +// WithCloudInstanceID adds the cloudInstanceID to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) WithCloudInstanceID(cloudInstanceID string) *PcloudVolumesClonePostParams { + o.SetCloudInstanceID(cloudInstanceID) + return o +} + +// SetCloudInstanceID adds the cloudInstanceId to the pcloud volumes clone post params +func (o *PcloudVolumesClonePostParams) SetCloudInstanceID(cloudInstanceID string) { + o.CloudInstanceID = cloudInstanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *PcloudVolumesClonePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param cloud_instance_id + if err := r.SetPathParam("cloud_instance_id", o.CloudInstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_volumes_clone_post_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_volumes_clone_post_responses.go new file mode 100644 index 00000000000..ea9f31799ea --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes/pcloud_volumes_clone_post_responses.go @@ -0,0 +1,175 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package p_cloud_volumes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// PcloudVolumesClonePostReader is a Reader for the PcloudVolumesClonePost structure. +type PcloudVolumesClonePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
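This v1 operation is the older single-shot variant: per the responses that follow, one POST to /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/clone returns 200 OK with a models.VolumesCloneResponse instead of the v2 prepare/start pair. A sketch in the same style, with VolumesCloneRequest field names again assumed from the spec:

	v1Body := &models.VolumesCloneRequest{
		DisplayName: swag.String("one-shot-clone"),
		VolumeIDs:   []string{"VOLUME_ID_1"},
	}
	v1Params := p_cloud_volumes.NewPcloudVolumesClonePostParams().
		WithCloudInstanceID("CLOUD_INSTANCE_ID").
		WithBody(v1Body)
	cloned, err := api.PCloudVolumes.PcloudVolumesClonePost(v1Params, auth)
	if err != nil {
		log.Fatal(err) // a 409 Conflict surfaces here as a typed error
	}
	fmt.Printf("clone response: %+v\n", cloned.Payload)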
+func (o *PcloudVolumesClonePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewPcloudVolumesClonePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewPcloudVolumesClonePostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewPcloudVolumesClonePostConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 500: + result := NewPcloudVolumesClonePostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewPcloudVolumesClonePostOK creates a PcloudVolumesClonePostOK with default headers values +func NewPcloudVolumesClonePostOK() *PcloudVolumesClonePostOK { + return &PcloudVolumesClonePostOK{} +} + +/*PcloudVolumesClonePostOK handles this case with default header values. + +OK +*/ +type PcloudVolumesClonePostOK struct { + Payload *models.VolumesCloneResponse +} + +func (o *PcloudVolumesClonePostOK) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/clone][%d] pcloudVolumesClonePostOK %+v", 200, o.Payload) +} + +func (o *PcloudVolumesClonePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.VolumesCloneResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudVolumesClonePostBadRequest creates a PcloudVolumesClonePostBadRequest with default headers values +func NewPcloudVolumesClonePostBadRequest() *PcloudVolumesClonePostBadRequest { + return &PcloudVolumesClonePostBadRequest{} +} + +/*PcloudVolumesClonePostBadRequest handles this case with default header values. + +Bad Request +*/ +type PcloudVolumesClonePostBadRequest struct { + Payload *models.Error +} + +func (o *PcloudVolumesClonePostBadRequest) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/clone][%d] pcloudVolumesClonePostBadRequest %+v", 400, o.Payload) +} + +func (o *PcloudVolumesClonePostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudVolumesClonePostConflict creates a PcloudVolumesClonePostConflict with default headers values +func NewPcloudVolumesClonePostConflict() *PcloudVolumesClonePostConflict { + return &PcloudVolumesClonePostConflict{} +} + +/*PcloudVolumesClonePostConflict handles this case with default header values. 
+ +Conflict +*/ +type PcloudVolumesClonePostConflict struct { + Payload *models.Error +} + +func (o *PcloudVolumesClonePostConflict) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/clone][%d] pcloudVolumesClonePostConflict %+v", 409, o.Payload) +} + +func (o *PcloudVolumesClonePostConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPcloudVolumesClonePostInternalServerError creates a PcloudVolumesClonePostInternalServerError with default headers values +func NewPcloudVolumesClonePostInternalServerError() *PcloudVolumesClonePostInternalServerError { + return &PcloudVolumesClonePostInternalServerError{} +} + +/*PcloudVolumesClonePostInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type PcloudVolumesClonePostInternalServerError struct { + Payload *models.Error +} + +func (o *PcloudVolumesClonePostInternalServerError) Error() string { + return fmt.Sprintf("[POST /pcloud/v1/cloud-instances/{cloud_instance_id}/volumes/clone][%d] pcloudVolumesClonePostInternalServerError %+v", 500, o.Payload) +} + +func (o *PcloudVolumesClonePostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/power_iaas_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/power_iaas_client.go new file mode 100644 index 00000000000..71edfb8da01 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/power_iaas_client.go @@ -0,0 +1,278 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package client + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/IBM-Cloud/power-go-client/power/client/authentication" + "github.com/IBM-Cloud/power-go-client/power/client/bluemix_service_instances" + "github.com/IBM-Cloud/power-go-client/power/client/catalog" + "github.com/IBM-Cloud/power-go-client/power/client/hardware_platforms" + "github.com/IBM-Cloud/power-go-client/power/client/iaas_service_broker" + "github.com/IBM-Cloud/power-go-client/power/client/open_stacks" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_cloud_connections" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_events" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_images" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_instances" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_s_a_p" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_snapshots" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_storage_capacity" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_system_pools" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tasks" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_tenants_ssh_keys" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_volumes" + "github.com/IBM-Cloud/power-go-client/power/client/service_bindings" + "github.com/IBM-Cloud/power-go-client/power/client/service_instances" + "github.com/IBM-Cloud/power-go-client/power/client/storage_types" + "github.com/IBM-Cloud/power-go-client/power/client/swagger_spec" +) + +// Default power iaas HTTP client. +var Default = NewHTTPClient(nil) + +const ( + // DefaultHost is the default Host + // found in Meta (info) section of spec file + DefaultHost string = "localhost" + // DefaultBasePath is the default BasePath + // found in Meta (info) section of spec file + DefaultBasePath string = "/" +) + +// DefaultSchemes are the default schemes found in Meta (info) section of spec file +var DefaultSchemes = []string{"http"} + +// NewHTTPClient creates a new power iaas HTTP client. +func NewHTTPClient(formats strfmt.Registry) *PowerIaas { + return NewHTTPClientWithConfig(formats, nil) +} + +// NewHTTPClientWithConfig creates a new power iaas HTTP client, +// using a customizable transport config. 
+func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *PowerIaas { + // ensure nullable parameters have default + if cfg == nil { + cfg = DefaultTransportConfig() + } + + // create transport and client + transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) + return New(transport, formats) +} + +// New creates a new power iaas client +func New(transport runtime.ClientTransport, formats strfmt.Registry) *PowerIaas { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + + cli := new(PowerIaas) + cli.Transport = transport + + cli.Authentication = authentication.New(transport, formats) + + cli.BluemixServiceInstances = bluemix_service_instances.New(transport, formats) + + cli.Catalog = catalog.New(transport, formats) + + cli.HardwarePlatforms = hardware_platforms.New(transport, formats) + + cli.IaasServiceBroker = iaas_service_broker.New(transport, formats) + + cli.OpenStacks = open_stacks.New(transport, formats) + + cli.PCloudCloudConnections = p_cloud_cloud_connections.New(transport, formats) + + cli.PCloudEvents = p_cloud_events.New(transport, formats) + + cli.PCloudImages = p_cloud_images.New(transport, formats) + + cli.PCloudInstances = p_cloud_instances.New(transport, formats) + + cli.PCloudNetworks = p_cloud_networks.New(transport, formats) + + cli.PCloudPVMInstances = p_cloud_p_vm_instances.New(transport, formats) + + cli.PCloudSAP = p_cloud_s_a_p.New(transport, formats) + + cli.PCloudSnapshots = p_cloud_snapshots.New(transport, formats) + + cli.PCloudStorageCapacity = p_cloud_storage_capacity.New(transport, formats) + + cli.PCloudSystemPools = p_cloud_system_pools.New(transport, formats) + + cli.PCloudTasks = p_cloud_tasks.New(transport, formats) + + cli.PCloudTenants = p_cloud_tenants.New(transport, formats) + + cli.PCloudTenantsSSHKeys = p_cloud_tenants_ssh_keys.New(transport, formats) + + cli.PCloudVolumes = p_cloud_volumes.New(transport, formats) + + cli.ServiceBindings = service_bindings.New(transport, formats) + + cli.ServiceInstances = service_instances.New(transport, formats) + + cli.StorageTypes = storage_types.New(transport, formats) + + cli.SwaggerSpec = swagger_spec.New(transport, formats) + + return cli +} + +// DefaultTransportConfig creates a TransportConfig with the +// default settings taken from the meta section of the spec file. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{ + Host: DefaultHost, + BasePath: DefaultBasePath, + Schemes: DefaultSchemes, + } +} + +// TransportConfig contains the transport related info, +// found in the meta section of the spec file. +type TransportConfig struct { + Host string + BasePath string + Schemes []string +} + +// WithHost overrides the default host, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithHost(host string) *TransportConfig { + cfg.Host = host + return cfg +} + +// WithBasePath overrides the default basePath, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { + cfg.BasePath = basePath + return cfg +} + +// WithSchemes overrides the default schemes, +// provided by the meta section of the spec file. 
+func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { + cfg.Schemes = schemes + return cfg +} + +// PowerIaas is a client for power iaas +type PowerIaas struct { + Authentication *authentication.Client + + BluemixServiceInstances *bluemix_service_instances.Client + + Catalog *catalog.Client + + HardwarePlatforms *hardware_platforms.Client + + IaasServiceBroker *iaas_service_broker.Client + + OpenStacks *open_stacks.Client + + PCloudCloudConnections *p_cloud_cloud_connections.Client + + PCloudEvents *p_cloud_events.Client + + PCloudImages *p_cloud_images.Client + + PCloudInstances *p_cloud_instances.Client + + PCloudNetworks *p_cloud_networks.Client + + PCloudPVMInstances *p_cloud_p_vm_instances.Client + + PCloudSAP *p_cloud_s_a_p.Client + + PCloudSnapshots *p_cloud_snapshots.Client + + PCloudStorageCapacity *p_cloud_storage_capacity.Client + + PCloudSystemPools *p_cloud_system_pools.Client + + PCloudTasks *p_cloud_tasks.Client + + PCloudTenants *p_cloud_tenants.Client + + PCloudTenantsSSHKeys *p_cloud_tenants_ssh_keys.Client + + PCloudVolumes *p_cloud_volumes.Client + + ServiceBindings *service_bindings.Client + + ServiceInstances *service_instances.Client + + StorageTypes *storage_types.Client + + SwaggerSpec *swagger_spec.Client + + Transport runtime.ClientTransport +} + +// SetTransport changes the transport on the client and all its subresources +func (c *PowerIaas) SetTransport(transport runtime.ClientTransport) { + c.Transport = transport + + c.Authentication.SetTransport(transport) + + c.BluemixServiceInstances.SetTransport(transport) + + c.Catalog.SetTransport(transport) + + c.HardwarePlatforms.SetTransport(transport) + + c.IaasServiceBroker.SetTransport(transport) + + c.OpenStacks.SetTransport(transport) + + c.PCloudCloudConnections.SetTransport(transport) + + c.PCloudEvents.SetTransport(transport) + + c.PCloudImages.SetTransport(transport) + + c.PCloudInstances.SetTransport(transport) + + c.PCloudNetworks.SetTransport(transport) + + c.PCloudPVMInstances.SetTransport(transport) + + c.PCloudSAP.SetTransport(transport) + + c.PCloudSnapshots.SetTransport(transport) + + c.PCloudStorageCapacity.SetTransport(transport) + + c.PCloudSystemPools.SetTransport(transport) + + c.PCloudTasks.SetTransport(transport) + + c.PCloudTenants.SetTransport(transport) + + c.PCloudTenantsSSHKeys.SetTransport(transport) + + c.PCloudVolumes.SetTransport(transport) + + c.ServiceBindings.SetTransport(transport) + + c.ServiceInstances.SetTransport(transport) + + c.StorageTypes.SetTransport(transport) + + c.SwaggerSpec.SetTransport(transport) + +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_binding_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_binding_parameters.go new file mode 100644 index 00000000000..cb170e49b5e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_binding_parameters.go @@ -0,0 +1,260 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_bindings + +// This file was generated by the swagger tool. 
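The root PowerIaas client above is plain wiring: one generated sub-client per API tag, all sharing a single transport. Besides NewHTTPClientWithConfig, the transport can be assembled by hand; a sketch with a placeholder host:

	// Build the transport directly; formats may be nil, in which case New
	// falls back to strfmt.Default, as the constructor above shows.
	transport := httptransport.New(
		"us-south.power-iaas.cloud.ibm.com", // placeholder regional host
		power.DefaultBasePath,
		[]string{"https"},
	)
	api := power.New(transport, nil)
	// api.PCloudVolumes, api.ServiceBindings, etc. now share this transport;
	// in tests the whole tree can be repointed in one call:
	//   api.SetTransport(mockTransport)
	_ = api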
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewServiceBindingBindingParams creates a new ServiceBindingBindingParams object +// with the default values initialized. +func NewServiceBindingBindingParams() *ServiceBindingBindingParams { + var () + return &ServiceBindingBindingParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBindingBindingParamsWithTimeout creates a new ServiceBindingBindingParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBindingBindingParamsWithTimeout(timeout time.Duration) *ServiceBindingBindingParams { + var () + return &ServiceBindingBindingParams{ + + timeout: timeout, + } +} + +// NewServiceBindingBindingParamsWithContext creates a new ServiceBindingBindingParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBindingBindingParamsWithContext(ctx context.Context) *ServiceBindingBindingParams { + var () + return &ServiceBindingBindingParams{ + + Context: ctx, + } +} + +// NewServiceBindingBindingParamsWithHTTPClient creates a new ServiceBindingBindingParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBindingBindingParamsWithHTTPClient(client *http.Client) *ServiceBindingBindingParams { + var () + return &ServiceBindingBindingParams{ + HTTPClient: client, + } +} + +/*ServiceBindingBindingParams contains all the parameters to send to the API endpoint +for the service binding binding operation typically these are written to a http.Request +*/ +type ServiceBindingBindingParams struct { + + /*XBrokerAPIOriginatingIdentity + identity of the user that initiated the request from the Platform + + */ + XBrokerAPIOriginatingIdentity *string + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + /*AcceptsIncomplete + asynchronous operations supported + + */ + AcceptsIncomplete *bool + /*BindingID + binding id of binding to create + + */ + BindingID string + /*Body + parameters for the requested service binding + + */ + Body *models.ServiceBindingRequest + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service binding binding params +func (o *ServiceBindingBindingParams) WithTimeout(timeout time.Duration) *ServiceBindingBindingParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service binding binding params +func (o *ServiceBindingBindingParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service binding binding params +func (o *ServiceBindingBindingParams) WithContext(ctx context.Context) *ServiceBindingBindingParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service binding binding params +func (o *ServiceBindingBindingParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service binding binding params +func (o 
*ServiceBindingBindingParams) WithHTTPClient(client *http.Client) *ServiceBindingBindingParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service binding binding params +func (o *ServiceBindingBindingParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithXBrokerAPIOriginatingIdentity adds the xBrokerAPIOriginatingIdentity to the service binding binding params +func (o *ServiceBindingBindingParams) WithXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) *ServiceBindingBindingParams { + o.SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity) + return o +} + +// SetXBrokerAPIOriginatingIdentity adds the xBrokerApiOriginatingIdentity to the service binding binding params +func (o *ServiceBindingBindingParams) SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) { + o.XBrokerAPIOriginatingIdentity = xBrokerAPIOriginatingIdentity +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the service binding binding params +func (o *ServiceBindingBindingParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *ServiceBindingBindingParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the service binding binding params +func (o *ServiceBindingBindingParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WithAcceptsIncomplete adds the acceptsIncomplete to the service binding binding params +func (o *ServiceBindingBindingParams) WithAcceptsIncomplete(acceptsIncomplete *bool) *ServiceBindingBindingParams { + o.SetAcceptsIncomplete(acceptsIncomplete) + return o +} + +// SetAcceptsIncomplete adds the acceptsIncomplete to the service binding binding params +func (o *ServiceBindingBindingParams) SetAcceptsIncomplete(acceptsIncomplete *bool) { + o.AcceptsIncomplete = acceptsIncomplete +} + +// WithBindingID adds the bindingID to the service binding binding params +func (o *ServiceBindingBindingParams) WithBindingID(bindingID string) *ServiceBindingBindingParams { + o.SetBindingID(bindingID) + return o +} + +// SetBindingID adds the bindingId to the service binding binding params +func (o *ServiceBindingBindingParams) SetBindingID(bindingID string) { + o.BindingID = bindingID +} + +// WithBody adds the body to the service binding binding params +func (o *ServiceBindingBindingParams) WithBody(body *models.ServiceBindingRequest) *ServiceBindingBindingParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the service binding binding params +func (o *ServiceBindingBindingParams) SetBody(body *models.ServiceBindingRequest) { + o.Body = body +} + +// WithInstanceID adds the instanceID to the service binding binding params +func (o *ServiceBindingBindingParams) WithInstanceID(instanceID string) *ServiceBindingBindingParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the service binding binding params +func (o *ServiceBindingBindingParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBindingBindingParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.XBrokerAPIOriginatingIdentity != nil { + + // header param X-Broker-API-Originating-Identity + if err := 
r.SetHeaderParam("X-Broker-API-Originating-Identity", *o.XBrokerAPIOriginatingIdentity); err != nil { + return err + } + + } + + // header param X-Broker-API-Version + if err := r.SetHeaderParam("X-Broker-API-Version", o.XBrokerAPIVersion); err != nil { + return err + } + + if o.AcceptsIncomplete != nil { + + // query param accepts_incomplete + var qrAcceptsIncomplete bool + if o.AcceptsIncomplete != nil { + qrAcceptsIncomplete = *o.AcceptsIncomplete + } + qAcceptsIncomplete := swag.FormatBool(qrAcceptsIncomplete) + if qAcceptsIncomplete != "" { + if err := r.SetQueryParam("accepts_incomplete", qAcceptsIncomplete); err != nil { + return err + } + } + + } + + // path param binding_id + if err := r.SetPathParam("binding_id", o.BindingID); err != nil { + return err + } + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_binding_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_binding_responses.go new file mode 100644 index 00000000000..8314fbf8286 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_binding_responses.go @@ -0,0 +1,247 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_bindings + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBindingBindingReader is a Reader for the ServiceBindingBinding structure. +type ServiceBindingBindingReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ServiceBindingBindingReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBindingBindingOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 201: + result := NewServiceBindingBindingCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewServiceBindingBindingAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBindingBindingBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewServiceBindingBindingConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewServiceBindingBindingUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBindingBindingOK creates a ServiceBindingBindingOK with default headers values +func NewServiceBindingBindingOK() *ServiceBindingBindingOK { + return &ServiceBindingBindingOK{} +} + +/*ServiceBindingBindingOK handles this case with default header values. + +OK +*/ +type ServiceBindingBindingOK struct { + Payload *models.ServiceBinding +} + +func (o *ServiceBindingBindingOK) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingBindingOK %+v", 200, o.Payload) +} + +func (o *ServiceBindingBindingOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceBinding) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingBindingCreated creates a ServiceBindingBindingCreated with default headers values +func NewServiceBindingBindingCreated() *ServiceBindingBindingCreated { + return &ServiceBindingBindingCreated{} +} + +/*ServiceBindingBindingCreated handles this case with default header values. + +Created +*/ +type ServiceBindingBindingCreated struct { + Payload *models.ServiceBinding +} + +func (o *ServiceBindingBindingCreated) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingBindingCreated %+v", 201, o.Payload) +} + +func (o *ServiceBindingBindingCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceBinding) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingBindingAccepted creates a ServiceBindingBindingAccepted with default headers values +func NewServiceBindingBindingAccepted() *ServiceBindingBindingAccepted { + return &ServiceBindingBindingAccepted{} +} + +/*ServiceBindingBindingAccepted handles this case with default header values. 
+ +Accepted +*/ +type ServiceBindingBindingAccepted struct { + Payload *models.AsyncOperation +} + +func (o *ServiceBindingBindingAccepted) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingBindingAccepted %+v", 202, o.Payload) +} + +func (o *ServiceBindingBindingAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.AsyncOperation) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingBindingBadRequest creates a ServiceBindingBindingBadRequest with default headers values +func NewServiceBindingBindingBadRequest() *ServiceBindingBindingBadRequest { + return &ServiceBindingBindingBadRequest{} +} + +/*ServiceBindingBindingBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceBindingBindingBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBindingBindingBadRequest) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingBindingBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBindingBindingBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingBindingConflict creates a ServiceBindingBindingConflict with default headers values +func NewServiceBindingBindingConflict() *ServiceBindingBindingConflict { + return &ServiceBindingBindingConflict{} +} + +/*ServiceBindingBindingConflict handles this case with default header values. + +Conflict +*/ +type ServiceBindingBindingConflict struct { + Payload *models.Error +} + +func (o *ServiceBindingBindingConflict) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingBindingConflict %+v", 409, o.Payload) +} + +func (o *ServiceBindingBindingConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingBindingUnprocessableEntity creates a ServiceBindingBindingUnprocessableEntity with default headers values +func NewServiceBindingBindingUnprocessableEntity() *ServiceBindingBindingUnprocessableEntity { + return &ServiceBindingBindingUnprocessableEntity{} +} + +/*ServiceBindingBindingUnprocessableEntity handles this case with default header values. 
+ +Unprocessable Entity +*/ +type ServiceBindingBindingUnprocessableEntity struct { + Payload *models.Error +} + +func (o *ServiceBindingBindingUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingBindingUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ServiceBindingBindingUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_get_parameters.go new file mode 100644 index 00000000000..bc7ef77248b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_get_parameters.go @@ -0,0 +1,203 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_bindings + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBindingGetParams creates a new ServiceBindingGetParams object +// with the default values initialized. +func NewServiceBindingGetParams() *ServiceBindingGetParams { + var () + return &ServiceBindingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBindingGetParamsWithTimeout creates a new ServiceBindingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBindingGetParamsWithTimeout(timeout time.Duration) *ServiceBindingGetParams { + var () + return &ServiceBindingGetParams{ + + timeout: timeout, + } +} + +// NewServiceBindingGetParamsWithContext creates a new ServiceBindingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBindingGetParamsWithContext(ctx context.Context) *ServiceBindingGetParams { + var () + return &ServiceBindingGetParams{ + + Context: ctx, + } +} + +// NewServiceBindingGetParamsWithHTTPClient creates a new ServiceBindingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBindingGetParamsWithHTTPClient(client *http.Client) *ServiceBindingGetParams { + var () + return &ServiceBindingGetParams{ + HTTPClient: client, + } +} + +/*ServiceBindingGetParams contains all the parameters to send to the API endpoint +for the service binding get operation typically these are written to a http.Request +*/ +type ServiceBindingGetParams struct { + + /*XBrokerAPIOriginatingIdentity + identity of the user that initiated the request from the Platform + + */ + XBrokerAPIOriginatingIdentity *string + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + /*BindingID + binding id of binding to create + + */ + BindingID string + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + + timeout time.Duration + Context context.Context + HTTPClient 
*http.Client +} + +// WithTimeout adds the timeout to the service binding get params +func (o *ServiceBindingGetParams) WithTimeout(timeout time.Duration) *ServiceBindingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service binding get params +func (o *ServiceBindingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service binding get params +func (o *ServiceBindingGetParams) WithContext(ctx context.Context) *ServiceBindingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service binding get params +func (o *ServiceBindingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service binding get params +func (o *ServiceBindingGetParams) WithHTTPClient(client *http.Client) *ServiceBindingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service binding get params +func (o *ServiceBindingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithXBrokerAPIOriginatingIdentity adds the xBrokerAPIOriginatingIdentity to the service binding get params +func (o *ServiceBindingGetParams) WithXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) *ServiceBindingGetParams { + o.SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity) + return o +} + +// SetXBrokerAPIOriginatingIdentity adds the xBrokerApiOriginatingIdentity to the service binding get params +func (o *ServiceBindingGetParams) SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) { + o.XBrokerAPIOriginatingIdentity = xBrokerAPIOriginatingIdentity +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the service binding get params +func (o *ServiceBindingGetParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *ServiceBindingGetParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the service binding get params +func (o *ServiceBindingGetParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WithBindingID adds the bindingID to the service binding get params +func (o *ServiceBindingGetParams) WithBindingID(bindingID string) *ServiceBindingGetParams { + o.SetBindingID(bindingID) + return o +} + +// SetBindingID adds the bindingId to the service binding get params +func (o *ServiceBindingGetParams) SetBindingID(bindingID string) { + o.BindingID = bindingID +} + +// WithInstanceID adds the instanceID to the service binding get params +func (o *ServiceBindingGetParams) WithInstanceID(instanceID string) *ServiceBindingGetParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the service binding get params +func (o *ServiceBindingGetParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBindingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.XBrokerAPIOriginatingIdentity != nil { + + // header param X-Broker-API-Originating-Identity + if err := r.SetHeaderParam("X-Broker-API-Originating-Identity", *o.XBrokerAPIOriginatingIdentity); err != nil { + return err + } + + } + + // header param X-Broker-API-Version + if err := 
r.SetHeaderParam("X-Broker-API-Version", o.XBrokerAPIVersion); err != nil { + return err + } + + // path param binding_id + if err := r.SetPathParam("binding_id", o.BindingID); err != nil { + return err + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_get_responses.go new file mode 100644 index 00000000000..61d138e25ea --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_get_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_bindings + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBindingGetReader is a Reader for the ServiceBindingGet structure. +type ServiceBindingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBindingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBindingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewServiceBindingGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBindingGetOK creates a ServiceBindingGetOK with default headers values +func NewServiceBindingGetOK() *ServiceBindingGetOK { + return &ServiceBindingGetOK{} +} + +/*ServiceBindingGetOK handles this case with default header values. + +OK +*/ +type ServiceBindingGetOK struct { + Payload *models.ServiceBindingResource +} + +func (o *ServiceBindingGetOK) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingGetOK %+v", 200, o.Payload) +} + +func (o *ServiceBindingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceBindingResource) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingGetNotFound creates a ServiceBindingGetNotFound with default headers values +func NewServiceBindingGetNotFound() *ServiceBindingGetNotFound { + return &ServiceBindingGetNotFound{} +} + +/*ServiceBindingGetNotFound handles this case with default header values. 
+ +Not Found +*/ +type ServiceBindingGetNotFound struct { + Payload *models.Error +} + +func (o *ServiceBindingGetNotFound) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingGetNotFound %+v", 404, o.Payload) +} + +func (o *ServiceBindingGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_last_operation_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_last_operation_get_parameters.go new file mode 100644 index 00000000000..25bba75ced3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_last_operation_get_parameters.go @@ -0,0 +1,274 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_bindings + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBindingLastOperationGetParams creates a new ServiceBindingLastOperationGetParams object +// with the default values initialized. +func NewServiceBindingLastOperationGetParams() *ServiceBindingLastOperationGetParams { + var () + return &ServiceBindingLastOperationGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBindingLastOperationGetParamsWithTimeout creates a new ServiceBindingLastOperationGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBindingLastOperationGetParamsWithTimeout(timeout time.Duration) *ServiceBindingLastOperationGetParams { + var () + return &ServiceBindingLastOperationGetParams{ + + timeout: timeout, + } +} + +// NewServiceBindingLastOperationGetParamsWithContext creates a new ServiceBindingLastOperationGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBindingLastOperationGetParamsWithContext(ctx context.Context) *ServiceBindingLastOperationGetParams { + var () + return &ServiceBindingLastOperationGetParams{ + + Context: ctx, + } +} + +// NewServiceBindingLastOperationGetParamsWithHTTPClient creates a new ServiceBindingLastOperationGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBindingLastOperationGetParamsWithHTTPClient(client *http.Client) *ServiceBindingLastOperationGetParams { + var () + return &ServiceBindingLastOperationGetParams{ + HTTPClient: client, + } +} + +/*ServiceBindingLastOperationGetParams contains all the parameters to send to the API endpoint +for the service binding last operation get operation typically these are written to a http.Request +*/ +type ServiceBindingLastOperationGetParams struct { + + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + /*BindingID + binding id of binding to create + + */ + BindingID string + /*InstanceID + instance id 
of instance to provision + + */ + InstanceID string + /*Operation + a provided identifier for the operation + + */ + Operation *string + /*PlanID + id of the plan associated with the instance + + */ + PlanID *string + /*ServiceID + id of the service associated with the instance + + */ + ServiceID *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) WithTimeout(timeout time.Duration) *ServiceBindingLastOperationGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) WithContext(ctx context.Context) *ServiceBindingLastOperationGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) WithHTTPClient(client *http.Client) *ServiceBindingLastOperationGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *ServiceBindingLastOperationGetParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WithBindingID adds the bindingID to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) WithBindingID(bindingID string) *ServiceBindingLastOperationGetParams { + o.SetBindingID(bindingID) + return o +} + +// SetBindingID adds the bindingId to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) SetBindingID(bindingID string) { + o.BindingID = bindingID +} + +// WithInstanceID adds the instanceID to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) WithInstanceID(instanceID string) *ServiceBindingLastOperationGetParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WithOperation adds the operation to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) WithOperation(operation *string) *ServiceBindingLastOperationGetParams { + o.SetOperation(operation) + return o +} + +// SetOperation adds the operation to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) 
SetOperation(operation *string) { + o.Operation = operation +} + +// WithPlanID adds the planID to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) WithPlanID(planID *string) *ServiceBindingLastOperationGetParams { + o.SetPlanID(planID) + return o +} + +// SetPlanID adds the planId to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) SetPlanID(planID *string) { + o.PlanID = planID +} + +// WithServiceID adds the serviceID to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) WithServiceID(serviceID *string) *ServiceBindingLastOperationGetParams { + o.SetServiceID(serviceID) + return o +} + +// SetServiceID adds the serviceId to the service binding last operation get params +func (o *ServiceBindingLastOperationGetParams) SetServiceID(serviceID *string) { + o.ServiceID = serviceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBindingLastOperationGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // header param X-Broker-API-Version + if err := r.SetHeaderParam("X-Broker-API-Version", o.XBrokerAPIVersion); err != nil { + return err + } + + // path param binding_id + if err := r.SetPathParam("binding_id", o.BindingID); err != nil { + return err + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + if o.Operation != nil { + + // query param operation + var qrOperation string + if o.Operation != nil { + qrOperation = *o.Operation + } + qOperation := qrOperation + if qOperation != "" { + if err := r.SetQueryParam("operation", qOperation); err != nil { + return err + } + } + + } + + if o.PlanID != nil { + + // query param plan_id + var qrPlanID string + if o.PlanID != nil { + qrPlanID = *o.PlanID + } + qPlanID := qrPlanID + if qPlanID != "" { + if err := r.SetQueryParam("plan_id", qPlanID); err != nil { + return err + } + } + + } + + if o.ServiceID != nil { + + // query param service_id + var qrServiceID string + if o.ServiceID != nil { + qrServiceID = *o.ServiceID + } + qServiceID := qrServiceID + if qServiceID != "" { + if err := r.SetQueryParam("service_id", qServiceID); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_last_operation_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_last_operation_get_responses.go new file mode 100644 index 00000000000..e71e8725dd1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_last_operation_get_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_bindings + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBindingLastOperationGetReader is a Reader for the ServiceBindingLastOperationGet structure. 
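// Editorial sketch (not part of the generated patch): in the asynchronous
// Open Service Broker flow, a 202 Accepted from a bind or unbind call is
// followed by polling this last_operation endpoint until the broker reports
// a terminal state. The client value `sb`, `authInfo`, the IDs, the
// *string operation token, and the payload's State field (taken from the
// OSB spec, not from this file) are all assumptions:
//
//	params := NewServiceBindingLastOperationGetParams().
//		WithXBrokerAPIVersion("2.13").
//		WithInstanceID(instanceID).
//		WithBindingID(bindingID).
//		WithOperation(operationToken) // *string token from the 202 payload
//	for {
//		ok, err := sb.ServiceBindingLastOperationGet(params, authInfo)
//		if err != nil {
//			return err
//		}
//		if state := ok.Payload.State; state == nil || *state != "in progress" {
//			break // terminal: "succeeded" or "failed"
//		}
//		time.Sleep(10 * time.Second)
//	}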
+type ServiceBindingLastOperationGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBindingLastOperationGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBindingLastOperationGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBindingLastOperationGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewServiceBindingLastOperationGetGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBindingLastOperationGetOK creates a ServiceBindingLastOperationGetOK with default headers values +func NewServiceBindingLastOperationGetOK() *ServiceBindingLastOperationGetOK { + return &ServiceBindingLastOperationGetOK{} +} + +/*ServiceBindingLastOperationGetOK handles this case with default header values. + +OK +*/ +type ServiceBindingLastOperationGetOK struct { + Payload *models.LastOperationResource +} + +func (o *ServiceBindingLastOperationGetOK) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}/service_bindings/{binding_id}/last_operation][%d] serviceBindingLastOperationGetOK %+v", 200, o.Payload) +} + +func (o *ServiceBindingLastOperationGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.LastOperationResource) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingLastOperationGetBadRequest creates a ServiceBindingLastOperationGetBadRequest with default headers values +func NewServiceBindingLastOperationGetBadRequest() *ServiceBindingLastOperationGetBadRequest { + return &ServiceBindingLastOperationGetBadRequest{} +} + +/*ServiceBindingLastOperationGetBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceBindingLastOperationGetBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBindingLastOperationGetBadRequest) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}/service_bindings/{binding_id}/last_operation][%d] serviceBindingLastOperationGetBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBindingLastOperationGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingLastOperationGetGone creates a ServiceBindingLastOperationGetGone with default headers values +func NewServiceBindingLastOperationGetGone() *ServiceBindingLastOperationGetGone { + return &ServiceBindingLastOperationGetGone{} +} + +/*ServiceBindingLastOperationGetGone handles this case with default header values. 
+ +Gone +*/ +type ServiceBindingLastOperationGetGone struct { + Payload *models.Error +} + +func (o *ServiceBindingLastOperationGetGone) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}/service_bindings/{binding_id}/last_operation][%d] serviceBindingLastOperationGetGone %+v", 410, o.Payload) +} + +func (o *ServiceBindingLastOperationGetGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_unbinding_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_unbinding_parameters.go new file mode 100644 index 00000000000..898dfe1b322 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_unbinding_parameters.go @@ -0,0 +1,286 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_bindings + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBindingUnbindingParams creates a new ServiceBindingUnbindingParams object +// with the default values initialized. +func NewServiceBindingUnbindingParams() *ServiceBindingUnbindingParams { + var () + return &ServiceBindingUnbindingParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBindingUnbindingParamsWithTimeout creates a new ServiceBindingUnbindingParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBindingUnbindingParamsWithTimeout(timeout time.Duration) *ServiceBindingUnbindingParams { + var () + return &ServiceBindingUnbindingParams{ + + timeout: timeout, + } +} + +// NewServiceBindingUnbindingParamsWithContext creates a new ServiceBindingUnbindingParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBindingUnbindingParamsWithContext(ctx context.Context) *ServiceBindingUnbindingParams { + var () + return &ServiceBindingUnbindingParams{ + + Context: ctx, + } +} + +// NewServiceBindingUnbindingParamsWithHTTPClient creates a new ServiceBindingUnbindingParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBindingUnbindingParamsWithHTTPClient(client *http.Client) *ServiceBindingUnbindingParams { + var () + return &ServiceBindingUnbindingParams{ + HTTPClient: client, + } +} + +/*ServiceBindingUnbindingParams contains all the parameters to send to the API endpoint +for the service binding unbinding operation typically these are written to a http.Request +*/ +type ServiceBindingUnbindingParams struct { + + /*XBrokerAPIOriginatingIdentity + identity of the user that initiated the request from the Platform + + */ + XBrokerAPIOriginatingIdentity *string + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + /*AcceptsIncomplete + asynchronous operations supported + + */ + 
AcceptsIncomplete *bool + /*BindingID + binding id of binding to create + + */ + BindingID string + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + /*PlanID + id of the plan associated with the instance being deleted + + */ + PlanID string + /*ServiceID + id of the service associated with the instance being deleted + + */ + ServiceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithTimeout(timeout time.Duration) *ServiceBindingUnbindingParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithContext(ctx context.Context) *ServiceBindingUnbindingParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithHTTPClient(client *http.Client) *ServiceBindingUnbindingParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithXBrokerAPIOriginatingIdentity adds the xBrokerAPIOriginatingIdentity to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) *ServiceBindingUnbindingParams { + o.SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity) + return o +} + +// SetXBrokerAPIOriginatingIdentity adds the xBrokerApiOriginatingIdentity to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) { + o.XBrokerAPIOriginatingIdentity = xBrokerAPIOriginatingIdentity +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *ServiceBindingUnbindingParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WithAcceptsIncomplete adds the acceptsIncomplete to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithAcceptsIncomplete(acceptsIncomplete *bool) *ServiceBindingUnbindingParams { + o.SetAcceptsIncomplete(acceptsIncomplete) + return o +} + +// SetAcceptsIncomplete adds the acceptsIncomplete to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetAcceptsIncomplete(acceptsIncomplete *bool) { + o.AcceptsIncomplete = acceptsIncomplete +} + +// WithBindingID adds the bindingID to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithBindingID(bindingID string) *ServiceBindingUnbindingParams { + o.SetBindingID(bindingID) + return o +} + +// 
SetBindingID adds the bindingId to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetBindingID(bindingID string) { + o.BindingID = bindingID +} + +// WithInstanceID adds the instanceID to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithInstanceID(instanceID string) *ServiceBindingUnbindingParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WithPlanID adds the planID to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithPlanID(planID string) *ServiceBindingUnbindingParams { + o.SetPlanID(planID) + return o +} + +// SetPlanID adds the planId to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetPlanID(planID string) { + o.PlanID = planID +} + +// WithServiceID adds the serviceID to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) WithServiceID(serviceID string) *ServiceBindingUnbindingParams { + o.SetServiceID(serviceID) + return o +} + +// SetServiceID adds the serviceId to the service binding unbinding params +func (o *ServiceBindingUnbindingParams) SetServiceID(serviceID string) { + o.ServiceID = serviceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBindingUnbindingParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.XBrokerAPIOriginatingIdentity != nil { + + // header param X-Broker-API-Originating-Identity + if err := r.SetHeaderParam("X-Broker-API-Originating-Identity", *o.XBrokerAPIOriginatingIdentity); err != nil { + return err + } + + } + + // header param X-Broker-API-Version + if err := r.SetHeaderParam("X-Broker-API-Version", o.XBrokerAPIVersion); err != nil { + return err + } + + if o.AcceptsIncomplete != nil { + + // query param accepts_incomplete + var qrAcceptsIncomplete bool + if o.AcceptsIncomplete != nil { + qrAcceptsIncomplete = *o.AcceptsIncomplete + } + qAcceptsIncomplete := swag.FormatBool(qrAcceptsIncomplete) + if qAcceptsIncomplete != "" { + if err := r.SetQueryParam("accepts_incomplete", qAcceptsIncomplete); err != nil { + return err + } + } + + } + + // path param binding_id + if err := r.SetPathParam("binding_id", o.BindingID); err != nil { + return err + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + // query param plan_id + qrPlanID := o.PlanID + qPlanID := qrPlanID + if qPlanID != "" { + if err := r.SetQueryParam("plan_id", qPlanID); err != nil { + return err + } + } + + // query param service_id + qrServiceID := o.ServiceID + qServiceID := qrServiceID + if qServiceID != "" { + if err := r.SetQueryParam("service_id", qServiceID); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_unbinding_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_unbinding_responses.go new file mode 100644 index 00000000000..f5ee2f943bd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_binding_unbinding_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_bindings + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBindingUnbindingReader is a Reader for the ServiceBindingUnbinding structure. +type ServiceBindingUnbindingReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBindingUnbindingReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBindingUnbindingOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewServiceBindingUnbindingAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceBindingUnbindingBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewServiceBindingUnbindingGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBindingUnbindingOK creates a ServiceBindingUnbindingOK with default headers values +func NewServiceBindingUnbindingOK() *ServiceBindingUnbindingOK { + return &ServiceBindingUnbindingOK{} +} + +/*ServiceBindingUnbindingOK handles this case with default header values. + +OK +*/ +type ServiceBindingUnbindingOK struct { + Payload models.Object +} + +func (o *ServiceBindingUnbindingOK) Error() string { + return fmt.Sprintf("[DELETE /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingUnbindingOK %+v", 200, o.Payload) +} + +func (o *ServiceBindingUnbindingOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingUnbindingAccepted creates a ServiceBindingUnbindingAccepted with default headers values +func NewServiceBindingUnbindingAccepted() *ServiceBindingUnbindingAccepted { + return &ServiceBindingUnbindingAccepted{} +} + +/*ServiceBindingUnbindingAccepted handles this case with default header values. 
+ +Accepted +*/ +type ServiceBindingUnbindingAccepted struct { + Payload *models.AsyncOperation +} + +func (o *ServiceBindingUnbindingAccepted) Error() string { + return fmt.Sprintf("[DELETE /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingUnbindingAccepted %+v", 202, o.Payload) +} + +func (o *ServiceBindingUnbindingAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.AsyncOperation) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingUnbindingBadRequest creates a ServiceBindingUnbindingBadRequest with default headers values +func NewServiceBindingUnbindingBadRequest() *ServiceBindingUnbindingBadRequest { + return &ServiceBindingUnbindingBadRequest{} +} + +/*ServiceBindingUnbindingBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceBindingUnbindingBadRequest struct { + Payload *models.Error +} + +func (o *ServiceBindingUnbindingBadRequest) Error() string { + return fmt.Sprintf("[DELETE /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingUnbindingBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceBindingUnbindingBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBindingUnbindingGone creates a ServiceBindingUnbindingGone with default headers values +func NewServiceBindingUnbindingGone() *ServiceBindingUnbindingGone { + return &ServiceBindingUnbindingGone{} +} + +/*ServiceBindingUnbindingGone handles this case with default header values. + +Gone +*/ +type ServiceBindingUnbindingGone struct { + Payload *models.Error +} + +func (o *ServiceBindingUnbindingGone) Error() string { + return fmt.Sprintf("[DELETE /v2/service_instances/{instance_id}/service_bindings/{binding_id}][%d] serviceBindingUnbindingGone %+v", 410, o.Payload) +} + +func (o *ServiceBindingUnbindingGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_bindings_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_bindings_client.go new file mode 100644 index 00000000000..f119decec1a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_bindings/service_bindings_client.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_bindings + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new service bindings API client. 
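// Editorial sketch (not part of the generated patch): wiring the generated
// service bindings client together. The host, broker API version, and IDs
// below are placeholders; the transport and format registry come from
// go-openapi (httptransport is "github.com/go-openapi/runtime/client",
// strfmt is "github.com/go-openapi/strfmt"), and nil stands in for a
// runtime.ClientAuthInfoWriter when the broker requires none:
//
//	transport := httptransport.New("broker.example.com", "/", []string{"http"})
//	sb := New(transport, strfmt.Default)
//	ok, err := sb.ServiceBindingGet(
//		NewServiceBindingGetParams().
//			WithXBrokerAPIVersion("2.13").
//			WithInstanceID("instance-uuid").
//			WithBindingID("binding-uuid"),
//		nil,
//	)
//	if err != nil {
//		return err
//	}
//	fmt.Printf("binding: %+v\n", ok.Payload)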
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for service bindings API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +ServiceBindingBinding generations of a service binding +*/ +func (a *Client) ServiceBindingBinding(params *ServiceBindingBindingParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBindingBindingOK, *ServiceBindingBindingCreated, *ServiceBindingBindingAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBindingBindingParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBinding.binding", + Method: "PUT", + PathPattern: "/v2/service_instances/{instance_id}/service_bindings/{binding_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBindingBindingReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, nil, err + } + switch value := result.(type) { + case *ServiceBindingBindingOK: + return value, nil, nil, nil + case *ServiceBindingBindingCreated: + return nil, value, nil, nil + case *ServiceBindingBindingAccepted: + return nil, nil, value, nil + } + return nil, nil, nil, nil + +} + +/* +ServiceBindingGet gets a service binding +*/ +func (a *Client) ServiceBindingGet(params *ServiceBindingGetParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBindingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBindingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBinding.get", + Method: "GET", + PathPattern: "/v2/service_instances/{instance_id}/service_bindings/{binding_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBindingGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBindingGetOK), nil + +} + +/* +ServiceBindingLastOperationGet lasts requested operation state for service binding +*/ +func (a *Client) ServiceBindingLastOperationGet(params *ServiceBindingLastOperationGetParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceBindingLastOperationGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBindingLastOperationGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBinding.lastOperation.get", + Method: "GET", + PathPattern: "/v2/service_instances/{instance_id}/service_bindings/{binding_id}/last_operation", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBindingLastOperationGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBindingLastOperationGetOK), nil + +} + +/* +ServiceBindingUnbinding deprovisions of a service binding +*/ +func (a *Client) ServiceBindingUnbinding(params *ServiceBindingUnbindingParams, authInfo 
runtime.ClientAuthInfoWriter) (*ServiceBindingUnbindingOK, *ServiceBindingUnbindingAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBindingUnbindingParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBinding.unbinding", + Method: "DELETE", + PathPattern: "/v2/service_instances/{instance_id}/service_bindings/{binding_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBindingUnbindingReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *ServiceBindingUnbindingOK: + return value, nil, nil + case *ServiceBindingUnbindingAccepted: + return nil, value, nil + } + return nil, nil, nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_deprovision_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_deprovision_parameters.go new file mode 100644 index 00000000000..f2efcedfc94 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_deprovision_parameters.go @@ -0,0 +1,265 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceInstanceDeprovisionParams creates a new ServiceInstanceDeprovisionParams object +// with the default values initialized. 
+func NewServiceInstanceDeprovisionParams() *ServiceInstanceDeprovisionParams { + var () + return &ServiceInstanceDeprovisionParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceInstanceDeprovisionParamsWithTimeout creates a new ServiceInstanceDeprovisionParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceInstanceDeprovisionParamsWithTimeout(timeout time.Duration) *ServiceInstanceDeprovisionParams { + var () + return &ServiceInstanceDeprovisionParams{ + + timeout: timeout, + } +} + +// NewServiceInstanceDeprovisionParamsWithContext creates a new ServiceInstanceDeprovisionParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceInstanceDeprovisionParamsWithContext(ctx context.Context) *ServiceInstanceDeprovisionParams { + var () + return &ServiceInstanceDeprovisionParams{ + + Context: ctx, + } +} + +// NewServiceInstanceDeprovisionParamsWithHTTPClient creates a new ServiceInstanceDeprovisionParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceInstanceDeprovisionParamsWithHTTPClient(client *http.Client) *ServiceInstanceDeprovisionParams { + var () + return &ServiceInstanceDeprovisionParams{ + HTTPClient: client, + } +} + +/*ServiceInstanceDeprovisionParams contains all the parameters to send to the API endpoint +for the service instance deprovision operation typically these are written to a http.Request +*/ +type ServiceInstanceDeprovisionParams struct { + + /*XBrokerAPIOriginatingIdentity + identity of the user that initiated the request from the Platform + + */ + XBrokerAPIOriginatingIdentity *string + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + /*AcceptsIncomplete + asynchronous operations supported + + */ + AcceptsIncomplete *bool + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + /*PlanID + id of the plan associated with the instance being deleted + + */ + PlanID string + /*ServiceID + id of the service associated with the instance being deleted + + */ + ServiceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) WithTimeout(timeout time.Duration) *ServiceInstanceDeprovisionParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) WithContext(ctx context.Context) *ServiceInstanceDeprovisionParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) WithHTTPClient(client *http.Client) *ServiceInstanceDeprovisionParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client 
+} + +// WithXBrokerAPIOriginatingIdentity adds the xBrokerAPIOriginatingIdentity to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) WithXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) *ServiceInstanceDeprovisionParams { + o.SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity) + return o +} + +// SetXBrokerAPIOriginatingIdentity adds the xBrokerApiOriginatingIdentity to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) { + o.XBrokerAPIOriginatingIdentity = xBrokerAPIOriginatingIdentity +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *ServiceInstanceDeprovisionParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WithAcceptsIncomplete adds the acceptsIncomplete to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) WithAcceptsIncomplete(acceptsIncomplete *bool) *ServiceInstanceDeprovisionParams { + o.SetAcceptsIncomplete(acceptsIncomplete) + return o +} + +// SetAcceptsIncomplete adds the acceptsIncomplete to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) SetAcceptsIncomplete(acceptsIncomplete *bool) { + o.AcceptsIncomplete = acceptsIncomplete +} + +// WithInstanceID adds the instanceID to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) WithInstanceID(instanceID string) *ServiceInstanceDeprovisionParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WithPlanID adds the planID to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) WithPlanID(planID string) *ServiceInstanceDeprovisionParams { + o.SetPlanID(planID) + return o +} + +// SetPlanID adds the planId to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) SetPlanID(planID string) { + o.PlanID = planID +} + +// WithServiceID adds the serviceID to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) WithServiceID(serviceID string) *ServiceInstanceDeprovisionParams { + o.SetServiceID(serviceID) + return o +} + +// SetServiceID adds the serviceId to the service instance deprovision params +func (o *ServiceInstanceDeprovisionParams) SetServiceID(serviceID string) { + o.ServiceID = serviceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceInstanceDeprovisionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.XBrokerAPIOriginatingIdentity != nil { + + // header param X-Broker-API-Originating-Identity + if err := r.SetHeaderParam("X-Broker-API-Originating-Identity", *o.XBrokerAPIOriginatingIdentity); err != nil { + return err + } + + } + + // header param X-Broker-API-Version + if err := r.SetHeaderParam("X-Broker-API-Version", 
o.XBrokerAPIVersion); err != nil { + return err + } + + if o.AcceptsIncomplete != nil { + + // query param accepts_incomplete + var qrAcceptsIncomplete bool + if o.AcceptsIncomplete != nil { + qrAcceptsIncomplete = *o.AcceptsIncomplete + } + qAcceptsIncomplete := swag.FormatBool(qrAcceptsIncomplete) + if qAcceptsIncomplete != "" { + if err := r.SetQueryParam("accepts_incomplete", qAcceptsIncomplete); err != nil { + return err + } + } + + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + // query param plan_id + qrPlanID := o.PlanID + qPlanID := qrPlanID + if qPlanID != "" { + if err := r.SetQueryParam("plan_id", qPlanID); err != nil { + return err + } + } + + // query param service_id + qrServiceID := o.ServiceID + qServiceID := qrServiceID + if qServiceID != "" { + if err := r.SetQueryParam("service_id", qServiceID); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_deprovision_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_deprovision_responses.go new file mode 100644 index 00000000000..adfc8497af3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_deprovision_responses.go @@ -0,0 +1,209 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceInstanceDeprovisionReader is a Reader for the ServiceInstanceDeprovision structure. +type ServiceInstanceDeprovisionReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ServiceInstanceDeprovisionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceInstanceDeprovisionOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewServiceInstanceDeprovisionAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceInstanceDeprovisionBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewServiceInstanceDeprovisionGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewServiceInstanceDeprovisionUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceInstanceDeprovisionOK creates a ServiceInstanceDeprovisionOK with default headers values +func NewServiceInstanceDeprovisionOK() *ServiceInstanceDeprovisionOK { + return &ServiceInstanceDeprovisionOK{} +} + +/*ServiceInstanceDeprovisionOK handles this case with default header values. + +OK +*/ +type ServiceInstanceDeprovisionOK struct { + Payload models.Object +} + +func (o *ServiceInstanceDeprovisionOK) Error() string { + return fmt.Sprintf("[DELETE /v2/service_instances/{instance_id}][%d] serviceInstanceDeprovisionOK %+v", 200, o.Payload) +} + +func (o *ServiceInstanceDeprovisionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceDeprovisionAccepted creates a ServiceInstanceDeprovisionAccepted with default headers values +func NewServiceInstanceDeprovisionAccepted() *ServiceInstanceDeprovisionAccepted { + return &ServiceInstanceDeprovisionAccepted{} +} + +/*ServiceInstanceDeprovisionAccepted handles this case with default header values. + +Accepted +*/ +type ServiceInstanceDeprovisionAccepted struct { + Payload *models.AsyncOperation +} + +func (o *ServiceInstanceDeprovisionAccepted) Error() string { + return fmt.Sprintf("[DELETE /v2/service_instances/{instance_id}][%d] serviceInstanceDeprovisionAccepted %+v", 202, o.Payload) +} + +func (o *ServiceInstanceDeprovisionAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.AsyncOperation) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceDeprovisionBadRequest creates a ServiceInstanceDeprovisionBadRequest with default headers values +func NewServiceInstanceDeprovisionBadRequest() *ServiceInstanceDeprovisionBadRequest { + return &ServiceInstanceDeprovisionBadRequest{} +} + +/*ServiceInstanceDeprovisionBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type ServiceInstanceDeprovisionBadRequest struct { + Payload *models.Error +} + +func (o *ServiceInstanceDeprovisionBadRequest) Error() string { + return fmt.Sprintf("[DELETE /v2/service_instances/{instance_id}][%d] serviceInstanceDeprovisionBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceInstanceDeprovisionBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceDeprovisionGone creates a ServiceInstanceDeprovisionGone with default headers values +func NewServiceInstanceDeprovisionGone() *ServiceInstanceDeprovisionGone { + return &ServiceInstanceDeprovisionGone{} +} + +/*ServiceInstanceDeprovisionGone handles this case with default header values. + +Gone +*/ +type ServiceInstanceDeprovisionGone struct { + Payload *models.Error +} + +func (o *ServiceInstanceDeprovisionGone) Error() string { + return fmt.Sprintf("[DELETE /v2/service_instances/{instance_id}][%d] serviceInstanceDeprovisionGone %+v", 410, o.Payload) +} + +func (o *ServiceInstanceDeprovisionGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceDeprovisionUnprocessableEntity creates a ServiceInstanceDeprovisionUnprocessableEntity with default headers values +func NewServiceInstanceDeprovisionUnprocessableEntity() *ServiceInstanceDeprovisionUnprocessableEntity { + return &ServiceInstanceDeprovisionUnprocessableEntity{} +} + +/*ServiceInstanceDeprovisionUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type ServiceInstanceDeprovisionUnprocessableEntity struct { + Payload *models.Error +} + +func (o *ServiceInstanceDeprovisionUnprocessableEntity) Error() string { + return fmt.Sprintf("[DELETE /v2/service_instances/{instance_id}][%d] serviceInstanceDeprovisionUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ServiceInstanceDeprovisionUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_get_parameters.go new file mode 100644 index 00000000000..b9a16cfc9fd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_get_parameters.go @@ -0,0 +1,182 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceInstanceGetParams creates a new ServiceInstanceGetParams object +// with the default values initialized. +func NewServiceInstanceGetParams() *ServiceInstanceGetParams { + var () + return &ServiceInstanceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceInstanceGetParamsWithTimeout creates a new ServiceInstanceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceInstanceGetParamsWithTimeout(timeout time.Duration) *ServiceInstanceGetParams { + var () + return &ServiceInstanceGetParams{ + + timeout: timeout, + } +} + +// NewServiceInstanceGetParamsWithContext creates a new ServiceInstanceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceInstanceGetParamsWithContext(ctx context.Context) *ServiceInstanceGetParams { + var () + return &ServiceInstanceGetParams{ + + Context: ctx, + } +} + +// NewServiceInstanceGetParamsWithHTTPClient creates a new ServiceInstanceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceInstanceGetParamsWithHTTPClient(client *http.Client) *ServiceInstanceGetParams { + var () + return &ServiceInstanceGetParams{ + HTTPClient: client, + } +} + +/*ServiceInstanceGetParams contains all the parameters to send to the API endpoint +for the service instance get operation typically these are written to a http.Request +*/ +type ServiceInstanceGetParams struct { + + /*XBrokerAPIOriginatingIdentity + identity of the user that initiated the request from the Platform + + */ + XBrokerAPIOriginatingIdentity *string + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service instance get params +func (o *ServiceInstanceGetParams) WithTimeout(timeout time.Duration) *ServiceInstanceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service instance get params +func (o *ServiceInstanceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service instance get params +func (o *ServiceInstanceGetParams) WithContext(ctx context.Context) *ServiceInstanceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service instance get params +func (o *ServiceInstanceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service instance get params +func (o *ServiceInstanceGetParams) WithHTTPClient(client *http.Client) *ServiceInstanceGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service instance get params +func (o *ServiceInstanceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithXBrokerAPIOriginatingIdentity adds the xBrokerAPIOriginatingIdentity to the service instance get params +func (o *ServiceInstanceGetParams) 
WithXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) *ServiceInstanceGetParams { + o.SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity) + return o +} + +// SetXBrokerAPIOriginatingIdentity adds the xBrokerApiOriginatingIdentity to the service instance get params +func (o *ServiceInstanceGetParams) SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) { + o.XBrokerAPIOriginatingIdentity = xBrokerAPIOriginatingIdentity +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the service instance get params +func (o *ServiceInstanceGetParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *ServiceInstanceGetParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the service instance get params +func (o *ServiceInstanceGetParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WithInstanceID adds the instanceID to the service instance get params +func (o *ServiceInstanceGetParams) WithInstanceID(instanceID string) *ServiceInstanceGetParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the service instance get params +func (o *ServiceInstanceGetParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceInstanceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.XBrokerAPIOriginatingIdentity != nil { + + // header param X-Broker-API-Originating-Identity + if err := r.SetHeaderParam("X-Broker-API-Originating-Identity", *o.XBrokerAPIOriginatingIdentity); err != nil { + return err + } + + } + + // header param X-Broker-API-Version + if err := r.SetHeaderParam("X-Broker-API-Version", o.XBrokerAPIVersion); err != nil { + return err + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_get_responses.go new file mode 100644 index 00000000000..1bbb698951e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_get_responses.go @@ -0,0 +1,103 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceInstanceGetReader is a Reader for the ServiceInstanceGet structure. +type ServiceInstanceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
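+// As a hedged, non-generated sketch: a caller would normally reach this reader
+// indirectly through the generated client; transport, authInfo, instanceID and
+// the broker API version "2.12" below are assumed example values:
+//
+//	si := service_instances.New(transport, strfmt.Default)
+//	params := service_instances.NewServiceInstanceGetParams().
+//		WithXBrokerAPIVersion("2.12").
+//		WithInstanceID(instanceID)
+//	ok, err := si.ServiceInstanceGet(params, authInfo)
+//	if err == nil {
+//		fmt.Printf("instance: %+v\n", ok.Payload)
+//	}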
+func (o *ServiceInstanceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceInstanceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewServiceInstanceGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceInstanceGetOK creates a ServiceInstanceGetOK with default headers values +func NewServiceInstanceGetOK() *ServiceInstanceGetOK { + return &ServiceInstanceGetOK{} +} + +/*ServiceInstanceGetOK handles this case with default header values. + +OK +*/ +type ServiceInstanceGetOK struct { + Payload *models.ServiceInstanceResource +} + +func (o *ServiceInstanceGetOK) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}][%d] serviceInstanceGetOK %+v", 200, o.Payload) +} + +func (o *ServiceInstanceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceInstanceResource) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceGetNotFound creates a ServiceInstanceGetNotFound with default headers values +func NewServiceInstanceGetNotFound() *ServiceInstanceGetNotFound { + return &ServiceInstanceGetNotFound{} +} + +/*ServiceInstanceGetNotFound handles this case with default header values. + +Not Found +*/ +type ServiceInstanceGetNotFound struct { + Payload *models.Error +} + +func (o *ServiceInstanceGetNotFound) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}][%d] serviceInstanceGetNotFound %+v", 404, o.Payload) +} + +func (o *ServiceInstanceGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_last_operation_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_last_operation_get_parameters.go new file mode 100644 index 00000000000..c6d90363ce1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_last_operation_get_parameters.go @@ -0,0 +1,253 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceInstanceLastOperationGetParams creates a new ServiceInstanceLastOperationGetParams object +// with the default values initialized. 
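+//
+// As an illustrative, non-generated sketch, a caller polling an asynchronous
+// provision or deprovision might build the params as below; operation, planID
+// and serviceID are assumed to be values previously returned by the broker:
+//
+//	params := service_instances.NewServiceInstanceLastOperationGetParams().
+//		WithXBrokerAPIVersion("2.12").
+//		WithInstanceID(instanceID).
+//		WithOperation(&operation).
+//		WithPlanID(&planID).
+//		WithServiceID(&serviceID)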
+func NewServiceInstanceLastOperationGetParams() *ServiceInstanceLastOperationGetParams { + var () + return &ServiceInstanceLastOperationGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceInstanceLastOperationGetParamsWithTimeout creates a new ServiceInstanceLastOperationGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceInstanceLastOperationGetParamsWithTimeout(timeout time.Duration) *ServiceInstanceLastOperationGetParams { + var () + return &ServiceInstanceLastOperationGetParams{ + + timeout: timeout, + } +} + +// NewServiceInstanceLastOperationGetParamsWithContext creates a new ServiceInstanceLastOperationGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceInstanceLastOperationGetParamsWithContext(ctx context.Context) *ServiceInstanceLastOperationGetParams { + var () + return &ServiceInstanceLastOperationGetParams{ + + Context: ctx, + } +} + +// NewServiceInstanceLastOperationGetParamsWithHTTPClient creates a new ServiceInstanceLastOperationGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceInstanceLastOperationGetParamsWithHTTPClient(client *http.Client) *ServiceInstanceLastOperationGetParams { + var () + return &ServiceInstanceLastOperationGetParams{ + HTTPClient: client, + } +} + +/*ServiceInstanceLastOperationGetParams contains all the parameters to send to the API endpoint +for the service instance last operation get operation typically these are written to a http.Request +*/ +type ServiceInstanceLastOperationGetParams struct { + + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + /*Operation + a provided identifier for the operation + + */ + Operation *string + /*PlanID + id of the plan associated with the instance + + */ + PlanID *string + /*ServiceID + id of the service associated with the instance + + */ + ServiceID *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) WithTimeout(timeout time.Duration) *ServiceInstanceLastOperationGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) WithContext(ctx context.Context) *ServiceInstanceLastOperationGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) WithHTTPClient(client *http.Client) *ServiceInstanceLastOperationGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) SetHTTPClient(client *http.Client) { + 
o.HTTPClient = client +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *ServiceInstanceLastOperationGetParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WithInstanceID adds the instanceID to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) WithInstanceID(instanceID string) *ServiceInstanceLastOperationGetParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WithOperation adds the operation to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) WithOperation(operation *string) *ServiceInstanceLastOperationGetParams { + o.SetOperation(operation) + return o +} + +// SetOperation adds the operation to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) SetOperation(operation *string) { + o.Operation = operation +} + +// WithPlanID adds the planID to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) WithPlanID(planID *string) *ServiceInstanceLastOperationGetParams { + o.SetPlanID(planID) + return o +} + +// SetPlanID adds the planId to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) SetPlanID(planID *string) { + o.PlanID = planID +} + +// WithServiceID adds the serviceID to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) WithServiceID(serviceID *string) *ServiceInstanceLastOperationGetParams { + o.SetServiceID(serviceID) + return o +} + +// SetServiceID adds the serviceId to the service instance last operation get params +func (o *ServiceInstanceLastOperationGetParams) SetServiceID(serviceID *string) { + o.ServiceID = serviceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceInstanceLastOperationGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // header param X-Broker-API-Version + if err := r.SetHeaderParam("X-Broker-API-Version", o.XBrokerAPIVersion); err != nil { + return err + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + if o.Operation != nil { + + // query param operation + var qrOperation string + if o.Operation != nil { + qrOperation = *o.Operation + } + qOperation := qrOperation + if qOperation != "" { + if err := r.SetQueryParam("operation", qOperation); err != nil { + return err + } + } + + } + + if o.PlanID != nil { + + // query param plan_id + var qrPlanID string + if o.PlanID != nil { + qrPlanID = *o.PlanID + } + qPlanID := qrPlanID + if qPlanID != "" { + if err := r.SetQueryParam("plan_id", qPlanID); err != nil { + return err + } + } + + } + + if o.ServiceID != nil { + + // query param service_id + var qrServiceID string + if o.ServiceID != nil { + 
qrServiceID = *o.ServiceID + } + qServiceID := qrServiceID + if qServiceID != "" { + if err := r.SetQueryParam("service_id", qServiceID); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_last_operation_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_last_operation_get_responses.go new file mode 100644 index 00000000000..30123e356c6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_last_operation_get_responses.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceInstanceLastOperationGetReader is a Reader for the ServiceInstanceLastOperationGet structure. +type ServiceInstanceLastOperationGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceInstanceLastOperationGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceInstanceLastOperationGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceInstanceLastOperationGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 410: + result := NewServiceInstanceLastOperationGetGone() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceInstanceLastOperationGetOK creates a ServiceInstanceLastOperationGetOK with default headers values +func NewServiceInstanceLastOperationGetOK() *ServiceInstanceLastOperationGetOK { + return &ServiceInstanceLastOperationGetOK{} +} + +/*ServiceInstanceLastOperationGetOK handles this case with default header values. 
+ +OK +*/ +type ServiceInstanceLastOperationGetOK struct { + Payload *models.LastOperationResource +} + +func (o *ServiceInstanceLastOperationGetOK) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}/last_operation][%d] serviceInstanceLastOperationGetOK %+v", 200, o.Payload) +} + +func (o *ServiceInstanceLastOperationGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.LastOperationResource) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceLastOperationGetBadRequest creates a ServiceInstanceLastOperationGetBadRequest with default headers values +func NewServiceInstanceLastOperationGetBadRequest() *ServiceInstanceLastOperationGetBadRequest { + return &ServiceInstanceLastOperationGetBadRequest{} +} + +/*ServiceInstanceLastOperationGetBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceInstanceLastOperationGetBadRequest struct { + Payload *models.Error +} + +func (o *ServiceInstanceLastOperationGetBadRequest) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}/last_operation][%d] serviceInstanceLastOperationGetBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceInstanceLastOperationGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceLastOperationGetGone creates a ServiceInstanceLastOperationGetGone with default headers values +func NewServiceInstanceLastOperationGetGone() *ServiceInstanceLastOperationGetGone { + return &ServiceInstanceLastOperationGetGone{} +} + +/*ServiceInstanceLastOperationGetGone handles this case with default header values. + +Gone +*/ +type ServiceInstanceLastOperationGetGone struct { + Payload *models.Error +} + +func (o *ServiceInstanceLastOperationGetGone) Error() string { + return fmt.Sprintf("[GET /v2/service_instances/{instance_id}/last_operation][%d] serviceInstanceLastOperationGetGone %+v", 410, o.Payload) +} + +func (o *ServiceInstanceLastOperationGetGone) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_provision_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_provision_parameters.go new file mode 100644 index 00000000000..178a80e54c2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_provision_parameters.go @@ -0,0 +1,239 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewServiceInstanceProvisionParams creates a new ServiceInstanceProvisionParams object +// with the default values initialized. +func NewServiceInstanceProvisionParams() *ServiceInstanceProvisionParams { + var () + return &ServiceInstanceProvisionParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceInstanceProvisionParamsWithTimeout creates a new ServiceInstanceProvisionParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceInstanceProvisionParamsWithTimeout(timeout time.Duration) *ServiceInstanceProvisionParams { + var () + return &ServiceInstanceProvisionParams{ + + timeout: timeout, + } +} + +// NewServiceInstanceProvisionParamsWithContext creates a new ServiceInstanceProvisionParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceInstanceProvisionParamsWithContext(ctx context.Context) *ServiceInstanceProvisionParams { + var () + return &ServiceInstanceProvisionParams{ + + Context: ctx, + } +} + +// NewServiceInstanceProvisionParamsWithHTTPClient creates a new ServiceInstanceProvisionParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceInstanceProvisionParamsWithHTTPClient(client *http.Client) *ServiceInstanceProvisionParams { + var () + return &ServiceInstanceProvisionParams{ + HTTPClient: client, + } +} + +/*ServiceInstanceProvisionParams contains all the parameters to send to the API endpoint +for the service instance provision operation typically these are written to a http.Request +*/ +type ServiceInstanceProvisionParams struct { + + /*XBrokerAPIOriginatingIdentity + identity of the user that initiated the request from the Platform + + */ + XBrokerAPIOriginatingIdentity *string + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + /*AcceptsIncomplete + asynchronous operations supported + + */ + AcceptsIncomplete *bool + /*Body + parameters for the requested service instance provision + + */ + Body *models.ServiceInstanceProvisionRequest + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service instance provision params +func (o *ServiceInstanceProvisionParams) WithTimeout(timeout time.Duration) *ServiceInstanceProvisionParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service instance provision params +func (o *ServiceInstanceProvisionParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service instance provision params +func (o *ServiceInstanceProvisionParams) WithContext(ctx context.Context) *ServiceInstanceProvisionParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service instance provision params +func (o *ServiceInstanceProvisionParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the 
service instance provision params +func (o *ServiceInstanceProvisionParams) WithHTTPClient(client *http.Client) *ServiceInstanceProvisionParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service instance provision params +func (o *ServiceInstanceProvisionParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithXBrokerAPIOriginatingIdentity adds the xBrokerAPIOriginatingIdentity to the service instance provision params +func (o *ServiceInstanceProvisionParams) WithXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) *ServiceInstanceProvisionParams { + o.SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity) + return o +} + +// SetXBrokerAPIOriginatingIdentity adds the xBrokerApiOriginatingIdentity to the service instance provision params +func (o *ServiceInstanceProvisionParams) SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) { + o.XBrokerAPIOriginatingIdentity = xBrokerAPIOriginatingIdentity +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the service instance provision params +func (o *ServiceInstanceProvisionParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *ServiceInstanceProvisionParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the service instance provision params +func (o *ServiceInstanceProvisionParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WithAcceptsIncomplete adds the acceptsIncomplete to the service instance provision params +func (o *ServiceInstanceProvisionParams) WithAcceptsIncomplete(acceptsIncomplete *bool) *ServiceInstanceProvisionParams { + o.SetAcceptsIncomplete(acceptsIncomplete) + return o +} + +// SetAcceptsIncomplete adds the acceptsIncomplete to the service instance provision params +func (o *ServiceInstanceProvisionParams) SetAcceptsIncomplete(acceptsIncomplete *bool) { + o.AcceptsIncomplete = acceptsIncomplete +} + +// WithBody adds the body to the service instance provision params +func (o *ServiceInstanceProvisionParams) WithBody(body *models.ServiceInstanceProvisionRequest) *ServiceInstanceProvisionParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the service instance provision params +func (o *ServiceInstanceProvisionParams) SetBody(body *models.ServiceInstanceProvisionRequest) { + o.Body = body +} + +// WithInstanceID adds the instanceID to the service instance provision params +func (o *ServiceInstanceProvisionParams) WithInstanceID(instanceID string) *ServiceInstanceProvisionParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the service instance provision params +func (o *ServiceInstanceProvisionParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceInstanceProvisionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.XBrokerAPIOriginatingIdentity != nil { + + // header param X-Broker-API-Originating-Identity + if err := r.SetHeaderParam("X-Broker-API-Originating-Identity", *o.XBrokerAPIOriginatingIdentity); err != nil { + return err + } + + } + + // header param X-Broker-API-Version + if err := r.SetHeaderParam("X-Broker-API-Version", o.XBrokerAPIVersion); err != nil { + return err + } + + if 
o.AcceptsIncomplete != nil { + + // query param accepts_incomplete + var qrAcceptsIncomplete bool + if o.AcceptsIncomplete != nil { + qrAcceptsIncomplete = *o.AcceptsIncomplete + } + qAcceptsIncomplete := swag.FormatBool(qrAcceptsIncomplete) + if qAcceptsIncomplete != "" { + if err := r.SetQueryParam("accepts_incomplete", qAcceptsIncomplete); err != nil { + return err + } + } + + } + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_provision_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_provision_responses.go new file mode 100644 index 00000000000..4f29ccbc753 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_provision_responses.go @@ -0,0 +1,247 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceInstanceProvisionReader is a Reader for the ServiceInstanceProvision structure. +type ServiceInstanceProvisionReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceInstanceProvisionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceInstanceProvisionOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 201: + result := NewServiceInstanceProvisionCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewServiceInstanceProvisionAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceInstanceProvisionBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 409: + result := NewServiceInstanceProvisionConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewServiceInstanceProvisionUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceInstanceProvisionOK creates a ServiceInstanceProvisionOK with default headers values +func NewServiceInstanceProvisionOK() *ServiceInstanceProvisionOK { + return &ServiceInstanceProvisionOK{} +} + +/*ServiceInstanceProvisionOK handles this case with default header values. 
+ +OK +*/ +type ServiceInstanceProvisionOK struct { + Payload *models.ServiceInstanceProvision +} + +func (o *ServiceInstanceProvisionOK) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}][%d] serviceInstanceProvisionOK %+v", 200, o.Payload) +} + +func (o *ServiceInstanceProvisionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceInstanceProvision) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceProvisionCreated creates a ServiceInstanceProvisionCreated with default headers values +func NewServiceInstanceProvisionCreated() *ServiceInstanceProvisionCreated { + return &ServiceInstanceProvisionCreated{} +} + +/*ServiceInstanceProvisionCreated handles this case with default header values. + +Created +*/ +type ServiceInstanceProvisionCreated struct { + Payload *models.ServiceInstanceProvision +} + +func (o *ServiceInstanceProvisionCreated) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}][%d] serviceInstanceProvisionCreated %+v", 201, o.Payload) +} + +func (o *ServiceInstanceProvisionCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceInstanceProvision) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceProvisionAccepted creates a ServiceInstanceProvisionAccepted with default headers values +func NewServiceInstanceProvisionAccepted() *ServiceInstanceProvisionAccepted { + return &ServiceInstanceProvisionAccepted{} +} + +/*ServiceInstanceProvisionAccepted handles this case with default header values. + +Accepted +*/ +type ServiceInstanceProvisionAccepted struct { + Payload *models.ServiceInstanceAsyncOperation +} + +func (o *ServiceInstanceProvisionAccepted) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}][%d] serviceInstanceProvisionAccepted %+v", 202, o.Payload) +} + +func (o *ServiceInstanceProvisionAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceInstanceAsyncOperation) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceProvisionBadRequest creates a ServiceInstanceProvisionBadRequest with default headers values +func NewServiceInstanceProvisionBadRequest() *ServiceInstanceProvisionBadRequest { + return &ServiceInstanceProvisionBadRequest{} +} + +/*ServiceInstanceProvisionBadRequest handles this case with default header values. 
+ +Bad Request +*/ +type ServiceInstanceProvisionBadRequest struct { + Payload *models.Error +} + +func (o *ServiceInstanceProvisionBadRequest) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}][%d] serviceInstanceProvisionBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceInstanceProvisionBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceProvisionConflict creates a ServiceInstanceProvisionConflict with default headers values +func NewServiceInstanceProvisionConflict() *ServiceInstanceProvisionConflict { + return &ServiceInstanceProvisionConflict{} +} + +/*ServiceInstanceProvisionConflict handles this case with default header values. + +Conflict +*/ +type ServiceInstanceProvisionConflict struct { + Payload *models.Error +} + +func (o *ServiceInstanceProvisionConflict) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}][%d] serviceInstanceProvisionConflict %+v", 409, o.Payload) +} + +func (o *ServiceInstanceProvisionConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceProvisionUnprocessableEntity creates a ServiceInstanceProvisionUnprocessableEntity with default headers values +func NewServiceInstanceProvisionUnprocessableEntity() *ServiceInstanceProvisionUnprocessableEntity { + return &ServiceInstanceProvisionUnprocessableEntity{} +} + +/*ServiceInstanceProvisionUnprocessableEntity handles this case with default header values. + +Unprocessable Entity +*/ +type ServiceInstanceProvisionUnprocessableEntity struct { + Payload *models.Error +} + +func (o *ServiceInstanceProvisionUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /v2/service_instances/{instance_id}][%d] serviceInstanceProvisionUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ServiceInstanceProvisionUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_update_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_update_parameters.go new file mode 100644 index 00000000000..52a6ad67f54 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_update_parameters.go @@ -0,0 +1,239 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// NewServiceInstanceUpdateParams creates a new ServiceInstanceUpdateParams object +// with the default values initialized. +func NewServiceInstanceUpdateParams() *ServiceInstanceUpdateParams { + var () + return &ServiceInstanceUpdateParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceInstanceUpdateParamsWithTimeout creates a new ServiceInstanceUpdateParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceInstanceUpdateParamsWithTimeout(timeout time.Duration) *ServiceInstanceUpdateParams { + var () + return &ServiceInstanceUpdateParams{ + + timeout: timeout, + } +} + +// NewServiceInstanceUpdateParamsWithContext creates a new ServiceInstanceUpdateParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceInstanceUpdateParamsWithContext(ctx context.Context) *ServiceInstanceUpdateParams { + var () + return &ServiceInstanceUpdateParams{ + + Context: ctx, + } +} + +// NewServiceInstanceUpdateParamsWithHTTPClient creates a new ServiceInstanceUpdateParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceInstanceUpdateParamsWithHTTPClient(client *http.Client) *ServiceInstanceUpdateParams { + var () + return &ServiceInstanceUpdateParams{ + HTTPClient: client, + } +} + +/*ServiceInstanceUpdateParams contains all the parameters to send to the API endpoint +for the service instance update operation typically these are written to a http.Request +*/ +type ServiceInstanceUpdateParams struct { + + /*XBrokerAPIOriginatingIdentity + identity of the user that initiated the request from the Platform + + */ + XBrokerAPIOriginatingIdentity *string + /*XBrokerAPIVersion + version number of the Service Broker API that the Platform will use + + */ + XBrokerAPIVersion string + /*AcceptsIncomplete + asynchronous operations supported + + */ + AcceptsIncomplete *bool + /*Body + parameters for the requested service instance update + + */ + Body *models.ServiceInstanceUpdateRequest + /*InstanceID + instance id of instance to provision + + */ + InstanceID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service instance update params +func (o *ServiceInstanceUpdateParams) WithTimeout(timeout time.Duration) *ServiceInstanceUpdateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service instance update params +func (o *ServiceInstanceUpdateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service instance update params +func (o *ServiceInstanceUpdateParams) WithContext(ctx context.Context) *ServiceInstanceUpdateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service instance update params +func (o *ServiceInstanceUpdateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service instance update params +func (o *ServiceInstanceUpdateParams) WithHTTPClient(client *http.Client) 
*ServiceInstanceUpdateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service instance update params +func (o *ServiceInstanceUpdateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithXBrokerAPIOriginatingIdentity adds the xBrokerAPIOriginatingIdentity to the service instance update params +func (o *ServiceInstanceUpdateParams) WithXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) *ServiceInstanceUpdateParams { + o.SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity) + return o +} + +// SetXBrokerAPIOriginatingIdentity adds the xBrokerApiOriginatingIdentity to the service instance update params +func (o *ServiceInstanceUpdateParams) SetXBrokerAPIOriginatingIdentity(xBrokerAPIOriginatingIdentity *string) { + o.XBrokerAPIOriginatingIdentity = xBrokerAPIOriginatingIdentity +} + +// WithXBrokerAPIVersion adds the xBrokerAPIVersion to the service instance update params +func (o *ServiceInstanceUpdateParams) WithXBrokerAPIVersion(xBrokerAPIVersion string) *ServiceInstanceUpdateParams { + o.SetXBrokerAPIVersion(xBrokerAPIVersion) + return o +} + +// SetXBrokerAPIVersion adds the xBrokerApiVersion to the service instance update params +func (o *ServiceInstanceUpdateParams) SetXBrokerAPIVersion(xBrokerAPIVersion string) { + o.XBrokerAPIVersion = xBrokerAPIVersion +} + +// WithAcceptsIncomplete adds the acceptsIncomplete to the service instance update params +func (o *ServiceInstanceUpdateParams) WithAcceptsIncomplete(acceptsIncomplete *bool) *ServiceInstanceUpdateParams { + o.SetAcceptsIncomplete(acceptsIncomplete) + return o +} + +// SetAcceptsIncomplete adds the acceptsIncomplete to the service instance update params +func (o *ServiceInstanceUpdateParams) SetAcceptsIncomplete(acceptsIncomplete *bool) { + o.AcceptsIncomplete = acceptsIncomplete +} + +// WithBody adds the body to the service instance update params +func (o *ServiceInstanceUpdateParams) WithBody(body *models.ServiceInstanceUpdateRequest) *ServiceInstanceUpdateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the service instance update params +func (o *ServiceInstanceUpdateParams) SetBody(body *models.ServiceInstanceUpdateRequest) { + o.Body = body +} + +// WithInstanceID adds the instanceID to the service instance update params +func (o *ServiceInstanceUpdateParams) WithInstanceID(instanceID string) *ServiceInstanceUpdateParams { + o.SetInstanceID(instanceID) + return o +} + +// SetInstanceID adds the instanceId to the service instance update params +func (o *ServiceInstanceUpdateParams) SetInstanceID(instanceID string) { + o.InstanceID = instanceID +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceInstanceUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.XBrokerAPIOriginatingIdentity != nil { + + // header param X-Broker-API-Originating-Identity + if err := r.SetHeaderParam("X-Broker-API-Originating-Identity", *o.XBrokerAPIOriginatingIdentity); err != nil { + return err + } + + } + + // header param X-Broker-API-Version + if err := r.SetHeaderParam("X-Broker-API-Version", o.XBrokerAPIVersion); err != nil { + return err + } + + if o.AcceptsIncomplete != nil { + + // query param accepts_incomplete + var qrAcceptsIncomplete bool + if o.AcceptsIncomplete != nil { + qrAcceptsIncomplete = *o.AcceptsIncomplete + } + qAcceptsIncomplete := 
swag.FormatBool(qrAcceptsIncomplete) + if qAcceptsIncomplete != "" { + if err := r.SetQueryParam("accepts_incomplete", qAcceptsIncomplete); err != nil { + return err + } + } + + } + + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param instance_id + if err := r.SetPathParam("instance_id", o.InstanceID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_update_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_update_responses.go new file mode 100644 index 00000000000..5780f592034 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instance_update_responses.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceInstanceUpdateReader is a Reader for the ServiceInstanceUpdate structure. +type ServiceInstanceUpdateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceInstanceUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceInstanceUpdateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 202: + result := NewServiceInstanceUpdateAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 400: + result := NewServiceInstanceUpdateBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + case 422: + result := NewServiceInstanceUpdateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceInstanceUpdateOK creates a ServiceInstanceUpdateOK with default headers values +func NewServiceInstanceUpdateOK() *ServiceInstanceUpdateOK { + return &ServiceInstanceUpdateOK{} +} + +/*ServiceInstanceUpdateOK handles this case with default header values. 
+ +OK +*/ +type ServiceInstanceUpdateOK struct { + Payload models.Object +} + +func (o *ServiceInstanceUpdateOK) Error() string { + return fmt.Sprintf("[PATCH /v2/service_instances/{instance_id}][%d] serviceInstanceUpdateOK %+v", 200, o.Payload) +} + +func (o *ServiceInstanceUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceUpdateAccepted creates a ServiceInstanceUpdateAccepted with default headers values +func NewServiceInstanceUpdateAccepted() *ServiceInstanceUpdateAccepted { + return &ServiceInstanceUpdateAccepted{} +} + +/*ServiceInstanceUpdateAccepted handles this case with default header values. + +Accepted +*/ +type ServiceInstanceUpdateAccepted struct { + Payload *models.ServiceInstanceAsyncOperation +} + +func (o *ServiceInstanceUpdateAccepted) Error() string { + return fmt.Sprintf("[PATCH /v2/service_instances/{instance_id}][%d] serviceInstanceUpdateAccepted %+v", 202, o.Payload) +} + +func (o *ServiceInstanceUpdateAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ServiceInstanceAsyncOperation) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceUpdateBadRequest creates a ServiceInstanceUpdateBadRequest with default headers values +func NewServiceInstanceUpdateBadRequest() *ServiceInstanceUpdateBadRequest { + return &ServiceInstanceUpdateBadRequest{} +} + +/*ServiceInstanceUpdateBadRequest handles this case with default header values. + +Bad Request +*/ +type ServiceInstanceUpdateBadRequest struct { + Payload *models.Error +} + +func (o *ServiceInstanceUpdateBadRequest) Error() string { + return fmt.Sprintf("[PATCH /v2/service_instances/{instance_id}][%d] serviceInstanceUpdateBadRequest %+v", 400, o.Payload) +} + +func (o *ServiceInstanceUpdateBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceInstanceUpdateUnprocessableEntity creates a ServiceInstanceUpdateUnprocessableEntity with default headers values +func NewServiceInstanceUpdateUnprocessableEntity() *ServiceInstanceUpdateUnprocessableEntity { + return &ServiceInstanceUpdateUnprocessableEntity{} +} + +/*ServiceInstanceUpdateUnprocessableEntity handles this case with default header values. 
+ +Unprocessable entity +*/ +type ServiceInstanceUpdateUnprocessableEntity struct { + Payload *models.Error +} + +func (o *ServiceInstanceUpdateUnprocessableEntity) Error() string { + return fmt.Sprintf("[PATCH /v2/service_instances/{instance_id}][%d] serviceInstanceUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ServiceInstanceUpdateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instances_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instances_client.go new file mode 100644 index 00000000000..27b7469e25e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/service_instances/service_instances_client.go @@ -0,0 +1,195 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package service_instances + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new service instances API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for service instances API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +ServiceInstanceDeprovision deprovisions a service instance +*/ +func (a *Client) ServiceInstanceDeprovision(params *ServiceInstanceDeprovisionParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceInstanceDeprovisionOK, *ServiceInstanceDeprovisionAccepted, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceInstanceDeprovisionParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceInstance.deprovision", + Method: "DELETE", + PathPattern: "/v2/service_instances/{instance_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceInstanceDeprovisionReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *ServiceInstanceDeprovisionOK: + return value, nil, nil + case *ServiceInstanceDeprovisionAccepted: + return nil, value, nil + } + return nil, nil, nil + +} + +/* +ServiceInstanceGet gets a service instance +*/ +func (a *Client) ServiceInstanceGet(params *ServiceInstanceGetParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceInstanceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceInstanceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceInstance.get", + Method: "GET", + PathPattern: "/v2/service_instances/{instance_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceInstanceGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: 
params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*ServiceInstanceGetOK), nil
+
+}
+
+/*
+ServiceInstanceLastOperationGet gets the last requested operation state for a service instance
+*/
+func (a *Client) ServiceInstanceLastOperationGet(params *ServiceInstanceLastOperationGetParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceInstanceLastOperationGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewServiceInstanceLastOperationGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "serviceInstance.lastOperation.get",
+		Method:             "GET",
+		PathPattern:        "/v2/service_instances/{instance_id}/last_operation",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ServiceInstanceLastOperationGetReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*ServiceInstanceLastOperationGetOK), nil
+
+}
+
+/*
+ServiceInstanceProvision provisions a service instance
+*/
+func (a *Client) ServiceInstanceProvision(params *ServiceInstanceProvisionParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceInstanceProvisionOK, *ServiceInstanceProvisionCreated, *ServiceInstanceProvisionAccepted, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewServiceInstanceProvisionParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "serviceInstance.provision",
+		Method:             "PUT",
+		PathPattern:        "/v2/service_instances/{instance_id}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ServiceInstanceProvisionReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	switch value := result.(type) {
+	case *ServiceInstanceProvisionOK:
+		return value, nil, nil, nil
+	case *ServiceInstanceProvisionCreated:
+		return nil, value, nil, nil
+	case *ServiceInstanceProvisionAccepted:
+		return nil, nil, value, nil
+	}
+	return nil, nil, nil, nil
+
+}
+
+/*
+ServiceInstanceUpdate updates a service instance
+*/
+func (a *Client) ServiceInstanceUpdate(params *ServiceInstanceUpdateParams, authInfo runtime.ClientAuthInfoWriter) (*ServiceInstanceUpdateOK, *ServiceInstanceUpdateAccepted, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewServiceInstanceUpdateParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "serviceInstance.update",
+		Method:             "PATCH",
+		PathPattern:        "/v2/service_instances/{instance_id}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ServiceInstanceUpdateReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+	switch value := result.(type) {
+	case *ServiceInstanceUpdateOK:
+		return value, nil, nil
+	case *ServiceInstanceUpdateAccepted:
+		return nil, value, nil
+	}
+	return nil, nil, nil
+
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+	a.transport = transport
+}
diff --git 
a/vendor/github.com/IBM-Cloud/power-go-client/power/client/storage_types/service_broker_storagetypes_get_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/storage_types/service_broker_storagetypes_get_parameters.go new file mode 100644 index 00000000000..7567d13564f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/storage_types/service_broker_storagetypes_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage_types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewServiceBrokerStoragetypesGetParams creates a new ServiceBrokerStoragetypesGetParams object +// with the default values initialized. +func NewServiceBrokerStoragetypesGetParams() *ServiceBrokerStoragetypesGetParams { + + return &ServiceBrokerStoragetypesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerStoragetypesGetParamsWithTimeout creates a new ServiceBrokerStoragetypesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerStoragetypesGetParamsWithTimeout(timeout time.Duration) *ServiceBrokerStoragetypesGetParams { + + return &ServiceBrokerStoragetypesGetParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerStoragetypesGetParamsWithContext creates a new ServiceBrokerStoragetypesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerStoragetypesGetParamsWithContext(ctx context.Context) *ServiceBrokerStoragetypesGetParams { + + return &ServiceBrokerStoragetypesGetParams{ + + Context: ctx, + } +} + +// NewServiceBrokerStoragetypesGetParamsWithHTTPClient creates a new ServiceBrokerStoragetypesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerStoragetypesGetParamsWithHTTPClient(client *http.Client) *ServiceBrokerStoragetypesGetParams { + + return &ServiceBrokerStoragetypesGetParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerStoragetypesGetParams contains all the parameters to send to the API endpoint +for the service broker storagetypes get operation typically these are written to a http.Request +*/ +type ServiceBrokerStoragetypesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker storagetypes get params +func (o *ServiceBrokerStoragetypesGetParams) WithTimeout(timeout time.Duration) *ServiceBrokerStoragetypesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker storagetypes get params +func (o *ServiceBrokerStoragetypesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker storagetypes get params +func (o *ServiceBrokerStoragetypesGetParams) WithContext(ctx context.Context) *ServiceBrokerStoragetypesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker storagetypes get params +func (o *ServiceBrokerStoragetypesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the 
HTTPClient to the service broker storagetypes get params +func (o *ServiceBrokerStoragetypesGetParams) WithHTTPClient(client *http.Client) *ServiceBrokerStoragetypesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker storagetypes get params +func (o *ServiceBrokerStoragetypesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerStoragetypesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/storage_types/service_broker_storagetypes_get_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/storage_types/service_broker_storagetypes_get_responses.go new file mode 100644 index 00000000000..9ed46be1dec --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/storage_types/service_broker_storagetypes_get_responses.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage_types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerStoragetypesGetReader is a Reader for the ServiceBrokerStoragetypesGet structure. +type ServiceBrokerStoragetypesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerStoragetypesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerStoragetypesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 500: + result := NewServiceBrokerStoragetypesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerStoragetypesGetOK creates a ServiceBrokerStoragetypesGetOK with default headers values +func NewServiceBrokerStoragetypesGetOK() *ServiceBrokerStoragetypesGetOK { + return &ServiceBrokerStoragetypesGetOK{} +} + +/*ServiceBrokerStoragetypesGetOK handles this case with default header values. 
+ +OK +*/ +type ServiceBrokerStoragetypesGetOK struct { + Payload models.StorageTypes +} + +func (o *ServiceBrokerStoragetypesGetOK) Error() string { + return fmt.Sprintf("[GET /broker/v1/storage-types][%d] serviceBrokerStoragetypesGetOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerStoragetypesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewServiceBrokerStoragetypesGetInternalServerError creates a ServiceBrokerStoragetypesGetInternalServerError with default headers values +func NewServiceBrokerStoragetypesGetInternalServerError() *ServiceBrokerStoragetypesGetInternalServerError { + return &ServiceBrokerStoragetypesGetInternalServerError{} +} + +/*ServiceBrokerStoragetypesGetInternalServerError handles this case with default header values. + +Internal Server Error +*/ +type ServiceBrokerStoragetypesGetInternalServerError struct { + Payload *models.Error +} + +func (o *ServiceBrokerStoragetypesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /broker/v1/storage-types][%d] serviceBrokerStoragetypesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ServiceBrokerStoragetypesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/storage_types/storage_types_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/storage_types/storage_types_client.go new file mode 100644 index 00000000000..5359c424c95 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/storage_types/storage_types_client.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage_types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new storage types API client. 
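+// NOTE (editorial): a minimal, hypothetical usage sketch of this generated
+// client; the host name and transport wiring below are assumptions, not part
+// of this file. httptransport is github.com/go-openapi/runtime/client and
+// strfmt.Default is the go-openapi default format registry:
+//
+//	transport := httptransport.New("power-iaas.example.test", "/", []string{"http"})
+//	client := storage_types.New(transport, strfmt.Default)
+//	ok, err := client.ServiceBrokerStoragetypesGet(nil) // nil params fall back to defaults
+//	if err == nil {
+//		_ = ok.Payload // models.StorageTypes
+//	}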
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client {
+	return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for storage types API
+*/
+type Client struct {
+	transport runtime.ClientTransport
+	formats   strfmt.Registry
+}
+
+/*
+ServiceBrokerStoragetypesGet gets the available storage types in a region
+*/
+func (a *Client) ServiceBrokerStoragetypesGet(params *ServiceBrokerStoragetypesGetParams) (*ServiceBrokerStoragetypesGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewServiceBrokerStoragetypesGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "serviceBroker.storagetypes.get",
+		Method:             "GET",
+		PathPattern:        "/broker/v1/storage-types",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ServiceBrokerStoragetypesGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.(*ServiceBrokerStoragetypesGetOK), nil
+
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+	a.transport = transport
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/swagger_spec/service_broker_swaggerspec_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/swagger_spec/service_broker_swaggerspec_parameters.go
new file mode 100644
index 00000000000..e57a82db7dd
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/swagger_spec/service_broker_swaggerspec_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package swagger_spec
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+
+	strfmt "github.com/go-openapi/strfmt"
+)
+
+// NewServiceBrokerSwaggerspecParams creates a new ServiceBrokerSwaggerspecParams object
+// with the default values initialized.
+func NewServiceBrokerSwaggerspecParams() *ServiceBrokerSwaggerspecParams { + + return &ServiceBrokerSwaggerspecParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewServiceBrokerSwaggerspecParamsWithTimeout creates a new ServiceBrokerSwaggerspecParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewServiceBrokerSwaggerspecParamsWithTimeout(timeout time.Duration) *ServiceBrokerSwaggerspecParams { + + return &ServiceBrokerSwaggerspecParams{ + + timeout: timeout, + } +} + +// NewServiceBrokerSwaggerspecParamsWithContext creates a new ServiceBrokerSwaggerspecParams object +// with the default values initialized, and the ability to set a context for a request +func NewServiceBrokerSwaggerspecParamsWithContext(ctx context.Context) *ServiceBrokerSwaggerspecParams { + + return &ServiceBrokerSwaggerspecParams{ + + Context: ctx, + } +} + +// NewServiceBrokerSwaggerspecParamsWithHTTPClient creates a new ServiceBrokerSwaggerspecParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewServiceBrokerSwaggerspecParamsWithHTTPClient(client *http.Client) *ServiceBrokerSwaggerspecParams { + + return &ServiceBrokerSwaggerspecParams{ + HTTPClient: client, + } +} + +/*ServiceBrokerSwaggerspecParams contains all the parameters to send to the API endpoint +for the service broker swaggerspec operation typically these are written to a http.Request +*/ +type ServiceBrokerSwaggerspecParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the service broker swaggerspec params +func (o *ServiceBrokerSwaggerspecParams) WithTimeout(timeout time.Duration) *ServiceBrokerSwaggerspecParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the service broker swaggerspec params +func (o *ServiceBrokerSwaggerspecParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the service broker swaggerspec params +func (o *ServiceBrokerSwaggerspecParams) WithContext(ctx context.Context) *ServiceBrokerSwaggerspecParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the service broker swaggerspec params +func (o *ServiceBrokerSwaggerspecParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the service broker swaggerspec params +func (o *ServiceBrokerSwaggerspecParams) WithHTTPClient(client *http.Client) *ServiceBrokerSwaggerspecParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the service broker swaggerspec params +func (o *ServiceBrokerSwaggerspecParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ServiceBrokerSwaggerspecParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/swagger_spec/service_broker_swaggerspec_responses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/swagger_spec/service_broker_swaggerspec_responses.go new file mode 100644 index 00000000000..a2942f901e2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/swagger_spec/service_broker_swaggerspec_responses.go @@ -0,0 +1,65 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package swagger_spec + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + models "github.com/IBM-Cloud/power-go-client/power/models" +) + +// ServiceBrokerSwaggerspecReader is a Reader for the ServiceBrokerSwaggerspec structure. +type ServiceBrokerSwaggerspecReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ServiceBrokerSwaggerspecReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewServiceBrokerSwaggerspecOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewServiceBrokerSwaggerspecOK creates a ServiceBrokerSwaggerspecOK with default headers values +func NewServiceBrokerSwaggerspecOK() *ServiceBrokerSwaggerspecOK { + return &ServiceBrokerSwaggerspecOK{} +} + +/*ServiceBrokerSwaggerspecOK handles this case with default header values. + +OK +*/ +type ServiceBrokerSwaggerspecOK struct { + Payload models.Object +} + +func (o *ServiceBrokerSwaggerspecOK) Error() string { + return fmt.Sprintf("[GET /v1/swagger.json][%d] serviceBrokerSwaggerspecOK %+v", 200, o.Payload) +} + +func (o *ServiceBrokerSwaggerspecOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/client/swagger_spec/swagger_spec_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/client/swagger_spec/swagger_spec_client.go new file mode 100644 index 00000000000..8f9e1a05c74 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/client/swagger_spec/swagger_spec_client.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package swagger_spec + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +// New creates a new swagger spec API client. 
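+// NOTE (editorial): a hypothetical call pattern, mirroring the other generated
+// clients in this package tree (transport construction as sketched for the
+// storage types client above):
+//
+//	spec, err := swagger_spec.New(transport, strfmt.Default).ServiceBrokerSwaggerspec(nil)
+//	if err == nil {
+//		_ = spec.Payload // models.Object holding the raw swagger document
+//	}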
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client { + return &Client{transport: transport, formats: formats} +} + +/* +Client for swagger spec API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +/* +ServiceBrokerSwaggerspec gets swagger json spec +*/ +func (a *Client) ServiceBrokerSwaggerspec(params *ServiceBrokerSwaggerspecParams) (*ServiceBrokerSwaggerspecOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewServiceBrokerSwaggerspecParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "serviceBroker.swaggerspec", + Method: "GET", + PathPattern: "/v1/swagger.json", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ServiceBrokerSwaggerspecReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ServiceBrokerSwaggerspecOK), nil + +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/access_token.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/access_token.go new file mode 100644 index 00000000000..6efa42aa291 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/access_token.go @@ -0,0 +1,64 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AccessToken access token +// swagger:model AccessToken +type AccessToken struct { + + // Access Token + // Required: true + AccessToken *string `json:"accessToken"` +} + +// Validate validates this access token +func (m *AccessToken) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAccessToken(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AccessToken) validateAccessToken(formats strfmt.Registry) error { + + if err := validate.Required("accessToken", "body", m.AccessToken); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AccessToken) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AccessToken) UnmarshalBinary(b []byte) error { + var res AccessToken + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/async_operation.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/async_operation.go new file mode 100644 index 00000000000..33b1dc40d40 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/async_operation.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
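+// NOTE (editorial): a hypothetical validation round trip for the AccessToken
+// model defined in access_token.go above; swag.String is an existing
+// go-openapi/swag pointer helper, the token value is a placeholder:
+//
+//	tok := &models.AccessToken{AccessToken: swag.String("eyJhbGciOi...")}
+//	if err := tok.Validate(strfmt.Default); err != nil {
+//		// a nil AccessToken field fails the Required("accessToken", ...) check
+//	}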
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// AsyncOperation async operation +// swagger:model AsyncOperation +type AsyncOperation struct { + + // operation + Operation string `json:"operation,omitempty"` +} + +// Validate validates this async operation +func (m *AsyncOperation) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *AsyncOperation) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AsyncOperation) UnmarshalBinary(b []byte) error { + var res AsyncOperation + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/available_stock_images.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/available_stock_images.go new file mode 100644 index 00000000000..711845a3b15 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/available_stock_images.go @@ -0,0 +1,42 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/validate" +) + +// AvailableStockImages A map of an array of stock images for each available storage type +// swagger:model AvailableStockImages +type AvailableStockImages map[string]StockImages + +// Validate validates this available stock images +func (m AvailableStockImages) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.Required(k, "body", m[k]); err != nil { + return err + } + + if err := m[k].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(k) + } + return err + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/catalog.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/catalog.go new file mode 100644 index 00000000000..e60918ece5c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/catalog.go @@ -0,0 +1,80 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// Catalog catalog +// swagger:model Catalog +type Catalog struct { + + // services + Services []*Service `json:"services"` +} + +// Validate validates this catalog +func (m *Catalog) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateServices(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Catalog) validateServices(formats strfmt.Registry) error { + + if swag.IsZero(m.Services) { // not required + return nil + } + + for i := 0; i < len(m.Services); i++ { + if swag.IsZero(m.Services[i]) { // not required + continue + } + + if m.Services[i] != nil { + if err := m.Services[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("services" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Catalog) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Catalog) UnmarshalBinary(b []byte) error { + var res Catalog + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/clone_task_reference.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/clone_task_reference.go new file mode 100644 index 00000000000..bd777a2db58 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/clone_task_reference.go @@ -0,0 +1,81 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloneTaskReference clone task reference +// swagger:model CloneTaskReference +type CloneTaskReference struct { + + // ID of a long running PowerVC clone task + // Required: true + CloneTaskID *string `json:"cloneTaskID"` + + // Link to PowerVC clone task resource + // Required: true + Href *string `json:"href"` +} + +// Validate validates this clone task reference +func (m *CloneTaskReference) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCloneTaskID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHref(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloneTaskReference) validateCloneTaskID(formats strfmt.Registry) error { + + if err := validate.Required("cloneTaskID", "body", m.CloneTaskID); err != nil { + return err + } + + return nil +} + +func (m *CloneTaskReference) validateHref(formats strfmt.Registry) error { + + if err := validate.Required("href", "body", m.Href); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloneTaskReference) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloneTaskReference) UnmarshalBinary(b []byte) error { + var res CloneTaskReference + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/clone_task_status.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/clone_task_status.go new file mode 100644 index 00000000000..9788a64614f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/clone_task_status.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloneTaskStatus clone task status +// swagger:model CloneTaskStatus +type CloneTaskStatus struct { + + // List of cloned volumes created from the clone volumes task + ClonedVolumes []*ClonedVolume `json:"clonedVolumes"` + + // The reason the clone volumes task has failed + FailedReason string `json:"failedReason,omitempty"` + + // Snapshot completion percentage + // Required: true + PercentComplete *int64 `json:"percentComplete"` + + // Status of the clone volumes task + // Required: true + // Enum: [running completed failed unknown] + Status *string `json:"status"` +} + +// Validate validates this clone task status +func (m *CloneTaskStatus) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClonedVolumes(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePercentComplete(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloneTaskStatus) validateClonedVolumes(formats strfmt.Registry) error { + + if swag.IsZero(m.ClonedVolumes) { // not required + return nil + } + + for i := 0; i < len(m.ClonedVolumes); i++ { + if swag.IsZero(m.ClonedVolumes[i]) { // not required + continue + } + + if m.ClonedVolumes[i] != nil { + if err := m.ClonedVolumes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("clonedVolumes" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *CloneTaskStatus) validatePercentComplete(formats strfmt.Registry) error { + + if err := validate.Required("percentComplete", "body", m.PercentComplete); err != nil { + return err + } + + return nil +} + +var cloneTaskStatusTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["running","completed","failed","unknown"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + cloneTaskStatusTypeStatusPropEnum = append(cloneTaskStatusTypeStatusPropEnum, v) + } +} + +const ( + + // CloneTaskStatusStatusRunning captures enum value "running" + CloneTaskStatusStatusRunning string = "running" + + // CloneTaskStatusStatusCompleted captures enum value "completed" + CloneTaskStatusStatusCompleted string = "completed" + + // CloneTaskStatusStatusFailed captures enum value "failed" + CloneTaskStatusStatusFailed string = "failed" + + // CloneTaskStatusStatusUnknown captures enum value "unknown" + CloneTaskStatusStatusUnknown string = "unknown" +) + +// prop value enum +func (m *CloneTaskStatus) validateStatusEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, cloneTaskStatusTypeStatusPropEnum); err != nil { + return err + } + return nil +} + +func (m *CloneTaskStatus) validateStatus(formats strfmt.Registry) error { + + if err := validate.Required("status", "body", m.Status); err != nil { + return err + } + + // value enum + if err := m.validateStatusEnum("status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloneTaskStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloneTaskStatus) UnmarshalBinary(b []byte) error { + var res CloneTaskStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloned_volume.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloned_volume.go new file mode 100644 index 00000000000..2c525f2eb59 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloned_volume.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
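+// NOTE (editorial): a hypothetical status check against the enum constants
+// generated in clone_task_status.go above; `status` stands in for a
+// *models.CloneTaskStatus returned by a clone task query:
+//
+//	if status.Status != nil && *status.Status == models.CloneTaskStatusStatusFailed {
+//		return fmt.Errorf("clone task failed: %s", status.FailedReason)
+//	}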
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// ClonedVolume cloned volume +// swagger:model ClonedVolume +type ClonedVolume struct { + + // ID of the new cloned volume + ClonedVolumeID string `json:"clonedVolumeID,omitempty"` + + // ID of the source volume to be cloned + SourceVolumeID string `json:"sourceVolumeID,omitempty"` +} + +// Validate validates this cloned volume +func (m *ClonedVolume) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ClonedVolume) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ClonedVolume) UnmarshalBinary(b []byte) error { + var res ClonedVolume + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloned_volume_detail.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloned_volume_detail.go new file mode 100644 index 00000000000..50910571b68 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloned_volume_detail.go @@ -0,0 +1,99 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ClonedVolumeDetail cloned volume detail +// swagger:model ClonedVolumeDetail +type ClonedVolumeDetail struct { + + // clone + // Required: true + Clone *VolumeInfo `json:"clone"` + + // source + // Required: true + Source *VolumeInfo `json:"source"` +} + +// Validate validates this cloned volume detail +func (m *ClonedVolumeDetail) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClone(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSource(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ClonedVolumeDetail) validateClone(formats strfmt.Registry) error { + + if err := validate.Required("clone", "body", m.Clone); err != nil { + return err + } + + if m.Clone != nil { + if err := m.Clone.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("clone") + } + return err + } + } + + return nil +} + +func (m *ClonedVolumeDetail) validateSource(formats strfmt.Registry) error { + + if err := validate.Required("source", "body", m.Source); err != nil { + return err + } + + if m.Source != nil { + if err := m.Source.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("source") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ClonedVolumeDetail) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ClonedVolumeDetail) UnmarshalBinary(b []byte) error { + var res ClonedVolumeDetail + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection.go new file mode 100644 index 00000000000..c8736628673 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection.go @@ -0,0 +1,306 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudConnection cloud connection +// swagger:model CloudConnection +type CloudConnection struct { + + // classic + Classic *CloudConnectionEndpointClassic `json:"classic,omitempty"` + + // cloud connection ID + // Required: true + CloudConnectionID *string `json:"cloudConnectionID"` + + // creation date + // Required: true + // Format: date-time + CreationDate *strfmt.DateTime `json:"creationDate"` + + // enable global routing for this cloud connection (default=false) + // Required: true + GlobalRouting *bool `json:"globalRouting"` + + // IBM IP address + // Required: true + IbmIPAddress *string `json:"ibmIPAddress"` + + // link status + // Required: true + LinkStatus *string `json:"linkStatus"` + + // metered + // Required: true + Metered *bool `json:"metered"` + + // name of the cloud connection + // Required: true + Name *string `json:"name"` + + // Network References + Networks []*NetworkReference `json:"networks,omitempty"` + + // port + // Required: true + Port *int64 `json:"port"` + + // speed of the cloud connection (speed in megabits per second) + // Required: true + Speed *int64 `json:"speed"` + + // user IP address + // Required: true + UserIPAddress *string `json:"userIPAddress"` + + // vpc + Vpc *CloudConnectionEndpointVPC `json:"vpc,omitempty"` +} + +// Validate validates this cloud connection +func (m *CloudConnection) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClassic(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCloudConnectionID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCreationDate(formats); err != nil { + res = 
append(res, err) + } + + if err := m.validateGlobalRouting(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIbmIPAddress(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLinkStatus(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMetered(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetworks(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePort(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpeed(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUserIPAddress(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVpc(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudConnection) validateClassic(formats strfmt.Registry) error { + + if swag.IsZero(m.Classic) { // not required + return nil + } + + if m.Classic != nil { + if err := m.Classic.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("classic") + } + return err + } + } + + return nil +} + +func (m *CloudConnection) validateCloudConnectionID(formats strfmt.Registry) error { + + if err := validate.Required("cloudConnectionID", "body", m.CloudConnectionID); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateCreationDate(formats strfmt.Registry) error { + + if err := validate.Required("creationDate", "body", m.CreationDate); err != nil { + return err + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateGlobalRouting(formats strfmt.Registry) error { + + if err := validate.Required("globalRouting", "body", m.GlobalRouting); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateIbmIPAddress(formats strfmt.Registry) error { + + if err := validate.Required("ibmIPAddress", "body", m.IbmIPAddress); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateLinkStatus(formats strfmt.Registry) error { + + if err := validate.Required("linkStatus", "body", m.LinkStatus); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateMetered(formats strfmt.Registry) error { + + if err := validate.Required("metered", "body", m.Metered); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateNetworks(formats strfmt.Registry) error { + + if swag.IsZero(m.Networks) { // not required + return nil + } + + for i := 0; i < len(m.Networks); i++ { + if swag.IsZero(m.Networks[i]) { // not required + continue + } + + if m.Networks[i] != nil { + if err := m.Networks[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("networks" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *CloudConnection) validatePort(formats strfmt.Registry) error { + + if err := validate.Required("port", "body", m.Port); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateSpeed(formats strfmt.Registry) error { + + if err := validate.Required("speed", "body", m.Speed); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateUserIPAddress(formats strfmt.Registry) error { + + if err := validate.Required("userIPAddress", "body", m.UserIPAddress); err != nil { + return err + } + + return nil +} + +func (m *CloudConnection) validateVpc(formats strfmt.Registry) error { + + if swag.IsZero(m.Vpc) { // not required + return nil + } + + if m.Vpc != nil { + if err := m.Vpc.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("vpc") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnection) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnection) UnmarshalBinary(b []byte) error { + var res CloudConnection + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_create.go new file mode 100644 index 00000000000..25d7a4c44e5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_create.go @@ -0,0 +1,137 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudConnectionCreate cloud connection create +// swagger:model CloudConnectionCreate +type CloudConnectionCreate struct { + + // classic + Classic *CloudConnectionEndpointClassic `json:"classic,omitempty"` + + // enable global routing for this cloud connection (default=false) + GlobalRouting bool `json:"globalRouting,omitempty"` + + // enable metered for this cloud connection (default=false) + Metered bool `json:"metered,omitempty"` + + // name of the cloud connection + // Required: true + Name *string `json:"name"` + + // speed of the cloud connection (speed in megabits per second) + // Required: true + Speed *int64 `json:"speed"` + + // vpc + Vpc *CloudConnectionEndpointVPC `json:"vpc,omitempty"` +} + +// Validate validates this cloud connection create +func (m *CloudConnectionCreate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClassic(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpeed(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVpc(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CloudConnectionCreate) validateClassic(formats strfmt.Registry) error { + + if swag.IsZero(m.Classic) { // not required + return nil + } + + if m.Classic != nil { + if err := m.Classic.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("classic") + } + return err + } + } + + return nil +} + +func (m *CloudConnectionCreate) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *CloudConnectionCreate) validateSpeed(formats strfmt.Registry) error { + + if err := validate.Required("speed", "body", m.Speed); err != nil { + return err + } + + return nil +} + +func (m *CloudConnectionCreate) validateVpc(formats strfmt.Registry) error { + + if swag.IsZero(m.Vpc) { // not required + return nil + } + + if m.Vpc != nil { + if err := m.Vpc.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("vpc") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnectionCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnectionCreate) UnmarshalBinary(b []byte) error { + var res CloudConnectionCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_endpoint_classic.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_endpoint_classic.go new file mode 100644 index 00000000000..74b1c934f48 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_endpoint_classic.go @@ -0,0 +1,74 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// CloudConnectionEndpointClassic cloud connection endpoint classic +// swagger:model CloudConnectionEndpointClassic +type CloudConnectionEndpointClassic struct { + + // enable classic endpoint destination (default=false) + Enabled bool `json:"enabled"` + + // gre + Gre *CloudConnectionEndpointGRE `json:"gre,omitempty"` +} + +// Validate validates this cloud connection endpoint classic +func (m *CloudConnectionEndpointClassic) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateGre(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CloudConnectionEndpointClassic) validateGre(formats strfmt.Registry) error { + + if swag.IsZero(m.Gre) { // not required + return nil + } + + if m.Gre != nil { + if err := m.Gre.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("gre") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnectionEndpointClassic) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnectionEndpointClassic) UnmarshalBinary(b []byte) error { + var res CloudConnectionEndpointClassic + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_endpoint_g_r_e.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_endpoint_g_r_e.go new file mode 100644 index 00000000000..19d235a6d7c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_endpoint_g_r_e.go @@ -0,0 +1,83 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// CloudConnectionEndpointGRE cloud connection endpoint g r e +// swagger:model CloudConnectionEndpointGRE +type CloudConnectionEndpointGRE struct { + + // enable gre for this cloud connection (default=false) + Enabled bool `json:"enabled,omitempty"` + + // gre tunnels configured + Tunnels []*CloudConnectionGRETunnel `json:"tunnels"` +} + +// Validate validates this cloud connection endpoint g r e +func (m *CloudConnectionEndpointGRE) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateTunnels(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudConnectionEndpointGRE) validateTunnels(formats strfmt.Registry) error { + + if swag.IsZero(m.Tunnels) { // not required + return nil + } + + for i := 0; i < len(m.Tunnels); i++ { + if swag.IsZero(m.Tunnels[i]) { // not required + continue + } + + if m.Tunnels[i] != nil { + if err := m.Tunnels[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("tunnels" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnectionEndpointGRE) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnectionEndpointGRE) UnmarshalBinary(b []byte) error { + var res CloudConnectionEndpointGRE + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_endpoint_v_p_c.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_endpoint_v_p_c.go new file mode 100644 index 00000000000..7e71e2cddf5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_endpoint_v_p_c.go @@ -0,0 +1,83 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// CloudConnectionEndpointVPC cloud connection endpoint v p c +// swagger:model CloudConnectionEndpointVPC +type CloudConnectionEndpointVPC struct { + + // enable vpc for this cloud connection (default=false) + Enabled bool `json:"enabled"` + + // vpc connections + Vpcs []*CloudConnectionVPC `json:"vpcs,omitempty"` +} + +// Validate validates this cloud connection endpoint v p c +func (m *CloudConnectionEndpointVPC) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVpcs(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudConnectionEndpointVPC) validateVpcs(formats strfmt.Registry) error { + + if swag.IsZero(m.Vpcs) { // not required + return nil + } + + for i := 0; i < len(m.Vpcs); i++ { + if swag.IsZero(m.Vpcs[i]) { // not required + continue + } + + if m.Vpcs[i] != nil { + if err := m.Vpcs[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("vpcs" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnectionEndpointVPC) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnectionEndpointVPC) UnmarshalBinary(b []byte) error { + var res CloudConnectionEndpointVPC + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_g_r_e_tunnel.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_g_r_e_tunnel.go new file mode 100644 index 00000000000..2775719391e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_g_r_e_tunnel.go @@ -0,0 +1,84 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudConnectionGRETunnel cloud connection g r e tunnel +// swagger:model CloudConnectionGRETunnel +type CloudConnectionGRETunnel struct { + + // gre network in CIDR notation (192.168.0.0/24) + // Required: true + Cidr *string `json:"cidr"` + + // gre destination IP address + // Required: true + DestIPAddress *string `json:"destIPAddress"` + + // gre auto-assigned source IP address + SourceIPAddress string `json:"sourceIPAddress,omitempty"` +} + +// Validate validates this cloud connection g r e tunnel +func (m *CloudConnectionGRETunnel) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCidr(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDestIPAddress(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudConnectionGRETunnel) validateCidr(formats strfmt.Registry) error { + + if err := validate.Required("cidr", "body", m.Cidr); err != nil { + return err + } + + return nil +} + +func (m *CloudConnectionGRETunnel) validateDestIPAddress(formats strfmt.Registry) error { + + if err := validate.Required("destIPAddress", "body", m.DestIPAddress); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnectionGRETunnel) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnectionGRETunnel) UnmarshalBinary(b []byte) error { + var res CloudConnectionGRETunnel + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_update.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_update.go new file mode 100644 index 00000000000..070758a2be9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_update.go @@ -0,0 +1,108 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
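+// Illustrative usage (editorial sketch, not generated output): every property
+// of this update payload is optional, so a caller sets only the fields that
+// should change; the name and speed below are hypothetical:
+//
+//     update := &CloudConnectionUpdate{
+//         Name:          swag.String("my-cloud-connection"),
+//         Speed:         swag.Int64(1000),
+//         GlobalRouting: swag.Bool(true),
+//     }
+//     // Only the nested classic and vpc endpoints can fail validation here.
+//     if err := update.Validate(strfmt.Default); err != nil {
+//         // handle the validation error
+//     }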
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// CloudConnectionUpdate cloud connection update +// swagger:model CloudConnectionUpdate +type CloudConnectionUpdate struct { + + // classic + Classic *CloudConnectionEndpointClassic `json:"classic,omitempty"` + + // enable global routing for this cloud connection (default=false) + GlobalRouting *bool `json:"globalRouting,omitempty"` + + // enable metered for this cloud connection (default=false) + Metered *bool `json:"metered,omitempty"` + + // name of the cloud connection + Name *string `json:"name,omitempty"` + + // speed of the cloud connection (speed in megabits per second) + Speed *int64 `json:"speed,omitempty"` + + // vpc + Vpc *CloudConnectionEndpointVPC `json:"vpc,omitempty"` +} + +// Validate validates this cloud connection update +func (m *CloudConnectionUpdate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClassic(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVpc(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudConnectionUpdate) validateClassic(formats strfmt.Registry) error { + + if swag.IsZero(m.Classic) { // not required + return nil + } + + if m.Classic != nil { + if err := m.Classic.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("classic") + } + return err + } + } + + return nil +} + +func (m *CloudConnectionUpdate) validateVpc(formats strfmt.Registry) error { + + if swag.IsZero(m.Vpc) { // not required + return nil + } + + if m.Vpc != nil { + if err := m.Vpc.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("vpc") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnectionUpdate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnectionUpdate) UnmarshalBinary(b []byte) error { + var res CloudConnectionUpdate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_v_p_c.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_v_p_c.go new file mode 100644 index 00000000000..a59b174057b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_v_p_c.go @@ -0,0 +1,67 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
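+// Illustrative usage (editorial sketch, not generated output): vpcID is the
+// only required property, so the zero value fails Validate while a pointer
+// built with swag.String passes; the ID below is hypothetical:
+//
+//     vpc := &CloudConnectionVPC{
+//         Name:  "my-vpc",
+//         VpcID: swag.String("r006-5c6d7e8f"),
+//     }
+//     err := vpc.Validate(strfmt.Default)                     // nil
+//     err = (&CloudConnectionVPC{}).Validate(strfmt.Default)  // vpcID is required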
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudConnectionVPC cloud connection v p c +// swagger:model CloudConnectionVPC +type CloudConnectionVPC struct { + + // vpc name + Name string `json:"name,omitempty"` + + // vpc id + // Required: true + VpcID *string `json:"vpcID"` +} + +// Validate validates this cloud connection v p c +func (m *CloudConnectionVPC) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVpcID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudConnectionVPC) validateVpcID(formats strfmt.Registry) error { + + if err := validate.Required("vpcID", "body", m.VpcID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnectionVPC) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnectionVPC) UnmarshalBinary(b []byte) error { + var res CloudConnectionVPC + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_virtual_private_clouds.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_virtual_private_clouds.go new file mode 100644 index 00000000000..fbda0226f87 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connection_virtual_private_clouds.go @@ -0,0 +1,166 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudConnectionVirtualPrivateClouds cloud connection virtual private clouds +// swagger:model CloudConnectionVirtualPrivateClouds +type CloudConnectionVirtualPrivateClouds struct { + + // list of available virtual private clouds + // Required: true + VirtualPrivateClouds []*CloudConnectionVirtualPrivateCloud `json:"virtualPrivateClouds"` +} + +// Validate validates this cloud connection virtual private clouds +func (m *CloudConnectionVirtualPrivateClouds) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVirtualPrivateClouds(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudConnectionVirtualPrivateClouds) validateVirtualPrivateClouds(formats strfmt.Registry) error { + + if err := validate.Required("virtualPrivateClouds", "body", m.VirtualPrivateClouds); err != nil { + return err + } + + for i := 0; i < len(m.VirtualPrivateClouds); i++ { + if swag.IsZero(m.VirtualPrivateClouds[i]) { // not required + continue + } + + if m.VirtualPrivateClouds[i] != nil { + if err := m.VirtualPrivateClouds[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("virtualPrivateClouds" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnectionVirtualPrivateClouds) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnectionVirtualPrivateClouds) UnmarshalBinary(b []byte) error { + var res CloudConnectionVirtualPrivateClouds + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CloudConnectionVirtualPrivateCloud cloud connection virtual private cloud +// swagger:model CloudConnectionVirtualPrivateCloud +type CloudConnectionVirtualPrivateCloud struct { + + // name for the vpc + // Required: true + Name *string `json:"name"` + + // status of this vpc + // Required: true + Status *string `json:"status"` + + // virtual private cloud id + // Required: true + VpcID *string `json:"vpcID"` +} + +// Validate validates this cloud connection virtual private cloud +func (m *CloudConnectionVirtualPrivateCloud) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVpcID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudConnectionVirtualPrivateCloud) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *CloudConnectionVirtualPrivateCloud) validateStatus(formats strfmt.Registry) error { + + if err := validate.Required("status", "body", m.Status); err != nil { + return err + } + + return nil +} + +func (m *CloudConnectionVirtualPrivateCloud) validateVpcID(formats strfmt.Registry) error { + + if err := validate.Required("vpcID", "body", m.VpcID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnectionVirtualPrivateCloud) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnectionVirtualPrivateCloud) UnmarshalBinary(b []byte) error { + var res CloudConnectionVirtualPrivateCloud + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connections.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connections.go new file mode 100644 index 00000000000..a573bc3f6a8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_connections.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
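+// Illustrative note (editorial sketch, not generated output): the wrapper
+// below marks its slice as Required, so a zero-value list fails validation
+// before any per-element checks run:
+//
+//     err := (&CloudConnections{}).Validate(strfmt.Default)
+//     // err != nil: the cloudConnections property is required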
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudConnections cloud connections +// swagger:model CloudConnections +type CloudConnections struct { + + // Cloud Connections + // Required: true + CloudConnections []*CloudConnection `json:"cloudConnections"` +} + +// Validate validates this cloud connections +func (m *CloudConnections) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCloudConnections(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudConnections) validateCloudConnections(formats strfmt.Registry) error { + + if err := validate.Required("cloudConnections", "body", m.CloudConnections); err != nil { + return err + } + + for i := 0; i < len(m.CloudConnections); i++ { + if swag.IsZero(m.CloudConnections[i]) { // not required + continue + } + + if m.CloudConnections[i] != nil { + if err := m.CloudConnections[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("cloudConnections" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudConnections) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudConnections) UnmarshalBinary(b []byte) error { + var res CloudConnections + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance.go new file mode 100644 index 00000000000..fd31ec247e3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance.go @@ -0,0 +1,256 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
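+// Illustrative note (editorial sketch, not generated output): models with many
+// Required properties report every failure at once; the returned error can be
+// unwrapped as a go-openapi composite error (fmt assumed on the caller side):
+//
+//     if err := (&CloudInstance{}).Validate(strfmt.Default); err != nil {
+//         if ce, ok := err.(*errors.CompositeError); ok {
+//             for _, e := range ce.Errors {
+//                 fmt.Println(e) // one entry per missing required property
+//             }
+//         }
+//     }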
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudInstance cloud instance +// swagger:model CloudInstance +type CloudInstance struct { + + // Cloud Instance Capabilities + Capabilities []string `json:"capabilities"` + + // Cloud Instance ID + // Required: true + CloudInstanceID *string `json:"cloudInstanceID"` + + // Indicates if the cloud instance is enabled + // Required: true + Enabled *bool `json:"enabled"` + + // Indicates if the cloud instance is initialized and ready for use + // Required: true + Initialized *bool `json:"initialized"` + + // Limits on the cloud instance + // Required: true + Limits *CloudInstanceUsageLimits `json:"limits"` + + // Cloud Instance Name + // Required: true + Name *string `json:"name"` + + // The open stack ID that controls this cloud instance + // Required: true + OpenstackID *string `json:"openstackID"` + + // PVM instances owned by the Cloud Instance + // Required: true + PvmInstances []*PVMInstanceReference `json:"pvmInstances"` + + // The region the cloud instance lives + // Required: true + Region *string `json:"region"` + + // The tenant ID that owns this cloud instance + // Required: true + TenantID *string `json:"tenantID"` + + // Current usage on the cloud instance + // Required: true + Usage *CloudInstanceUsageLimits `json:"usage"` +} + +// Validate validates this cloud instance +func (m *CloudInstance) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCloudInstanceID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEnabled(formats); err != nil { + res = append(res, err) + } + + if err := m.validateInitialized(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLimits(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOpenstackID(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePvmInstances(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRegion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTenantID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUsage(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CloudInstance) validateCloudInstanceID(formats strfmt.Registry) error { + + if err := validate.Required("cloudInstanceID", "body", m.CloudInstanceID); err != nil { + return err + } + + return nil +} + +func (m *CloudInstance) validateEnabled(formats strfmt.Registry) error { + + if err := validate.Required("enabled", "body", m.Enabled); err != nil { + return err + } + + return nil +} + +func (m *CloudInstance) validateInitialized(formats strfmt.Registry) error { + + if err := validate.Required("initialized", "body", m.Initialized); err != nil { + return err + } + + return nil +} + +func (m *CloudInstance) validateLimits(formats strfmt.Registry) error { + + if err := validate.Required("limits", "body", m.Limits); err != nil { + return err + } + + if m.Limits != nil { + if err := m.Limits.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("limits") + } + return err + } + } + + return nil +} + +func (m *CloudInstance) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *CloudInstance) validateOpenstackID(formats strfmt.Registry) error { + + if err := validate.Required("openstackID", "body", m.OpenstackID); err != nil { + return err + } + + return nil +} + +func (m *CloudInstance) validatePvmInstances(formats strfmt.Registry) error { + + if err := validate.Required("pvmInstances", "body", m.PvmInstances); err != nil { + return err + } + + for i := 0; i < len(m.PvmInstances); i++ { + if swag.IsZero(m.PvmInstances[i]) { // not required + continue + } + + if m.PvmInstances[i] != nil { + if err := m.PvmInstances[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pvmInstances" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *CloudInstance) validateRegion(formats strfmt.Registry) error { + + if err := validate.Required("region", "body", m.Region); err != nil { + return err + } + + return nil +} + +func (m *CloudInstance) validateTenantID(formats strfmt.Registry) error { + + if err := validate.Required("tenantID", "body", m.TenantID); err != nil { + return err + } + + return nil +} + +func (m *CloudInstance) validateUsage(formats strfmt.Registry) error { + + if err := validate.Required("usage", "body", m.Usage); err != nil { + return err + } + + if m.Usage != nil { + if err := m.Usage.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("usage") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudInstance) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudInstance) UnmarshalBinary(b []byte) error { + var res CloudInstance + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_create.go new file mode 100644 index 00000000000..ac335500eeb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_create.go @@ -0,0 +1,138 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
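+// Illustrative usage (editorial sketch, not generated output): the numeric
+// quotas are *float64, so swag.Float64 is the usual way to populate them; the
+// region and tenant below are hypothetical:
+//
+//     create := &CloudInstanceCreate{
+//         Memory:     swag.Float64(16),
+//         ProcUnits:  swag.Float64(0.5),
+//         Processors: swag.Float64(2),
+//         Region:     swag.String("us-south"),
+//         TenantID:   swag.String("my-tenant"),
+//     }
+//     // All five fields above carry Required markers.
+//     if err := create.Validate(strfmt.Default); err != nil {
+//         // handle the validation error
+//     }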
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudInstanceCreate cloud instance create +// swagger:model CloudInstanceCreate +type CloudInstanceCreate struct { + + // Number of power instances allowed + Instances *float64 `json:"instances,omitempty"` + + // Amount of memory allowed + // Required: true + Memory *float64 `json:"memory"` + + // Number of processor units allowed + // Required: true + ProcUnits *float64 `json:"procUnits"` + + // Number of processors allowed + // Required: true + Processors *float64 `json:"processors"` + + // The region the cloud instance lives + // Required: true + Region *string `json:"region"` + + // Amount of storage allowed (TB) + Storage *float64 `json:"storage,omitempty"` + + // The tenant ID that owns this cloud instance + // Required: true + TenantID *string `json:"tenantID"` +} + +// Validate validates this cloud instance create +func (m *CloudInstanceCreate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMemory(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcUnits(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcessors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRegion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTenantID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudInstanceCreate) validateMemory(formats strfmt.Registry) error { + + if err := validate.Required("memory", "body", m.Memory); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceCreate) validateProcUnits(formats strfmt.Registry) error { + + if err := validate.Required("procUnits", "body", m.ProcUnits); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceCreate) validateProcessors(formats strfmt.Registry) error { + + if err := validate.Required("processors", "body", m.Processors); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceCreate) validateRegion(formats strfmt.Registry) error { + + if err := validate.Required("region", "body", m.Region); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceCreate) validateTenantID(formats strfmt.Registry) error { + + if err := validate.Required("tenantID", "body", m.TenantID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudInstanceCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudInstanceCreate) UnmarshalBinary(b []byte) error { + var res CloudInstanceCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_reference.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_reference.go new file mode 100644 index 00000000000..e5ef396ef8f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_reference.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudInstanceReference cloud instance reference +// swagger:model CloudInstanceReference +type CloudInstanceReference struct { + + // Cloud Instance Capabilities + Capabilities []string `json:"capabilities"` + + // Cloud Instance ID + // Required: true + CloudInstanceID *string `json:"cloudInstanceID"` + + // Indicates if the cloud instance is enabled + // Required: true + Enabled *bool `json:"enabled"` + + // Link to Cloud Instance resource + // Required: true + Href *string `json:"href"` + + // Indicates if the cloud instance is initialized and ready for use + // Required: true + Initialized *bool `json:"initialized"` + + // Limits on the cloud instance + // Required: true + Limits *CloudInstanceUsageLimits `json:"limits"` + + // Cloud Instance Name + // Required: true + Name *string `json:"name"` + + // The region the cloud instance lives + // Required: true + Region *string `json:"region"` +} + +// Validate validates this cloud instance reference +func (m *CloudInstanceReference) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCloudInstanceID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEnabled(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHref(formats); err != nil { + res = append(res, err) + } + + if err := m.validateInitialized(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLimits(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRegion(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CloudInstanceReference) validateCloudInstanceID(formats strfmt.Registry) error { + + if err := validate.Required("cloudInstanceID", "body", m.CloudInstanceID); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceReference) validateEnabled(formats strfmt.Registry) error { + + if err := validate.Required("enabled", "body", m.Enabled); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceReference) validateHref(formats strfmt.Registry) error { + + if err := validate.Required("href", "body", m.Href); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceReference) validateInitialized(formats strfmt.Registry) error { + + if err := validate.Required("initialized", "body", m.Initialized); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceReference) validateLimits(formats strfmt.Registry) error { + + if err := validate.Required("limits", "body", m.Limits); err != nil { + return err + } + + if m.Limits != nil { + if err := m.Limits.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("limits") + } + return err + } + } + + return nil +} + +func (m *CloudInstanceReference) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceReference) validateRegion(formats strfmt.Registry) error { + + if err := validate.Required("region", "body", m.Region); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudInstanceReference) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudInstanceReference) UnmarshalBinary(b []byte) error { + var res CloudInstanceReference + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_update.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_update.go new file mode 100644 index 00000000000..2b244aad6f0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_update.go @@ -0,0 +1,55 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
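+// Illustrative note (editorial sketch, not generated output): every property
+// is a pointer tagged omitempty, so only quotas that were explicitly set are
+// serialized, which is what makes this a partial-update payload:
+//
+//     update := &CloudInstanceUpdate{Memory: swag.Float64(512)}
+//     b, _ := update.MarshalBinary() // roughly {"memory":512}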
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// CloudInstanceUpdate cloud instance update +// swagger:model CloudInstanceUpdate +type CloudInstanceUpdate struct { + + // Number of power instances allowed + Instances *float64 `json:"instances,omitempty"` + + // Amount of memory allowed + Memory *float64 `json:"memory,omitempty"` + + // Number of processor units allowed + ProcUnits *float64 `json:"procUnits,omitempty"` + + // Number of processors allowed + Processors *float64 `json:"processors,omitempty"` + + // Amount of storage allowed (TB) + Storage *float64 `json:"storage,omitempty"` +} + +// Validate validates this cloud instance update +func (m *CloudInstanceUpdate) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *CloudInstanceUpdate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudInstanceUpdate) UnmarshalBinary(b []byte) error { + var res CloudInstanceUpdate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_usage_limits.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_usage_limits.go new file mode 100644 index 00000000000..c3a158a8fcf --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/cloud_instance_usage_limits.go @@ -0,0 +1,150 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CloudInstanceUsageLimits cloud instance usage limits +// swagger:model CloudInstanceUsageLimits +type CloudInstanceUsageLimits struct { + + // Maximum memory (in GB) per PVMInstance + InstanceMemory *float64 `json:"instanceMemory,omitempty"` + + // Maximum proc units per PVMInstance + InstanceProcUnits *float64 `json:"instanceProcUnits,omitempty"` + + // Number of power instances allowed + // Required: true + Instances *float64 `json:"instances"` + + // Amount of memory allowed + // Required: true + Memory *float64 `json:"memory"` + + // Maximum network bandwidth to GCP Mbps + PeeringBandwidth *int64 `json:"peeringBandwidth,omitempty"` + + // Amount of peering networks allowed + PeeringNetworks *int64 `json:"peeringNetworks,omitempty"` + + // Number of processor units allowed + // Required: true + ProcUnits *float64 `json:"procUnits"` + + // Number of processors allowed + // Required: true + Processors *float64 `json:"processors"` + + // Amount of storage allowed (TB) + // Required: true + Storage *float64 `json:"storage"` + + // Amount of SSD storage allowed (TB) + StorageSSD *float64 `json:"storageSSD,omitempty"` + + // Amount of standard (HDD) storage allowed (TB) + StorageStandard *float64 `json:"storageStandard,omitempty"` +} + +// Validate validates this cloud instance usage limits +func (m *CloudInstanceUsageLimits) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateInstances(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMemory(formats); err != 
nil { + res = append(res, err) + } + + if err := m.validateProcUnits(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcessors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStorage(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CloudInstanceUsageLimits) validateInstances(formats strfmt.Registry) error { + + if err := validate.Required("instances", "body", m.Instances); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceUsageLimits) validateMemory(formats strfmt.Registry) error { + + if err := validate.Required("memory", "body", m.Memory); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceUsageLimits) validateProcUnits(formats strfmt.Registry) error { + + if err := validate.Required("procUnits", "body", m.ProcUnits); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceUsageLimits) validateProcessors(formats strfmt.Registry) error { + + if err := validate.Required("processors", "body", m.Processors); err != nil { + return err + } + + return nil +} + +func (m *CloudInstanceUsageLimits) validateStorage(formats strfmt.Registry) error { + + if err := validate.Required("storage", "body", m.Storage); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CloudInstanceUsageLimits) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CloudInstanceUsageLimits) UnmarshalBinary(b []byte) error { + var res CloudInstanceUsageLimits + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/context.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/context.go new file mode 100644 index 00000000000..63103fe1ead --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/context.go @@ -0,0 +1,10 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Context See [Context Conventions](https://github.com/openservicebrokerapi/servicebroker/blob/master/profile.md#context-object) for more details. +// swagger:model Context +type Context interface{} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/create_data_volume.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/create_data_volume.go new file mode 100644 index 00000000000..d5b8545fdb6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/create_data_volume.go @@ -0,0 +1,149 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
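+// Illustrative usage (editorial sketch, not generated output): affinityPolicy
+// is restricted to the generated enum values, so the exported constants below
+// avoid typos; the names and size are hypothetical:
+//
+//     vol := &CreateDataVolume{
+//         Name:           swag.String("data-vol-1"),
+//         Size:           swag.Float64(100),
+//         AffinityPolicy: swag.String(CreateDataVolumeAffinityPolicyAffinity),
+//         AffinityVolume: swag.String("existing-vol"),
+//     }
+//     // Validate checks the required name and size plus enum membership.
+//     if err := vol.Validate(strfmt.Default); err != nil {
+//         // handle the validation error
+//     }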
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CreateDataVolume create data volume +// swagger:model CreateDataVolume +type CreateDataVolume struct { + + // PVM Instance (ID or Name) to base volume affinity policy against; required if affinityPolicy is provided and affinityVolume is not provided + AffinityPVMInstance *string `json:"affinityPVMInstance,omitempty"` + + // Affinity policy for data volume being created; requires affinityPVMInstance or affinityVolume to be specified; ignored if volumePool provided + // Enum: [affinity anti-affinity] + AffinityPolicy *string `json:"affinityPolicy,omitempty"` + + // Volume (ID or Name) to base volume affinity policy against; required if affinityPolicy is provided and affinityPVMInstance is not provided + AffinityVolume *string `json:"affinityVolume,omitempty"` + + // Type of Disk, required if affinityPolicy and volumePool not provided, otherwise ignored + DiskType string `json:"diskType,omitempty"` + + // Volume Name + // Required: true + Name *string `json:"name"` + + // Indicates if the volume is shareable between VMs + Shareable *bool `json:"shareable,omitempty"` + + // Volume Size (GB) + // Required: true + Size *float64 `json:"size"` + + // Volume pool where the volume will be created; if provided then diskType and affinityPolicy values will be ignored + VolumePool string `json:"volumePool,omitempty"` +} + +// Validate validates this create data volume +func (m *CreateDataVolume) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAffinityPolicy(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSize(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var createDataVolumeTypeAffinityPolicyPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["affinity","anti-affinity"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + createDataVolumeTypeAffinityPolicyPropEnum = append(createDataVolumeTypeAffinityPolicyPropEnum, v) + } +} + +const ( + + // CreateDataVolumeAffinityPolicyAffinity captures enum value "affinity" + CreateDataVolumeAffinityPolicyAffinity string = "affinity" + + // CreateDataVolumeAffinityPolicyAntiAffinity captures enum value "anti-affinity" + CreateDataVolumeAffinityPolicyAntiAffinity string = "anti-affinity" +) + +// prop value enum +func (m *CreateDataVolume) validateAffinityPolicyEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, createDataVolumeTypeAffinityPolicyPropEnum); err != nil { + return err + } + return nil +} + +func (m *CreateDataVolume) validateAffinityPolicy(formats strfmt.Registry) error { + + if swag.IsZero(m.AffinityPolicy) { // not required + return nil + } + + // value enum + if err := m.validateAffinityPolicyEnum("affinityPolicy", "body", *m.AffinityPolicy); err != nil { + return err + } + + return nil +} + +func (m *CreateDataVolume) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *CreateDataVolume) validateSize(formats strfmt.Registry) error { + + if err := validate.Required("size", "body", m.Size); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CreateDataVolume) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CreateDataVolume) UnmarshalBinary(b []byte) error { + var res CreateDataVolume + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/create_image.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/create_image.go new file mode 100644 index 00000000000..080f1d1fa15 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/create_image.go @@ -0,0 +1,185 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
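+// Illustrative usage (editorial sketch, not generated output): source is the
+// only required property and is restricted to the generated enum; an import
+// from object storage might look like this (bucket, keys, and filename are
+// hypothetical):
+//
+//     img := &CreateImage{
+//         Source:        swag.String(CreateImageSourceURL),
+//         ImageName:     "my-image",
+//         BucketName:    "my-bucket",
+//         ImageFilename: "image.ova.gz",
+//         Region:        "us-south",
+//         AccessKey:     "ACCESS_KEY",
+//         SecretKey:     "SECRET_KEY",
+//     }
+//     if err := img.Validate(strfmt.Default); err != nil {
+//         // handle the validation error
+//     }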
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CreateImage create image +// swagger:model CreateImage +type CreateImage struct { + + // Cloud Storage access key; required for import image + AccessKey string `json:"accessKey,omitempty"` + + // Cloud Storage bucket name; bucket-name[/optional/folder]; required for import image + BucketName string `json:"bucketName,omitempty"` + + // Type of Disk + DiskType string `json:"diskType,omitempty"` + + // Cloud Storage image filename; required for import image + ImageFilename string `json:"imageFilename,omitempty"` + + // Image ID of existing source image; required for copy image + ImageID string `json:"imageID,omitempty"` + + // Name to give created image; required for import image + ImageName string `json:"imageName,omitempty"` + + // (deprecated - replaced by region, imageFilename and bucketName) Path to image starting with service endpoint and ending with image filename + ImagePath string `json:"imagePath,omitempty"` + + // Image OS Type, required if importing a raw image; raw images can only be imported using the command line interface + // Enum: [aix ibmi redhat sles] + OsType string `json:"osType,omitempty"` + + // Cloud Storage Region; only required to access IBM Cloud Storage + Region string `json:"region,omitempty"` + + // Cloud Storage secret key; required for import image + SecretKey string `json:"secretKey,omitempty"` + + // Source of the image + // Required: true + // Enum: [root-project url] + Source *string `json:"source"` +} + +// Validate validates this create image +func (m *CreateImage) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateOsType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSource(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var createImageTypeOsTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["aix","ibmi","redhat","sles"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + createImageTypeOsTypePropEnum = append(createImageTypeOsTypePropEnum, v) + } +} + +const ( + + // CreateImageOsTypeAix captures enum value "aix" + CreateImageOsTypeAix string = "aix" + + // CreateImageOsTypeIbmi captures enum value "ibmi" + CreateImageOsTypeIbmi string = "ibmi" + + // CreateImageOsTypeRedhat captures enum value "redhat" + CreateImageOsTypeRedhat string = "redhat" + + // CreateImageOsTypeSles captures enum value "sles" + CreateImageOsTypeSles string = "sles" +) + +// prop value enum +func (m *CreateImage) validateOsTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, createImageTypeOsTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *CreateImage) validateOsType(formats strfmt.Registry) error { + + if swag.IsZero(m.OsType) { // not required + return nil + } + + // value enum + if err := m.validateOsTypeEnum("osType", "body", m.OsType); err != nil { + return err + } + + return nil +} + +var createImageTypeSourcePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["root-project","url"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + createImageTypeSourcePropEnum = append(createImageTypeSourcePropEnum, v) + } +} + +const ( + + // CreateImageSourceRootProject captures enum value "root-project" + CreateImageSourceRootProject string = "root-project" + + // CreateImageSourceURL captures enum value "url" + CreateImageSourceURL string = "url" +) + +// prop value enum +func (m *CreateImage) validateSourceEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, createImageTypeSourcePropEnum); err != nil { + return err + } + return nil +} + +func (m *CreateImage) validateSource(formats strfmt.Registry) error { + + if err := validate.Required("source", "body", m.Source); err != nil { + return err + } + + // value enum + if err := m.validateSourceEnum("source", "body", *m.Source); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CreateImage) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CreateImage) UnmarshalBinary(b []byte) error { + var res CreateImage + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/dashboard_client.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/dashboard_client.go new file mode 100644 index 00000000000..c993d3c9089 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/dashboard_client.go @@ -0,0 +1,49 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
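+// Illustrative note (editorial sketch, not generated output): every model in
+// this package gets the same MarshalBinary/UnmarshalBinary pair backed by
+// swag's JSON codec, which makes round-tripping uniform:
+//
+//     in := &DashboardClient{ID: "client-1", RedirectURI: "https://example.com/cb"}
+//     b, _ := in.MarshalBinary()
+//     out := &DashboardClient{}
+//     _ = out.UnmarshalBinary(b) // out now mirrors in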
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// DashboardClient dashboard client +// swagger:model DashboardClient +type DashboardClient struct { + + // id + ID string `json:"id,omitempty"` + + // redirect uri + RedirectURI string `json:"redirect_uri,omitempty"` + + // secret + Secret string `json:"secret,omitempty"` +} + +// Validate validates this dashboard client +func (m *DashboardClient) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DashboardClient) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DashboardClient) UnmarshalBinary(b []byte) error { + var res DashboardClient + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/device_code.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/device_code.go new file mode 100644 index 00000000000..a14fb20caae --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/device_code.go @@ -0,0 +1,132 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DeviceCode device code +// swagger:model DeviceCode +type DeviceCode struct { + + // This code lets the device running the app securely determine whether the user has granted or denied access + // Required: true + DeviceCode *string `json:"deviceCode"` + + // The length of time, in seconds, that the device_code and user_code are valid + // Required: true + ExpiresIn *float64 `json:"expiresIn"` + + // The length of time, in seconds, that your device should wait between polling requests + // Required: true + Interval *float64 `json:"interval"` + + // The value given to the user to enter on device authentication page + // Required: true + UserCode *string `json:"userCode"` + + // A URL that the user must navigate to, on a separate device, to enter the user_code and grant or deny access to your application. Your user interface will also display this value + // Required: true + VerificationURL *string `json:"verificationURL"` +} + +// Validate validates this device code +func (m *DeviceCode) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDeviceCode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateExpiresIn(formats); err != nil { + res = append(res, err) + } + + if err := m.validateInterval(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUserCode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVerificationURL(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *DeviceCode) validateDeviceCode(formats strfmt.Registry) error { + + if err := validate.Required("deviceCode", "body", m.DeviceCode); err != nil { + return err + } + + return nil +} + +func (m *DeviceCode) validateExpiresIn(formats strfmt.Registry) error { + + if err := validate.Required("expiresIn", "body", m.ExpiresIn); err != nil { + return err + } + + return nil +} + +func (m *DeviceCode) validateInterval(formats strfmt.Registry) error { + + if err := validate.Required("interval", "body", m.Interval); err != nil { + return err + } + + return nil +} + +func (m *DeviceCode) validateUserCode(formats strfmt.Registry) error { + + if err := validate.Required("userCode", "body", m.UserCode); err != nil { + return err + } + + return nil +} + +func (m *DeviceCode) validateVerificationURL(formats strfmt.Registry) error { + + if err := validate.Required("verificationURL", "body", m.VerificationURL); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *DeviceCode) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DeviceCode) UnmarshalBinary(b []byte) error { + var res DeviceCode + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/error.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/error.go new file mode 100644 index 00000000000..a4a74a0b365 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/error.go @@ -0,0 +1,52 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// Error error +// swagger:model Error +type Error struct { + + // code + Code int64 `json:"code,omitempty"` + + // description + Description string `json:"description,omitempty"` + + // error + Error string `json:"error,omitempty"` + + // message + Message string `json:"message,omitempty"` +} + +// Validate validates this error +func (m *Error) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Error) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Error) UnmarshalBinary(b []byte) error { + var res Error + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/event.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/event.go new file mode 100644 index 00000000000..74faa8a651f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/event.go @@ -0,0 +1,242 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
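+// Illustrative usage (editorial sketch, not generated output): time must be an
+// RFC3339 date-time and level one of the enum constants defined below; a
+// hand-built event might look like this (assumes the standard time package on
+// the caller side):
+//
+//     now := time.Now()
+//     ev := &Event{
+//         Action:    swag.String("create"),
+//         EventID:   swag.String("event-1"),
+//         Level:     swag.String(EventLevelInfo),
+//         Message:   swag.String("instance created"),
+//         Resource:  swag.String("pvm-instance"),
+//         Time:      (*strfmt.DateTime)(&now),
+//         Timestamp: swag.Int64(now.Unix()),
+//     }
+//     if err := ev.Validate(strfmt.Default); err != nil {
+//         // handle the validation error
+//     }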
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Event event +// swagger:model Event +type Event struct { + + // Type of action for this event + // Required: true + Action *string `json:"action"` + + // ID of the Activity + // Required: true + EventID *string `json:"eventID"` + + // Level of the event (notice, info, warning, error) + // Required: true + // Enum: [notice info warning error] + Level *string `json:"level"` + + // The (translated) message of the event + // Required: true + Message *string `json:"message"` + + // Any metadata associated with the event + Metadata interface{} `json:"metadata,omitempty"` + + // Type of resource for this event + // Required: true + Resource *string `json:"resource"` + + // Time of activity in ISO 8601 - RFC3339 + // Required: true + // Format: date-time + Time *strfmt.DateTime `json:"time"` + + // Time of activity in unix epoch + // Required: true + Timestamp *int64 `json:"timestamp"` + + // user + User *EventUser `json:"user,omitempty"` +} + +// Validate validates this event +func (m *Event) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAction(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEventID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLevel(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if err := m.validateResource(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTime(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTimestamp(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUser(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Event) validateAction(formats strfmt.Registry) error { + + if err := validate.Required("action", "body", m.Action); err != nil { + return err + } + + return nil +} + +func (m *Event) validateEventID(formats strfmt.Registry) error { + + if err := validate.Required("eventID", "body", m.EventID); err != nil { + return err + } + + return nil +} + +var eventTypeLevelPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["notice","info","warning","error"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + eventTypeLevelPropEnum = append(eventTypeLevelPropEnum, v) + } +} + +const ( + + // EventLevelNotice captures enum value "notice" + EventLevelNotice string = "notice" + + // EventLevelInfo captures enum value "info" + EventLevelInfo string = "info" + + // EventLevelWarning captures enum value "warning" + EventLevelWarning string = "warning" + + // EventLevelError captures enum value "error" + EventLevelError string = "error" +) + +// prop value enum +func (m *Event) validateLevelEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, eventTypeLevelPropEnum); err != nil { + return err + } + return nil +} + +func (m *Event) validateLevel(formats strfmt.Registry) error { + + if err := validate.Required("level", "body", m.Level); err != nil { + return err + } + + // value enum + if err := m.validateLevelEnum("level", "body", *m.Level); err != nil { + return err + } + + return nil +} + +func (m *Event) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +func (m *Event) validateResource(formats strfmt.Registry) error { + + if err := validate.Required("resource", "body", m.Resource); err != nil { + return err + } + + return nil +} + +func (m *Event) validateTime(formats strfmt.Registry) error { + + if err := validate.Required("time", "body", m.Time); err != nil { + return err + } + + if err := validate.FormatOf("time", "body", "date-time", m.Time.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Event) validateTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("timestamp", "body", m.Timestamp); err != nil { + return err + } + + return nil +} + +func (m *Event) validateUser(formats strfmt.Registry) error { + + if swag.IsZero(m.User) { // not required + return nil + } + + if m.User != nil { + if err := m.User.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("user") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Event) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Event) UnmarshalBinary(b []byte) error { + var res Event + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/event_user.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/event_user.go new file mode 100644 index 00000000000..4de9b694fce --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/event_user.go @@ -0,0 +1,70 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// EventUser event user +// swagger:model EventUser +type EventUser struct { + + // Email of the User + Email string `json:"email,omitempty"` + + // Name of the User + Name string `json:"name,omitempty"` + + // ID of user who created/caused the event + // Required: true + UserID *string `json:"userID"` +} + +// Validate validates this event user +func (m *EventUser) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateUserID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *EventUser) validateUserID(formats strfmt.Registry) error { + + if err := validate.Required("userID", "body", m.UserID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *EventUser) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *EventUser) UnmarshalBinary(b []byte) error { + var res EventUser + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/events.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/events.go new file mode 100644 index 00000000000..6747b1728b1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/events.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Events events +// swagger:model Events +type Events struct { + + // Events + // Required: true + Events []*Event `json:"events"` +} + +// Validate validates this events +func (m *Events) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEvents(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Events) validateEvents(formats strfmt.Registry) error { + + if err := validate.Required("events", "body", m.Events); err != nil { + return err + } + + for i := 0; i < len(m.Events); i++ { + if swag.IsZero(m.Events[i]) { // not required + continue + } + + if m.Events[i] != nil { + if err := m.Events[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("events" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Events) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Events) UnmarshalBinary(b []byte) error { + var res Events + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/export_image.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/export_image.go new file mode 100644 index 00000000000..594acbe8ff6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/export_image.go @@ -0,0 +1,87 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ExportImage export image +// swagger:model ExportImage +type ExportImage struct { + + // Cloud Object Storage Access key + // Required: true + AccessKey *string `json:"accessKey"` + + // Cloud Object Storage Bucket name + // Required: true + BucketName *string `json:"bucketName"` + + // Cloud Object Storage Region; required for IBM COS + Region string `json:"region,omitempty"` + + // Cloud Object Storage Secret key + SecretKey string `json:"secretKey,omitempty"` +} + +// Validate validates this export image +func (m *ExportImage) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAccessKey(formats); err != nil { + res = append(res, err) + } + + if err := m.validateBucketName(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ExportImage) validateAccessKey(formats strfmt.Registry) error { + + if err := validate.Required("accessKey", "body", m.AccessKey); err != nil { + return err + } + + return nil +} + +func (m *ExportImage) validateBucketName(formats strfmt.Registry) error { + + if err := validate.Required("bucketName", "body", m.BucketName); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ExportImage) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ExportImage) UnmarshalBinary(b []byte) error { + var res ExportImage + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/hardware_platform.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/hardware_platform.go new file mode 100644 index 00000000000..064fc18f0a3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/hardware_platform.go @@ -0,0 +1,98 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// HardwarePlatform Hardware platform detailing its limits and statistics +// swagger:model HardwarePlatform +type HardwarePlatform struct { + + // Description + Description string `json:"description,omitempty"` + + // The DataCenter list of servers and their available resources + HostsResources []*HostResources `json:"hostsResources"` + + // Configured Memory GB + Memory float64 `json:"memory,omitempty"` + + // Processor to Memory (GB) Ratio + ProcessorMemoryRatio float64 `json:"processorMemoryRatio,omitempty"` + + // Configured Processors + Processors float64 `json:"processors,omitempty"` + + // Allowable granularity for shared processors + SharedProcessorStep float64 `json:"sharedProcessorStep,omitempty"` + + // Short code for hardware + Type string `json:"type,omitempty"` +} + +// Validate validates this hardware platform +func (m *HardwarePlatform) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHostsResources(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HardwarePlatform) validateHostsResources(formats strfmt.Registry) error { + + if swag.IsZero(m.HostsResources) { // not required + return nil + } + + for i := 0; i < len(m.HostsResources); i++ { + if swag.IsZero(m.HostsResources[i]) { // not required + continue + } + + if m.HostsResources[i] != nil { + if err := m.HostsResources[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hostsResources" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HardwarePlatform) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HardwarePlatform) UnmarshalBinary(b []byte) error { + var res HardwarePlatform + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/hardware_platforms.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/hardware_platforms.go new file mode 100644 index 00000000000..fa3008ac422 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/hardware_platforms.go @@ -0,0 +1,40 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/validate" +) + +// HardwarePlatforms A map of hardware platforms detailing their limits and statistics +// swagger:model HardwarePlatforms +type HardwarePlatforms map[string]HardwarePlatform + +// Validate validates this hardware platforms +func (m HardwarePlatforms) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.Required(k, "body", m[k]); err != nil { + return err + } + if val, ok := m[k]; ok { + if err := val.Validate(formats); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/health.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/health.go new file mode 100644 index 00000000000..4e02af8f824 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/health.go @@ -0,0 +1,64 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Health health +// swagger:model Health +type Health struct { + + // Returns a description of the current servers health + // Required: true + Status *string `json:"status"` +} + +// Validate validates this health +func (m *Health) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Health) validateStatus(formats strfmt.Registry) error { + + if err := validate.Required("status", "body", m.Status); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Health) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Health) UnmarshalBinary(b []byte) error { + var res Health + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_info.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_info.go new file mode 100644 index 00000000000..16082117c4f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_info.go @@ -0,0 +1,168 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HostInfo host info +// swagger:model HostInfo +type HostInfo struct { + + // Host core information + // Required: true + Cores *HostResource `json:"cores"` + + // Hostname + // Required: true + Hostname *string `json:"hostname"` + + // IP Address + // Required: true + IPAddress *string `json:"ipAddress"` + + // Host memory information + // Required: true + Memory *HostResource `json:"memory"` + + // PVM Instances on host + // Required: true + PvmInstances []*HostPVMInstance `json:"pvmInstances"` +} + +// Validate validates this host info +func (m *HostInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCores(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHostname(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIPAddress(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMemory(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePvmInstances(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *HostInfo) validateCores(formats strfmt.Registry) error { + + if err := validate.Required("cores", "body", m.Cores); err != nil { + return err + } + + if m.Cores != nil { + if err := m.Cores.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("cores") + } + return err + } + } + + return nil +} + +func (m *HostInfo) validateHostname(formats strfmt.Registry) error { + + if err := validate.Required("hostname", "body", m.Hostname); err != nil { + return err + } + + return nil +} + +func (m *HostInfo) validateIPAddress(formats strfmt.Registry) error { + + if err := validate.Required("ipAddress", "body", m.IPAddress); err != nil { + return err + } + + return nil +} + +func (m *HostInfo) validateMemory(formats strfmt.Registry) error { + + if err := validate.Required("memory", "body", m.Memory); err != nil { + return err + } + + if m.Memory != nil { + if err := m.Memory.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("memory") + } + return err + } + } + + return nil +} + +func (m *HostInfo) validatePvmInstances(formats strfmt.Registry) error { + + if err := validate.Required("pvmInstances", "body", m.PvmInstances); err != nil { + return err + } + + for i := 0; i < len(m.PvmInstances); i++ { + if swag.IsZero(m.PvmInstances[i]) { // not required + continue + } + + if m.PvmInstances[i] != nil { + if err := m.PvmInstances[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pvmInstances" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HostInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HostInfo) UnmarshalBinary(b []byte) error { + var res HostInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_p_vm_instance.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_p_vm_instance.go new file mode 100644 index 00000000000..ee21b460ccc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_p_vm_instance.go @@ -0,0 +1,158 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
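+//
+// NOTE (editorial sketch, not part of the generated output): every property
+// of this model is required, so each is generated as a pointer and checked by
+// Validate. The IDs below are hypothetical, and OwnerInfo (defined elsewhere
+// in this package) has required fields of its own that are elided here:
+//
+//   pvm := &models.HostPVMInstance{
+//       CloudInstanceID: swag.String("cloud-1"),
+//       Owner:           &models.OwnerInfo{ /* required owner fields */ },
+//       PvmInstanceID:   swag.String("pvm-1"),
+//       PvmName:         swag.String("bootstrap-0"),
+//       State:           swag.String("ACTIVE"),
+//       TenantID:        swag.String("tenant-1"),
+//   }
+//   err := pvm.Validate(strfmt.Default) // also descends into Owner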
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HostPVMInstance A pvm instance on host +// swagger:model HostPVMInstance +type HostPVMInstance struct { + + // Cloud Instance ID pvm instance is a member of + // Required: true + CloudInstanceID *string `json:"cloudInstanceID"` + + // Owner information of pvm instance + // Required: true + Owner *OwnerInfo `json:"owner"` + + // Instance ID + // Required: true + PvmInstanceID *string `json:"pvmInstanceID"` + + // Instance name + // Required: true + PvmName *string `json:"pvmName"` + + // State of pvm instance + // Required: true + State *string `json:"state"` + + // Tenant ID of pvm instance + // Required: true + TenantID *string `json:"tenantID"` +} + +// Validate validates this host p VM instance +func (m *HostPVMInstance) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCloudInstanceID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOwner(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePvmInstanceID(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePvmName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateState(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTenantID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HostPVMInstance) validateCloudInstanceID(formats strfmt.Registry) error { + + if err := validate.Required("cloudInstanceID", "body", m.CloudInstanceID); err != nil { + return err + } + + return nil +} + +func (m *HostPVMInstance) validateOwner(formats strfmt.Registry) error { + + if err := validate.Required("owner", "body", m.Owner); err != nil { + return err + } + + if m.Owner != nil { + if err := m.Owner.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("owner") + } + return err + } + } + + return nil +} + +func (m *HostPVMInstance) validatePvmInstanceID(formats strfmt.Registry) error { + + if err := validate.Required("pvmInstanceID", "body", m.PvmInstanceID); err != nil { + return err + } + + return nil +} + +func (m *HostPVMInstance) validatePvmName(formats strfmt.Registry) error { + + if err := validate.Required("pvmName", "body", m.PvmName); err != nil { + return err + } + + return nil +} + +func (m *HostPVMInstance) validateState(formats strfmt.Registry) error { + + if err := validate.Required("state", "body", m.State); err != nil { + return err + } + + return nil +} + +func (m *HostPVMInstance) validateTenantID(formats strfmt.Registry) error { + + if err := validate.Required("tenantID", "body", m.TenantID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HostPVMInstance) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HostPVMInstance) UnmarshalBinary(b []byte) error { + var res HostPVMInstance + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_resource.go 
b/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_resource.go new file mode 100644 index 00000000000..68cd69abd00 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_resource.go @@ -0,0 +1,98 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HostResource host resource +// swagger:model HostResource +type HostResource struct { + + // Free + // Required: true + Free *float64 `json:"free"` + + // Total + // Required: true + Total *float64 `json:"total"` + + // Used + // Required: true + Used *float64 `json:"used"` +} + +// Validate validates this host resource +func (m *HostResource) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFree(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTotal(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUsed(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HostResource) validateFree(formats strfmt.Registry) error { + + if err := validate.Required("free", "body", m.Free); err != nil { + return err + } + + return nil +} + +func (m *HostResource) validateTotal(formats strfmt.Registry) error { + + if err := validate.Required("total", "body", m.Total); err != nil { + return err + } + + return nil +} + +func (m *HostResource) validateUsed(formats strfmt.Registry) error { + + if err := validate.Required("used", "body", m.Used); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HostResource) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HostResource) UnmarshalBinary(b []byte) error { + var res HostResource + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_resources.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_resources.go new file mode 100644 index 00000000000..92c43fabd82 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/host_resources.go @@ -0,0 +1,98 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
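+//
+// NOTE (editorial sketch, not part of the generated output): required numeric
+// properties follow the same pointer convention, and the go-openapi/swag
+// helpers keep construction terse (values here are illustrative):
+//
+//   hr := &models.HostResources{
+//       Cores:  swag.Float64(3.25),
+//       ID:     swag.Int64(42),
+//       Memory: swag.Int64(1024),
+//   }
+//   err := hr.Validate(strfmt.Default) // nil: all three required fields are set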
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HostResources host resources +// swagger:model HostResources +type HostResources struct { + + // The host available Processor units + // Required: true + Cores *float64 `json:"cores"` + + // The host identifier + // Required: true + ID *int64 `json:"id"` + + // The host available RAM memory in GiB + // Required: true + Memory *int64 `json:"memory"` +} + +// Validate validates this host resources +func (m *HostResources) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCores(formats); err != nil { + res = append(res, err) + } + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMemory(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HostResources) validateCores(formats strfmt.Registry) error { + + if err := validate.Required("cores", "body", m.Cores); err != nil { + return err + } + + return nil +} + +func (m *HostResources) validateID(formats strfmt.Registry) error { + + if err := validate.Required("id", "body", m.ID); err != nil { + return err + } + + return nil +} + +func (m *HostResources) validateMemory(formats strfmt.Registry) error { + + if err := validate.Required("memory", "body", m.Memory); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HostResources) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HostResources) UnmarshalBinary(b []byte) error { + var res HostResources + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/image.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/image.go new file mode 100644 index 00000000000..149a46a8ecd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/image.go @@ -0,0 +1,269 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
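+//
+// NOTE (editorial sketch, not part of the generated output): date-time
+// properties are typed *strfmt.DateTime, and Validate checks them both for
+// presence and against the "date-time" format in the registry it is given:
+//
+//   now := strfmt.DateTime(time.Now())
+//   img := &models.Image{CreationDate: &now} // other required fields left unset
+//   err := img.Validate(strfmt.Default)      // composite error: imageID, name, etc. missing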
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Image image +// swagger:model Image +type Image struct { + + // Creation Date + // Required: true + // Format: date-time + CreationDate *strfmt.DateTime `json:"creationDate"` + + // Description + Description string `json:"description,omitempty"` + + // Image ID + // Required: true + ImageID *string `json:"imageID"` + + // Last Update Date + // Required: true + // Format: date-time + LastUpdateDate *strfmt.DateTime `json:"lastUpdateDate"` + + // Image Name + // Required: true + Name *string `json:"name"` + + // List of Servers that have deployed the image + Servers []string `json:"servers"` + + // Image Size + // Required: true + Size *float64 `json:"size"` + + // specifications + Specifications *ImageSpecifications `json:"specifications,omitempty"` + + // Image State + State string `json:"state,omitempty"` + + // Storage pool where the image resides + // Required: true + StoragePool *string `json:"storagePool"` + + // Storage type for image + // Required: true + StorageType *string `json:"storageType"` + + // taskref + Taskref *TaskReference `json:"taskref,omitempty"` + + // Image Volumes + Volumes []*ImageVolume `json:"volumes"` +} + +// Validate validates this image +func (m *Image) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateImageID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLastUpdateDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSize(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpecifications(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStoragePool(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStorageType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTaskref(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVolumes(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Image) validateCreationDate(formats strfmt.Registry) error { + + if err := validate.Required("creationDate", "body", m.CreationDate); err != nil { + return err + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Image) validateImageID(formats strfmt.Registry) error { + + if err := validate.Required("imageID", "body", m.ImageID); err != nil { + return err + } + + return nil +} + +func (m *Image) validateLastUpdateDate(formats strfmt.Registry) error { + + if err := validate.Required("lastUpdateDate", "body", m.LastUpdateDate); err != nil { + return err + } + + if err := validate.FormatOf("lastUpdateDate", "body", "date-time", m.LastUpdateDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Image) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *Image) validateSize(formats strfmt.Registry) error { + + if err := validate.Required("size", "body", m.Size); err != nil { + return err + } + + return nil +} + +func (m *Image) validateSpecifications(formats strfmt.Registry) error { + + if swag.IsZero(m.Specifications) { // not required + return nil + } + + if m.Specifications != nil { + if err := m.Specifications.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("specifications") + } + return err + } + } + + return nil +} + +func (m *Image) validateStoragePool(formats strfmt.Registry) error { + + if err := validate.Required("storagePool", "body", m.StoragePool); err != nil { + return err + } + + return nil +} + +func (m *Image) validateStorageType(formats strfmt.Registry) error { + + if err := validate.Required("storageType", "body", m.StorageType); err != nil { + return err + } + + return nil +} + +func (m *Image) validateTaskref(formats strfmt.Registry) error { + + if swag.IsZero(m.Taskref) { // not required + return nil + } + + if m.Taskref != nil { + if err := m.Taskref.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("taskref") + } + return err + } + } + + return nil +} + +func (m *Image) validateVolumes(formats strfmt.Registry) error { + + if swag.IsZero(m.Volumes) { // not required + return nil + } + + for i := 0; i < len(m.Volumes); i++ { + if swag.IsZero(m.Volumes[i]) { // not required + continue + } + + if m.Volumes[i] != nil { + if err := m.Volumes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("volumes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Image) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Image) UnmarshalBinary(b []byte) error { + var res Image + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/image_reference.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/image_reference.go new file mode 100644 index 00000000000..a13618ba7a9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/image_reference.go @@ -0,0 +1,236 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ImageReference image reference +// swagger:model ImageReference +type ImageReference struct { + + // Creation Date + // Required: true + // Format: date-time + CreationDate *strfmt.DateTime `json:"creationDate"` + + // Description + // Required: true + Description *string `json:"description"` + + // Link to Image resource + // Required: true + Href *string `json:"href"` + + // Image ID + // Required: true + ImageID *string `json:"imageID"` + + // Last Update Date + // Required: true + // Format: date-time + LastUpdateDate *strfmt.DateTime `json:"lastUpdateDate"` + + // Image Name + // Required: true + Name *string `json:"name"` + + // specifications + // Required: true + Specifications *ImageSpecifications `json:"specifications"` + + // Image State + // Required: true + State *string `json:"state"` + + // Storage pool where image resides + // Required: true + StoragePool *string `json:"storagePool"` + + // Storage type for image + // Required: true + StorageType *string `json:"storageType"` +} + +// Validate validates this image reference +func (m *ImageReference) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDescription(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHref(formats); err != nil { + res = append(res, err) + } + + if err := m.validateImageID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLastUpdateDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpecifications(formats); err != nil { + res = append(res, err) + } + + if err := m.validateState(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStoragePool(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStorageType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ImageReference) validateCreationDate(formats strfmt.Registry) error { + + if err := validate.Required("creationDate", "body", m.CreationDate); err != nil { + return err + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *ImageReference) validateDescription(formats strfmt.Registry) error { + + if err := validate.Required("description", "body", m.Description); err != nil { + return err + } + + return nil +} + +func (m *ImageReference) validateHref(formats strfmt.Registry) error { + + if err := validate.Required("href", "body", m.Href); err != nil { + return err + } + + return nil +} + +func (m *ImageReference) validateImageID(formats strfmt.Registry) error { + + if err := validate.Required("imageID", "body", m.ImageID); err != nil { + return err + } + + return nil +} + +func (m *ImageReference) validateLastUpdateDate(formats strfmt.Registry) error { + + if err := validate.Required("lastUpdateDate", "body", m.LastUpdateDate); err != nil { + return err + } + + if err := validate.FormatOf("lastUpdateDate", "body", "date-time", m.LastUpdateDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *ImageReference) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *ImageReference) validateSpecifications(formats strfmt.Registry) error { + + if err := validate.Required("specifications", "body", m.Specifications); err != nil { + return err + } + + if m.Specifications != nil { + if err := m.Specifications.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("specifications") + } + return err + } + } + + return nil +} + +func (m *ImageReference) validateState(formats strfmt.Registry) error { + + if err := validate.Required("state", "body", m.State); err != nil { + return err + } + + return nil +} + +func (m *ImageReference) validateStoragePool(formats strfmt.Registry) error { + + if err := validate.Required("storagePool", "body", m.StoragePool); err != nil { + return err + } + + return nil +} + +func (m *ImageReference) validateStorageType(formats strfmt.Registry) error { + + if err := validate.Required("storageType", "body", m.StorageType); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ImageReference) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ImageReference) UnmarshalBinary(b []byte) error { + var res ImageReference + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/image_specifications.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/image_specifications.go new file mode 100644 index 00000000000..2f4639b57ff --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/image_specifications.go @@ -0,0 +1,61 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
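+//
+// NOTE (editorial sketch, not part of the generated output): this model has
+// no required or formatted properties, so the generated Validate is a no-op
+// and every field carries json:",omitempty"; a zero value is always valid:
+//
+//   b, _ := (&models.ImageSpecifications{}).MarshalBinary() // b == []byte("{}")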
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// ImageSpecifications image specifications +// swagger:model ImageSpecifications +type ImageSpecifications struct { + + // Architecture + Architecture string `json:"architecture,omitempty"` + + // Container Format + ContainerFormat string `json:"containerFormat,omitempty"` + + // Disk Format + DiskFormat string `json:"diskFormat,omitempty"` + + // Endianness + Endianness string `json:"endianness,omitempty"` + + // Hypervisor Type + HypervisorType string `json:"hypervisorType,omitempty"` + + // Image Type + ImageType string `json:"imageType,omitempty"` + + // Operating System + OperatingSystem string `json:"operatingSystem,omitempty"` +} + +// Validate validates this image specifications +func (m *ImageSpecifications) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ImageSpecifications) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ImageSpecifications) UnmarshalBinary(b []byte) error { + var res ImageSpecifications + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/image_volume.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/image_volume.go new file mode 100644 index 00000000000..f419e7a4566 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/image_volume.go @@ -0,0 +1,115 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ImageVolume image volume +// swagger:model ImageVolume +type ImageVolume struct { + + // Indicates if the volume is boot capable + // Required: true + Bootable *bool `json:"bootable"` + + // Volume Name + // Required: true + Name *string `json:"name"` + + // Volume Size + // Required: true + Size *float64 `json:"size"` + + // Volume ID + // Required: true + VolumeID *string `json:"volumeID"` +} + +// Validate validates this image volume +func (m *ImageVolume) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateBootable(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSize(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVolumeID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ImageVolume) validateBootable(formats strfmt.Registry) error { + + if err := validate.Required("bootable", "body", m.Bootable); err != nil { + return err + } + + return nil +} + +func (m *ImageVolume) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *ImageVolume) validateSize(formats strfmt.Registry) error { + + if err := validate.Required("size", "body", m.Size); err != nil { + return err + } + + return nil +} + +func (m *ImageVolume) validateVolumeID(formats strfmt.Registry) error { + + if err := validate.Required("volumeID", "body", m.VolumeID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ImageVolume) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ImageVolume) UnmarshalBinary(b []byte) error { + var res ImageVolume + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/images.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/images.go new file mode 100644 index 00000000000..3c355c496a5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/images.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Images images +// swagger:model Images +type Images struct { + + // Images + // Required: true + Images []*ImageReference `json:"images"` +} + +// Validate validates this images +func (m *Images) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateImages(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Images) validateImages(formats strfmt.Registry) error { + + if err := validate.Required("images", "body", m.Images); err != nil { + return err + } + + for i := 0; i < len(m.Images); i++ { + if swag.IsZero(m.Images[i]) { // not required + continue + } + + if m.Images[i] != nil { + if err := m.Images[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("images" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Images) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Images) UnmarshalBinary(b []byte) error { + var res Images + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/ip_address_range.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/ip_address_range.go new file mode 100644 index 00000000000..b4961b41bca --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/ip_address_range.go @@ -0,0 +1,81 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// IPAddressRange IP address range +// swagger:model IPAddressRange +type IPAddressRange struct { + + // Ending IP Address + // Required: true + EndingIPAddress *string `json:"endingIPAddress"` + + // Starting IP Address + // Required: true + StartingIPAddress *string `json:"startingIPAddress"` +} + +// Validate validates this IP address range +func (m *IPAddressRange) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEndingIPAddress(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStartingIPAddress(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IPAddressRange) validateEndingIPAddress(formats strfmt.Registry) error { + + if err := validate.Required("endingIPAddress", "body", m.EndingIPAddress); err != nil { + return err + } + + return nil +} + +func (m *IPAddressRange) validateStartingIPAddress(formats strfmt.Registry) error { + + if err := validate.Required("startingIPAddress", "body", m.StartingIPAddress); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IPAddressRange) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IPAddressRange) UnmarshalBinary(b []byte) error { + var res IPAddressRange + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/json_schema_object.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/json_schema_object.go new file mode 100644 index 00000000000..ea8f359bc29 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/json_schema_object.go @@ -0,0 +1,10 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// JSONSchemaObject JSON schema object +// swagger:model JSONSchemaObject +type JSONSchemaObject interface{} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/last_operation_resource.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/last_operation_resource.go new file mode 100644 index 00000000000..558cd5a0f48 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/last_operation_resource.go @@ -0,0 +1,107 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
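+//
+// NOTE (editorial sketch, not part of the generated output): the state enum
+// is enforced at validation time, so prefer the generated constants below
+// over raw strings:
+//
+//   op := &models.LastOperationResource{
+//       State: swag.String(models.LastOperationResourceStateSucceeded),
+//   }
+//   err := op.Validate(strfmt.Default) // nil; any state outside the enum fails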
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// LastOperationResource last operation resource +// swagger:model LastOperationResource +type LastOperationResource struct { + + // description + Description string `json:"description,omitempty"` + + // state + // Required: true + // Enum: [in progress succeeded failed] + State *string `json:"state"` +} + +// Validate validates this last operation resource +func (m *LastOperationResource) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateState(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var lastOperationResourceTypeStatePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["in progress","succeeded","failed"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + lastOperationResourceTypeStatePropEnum = append(lastOperationResourceTypeStatePropEnum, v) + } +} + +const ( + + // LastOperationResourceStateInProgress captures enum value "in progress" + LastOperationResourceStateInProgress string = "in progress" + + // LastOperationResourceStateSucceeded captures enum value "succeeded" + LastOperationResourceStateSucceeded string = "succeeded" + + // LastOperationResourceStateFailed captures enum value "failed" + LastOperationResourceStateFailed string = "failed" +) + +// prop value enum +func (m *LastOperationResource) validateStateEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, lastOperationResourceTypeStatePropEnum); err != nil { + return err + } + return nil +} + +func (m *LastOperationResource) validateState(formats strfmt.Registry) error { + + if err := validate.Required("state", "body", m.State); err != nil { + return err + } + + // value enum + if err := m.validateStateEnum("state", "body", *m.State); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LastOperationResource) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LastOperationResource) UnmarshalBinary(b []byte) error { + var res LastOperationResource + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/maximum_storage_allocation.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/maximum_storage_allocation.go new file mode 100644 index 00000000000..dfac4c98714 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/maximum_storage_allocation.go @@ -0,0 +1,98 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// MaximumStorageAllocation Maximum storage allocation +// swagger:model MaximumStorageAllocation +type MaximumStorageAllocation struct { + + // Maximum allocation storage size (GB) + // Required: true + MaxAllocationSize *int64 `json:"maxAllocationSize"` + + // Storage pool + // Required: true + StoragePool *string `json:"storagePool"` + + // Storage type + // Required: true + StorageType *string `json:"storageType"` +} + +// Validate validates this maximum storage allocation +func (m *MaximumStorageAllocation) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMaxAllocationSize(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStoragePool(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStorageType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *MaximumStorageAllocation) validateMaxAllocationSize(formats strfmt.Registry) error { + + if err := validate.Required("maxAllocationSize", "body", m.MaxAllocationSize); err != nil { + return err + } + + return nil +} + +func (m *MaximumStorageAllocation) validateStoragePool(formats strfmt.Registry) error { + + if err := validate.Required("storagePool", "body", m.StoragePool); err != nil { + return err + } + + return nil +} + +func (m *MaximumStorageAllocation) validateStorageType(formats strfmt.Registry) error { + + if err := validate.Required("storageType", "body", m.StorageType); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *MaximumStorageAllocation) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MaximumStorageAllocation) UnmarshalBinary(b []byte) error { + var res MaximumStorageAllocation + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/metadata.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/metadata.go new file mode 100644 index 00000000000..e8ddbbdc193 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/metadata.go @@ -0,0 +1,10 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Metadata See [Service Metadata Conventions](https://github.com/openservicebrokerapi/servicebroker/blob/master/profile.md#service-metadata) for more details. +// swagger:model Metadata +type Metadata interface{} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/min_max_default.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/min_max_default.go new file mode 100644 index 00000000000..06238e2e6a4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/min_max_default.go @@ -0,0 +1,98 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// MinMaxDefault min max default +// swagger:model MinMaxDefault +type MinMaxDefault struct { + + // default value + // Required: true + Default *float64 `json:"default"` + + // max value + // Required: true + Max *float64 `json:"max"` + + // min value + // Required: true + Min *float64 `json:"min"` +} + +// Validate validates this min max default +func (m *MinMaxDefault) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDefault(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMax(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMin(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *MinMaxDefault) validateDefault(formats strfmt.Registry) error { + + if err := validate.Required("default", "body", m.Default); err != nil { + return err + } + + return nil +} + +func (m *MinMaxDefault) validateMax(formats strfmt.Registry) error { + + if err := validate.Required("max", "body", m.Max); err != nil { + return err + } + + return nil +} + +func (m *MinMaxDefault) validateMin(formats strfmt.Registry) error { + + if err := validate.Required("min", "body", m.Min); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *MinMaxDefault) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MinMaxDefault) UnmarshalBinary(b []byte) error { + var res MinMaxDefault + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/multi_volumes_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/multi_volumes_create.go new file mode 100644 index 00000000000..c6d33d68dea --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/multi_volumes_create.go @@ -0,0 +1,152 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
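+//
+// NOTE (editorial sketch, not part of the generated output): affinityPolicy
+// is an optional enum; validateAffinityPolicy returns early via swag.IsZero
+// when the field is nil and otherwise checks the value against the enum. A
+// hypothetical request (names and sizes are illustrative):
+//
+//   req := &models.MultiVolumesCreate{
+//       Name:           swag.String("data-vol"),
+//       Size:           swag.Int64(100),
+//       AffinityPolicy: swag.String(models.MultiVolumesCreateAffinityPolicyAffinity),
+//       AffinityVolume: swag.String("existing-volume"),
+//   }
+//   err := req.Validate(strfmt.Default)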
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+
+ strfmt "github.com/go-openapi/strfmt"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// MultiVolumesCreate multi volumes create
+// swagger:model MultiVolumesCreate
+type MultiVolumesCreate struct {
+
+ // PVM Instance (ID or Name) to base volume affinity policy against; required if affinityPolicy is provided and affinityVolume is not provided
+ AffinityPVMInstance *string `json:"affinityPVMInstance,omitempty"`
+
+ // Affinity policy for data volume being created; requires affinityPVMInstance or affinityVolume to be specified; ignored if volumePool provided
+ // Enum: [affinity anti-affinity]
+ AffinityPolicy *string `json:"affinityPolicy,omitempty"`
+
+ // Volume (ID or Name) to base volume affinity policy against; required if affinityPolicy is provided and affinityPVMInstance is not provided
+ AffinityVolume *string `json:"affinityVolume,omitempty"`
+
+ // Number of volumes to create
+ Count int64 `json:"count,omitempty"`
+
+ // Type of Disk, required if affinityPolicy and volumePool not provided, otherwise ignored
+ DiskType string `json:"diskType,omitempty"`
+
+ // Base name of the volume(s)
+ // Required: true
+ Name *string `json:"name"`
+
+ // Indicates if the volume is shareable between VMs
+ Shareable *bool `json:"shareable,omitempty"`
+
+ // Volume Size (GB)
+ // Required: true
+ Size *int64 `json:"size"`
+
+ // Volume pool where the volume will be created; if provided then diskType and affinityPolicy values will be ignored
+ VolumePool string `json:"volumePool,omitempty"`
+}
+
+// Validate validates this multi volumes create
+func (m *MultiVolumesCreate) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAffinityPolicy(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateName(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSize(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ } + return nil +} + +var multiVolumesCreateTypeAffinityPolicyPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["affinity","anti-affinity"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + multiVolumesCreateTypeAffinityPolicyPropEnum = append(multiVolumesCreateTypeAffinityPolicyPropEnum, v) + } +} + +const ( + + // MultiVolumesCreateAffinityPolicyAffinity captures enum value "affinity" + MultiVolumesCreateAffinityPolicyAffinity string = "affinity" + + // MultiVolumesCreateAffinityPolicyAntiAffinity captures enum value "anti-affinity" + MultiVolumesCreateAffinityPolicyAntiAffinity string = "anti-affinity" +) + +// prop value enum +func (m *MultiVolumesCreate) validateAffinityPolicyEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, multiVolumesCreateTypeAffinityPolicyPropEnum); err != nil { + return err + } + return nil +} + +func (m *MultiVolumesCreate) validateAffinityPolicy(formats strfmt.Registry) error { + + if swag.IsZero(m.AffinityPolicy) { // not required + return nil + } + + // value enum + if err := m.validateAffinityPolicyEnum("affinityPolicy", "body", *m.AffinityPolicy); err != nil { + return err + } + + return nil +} + +func (m *MultiVolumesCreate) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *MultiVolumesCreate) validateSize(formats strfmt.Registry) error { + + if err := validate.Required("size", "body", m.Size); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *MultiVolumesCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MultiVolumesCreate) UnmarshalBinary(b []byte) error { + var res MultiVolumesCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/network.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network.go new file mode 100644 index 00000000000..8092a3fefd0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network.go @@ -0,0 +1,465 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
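+//
+// NOTE (editorial sketch, not part of the generated output): slice properties
+// are validated element by element, and a nested failure is re-rooted so the
+// returned *errors.CompositeError names the offending index, e.g. an invalid
+// entry at position 2 of ipAddressRanges is reported as "ipAddressRanges.2";
+// the Events, Images, and HostInfo models above follow the same pattern.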
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Network network +// swagger:model Network +type Network struct { + + // Network in CIDR notation (192.168.0.0/24) + // Required: true + Cidr *string `json:"cidr"` + + // (currently not available) cloud connections this network is attached + CloudConnections []*NetworkCloudConnectionsItems0 `json:"cloudConnections,omitempty"` + + // DNS Servers + // Required: true + DNSServers []string `json:"dnsServers"` + + // Gateway IP Address + Gateway string `json:"gateway,omitempty"` + + // ip address metrics + // Required: true + IPAddressMetrics *NetworkIPAddressMetrics `json:"ipAddressMetrics"` + + // IP Address Ranges + // Required: true + IPAddressRanges []*IPAddressRange `json:"ipAddressRanges"` + + // MTU Jumbo Network enabled + // Required: true + Jumbo *bool `json:"jumbo"` + + // Network Name + // Required: true + Name *string `json:"name"` + + // Unique Network ID + // Required: true + NetworkID *string `json:"networkID"` + + // Public IP Address Ranges (for pub-vlan networks) + PublicIPAddressRanges []*IPAddressRange `json:"publicIPAddressRanges,omitempty"` + + // Type of Network {vlan, pub-vlan} + // Required: true + // Enum: [vlan pub-vlan] + Type *string `json:"type"` + + // VLAN ID + // Required: true + VlanID *float64 `json:"vlanID"` +} + +// Validate validates this network +func (m *Network) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCidr(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCloudConnections(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDNSServers(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIPAddressMetrics(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIPAddressRanges(formats); err != nil { + res = append(res, err) + } + + if err := m.validateJumbo(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetworkID(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicIPAddressRanges(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVlanID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Network) validateCidr(formats strfmt.Registry) error { + + if err := validate.Required("cidr", "body", m.Cidr); err != nil { + return err + } + + return nil +} + +func (m *Network) validateCloudConnections(formats strfmt.Registry) error { + + if swag.IsZero(m.CloudConnections) { // not required + return nil + } + + for i := 0; i < len(m.CloudConnections); i++ { + if swag.IsZero(m.CloudConnections[i]) { // not required + continue + } + + if m.CloudConnections[i] != nil { + if err := m.CloudConnections[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("cloudConnections" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Network) validateDNSServers(formats strfmt.Registry) error { + + if err := validate.Required("dnsServers", "body", m.DNSServers); err != nil { + return err + } + + return nil +} + +func (m *Network) validateIPAddressMetrics(formats strfmt.Registry) error { + + if err := validate.Required("ipAddressMetrics", "body", m.IPAddressMetrics); err != nil { + return err + } + + if m.IPAddressMetrics != nil { + if err := m.IPAddressMetrics.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ipAddressMetrics") + } + return err + } + } + + return nil +} + +func (m *Network) validateIPAddressRanges(formats strfmt.Registry) error { + + if err := validate.Required("ipAddressRanges", "body", m.IPAddressRanges); err != nil { + return err + } + + for i := 0; i < len(m.IPAddressRanges); i++ { + if swag.IsZero(m.IPAddressRanges[i]) { // not required + continue + } + + if m.IPAddressRanges[i] != nil { + if err := m.IPAddressRanges[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ipAddressRanges" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Network) validateJumbo(formats strfmt.Registry) error { + + if err := validate.Required("jumbo", "body", m.Jumbo); err != nil { + return err + } + + return nil +} + +func (m *Network) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *Network) validateNetworkID(formats strfmt.Registry) error { + + if err := validate.Required("networkID", "body", m.NetworkID); err != nil { + return err + } + + return nil +} + +func (m *Network) validatePublicIPAddressRanges(formats strfmt.Registry) error { + + if swag.IsZero(m.PublicIPAddressRanges) { // not required + return nil + } + + for i := 0; i < len(m.PublicIPAddressRanges); i++ { + if swag.IsZero(m.PublicIPAddressRanges[i]) { // not required + continue + } + + if m.PublicIPAddressRanges[i] != nil { + if err := m.PublicIPAddressRanges[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("publicIPAddressRanges" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var networkTypeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["vlan","pub-vlan"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + networkTypeTypePropEnum = append(networkTypeTypePropEnum, v) + } +} + +const ( + + // NetworkTypeVlan captures enum value "vlan" + NetworkTypeVlan string = "vlan" + + // NetworkTypePubVlan captures enum value "pub-vlan" + NetworkTypePubVlan string = "pub-vlan" +) + +// prop value enum +func (m *Network) validateTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, networkTypeTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *Network) validateType(formats strfmt.Registry) error { + + if err := validate.Required("type", "body", m.Type); err != nil { + return err + } + + // value enum + if err := m.validateTypeEnum("type", "body", *m.Type); err != nil { + return err + } + + return nil +} + +func (m *Network) validateVlanID(formats strfmt.Registry) error { + + if err := validate.Required("vlanID", "body", m.VlanID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Network) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Network) UnmarshalBinary(b []byte) error { + var res Network + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// NetworkCloudConnectionsItems0 network cloud connections items0 +// swagger:model NetworkCloudConnectionsItems0 +type NetworkCloudConnectionsItems0 struct { + + // the cloud connection id + CloudConnectionID string `json:"cloudConnectionID,omitempty"` + + // link to the cloud connection resource + Href string `json:"href,omitempty"` +} + +// Validate validates this network cloud connections items0 +func (m *NetworkCloudConnectionsItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkCloudConnectionsItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkCloudConnectionsItems0) UnmarshalBinary(b []byte) error { + var res NetworkCloudConnectionsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// NetworkIPAddressMetrics IP Address Metrics +// swagger:model NetworkIPAddressMetrics +type NetworkIPAddressMetrics struct { + + // Number of available IP addresses + // Required: true + Available *float64 `json:"available"` + + // Total number of all IP addresses in all ipAddressRanges + // Required: true + Total *float64 `json:"total"` + + // Number of IP addresses currently in use + // Required: true + Used *float64 `json:"used"` + + // Utilization of IP addresses in percent form (used / total) [0 - 100] + // Required: true + Utilization *float64 `json:"utilization"` +} + +// Validate validates this network IP address metrics +func (m *NetworkIPAddressMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAvailable(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTotal(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUsed(formats); err != nil { + res = append(res, err) + } + + if err := 
m.validateUtilization(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NetworkIPAddressMetrics) validateAvailable(formats strfmt.Registry) error { + + if err := validate.Required("ipAddressMetrics"+"."+"available", "body", m.Available); err != nil { + return err + } + + return nil +} + +func (m *NetworkIPAddressMetrics) validateTotal(formats strfmt.Registry) error { + + if err := validate.Required("ipAddressMetrics"+"."+"total", "body", m.Total); err != nil { + return err + } + + return nil +} + +func (m *NetworkIPAddressMetrics) validateUsed(formats strfmt.Registry) error { + + if err := validate.Required("ipAddressMetrics"+"."+"used", "body", m.Used); err != nil { + return err + } + + return nil +} + +func (m *NetworkIPAddressMetrics) validateUtilization(formats strfmt.Registry) error { + + if err := validate.Required("ipAddressMetrics"+"."+"utilization", "body", m.Utilization); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkIPAddressMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkIPAddressMetrics) UnmarshalBinary(b []byte) error { + var res NetworkIPAddressMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_create.go new file mode 100644 index 00000000000..bdd9072c6db --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_create.go @@ -0,0 +1,149 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NetworkCreate network create +// swagger:model NetworkCreate +type NetworkCreate struct { + + // Network in CIDR notation (192.168.0.0/24) + Cidr string `json:"cidr,omitempty"` + + // DNS Servers + DNSServers []string `json:"dnsServers"` + + // Gateway IP Address + Gateway string `json:"gateway,omitempty"` + + // IP Address Ranges + IPAddressRanges []*IPAddressRange `json:"ipAddressRanges"` + + // Enable MTU Jumbo Network + Jumbo bool `json:"jumbo,omitempty"` + + // Network Name + Name string `json:"name,omitempty"` + + // Type of Network - 'vlan' (private network) 'pub-vlan' (public network) + // Required: true + // Enum: [vlan pub-vlan] + Type *string `json:"type"` +} + +// Validate validates this network create +func (m *NetworkCreate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateIPAddressRanges(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *NetworkCreate) validateIPAddressRanges(formats strfmt.Registry) error { + + if swag.IsZero(m.IPAddressRanges) { // not required + return nil + } + + for i := 0; i < len(m.IPAddressRanges); i++ { + if swag.IsZero(m.IPAddressRanges[i]) { // not required + continue + } + + if m.IPAddressRanges[i] != nil { + if err := m.IPAddressRanges[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ipAddressRanges" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var networkCreateTypeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["vlan","pub-vlan"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + networkCreateTypeTypePropEnum = append(networkCreateTypeTypePropEnum, v) + } +} + +const ( + + // NetworkCreateTypeVlan captures enum value "vlan" + NetworkCreateTypeVlan string = "vlan" + + // NetworkCreateTypePubVlan captures enum value "pub-vlan" + NetworkCreateTypePubVlan string = "pub-vlan" +) + +// prop value enum +func (m *NetworkCreate) validateTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, networkCreateTypeTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *NetworkCreate) validateType(formats strfmt.Registry) error { + + if err := validate.Required("type", "body", m.Type); err != nil { + return err + } + + // value enum + if err := m.validateTypeEnum("type", "body", *m.Type); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkCreate) UnmarshalBinary(b []byte) error { + var res NetworkCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_port.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_port.go new file mode 100644 index 00000000000..226bb26aed4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_port.go @@ -0,0 +1,197 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
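On `NetworkCreate`, `type` is the only required property and the only pointer field, which is why its validator runs `validate.Required` before the enum check while everything else is optional. A minimal sketch of building and validating a private-network request, using only fields defined in the model above:

```go
package main

import (
	"fmt"

	"github.com/IBM-Cloud/power-go-client/power/models"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
)

func main() {
	create := &models.NetworkCreate{
		Name:       "mgmt-net",
		Cidr:       "192.168.0.0/24",
		Gateway:    "192.168.0.1",
		DNSServers: []string{"9.9.9.9"},
		// Type is the only required field; use the generated constant
		// instead of a raw string literal.
		Type: swag.String(models.NetworkCreateTypeVlan),
	}

	if err := create.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid network create payload:", err)
		return
	}
	fmt.Println("payload ok")
}
```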
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NetworkPort network port +// swagger:model NetworkPort +type NetworkPort struct { + + // Description of the port (not unique or indexable) + // Required: true + Description *string `json:"description"` + + // The external ip address (for pub-vlan networks) + ExternalIP string `json:"externalIP,omitempty"` + + // Link to port resource + Href string `json:"href,omitempty"` + + // The ip address of this port + // Required: true + IPAddress *string `json:"ipAddress"` + + // The mac address of the network interface + // Required: true + MacAddress *string `json:"macAddress"` + + // The unique Port ID + // Required: true + PortID *string `json:"portID"` + + // pvm instance + PvmInstance *NetworkPortPvmInstance `json:"pvmInstance,omitempty"` + + // The status of the port + // Required: true + Status *string `json:"status"` +} + +// Validate validates this network port +func (m *NetworkPort) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDescription(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIPAddress(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMacAddress(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePortID(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePvmInstance(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NetworkPort) validateDescription(formats strfmt.Registry) error { + + if err := validate.Required("description", "body", m.Description); err != nil { + return err + } + + return nil +} + +func (m *NetworkPort) validateIPAddress(formats strfmt.Registry) error { + + if err := validate.Required("ipAddress", "body", m.IPAddress); err != nil { + return err + } + + return nil +} + +func (m *NetworkPort) validateMacAddress(formats strfmt.Registry) error { + + if err := validate.Required("macAddress", "body", m.MacAddress); err != nil { + return err + } + + return nil +} + +func (m *NetworkPort) validatePortID(formats strfmt.Registry) error { + + if err := validate.Required("portID", "body", m.PortID); err != nil { + return err + } + + return nil +} + +func (m *NetworkPort) validatePvmInstance(formats strfmt.Registry) error { + + if swag.IsZero(m.PvmInstance) { // not required + return nil + } + + if m.PvmInstance != nil { + if err := m.PvmInstance.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pvmInstance") + } + return err + } + } + + return nil +} + +func (m *NetworkPort) validateStatus(formats strfmt.Registry) error { + + if err := validate.Required("status", "body", m.Status); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkPort) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkPort) UnmarshalBinary(b []byte) error { + var res NetworkPort + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// NetworkPortPvmInstance The pvm-instance attached to this port
+// swagger:model NetworkPortPvmInstance +type NetworkPortPvmInstance struct { + + // Link to pvm-instance resource + Href string `json:"href,omitempty"` + + // The attached pvm-instance ID + PvmInstanceID string `json:"pvmInstanceID,omitempty"` +} + +// Validate validates this network port pvm instance +func (m *NetworkPortPvmInstance) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkPortPvmInstance) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkPortPvmInstance) UnmarshalBinary(b []byte) error { + var res NetworkPortPvmInstance + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_port_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_port_create.go new file mode 100644 index 00000000000..c477d5856d3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_port_create.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// NetworkPortCreate network port create +// swagger:model NetworkPortCreate +type NetworkPortCreate struct { + + // Description of the port (not unique or indexable) + Description string `json:"description,omitempty"` + + // The requested ip address of this port + IPAddress string `json:"ipAddress,omitempty"` +} + +// Validate validates this network port create +func (m *NetworkPortCreate) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkPortCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkPortCreate) UnmarshalBinary(b []byte) error { + var res NetworkPortCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_port_update.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_port_update.go new file mode 100644 index 00000000000..63f0d2e779e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_port_update.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool.
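Every generated model implements `MarshalBinary`/`UnmarshalBinary` as thin wrappers over `swag.WriteJSON`/`swag.ReadJSON`, so payloads round-trip through plain JSON keyed by the struct tags. A quick sketch using `NetworkPortCreate`, whose two fields are defined just above:

```go
package main

import (
	"fmt"

	"github.com/IBM-Cloud/power-go-client/power/models"
)

func main() {
	in := &models.NetworkPortCreate{
		Description: "bootstrap port",
		IPAddress:   "192.168.0.10",
	}

	// MarshalBinary wraps swag.WriteJSON, so the output is ordinary JSON,
	// e.g. {"description":"bootstrap port","ipAddress":"192.168.0.10"}.
	raw, err := in.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))

	var out models.NetworkPortCreate
	if err := out.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.IPAddress == in.IPAddress) // true
}
```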
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// NetworkPortUpdate network port update +// swagger:model NetworkPortUpdate +type NetworkPortUpdate struct { + + // Description of the port (not unique or indexable) + Description *string `json:"description,omitempty"` + + // If supplied and populated, attaches the port to the given PVMInstanceID; if supplied but empty, detaches the port from its PVMInstanceID + PvmInstanceID *string `json:"pvmInstanceID,omitempty"` +} + +// Validate validates this network port update +func (m *NetworkPortUpdate) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkPortUpdate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkPortUpdate) UnmarshalBinary(b []byte) error { + var res NetworkPortUpdate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_ports.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_ports.go new file mode 100644 index 00000000000..5fe68913d07 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_ports.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NetworkPorts network ports +// swagger:model NetworkPorts +type NetworkPorts struct { + + // Network Ports + // Required: true + Ports []*NetworkPort `json:"ports"` +} + +// Validate validates this network ports +func (m *NetworkPorts) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePorts(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NetworkPorts) validatePorts(formats strfmt.Registry) error { + + if err := validate.Required("ports", "body", m.Ports); err != nil { + return err + } + + for i := 0; i < len(m.Ports); i++ { + if swag.IsZero(m.Ports[i]) { // not required + continue + } + + if m.Ports[i] != nil { + if err := m.Ports[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ports" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkPorts) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkPorts) UnmarshalBinary(b []byte) error { + var res NetworkPorts + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_reference.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_reference.go new file mode 100644 index 00000000000..8ece9b00ebc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_reference.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT.
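Because `NetworkPortUpdate` uses pointer fields with `omitempty`, a PATCH-style body can express three distinct states per field: nil (key omitted, leave unchanged), a pointer to an empty string (explicitly clear, which detaches the port), and a pointer to a value (attach). A short sketch of the three cases; the instance ID is a made-up placeholder:

```go
package main

import (
	"fmt"

	"github.com/IBM-Cloud/power-go-client/power/models"
	"github.com/go-openapi/swag"
)

func main() {
	// Attach the port: send a populated pvmInstanceID (hypothetical value).
	attach := &models.NetworkPortUpdate{
		PvmInstanceID: swag.String("example-instance-id"),
	}

	// Detach: send an explicitly empty string. Since the field is a
	// *string, this is distinguishable from omitting it entirely.
	detach := &models.NetworkPortUpdate{
		PvmInstanceID: swag.String(""),
	}

	// Leave the attachment untouched: a nil pointer is dropped from the
	// JSON body by omitempty.
	rename := &models.NetworkPortUpdate{
		Description: swag.String("renamed port"),
	}

	for _, m := range []*models.NetworkPortUpdate{attach, detach, rename} {
		b, _ := m.MarshalBinary()
		fmt.Println(string(b))
	}
}
```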
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NetworkReference network reference +// swagger:model NetworkReference +type NetworkReference struct { + + // Link to Network resource + // Required: true + Href *string `json:"href"` + + // MTU Jumbo Network enabled + // Required: true + Jumbo *bool `json:"jumbo"` + + // Network Name + // Required: true + Name *string `json:"name"` + + // Unique Network ID + // Required: true + NetworkID *string `json:"networkID"` + + // Type of Network {vlan, pub-vlan} + // Required: true + // Enum: [vlan pub-vlan] + Type *string `json:"type"` + + // VLAN ID + // Required: true + VlanID *float64 `json:"vlanID"` +} + +// Validate validates this network reference +func (m *NetworkReference) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHref(formats); err != nil { + res = append(res, err) + } + + if err := m.validateJumbo(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetworkID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVlanID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NetworkReference) validateHref(formats strfmt.Registry) error { + + if err := validate.Required("href", "body", m.Href); err != nil { + return err + } + + return nil +} + +func (m *NetworkReference) validateJumbo(formats strfmt.Registry) error { + + if err := validate.Required("jumbo", "body", m.Jumbo); err != nil { + return err + } + + return nil +} + +func (m *NetworkReference) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *NetworkReference) validateNetworkID(formats strfmt.Registry) error { + + if err := validate.Required("networkID", "body", m.NetworkID); err != nil { + return err + } + + return nil +} + +var networkReferenceTypeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["vlan","pub-vlan"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + networkReferenceTypeTypePropEnum = append(networkReferenceTypeTypePropEnum, v) + } +} + +const ( + + // NetworkReferenceTypeVlan captures enum value "vlan" + NetworkReferenceTypeVlan string = "vlan" + + // NetworkReferenceTypePubVlan captures enum value "pub-vlan" + NetworkReferenceTypePubVlan string = "pub-vlan" +) + +// prop value enum +func (m *NetworkReference) validateTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, networkReferenceTypeTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *NetworkReference) validateType(formats strfmt.Registry) error { + + if err := validate.Required("type", "body", m.Type); err != nil { + return err + } + + // value enum + if err := m.validateTypeEnum("type", "body", *m.Type); err != nil { + return err + } + + return nil +} + +func (m *NetworkReference) validateVlanID(formats strfmt.Registry) error { + + if err 
:= validate.Required("vlanID", "body", m.VlanID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkReference) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkReference) UnmarshalBinary(b []byte) error { + var res NetworkReference + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_update.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_update.go new file mode 100644 index 00000000000..786eff03fdd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/network_update.go @@ -0,0 +1,89 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// NetworkUpdate network update +// swagger:model NetworkUpdate +type NetworkUpdate struct { + + // Replaces the current DNS Servers + DNSServers []string `json:"dnsServers"` + + // Replaces the current Gateway IP Address + Gateway *string `json:"gateway,omitempty"` + + // Replaces the current IP Address Ranges + IPAddressRanges []*IPAddressRange `json:"ipAddressRanges"` + + // Replaces the current Network Name + Name *string `json:"name,omitempty"` +} + +// Validate validates this network update +func (m *NetworkUpdate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateIPAddressRanges(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NetworkUpdate) validateIPAddressRanges(formats strfmt.Registry) error { + + if swag.IsZero(m.IPAddressRanges) { // not required + return nil + } + + for i := 0; i < len(m.IPAddressRanges); i++ { + if swag.IsZero(m.IPAddressRanges[i]) { // not required + continue + } + + if m.IPAddressRanges[i] != nil { + if err := m.IPAddressRanges[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ipAddressRanges" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NetworkUpdate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NetworkUpdate) UnmarshalBinary(b []byte) error { + var res NetworkUpdate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/networks.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/networks.go new file mode 100644 index 00000000000..416a86b59f2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/networks.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Networks networks +// swagger:model Networks +type Networks struct { + + // Network References + // Required: true + Networks []*NetworkReference `json:"networks"` +} + +// Validate validates this networks +func (m *Networks) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateNetworks(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Networks) validateNetworks(formats strfmt.Registry) error { + + if err := validate.Required("networks", "body", m.Networks); err != nil { + return err + } + + for i := 0; i < len(m.Networks); i++ { + if swag.IsZero(m.Networks[i]) { // not required + continue + } + + if m.Networks[i] != nil { + if err := m.Networks[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("networks" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Networks) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Networks) UnmarshalBinary(b []byte) error { + var res Networks + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/object.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/object.go new file mode 100644 index 00000000000..313a24e977b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/object.go @@ -0,0 +1,10 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Object object +// swagger:model Object +type Object interface{} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stack.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stack.go new file mode 100644 index 00000000000..5c5ba92b2e2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stack.go @@ -0,0 +1,132 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// OpenStack open stack +// swagger:model OpenStack +type OpenStack struct { + + // Unique identifier for the OpenStack instance + // Required: true + ID *string `json:"id"` + + // Internal IP address of the OpenStack instance + // Required: true + IPAddress *string `json:"ipAddress"` + + // Shortname of the OpenStack instance + // Required: true + Name *string `json:"name"` + + // Next available VLAN ID to be used for a network creation + // Required: true + NextVLANID *float64 `json:"nextVLANID"` + + // The region where the open stack lives + // Required: true + Region *string `json:"region"` +} + +// Validate validates this open stack +func (m *OpenStack) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIPAddress(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNextVLANID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRegion(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *OpenStack) validateID(formats strfmt.Registry) error { + + if err := validate.Required("id", "body", m.ID); err != nil { + return err + } + + return nil +} + +func (m *OpenStack) validateIPAddress(formats strfmt.Registry) error { + + if err := validate.Required("ipAddress", "body", m.IPAddress); err != nil { + return err + } + + return nil +} + +func (m *OpenStack) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *OpenStack) validateNextVLANID(formats strfmt.Registry) error { + + if err := validate.Required("nextVLANID", "body", m.NextVLANID); err != nil { + return err + } + + return nil +} + +func (m *OpenStack) validateRegion(formats strfmt.Registry) error { + + if err := validate.Required("region", "body", m.Region); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *OpenStack) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *OpenStack) UnmarshalBinary(b []byte) error { + var res OpenStack + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stack_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stack_create.go new file mode 100644 index 00000000000..f86cb6cc8fd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stack_create.go @@ -0,0 +1,98 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
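Note that every required property on `OpenStack` is a pointer, which is how go-swagger distinguishes an unset field from a legitimate zero value such as a `nextVLANID` of 0; `Validate` then reports nil pointers as missing. A small sketch of that behavior (all field values are invented):

```go
package main

import (
	"fmt"

	"github.com/IBM-Cloud/power-go-client/power/models"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
)

func main() {
	// NextVLANID is deliberately left nil, so Validate flags it as missing,
	// something a plain float64 field could never distinguish from 0.
	stack := &models.OpenStack{
		ID:        swag.String("os-1"),
		IPAddress: swag.String("10.0.0.5"),
		Name:      swag.String("powervs-os"),
		Region:    swag.String("us-south"),
	}

	if err := stack.Validate(strfmt.Default); err != nil {
		fmt.Println(err) // reports that nextVLANID is required
	}
}
```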
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// OpenStackCreate open stack create +// swagger:model OpenStackCreate +type OpenStackCreate struct { + + // Internal IP address of the OpenStack instance + // Required: true + IPAddress *string `json:"ipAddress"` + + // Shortname of the OpenStack instance + // Required: true + Name *string `json:"name"` + + // The region where the open stack lives + // Required: true + Region *string `json:"region"` +} + +// Validate validates this open stack create +func (m *OpenStackCreate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateIPAddress(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRegion(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *OpenStackCreate) validateIPAddress(formats strfmt.Registry) error { + + if err := validate.Required("ipAddress", "body", m.IPAddress); err != nil { + return err + } + + return nil +} + +func (m *OpenStackCreate) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *OpenStackCreate) validateRegion(formats strfmt.Registry) error { + + if err := validate.Required("region", "body", m.Region); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *OpenStackCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *OpenStackCreate) UnmarshalBinary(b []byte) error { + var res OpenStackCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stack_info.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stack_info.go new file mode 100644 index 00000000000..3596793128d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stack_info.go @@ -0,0 +1,99 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// OpenStackInfo open stack info +// swagger:model OpenStackInfo +type OpenStackInfo struct { + + // Hosts on OpenStack + // Required: true + Hosts []*HostInfo `json:"hosts"` + + // Requested region + // Required: true + Region *string `json:"region"` +} + +// Validate validates this open stack info +func (m *OpenStackInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHosts(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRegion(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *OpenStackInfo) validateHosts(formats strfmt.Registry) error { + + if err := validate.Required("hosts", "body", m.Hosts); err != nil { + return err + } + + for i := 0; i < len(m.Hosts); i++ { + if swag.IsZero(m.Hosts[i]) { // not required + continue + } + + if m.Hosts[i] != nil { + if err := m.Hosts[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hosts" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *OpenStackInfo) validateRegion(formats strfmt.Registry) error { + + if err := validate.Required("region", "body", m.Region); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *OpenStackInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *OpenStackInfo) UnmarshalBinary(b []byte) error { + var res OpenStackInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stacks.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stacks.go new file mode 100644 index 00000000000..701a0fce85c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/open_stacks.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// OpenStacks open stacks +// swagger:model OpenStacks +type OpenStacks struct { + + // OpenStacks managed by Power IAAS + // Required: true + OpenStacks []*OpenStack `json:"openStacks"` +} + +// Validate validates this open stacks +func (m *OpenStacks) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateOpenStacks(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *OpenStacks) validateOpenStacks(formats strfmt.Registry) error { + + if err := validate.Required("openStacks", "body", m.OpenStacks); err != nil { + return err + } + + for i := 0; i < len(m.OpenStacks); i++ { + if swag.IsZero(m.OpenStacks[i]) { // not required + continue + } + + if m.OpenStacks[i] != nil { + if err := m.OpenStacks[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("openStacks" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *OpenStacks) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *OpenStacks) UnmarshalBinary(b []byte) error { + var res OpenStacks + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/operations.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/operations.go new file mode 100644 index 00000000000..a64afc1e3e0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/operations.go @@ -0,0 +1,226 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Operations operations +// swagger:model Operations +type Operations struct { + + // Name of the server boot mode a(Boot from disk using copy A), b(Boot from disk using copy B), c(Reserved for IBM lab use only), d(Boot from media/drives) + // Enum: [a b c d] + BootMode string `json:"bootMode,omitempty"` + + // Name of the server operating mode + // Enum: [normal manual] + OperatingMode string `json:"operatingMode,omitempty"` + + // Name of the job task to execute + // Enum: [dston retrydump consoleservice iopreset remotedstoff remotedston iopdump dumprestart] + Task string `json:"task,omitempty"` +} + +// Validate validates this operations +func (m *Operations) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateBootMode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOperatingMode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTask(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var operationsTypeBootModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["a","b","c","d"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + operationsTypeBootModePropEnum = append(operationsTypeBootModePropEnum, v) + } +} + +const ( + + // OperationsBootModeA captures enum value "a" + OperationsBootModeA string = "a" + + // OperationsBootModeB captures enum value "b" + OperationsBootModeB string = "b" + + // OperationsBootModeC captures enum value "c" + OperationsBootModeC string = "c" + + // OperationsBootModeD captures enum value "d" + OperationsBootModeD string = "d" +) + +// prop value enum +func (m *Operations) validateBootModeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, operationsTypeBootModePropEnum); err != nil { + return err + } + return nil +} + +func (m *Operations) validateBootMode(formats strfmt.Registry) error { + + if swag.IsZero(m.BootMode) { // not required + return nil + } + + // value enum + if err := m.validateBootModeEnum("bootMode", "body", m.BootMode); err != nil { + return err + } + + return nil +} + +var operationsTypeOperatingModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["normal","manual"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + operationsTypeOperatingModePropEnum = append(operationsTypeOperatingModePropEnum, v) + } +} + +const ( + + // OperationsOperatingModeNormal captures enum value "normal" + OperationsOperatingModeNormal string = "normal" + + // OperationsOperatingModeManual captures enum value "manual" + OperationsOperatingModeManual string = "manual" +) + +// prop value enum +func (m *Operations) validateOperatingModeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, operationsTypeOperatingModePropEnum); err != nil { + return err + } + return nil +} + +func (m *Operations) validateOperatingMode(formats strfmt.Registry) error { + + if swag.IsZero(m.OperatingMode) { // not required + return nil + } + + // value enum + if err := m.validateOperatingModeEnum("operatingMode", "body", m.OperatingMode); err != nil { + return err + } + + return nil +} + +var operationsTypeTaskPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["dston","retrydump","consoleservice","iopreset","remotedstoff","remotedston","iopdump","dumprestart"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + operationsTypeTaskPropEnum = append(operationsTypeTaskPropEnum, v) + } +} + +const ( + + // OperationsTaskDston captures enum value "dston" + OperationsTaskDston string = "dston" + + // OperationsTaskRetrydump captures enum value "retrydump" + OperationsTaskRetrydump string = "retrydump" + + // OperationsTaskConsoleservice captures enum value "consoleservice" + OperationsTaskConsoleservice string = "consoleservice" + + // OperationsTaskIopreset captures enum value "iopreset" + OperationsTaskIopreset string = "iopreset" + + // OperationsTaskRemotedstoff captures enum value "remotedstoff" + OperationsTaskRemotedstoff string = "remotedstoff" + + // OperationsTaskRemotedston captures enum value "remotedston" + OperationsTaskRemotedston string = "remotedston" + + // OperationsTaskIopdump captures enum value "iopdump" + OperationsTaskIopdump string = "iopdump" + + // OperationsTaskDumprestart captures enum value "dumprestart" + OperationsTaskDumprestart string = 
"dumprestart" +) + +// prop value enum +func (m *Operations) validateTaskEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, operationsTypeTaskPropEnum); err != nil { + return err + } + return nil +} + +func (m *Operations) validateTask(formats strfmt.Registry) error { + + if swag.IsZero(m.Task) { // not required + return nil + } + + // value enum + if err := m.validateTaskEnum("task", "body", m.Task); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Operations) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Operations) UnmarshalBinary(b []byte) error { + var res Operations + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/owner_info.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/owner_info.go new file mode 100644 index 00000000000..021980fa029 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/owner_info.go @@ -0,0 +1,183 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// OwnerInfo owner info +// swagger:model OwnerInfo +type OwnerInfo struct { + + // Country code of user + // Required: true + CountryCode *string `json:"countryCode"` + + // Currency code of user + // Required: true + CurrencyCode *string `json:"currencyCode"` + + // Email address of user + // Required: true + Email *string `json:"email"` + + // IAM id of user + // Required: true + IamID *string `json:"iamID"` + + // Indicates if user is an IBMer + // Required: true + IsIBMer *bool `json:"isIBMer"` + + // Name of user + // Required: true + Name *string `json:"name"` + + // Array of Soft Layer IDs + // Required: true + SoftlayerIds []string `json:"softlayerIDs"` + + // User id of user + // Required: true + UserID *string `json:"userID"` +} + +// Validate validates this owner info +func (m *OwnerInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCountryCode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCurrencyCode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEmail(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIamID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIsIBMer(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSoftlayerIds(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUserID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *OwnerInfo) validateCountryCode(formats strfmt.Registry) error { + + if err := validate.Required("countryCode", "body", m.CountryCode); err != nil { + return err + } + + return nil +} + +func (m *OwnerInfo) validateCurrencyCode(formats strfmt.Registry) error { + + if err := validate.Required("currencyCode", "body", m.CurrencyCode); err != nil { + return err + } + + return nil +} + +func (m *OwnerInfo) validateEmail(formats strfmt.Registry) error { + + if err := validate.Required("email", "body", m.Email); err != nil { + return err + } + + return nil +} + +func (m *OwnerInfo) validateIamID(formats strfmt.Registry) error { + + if err := validate.Required("iamID", "body", m.IamID); err != nil { + return err + } + + return nil +} + +func (m *OwnerInfo) validateIsIBMer(formats strfmt.Registry) error { + + if err := validate.Required("isIBMer", "body", m.IsIBMer); err != nil { + return err + } + + return nil +} + +func (m *OwnerInfo) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *OwnerInfo) validateSoftlayerIds(formats strfmt.Registry) error { + + if err := validate.Required("softlayerIDs", "body", m.SoftlayerIds); err != nil { + return err + } + + return nil +} + +func (m *OwnerInfo) validateUserID(formats strfmt.Registry) error { + + if err := validate.Required("userID", "body", m.UserID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *OwnerInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *OwnerInfo) UnmarshalBinary(b []byte) error { + var res OwnerInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance.go new file mode 100644 index 00000000000..ac2720e5149 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance.go @@ -0,0 +1,586 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
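As with all of these models, `OwnerInfo.Validate` does not stop at the first failure: it appends every error to `res` and wraps them in `errors.CompositeValidationError`, so a caller can surface all missing fields at once. A sketch of unpacking that composite error:

```go
package main

import (
	"fmt"

	"github.com/IBM-Cloud/power-go-client/power/models"
	"github.com/go-openapi/errors"
	"github.com/go-openapi/strfmt"
)

func main() {
	// An empty OwnerInfo is missing every required field; Validate
	// collects all of the failures rather than returning only the first.
	err := (&models.OwnerInfo{}).Validate(strfmt.Default)

	if ce, ok := err.(*errors.CompositeError); ok {
		fmt.Printf("%d validation failures:\n", len(ce.Errors))
		for _, e := range ce.Errors {
			fmt.Println(" -", e)
		}
	}
}
```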
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstance p VM instance +// swagger:model PVMInstance +type PVMInstance struct { + + // (deprecated - replaced by networks) The list of addresses and their network information + Addresses []*PVMInstanceNetwork `json:"addresses"` + + // Date/Time of PVM creation + // Format: date-time + CreationDate strfmt.DateTime `json:"creationDate,omitempty"` + + // Size of allocated disk (in GB) + // Required: true + DiskSize *float64 `json:"diskSize"` + + // fault + Fault *PVMInstanceFault `json:"fault,omitempty"` + + // health + Health *PVMInstanceHealth `json:"health,omitempty"` + + // The ImageID used by the server + // Required: true + ImageID *string `json:"imageID"` + + // Maximum amount of memory that can be allocated (in GB, for resize) + Maxmem float64 `json:"maxmem,omitempty"` + + // Maximum number of processors that can be allocated (for resize) + Maxproc float64 `json:"maxproc,omitempty"` + + // Amount of memory allocated (in GB) + // Required: true + Memory *float64 `json:"memory"` + + // whether the instance can be migrated + Migratable *bool `json:"migratable,omitempty"` + + // Minimum amount of memory that can be allocated (in GB, for resize) + Minmem float64 `json:"minmem,omitempty"` + + // Minimum number of processors that can be allocated (for resize) + Minproc float64 `json:"minproc,omitempty"` + + // (deprecated - replaced by networks) List of Network IDs + // Required: true + NetworkIds []string `json:"networkIDs"` + + // The pvm instance networks information + Networks []*PVMInstanceNetwork `json:"networks"` + + // OS system information (usually version and build) + OperatingSystem string `json:"operatingSystem,omitempty"` + + // Type of the OS [aix, ibmi, redhat, sles] + // Required: true + OsType *string `json:"osType"` + + // VM pinning policy to use [none, soft, hard] + PinPolicy string `json:"pinPolicy,omitempty"` + + // Processor type (dedicated, shared, capped) + // Required: true + // Enum: [dedicated shared capped] + ProcType *string `json:"procType"` + + // Number of processors allocated + // Required: true + Processors *float64 `json:"processors"` + + // The progress of an operation + Progress float64 `json:"progress,omitempty"` + + // PCloud PVM Instance ID + // Required: true + PvmInstanceID *string `json:"pvmInstanceID"` + + // If this is an SAP pvm-instance the profile reference will link to the SAP profile + SapProfile *SAPProfileReference `json:"sapProfile,omitempty"` + + // Name of the server + // Required: true + ServerName *string `json:"serverName"` + + // The pvm instance Software Licenses + SoftwareLicenses *SoftwareLicenses `json:"softwareLicenses,omitempty"` + + // The pvm instance SRC lists + Srcs [][]*SRC `json:"srcs"` + + // The status of the instance + // Required: true + Status *string `json:"status"` + + // Storage type where server is deployed + // Required: true + StorageType *string `json:"storageType"` + + // System type used to host the instance + SysType string `json:"sysType,omitempty"` + + // Date/Time of PVM last update + // Format: date-time + UpdatedDate strfmt.DateTime `json:"updatedDate,omitempty"` + + // The pvm instance virtual CPU information + VirtualCores *VirtualCores `json:"virtualCores,omitempty"` + + // List of volume IDs + // Required: true + 
VolumeIds []string `json:"volumeIDs"` +} + +// Validate validates this p VM instance +func (m *PVMInstance) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAddresses(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDiskSize(formats); err != nil { + res = append(res, err) + } + + if err := m.validateFault(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHealth(formats); err != nil { + res = append(res, err) + } + + if err := m.validateImageID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMemory(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetworkIds(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetworks(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOsType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcessors(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePvmInstanceID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSapProfile(formats); err != nil { + res = append(res, err) + } + + if err := m.validateServerName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSoftwareLicenses(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSrcs(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStorageType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUpdatedDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVirtualCores(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVolumeIds(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PVMInstance) validateAddresses(formats strfmt.Registry) error { + + if swag.IsZero(m.Addresses) { // not required + return nil + } + + for i := 0; i < len(m.Addresses); i++ { + if swag.IsZero(m.Addresses[i]) { // not required + continue + } + + if m.Addresses[i] != nil { + if err := m.Addresses[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("addresses" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *PVMInstance) validateCreationDate(formats strfmt.Registry) error { + + if swag.IsZero(m.CreationDate) { // not required + return nil + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateDiskSize(formats strfmt.Registry) error { + + if err := validate.Required("diskSize", "body", m.DiskSize); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateFault(formats strfmt.Registry) error { + + if swag.IsZero(m.Fault) { // not required + return nil + } + + if m.Fault != nil { + if err := m.Fault.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("fault") + } + return err + } + } + + return nil +} + +func (m *PVMInstance) validateHealth(formats strfmt.Registry) error { + + if swag.IsZero(m.Health) { // not required + return nil + } + + if m.Health != nil { + if err := m.Health.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("health") + } + return err + } + } + + return nil +} + +func (m *PVMInstance) validateImageID(formats strfmt.Registry) error { + + if err := validate.Required("imageID", "body", m.ImageID); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateMemory(formats strfmt.Registry) error { + + if err := validate.Required("memory", "body", m.Memory); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateNetworkIds(formats strfmt.Registry) error { + + if err := validate.Required("networkIDs", "body", m.NetworkIds); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateNetworks(formats strfmt.Registry) error { + + if swag.IsZero(m.Networks) { // not required + return nil + } + + for i := 0; i < len(m.Networks); i++ { + if swag.IsZero(m.Networks[i]) { // not required + continue + } + + if m.Networks[i] != nil { + if err := m.Networks[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("networks" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *PVMInstance) validateOsType(formats strfmt.Registry) error { + + if err := validate.Required("osType", "body", m.OsType); err != nil { + return err + } + + return nil +} + +var pVmInstanceTypeProcTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["dedicated","shared","capped"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceTypeProcTypePropEnum = append(pVmInstanceTypeProcTypePropEnum, v) + } +} + +const ( + + // PVMInstanceProcTypeDedicated captures enum value "dedicated" + PVMInstanceProcTypeDedicated string = "dedicated" + + // PVMInstanceProcTypeShared captures enum value "shared" + PVMInstanceProcTypeShared string = "shared" + + // PVMInstanceProcTypeCapped captures enum value "capped" + PVMInstanceProcTypeCapped string = "capped" +) + +// prop value enum +func (m *PVMInstance) validateProcTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceTypeProcTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstance) validateProcType(formats strfmt.Registry) error { + + if err := validate.Required("procType", "body", m.ProcType); err != nil { + return err + } + + // value enum + if err := m.validateProcTypeEnum("procType", "body", *m.ProcType); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateProcessors(formats strfmt.Registry) error { + + if err := validate.Required("processors", "body", m.Processors); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validatePvmInstanceID(formats strfmt.Registry) error { + + if err := validate.Required("pvmInstanceID", "body", m.PvmInstanceID); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateSapProfile(formats strfmt.Registry) error { + + if swag.IsZero(m.SapProfile) { // not required + return nil + } + + if m.SapProfile != nil { + if err := m.SapProfile.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("sapProfile") + } + return err + } + } + + return nil +} + +func (m *PVMInstance) validateServerName(formats strfmt.Registry) error { + + if err := validate.Required("serverName", "body", m.ServerName); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateSoftwareLicenses(formats strfmt.Registry) error { + + if swag.IsZero(m.SoftwareLicenses) { // not required + return nil + } + + if m.SoftwareLicenses != nil { + if err := m.SoftwareLicenses.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("softwareLicenses") + } + return err + } + } + + return nil +} + +func (m *PVMInstance) validateSrcs(formats strfmt.Registry) error { + + if swag.IsZero(m.Srcs) { // not required + return nil + } + + for i := 0; i < len(m.Srcs); i++ { + + for ii := 0; ii < len(m.Srcs[i]); ii++ { + if swag.IsZero(m.Srcs[i][ii]) { // not required + continue + } + + if m.Srcs[i][ii] != nil { + if err := m.Srcs[i][ii].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("srcs" + "." + strconv.Itoa(i) + "." 
+ strconv.Itoa(ii)) + } + return err + } + } + + } + + } + + return nil +} + +func (m *PVMInstance) validateStatus(formats strfmt.Registry) error { + + if err := validate.Required("status", "body", m.Status); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateStorageType(formats strfmt.Registry) error { + + if err := validate.Required("storageType", "body", m.StorageType); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateUpdatedDate(formats strfmt.Registry) error { + + if swag.IsZero(m.UpdatedDate) { // not required + return nil + } + + if err := validate.FormatOf("updatedDate", "body", "date-time", m.UpdatedDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *PVMInstance) validateVirtualCores(formats strfmt.Registry) error { + + if swag.IsZero(m.VirtualCores) { // not required + return nil + } + + if m.VirtualCores != nil { + if err := m.VirtualCores.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("virtualCores") + } + return err + } + } + + return nil +} + +func (m *PVMInstance) validateVolumeIds(formats strfmt.Registry) error { + + if err := validate.Required("volumeIDs", "body", m.VolumeIds); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstance) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstance) UnmarshalBinary(b []byte) error { + var res PVMInstance + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_action.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_action.go new file mode 100644 index 00000000000..6a6cc668028 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_action.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceAction p VM instance action +// swagger:model PVMInstanceAction +type PVMInstanceAction struct { + + // Name of the action to take; can be start, stop, hard-reboot, soft-reboot, immediate-shutdown, reset-state + // Required: true + // Enum: [start stop immediate-shutdown hard-reboot soft-reboot reset-state] + Action *string `json:"action"` +} + +// Validate validates this p VM instance action +func (m *PVMInstanceAction) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAction(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+	}
+	return nil
+}
+
+var pVmInstanceActionTypeActionPropEnum []interface{}
+
+func init() {
+	var res []string
+	if err := json.Unmarshal([]byte(`["start","stop","immediate-shutdown","hard-reboot","soft-reboot","reset-state"]`), &res); err != nil {
+		panic(err)
+	}
+	for _, v := range res {
+		pVmInstanceActionTypeActionPropEnum = append(pVmInstanceActionTypeActionPropEnum, v)
+	}
+}
+
+const (
+
+	// PVMInstanceActionActionStart captures enum value "start"
+	PVMInstanceActionActionStart string = "start"
+
+	// PVMInstanceActionActionStop captures enum value "stop"
+	PVMInstanceActionActionStop string = "stop"
+
+	// PVMInstanceActionActionImmediateShutdown captures enum value "immediate-shutdown"
+	PVMInstanceActionActionImmediateShutdown string = "immediate-shutdown"
+
+	// PVMInstanceActionActionHardReboot captures enum value "hard-reboot"
+	PVMInstanceActionActionHardReboot string = "hard-reboot"
+
+	// PVMInstanceActionActionSoftReboot captures enum value "soft-reboot"
+	PVMInstanceActionActionSoftReboot string = "soft-reboot"
+
+	// PVMInstanceActionActionResetState captures enum value "reset-state"
+	PVMInstanceActionActionResetState string = "reset-state"
+)
+
+// prop value enum
+func (m *PVMInstanceAction) validateActionEnum(path, location string, value string) error {
+	if err := validate.Enum(path, location, value, pVmInstanceActionTypeActionPropEnum); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (m *PVMInstanceAction) validateAction(formats strfmt.Registry) error {
+
+	if err := validate.Required("action", "body", m.Action); err != nil {
+		return err
+	}
+
+	// value enum
+	if err := m.validateActionEnum("action", "body", *m.Action); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *PVMInstanceAction) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *PVMInstanceAction) UnmarshalBinary(b []byte) error {
+	var res PVMInstanceAction
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_add_network.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_add_network.go
new file mode 100644
index 00000000000..8d0bc58ea67
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_add_network.go
@@ -0,0 +1,67 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// PVMInstanceAddNetwork p VM instance add network
+// swagger:model PVMInstanceAddNetwork
+type PVMInstanceAddNetwork struct {
+
+	// The requested ip address of this network interface
+	IPAddress string `json:"ipAddress,omitempty"`
+
+	// ID of the network
+	// Required: true
+	NetworkID *string `json:"networkID"`
+}
+
+// Validate validates this p VM instance add network
+func (m *PVMInstanceAddNetwork) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateNetworkID(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *PVMInstanceAddNetwork) validateNetworkID(formats strfmt.Registry) error {
+
+	if err := validate.Required("networkID", "body", m.NetworkID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *PVMInstanceAddNetwork) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *PVMInstanceAddNetwork) UnmarshalBinary(b []byte) error {
+	var res PVMInstanceAddNetwork
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_address.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_address.go
new file mode 100644
index 00000000000..3cff8dad50f
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_address.go
@@ -0,0 +1,66 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/swag"
+)
+
+// PVMInstanceAddress deprecated - replaced by PVMInstanceNetwork
+// swagger:model PVMInstanceAddress
+type PVMInstanceAddress struct {
+	PVMInstanceNetwork
+}
+
+// UnmarshalJSON unmarshals this object from a JSON structure
+func (m *PVMInstanceAddress) UnmarshalJSON(raw []byte) error {
+	// AO0
+	var aO0 PVMInstanceNetwork
+	if err := swag.ReadJSON(raw, &aO0); err != nil {
+		return err
+	}
+	m.PVMInstanceNetwork = aO0
+
+	return nil
+}
+
+// MarshalJSON marshals this object to a JSON structure
+func (m PVMInstanceAddress) MarshalJSON() ([]byte, error) {
+	_parts := make([][]byte, 0, 1)
+
+	aO0, err := swag.WriteJSON(m.PVMInstanceNetwork)
+	if err != nil {
+		return nil, err
+	}
+	_parts = append(_parts, aO0)
+
+	return swag.ConcatJSON(_parts...), nil
+}
+
+// Validate validates this p VM instance address
+func (m *PVMInstanceAddress) Validate(formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *PVMInstanceAddress) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *PVMInstanceAddress) UnmarshalBinary(b []byte) error {
+	var res PVMInstanceAddress
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_capture.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_capture.go
new file mode 100644
index 00000000000..c33e6451e06
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_capture.go
@@ -0,0 +1,136 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceCapture p VM instance capture +// swagger:model PVMInstanceCapture +type PVMInstanceCapture struct { + + // Destination for the deployable image + // Required: true + // Enum: [image-catalog cloud-storage both] + CaptureDestination *string `json:"captureDestination"` + + // Name of the deployable image created for the captured PVMInstance + // Required: true + CaptureName *string `json:"captureName"` + + // List of Data volume IDs to include in the captured PVMInstance + CaptureVolumeIds []string `json:"captureVolumeIDs"` + + // Cloud Storage Access key + CloudStorageAccessKey string `json:"cloudStorageAccessKey,omitempty"` + + // Cloud Storage Image Path (bucket-name [/folder/../..]) + CloudStorageImagePath string `json:"cloudStorageImagePath,omitempty"` + + // Cloud Storage Region + CloudStorageRegion string `json:"cloudStorageRegion,omitempty"` + + // Cloud Storage Secret key + CloudStorageSecretKey string `json:"cloudStorageSecretKey,omitempty"` +} + +// Validate validates this p VM instance capture +func (m *PVMInstanceCapture) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCaptureDestination(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCaptureName(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var pVmInstanceCaptureTypeCaptureDestinationPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["image-catalog","cloud-storage","both"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceCaptureTypeCaptureDestinationPropEnum = append(pVmInstanceCaptureTypeCaptureDestinationPropEnum, v) + } +} + +const ( + + // PVMInstanceCaptureCaptureDestinationImageCatalog captures enum value "image-catalog" + PVMInstanceCaptureCaptureDestinationImageCatalog string = "image-catalog" + + // PVMInstanceCaptureCaptureDestinationCloudStorage captures enum value "cloud-storage" + PVMInstanceCaptureCaptureDestinationCloudStorage string = "cloud-storage" + + // PVMInstanceCaptureCaptureDestinationBoth captures enum value "both" + PVMInstanceCaptureCaptureDestinationBoth string = "both" +) + +// prop value enum +func (m *PVMInstanceCapture) validateCaptureDestinationEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceCaptureTypeCaptureDestinationPropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstanceCapture) validateCaptureDestination(formats strfmt.Registry) error { + + if err := validate.Required("captureDestination", "body", m.CaptureDestination); err != nil { + return err + } + + // value enum + if err := m.validateCaptureDestinationEnum("captureDestination", "body", *m.CaptureDestination); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceCapture) validateCaptureName(formats strfmt.Registry) error { + + if err := validate.Required("captureName", "body", m.CaptureName); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceCapture) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// 
UnmarshalBinary interface implementation +func (m *PVMInstanceCapture) UnmarshalBinary(b []byte) error { + var res PVMInstanceCapture + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_clone.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_clone.go new file mode 100644 index 00000000000..bad6a5beaec --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_clone.go @@ -0,0 +1,191 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceClone p VM instance clone +// swagger:model PVMInstanceClone +type PVMInstanceClone struct { + + // The name of the SSH key pair provided to the server for authenticating users (looked up in the tenant's list of keys) + KeyPairName string `json:"keyPairName,omitempty"` + + // Amount of memory allocated (in GB) + Memory *float64 `json:"memory,omitempty"` + + // Name of the server to create + // Required: true + Name *string `json:"name"` + + // The pvm instance networks information + // Required: true + Networks []*PVMInstanceAddNetwork `json:"networks"` + + // Processor type (dedicated, shared, capped) + // Enum: [dedicated shared capped] + ProcType *string `json:"procType,omitempty"` + + // Number of processors allocated + Processors *float64 `json:"processors,omitempty"` + + // The pvm instance Software Licenses + SoftwareLicenses *SoftwareLicenses `json:"softwareLicenses,omitempty"` + + // List of volume IDs + VolumeIds []string `json:"volumeIDs"` +} + +// Validate validates this p VM instance clone +func (m *PVMInstanceClone) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetworks(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSoftwareLicenses(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PVMInstanceClone) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceClone) validateNetworks(formats strfmt.Registry) error { + + if err := validate.Required("networks", "body", m.Networks); err != nil { + return err + } + + for i := 0; i < len(m.Networks); i++ { + if swag.IsZero(m.Networks[i]) { // not required + continue + } + + if m.Networks[i] != nil { + if err := m.Networks[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("networks" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var pVmInstanceCloneTypeProcTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["dedicated","shared","capped"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceCloneTypeProcTypePropEnum = append(pVmInstanceCloneTypeProcTypePropEnum, v) + } +} + +const ( + + // PVMInstanceCloneProcTypeDedicated captures enum value "dedicated" + PVMInstanceCloneProcTypeDedicated string = "dedicated" + + // PVMInstanceCloneProcTypeShared captures enum value "shared" + PVMInstanceCloneProcTypeShared string = "shared" + + // PVMInstanceCloneProcTypeCapped captures enum value "capped" + PVMInstanceCloneProcTypeCapped string = "capped" +) + +// prop value enum +func (m *PVMInstanceClone) validateProcTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceCloneTypeProcTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstanceClone) validateProcType(formats strfmt.Registry) error { + + if swag.IsZero(m.ProcType) { // not required + return nil + } + + // value enum + if err := m.validateProcTypeEnum("procType", "body", *m.ProcType); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceClone) validateSoftwareLicenses(formats strfmt.Registry) error { + + if swag.IsZero(m.SoftwareLicenses) { // not required + return nil + } + + if m.SoftwareLicenses != nil { + if err := m.SoftwareLicenses.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("softwareLicenses") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceClone) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceClone) UnmarshalBinary(b []byte) error { + var res PVMInstanceClone + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_console.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_console.go new file mode 100644 index 00000000000..62face87955 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_console.go @@ -0,0 +1,64 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceConsole p VM instance console +// swagger:model PVMInstanceConsole +type PVMInstanceConsole struct { + + // The URL to the noVNC console for the PVM Instance + // Required: true + ConsoleURL *string `json:"consoleURL"` +} + +// Validate validates this p VM instance console +func (m *PVMInstanceConsole) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateConsoleURL(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *PVMInstanceConsole) validateConsoleURL(formats strfmt.Registry) error { + + if err := validate.Required("consoleURL", "body", m.ConsoleURL); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceConsole) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceConsole) UnmarshalBinary(b []byte) error { + var res PVMInstanceConsole + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_create.go new file mode 100644 index 00000000000..08ce25af0e6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_create.go @@ -0,0 +1,407 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceCreate p VM instance create +// swagger:model PVMInstanceCreate +type PVMInstanceCreate struct { + + // Image ID of the image to use for the server + // Required: true + ImageID *string `json:"imageID"` + + // The name of the SSH key pair provided to the server for authenticating users (looked up in the tenant's list of keys) + KeyPairName string `json:"keyPairName,omitempty"` + + // Amount of memory allocated (in GB) + // Required: true + Memory *float64 `json:"memory"` + + // Indicates if the server is allowed to migrate between hosts + Migratable *bool `json:"migratable,omitempty"` + + // (deprecated - replaced by networks) List of Network IDs + NetworkIds []string `json:"networkIDs"` + + // The pvm instance networks information + Networks []*PVMInstanceAddNetwork `json:"networks"` + + // pin policy + PinPolicy PinPolicy `json:"pinPolicy,omitempty"` + + // Processor type (dedicated, shared, capped) + // Required: true + // Enum: [dedicated shared capped] + ProcType *string `json:"procType"` + + // Number of processors allocated + // Required: true + Processors *float64 `json:"processors"` + + // Affinity policy for replicants being created; affinity for the same host, anti-affinity for different hosts, none for no preference + // Enum: [affinity anti-affinity none] + ReplicantAffinityPolicy *string `json:"replicantAffinityPolicy,omitempty"` + + // How to name the created vms + // Enum: [prefix suffix] + ReplicantNamingScheme *string `json:"replicantNamingScheme,omitempty"` + + // Number of duplicate instances to create in this request + Replicants float64 `json:"replicants,omitempty"` + + // Name of the server to create + // Required: true + ServerName *string `json:"serverName"` + + // The pvm instance Software Licenses + SoftwareLicenses *SoftwareLicenses `json:"softwareLicenses,omitempty"` + + // Storage type for server deployment + StorageType string `json:"storageType,omitempty"` + + // System type used to host the instance + SysType string `json:"sysType,omitempty"` + + // Cloud init user defined data + UserData string `json:"userData,omitempty"` + + // The pvm instance virtual CPU information + VirtualCores *VirtualCores 
`json:"virtualCores,omitempty"` + + // List of volume IDs + VolumeIds []string `json:"volumeIDs"` +} + +// Validate validates this p VM instance create +func (m *PVMInstanceCreate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateImageID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMemory(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetworks(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePinPolicy(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcessors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateReplicantAffinityPolicy(formats); err != nil { + res = append(res, err) + } + + if err := m.validateReplicantNamingScheme(formats); err != nil { + res = append(res, err) + } + + if err := m.validateServerName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSoftwareLicenses(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVirtualCores(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PVMInstanceCreate) validateImageID(formats strfmt.Registry) error { + + if err := validate.Required("imageID", "body", m.ImageID); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceCreate) validateMemory(formats strfmt.Registry) error { + + if err := validate.Required("memory", "body", m.Memory); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceCreate) validateNetworks(formats strfmt.Registry) error { + + if swag.IsZero(m.Networks) { // not required + return nil + } + + for i := 0; i < len(m.Networks); i++ { + if swag.IsZero(m.Networks[i]) { // not required + continue + } + + if m.Networks[i] != nil { + if err := m.Networks[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("networks" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *PVMInstanceCreate) validatePinPolicy(formats strfmt.Registry) error { + + if swag.IsZero(m.PinPolicy) { // not required + return nil + } + + if err := m.PinPolicy.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pinPolicy") + } + return err + } + + return nil +} + +var pVmInstanceCreateTypeProcTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["dedicated","shared","capped"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceCreateTypeProcTypePropEnum = append(pVmInstanceCreateTypeProcTypePropEnum, v) + } +} + +const ( + + // PVMInstanceCreateProcTypeDedicated captures enum value "dedicated" + PVMInstanceCreateProcTypeDedicated string = "dedicated" + + // PVMInstanceCreateProcTypeShared captures enum value "shared" + PVMInstanceCreateProcTypeShared string = "shared" + + // PVMInstanceCreateProcTypeCapped captures enum value "capped" + PVMInstanceCreateProcTypeCapped string = "capped" +) + +// prop value enum +func (m *PVMInstanceCreate) validateProcTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceCreateTypeProcTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstanceCreate) validateProcType(formats strfmt.Registry) error { + + if err := validate.Required("procType", "body", m.ProcType); err != nil { + return err + } + + // value enum + if err := m.validateProcTypeEnum("procType", "body", *m.ProcType); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceCreate) validateProcessors(formats strfmt.Registry) error { + + if err := validate.Required("processors", "body", m.Processors); err != nil { + return err + } + + return nil +} + +var pVmInstanceCreateTypeReplicantAffinityPolicyPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["affinity","anti-affinity","none"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceCreateTypeReplicantAffinityPolicyPropEnum = append(pVmInstanceCreateTypeReplicantAffinityPolicyPropEnum, v) + } +} + +const ( + + // PVMInstanceCreateReplicantAffinityPolicyAffinity captures enum value "affinity" + PVMInstanceCreateReplicantAffinityPolicyAffinity string = "affinity" + + // PVMInstanceCreateReplicantAffinityPolicyAntiAffinity captures enum value "anti-affinity" + PVMInstanceCreateReplicantAffinityPolicyAntiAffinity string = "anti-affinity" + + // PVMInstanceCreateReplicantAffinityPolicyNone captures enum value "none" + PVMInstanceCreateReplicantAffinityPolicyNone string = "none" +) + +// prop value enum +func (m *PVMInstanceCreate) validateReplicantAffinityPolicyEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceCreateTypeReplicantAffinityPolicyPropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstanceCreate) validateReplicantAffinityPolicy(formats strfmt.Registry) error { + + if swag.IsZero(m.ReplicantAffinityPolicy) { // not required + return nil + } + + // value enum + if err := m.validateReplicantAffinityPolicyEnum("replicantAffinityPolicy", "body", *m.ReplicantAffinityPolicy); err != nil { + return err + } + + return nil +} + +var pVmInstanceCreateTypeReplicantNamingSchemePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["prefix","suffix"]`), &res); err 
!= nil { + panic(err) + } + for _, v := range res { + pVmInstanceCreateTypeReplicantNamingSchemePropEnum = append(pVmInstanceCreateTypeReplicantNamingSchemePropEnum, v) + } +} + +const ( + + // PVMInstanceCreateReplicantNamingSchemePrefix captures enum value "prefix" + PVMInstanceCreateReplicantNamingSchemePrefix string = "prefix" + + // PVMInstanceCreateReplicantNamingSchemeSuffix captures enum value "suffix" + PVMInstanceCreateReplicantNamingSchemeSuffix string = "suffix" +) + +// prop value enum +func (m *PVMInstanceCreate) validateReplicantNamingSchemeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceCreateTypeReplicantNamingSchemePropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstanceCreate) validateReplicantNamingScheme(formats strfmt.Registry) error { + + if swag.IsZero(m.ReplicantNamingScheme) { // not required + return nil + } + + // value enum + if err := m.validateReplicantNamingSchemeEnum("replicantNamingScheme", "body", *m.ReplicantNamingScheme); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceCreate) validateServerName(formats strfmt.Registry) error { + + if err := validate.Required("serverName", "body", m.ServerName); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceCreate) validateSoftwareLicenses(formats strfmt.Registry) error { + + if swag.IsZero(m.SoftwareLicenses) { // not required + return nil + } + + if m.SoftwareLicenses != nil { + if err := m.SoftwareLicenses.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("softwareLicenses") + } + return err + } + } + + return nil +} + +func (m *PVMInstanceCreate) validateVirtualCores(formats strfmt.Registry) error { + + if swag.IsZero(m.VirtualCores) { // not required + return nil + } + + if m.VirtualCores != nil { + if err := m.VirtualCores.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("virtualCores") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceCreate) UnmarshalBinary(b []byte) error { + var res PVMInstanceCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_fault.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_fault.go new file mode 100644 index 00000000000..0b5a25b3447 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_fault.go @@ -0,0 +1,77 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// PVMInstanceFault Fault information (if occurred)
+// swagger:model PVMInstanceFault
+type PVMInstanceFault struct {
+
+	// The fault status of the server, if any
+	Code float64 `json:"code,omitempty"`
+
+	// The date and time the fault occurred
+	// Format: date-time
+	Created strfmt.DateTime `json:"created,omitempty"`
+
+	// The fault details of the server, if any
+	Details string `json:"details,omitempty"`
+
+	// The fault message of the server, if any
+	Message string `json:"message,omitempty"`
+}
+
+// Validate validates this p VM instance fault
+func (m *PVMInstanceFault) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateCreated(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *PVMInstanceFault) validateCreated(formats strfmt.Registry) error {
+
+	if swag.IsZero(m.Created) { // not required
+		return nil
+	}
+
+	if err := validate.FormatOf("created", "body", "date-time", m.Created.String(), formats); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *PVMInstanceFault) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *PVMInstanceFault) UnmarshalBinary(b []byte) error {
+	var res PVMInstanceFault
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_health.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_health.go
new file mode 100644
index 00000000000..64e5731b4fe
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_health.go
@@ -0,0 +1,49 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/swag"
+)
+
+// PVMInstanceHealth PVM's health status details
+// swagger:model PVMInstanceHealth
+type PVMInstanceHealth struct {
+
+	// Date/Time of PVM last health status change
+	LastUpdate string `json:"lastUpdate,omitempty"`
+
+	// The health status reason, if any
+	Reason string `json:"reason,omitempty"`
+
+	// The PVM's health status value
+	Status string `json:"status,omitempty"`
+}
+
+// Validate validates this p VM instance health
+func (m *PVMInstanceHealth) Validate(formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *PVMInstanceHealth) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *PVMInstanceHealth) UnmarshalBinary(b []byte) error {
+	var res PVMInstanceHealth
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_list.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_list.go
new file mode 100644
index 00000000000..489e0da3bd1
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_list.go
@@ -0,0 +1,45 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"strconv"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+)
+
+// PVMInstanceList A list of PVMInstances
+// swagger:model PVMInstanceList
+type PVMInstanceList []*PVMInstance
+
+// Validate validates this p VM instance list
+func (m PVMInstanceList) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	for i := 0; i < len(m); i++ {
+		if swag.IsZero(m[i]) { // not required
+			continue
+		}
+
+		if m[i] != nil {
+			if err := m[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName(strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_multi_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_multi_create.go
new file mode 100644
index 00000000000..800c2b900a1
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_multi_create.go
@@ -0,0 +1,157 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceMultiCreate p VM instance multi create +// swagger:model PVMInstanceMultiCreate +type PVMInstanceMultiCreate struct { + + // Affinity policy for pvm-instances being created; affinity for the same host, anti-affinity for different hosts, none for no preference + // Enum: [affinity anti-affinity none] + AffinityPolicy *string `json:"affinityPolicy,omitempty"` + + // Number of pvm-instances to create + Count int64 `json:"count,omitempty"` + + // Where to place the numerical number of the multi-created instance + // Enum: [prefix suffix] + Numerical *string `json:"numerical,omitempty"` +} + +// Validate validates this p VM instance multi create +func (m *PVMInstanceMultiCreate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAffinityPolicy(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNumerical(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var pVmInstanceMultiCreateTypeAffinityPolicyPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["affinity","anti-affinity","none"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceMultiCreateTypeAffinityPolicyPropEnum = append(pVmInstanceMultiCreateTypeAffinityPolicyPropEnum, v) + } +} + +const ( + + // PVMInstanceMultiCreateAffinityPolicyAffinity captures enum value "affinity" + PVMInstanceMultiCreateAffinityPolicyAffinity string = "affinity" + + // PVMInstanceMultiCreateAffinityPolicyAntiAffinity captures enum value "anti-affinity" + PVMInstanceMultiCreateAffinityPolicyAntiAffinity string = "anti-affinity" + + // PVMInstanceMultiCreateAffinityPolicyNone captures enum value "none" + PVMInstanceMultiCreateAffinityPolicyNone string = "none" +) + +// prop value enum +func (m *PVMInstanceMultiCreate) validateAffinityPolicyEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceMultiCreateTypeAffinityPolicyPropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstanceMultiCreate) validateAffinityPolicy(formats strfmt.Registry) error { + + if swag.IsZero(m.AffinityPolicy) { // not required + return nil + } + + // value enum + if err := m.validateAffinityPolicyEnum("affinityPolicy", "body", *m.AffinityPolicy); err != nil { + return err + } + + return nil +} + +var pVmInstanceMultiCreateTypeNumericalPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["prefix","suffix"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceMultiCreateTypeNumericalPropEnum = append(pVmInstanceMultiCreateTypeNumericalPropEnum, v) + } +} + +const ( + + // PVMInstanceMultiCreateNumericalPrefix captures enum value "prefix" + PVMInstanceMultiCreateNumericalPrefix string = "prefix" + + // PVMInstanceMultiCreateNumericalSuffix captures enum value "suffix" + PVMInstanceMultiCreateNumericalSuffix string = "suffix" +) + +// prop value enum +func (m *PVMInstanceMultiCreate) validateNumericalEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceMultiCreateTypeNumericalPropEnum); err != nil { + 
return err + } + return nil +} + +func (m *PVMInstanceMultiCreate) validateNumerical(formats strfmt.Registry) error { + + if swag.IsZero(m.Numerical) { // not required + return nil + } + + // value enum + if err := m.validateNumericalEnum("numerical", "body", *m.Numerical); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceMultiCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceMultiCreate) UnmarshalBinary(b []byte) error { + var res PVMInstanceMultiCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_network.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_network.go new file mode 100644 index 00000000000..5a9dfa189d1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_network.go @@ -0,0 +1,67 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// PVMInstanceNetwork A map containing information about a network address +// swagger:model PVMInstanceNetwork +type PVMInstanceNetwork struct { + + // The external ip address (for pub-vlan networks) + ExternalIP string `json:"externalIP,omitempty"` + + // Link to PVM Instance Network + Href string `json:"href,omitempty"` + + // (deprecated - replaced by ipAddress) + IP string `json:"ip,omitempty"` + + // The ip address of this network interface + IPAddress string `json:"ipAddress,omitempty"` + + // The mac address of the network interface + MacAddress string `json:"macAddress,omitempty"` + + // ID of the network + NetworkID string `json:"networkID,omitempty"` + + // The name of the network the address is on + NetworkName string `json:"networkName,omitempty"` + + // The address type (fixed or dynamic) + Type string `json:"type,omitempty"` + + // The version of the information provided + Version float64 `json:"version,omitempty"` +} + +// Validate validates this p VM instance network +func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceNetwork) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceNetwork) UnmarshalBinary(b []byte) error { + var res PVMInstanceNetwork + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_networks.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_networks.go new file mode 100644 index 00000000000..4ed2e3cf5af --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_networks.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"strconv"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// PVMInstanceNetworks p VM instance networks
+// swagger:model PVMInstanceNetworks
+type PVMInstanceNetworks struct {
+
+	// PVM Instance Networks
+	// Required: true
+	Networks []*PVMInstanceNetwork `json:"networks"`
+}
+
+// Validate validates this p VM instance networks
+func (m *PVMInstanceNetworks) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateNetworks(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *PVMInstanceNetworks) validateNetworks(formats strfmt.Registry) error {
+
+	if err := validate.Required("networks", "body", m.Networks); err != nil {
+		return err
+	}
+
+	for i := 0; i < len(m.Networks); i++ {
+		if swag.IsZero(m.Networks[i]) { // not required
+			continue
+		}
+
+		if m.Networks[i] != nil {
+			if err := m.Networks[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("networks" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *PVMInstanceNetworks) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *PVMInstanceNetworks) UnmarshalBinary(b []byte) error {
+	var res PVMInstanceNetworks
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_operation.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_operation.go
new file mode 100644
index 00000000000..fe752989ad1
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_operation.go
@@ -0,0 +1,127 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"encoding/json"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// PVMInstanceOperation p VM instance operation
+// swagger:model PVMInstanceOperation
+type PVMInstanceOperation struct {
+
+	// operation
+	// Required: true
+	Operation *Operations `json:"operation"`
+
+	// Name of the operation to execute; can be job or boot
+	// Required: true
+	// Enum: [job boot]
+	OperationType *string `json:"operationType"`
+}
+
+// Validate validates this p VM instance operation
+func (m *PVMInstanceOperation) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateOperation(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateOperationType(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *PVMInstanceOperation) validateOperation(formats strfmt.Registry) error {
+
+	if err := validate.Required("operation", "body", m.Operation); err != nil {
+		return err
+	}
+
+	if m.Operation != nil {
+		if err := m.Operation.Validate(formats); err != nil {
+			if ve, ok := err.(*errors.Validation); ok {
+				return ve.ValidateName("operation")
+			}
+			return err
+		}
+	}
+
+	return nil
+}
+
+var pVmInstanceOperationTypeOperationTypePropEnum []interface{}
+
+func init() {
+	var res []string
+	if err := json.Unmarshal([]byte(`["job","boot"]`), &res); err != nil {
+		panic(err)
+	}
+	for _, v := range res {
+		pVmInstanceOperationTypeOperationTypePropEnum = append(pVmInstanceOperationTypeOperationTypePropEnum, v)
+	}
+}
+
+const (
+
+	// PVMInstanceOperationOperationTypeJob captures enum value "job"
+	PVMInstanceOperationOperationTypeJob string = "job"
+
+	// PVMInstanceOperationOperationTypeBoot captures enum value "boot"
+	PVMInstanceOperationOperationTypeBoot string = "boot"
+)
+
+// prop value enum
+func (m *PVMInstanceOperation) validateOperationTypeEnum(path, location string, value string) error {
+	if err := validate.Enum(path, location, value, pVmInstanceOperationTypeOperationTypePropEnum); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (m *PVMInstanceOperation) validateOperationType(formats strfmt.Registry) error {
+
+	if err := validate.Required("operationType", "body", m.OperationType); err != nil {
+		return err
+	}
+
+	// value enum
+	if err := m.validateOperationTypeEnum("operationType", "body", *m.OperationType); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *PVMInstanceOperation) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *PVMInstanceOperation) UnmarshalBinary(b []byte) error {
+	var res PVMInstanceOperation
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_reference.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_reference.go
new file mode 100644
index 00000000000..c3d92bcb893
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_reference.go
@@ -0,0 +1,549 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceReference p VM instance reference +// swagger:model PVMInstanceReference +type PVMInstanceReference struct { + + // (deprecated - replaced by networks) The list of addresses and their network information + Addresses []*PVMInstanceNetwork `json:"addresses"` + + // Date/Time of PVM creation + // Format: date-time + CreationDate strfmt.DateTime `json:"creationDate,omitempty"` + + // Size of allocated disk (in GB) + // Required: true + DiskSize *float64 `json:"diskSize"` + + // fault + Fault *PVMInstanceFault `json:"fault,omitempty"` + + // health + Health *PVMInstanceHealth `json:"health,omitempty"` + + // Link to Cloud Instance resource + // Required: true + Href *string `json:"href"` + + // The ImageID used by the server + // Required: true + ImageID *string `json:"imageID"` + + // Maximum amount of memory that can be allocated (in GB, for resize) + Maxmem float64 `json:"maxmem,omitempty"` + + // Maximum number of processors that can be allocated (for resize) + Maxproc float64 `json:"maxproc,omitempty"` + + // Amount of memory allocated (in GB) + // Required: true + Memory *float64 `json:"memory"` + + // Minimum amount of memory that can be allocated (in GB, for resize) + Minmem float64 `json:"minmem,omitempty"` + + // Minimum number of processors that can be allocated (for resize) + Minproc float64 `json:"minproc,omitempty"` + + // The list of addresses and their network information + Networks []*PVMInstanceNetwork `json:"networks"` + + // OS system information (usually version and build) + OperatingSystem string `json:"operatingSystem,omitempty"` + + // Type of the OS [aix, ibmi, redhat, sles] + // Required: true + OsType *string `json:"osType"` + + // VM pinning policy to use [none, soft, hard] + PinPolicy string `json:"pinPolicy,omitempty"` + + // Processor type (dedicated, shared, capped) + // Required: true + // Enum: [dedicated shared capped] + ProcType *string `json:"procType"` + + // Number of processors allocated + // Required: true + Processors *float64 `json:"processors"` + + // The progress of an operation + Progress float64 `json:"progress,omitempty"` + + // PCloud PVM Instance ID + // Required: true + PvmInstanceID *string `json:"pvmInstanceID"` + + // If this is an SAP pvm-instance the profile reference will link to the SAP profile + SapProfile *SAPProfileReference `json:"sapProfile,omitempty"` + + // Name of the server + // Required: true + ServerName *string `json:"serverName"` + + // The pvm instance Software Licenses + SoftwareLicenses *SoftwareLicenses `json:"softwareLicenses,omitempty"` + + // The pvm instance SRC lists + Srcs [][]*SRC `json:"srcs"` + + // The status of the instance + // Required: true + Status *string `json:"status"` + + // System type used to host the instance + SysType string `json:"sysType,omitempty"` + + // Date/Time of PVM last update + // Format: date-time + UpdatedDate strfmt.DateTime `json:"updatedDate,omitempty"` + + // The pvm instance virtual CPU information + VirtualCores *VirtualCores `json:"virtualCores,omitempty"` +} + +// Validate validates this p VM instance reference +func (m *PVMInstanceReference) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAddresses(formats); err != nil { + res = append(res, err) + } + + if 
err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDiskSize(formats); err != nil { + res = append(res, err) + } + + if err := m.validateFault(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHealth(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHref(formats); err != nil { + res = append(res, err) + } + + if err := m.validateImageID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMemory(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetworks(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOsType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcessors(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePvmInstanceID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSapProfile(formats); err != nil { + res = append(res, err) + } + + if err := m.validateServerName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSoftwareLicenses(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSrcs(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUpdatedDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVirtualCores(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PVMInstanceReference) validateAddresses(formats strfmt.Registry) error { + + if swag.IsZero(m.Addresses) { // not required + return nil + } + + for i := 0; i < len(m.Addresses); i++ { + if swag.IsZero(m.Addresses[i]) { // not required + continue + } + + if m.Addresses[i] != nil { + if err := m.Addresses[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("addresses" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *PVMInstanceReference) validateCreationDate(formats strfmt.Registry) error { + + if swag.IsZero(m.CreationDate) { // not required + return nil + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateDiskSize(formats strfmt.Registry) error { + + if err := validate.Required("diskSize", "body", m.DiskSize); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateFault(formats strfmt.Registry) error { + + if swag.IsZero(m.Fault) { // not required + return nil + } + + if m.Fault != nil { + if err := m.Fault.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("fault") + } + return err + } + } + + return nil +} + +func (m *PVMInstanceReference) validateHealth(formats strfmt.Registry) error { + + if swag.IsZero(m.Health) { // not required + return nil + } + + if m.Health != nil { + if err := m.Health.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("health") + } + return err + } + } + + return nil +} + +func (m *PVMInstanceReference) validateHref(formats strfmt.Registry) error { + + if err := validate.Required("href", "body", m.Href); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateImageID(formats strfmt.Registry) error { + + if err := validate.Required("imageID", "body", m.ImageID); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateMemory(formats strfmt.Registry) error { + + if err := validate.Required("memory", "body", m.Memory); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateNetworks(formats strfmt.Registry) error { + + if swag.IsZero(m.Networks) { // not required + return nil + } + + for i := 0; i < len(m.Networks); i++ { + if swag.IsZero(m.Networks[i]) { // not required + continue + } + + if m.Networks[i] != nil { + if err := m.Networks[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("networks" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *PVMInstanceReference) validateOsType(formats strfmt.Registry) error { + + if err := validate.Required("osType", "body", m.OsType); err != nil { + return err + } + + return nil +} + +var pVmInstanceReferenceTypeProcTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["dedicated","shared","capped"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceReferenceTypeProcTypePropEnum = append(pVmInstanceReferenceTypeProcTypePropEnum, v) + } +} + +const ( + + // PVMInstanceReferenceProcTypeDedicated captures enum value "dedicated" + PVMInstanceReferenceProcTypeDedicated string = "dedicated" + + // PVMInstanceReferenceProcTypeShared captures enum value "shared" + PVMInstanceReferenceProcTypeShared string = "shared" + + // PVMInstanceReferenceProcTypeCapped captures enum value "capped" + PVMInstanceReferenceProcTypeCapped string = "capped" +) + +// prop value enum +func (m *PVMInstanceReference) validateProcTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceReferenceTypeProcTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstanceReference) validateProcType(formats strfmt.Registry) error { + + if err := validate.Required("procType", "body", m.ProcType); err != nil { + return err + } + + // value enum + if err := m.validateProcTypeEnum("procType", "body", *m.ProcType); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateProcessors(formats strfmt.Registry) error { + + if err := validate.Required("processors", "body", m.Processors); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validatePvmInstanceID(formats strfmt.Registry) error { + + if err := validate.Required("pvmInstanceID", "body", m.PvmInstanceID); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateSapProfile(formats strfmt.Registry) error { + + if swag.IsZero(m.SapProfile) { // not required + return nil + } + + if m.SapProfile != nil { + if err := m.SapProfile.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("sapProfile") + } + return err + } + } + + return nil +} + +func (m *PVMInstanceReference) validateServerName(formats strfmt.Registry) error { + + if err := validate.Required("serverName", "body", m.ServerName); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateSoftwareLicenses(formats strfmt.Registry) error { + + if swag.IsZero(m.SoftwareLicenses) { // not required + return nil + } + + if m.SoftwareLicenses != nil { + if err := m.SoftwareLicenses.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("softwareLicenses") + } + return err + } + } + + return nil +} + +func (m *PVMInstanceReference) validateSrcs(formats strfmt.Registry) error { + + if swag.IsZero(m.Srcs) { // not required + return nil + } + + for i := 0; i < len(m.Srcs); i++ { + + for ii := 0; ii < len(m.Srcs[i]); ii++ { + if swag.IsZero(m.Srcs[i][ii]) { // not required + continue + } + + if m.Srcs[i][ii] != nil { + if err := m.Srcs[i][ii].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("srcs" + "." + strconv.Itoa(i) + "." 
+ strconv.Itoa(ii)) + } + return err + } + } + + } + + } + + return nil +} + +func (m *PVMInstanceReference) validateStatus(formats strfmt.Registry) error { + + if err := validate.Required("status", "body", m.Status); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateUpdatedDate(formats strfmt.Registry) error { + + if swag.IsZero(m.UpdatedDate) { // not required + return nil + } + + if err := validate.FormatOf("updatedDate", "body", "date-time", m.UpdatedDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceReference) validateVirtualCores(formats strfmt.Registry) error { + + if swag.IsZero(m.VirtualCores) { // not required + return nil + } + + if m.VirtualCores != nil { + if err := m.VirtualCores.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("virtualCores") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceReference) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceReference) UnmarshalBinary(b []byte) error { + var res PVMInstanceReference + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_remove_network.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_remove_network.go new file mode 100644 index 00000000000..1d6dc39fba2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_remove_network.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// PVMInstanceRemoveNetwork p VM instance remove network +// swagger:model PVMInstanceRemoveNetwork +type PVMInstanceRemoveNetwork struct { + + // The mac address of the network interface to be removed + MacAddress string `json:"macAddress,omitempty"` +} + +// Validate validates this p VM instance remove network +func (m *PVMInstanceRemoveNetwork) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceRemoveNetwork) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceRemoveNetwork) UnmarshalBinary(b []byte) error { + var res PVMInstanceRemoveNetwork + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_update.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_update.go new file mode 100644 index 00000000000..cb997f2c487 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_update.go @@ -0,0 +1,191 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceUpdate p VM instance update +// swagger:model PVMInstanceUpdate +type PVMInstanceUpdate struct { + + // Amount of memory allocated (in GB) + Memory float64 `json:"memory,omitempty"` + + // Indicates if the server is allowed to migrate between hosts + Migratable *bool `json:"migratable,omitempty"` + + // pin policy + PinPolicy PinPolicy `json:"pinPolicy,omitempty"` + + // Processor type (dedicated, shared, capped) + // Enum: [dedicated shared capped] + ProcType string `json:"procType,omitempty"` + + // Number of processors allocated + Processors float64 `json:"processors,omitempty"` + + // If an SAP pvm-instance, the SAP profile ID to switch to (only while shutdown) + SapProfileID string `json:"sapProfileID,omitempty"` + + // Name of the server to create + ServerName string `json:"serverName,omitempty"` + + // The pvm instance Software Licenses + SoftwareLicenses *SoftwareLicenses `json:"softwareLicenses,omitempty"` + + // The pvm instance virtual CPU information + VirtualCores *VirtualCores `json:"virtualCores,omitempty"` +} + +// Validate validates this p VM instance update +func (m *PVMInstanceUpdate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePinPolicy(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSoftwareLicenses(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVirtualCores(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *PVMInstanceUpdate) validatePinPolicy(formats strfmt.Registry) error { + + if swag.IsZero(m.PinPolicy) { // not required + return nil + } + + if err := m.PinPolicy.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pinPolicy") + } + return err + } + + return nil +} + +var pVmInstanceUpdateTypeProcTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["dedicated","shared","capped"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceUpdateTypeProcTypePropEnum = append(pVmInstanceUpdateTypeProcTypePropEnum, v) + } +} + +const ( + + // PVMInstanceUpdateProcTypeDedicated captures enum value "dedicated" + PVMInstanceUpdateProcTypeDedicated string = "dedicated" + + // PVMInstanceUpdateProcTypeShared captures enum value "shared" + PVMInstanceUpdateProcTypeShared string = "shared" + + // PVMInstanceUpdateProcTypeCapped captures enum value "capped" + PVMInstanceUpdateProcTypeCapped string = "capped" +) + +// prop value enum +func (m *PVMInstanceUpdate) validateProcTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceUpdateTypeProcTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstanceUpdate) validateProcType(formats strfmt.Registry) error { + + if swag.IsZero(m.ProcType) { // not required + return nil + } + + // value enum + if err := m.validateProcTypeEnum("procType", "body", m.ProcType); err != nil { + return err + } + + return nil +} + +func (m *PVMInstanceUpdate) validateSoftwareLicenses(formats strfmt.Registry) error { + + if swag.IsZero(m.SoftwareLicenses) { // not required + return nil + } + + if m.SoftwareLicenses != nil { + if err := m.SoftwareLicenses.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("softwareLicenses") + } + return err + } + } + + return nil +} + +func (m *PVMInstanceUpdate) validateVirtualCores(formats strfmt.Registry) error { + + if swag.IsZero(m.VirtualCores) { // not required + return nil + } + + if m.VirtualCores != nil { + if err := m.VirtualCores.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("virtualCores") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceUpdate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceUpdate) UnmarshalBinary(b []byte) error { + var res PVMInstanceUpdate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_update_response.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_update_response.go new file mode 100644 index 00000000000..23dee315d66 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_update_response.go @@ -0,0 +1,138 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceUpdateResponse p VM instance update response +// swagger:model PVMInstanceUpdateResponse +type PVMInstanceUpdateResponse struct { + + // Amount of memory allocated (in GB) + Memory float64 `json:"memory,omitempty"` + + // pin policy + PinPolicy PinPolicy `json:"pinPolicy,omitempty"` + + // Processor type (dedicated, shared, capped) + // Enum: [dedicated shared capped] + ProcType string `json:"procType,omitempty"` + + // Number of processors allocated + Processors float64 `json:"processors,omitempty"` + + // Name of the server to create + ServerName string `json:"serverName,omitempty"` + + // URL to check for status of the operation (for now, just the URL for the GET on the server, which has status information from powervc) + StatusURL string `json:"statusUrl,omitempty"` +} + +// Validate validates this p VM instance update response +func (m *PVMInstanceUpdateResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePinPolicy(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProcType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PVMInstanceUpdateResponse) validatePinPolicy(formats strfmt.Registry) error { + + if swag.IsZero(m.PinPolicy) { // not required + return nil + } + + if err := m.PinPolicy.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pinPolicy") + } + return err + } + + return nil +} + +var pVmInstanceUpdateResponseTypeProcTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["dedicated","shared","capped"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pVmInstanceUpdateResponseTypeProcTypePropEnum = append(pVmInstanceUpdateResponseTypeProcTypePropEnum, v) + } +} + +const ( + + // PVMInstanceUpdateResponseProcTypeDedicated captures enum value "dedicated" + PVMInstanceUpdateResponseProcTypeDedicated string = "dedicated" + + // PVMInstanceUpdateResponseProcTypeShared captures enum value "shared" + PVMInstanceUpdateResponseProcTypeShared string = "shared" + + // PVMInstanceUpdateResponseProcTypeCapped captures enum value "capped" + PVMInstanceUpdateResponseProcTypeCapped string = "capped" +) + +// prop value enum +func (m *PVMInstanceUpdateResponse) validateProcTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, pVmInstanceUpdateResponseTypeProcTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *PVMInstanceUpdateResponse) validateProcType(formats strfmt.Registry) error { + + if swag.IsZero(m.ProcType) { // not required + return nil + } + + // value enum + if err := m.validateProcTypeEnum("procType", "body", m.ProcType); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceUpdateResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceUpdateResponse) UnmarshalBinary(b []byte) error { + var res PVMInstanceUpdateResponse + if err := swag.ReadJSON(b, &res); err != nil { + 
return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_volume_update.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_volume_update.go new file mode 100644 index 00000000000..e21263c5e29 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instance_volume_update.go @@ -0,0 +1,64 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstanceVolumeUpdate p VM instance volume update +// swagger:model PVMInstanceVolumeUpdate +type PVMInstanceVolumeUpdate struct { + + // Indicates if the volume should be deleted when the PVMInstance is terminated + // Required: true + DeleteOnTermination *bool `json:"deleteOnTermination"` +} + +// Validate validates this p VM instance volume update +func (m *PVMInstanceVolumeUpdate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDeleteOnTermination(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PVMInstanceVolumeUpdate) validateDeleteOnTermination(formats strfmt.Registry) error { + + if err := validate.Required("deleteOnTermination", "body", m.DeleteOnTermination); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstanceVolumeUpdate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstanceVolumeUpdate) UnmarshalBinary(b []byte) error { + var res PVMInstanceVolumeUpdate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instances.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instances.go new file mode 100644 index 00000000000..88eed70bbfe --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/p_vm_instances.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PVMInstances p VM instances +// swagger:model PVMInstances +type PVMInstances struct { + + // PVM Instance References + // Required: true + PvmInstances []*PVMInstanceReference `json:"pvmInstances"` +} + +// Validate validates this p VM instances +func (m *PVMInstances) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePvmInstances(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *PVMInstances) validatePvmInstances(formats strfmt.Registry) error { + + if err := validate.Required("pvmInstances", "body", m.PvmInstances); err != nil { + return err + } + + for i := 0; i < len(m.PvmInstances); i++ { + if swag.IsZero(m.PvmInstances[i]) { // not required + continue + } + + if m.PvmInstances[i] != nil { + if err := m.PvmInstances[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pvmInstances" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PVMInstances) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PVMInstances) UnmarshalBinary(b []byte) error { + var res PVMInstances + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/peering_network.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/peering_network.go new file mode 100644 index 00000000000..4b77e777028 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/peering_network.go @@ -0,0 +1,84 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PeeringNetwork peering network +// swagger:model PeeringNetwork +type PeeringNetwork struct { + + // Network in CIDR notation (192.168.0.0/24) + // Required: true + Cidr *string `json:"cidr" datastore:"cidr"` + + // DNS Servers + DNSServers []string `json:"dnsServers,omitempty" datastore:"dnsServers"` + + // Name of project to be peered + // Required: true + ProjectName *string `json:"projectName" datastore:"projectName"` +} + +// Validate validates this peering network +func (m *PeeringNetwork) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCidr(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProjectName(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *PeeringNetwork) validateCidr(formats strfmt.Registry) error { + + if err := validate.Required("cidr", "body", m.Cidr); err != nil { + return err + } + + return nil +} + +func (m *PeeringNetwork) validateProjectName(formats strfmt.Registry) error { + + if err := validate.Required("projectName", "body", m.ProjectName); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PeeringNetwork) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PeeringNetwork) UnmarshalBinary(b []byte) error { + var res PeeringNetwork + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/pin_policy.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/pin_policy.go new file mode 100644 index 00000000000..b3493ba53a2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/pin_policy.go @@ -0,0 +1,66 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/validate" +) + +// PinPolicy Specify PVM pin policy +// swagger:model PinPolicy +type PinPolicy string + +const ( + + // PinPolicyNone captures enum value "none" + PinPolicyNone PinPolicy = "none" + + // PinPolicySoft captures enum value "soft" + PinPolicySoft PinPolicy = "soft" + + // PinPolicyHard captures enum value "hard" + PinPolicyHard PinPolicy = "hard" +) + +// for schema +var pinPolicyEnum []interface{} + +func init() { + var res []PinPolicy + if err := json.Unmarshal([]byte(`["none","soft","hard"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + pinPolicyEnum = append(pinPolicyEnum, v) + } +} + +func (m PinPolicy) validatePinPolicyEnum(path, location string, value PinPolicy) error { + if err := validate.Enum(path, location, value, pinPolicyEnum); err != nil { + return err + } + return nil +} + +// Validate validates this pin policy +func (m PinPolicy) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validatePinPolicyEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/plan.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/plan.go new file mode 100644 index 00000000000..7b38a50746b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/plan.go @@ -0,0 +1,132 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Plan plan +// swagger:model Plan +type Plan struct { + + // bindable + Bindable bool `json:"bindable,omitempty"` + + // description + // Required: true + Description *string `json:"description"` + + // free + Free *bool `json:"free,omitempty"` + + // id + // Required: true + ID *string `json:"id"` + + // metadata + Metadata Metadata `json:"metadata,omitempty"` + + // name + // Required: true + Name *string `json:"name"` + + // schemas + Schemas *SchemasObject `json:"schemas,omitempty"` +} + +// Validate validates this plan +func (m *Plan) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDescription(formats); err != nil { + res = append(res, err) + } + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSchemas(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Plan) validateDescription(formats strfmt.Registry) error { + + if err := validate.Required("description", "body", m.Description); err != nil { + return err + } + + return nil +} + +func (m *Plan) validateID(formats strfmt.Registry) error { + + if err := validate.Required("id", "body", m.ID); err != nil { + return err + } + + return nil +} + +func (m *Plan) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *Plan) validateSchemas(formats strfmt.Registry) error { + + if swag.IsZero(m.Schemas) { // not required + return nil + } + + if m.Schemas != nil { + if err := m.Schemas.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("schemas") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Plan) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Plan) UnmarshalBinary(b []byte) error { + var res Plan + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/region_storage_types.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/region_storage_types.go new file mode 100644 index 00000000000..b5a50ef443d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/region_storage_types.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// RegionStorageTypes An array of storage types supported in a region +// swagger:model RegionStorageTypes +type RegionStorageTypes []*StorageType + +// Validate validates this region storage types +func (m RegionStorageTypes) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_create.go new file mode 100644 index 00000000000..f2d3af08112 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_create.go @@ -0,0 +1,190 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SAPCreate s a p create +// swagger:model SAPCreate +type SAPCreate struct { + + // Image ID of the sap image to use for the server + // Required: true + ImageID *string `json:"imageID"` + + // instances + Instances *PVMInstanceMultiCreate `json:"instances,omitempty"` + + // Name of the sap pvm-instance + // Required: true + Name *string `json:"name"` + + // The pvm instance networks information + // Required: true + Networks []*PVMInstanceAddNetwork `json:"networks"` + + // pin policy + PinPolicy PinPolicy `json:"pinPolicy,omitempty"` + + // SAP Profile ID for the amount of cores and memory + // Required: true + ProfileID *string `json:"profileID"` + + // The name of the SSH Key to provide to the server for authenticating + SSHKeyName string `json:"sshKeyName,omitempty"` + + // Cloud init user defined data + UserData string `json:"userData,omitempty"` + + // List of Volume IDs to attach to the pvm-instance on creation + VolumeIds []string `json:"volumeIDs"` +} + +// Validate validates this s a p create +func (m *SAPCreate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateImageID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateInstances(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetworks(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePinPolicy(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProfileID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SAPCreate) validateImageID(formats strfmt.Registry) error { + + if err := validate.Required("imageID", "body", m.ImageID); err != nil { + return err + } + + return nil +} + +func (m *SAPCreate) validateInstances(formats strfmt.Registry) error { + + if swag.IsZero(m.Instances) { // not required + return nil + } + + if m.Instances != nil { + if err := m.Instances.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("instances") + } + return err + } + } + + return nil +} + +func (m *SAPCreate) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *SAPCreate) validateNetworks(formats strfmt.Registry) error { + + if err := validate.Required("networks", "body", m.Networks); err != nil { + return err + } + + for i := 0; i < len(m.Networks); i++ { + if swag.IsZero(m.Networks[i]) { // not required + continue + } + + if m.Networks[i] != nil { + if err := m.Networks[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("networks" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *SAPCreate) validatePinPolicy(formats strfmt.Registry) error { + + if swag.IsZero(m.PinPolicy) { // not required + return nil + } + + if err := m.PinPolicy.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pinPolicy") + } + return err + } + + return nil +} + +func (m *SAPCreate) validateProfileID(formats strfmt.Registry) error { + + if err := validate.Required("profileID", "body", m.ProfileID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SAPCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SAPCreate) UnmarshalBinary(b []byte) error { + var res SAPCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_profile.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_profile.go new file mode 100644 index 00000000000..e05cd737e5e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_profile.go @@ -0,0 +1,178 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SAPProfile s a p profile +// swagger:model SAPProfile +type SAPProfile struct { + + // Has certification been performed on profile + // Required: true + Certified *bool `json:"certified"` + + // Amount of cores + // Required: true + Cores *int64 `json:"cores"` + + // Amount of memory (in GB) + // Required: true + Memory *int64 `json:"memory"` + + // SAP Profile ID + // Required: true + ProfileID *string `json:"profileID"` + + // Type of profile + // Required: true + // Enum: [balanced compute memory non-production ultra-memory] + Type *string `json:"type"` +} + +// Validate validates this s a p profile +func (m *SAPProfile) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCertified(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCores(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMemory(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProfileID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SAPProfile) validateCertified(formats strfmt.Registry) error { + + if err := validate.Required("certified", "body", m.Certified); err != nil { + return err + } + + return nil +} + +func (m *SAPProfile) validateCores(formats strfmt.Registry) error { + + if err := validate.Required("cores", "body", m.Cores); err != nil { + return err + } + + return nil +} + +func (m *SAPProfile) validateMemory(formats strfmt.Registry) error { + + if err := validate.Required("memory", "body", m.Memory); err != nil { + return err + } + + return nil +} + +func (m *SAPProfile) validateProfileID(formats strfmt.Registry) error { + + if err := validate.Required("profileID", "body", m.ProfileID); err != nil { + return err + } + + return nil +} + +var sAPProfileTypeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["balanced","compute","memory","non-production","ultra-memory"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + sAPProfileTypeTypePropEnum = append(sAPProfileTypeTypePropEnum, v) + } +} + +const ( + + // SAPProfileTypeBalanced captures enum value "balanced" + SAPProfileTypeBalanced string = "balanced" + + // SAPProfileTypeCompute captures enum value "compute" + SAPProfileTypeCompute string = "compute" + + // SAPProfileTypeMemory captures enum value "memory" + SAPProfileTypeMemory string = "memory" + + // SAPProfileTypeNonProduction captures enum value "non-production" + SAPProfileTypeNonProduction string = "non-production" + + // SAPProfileTypeUltraMemory captures enum value "ultra-memory" + SAPProfileTypeUltraMemory string = "ultra-memory" +) + +// prop value enum +func (m *SAPProfile) validateTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, sAPProfileTypeTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *SAPProfile) validateType(formats strfmt.Registry) error { + + if err := validate.Required("type", "body", m.Type); err != nil { + return err + } + + // value enum + if err := m.validateTypeEnum("type", "body", 
*m.Type); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SAPProfile) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SAPProfile) UnmarshalBinary(b []byte) error { + var res SAPProfile + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_profile_reference.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_profile_reference.go new file mode 100644 index 00000000000..e12750c369a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_profile_reference.go @@ -0,0 +1,81 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SAPProfileReference s a p profile reference +// swagger:model SAPProfileReference +type SAPProfileReference struct { + + // Link to SAP profile resource + // Required: true + Href *string `json:"href"` + + // SAP Profile ID + // Required: true + ProfileID *string `json:"profileID"` +} + +// Validate validates this s a p profile reference +func (m *SAPProfileReference) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHref(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProfileID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SAPProfileReference) validateHref(formats strfmt.Registry) error { + + if err := validate.Required("href", "body", m.Href); err != nil { + return err + } + + return nil +} + +func (m *SAPProfileReference) validateProfileID(formats strfmt.Registry) error { + + if err := validate.Required("profileID", "body", m.ProfileID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SAPProfileReference) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SAPProfileReference) UnmarshalBinary(b []byte) error { + var res SAPProfileReference + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_profiles.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_profiles.go new file mode 100644 index 00000000000..3fd70e46283 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_a_p_profiles.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SAPProfiles s a p profiles +// swagger:model SAPProfiles +type SAPProfiles struct { + + // SAP Profiles + // Required: true + Profiles []*SAPProfile `json:"profiles"` +} + +// Validate validates this s a p profiles +func (m *SAPProfiles) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateProfiles(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SAPProfiles) validateProfiles(formats strfmt.Registry) error { + + if err := validate.Required("profiles", "body", m.Profiles); err != nil { + return err + } + + for i := 0; i < len(m.Profiles); i++ { + if swag.IsZero(m.Profiles[i]) { // not required + continue + } + + if m.Profiles[i] != nil { + if err := m.Profiles[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("profiles" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SAPProfiles) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SAPProfiles) UnmarshalBinary(b []byte) error { + var res SAPProfiles + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_r_c.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_r_c.go new file mode 100644 index 00000000000..3f54a518eb6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/s_r_c.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// SRC s r c +// swagger:model SRC +type SRC struct { + + // The SRC reference code + Src string `json:"src,omitempty"` + + // The date stamp of the SRC + Timestamp string `json:"timestamp,omitempty"` +} + +// Validate validates this s r c +func (m *SRC) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SRC) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SRC) UnmarshalBinary(b []byte) error { + var res SRC + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/schema_parameters.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/schema_parameters.go new file mode 100644 index 00000000000..e464790b4b2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/schema_parameters.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// SchemaParameters schema parameters +// swagger:model SchemaParameters +type SchemaParameters struct { + + // parameters + Parameters JSONSchemaObject `json:"parameters,omitempty"` +} + +// Validate validates this schema parameters +func (m *SchemaParameters) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SchemaParameters) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SchemaParameters) UnmarshalBinary(b []byte) error { + var res SchemaParameters + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/schemas_object.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/schemas_object.go new file mode 100644 index 00000000000..0900d82bfdd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/schemas_object.go @@ -0,0 +1,96 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// SchemasObject schemas object +// swagger:model SchemasObject +type SchemasObject struct { + + // service binding + ServiceBinding *ServiceBindingSchemaObject `json:"service_binding,omitempty"` + + // service instance + ServiceInstance *ServiceInstanceSchemaObject `json:"service_instance,omitempty"` +} + +// Validate validates this schemas object +func (m *SchemasObject) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateServiceBinding(formats); err != nil { + res = append(res, err) + } + + if err := m.validateServiceInstance(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SchemasObject) validateServiceBinding(formats strfmt.Registry) error { + + if swag.IsZero(m.ServiceBinding) { // not required + return nil + } + + if m.ServiceBinding != nil { + if err := m.ServiceBinding.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("service_binding") + } + return err + } + } + + return nil +} + +func (m *SchemasObject) validateServiceInstance(formats strfmt.Registry) error { + + if swag.IsZero(m.ServiceInstance) { // not required + return nil + } + + if m.ServiceInstance != nil { + if err := m.ServiceInstance.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("service_instance") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SchemasObject) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SchemasObject) UnmarshalBinary(b []byte) error { + var res SchemasObject + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service.go new file mode 100644 index 00000000000..51d719f673d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service.go @@ -0,0 +1,241 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Service service +// swagger:model Service +type Service struct { + + // bindable + // Required: true + Bindable *bool `json:"bindable"` + + // dashboard client + DashboardClient *DashboardClient `json:"dashboard_client,omitempty"` + + // description + // Required: true + Description *string `json:"description"` + + // iam compatible + IamCompatible bool `json:"iam_compatible,omitempty"` + + // id + // Required: true + ID *string `json:"id"` + + // metadata + Metadata Metadata `json:"metadata,omitempty"` + + // name + // Required: true + Name *string `json:"name"` + + // plan updateable + PlanUpdateable bool `json:"plan_updateable,omitempty"` + + // plans + // Required: true + Plans []*Plan `json:"plans"` + + // provisionable + Provisionable bool `json:"provisionable,omitempty"` + + // rc compatible + RcCompatible bool `json:"rc_compatible,omitempty"` + + // requires + Requires []string `json:"requires"` + + // tags + Tags []string `json:"tags"` + + // unique api key + UniqueAPIKey bool `json:"unique_api_key,omitempty"` +} + +// Validate validates this service +func (m *Service) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateBindable(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDashboardClient(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDescription(formats); err != nil { + res = append(res, err) + } + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePlans(formats); err != nil { + res = append(res, err) + } + + 
if err := m.validateRequires(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Service) validateBindable(formats strfmt.Registry) error { + + if err := validate.Required("bindable", "body", m.Bindable); err != nil { + return err + } + + return nil +} + +func (m *Service) validateDashboardClient(formats strfmt.Registry) error { + + if swag.IsZero(m.DashboardClient) { // not required + return nil + } + + if m.DashboardClient != nil { + if err := m.DashboardClient.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("dashboard_client") + } + return err + } + } + + return nil +} + +func (m *Service) validateDescription(formats strfmt.Registry) error { + + if err := validate.Required("description", "body", m.Description); err != nil { + return err + } + + return nil +} + +func (m *Service) validateID(formats strfmt.Registry) error { + + if err := validate.Required("id", "body", m.ID); err != nil { + return err + } + + return nil +} + +func (m *Service) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *Service) validatePlans(formats strfmt.Registry) error { + + if err := validate.Required("plans", "body", m.Plans); err != nil { + return err + } + + for i := 0; i < len(m.Plans); i++ { + if swag.IsZero(m.Plans[i]) { // not required + continue + } + + if m.Plans[i] != nil { + if err := m.Plans[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("plans" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var serviceRequiresItemsEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["syslog_drain","route_forwarding","volume_mount"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + serviceRequiresItemsEnum = append(serviceRequiresItemsEnum, v) + } +} + +func (m *Service) validateRequiresItemsEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, serviceRequiresItemsEnum); err != nil { + return err + } + return nil +} + +func (m *Service) validateRequires(formats strfmt.Registry) error { + + if swag.IsZero(m.Requires) { // not required + return nil + } + + for i := 0; i < len(m.Requires); i++ { + + // value enum + if err := m.validateRequiresItemsEnum("requires"+"."+strconv.Itoa(i), "body", m.Requires[i]); err != nil { + return err + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Service) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Service) UnmarshalBinary(b []byte) error { + var res Service + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding.go new file mode 100644 index 00000000000..994409d3f01 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding.go @@ -0,0 +1,89 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// ServiceBinding service binding +// swagger:model ServiceBinding +type ServiceBinding struct { + + // credentials + Credentials Object `json:"credentials,omitempty"` + + // route service url + RouteServiceURL string `json:"route_service_url,omitempty"` + + // syslog drain url + SyslogDrainURL string `json:"syslog_drain_url,omitempty"` + + // volume mounts + VolumeMounts []*ServiceBindingVolumeMount `json:"volume_mounts"` +} + +// Validate validates this service binding +func (m *ServiceBinding) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVolumeMounts(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ServiceBinding) validateVolumeMounts(formats strfmt.Registry) error { + + if swag.IsZero(m.VolumeMounts) { // not required + return nil + } + + for i := 0; i < len(m.VolumeMounts); i++ { + if swag.IsZero(m.VolumeMounts[i]) { // not required + continue + } + + if m.VolumeMounts[i] != nil { + if err := m.VolumeMounts[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("volume_mounts" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceBinding) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceBinding) UnmarshalBinary(b []byte) error { + var res ServiceBinding + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_request.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_request.go new file mode 100644 index 00000000000..68ca42fcb98 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_request.go @@ -0,0 +1,115 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ServiceBindingRequest service binding request +// swagger:model ServiceBindingRequest +type ServiceBindingRequest struct { + + // app guid + AppGUID string `json:"app_guid,omitempty"` + + // bind resource + BindResource *ServiceBindingResourceObject `json:"bind_resource,omitempty"` + + // context + Context Context `json:"context,omitempty"` + + // parameters + Parameters Object `json:"parameters,omitempty"` + + // plan id + // Required: true + PlanID *string `json:"plan_id"` + + // service id + // Required: true + ServiceID *string `json:"service_id"` +} + +// Validate validates this service binding request +func (m *ServiceBindingRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateBindResource(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePlanID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateServiceID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ServiceBindingRequest) validateBindResource(formats strfmt.Registry) error { + + if swag.IsZero(m.BindResource) { // not required + return nil + } + + if m.BindResource != nil { + if err := m.BindResource.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("bind_resource") + } + return err + } + } + + return nil +} + +func (m *ServiceBindingRequest) validatePlanID(formats strfmt.Registry) error { + + if err := validate.Required("plan_id", "body", m.PlanID); err != nil { + return err + } + + return nil +} + +func (m *ServiceBindingRequest) validateServiceID(formats strfmt.Registry) error { + + if err := validate.Required("service_id", "body", m.ServiceID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceBindingRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceBindingRequest) UnmarshalBinary(b []byte) error { + var res ServiceBindingRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_resource.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_resource.go new file mode 100644 index 00000000000..1a97439da6b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_resource.go @@ -0,0 +1,92 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
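+
+// NOTE (editorial example; not generated by go-swagger): required properties
+// are generated as pointers so an unset field stays distinguishable from a
+// zero value; the go-openapi swag helpers keep the literals readable. A
+// sketch with hypothetical IDs:
+func exampleServiceBindingRequest() {
+	req := &ServiceBindingRequest{
+		PlanID:    swag.String("plan-uuid"),
+		ServiceID: swag.String("service-uuid"),
+	}
+	// Both required pointers are set, so Validate returns nil.
+	if err := req.Validate(strfmt.Default); err != nil {
+		panic(err)
+	}
+}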
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// ServiceBindingResource service binding resource +// swagger:model ServiceBindingResource +type ServiceBindingResource struct { + + // credentials + Credentials Object `json:"credentials,omitempty"` + + // parameters + Parameters Object `json:"parameters,omitempty"` + + // route service url + RouteServiceURL string `json:"route_service_url,omitempty"` + + // syslog drain url + SyslogDrainURL string `json:"syslog_drain_url,omitempty"` + + // volume mounts + VolumeMounts []*ServiceBindingVolumeMount `json:"volume_mounts"` +} + +// Validate validates this service binding resource +func (m *ServiceBindingResource) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVolumeMounts(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ServiceBindingResource) validateVolumeMounts(formats strfmt.Registry) error { + + if swag.IsZero(m.VolumeMounts) { // not required + return nil + } + + for i := 0; i < len(m.VolumeMounts); i++ { + if swag.IsZero(m.VolumeMounts[i]) { // not required + continue + } + + if m.VolumeMounts[i] != nil { + if err := m.VolumeMounts[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("volume_mounts" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceBindingResource) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceBindingResource) UnmarshalBinary(b []byte) error { + var res ServiceBindingResource + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_resource_object.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_resource_object.go new file mode 100644 index 00000000000..cc1aa4dd141 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_resource_object.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// ServiceBindingResourceObject service binding resource object +// swagger:model ServiceBindingResourceObject +type ServiceBindingResourceObject struct { + + // app guid + AppGUID string `json:"app_guid,omitempty"` + + // route + Route string `json:"route,omitempty"` +} + +// Validate validates this service binding resource object +func (m *ServiceBindingResourceObject) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceBindingResourceObject) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceBindingResourceObject) UnmarshalBinary(b []byte) error { + var res ServiceBindingResourceObject + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_schema_object.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_schema_object.go new file mode 100644 index 00000000000..9df0a3da65a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_schema_object.go @@ -0,0 +1,71 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// ServiceBindingSchemaObject service binding schema object +// swagger:model ServiceBindingSchemaObject +type ServiceBindingSchemaObject struct { + + // create + Create *SchemaParameters `json:"create,omitempty"` +} + +// Validate validates this service binding schema object +func (m *ServiceBindingSchemaObject) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreate(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ServiceBindingSchemaObject) validateCreate(formats strfmt.Registry) error { + + if swag.IsZero(m.Create) { // not required + return nil + } + + if m.Create != nil { + if err := m.Create.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("create") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceBindingSchemaObject) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceBindingSchemaObject) UnmarshalBinary(b []byte) error { + var res ServiceBindingSchemaObject + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_volume_mount.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_volume_mount.go new file mode 100644 index 00000000000..c3c23e231e4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_volume_mount.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ServiceBindingVolumeMount service binding volume mount +// swagger:model ServiceBindingVolumeMount +type ServiceBindingVolumeMount struct { + + // container dir + // Required: true + ContainerDir *string `json:"container_dir"` + + // device + // Required: true + Device *ServiceBindingVolumeMountDevice `json:"device"` + + // device type + // Required: true + // Enum: [shared] + DeviceType *string `json:"device_type"` + + // driver + // Required: true + Driver *string `json:"driver"` + + // mode + // Required: true + // Enum: [r rw] + Mode *string `json:"mode"` +} + +// Validate validates this service binding volume mount +func (m *ServiceBindingVolumeMount) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContainerDir(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDevice(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDeviceType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDriver(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMode(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ServiceBindingVolumeMount) validateContainerDir(formats strfmt.Registry) error { + + if err := validate.Required("container_dir", "body", m.ContainerDir); err != nil { + return err + } + + return nil +} + +func (m *ServiceBindingVolumeMount) validateDevice(formats strfmt.Registry) error { + + if err := validate.Required("device", "body", m.Device); err != nil { + return err + } + + if m.Device != nil { + if err := m.Device.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("device") + } + return err + } + } + + return nil +} + +var serviceBindingVolumeMountTypeDeviceTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["shared"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + serviceBindingVolumeMountTypeDeviceTypePropEnum = append(serviceBindingVolumeMountTypeDeviceTypePropEnum, v) + } +} + +const ( + + // ServiceBindingVolumeMountDeviceTypeShared captures enum value "shared" + ServiceBindingVolumeMountDeviceTypeShared string = "shared" +) + +// prop value enum +func (m *ServiceBindingVolumeMount) validateDeviceTypeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, serviceBindingVolumeMountTypeDeviceTypePropEnum); err != nil { + return err + } + return nil +} + +func (m *ServiceBindingVolumeMount) validateDeviceType(formats strfmt.Registry) error { + + if err := validate.Required("device_type", "body", m.DeviceType); err != nil { + return err + } + + // value enum + if err := m.validateDeviceTypeEnum("device_type", "body", *m.DeviceType); err != nil { + return err + } + + return nil +} + +func (m *ServiceBindingVolumeMount) validateDriver(formats strfmt.Registry) error { + + if err := validate.Required("driver", "body", m.Driver); err != nil { + return err + } + + return nil +} + +var serviceBindingVolumeMountTypeModePropEnum []interface{} + +func init() { + var res 
[]string + if err := json.Unmarshal([]byte(`["r","rw"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + serviceBindingVolumeMountTypeModePropEnum = append(serviceBindingVolumeMountTypeModePropEnum, v) + } +} + +const ( + + // ServiceBindingVolumeMountModeR captures enum value "r" + ServiceBindingVolumeMountModeR string = "r" + + // ServiceBindingVolumeMountModeRw captures enum value "rw" + ServiceBindingVolumeMountModeRw string = "rw" +) + +// prop value enum +func (m *ServiceBindingVolumeMount) validateModeEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, serviceBindingVolumeMountTypeModePropEnum); err != nil { + return err + } + return nil +} + +func (m *ServiceBindingVolumeMount) validateMode(formats strfmt.Registry) error { + + if err := validate.Required("mode", "body", m.Mode); err != nil { + return err + } + + // value enum + if err := m.validateModeEnum("mode", "body", *m.Mode); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceBindingVolumeMount) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceBindingVolumeMount) UnmarshalBinary(b []byte) error { + var res ServiceBindingVolumeMount + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_volume_mount_device.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_volume_mount_device.go new file mode 100644 index 00000000000..0a64a3a771c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_binding_volume_mount_device.go @@ -0,0 +1,67 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ServiceBindingVolumeMountDevice service binding volume mount device +// swagger:model ServiceBindingVolumeMountDevice +type ServiceBindingVolumeMountDevice struct { + + // mount config + MountConfig Object `json:"mount_config,omitempty"` + + // volume id + // Required: true + VolumeID *string `json:"volume_id"` +} + +// Validate validates this service binding volume mount device +func (m *ServiceBindingVolumeMountDevice) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVolumeID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ServiceBindingVolumeMountDevice) validateVolumeID(formats strfmt.Registry) error { + + if err := validate.Required("volume_id", "body", m.VolumeID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceBindingVolumeMountDevice) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceBindingVolumeMountDevice) UnmarshalBinary(b []byte) error { + var res ServiceBindingVolumeMountDevice + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance.go new file mode 100644 index 00000000000..98d6f004847 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance.go @@ -0,0 +1,98 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ServiceInstance service instance +// swagger:model ServiceInstance +type ServiceInstance struct { + + // Indicates (from the viewpoint of the provider) whether the service instance is (perceived) active or not and is meaningful if enabled is true. The default value is true if not specified. + // Required: true + Active *bool `json:"active"` + + // Indicates the current state of the service instance. + // Required: true + Enable *bool `json:"enable"` + + // Indicates when the service instance was last accessed or modified, and is meaningful if enabled is true AND active is false. Represented as milliseconds since the epoch, but does not need to be accurate to the second/hour. + // Required: true + LastActive *float64 `json:"last_active"` +} + +// Validate validates this service instance +func (m *ServiceInstance) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateActive(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEnable(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLastActive(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ServiceInstance) validateActive(formats strfmt.Registry) error { + + if err := validate.Required("active", "body", m.Active); err != nil { + return err + } + + return nil +} + +func (m *ServiceInstance) validateEnable(formats strfmt.Registry) error { + + if err := validate.Required("enable", "body", m.Enable); err != nil { + return err + } + + return nil +} + +func (m *ServiceInstance) validateLastActive(formats strfmt.Registry) error { + + if err := validate.Required("last_active", "body", m.LastActive); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceInstance) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceInstance) UnmarshalBinary(b []byte) error { + var res ServiceInstance + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_async_operation.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_async_operation.go new file mode 100644 index 00000000000..49c62ee3cb4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_async_operation.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// ServiceInstanceAsyncOperation service instance async operation +// swagger:model ServiceInstanceAsyncOperation +type ServiceInstanceAsyncOperation struct { + + // dashboard url + DashboardURL string `json:"dashboard_url,omitempty"` + + // operation + Operation string `json:"operation,omitempty"` +} + +// Validate validates this service instance async operation +func (m *ServiceInstanceAsyncOperation) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceInstanceAsyncOperation) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceInstanceAsyncOperation) UnmarshalBinary(b []byte) error { + var res ServiceInstanceAsyncOperation + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_previous_values.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_previous_values.go new file mode 100644 index 00000000000..df228924401 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_previous_values.go @@ -0,0 +1,52 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
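+
+// NOTE (editorial example; not generated by go-swagger): all three
+// ServiceInstance properties above are required pointers, including the
+// epoch-milliseconds last_active timestamp. A minimal sketch:
+func exampleServiceInstance() {
+	instance := &ServiceInstance{
+		Active:     swag.Bool(true),
+		Enable:     swag.Bool(true),
+		LastActive: swag.Float64(1600000000000), // ms since the epoch
+	}
+	// Every required field is set, so Validate returns nil.
+	if err := instance.Validate(strfmt.Default); err != nil {
+		panic(err)
+	}
+}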
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// ServiceInstancePreviousValues service instance previous values +// swagger:model ServiceInstancePreviousValues +type ServiceInstancePreviousValues struct { + + // organization id + OrganizationID string `json:"organization_id,omitempty"` + + // plan id + PlanID string `json:"plan_id,omitempty"` + + // service id + ServiceID string `json:"service_id,omitempty"` + + // space id + SpaceID string `json:"space_id,omitempty"` +} + +// Validate validates this service instance previous values +func (m *ServiceInstancePreviousValues) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceInstancePreviousValues) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceInstancePreviousValues) UnmarshalBinary(b []byte) error { + var res ServiceInstancePreviousValues + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_provision.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_provision.go new file mode 100644 index 00000000000..dd9973eb8ef --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_provision.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// ServiceInstanceProvision service instance provision +// swagger:model ServiceInstanceProvision +type ServiceInstanceProvision struct { + + // dashboard url + DashboardURL string `json:"dashboard_url,omitempty"` +} + +// Validate validates this service instance provision +func (m *ServiceInstanceProvision) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceInstanceProvision) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceInstanceProvision) UnmarshalBinary(b []byte) error { + var res ServiceInstanceProvision + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_provision_request.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_provision_request.go new file mode 100644 index 00000000000..52835c28484 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_provision_request.go @@ -0,0 +1,93 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
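+
+// NOTE (editorial example; not generated by go-swagger): every model in this
+// package implements MarshalBinary/UnmarshalBinary via swag's JSON helpers,
+// so instances round-trip losslessly through []byte. Assumes "fmt" is
+// imported; the URL is hypothetical.
+func exampleProvisionRoundTrip() {
+	in := &ServiceInstanceProvision{DashboardURL: "https://dashboard.example.com"}
+	raw, err := in.MarshalBinary()
+	if err != nil {
+		panic(err)
+	}
+	out := &ServiceInstanceProvision{}
+	if err := out.UnmarshalBinary(raw); err != nil {
+		panic(err)
+	}
+	fmt.Println(out.DashboardURL == in.DashboardURL) // true
+}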
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ServiceInstanceProvisionRequest service instance provision request +// swagger:model ServiceInstanceProvisionRequest +type ServiceInstanceProvisionRequest struct { + + // context + Context Context `json:"context,omitempty"` + + // organization guid + OrganizationGUID string `json:"organization_guid,omitempty"` + + // parameters + Parameters Object `json:"parameters,omitempty"` + + // plan id + // Required: true + PlanID *string `json:"plan_id"` + + // service id + // Required: true + ServiceID *string `json:"service_id"` + + // space guid + SpaceGUID string `json:"space_guid,omitempty"` +} + +// Validate validates this service instance provision request +func (m *ServiceInstanceProvisionRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePlanID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateServiceID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ServiceInstanceProvisionRequest) validatePlanID(formats strfmt.Registry) error { + + if err := validate.Required("plan_id", "body", m.PlanID); err != nil { + return err + } + + return nil +} + +func (m *ServiceInstanceProvisionRequest) validateServiceID(formats strfmt.Registry) error { + + if err := validate.Required("service_id", "body", m.ServiceID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceInstanceProvisionRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceInstanceProvisionRequest) UnmarshalBinary(b []byte) error { + var res ServiceInstanceProvisionRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_request.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_request.go new file mode 100644 index 00000000000..fd635a615f6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_request.go @@ -0,0 +1,70 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ServiceInstanceRequest service instance request +// swagger:model ServiceInstanceRequest +type ServiceInstanceRequest struct { + + // Indicates the current state of the service instance. + // Required: true + Enabled *bool `json:"enabled"` + + // Optional string stating the reason code for the service instance state change. Valid values are BMX_ACCT_ACTIVATE, BMX_SERVICE_INSTANCE_BELOW_CAP for enable calls, and BMX_ACCT_SUSPEND, BMX_SERVICE_INSTANCE_ABOVE_CAP for disable calls. 
+	ReasonCode string `json:"reason_code,omitempty"`
+
+	// Optional string showing the user id initiating the call
+	InitiatorID string `json:"initiator_id,omitempty"`
+}
+
+// Validate validates this service instance request
+func (m *ServiceInstanceRequest) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateEnabled(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *ServiceInstanceRequest) validateEnabled(formats strfmt.Registry) error {
+
+	if err := validate.Required("enabled", "body", m.Enabled); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *ServiceInstanceRequest) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *ServiceInstanceRequest) UnmarshalBinary(b []byte) error {
+	var res ServiceInstanceRequest
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_resource.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_resource.go
new file mode 100644
index 00000000000..95989abe3b9
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_resource.go
@@ -0,0 +1,52 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/swag"
+)
+
+// ServiceInstanceResource service instance resource
+// swagger:model ServiceInstanceResource
+type ServiceInstanceResource struct {
+
+	// dashboard url
+	DashboardURL string `json:"dashboard_url,omitempty"`
+
+	// parameters
+	Parameters Object `json:"parameters,omitempty"`
+
+	// plan id
+	PlanID string `json:"plan_id,omitempty"`
+
+	// service id
+	ServiceID string `json:"service_id,omitempty"`
+}
+
+// Validate validates this service instance resource
+func (m *ServiceInstanceResource) Validate(formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *ServiceInstanceResource) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *ServiceInstanceResource) UnmarshalBinary(b []byte) error {
+	var res ServiceInstanceResource
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_schema_object.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_schema_object.go
new file mode 100644
index 00000000000..0c1e0e1698c
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_schema_object.go
@@ -0,0 +1,96 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
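+
+// NOTE (editorial example; not generated by go-swagger): only "enabled" is
+// required on a ServiceInstanceRequest; reason_code and initiator_id are
+// optional plain strings. A hypothetical disable-call body:
+func exampleDisableServiceInstance() {
+	req := &ServiceInstanceRequest{
+		Enabled:    swag.Bool(false),
+		ReasonCode: "BMX_ACCT_SUSPEND", // one of the documented disable codes
+	}
+	if err := req.Validate(strfmt.Default); err != nil {
+		panic(err)
+	}
+}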
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// ServiceInstanceSchemaObject service instance schema object +// swagger:model ServiceInstanceSchemaObject +type ServiceInstanceSchemaObject struct { + + // create + Create *SchemaParameters `json:"create,omitempty"` + + // update + Update *SchemaParameters `json:"update,omitempty"` +} + +// Validate validates this service instance schema object +func (m *ServiceInstanceSchemaObject) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUpdate(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ServiceInstanceSchemaObject) validateCreate(formats strfmt.Registry) error { + + if swag.IsZero(m.Create) { // not required + return nil + } + + if m.Create != nil { + if err := m.Create.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("create") + } + return err + } + } + + return nil +} + +func (m *ServiceInstanceSchemaObject) validateUpdate(formats strfmt.Registry) error { + + if swag.IsZero(m.Update) { // not required + return nil + } + + if m.Update != nil { + if err := m.Update.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("update") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceInstanceSchemaObject) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceInstanceSchemaObject) UnmarshalBinary(b []byte) error { + var res ServiceInstanceSchemaObject + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_update_request.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_update_request.go new file mode 100644 index 00000000000..a1894b8d0b4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/service_instance_update_request.go @@ -0,0 +1,98 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
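+
+// NOTE (editorial example; not generated by go-swagger): nested objects are
+// validated by delegation, and ValidateName re-labels any failure with the
+// owning property name ("create" or "update"). SchemaParameters is defined
+// elsewhere in this package; its contents are assumed here, and "fmt" is
+// assumed imported.
+func exampleSchemaObject() {
+	obj := &ServiceInstanceSchemaObject{
+		Create: &SchemaParameters{},
+	}
+	fmt.Println(obj.Validate(strfmt.Default))
+}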
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ServiceInstanceUpdateRequest service instance update request +// swagger:model ServiceInstanceUpdateRequest +type ServiceInstanceUpdateRequest struct { + + // context + Context Context `json:"context,omitempty"` + + // parameters + Parameters Object `json:"parameters,omitempty"` + + // plan id + PlanID string `json:"plan_id,omitempty"` + + // previous values + PreviousValues *ServiceInstancePreviousValues `json:"previous_values,omitempty"` + + // service id + // Required: true + ServiceID *string `json:"service_id"` +} + +// Validate validates this service instance update request +func (m *ServiceInstanceUpdateRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePreviousValues(formats); err != nil { + res = append(res, err) + } + + if err := m.validateServiceID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ServiceInstanceUpdateRequest) validatePreviousValues(formats strfmt.Registry) error { + + if swag.IsZero(m.PreviousValues) { // not required + return nil + } + + if m.PreviousValues != nil { + if err := m.PreviousValues.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("previous_values") + } + return err + } + } + + return nil +} + +func (m *ServiceInstanceUpdateRequest) validateServiceID(formats strfmt.Registry) error { + + if err := validate.Required("service_id", "body", m.ServiceID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ServiceInstanceUpdateRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ServiceInstanceUpdateRequest) UnmarshalBinary(b []byte) error { + var res ServiceInstanceUpdateRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot.go new file mode 100644 index 00000000000..ab301f42f70 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot.go @@ -0,0 +1,165 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
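+
+// NOTE (editorial example; not generated by go-swagger): an update request
+// needs only service_id; previous_values, when present, is validated
+// recursively. A sketch with hypothetical plan IDs:
+func exampleUpdateRequest() {
+	req := &ServiceInstanceUpdateRequest{
+		ServiceID:      swag.String("service-uuid"),
+		PlanID:         "new-plan-uuid",
+		PreviousValues: &ServiceInstancePreviousValues{PlanID: "old-plan-uuid"},
+	}
+	if err := req.Validate(strfmt.Default); err != nil {
+		panic(err)
+	}
+}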
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// Snapshot snapshot
+// swagger:model Snapshot
+type Snapshot struct {
+
+	// Action performed on the instance snapshot
+	Action string `json:"action,omitempty"`
+
+	// Creation Date
+	// Format: date-time
+	CreationDate strfmt.DateTime `json:"creationDate,omitempty"`
+
+	// Description of the PVM instance snapshot
+	Description string `json:"description,omitempty"`
+
+	// Last Update Date
+	// Format: date-time
+	LastUpdateDate strfmt.DateTime `json:"lastUpdateDate,omitempty"`
+
+	// Name of the PVM instance snapshot
+	// Required: true
+	Name *string `json:"name"`
+
+	// Snapshot completion percentage
+	PercentComplete int64 `json:"percentComplete,omitempty"`
+
+	// PCloud PVM Instance ID
+	// Required: true
+	PvmInstanceID *string `json:"pvmInstanceID"`
+
+	// ID of the PVM instance snapshot
+	// Required: true
+	SnapshotID *string `json:"snapshotID"`
+
+	// Status of the PVM instance snapshot
+	Status string `json:"status,omitempty"`
+
+	// A map of volume snapshots included in the PVM instance snapshot
+	// Required: true
+	VolumeSnapshots map[string]string `json:"volumeSnapshots"`
+}
+
+// Validate validates this snapshot
+func (m *Snapshot) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateCreationDate(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateLastUpdateDate(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateName(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validatePvmInstanceID(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateSnapshotID(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateVolumeSnapshots(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+ } + return nil +} + +func (m *Snapshot) validateCreationDate(formats strfmt.Registry) error { + + if swag.IsZero(m.CreationDate) { // not required + return nil + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Snapshot) validateLastUpdateDate(formats strfmt.Registry) error { + + if swag.IsZero(m.LastUpdateDate) { // not required + return nil + } + + if err := validate.FormatOf("lastUpdateDate", "body", "date-time", m.LastUpdateDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Snapshot) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *Snapshot) validatePvmInstanceID(formats strfmt.Registry) error { + + if err := validate.Required("pvmInstanceID", "body", m.PvmInstanceID); err != nil { + return err + } + + return nil +} + +func (m *Snapshot) validateSnapshotID(formats strfmt.Registry) error { + + if err := validate.Required("snapshotID", "body", m.SnapshotID); err != nil { + return err + } + + return nil +} + +func (m *Snapshot) validateVolumeSnapshots(formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *Snapshot) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Snapshot) UnmarshalBinary(b []byte) error { + var res Snapshot + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_create.go new file mode 100644 index 00000000000..17827deb2f1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_create.go @@ -0,0 +1,70 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SnapshotCreate snapshot create +// swagger:model SnapshotCreate +type SnapshotCreate struct { + + // Description of the PVM instance snapshot + Description string `json:"description,omitempty"` + + // Name of the PVM instance snapshot to create + // Required: true + Name *string `json:"name"` + + // List of volumes to include in the PVM instance snapshot + VolumeIds []string `json:"volumeIDs"` +} + +// Validate validates this snapshot create +func (m *SnapshotCreate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SnapshotCreate) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SnapshotCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SnapshotCreate) UnmarshalBinary(b []byte) error { + var res SnapshotCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_create_response.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_create_response.go new file mode 100644 index 00000000000..c372430b626 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_create_response.go @@ -0,0 +1,64 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SnapshotCreateResponse snapshot create response +// swagger:model SnapshotCreateResponse +type SnapshotCreateResponse struct { + + // ID of the PVM instance snapshot + // Required: true + SnapshotID *string `json:"snapshotID"` +} + +// Validate validates this snapshot create response +func (m *SnapshotCreateResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSnapshotID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SnapshotCreateResponse) validateSnapshotID(formats strfmt.Registry) error { + + if err := validate.Required("snapshotID", "body", m.SnapshotID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SnapshotCreateResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SnapshotCreateResponse) UnmarshalBinary(b []byte) error { + var res SnapshotCreateResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_restore.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_restore.go new file mode 100644 index 00000000000..f9b8ba15339 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_restore.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// SnapshotRestore snapshot restore +// swagger:model SnapshotRestore +type SnapshotRestore struct { + + // By default the VM must be shutoff during a snapshot restore, force set to true will relax the VM shutoff pre-condition. 
+ Force *bool `json:"force,omitempty"` +} + +// Validate validates this snapshot restore +func (m *SnapshotRestore) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SnapshotRestore) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SnapshotRestore) UnmarshalBinary(b []byte) error { + var res SnapshotRestore + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_update.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_update.go new file mode 100644 index 00000000000..27d6b7751ee --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshot_update.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// SnapshotUpdate snapshot update +// swagger:model SnapshotUpdate +type SnapshotUpdate struct { + + // Description of the PVM instance snapshot + Description string `json:"description,omitempty"` + + // Name of the PVM instance snapshot + Name string `json:"name,omitempty"` +} + +// Validate validates this snapshot update +func (m *SnapshotUpdate) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SnapshotUpdate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SnapshotUpdate) UnmarshalBinary(b []byte) error { + var res SnapshotUpdate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshots.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshots.go new file mode 100644 index 00000000000..885d8eabc81 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/snapshots.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Snapshots snapshots +// swagger:model Snapshots +type Snapshots struct { + + // List of PVM instance snapshots + // Required: true + Snapshots []*Snapshot `json:"snapshots"` +} + +// Validate validates this snapshots +func (m *Snapshots) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSnapshots(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Snapshots) validateSnapshots(formats strfmt.Registry) error { + + if err := validate.Required("snapshots", "body", m.Snapshots); err != nil { + return err + } + + for i := 0; i < len(m.Snapshots); i++ { + if swag.IsZero(m.Snapshots[i]) { // not required + continue + } + + if m.Snapshots[i] != nil { + if err := m.Snapshots[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("snapshots" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Snapshots) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Snapshots) UnmarshalBinary(b []byte) error { + var res Snapshots + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/software_licenses.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/software_licenses.go new file mode 100644 index 00000000000..d06fd08d919 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/software_licenses.go @@ -0,0 +1,55 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// SoftwareLicenses software licenses +// swagger:model SoftwareLicenses +type SoftwareLicenses struct { + + // IBMi Cloud Storage Solution + IbmiCSS *bool `json:"ibmiCSS,omitempty"` + + // IBMi Cloud Storage Solution + IbmiDBQ *bool `json:"ibmiDBQ,omitempty"` + + // IBMi Power High Availability + IbmiPHA *bool `json:"ibmiPHA,omitempty"` + + // IBMi Rational Dev Studio + IbmiRDS *bool `json:"ibmiRDS,omitempty"` + + // IBMi Rational Dev Studio Number of User Licenses + IbmiRDSUsers int64 `json:"ibmiRDSUsers,omitempty"` +} + +// Validate validates this software licenses +func (m *SoftwareLicenses) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SoftwareLicenses) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SoftwareLicenses) UnmarshalBinary(b []byte) error { + var res SoftwareLicenses + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/ssh_key.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/ssh_key.go new file mode 100644 index 00000000000..29314e99c63 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/ssh_key.go @@ -0,0 +1,102 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
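+
+// NOTE (editorial example; not generated by go-swagger): SoftwareLicenses has
+// no required or constrained properties, so its Validate is a generated
+// no-op, and the omitempty pointer fields keep unset flags out of the JSON
+// entirely. Assumes "fmt" is imported.
+func exampleSoftwareLicenses() {
+	licenses := &SoftwareLicenses{
+		IbmiPHA:      swag.Bool(true),
+		IbmiRDS:      swag.Bool(true),
+		IbmiRDSUsers: 5,
+	}
+	raw, _ := licenses.MarshalBinary()
+	fmt.Println(string(raw)) // {"ibmiPHA":true,"ibmiRDS":true,"ibmiRDSUsers":5}
+}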
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SSHKey SSH key +// swagger:model SSHKey +type SSHKey struct { + + // Date of sshkey creation + // Format: date-time + CreationDate *strfmt.DateTime `json:"creationDate,omitempty" datastore:"creationDate"` + + // User defined name for the SSH key + // Required: true + Name *string `json:"name" datastore:"name"` + + // SSH RSA key + // Required: true + SSHKey *string `json:"sshKey" datastore:"sshKey"` +} + +// Validate validates this SSH key +func (m *SSHKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSSHKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SSHKey) validateCreationDate(formats strfmt.Registry) error { + + if swag.IsZero(m.CreationDate) { // not required + return nil + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *SSHKey) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *SSHKey) validateSSHKey(formats strfmt.Registry) error { + + if err := validate.Required("sshKey", "body", m.SSHKey); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SSHKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SSHKey) UnmarshalBinary(b []byte) error { + var res SSHKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/ssh_keys.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/ssh_keys.go new file mode 100644 index 00000000000..d281a24aea5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/ssh_keys.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SSHKeys SSH keys +// swagger:model SSHKeys +type SSHKeys struct { + + // SSH Keys + // Required: true + SSHKeys []*SSHKey `json:"sshKeys"` +} + +// Validate validates this SSH keys +func (m *SSHKeys) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSSHKeys(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SSHKeys) validateSSHKeys(formats strfmt.Registry) error { + + if err := validate.Required("sshKeys", "body", m.SSHKeys); err != nil { + return err + } + + for i := 0; i < len(m.SSHKeys); i++ { + if swag.IsZero(m.SSHKeys[i]) { // not required + continue + } + + if m.SSHKeys[i] != nil { + if err := m.SSHKeys[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("sshKeys" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SSHKeys) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SSHKeys) UnmarshalBinary(b []byte) error { + var res SSHKeys + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/stock_image.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/stock_image.go new file mode 100644 index 00000000000..61639996533 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/stock_image.go @@ -0,0 +1,49 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// StockImage Stock image detail +// swagger:model StockImage +type StockImage struct { + + // Image ID + ID string `json:"id,omitempty"` + + // Storage pool for a stock image + StoragePool string `json:"storagePool,omitempty"` + + // Storage type for a stock image + StorageType string `json:"storageType,omitempty"` +} + +// Validate validates this stock image +func (m *StockImage) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StockImage) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StockImage) UnmarshalBinary(b []byte) error { + var res StockImage + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/stock_images.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/stock_images.go new file mode 100644 index 00000000000..d9551dbef97 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/stock_images.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
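+
+// NOTE (editorial example; not generated by go-swagger): creationDate is
+// checked against the registered "date-time" format, so an RFC 3339 value
+// passes while a nil pointer is skipped as "not required". Assumes "time" is
+// imported; the key material is hypothetical.
+func exampleSSHKey() {
+	now := strfmt.DateTime(time.Now())
+	key := &SSHKey{
+		CreationDate: &now,
+		Name:         swag.String("bootstrap-key"),
+		SSHKey:       swag.String("ssh-rsa AAAAB3... user@host"),
+	}
+	if err := key.Validate(strfmt.Default); err != nil {
+		panic(err)
+	}
+}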
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// StockImages List of stock images +// swagger:model StockImages +type StockImages []*StockImage + +// Validate validates this stock images +func (m StockImages) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_pool_capacity.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_pool_capacity.go new file mode 100644 index 00000000000..4162d3674e7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_pool_capacity.go @@ -0,0 +1,73 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// StoragePoolCapacity Storage pool capacity +// swagger:model StoragePoolCapacity +type StoragePoolCapacity struct { + + // Maximum allocation storage size (GB) + // Required: true + MaxAllocationSize *int64 `json:"maxAllocationSize"` + + // Pool name + PoolName string `json:"poolName,omitempty"` + + // Storage type of the storage pool + StorageType string `json:"storageType,omitempty"` + + // Total pool capacity (GB) + TotalCapacity int64 `json:"totalCapacity,omitempty"` +} + +// Validate validates this storage pool capacity +func (m *StoragePoolCapacity) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMaxAllocationSize(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StoragePoolCapacity) validateMaxAllocationSize(formats strfmt.Registry) error { + + if err := validate.Required("maxAllocationSize", "body", m.MaxAllocationSize); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *StoragePoolCapacity) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StoragePoolCapacity) UnmarshalBinary(b []byte) error { + var res StoragePoolCapacity + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_pools_capacity.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_pools_capacity.go new file mode 100644 index 00000000000..92380226dd5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_pools_capacity.go @@ -0,0 +1,105 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
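+
+// NOTE (editorial example; not generated by go-swagger): maxAllocationSize is
+// the lone required property of StoragePoolCapacity, so leaving it nil is the
+// only way its Validate can fail. Pool names and sizes here are hypothetical.
+func exampleStoragePoolCapacity() {
+	pool := &StoragePoolCapacity{
+		MaxAllocationSize: swag.Int64(1000), // GB
+		PoolName:          "Tier3-Flash-1",
+		StorageType:       "tier3",
+		TotalCapacity:     2048, // GB
+	}
+	if err := pool.Validate(strfmt.Default); err != nil {
+		panic(err)
+	}
+}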
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// StoragePoolsCapacity Storage capacity for all storage pools +// swagger:model StoragePoolsCapacity +type StoragePoolsCapacity struct { + + // maximum storage allocation + MaximumStorageAllocation *MaximumStorageAllocation `json:"maximumStorageAllocation,omitempty"` + + // storage pools capacity + StoragePoolsCapacity []*StoragePoolCapacity `json:"storagePoolsCapacity"` +} + +// Validate validates this storage pools capacity +func (m *StoragePoolsCapacity) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMaximumStorageAllocation(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStoragePoolsCapacity(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StoragePoolsCapacity) validateMaximumStorageAllocation(formats strfmt.Registry) error { + + if swag.IsZero(m.MaximumStorageAllocation) { // not required + return nil + } + + if m.MaximumStorageAllocation != nil { + if err := m.MaximumStorageAllocation.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("maximumStorageAllocation") + } + return err + } + } + + return nil +} + +func (m *StoragePoolsCapacity) validateStoragePoolsCapacity(formats strfmt.Registry) error { + + if swag.IsZero(m.StoragePoolsCapacity) { // not required + return nil + } + + for i := 0; i < len(m.StoragePoolsCapacity); i++ { + if swag.IsZero(m.StoragePoolsCapacity[i]) { // not required + continue + } + + if m.StoragePoolsCapacity[i] != nil { + if err := m.StoragePoolsCapacity[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("storagePoolsCapacity" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *StoragePoolsCapacity) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StoragePoolsCapacity) UnmarshalBinary(b []byte) error { + var res StoragePoolsCapacity + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_type.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_type.go new file mode 100644 index 00000000000..1f709a93d08 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_type.go @@ -0,0 +1,109 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
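+// NOTE: Illustrative usage, not generated code: State is constrained to the
+// enum below; the generated constants avoid typos, e.g.:
+//   s := StorageTypeStateActive
+//   err := (&StorageType{State: &s}).Validate(strfmt.Default) // err == nil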
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// StorageType Storage type detail +// swagger:model StorageType +type StorageType struct { + + // Identifies if the storage type is the default for a region + Default bool `json:"default,omitempty"` + + // Description, storage type label + Description string `json:"description,omitempty"` + + // State of the storage type (active or inactive) + // Enum: [active inactive] + State *string `json:"state,omitempty"` + + // Storage type + Type string `json:"type,omitempty"` +} + +// Validate validates this storage type +func (m *StorageType) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateState(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var storageTypeTypeStatePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["active","inactive"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + storageTypeTypeStatePropEnum = append(storageTypeTypeStatePropEnum, v) + } +} + +const ( + + // StorageTypeStateActive captures enum value "active" + StorageTypeStateActive string = "active" + + // StorageTypeStateInactive captures enum value "inactive" + StorageTypeStateInactive string = "inactive" +) + +// prop value enum +func (m *StorageType) validateStateEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, storageTypeTypeStatePropEnum); err != nil { + return err + } + return nil +} + +func (m *StorageType) validateState(formats strfmt.Registry) error { + + if swag.IsZero(m.State) { // not required + return nil + } + + // value enum + if err := m.validateStateEnum("state", "body", *m.State); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageType) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageType) UnmarshalBinary(b []byte) error { + var res StorageType + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_type_capacity.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_type_capacity.go new file mode 100644 index 00000000000..899d9d3891b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_type_capacity.go @@ -0,0 +1,108 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// StorageTypeCapacity Storage type capacity +// swagger:model StorageTypeCapacity +type StorageTypeCapacity struct { + + // maximum storage allocation + MaximumStorageAllocation *MaximumStorageAllocation `json:"maximumStorageAllocation,omitempty"` + + // List of storage pool capacity for storage type + StoragePoolsCapacity []*StoragePoolCapacity `json:"storagePoolsCapacity"` + + // Storage type + StorageType string `json:"storageType,omitempty"` +} + +// Validate validates this storage type capacity +func (m *StorageTypeCapacity) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMaximumStorageAllocation(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStoragePoolsCapacity(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageTypeCapacity) validateMaximumStorageAllocation(formats strfmt.Registry) error { + + if swag.IsZero(m.MaximumStorageAllocation) { // not required + return nil + } + + if m.MaximumStorageAllocation != nil { + if err := m.MaximumStorageAllocation.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("maximumStorageAllocation") + } + return err + } + } + + return nil +} + +func (m *StorageTypeCapacity) validateStoragePoolsCapacity(formats strfmt.Registry) error { + + if swag.IsZero(m.StoragePoolsCapacity) { // not required + return nil + } + + for i := 0; i < len(m.StoragePoolsCapacity); i++ { + if swag.IsZero(m.StoragePoolsCapacity[i]) { // not required + continue + } + + if m.StoragePoolsCapacity[i] != nil { + if err := m.StoragePoolsCapacity[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("storagePoolsCapacity" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageTypeCapacity) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageTypeCapacity) UnmarshalBinary(b []byte) error { + var res StorageTypeCapacity + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_types.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_types.go new file mode 100644 index 00000000000..3801a77aac7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_types.go @@ -0,0 +1,42 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
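+// NOTE: Illustrative usage, not generated code: StorageTypes below is a map,
+// presumably keyed by region, so lookups use the comma-ok idiom, e.g.:
+//   if regionTypes, ok := st["us-south"]; ok { _ = regionTypes }
+// where st is a hypothetical StorageTypes value and "us-south" a sample key.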
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/validate"
+)
+
+// StorageTypes A map of an array of storage types supported in a region
+// swagger:model StorageTypes
+type StorageTypes map[string]RegionStorageTypes
+
+// Validate validates this storage types
+func (m StorageTypes) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	for k := range m {
+
+		if err := validate.Required(k, "body", m[k]); err != nil {
+			return err
+		}
+
+		if err := m[k].Validate(formats); err != nil {
+			if ve, ok := err.(*errors.Validation); ok {
+				return ve.ValidateName(k)
+			}
+			return err
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_types_capacity.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_types_capacity.go
new file mode 100644
index 00000000000..79040673818
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/storage_types_capacity.go
@@ -0,0 +1,105 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"strconv"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+)
+
+// StorageTypesCapacity Storage types capacity
+// swagger:model StorageTypesCapacity
+type StorageTypesCapacity struct {
+
+	// maximum storage allocation
+	MaximumStorageAllocation *MaximumStorageAllocation `json:"maximumStorageAllocation,omitempty"`
+
+	// storage types capacity
+	StorageTypesCapacity []*StorageTypeCapacity `json:"storageTypesCapacity"`
+}
+
+// Validate validates this storage types capacity
+func (m *StorageTypesCapacity) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateMaximumStorageAllocation(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateStorageTypesCapacity(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *StorageTypesCapacity) validateMaximumStorageAllocation(formats strfmt.Registry) error {
+
+	if swag.IsZero(m.MaximumStorageAllocation) { // not required
+		return nil
+	}
+
+	if m.MaximumStorageAllocation != nil {
+		if err := m.MaximumStorageAllocation.Validate(formats); err != nil {
+			if ve, ok := err.(*errors.Validation); ok {
+				return ve.ValidateName("maximumStorageAllocation")
+			}
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *StorageTypesCapacity) validateStorageTypesCapacity(formats strfmt.Registry) error {
+
+	if swag.IsZero(m.StorageTypesCapacity) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(m.StorageTypesCapacity); i++ {
+		if swag.IsZero(m.StorageTypesCapacity[i]) { // not required
+			continue
+		}
+
+		if m.StorageTypesCapacity[i] != nil {
+			if err := m.StorageTypesCapacity[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("storageTypesCapacity" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *StorageTypesCapacity) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *StorageTypesCapacity) UnmarshalBinary(b []byte) error {
+	var res StorageTypesCapacity
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/system.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/system.go
new file mode 100644
index 00000000000..c1dc2c218a6
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/system.go
@@ -0,0 +1,84 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// System system
+// swagger:model System
+type System struct {
+
+	// The host available Processor units
+	// Required: true
+	Cores *float64 `json:"cores"`
+
+	// The host identifier
+	ID int64 `json:"id,omitempty"`
+
+	// The host available RAM memory in GiB
+	// Required: true
+	Memory *int64 `json:"memory"`
+}
+
+// Validate validates this system
+func (m *System) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateCores(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateMemory(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *System) validateCores(formats strfmt.Registry) error {
+
+	if err := validate.Required("cores", "body", m.Cores); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *System) validateMemory(formats strfmt.Registry) error {
+
+	if err := validate.Required("memory", "body", m.Memory); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *System) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *System) UnmarshalBinary(b []byte) error {
+	var res System
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/system_pool.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/system_pool.go
new file mode 100644
index 00000000000..c9ae131108f
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/system_pool.go
@@ -0,0 +1,211 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
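+// NOTE: Descriptive summary, not generated code: SystemPool below aggregates
+// per-host System capacities; Validate cascades into Capacity, MaxAvailable,
+// MaxCoresAvailable, MaxMemoryAvailable, SharedCoreRatio and every element of
+// Systems.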
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// SystemPool Hardware platform detailing its limits and statistics +// swagger:model SystemPool +type SystemPool struct { + + // Advertised capacity cores and memory (GB) + Capacity *System `json:"capacity,omitempty"` + + // Processor to Memory (GB) Ratio + CoreMemoryRatio float64 `json:"coreMemoryRatio,omitempty"` + + // Maximum configurable cores and memory (GB) (aggregated from all hosts) + MaxAvailable *System `json:"maxAvailable,omitempty"` + + // Maximum configurable cores available combined with available memory of that host + MaxCoresAvailable *System `json:"maxCoresAvailable,omitempty"` + + // Maximum configurable memory available combined with available cores of that host + MaxMemoryAvailable *System `json:"maxMemoryAvailable,omitempty"` + + // min-max-default allocation percentage of shared core per vCPU + SharedCoreRatio *MinMaxDefault `json:"sharedCoreRatio,omitempty"` + + // The DataCenter list of servers and their available resources + Systems []*System `json:"systems"` + + // Type of system hardware + Type string `json:"type,omitempty"` +} + +// Validate validates this system pool +func (m *SystemPool) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCapacity(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMaxAvailable(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMaxCoresAvailable(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMaxMemoryAvailable(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSharedCoreRatio(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSystems(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SystemPool) validateCapacity(formats strfmt.Registry) error { + + if swag.IsZero(m.Capacity) { // not required + return nil + } + + if m.Capacity != nil { + if err := m.Capacity.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("capacity") + } + return err + } + } + + return nil +} + +func (m *SystemPool) validateMaxAvailable(formats strfmt.Registry) error { + + if swag.IsZero(m.MaxAvailable) { // not required + return nil + } + + if m.MaxAvailable != nil { + if err := m.MaxAvailable.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("maxAvailable") + } + return err + } + } + + return nil +} + +func (m *SystemPool) validateMaxCoresAvailable(formats strfmt.Registry) error { + + if swag.IsZero(m.MaxCoresAvailable) { // not required + return nil + } + + if m.MaxCoresAvailable != nil { + if err := m.MaxCoresAvailable.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("maxCoresAvailable") + } + return err + } + } + + return nil +} + +func (m *SystemPool) validateMaxMemoryAvailable(formats strfmt.Registry) error { + + if swag.IsZero(m.MaxMemoryAvailable) { // not required + return nil + } + + if m.MaxMemoryAvailable != nil { + if err := m.MaxMemoryAvailable.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("maxMemoryAvailable") + } + return err + } + } + + return nil +} + +func (m *SystemPool) validateSharedCoreRatio(formats strfmt.Registry) error { + + if swag.IsZero(m.SharedCoreRatio) { // not required + return nil + } + + if m.SharedCoreRatio != nil { + if err := m.SharedCoreRatio.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("sharedCoreRatio") + } + return err + } + } + + return nil +} + +func (m *SystemPool) validateSystems(formats strfmt.Registry) error { + + if swag.IsZero(m.Systems) { // not required + return nil + } + + for i := 0; i < len(m.Systems); i++ { + if swag.IsZero(m.Systems[i]) { // not required + continue + } + + if m.Systems[i] != nil { + if err := m.Systems[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("systems" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SystemPool) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SystemPool) UnmarshalBinary(b []byte) error { + var res SystemPool + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/system_pools.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/system_pools.go new file mode 100644 index 00000000000..c12cde48f6d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/system_pools.go @@ -0,0 +1,40 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
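+// NOTE: Illustrative usage, not generated code: SystemPools below maps pool
+// names to SystemPool values, so callers look entries up with comma-ok, e.g.:
+//   if pool, ok := pools["s922"]; ok { _ = pool.Type }
+// where pools is a hypothetical SystemPools value and "s922" a sample key.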
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/validate" +) + +// SystemPools List of available system pools within a particular DataCenter +// swagger:model SystemPools +type SystemPools map[string]SystemPool + +// Validate validates this system pools +func (m SystemPools) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.Required(k, "body", m[k]); err != nil { + return err + } + if val, ok := m[k]; ok { + if err := val.Validate(formats); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/task.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/task.go new file mode 100644 index 00000000000..d5c4cf212da --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/task.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Task task +// swagger:model Task +type Task struct { + + // Cloud Instance ID of task owner + // Required: true + CloudInstanceID *string `json:"cloudInstanceID"` + + // the component id of the task + // Required: true + ComponentID *string `json:"componentID"` + + // the component type of the task + // Required: true + ComponentType *string `json:"componentType"` + + // Creation Date + // Required: true + // Format: date-time + CreationDate *strfmt.DateTime `json:"creationDate"` + + // Last Update Date + // Required: true + // Format: date-time + LastUpdateDate *strfmt.DateTime `json:"lastUpdateDate"` + + // Task Operation + // Required: true + Operation *string `json:"operation"` + + // status code of the task + // Required: true + Status *string `json:"status"` + + // status detail of the task + // Required: true + StatusDetail *string `json:"statusDetail"` + + // Pcloud Task ID + // Required: true + TaskID *string `json:"taskID"` +} + +// Validate validates this task +func (m *Task) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCloudInstanceID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateComponentID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateComponentType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLastUpdateDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOperation(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatusDetail(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTaskID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Task) validateCloudInstanceID(formats strfmt.Registry) error { + + if err := validate.Required("cloudInstanceID", "body", m.CloudInstanceID); err != nil { + return err + } + + return nil +} + +func (m *Task) validateComponentID(formats strfmt.Registry) error { + + if err := validate.Required("componentID", "body", m.ComponentID); err != nil { + return err + } + + return nil +} + +func (m *Task) validateComponentType(formats strfmt.Registry) error { + + if err := validate.Required("componentType", "body", m.ComponentType); err != nil { + return err + } + + return nil +} + +func (m *Task) validateCreationDate(formats strfmt.Registry) error { + + if err := validate.Required("creationDate", "body", m.CreationDate); err != nil { + return err + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Task) validateLastUpdateDate(formats strfmt.Registry) error { + + if err := validate.Required("lastUpdateDate", "body", m.LastUpdateDate); err != nil { + return err + } + + if err := validate.FormatOf("lastUpdateDate", "body", "date-time", m.LastUpdateDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Task) validateOperation(formats strfmt.Registry) error { + + if err := validate.Required("operation", "body", m.Operation); err != nil { + return err + } + + return nil +} + +func (m *Task) validateStatus(formats strfmt.Registry) error { + + if err := validate.Required("status", "body", m.Status); err != nil { + return err + } + + return nil +} + +func (m *Task) validateStatusDetail(formats strfmt.Registry) error { + + if err := validate.Required("statusDetail", "body", m.StatusDetail); err != nil { + return err + } + + return nil +} + +func (m *Task) validateTaskID(formats strfmt.Registry) error { + + if err := validate.Required("taskID", "body", m.TaskID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Task) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Task) UnmarshalBinary(b []byte) error { + var res Task + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/task_reference.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/task_reference.go new file mode 100644 index 00000000000..0f7b85fcc7f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/task_reference.go @@ -0,0 +1,81 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
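+// NOTE: Descriptive summary, not generated code: TaskReference below carries
+// only the two required fields (href, taskID) a caller needs to poll the
+// status of a long-running operation via the referenced Task resource.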
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// TaskReference task reference +// swagger:model TaskReference +type TaskReference struct { + + // Link to Task resource + // Required: true + Href *string `json:"href"` + + // ID of Task used to get status of long running operation + // Required: true + TaskID *string `json:"taskID"` +} + +// Validate validates this task reference +func (m *TaskReference) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHref(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTaskID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TaskReference) validateHref(formats strfmt.Registry) error { + + if err := validate.Required("href", "body", m.Href); err != nil { + return err + } + + return nil +} + +func (m *TaskReference) validateTaskID(formats strfmt.Registry) error { + + if err := validate.Required("taskID", "body", m.TaskID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *TaskReference) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TaskReference) UnmarshalBinary(b []byte) error { + var res TaskReference + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/tenant.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/tenant.go new file mode 100644 index 00000000000..df8acb6e93d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/tenant.go @@ -0,0 +1,205 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
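+// NOTE: Descriptive summary, not generated code: Tenant below requires
+// cloudInstances, creationDate (validated as RFC 3339 date-time), enabled and
+// tenantID; sshKeys and peeringNetworks are optional and validated
+// element-by-element only when present.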
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"strconv"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// Tenant tenant
+// swagger:model Tenant
+type Tenant struct {
+
+	// Cloud Instances owned by the Tenant
+	// Required: true
+	CloudInstances []*CloudInstanceReference `json:"cloudInstances"`
+
+	// Date of Tenant creation
+	// Required: true
+	// Format: date-time
+	CreationDate *strfmt.DateTime `json:"creationDate"`
+
+	// Indicates if the tenant is enabled
+	// Required: true
+	Enabled *bool `json:"enabled"`
+
+	// IBM Customer Number
+	Icn string `json:"icn,omitempty"`
+
+	// Peering Network Information (optional)
+	PeeringNetworks []*PeeringNetwork `json:"peeringNetworks,omitempty"`
+
+	// Tenant SSH Keys
+	SSHKeys []*SSHKey `json:"sshKeys"`
+
+	// Tenant ID
+	// Required: true
+	TenantID *string `json:"tenantID"`
+}
+
+// Validate validates this tenant
+func (m *Tenant) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateCloudInstances(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateCreationDate(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateEnabled(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validatePeeringNetworks(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateSSHKeys(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateTenantID(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *Tenant) validateCloudInstances(formats strfmt.Registry) error {
+
+	if err := validate.Required("cloudInstances", "body", m.CloudInstances); err != nil {
+		return err
+	}
+
+	for i := 0; i < len(m.CloudInstances); i++ {
+		if swag.IsZero(m.CloudInstances[i]) { // not required
+			continue
+		}
+
+		if m.CloudInstances[i] != nil {
+			if err := m.CloudInstances[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("cloudInstances" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+func (m *Tenant) validateCreationDate(formats strfmt.Registry) error {
+
+	if err := validate.Required("creationDate", "body", m.CreationDate); err != nil {
+		return err
+	}
+
+	if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *Tenant) validateEnabled(formats strfmt.Registry) error {
+
+	if err := validate.Required("enabled", "body", m.Enabled); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *Tenant) validatePeeringNetworks(formats strfmt.Registry) error {
+
+	if swag.IsZero(m.PeeringNetworks) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(m.PeeringNetworks); i++ {
+		if swag.IsZero(m.PeeringNetworks[i]) { // not required
+			continue
+		}
+
+		if m.PeeringNetworks[i] != nil {
+			if err := m.PeeringNetworks[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("peeringNetworks" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+func (m *Tenant) validateSSHKeys(formats strfmt.Registry) error {
+
+	if swag.IsZero(m.SSHKeys) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(m.SSHKeys); i++ {
+		if swag.IsZero(m.SSHKeys[i]) { // not required
+			continue
+		}
+
+		if m.SSHKeys[i] != nil {
+			if err := m.SSHKeys[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("sshKeys" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+func (m *Tenant) validateTenantID(formats strfmt.Registry) error {
+
+	if err := validate.Required("tenantID", "body", m.TenantID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Tenant) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Tenant) UnmarshalBinary(b []byte) error {
+	var res Tenant
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/tenant_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/tenant_create.go
new file mode 100644
index 00000000000..65386d9c569
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/tenant_create.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"strconv"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// TenantCreate tenant create
+// swagger:model TenantCreate
+type TenantCreate struct {
+
+	// Billing account ID
+	BillingAccountID string `json:"billingAccountID,omitempty"`
+
+	// Entitlement ID
+	EntitlementID string `json:"entitlementID,omitempty"`
+
+	// Tenant SSH Keys
+	SSHKeys []*SSHKey `json:"sshKeys"`
+
+	// Tenant ID
+	// Required: true
+	TenantID *string `json:"tenantID"`
+}
+
+// Validate validates this tenant create
+func (m *TenantCreate) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateSSHKeys(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateTenantID(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *TenantCreate) validateSSHKeys(formats strfmt.Registry) error {
+
+	if swag.IsZero(m.SSHKeys) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(m.SSHKeys); i++ {
+		if swag.IsZero(m.SSHKeys[i]) { // not required
+			continue
+		}
+
+		if m.SSHKeys[i] != nil {
+			if err := m.SSHKeys[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("sshKeys" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+func (m *TenantCreate) validateTenantID(formats strfmt.Registry) error {
+
+	if err := validate.Required("tenantID", "body", m.TenantID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *TenantCreate) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *TenantCreate) UnmarshalBinary(b []byte) error {
+	var res TenantCreate
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/tenant_update.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/tenant_update.go
new file mode 100644
index 00000000000..25edd294ba5
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/tenant_update.go
@@ -0,0 +1,83 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"strconv"
+
+	strfmt "github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/swag"
+)
+
+// TenantUpdate tenant update
+// swagger:model TenantUpdate
+type TenantUpdate struct {
+
+	// IBM Customer Number
+	Icn *string `json:"icn,omitempty"`
+
+	// Peering Network Information (optional)
+	PeeringNetworks []*PeeringNetwork `json:"peeringNetworks"`
+}
+
+// Validate validates this tenant update
+func (m *TenantUpdate) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validatePeeringNetworks(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *TenantUpdate) validatePeeringNetworks(formats strfmt.Registry) error {
+
+	if swag.IsZero(m.PeeringNetworks) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(m.PeeringNetworks); i++ {
+		if swag.IsZero(m.PeeringNetworks[i]) { // not required
+			continue
+		}
+
+		if m.PeeringNetworks[i] != nil {
+			if err := m.PeeringNetworks[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("peeringNetworks" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *TenantUpdate) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *TenantUpdate) UnmarshalBinary(b []byte) error {
+	var res TenantUpdate
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/token.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/token.go
new file mode 100644
index 00000000000..70fe73bc842
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/token.go
@@ -0,0 +1,120 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
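+// NOTE: Illustrative usage, not generated code: all four Token fields are
+// required, so validating a zero value returns a CompositeValidationError
+// covering each of them, e.g.:
+//   err := (&Token{}).Validate(strfmt.Default) // four "is required" failures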
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Token token +// swagger:model Token +type Token struct { + + // AccessToken + // Required: true + AccessToken *string `json:"accessToken"` + + // Token Expires + // Required: true + // Format: date-time + Expires *strfmt.DateTime `json:"expires"` + + // Refresh Token + // Required: true + RefreshToken *string `json:"refreshToken"` + + // Token Type + // Required: true + Type *string `json:"type"` +} + +// Validate validates this token +func (m *Token) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAccessToken(formats); err != nil { + res = append(res, err) + } + + if err := m.validateExpires(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRefreshToken(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Token) validateAccessToken(formats strfmt.Registry) error { + + if err := validate.Required("accessToken", "body", m.AccessToken); err != nil { + return err + } + + return nil +} + +func (m *Token) validateExpires(formats strfmt.Registry) error { + + if err := validate.Required("expires", "body", m.Expires); err != nil { + return err + } + + if err := validate.FormatOf("expires", "body", "date-time", m.Expires.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Token) validateRefreshToken(formats strfmt.Registry) error { + + if err := validate.Required("refreshToken", "body", m.RefreshToken); err != nil { + return err + } + + return nil +} + +func (m *Token) validateType(formats strfmt.Registry) error { + + if err := validate.Required("type", "body", m.Type); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Token) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Token) UnmarshalBinary(b []byte) error { + var res Token + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/token_extra.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/token_extra.go new file mode 100644 index 00000000000..d8f6b64f727 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/token_extra.go @@ -0,0 +1,129 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
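+// NOTE: Descriptive summary, not generated code: TokenExtra below wraps an
+// OAuth Token with expiry metadata; validateToken both requires the nested
+// Token and recursively validates it.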
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// TokenExtra token extra +// swagger:model TokenExtra +type TokenExtra struct { + + // Number of seconds token will expire + // Required: true + ExpiresIn *float64 `json:"expiresIn"` + + // Time on the service broker + // Required: true + // Format: date-time + ServerTime *strfmt.DateTime `json:"serverTime"` + + // OAuth Token + // Required: true + Token *Token `json:"token"` + + // Is this token valid + // Required: true + Valid *bool `json:"valid"` +} + +// Validate validates this token extra +func (m *TokenExtra) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateExpiresIn(formats); err != nil { + res = append(res, err) + } + + if err := m.validateServerTime(formats); err != nil { + res = append(res, err) + } + + if err := m.validateToken(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValid(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TokenExtra) validateExpiresIn(formats strfmt.Registry) error { + + if err := validate.Required("expiresIn", "body", m.ExpiresIn); err != nil { + return err + } + + return nil +} + +func (m *TokenExtra) validateServerTime(formats strfmt.Registry) error { + + if err := validate.Required("serverTime", "body", m.ServerTime); err != nil { + return err + } + + if err := validate.FormatOf("serverTime", "body", "date-time", m.ServerTime.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *TokenExtra) validateToken(formats strfmt.Registry) error { + + if err := validate.Required("token", "body", m.Token); err != nil { + return err + } + + if m.Token != nil { + if err := m.Token.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("token") + } + return err + } + } + + return nil +} + +func (m *TokenExtra) validateValid(formats strfmt.Registry) error { + + if err := validate.Required("valid", "body", m.Valid); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *TokenExtra) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TokenExtra) UnmarshalBinary(b []byte) error { + var res TokenExtra + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/token_request.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/token_request.go new file mode 100644 index 00000000000..e622f512c54 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/token_request.go @@ -0,0 +1,118 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
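+// NOTE: Illustrative usage, not generated code: Source must be "web" or
+// "cli"; using the generated constants avoids typos, e.g.:
+//   rt, src := "<refresh-token>", TokenRequestSourceCli
+//   err := (&TokenRequest{RefreshToken: &rt, Source: &src}).Validate(strfmt.Default) // err == nil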
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// TokenRequest token request +// swagger:model TokenRequest +type TokenRequest struct { + + // The refresh token to request the new Access Token + // Required: true + RefreshToken *string `json:"refreshToken"` + + // Source type of the token request (web or cli) + // Required: true + // Enum: [web cli] + Source *string `json:"source"` +} + +// Validate validates this token request +func (m *TokenRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateRefreshToken(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSource(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TokenRequest) validateRefreshToken(formats strfmt.Registry) error { + + if err := validate.Required("refreshToken", "body", m.RefreshToken); err != nil { + return err + } + + return nil +} + +var tokenRequestTypeSourcePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["web","cli"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + tokenRequestTypeSourcePropEnum = append(tokenRequestTypeSourcePropEnum, v) + } +} + +const ( + + // TokenRequestSourceWeb captures enum value "web" + TokenRequestSourceWeb string = "web" + + // TokenRequestSourceCli captures enum value "cli" + TokenRequestSourceCli string = "cli" +) + +// prop value enum +func (m *TokenRequest) validateSourceEnum(path, location string, value string) error { + if err := validate.Enum(path, location, value, tokenRequestTypeSourcePropEnum); err != nil { + return err + } + return nil +} + +func (m *TokenRequest) validateSource(formats strfmt.Registry) error { + + if err := validate.Required("source", "body", m.Source); err != nil { + return err + } + + // value enum + if err := m.validateSourceEnum("source", "body", *m.Source); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *TokenRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TokenRequest) UnmarshalBinary(b []byte) error { + var res TokenRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/update_volume.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/update_volume.go new file mode 100644 index 00000000000..ab44cd60bcd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/update_volume.go @@ -0,0 +1,52 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
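+// NOTE: Descriptive summary, not generated code: UpdateVolume below uses
+// pointer fields (Bootable, Name, Shareable) presumably so an update payload
+// can distinguish "leave unchanged" (nil) from an explicit false or empty
+// value; with no required fields, Validate is a no-op.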
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// UpdateVolume update volume +// swagger:model UpdateVolume +type UpdateVolume struct { + + // Indicates if the volume is boot capable + Bootable *bool `json:"bootable,omitempty"` + + // Name + Name *string `json:"name,omitempty"` + + // Indicates if the volume is shareable between VMs + Shareable *bool `json:"shareable,omitempty"` + + // New Volume size + Size float64 `json:"size,omitempty"` +} + +// Validate validates this update volume +func (m *UpdateVolume) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *UpdateVolume) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *UpdateVolume) UnmarshalBinary(b []byte) error { + var res UpdateVolume + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/user_info.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/user_info.go new file mode 100644 index 00000000000..d343271d96c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/user_info.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// UserInfo user info +// swagger:model UserInfo +type UserInfo struct { + + // User Email + // Required: true + Email *string `json:"email"` + + // User ID + // Required: true + ID *string `json:"id"` + + // User Image URL + ImageURL string `json:"imageURL,omitempty"` + + // Member of the following tenants + MemberOf []string `json:"memberOf"` + + // User Name + // Required: true + Name *string `json:"name"` +} + +// Validate validates this user info +func (m *UserInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEmail(formats); err != nil { + res = append(res, err) + } + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *UserInfo) validateEmail(formats strfmt.Registry) error { + + if err := validate.Required("email", "body", m.Email); err != nil { + return err + } + + return nil +} + +func (m *UserInfo) validateID(formats strfmt.Registry) error { + + if err := validate.Required("id", "body", m.ID); err != nil { + return err + } + + return nil +} + +func (m *UserInfo) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *UserInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *UserInfo) UnmarshalBinary(b []byte) error { + var res UserInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/version.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/version.go new file mode 100644 index 00000000000..0d57619e3cd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/version.go @@ -0,0 +1,52 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// Version version +// swagger:model Version +type Version struct { + + // Returns the build time + BuildDate string `json:"buildDate,omitempty"` + + // Returns the current go runtime version + GoVersion string `json:"goVersion,omitempty"` + + // Hostname of the responding system + Hostname string `json:"hostname,omitempty"` + + // Returns the git versioning information + Version string `json:"version,omitempty"` +} + +// Validate validates this version +func (m *Version) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Version) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Version) UnmarshalBinary(b []byte) error { + var res Version + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/virtual_cores.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/virtual_cores.go new file mode 100644 index 00000000000..7ce4fc20363 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/virtual_cores.go @@ -0,0 +1,75 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
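+// NOTE: Illustrative usage, not generated code: Assigned is required and must
+// be at least 1 (validate.MinimumInt, non-exclusive), e.g.:
+//   n := int64(0)
+//   err := (&VirtualCores{Assigned: &n}).Validate(strfmt.Default) // minimum violation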
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// VirtualCores virtual cores +// swagger:model VirtualCores +type VirtualCores struct { + + // The active virtual Cores + // Required: true + // Minimum: 1 + Assigned *int64 `json:"assigned"` + + // The maximum DLPAR range for virtual Cores (Display only support) + Max int64 `json:"max,omitempty"` + + // The minimum DLPAR range for virtual Cores (Display only support) + Min int64 `json:"min,omitempty"` +} + +// Validate validates this virtual cores +func (m *VirtualCores) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAssigned(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *VirtualCores) validateAssigned(formats strfmt.Registry) error { + + if err := validate.Required("assigned", "body", m.Assigned); err != nil { + return err + } + + if err := validate.MinimumInt("assigned", "body", int64(*m.Assigned), 1, false); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VirtualCores) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VirtualCores) UnmarshalBinary(b []byte) error { + var res VirtualCores + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volume.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volume.go new file mode 100644 index 00000000000..cd842d71edc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volume.go @@ -0,0 +1,169 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
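+// NOTE: Descriptive summary, not generated code: Volume below requires
+// creationDate and lastUpdateDate (both validated as RFC 3339 date-time),
+// name, size and volumeID; everything else is optional.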
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Indicates if the volume is the server's boot volume + BootVolume *bool `json:"bootVolume,omitempty"` + + // Indicates if the volume is boot capable + Bootable *bool `json:"bootable,omitempty"` + + // Creation Date + // Required: true + // Format: date-time + CreationDate *strfmt.DateTime `json:"creationDate"` + + // Indicates if the volume should be deleted when the server terminates + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + + // Type of Disk + DiskType string `json:"diskType,omitempty"` + + // Last Update Date + // Required: true + // Format: date-time + LastUpdateDate *strfmt.DateTime `json:"lastUpdateDate"` + + // Volume Name + // Required: true + Name *string `json:"name"` + + // List of PCloud PVM Instance attached to the volume + PvmInstanceIds []string `json:"pvmInstanceIDs"` + + // Indicates if the volume is shareable between VMs + Shareable *bool `json:"shareable,omitempty"` + + // Volume Size + // Required: true + Size *float64 `json:"size"` + + // Volume State + State string `json:"state,omitempty"` + + // Volume ID + // Required: true + VolumeID *string `json:"volumeID"` + + // Volume type, name of storage template used to create the volume + VolumeType string `json:"volumeType,omitempty"` + + // Volume world wide name + Wwn string `json:"wwn,omitempty"` +} + +// Validate validates this volume +func (m *Volume) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLastUpdateDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSize(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVolumeID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Volume) validateCreationDate(formats strfmt.Registry) error { + + if err := validate.Required("creationDate", "body", m.CreationDate); err != nil { + return err + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Volume) validateLastUpdateDate(formats strfmt.Registry) error { + + if err := validate.Required("lastUpdateDate", "body", m.LastUpdateDate); err != nil { + return err + } + + if err := validate.FormatOf("lastUpdateDate", "body", "date-time", m.LastUpdateDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Volume) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *Volume) validateSize(formats strfmt.Registry) error { + + if err := validate.Required("size", "body", m.Size); err != nil { + return err + } + + return nil +} + +func (m *Volume) validateVolumeID(formats strfmt.Registry) error { + + if err := validate.Required("volumeID", "body", m.VolumeID); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Volume) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Volume) UnmarshalBinary(b []byte) error { + var res Volume + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volume_info.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volume_info.go new file mode 100644 index 00000000000..b339b5feb80 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volume_info.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// VolumeInfo volume info +// swagger:model VolumeInfo +type VolumeInfo struct { + + // Name of the volume + Name string `json:"name,omitempty"` + + // ID of the volume + VolumeID string `json:"volumeID,omitempty"` +} + +// Validate validates this volume info +func (m *VolumeInfo) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *VolumeInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumeInfo) UnmarshalBinary(b []byte) error { + var res VolumeInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volume_reference.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volume_reference.go new file mode 100644 index 00000000000..ae20e65e375 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volume_reference.go @@ -0,0 +1,256 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// VolumeReference volume reference +// swagger:model VolumeReference +type VolumeReference struct { + + // Indicates if the volume is the server's boot volume + BootVolume *bool `json:"bootVolume,omitempty"` + + // Indicates if the volume is boot capable + // Required: true + Bootable *bool `json:"bootable"` + + // Creation Date + // Required: true + // Format: date-time + CreationDate *strfmt.DateTime `json:"creationDate"` + + // Indicates if the volume should be deleted when the server terminates + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + + // Type of Disk + // Required: true + DiskType *string `json:"diskType"` + + // Link to Volume resource + // Required: true + Href *string `json:"href"` + + // Last Update Date + // Required: true + // Format: date-time + LastUpdateDate *strfmt.DateTime `json:"lastUpdateDate"` + + // Volume Name + // Required: true + Name *string `json:"name"` + + // List of PCloud PVM Instance attached to the volume + PvmInstanceIds []string `json:"pvmInstanceIDs"` + + // Indicates if the volume is shareable between VMs + // Required: true + Shareable *bool `json:"shareable"` + + // Volume Size + // Required: true + Size *float64 `json:"size"` + + // Volume State + // Required: true + State *string `json:"state"` + + // Volume ID + // Required: true + VolumeID *string `json:"volumeID"` + + // Volume type, name of storage template used to create the volume + VolumeType string `json:"volumeType,omitempty"` + + // Volume world wide name + // Required: true + Wwn *string `json:"wwn"` +} + +// Validate validates this volume reference +func (m *VolumeReference) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateBootable(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDiskType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHref(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLastUpdateDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateShareable(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSize(formats); err != nil { + res = append(res, err) + } + + if err := m.validateState(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVolumeID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateWwn(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *VolumeReference) validateBootable(formats strfmt.Registry) error { + + if err := validate.Required("bootable", "body", m.Bootable); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateCreationDate(formats strfmt.Registry) error { + + if err := validate.Required("creationDate", "body", m.CreationDate); err != nil { + return err + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateDiskType(formats strfmt.Registry) error { + + if err := validate.Required("diskType", "body", m.DiskType); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateHref(formats strfmt.Registry) error { + + if err := validate.Required("href", "body", m.Href); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateLastUpdateDate(formats strfmt.Registry) error { + + if err := validate.Required("lastUpdateDate", "body", m.LastUpdateDate); err != nil { + return err + } + + if err := validate.FormatOf("lastUpdateDate", "body", "date-time", m.LastUpdateDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateShareable(formats strfmt.Registry) error { + + if err := validate.Required("shareable", "body", m.Shareable); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateSize(formats strfmt.Registry) error { + + if err := validate.Required("size", "body", m.Size); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateState(formats strfmt.Registry) error { + + if err := validate.Required("state", "body", m.State); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateVolumeID(formats strfmt.Registry) error { + + if err := validate.Required("volumeID", "body", m.VolumeID); err != nil { + return err + } + + return nil +} + +func (m *VolumeReference) validateWwn(formats strfmt.Registry) error { + + if err := validate.Required("wwn", "body", m.Wwn); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VolumeReference) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumeReference) UnmarshalBinary(b []byte) error { + var res VolumeReference + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes.go new file mode 100644 index 00000000000..477c6d34bd3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes.go @@ -0,0 +1,82 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
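+// Editorial note (illustrative, not generated code): Volumes wraps a required
+// slice of *VolumeReference, and Validate below recurses into every non-zero
+// element, prefixing nested failures with the element's index. A sketch, with
+// badRef standing in for some incomplete *VolumeReference:
+//
+//	vols := &Volumes{Volumes: []*VolumeReference{badRef}}
+//	err := vols.Validate(strfmt.Default)
+//	// err is reported under "volumes.0", pointing at the offending element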
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Volumes volumes +// swagger:model Volumes +type Volumes struct { + + // Volumes + // Required: true + Volumes []*VolumeReference `json:"volumes"` +} + +// Validate validates this volumes +func (m *Volumes) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVolumes(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Volumes) validateVolumes(formats strfmt.Registry) error { + + if err := validate.Required("volumes", "body", m.Volumes); err != nil { + return err + } + + for i := 0; i < len(m.Volumes); i++ { + if swag.IsZero(m.Volumes[i]) { // not required + continue + } + + if m.Volumes[i] != nil { + if err := m.Volumes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("volumes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Volumes) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Volumes) UnmarshalBinary(b []byte) error { + var res Volumes + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone.go new file mode 100644 index 00000000000..5a57acefd89 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone.go @@ -0,0 +1,121 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
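+// Editorial note (illustrative, not generated code): VolumesClone is the
+// status record of an asynchronous volumes-clone request; PercentComplete is
+// its only required field. A hypothetical polling check (the "completed"
+// status string is taken from the volumes-clone cancel documentation further
+// below):
+//
+//	if clone.Status == "completed" || *clone.PercentComplete == 100 {
+//		// the clone request has finished
+//	}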
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// VolumesClone volumes clone +// swagger:model VolumesClone +type VolumesClone struct { + + // Current action performed for the volumes-clone request + Action string `json:"action,omitempty"` + + // Creation Date + // Format: date-time + CreationDate strfmt.DateTime `json:"creationDate,omitempty"` + + // Failure reason for a failed volumes-clone request + FailureMessage string `json:"failureMessage,omitempty"` + + // Last Update Date + // Format: date-time + LastUpdateDate strfmt.DateTime `json:"lastUpdateDate,omitempty"` + + // Name assigned to a volumes-clone request + Name string `json:"name,omitempty"` + + // The percent completion for the current action + // Required: true + PercentComplete *int64 `json:"percentComplete"` + + // Current status of the volumes-clone request + Status string `json:"status,omitempty"` + + // ID assigned to a volumes-clone request + VolumesCloneID string `json:"volumesCloneID,omitempty"` +} + +// Validate validates this volumes clone +func (m *VolumesClone) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLastUpdateDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePercentComplete(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *VolumesClone) validateCreationDate(formats strfmt.Registry) error { + + if swag.IsZero(m.CreationDate) { // not required + return nil + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *VolumesClone) validateLastUpdateDate(formats strfmt.Registry) error { + + if swag.IsZero(m.LastUpdateDate) { // not required + return nil + } + + if err := validate.FormatOf("lastUpdateDate", "body", "date-time", m.LastUpdateDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *VolumesClone) validatePercentComplete(formats strfmt.Registry) error { + + if err := validate.Required("percentComplete", "body", m.PercentComplete); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VolumesClone) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumesClone) UnmarshalBinary(b []byte) error { + var res VolumesClone + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_async_request.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_async_request.go new file mode 100644 index 00000000000..6174a967e26 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_async_request.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
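+// Editorial note (illustrative, not generated code): per the Name field's
+// documentation below, a request named "volume-abcdef" produces clones named
+// "clone-volume-abcdef-<5 digit random>", further suffixed "-1", "-2", ...
+// when more than one volume is cloned. A minimal construction sketch with
+// made-up volume IDs:
+//
+//	name := "volume-abcdef"
+//	req := &VolumesCloneAsyncRequest{Name: &name, VolumeIds: []string{"vol-1", "vol-2"}}
+//	err := req.Validate(strfmt.Default) // fails if either required field is unset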
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// VolumesCloneAsyncRequest volumes clone async request +// swagger:model VolumesCloneAsyncRequest +type VolumesCloneAsyncRequest struct { + + // Base name of the new cloned volume(s). + // Cloned Volume names will be prefixed with 'clone-' + // and suffixed with ‘-#####’ (where ##### is a 5 digit random number) + // If multiple volumes cloned they will be further suffixed with an incremental number starting with 1. + // Example volume names using name="volume-abcdef" + // single volume clone will be named "clone-volume-abcdef-83081“ + // multi volume clone will be named "clone-volume-abcdef-73721-1”, "clone-volume-abcdef-73721-2”, ... + // + // Required: true + Name *string `json:"name"` + + // List of volumes to be cloned + // Required: true + VolumeIds []string `json:"volumeIDs"` +} + +// Validate validates this volumes clone async request +func (m *VolumesCloneAsyncRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVolumeIds(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *VolumesCloneAsyncRequest) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *VolumesCloneAsyncRequest) validateVolumeIds(formats strfmt.Registry) error { + + if err := validate.Required("volumeIDs", "body", m.VolumeIds); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VolumesCloneAsyncRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumesCloneAsyncRequest) UnmarshalBinary(b []byte) error { + var res VolumesCloneAsyncRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_cancel.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_cancel.go new file mode 100644 index 00000000000..7922a1605c9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_cancel.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
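+// Editorial note (illustrative, not generated code): read together, the clone
+// models suggest a prepare/execute/cancel lifecycle: VolumesCloneCreate names
+// a request, VolumesCloneExecute runs it (optionally rolling back the
+// prepared snapshot on failure), and VolumesCloneCancel below aborts it. By
+// default a cancel is honoured only while the request is still 'prepared' or
+// 'available'; with Force it is honoured at any point short of completed,
+// cancelling, cancelled, or failed:
+//
+//	cancel := &VolumesCloneCancel{Force: true}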
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// VolumesCloneCancel volumes clone cancel +// swagger:model VolumesCloneCancel +type VolumesCloneCancel struct { + + // default False, Cancel will only be allowed if the status is ‘prepared’, or ‘available’ + // True, Cancel will be allowed when the status is NOT completed, cancelling, cancelled, or failed + // + Force bool `json:"force,omitempty"` +} + +// Validate validates this volumes clone cancel +func (m *VolumesCloneCancel) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *VolumesCloneCancel) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumesCloneCancel) UnmarshalBinary(b []byte) error { + var res VolumesCloneCancel + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_create.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_create.go new file mode 100644 index 00000000000..64183ff17fa --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_create.go @@ -0,0 +1,83 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// VolumesCloneCreate volumes clone create +// swagger:model VolumesCloneCreate +type VolumesCloneCreate struct { + + // Unique name within a cloud instance used to identify a volumes-clone request + // name can be used in replace of a volumesCloneID when used as a URL path parameter + // + // Required: true + Name *string `json:"name"` + + // List of volumes to be cloned + // Required: true + VolumeIds []string `json:"volumeIDs"` +} + +// Validate validates this volumes clone create +func (m *VolumesCloneCreate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVolumeIds(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *VolumesCloneCreate) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *VolumesCloneCreate) validateVolumeIds(formats strfmt.Registry) error { + + if err := validate.Required("volumeIDs", "body", m.VolumeIds); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VolumesCloneCreate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumesCloneCreate) UnmarshalBinary(b []byte) error { + var res VolumesCloneCreate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_detail.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_detail.go new file mode 100644 index 00000000000..3e45cc24235 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_detail.go @@ -0,0 +1,155 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// VolumesCloneDetail volumes clone detail +// swagger:model VolumesCloneDetail +type VolumesCloneDetail struct { + + // Current action performed for the volumes-clone request + Action string `json:"action,omitempty"` + + // List of cloned volumes created from the volumes-clone request + ClonedVolumes []*ClonedVolumeDetail `json:"clonedVolumes"` + + // Creation Date + // Format: date-time + CreationDate strfmt.DateTime `json:"creationDate,omitempty"` + + // Failure reason for a failed volumes-clone request + FailureMessage string `json:"failureMessage,omitempty"` + + // Last Update Date + // Format: date-time + LastUpdateDate strfmt.DateTime `json:"lastUpdateDate,omitempty"` + + // Name assigned to a volumes-clone request + Name string `json:"name,omitempty"` + + // The percent completion for the current action + // Required: true + PercentComplete *int64 `json:"percentComplete"` + + // Current status of the volumes-clone request + Status string `json:"status,omitempty"` + + // ID assigned to a volumes-clone request + VolumesCloneID string `json:"volumesCloneID,omitempty"` +} + +// Validate validates this volumes clone detail +func (m *VolumesCloneDetail) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClonedVolumes(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCreationDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLastUpdateDate(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePercentComplete(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *VolumesCloneDetail) validateClonedVolumes(formats strfmt.Registry) error { + + if swag.IsZero(m.ClonedVolumes) { // not required + return nil + } + + for i := 0; i < len(m.ClonedVolumes); i++ { + if swag.IsZero(m.ClonedVolumes[i]) { // not required + continue + } + + if m.ClonedVolumes[i] != nil { + if err := m.ClonedVolumes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("clonedVolumes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *VolumesCloneDetail) validateCreationDate(formats strfmt.Registry) error { + + if swag.IsZero(m.CreationDate) { // not required + return nil + } + + if err := validate.FormatOf("creationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *VolumesCloneDetail) validateLastUpdateDate(formats strfmt.Registry) error { + + if swag.IsZero(m.LastUpdateDate) { // not required + return nil + } + + if err := validate.FormatOf("lastUpdateDate", "body", "date-time", m.LastUpdateDate.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *VolumesCloneDetail) validatePercentComplete(formats strfmt.Registry) error { + + if err := validate.Required("percentComplete", "body", m.PercentComplete); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VolumesCloneDetail) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumesCloneDetail) UnmarshalBinary(b []byte) error { + var res VolumesCloneDetail + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_execute.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_execute.go new file mode 100644 index 00000000000..14d9102b919 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_execute.go @@ -0,0 +1,76 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// VolumesCloneExecute volumes clone execute +// swagger:model VolumesCloneExecute +type VolumesCloneExecute struct { + + // Base name of the new cloned volume(s). + // Cloned Volume names will be prefixed with 'clone-' + // and suffixed with ‘-#####’ (where ##### is a 5 digit random number) + // If multiple volumes cloned they will be further suffixed with an incremental number starting with 1. + // Example volume names using name="volume-abcdef" + // single volume clone will be named "clone-volume-abcdef-83081“ + // multi volume clone will be named "clone-volume-abcdef-73721-1”, "clone-volume-abcdef-73721-2”, ... 
+ // + // Required: true + Name *string `json:"name"` + + // default False, Execute failure rolls back clone activity but leaves prepared snapshot + // True, Execute failure rolls back clone activity and removes the prepared snapshot + // + RollbackPrepare bool `json:"rollbackPrepare,omitempty"` +} + +// Validate validates this volumes clone execute +func (m *VolumesCloneExecute) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *VolumesCloneExecute) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VolumesCloneExecute) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumesCloneExecute) UnmarshalBinary(b []byte) error { + var res VolumesCloneExecute + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_request.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_request.go new file mode 100644 index 00000000000..2a4e7c3e69d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_request.go @@ -0,0 +1,87 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// VolumesCloneRequest volumes clone request +// swagger:model VolumesCloneRequest +type VolumesCloneRequest struct { + + // Display name for the new cloned volumes. + // Cloned Volume names will be prefixed with 'clone-'. + // If multiple volumes cloned they will be suffix with a '-' and an incremental number starting with 1. + // Example volume names using displayName="volume-abcdef" + // single volume clone will be named "clone-volume-abcdef" + // multi volume clone will be named "clone-volume-abcdef-1", "clone-volume-abcdef-2", ... + // + // Required: true + DisplayName *string `json:"displayName"` + + // List of volumes to be cloned + // Required: true + VolumeIds []string `json:"volumeIDs"` +} + +// Validate validates this volumes clone request +func (m *VolumesCloneRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDisplayName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVolumeIds(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *VolumesCloneRequest) validateDisplayName(formats strfmt.Registry) error { + + if err := validate.Required("displayName", "body", m.DisplayName); err != nil { + return err + } + + return nil +} + +func (m *VolumesCloneRequest) validateVolumeIds(formats strfmt.Registry) error { + + if err := validate.Required("volumeIDs", "body", m.VolumeIds); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VolumesCloneRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumesCloneRequest) UnmarshalBinary(b []byte) error { + var res VolumesCloneRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_response.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_response.go new file mode 100644 index 00000000000..96a2aad1ebd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clone_response.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/swag" +) + +// VolumesCloneResponse volumes clone response +// swagger:model VolumesCloneResponse +type VolumesCloneResponse struct { + + // ID of the new cloned volume + AdditionalProperties string `json:"additionalProperties,omitempty"` + + // A map of volume IDs to cloned volume IDs + ClonedVolumes interface{} `json:"clonedVolumes,omitempty"` +} + +// Validate validates this volumes clone response +func (m *VolumesCloneResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *VolumesCloneResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumesCloneResponse) UnmarshalBinary(b []byte) error { + var res VolumesCloneResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clones.go b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clones.go new file mode 100644 index 00000000000..3aa4aa23781 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/power/models/volumes_clones.go @@ -0,0 +1,80 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// VolumesClones volumes clones +// swagger:model VolumesClones +type VolumesClones struct { + + // list of volumes-clone requests + VolumesClone []*VolumesClone `json:"volumesClone"` +} + +// Validate validates this volumes clones +func (m *VolumesClones) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVolumesClone(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *VolumesClones) validateVolumesClone(formats strfmt.Registry) error { + + if swag.IsZero(m.VolumesClone) { // not required + return nil + } + + for i := 0; i < len(m.VolumesClone); i++ { + if swag.IsZero(m.VolumesClone[i]) { // not required + continue + } + + if m.VolumesClone[i] != nil { + if err := m.VolumesClone[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("volumesClone" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VolumesClones) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VolumesClones) UnmarshalBinary(b []byte) error { + var res VolumesClones + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/IBM-Cloud/power-go-client/utils/utils.go b/vendor/github.com/IBM-Cloud/power-go-client/utils/utils.go new file mode 100644 index 00000000000..d42a0700399 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/power-go-client/utils/utils.go @@ -0,0 +1,62 @@ +package utils + +import ( + "net/url" + "os" + "reflect" + + "github.com/IBM-Cloud/power-go-client/helpers" +) + +// GetNext ... +func GetNext(next interface{}) string { + if reflect.ValueOf(next).IsNil() { + return "" + } + + u, err := url.Parse(reflect.ValueOf(next).Elem().FieldByName("Href").Elem().String()) + if err != nil { + return "" + } + + q := u.Query() + return q.Get("start") +} + +// GetEndpoint ... +func GetEndpoint(generation int, regionName string) string { + + switch generation { + case 1: + ep := getGCEndpoint(regionName) + return helpers.EnvFallBack([]string{"IBMCLOUD_IS_API_ENDPOINT"}, ep) + case 2: + ep := getNGEndpoint(regionName) + return helpers.EnvFallBack([]string{"IBMCLOUD_IS_NG_API_ENDPOINT"}, ep) + } + ep := getNGEndpoint(regionName) + return helpers.EnvFallBack([]string{"IBMCLOUD_IS_NG_API_ENDPOINT"}, ep) +} + +func getGCEndpoint(regionName string) string { + if url := os.Getenv("IBMCLOUD_IS_API_ENDPOINT"); url != "" { + return url + } + return regionName + ".iaas.cloud.ibm.com" +} + +// For Power-IAAS +func getNGEndpoint(regionName string) string { + if url := os.Getenv("IBMCLOUD_IS_NG_API_ENDPOINT"); url != "" { + return url + } + return regionName + ".power-iaas.cloud.ibm.com" +} + +func GetPowerEndPoint(regionName string) string { + if url := os.Getenv("IBMCLOUD_POWER_API_ENDPOINT"); url != "" { + return url + } + return regionName + ".power-iaas.cloud.ibm.com" + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/LICENSE b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. 
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/config.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/config.go new file mode 100644 index 00000000000..437532a97a3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/config.go @@ -0,0 +1,2286 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "crypto/tls" + "errors" + "fmt" + "log" + "net" + gohttp "net/http" + "os" + "strings" + "time" + + // Added code for the Power Colo Offering + + apigateway "github.com/IBM/apigateway-go-sdk" + "github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1" + "github.com/IBM/container-registry-go-sdk/containerregistryv1" + "github.com/IBM/go-sdk-core/v4/core" + cosconfig "github.com/IBM/ibm-cos-sdk-go-config/resourceconfigurationv1" + kp "github.com/IBM/keyprotect-go-client" + ciscachev1 "github.com/IBM/networking-go-sdk/cachingapiv1" + cisipv1 "github.com/IBM/networking-go-sdk/cisipapiv1" + ciscustompagev1 "github.com/IBM/networking-go-sdk/custompagesv1" + dlProviderV2 "github.com/IBM/networking-go-sdk/directlinkproviderv2" + dl "github.com/IBM/networking-go-sdk/directlinkv1" + cisdnsbulkv1 "github.com/IBM/networking-go-sdk/dnsrecordbulkv1" + cisdnsrecordsv1 "github.com/IBM/networking-go-sdk/dnsrecordsv1" + dns "github.com/IBM/networking-go-sdk/dnssvcsv1" + cisedgefunctionv1 "github.com/IBM/networking-go-sdk/edgefunctionsapiv1" + cisglbhealthcheckv1 "github.com/IBM/networking-go-sdk/globalloadbalancermonitorv1" + cisglbpoolv0 "github.com/IBM/networking-go-sdk/globalloadbalancerpoolsv0" + cisglbv1 "github.com/IBM/networking-go-sdk/globalloadbalancerv1" + cispagerulev1 "github.com/IBM/networking-go-sdk/pageruleapiv1" + cisrangeappv1 "github.com/IBM/networking-go-sdk/rangeapplicationsv1" + cisroutingv1 "github.com/IBM/networking-go-sdk/routingv1" + cissslv1 "github.com/IBM/networking-go-sdk/sslcertificateapiv1" + tg "github.com/IBM/networking-go-sdk/transitgatewayapisv1" + cisuarulev1 "github.com/IBM/networking-go-sdk/useragentblockingrulesv1" + ciswafgroupv1 
"github.com/IBM/networking-go-sdk/wafrulegroupsapiv1" + ciswafpackagev1 "github.com/IBM/networking-go-sdk/wafrulepackagesapiv1" + ciswafrulev1 "github.com/IBM/networking-go-sdk/wafrulesapiv1" + cisaccessrulev1 "github.com/IBM/networking-go-sdk/zonefirewallaccessrulesv1" + cislockdownv1 "github.com/IBM/networking-go-sdk/zonelockdownv1" + cisratelimitv1 "github.com/IBM/networking-go-sdk/zoneratelimitsv1" + cisdomainsettingsv1 "github.com/IBM/networking-go-sdk/zonessettingsv1" + ciszonesv1 "github.com/IBM/networking-go-sdk/zonesv1" + "github.com/IBM/platform-services-go-sdk/catalogmanagementv1" + "github.com/IBM/platform-services-go-sdk/enterprisemanagementv1" + "github.com/IBM/platform-services-go-sdk/globaltaggingv1" + iamidentity "github.com/IBM/platform-services-go-sdk/iamidentityv1" + iampolicymanagement "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + resourcecontroller "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" + resourcemanager "github.com/IBM/platform-services-go-sdk/resourcemanagerv2" + "github.com/IBM/push-notifications-go-sdk/pushservicev1" + schematicsv1 "github.com/IBM/schematics-go-sdk/schematicsv1" + "github.com/IBM/secrets-manager-go-sdk/secretsmanagerv1" + vpcclassic "github.com/IBM/vpc-go-sdk/vpcclassicv1" + vpc "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/apache/openwhisk-client-go/whisk" + jwt "github.com/dgrijalva/jwt-go" + slsession "github.com/softlayer/softlayer-go/session" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/api/account/accountv1" + "github.com/IBM-Cloud/bluemix-go/api/account/accountv2" + "github.com/IBM-Cloud/bluemix-go/api/certificatemanager" + "github.com/IBM-Cloud/bluemix-go/api/cis/cisv1" + "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" + "github.com/IBM-Cloud/bluemix-go/api/container/containerv2" + "github.com/IBM-Cloud/bluemix-go/api/functions" + "github.com/IBM-Cloud/bluemix-go/api/globalsearch/globalsearchv2" + "github.com/IBM-Cloud/bluemix-go/api/globaltagging/globaltaggingv3" + "github.com/IBM-Cloud/bluemix-go/api/hpcs" + "github.com/IBM-Cloud/bluemix-go/api/iam/iamv1" + "github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2" + "github.com/IBM-Cloud/bluemix-go/api/icd/icdv4" + "github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2" + "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog" + "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/controller" + "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2" + "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2" + "github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2" + "github.com/IBM-Cloud/bluemix-go/authentication" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/http" + "github.com/IBM-Cloud/bluemix-go/rest" + bxsession "github.com/IBM-Cloud/bluemix-go/session" + ibmpisession "github.com/IBM-Cloud/power-go-client/ibmpisession" + "github.com/IBM-Cloud/terraform-provider-ibm/version" +) + +// RetryAPIDelay - retry api delay +const RetryAPIDelay = 5 * time.Second + +//BluemixRegion ... +var BluemixRegion string + +var ( + errEmptySoftLayerCredentials = errors.New("iaas_classic_username and iaas_classic_api_key must be provided. Please see the documentation on how to configure them") + errEmptyBluemixCredentials = errors.New("ibmcloud_api_key or bluemix_api_key or iam_token and iam_refresh_token must be provided. Please see the documentation on how to configure it") +) + +//UserConfig ... 
+type UserConfig struct {
+	userID      string
+	userEmail   string
+	userAccount string
+	cloudName   string `default:"bluemix"`
+	cloudType   string `default:"public"`
+	generation  int    `default:"2"`
+}
+
+//Config stores user-provided input
+type Config struct {
+	//BluemixAPIKey is the Bluemix api key
+	BluemixAPIKey string
+	//Bluemix region
+	Region string
+	//Resource group id
+	ResourceGroup string
+	//Bluemix API timeout
+	BluemixTimeout time.Duration
+
+	//Softlayer end point url
+	SoftLayerEndpointURL string
+
+	//Softlayer API timeout
+	SoftLayerTimeout time.Duration
+
+	// Softlayer User Name
+	SoftLayerUserName string
+
+	// Softlayer API Key
+	SoftLayerAPIKey string
+
+	//Retry Count for API calls
+	//Unexposed in the schema at this point as they are used only during session creation for a few calls
+	//When sdk implements it we can expose them for expected behaviour
+	//https://github.com/softlayer/softlayer-go/issues/41
+	RetryCount int
+	//Constant Retry Delay for API calls
+	RetryDelay time.Duration
+
+	// FunctionNameSpace ...
+	FunctionNameSpace string
+
+	//Riaas End point
+	RiaasEndPoint string
+
+	//Generation
+	Generation int
+
+	//IAM Token
+	IAMToken string
+
+	//IAM Refresh Token
+	IAMRefreshToken string
+
+	// PowerService Instance
+	PowerServiceInstance string
+
+	// Zone
+	Zone string
+	Visibility string
+}
+
+//Session stores the information required for communication with the SoftLayer and Bluemix API
+type Session struct {
+	// SoftLayerSession is the SoftLayer session used to connect to the SoftLayer API
+	SoftLayerSession *slsession.Session
+
+	// BluemixSession is the Bluemix session used to connect to the Bluemix API
+	BluemixSession *bxsession.Session
+}
+
+// ClientSession ...
+type ClientSession interface {
+	BluemixSession() (*bxsession.Session, error)
+	BluemixAcccountAPI() (accountv2.AccountServiceAPI, error)
+	BluemixAcccountv1API() (accountv1.AccountServiceAPI, error)
+	BluemixUserDetails() (*UserConfig, error)
+	ContainerAPI() (containerv1.ContainerServiceAPI, error)
+	VpcContainerAPI() (containerv2.ContainerServiceAPI, error)
+	ContainerRegistryV1() (*containerregistryv1.ContainerRegistryV1, error)
+	CisAPI() (cisv1.CisServiceAPI, error)
+	FunctionClient() (*whisk.Client, error)
+	GlobalSearchAPI() (globalsearchv2.GlobalSearchServiceAPI, error)
+	GlobalTaggingAPI() (globaltaggingv3.GlobalTaggingServiceAPI, error)
+	GlobalTaggingAPIv1() (globaltaggingv1.GlobalTaggingV1, error)
+	ICDAPI() (icdv4.ICDServiceAPI, error)
+	IAMAPI() (iamv1.IAMServiceAPI, error)
+	IAMPolicyManagementV1API() (*iampolicymanagement.IamPolicyManagementV1, error)
+	IAMUUMAPIV2() (iamuumv2.IAMUUMServiceAPIv2, error)
+	MccpAPI() (mccpv2.MccpServiceAPI, error)
+	ResourceCatalogAPI() (catalog.ResourceCatalogAPI, error)
+	ResourceManagementAPIv2() (managementv2.ResourceManagementAPIv2, error)
+	ResourceControllerAPI() (controller.ResourceControllerAPI, error)
+	ResourceControllerAPIV2() (controllerv2.ResourceControllerAPIV2, error)
+	SoftLayerSession() *slsession.Session
+	IBMPISession() (*ibmpisession.IBMPISession, error)
+	UserManagementAPI() (usermanagementv2.UserManagementAPI, error)
+	PushServiceV1() (*pushservicev1.PushServiceV1, error)
+	AppConfigurationV1() (*appconfigurationv1.AppConfigurationV1, error)
+	CertificateManagerAPI() (certificatemanager.CertificateManagerServiceAPI, error)
+	keyProtectAPI() (*kp.Client, error)
+	keyManagementAPI() (*kp.Client, error)
+	VpcClassicV1API() (*vpcclassic.VpcClassicV1, error)
+	VpcV1API() (*vpc.VpcV1, error)
+	APIGateway() 
(*apigateway.ApiGatewayControllerApiV1, error) + PrivateDNSClientSession() (*dns.DnsSvcsV1, error) + CosConfigV1API() (*cosconfig.ResourceConfigurationV1, error) + DirectlinkV1API() (*dl.DirectLinkV1, error) + DirectlinkProviderV2API() (*dlProviderV2.DirectLinkProviderV2, error) + TransitGatewayV1API() (*tg.TransitGatewayApisV1, error) + HpcsEndpointAPI() (hpcs.HPCSV2, error) + FunctionIAMNamespaceAPI() (functions.FunctionServiceAPI, error) + CisZonesV1ClientSession() (*ciszonesv1.ZonesV1, error) + CisDNSRecordClientSession() (*cisdnsrecordsv1.DnsRecordsV1, error) + CisDNSRecordBulkClientSession() (*cisdnsbulkv1.DnsRecordBulkV1, error) + CisGLBClientSession() (*cisglbv1.GlobalLoadBalancerV1, error) + CisGLBPoolClientSession() (*cisglbpoolv0.GlobalLoadBalancerPoolsV0, error) + CisGLBHealthCheckClientSession() (*cisglbhealthcheckv1.GlobalLoadBalancerMonitorV1, error) + CisIPClientSession() (*cisipv1.CisIpApiV1, error) + CisPageRuleClientSession() (*cispagerulev1.PageRuleApiV1, error) + CisRLClientSession() (*cisratelimitv1.ZoneRateLimitsV1, error) + CisEdgeFunctionClientSession() (*cisedgefunctionv1.EdgeFunctionsApiV1, error) + CisSSLClientSession() (*cissslv1.SslCertificateApiV1, error) + CisWAFPackageClientSession() (*ciswafpackagev1.WafRulePackagesApiV1, error) + CisDomainSettingsClientSession() (*cisdomainsettingsv1.ZonesSettingsV1, error) + CisRoutingClientSession() (*cisroutingv1.RoutingV1, error) + CisWAFGroupClientSession() (*ciswafgroupv1.WafRuleGroupsApiV1, error) + CisCacheClientSession() (*ciscachev1.CachingApiV1, error) + CisCustomPageClientSession() (*ciscustompagev1.CustomPagesV1, error) + CisAccessRuleClientSession() (*cisaccessrulev1.ZoneFirewallAccessRulesV1, error) + CisUARuleClientSession() (*cisuarulev1.UserAgentBlockingRulesV1, error) + CisLockdownClientSession() (*cislockdownv1.ZoneLockdownV1, error) + CisRangeAppClientSession() (*cisrangeappv1.RangeApplicationsV1, error) + CisWAFRuleClientSession() (*ciswafrulev1.WafRulesApiV1, error) + IAMIdentityV1API() (*iamidentity.IamIdentityV1, error) + ResourceManagerV2API() (*resourcemanager.ResourceManagerV2, error) + CatalogManagementV1() (*catalogmanagementv1.CatalogManagementV1, error) + EnterpriseManagementV1() (*enterprisemanagementv1.EnterpriseManagementV1, error) + ResourceControllerV2API() (*resourcecontroller.ResourceControllerV2, error) + SecretsManagerV1() (*secretsmanagerv1.SecretsManagerV1, error) + SchematicsV1() (*schematicsv1.SchematicsV1, error) +} + +type clientSession struct { + session *Session + + apigatewayErr error + apigatewayAPI *apigateway.ApiGatewayControllerApiV1 + + accountConfigErr error + bmxAccountServiceAPI accountv2.AccountServiceAPI + + accountV1ConfigErr error + bmxAccountv1ServiceAPI accountv1.AccountServiceAPI + + bmxUserDetails *UserConfig + bmxUserFetchErr error + + csConfigErr error + csServiceAPI containerv1.ContainerServiceAPI + + csv2ConfigErr error + csv2ServiceAPI containerv2.ContainerServiceAPI + + containerRegistryClientErr error + containerRegistryClient *containerregistryv1.ContainerRegistryV1 + + certManagementErr error + certManagementAPI certificatemanager.CertificateManagerServiceAPI + + cfConfigErr error + cfServiceAPI mccpv2.MccpServiceAPI + + cisConfigErr error + cisServiceAPI cisv1.CisServiceAPI + + functionConfigErr error + functionClient *whisk.Client + + globalSearchConfigErr error + globalSearchServiceAPI globalsearchv2.GlobalSearchServiceAPI + + globalTaggingConfigErr error + globalTaggingServiceAPI globaltaggingv3.GlobalTaggingServiceAPI + + 
globalTaggingConfigErrV1 error + globalTaggingServiceAPIV1 globaltaggingv1.GlobalTaggingV1 + + iamUUMConfigErrV2 error + iamUUMServiceAPIV2 iamuumv2.IAMUUMServiceAPIv2 + + iamConfigErr error + iamServiceAPI iamv1.IAMServiceAPI + + userManagementErr error + userManagementAPI usermanagementv2.UserManagementAPI + + icdConfigErr error + icdServiceAPI icdv4.ICDServiceAPI + + resourceControllerConfigErr error + resourceControllerServiceAPI controller.ResourceControllerAPI + + resourceControllerConfigErrv2 error + resourceControllerServiceAPIv2 controllerv2.ResourceControllerAPIV2 + + resourceManagementConfigErrv2 error + resourceManagementServiceAPIv2 managementv2.ResourceManagementAPIv2 + + resourceCatalogConfigErr error + resourceCatalogServiceAPI catalog.ResourceCatalogAPI + + powerConfigErr error + ibmpiConfigErr error + ibmpiSession *ibmpisession.IBMPISession + + kpErr error + kpAPI *kp.API + + kmsErr error + kmsAPI *kp.API + + hpcsEndpointErr error + hpcsEndpointAPI hpcs.HPCSV2 + + pDNSClient *dns.DnsSvcsV1 + pDNSErr error + + bluemixSessionErr error + + pushServiceClient *pushservicev1.PushServiceV1 + pushServiceClientErr error + + appConfigurationClient *appconfigurationv1.AppConfigurationV1 + appConfigurationClientErr error + + vpcClassicErr error + vpcClassicAPI *vpcclassic.VpcClassicV1 + + vpcErr error + vpcAPI *vpc.VpcV1 + + directlinkAPI *dl.DirectLinkV1 + directlinkErr error + dlProviderAPI *dlProviderV2.DirectLinkProviderV2 + dlProviderErr error + + cosConfigErr error + cosConfigAPI *cosconfig.ResourceConfigurationV1 + + transitgatewayAPI *tg.TransitGatewayApisV1 + transitgatewayErr error + + functionIAMNamespaceAPI functions.FunctionServiceAPI + functionIAMNamespaceErr error + + // CIS Zones + cisZonesErr error + cisZonesV1Client *ciszonesv1.ZonesV1 + + // CIS dns service options + cisDNSErr error + cisDNSRecordsClient *cisdnsrecordsv1.DnsRecordsV1 + + // CIS dns bulk service options + cisDNSBulkErr error + cisDNSRecordBulkClient *cisdnsbulkv1.DnsRecordBulkV1 + + // CIS Global Load Balancer Pool service options + cisGLBPoolErr error + cisGLBPoolClient *cisglbpoolv0.GlobalLoadBalancerPoolsV0 + + // CIS GLB service options + cisGLBErr error + cisGLBClient *cisglbv1.GlobalLoadBalancerV1 + + // CIS GLB health check service options + cisGLBHealthCheckErr error + cisGLBHealthCheckClient *cisglbhealthcheckv1.GlobalLoadBalancerMonitorV1 + + // CIS IP service options + cisIPErr error + cisIPClient *cisipv1.CisIpApiV1 + + // CIS Zone Rate Limits service options + cisRLErr error + cisRLClient *cisratelimitv1.ZoneRateLimitsV1 + + // CIS Page Rules service options + cisPageRuleErr error + cisPageRuleClient *cispagerulev1.PageRuleApiV1 + + // CIS Edge Functions service options + cisEdgeFunctionErr error + cisEdgeFunctionClient *cisedgefunctionv1.EdgeFunctionsApiV1 + + // CIS SSL certificate service options + cisSSLErr error + cisSSLClient *cissslv1.SslCertificateApiV1 + + // CIS WAF Package service options + cisWAFPackageErr error + cisWAFPackageClient *ciswafpackagev1.WafRulePackagesApiV1 + + // CIS Zone Setting service options + cisDomainSettingsErr error + cisDomainSettingsClient *cisdomainsettingsv1.ZonesSettingsV1 + + // CIS Routing service options + cisRoutingErr error + cisRoutingClient *cisroutingv1.RoutingV1 + + // CIS WAF Group service options + cisWAFGroupErr error + cisWAFGroupClient *ciswafgroupv1.WafRuleGroupsApiV1 + + // CIS Caching service options + cisCacheErr error + cisCacheClient *ciscachev1.CachingApiV1 + + // CIS Custom Pages service options + cisCustomPageErr error + 
cisCustomPageClient *ciscustompagev1.CustomPagesV1
+
+	// CIS Firewall Access rule service option
+	cisAccessRuleErr    error
+	cisAccessRuleClient *cisaccessrulev1.ZoneFirewallAccessRulesV1
+
+	// CIS User Agent Blocking Rule service option
+	cisUARuleErr    error
+	cisUARuleClient *cisuarulev1.UserAgentBlockingRulesV1
+
+	// CIS Firewall Lockdown Rule service option
+	cisLockdownErr    error
+	cisLockdownClient *cislockdownv1.ZoneLockdownV1
+
+	// CIS Range app service option
+	cisRangeAppErr    error
+	cisRangeAppClient *cisrangeappv1.RangeApplicationsV1
+
+	// CIS WAF rule service options
+	cisWAFRuleErr    error
+	cisWAFRuleClient *ciswafrulev1.WafRulesApiV1
+	//IAM Identity Option
+	iamIdentityErr error
+	iamIdentityAPI *iamidentity.IamIdentityV1
+
+	//Resource Manager Option
+	resourceManagerErr error
+	resourceManagerAPI *resourcemanager.ResourceManagerV2
+
+	//Catalog Management Option
+	catalogManagementClient    *catalogmanagementv1.CatalogManagementV1
+	catalogManagementClientErr error
+
+	enterpriseManagementClient    *enterprisemanagementv1.EnterpriseManagementV1
+	enterpriseManagementClientErr error
+
+	//Resource Controller Option
+	resourceControllerErr   error
+	resourceControllerAPI   *resourcecontroller.ResourceControllerV2
+	secretsManagerClient    *secretsmanagerv1.SecretsManagerV1
+	secretsManagerClientErr error
+
+	// Schematics service options
+	schematicsClient    *schematicsv1.SchematicsV1
+	schematicsClientErr error
+
+	//IAM Policy Management
+	iamPolicyManagementErr error
+	iamPolicyManagementAPI *iampolicymanagement.IamPolicyManagementV1
+}
+
+func (session clientSession) CatalogManagementV1() (*catalogmanagementv1.CatalogManagementV1, error) {
+	return session.catalogManagementClient, session.catalogManagementClientErr
+}
+
+// BluemixAcccountAPI ...
+func (sess clientSession) BluemixAcccountAPI() (accountv2.AccountServiceAPI, error) {
+	return sess.bmxAccountServiceAPI, sess.accountConfigErr
+}
+
+// BluemixAcccountv1API ...
+func (sess clientSession) BluemixAcccountv1API() (accountv1.AccountServiceAPI, error) {
+	return sess.bmxAccountv1ServiceAPI, sess.accountV1ConfigErr
+}
+
+// BluemixSession to provide the Bluemix Session
+func (sess clientSession) BluemixSession() (*bxsession.Session, error) {
+	return sess.session.BluemixSession, sess.bluemixSessionErr
+}
+
+// BluemixUserDetails ...
+func (sess clientSession) BluemixUserDetails() (*UserConfig, error) {
+	return sess.bmxUserDetails, sess.bmxUserFetchErr
+}
+
+// ContainerAPI provides Container Service APIs ...
+func (sess clientSession) ContainerAPI() (containerv1.ContainerServiceAPI, error) {
+	return sess.csServiceAPI, sess.csConfigErr
+}
+
+// VpcContainerAPI provides v2Container Service APIs ...
+func (sess clientSession) VpcContainerAPI() (containerv2.ContainerServiceAPI, error) {
+	return sess.csv2ServiceAPI, sess.csv2ConfigErr
+}
+
+// ContainerRegistryV1 provides Container Registry Service APIs ...
+func (session clientSession) ContainerRegistryV1() (*containerregistryv1.ContainerRegistryV1, error) {
+	return session.containerRegistryClient, session.containerRegistryClientErr
+}
+
+// SchematicsV1 provides Schematics Service APIs ...
+func (sess clientSession) SchematicsV1() (*schematicsv1.SchematicsV1, error) {
+	return sess.schematicsClient, sess.schematicsClientErr
+}
+
+// CisAPI provides Cloud Internet Services APIs ...
+func (sess clientSession) CisAPI() (cisv1.CisServiceAPI, error) {
+	return sess.cisServiceAPI, sess.cisConfigErr
+}
+
+// FunctionClient ... 
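+// FunctionClient returns the IBM Cloud Functions (OpenWhisk) client, or the
+// error recorded while configuring it. As with the other accessors, callers
+// are expected to check the returned error before use; illustrative sketch
+// (not part of the upstream docs):
+//
+//	client, err := meta.(ClientSession).FunctionClient()
+//	if err != nil {
+//		return err
+//	}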
+func (sess clientSession) FunctionClient() (*whisk.Client, error) {
+	return sess.functionClient, sess.functionConfigErr
+}
+
+// GlobalSearchAPI provides Global Search APIs ...
+func (sess clientSession) GlobalSearchAPI() (globalsearchv2.GlobalSearchServiceAPI, error) {
+	return sess.globalSearchServiceAPI, sess.globalSearchConfigErr
+}
+
+// GlobalTaggingAPI provides Global Tagging APIs ...
+func (sess clientSession) GlobalTaggingAPI() (globaltaggingv3.GlobalTaggingServiceAPI, error) {
+	return sess.globalTaggingServiceAPI, sess.globalTaggingConfigErr
+}
+
+// GlobalTaggingAPIv1 provides Platform-go Global Tagging APIs ...
+func (sess clientSession) GlobalTaggingAPIv1() (globaltaggingv1.GlobalTaggingV1, error) {
+	return sess.globalTaggingServiceAPIV1, sess.globalTaggingConfigErrV1
+}
+
+// HpcsEndpointAPI provides Hpcs Endpoint generator APIs ...
+func (sess clientSession) HpcsEndpointAPI() (hpcs.HPCSV2, error) {
+	return sess.hpcsEndpointAPI, sess.hpcsEndpointErr
+}
+
+// IAMAPI provides IAM PAP APIs ...
+func (sess clientSession) IAMAPI() (iamv1.IAMServiceAPI, error) {
+	return sess.iamServiceAPI, sess.iamConfigErr
+}
+
+// UserManagementAPI provides User management APIs ...
+func (sess clientSession) UserManagementAPI() (usermanagementv2.UserManagementAPI, error) {
+	return sess.userManagementAPI, sess.userManagementErr
+}
+
+// IAM Policy Management
+func (sess clientSession) IAMPolicyManagementV1API() (*iampolicymanagement.IamPolicyManagementV1, error) {
+	return sess.iamPolicyManagementAPI, sess.iamPolicyManagementErr
+}
+
+// IAMUUMAPIV2 provides IAM UUM APIs ...
+func (sess clientSession) IAMUUMAPIV2() (iamuumv2.IAMUUMServiceAPIv2, error) {
+	return sess.iamUUMServiceAPIV2, sess.iamUUMConfigErrV2
+}
+
+// ICDAPI provides IBM Cloud Databases APIs ...
+func (sess clientSession) ICDAPI() (icdv4.ICDServiceAPI, error) {
+	return sess.icdServiceAPI, sess.icdConfigErr
+}
+
+// MccpAPI provides Multi Cloud Controller Proxy APIs ...
+func (sess clientSession) MccpAPI() (mccpv2.MccpServiceAPI, error) {
+	return sess.cfServiceAPI, sess.cfConfigErr
+}
+
+// ResourceCatalogAPI ...
+func (sess clientSession) ResourceCatalogAPI() (catalog.ResourceCatalogAPI, error) {
+	return sess.resourceCatalogServiceAPI, sess.resourceCatalogConfigErr
+}
+
+// ResourceManagementAPIv2 ...
+func (sess clientSession) ResourceManagementAPIv2() (managementv2.ResourceManagementAPIv2, error) {
+	return sess.resourceManagementServiceAPIv2, sess.resourceManagementConfigErrv2
+}
+
+// ResourceControllerAPI ...
+func (sess clientSession) ResourceControllerAPI() (controller.ResourceControllerAPI, error) {
+	return sess.resourceControllerServiceAPI, sess.resourceControllerConfigErr
+}
+
+// ResourceControllerAPIV2 ...
+func (sess clientSession) ResourceControllerAPIV2() (controllerv2.ResourceControllerAPIV2, error) {
+	return sess.resourceControllerServiceAPIv2, sess.resourceControllerConfigErrv2
+}
+
+// SoftLayerSession provides the SoftLayer session
+func (sess clientSession) SoftLayerSession() *slsession.Session {
+	return sess.session.SoftLayerSession
+}
+
+// CertificateManagerAPI provides Certificate Manager APIs ... 
+func (sess clientSession) CertificateManagerAPI() (certificatemanager.CertificateManagerServiceAPI, error) { + return sess.certManagementAPI, sess.certManagementErr +} + +//apigatewayAPI provides API Gateway APIs +func (sess clientSession) APIGateway() (*apigateway.ApiGatewayControllerApiV1, error) { + return sess.apigatewayAPI, sess.apigatewayErr +} + +func (session clientSession) PushServiceV1() (*pushservicev1.PushServiceV1, error) { + return session.pushServiceClient, session.pushServiceClientErr +} + +func (session clientSession) AppConfigurationV1() (*appconfigurationv1.AppConfigurationV1, error) { + return session.appConfigurationClient, session.appConfigurationClientErr +} + +func (sess clientSession) keyProtectAPI() (*kp.Client, error) { + return sess.kpAPI, sess.kpErr +} + +func (sess clientSession) keyManagementAPI() (*kp.Client, error) { + return sess.kmsAPI, sess.kmsErr +} + +func (sess clientSession) VpcClassicV1API() (*vpcclassic.VpcClassicV1, error) { + return sess.vpcClassicAPI, sess.vpcClassicErr +} + +func (sess clientSession) VpcV1API() (*vpc.VpcV1, error) { + return sess.vpcAPI, sess.vpcErr +} + +func (sess clientSession) DirectlinkV1API() (*dl.DirectLinkV1, error) { + return sess.directlinkAPI, sess.directlinkErr +} +func (sess clientSession) DirectlinkProviderV2API() (*dlProviderV2.DirectLinkProviderV2, error) { + return sess.dlProviderAPI, sess.dlProviderErr +} +func (sess clientSession) CosConfigV1API() (*cosconfig.ResourceConfigurationV1, error) { + return sess.cosConfigAPI, sess.cosConfigErr +} + +func (sess clientSession) TransitGatewayV1API() (*tg.TransitGatewayApisV1, error) { + return sess.transitgatewayAPI, sess.transitgatewayErr +} + +// Session to the Power Colo Service + +func (sess clientSession) IBMPISession() (*ibmpisession.IBMPISession, error) { + return sess.ibmpiSession, sess.powerConfigErr +} + +// Private DNS Service + +func (sess clientSession) PrivateDNSClientSession() (*dns.DnsSvcsV1, error) { + return sess.pDNSClient, sess.pDNSErr +} + +// Session to the Namespace cloud function + +func (sess clientSession) FunctionIAMNamespaceAPI() (functions.FunctionServiceAPI, error) { + return sess.functionIAMNamespaceAPI, sess.functionIAMNamespaceErr +} + +// CIS Zones Service +func (sess clientSession) CisZonesV1ClientSession() (*ciszonesv1.ZonesV1, error) { + if sess.cisZonesErr != nil { + return sess.cisZonesV1Client, sess.cisZonesErr + } + return sess.cisZonesV1Client.Clone(), nil +} + +// CIS DNS Service +func (sess clientSession) CisDNSRecordClientSession() (*cisdnsrecordsv1.DnsRecordsV1, error) { + if sess.cisDNSErr != nil { + return sess.cisDNSRecordsClient, sess.cisDNSErr + } + return sess.cisDNSRecordsClient.Clone(), nil +} + +// CIS DNS Bulk Service +func (sess clientSession) CisDNSRecordBulkClientSession() (*cisdnsbulkv1.DnsRecordBulkV1, error) { + if sess.cisDNSBulkErr != nil { + return sess.cisDNSRecordBulkClient, sess.cisDNSBulkErr + } + return sess.cisDNSRecordBulkClient.Clone(), nil +} + +// CIS GLB Pool +func (sess clientSession) CisGLBPoolClientSession() (*cisglbpoolv0.GlobalLoadBalancerPoolsV0, error) { + if sess.cisGLBPoolErr != nil { + return sess.cisGLBPoolClient, sess.cisGLBPoolErr + } + return sess.cisGLBPoolClient.Clone(), nil +} + +// CIS GLB +func (sess clientSession) CisGLBClientSession() (*cisglbv1.GlobalLoadBalancerV1, error) { + if sess.cisGLBErr != nil { + return sess.cisGLBClient, sess.cisGLBErr + } + return sess.cisGLBClient.Clone(), nil +} + +// CIS GLB Health Check/Monitor +func (sess clientSession) 
CisGLBHealthCheckClientSession() (*cisglbhealthcheckv1.GlobalLoadBalancerMonitorV1, error) { + if sess.cisGLBHealthCheckErr != nil { + return sess.cisGLBHealthCheckClient, sess.cisGLBHealthCheckErr + } + return sess.cisGLBHealthCheckClient.Clone(), nil +} + +// CIS Zone Rate Limits +func (sess clientSession) CisRLClientSession() (*cisratelimitv1.ZoneRateLimitsV1, error) { + if sess.cisRLErr != nil { + return sess.cisRLClient, sess.cisRLErr + } + return sess.cisRLClient.Clone(), nil +} + +// CIS IP +func (sess clientSession) CisIPClientSession() (*cisipv1.CisIpApiV1, error) { + if sess.cisIPErr != nil { + return sess.cisIPClient, sess.cisIPErr + } + return sess.cisIPClient.Clone(), nil +} + +// CIS Page Rules +func (sess clientSession) CisPageRuleClientSession() (*cispagerulev1.PageRuleApiV1, error) { + if sess.cisPageRuleErr != nil { + return sess.cisPageRuleClient, sess.cisPageRuleErr + } + return sess.cisPageRuleClient.Clone(), nil +} + +// CIS Edge Function +func (sess clientSession) CisEdgeFunctionClientSession() (*cisedgefunctionv1.EdgeFunctionsApiV1, error) { + if sess.cisEdgeFunctionErr != nil { + return sess.cisEdgeFunctionClient, sess.cisEdgeFunctionErr + } + return sess.cisEdgeFunctionClient.Clone(), nil +} + +// CIS SSL certificate +func (sess clientSession) CisSSLClientSession() (*cissslv1.SslCertificateApiV1, error) { + if sess.cisSSLErr != nil { + return sess.cisSSLClient, sess.cisSSLErr + } + return sess.cisSSLClient.Clone(), nil +} + +// CIS WAF Packages +func (sess clientSession) CisWAFPackageClientSession() (*ciswafpackagev1.WafRulePackagesApiV1, error) { + if sess.cisWAFPackageErr != nil { + return sess.cisWAFPackageClient, sess.cisWAFPackageErr + } + return sess.cisWAFPackageClient.Clone(), nil +} + +// CIS Zone Settings +func (sess clientSession) CisDomainSettingsClientSession() (*cisdomainsettingsv1.ZonesSettingsV1, error) { + if sess.cisDomainSettingsErr != nil { + return sess.cisDomainSettingsClient, sess.cisDomainSettingsErr + } + return sess.cisDomainSettingsClient.Clone(), nil +} + +// CIS Routing +func (sess clientSession) CisRoutingClientSession() (*cisroutingv1.RoutingV1, error) { + if sess.cisRoutingErr != nil { + return sess.cisRoutingClient, sess.cisRoutingErr + } + return sess.cisRoutingClient.Clone(), nil +} + +// CIS WAF Group +func (sess clientSession) CisWAFGroupClientSession() (*ciswafgroupv1.WafRuleGroupsApiV1, error) { + if sess.cisWAFGroupErr != nil { + return sess.cisWAFGroupClient, sess.cisWAFGroupErr + } + return sess.cisWAFGroupClient.Clone(), nil +} + +// CIS Cache service +func (sess clientSession) CisCacheClientSession() (*ciscachev1.CachingApiV1, error) { + if sess.cisCacheErr != nil { + return sess.cisCacheClient, sess.cisCacheErr + } + return sess.cisCacheClient.Clone(), nil +} + +// CIS Zone Settings +func (sess clientSession) CisCustomPageClientSession() (*ciscustompagev1.CustomPagesV1, error) { + if sess.cisCustomPageErr != nil { + return sess.cisCustomPageClient, sess.cisCustomPageErr + } + return sess.cisCustomPageClient.Clone(), nil +} + +// CIS Firewall access rule +func (sess clientSession) CisAccessRuleClientSession() (*cisaccessrulev1.ZoneFirewallAccessRulesV1, error) { + if sess.cisAccessRuleErr != nil { + return sess.cisAccessRuleClient, sess.cisAccessRuleErr + } + return sess.cisAccessRuleClient.Clone(), nil +} + +// CIS User Agent Blocking rule +func (sess clientSession) CisUARuleClientSession() (*cisuarulev1.UserAgentBlockingRulesV1, error) { + if sess.cisUARuleErr != nil { + return sess.cisUARuleClient, sess.cisUARuleErr + 
} + return sess.cisUARuleClient.Clone(), nil +} + +// CIS Firewall Lockdown rule +func (sess clientSession) CisLockdownClientSession() (*cislockdownv1.ZoneLockdownV1, error) { + if sess.cisLockdownErr != nil { + return sess.cisLockdownClient, sess.cisLockdownErr + } + return sess.cisLockdownClient.Clone(), nil +} + +// CIS Range app rule +func (sess clientSession) CisRangeAppClientSession() (*cisrangeappv1.RangeApplicationsV1, error) { + if sess.cisRangeAppErr != nil { + return sess.cisRangeAppClient, sess.cisRangeAppErr + } + return sess.cisRangeAppClient.Clone(), nil +} + +// CIS WAF Rule +func (sess clientSession) CisWAFRuleClientSession() (*ciswafrulev1.WafRulesApiV1, error) { + if sess.cisWAFRuleErr != nil { + return sess.cisWAFRuleClient, sess.cisWAFRuleErr + } + return sess.cisWAFRuleClient.Clone(), nil +} + +// IAM Identity Session +func (sess clientSession) IAMIdentityV1API() (*iamidentity.IamIdentityV1, error) { + return sess.iamIdentityAPI, sess.iamIdentityErr +} + +// ResourceMAanger Session +func (sess clientSession) ResourceManagerV2API() (*resourcemanager.ResourceManagerV2, error) { + return sess.resourceManagerAPI, sess.resourceManagerErr +} + +func (session clientSession) EnterpriseManagementV1() (*enterprisemanagementv1.EnterpriseManagementV1, error) { + return session.enterpriseManagementClient, session.enterpriseManagementClientErr +} + +// ResourceController Session +func (sess clientSession) ResourceControllerV2API() (*resourcecontroller.ResourceControllerV2, error) { + return sess.resourceControllerAPI, sess.resourceControllerErr +} + +// SecretsManager Session +func (session clientSession) SecretsManagerV1() (*secretsmanagerv1.SecretsManagerV1, error) { + return session.secretsManagerClient, session.secretsManagerClientErr +} + +var cloudEndpoint = "cloud.ibm.com" + +// ClientSession configures and returns a fully initialized ClientSession +func (c *Config) ClientSession() (interface{}, error) { + sess, err := newSession(c) + if err != nil { + return nil, err + } + log.Printf("[INFO] Configured Region: %s\n", c.Region) + session := clientSession{ + session: sess, + } + + if sess.BluemixSession == nil { + //Can be nil only if bluemix_api_key is not provided + log.Println("Skipping Bluemix Clients configuration") + session.bluemixSessionErr = errEmptyBluemixCredentials + session.accountConfigErr = errEmptyBluemixCredentials + session.accountV1ConfigErr = errEmptyBluemixCredentials + session.csConfigErr = errEmptyBluemixCredentials + session.csv2ConfigErr = errEmptyBluemixCredentials + session.containerRegistryClientErr = errEmptyBluemixCredentials + session.kpErr = errEmptyBluemixCredentials + session.pushServiceClientErr = errEmptyBluemixCredentials + session.appConfigurationClientErr = errEmptyBluemixCredentials + session.kmsErr = errEmptyBluemixCredentials + session.cfConfigErr = errEmptyBluemixCredentials + session.cisConfigErr = errEmptyBluemixCredentials + session.functionConfigErr = errEmptyBluemixCredentials + session.globalSearchConfigErr = errEmptyBluemixCredentials + session.globalTaggingConfigErr = errEmptyBluemixCredentials + session.globalTaggingConfigErrV1 = errEmptyBluemixCredentials + session.hpcsEndpointErr = errEmptyBluemixCredentials + session.iamConfigErr = errEmptyBluemixCredentials + session.iamUUMConfigErrV2 = errEmptyBluemixCredentials + session.icdConfigErr = errEmptyBluemixCredentials + session.resourceCatalogConfigErr = errEmptyBluemixCredentials + session.resourceManagerErr = errEmptyBluemixCredentials + 
session.resourceManagementConfigErrv2 = errEmptyBluemixCredentials + session.resourceControllerConfigErr = errEmptyBluemixCredentials + session.resourceControllerConfigErrv2 = errEmptyBluemixCredentials + session.enterpriseManagementClientErr = errEmptyBluemixCredentials + session.resourceControllerErr = errEmptyBluemixCredentials + session.catalogManagementClientErr = errEmptyBluemixCredentials + session.powerConfigErr = errEmptyBluemixCredentials + session.ibmpiConfigErr = errEmptyBluemixCredentials + session.userManagementErr = errEmptyBluemixCredentials + session.certManagementErr = errEmptyBluemixCredentials + session.vpcClassicErr = errEmptyBluemixCredentials + session.vpcErr = errEmptyBluemixCredentials + session.apigatewayErr = errEmptyBluemixCredentials + session.pDNSErr = errEmptyBluemixCredentials + session.bmxUserFetchErr = errEmptyBluemixCredentials + session.directlinkErr = errEmptyBluemixCredentials + session.dlProviderErr = errEmptyBluemixCredentials + session.cosConfigErr = errEmptyBluemixCredentials + session.transitgatewayErr = errEmptyBluemixCredentials + session.functionIAMNamespaceErr = errEmptyBluemixCredentials + session.cisDNSErr = errEmptyBluemixCredentials + session.cisDNSBulkErr = errEmptyBluemixCredentials + session.cisGLBPoolErr = errEmptyBluemixCredentials + session.cisGLBErr = errEmptyBluemixCredentials + session.cisGLBHealthCheckErr = errEmptyBluemixCredentials + session.cisIPErr = errEmptyBluemixCredentials + session.cisZonesErr = errEmptyBluemixCredentials + session.cisRLErr = errEmptyBluemixCredentials + session.cisPageRuleErr = errEmptyBluemixCredentials + session.cisEdgeFunctionErr = errEmptyBluemixCredentials + session.cisSSLErr = errEmptyBluemixCredentials + session.cisWAFPackageErr = errEmptyBluemixCredentials + session.cisDomainSettingsErr = errEmptyBluemixCredentials + session.cisRoutingErr = errEmptyBluemixCredentials + session.cisWAFGroupErr = errEmptyBluemixCredentials + session.cisCacheErr = errEmptyBluemixCredentials + session.cisCustomPageErr = errEmptyBluemixCredentials + session.cisAccessRuleErr = errEmptyBluemixCredentials + session.cisUARuleErr = errEmptyBluemixCredentials + session.cisLockdownErr = errEmptyBluemixCredentials + session.cisRangeAppErr = errEmptyBluemixCredentials + session.cisWAFRuleErr = errEmptyBluemixCredentials + session.iamIdentityErr = errEmptyBluemixCredentials + session.secretsManagerClientErr = errEmptyBluemixCredentials + session.schematicsClientErr = errEmptyBluemixCredentials + session.iamPolicyManagementErr = errEmptyBluemixCredentials + + return session, nil + } + + if sess.BluemixSession.Config.BluemixAPIKey != "" { + err = authenticateAPIKey(sess.BluemixSession) + if err != nil { + for count := c.RetryCount; count >= 0; count-- { + if err == nil || !isRetryable(err) { + break + } + time.Sleep(c.RetryDelay) + log.Printf("Retrying IAM Authentication %d", count) + err = authenticateAPIKey(sess.BluemixSession) + } + if err != nil { + session.bmxUserFetchErr = fmt.Errorf("Error occured while fetching auth key for account user details: %q", err) + session.functionConfigErr = fmt.Errorf("Error occured while fetching auth key for function: %q", err) + session.powerConfigErr = fmt.Errorf("Error occured while fetching the auth key for power iaas: %q", err) + session.ibmpiConfigErr = fmt.Errorf("Error occured while fetching the auth key for power iaas: %q", err) + } + } + err = authenticateCF(sess.BluemixSession) + if err != nil { + for count := c.RetryCount; count >= 0; count-- { + if err == nil || !isRetryable(err) 
{ + break + } + time.Sleep(c.RetryDelay) + log.Printf("Retrying CF Authentication %d", count) + err = authenticateCF(sess.BluemixSession) + } + if err != nil { + session.functionConfigErr = fmt.Errorf("Error occured while fetching auth key for function: %q", err) + } + } + } + + if sess.BluemixSession.Config.IAMAccessToken != "" && sess.BluemixSession.Config.BluemixAPIKey == "" { + err := refreshToken(sess.BluemixSession) + if err != nil { + for count := c.RetryCount; count >= 0; count-- { + if err == nil || !isRetryable(err) { + break + } + time.Sleep(c.RetryDelay) + log.Printf("Retrying refresh token %d", count) + err = refreshToken(sess.BluemixSession) + } + if err != nil { + return nil, fmt.Errorf("Error occured while refreshing the token: %q", err) + } + } + + } + userConfig, err := fetchUserDetails(sess.BluemixSession, c.RetryCount, c.RetryDelay) + if err != nil { + session.bmxUserFetchErr = fmt.Errorf("Error occured while fetching account user details: %q", err) + } + session.bmxUserDetails = userConfig + + if sess.SoftLayerSession != nil && sess.SoftLayerSession.IAMToken != "" { + sess.SoftLayerSession.IAMToken = sess.BluemixSession.Config.IAMAccessToken + } + + session.functionClient, session.functionConfigErr = FunctionClient(sess.BluemixSession.Config) + + BluemixRegion = sess.BluemixSession.Config.Region + + accv1API, err := accountv1.New(sess.BluemixSession) + if err != nil { + session.accountV1ConfigErr = fmt.Errorf("Error occured while configuring Bluemix Accountv1 Service: %q", err) + } + session.bmxAccountv1ServiceAPI = accv1API + + accAPI, err := accountv2.New(sess.BluemixSession) + if err != nil { + session.accountConfigErr = fmt.Errorf("Error occured while configuring Account Service: %q", err) + } + session.bmxAccountServiceAPI = accAPI + + cfAPI, err := mccpv2.New(sess.BluemixSession) + if err != nil { + session.cfConfigErr = fmt.Errorf("Error occured while configuring MCCP service: %q", err) + } + session.cfServiceAPI = cfAPI + + clusterAPI, err := containerv1.New(sess.BluemixSession) + if err != nil { + session.csConfigErr = fmt.Errorf("Error occured while configuring Container Service for K8s cluster: %q", err) + } + session.csServiceAPI = clusterAPI + + v2clusterAPI, err := containerv2.New(sess.BluemixSession) + if err != nil { + session.csv2ConfigErr = fmt.Errorf("Error occured while configuring vpc Container Service for K8s cluster: %q", err) + } + session.csv2ServiceAPI = v2clusterAPI + + hpcsAPI, err := hpcs.New(sess.BluemixSession) + if err != nil { + session.hpcsEndpointErr = fmt.Errorf("Error occured while configuring hpcs Endpoint: %q", err) + } + session.hpcsEndpointAPI = hpcsAPI + + kpurl := contructEndpoint(fmt.Sprintf("%s.kms", c.Region), cloudEndpoint) + if c.Visibility == "private" || c.Visibility == "public-and-private" { + kpurl = contructEndpoint(fmt.Sprintf("private.%s.kms", c.Region), cloudEndpoint) + } + var options kp.ClientConfig + if c.BluemixAPIKey != "" { + options = kp.ClientConfig{ + BaseURL: envFallBack([]string{"IBMCLOUD_KP_API_ENDPOINT"}, kpurl), + APIKey: sess.BluemixSession.Config.BluemixAPIKey, //pragma: allowlist secret + // InstanceID: "42fET57nnadurKXzXAedFLOhGqETfIGYxOmQXkFgkJV9", + Verbose: kp.VerboseFailOnly, + } + + } else { + options = kp.ClientConfig{ + BaseURL: envFallBack([]string{"IBMCLOUD_KP_API_ENDPOINT"}, kpurl), + Authorization: sess.BluemixSession.Config.IAMAccessToken, + // InstanceID: "42fET57nnadurKXzXAedFLOhGqETfIGYxOmQXkFgkJV9", + Verbose: kp.VerboseFailOnly, + } + } + kpAPIclient, err := kp.New(options, 
kp.DefaultTransport()) + if err != nil { + session.kpErr = fmt.Errorf("Error occured while configuring Key Protect Service: %q", err) + } + session.kpAPI = kpAPIclient + + kmsurl := contructEndpoint(fmt.Sprintf("%s.kms", c.Region), cloudEndpoint) + if c.Visibility == "private" || c.Visibility == "public-and-private" { + kmsurl = contructEndpoint(fmt.Sprintf("private.%s.kms", c.Region), cloudEndpoint) + } + var kmsOptions kp.ClientConfig + if c.BluemixAPIKey != "" { + kmsOptions = kp.ClientConfig{ + BaseURL: envFallBack([]string{"IBMCLOUD_KP_API_ENDPOINT"}, kmsurl), + APIKey: sess.BluemixSession.Config.BluemixAPIKey, //pragma: allowlist secret + // InstanceID: "5af62d5d-5d90-4b84-bbcd-90d2123ae6c8", + Verbose: kp.VerboseFailOnly, + } + + } else { + kmsOptions = kp.ClientConfig{ + BaseURL: envFallBack([]string{"IBMCLOUD_KP_API_ENDPOINT"}, kmsurl), + Authorization: sess.BluemixSession.Config.IAMAccessToken, + // InstanceID: "5af62d5d-5d90-4b84-bbcd-90d2123ae6c8", + Verbose: kp.VerboseFailOnly, + } + } + kmsAPIclient, err := kp.New(kmsOptions, DefaultTransport()) + if err != nil { + session.kmsErr = fmt.Errorf("Error occured while configuring key Service: %q", err) + } + session.kmsAPI = kmsAPIclient + + var authenticator core.Authenticator + + if c.BluemixAPIKey != "" { + authenticator = &core.IamAuthenticator{ + ApiKey: c.BluemixAPIKey, + URL: envFallBack([]string{"IBMCLOUD_IAM_API_ENDPOINT"}, "https://iam.cloud.ibm.com") + "/identity/token", + } + } else if strings.HasPrefix(sess.BluemixSession.Config.IAMAccessToken, "Bearer") { + authenticator = &core.BearerTokenAuthenticator{ + BearerToken: sess.BluemixSession.Config.IAMAccessToken[7:], + } + } else { + authenticator = &core.BearerTokenAuthenticator{ + BearerToken: sess.BluemixSession.Config.IAMAccessToken, + } + } + + // Construct an "options" struct for creating the service client. + catalogManagementURL := "https://cm.globalcatalog.cloud.ibm.com/api/v1-beta" + if c.Visibility == "private" { + session.catalogManagementClientErr = fmt.Errorf("Catalog Management resource doesnot support private endpoints") + } + catalogManagementClientOptions := &catalogmanagementv1.CatalogManagementV1Options{ + URL: envFallBack([]string{"IBMCLOUD_CATALOG_MANAGEMENT_API_ENDPOINT"}, catalogManagementURL), + Authenticator: authenticator, + } + + // Construct the service client. 
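+	// On success, the shared retry count/delay and an X-Original-User-Agent
+	// header carrying the provider version are applied; on failure the error
+	// is stored and surfaces later through CatalogManagementV1().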
+ session.catalogManagementClient, err = catalogmanagementv1.NewCatalogManagementV1(catalogManagementClientOptions) + if err == nil { + // Enable retries for API calls + session.catalogManagementClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + // Add custom header for analytics + session.catalogManagementClient.SetDefaultHeaders(gohttp.Header{ + "X-Original-User-Agent": {fmt.Sprintf("terraform-provider-ibm/%s", version.Version)}, + }) + } else { + session.catalogManagementClientErr = fmt.Errorf("Error occurred while configuring Catalog Management API service: %q", err) + } + schematicsEndpoint := "https://schematics.cloud.ibm.com" + if c.Visibility == "private" || c.Visibility == "public-and-private" { + if c.Region == "us-south" || c.Region == "us-east" { + schematicsEndpoint = contructEndpoint("private-us.schematics", cloudEndpoint) + } else if c.Region == "eu-gb" || c.Region == "eu-de" { + schematicsEndpoint = contructEndpoint("private-eu.schematics", cloudEndpoint) + } else { + schematicsEndpoint = "https://schematics.cloud.ibm.com" + } + } + schematicsClientOptions := &schematicsv1.SchematicsV1Options{ + Authenticator: authenticator, + URL: envFallBack([]string{"IBMCLOUD_SCHEMATICS_API_ENDPOINT"}, schematicsEndpoint), + } + + // Construct the service client. + schematicsClient, err := schematicsv1.NewSchematicsV1(schematicsClientOptions) + // Enable retries for API calls + if schematicsClient != nil && schematicsClient.Service != nil { + schematicsClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + if err != nil { + session.schematicsClientErr = fmt.Errorf("Error occurred while configuring Schematics Service API service: %q", err) + } + } + session.schematicsClient = schematicsClient + + vpcclassicurl := contructEndpoint(fmt.Sprintf("%s.iaas", c.Region), fmt.Sprintf("%s/v1", cloudEndpoint)) + if c.Visibility == "private" { + if c.Region == "us-south" || c.Region == "us-east" { + vpcclassicurl = contructEndpoint(fmt.Sprintf("%s.private.iaas", c.Region), fmt.Sprintf("%s/v1", cloudEndpoint)) + } else { + session.vpcClassicErr = fmt.Errorf("VPC Classic supports private endpoints only in us-south and us-east") + } + } + if c.Visibility == "public-and-private" { + if c.Region == "us-south" || c.Region == "us-east" { + vpcclassicurl = contructEndpoint(fmt.Sprintf("%s.private.iaas", c.Region), fmt.Sprintf("%s/v1", cloudEndpoint)) + } else { + vpcclassicurl = contructEndpoint(fmt.Sprintf("%s.iaas", c.Region), fmt.Sprintf("%s/v1", cloudEndpoint)) + } + } + vpcclassicoptions := &vpcclassic.VpcClassicV1Options{ + URL: envFallBack([]string{"IBMCLOUD_IS_API_ENDPOINT"}, vpcclassicurl), + Authenticator: authenticator, + } + vpcclassicclient, err := vpcclassic.NewVpcClassicV1(vpcclassicoptions) + if err != nil { + session.vpcErr = fmt.Errorf("Error occured while configuring vpc classic service: %q", err) + } + if vpcclassicclient != nil && vpcclassicclient.Service != nil { + vpcclassicclient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + session.vpcClassicAPI = vpcclassicclient + + vpcurl := contructEndpoint(fmt.Sprintf("%s.iaas", c.Region), fmt.Sprintf("%s/v1", cloudEndpoint)) + if c.Visibility == "private" { + if c.Region == "us-south" || c.Region == "us-east" { + vpcurl = contructEndpoint(fmt.Sprintf("%s.private.iaas", c.Region), fmt.Sprintf("%s/v1", cloudEndpoint)) + } else { + session.vpcErr = fmt.Errorf("VPC supports private endpoints only in us-south and us-east") + } + } + if c.Visibility == "public-and-private" { + if c.Region == "us-south" || c.Region == "us-east" 
{ + vpcurl = contructEndpoint(fmt.Sprintf("%s.private.iaas", c.Region), fmt.Sprintf("%s/v1", cloudEndpoint)) + } + vpcurl = contructEndpoint(fmt.Sprintf("%s.iaas", c.Region), fmt.Sprintf("%s/v1", cloudEndpoint)) + } + vpcoptions := &vpc.VpcV1Options{ + URL: envFallBack([]string{"IBMCLOUD_IS_NG_API_ENDPOINT"}, vpcurl), + Authenticator: authenticator, + } + vpcclient, err := vpc.NewVpcV1(vpcoptions) + if err != nil { + session.vpcErr = fmt.Errorf("Error occured while configuring vpc service: %q", err) + } + if vpcclient != nil && vpcclient.Service != nil { + vpcclient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + session.vpcAPI = vpcclient + + pnurl := fmt.Sprintf("https://%s.imfpush.cloud.ibm.com/imfpush/v1", c.Region) + if c.Visibility == "private" { + session.pushServiceClientErr = fmt.Errorf("Push Notifications Service API doesnot support private endpoints") + } + pushNotificationOptions := &pushservicev1.PushServiceV1Options{ + URL: envFallBack([]string{"IBMCLOUD_PUSH_API_ENDPOINT"}, pnurl), + Authenticator: authenticator, + } + pnclient, err := pushservicev1.NewPushServiceV1(pushNotificationOptions) + if pnclient != nil { + // Enable retries for API calls + pnclient.EnableRetries(c.RetryCount, c.RetryDelay) + session.pushServiceClient = pnclient + } else { + session.pushServiceClientErr = fmt.Errorf("Error occured while configuring push notification service: %q", err) + } + if c.Visibility == "private" { + session.appConfigurationClientErr = fmt.Errorf("App Configuration Service API doesnot support private endpoints") + } + appConfigurationClientOptions := &appconfigurationv1.AppConfigurationV1Options{ + Authenticator: authenticator, + } + appConfigClient, err := appconfigurationv1.NewAppConfigurationV1(appConfigurationClientOptions) + if appConfigClient != nil { + // Enable retries for API calls + appConfigClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + session.appConfigurationClient = appConfigClient + } else { + session.appConfigurationClientErr = fmt.Errorf("Error occurred while configuring App Configuration service: %q", err) + } + // Construct an "options" struct for creating the service client. + containerRegistryClientURL, err := containerregistryv1.GetServiceURLForRegion(c.Region) + if err != nil { + containerRegistryClientURL = containerregistryv1.DefaultServiceURL + } + if c.Visibility == "private" || c.Visibility == "public-and-private" { + containerRegistryClientURL, err = GetPrivateServiceURLForRegion(c.Region) + if err != nil { + containerRegistryClientURL, _ = GetPrivateServiceURLForRegion("us-south") + } + } + containerRegistryClientOptions := &containerregistryv1.ContainerRegistryV1Options{ + Authenticator: authenticator, + URL: envFallBack([]string{"IBMCLOUD_CR_API_ENDPOINT"}, containerRegistryClientURL), + Account: core.StringPtr(userConfig.userAccount), + } + + // Construct the service client. 
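+	// Same pattern as the catalog client: retries and the
+	// X-Original-User-Agent header are wired up only when construction
+	// succeeds; otherwise the error is stored for the accessor to return.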
+ session.containerRegistryClient, err = containerregistryv1.NewContainerRegistryV1(containerRegistryClientOptions) + if err == nil { + // Enable retries for API calls + session.containerRegistryClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + // Add custom header for analytics + session.containerRegistryClient.SetDefaultHeaders(gohttp.Header{ + "X-Original-User-Agent": {fmt.Sprintf("terraform-provider-ibm/%s", version.Version)}, + }) + } else { + session.containerRegistryClientErr = fmt.Errorf("Error occurred while configuring IBM Cloud Container Registry API service: %q", err) + } + + //cosconfigurl := fmt.Sprintf("https://%s.iaas.cloud.ibm.com/v1", c.Region) + cosconfigoptions := &cosconfig.ResourceConfigurationV1Options{ + Authenticator: authenticator, + URL: envFallBack([]string{"IBMCLOUD_COS_CONFIG_ENDPOINT"}, "https://config.cloud-object-storage.cloud.ibm.com/v1"), + } + cosconfigclient, err := cosconfig.NewResourceConfigurationV1(cosconfigoptions) + if err != nil { + session.cosConfigErr = fmt.Errorf("Error occured while configuring COS config service: %q", err) + } + session.cosConfigAPI = cosconfigclient + + cisAPI, err := cisv1.New(sess.BluemixSession) + if err != nil { + session.cisConfigErr = fmt.Errorf("Error occured while configuring Cloud Internet Services: %q", err) + } + session.cisServiceAPI = cisAPI + + globalSearchAPI, err := globalsearchv2.New(sess.BluemixSession) + if err != nil { + session.globalSearchConfigErr = fmt.Errorf("Error occured while configuring Global Search: %q", err) + } + session.globalSearchServiceAPI = globalSearchAPI + + globalTaggingAPI, err := globaltaggingv3.New(sess.BluemixSession) + if err != nil { + session.globalTaggingConfigErr = fmt.Errorf("Error occured while configuring Global Tagging: %q", err) + } + session.globalTaggingServiceAPI = globalTaggingAPI + + globalTaggingEndpoint := "https://tags.global-search-tagging.cloud.ibm.com" + if c.Visibility == "private" || c.Visibility == "public-and-private" { + var globalTaggingRegion string + if c.Region != "us-south" && c.Region != "us-east" { + globalTaggingRegion = "us-south" + } else { + globalTaggingRegion = c.Region + } + globalTaggingEndpoint = contructEndpoint(fmt.Sprintf("tags.private.%s", globalTaggingRegion), fmt.Sprintf("global-search-tagging.%s", cloudEndpoint)) + } + + globalTaggingV1Options := &globaltaggingv1.GlobalTaggingV1Options{ + URL: envFallBack([]string{"IBMCLOUD_GT_API_ENDPOINT"}, globalTaggingEndpoint), + Authenticator: authenticator, + } + + globalTaggingAPIV1, err := globaltaggingv1.NewGlobalTaggingV1(globalTaggingV1Options) + if err != nil { + session.globalTaggingConfigErrV1 = fmt.Errorf("Error occured while configuring Global Tagging: %q", err) + } + if globalTaggingAPIV1 != nil { + session.globalTaggingServiceAPIV1 = *globalTaggingAPIV1 + session.globalTaggingServiceAPIV1.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + iam, err := iamv1.New(sess.BluemixSession) + if err != nil { + session.iamConfigErr = fmt.Errorf("Error occured while configuring Bluemix IAM Service: %q", err) + } + session.iamServiceAPI = iam + + iamuumv2, err := iamuumv2.New(sess.BluemixSession) + if err != nil { + session.iamUUMConfigErrV2 = fmt.Errorf("Error occured while configuring Bluemix IAMUUM Service: %q", err) + } + session.iamUUMServiceAPIV2 = iamuumv2 + + icdAPI, err := icdv4.New(sess.BluemixSession) + if err != nil { + session.icdConfigErr = fmt.Errorf("Error occured while configuring IBM Cloud Database Services: %q", err) + } + session.icdServiceAPI = icdAPI + + 
resourceCatalogAPI, err := catalog.New(sess.BluemixSession) + if err != nil { + session.resourceCatalogConfigErr = fmt.Errorf("Error occured while configuring Resource Catalog service: %q", err) + } + session.resourceCatalogServiceAPI = resourceCatalogAPI + + resourceManagementAPIv2, err := managementv2.New(sess.BluemixSession) + if err != nil { + session.resourceManagementConfigErrv2 = fmt.Errorf("Error occured while configuring Resource Management service: %q", err) + } + session.resourceManagementServiceAPIv2 = resourceManagementAPIv2 + + resourceControllerAPI, err := controller.New(sess.BluemixSession) + if err != nil { + session.resourceControllerConfigErr = fmt.Errorf("Error occured while configuring Resource Controller service: %q", err) + } + session.resourceControllerServiceAPI = resourceControllerAPI + + ResourceControllerAPIv2, err := controllerv2.New(sess.BluemixSession) + if err != nil { + session.resourceControllerConfigErrv2 = fmt.Errorf("Error occured while configuring Resource Controller v2 service: %q", err) + } + session.resourceControllerServiceAPIv2 = ResourceControllerAPIv2 + + userManagementAPI, err := usermanagementv2.New(sess.BluemixSession) + if err != nil { + session.userManagementErr = fmt.Errorf("Error occured while configuring user management service: %q", err) + } + session.userManagementAPI = userManagementAPI + certManagementAPI, err := certificatemanager.New(sess.BluemixSession) + if err != nil { + session.certManagementErr = fmt.Errorf("Error occured while configuring Certificate manager service: %q", err) + } + session.certManagementAPI = certManagementAPI + + namespaceFunction, err := functions.New(sess.BluemixSession) + if err != nil { + session.functionIAMNamespaceErr = fmt.Errorf("Error occured while configuring Cloud Funciton Service : %q", err) + } + session.functionIAMNamespaceAPI = namespaceFunction + + apicurl := contructEndpoint(fmt.Sprintf("api.%s.apigw", c.Region), fmt.Sprintf("%s/controller", cloudEndpoint)) + if c.Visibility == "private" || c.Visibility == "public-and-private" { + apicurl = contructEndpoint(fmt.Sprintf("api.private.%s.apigw", c.Region), fmt.Sprintf("%s/controller", cloudEndpoint)) + } + APIGatewayControllerAPIV1Options := &apigateway.ApiGatewayControllerApiV1Options{ + URL: envFallBack([]string{"IBMCLOUD_API_GATEWAY_ENDPOINT"}, apicurl), + Authenticator: &core.NoAuthAuthenticator{}, + } + apigatewayAPI, err := apigateway.NewApiGatewayControllerApiV1(APIGatewayControllerAPIV1Options) + if err != nil { + session.apigatewayErr = fmt.Errorf("Error occured while configuring APIGateway service: %q", err) + } + session.apigatewayAPI = apigatewayAPI + + ibmpisession, err := ibmpisession.New(sess.BluemixSession.Config.IAMAccessToken, c.Region, false, 90000000000, session.bmxUserDetails.userAccount, c.Zone) + if err != nil { + session.ibmpiConfigErr = err + return nil, err + } + + session.ibmpiSession = ibmpisession + + pdnsURL := dns.DefaultServiceURL + if c.Visibility == "private" || c.Visibility == "public-and-private" { + pdnsURL = contructEndpoint("api.private.dns-svcs", fmt.Sprintf("%s/v1", cloudEndpoint)) + } + dnsOptions := &dns.DnsSvcsV1Options{ + URL: envFallBack([]string{"IBMCLOUD_PRIVATE_DNS_API_ENDPOINT"}, pdnsURL), + Authenticator: authenticator, + } + + session.pDNSClient, session.pDNSErr = dns.NewDnsSvcsV1(dnsOptions) + if session.pDNSErr != nil { + session.pDNSErr = fmt.Errorf("Error occured while configuring PrivateDNS Service: %s", session.pDNSErr) + } + if session.pDNSClient != nil && session.pDNSClient.Service != 
nil { + session.pDNSClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + ver := time.Now().Format("2006-01-02") + + dlURL := dl.DefaultServiceURL + if c.Visibility == "private" || c.Visibility == "public-and-private" { + dlURL = contructEndpoint("private.directlink", fmt.Sprintf("%s/v1", cloudEndpoint)) + } + directlinkOptions := &dl.DirectLinkV1Options{ + URL: envFallBack([]string{"IBMCLOUD_DL_API_ENDPOINT"}, dlURL), + Authenticator: authenticator, + Version: &ver, + } + + session.directlinkAPI, session.directlinkErr = dl.NewDirectLinkV1(directlinkOptions) + if session.directlinkErr != nil { + session.directlinkErr = fmt.Errorf("Error occured while configuring Direct Link Service: %s", session.directlinkErr) + } + if session.directlinkAPI != nil && session.directlinkAPI.Service != nil { + session.directlinkAPI.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + //Direct link provider + dlproviderURL := dlProviderV2.DefaultServiceURL + if c.Visibility == "private" || c.Visibility == "public-and-private" { + dlproviderURL = contructEndpoint("private.directlink", fmt.Sprintf("%s/provider/v2", cloudEndpoint)) + } + directLinkProviderV2Options := &dlProviderV2.DirectLinkProviderV2Options{ + URL: envFallBack([]string{"IBMCLOUD_DL_PROVIDER_API_ENDPOINT"}, dlproviderURL), + Authenticator: authenticator, + Version: &ver, + } + + session.dlProviderAPI, session.dlProviderErr = dlProviderV2.NewDirectLinkProviderV2(directLinkProviderV2Options) + if session.dlProviderErr != nil { + session.dlProviderErr = fmt.Errorf("Error occured while configuring Direct Link Provider Service: %s", session.dlProviderErr) + } + if session.dlProviderAPI != nil && session.dlProviderAPI.Service != nil { + session.dlProviderAPI.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + tgURL := tg.DefaultServiceURL + if c.Visibility == "private" || c.Visibility == "public-and-private" { + tgURL = contructEndpoint("private.transit", fmt.Sprintf("%s/v1", cloudEndpoint)) + } + transitgatewayOptions := &tg.TransitGatewayApisV1Options{ + URL: envFallBack([]string{"IBMCLOUD_TG_API_ENDPOINT"}, tgURL), + Authenticator: authenticator, + Version: CreateVersionDate(), + } + + session.transitgatewayAPI, session.transitgatewayErr = tg.NewTransitGatewayApisV1(transitgatewayOptions) + if session.transitgatewayErr != nil { + session.transitgatewayErr = fmt.Errorf("Error occured while configuring Transit Gateway Service: %s", session.transitgatewayErr) + } + if session.transitgatewayAPI != nil && session.transitgatewayAPI.Service != nil { + session.transitgatewayAPI.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // CIS Service instances starts here. 
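+	// All CIS clients below share one endpoint and authenticator; the CRN
+	// and zone identifiers are initialized empty and filled in per resource,
+	// which is why the CIS accessors above hand out Clone()d copies.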
+ cisURL := contructEndpoint("api.cis", cloudEndpoint) + if c.Visibility == "private" { + // cisURL = contructEndpoint("api.private.cis", cloudEndpoint) + session.cisZonesErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisDNSBulkErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisGLBPoolErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisGLBErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisGLBHealthCheckErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisIPErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisRLErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisPageRuleErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisEdgeFunctionErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisSSLErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisWAFPackageErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisDomainSettingsErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisRoutingErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisWAFGroupErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisCacheErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisCustomPageErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisAccessRuleErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisUARuleErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisLockdownErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisRangeAppErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + session.cisWAFRuleErr = fmt.Errorf("CIS Service doesnt support private endpoints.") + } + cisEndPoint := envFallBack([]string{"IBMCLOUD_CIS_API_ENDPOINT"}, cisURL) + + // IBM Network CIS Zones service + cisZonesV1Opt := &ciszonesv1.ZonesV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisZonesV1Client, session.cisZonesErr = ciszonesv1.NewZonesV1(cisZonesV1Opt) + if session.cisZonesErr != nil { + session.cisZonesErr = fmt.Errorf( + "Error occured while configuring CIS Zones service: %s", + session.cisZonesErr) + } + if session.cisZonesV1Client != nil && session.cisZonesV1Client.Service != nil { + session.cisZonesV1Client.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS DNS Record service + cisDNSRecordsOpt := &cisdnsrecordsv1.DnsRecordsV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisDNSRecordsClient, session.cisDNSErr = cisdnsrecordsv1.NewDnsRecordsV1(cisDNSRecordsOpt) + if session.cisDNSErr != nil { + session.cisDNSErr = fmt.Errorf("Error occured while configuring CIS DNS Service: %s", session.cisDNSErr) + } + if session.cisDNSRecordsClient != nil && session.cisDNSRecordsClient.Service != nil { + session.cisDNSRecordsClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS DNS Record bulk service + cisDNSRecordBulkOpt := &cisdnsbulkv1.DnsRecordBulkV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisDNSRecordBulkClient, session.cisDNSBulkErr = 
cisdnsbulkv1.NewDnsRecordBulkV1(cisDNSRecordBulkOpt) + if session.cisDNSBulkErr != nil { + session.cisDNSBulkErr = fmt.Errorf( + "Error occurred while configuring CIS DNS bulk service: %s", + session.cisDNSBulkErr) + } + if session.cisDNSRecordBulkClient != nil && session.cisDNSRecordBulkClient.Service != nil { + session.cisDNSRecordBulkClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Global load balancer pool + cisGLBPoolOpt := &cisglbpoolv0.GlobalLoadBalancerPoolsV0Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisGLBPoolClient, session.cisGLBPoolErr = + cisglbpoolv0.NewGlobalLoadBalancerPoolsV0(cisGLBPoolOpt) + if session.cisGLBPoolErr != nil { + session.cisGLBPoolErr = + fmt.Errorf("Error occurred while configuring CIS GLB Pool service: %s", + session.cisGLBPoolErr) + } + if session.cisGLBPoolClient != nil && session.cisGLBPoolClient.Service != nil { + session.cisGLBPoolClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Global load balancer + cisGLBOpt := &cisglbv1.GlobalLoadBalancerV1Options{ + URL: cisEndPoint, + Authenticator: authenticator, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + } + session.cisGLBClient, session.cisGLBErr = cisglbv1.NewGlobalLoadBalancerV1(cisGLBOpt) + if session.cisGLBErr != nil { + session.cisGLBErr = + fmt.Errorf("Error occurred while configuring CIS GLB service: %s", + session.cisGLBErr) + } + if session.cisGLBClient != nil && session.cisGLBClient.Service != nil { + session.cisGLBClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Global load balancer health check/monitor + cisGLBHealthCheckOpt := &cisglbhealthcheckv1.GlobalLoadBalancerMonitorV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisGLBHealthCheckClient, session.cisGLBHealthCheckErr = + cisglbhealthcheckv1.NewGlobalLoadBalancerMonitorV1(cisGLBHealthCheckOpt) + if session.cisGLBHealthCheckErr != nil { + session.cisGLBHealthCheckErr = + fmt.Errorf("Error occurred while configuring CIS GLB Health Check service: %s", + session.cisGLBHealthCheckErr) + } + if session.cisGLBHealthCheckClient != nil && session.cisGLBHealthCheckClient.Service != nil { + session.cisGLBHealthCheckClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS IP + cisIPOpt := &cisipv1.CisIpApiV1Options{ + URL: cisEndPoint, + Authenticator: authenticator, + } + session.cisIPClient, session.cisIPErr = cisipv1.NewCisIpApiV1(cisIPOpt) + if session.cisIPErr != nil { + session.cisIPErr = fmt.Errorf("Error occurred while configuring CIS IP service: %s", + session.cisIPErr) + } + if session.cisIPClient != nil && session.cisIPClient.Service != nil { + session.cisIPClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Zone Rate Limit + cisRLOpt := &cisratelimitv1.ZoneRateLimitsV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisRLClient, session.cisRLErr = cisratelimitv1.NewZoneRateLimitsV1(cisRLOpt) + if session.cisRLErr != nil { + session.cisRLErr = fmt.Errorf( + "Error occurred while configuring CIS Zone Rate Limit service: %s", + session.cisRLErr) + } + if session.cisRLClient != nil && session.cisRLClient.Service != nil { + session.cisRLClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Page Rules + cisPageRuleOpt :=
&cispagerulev1.PageRuleApiV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneID: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisPageRuleClient, session.cisPageRuleErr = cispagerulev1.NewPageRuleApiV1(cisPageRuleOpt) + if session.cisPageRuleErr != nil { + session.cisPageRuleErr = fmt.Errorf( + "Error occurred while configuring CIS Page Rule service: %s", + session.cisPageRuleErr) + } + if session.cisPageRuleClient != nil && session.cisPageRuleClient.Service != nil { + session.cisPageRuleClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Edge Function + cisEdgeFunctionOpt := &cisedgefunctionv1.EdgeFunctionsApiV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisEdgeFunctionClient, session.cisEdgeFunctionErr = + cisedgefunctionv1.NewEdgeFunctionsApiV1(cisEdgeFunctionOpt) + if session.cisEdgeFunctionErr != nil { + session.cisEdgeFunctionErr = + fmt.Errorf("Error occurred while configuring CIS Edge Function service: %s", + session.cisEdgeFunctionErr) + } + if session.cisEdgeFunctionClient != nil && session.cisEdgeFunctionClient.Service != nil { + session.cisEdgeFunctionClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS SSL certificate + cisSSLOpt := &cissslv1.SslCertificateApiV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + + session.cisSSLClient, session.cisSSLErr = cissslv1.NewSslCertificateApiV1(cisSSLOpt) + if session.cisSSLErr != nil { + session.cisSSLErr = + fmt.Errorf("Error occurred while configuring CIS SSL certificate service: %s", + session.cisSSLErr) + } + if session.cisSSLClient != nil && session.cisSSLClient.Service != nil { + session.cisSSLClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS WAF Package + cisWAFPackageOpt := &ciswafpackagev1.WafRulePackagesApiV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneID: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisWAFPackageClient, session.cisWAFPackageErr = + ciswafpackagev1.NewWafRulePackagesApiV1(cisWAFPackageOpt) + if session.cisWAFPackageErr != nil { + session.cisWAFPackageErr = + fmt.Errorf("Error occurred while configuring CIS WAF Package service: %s", + session.cisWAFPackageErr) + } + if session.cisWAFPackageClient != nil && session.cisWAFPackageClient.Service != nil { + session.cisWAFPackageClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Domain settings + cisDomainSettingsOpt := &cisdomainsettingsv1.ZonesSettingsV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisDomainSettingsClient, session.cisDomainSettingsErr = + cisdomainsettingsv1.NewZonesSettingsV1(cisDomainSettingsOpt) + if session.cisDomainSettingsErr != nil { + session.cisDomainSettingsErr = + fmt.Errorf("Error occurred while configuring CIS Domain Settings service: %s", + session.cisDomainSettingsErr) + } + if session.cisDomainSettingsClient != nil && session.cisDomainSettingsClient.Service != nil { + session.cisDomainSettingsClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Routing + cisRoutingOpt := &cisroutingv1.RoutingV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + 
session.cisRoutingClient, session.cisRoutingErr = + cisroutingv1.NewRoutingV1(cisRoutingOpt) + if session.cisRoutingErr != nil { + session.cisRoutingErr = + fmt.Errorf("Error occurred while configuring CIS Routing service: %s", + session.cisRoutingErr) + } + if session.cisRoutingClient != nil && session.cisRoutingClient.Service != nil { + session.cisRoutingClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS WAF Group + cisWAFGroupOpt := &ciswafgroupv1.WafRuleGroupsApiV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneID: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisWAFGroupClient, session.cisWAFGroupErr = + ciswafgroupv1.NewWafRuleGroupsApiV1(cisWAFGroupOpt) + if session.cisWAFGroupErr != nil { + session.cisWAFGroupErr = + fmt.Errorf("Error occurred while configuring CIS WAF Group service: %s", + session.cisWAFGroupErr) + } + if session.cisWAFGroupClient != nil && session.cisWAFGroupClient.Service != nil { + session.cisWAFGroupClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Cache service + cisCacheOpt := &ciscachev1.CachingApiV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneID: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisCacheClient, session.cisCacheErr = + ciscachev1.NewCachingApiV1(cisCacheOpt) + if session.cisCacheErr != nil { + session.cisCacheErr = + fmt.Errorf("Error occurred while configuring CIS Caching service: %s", + session.cisCacheErr) + } + if session.cisCacheClient != nil && session.cisCacheClient.Service != nil { + session.cisCacheClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Custom pages service + cisCustomPageOpt := &ciscustompagev1.CustomPagesV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + + session.cisCustomPageClient, session.cisCustomPageErr = + ciscustompagev1.NewCustomPagesV1(cisCustomPageOpt) + if session.cisCustomPageErr != nil { + session.cisCustomPageErr = + fmt.Errorf("Error occurred while configuring CIS Custom Pages service: %s", + session.cisCustomPageErr) + } + if session.cisCustomPageClient != nil && session.cisCustomPageClient.Service != nil { + session.cisCustomPageClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Firewall Access rule + cisAccessRuleOpt := &cisaccessrulev1.ZoneFirewallAccessRulesV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisAccessRuleClient, session.cisAccessRuleErr = + cisaccessrulev1.NewZoneFirewallAccessRulesV1(cisAccessRuleOpt) + if session.cisAccessRuleErr != nil { + session.cisAccessRuleErr = + fmt.Errorf("Error occurred while configuring CIS Firewall Access Rule service: %s", + session.cisAccessRuleErr) + } + if session.cisAccessRuleClient != nil && session.cisAccessRuleClient.Service != nil { + session.cisAccessRuleClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Firewall User Agent Blocking rule + cisUARuleOpt := &cisuarulev1.UserAgentBlockingRulesV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisUARuleClient, session.cisUARuleErr = + cisuarulev1.NewUserAgentBlockingRulesV1(cisUARuleOpt) + if session.cisUARuleErr != nil { + session.cisUARuleErr = + fmt.Errorf("Error occurred while configuring CIS Firewall User Agent Blocking Rule service: %s", + session.cisUARuleErr) + } + if session.cisUARuleClient != nil && session.cisUARuleClient.Service != nil { + session.cisUARuleClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Firewall Lockdown rule
+ cisLockdownOpt := &cislockdownv1.ZoneLockdownV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisLockdownClient, session.cisLockdownErr = + cislockdownv1.NewZoneLockdownV1(cisLockdownOpt) + if session.cisLockdownErr != nil { + session.cisLockdownErr = + fmt.Errorf("Error occurred while configuring CIS Firewall Lockdown Rule service: %s", + session.cisLockdownErr) + } + if session.cisLockdownClient != nil && session.cisLockdownClient.Service != nil { + session.cisLockdownClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS Range Application rule + cisRangeAppOpt := &cisrangeappv1.RangeApplicationsV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneIdentifier: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisRangeAppClient, session.cisRangeAppErr = + cisrangeappv1.NewRangeApplicationsV1(cisRangeAppOpt) + if session.cisRangeAppErr != nil { + session.cisRangeAppErr = + fmt.Errorf("Error occurred while configuring CIS Range Application rule service: %s", + session.cisRangeAppErr) + } + if session.cisRangeAppClient != nil && session.cisRangeAppClient.Service != nil { + session.cisRangeAppClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // IBM Network CIS WAF Rule Service + cisWAFRuleOpt := &ciswafrulev1.WafRulesApiV1Options{ + URL: cisEndPoint, + Crn: core.StringPtr(""), + ZoneID: core.StringPtr(""), + Authenticator: authenticator, + } + session.cisWAFRuleClient, session.cisWAFRuleErr = + ciswafrulev1.NewWafRulesApiV1(cisWAFRuleOpt) + if session.cisWAFRuleErr != nil { + session.cisWAFRuleErr = fmt.Errorf( + "Error occurred while configuring CIS WAF Rules service: %s", + session.cisWAFRuleErr) + } + if session.cisWAFRuleClient != nil && session.cisWAFRuleClient.Service != nil { + session.cisWAFRuleClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + + // iamIdenityURL := fmt.Sprintf("https://%s.iam.cloud.ibm.com/v1", c.Region) + iamURL := iamidentity.DefaultServiceURL + if c.Visibility == "private" || c.Visibility == "public-and-private" { + if c.Region == "us-south" || c.Region == "us-east" { + iamURL = contructEndpoint(fmt.Sprintf("private.%s.iam", c.Region), cloudEndpoint) + } else { + iamURL = contructEndpoint("private.iam", cloudEndpoint) + } + } + iamIdentityOptions := &iamidentity.IamIdentityV1Options{ + Authenticator: authenticator, + URL: envFallBack([]string{"IBMCLOUD_IAM_API_ENDPOINT"}, iamURL), + } + iamIdentityClient, err := iamidentity.NewIamIdentityV1(iamIdentityOptions) + if err != nil { + session.iamIdentityErr = fmt.Errorf("Error occurred while configuring IAM Identity service: %q", err) + } + if iamIdentityClient != nil && iamIdentityClient.Service != nil { + iamIdentityClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + session.iamIdentityAPI = iamIdentityClient + + iamPolicyManagementURL := iampolicymanagement.DefaultServiceURL + if c.Visibility == "private" || c.Visibility == "public-and-private" { + if c.Region == "us-south" || c.Region == "us-east" { + iamPolicyManagementURL = contructEndpoint(fmt.Sprintf("private.%s.iam", c.Region), cloudEndpoint) + } else { + iamPolicyManagementURL = contructEndpoint("private.iam",
cloudEndpoint) + } + } + iamPolicyManagementOptions := &iampolicymanagement.IamPolicyManagementV1Options{ + Authenticator: authenticator, + URL: envFallBack([]string{"IBMCLOUD_IAM_API_ENDPOINT"}, iamPolicyManagementURL), + } + iamPolicyManagementClient, err := iampolicymanagement.NewIamPolicyManagementV1(iamPolicyManagementOptions) + if err != nil { + session.iamPolicyManagementErr = fmt.Errorf("Error occurred while configuring IAM Policy Management service: %q", err) + } + if iamPolicyManagementClient != nil && iamPolicyManagementClient.Service != nil { + iamPolicyManagementClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + } + session.iamPolicyManagementAPI = iamPolicyManagementClient + + rmURL := resourcemanager.DefaultServiceURL + if c.Visibility == "private" { + if c.Region == "us-south" || c.Region == "us-east" { + rmURL = contructEndpoint(fmt.Sprintf("private.%s.resource-controller", c.Region), fmt.Sprintf("%s/v2", cloudEndpoint)) + } else { + fmt.Println("Private endpoints are supported only for the us-south and us-east region-specific endpoints") + rmURL = contructEndpoint("private.us-south.resource-controller", fmt.Sprintf("%s/v2", cloudEndpoint)) + } + } + if c.Visibility == "public-and-private" { + if c.Region == "us-south" || c.Region == "us-east" { + rmURL = contructEndpoint(fmt.Sprintf("private.%s.resource-controller", c.Region), fmt.Sprintf("%s/v2", cloudEndpoint)) + } else { + rmURL = resourcemanager.DefaultServiceURL + } + } + resourceManagerOptions := &resourcemanager.ResourceManagerV2Options{ + Authenticator: authenticator, + URL: envFallBack([]string{"IBMCLOUD_RESOURCE_MANAGEMENT_API_ENDPOINT"}, rmURL), + } + resourceManagerClient, err := resourcemanager.NewResourceManagerV2(resourceManagerOptions) + if err != nil { + session.resourceManagerErr = fmt.Errorf("Error occurred while configuring Resource Manager service: %q", err) + } + if resourceManagerClient != nil { + resourceManagerClient.EnableRetries(c.RetryCount, c.RetryDelay) + } + session.resourceManagerAPI = resourceManagerClient + + enterpriseURL := enterprisemanagementv1.DefaultServiceURL + if c.Visibility == "private" { + if c.Region == "us-south" || c.Region == "us-east" || c.Region == "eu-fr" { + enterpriseURL = contructEndpoint(fmt.Sprintf("private.%s.enterprise", c.Region), fmt.Sprintf("%s/v1", cloudEndpoint)) + } else { + fmt.Println("Private endpoints are supported only for the us-south, us-east and eu-fr region-specific endpoints") + enterpriseURL = contructEndpoint("private.us-south.enterprise", fmt.Sprintf("%s/v1", cloudEndpoint)) + } + } + if c.Visibility == "public-and-private" { + if c.Region == "us-south" || c.Region == "us-east" || c.Region == "eu-fr" { + enterpriseURL = contructEndpoint(fmt.Sprintf("private.%s.enterprise", c.Region), + fmt.Sprintf("%s/v1", cloudEndpoint)) + } else { + enterpriseURL = enterprisemanagementv1.DefaultServiceURL + } + } + enterpriseManagementClientOptions := &enterprisemanagementv1.EnterpriseManagementV1Options{ + Authenticator: authenticator, + URL: envFallBack([]string{"IBMCLOUD_ENTERPRISE_API_ENDPOINT"}, enterpriseURL), + } + enterpriseManagementClient, err := enterprisemanagementv1.NewEnterpriseManagementV1(enterpriseManagementClientOptions) + if err == nil { + enterpriseManagementClient.EnableRetries(c.RetryCount, c.RetryDelay) + } else { + session.enterpriseManagementClientErr = fmt.Errorf("Error occurred while configuring IBM Cloud Enterprise Management API service: %q", err) + } + session.enterpriseManagementClient = enterpriseManagementClient + + // resource controller API + rcURL :=
resourcecontroller.DefaultServiceURL + if c.Visibility == "private" { + if c.Region == "us-south" || c.Region == "us-east" { + rcURL = contructEndpoint(fmt.Sprintf("private.%s.resource-controller", c.Region), cloudEndpoint) + } else { + fmt.Println("Private endpoints are supported only for the us-south and us-east region-specific endpoints") + rcURL = contructEndpoint("private.us-south.resource-controller", cloudEndpoint) + } + } + if c.Visibility == "public-and-private" { + if c.Region == "us-south" || c.Region == "us-east" { + rcURL = contructEndpoint(fmt.Sprintf("private.%s.resource-controller", c.Region), cloudEndpoint) + } else { + rcURL = resourcecontroller.DefaultServiceURL + } + } + resourceControllerOptions := &resourcecontroller.ResourceControllerV2Options{ + Authenticator: authenticator, + URL: envFallBack([]string{"IBMCLOUD_RESOURCE_CONTROLLER_API_ENDPOINT"}, rcURL), + } + resourceControllerClient, err := resourcecontroller.NewResourceControllerV2(resourceControllerOptions) + if err != nil { + session.resourceControllerErr = fmt.Errorf("Error occurred while configuring Resource Controller service: %q", err) + } + if resourceControllerClient != nil { + resourceControllerClient.EnableRetries(c.RetryCount, c.RetryDelay) + } + session.resourceControllerAPI = resourceControllerClient + // var authenticator2 *core.BearerTokenAuthenticator + // Construct an "options" struct for creating the service client. + secretsManagerClientOptions := &secretsmanagerv1.SecretsManagerV1Options{ + Authenticator: authenticator, + } + + // Construct the service client. + session.secretsManagerClient, err = secretsmanagerv1.NewSecretsManagerV1(secretsManagerClientOptions) + if err == nil { + // Enable retries for API calls + session.secretsManagerClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + // Add custom header for analytics + session.secretsManagerClient.SetDefaultHeaders(gohttp.Header{ + "X-Original-User-Agent": {fmt.Sprintf("terraform-provider-ibm/%s", version.Version)}, + }) + } else { + session.secretsManagerClientErr = fmt.Errorf("Error occurred while configuring IBM Cloud Secrets Manager API service: %q", err) + } + + return session, nil +} + +// CreateVersionDate supplies the mandatory version date attribute. Any date from 2019-12-13 up to the current date may be provided. Specify the current date to request the latest version.
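+ //
+ // Illustrative call site (Transit Gateway above is the only direct consumer here):
+ //
+ //   Version: CreateVersionDate() // e.g. resolves to "2021-04-30" on that day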
+func CreateVersionDate() *string { + version := time.Now().Format("2006-01-02") + return &version +} + +func newSession(c *Config) (*Session, error) { + ibmSession := &Session{} + + softlayerSession := &slsession.Session{ + Endpoint: c.SoftLayerEndpointURL, + Timeout: c.SoftLayerTimeout, + UserName: c.SoftLayerUserName, + APIKey: c.SoftLayerAPIKey, + Debug: os.Getenv("TF_LOG") != "", + Retries: c.RetryCount, + RetryWait: c.RetryDelay, + } + + if c.IAMToken != "" { + log.Println("Configuring SoftLayer Session with token") + softlayerSession.IAMToken = c.IAMToken + } + if c.SoftLayerAPIKey != "" && c.SoftLayerUserName != "" { + log.Println("Configuring SoftLayer Session with API key") + softlayerSession.APIKey = c.SoftLayerAPIKey + softlayerSession.UserName = c.SoftLayerUserName + } + softlayerSession.AppendUserAgent(fmt.Sprintf("terraform-provider-ibm/%s", version.Version)) + ibmSession.SoftLayerSession = softlayerSession + + if (c.IAMToken != "" && c.IAMRefreshToken == "") || (c.IAMToken == "" && c.IAMRefreshToken != "") { + return nil, fmt.Errorf("iam_token and iam_refresh_token must be provided") + } + + if c.IAMToken != "" && c.IAMRefreshToken != "" { + log.Println("Configuring IBM Cloud Session with token") + var sess *bxsession.Session + bmxConfig := &bluemix.Config{ + IAMAccessToken: c.IAMToken, + IAMRefreshToken: c.IAMRefreshToken, + //Comment out debug mode for v0.12 + //Debug: os.Getenv("TF_LOG") != "", + HTTPTimeout: c.BluemixTimeout, + Region: c.Region, + ResourceGroup: c.ResourceGroup, + RetryDelay: &c.RetryDelay, + MaxRetries: &c.RetryCount, + Visibility: c.Visibility, + } + sess, err := bxsession.New(bmxConfig) + if err != nil { + return nil, err + } + ibmSession.BluemixSession = sess + } + + if c.BluemixAPIKey != "" { + log.Println("Configuring IBM Cloud Session with API key") + var sess *bxsession.Session + bmxConfig := &bluemix.Config{ + BluemixAPIKey: c.BluemixAPIKey, + //Comment out debug mode for v0.12 + //Debug: os.Getenv("TF_LOG") != "", + HTTPTimeout: c.BluemixTimeout, + Region: c.Region, + ResourceGroup: c.ResourceGroup, + RetryDelay: &c.RetryDelay, + MaxRetries: &c.RetryCount, + Visibility: c.Visibility, + //PowerServiceInstance: c.PowerServiceInstance, + } + sess, err := bxsession.New(bmxConfig) + if err != nil { + return nil, err + } + ibmSession.BluemixSession = sess + } + + return ibmSession, nil +} + +func authenticateAPIKey(sess *bxsession.Session) error { + config := sess.Config + tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + }) + if err != nil { + return err + } + return tokenRefresher.AuthenticateAPIKey(config.BluemixAPIKey) +} + +func authenticateCF(sess *bxsession.Session) error { + config := sess.Config + tokenRefresher, err := authentication.NewUAARepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + }) + if err != nil { + return err + } + return tokenRefresher.AuthenticateAPIKey(config.BluemixAPIKey) +} + +func fetchUserDetails(sess *bxsession.Session, retries int, retryDelay time.Duration) (*UserConfig, error) { + config := sess.Config + user := UserConfig{} + var bluemixToken string + + if strings.HasPrefix(config.IAMAccessToken, "Bearer") { + bluemixToken = config.IAMAccessToken[7:len(config.IAMAccessToken)] + } else { + bluemixToken = config.IAMAccessToken + } + + token, err := jwt.Parse(bluemixToken, func(token *jwt.Token) (interface{}, error) { + return "", nil + }) + 
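+ // jwt.Parse is given a key func that returns a dummy key: the intent is only to decode
+ // the token's claims, so the resulting "key is of invalid type" verification error is
+ // tolerated by the check below.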
//TODO validate with key + if err != nil && !strings.Contains(err.Error(), "key is of invalid type") { + if retries > 0 { + if config.BluemixAPIKey != "" { + time.Sleep(retryDelay) + log.Printf("Retrying authentication for user details %d", retries) + _ = authenticateAPIKey(sess) + return fetchUserDetails(sess, retries-1, retryDelay) + } + } + return &user, err + } + claims := token.Claims.(jwt.MapClaims) + if email, ok := claims["email"]; ok { + user.userEmail = email.(string) + } + user.userID = claims["id"].(string) + user.userAccount = claims["account"].(map[string]interface{})["bss"].(string) + iss := claims["iss"].(string) + if strings.Contains(iss, "https://iam.cloud.ibm.com") { + user.cloudName = "bluemix" + } else { + user.cloudName = "staging" + } + user.cloudType = "public" + + user.generation = 2 + return &user, nil +} + +func refreshToken(sess *bxsession.Session) error { + config := sess.Config + tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + }) + if err != nil { + return err + } + _, err = tokenRefresher.RefreshToken() + return err +} + +func envFallBack(envs []string, defaultValue string) string { + for _, k := range envs { + if v := os.Getenv(k); v != "" { + return v + } + } + return defaultValue +} + +// DefaultTransport ... +func DefaultTransport() gohttp.RoundTripper { + transport := &gohttp.Transport{ + Proxy: gohttp.ProxyFromEnvironment, + DisableKeepAlives: true, + MaxIdleConnsPerHost: -1, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: false, + }, + } + return transport +} + +func isRetryable(err error) bool { + if bmErr, ok := err.(bmxerror.RequestFailure); ok { + switch bmErr.StatusCode() { + case 408, 504, 599, 429, 500, 502, 520, 503: + return true + } + } + + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + return true + } + + if netErr, ok := err.(*net.OpError); ok && netErr.Timeout() { + return true + } + + if netErr, ok := err.(net.UnknownNetworkError); ok && netErr.Timeout() { + return true + } + + return false +} + +func contructEndpoint(subdomain, domain string) string { + endpoint := fmt.Sprintf("https://%s.%s", subdomain, domain) + return endpoint +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/config_functions.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/config_functions.go new file mode 100644 index 00000000000..f7988da19ce --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/config_functions.go @@ -0,0 +1,136 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "net/http" + "net/url" + "os" + + bluemix "github.com/IBM-Cloud/bluemix-go" + "github.com/IBM-Cloud/bluemix-go/api/functions" + bxsession "github.com/IBM-Cloud/bluemix-go/session" + "github.com/apache/openwhisk-client-go/whisk" +) + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://us-south.functions.cloud.ibm.com" + +//FunctionClient ... +func FunctionClient(c *bluemix.Config) (*whisk.Client, error) { + baseEndpoint := getBaseURL(c.Region) + u, err := url.Parse(fmt.Sprintf("%s/api", baseEndpoint)) + if err != nil { + return nil, err + } + + functionsClient, err := whisk.NewClient(http.DefaultClient, &whisk.Config{ + Host: u.Host, + Version: "v1", + }) + + return functionsClient, err +} + +//getBaseURL .. 
+func getBaseURL(region string) string { + baseEndpoint := fmt.Sprintf(DefaultServiceURL) + if region != "us-south" { + baseEndpoint = fmt.Sprintf("https://%s.functions.cloud.ibm.com", region) + } + + return baseEndpoint +} + +/* + * + * Configure an HTTP client using the OpenWhisk properties (i.e. host, auth, iamtoken). + * Only cf-based namespaces need an auth key value. + * iam-based namespaces don't have an auth key and need only an IAM token for authorization. + * + */ +func setupOpenWhiskClientConfig(namespace string, sess *bxsession.Session, functionNamespace functions.FunctionServiceAPI) (*whisk.Client, error) { + u, _ := url.Parse(fmt.Sprintf("https://%s.functions.cloud.ibm.com/api", sess.Config.Region)) + wskClient, _ := whisk.NewClient(http.DefaultClient, &whisk.Config{ + Host: u.Host, + Version: "v1", + }) + + nsList, err := functionNamespace.Namespaces().GetNamespaces() + if err != nil { + return nil, err + } + + var validNamespace bool + var isCFNamespace bool + allNamespaces := []string{} + for _, n := range nsList.Namespaces { + allNamespaces = append(allNamespaces, n.GetName()) + if n.GetName() == namespace || n.GetID() == namespace { + if os.Getenv("TF_LOG") != "" { + whisk.SetDebug(true) + } + if n.IsCf() { + isCFNamespace = true + break + } + validNamespace = true + // Configure whisk properties to handle iam-based/iam-migrated namespaces. + if n.IsIamEnabled() { + additionalHeaders := make(http.Header) + + err := refreshToken(sess) + if err != nil { + for count := sess.Config.MaxRetries; *count >= 0; *count-- { + if err == nil || !isRetryable(err) { + break + } + err = refreshToken(sess) + } + if err != nil { + return nil, err + } + + } + additionalHeaders.Add("Authorization", sess.Config.IAMAccessToken) + additionalHeaders.Add("X-Namespace-Id", n.GetID()) + + wskClient.Config.Namespace = n.GetID() + wskClient.Config.AdditionalHeaders = additionalHeaders + return wskClient, nil + } + } + } + + // Configure whisk properties to handle cf-based namespaces. + if isCFNamespace { + if sess.Config.UAAAccessToken == "" && sess.Config.UAARefreshToken == "" { + return nil, fmt.Errorf("Couldn't retrieve auth key for IBM Cloud Function") + } + err := validateNamespace(namespace) + if err != nil { + return nil, err + } + + nsList, err := functionNamespace.Namespaces().GetCloudFoundaryNamespaces() + if err != nil { + return nil, err + } + + for _, n := range nsList.Namespaces { + if n.GetName() == namespace { + wskClient.Config.Namespace = n.GetName() + wskClient.Config.AuthToken = fmt.Sprintf("%s:%s", n.GetUUID(), n.GetKey()) + return wskClient, nil + } + } + } + + if !validNamespace { + return nil, fmt.Errorf("Namespace '%s' is not in the list of entitled namespaces. Available namespaces are %s", namespace, allNamespaces) + } + + return nil, fmt.Errorf("Failed to create whisk config object for namespace '%s'", namespace) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_account.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_account.go new file mode 100644 index 00000000000..a0f62c9c279 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_account.go @@ -0,0 +1,86 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMAccountRead, + + Schema: map[string]*schema.Schema{ + "org_guid": { + Description: "The guid of the org", + Type: schema.TypeString, + Required: true, + }, + "account_users": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "role": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMAccountRead(d *schema.ResourceData, meta interface{}) error { + bmxSess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + accClient, err := meta.(ClientSession).BluemixAcccountAPI() + if err != nil { + return err + } + orgGUID := d.Get("org_guid").(string) + account, err := accClient.Accounts().FindByOrg(orgGUID, bmxSess.Config.Region) + if err != nil { + return fmt.Errorf("Error retrieving organisation: %s", err) + } + + accountv1Client, err := meta.(ClientSession).BluemixAcccountv1API() + if err != nil { + return err + } + accountUsers, err := accountv1Client.Accounts().GetAccountUsers(account.GUID) + if err != nil { + return fmt.Errorf("Error retrieving users in account: %s", err) + } + accountUsersMap := make([]map[string]string, 0, len(accountUsers)) + for _, user := range accountUsers { + accountUser := make(map[string]string) + accountUser["id"] = user.Id + accountUser["email"] = user.Email + accountUser["state"] = user.State + accountUser["role"] = user.Role + accountUsersMap = append(accountUsersMap, accountUser) + } + + d.SetId(account.GUID) + d.Set("account_users", accountUsersMap) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_api_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_api_gateway.go new file mode 100644 index 00000000000..c6d8a32b74a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_api_gateway.go @@ -0,0 +1,196 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/json" + "fmt" + "strings" + + apigatewaysdk "github.com/IBM/apigateway-go-sdk" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMApiGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMApiGatewayRead, + Schema: map[string]*schema.Schema{ + "service_instance_crn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "API Gateway Service Instance CRN", + }, + "endpoints": { + Type: schema.TypeList, + Computed: true, + Description: "List of all endpoints of an instance", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoint_id": { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint ID", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint name", + }, + "routes": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Description: "Invokable routes for an endpoint", + }, + "managed": { + Type: schema.TypeBool, + Computed: true, + Description: "Managed indicates if endpoint is online or offline.", + }, + "shared": { + Type: schema.TypeBool, + Computed: true, + Description: "The Shared status of an endpoint", + }, + "base_path": { + Type: schema.TypeString, + Computed: true, + Description: "Base path of an endpoint", + }, + "managed_url": { + Type: schema.TypeString, + Computed: true, + Description: "Managed URL for an endpoint", + }, + "alias_url": { + Type: schema.TypeString, + Computed: true, + Description: "Alias URL for an endpoint", + }, + "open_api_doc": { + Type: schema.TypeString, + Computed: true, + Description: "API document that represents the endpoint", + }, + "subscriptions": { + Type: schema.TypeList, + Computed: true, + Description: "List of all subscriptions of an endpoint", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Computed: true, + Description: "Subscription Id", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Subscription name", + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: "Subscription type", + }, + "secret_provided": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates if client secret is provided to subscription or not", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMApiGatewayRead(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return err + } + payload := &apigatewaysdk.GetAllEndpointsOptions{} + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + serviceInstanceCrn := d.Get("service_instance_crn").(string) + payload.Authorization = &oauthtoken + payload.ServiceInstanceCrn = &serviceInstanceCrn + allendpoints, response, err := endpointservice.GetAllEndpoints(payload) + if err != nil { + return fmt.Errorf("Error Getting All Endpoints: %s,%s", err, response) + } + endpointsMap := make([]map[string]interface{}, 0, len(*allendpoints)) + + for _, endpoint := range *allendpoints { + ArtifactID := endpoint.ArtifactID + + swaggerPayload := &apigatewaysdk.GetEndpointSwaggerOptions{} + swaggerPayload.Authorization = &oauthtoken + swaggerPayload.ID = ArtifactID + swaggerPayload.ServiceInstanceCrn = &serviceInstanceCrn + 
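+ // Fetch this endpoint's OpenAPI (swagger) document; it is marshaled to a JSON string below.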
swagger, err := endpointservice.GetEndpointSwagger(swaggerPayload) + if err != nil { + return fmt.Errorf("Error Getting Endpoint Swagger: %s,%s", err, swagger) + } + doc := swagger.Result + str, err := json.Marshal(doc) + if err != nil { + fmt.Printf("error while json Marshal: %v", err) + } + swagger_document := string(str) + SubscriptionPayload := &apigatewaysdk.GetAllSubscriptionsOptions{} + SubscriptionPayload.ArtifactID = ArtifactID + SubscriptionPayload.Authorization = &oauthtoken + if v, ok := d.GetOk("type"); ok && v != nil { + Type := v.(string) + if Type == "internal" { + Type = "bluemix" + } + SubscriptionPayload.Type = &Type + } + allsubscriptions, response, err := endpointservice.GetAllSubscriptions(SubscriptionPayload) + if err != nil { + return fmt.Errorf("Error Getting All Subscriptions: %s %s", err, response) + } + subscriptionMap := make([]map[string]interface{}, 0, len(*allsubscriptions)) + for _, subscription := range *allsubscriptions { + allsubscription := make(map[string]interface{}) + allsubscription["name"] = *subscription.Name + allsubscription["client_id"] = subscription.ClientID + if *subscription.Type == "bluemix" { + *subscription.Type = "internal" + } + allsubscription["type"] = subscription.Type + allsubscription["secret_provided"] = subscription.SecretProvided + subscriptionMap = append(subscriptionMap, allsubscription) + } + result := make(map[string]interface{}) + result["endpoint_id"] = *endpoint.ArtifactID + result["name"] = *endpoint.Name + result["managed"] = endpoint.Managed + result["shared"] = endpoint.Shared + result["routes"] = endpoint.Routes + result["managed_url"] = *endpoint.ManagedURL + result["base_path"] = endpoint.BasePath + result["alias_url"] = endpoint.AliasURL + result["open_api_doc"] = swagger_document + result["subscriptions"] = subscriptionMap + endpointsMap = append(endpointsMap, result) + } + d.SetId(serviceInstanceCrn) + d.Set("service_instance_crn", serviceInstanceCrn) + d.Set("endpoints", endpointsMap) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app.go new file mode 100644 index 00000000000..7ae4bb97f10 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app.go @@ -0,0 +1,135 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMApp() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMAppRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the app", + }, + "space_guid": { + Description: "Define space guid to which app belongs", + Type: schema.TypeString, + Required: true, + }, + "memory": { + Description: "The amount of memory each instance should have. In megabytes.", + Type: schema.TypeInt, + Computed: true, + }, + "instances": { + Description: "The number of instances", + Type: schema.TypeInt, + Computed: true, + }, + "disk_quota": { + Description: "The maximum amount of disk available to an instance of an app. In megabytes.", + Type: schema.TypeInt, + Computed: true, + }, + "buildpack": { + Description: "Buildpack to build the app. 3 options: a) Blank means autodetection; b) A Git URL pointing to a buildpack; c) Name of an installed buildpack.",
+ Type: schema.TypeString, + Computed: true, + }, + "environment_json": { + Description: "Key/value pairs of all the environment variables to run in your app. Does not include any system or service variables.", + Type: schema.TypeMap, + Computed: true, + }, + "route_guid": { + Description: "Define the route guids which should be bound to the application.", + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Computed: true, + }, + "service_instance_guid": { + Description: "Define the service instance guids that should be bound to this application.", + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "package_state": { + Description: "The state of the application package, whether staged, pending, etc.", + Type: schema.TypeString, + Computed: true, + }, + "state": { + Description: "The state of the application", + Type: schema.TypeString, + Computed: true, + }, + "health_check_http_endpoint": { + Description: "Endpoint called to determine if the app is healthy.", + Type: schema.TypeString, + Computed: true, + }, + "health_check_type": { + Description: "Type of health check to perform.", + Type: schema.TypeString, + Computed: true, + }, + "health_check_timeout": { + Description: "Timeout in seconds for health checking of a staged app when starting up.", + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceIBMAppRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + appAPI := cfClient.Apps() + name := d.Get("name").(string) + spaceGUID := d.Get("space_guid").(string) + + app, err := appAPI.FindByName(spaceGUID, name) + if err != nil { + return err + } + d.SetId(app.GUID) + d.Set("memory", app.Memory) + d.Set("disk_quota", app.DiskQuota) + if app.BuildPack != nil { + d.Set("buildpack", app.BuildPack) + } + d.Set("environment_json", Flatten(app.EnvironmentJSON)) + d.Set("package_state", app.PackageState) + d.Set("state", app.State) + d.Set("instances", app.Instances) + d.Set("health_check_type", app.HealthCheckType) + d.Set("health_check_http_endpoint", app.HealthCheckHTTPEndpoint) + d.Set("health_check_timeout", app.HealthCheckTimeout) + + route, err := appAPI.ListRoutes(app.GUID) + if err != nil { + return err + } + if len(route) > 0 { + d.Set("route_guid", flattenRoute(route)) + } + svcBindings, err := appAPI.ListServiceBindings(app.GUID) + if err != nil { + return err + } + if len(svcBindings) > 0 { + d.Set("service_instance_guid", flattenServiceBindings(svcBindings)) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_environment.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_environment.go new file mode 100644 index 00000000000..1e3a912d092 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_environment.go @@ -0,0 +1,133 @@ +// Copyright IBM Corp. 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1" +) + +func dataSourceIbmAppConfigEnvironment() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmAppConfigEnvironmentRead, + + Schema: map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "GUID of the App Configuration service. Get it from the service instance credentials section of the dashboard.", + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + Description: "Environment Id.", + }, + "expand": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to `true`, returns expanded view of the resource details.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Environment name.", + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: "Environment description.", + }, + "tags": { + Type: schema.TypeString, + Computed: true, + Description: "Tags associated with the environment.", + }, + "color_code": { + Type: schema.TypeString, + Computed: true, + Description: "Color code to distinguish the environment. The Hex code for the color. For example `#FF0000` for `red`.", + }, + "created_time": { + Type: schema.TypeString, + Computed: true, + Description: "Creation time of the environment.", + }, + "updated_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last modified time of the environment data.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "Environment URL.", + }, + }, + } +} + +func dataSourceIbmAppConfigEnvironmentRead(d *schema.ResourceData, meta interface{}) error { + guid := d.Get("guid").(string) + + appconfigClient, err := getAppConfigClient(meta, guid) + if err != nil { + return err + } + + options := &appconfigurationv1.GetEnvironmentOptions{} + options.SetEnvironmentID(d.Get("environment_id").(string)) + + if _, ok := d.GetOk("expand"); ok { + options.SetExpand(d.Get("expand").(bool)) + } + result, response, err := appconfigClient.GetEnvironment(options) + if err != nil { + log.Printf("GetEnvironment failed %s\n%s", err, response) + return err + } + + d.SetId(fmt.Sprintf("%s/%s", guid, *result.EnvironmentID)) + + if result.Name != nil { + if err = d.Set("name", result.Name); err != nil { + return fmt.Errorf("error setting name: %s", err) + } + } + if result.Description != nil { + if err = d.Set("description", result.Description); err != nil { + return fmt.Errorf("error setting description: %s", err) + } + } + if result.Tags != nil { + if err = d.Set("tags", result.Tags); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + } + if result.ColorCode != nil { + if err = d.Set("color_code", result.ColorCode); err != nil { + return fmt.Errorf("error setting color_code: %s", err) + } + } + if result.CreatedTime != nil { + if err = d.Set("created_time", result.CreatedTime.String()); err != nil { + return fmt.Errorf("error setting created_time: %s", err) + } + } + if result.UpdatedTime != nil { + if err = d.Set("updated_time", result.UpdatedTime.String()); err != nil { + return fmt.Errorf("error setting updated_time: %s", err) + } + } + if result.Href != nil { + if err = d.Set("href", result.Href); err != nil { + return fmt.Errorf("error setting href: %s", err) + } + } + return nil +} diff --git 
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_environments.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_environments.go new file mode 100644 index 00000000000..a00ed0a6108 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_environments.go @@ -0,0 +1,351 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "net/url" + "reflect" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1" +) + +func dataSourceIbmAppConfigEnvironments() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmAppConfigEnvironmentsRead, + + Schema: map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "GUID of the App Configuration service. Get it from the service instance credentials section of the dashboard.", + }, + "tags": { + Type: schema.TypeString, + Optional: true, + Description: "Filter the resources to be returned based on the associated tags. Returns resources associated with any of the specified tags.", + }, + "expand": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to `true`, returns expanded view of the resource details.", + }, + "limit": { + Type: schema.TypeInt, + Optional: true, + Description: "The number of records to retrieve. By default, the list operation returns the first 10 records. To retrieve a different set of records, use `limit` with `offset` to page through the available records.", + }, + "offset": { + Type: schema.TypeInt, + Optional: true, + Description: "The number of records to skip. By specifying `offset`, you retrieve a subset of items that starts with the `offset` value. Use `offset` with `limit` to page through the available records.", + }, + "environments": { + Type: schema.TypeList, + Computed: true, + Description: "Array of environments.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Environment name.", + }, + "environment_id": { + Type: schema.TypeString, + Computed: true, + Description: "Environment id.", + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: "Environment description.", + }, + "tags": { + Type: schema.TypeString, + Computed: true, + Description: "Tags associated with the environment.", + }, + "color_code": { + Type: schema.TypeString, + Computed: true, + Description: "Color code to distinguish the environment. The Hex code for the color.
For example `#FF0000` for `red`.", + }, + "created_time": { + Type: schema.TypeString, + Computed: true, + Description: "Creation time of the environment.", + }, + "updated_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last modified time of the environment data.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "Environment URL.", + }, + }, + }, + }, + "total_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Total number of records.", + }, + "next": { + Type: schema.TypeList, + Computed: true, + Description: "URL to navigate to the next list of records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "URL of the response.", + }, + }, + }, + }, + "first": { + Type: schema.TypeList, + Computed: true, + Description: "URL to navigate to the first page of records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "URL of the response.", + }, + }, + }, + }, + "previous": { + Type: schema.TypeList, + Computed: true, + Description: "URL to navigate to the previous list of records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "URL of the response.", + }, + }, + }, + }, + "last": { + Type: schema.TypeList, + Computed: true, + Description: "URL to navigate to the last page of records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "URL of the response.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIbmAppConfigEnvironmentsRead(d *schema.ResourceData, meta interface{}) error { + guid := d.Get("guid").(string) + + appconfigClient, err := getAppConfigClient(meta, guid) + if err != nil { + return err + } + + options := &appconfigurationv1.ListEnvironmentsOptions{} + + if _, ok := d.GetOk("expand"); ok { + options.SetExpand(d.Get("expand").(bool)) + } + + if _, ok := d.GetOk("tags"); ok { + options.SetTags(d.Get("tags").(string)) + } + + var environmentList *appconfigurationv1.EnvironmentList + var offset int64 = 0 + var limit int64 = 10 + finalList := []appconfigurationv1.Environment{} + + var isLimit bool + if _, ok := d.GetOk("limit"); ok { + isLimit = true + limit = int64(d.Get("limit").(int)) + } + options.SetLimit(limit) + + if _, ok := d.GetOk("offset"); ok { + offset = int64(d.Get("offset").(int)) + } + for { + options.SetOffset(offset) + result, response, err := appconfigClient.ListEnvironments(options) + environmentList = result + if err != nil { + log.Printf("[DEBUG] ListEnvironments failed %s\n%s", err, response) + return err + } + if isLimit { + offset = 0 + } else { + offset = dataSourceEnvironmentListGetNext(result.Next) + } + finalList = append(finalList, result.Environments...) 
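+ // Keep paging: when no explicit `limit` was given, follow the service's `next` link via
+ // dataSourceEnvironmentListGetNext; an offset of 0 ends the loop below.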
+ if offset == 0 { + break + } + } + + environmentList.Environments = finalList + + d.SetId(guid) + + if environmentList.Environments != nil { + err = d.Set("environments", dataSourceEnvironmentListFlattenEnvironments(environmentList.Environments)) + if err != nil { + return fmt.Errorf("error setting environments %s", err) + } + } + if environmentList.TotalCount != nil { + if err = d.Set("total_count", environmentList.TotalCount); err != nil { + return fmt.Errorf("error setting total_count: %s", err) + } + } + if environmentList.Limit != nil { + if err = d.Set("limit", environmentList.Limit); err != nil { + return fmt.Errorf("error setting limit: %s", err) + } + } + if environmentList.Offset != nil { + if err = d.Set("offset", environmentList.Offset); err != nil { + return fmt.Errorf("error setting offset: %s", err) + } + } + if environmentList.First != nil { + err = d.Set("first", dataSourceEnvironmentListFlattenPagination(*environmentList.First)) + if err != nil { + return fmt.Errorf("error setting first %s", err) + } + } + + if environmentList.Previous != nil { + err = d.Set("previous", dataSourceEnvironmentListFlattenPagination(*environmentList.Previous)) + if err != nil { + return fmt.Errorf("error setting previous %s", err) + } + } + + if environmentList.Last != nil { + err = d.Set("last", dataSourceEnvironmentListFlattenPagination(*environmentList.Last)) + if err != nil { + return fmt.Errorf("error setting last %s", err) + } + } + if environmentList.Next != nil { + err = d.Set("next", dataSourceEnvironmentListFlattenPagination(*environmentList.Next)) + if err != nil { + return fmt.Errorf("error setting next %s", err) + } + } + + return nil +} + +func dataSourceEnvironmentListFlattenEnvironments(result []appconfigurationv1.Environment) (environments []map[string]interface{}) { + for _, environmentsItem := range result { + environments = append(environments, dataSourceEnvironmentListEnvironmentsToMap(environmentsItem)) + } + + return environments +} + +func dataSourceEnvironmentListEnvironmentsToMap(environmentsItem appconfigurationv1.Environment) (environmentsMap map[string]interface{}) { + environmentsMap = map[string]interface{}{} + + if environmentsItem.Name != nil { + environmentsMap["name"] = environmentsItem.Name + } + if environmentsItem.EnvironmentID != nil { + environmentsMap["environment_id"] = environmentsItem.EnvironmentID + } + if environmentsItem.Description != nil { + environmentsMap["description"] = environmentsItem.Description + } + if environmentsItem.Tags != nil { + environmentsMap["tags"] = environmentsItem.Tags + } + if environmentsItem.ColorCode != nil { + environmentsMap["color_code"] = environmentsItem.ColorCode + } + if environmentsItem.CreatedTime != nil { + environmentsMap["created_time"] = environmentsItem.CreatedTime.String() + } + if environmentsItem.UpdatedTime != nil { + environmentsMap["updated_time"] = environmentsItem.UpdatedTime.String() + } + if environmentsItem.Href != nil { + environmentsMap["href"] = environmentsItem.Href + } + + return environmentsMap +} + +func dataSourceEnvironmentListGetNext(next interface{}) int64 { + if reflect.ValueOf(next).IsNil() { + return 0 + } + + u, err := url.Parse(reflect.ValueOf(next).Elem().FieldByName("Href").Elem().String()) + if err != nil { + return 0 + } + + q := u.Query() + var page string + + if q.Get("start") != "" { + page = q.Get("start") + } else if q.Get("offset") != "" { + page = q.Get("offset") + } + + convertedVal, err := strconv.ParseInt(page, 10, 64) + if err != nil { + return 0 + } + return 
convertedVal +} + +func dataSourceEnvironmentListFlattenPagination(result appconfigurationv1.PageHrefResponse) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceEnvironmentListURLToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceEnvironmentListURLToMap(urlItem appconfigurationv1.PageHrefResponse) (urlMap map[string]interface{}) { + urlMap = map[string]interface{}{} + + if urlItem.Href != nil { + urlMap["href"] = urlItem.Href + } + + return urlMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_feature.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_feature.go new file mode 100644 index 00000000000..f370a9f814f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_feature.go @@ -0,0 +1,332 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1" +) + +func dataSourceIbmAppConfigFeature() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmAppConfigFeatureRead, + + Schema: map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Required: true, + Description: "GUID of the App Configuration service. Get it from the service instance credentials section of the dashboard.", + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + Description: "Environment Id.", + }, + "feature_id": { + Type: schema.TypeString, + Required: true, + Description: "Feature Id.", + }, + "includes": { + Type: schema.TypeString, + Optional: true, + Description: "Include the associated collections in the response.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Feature name.", + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: "Feature description.", + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: "Type of the feature (BOOLEAN, STRING, NUMERIC).", + }, + "enabled_value": { + Type: schema.TypeString, + Computed: true, + Description: "Value of the feature when it is enabled. The value can be Boolean, String or a Numeric value as per the `type` attribute.", + }, + "disabled_value": { + Type: schema.TypeString, + Computed: true, + Description: "Value of the feature when it is disabled. 
The value can be Boolean, String or a Numeric value as per the `type` attribute.", + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + Description: "The state of the feature flag.", + }, + "tags": { + Type: schema.TypeString, + Computed: true, + Description: "Tags associated with the feature.", + }, + "segment_exists": { + Type: schema.TypeBool, + Computed: true, + Description: "Denotes if the targeting rules are specified for the feature flag.", + }, + "segment_rules": { + Type: schema.TypeList, + Computed: true, + Description: "Specify the targeting rules that is used to set different feature flag values for different segments.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rules": { + Type: schema.TypeList, + Computed: true, + Description: "Rules array.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "segments": { + Type: schema.TypeList, + Computed: true, + Description: "List of segment ids that are used for targeting using the rule.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "value": { + Type: schema.TypeString, + Computed: true, + Description: "Value to be used for evaluation for this rule. The value can be Boolean, String or a Numeric value as per the `type` attribute.", + }, + "order": { + Type: schema.TypeInt, + Computed: true, + Description: "Order of the rule, used during evaluation. The evaluation is performed in the order defined and the value associated with the first matching rule is used for evaluation.", + }, + }, + }, + }, + "collections": { + Type: schema.TypeList, + Computed: true, + Description: "List of collection id representing the collections that are associated with the specified feature flag.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "collection_id": { + Type: schema.TypeString, + Computed: true, + Description: "Collection id.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the collection.", + }, + }, + }, + }, + "created_time": { + Type: schema.TypeString, + Computed: true, + Description: "Creation time of the feature flag.", + }, + "updated_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last modified time of the feature flag data.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "Feature flag URL.", + }, + }, + } +} + +func dataSourceIbmAppConfigFeatureRead(d *schema.ResourceData, meta interface{}) error { + guid := d.Get("guid").(string) + + appconfigClient, err := getAppConfigClient(meta, guid) + if err != nil { + return err + } + + options := &appconfigurationv1.GetFeatureOptions{} + + options.SetEnvironmentID(d.Get("environment_id").(string)) + options.SetFeatureID(d.Get("feature_id").(string)) + + if _, ok := d.GetOk("includes"); ok { + options.SetInclude(d.Get("includes").(string)) + } + + result, response, err := appconfigClient.GetFeature(options) + if err != nil { + log.Printf("[DEBUG] GetFeature failed %s\n%s", err, response) + return err + } + + d.SetId(fmt.Sprintf("%s/%s/%s", guid, *options.EnvironmentID, *result.FeatureID)) + if result.Name != nil { + if err = d.Set("name", result.Name); err != nil { + return fmt.Errorf("error setting name: %s", err) + } + } + if result.Description != nil { + if err = d.Set("description", result.Description); err != nil { + return fmt.Errorf("error setting description: %s", err) + } + } + if result.Type != nil { + if err = d.Set("type", result.Type); err != nil { + return fmt.Errorf("error setting type: %s", 
err) + } + } + if result.Enabled != nil { + if err = d.Set("enabled", result.Enabled); err != nil { + return fmt.Errorf("error setting enabled: %s", err) + } + } + if result.Tags != nil { + if err = d.Set("tags", result.Tags); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + } + if result.SegmentExists != nil { + if err = d.Set("segment_exists", result.SegmentExists); err != nil { + return fmt.Errorf("error setting segment_exists: %s", err) + } + } + if result.CreatedTime != nil { + if err = d.Set("created_time", result.CreatedTime.String()); err != nil { + return fmt.Errorf("error setting created_time: %s", err) + } + } + if result.UpdatedTime != nil { + if err = d.Set("updated_time", result.UpdatedTime.String()); err != nil { + return fmt.Errorf("error setting updated_time: %s", err) + } + } + if result.Href != nil { + if err = d.Set("href", result.Href); err != nil { + return fmt.Errorf("error setting href: %s", err) + } + } + + if result.EnabledValue != nil { + enabledValue := result.EnabledValue + + switch enabledValue.(interface{}).(type) { + case string: + d.Set("enabled_value", enabledValue.(string)) + case float64: + d.Set("enabled_value", fmt.Sprintf("%v", enabledValue)) + case bool: + d.Set("enabled_value", strconv.FormatBool(enabledValue.(bool))) + } + } + + if result.DisabledValue != nil { + disabledValue := result.DisabledValue + + switch disabledValue.(interface{}).(type) { + case string: + d.Set("disabled_value", disabledValue.(string)) + case float64: + d.Set("disabled_value", fmt.Sprintf("%v", disabledValue)) + case bool: + d.Set("disabled_value", strconv.FormatBool(disabledValue.(bool))) + } + } + + if result.SegmentRules != nil { + err = d.Set("segment_rules", dataSourceFeatureFlattenSegmentRules(result.SegmentRules)) + if err != nil { + return fmt.Errorf("error setting segment_rules %s", err) + } + } + + if result.Collections != nil { + err = d.Set("collections", dataSourceFeatureFlattenCollections(result.Collections)) + if err != nil { + return fmt.Errorf("error setting collections %s", err) + } + } + return nil +} + +func dataSourceFeatureFlattenSegmentRules(result []appconfigurationv1.SegmentRule) (segmentRules []map[string]interface{}) { + for _, segmentRulesItem := range result { + segmentRules = append(segmentRules, dataSourceFeatureSegmentRulesToMap(segmentRulesItem)) + } + + return segmentRules +} + +func dataSourceFeatureSegmentRulesToMap(segmentRulesItem appconfigurationv1.SegmentRule) (segmentRulesMap map[string]interface{}) { + segmentRulesMap = map[string]interface{}{} + + if segmentRulesItem.Rules != nil { + rulesList := []map[string]interface{}{} + for _, rulesItem := range segmentRulesItem.Rules { + rulesList = append(rulesList, dataSourceFeatureSegmentRulesRulesToMap(rulesItem)) + } + segmentRulesMap["rules"] = rulesList + } + if segmentRulesItem.Value != nil { + segmentValue := segmentRulesItem.Value + switch segmentValue.(interface{}).(type) { + case string: + segmentRulesMap["value"] = segmentValue.(string) + case float64: + segmentRulesMap["value"] = fmt.Sprintf("%v", segmentValue) + case bool: + segmentRulesMap["value"] = strconv.FormatBool(segmentValue.(bool)) + } + } + if segmentRulesItem.Order != nil { + segmentRulesMap["order"] = segmentRulesItem.Order + } + + return segmentRulesMap +} + +func dataSourceFeatureSegmentRulesRulesToMap(rulesItem appconfigurationv1.TargetSegments) (rulesMap map[string]interface{}) { + rulesMap = map[string]interface{}{} + + if rulesItem.Segments != nil { + rulesMap["segments"] = 
rulesItem.Segments + } + + return rulesMap +} + +func dataSourceFeatureFlattenCollections(result []appconfigurationv1.CollectionRef) (collections []map[string]interface{}) { + for _, collectionsItem := range result { + collections = append(collections, dataSourceFeatureCollectionsToMap(collectionsItem)) + } + + return collections +} + +func dataSourceFeatureCollectionsToMap(collectionsItem appconfigurationv1.CollectionRef) (collectionsMap map[string]interface{}) { + collectionsMap = map[string]interface{}{} + + if collectionsItem.CollectionID != nil { + collectionsMap["collection_id"] = collectionsItem.CollectionID + } + if collectionsItem.Name != nil { + collectionsMap["name"] = collectionsItem.Name + } + + return collectionsMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_features.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_features.go new file mode 100644 index 00000000000..dca55005449 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_config_features.go @@ -0,0 +1,565 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "net/url" + "reflect" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1" +) + +func dataSourceIbmAppConfigFeatures() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmAppConfigFeaturesRead, + + Schema: map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Required: true, + Description: "GUID of the App Configuration service. Get it from the service instance credentials section of the dashboard.", + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + Description: "Environment Id.", + }, + "sort": { + Type: schema.TypeString, + Optional: true, + Description: "Sort the feature details based on the specified attribute.", + }, + "tags": { + Type: schema.TypeString, + Optional: true, + Description: "Filter the resources to be returned based on the associated tags. Specify the parameter as a list of comma separated tags. Returns resources associated with any of the specified tags.", + }, + "collections": { + Type: schema.TypeList, + Optional: true, + Description: "Filter features by a list of comma separated collections.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "segments": { + Type: schema.TypeList, + Optional: true, + Description: "Filter features by a list of comma separated segments.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "expand": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to `true`, returns expanded view of the resource details.", + }, + "includes": { + Type: schema.TypeList, + Optional: true, + Description: "Include the associated collections or targeting rules details in the response.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "limit": { + Type: schema.TypeInt, + Optional: true, + Description: "The number of records to retrieve. By default, the list operation return the first 10 records. To retrieve different set of records, use `limit` with `offset` to page through the available records.", + }, + "offset": { + Type: schema.TypeInt, + Optional: true, + Description: "The number of records to skip. By specifying `offset`, you retrieve a subset of items that starts with the `offset` value. 
Use `offset` with `limit` to page through the available records.", + }, + "features": { + Type: schema.TypeList, + Computed: true, + Description: "Array of Features.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Feature name.", + }, + "feature_id": { + Type: schema.TypeString, + Computed: true, + Description: "Feature id.", + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: "Feature description.", + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: "Type of the feature (BOOLEAN, STRING, NUMERIC).", + }, + "enabled_value": { + Type: schema.TypeString, + Computed: true, + Description: "Value of the feature when it is enabled. The value can be Boolean, String or a Numeric value as per the `type` attribute.", + }, + "disabled_value": { + Type: schema.TypeString, + Computed: true, + Description: "Value of the feature when it is disabled. The value can be Boolean, String or a Numeric value as per the `type` attribute.", + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + Description: "The state of the feature flag.", + }, + "tags": { + Type: schema.TypeString, + Computed: true, + Description: "Tags associated with the feature.", + }, + "segment_rules": { + Type: schema.TypeList, + Computed: true, + Description: "Specify the targeting rules that is used to set different feature flag values for different segments.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rules": { + Type: schema.TypeList, + Computed: true, + Description: "Rules array.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "segments": { + Type: schema.TypeList, + Computed: true, + Description: "List of segment ids that are used for targeting using the rule.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "value": { + Type: schema.TypeString, + Computed: true, + Description: "Value to be used for evaluation for this rule. The value can be Boolean, String or a Numeric value as per the `type` attribute.", + }, + "order": { + Type: schema.TypeInt, + Computed: true, + Description: "Order of the rule, used during evaluation. 
The evaluation is performed in the order defined and the value associated with the first matching rule is used for evaluation.", + }, + }, + }, + }, + "segment_exists": { + Type: schema.TypeBool, + Computed: true, + Description: "Denotes if the targeting rules are specified for the feature flag.", + }, + "collections": { + Type: schema.TypeList, + Computed: true, + Description: "List of collection id representing the collections that are associated with the specified feature flag.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "collection_id": { + Type: schema.TypeString, + Computed: true, + Description: "Collection id.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the collection.", + }, + }, + }, + }, + "created_time": { + Type: schema.TypeString, + Computed: true, + Description: "Creation time of the feature flag.", + }, + "updated_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last modified time of the feature flag data.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "Feature flag URL.", + }, + }, + }, + }, + "total_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of records returned in the current response.", + }, + "next": { + Type: schema.TypeList, + Computed: true, + Description: "URL to navigate to the next list of records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "URL of the response.", + }, + }, + }, + }, + "first": { + Type: schema.TypeList, + Computed: true, + Description: "URL to navigate to the first page of records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "URL of the response.", + }, + }, + }, + }, + "previous": { + Type: schema.TypeList, + Computed: true, + Description: "URL to navigate to the previous list of records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "URL of the response.", + }, + }, + }, + }, + "last": { + Type: schema.TypeList, + Computed: true, + Description: "URL to navigate to the last page of records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "URL of the response.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIbmAppConfigFeaturesRead(d *schema.ResourceData, meta interface{}) error { + guid := d.Get("guid").(string) + + appconfigClient, err := getAppConfigClient(meta, guid) + if err != nil { + return err + } + + options := &appconfigurationv1.ListFeaturesOptions{} + options.SetEnvironmentID(d.Get("environment_id").(string)) + if _, ok := d.GetOk("expand"); ok { + options.SetExpand(d.Get("expand").(bool)) + } + if _, ok := d.GetOk("tags"); ok { + options.SetTags(d.Get("tags").(string)) + } + if _, ok := d.GetOk("collections"); ok { + collections := []string{} + for _, segmentsItem := range d.Get("collections").([]interface{}) { + collections = append(collections, segmentsItem.(string)) + } + options.SetCollections(collections) + } + if _, ok := d.GetOk("segments"); ok { + segments := []string{} + for _, segmentsItem := range d.Get("segments").([]interface{}) { + segments = append(segments, segmentsItem.(string)) + } + options.SetSegments(segments) + } + if _, ok := d.GetOk("includes"); ok { + includes := []string{} + for _, segmentsItem 
:= range d.Get("includes").([]interface{}) { + includes = append(includes, segmentsItem.(string)) + } + options.SetInclude(includes) + } + + var featuresList *appconfigurationv1.FeaturesList + var offset int64 + var limit int64 = 10 + var isLimit bool + finalList := []appconfigurationv1.Feature{} + + if _, ok := d.GetOk("limit"); ok { + isLimit = true + limit = int64(d.Get("limit").(int)) + } + options.SetLimit(limit) + if _, ok := d.GetOk("offset"); ok { + offset = int64(d.Get("offset").(int)) + } + for { + options.Offset = &offset + result, response, err := appconfigClient.ListFeatures(options) + featuresList = result + if err != nil { + log.Printf("[DEBUG] ListFeatures failed %s\n%s", err, response) + return err + } + if isLimit { + offset = 0 + } else { + offset = dataSourceFeaturesListGetNext(result.Next) + } + finalList = append(finalList, result.Features...) + if offset == 0 { + break + } + } + + featuresList.Features = finalList + + d.SetId(fmt.Sprintf("%s/%s", guid, *options.EnvironmentID)) + + if featuresList.Features != nil { + err = d.Set("features", dataSourceFeaturesListFlattenFeatures(featuresList.Features)) + if err != nil { + return fmt.Errorf("error setting features %s", err) + } + } + if featuresList.TotalCount != nil { + if err = d.Set("total_count", featuresList.TotalCount); err != nil { + return fmt.Errorf("error setting total_count: %s", err) + } + } + if featuresList.Limit != nil { + if err = d.Set("limit", featuresList.Limit); err != nil { + return fmt.Errorf("error setting limit: %s", err) + } + } + if featuresList.Offset != nil { + if err = d.Set("offset", featuresList.Offset); err != nil { + return fmt.Errorf("error setting offset: %s", err) + } + } + if featuresList.First != nil { + err = d.Set("first", dataSourceFeatureListFlattenPagination(*featuresList.First)) + if err != nil { + return fmt.Errorf("error setting first %s", err) + } + } + + if featuresList.Previous != nil { + err = d.Set("previous", dataSourceFeatureListFlattenPagination(*featuresList.Previous)) + if err != nil { + return fmt.Errorf("error setting previous %s", err) + } + } + + if featuresList.Last != nil { + err = d.Set("last", dataSourceFeatureListFlattenPagination(*featuresList.Last)) + if err != nil { + return fmt.Errorf("error setting last %s", err) + } + } + if featuresList.Next != nil { + err = d.Set("next", dataSourceFeatureListFlattenPagination(*featuresList.Next)) + if err != nil { + return fmt.Errorf("error setting next %s", err) + } + } + + return nil +} + +func dataSourceFeaturesListFlattenFeatures(result []appconfigurationv1.Feature) (features []map[string]interface{}) { + for _, featuresItem := range result { + features = append(features, dataSourceFeaturesListFeaturesToMap(featuresItem)) + } + + return features +} + +func dataSourceFeaturesListFeaturesToMap(featuresItem appconfigurationv1.Feature) (featuresMap map[string]interface{}) { + featuresMap = map[string]interface{}{} + + if featuresItem.Name != nil { + featuresMap["name"] = featuresItem.Name + } + if featuresItem.FeatureID != nil { + featuresMap["feature_id"] = featuresItem.FeatureID + } + if featuresItem.Description != nil { + featuresMap["description"] = featuresItem.Description + } + if featuresItem.Type != nil { + featuresMap["type"] = featuresItem.Type + } + if featuresItem.Enabled != nil { + featuresMap["enabled"] = featuresItem.Enabled + } + if featuresItem.Tags != nil { + featuresMap["tags"] = featuresItem.Tags + } + if featuresItem.SegmentRules != nil { + segmentRulesList := []map[string]interface{}{} + for 
_, segmentRulesItem := range featuresItem.SegmentRules { + segmentRulesList = append(segmentRulesList, dataSourceFeaturesListFeaturesSegmentRulesToMap(segmentRulesItem)) + } + featuresMap["segment_rules"] = segmentRulesList + } + if featuresItem.SegmentExists != nil { + featuresMap["segment_exists"] = featuresItem.SegmentExists + } + if featuresItem.Collections != nil { + collectionsList := []map[string]interface{}{} + for _, collectionsItem := range featuresItem.Collections { + collectionsList = append(collectionsList, dataSourceFeaturesListFeaturesCollectionsToMap(collectionsItem)) + } + featuresMap["collections"] = collectionsList + } + if featuresItem.CreatedTime != nil { + featuresMap["created_time"] = featuresItem.CreatedTime.String() + } + if featuresItem.UpdatedTime != nil { + featuresMap["updated_time"] = featuresItem.UpdatedTime.String() + } + if featuresItem.Href != nil { + featuresMap["href"] = featuresItem.Href + } + if featuresItem.EnabledValue != nil { + enabledValue := featuresItem.EnabledValue + + switch enabledValue.(interface{}).(type) { + case string: + featuresMap["enabled_value"] = enabledValue.(string) + case float64: + featuresMap["enabled_value"] = fmt.Sprintf("%v", enabledValue) + case bool: + featuresMap["enabled_value"] = strconv.FormatBool(enabledValue.(bool)) + } + } + + if featuresItem.DisabledValue != nil { + disabledValue := featuresItem.DisabledValue + + switch disabledValue.(interface{}).(type) { + case string: + featuresMap["disabled_value"] = disabledValue.(string) + case float64: + featuresMap["disabled_value"] = fmt.Sprintf("%v", disabledValue) + case bool: + featuresMap["disabled_value"] = strconv.FormatBool(disabledValue.(bool)) + } + } + return featuresMap +} + +func dataSourceFeaturesListFeaturesSegmentRulesToMap(segmentRulesItem appconfigurationv1.SegmentRule) (segmentRulesMap map[string]interface{}) { + segmentRulesMap = map[string]interface{}{} + + if segmentRulesItem.Rules != nil { + rulesList := []map[string]interface{}{} + for _, rulesItem := range segmentRulesItem.Rules { + rulesList = append(rulesList, dataSourceListFeaturesSegmentRulesRulesToMap(rulesItem)) + } + segmentRulesMap["rules"] = rulesList + } + if segmentRulesItem.Value != nil { + segmentValue := segmentRulesItem.Value + switch segmentValue.(interface{}).(type) { + case string: + segmentRulesMap["value"] = segmentValue.(string) + case float64: + segmentRulesMap["value"] = fmt.Sprintf("%v", segmentValue) + case bool: + segmentRulesMap["value"] = strconv.FormatBool(segmentValue.(bool)) + } + } + if segmentRulesItem.Order != nil { + segmentRulesMap["order"] = segmentRulesItem.Order + } + + return segmentRulesMap +} + +func dataSourceFeaturesListFeaturesCollectionsToMap(collectionsItem appconfigurationv1.CollectionRef) (collectionsMap map[string]interface{}) { + collectionsMap = map[string]interface{}{} + + if collectionsItem.CollectionID != nil { + collectionsMap["collection_id"] = collectionsItem.CollectionID + } + if collectionsItem.Name != nil { + collectionsMap["name"] = collectionsItem.Name + } + + return collectionsMap +} + +func dataSourceFeaturesListGetNext(next interface{}) int64 { + if reflect.ValueOf(next).IsNil() { + return 0 + } + + u, err := url.Parse(reflect.ValueOf(next).Elem().FieldByName("Href").Elem().String()) + if err != nil { + return 0 + } + + q := u.Query() + var page string + + if q.Get("start") != "" { + page = q.Get("start") + } else if q.Get("offset") != "" { + page = q.Get("offset") + } + + convertedVal, err := strconv.ParseInt(page, 10, 64) + if err 
!= nil { + return 0 + } + return convertedVal +} + +func dataSourceListFeaturesSegmentRulesRulesToMap(rule appconfigurationv1.TargetSegments) map[string]interface{} { + ruleMap := map[string]interface{}{} + + ruleMap["segments"] = rule.Segments + + return ruleMap +} + +func dataSourceFeatureListFlattenPagination(result appconfigurationv1.PageHrefResponse) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceFeatureListURLToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceFeatureListURLToMap(urlItem appconfigurationv1.PageHrefResponse) (urlMap map[string]interface{}) { + urlMap = map[string]interface{}{} + + if urlItem.Href != nil { + urlMap["href"] = urlItem.Href + } + + return urlMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_domain_private.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_domain_private.go new file mode 100644 index 00000000000..751da80a759 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_domain_private.go @@ -0,0 +1,39 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMAppDomainPrivate() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMAppDomainPrivateRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "The name of the private domain", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMAppDomainPrivateRead(d *schema.ResourceData, meta interface{}) error { + cfAPI, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + domainName := d.Get("name").(string) + prdomain, err := cfAPI.PrivateDomains().FindByName(domainName) + if err != nil { + return fmt.Errorf("Error retrieving domain: %s", err) + } + d.SetId(prdomain.GUID) + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_domain_shared.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_domain_shared.go new file mode 100644 index 00000000000..c740848487b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_domain_shared.go @@ -0,0 +1,40 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
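+// NOTE: illustrative usage only, not part of this change. A minimal
+// Terraform sketch of how this data source is expected to be consumed,
+// assuming the registered type name matches the file name; the domain
+// name below is a placeholder:
+//
+//   data "ibm_app_domain_shared" "example" {
+//     name = "mybluemix.net"
+//   }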
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMAppDomainShared() *schema.Resource {
+  return &schema.Resource{
+    Read: dataSourceIBMAppDomainSharedRead,
+
+    Schema: map[string]*schema.Schema{
+      "name": {
+        Description:  "The name of the shared domain",
+        Type:         schema.TypeString,
+        Required:     true,
+        ValidateFunc: validateDomainName,
+      },
+    },
+  }
+}
+
+func dataSourceIBMAppDomainSharedRead(d *schema.ResourceData, meta interface{}) error {
+  cfClient, err := meta.(ClientSession).MccpAPI()
+  if err != nil {
+    return err
+  }
+  domainName := d.Get("name").(string)
+  shdomain, err := cfClient.SharedDomains().FindByName(domainName)
+  if err != nil {
+    return fmt.Errorf("Error retrieving shared domain: %s", err)
+  }
+  d.SetId(shdomain.GUID)
+  return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_route.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_route.go
new file mode 100644
index 00000000000..32254d1f4d0
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_app_route.go
@@ -0,0 +1,90 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+
+  v2 "github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2"
+  "github.com/IBM-Cloud/bluemix-go/helpers"
+
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMAppRoute() *schema.Resource {
+  return &schema.Resource{
+    Read: dataSourceIBMAppRouteRead,
+
+    Schema: map[string]*schema.Schema{
+      "space_guid": {
+        Description: "The guid of the space",
+        Type:        schema.TypeString,
+        Required:    true,
+      },
+      "domain_guid": {
+        Description: "The guid of the domain",
+        Type:        schema.TypeString,
+        Required:    true,
+      },
+      "host": {
+        Description: "The host of the route",
+        Type:        schema.TypeString,
+        Optional:    true,
+      },
+      "path": {
+        Type:         schema.TypeString,
+        Optional:     true,
+        Description:  "The path of the route",
+        ValidateFunc: validateRoutePath,
+      },
+      "port": {
+        // The port is read with port.(int) below, so it is declared as
+        // TypeInt; declaring it TypeString would make that assertion panic.
+        Type:         schema.TypeInt,
+        Optional:     true,
+        Description:  "The port of the route",
+        ValidateFunc: validateRoutePort,
+      },
+    },
+  }
+}
+
+func dataSourceIBMAppRouteRead(d *schema.ResourceData, meta interface{}) error {
+  cfClient, err := meta.(ClientSession).MccpAPI()
+  if err != nil {
+    return err
+  }
+  spaceAPI := cfClient.Spaces()
+  spaceGUID := d.Get("space_guid").(string)
+  domainGUID := d.Get("domain_guid").(string)
+
+  params := v2.RouteFilter{
+    DomainGUID: domainGUID,
+  }
+
+  if host, ok := d.GetOk("host"); ok {
+    params.Host = helpers.String(host.(string))
+  }
+
+  if port, ok := d.GetOk("port"); ok {
+    params.Port = helpers.Int(port.(int))
+  }
+
+  if path, ok := d.GetOk("path"); ok {
+    params.Path = helpers.String(path.(string))
+  }
+  route, err := spaceAPI.ListRoutes(spaceGUID, params)
+  if err != nil {
+    return fmt.Errorf("Error retrieving route: %s", err)
+  }
+  if len(route) == 0 {
+    return fmt.Errorf("No route satisfies the given parameters")
+  }
+
+  if len(route) > 1 {
+    return fmt.Errorf("More than one route satisfies the given parameters")
+  }
+
+  d.SetId(route[0].GUID)
+  return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_certificate_manager_certificate.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_certificate_manager_certificate.go
new file mode 100644
index
00000000000..5c46af352fe
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_certificate_manager_certificate.go
@@ -0,0 +1,165 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+  "strconv"
+
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataIBMCertificateManagerCertificate() *schema.Resource {
+  return &schema.Resource{
+    Read: dataIBMCertificateManagerCertificateRead,
+    Schema: map[string]*schema.Schema{
+      "certificate_manager_instance_id": {
+        Type:     schema.TypeString,
+        Required: true,
+      },
+      "name": {
+        Type:     schema.TypeString,
+        Required: true,
+      },
+      "certificate_details": {
+        Type:        schema.TypeList,
+        Computed:    true,
+        Description: "List of certificates",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "cert_id": {
+              Type:     schema.TypeString,
+              Computed: true,
+            },
+            "name": {
+              Type:     schema.TypeString,
+              Computed: true,
+            },
+            "data": {
+              Type:     schema.TypeMap,
+              Computed: true,
+            },
+            "description": {
+              Type:     schema.TypeString,
+              Computed: true,
+            },
+            "issuer": {
+              Type:     schema.TypeString,
+              Computed: true,
+            },
+            "domains": {
+              Type:     schema.TypeList,
+              Computed: true,
+              Elem:     &schema.Schema{Type: schema.TypeString},
+            },
+            "begins_on": {
+              Type:     schema.TypeInt,
+              Computed: true,
+            },
+            "expires_on": {
+              Type:     schema.TypeInt,
+              Computed: true,
+            },
+            "imported": {
+              Type:     schema.TypeBool,
+              Computed: true,
+            },
+            "status": {
+              Type:     schema.TypeString,
+              Computed: true,
+            },
+            "has_previous": {
+              Type:     schema.TypeBool,
+              Computed: true,
+            },
+            "key_algorithm": {
+              Type:     schema.TypeString,
+              Computed: true,
+            },
+            "algorithm": {
+              Type:     schema.TypeString,
+              Computed: true,
+            },
+            "issuance_info": {
+              Type:     schema.TypeMap,
+              Computed: true,
+            },
+          },
+        },
+      },
+    },
+  }
+}
+
+func dataIBMCertificateManagerCertificateRead(d *schema.ResourceData, meta interface{}) error {
+  cmService, err := meta.(ClientSession).CertificateManagerAPI()
+  if err != nil {
+    return err
+  }
+  instanceID := d.Get("certificate_manager_instance_id").(string)
+  certName := d.Get("name").(string)
+
+  certificateList, err := cmService.Certificate().ListCertificates(instanceID)
+  if err != nil {
+    return err
+  }
+  record := make([]map[string]interface{}, 0)
+  for _, cert := range certificateList {
+    if certName == cert.Name {
+      certificate := make(map[string]interface{})
+      certificatedata, err := cmService.Certificate().GetCertData(cert.ID)
+      if err != nil {
+        return err
+      }
+      certificate["cert_id"] = certificatedata.ID
+      certificate["name"] = certificatedata.Name
+      certificate["domains"] = certificatedata.Domains
+      certificate["description"] = certificatedata.Description
+      if certificatedata.Data != nil {
+        data := map[string]interface{}{
+          "content": certificatedata.Data.Content,
+        }
+        if certificatedata.Data.Privatekey != "" {
+          data["priv_key"] = certificatedata.Data.Privatekey
+        }
+        if certificatedata.Data.IntermediateCertificate != "" {
+          data["intermediate"] = certificatedata.Data.IntermediateCertificate
+        }
+        certificate["data"] = data
+      }
+      // NOTE: IssuanceInfo is a struct value, so the former
+      // "&certificatedata.IssuanceInfo != nil" check was always true;
+      // populate the map from whichever fields are set.
+      {
+        issuanceinfo := map[string]interface{}{}
+        if certificatedata.IssuanceInfo.Status != "" {
+          issuanceinfo["status"] = certificatedata.IssuanceInfo.Status
+        }
+        if certificatedata.IssuanceInfo.Code != "" {
+          issuanceinfo["code"] = certificatedata.IssuanceInfo.Code
+        }
+        if certificatedata.IssuanceInfo.AdditionalInfo != "" {
+          issuanceinfo["additional_info"] =
certificatedata.IssuanceInfo.AdditionalInfo + } + if certificatedata.IssuanceInfo.OrderedOn != 0 { + order := certificatedata.IssuanceInfo.OrderedOn + orderedOn := strconv.FormatInt(order, 10) + issuanceinfo["ordered_on"] = orderedOn + } + certificate["issuance_info"] = issuanceinfo + } + certificate["status"] = certificatedata.Status + certificate["issuer"] = certificatedata.Issuer + certificate["imported"] = certificatedata.Imported + certificate["has_previous"] = certificatedata.HasPrevious + certificate["key_algorithm"] = certificatedata.KeyAlgorithm + certificate["algorithm"] = certificatedata.Algorithm + certificate["begins_on"] = certificatedata.BeginsOn + certificate["expires_on"] = certificatedata.ExpiresOn + + record = append(record, certificate) + d.Set("certificate_details", record) + } + } + d.SetId(fmt.Sprintf("%s:%s", certName, instanceID)) + d.Set("certificate_manager_instance_id", instanceID) + d.Set("name", certName) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_certificate_manager_certificates.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_certificate_manager_certificates.go new file mode 100644 index 00000000000..6221519f962 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_certificate_manager_certificates.go @@ -0,0 +1,136 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataIBMCertificateManagerCertificates() *schema.Resource { + return &schema.Resource{ + Read: dataIBMCertificateManagerCertificatesRead, + Schema: map[string]*schema.Schema{ + "certificate_manager_instance_id": { + Type: schema.TypeString, + Description: "Certificate Manager Instance ID", + Required: true, + }, + "certificates": { + Type: schema.TypeList, + Computed: true, + Description: "List of certificates", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert_id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "domains": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "issuer": { + Type: schema.TypeString, + Computed: true, + }, + "begins_on": { + Type: schema.TypeInt, + Computed: true, + }, + "expires_on": { + Type: schema.TypeInt, + Computed: true, + }, + "algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "key_algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "serial_number": { + Type: schema.TypeString, + Computed: true, + }, + "imported": { + Type: schema.TypeBool, + Computed: true, + }, + "has_previous": { + Type: schema.TypeBool, + Computed: true, + }, + "issuance_info": { + Type: schema.TypeMap, + Computed: true, + }, + }, + }, + }, + }, + } +} +func dataIBMCertificateManagerCertificatesRead(d *schema.ResourceData, meta interface{}) error { + cmService, err := meta.(ClientSession).CertificateManagerAPI() + if err != nil { + return err + } + instanceID := d.Get("certificate_manager_instance_id").(string) + result, err := cmService.Certificate().ListCertificates(instanceID) + if err != nil { + return err + } + record := make([]map[string]interface{}, len(result)) + for i, c := range result { + certificate := make(map[string]interface{}) + certificate["cert_id"] = c.ID + 
certificate["name"] = c.Name + certificate["domains"] = c.Domains + certificate["status"] = c.Status + certificate["issuer"] = c.Issuer + certificate["begins_on"] = c.BeginsOn + certificate["expires_on"] = c.ExpiresOn + certificate["algorithm"] = c.Algorithm + certificate["key_algorithm"] = c.KeyAlgorithm + certificate["serial_number"] = c.SerialNumber + certificate["imported"] = c.Imported + certificate["has_previous"] = c.HasPrevious + if c.IssuanceInfo != nil { + issuanceinfo := make(map[string]interface{}) + if c.IssuanceInfo.Status != "" { + issuanceinfo["status"] = c.IssuanceInfo.Status + } + if c.IssuanceInfo.Code != "" { + issuanceinfo["code"] = c.IssuanceInfo.Code + } + if c.IssuanceInfo.AdditionalInfo != "" { + issuanceinfo["additional_info"] = c.IssuanceInfo.AdditionalInfo + } + if c.IssuanceInfo.OrderedOn != 0 { + order := c.IssuanceInfo.OrderedOn + orderedOn := strconv.FormatInt(order, 10) + issuanceinfo["ordered_on"] = orderedOn + } + certificate["issuance_info"] = issuanceinfo + } + record[i] = certificate + } + d.SetId(instanceID) + d.Set("certificate_manager_instance_id", instanceID) + d.Set("certificates", record) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis.go new file mode 100644 index 00000000000..795646adbd8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis.go @@ -0,0 +1,194 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2" + "github.com/IBM-Cloud/bluemix-go/models" +) + +func dataSourceIBMCISInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISInstanceRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "Resource instance name for example, my cis instance", + Type: schema.TypeString, + Required: true, + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "The id of the resource group in which the cis instance is present", + }, + + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "Unique identifier of resource instance", + }, + + "location": { + Description: "The location or the environment in which cis instance exists", + Type: schema.TypeString, + Computed: true, + }, + + "service": { + Description: "The name of the Cloud Internet Services offering, 'internet-svcs'", + Type: schema.TypeString, + Computed: true, + }, + + "plan": { + Description: "The plan type of the cis instance", + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Description: "The resource instance status", + Type: schema.TypeString, + Computed: true, + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard 
that can be used to explore and view details about the resource",
+      },
+    },
+  }
+}
+
+func dataSourceIBMCISInstanceRead(d *schema.ResourceData, meta interface{}) error {
+  rsConClient, err := meta.(ClientSession).ResourceControllerAPIV2()
+  if err != nil {
+    return err
+  }
+  rsAPI := rsConClient.ResourceServiceInstanceV2()
+  name := d.Get("name").(string)
+
+  rsInstQuery := controllerv2.ServiceInstanceQuery{
+    Name: name,
+  }
+
+  if rsGrpID, ok := d.GetOk("resource_group_id"); ok {
+    rsInstQuery.ResourceGroupID = rsGrpID.(string)
+  } else {
+    defaultRg, err := defaultResourceGroup(meta)
+    if err != nil {
+      return err
+    }
+    rsInstQuery.ResourceGroupID = defaultRg
+  }
+
+  rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI()
+  if err != nil {
+    return err
+  }
+  rsCatRepo := rsCatClient.ResourceCatalog()
+
+  if service, ok := d.GetOk("service"); ok {
+    serviceOff, err := rsCatRepo.FindByName(service.(string), true)
+    if err != nil {
+      return fmt.Errorf("Error retrieving service offering: %s", err)
+    }
+
+    rsInstQuery.ServiceID = serviceOff[0].ID
+  }
+
+  var instances []models.ServiceInstanceV2
+
+  instances, err = rsAPI.ListInstances(rsInstQuery)
+  if err != nil {
+    return err
+  }
+  var filteredInstances []models.ServiceInstanceV2
+  var location string
+
+  if loc, ok := d.GetOk("location"); ok {
+    location = loc.(string)
+    for _, instance := range instances {
+      if getLocation(instance) == location {
+        filteredInstances = append(filteredInstances, instance)
+      }
+    }
+  } else {
+    filteredInstances = instances
+  }
+
+  if len(filteredInstances) == 0 {
+    return fmt.Errorf("No resource instance found with name [%s]. If the instance doesn't exist in the default resource group, narrow the search with additional filters such as resource_group_id, location or service", name)
+  }
+
+  var instance models.ServiceInstanceV2
+
+  if len(filteredInstances) > 1 {
+    return fmt.Errorf("More than one resource instance found with a name matching [%s]. Narrow the search with additional filters such as resource_group_id, location or service", name)
+  }
+  instance = filteredInstances[0]
+
+  d.SetId(instance.ID)
+  d.Set("status", instance.State)
+  d.Set("resource_group_id", instance.ResourceGroupID)
+  d.Set("location", instance.RegionID)
+  d.Set("guid", instance.Guid)
+  serviceOff, err := rsCatRepo.GetServiceName(instance.ServiceID)
+  if err != nil {
+    return fmt.Errorf("Error retrieving service offering: %s", err)
+  }
+
+  d.Set("service", serviceOff)
+
+  servicePlan, err := rsCatRepo.GetServicePlanName(instance.ResourcePlanID)
+  if err != nil {
+    return fmt.Errorf("Error retrieving plan: %s", err)
+  }
+  d.Set("plan", servicePlan)
+
+  d.Set(ResourceName, instance.Name)
+  d.Set(ResourceCRN, instance.Crn.String())
+  d.Set(ResourceStatus, instance.State)
+  d.Set(ResourceGroupName, instance.ResourceGroupName)
+
+  rcontroller, err := getBaseController(meta)
+  if err != nil {
+    return err
+  }
+  d.Set(ResourceControllerURL, rcontroller+"/internet-svcs/"+url.QueryEscape(instance.Crn.String()))
+
+  return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_cache_settings.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_cache_settings.go
new file mode 100644
index 00000000000..8138dfcffc5
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_cache_settings.go
@@ -0,0 +1,337 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
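+// NOTE: illustrative usage only, not part of this change. A minimal
+// Terraform sketch of reading the cache settings of a CIS domain; it
+// assumes the cisID and cisDomainID schema keys resolve to "cis_id" and
+// "domain_id", and the referenced resources are placeholders:
+//
+//   data "ibm_cis_cache_settings" "example" {
+//     cis_id    = ibm_cis.instance.id
+//     domain_id = ibm_cis_domain.example.id
+//   }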
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "log"
+  "time"
+
+  "github.com/IBM/go-sdk-core/v4/core"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMCISCacheSetting() *schema.Resource {
+  return &schema.Resource{
+    Read: dataSourceCISCacheSettingsRead,
+    Schema: map[string]*schema.Schema{
+      cisID: {
+        Type:        schema.TypeString,
+        Description: "CIS instance crn",
+        Required:    true,
+      },
+      cisDomainID: {
+        Type:             schema.TypeString,
+        Description:      "Associated CIS domain",
+        Required:         true,
+        DiffSuppressFunc: suppressDomainIDDiff,
+      },
+      cisCacheSettingsCachingLevel: {
+        Type:        schema.TypeList,
+        Computed:    true,
+        Description: "Cache Level Setting",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "id": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "cache level id",
+            },
+            "value": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "cache level value",
+            },
+            "editable": {
+              Type:        schema.TypeBool,
+              Computed:    true,
+              Description: "cache level editable",
+            },
+            "modified_on": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "cache level modified on",
+            },
+          },
+        },
+      },
+      cisCacheServeStaleContent: {
+        Type:        schema.TypeList,
+        Computed:    true,
+        Description: "Serve Stale Content setting",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "id": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "serve stale content id",
+            },
+            "value": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "serve stale content value",
+            },
+            "editable": {
+              Type:        schema.TypeBool,
+              Computed:    true,
+              Description: "serve stale content editable",
+            },
+            "modified_on": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "serve stale content modified on",
+            },
+          },
+        },
+      },
+      cisCacheSettingsBrowserExpiration: {
+        Type:        schema.TypeList,
+        Computed:    true,
+        Description: "Browser Expiration setting",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "id": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "browser expiration id",
+            },
+            "value": {
+              Type:        schema.TypeInt,
+              Computed:    true,
+              Description: "browser expiration value",
+            },
+            "editable": {
+              Type:        schema.TypeBool,
+              Computed:    true,
+              Description: "browser expiration editable",
+            },
+            "modified_on": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "browser expiration modified on",
+            },
+          },
+        },
+      },
+      cisCacheSettingsDevelopmentMode: {
+        Type:        schema.TypeList,
+        Computed:    true,
+        Description: "Development mode setting",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "id": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "development mode id",
+            },
+            "value": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "development mode value",
+            },
+            "editable": {
+              Type:        schema.TypeBool,
+              Computed:    true,
+              Description: "development mode editable",
+            },
+            "modified_on": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "development mode modified on",
+            },
+          },
+        },
+      },
+      cisCacheSettingsQueryStringSort: {
+        Type:        schema.TypeList,
+        Computed:    true,
+        Description: "Query String sort setting",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "id": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "query string sort id",
+            },
+            "value": {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "query string sort value",
+            },
+            "editable": {
+              Type:     schema.TypeBool,
+              Computed: true,
+
Description: "query string sort editable", + }, + "modified_on": { + Type: schema.TypeString, + Computed: true, + Description: "query string sort modified on", + }, + }, + }, + }, + }, + } +} +func dataSourceCISCacheSettingsRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisCacheClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, _ := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + + // Cache Level Setting + cacheLevel_result, resp, err := cisClient.GetCacheLevel(cisClient.NewGetCacheLevelOptions()) + + if err != nil { + log.Printf("Get Cache Level setting failed : %v\n", resp) + return err + } + if cacheLevel_result != nil || cacheLevel_result.Result != nil { + + cacheLevels := make([]map[string]interface{}, 0) + cacheLevel := make(map[string]interface{}) + + if cacheLevel_result.Result.ID != nil { + cacheLevel["id"] = cacheLevel_result.Result.ID + } + if cacheLevel_result.Result.Value != nil { + cacheLevel["value"] = cacheLevel_result.Result.Value + } + if cacheLevel_result.Result.Editable != nil { + cacheLevel["editable"] = cacheLevel_result.Result.Editable + } + if cacheLevel_result.Result.ModifiedOn != nil { + cacheLevel["modified_on"] = cacheLevel_result.Result.ModifiedOn + } + cacheLevels = append(cacheLevels, cacheLevel) + d.Set(cisCacheSettingsCachingLevel, cacheLevels) + + } + // Serve Stale Content setting + servestaleContent_result, resp, err := cisClient.GetServeStaleContent(cisClient.NewGetServeStaleContentOptions()) + + if err != nil { + log.Printf("Get Serve Stale Content setting failed : %v\n", resp) + return err + } + if servestaleContent_result != nil || servestaleContent_result.Result != nil { + + servestalecontents := make([]map[string]interface{}, 0) + servestalecontent := make(map[string]interface{}) + + if servestaleContent_result.Result.ID != nil { + servestalecontent["id"] = servestaleContent_result.Result.ID + } + if servestaleContent_result.Result.Value != nil { + servestalecontent["value"] = servestaleContent_result.Result.Value + } + if servestaleContent_result.Result.Editable != nil { + servestalecontent["editable"] = servestaleContent_result.Result.Editable + } + if servestaleContent_result.Result.ModifiedOn != nil { + servestalecontent["modified_on"] = servestaleContent_result.Result.ModifiedOn + } + servestalecontents = append(servestalecontents, servestalecontent) + d.Set(cisCacheServeStaleContent, servestalecontents) + + } + + // Browser Expiration setting + browserCacheTTL_result, resp, err := cisClient.GetBrowserCacheTTL(cisClient.NewGetBrowserCacheTtlOptions()) + + if err != nil { + log.Printf("Get browser expiration setting failed : %v\n", resp) + return err + } + if browserCacheTTL_result != nil || browserCacheTTL_result.Result != nil { + + browserCacheTTLs := make([]map[string]interface{}, 0) + browserCacheTTL := make(map[string]interface{}) + + if browserCacheTTL_result.Result.ID != nil { + browserCacheTTL["id"] = browserCacheTTL_result.Result.ID + } + if browserCacheTTL_result.Result.Value != nil { + browserCacheTTL["value"] = browserCacheTTL_result.Result.Value + } + if browserCacheTTL_result.Result.Editable != nil { + browserCacheTTL["editable"] = browserCacheTTL_result.Result.Editable + } + if browserCacheTTL_result.Result.ModifiedOn != nil { + browserCacheTTL["modified_on"] = browserCacheTTL_result.Result.ModifiedOn + } + browserCacheTTLs = append(browserCacheTTLs, 
browserCacheTTL)
+    d.Set(cisCacheSettingsBrowserExpiration, browserCacheTTLs)
+  }
+
+  // Development mode setting
+  devMode_result, resp, err := cisClient.GetDevelopmentMode(cisClient.NewGetDevelopmentModeOptions())
+  if err != nil {
+    log.Printf("Get development mode setting failed: %v", resp)
+    return err
+  }
+  if devMode_result != nil && devMode_result.Result != nil {
+    devModes := make([]map[string]interface{}, 0)
+    devMode := make(map[string]interface{})
+
+    if devMode_result.Result.ID != nil {
+      devMode["id"] = devMode_result.Result.ID
+    }
+    if devMode_result.Result.Value != nil {
+      devMode["value"] = devMode_result.Result.Value
+    }
+    if devMode_result.Result.Editable != nil {
+      devMode["editable"] = devMode_result.Result.Editable
+    }
+    if devMode_result.Result.ModifiedOn != nil {
+      devMode["modified_on"] = devMode_result.Result.ModifiedOn
+    }
+    devModes = append(devModes, devMode)
+    d.Set(cisCacheSettingsDevelopmentMode, devModes)
+  }
+
+  // Query string sort setting
+  queryStringSort_result, resp, err := cisClient.GetQueryStringSort(cisClient.NewGetQueryStringSortOptions())
+  if err != nil {
+    log.Printf("Get query string sort setting failed: %v", resp)
+    return err
+  }
+  if queryStringSort_result != nil && queryStringSort_result.Result != nil {
+    queryStringSorts := make([]map[string]interface{}, 0)
+    queryStringSort := make(map[string]interface{})
+
+    if queryStringSort_result.Result.ID != nil {
+      queryStringSort["id"] = queryStringSort_result.Result.ID
+    }
+    if queryStringSort_result.Result.Value != nil {
+      queryStringSort["value"] = queryStringSort_result.Result.Value
+    }
+    if queryStringSort_result.Result.Editable != nil {
+      queryStringSort["editable"] = queryStringSort_result.Result.Editable
+    }
+    if queryStringSort_result.Result.ModifiedOn != nil {
+      queryStringSort["modified_on"] = queryStringSort_result.Result.ModifiedOn
+    }
+    queryStringSorts = append(queryStringSorts, queryStringSort)
+    d.Set(cisCacheSettingsQueryStringSort, queryStringSorts)
+  }
+  d.SetId(dataSourceIBMCISCacheSettingID(d))
+  d.Set(cisID, crn)
+  d.Set(cisDomainID, zoneID)
+  return nil
+}
+
+func dataSourceIBMCISCacheSettingID(d *schema.ResourceData) string {
+  return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_certificates.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_certificates.go
new file mode 100644
index 00000000000..80bcf8565ef
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_certificates.go
@@ -0,0 +1,169 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
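+// NOTE: illustrative usage only, not part of this change. A minimal
+// Terraform sketch of listing the certificates ordered for a CIS domain;
+// it assumes the cisID and cisDomainID schema keys resolve to "cis_id"
+// and "domain_id", and the referenced resources are placeholders:
+//
+//   data "ibm_cis_certificates" "example" {
+//     cis_id    = ibm_cis.instance.id
+//     domain_id = ibm_cis_domain.example.id
+//   }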
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisCertificates = "certificates" + cisCertificatesCertificates = "certificates" + cisCertificatesCertificatesID = "id" + cisCertificatesCertificatesHosts = "hosts" + cisCertificatesCertificatesStatus = "status" + cisCertificatesPrimaryCertificate = "primary_certificate" + cisCertificatesType = "type" + cisCertificateTypeDedicated = "dedicated" +) + +func dataIBMCISCertificates() *schema.Resource { + return &schema.Resource{ + Read: dataIBMCISCertificatesRead, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS object id", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisCertificates: { + Type: schema.TypeList, + Computed: true, + Description: "Collection of certificates", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "certificate identifier", + }, + cisCertificateOrderID: { + Type: schema.TypeString, + Computed: true, + Description: "certificate id", + }, + cisCertificatesType: { + Type: schema.TypeString, + Computed: true, + Description: "certificate type", + }, + cisCertificateOrderHosts: { + Type: schema.TypeList, + Description: "Hosts which certificate need to be ordered", + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + cisCertificateOrderStatus: { + Type: schema.TypeString, + Description: "certificate status", + Computed: true, + }, + cisCertificatesPrimaryCertificate: { + Type: schema.TypeString, + Description: "Primary certificate id", + Computed: true, + }, + cisCertificatesCertificates: { + Type: schema.TypeList, + Computed: true, + Description: "Collection of certificates associated with this certificates", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisCertificatesCertificatesID: { + Type: schema.TypeString, + Description: "certificate id", + Computed: true, + }, + cisCertificatesCertificatesHosts: { + Type: schema.TypeList, + Description: "Hosts which certificates are ordered", + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + cisCertificatesCertificatesStatus: { + Type: schema.TypeString, + Description: "certificate status", + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} +func dataIBMCISCertificatesRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID, _, _ := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewListCertificatesOptions() + result, response, err := cisClient.ListCertificates(opt) + if err != nil { + log.Printf("List all certificates failed: %v", response) + return err + } + certificatesList := make([]interface{}, 0) + for _, instance := range result.Result { + certificate := map[string]interface{}{} + certificate["id"] = convertCisToTfThreeVar(*instance.ID, zoneID, crn) + certificate[cisCertificateOrderID] = *instance.ID + certificate[cisCertificateOrderStatus] = *instance.Status + if instance.PrimaryCertificate != nil { + 
certificate[cisCertificatesPrimaryCertificate] = + convertCISCertificatesObj(*instance.Type, instance.PrimaryCertificate) + } + certificate[cisCertificateOrderHosts] = flattenStringList(instance.Hosts) + + certs := []interface{}{} + for _, i := range instance.Certificates { + cert := map[string]interface{}{} + if i.ID != nil { + cert[cisCertificatesCertificatesID] = convertCISCertificatesObj(*instance.Type, i.ID) + } + cert[cisCertificatesCertificatesStatus] = *i.Status + cert[cisCertificatesCertificatesHosts] = flattenStringList(i.Hosts) + certs = append(certs, cert) + } + certificate[cisCertificatesType] = *instance.Type + certificate[cisCertificatesCertificates] = certs + certificatesList = append(certificatesList, certificate) + } + d.SetId(dataSourceIBMCISCertificatesID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisCertificates, certificatesList) + return nil +} + +func dataSourceIBMCISCertificatesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +// convertCISCertificatesObj normalizes a certificate identifier: dedicated +// certificate IDs are decoded as numbers, so they are formatted without an +// exponent and the width padding is trimmed; any other identifier is printed as-is. +func convertCISCertificatesObj(certType string, obj interface{}) (result string) { + if certType == cisCertificateTypeDedicated { + result = strings.TrimSpace(fmt.Sprintf("%32.f", obj)) + } else { + result = fmt.Sprint(obj) + } + return result +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_custom_certificates.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_custom_certificates.go new file mode 100644 index 00000000000..296e5c182ae --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_custom_certificates.go @@ -0,0 +1,138 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisCustomCertificates = "custom_certificates" +) + +func dataSourceIBMCISCustomCertificates() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISCustomCertificatesRead, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisCustomCertificates: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + cisCertificateUploadCustomCertID: { + Type: schema.TypeString, + Computed: true, + }, + cisCertificateUploadBundleMethod: { + Type: schema.TypeString, + Description: "Certificate bundle method", + Computed: true, + }, + cisCertificateUploadHosts: { + Type: schema.TypeList, + Computed: true, + Description: "hosts to which the certificate is uploaded", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + cisCertificateUploadPriority: { + Type: schema.TypeInt, + Description: "Certificate priority", + Computed: true, + }, + cisCertificateUploadStatus: { + Type: schema.TypeString, + Description: "certificate status", + Computed: true, + }, + cisCertificateUploadIssuer: { + Type: schema.TypeString, + Description: "certificate issuer", + Computed: true, + }, + cisCertificateUploadSignature: { + Type: schema.TypeString, + Description: "certificate signature", + Computed: true, + }, + cisCertificateUploadUploadedOn: { + Type: schema.TypeString, + Description: "certificate uploaded date", + Computed: true, + },
cisCertificateUploadModifiedOn: { + Type: schema.TypeString, + Description: "certificate modified date", + Computed: true, + }, + cisCertificateUploadExpiresOn: { + Type: schema.TypeString, + Description: "certificate expires date", + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISCustomCertificatesRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewListCustomCertificatesOptions() + result, resp, err := cisClient.ListCustomCertificates(opt) + if err != nil { + return fmt.Errorf("Failed to list custom certificates: %v", resp) + } + certsList := make([]map[string]interface{}, 0) + for _, r := range result.Result { + cert := map[string]interface{}{} + cert["id"] = convertCisToTfThreeVar(*r.ID, zoneID, crn) + cert[cisCertificateUploadCustomCertID] = *r.ID + cert[cisCertificateUploadBundleMethod] = *r.BundleMethod + cert[cisCertificateUploadHosts] = flattenStringList(r.Hosts) + cert[cisCertificateUploadIssuer] = *r.Issuer + cert[cisCertificateUploadSignature] = *r.Signature + cert[cisCertificateUploadStatus] = *r.Status + cert[cisCertificateUploadPriority] = *r.Priority + cert[cisCertificateUploadUploadedOn] = *r.UploadedOn + cert[cisCertificateUploadModifiedOn] = *r.ModifiedOn + cert[cisCertificateUploadExpiresOn] = *r.ExpiresOn + certsList = append(certsList, cert) + } + d.SetId(dataSourceIBMCISCustomCertificatesID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisCustomCertificates, certsList) + return nil +} + +func dataSourceIBMCISCustomCertificatesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_custom_pages.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_custom_pages.go new file mode 100644 index 00000000000..0d9957722e6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_custom_pages.go @@ -0,0 +1,133 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
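Reviewer note: the file below adds the custom pages data source. A minimal usage sketch, assuming a registration name of "ibm_cis_custom_pages" (matching the file name) and the usual cis_id/domain_id attribute names; the "cis_custom_pages" collection name comes from the constant in this file, and the variable names are illustrative:

    data "ibm_cis_custom_pages" "example" {
      cis_id    = var.cis_crn
      domain_id = var.domain_id
    }

    output "custom_pages" {
      value = data.ibm_cis_custom_pages.example.cis_custom_pages
    }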
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisCustomPages = "cis_custom_pages" +) + +func dataSourceIBMCISCustomPages() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISCustomPagesRead, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisCustomPages: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisCustomPageIdentifier: { + Type: schema.TypeString, + Description: "Custom page identifier", + Computed: true, + }, + cisCustomPageURL: { + Type: schema.TypeString, + Description: "Custom page url", + Computed: true, + }, + cisCustomPageState: { + Type: schema.TypeString, + Description: "Custom page state", + Computed: true, + }, + cisCustomPageDesc: { + Type: schema.TypeString, + Description: "Free text", + Computed: true, + }, + cisCustomPageRequiredTokens: { + Type: schema.TypeList, + Description: "Custom page required tokens", + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + cisCustomPagePreviewTarget: { + Type: schema.TypeString, + Description: "Custom page preview target", + Computed: true, + }, + cisCustomPageCreatedOn: { + Type: schema.TypeString, + Description: "Custom page created date", + Computed: true, + }, + cisCustomPageModifiedOn: { + Type: schema.TypeString, + Description: "Custom page modified date", + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISCustomPagesRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisCustomPageClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID := d.Get(cisDomainID).(string) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewListZoneCustomPagesOptions() + + result, response, err := cisClient.ListZoneCustomPages(opt) + if err != nil { + log.Printf("List custom pages failed: %v", response) + return err + } + customPagesOutput := make([]map[string]interface{}, 0) + for _, instance := range result.Result { + customPage := make(map[string]interface{}, 0) + customPage[cisCustomPageIdentifier] = *instance.ID + customPage[cisCustomPageState] = *instance.State + customPage[cisCustomPageDesc] = *instance.Description + customPage[cisCustomPagePreviewTarget] = *instance.PreviewTarget + customPage[cisCustomPageRequiredTokens] = flattenStringList(instance.RequiredTokens) + if instance.CreatedOn != nil { + customPage[cisCustomPageCreatedOn] = (*instance.CreatedOn).String() + } + if instance.ModifiedOn != nil { + customPage[cisCustomPageModifiedOn] = (*instance.ModifiedOn).String() + } + if instance.URL != nil { + customPage[cisCustomPageURL] = *instance.URL + } + + customPagesOutput = append(customPagesOutput, customPage) + } + d.SetId(dataSourceIBMCISCustomPageID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisCustomPages, customPagesOutput) + return nil +} + +func dataSourceIBMCISCustomPageID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_dns_records.go
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_dns_records.go new file mode 100644 index 00000000000..e8026b812fb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_dns_records.go @@ -0,0 +1,218 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisDNSRecords = "cis_dns_records" + cisDNSRecordsExportFile = "file" +) + +func dataSourceIBMCISDNSRecords() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISDNSRecordsRead, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "DNS Zone CRN", + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + Description: "Zone ID", + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisDNSRecordsExportFile: { + Type: schema.TypeString, + Optional: true, + Description: "file to be exported", + }, + + cisDNSRecords: { + Type: schema.TypeList, + Description: "Collection of dns resource records", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "DNS record id", + }, + cisDNSRecordID: { + Type: schema.TypeString, + Computed: true, + Description: "DNS record id", + }, + cisZoneName: { + Type: schema.TypeString, + Computed: true, + Description: "DNS Zone Name", + }, + cisDNSRecordName: { + Type: schema.TypeString, + Computed: true, + Description: "DNS Record Name", + }, + cisDNSRecordCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "DNS Record created on", + }, + cisDNSRecordModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "DNS Record modified on", + }, + cisDNSRecordType: { + Type: schema.TypeString, + Computed: true, + Description: "DNS Record Type", + }, + cisDNSRecordContent: { + Type: schema.TypeString, + Computed: true, + Description: "DNS Record content info", + }, + cisDNSRecordPriority: { + Type: schema.TypeInt, + Computed: true, + Description: "DNS Record MX priority", + }, + cisDNSRecordProxiable: { + Type: schema.TypeBool, + Computed: true, + Description: "DNS Record proxiable", + }, + cisDNSRecordProxied: { + Type: schema.TypeBool, + Computed: true, + Description: "DNS Record proxied", + }, + cisDNSRecordTTL: { + Type: schema.TypeInt, + Computed: true, + Description: "DNS Record Time To Live", + }, + cisDNSRecordData: { + Type: schema.TypeMap, + Computed: true, + Description: "DNS Record Data", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISDNSRecordsRead(d *schema.ResourceData, meta interface{}) error { + var ( + crn string + zoneID string + records []map[string]interface{} + ) + sess, err := meta.(ClientSession).CisDNSRecordClientSession() + if err != nil { + return err + } + + // session options + crn = d.Get(cisID).(string) + zoneID, _, _ = convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + sess.Crn = core.StringPtr(crn) + sess.ZoneIdentifier = core.StringPtr(zoneID) + + if file, ok := d.GetOk(cisDNSRecordsExportFile); ok { + sess, err := meta.(ClientSession).CisDNSRecordBulkClientSession() + if err != nil { + return err + } + sess.Crn = core.StringPtr(crn) +
sess.ZoneIdentifier = core.StringPtr(zoneID) + opt := sess.NewGetDnsRecordsBulkOptions() + result, response, err := sess.GetDnsRecordsBulk(opt) + if err != nil { + log.Printf("Error exporting dns records: %s", response) + return err + } + buf, err := ioutil.ReadAll(result) + if err != nil { + log.Printf("Error reading exported dns records") + return err + } + + f, err := os.Create(file.(string)) + if err != nil { + log.Printf("Error opening file: %v", err) + return err + } + defer f.Close() + f.Write(buf) + d.Set(cisDNSRecordsExportFile, file) + } + + opt := sess.NewListAllDnsRecordsOptions() + opt.SetPage(1) + opt.SetPerPage(1000) + result, response, err := sess.ListAllDnsRecords(opt) + if err != nil { + log.Printf("Error reading dns records: %s", response) + return err + } + + records = make([]map[string]interface{}, 0) + for _, instance := range result.Result { + record := map[string]interface{}{} + record["id"] = convertCisToTfThreeVar(*instance.ID, zoneID, crn) + record[cisDNSRecordID] = *instance.ID + record[cisZoneName] = *instance.ZoneName + record[cisDNSRecordCreatedOn] = *instance.CreatedOn + record[cisDNSRecordModifiedOn] = *instance.ModifiedOn + record[cisDNSRecordName] = *instance.Name + record[cisDNSRecordType] = *instance.Type + if instance.Priority != nil { + record[cisDNSRecordPriority] = *instance.Priority + } + if instance.Content != nil { + record[cisDNSRecordContent] = *instance.Content + } + record[cisDNSRecordProxiable] = *instance.Proxiable + record[cisDNSRecordProxied] = *instance.Proxied + record[cisDNSRecordTTL] = *instance.TTL + if instance.Data != nil { + record[cisDNSRecordData] = flattenData(instance.Data, *instance.ZoneName) + } + + records = append(records, record) + } + d.SetId(dataSourceIBMCISDNSRecordID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisDNSRecords, records) + return nil +} + +// dataSourceIBMCISDNSRecordID returns a reasonable ID for the dns records list. +func dataSourceIBMCISDNSRecordID(d *schema.ResourceData) string { + zoneID := d.Get(cisDomainID) + crn := d.Get(cisID) + return fmt.Sprintf("%s:%s", zoneID, crn) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_domain.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_domain.go new file mode 100644 index 00000000000..b37edf1132e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_domain.go @@ -0,0 +1,94 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
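Reviewer note: the file below adds the domain lookup data source. A minimal usage sketch, assuming a registration name of "ibm_cis_domain" and that the cisDomain constant maps to a "domain" attribute; all names here are assumptions based on the file's schema, not confirmed attribute strings:

    data "ibm_cis_domain" "example" {
      cis_id = var.cis_crn
      domain = "example.com"
    }

    output "domain_id" {
      value = data.ibm_cis_domain.example.domain_id
    }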
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMCISDomain() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISDomainRead, + + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS object id", + Required: true, + }, + cisDomain: { + Type: schema.TypeString, + Description: "CISzone - Domain", + Required: true, + }, + cisDomainPaused: { + Type: schema.TypeBool, + Computed: true, + }, + cisDomainStatus: { + Type: schema.TypeString, + Computed: true, + }, + cisDomainNameServers: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + cisDomainOriginalNameServers: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + cisDomainID: { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMCISDomainRead(d *schema.ResourceData, meta interface{}) error { + var zoneFound bool + cisClient, err := meta.(ClientSession).CisZonesV1ClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + cisClient.Crn = core.StringPtr(crn) + zoneName := d.Get(cisDomain).(string) + + opt := cisClient.NewListZonesOptions() + opt.SetPage(1) // list all zones in one page + opt.SetPerPage(1000) // maximum allowed limit is 1000 per page + zones, resp, err := cisClient.ListZones(opt) + if err != nil { + log.Printf("dataSourceIBMCISDomainRead - ListZones Failed %s\n", resp) + return err + } + + for _, zone := range zones.Result { + if *zone.Name == zoneName { + d.SetId(convertCisToTfTwoVar(*zone.ID, crn)) + d.Set(cisID, crn) + d.Set(cisDomain, *zone.Name) + d.Set(cisDomainStatus, *zone.Status) + d.Set(cisDomainPaused, *zone.Paused) + d.Set(cisDomainNameServers, zone.NameServers) + d.Set(cisDomainOriginalNameServers, zone.OriginalNameServers) + d.Set(cisDomainID, *zone.ID) + zoneFound = true + } + } + + if !zoneFound { + return fmt.Errorf("Given zone does not exist. Please specify a correct domain") + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_edge_functions_actions.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_edge_functions_actions.go new file mode 100644 index 00000000000..16a8b809284 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_edge_functions_actions.go @@ -0,0 +1,154 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
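Reviewer note: the file below adds the edge functions actions data source. A minimal usage sketch, assuming a registration name of "ibm_cis_edge_functions_actions" and the usual cis_id/domain_id attribute names; the "cis_edge_functions_actions" collection name comes from the constant in this file:

    data "ibm_cis_edge_functions_actions" "example" {
      cis_id    = var.cis_crn
      domain_id = var.domain_id
    }

    output "actions" {
      value = data.ibm_cis_edge_functions_actions.example.cis_edge_functions_actions
    }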
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisEdgeFunctionsActions = "cis_edge_functions_actions" + cisEdgeFunctionsActionEtag = "etag" + cisEdgeFunctionsActionHandlers = "handlers" + cisEdgeFunctionsActionRoutes = "routes" + cisEdgeFunctionsActionTriggerID = "trigger_id" + cisEdgeFunctionsActionRoutePattern = "pattern_url" + cisEdgeFunctionsActionRouteActionName = "action_name" + cisEdgeFunctionsActionRouteRequestLimitFailOpen = "request_limit_fail_open" + cisEdgeFunctionsActionCreatedOn = "created_on" + cisEdgeFunctionsActionModifiedOn = "modified_on" +) + +func dataSourceIBMCISEdgeFunctionsActions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISEdgeFunctionsActionsRead, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Instance CRN", + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Domain ID", + DiffSuppressFunc: suppressDataDiff, + }, + cisEdgeFunctionsActions: { + Type: schema.TypeList, + Description: "List of edge functions actions", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisEdgeFunctionsActionEtag: { + Type: schema.TypeString, + Computed: true, + Description: "Edge function action etag", + }, + cisEdgeFunctionsActionHandlers: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Edge function action handlers", + }, + cisEdgeFunctionsActionCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Edge function action script created on", + }, + cisEdgeFunctionsActionModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Edge function action script modified on", + }, + cisEdgeFunctionsActionRoutes: { + Type: schema.TypeList, + Description: "List of edge function action routes", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisEdgeFunctionsActionTriggerID: { + Type: schema.TypeString, + Computed: true, + Description: "Edge function action script identifier", + }, + cisEdgeFunctionsActionRouteActionName: { + Type: schema.TypeString, + Computed: true, + Description: "Edge function action script name", + }, + cisEdgeFunctionsActionRoutePattern: { + Type: schema.TypeString, + Computed: true, + Description: "Edge function action pattern", + }, + cisEdgeFunctionsActionRouteRequestLimitFailOpen: { + Type: schema.TypeBool, + Computed: true, + Description: "Edge function action script request limit fail open", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISEdgeFunctionsActionsRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewListEdgeFunctionsActionsOptions() + result, _, err := cisClient.ListEdgeFunctionsActions(opt) + if err != nil { + return fmt.Errorf("Error listing edge functions actions: %v", err) + } + scriptInfo := make([]map[string]interface{}, 0) + for _, script := range result.Result { + routes := make([]map[string]interface{}, 0) + for _, route := range script.Routes { + r := map[string]interface{}{ + cisEdgeFunctionsActionTriggerID: *route.ID, + cisEdgeFunctionsActionRoutePattern: *route.Pattern, + cisEdgeFunctionsActionRouteActionName: *route.Script, + cisEdgeFunctionsActionRouteRequestLimitFailOpen: *route.RequestLimitFailOpen, + } + routes = append(routes, r) + } + handlers := make([]string, 0) + for _, h := range script.Handlers { + handlers = append(handlers, h) + } + l := map[string]interface{}{ + cisEdgeFunctionsActionEtag: *script.Etag, + cisEdgeFunctionsActionHandlers: handlers, + cisEdgeFunctionsActionCreatedOn: (*script.CreatedOn).String(), + cisEdgeFunctionsActionModifiedOn: (*script.ModifiedOn).String(), + cisEdgeFunctionsActionRoutes: routes, + } + scriptInfo = append(scriptInfo, l) + } + d.SetId(dataSourceIBMCISEdgeFunctionsActionsID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisEdgeFunctionsActions, scriptInfo) + return nil +} + +func dataSourceIBMCISEdgeFunctionsActionsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_edge_functions_triggers.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_edge_functions_triggers.go new file mode 100644 index 00000000000..c203b597895 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_edge_functions_triggers.go @@ -0,0 +1,105 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const cisEdgeFunctionsTriggers = "cis_edge_functions_triggers" + +func dataSourceIBMCISEdgeFunctionsTriggers() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISEdgeFunctionsTriggerRead, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Instance CRN", + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Domain ID", + DiffSuppressFunc: suppressDataDiff, + }, + cisEdgeFunctionsTriggers: { + Type: schema.TypeList, + Description: "List of edge functions triggers", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Edge function trigger id", + }, + cisEdgeFunctionsTriggerID: { + Type: schema.TypeString, + Computed: true, + Description: "Edge function trigger route id", + }, + cisEdgeFunctionsTriggerPattern: { + Type: schema.TypeString, + Computed: true, + Description: "Edge function trigger pattern", + }, + cisEdgeFunctionsTriggerActionName: { + Type: schema.TypeString, + Computed: true, + Description: "Edge function trigger script name", + }, + cisEdgeFunctionsTriggerRequestLimitFailOpen: { + Type: schema.TypeBool, + Computed: true, + Description: "Edge function trigger request limit fail open", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISEdgeFunctionsTriggerRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewListEdgeFunctionsTriggersOptions() + result, _, err := cisClient.ListEdgeFunctionsTriggers(opt) + if err != nil
{ + return fmt.Errorf("Error listing edge functions triggers: %v", err) + } + triggerInfo := make([]map[string]interface{}, 0) + for _, trigger := range result.Result { + l := map[string]interface{}{} + l["id"] = convertCisToTfThreeVar(*trigger.ID, zoneID, crn) + l[cisEdgeFunctionsTriggerID] = *trigger.ID + l[cisEdgeFunctionsTriggerPattern] = *trigger.Pattern + l[cisEdgeFunctionsTriggerRequestLimitFailOpen] = *trigger.RequestLimitFailOpen + if trigger.Script != nil { + l[cisEdgeFunctionsTriggerActionName] = *trigger.Script + } + triggerInfo = append(triggerInfo, l) + } + d.SetId(dataSourceIBMCISEdgeFunctionsTriggersID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisEdgeFunctionsTriggers, triggerInfo) + return nil +} + +func dataSourceIBMCISEdgeFunctionsTriggersID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_firewall.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_firewall.go new file mode 100644 index 00000000000..c26b994498d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_firewall.go @@ -0,0 +1,291 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataIBMCISFirewallsRecord() *schema.Resource { + return &schema.Resource{ + Read: dataIBMCISFirewallRecordRead, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS object id", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisFirewallType: { + Type: schema.TypeString, + Required: true, + Description: "Type of firewall. Allowable values are access-rules, ua-rules, lockdowns", + }, + cisFirewallLockdown: { + Type: schema.TypeList, + Computed: true, + Description: "Lockdown Data", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallLockdownID: { + Type: schema.TypeString, + Computed: true, + Description: "firewall identifier", + }, + cisFirewallLockdownPaused: { + Type: schema.TypeBool, + Computed: true, + Description: "Firewall rule paused or enabled", + }, + cisFirewallLockdownDesc: { + Type: schema.TypeString, + Computed: true, + Description: "description", + }, + cisFirewallLockdownPriority: { + Type: schema.TypeInt, + Computed: true, + Description: "Firewall priority", + }, + cisFirewallLockdownURLs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "URL in which firewall rule is applied", + }, + cisFirewallLockdownConfigurations: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallLockdownConfigurationsTarget: { + Type: schema.TypeString, + Computed: true, + Description: "Target type", + }, + cisFirewallLockdownConfigurationsValue: { + Type: schema.TypeString, + Computed: true, + Description: "Target value", + }, + }, + }, + }, + }, + }, + }, + cisFirewallAccessRule: { + Type: schema.TypeList, + Computed: true, + Description: "Access Rule Data", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallAccessRuleID: { + Type: schema.TypeString, + Computed: true, + Description: "firewall
identifier", + }, + cisFirewallAccessRuleNotes: { + Type: schema.TypeString, + Computed: true, + Description: "description", + }, + cisFirewallAccessRuleMode: { + Type: schema.TypeString, + Computed: true, + Description: "Access rule mode", + }, + cisFirewallAccessRuleConfiguration: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallUARuleConfigurationTarget: { + Type: schema.TypeString, + Computed: true, + Description: "Target type", + }, + cisFirewallUARuleConfigurationValue: { + Type: schema.TypeString, + Computed: true, + Description: "Target value", + }, + }, + }, + }, + }, + }, + }, + cisFirewallUARule: { + Type: schema.TypeList, + Computed: true, + Description: "User Agent Rule Data", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallUARuleID: { + Type: schema.TypeString, + Computed: true, + Description: "firewall identifier", + }, + cisFirewallUARulePaused: { + Type: schema.TypeBool, + Computed: true, + Description: "Rule whether paused or not", + }, + cisFirewallUARuleDesc: { + Type: schema.TypeString, + Computed: true, + Description: "description", + }, + cisFirewallUARuleMode: { + Type: schema.TypeString, + Computed: true, + Description: "user agent rule mode", + }, + cisFirewallUARuleConfiguration: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallUARuleConfigurationTarget: { + Type: schema.TypeString, + Computed: true, + Description: "Target type", + }, + cisFirewallUARuleConfigurationValue: { + Type: schema.TypeString, + Computed: true, + Description: "Target value", + }, + }, + }, + }, + }, + }, + }, + }, + } +} +func dataIBMCISFirewallRecordRead(d *schema.ResourceData, meta interface{}) error { + crn := d.Get(cisID).(string) + zoneID, _, _ := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + firewallType := d.Get(cisFirewallType).(string) + + if firewallType == cisFirewallTypeLockdowns { + cisClient, err := meta.(ClientSession).CisLockdownClientSession() + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewListAllZoneLockownRulesOptions() + result, response, err := cisClient.ListAllZoneLockownRules(opt) + if err != nil { + log.Printf("List all zone lockdown rules failed: %v", response) + return err + } + lockdownList := make([]map[string]interface{}, 0) + for _, instance := range result.Result { + configurationList := []interface{}{} + for _, c := range instance.Configurations { + configuration := make(map[string]interface{}, 0) + configuration[cisFirewallLockdownConfigurationsTarget] = c.Target + configuration[cisFirewallLockdownConfigurationsValue] = c.Value + configurationList = append(configurationList, configuration) + } + lockdown := make(map[string]interface{}) + lockdown[cisFirewallLockdownID] = *instance.ID + lockdown[cisFirewallLockdownPaused] = *instance.Paused + if instance.Priority != nil { + lockdown[cisFirewallLockdownPriority] = *instance.Priority + } + lockdown[cisFirewallLockdownURLs] = instance.Urls + lockdown[cisFirewallLockdownConfigurations] = configurationList + if instance.Description != nil { + lockdown[cisFirewallLockdownDesc] = *instance.Description + } + lockdownList = append(lockdownList, lockdown) + } + d.Set(cisFirewallLockdown, lockdownList) + } else if firewallType == cisFirewallTypeAccessRules { + cisClient, err := meta.(ClientSession).CisAccessRuleClientSession() + if err != nil { 
+ return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewListAllZoneAccessRulesOptions() + result, response, err := cisClient.ListAllZoneAccessRules(opt) + if err != nil { + log.Printf("List all zone access rules failed: %v", response) + return err + } + accessRuleList := make([]interface{}, 0) + for _, instance := range result.Result { + configurations := []interface{}{} + configuration := map[string]interface{}{} + configuration[cisFirewallAccessRuleConfigurationTarget] = *instance.Configuration.Target + configuration[cisFirewallAccessRuleConfigurationValue] = *instance.Configuration.Value + configurations = append(configurations, configuration) + accessRule := make(map[string]interface{}, 0) + accessRule[cisFirewallAccessRuleID] = *instance.ID + accessRule[cisFirewallAccessRuleMode] = *instance.Mode + accessRule[cisFirewallAccessRuleNotes] = *instance.Notes + accessRule[cisFirewallAccessRuleConfiguration] = configurations + accessRuleList = append(accessRuleList, accessRule) + } + d.Set(cisFirewallAccessRule, accessRuleList) + } else if firewallType == cisFirewallTypeUARules { + cisClient, err := meta.(ClientSession).CisUARuleClientSession() + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewListAllZoneUserAgentRulesOptions() + result, response, err := cisClient.ListAllZoneUserAgentRules(opt) + if err != nil { + log.Printf("List all zone ua rules failed: %v", response) + return err + } + uaRuleList := make([]interface{}, 0) + for _, instance := range result.Result { + configurations := []interface{}{} + configuration := map[string]interface{}{} + configuration[cisFirewallUARuleConfigurationTarget] = *instance.Configuration.Target + configuration[cisFirewallUARuleConfigurationValue] = *instance.Configuration.Value + configurations = append(configurations, configuration) + uaRule := make(map[string]interface{}, 0) + uaRule[cisFirewallUARuleID] = *instance.ID + uaRule[cisFirewallUARuleMode] = *instance.Mode + uaRule[cisFirewallUARulePaused] = *instance.Paused + if instance.Description != nil { + uaRule[cisFirewallUARuleDesc] = *instance.Description + } + uaRule[cisFirewallUARuleConfiguration] = configurations + uaRuleList = append(uaRuleList, uaRule) + } + d.Set(cisFirewallUARule, uaRuleList) + } + + d.SetId(dataIBMCISFirewallRecordsID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisFirewallType, firewallType) + + return nil +} + +func dataIBMCISFirewallRecordsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_global_load_balancers.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_global_load_balancers.go new file mode 100644 index 00000000000..54926048912 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_global_load_balancers.go @@ -0,0 +1,230 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
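Reviewer note: the file below adds the global load balancers data source. A minimal usage sketch, assuming a registration name of "ibm_cis_global_load_balancers" and the usual cis_id/domain_id attribute names; the "cis_glb" collection name comes from the constant in this file:

    data "ibm_cis_global_load_balancers" "example" {
      cis_id    = var.cis_crn
      domain_id = var.domain_id
    }

    output "glbs" {
      value = data.ibm_cis_global_load_balancers.example.cis_glb
    }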
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const cisGLB = "cis_glb" + +func dataSourceIBMCISGlbs() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS instance CRN", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisGLB: { + Type: schema.TypeList, + Description: "Collection of GLB detail", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Description: "identifier with zone id", + Computed: true, + }, + cisGLBID: { + Type: schema.TypeString, + Description: "global load balancer id", + Computed: true, + }, + cisGLBName: { + Type: schema.TypeString, + Description: "name", + Computed: true, + }, + cisGLBFallbackPoolID: { + Type: schema.TypeString, + Description: "fallback pool ID", + Computed: true, + }, + cisGLBDefaultPoolIDs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "List of default Pool IDs", + }, + cisGLBDesc: { + Type: schema.TypeString, + Computed: true, + Description: "Description for the load balancer instance", + }, + cisGLBTTL: { + Type: schema.TypeInt, + Computed: true, + Description: "TTL value", + }, + cisGLBProxied: { + Type: schema.TypeBool, + Computed: true, + Description: "set to true if proxy needs to be enabled", + }, + cisGLBSessionAffinity: { + Type: schema.TypeString, + Computed: true, + Description: "Session affinity info", + }, + cisGLBEnabled: { + Type: schema.TypeBool, + Computed: true, + Description: "set to true if LB needs to be enabled", + }, + cisGLBCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Load balancer creation date", + }, + cisGLBModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Load balancer modified date", + }, + cisGLBPopPools: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisGLBPopPoolsPop: { + Type: schema.TypeString, + Computed: true, + Description: "pop pools region", + }, + + cisGLBPopPoolsPoolIDs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + cisGLBRegionPools: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisGLBRegionPoolsRegion: { + Type: schema.TypeString, + Computed: true, + }, + cisGLBRegionPoolsPoolIDs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(10 * time.Minute), + }, + Read: dataSourceCISGlbsRead, + Importer: &schema.ResourceImporter{}, + } +} + +func dataSourceCISGlbsRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisGLBClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewListAllLoadBalancersOptions() + + result, resp, err :=
cisClient.ListAllLoadBalancers(opt) + if err != nil { + log.Printf("[WARN] List all GLB failed: %v\n", resp) + return err + } + glbs := result.Result + + glbList := make([]map[string]interface{}, 0) + for _, glbObj := range glbs { + glbOutput := map[string]interface{}{} + glbOutput["id"] = convertCisToTfThreeVar(*glbObj.ID, zoneID, crn) + glbOutput[cisGLBID] = *glbObj.ID + glbOutput[cisGLBName] = *glbObj.Name + glbOutput[cisGLBDefaultPoolIDs] = convertCisToTfTwoVarSlice(glbObj.DefaultPools, crn) + glbOutput[cisGLBDesc] = *glbObj.Description + glbOutput[cisGLBFallbackPoolID] = convertCisToTfTwoVar(*glbObj.FallbackPool, crn) + glbOutput[cisGLBTTL] = *glbObj.TTL + glbOutput[cisGLBProxied] = *glbObj.Proxied + glbOutput[cisGLBEnabled] = *glbObj.Enabled + glbOutput[cisGLBSessionAffinity] = *glbObj.SessionAffinity + glbOutput[cisGLBCreatedOn] = *glbObj.CreatedOn + glbOutput[cisGLBModifiedOn] = *glbObj.ModifiedOn + flattenPopPools := flattenDataSourcePopPools( + glbObj.PopPools, cisGLBPopPoolsPop, crn) + glbOutput[cisGLBPopPools] = flattenPopPools + flattenRegionPools := flattenDataSourceRegionPools( + glbObj.RegionPools, cisGLBRegionPoolsRegion, crn) + glbOutput[cisGLBRegionPools] = flattenRegionPools + glbList = append(glbList, glbOutput) + } + d.SetId(dataSourceCISGlbsCheckID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisGLB, glbList) + return nil +} + +// dataSourceCISGlbsCheckID returns a reasonable ID for the glb list +func dataSourceCISGlbsCheckID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func flattenDataSourcePopPools(pools interface{}, geoType string, cisID string) []interface{} { + result := make([]interface{}, 0) + for k, v := range pools.(map[string]interface{}) { + poolIds := convertCisToTfTwoVarSlice(expandStringList(v.([]interface{})), cisID) + pool := map[string]interface{}{ + cisGLBPopPoolsPop: k, + cisGLBPopPoolsPoolIDs: poolIds, + } + result = append(result, pool) + } + return result +} + +func flattenDataSourceRegionPools(pools interface{}, geoType string, cisID string) []interface{} { + result := make([]interface{}, 0) + for k, v := range pools.(map[string]interface{}) { + poolIds := convertCisToTfTwoVarSlice(expandStringList(v.([]interface{})), cisID) + pool := map[string]interface{}{ + cisGLBRegionPoolsRegion: k, + cisGLBRegionPoolsPoolIDs: poolIds, + } + result = append(result, pool) + } + return result +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_healthchecks.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_healthchecks.go new file mode 100644 index 00000000000..8853d657724 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_healthchecks.go @@ -0,0 +1,211 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
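Reviewer note: the file below adds the GLB health checks data source; its schema takes only the instance CRN. A minimal usage sketch, assuming a registration name of "ibm_cis_healthchecks" and the usual cis_id attribute name; the "cis_healthchecks" collection name comes from the constant in this file:

    data "ibm_cis_healthchecks" "example" {
      cis_id = var.cis_crn
    }

    output "monitors" {
      value = data.ibm_cis_healthchecks.example.cis_healthchecks
    }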
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisGLBHealthCheck = "cis_healthchecks" +) + +func dataSourceIBMCISHealthChecks() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISGLBHealthCheckRead, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "DNS Zone CRN", + }, + + cisGLBHealthCheck: { + Type: schema.TypeList, + Description: "Collection of GLB Health check/monitor detail", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "GLB Monitor/Health check id", + }, + cisID: { + Type: schema.TypeString, + Computed: true, + Description: "DNS Zone CRN", + }, + cisGLBHealthCheckID: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Monitor/Health check id", + }, + cisGLBHealthCheckPath: { + Type: schema.TypeString, + Description: "path", + Computed: true, + }, + cisGLBHealthCheckExpectedBody: { + Type: schema.TypeString, + Description: "expected_body", + Computed: true, + }, + cisGLBHealthCheckExpectedCodes: { + Type: schema.TypeString, + Description: "expected_codes", + Computed: true, + }, + cisGLBHealthCheckDesc: { + Type: schema.TypeString, + Description: "description", + Computed: true, + }, + cisGLBHealthCheckType: { + Type: schema.TypeString, + Description: "type", + Computed: true, + }, + cisGLBHealthCheckMethod: { + Type: schema.TypeString, + Description: "method", + Computed: true, + }, + cisGLBHealthCheckTimeout: { + Type: schema.TypeInt, + Description: "timeout", + Computed: true, + }, + cisGLBHealthCheckRetries: { + Type: schema.TypeInt, + Description: "retries", + Computed: true, + }, + cisGLBHealthCheckInterval: { + Type: schema.TypeInt, + Description: "interval", + Computed: true, + }, + cisGLBHealthCheckFollowRedirects: { + Type: schema.TypeBool, + Description: "follow_redirects", + Computed: true, + }, + cisGLBHealthCheckAllowInsecure: { + Type: schema.TypeBool, + Description: "allow_insecure", + Computed: true, + }, + cisGLBHealthCheckCreatedOn: { + Type: schema.TypeString, + Computed: true, + }, + cisGLBHealthCheckModifiedOn: { + Type: schema.TypeString, + Computed: true, + }, + cisGLBHealthCheckPort: { + Type: schema.TypeInt, + Computed: true, + }, + cisGLBHealthCheckHeaders: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisGLBHealthCheckHeadersHeader: { + Type: schema.TypeString, + Computed: true, + }, + cisGLBHealthCheckHeadersValues: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISGLBHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).CisGLBHealthCheckClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + sess.Crn = core.StringPtr(crn) + + opt := sess.NewListAllLoadBalancerMonitorsOptions() + + result, resp, err := sess.ListAllLoadBalancerMonitors(opt) + if err != nil { + log.Printf("Error listing global load balancer health check detail: %s", resp) + return err + } + + monitors := make([]map[string]interface{}, 0) + for _, 
instance := range result.Result { + monitor := map[string]interface{}{} + monitor["id"] = convertCisToTfTwoVar(*instance.ID, crn) + monitor[cisID] = crn + monitor[cisGLBHealthCheckID] = *instance.ID + monitor[cisGLBHealthCheckDesc] = *instance.Description + monitor[cisGLBHealthCheckPath] = *instance.Path + monitor[cisGLBHealthCheckCreatedOn] = *instance.CreatedOn + monitor[cisGLBHealthCheckModifiedOn] = *instance.ModifiedOn + monitor[cisGLBHealthCheckExpectedBody] = *instance.ExpectedBody + monitor[cisGLBHealthCheckExpectedCodes] = *instance.ExpectedCodes + monitor[cisGLBHealthCheckType] = *instance.Type + monitor[cisGLBHealthCheckMethod] = *instance.Method + monitor[cisGLBHealthCheckTimeout] = *instance.Timeout + monitor[cisGLBHealthCheckRetries] = *instance.Retries + monitor[cisGLBHealthCheckInterval] = *instance.Interval + monitor[cisGLBHealthCheckFollowRedirects] = *instance.FollowRedirects + monitor[cisGLBHealthCheckAllowInsecure] = *instance.AllowInsecure + monitor[cisGLBHealthCheckHeaders] = flattenDataSourceLoadBalancerMonitorHeader(instance.Header) + if instance.Port != nil { + monitor[cisGLBHealthCheckPort] = *instance.Port + } + + monitors = append(monitors, monitor) + } + d.SetId(dataSourceIBMCISGLBHealthCheckID(d)) + d.Set(cisID, crn) + d.Set(cisGLBHealthCheck, monitors) + return nil +} + +// dataSourceIBMCISGLBHealthCheckID returns a reasonable ID for the health check list. +func dataSourceIBMCISGLBHealthCheckID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func flattenDataSourceLoadBalancerMonitorHeader(header map[string][]string) interface{} { + flattened := make([]interface{}, 0) + for k, v := range header { + cfg := map[string]interface{}{ + cisGLBHealthCheckHeadersHeader: k, + cisGLBHealthCheckHeadersValues: v, + } + flattened = append(flattened, cfg) + } + return flattened +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_ip_addresses.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_ip_addresses.go new file mode 100644 index 00000000000..828bd3d5df9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_ip_addresses.go @@ -0,0 +1,57 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
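Reviewer note: the file below adds the CIS IP addresses data source, which takes no arguments. A minimal usage sketch, assuming a registration name of "ibm_cis_ip_addresses"; the ipv4_cidrs/ipv6_cidrs attribute names come from the constants in this file:

    data "ibm_cis_ip_addresses" "example" {}

    output "ipv4_cidrs" {
      value = data.ibm_cis_ip_addresses.example.ipv4_cidrs
    }

    output "ipv6_cidrs" {
      value = data.ibm_cis_ip_addresses.example.ipv6_cidrs
    }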
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisIPv4CIDRs = "ipv4_cidrs" + cisIPv6CIDRs = "ipv6_cidrs" +) + +func dataSourceIBMCISIP() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISIPRead, + + Schema: map[string]*schema.Schema{ + cisIPv4CIDRs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + cisIPv6CIDRs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceIBMCISIPRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisIPClientSession() + if err != nil { + return err + } + opt := cisClient.NewListIpsOptions() + result, response, err := cisClient.ListIps(opt) + if err != nil { + log.Printf("Failed to list IP addresses: %v", response) + return err + } + + d.Set(cisIPv4CIDRs, flattenStringList(result.Result.Ipv4Cidrs)) + d.Set(cisIPv6CIDRs, flattenStringList(result.Result.Ipv6Cidrs)) + d.SetId(dataSourceIBMCISIPID(d)) + return nil +} + +func dataSourceIBMCISIPID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_origin_pools.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_origin_pools.go new file mode 100644 index 00000000000..4177f8ff9b1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_origin_pools.go @@ -0,0 +1,191 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisOriginPools = "cis_origin_pools" +) + +func dataSourceIBMCISOriginPools() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISGLBPoolsRead, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "DNS Zone CRN", + }, + cisOriginPools: { + Type: schema.TypeList, + Description: "Collection of GLB pools detail", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "GLB Pools id", + }, + cisGLBPoolID: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Pool id", + }, + cisGLBPoolName: { + Type: schema.TypeString, + Description: "name", + Computed: true, + }, + cisGLBPoolRegions: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "List of regions", + }, + cisGLBPoolDesc: { + Type: schema.TypeString, + Computed: true, + Description: "Description of the CIS Origin Pool", + }, + cisGLBPoolEnabled: { + Type: schema.TypeBool, + Computed: true, + Description: "Boolean value set to true if cis origin pool needs to be enabled", + }, + cisGLBPoolMinimumOrigins: { + Type: schema.TypeInt, + Computed: true, + Description: "Minimum number of Origins", + }, + cisGLBPoolMonitor: { + Type: schema.TypeString, + Computed: true, + Description: "Monitor value", + }, + cisGLBPoolNotificationEMail: { + Type: schema.TypeString, + Computed: true, + Description:
"Email address configured to recieve the notifications", + }, + cisGLBPoolOrigins: { + Type: schema.TypeList, + Computed: true, + Description: "Origins info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisGLBPoolOriginsName: { + Type: schema.TypeString, + Computed: true, + }, + cisGLBPoolOriginsAddress: { + Type: schema.TypeString, + Computed: true, + }, + cisGLBPoolOriginsEnabled: { + Type: schema.TypeBool, + Computed: true, + }, + cisGLBPoolOriginsWeight: { + Type: schema.TypeFloat, + Computed: true, + }, + cisGLBPoolOriginsHealthy: { + Type: schema.TypeBool, + Computed: true, + }, + cisGLBPoolOriginsDisabledAt: { + Type: schema.TypeString, + Computed: true, + }, + cisGLBPoolOriginsFailureReason: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + cisGLBPoolHealthy: { + Type: schema.TypeBool, + Computed: true, + Description: "Health status", + }, + cisGLBPoolCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Creation date info", + }, + cisGLBPoolModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Modified date info", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISGLBPoolsRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisGLBPoolClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + cisClient.Crn = core.StringPtr(crn) + + opt := cisClient.NewListAllLoadBalancerPoolsOptions() + result, resp, err := cisClient.ListAllLoadBalancerPools(opt) + if err != nil { + log.Printf("Error listing global load balancer pools detail: %s", resp) + return err + } + + pools := make([]map[string]interface{}, 0) + for _, instance := range result.Result { + pool := map[string]interface{}{} + pool["id"] = convertCisToTfTwoVar(*instance.ID, crn) + pool[cisGLBPoolID] = *instance.ID + pool[cisGLBPoolName] = *instance.Name + pool[cisGLBPoolOrigins] = flattenOrigins(instance.Origins) + pool[cisGLBPoolRegions] = instance.CheckRegions + pool[cisGLBPoolDesc] = *instance.Description + pool[cisGLBPoolEnabled] = *instance.Enabled + pool[cisGLBPoolNotificationEMail] = *instance.NotificationEmail + pool[cisGLBPoolCreatedOn] = *instance.CreatedOn + pool[cisGLBPoolModifiedOn] = *instance.ModifiedOn + if instance.Monitor != nil { + pool[cisGLBPoolMonitor] = *instance.Monitor + } + if instance.Healthy != nil { + pool[cisGLBPoolHealthy] = *instance.Healthy + } + + pools = append(pools, pool) + } + d.SetId(dataSourceIBMCISGLBPoolsID(d)) + d.Set(cisID, crn) + d.Set(cisOriginPools, pools) + return nil +} + +func dataSourceIBMCISGLBPoolsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_page_rules.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_page_rules.go new file mode 100644 index 00000000000..fd9da1797f6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_page_rules.go @@ -0,0 +1,171 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisPageRules = "cis_page_rules" +) + +func dataSourceIBMCISPageRules() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISPageRulesRead, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "DNS Zone CRN", + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + Description: "DNS Zone ID", + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisPageRules: { + Type: schema.TypeList, + Description: "Collection of page rules detail", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Page rule identifier", + }, + cisPageRuleID: { + Type: schema.TypeString, + Computed: true, + }, + cisPageRulePriority: { + Type: schema.TypeInt, + Description: "Page rule priority", + Computed: true, + }, + cisPageRuleStatus: { + Type: schema.TypeString, + Description: "Page Rule status", + Computed: true, + }, + cisPageRuleTargets: { + Type: schema.TypeList, + Description: "Page rule targets", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisPageRuleTargetsTarget: { + Type: schema.TypeString, + Computed: true, + Description: "Page rule target url", + }, + cisPageRuleTargetsConstraint: { + Type: schema.TypeList, + Computed: true, + Description: "Page rule constraint", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisPageRuleTargetsConstraintOperator: { + Type: schema.TypeString, + Computed: true, + Description: "Constraint operator", + }, + cisPageRuleTargetsConstraintValue: { + Type: schema.TypeString, + Computed: true, + Description: "Constraint value", + }, + }, + }, + }, + }, + }, + }, + cisPageRuleActions: { + Type: schema.TypeList, + Description: "Page rule actions", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisPageRuleActionsID: { + Type: schema.TypeString, + Computed: true, + Description: "Page rule action id", + }, + cisPageRuleActionsValue: { + Type: schema.TypeString, + Computed: true, + Description: "Page rule action value", + }, + cisPageRuleActionsValueURL: { + Type: schema.TypeString, + Computed: true, + Description: "Page rule actions value url", + }, + cisPageRuleActionsValueStatusCode: { + Type: schema.TypeInt, + Computed: true, + Description: "Page rule actions status code", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISPageRulesRead(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).CisPageRuleClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, _ := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + sess.Crn = core.StringPtr(crn) + sess.ZoneID = core.StringPtr(zoneID) + + opt := sess.NewListPageRulesOptions() + + result, resp, err := sess.ListPageRules(opt) + if err != nil { + log.Printf("Error listing page rules detail: %s", resp) + return err + } + + pageRules := make([]map[string]interface{}, 0) + for _, instance := range result.Result { + pageRule := map[string]interface{}{} + pageRule["id"] = convertCisToTfThreeVar(*instance.ID, zoneID, crn) + pageRule[cisPageRuleID] =
*instance.ID + pageRule[cisPageRulePriority] = *instance.Priority + pageRule[cisPageRuleStatus] = *instance.Status + pageRule[cisPageRuleTargets] = flattenCISPageRuleTargets(instance.Targets) + pageRule[cisPageRuleActions] = flattenCISPageRuleActions(instance.Actions) + pageRules = append(pageRules, pageRule) + } + d.SetId(dataSourceIBMCISPageRulesID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisPageRules, pageRules) + return nil +} + +func dataSourceIBMCISPageRulesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_range_apps.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_range_apps.go new file mode 100644 index 00000000000..a2d0c5ea9d8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_range_apps.go @@ -0,0 +1,168 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const cisRangeApps = "range_apps" + +func dataSourceIBMCISRangeApps() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISRangeAppsRead, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Instance CRN", + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Domain ID", + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisRangeApps: { + Type: schema.TypeList, + Description: "Collection of range application detail", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "range app id", + }, + cisRangeAppID: { + Type: schema.TypeString, + Computed: true, + Description: "Application identifier", + }, + cisRangeAppProtocol: { + Type: schema.TypeString, + Computed: true, + Description: "Defines the protocol and port for this application", + }, + cisRangeAppDNS: { + Type: schema.TypeString, + Computed: true, + Description: "Name of the DNS record for this application", + }, + cisRangeAppDNSType: { + Type: schema.TypeString, + Computed: true, + Description: "Type of the DNS record for this application", + }, + cisRangeAppOriginDirect: { + Type: schema.TypeList, + Computed: true, + Description: "IP address and port of the origin for this Range application.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + // cisRangeAppOriginDNS: { + // Type: schema.TypeString, + // Computed: true, + // }, + // cisRangeAppOriginPort: { + // Type: schema.TypeInt, + // Computed: true, + // }, + cisRangeAppIPFirewall: { + Type: schema.TypeBool, + Computed: true, + Description: "Enables the IP Firewall for this application.
Only available for TCP applications.", + }, + cisRangeAppProxyProtocol: { + Type: schema.TypeString, + Computed: true, + Description: "Allows for the true client IP to be passed to the service.", + }, + cisRangeAppEdgeIPsType: { + Type: schema.TypeString, + Computed: true, + Description: "The type of edge IP configuration.", + }, + cisRangeAppEdgeIPsConnectivity: { + Type: schema.TypeString, + Computed: true, + Description: "Specifies the IP version.", + }, + cisRangeAppTrafficType: { + Type: schema.TypeString, + Computed: true, + Description: "Configure how traffic is handled at the edge.", + }, + cisRangeAppTLS: { + Type: schema.TypeString, + Computed: true, + Description: "Configure if and how TLS connections are terminated at the edge.", + }, + cisRangeAppCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "created on date", + }, + cisRangeAppModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "modified on date", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISRangeAppsRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRangeAppClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewListRangeAppsOptions() + result, resp, err := cisClient.ListRangeApps(opt) + if err != nil { + return fmt.Errorf("Failed to list range applications: %v", resp) + } + apps := make([]map[string]interface{}, 0) + for _, i := range result.Result { + app := map[string]interface{}{} + app["id"] = convertCisToTfThreeVar(*i.ID, zoneID, crn) + app[cisRangeAppID] = *i.ID + app[cisRangeAppProtocol] = *i.Protocol + app[cisRangeAppDNS] = *i.Dns.Name + app[cisRangeAppDNSType] = *i.Dns.Type + app[cisRangeAppOriginDirect] = flattenStringList(i.OriginDirect) + app[cisRangeAppIPFirewall] = *i.IpFirewall + app[cisRangeAppProxyProtocol] = *i.ProxyProtocol + app[cisRangeAppEdgeIPsType] = *i.EdgeIps.Type + app[cisRangeAppEdgeIPsConnectivity] = *i.EdgeIps.Connectivity + app[cisRangeAppTLS] = *i.Tls + app[cisRangeAppTrafficType] = *i.TrafficType + app[cisRangeAppCreatedOn] = (*i.CreatedOn).String() + app[cisRangeAppModifiedOn] = (*i.ModifiedOn).String() + apps = append(apps, app) + + } + d.SetId(dataSourceIBMCISRangeAppsID(d)) + d.Set(cisRangeApps, apps) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + return nil +} + +func dataSourceIBMCISRangeAppsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_rate_limit.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_rate_limit.go new file mode 100644 index 00000000000..f5ad746d302 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_rate_limit.go @@ -0,0 +1,226 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMCISRateLimit() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISRateLimitRead, + Schema: map[string]*schema.Schema{ + "cis_id": { + Type: schema.TypeString, + Required: true, + }, + "domain_id": { + Type: schema.TypeString, + Required: true, + }, + "rate_limit": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "bypass": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "threshold": { + Type: schema.TypeInt, + Computed: true, + }, + "period": { + Type: schema.TypeInt, + Computed: true, + }, + "correlate": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "by": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "action": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Computed: true, + }, + "timeout": { + Type: schema.TypeInt, + Computed: true, + }, + "response": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content_type": { + Type: schema.TypeString, + Computed: true, + }, + "body": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "match": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "request": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "methods": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "schemes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "url": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "response": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + "origin_traffic": { + Type: schema.TypeBool, + Computed: true, + }, + "headers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "op": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "rule_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISRateLimitRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRLClientSession() + if err != nil { + return err + } + cisID := d.Get("cis_id").(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get("domain_id").(string)) + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(cisID) + cisClient.ZoneIdentifier = 
core.StringPtr(zoneID) + opt := cisClient.NewListAllZoneRateLimitsOptions() + rateLimitRecord, resp, err := cisClient.ListAllZoneRateLimits(opt) + if err != nil { + return fmt.Errorf("Failed to read RateLimit: %v", resp) + } + rules := make([]map[string]interface{}, 0) + for _, r := range rateLimitRecord.Result { + rule := map[string]interface{}{} + rule["rule_id"] = *r.ID + rule["disabled"] = *r.Disabled + rule["description"] = *r.Description + rule["threshold"] = *r.Threshold + rule["period"] = *r.Period + rule["action"] = flattenRateLimitAction(r.Action) + rule["match"] = flattenRateLimitMatch(r.Match) + rule["correlate"] = flattenRateLimitCorrelate(r.Correlate) + rule["bypass"] = flattenRateLimitByPass(r.Bypass) + rules = append(rules, rule) + + } + d.SetId(dataSourceIBMCISRateLimitID(d)) + d.Set("rate_limit", rules) + d.Set("cis_id", cisID) + d.Set("domain_id", zoneID) + return nil +} + +func dataSourceIBMCISRateLimitID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_waf_groups.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_waf_groups.go new file mode 100644 index 00000000000..c130a4cbd8d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_waf_groups.go @@ -0,0 +1,125 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const cisWAFGroups = "waf_groups" + +func dataSourceIBMCISWAFGroups() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISWAFGroupsRead, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Instance CRN", + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Domain ID", + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisWAFGroupPackageID: { + Type: schema.TypeString, + Required: true, + Description: "WAF Rule package id", + }, + cisWAFGroups: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "WAF Rule group id", + }, + cisWAFGroupID: { + Type: schema.TypeString, + Computed: true, + Description: "WAF Rule group id", + }, + cisWAFGroupMode: { + Type: schema.TypeString, + Computed: true, + Description: "WAF Rule group mode on/off", + }, + cisWAFGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "WAF Rule group name", + }, + cisWAFGroupDesc: { + Type: schema.TypeString, + Computed: true, + Description: "WAF Rule group description", + }, + cisWAFGroupRulesCount: { + Type: schema.TypeInt, + Computed: true, + Description: "WAF Rule group rules count", + }, + cisWAFGroupModifiedRulesCount: { + Type: schema.TypeInt, + Computed: true, + Description: "WAF Rule group modified rules count", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISWAFGroupsRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisWAFGroupClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + packageID, _, _, _ :=
convertTfToCisThreeVar(d.Get(cisWAFGroupPackageID).(string)) + + opt := cisClient.NewListWafRuleGroupsOptions(packageID) + opt.SetPage(1) + opt.SetPerPage(100) + result, resp, err := cisClient.ListWafRuleGroups(opt) + if err != nil { + log.Printf("List waf rule groups failed: %s\n", resp) + return err + } + wafGroups := []interface{}{} + for _, i := range result.Result { + waf := map[string]interface{}{} + waf["id"] = convertCisToTfFourVar(*i.ID, packageID, zoneID, crn) + waf[cisWAFGroupID] = *i.ID + waf[cisWAFGroupName] = *i.Name + waf[cisWAFGroupDesc] = *i.Description + waf[cisWAFGroupMode] = *i.Mode + waf[cisWAFGroupModifiedRulesCount] = *i.ModifiedRulesCount + waf[cisWAFGroupRulesCount] = *i.RulesCount + wafGroups = append(wafGroups, waf) + } + d.SetId(dataSourceIBMCISWAFGroupID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisWAFGroupPackageID, packageID) + d.Set(cisWAFGroups, wafGroups) + return nil +} + +func dataSourceIBMCISWAFGroupID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_waf_packages.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_waf_packages.go new file mode 100644 index 00000000000..679c4148e67 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_waf_packages.go @@ -0,0 +1,111 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisWAFPackages = "waf_packages" +) + +func dataSourceIBMCISWAFPackages() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISWAFPackagesRead, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "DNS Zone CRN", + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + Description: "CIS domain id", + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisWAFPackages: { + Type: schema.TypeList, + Description: "Collection of WAF packages detail", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "CIS WAF package id", + }, + cisWAFPackageID: { + Type: schema.TypeString, + Computed: true, + Description: "WAF package ID", + }, + cisWAFPackageName: { + Type: schema.TypeString, + Computed: true, + Description: "WAF package name", + }, + cisWAFPackageDetectionMode: { + Type: schema.TypeString, + Computed: true, + Description: "WAF package detection mode", + }, + cisWAFPackageDescription: { + Type: schema.TypeString, + Computed: true, + Description: "WAF package description", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISWAFPackagesRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisWAFPackageClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + + opt := cisClient.NewListWafPackagesOptions() + result, resp, err := cisClient.ListWafPackages(opt) + if err != nil { + log.Printf("Error listing waf packages detail: %s", resp) + return err + } + + packages := make([]interface{}, 0) + for _, instance := range result.Result {
+ packageDetail := make(map[string]interface{}, 0) + packageDetail["id"] = convertCisToTfThreeVar(*instance.ID, zoneID, crn) + packageDetail[cisWAFPackageID] = *instance.ID + packageDetail[cisWAFPackageName] = *instance.Name + packageDetail[cisWAFPackageDetectionMode] = *instance.DetectionMode + + if instance.Description != nil { + packageDetail[cisWAFPackageDescription] = *instance.Description + } + packages = append(packages, packageDetail) + } + d.SetId(dataSourceIBMCISWAFPackagesCheckID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisWAFPackages, packages) + return nil +} + +func dataSourceIBMCISWAFPackagesCheckID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_waf_rules.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_waf_rules.go new file mode 100644 index 00000000000..e01a949c076 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cis_waf_rules.go @@ -0,0 +1,158 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const cisWAFRules = "waf_rules" + +func dataSourceIBMCISWAFRules() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCISWAFRuleRead, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS object id", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "CIS zone - Domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisWAFRulePackageID: { + Type: schema.TypeString, + Description: "WAF rule package id", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisWAFRules: { + Type: schema.TypeList, + Description: "Collection of WAF Rules", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "WAF Rule id", + }, + cisWAFRuleID: { + Type: schema.TypeString, + Computed: true, + Description: "WAF Rule id", + }, + cisWAFRulePackageID: { + Type: schema.TypeString, + Computed: true, + Description: "WAF Package id", + }, + cisWAFRuleMode: { + Type: schema.TypeString, + Computed: true, + Description: "CIS WAF Rule mode", + }, + cisWAFRuleDesc: { + Type: schema.TypeString, + Computed: true, + Description: "CIS WAF Rule description", + }, + cisWAFRulePriority: { + Type: schema.TypeString, + Computed: true, + Description: "CIS WAF Rule Priority", + }, + cisWAFRuleGroup: { + Type: schema.TypeList, + Computed: true, + Description: "CIS WAF Rule group", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisWAFRuleGroupID: { + Type: schema.TypeString, + Computed: true, + Description: "waf rule group id", + }, + cisWAFRuleGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "waf rule group name", + }, + }, + }, + }, + cisWAFRuleAllowedModes: { + Type: schema.TypeList, + Computed: true, + Description: "CIS WAF Rule allowed modes", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCISWAFRuleRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisWAFRuleClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _,
err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + packageID, _, _, _ := convertTfToCisThreeVar(d.Get(cisWAFRulePackageID).(string)) + + opt := cisClient.NewListWafRulesOptions(packageID) + opt.SetPage(1) + opt.SetPerPage(1000) + result, response, err := cisClient.ListWafRules(opt) + if err != nil { + log.Printf("List waf rules failed %s\n", response) + return err + } + rules := []interface{}{} + for _, i := range result.Result { + + groups := []interface{}{} + group := map[string]interface{}{} + group[cisWAFRuleGroupID] = *i.Group.ID + group[cisWAFRuleGroupName] = *i.Group.Name + groups = append(groups, group) + + rule := map[string]interface{}{} + rule["id"] = convertCisToTfFourVar(*i.ID, *i.PackageID, zoneID, crn) + rule[cisWAFRuleID] = *i.ID + rule[cisWAFRulePackageID] = *i.PackageID + rule[cisWAFRuleMode] = *i.Mode + rule[cisWAFRuleDesc] = *i.Description + rule[cisWAFRulePriority] = *i.Priority + rule[cisWAFRuleGroup] = groups + rule[cisWAFRuleAllowedModes] = flattenStringList(i.AllowedModes) + + rules = append(rules, rule) + } + d.SetId(dataSourceIBMCISWAFRulesID(d)) + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisWAFRulePackageID, packageID) + d.Set(cisWAFRules, rules) + return nil +} + +func dataSourceIBMCISWAFRulesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_catalog.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_catalog.go new file mode 100644 index 00000000000..a4abdf15b35 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_catalog.go @@ -0,0 +1,106 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/catalogmanagementv1" +) + +func dataSourceIBMCmCatalog() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCmCatalogRead, + + Schema: map[string]*schema.Schema{ + "catalog_identifier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID for catalog", + }, + "label": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display Name in the requested language.", + }, + "short_description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description in the requested language.", + }, + "catalog_icon_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "URL for an icon associated with this catalog.", + }, + "tags": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of tags associated with this catalog.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The url for this specific catalog.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "CRN associated with the catalog.", + }, + "offerings_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "URL path to offerings.", + }, + }, + } +} + +func dataSourceIBMCmCatalogRead(d *schema.ResourceData, meta interface{}) error { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + + getCatalogOptions := &catalogmanagementv1.GetCatalogOptions{} + + getCatalogOptions.SetCatalogIdentifier(d.Get("catalog_identifier").(string)) + + catalog, response, err := catalogManagementClient.GetCatalogWithContext(context.TODO(), getCatalogOptions) + if err != nil { + log.Printf("[DEBUG] GetCatalogWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*catalog.ID) + if err = d.Set("label", catalog.Label); err != nil { + return fmt.Errorf("Error setting label: %s", err) + } + if err = d.Set("short_description", catalog.ShortDescription); err != nil { + return fmt.Errorf("Error setting short_description: %s", err) + } + if err = d.Set("catalog_icon_url", catalog.CatalogIconURL); err != nil { + return fmt.Errorf("Error setting catalog_icon_url: %s", err) + } + if err = d.Set("tags", catalog.Tags); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + if err = d.Set("url", catalog.URL); err != nil { + return fmt.Errorf("Error setting url: %s", err) + } + if err = d.Set("crn", catalog.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("offerings_url", catalog.OfferingsURL); err != nil { + return fmt.Errorf("Error setting offerings_url: %s", err) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_offering.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_offering.go new file mode 100644 index 00000000000..c0adef0b867 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_offering.go @@ -0,0 +1,257 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/catalogmanagementv1" +) + +func dataSourceIBMCmOffering() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCmOfferingRead, + + Schema: map[string]*schema.Schema{ + "catalog_identifier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Catalog identifier.", + ForceNew: true, + }, + "offering_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The id of this offering.", + ForceNew: true, + }, + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The url for this specific offering.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The crn for this specific offering.", + }, + "label": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display Name in the requested language.", + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The programmatic name of this offering.", + }, + "offering_icon_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "URL for an icon associated with this offering.", + }, + "offering_docs_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "URL for additional docs with this offering.", + }, + "offering_support_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "URL to be displayed in the Consumption UI for getting support on this offering.", + }, + "short_description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Short description in the requested language.", + }, + "long_description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Long description in the requested language.", + }, + "permit_request_ibm_public_publish": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is it permitted to request publishing to IBM or Public.", + }, + "ibm_publish_approved": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates if this offering has been approved for use by all IBMers.", + }, + "public_publish_approved": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates if this offering has been approved for use by all IBM Cloud users.", + }, + "public_original_crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The original offering CRN that this publish entry came from.", + }, + "publish_public_crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The crn of the public catalog entry of this offering.", + }, + "portal_approval_record": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The portal's approval record ID.", + }, + "portal_ui_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The portal UI URL.", + }, + "catalog_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The id of the catalog containing this offering.", + }, + "catalog_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The name of the catalog.", + }, + "disclaimer": &schema.Schema{ + Type: schema.TypeString, + Computed: true, +
Description: "A disclaimer for this offering.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Determine if this offering should be displayed in the Consumption UI.", + }, + "repo_info": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Repository info for offerings.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "token": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Token for private repos.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Public or enterprise GitHub.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCmOfferingRead(d *schema.ResourceData, meta interface{}) error { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + + getOfferingOptions := &catalogmanagementv1.GetOfferingOptions{} + + getOfferingOptions.SetCatalogIdentifier(d.Get("catalog_identifier").(string)) + getOfferingOptions.SetOfferingID(d.Get("offering_id").(string)) + + offering, response, err := catalogManagementClient.GetOfferingWithContext(context.TODO(), getOfferingOptions) + if err != nil { + log.Printf("[DEBUG] GetOfferingWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*offering.ID) + if err = d.Set("url", offering.URL); err != nil { + return fmt.Errorf("Error setting url: %s", err) + } + if err = d.Set("crn", offering.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("label", offering.Label); err != nil { + return fmt.Errorf("Error setting label: %s", err) + } + if err = d.Set("name", offering.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("offering_icon_url", offering.OfferingIconURL); err != nil { + return fmt.Errorf("Error setting offering_icon_url: %s", err) + } + if err = d.Set("offering_docs_url", offering.OfferingDocsURL); err != nil { + return fmt.Errorf("Error setting offering_docs_url: %s", err) + } + if err = d.Set("offering_support_url", offering.OfferingSupportURL); err != nil { + return fmt.Errorf("Error setting offering_support_url: %s", err) + } + if err = d.Set("short_description", offering.ShortDescription); err != nil { + return fmt.Errorf("Error setting short_description: %s", err) + } + if err = d.Set("long_description", offering.LongDescription); err != nil { + return fmt.Errorf("Error setting long_description: %s", err) + } + if err = d.Set("permit_request_ibm_public_publish", offering.PermitRequestIBMPublicPublish); err != nil { + return fmt.Errorf("Error setting permit_request_ibm_public_publish: %s", err) + } + if err = d.Set("ibm_publish_approved", offering.IBMPublishApproved); err != nil { + return fmt.Errorf("Error setting ibm_publish_approved: %s", err) + } + if err = d.Set("public_publish_approved", offering.PublicPublishApproved); err != nil { + return fmt.Errorf("Error setting public_publish_approved: %s", err) + } + if err = d.Set("public_original_crn", offering.PublicOriginalCRN); err != nil { + return fmt.Errorf("Error setting public_original_crn: %s", err) + } + if err = d.Set("publish_public_crn", offering.PublishPublicCRN); err != nil { + return fmt.Errorf("Error setting publish_public_crn: %s", err) + } + if err = d.Set("portal_approval_record", offering.PortalApprovalRecord); err != nil { + return fmt.Errorf("Error setting portal_approval_record: %s", err) + } + if err = d.Set("portal_ui_url", offering.PortalUIURL); err != nil { + 
return fmt.Errorf("Error setting portal_ui_url: %s", err) + } + if err = d.Set("catalog_id", offering.CatalogID); err != nil { + return fmt.Errorf("Error setting catalog_id: %s", err) + } + if err = d.Set("catalog_name", offering.CatalogName); err != nil { + return fmt.Errorf("Error setting catalog_name: %s", err) + } + if err = d.Set("disclaimer", offering.Disclaimer); err != nil { + return fmt.Errorf("Error setting disclaimer: %s", err) + } + if err = d.Set("hidden", offering.Hidden); err != nil { + return fmt.Errorf("Error setting hidden: %s", err) + } + + if offering.RepoInfo != nil { + repoInfoMap := dataSourceOfferingRepoInfoToMap(*offering.RepoInfo) + if err = d.Set("repo_info", []map[string]interface{}{repoInfoMap}); err != nil { + return fmt.Errorf("Error setting repo_info %s", err) + } + } + + return nil +} + +func dataSourceOfferingRepoInfoToMap(repoInfoItem catalogmanagementv1.RepoInfo) (repoInfoMap map[string]interface{}) { + repoInfoMap = map[string]interface{}{} + + if repoInfoItem.Token != nil { + repoInfoMap["token"] = repoInfoItem.Token + } + if repoInfoItem.Type != nil { + repoInfoMap["type"] = repoInfoItem.Type + } + + return repoInfoMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_offering_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_offering_instance.go new file mode 100644 index 00000000000..834099a4765 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_offering_instance.go @@ -0,0 +1,165 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/catalogmanagementv1" +) + +func dataSourceIBMCmOfferingInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCmOfferingInstanceRead, + + Schema: map[string]*schema.Schema{ + "instance_identifier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "ID for this instance", + }, + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "url reference to this object.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "platform CRN for this instance.", + }, + "_rev": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Cloudant Revision for this instance", + }, + "label": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "the label for this instance.", + }, + "catalog_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Catalog ID this instance was created from.", + }, + "offering_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Offering ID this instance was created from.", + }, + "kind_format": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "the format this instance has (helm, operator, ova...).", + }, + "version": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The version this instance was installed from (not version id).", + }, + "cluster_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Cluster ID.", + }, + "cluster_region": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Cluster region (e.g., us-south).", + }, + "cluster_namespaces": 
&schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of target namespaces to install into.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "cluster_all_namespaces": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "designate to install into all namespaces.", + }, + "schematics_workspace_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "id of the schematics workspace, for offerings installed through schematics", + }, + "resource_group_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "id of the resource group", + }, + }, + } +} + +func dataSourceIBMCmOfferingInstanceRead(d *schema.ResourceData, meta interface{}) error { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + + getOfferingInstanceOptions := &catalogmanagementv1.GetOfferingInstanceOptions{} + + getOfferingInstanceOptions.SetInstanceIdentifier(d.Get("instance_identifier").(string)) + + offeringInstance, response, err := catalogManagementClient.GetOfferingInstanceWithContext(context.TODO(), getOfferingInstanceOptions) + if err != nil { + log.Printf("[DEBUG] GetOfferingInstanceWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*offeringInstance.ID) + + if err = d.Set("url", offeringInstance.URL); err != nil { + return fmt.Errorf("Error setting url: %s", err) + } + if err = d.Set("crn", offeringInstance.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("_rev", offeringInstance.Rev); err != nil { + return fmt.Errorf("Error setting _rev: %s", err) + } + if err = d.Set("label", offeringInstance.Label); err != nil { + return fmt.Errorf("Error setting label: %s", err) + } + if err = d.Set("catalog_id", offeringInstance.CatalogID); err != nil { + return fmt.Errorf("Error setting catalog_id: %s", err) + } + if err = d.Set("offering_id", offeringInstance.OfferingID); err != nil { + return fmt.Errorf("Error setting offering_id: %s", err) + } + if err = d.Set("kind_format", offeringInstance.KindFormat); err != nil { + return fmt.Errorf("Error setting kind_format: %s", err) + } + if err = d.Set("version", offeringInstance.Version); err != nil { + return fmt.Errorf("Error setting version: %s", err) + } + if err = d.Set("cluster_id", offeringInstance.ClusterID); err != nil { + return fmt.Errorf("Error setting cluster_id: %s", err) + } + if err = d.Set("cluster_region", offeringInstance.ClusterRegion); err != nil { + return fmt.Errorf("Error setting cluster_region: %s", err) + } + if err = d.Set("cluster_namespaces", offeringInstance.ClusterNamespaces); err != nil { + return fmt.Errorf("Error setting cluster_namespaces: %s", err) + } + if err = d.Set("cluster_all_namespaces", offeringInstance.ClusterAllNamespaces); err != nil { + return fmt.Errorf("Error setting cluster_all_namespaces: %s", err) + } + if err = d.Set("schematics_workspace_id", offeringInstance.SchematicsWorkspaceID); err != nil { + return fmt.Errorf("Error setting schematics_workspace_id: %s", err) + } + if err = d.Set("resource_group_id", offeringInstance.ResourceGroupID); err != nil { + return fmt.Errorf("Error setting resource_group_id: %s", err) + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_version.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_version.go new file mode 100644 index 00000000000..6013ee85b41 --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cm_version.go @@ -0,0 +1,108 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/catalogmanagementv1" +) + +func dataSourceIBMCmVersion() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCmVersionRead, + + Schema: map[string]*schema.Schema{ + "version_loc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Version locator id.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Version's CRN.", + }, + "version": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Version of content type.", + }, + "sha": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "hash of the content.", + }, + "catalog_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Catalog ID.", + }, + "repo_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Content's repo URL.", + }, + "source_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Content's source URL (e.g git repo).", + }, + "tgz_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "File used to on-board this version.", + }, + }, + } +} + +func dataSourceIBMCmVersionRead(d *schema.ResourceData, meta interface{}) error { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + + getVersionOptions := &catalogmanagementv1.GetVersionOptions{} + + getVersionOptions.SetVersionLocID(d.Get("version_loc_id").(string)) + + offering, response, err := catalogManagementClient.GetVersionWithContext(context.TODO(), getVersionOptions) + if err != nil { + log.Printf("[DEBUG] GetVersionWithContext failed %s\n%s", err, response) + return err + } + + // Read the version only after the error check, so a failed request cannot dereference a nil offering. + version := offering.Kinds[0].Versions[0] + + d.SetId(*version.VersionLocator) + if err = d.Set("crn", version.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("version", version.Version); err != nil { + return fmt.Errorf("Error setting version: %s", err) + } + if err = d.Set("sha", version.Sha); err != nil { + return fmt.Errorf("Error setting sha: %s", err) + } + if err = d.Set("catalog_id", version.CatalogID); err != nil { + return fmt.Errorf("Error setting catalog_id: %s", err) + } + if err = d.Set("repo_url", version.RepoURL); err != nil { + return fmt.Errorf("Error setting repo_url: %s", err) + } + if err = d.Set("source_url", version.SourceURL); err != nil { + return fmt.Errorf("Error setting source_url: %s", err) + } + if err = d.Set("tgz_url", version.TgzURL); err != nil { + return fmt.Errorf("Error setting tgz_url: %s", err) + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_bare_metal.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_bare_metal.go new file mode 100644 index 00000000000..fd23e887497 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_bare_metal.go @@ -0,0 +1,440 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + BareMetalMask = "globalIdentifier,hostname,domain,bandwidthAllocation,provisionDate,id," + + "primaryIpAddress,primaryBackendIpAddress,privateNetworkOnlyFlag," + + "notes,userData[value],tagReferences[id,tag[name]]," + + "allowedNetworkStorage[id,nasType]," + + "hourlyBillingFlag," + + "datacenter[id,name,longName]," + + "primaryNetworkComponent[primarySubnet[networkVlan[id,primaryRouter,vlanNumber],id],maxSpeed," + + "primaryIpAddressRecord[id]," + + "primaryVersion6IpAddressRecord[subnet,id]]," + + "primaryBackendNetworkComponent[primarySubnet[networkVlan[id,primaryRouter,vlanNumber],id]," + + "primaryIpAddressRecord[id]," + + "maxSpeed,redundancyEnabledFlag]," + + "memoryCapacity,powerSupplyCount," + + "operatingSystem[softwareLicense[softwareDescription[referenceCode]]]" +) + +func dataSourceIBMComputeBareMetal() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMComputeBareMetalRead, + + Schema: map[string]*schema.Schema{ + + "global_identifier": &schema.Schema{ + Description: "The unique global identifier of the bare metal server", + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"hostname", "domain", "most_recent"}, + }, + + "hostname": &schema.Schema{ + Description: "The hostname of the bare metal server", + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"global_identifier"}, + }, + + "domain": &schema.Schema{ + Description: "The domain of the bare metal server", + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"global_identifier"}, + }, + + "datacenter": &schema.Schema{ + Description: "Datacenter in which the bare metal is deployed", + Type: schema.TypeString, + Computed: true, + }, + + "network_speed": { + Type: schema.TypeInt, + Computed: true, + Description: "The connection speed, expressed in Mbps, for the server network components.", + }, + + "public_bandwidth": { + Type: schema.TypeInt, + Computed: true, + Description: "The amount of public network traffic allowed per month.", + }, + + "public_ipv4_address": { + Type: schema.TypeString, + Computed: true, + Description: "The public IPv4 address of the bare metal server.", + }, + + "public_ipv4_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "private_ipv4_address": { + Type: schema.TypeString, + Computed: true, + Description: "The private IPv4 address of the bare metal server.", + }, + + "private_ipv4_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "public_vlan_id": { + Type: schema.TypeInt, + Computed: true, + Description: "The public VLAN used for the public network interface of the server.", + }, + + "public_subnet": { + Type: schema.TypeInt, + Computed: true, + Description: "The public subnet used for the public network interface of the server.", + }, + + "private_vlan_id": { + Type: schema.TypeInt, + Computed: true, + Description: "The private VLAN used for the private network interface of the server.", + }, + + "private_subnet": { + Type: schema.TypeInt, + Computed: true, + Description: "The private subnet used for the private network interface of the server.", + }, + + "hourly_billing": { + Type:
schema.TypeBool, + Computed: true, + Description: "The billing type of the server.", + }, + + "private_network_only": { + Type: schema.TypeBool, + Computed: true, + Description: "Specifies whether the server only has access to the private network.", + }, + + "user_metadata": { + Type: schema.TypeString, + Computed: true, + Description: "Arbitrary data available to the computing server.", + }, + + "notes": { + Type: schema.TypeString, + Computed: true, + Description: "Notes associated with the server.", + }, + + "memory": { + Type: schema.TypeInt, + Computed: true, + Description: "The amount of memory, in gigabytes, for the server.", + }, + + "redundant_power_supply": { + Type: schema.TypeBool, + Computed: true, + Description: "When the value is `true`, it indicates an additional power supply is provided.", + }, + + "redundant_network": { + Type: schema.TypeBool, + Computed: true, + Description: "When the value is `true`, two physical network interfaces are provided with a bonding configuration.", + }, + + "unbonded_network": { + Type: schema.TypeBool, + Computed: true, + Description: "When the value is `true`, two physical network interfaces are provided without a bonding configuration.", + }, + + "os_reference_code": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Tags associated with this bare metal server.", + }, + + "block_storage_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Description: "Block storage to which this computing server has access.", + }, + + "file_storage_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Description: "File storage to which this computing server has access.", + }, + + "ipv6_enabled": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether the public IPv6 address is enabled or not", + }, + + "ipv6_address": { + Type: schema.TypeString, + Computed: true, + Description: "The public IPv6 address of the bare metal server", + }, + + "ipv6_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "secondary_ip_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of secondary IPv4 addresses of the bare metal server.", + }, + + "secondary_ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The public secondary IPv4 addresses of the bare metal server.", + }, + + "most_recent": &schema.Schema{ + Description: "If true and multiple entries are found, the most recently created bare metal is used. 
" + + "If false, an error is returned", + Type: schema.TypeBool, + Optional: true, + Default: false, + ConflictsWith: []string{"global_identifier"}, + }, + }, + } +} + +func dataSourceIBMComputeBareMetalRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + var hostname, domain, globalIdentifier string + var mostRecent bool + var bms []datatypes.Hardware + var err error + + if host, ok := d.GetOk("hostname"); ok { + hostname = host.(string) + } + + if dmn, ok := d.GetOk("domain"); ok { + domain = dmn.(string) + } + + if mrcnt, ok := d.GetOk("most_recent"); ok { + mostRecent = mrcnt.(bool) + } + + if gID, ok := d.GetOk("global_identifier"); ok { + globalIdentifier = gID.(string) + } + + if globalIdentifier != "" { + bms, err = service. + Filter(filter.Build(filter.Path("hardware.globalIdentifier").Eq(globalIdentifier))).Mask( + BareMetalMask).GetHardware() + + if err != nil { + return fmt.Errorf("Error retrieving bare metal server details for %s: %s", globalIdentifier, err) + } + if len(bms) == 0 { + return fmt.Errorf("No bare metal server found with identifier %s", globalIdentifier) + } + + } else { + bms, err = service. + Filter(filter.Build(filter.Path("hardware.hostname").Eq(hostname), + filter.Path("hardware.domain").Eq(domain))).Mask( + BareMetalMask).GetHardware() + + if err != nil { + return fmt.Errorf("Error retrieving bare metal server for host %s: %s", hostname, err) + } + if len(bms) == 0 { + return fmt.Errorf("No bare metal server with hostname %s and domain %s", hostname, domain) + } + + } + + var bm datatypes.Hardware + + if len(bms) > 1 { + if mostRecent { + bm = mostRecentBareMetal(bms) + } else { + return fmt.Errorf( + "More than one bare metals found with host matching [%s] and domain "+ + "matching [%s]. 
Set 'most_recent' to true in your configuration to force the most recent bare metal "+ + "to be used", hostname, domain) + } + } else { + bm = bms[0] + } + + d.SetId(fmt.Sprintf("%d", *bm.Id)) + d.Set("global_identifier", bm.GlobalIdentifier) + d.Set("hostname", bm.Hostname) + d.Set("domain", bm.Domain) + + if bm.Datacenter != nil { + d.Set("datacenter", bm.Datacenter.Name) + } + + d.Set("network_speed", bm.PrimaryNetworkComponent.MaxSpeed) + d.Set("public_bandwidth", bm.BandwidthAllocation) + if bm.PrimaryIpAddress != nil { + d.Set("public_ipv4_address", bm.PrimaryIpAddress) + } + if bm.PrimaryNetworkComponent.PrimaryIpAddressRecord != nil { + d.Set("public_ipv4_address_id", bm.PrimaryNetworkComponent.PrimaryIpAddressRecord.Id) + } + d.Set("private_ipv4_address", bm.PrimaryBackendIpAddress) + d.Set("private_ipv4_address_id", + bm.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord.Id) + + d.Set("private_network_only", bm.PrivateNetworkOnlyFlag) + d.Set("hourly_billing", bm.HourlyBillingFlag) + + if bm.PrimaryNetworkComponent.PrimarySubnet != nil { + d.Set("public_vlan_id", bm.PrimaryNetworkComponent.PrimarySubnet.NetworkVlan.Id) + d.Set("public_subnet", bm.PrimaryNetworkComponent.PrimarySubnet.Id) + } + + if bm.PrimaryBackendNetworkComponent.PrimarySubnet != nil { + d.Set("private_vlan_id", bm.PrimaryBackendNetworkComponent.PrimarySubnet.NetworkVlan.Id) + d.Set("private_subnet", bm.PrimaryBackendNetworkComponent.PrimarySubnet.Id) + } + + userData := bm.UserData + if len(userData) > 0 && userData[0].Value != nil { + d.Set("user_metadata", userData[0].Value) + } + + d.Set("notes", sl.Get(bm.Notes, nil)) + d.Set("memory", bm.MemoryCapacity) + + d.Set("redundant_power_supply", false) + + if *bm.PowerSupplyCount == 2 { + d.Set("redundant_power_supply", true) + } + + d.Set("redundant_network", false) + d.Set("unbonded_network", false) + + bareMetalService := services.GetHardwareService(meta.(ClientSession).SoftLayerSession()) + backendNetworkComponent, err := bareMetalService.Filter( + filter.Build( + filter.Path("backendNetworkComponents.status").Eq("ACTIVE"), + ), + ).Id(*bm.Id).GetBackendNetworkComponents() + + if err != nil { + return fmt.Errorf("Error retrieving bare metal server network: %s", err) + } + + if len(backendNetworkComponent) > 2 && bm.PrimaryBackendNetworkComponent != nil { + if *bm.PrimaryBackendNetworkComponent.RedundancyEnabledFlag { + d.Set("redundant_network", true) + } else { + d.Set("unbonded_network", true) + } + } + + if bm.OperatingSystem != nil && + bm.OperatingSystem.SoftwareLicense != nil && + bm.OperatingSystem.SoftwareLicense.SoftwareDescription != nil && + bm.OperatingSystem.SoftwareLicense.SoftwareDescription.ReferenceCode != nil { + d.Set("os_reference_code", bm.OperatingSystem.SoftwareLicense.SoftwareDescription.ReferenceCode) + } + + tagReferences := bm.TagReferences + tagReferencesLen := len(tagReferences) + if tagReferencesLen > 0 { + tags := make([]string, 0, tagReferencesLen) + for _, tagRef := range tagReferences { + tags = append(tags, *tagRef.Tag.Name) + } + d.Set("tags", tags) + } + + storages := bm.AllowedNetworkStorage + if len(storages) > 0 { + d.Set("block_storage_ids", flattenBlockStorageID(storages)) + d.Set("file_storage_ids", flattenFileStorageID(storages)) + } + + connInfo := map[string]string{"type": "ssh"} + if !*bm.PrivateNetworkOnlyFlag && bm.PrimaryIpAddress != nil { + connInfo["host"] = *bm.PrimaryIpAddress + } else { + connInfo["host"] = *bm.PrimaryBackendIpAddress + } + d.SetConnInfo(connInfo) + + d.Set("ipv6_enabled", false) + if 
bm.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord != nil { + d.Set("ipv6_enabled", true) + d.Set("ipv6_address", bm.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.IpAddress) + d.Set("ipv6_address_id", bm.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.Id) + } + err = readSecondaryIPAddresses(d, meta, bm.PrimaryIpAddress) + if err != nil { + return err + } + + return nil +} + +type bareMetal []datatypes.Hardware + +func (k bareMetal) Len() int { return len(k) } + +func (k bareMetal) Swap(i, j int) { k[i], k[j] = k[j], k[i] } + +func (k bareMetal) Less(i, j int) bool { + return k[i].ProvisionDate.Before(k[j].ProvisionDate.Time) +} + +func mostRecentBareMetal(keys bareMetal) datatypes.Hardware { + sortedKeys := keys + sort.Sort(sortedKeys) + return sortedKeys[len(sortedKeys)-1] +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_image_template.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_image_template.go new file mode 100644 index 00000000000..11760916acf --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_image_template.go @@ -0,0 +1,74 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMComputeImageTemplate() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMComputeImageTemplateRead, + + // TODO: based on need add properties for visibility, type of image, + // notes, size, shared on accounts if needed + Schema: map[string]*schema.Schema{ + "id": { + Description: "The internal id of the image template", + Type: schema.TypeInt, + Computed: true, + }, + + "name": { + Description: "The name of this image template", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMComputeImageTemplateRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + name := d.Get("name").(string) + + imageTemplates, err := service. + Mask("id,name"). + GetBlockDeviceTemplateGroups() + if err != nil { + return fmt.Errorf("Error looking up image template [%s]: %s", name, err) + } + + for _, imageTemplate := range imageTemplates { + if imageTemplate.Name != nil && *imageTemplate.Name == name { + d.SetId(fmt.Sprintf("%d", *imageTemplate.Id)) + return nil + } + } + + // Image not found among the private or shared images in the account. + // Looking it up among the public images. + templateService := services.GetVirtualGuestBlockDeviceTemplateGroupService(sess) + pubImageTemplates, err := templateService. + Mask("id,name"). + Filter(filter.Path("name").Eq(name).Build()).
+ GetPublicImages() + if err != nil { + return fmt.Errorf("Error looking up image template [%s] among the public images: %s", name, err) + } + + if len(pubImageTemplates) > 0 { + imageTemplate := pubImageTemplates[0] + d.SetId(fmt.Sprintf("%d", *imageTemplate.Id)) + return nil + } + + return fmt.Errorf("Could not find image template with name [%s]", name) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_placement_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_placement_group.go new file mode 100644 index 00000000000..2c8eeb07a6f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_placement_group.go @@ -0,0 +1,116 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMComputePlacementGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMComputePlacementGroupRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "datacenter": { + Type: schema.TypeString, + Computed: true, + }, + + "pod": { + Type: schema.TypeString, + Computed: true, + }, + + "rule": { + Type: schema.TypeString, + Computed: true, + }, + + "virtual_guests": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + }, + "hostname": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMComputePlacementGroupRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + name := d.Get("name").(string) + + groups, err := service. + Filter(filter.Build(filter.Path("placementGroup.name").Eq(name))). 
+ Mask("id,name,rule[name],guests[id,domain,hostname],backendRouter[hostname,datacenter[name]]").GetPlacementGroups() + + if err != nil { + return fmt.Errorf("Error retrieving placement group: %s", err) + } + + grps := []datatypes.Virtual_PlacementGroup{} + for _, g := range groups { + if name == *g.Name { + grps = append(grps, g) + + } + } + + if len(grps) == 0 { + return fmt.Errorf("No placement group found with name [%s]", name) + } + + var grp datatypes.Virtual_PlacementGroup + + grp = grps[0] + + d.SetId(fmt.Sprintf("%d", *grp.Id)) + d.Set("name", grp.Name) + d.Set("datacenter", grp.BackendRouter.Datacenter.Name) + pod := strings.SplitAfter(*grp.BackendRouter.Hostname, ".")[0] + r, _ := regexp.Compile("[0-9]{2}") + pod = "pod" + r.FindString(pod) + d.Set("pod", pod) + d.Set("rule", grp.Rule.Name) + + vgs := make([]map[string]interface{}, len(grp.Guests)) + for i, vg := range grp.Guests { + v := make(map[string]interface{}) + v["id"] = *vg.Id + v["domain"] = *vg.Domain + v["hostname"] = *vg.Hostname + vgs[i] = v + } + d.Set("virtual_guests", vgs) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_ssh_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_ssh_key.go new file mode 100644 index 00000000000..a27dc320860 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_ssh_key.go @@ -0,0 +1,113 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMComputeSSHKey() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMComputeSSHKeyRead, + + Schema: map[string]*schema.Schema{ + "label": &schema.Schema{ + Description: "The label associated with the ssh key", + Type: schema.TypeString, + Required: true, + }, + + "public_key": &schema.Schema{ + Description: "The public ssh key", + Type: schema.TypeString, + Computed: true, + }, + + "fingerprint": &schema.Schema{ + Description: "A sequence of bytes to authenticate or lookup a longer ssh key", + Type: schema.TypeString, + Computed: true, + }, + + "notes": &schema.Schema{ + Description: "A small note about a ssh key to use at your discretion", + Type: schema.TypeString, + Computed: true, + }, + + "most_recent": &schema.Schema{ + Description: "If true and multiple entries are found, the most recently created key is used. " + + "If false, an error is returned", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func dataSourceIBMComputeSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + label := d.Get("label").(string) + mostRecent := d.Get("most_recent").(bool) + + keys, err := service. + Filter(filter.Build(filter.Path("sshKeys.label").Eq(label))). + Mask("id,label,key,fingerprint,notes,createDate"). 
+ GetSshKeys() + + if err != nil { + return fmt.Errorf("Error retrieving SSH key: %s", err) + } + if len(keys) == 0 { + return fmt.Errorf("No ssh key found with name [%s]", label) + } + + var key datatypes.Security_Ssh_Key + if len(keys) > 1 { + if mostRecent { + key = mostRecentSSHKey(keys) + } else { + return fmt.Errorf( + "More than one ssh key found with label matching [%s]. "+ + "Either set 'most_recent' to true in your "+ + "configuration to force the most recent ssh key "+ + "to be used, or ensure that the label is unique", label) + } + } else { + key = keys[0] + } + + d.SetId(fmt.Sprintf("%d", *key.Id)) + d.Set("label", label) + d.Set("public_key", strings.TrimSpace(*key.Key)) + d.Set("fingerprint", key.Fingerprint) + d.Set("notes", key.Notes) + return nil +} + +type sshKeys []datatypes.Security_Ssh_Key + +func (k sshKeys) Len() int { return len(k) } + +func (k sshKeys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } + +func (k sshKeys) Less(i, j int) bool { + return k[i].CreateDate.Before(k[j].CreateDate.Time) +} + +func mostRecentSSHKey(keys sshKeys) datatypes.Security_Ssh_Key { + sortedKeys := keys + sort.Sort(sortedKeys) + return sortedKeys[len(sortedKeys)-1] +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_vm_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_vm_instance.go new file mode 100644 index 00000000000..1e153ea11c0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_compute_vm_instance.go @@ -0,0 +1,250 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMComputeVmInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMComputeVmInstanceRead, + + Schema: map[string]*schema.Schema{ + + "hostname": &schema.Schema{ + Description: "The hostname of the virtual guest", + Type: schema.TypeString, + Required: true, + }, + + "domain": &schema.Schema{ + Description: "The domain of the virtual guest", + Type: schema.TypeString, + Required: true, + }, + + "datacenter": &schema.Schema{ + Description: "Datacenter in which the virtual guest is deployed", + Type: schema.TypeString, + Computed: true, + }, + + "cores": &schema.Schema{ + Description: "Number of cpu cores", + Type: schema.TypeInt, + Computed: true, + }, + + "status": &schema.Schema{ + Description: "The VSI status", + Type: schema.TypeString, + Computed: true, + }, + + "last_known_power_state": &schema.Schema{ + Description: "The last known power state of a virtual guest in the event the guest is turned off outside of IMS or has gone offline.", + Type: schema.TypeString, + Computed: true, + }, + + "public_interface_id": { + Type: schema.TypeInt, + Computed: true, + }, + "private_interface_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "power_state": &schema.Schema{ + Description: "The current power state of a virtual guest.", + Type: schema.TypeString, + Computed: true, + }, + "most_recent": &schema.Schema{ + Description: "If true and multiple entries are found, the most recently created virtual guest is used. 
" + + "If false, an error is returned", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "public_subnet_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "private_subnet_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "ipv4_address": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv4_address_private": { + Type: schema.TypeString, + Computed: true, + }, + + "ip_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "ip_address_id_private": { + Type: schema.TypeInt, + Computed: true, + }, + + "ipv6_address": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv6_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "public_ipv6_subnet": { + Type: schema.TypeString, + Computed: true, + }, + + "public_ipv6_subnet_id": { + Type: schema.TypeString, + Computed: true, + }, + + "secondary_ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "secondary_ip_count": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceIBMComputeVmInstanceRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + hostname := d.Get("hostname").(string) + domain := d.Get("domain").(string) + mostRecent := d.Get("most_recent").(bool) + + vgs, err := service. + Filter(filter.Build(filter.Path("virtualGuests.hostname").Eq(hostname), + filter.Path("virtualGuests.domain").Eq(domain))).Mask( + "hostname,domain,primaryIpAddress,primaryBackendIpAddress,startCpus,datacenter[id,name,longName],statusId,status,id,powerState,lastKnownPowerState,createDate,primaryNetworkComponent[id, primaryIpAddressRecord[subnet,guestNetworkComponentBinding[ipAddressId]]," + + "primaryVersion6IpAddressRecord[subnet,guestNetworkComponentBinding[ipAddressId]]]," + + "primaryBackendNetworkComponent[id],primaryBackendNetworkComponent[networkVlan[id]," + + "securityGroupBindings[securityGroup]]", + ).GetVirtualGuests() + + if err != nil { + return fmt.Errorf("Error retrieving virtual guest details for host %s: %s", hostname, err) + } + if len(vgs) == 0 { + return fmt.Errorf("No virtual guest with hostname %s and domain %s", hostname, domain) + } + + var vg datatypes.Virtual_Guest + if len(vgs) > 1 { + if mostRecent { + vg = mostRecentVirtualGuest(vgs) + } else { + return fmt.Errorf( + "More than one virtual guest found with host matching [%s] and domain "+ + "matching [%s]. 
Set 'most_recent' to true in your configuration to force the most recent virtual guest "+ + "to be used", hostname, domain) + } + } else { + vg = vgs[0] + } + + d.SetId(fmt.Sprintf("%d", *vg.Id)) + d.Set("hostname", vg.Hostname) + d.Set("domain", vg.Domain) + + if vg.Datacenter != nil { + d.Set("datacenter", *vg.Datacenter.Name) + } + d.Set("cores", *vg.StartCpus) + if vg.Status != nil { + d.Set("status", vg.Status.KeyName) + } + if vg.PowerState != nil { + d.Set("power_state", vg.PowerState.KeyName) + } + if vg.LastKnownPowerState != nil { + d.Set("last_known_power_state", vg.LastKnownPowerState.KeyName) + } + d.Set("public_interface_id", vg.PrimaryNetworkComponent.Id) + d.Set("private_interface_id", vg.PrimaryBackendNetworkComponent.Id) + d.Set("ipv4_address", vg.PrimaryIpAddress) + d.Set("ipv4_address_private", vg.PrimaryBackendIpAddress) + if vg.PrimaryNetworkComponent.PrimaryIpAddressRecord != nil { + d.Set("ip_address_id", *vg.PrimaryNetworkComponent.PrimaryIpAddressRecord.GuestNetworkComponentBinding.IpAddressId) + } + if vg.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord != nil { + d.Set("ip_address_id_private", + *vg.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord.GuestNetworkComponentBinding.IpAddressId) + } + if vg.PrimaryNetworkComponent.PrimaryIpAddressRecord != nil { + d.Set("public_subnet_id", vg.PrimaryNetworkComponent.PrimaryIpAddressRecord.SubnetId) + } + + if vg.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord != nil { + d.Set("private_subnet_id", vg.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord.SubnetId) + } + + if vg.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord != nil { + d.Set("ipv6_address", *vg.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.IpAddress) + d.Set("ipv6_address_id", *vg.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.GuestNetworkComponentBinding.IpAddressId) + publicSubnet := vg.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.Subnet + d.Set( + "public_ipv6_subnet", + fmt.Sprintf("%s/%d", *publicSubnet.NetworkIdentifier, *publicSubnet.Cidr), + ) + d.Set("public_ipv6_subnet_id", vg.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.SubnetId) + } + + err = readSecondaryIPAddresses(d, meta, vg.PrimaryIpAddress) + if err != nil { + return fmt.Errorf("Error retrieving virtual guest details for host %s: %s", hostname, err) + } + return nil +} + +type virtualGuests []datatypes.Virtual_Guest + +func (k virtualGuests) Len() int { return len(k) } + +func (k virtualGuests) Swap(i, j int) { k[i], k[j] = k[j], k[i] } + +func (k virtualGuests) Less(i, j int) bool { + return k[i].CreateDate.Before(k[j].CreateDate.Time) +} + +func mostRecentVirtualGuest(keys virtualGuests) datatypes.Virtual_Guest { + sortedKeys := keys + sort.Sort(sortedKeys) + return sortedKeys[len(sortedKeys)-1] +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_addons.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_addons.go new file mode 100644 index 00000000000..4c1c8a1ac1d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_addons.go @@ -0,0 +1,161 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
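The bare metal, SSH key, and virtual guest data sources above all resolve ambiguous lookups the same way: if the filter matches more than one object, the read fails unless most_recent is set, in which case the candidates are sorted by creation (or provision) date and the newest one is returned. A minimal usage sketch; the label, hostname, and domain are placeholders, and it assumes the IBM Cloud provider is already configured:

data "ibm_compute_ssh_key" "deploy_key" {
  label       = "deploy-key"
  most_recent = true # newest key wins if several share the label
}

data "ibm_compute_vm_instance" "web" {
  hostname    = "web01"
  domain      = "example.com"
  most_recent = true
}

output "web_ipv4_address" {
  value = data.ibm_compute_vm_instance.web.ipv4_address
}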
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" +) + +func datasourceIBMContainerAddOns() *schema.Resource { + return &schema.Resource{ + Read: datasourceIBMContainerAddOnsRead, + + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + Description: "Cluster Name or ID", + }, + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the resource group.", + }, + "addons": { + Type: schema.TypeList, + Computed: true, + Description: "The List of AddOns", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The addon name such as 'istio'.", + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: "The addon version, omit the version if you wish to use the default version.", + }, + "allowed_upgrade_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The versions that the addon can be upgraded to", + }, + "deprecated": { + Type: schema.TypeBool, + Computed: true, + Description: "Determines if this addon version is deprecated", + }, + "health_state": { + Type: schema.TypeString, + Computed: true, + Description: "The health state for this addon, a short indication (e.g. critical, pending)", + }, + "health_status": { + Type: schema.TypeString, + Computed: true, + Description: "The health status for this addon, provides a description of the state (e.g. error message)", + }, + "min_kube_version": { + Type: schema.TypeString, + Computed: true, + Description: "The minimum kubernetes version for this addon.", + }, + "min_ocp_version": { + Type: schema.TypeString, + Computed: true, + Description: "The minimum OpenShift version for this addon.", + }, + "supported_kube_range": { + Type: schema.TypeString, + Computed: true, + Description: "The supported kubernetes version range for this addon.", + }, + "target_version": { + Type: schema.TypeString, + Computed: true, + Description: "The addon target version.", + }, + "vlan_spanning_required": { + Type: schema.TypeBool, + Computed: true, + Description: "VLAN spanning required for multi-zone clusters", + }, + }, + }, + }, + }, + } +} +func datasourceIBMContainerAddOnsRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + addOnAPI := csClient.AddOns() + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + cluster := d.Get("cluster").(string) + + result, err := addOnAPI.GetAddons(cluster, targetEnv) + if err != nil { + return err + } + d.Set("cluster", cluster) + addOns, err := flattenAddOnsList(result) + if err != nil { + fmt.Printf("Error Flattening Addons list %s", err) + } + d.Set("resource_group_id", targetEnv.ResourceGroup) + d.Set("addons", addOns) + d.SetId(cluster) + return nil +} +func flattenAddOnsList(result []v1.AddOn) (addOns []map[string]interface{}, err error) { + for _, addOn := range result { + record := map[string]interface{}{} + record["name"] = addOn.Name + record["version"] = addOn.Version + if len(addOn.AllowedUpgradeVersion) > 0 { + record["allowed_upgrade_versions"] = addOn.AllowedUpgradeVersion + } + if &addOn.Deprecated != nil { + record["deprecated"] = addOn.Deprecated + } + if &addOn.HealthState != 
nil { + record["health_state"] = addOn.HealthState + } + if &addOn.HealthStatus != nil { + record["health_status"] = addOn.HealthStatus + } + if addOn.MinKubeVersion != "" { + record["min_kube_version"] = addOn.MinKubeVersion + } + if addOn.MinOCPVersion != "" { + record["min_ocp_version"] = addOn.MinOCPVersion + } + if addOn.SupportedKubeRange != "" { + record["supported_kube_range"] = addOn.SupportedKubeRange + } + if addOn.TargetVersion != "" { + record["target_version"] = addOn.TargetVersion + } + if &addOn.VlanSpanningRequired != nil { + record["vlan_spanning_required"] = addOn.VlanSpanningRequired + } + + addOns = append(addOns, record) + } + + return addOns, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_alb.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_alb.go new file mode 100644 index 00000000000..a76a7f4c98d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_alb.go @@ -0,0 +1,88 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMContainerALB() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerALBRead, + + Schema: map[string]*schema.Schema{ + "alb_id": { + Type: schema.TypeString, + Required: true, + Description: "ALB ID", + }, + "alb_type": { + Type: schema.TypeString, + Computed: true, + Description: "ALB type", + }, + "cluster": { + Type: schema.TypeString, + Computed: true, + Description: "Cluster id", + }, + "user_ip": { + Type: schema.TypeString, + Computed: true, + Description: "IP assigned by the user", + }, + "enable": { + Type: schema.TypeBool, + Computed: true, + Description: "set to true if ALB needs to be enabled", + }, + "disable_deployment": { + Type: schema.TypeBool, + Computed: true, + Description: "Set to true if ALB needs to be disabled", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "ALB name", + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Description: "ALB zone", + }, + }, + } +} + +func dataSourceIBMContainerALBRead(d *schema.ResourceData, meta interface{}) error { + albClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + albID := d.Get("alb_id").(string) + + albAPI := albClient.Albs() + targetEnv, err := getAlbTargetHeader(d, meta) + if err != nil { + return err + } + albConfig, err := albAPI.GetALB(albID, targetEnv) + if err != nil { + return err + } + + d.SetId(albID) + d.Set("alb_type", &albConfig.ALBType) + d.Set("cluster", &albConfig.ClusterID) + d.Set("name", &albConfig.Name) + d.Set("enable", &albConfig.Enable) + d.Set("disable_deployment", &albConfig.DisableDeployment) + d.Set("replicas", &albConfig.NumOfInstances) + d.Set("resize", &albConfig.Resize) + d.Set("user_ip", &albConfig.ALBIP) + d.Set("zone", &albConfig.Zone) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_alb_cert.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_alb_cert.go new file mode 100644 index 00000000000..dc8cd7b054c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_alb_cert.go @@ -0,0 +1,111 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
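Both container data sources above are read-only lookups against an existing cluster: ibm_container_addons lists every add-on installed on the cluster, and ibm_container_alb fetches a single ALB by its ID. A short sketch with placeholder identifiers (the ALB ID format is illustrative only):

data "ibm_container_addons" "addons" {
  cluster = "my-cluster"
}

data "ibm_container_alb" "alb" {
  alb_id = "public-cr0123456789abcdef-alb1" # placeholder ALB ID
}

output "addon_versions" {
  value = { for a in data.ibm_container_addons.addons.addons : a.name => a.version }
}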
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMContainerALBCert() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerALBCertRead, + + Schema: map[string]*schema.Schema{ + "cert_crn": { + Type: schema.TypeString, + Computed: true, + Description: "Certificate CRN id", + }, + "cluster_id": { + Type: schema.TypeString, + Required: true, + Description: "Cluster ID", + }, + "secret_name": { + Type: schema.TypeString, + Required: true, + Description: "Secret name", + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + Default: "ibm-cert-store", + Description: "Namespace of the secret", + }, + "persistence": { + Type: schema.TypeBool, + Computed: true, + Description: "Persistence of secret", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Secret status", + }, + "domain_name": { + Type: schema.TypeString, + Computed: true, + Description: "Domain name", + }, + "expires_on": { + Type: schema.TypeString, + Computed: true, + Description: "Certificate expiration date", + }, + "issuer_name": { + Type: schema.TypeString, + Computed: true, + Description: "Certificate issuer name", + Deprecated: "This field is deprecated and is not available in the v2 version of the ingress API", + }, + "cluster_crn": { + Type: schema.TypeString, + Computed: true, + Description: "Cluster CRN", + Deprecated: "This field is deprecated and is not available in the v2 version of the ingress API", + }, + "cloud_cert_instance_id": { + Type: schema.TypeString, + Computed: true, + Description: "Cloud cert instance ID", + }, + }, + } +} + +func dataSourceIBMContainerALBCertRead(d *schema.ResourceData, meta interface{}) error { + ingressClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + clusterID := d.Get("cluster_id").(string) + secretName := d.Get("secret_name").(string) + namespace := d.Get("namespace").(string) + + ingressAPI := ingressClient.Ingresses() + ingressSecretConfig, err := ingressAPI.GetIngressSecret(clusterID, secretName, namespace) + if err != nil { + return err + } + + d.Set("cluster_id", ingressSecretConfig.Cluster) + d.Set("secret_name", ingressSecretConfig.Name) + d.Set("cert_crn", ingressSecretConfig.CRN) + d.Set("namespace", ingressSecretConfig.Namespace) + instancecrn := strings.Split(ingressSecretConfig.CRN, ":certificate:") + d.Set("cloud_cert_instance_id", fmt.Sprintf("%s::", instancecrn[0])) + // d.Set("cluster_crn", ingressSecretConfig.ClusterCrn) + d.Set("domain_name", ingressSecretConfig.Domain) + d.Set("expires_on", ingressSecretConfig.ExpiresOn) + d.Set("status", ingressSecretConfig.Status) + d.Set("persistence", ingressSecretConfig.Persistence) + // d.Set("issuer_name", ingressSecretConfig.IssuerName) + d.SetId(fmt.Sprintf("%s/%s/%s", clusterID, secretName, ingressSecretConfig.Namespace)) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_bind_service.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_bind_service.go new file mode 100644 index 00000000000..f1af5f75a1e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_bind_service.go @@ -0,0 +1,82 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
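ibm_container_alb_cert, defined above, reads an ingress secret by cluster, secret name, and namespace (defaulting to ibm-cert-store), and composes its resource ID as cluster/secret/namespace. A usage sketch with placeholder names:

data "ibm_container_alb_cert" "cert" {
  cluster_id  = "my-cluster"
  secret_name = "my-tls-secret"
  # namespace is optional and defaults to "ibm-cert-store"
}

output "certificate_crn" {
  value = data.ibm_container_alb_cert.cert.cert_crn
}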
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMContainerBindService() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerBindServiceRead, + + Schema: map[string]*schema.Schema{ + "cluster_name_id": { + Type: schema.TypeString, + Required: true, + Description: "Cluster name or ID", + }, + "service_instance_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"service_instance_name"}, + Description: "Service instance ID", + }, + "service_instance_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"service_instance_id"}, + Description: "Service instance name", + }, + "namespace_id": { + Type: schema.TypeString, + Required: true, + Description: "Namespace ID", + }, + "service_key_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the service key", + }, + }, + } +} + +func dataSourceIBMContainerBindServiceRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + clusterNameID := d.Get("cluster_name_id").(string) + namespaceID := d.Get("namespace_id").(string) + var serviceInstanceNameID string + if serviceInstanceName, ok := d.GetOk("service_instance_name"); ok { + serviceInstanceNameID = serviceInstanceName.(string) + } else if serviceInstanceID, ok := d.GetOk("service_instance_id"); ok { + serviceInstanceNameID = serviceInstanceID.(string) + } else { + return fmt.Errorf("Please set either service_instance_name or service_instance_id") + } + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + + boundService, err := csClient.Clusters().FindServiceBoundToCluster(clusterNameID, serviceInstanceNameID, namespaceID, targetEnv) + if err != nil { + return err + } + d.Set("namespace_id", boundService.Namespace) + d.Set("service_instance_name", boundService.ServiceName) + d.Set("service_instance_id", boundService.ServiceID) + d.Set("service_key_name", boundService.ServiceKeyName) + d.SetId(fmt.Sprintf("%s/%s/%s", clusterNameID, serviceInstanceNameID, namespaceID)) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster.go new file mode 100644 index 00000000000..ed795869e3b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster.go @@ -0,0 +1,448 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
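ibm_container_bind_service, defined above, expects exactly one of service_instance_name or service_instance_id (the two conflict, and the read errors out if neither is set). A sketch with placeholder values:

data "ibm_container_bind_service" "binding" {
  cluster_name_id       = "my-cluster"
  namespace_id          = "default"
  service_instance_name = "my-cloudant" # or set service_instance_id instead
}

output "service_key_name" {
  value = data.ibm_container_bind_service.binding.service_key_name
}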
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMContainerCluster() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerClusterRead, + + Schema: map[string]*schema.Schema{ + "cluster_name_id": { + Description: "Name or id of the cluster", + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"cluster_name_id", "name"}, + Deprecated: "use name instead", + }, + "name": { + Description: "Name or id of the cluster", + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"cluster_name_id", "name"}, + }, + "worker_count": { + Description: "Number of workers", + Type: schema.TypeInt, + Computed: true, + }, + "workers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "is_trusted": { + Type: schema.TypeBool, + Computed: true, + }, + "worker_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "machine_type": { + Type: schema.TypeString, + Computed: true, + }, + "size_per_zone": { + Type: schema.TypeInt, + Computed: true, + }, + "hardware": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + }, + "zones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Computed: true, + }, + "private_vlan": { + Type: schema.TypeString, + Computed: true, + }, + "public_vlan": { + Type: schema.TypeString, + Computed: true, + }, + "worker_count": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "bounded_services": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_name": { + Type: schema.TypeString, + Computed: true, + }, + "service_id": { + Type: schema.TypeString, + Computed: true, + }, + "service_key_name": { + Type: schema.TypeString, + Computed: true, + }, + "namespace": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "vlans": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "subnets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "ips": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "is_public": { + Type: schema.TypeBool, + Computed: true, + }, + "is_byoip": { + Type: schema.TypeBool, + Computed: true, + }, + "cidr": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "alb_type": { + Type: schema.TypeString, + Optional: true, + Default: "all", + ValidateFunc: validateAllowedStringValue([]string{"private", "public", "all"}), + }, + "albs": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "alb_type": { + Type: schema.TypeString, + Computed: true, + }, + 
"enable": { + Type: schema.TypeBool, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "num_of_instances": { + Type: schema.TypeString, + Computed: true, + }, + "alb_ip": { + Type: schema.TypeString, + Computed: true, + }, + "resize": { + Type: schema.TypeBool, + Computed: true, + }, + "disable_deployment": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "ingress_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "ingress_secret": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "space_guid": { + Description: "The bluemix space guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "account_guid": { + Description: "The bluemix account guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The cluster region", + Deprecated: "This field is deprecated", + }, + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + Computed: true, + }, + "public_service_endpoint": { + Type: schema.TypeBool, + Computed: true, + }, + + "private_service_endpoint": { + Type: schema.TypeBool, + Computed: true, + }, + "public_service_endpoint_url": { + Type: schema.TypeString, + Computed: true, + }, + + "private_service_endpoint_url": { + Type: schema.TypeString, + Computed: true, + }, + + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "CRN of resource instance", + }, + + "server_url": { + Type: schema.TypeString, + Computed: true, + }, + + "list_bounded_services": { + Type: schema.TypeBool, + Default: true, + Optional: true, + Description: "If set to false bounded services won't be listed.", + }, + "api_key_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of APIkey", + }, + "api_key_owner_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the key owner", + }, + "api_key_owner_email": { + Type: schema.TypeString, + Computed: true, + Description: "email id of the key owner", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func dataSourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + csAPI := csClient.Clusters() + wrkAPI := csClient.Workers() + workerPoolsAPI := csClient.WorkerPools() + albsAPI := csClient.Albs() + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + + var name string + + if v, ok 
:= d.GetOk("cluster_name_id"); ok { + name = v.(string) + } + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } + clusterFields, err := csAPI.Find(name, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving cluster: %s", err) + } + workerFields, err := wrkAPI.List(name, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + workers := make([]string, len(workerFields)) + for i, worker := range workerFields { + workers[i] = worker.ID + } + + listBoundedServices := d.Get("list_bounded_services").(bool) + boundedServices := make([]map[string]interface{}, 0) + if listBoundedServices { + servicesBoundToCluster, err := csAPI.ListServicesBoundToCluster(name, "", targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving services bound to cluster: %s", err) + } + for _, service := range servicesBoundToCluster { + boundedService := make(map[string]interface{}) + boundedService["service_name"] = service.ServiceName + boundedService["service_id"] = service.ServiceID + boundedService["service_key_name"] = service.ServiceKeyName + boundedService["namespace"] = service.Namespace + boundedServices = append(boundedServices, boundedService) + } + } + + workerPools, err := workerPoolsAPI.ListWorkerPools(name, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving worker pools of the cluster %s: %s", name, err) + } + + albs, err := albsAPI.ListClusterALBs(name, targetEnv) + if err != nil && !strings.Contains(err.Error(), "The specified cluster is a lite cluster.") && !strings.Contains(err.Error(), "This operation is not supported for your cluster's version.") && !strings.Contains(err.Error(), "The specified cluster is a free cluster.") { + return fmt.Errorf("Error retrieving alb's of the cluster %s: %s", name, err) + } + + filterType := d.Get("alb_type").(string) + filteredAlbs := flattenAlbs(albs, filterType) + + d.SetId(clusterFields.ID) + d.Set("worker_count", clusterFields.WorkerCount) + d.Set("workers", workers) + d.Set("region", clusterFields.Region) + d.Set("bounded_services", boundedServices) + d.Set("vlans", flattenVlans(clusterFields.Vlans)) + d.Set("is_trusted", clusterFields.IsTrusted) + d.Set("worker_pools", flattenWorkerPools(workerPools)) + d.Set("albs", filteredAlbs) + d.Set("resource_group_id", clusterFields.ResourceGroupID) + d.Set("public_service_endpoint", clusterFields.PublicServiceEndpointEnabled) + d.Set("private_service_endpoint", clusterFields.PrivateServiceEndpointEnabled) + d.Set("public_service_endpoint_url", clusterFields.PublicServiceEndpointURL) + d.Set("private_service_endpoint_url", clusterFields.PrivateServiceEndpointURL) + d.Set("crn", clusterFields.CRN) + d.Set("server_url", clusterFields.ServerURL) + d.Set("ingress_hostname", clusterFields.IngressHostname) + d.Set("ingress_secret", clusterFields.IngressSecretName) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/kubernetes/clusters") + apikeyAPI := csClient.Apikeys() + apikeyConfig, err := apikeyAPI.GetApiKeyInfo(name, targetEnv) + if err != nil { + return err + } + d.Set("api_key_id", apikeyConfig.ID) + d.Set("api_key_owner_name", apikeyConfig.Name) + d.Set("api_key_owner_email", apikeyConfig.Email) + d.Set(ResourceName, clusterFields.Name) + d.Set(ResourceCRN, clusterFields.CRN) + d.Set(ResourceStatus, clusterFields.State) + d.Set(ResourceGroupName, clusterFields.ResourceGroupName) + + return nil +} diff --git 
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster_config.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster_config.go new file mode 100644 index 00000000000..f1386b0acdf --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster_config.go @@ -0,0 +1,186 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "path/filepath" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + homedir "github.com/mitchellh/go-homedir" + + v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" + "github.com/IBM-Cloud/bluemix-go/helpers" +) + +func dataSourceIBMContainerClusterConfig() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerClusterConfigRead, + + Schema: map[string]*schema.Schema{ + + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "space_guid": { + Description: "The bluemix space guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "account_guid": { + Description: "The bluemix account guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: "The cluster region", + Deprecated: "This field is deprecated", + }, + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + }, + "cluster_name_id": { + Description: "The name/id of the cluster", + Type: schema.TypeString, + Required: true, + }, + "config_dir": { + Description: "The directory where the cluster config to be downloaded. 
Default is home directory ", + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "download": { + Description: "If set to false will not download the config, otherwise they are downloaded each time but onto the same path for a given cluster name/id", + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "admin": { + Description: "If set to true will download the config for admin", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "network": { + Description: "If set to true will download the Calico network config with the Admin config", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "config_file_path": { + Description: "The absolute path to the kubernetes config yml file ", + Type: schema.TypeString, + Computed: true, + }, + "calico_config_file_path": { + Description: "The absolute path to the calico network config file ", + Type: schema.TypeString, + Computed: true, + }, + "admin_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "admin_certificate": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "ca_certificate": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "host": { + Type: schema.TypeString, + Computed: true, + }, + "token": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + }, + } +} + +func dataSourceIBMContainerClusterConfigRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + csAPI := csClient.Clusters() + name := d.Get("cluster_name_id").(string) + download := d.Get("download").(bool) + admin := d.Get("admin").(bool) + configDir := d.Get("config_dir").(string) + network := d.Get("network").(bool) + + if len(configDir) == 0 { + configDir, err = homedir.Dir() + if err != nil { + return fmt.Errorf("Error fetching homedir: %s", err) + } + } + configDir, _ = filepath.Abs(configDir) + + var configPath string + if !download { + log.Println("Skipping download of the cluster config", "Going to check if it already exists") + expectedDir := v1.ComputeClusterConfigDir(configDir, name, admin) + configPath = filepath.Join(expectedDir, "config.yml") + if !helpers.FileExists(configPath) { + return fmt.Errorf(`Couldn't find the cluster config at expected path %s. 
Please set "download" to true to download the new config`, configPath) + } + d.Set("config_file_path", configPath) + + } else { + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + if network { + // For the Network config we need to gather the certs so we must override the admin value + calicoConfigFilePath, clusterKeyDetails, err := csAPI.StoreConfigDetail(name, configDir, admin || true, network, targetEnv) + if err != nil { + return fmt.Errorf("Error downloading the cluster config [%s]: %s", name, err) + } + d.Set("calico_config_file_path", calicoConfigFilePath) + d.Set("admin_key", clusterKeyDetails.AdminKey) + d.Set("admin_certificate", clusterKeyDetails.Admin) + d.Set("ca_certificate", clusterKeyDetails.ClusterCACertificate) + d.Set("host", clusterKeyDetails.Host) + d.Set("token", clusterKeyDetails.Token) + d.Set("config_file_path", clusterKeyDetails.FilePath) + + } else { + clusterKeyDetails, err := csAPI.GetClusterConfigDetail(name, configDir, admin, targetEnv) + if err != nil { + return fmt.Errorf("Error downloading the cluster config [%s]: %s", name, err) + } + d.Set("admin_key", clusterKeyDetails.AdminKey) + d.Set("admin_certificate", clusterKeyDetails.Admin) + d.Set("ca_certificate", clusterKeyDetails.ClusterCACertificate) + d.Set("host", clusterKeyDetails.Host) + d.Set("token", clusterKeyDetails.Token) + d.Set("config_file_path", clusterKeyDetails.FilePath) + } + } + + d.SetId(name) + d.Set("config_dir", configDir) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster_versions.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster_versions.go new file mode 100644 index 00000000000..56272eee15b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster_versions.go @@ -0,0 +1,88 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMContainerClusterVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerClusterVersionsRead, + + Schema: map[string]*schema.Schema{ + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "space_guid": { + Description: "The bluemix space guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "account_guid": { + Description: "The bluemix account guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: "The cluster region", + Deprecated: "This field is deprecated", + }, + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + }, + "valid_kube_versions": { + Description: "List supported kube-versions", + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "valid_openshift_versions": { + Description: "List of supported openshift-versions", + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceIBMContainerClusterVersionsRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + verAPI := csClient.KubeVersions() + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + + availableVersions, _ := verAPI.ListV1(targetEnv) + versions := make([]string, len(availableVersions["kubernetes"])) + for i, version := range availableVersions["kubernetes"] { + versions[i] = fmt.Sprintf("%d%s%d%s%d", version.Major, ".", version.Minor, ".", version.Patch) + } + + openshiftVersions := make([]string, len(availableVersions["openshift"])) + for i, version := range availableVersions["openshift"] { + openshiftVersions[i] = fmt.Sprintf("%d%s%d%s%d", version.Major, ".", version.Minor, ".", version.Patch) + } + d.SetId(time.Now().UTC().String()) + d.Set("valid_kube_versions", versions) + d.Set("valid_openshift_versions", openshiftVersions) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster_worker.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster_worker.go new file mode 100644 index 00000000000..65d952059cc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_cluster_worker.go @@ -0,0 +1,118 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
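ibm_container_cluster_versions, defined above, needs no arguments beyond the optional target headers, and because its ID is set to the current timestamp it is re-read on every run. A minimal sketch:

data "ibm_container_cluster_versions" "versions" {}

output "kube_versions" {
  value = data.ibm_container_cluster_versions.versions.valid_kube_versions
}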
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMContainerClusterWorker() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerClusterWorkerRead, + + Schema: map[string]*schema.Schema{ + "worker_id": { + Description: "ID of the worker", + Type: schema.TypeString, + Required: true, + }, + "state": { + Description: "State of the worker", + Type: schema.TypeString, + Computed: true, + }, + "status": { + Description: "Status of the worker", + Type: schema.TypeString, + Computed: true, + }, + "private_vlan": { + Type: schema.TypeString, + Computed: true, + }, + "public_vlan": { + Type: schema.TypeString, + Computed: true, + }, + "private_ip": { + Type: schema.TypeString, + Computed: true, + }, + "public_ip": { + Type: schema.TypeString, + Computed: true, + }, + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "space_guid": { + Description: "The bluemix space guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "account_guid": { + Description: "The bluemix account guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: "The cluster region", + Deprecated: "This field is deprecated", + }, + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", + }, + }, + } +} + +func dataSourceIBMContainerClusterWorkerRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + wrkAPI := csClient.Workers() + workerID := d.Get("worker_id").(string) + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + + workerFields, err := wrkAPI.Get(workerID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving worker: %s", err) + } + + d.SetId(workerFields.ID) + d.Set("state", workerFields.State) + d.Set("status", workerFields.Status) + d.Set("private_vlan", workerFields.PrivateVlan) + d.Set("public_vlan", workerFields.PublicVlan) + d.Set("private_ip", workerFields.PrivateIP) + d.Set("public_ip", workerFields.PublicIP) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/kubernetes/clusters") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_alb.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_alb.go new file mode 100644 index 00000000000..5451602ed40 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_alb.go @@ -0,0 +1,92 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
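ibm_container_cluster_worker, defined above, looks a single worker node up by its ID; the worker ID below is a placeholder whose format is illustrative only:

data "ibm_container_cluster_worker" "worker" {
  worker_id = "kube-dal10-cr0123456789abcdef-w1"
}

output "worker_private_ip" {
  value = data.ibm_container_cluster_worker.worker.private_ip
}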
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + v2 "github.com/IBM-Cloud/bluemix-go/api/container/containerv2" +) + +func dataSourceIBMContainerVPCClusterALB() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerVpcALBRead, + Schema: map[string]*schema.Schema{ + "alb_id": { + Type: schema.TypeString, + Required: true, + Description: "ALB ID", + }, + "alb_type": { + Type: schema.TypeString, + Computed: true, + }, + "cluster": { + Type: schema.TypeString, + Computed: true, + }, + "enable": { + Type: schema.TypeBool, + Computed: true, + }, + "disable_deployment": { + Type: schema.TypeBool, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "load_balancer_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "resize": { + Type: schema.TypeBool, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMContainerVpcALBRead(d *schema.ResourceData, meta interface{}) error { + albClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + albID := d.Get("alb_id").(string) + albAPI := albClient.Albs() + targetEnv := v2.ClusterTargetHeader{} + + albConfig, err := albAPI.GetAlb(albID, targetEnv) + if err != nil { + return err + } + + d.Set("alb_type", albConfig.AlbType) + d.Set("cluster", albConfig.Cluster) + d.Set("name", albConfig.Name) + d.Set("enable", albConfig.Enable) + d.Set("disable_deployment", albConfig.DisableDeployment) + d.Set("resize", albConfig.Resize) + d.Set("zone", albConfig.ZoneAlb) + d.Set("status", albConfig.Status) + d.Set("state", albConfig.State) + d.Set("load_balancer_hostname", albConfig.LoadBalancerHostname) + d.SetId(albID) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_cluster.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_cluster.go new file mode 100644 index 00000000000..4e4662ea81e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_cluster.go @@ -0,0 +1,408 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
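ibm_container_vpc_alb, defined above, is the VPC counterpart of ibm_container_alb and is likewise keyed only by the ALB ID (placeholder below):

data "ibm_container_vpc_alb" "alb" {
  alb_id = "private-cr0123456789abcdef-alb1"
}

output "alb_load_balancer_hostname" {
  value = data.ibm_container_vpc_alb.alb.load_balancer_hostname
}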
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + _OPENSHIFT = "_openshift" +) + +func dataSourceIBMContainerVPCCluster() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerClusterVPCRead, + + Schema: map[string]*schema.Schema{ + "cluster_name_id": { + Description: "Name or id of the cluster", + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"cluster_name_id", "name"}, + Deprecated: "use name instead", + }, + "name": { + Description: "Name or id of the cluster", + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"cluster_name_id", "name"}, + }, + "worker_count": { + Description: "Number of workers", + Type: schema.TypeInt, + Computed: true, + }, + "workers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "worker_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "flavor": { + Type: schema.TypeString, + Computed: true, + }, + "worker_count": { + Type: schema.TypeInt, + Computed: true, + }, + "isolation": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "zones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Computed: true, + }, + "subnets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "primary": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "worker_count": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "alb_type": { + Type: schema.TypeString, + Optional: true, + Default: "all", + ValidateFunc: validateAllowedStringValue([]string{"private", "public", "all"}), + }, + "albs": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "alb_type": { + Type: schema.TypeString, + Computed: true, + }, + "enable": { + Type: schema.TypeBool, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "load_balancer_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "resize": { + Type: schema.TypeBool, + Computed: true, + }, + "disable_deployment": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "service_subnet": { + Type: schema.TypeString, + Description: "Custom subnet CIDR to provide private IP addresses for services", + Computed: true, + }, + "pod_subnet": { + Type: schema.TypeString, + Description: "Custom subnet CIDR to provide private IP addresses for pods", + Computed: true, + }, + "ingress_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "ingress_secret": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: 
true, + }, + "public_service_endpoint": { + Type: schema.TypeBool, + Computed: true, + }, + + "private_service_endpoint": { + Type: schema.TypeBool, + Computed: true, + }, + "public_service_endpoint_url": { + Type: schema.TypeString, + Computed: true, + }, + + "private_service_endpoint_url": { + Type: schema.TypeString, + Computed: true, + }, + + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "CRN of resource instance", + }, + + "master_url": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the cluster master", + }, + + "health": { + Type: schema.TypeString, + Computed: true, + }, + + "kube_version": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "api_key_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of APIkey", + }, + "api_key_owner_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the key owner", + }, + "api_key_owner_email": { + Type: schema.TypeString, + Computed: true, + Description: "email id of the key owner", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func dataSourceIBMContainerClusterVPCRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + + var clusterID string + + if v, ok := d.GetOk("cluster_name_id"); ok { + clusterID = v.(string) + } + if v, ok := d.GetOk("name"); ok { + clusterID = v.(string) + } + + cls, err := csClient.Clusters().GetCluster(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving container vpc cluster: %s", err) + } + + d.SetId(cls.ID) + d.Set("crn", cls.CRN) + d.Set("status", cls.Lifecycle.MasterStatus) + d.Set("health", cls.Lifecycle.MasterHealth) + if strings.HasSuffix(cls.MasterKubeVersion, _OPENSHIFT) { + d.Set("kube_version", strings.Split(cls.MasterKubeVersion, "_")[0]+_OPENSHIFT) + } else { + d.Set("kube_version", strings.Split(cls.MasterKubeVersion, "_")[0]) + + } + d.Set("master_url", cls.MasterURL) + d.Set("worker_count", cls.WorkerCount) + d.Set("service_subnet", cls.ServiceSubnet) + d.Set("pod_subnet", cls.PodSubnet) + d.Set("state", cls.State) + d.Set("resource_group_id", cls.ResourceGroupID) + d.Set("public_service_endpoint_url", cls.ServiceEndpoints.PublicServiceEndpointURL) + d.Set("private_service_endpoint_url", cls.ServiceEndpoints.PrivateServiceEndpointURL) + d.Set("public_service_endpoint", cls.ServiceEndpoints.PublicServiceEndpointEnabled) + d.Set("private_service_endpoint", cls.ServiceEndpoints.PrivateServiceEndpointEnabled) + d.Set("ingress_hostname", cls.Ingress.HostName) + 
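+ // NOTE: ingress_hostname and ingress_secret expose the cluster's ingress subdomain and the name of its TLS secret; the secret is marked Sensitive in the schema above.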
d.Set("ingress_secret", cls.Ingress.SecretName) + + workerFields, err := csClient.Workers().ListWorkers(clusterID, false, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + workers := make([]string, len(workerFields)) + for i, worker := range workerFields { + workers[i] = worker.ID + } + + d.Set("workers", workers) + + //Get worker pools + pools, err := csClient.WorkerPools().ListWorkerPools(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving worker pools for container vpc cluster: %s", err) + } + + d.Set("worker_pools", flattenVpcWorkerPools(pools)) + + if !strings.HasSuffix(cls.MasterKubeVersion, _OPENSHIFT) { + albs, err := csClient.Albs().ListClusterAlbs(clusterID, targetEnv) + if err != nil && !strings.Contains(err.Error(), "The specified cluster is a lite cluster.") { + return fmt.Errorf("Error retrieving alb's of the cluster %s: %s", clusterID, err) + } + + filterType := d.Get("alb_type").(string) + filteredAlbs := flattenVpcAlbs(albs, filterType) + + d.Set("albs", filteredAlbs) + } + tags, err := GetTagsUsingCRN(meta, cls.CRN) + if err != nil { + log.Printf( + "An error occured during reading of instance (%s) tags : %s", d.Id(), err) + } + d.Set("tags", tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + csClientv1, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + apikeyAPI := csClientv1.Apikeys() + v1targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + apikeyConfig, err := apikeyAPI.GetApiKeyInfo(clusterID, v1targetEnv) + if err != nil { + return err + } + if &apikeyConfig != nil { + if &apikeyConfig.Name != nil { + d.Set("api_key_id", apikeyConfig.ID) + } + if &apikeyConfig.ID != nil { + d.Set("api_key_owner_name", apikeyConfig.Name) + } + if &apikeyConfig.Email != nil { + d.Set("api_key_owner_email", apikeyConfig.Email) + } + } + d.Set(ResourceControllerURL, controller+"/kubernetes/clusters") + d.Set(ResourceName, cls.Name) + d.Set(ResourceCRN, cls.CRN) + d.Set(ResourceStatus, cls.State) + d.Set(ResourceGroupName, cls.ResourceGroupName) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_cluster_worker.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_cluster_worker.go new file mode 100644 index 00000000000..9146ad4df72 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_cluster_worker.go @@ -0,0 +1,120 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMContainerVPCClusterWorker() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerVPCClusterWorkerRead, + + Schema: map[string]*schema.Schema{ + "worker_id": { + Description: "ID of the worker", + Type: schema.TypeString, + Required: true, + }, + "cluster_name_id": { + Description: "Name or ID of the cluster", + Type: schema.TypeString, + Required: true, + }, + "flavor": { + Description: "flavor of the worker", + Type: schema.TypeString, + Computed: true, + }, + "kube_version": { + Description: "kube version of the worker", + Type: schema.TypeString, + Computed: true, + }, + "state": { + Description: "State of the worker", + Type: schema.TypeString, + Computed: true, + }, + "pool_id": { + Description: "worker pool id", + Type: schema.TypeString, + Computed: true, + }, + "pool_name": { + Description: "worker pool name", + Type: schema.TypeString, + Computed: true, + }, + "network_interfaces": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr": { + Type: schema.TypeString, + Computed: true, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "subnet_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", + }, + }, + } +} + +func dataSourceIBMContainerVPCClusterWorkerRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + + wrkAPI := csClient.Workers() + workerID := d.Get("worker_id").(string) + clusterID := d.Get("cluster_name_id").(string) + + workerFields, err := wrkAPI.Get(clusterID, workerID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving worker: %s", err) + } + + d.SetId(workerFields.ID) + d.Set("flavor", workerFields.Flavor) + d.Set("kube_version", workerFields.KubeVersion.Actual) + d.Set("state", workerFields.Health.State) + d.Set("pool_id", workerFields.PoolID) + d.Set("pool_name", workerFields.PoolName) + d.Set("network_interfaces", flattenNetworkInterfaces(workerFields.NetworkInterfaces)) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/kubernetes/clusters") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_worker_pool.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_worker_pool.go new file mode 100644 index 00000000000..dd1c3efd729 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_vpc_worker_pool.go @@ -0,0 +1,109 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMContainerVpcClusterWorkerPool() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerVpcClusterWorkerPoolRead, + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + Description: "Cluster name", + }, + "worker_pool_name": { + Type: schema.TypeString, + Required: true, + Description: "worker pool name", + }, + "flavor": { + Type: schema.TypeString, + Computed: true, + }, + "zones": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "subnet_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + }, + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + "worker_count": { + Type: schema.TypeInt, + Computed: true, + }, + "isolation": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} +func dataSourceIBMContainerVpcClusterWorkerPoolRead(d *schema.ResourceData, meta interface{}) error { + wpClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + clusterName := d.Get("cluster").(string) + workerPoolName := d.Get("worker_pool_name").(string) + workerPoolsAPI := wpClient.WorkerPools() + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + + workerPool, err := workerPoolsAPI.GetWorkerPool(clusterName, workerPoolName, targetEnv) + if err != nil { + return err + } + + var zones = make([]map[string]interface{}, 0) + for _, zone := range workerPool.Zones { + for _, subnet := range zone.Subnets { + zoneInfo := map[string]interface{}{ + "name": zone.ID, + "subnet_id": subnet.ID, + } + zones = append(zones, zoneInfo) + } + } + d.Set("worker_pool_name", workerPool.PoolName) + d.Set("flavor", workerPool.Flavor) + d.Set("worker_count", workerPool.WorkerCount) + d.Set("provider", workerPool.Provider) + d.Set("labels", workerPool.Labels) + d.Set("zones", zones) + d.Set("cluster", clusterName) + d.Set("vpc_id", workerPool.VpcID) + d.Set("isolation", workerPool.Isolation) + d.Set("resource_group_id", targetEnv.ResourceGroup) + d.SetId(workerPool.ID) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_worker_pool.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_worker_pool.go new file mode 100644 index 00000000000..76ecc022e76 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_container_worker_pool.go @@ -0,0 +1,152 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMContainerWorkerPool() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerWorkerPoolRead, + + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + Description: "Name or ID of the cluster", + }, + + "worker_pool_name": { + Type: schema.TypeString, + Required: true, + Description: "worker pool name", + }, + + "machine_type": { + Type: schema.TypeString, + Computed: true, + Description: "worker nodes machine type", + }, + + "size_per_zone": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of nodes per zone", + }, + + "hardware": { + Type: schema.TypeString, + Computed: true, + Description: "Hardware type", + }, + + "disk_encryption": { + Type: schema.TypeBool, + Computed: true, + Description: "worker node disk encrypted if set to true", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "worker pool state", + }, + + "zones": { + Type: schema.TypeList, + Computed: true, + Description: "worker pool zones", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Computed: true, + Description: "worker pool zone name", + }, + + "private_vlan": { + Type: schema.TypeString, + Computed: true, + Description: "worker pool zone private vlan", + }, + + "public_vlan": { + Type: schema.TypeString, + Computed: true, + Description: "worker pool zone public vlan", + }, + + "worker_count": { + Type: schema.TypeInt, + Computed: true, + Description: "worker pool zone worker count", + }, + }, + }, + }, + + "labels": { + Type: schema.TypeMap, + Computed: true, + Description: "list of labels to worker pool", + }, + + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the resource group.", + }, + }, + } +} + +func dataSourceIBMContainerWorkerPoolRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + workerPoolName := d.Get("worker_pool_name").(string) + cluster := d.Get("cluster").(string) + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + + workerPool, err := workerPoolsAPI.GetWorkerPool(cluster, workerPoolName, targetEnv) + if err != nil { + return err + } + + machineType := workerPool.MachineType + d.SetId(workerPool.ID) + d.Set("machine_type", strings.Split(machineType, ".encrypted")[0]) + d.Set("size_per_zone", workerPool.Size) + hardware := workerPool.Isolation + switch strings.ToLower(hardware) { + case "": + hardware = hardwareShared + case isolationPrivate: + hardware = hardwareDedicated + case isolationPublic: + hardware = hardwareShared + } + d.Set("hardware", hardware) + d.Set("state", workerPool.State) + if workerPool.Labels != nil { + d.Set("labels", workerPool.Labels) + } + d.Set("zones", flattenZones(workerPool.Zones)) + if strings.Contains(machineType, "encrypted") { + d.Set("disk_encryption", true) + } else { + d.Set("disk_encryption", false) + } + d.Set("resource_group_id", targetEnv.ResourceGroup) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cos_bucket.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cos_bucket.go new file mode 100644 index 00000000000..83420168fbb --- 
/dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cos_bucket.go @@ -0,0 +1,445 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go-config/resourceconfigurationv1" + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam" + token "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token" + "github.com/IBM/ibm-cos-sdk-go/aws/session" + "github.com/IBM/ibm-cos-sdk-go/service/s3" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +var bucketTypes = []string{"single_site_location", "region_location", "cross_region_location"} + +func dataSourceIBMCosBucket() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCosBucketRead, + + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + }, + "bucket_type": { + Type: schema.TypeString, + ValidateFunc: validateAllowedStringValue(bucketTypes), + Required: true, + }, + "bucket_region": { + Type: schema.TypeString, + Required: true, + }, + "resource_instance_id": { + Type: schema.TypeString, + Required: true, + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private"}), + Description: "public or private", + Default: "public", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "CRN of resource instance", + }, + "key_protect": { + Type: schema.TypeString, + Computed: true, + Description: "CRN of the key you want to use data at rest encryption", + }, + "single_site_location": { + Type: schema.TypeString, + Computed: true, + }, + "region_location": { + Type: schema.TypeString, + Computed: true, + }, + "cross_region_location": { + Type: schema.TypeString, + Computed: true, + }, + "storage_class": { + Type: schema.TypeString, + Computed: true, + }, + "s3_endpoint_public": { + Type: schema.TypeString, + Computed: true, + Description: "Public endpoint for the COS bucket", + }, + "s3_endpoint_private": { + Type: schema.TypeString, + Computed: true, + Description: "Private endpoint for the COS bucket", + }, + "allowed_ip": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of IPv4 or IPv6 addresses ", + }, + "activity_tracking": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "read_data_events": { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, all object read events will be sent to Activity Tracker.", + }, + "write_data_events": { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, all object write events will be sent to Activity Tracker.", + }, + "activity_tracker_crn": { + Type: schema.TypeString, + Computed: true, + Description: "The instance of Activity Tracker that will receive object event data", + }, + }, + }, + }, + "metrics_monitoring": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "usage_metrics_enabled": { + Type: schema.TypeBool, + Computed: true, + Description: "Usage metrics will be sent to the monitoring service.", + }, + "request_metrics_enabled": { + Type: schema.TypeBool, + Computed: true, + Description: "Request metrics will be sent to the monitoring service.", + }, + "metrics_monitoring_crn": { + 
Type: schema.TypeString, + Computed: true, + Description: "Instance of IBM Cloud Monitoring that will receive the bucket metrics.", + }, + }, + }, + }, + "archive_rule": { + Type: schema.TypeList, + Computed: true, + Description: "Archive rule (glacier/accelerated) configuration applied to objects in the COS bucket after a defined period of time", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_id": { + Type: schema.TypeString, + Computed: true, + }, + "enable": { + Type: schema.TypeBool, + Computed: true, + Description: "Enable or disable an archive rule for a bucket", + }, + "days": { + Type: schema.TypeInt, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "expire_rule": { + Type: schema.TypeList, + Computed: true, + Description: "Expire rule configuration applied to objects in the COS bucket", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_id": { + Type: schema.TypeString, + Computed: true, + }, + "enable": { + Type: schema.TypeBool, + Computed: true, + Description: "Enable or disable an expire rule for a bucket", + }, + "days": { + Type: schema.TypeInt, + Computed: true, + Description: "Specifies the number of days when the specific rule action takes effect.", + }, + "prefix": { + Type: schema.TypeString, + Computed: true, + Description: "The rule applies to any objects with keys that match this prefix", + }, + }, + }, + }, + "retention_rule": { + Type: schema.TypeList, + Computed: true, + Description: "A retention policy is enabled at the IBM Cloud Object Storage bucket level. Minimum, maximum and default retention period are defined by this policy and apply to all objects in the bucket.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default": { + Type: schema.TypeInt, + Computed: true, + Description: "Default retention period applied if an object is stored in the bucket without specifying a custom retention period.", + }, + "maximum": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum duration of time an object can be kept unmodified in the bucket.", + }, + "minimum": { + Type: schema.TypeInt, + Computed: true, + Description: "Minimum duration of time an object must be kept unmodified in the bucket.", + }, + "permanent": { + Type: schema.TypeBool, + Computed: true, + Description: "Enable or disable the permanent retention policy on the bucket", + }, + }, + }, + }, + "object_versioning": { + Type: schema.TypeList, + Computed: true, + Description: "Protect objects from accidental deletion or overwrites. 
Versioning allows you to keep multiple versions of an object protecting from unintentional data loss.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable": { + Type: schema.TypeBool, + Computed: true, + Description: "Enable or suspend the versioning for objects in the bucket", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMCosBucketRead(d *schema.ResourceData, meta interface{}) error { + var s3Conf *aws.Config + rsConClient, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + bucketName := d.Get("bucket_name").(string) + serviceID := d.Get("resource_instance_id").(string) + bucketType := d.Get("bucket_type").(string) + bucketRegion := d.Get("bucket_region").(string) + var endpointType = d.Get("endpoint_type").(string) + apiEndpoint, apiEndpointPrivate := selectCosApi(bucketLocationConvert(bucketType), bucketRegion) + if endpointType == "private" { + apiEndpoint = apiEndpointPrivate + } + apiEndpoint = envFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint) + if apiEndpoint == "" { + return fmt.Errorf("The endpoint doesn't exists for given location %s and endpoint type %s", bucketRegion, endpointType) + } + authEndpoint, err := rsConClient.Config.EndpointLocator.IAMEndpoint() + if err != nil { + return err + } + authEndpointPath := fmt.Sprintf("%s%s", authEndpoint, "/identity/token") + apiKey := rsConClient.Config.BluemixAPIKey + if apiKey != "" { + s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewStaticCredentials(aws.NewConfig(), authEndpointPath, apiKey, serviceID)).WithS3ForcePathStyle(true) + } + iamAccessToken := rsConClient.Config.IAMAccessToken + if iamAccessToken != "" { + initFunc := func() (*token.Token, error) { + return &token.Token{ + AccessToken: rsConClient.Config.IAMAccessToken, + RefreshToken: rsConClient.Config.IAMRefreshToken, + TokenType: "Bearer", + ExpiresIn: int64((time.Hour * 248).Seconds()) * -1, + Expiration: time.Now().Add(-1 * time.Hour).Unix(), + }, nil + } + s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewCustomInitFuncCredentials(aws.NewConfig(), initFunc, authEndpointPath, serviceID)).WithS3ForcePathStyle(true) + } + s3Sess := session.Must(session.NewSession()) + s3Client := s3.New(s3Sess, s3Conf) + + headInput := &s3.HeadBucketInput{ + Bucket: aws.String(bucketName), + } + err = s3Client.WaitUntilBucketExists(headInput) + if err != nil { + return fmt.Errorf("failed waiting for bucket %s to be created, %v", + bucketName, err) + } + bucketLocationInput := &s3.GetBucketLocationInput{ + Bucket: aws.String(bucketName), + } + bucketLocationConstraint, err := s3Client.GetBucketLocation(bucketLocationInput) + if err != nil { + return err + } + bLocationConstraint := *bucketLocationConstraint.LocationConstraint + + singleSiteLocationRegex, err := regexp.Compile("^[a-z]{3}[0-9][0-9]-[a-z]{4,8}$") + if err != nil { + return err + } + regionLocationRegex, err := regexp.Compile("^[a-z]{2}-[a-z]{2,5}-[a-z]{4,8}$") + if err != nil { + return err + } + crossRegionLocationRegex, err := regexp.Compile("^[a-z]{2}-[a-z]{4,8}$") + if err != nil { + return err + } + + if singleSiteLocationRegex.MatchString(bLocationConstraint) { + d.Set("single_site_location", strings.Split(bLocationConstraint, "-")[0]) + d.Set("storage_class", strings.Split(bLocationConstraint, "-")[1]) + } + if regionLocationRegex.MatchString(bLocationConstraint) { + d.Set("region_location", fmt.Sprintf("%s-%s", strings.Split(bLocationConstraint, "-")[0], strings.Split(bLocationConstraint, 
"-")[1])) + d.Set("storage_class", strings.Split(bLocationConstraint, "-")[2]) + } + if crossRegionLocationRegex.MatchString(bLocationConstraint) { + d.Set("cross_region_location", strings.Split(bLocationConstraint, "-")[0]) + d.Set("storage_class", strings.Split(bLocationConstraint, "-")[1]) + } + + head, err := s3Client.HeadBucket(headInput) + if err != nil { + return err + } + bucketID := fmt.Sprintf("%s:%s:%s:meta:%s:%s:%s", strings.Replace(serviceID, "::", "", -1), "bucket", bucketName, bucketLocationConvert(bucketType), bucketRegion, endpointType) + d.SetId(bucketID) + d.Set("key_protect", head.IBMSSEKPCrkId) + bucketCRN := fmt.Sprintf("%s:%s:%s", strings.Replace(serviceID, "::", "", -1), "bucket", bucketName) + d.Set("crn", bucketCRN) + d.Set("resource_instance_id", serviceID) + d.Set("s3_endpoint_public", apiEndpoint) + d.Set("s3_endpoint_private", apiEndpointPrivate) + + getBucketConfigOptions := &resourceconfigurationv1.GetBucketConfigOptions{ + Bucket: &bucketName, + } + + sess, err := meta.(ClientSession).CosConfigV1API() + if err != nil { + return err + } + + if endpointType == "private" { + sess.SetServiceURL("https://config.private.cloud-object-storage.cloud.ibm.com/v1") + } + bucketPtr, response, err := sess.GetBucketConfig(getBucketConfigOptions) + + if err != nil { + return fmt.Errorf("Error in getting bucket info rule: %s\n%s", err, response) + } + + if bucketPtr != nil { + + if bucketPtr.Firewall != nil { + d.Set("allowed_ip", flattenStringList(bucketPtr.Firewall.AllowedIp)) + } + if bucketPtr.ActivityTracking != nil { + d.Set("activity_tracking", flattenActivityTrack(bucketPtr.ActivityTracking)) + } + if bucketPtr.MetricsMonitoring != nil { + d.Set("metrics_monitoring", flattenMetricsMonitor(bucketPtr.MetricsMonitoring)) + } + + } + + // Read the lifecycle configuration (archive) + + gInput := &s3.GetBucketLifecycleConfigurationInput{ + Bucket: aws.String(bucketName), + } + + lifecycleptr, err := s3Client.GetBucketLifecycleConfiguration(gInput) + + if (err != nil && !strings.Contains(err.Error(), "NoSuchLifecycleConfiguration: The lifecycle configuration does not exist")) && (err != nil && bucketPtr != nil && bucketPtr.Firewall != nil && !strings.Contains(err.Error(), "AccessDenied: Access Denied")) { + return err + } + + if lifecycleptr != nil { + if len(lifecycleptr.Rules) > 0 { + archiveRules := archiveRuleGet(lifecycleptr.Rules) + expireRules := expireRuleGet(lifecycleptr.Rules) + if len(archiveRules) > 0 { + d.Set("archive_rule", archiveRules) + } + if len(expireRules) > 0 { + d.Set("expire_rule", expireRules) + } + } + } + + // Read the retention policy + retentionInput := &s3.GetBucketProtectionConfigurationInput{ + Bucket: aws.String(bucketName), + } + retentionptr, err := s3Client.GetBucketProtectionConfiguration(retentionInput) + + if err != nil && bucketPtr != nil && bucketPtr.Firewall != nil && !strings.Contains(err.Error(), "AccessDenied: Access Denied") { + return err + } + + if retentionptr != nil { + retentionRules := retentionRuleGet(retentionptr.ProtectionConfiguration) + if len(retentionRules) > 0 { + d.Set("retention_rule", retentionRules) + } + } + + // Get the object Versioning + versionInput := &s3.GetBucketVersioningInput{ + Bucket: aws.String(bucketName), + } + versionPtr, err := s3Client.GetBucketVersioning(versionInput) + + if err != nil && bucketPtr != nil && bucketPtr.Firewall != nil && !strings.Contains(err.Error(), "AccessDenied: Access Denied") { + return err + } + if versionPtr != nil { + versioningData := 
flattenCosObejctVersioning(versionPtr) + if len(versioningData) > 0 { + d.Set("object_versioning", versioningData) + } + } + + return nil +} + +func bucketLocationConvert(locationtype string) string { + if locationtype == "cross_region_location" { + return "crl" + } + if locationtype == "region_location" { + return "rl" + } + if locationtype == "single_site_location" { + // "ssl", not "crl": single-site must not collide with cross-region in the computed bucket ID. + return "ssl" + } + return "" +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cos_bucket_object.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cos_bucket_object.go new file mode 100644 index 00000000000..ab1034e5a21 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cos_bucket_object.go @@ -0,0 +1,151 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "fmt" + "log" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/service/s3" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMCosBucketObject() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMCosBucketObjectRead, + + Schema: map[string]*schema.Schema{ + "body": { + Type: schema.TypeString, + Computed: true, + Description: "COS object body", + }, + "bucket_crn": { + Type: schema.TypeString, + Required: true, + Description: "COS bucket CRN", + }, + "bucket_location": { + Type: schema.TypeString, + Required: true, + Description: "COS bucket location", + }, + "content_length": { + Type: schema.TypeInt, + Computed: true, + Description: "COS object content length", + }, + "content_type": { + Type: schema.TypeString, + Computed: true, + Description: "COS object content type", + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private", "direct"}), + Description: "COS endpoint type: public, private, direct", + Default: "public", + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "COS object MD5 hexdigest", + }, + "key": { + Type: schema.TypeString, + Required: true, + Description: "COS object key", + }, + "last_modified": { + Type: schema.TypeString, + Computed: true, + Description: "COS object last modified date", + }, + "version_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMCosBucketObjectRead(d *schema.ResourceData, m interface{}) error { + bucketCRN := d.Get("bucket_crn").(string) + bucketName := strings.Split(bucketCRN, ":bucket:")[1] + instanceCRN := fmt.Sprintf("%s::", strings.Split(bucketCRN, ":bucket:")[0]) + + bucketLocation := d.Get("bucket_location").(string) + endpointType := d.Get("endpoint_type").(string) + + bxSession, err := m.(ClientSession).BluemixSession() + if err != nil { + return err + } + + s3Client, err := getS3Client(bxSession, bucketLocation, endpointType, instanceCRN) + if err != nil { + return err + } + + objectKey := d.Get("key").(string) + headInput := &s3.HeadObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(objectKey), + } + + out, err := s3Client.HeadObject(headInput) + if err != nil { + return fmt.Errorf("failed getting COS bucket (%s) object (%s): %w", bucketName, objectKey, err) + } + + log.Printf("[DEBUG] Received COS object: %s", out) + + d.Set("content_length", out.ContentLength) + d.Set("content_type", out.ContentType) + d.Set("etag", strings.Trim(aws.StringValue(out.ETag), `"`))
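+ // Last-Modified may be absent on a HEAD response; guard the pointer before formatting it as RFC1123.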
+ if out.LastModified != nil { + d.Set("last_modified", out.LastModified.Format(time.RFC1123)) + } else { + d.Set("last_modified", "") + } + + if isContentTypeAllowed(out.ContentType) { + getInput := s3.GetObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(objectKey), + } + out, err := s3Client.GetObject(&getInput) + if err != nil { + return fmt.Errorf("failed getting COS object: %w", err) + } + + buf := new(bytes.Buffer) + bytesRead, err := buf.ReadFrom(out.Body) + if err != nil { + return fmt.Errorf("failed reading content of COS bucket (%s) object (%s): %w", bucketName, objectKey, err) + } + log.Printf("[INFO] Saving %d bytes from COS bucket (%s) object (%s)", bytesRead, bucketName, objectKey) + d.Set("body", buf.String()) + } else { + contentType := "" + if out.ContentType == nil { + contentType = "" + } else { + contentType = aws.StringValue(out.ContentType) + } + + log.Printf("[INFO] Ignoring body of COS bucket (%s) object (%s) with Content-Type %q", bucketName, objectKey, contentType) + } + + objectID := getObjectId(bucketCRN, objectKey, bucketLocation) + d.SetId(objectID) + d.Set("version_id", out.VersionId) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cr_namespaces.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cr_namespaces.go new file mode 100644 index 00000000000..65f5bec066f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_cr_namespaces.go @@ -0,0 +1,114 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/container-registry-go-sdk/containerregistryv1" +) + +func dataIBMContainerRegistryNamespaces() *schema.Resource { + return &schema.Resource{ + Read: dataIBMContainerRegistryNamespacesRead, + + Schema: map[string]*schema.Schema{ + "namespaces": { + Type: schema.TypeList, + Computed: true, + Description: "Container Registry Namespaces", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Container Registry Namespace name", + }, + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + Description: "Resource Group to which namespace has to be assigned", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "CRN of the Namespace", + }, + "created_date": { + Type: schema.TypeString, + Computed: true, + Description: "Created Date", + }, + "updated_date": { + Type: schema.TypeString, + Computed: true, + Description: "Updated Date", + }, + "resource_created_date": { + Type: schema.TypeString, + Computed: true, + Description: "When the namespace was assigned to a resource group.", + }, + "account": { + Type: schema.TypeString, + Computed: true, + Description: "The IBM Cloud account that owns the namespace.", + }, + // DEPRECATED FIELDS TO BE REMOVED IN FUTURE + "created_on": { + Type: schema.TypeString, + Computed: true, + Description: "Created Date", + Deprecated: "This field is deprecated", + }, + "updated_on": { + Type: schema.TypeString, + Computed: true, + Description: "Updated Date", + Deprecated: "This field is deprecated", + }, + }, + }, + }, + }, + } +} + +func dataIBMContainerRegistryNamespacesRead(d *schema.ResourceData, meta interface{}) error { + containerRegistryClient, err := meta.(ClientSession).ContainerRegistryV1() + if err != nil { + 
return err + } + + listNamespaceDetailsOptions := &containerregistryv1.ListNamespaceDetailsOptions{} + + namespaceDetailsList, _, err := containerRegistryClient.ListNamespaceDetails(listNamespaceDetailsOptions) + if err != nil { + return err + } + + namespaces := []map[string]interface{}{} + for _, namespaceDetails := range namespaceDetailsList { + namespace := map[string]interface{}{} + namespace["name"] = namespaceDetails.Name + namespace["resource_group_id"] = namespaceDetails.ResourceGroup + namespace["crn"] = namespaceDetails.CRN + namespace["created_date"] = namespaceDetails.CreatedDate + namespace["updated_date"] = namespaceDetails.UpdatedDate + namespace["account"] = namespaceDetails.Account + namespace["resource_created_date"] = namespaceDetails.ResourceCreatedDate + // DEPRECATED FIELDS TO BE REMOVED IN FUTURE + namespace["created_on"] = namespaceDetails.CreatedDate + namespace["updated_on"] = namespaceDetails.UpdatedDate + namespaces = append(namespaces, namespace) + } + if err = d.Set("namespaces", namespaces); err != nil { + return fmt.Errorf("Error setting namespaces: %s", err) + } + d.SetId(time.Now().UTC().String()) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_database.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_database.go new file mode 100644 index 00000000000..84fddda3874 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_database.go @@ -0,0 +1,733 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "path/filepath" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/api/icd/icdv4" + "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/models" +) + +func dataSourceIBMDatabaseInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDatabaseInstanceRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "Resource instance name, for example 'my Database instance'", + Type: schema.TypeString, + Required: true, + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "The id of the resource group in which the Database instance is present", + }, + + "location": { + Description: "The location or the region in which the Database instance exists", + Type: schema.TypeString, + Optional: true, + }, + + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "Unique identifier of resource instance", + }, + + "service": { + Description: "The name of the database service", + Type: schema.TypeString, + Optional: true, + }, + "plan": { + Description: "The plan type of the Database instance", + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Description: "The resource instance status", + Type: schema.TypeString, + Computed: true, + }, + "adminuser": { + Description: "The admin user id for the instance", + Type: schema.TypeString, + Computed: true, + }, + "adminpassword": { + Description: "The admin user password for the instance", + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "version": { + Description: "The database version to provision if specified", + Type: schema.TypeString, + Computed: true, + }, + "members_memory_allocation_mb": { + 
Description: "Memory allocation required for cluster", + Type: schema.TypeInt, + Computed: true, + }, + "members_disk_allocation_mb": { + Description: "Disk allocation required for cluster", + Type: schema.TypeInt, + Computed: true, + }, + "platform_options": { + Description: "Platform-specific options for this deployment.r", + Type: schema.TypeMap, + Computed: true, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "users": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Description: "User name", + Type: schema.TypeString, + Computed: true, + }, + "password": { + Description: "User password", + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + }, + }, + }, + "cert_file_path": { + Description: "The absolute path to certificate PEM file", + Type: schema.TypeString, + Computed: true, + }, + "connectionstrings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Description: "User name", + Type: schema.TypeString, + Computed: true, + }, + "composed": { + Description: "Connection string", + Type: schema.TypeString, + Computed: true, + }, + "scheme": { + Description: "DB scheme", + Type: schema.TypeString, + Computed: true, + }, + "certname": { + Description: "Certificate Name", + Type: schema.TypeString, + Computed: true, + }, + "certbase64": { + Description: "Certificate in base64 encoding", + Type: schema.TypeString, + Computed: true, + }, + "password": { + Description: "Password", + Type: schema.TypeString, + Computed: true, + }, + "queryoptions": { + Description: "DB query options", + Type: schema.TypeString, + Computed: true, + }, + "database": { + Description: "DB name", + Type: schema.TypeString, + Computed: true, + }, + "path": { + Description: "DB path", + Type: schema.TypeString, + Computed: true, + }, + "hosts": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hostname": { + Description: "DB host name", + Type: schema.TypeString, + Computed: true, + }, + "port": { + Description: "DB port", + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "whitelist": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Description: "Whitelist IP address in CIDR notation", + Type: schema.TypeString, + Computed: true, + }, + "description": { + Description: "Unique white list description", + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "groups": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group_id": { + Description: "Scaling group name", + Type: schema.TypeString, + Computed: true, + }, + "count": { + Description: "Count of scaling groups for the instance", + Type: schema.TypeInt, + Computed: true, + }, + "memory": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "units": { + Type: schema.TypeString, + Computed: true, + Description: "The units memory is allocated in.", + }, + "allocation_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The current memory allocation for a group instance", + }, + "minimum_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum memory size for a group instance", + }, + 
"step_size_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The step size memory increases or decreases in.", + }, + "is_adjustable": { + Type: schema.TypeBool, + Computed: true, + Description: "Is the memory size adjustable.", + }, + "can_scale_down": { + Type: schema.TypeBool, + Computed: true, + Description: "Can memory scale down as well as up.", + }, + }, + }, + }, + "cpu": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "units": { + Type: schema.TypeString, + Computed: true, + Description: "The .", + }, + "allocation_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The current cpu allocation count", + }, + "minimum_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum number of cpus allowed", + }, + "step_size_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of CPUs allowed to step up or down by", + }, + "is_adjustable": { + Type: schema.TypeBool, + Computed: true, + Description: "Are the number of CPUs adjustable", + }, + "can_scale_down": { + Type: schema.TypeBool, + Computed: true, + Description: "Can the number of CPUs be scaled down as well as up", + }, + }, + }, + }, + "disk": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "units": { + Type: schema.TypeString, + Computed: true, + Description: "The units disk is allocated in", + }, + "allocation_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The current disk allocation", + }, + "minimum_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum disk size allowed", + }, + "step_size_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The step size disk increases or decreases in", + }, + "is_adjustable": { + Type: schema.TypeBool, + Computed: true, + Description: "Is the disk size adjustable", + }, + "can_scale_down": { + Type: schema.TypeBool, + Computed: true, + Description: "Can the disk size be scaled down as well as up", + }, + }, + }, + }, + }, + }, + }, + "auto_scaling": { + Type: schema.TypeList, + Description: "ICD Auto Scaling", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeList, + Description: "Disk Auto Scaling", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "capacity_enabled": { + Description: "Auto Scaling Scalar: Capacity Enabled", + Type: schema.TypeBool, + Computed: true, + }, + "free_space_remaining_percent": { + Description: "Auto Scaling Scalar: Capacity Free Space Remaining Percent", + Type: schema.TypeInt, + Computed: true, + }, + "free_space_less_than_percent": { + Description: "Auto Scaling Scalar: Capacity Free Space Less Than Percent", + Type: schema.TypeInt, + Computed: true, + }, + "io_enabled": { + Description: "Auto Scaling Scalar: IO Utilization Enabled", + Type: schema.TypeBool, + Computed: true, + }, + + "io_over_period": { + Description: "Auto Scaling Scalar: IO Utilization Over Period", + Type: schema.TypeString, + Computed: true, + }, + "io_above_percent": { + Description: "Auto Scaling Scalar: IO Utilization Above Percent", + Type: schema.TypeInt, + Computed: true, + }, + "rate_increase_percent": { + Description: "Auto Scaling Rate: Increase Percent", + Type: schema.TypeInt, + Computed: true, + }, + "rate_period_seconds": { + Description: "Auto Scaling Rate: Period Seconds", + Type: schema.TypeInt, + Computed: true, + }, + 
"rate_limit_mb_per_member": { + Description: "Auto Scaling Rate: Limit mb per member", + Type: schema.TypeInt, + Computed: true, + }, + "rate_limit_count_per_member": { + Description: "Auto Scaling Rate: Limit count per number", + Type: schema.TypeInt, + Computed: true, + }, + "rate_units": { + Description: "Auto Scaling Rate: Units ", + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "memory": { + Type: schema.TypeList, + Description: "Memory Auto Scaling", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "io_enabled": { + Description: "Auto Scaling Scalar: IO Utilization Enabled", + Type: schema.TypeBool, + Computed: true, + }, + + "io_over_period": { + Description: "Auto Scaling Scalar: IO Utilization Over Period", + Type: schema.TypeString, + Computed: true, + }, + "io_above_percent": { + Description: "Auto Scaling Scalar: IO Utilization Above Percent", + Type: schema.TypeInt, + Computed: true, + }, + "rate_increase_percent": { + Description: "Auto Scaling Rate: Increase Percent", + Type: schema.TypeInt, + Computed: true, + }, + "rate_period_seconds": { + Description: "Auto Scaling Rate: Period Seconds", + Type: schema.TypeInt, + Computed: true, + }, + "rate_limit_mb_per_member": { + Description: "Auto Scaling Rate: Limit mb per member", + Type: schema.TypeInt, + Computed: true, + }, + "rate_limit_count_per_member": { + Description: "Auto Scaling Rate: Limit count per number", + Type: schema.TypeInt, + Computed: true, + }, + "rate_units": { + Description: "Auto Scaling Rate: Units ", + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "cpu": { + Type: schema.TypeList, + Description: "CPU Auto Scaling", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rate_increase_percent": { + Description: "Auto Scaling Rate: Increase Percent", + Type: schema.TypeInt, + Computed: true, + }, + "rate_period_seconds": { + Description: "Auto Scaling Rate: Period Seconds", + Type: schema.TypeInt, + Computed: true, + }, + "rate_limit_mb_per_member": { + Description: "Auto Scaling Rate: Limit mb per member", + Type: schema.TypeInt, + Computed: true, + }, + "rate_limit_count_per_member": { + Description: "Auto Scaling Rate: Limit count per number", + Type: schema.TypeInt, + Computed: true, + }, + "rate_units": { + Description: "Auto Scaling Rate: Units ", + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about the resource", + }, + }, + } +} + +func dataSourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerAPIV2() + if err != nil { + return err + } + rsAPI := rsConClient.ResourceServiceInstanceV2() + name := d.Get("name").(string) + + rsInstQuery := controllerv2.ServiceInstanceQuery{ + Name: name, + } + + if rsGrpID, ok := 
d.GetOk("resource_group_id"); ok { + rsInstQuery.ResourceGroupID = rsGrpID.(string) + } else { + defaultRg, err := defaultResourceGroup(meta) + if err != nil { + return err + } + rsInstQuery.ResourceGroupID = defaultRg + } + + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + if service, ok := d.GetOk("database"); ok { + + serviceOff, err := rsCatRepo.FindByName(service.(string), true) + if err != nil { + return fmt.Errorf("Error retrieving database offering: %s", err) + } + + rsInstQuery.ServiceID = serviceOff[0].ID + } + + var instances []models.ServiceInstanceV2 + + instances, err = rsAPI.ListInstances(rsInstQuery) + if err != nil { + return err + } + var filteredInstances []models.ServiceInstanceV2 + var location string + + if loc, ok := d.GetOk("location"); ok { + location = loc.(string) + for _, instance := range instances { + if getLocation(instance) == location { + filteredInstances = append(filteredInstances, instance) + } + } + } else { + filteredInstances = instances + } + + if len(filteredInstances) == 0 { + return fmt.Errorf("No resource instance found with name [%s]\nIf not specified please specify more filters like resource_group_id if instance doesn't exists in default group, location or database", name) + } + + var instance models.ServiceInstanceV2 + + if len(filteredInstances) > 1 { + return fmt.Errorf( + "More than one resource instance found with name matching [%s]\nIf not specified please specify more filters like resource_group_id if instance doesn't exists in default group, location or database", name) + } + instance = filteredInstances[0] + + d.SetId(instance.ID) + + err = GetTags(d, meta) + if err != nil { + return fmt.Errorf( + "Error on get of resource instance (%s) tags: %s", d.Id(), err) + } + + d.Set("name", instance.Name) + d.Set("status", instance.State) + d.Set("resource_group_id", instance.ResourceGroupID) + d.Set("location", instance.RegionID) + d.Set("guid", instance.Guid) + + serviceOff, err := rsCatRepo.GetServiceName(instance.ServiceID) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + d.Set("service", serviceOff) + + servicePlan, err := rsCatRepo.GetServicePlanName(instance.ResourcePlanID) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + d.Set("plan", servicePlan) + + d.Set(ResourceName, instance.Name) + d.Set(ResourceCRN, instance.Crn.String()) + d.Set(ResourceStatus, instance.State) + d.Set(ResourceGroupName, instance.ResourceGroupName) + + rcontroller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, rcontroller+"/services/"+url.QueryEscape(instance.Crn.String())) + + icdClient, err := meta.(ClientSession).ICDAPI() + if err != nil { + return fmt.Errorf("Error getting database client settings: %s", err) + } + + icdId := EscapeUrlParm(instance.ID) + cdb, err := icdClient.Cdbs().GetCdb(icdId) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return fmt.Errorf("The database instance was not found in the region set for the Provider, or the default of us-south. Specify the correct region in the provider definition, or create a provider alias for the correct region. 
%v", err) + } + return fmt.Errorf("Error getting database config for: %s with error %s\n", icdId, err) + } + d.Set("adminuser", cdb.AdminUser) + d.Set("version", cdb.Version) + if &cdb.PlatformOptions != nil { + platformOptions := map[string]interface{}{ + "key_protect_key_id": cdb.PlatformOptions.KeyProtectKey, + "disk_encryption_key_crn": cdb.PlatformOptions.DiskENcryptionKeyCrn, + "backup_encryption_key_crn": cdb.PlatformOptions.BackUpEncryptionKeyCrn, + } + d.Set("platform_options", platformOptions) + } + + groupList, err := icdClient.Groups().GetGroups(icdId) + if err != nil { + return fmt.Errorf("Error getting database groups: %s", err) + } + d.Set("groups", flattenIcdGroups(groupList)) + d.Set("members_memory_allocation_mb", groupList.Groups[0].Memory.AllocationMb) + d.Set("members_disk_allocation_mb", groupList.Groups[0].Disk.AllocationMb) + + autoSclaingGroup, err := icdClient.AutoScaling().GetAutoScaling(icdId, "member") + if err != nil { + return fmt.Errorf("Error getting database groups: %s", err) + } + d.Set("auto_scaling", flattenICDAutoScalingGroup(autoSclaingGroup)) + + whitelist, err := icdClient.Whitelists().GetWhitelist(icdId) + if err != nil { + return fmt.Errorf("Error getting database whitelist: %s", err) + } + d.Set("whitelist", flattenWhitelist(whitelist)) + + connectionEndpoint := "public" + if instance.Parameters != nil { + if endpoint, ok := instance.Parameters["service-endpoints"]; ok { + if endpoint == "private" { + connectionEndpoint = "private" + } + } + + } + + var connectionStrings []CsEntry + //ICD does not implement a GetUsers API. Users populated from tf configuration. + tfusers := d.Get("users").(*schema.Set) + users := expandUsers(tfusers) + user := icdv4.User{ + UserName: cdb.AdminUser, + } + users = append(users, user) + for _, user := range users { + userName := user.UserName + csEntry, err := getConnectionString(d, userName, connectionEndpoint, meta) + if err != nil { + return fmt.Errorf("Error getting user connection string for user (%s): %s", userName, err) + } + connectionStrings = append(connectionStrings, csEntry) + } + d.Set("connectionstrings", flattenConnectionStrings(connectionStrings)) + + connStr := connectionStrings[0] + certFile, err := filepath.Abs(connStr.CertName + ".pem") + if err != nil { + return fmt.Errorf("Error generating certificate file path: %s", err) + } + content, err := base64.StdEncoding.DecodeString(connStr.CertBase64) + if err != nil { + return fmt.Errorf("Error decoding certificate content: %s", err) + } + if err := ioutil.WriteFile(certFile, content, 0644); err != nil { + return fmt.Errorf("Error writing certificate to file: %s", err) + } + d.Set("cert_file_path", certFile) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_gateway.go new file mode 100644 index 00000000000..c1d2d4ed2a0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_gateway.go @@ -0,0 +1,467 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/networking-go-sdk/directlinkv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + dlGateway = "gateway" + dlSecurityPolicy = "security_policy" + dlActiveCak = "active_cak" +) + +func dataSourceIBMDLGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDLGatewayRead, + Schema: map[string]*schema.Schema{ + dlName: { + Type: schema.TypeString, + Required: true, + Description: "The unique user-defined name for this gateway", + ValidateFunc: InvokeValidator("ibm_dl_gateway", dlName), + }, + + dlGatewaysVirtualConnections: { + Type: schema.TypeList, + Description: "Collection of direct link gateway virtual connections", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + dlVCCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time resource was created", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this virtual connection", + }, + dlVCStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Status of the virtual connection. Possible values: [pending,attached,approval_pending,rejected,expired,deleting,detached_by_network_pending,detached_by_network]", + }, + dlVCNetworkAccount: { + Type: schema.TypeString, + Computed: true, + Description: "For virtual connections across two different IBM Cloud Accounts network_account indicates the account that owns the target network.", + }, + dlVCNetworkId: { + Type: schema.TypeString, + Computed: true, + Description: "Unique identifier of the target network. For type=vpc virtual connections this is the CRN of the target VPC. This field does not apply to type=classic connections.", + }, + dlVCType: { + Type: schema.TypeString, + Computed: true, + Description: "The type of virtual connection. (classic,vpc)", + }, + dlVCName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this virtual connection. Virtual connection names are unique within a gateway. This is the name of the virtual connection itself; the network being connected may have its own name attribute", + }, + }, + }, + }, + + dlBgpAsn: { + Type: schema.TypeInt, + Computed: true, + Description: "BGP ASN", + }, + dlBgpBaseCidr: { + Type: schema.TypeString, + Computed: true, + Description: "BGP base CIDR", + }, + dlBgpCerCidr: { + Type: schema.TypeString, + Computed: true, + Description: "BGP customer edge router CIDR", + }, + dlBgpIbmAsn: { + Type: schema.TypeInt, + Computed: true, + Description: "IBM BGP ASN", + }, + dlBgpIbmCidr: { + Type: schema.TypeString, + Computed: true, + Description: "BGP IBM CIDR", + }, + dlBgpStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway BGP status", + }, + dlMacSecConfig: { + Type: schema.TypeList, + Computed: true, + Description: "MACsec configuration information", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + dlActive: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicate whether MACsec protection should be active (true) or inactive (false) for this MACsec enabled gateway", + }, + dlActiveCak: { + Type: schema.TypeString, + Computed: true, + Description: "Active connectivity association key.", + }, + dlPrimaryCak: { + Type: schema.TypeString, + Computed: true, + Description: "Desired primary connectivity association key.", + }, + dlFallbackCak: { + Type: schema.TypeString, + Computed: true, + Description: "Fallback connectivity association key.", + }, + dlSakExpiryTime: { + Type: schema.TypeInt, + Computed: true, + Description: "Secure Association Key (SAK) expiry time in seconds", + }, + dlSecurityPolicy: { + Type: schema.TypeString, + Computed: true, + Description: "Packets without MACsec headers are not dropped when security_policy is should_secure.", + }, + dlWindowSize: { + Type: schema.TypeInt, + Computed: true, + Description: "Replay protection window size", + }, + dlCipherSuite: { + Type: schema.TypeString, + Computed: true, + Description: "SAK cipher suite", + }, + dlConfidentialityOffset: { + Type: schema.TypeInt, + Computed: true, + Description: "Confidentiality Offset", + }, + dlCryptographicAlgorithm: { + Type: schema.TypeString, + Computed: true, + Description: "Cryptographic Algorithm", + }, + dlKeyServerPriority: { + Type: schema.TypeInt, + Computed: true, + Description: "Key Server Priority", + }, + dlMacSecConfigStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The current status of MACsec on the device for this gateway", + }, + }, + }, + }, + dlChangeRequest: { + Type: schema.TypeString, + Computed: true, + Description: "Changes pending approval for provider managed Direct Link Connect gateways", + }, + dlCompletionNoticeRejectReason: { + Type: schema.TypeString, + Computed: true, + Description: "Reason for completion notice rejection", + }, + dlCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time resource was created", + }, + dlCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN (Cloud Resource Name) of this gateway", + }, + dlCrossConnectRouter: { + Type: schema.TypeString, + Computed: true, + Description: "Cross connect router", + }, + dlGlobal: { + Type: schema.TypeBool, + Computed: true, + Description: "Gateways with global routing (true) can connect to networks outside their associated region", + }, + dlLinkStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway link status", + }, + dlLocationDisplayName: { + Type: schema.TypeString, + Computed: true, +
Description: "Gateway location long name", + }, + dlLocationName: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway location", + }, + dlMetered: { + Type: schema.TypeBool, + Computed: true, + Description: "Metered billing option", + }, + + dlOperationalStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway operational status", + }, + dlPort: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway port", + }, + dlProviderAPIManaged: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether gateway was created through a provider portal", + }, + dlResourceGroup: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway resource group", + }, + dlSpeedMbps: { + Type: schema.TypeInt, + Computed: true, + Description: "Gateway speed in megabits per second", + }, + dlType: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway type", + }, + dlVlan: { + Type: schema.TypeInt, + Computed: true, + Description: "VLAN allocated for this gateway", + }, + }, + } +} + +func dataSourceIBMDLGatewayVirtualConnectionsRead(d *schema.ResourceData, meta interface{}) error { + directLink, err := meta.(ClientSession).DirectlinkV1API() + + if err != nil { + return err + } + listVcOptions := &directlinkv1.ListGatewayVirtualConnectionsOptions{} + dlGatewayId := d.Id() + listVcOptions.SetGatewayID(dlGatewayId) + listGatewayVirtualConnections, response, err := directLink.ListGatewayVirtualConnections(listVcOptions) + if err != nil { + return fmt.Errorf("Error while listing directlink gateway's virtual connections XXX %s\n%s", err, response) + } + gatewayVCs := make([]map[string]interface{}, 0) + for _, instance := range listGatewayVirtualConnections.VirtualConnections { + gatewayVC := map[string]interface{}{} + + if instance.ID != nil { + gatewayVC[ID] = *instance.ID + } + if instance.Name != nil { + gatewayVC[dlVCName] = *instance.Name + } + if instance.Type != nil { + gatewayVC[dlVCType] = *instance.Type + } + if instance.NetworkAccount != nil { + gatewayVC[dlVCNetworkAccount] = *instance.NetworkAccount + } + if instance.NetworkID != nil { + gatewayVC[dlVCNetworkId] = *instance.NetworkID + } + if instance.CreatedAt != nil { + gatewayVC[dlVCCreatedAt] = instance.CreatedAt.String() + + } + if instance.Status != nil { + gatewayVC[dlVCStatus] = *instance.Status + } + + gatewayVCs = append(gatewayVCs, gatewayVC) + } + d.SetId(dlGatewayId) + + d.Set(dlGatewaysVirtualConnections, gatewayVCs) + return nil +} +func dataSourceIBMDLGatewayRead(d *schema.ResourceData, meta interface{}) error { + directLink, err := directlinkClient(meta) + dlGatewayName := d.Get(dlName).(string) + + if err != nil { + return err + } + listGatewaysOptionsModel := &directlinkv1.ListGatewaysOptions{} + listGateways, response, err := directLink.ListGateways(listGatewaysOptionsModel) + if err != nil { + log.Println("[WARN] Error listing dl Gateway", response, err) + return err + } + var found bool + + for _, instance := range listGateways.Gateways { + + if *instance.Name == dlGatewayName { + found = true + if instance.ID != nil { + d.SetId(*instance.ID) + } + if instance.Name != nil { + d.Set(dlName, *instance.Name) + } + if instance.Crn != nil { + d.Set(dlCrn, *instance.Crn) + } + if instance.BgpAsn != nil { + d.Set(dlBgpAsn, *instance.BgpAsn) + } + if instance.BgpIbmCidr != nil { + d.Set(dlBgpIbmCidr, *instance.BgpIbmCidr) + } + if instance.BgpIbmAsn != nil { + d.Set(dlBgpIbmAsn, *instance.BgpIbmAsn) + } + if instance.Metered != nil { + 
d.Set(dlMetered, *instance.Metered) + } + if instance.CrossConnectRouter != nil { + d.Set(dlCrossConnectRouter, *instance.CrossConnectRouter) + } + if instance.BgpBaseCidr != nil { + d.Set(dlBgpBaseCidr, *instance.BgpBaseCidr) + } + if instance.BgpCerCidr != nil { + d.Set(dlBgpCerCidr, *instance.BgpCerCidr) + } + + if instance.ProviderApiManaged != nil { + d.Set(dlProviderAPIManaged, *instance.ProviderApiManaged) + } + if instance.Type != nil { + d.Set(dlType, *instance.Type) + } + if instance.SpeedMbps != nil { + d.Set(dlSpeedMbps, *instance.SpeedMbps) + } + if instance.OperationalStatus != nil { + d.Set(dlOperationalStatus, *instance.OperationalStatus) + } + if instance.BgpStatus != nil { + d.Set(dlBgpStatus, *instance.BgpStatus) + } + if instance.LocationName != nil { + d.Set(dlLocationName, *instance.LocationName) + } + if instance.LocationDisplayName != nil { + d.Set(dlLocationDisplayName, *instance.LocationDisplayName) + } + if instance.Vlan != nil { + d.Set(dlVlan, *instance.Vlan) + } + if instance.Global != nil { + d.Set(dlGlobal, *instance.Global) + } + if instance.Port != nil { + d.Set(dlPort, *instance.Port.ID) + } + if instance.LinkStatus != nil { + d.Set(dlLinkStatus, *instance.LinkStatus) + } + if instance.CreatedAt != nil { + d.Set(dlCreatedAt, instance.CreatedAt.String()) + } + dtype := *instance.Type + if dtype == "dedicated" { + if instance.MacsecConfig != nil { + macsecList := make([]map[string]interface{}, 0) + currentMacSec := map[string]interface{}{} + // Construct an instance of the GatewayMacsecConfigTemplate model + gatewayMacsecConfigTemplateModel := instance.MacsecConfig + if gatewayMacsecConfigTemplateModel.Active != nil { + currentMacSec[dlActive] = *gatewayMacsecConfigTemplateModel.Active + } + if gatewayMacsecConfigTemplateModel.ActiveCak != nil { + if gatewayMacsecConfigTemplateModel.ActiveCak.Crn != nil { + currentMacSec[dlActiveCak] = *gatewayMacsecConfigTemplateModel.ActiveCak.Crn + } + } + if gatewayMacsecConfigTemplateModel.PrimaryCak != nil { + currentMacSec[dlPrimaryCak] = *gatewayMacsecConfigTemplateModel.PrimaryCak.Crn + } + if gatewayMacsecConfigTemplateModel.FallbackCak != nil { + if gatewayMacsecConfigTemplateModel.FallbackCak.Crn != nil { + currentMacSec[dlFallbackCak] = *gatewayMacsecConfigTemplateModel.FallbackCak.Crn + } + } + if gatewayMacsecConfigTemplateModel.SakExpiryTime != nil { + currentMacSec[dlSakExpiryTime] = *gatewayMacsecConfigTemplateModel.SakExpiryTime + } + if gatewayMacsecConfigTemplateModel.SecurityPolicy != nil { + currentMacSec[dlSecurityPolicy] = *gatewayMacsecConfigTemplateModel.SecurityPolicy + } + if gatewayMacsecConfigTemplateModel.WindowSize != nil { + currentMacSec[dlWindowSize] = *gatewayMacsecConfigTemplateModel.WindowSize + } + if gatewayMacsecConfigTemplateModel.CipherSuite != nil { + currentMacSec[dlCipherSuite] = *gatewayMacsecConfigTemplateModel.CipherSuite + } + if gatewayMacsecConfigTemplateModel.ConfidentialityOffset != nil { + currentMacSec[dlConfidentialityOffset] = *gatewayMacsecConfigTemplateModel.ConfidentialityOffset + } + if gatewayMacsecConfigTemplateModel.CryptographicAlgorithm != nil { + currentMacSec[dlCryptographicAlgorithm] = *gatewayMacsecConfigTemplateModel.CryptographicAlgorithm + } + if gatewayMacsecConfigTemplateModel.KeyServerPriority != nil { + currentMacSec[dlKeyServerPriority] = *gatewayMacsecConfigTemplateModel.KeyServerPriority + } + if gatewayMacsecConfigTemplateModel.Status != nil { + currentMacSec[dlMacSecConfigStatus] = *gatewayMacsecConfigTemplateModel.Status + } + macsecList = 
append(macsecList, currentMacSec) + d.Set(dlMacSecConfig, macsecList) + } + } + if instance.ChangeRequest != nil { + gatewayChangeRequestIntf := instance.ChangeRequest + gatewayChangeRequest := gatewayChangeRequestIntf.(*directlinkv1.GatewayChangeRequest) + d.Set(dlChangeRequest, *gatewayChangeRequest.Type) + } + if instance.ResourceGroup != nil { + rg := instance.ResourceGroup + d.Set(dlResourceGroup, *rg.ID) + } + + } + } + + if !found { + return fmt.Errorf( + "Error Gateway with name (%s) not found ", dlGatewayName) + } + return dataSourceIBMDLGatewayVirtualConnectionsRead(d, meta) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_gateways.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_gateways.go new file mode 100644 index 00000000000..311ae8f5714 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_gateways.go @@ -0,0 +1,381 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/networking-go-sdk/directlinkv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + dlGateways = "gateways" + dlGatewaysId = "id" +) + +func dataSourceIBMDLGateways() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDLGatewaysRead, + Schema: map[string]*schema.Schema{ + dlGateways: { + Type: schema.TypeList, + Description: "Collection of direct link gateways", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + dlGatewaysId: { + Type: schema.TypeString, + Computed: true, + Description: "Id of the data source gateways", + }, + dlBgpAsn: { + Type: schema.TypeInt, + Computed: true, + Description: "BGP ASN", + }, + dlBgpBaseCidr: { + Type: schema.TypeString, + Computed: true, + Description: "BGP base CIDR", + }, + dlBgpCerCidr: { + Type: schema.TypeString, + Computed: true, + Description: "BGP customer edge router CIDR", + }, + dlBgpIbmAsn: { + Type: schema.TypeInt, + Computed: true, + Description: "IBM BGP ASN", + }, + dlBgpIbmCidr: { + Type: schema.TypeString, + Computed: true, + Description: "BGP IBM CIDR", + }, + dlBgpStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway BGP status", + }, + dlCompletionNoticeRejectReason: { + Type: schema.TypeString, + Computed: true, + Description: "Reason for completion notice rejection", + }, + dlCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time resource was created", + }, + dlCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN (Cloud Resource Name) of this gateway", + }, + dlCrossConnectRouter: { + Type: schema.TypeString, + Computed: true, + Description: "Cross connect router", + }, + dlGlobal: { + Type: schema.TypeBool, + Computed: true, + Description: "Gateways with global routing (true) can connect to networks outside their associated region", + }, + dlLinkStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway link status", + }, + dlLocationDisplayName: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway location long name", + }, + dlLocationName: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway location", + }, + dlMetered: { + Type: schema.TypeBool, + Computed: true, + Description: "Metered billing option", + }, + dlName: { + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for 
this gateway", + }, + dlOperationalStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway operational status", + }, + dlPort: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway port", + }, + dlProviderAPIManaged: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether gateway was created through a provider portal", + }, + dlResourceGroup: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway resource group", + }, + dlSpeedMbps: { + Type: schema.TypeInt, + Computed: true, + Description: "Gateway speed in megabits per second", + }, + dlType: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway type", + }, + dlMacSecConfig: { + Type: schema.TypeList, + Computed: true, + Description: "MACsec configuration information", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + dlActive: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicate whether MACsec protection should be active (true) or inactive (false) for this MACsec enabled gateway", + }, + dlActiveCak: { + Type: schema.TypeString, + Computed: true, + Description: "Active connectivity association key.", + }, + dlPrimaryCak: { + Type: schema.TypeString, + Computed: true, + Description: "Desired primary connectivity association key. Keys for a MACsec configuration must have names with an even number of characters from [0-9a-fA-F]", + }, + dlFallbackCak: { + Type: schema.TypeString, + Computed: true, + Description: "Fallback connectivity association key. Keys used for MACsec configuration must have names with an even number of characters from [0-9a-fA-F]", + }, + dlSakExpiryTime: { + Type: schema.TypeInt, + Computed: true, + Description: "Secure Association Key (SAK) expiry time in seconds", + }, + dlSecurityPolicy: { + Type: schema.TypeString, + Computed: true, + Description: "Packets without MACsec headers are not dropped when security_policy is should_secure.", + }, + dlWindowSize: { + Type: schema.TypeInt, + Computed: true, + Description: "Replay protection window size", + }, + dlCipherSuite: { + Type: schema.TypeString, + Computed: true, + Description: "SAK cipher suite", + }, + dlConfidentialityOffset: { + Type: schema.TypeInt, + Computed: true, + Description: "Confidentiality Offset", + }, + dlCryptographicAlgorithm: { + Type: schema.TypeString, + Computed: true, + Description: "Cryptographic Algorithm", + }, + dlKeyServerPriority: { + Type: schema.TypeInt, + Computed: true, + Description: "Key Server Priority", + }, + dlMacSecConfigStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The current status of MACsec on the device for this gateway", + }, + }, + }, + }, + dlChangeRequest: { + Type: schema.TypeString, + Computed: true, + Description: "Changes pending approval for provider managed Direct Link Connect gateways", + }, + dlVlan: { + Type: schema.TypeInt, + Computed: true, + Description: "VLAN allocated for this gateway", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMDLGatewaysRead(d *schema.ResourceData, meta interface{}) error { + directLink, err := directlinkClient(meta) + if err != nil { + return err + } + listGatewaysOptionsModel := &directlinkv1.ListGatewaysOptions{} + listGateways, response, err := directLink.ListGateways(listGatewaysOptionsModel) + if err != nil { + log.Println("[WARN] Error listing dl Gateway", response, err) + return err + } + gateways := make([]map[string]interface{}, 0) + for _, instance := range listGateways.Gateways { + gateway := 
map[string]interface{}{} + if instance.ID != nil { + gateway["id"] = *instance.ID + } + if instance.Name != nil { + gateway[dlName] = *instance.Name + } + if instance.Crn != nil { + gateway[dlCrn] = *instance.Crn + } + if instance.BgpAsn != nil { + gateway[dlBgpAsn] = *instance.BgpAsn + } + if instance.BgpIbmCidr != nil { + gateway[dlBgpIbmCidr] = *instance.BgpIbmCidr + } + if instance.BgpIbmAsn != nil { + gateway[dlBgpIbmAsn] = *instance.BgpIbmAsn + } + if instance.Metered != nil { + gateway[dlMetered] = *instance.Metered + } + if instance.CrossConnectRouter != nil { + gateway[dlCrossConnectRouter] = *instance.CrossConnectRouter + } + if instance.BgpBaseCidr != nil { + gateway[dlBgpBaseCidr] = *instance.BgpBaseCidr + } + if instance.BgpCerCidr != nil { + gateway[dlBgpCerCidr] = *instance.BgpCerCidr + } + + if instance.ProviderApiManaged != nil { + gateway[dlProviderAPIManaged] = *instance.ProviderApiManaged + } + if instance.Type != nil { + gateway[dlType] = *instance.Type + } + if instance.SpeedMbps != nil { + gateway[dlSpeedMbps] = *instance.SpeedMbps + } + if instance.OperationalStatus != nil { + gateway[dlOperationalStatus] = *instance.OperationalStatus + } + if instance.BgpStatus != nil { + gateway[dlBgpStatus] = *instance.BgpStatus + } + if instance.LocationName != nil { + gateway[dlLocationName] = *instance.LocationName + } + if instance.LocationDisplayName != nil { + gateway[dlLocationDisplayName] = *instance.LocationDisplayName + } + if instance.Vlan != nil { + gateway[dlVlan] = *instance.Vlan + } + if instance.Global != nil { + gateway[dlGlobal] = *instance.Global + } + if instance.Port != nil { + gateway[dlPort] = *instance.Port.ID + } + if instance.LinkStatus != nil { + gateway[dlLinkStatus] = *instance.LinkStatus + } + if instance.CreatedAt != nil { + gateway[dlCreatedAt] = instance.CreatedAt.String() + } + if instance.ResourceGroup != nil { + rg := instance.ResourceGroup + gateway[dlResourceGroup] = *rg.ID + } + dtype := *instance.Type + if dtype == "dedicated" { + if instance.MacsecConfig != nil { + macsecList := make([]map[string]interface{}, 0) + currentMacSec := map[string]interface{}{} + // Construct an instance of the GatewayMacsecConfigTemplate model + gatewayMacsecConfigTemplateModel := instance.MacsecConfig + if gatewayMacsecConfigTemplateModel.Active != nil { + currentMacSec[dlActive] = *gatewayMacsecConfigTemplateModel.Active + } + if gatewayMacsecConfigTemplateModel.ActiveCak != nil { + if gatewayMacsecConfigTemplateModel.ActiveCak.Crn != nil { + currentMacSec[dlActiveCak] = *gatewayMacsecConfigTemplateModel.ActiveCak.Crn + } + } + if gatewayMacsecConfigTemplateModel.PrimaryCak != nil { + currentMacSec[dlPrimaryCak] = *gatewayMacsecConfigTemplateModel.PrimaryCak.Crn + } + if gatewayMacsecConfigTemplateModel.FallbackCak != nil { + if gatewayMacsecConfigTemplateModel.FallbackCak.Crn != nil { + currentMacSec[dlFallbackCak] = *gatewayMacsecConfigTemplateModel.FallbackCak.Crn + } + } + if gatewayMacsecConfigTemplateModel.SakExpiryTime != nil { + currentMacSec[dlSakExpiryTime] = *gatewayMacsecConfigTemplateModel.SakExpiryTime + } + if gatewayMacsecConfigTemplateModel.SecurityPolicy != nil { + currentMacSec[dlSecurityPolicy] = *gatewayMacsecConfigTemplateModel.SecurityPolicy + } + if gatewayMacsecConfigTemplateModel.WindowSize != nil { + currentMacSec[dlWindowSize] = *gatewayMacsecConfigTemplateModel.WindowSize + } + if gatewayMacsecConfigTemplateModel.CipherSuite != nil { + currentMacSec[dlCipherSuite] = *gatewayMacsecConfigTemplateModel.CipherSuite + } + if 
gatewayMacsecConfigTemplateModel.ConfidentialityOffset != nil { + currentMacSec[dlConfidentialityOffset] = *gatewayMacsecConfigTemplateModel.ConfidentialityOffset + } + if gatewayMacsecConfigTemplateModel.CryptographicAlgorithm != nil { + currentMacSec[dlCryptographicAlgorithm] = *gatewayMacsecConfigTemplateModel.CryptographicAlgorithm + } + if gatewayMacsecConfigTemplateModel.KeyServerPriority != nil { + currentMacSec[dlKeyServerPriority] = *gatewayMacsecConfigTemplateModel.KeyServerPriority + } + if gatewayMacsecConfigTemplateModel.Status != nil { + currentMacSec[dlMacSecConfigStatus] = *gatewayMacsecConfigTemplateModel.Status + } + macsecList = append(macsecList, currentMacSec) + gateway[dlMacSecConfig] = macsecList + } + } + if instance.ChangeRequest != nil { + gatewayChangeRequestIntf := instance.ChangeRequest + gatewayChangeRequest := gatewayChangeRequestIntf.(*directlinkv1.GatewayChangeRequest) + gateway[dlChangeRequest] = *gatewayChangeRequest.Type + } + gateways = append(gateways, gateway) + } + d.SetId(dataSourceIBMDLGatewaysID(d)) + d.Set(dlGateways, gateways) + return nil +} + +// dataSourceIBMDLGatewaysID returns a reasonable ID for a direct link gateways list. +func dataSourceIBMDLGatewaysID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_locations.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_locations.go new file mode 100644 index 00000000000..57894b1e160 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_locations.go @@ -0,0 +1,174 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "github.com/IBM/networking-go-sdk/directlinkv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "time" +) + +const ( + dlBillingLocation = "billing_location" + dlLocDisplayName = "display_name" + dlLocationType = "location_type" + dlMarket = "market" + dlMarketGeography = "market_geography" + dlMzr = "mzr" + dlLocShortName = "name" + dlBuildingColocationOwner = "building_colocation_owner" + dlVpcRegion = "vpc_region" + dlLocations = "locations" + dlMacsec = "macsec_enabled" + dlProvisionEnabled = "provision_enabled" +) + +func dataSourceIBMDLLocations() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDLOfferingLocationsRead, + Schema: map[string]*schema.Schema{ + dlOfferingType: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"dedicated", "connect"}), + Description: "The Direct Link offering type. Current supported values (dedicated and connect).", + }, + dlLocations: { + Type: schema.TypeList, + Description: "Collection of valid locations for the specified Direct Link offering.", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + dlOfferingType: { + Type: schema.TypeString, + Computed: true, + Description: "The Direct Link offering type. Current supported values (dedicated and connect).", + }, + dlBillingLocation: { + Type: schema.TypeString, + Computed: true, + Description: "Billing location. Only present for locations where provisioning is enabled.", + }, + dlBuildingColocationOwner: { + Type: schema.TypeString, + Computed: true, + Description: "Building colocation owner. 
Only present for offering_type=dedicated locations where provisioning is enabled.", + }, + dlLocationType: { + Type: schema.TypeString, + Computed: true, + Description: "Location type", + }, + dlLocShortName: { + Type: schema.TypeString, + Computed: true, + Description: "Location short name", + }, dlLocDisplayName: { + Type: schema.TypeString, + Computed: true, + Description: "Location long name", + }, + dlMarket: { + Type: schema.TypeString, + Computed: true, + Description: "Location market", + }, + dlMarketGeography: { + Type: schema.TypeString, + Computed: true, + Description: "Location geography. Only present for locations where provisioning is enabled.", + }, + dlMzr: { + Type: schema.TypeBool, + Computed: true, + Description: "Is location a multi-zone region (MZR). Only present for locations where provisioning is enabled.", + }, + dlVpcRegion: { + Type: schema.TypeString, + Computed: true, + Description: "Location's VPC region. Only present for locations where provisioning is enabled.", + }, + dlMacsec: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether location supports MACsec.", + }, + dlProvisionEnabled: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates for the specific offering_type whether this location supports gateway provisioning.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMDLOfferingLocationsRead(d *schema.ResourceData, meta interface{}) error { + directLink, err := meta.(ClientSession).DirectlinkV1API() + if err != nil { + return err + } + listOfferingTypeLocationsOptions := &directlinkv1.ListOfferingTypeLocationsOptions{} + listOfferingTypeLocationsOptions.SetOfferingType(d.Get(dlOfferingType).(string)) + listLocations, response, err := directLink.ListOfferingTypeLocations(listOfferingTypeLocationsOptions) + if err != nil { + return fmt.Errorf("Error while listing directlink gateway's locations %s\n%s", err, response) + } + + locations := make([]map[string]interface{}, 0) + for _, instance := range listLocations.Locations { + location := map[string]interface{}{} + if instance.BuildingColocationOwner != nil { + location[dlBuildingColocationOwner] = *instance.BuildingColocationOwner + } + + if instance.DisplayName != nil { + location[dlLocDisplayName] = *instance.DisplayName + } + if instance.Name != nil { + location[dlLocShortName] = *instance.Name + } + if instance.LocationType != nil { + location[dlLocationType] = *instance.LocationType + } + if instance.OfferingType != nil { + location[dlOfferingType] = *instance.OfferingType + } + if instance.Market != nil { + location[dlMarket] = *instance.Market + } + + if instance.MarketGeography != nil { + location[dlMarketGeography] = *instance.MarketGeography + } + if instance.Mzr != nil { + location[dlMzr] = *instance.Mzr + } + if instance.VpcRegion != nil { + location[dlVpcRegion] = *instance.VpcRegion + } + if instance.BillingLocation != nil { + location[dlBillingLocation] = *instance.BillingLocation + } + if instance.MacsecEnabled != nil { + location[dlMacsec] = *instance.MacsecEnabled + } + if instance.ProvisionEnabled != nil { + location[dlProvisionEnabled] = *instance.ProvisionEnabled + } + locations = append(locations, location) + } + + d.SetId(dataSourceIBMDLLocationsID(d)) + d.Set(dlLocations, locations) + return nil +} + +// dataSourceIBMDLLocationsID returns a reasonable ID for a direct link offering locations list. 
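+// +// Example usage of the ibm_dl_locations data source defined above (an illustrative sketch; "dedicated" is one of the two supported offering types): +// +// data "ibm_dl_locations" "test_dl_locations" { +// offering_type = "dedicated" +// } +//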
+func dataSourceIBMDLLocationsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_offering_speeds.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_offering_speeds.go new file mode 100644 index 00000000000..e4175e9e606 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_offering_speeds.go @@ -0,0 +1,114 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/networking-go-sdk/directlinkv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + dlSpeeds = "offering_speeds" + dlLinkSpeed = "link_speed" + dlOfferingType = "offering_type" + dlMacSecEnabled = "macsec_enabled" + dlMeteringCapabilities = "capabilities" +) + +func dataSourceIBMDLOfferingSpeeds() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDLOfferingSpeedsRead, + Schema: map[string]*schema.Schema{ + dlOfferingType: { + Type: schema.TypeString, + Required: true, + Description: "The Direct Link offering type", + ValidateFunc: InvokeDataSourceValidator("ibm_dl_offering_speeds", dlOfferingType), + }, + dlSpeeds: { + Type: schema.TypeList, + Description: "Collection of direct link speeds", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + dlMeteringCapabilities: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of capabilities for billing option", + }, + dlLinkSpeed: { + Type: schema.TypeInt, + Computed: true, + Description: "Direct Link offering speed for the specified offering type", + }, + dlMacSecEnabled: { + Type: schema.TypeBool, + Optional: true, + Description: "Indicate whether speed supports MACsec", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMDLOfferingSpeedsRead(d *schema.ResourceData, meta interface{}) error { + directLink, err := directlinkClient(meta) + if err != nil { + return err + } + dlType := d.Get(dlOfferingType).(string) + listSpeedsOptionsModel := &directlinkv1.ListOfferingTypeSpeedsOptions{} + listSpeedsOptionsModel.OfferingType = &dlType + listSpeeds, detail, err := directLink.ListOfferingTypeSpeeds(listSpeedsOptionsModel) + + if err != nil { + log.Printf("Error reading list of direct link offering speeds:%s\n%s", err, detail) + return err + } + speeds := make([]map[string]interface{}, 0) + for _, instance := range listSpeeds.Speeds { + speed := map[string]interface{}{} + if instance.Capabilities != nil { + speed[dlMeteringCapabilities] = flattenStringList(instance.Capabilities) + } + if instance.LinkSpeed != nil { + speed[dlLinkSpeed] = *instance.LinkSpeed + } + if instance.MacsecEnabled != nil { + speed[dlMacSecEnabled] = *instance.MacsecEnabled + } + speeds = append(speeds, speed) + } + d.SetId(dataSourceIBMDLOfferingSpeedsID(d)) + d.Set(dlSpeeds, speeds) + return nil +} + +// dataSourceIBMDLOfferingSpeedsID returns a reasonable ID for a direct link speeds list. 
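+// +// Example usage of the ibm_dl_offering_speeds data source defined above (an illustrative sketch; "dedicated" is one of the allowed offering types): +// +// data "ibm_dl_offering_speeds" "test_dl_speeds" { +// offering_type = "dedicated" +// } +//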
+func dataSourceIBMDLOfferingSpeedsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func datasourceIBMDLOfferingSpeedsValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 2) + dlTypeAllowedValues := "dedicated, connect" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: dlOfferingType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: dlTypeAllowedValues}) + + ibmDLOfferingSpeedsDatasourceValidator := ResourceValidator{ResourceName: "ibm_dl_offering_speeds", Schema: validateSchema} + return &ibmDLOfferingSpeedsDatasourceValidator +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_port.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_port.go new file mode 100644 index 00000000000..a7815485ee1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_port.go @@ -0,0 +1,85 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMDirectLinkPort() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDirectLinkPortRead, + Schema: map[string]*schema.Schema{ + dlPortID: { + Type: schema.TypeString, + Required: true, + Description: "Port ID", + }, + dlCount: { + Type: schema.TypeInt, + Computed: true, + Description: "Count of existing Direct Link gateways in this account on this port", + }, + dlLabel: { + Type: schema.TypeString, + Computed: true, + Description: "Port Label", + }, + dlLocationDisplayName: { + Type: schema.TypeString, + Computed: true, + Description: "Port location long name", + }, + dlLocationName: { + Type: schema.TypeString, + Computed: true, + Description: "Port location name identifier", + }, + dlProviderName: { + Type: schema.TypeString, + Computed: true, + Description: "Port's provider name", + }, + dlSupportedLinkSpeeds: { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + Computed: true, + Description: "Port's supported speeds in megabits per second", + }, + }, + } +} + +func dataSourceIBMDirectLinkPortRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).DirectlinkV1API() + if err != nil { + return err + } + + getPortsOptions := sess.NewGetPortOptions(d.Get(dlPortID).(string)) + response, resp, err := sess.GetPort(getPortsOptions) + if err != nil { + log.Println("[WARN] Error getting port", resp, err) + return err + } + + d.SetId(*response.ID) + d.Set(dlPortID, *response.ID) + d.Set(dlCount, *response.DirectLinkCount) + d.Set(dlLabel, *response.Label) + d.Set(dlLocationDisplayName, *response.LocationDisplayName) + d.Set(dlLocationName, *response.LocationName) + d.Set(dlProviderName, *response.ProviderName) + speed := make([]interface{}, 0) + for _, s := range response.SupportedLinkSpeeds { + speed = append(speed, s) + } + d.Set(dlSupportedLinkSpeeds, speed) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_ports.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_ports.go new file mode 100644 index 00000000000..99c936ae717 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_ports.go @@ -0,0 +1,145 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
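+// +// Example usage (an illustrative sketch; the port ID and location name shown are hypothetical placeholders) for the ibm_dl_port data source above and the ibm_dl_ports data source defined below: +// +// data "ibm_dl_port" "test_dl_port" { +// port_id = "0a06fb9b-820f-4c44-8a31-77f1f0806765" +// } +// +// data "ibm_dl_ports" "test_dl_ports" { +// location_name = "dal10" +// } +//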
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + dl "github.com/IBM/networking-go-sdk/directlinkv1" + + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + dlPorts = "ports" + dlPortID = "port_id" + dlCount = "direct_link_count" + dlLabel = "label" + // dlLocationDisplayName = "location_display_name" + // dlLocationName = "location_name" + dlSupportedLinkSpeeds = "supported_link_speeds" + dlProviderName = "provider_name" +) + +func dataSourceIBMDirectLinkPorts() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDirectLinkPortsRead, + + Schema: map[string]*schema.Schema{ + dlLocationName: { + Type: schema.TypeString, + Optional: true, + Description: "Direct Link location short name", + }, + dlPorts: { + + Type: schema.TypeList, + Description: "Collection of direct link ports", + Computed: true, + Elem: &schema.Resource{ + + Schema: map[string]*schema.Schema{ + + dlPortID: { + Type: schema.TypeString, + Computed: true, + Description: "Port ID", + }, + dlCount: { + Type: schema.TypeInt, + Computed: true, + Description: "Count of existing Direct Link gateways in this account on this port", + }, + dlLabel: { + Type: schema.TypeString, + Computed: true, + Description: "Port Label", + }, + dlLocationDisplayName: { + Type: schema.TypeString, + Computed: true, + Description: "Port location long name", + }, + dlLocationName: { + Type: schema.TypeString, + Computed: true, + Description: "Port location name identifier", + }, + dlProviderName: { + Type: schema.TypeString, + Computed: true, + Description: "Port's provider name", + }, + dlSupportedLinkSpeeds: { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + Computed: true, + Description: "Port's supported speeds in megabits per second", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMDirectLinkPortsRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).DirectlinkV1API() + if err != nil { + return err + } + + start := "" + allrecs := []dl.Port{} + for { + listPortsOptions := sess.NewListPortsOptions() + if _, ok := d.GetOk(dlLocationName); ok { + dlLocationName := d.Get(dlLocationName).(string) + listPortsOptions.SetLocationName(dlLocationName) + } + if start != "" { + listPortsOptions.Start = &start + + } + + response, resp, err := sess.ListPorts(listPortsOptions) + if err != nil { + log.Println("[WARN] Error listing dl ports", resp, err) + return err + } + start = GetNext(response.Next) + allrecs = append(allrecs, response.Ports...) 
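+ // Keep paging with the start token parsed from response.Next; the empty-token check below ends the loop.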
+ if start == "" { + break + } + } + + portCollections := make([]map[string]interface{}, 0) + for _, port := range allrecs { + portCollection := map[string]interface{}{} + portCollection[dlPortID] = *port.ID + portCollection[dlCount] = *port.DirectLinkCount + portCollection[dlLabel] = *port.Label + portCollection[dlLocationDisplayName] = *port.LocationDisplayName + portCollection[dlLocationName] = *port.LocationName + portCollection[dlProviderName] = *port.ProviderName + speed := make([]interface{}, 0) + for _, s := range port.SupportedLinkSpeeds { + speed = append(speed, s) + } + portCollection[dlSupportedLinkSpeeds] = speed + portCollections = append(portCollections, portCollection) + } + d.SetId(dataSourceIBMDirectLinkPortsReadID(d)) + d.Set(dlPorts, portCollections) + return nil +} + +func dataSourceIBMDirectLinkPortsReadID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_provider_gateways.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_provider_gateways.go new file mode 100644 index 00000000000..fb187d264e6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_provider_gateways.go @@ -0,0 +1,218 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + dlProviderV2 "github.com/IBM/networking-go-sdk/directlinkproviderv2" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" + "time" +) + +const ( + dlProviderGateways = "gateways" + dlProviderGatewaysID = "id" +) + +func dataSourceIBMDirectLinkProviderGateways() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDirectLinkProviderGatewaysRead, + + Schema: map[string]*schema.Schema{ + + dlProviderGateways: { + + Type: schema.TypeList, + Description: "Collection of direct link provider ports", + Computed: true, + Elem: &schema.Resource{ + + Schema: map[string]*schema.Schema{ + dlProviderGatewaysID: { + Type: schema.TypeString, + Computed: true, + Description: "Id of the data source gateways", + }, + dlBgpAsn: { + Type: schema.TypeInt, + Computed: true, + Description: "BGP ASN", + }, + + dlBgpCerCidr: { + Type: schema.TypeString, + Computed: true, + Description: "BGP customer edge router CIDR", + }, + dlBgpIbmAsn: { + Type: schema.TypeInt, + Computed: true, + Description: "IBM BGP ASN", + }, + dlBgpIbmCidr: { + Type: schema.TypeString, + Computed: true, + Description: "BGP IBM CIDR", + }, + dlBgpStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway BGP status", + }, + customerAccountID: { + Type: schema.TypeString, + Computed: true, + Description: "Customer IBM Cloud account ID for the new gateway. 
A gateway object containing the pending create request will become available in the specified account.", + }, + dlCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time resource was created", + }, + dlCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN (Cloud Resource Name) of this gateway", + }, + + dlName: { + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for this gateway", + }, + dlOperationalStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway operational status", + }, + dlChangeRequest: { + Type: schema.TypeString, + Computed: true, + Description: "Changes pending approval for provider managed Direct Link gateways", + }, + dlPort: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway port", + }, + dlProviderAPIManaged: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether gateway was created through a provider portal", + }, + dlResourceGroup: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway resource group", + }, + dlSpeedMbps: { + Type: schema.TypeInt, + Computed: true, + Description: "Gateway speed in megabits per second", + }, + dlType: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway type", + }, + dlVlan: { + Type: schema.TypeInt, + Computed: true, + Description: "VLAN allocated for this gateway", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMDirectLinkProviderGatewaysRead(d *schema.ResourceData, meta interface{}) error { + directLinkProvider, err := directlinkProviderClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []dlProviderV2.ProviderGateway{} + for { + listProviderGatewaysOptions := directLinkProvider.NewListProviderGatewaysOptions() + if start != "" { + listProviderGatewaysOptions.Start = &start + } + + providerGateways, resp, err := directLinkProvider.ListProviderGateways(listProviderGatewaysOptions) + if err != nil { + log.Println("[WARN] Error listing dl provider gateways", providerGateways, resp, err) + return err + } + start = GetNext(providerGateways.Next) + allrecs = append(allrecs, providerGateways.Gateways...) 
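+ // Same pagination pattern as the ports data sources: continue until GetNext finds no further start token in providerGateways.Next.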
+ if start == "" { + break + } + } + gatewayCollections := make([]map[string]interface{}, 0) + for _, instance := range allrecs { + gatewayCollection := map[string]interface{}{} + + if instance.ID != nil { + gatewayCollection[dlProviderGatewaysID] = *instance.ID + } + if instance.Name != nil { + gatewayCollection[dlName] = *instance.Name + } + if instance.Crn != nil { + gatewayCollection[dlCrn] = *instance.Crn + } + if instance.BgpAsn != nil { + gatewayCollection[dlBgpAsn] = *instance.BgpAsn + } + if instance.BgpIbmCidr != nil { + gatewayCollection[dlBgpIbmCidr] = *instance.BgpIbmCidr + } + if instance.BgpIbmAsn != nil { + gatewayCollection[dlBgpIbmAsn] = *instance.BgpIbmAsn + } + + if instance.BgpCerCidr != nil { + gatewayCollection[dlBgpCerCidr] = *instance.BgpCerCidr + } + + if instance.ProviderApiManaged != nil { + gatewayCollection[dlProviderAPIManaged] = *instance.ProviderApiManaged + } + if instance.Type != nil { + gatewayCollection[dlType] = *instance.Type + } + if instance.SpeedMbps != nil { + gatewayCollection[dlSpeedMbps] = *instance.SpeedMbps + } + if instance.OperationalStatus != nil { + gatewayCollection[dlOperationalStatus] = *instance.OperationalStatus + } + if instance.BgpStatus != nil { + gatewayCollection[dlBgpStatus] = *instance.BgpStatus + } + if instance.Vlan != nil { + gatewayCollection[dlVlan] = *instance.Vlan + } + + if instance.Port != nil { + gatewayCollection[dlPort] = *instance.Port.ID + } + + if instance.CreatedAt != nil { + gatewayCollection[dlCreatedAt] = instance.CreatedAt.String() + } + + gatewayCollections = append(gatewayCollections, gatewayCollection) + } + d.SetId(dataSourceIBMDirectLinkProviderGatewaysReadID(d)) + d.Set(dlProviderGateways, gatewayCollections) + return nil +} + +func dataSourceIBMDirectLinkProviderGatewaysReadID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_provider_ports.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_provider_ports.go new file mode 100644 index 00000000000..2beb0834071 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_provider_ports.go @@ -0,0 +1,119 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
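+// +// Example usage (an illustrative sketch) for the ibm_dl_provider_gateways data source above and the ibm_dl_provider_ports data source defined below; neither requires any arguments: +// +// data "ibm_dl_provider_gateways" "test_dl_provider_gws" {} +// +// data "ibm_dl_provider_ports" "test_dl_provider_ports" {} +//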
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + dlProviderV2 "github.com/IBM/networking-go-sdk/directlinkproviderv2" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" + "time" +) + +func dataSourceIBMDirectLinkProviderPorts() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDirectLinkProviderPortsRead, + + Schema: map[string]*schema.Schema{ + + dlPorts: { + + Type: schema.TypeList, + Description: "Collection of direct link provider ports", + Computed: true, + Elem: &schema.Resource{ + + Schema: map[string]*schema.Schema{ + + dlPortID: { + Type: schema.TypeString, + Computed: true, + Description: "Port ID", + }, + + dlLabel: { + Type: schema.TypeString, + Computed: true, + Description: "Port Label", + }, + dlLocationDisplayName: { + Type: schema.TypeString, + Computed: true, + Description: "Port location long name", + }, + dlLocationName: { + Type: schema.TypeString, + Computed: true, + Description: "Port location name identifier", + }, + dlProviderName: { + Type: schema.TypeString, + Computed: true, + Description: "Port's provider name", + }, + dlSupportedLinkSpeeds: { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + Computed: true, + Description: "Port's supported speeds in megabits per second", + }, + }, + }, + }, + }, + } +} + +func directlinkProviderClient(meta interface{}) (*dlProviderV2.DirectLinkProviderV2, error) { + sess, err := meta.(ClientSession).DirectlinkProviderV2API() + return sess, err +} +func dataSourceIBMDirectLinkProviderPortsRead(d *schema.ResourceData, meta interface{}) error { + directLinkProvider, err := directlinkProviderClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []dlProviderV2.ProviderPort{} + for { + listPortsProviderOptions := directLinkProvider.NewListProviderPortsOptions() + if start != "" { + listPortsProviderOptions.Start = &start + } + + ports, resp, err := directLinkProvider.ListProviderPorts(listPortsProviderOptions) + if err != nil { + log.Println("[WARN] Error listing dl provider ports", ports, resp, err) + return err + } + start = GetNext(ports.Next) + allrecs = append(allrecs, ports.Ports...) + if start == "" { + break + } + } + portCollections := make([]map[string]interface{}, 0) + for _, port := range allrecs { + portCollection := map[string]interface{}{} + portCollection[dlPortID] = *port.ID + portCollection[dlLabel] = *port.Label + portCollection[dlLocationDisplayName] = *port.LocationDisplayName + portCollection[dlLocationName] = *port.LocationName + portCollection[dlProviderName] = *port.ProviderName + speed := make([]interface{}, 0) + for _, s := range port.SupportedLinkSpeeds { + speed = append(speed, s) + } + portCollection[dlSupportedLinkSpeeds] = speed + portCollections = append(portCollections, portCollection) + } + d.SetId(dataSourceIBMDirectLinkProviderPortsReadID(d)) + d.Set(dlPorts, portCollections) + return nil +} + +func dataSourceIBMDirectLinkProviderPortsReadID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_routers.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_routers.go new file mode 100644 index 00000000000..0a8fd6cca9d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dl_routers.go @@ -0,0 +1,122 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
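+// +// Example usage of the ibm_dl_routers data source defined below (an illustrative sketch; "dal09" is a hypothetical location short name): +// +// data "ibm_dl_routers" "test_dl_routers" { +// offering_type = "dedicated" +// location_name = "dal09" +// } +//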
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/networking-go-sdk/directlinkv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + dlCrossConnectRouters = "cross_connect_routers" + dlRouterName = "router_name" + dlTotalConns = "total_connections" + dlLocation = "location_name" + dlMacsecCapabilities = "capabilities" +) + +func dataSourceIBMDLRouters() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDLRoutersRead, + Schema: map[string]*schema.Schema{ + dlOfferingType: { + Type: schema.TypeString, + Required: true, + Description: "The Direct Link offering type", + ValidateFunc: InvokeDataSourceValidator("ibm_dl_routers", dlOfferingType), + }, + dlLocation: { + Type: schema.TypeString, + Required: true, + Description: "The name of the Direct Link location", + }, + dlCrossConnectRouters: { + Type: schema.TypeList, + Description: "Collection of Direct Link cross connect routers", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + dlMacsecCapabilities: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of capabilities for this router", + }, + dlRouterName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the Router", + }, + dlTotalConns: { + Type: schema.TypeInt, + Computed: true, + Description: "Count of existing Direct Link Dedicated gateways on this router for this account", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMDLRoutersRead(d *schema.ResourceData, meta interface{}) error { + directLink, err := directlinkClient(meta) + if err != nil { + return err + } + dlType := d.Get(dlOfferingType).(string) + dlLocName := d.Get(dlLocation).(string) + listRoutersOptionsModel := &directlinkv1.ListOfferingTypeLocationCrossConnectRoutersOptions{} + listRoutersOptionsModel.OfferingType = &dlType + listRoutersOptionsModel.LocationName = &dlLocName + + listRouters, detail, err := directLink.ListOfferingTypeLocationCrossConnectRouters(listRoutersOptionsModel) + + if err != nil { + return fmt.Errorf("Error Getting Direct Link Location Cross Connect Routers: %s\n%s", err, detail) + } + + routers := make([]map[string]interface{}, 0) + for _, instance := range listRouters.CrossConnectRouters { + route := map[string]interface{}{} + if instance.Capabilities != nil { + route[dlMacsecCapabilities] = flattenStringList(instance.Capabilities) + } + if instance.RouterName != nil { + route[dlRouterName] = *instance.RouterName + } + if instance.TotalConnections != nil { + route[dlTotalConns] = *instance.TotalConnections + } + routers = append(routers, route) + } + d.SetId(dataSourceIBMDLRoutersID(d)) + d.Set(dlCrossConnectRouters, routers) + return nil +} + +// dataSourceIBMDLSpeedsID returns a reasonable ID for a direct link speeds list. 
+func dataSourceIBMDLRoutersID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func datasourceIBMDLRoutersValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 2) + dlTypeAllowedValues := "dedicated" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: dlOfferingType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: dlTypeAllowedValues}) + + ibmDLRoutersDatasourceValidator := ResourceValidator{ResourceName: "ibm_dl_routers", Schema: validateSchema} + return &ibmDLRoutersDatasourceValidator +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dns_domain.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dns_domain.go new file mode 100644 index 00000000000..406b75fd209 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dns_domain.go @@ -0,0 +1,55 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMDNSDomain() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDNSDomainRead, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Description: "A domain record's internal identifier", + Type: schema.TypeInt, + Computed: true, + }, + + "name": &schema.Schema{ + Description: "The name of the domain", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMDNSDomainRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + name := d.Get("name").(string) + + names, err := service. + Filter(filter.Build(filter.Path("domains.name").Eq(name))). + Mask("id,name"). + GetDomains() + + if err != nil { + return fmt.Errorf("Error retrieving domain: %s", err) + } + + if len(names) == 0 { + return fmt.Errorf("No domain found with name [%s]", name) + } + + d.SetId(fmt.Sprintf("%d", *names[0].Id)) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dns_domain_registration.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dns_domain_registration.go new file mode 100644 index 00000000000..cb50380f4c6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dns_domain_registration.go @@ -0,0 +1,86 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
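+// +// Example usage (an illustrative sketch; "example.com" is a hypothetical domain name) for the ibm_dns_domain data source above and the ibm_dns_domain_registration data source defined below: +// +// data "ibm_dns_domain" "domain_id" { +// name = "example.com" +// } +// +// data "ibm_dns_domain_registration" "registration" { +// name = "example.com" +// } +//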
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "log" +) + +func dataSourceIBMDNSDomainRegistration() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDNSDomainRegistrationRead, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Description: "A domain registration record's internal identifier", + Type: schema.TypeInt, + Computed: true, + }, + + "name": &schema.Schema{ + Description: "The name of the domain registration", + Type: schema.TypeString, + Required: true, + }, + "name_servers": &schema.Schema{ + Description: "Custom name servers for the domain registration", + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceIBMDNSDomainRegistrationRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + name := d.Get("name").(string) + names, err := service. + Filter(filter.Build(filter.Path("domainRegistrations.name").Eq(name))). + Mask("id,name"). + GetDomainRegistrations() + + if err != nil { + return fmt.Errorf("Error retrieving domain registration: %s", err) + } + + if len(names) == 0 { + return fmt.Errorf("No domain registration found with name [%s]", name) + } + + log.Printf("names %v\n", names) + dnsId := *names[0].Id + log.Printf("Domain Registration Id %d\n", dnsId) + + // Get nameservers for domain + nService := services.GetDnsDomainRegistrationService(sess) + + // retrieve remote object state + dns_domain_nameservers, err := nService.Id(dnsId). + Mask("nameservers.name"). + GetDomainNameservers() + + log.Printf("list %v\n", dns_domain_nameservers) + + ns := make([]string, len(dns_domain_nameservers[0].Nameservers)) + for i, elem := range dns_domain_nameservers[0].Nameservers { + ns[i] = *elem.Name + } + + log.Printf("names %v\n", ns) + + if err != nil { + return fmt.Errorf("Error retrieving domain registration nameservers: %s", err) + } + + d.SetId(fmt.Sprintf("%d", dnsId)) + d.Set("name_servers", ns) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dns_secondary.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dns_secondary.go new file mode 100644 index 00000000000..d1037351534 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_dns_secondary.go @@ -0,0 +1,80 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
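+// +// Example usage of the ibm_dns_secondary data source defined below (an illustrative sketch; the zone name is a hypothetical placeholder): +// +// data "ibm_dns_secondary" "secondary_zone" { +// zone_name = "example.com" +// } +//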
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMDNSSecondary() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDNSSecondaryRead, + + Schema: map[string]*schema.Schema{ + + "zone_name": &schema.Schema{ + Description: "The name of the secondary", + Type: schema.TypeString, + Required: true, + }, + + "master_ip_address": { + Type: schema.TypeString, + Computed: true, + }, + + "transfer_frequency": { + Type: schema.TypeInt, + Computed: true, + }, + + "status_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "status_text": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMDNSSecondaryRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + name := d.Get("zone_name").(string) + + names, err := service. + Mask("id, masterIpAddress, transferFrequency, zoneName, statusId, statusText"). + GetSecondaryDomains() + + if err != nil { + return fmt.Errorf("Error retrieving secondary zone: %s", err) + } + + if len(names) == 0 { + return fmt.Errorf("No secondary zone found with name: %s", name) + } + + for _, zone := range names { + if name == *zone.ZoneName { + d.SetId(fmt.Sprintf("%d", *zone.Id)) + d.Set("master_ip_address", *zone.MasterIpAddress) + d.Set("transfer_frequency", *zone.TransferFrequency) + d.Set("zone_name", *zone.ZoneName) + d.Set("status_id", *zone.StatusId) + d.Set("status_text", *zone.StatusText) + return nil + + } + } + return fmt.Errorf("No secondary zone found with name: %s", name) + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_enterprise_account_groups.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_enterprise_account_groups.go new file mode 100644 index 00000000000..13eaefe8e6d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_enterprise_account_groups.go @@ -0,0 +1,258 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
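+// +// Example usage of the ibm_enterprise_account_groups data source defined below (an illustrative sketch; the optional name filter value is a hypothetical placeholder): +// +// data "ibm_enterprise_account_groups" "account_groups" { +// name = "example-account-group" +// } +//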
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + + "log" + "net/url" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/enterprisemanagementv1" +) + +func dataSourceIbmEnterpriseAccountGroups() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmEnterpriseAccountGroupsRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The name of the account group.", + ValidateFunc: validateAllowedEnterpriseNameValue(), + }, + "account_groups": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A list of account groups.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL of the account group.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The account group ID.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The Cloud Resource Name (CRN) of the account group.", + }, + "parent": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN of the parent of the account group.", + }, + "enterprise_account_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The enterprise account ID.", + }, + "enterprise_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The enterprise ID that the account group is a part of.", + }, + "enterprise_path": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The path from the enterprise to this particular account group.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The name of the account group.", + }, + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The state of the account group.", + }, + "primary_contact_iam_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the primary contact of the account group.", + }, + "primary_contact_email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The email address of the primary contact of the account group.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time stamp at which the account group was created.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the user or service that created the account group.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time stamp at which the account group was last updated.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the user or service that updated the account group.", + }, + }, + }, + }, + }, + } +} + +func getEnterpriseNext(next *string) (string, error) { + if reflect.ValueOf(next).IsNil() { + return "", nil + } + u, err := url.Parse(*next) + if err != nil { + return "", err + } + q := u.Query() + return q.Get("next_docid"), nil +} + +func dataSourceIbmEnterpriseAccountGroupsRead(d *schema.ResourceData, meta interface{}) error { + enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1() + if err != nil { + return err + } + next_docid := "" + 
var allRecs []enterprisemanagementv1.AccountGroup + for { + listAccountGroupsOptions := &enterprisemanagementv1.ListAccountGroupsOptions{} + if next_docid != "" { + listAccountGroupsOptions.NextDocid = &next_docid + } + listAccountGroupsResponse, response, err := enterpriseManagementClient.ListAccountGroupsWithContext(context.TODO(), listAccountGroupsOptions) + if err != nil { + log.Printf("[DEBUG] ListAccountGroupsWithContext failed %s\n%s", err, response) + return err + } + next_docid, err = getEnterpriseNext(listAccountGroupsResponse.NextURL) + if err != nil { + log.Printf("[DEBUG] ListAccountGroupsWithContext failed. Error occurred while parsing NextURL: %s", err) + return err + } + allRecs = append(allRecs, listAccountGroupsResponse.Resources...) + if next_docid == "" { + break + } + } + + // Use the provided filter argument and construct a new list with only the requested resource(s) + var matchResources []enterprisemanagementv1.AccountGroup + var name string + var suppliedFilter bool + if v, ok := d.GetOk("name"); ok { + name = v.(string) + suppliedFilter = true + for _, data := range allRecs { + if *data.Name == name { + matchResources = append(matchResources, data) + } + } + } else { + matchResources = allRecs + } + allRecs = matchResources + + if len(allRecs) == 0 { + return fmt.Errorf("no Resources found with name %s", name) + } + + if suppliedFilter { + d.SetId(name) + } else { + d.SetId(dataSourceIbmAccountGroupsID(d)) + } + if allRecs != nil { + err = d.Set("account_groups", dataSourceListEnterpriseAccountGroupsResponseFlattenResources(allRecs)) + if err != nil { + return fmt.Errorf("Error setting resources %s", err) + } + } + + return nil +} + +// dataSourceIbmAccountGroupsID returns a reasonable ID for the list. +func dataSourceIbmAccountGroupsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceListEnterpriseAccountGroupsResponseFlattenResources(result []enterprisemanagementv1.AccountGroup) (resources []map[string]interface{}) { + for _, resourcesItem := range result { + resources = append(resources, dataSourceListEnterpriseAccountGroupsResponseResourcesToMap(resourcesItem)) + } + + return resources +} + +func dataSourceListEnterpriseAccountGroupsResponseResourcesToMap(resourcesItem enterprisemanagementv1.AccountGroup) (resourcesMap map[string]interface{}) { + resourcesMap = map[string]interface{}{} + + if resourcesItem.URL != nil { + resourcesMap["url"] = resourcesItem.URL + } + if resourcesItem.ID != nil { + resourcesMap["id"] = resourcesItem.ID + } + if resourcesItem.CRN != nil { + resourcesMap["crn"] = resourcesItem.CRN + } + if resourcesItem.Parent != nil { + resourcesMap["parent"] = resourcesItem.Parent + } + if resourcesItem.EnterpriseAccountID != nil { + resourcesMap["enterprise_account_id"] = resourcesItem.EnterpriseAccountID + } + if resourcesItem.EnterpriseID != nil { + resourcesMap["enterprise_id"] = resourcesItem.EnterpriseID + } + if resourcesItem.EnterprisePath != nil { + resourcesMap["enterprise_path"] = resourcesItem.EnterprisePath + } + if resourcesItem.Name != nil { + resourcesMap["name"] = resourcesItem.Name + } + if resourcesItem.State != nil { + resourcesMap["state"] = resourcesItem.State + } + if resourcesItem.PrimaryContactIamID != nil { + resourcesMap["primary_contact_iam_id"] = resourcesItem.PrimaryContactIamID + } + if resourcesItem.PrimaryContactEmail != nil { + resourcesMap["primary_contact_email"] = resourcesItem.PrimaryContactEmail + } + if resourcesItem.CreatedAt != nil { + resourcesMap["created_at"] = 
resourcesItem.CreatedAt.String() + } + if resourcesItem.CreatedBy != nil { + resourcesMap["created_by"] = resourcesItem.CreatedBy + } + if resourcesItem.UpdatedAt != nil { + resourcesMap["updated_at"] = resourcesItem.UpdatedAt.String() + } + if resourcesItem.UpdatedBy != nil { + resourcesMap["updated_by"] = resourcesItem.UpdatedBy + } + + return resourcesMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_enterprise_accounts.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_enterprise_accounts.go new file mode 100644 index 00000000000..73c2cee79b4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_enterprise_accounts.go @@ -0,0 +1,261 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/enterprisemanagementv1" +) + +func dataSourceIbmEnterpriseAccounts() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmEnterpriseAccountsRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The name of the account.", + ValidateFunc: validateAllowedEnterpriseNameValue(), + }, + "accounts": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A list of accounts.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL of the account.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The account ID.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The Cloud Resource Name (CRN) of the account.", + }, + "parent": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN of the parent of the account.", + }, + "enterprise_account_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The enterprise account ID.", + }, + "enterprise_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The enterprise ID that the account is a part of.", + }, + "enterprise_path": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The path from the enterprise to this particular account.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The name of the account.", + }, + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The state of the account.", + }, + "owner_iam_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the owner of the account.", + }, + "paid": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "The type of account - whether it is free or paid.", + }, + "owner_email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The email address of the owner of the account.", + }, + "is_enterprise_account": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "The flag to indicate whether the account is an enterprise account or not.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time stamp at which the account was created.", + }, + "created_by": 
&schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the user or service that created the account.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time stamp at which the account was last updated.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the user or service that updated the account.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIbmEnterpriseAccountsRead(d *schema.ResourceData, meta interface{}) error { + enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1() + if err != nil { + return err + } + next_docid := "" + var allRecs []enterprisemanagementv1.Account + for { + listAccountsOptions := &enterprisemanagementv1.ListAccountsOptions{} + if next_docid != "" { + listAccountsOptions.NextDocid = &next_docid + } + listAccountsResponse, response, err := enterpriseManagementClient.ListAccountsWithContext(context.TODO(), listAccountsOptions) + if err != nil { + log.Printf("[DEBUG] ListAccountsWithContext failed %s\n%s", err, response) + return err + } + next_docid, err = getEnterpriseNext(listAccountsResponse.NextURL) + if err != nil { + log.Printf("[DEBUG] ListAccountsWithContext failed. Error occurred while parsing NextURL: %s", err) + return err + } + allRecs = append(allRecs, listAccountsResponse.Resources...) + if next_docid == "" { + break + } + } + + // Use the provided filter argument and construct a new list with only the requested resource(s) + var matchResources []enterprisemanagementv1.Account + var name string + var suppliedFilter bool + + if v, ok := d.GetOk("name"); ok { + name = v.(string) + suppliedFilter = true + for _, data := range allRecs { + if *data.Name == name { + matchResources = append(matchResources, data) + } + } + } else { + matchResources = allRecs + } + allRecs = matchResources + + if len(allRecs) == 0 { + return fmt.Errorf("no Resources found with name %s\nIf not specified, please specify more filters", name) + } + + if suppliedFilter { + d.SetId(name) + } else { + d.SetId(dataSourceIbmEnterpriseAccountsID(d)) + } + + if allRecs != nil { + err = d.Set("accounts", dataSourceListEnterpriseAccountsResponseFlattenResources(allRecs)) + if err != nil { + return fmt.Errorf("Error setting resources %s", err) + } + } + + return nil +} + +// dataSourceIbmAccountsID returns a reasonable ID for the list. 
+func dataSourceIbmEnterpriseAccountsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceListEnterpriseAccountsResponseFlattenResources(result []enterprisemanagementv1.Account) (resources []map[string]interface{}) { + for _, resourcesItem := range result { + resources = append(resources, dataSourceListEnterpriseAccountsResponseResourcesToMap(resourcesItem)) + } + + return resources +} + +func dataSourceListEnterpriseAccountsResponseResourcesToMap(resourcesItem enterprisemanagementv1.Account) (resourcesMap map[string]interface{}) { + resourcesMap = map[string]interface{}{} + + if resourcesItem.URL != nil { + resourcesMap["url"] = resourcesItem.URL + } + if resourcesItem.ID != nil { + resourcesMap["id"] = resourcesItem.ID + } + if resourcesItem.CRN != nil { + resourcesMap["crn"] = resourcesItem.CRN + } + if resourcesItem.Parent != nil { + resourcesMap["parent"] = resourcesItem.Parent + } + if resourcesItem.EnterpriseAccountID != nil { + resourcesMap["enterprise_account_id"] = resourcesItem.EnterpriseAccountID + } + if resourcesItem.EnterpriseID != nil { + resourcesMap["enterprise_id"] = resourcesItem.EnterpriseID + } + if resourcesItem.EnterprisePath != nil { + resourcesMap["enterprise_path"] = resourcesItem.EnterprisePath + } + if resourcesItem.Name != nil { + resourcesMap["name"] = resourcesItem.Name + } + if resourcesItem.State != nil { + resourcesMap["state"] = resourcesItem.State + } + if resourcesItem.OwnerIamID != nil { + resourcesMap["owner_iam_id"] = resourcesItem.OwnerIamID + } + if resourcesItem.Paid != nil { + resourcesMap["paid"] = resourcesItem.Paid + } + if resourcesItem.OwnerEmail != nil { + resourcesMap["owner_email"] = resourcesItem.OwnerEmail + } + if resourcesItem.IsEnterpriseAccount != nil { + resourcesMap["is_enterprise_account"] = resourcesItem.IsEnterpriseAccount + } + if resourcesItem.CreatedAt != nil { + resourcesMap["created_at"] = resourcesItem.CreatedAt.String() + } + if resourcesItem.CreatedBy != nil { + resourcesMap["created_by"] = resourcesItem.CreatedBy + } + if resourcesItem.UpdatedAt != nil { + resourcesMap["updated_at"] = resourcesItem.UpdatedAt.String() + } + if resourcesItem.UpdatedBy != nil { + resourcesMap["updated_by"] = resourcesItem.UpdatedBy + } + + return resourcesMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_enterprises.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_enterprises.go new file mode 100644 index 00000000000..3aa98974599 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_enterprises.go @@ -0,0 +1,214 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
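Both enterprise list data sources page through results with the next_docid token that getEnterpriseNext extracts from the NextURL returned by the Enterprise Management API. A runnable stdlib sketch of the same parsing, with an illustrative URL; for a plain *string argument the nil check below is equivalent to the reflect-based one in the vendored helper:

package main

import (
	"fmt"
	"net/url"
)

// nextDocID mirrors getEnterpriseNext: it pulls the next_docid query
// parameter out of the API's NextURL, returning "" when there are no
// further pages.
func nextDocID(next *string) (string, error) {
	if next == nil {
		return "", nil
	}
	u, err := url.Parse(*next)
	if err != nil {
		return "", err
	}
	return u.Query().Get("next_docid"), nil
}

func main() {
	page := "https://enterprise.cloud.ibm.com/v1/accounts?next_docid=abc123" // illustrative URL
	id, err := nextDocID(&page)
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // abc123
	id, _ = nextDocID(nil)
	fmt.Printf("last page token: %q\n", id) // ""
}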
+// Licensed under the Mozilla Public License v2.0 +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/enterprisemanagementv1" +) + +func dataSourceIbmEnterprises() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmEnterprisesRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The name of the enterprise.", + ValidateFunc: validateAllowedEnterpriseNameValue(), + }, + "enterprises": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A list of enterprise objects.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL of the enterprise.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The enterprise ID.", + }, + "enterprise_account_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The enterprise account ID.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The Cloud Resource Name (CRN) of the enterprise.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The name of the enterprise.", + }, + "domain": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The domain of the enterprise.", + }, + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The state of the enterprise.", + }, + "primary_contact_iam_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the primary contact of the enterprise, such as `IBMid-0123ABC`.", + }, + "primary_contact_email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The email of the primary contact of the enterprise.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time stamp at which the enterprise was created.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the user or service that created the enterprise.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time stamp at which the enterprise was last updated.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the user or service that updated the enterprise.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIbmEnterprisesRead(d *schema.ResourceData, meta interface{}) error { + enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1() + if err != nil { + return err + } + + listEnterprisesOptions := &enterprisemanagementv1.ListEnterprisesOptions{} + + listEnterprisesResponse, response, err := enterpriseManagementClient.ListEnterprisesWithContext(context.TODO(), listEnterprisesOptions) + if err != nil { + log.Printf("[DEBUG] ListEnterprisesWithContext failed %s\n%s", err, response) + return err + } + + // Use the provided filter argument and construct a new list with only the requested resource(s) + var matchResources []enterprisemanagementv1.Enterprise + var name string + var suppliedFilter bool + + if v, ok := d.GetOk("name"); ok { + name = v.(string) + suppliedFilter = true + for _, data := range listEnterprisesResponse.Resources { + if 
*data.Name == name { + matchResources = append(matchResources, data) + } + } + } else { + matchResources = listEnterprisesResponse.Resources + } + listEnterprisesResponse.Resources = matchResources + + if len(listEnterprisesResponse.Resources) == 0 { + return fmt.Errorf("no Resources found with name %s\nIf not specified, please specify more filters", name) + } + + if suppliedFilter { + d.SetId(name) + } else { + d.SetId(dataSourceIbmEnterprisesID(d)) + } + + if listEnterprisesResponse.Resources != nil { + err = d.Set("enterprises", dataSourceListEnterprisesResponseFlattenResources(listEnterprisesResponse.Resources)) + if err != nil { + return fmt.Errorf("Error setting resources %s", err) + } + } + + return nil +} + +// dataSourceIbmEnterprisesID returns a reasonable ID for the list. +func dataSourceIbmEnterprisesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceListEnterprisesResponseFlattenResources(result []enterprisemanagementv1.Enterprise) (resources []map[string]interface{}) { + for _, resourcesItem := range result { + resources = append(resources, dataSourceListEnterprisesResponseResourcesToMap(resourcesItem)) + } + + return resources +} + +func dataSourceListEnterprisesResponseResourcesToMap(resourcesItem enterprisemanagementv1.Enterprise) (resourcesMap map[string]interface{}) { + resourcesMap = map[string]interface{}{} + + if resourcesItem.URL != nil { + resourcesMap["url"] = resourcesItem.URL + } + if resourcesItem.ID != nil { + resourcesMap["id"] = resourcesItem.ID + } + if resourcesItem.EnterpriseAccountID != nil { + resourcesMap["enterprise_account_id"] = resourcesItem.EnterpriseAccountID + } + if resourcesItem.CRN != nil { + resourcesMap["crn"] = resourcesItem.CRN + } + if resourcesItem.Name != nil { + resourcesMap["name"] = resourcesItem.Name + } + if resourcesItem.Domain != nil { + resourcesMap["domain"] = resourcesItem.Domain + } + if resourcesItem.State != nil { + resourcesMap["state"] = resourcesItem.State + } + if resourcesItem.PrimaryContactIamID != nil { + resourcesMap["primary_contact_iam_id"] = resourcesItem.PrimaryContactIamID + } + if resourcesItem.PrimaryContactEmail != nil { + resourcesMap["primary_contact_email"] = resourcesItem.PrimaryContactEmail + } + if resourcesItem.CreatedAt != nil { + resourcesMap["created_at"] = resourcesItem.CreatedAt.String() + } + if resourcesItem.CreatedBy != nil { + resourcesMap["created_by"] = resourcesItem.CreatedBy + } + if resourcesItem.UpdatedAt != nil { + resourcesMap["updated_at"] = resourcesItem.UpdatedAt.String() + } + if resourcesItem.UpdatedBy != nil { + resourcesMap["updated_by"] = resourcesItem.UpdatedBy + } + + return resourcesMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_event_streams_topic.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_event_streams_topic.go new file mode 100644 index 00000000000..fb4b50fdeaa --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_event_streams_topic.go @@ -0,0 +1,75 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
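The enterprises read follows the same shape as the account and account-group reads: list everything, then filter client-side when a name argument was supplied. A generic stdlib sketch of that filter, using a cut-down stand-in for the SDK struct; the vendored code assumes Name is always non-nil, so the guard here is defensive:

package main

import "fmt"

// Enterprise is a cut-down stand-in for enterprisemanagementv1.Enterprise;
// the SDK exposes Name as *string, which is why the data source compares
// through a dereference.
type Enterprise struct{ Name *string }

// filterByName keeps only records whose Name matches, skipping nil names.
func filterByName(recs []Enterprise, name string) []Enterprise {
	var out []Enterprise
	for _, r := range recs {
		if r.Name != nil && *r.Name == name {
			out = append(out, r)
		}
	}
	return out
}

func main() {
	a, b := "prod", "dev"
	recs := []Enterprise{{&a}, {&b}, {nil}}
	fmt.Println(len(filterByName(recs, "prod"))) // 1
}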
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMEventStreamsTopic() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMEventStreamsTopicRead, + Schema: map[string]*schema.Schema{ + "resource_instance_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The CRN of the Event Streams instance", + }, + "kafka_http_url": { + Type: schema.TypeString, + Computed: true, + Description: "The API endpoint for interacting with Event Streams REST API", + }, + "kafka_brokers_sasl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Kafka brokers addresses for interacting with Kafka native API", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Description: "The name of the topic", + Required: true, + }, + "partitions": &schema.Schema{ + Type: schema.TypeInt, + Description: "The number of partitions of the topic", + Computed: true, + }, + "config": &schema.Schema{ + Type: schema.TypeMap, + Description: "The configuration parameters of the topic.", + Computed: true, + }, + }, + } +} + +func dataSourceIBMEventStreamsTopicRead(d *schema.ResourceData, meta interface{}) error { + adminClient, instanceCRN, err := createSaramaAdminClient(d, meta) + if err != nil { + log.Printf("[DEBUG]dataSourceIBMEventStreamsTopicRead createSaramaAdminClient err %s", err) + return err + } + topics, err := adminClient.ListTopics() + if err != nil { + log.Printf("[DEBUG]dataSourceIBMEventStreamsTopicRead ListTopics err %s", err) + return err + } + topicName := d.Get("name").(string) + for name := range topics { + if name == topicName { + topicID := getTopicID(instanceCRN, topicName) + d.SetId(topicID) + log.Printf("[INFO]dataSourceIBMEventStreamsTopicRead set topic ID to %s", topicID) + d.Set("resource_instance_id", instanceCRN) + return nil + } + } + log.Printf("[DEBUG]dataSourceIBMEventStreamsTopicRead topic %s does not exist", topicName) + return fmt.Errorf("topic %s does not exist", topicName) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_action.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_action.go new file mode 100644 index 00000000000..ca49928a962 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_action.go @@ -0,0 +1,188 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
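The topic read above relies on createSaramaAdminClient, which is defined elsewhere in the provider and is not shown in this diff. A rough sketch of the kind of Sarama setup it implies; the broker address is illustrative, and the TLS/SASL options a real Event Streams instance requires are omitted:

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama" // import path used by Sarama at the time of this vendoring
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_3_0_0 // ClusterAdmin requires at least Kafka 0.10
	admin, err := sarama.NewClusterAdmin([]string{"broker-0.example.com:9093"}, cfg)
	if err != nil {
		log.Fatalf("creating admin client: %s", err)
	}
	defer admin.Close()

	topics, err := admin.ListTopics() // map[string]sarama.TopicDetail
	if err != nil {
		log.Fatalf("listing topics: %s", err)
	}
	for name, detail := range topics {
		fmt.Printf("%s: %d partitions\n", name, detail.NumPartitions)
	}
}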
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMFunctionAction() *schema.Resource {
+	return &schema.Resource{
+
+		Read: dataSourceIBMFunctionActionRead,
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Name of action.",
+			},
+			"namespace": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Name of the namespace.",
+			},
+			"limits": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"timeout": {
+							Type:        schema.TypeInt,
+							Computed:    true,
+							Description: "The timeout LIMIT in milliseconds after which the action is terminated.",
+						},
+						"memory": {
+							Type:        schema.TypeInt,
+							Computed:    true,
+							Description: "The maximum memory LIMIT in MB for the action (default 256).",
+						},
+						"log_size": {
+							Type:        schema.TypeInt,
+							Computed:    true,
+							Description: "The maximum log size LIMIT in MB for the action.",
+						},
+					},
+				},
+			},
+			"exec": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"image": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Container image name when kind is 'blackbox'.",
+						},
+						"init": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Optional zipfile reference.",
+						},
+						"code": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The code to execute when kind is not 'blackbox'.",
+						},
+						"kind": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The type of action. Possible values: php:7.3, nodejs:8, swift:3, nodejs, blackbox, java, sequence, nodejs:10, python:3, python, python:2, swift, swift:4.2.",
+						},
+						"main": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The name of the action entry point (function or fully-qualified method name when applicable).",
+						},
+						"components": {
+							Type:        schema.TypeList,
+							Computed:    true,
+							Elem:        &schema.Schema{Type: schema.TypeString},
+							Description: "The list of fully qualified actions.",
+						},
+					},
+				},
+			},
+			"publish": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Action visibility.",
+			},
+			"version": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Semantic version of the item.",
+			},
+			"annotations": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "All annotations set on action by user and those set by the IBM Cloud Function backend/API.",
+			},
+			"parameters": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "All parameters set on action by user and those set by the IBM Cloud Function backend/API.",
+			},
+			"action_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"target_endpoint_url": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Action target endpoint URL.",
+			},
+		},
+	}
+}
+
+func dataSourceIBMFunctionActionRead(d *schema.ResourceData, meta interface{}) error {
+	functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI()
+	if err != nil {
+		return err
+	}
+
+	bxSession, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return err
+	}
+	namespace := d.Get("namespace").(string)
+	wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI)
+	if err != nil {
+		return err
+	}
+
+	actionService := wskClient.Actions
+	name := d.Get("name").(string)
+
+	action, _, err := actionService.Get(name, true)
+	if err != nil {
return fmt.Errorf("Error retrieving IBM Cloud Function Action %s : %s", name, err) + } + + temp := strings.Split(action.Namespace, "/") + pkgName := "" + if len(temp) == 2 { + pkgName = temp[1] + d.SetId(fmt.Sprintf("%s/%s", pkgName, action.Name)) + d.Set("name", fmt.Sprintf("%s/%s", pkgName, action.Name)) + } else { + d.SetId(action.Name) + d.Set("name", action.Name) + } + + d.Set("namespace", namespace) + d.Set("limits", flattenLimits(action.Limits)) + d.Set("exec", flattenExec(action.Exec, d)) + d.Set("publish", action.Publish) + d.Set("version", action.Version) + d.Set("action_id", action.Name) + annotations, err := flattenAnnotations(action.Annotations) + if err != nil { + log.Printf( + "An error occured during reading of action (%s) annotations : %s", d.Id(), err) + } + d.Set("annotations", annotations) + parameters, err := flattenParameters(action.Parameters) + if err != nil { + log.Printf( + "An error occured during reading of action (%s) parameters : %s", d.Id(), err) + } + d.Set("parameters", parameters) + + targetURL, err := action.ActionURL(wskClient.Config.Host, "/api", wskClient.Config.Version, pkgName) + if err != nil { + log.Printf( + "An error occured during reading of action (%s) targetURL : %s", d.Id(), err) + } + d.Set("target_endpoint_url", targetURL) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_namespace.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_namespace.go new file mode 100644 index 00000000000..3bc01e3f652 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_namespace.go @@ -0,0 +1,91 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/api/functions" +) + +func dataSourceIBMFunctionNamespace() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMFunctionNamespaceRead, + Schema: map[string]*schema.Schema{ + funcNamespaceName: { + Type: schema.TypeString, + Required: true, + Description: "Name of namespace.", + ValidateFunc: InvokeValidator("ibm_function_namespace", funcNamespaceName), + }, + funcNamespaceDesc: { + Type: schema.TypeString, + Computed: true, + Description: "Namespace Description.", + }, + funcNamespaceResGrpId: { + Type: schema.TypeString, + Computed: true, + Description: "Resource Group ID.", + }, + funcNamespaceLoc: { + Type: schema.TypeString, + Computed: true, + Description: "Namespace Location.", + }, + }, + } +} + +func dataSourceIBMFunctionNamespaceRead(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + name := d.Get("name").(string) + nsList, err := functionNamespaceAPI.Namespaces().GetNamespaces() + if err != nil { + return err + } + for _, n := range nsList.Namespaces { + if n.Name != nil && *n.Name == name { + getOptions := functions.GetNamespaceOptions{ + ID: n.ID, + } + + instance, err := functionNamespaceAPI.Namespaces().GetNamespace(getOptions) + if err != nil { + d.SetId("") + return nil + } + + if instance.ID != nil { + d.SetId(*instance.ID) + } + + if instance.Name != nil { + d.Set(funcNamespaceName, *instance.Name) + } + + if instance.ResourceGroupID != nil { + d.Set(funcNamespaceResGrpId, *instance.ResourceGroupID) + } + + if instance.Location != nil { + 
+				d.Set(funcNamespaceLoc, *instance.Location)
+			}
+
+			if instance.Description != nil {
+				d.Set(funcNamespaceDesc, *instance.Description)
+			}
+
+			return nil
+		}
+	}
+
+	return fmt.Errorf("No cloud function namespace found with name [%s]", name)
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_package.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_package.go
new file mode 100644
index 00000000000..43f1054a0e5
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_package.go
@@ -0,0 +1,113 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMFunctionPackage() *schema.Resource {
+	return &schema.Resource{
+
+		Read: dataSourceIBMFunctionPackageRead,
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Name of the package.",
+			},
+			"namespace": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Name of the namespace.",
+			},
+			"publish": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Package Visibility.",
+			},
+			"version": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Semantic version of the package.",
+			},
+
+			"annotations": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "All annotations set on package by user and those set by the IBM Cloud Function backend/API.",
+			},
+
+			"parameters": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "All parameters set on package by user and those set by the IBM Cloud Function backend/API.",
+			},
+
+			"bind_package_name": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Name of the bound package.",
+			},
+
+			"package_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMFunctionPackageRead(d *schema.ResourceData, meta interface{}) error {
+	functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI()
+	if err != nil {
+		return err
+	}
+
+	bxSession, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return err
+	}
+	namespace := d.Get("namespace").(string)
+	wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI)
+	if err != nil {
+		return err
+	}
+
+	packageService := wskClient.Packages
+	name := d.Get("name").(string)
+	pkg, _, err := packageService.Get(name)
+	if err != nil {
+		return fmt.Errorf("Error retrieving IBM Cloud Function package %s : %s", name, err)
+	}
+
+	d.SetId(pkg.Name)
+	d.Set("name", pkg.Name)
+	d.Set("namespace", namespace)
+	d.Set("publish", pkg.Publish)
+	d.Set("version", pkg.Version)
+	d.Set("package_id", pkg.Name)
+	annotations, err := flattenAnnotations(pkg.Annotations)
+	if err != nil {
+		log.Printf(
+			"An error occurred during reading of package (%s) annotations : %s", d.Id(), err)
+	}
+	d.Set("annotations", annotations)
+	parameters, err := flattenParameters(pkg.Parameters)
+	if err != nil {
+		log.Printf(
+			"An error occurred during reading of package (%s) parameters : %s", d.Id(), err)
+	}
+	d.Set("parameters", parameters)
+
+	if !isEmpty(*pkg.Binding) {
+		d.Set("bind_package_name", fmt.Sprintf("/%s/%s", pkg.Binding.Namespace, pkg.Binding.Name))
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_rule.go
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_rule.go new file mode 100644 index 00000000000..ed47039bf9e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_rule.go @@ -0,0 +1,98 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMFunctionRule() *schema.Resource { + return &schema.Resource{ + + Read: dataSourceIBMFunctionRuleRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the rule.", + }, + "namespace": { + Type: schema.TypeString, + Required: true, + Description: "Name of the namespace.", + }, + "trigger_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the trigger.", + }, + "action_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of an action.", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Status of the rule.", + }, + "publish": { + Type: schema.TypeBool, + Computed: true, + Description: "Rule Visibility.", + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: "Semantic version of the rule", + }, + "rule_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMFunctionRuleRead(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + namespace := d.Get("namespace").(string) + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + ruleService := wskClient.Rules + name := d.Get("name").(string) + + rule, _, err := ruleService.Get(name) + if err != nil { + return fmt.Errorf("Error retrieving IBM Cloud Function Rule %s : %s", name, err) + } + + d.SetId(rule.Name) + d.Set("name", rule.Name) + d.Set("namespace", namespace) + d.Set("publish", rule.Publish) + d.Set("version", rule.Version) + d.Set("status", rule.Status) + d.Set("rule_id", rule.Name) + d.Set("trigger_name", rule.Trigger.(map[string]interface{})["name"]) + path := rule.Action.(map[string]interface{})["path"] + actionName := rule.Action.(map[string]interface{})["name"] + d.Set("action_name", fmt.Sprintf("/%s/%s", path, actionName)) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_trigger.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_trigger.go new file mode 100644 index 00000000000..4a246303189 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_function_trigger.go @@ -0,0 +1,104 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
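dataSourceIBMFunctionRuleRead above asserts rule.Trigger and rule.Action straight to map[string]interface{} and indexes the result; the OpenWhisk client types those fields as interface{}, so an unexpected payload would panic. A stdlib sketch of the comma-ok form of the same extraction, with a hypothetical extractName helper:

package main

import "fmt"

// extractName shows the comma-ok variant of the assertions used in the
// rule read; a malformed payload yields (``, false) instead of a panic.
func extractName(v interface{}) (string, bool) {
	m, ok := v.(map[string]interface{})
	if !ok {
		return "", false
	}
	name, ok := m["name"].(string)
	return name, ok
}

func main() {
	var trigger interface{} = map[string]interface{}{"name": "my-trigger", "path": "ns"}
	if name, ok := extractName(trigger); ok {
		fmt.Println("trigger_name:", name)
	}
	if _, ok := extractName("not-a-map"); !ok {
		fmt.Println("gracefully handled unexpected payload")
	}
}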
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMFunctionTrigger() *schema.Resource {
+	return &schema.Resource{
+
+		Read: dataSourceIBMFunctionTriggerRead,
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Name of Trigger.",
+			},
+			"namespace": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Name of the namespace.",
+			},
+			"publish": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Trigger Visibility.",
+			},
+			"version": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Semantic version of the trigger.",
+			},
+
+			"annotations": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "All annotations set on trigger by user and those set by the IBM Cloud Function backend/API.",
+			},
+			"parameters": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "All parameters set on trigger by user and those set by the IBM Cloud Function backend/API.",
+			},
+			"trigger_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMFunctionTriggerRead(d *schema.ResourceData, meta interface{}) error {
+	functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI()
+	if err != nil {
+		return err
+	}
+
+	bxSession, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return err
+	}
+	namespace := d.Get("namespace").(string)
+	wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI)
+	if err != nil {
+		return err
+	}
+
+	triggerService := wskClient.Triggers
+	name := d.Get("name").(string)
+
+	trigger, _, err := triggerService.Get(name)
+	if err != nil {
+		return fmt.Errorf("Error retrieving IBM Cloud Function Trigger %s : %s", name, err)
+	}
+
+	d.SetId(trigger.Name)
+	d.Set("name", trigger.Name)
+	d.Set("namespace", namespace)
+	d.Set("publish", trigger.Publish)
+	d.Set("version", trigger.Version)
+	d.Set("trigger_id", trigger.Name)
+	annotations, err := flattenAnnotations(trigger.Annotations)
+	if err != nil {
+		log.Printf(
+			"An error occurred during reading of trigger (%s) annotations : %s", d.Id(), err)
+	}
+	d.Set("annotations", annotations)
+	parameters, err := flattenParameters(trigger.Parameters)
+	if err != nil {
+		log.Printf(
+			"An error occurred during reading of trigger (%s) parameters : %s", d.Id(), err)
+	}
+	d.Set("parameters", parameters)
+
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_access_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_access_group.go
new file mode 100644
index 00000000000..b264c7f4e49
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_access_group.go
@@ -0,0 +1,202 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
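The function data sources store annotations and parameters as single strings via flattenAnnotations and flattenParameters, which are defined elsewhere in the provider and not shown in this diff. A plausible stdlib equivalent, assuming the helpers JSON-encode the OpenWhisk key/value list; the KeyValue type here is an approximation, not the provider's:

package main

import (
	"encoding/json"
	"fmt"
)

// KeyValue approximates the OpenWhisk annotation/parameter entries the
// flatten helpers receive; this sketch only illustrates rendering the
// list as a JSON string for a schema.TypeString field.
type KeyValue struct {
	Key   string      `json:"key"`
	Value interface{} `json:"value"`
}

func flattenToJSON(kvs []KeyValue) (string, error) {
	b, err := json.Marshal(kvs)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	out, err := flattenToJSON([]KeyValue{{Key: "web-export", Value: true}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // [{"key":"web-export","value":true}]
}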
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	"log"
+
+	"github.com/IBM-Cloud/bluemix-go/crn"
+	"github.com/IBM-Cloud/bluemix-go/models"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMIAMAccessGroup() *schema.Resource {
+	return &schema.Resource{
+		Read:     dataIBMIAMAccessGroupRead,
+		Exists:   resourceIBMIAMAccessGroupExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"access_group_name": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Name of the access group",
+			},
+			"groups": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"id": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"description": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "Description of the access group",
+						},
+						"ibm_ids": {
+							Type:     schema.TypeList,
+							Computed: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+						"iam_service_ids": {
+							Type:     schema.TypeList,
+							Computed: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+						"rules": {
+							Type:     schema.TypeList,
+							Computed: true,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"name": {
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "The name of the rule",
+									},
+									"expiration": {
+										Type:        schema.TypeInt,
+										Computed:    true,
+										Description: "The expiration in hours",
+									},
+									"identity_provider": {
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "The realm name or identity provider URL",
+									},
+									"conditions": {
+										Type:     schema.TypeList,
+										Computed: true,
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"claim": {
+													Type:     schema.TypeString,
+													Computed: true,
+												},
+												"operator": {
+													Type:     schema.TypeString,
+													Computed: true,
+												},
+												"value": {
+													Type:     schema.TypeString,
+													Computed: true,
+												},
+											},
+										},
+									},
+									"rule_id": {
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "ID of the rule",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func dataIBMIAMAccessGroupRead(d *schema.ResourceData, meta interface{}) error {
+	iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2()
+	if err != nil {
+		return err
+	}
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+
+	accountID := userDetails.userAccount
+	userManagement, err := meta.(ClientSession).UserManagementAPI()
+	if err != nil {
+		return err
+	}
+	client := userManagement.UserInvite()
+	res, err := client.ListUsers(accountID)
+	if err != nil {
+		return err
+	}
+
+	iamClient, err := meta.(ClientSession).IAMAPI()
+	if err != nil {
+		return err
+	}
+
+	boundTo := crn.New(userDetails.cloudName, userDetails.cloudType)
+	boundTo.ScopeType = crn.ScopeAccount
+	boundTo.Scope = userDetails.userAccount
+
+	serviceIDs, err := iamClient.ServiceIds().List(boundTo.String())
+	if err != nil {
+		return err
+	}
+
+	retrievedGroups, err := iamuumClient.AccessGroup().List(accountID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving access groups: %s", err)
+	}
+
+	if len(retrievedGroups) == 0 {
+		return fmt.Errorf("No access group in account")
+	}
+	var agName string
+	var matchGroups []models.AccessGroupV2
+	if v, ok := d.GetOk("access_group_name"); ok {
+		agName = v.(string)
+		for _, grpData := range retrievedGroups {
+			if grpData.Name == agName {
+				matchGroups = append(matchGroups, grpData)
+			}
+		}
+	} else {
+		matchGroups = retrievedGroups
} + if len(matchGroups) == 0 { + return fmt.Errorf("No Access Groups with name %s in Account", agName) + } + + grpMap := make([]map[string]interface{}, 0, len(matchGroups)) + + for _, grp := range matchGroups { + members, err := iamuumClient.AccessGroupMember().List(grp.ID) + if err != nil { + log.Println("Error retrieving access group members: ", err) + } + rules, err := iamuumClient.DynamicRule().List(grp.ID) + if err != nil { + log.Println("Error retrieving access group rules: ", err) + } + ibmID, serviceID := flattenMembersData(members, res, serviceIDs) + + grpInstance := map[string]interface{}{ + "id": grp.ID, + "name": grp.Name, + "description": grp.Description, + "ibm_ids": ibmID, + "iam_service_ids": serviceID, + "rules": flattenAccessGroupRules(rules), + } + + grpMap = append(grpMap, grpInstance) + + } + + d.SetId(accountID) + d.Set("groups", grpMap) + d.Set("access_group_name", agName) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_account_settings.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_account_settings.go new file mode 100644 index 00000000000..cba6f70aaac --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_account_settings.go @@ -0,0 +1,210 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/iamidentityv1" +) + +func dataSourceIBMIAMAccountSettings() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIamAccountSettingsRead, + + Schema: map[string]*schema.Schema{ + "include_history": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Defines if the entity history is included in the response.", + }, + "account_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Unique ID of the account.", + }, + "restrict_create_service_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Defines whether or not creating a Service Id is access controlled. Valid values: * RESTRICTED - to apply access control * NOT_RESTRICTED - to remove access control * NOT_SET - to 'unset' a previous set value.", + }, + "restrict_create_platform_apikey": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Defines whether or not creating platform API keys is access controlled. Valid values: * RESTRICTED - to apply access control * NOT_RESTRICTED - to remove access control * NOT_SET - to 'unset' a previous set value.", + }, + "allowed_ip_addresses": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Defines the IP addresses and subnets from which IAM tokens can be created for the account.", + }, + "entity_tag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Version of the account settings.", + }, + "mfa": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Defines the MFA trait for the account. 
Valid values: * NONE - No MFA trait set * TOTP - For all non-federated IBMId users * TOTP4ALL - For all users * LEVEL1 - Email-based MFA for all users * LEVEL2 - TOTP-based MFA for all users * LEVEL3 - U2F MFA for all users.", + }, + "history": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "History of the Account Settings.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "timestamp": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Timestamp when the action was triggered.", + }, + "iam_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "IAM ID of the identity which triggered the action.", + }, + "iam_id_account": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Account of the identity which triggered the action.", + }, + "action": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action of the history entry.", + }, + "params": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Params of the history entry.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "message": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Message which summarizes the executed action.", + }, + }, + }, + }, + "session_expiration_in_seconds": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Defines the session expiration in seconds for the account. Valid values: * Any whole number between between '900' and '86400' * NOT_SET - To unset account setting and use service default.", + }, + "session_invalidation_in_seconds": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Defines the period of time in seconds in which a session will be invalidated due to inactivity. Valid values: * Any whole number between '900' and '7200' * NOT_SET - To unset account setting and use service default.", + }, + "max_sessions_per_identity": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Defines the max allowed sessions per identity required by the account. 
Valid values: * Any whole number greater than '0' * NOT_SET - To unset account setting and use service default.",
+			},
+		},
+	}
+}
+
+func dataSourceIbmIamAccountSettingsRead(d *schema.ResourceData, meta interface{}) error {
+	iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API()
+	if err != nil {
+		return err
+	}
+
+	getAccountSettingsOptions := &iamidentityv1.GetAccountSettingsOptions{}
+	getAccountSettingsOptions.SetIncludeHistory(d.Get("include_history").(bool))
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+
+	getAccountSettingsOptions.SetAccountID(userDetails.userAccount)
+
+	accountSettingsResponse, response, err := iamIdentityClient.GetAccountSettings(getAccountSettingsOptions)
+	if err != nil {
+		log.Printf("[DEBUG] GetAccountSettings failed %s\n%s", err, response)
+		return err
+	}
+
+	d.SetId(userDetails.userAccount)
+
+	if err = d.Set("account_id", accountSettingsResponse.AccountID); err != nil {
+		return fmt.Errorf("Error setting account_id: %s", err)
+	}
+	if err = d.Set("restrict_create_service_id", accountSettingsResponse.RestrictCreateServiceID); err != nil {
+		return fmt.Errorf("Error setting restrict_create_service_id: %s", err)
+	}
+	if err = d.Set("restrict_create_platform_apikey", accountSettingsResponse.RestrictCreatePlatformApikey); err != nil {
+		return fmt.Errorf("Error setting restrict_create_platform_apikey: %s", err)
+	}
+	if err = d.Set("allowed_ip_addresses", accountSettingsResponse.AllowedIPAddresses); err != nil {
+		return fmt.Errorf("Error setting allowed_ip_addresses: %s", err)
+	}
+	if err = d.Set("entity_tag", accountSettingsResponse.EntityTag); err != nil {
+		return fmt.Errorf("Error setting entity_tag: %s", err)
+	}
+	if err = d.Set("mfa", accountSettingsResponse.Mfa); err != nil {
+		return fmt.Errorf("Error setting mfa: %s", err)
+	}
+
+	if accountSettingsResponse.History != nil {
+		err = d.Set("history", dataSourceAccountSettingsResponseFlattenHistory(accountSettingsResponse.History))
+		if err != nil {
+			return fmt.Errorf("Error setting history %s", err)
+		}
+	}
+	if err = d.Set("session_expiration_in_seconds", accountSettingsResponse.SessionExpirationInSeconds); err != nil {
+		return fmt.Errorf("Error setting session_expiration_in_seconds: %s", err)
+	}
+	if err = d.Set("session_invalidation_in_seconds", accountSettingsResponse.SessionInvalidationInSeconds); err != nil {
+		return fmt.Errorf("Error setting session_invalidation_in_seconds: %s", err)
+	}
+	if err = d.Set("max_sessions_per_identity", accountSettingsResponse.MaxSessionsPerIdentity); err != nil {
+		return fmt.Errorf("Error setting max_sessions_per_identity: %s", err)
+	}
+
+	return nil
+}
+
+func dataSourceAccountSettingsResponseFlattenHistory(result []iamidentityv1.EnityHistoryRecord) (history []map[string]interface{}) {
+	for _, historyItem := range result {
+		history = append(history, dataSourceAccountSettingsResponseHistoryToMap(historyItem))
+	}
+
+	return history
+}
+
+func dataSourceAccountSettingsResponseHistoryToMap(historyItem iamidentityv1.EnityHistoryRecord) (historyMap map[string]interface{}) {
+	historyMap = map[string]interface{}{}
+
+	if historyItem.Timestamp != nil {
+		historyMap["timestamp"] = historyItem.Timestamp
+	}
+	if historyItem.IamID != nil {
+		historyMap["iam_id"] = historyItem.IamID
+	}
+	if historyItem.IamIDAccount != nil {
+		historyMap["iam_id_account"] = historyItem.IamIDAccount
+	}
+	if historyItem.Action != nil {
+		historyMap["action"] = historyItem.Action
+	}
+	if historyItem.Params != nil
{ + historyMap["params"] = historyItem.Params + } + if historyItem.Message != nil { + historyMap["message"] = historyItem.Message + } + + return historyMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_api_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_api_key.go new file mode 100644 index 00000000000..b93bc1d3ba5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_api_key.go @@ -0,0 +1,137 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/iamidentityv1" +) + +func dataSourceIbmIamApiKey() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIamApiKeyRead, + + Schema: map[string]*schema.Schema{ + "apikey_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Unique ID of the API key.", + }, + "entity_tag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Version of the API Key details object. You need to specify this value when updating the API key to avoid stale updates.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Cloud Resource Name of the item. Example Cloud Resource Name: 'crn:v1:bluemix:public:iam-identity:us-south:a/myaccount::apikey:1234-9012-5678'.", + }, + "locked": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "The API key cannot be changed if set to true.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "If set contains a date time string of the creation date in ISO format.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "IAM ID of the user or service which created the API key.", + }, + "modified_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "If set contains a date time string of the last modification date in ISO format.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the API key. The name is not checked for uniqueness. Therefore multiple names with the same value can exist. Access is done via the UUID of the API key.", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The optional description of the API key. The 'description' property is only available if a description was provided during a create of an API key.", + }, + "iam_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The iam_id that this API key authenticates.", + }, + "account_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "ID of the account that this API key authenticates for.", + }, + "apikey": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The API key value. This property only contains the API key value for the following cases: create an API key, update a service ID API key that stores the API key value as retrievable, or get a service ID API key that stores the API key value as retrievable. 
All other operations don't return the API key value, for example all user API key related operations, except for create, don't contain the API key value.", + }, + }, + } +} + +func dataSourceIbmIamApiKeyRead(d *schema.ResourceData, meta interface{}) error { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + + getApiKeyOptions := &iamidentityv1.GetAPIKeyOptions{} + + getApiKeyOptions.SetID(d.Get("apikey_id").(string)) + + apiKey, response, err := iamIdentityClient.GetAPIKey(getApiKeyOptions) + if err != nil { + log.Printf("[DEBUG] GetApiKey failed %s\n%s", err, response) + return err + } + + d.SetId(*apiKey.ID) + + if err = d.Set("entity_tag", apiKey.EntityTag); err != nil { + return fmt.Errorf("Error setting entity_tag: %s", err) + } + if err = d.Set("crn", apiKey.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("locked", apiKey.Locked); err != nil { + return fmt.Errorf("Error setting locked: %s", err) + } + if err = d.Set("created_at", apiKey.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + if err = d.Set("created_by", apiKey.CreatedBy); err != nil { + return fmt.Errorf("Error setting created_by: %s", err) + } + if err = d.Set("modified_at", apiKey.ModifiedAt.String()); err != nil { + return fmt.Errorf("Error setting modified_at: %s", err) + } + if err = d.Set("name", apiKey.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("description", apiKey.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("iam_id", apiKey.IamID); err != nil { + return fmt.Errorf("Error setting iam_id: %s", err) + } + if err = d.Set("account_id", apiKey.AccountID); err != nil { + return fmt.Errorf("Error setting account_id: %s", err) + } + if err = d.Set("apikey", apiKey.Apikey); err != nil { + return fmt.Errorf("Error setting apikey: %s", err) + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_auth_token.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_auth_token.go new file mode 100644 index 00000000000..a16ae7a2fd9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_auth_token.go @@ -0,0 +1,57 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
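+//
+// Illustrative usage (assuming this data source is registered in the
+// provider as "ibm_iam_auth_token", per this file's name):
+//
+//   data "ibm_iam_auth_token" "tokens" {}
+//
+//   output "iam_access_token" {
+//     value     = data.ibm_iam_auth_token.tokens.iam_access_token
+//     sensitive = true
+//   }
+//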
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMIAMAuthToken() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMIAMAuthTokenRead, + + Schema: map[string]*schema.Schema{ + + "iam_access_token": { + Type: schema.TypeString, + Computed: true, + }, + + "iam_refresh_token": { + Type: schema.TypeString, + Computed: true, + }, + "uaa_access_token": { + Type: schema.TypeString, + Computed: true, + }, + + "uaa_refresh_token": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMIAMAuthTokenRead(d *schema.ResourceData, meta interface{}) error { + bmxSess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + d.SetId(dataSourceIBMIAMAuthTokenID(d)) + + d.Set("iam_access_token", bmxSess.Config.IAMAccessToken) + d.Set("iam_refresh_token", bmxSess.Config.IAMRefreshToken) + d.Set("uaa_access_token", bmxSess.Config.UAAAccessToken) + d.Set("uaa_refresh_token", bmxSess.Config.UAARefreshToken) + + return nil +} + +func dataSourceIBMIAMAuthTokenID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_role_actions.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_role_actions.go new file mode 100644 index 00000000000..2024a520236 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_role_actions.go @@ -0,0 +1,76 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func datasourceIBMIAMRoleAction() *schema.Resource { + return &schema.Resource{ + Read: datasourceIBMIAMRoleActionRead, + + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + Description: "The Service Name", + ForceNew: true, + }, + "reader": { + Type: schema.TypeList, + Computed: true, + Description: "Reader action ids", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "manager": { + Type: schema.TypeList, + Computed: true, + Description: "manager action ids", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "reader_plus": { + Type: schema.TypeList, + Computed: true, + Description: "readerplus action ids", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "writer": { + Type: schema.TypeList, + Computed: true, + Description: "writer action ids", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } + +} + +func datasourceIBMIAMRoleActionRead(d *schema.ResourceData, meta interface{}) error { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + serviceName := d.Get("service").(string) + d.SetId(serviceName) + + listRoleOptions := &iampolicymanagementv1.ListRolesOptions{ + ServiceName: &serviceName, + } + + roleList, _, err := iamPolicyManagementClient.ListRoles(listRoleOptions) + if err != nil { + return err + } + serviceRoles := roleList.ServiceRoles + + d.Set("reader", flattenActionbyDisplayName("Reader", serviceRoles)) + d.Set("manager", flattenActionbyDisplayName("Manager", serviceRoles)) + d.Set("reader_plus", flattenActionbyDisplayName("ReaderPlus", serviceRoles)) + d.Set("writer", flattenActionbyDisplayName("Writer", 
serviceRoles)) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_roles.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_roles.go new file mode 100644 index 00000000000..e154fc82736 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_roles.go @@ -0,0 +1,87 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func datasourceIBMIAMRole() *schema.Resource { + return &schema.Resource{ + Read: datasourceIBMIAMRoleRead, + + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Optional: true, + Description: "The Service Name", + ForceNew: true, + }, + "roles": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } + +} + +func datasourceIBMIAMRoleRead(d *schema.ResourceData, meta interface{}) error { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + var serviceName string + var customRoles []iampolicymanagementv1.CustomRole + var serviceRoles, systemRoles []iampolicymanagementv1.Role + + listRoleOptions := &iampolicymanagementv1.ListRolesOptions{ + AccountID: &userDetails.userAccount, + } + + if service, ok := d.GetOk("service"); ok { + serviceName = service.(string) + listRoleOptions.ServiceName = &serviceName + } + roleList, _, err := iamPolicyManagementClient.ListRoles(listRoleOptions) + if err != nil { + return err + } + customRoles = roleList.CustomRoles + serviceRoles = roleList.ServiceRoles + systemRoles = roleList.SystemRoles + + d.SetId(userDetails.userAccount) + + var roles []map[string]string + + roles = append(flattenRoleData(systemRoles, "platform"), append(flattenRoleData(serviceRoles, "service"), flattenCustomRoleData(customRoles, "custom")...)...) + + d.Set("roles", roles) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_service_id.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_service_id.go new file mode 100644 index 00000000000..83d05f744ff --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_service_id.go @@ -0,0 +1,118 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
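+//
+// Illustrative usage (assuming the registered data source name
+// "ibm_iam_service_id", per this file's name; "my-service-id" is a
+// hypothetical Service ID name):
+//
+//   data "ibm_iam_service_id" "lookup" {
+//     name = "my-service-id"
+//   }
+//
+//   # service_ids is a list; each element exposes id, crn, iam_id, and more.
+//   output "service_id_crn" {
+//     value = data.ibm_iam_service_id.lookup.service_ids[0].crn
+//   }
+//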
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/crn" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMIAMServiceID() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMIAMServiceIDRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "Name of the serviceID", + Type: schema.TypeString, + Required: true, + }, + + "service_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + + "bound_to": { + Description: "bound to of the serviceID", + Type: schema.TypeString, + Computed: true, + }, + + "crn": { + Description: "CRN of the serviceID", + Type: schema.TypeString, + Computed: true, + }, + + "description": { + Description: "description of the serviceID", + Type: schema.TypeString, + Computed: true, + }, + + "version": { + Description: "Version of the serviceID", + Type: schema.TypeString, + Computed: true, + }, + + "locked": { + Description: "lock state of the serviceID", + Type: schema.TypeBool, + Computed: true, + }, + + "iam_id": { + Description: "The IAM ID of the serviceID", + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMIAMServiceIDRead(d *schema.ResourceData, meta interface{}) error { + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return err + } + name := d.Get("name").(string) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + boundTo := crn.New(userDetails.cloudName, userDetails.cloudType) + boundTo.ScopeType = crn.ScopeAccount + boundTo.Scope = userDetails.userAccount + + serviceIDS, err := iamClient.ServiceIds().FindByName(boundTo.String(), name) + if err != nil { + return err + } + + if len(serviceIDS) == 0 { + return fmt.Errorf("No serviceID found with name [%s]", name) + + } + + serviceIDListMap := make([]map[string]interface{}, 0, len(serviceIDS)) + for _, serviceID := range serviceIDS { + l := map[string]interface{}{ + "id": serviceID.UUID, + "bound_to": serviceID.BoundTo, + "version": serviceID.Version, + "description": serviceID.Description, + "crn": serviceID.CRN, + "locked": serviceID.Locked, + "iam_id": serviceID.IAMID, + } + serviceIDListMap = append(serviceIDListMap, l) + } + d.SetId(name) + d.Set("service_ids", serviceIDListMap) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_service_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_service_policy.go new file mode 100644 index 00000000000..54af71b2dae --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_service_policy.go @@ -0,0 +1,175 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
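+//
+// Illustrative usage (assuming the registered data source name
+// "ibm_iam_service_policy"; exactly one of iam_service_id or iam_id must be
+// set, per the ExactlyOneOf constraints below; the UUID is hypothetical):
+//
+//   data "ibm_iam_service_policy" "policies" {
+//     iam_service_id = "ServiceId-1234abcd-56ef-78ab-90cd-1234567890ab"
+//   }
+//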
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" +) + +// Data source to find all the policies for a serviceID +func dataSourceIBMIAMServicePolicy() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMIAMServicePolicyRead, + + Schema: map[string]*schema.Schema{ + "iam_service_id": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"iam_service_id", "iam_id"}, + Description: "UUID of ServiceID", + }, + "iam_id": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"iam_service_id", "iam_id"}, + Description: "IAM ID of ServiceID", + }, + "sort": { + Description: "Sort query for policies", + Type: schema.TypeString, + Optional: true, + }, + "policies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "roles": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Role names of the policy definition", + }, + "resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Computed: true, + Description: "Service name of the policy definition", + }, + "resource_instance_id": { + Type: schema.TypeString, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "ID of resource instance of the policy definition", + }, + "region": { + Type: schema.TypeString, + Computed: true, + Description: "Region of the policy definition", + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "Resource type of the policy definition", + }, + "resource": { + Type: schema.TypeString, + Computed: true, + Description: "Resource of the policy definition", + }, + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the resource group.", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMIAMServicePolicyRead(d *schema.ResourceData, meta interface{}) error { + + var iamID string + if v, ok := d.GetOk("iam_service_id"); ok && v != nil { + + serviceIDUUID := v.(string) + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return err + } + serviceID, err := iamClient.ServiceIds().Get(serviceIDUUID) + if err != nil { + return err + } + iamID = serviceID.IAMID + } + if v, ok := d.GetOk("iam_id"); ok && v != nil { + iamID = v.(string) + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + listPoliciesOptions := &iampolicymanagementv1.ListPoliciesOptions{ + AccountID: core.StringPtr(userDetails.userAccount), + IamID: core.StringPtr(iamID), + Type: core.StringPtr("access"), + } + + if v, ok := d.GetOk("sort"); ok { + listPoliciesOptions.Sort = core.StringPtr(v.(string)) + } + + policyList, _, err := iamPolicyManagementClient.ListPolicies(listPoliciesOptions) + // Check the error before dereferencing policyList. + if err != nil { + return err + } + policies := policyList.Policies + + servicePolicies := make([]map[string]interface{}, 0, len(policies)) + for _, policy := range policies { + roles := make([]string, len(policy.Roles)) + for i, role := range
policy.Roles { + roles[i] = *role.DisplayName + } + resources := flattenPolicyResource(policy.Resources) + p := map[string]interface{}{ + "roles": roles, + "resources": resources, + } + if v, ok := d.GetOk("iam_service_id"); ok && v != nil { + serviceIDUUID := v.(string) + p["id"] = fmt.Sprintf("%s/%s", serviceIDUUID, *policy.ID) + } else if v, ok := d.GetOk("iam_id"); ok && v != nil { + iamID := v.(string) + p["id"] = fmt.Sprintf("%s/%s", iamID, *policy.ID) + } + servicePolicies = append(servicePolicies, p) + } + + if v, ok := d.GetOk("iam_service_id"); ok && v != nil { + serviceIDUUID := v.(string) + d.SetId(serviceIDUUID) + } else if v, ok := d.GetOk("iam_id"); ok && v != nil { + iamID := v.(string) + d.SetId(iamID) + } + d.Set("policies", servicePolicies) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_user_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_user_policy.go new file mode 100644 index 00000000000..91bd6f15de0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_user_policy.go @@ -0,0 +1,149 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// Data source to find all the policies for a user in a particular account +func dataSourceIBMIAMUserPolicy() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMIAMUserPolicyRead, + + Schema: map[string]*schema.Schema{ + "ibm_id": { + Description: "The ibm id or email of user", + Type: schema.TypeString, + Required: true, + }, + "sort": { + Description: "Sort query for policies", + Type: schema.TypeString, + Optional: true, + }, + "policies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "roles": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Role names of the policy definition", + }, + "resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Computed: true, + Description: "Service name of the policy definition", + }, + "resource_instance_id": { + Type: schema.TypeString, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "ID of resource instance of the policy definition", + }, + "region": { + Type: schema.TypeString, + Computed: true, + Description: "Region of the policy definition", + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "Resource type of the policy definition", + }, + "resource": { + Type: schema.TypeString, + Computed: true, + Description: "Resource of the policy definition", + }, + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the resource group.", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMIAMUserPolicyRead(d *schema.ResourceData, meta interface{}) error { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + userEmail := d.Get("ibm_id").(string) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err 
!= nil { + return err + } + + accountID := userDetails.userAccount + + ibmUniqueID, err := getIBMUniqueId(accountID, userEmail, meta) + if err != nil { + return err + } + + listPoliciesOptions := &iampolicymanagementv1.ListPoliciesOptions{ + AccountID: core.StringPtr(accountID), + IamID: core.StringPtr(ibmUniqueID), + Type: core.StringPtr("access"), + } + + if v, ok := d.GetOk("sort"); ok { + listPoliciesOptions.Sort = core.StringPtr(v.(string)) + } + + policyList, _, err := iamPolicyManagementClient.ListPolicies(listPoliciesOptions) + // Check the error before dereferencing policyList. + if err != nil { + return err + } + policies := policyList.Policies + + userPolicies := make([]map[string]interface{}, 0, len(policies)) + for _, policy := range policies { + roles := make([]string, len(policy.Roles)) + for i, role := range policy.Roles { + roles[i] = *role.DisplayName + } + resources := flattenPolicyResource(policy.Resources) + p := map[string]interface{}{ + "id": fmt.Sprintf("%s/%s", userEmail, *policy.ID), + "roles": roles, + "resources": resources, + } + userPolicies = append(userPolicies, p) + } + d.SetId(userEmail) + d.Set("policies", userPolicies) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_user_profile.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_user_profile.go new file mode 100644 index 00000000000..7a1a4114a7a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_user_profile.go @@ -0,0 +1,125 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMIAMUserProfile() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMIAMUserProfileRead, + + Schema: map[string]*schema.Schema{ + + "iam_id": { + Description: "User's IAM ID or email of user", + Type: schema.TypeString, + Required: true, + }, + + "allowed_ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of allowed IPv4 or IPv6 addresses ", + }, + + "user_id": { + Type: schema.TypeString, + Computed: true, + Description: "The user ID used for login. ", + }, + + "firstname": { + Type: schema.TypeString, + Computed: true, + Description: "The first name of the user. ", + }, + + "lastname": { + Type: schema.TypeString, + Computed: true, + Description: "The last name of the user. ", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The state of the user. Possible values are PROCESSING, PENDING, ACTIVE, DISABLED_CLASSIC_INFRASTRUCTURE, and VPN_ONLY. ", + }, + + "email": { + Type: schema.TypeString, + Computed: true, + Description: "The email of the user. ", + }, + + "phonenumber": { + Type: schema.TypeString, + Computed: true, + Description: "The phone number for the user.", + }, + + "altphonenumber": { + Type: schema.TypeString, + Computed: true, + Description: "The alternative phone number of the user. ", + }, + + "account_id": { + Type: schema.TypeString, + Computed: true, + Description: "An alphanumeric value identifying the account ID.
", + }, + }, + } +} + +func dataSourceIBMIAMUserProfileRead(d *schema.ResourceData, meta interface{}) error { + userManagement, err := meta.(ClientSession).UserManagementAPI() + if err != nil { + return err + } + client := userManagement.UserInvite() + + userEmail := d.Get("iam_id").(string) + + accountID, err := getUserAccountID(d, meta) + if err != nil { + return err + } + + iamID, err := getIBMUniqueId(accountID, userEmail, meta) + if err != nil { + return err + } + + userInfo, error := client.GetUserProfile(accountID, iamID) + if error != nil { + return error + } + + d.Set("user_id", userInfo.UserID) + d.Set("firstname", userInfo.Firstname) + d.Set("lastname", userInfo.Lastname) + d.Set("state", userInfo.State) + d.Set("email", userInfo.Email) + d.Set("phonenumber", userInfo.Phonenumber) + d.Set("altphonenumber", userInfo.Altphonenumber) + d.Set("account_id", userInfo.AccountID) + + UserSettings, UserSettingError := client.GetUserSettings(accountID, iamID) + if UserSettingError != nil { + return UserSettingError + } + + iplist := strings.Split(UserSettings.AllowedIPAddresses, ",") + d.Set("allowed_ip_addresses", iplist) + d.SetId(userEmail) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_users.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_users.go new file mode 100644 index 00000000000..49ca6b574f0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_iam_users.go @@ -0,0 +1,138 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMIAMUsers() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMIAMUsersRead, + + Schema: map[string]*schema.Schema{ + + "users": { + Type: schema.TypeList, + Computed: true, + Description: "User's Profiles", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "iam_id": { + Type: schema.TypeString, + Computed: true, + Description: "User's IAM ID or or email of user", + }, + + "user_id": { + Type: schema.TypeString, + Computed: true, + Description: "The user ID used for login. ", + }, + + "realm": { + Type: schema.TypeString, + Computed: true, + Description: "The realm of the user. ", + }, + + "first_name": { + Type: schema.TypeString, + Computed: true, + Description: "The first name of the user. ", + }, + + "last_name": { + Type: schema.TypeString, + Computed: true, + Description: "The last name of the user. ", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The state of the user. Possible values are PROCESSING, PENDING, ACTIVE, DISABLED_CLASSIC_INFRASTRUCTURE, and VPN_ONLY. ", + }, + + "email": { + Type: schema.TypeString, + Computed: true, + Description: "The email of the user. ", + }, + + "phonenumber": { + Type: schema.TypeString, + Computed: true, + Description: "The phone for the user.", + }, + + "alt_phonenumber": { + Type: schema.TypeString, + Computed: true, + Description: "The alternative phone number of the user. ", + }, + + "account_id": { + Type: schema.TypeString, + Computed: true, + Description: "An alphanumeric value identifying the account ID. 
", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMIAMUsersRead(d *schema.ResourceData, meta interface{}) error { + userManagement, err := meta.(ClientSession).UserManagementAPI() + if err != nil { + return err + } + client := userManagement.UserInvite() + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + accountID := userDetails.userAccount + + if err != nil { + return err + } + + res, err := client.ListUsers(accountID) + if err != nil { + return err + } + + profileList := make([]interface{}, 0) + + for _, userInfo := range res { + if userInfo.State == "ACTIVE" { + + user := map[string]interface{}{ + "iam_id": userInfo.IamID, + "user_id": userInfo.UserID, + "realm": userInfo.Realm, + "first_name": userInfo.Firstname, + "last_name": userInfo.Lastname, + "state": userInfo.State, + "email": userInfo.Email, + "phonenumber": userInfo.Phonenumber, + "alt_phonenumber": userInfo.Altphonenumber, + "account_id": userInfo.AccountID, + } + + profileList = append(profileList, user) + } + } + + d.SetId(accountID) + d.Set("users", profileList) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host.go new file mode 100644 index 00000000000..f560973322e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host.go @@ -0,0 +1,727 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsDedicatedHost() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsDedicatedHostRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The unique name of this dedicated host", + }, + "host_group": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The unique identifier of the dedicated host group this dedicated host belongs to", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The unique identifier of the resource group this dedicated host belongs to", + }, + "available_memory": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The amount of memory in gibibytes that is currently available for instances.", + }, + "available_vcpu": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The available VCPU for the dedicated host.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "architecture": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VCPU architecture.", + }, + "count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The number of VCPUs assigned.", + }, + }, + }, + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the dedicated host was created.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host.", + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the dedicated host's disks.", + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "available": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The remaining space left for instance placement in GB (gigabytes).", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the disk was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this disk.", + }, + "instance_disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Instance disks that are on this dedicated host disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deleted": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this instance disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this disk.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "interface_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The disk interface used for attaching the disk. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "lifecycle_state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of this dedicated host disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined or system-provided name for this disk.", + }, + "provisionable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this dedicated host disk is available for instance disk creation.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + "supported_instance_interface_types": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The instance disk interfaces supported for this dedicated host disk.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host.", + }, + "instance_placement_enabled": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, instances can be placed on this dedicated host.", + }, + "instances": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of instances that are allocated to this dedicated host.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this virtual server instance.", + }, + "deleted": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this virtual server instance.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this virtual server instance (and default system hostname).", + }, + }, + }, + }, + "lifecycle_state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of the dedicated host resource.", + }, + "memory": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The total amount of memory in gibibytes for this host.", + }, + "profile": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The profile this dedicated host uses.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this dedicated host profile.", + }, + }, + }, + }, + "provisionable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this dedicated host is available for instance creation.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + "socket_count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The total number of sockets for this host.", + }, + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The administrative state of the dedicated host. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the dedicated host on which the unexpected property value was encountered.", + }, + "supported_instance_profiles": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of instance profiles that can be used by instances placed on this dedicated host.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance profile.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this virtual server instance profile.", + }, + }, + }, + }, + "vcpu": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The total VCPU of the dedicated host.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "architecture": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VCPU architecture.", + }, + "count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The number of VCPUs assigned.", + }, + }, + }, + }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name of the zone this dedicated host resides in.", + }, + }, + } +} + +func dataSourceIbmIsDedicatedHostRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + listDedicatedHostsOptions := &vpcv1.ListDedicatedHostsOptions{} + hostgroupid := d.Get("host_group").(string) + listDedicatedHostsOptions.DedicatedHostGroupID = &hostgroupid + if resgrpid, ok := d.GetOk("resource_group"); ok { + resgrpidstr := resgrpid.(string) + listDedicatedHostsOptions.ResourceGroupID = &resgrpidstr + } + dedicatedHostCollection, response, err := vpcClient.ListDedicatedHostsWithContext(context.TODO(), listDedicatedHostsOptions) + if err != nil { + log.Printf("[DEBUG] ListDedicatedHostsWithContext failed %s\n%s", err, response) + return err + } + name := d.Get("name").(string) + if len(dedicatedHostCollection.DedicatedHosts) != 0 { + dedicatedHost := vpcv1.DedicatedHost{} + for _, data := range dedicatedHostCollection.DedicatedHosts { + if *data.Name == name { + dedicatedHost = data + d.SetId(*dedicatedHost.ID) + + if err = d.Set("available_memory", dedicatedHost.AvailableMemory); err != nil { + return fmt.Errorf("Error setting available_memory: %s", err) + } + + if dedicatedHost.AvailableVcpu != nil { + err = d.Set("available_vcpu", dataSourceDedicatedHostFlattenAvailableVcpu(*dedicatedHost.AvailableVcpu)) + if err != nil { + return fmt.Errorf("Error setting available_vcpu %s", err) + } + } + if err = d.Set("created_at", dedicatedHost.CreatedAt.String()); err != nil { + return
fmt.Errorf("Error setting created_at: %s", err) + } + if err = d.Set("crn", dedicatedHost.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if dedicatedHost.Disks != nil { + err = d.Set("disks", dataSourceDedicatedHostFlattenDisks(dedicatedHost.Disks)) + if err != nil { + return fmt.Errorf("Error setting disks %s", err) + } + } + if dedicatedHost.Group != nil { + err = d.Set("host_group", *dedicatedHost.Group.ID) + if err != nil { + return fmt.Errorf("Error setting group %s", err) + } + } + if err = d.Set("href", dedicatedHost.Href); err != nil { + return fmt.Errorf("Error setting href: %s", err) + } + if err = d.Set("instance_placement_enabled", dedicatedHost.InstancePlacementEnabled); err != nil { + return fmt.Errorf("Error setting instance_placement_enabled: %s", err) + } + + if dedicatedHost.Instances != nil { + err = d.Set("instances", dataSourceDedicatedHostFlattenInstances(dedicatedHost.Instances)) + if err != nil { + return fmt.Errorf("Error setting instances %s", err) + } + } + if err = d.Set("lifecycle_state", dedicatedHost.LifecycleState); err != nil { + return fmt.Errorf("Error setting lifecycle_state: %s", err) + } + if err = d.Set("memory", dedicatedHost.Memory); err != nil { + return fmt.Errorf("Error setting memory: %s", err) + } + if err = d.Set("name", dedicatedHost.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + + if dedicatedHost.Profile != nil { + err = d.Set("profile", dataSourceDedicatedHostFlattenProfile(*dedicatedHost.Profile)) + if err != nil { + return fmt.Errorf("Error setting profile %s", err) + } + } + if err = d.Set("provisionable", dedicatedHost.Provisionable); err != nil { + return fmt.Errorf("Error setting provisionable: %s", err) + } + + if dedicatedHost.ResourceGroup != nil { + err = d.Set("resource_group", *dedicatedHost.ResourceGroup.ID) + if err != nil { + return fmt.Errorf("Error setting resource_group %s", err) + } + } + if err = d.Set("resource_type", dedicatedHost.ResourceType); err != nil { + return fmt.Errorf("Error setting resource_type: %s", err) + } + if err = d.Set("socket_count", dedicatedHost.SocketCount); err != nil { + return fmt.Errorf("Error setting socket_count: %s", err) + } + if err = d.Set("state", dedicatedHost.State); err != nil { + return fmt.Errorf("Error setting state: %s", err) + } + + if dedicatedHost.SupportedInstanceProfiles != nil { + err = d.Set("supported_instance_profiles", dataSourceDedicatedHostFlattenSupportedInstanceProfiles(dedicatedHost.SupportedInstanceProfiles)) + if err != nil { + return fmt.Errorf("Error setting supported_instance_profiles %s", err) + } + } + + if dedicatedHost.Vcpu != nil { + err = d.Set("vcpu", dataSourceDedicatedHostFlattenVcpu(*dedicatedHost.Vcpu)) + if err != nil { + return fmt.Errorf("Error setting vcpu %s", err) + } + } + + if dedicatedHost.Zone != nil { + err = d.Set("zone", *dedicatedHost.Zone.Name) + if err != nil { + return fmt.Errorf("Error setting zone %s", err) + } + } + + return nil + } + } + } + return fmt.Errorf("No Dedicated Host found with name %s", name) +} + +// dataSourceIbmIsDedicatedHostID returns a reasonable ID for the list. 
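+// (It is not referenced by dataSourceIbmIsDedicatedHostRead above, which sets
+// the resource ID from the matched host's own ID instead.)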
+func dataSourceIbmIsDedicatedHostID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceDedicatedHostFlattenAvailableVcpu(result vpcv1.Vcpu) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostAvailableVcpuToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostAvailableVcpuToMap(availableVcpuItem vpcv1.Vcpu) (availableVcpuMap map[string]interface{}) { + availableVcpuMap = map[string]interface{}{} + + if availableVcpuItem.Architecture != nil { + availableVcpuMap["architecture"] = availableVcpuItem.Architecture + } + if availableVcpuItem.Count != nil { + availableVcpuMap["count"] = availableVcpuItem.Count + } + + return availableVcpuMap +} + +func dataSourceDedicatedHostFlattenGroup(result vpcv1.DedicatedHostGroupReference) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostGroupToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostGroupToMap(groupItem vpcv1.DedicatedHostGroupReference) (groupMap map[string]interface{}) { + groupMap = map[string]interface{}{} + + if groupItem.CRN != nil { + groupMap["crn"] = groupItem.CRN + } + if groupItem.Deleted != nil { + deletedList := []map[string]interface{}{} + deletedMap := dataSourceDedicatedHostGroupDeletedToMap(*groupItem.Deleted) + deletedList = append(deletedList, deletedMap) + groupMap["deleted"] = deletedList + } + if groupItem.Href != nil { + groupMap["href"] = groupItem.Href + } + if groupItem.ID != nil { + groupMap["id"] = groupItem.ID + } + if groupItem.Name != nil { + groupMap["name"] = groupItem.Name + } + if groupItem.ResourceType != nil { + groupMap["resource_type"] = groupItem.ResourceType + } + + return groupMap +} + +func dataSourceDedicatedHostGroupDeletedToMap(deletedItem vpcv1.DedicatedHostGroupReferenceDeleted) (deletedMap map[string]interface{}) { + deletedMap = map[string]interface{}{} + + if deletedItem.MoreInfo != nil { + deletedMap["more_info"] = deletedItem.MoreInfo + } + + return deletedMap +} + +func dataSourceDedicatedHostFlattenInstances(result []vpcv1.InstanceReference) (instances []map[string]interface{}) { + for _, instancesItem := range result { + instances = append(instances, dataSourceDedicatedHostInstancesToMap(instancesItem)) + } + + return instances +} + +func dataSourceDedicatedHostInstancesToMap(instancesItem vpcv1.InstanceReference) (instancesMap map[string]interface{}) { + instancesMap = map[string]interface{}{} + + if instancesItem.CRN != nil { + instancesMap["crn"] = instancesItem.CRN + } + if instancesItem.Deleted != nil { + deletedList := []map[string]interface{}{} + deletedMap := dataSourceDedicatedHostInstancesDeletedToMap(*instancesItem.Deleted) + deletedList = append(deletedList, deletedMap) + instancesMap["deleted"] = deletedList + } + if instancesItem.Href != nil { + instancesMap["href"] = instancesItem.Href + } + if instancesItem.ID != nil { + instancesMap["id"] = instancesItem.ID + } + if instancesItem.Name != nil { + instancesMap["name"] = instancesItem.Name + } + + return instancesMap +} + +func dataSourceDedicatedHostInstancesDeletedToMap(deletedItem vpcv1.InstanceReferenceDeleted) (deletedMap map[string]interface{}) { + deletedMap = map[string]interface{}{} + + if deletedItem.MoreInfo != nil { + deletedMap["more_info"] = deletedItem.MoreInfo + } + + return deletedMap +} + +func 
dataSourceDedicatedHostFlattenProfile(result vpcv1.DedicatedHostProfileReference) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostProfileToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostProfileToMap(profileItem vpcv1.DedicatedHostProfileReference) (profileMap map[string]interface{}) { + profileMap = map[string]interface{}{} + + if profileItem.Href != nil { + profileMap["href"] = profileItem.Href + } + if profileItem.Name != nil { + profileMap["name"] = profileItem.Name + } + + return profileMap +} + +func dataSourceDedicatedHostFlattenSupportedInstanceProfiles(result []vpcv1.InstanceProfileReference) (supportedInstanceProfiles []map[string]interface{}) { + for _, supportedInstanceProfilesItem := range result { + supportedInstanceProfiles = append(supportedInstanceProfiles, dataSourceDedicatedHostSupportedInstanceProfilesToMap(supportedInstanceProfilesItem)) + } + + return supportedInstanceProfiles +} + +func dataSourceDedicatedHostSupportedInstanceProfilesToMap(supportedInstanceProfilesItem vpcv1.InstanceProfileReference) (supportedInstanceProfilesMap map[string]interface{}) { + supportedInstanceProfilesMap = map[string]interface{}{} + + if supportedInstanceProfilesItem.Href != nil { + supportedInstanceProfilesMap["href"] = supportedInstanceProfilesItem.Href + } + if supportedInstanceProfilesItem.Name != nil { + supportedInstanceProfilesMap["name"] = supportedInstanceProfilesItem.Name + } + + return supportedInstanceProfilesMap +} + +func dataSourceDedicatedHostFlattenVcpu(result vpcv1.Vcpu) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostVcpuToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostVcpuToMap(vcpuItem vpcv1.Vcpu) (vcpuMap map[string]interface{}) { + vcpuMap = map[string]interface{}{} + + if vcpuItem.Architecture != nil { + vcpuMap["architecture"] = vcpuItem.Architecture + } + if vcpuItem.Count != nil { + vcpuMap["count"] = vcpuItem.Count + } + + return vcpuMap +} + +func dataSourceDedicatedHostFlattenDisks(result []vpcv1.DedicatedHostDisk) (disks []map[string]interface{}) { + for _, disksItem := range result { + disks = append(disks, dataSourceDedicatedHostDisksToMap(disksItem)) + } + + return disks +} + +func dataSourceDedicatedHostDisksToMap(disksItem vpcv1.DedicatedHostDisk) (disksMap map[string]interface{}) { + disksMap = map[string]interface{}{} + + if disksItem.Available != nil { + disksMap["available"] = disksItem.Available + } + if disksItem.CreatedAt != nil { + disksMap["created_at"] = disksItem.CreatedAt.String() + } + if disksItem.Href != nil { + disksMap["href"] = disksItem.Href + } + if disksItem.ID != nil { + disksMap["id"] = disksItem.ID + } + if disksItem.InstanceDisks != nil { + instanceDisksList := []map[string]interface{}{} + for _, instanceDisksItem := range disksItem.InstanceDisks { + instanceDisksList = append(instanceDisksList, dataSourceDedicatedHostDisksInstanceDisksToMap(instanceDisksItem)) + } + disksMap["instance_disks"] = instanceDisksList + } + if disksItem.InterfaceType != nil { + disksMap["interface_type"] = disksItem.InterfaceType + } + if disksItem.LifecycleState != nil { + disksMap["lifecycle_state"] = disksItem.LifecycleState + } + if disksItem.Name != nil { + disksMap["name"] = disksItem.Name + } + if disksItem.Provisionable != nil { + disksMap["provisionable"] = disksItem.Provisionable + 
} + if disksItem.ResourceType != nil { + disksMap["resource_type"] = disksItem.ResourceType + } + if disksItem.Size != nil { + disksMap["size"] = disksItem.Size + } + if disksItem.SupportedInstanceInterfaceTypes != nil { + disksMap["supported_instance_interface_types"] = disksItem.SupportedInstanceInterfaceTypes + } + + return disksMap +} + +func dataSourceDedicatedHostDisksInstanceDisksToMap(instanceDisksItem vpcv1.InstanceDiskReference) (instanceDisksMap map[string]interface{}) { + instanceDisksMap = map[string]interface{}{} + + if instanceDisksItem.Deleted != nil { + deletedList := []map[string]interface{}{} + deletedMap := dataSourceDedicatedHostInstanceDisksDeletedToMap(*instanceDisksItem.Deleted) + deletedList = append(deletedList, deletedMap) + instanceDisksMap["deleted"] = deletedList + } + if instanceDisksItem.Href != nil { + instanceDisksMap["href"] = instanceDisksItem.Href + } + if instanceDisksItem.ID != nil { + instanceDisksMap["id"] = instanceDisksItem.ID + } + if instanceDisksItem.Name != nil { + instanceDisksMap["name"] = instanceDisksItem.Name + } + if instanceDisksItem.ResourceType != nil { + instanceDisksMap["resource_type"] = instanceDisksItem.ResourceType + } + + return instanceDisksMap +} + +func dataSourceDedicatedHostInstanceDisksDeletedToMap(deletedItem vpcv1.InstanceDiskReferenceDeleted) (deletedMap map[string]interface{}) { + deletedMap = map[string]interface{}{} + + if deletedItem.MoreInfo != nil { + deletedMap["more_info"] = deletedItem.MoreInfo + } + + return deletedMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_disk.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_disk.go new file mode 100644 index 00000000000..3cc623e0a83 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_disk.go @@ -0,0 +1,233 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
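+//
+// Illustrative usage (assuming the registered data source name
+// "ibm_is_dedicated_host_disk", per this file's name; both identifiers are
+// hypothetical):
+//
+//   data "ibm_is_dedicated_host_disk" "example" {
+//     dedicated_host = "0717-dedicated-host-id"
+//     disk           = "0717-disk-id"
+//   }
+//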
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsDedicatedHostDisk() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsDedicatedHostDiskRead, + + Schema: map[string]*schema.Schema{ + "dedicated_host": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The dedicated host identifier.", + }, + "disk": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The dedicated host disk identifier.", + }, + "available": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The remaining space left for instance placement in GB (gigabytes).", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the disk was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this disk.", + }, + "instance_disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Instance disks that are on this dedicated host disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deleted": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this instance disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this disk.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "interface_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The disk interface used for attaching the disk. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values.
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "lifecycle_state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of this dedicated host disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined or system-provided name for this disk.", + }, + "provisionable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this dedicated host disk is available for instance disk creation.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + "supported_instance_interface_types": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The instance disk interfaces supported for this dedicated host disk.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func dataSourceIbmIsDedicatedHostDiskRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + getDedicatedHostDiskOptions := &vpcv1.GetDedicatedHostDiskOptions{} + + getDedicatedHostDiskOptions.SetDedicatedHostID(d.Get("dedicated_host").(string)) + getDedicatedHostDiskOptions.SetID(d.Get("disk").(string)) + + dedicatedHostDisk, response, err := vpcClient.GetDedicatedHostDiskWithContext(context.TODO(), getDedicatedHostDiskOptions) + if err != nil { + log.Printf("[DEBUG] GetDedicatedHostDiskWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*dedicatedHostDisk.ID) + if err = d.Set("available", dedicatedHostDisk.Available); err != nil { + return fmt.Errorf("Error setting available: %s", err) + } + if err = d.Set("created_at", dedicatedHostDisk.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + if err = d.Set("href", dedicatedHostDisk.Href); err != nil { + return fmt.Errorf("Error setting href: %s", err) + } + + if dedicatedHostDisk.InstanceDisks != nil { + err = d.Set("instance_disks", dataSourceDedicatedHostDiskFlattenInstanceDisks(dedicatedHostDisk.InstanceDisks)) + if err != nil { + return fmt.Errorf("Error setting instance_disks %s", err) + } + } + if err = d.Set("interface_type", dedicatedHostDisk.InterfaceType); err != nil { + return fmt.Errorf("Error setting interface_type: %s", err) + } + if dedicatedHostDisk.LifecycleState != nil { + if err = d.Set("lifecycle_state", dedicatedHostDisk.LifecycleState); err != nil { + return fmt.Errorf("Error setting lifecycle_state: %s", err) + } + } + if err = d.Set("name", dedicatedHostDisk.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("provisionable", dedicatedHostDisk.Provisionable); err != nil { + return fmt.Errorf("Error setting provisionable: %s", err) + } + if err = d.Set("resource_type", dedicatedHostDisk.ResourceType); err != nil { + return fmt.Errorf("Error setting resource_type: %s", err) + } + if err = d.Set("size", dedicatedHostDisk.Size); err != nil { + return fmt.Errorf("Error setting size: %s", err) + } + if err = d.Set("supported_instance_interface_types", dedicatedHostDisk.SupportedInstanceInterfaceTypes); err != nil { + return fmt.Errorf("Error setting supported_instance_interface_types: %s", err) + } + 
+ return nil +} + +func dataSourceDedicatedHostDiskFlattenInstanceDisks(result []vpcv1.InstanceDiskReference) (instanceDisks []map[string]interface{}) { + for _, instanceDisksItem := range result { + instanceDisks = append(instanceDisks, dataSourceDedicatedHostDiskInstanceDisksToMap(instanceDisksItem)) + } + + return instanceDisks +} + +func dataSourceDedicatedHostDiskInstanceDisksToMap(instanceDisksItem vpcv1.InstanceDiskReference) (instanceDisksMap map[string]interface{}) { + instanceDisksMap = map[string]interface{}{} + + if instanceDisksItem.Deleted != nil { + deletedList := []map[string]interface{}{} + deletedMap := dataSourceDedicatedHostDiskInstanceDisksDeletedToMap(*instanceDisksItem.Deleted) + deletedList = append(deletedList, deletedMap) + instanceDisksMap["deleted"] = deletedList + } + if instanceDisksItem.Href != nil { + instanceDisksMap["href"] = instanceDisksItem.Href + } + if instanceDisksItem.ID != nil { + instanceDisksMap["id"] = instanceDisksItem.ID + } + if instanceDisksItem.Name != nil { + instanceDisksMap["name"] = instanceDisksItem.Name + } + if instanceDisksItem.ResourceType != nil { + instanceDisksMap["resource_type"] = instanceDisksItem.ResourceType + } + + return instanceDisksMap +} + +func dataSourceDedicatedHostDiskInstanceDisksDeletedToMap(deletedItem vpcv1.InstanceDiskReferenceDeleted) (deletedMap map[string]interface{}) { + deletedMap = map[string]interface{}{} + + if deletedItem.MoreInfo != nil { + deletedMap["more_info"] = deletedItem.MoreInfo + } + + return deletedMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_disks.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_disks.go new file mode 100644 index 00000000000..c84a85640f7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_disks.go @@ -0,0 +1,262 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
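+//
+// Illustrative usage (assuming the registered data source name
+// "ibm_is_dedicated_host_disks", per this file's name; the host identifier
+// is hypothetical):
+//
+//   data "ibm_is_dedicated_host_disks" "example" {
+//     dedicated_host = "0717-dedicated-host-id"
+//   }
+//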
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsDedicatedHostDisks() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsDedicatedHostDisksRead, + + Schema: map[string]*schema.Schema{ + "dedicated_host": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The dedicated host identifier.", + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the dedicated host's disks.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "available": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The remaining space left for instance placement in GB (gigabytes).", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the disk was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this disk.", + }, + "instance_disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Instance disks that are on this dedicated host disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deleted": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this instance disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this disk.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "interface_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The disk interface used for attaching the disk. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values.
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "lifecycle_state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of this dedicated host disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined or system-provided name for this disk.", + }, + "provisionable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this dedicated host disk is available for instance disk creation.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + "supported_instance_interface_types": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The instance disk interfaces supported for this dedicated host disk.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIbmIsDedicatedHostDisksRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + listDedicatedHostDisksOptions := &vpcv1.ListDedicatedHostDisksOptions{} + + listDedicatedHostDisksOptions.SetDedicatedHostID(d.Get("dedicated_host").(string)) + + dedicatedHostDiskCollection, response, err := vpcClient.ListDedicatedHostDisksWithContext(context.TODO(), listDedicatedHostDisksOptions) + if err != nil { + log.Printf("[DEBUG] ListDedicatedHostDisksWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(dataSourceIbmIsDedicatedHostDisksID(d)) + + if dedicatedHostDiskCollection.Disks != nil { + err = d.Set("disks", dataSourceDedicatedHostDiskCollectionFlattenDisks(dedicatedHostDiskCollection.Disks)) + if err != nil { + return fmt.Errorf("Error setting disks %s", err) + } + } + + return nil +} + +// dataSourceIbmIsDedicatedHostDisksID returns a reasonable ID for the list. 
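+// NOTE: a disk collection has no natural identifier, so the function below
+// uses a UTC timestamp; for a data source the ID only has to be set, not
+// stable. A minimal sketch of a deterministic alternative, keyed off the query
+// argument (the helper name is illustrative, not an upstream function):
+//
+//	func dataSourceIbmIsDedicatedHostDisksStableID(d *schema.ResourceData) string {
+//		// The same dedicated host always yields the same data source ID.
+//		return fmt.Sprintf("%s/disks", d.Get("dedicated_host").(string))
+//	}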
+func dataSourceIbmIsDedicatedHostDisksID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceDedicatedHostDiskCollectionFlattenDisks(result []vpcv1.DedicatedHostDisk) (disks []map[string]interface{}) { + for _, disksItem := range result { + disks = append(disks, dataSourceDedicatedHostDiskCollectionDisksToMap(disksItem)) + } + + return disks +} + +func dataSourceDedicatedHostDiskCollectionDisksToMap(disksItem vpcv1.DedicatedHostDisk) (disksMap map[string]interface{}) { + disksMap = map[string]interface{}{} + + if disksItem.Available != nil { + disksMap["available"] = disksItem.Available + } + if disksItem.CreatedAt != nil { + disksMap["created_at"] = disksItem.CreatedAt.String() + } + if disksItem.Href != nil { + disksMap["href"] = disksItem.Href + } + if disksItem.ID != nil { + disksMap["id"] = disksItem.ID + } + if disksItem.InstanceDisks != nil { + instanceDisksList := []map[string]interface{}{} + for _, instanceDisksItem := range disksItem.InstanceDisks { + instanceDisksList = append(instanceDisksList, dataSourceDedicatedHostDiskCollectionDisksInstanceDisksToMap(instanceDisksItem)) + } + disksMap["instance_disks"] = instanceDisksList + } + if disksItem.InterfaceType != nil { + disksMap["interface_type"] = disksItem.InterfaceType + } + if disksItem.LifecycleState != nil { + disksMap["lifecycle_state"] = disksItem.LifecycleState + } + if disksItem.Name != nil { + disksMap["name"] = disksItem.Name + } + if disksItem.Provisionable != nil { + disksMap["provisionable"] = disksItem.Provisionable + } + if disksItem.ResourceType != nil { + disksMap["resource_type"] = disksItem.ResourceType + } + if disksItem.Size != nil { + disksMap["size"] = disksItem.Size + } + if disksItem.SupportedInstanceInterfaceTypes != nil { + disksMap["supported_instance_interface_types"] = disksItem.SupportedInstanceInterfaceTypes + } + + return disksMap +} + +func dataSourceDedicatedHostDiskCollectionDisksInstanceDisksToMap(instanceDisksItem vpcv1.InstanceDiskReference) (instanceDisksMap map[string]interface{}) { + instanceDisksMap = map[string]interface{}{} + + if instanceDisksItem.Deleted != nil { + deletedList := []map[string]interface{}{} + deletedMap := dataSourceDedicatedHostDiskCollectionInstanceDisksDeletedToMap(*instanceDisksItem.Deleted) + deletedList = append(deletedList, deletedMap) + instanceDisksMap["deleted"] = deletedList + } + if instanceDisksItem.Href != nil { + instanceDisksMap["href"] = instanceDisksItem.Href + } + if instanceDisksItem.ID != nil { + instanceDisksMap["id"] = instanceDisksItem.ID + } + if instanceDisksItem.Name != nil { + instanceDisksMap["name"] = instanceDisksItem.Name + } + if instanceDisksItem.ResourceType != nil { + instanceDisksMap["resource_type"] = instanceDisksItem.ResourceType + } + + return instanceDisksMap +} + +func dataSourceDedicatedHostDiskCollectionInstanceDisksDeletedToMap(deletedItem vpcv1.InstanceDiskReferenceDeleted) (deletedMap map[string]interface{}) { + deletedMap = map[string]interface{}{} + + if deletedItem.MoreInfo != nil { + deletedMap["more_info"] = deletedItem.MoreInfo + } + + return deletedMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_group.go new file mode 100644 index 00000000000..8faf702e91b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_group.go @@ -0,0 +1,281 @@ +// Copyright IBM Corp. 
2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsDedicatedHostGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsDedicatedHostGroupRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The unique user-defined name for this dedicated host group.", + }, + "class": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The dedicated host profile class for hosts in this group.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the dedicated host group was created.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host group.", + }, + "dedicated_hosts": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The dedicated hosts that are in this dedicated host group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host.", + }, + "deleted": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this dedicated host.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for this dedicated host.
If unspecified, the name will be a hyphenated list of randomly-selected words.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + }, + }, + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The dedicated host profile family for hosts in this group.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host group.", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier of the resource group for this dedicated host group.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + "supported_instance_profiles": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of instance profiles that can be used by instances placed on this dedicated host group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance profile.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this virtual server instance profile.", + }, + }, + }, + }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name of the zone this dedicated host group resides in.", + }, + }, + } +} + +func dataSourceIbmIsDedicatedHostGroupRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + listDedicatedHostGroupsOptions := &vpcv1.ListDedicatedHostGroupsOptions{} + + dedicatedHostGroupCollection, response, err := vpcClient.ListDedicatedHostGroupsWithContext(context.TODO(), listDedicatedHostGroupsOptions) + if err != nil { + log.Printf("[DEBUG] ListDedicatedHostGroupsWithContext failed %s\n%s", err, response) + return err + } + + name := d.Get("name").(string) + + if len(dedicatedHostGroupCollection.Groups) != 0 { + + dedicatedHostGroup := vpcv1.DedicatedHostGroup{} + for _, data := range dedicatedHostGroupCollection.Groups { + if *data.Name == name { + dedicatedHostGroup = data + d.SetId(*dedicatedHostGroup.ID) + if err = d.Set("class", dedicatedHostGroup.Class); err != nil { + return fmt.Errorf("Error setting class: %s", err) + } + if dedicatedHostGroup.CreatedAt != nil { + if err = d.Set("created_at", dedicatedHostGroup.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + } + + if err = d.Set("crn", dedicatedHostGroup.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + + if dedicatedHostGroup.DedicatedHosts != nil { + err = d.Set("dedicated_hosts", dataSourceDedicatedHostGroupFlattenDedicatedHosts(dedicatedHostGroup.DedicatedHosts)) + if err != nil { + return fmt.Errorf("Error setting dedicated_hosts %s", err) + } + } + if err = d.Set("family", dedicatedHostGroup.Family); err != nil { + return fmt.Errorf("Error setting family: %s", err) + } + if err = d.Set("href", dedicatedHostGroup.Href); err != nil { + return fmt.Errorf("Error setting href: %s", err) + } + + if dedicatedHostGroup.ResourceGroup != nil { + err = d.Set("resource_group", *dedicatedHostGroup.ResourceGroup.ID) + if err != nil { + return fmt.Errorf("Error setting resource_group %s", 
err) + } + } + if err = d.Set("resource_type", dedicatedHostGroup.ResourceType); err != nil { + return fmt.Errorf("Error setting resource_type: %s", err) + } + + if dedicatedHostGroup.SupportedInstanceProfiles != nil { + err = d.Set("supported_instance_profiles", dataSourceDedicatedHostGroupFlattenSupportedInstanceProfiles(dedicatedHostGroup.SupportedInstanceProfiles)) + if err != nil { + return fmt.Errorf("Error setting supported_instance_profiles %s", err) + } + } + + if dedicatedHostGroup.Zone != nil { + err = d.Set("zone", *dedicatedHostGroup.Zone.Name) + if err != nil { + return fmt.Errorf("Error setting zone %s", err) + } + } + return nil + } + } + } + return fmt.Errorf("No Dedicated Host Group found with name %s", name) +} + +func dataSourceDedicatedHostGroupFlattenDedicatedHosts(result []vpcv1.DedicatedHostReference) (dedicatedHosts []map[string]interface{}) { + for _, dedicatedHostsItem := range result { + dedicatedHosts = append(dedicatedHosts, dataSourceDedicatedHostGroupDedicatedHostsToMap(dedicatedHostsItem)) + } + + return dedicatedHosts +} + +func dataSourceDedicatedHostGroupDedicatedHostsToMap(dedicatedHostsItem vpcv1.DedicatedHostReference) (dedicatedHostsMap map[string]interface{}) { + dedicatedHostsMap = map[string]interface{}{} + + if dedicatedHostsItem.CRN != nil { + dedicatedHostsMap["crn"] = dedicatedHostsItem.CRN + } + if dedicatedHostsItem.Deleted != nil { + deletedList := []map[string]interface{}{} + deletedMap := dataSourceDedicatedHostGroupDedicatedHostsDeletedToMap(*dedicatedHostsItem.Deleted) + deletedList = append(deletedList, deletedMap) + dedicatedHostsMap["deleted"] = deletedList + } + if dedicatedHostsItem.Href != nil { + dedicatedHostsMap["href"] = dedicatedHostsItem.Href + } + if dedicatedHostsItem.ID != nil { + dedicatedHostsMap["id"] = dedicatedHostsItem.ID + } + if dedicatedHostsItem.Name != nil { + dedicatedHostsMap["name"] = dedicatedHostsItem.Name + } + if dedicatedHostsItem.ResourceType != nil { + dedicatedHostsMap["resource_type"] = dedicatedHostsItem.ResourceType + } + + return dedicatedHostsMap +} + +func dataSourceDedicatedHostGroupDedicatedHostsDeletedToMap(deletedItem vpcv1.DedicatedHostReferenceDeleted) (deletedMap map[string]interface{}) { + deletedMap = map[string]interface{}{} + + if deletedItem.MoreInfo != nil { + deletedMap["more_info"] = deletedItem.MoreInfo + } + + return deletedMap +} + +func dataSourceDedicatedHostGroupFlattenSupportedInstanceProfiles(result []vpcv1.InstanceProfileReference) (supportedInstanceProfiles []map[string]interface{}) { + for _, supportedInstanceProfilesItem := range result { + supportedInstanceProfiles = append(supportedInstanceProfiles, dataSourceDedicatedHostGroupSupportedInstanceProfilesToMap(supportedInstanceProfilesItem)) + } + + return supportedInstanceProfiles +} + +func dataSourceDedicatedHostGroupSupportedInstanceProfilesToMap(supportedInstanceProfilesItem vpcv1.InstanceProfileReference) (supportedInstanceProfilesMap map[string]interface{}) { + supportedInstanceProfilesMap = map[string]interface{}{} + + if supportedInstanceProfilesItem.Href != nil { + supportedInstanceProfilesMap["href"] = supportedInstanceProfilesItem.Href + } + if supportedInstanceProfilesItem.Name != nil { + supportedInstanceProfilesMap["name"] = supportedInstanceProfilesItem.Name + } + + return supportedInstanceProfilesMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_groups.go 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_groups.go new file mode 100644 index 00000000000..5a201d9e3dc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_groups.go @@ -0,0 +1,387 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsDedicatedHostGroups() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsDedicatedHostGroupsRead, + + Schema: map[string]*schema.Schema{ + "first": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A link to the first page of resources.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for a page of resources.", + }, + }, + }, + }, + "limit": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum number of resources that can be returned by the request.", + }, + "next": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A link to the next page of resources. This property is present for all pages except the last page.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for a page of resources.", + }, + }, + }, + }, + "host_groups": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of dedicated host groups.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "class": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The dedicated host profile class for hosts in this group.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the dedicated host group was created.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host group.", + }, + "dedicated_hosts": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The dedicated hosts that are in this dedicated host group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host.", + }, + "deleted": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this dedicated host.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for this dedicated host.
If unspecified, the name will be a hyphenated list of randomly-selected words.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + }, + }, + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The dedicated host profile family for hosts in this group.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host group.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this dedicated host group.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for this dedicated host group. If unspecified, the name will be a hyphenated list of randomly-selected words.", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource group for this dedicated host group.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + "supported_instance_profiles": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of instance profiles that can be used by instances placed on this dedicated host group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance profile.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this virtual server instance profile.", + }, + }, + }, + }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name of the zone this dedicated host group resides in.", + }, + }, + }, + }, + "total_count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The total number of resources across all pages.", + }, + }, + } +} + +func dataSourceIbmIsDedicatedHostGroupsRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + listDedicatedHostGroupsOptions := &vpcv1.ListDedicatedHostGroupsOptions{} + + dedicatedHostGroupCollection, response, err := vpcClient.ListDedicatedHostGroupsWithContext(context.TODO(), listDedicatedHostGroupsOptions) + if err != nil { + log.Printf("[DEBUG] ListDedicatedHostGroupsWithContext failed %s\n%s", err, response) + return err + } + + if len(dedicatedHostGroupCollection.Groups) != 0 { + + d.SetId(dataSourceIbmIsDedicatedHostGroupsID(d)) + + if dedicatedHostGroupCollection.First != nil { + err = d.Set("first", dataSourceDedicatedHostGroupCollectionFlattenFirst(*dedicatedHostGroupCollection.First)) + if err != nil { + return fmt.Errorf("Error setting first %s", err) + } + } + + if dedicatedHostGroupCollection.Groups != nil { + err = d.Set("host_groups", dataSourceDedicatedHostGroupCollectionFlattenGroups(dedicatedHostGroupCollection.Groups)) + if err != nil { + return fmt.Errorf("Error setting host_groups %s", err) + } + } + if err = d.Set("limit", dedicatedHostGroupCollection.Limit); err != nil { + return fmt.Errorf("Error setting limit: %s", err) + } + + if dedicatedHostGroupCollection.Next != nil { + err = d.Set("next", dataSourceDedicatedHostGroupCollectionFlattenNext(*dedicatedHostGroupCollection.Next)) + if err != nil {
+ return fmt.Errorf("Error setting next %s", err) + } + } + if err = d.Set("total_count", dedicatedHostGroupCollection.TotalCount); err != nil { + return fmt.Errorf("Error setting total_count: %s", err) + } + + } + return nil +} + +// dataSourceIbmIsDedicatedHostGroupsID returns a reasonable ID for the list. +func dataSourceIbmIsDedicatedHostGroupsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceDedicatedHostGroupCollectionFirstToMap(firstItem vpcv1.DedicatedHostGroupCollectionFirst) (firstMap map[string]interface{}) { + firstMap = map[string]interface{}{} + + if firstItem.Href != nil { + firstMap["href"] = firstItem.Href + } + + return firstMap +} + +func dataSourceDedicatedHostGroupCollectionFlattenGroups(result []vpcv1.DedicatedHostGroup) (groups []map[string]interface{}) { + for _, groupsItem := range result { + groups = append(groups, dataSourceDedicatedHostGroupCollectionGroupsToMap(groupsItem)) + } + + return groups +} + +func dataSourceDedicatedHostGroupCollectionGroupsToMap(groupsItem vpcv1.DedicatedHostGroup) (groupsMap map[string]interface{}) { + groupsMap = map[string]interface{}{} + + if groupsItem.Class != nil { + groupsMap["class"] = groupsItem.Class + } + if groupsItem.CreatedAt != nil { + groupsMap["created_at"] = groupsItem.CreatedAt.String() + } + if groupsItem.CRN != nil { + groupsMap["crn"] = groupsItem.CRN + } + if groupsItem.DedicatedHosts != nil { + dedicatedHostsList := []map[string]interface{}{} + for _, dedicatedHostsItem := range groupsItem.DedicatedHosts { + dedicatedHostsList = append(dedicatedHostsList, dataSourceDedicatedHostGroupCollectionGroupsDedicatedHostsToMap(dedicatedHostsItem)) + } + groupsMap["dedicated_hosts"] = dedicatedHostsList + } + if groupsItem.Family != nil { + groupsMap["family"] = groupsItem.Family + } + if groupsItem.Href != nil { + groupsMap["href"] = groupsItem.Href + } + if groupsItem.ID != nil { + groupsMap["id"] = groupsItem.ID + } + if groupsItem.Name != nil { + groupsMap["name"] = groupsItem.Name + } + if groupsItem.ResourceGroup != nil { + groupsMap["resource_group"] = *groupsItem.ResourceGroup.ID + } + if groupsItem.ResourceType != nil { + groupsMap["resource_type"] = groupsItem.ResourceType + } + if groupsItem.SupportedInstanceProfiles != nil { + supportedInstanceProfilesList := []map[string]interface{}{} + for _, supportedInstanceProfilesItem := range groupsItem.SupportedInstanceProfiles { + supportedInstanceProfilesList = append(supportedInstanceProfilesList, dataSourceDedicatedHostGroupCollectionGroupsSupportedInstanceProfilesToMap(supportedInstanceProfilesItem)) + } + groupsMap["supported_instance_profiles"] = supportedInstanceProfilesList + } + if groupsItem.Zone != nil { + groupsMap["zone"] = *groupsItem.Zone.Name + } + + return groupsMap +} + +func dataSourceDedicatedHostGroupCollectionDedicatedHostsDeletedToMap(deletedItem vpcv1.DedicatedHostReferenceDeleted) (deletedMap map[string]interface{}) { + deletedMap = map[string]interface{}{} + + if deletedItem.MoreInfo != nil { + deletedMap["more_info"] = deletedItem.MoreInfo + } + + return deletedMap +} + +func dataSourceDedicatedHostGroupCollectionGroupsDedicatedHostsToMap(dedicatedHostsItem vpcv1.DedicatedHostReference) (dedicatedHostsMap map[string]interface{}) { + dedicatedHostsMap = map[string]interface{}{} + + if dedicatedHostsItem.CRN != nil { + dedicatedHostsMap["crn"] = dedicatedHostsItem.CRN + } + if dedicatedHostsItem.Deleted != nil { + deletedList := []map[string]interface{}{} + deletedMap := 
dataSourceDedicatedHostGroupCollectionDedicatedHostsDeletedToMap(*dedicatedHostsItem.Deleted) + deletedList = append(deletedList, deletedMap) + dedicatedHostsMap["deleted"] = deletedList + } + if dedicatedHostsItem.Href != nil { + dedicatedHostsMap["href"] = dedicatedHostsItem.Href + } + if dedicatedHostsItem.ID != nil { + dedicatedHostsMap["id"] = dedicatedHostsItem.ID + } + if dedicatedHostsItem.Name != nil { + dedicatedHostsMap["name"] = dedicatedHostsItem.Name + } + if dedicatedHostsItem.ResourceType != nil { + dedicatedHostsMap["resource_type"] = dedicatedHostsItem.ResourceType + } + + return dedicatedHostsMap +} + +func dataSourceDedicatedHostGroupCollectionGroupsSupportedInstanceProfilesToMap(supportedInstanceProfilesItem vpcv1.InstanceProfileReference) (supportedInstanceProfilesMap map[string]interface{}) { + supportedInstanceProfilesMap = map[string]interface{}{} + + if supportedInstanceProfilesItem.Href != nil { + supportedInstanceProfilesMap["href"] = supportedInstanceProfilesItem.Href + } + if supportedInstanceProfilesItem.Name != nil { + supportedInstanceProfilesMap["name"] = supportedInstanceProfilesItem.Name + } + + return supportedInstanceProfilesMap +} + +func dataSourceDedicatedHostGroupCollectionFlattenFirst(result vpcv1.DedicatedHostGroupCollectionFirst) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostGroupCollectionFirstToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostGroupCollectionFlattenNext(result vpcv1.DedicatedHostGroupCollectionNext) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostGroupCollectionNextToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostGroupCollectionNextToMap(nextItem vpcv1.DedicatedHostGroupCollectionNext) (nextMap map[string]interface{}) { + nextMap = map[string]interface{}{} + + if nextItem.Href != nil { + nextMap["href"] = nextItem.Href + } + + return nextMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_profile.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_profile.go new file mode 100644 index 00000000000..bb798505be4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_profile.go @@ -0,0 +1,628 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsDedicatedHostProfile() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsDedicatedHostProfileRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The globally unique name for this dedicated host profile.", + }, + "class": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The product class this dedicated host profile belongs to.", + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the dedicated host profile's disks.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interface_type": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The interface of the disk for a dedicated host with this profile. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + }, + }, + }, + "quantity": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The number of disks of this type for a dedicated host with this profile.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + }, + }, + }, + "size": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + }, + }, + }, + "supported_instance_interface_types": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The instance disk interfaces supported for a dedicated host with this profile.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The product family this dedicated host profile belongs to. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values.
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host profile.", + }, + "memory": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "socket_count": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "supported_instance_profiles": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of instance profiles that can be used by instances placed on dedicated hosts with this profile.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance profile.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this virtual server instance profile.", + }, + }, + }, + }, + "vcpu_architecture": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VCPU architecture for a dedicated host with this
profile.", + }, + }, + }, + }, + "vcpu_count": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIbmIsDedicatedHostProfileRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + name := d.Get("name").(string) + getDedicatedHostProfileOptions := &vpcv1.GetDedicatedHostProfileOptions{ + Name: &name, + } + dedicatedHostProfile, response, err := vpcClient.GetDedicatedHostProfileWithContext(context.TODO(), getDedicatedHostProfileOptions) + if err != nil { + log.Printf("[DEBUG] ListDedicatedHostProfilesWithContext failed %s\n%s", err, response) + return err + } + if dedicatedHostProfile == nil { + return fmt.Errorf("No Dedicated Host Profile found with name %s", name) + } + d.SetId(dataSourceIbmIsDedicatedHostProfileID(d)) + + if err = d.Set("class", dedicatedHostProfile.Class); err != nil { + return fmt.Errorf("Error setting class: %s", err) + } + + if dedicatedHostProfile.Disks != nil { + err = d.Set("disks", dataSourceDedicatedHostProfileFlattenDisks(dedicatedHostProfile.Disks)) + if err != nil { + return fmt.Errorf("Error setting disks %s", err) + } + } + + if err = d.Set("family", dedicatedHostProfile.Family); err != nil { + return fmt.Errorf("Error setting family: %s", err) + } + if err = d.Set("href", dedicatedHostProfile.Href); err != nil { + return fmt.Errorf("Error setting href: %s", err) + } + + if dedicatedHostProfile.Memory != nil { + err = d.Set("memory", dataSourceDedicatedHostProfileFlattenMemory(*dedicatedHostProfile.Memory.(*vpcv1.DedicatedHostProfileMemory))) + if err != nil { + return fmt.Errorf("Error setting memory %s", err) + } + } + + if dedicatedHostProfile.SocketCount != nil { + err = d.Set("socket_count", dataSourceDedicatedHostProfileFlattenSocketCount(*dedicatedHostProfile.SocketCount.(*vpcv1.DedicatedHostProfileSocket))) + if err != nil { + return fmt.Errorf("Error setting socket_count %s", err) + } + } + + if dedicatedHostProfile.SupportedInstanceProfiles != nil { + err = d.Set("supported_instance_profiles", dataSourceDedicatedHostProfileFlattenSupportedInstanceProfiles(dedicatedHostProfile.SupportedInstanceProfiles)) + if err != nil { + return fmt.Errorf("Error setting supported_instance_profiles %s", err) + } + } + + if dedicatedHostProfile.VcpuArchitecture != nil { + err = d.Set("vcpu_architecture", dataSourceDedicatedHostProfileFlattenVcpuArchitecture(*dedicatedHostProfile.VcpuArchitecture)) 
+ if err != nil { + return fmt.Errorf("Error setting vcpu_architecture %s", err) + } + } + + if dedicatedHostProfile.VcpuCount != nil { + err = d.Set("vcpu_count", dataSourceDedicatedHostProfileFlattenVcpuCount(*dedicatedHostProfile.VcpuCount.(*vpcv1.DedicatedHostProfileVcpu))) + if err != nil { + return fmt.Errorf("Error setting vcpu_count %s", err) + } + } + + return nil + +} + +// dataSourceIbmIsDedicatedHostProfileID returns a reasonable ID for the data source. +func dataSourceIbmIsDedicatedHostProfileID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceDedicatedHostProfileFlattenMemory(result vpcv1.DedicatedHostProfileMemory) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostProfileMemoryToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostProfileMemoryToMap(memoryItem vpcv1.DedicatedHostProfileMemory) (memoryMap map[string]interface{}) { + memoryMap = map[string]interface{}{} + + if memoryItem.Type != nil { + memoryMap["type"] = memoryItem.Type + } + if memoryItem.Value != nil { + memoryMap["value"] = memoryItem.Value + } + if memoryItem.Default != nil { + memoryMap["default"] = memoryItem.Default + } + if memoryItem.Max != nil { + memoryMap["max"] = memoryItem.Max + } + if memoryItem.Min != nil { + memoryMap["min"] = memoryItem.Min + } + if memoryItem.Step != nil { + memoryMap["step"] = memoryItem.Step + } + if memoryItem.Values != nil { + memoryMap["values"] = memoryItem.Values + } + + return memoryMap +} + +func dataSourceDedicatedHostProfileFlattenSocketCount(result vpcv1.DedicatedHostProfileSocket) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostProfileSocketCountToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostProfileSocketCountToMap(socketCountItem vpcv1.DedicatedHostProfileSocket) (socketCountMap map[string]interface{}) { + socketCountMap = map[string]interface{}{} + + if socketCountItem.Type != nil { + socketCountMap["type"] = socketCountItem.Type + } + if socketCountItem.Value != nil { + socketCountMap["value"] = socketCountItem.Value + } + if socketCountItem.Default != nil { + socketCountMap["default"] = socketCountItem.Default + } + if socketCountItem.Max != nil { + socketCountMap["max"] = socketCountItem.Max + } + if socketCountItem.Min != nil { + socketCountMap["min"] = socketCountItem.Min + } + if socketCountItem.Step != nil { + socketCountMap["step"] = socketCountItem.Step + } + if socketCountItem.Values != nil { + socketCountMap["values"] = socketCountItem.Values + } + + return socketCountMap +} + +func dataSourceDedicatedHostProfileFlattenSupportedInstanceProfiles(result []vpcv1.InstanceProfileReference) (supportedInstanceProfiles []map[string]interface{}) { + for _, supportedInstanceProfilesItem := range result { + supportedInstanceProfiles = append(supportedInstanceProfiles, dataSourceDedicatedHostProfileSupportedInstanceProfilesToMap(supportedInstanceProfilesItem)) + } + + return supportedInstanceProfiles +} + +func dataSourceDedicatedHostProfileSupportedInstanceProfilesToMap(supportedInstanceProfilesItem vpcv1.InstanceProfileReference) (supportedInstanceProfilesMap map[string]interface{}) { + supportedInstanceProfilesMap = map[string]interface{}{} + + if supportedInstanceProfilesItem.Href != nil { + supportedInstanceProfilesMap["href"] = supportedInstanceProfilesItem.Href + } +
if supportedInstanceProfilesItem.Name != nil { + supportedInstanceProfilesMap["name"] = supportedInstanceProfilesItem.Name + } + + return supportedInstanceProfilesMap +} + +func dataSourceDedicatedHostProfileFlattenVcpuArchitecture(result vpcv1.DedicatedHostProfileVcpuArchitecture) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostProfileVcpuArchitectureToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostProfileVcpuArchitectureToMap(vcpuArchitectureItem vpcv1.DedicatedHostProfileVcpuArchitecture) (vcpuArchitectureMap map[string]interface{}) { + vcpuArchitectureMap = map[string]interface{}{} + + if vcpuArchitectureItem.Type != nil { + vcpuArchitectureMap["type"] = vcpuArchitectureItem.Type + } + if vcpuArchitectureItem.Value != nil { + vcpuArchitectureMap["value"] = vcpuArchitectureItem.Value + } + + return vcpuArchitectureMap +} + +func dataSourceDedicatedHostProfileFlattenVcpuCount(result vpcv1.DedicatedHostProfileVcpu) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostProfileVcpuCountToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostProfileVcpuCountToMap(vcpuCountItem vpcv1.DedicatedHostProfileVcpu) (vcpuCountMap map[string]interface{}) { + vcpuCountMap = map[string]interface{}{} + + if vcpuCountItem.Type != nil { + vcpuCountMap["type"] = vcpuCountItem.Type + } + if vcpuCountItem.Value != nil { + vcpuCountMap["value"] = vcpuCountItem.Value + } + if vcpuCountItem.Default != nil { + vcpuCountMap["default"] = vcpuCountItem.Default + } + if vcpuCountItem.Max != nil { + vcpuCountMap["max"] = vcpuCountItem.Max + } + if vcpuCountItem.Min != nil { + vcpuCountMap["min"] = vcpuCountItem.Min + } + if vcpuCountItem.Step != nil { + vcpuCountMap["step"] = vcpuCountItem.Step + } + if vcpuCountItem.Values != nil { + vcpuCountMap["values"] = vcpuCountItem.Values + } + + return vcpuCountMap +} + +func dataSourceDedicatedHostProfileFlattenDisks(result []vpcv1.DedicatedHostProfileDisk) (disks []map[string]interface{}) { + for _, disksItem := range result { + disks = append(disks, dataSourceDedicatedHostProfileDisksToMap(disksItem)) + } + + return disks +} + +func dataSourceDedicatedHostProfileDisksToMap(disksItem vpcv1.DedicatedHostProfileDisk) (disksMap map[string]interface{}) { + disksMap = map[string]interface{}{} + + if disksItem.InterfaceType != nil { + interfaceTypeList := []map[string]interface{}{} + interfaceTypeMap := dataSourceDedicatedHostProfileDisksInterfaceTypeToMap(*disksItem.InterfaceType) + interfaceTypeList = append(interfaceTypeList, interfaceTypeMap) + disksMap["interface_type"] = interfaceTypeList + } + if disksItem.Quantity != nil { + quantityList := []map[string]interface{}{} + quantityMap := dataSourceDedicatedHostProfileDisksQuantityToMap(*disksItem.Quantity) + quantityList = append(quantityList, quantityMap) + disksMap["quantity"] = quantityList + } + if disksItem.Size != nil { + sizeList := []map[string]interface{}{} + sizeMap := dataSourceDedicatedHostProfileDisksSizeToMap(*disksItem.Size) + sizeList = append(sizeList, sizeMap) + disksMap["size"] = sizeList + } + if disksItem.SupportedInstanceInterfaceTypes != nil { + supportedInstanceInterfaceTypesList := []map[string]interface{}{} + supportedInstanceInterfaceTypesMap := 
dataSourceDedicatedHostProfileDisksSupportedInstanceInterfaceTypesToMap(*disksItem.SupportedInstanceInterfaceTypes) + supportedInstanceInterfaceTypesList = append(supportedInstanceInterfaceTypesList, supportedInstanceInterfaceTypesMap) + disksMap["supported_instance_interface_types"] = supportedInstanceInterfaceTypesList + } + + return disksMap +} + +func dataSourceDedicatedHostProfileDisksInterfaceTypeToMap(interfaceTypeItem vpcv1.DedicatedHostProfileDiskInterface) (interfaceTypeMap map[string]interface{}) { + interfaceTypeMap = map[string]interface{}{} + + if interfaceTypeItem.Type != nil { + interfaceTypeMap["type"] = interfaceTypeItem.Type + } + if interfaceTypeItem.Value != nil { + interfaceTypeMap["value"] = interfaceTypeItem.Value + } + + return interfaceTypeMap +} + +func dataSourceDedicatedHostProfileDisksQuantityToMap(quantityItem vpcv1.DedicatedHostProfileDiskQuantity) (quantityMap map[string]interface{}) { + quantityMap = map[string]interface{}{} + + if quantityItem.Type != nil { + quantityMap["type"] = quantityItem.Type + } + if quantityItem.Value != nil { + quantityMap["value"] = quantityItem.Value + } + + return quantityMap +} + +func dataSourceDedicatedHostProfileDisksSizeToMap(sizeItem vpcv1.DedicatedHostProfileDiskSize) (sizeMap map[string]interface{}) { + sizeMap = map[string]interface{}{} + + if sizeItem.Type != nil { + sizeMap["type"] = sizeItem.Type + } + if sizeItem.Value != nil { + sizeMap["value"] = sizeItem.Value + } + + return sizeMap +} + +func dataSourceDedicatedHostProfileDisksSupportedInstanceInterfaceTypesToMap(supportedInstanceInterfaceTypesItem vpcv1.DedicatedHostProfileDiskSupportedInterfaces) (supportedInstanceInterfaceTypesMap map[string]interface{}) { + supportedInstanceInterfaceTypesMap = map[string]interface{}{} + + if supportedInstanceInterfaceTypesItem.Type != nil { + supportedInstanceInterfaceTypesMap["type"] = supportedInstanceInterfaceTypesItem.Type + } + if supportedInstanceInterfaceTypesItem.Value != nil { + supportedInstanceInterfaceTypesMap["value"] = supportedInstanceInterfaceTypesItem.Value + } + + return supportedInstanceInterfaceTypesMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_profiles.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_profiles.go new file mode 100644 index 00000000000..174dcbeea78 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_host_profiles.go @@ -0,0 +1,701 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsDedicatedHostProfiles() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsDedicatedHostProfilesRead, + + Schema: map[string]*schema.Schema{ + "first": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A link to the first page of resources.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for a page of resources.", + }, + }, + }, + }, + "limit": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum number of resources that can be returned by the request.", + }, + "next": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A link to the next page of resources. This property is present for all pages except the last page.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for a page of resources.", + }, + }, + }, + }, + "profiles": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of dedicated host profiles.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "class": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The product class this dedicated host profile belongs to.", + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the dedicated host profile's disks.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interface_type": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The interface of the disk for a dedicated host with this profile. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values.
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + }, + }, + }, + "quantity": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The number of disks of this type for a dedicated host with this profile.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + }, + }, + }, + "size": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + }, + }, + }, + "supported_instance_interface_types": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The instance disk interfaces supported for a dedicated host with this profile.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The product family this dedicated host profile belongs to. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values.
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host profile.", + }, + "memory": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this dedicated host profile.", + }, + "socket_count": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "supported_instance_profiles": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of instance profiles that can be used by instances placed on dedicated hosts with this profile.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance profile.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this virtual server instance profile.", + }, + }, + }, + }, + "vcpu_architecture": &schema.Schema{ + Type: schema.TypeList, + + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + 
}, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VCPU architecture for a dedicated host with this profile.", + }, + }, + }, + }, + "vcpu_count": &schema.Schema{ + Type: schema.TypeList, + + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + }, + }, + "total_count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The total number of resources across all pages.", + }, + }, + } +} + +func dataSourceIbmIsDedicatedHostProfilesRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + listDedicatedHostProfilesOptions := &vpcv1.ListDedicatedHostProfilesOptions{} + + dedicatedHostProfileCollection, response, err := vpcClient.ListDedicatedHostProfilesWithContext(context.TODO(), listDedicatedHostProfilesOptions) + if err != nil { + log.Printf("[DEBUG] ListDedicatedHostProfilesWithContext failed %s\n%s", err, response) + return err + } + + if dedicatedHostProfileCollection.First != nil { + err = d.Set("first", dataSourceDedicatedHostProfileCollectionFlattenFirst(*dedicatedHostProfileCollection.First)) + if err != nil { + return fmt.Errorf("Error setting first %s", err) + } + } + if err = d.Set("limit", dedicatedHostProfileCollection.Limit); err != nil { + return fmt.Errorf("Error setting limit: %s", err) + } + + if dedicatedHostProfileCollection.Next != nil { + err = d.Set("next", dataSourceDedicatedHostProfileCollectionFlattenNext(*dedicatedHostProfileCollection.Next)) + if err != nil { + return fmt.Errorf("Error setting next %s", err) + } + } + + if len(dedicatedHostProfileCollection.Profiles) != 0 { + + d.SetId(dataSourceIbmIsDedicatedHostProfilesID(d)) + + if dedicatedHostProfileCollection.Profiles != nil { + err = d.Set("profiles", dataSourceDedicatedHostProfileCollectionFlattenProfiles(dedicatedHostProfileCollection.Profiles)) + if err != nil { + return fmt.Errorf("Error setting profiles %s", err) + } + } + if err = d.Set("total_count", dedicatedHostProfileCollection.TotalCount); err != nil { + return fmt.Errorf("Error setting total_count: %s", err) + } + } + return nil +} + +// dataSourceIbmIsDedicatedHostProfilesID returns a reasonable ID for the list. 
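+// The profile collection has no natural identifier of its own, so the current UTC timestamp is used; the ID therefore changes on every read, which is the convention for the list-style data sources in this provider.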
+func dataSourceIbmIsDedicatedHostProfilesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceDedicatedHostProfileCollectionFlattenFirst(result vpcv1.DedicatedHostProfileCollectionFirst) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostProfileCollectionFirstToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostProfileCollectionFirstToMap(firstItem vpcv1.DedicatedHostProfileCollectionFirst) (firstMap map[string]interface{}) { + firstMap = map[string]interface{}{} + + if firstItem.Href != nil { + firstMap["href"] = firstItem.Href + } + + return firstMap +} + +func dataSourceDedicatedHostProfileCollectionFlattenNext(result vpcv1.DedicatedHostProfileCollectionNext) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostProfileCollectionNextToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostProfileCollectionNextToMap(nextItem vpcv1.DedicatedHostProfileCollectionNext) (nextMap map[string]interface{}) { + nextMap = map[string]interface{}{} + + if nextItem.Href != nil { + nextMap["href"] = nextItem.Href + } + + return nextMap +} + +func dataSourceDedicatedHostProfileCollectionFlattenProfiles(result []vpcv1.DedicatedHostProfile) (profiles []map[string]interface{}) { + for _, profilesItem := range result { + profiles = append(profiles, dataSourceDedicatedHostProfileCollectionProfilesToMap(profilesItem)) + } + + return profiles +} + +func dataSourceDedicatedHostProfileCollectionProfilesToMap(profilesItem vpcv1.DedicatedHostProfile) (profilesMap map[string]interface{}) { + profilesMap = map[string]interface{}{} + + if profilesItem.Class != nil { + profilesMap["class"] = profilesItem.Class + } + if profilesItem.Disks != nil { + disksList := []map[string]interface{}{} + for _, disksItem := range profilesItem.Disks { + disksList = append(disksList, dataSourceDedicatedHostProfileCollectionProfilesDisksToMap(disksItem)) + } + profilesMap["disks"] = disksList + } + if profilesItem.Family != nil { + profilesMap["family"] = profilesItem.Family + } + if profilesItem.Href != nil { + profilesMap["href"] = profilesItem.Href + } + if profilesItem.Memory != nil { + memoryList := []map[string]interface{}{} + memoryMap := dataSourceDedicatedHostProfileCollectionProfilesMemoryToMap(*profilesItem.Memory.(*vpcv1.DedicatedHostProfileMemory)) + memoryList = append(memoryList, memoryMap) + profilesMap["memory"] = memoryList + } + if profilesItem.Name != nil { + profilesMap["name"] = profilesItem.Name + } + if profilesItem.SocketCount != nil { + socketCountList := []map[string]interface{}{} + socketCountMap := dataSourceDedicatedHostProfileCollectionProfilesSocketCountToMap(*profilesItem.SocketCount.(*vpcv1.DedicatedHostProfileSocket)) + socketCountList = append(socketCountList, socketCountMap) + profilesMap["socket_count"] = socketCountList + } + if profilesItem.SupportedInstanceProfiles != nil { + supportedInstanceProfilesList := []map[string]interface{}{} + for _, supportedInstanceProfilesItem := range profilesItem.SupportedInstanceProfiles { + supportedInstanceProfilesList = append(supportedInstanceProfilesList, dataSourceDedicatedHostProfileCollectionProfilesSupportedInstanceProfilesToMap(supportedInstanceProfilesItem)) + } + profilesMap["supported_instance_profiles"] = supportedInstanceProfilesList + } + if profilesItem.VcpuArchitecture 
!= nil { + vcpuArchitectureList := []map[string]interface{}{} + vcpuArchitectureMap := dataSourceDedicatedHostProfileCollectionProfilesVcpuArchitectureToMap(*profilesItem.VcpuArchitecture) + vcpuArchitectureList = append(vcpuArchitectureList, vcpuArchitectureMap) + profilesMap["vcpu_architecture"] = vcpuArchitectureList + } + if profilesItem.VcpuCount != nil { + vcpuCountList := []map[string]interface{}{} + vcpuCountMap := dataSourceDedicatedHostProfileCollectionProfilesVcpuCountToMap(*profilesItem.VcpuCount.(*vpcv1.DedicatedHostProfileVcpu)) + vcpuCountList = append(vcpuCountList, vcpuCountMap) + profilesMap["vcpu_count"] = vcpuCountList + } + + return profilesMap +} + +func dataSourceDedicatedHostProfileCollectionProfilesMemoryToMap(memoryItem vpcv1.DedicatedHostProfileMemory) (memoryMap map[string]interface{}) { + memoryMap = map[string]interface{}{} + + if memoryItem.Type != nil { + memoryMap["type"] = memoryItem.Type + } + if memoryItem.Value != nil { + memoryMap["value"] = memoryItem.Value + } + if memoryItem.Default != nil { + memoryMap["default"] = memoryItem.Default + } + if memoryItem.Max != nil { + memoryMap["max"] = memoryItem.Max + } + if memoryItem.Min != nil { + memoryMap["min"] = memoryItem.Min + } + if memoryItem.Step != nil { + memoryMap["step"] = memoryItem.Step + } + if memoryItem.Values != nil { + memoryMap["values"] = memoryItem.Values + } + + return memoryMap +} + +func dataSourceDedicatedHostProfileCollectionProfilesSocketCountToMap(socketCountItem vpcv1.DedicatedHostProfileSocket) (socketCountMap map[string]interface{}) { + socketCountMap = map[string]interface{}{} + + if socketCountItem.Type != nil { + socketCountMap["type"] = socketCountItem.Type + } + if socketCountItem.Value != nil { + socketCountMap["value"] = socketCountItem.Value + } + if socketCountItem.Default != nil { + socketCountMap["default"] = socketCountItem.Default + } + if socketCountItem.Max != nil { + socketCountMap["max"] = socketCountItem.Max + } + if socketCountItem.Min != nil { + socketCountMap["min"] = socketCountItem.Min + } + if socketCountItem.Step != nil { + socketCountMap["step"] = socketCountItem.Step + } + if socketCountItem.Values != nil { + socketCountMap["values"] = socketCountItem.Values + } + + return socketCountMap +} + +func dataSourceDedicatedHostProfileCollectionProfilesSupportedInstanceProfilesToMap(supportedInstanceProfilesItem vpcv1.InstanceProfileReference) (supportedInstanceProfilesMap map[string]interface{}) { + supportedInstanceProfilesMap = map[string]interface{}{} + + if supportedInstanceProfilesItem.Href != nil { + supportedInstanceProfilesMap["href"] = supportedInstanceProfilesItem.Href + } + if supportedInstanceProfilesItem.Name != nil { + supportedInstanceProfilesMap["name"] = supportedInstanceProfilesItem.Name + } + + return supportedInstanceProfilesMap +} + +func dataSourceDedicatedHostProfileCollectionProfilesVcpuArchitectureToMap(vcpuArchitectureItem vpcv1.DedicatedHostProfileVcpuArchitecture) (vcpuArchitectureMap map[string]interface{}) { + vcpuArchitectureMap = map[string]interface{}{} + + if vcpuArchitectureItem.Type != nil { + vcpuArchitectureMap["type"] = vcpuArchitectureItem.Type + } + if vcpuArchitectureItem.Value != nil { + vcpuArchitectureMap["value"] = vcpuArchitectureItem.Value + } + + return vcpuArchitectureMap +} + +func dataSourceDedicatedHostProfileCollectionProfilesVcpuCountToMap(vcpuCountItem vpcv1.DedicatedHostProfileVcpu) (vcpuCountMap map[string]interface{}) { + vcpuCountMap = map[string]interface{}{} + + if vcpuCountItem.Type != nil { + 
vcpuCountMap["type"] = vcpuCountItem.Type + } + if vcpuCountItem.Value != nil { + vcpuCountMap["value"] = vcpuCountItem.Value + } + if vcpuCountItem.Default != nil { + vcpuCountMap["default"] = vcpuCountItem.Default + } + if vcpuCountItem.Max != nil { + vcpuCountMap["max"] = vcpuCountItem.Max + } + if vcpuCountItem.Min != nil { + vcpuCountMap["min"] = vcpuCountItem.Min + } + if vcpuCountItem.Step != nil { + vcpuCountMap["step"] = vcpuCountItem.Step + } + if vcpuCountItem.Values != nil { + vcpuCountMap["values"] = vcpuCountItem.Values + } + + return vcpuCountMap +} + +func dataSourceDedicatedHostProfileCollectionProfilesDisksToMap(disksItem vpcv1.DedicatedHostProfileDisk) (disksMap map[string]interface{}) { + disksMap = map[string]interface{}{} + + if disksItem.InterfaceType != nil { + interfaceTypeList := []map[string]interface{}{} + interfaceTypeMap := dataSourceDedicatedHostProfileCollectionDisksInterfaceTypeToMap(*disksItem.InterfaceType) + interfaceTypeList = append(interfaceTypeList, interfaceTypeMap) + disksMap["interface_type"] = interfaceTypeList + } + if disksItem.Quantity != nil { + quantityList := []map[string]interface{}{} + quantityMap := dataSourceDedicatedHostProfileCollectionDisksQuantityToMap(*disksItem.Quantity) + quantityList = append(quantityList, quantityMap) + disksMap["quantity"] = quantityList + } + if disksItem.Size != nil { + sizeList := []map[string]interface{}{} + sizeMap := dataSourceDedicatedHostProfileCollectionDisksSizeToMap(*disksItem.Size) + sizeList = append(sizeList, sizeMap) + disksMap["size"] = sizeList + } + if disksItem.SupportedInstanceInterfaceTypes != nil { + supportedInstanceInterfaceTypesList := []map[string]interface{}{} + supportedInstanceInterfaceTypesMap := dataSourceDedicatedHostProfileCollectionDisksSupportedInstanceInterfaceTypesToMap(*disksItem.SupportedInstanceInterfaceTypes) + supportedInstanceInterfaceTypesList = append(supportedInstanceInterfaceTypesList, supportedInstanceInterfaceTypesMap) + disksMap["supported_instance_interface_types"] = supportedInstanceInterfaceTypesList + } + + return disksMap +} + +func dataSourceDedicatedHostProfileCollectionDisksInterfaceTypeToMap(interfaceTypeItem vpcv1.DedicatedHostProfileDiskInterface) (interfaceTypeMap map[string]interface{}) { + interfaceTypeMap = map[string]interface{}{} + + if interfaceTypeItem.Type != nil { + interfaceTypeMap["type"] = interfaceTypeItem.Type + } + if interfaceTypeItem.Value != nil { + interfaceTypeMap["value"] = interfaceTypeItem.Value + } + + return interfaceTypeMap +} + +func dataSourceDedicatedHostProfileCollectionDisksQuantityToMap(quantityItem vpcv1.DedicatedHostProfileDiskQuantity) (quantityMap map[string]interface{}) { + quantityMap = map[string]interface{}{} + + if quantityItem.Type != nil { + quantityMap["type"] = quantityItem.Type + } + if quantityItem.Value != nil { + quantityMap["value"] = quantityItem.Value + } + + return quantityMap +} + +func dataSourceDedicatedHostProfileCollectionDisksSizeToMap(sizeItem vpcv1.DedicatedHostProfileDiskSize) (sizeMap map[string]interface{}) { + sizeMap = map[string]interface{}{} + + if sizeItem.Type != nil { + sizeMap["type"] = sizeItem.Type + } + if sizeItem.Value != nil { + sizeMap["value"] = sizeItem.Value + } + + return sizeMap +} + +func dataSourceDedicatedHostProfileCollectionDisksSupportedInstanceInterfaceTypesToMap(supportedInstanceInterfaceTypesItem vpcv1.DedicatedHostProfileDiskSupportedInterfaces) (supportedInstanceInterfaceTypesMap map[string]interface{}) { + supportedInstanceInterfaceTypesMap = 
map[string]interface{}{} + + if supportedInstanceInterfaceTypesItem.Type != nil { + supportedInstanceInterfaceTypesMap["type"] = supportedInstanceInterfaceTypesItem.Type + } + if supportedInstanceInterfaceTypesItem.Value != nil { + supportedInstanceInterfaceTypesMap["value"] = supportedInstanceInterfaceTypesItem.Value + } + + return supportedInstanceInterfaceTypesMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_hosts.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_hosts.go new file mode 100644 index 00000000000..23072e04b9b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_dedicated_hosts.go @@ -0,0 +1,753 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsDedicatedHosts() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsDedicatedHostsRead, + + Schema: map[string]*schema.Schema{ + "host_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The unique identifier of the dedicated host group this dedicated host belongs to", + }, + "dedicated_hosts": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of dedicated hosts.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "available_memory": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The amount of memory in gibibytes that is currently available for instances.", + }, + "available_vcpu": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The available VCPU for the dedicated host.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "architecture": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VCPU architecture.", + }, + "count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The number of VCPUs assigned.", + }, + }, + }, + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the dedicated host was created.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host.", + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the dedicated host's disks.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "available": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The remaining space left for instance placement in GB (gigabytes).", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the disk was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this disk.", + }, + "instance_disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Instance disks that are on this dedicated host disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deleted": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "If 
present, this property indicates the referenced resource has been deleted and provides some supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this instance disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this disk.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "interface_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The disk interface used for attaching the disk. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "lifecycle_state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of this dedicated host disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined or system-provided name for this disk.", + }, + "provisionable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this dedicated host disk is available for instance disk creation.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + "supported_instance_interface_types": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The instance disk interfaces supported for this dedicated host disk.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "host_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier of the dedicated host group this dedicated host is in.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this dedicated host.", + }, + "instance_placement_enabled": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, instances can be placed on this dedicated host.", + }, + "instances": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of instances that are allocated to this dedicated host.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this virtual server instance.", + }, + "deleted": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.", + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this virtual server instance.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this virtual server instance (and default system hostname).", + }, + }, + }, + }, + "lifecycle_state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of the dedicated host resource.", + }, + "memory": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The total amount of memory in gibibytes for this host.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for this dedicated host. If unspecified, the name will be a hyphenated list of randomly-selected words.", + }, + "profile": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The profile this dedicated host uses.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this dedicated host profile.", + }, + }, + }, + }, + "provisionable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this dedicated host is available for instance creation.", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier of the resource group for this dedicated host.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + "socket_count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The total number of sockets for this host.", + }, + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The administrative state of the dedicated host. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. 
Optionally halt processing and surface the error, or bypass the dedicated host on which the unexpected property value was encountered.", + }, + "supported_instance_profiles": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of instance profiles that can be used by instances placed on this dedicated host.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance profile.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this virtual server instance profile.", + }, + }, + }, + }, + "vcpu": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The total VCPU of the dedicated host.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "architecture": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VCPU architecture.", + }, + "count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The number of VCPUs assigned.", + }, + }, + }, + }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name of the zone this dedicated host resides in.", + }, + }, + }, + }, + "first": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A link to the first page of resources.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for a page of resources.", + }, + }, + }, + }, + "limit": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum number of resources that can be returned by the request.", + }, + "next": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A link to the next page of resources. 
This property is present for all pages except the last page.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for a page of resources.", + }, + }, + }, + }, + "total_count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The total number of resources across all pages.", + }, + }, + } +} + +func dataSourceIbmIsDedicatedHostsRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + listDedicatedHostsOptions := &vpcv1.ListDedicatedHostsOptions{} + if hostgroupintf, ok := d.GetOk("host_group"); ok { + hostgroupid := hostgroupintf.(string) + listDedicatedHostsOptions.DedicatedHostGroupID = &hostgroupid + } + + dedicatedHostCollection, response, err := vpcClient.ListDedicatedHostsWithContext(context.TODO(), listDedicatedHostsOptions) + if err != nil { + log.Printf("[DEBUG] ListDedicatedHostsWithContext failed %s\n%s", err, response) + return err + } + + if len(dedicatedHostCollection.DedicatedHosts) != 0 { + + d.SetId(dataSourceIbmIsDedicatedHostsID(d)) + + if dedicatedHostCollection.DedicatedHosts != nil { + err = d.Set("dedicated_hosts", dataSourceDedicatedHostCollectionFlattenDedicatedHosts(dedicatedHostCollection.DedicatedHosts)) + if err != nil { + return fmt.Errorf("Error setting dedicated_hosts %s", err) + } + } + + if dedicatedHostCollection.First != nil { + err = d.Set("first", dataSourceDedicatedHostCollectionFlattenFirst(*dedicatedHostCollection.First)) + if err != nil { + return fmt.Errorf("Error setting first %s", err) + } + } + if err = d.Set("limit", dedicatedHostCollection.Limit); err != nil { + return fmt.Errorf("Error setting limit: %s", err) + } + + if dedicatedHostCollection.Next != nil { + err = d.Set("next", dataSourceDedicatedHostCollectionFlattenNext(*dedicatedHostCollection.Next)) + if err != nil { + return fmt.Errorf("Error setting next %s", err) + } + } + + if err = d.Set("total_count", dedicatedHostCollection.TotalCount); err != nil { + return fmt.Errorf("Error setting total_count: %s", err) + } + } + return nil +} + +// dataSourceIbmIsDedicatedHostsID returns a reasonable ID for the list.
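+// Note that dataSourceIbmIsDedicatedHostsRead above only calls SetId (and populates the result attributes) when at least one dedicated host is returned, so an empty collection leaves the data source ID unset.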
+func dataSourceIbmIsDedicatedHostsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceDedicatedHostCollectionFlattenDedicatedHosts(result []vpcv1.DedicatedHost) (dedicatedHosts []map[string]interface{}) { + for _, dedicatedHostsItem := range result { + dedicatedHosts = append(dedicatedHosts, dataSourceDedicatedHostCollectionDedicatedHostsToMap(dedicatedHostsItem)) + } + + return dedicatedHosts +} + +func dataSourceDedicatedHostCollectionDedicatedHostsToMap(dedicatedHostsItem vpcv1.DedicatedHost) (dedicatedHostsMap map[string]interface{}) { + dedicatedHostsMap = map[string]interface{}{} + + if dedicatedHostsItem.AvailableMemory != nil { + dedicatedHostsMap["available_memory"] = dedicatedHostsItem.AvailableMemory + } + if dedicatedHostsItem.AvailableVcpu != nil { + availableVcpuList := []map[string]interface{}{} + availableVcpuMap := dataSourceDedicatedHostCollectionDedicatedHostsAvailableVcpuToMap(*dedicatedHostsItem.AvailableVcpu) + availableVcpuList = append(availableVcpuList, availableVcpuMap) + dedicatedHostsMap["available_vcpu"] = availableVcpuList + } + if dedicatedHostsItem.CreatedAt != nil { + dedicatedHostsMap["created_at"] = dedicatedHostsItem.CreatedAt.String() + } + if dedicatedHostsItem.CRN != nil { + dedicatedHostsMap["crn"] = dedicatedHostsItem.CRN + } + if dedicatedHostsItem.Disks != nil { + disksList := []map[string]interface{}{} + for _, disksItem := range dedicatedHostsItem.Disks { + disksList = append(disksList, dataSourceDedicatedHostCollectionDedicatedHostsDisksToMap(disksItem)) + } + dedicatedHostsMap["disks"] = disksList + } + if dedicatedHostsItem.Group != nil { + dedicatedHostsMap["host_group"] = *dedicatedHostsItem.Group.ID + } + if dedicatedHostsItem.Href != nil { + dedicatedHostsMap["href"] = dedicatedHostsItem.Href + } + if dedicatedHostsItem.ID != nil { + dedicatedHostsMap["id"] = dedicatedHostsItem.ID + } + if dedicatedHostsItem.InstancePlacementEnabled != nil { + dedicatedHostsMap["instance_placement_enabled"] = dedicatedHostsItem.InstancePlacementEnabled + } + if dedicatedHostsItem.Instances != nil { + instancesList := []map[string]interface{}{} + for _, instancesItem := range dedicatedHostsItem.Instances { + instancesList = append(instancesList, dataSourceDedicatedHostCollectionDedicatedHostsInstancesToMap(instancesItem)) + } + dedicatedHostsMap["instances"] = instancesList + } + if dedicatedHostsItem.LifecycleState != nil { + dedicatedHostsMap["lifecycle_state"] = dedicatedHostsItem.LifecycleState + } + if dedicatedHostsItem.Memory != nil { + dedicatedHostsMap["memory"] = dedicatedHostsItem.Memory + } + if dedicatedHostsItem.Name != nil { + dedicatedHostsMap["name"] = dedicatedHostsItem.Name + } + if dedicatedHostsItem.Profile != nil { + profileList := []map[string]interface{}{} + profileMap := dataSourceDedicatedHostCollectionDedicatedHostsProfileToMap(*dedicatedHostsItem.Profile) + profileList = append(profileList, profileMap) + dedicatedHostsMap["profile"] = profileList + } + if dedicatedHostsItem.Provisionable != nil { + dedicatedHostsMap["provisionable"] = dedicatedHostsItem.Provisionable + } + if dedicatedHostsItem.ResourceGroup != nil { + dedicatedHostsMap["resource_group"] = *dedicatedHostsItem.ResourceGroup.ID + } + if dedicatedHostsItem.ResourceType != nil { + dedicatedHostsMap["resource_type"] = dedicatedHostsItem.ResourceType + } + if dedicatedHostsItem.SocketCount != nil { + dedicatedHostsMap["socket_count"] = dedicatedHostsItem.SocketCount + } + if dedicatedHostsItem.State != nil { + 
dedicatedHostsMap["state"] = dedicatedHostsItem.State + } + if dedicatedHostsItem.SupportedInstanceProfiles != nil { + supportedInstanceProfilesList := []map[string]interface{}{} + for _, supportedInstanceProfilesItem := range dedicatedHostsItem.SupportedInstanceProfiles { + supportedInstanceProfilesList = append(supportedInstanceProfilesList, dataSourceDedicatedHostCollectionDedicatedHostsSupportedInstanceProfilesToMap(supportedInstanceProfilesItem)) + } + dedicatedHostsMap["supported_instance_profiles"] = supportedInstanceProfilesList + } + if dedicatedHostsItem.Vcpu != nil { + vcpuList := []map[string]interface{}{} + vcpuMap := dataSourceDedicatedHostCollectionDedicatedHostsVcpuToMap(*dedicatedHostsItem.Vcpu) + vcpuList = append(vcpuList, vcpuMap) + dedicatedHostsMap["vcpu"] = vcpuList + } + if dedicatedHostsItem.Zone != nil { + dedicatedHostsMap["zone"] = *dedicatedHostsItem.Zone.Name + } + + return dedicatedHostsMap +} + +func dataSourceDedicatedHostCollectionDedicatedHostsAvailableVcpuToMap(availableVcpuItem vpcv1.Vcpu) (availableVcpuMap map[string]interface{}) { + availableVcpuMap = map[string]interface{}{} + + if availableVcpuItem.Architecture != nil { + availableVcpuMap["architecture"] = availableVcpuItem.Architecture + } + if availableVcpuItem.Count != nil { + availableVcpuMap["count"] = availableVcpuItem.Count + } + + return availableVcpuMap +} + +func dataSourceDedicatedHostCollectionDedicatedHostsGroupToMap(groupItem vpcv1.DedicatedHostGroupReference) (groupMap map[string]interface{}) { + groupMap = map[string]interface{}{} + + if groupItem.CRN != nil { + groupMap["crn"] = groupItem.CRN + } + if groupItem.Deleted != nil { + deletedList := []map[string]interface{}{} + deletedMap := dataSourceDedicatedHostCollectionGroupDeletedToMap(*groupItem.Deleted) + deletedList = append(deletedList, deletedMap) + groupMap["deleted"] = deletedList + } + if groupItem.Href != nil { + groupMap["href"] = groupItem.Href + } + if groupItem.ID != nil { + groupMap["id"] = groupItem.ID + } + if groupItem.Name != nil { + groupMap["name"] = groupItem.Name + } + if groupItem.ResourceType != nil { + groupMap["resource_type"] = groupItem.ResourceType + } + + return groupMap +} + +func dataSourceDedicatedHostCollectionGroupDeletedToMap(deletedItem vpcv1.DedicatedHostGroupReferenceDeleted) (deletedMap map[string]interface{}) { + deletedMap = map[string]interface{}{} + + if deletedItem.MoreInfo != nil { + deletedMap["more_info"] = deletedItem.MoreInfo + } + + return deletedMap +} + +func dataSourceDedicatedHostCollectionInstancesDeletedToMap(deletedItem vpcv1.InstanceReferenceDeleted) (deletedMap map[string]interface{}) { + deletedMap = map[string]interface{}{} + + if deletedItem.MoreInfo != nil { + deletedMap["more_info"] = deletedItem.MoreInfo + } + + return deletedMap +} + +func dataSourceDedicatedHostCollectionDedicatedHostsInstancesToMap(instancesItem vpcv1.InstanceReference) (instancesMap map[string]interface{}) { + instancesMap = map[string]interface{}{} + + if instancesItem.CRN != nil { + instancesMap["crn"] = instancesItem.CRN + } + if instancesItem.Deleted != nil { + deletedList := []map[string]interface{}{} + deletedMap := dataSourceDedicatedHostCollectionInstancesDeletedToMap(*instancesItem.Deleted) + deletedList = append(deletedList, deletedMap) + instancesMap["deleted"] = deletedList + } + if instancesItem.Href != nil { + instancesMap["href"] = instancesItem.Href + } + if instancesItem.ID != nil { + instancesMap["id"] = instancesItem.ID + } + if instancesItem.Name != nil { + 
instancesMap["name"] = instancesItem.Name + } + + return instancesMap +} + +func dataSourceDedicatedHostCollectionDedicatedHostsProfileToMap(profileItem vpcv1.DedicatedHostProfileReference) (profileMap map[string]interface{}) { + profileMap = map[string]interface{}{} + + if profileItem.Href != nil { + profileMap["href"] = profileItem.Href + } + if profileItem.Name != nil { + profileMap["name"] = profileItem.Name + } + + return profileMap +} + +func dataSourceDedicatedHostCollectionDedicatedHostsSupportedInstanceProfilesToMap(supportedInstanceProfilesItem vpcv1.InstanceProfileReference) (supportedInstanceProfilesMap map[string]interface{}) { + supportedInstanceProfilesMap = map[string]interface{}{} + + if supportedInstanceProfilesItem.Href != nil { + supportedInstanceProfilesMap["href"] = supportedInstanceProfilesItem.Href + } + if supportedInstanceProfilesItem.Name != nil { + supportedInstanceProfilesMap["name"] = supportedInstanceProfilesItem.Name + } + + return supportedInstanceProfilesMap +} + +func dataSourceDedicatedHostCollectionDedicatedHostsVcpuToMap(vcpuItem vpcv1.Vcpu) (vcpuMap map[string]interface{}) { + vcpuMap = map[string]interface{}{} + + if vcpuItem.Architecture != nil { + vcpuMap["architecture"] = vcpuItem.Architecture + } + if vcpuItem.Count != nil { + vcpuMap["count"] = vcpuItem.Count + } + + return vcpuMap +} + +func dataSourceDedicatedHostCollectionFlattenFirst(result vpcv1.DedicatedHostCollectionFirst) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostCollectionFirstToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostCollectionFirstToMap(firstItem vpcv1.DedicatedHostCollectionFirst) (firstMap map[string]interface{}) { + firstMap = map[string]interface{}{} + + if firstItem.Href != nil { + firstMap["href"] = firstItem.Href + } + + return firstMap +} + +func dataSourceDedicatedHostCollectionFlattenNext(result vpcv1.DedicatedHostCollectionNext) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceDedicatedHostCollectionNextToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceDedicatedHostCollectionNextToMap(nextItem vpcv1.DedicatedHostCollectionNext) (nextMap map[string]interface{}) { + nextMap = map[string]interface{}{} + + if nextItem.Href != nil { + nextMap["href"] = nextItem.Href + } + + return nextMap +} + +func dataSourceDedicatedHostCollectionDedicatedHostsDisksToMap(disksItem vpcv1.DedicatedHostDisk) (disksMap map[string]interface{}) { + disksMap = map[string]interface{}{} + + if disksItem.Available != nil { + disksMap["available"] = disksItem.Available + } + if disksItem.CreatedAt != nil { + disksMap["created_at"] = disksItem.CreatedAt.String() + } + if disksItem.Href != nil { + disksMap["href"] = disksItem.Href + } + if disksItem.ID != nil { + disksMap["id"] = disksItem.ID + } + if disksItem.InstanceDisks != nil { + instanceDisksList := []map[string]interface{}{} + for _, instanceDisksItem := range disksItem.InstanceDisks { + instanceDisksList = append(instanceDisksList, dataSourceDedicatedHostDisksInstanceDisksToMap(instanceDisksItem)) + } + disksMap["instance_disks"] = instanceDisksList + } + if disksItem.InterfaceType != nil { + disksMap["interface_type"] = disksItem.InterfaceType + } + if disksItem.LifecycleState != nil { + disksMap["lifecycle_state"] = disksItem.LifecycleState + } + if disksItem.Name != nil { + disksMap["name"] = 
disksItem.Name + } + if disksItem.Provisionable != nil { + disksMap["provisionable"] = disksItem.Provisionable + } + if disksItem.ResourceType != nil { + disksMap["resource_type"] = disksItem.ResourceType + } + if disksItem.Size != nil { + disksMap["size"] = disksItem.Size + } + if disksItem.SupportedInstanceInterfaceTypes != nil { + disksMap["supported_instance_interface_types"] = disksItem.SupportedInstanceInterfaceTypes + } + + return disksMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_endpoint_gateway_targets.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_endpoint_gateway_targets.go new file mode 100644 index 00000000000..4f6223a75e0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_endpoint_gateway_targets.go @@ -0,0 +1,145 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/IBM/platform-services-go-sdk/catalogmanagementv1" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isVPEResources = "resources" + isVPEResourceCRN = "crn" + isVPEResourceParent = "parent" + isVPEResourceName = "name" + isVPEResourceEndpointType = "endpoint_type" + isVPEResourceType = "resource_type" + isVPEResourceFullQualifiedDomainNames = "full_qualified_domain_names" + isVPEResourceServiceLocation = "location" +) + +func dataSourceIBMISEndpointGatewayTargets() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISEndpointGatewayTargetsRead, + + Schema: map[string]*schema.Schema{ + isVPEResources: { + Type: schema.TypeList, + Computed: true, + Description: "List of resources", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVPEResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "CRN for this specific object", + }, + isVPEResourceParent: { + Type: schema.TypeString, + Computed: true, + Description: "Parent for this specific object", + }, + isVPEResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "Display name in the requested language", + }, + isVPEResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Resource type of this offering.", + }, + isVPEResourceEndpointType: { + Type: schema.TypeString, + Computed: true, + Description: "Data endpoint type of this offering", + }, + isVPEResourceFullQualifiedDomainNames: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Fully qualified domain names", + }, + isVPEResourceServiceLocation: { + Type: schema.TypeString, + Computed: true, + Description: "Service location of this offering", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISEndpointGatewayTargetsRead(d *schema.ResourceData, meta interface{}) error { + bmxSess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + region := bmxSess.Config.Region + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + + getCatalogOptions := &catalogmanagementv1.SearchObjectsOptions{} + // query := "kind%3Avpe+AND+svc+AND+parent_id%3Aus-south" + query := fmt.Sprintf("kind:vpe AND svc AND parent_id:%s", region) + getCatalogOptions.Query = &query + digest := false + getCatalogOptions.Digest = &digest + catalog, response, err := 
catalogManagementClient.SearchObjectsWithContext(context.TODO(), getCatalogOptions) + if err != nil { + log.Printf("[DEBUG] SearchObjectsWithContext failed %s\n%s", err, response) + return err + } + if catalog != nil && *catalog.ResourceCount > 0 && catalog.Resources != nil { + resourceInfo := make([]map[string]interface{}, 0) + for _, res := range catalog.Resources { + l := map[string]interface{}{} + if res.ParentID != nil { + l[isVPEResourceParent] = *res.ParentID + } + l[isVPEResourceName] = "provider_cloud_service" + if res.Label != nil { + l[isVPEResourceType] = *res.Label + } + sl := "" + data := res.Data + if data != nil { + if serviceCrn, ok := data["service_crn"].(string); ok { + if serviceCrn != "" { + l[isVPEResourceCRN] = serviceCrn + crnFs := strings.Split(serviceCrn, ":") + if len(crnFs) > 5 { + sl = crnFs[5] + } + l[isVPEResourceServiceLocation] = sl + } + } + if data["endpoint_type"] != nil { + l[isVPEResourceEndpointType] = data["endpoint_type"] + } + if data["fully_qualified_domain_names"] != nil { + l[isVPEResourceFullQualifiedDomainNames] = data["fully_qualified_domain_names"] + } + } + resourceInfo = append(resourceInfo, l) + } + d.Set(isVPEResources, resourceInfo) + d.SetId(dataSourceIBMISEndpointGatewayTargetsId(d)) + } + return nil +} +func dataSourceIBMISEndpointGatewayTargetsId(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_floating_ip.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_floating_ip.go new file mode 100644 index 00000000000..51d95d01161 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_floating_ip.go @@ -0,0 +1,193 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + floatingIPName = "name" + floatingIPAddress = "address" + floatingIPStatus = "status" + floatingIPZone = "zone" + floatingIPTarget = "target" + floatingIPTags = "tags" +) + +func dataSourceIBMISFloatingIP() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISFloatingIPRead, + + Schema: map[string]*schema.Schema{ + + floatingIPName: { + Type: schema.TypeString, + Required: true, + Description: "Name of the floating IP", + }, + + floatingIPAddress: { + Type: schema.TypeString, + Computed: true, + Description: "Floating IP address", + }, + + floatingIPStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Floating IP status", + }, + + floatingIPZone: { + Type: schema.TypeString, + Computed: true, + Description: "Zone name", + }, + + floatingIPTarget: { + Type: schema.TypeString, + Computed: true, + Description: "Target info", + }, + + floatingIPTags: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceIBMVPCHash, + Description: "Floating IP tags", + }, + }, + } +} + +func dataSourceIBMISFloatingIPRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + floatingIPName := d.Get(isFloatingIPName).(string) + if userDetails.generation == 1 { + err := classicFloatingIPGet(d, meta, floatingIPName) + if err != nil { + return err + } + } else { + err := floatingIPGet(d, meta, floatingIPName) + if err != nil { + return err + } + } + return nil +} + +func classicFloatingIPGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + start := "" + allFloatingIPs := []vpcclassicv1.FloatingIP{} + for { + floatingIPOptions := &vpcclassicv1.ListFloatingIpsOptions{} + if start != "" { + floatingIPOptions.Start = &start + } + floatingIPs, response, err := sess.ListFloatingIps(floatingIPOptions) + if err != nil { + return fmt.Errorf("Error Fetching floating IPs %s\n%s", err, response) + } + start = GetNext(floatingIPs.Next) + allFloatingIPs = append(allFloatingIPs, floatingIPs.FloatingIps...) 
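+ // An empty start token from GetNext means the collection has no Next link, so the loop below exits once every page of floating IPs has been accumulated.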
+ if start == "" { + break + } + } + + for _, ip := range allFloatingIPs { + if *ip.Name == name { + + d.Set(floatingIPName, *ip.Name) + d.Set(floatingIPAddress, *ip.Address) + d.Set(floatingIPStatus, *ip.Status) + d.Set(floatingIPZone, *ip.Zone.Name) + + target, ok := ip.Target.(*vpcclassicv1.FloatingIPTarget) + if ok { + d.Set(floatingIPTarget, target.ID) + } + + tags, err := GetTagsUsingCRN(meta, *ip.CRN) + if err != nil { + fmt.Printf("Error on get of vpc Floating IP (%s) tags: %s", *ip.Address, err) + } + + d.Set(floatingIPTags, tags) + d.SetId(*ip.ID) + + return nil + } + } + + return fmt.Errorf("No floating IP found with name %s", name) +} + +func floatingIPGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + start := "" + allFloatingIPs := []vpcv1.FloatingIP{} + for { + floatingIPOptions := &vpcv1.ListFloatingIpsOptions{} + if start != "" { + floatingIPOptions.Start = &start + } + floatingIPs, response, err := sess.ListFloatingIps(floatingIPOptions) + if err != nil { + return fmt.Errorf("Error Fetching floating IPs %s\n%s", err, response) + } + start = GetNext(floatingIPs.Next) + allFloatingIPs = append(allFloatingIPs, floatingIPs.FloatingIps...) + if start == "" { + break + } + } + + for _, ip := range allFloatingIPs { + if *ip.Name == name { + + d.Set(floatingIPName, *ip.Name) + d.Set(floatingIPAddress, *ip.Address) + d.Set(floatingIPStatus, *ip.Status) + d.Set(floatingIPZone, *ip.Zone.Name) + + target, ok := ip.Target.(*vpcv1.FloatingIPTarget) + if ok { + d.Set(floatingIPTarget, target.ID) + } + + tags, err := GetTagsUsingCRN(meta, *ip.CRN) + if err != nil { + fmt.Printf("Error on get of vpc Floating IP (%s) tags: %s", *ip.Address, err) + } + + d.Set(floatingIPTags, tags) + d.SetId(*ip.ID) + + return nil + } + } + + return fmt.Errorf("No floating IP found with name %s", name) + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_flow_logs.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_flow_logs.go new file mode 100644 index 00000000000..0d0e654a104 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_flow_logs.go @@ -0,0 +1,144 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isFlowLogs = "flow_log_collectors" +) + +func dataSourceIBMISFlowLogs() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISFlowLogsRead, + + Schema: map[string]*schema.Schema{ + + isFlowLogs: { + Type: schema.TypeList, + Description: "Collection of flow log collectors", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this flow log collector", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this flow log collector", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this flow log collector", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Flow Log Collector name", + }, + "resource_group": { + Type: schema.TypeString, + Computed: true, + Description: "The resource group of the flow log", + }, + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time the flow log was created", + }, + "lifecycle_state": { + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of the flow log collector", + }, + "storage_bucket": { + Type: schema.TypeString, + Computed: true, + Description: "The Cloud Object Storage bucket name where the collected flows will be logged", + }, + "active": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this collector is active", + }, + "target": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the target that the flow log collector collects flow logs for", + }, + "vpc": { + Type: schema.TypeString, + Computed: true, + Description: "The VPC this flow log collector is associated with", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISFlowLogsRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + start := "" + allrecs := []vpcv1.FlowLogCollector{} + for { + listOptions := &vpcv1.ListFlowLogCollectorsOptions{} + if start != "" { + listOptions.Start = &start + } + flowlogCollectors, response, err := sess.ListFlowLogCollectors(listOptions) + if err != nil { + return fmt.Errorf("Error Fetching Flow Logs for VPC %s\n%s", err, response) + } + start = GetNext(flowlogCollectors.Next) + allrecs = append(allrecs, flowlogCollectors.FlowLogCollectors...)
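+ // GetNext extracts the start token from the collection's Next link; when it is empty the final page has been fetched and the loop below exits.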
+ if start == "" { + break + } + } + flowlogsInfo := make([]map[string]interface{}, 0) + for _, flowlogCollector := range allrecs { + + targetIntf := flowlogCollector.Target + target := targetIntf.(*vpcv1.FlowLogCollectorTarget) + + l := map[string]interface{}{ + "id": *flowlogCollector.ID, + "crn": *flowlogCollector.CRN, + "href": *flowlogCollector.Href, + "name": *flowlogCollector.Name, + "resource_group": *flowlogCollector.ResourceGroup.ID, + "created_at": flowlogCollector.CreatedAt.String(), + "lifecycle_state": *flowlogCollector.LifecycleState, + "storage_bucket": *flowlogCollector.StorageBucket.Name, + "active": *flowlogCollector.Active, + "vpc": *flowlogCollector.VPC.ID, + "target": *target.ID, + } + flowlogsInfo = append(flowlogsInfo, l) + } + d.SetId(dataSourceIBMISFlowLogsID(d)) + d.Set(isFlowLogs, flowlogsInfo) + return nil +} + +// dataSourceIBMISFlowLogsID returns a reasonable ID for a flowlogCollector list. +func dataSourceIBMISFlowLogsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_image.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_image.go new file mode 100644 index 00000000000..5130904a6ff --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_image.go @@ -0,0 +1,189 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISImage() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISImageRead, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Required: true, + Description: "Image name", + }, + + "visibility": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private"}), + Description: "Whether the image is publicly visible or private to the account", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of this image", + }, + + "os": { + Type: schema.TypeString, + Computed: true, + Description: "Image Operating system", + }, + "architecture": { + Type: schema.TypeString, + Computed: true, + Description: "The operating system architecture", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this image", + }, + isImageCheckSum: { + Type: schema.TypeString, + Computed: true, + Description: "The SHA256 Checksum for this image", + }, + isImageEncryptionKey: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN of the Key Protect Root Key or Hyper Protect Crypto Service Root Key for this resource", + }, + isImageEncryption: { + Type: schema.TypeString, + Computed: true, + Description: "The type of encryption used on the image", + }, + }, + } +} + +func dataSourceIBMISImageRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := d.Get("name").(string) + var visibility string + if v, ok := d.GetOk("visibility"); ok { + visibility = v.(string) + } + if userDetails.generation == 1 { + err := classicImageGet(d, meta, name, visibility) + if err != nil { + return err + } + } else { + err := imageGet(d, meta, name, visibility) + if err != nil { + 
return err + } + } + return nil +} + +func classicImageGet(d *schema.ResourceData, meta interface{}, name, visibility string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.Image{} + for { + listImagesOptions := &vpcclassicv1.ListImagesOptions{} + if start != "" { + listImagesOptions.Start = &start + } + if visibility != "" { + listImagesOptions.Visibility = &visibility + } + availableImages, response, err := sess.ListImages(listImagesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Images %s\n%s", err, response) + } + start = GetNext(availableImages.Next) + allrecs = append(allrecs, availableImages.Images...) + if start == "" { + break + } + } + for _, image := range allrecs { + if *image.Name == name { + d.SetId(*image.ID) + d.Set("status", *image.Status) + d.Set("name", *image.Name) + d.Set("visibility", *image.Visibility) + d.Set("os", *image.OperatingSystem.Name) + d.Set("architecture", *image.OperatingSystem.Architecture) + d.Set("crn", *image.CRN) + return nil + } + } + return fmt.Errorf("No image found with name %s", name) +} + +func imageGet(d *schema.ResourceData, meta interface{}, name, visibility string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcv1.Image{} + for { + listImagesOptions := &vpcv1.ListImagesOptions{} + if start != "" { + listImagesOptions.Start = &start + } + if visibility != "" { + listImagesOptions.Visibility = &visibility + } + availableImages, response, err := sess.ListImages(listImagesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Images %s\n%s", err, response) + } + start = GetNext(availableImages.Next) + allrecs = append(allrecs, availableImages.Images...) + if start == "" { + break + } + } + + for _, image := range allrecs { + if *image.Name == name { + d.SetId(*image.ID) + d.Set("status", *image.Status) + if *image.Status == "deprecated" { + fmt.Printf("[WARN] Given image %s is deprecated and will soon become obsolete.", name) + } + d.Set("name", *image.Name) + d.Set("visibility", *image.Visibility) + d.Set("os", *image.OperatingSystem.Name) + d.Set("architecture", *image.OperatingSystem.Architecture) + d.Set("crn", *image.CRN) + if image.Encryption != nil { + d.Set("encryption", *image.Encryption) + } + if image.EncryptionKey != nil { + d.Set("encryption_key", *image.EncryptionKey.CRN) + } + if image.File != nil && image.File.Checksums != nil { + d.Set(isImageCheckSum, *image.File.Checksums.Sha256) + } + return nil + } + } + + return fmt.Errorf("No image found with name %s", name) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_images.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_images.go new file mode 100644 index 00000000000..a394741b98c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_images.go @@ -0,0 +1,201 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isImages = "images" +) + +func dataSourceIBMISImages() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISImagesRead, + + Schema: map[string]*schema.Schema{ + + isImages: { + Type: schema.TypeList, + Description: "List of images", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Image name", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this image", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of this image", + }, + "visibility": { + Type: schema.TypeString, + Computed: true, + Description: "Whether the image is publicly visible or private to the account", + }, + "os": { + Type: schema.TypeString, + Computed: true, + Description: "Image Operating system", + }, + "architecture": { + Type: schema.TypeString, + Computed: true, + Description: "The operating system architecture", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this image", + }, + isImageCheckSum: { + Type: schema.TypeString, + Computed: true, + Description: "The SHA256 Checksum for this image", + }, + isImageEncryptionKey: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN of the Key Protect Root Key or Hyper Protect Crypto Service Root Key for this resource", + }, + isImageEncryption: { + Type: schema.TypeString, + Computed: true, + Description: "The type of encryption used on the image", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISImagesRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + if userDetails.generation == 1 { + err := classicImageList(d, meta) + if err != nil { + return err + } + } else { + err := imageList(d, meta) + if err != nil { + return err + } + } + return nil +} + +func classicImageList(d *schema.ResourceData, meta interface{}) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.Image{} + for { + listImagesOptions := &vpcclassicv1.ListImagesOptions{} + if start != "" { + listImagesOptions.Start = &start + } + availableImages, response, err := sess.ListImages(listImagesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Images %s\n%s", err, response) + } + start = GetNext(availableImages.Next) + allrecs = append(allrecs, availableImages.Images...) 
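+ // GetNext yields the pagination token from the collection's Next link; an empty token means the last page has been fetched.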
+ if start == "" {
+ break
+ }
+ }
+ imagesInfo := make([]map[string]interface{}, 0)
+ for _, image := range allrecs {
+
+ l := map[string]interface{}{
+ "name": *image.Name,
+ "id": *image.ID,
+ "status": *image.Status,
+ "crn": *image.CRN,
+ "visibility": *image.Visibility,
+ "os": *image.OperatingSystem.Name,
+ "architecture": *image.OperatingSystem.Architecture,
+ }
+ imagesInfo = append(imagesInfo, l)
+ }
+ d.SetId(dataSourceIBMISImagesID(d))
+ d.Set(isImages, imagesInfo)
+ return nil
+}
+
+func imageList(d *schema.ResourceData, meta interface{}) error {
+ sess, err := vpcClient(meta)
+ if err != nil {
+ return err
+ }
+ start := ""
+ allrecs := []vpcv1.Image{}
+ for {
+ listImagesOptions := &vpcv1.ListImagesOptions{}
+ if start != "" {
+ listImagesOptions.Start = &start
+ }
+ availableImages, response, err := sess.ListImages(listImagesOptions)
+ if err != nil {
+ return fmt.Errorf("Error Fetching Images %s\n%s", err, response)
+ }
+ start = GetNext(availableImages.Next)
+ allrecs = append(allrecs, availableImages.Images...)
+ if start == "" {
+ break
+ }
+ }
+ imagesInfo := make([]map[string]interface{}, 0)
+ for _, image := range allrecs {
+
+ l := map[string]interface{}{
+ "name": *image.Name,
+ "id": *image.ID,
+ "status": *image.Status,
+ "crn": *image.CRN,
+ "visibility": *image.Visibility,
+ "os": *image.OperatingSystem.Name,
+ "architecture": *image.OperatingSystem.Architecture,
+ }
+ if image.File != nil && image.File.Checksums != nil {
+ l[isImageCheckSum] = *image.File.Checksums.Sha256
+ }
+ if image.Encryption != nil {
+ l["encryption"] = *image.Encryption
+ }
+ if image.EncryptionKey != nil {
+ l["encryption_key"] = *image.EncryptionKey.CRN
+ }
+ imagesInfo = append(imagesInfo, l)
+ }
+ d.SetId(dataSourceIBMISImagesID(d))
+ d.Set(isImages, imagesInfo)
+ return nil
+}
+
+// dataSourceIBMISImagesID returns a reasonable ID for an image list.
+func dataSourceIBMISImagesID(d *schema.ResourceData) string {
+ return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance.go new file mode 100644 index 00000000000..05dd13d58e6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance.go @@ -0,0 +1,1072 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "log" + "strings" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/ScaleFT/sshkeys" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "golang.org/x/crypto/ssh" +) + +const ( + isInstancePEM = "private_key" + isInstancePassphrase = "passphrase" + isInstanceInitPassword = "password" + isInstanceInitKeys = "keys" +) + +func dataSourceIBMISInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceRead, + + Schema: map[string]*schema.Schema{ + + isInstanceName: { + Type: schema.TypeString, + Required: true, + Description: "Instance name", + }, + + isInstancePEM: { + Type: schema.TypeString, + Optional: true, + Description: "Instance Private Key file", + }, + + isInstancePassphrase: { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "Passphrase for Instance Private Key file", + }, + + isInstanceInitPassword: { + Type: schema.TypeString, + Sensitive: true, + Computed: true, + Description: "password for Windows Instance", + }, + + isInstanceInitKeys: { + Type: schema.TypeList, + Computed: true, + Description: "Instance keys", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance key id", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance key name", + }, + }, + }, + }, + + isInstanceVPC: { + Type: schema.TypeString, + Computed: true, + Description: "VPC id", + }, + + isInstanceZone: { + Type: schema.TypeString, + Computed: true, + Description: "Zone name", + }, + + isInstanceProfile: { + Type: schema.TypeString, + Computed: true, + Description: "Profile info", + }, + + isInstanceTags: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceIBMVPCHash, + Description: "list of tags for the instance", + }, + isInstanceBootVolume: { + Type: schema.TypeList, + Computed: true, + Description: "Instance Boot Volume", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot Volume id", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot Volume name", + }, + "device": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot Volume device", + }, + "volume_id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot Volume's volume id", + }, + "volume_name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot Volume's volume name", + }, + "volume_crn": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot Volume's volume CRN", + }, + }, + }, + }, + + isInstanceVolumeAttachments: { + Type: schema.TypeList, + Computed: true, + Description: "Instance Volume Attachments", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Volume Attachment id", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Volume Attachment name", + }, + "volume_id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot Volume's volume id", + }, + "volume_name": { + Type: 
schema.TypeString, + Computed: true, + Description: "Instance Boot Volume's volume name", + }, + "volume_crn": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot Volume's volume CRN", + }, + }, + }, + }, + + isInstancePrimaryNetworkInterface: { + Type: schema.TypeList, + Computed: true, + Description: "Primary Network interface info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Primary Network Interface id", + }, + isInstanceNicName: { + Type: schema.TypeString, + Computed: true, + Description: "Instance Primary Network Interface name", + }, + isInstanceNicPortSpeed: { + Type: schema.TypeInt, + Computed: true, + Description: "Instance Primary Network Interface port speed", + }, + isInstanceNicPrimaryIpv4Address: { + Type: schema.TypeString, + Computed: true, + Description: "Instance Primary Network Interface IPV4 Address", + }, + isInstanceNicSecurityGroups: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Instance Primary Network Interface Security groups", + }, + isInstanceNicSubnet: { + Type: schema.TypeString, + Computed: true, + Description: "Instance Primary Network Interface subnet", + }, + }, + }, + }, + + isInstanceNetworkInterfaces: { + Type: schema.TypeList, + Computed: true, + Description: "Instance Network interface info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Network Interface id", + }, + isInstanceNicName: { + Type: schema.TypeString, + Computed: true, + Description: "Instance Network Interface name", + }, + isInstanceNicPrimaryIpv4Address: { + Type: schema.TypeString, + Computed: true, + Description: "Instance Network Interface IPV4 Address", + }, + isInstanceNicSecurityGroups: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Instance Network Interface Security Groups", + }, + isInstanceNicSubnet: { + Type: schema.TypeString, + Computed: true, + Description: "Instance Network Interface subnet", + }, + }, + }, + }, + + isInstanceImage: { + Type: schema.TypeString, + Computed: true, + Description: "Instance Image", + }, + + isInstanceVolumes: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of volumes", + }, + + isInstanceResourceGroup: { + Type: schema.TypeString, + Computed: true, + Description: "Instance resource group", + }, + + isInstanceCPU: { + Type: schema.TypeList, + Computed: true, + Description: "Instance vCPU", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceCPUArch: { + Type: schema.TypeString, + Computed: true, + Description: "Instance vCPU Architecture", + }, + isInstanceCPUCount: { + Type: schema.TypeInt, + Computed: true, + Description: "Instance vCPU count", + }, + }, + }, + }, + + isInstanceGpu: { + Type: schema.TypeList, + Computed: true, + Deprecated: "This field is deprecated", + Description: "Instance GPU", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceGpuCores: { + Type: schema.TypeInt, + Computed: true, + Description: "Instance GPU Cores", + }, + isInstanceGpuCount: { + Type: schema.TypeInt, + Computed: true, + Description: "Instance GPU Count", + }, + isInstanceGpuMemory: { + Type: schema.TypeInt, + Computed: 
true, + Description: "Instance GPU Memory", + }, + isInstanceGpuManufacturer: { + Type: schema.TypeString, + Computed: true, + Description: "Instance GPU Manufacturer", + }, + isInstanceGpuModel: { + Type: schema.TypeString, + Computed: true, + Description: "Instance GPU Model", + }, + }, + }, + }, + + isInstanceMemory: { + Type: schema.TypeInt, + Computed: true, + Description: "Instance memory", + }, + + isInstanceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "instance status", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + isInstanceDisks: &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the instance's disks.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the disk was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this instance disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance disk.", + }, + "interface_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The disk interface used for attaching the disk.The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. 
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this disk.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISInstanceRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := d.Get(isInstanceName).(string) + if userDetails.generation == 1 { + err := classicInstanceGetByName(d, meta, name) + if err != nil { + return err + } + } else { + err := instanceGetByName(d, meta, name) + if err != nil { + return err + } + } + return nil +} + +func classicInstanceGetByName(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.Instance{} + for { + listInstancesOptions := &vpcclassicv1.ListInstancesOptions{} + if start != "" { + listInstancesOptions.Start = &start + } + instances, response, err := sess.ListInstances(listInstancesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Instances %s\n%s", err, response) + } + start = GetNext(instances.Next) + allrecs = append(allrecs, instances.Instances...) + if start == "" { + break + } + } + for _, instance := range allrecs { + if *instance.Name == name { + d.SetId(*instance.ID) + id := *instance.ID + d.Set(isInstanceName, *instance.Name) + if instance.Profile != nil { + d.Set(isInstanceProfile, *instance.Profile.Name) + } + cpuList := make([]map[string]interface{}, 0) + if instance.Vcpu != nil { + currentCPU := map[string]interface{}{} + currentCPU[isInstanceCPUArch] = *instance.Vcpu.Architecture + currentCPU[isInstanceCPUCount] = *instance.Vcpu.Count + cpuList = append(cpuList, currentCPU) + } + d.Set(isInstanceCPU, cpuList) + + d.Set(isInstanceMemory, *instance.Memory) + gpuList := make([]map[string]interface{}, 0) + d.Set(isInstanceGpu, gpuList) + + if instance.PrimaryNetworkInterface != nil { + primaryNicList := make([]map[string]interface{}, 0) + currentPrimNic := map[string]interface{}{} + currentPrimNic["id"] = *instance.PrimaryNetworkInterface.ID + currentPrimNic[isInstanceNicName] = *instance.PrimaryNetworkInterface.Name + currentPrimNic[isInstanceNicPrimaryIpv4Address] = *instance.PrimaryNetworkInterface.PrimaryIpv4Address + getnicoptions := &vpcclassicv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: instance.PrimaryNetworkInterface.ID, + } + insnic, response, err := sess.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentPrimNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentPrimNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + + primaryNicList = append(primaryNicList, currentPrimNic) + d.Set(isInstancePrimaryNetworkInterface, primaryNicList) + } + + if instance.NetworkInterfaces != nil { 
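+ // Collect the secondary network interfaces; the primary interface is reported separately above, so it is skipped here by comparing IDs.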
+ interfacesList := make([]map[string]interface{}, 0)
+ for _, intfc := range instance.NetworkInterfaces {
+ if *intfc.ID != *instance.PrimaryNetworkInterface.ID {
+ currentNic := map[string]interface{}{}
+ currentNic["id"] = *intfc.ID
+ currentNic[isInstanceNicName] = *intfc.Name
+ currentNic[isInstanceNicPrimaryIpv4Address] = *intfc.PrimaryIpv4Address
+ getnicoptions := &vpcclassicv1.GetInstanceNetworkInterfaceOptions{
+ InstanceID: &id,
+ ID: intfc.ID,
+ }
+ insnic, response, err := sess.GetInstanceNetworkInterface(getnicoptions)
+ if err != nil {
+ return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response)
+ }
+ currentNic[isInstanceNicSubnet] = *insnic.Subnet.ID
+ if len(insnic.SecurityGroups) != 0 {
+ secgrpList := []string{}
+ for i := 0; i < len(insnic.SecurityGroups); i++ {
+ secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID)))
+ }
+ currentNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList)
+ }
+ interfacesList = append(interfacesList, currentNic)
+
+ }
+ }
+
+ d.Set(isInstanceNetworkInterfaces, interfacesList)
+ }
+
+ var rsaKey *rsa.PrivateKey
+ if instance.Image != nil {
+ d.Set(isInstanceImage, *instance.Image.ID)
+ image := *instance.Image.Name
+ res := strings.Contains(image, "windows")
+ if res {
+ if privatekey, ok := d.GetOk(isInstancePEM); ok {
+ keyFlag := privatekey.(string)
+ keybytes := []byte(keyFlag)
+
+ if keyFlag != "" {
+ block, err := pem.Decode(keybytes)
+ if block == nil {
+ return fmt.Errorf("Failed to load the private key from the given key contents; make sure the value is the PEM-formatted private key itself, not a path to the key file")
+ }
+ isEncrypted := false
+ switch block.Type {
+ case "RSA PRIVATE KEY":
+ isEncrypted = x509.IsEncryptedPEMBlock(block)
+ case "OPENSSH PRIVATE KEY":
+ var err error
+ isEncrypted, err = isOpenSSHPrivKeyEncrypted(block.Bytes)
+ if err != nil {
+ return fmt.Errorf("Failed to check if the provided OpenSSH key is encrypted: %s", err)
+ }
+ default:
+ return fmt.Errorf("PEM and OpenSSH private key formats with RSA key type are supported, cannot support this key file type: %s", block.Type)
+ }
+ passphrase := ""
+ var privateKey interface{}
+ if isEncrypted {
+ if pass, ok := d.GetOk(isInstancePassphrase); ok {
+ passphrase = pass.(string)
+ } else {
+ return fmt.Errorf("Mandatory field 'passphrase' not provided")
+ }
+ var err error
+ privateKey, err = sshkeys.ParseEncryptedRawPrivateKey(keybytes, []byte(passphrase))
+ if err != nil {
+ return fmt.Errorf("Failed to decrypt the private key: %s", err)
+ }
+ } else {
+ var err error
+ privateKey, err = sshkeys.ParseEncryptedRawPrivateKey(keybytes, nil)
+ if err != nil {
+ return fmt.Errorf("Failed to decrypt the private key: %s", err)
+ }
+ }
+ var ok bool
+ rsaKey, ok = privateKey.(*rsa.PrivateKey)
+ if !ok {
+ return fmt.Errorf("Failed to convert to RSA private key")
+ }
+ }
+ }
+ }
+ }
+
+ getInstanceInitializationOptions := &vpcclassicv1.GetInstanceInitializationOptions{
+ ID: &id,
+ }
+ initParms, response, err := sess.GetInstanceInitialization(getInstanceInitializationOptions)
+ if err != nil {
+ return fmt.Errorf("Error Getting instance Initialization: %s\n%s", err, response)
+ }
+ if initParms.Keys != nil {
+ initKeyList := make([]map[string]interface{}, 0)
+ for _, key := range initParms.Keys {
+ key := key.(*vpcclassicv1.KeyReferenceInstanceInitializationContext)
+ initKey := map[string]interface{}{}
+ id := ""
+ if key.ID != nil {
+ id = *key.ID
+ }
+ initKey["id"] = id
+ name := ""
+
if key.Name != nil { + name = *key.Name + } + initKey["name"] = name + initKeyList = append(initKeyList, initKey) + break + + } + d.Set(isInstanceInitKeys, initKeyList) + } + if initParms.Password != nil && initParms.Password.EncryptedPassword != nil { + ciphertext := *initParms.Password.EncryptedPassword + password := base64.StdEncoding.EncodeToString(ciphertext) + if rsaKey != nil { + rng := rand.Reader + clearPassword, err := rsa.DecryptOAEP(sha256.New(), rng, rsaKey, ciphertext, nil) + if err != nil { + return fmt.Errorf("Can not decrypt the password with the given key, %s", err) + } + password = string(clearPassword) + } + d.Set(isInstanceInitPassword, password) + } + + d.Set(isInstanceStatus, *instance.Status) + d.Set(isInstanceVPC, *instance.VPC.ID) + d.Set(isInstanceZone, *instance.Zone.Name) + + var volumes []string + volumes = make([]string, 0) + if instance.VolumeAttachments != nil { + for _, volume := range instance.VolumeAttachments { + if volume.Volume != nil && *volume.Volume.ID != *instance.BootVolumeAttachment.Volume.ID { + volumes = append(volumes, *volume.Volume.ID) + } + } + } + d.Set(isInstanceVolumes, newStringSet(schema.HashString, volumes)) + if instance.VolumeAttachments != nil { + volList := make([]map[string]interface{}, 0) + for _, volume := range instance.VolumeAttachments { + vol := map[string]interface{}{} + if volume.Volume != nil { + vol["id"] = *volume.ID + vol["volume_id"] = *volume.Volume.ID + vol["name"] = *volume.Name + vol["volume_name"] = *volume.Volume.Name + vol["volume_crn"] = *volume.Volume.CRN + volList = append(volList, vol) + } + } + d.Set(isInstanceVolumeAttachments, volList) + } + if instance.BootVolumeAttachment != nil { + bootVolList := make([]map[string]interface{}, 0) + bootVol := map[string]interface{}{} + bootVol["id"] = *instance.BootVolumeAttachment.ID + bootVol["name"] = *instance.BootVolumeAttachment.Name + if instance.BootVolumeAttachment.Device != nil { + bootVol["device"] = *instance.BootVolumeAttachment.Device.ID + } + if instance.BootVolumeAttachment.Volume != nil { + bootVol["volume_name"] = *instance.BootVolumeAttachment.Volume.Name + bootVol["volume_id"] = *instance.BootVolumeAttachment.Volume.ID + bootVol["volume_crn"] = *instance.BootVolumeAttachment.Volume.CRN + } + bootVolList = append(bootVolList, bootVol) + d.Set(isInstanceBootVolume, bootVolList) + } + tags, err := GetTagsUsingCRN(meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Instance (%s) tags: %s", d.Id(), err) + } + d.Set(isInstanceTags, tags) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/compute/vs") + d.Set(ResourceName, instance.Name) + d.Set(ResourceCRN, instance.CRN) + d.Set(ResourceStatus, instance.Status) + if instance.ResourceGroup != nil { + rsMangClient, err := meta.(ClientSession).ResourceManagementAPIv2() + if err != nil { + return err + } + grp, err := rsMangClient.ResourceGroup().Get(*instance.ResourceGroup.ID) + if err != nil { + return err + } + d.Set(ResourceGroupName, grp.Name) + d.Set(isInstanceResourceGroup, instance.ResourceGroup.ID) + } + return nil + } + } + return fmt.Errorf("No Instance found with name %s", name) +} + +func instanceGetByName(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcv1.Instance{} + for { + listInstancesOptions := &vpcv1.ListInstancesOptions{} + if start != "" { + 
listInstancesOptions.Start = &start + } + instances, response, err := sess.ListInstances(listInstancesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Instances %s\n%s", err, response) + } + start = GetNext(instances.Next) + allrecs = append(allrecs, instances.Instances...) + if start == "" { + break + } + } + for _, instance := range allrecs { + if *instance.Name == name { + d.SetId(*instance.ID) + id := *instance.ID + d.Set(isInstanceName, *instance.Name) + if instance.Profile != nil { + d.Set(isInstanceProfile, *instance.Profile.Name) + } + cpuList := make([]map[string]interface{}, 0) + if instance.Vcpu != nil { + currentCPU := map[string]interface{}{} + currentCPU[isInstanceCPUArch] = *instance.Vcpu.Architecture + currentCPU[isInstanceCPUCount] = *instance.Vcpu.Count + cpuList = append(cpuList, currentCPU) + } + d.Set(isInstanceCPU, cpuList) + + d.Set(isInstanceMemory, *instance.Memory) + gpuList := make([]map[string]interface{}, 0) + d.Set(isInstanceGpu, gpuList) + + if instance.Disks != nil { + err = d.Set(isInstanceDisks, dataSourceInstanceFlattenDisks(instance.Disks)) + if err != nil { + return fmt.Errorf("Error setting disks %s", err) + } + } + + if instance.PrimaryNetworkInterface != nil { + primaryNicList := make([]map[string]interface{}, 0) + currentPrimNic := map[string]interface{}{} + currentPrimNic["id"] = *instance.PrimaryNetworkInterface.ID + currentPrimNic[isInstanceNicName] = *instance.PrimaryNetworkInterface.Name + currentPrimNic[isInstanceNicPrimaryIpv4Address] = *instance.PrimaryNetworkInterface.PrimaryIpv4Address + getnicoptions := &vpcv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: instance.PrimaryNetworkInterface.ID, + } + insnic, response, err := sess.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentPrimNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentPrimNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + + primaryNicList = append(primaryNicList, currentPrimNic) + d.Set(isInstancePrimaryNetworkInterface, primaryNicList) + } + + if instance.NetworkInterfaces != nil { + interfacesList := make([]map[string]interface{}, 0) + for _, intfc := range instance.NetworkInterfaces { + if *intfc.ID != *instance.PrimaryNetworkInterface.ID { + currentNic := map[string]interface{}{} + currentNic["id"] = *intfc.ID + currentNic[isInstanceNicName] = *intfc.Name + currentNic[isInstanceNicPrimaryIpv4Address] = *intfc.PrimaryIpv4Address + getnicoptions := &vpcv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: intfc.ID, + } + insnic, response, err := sess.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + interfacesList = append(interfacesList, currentNic) + + } + } + + d.Set(isInstanceNetworkInterfaces, interfacesList) + } + + var 
rsaKey *rsa.PrivateKey
+ if instance.Image != nil {
+ d.Set(isInstanceImage, *instance.Image.ID)
+ image := *instance.Image.Name
+ res := strings.Contains(image, "windows")
+ if res {
+ if privatekey, ok := d.GetOk(isInstancePEM); ok {
+ keyFlag := privatekey.(string)
+ keybytes := []byte(keyFlag)
+
+ if keyFlag != "" {
+ block, err := pem.Decode(keybytes)
+ if block == nil {
+ return fmt.Errorf("Failed to load the private key from the given key contents; make sure the value is the PEM-formatted private key itself, not a path to the key file")
+ }
+ isEncrypted := false
+ switch block.Type {
+ case "RSA PRIVATE KEY":
+ isEncrypted = x509.IsEncryptedPEMBlock(block)
+ case "OPENSSH PRIVATE KEY":
+ var err error
+ isEncrypted, err = isOpenSSHPrivKeyEncrypted(block.Bytes)
+ if err != nil {
+ return fmt.Errorf("Failed to check if the provided OpenSSH key is encrypted: %s", err)
+ }
+ default:
+ return fmt.Errorf("PEM and OpenSSH private key formats with RSA key type are supported, cannot support this key file type: %s", block.Type)
+ }
+ passphrase := ""
+ var privateKey interface{}
+ if isEncrypted {
+ if pass, ok := d.GetOk(isInstancePassphrase); ok {
+ passphrase = pass.(string)
+ } else {
+ return fmt.Errorf("Mandatory field 'passphrase' not provided")
+ }
+ var err error
+ privateKey, err = sshkeys.ParseEncryptedRawPrivateKey(keybytes, []byte(passphrase))
+ if err != nil {
+ return fmt.Errorf("Failed to decrypt the private key: %s", err)
+ }
+ } else {
+ var err error
+ privateKey, err = sshkeys.ParseEncryptedRawPrivateKey(keybytes, nil)
+ if err != nil {
+ return fmt.Errorf("Failed to decrypt the private key: %s", err)
+ }
+ }
+ var ok bool
+ rsaKey, ok = privateKey.(*rsa.PrivateKey)
+ if !ok {
+ return fmt.Errorf("Failed to convert to RSA private key")
+ }
+ }
+ }
+ }
+ }
+
+ getInstanceInitializationOptions := &vpcv1.GetInstanceInitializationOptions{
+ ID: &id,
+ }
+ initParms, response, err := sess.GetInstanceInitialization(getInstanceInitializationOptions)
+ if err != nil {
+ return fmt.Errorf("Error Getting instance Initialization: %s\n%s", err, response)
+ }
+ if initParms.Keys != nil {
+ initKeyList := make([]map[string]interface{}, 0)
+ for _, key := range initParms.Keys {
+ key := key.(*vpcv1.KeyReferenceInstanceInitializationContext)
+ initKey := map[string]interface{}{}
+ id := ""
+ if key.ID != nil {
+ id = *key.ID
+ }
+ initKey["id"] = id
+ name := ""
+ if key.Name != nil {
+ name = *key.Name
+ }
+ initKey["name"] = name
+ initKeyList = append(initKeyList, initKey)
+ break
+
+ }
+ d.Set(isInstanceInitKeys, initKeyList)
+ }
+ if initParms.Password != nil && initParms.Password.EncryptedPassword != nil {
+ ciphertext := *initParms.Password.EncryptedPassword
+ password := base64.StdEncoding.EncodeToString(ciphertext)
+ if rsaKey != nil {
+ rng := rand.Reader
+ clearPassword, err := rsa.DecryptPKCS1v15(rng, rsaKey, ciphertext)
+ if err != nil {
+ return fmt.Errorf("Cannot decrypt the password with the given key: %s", err)
+ }
+ password = string(clearPassword)
+ }
+ d.Set(isInstanceInitPassword, password)
+ }
+
+ d.Set(isInstanceStatus, *instance.Status)
+ d.Set(isInstanceVPC, *instance.VPC.ID)
+ d.Set(isInstanceZone, *instance.Zone.Name)
+
+ var volumes []string
+ volumes = make([]string, 0)
+ if instance.VolumeAttachments != nil {
+ for _, volume := range instance.VolumeAttachments {
+ if volume.Volume != nil && *volume.Volume.ID != *instance.BootVolumeAttachment.Volume.ID {
+ volumes = append(volumes, *volume.Volume.ID)
+ }
+ }
+ }
+ d.Set(isInstanceVolumes,
newStringSet(schema.HashString, volumes)) + if instance.VolumeAttachments != nil { + volList := make([]map[string]interface{}, 0) + for _, volume := range instance.VolumeAttachments { + vol := map[string]interface{}{} + if volume.Volume != nil { + vol["id"] = *volume.ID + vol["volume_id"] = *volume.Volume.ID + vol["name"] = *volume.Name + vol["volume_name"] = *volume.Volume.Name + vol["volume_crn"] = *volume.Volume.CRN + volList = append(volList, vol) + } + } + d.Set(isInstanceVolumeAttachments, volList) + } + if instance.BootVolumeAttachment != nil { + bootVolList := make([]map[string]interface{}, 0) + bootVol := map[string]interface{}{} + bootVol["id"] = *instance.BootVolumeAttachment.ID + bootVol["name"] = *instance.BootVolumeAttachment.Name + if instance.BootVolumeAttachment.Device != nil { + bootVol["device"] = *instance.BootVolumeAttachment.Device.ID + } + if instance.BootVolumeAttachment.Volume != nil { + bootVol["volume_name"] = *instance.BootVolumeAttachment.Volume.Name + bootVol["volume_id"] = *instance.BootVolumeAttachment.Volume.ID + bootVol["volume_crn"] = *instance.BootVolumeAttachment.Volume.CRN + } + bootVolList = append(bootVolList, bootVol) + d.Set(isInstanceBootVolume, bootVolList) + } + tags, err := GetTagsUsingCRN(meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Instance (%s) tags: %s", d.Id(), err) + } + d.Set(isInstanceTags, tags) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/compute/vs") + d.Set(ResourceName, instance.Name) + d.Set(ResourceCRN, instance.CRN) + d.Set(ResourceStatus, instance.Status) + if instance.ResourceGroup != nil { + d.Set(isInstanceResourceGroup, instance.ResourceGroup.ID) + d.Set(ResourceGroupName, instance.ResourceGroup.Name) + } + return nil + } + } + return fmt.Errorf("No Instance found with name %s", name) +} + +const opensshv1Magic = "openssh-key-v1" + +type opensshPrivateKey struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey string + PrivKeyBlock string +} + +func isOpenSSHPrivKeyEncrypted(data []byte) (bool, error) { + magic := append([]byte(opensshv1Magic), 0) + if !bytes.Equal(magic, data[0:len(magic)]) { + return false, errors.New("Invalid openssh private key format") + } + content := data[len(magic):] + + privKey := opensshPrivateKey{} + + if err := ssh.Unmarshal(content, &privKey); err != nil { + return false, err + } + + if privKey.KdfName == "none" && privKey.CipherName == "none" { + return false, nil + } + return true, nil +} + +func dataSourceInstanceFlattenDisks(result []vpcv1.InstanceDisk) (disks []map[string]interface{}) { + for _, disksItem := range result { + disks = append(disks, dataSourceInstanceDisksToMap(disksItem)) + } + + return disks +} + +func dataSourceInstanceDisksToMap(disksItem vpcv1.InstanceDisk) (disksMap map[string]interface{}) { + disksMap = map[string]interface{}{} + + if disksItem.CreatedAt != nil { + disksMap["created_at"] = disksItem.CreatedAt.String() + } + if disksItem.Href != nil { + disksMap["href"] = disksItem.Href + } + if disksItem.ID != nil { + disksMap["id"] = disksItem.ID + } + if disksItem.InterfaceType != nil { + disksMap["interface_type"] = disksItem.InterfaceType + } + if disksItem.Name != nil { + disksMap["name"] = disksItem.Name + } + if disksItem.ResourceType != nil { + disksMap["resource_type"] = disksItem.ResourceType + } + if disksItem.Size != nil { + disksMap["size"] = disksItem.Size + } + + return disksMap +} diff --git 
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_disk.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_disk.go new file mode 100644 index 00000000000..4df461d956b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_disk.go @@ -0,0 +1,103 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsInstanceDisk() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsInstanceDiskRead, + + Schema: map[string]*schema.Schema{ + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The instance identifier.", + }, + "disk": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The instance disk identifier.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the disk was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this instance disk.", + }, + "interface_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The disk interface used for attaching the disk.The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this disk.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + }, + } +} + +func dataSourceIbmIsInstanceDiskRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + getInstanceDiskOptions := &vpcv1.GetInstanceDiskOptions{} + + getInstanceDiskOptions.SetInstanceID(d.Get("instance").(string)) + getInstanceDiskOptions.SetID(d.Get("disk").(string)) + + instanceDisk, response, err := vpcClient.GetInstanceDiskWithContext(context.TODO(), getInstanceDiskOptions) + if err != nil { + log.Printf("[DEBUG] GetInstanceDiskWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*instanceDisk.ID) + if err = d.Set("created_at", instanceDisk.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + if err = d.Set("href", instanceDisk.Href); err != nil { + return fmt.Errorf("Error setting href: %s", err) + } + if err = d.Set("interface_type", instanceDisk.InterfaceType); err != nil { + return fmt.Errorf("Error setting interface_type: %s", err) + } + if err = d.Set("name", instanceDisk.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("resource_type", instanceDisk.ResourceType); err != nil { + return fmt.Errorf("Error setting resource_type: %s", err) + } + if err = d.Set("size", instanceDisk.Size); err != nil { + return fmt.Errorf("Error setting size: %s", err) + } + + return nil +} diff --git 
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_disks.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_disks.go new file mode 100644 index 00000000000..3896bd4a584 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_disks.go @@ -0,0 +1,141 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIbmIsInstanceDisks() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIbmIsInstanceDisksRead, + + Schema: map[string]*schema.Schema{ + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The instance identifier.", + }, + isInstanceDisks: &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the instance's disks.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the disk was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this instance disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance disk.", + }, + "interface_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The disk interface used for attaching the disk.The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this disk.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + }, + }, + }, + }, + } +} + +func dataSourceIbmIsInstanceDisksRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + listInstanceDisksOptions := &vpcv1.ListInstanceDisksOptions{} + + listInstanceDisksOptions.SetInstanceID(d.Get("instance").(string)) + + instanceDiskCollection, response, err := vpcClient.ListInstanceDisksWithContext(context.TODO(), listInstanceDisksOptions) + if err != nil { + log.Printf("[DEBUG] ListInstanceDisksWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(dataSourceIbmIsInstanceDisksID(d)) + + if instanceDiskCollection.Disks != nil { + err = d.Set(isInstanceDisks, dataSourceInstanceDiskCollectionFlattenDisks(instanceDiskCollection.Disks)) + if err != nil { + return fmt.Errorf("Error setting disks %s", err) + } + } + + return nil +} + +// dataSourceIbmIsInstanceDisksID returns a reasonable ID for the list. 
+func dataSourceIbmIsInstanceDisksID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceInstanceDiskCollectionFlattenDisks(result []vpcv1.InstanceDisk) (disks []map[string]interface{}) { + for _, disksItem := range result { + disks = append(disks, dataSourceInstanceDiskCollectionDisksToMap(disksItem)) + } + + return disks +} + +func dataSourceInstanceDiskCollectionDisksToMap(disksItem vpcv1.InstanceDisk) (disksMap map[string]interface{}) { + disksMap = map[string]interface{}{} + + if disksItem.CreatedAt != nil { + disksMap["created_at"] = disksItem.CreatedAt.String() + } + if disksItem.Href != nil { + disksMap["href"] = disksItem.Href + } + if disksItem.ID != nil { + disksMap["id"] = disksItem.ID + } + if disksItem.InterfaceType != nil { + disksMap["interface_type"] = disksItem.InterfaceType + } + if disksItem.Name != nil { + disksMap["name"] = disksItem.Name + } + if disksItem.ResourceType != nil { + disksMap["resource_type"] = disksItem.ResourceType + } + if disksItem.Size != nil { + disksMap["size"] = disksItem.Size + } + + return disksMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group.go new file mode 100644 index 00000000000..ed1d2a85889 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group.go @@ -0,0 +1,142 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISInstanceGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceGroupRead, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Required: true, + Description: "The user-defined name for this instance group", + }, + + "instance_template": { + Type: schema.TypeString, + Computed: true, + Description: "instance template ID", + }, + + "membership_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of instances in the instance group", + }, + + "resource_group": { + Type: schema.TypeString, + Computed: true, + Description: "Resource group ID", + }, + + "subnets": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Description: "list of subnet IDs", + }, + + "application_port": { + Type: schema.TypeInt, + Computed: true, + Description: "Used by the instance group when scaling up instances to supply the port for the load balancer pool member.", + }, + + "load_balancer_pool": { + Type: schema.TypeString, + Computed: true, + Description: "load balancer pool ID", + }, + + "managers": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Description: "list of Managers associated with instancegroup", + }, + + "vpc": { + Type: schema.TypeString, + Computed: true, + Description: "vpc instance", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Instance group status - deleting, healthy, scaling, unhealthy", + }, + }, + } +} + +func dataSourceIBMISInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + name := d.Get("name") + + // Support for pagination + start := "" + allrecs := []vpcv1.InstanceGroup{} + for { + 
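+ // Page through the instance group collection, passing the token from the previous page's Next link as the start of the next request.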
listInstanceGroupOptions := vpcv1.ListInstanceGroupsOptions{} + if start != "" { + listInstanceGroupOptions.Start = &start + } + instanceGroupsCollection, response, err := sess.ListInstanceGroups(&listInstanceGroupOptions) + if err != nil { + return fmt.Errorf("Error Fetching InstanceGroups %s\n%s", err, response) + } + start = GetNext(instanceGroupsCollection.Next) + allrecs = append(allrecs, instanceGroupsCollection.InstanceGroups...) + + if start == "" { + break + } + + } + + for _, instanceGroup := range allrecs { + if *instanceGroup.Name == name { + d.Set("name", *instanceGroup.Name) + d.Set("instance_template", *instanceGroup.InstanceTemplate.ID) + d.Set("membership_count", *instanceGroup.MembershipCount) + d.Set("resource_group", *instanceGroup.ResourceGroup.ID) + d.SetId(*instanceGroup.ID) + if instanceGroup.ApplicationPort != nil { + d.Set("application_port", *instanceGroup.ApplicationPort) + } + subnets := make([]string, 0) + for i := 0; i < len(instanceGroup.Subnets); i++ { + subnets = append(subnets, string(*(instanceGroup.Subnets[i].ID))) + } + if instanceGroup.LoadBalancerPool != nil { + d.Set("load_balancer_pool", *instanceGroup.LoadBalancerPool.ID) + } + d.Set("subnets", subnets) + managers := make([]string, 0) + for i := 0; i < len(instanceGroup.Managers); i++ { + managers = append(managers, string(*(instanceGroup.Managers[i].ID))) + } + d.Set("managers", managers) + d.Set("vpc", *instanceGroup.VPC.ID) + d.Set("status", *instanceGroup.Status) + return nil + } + } + return fmt.Errorf("Instance group %s not found", name) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager.go new file mode 100644 index 00000000000..0c96b10407d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager.go @@ -0,0 +1,168 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+ "fmt"
+
+ "github.com/IBM/vpc-go-sdk/vpcv1"
+ "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMISInstanceGroupManager() *schema.Resource {
+ return &schema.Resource{
+ Read: dataSourceIBMISInstanceGroupManagerRead,
+
+ Schema: map[string]*schema.Schema{
+
+ "instance_group": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "instance group ID",
+ },
+
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "Name of the instance group manager.",
+ },
+
+ "manager_type": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The type of instance group manager.",
+ },
+
+ "aggregation_window": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The time window in seconds to aggregate metrics prior to evaluation",
+ },
+
+ "cooldown": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The duration of time in seconds to pause further scale actions after scaling has taken place",
+ },
+
+ "max_membership_count": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The maximum number of members in a managed instance group",
+ },
+
+ "min_membership_count": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The minimum number of members in a managed instance group",
+ },
+
+ "manager_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The ID of instance group manager.",
+ },
+
+ "policies": {
+ Type: schema.TypeList,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Computed: true,
+ Description: "list of Policies associated with instancegroup manager",
+ },
+
+ "actions": {
+ Type: schema.TypeList,
+ Computed: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "instance_group_manager_action": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "instance_group_manager_action_name": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "resource_type": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func dataSourceIBMISInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error {
+ sess, err := vpcClient(meta)
+ if err != nil {
+ return err
+ }
+
+ instanceGroupID := d.Get("instance_group").(string)
+
+ // Support for pagination
+ start := ""
+ allrecs := []vpcv1.InstanceGroupManagerIntf{}
+
+ for {
+ listInstanceGroupManagerOptions := vpcv1.ListInstanceGroupManagersOptions{
+ InstanceGroupID: &instanceGroupID,
+ }
+ // Carry the page token into subsequent requests, mirroring the other paginated List calls in this package.
+ if start != "" {
+ listInstanceGroupManagerOptions.Start = &start
+ }
+ instanceGroupManagerCollections, response, err := sess.ListInstanceGroupManagers(&listInstanceGroupManagerOptions)
+ if err != nil {
+ return fmt.Errorf("Error Getting InstanceGroup Managers %s\n%s", err, response)
+ }
+ start = GetNext(instanceGroupManagerCollections.Next)
+ allrecs = append(allrecs, instanceGroupManagerCollections.Managers...)
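+ // An empty pagination token means every page has been consumed.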
+ + if start == "" { + break + } + } + + instanceGroupManagerName := d.Get("name").(string) + for _, instanceGroupManagerIntf := range allrecs { + instanceGroupManager := instanceGroupManagerIntf.(*vpcv1.InstanceGroupManager) + if instanceGroupManagerName == *instanceGroupManager.Name { + d.SetId(fmt.Sprintf("%s/%s", instanceGroupID, *instanceGroupManager.ID)) + d.Set("manager_type", *instanceGroupManager.ManagerType) + d.Set("manager_id", *instanceGroupManager.ID) + + if *instanceGroupManager.ManagerType == "scheduled" { + + actions := make([]map[string]interface{}, 0) + if instanceGroupManager.Actions != nil { + for _, action := range instanceGroupManager.Actions { + actn := map[string]interface{}{ + "instance_group_manager_action": action.ID, + "instance_group_manager_action_name": action.Name, + "resource_type": action.ResourceType, + } + actions = append(actions, actn) + } + d.Set("actions", actions) + } + + } else { + d.Set("aggregation_window", *instanceGroupManager.AggregationWindow) + d.Set("cooldown", *instanceGroupManager.Cooldown) + d.Set("max_membership_count", *instanceGroupManager.MaxMembershipCount) + d.Set("min_membership_count", *instanceGroupManager.MinMembershipCount) + policies := make([]string, 0) + if instanceGroupManager.Policies != nil { + for i := 0; i < len(instanceGroupManager.Policies); i++ { + policies = append(policies, string(*(instanceGroupManager.Policies[i].ID))) + } + } + + d.Set("policies", policies) + } + + return nil + } + } + return fmt.Errorf("Instance group manager %s not found", instanceGroupManagerName) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_action.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_action.go new file mode 100644 index 00000000000..3f57ab08051 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_action.go @@ -0,0 +1,238 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISInstanceGroupManagerAction() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceGroupManagerActionRead, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Required: true, + Description: "instance group manager action name", + }, + + "action_id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance group manager action ID", + }, + + "instance_group": { + Type: schema.TypeString, + Required: true, + Description: "instance group ID", + }, + + "instance_group_manager": { + Type: schema.TypeString, + Required: true, + Description: "Instance group manager ID of type scheduled", + }, + + "run_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time the scheduled action will run.", + }, + + "membership_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of members the instance group should have at the scheduled time.", + }, + + "max_membership_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The maximum number of members in a managed instance group", + }, + + "min_membership_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum number of members in a managed instance group", + }, + + "target_manager": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance group manager of type autoscale.", + }, + + "target_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance group manager name of type autoscale.", + }, + + "cron_spec": { + Type: schema.TypeString, + Computed: true, + Description: "The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min period.", + }, + + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the instance group action- `active`: Action is ready to be run- `completed`: Action was completed successfully- `failed`: Action could not be completed successfully- `incompatible`: Action parameters are not compatible with the group or manager- `omitted`: Action was not applied because this action's manager was disabled.", + }, + "updated_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the instance group manager action was modified.", + }, + "action_type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of action for the instance group.", + }, + + "last_applied_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time the scheduled action was last applied. If empty the action has never been applied.", + }, + "next_run_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time the scheduled action will next run. 
If empty the system is currently calculating the next run time.",
+ },
+ "auto_delete": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ "auto_delete_timeout": {
+ Type: schema.TypeInt,
+ Computed: true,
+ },
+ "created_at": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The date and time that the instance group manager action was created.",
+ },
+ },
+ }
+}
+
+func dataSourceIBMISInstanceGroupManagerActionRead(d *schema.ResourceData, meta interface{}) error {
+ sess, err := vpcClient(meta)
+ if err != nil {
+ return err
+ }
+
+ instanceGroupManagerID := d.Get("instance_group_manager").(string)
+ instanceGroupID := d.Get("instance_group").(string)
+ actionName := d.Get("name").(string)
+
+ // Support for pagination
+ start := ""
+ allrecs := []vpcv1.InstanceGroupManagerActionIntf{}
+
+ for {
+ listInstanceGroupManagerActionsOptions := vpcv1.ListInstanceGroupManagerActionsOptions{
+ InstanceGroupID: &instanceGroupID,
+ InstanceGroupManagerID: &instanceGroupManagerID,
+ }
+ // Carry the page token into subsequent requests, mirroring the other paginated List calls in this package.
+ if start != "" {
+ listInstanceGroupManagerActionsOptions.Start = &start
+ }
+
+ instanceGroupManagerActionsCollection, response, err := sess.ListInstanceGroupManagerActions(&listInstanceGroupManagerActionsOptions)
+ if err != nil {
+ return fmt.Errorf("error getting InstanceGroup Manager Actions %s\n%s", err, response)
+ }
+ if instanceGroupManagerActionsCollection != nil && *instanceGroupManagerActionsCollection.TotalCount == int64(0) {
+ break
+ }
+ start = GetNext(instanceGroupManagerActionsCollection.Next)
+ allrecs = append(allrecs, instanceGroupManagerActionsCollection.Actions...)
+ if start == "" {
+ break
+ }
+ }
+
+ for _, data := range allrecs {
+ instanceGroupManagerAction := data.(*vpcv1.InstanceGroupManagerAction)
+ if actionName == *instanceGroupManagerAction.Name {
+ d.SetId(fmt.Sprintf("%s/%s/%s", instanceGroupID, instanceGroupManagerID, *instanceGroupManagerAction.ID))
+
+ if err = d.Set("auto_delete", *instanceGroupManagerAction.AutoDelete); err != nil {
+ return fmt.Errorf("error setting auto_delete: %s", err)
+ }
+
+ if err = d.Set("auto_delete_timeout", intValue(instanceGroupManagerAction.AutoDeleteTimeout)); err != nil {
+ return fmt.Errorf("error setting auto_delete_timeout: %s", err)
+ }
+ if err = d.Set("created_at", instanceGroupManagerAction.CreatedAt.String()); err != nil {
+ return fmt.Errorf("error setting created_at: %s", err)
+ }
+
+ if err = d.Set("action_id", *instanceGroupManagerAction.ID); err != nil {
+ return fmt.Errorf("error setting action_id: %s", err)
+ }
+
+ if err = d.Set("resource_type", *instanceGroupManagerAction.ResourceType); err != nil {
+ return fmt.Errorf("error setting resource_type: %s", err)
+ }
+ if err = d.Set("status", *instanceGroupManagerAction.Status); err != nil {
+ return fmt.Errorf("error setting status: %s", err)
+ }
+ if err = d.Set("updated_at", instanceGroupManagerAction.UpdatedAt.String()); err != nil {
+ return fmt.Errorf("error setting updated_at: %s", err)
+ }
+ if err = d.Set("action_type", *instanceGroupManagerAction.ActionType); err != nil {
+ return fmt.Errorf("error setting action_type: %s", err)
+ }
+
+ if instanceGroupManagerAction.CronSpec != nil {
+ if err = d.Set("cron_spec", *instanceGroupManagerAction.CronSpec); err != nil {
+ return fmt.Errorf("error setting cron_spec: %s", err)
+ }
+ }
+
+ if instanceGroupManagerAction.LastAppliedAt != nil {
+ if err = d.Set("last_applied_at", instanceGroupManagerAction.LastAppliedAt.String()); err != nil {
+ return fmt.Errorf("error setting last_applied_at: %s", err)
+ }
+ }
+ if instanceGroupManagerAction.NextRunAt != nil {
+ if err =
d.Set("next_run_at", instanceGroupManagerAction.NextRunAt.String()); err != nil { + return fmt.Errorf("error setting next_run_at: %s", err) + } + } + + instanceGroupManagerScheduledActionGroupGroup := instanceGroupManagerAction.Group + if instanceGroupManagerScheduledActionGroupGroup != nil && instanceGroupManagerScheduledActionGroupGroup.MembershipCount != nil { + d.Set("membership_count", intValue(instanceGroupManagerScheduledActionGroupGroup.MembershipCount)) + } + instanceGroupManagerScheduledActionManagerManagerInt := instanceGroupManagerAction.Manager + if instanceGroupManagerScheduledActionManagerManagerInt != nil { + instanceGroupManagerScheduledActionManagerManager := instanceGroupManagerScheduledActionManagerManagerInt.(*vpcv1.InstanceGroupManagerScheduledActionManager) + if instanceGroupManagerScheduledActionManagerManager != nil && instanceGroupManagerScheduledActionManagerManager.ID != nil { + + if instanceGroupManagerScheduledActionManagerManager.MaxMembershipCount != nil { + d.Set("max_membership_count", intValue(instanceGroupManagerScheduledActionManagerManager.MaxMembershipCount)) + } + d.Set("min_membership_count", intValue(instanceGroupManagerScheduledActionManagerManager.MinMembershipCount)) + d.Set("target_manager_name", *instanceGroupManagerScheduledActionManagerManager.Name) + d.Set("target_manager", *instanceGroupManagerScheduledActionManagerManager.ID) + } + } + return nil + } + } + return fmt.Errorf("instance group manager action %s not found", actionName) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_actions.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_actions.go new file mode 100644 index 00000000000..91b4bbb39c2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_actions.go @@ -0,0 +1,233 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISInstanceGroupManagerActions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceGroupManagerActionsRead, + + Schema: map[string]*schema.Schema{ + + "instance_group": { + Type: schema.TypeString, + Required: true, + Description: "instance group ID", + }, + + "instance_group_manager": { + Type: schema.TypeString, + Required: true, + Description: "Instance group manager ID", + }, + + "instance_group_manager_actions": { + Type: schema.TypeList, + Description: "List of instance group manager actions", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "instance group manager action name", + }, + + "action_id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance group manager action ID", + }, + + "instance_group": { + Type: schema.TypeString, + Required: true, + Description: "instance group ID", + }, + + "run_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time the scheduled action will run.", + }, + + "membership_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of members the instance group should have at the scheduled time.", + }, + + "max_membership_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The maximum number of members in a managed instance group", + }, + + "min_membership_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum number of members in a managed instance group", + }, + + "target_manager": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance group manager of type autoscale.", + }, + + "target_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance group manager name of type autoscale.", + }, + + "cron_spec": { + Type: schema.TypeString, + Computed: true, + Description: "The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min period.", + }, + + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the instance group action- `active`: Action is ready to be run- `completed`: Action was completed successfully- `failed`: Action could not be completed successfully- `incompatible`: Action parameters are not compatible with the group or manager- `omitted`: Action was not applied because this action's manager was disabled.", + }, + "updated_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the instance group manager action was modified.", + }, + "action_type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of action for the instance group.", + }, + + "last_applied_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time the scheduled action was last applied. If empty the action has never been applied.", + }, + "next_run_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time the scheduled action will next run. 
If empty the system is currently calculating the next run time.",
+						},
+						"auto_delete": {
+							Type:     schema.TypeBool,
+							Computed: true,
+						},
+						"auto_delete_timeout": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"created_at": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The date and time that the instance group manager action was created.",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func dataSourceIBMISInstanceGroupManagerActionsRead(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	instanceGroupManagerID := d.Get("instance_group_manager").(string)
+	instanceGroupID := d.Get("instance_group").(string)
+
+	// Support for pagination
+	start := ""
+	allrecs := []vpcv1.InstanceGroupManagerActionIntf{}
+
+	for {
+		listInstanceGroupManagerActionsOptions := vpcv1.ListInstanceGroupManagerActionsOptions{
+			InstanceGroupID:        &instanceGroupID,
+			InstanceGroupManagerID: &instanceGroupManagerID,
+		}
+		// Pass the pagination token from the previous response; without it the
+		// same page would be requested on every iteration.
+		if start != "" {
+			listInstanceGroupManagerActionsOptions.Start = &start
+		}
+
+		instanceGroupManagerActionsCollection, response, err := sess.ListInstanceGroupManagerActions(&listInstanceGroupManagerActionsOptions)
+		if err != nil {
+			return fmt.Errorf("error getting instance group manager actions %s\n%s", err, response)
+		}
+		if instanceGroupManagerActionsCollection != nil && *instanceGroupManagerActionsCollection.TotalCount == int64(0) {
+			break
+		}
+		start = GetNext(instanceGroupManagerActionsCollection.Next)
+		allrecs = append(allrecs, instanceGroupManagerActionsCollection.Actions...)
+		if start == "" {
+			break
+		}
+	}
+
+	actions := make([]map[string]interface{}, 0)
+	for _, data := range allrecs {
+		instanceGroupManagerAction := data.(*vpcv1.InstanceGroupManagerAction)
+
+		action := map[string]interface{}{
+			"name":                *instanceGroupManagerAction.Name,
+			"auto_delete":         *instanceGroupManagerAction.AutoDelete,
+			"auto_delete_timeout": intValue(instanceGroupManagerAction.AutoDeleteTimeout),
+			"created_at":          instanceGroupManagerAction.CreatedAt.String(),
+			"action_id":           *instanceGroupManagerAction.ID,
+			"resource_type":       *instanceGroupManagerAction.ResourceType,
+			"status":              *instanceGroupManagerAction.Status,
+			"updated_at":          instanceGroupManagerAction.UpdatedAt.String(),
+			"action_type":         *instanceGroupManagerAction.ActionType,
+		}
+		if instanceGroupManagerAction.CronSpec != nil {
+			action["cron_spec"] = *instanceGroupManagerAction.CronSpec
+		}
+		if instanceGroupManagerAction.LastAppliedAt != nil {
+			action["last_applied_at"] = instanceGroupManagerAction.LastAppliedAt.String()
+		}
+		if instanceGroupManagerAction.NextRunAt != nil {
+			action["next_run_at"] = instanceGroupManagerAction.NextRunAt.String()
+		}
+		instanceGroupManagerScheduledActionGroupGroup := instanceGroupManagerAction.Group
+		if instanceGroupManagerScheduledActionGroupGroup != nil && instanceGroupManagerScheduledActionGroupGroup.MembershipCount != nil {
+			action["membership_count"] = intValue(instanceGroupManagerScheduledActionGroupGroup.MembershipCount)
+		}
+		instanceGroupManagerScheduledActionManagerManagerInt := instanceGroupManagerAction.Manager
+		if instanceGroupManagerScheduledActionManagerManagerInt != nil {
+			instanceGroupManagerScheduledActionManagerManager := instanceGroupManagerScheduledActionManagerManagerInt.(*vpcv1.InstanceGroupManagerScheduledActionManager)
+			if instanceGroupManagerScheduledActionManagerManager != nil && instanceGroupManagerScheduledActionManagerManager.ID != nil {
+
+				if instanceGroupManagerScheduledActionManagerManager.MaxMembershipCount != nil {
+					action["max_membership_count"] = 
intValue(instanceGroupManagerScheduledActionManagerManager.MaxMembershipCount)
+				}
+				action["min_membership_count"] = intValue(instanceGroupManagerScheduledActionManagerManager.MinMembershipCount)
+				action["target_manager_name"] = *instanceGroupManagerScheduledActionManagerManager.Name
+				action["target_manager"] = *instanceGroupManagerScheduledActionManagerManager.ID
+			}
+		}
+		actions = append(actions, action)
+	}
+	d.Set("instance_group_manager_actions", actions)
+	d.SetId(dataSourceIBMISInstanceGroupManagerActionsID(d))
+	return nil
+}
+
+func dataSourceIBMISInstanceGroupManagerActionsID(d *schema.ResourceData) string {
+	return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_policies.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_policies.go
new file mode 100644
index 00000000000..2e887f865ac
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_policies.go
@@ -0,0 +1,131 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/IBM/vpc-go-sdk/vpcv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMISInstanceGroupManagerPolicies() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMISInstanceGroupManagerPoliciesRead,
+
+		Schema: map[string]*schema.Schema{
+
+			"instance_group": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "instance group ID",
+			},
+
+			"instance_group_manager": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Instance group manager ID",
+			},
+
+			"instance_group_manager_policies": {
+				Type:        schema.TypeList,
+				Description: "List of instance group manager policies",
+				Computed:    true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+
+						"id": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "ID of the instance group manager policy.",
+						},
+
+						"name": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The name of the instance group manager policy",
+						},
+
+						"metric_type": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The type of metric to be evaluated",
+						},
+
+						"metric_value": {
+							Type:        schema.TypeInt,
+							Computed:    true,
+							Description: "The metric value to be evaluated",
+						},
+
+						"policy_type": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The type of policy for the instance group",
+						},
+						"policy_id": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The policy ID",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func dataSourceIBMISInstanceGroupManagerPoliciesRead(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	instanceGroupManagerID := d.Get("instance_group_manager").(string)
+	instanceGroupID := d.Get("instance_group").(string)
+
+	// Support for pagination
+	start := ""
+	allrecs := []vpcv1.InstanceGroupManagerPolicyIntf{}
+
+	for {
+		listInstanceGroupManagerPoliciesOptions := vpcv1.ListInstanceGroupManagerPoliciesOptions{
+			InstanceGroupID:        &instanceGroupID,
+			InstanceGroupManagerID: &instanceGroupManagerID,
+		}
+		// Pass the pagination token from the previous response; without it the
+		// same page would be requested on every iteration.
+		if start != "" {
+			listInstanceGroupManagerPoliciesOptions.Start = &start
+		}
+
+		instanceGroupManagerPolicyCollection, response, err := sess.ListInstanceGroupManagerPolicies(&listInstanceGroupManagerPoliciesOptions)
+		if err != nil {
+			return fmt.Errorf("error getting instance group manager policies 
%s\n%s", err, response) + } + start = GetNext(instanceGroupManagerPolicyCollection.Next) + allrecs = append(allrecs, instanceGroupManagerPolicyCollection.Policies...) + if start == "" { + break + } + } + + policies := make([]map[string]interface{}, 0) + for _, data := range allrecs { + instanceGroupManagerPolicy := data.(*vpcv1.InstanceGroupManagerPolicy) + policy := map[string]interface{}{ + "id": fmt.Sprintf("%s/%s/%s", instanceGroupID, instanceGroupManagerID, *instanceGroupManagerPolicy.ID), + "name": *instanceGroupManagerPolicy.Name, + "metric_value": *instanceGroupManagerPolicy.MetricValue, + "metric_type": *instanceGroupManagerPolicy.MetricType, + "policy_type": *instanceGroupManagerPolicy.PolicyType, + "policy_id": *instanceGroupManagerPolicy.ID, + } + policies = append(policies, policy) + } + d.Set("instance_group_manager_policies", policies) + d.SetId(dataSourceIBMISInstanceGroupManagerPoliciesID(d)) + return nil +} + +// dataSourceIBMISInstanceGroupManagerPoliciesID returns a reasonable ID for a instance group manager policies list. +func dataSourceIBMISInstanceGroupManagerPoliciesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_policy.go new file mode 100644 index 00000000000..2868ae0a110 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_manager_policy.go @@ -0,0 +1,106 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISInstanceGroupManagerPolicy() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceGroupManagerPolicyRead, + + Schema: map[string]*schema.Schema{ + + "instance_group": { + Type: schema.TypeString, + Required: true, + Description: "instance group ID", + }, + + "instance_group_manager": { + Type: schema.TypeString, + Required: true, + Description: "Instance group manager ID", + }, + + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the instance group manager policy", + }, + + "metric_type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of metric to be evaluated", + }, + + "metric_value": { + Type: schema.TypeInt, + Computed: true, + Description: "The metric value to be evaluated", + }, + + "policy_type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of Policy for the Instance Group", + }, + "policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "The policy ID", + }, + }, + } +} + +func dataSourceIBMISInstanceGroupManagerPolicyRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + instanceGroupManagerID := d.Get("instance_group_manager").(string) + instanceGroupID := d.Get("instance_group").(string) + policyName := d.Get("name").(string) + + // Support for pagination + start := "" + allrecs := []vpcv1.InstanceGroupManagerPolicyIntf{} + + for { + listInstanceGroupManagerPoliciesOptions := vpcv1.ListInstanceGroupManagerPoliciesOptions{ + InstanceGroupID: &instanceGroupID, + InstanceGroupManagerID: &instanceGroupManagerID, + } + + 
instanceGroupManagerPolicyCollection, response, err := sess.ListInstanceGroupManagerPolicies(&listInstanceGroupManagerPoliciesOptions) + if err != nil { + return fmt.Errorf("Error Getting InstanceGroup Manager Policies %s\n%s", err, response) + } + start = GetNext(instanceGroupManagerPolicyCollection.Next) + allrecs = append(allrecs, instanceGroupManagerPolicyCollection.Policies...) + if start == "" { + break + } + } + + for _, data := range allrecs { + instanceGroupManagerPolicy := data.(*vpcv1.InstanceGroupManagerPolicy) + if policyName == *instanceGroupManagerPolicy.Name { + d.SetId(fmt.Sprintf("%s/%s/%s", instanceGroupID, instanceGroupManagerID, *instanceGroupManagerPolicy.ID)) + d.Set("policy_id", *instanceGroupManagerPolicy.ID) + d.Set("metric_value", *instanceGroupManagerPolicy.MetricValue) + d.Set("metric_type", *instanceGroupManagerPolicy.MetricType) + d.Set("policy_type", *instanceGroupManagerPolicy.PolicyType) + return nil + } + } + return fmt.Errorf("Instance group manager policy %s not found", policyName) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_managers.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_managers.go new file mode 100644 index 00000000000..b081a819443 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_managers.go @@ -0,0 +1,202 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISInstanceGroupManagers() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceGroupManagersRead, + + Schema: map[string]*schema.Schema{ + + "instance_group": { + Type: schema.TypeString, + Required: true, + Description: "instance group ID", + }, + + "instance_group_managers": { + Type: schema.TypeList, + Description: "List of instance group managers", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the instance group manager.", + }, + + "id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the instance group manager.", + }, + + "manager_type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of instance group manager.", + }, + + "aggregation_window": { + Type: schema.TypeInt, + Computed: true, + Description: "The time window in seconds to aggregate metrics prior to evaluation", + }, + + "cooldown": { + Type: schema.TypeInt, + Computed: true, + Description: "The duration of time in seconds to pause further scale actions after scaling has taken place", + }, + + "max_membership_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The maximum number of members in a managed instance group", + }, + + "min_membership_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum number of members in a managed instance group", + }, + + "manager_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of instance group manager.", + }, + + "policies": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Description: "list of Policies associated with instancegroup manager", + }, + + "actions": { + Type: schema.TypeList, + Computed: true, + 
Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"instance_group_manager_action": {
+										Type:     schema.TypeString,
+										Computed: true,
+									},
+									"instance_group_manager_action_name": {
+										Type:     schema.TypeString,
+										Computed: true,
+									},
+									"resource_type": {
+										Type:     schema.TypeString,
+										Computed: true,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func dataSourceIBMISInstanceGroupManagersRead(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	instanceGroupID := d.Get("instance_group").(string)
+
+	// Support for pagination
+	start := ""
+	allrecs := []vpcv1.InstanceGroupManagerIntf{}
+
+	for {
+		listInstanceGroupManagerOptions := vpcv1.ListInstanceGroupManagersOptions{
+			InstanceGroupID: &instanceGroupID,
+		}
+		// Pass the pagination token from the previous response; without it the
+		// same page would be requested on every iteration.
+		if start != "" {
+			listInstanceGroupManagerOptions.Start = &start
+		}
+		instanceGroupManagerCollections, response, err := sess.ListInstanceGroupManagers(&listInstanceGroupManagerOptions)
+		if err != nil {
+			return fmt.Errorf("error getting instance group managers %s\n%s", err, response)
+		}
+
+		start = GetNext(instanceGroupManagerCollections.Next)
+		allrecs = append(allrecs, instanceGroupManagerCollections.Managers...)
+
+		if start == "" {
+			break
+		}
+
+	}
+
+	instanceGroupManagers := make([]map[string]interface{}, 0)
+	for _, instanceGroupManagerIntf := range allrecs {
+		instanceGroupManager := instanceGroupManagerIntf.(*vpcv1.InstanceGroupManager)
+
+		if *instanceGroupManager.ManagerType == "scheduled" {
+			manager := map[string]interface{}{
+				"id":           fmt.Sprintf("%s/%s", instanceGroupID, *instanceGroupManager.ID),
+				"manager_id":   *instanceGroupManager.ID,
+				"name":         *instanceGroupManager.Name,
+				"manager_type": *instanceGroupManager.ManagerType,
+			}
+
+			actions := make([]map[string]interface{}, 0)
+			if instanceGroupManager.Actions != nil {
+				for _, action := range instanceGroupManager.Actions {
+					actn := map[string]interface{}{
+						"instance_group_manager_action":      action.ID,
+						"instance_group_manager_action_name": action.Name,
+						"resource_type":                      action.ResourceType,
+					}
+					actions = append(actions, actn)
+				}
+				manager["actions"] = actions
+			}
+			instanceGroupManagers = append(instanceGroupManagers, manager)
+
+		} else {
+			manager := map[string]interface{}{
+				"id":                   fmt.Sprintf("%s/%s", instanceGroupID, *instanceGroupManager.ID),
+				"manager_id":           *instanceGroupManager.ID,
+				"name":                 *instanceGroupManager.Name,
+				"aggregation_window":   *instanceGroupManager.AggregationWindow,
+				"cooldown":             *instanceGroupManager.Cooldown,
+				"max_membership_count": *instanceGroupManager.MaxMembershipCount,
+				"min_membership_count": *instanceGroupManager.MinMembershipCount,
+				"manager_type":         *instanceGroupManager.ManagerType,
+			}
+
+			policies := make([]string, 0)
+			if instanceGroupManager.Policies != nil {
+				for i := 0; i < len(instanceGroupManager.Policies); i++ {
+					policies = append(policies, string(*(instanceGroupManager.Policies[i].ID)))
+				}
+			}
+			manager["policies"] = policies
+			instanceGroupManagers = append(instanceGroupManagers, manager)
+		}
+
+	}
+	d.Set("instance_group_managers", instanceGroupManagers)
+	d.SetId(dataSourceIBMISInstanceGroupManagersID(d))
+	return nil
+}
+
+// dataSourceIBMISInstanceGroupManagersID returns a reasonable ID for an instance group manager list.
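+// A UTC timestamp is unique for every read, which is sufficient here because the list itself has no natural identifier.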
+func dataSourceIBMISInstanceGroupManagersID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_membership.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_membership.go new file mode 100644 index 00000000000..89ae748928d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_membership.go @@ -0,0 +1,164 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func dataSourceIBMISInstanceGroupMembership() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceGroupMembershipRead, + + Schema: map[string]*schema.Schema{ + isInstanceGroup: { + Type: schema.TypeString, + Required: true, + Description: "The instance group identifier.", + }, + isInstanceGroupMembershipName: { + Type: schema.TypeString, + Required: true, + Description: "The user-defined name for this instance group membership. Names must be unique within the instance group.", + }, + isInstanceGroupMemershipDeleteInstanceOnMembershipDelete: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, when deleting the membership the instance will also be deleted.", + }, + isInstanceGroupMembership: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance group membership.", + }, + isInstanceGroupMemershipInstance: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceGroupMembershipCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this virtual server instance.", + }, + isInstanceGroupMembershipVirtualServerInstance: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this virtual server instance.", + }, + isInstanceGroupMemershipInstanceName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this virtual server instance (and default system hostname).", + }, + }, + }, + }, + isInstanceGroupMemershipInstanceTemplate: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceGroupMembershipCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this instance template.", + }, + isInstanceGroupMemershipInstanceTemplate: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance template.", + }, + isInstanceGroupMemershipInstanceTemplateName: { + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for this instance template.", + }, + }, + }, + }, + isInstanceGroupMembershipLoadBalancerPoolMember: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this load balancer pool member.", + }, + isInstanceGroupMembershipStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the instance group membership- `deleting`: Membership is deleting dependent resources- `failed`: Membership was unable to maintain dependent resources- `healthy`: Membership is active and serving in the group- `pending`: Membership is waiting for dependent resources- `unhealthy`: Membership has 
unhealthy dependent resources.", + }, + }, + } +} + +func dataSourceIBMISInstanceGroupMembershipRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + instanceGroupID := d.Get(isInstanceGroup).(string) + // Support for pagination + start := "" + allrecs := []vpcv1.InstanceGroupMembership{} + + for { + listInstanceGroupMembershipsOptions := vpcv1.ListInstanceGroupMembershipsOptions{ + InstanceGroupID: &instanceGroupID, + } + instanceGroupMembershipCollection, response, err := sess.ListInstanceGroupMemberships(&listInstanceGroupMembershipsOptions) + if err != nil || instanceGroupMembershipCollection == nil { + return fmt.Errorf("Error Getting InstanceGroup Membership Collection %s\n%s", err, response) + } + + start = GetNext(instanceGroupMembershipCollection.Next) + allrecs = append(allrecs, instanceGroupMembershipCollection.Memberships...) + + if start == "" { + break + } + + } + + instanceGroupMembershipName := d.Get(isInstanceGroupMembershipName).(string) + for _, instanceGroupMembership := range allrecs { + if instanceGroupMembershipName == *instanceGroupMembership.Name { + d.SetId(fmt.Sprintf("%s/%s", instanceGroupID, *instanceGroupMembership.Instance.ID)) + d.Set(isInstanceGroupMemershipDeleteInstanceOnMembershipDelete, *instanceGroupMembership.DeleteInstanceOnMembershipDelete) + d.Set(isInstanceGroupMembership, *instanceGroupMembership.ID) + d.Set(isInstanceGroupMembershipStatus, *instanceGroupMembership.Status) + + instances := make([]map[string]interface{}, 0) + if instanceGroupMembership.Instance != nil { + instance := map[string]interface{}{ + isInstanceGroupMembershipCrn: *instanceGroupMembership.Instance.CRN, + isInstanceGroupMembershipVirtualServerInstance: *instanceGroupMembership.Instance.ID, + isInstanceGroupMemershipInstanceName: *instanceGroupMembership.Instance.Name, + } + instances = append(instances, instance) + } + d.Set(isInstanceGroupMemershipInstance, instances) + + instance_templates := make([]map[string]interface{}, 0) + if instanceGroupMembership.InstanceTemplate != nil { + instance_template := map[string]interface{}{ + isInstanceGroupMembershipCrn: *instanceGroupMembership.InstanceTemplate.CRN, + isInstanceGroupMemershipInstanceTemplate: *instanceGroupMembership.InstanceTemplate.ID, + isInstanceGroupMemershipInstanceTemplateName: *instanceGroupMembership.InstanceTemplate.Name, + } + instance_templates = append(instance_templates, instance_template) + } + d.Set(isInstanceGroupMemershipInstanceTemplate, instance_templates) + + if instanceGroupMembership.PoolMember != nil && instanceGroupMembership.PoolMember.ID != nil { + d.Set(isInstanceGroupMembershipLoadBalancerPoolMember, *instanceGroupMembership.PoolMember.ID) + } + return nil + } + } + return fmt.Errorf("Instance group membership %s not found", instanceGroupMembershipName) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_memberships.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_memberships.go new file mode 100644 index 00000000000..63b8d2e2105 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_group_memberships.go @@ -0,0 +1,188 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. 
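+//
+// Data source that lists every membership in an instance group.
+// A minimal usage sketch (the var.* name is hypothetical; the argument key is
+// the isInstanceGroup schema constant, assumed here to resolve to "instance_group"):
+//
+//	data "ibm_is_instance_group_memberships" "example" {
+//	  instance_group = var.instance_group_id
+//	}
+//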
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +const ( + isInstanceGroupMemberships = "memberships" +) + +func dataSourceIBMISInstanceGroupMemberships() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceGroupMembershipsRead, + + Schema: map[string]*schema.Schema{ + isInstanceGroup: { + Type: schema.TypeString, + Required: true, + Description: "The instance group identifier.", + }, + + isInstanceGroupMemberships: { + Type: schema.TypeList, + Computed: true, + Description: "Collection of instance group memberships.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceGroupMemershipDeleteInstanceOnMembershipDelete: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, when deleting the membership the instance will also be deleted.", + }, + isInstanceGroupMembership: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance group membership.", + }, + isInstanceGroupMemershipInstance: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceGroupMembershipCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this virtual server instance.", + }, + isInstanceGroupMembershipVirtualServerInstance: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this virtual server instance.", + }, + isInstanceGroupMemershipInstanceName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this virtual server instance (and default system hostname).", + }, + }, + }, + }, + isInstanceGroupMemershipInstanceTemplate: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceGroupMembershipCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this instance template.", + }, + isInstanceGroupMemershipInstanceTemplate: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance template.", + }, + isInstanceGroupMemershipInstanceTemplateName: { + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for this instance template.", + }, + }, + }, + }, + isInstanceGroupMembershipName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this instance group membership. 
Names must be unique within the instance group.", + }, + isInstanceGroupMembershipLoadBalancerPoolMember: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this load balancer pool member.", + }, + isInstanceGroupMembershipStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the instance group membership- `deleting`: Membership is deleting dependent resources- `failed`: Membership was unable to maintain dependent resources- `healthy`: Membership is active and serving in the group- `pending`: Membership is waiting for dependent resources- `unhealthy`: Membership has unhealthy dependent resources.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISInstanceGroupMembershipsRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + instanceGroupID := d.Get(isInstanceGroup).(string) + // Support for pagination + start := "" + allrecs := []vpcv1.InstanceGroupMembership{} + + for { + listInstanceGroupMembershipsOptions := vpcv1.ListInstanceGroupMembershipsOptions{ + InstanceGroupID: &instanceGroupID, + } + instanceGroupMembershipCollection, response, err := sess.ListInstanceGroupMemberships(&listInstanceGroupMembershipsOptions) + if err != nil { + return fmt.Errorf("Error Getting InstanceGroup Membership Collection %s\n%s", err, response) + } + + start = GetNext(instanceGroupMembershipCollection.Next) + allrecs = append(allrecs, instanceGroupMembershipCollection.Memberships...) + + if start == "" { + break + } + + } + + memberships := make([]map[string]interface{}, 0) + for _, instanceGroupMembership := range allrecs { + membership := map[string]interface{}{ + isInstanceGroupMemershipDeleteInstanceOnMembershipDelete: *instanceGroupMembership.DeleteInstanceOnMembershipDelete, + isInstanceGroupMembership: *instanceGroupMembership.ID, + isInstanceGroupMembershipName: *instanceGroupMembership.Name, + isInstanceGroupMembershipStatus: *instanceGroupMembership.Status, + } + + instances := make([]map[string]interface{}, 0) + if instanceGroupMembership.Instance != nil { + instance := map[string]interface{}{ + isInstanceGroupMembershipCrn: *instanceGroupMembership.Instance.CRN, + isInstanceGroupMembershipVirtualServerInstance: *instanceGroupMembership.Instance.ID, + isInstanceGroupMemershipInstanceName: *instanceGroupMembership.Instance.Name, + } + instances = append(instances, instance) + } + membership[isInstanceGroupMemershipInstance] = instances + + instance_templates := make([]map[string]interface{}, 0) + if instanceGroupMembership.InstanceTemplate != nil { + instance_template := map[string]interface{}{ + isInstanceGroupMembershipCrn: *instanceGroupMembership.InstanceTemplate.CRN, + isInstanceGroupMemershipInstanceTemplate: *instanceGroupMembership.InstanceTemplate.ID, + isInstanceGroupMemershipInstanceTemplateName: *instanceGroupMembership.InstanceTemplate.Name, + } + instance_templates = append(instance_templates, instance_template) + } + membership[isInstanceGroupMemershipInstanceTemplate] = instance_templates + + if instanceGroupMembership.PoolMember != nil && instanceGroupMembership.PoolMember.ID != nil { + membership[isInstanceGroupMembershipLoadBalancerPoolMember] = *instanceGroupMembership.PoolMember.ID + } + + memberships = append(memberships, membership) + } + d.Set(isInstanceGroupMemberships, memberships) + d.SetId(dataSourceIbmIsInstanceGroupMembershipsID(d)) + + return nil +} + +// dataSourceIbmIsInstanceGroupMembershipsID returns a reasonable ID for 
the list. +func dataSourceIbmIsInstanceGroupMembershipsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_profile.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_profile.go new file mode 100644 index 00000000000..d904ce1dd22 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_profile.go @@ -0,0 +1,739 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isInstanceProfileName = "name" + isInstanceProfileFamily = "family" + isInstanceProfileArchitecture = "architecture" +) + +func dataSourceIBMISInstanceProfile() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceProfileRead, + + Schema: map[string]*schema.Schema{ + + isInstanceProfileName: { + Type: schema.TypeString, + Required: true, + }, + + isInstanceProfileFamily: { + Type: schema.TypeString, + Computed: true, + Description: "The product family this virtual server instance profile belongs to.", + }, + + isInstanceProfileArchitecture: { + Type: schema.TypeString, + Computed: true, + Description: "The default OS architecture for an instance with this profile.", + }, + + "architecture_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for the OS architecture.", + }, + + "architecture_values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The supported OS architecture(s) for an instance with this profile.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "bandwidth": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the instance profile's disks.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "quantity": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + 
Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "size": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "supported_interface_types": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The disk interface used for attaching the disk.The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. 
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The supported disk interfaces used for attaching the disk.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance profile.", + }, + "memory": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "port_speed": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + }, + }, + }, + "vcpu_architecture": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The default VCPU architecture for an instance with this profile.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VCPU architecture for an instance with this profile.", + }, + }, + }, + }, + "vcpu_count": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for 
this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISInstanceProfileRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := d.Get(isInstanceProfileName).(string) + if userDetails.generation == 1 { + err := classicInstanceProfileGet(d, meta, name) + if err != nil { + return err + } + } else { + err := instanceProfileGet(d, meta, name) + if err != nil { + return err + } + } + return nil +} + +func classicInstanceProfileGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getInstanceProfileOptions := &vpcclassicv1.GetInstanceProfileOptions{ + Name: &name, + } + profile, _, err := sess.GetInstanceProfile(getInstanceProfileOptions) + if err != nil { + return err + } + // For lack of anything better, compose our id from profile name. + d.SetId(*profile.Name) + d.Set(isInstanceProfileName, *profile.Name) + d.Set(isInstanceProfileFamily, *profile.Family) + return nil +} + +func instanceProfileGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getInstanceProfileOptions := &vpcv1.GetInstanceProfileOptions{ + Name: &name, + } + profile, _, err := sess.GetInstanceProfile(getInstanceProfileOptions) + if err != nil { + return err + } + // For lack of anything better, compose our id from profile name. 
+	d.SetId(*profile.Name)
+	d.Set(isInstanceProfileName, *profile.Name)
+	d.Set(isInstanceProfileFamily, *profile.Family)
+	if profile.OsArchitecture != nil {
+		if profile.OsArchitecture.Default != nil {
+			d.Set(isInstanceProfileArchitecture, *profile.OsArchitecture.Default)
+		}
+		if profile.OsArchitecture.Type != nil {
+			d.Set("architecture_type", *profile.OsArchitecture.Type)
+		}
+		if profile.OsArchitecture.Values != nil {
+			d.Set("architecture_values", profile.OsArchitecture.Values)
+		}
+
+	}
+	if profile.Bandwidth != nil {
+		err = d.Set("bandwidth", dataSourceInstanceProfileFlattenBandwidth(*profile.Bandwidth.(*vpcv1.InstanceProfileBandwidth)))
+		if err != nil {
+			return err
+		}
+	}
+	if profile.Disks != nil {
+		err = d.Set("disks", dataSourceInstanceProfileFlattenDisks(profile.Disks))
+		if err != nil {
+			return err
+		}
+	}
+	if err = d.Set("href", profile.Href); err != nil {
+		return err
+	}
+
+	if profile.Memory != nil {
+		err = d.Set("memory", dataSourceInstanceProfileFlattenMemory(*profile.Memory.(*vpcv1.InstanceProfileMemory)))
+		if err != nil {
+			return err
+		}
+	}
+	if profile.PortSpeed != nil {
+		err = d.Set("port_speed", dataSourceInstanceProfileFlattenPortSpeed(*profile.PortSpeed.(*vpcv1.InstanceProfilePortSpeed)))
+		if err != nil {
+			return err
+		}
+	}
+
+	if profile.VcpuArchitecture != nil {
+		err = d.Set("vcpu_architecture", dataSourceInstanceProfileFlattenVcpuArchitecture(*profile.VcpuArchitecture))
+		if err != nil {
+			return err
+		}
+	}
+
+	if profile.VcpuCount != nil {
+		err = d.Set("vcpu_count", dataSourceInstanceProfileFlattenVcpuCount(*profile.VcpuCount.(*vpcv1.InstanceProfileVcpu)))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func dataSourceInstanceProfileFlattenBandwidth(result vpcv1.InstanceProfileBandwidth) (finalList []map[string]interface{}) {
+	finalList = []map[string]interface{}{}
+	finalMap := dataSourceInstanceProfileBandwidthToMap(result)
+	finalList = append(finalList, finalMap)
+
+	return finalList
+}
+
+func dataSourceInstanceProfileBandwidthToMap(bandwidthItem vpcv1.InstanceProfileBandwidth) (bandwidthMap map[string]interface{}) {
+	bandwidthMap = map[string]interface{}{}
+
+	if bandwidthItem.Type != nil {
+		bandwidthMap["type"] = bandwidthItem.Type
+	}
+	if bandwidthItem.Value != nil {
+		bandwidthMap["value"] = bandwidthItem.Value
+	}
+	if bandwidthItem.Default != nil {
+		bandwidthMap["default"] = bandwidthItem.Default
+	}
+	if bandwidthItem.Max != nil {
+		bandwidthMap["max"] = bandwidthItem.Max
+	}
+	if bandwidthItem.Min != nil {
+		bandwidthMap["min"] = bandwidthItem.Min
+	}
+	if bandwidthItem.Step != nil {
+		bandwidthMap["step"] = bandwidthItem.Step
+	}
+	if bandwidthItem.Values != nil {
+		bandwidthMap["values"] = bandwidthItem.Values
+	}
+
+	return bandwidthMap
+}
+
+func dataSourceInstanceProfileFlattenMemory(result vpcv1.InstanceProfileMemory) (finalList []map[string]interface{}) {
+	finalList = []map[string]interface{}{}
+	finalMap := dataSourceInstanceProfileMemoryToMap(result)
+	finalList = append(finalList, finalMap)
+
+	return finalList
+}
+
+func dataSourceInstanceProfileMemoryToMap(memoryItem vpcv1.InstanceProfileMemory) (memoryMap map[string]interface{}) {
+	memoryMap = map[string]interface{}{}
+
+	if memoryItem.Type != nil {
+		memoryMap["type"] = memoryItem.Type
+	}
+	if memoryItem.Value != nil {
+		memoryMap["value"] = memoryItem.Value
+	}
+	if memoryItem.Default != nil {
+		memoryMap["default"] = memoryItem.Default
+	}
+	if memoryItem.Max != nil {
+		memoryMap["max"] = memoryItem.Max
+	}
+	if memoryItem.Min != nil {
memoryMap["min"] = memoryItem.Min + } + if memoryItem.Step != nil { + memoryMap["step"] = memoryItem.Step + } + if memoryItem.Values != nil { + memoryMap["values"] = memoryItem.Values + } + + return memoryMap +} + +func dataSourceInstanceProfileFlattenPortSpeed(result vpcv1.InstanceProfilePortSpeed) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceInstanceProfilePortSpeedToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceInstanceProfilePortSpeedToMap(portSpeedItem vpcv1.InstanceProfilePortSpeed) (portSpeedMap map[string]interface{}) { + portSpeedMap = map[string]interface{}{} + + if portSpeedItem.Type != nil { + portSpeedMap["type"] = portSpeedItem.Type + } + if portSpeedItem.Value != nil { + portSpeedMap["value"] = portSpeedItem.Value + } + + return portSpeedMap +} + +func dataSourceInstanceProfileFlattenVcpuArchitecture(result vpcv1.InstanceProfileVcpuArchitecture) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceInstanceProfileVcpuArchitectureToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceInstanceProfileVcpuArchitectureToMap(vcpuArchitectureItem vpcv1.InstanceProfileVcpuArchitecture) (vcpuArchitectureMap map[string]interface{}) { + vcpuArchitectureMap = map[string]interface{}{} + + if vcpuArchitectureItem.Default != nil { + vcpuArchitectureMap["default"] = vcpuArchitectureItem.Default + } + if vcpuArchitectureItem.Type != nil { + vcpuArchitectureMap["type"] = vcpuArchitectureItem.Type + } + if vcpuArchitectureItem.Value != nil { + vcpuArchitectureMap["value"] = vcpuArchitectureItem.Value + } + + return vcpuArchitectureMap +} + +func dataSourceInstanceProfileFlattenVcpuCount(result vpcv1.InstanceProfileVcpu) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceInstanceProfileVcpuCountToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceInstanceProfileVcpuCountToMap(vcpuCountItem vpcv1.InstanceProfileVcpu) (vcpuCountMap map[string]interface{}) { + vcpuCountMap = map[string]interface{}{} + + if vcpuCountItem.Type != nil { + vcpuCountMap["type"] = vcpuCountItem.Type + } + if vcpuCountItem.Value != nil { + vcpuCountMap["value"] = vcpuCountItem.Value + } + if vcpuCountItem.Default != nil { + vcpuCountMap["default"] = vcpuCountItem.Default + } + if vcpuCountItem.Max != nil { + vcpuCountMap["max"] = vcpuCountItem.Max + } + if vcpuCountItem.Min != nil { + vcpuCountMap["min"] = vcpuCountItem.Min + } + if vcpuCountItem.Step != nil { + vcpuCountMap["step"] = vcpuCountItem.Step + } + if vcpuCountItem.Values != nil { + vcpuCountMap["values"] = vcpuCountItem.Values + } + + return vcpuCountMap +} + +func dataSourceInstanceProfileFlattenDisks(result []vpcv1.InstanceProfileDisk) (disks []map[string]interface{}) { + for _, disksItem := range result { + disks = append(disks, dataSourceInstanceProfileDisksToMap(disksItem)) + } + + return disks +} + +func dataSourceInstanceProfileDisksToMap(disksItem vpcv1.InstanceProfileDisk) (disksMap map[string]interface{}) { + disksMap = map[string]interface{}{} + + if disksItem.Quantity != nil { + quantityList := []map[string]interface{}{} + quantityMap := dataSourceInstanceProfileDisksQuantityToMap(*disksItem.Quantity.(*vpcv1.InstanceProfileDiskQuantity)) + quantityList = append(quantityList, quantityMap) + disksMap["quantity"] = quantityList + } + if disksItem.Size != 
nil { + sizeList := []map[string]interface{}{} + sizeMap := dataSourceInstanceProfileDisksSizeToMap(*disksItem.Size.(*vpcv1.InstanceProfileDiskSize)) + sizeList = append(sizeList, sizeMap) + disksMap["size"] = sizeList + } + if disksItem.SupportedInterfaceTypes != nil { + supportedInterfaceTypesList := []map[string]interface{}{} + supportedInterfaceTypesMap := dataSourceInstanceProfileDisksSupportedInterfaceTypesToMap(*disksItem.SupportedInterfaceTypes) + supportedInterfaceTypesList = append(supportedInterfaceTypesList, supportedInterfaceTypesMap) + disksMap["supported_interface_types"] = supportedInterfaceTypesList + } + + return disksMap +} + +func dataSourceInstanceProfileDisksQuantityToMap(quantityItem vpcv1.InstanceProfileDiskQuantity) (quantityMap map[string]interface{}) { + quantityMap = map[string]interface{}{} + + if quantityItem.Type != nil { + quantityMap["type"] = quantityItem.Type + } + if quantityItem.Value != nil { + quantityMap["value"] = quantityItem.Value + } + if quantityItem.Default != nil { + quantityMap["default"] = quantityItem.Default + } + if quantityItem.Max != nil { + quantityMap["max"] = quantityItem.Max + } + if quantityItem.Min != nil { + quantityMap["min"] = quantityItem.Min + } + if quantityItem.Step != nil { + quantityMap["step"] = quantityItem.Step + } + if quantityItem.Values != nil { + quantityMap["values"] = quantityItem.Values + } + + return quantityMap +} + +func dataSourceInstanceProfileDisksSizeToMap(sizeItem vpcv1.InstanceProfileDiskSize) (sizeMap map[string]interface{}) { + sizeMap = map[string]interface{}{} + + if sizeItem.Type != nil { + sizeMap["type"] = sizeItem.Type + } + if sizeItem.Value != nil { + sizeMap["value"] = sizeItem.Value + } + if sizeItem.Default != nil { + sizeMap["default"] = sizeItem.Default + } + if sizeItem.Max != nil { + sizeMap["max"] = sizeItem.Max + } + if sizeItem.Min != nil { + sizeMap["min"] = sizeItem.Min + } + if sizeItem.Step != nil { + sizeMap["step"] = sizeItem.Step + } + if sizeItem.Values != nil { + sizeMap["values"] = sizeItem.Values + } + + return sizeMap +} + +func dataSourceInstanceProfileDisksSupportedInterfaceTypesToMap(supportedInterfaceTypesItem vpcv1.InstanceProfileDiskSupportedInterfaces) (supportedInterfaceTypesMap map[string]interface{}) { + supportedInterfaceTypesMap = map[string]interface{}{} + + if supportedInterfaceTypesItem.Default != nil { + supportedInterfaceTypesMap["default"] = supportedInterfaceTypesItem.Default + } + if supportedInterfaceTypesItem.Type != nil { + supportedInterfaceTypesMap["type"] = supportedInterfaceTypesItem.Type + } + if supportedInterfaceTypesItem.Values != nil { + supportedInterfaceTypesMap["values"] = supportedInterfaceTypesItem.Values + } + + return supportedInterfaceTypesMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_profiles.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_profiles.go new file mode 100644 index 00000000000..a97cb48f656 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_profiles.go @@ -0,0 +1,516 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
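+//
+// Data source that returns every instance profile available in the region.
+// A minimal usage sketch (no arguments are required; "profiles" is the
+// computed list attribute defined below):
+//
+//	data "ibm_is_instance_profiles" "all" {}
+//
+//	output "profile_names" {
+//	  value = data.ibm_is_instance_profiles.all.profiles[*].name
+//	}
+//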
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isInstanceProfiles = "profiles" +) + +func dataSourceIBMISInstanceProfiles() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceProfilesRead, + + Schema: map[string]*schema.Schema{ + + isInstanceProfiles: { + Type: schema.TypeList, + Description: "List of instance profile maps", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "family": { + Type: schema.TypeString, + Computed: true, + Description: "The product family this virtual server instance profile belongs to.", + }, + "architecture": { + Type: schema.TypeString, + Computed: true, + Description: "The default OS architecture for an instance with this profile.", + }, + "architecture_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for the OS architecture.", + }, + + "architecture_values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The supported OS architecture(s) for an instance with this profile.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "bandwidth": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the instance profile's disks.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "quantity": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + 
Computed:    true,
+									Description: "The increment step value for this profile field.",
+								},
+								"values": &schema.Schema{
+									Type:        schema.TypeList,
+									Computed:    true,
+									Description: "The permitted values for this profile field.",
+									Elem: &schema.Schema{
+										Type: schema.TypeInt,
+									},
+								},
+							},
+						},
+					},
+					"size": &schema.Schema{
+						Type:     schema.TypeList,
+						Computed: true,
+						Elem: &schema.Resource{
+							Schema: map[string]*schema.Schema{
+								"type": &schema.Schema{
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "The type for this profile field.",
+								},
+								"value": &schema.Schema{
+									Type:        schema.TypeInt,
+									Computed:    true,
+									Description: "The value for this profile field.",
+								},
+								"default": &schema.Schema{
+									Type:        schema.TypeInt,
+									Computed:    true,
+									Description: "The default value for this profile field.",
+								},
+								"max": &schema.Schema{
+									Type:        schema.TypeInt,
+									Computed:    true,
+									Description: "The maximum value for this profile field.",
+								},
+								"min": &schema.Schema{
+									Type:        schema.TypeInt,
+									Computed:    true,
+									Description: "The minimum value for this profile field.",
+								},
+								"step": &schema.Schema{
+									Type:        schema.TypeInt,
+									Computed:    true,
+									Description: "The increment step value for this profile field.",
+								},
+								"values": &schema.Schema{
+									Type:        schema.TypeList,
+									Computed:    true,
+									Description: "The permitted values for this profile field.",
+									Elem: &schema.Schema{
+										Type: schema.TypeInt,
+									},
+								},
+							},
+						},
+					},
+					"supported_interface_types": &schema.Schema{
+						Type:     schema.TypeList,
+						Computed: true,
+						Elem: &schema.Resource{
+							Schema: map[string]*schema.Schema{
+								"default": &schema.Schema{
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "The disk interface used for attaching the disk. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. 
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The supported disk interfaces used for attaching the disk.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance profile.", + }, + "memory": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "port_speed": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + }, + }, + }, + "vcpu_architecture": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The default VCPU architecture for an instance with this profile.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VCPU architecture for an instance with this profile.", + }, + }, + }, + }, + "vcpu_count": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for 
this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISInstanceProfilesRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + if userDetails.generation == 1 { + err := classicInstanceProfilesList(d, meta) + if err != nil { + return err + } + } else { + err := instanceProfilesList(d, meta) + if err != nil { + return err + } + } + return nil +} + +func classicInstanceProfilesList(d *schema.ResourceData, meta interface{}) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.InstanceProfile{} + for { + listInstanceProfilesOptions := &vpcclassicv1.ListInstanceProfilesOptions{} + if start != "" { + listInstanceProfilesOptions.Start = &start + } + availableProfiles, response, err := sess.ListInstanceProfiles(listInstanceProfilesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Instance Profiles %s\n%s", err, response) + } + start = GetNext(availableProfiles.Next) + allrecs = append(allrecs, availableProfiles.Profiles...) + if start == "" { + break + } + } + profilesInfo := make([]map[string]interface{}, 0) + for _, profile := range allrecs { + + l := map[string]interface{}{ + "name": *profile.Name, + "family": *profile.Family, + } + profilesInfo = append(profilesInfo, l) + } + d.SetId(dataSourceIBMISInstanceProfilesID(d)) + d.Set(isInstanceProfiles, profilesInfo) + return nil +} + +func instanceProfilesList(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + listInstanceProfilesOptions := &vpcv1.ListInstanceProfilesOptions{} + availableProfiles, response, err := sess.ListInstanceProfiles(listInstanceProfilesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Instance Profiles %s\n%s", err, response) + } + profilesInfo := make([]map[string]interface{}, 0) + for _, profile := range availableProfiles.Profiles { + + l := map[string]interface{}{ + "name": *profile.Name, + "family": *profile.Family, + } + if profile.OsArchitecture != nil { + if profile.OsArchitecture.Default != nil { + l["architecture"] = *profile.OsArchitecture.Default + } + if profile.OsArchitecture.Type != nil { + l["architecture_type"] = profile.OsArchitecture.Type + } + if profile.OsArchitecture.Values != nil { + l["architecture_values"] = profile.OsArchitecture.Values + } + } + if profile.Bandwidth != nil { + bandwidthList := []map[string]interface{}{} + bandwidthMap := dataSourceInstanceProfileBandwidthToMap(*profile.Bandwidth.(*vpcv1.InstanceProfileBandwidth)) + bandwidthList = append(bandwidthList, bandwidthMap) + l["bandwidth"] = bandwidthList + } + if profile.Disks != nil { + disksList := []map[string]interface{}{} + for _, disksItem := range profile.Disks { + disksList = append(disksList, dataSourceInstanceProfileDisksToMap(disksItem)) + } + l["disks"] = disksList + } + if profile.Href != nil { + l["href"] = profile.Href + } + if profile.Memory != nil { + memoryList := []map[string]interface{}{} + memoryMap := dataSourceInstanceProfileMemoryToMap(*profile.Memory.(*vpcv1.InstanceProfileMemory)) + memoryList 
= append(memoryList, memoryMap)
+			l["memory"] = memoryList
+		}
+		if profile.PortSpeed != nil {
+			portSpeedList := []map[string]interface{}{}
+			portSpeedMap := dataSourceInstanceProfilePortSpeedToMap(*profile.PortSpeed.(*vpcv1.InstanceProfilePortSpeed))
+			portSpeedList = append(portSpeedList, portSpeedMap)
+			l["port_speed"] = portSpeedList
+		}
+		if profile.VcpuArchitecture != nil {
+			vcpuArchitectureList := []map[string]interface{}{}
+			vcpuArchitectureMap := dataSourceInstanceProfileVcpuArchitectureToMap(*profile.VcpuArchitecture)
+			vcpuArchitectureList = append(vcpuArchitectureList, vcpuArchitectureMap)
+			l["vcpu_architecture"] = vcpuArchitectureList
+		}
+		if profile.VcpuCount != nil {
+			vcpuCountList := []map[string]interface{}{}
+			vcpuCountMap := dataSourceInstanceProfileVcpuCountToMap(*profile.VcpuCount.(*vpcv1.InstanceProfileVcpu))
+			vcpuCountList = append(vcpuCountList, vcpuCountMap)
+			l["vcpu_count"] = vcpuCountList
+		}
+		profilesInfo = append(profilesInfo, l)
+	}
+	d.SetId(dataSourceIBMISInstanceProfilesID(d))
+	d.Set(isInstanceProfiles, profilesInfo)
+	return nil
+}
+
+// dataSourceIBMISInstanceProfilesID returns a reasonable ID for an Instance Profile list.
+func dataSourceIBMISInstanceProfilesID(d *schema.ResourceData) string {
+	return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_templates.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_templates.go
new file mode 100644
index 00000000000..a1dc6b2c1e2
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instance_templates.go
@@ -0,0 +1,499 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
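+//
+// Illustrative usage sketch from Terraform configuration (assumes this data
+// source is registered as "ibm_is_instance_templates", as the file name
+// suggests; attribute names follow the schema defined below):
+//
+//	data "ibm_is_instance_templates" "example" {}
+//
+//	output "template_ids" {
+//	  value = data.ibm_is_instance_templates.example.templates[*].id
+//	}
+//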
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isInstanceTemplates = "templates" + isInstanceTemplatesFirst = "first" + isInstanceTemplatesHref = "href" + isInstanceTemplatesCrn = "crn" + isInstanceTemplatesLimit = "limit" + isInstanceTemplatesNext = "next" + isInstanceTemplatesTotalCount = "total_count" + isInstanceTemplatesName = "name" + isInstanceTemplatesPortSpeed = "port_speed" + isInstanceTemplatesPortType = "type" + isInstanceTemplatesPortValue = "value" + isInstanceTemplatesDeleteVol = "delete_volume_on_instance_delete" + isInstanceTemplatesVol = "volume" + isInstanceTemplatesMemory = "memory" + isInstanceTemplatesMemoryValue = "value" + isInstanceTemplatesMemoryType = "type" + isInstanceTemplatesMemoryValues = "values" + isInstanceTemplatesMemoryDefault = "default" + isInstanceTemplatesMemoryMin = "min" + isInstanceTemplatesMemoryMax = "max" + isInstanceTemplatesMemoryStep = "step" + isInstanceTemplatesSocketCount = "socket_count" + isInstanceTemplatesSocketValue = "value" + isInstanceTemplatesSocketType = "type" + isInstanceTemplatesSocketValues = "values" + isInstanceTemplatesSocketDefault = "default" + isInstanceTemplatesSocketMin = "min" + isInstanceTemplatesSocketMax = "max" + isInstanceTemplatesSocketStep = "step" + isInstanceTemplatesVcpuArch = "vcpu_architecture" + isInstanceTemplatesVcpuArchType = "type" + isInstanceTemplatesVcpuArchValue = "value" + isInstanceTemplatesVcpuCount = "vcpu_count" + isInstanceTemplatesVcpuCountValue = "value" + isInstanceTemplatesVcpuCountType = "type" + isInstanceTemplatesVcpuCountValues = "values" + isInstanceTemplatesVcpuCountDefault = "default" + isInstanceTemplatesVcpuCountMin = "min" + isInstanceTemplatesVcpuCountMax = "max" + isInstanceTemplatesVcpuCountStep = "step" + isInstanceTemplatesStart = "start" + isInstanceTemplatesVersion = "version" + isInstanceTemplatesGeneration = "generation" + isInstanceTemplatesBootVolumeAttachment = "boot_volume_attachment" + + isInstanceTemplateVPC = "vpc" + isInstanceTemplateZone = "zone" + isInstanceTemplateProfile = "profile" + isInstanceTemplateKeys = "keys" + isInstanceTemplateVolumeAttachments = "volume_attachments" + isInstanceTemplateNetworkInterfaces = "network_interfaces" + isInstanceTemplatePrimaryNetworkInterface = "primary_network_interface" + isInstanceTemplateNicName = "name" + isInstanceTemplateNicPortSpeed = "port_speed" + isInstanceTemplateNicAllowIPSpoofing = "allow_ip_spoofing" + isInstanceTemplateNicPrimaryIpv4Address = "primary_ipv4_address" + isInstanceTemplateNicPrimaryIpv6Address = "primary_ipv6_address" + isInstanceTemplateNicSecondaryAddress = "secondary_addresses" + isInstanceTemplateNicSecurityGroups = "security_groups" + isInstanceTemplateNicSubnet = "subnet" + isInstanceTemplateNicFloatingIPs = "floating_ips" + isInstanceTemplateUserData = "user_data" + isInstanceTemplateGeneration = "generation" + isInstanceTemplateImage = "image" + isInstanceTemplateResourceGroup = "resource_group" + isInstanceTemplateName = "name" + isInstanceTemplateDeleteVolume = "delete_volume_on_instance_delete" + isInstanceTemplateVolAttName = "name" + isInstanceTemplateVolAttVolume = "volume" +) + +func dataSourceIBMISInstanceTemplates() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstanceTemplatesRead, + Schema: map[string]*schema.Schema{ + isInstanceTemplates: { + Type: schema.TypeList, + Description: "Collection of 
instance templates", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplatesName: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplatesHref: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplatesCrn: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateVPC: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateZone: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateProfile: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateKeys: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + isInstanceTemplateVolumeAttachments: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplatesDeleteVol: { + Type: schema.TypeBool, + Computed: true, + }, + isInstanceTemplatesName: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplatesVol: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateVolAttVolPrototype: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplateVolAttVolIops: { + Type: schema.TypeInt, + Computed: true, + Description: "The maximum I/O operations per second (IOPS) for the volume.", + }, + isInstanceTemplateVolAttVolProfile: { + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for the volume profile to use for this volume.", + }, + isInstanceTemplateVolAttVolCapacity: { + Type: schema.TypeInt, + Computed: true, + Description: "The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating volumes may expand in the future.", + }, + isInstanceTemplateVolAttVolEncryptionKey: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN of the [Key Protect Root Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.", + }, + }, + }, + }, + }, + }, + }, + isInstanceTemplatePrimaryNetworkInterface: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplateNicName: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateNicPrimaryIpv4Address: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateNicSecurityGroups: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + isInstanceTemplateNicSubnet: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + isInstanceTemplateNetworkInterfaces: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplateNicName: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateNicPrimaryIpv4Address: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateNicSecurityGroups: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + isInstanceTemplateNicSubnet: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + isInstanceTemplateUserData: { + Type: schema.TypeString, + Computed: true, + }, + 
isInstanceTemplateImage: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplatesBootVolumeAttachment: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplatesDeleteVol: { + Type: schema.TypeBool, + Computed: true, + }, + isInstanceTemplatesName: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplatesVol: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateBootSize: { + Type: schema.TypeInt, + Computed: true, + }, + isInstanceTemplateBootProfile: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + isInstanceTemplateResourceGroup: { + Type: schema.TypeString, + Computed: true, + }, + "placement_target": { + Type: schema.TypeList, + Computed: true, + Description: "The placement restrictions to use for the virtual server instance.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this dedicated host.", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host.", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISInstanceTemplatesRead(d *schema.ResourceData, meta interface{}) error { + instanceC, err := vpcClient(meta) + if err != nil { + return err + } + listInstanceTemplatesOptions := &vpcv1.ListInstanceTemplatesOptions{} + availableTemplates, _, err := instanceC.ListInstanceTemplates(listInstanceTemplatesOptions) + if err != nil { + return err + } + templates := make([]map[string]interface{}, 0) + for _, instTempl := range availableTemplates.Templates { + template := map[string]interface{}{} + instance := instTempl.(*vpcv1.InstanceTemplate) + template["id"] = instance.ID + template[isInstanceTemplatesHref] = instance.Href + template[isInstanceTemplatesCrn] = instance.CRN + template[isInstanceTemplateName] = instance.Name + template[isInstanceTemplateUserData] = instance.UserData + + if instance.Keys != nil { + keys := []string{} + for _, intfc := range instance.Keys { + instanceKeyIntf := intfc.(*vpcv1.KeyIdentity) + keys = append(keys, *instanceKeyIntf.ID) + } + template[isInstanceTemplateKeys] = keys + } + if instance.Profile != nil { + instanceProfileIntf := instance.Profile + identity := instanceProfileIntf.(*vpcv1.InstanceProfileIdentity) + template[isInstanceTemplateProfile] = identity.Name + } + + if instance.PlacementTarget != nil { + placementTargetList := []map[string]interface{}{} + placementTargetMap := dataSourceInstanceTemplateCollectionTemplatesPlacementTargetToMap(*instance.PlacementTarget.(*vpcv1.InstancePlacementTargetPrototype)) + placementTargetList = append(placementTargetList, placementTargetMap) + template["placement_target"] = placementTargetList + } + + if instance.PrimaryNetworkInterface != nil { + interfaceList := make([]map[string]interface{}, 0) + currentPrimNic := map[string]interface{}{} + currentPrimNic[isInstanceTemplateNicName] = *instance.PrimaryNetworkInterface.Name + if instance.PrimaryNetworkInterface.PrimaryIpv4Address != nil { + currentPrimNic[isInstanceTemplateNicPrimaryIpv4Address] = *instance.PrimaryNetworkInterface.PrimaryIpv4Address + } + subInf := instance.PrimaryNetworkInterface.Subnet + subnetIdentity := subInf.(*vpcv1.SubnetIdentity) + currentPrimNic[isInstanceTemplateNicSubnet] = *subnetIdentity.ID + + if 
len(instance.PrimaryNetworkInterface.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(instance.PrimaryNetworkInterface.SecurityGroups); i++ { + secGrpInf := instance.PrimaryNetworkInterface.SecurityGroups[i] + secGrpIdentity := secGrpInf.(*vpcv1.SecurityGroupIdentity) + secgrpList = append(secgrpList, string(*secGrpIdentity.ID)) + } + currentPrimNic[isInstanceTemplateNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + interfaceList = append(interfaceList, currentPrimNic) + template[isInstanceTemplatePrimaryNetworkInterface] = interfaceList + } + + if instance.NetworkInterfaces != nil { + interfacesList := make([]map[string]interface{}, 0) + for _, intfc := range instance.NetworkInterfaces { + currentNic := map[string]interface{}{} + currentNic[isInstanceTemplateNicName] = *intfc.Name + if intfc.PrimaryIpv4Address != nil { + currentNic[isInstanceTemplateNicPrimaryIpv4Address] = *intfc.PrimaryIpv4Address + } + //currentNic[isInstanceTemplateNicAllowIpSpoofing] = intfc.AllowIpSpoofing + subInf := intfc.Subnet + subnetIdentity := subInf.(*vpcv1.SubnetIdentity) + currentNic[isInstanceTemplateNicSubnet] = *subnetIdentity.ID + if len(intfc.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(intfc.SecurityGroups); i++ { + secGrpInf := intfc.SecurityGroups[i] + secGrpIdentity := secGrpInf.(*vpcv1.SecurityGroupIdentity) + secgrpList = append(secgrpList, string(*secGrpIdentity.ID)) + } + currentNic[isInstanceTemplateNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + + interfacesList = append(interfacesList, currentNic) + } + template[isInstanceTemplateNetworkInterfaces] = interfacesList + } + + if instance.Image != nil { + imageInf := instance.Image + imageIdentity := imageInf.(*vpcv1.ImageIdentity) + template[isInstanceTemplateImage] = imageIdentity.ID + } + + if instance.VPC != nil { + vpcInf := instance.VPC + vpcRef := vpcInf.(*vpcv1.VPCIdentity) + template[isInstanceTemplateVPC] = vpcRef.ID + } + + if instance.Zone != nil { + zoneInf := instance.Zone + zone := zoneInf.(*vpcv1.ZoneIdentity) + template[isInstanceTemplateZone] = zone.Name + } + + interfacesList := make([]map[string]interface{}, 0) + if instance.VolumeAttachments != nil { + for _, volume := range instance.VolumeAttachments { + volumeAttach := map[string]interface{}{} + volumeAttach[isInstanceTemplateVolAttName] = *volume.Name + volumeAttach[isInstanceTemplateDeleteVolume] = *volume.DeleteVolumeOnInstanceDelete + volumeIntf := volume.Volume + volumeInst := volumeIntf.(*vpcv1.VolumeAttachmentVolumePrototypeInstanceContext) + newVolumeArr := []map[string]interface{}{} + newVolume := map[string]interface{}{} + + if volumeInst.ID != nil { + volumeAttach[isInstanceTemplateVolAttVolume] = *volumeInst.ID + } + + if volumeInst.Capacity != nil { + newVolume[isInstanceTemplateVolAttVolCapacity] = *volumeInst.Capacity + } + if volumeInst.Profile != nil { + profile := volumeInst.Profile.(*vpcv1.VolumeProfileIdentity) + newVolume[isInstanceTemplateVolAttVolProfile] = profile.Name + } + + if volumeInst.Iops != nil { + newVolume[isInstanceTemplateVolAttVolIops] = *volumeInst.Iops + } + if volumeInst.EncryptionKey != nil { + encryptionKey := volumeInst.EncryptionKey.(*vpcv1.EncryptionKeyIdentity) + newVolume[isInstanceTemplateVolAttVolEncryptionKey] = *encryptionKey.CRN + } + newVolumeArr = append(newVolumeArr, newVolume) + volumeAttach[isInstanceTemplateVolAttVolPrototype] = newVolumeArr + + interfacesList = append(interfacesList, volumeAttach) + } + 
template[isInstanceTemplateVolumeAttachments] = interfacesList
+		}
+
+		if instance.BootVolumeAttachment != nil {
+			bootVolList := make([]map[string]interface{}, 0)
+			bootVol := map[string]interface{}{}
+
+			bootVol[isInstanceTemplatesDeleteVol] = *instance.BootVolumeAttachment.DeleteVolumeOnInstanceDelete
+			if instance.BootVolumeAttachment.Volume != nil {
+				volumeIntf := instance.BootVolumeAttachment.Volume
+				bootVol[isInstanceTemplatesName] = volumeIntf.Name
+				bootVol[isInstanceTemplatesVol] = volumeIntf.Name
+				bootVol[isInstanceTemplateBootSize] = volumeIntf.Capacity
+				if instance.BootVolumeAttachment.Volume.Profile != nil {
+					volProfIntf := instance.BootVolumeAttachment.Volume.Profile
+					volProfInst := volProfIntf.(*vpcv1.VolumeProfileIdentity)
+					bootVol[isInstanceTemplateBootProfile] = volProfInst.Name
+				}
+			}
+			bootVolList = append(bootVolList, bootVol)
+			template[isInstanceTemplatesBootVolumeAttachment] = bootVolList
+		}
+
+		if instance.ResourceGroup != nil {
+			rg := instance.ResourceGroup
+			template[isInstanceTemplateResourceGroup] = rg.ID
+		}
+
+		templates = append(templates, template)
+	}
+	d.SetId(dataSourceIBMISInstanceTemplatesID(d))
+	d.Set(isInstanceTemplates, templates)
+	return nil
+}
+
+// dataSourceIBMISInstanceTemplatesID returns a reasonable ID for an instance templates list.
+func dataSourceIBMISInstanceTemplatesID(d *schema.ResourceData) string {
+	return time.Now().UTC().String()
+}
+
+func dataSourceInstanceTemplateCollectionTemplatesPlacementTargetToMap(placementTargetItem vpcv1.InstancePlacementTargetPrototype) (placementTargetMap map[string]interface{}) {
+	placementTargetMap = map[string]interface{}{}
+
+	if placementTargetItem.ID != nil {
+		placementTargetMap["id"] = placementTargetItem.ID
+	}
+	if placementTargetItem.CRN != nil {
+		placementTargetMap["crn"] = placementTargetItem.CRN
+	}
+	if placementTargetItem.Href != nil {
+		placementTargetMap["href"] = placementTargetItem.Href
+	}
+
+	return placementTargetMap
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instances.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instances.go
new file mode 100644
index 00000000000..d49c9ba6e56
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_instances.go
@@ -0,0 +1,636 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
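+//
+// Illustrative usage sketch from Terraform configuration (assumes this data
+// source is registered as "ibm_is_instances", as the file name suggests;
+// "my-vpc" is a placeholder value):
+//
+//	data "ibm_is_instances" "example" {
+//	  vpc_name = "my-vpc" # optional filter; conflicts with "vpc"
+//	}
+//
+//	output "instance_names" {
+//	  value = data.ibm_is_instances.example.instances[*].name
+//	}
+//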
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isInstances = "instances" +) + +func dataSourceIBMISInstances() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISInstancesRead, + + Schema: map[string]*schema.Schema{ + "vpc_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"vpc"}, + Description: "Name of the vpc to filter the instances attached to it", + }, + + "vpc": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"vpc_name"}, + Description: "VPC ID to filter the instances attached to it", + }, + + isInstances: { + Type: schema.TypeList, + Description: "List of instances", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance id", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance name", + }, + "memory": { + Type: schema.TypeInt, + Computed: true, + Description: "Instance memory", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Instance status", + }, + "resource_group": { + Type: schema.TypeString, + Computed: true, + Description: "Instance resource group", + }, + "vpc": { + Type: schema.TypeString, + Computed: true, + Description: "vpc attached to the instance", + }, + "boot_volume": { + Type: schema.TypeList, + Computed: true, + Description: "Instance Boot Volume", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot volume id", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot volume name", + }, + "device": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot volume device", + }, + "volume_id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot volume's volume id", + }, + "volume_crn": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Boot volume's volume CRN", + }, + }, + }, + }, + + "volume_attachments": { + Type: schema.TypeList, + Computed: true, + Description: "Instance Volume Attachments", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance volume Attachment id", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance volume Attachment name", + }, + "volume_id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance volume Attachment's volume id", + }, + "volume_name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance volume Attachment's volume name", + }, + "volume_crn": { + Type: schema.TypeString, + Computed: true, + Description: "Instance volume Attachment's volume CRN", + }, + }, + }, + }, + + "primary_network_interface": { + Type: schema.TypeList, + Computed: true, + Description: "Instance Primary Network Interface", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance Primary Network interface id", + }, + isInstanceNicName: { + Type: schema.TypeString, + Computed: true, + Description: "Instance Primary Network interface name", + }, + isInstanceNicPrimaryIpv4Address: { + Type: 
schema.TypeString,
+									Computed:    true,
+									Description: "Instance Primary Network interface IPv4 Address",
+								},
+								isInstanceNicSecurityGroups: {
+									Type:        schema.TypeSet,
+									Computed:    true,
+									Elem:        &schema.Schema{Type: schema.TypeString},
+									Set:         schema.HashString,
+									Description: "Instance Primary Network interface security groups",
+								},
+								isInstanceNicSubnet: {
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "Instance Primary Network interface subnet",
+								},
+							},
+						},
+					},
+					"network_interfaces": {
+						Type:        schema.TypeList,
+						Computed:    true,
+						Description: "Instance Network Interfaces",
+						Elem: &schema.Resource{
+							Schema: map[string]*schema.Schema{
+								"id": {
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "Instance Network interface id",
+								},
+								isInstanceNicName: {
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "Instance Network interface name",
+								},
+								isInstanceNicPrimaryIpv4Address: {
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "Instance Network interface IPv4 Address",
+								},
+								isInstanceNicSecurityGroups: {
+									Type:        schema.TypeSet,
+									Computed:    true,
+									Elem:        &schema.Schema{Type: schema.TypeString},
+									Set:         schema.HashString,
+									Description: "Instance Network interface security groups",
+								},
+								isInstanceNicSubnet: {
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "Instance Network interface subnet",
+								},
+							},
+						},
+					},
+					"profile": {
+						Type:        schema.TypeString,
+						Computed:    true,
+						Description: "Instance Profile",
+					},
+					"vcpu": {
+						Type:        schema.TypeList,
+						Computed:    true,
+						Description: "Instance vcpu",
+						Elem: &schema.Resource{
+							Schema: map[string]*schema.Schema{
+								"architecture": {
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "Instance vcpu architecture",
+								},
+								"count": {
+									Type:        schema.TypeInt,
+									Computed:    true,
+									Description: "Instance vcpu count",
+								},
+							},
+						},
+					},
+					"zone": {
+						Type:        schema.TypeString,
+						Computed:    true,
+						Description: "Instance zone",
+					},
+					"image": {
+						Type:        schema.TypeString,
+						Optional:    true,
+						Computed:    true,
+						Description: "Instance Image",
+					},
+					isInstanceDisks: &schema.Schema{
+						Type:        schema.TypeList,
+						Computed:    true,
+						Description: "Collection of the instance's disks.",
+						Elem: &schema.Resource{
+							Schema: map[string]*schema.Schema{
+								"created_at": &schema.Schema{
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "The date and time that the disk was created.",
+								},
+								"href": &schema.Schema{
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "The URL for this instance disk.",
+								},
+								"id": &schema.Schema{
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "The unique identifier for this instance disk.",
+								},
+								"interface_type": &schema.Schema{
+									Type:        schema.TypeString,
+									Computed:    true,
+									Description: "The disk interface used for attaching the disk. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. 
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this disk.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISInstancesRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + if userDetails.generation == 1 { + err := classicInstancesList(d, meta) + if err != nil { + return err + } + } else { + err := instancesList(d, meta) + if err != nil { + return err + } + } + return nil +} + +func classicInstancesList(d *schema.ResourceData, meta interface{}) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.Instance{} + for { + listInstancesOptions := &vpcclassicv1.ListInstancesOptions{} + if start != "" { + listInstancesOptions.Start = &start + } + instances, response, err := sess.ListInstances(listInstancesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Instances %s\n%s", err, response) + } + start = GetNext(instances.Next) + allrecs = append(allrecs, instances.Instances...) + if start == "" { + break + } + } + instancesInfo := make([]map[string]interface{}, 0) + for _, instance := range allrecs { + id := *instance.ID + l := map[string]interface{}{} + l["id"] = id + l["name"] = *instance.Name + l["memory"] = *instance.Memory + l["status"] = *instance.Status + l["resource_group"] = *instance.ResourceGroup.ID + l["vpc"] = *instance.VPC.ID + + if instance.BootVolumeAttachment != nil { + bootVolList := make([]map[string]interface{}, 0) + bootVol := map[string]interface{}{} + bootVol["id"] = *instance.BootVolumeAttachment.ID + bootVol["name"] = *instance.BootVolumeAttachment.Name + if instance.BootVolumeAttachment.Device != nil { + bootVol["device"] = *instance.BootVolumeAttachment.Device.ID + } + if instance.BootVolumeAttachment.Volume != nil { + bootVol["volume_id"] = *instance.BootVolumeAttachment.Volume.ID + bootVol["volume_crn"] = *instance.BootVolumeAttachment.Volume.CRN + } + bootVolList = append(bootVolList, bootVol) + l["boot_volume"] = bootVolList + } + + if instance.VolumeAttachments != nil { + volList := make([]map[string]interface{}, 0) + for _, volume := range instance.VolumeAttachments { + vol := map[string]interface{}{} + if volume.Volume != nil { + vol["id"] = *volume.ID + vol["volume_id"] = *volume.Volume.ID + vol["name"] = *volume.Name + vol["volume_name"] = *volume.Volume.Name + vol["volume_crn"] = *volume.Volume.CRN + volList = append(volList, vol) + } + } + l["volume_attachments"] = volList + } + + if instance.PrimaryNetworkInterface != nil { + primaryNicList := make([]map[string]interface{}, 0) + currentPrimNic := map[string]interface{}{} + currentPrimNic["id"] = *instance.PrimaryNetworkInterface.ID + currentPrimNic[isInstanceNicName] = *instance.PrimaryNetworkInterface.Name + currentPrimNic[isInstanceNicPrimaryIpv4Address] = *instance.PrimaryNetworkInterface.PrimaryIpv4Address + getnicoptions := &vpcclassicv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: instance.PrimaryNetworkInterface.ID, + } + insnic, response, 
err := sess.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentPrimNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentPrimNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + + primaryNicList = append(primaryNicList, currentPrimNic) + l["primary_network_interface"] = primaryNicList + } + + if instance.NetworkInterfaces != nil { + interfacesList := make([]map[string]interface{}, 0) + for _, intfc := range instance.NetworkInterfaces { + if *intfc.ID != *instance.PrimaryNetworkInterface.ID { + currentNic := map[string]interface{}{} + currentNic["id"] = *intfc.ID + currentNic[isInstanceNicName] = *intfc.Name + currentNic[isInstanceNicPrimaryIpv4Address] = *intfc.PrimaryIpv4Address + getnicoptions := &vpcclassicv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: intfc.ID, + } + insnic, response, err := sess.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + interfacesList = append(interfacesList, currentNic) + } + } + l["network_interfaces"] = interfacesList + } + + l["profile"] = *instance.Profile.Name + + cpuList := make([]map[string]interface{}, 0) + if instance.Vcpu != nil { + currentCPU := map[string]interface{}{} + currentCPU["architecture"] = *instance.Vcpu.Architecture + currentCPU["count"] = *instance.Vcpu.Count + cpuList = append(cpuList, currentCPU) + } + l["vcpu"] = cpuList + + l["zone"] = *instance.Zone.Name + if instance.Image != nil { + l["image"] = *instance.Image.ID + } + instancesInfo = append(instancesInfo, l) + } + d.SetId(dataSourceIBMISInstancesID(d)) + d.Set(isInstances, instancesInfo) + return nil +} + +func instancesList(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + var vpcName, vpcID string + + if vpc, ok := d.GetOk("vpc_name"); ok { + vpcName = vpc.(string) + } + + if vpc, ok := d.GetOk("vpc"); ok { + vpcID = vpc.(string) + } + + start := "" + allrecs := []vpcv1.Instance{} + for { + listInstancesOptions := &vpcv1.ListInstancesOptions{} + if start != "" { + listInstancesOptions.Start = &start + } + + if vpcName != "" { + listInstancesOptions.VPCName = &vpcName + } + if vpcID != "" { + listInstancesOptions.VPCID = &vpcID + } + + instances, response, err := sess.ListInstances(listInstancesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Instances %s\n%s", err, response) + } + start = GetNext(instances.Next) + allrecs = append(allrecs, instances.Instances...) 
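+		// Pagination: an empty start token from GetNext means the service
+		// returned no further "next" page link, so the loop below exits once
+		// all instances have been collected.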
+ if start == "" { + break + } + } + instancesInfo := make([]map[string]interface{}, 0) + for _, instance := range allrecs { + id := *instance.ID + l := map[string]interface{}{} + l["id"] = id + l["name"] = *instance.Name + l["memory"] = *instance.Memory + l["status"] = *instance.Status + l["resource_group"] = *instance.ResourceGroup.ID + l["vpc"] = *instance.VPC.ID + + if instance.BootVolumeAttachment != nil { + bootVolList := make([]map[string]interface{}, 0) + bootVol := map[string]interface{}{} + bootVol["id"] = *instance.BootVolumeAttachment.ID + bootVol["name"] = *instance.BootVolumeAttachment.Name + if instance.BootVolumeAttachment.Device != nil { + bootVol["device"] = *instance.BootVolumeAttachment.Device.ID + } + if instance.BootVolumeAttachment.Volume != nil { + bootVol["volume_id"] = *instance.BootVolumeAttachment.Volume.ID + bootVol["volume_crn"] = *instance.BootVolumeAttachment.Volume.CRN + } + bootVolList = append(bootVolList, bootVol) + l["boot_volume"] = bootVolList + } + + if instance.VolumeAttachments != nil { + volList := make([]map[string]interface{}, 0) + for _, volume := range instance.VolumeAttachments { + vol := map[string]interface{}{} + if volume.Volume != nil { + vol["id"] = *volume.ID + vol["volume_id"] = *volume.Volume.ID + vol["name"] = *volume.Name + vol["volume_name"] = *volume.Volume.Name + vol["volume_crn"] = *volume.Volume.CRN + volList = append(volList, vol) + } + } + l["volume_attachments"] = volList + } + + if instance.PrimaryNetworkInterface != nil { + primaryNicList := make([]map[string]interface{}, 0) + currentPrimNic := map[string]interface{}{} + currentPrimNic["id"] = *instance.PrimaryNetworkInterface.ID + currentPrimNic[isInstanceNicName] = *instance.PrimaryNetworkInterface.Name + currentPrimNic[isInstanceNicPrimaryIpv4Address] = *instance.PrimaryNetworkInterface.PrimaryIpv4Address + getnicoptions := &vpcv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: instance.PrimaryNetworkInterface.ID, + } + insnic, response, err := sess.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentPrimNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentPrimNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + + primaryNicList = append(primaryNicList, currentPrimNic) + l["primary_network_interface"] = primaryNicList + } + + if instance.NetworkInterfaces != nil { + interfacesList := make([]map[string]interface{}, 0) + for _, intfc := range instance.NetworkInterfaces { + if *intfc.ID != *instance.PrimaryNetworkInterface.ID { + currentNic := map[string]interface{}{} + currentNic["id"] = *intfc.ID + currentNic[isInstanceNicName] = *intfc.Name + currentNic[isInstanceNicPrimaryIpv4Address] = *intfc.PrimaryIpv4Address + getnicoptions := &vpcv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: intfc.ID, + } + insnic, response, err := sess.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, 
string(*(insnic.SecurityGroups[i].ID)))
+						}
+						currentNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList)
+					}
+					interfacesList = append(interfacesList, currentNic)
+				}
+			}
+			l["network_interfaces"] = interfacesList
+		}
+
+		l["profile"] = *instance.Profile.Name
+
+		cpuList := make([]map[string]interface{}, 0)
+		if instance.Vcpu != nil {
+			currentCPU := map[string]interface{}{}
+			currentCPU["architecture"] = *instance.Vcpu.Architecture
+			currentCPU["count"] = *instance.Vcpu.Count
+			cpuList = append(cpuList, currentCPU)
+		}
+		l["vcpu"] = cpuList
+
+		l["zone"] = *instance.Zone.Name
+		if instance.Image != nil {
+			l["image"] = *instance.Image.ID
+		}
+
+		if instance.Disks != nil {
+			l[isInstanceDisks] = dataSourceInstanceFlattenDisks(instance.Disks)
+		}
+
+		instancesInfo = append(instancesInfo, l)
+	}
+	d.SetId(dataSourceIBMISInstancesID(d))
+	d.Set(isInstances, instancesInfo)
+	return nil
+}
+
+// dataSourceIBMISInstancesID returns a reasonable ID for an Instance list.
+func dataSourceIBMISInstancesID(d *schema.ResourceData) string {
+	return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_lb.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_lb.go
new file mode 100644
index 00000000000..d9c804e16e1
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_lb.go
@@ -0,0 +1,550 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/IBM/vpc-go-sdk/vpcclassicv1"
+	"github.com/IBM/vpc-go-sdk/vpcv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	name                    = "name"
+	poolAlgorithm           = "algorithm"
+	href                    = "href"
+	poolProtocol            = "protocol"
+	poolCreatedAt           = "created_at"
+	poolProvisioningStatus  = "provisioning_status"
+	healthMonitor           = "health_monitor"
+	instanceGroup           = "instance_group"
+	members                 = "members"
+	sessionPersistence      = "session_persistence"
+	crnInstance             = "crn"
+	sessionType             = "type"
+	healthMonitorType       = "type"
+	healthMonitorDelay      = "delay"
+	healthMonitorMaxRetries = "max_retries"
+	healthMonitorPort       = "port"
+	healthMonitorTimeout    = "timeout"
+	healthMonitorURLPath    = "url_path"
+)
+
+func dataSourceIBMISLB() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMISLBRead,
+
+		Schema: map[string]*schema.Schema{
+			isLBName: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Load Balancer name",
+			},
+
+			isLBType: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Load Balancer type",
+			},
+
+			isLBStatus: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Load Balancer status",
+			},
+
+			isLBOperatingStatus: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Load Balancer operating status",
+			},
+
+			isLBPublicIPs: {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "Load Balancer Public IPs",
+			},
+
+			isLBPrivateIPs: {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "Load Balancer private IPs",
+			},
+
+			isLBSubnets: {
+				Type:        schema.TypeSet,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Set:         schema.HashString,
+				Description: "Load Balancer subnets list",
+			},
+
+			isLBSecurityGroups: {
+				Type:     schema.TypeSet,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set: 
schema.HashString,
+				Description: "Load Balancer security groups list",
+			},
+
+			isLBSecurityGroupsSupported: {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Whether security groups are supported for this Load Balancer",
+			},
+
+			isLBTags: {
+				Type:        schema.TypeSet,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Set:         resourceIBMVPCHash,
+				Description: "Tags associated to Load Balancer",
+			},
+
+			isLBResourceGroup: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Load Balancer Resource group",
+			},
+
+			isLBHostName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Load Balancer Host Name",
+			},
+
+			isLBLogging: {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Logging of Load Balancer",
+			},
+
+			isLBListeners: {
+				Type:        schema.TypeSet,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Set:         schema.HashString,
+				Description: "Load Balancer Listeners list",
+			},
+			isLBPools: {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "Load Balancer Pools list",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						poolAlgorithm: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The load balancing algorithm.",
+						},
+						healthMonitor: {
+							Description: "The health monitor of this pool.",
+							Computed:    true,
+							Type:        schema.TypeMap,
+						},
+
+						instanceGroup: {
+							Description: "The instance group that is managing this pool.",
+							Computed:    true,
+							Type:        schema.TypeMap,
+						},
+
+						members: {
+							Description: "The backend server members of the pool.",
+							Computed:    true,
+							Type:        schema.TypeList,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									href: {
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "The member's canonical URL.",
+									},
+									ID: {
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "The unique identifier for this load balancer pool member.",
+									},
+								},
+							},
+						},
+						sessionPersistence: {
+							Description: "The session persistence of this pool.",
+							Computed:    true,
+							Type:        schema.TypeMap,
+						},
+						poolCreatedAt: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The date and time that this pool was created.",
+						},
+						href: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The pool's canonical URL.",
+						},
+						ID: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The unique identifier for this load balancer pool",
+						},
+						name: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The user-defined name for this load balancer pool",
+						},
+						poolProtocol: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The protocol used for this load balancer pool.",
+						},
+						poolProvisioningStatus: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The provisioning status of this pool.",
+						},
+					},
+				},
+			},
+			ResourceControllerURL: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
+			},
+
+			ResourceName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The name of the resource",
+			},
+
+			ResourceGroupName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The resource group name in which resource is provisioned",
+			},
+		},
+	}
+}
+
+func dataSourceIBMISLBRead(d *schema.ResourceData, meta interface{}) error {
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+	name := d.Get(isLBName).(string)
+	if userDetails.generation == 1 {
+		err := 
classiclbGetbyName(d, meta, name) + if err != nil { + return err + } + } else { + err := lbGetByName(d, meta, name) + if err != nil { + return err + } + } + return nil +} + +func classiclbGetbyName(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + listLoadBalancersOptions := &vpcclassicv1.ListLoadBalancersOptions{} + lbs, response, err := sess.ListLoadBalancers(listLoadBalancersOptions) + if err != nil { + return fmt.Errorf("Error Fetching Load Balancers %s\n%s", err, response) + } + for _, lb := range lbs.LoadBalancers { + if *lb.Name == name { + d.SetId(*lb.ID) + d.Set(isLBName, *lb.Name) + if *lb.IsPublic { + d.Set(isLBType, "public") + } else { + d.Set(isLBType, "private") + } + d.Set(isLBStatus, *lb.ProvisioningStatus) + d.Set(isLBOperatingStatus, *lb.OperatingStatus) + publicIpList := make([]string, 0) + if lb.PublicIps != nil { + for _, ip := range lb.PublicIps { + if ip.Address != nil { + pubip := *ip.Address + publicIpList = append(publicIpList, pubip) + } + } + } + d.Set(isLBPublicIPs, publicIpList) + privateIpList := make([]string, 0) + if lb.PrivateIps != nil { + for _, ip := range lb.PrivateIps { + if ip.Address != nil { + prip := *ip.Address + privateIpList = append(privateIpList, prip) + } + } + } + d.Set(isLBPrivateIPs, privateIpList) + if lb.Subnets != nil { + subnetList := make([]string, 0) + for _, subnet := range lb.Subnets { + if subnet.ID != nil { + sub := *subnet.ID + subnetList = append(subnetList, sub) + } + } + d.Set(isLBSubnets, subnetList) + } + if lb.Listeners != nil { + listenerList := make([]string, 0) + for _, listener := range lb.Listeners { + if listener.ID != nil { + lis := *listener.ID + listenerList = append(listenerList, lis) + } + } + d.Set(isLBListeners, listenerList) + } + listLoadBalancerPoolsOptions := &vpcclassicv1.ListLoadBalancerPoolsOptions{} + listLoadBalancerPoolsOptions.SetLoadBalancerID(*lb.ID) + poolsResult, _, _ := sess.ListLoadBalancerPools(listLoadBalancerPoolsOptions) + if poolsResult != nil { + poolsInfo := make([]map[string]interface{}, 0) + for _, p := range poolsResult.Pools { + //log.Printf("******* p ******** : (%+v)", p) + pool := make(map[string]interface{}) + pool[poolAlgorithm] = *p.Algorithm + pool[ID] = *p.ID + pool[href] = *p.Href + pool[poolProtocol] = *p.Protocol + pool[poolCreatedAt] = p.CreatedAt.String() + pool[poolProvisioningStatus] = *p.ProvisioningStatus + pool["name"] = *p.Name + if p.HealthMonitor != nil { + healthMonitorInfo := make(map[string]interface{}) + delayfinal := strconv.FormatInt(*(p.HealthMonitor.Delay), 10) + healthMonitorInfo[healthMonitorDelay] = delayfinal + maxRetriesfinal := strconv.FormatInt(*(p.HealthMonitor.MaxRetries), 10) + timeoutfinal := strconv.FormatInt(*(p.HealthMonitor.Timeout), 10) + + healthMonitorInfo[healthMonitorMaxRetries] = maxRetriesfinal + healthMonitorInfo[healthMonitorTimeout] = timeoutfinal + if p.HealthMonitor.URLPath != nil { + healthMonitorInfo[healthMonitorURLPath] = *(p.HealthMonitor.URLPath) + } + healthMonitorInfo[healthMonitorType] = *(p.HealthMonitor.Type) + pool[healthMonitor] = healthMonitorInfo + } + + if p.SessionPersistence != nil { + sessionPersistenceInfo := make(map[string]interface{}) + sessionPersistenceInfo[sessionType] = *p.SessionPersistence.Type + pool[sessionPersistence] = sessionPersistenceInfo + } + if p.Members != nil { + memberList := make([]map[string]interface{}, len(p.Members)) + for j, m := range p.Members { + member := make(map[string]interface{}) + 
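// Flatten each pool member to its unique id and canonical href.
+						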
member[ID] = *m.ID + member[href] = *m.Href + memberList[j] = member + } + pool[members] = memberList + } + poolsInfo = append(poolsInfo, pool) + } //for + d.Set(isLBPools, poolsInfo) + + } + d.Set(isLBResourceGroup, *lb.ResourceGroup.ID) + d.Set(isLBHostName, *lb.Hostname) + tags, err := GetTagsUsingCRN(meta, *lb.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Load Balancer (%s) tags: %s", d.Id(), err) + } + d.Set(isLBTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/loadBalancers") + d.Set(ResourceName, *lb.Name) + if lb.ResourceGroup != nil { + d.Set(ResourceGroupName, *lb.ResourceGroup.ID) + } + return nil + } + } + return fmt.Errorf("No Load balancer found with name %s", name) +} + +func lbGetByName(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + listLoadBalancersOptions := &vpcv1.ListLoadBalancersOptions{} + lbs, response, err := sess.ListLoadBalancers(listLoadBalancersOptions) + if err != nil { + return fmt.Errorf("Error Fetching Load Balancers %s\n%s", err, response) + } + for _, lb := range lbs.LoadBalancers { + if *lb.Name == name { + d.SetId(*lb.ID) + d.Set(isLBName, *lb.Name) + if lb.Logging != nil && lb.Logging.Datapath != nil { + d.Set(isLBLogging, *lb.Logging.Datapath.Active) + } + if *lb.IsPublic { + d.Set(isLBType, "public") + } else { + d.Set(isLBType, "private") + } + d.Set(isLBStatus, *lb.ProvisioningStatus) + d.Set(isLBOperatingStatus, *lb.OperatingStatus) + publicIpList := make([]string, 0) + if lb.PublicIps != nil { + for _, ip := range lb.PublicIps { + if ip.Address != nil { + pubip := *ip.Address + publicIpList = append(publicIpList, pubip) + } + } + } + d.Set(isLBPublicIPs, publicIpList) + privateIpList := make([]string, 0) + if lb.PrivateIps != nil { + for _, ip := range lb.PrivateIps { + if ip.Address != nil { + prip := *ip.Address + privateIpList = append(privateIpList, prip) + } + } + } + d.Set(isLBPrivateIPs, privateIpList) + if lb.Subnets != nil { + subnetList := make([]string, 0) + for _, subnet := range lb.Subnets { + if subnet.ID != nil { + sub := *subnet.ID + subnetList = append(subnetList, sub) + } + } + d.Set(isLBSubnets, subnetList) + } + + d.Set(isLBSecurityGroupsSupported, false) + if lb.SecurityGroups != nil { + securitygroupList := make([]string, 0) + for _, securityGroup := range lb.SecurityGroups { + if securityGroup.ID != nil { + securityGroupID := *securityGroup.ID + securitygroupList = append(securitygroupList, securityGroupID) + } + } + d.Set(isLBSecurityGroups, securitygroupList) + d.Set(isLBSecurityGroupsSupported, true) + } + + if lb.Listeners != nil { + listenerList := make([]string, 0) + for _, listener := range lb.Listeners { + if listener.ID != nil { + lis := *listener.ID + listenerList = append(listenerList, lis) + } + } + d.Set(isLBListeners, listenerList) + } + listLoadBalancerPoolsOptions := &vpcv1.ListLoadBalancerPoolsOptions{} + listLoadBalancerPoolsOptions.SetLoadBalancerID(*lb.ID) + poolsResult, _, _ := sess.ListLoadBalancerPools(listLoadBalancerPoolsOptions) + if poolsResult != nil { + poolsInfo := make([]map[string]interface{}, 0) + + for _, p := range poolsResult.Pools { + // log.Printf("******* p ******** : (%+v)", p) + pool := make(map[string]interface{}) + pool[poolAlgorithm] = *p.Algorithm + pool[ID] = *p.ID + pool[href] = *p.Href + pool[poolProtocol] = *p.Protocol + pool[poolCreatedAt] = p.CreatedAt.String() + 
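// NOTE: schema.TypeMap attributes carry string values only, which is why the
+ // numeric health-monitor fields below (delay, max retries, timeout) are
+ // serialized with strconv.FormatInt instead of being stored as integers.
+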
pool[poolProvisioningStatus] = *p.ProvisioningStatus + pool["name"] = *p.Name + if p.HealthMonitor != nil { + healthMonitorInfo := make(map[string]interface{}) + delayfinal := strconv.FormatInt(*(p.HealthMonitor.Delay), 10) + healthMonitorInfo[healthMonitorDelay] = delayfinal + maxRetriesfinal := strconv.FormatInt(*(p.HealthMonitor.MaxRetries), 10) + timeoutfinal := strconv.FormatInt(*(p.HealthMonitor.Timeout), 10) + healthMonitorInfo[healthMonitorMaxRetries] = maxRetriesfinal + healthMonitorInfo[healthMonitorTimeout] = timeoutfinal + if p.HealthMonitor.URLPath != nil { + healthMonitorInfo[healthMonitorURLPath] = *(p.HealthMonitor.URLPath) + } + healthMonitorInfo[healthMonitorType] = *(p.HealthMonitor.Type) + pool[healthMonitor] = healthMonitorInfo + } + + if p.SessionPersistence != nil { + sessionPersistenceInfo := make(map[string]interface{}) + sessionPersistenceInfo[sessionType] = *p.SessionPersistence.Type + pool[sessionPersistence] = sessionPersistenceInfo + } + if p.Members != nil { + memberList := make([]map[string]interface{}, len(p.Members)) + for j, m := range p.Members { + member := make(map[string]interface{}) + member[ID] = *m.ID + member[href] = *m.Href + memberList[j] = member + } + pool[members] = memberList + } + + if p.InstanceGroup != nil { + instanceGroupInfo := make(map[string]interface{}) + instanceGroupInfo[ID] = *(p.InstanceGroup.ID) + instanceGroupInfo[crnInstance] = *(p.InstanceGroup.CRN) + instanceGroupInfo[href] = *(p.InstanceGroup.Href) + instanceGroupInfo[name] = *(p.InstanceGroup.Name) + pool[instanceGroup] = instanceGroupInfo + } + poolsInfo = append(poolsInfo, pool) + } //for + d.Set(isLBPools, poolsInfo) + } + + d.Set(isLBResourceGroup, *lb.ResourceGroup.ID) + d.Set(isLBHostName, *lb.Hostname) + tags, err := GetTagsUsingCRN(meta, *lb.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Load Balancer (%s) tags: %s", d.Id(), err) + } + d.Set(isLBTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/loadBalancers") + d.Set(ResourceName, *lb.Name) + if lb.ResourceGroup != nil { + d.Set(ResourceGroupName, *lb.ResourceGroup.ID) + } + return nil + } + } + return fmt.Errorf("No Load balancer found with name %s", name) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_lb_profiles.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_lb_profiles.go new file mode 100644 index 00000000000..d8c75bc438f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_lb_profiles.go @@ -0,0 +1,93 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
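Both generation branches above resolve a load balancer by listing every load balancer in the account and matching on the user-defined name client-side; the VPC API exposes no lookup-by-name call. A minimal standalone sketch of that pattern against the vpc-go-sdk (the authenticated `sess` client is assumed, and `findLBByName` is an illustrative helper, not part of this changeset):

```go
package example

import (
	"fmt"

	"github.com/IBM/vpc-go-sdk/vpcv1"
)

// findLBByName lists all load balancers and returns a copy of the first one
// whose user-defined name matches, or nil when there is no match.
func findLBByName(sess *vpcv1.VpcV1, name string) (*vpcv1.LoadBalancer, error) {
	lbs, response, err := sess.ListLoadBalancers(&vpcv1.ListLoadBalancersOptions{})
	if err != nil {
		return nil, fmt.Errorf("listing load balancers failed: %s\n%s", err, response)
	}
	for _, lb := range lbs.LoadBalancers {
		if lb.Name != nil && *lb.Name == name {
			match := lb // copy the loop variable before taking its address
			return &match, nil
		}
	}
	return nil, nil
}
```

Note that neither branch paginates this listing; an account with more than one page of load balancers would need the same `Start`/`GetNext` loop the other data sources in this changeset use.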
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isLbsProfiles = "lb_profiles" +) + +func dataSourceIBMISLbProfiles() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISLbProfilesRead, + + Schema: map[string]*schema.Schema{ + + isLbsProfiles: { + Type: schema.TypeList, + Description: "Collection of load balancer profile collectors", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name for this load balancer profile", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this load balancer profile", + }, + "family": { + Type: schema.TypeString, + Computed: true, + Description: "The product family this load balancer profile belongs to", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISLbProfilesRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + start := "" + allrecs := []vpcv1.LoadBalancerProfile{} + for { + listOptions := &vpcv1.ListLoadBalancerProfilesOptions{} + if start != "" { + listOptions.Start = &start + } + profileCollectors, response, err := sess.ListLoadBalancerProfiles(listOptions) + if err != nil { + return fmt.Errorf("Error Fetching Load Balancer Profiles for VPC %s\n%s", err, response) + } + start = GetNext(profileCollectors.Next) + allrecs = append(allrecs, profileCollectors.Profiles...) + if start == "" { + break + } + } + lbprofilesInfo := make([]map[string]interface{}, 0) + for _, profileCollector := range allrecs { + + l := map[string]interface{}{ + "name": *profileCollector.Name, + "href": *profileCollector.Href, + "family": *profileCollector.Family, + } + lbprofilesInfo = append(lbprofilesInfo, l) + } + d.SetId(dataSourceIBMISLbProfilesID(d)) + d.Set(isLbsProfiles, lbprofilesInfo) + return nil +} + +// dataSourceIBMISLbProfilesID returns a reasonable ID for a profileCollector list. +func dataSourceIBMISLbProfilesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_lbs.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_lbs.go new file mode 100644 index 00000000000..2121e8ece29 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_lbs.go @@ -0,0 +1,491 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
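`dataSourceIBMISLbProfilesRead` above is the cleanest instance of the pagination idiom used throughout this changeset: request a page, extract the next-page `start` token from the collection's `Next` href via the package's `GetNext` helper, and stop once the token comes back empty. A generic sketch of the loop shape (the `fetchPage` callback is hypothetical and stands in for a concrete SDK list call):

```go
package example

// fetchPage wraps one SDK list call; it returns the page's items and the
// next-page token (already extracted, GetNext-style), empty on the last page.
type fetchPage func(start string) (items []string, next string, err error)

// collectAllPages drains every page into a single slice.
func collectAllPages(fetch fetchPage) ([]string, error) {
	var all []string
	start := ""
	for {
		items, next, err := fetch(start)
		if err != nil {
			return nil, err
		}
		all = append(all, items...)
		if next == "" { // no further pages
			return all, nil
		}
		start = next
	}
}
```

The `d.SetId(time.Now().UTC().String())` at the end is also a deliberate idiom: Terraform requires a non-empty ID even for list-style data sources, and a timestamp pseudo-ID forces the list to be re-read on every refresh.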
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + loadBalancers = "load_balancers" + CRN = "crn" + CreatedAt = "created_at" + isLbProfile = "profile" + ProvisioningStatus = "provisioning_status" +) + +func dataSourceIBMISLBS() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISLBSRead, + Schema: map[string]*schema.Schema{ + loadBalancers: { + Type: schema.TypeList, + Description: "Collection of load balancers", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + ID: { + Type: schema.TypeString, + Computed: true, + }, + CRN: { + Type: schema.TypeString, + Computed: true, + Description: "The load balancer's CRN", + }, + CreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that this pool was created.", + }, + ProvisioningStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The provisioning status of this load balancer", + }, + isLBName: { + Type: schema.TypeString, + Computed: true, + Description: "Load Balancer name", + }, + + isLBType: { + Type: schema.TypeString, + Computed: true, + Description: "Load Balancer type", + }, + + isLBStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Load Balancer status", + }, + + isLBOperatingStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Load Balancer operating status", + }, + + isLBPublicIPs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Load Balancer Public IPs", + }, + + isLBPrivateIPs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Load Balancer private IPs", + }, + + isLBSubnets: { + Type: schema.TypeList, + Computed: true, + Description: "Load Balancer subnets list", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + href: { + Type: schema.TypeString, + Computed: true, + Description: "The subnet's canonical URL.", + }, + ID: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this load balancer subnet", + }, + name: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this load balancer subnet", + }, + CRN: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this subnet", + }, + }, + }, + }, + + isLBTags: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceIBMVPCHash, + Description: "Tags associated to Load Balancer", + }, + + isLBResourceGroup: { + Type: schema.TypeString, + Computed: true, + Description: "Load Balancer Resource group", + }, + + isLBHostName: { + Type: schema.TypeString, + Computed: true, + Description: "Load Balancer Host Name", + }, + + isLBListeners: { + Type: schema.TypeList, + Computed: true, + Description: "Load Balancer Listeners list", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + href: { + Type: schema.TypeString, + Computed: true, + Description: "The listener's canonical URL.", + }, + ID: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this load balancer listener", + }, + }, + }, + }, + isLbProfile: { + Type: schema.TypeMap, + Computed: true, + Description: "The profile to use for this load balancer", + }, + + isLBPools: { + 
Type: schema.TypeList,
+ Computed: true,
+ Description: "Load Balancer Pools list",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ href: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The pool's canonical URL.",
+ },
+ ID: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The unique identifier for this load balancer pool",
+ },
+ name: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The user-defined name for this load balancer pool",
+ },
+ },
+ },
+ },
+
+ ResourceControllerURL: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
+ },
+
+ ResourceName: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The name of the resource",
+ },
+
+ ResourceGroupName: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The resource group name in which resource is provisioned",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func dataSourceIBMISLBSRead(d *schema.ResourceData, meta interface{}) error {
+ userDetails, err := meta.(ClientSession).BluemixUserDetails()
+ if err != nil {
+ return err
+ }
+ if userDetails.generation == 1 {
+ err := classiclbs(d, meta)
+ if err != nil {
+ return err
+ }
+ } else {
+ err := getLbs(d, meta)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func classiclbs(d *schema.ResourceData, meta interface{}) error {
+ sess, err := classicVpcClient(meta)
+ if err != nil {
+ return err
+ }
+ listLoadBalancersOptions := &vpcclassicv1.ListLoadBalancersOptions{}
+ lbs, response, err := sess.ListLoadBalancers(listLoadBalancersOptions)
+ if err != nil {
+ return fmt.Errorf("Error Fetching Load Balancers %s\n%s", err, response)
+ }
+
+ lbList := make([]map[string]interface{}, 0)
+
+ for _, lb := range lbs.LoadBalancers {
+ lbInfo := make(map[string]interface{})
+ lbInfo[ID] = *lb.ID
+ lbInfo[isLBName] = *lb.Name
+ lbInfo[CRN] = *lb.CRN
+ lbInfo[ProvisioningStatus] = *lb.ProvisioningStatus
+ if *lb.IsPublic {
+ lbInfo[isLBType] = "public"
+ } else {
+ lbInfo[isLBType] = "private"
+ }
+ lbInfo[isLBStatus] = *lb.ProvisioningStatus
+ lbInfo[isLBOperatingStatus] = *lb.OperatingStatus
+ publicIpList := make([]string, 0)
+ if lb.PublicIps != nil {
+ for _, ip := range lb.PublicIps {
+ if ip.Address != nil {
+ pubip := *ip.Address
+ publicIpList = append(publicIpList, pubip)
+ }
+ }
+ }
+
+ lbInfo[isLBPublicIPs] = publicIpList
+ privateIpList := make([]string, 0)
+ if lb.PrivateIps != nil {
+ for _, ip := range lb.PrivateIps {
+ if ip.Address != nil {
+ prip := *ip.Address
+ privateIpList = append(privateIpList, prip)
+ }
+ }
+ }
+ lbInfo[isLBPrivateIPs] = privateIpList
+
+ if lb.Subnets != nil {
+ subnetList := make([]map[string]interface{}, 0)
+ for _, subnet := range lb.Subnets {
+ sub := make(map[string]interface{})
+ sub[ID] = *subnet.ID
+ sub[href] = *subnet.Href
+ if subnet.CRN != nil {
+ sub[CRN] = *subnet.CRN
+ }
+ sub[name] = *subnet.Name
+ subnetList = append(subnetList, sub)
+ }
+ lbInfo[isLBSubnets] = subnetList
+ }
+ if lb.Listeners != nil {
+ listenerList := make([]map[string]interface{}, 0)
+ for _, listener := range lb.Listeners {
+ lis := make(map[string]interface{})
+ lis[ID] = *listener.ID
+ lis[href] = *listener.Href
+ listenerList = append(listenerList, lis)
+ }
+ lbInfo[isLBListeners] = listenerList
+ }
+
+ if lb.Pools != nil {
+ poolList := make([]map[string]interface{}, 0)
+
+ for _, p := range lb.Pools {
+ pool := make(map[string]interface{})
+ pool[name] = *p.Name
+ pool[ID] = *p.ID
+ pool[href] = *p.Href
+ poolList = append(poolList, pool)
+ }
+ lbInfo[isLBPools] = poolList
+ }
+ lbInfo[isLBResourceGroup] = *lb.ResourceGroup.ID
+ lbInfo[isLBHostName] = *lb.Hostname
+ tags, err := GetTagsUsingCRN(meta, *lb.CRN)
+ if err != nil {
+ log.Printf(
+ "Error on get of resource vpc Load Balancer (%s) tags: %s", d.Id(), err)
+ }
+ lbInfo[isLBTags] = tags
+
+ controller, err := getBaseController(meta)
+ if err != nil {
+ return err
+ }
+ lbInfo[ResourceControllerURL] = controller + "/vpc/network/loadBalancers"
+ lbInfo[ResourceName] = *lb.Name
+
+ if lb.ResourceGroup != nil {
+ lbInfo[ResourceGroupName] = *lb.ResourceGroup.ID
+ }
+ lbList = append(lbList, lbInfo)
+ }
+ d.SetId(dataSourceIBMISLBsID(d))
+ d.Set(loadBalancers, lbList)
+
+ return nil
+}
+
+func getLbs(d *schema.ResourceData, meta interface{}) error {
+ sess, err := vpcClient(meta)
+ if err != nil {
+ return err
+ }
+ listLoadBalancersOptions := &vpcv1.ListLoadBalancersOptions{}
+ lbs, response, err := sess.ListLoadBalancers(listLoadBalancersOptions)
+ if err != nil {
+ return fmt.Errorf("Error Fetching Load Balancers %s\n%s", err, response)
+ }
+ lbList := make([]map[string]interface{}, 0)
+
+ for _, lb := range lbs.LoadBalancers {
+ lbInfo := make(map[string]interface{})
+ lbInfo[ID] = *lb.ID
+ lbInfo[isLBName] = *lb.Name
+ lbInfo[CRN] = *lb.CRN
+ lbInfo[ProvisioningStatus] = *lb.ProvisioningStatus
+
+ lbInfo[CreatedAt] = lb.CreatedAt.String()
+ if *lb.IsPublic {
+ lbInfo[isLBType] = "public"
+ } else {
+ lbInfo[isLBType] = "private"
+ }
+ lbInfo[isLBStatus] = *lb.ProvisioningStatus
+ lbInfo[isLBOperatingStatus] = *lb.OperatingStatus
+ publicIpList := make([]string, 0)
+ if lb.PublicIps != nil {
+ for _, ip := range lb.PublicIps {
+ if ip.Address != nil {
+ pubip := *ip.Address
+ publicIpList = append(publicIpList, pubip)
+ }
+ }
+ }
+
+ lbInfo[isLBPublicIPs] = publicIpList
+ privateIpList := make([]string, 0)
+ if lb.PrivateIps != nil {
+ for _, ip := range lb.PrivateIps {
+ if ip.Address != nil {
+ prip := *ip.Address
+ privateIpList = append(privateIpList, prip)
+ }
+ }
+ }
+ lbInfo[isLBPrivateIPs] = privateIpList
+
+ if lb.Subnets != nil {
+ subnetList := make([]map[string]interface{}, 0)
+ for _, subnet := range lb.Subnets {
+ sub := make(map[string]interface{})
+ sub[ID] = *subnet.ID
+ sub[href] = *subnet.Href
+ if subnet.CRN != nil {
+ sub[CRN] = *subnet.CRN
+ }
+ sub[name] = *subnet.Name
+ subnetList = append(subnetList, sub)
+ }
+ lbInfo[isLBSubnets] = subnetList
+ }
+ if lb.Listeners != nil {
+ listenerList := make([]map[string]interface{}, 0)
+ for _, listener := range lb.Listeners {
+ lis := make(map[string]interface{})
+ lis[ID] = *listener.ID
+ lis[href] = *listener.Href
+ listenerList = append(listenerList, lis)
+ }
+ lbInfo[isLBListeners] = listenerList
+ }
+ if lb.Pools != nil {
+ poolList := make([]map[string]interface{}, 0)
+
+ for _, p := range lb.Pools {
+ pool := make(map[string]interface{})
+ pool[name] = *p.Name
+ pool[ID] = *p.ID
+ pool[href] = *p.Href
+ poolList = append(poolList, pool)
+ }
+ lbInfo[isLBPools] = poolList
+ }
+ if lb.Profile != nil {
+ lbProfile := make(map[string]interface{})
+ lbProfile[name] = *lb.Profile.Name
+ lbProfile[href] = *lb.Profile.Href
+ lbProfile["family"] = *lb.Profile.Family
+ lbInfo[isLbProfile] = lbProfile
+ }
+ lbInfo[isLBResourceGroup] = *lb.ResourceGroup.ID
+ lbInfo[isLBHostName] = *lb.Hostname
+ tags, err := GetTagsUsingCRN(meta, *lb.CRN)
+ if err != nil {
+ log.Printf(
+ "Error on get of resource vpc Load Balancer (%s) tags: %s", d.Id(), err)
+ }
+ lbInfo[isLBTags] = tags
+ controller, err := getBaseController(meta)
+ if err != nil {
+ return err
+ }
+ lbInfo[ResourceControllerURL] = controller + "/vpc-ext/network/loadBalancers"
+ lbInfo[ResourceName] = *lb.Name
+
+ if lb.ResourceGroup != nil {
+ lbInfo[ResourceGroupName] = *lb.ResourceGroup.ID
+ }
+ lbList = append(lbList, lbInfo)
+ }
+ d.SetId(dataSourceIBMISLBsID(d))
+ d.Set(loadBalancers, lbList)
+ return nil
+}
+
+// dataSourceIBMISLBsID returns a reasonable ID for a load balancer list.
+func dataSourceIBMISLBsID(d *schema.ResourceData) string {
+ return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_operating_system.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_operating_system.go
new file mode 100644
index 00000000000..69743ed834b
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_operating_system.go
@@ -0,0 +1,108 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
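`getLbs` flattens each SDK struct into a `map[string]interface{}` keyed to match the `Elem` schema, which is the shape `d.Set` expects for a computed `TypeList` of objects. A reduced sketch of that contract for the subnet references (the helper name is illustrative, and the literal keys assume the package constants `href`, `name`, and `CRN` resolve to the lowercase strings used here):

```go
package example

import "github.com/IBM/vpc-go-sdk/vpcv1"

// flattenLBSubnets converts SDK subnet references into the []map shape that
// matches the isLBSubnets Elem schema declared in dataSourceIBMISLBS.
func flattenLBSubnets(subnets []vpcv1.SubnetReference) []map[string]interface{} {
	out := make([]map[string]interface{}, 0, len(subnets))
	for _, s := range subnets {
		m := map[string]interface{}{}
		if s.ID != nil {
			m["id"] = *s.ID
		}
		if s.Href != nil {
			m["href"] = *s.Href
		}
		if s.Name != nil {
			m["name"] = *s.Name
		}
		if s.CRN != nil {
			m["crn"] = *s.CRN
		}
		out = append(out, m)
	}
	return out
}
```

Each nil guard matters: the SDK models every field as a pointer, and dereferencing an absent one would panic the provider.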
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isOperatingSystemName = "name" + isOperatingSystemArchitecture = "architecture" + isOperatingSystemDHOnly = "dedicated_host_only" + isOperatingSystemDisplayName = "display_name" + isOperatingSystemFamily = "family" + isOperatingSystemHref = "href" + isOperatingSystemVendor = "vendor" + isOperatingSystemVersion = "version" +) + +func dataSourceIBMISOperatingSystem() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISOperatingSystemRead, + + Schema: map[string]*schema.Schema{ + + isOperatingSystemName: { + Type: schema.TypeString, + Required: true, + Description: "The globally unique name for this operating system", + }, + + isOperatingSystemArchitecture: { + Type: schema.TypeString, + Computed: true, + Description: "The operating system architecture", + }, + + isOperatingSystemVersion: { + Type: schema.TypeString, + Computed: true, + Description: "The major release version of this operating system", + }, + isOperatingSystemDHOnly: { + Type: schema.TypeBool, + Computed: true, + Description: "Flag which shows images with this operating system can only be used on dedicated hosts or dedicated host groups", + }, + isOperatingSystemDisplayName: { + Type: schema.TypeString, + Computed: true, + Description: "A unique, display-friendly name for the operating system", + }, + isOperatingSystemFamily: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the software family this operating system belongs to", + }, + isOperatingSystemHref: { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this operating system", + }, + + isOperatingSystemVendor: { + Type: schema.TypeString, + Computed: true, + Description: "The vendor of the operating system", + }, + }, + } +} + +func dataSourceIBMISOperatingSystemRead(d *schema.ResourceData, meta interface{}) error { + name := d.Get(isOperatingSystemName).(string) + err := osGet(d, meta, name) + if err != nil { + return err + } + return nil +} + +func osGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getOperatingSystemOptions := &vpcv1.GetOperatingSystemOptions{ + Name: &name, + } + os, response, err := sess.GetOperatingSystem(getOperatingSystemOptions) + if err != nil || os == nil { + return fmt.Errorf("Error Getting Operating System Details %s , %s", err, response) + } + d.Set(isOperatingSystemName, *os.Name) + d.SetId(*os.Name) + d.Set(isOperatingSystemDHOnly, *os.DedicatedHostOnly) + d.Set(isOperatingSystemArchitecture, *os.Architecture) + d.Set(isOperatingSystemDisplayName, *os.DisplayName) + d.Set(isOperatingSystemFamily, *os.Family) + d.Set(isOperatingSystemHref, *os.Href) + d.Set(isOperatingSystemVendor, *os.Vendor) + d.Set(isOperatingSystemVersion, *os.Version) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_operating_systems.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_operating_systems.go new file mode 100644 index 00000000000..e84ab06ddd3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_operating_systems.go @@ -0,0 +1,132 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isOperatingSystems = "operating_systems" +) + +func dataSourceIBMISOperatingSystems() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISOperatingSystemsRead, + + Schema: map[string]*schema.Schema{ + isOperatingSystems: { + Type: schema.TypeList, + Description: "List of operating systems", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isOperatingSystemName: { + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this operating system", + }, + + isOperatingSystemArchitecture: { + Type: schema.TypeString, + Computed: true, + Description: "The operating system architecture", + }, + + isOperatingSystemVersion: { + Type: schema.TypeString, + Computed: true, + Description: "The major release version of this operating system", + }, + isOperatingSystemDHOnly: { + Type: schema.TypeBool, + Computed: true, + Description: "Flag which shows images with this operating system can only be used on dedicated hosts or dedicated host groups", + }, + isOperatingSystemDisplayName: { + Type: schema.TypeString, + Computed: true, + Description: "A unique, display-friendly name for the operating system", + }, + isOperatingSystemFamily: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the software family this operating system belongs to", + }, + isOperatingSystemHref: { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this operating system", + }, + + isOperatingSystemVendor: { + Type: schema.TypeString, + Computed: true, + Description: "The vendor of the operating system", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISOperatingSystemsRead(d *schema.ResourceData, meta interface{}) error { + err := osList(d, meta) + if err != nil { + return err + } + return nil +} + +func osList(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcv1.OperatingSystem{} + for { + listOperatingSystemsOptions := &vpcv1.ListOperatingSystemsOptions{} + if start != "" { + listOperatingSystemsOptions.Start = &start + } + + osList, response, err := sess.ListOperatingSystems(listOperatingSystemsOptions) + if err != nil { + return fmt.Errorf("Error Fetching operating systems %s\n%s", err, response) + } + start = GetNext(osList.Next) + allrecs = append(allrecs, osList.OperatingSystems...) + if start == "" { + break + } + } + osInfo := make([]map[string]interface{}, 0) + for _, os := range allrecs { + l := map[string]interface{}{ + isOperatingSystemName: *os.Name, + isOperatingSystemArchitecture: *os.Architecture, + isOperatingSystemDHOnly: *os.DedicatedHostOnly, + isOperatingSystemFamily: *os.Family, + isOperatingSystemHref: *os.Href, + isOperatingSystemDisplayName: *os.DisplayName, + isOperatingSystemVendor: *os.Vendor, + isOperatingSystemVersion: *os.Version, + } + osInfo = append(osInfo, l) + } + d.SetId(dataSourceIBMISOperatingSystemsId(d)) + d.Set(isOperatingSystems, osInfo) + return nil +} + +// dataSourceIBMISOperatingSystemsId returns a reasonable ID for a os list. 
+func dataSourceIBMISOperatingSystemsId(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_public_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_public_gateway.go new file mode 100644 index 00000000000..3fe5e4bfa33 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_public_gateway.go @@ -0,0 +1,241 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISPublicGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISPublicGatewayRead, + + Schema: map[string]*schema.Schema{ + isPublicGatewayName: { + Type: schema.TypeString, + Required: true, + Description: "Public gateway Name", + }, + + isPublicGatewayFloatingIP: { + Type: schema.TypeMap, + Computed: true, + Description: "Public gateway floating IP", + }, + + isPublicGatewayStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Public gateway instance status", + }, + + isPublicGatewayResourceGroup: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Public gateway resource group info", + }, + + isPublicGatewayVPC: { + Type: schema.TypeString, + Computed: true, + Description: "Public gateway VPC info", + }, + + isPublicGatewayZone: { + Type: schema.TypeString, + Computed: true, + Description: "Public gateway zone info", + }, + + isPublicGatewayTags: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceIBMVPCHash, + Description: "Service tags for the public gateway instance", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func dataSourceIBMISPublicGatewayRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := d.Get(isPublicGatewayName).(string) + if userDetails.generation == 1 { + err := classicPublicGatewayGet(d, meta, name) + if err != nil { + return err + } + } else { + err := publicGatewayGet(d, meta, name) + if err != nil { + return err + } + } + return nil +} + +func classicPublicGatewayGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.PublicGateway{} + for { + listPublicGatewaysOptions := &vpcclassicv1.ListPublicGatewaysOptions{} + if start != "" { + listPublicGatewaysOptions.Start = &start + } + publicgws, response, err := sess.ListPublicGateways(listPublicGatewaysOptions) + if err != 
nil { + return fmt.Errorf("Error Fetching public gateways %s\n%s", err, response) + } + start = GetNext(publicgws.Next) + allrecs = append(allrecs, publicgws.PublicGateways...) + if start == "" { + break + } + } + for _, publicgw := range allrecs { + if *publicgw.Name == name { + d.SetId(*publicgw.ID) + d.Set(isPublicGatewayName, *publicgw.Name) + if publicgw.FloatingIP != nil { + floatIP := map[string]interface{}{ + "id": *publicgw.FloatingIP.ID, + isPublicGatewayFloatingIPAddress: *publicgw.FloatingIP.Address, + } + d.Set(isPublicGatewayFloatingIP, floatIP) + + } + d.Set(isPublicGatewayStatus, *publicgw.Status) + d.Set(isPublicGatewayZone, *publicgw.Zone.Name) + d.Set(isPublicGatewayVPC, *publicgw.VPC.ID) + tags, err := GetTagsUsingCRN(meta, *publicgw.CRN) + if err != nil { + log.Printf( + "Error on get of vpc public gateway (%s) tags: %s", *publicgw.ID, err) + } + d.Set(isPublicGatewayTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/publicGateways") + d.Set(ResourceName, *publicgw.Name) + d.Set(ResourceCRN, *publicgw.CRN) + d.Set(ResourceStatus, *publicgw.Status) + return nil + } + } + return fmt.Errorf("No Public Gateway found with name %s", name) +} + +func publicGatewayGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + rgroup := "" + if rg, ok := d.GetOk(isPublicGatewayResourceGroup); ok { + rgroup = rg.(string) + } + start := "" + allrecs := []vpcv1.PublicGateway{} + for { + listPublicGatewaysOptions := &vpcv1.ListPublicGatewaysOptions{} + if start != "" { + listPublicGatewaysOptions.Start = &start + } + if rgroup != "" { + listPublicGatewaysOptions.ResourceGroupID = &rgroup + } + publicgws, response, err := sess.ListPublicGateways(listPublicGatewaysOptions) + if err != nil { + return fmt.Errorf("Error Fetching public gateways %s\n%s", err, response) + } + start = GetNext(publicgws.Next) + allrecs = append(allrecs, publicgws.PublicGateways...) 
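+ // GetNext pulls the "start" token out of the collection's Next href; an
+ // empty token after the append above means this was the last page, so the
+ // loop exits with every gateway accumulated in allrecs.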
+ if start == "" { + break + } + } + for _, publicgw := range allrecs { + if *publicgw.Name == name { + d.SetId(*publicgw.ID) + d.Set(isPublicGatewayName, *publicgw.Name) + if publicgw.FloatingIP != nil { + floatIP := map[string]interface{}{ + "id": *publicgw.FloatingIP.ID, + isPublicGatewayFloatingIPAddress: *publicgw.FloatingIP.Address, + } + d.Set(isPublicGatewayFloatingIP, floatIP) + + } + d.Set(isPublicGatewayStatus, *publicgw.Status) + d.Set(isPublicGatewayZone, *publicgw.Zone.Name) + d.Set(isPublicGatewayVPC, *publicgw.VPC.ID) + tags, err := GetTagsUsingCRN(meta, *publicgw.CRN) + if err != nil { + log.Printf( + "Error on get of vpc public gateway (%s) tags: %s", *publicgw.ID, err) + } + d.Set(isPublicGatewayTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/publicGateways") + d.Set(ResourceName, *publicgw.Name) + d.Set(ResourceCRN, *publicgw.CRN) + d.Set(ResourceStatus, *publicgw.Status) + if publicgw.ResourceGroup != nil { + d.Set(isPublicGatewayResourceGroup, *publicgw.ResourceGroup.ID) + d.Set(ResourceGroupName, *publicgw.ResourceGroup.Name) + } + return nil + } + } + return fmt.Errorf("No Public gateway found with name %s", name) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_public_gateways.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_public_gateways.go new file mode 100644 index 00000000000..8c89d07ce0d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_public_gateways.go @@ -0,0 +1,199 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isPublicGateways = "public_gateways" +) + +func dataSourceIBMISPublicGateways() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISPublicGatewaysRead, + + Schema: map[string]*schema.Schema{ + isPublicGateways: { + Type: schema.TypeList, + Description: "List of public gateways", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Public gateway id", + }, + isPublicGatewayName: { + Type: schema.TypeString, + Computed: true, + Description: "Public gateway Name", + }, + + isPublicGatewayFloatingIP: { + Type: schema.TypeMap, + Computed: true, + Description: "Public gateway floating IP", + }, + + isPublicGatewayStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Public gateway instance status", + }, + + isPublicGatewayResourceGroup: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Public gateway resource group info", + }, + + isPublicGatewayVPC: { + Type: schema.TypeString, + Computed: true, + Description: "Public gateway VPC info", + }, + + isPublicGatewayZone: { + Type: schema.TypeString, + Computed: true, + Description: "Public gateway zone info", + }, + + isPublicGatewayTags: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceIBMVPCHash, + Description: "Service tags for the public gateway instance", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this 
instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISPublicGatewaysRead(d *schema.ResourceData, meta interface{}) error { + err := publicGatewaysGet(d, meta, name) + if err != nil { + return err + } + return nil +} + +func publicGatewaysGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + rgroup := "" + if rg, ok := d.GetOk(isPublicGatewayResourceGroup); ok { + rgroup = rg.(string) + } + start := "" + allrecs := []vpcv1.PublicGateway{} + for { + listPublicGatewaysOptions := &vpcv1.ListPublicGatewaysOptions{} + if start != "" { + listPublicGatewaysOptions.Start = &start + } + if rgroup != "" { + listPublicGatewaysOptions.ResourceGroupID = &rgroup + } + publicgws, response, err := sess.ListPublicGateways(listPublicGatewaysOptions) + if err != nil { + return fmt.Errorf("Error Fetching public gateways %s\n%s", err, response) + } + start = GetNext(publicgws.Next) + allrecs = append(allrecs, publicgws.PublicGateways...) + if start == "" { + break + } + } + publicgwInfo := make([]map[string]interface{}, 0) + for _, publicgw := range allrecs { + id := *publicgw.ID + l := map[string]interface{}{ + "id": id, + isPublicGatewayName: *publicgw.Name, + isPublicGatewayStatus: *publicgw.Status, + isPublicGatewayZone: *publicgw.Zone.Name, + isPublicGatewayVPC: *publicgw.VPC.ID, + + ResourceName: *publicgw.Name, + ResourceCRN: *publicgw.CRN, + ResourceStatus: *publicgw.Status, + } + if publicgw.FloatingIP != nil { + floatIP := map[string]interface{}{ + "id": *publicgw.FloatingIP.ID, + isPublicGatewayFloatingIPAddress: *publicgw.FloatingIP.Address, + } + l[isPublicGatewayFloatingIP] = floatIP + } + tags, err := GetTagsUsingCRN(meta, *publicgw.CRN) + if err != nil { + log.Printf( + "Error on get of vpc public gateway (%s) tags: %s", *publicgw.ID, err) + } + l[isPublicGatewayTags] = tags + controller, err := getBaseController(meta) + if err != nil { + return err + } + l[ResourceControllerURL] = controller + "/vpc-ext/network/publicGateways" + if publicgw.ResourceGroup != nil { + l[isPublicGatewayResourceGroup] = *publicgw.ResourceGroup.ID + l[ResourceGroupName] = *publicgw.ResourceGroup.Name + } + publicgwInfo = append(publicgwInfo, l) + } + d.SetId(dataSourceIBMISPublicGatewaysID(d)) + d.Set(isPublicGateways, publicgwInfo) + return nil +} + +// dataSourceIBMISPublicGatewaysID returns a reasonable ID for a Public Gateway list. +func dataSourceIBMISPublicGatewaysID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_region.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_region.go new file mode 100644 index 00000000000..fcdb84b75c7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_region.go @@ -0,0 +1,100 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isRegionEndpoint = "endpoint" + isRegionName = "name" + isRegionStatus = "status" +) + +func dataSourceIBMISRegion() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISRegionRead, + + Schema: map[string]*schema.Schema{ + + isRegionEndpoint: { + Type: schema.TypeString, + Computed: true, + }, + + isRegionName: { + Type: schema.TypeString, + Required: true, + }, + + isRegionStatus: { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMISRegionRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := d.Get("name").(string) + if userDetails.generation == 1 { + err := classicRegionGet(d, meta, name) + if err != nil { + return err + } + } else { + err := regionGet(d, meta, name) + if err != nil { + return err + } + } + return nil +} + +func classicRegionGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getRegionOptions := &vpcclassicv1.GetRegionOptions{ + Name: &name, + } + region, _, err := sess.GetRegion(getRegionOptions) + if err != nil { + return err + } + // For lack of anything better, compose our id from region name. + d.SetId(*region.Name) + d.Set(isRegionEndpoint, *region.Endpoint) + d.Set(isRegionName, *region.Name) + d.Set(isRegionStatus, *region.Status) + return nil +} + +func regionGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getRegionOptions := &vpcv1.GetRegionOptions{ + Name: &name, + } + region, _, err := sess.GetRegion(getRegionOptions) + if err != nil { + return err + } + // For lack of anything better, compose our id from region name. + d.SetId(*region.Name) + d.Set(isRegionEndpoint, *region.Endpoint) + d.Set(isRegionName, *region.Name) + d.Set(isRegionStatus, *region.Status) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_security_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_security_group.go new file mode 100644 index 00000000000..8c3c2607635 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_security_group.go @@ -0,0 +1,473 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
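The security-group data source that follows distinguishes the three rule variants by switching on `reflect.TypeOf(sgrule).String()`. A native Go type switch expresses the same dispatch without reflection or brittle type-name strings; a sketch over the same `vpcv1` types (`ruleSummary` is illustrative only):

```go
package example

import "github.com/IBM/vpc-go-sdk/vpcv1"

// ruleSummary shows a reflection-free equivalent of the dispatch used below.
func ruleSummary(sgrule vpcv1.SecurityGroupRuleIntf) map[string]interface{} {
	r := map[string]interface{}{}
	switch rule := sgrule.(type) {
	case *vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp:
		if rule.Type != nil {
			r["type"] = int(*rule.Type)
		}
		if rule.Code != nil {
			r["code"] = int(*rule.Code)
		}
	case *vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp:
		if rule.PortMin != nil {
			r["port_min"] = int(*rule.PortMin)
		}
		if rule.PortMax != nil {
			r["port_max"] = int(*rule.PortMax)
		}
	case *vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll:
		// no protocol-specific fields beyond direction and IP version
	}
	return r
}
```

The string comparison works because the SDK returns concrete pointer types, but it silently drops any variant added later; a type switch at least makes the unhandled case visible.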
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "reflect" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isSgName = "name" + isSgRules = "rules" + isSgRuleID = "rule_id" + isSgRuleDirection = "direction" + isSgRuleIPVersion = "ip_version" + isSgRuleRemote = "remote" + isSgRuleType = "type" + isSgRuleCode = "code" + isSgRulePortMax = "port_max" + isSgRulePortMin = "port_min" + isSgRuleProtocol = "protocol" + isSgVPC = "vpc" + isSgTags = "tags" + isSgCRN = "crn" +) + +func dataSourceIBMISSecurityGroup() *schema.Resource { + return &schema.Resource{ + + Read: dataSourceIBMISSecurityGroupRuleRead, + + Schema: map[string]*schema.Schema{ + + isSgName: { + Type: schema.TypeString, + Required: true, + Description: "Security group name", + }, + + isSecurityGroupVPC: { + Type: schema.TypeString, + Computed: true, + Description: "Security group's resource group id", + ForceNew: true, + }, + + isSgRules: { + Type: schema.TypeList, + Computed: true, + Description: "Security Rules", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + isSgRuleID: { + Type: schema.TypeString, + Computed: true, + Description: "Rule id", + }, + + isSgRuleDirection: { + Type: schema.TypeString, + Computed: true, + Description: "Direction of traffic to enforce, either inbound or outbound", + }, + + isSgRuleIPVersion: { + Type: schema.TypeString, + Computed: true, + Description: "IP version: ipv4 or ipv6", + }, + + isSgRuleRemote: { + Type: schema.TypeString, + Computed: true, + Description: "Security group id: an IP address, a CIDR block, or a single security group identifier", + }, + + isSgRuleType: { + Type: schema.TypeInt, + Computed: true, + }, + + isSgRuleCode: { + Type: schema.TypeInt, + Computed: true, + }, + + isSgRulePortMin: { + Type: schema.TypeInt, + Computed: true, + }, + + isSgRulePortMax: { + Type: schema.TypeInt, + Computed: true, + }, + + isSgRuleProtocol: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + + isSgTags: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceIBMVPCHash, + Description: "List of tags", + }, + + isSgCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + }, + } +} + +func dataSourceIBMISSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + sgName := d.Get(isSgName).(string) + if userDetails.generation == 1 { + err := classicSecurityGroupGet(d, meta, sgName) + if err != nil { + return err + } + } else { + err := securityGroupGet(d, meta, sgName) + if err != nil { + return err + } + } + return nil +} + +func classicSecurityGroupGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) 
+ if err != nil {
+ return err
+ }
+
+ listSgOptions := &vpcclassicv1.ListSecurityGroupsOptions{}
+ sgs, _, err := sess.ListSecurityGroups(listSgOptions)
+ if err != nil {
+ return err
+ }
+
+ for _, group := range sgs.SecurityGroups {
+ if *group.Name == name {
+
+ d.Set(isSgName, *group.Name)
+ d.Set(isSgVPC, *group.VPC.ID)
+ d.Set(isSgCRN, *group.CRN)
+ tags, err := GetTagsUsingCRN(meta, *group.CRN)
+ if err != nil {
+ log.Printf(
+ "An error occurred during reading of security group (%s) tags : %s", *group.ID, err)
+ }
+ d.Set(isSgTags, tags)
+ rules := make([]map[string]interface{}, 0)
+ for _, sgrule := range group.Rules {
+ switch reflect.TypeOf(sgrule).String() {
+ case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp":
+ {
+
+ rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp)
+ r := make(map[string]interface{})
+ if rule.Code != nil {
+ r[isSgRuleCode] = int(*rule.Code)
+ }
+ if rule.Type != nil {
+ r[isSgRuleType] = int(*rule.Type)
+ }
+ r[isSgRuleDirection] = *rule.Direction
+ r[isSgRuleIPVersion] = *rule.IPVersion
+ if rule.Protocol != nil {
+ r[isSgRuleProtocol] = *rule.Protocol
+ }
+ r[isSgRuleID] = *rule.ID
+ remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote)
+ if ok {
+ if remote != nil && reflect.ValueOf(remote).IsNil() == false {
+ if remote.ID != nil {
+ r[isSgRuleRemote] = remote.ID
+ } else if remote.Address != nil {
+ r[isSgRuleRemote] = remote.Address
+ } else if remote.CIDRBlock != nil {
+ r[isSgRuleRemote] = remote.CIDRBlock
+ }
+ }
+ }
+ rules = append(rules, r)
+ }
+
+ case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll":
+ {
+
+ rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll)
+ r := make(map[string]interface{})
+ r[isSgRuleDirection] = *rule.Direction
+ r[isSgRuleIPVersion] = *rule.IPVersion
+ if rule.Protocol != nil {
+ r[isSgRuleProtocol] = *rule.Protocol
+ }
+ r[isSgRuleID] = *rule.ID
+ remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote)
+ if ok {
+ if remote != nil && reflect.ValueOf(remote).IsNil() == false {
+ if remote.ID != nil {
+ r[isSgRuleRemote] = remote.ID
+ } else if remote.Address != nil {
+ r[isSgRuleRemote] = remote.Address
+ } else if remote.CIDRBlock != nil {
+ r[isSgRuleRemote] = remote.CIDRBlock
+ }
+ }
+ }
+ rules = append(rules, r)
+ }
+
+ case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp":
+ {
+ rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp)
+ r := make(map[string]interface{})
+ if rule.PortMin != nil {
+ r[isSgRulePortMin] = int(*rule.PortMin)
+ }
+ if rule.PortMax != nil {
+ r[isSgRulePortMax] = int(*rule.PortMax)
+ }
+ r[isSgRuleDirection] = *rule.Direction
+ r[isSgRuleIPVersion] = *rule.IPVersion
+ if rule.Protocol != nil {
+ r[isSgRuleProtocol] = *rule.Protocol
+ }
+
+ r[isSgRuleID] = *rule.ID
+ remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote)
+ if ok {
+ if remote != nil && reflect.ValueOf(remote).IsNil() == false {
+ if remote.ID != nil {
+ r[isSgRuleRemote] = remote.ID
+ } else if remote.Address != nil {
+ r[isSgRuleRemote] = remote.Address
+ } else if remote.CIDRBlock != nil {
+ r[isSgRuleRemote] = remote.CIDRBlock
+ }
+ }
+ }
+ rules = append(rules, r)
+ }
+ }
+ }
+
+ d.Set(isSgRules, rules)
+ d.SetId(*group.ID)
+
+ if group.ResourceGroup != nil {
+ rsMangClient, err := meta.(ClientSession).ResourceManagementAPIv2()
+ if err != nil {
+ return err
+ }
+ grp, err := rsMangClient.ResourceGroup().Get(*group.ResourceGroup.ID)
+ if err != nil {
+ return err
+ }
+ d.Set(ResourceGroupName, grp.Name)
+ }
+
+ controller, err := getBaseController(meta)
+ if err != nil {
+ return err
+ }
+ d.Set(ResourceControllerURL, controller+"/vpc/network/securityGroups")
+ if group.Name != nil {
+ d.Set(ResourceName, *group.Name)
+ }
+
+ if group.CRN != nil {
+ d.Set(ResourceCRN, *group.CRN)
+ }
+ return nil
+ }
+ }
+
+ return nil
+}
+
+func securityGroupGet(d *schema.ResourceData, meta interface{}, name string) error {
+ sess, err := vpcClient(meta)
+ if err != nil {
+ return err
+ }
+
+ listSgOptions := &vpcv1.ListSecurityGroupsOptions{}
+ sgs, _, err := sess.ListSecurityGroups(listSgOptions)
+ if err != nil {
+ return err
+ }
+
+ for _, group := range sgs.SecurityGroups {
+ if *group.Name == name {
+
+ d.Set(isSgName, *group.Name)
+ d.Set(isSgVPC, *group.VPC.ID)
+ d.Set(isSgCRN, *group.CRN)
+ tags, err := GetTagsUsingCRN(meta, *group.CRN)
+ if err != nil {
+ log.Printf(
+ "An error occurred during reading of security group (%s) tags : %s", *group.ID, err)
+ }
+ d.Set(isSgTags, tags)
+ rules := make([]map[string]interface{}, 0)
+ for _, sgrule := range group.Rules {
+ switch reflect.TypeOf(sgrule).String() {
+ case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp":
+ {
+ rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp)
+ r := make(map[string]interface{})
+ if rule.Code != nil {
+ r[isSgRuleCode] = int(*rule.Code)
+ }
+ if rule.Type != nil {
+ r[isSgRuleType] = int(*rule.Type)
+ }
+ r[isSgRuleDirection] = *rule.Direction
+ r[isSgRuleIPVersion] = *rule.IPVersion
+ if rule.Protocol != nil {
+ r[isSgRuleProtocol] = *rule.Protocol
+ }
+ r[isSgRuleID] = *rule.ID
+ remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote)
+ if ok {
+ if remote != nil && reflect.ValueOf(remote).IsNil() == false {
+ if remote.ID != nil {
+ r[isSgRuleRemote] = remote.ID
+ } else if remote.Address != nil {
+ r[isSgRuleRemote] = remote.Address
+ } else if remote.CIDRBlock != nil {
+ r[isSgRuleRemote] = remote.CIDRBlock
+ }
+ }
+ }
+ rules = append(rules, r)
+ }
+
+ case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll":
+ {
+ rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll)
+ r := make(map[string]interface{})
+ r[isSgRuleDirection] = *rule.Direction
+ r[isSgRuleIPVersion] = *rule.IPVersion
+ if rule.Protocol != nil {
+ r[isSgRuleProtocol] = *rule.Protocol
+ }
+ r[isSgRuleID] = *rule.ID
+ remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote)
+ if ok {
+ if remote != nil && reflect.ValueOf(remote).IsNil() == false {
+ if remote.ID != nil {
+ r[isSgRuleRemote] = remote.ID
+ } else if remote.Address != nil {
+ r[isSgRuleRemote] = remote.Address
+ } else if remote.CIDRBlock != nil {
+ r[isSgRuleRemote] = remote.CIDRBlock
+ }
+ }
+ }
+ rules = append(rules, r)
+ }
+
+ case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp":
+ {
+ rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp)
+ r := make(map[string]interface{})
+ if rule.PortMin != nil {
+ r[isSgRulePortMin] = int(*rule.PortMin)
+ }
+ if rule.PortMax != nil {
+ r[isSgRulePortMax] = int(*rule.PortMax)
+ }
+ r[isSgRuleDirection] = *rule.Direction
+ r[isSgRuleIPVersion] = *rule.IPVersion
+ if rule.Protocol != nil {
+ r[isSgRuleProtocol] = *rule.Protocol
+ }
+ r[isSgRuleID] = *rule.ID
+ remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote)
+ if ok {
+ if remote != nil && reflect.ValueOf(remote).IsNil() == false {
+ if remote.ID != nil {
+ r[isSgRuleRemote] = remote.ID
+ } else if remote.Address != nil {
+ r[isSgRuleRemote] = remote.Address
+ } else if remote.CIDRBlock != nil {
+ r[isSgRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + } + } + + d.Set(isSgRules, rules) + d.SetId(*group.ID) + + if group.ResourceGroup != nil { + rsMangClient, err := meta.(ClientSession).ResourceManagementAPIv2() + if err != nil { + return err + } + grp, err := rsMangClient.ResourceGroup().Get(*group.ResourceGroup.ID) + if err != nil { + return err + } + d.Set(ResourceGroupName, grp.Name) + } + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/securityGroups") + if group.Name != nil { + d.Set(ResourceName, *group.Name) + } + + if group.CRN != nil { + d.Set(ResourceCRN, *group.CRN) + } + return nil + } + } + + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_security_group_target.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_security_group_target.go new file mode 100644 index 00000000000..6d5f35f1a78 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_security_group_target.go @@ -0,0 +1,100 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISSecurityGroupTarget() *schema.Resource { + return &schema.Resource{ + + Read: dataSourceIBMISSecurityGroupTargetRead, + + Schema: map[string]*schema.Schema{ + + "security_group": { + Type: schema.TypeString, + Required: true, + Description: "Security group id", + }, + + "target": { + Type: schema.TypeString, + Computed: true, + Description: "security group target identifier", + }, + + "name": { + Type: schema.TypeString, + Required: true, + Description: "Security group target name", + }, + + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "Resource Type", + }, + + "more_info": { + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources", + }, + }, + } +} + +func dataSourceIBMISSecurityGroupTargetRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + securityGroupID := d.Get("security_group").(string) + name := d.Get("name").(string) + + // Support for pagination + start := "" + allrecs := []vpcv1.SecurityGroupTargetReferenceIntf{} + + for { + listSecurityGroupTargetsOptions := sess.NewListSecurityGroupTargetsOptions(securityGroupID) + + groups, response, err := sess.ListSecurityGroupTargets(listSecurityGroupTargetsOptions) + if err != nil { + return fmt.Errorf("Error Getting InstanceGroup Managers %s\n%s", err, response) + } + if *groups.TotalCount == int64(0) { + break + } + + start = GetNext(groups.Next) + allrecs = append(allrecs, groups.Targets...) 
+
+		if start == "" {
+			break
+		}
+
+	}
+
+	for _, securityGroupTargetReferenceIntf := range allrecs {
+		securityGroupTargetReference := securityGroupTargetReferenceIntf.(*vpcv1.SecurityGroupTargetReference)
+		if *securityGroupTargetReference.Name == name {
+			d.Set("target", *securityGroupTargetReference.ID)
+			d.Set("resource_type", *securityGroupTargetReference.ResourceType)
+			if securityGroupTargetReference.Deleted != nil {
+				d.Set("more_info", *securityGroupTargetReference.Deleted.MoreInfo)
+			}
+			d.SetId(fmt.Sprintf("%s/%s", securityGroupID, *securityGroupTargetReference.ID))
+			return nil
+		}
+	}
+	return fmt.Errorf("Security Group Target %s not found", name)
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_security_group_targets.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_security_group_targets.go
new file mode 100644
index 00000000000..70716f3fa66
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_security_group_targets.go
@@ -0,0 +1,112 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/IBM/vpc-go-sdk/vpcv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMISSecurityGroupTargets() *schema.Resource {
+	return &schema.Resource{
+
+		Read: dataSourceIBMISSecurityGroupTargetsRead,
+
+		Schema: map[string]*schema.Schema{
+
+			"security_group": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Security group id",
+			},
+
+			"targets": {
+				Type:        schema.TypeList,
+				Description: "List of targets",
+				Computed:    true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+
+						"target": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Security group target identifier",
+						},
+
+						"name": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Security group target name",
+						},
+
+						"resource_type": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Resource Type",
+						},
+
+						"more_info": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Link to documentation about deleted resources",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func dataSourceIBMISSecurityGroupTargetsRead(d *schema.ResourceData, meta interface{}) error {
+
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	securityGroupID := d.Get("security_group").(string)
+
+	// Support for pagination
+	start := ""
+	allrecs := []vpcv1.SecurityGroupTargetReferenceIntf{}
+
+	for {
+		listSecurityGroupTargetsOptions := sess.NewListSecurityGroupTargetsOptions(securityGroupID)
+		if start != "" {
+			// Carry the token from the previous page forward so the loop advances.
+			listSecurityGroupTargetsOptions.Start = &start
+		}
+
+		groups, response, err := sess.ListSecurityGroupTargets(listSecurityGroupTargetsOptions)
+		if err != nil || groups == nil {
+			return fmt.Errorf("Error getting security group targets %s\n%s", err, response)
+		}
+		if *groups.TotalCount == int64(0) {
+			break
+		}
+
+		start = GetNext(groups.Next)
+		allrecs = append(allrecs, groups.Targets...)
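+		// Keep requesting pages until the service stops returning a next-page token.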
+ + if start == "" { + break + } + + } + + targets := make([]map[string]interface{}, 0) + for _, securityGroupTargetReferenceIntf := range allrecs { + securityGroupTargetReference := securityGroupTargetReferenceIntf.(*vpcv1.SecurityGroupTargetReference) + tr := map[string]interface{}{ + "name": *securityGroupTargetReference.Name, + "target": *securityGroupTargetReference.ID, + "resource_type": *securityGroupTargetReference.ResourceType, + } + if securityGroupTargetReference.Deleted != nil { + tr["more_info"] = *securityGroupTargetReference.Deleted.MoreInfo + } + targets = append(targets, tr) + } + d.Set("targets", targets) + d.SetId(securityGroupID) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_ssh_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_ssh_key.go new file mode 100644 index 00000000000..fae191f0390 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_ssh_key.go @@ -0,0 +1,178 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISSSHKey() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISSSHKeyRead, + + Schema: map[string]*schema.Schema{ + isKeyName: { + Type: schema.TypeString, + Required: true, + Description: "The name of the ssh key", + }, + + isKeyType: { + Type: schema.TypeString, + Computed: true, + Description: "The ssh key type", + }, + + isKeyFingerprint: { + Type: schema.TypeString, + Computed: true, + Description: "The ssh key Fingerprint", + }, + + isKeyPublicKey: { + Type: schema.TypeString, + Computed: true, + Description: "SSH Public key data", + }, + + isKeyLength: { + Type: schema.TypeInt, + Computed: true, + Description: "The ssh key length", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func dataSourceIBMISSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := d.Get(isKeyName).(string) + if userDetails.generation == 1 { + err := classicKeyGetByName(d, meta, name) + if err != nil { + return err + } + } else { + err := keyGetByName(d, meta, name) + if err != nil { + return err + } + } + return nil +} + +func classicKeyGetByName(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.Key{} + for { + listKeysOptions := &vpcclassicv1.ListKeysOptions{} + if start != "" { + listKeysOptions.Start = &start + } + keys, response, err := sess.ListKeys(listKeysOptions) + if err != nil { + return fmt.Errorf("Error Fetching Keys %s\n%s", err, response) + } + start = GetNext(keys.Next) + allrecs = 
append(allrecs, keys.Keys...) + if start == "" { + break + } + } + for _, key := range allrecs { + if *key.Name == name { + d.SetId(*key.ID) + d.Set("name", *key.Name) + d.Set(isKeyType, *key.Type) + d.Set(isKeyFingerprint, *key.Fingerprint) + d.Set(isKeyLength, *key.Length) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/compute/sshKeys") + d.Set(ResourceName, *key.Name) + d.Set(ResourceCRN, *key.CRN) + if key.ResourceGroup != nil { + d.Set(ResourceGroupName, *key.ResourceGroup.ID) + } + if key.PublicKey != nil { + d.Set(isKeyPublicKey, *key.PublicKey) + } + return nil + } + } + return fmt.Errorf("No SSH Key found with name %s", name) +} + +func keyGetByName(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + listKeysOptions := &vpcv1.ListKeysOptions{} + keys, response, err := sess.ListKeys(listKeysOptions) + if err != nil { + return fmt.Errorf("Error Fetching Keys %s\n%s", err, response) + } + for _, key := range keys.Keys { + if *key.Name == name { + d.SetId(*key.ID) + d.Set("name", *key.Name) + d.Set(isKeyType, *key.Type) + d.Set(isKeyFingerprint, *key.Fingerprint) + d.Set(isKeyLength, *key.Length) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/compute/sshKeys") + d.Set(ResourceName, *key.Name) + d.Set(ResourceCRN, *key.CRN) + if key.ResourceGroup != nil { + d.Set(ResourceGroupName, *key.ResourceGroup.ID) + } + if key.PublicKey != nil { + d.Set(isKeyPublicKey, *key.PublicKey) + } + return nil + } + } + return fmt.Errorf("No SSH Key found with name %s", name) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnet.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnet.go new file mode 100644 index 00000000000..996ac747936 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnet.go @@ -0,0 +1,308 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISSubnet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISSubnetRead, + + Schema: map[string]*schema.Schema{ + + "identifier": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{isSubnetName, "identifier"}, + ValidateFunc: InvokeDataSourceValidator("ibm_is_subnet", "identifier"), + }, + + isSubnetIpv4CidrBlock: { + Type: schema.TypeString, + Computed: true, + }, + + isSubnetIpv6CidrBlock: { + Type: schema.TypeString, + Computed: true, + }, + + isSubnetAvailableIpv4AddressCount: { + Type: schema.TypeInt, + Computed: true, + }, + + isSubnetTotalIpv4AddressCount: { + Type: schema.TypeInt, + Computed: true, + }, + + isSubnetName: { + Type: schema.TypeString, + Computed: true, + Optional: true, + ExactlyOneOf: []string{isSubnetName, "identifier"}, + ValidateFunc: InvokeDataSourceValidator("ibm_is_subnet", isSubnetName), + }, + + isSubnetTags: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceIBMVPCHash, + Description: "List of tags", + }, + + isSubnetCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + isSubnetNetworkACL: { + Type: schema.TypeString, + Computed: true, + }, + + isSubnetPublicGateway: { + Type: schema.TypeString, + Computed: true, + }, + + isSubnetStatus: { + Type: schema.TypeString, + Computed: true, + }, + + isSubnetVPC: { + Type: schema.TypeString, + Computed: true, + }, + + isSubnetZone: { + Type: schema.TypeString, + Computed: true, + }, + + isSubnetResourceGroup: { + Type: schema.TypeString, + Computed: true, + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func dataSourceIBMISSubnetValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "identifier", + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSubnetName, + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString}) + + ibmISSubnetDataSourceValidator := ResourceValidator{ResourceName: "ibm_is_subnet", Schema: validateSchema} + return &ibmISSubnetDataSourceValidator +} + +func dataSourceIBMISSubnetRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + if userDetails.generation == 1 { + err := classicSubnetGetByNameOrID(d, meta) + if err != nil { + return err + } + } else { + err := subnetGetByNameOrID(d, meta) + if err != nil { + return err + } + } + return nil +} + 
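+// classicSubnetGetByNameOrID resolves a Generation 1 subnet either by the
+// "identifier" argument or by name, then populates the data source attributes.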
+func classicSubnetGetByNameOrID(d *schema.ResourceData, meta interface{}) error {
+	sess, err := classicVpcClient(meta)
+	if err != nil {
+		return err
+	}
+	var subnet *vpcclassicv1.Subnet
+
+	if v, ok := d.GetOk("identifier"); ok {
+		id := v.(string)
+		getSubnetOptions := &vpcclassicv1.GetSubnetOptions{
+			ID: &id,
+		}
+		subnetinfo, response, err := sess.GetSubnet(getSubnetOptions)
+		if err != nil {
+			return fmt.Errorf("Error Getting Subnet (%s): %s\n%s", id, err, response)
+		}
+		subnet = subnetinfo
+	} else if v, ok := d.GetOk(isSubnetName); ok {
+		name := v.(string)
+		getSubnetsListOptions := &vpcclassicv1.ListSubnetsOptions{}
+		subnetsCollection, response, err := sess.ListSubnets(getSubnetsListOptions)
+		if err != nil {
+			return fmt.Errorf("Error Getting Subnets List : %s\n%s", err, response)
+		}
+		for _, subnetInfo := range subnetsCollection.Subnets {
+			if *subnetInfo.Name == name {
+				subnet = &subnetInfo
+				break
+			}
+		}
+		if subnet == nil {
+			return fmt.Errorf("No subnet found with name (%s)", name)
+		}
+	}
+	d.SetId(*subnet.ID)
+	d.Set(isSubnetName, *subnet.Name)
+	d.Set(isSubnetIpv4CidrBlock, *subnet.Ipv4CIDRBlock)
+	d.Set(isSubnetAvailableIpv4AddressCount, *subnet.AvailableIpv4AddressCount)
+	d.Set(isSubnetTotalIpv4AddressCount, *subnet.TotalIpv4AddressCount)
+	if subnet.NetworkACL != nil {
+		d.Set(isSubnetNetworkACL, *subnet.NetworkACL.ID)
+	}
+	if subnet.PublicGateway != nil {
+		d.Set(isSubnetPublicGateway, *subnet.PublicGateway.ID)
+	} else {
+		d.Set(isSubnetPublicGateway, nil)
+	}
+	d.Set(isSubnetStatus, *subnet.Status)
+	d.Set(isSubnetZone, *subnet.Zone.Name)
+	d.Set(isSubnetVPC, *subnet.VPC.ID)
+
+	controller, err := getBaseController(meta)
+	if err != nil {
+		return err
+	}
+	tags, err := GetTagsUsingCRN(meta, *subnet.CRN)
+	if err != nil {
+		log.Printf(
+			"An error occurred while reading subnet (%s) tags: %s", d.Id(), err)
+	}
+	d.Set(isSubnetTags, tags)
+	d.Set(isSubnetCRN, *subnet.CRN)
+	d.Set(ResourceControllerURL, controller+"/vpc/network/subnets")
+	d.Set(ResourceName, *subnet.Name)
+	d.Set(ResourceCRN, *subnet.CRN)
+	d.Set(ResourceStatus, *subnet.Status)
+	return nil
+}
+
+func subnetGetByNameOrID(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	var subnet *vpcv1.Subnet
+
+	if v, ok := d.GetOk("identifier"); ok {
+		id := v.(string)
+		getSubnetOptions := &vpcv1.GetSubnetOptions{
+			ID: &id,
+		}
+		subnetinfo, response, err := sess.GetSubnet(getSubnetOptions)
+		if err != nil {
+			return fmt.Errorf("Error Getting Subnet (%s): %s\n%s", id, err, response)
+		}
+		subnet = subnetinfo
+	} else if v, ok := d.GetOk(isSubnetName); ok {
+		name := v.(string)
+		getSubnetsListOptions := &vpcv1.ListSubnetsOptions{}
+		subnetsCollection, response, err := sess.ListSubnets(getSubnetsListOptions)
+		if err != nil {
+			return fmt.Errorf("Error Getting Subnets List : %s\n%s", err, response)
+		}
+		for _, subnetInfo := range subnetsCollection.Subnets {
+			if *subnetInfo.Name == name {
+				subnet = &subnetInfo
+				break
+			}
+		}
+		if subnet == nil {
+			return fmt.Errorf("No subnet found with name (%s)", name)
+		}
+	}
+
+	d.SetId(*subnet.ID)
+	d.Set(isSubnetName, *subnet.Name)
+	d.Set(isSubnetIpv4CidrBlock, *subnet.Ipv4CIDRBlock)
+	d.Set(isSubnetAvailableIpv4AddressCount, *subnet.AvailableIpv4AddressCount)
+	d.Set(isSubnetTotalIpv4AddressCount, *subnet.TotalIpv4AddressCount)
+	if subnet.NetworkACL != nil {
+		d.Set(isSubnetNetworkACL, *subnet.NetworkACL.ID)
+	}
+	if subnet.PublicGateway != nil {
+		d.Set(isSubnetPublicGateway, *subnet.PublicGateway.ID)
+	} else {
+		d.Set(isSubnetPublicGateway, nil)
+	}
+	d.Set(isSubnetStatus, *subnet.Status)
+	d.Set(isSubnetZone, *subnet.Zone.Name)
+	d.Set(isSubnetVPC, *subnet.VPC.ID)
+
+	controller, err := getBaseController(meta)
+	if err != nil {
+		return err
+	}
+	tags, err := GetTagsUsingCRN(meta, *subnet.CRN)
+	if err != nil {
+		log.Printf(
+			"An error occurred while reading subnet (%s) tags: %s", d.Id(), err)
+	}
+	d.Set(isSubnetTags, tags)
+	d.Set(isSubnetCRN, *subnet.CRN)
+	d.Set(ResourceControllerURL, controller+"/vpc-ext/network/subnets")
+	d.Set(ResourceName, *subnet.Name)
+	d.Set(ResourceCRN, *subnet.CRN)
+	d.Set(ResourceStatus, *subnet.Status)
+	if subnet.ResourceGroup != nil {
+		d.Set(isSubnetResourceGroup, *subnet.ResourceGroup.ID)
+		d.Set(ResourceGroupName, *subnet.ResourceGroup.Name)
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnet_reserved_ip.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnet_reserved_ip.go
new file mode 100644
index 00000000000..f8b65edf4b1
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnet_reserved_ip.go
@@ -0,0 +1,132 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/IBM/vpc-go-sdk/vpcv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+// Define all the constants that match the given Terraform attributes
+const (
+	// Request Param Constants
+	isSubNetID     = "subnet"
+	isReservedIPID = "reserved_ip"
+
+	// Response Param Constants
+	isReservedIPAddress    = "address"
+	isReservedIPAutoDelete = "auto_delete"
+	isReservedIPCreatedAt  = "created_at"
+	isReservedIPhref       = "href"
+	isReservedIPName       = "name"
+	isReservedIPOwner      = "owner"
+	isReservedIPType       = "resource_type"
+)
+
+func dataSourceIBMISReservedIP() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMISReservedIPRead,
+		Schema: map[string]*schema.Schema{
+			/*
+				Request Parameters
+				==================
+				These are mandatory request parameters
+			*/
+			isSubNetID: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The subnet identifier.",
+			},
+			isReservedIPID: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The reserved IP identifier.",
+			},
+
+			/*
+				Response Parameters
+				===================
+				All of these are computed; a user does not need to
+				provide these from outside.
+			*/
+
+			isReservedIPAddress: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The IP address",
+			},
+			isReservedIPAutoDelete: {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "If set to true, this reserved IP will be automatically deleted",
+			},
+			isReservedIPCreatedAt: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The date and time that the reserved IP was created.",
+			},
+			isReservedIPhref: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL for this reserved IP.",
+			},
+			isReservedIPName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The user-defined or system-provided name for this reserved IP.",
+			},
+			isReservedIPOwner: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The owner of a reserved IP, defining whether it is managed by the user or the provider.",
+			},
+			isReservedIPType: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The resource type.",
+			},
+			isReservedIPTarget: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Reserved IP target id.",
+			},
+		},
+	}
+}
+
+// dataSourceIBMISReservedIPRead reads a single reserved IP in the given subnet.
+func dataSourceIBMISReservedIPRead(d *schema.ResourceData, meta interface{}) error {
+
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	subnetID := d.Get(isSubNetID).(string)
+	reservedIPID := d.Get(isReservedIPID).(string)
+
+	options := sess.NewGetSubnetReservedIPOptions(subnetID, reservedIPID)
+	reserveIP, response, err := sess.GetSubnetReservedIP(options)
+
+	if err != nil || response == nil || reserveIP == nil {
+		return fmt.Errorf("Error fetching the reserved IP %s\n%s", err, response)
+	}
+
+	d.SetId(*reserveIP.ID)
+	d.Set(isReservedIPAutoDelete, *reserveIP.AutoDelete)
+	d.Set(isReservedIPCreatedAt, (*reserveIP.CreatedAt).String())
+	d.Set(isReservedIPhref, *reserveIP.Href)
+	d.Set(isReservedIPName, *reserveIP.Name)
+	d.Set(isReservedIPOwner, *reserveIP.Owner)
+	d.Set(isReservedIPType, *reserveIP.ResourceType)
+	if reserveIP.Target != nil {
+		target, ok := reserveIP.Target.(*vpcv1.ReservedIPTarget)
+		if ok {
+			d.Set(isReservedIPTarget, target.ID)
+		}
+	}
+	return nil // By default there should be no error
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnet_reserved_ips.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnet_reserved_ips.go
new file mode 100644
index 00000000000..46fc38d9b29
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnet_reserved_ips.go
@@ -0,0 +1,160 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/IBM/vpc-go-sdk/vpcv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+// Define all the constants that match the given Terraform attributes
+const (
+	// Request Param Constants
+	isReservedIPLimit  = "limit"
+	isReservedIPSort   = "sort"
+	isReservedIPs      = "reserved_ips"
+	isReservedIPsCount = "total_count"
+)
+
+func dataSourceIBMISReservedIPs() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMISReservedIPsRead,
+		Schema: map[string]*schema.Schema{
+			/*
+				Request Parameters
+				==================
+				These are mandatory request parameters
+			*/
+			isSubNetID: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The subnet identifier.",
+			},
+			/*
+				Response Parameters
+				===================
+				All of these are computed; a user does not need to
+				provide these from outside.
+			*/
+
+			isReservedIPs: {
+				Type:        schema.TypeList,
+				Description: "Collection of reserved IPs in this subnet.",
+				Computed:    true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						isReservedIPAddress: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The IP address",
+						},
+						isReservedIPAutoDelete: {
+							Type:        schema.TypeBool,
+							Computed:    true,
+							Description: "If set to true, this reserved IP will be automatically deleted",
+						},
+						isReservedIPCreatedAt: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The date and time that the reserved IP was created.",
+						},
+						isReservedIPhref: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The URL for this reserved IP.",
+						},
+						isReservedIPID: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The unique identifier for this reserved IP",
+						},
+						isReservedIPName: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The user-defined or system-provided name for this reserved IP.",
+						},
+						isReservedIPOwner: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The owner of a reserved IP, defining whether it is managed by the user or the provider.",
+						},
+						isReservedIPType: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The resource type.",
+						},
+						isReservedIPTarget: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Reserved IP target id",
+						},
+					},
+				},
+			},
+			isReservedIPsCount: {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The total number of resources across all pages",
+			},
+		},
+	}
+}
+
+func dataSourceIBMISReservedIPsRead(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	subnetID := d.Get(isSubNetID).(string)
+
+	// Flatten all the reserved IPs
+	start := ""
+	allrecs := []vpcv1.ReservedIP{}
+	for {
+		options := &vpcv1.ListSubnetReservedIpsOptions{SubnetID: &subnetID}
+
+		if start != "" {
+			options.Start = &start
+		}
+
+		result, response, err := sess.ListSubnetReservedIps(options)
+		if err != nil || response == nil || result == nil {
+			return fmt.Errorf("Error fetching reserved ips %s\n%s", err, response)
+		}
+		start = GetNext(result.Next)
+		allrecs = append(allrecs, result.ReservedIps...)
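+		// Stop paging once the response no longer includes a next-page token.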
+ if start == "" { + break + } + } + + // Now store all the reserved IP info with their response tags + reservedIPs := []map[string]interface{}{} + for _, data := range allrecs { + ipsOutput := map[string]interface{}{} + ipsOutput[isReservedIPAddress] = *data.Address + ipsOutput[isReservedIPAutoDelete] = *data.AutoDelete + ipsOutput[isReservedIPCreatedAt] = (*data.CreatedAt).String() + ipsOutput[isReservedIPhref] = *data.Href + ipsOutput[isReservedIPID] = *data.ID + ipsOutput[isReservedIPName] = *data.Name + ipsOutput[isReservedIPOwner] = *data.Owner + ipsOutput[isReservedIPType] = *data.ResourceType + target, ok := data.Target.(*vpcv1.ReservedIPTarget) + if ok { + ipsOutput[isReservedIPTarget] = target.ID + } + reservedIPs = append(reservedIPs, ipsOutput) + } + + d.SetId(time.Now().UTC().String()) // This is not any reserved ip or subnet id but state id + d.Set(isReservedIPs, reservedIPs) + d.Set(isReservedIPsCount, len(reservedIPs)) + d.Set(isSubNetID, subnetID) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnets.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnets.go new file mode 100644 index 00000000000..a22fb234f34 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_subnets.go @@ -0,0 +1,214 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strconv" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isSubnets = "subnets" +) + +func dataSourceIBMISSubnets() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISSubnetsRead, + + Schema: map[string]*schema.Schema{ + + isSubnets: { + Type: schema.TypeList, + Description: "List of subnets", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "crn": { + Type: schema.TypeString, + Computed: true, + }, + "ipv4_cidr_block": { + Type: schema.TypeString, + Computed: true, + }, + "ipv6_cidr_block": { + Type: schema.TypeString, + Computed: true, + }, + "available_ipv4_address_count": { + Type: schema.TypeString, + Computed: true, + }, + "network_acl": { + Type: schema.TypeString, + Computed: true, + }, + "public_gateway": { + Type: schema.TypeString, + Computed: true, + }, + "resource_group": { + Type: schema.TypeString, + Computed: true, + }, + "total_ipv4_address_count": { + Type: schema.TypeString, + Computed: true, + }, + "vpc": { + Type: schema.TypeString, + Computed: true, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISSubnetsRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + if userDetails.generation == 1 { + err := classicSubnetList(d, meta) + if err != nil { + return err + } + } else { + err := subnetList(d, meta) + if err != nil { + return err + } + } + return nil +} + +func classicSubnetList(d *schema.ResourceData, meta interface{}) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.Subnet{} + for { + options := 
&vpcclassicv1.ListSubnetsOptions{} + if start != "" { + options.Start = &start + } + subnets, response, err := sess.ListSubnets(options) + if err != nil { + return fmt.Errorf("Error Fetching subnets %s\n%s", err, response) + } + start = GetNext(subnets.Next) + allrecs = append(allrecs, subnets.Subnets...) + if start == "" { + break + } + } + subnetsInfo := make([]map[string]interface{}, 0) + for _, subnet := range allrecs { + var aac string = strconv.FormatInt(*subnet.AvailableIpv4AddressCount, 10) + var tac string = strconv.FormatInt(*subnet.TotalIpv4AddressCount, 10) + + l := map[string]interface{}{ + "name": *subnet.Name, + "id": *subnet.ID, + "status": *subnet.Status, + "crn": *subnet.CRN, + "ipv4_cidr_block": *subnet.Ipv4CIDRBlock, + "available_ipv4_address_count": aac, + "network_acl": *subnet.NetworkACL.Name, + "total_ipv4_address_count": tac, + "vpc": *subnet.VPC.ID, + "zone": *subnet.Zone.Name, + } + if subnet.PublicGateway != nil { + l["public_gateway"] = *subnet.PublicGateway.ID + } + subnetsInfo = append(subnetsInfo, l) + } + d.SetId(dataSourceIBMISSubnetsID(d)) + d.Set(isSubnets, subnetsInfo) + return nil +} + +func subnetList(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcv1.Subnet{} + for { + options := &vpcv1.ListSubnetsOptions{} + if start != "" { + options.Start = &start + } + subnets, response, err := sess.ListSubnets(options) + if err != nil { + return fmt.Errorf("Error Fetching subnets %s\n%s", err, response) + } + start = GetNext(subnets.Next) + allrecs = append(allrecs, subnets.Subnets...) + if start == "" { + break + } + } + subnetsInfo := make([]map[string]interface{}, 0) + for _, subnet := range allrecs { + + var aac string = strconv.FormatInt(*subnet.AvailableIpv4AddressCount, 10) + var tac string = strconv.FormatInt(*subnet.TotalIpv4AddressCount, 10) + l := map[string]interface{}{ + "name": *subnet.Name, + "id": *subnet.ID, + "status": *subnet.Status, + "crn": *subnet.CRN, + "ipv4_cidr_block": *subnet.Ipv4CIDRBlock, + "available_ipv4_address_count": aac, + "network_acl": *subnet.NetworkACL.Name, + "total_ipv4_address_count": tac, + "vpc": *subnet.VPC.ID, + "zone": *subnet.Zone.Name, + } + if subnet.PublicGateway != nil { + l["public_gateway"] = *subnet.PublicGateway.ID + } + if subnet.ResourceGroup != nil { + l["resource_group"] = *subnet.ResourceGroup.ID + } + subnetsInfo = append(subnetsInfo, l) + } + d.SetId(dataSourceIBMISSubnetsID(d)) + d.Set(isSubnets, subnetsInfo) + return nil +} + +// dataSourceIBMISSubnetsId returns a reasonable ID for a subnet list. +func dataSourceIBMISSubnetsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_virtual_endpoint_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_virtual_endpoint_gateway.go new file mode 100644 index 00000000000..0baa5b55aa3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_virtual_endpoint_gateway.go @@ -0,0 +1,149 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISEndpointGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISEndpointGatewayRead, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayName: { + Type: schema.TypeString, + Required: true, + Description: "Endpoint gateway name", + }, + isVirtualEndpointGatewayResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway resource type", + }, + isVirtualEndpointGatewayResourceGroupID: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group id", + }, + isVirtualEndpointGatewayCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway created date and time", + }, + isVirtualEndpointGatewayHealthState: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway health state", + }, + isVirtualEndpointGatewayLifecycleState: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway lifecycle state", + }, + isVirtualEndpointGatewayIPs: { + Type: schema.TypeList, + Computed: true, + Description: "Endpoint gateway resource group", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayIPsID: { + Type: schema.TypeString, + Computed: true, + Description: "The IPs id", + }, + isVirtualEndpointGatewayIPsName: { + Type: schema.TypeString, + Computed: true, + Description: "The IPs name", + }, + isVirtualEndpointGatewayIPsResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP resource type", + }, + }, + }, + }, + isVirtualEndpointGatewayTarget: { + Type: schema.TypeList, + Computed: true, + Description: "Endpoint gateway target", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayTargetName: { + Type: schema.TypeString, + Computed: true, + Description: "The target name", + }, + isVirtualEndpointGatewayTargetResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "The target resource type", + }, + }, + }, + }, + isVirtualEndpointGatewayVpcID: { + Type: schema.TypeString, + Computed: true, + Description: "The VPC id", + }, + }, + } +} + +func dataSourceIBMISEndpointGatewayRead( + d *schema.ResourceData, meta interface{}) error { + var found bool + sess, err := vpcClient(meta) + if err != nil { + return err + } + + name := d.Get(isVirtualEndpointGatewayName).(string) + + start := "" + allrecs := []vpcv1.EndpointGateway{} + for { + options := sess.NewListEndpointGatewaysOptions() + if start != "" { + options.Start = &start + } + result, response, err := sess.ListEndpointGateways(options) + if err != nil { + return fmt.Errorf("Error fetching endpoint gateways %s\n%s", err, response) + } + start = GetNext(result.Next) + allrecs = append(allrecs, result.EndpointGateways...) 
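+		// The full collection is needed so the gateway can be matched by name below.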
+ if start == "" { + break + } + } + for _, result := range allrecs { + if *result.Name == name { + d.SetId(*result.ID) + d.Set(isVirtualEndpointGatewayName, result.Name) + d.Set(isVirtualEndpointGatewayHealthState, result.HealthState) + d.Set(isVirtualEndpointGatewayCreatedAt, result.CreatedAt.String()) + d.Set(isVirtualEndpointGatewayLifecycleState, result.LifecycleState) + d.Set(isVirtualEndpointGatewayResourceType, result.ResourceType) + d.Set(isVirtualEndpointGatewayIPs, flattenIPs(result.Ips)) + d.Set(isVirtualEndpointGatewayResourceGroupID, result.ResourceGroup.ID) + d.Set(isVirtualEndpointGatewayTarget, flattenEndpointGatewayTarget( + result.Target.(*vpcv1.EndpointGatewayTarget))) + d.Set(isVirtualEndpointGatewayVpcID, result.VPC.ID) + found = true + break + } + } + if !found { + return fmt.Errorf("No Virtual Endpoints Gateway found with given name %s", name) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_virtual_endpoint_gateway_ips.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_virtual_endpoint_gateway_ips.go new file mode 100644 index 00000000000..db0bb357b69 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_virtual_endpoint_gateway_ips.go @@ -0,0 +1,135 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISEndpointGatewayIPs() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISEndpointGatewayIPsRead, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayID: { + Type: schema.TypeString, + Required: true, + }, + isVirtualEndpointGatewayIPs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayIPID: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP id", + }, + isVirtualEndpointGatewayIPName: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP name", + }, + isVirtualEndpointGatewayIPResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP resource type", + }, + isVirtualEndpointGatewayIPCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP created date and time", + }, + isVirtualEndpointGatewayIPAutoDelete: { + Type: schema.TypeBool, + Computed: true, + Description: "Endpoint gateway IP auto delete", + }, + isVirtualEndpointGatewayIPAddress: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP address", + }, + isVirtualEndpointGatewayIPTarget: { + Type: schema.TypeList, + Computed: true, + Description: "Endpoint gateway detail", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayIPTargetID: { + Type: schema.TypeString, + Computed: true, + Description: "The IPs target id", + }, + isVirtualEndpointGatewayIPTargetName: { + Type: schema.TypeString, + Computed: true, + Description: "The IPs target name", + }, + isVirtualEndpointGatewayIPTargetResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway resource type", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISEndpointGatewayIPsRead(d *schema.ResourceData, meta 
interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	gatewayID := d.Get(isVirtualEndpointGatewayID).(string)
+
+	start := ""
+	allrecs := []vpcv1.ReservedIP{}
+	for {
+		options := sess.NewListEndpointGatewayIpsOptions(gatewayID)
+		if start != "" {
+			options.Start = &start
+		}
+		result, response, err := sess.ListEndpointGatewayIps(options)
+		if err != nil {
+			return fmt.Errorf("Error fetching endpoint gateway ips %s\n%s", err, response)
+		}
+		start = GetNext(result.Next)
+		allrecs = append(allrecs, result.Ips...)
+		if start == "" {
+			break
+		}
+	}
+	endpointGatewayIPs := []map[string]interface{}{}
+	for _, ip := range allrecs {
+		ipsOutput := map[string]interface{}{}
+		ipsOutput[isVirtualEndpointGatewayIPID] = *ip.ID
+		ipsOutput[isVirtualEndpointGatewayIPName] = *ip.Name
+		ipsOutput[isVirtualEndpointGatewayIPCreatedAt] = (*ip.CreatedAt).String()
+		ipsOutput[isVirtualEndpointGatewayIPAddress] = *ip.Address
+		ipsOutput[isVirtualEndpointGatewayIPAutoDelete] = *ip.AutoDelete
+		ipsOutput[isVirtualEndpointGatewayIPResourceType] = *ip.ResourceType
+		// Guard the type assertion so an unexpected target type cannot panic.
+		if target, ok := ip.Target.(*vpcv1.ReservedIPTarget); ok {
+			ipsOutput[isVirtualEndpointGatewayIPTarget] = flattenEndpointGatewayIPTarget(target)
+		}
+
+		endpointGatewayIPs = append(endpointGatewayIPs, ipsOutput)
+	}
+	d.SetId(dataSourceIBMISEndpointGatewayIPsCheckID(d))
+	d.Set(isVirtualEndpointGatewayIPs, endpointGatewayIPs)
+	return nil
+}
+
+// dataSourceIBMISEndpointGatewayIPsCheckID returns a reasonable ID for the endpoint gateway IPs list.
+func dataSourceIBMISEndpointGatewayIPsCheckID(d *schema.ResourceData) string {
+	return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_virtual_endpoint_gateways.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_virtual_endpoint_gateways.go
new file mode 100644
index 00000000000..90b43998a42
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_virtual_endpoint_gateways.go
@@ -0,0 +1,180 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmISVirtualEndpointGateways = "ibm_is_virtual_endpoint_gateways" + isVirtualEndpointGateways = "virtual_endpoint_gateways" +) + +func dataSourceIBMISEndpointGateways() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISEndpointGatewaysRead, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + isVirtualEndpointGateways: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway id", + }, + isVirtualEndpointGatewayName: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway name", + }, + isVirtualEndpointGatewayResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway resource type", + }, + isVirtualEndpointGatewayResourceGroupID: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group id", + }, + isVirtualEndpointGatewayCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway created date and time", + }, + isVirtualEndpointGatewayHealthState: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway health state", + }, + isVirtualEndpointGatewayLifecycleState: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway lifecycle state", + }, + isVirtualEndpointGatewayIPs: { + Type: schema.TypeList, + Computed: true, + Description: "Collection of reserved IPs bound to an endpoint gateway", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayIPsID: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this reserved IP", + }, + isVirtualEndpointGatewayIPsName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined or system-provided name for this reserved IP", + }, + isVirtualEndpointGatewayIPsResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "The resource type(subnet_reserved_ip)", + }, + }, + }, + }, + isVirtualEndpointGatewayTarget: { + Type: schema.TypeList, + Computed: true, + Description: "Endpoint gateway target", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayTargetName: { + Type: schema.TypeString, + Computed: true, + Description: "The target name", + }, + isVirtualEndpointGatewayTargetResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "The target resource type", + }, + }, + }, + }, + isVirtualEndpointGatewayVpcID: { + Type: schema.TypeString, + Computed: true, + Description: "The VPC id", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISEndpointGatewaysRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + start := "" + allrecs := []vpcv1.EndpointGateway{} + for { + options := sess.NewListEndpointGatewaysOptions() + if start != "" { + options.Start = &start + } + result, response, err := sess.ListEndpointGateways(options) + if err != nil { + return fmt.Errorf("Error fetching endpoint gateways %s\n%s", err, response) + } + start = GetNext(result.Next) + allrecs = append(allrecs, result.EndpointGateways...) 
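+		// Accumulate every page before flattening the collection below.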
+ if start == "" { + break + } + } + endpointGateways := []map[string]interface{}{} + for _, endpointGateway := range allrecs { + endpointGatewayOutput := map[string]interface{}{} + endpointGatewayOutput["id"] = *endpointGateway.ID + endpointGatewayOutput[isVirtualEndpointGatewayName] = *endpointGateway.Name + endpointGatewayOutput[isVirtualEndpointGatewayCreatedAt] = (*endpointGateway.CreatedAt).String() + endpointGatewayOutput[isVirtualEndpointGatewayResourceType] = (*endpointGateway.ResourceType) + endpointGatewayOutput[isVirtualEndpointGatewayHealthState] = *endpointGateway.HealthState + endpointGatewayOutput[isVirtualEndpointGatewayLifecycleState] = *endpointGateway.LifecycleState + endpointGatewayOutput[isVirtualEndpointGatewayResourceGroupID] = *endpointGateway.ResourceGroup.ID + endpointGatewayOutput[isVirtualEndpointGatewayVpcID] = *endpointGateway.VPC.ID + endpointGatewayOutput[isVirtualEndpointGatewayTarget] = + flattenEndpointGatewayTarget(endpointGateway.Target.(*vpcv1.EndpointGatewayTarget)) + endpointGatewayOutput[isVirtualEndpointGatewayIPs] = + flattenDataSourceIPs(endpointGateway.Ips) + endpointGateways = append(endpointGateways, endpointGatewayOutput) + } + d.SetId(dataSourceIBMISEndpointGatewaysCheckID(d)) + d.Set(isVirtualEndpointGateways, endpointGateways) + return nil +} + +// dataSourceIBMISEndpointGatewaysCheckID returns a reasonable ID for dns zones list. +func dataSourceIBMISEndpointGatewaysCheckID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func flattenDataSourceIPs(ipsList []vpcv1.ReservedIPReference) interface{} { + ipsListOutput := make([]interface{}, 0) + for _, item := range ipsList { + ips := make(map[string]interface{}, 0) + ips[isVirtualEndpointGatewayIPsID] = *item.ID + ips[isVirtualEndpointGatewayIPsName] = *item.Name + ips[isVirtualEndpointGatewayIPsResourceType] = *item.ResourceType + + ipsListOutput = append(ipsListOutput, ips) + } + return ipsListOutput +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_volume.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_volume.go new file mode 100644 index 00000000000..37ce23cc0d0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_volume.go @@ -0,0 +1,310 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISVolume() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISVolumeRead, + + Schema: map[string]*schema.Schema{ + + isVolumeName: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeDataSourceValidator("ibm_is_subnet", isVolumeName), + Description: "Volume name", + }, + + isVolumeZone: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Zone name", + }, + + isVolumeResourceGroup: { + Type: schema.TypeString, + Computed: true, + Description: "Resource group name", + }, + + isVolumeProfileName: { + Type: schema.TypeString, + Computed: true, + Description: "Volume profile name", + }, + + isVolumeEncryptionKey: { + Type: schema.TypeString, + Computed: true, + Description: "Volume encryption key info", + }, + + isVolumeCapacity: { + Type: schema.TypeInt, + Computed: true, + Description: "Vloume capacity value", + }, + + isVolumeIops: { + Type: schema.TypeInt, + Computed: true, + Description: "IOPS value for the Volume", + }, + + isVolumeCrn: { + Type: schema.TypeString, + Computed: true, + Description: "CRN value for the volume instance", + }, + + isVolumeStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Volume status", + }, + + isVolumeStatusReasons: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVolumeStatusReasonsCode: { + Type: schema.TypeString, + Computed: true, + Description: "A snake case string succinctly identifying the status reason", + }, + + isVolumeStatusReasonsMessage: { + Type: schema.TypeString, + Computed: true, + Description: "An explanation of the status reason", + }, + }, + }, + }, + + isVolumeTags: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceIBMVPCHash, + Description: "Tags for the volume instance", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func dataSourceIBMISVolumeValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVolumeName, + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString}) + + ibmISVoulmeDataSourceValidator := ResourceValidator{ResourceName: "ibm_is_volume", Schema: validateSchema} + return &ibmISVoulmeDataSourceValidator +} + +func dataSourceIBMISVolumeRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := d.Get(isVolumeName).(string) + if userDetails.generation == 1 { + err := classicVolumeGet(d, meta, name) + if err 
!= nil { + return err + } + } else { + err := volumeGet(d, meta, name) + if err != nil { + return err + } + } + return nil +} + +func classicVolumeGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + zone := "" + if zname, ok := d.GetOk(isVolumeZone); ok { + zone = zname.(string) + } + start := "" + allrecs := []vpcclassicv1.Volume{} + for { + listVolumesOptions := &vpcclassicv1.ListVolumesOptions{} + if start != "" { + listVolumesOptions.Start = &start + } + if zone != "" { + listVolumesOptions.ZoneName = &zone + } + listVolumesOptions.Name = &name + vols, response, err := sess.ListVolumes(listVolumesOptions) + if err != nil { + return fmt.Errorf("Error Fetching volumes %s\n%s", err, response) + } + start = GetNext(vols.Next) + allrecs = append(allrecs, vols.Volumes...) + if start == "" { + break + } + } + for _, vol := range allrecs { + d.SetId(*vol.ID) + d.Set(isVolumeName, *vol.Name) + d.Set(isVolumeProfileName, *vol.Profile.Name) + d.Set(isVolumeZone, *vol.Zone.Name) + if vol.EncryptionKey != nil { + d.Set(isVolumeEncryptionKey, *vol.EncryptionKey.CRN) + } + d.Set(isVolumeIops, *vol.Iops) + d.Set(isVolumeCapacity, *vol.Capacity) + d.Set(isVolumeCrn, *vol.CRN) + d.Set(isVolumeStatus, *vol.Status) + tags, err := GetTagsUsingCRN(meta, *vol.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc volume (%s) tags: %s", d.Id(), err) + } + d.Set(isVolumeTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/storage/storageVolumes") + d.Set(ResourceName, *vol.Name) + d.Set(ResourceCRN, *vol.CRN) + d.Set(ResourceStatus, *vol.Status) + if vol.ResourceGroup != nil { + d.Set(ResourceGroupName, *vol.ResourceGroup.ID) + d.Set(isVolumeResourceGroup, *vol.ResourceGroup.ID) + } + return nil + } + return fmt.Errorf("No Volume found with name %s", name) +} + +func volumeGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + zone := "" + if zname, ok := d.GetOk(isVolumeZone); ok { + zone = zname.(string) + } + start := "" + allrecs := []vpcv1.Volume{} + for { + listVolumesOptions := &vpcv1.ListVolumesOptions{} + if start != "" { + listVolumesOptions.Start = &start + } + if zone != "" { + listVolumesOptions.ZoneName = &zone + } + listVolumesOptions.Name = &name + vols, response, err := sess.ListVolumes(listVolumesOptions) + if err != nil { + return fmt.Errorf("Error Fetching volumes %s\n%s", err, response) + } + start = GetNext(vols.Next) + allrecs = append(allrecs, vols.Volumes...) 
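+		// ListVolumes is already filtered by name (and zone when given), so any returned record matches.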
+ if start == "" { + break + } + } + for _, vol := range allrecs { + d.SetId(*vol.ID) + d.Set(isVolumeName, *vol.Name) + d.Set(isVolumeProfileName, *vol.Profile.Name) + d.Set(isVolumeZone, *vol.Zone.Name) + if vol.EncryptionKey != nil { + d.Set(isVolumeEncryptionKey, vol.EncryptionKey.CRN) + } + d.Set(isVolumeIops, *vol.Iops) + d.Set(isVolumeCapacity, *vol.Capacity) + d.Set(isVolumeCrn, *vol.CRN) + d.Set(isVolumeStatus, *vol.Status) + if vol.StatusReasons != nil { + statusReasonsList := make([]map[string]interface{}, 0) + for _, sr := range vol.StatusReasons { + currentSR := map[string]interface{}{} + if sr.Code != nil && sr.Message != nil { + currentSR[isVolumeStatusReasonsCode] = *sr.Code + currentSR[isVolumeStatusReasonsMessage] = *sr.Message + statusReasonsList = append(statusReasonsList, currentSR) + } + } + d.Set(isVolumeStatusReasons, statusReasonsList) + } + tags, err := GetTagsUsingCRN(meta, *vol.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc volume (%s) tags: %s", d.Id(), err) + } + d.Set(isVolumeTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/storage/storageVolumes") + d.Set(ResourceName, *vol.Name) + d.Set(ResourceCRN, *vol.CRN) + d.Set(ResourceStatus, *vol.Status) + if vol.ResourceGroup != nil { + d.Set(ResourceGroupName, *vol.ResourceGroup.Name) + d.Set(isVolumeResourceGroup, *vol.ResourceGroup.ID) + } + return nil + } + return fmt.Errorf("No Volume found with name %s", name) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_volume_profile.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_volume_profile.go new file mode 100644 index 00000000000..c3cc3572849 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_volume_profile.go @@ -0,0 +1,94 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isVolumeProfile = "name" + isVolumeProfileFamily = "family" +) + +func dataSourceIBMISVolumeProfile() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISVolumeProfileRead, + + Schema: map[string]*schema.Schema{ + + isVolumeProfile: { + Type: schema.TypeString, + Required: true, + Description: "Volume profile name", + }, + + isVolumeProfileFamily: { + Type: schema.TypeString, + Computed: true, + Description: "Volume profile family", + }, + }, + } +} + +func dataSourceIBMISVolumeProfileRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := d.Get(isVolumeProfile).(string) + if userDetails.generation == 1 { + err := classicVolumeProfileGet(d, meta, name) + if err != nil { + return err + } + } else { + err := volumeProfileGet(d, meta, name) + if err != nil { + return err + } + } + return nil +} + +func classicVolumeProfileGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getVolumeProfileOptions := &vpcclassicv1.GetVolumeProfileOptions{ + Name: &name, + } + profile, _, err := sess.GetVolumeProfile(getVolumeProfileOptions) + if err != nil { + return err + } + // For lack of anything better, compose our id from profile name. + d.SetId(*profile.Name) + d.Set(isVolumeProfile, *profile.Name) + d.Set(isVolumeProfileFamily, *profile.Family) + return nil +} + +func volumeProfileGet(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getVolumeProfileOptions := &vpcv1.GetVolumeProfileOptions{ + Name: &name, + } + profile, _, err := sess.GetVolumeProfile(getVolumeProfileOptions) + if err != nil { + return err + } + // For lack of anything better, compose our id from profile name. + d.SetId(*profile.Name) + d.Set(isVolumeProfile, *profile.Name) + d.Set(isVolumeProfileFamily, *profile.Family) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_volume_profiles.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_volume_profiles.go new file mode 100644 index 00000000000..7f34b9a8a43 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_volume_profiles.go @@ -0,0 +1,128 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isVolumeProfiles = "profiles" +) + +func dataSourceIBMISVolumeProfiles() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISVolumeProfilesRead, + + Schema: map[string]*schema.Schema{ + + isVolumeProfiles: { + Type: schema.TypeList, + Description: "List of Volume profile maps", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "family": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISVolumeProfilesRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + if userDetails.generation == 1 { + err := classicVolumeProfilesList(d, meta) + if err != nil { + return err + } + } else { + err := volumeProfilesList(d, meta) + if err != nil { + return err + } + } + return nil +} + +func classicVolumeProfilesList(d *schema.ResourceData, meta interface{}) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.VolumeProfile{} + for { + listVolumeProfilesOptions := &vpcclassicv1.ListVolumeProfilesOptions{} + if start != "" { + listVolumeProfilesOptions.Start = &start + } + availableProfiles, response, err := sess.ListVolumeProfiles(listVolumeProfilesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Volume Profiles %s\n%s", err, response) + } + start = GetNext(availableProfiles.Next) + allrecs = append(allrecs, availableProfiles.Profiles...) + if start == "" { + break + } + } + profilesInfo := make([]map[string]interface{}, 0) + for _, profile := range allrecs { + + l := map[string]interface{}{ + "name": *profile.Name, + "family": *profile.Family, + } + profilesInfo = append(profilesInfo, l) + } + d.SetId(dataSourceIBMISVolumeProfilesID(d)) + d.Set(isVolumeProfiles, profilesInfo) + return nil +} + +func volumeProfilesList(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + listVolumeProfilesOptions := &vpcv1.ListVolumeProfilesOptions{} + availableProfiles, response, err := sess.ListVolumeProfiles(listVolumeProfilesOptions) + if err != nil { + return fmt.Errorf("Error Fetching Volume Profiles %s\n%s", err, response) + } + profilesInfo := make([]map[string]interface{}, 0) + for _, profile := range availableProfiles.Profiles { + + l := map[string]interface{}{ + "name": *profile.Name, + "family": *profile.Family, + } + profilesInfo = append(profilesInfo, l) + } + d.SetId(dataSourceIBMISVolumeProfilesID(d)) + d.Set(isVolumeProfiles, profilesInfo) + return nil +} + +// dataSourceIBMISVolumeProfilesID returns a reasonable ID for a Volume Profile list. +func dataSourceIBMISVolumeProfilesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc.go new file mode 100644 index 00000000000..edc64bc5088 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc.go @@ -0,0 +1,730 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "reflect" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMISVPC() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISVPCRead, + + Schema: map[string]*schema.Schema{ + isVPCDefaultNetworkACL: { + Type: schema.TypeString, + Computed: true, + }, + + isVPCClassicAccess: { + Type: schema.TypeBool, + Computed: true, + }, + + isVPCDefaultRoutingTable: { + Type: schema.TypeString, + Computed: true, + Description: "Default routing table associated with VPC", + }, + + isVPCName: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeDataSourceValidator("ibm_is_subnet", isVPCName), + }, + + isVPCDefaultNetworkACLName: { + Type: schema.TypeString, + Computed: true, + Description: "Default Network ACL name", + }, + + isVPCDefaultSecurityGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "Default security group name", + }, + + isVPCDefaultRoutingTableName: { + Type: schema.TypeString, + Computed: true, + Description: "Default routing table name", + }, + + isVPCResourceGroup: { + Type: schema.TypeString, + Computed: true, + }, + + isVPCStatus: { + Type: schema.TypeString, + Computed: true, + }, + + isVPCDefaultSecurityGroup: { + Type: schema.TypeString, + Computed: true, + Description: "Security group associated with VPC", + }, + + isVPCTags: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceIBMVPCHash, + }, + + isVPCCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + + cseSourceAddresses: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + Description: "Cloud service endpoint IP Address", + }, + + "zone_name": { + Type: schema.TypeString, + Computed: true, + Description: "Location info of CSE Address", + }, + }, + }, + }, + + isVPCSecurityGroupList: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVPCSecurityGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "Security group name", + }, + + isVPCSecurityGroupID: { + Type: schema.TypeString, + Required: true, + Description: "Security group id", + ForceNew: true, + }, + + isSecurityGroupRules: { + Type: schema.TypeList, + Computed: true, + Description: "Security Rules", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + isVPCSecurityGroupRuleID: { + Type: schema.TypeString, + Computed: true, + Description: "Rule ID", + }, + + isVPCSecurityGroupRuleDirection: { + Type: schema.TypeString, + Computed: 
true, + Description: "Direction of traffic to enforce, either inbound or outbound", + }, + + isVPCSecurityGroupRuleIPVersion: { + Type: schema.TypeString, + Computed: true, + Description: "IP version: ipv4 or ipv6", + }, + + isVPCSecurityGroupRuleRemote: { + Type: schema.TypeString, + Computed: true, + Description: "Security group id: an IP address, a CIDR block, or a single security group identifier", + }, + + isVPCSecurityGroupRuleType: { + Type: schema.TypeInt, + Computed: true, + }, + + isVPCSecurityGroupRuleCode: { + Type: schema.TypeInt, + Computed: true, + }, + + isVPCSecurityGroupRulePortMin: { + Type: schema.TypeInt, + Computed: true, + }, + + isVPCSecurityGroupRulePortMax: { + Type: schema.TypeInt, + Computed: true, + }, + + isVPCSecurityGroupRuleProtocol: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + + subnetsList: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "subent name", + }, + + "id": { + Type: schema.TypeString, + Computed: true, + Description: "subnet ID", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "subnet status", + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Description: "subnet location", + }, + + totalIPV4AddressCount: { + Type: schema.TypeInt, + Computed: true, + Description: "Total IPv4 address count in the subnet", + }, + + availableIPV4AddressCount: { + Type: schema.TypeInt, + Computed: true, + Description: "Available IPv4 address count in the subnet", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISVpcValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCName, + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString}) + + ibmISVpcDataSourceValidator := ResourceValidator{ResourceName: "ibm_is_vpc", Schema: validateSchema} + return &ibmISVpcDataSourceValidator +} + +func dataSourceIBMISVPCRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + name := d.Get(isVPCName).(string) + if userDetails.generation == 1 { + err := classicVpcGetByName(d, meta, name) + if err != nil { + return err + } + } else { + err := vpcGetByName(d, meta, name) + if err != nil { + return err + } + } + return nil +} + +func classicVpcGetByName(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcclassicv1.VPC{} + for { + listVpcsOptions := &vpcclassicv1.ListVpcsOptions{} + if start != "" { + listVpcsOptions.Start = &start + } + vpcs, response, err := sess.ListVpcs(listVpcsOptions) + if err != nil { + return fmt.Errorf("Error Fetching vpcs %s\n%s", err, response) + } + start = GetNext(vpcs.Next) + allrecs = append(allrecs, vpcs.Vpcs...) 
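+		// An empty start token from GetNext means the response carried no
+		// "next" link, so the loop below stops paging.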
+ if start == "" { + break + } + } + for _, vpc := range allrecs { + if *vpc.Name == name { + d.SetId(*vpc.ID) + d.Set(isVPCName, *vpc.Name) + d.Set(isVPCClassicAccess, *vpc.ClassicAccess) + d.Set(isVPCStatus, *vpc.Status) + d.Set(isVPCResourceGroup, *vpc.ResourceGroup.ID) + if vpc.DefaultNetworkACL != nil { + d.Set(isVPCDefaultNetworkACL, *vpc.DefaultNetworkACL.ID) + } else { + d.Set(isVPCDefaultNetworkACL, nil) + } + if vpc.DefaultSecurityGroup != nil { + d.Set(isVPCDefaultSecurityGroup, *vpc.DefaultSecurityGroup.ID) + } else { + d.Set(isVPCDefaultSecurityGroup, nil) + } + tags, err := GetTagsUsingCRN(meta, *vpc.CRN) + if err != nil { + log.Printf( + "An error occured during reading of vpc (%s) tags : %s", d.Id(), err) + } + d.Set(isVPCTags, tags) + d.Set(isVPCCRN, *vpc.CRN) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/vpcs") + d.Set(ResourceName, *vpc.Name) + d.Set(ResourceCRN, *vpc.CRN) + d.Set(ResourceStatus, *vpc.Status) + if vpc.ResourceGroup != nil { + d.Set(ResourceGroupName, *vpc.ResourceGroup.ID) + } + //set the cse ip addresses info + if vpc.CseSourceIps != nil { + cseSourceIpsList := make([]map[string]interface{}, 0) + for _, sourceIP := range vpc.CseSourceIps { + currentCseSourceIp := map[string]interface{}{} + if sourceIP.IP != nil { + currentCseSourceIp["address"] = *sourceIP.IP.Address + currentCseSourceIp["zone_name"] = *sourceIP.Zone.Name + cseSourceIpsList = append(cseSourceIpsList, currentCseSourceIp) + } + } + d.Set(cseSourceAddresses, cseSourceIpsList) + } + options := &vpcclassicv1.ListSubnetsOptions{} + s, response, err := sess.ListSubnets(options) + if err != nil { + log.Printf("Error Fetching subnets %s\n%s", err, response) + } else { + subnetsInfo := make([]map[string]interface{}, 0) + for _, subnet := range s.Subnets { + if *subnet.VPC.ID == d.Id() { + l := map[string]interface{}{ + "name": *subnet.Name, + "id": *subnet.ID, + "status": *subnet.Status, + "zone": *subnet.Zone.Name, + totalIPV4AddressCount: *subnet.TotalIpv4AddressCount, + availableIPV4AddressCount: *subnet.AvailableIpv4AddressCount, + } + subnetsInfo = append(subnetsInfo, l) + } + } + d.Set(subnetsList, subnetsInfo) + } + + //Set Security group list + + listSgOptions := &vpcclassicv1.ListSecurityGroupsOptions{} + sgs, _, err := sess.ListSecurityGroups(listSgOptions) + if err != nil { + return err + } + + securityGroupList := make([]map[string]interface{}, 0) + + for _, group := range sgs.SecurityGroups { + if *group.VPC.ID == d.Id() { + g := make(map[string]interface{}) + + g[isVPCSecurityGroupName] = *group.Name + g[isVPCSecurityGroupID] = *group.ID + + rules := make([]map[string]interface{}, 0) + for _, sgrule := range group.Rules { + switch reflect.TypeOf(sgrule).String() { + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + r := make(map[string]interface{}) + if rule.Code != nil { + r[isVPCSecurityGroupRuleCode] = int(*rule.Code) + } + if rule.Type != nil { + r[isVPCSecurityGroupRuleType] = int(*rule.Type) + } + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID 
!= nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + r := make(map[string]interface{}) + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + r := make(map[string]interface{}) + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.PortMin != nil { + r[isVPCSecurityGroupRulePortMin] = int(*rule.PortMin) + } + if rule.PortMax != nil { + r[isVPCSecurityGroupRulePortMax] = int(*rule.PortMax) + } + + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + } + } + g[isVPCSgRules] = rules + securityGroupList = append(securityGroupList, g) + } + } + + d.Set(isVPCSecurityGroupList, securityGroupList) + return nil + } + } + return fmt.Errorf("No VPC found with name %s", name) +} +func vpcGetByName(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + start := "" + allrecs := []vpcv1.VPC{} + for { + listVpcsOptions := &vpcv1.ListVpcsOptions{} + if start != "" { + listVpcsOptions.Start = &start + } + vpcs, response, err := sess.ListVpcs(listVpcsOptions) + if err != nil { + return fmt.Errorf("Error Fetching vpcs %s\n%s", err, response) + } + start = GetNext(vpcs.Next) + allrecs = append(allrecs, vpcs.Vpcs...) 
+ if start == "" { + break + } + } + for _, vpc := range allrecs { + if *vpc.Name == name { + d.SetId(*vpc.ID) + d.Set(isVPCName, *vpc.Name) + d.Set(isVPCClassicAccess, *vpc.ClassicAccess) + d.Set(isVPCStatus, *vpc.Status) + d.Set(isVPCResourceGroup, *vpc.ResourceGroup.ID) + d.Set(isVPCDefaultNetworkACLName, *vpc.DefaultNetworkACL.Name) + d.Set(isVPCDefaultRoutingTableName, *vpc.DefaultRoutingTable.Name) + d.Set(isVPCDefaultSecurityGroupName, *vpc.DefaultSecurityGroup.Name) + if vpc.DefaultNetworkACL != nil { + d.Set(isVPCDefaultNetworkACL, *vpc.DefaultNetworkACL.ID) + } else { + d.Set(isVPCDefaultNetworkACL, nil) + } + if vpc.DefaultRoutingTable != nil { + d.Set(isVPCDefaultRoutingTable, *vpc.DefaultRoutingTable.ID) + } + if vpc.DefaultSecurityGroup != nil { + d.Set(isVPCDefaultSecurityGroup, *vpc.DefaultSecurityGroup.ID) + } else { + d.Set(isVPCDefaultSecurityGroup, nil) + } + tags, err := GetTagsUsingCRN(meta, *vpc.CRN) + if err != nil { + log.Printf( + "An error occured during reading of vpc (%s) tags : %s", d.Id(), err) + } + d.Set(isVPCTags, tags) + d.Set(isVPCCRN, *vpc.CRN) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/vpcs") + d.Set(ResourceName, *vpc.Name) + d.Set(ResourceCRN, *vpc.CRN) + d.Set(ResourceStatus, *vpc.Status) + if vpc.ResourceGroup != nil { + d.Set(ResourceGroupName, *vpc.ResourceGroup.Name) + } + //set the cse ip addresses info + if vpc.CseSourceIps != nil { + cseSourceIpsList := make([]map[string]interface{}, 0) + for _, sourceIP := range vpc.CseSourceIps { + currentCseSourceIp := map[string]interface{}{} + if sourceIP.IP != nil { + currentCseSourceIp["address"] = *sourceIP.IP.Address + currentCseSourceIp["zone_name"] = *sourceIP.Zone.Name + cseSourceIpsList = append(cseSourceIpsList, currentCseSourceIp) + } + } + d.Set(cseSourceAddresses, cseSourceIpsList) + } + options := &vpcv1.ListSubnetsOptions{} + s, response, err := sess.ListSubnets(options) + if err != nil { + log.Printf("Error Fetching subnets %s\n%s", err, response) + } else { + subnetsInfo := make([]map[string]interface{}, 0) + for _, subnet := range s.Subnets { + if *subnet.VPC.ID == d.Id() { + l := map[string]interface{}{ + "name": *subnet.Name, + "id": *subnet.ID, + "status": *subnet.Status, + "zone": *subnet.Zone.Name, + totalIPV4AddressCount: *subnet.TotalIpv4AddressCount, + availableIPV4AddressCount: *subnet.AvailableIpv4AddressCount, + } + subnetsInfo = append(subnetsInfo, l) + } + } + d.Set(subnetsList, subnetsInfo) + } + + listSgOptions := &vpcv1.ListSecurityGroupsOptions{} + sgs, _, err := sess.ListSecurityGroups(listSgOptions) + if err != nil { + return err + } + + securityGroupList := make([]map[string]interface{}, 0) + + for _, group := range sgs.SecurityGroups { + if *group.VPC.ID == d.Id() { + g := make(map[string]interface{}) + + g[isVPCSecurityGroupName] = *group.Name + g[isVPCSecurityGroupID] = *group.ID + + rules := make([]map[string]interface{}, 0) + for _, sgrule := range group.Rules { + switch reflect.TypeOf(sgrule).String() { + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + r := make(map[string]interface{}) + if rule.Code != nil { + r[isVPCSecurityGroupRuleCode] = int(*rule.Code) + } + if rule.Type != nil { + r[isVPCSecurityGroupRuleType] = int(*rule.Type) + } + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { 
+ r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + r := make(map[string]interface{}) + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + r := make(map[string]interface{}) + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.PortMin != nil { + r[isVPCSecurityGroupRulePortMin] = int(*rule.PortMin) + } + if rule.PortMax != nil { + r[isVPCSecurityGroupRulePortMax] = int(*rule.PortMax) + } + + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + } + } + g[isVPCSgRules] = rules + securityGroupList = append(securityGroupList, g) + } + } + + d.Set(isVPCSecurityGroupList, securityGroupList) + + return nil + } + } + return fmt.Errorf("No VPC found with name %s", name) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc_default_routing_table.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc_default_routing_table.go new file mode 100644 index 00000000000..da2c0fd882e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc_default_routing_table.go @@ -0,0 +1,180 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
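With the VPC data source now defined for both generations, a minimal consumption sketch looks like the following; `my-vpc` is a placeholder name and the attribute keys follow the schema constants above:

```hcl
# Look up an existing VPC by name.
data "ibm_is_vpc" "example" {
  name = "my-vpc" # placeholder VPC name
}

output "vpc_default_security_group" {
  value = data.ibm_is_vpc.example.default_security_group
}
```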
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isDefaultRoutingTableID = "default_routing_table" + isDefaultRoutingTableHref = "href" + isDefaultRoutingTableName = "name" + isDefaultRoutingTableResourceType = "resource_type" + isDefaultRoutingTableCreatedAt = "created_at" + isDefaultRoutingTableLifecycleState = "lifecycle_state" + isDefaultRoutingTableRoutesList = "routes" + isDefaultRoutingTableSubnetsList = "subnets" + isDefaultRTVpcID = "vpc" + isDefaultRTDirectLinkIngress = "route_direct_link_ingress" + isDefaultRTTransitGatewayIngress = "route_transit_gateway_ingress" + isDefaultRTVPCZoneIngress = "route_vpc_zone_ingress" + isDefaultRTDefault = "is_default" +) + +func dataSourceIBMISVPCDefaultRoutingTable() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISVPCDefaultRoutingTableGet, + Schema: map[string]*schema.Schema{ + isDefaultRTVpcID: { + Type: schema.TypeString, + Required: true, + Description: "VPC identifier", + }, + isDefaultRoutingTableID: { + Type: schema.TypeString, + Computed: true, + Description: "Default Routing Table ID", + }, + isDefaultRoutingTableHref: { + Type: schema.TypeString, + Computed: true, + Description: "Default Routing table Href", + }, + isDefaultRoutingTableName: { + Type: schema.TypeString, + Computed: true, + Description: "Default Routing table Name", + }, + isDefaultRoutingTableResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Default Routing table Resource Type", + }, + isDefaultRoutingTableCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "Default Routing table Created At", + }, + isDefaultRoutingTableLifecycleState: { + Type: schema.TypeString, + Computed: true, + Description: "Default Routing table Lifecycle State", + }, + isDefaultRTDirectLinkIngress: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, this routing table will be used to route traffic that originates from Direct Link to this VPC.", + }, + isDefaultRTTransitGatewayIngress: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, this routing table will be used to route traffic that originates from Transit Gateway to this VPC.", + }, + isDefaultRTVPCZoneIngress: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, this routing table will be used to route traffic that originates from subnets in other zones in this VPC.", + }, + isDefaultRTDefault: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this is the default routing table for this VPC", + }, + isDefaultRoutingTableRoutesList: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Route name", + }, + + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Route ID", + }, + }, + }, + }, + isDefaultRoutingTableSubnetsList: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Subnet name", + }, + + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Subnet ID", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISVPCDefaultRoutingTableGet(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + vpcID := 
d.Get(isDefaultRTVpcID).(string) + + getVpcDefaultRoutingTableOptions := sess.NewGetVPCDefaultRoutingTableOptions(vpcID) + result, detail, err := sess.GetVPCDefaultRoutingTable(getVpcDefaultRoutingTableOptions) + if err != nil || result == nil { + log.Printf("Error reading details of VPC Default Routing Table:%s", detail) + return err + } + d.Set(isDefaultRoutingTableID, *result.ID) + d.Set(isDefaultRoutingTableHref, *result.Href) + d.Set(isDefaultRoutingTableName, *result.Name) + d.Set(isDefaultRoutingTableResourceType, *result.ResourceType) + createdAt := *result.CreatedAt + d.Set(isDefaultRoutingTableCreatedAt, createdAt.String()) + d.Set(isDefaultRoutingTableLifecycleState, *result.LifecycleState) + d.Set(isDefaultRTDirectLinkIngress, *result.RouteDirectLinkIngress) + d.Set(isDefaultRTTransitGatewayIngress, *result.RouteTransitGatewayIngress) + d.Set(isDefaultRTVPCZoneIngress, *result.RouteVPCZoneIngress) + d.Set(isDefaultRTDefault, *result.IsDefault) + subnetsInfo := make([]map[string]interface{}, 0) + for _, subnet := range result.Subnets { + if subnet.Name != nil && subnet.ID != nil { + l := map[string]interface{}{ + "name": *subnet.Name, + "id": *subnet.ID, + } + subnetsInfo = append(subnetsInfo, l) + } + } + d.Set(isDefaultRoutingTableSubnetsList, subnetsInfo) + routesInfo := make([]map[string]interface{}, 0) + for _, route := range result.Routes { + if route.Name != nil && route.ID != nil { + k := map[string]interface{}{ + "name": *route.Name, + "id": *route.ID, + } + routesInfo = append(routesInfo, k) + } + } + d.Set(isDefaultRoutingTableRoutesList, routesInfo) + d.Set(isDefaultRTVpcID, vpcID) + d.SetId(*result.ID) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc_routing_table_routes.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc_routing_table_routes.go new file mode 100644 index 00000000000..c08a6d32856 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc_routing_table_routes.go @@ -0,0 +1,174 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
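The default-routing-table lookup above needs only a VPC identifier and publishes the table ID under `default_routing_table`; a sketch reusing the hypothetical `data.ibm_is_vpc.example` from earlier:

```hcl
data "ibm_is_vpc_default_routing_table" "example" {
  vpc = data.ibm_is_vpc.example.id
}

output "default_routing_table_id" {
  value = data.ibm_is_vpc_default_routing_table.example.default_routing_table
}
```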
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isRoutingTableRouteID = "route_id" + isRoutingTableRouteHref = "href" + isRoutingTableRouteName = "name" + isRoutingTableRouteCreatedAt = "created_at" + isRoutingTableRouteLifecycleState = "lifecycle_state" + isRoutingTableRouteAction = "action" + isRoutingTableRouteDestination = "destination" + isRoutingTableRouteNexthop = "nexthop" + isRoutingTableRouteZoneName = "zone" + isRoutingTableRouteVpcID = "vpc" + isRouteTableID = "routing_table" + isRoutingTableRoutes = "routes" +) + +func dataSourceIBMISVPCRoutingTableRoutes() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISVPCRoutingTableRoutesList, + Schema: map[string]*schema.Schema{ + isRoutingTableRouteVpcID: { + Type: schema.TypeString, + Required: true, + Description: "VPC identifier", + }, + isRouteTableID: { + Type: schema.TypeString, + Required: true, + Description: "Routing table identifier", + }, + isRoutingTableRoutes: { + Type: schema.TypeList, + Description: "Collection of Routing Table Routes", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isRoutingTableRouteID: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table Route ID", + }, + isRoutingTableRouteHref: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table Route Href", + }, + isRoutingTableRouteName: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table Route Name", + }, + isRoutingTableRouteCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table Route Created At", + }, + isRoutingTableRouteLifecycleState: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table Route Lifecycle State", + }, + isRoutingTableRouteAction: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table Route Action", + }, + isRoutingTableRouteDestination: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table Route Destination", + }, + isRoutingTableRouteNexthop: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table Route Nexthop Address or VPN Gateway Connection ID", + }, + isRoutingTableRouteZoneName: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table Route Zone Name", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISVPCRoutingTableRoutesList(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + vpcID := d.Get(isRoutingTableRouteVpcID).(string) + routingTableID := d.Get(isRouteTableID).(string) + start := "" + allrecs := []vpcv1.Route{} + for { + listVpcRoutingTablesRoutesOptions := sess.NewListVPCRoutingTableRoutesOptions(vpcID, routingTableID) + if start != "" { + listVpcRoutingTablesRoutesOptions.Start = &start + } + result, detail, err := sess.ListVPCRoutingTableRoutes(listVpcRoutingTablesRoutesOptions) + if err != nil { + log.Printf("Error reading list of VPC Routing Table Routes:%s\n%s", err, detail) + return err + } + start = GetNext(result.Next) + allrecs = append(allrecs, result.Routes...) 
+		if start == "" {
+			break
+		}
+	}
+
+	vpcRoutingTableRoutes := make([]map[string]interface{}, 0)
+
+	for _, instance := range allrecs {
+		route := map[string]interface{}{}
+		if instance.ID != nil {
+			route[isRoutingTableRouteID] = *instance.ID
+		}
+		if instance.Href != nil {
+			route[isRoutingTableRouteHref] = *instance.Href
+		}
+		if instance.Name != nil {
+			route[isRoutingTableRouteName] = *instance.Name
+		}
+		if instance.CreatedAt != nil {
+			route[isRoutingTableRouteCreatedAt] = (*instance.CreatedAt).String()
+		}
+		if instance.LifecycleState != nil {
+			route[isRoutingTableRouteLifecycleState] = *instance.LifecycleState
+		}
+		if instance.Destination != nil {
+			route[isRoutingTableRouteDestination] = *instance.Destination
+		}
+		if instance.Zone != nil && instance.Zone.Name != nil {
+			route[isRoutingTableRouteZoneName] = *instance.Zone.Name
+		}
+		if instance.NextHop != nil {
+			// A route's next hop is either an IP address or a VPN gateway
+			// connection; expose whichever field is populated.
+			nexthop := *instance.NextHop.(*vpcv1.RouteNextHop)
+			if nexthop.Address != nil {
+				route[isRoutingTableRouteNexthop] = *nexthop.Address
+			} else {
+				route[isRoutingTableRouteNexthop] = *nexthop.ID
+			}
+		}
+
+		vpcRoutingTableRoutes = append(vpcRoutingTableRoutes, route)
+	}
+	d.SetId(dataSourceIBMISVPCRoutingTableRoutesID(d))
+	d.Set(isRoutingTableRouteVpcID, vpcID)
+	d.Set(isRouteTableID, routingTableID)
+	d.Set(isRoutingTableRoutes, vpcRoutingTableRoutes)
+	return nil
+}
+
+// dataSourceIBMISVPCRoutingTableRoutesID returns a reasonable ID for a
+// routing table routes list.
+func dataSourceIBMISVPCRoutingTableRoutesID(d *schema.ResourceData) string {
+	return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc_routing_tables.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc_routing_tables.go
new file mode 100644
index 00000000000..522d1de5c02
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpc_routing_tables.go
@@ -0,0 +1,238 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
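A sketch tying the routes data source above to the earlier hypothetical lookups; both references are placeholders:

```hcl
data "ibm_is_vpc_routing_table_routes" "example" {
  vpc           = data.ibm_is_vpc.example.id
  routing_table = data.ibm_is_vpc_default_routing_table.example.default_routing_table
}
```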
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + //"encoding/json" + "log" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isRoutingTableID = "routing_table" + isRoutingTableHref = "href" + isRoutingTableName = "name" + isRoutingTableResourceType = "resource_type" + isRoutingTableCreatedAt = "created_at" + isRoutingTableLifecycleState = "lifecycle_state" + isRoutingTableRoutesList = "routes" + isRoutingTableSubnetsList = "subnets" + isRoutingTables = "routing_tables" + isVpcID = "vpc" + isRoutingTableDirectLinkIngress = "route_direct_link_ingress" + isRoutingTableTransitGatewayIngress = "route_transit_gateway_ingress" + isRoutingTableVPCZoneIngress = "route_vpc_zone_ingress" + isRoutingTableDefault = "is_default" +) + +func dataSourceIBMISVPCRoutingTables() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISVPCRoutingTablesList, + Schema: map[string]*schema.Schema{ + isVpcID: { + Type: schema.TypeString, + Required: true, + Description: "VPC identifier", + }, + isRoutingTables: { + Type: schema.TypeList, + Description: "Collection of Routing tables", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isRoutingTableID: { + Type: schema.TypeString, + Computed: true, + Description: "Routing Table ID", + }, + isRoutingTableHref: { + Type: schema.TypeString, + Computed: true, + Description: "Routing table Href", + }, + isRoutingTableName: { + Type: schema.TypeString, + Computed: true, + Description: "Routing table Name", + }, + isRoutingTableResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Routing table Resource Type", + }, + isRoutingTableCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "Routing table Created At", + }, + isRoutingTableLifecycleState: { + Type: schema.TypeString, + Computed: true, + Description: "Routing table Lifecycle State", + }, + isRoutingTableDirectLinkIngress: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, this routing table will be used to route traffic that originates from Direct Link to this VPC.", + }, + isRoutingTableTransitGatewayIngress: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, this routing table will be used to route traffic that originates from Transit Gateway to this VPC.", + }, + isRoutingTableVPCZoneIngress: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, this routing table will be used to route traffic that originates from subnets in other zones in this VPC.", + }, + isRoutingTableDefault: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this is the default routing table for this VPC", + }, + isRoutingTableRoutesList: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Route name", + }, + + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Route ID", + }, + }, + }, + }, + isRoutingTableSubnetsList: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Subnet name", + }, + + "id": { + Type: schema.TypeString, + Computed: true, + Description: "Subnet ID", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMISVPCRoutingTablesList(d 
*schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	vpcID := d.Get(isVpcID).(string)
+
+	start := ""
+	allrecs := []vpcv1.RoutingTable{}
+	for {
+		listOptions := sess.NewListVPCRoutingTablesOptions(vpcID)
+		if start != "" {
+			listOptions.Start = &start
+		}
+		result, detail, err := sess.ListVPCRoutingTables(listOptions)
+		if err != nil {
+			log.Printf("Error reading list of VPC Routing Tables:%s\n%s", err, detail)
+			return err
+		}
+		start = GetNext(result.Next)
+		allrecs = append(allrecs, result.RoutingTables...)
+		if start == "" {
+			break
+		}
+	}
+
+	vpcRoutingTables := make([]map[string]interface{}, 0)
+	for _, routingTable := range allrecs {
+		rtable := map[string]interface{}{}
+		if routingTable.ID != nil {
+			rtable[isRoutingTableID] = *routingTable.ID
+		}
+		if routingTable.Href != nil {
+			rtable[isRoutingTableHref] = *routingTable.Href
+		}
+		if routingTable.Name != nil {
+			rtable[isRoutingTableName] = *routingTable.Name
+		}
+		if routingTable.ResourceType != nil {
+			rtable[isRoutingTableResourceType] = *routingTable.ResourceType
+		}
+		if routingTable.CreatedAt != nil {
+			rtable[isRoutingTableCreatedAt] = (*routingTable.CreatedAt).String()
+		}
+		if routingTable.LifecycleState != nil {
+			rtable[isRoutingTableLifecycleState] = *routingTable.LifecycleState
+		}
+		if routingTable.RouteDirectLinkIngress != nil {
+			rtable[isRoutingTableDirectLinkIngress] = *routingTable.RouteDirectLinkIngress
+		}
+		if routingTable.RouteTransitGatewayIngress != nil {
+			rtable[isRoutingTableTransitGatewayIngress] = *routingTable.RouteTransitGatewayIngress
+		}
+		if routingTable.RouteVPCZoneIngress != nil {
+			rtable[isRoutingTableVPCZoneIngress] = *routingTable.RouteVPCZoneIngress
+		}
+		if routingTable.IsDefault != nil {
+			rtable[isRoutingTableDefault] = *routingTable.IsDefault
+		}
+		subnetsInfo := make([]map[string]interface{}, 0)
+		for _, subnet := range routingTable.Subnets {
+			if subnet.Name != nil && subnet.ID != nil {
+				l := map[string]interface{}{
+					"name": *subnet.Name,
+					"id":   *subnet.ID,
+				}
+				subnetsInfo = append(subnetsInfo, l)
+			}
+		}
+		rtable[isRoutingTableSubnetsList] = subnetsInfo
+		routesInfo := make([]map[string]interface{}, 0)
+		for _, route := range routingTable.Routes {
+			if route.Name != nil && route.ID != nil {
+				k := map[string]interface{}{
+					"name": *route.Name,
+					"id":   *route.ID,
+				}
+				routesInfo = append(routesInfo, k)
+			}
+		}
+		rtable[isRoutingTableRoutesList] = routesInfo
+		vpcRoutingTables = append(vpcRoutingTables, rtable)
+	}
+
+	d.SetId(dataSourceIBMISVPCRoutingTablesID(d))
+	d.Set(isVpcID, vpcID)
+	d.Set(isRoutingTables, vpcRoutingTables)
+	return nil
+}
+
+// dataSourceIBMISVPCRoutingTablesID returns a reasonable ID for a routing
+// tables list.
+func dataSourceIBMISVPCRoutingTablesID(d *schema.ResourceData) string {
+	return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpn_gateway_connections.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpn_gateway_connections.go
new file mode 100644
index 00000000000..3d75266ba3d
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpn_gateway_connections.go
@@ -0,0 +1,226 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
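And the corresponding sketch for enumerating every routing table in a VPC, again building on the placeholder VPC lookup:

```hcl
data "ibm_is_vpc_routing_tables" "example" {
  vpc = data.ibm_is_vpc.example.id
}

output "routing_table_names" {
  value = [for t in data.ibm_is_vpc_routing_tables.example.routing_tables : t.name]
}
```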
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isvpnGatewayConnections = "connections" + isVPNGatewayID = "vpn_gateway" + isVPNGatewayConnectionID = "id" +) + +func dataSourceIBMISVPNGatewayConnections() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMVPNGatewayConnectionsRead, + + Schema: map[string]*schema.Schema{ + + isVPNGatewayID: { + Type: schema.TypeString, + Required: true, + Description: "The VPN gateway identifier ", + }, + + isvpnGatewayConnections: { + Type: schema.TypeList, + Description: "Collection of VPN Gateways", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + isVPNGatewayConnectionAdminAuthenticationmode: { + Type: schema.TypeString, + Computed: true, + Description: "The authentication mode", + }, + isVPNGatewayConnectionCreatedat: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that this VPN gateway connection was created", + }, + isVPNGatewayConnectionAdminStateup: { + Type: schema.TypeBool, + Computed: true, + Description: "VPN gateway connection admin state", + }, + isVPNGatewayConnectionDeadPeerDetectionAction: { + Type: schema.TypeString, + Computed: true, + Description: "Action detection for dead peer detection action", + }, + isVPNGatewayConnectionDeadPeerDetectionInterval: { + Type: schema.TypeInt, + Computed: true, + Description: "Interval for dead peer detection interval", + }, + isVPNGatewayConnectionDeadPeerDetectionTimeout: { + Type: schema.TypeInt, + Computed: true, + Description: "Timeout for dead peer detection", + }, + isVPNGatewayConnectionID: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this VPN gateway connection", + }, + + isVPNGatewayConnectionIKEPolicy: { + Type: schema.TypeString, + Computed: true, + Description: "VPN gateway connection IKE Policy", + }, + isVPNGatewayConnectionIPSECPolicy: { + Type: schema.TypeString, + Computed: true, + Description: "IP security policy for vpn gateway connection", + }, + isVPNGatewayConnectionMode: { + Type: schema.TypeString, + Computed: true, + Description: "The mode of the VPN gateway", + }, + isVPNGatewayConnectionName: { + Type: schema.TypeString, + Computed: true, + Description: "VPN Gateway connection name", + }, + isVPNGatewayConnectionPeerAddress: { + Type: schema.TypeString, + Computed: true, + Description: "VPN gateway connection peer address", + }, + isVPNGatewayConnectionResourcetype: { + Type: schema.TypeString, + Computed: true, + Description: "The resource type", + }, + isVPNGatewayConnectionStatus: { + Type: schema.TypeString, + Computed: true, + Description: "VPN gateway connection status", + }, + + isVPNGatewayConnectionTunnels: { + Type: schema.TypeList, + Computed: true, + Optional: true, + MinItems: 0, + Description: "The VPN tunnel configuration for this VPN gateway connection (in static route mode)", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + Description: "The IP address of the VPN gateway member in which the tunnel resides", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the VPN Tunnel", + }, + }, + }, + }, + isVPNGatewayConnectionLocalCIDRS: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + 
Description: "VPN gateway connection local CIDRs", + }, + + isVPNGatewayConnectionPeerCIDRS: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "VPN gateway connection peer CIDRs", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMVPNGatewayConnectionsRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + vpngatewayID := d.Get(isVPNGatewayID).(string) + listvpnGWConnectionOptions := sess.NewListVPNGatewayConnectionsOptions(vpngatewayID) + + availableVPNGatewayConnections, detail, err := sess.ListVPNGatewayConnections(listvpnGWConnectionOptions) + if err != nil { + return fmt.Errorf("Error reading list of VPN Gateway Connections:%s\n%s", err, detail) + } + vpngatewayconnections := make([]map[string]interface{}, 0) + for _, instance := range availableVPNGatewayConnections.Connections { + gatewayconnection := map[string]interface{}{} + data := instance.(*vpcv1.VPNGatewayConnection) + gatewayconnection[isVPNGatewayConnectionAdminAuthenticationmode] = *data.AuthenticationMode + gatewayconnection[isVPNGatewayConnectionCreatedat] = data.CreatedAt.String() + gatewayconnection[isVPNGatewayConnectionAdminStateup] = *data.AdminStateUp + gatewayconnection[isVPNGatewayConnectionDeadPeerDetectionAction] = *data.DeadPeerDetection.Action + gatewayconnection[isVPNGatewayConnectionDeadPeerDetectionInterval] = *data.DeadPeerDetection.Interval + gatewayconnection[isVPNGatewayConnectionDeadPeerDetectionTimeout] = *data.DeadPeerDetection.Timeout + gatewayconnection[isVPNGatewayConnectionID] = *data.ID + + if data.IkePolicy != nil { + gatewayconnection[isVPNGatewayConnectionIKEPolicy] = *data.IkePolicy.ID + } + if data.IpsecPolicy != nil { + gatewayconnection[isVPNGatewayConnectionIPSECPolicy] = *data.IpsecPolicy.ID + } + if data.LocalCIDRs != nil { + gatewayconnection[isVPNGatewayConnectionLocalCIDRS] = flattenStringList(data.LocalCIDRs) + } + if data.PeerCIDRs != nil { + gatewayconnection[isVPNGatewayConnectionPeerCIDRS] = flattenStringList(data.PeerCIDRs) + } + gatewayconnection[isVPNGatewayConnectionMode] = *data.Mode + gatewayconnection[isVPNGatewayConnectionName] = *data.Name + gatewayconnection[isVPNGatewayConnectionPeerAddress] = *data.PeerAddress + gatewayconnection[isVPNGatewayConnectionResourcetype] = *data.ResourceType + gatewayconnection[isVPNGatewayConnectionStatus] = *data.Status + //if data.Tunnels != nil { + if len(data.Tunnels) > 0 { + vpcTunnelsList := make([]map[string]interface{}, 0) + for _, vpcTunnel := range data.Tunnels { + currentTunnel := map[string]interface{}{} + if vpcTunnel.PublicIP != nil { + if vpcTunnel.PublicIP != nil { + currentTunnel["address"] = *vpcTunnel.PublicIP.Address + } + if vpcTunnel.Status != nil { + currentTunnel["status"] = *vpcTunnel.Status + } + vpcTunnelsList = append(vpcTunnelsList, currentTunnel) + } + } + gatewayconnection[isVPNGatewayConnectionTunnels] = vpcTunnelsList + } + + vpngatewayconnections = append(vpngatewayconnections, gatewayconnection) + } + + d.SetId(dataSourceIBMVPNGatewayConnectionsID(d)) + d.Set(isvpnGatewayConnections, vpngatewayconnections) + return nil +} + +// dataSourceIBMVPNGatewaysID returns a reasonable ID list. 
+func dataSourceIBMVPNGatewayConnectionsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpn_gateways.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpn_gateways.go new file mode 100644 index 00000000000..c196df2e404 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_vpn_gateways.go @@ -0,0 +1,181 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isvpnGateways = "vpn_gateways" + isVPNGatewayResourceType = "resource_type" + isVPNGatewayCrn = "crn" +) + +func dataSourceIBMISVPNGateways() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMVPNGatewaysRead, + + Schema: map[string]*schema.Schema{ + + isvpnGateways: { + Type: schema.TypeList, + Description: "Collection of VPN Gateways", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVPNGatewayName: { + Type: schema.TypeString, + Computed: true, + Description: "VPN Gateway instance name", + }, + isVPNGatewayCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that this VPN gateway was created", + }, + isVPNGatewayCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The VPN gateway's CRN", + }, + isVPNGatewayMembers: { + Type: schema.TypeList, + Computed: true, + Description: "Collection of VPN gateway members", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + Description: "The public IP address assigned to the VPN gateway member", + }, + + "private_address": { + Type: schema.TypeString, + Computed: true, + Description: "The private IP address assigned to the VPN gateway member", + }, + + "role": { + Type: schema.TypeString, + Computed: true, + Description: "The high availability role assigned to the VPN gateway member", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the VPN gateway member", + }, + }, + }, + }, + + isVPNGatewayResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + + isVPNGatewayStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the VPN gateway", + }, + + isVPNGatewaySubnet: { + Type: schema.TypeString, + Computed: true, + Description: "VPNGateway subnet info", + }, + isVPNGatewayResourceGroup: { + Type: schema.TypeString, + Computed: true, + Description: "resource group identifiers ", + }, + isVPNGatewayMode: { + Type: schema.TypeString, + Computed: true, + Description: " VPN gateway mode(policy/route) ", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMVPNGatewaysRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + listvpnGWOptions := sess.NewListVPNGatewaysOptions() + + start := "" + allrecs := []vpcv1.VPNGatewayIntf{} + for { + if start != "" { + listvpnGWOptions.Start = &start + } + availableVPNGateways, detail, err := sess.ListVPNGateways(listvpnGWOptions) + if err != nil { + return fmt.Errorf("Error reading list of VPN Gateways:%s\n%s", err, detail) + } + start = GetNext(availableVPNGateways.Next) + allrecs = append(allrecs, 
availableVPNGateways.VPNGateways...) + if start == "" { + break + } + } + + vpngateways := make([]map[string]interface{}, 0) + for _, instance := range allrecs { + gateway := map[string]interface{}{} + data := instance.(*vpcv1.VPNGateway) + gateway[isVPNGatewayName] = *data.Name + gateway[isVPNGatewayCreatedAt] = data.CreatedAt.String() + gateway[isVPNGatewayResourceType] = *data.ResourceType + gateway[isVPNGatewayStatus] = *data.Status + gateway[isVPNGatewayMode] = *data.Mode + gateway[isVPNGatewayResourceGroup] = *data.ResourceGroup.ID + gateway[isVPNGatewaySubnet] = *data.Subnet.ID + gateway[isVPNGatewayCrn] = *data.CRN + + if data.Members != nil { + vpcMembersIpsList := make([]map[string]interface{}, 0) + for _, memberIP := range data.Members { + currentMemberIP := map[string]interface{}{} + if memberIP.PublicIP != nil { + currentMemberIP["address"] = *memberIP.PublicIP.Address + currentMemberIP["role"] = *memberIP.Role + currentMemberIP["status"] = *memberIP.Status + vpcMembersIpsList = append(vpcMembersIpsList, currentMemberIP) + } + if memberIP.PrivateIP != nil { + currentMemberIP["private_address"] = *memberIP.PrivateIP.Address + } + } + gateway[isVPNGatewayMembers] = vpcMembersIpsList + } + + vpngateways = append(vpngateways, gateway) + } + + d.SetId(dataSourceIBMVPNGatewaysID(d)) + d.Set(isvpnGateways, vpngateways) + return nil +} + +// dataSourceIBMVPNGatewaysID returns a reasonable ID list. +func dataSourceIBMVPNGatewaysID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_zone.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_zone.go new file mode 100644 index 00000000000..5d2f0c988f1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_zone.go @@ -0,0 +1,107 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
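A usage sketch for the two VPN data sources above. Note that the `vpn_gateways` list schema does not expose a top-level `id` attribute, so the connections lookup below uses a literal placeholder identifier:

```hcl
data "ibm_is_vpn_gateways" "example" {}

data "ibm_is_vpn_gateway_connections" "example" {
  vpn_gateway = "r006-0aa1b2c3-placeholder-gateway-id" # placeholder gateway ID
}
```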
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isZoneName = "name" + isZoneRegion = "region" + isZoneStatus = "status" +) + +func dataSourceIBMISZone() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISZoneRead, + + Schema: map[string]*schema.Schema{ + + isZoneName: { + Type: schema.TypeString, + Required: true, + }, + + isZoneRegion: { + Type: schema.TypeString, + Required: true, + }, + + isZoneStatus: { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMISZoneRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + regionName := d.Get(isZoneRegion).(string) + zoneName := d.Get(isZoneName).(string) + if userDetails.generation == 1 { + err := classicZoneGet(d, meta, regionName, zoneName) + if err != nil { + return err + } + } else { + err := zoneGet(d, meta, regionName, zoneName) + if err != nil { + return err + } + } + return nil +} + +func classicZoneGet(d *schema.ResourceData, meta interface{}, regionName, zoneName string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getRegionZoneOptions := &vpcclassicv1.GetRegionZoneOptions{ + RegionName: ®ionName, + Name: &zoneName, + } + zone, _, err := sess.GetRegionZone(getRegionZoneOptions) + if err != nil { + return err + } + // For lack of anything better, compose our id from region name + zone name. + id := fmt.Sprintf("%s.%s", *zone.Region.Name, *zone.Name) + d.SetId(id) + d.Set(isZoneName, *zone.Name) + d.Set(isZoneRegion, *zone.Region.Name) + d.Set(isZoneStatus, *zone.Status) + return nil +} + +func zoneGet(d *schema.ResourceData, meta interface{}, regionName, zoneName string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getRegionZoneOptions := &vpcv1.GetRegionZoneOptions{ + RegionName: ®ionName, + Name: &zoneName, + } + zone, _, err := sess.GetRegionZone(getRegionZoneOptions) + if err != nil { + return err + } + // For lack of anything better, compose our id from region name + zone name. + id := fmt.Sprintf("%s.%s", *zone.Region.Name, *zone.Name) + d.SetId(id) + d.Set(isZoneName, *zone.Name) + d.Set(isZoneRegion, *zone.Region.Name) + d.Set(isZoneStatus, *zone.Status) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_zones.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_zones.go new file mode 100644 index 00000000000..7b59171e263 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_is_zones.go @@ -0,0 +1,127 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
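A combined sketch for the zone data source above and the zone-list variant defined in the file that follows; `us-south` and `us-south-1` are illustrative values:

```hcl
data "ibm_is_zone" "example" {
  region = "us-south"   # illustrative region
  name   = "us-south-1" # illustrative zone
}

data "ibm_is_zones" "example" {
  region = "us-south"
}
```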
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isZoneNames = "zones" +) + +func dataSourceIBMISZones() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMISZonesRead, + + Schema: map[string]*schema.Schema{ + + isZoneRegion: { + Type: schema.TypeString, + Required: true, + }, + + isZoneStatus: { + Type: schema.TypeString, + Optional: true, + }, + + isZoneNames: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceIBMISZonesRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + regionName := d.Get(isZoneRegion).(string) + if userDetails.generation == 1 { + err := classicZonesList(d, meta, regionName) + if err != nil { + return err + } + } else { + err := zonesList(d, meta, regionName) + if err != nil { + return err + } + } + return nil +} + +func classicZonesList(d *schema.ResourceData, meta interface{}, regionName string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + listRegionZonesOptions := &vpcclassicv1.ListRegionZonesOptions{ + RegionName: ®ionName, + } + availableZones, _, err := sess.ListRegionZones(listRegionZonesOptions) + if err != nil { + return err + } + names := make([]string, 0) + status := d.Get(isZoneStatus).(string) + for _, zone := range availableZones.Zones { + if status == "" || *zone.Status == status { + names = append(names, *zone.Name) + } + } + d.SetId(dataSourceIBMISZonesId(d)) + d.Set(isZoneNames, names) + return nil +} + +func zonesList(d *schema.ResourceData, meta interface{}, regionName string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + listRegionZonesOptions := &vpcv1.ListRegionZonesOptions{ + RegionName: ®ionName, + } + availableZones, _, err := sess.ListRegionZones(listRegionZonesOptions) + if err != nil { + return err + } + names := make([]string, 0) + status := d.Get(isZoneStatus).(string) + for _, zone := range availableZones.Zones { + if status == "" || *zone.Status == status { + names = append(names, *zone.Name) + } + } + d.SetId(dataSourceIBMISZonesId(d)) + d.Set(isZoneNames, names) + return nil +} + +// dataSourceIBMISZonesId returns a reasonable ID for a zone list. +func dataSourceIBMISZonesId(d *schema.ResourceData) string { + // Our zone list is not guaranteed to be stable because the content + // of the list can vary between two calls if any of the following + // events occur between calls: + // - a zone is added to our region + // - a zone is dropped from our region + // - we are using the status filter and the status of one or more + // zones changes between calls. + // + // For simplicity we are using a timestamp for the required terraform id. + // If we find through usage that this choice is too ephemeral for our users + // then we can change this function to use a more stable id, perhaps + // composed from a hash of the list contents. But, for now, a timestamp + // is good enough. 
+ return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kms_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kms_key.go new file mode 100644 index 00000000000..222a73c341b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kms_key.go @@ -0,0 +1,299 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "net/url" + "strings" + + kp "github.com/IBM/keyprotect-go-client" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMKMSkey() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMKMSKeyRead, + + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Key protect or hpcs instance GUID", + }, + "key_name": { + Type: schema.TypeString, + Optional: true, + Description: "The name of the key to be fetched", + ExactlyOneOf: []string{"alias", "key_name"}, + }, + "alias": { + Type: schema.TypeString, + Optional: true, + Description: "The alias associated with the key", + ExactlyOneOf: []string{"alias", "key_name"}, + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private"}), + Description: "public or private", + Default: "public", + }, + "keys": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aliases": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "key_ring_id": { + Type: schema.TypeString, + Computed: true, + Description: "The key ring id of the key to be fetched", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "standard_key": { + Type: schema.TypeBool, + Computed: true, + }, + "policies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rotation": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "last_update_date": { + Type: schema.TypeString, + Computed: true, + }, + "interval_month": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "dual_auth_delete": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "last_update_date": { + Type: schema.TypeString, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMKMSKeyRead(d *schema.ResourceData, meta interface{}) error { + api, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + + rContollerClient, err := 
meta.(ClientSession).ResourceControllerAPIV2()
+	if err != nil {
+		return err
+	}
+
+	instanceID := d.Get("instance_id").(string)
+	endpointType := d.Get("endpoint_type").(string)
+
+	rContollerApi := rContollerClient.ResourceServiceInstanceV2()
+
+	instanceData, err := rContollerApi.GetInstance(instanceID)
+	if err != nil {
+		return err
+	}
+	instanceCRN := instanceData.Crn.String()
+
+	var hpcsEndpointURL string
+	crnData := strings.Split(instanceCRN, ":")
+
+	if crnData[4] == "hs-crypto" {
+
+		hpcsEndpointApi, err := meta.(ClientSession).HpcsEndpointAPI()
+		if err != nil {
+			return err
+		}
+		resp, err := hpcsEndpointApi.Endpoint().GetAPIEndpoint(instanceID)
+		if err != nil {
+			return err
+		}
+
+		if endpointType == "public" {
+			hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys"
+		} else {
+			hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys"
+		}
+
+		u, err := url.Parse(hpcsEndpointURL)
+		if err != nil {
+			return fmt.Errorf("Error Parsing hpcs EndpointURL")
+		}
+		api.URL = u
+	} else if crnData[4] == "kms" {
+		if endpointType == "private" {
+			if !strings.HasPrefix(api.Config.BaseURL, "private") {
+				api.Config.BaseURL = "private." + api.Config.BaseURL
+			}
+		}
+	} else {
+		return fmt.Errorf("Invalid or unsupported service Instance")
+	}
+
+	api.Config.InstanceID = instanceID
+
+	if v, ok := d.GetOk("key_name"); ok {
+		keys, err := api.GetKeys(context.Background(), 0, 0)
+		if err != nil {
+			return fmt.Errorf(
+				"Get Keys failed with error: %s", err)
+		}
+		retrievedKeys := keys.Keys
+		if len(retrievedKeys) == 0 {
+			return fmt.Errorf("No keys in instance %s", instanceID)
+		}
+		var keyName string
+		var matchKeys []kp.Key
+		if v.(string) != "" {
+			keyName = v.(string)
+			for _, keyData := range retrievedKeys {
+				if keyData.Name == keyName {
+					matchKeys = append(matchKeys, keyData)
+				}
+			}
+		} else {
+			matchKeys = retrievedKeys
+		}
+
+		if len(matchKeys) == 0 {
+			return fmt.Errorf("No keys with name %s in instance %s", keyName, instanceID)
+		}
+
+		keyMap := make([]map[string]interface{}, 0, len(matchKeys))
+
+		for _, key := range matchKeys {
+			keyInstance := make(map[string]interface{})
+			keyInstance["id"] = key.ID
+			keyInstance["name"] = key.Name
+			keyInstance["crn"] = key.CRN
+			keyInstance["standard_key"] = key.Extractable
+			keyInstance["aliases"] = key.Aliases
+			keyInstance["key_ring_id"] = key.KeyRingID
+			policies, err := api.GetPolicies(context.Background(), key.ID)
+			if err != nil {
+				return fmt.Errorf("Failed to read policies: %s", err)
+			}
+			if len(policies) == 0 {
+				log.Printf("No Policy Configurations read\n")
+			} else {
+				keyInstance["policies"] = flattenKeyPolicies(policies)
+			}
+			keyMap = append(keyMap, keyInstance)
+
+		}
+		d.SetId(instanceID)
+		d.Set("keys", keyMap)
+		d.Set("instance_id", instanceID)
+	} else {
+		// "alias" is the schema key declared above (ExactlyOneOf with "key_name").
+		aliasName := d.Get("alias").(string)
+		key, err := api.GetKey(context.Background(), aliasName)
+		if err != nil {
+			return fmt.Errorf(
+				"Get Keys failed with error: %s", err)
+		}
+		keyMap := make([]map[string]interface{}, 0, 1)
+		keyInstance := make(map[string]interface{})
+		keyInstance["id"] = key.ID
+		keyInstance["name"] = key.Name
+		keyInstance["crn"] = key.CRN
+		keyInstance["standard_key"] = key.Extractable
+		keyInstance["aliases"] = key.Aliases
+		keyInstance["key_ring_id"] = key.KeyRingID
+		policies, err := api.GetPolicies(context.Background(), key.ID)
+		if err != nil {
+			return fmt.Errorf("Failed to read policies: %s", err)
+		}
+		if len(policies) == 0 {
+			log.Printf("No Policy Configurations read\n")
+		} else {
+			keyInstance["policies"] = 
flattenKeyPolicies(policies) + } + keyMap = append(keyMap, keyInstance) + + d.SetId(instanceID) + d.Set("keys", keyMap) + d.Set("instance_id", instanceID) + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kms_key_rings.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kms_key_rings.go new file mode 100644 index 00000000000..d96a3c0d76a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kms_key_rings.go @@ -0,0 +1,152 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "net/url" + "strings" + + //kp "github.com/IBM/keyprotect-go-client" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMKMSkeyRings() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMKMSKeyRingsRead, + + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Key protect or hpcs instance GUID", + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private"}), + Description: "public or private", + Default: "public", + }, + "key_rings": { + Type: schema.TypeList, + Computed: true, + Description: "Key Rings for a particualer instance", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMKMSKeyRingsRead(d *schema.ResourceData, meta interface{}) error { + api, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + + rContollerClient, err := meta.(ClientSession).ResourceControllerAPIV2() + if err != nil { + return err + } + + instanceID := d.Get("instance_id").(string) + endpointType := d.Get("endpoint_type").(string) + + rContollerApi := rContollerClient.ResourceServiceInstanceV2() + + instanceData, err := rContollerApi.GetInstance(instanceID) + if err != nil { + return err + } + instanceCRN := instanceData.Crn.String() + + var hpcsEndpointURL string + crnData := strings.Split(instanceCRN, ":") + + if crnData[4] == "hs-crypto" { + + hpcsEndpointApi, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + resp, err := hpcsEndpointApi.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + } + api.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.HasPrefix(api.Config.BaseURL, "private") { + api.Config.BaseURL = "private." 
+ api.Config.BaseURL + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + api.Config.InstanceID = instanceID + keys, err := api.GetKeyRings(context.Background()) + if err != nil { + return fmt.Errorf( + "Get Key Rings failed with error: %s", err) + } + retreivedKeyRings := keys.KeyRings + if keys == nil || len(retreivedKeyRings) == 0 { + return fmt.Errorf("No key Rings in instance %s", instanceID) + } + var keyRingName string + + if len(retreivedKeyRings) == 0 { + return fmt.Errorf("No key Ring with name %s in instance %s", keyRingName, instanceID) + } + + keyRingMap := make([]map[string]interface{}, 0, len(retreivedKeyRings)) + + for _, keyRing := range retreivedKeyRings { + keyInstance := make(map[string]interface{}) + + keyInstance["id"] = keyRing.ID + keyInstance["created_by"] = keyRing.CreatedBy + if keyRing.CreationDate != nil { + keyInstance["creation_date"] = keyRing.CreationDate.String() + } + keyRingMap = append(keyRingMap, keyInstance) + + } + + d.SetId(instanceID) + d.Set("key_rings", keyRingMap) + d.Set("instance_id", instanceID) + d.Set("endpoint_type", endpointType) + + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kms_keys.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kms_keys.go new file mode 100644 index 00000000000..a0e51609e1a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kms_keys.go @@ -0,0 +1,281 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "net/url" + "strings" + + kp "github.com/IBM/keyprotect-go-client" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMKMSkeys() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMKMSKeysRead, + + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Key protect or hpcs instance GUID", + }, + "key_name": { + Type: schema.TypeString, + Optional: true, + Description: "The name of the key to be fetched", + ConflictsWith: []string{"alias"}, + }, + "alias": { + Type: schema.TypeString, + Optional: true, + Description: "The name of the key to be fetched", + ConflictsWith: []string{"key_name"}, + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private"}), + Description: "public or private", + ForceNew: true, + Default: "public", + }, + "keys": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aliases": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "crn": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "key_ring_id": { + Type: schema.TypeString, + Computed: true, + Description: "The key ring id of the key to be fetched", + }, + "standard_key": { + Type: schema.TypeBool, + Computed: true, + }, + "policies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rotation": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + 
Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "last_update_date": { + Type: schema.TypeString, + Computed: true, + }, + "interval_month": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "dual_auth_delete": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "last_update_date": { + Type: schema.TypeString, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + +} + +func dataSourceIBMKMSKeysRead(d *schema.ResourceData, meta interface{}) error { + api, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + + rContollerClient, err := meta.(ClientSession).ResourceControllerAPIV2() + if err != nil { + return err + } + + instanceID := d.Get("instance_id").(string) + endpointType := d.Get("endpoint_type").(string) + + rContollerApi := rContollerClient.ResourceServiceInstanceV2() + + instanceData, err := rContollerApi.GetInstance(instanceID) + if err != nil { + return err + } + instanceCRN := instanceData.Crn.String() + + var hpcsEndpointURL string + crnData := strings.Split(instanceCRN, ":") + + if crnData[4] == "hs-crypto" { + hpcsEndpointApi, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + resp, err := hpcsEndpointApi.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + } + api.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.HasPrefix(api.Config.BaseURL, "private") { + api.Config.BaseURL = "private." 
+ api.Config.BaseURL + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + api.Config.InstanceID = instanceID + if v, ok := d.GetOk("alias"); ok { + aliasName := v.(string) + key, err := api.GetKey(context.Background(), aliasName) + if err != nil { + return fmt.Errorf( + "Get Keys failed with error: %s", err) + } else { + keyMap := make([]map[string]interface{}, 0, 1) + keyInstance := make(map[string]interface{}) + keyInstance["id"] = key.ID + keyInstance["name"] = key.Name + keyInstance["crn"] = key.CRN + keyInstance["standard_key"] = key.Extractable + keyInstance["aliases"] = key.Aliases + keyInstance["key_ring_id"] = key.KeyRingID + keyMap = append(keyMap, keyInstance) + d.Set("keys", keyMap) + + } + } else { + keys, err := api.GetKeys(context.Background(), 100, 0) + if err != nil { + return fmt.Errorf( + "Get Keys failed with error: %s", err) + } + retreivedKeys := keys.Keys + if len(retreivedKeys) == 0 { + return fmt.Errorf("No keys in instance %s", instanceID) + } + var keyName string + var matchKeys []kp.Key + if v, ok := d.GetOk("key_name"); ok { + keyName = v.(string) + for _, keyData := range retreivedKeys { + if keyData.Name == keyName { + matchKeys = append(matchKeys, keyData) + } + } + } else { + matchKeys = retreivedKeys + } + + if len(matchKeys) == 0 { + return fmt.Errorf("No keys with name %s in instance %s", keyName, instanceID) + } + + keyMap := make([]map[string]interface{}, 0, len(matchKeys)) + + for _, key := range matchKeys { + keyInstance := make(map[string]interface{}) + keyInstance["id"] = key.ID + keyInstance["name"] = key.Name + keyInstance["crn"] = key.CRN + keyInstance["standard_key"] = key.Extractable + keyInstance["aliases"] = key.Aliases + keyInstance["key_ring_id"] = key.KeyRingID + keyMap = append(keyMap, keyInstance) + + } + d.Set("keys", keyMap) + } + + d.SetId(instanceID) + d.Set("instance_id", instanceID) + + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kp_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kp_key.go new file mode 100644 index 00000000000..3481bf54e2d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_kp_key.go @@ -0,0 +1,108 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
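+//
+// Example usage (illustrative sketch; the instance GUID and key name are
+// hypothetical):
+//
+//	data "ibm_kp_key" "example" {
+//	  key_protect_id = "<key-protect-instance-guid>"
+//	  key_name       = "my-key"
+//	}
+//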
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + + kp "github.com/IBM/keyprotect-go-client" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMkey() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMKeyRead, + + Schema: map[string]*schema.Schema{ + "key_protect_id": { + Type: schema.TypeString, + Required: true, + }, + "key_name": { + Type: schema.TypeString, + Optional: true, + }, + "keys": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "crn": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "standard_key": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + } + +} + +func dataSourceIBMKeyRead(d *schema.ResourceData, meta interface{}) error { + api, err := meta.(ClientSession).keyProtectAPI() + if err != nil { + return err + } + + instanceID := d.Get("key_protect_id").(string) + api.Config.InstanceID = instanceID + keys, err := api.GetKeys(context.Background(), 100, 0) + if err != nil { + return fmt.Errorf( + "Get Keys failed with error: %s", err) + } + retreivedKeys := keys.Keys + if len(retreivedKeys) == 0 { + return fmt.Errorf("No keys in instance %s", instanceID) + } + var keyName string + var matchKeys []kp.Key + if v, ok := d.GetOk("key_name"); ok { + keyName = v.(string) + for _, keyData := range retreivedKeys { + if keyData.Name == keyName { + matchKeys = append(matchKeys, keyData) + } + } + } else { + matchKeys = retreivedKeys + } + + if len(matchKeys) == 0 { + return fmt.Errorf("No keys with name %s in instance %s", keyName, instanceID) + } + + keyMap := make([]map[string]interface{}, 0, len(matchKeys)) + + for _, key := range matchKeys { + keyInstance := make(map[string]interface{}) + keyInstance["id"] = key.ID + keyInstance["name"] = key.Name + keyInstance["crn"] = key.CRN + keyInstance["standard_key"] = key.Extractable + keyMap = append(keyMap, keyInstance) + + } + + d.SetId(instanceID) + d.Set("keys", keyMap) + d.Set("key_protect_id", instanceID) + + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_lbaas.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_lbaas.go new file mode 100644 index 00000000000..1e1774153fc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_lbaas.go @@ -0,0 +1,239 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
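+//
+// Example usage (illustrative sketch; the load balancer name is hypothetical):
+//
+//	data "ibm_lbaas" "example" {
+//	  name = "my-lbaas"
+//	}
+//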
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMLbaas() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMLbaasRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "datacenter": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "vip": { + Type: schema.TypeString, + Computed: true, + }, + "server_instances_up": { + Type: schema.TypeInt, + Computed: true, + }, + "server_instances_down": { + Type: schema.TypeInt, + Computed: true, + }, + "active_connections": { + Type: schema.TypeInt, + Computed: true, + }, + "use_system_public_ip_pool": { + Type: schema.TypeBool, + Computed: true, + }, + "ssl_ciphers": { + Type: schema.TypeSet, + Computed: true, + Set: schema.HashString, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "protocols": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "frontend_protocol": { + Type: schema.TypeString, + Computed: true, + }, + "frontend_port": { + Type: schema.TypeInt, + Computed: true, + }, + "backend_protocol": { + Type: schema.TypeString, + Computed: true, + }, + "backend_port": { + Type: schema.TypeInt, + Computed: true, + }, + "load_balancing_method": { + Type: schema.TypeString, + Computed: true, + }, + "session_stickiness": { + Type: schema.TypeString, + Computed: true, + }, + "max_conn": { + Type: schema.TypeInt, + Computed: true, + }, + "tls_certificate_id": { + Type: schema.TypeInt, + Computed: true, + }, + "protocol_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "health_monitors": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "protocol": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "interval": { + Type: schema.TypeInt, + Computed: true, + }, + "max_retries": { + Type: schema.TypeInt, + Computed: true, + }, + "timeout": { + Type: schema.TypeInt, + Computed: true, + }, + "url_path": { + Type: schema.TypeString, + Computed: true, + }, + "monitor_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "server_instances": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "private_ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Computed: true, + }, + "member_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMLbaasRead(d *schema.ResourceData, meta interface{}) error { + name := d.Get("name").(string) + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkLBaaSLoadBalancerService(sess) + lbs, err := service.Mask("datacenter,members,listeners.defaultPool,listeners.defaultPool.sessionAffinity,listeners.defaultPool.healthMonitor,healthMonitors,sslCiphers[name],useSystemPublicIpPool,isPublic,name,description,operatingStatus,address,uuid").Filter(filter.Build( + 
filter.Path("name").Eq(name))).GetAllObjects() + if err != nil { + return err + } + if len(lbs) != 1 { + return fmt.Errorf("No load balancer with name: %s", name) + } + result := lbs[0] + + //Get statistics + lbStat, err := service.GetLoadBalancerStatistics(result.Uuid) + if err != nil { + return fmt.Errorf("Error retrieving load balancer statistics: %s", err) + } + //Get members health + lbMembersHealth, err := service.GetLoadBalancerMemberHealth(result.Uuid) + if err != nil { + return fmt.Errorf("Error retrieving load balancer members: %s", err) + } + members := flattenServerInstances(result.Members) + + for _, lbHealth := range lbMembersHealth { + for _, lbMemHealth := range lbHealth.MembersHealth { + for _, member := range members { + if member["member_id"] == *lbMemHealth.Uuid { + member["status"] = *lbMemHealth.Status + } + } + } + } + + var lbType string + if *result.IsPublic == 1 { + lbType = "PUBLIC" + } else { + lbType = "PRIVATE" + } + + d.SetId(*result.Uuid) + + d.Set("name", result.Name) + d.Set("description", result.Description) + d.Set("server_instances_up", lbStat.NumberOfMembersUp) + d.Set("server_instances_down", lbStat.NumberOfMembersDown) + d.Set("active_connections", lbStat.TotalConnections) + if result.Datacenter != nil { + d.Set("datacenter", result.Datacenter.Name) + } + d.Set("type", lbType) + d.Set("status", result.OperatingStatus) + d.Set("vip", result.Address) + d.Set("protocols", flattenProtocols(result.Listeners)) + d.Set("health_monitors", flattenHealthMonitors(result.Listeners)) + d.Set("server_instances", members) + d.Set("ssl_ciphers", flattenSSLCiphers(result.SslCiphers)) + if *result.UseSystemPublicIpPool == 1 { + d.Set("use_system_public_ip_pool", true) + } else { + d.Set("use_system_public_ip_pool", false) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_network_vlan.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_network_vlan.go new file mode 100644 index 00000000000..e177dd5a48d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_network_vlan.go @@ -0,0 +1,210 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "errors" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMNetworkVlan() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMNetworkVlanRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "number": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "router_hostname": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "virtual_guests": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + }, + "hostname": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "subnets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnet": { + Type: schema.TypeString, + Computed: true, + }, + "subnet_type": { + Type: schema.TypeString, + Computed: true, + }, + "subnet_size": { + Type: schema.TypeInt, + Computed: true, + }, + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "cidr": { + Type: schema.TypeInt, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMNetworkVlanRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + name := d.Get("name").(string) + number := d.Get("number").(int) + routerHostname := d.Get("router_hostname").(string) + var vlan *datatypes.Network_Vlan + var err error + + if number != 0 && routerHostname != "" { + // Got vlan number and router, get vlan, and compute name + vlan, err = getVlan(number, routerHostname, name, meta) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%d", *vlan.Id)) + if vlan.Name != nil { + d.Set("name", *vlan.Name) + } + } else if name != "" { + // Got name, get vlan, and compute router hostname and vlan number + filters := filter.New(filter.Path("networkVlans.name").Eq(name)) + if number != 0 { + filters = append(filters, filter.Path("networkVlans.vlanNumber").Eq(number)) + } + networkVlans, err := service. + Mask("id,vlanNumber,name,primaryRouter[hostname],subnets[networkIdentifier,cidr,subnetType,id,gateway],virtualGuests[id,domain,hostname]"). + Filter( + filter.Build( + filters..., + ), + ). + GetNetworkVlans() + if err != nil { + return fmt.Errorf("Error obtaining VLAN id: %s", err) + } else if len(networkVlans) == 0 { + return fmt.Errorf("No VLAN was found with the name '%s'", name) + } + + vlan = &networkVlans[0] + d.SetId(fmt.Sprintf("%d", *vlan.Id)) + d.Set("number", *vlan.VlanNumber) + + if vlan.PrimaryRouter != nil && vlan.PrimaryRouter.Hostname != nil { + d.Set("router_hostname", *vlan.PrimaryRouter.Hostname) + } + } else { + return errors.New("missing required properties. 
Need a VLAN name, or the VLAN's number and router hostname") + } + + // Get subnets in cidr format for display + sbns := make([]map[string]interface{}, len(vlan.Subnets)) + for i, elem := range vlan.Subnets { + subnet := make(map[string]interface{}) + subnet["subnet"] = fmt.Sprintf("%s/%s", *elem.NetworkIdentifier, strconv.Itoa(*elem.Cidr)) + subnet["subnet_type"] = *elem.SubnetType + subnet["subnet_size"] = 1 << (uint)(32-*elem.Cidr) + subnet["cidr"] = *elem.Cidr + subnet["id"] = *elem.Id + if elem.Gateway != nil { + subnet["gateway"] = *elem.Gateway + } + sbns[i] = subnet + + } + d.Set("subnets", sbns) + + vgs := make([]map[string]interface{}, len(vlan.VirtualGuests)) + for i, vg := range vlan.VirtualGuests { + v := make(map[string]interface{}) + v["id"] = *vg.Id + v["domain"] = *vg.Domain + v["hostname"] = *vg.Hostname + vgs[i] = v + } + d.Set("virtual_guests", vgs) + return nil +} + +func getVlan(vlanNumber int, primaryRouterHostname string, name string, meta interface{}) (*datatypes.Network_Vlan, error) { + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + + filters := filter.New(filter.Path("networkVlans.primaryRouter.hostname").Eq(primaryRouterHostname), + filter.Path("networkVlans.vlanNumber").Eq(vlanNumber)) + if name != "" { + filters = append(filters, filter.Path("networkVlans.name").Eq(name)) + } + networkVlans, err := service. + Mask("id,name,subnets[networkIdentifier,cidr,subnetType,id,gateway],virtualGuests[id,domain,hostname]"). + Filter( + filter.Build( + filters..., + ), + ). + GetNetworkVlans() + + if err != nil { + return &datatypes.Network_Vlan{}, fmt.Errorf("Error looking up Vlan: %s", err) + } + + if len(networkVlans) < 1 { + return &datatypes.Network_Vlan{}, fmt.Errorf( + "Unable to locate a vlan matching the provided router hostname and vlan number: %s/%d", + primaryRouterHostname, + vlanNumber) + } + + return &networkVlans[0], nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_org.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_org.go new file mode 100644 index 00000000000..27d08d57a16 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_org.go @@ -0,0 +1,55 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
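+//
+// Example usage (illustrative sketch; the org name is hypothetical, and
+// "name" is preferred over the deprecated "org" argument):
+//
+//	data "ibm_org" "example" {
+//	  name = "myorg@domain"
+//	}
+//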
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMOrg() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMOrgRead, + + Schema: map[string]*schema.Schema{ + "org": { + Description: "Org name, for example myorg@domain", + Type: schema.TypeString, + Optional: true, + Deprecated: "use name instead", + ExactlyOneOf: []string{"org", "name"}, + }, + "name": { + Description: "Org name, for example myorg@domain", + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"org", "name"}, + }, + }, + } +} + +func dataSourceIBMOrgRead(d *schema.ResourceData, meta interface{}) error { + cfAPI, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + orgAPI := cfAPI.Organizations() + var org string + if v, ok := d.GetOk("name"); ok { + org = v.(string) + } + if v, ok := d.GetOk("org"); ok { + org = v.(string) + } + + orgFields, err := orgAPI.FindByName(org, BluemixRegion) + if err != nil { + return fmt.Errorf("Error retrieving organisation: %s", err) + } + d.SetId(orgFields.GUID) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_org_quota.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_org_quota.go new file mode 100644 index 00000000000..9f2c5adbd6e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_org_quota.go @@ -0,0 +1,105 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMOrgQuota() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMOrgQuotaRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "Org quota name, for example qIBM", + Type: schema.TypeString, + Required: true, + }, + "non_basic_services_allowed": { + Description: "Define non basic services are allowed for organization.", + Type: schema.TypeBool, + Computed: true, + }, + "total_services": { + Description: "Defines the total services for organization.", + Type: schema.TypeInt, + Computed: true, + }, + "total_routes": { + Description: "Defines the total route for organization.", + Type: schema.TypeInt, + Computed: true, + }, + "memory_limit": { + Description: "Defines the total memory limit for organization.", + Type: schema.TypeInt, + Computed: true, + }, + "instance_memory_limit": { + Description: "Defines the total instance memory limit for organization.", + Type: schema.TypeInt, + Computed: true, + }, + "trial_db_allowed": { + Description: "Defines trial db are allowed for organization.", + Type: schema.TypeBool, + Computed: true, + }, + "app_instance_limit": { + Description: "Defines the total app instance limit for organization.", + Type: schema.TypeInt, + Computed: true, + }, + "total_private_domains": { + Description: "Defines the total private domain limit for organization.v", + Type: schema.TypeInt, + Computed: true, + }, + "app_tasks_limit": { + Description: "Defines the total app task limit for organization.", + Type: schema.TypeInt, + Computed: true, + }, + "total_service_keys": { + Description: "Defines the total service keys for organization.", + Type: schema.TypeInt, + Computed: true, + }, + "total_reserved_route_ports": { + Description: "Defines the number of reserved route ports for organization. 
", + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceIBMOrgQuotaRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + orgQuotaAPI := cfClient.OrgQuotas() + orgQuotaName := d.Get("name").(string) + orgQuotaFields, err := orgQuotaAPI.FindByName(orgQuotaName) + if err != nil { + return fmt.Errorf("Error retrieving org quota: %s", err) + } + d.SetId(orgQuotaFields.GUID) + d.Set("app_instance_limit", orgQuotaFields.AppInstanceLimit) + d.Set("app_tasks_limit", orgQuotaFields.AppTasksLimit) + d.Set("instance_memory_limit", orgQuotaFields.InstanceMemoryLimitInMB) + d.Set("memory_limit", orgQuotaFields.MemoryLimitInMB) + d.Set("non_basic_services_allowed", orgQuotaFields.NonBasicServicesAllowed) + d.Set("total_private_domains", orgQuotaFields.PrivateDomainsLimit) + d.Set("total_reserved_route_ports", orgQuotaFields.RoutePortsLimit) + d.Set("total_routes", orgQuotaFields.RoutesLimit) + d.Set("total_service_keys", orgQuotaFields.ServiceKeysLimit) + d.Set("total_services", orgQuotaFields.ServicesLimit) + d.Set("trial_db_allowed", orgQuotaFields.TrialDBAllowed) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_catalog_images.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_catalog_images.go new file mode 100644 index 00000000000..a73f28334f7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_catalog_images.go @@ -0,0 +1,180 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" +) + +/* +Datasource to get the list of images that are available when a power instance is created + +*/ +func dataSourceIBMPICatalogImages() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPICatalogImagesRead, + Schema: map[string]*schema.Schema{ + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + "sap": { + Type: schema.TypeBool, + Optional: true, + }, + "images": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "storage_type": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "last_update_date": { + Type: schema.TypeString, + Computed: true, + }, + "image_type": { + Type: schema.TypeString, + Computed: true, + }, + "container_format": { + Type: schema.TypeString, + Computed: true, + }, + "disk_format": { + Type: schema.TypeString, + Computed: true, + }, + "operating_system": { + Type: schema.TypeString, + Computed: true, + }, + "hypervisor_type": { + Type: schema.TypeString, + Computed: true, + }, + "architecture": { + Type: schema.TypeString, + Computed: true, + }, + "endianness": { + Type: schema.TypeString, + Computed: true, + }, + "href": { + Type: schema.TypeString, + Computed: true, 
+ }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPICatalogImagesRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + + if err != nil { + return err + } + sap := false + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + if v, ok := d.GetOk("sap"); ok { + sap = v.(bool) + } + + imageC := instance.NewIBMPIImageClient(sess, powerinstanceid) + result, err := imageC.GetSAPImages(powerinstanceid, sap) + if err != nil { + return err + } + imageData := result.Images + images := make([]map[string]interface{}, 0) + for _, i := range imageData { + image := make(map[string]interface{}) + image["image_id"] = *i.ImageID + image["name"] = *i.Name + if i.State != nil { + image["state"] = *i.State + } + if i.Description != nil { + image["description"] = *i.Description + } + if i.StorageType != nil { + image["storage_type"] = *i.StorageType + } + if i.CreationDate != nil { + image["creation_date"] = i.CreationDate.String() + } + if i.LastUpdateDate != nil { + image["last_update_date"] = i.LastUpdateDate.String() + } + if i.Href != nil { + image["href"] = *i.Href + } + if i.Specifications != nil { + s := i.Specifications + if &s.ImageType != nil { + image["image_type"] = s.ImageType + } + if &s.ContainerFormat != nil { + image["container_format"] = s.ContainerFormat + } + if &s.DiskFormat != nil { + image["disk_format"] = s.DiskFormat + } + if &s.OperatingSystem != nil { + image["operating_system"] = s.OperatingSystem + } + if &s.HypervisorType != nil { + image["hypervisor_type"] = s.HypervisorType + } + if &s.Architecture != nil { + image["architecture"] = s.Architecture + } + if &s.Endianness != nil { + image["endianness"] = s.Endianness + } + } + images = append(images, image) + } + d.SetId(time.Now().UTC().String()) + d.Set("images", images) + d.Set(helpers.PICloudInstanceId, powerinstanceid) + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_cloud_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_cloud_instance.go new file mode 100644 index 00000000000..4000d194a44 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_cloud_instance.go @@ -0,0 +1,154 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
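+//
+// Example usage (illustrative sketch; assumes helpers.PICloudInstanceId
+// resolves to "pi_cloud_instance_id"):
+//
+//	data "ibm_pi_cloud_instance" "example" {
+//	  pi_cloud_instance_id = "<cloud-instance-guid>"
+//	}
+//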
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +func dataSourceIBMPICloudInstance() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPICloudInstanceRead, + Schema: map[string]*schema.Schema{ + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + + // Start of Computed Attributes + + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "tenant_id": { + Type: schema.TypeString, + Computed: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + }, + + "capabilities": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "total_processors_consumed": { + Type: schema.TypeFloat, + Computed: true, + }, + "total_instances": { + Type: schema.TypeFloat, + Computed: true, + }, + "total_memory_consumed": { + Type: schema.TypeFloat, + Computed: true, + }, + "total_ssd_storage_consumed": { + Type: schema.TypeFloat, + Computed: true, + }, + "total_standard_storage_consumed": { + Type: schema.TypeFloat, + Computed: true, + }, + "pvm_instances": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "href": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "systype": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPICloudInstanceRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + cloud_instance := instance.NewIBMPICloudInstanceClient(sess, powerinstanceid) + cloud_instance_data, err := cloud_instance.Get(powerinstanceid) + + if err != nil { + return err + } + + d.SetId(*cloud_instance_data.CloudInstanceID) + d.Set("tenant_id", (cloud_instance_data.TenantID)) + d.Set("enabled", cloud_instance_data.Enabled) + d.Set("region", cloud_instance_data.Region) + d.Set("capabilities", cloud_instance_data.Capabilities) + d.Set("pvm_instances", flattenpvminstances(cloud_instance_data.PvmInstances)) + d.Set("total_ssd_storage_consumed", cloud_instance_data.Usage.StorageSSD) + d.Set("total_instances", cloud_instance_data.Usage.Instances) + d.Set("total_standard_storage_consumed", cloud_instance_data.Usage.StorageStandard) + d.Set("total_processors_consumed", cloud_instance_data.Usage.Processors) + d.Set("total_memory_consumed", cloud_instance_data.Usage.Memory) + + return nil + +} + +func flattenpvminstances(list []*models.PVMInstanceReference) []map[string]interface{} { + pvms := make([]map[string]interface{}, 0) + for _, lpars := range list { + + l := map[string]interface{}{ + "id": *lpars.PvmInstanceID, + "name": *lpars.ServerName, + "href": *lpars.Href, + "status": *lpars.Status, + "systype": lpars.SysType, + "creation_date": 
lpars.CreationDate.String(), + } + pvms = append(pvms, l) + + } + return pvms +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_image.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_image.go new file mode 100644 index 00000000000..56949c5d838 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_image.go @@ -0,0 +1,88 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + //"fmt" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceIBMPIImage() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPIImagesRead, + Schema: map[string]*schema.Schema{ + + helpers.PIImageName: { + Type: schema.TypeString, + Required: true, + Description: "Imagename Name to be used for pvminstances", + ValidateFunc: validation.NoZeroValues, + }, + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + }, + "architecture": { + Type: schema.TypeString, + Computed: true, + }, + "operatingsystem": { + Type: schema.TypeString, + Computed: true, + }, + "hypervisor": { + Type: schema.TypeString, + Computed: true, + }, + "storage_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMPIImagesRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + + imageC := instance.NewIBMPIImageClient(sess, powerinstanceid) + imagedata, err := imageC.Get(d.Get(helpers.PIImageName).(string), powerinstanceid) + + if err != nil { + return err + } + + d.SetId(*imagedata.ImageID) + d.Set("state", imagedata.State) + d.Set("size", imagedata.Size) + d.Set("architecture", imagedata.Specifications.Architecture) + d.Set("hypervisor", imagedata.Specifications.HypervisorType) + d.Set("operatingsystem", imagedata.Specifications.OperatingSystem) + d.Set("storage_type", imagedata.StorageType) + + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_images.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_images.go new file mode 100644 index 00000000000..e7bf2fdbc83 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_images.go @@ -0,0 +1,116 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
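+//
+// Example usage (illustrative sketch; assumes helpers.PIImageName and
+// helpers.PICloudInstanceId resolve to "pi_image_name" and
+// "pi_cloud_instance_id"):
+//
+//	data "ibm_pi_image" "example" {
+//	  pi_image_name        = "my-image"
+//	  pi_cloud_instance_id = "<cloud-instance-guid>"
+//	}
+//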
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + //"fmt" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +/* +Datasource to get the list of images that are available when a power instance is created + +*/ +func dataSourceIBMPIImages() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPIImagesAllRead, + Schema: map[string]*schema.Schema{ + + helpers.PIImageName: { + Type: schema.TypeString, + Optional: true, + Description: "Imagename Name to be used for pvminstances", + ValidateFunc: validation.NoZeroValues, + Deprecated: "This field is deprectaed.", + }, + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + + // Computed Attributes + + "image_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "href": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "storage_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPIImagesAllRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + + imageC := instance.NewIBMPIImageClient(sess, powerinstanceid) + + imagedata, err := imageC.GetAll(powerinstanceid) + + if err != nil { + return err + } + + var clientgenU, _ = uuid.GenerateUUID() + d.SetId(clientgenU) + _ = d.Set("image_info", flattenStockImages(imagedata.Images)) + + return nil + +} + +func flattenStockImages(list []*models.ImageReference) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + + l := map[string]interface{}{ + "id": *i.ImageID, + "state": *i.State, + "href": *i.Href, + "name": *i.Name, + "storage_type": *i.StorageType, + } + + result = append(result, l) + + } + return result +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_instance.go new file mode 100644 index 00000000000..b8985dd1605 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_instance.go @@ -0,0 +1,195 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
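+//
+// Example usage (illustrative sketch; assumes helpers.PIInstanceName and
+// helpers.PICloudInstanceId resolve to "pi_instance_name" and
+// "pi_cloud_instance_id"):
+//
+//	data "ibm_pi_instance" "example" {
+//	  pi_instance_name     = "my-instance"
+//	  pi_cloud_instance_id = "<cloud-instance-guid>"
+//	}
+//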
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" +) + +func dataSourceIBMPIInstance() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPIInstancesRead, + Schema: map[string]*schema.Schema{ + + helpers.PIInstanceName: { + Type: schema.TypeString, + Required: true, + Description: "Server Name to be used for pvminstances", + ValidateFunc: validation.NoZeroValues, + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + + // Computed Attributes + "volumes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "memory": { + Type: schema.TypeFloat, + Computed: true, + }, + "processors": { + Type: schema.TypeInt, + Computed: true, + }, + "health_status": { + Type: schema.TypeString, + Computed: true, + }, + "addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip": { + Type: schema.TypeString, + Computed: true, + }, + "macaddress": { + Type: schema.TypeString, + Computed: true, + }, + "network_id": { + Type: schema.TypeString, + Computed: true, + }, + "network_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "external_ip": { + Type: schema.TypeString, + Computed: true, + }, + /*"version": { + Type: schema.TypeFloat, + Computed: true, + },*/ + }, + }, + }, + "proctype": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + + "minproc": { + Type: schema.TypeInt, + Computed: true, + }, + "minmem": { + Type: schema.TypeInt, + Computed: true, + }, + "maxproc": { + Type: schema.TypeInt, + Computed: true, + }, + "maxmem": { + Type: schema.TypeInt, + Computed: true, + }, + "pin_policy": { + Type: schema.TypeString, + Computed: true, + }, + "virtual_cores_assigned": { + Type: schema.TypeInt, + Computed: true, + }, + "max_virtual_cores": { + Type: schema.TypeInt, + Computed: true, + }, + "min_virtual_cores": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceIBMPIInstancesRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + + powerC := instance.NewIBMPIInstanceClient(sess, powerinstanceid) + powervmdata, err := powerC.Get(d.Get(helpers.PIInstanceName).(string), powerinstanceid, getTimeOut) + + if err != nil { + return err + } + + pvminstanceid := *powervmdata.PvmInstanceID + d.SetId(pvminstanceid) + d.Set("memory", powervmdata.Memory) + d.Set("processors", powervmdata.Processors) + d.Set("status", powervmdata.Status) + d.Set("proctype", powervmdata.ProcType) + d.Set("volumes", powervmdata.VolumeIds) + d.Set("minproc", powervmdata.Minproc) + d.Set("minmem", powervmdata.Minmem) + d.Set("maxproc", powervmdata.Maxproc) + d.Set("maxmem", powervmdata.Maxmem) + d.Set("pin_policy", powervmdata.PinPolicy) + d.Set("virtual_cores_assigned", powervmdata.VirtualCores.Assigned) + d.Set("max_virtual_cores", powervmdata.VirtualCores.Max) + d.Set("min_virtual_cores", 
powervmdata.VirtualCores.Min)
+
+	if powervmdata.Addresses != nil {
+		pvmaddress := make([]map[string]interface{}, len(powervmdata.Addresses))
+		for i, pvmip := range powervmdata.Addresses {
+
+			p := make(map[string]interface{})
+			p["ip"] = pvmip.IP
+			p["network_name"] = pvmip.NetworkName
+			p["network_id"] = pvmip.NetworkID
+			p["macaddress"] = pvmip.MacAddress
+			p["type"] = pvmip.Type
+			p["external_ip"] = pvmip.ExternalIP
+			pvmaddress[i] = p
+		}
+		d.Set("addresses", pvmaddress)
+
+	}
+
+	if powervmdata.Health != nil {
+
+		d.Set("health_status", powervmdata.Health.Status)
+
+	}
+
+	return nil
+
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_instance_ip.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_instance_ip.go
new file mode 100644
index 00000000000..ced567d4a27
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_instance_ip.go
@@ -0,0 +1,133 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"log"
+	"net"
+	"strconv"
+
+	"github.com/IBM-Cloud/power-go-client/clients/instance"
+	"github.com/IBM-Cloud/power-go-client/helpers"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+)
+
+func dataSourceIBMPIInstanceIP() *schema.Resource {
+
+	return &schema.Resource{
+		Read: dataSourceIBMPIInstancesIPRead,
+		Schema: map[string]*schema.Schema{
+
+			helpers.PIInstanceName: {
+				Type:         schema.TypeString,
+				Required:     true,
+				Description:  "Server Name to be used for pvminstances",
+				ValidateFunc: validation.NoZeroValues,
+			},
+
+			helpers.PICloudInstanceId: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validation.NoZeroValues,
+			},
+
+			helpers.PINetworkName: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validation.NoZeroValues,
+			},
+			"ip": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"ipoctet": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"macaddress": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"network_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"type": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"external_ip": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMPIInstancesIPRead(d *schema.ResourceData, meta interface{}) error {
+
+	sess, err := meta.(ClientSession).IBMPISession()
+
+	if err != nil {
+		return err
+	}
+
+	if err := checkValidSubnet(d, meta); err != nil {
+		return err
+	}
+
+	powerinstanceid := d.Get(helpers.PICloudInstanceId).(string)
+	powerinstancesubnet := d.Get(helpers.PINetworkName).(string)
+	powerC := instance.NewIBMPIInstanceClient(sess, powerinstanceid)
+	powervmdata, err := powerC.Get(d.Get(helpers.PIInstanceName).(string), powerinstanceid, getTimeOut)
+
+	if err != nil {
+		return err
+	}
+
+	for i := range powervmdata.Addresses {
+		if powervmdata.Addresses[i].NetworkName == powerinstancesubnet {
+			log.Printf("Printing the ip %s", powervmdata.Addresses[i].IP)
+			d.Set("ip", powervmdata.Addresses[i].IP)
+			d.Set("network_id", powervmdata.Addresses[i].NetworkID)
+			d.Set("macaddress", powervmdata.Addresses[i].MacAddress)
+			d.Set("external_ip", powervmdata.Addresses[i].ExternalIP)
+			d.Set("type", powervmdata.Addresses[i].Type)
+
+			// Guard against unparsable or non-IPv4 addresses before indexing
+			// into the 4-byte form.
+			if IPObject := net.ParseIP(powervmdata.Addresses[i].IP).To4(); IPObject != nil {
+				d.Set("ipoctet", strconv.Itoa(int(IPObject[3])))
+			}
+
+		}
+
+	}
+
+	return nil
+
+}
+
+func checkValidSubnet(d *schema.ResourceData, 
meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + powerinstancesubnet := d.Get(helpers.PINetworkName).(string) + + networkC := instance.NewIBMPINetworkClient(sess, powerinstanceid) + networkdata, err := networkC.Get(powerinstancesubnet, powerinstanceid, getTimeOut) + + if err != nil { + return err + } + + d.SetId(*networkdata.NetworkID) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_instance_volumes.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_instance_volumes.go new file mode 100644 index 00000000000..9bc827ff1d1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_instance_volumes.go @@ -0,0 +1,127 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceIBMPIInstanceVolumes() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPIInstanceVolumesRead, + Schema: map[string]*schema.Schema{ + + helpers.PIInstanceName: { + Type: schema.TypeString, + Required: true, + Description: "Instance Name to be used for pvminstances", + ValidateFunc: validation.NoZeroValues, + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + + //Computed Attributes + + "boot_volume_id": { + Type: schema.TypeString, + Computed: true, + }, + + "instance_volumes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "size": { + Type: schema.TypeFloat, + Computed: true, + }, + "href": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "shareable": { + Type: schema.TypeBool, + Computed: true, + }, + "bootable": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPIInstanceVolumesRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + volumeC := instance.NewIBMPIVolumeClient(sess, powerinstanceid) + volumedata, err := volumeC.GetAll(d.Get(helpers.PIInstanceName).(string), powerinstanceid, getTimeOut) + + if err != nil { + return err + } + + var clientgenU, _ = uuid.GenerateUUID() + d.SetId(clientgenU) + d.Set("boot_volume_id", *volumedata.Volumes[0].VolumeID) + d.Set("instance_volumes", flattenVolumesInstances(volumedata.Volumes)) + + return nil + +} + +func flattenVolumesInstances(list []*models.VolumeReference) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + l := map[string]interface{}{ + "id": *i.VolumeID, + "state": *i.State, + "href": *i.Href, + "name": *i.Name, + "size": *i.Size, + "type": 
*i.DiskType, + "shareable": *i.Shareable, + "bootable": *i.Bootable, + } + + result = append(result, l) + } + return result +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_key.go new file mode 100644 index 00000000000..65f4d6fd353 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_key.go @@ -0,0 +1,69 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" +) + +func dataSourceIBMPIKey() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPIKeysRead, + Schema: map[string]*schema.Schema{ + + helpers.PIKeyName: { + Type: schema.TypeString, + Required: true, + Description: "SSHKey Name to be used for pvminstances", + ValidateFunc: validation.NoZeroValues, + }, + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + //Computed Attributes + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "sshkey": { + Type: schema.TypeString, + Sensitive: true, + Computed: true, + }, + }, + } +} + +func dataSourceIBMPIKeysRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + sshkeyC := instance.NewIBMPIKeyClient(sess, powerinstanceid) + sshkeydata, err := sshkeyC.Get(d.Get(helpers.PIKeyName).(string), powerinstanceid) + + if err != nil { + return err + } + + d.SetId(*sshkeydata.Name) + d.Set("creation_date", sshkeydata.CreationDate.String()) + d.Set("sshkey", sshkeydata.SSHKey) + d.Set(helpers.PIKeyName, sshkeydata.Name) + d.Set(helpers.PICloudInstanceId, powerinstanceid) + + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_network.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_network.go new file mode 100644 index 00000000000..79e9059a51f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_network.go @@ -0,0 +1,118 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
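+//
+// Illustrative usage sketch (not part of the upstream file): the
+// "ibm_pi_network" type name depends on the provider registration, which is
+// outside this diff, and the argument keys assume the usual values of the
+// helpers constants (helpers.PINetworkName = "pi_network_name",
+// helpers.PICloudInstanceId = "pi_cloud_instance_id").
+//
+//   data "ibm_pi_network" "example" {
+//     pi_network_name      = "my-network"
+//     pi_cloud_instance_id = var.pi_cloud_instance_id
+//   }
+//
+//   output "vlan_id" {
+//     value = data.ibm_pi_network.example.vlan_id
+//   }
+//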
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+
+	"github.com/IBM-Cloud/power-go-client/clients/instance"
+	"github.com/IBM-Cloud/power-go-client/helpers"
+)
+
+func dataSourceIBMPINetwork() *schema.Resource {
+
+	return &schema.Resource{
+		Read: dataSourceIBMPINetworksRead,
+		Schema: map[string]*schema.Schema{
+
+			helpers.PINetworkName: {
+				Type:         schema.TypeString,
+				Required:     true,
+				Description:  "Network Name to be used for pvminstances",
+				ValidateFunc: validation.NoZeroValues,
+			},
+
+			helpers.PICloudInstanceId: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validation.NoZeroValues,
+			},
+
+			// Computed Attributes
+
+			"cidr": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"type": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"vlan_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"gateway": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"available_ip_count": {
+				Type:     schema.TypeFloat,
+				Computed: true,
+			},
+			"used_ip_count": {
+				Type:     schema.TypeFloat,
+				Computed: true,
+			},
+			"used_ip_percent": {
+				Type:     schema.TypeFloat,
+				Computed: true,
+			},
+			"name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMPINetworksRead(d *schema.ResourceData, meta interface{}) error {
+
+	sess, err := meta.(ClientSession).IBMPISession()
+	if err != nil {
+		return err
+	}
+
+	powerinstanceid := d.Get(helpers.PICloudInstanceId).(string)
+	networkC := instance.NewIBMPINetworkClient(sess, powerinstanceid)
+	networkdata, err := networkC.Get(d.Get(helpers.PINetworkName).(string), powerinstanceid, getTimeOut)
+
+	if err != nil {
+		return err
+	}
+
+	d.SetId(*networkdata.NetworkID)
+	if networkdata.Cidr != nil {
+		d.Set("cidr", networkdata.Cidr)
+	}
+	if networkdata.Type != nil {
+		d.Set("type", networkdata.Type)
+	}
+	// The gateway field has no nil sentinel (a `&networkdata.Gateway != nil`
+	// check is always true, since the address of a field is never nil), so
+	// set it unconditionally.
+	d.Set("gateway", networkdata.Gateway)
+	if networkdata.VlanID != nil {
+		d.Set("vlan_id", networkdata.VlanID)
+	}
+	if networkdata.IPAddressMetrics.Available != nil {
+		d.Set("available_ip_count", networkdata.IPAddressMetrics.Available)
+	}
+	if networkdata.IPAddressMetrics.Used != nil {
+		d.Set("used_ip_count", networkdata.IPAddressMetrics.Used)
+	}
+	if networkdata.IPAddressMetrics.Utilization != nil {
+		d.Set("used_ip_percent", networkdata.IPAddressMetrics.Utilization)
+	}
+	if networkdata.Name != nil {
+		d.Set("name", networkdata.Name)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_network_port.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_network_port.go
new file mode 100644
index 00000000000..5953acc7d1a
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_network_port.go
@@ -0,0 +1,119 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
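+//
+// Illustrative usage sketch (assumed type name "ibm_pi_network_port" and
+// pi_* argument keys; both depend on the provider registration and helpers
+// constants outside this diff). The ports come back in the computed
+// "network_ports" list.
+//
+//   data "ibm_pi_network_port" "example" {
+//     pi_network_name      = "my-network"
+//     pi_cloud_instance_id = var.pi_cloud_instance_id
+//   }
+//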
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" + + //"fmt" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceIBMPINetworkPort() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPINetworkPortsRead, + Schema: map[string]*schema.Schema{ + + helpers.PINetworkName: { + Type: schema.TypeString, + Required: true, + Description: "Network Name to be used for pvminstances", + ValidateFunc: validation.NoZeroValues, + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + + // Computed Attributes + + "network_ports": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ipaddress": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "macaddress": { + Type: schema.TypeString, + Computed: true, + }, + "portid": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "href": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Required: true, + }, + "public_ip": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPINetworkPortsRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + networkportC := instance.NewIBMPINetworkClient(sess, powerinstanceid) + networkportdata, err := networkportC.GetAllPort(d.Get(helpers.PINetworkName).(string), powerinstanceid, getTimeOut) + + if err != nil { + return err + } + var clientgenU, _ = uuid.GenerateUUID() + d.SetId(clientgenU) + + d.Set("network_ports", flattenNetworkPorts(networkportdata.Ports)) + + return nil + +} + +func flattenNetworkPorts(networkPorts []*models.NetworkPort) interface{} { + result := make([]map[string]interface{}, 0, len(networkPorts)) + log.Printf("the number of ports is %d", len(networkPorts)) + for _, i := range networkPorts { + l := map[string]interface{}{ + "portid": *i.PortID, + "status": *i.Status, + "href": i.Href, + "ipaddress": *i.IPAddress, + "macaddress": *i.MacAddress, + "public_ip": i.ExternalIP, + } + + result = append(result, l) + } + return result +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_public_network.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_public_network.go new file mode 100644 index 00000000000..1d8735f0f45 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_public_network.go @@ -0,0 +1,96 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
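+//
+// Illustrative usage sketch (assumed type name "ibm_pi_public_network");
+// only the cloud instance ID is needed, and the first public network found
+// in that instance is returned.
+//
+//   data "ibm_pi_public_network" "example" {
+//     pi_cloud_instance_id = var.pi_cloud_instance_id
+//   }
+//
+//   output "public_vlan_id" {
+//     value = data.ibm_pi_public_network.example.vlan_id
+//   }
+//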
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+
+	"github.com/IBM-Cloud/power-go-client/clients/instance"
+	"github.com/IBM-Cloud/power-go-client/helpers"
+)
+
+func dataSourceIBMPIPublicNetwork() *schema.Resource {
+
+	return &schema.Resource{
+		Read: dataSourceIBMPIPublicNetworksRead,
+		Schema: map[string]*schema.Schema{
+
+			helpers.PINetworkName: {
+				Type:         schema.TypeString,
+				Optional:     true,
+				Description:  "Network Name to be used for pvminstances",
+				ValidateFunc: validation.NoZeroValues,
+				Deprecated:   "This field is deprecated.",
+			},
+
+			helpers.PICloudInstanceId: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validation.NoZeroValues,
+			},
+
+			// Computed Attributes
+
+			"network_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"type": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"vlan_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMPIPublicNetworksRead(d *schema.ResourceData, meta interface{}) error {
+
+	sess, err := meta.(ClientSession).IBMPISession()
+	if err != nil {
+		return err
+	}
+
+	powerinstanceid := d.Get(helpers.PICloudInstanceId).(string)
+	networkC := instance.NewIBMPINetworkClient(sess, powerinstanceid)
+	networkdata, err := networkC.GetPublic(powerinstanceid, getTimeOut)
+
+	if err != nil {
+		return err
+	}
+	if len(networkdata.Networks) < 1 {
+		return fmt.Errorf("No Public Network Found in %s", powerinstanceid)
+	}
+	d.SetId(*networkdata.Networks[0].NetworkID)
+	if networkdata.Networks[0].Type != nil {
+		d.Set("type", networkdata.Networks[0].Type)
+	}
+	if networkdata.Networks[0].Name != nil {
+		d.Set("name", networkdata.Networks[0].Name)
+	}
+	if networkdata.Networks[0].VlanID != nil {
+		d.Set("vlan_id", networkdata.Networks[0].VlanID)
+	}
+	if networkdata.Networks[0].NetworkID != nil {
+		d.Set("network_id", networkdata.Networks[0].NetworkID)
+	}
+	d.Set(helpers.PICloudInstanceId, powerinstanceid)
+
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_snapshot.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_snapshot.go
new file mode 100644
index 00000000000..3d509e2d254
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_snapshot.go
@@ -0,0 +1,128 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
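+//
+// Illustrative usage sketch; the "ibm_pi_snapshot" type name is an
+// assumption (the registration lives outside this diff). The snapshots of
+// the named PVM instance come back in the computed "pvm_snapshots" list.
+//
+//   data "ibm_pi_snapshot" "example" {
+//     pi_instance_name     = "my-instance"
+//     pi_cloud_instance_id = var.pi_cloud_instance_id
+//   }
+//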
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "log" +) + +func dataSourceIBMPISnapshot() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPISnapshotRead, + Schema: map[string]*schema.Schema{ + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + + helpers.PIInstanceName: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + //Computed Attributes + + "pvm_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "percent_complete": { + Type: schema.TypeInt, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + "action": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "last_updated_date": { + Type: schema.TypeString, + Computed: true, + }, + "volume_snapshots": { + Type: schema.TypeMap, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPISnapshotRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + powerinstancename := d.Get(helpers.PIInstanceName).(string) + snapshot := instance.NewIBMPIInstanceClient(sess, powerinstanceid) + snapshotData, err := snapshot.GetSnapShotVM(powerinstanceid, powerinstancename, getTimeOut) + + if err != nil { + return err + } + + var clientgenU, _ = uuid.GenerateUUID() + d.SetId(clientgenU) + d.Set("pvm_snapshots", flattenPVMSnapshotInstances(snapshotData.Snapshots)) + + return nil + +} + +func flattenPVMSnapshotInstances(list []*models.Snapshot) []map[string]interface{} { + log.Printf("Calling the flattensnapshotinstances call with list %d", len(list)) + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + l := map[string]interface{}{ + "id": *i.SnapshotID, + "name": *i.Name, + "description": i.Description, + "creation_date": i.CreationDate.String(), + "last_updated_date": i.LastUpdateDate.String(), + "action": i.Action, + "percent_complete": i.PercentComplete, + "status": i.Status, + "volume_snapshots": i.VolumeSnapshots, + } + + result = append(result, l) + } + + return result +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_snapshots.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_snapshots.go new file mode 100644 index 00000000000..bf9ec78c0b8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_snapshots.go @@ -0,0 +1,117 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
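+//
+// Illustrative usage sketch; the "ibm_pi_snapshots" type name is an
+// assumption. Unlike the per-instance variant in the previous file, this
+// lists every snapshot in the cloud instance via the computed
+// "instance_snapshots" list.
+//
+//   data "ibm_pi_snapshots" "all" {
+//     pi_cloud_instance_id = var.pi_cloud_instance_id
+//   }
+//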
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "log" +) + +func dataSourceIBMPISnapshots() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPISnapshotsRead, + Schema: map[string]*schema.Schema{ + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + + //Computed Attributes + + "instance_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "percent_complete": { + Type: schema.TypeInt, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + "action": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "last_updated_date": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPISnapshotsRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + snapshot := instance.NewIBMPISnapshotClient(sess, powerinstanceid) + snapshotData, err := snapshot.GetAll("", powerinstanceid, getTimeOut) + + if err != nil { + return err + } + + var clientgenU, _ = uuid.GenerateUUID() + d.SetId(clientgenU) + d.Set("instance_snapshots", flattenSnapshotsInstances(snapshotData.Snapshots)) + + return nil + +} + +func flattenSnapshotsInstances(list []*models.Snapshot) []map[string]interface{} { + log.Printf("Calling the flattensnapshotsinstances call with list %d", len(list)) + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + l := map[string]interface{}{ + "id": *i.SnapshotID, + "name": *i.Name, + "description": i.Description, + "creation_date": i.CreationDate.String(), + "last_updated_date": i.LastUpdateDate.String(), + "action": i.Action, + "percent_complete": i.PercentComplete, + "status": i.Status, + } + + result = append(result, l) + } + + return result +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_tenant.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_tenant.go new file mode 100644 index 00000000000..f4d3d42ecfa --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_tenant.go @@ -0,0 +1,103 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
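+//
+// Illustrative usage sketch (assumed type name "ibm_pi_tenant"):
+//
+//   data "ibm_pi_tenant" "example" {
+//     pi_cloud_instance_id = var.pi_cloud_instance_id
+//   }
+//
+//   output "tenant_name" {
+//     value = data.ibm_pi_tenant.example.tenant_name
+//   }
+//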
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + //"fmt" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceIBMPITenant() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMPITenantRead, + Schema: map[string]*schema.Schema{ + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + + // Computed Attributes + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + + "tenant_name": { + Type: schema.TypeString, + Computed: true, + }, + "cloud_instances": { + + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_instance_id": { + Type: schema.TypeString, + Computed: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPITenantRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + //tenantid := d.Get("tenantid").(string) + + tenantC := instance.NewIBMPITenantClient(sess, powerinstanceid) + tenantData, err := tenantC.Get(powerinstanceid) + + if err != nil { + return err + } + + d.SetId(*tenantData.TenantID) + d.Set("creation_date", tenantData.CreationDate) + d.Set("enabled", tenantData.Enabled) + + if tenantData.CloudInstances != nil { + + d.Set("tenant_name", tenantData.CloudInstances[0].Name) + } + + if tenantData.CloudInstances != nil { + tenants := make([]map[string]interface{}, len(tenantData.CloudInstances)) + for i, cloudinstance := range tenantData.CloudInstances { + j := make(map[string]interface{}) + j["region"] = cloudinstance.Region + j["cloud_instance_id"] = cloudinstance.CloudInstanceID + tenants[i] = j + } + + d.Set("cloud_instances", tenants) + } + + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_volume.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_volume.go new file mode 100644 index 00000000000..8a1facda5c3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_pi_volume.go @@ -0,0 +1,106 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
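+//
+// Illustrative usage sketch (assumed type name "ibm_pi_volume" and
+// helpers.PIVolumeName = "pi_volume_name"):
+//
+//   data "ibm_pi_volume" "example" {
+//     pi_volume_name       = "my-volume"
+//     pi_cloud_instance_id = var.pi_cloud_instance_id
+//   }
+//
+//   output "wwn" {
+//     value = data.ibm_pi_volume.example.wwn
+//   }
+//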
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+
+	"github.com/IBM-Cloud/power-go-client/clients/instance"
+	"github.com/IBM-Cloud/power-go-client/helpers"
+)
+
+func dataSourceIBMPIVolume() *schema.Resource {
+
+	return &schema.Resource{
+		Read: dataSourceIBMPIVolumeRead,
+		Schema: map[string]*schema.Schema{
+
+			helpers.PIVolumeName: {
+				Type:         schema.TypeString,
+				Required:     true,
+				Description:  "Volume Name to be used for pvminstances",
+				ValidateFunc: validation.NoZeroValues,
+			},
+
+			helpers.PICloudInstanceId: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validation.NoZeroValues,
+			},
+
+			// Computed Attributes
+			"state": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"size": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"shareable": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"bootable": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"creation_date": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"disk_type": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"wwn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMPIVolumeRead(d *schema.ResourceData, meta interface{}) error {
+
+	sess, err := meta.(ClientSession).IBMPISession()
+	if err != nil {
+		return err
+	}
+
+	powerinstanceid := d.Get(helpers.PICloudInstanceId).(string)
+	volumeC := instance.NewIBMPIVolumeClient(sess, powerinstanceid)
+	volumedata, err := volumeC.Get(d.Get(helpers.PIVolumeName).(string), powerinstanceid, getTimeOut)
+	if err != nil {
+		return err
+	}
+
+	d.SetId(*volumedata.VolumeID)
+	if volumedata.Size != nil {
+		d.Set("size", volumedata.Size)
+	}
+	// Guards of the form `&volumedata.X != nil` compare the address of a
+	// struct field, which is never nil, so they always pass; set these
+	// values directly instead.
+	d.Set("disk_type", volumedata.DiskType)
+	d.Set("bootable", volumedata.Bootable)
+	d.Set("state", volumedata.State)
+	d.Set("wwn", volumedata.Wwn)
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_glb_monitors.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_glb_monitors.go
new file mode 100644
index 00000000000..a5bfcb3e88a
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_glb_monitors.go
@@ -0,0 +1,173 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	pdnsGLBMonitors = "dns_glb_monitors"
+)
+
+func dataSourceIBMPrivateDNSGLBMonitors() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMPrivateDNSGLBMonitorsRead,
+
+		Schema: map[string]*schema.Schema{
+
+			pdnsInstanceID: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The GUID of the private DNS. 
", + }, + + pdnsGLBMonitors: { + Type: schema.TypeList, + Description: "Collection of GLB monitors collectors", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + pdnsGlbMonitorID: { + Type: schema.TypeString, + Computed: true, + Description: "Monitor Id", + }, + + pdnsGlbMonitorName: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier of a service instance.", + }, + + pdnsGlbMonitorDescription: { + Type: schema.TypeString, + Computed: true, + Description: "Descriptive text of the load balancer monitor", + }, + + pdnsGlbMonitorType: { + Type: schema.TypeString, + Computed: true, + Description: "The protocol to use for the health check", + }, + + pdnsGlbMonitorPort: { + Type: schema.TypeInt, + Computed: true, + Description: "Port number to connect to for the health check", + }, + + pdnsGlbMonitorInterval: { + Type: schema.TypeInt, + Computed: true, + Description: "The interval between each health check", + }, + + pdnsGlbMonitorRetries: { + Type: schema.TypeInt, + Computed: true, + Description: "The number of retries to attempt in case of a timeout before marking the origin as unhealthy", + }, + + pdnsGlbMonitorTimeout: { + Type: schema.TypeInt, + Computed: true, + Description: "The timeout (in seconds) before marking the health check as failed", + }, + + pdnsGlbMonitorMethod: { + Type: schema.TypeString, + Computed: true, + Description: "The method to use for the health check", + }, + + pdnsGlbMonitorPath: { + Type: schema.TypeString, + Computed: true, + Description: "The endpoint path to health check against", + }, + + pdnsGlbMonitorAllowInsecure: { + Type: schema.TypeBool, + Computed: true, + Description: "Do not validate the certificate when monitor use HTTPS. This parameter is currently only valid for HTTPS monitors.", + }, + + pdnsGlbMonitorExpectedCodes: { + Type: schema.TypeString, + Computed: true, + Description: "The expected HTTP response code or code range of the health check. 
This parameter is only valid for HTTP and HTTPS", + }, + + pdnsGlbMonitorExpectedBody: { + Type: schema.TypeString, + Computed: true, + Description: "A case-insensitive sub-string to look for in the response body", + }, + + pdnsGlbMonitorCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Monitor creation date", + }, + + pdnsGlbMonitorModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Monitor Modification date", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPrivateDNSGLBMonitorsRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + instanceID := d.Get(pdnsInstanceID).(string) + listDNSGLBMonitorions := sess.NewListMonitorsOptions(instanceID) + availableGLBMonitors, detail, err := sess.ListMonitors(listDNSGLBMonitorions) + if err != nil { + return fmt.Errorf("Error reading list of pdns GLB monitors:%s\n%s", err, detail) + } + + dnsMonitors := make([]map[string]interface{}, 0) + for _, instance := range availableGLBMonitors.Monitors { + dnsMonitor := map[string]interface{}{} + dnsMonitor[pdnsGlbMonitorID] = *instance.ID + dnsMonitor[pdnsGlbMonitorName] = *instance.Name + dnsMonitor[pdnsGlbMonitorType] = *instance.Type + dnsMonitor[pdnsGlbMonitorCreatedOn] = *instance.CreatedOn + dnsMonitor[pdnsGlbMonitorModifiedOn] = *instance.ModifiedOn + dnsMonitor[pdnsGlbMonitorPort] = *instance.Port + dnsMonitor[pdnsGlbMonitorInterval] = *instance.Interval + dnsMonitor[pdnsGlbMonitorRetries] = *instance.Retries + dnsMonitor[pdnsGlbMonitorTimeout] = *instance.Timeout + dnsMonitor[pdnsGlbMonitorDescription] = *instance.Description + dnsMonitor[pdnsGlbMonitorMethod] = *instance.Method + dnsMonitor[pdnsGlbMonitorPath] = *instance.Path + dnsMonitor[pdnsGlbMonitorExpectedCodes] = *instance.ExpectedCodes + dnsMonitor[pdnsGlbMonitorExpectedBody] = *instance.ExpectedBody + dnsMonitor[pdnsGlbMonitorAllowInsecure] = *instance.AllowInsecure + + dnsMonitors = append(dnsMonitors, dnsMonitor) + } + d.SetId(dataSourceIBMPrivateDNSGLBMonitorsID(d)) + d.Set(pdnsGLBMonitors, dnsMonitors) + return nil +} + +// dataSourceIBMPrivateDNSGLBMonitorsID returns a reasonable ID list. +func dataSourceIBMPrivateDNSGLBMonitorsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_glb_pools.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_glb_pools.go new file mode 100644 index 00000000000..4534e35387f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_glb_pools.go @@ -0,0 +1,181 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
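+//
+// Illustrative usage sketch; the "ibm_dns_glb_pools" type name and the
+// pdnsInstanceID = "instance_id" key are assumptions based on this schema.
+// Every GLB pool of the private DNS instance is returned in the computed
+// "dns_glb_pools" list.
+//
+//   data "ibm_dns_glb_pools" "example" {
+//     instance_id = var.pdns_instance_guid
+//   }
+//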
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + pdnsGLBPools = "dns_glb_pools" +) + +func dataSourceIBMPrivateDNSGLBPools() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMPrivateDNSGLBPoolsRead, + Schema: map[string]*schema.Schema{ + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + Description: "Instance ID", + }, + pdnsGLBPools: { + Type: schema.TypeList, + Description: "Collection of dns resource records", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + pdnsGlbPoolID: { + Type: schema.TypeString, + Computed: true, + Description: "DNS record id", + }, + pdnsGlbPoolName: { + Type: schema.TypeString, + Computed: true, + Description: "DNS record name", + }, + pdnsGlbPoolDescription: { + Type: schema.TypeString, + Computed: true, + Description: "Descriptive text of the load balancer pool", + }, + pdnsGlbPoolEnabled: { + Type: schema.TypeBool, + Computed: true, + Description: "Whether the load balancer pool is enabled", + }, + pdnsGlbPoolHealthyOriginsThreshold: { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum number of origins that must be healthy for this pool to serve traffic", + }, + pdnsGlbPoolCreatedOn: { + Type: schema.TypeString, + Description: "The time when a load balancer pool is created.", + Computed: true, + }, + pdnsGlbPoolModifiedOn: { + Type: schema.TypeString, + Description: "The recent time when a load balancer pool is modified.", + Computed: true, + }, + pdnsGlbPoolMonitor: { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the load balancer monitor to be associated to this pool", + }, + pdnsGlbPoolChannel: { + Type: schema.TypeString, + Computed: true, + Description: "The notification channel,It is a webhook url", + }, + pdnsGlbPoolRegion: { + Type: schema.TypeString, + Computed: true, + Description: "Health check region of VSIs", + }, + pdnsGlbPoolHealth: { + Type: schema.TypeString, + Computed: true, + Description: "Whether the load balancer pool is enabled", + }, + pdnsGlbPoolOrigins: { + Type: schema.TypeList, + Computed: true, + Description: "Origins info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + pdnsGlbPoolOriginsName: { + Type: schema.TypeString, + Description: "The name of the origin server.", + Computed: true, + }, + pdnsGlbPoolOriginsAddress: { + Type: schema.TypeString, + Description: "The address of the origin server. 
It can be a hostname or an IP address.", + Computed: true, + }, + pdnsGlbPoolOriginsEnabled: { + Type: schema.TypeBool, + Description: "Whether the origin server is enabled.", + Computed: true, + }, + pdnsGlbPoolOriginsDescription: { + Type: schema.TypeString, + Description: "Description of the origin server.", + Computed: true, + }, + pdnsGlbPoolOriginsHealth: { + Type: schema.TypeBool, + Description: "Whether the health is `true` or `false`.", + Computed: true, + }, + pdnsGlbPoolOriginsHealthFailureReason: { + Type: schema.TypeString, + Description: "The Reason for health check failure", + Computed: true, + }, + }, + }, + }, + pdnsGlbPoolSubnet: { + Type: schema.TypeList, + Computed: true, + Description: "Health check subnet crn of VSIs", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPrivateDNSGLBPoolsRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + instanceID := d.Get(pdnsInstanceID).(string) + listDNSGLBPooloptions := sess.NewListPoolsOptions(instanceID) + availableGLBPools, detail, err := sess.ListPools(listDNSGLBPooloptions) + if err != nil { + return fmt.Errorf("Error reading list of pdns GLB pools:%s\n%s", err, detail) + } + d.Set(pdnsInstanceID, instanceID) + dnsPools := make([]map[string]interface{}, 0) + for _, instance := range availableGLBPools.Pools { + dnsPool := map[string]interface{}{} + dnsPool[pdnsGlbPoolID] = *instance.ID + dnsPool[pdnsGlbPoolName] = *instance.Name + dnsPool[pdnsGlbPoolDescription] = *instance.Description + dnsPool[pdnsGlbPoolEnabled] = *instance.Enabled + dnsPool[pdnsGlbPoolHealth] = *instance.Health + dnsPool[pdnsGlbPoolHealthyOriginsThreshold] = *instance.HealthyOriginsThreshold + dnsPool[pdnsGlbPoolCreatedOn] = *instance.CreatedOn + dnsPool[pdnsGlbPoolModifiedOn] = *instance.ModifiedOn + dnsPool[pdnsGlbPoolMonitor] = *instance.Monitor + dnsPool[pdnsGlbPoolChannel] = *instance.NotificationChannel + dnsPool[pdnsGlbPoolRegion] = *instance.HealthcheckRegion + dnsPool[pdnsGlbPoolOrigins] = flattenPDNSGlbPoolOrigins(instance.Origins) + dnsPool[pdnsGlbPoolSubnet] = instance.HealthcheckSubnets + + dnsPools = append(dnsPools, dnsPool) + } + d.SetId(dataSourceIBMPrivateDNSGLBPoolsID(d)) + d.Set(pdnsGLBPools, dnsPools) + return nil +} + +// dataSourceIBMPrivateDNSGLBMonitorsID returns a reasonable ID list. +func dataSourceIBMPrivateDNSGLBPoolsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_glbs.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_glbs.go new file mode 100644 index 00000000000..d11a12e25fe --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_glbs.go @@ -0,0 +1,163 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + pdnsGLBs = "dns_glbs" +) + +func dataSourceIBMPrivateDNSGLBs() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMPrivateDNSGLBsRead, + + Schema: map[string]*schema.Schema{ + + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + Description: "The GUID of the private DNS. 
", + }, + pdnsZoneID: { + Type: schema.TypeString, + Required: true, + Description: "Zone GUID ", + }, + pdnsGLBs: { + Type: schema.TypeList, + Description: "Collection of GLB load balancer collectors", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + pdnsGLBID: { + Type: schema.TypeString, + Computed: true, + Description: "Load balancer Id", + }, + pdnsGLBName: { + Type: schema.TypeString, + Computed: true, + Description: "Name of the load balancer", + }, + pdnsGLBDescription: { + Type: schema.TypeString, + Computed: true, + Description: "Descriptive text of the load balancer", + }, + pdnsGLBEnabled: { + Type: schema.TypeBool, + Computed: true, + Description: "Whether the load balancer is enabled", + }, + pdnsGLBTTL: { + Type: schema.TypeInt, + Computed: true, + Description: "Time to live in second", + }, + pdnsGLBHealth: { + Type: schema.TypeString, + Computed: true, + Description: "Healthy state of the load balancer.", + }, + pdnsGLBFallbackPool: { + Type: schema.TypeString, + Computed: true, + Description: "The pool ID to use when all other pools are detected as unhealthy", + }, + pdnsGLBDefaultPool: { + Type: schema.TypeList, + Computed: true, + Description: "A list of pool IDs ordered by their failover priority", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + pdnsGLBAZPools: { + Type: schema.TypeList, + Computed: true, + Description: "Map availability zones to pool ID's.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + pdnsGLBAvailabilityZone: { + Type: schema.TypeString, + Computed: true, + Description: "Availability zone.", + }, + + pdnsGLBAZPoolsPools: { + Type: schema.TypeList, + Computed: true, + Description: "List of load balancer pools", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + pdnsGLBCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Load Balancer creation date", + }, + pdnsGLBModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Load Balancer Modification date", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPrivateDNSGLBsRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + instanceID := d.Get(pdnsInstanceID).(string) + zoneID := d.Get(pdnsZoneID).(string) + listDNSGLBs := sess.NewListLoadBalancersOptions(instanceID, zoneID) + availableGLBs, detail, err := sess.ListLoadBalancers(listDNSGLBs) + if err != nil { + return fmt.Errorf("Error reading list of pdns GLB load balancers:%s\n%s", err, detail) + } + + dnslbs := make([]interface{}, 0) + for _, instance := range availableGLBs.LoadBalancers { + dnsLoadbalancer := map[string]interface{}{} + dnsLoadbalancer[pdnsGLBID] = *instance.ID + dnsLoadbalancer[pdnsGLBName] = *instance.Name + dnsLoadbalancer[pdnsGLBDescription] = *instance.Description + dnsLoadbalancer[pdnsGLBEnabled] = *instance.Enabled + dnsLoadbalancer[pdnsGLBTTL] = *instance.TTL + dnsLoadbalancer[pdnsGLBHealth] = *instance.Health + dnsLoadbalancer[pdnsGLBFallbackPool] = *instance.FallbackPool + dnsLoadbalancer[pdnsGLBCreatedOn] = *instance.CreatedOn + dnsLoadbalancer[pdnsGLBModifiedOn] = *instance.ModifiedOn + dnsLoadbalancer[pdnsGLBDefaultPool] = instance.DefaultPools + dnsLoadbalancer[pdnsGLBAZPools] = flattenPDNSGlbAZpool(instance.AzPools) + + dnslbs = append(dnslbs, dnsLoadbalancer) + } + d.SetId(dataSourceIBMPrivateDNSGLBsID(d)) + d.Set(pdnsInstanceID, instanceID) + d.Set(pdnsZoneID, zoneID) + 
d.Set(pdnsGLBs, dnslbs) + return nil +} + +// dataSourceIBMPrivateDNSGLBMonitorsID returns a reasonable ID list. +func dataSourceIBMPrivateDNSGLBsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_permitted_network.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_permitted_network.go new file mode 100644 index 00000000000..674fa45c507 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_permitted_network.go @@ -0,0 +1,137 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + pdnsPermittedNetworks = "dns_permitted_networks" +) + +func dataSourceIBMPrivateDNSPermittedNetworks() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMPrivateDNSPermittedNetworksRead, + + Schema: map[string]*schema.Schema{ + + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + Description: "Instance ID", + }, + + pdnsZoneID: { + Type: schema.TypeString, + Required: true, + Description: "Zone ID", + }, + + pdnsPermittedNetworks: { + + Type: schema.TypeList, + Description: "Collection of permitted networks", + Computed: true, + Elem: &schema.Resource{ + + Schema: map[string]*schema.Schema{ + + pdnsPermittedNetworkID: { + Type: schema.TypeString, + Computed: true, + Description: "Network Id", + }, + + pdnsInstanceID: { + Type: schema.TypeString, + Computed: true, + Description: "Instance Id", + }, + + pdnsZoneID: { + Type: schema.TypeString, + Computed: true, + Description: "Zone Id", + }, + + pdnsNetworkType: { + Type: schema.TypeString, + Computed: true, + Description: "Network Type", + }, + + pdnsPermittedNetwork: { + Type: schema.TypeMap, + Computed: true, + Description: "permitted network", + }, + + pdnsPermittedNetworkCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Network creation date", + }, + + pdnsPermittedNetworkModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Network Modification date", + }, + + pdnsPermittedNetworkState: { + Type: schema.TypeString, + Computed: true, + Description: "Network status", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPrivateDNSPermittedNetworksRead(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + instanceID := d.Get(pdnsInstanceID).(string) + dnsZoneID := d.Get(pdnsZoneID).(string) + listPermittedNetworkOptions := sess.NewListPermittedNetworksOptions(instanceID, dnsZoneID) + availablePermittedNetworks, detail, err := sess.ListPermittedNetworks(listPermittedNetworkOptions) + if err != nil { + return fmt.Errorf("Error reading list of pdns permitted networks:%s\n%s", err, detail) + } + + permittedNetworks := make([]map[string]interface{}, 0) + + for _, instance := range availablePermittedNetworks.PermittedNetworks { + permittedNetwork := map[string]interface{}{} + permittedNetworkVpcCrn := map[string]interface{}{} + permittedNetwork[pdnsInstanceID] = instanceID + permittedNetwork[pdnsPermittedNetworkID] = instance.ID + permittedNetwork[pdnsPermittedNetworkCreatedOn] = instance.CreatedOn + permittedNetwork[pdnsPermittedNetworkModifiedOn] = instance.ModifiedOn + permittedNetwork[pdnsPermittedNetworkState] = 
instance.State + permittedNetwork[pdnsNetworkType] = instance.Type + permittedNetworkVpcCrn[pdnsVpcCRN] = instance.PermittedNetwork.VpcCrn + permittedNetwork[pdnsPermittedNetwork] = permittedNetworkVpcCrn + permittedNetwork[pdnsZoneID] = dnsZoneID + + permittedNetworks = append(permittedNetworks, permittedNetwork) + } + d.SetId(dataSourceIBMPrivateDNSPermittedNetworkID(d)) + d.Set(pdnsPermittedNetworks, permittedNetworks) + return nil +} + +// dataSourceIBMPrivateDnsPermittedNetworkID returns a reasonable ID for dns permitted network list. +func dataSourceIBMPrivateDNSPermittedNetworkID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_resource_records.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_resource_records.go new file mode 100644 index 00000000000..0aaac0ec9af --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_resource_records.go @@ -0,0 +1,107 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + pdnsResourceRecords = "dns_resource_records" +) + +func dataSourceIBMPrivateDNSResourceRecords() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMPrivateDNSResourceRecordsRead, + Schema: map[string]*schema.Schema{ + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + Description: "Instance ID", + }, + pdnsZoneID: { + Type: schema.TypeString, + Required: true, + Description: "Zone Id", + }, + pdnsResourceRecords: { + Type: schema.TypeList, + Description: "Collection of dns resource records", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "DNS record id", + }, + pdnsRecordName: { + Type: schema.TypeString, + Computed: true, + Description: "DNS record name", + }, + pdnsRecordType: { + Type: schema.TypeString, + Computed: true, + Description: "DNS record Type", + }, + pdnsRdata: { + Type: schema.TypeString, + Computed: true, + Description: "DNS record Data", + }, + pdnsRecordTTL: { + Type: schema.TypeInt, + Computed: true, + Description: "DNS record TTL", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPrivateDNSResourceRecordsRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + instanceID := d.Get(pdnsInstanceID).(string) + DnszoneID := d.Get(pdnsZoneID).(string) + listDNSResRecOptions := sess.NewListResourceRecordsOptions(instanceID, DnszoneID) + availableDNSResRecs, detail, err := sess.ListResourceRecords(listDNSResRecOptions) + if err != nil { + return fmt.Errorf("Error reading list of pdns resource records:%s\n%s", err, detail) + } + dnsResRecs := make([]map[string]interface{}, 0) + for _, instance := range availableDNSResRecs.ResourceRecords { + dnsRecord := map[string]interface{}{} + dnsRecord["id"] = *instance.ID + dnsRecord[pdnsRecordName] = *instance.Name + dnsRecord[pdnsRecordType] = *instance.Type + // Marshal the rdata map into a JSON string + rData, err := json.Marshal(instance.Rdata) + if err != nil { + return fmt.Errorf("Error reading rdata map of dns resource records:%s", err) + } + jsonStr := string(rData) + dnsRecord[pdnsRdata] = 
jsonStr + dnsRecord[pdnsRecordTTL] = instance.TTL + dnsResRecs = append(dnsResRecs, dnsRecord) + } + d.SetId(dataSourceIBMPrivateDNSResourceRecordsID(d)) + d.Set(pdnsResourceRecords, dnsResRecs) + return nil +} + +// dataSourceIBMPrivateDNSResourceRecordsID returns a reasonable ID for dns zones list. +func dataSourceIBMPrivateDNSResourceRecordsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_zones.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_zones.go new file mode 100644 index 00000000000..6d2e713573f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_private_dns_zones.go @@ -0,0 +1,112 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + pdnsZones = "dns_zones" +) + +func dataSourceIBMPrivateDNSZones() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMPrivateDNSZonesRead, + Schema: map[string]*schema.Schema{ + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + Description: "Instance ID", + }, + pdnsZones: { + Type: schema.TypeList, + Description: "Collection of dns zones", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + pdnsInstanceID: { + Type: schema.TypeString, + Computed: true, + Description: "Instance ID", + }, + pdnsZoneID: { + Type: schema.TypeString, + Computed: true, + Description: "Zone ID", + }, + pdnsZoneName: { + Type: schema.TypeString, + Computed: true, + Description: "Zone name", + }, + pdnsZoneDescription: { + Type: schema.TypeString, + Computed: true, + Description: "Zone description", + }, + pdnsZoneState: { + Type: schema.TypeString, + Computed: true, + Description: "Zone state", + }, + pdnsZoneLabel: { + Type: schema.TypeString, + Computed: true, + Description: "Label", + }, + pdnsZoneCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Creation date", + }, + pdnsZoneModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Modification date", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMPrivateDNSZonesRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + instanceID := d.Get(pdnsInstanceID).(string) + listDNSZonesOptions := sess.NewListDnszonesOptions(instanceID) + availableDNSZones, detail, err := sess.ListDnszones(listDNSZonesOptions) + if err != nil { + return fmt.Errorf("Error reading list of dns zones:%s\n%s", err, detail) + } + dnsZones := make([]map[string]interface{}, 0) + for _, instance := range availableDNSZones.Dnszones { + dnsZone := map[string]interface{}{} + dnsZone[pdnsInstanceID] = instance.InstanceID + dnsZone[pdnsZoneID] = instance.ID + dnsZone[pdnsZoneName] = instance.Name + dnsZone[pdnsZoneDescription] = instance.Description + dnsZone[pdnsZoneLabel] = instance.Label + dnsZone[pdnsZoneCreatedOn] = instance.CreatedOn + dnsZone[pdnsZoneModifiedOn] = instance.ModifiedOn + dnsZone[pdnsZoneState] = instance.State + dnsZones = append(dnsZones, dnsZone) + } + d.SetId(dataSourceIBMPrivateDNSZonesID(d)) + d.Set(pdnsZones, dnsZones) + return nil +} + +// dataSourceIBMPrivateDnsZonesID returns a reasonable ID for dns zones list. 
+func dataSourceIBMPrivateDNSZonesID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_push_notification_chrome.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_push_notification_chrome.go new file mode 100644 index 00000000000..42d9179743d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_push_notification_chrome.go @@ -0,0 +1,63 @@ +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/IBM/push-notifications-go-sdk/pushservicev1" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMPNApplicationChrome() *schema.Resource { + return &schema.Resource{ + Read: dataSourceApplicationChromeRead, + + Schema: map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Required: true, + Description: "Unique guid of the application using the push service.", + }, + "server_key": { + Type: schema.TypeString, + Computed: true, + Description: "A server key that gives the push service an authorized access to Google services that is used for Chrome Web Push.", + }, + "web_site_url": { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the WebSite / WebApp that should be permitted to subscribe to WebPush.", + }, + }, + } +} + +func dataSourceApplicationChromeRead(d *schema.ResourceData, meta interface{}) error { + pushServiceClient, err := meta.(ClientSession).PushServiceV1() + if err != nil { + return err + } + + getChromeWebConfOptions := &pushservicev1.GetChromeWebConfOptions{} + + guid := d.Get("guid").(string) + getChromeWebConfOptions.SetApplicationID(guid) + + chromeWebConf, response, err := pushServiceClient.GetChromeWebConfWithContext(context.TODO(), getChromeWebConfOptions) + if err != nil { + log.Printf("[DEBUG] GetChromeWebConfWithContext failed %s\n%d", err, response.StatusCode) + return err + } + + d.SetId(guid) + if err = d.Set("server_key", chromeWebConf.ApiKey); err != nil { + return fmt.Errorf("Error setting server_key: %s", err) + } + if err = d.Set("web_site_url", chromeWebConf.WebSiteURL); err != nil { + return fmt.Errorf("Error setting web_site_url: %s", err) + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_group.go new file mode 100644 index 00000000000..6e2f676e253 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_group.go @@ -0,0 +1,84 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
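+//
+// Illustrative usage sketch: look up a resource group either by name or by
+// the account default (the two arguments conflict, per the schema below).
+//
+//   data "ibm_resource_group" "default" {
+//     is_default = true
+//   }
+//
+//   data "ibm_resource_group" "team" {
+//     name = "my-resource-group"
+//   }
+//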
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2" + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMResourceGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMResourceGroupRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "Resource group name", + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"is_default"}, + }, + "is_default": { + Description: "Default Resource group", + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"name"}, + }, + }, + } +} + +func dataSourceIBMResourceGroupRead(d *schema.ResourceData, meta interface{}) error { + rsManagementAPI, err := meta.(ClientSession).ResourceManagementAPIv2() + if err != nil { + return err + } + rsGroup := rsManagementAPI.ResourceGroup() + + var defaultGrp bool + if group, ok := d.GetOk("is_default"); ok { + defaultGrp = group.(bool) + } + var name string + if n, ok := d.GetOk("name"); ok { + name = n.(string) + } + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + accountID := userDetails.userAccount + var grp []models.ResourceGroupv2 + if defaultGrp { + resourceGroupQuery := managementv2.ResourceGroupQuery{ + Default: true, + AccountID: accountID, + } + + grp, err = rsGroup.List(&resourceGroupQuery) + + if err != nil { + return fmt.Errorf("Error retrieving default resource group: %s", err) + } + d.SetId(grp[0].ID) + + } else if name != "" { + resourceGroupQuery := &managementv2.ResourceGroupQuery{ + AccountID: accountID, + } + grp, err := rsGroup.FindByName(resourceGroupQuery, name) + if err != nil { + return fmt.Errorf("Error retrieving resource group %s: %s", name, err) + } + d.SetId(grp[0].ID) + + } else { + return fmt.Errorf("Missing required properties. Need a resource group name, or the is_default true") + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_instance.go new file mode 100644 index 00000000000..bfc98ab7cac --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_instance.go @@ -0,0 +1,238 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
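+//
+// Illustrative usage sketch: the lookup is by name, optionally narrowed by
+// resource_group_id, service, and location to avoid the "more than one
+// resource instance" error raised below.
+//
+//   data "ibm_resource_instance" "cos" {
+//     name     = "my-cos-instance"
+//     service  = "cloud-object-storage"
+//     location = "global"
+//   }
+//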
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2" + "github.com/IBM-Cloud/bluemix-go/models" +) + +func dataSourceIBMResourceInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMResourceInstanceRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "Resource instance name for example, myobjectstorage", + Type: schema.TypeString, + Required: true, + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The id of the resource group in which the instance is present", + }, + + "location": { + Description: "The location or the environment in which instance exists", + Optional: true, + Type: schema.TypeString, + Computed: true, + }, + + "service": { + Description: "The service type of the instance", + Optional: true, + Type: schema.TypeString, + Computed: true, + }, + + "plan": { + Description: "The plan type of the instance", + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Description: "The resource instance status", + Type: schema.TypeString, + Computed: true, + }, + + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "CRN of resource instance", + }, + "tags": { + Type: schema.TypeSet, + Computed: true, + Description: "Tags of Resource Instance", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "Guid of resource instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about the resource", + }, + + "extensions": { + Type: schema.TypeMap, + Computed: true, + Description: "The extended metadata as a map associated with the resource instance.", + }, + }, + } +} + +func dataSourceIBMResourceInstanceRead(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerAPIV2() + if err != nil { + return err + } + rsAPI := rsConClient.ResourceServiceInstanceV2() + name := d.Get("name").(string) + + rsInstQuery := controllerv2.ServiceInstanceQuery{ + Name: name, + } + + if rsGrpID, ok := d.GetOk("resource_group_id"); ok { + rsInstQuery.ResourceGroupID = rsGrpID.(string) + } else { + defaultRg, err := defaultResourceGroup(meta) + if err != nil { + return err + } + rsInstQuery.ResourceGroupID = defaultRg + } + + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + if service, ok := d.GetOk("service"); ok { + + serviceOff, err := rsCatRepo.FindByName(service.(string), true) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + rsInstQuery.ServiceID = serviceOff[0].ID + } + + var instances []models.ServiceInstanceV2 + + instances, err = 
rsAPI.ListInstances(rsInstQuery)
+	if err != nil {
+		return err
+	}
+	var filteredInstances []models.ServiceInstanceV2
+	var location string
+
+	if loc, ok := d.GetOk("location"); ok {
+		location = loc.(string)
+		for _, instance := range instances {
+			if getLocation(instance) == location {
+				filteredInstances = append(filteredInstances, instance)
+			}
+		}
+	} else {
+		filteredInstances = instances
+	}
+
+	if len(filteredInstances) == 0 {
+		return fmt.Errorf("No resource instance found with name [%s]. If the instance is not in the default resource group, narrow the search with additional filters such as resource_group_id, location, or service", name)
+	}
+
+	var instance models.ServiceInstanceV2
+
+	if len(filteredInstances) > 1 {
+		return fmt.Errorf(
+			"More than one resource instance found with name matching [%s]. Narrow the search with additional filters such as resource_group_id, location, or service", name)
+	}
+	instance = filteredInstances[0]
+
+	d.SetId(instance.ID)
+	d.Set("status", instance.State)
+	d.Set("resource_group_id", instance.ResourceGroupID)
+	d.Set("location", instance.RegionID)
+	serviceOff, err := rsCatRepo.GetServiceName(instance.ServiceID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving service offering: %s", err)
+	}
+
+	d.Set("service", serviceOff)
+
+	d.Set(ResourceName, instance.Name)
+	d.Set(ResourceCRN, instance.Crn.String())
+	d.Set(ResourceStatus, instance.State)
+	d.Set(ResourceGroupName, instance.ResourceGroupName)
+	d.Set("guid", instance.Guid)
+	if len(instance.Extensions) == 0 {
+		d.Set("extensions", instance.Extensions)
+	} else {
+		d.Set("extensions", Flatten(instance.Extensions))
+	}
+
+	rcontroller, err := getBaseController(meta)
+	if err != nil {
+		return err
+	}
+	d.Set(ResourceControllerURL, rcontroller+"/services/")
+
+	servicePlan, err := rsCatRepo.GetServicePlanName(instance.ResourcePlanID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving plan: %s", err)
+	}
+	d.Set("plan", servicePlan)
+	d.Set("crn", instance.Crn.String())
+	tags, err := GetTagsUsingCRN(meta, instance.Crn.String())
+	if err != nil {
+		log.Printf(
+			"Error retrieving resource instance tags (%s): %s", d.Id(), err)
+	}
+	d.Set("tags", tags)
+
+	return nil
+}
+
+func getLocation(instance models.ServiceInstanceV2) string {
+	region := instance.Crn.Region
+	cName := instance.Crn.CName
+	if cName == "bluemix" || cName == "staging" {
+		return region
+	} else {
+		return cName + "-" + region
+	}
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_key.go
new file mode 100644
index 00000000000..ae0cb617cff
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_key.go
@@ -0,0 +1,179 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
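A usage sketch for the ibm_resource_instance data source above; the instance name, service, and location values are illustrative assumptions, chosen so the lookup cannot match more than one instance:

```hcl
# Find a service instance by name, narrowed by service type and location.
data "ibm_resource_instance" "cos" {
  name     = "my-cos-instance"      # illustrative
  service  = "cloud-object-storage" # illustrative
  location = "global"               # illustrative
}

output "cos_instance_crn" {
  value = data.ibm_resource_instance.cos.crn
}
```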
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/IBM-Cloud/bluemix-go/crn"
+	"github.com/IBM-Cloud/bluemix-go/models"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMResourceKey() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMResourceKeyRead,
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Description: "The name of the resource key",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+
+			"resource_instance_id": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				Description:   "The id of the resource instance",
+				ConflictsWith: []string{"resource_alias_id"},
+			},
+
+			"resource_alias_id": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				Description:   "The id of the resource alias",
+				ConflictsWith: []string{"resource_instance_id"},
+			},
+
+			"role": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "User role",
+			},
+
+			"status": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Status of resource key",
+			},
+
+			"credentials": {
+				Description: "Credentials associated with the key",
+				Sensitive:   true,
+				Type:        schema.TypeMap,
+				Computed:    true,
+			},
+
+			"most_recent": &schema.Schema{
+				Description: "If true and multiple entries are found, the most recently created resource key is used. " +
+					"If false, an error is returned",
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+
+			"crn": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "crn of resource key",
+			},
+		},
+	}
+}
+
+func dataSourceIBMResourceKeyRead(d *schema.ResourceData, meta interface{}) error {
+	rsContClient, err := meta.(ClientSession).ResourceControllerAPI()
+	if err != nil {
+		return err
+	}
+	rkAPI := rsContClient.ResourceServiceKey()
+	name := d.Get("name").(string)
+	mostRecent := d.Get("most_recent").(bool)
+
+	keys, err := rkAPI.GetKeys(name)
+	if err != nil {
+		return err
+	}
+	var filteredKeys []models.ServiceKey
+
+	if d.Get("resource_instance_id") == "" && d.Get("resource_alias_id") == "" {
+		filteredKeys = keys
+	} else {
+		crn, err := getCRN(d, meta)
+		if err != nil {
+			return err
+		}
+		for _, key := range keys {
+			if key.SourceCrn == *crn {
+				filteredKeys = append(filteredKeys, key)
+			}
+		}
+
+	}
+
+	if len(filteredKeys) == 0 {
+		return fmt.Errorf("No resource keys found with name [%s]", name)
+	}
+
+	var key models.ServiceKey
+
+	if len(filteredKeys) > 1 {
+		if mostRecent {
+			key = mostRecentResourceKey(filteredKeys)
+		} else {
+			return fmt.Errorf(
+				"More than one resource key found with name matching [%s]. 
"+ + "Set 'most_recent' to true in your configuration to force the most recent resource key "+ + "to be used", name) + } + } else { + key = filteredKeys[0] + } + + d.SetId(key.ID) + + if roleCrn, ok := key.Parameters["role_crn"].(string); ok { + d.Set("role", roleCrn[strings.LastIndex(roleCrn, ":")+1:]) + } else if roleCrn, ok := key.Credentials["iam_role_crn"].(string); ok { + d.Set("role", roleCrn[strings.LastIndex(roleCrn, ":")+1:]) + } + + d.Set("credentials", Flatten(key.Credentials)) + d.Set("status", key.State) + d.Set("crn", key.Crn.String()) + return nil +} + +func getCRN(d *schema.ResourceData, meta interface{}) (*crn.CRN, error) { + + rsContClient, err := meta.(ClientSession).ResourceControllerAPI() + if err != nil { + return nil, err + } + + if insID, ok := d.GetOk("resource_instance_id"); ok { + instance, err := rsContClient.ResourceServiceInstance().GetInstance(insID.(string)) + if err != nil { + return nil, err + } + return &(instance.Crn), nil + + } + + alias, err := rsContClient.ResourceServiceAlias().Alias(d.Get("resource_alias_id").(string)) + if err != nil { + return nil, err + } + return &(alias.CRN), nil + +} + +type resourceKeys []models.ServiceKey + +func (k resourceKeys) Len() int { return len(k) } + +func (k resourceKeys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } + +func (k resourceKeys) Less(i, j int) bool { + return (*k[i].CreatedAt).Before(*k[j].CreatedAt) +} + +func mostRecentResourceKey(keys resourceKeys) models.ServiceKey { + sortedKeys := keys + sort.Sort(sortedKeys) + return sortedKeys[len(sortedKeys)-1] +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_quota.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_quota.go new file mode 100644 index 00000000000..6ccac0ca70f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_quota.go @@ -0,0 +1,87 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMResourceQuota() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMResourceQuotaRead,
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Description: "Resource quota name, for example Trial Quota",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+			"type": {
+				Description: "Type of the quota.",
+				Type:        schema.TypeString,
+				Computed:    true,
+			},
+			"max_apps": {
+				Description: "Defines the total app limit.",
+				Type:        schema.TypeInt,
+				Computed:    true,
+			},
+			"max_instances_per_app": {
+				Description: "Defines the total instances limit per app.",
+				Type:        schema.TypeInt,
+				Computed:    true,
+			},
+			"max_app_instance_memory": {
+				Description: "Defines the total memory of an app instance.",
+				Type:        schema.TypeString,
+				Computed:    true,
+			},
+			"total_app_memory": {
+				Description: "Defines the total memory for an app.",
+				Type:        schema.TypeString,
+				Computed:    true,
+			},
+			"max_service_instances": {
+				Description: "Defines the total service instances limit.",
+				Type:        schema.TypeInt,
+				Computed:    true,
+			},
+			"vsi_limit": {
+				Description: "Defines the VSI limit.",
+				Type:        schema.TypeInt,
+				Computed:    true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMResourceQuotaRead(d *schema.ResourceData, meta interface{}) error {
+	rsManagementAPI, err := meta.(ClientSession).ResourceManagementAPIv2()
+	if err != nil {
+		return err
+	}
+	rsQuota := rsManagementAPI.ResourceQuota()
+	rsQuotaName := d.Get("name").(string)
+	rsQuotas, err := rsQuota.FindByName(rsQuotaName)
+	if err != nil {
+		return fmt.Errorf("Error retrieving resource quota: %s", err)
+	}
+
+	if len(rsQuotas) == 0 {
+		return fmt.Errorf("No resource quota found with name [%s]", rsQuotaName)
+	}
+
+	rsQuotaFields := rsQuotas[0]
+	d.SetId(rsQuotaFields.ID)
+	d.Set("type", rsQuotaFields.Type)
+	d.Set("max_apps", rsQuotaFields.AppCountLimit)
+	d.Set("max_instances_per_app", rsQuotaFields.AppInstanceCountLimit)
+	d.Set("max_app_instance_memory", rsQuotaFields.AppInstanceMemoryLimit)
+	d.Set("total_app_memory", rsQuotaFields.TotalAppMemoryLimit)
+	d.Set("max_service_instances", rsQuotaFields.ServiceInstanceCountLimit)
+	d.Set("vsi_limit", rsQuotaFields.VSICountLimit)
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_tag.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_tag.go
new file mode 100644
index 00000000000..891d5ca2a04
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_resource_tag.go
@@ -0,0 +1,58 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
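A minimal sketch for the ibm_resource_quota data source above; the quota name follows the example given in the schema description, and the output name is illustrative:

```hcl
data "ibm_resource_quota" "trial" {
  name = "Trial Quota"
}

output "max_service_instances" {
  value = data.ibm_resource_quota.trial.max_service_instances
}
```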
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMResourceTag() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMResourceTagRead, + + Schema: map[string]*schema.Schema{ + "resource_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_resource_tag", resourceID), + Description: "CRN of the resource on which the tags should be attached", + }, + "tags": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_resource_tag", tags)}, + Set: resourceIBMVPCHash, + Description: "List of tags associated with resource instance", + }, + "resource_type": { + Type: schema.TypeString, + Optional: true, + Description: "Resource type on which the tags should be fetched", + }, + }, + } +} + +func dataSourceIBMResourceTagRead(d *schema.ResourceData, meta interface{}) error { + var rID, rType string + rID = d.Get("resource_id").(string) + if v, ok := d.GetOk(resourceType); ok && v != nil { + rType = v.(string) + } + + tags, err := GetGlobalTagsUsingCRN(meta, rID, rType, "") + if err != nil { + return fmt.Errorf( + "Error on get of resource tags (%s) tags: %s", d.Id(), err) + } + + d.SetId(rID) + d.Set("resource_id", rID) + d.Set("resource_type", rType) + d.Set("tags", tags) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_action.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_action.go new file mode 100644 index 00000000000..87bdcb6224e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_action.go @@ -0,0 +1,1530 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/schematics-go-sdk/schematicsv1" +) + +func dataSourceIBMSchematicsAction() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMSchematicsActionRead, + + Schema: map[string]*schema.Schema{ + "action_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Use GET or actions API to look up the action IDs in your IBM Cloud account.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action name (unique for an account).", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action description.", + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "List of action locations supported by IBM Cloud Schematics service. **Note** this does not limit the location of the resources provisioned using Schematics.", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Resource-group name for an action. 
By default, action is created in default resource group.", + }, + "tags": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Action tags.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "user_state": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User defined status of the Schematics object.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "User defined states * `draft` Object can be modified, and can be used by jobs run by an author, during execution * `live` Object can be modified, and can be used by jobs during execution * `locked` Object cannot be modified, and can be used by jobs during execution * `disable` Object can be modified, and cannot be used by Jobs during execution.", + }, + "set_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the user who set the state of an Object.", + }, + "set_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "When the user who set the state of an Object.", + }, + }, + }, + }, + "source_readme_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "URL of the `README` file, for the source.", + }, + "source": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Source of templates, playbooks, or controls.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of source for the Template.", + }, + "git": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Connection details to Git source.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "git_repo_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "URL to the GIT Repo that can be used to clone the template.", + }, + "git_token": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Personal Access Token to connect to Git URLs.", + }, + "git_repo_folder": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the folder in the Git Repo, that contains the template.", + }, + "git_release": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the release tag, used to fetch the Git Repo.", + }, + "git_branch": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the branch, used to fetch the Git Repo.", + }, + }, + }, + }, + }, + }, + }, + "source_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of source for the Template.", + }, + "command_parameter": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Schematics job command parameter (playbook-name, capsule-name or flow-name).", + }, + "bastion": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Complete target details with the user inputs and the system generated data.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Target name.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Target type (`cluster`, `vsi`, `icd`, `vpc`).", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Target description.", + 
}, + "resource_query": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Resource selection query string.", + }, + "credential_ref": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Override credential for each resource. Reference to credentials values, used by all the resources.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Target ID.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Targets creation time.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of the user who created the targets.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Targets updation time.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of user who updated the targets.", + }, + "sys_lock": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "System lock status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sys_locked": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the Workspace locked by the Schematic action ?.", + }, + "sys_locked_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the user who performed the action, that lead to lock the Workspace.", + }, + "sys_locked_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "When the user performed the action that lead to lock the Workspace ?.", + }, + }, + }, + }, + "resource_ids": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of the resource IDs.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "targets_ini": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Inventory of host and host group for the playbook in `INI` file format. For example, `\"targets_ini\": \"[webserverhost] 172.22.192.6 [dbhost] 172.22.192.5\"`. 
For more information, about an inventory host group syntax, see [Inventory host groups](/docs/schematics?topic=schematics-schematics-cli-reference#schematics-inventory-host-grps).", + }, + "credentials": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "credentials of the Action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "action_inputs": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Input variables for an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "action_outputs": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Output variables for an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "settings": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Environment variables for an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "trigger_record_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "ID to the trigger.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action Id.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action Cloud Resource Name.", + }, + "account": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action account ID.", + }, + "source_created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action Playbook Source creation time.", + }, + "source_created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of user who created the Action Playbook Source.", + }, + "source_updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The action playbook updation time.", + }, + "source_updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of user who updated the action playbook source.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action creation time.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of the user who created an action.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action updation time.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of the user who updated an action.", + }, + "namespace": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the namespace.", + }, + "state": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Computed state of an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status_code": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Status of automation (workspace or action).", + }, + "status_job_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job id reference for this status.", + }, + "status_message": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Automation status message - to be displayed along with the status_code.", + }, + }, + }, + }, + "playbook_names": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Playbook names retrieved from the respository.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "sys_lock": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "System lock status.", + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "sys_locked": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the Workspace locked by the Schematic action ?.", + }, + "sys_locked_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the user who performed the action, that lead to lock the Workspace.", + }, + "sys_locked_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "When the user performed the action that lead to lock the Workspace ?.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMSchematicsActionRead(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + getActionOptions := &schematicsv1.GetActionOptions{} + + getActionOptions.SetActionID(d.Get("action_id").(string)) + + action, response, err := schematicsClient.GetActionWithContext(context.TODO(), getActionOptions) + if err != nil { + log.Printf("[DEBUG] GetActionWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*action.ID) + if err = d.Set("name", action.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("description", action.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("location", action.Location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if err = d.Set("resource_group", action.ResourceGroup); err != nil { + return fmt.Errorf("Error setting resource_group: %s", err) + } + if err = d.Set("tags", action.Tags); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + + if action.UserState != nil { + err = d.Set("user_state", dataSourceActionFlattenUserState(*action.UserState)) + if err != nil { + return fmt.Errorf("Error setting user_state %s", err) + } + } + if err = d.Set("source_readme_url", action.SourceReadmeURL); err != nil { + return fmt.Errorf("Error setting source_readme_url: %s", err) + } + + if action.Source != nil { + err = d.Set("source", dataSourceActionFlattenSource(*action.Source)) + if err != nil { + return fmt.Errorf("Error setting source %s", err) + } + } + if err = d.Set("source_type", action.SourceType); err != nil { + return fmt.Errorf("Error setting source_type: %s", err) + } + if err = d.Set("command_parameter", action.CommandParameter); err != nil { + return fmt.Errorf("Error setting command_parameter: %s", err) + } + + if action.Bastion != nil { + err = d.Set("bastion", dataSourceActionFlattenBastion(*action.Bastion)) + if err != nil { + return fmt.Errorf("Error setting bastion %s", err) + } + } + if err = d.Set("targets_ini", action.TargetsIni); err != nil { + return fmt.Errorf("Error setting targets_ini: %s", err) + } + + if action.Credentials != nil { + err = d.Set("credentials", dataSourceActionFlattenCredentials(action.Credentials)) + if err != nil { + return fmt.Errorf("Error setting credentials %s", err) + } + } + + if action.Inputs != nil { + err = d.Set("action_inputs", dataSourceActionFlattenInputs(action.Inputs)) + if err != nil { + return fmt.Errorf("Error setting action_inputs %s", err) + } + } + + if action.Outputs != nil { + err = d.Set("action_outputs", dataSourceActionFlattenOutputs(action.Outputs)) + if err != nil { + return fmt.Errorf("Error setting action_outputs %s", err) + } + } + + if action.Settings != nil { + err = d.Set("settings", dataSourceActionFlattenSettings(action.Settings)) + if err != nil { + return fmt.Errorf("Error setting 
settings %s", err) + } + } + if err = d.Set("trigger_record_id", action.TriggerRecordID); err != nil { + return fmt.Errorf("Error setting trigger_record_id: %s", err) + } + if err = d.Set("id", action.ID); err != nil { + return fmt.Errorf("Error setting id: %s", err) + } + if err = d.Set("crn", action.Crn); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("account", action.Account); err != nil { + return fmt.Errorf("Error setting account: %s", err) + } + if action.SourceCreatedAt != nil { + if err = d.Set("source_created_at", action.SourceCreatedAt.String()); err != nil { + return fmt.Errorf("Error setting source_created_at: %s", err) + } + } + if err = d.Set("source_created_by", action.SourceCreatedBy); err != nil { + return fmt.Errorf("Error setting source_created_by: %s", err) + } + if action.SourceUpdatedAt != nil { + if err = d.Set("source_updated_at", action.SourceUpdatedAt.String()); err != nil { + return fmt.Errorf("Error setting source_updated_at: %s", err) + } + } + if err = d.Set("source_updated_by", action.SourceUpdatedBy); err != nil { + return fmt.Errorf("Error setting source_updated_by: %s", err) + } + if action.CreatedAt != nil { + if err = d.Set("created_at", action.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + } + if err = d.Set("created_by", action.CreatedBy); err != nil { + return fmt.Errorf("Error setting created_by: %s", err) + } + if action.UpdatedAt != nil { + if err = d.Set("updated_at", action.UpdatedAt.String()); err != nil { + return fmt.Errorf("Error setting updated_at: %s", err) + } + } + if err = d.Set("updated_by", action.UpdatedBy); err != nil { + return fmt.Errorf("Error setting updated_by: %s", err) + } + if err = d.Set("namespace", action.Namespace); err != nil { + return fmt.Errorf("Error setting namespace: %s", err) + } + + if action.State != nil { + err = d.Set("state", dataSourceActionFlattenState(*action.State)) + if err != nil { + return fmt.Errorf("Error setting state %s", err) + } + } + + if action.PlaybookNames != nil { + if err = d.Set("playbook_names", action.PlaybookNames); err != nil { + return fmt.Errorf("Error setting playbook_names: %s", err) + } + } else { + d.Set("playbook_names", []string{}) + } + + if action.SysLock != nil { + err = d.Set("sys_lock", dataSourceActionFlattenSysLock(*action.SysLock)) + if err != nil { + return fmt.Errorf("Error setting sys_lock %s", err) + } + } + + return nil +} + +func dataSourceActionFlattenUserState(result schematicsv1.UserState) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceActionUserStateToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceActionUserStateToMap(userStateItem schematicsv1.UserState) (userStateMap map[string]interface{}) { + userStateMap = map[string]interface{}{} + + if userStateItem.State != nil { + userStateMap["state"] = userStateItem.State + } + if userStateItem.SetBy != nil { + userStateMap["set_by"] = userStateItem.SetBy + } + if userStateItem.SetAt != nil { + userStateMap["set_at"] = userStateItem.SetAt.String() + } + + return userStateMap +} + +func dataSourceActionFlattenSource(result schematicsv1.ExternalSource) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceActionSourceToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceActionSourceToMap(sourceItem schematicsv1.ExternalSource) 
(sourceMap map[string]interface{}) { + sourceMap = map[string]interface{}{} + + if sourceItem.SourceType != nil { + sourceMap["source_type"] = sourceItem.SourceType + } + if sourceItem.Git != nil { + gitList := []map[string]interface{}{} + gitMap := dataSourceActionSourceGitToMap(*sourceItem.Git) + gitList = append(gitList, gitMap) + sourceMap["git"] = gitList + } + + return sourceMap +} + +func dataSourceActionSourceGitToMap(gitItem schematicsv1.ExternalSourceGit) (gitMap map[string]interface{}) { + gitMap = map[string]interface{}{} + + if gitItem.GitRepoURL != nil { + gitMap["git_repo_url"] = gitItem.GitRepoURL + } + if gitItem.GitToken != nil { + gitMap["git_token"] = gitItem.GitToken + } + if gitItem.GitRepoFolder != nil { + gitMap["git_repo_folder"] = gitItem.GitRepoFolder + } + if gitItem.GitRelease != nil { + gitMap["git_release"] = gitItem.GitRelease + } + if gitItem.GitBranch != nil { + gitMap["git_branch"] = gitItem.GitBranch + } + + return gitMap +} + +func dataSourceActionFlattenBastion(result schematicsv1.TargetResourceset) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceActionBastionToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceActionBastionToMap(bastionItem schematicsv1.TargetResourceset) (bastionMap map[string]interface{}) { + bastionMap = map[string]interface{}{} + + if bastionItem.Name != nil { + bastionMap["name"] = bastionItem.Name + } + if bastionItem.Type != nil { + bastionMap["type"] = bastionItem.Type + } + if bastionItem.Description != nil { + bastionMap["description"] = bastionItem.Description + } + if bastionItem.ResourceQuery != nil { + bastionMap["resource_query"] = bastionItem.ResourceQuery + } + if bastionItem.CredentialRef != nil { + bastionMap["credential_ref"] = bastionItem.CredentialRef + } + if bastionItem.ID != nil { + bastionMap["id"] = bastionItem.ID + } + if bastionItem.CreatedAt != nil { + bastionMap["created_at"] = bastionItem.CreatedAt.String() + } + if bastionItem.CreatedBy != nil { + bastionMap["created_by"] = bastionItem.CreatedBy + } + if bastionItem.UpdatedAt != nil { + bastionMap["updated_at"] = bastionItem.UpdatedAt.String() + } + if bastionItem.UpdatedBy != nil { + bastionMap["updated_by"] = bastionItem.UpdatedBy + } + if bastionItem.SysLock != nil { + sysLockList := []map[string]interface{}{} + sysLockMap := dataSourceActionBastionSysLockToMap(*bastionItem.SysLock) + sysLockList = append(sysLockList, sysLockMap) + bastionMap["sys_lock"] = sysLockList + } + if bastionItem.ResourceIds != nil { + bastionMap["resource_ids"] = bastionItem.ResourceIds + } + + return bastionMap +} + +func dataSourceActionBastionSysLockToMap(sysLockItem schematicsv1.SystemLock) (sysLockMap map[string]interface{}) { + sysLockMap = map[string]interface{}{} + + if sysLockItem.SysLocked != nil { + sysLockMap["sys_locked"] = sysLockItem.SysLocked + } + if sysLockItem.SysLockedBy != nil { + sysLockMap["sys_locked_by"] = sysLockItem.SysLockedBy + } + if sysLockItem.SysLockedAt != nil { + sysLockMap["sys_locked_at"] = sysLockItem.SysLockedAt.String() + } + + return sysLockMap +} + +func dataSourceActionFlattenCredentials(result []schematicsv1.VariableData) (credentials []map[string]interface{}) { + for _, credentialsItem := range result { + credentials = append(credentials, dataSourceActionCredentialsToMap(credentialsItem)) + } + + return credentials +} + +func dataSourceActionCredentialsToMap(credentialsItem schematicsv1.VariableData) (credentialsMap 
map[string]interface{}) { + credentialsMap = map[string]interface{}{} + + if credentialsItem.Name != nil { + credentialsMap["name"] = credentialsItem.Name + } + if credentialsItem.Value != nil { + credentialsMap["value"] = credentialsItem.Value + } + if credentialsItem.Metadata != nil { + metadataList := []map[string]interface{}{} + metadataMap := dataSourceActionCredentialsMetadataToMap(*credentialsItem.Metadata) + metadataList = append(metadataList, metadataMap) + credentialsMap["metadata"] = metadataList + } + if credentialsItem.Link != nil { + credentialsMap["link"] = credentialsItem.Link + } + + return credentialsMap +} + +func dataSourceActionCredentialsMetadataToMap(metadataItem schematicsv1.VariableMetadata) (metadataMap map[string]interface{}) { + metadataMap = map[string]interface{}{} + + if metadataItem.Type != nil { + metadataMap["type"] = metadataItem.Type + } + if metadataItem.Aliases != nil { + metadataMap["aliases"] = metadataItem.Aliases + } + if metadataItem.Description != nil { + metadataMap["description"] = metadataItem.Description + } + if metadataItem.DefaultValue != nil { + metadataMap["default_value"] = metadataItem.DefaultValue + } + if metadataItem.Secure != nil { + metadataMap["secure"] = metadataItem.Secure + } + if metadataItem.Immutable != nil { + metadataMap["immutable"] = metadataItem.Immutable + } + if metadataItem.Hidden != nil { + metadataMap["hidden"] = metadataItem.Hidden + } + if metadataItem.Options != nil { + metadataMap["options"] = metadataItem.Options + } + if metadataItem.MinValue != nil { + metadataMap["min_value"] = metadataItem.MinValue + } + if metadataItem.MaxValue != nil { + metadataMap["max_value"] = metadataItem.MaxValue + } + if metadataItem.MinLength != nil { + metadataMap["min_length"] = metadataItem.MinLength + } + if metadataItem.MaxLength != nil { + metadataMap["max_length"] = metadataItem.MaxLength + } + if metadataItem.Matches != nil { + metadataMap["matches"] = metadataItem.Matches + } + if metadataItem.Position != nil { + metadataMap["position"] = metadataItem.Position + } + if metadataItem.GroupBy != nil { + metadataMap["group_by"] = metadataItem.GroupBy + } + if metadataItem.Source != nil { + metadataMap["source"] = metadataItem.Source + } + + return metadataMap +} + +func dataSourceActionFlattenInputs(result []schematicsv1.VariableData) (inputs []map[string]interface{}) { + for _, inputsItem := range result { + inputs = append(inputs, dataSourceActionInputsToMap(inputsItem)) + } + + return inputs +} + +func dataSourceActionInputsToMap(inputsItem schematicsv1.VariableData) (inputsMap map[string]interface{}) { + inputsMap = map[string]interface{}{} + + if inputsItem.Name != nil { + inputsMap["name"] = inputsItem.Name + } + if inputsItem.Value != nil { + inputsMap["value"] = inputsItem.Value + } + if inputsItem.Metadata != nil { + metadataList := []map[string]interface{}{} + metadataMap := dataSourceActionInputsMetadataToMap(*inputsItem.Metadata) + metadataList = append(metadataList, metadataMap) + inputsMap["metadata"] = metadataList + } + if inputsItem.Link != nil { + inputsMap["link"] = inputsItem.Link + } + + return inputsMap +} + +func dataSourceActionInputsMetadataToMap(metadataItem schematicsv1.VariableMetadata) (metadataMap map[string]interface{}) { + metadataMap = map[string]interface{}{} + + if metadataItem.Type != nil { + metadataMap["type"] = metadataItem.Type + } + if metadataItem.Aliases != nil { + metadataMap["aliases"] = metadataItem.Aliases + } + if metadataItem.Description != nil { + metadataMap["description"] 
= metadataItem.Description + } + if metadataItem.DefaultValue != nil { + metadataMap["default_value"] = metadataItem.DefaultValue + } + if metadataItem.Secure != nil { + metadataMap["secure"] = metadataItem.Secure + } + if metadataItem.Immutable != nil { + metadataMap["immutable"] = metadataItem.Immutable + } + if metadataItem.Hidden != nil { + metadataMap["hidden"] = metadataItem.Hidden + } + if metadataItem.Options != nil { + metadataMap["options"] = metadataItem.Options + } + if metadataItem.MinValue != nil { + metadataMap["min_value"] = metadataItem.MinValue + } + if metadataItem.MaxValue != nil { + metadataMap["max_value"] = metadataItem.MaxValue + } + if metadataItem.MinLength != nil { + metadataMap["min_length"] = metadataItem.MinLength + } + if metadataItem.MaxLength != nil { + metadataMap["max_length"] = metadataItem.MaxLength + } + if metadataItem.Matches != nil { + metadataMap["matches"] = metadataItem.Matches + } + if metadataItem.Position != nil { + metadataMap["position"] = metadataItem.Position + } + if metadataItem.GroupBy != nil { + metadataMap["group_by"] = metadataItem.GroupBy + } + if metadataItem.Source != nil { + metadataMap["source"] = metadataItem.Source + } + + return metadataMap +} + +func dataSourceActionFlattenOutputs(result []schematicsv1.VariableData) (outputs []map[string]interface{}) { + for _, outputsItem := range result { + outputs = append(outputs, dataSourceActionOutputsToMap(outputsItem)) + } + + return outputs +} + +func dataSourceActionOutputsToMap(outputsItem schematicsv1.VariableData) (outputsMap map[string]interface{}) { + outputsMap = map[string]interface{}{} + + if outputsItem.Name != nil { + outputsMap["name"] = outputsItem.Name + } + if outputsItem.Value != nil { + outputsMap["value"] = outputsItem.Value + } + if outputsItem.Metadata != nil { + metadataList := []map[string]interface{}{} + metadataMap := dataSourceActionOutputsMetadataToMap(*outputsItem.Metadata) + metadataList = append(metadataList, metadataMap) + outputsMap["metadata"] = metadataList + } + if outputsItem.Link != nil { + outputsMap["link"] = outputsItem.Link + } + + return outputsMap +} + +func dataSourceActionOutputsMetadataToMap(metadataItem schematicsv1.VariableMetadata) (metadataMap map[string]interface{}) { + metadataMap = map[string]interface{}{} + + if metadataItem.Type != nil { + metadataMap["type"] = metadataItem.Type + } + if metadataItem.Aliases != nil { + metadataMap["aliases"] = metadataItem.Aliases + } + if metadataItem.Description != nil { + metadataMap["description"] = metadataItem.Description + } + if metadataItem.DefaultValue != nil { + metadataMap["default_value"] = metadataItem.DefaultValue + } + if metadataItem.Secure != nil { + metadataMap["secure"] = metadataItem.Secure + } + if metadataItem.Immutable != nil { + metadataMap["immutable"] = metadataItem.Immutable + } + if metadataItem.Hidden != nil { + metadataMap["hidden"] = metadataItem.Hidden + } + if metadataItem.Options != nil { + metadataMap["options"] = metadataItem.Options + } + if metadataItem.MinValue != nil { + metadataMap["min_value"] = metadataItem.MinValue + } + if metadataItem.MaxValue != nil { + metadataMap["max_value"] = metadataItem.MaxValue + } + if metadataItem.MinLength != nil { + metadataMap["min_length"] = metadataItem.MinLength + } + if metadataItem.MaxLength != nil { + metadataMap["max_length"] = metadataItem.MaxLength + } + if metadataItem.Matches != nil { + metadataMap["matches"] = metadataItem.Matches + } + if metadataItem.Position != nil { + metadataMap["position"] = 
metadataItem.Position + } + if metadataItem.GroupBy != nil { + metadataMap["group_by"] = metadataItem.GroupBy + } + if metadataItem.Source != nil { + metadataMap["source"] = metadataItem.Source + } + + return metadataMap +} + +func dataSourceActionFlattenSettings(result []schematicsv1.VariableData) (settings []map[string]interface{}) { + for _, settingsItem := range result { + settings = append(settings, dataSourceActionSettingsToMap(settingsItem)) + } + + return settings +} + +func dataSourceActionSettingsToMap(settingsItem schematicsv1.VariableData) (settingsMap map[string]interface{}) { + settingsMap = map[string]interface{}{} + + if settingsItem.Name != nil { + settingsMap["name"] = settingsItem.Name + } + if settingsItem.Value != nil { + settingsMap["value"] = settingsItem.Value + } + if settingsItem.Metadata != nil { + metadataList := []map[string]interface{}{} + metadataMap := dataSourceActionSettingsMetadataToMap(*settingsItem.Metadata) + metadataList = append(metadataList, metadataMap) + settingsMap["metadata"] = metadataList + } + if settingsItem.Link != nil { + settingsMap["link"] = settingsItem.Link + } + + return settingsMap +} + +func dataSourceActionSettingsMetadataToMap(metadataItem schematicsv1.VariableMetadata) (metadataMap map[string]interface{}) { + metadataMap = map[string]interface{}{} + + if metadataItem.Type != nil { + metadataMap["type"] = metadataItem.Type + } + if metadataItem.Aliases != nil { + metadataMap["aliases"] = metadataItem.Aliases + } + if metadataItem.Description != nil { + metadataMap["description"] = metadataItem.Description + } + if metadataItem.DefaultValue != nil { + metadataMap["default_value"] = metadataItem.DefaultValue + } + if metadataItem.Secure != nil { + metadataMap["secure"] = metadataItem.Secure + } + if metadataItem.Immutable != nil { + metadataMap["immutable"] = metadataItem.Immutable + } + if metadataItem.Hidden != nil { + metadataMap["hidden"] = metadataItem.Hidden + } + if metadataItem.Options != nil { + metadataMap["options"] = metadataItem.Options + } + if metadataItem.MinValue != nil { + metadataMap["min_value"] = metadataItem.MinValue + } + if metadataItem.MaxValue != nil { + metadataMap["max_value"] = metadataItem.MaxValue + } + if metadataItem.MinLength != nil { + metadataMap["min_length"] = metadataItem.MinLength + } + if metadataItem.MaxLength != nil { + metadataMap["max_length"] = metadataItem.MaxLength + } + if metadataItem.Matches != nil { + metadataMap["matches"] = metadataItem.Matches + } + if metadataItem.Position != nil { + metadataMap["position"] = metadataItem.Position + } + if metadataItem.GroupBy != nil { + metadataMap["group_by"] = metadataItem.GroupBy + } + if metadataItem.Source != nil { + metadataMap["source"] = metadataItem.Source + } + + return metadataMap +} + +func dataSourceActionFlattenState(result schematicsv1.ActionState) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceActionStateToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceActionStateToMap(stateItem schematicsv1.ActionState) (stateMap map[string]interface{}) { + stateMap = map[string]interface{}{} + + if stateItem.StatusCode != nil { + stateMap["status_code"] = stateItem.StatusCode + } + if stateItem.StatusJobID != nil { + stateMap["status_job_id"] = stateItem.StatusJobID + } + if stateItem.StatusMessage != nil { + stateMap["status_message"] = stateItem.StatusMessage + } + + return stateMap +} + +func dataSourceActionFlattenSysLock(result 
schematicsv1.SystemLock) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceActionSysLockToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceActionSysLockToMap(sysLockItem schematicsv1.SystemLock) (sysLockMap map[string]interface{}) { + sysLockMap = map[string]interface{}{} + + if sysLockItem.SysLocked != nil { + sysLockMap["sys_locked"] = sysLockItem.SysLocked + } + if sysLockItem.SysLockedBy != nil { + sysLockMap["sys_locked_by"] = sysLockItem.SysLockedBy + } + if sysLockItem.SysLockedAt != nil { + sysLockMap["sys_locked_at"] = sysLockItem.SysLockedAt.String() + } + + return sysLockMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_job.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_job.go new file mode 100644 index 00000000000..0320c52d770 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_job.go @@ -0,0 +1,1805 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/schematics-go-sdk/schematicsv1" +) + +func dataSourceIBMSchematicsJob() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMSchematicsJobRead, + + Schema: map[string]*schema.Schema{ + "job_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Use GET jobs API to look up the Job IDs in your IBM Cloud account.", + }, + "command_object": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the Schematics automation resource.", + }, + "command_object_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job command object ID (`workspace-id, action-id or control-id`).", + }, + "command_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Schematics job command name.", + }, + "command_options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Command line options for the command.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "job_inputs": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Job inputs used by an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Default value for the variable, if the override value is 
not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum length of the variable value. Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "job_env_settings": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Environment variables used by the job while performing an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable secure or sensitive 
?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum length of the variable value. Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "tags": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User defined tags, while running the job.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job ID.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job name, uniquely derived from the related action.", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job description derived from the related action.", + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "List of action locations supported by IBM Cloud Schematics service. 
**Note** this does not limit the location of the resources provisioned using Schematics.", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Resource group name derived from the related action.", + }, + "submitted_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job submission time.", + }, + "submitted_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of the user who submitted the job.", + }, + "start_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job start time.", + }, + "end_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job end time.", + }, + "duration": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Duration of job execution, for example, `40 sec`.", + }, + "status": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Job Status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action_job_status": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Action Job Status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action name.", + }, + "status_code": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Status of the jobs.", + }, + "status_message": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action job status message to be displayed along with the `action_status_code`.", + }, + "bastion_status_code": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Status of the resources.", + }, + "bastion_status_message": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Bastion status message to be displayed along with the `bastion_status_code`.", + }, + "targets_status_code": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Status of the resources.", + }, + "targets_status_message": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Aggregated status message for all target resources, to be displayed along with the `targets_status_code`.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job status updation timestamp.", + }, + }, + }, + }, + }, + }, + }, + "data": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Job data.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the job.", + }, + "action_job_data": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Action Job data.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Flow name.", + }, + "inputs": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Input variables data used by an action job.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + 
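+// NOTE: `metadata` describes a single schematicsv1.VariableMetadata object but is
+// declared as a TypeList. The legacy terraform-plugin-sdk helper/schema has no
+// dedicated nested-object type, so single nested structs are conventionally
+// wrapped in a one-element list; the flatten helpers at the bottom of this file
+// build that one-element list.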
Type: schema.TypeList, + Computed: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "outputs": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Output variables data from an action job.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "settings": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Environment variables used by all the templates in an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job status updation timestamp.", + }, + }, + }, + }, + }, + }, + }, + "targets_ini": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Inventory of host and host group for the playbook in `INI` file format. For example, `\"targets_ini\": \"[webserverhost] 172.22.192.6 [dbhost] 172.22.192.5\"`. For more information, about an inventory host group syntax, see [Inventory host groups](/docs/schematics?topic=schematics-schematics-cli-reference#schematics-inventory-host-grps).", + }, + "bastion": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Complete target details with the user inputs and the system generated data.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Target name.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Target type (`cluster`, `vsi`, `icd`, `vpc`).", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Target description.", + }, + "resource_query": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Resource selection query string.", + }, + "credential_ref": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Override credential for each resource. 
Reference to credential values used by all the resources.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Target ID.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Targets creation time.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of the user who created the targets.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Targets update time.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of the user who updated the targets.", + }, + "sys_lock": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "System lock status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sys_locked": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is the Workspace locked by the Schematics action?", + }, + "sys_locked_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the user who performed the action that led to locking the Workspace.", + }, + "sys_locked_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "When the user performed the action that led to locking the Workspace.", + }, + }, + }, + }, + "resource_ids": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of the resource IDs.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "job_log_summary": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Job log summary record.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job ID.", + }, + "job_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the job.", + }, + "log_start_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job log start timestamp.", + }, + "log_analyzed_till": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job log update timestamp.", + }, + "elapsed_time": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "Job log elapsed time (`log_analyzed_till - log_start_at`).", + }, + "log_errors": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Job log errors.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_code": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Error code in the log.", + }, + "error_msg": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Summary error message in the log.", + }, + "error_count": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "Number of occurrences.", + }, + }, + }, + }, + "repo_download_job": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Repo download job log summary.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scanned_file_count": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "Number of files scanned.", + }, + "quarantined_file_count": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "Number of files quarantined.", + }, + "detected_filetype": &schema.Schema{ + Type: schema.TypeString, + Computed: true, +
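+// NOTE: the numeric handling in this block is mixed: the file counts above are
+// TypeFloat, while `inputs_count` and `outputs_count` below are TypeString,
+// mirroring how the Schematics API appears to return them (numbers vs. strings).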
Description: "Detected template or data file type.", + }, + "inputs_count": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Number of inputs detected.", + }, + "outputs_count": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Number of outputs detected.", + }, + }, + }, + }, + "action_job": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Flow Job log summary.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_count": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "number of targets or hosts.", + }, + "task_count": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "number of tasks in playbook.", + }, + "play_count": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "number of plays in playbook.", + }, + "recap": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Recap records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of target or host name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "ok": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "Number of OK.", + }, + "changed": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "Number of changed.", + }, + "failed": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "Number of failed.", + }, + "skipped": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "Number of skipped.", + }, + "unreachable": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + Description: "Number of unreachable.", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "log_store_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job log store URL.", + }, + "state_store_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job state store URL.", + }, + "results_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job results store URL.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job status updation timestamp.", + }, + }, + } +} + +func dataSourceIBMSchematicsJobRead(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + getJobOptions := &schematicsv1.GetJobOptions{} + + getJobOptions.SetJobID(d.Get("job_id").(string)) + + job, response, err := schematicsClient.GetJobWithContext(context.TODO(), getJobOptions) + if err != nil { + log.Printf("[DEBUG] GetJobWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*job.ID) + if err = d.Set("command_object", job.CommandObject); err != nil { + return fmt.Errorf("Error setting command_object: %s", err) + } + if err = d.Set("command_object_id", job.CommandObjectID); err != nil { + return fmt.Errorf("Error setting command_object_id: %s", err) + } + if err = d.Set("command_name", job.CommandName); err != nil { + return fmt.Errorf("Error setting command_name: %s", err) + } + if err = d.Set("command_options", job.CommandOptions); err != nil { + return fmt.Errorf("Error setting command_options: %s", err) + } + + if job.Inputs != nil { + err = d.Set("job_inputs", dataSourceJobFlattenInputs(job.Inputs)) + if err != nil { + return 
fmt.Errorf("Error setting job_inputs %s", err) + } + } + + if job.Settings != nil { + err = d.Set("job_env_settings", dataSourceJobFlattenSettings(job.Settings)) + if err != nil { + return fmt.Errorf("Error setting job_env_settings %s", err) + } + } + if err = d.Set("tags", job.Tags); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + if err = d.Set("id", job.ID); err != nil { + return fmt.Errorf("Error setting id: %s", err) + } + if err = d.Set("name", job.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("description", job.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("location", job.Location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if err = d.Set("resource_group", job.ResourceGroup); err != nil { + return fmt.Errorf("Error setting resource_group: %s", err) + } + if job.SubmittedAt != nil { + if err = d.Set("submitted_at", job.SubmittedAt.String()); err != nil { + return fmt.Errorf("Error setting submitted_at: %s", err) + } + } + if err = d.Set("submitted_by", job.SubmittedBy); err != nil { + return fmt.Errorf("Error setting submitted_by: %s", err) + } + if job.StartAt != nil { + if err = d.Set("start_at", job.StartAt.String()); err != nil { + return fmt.Errorf("Error setting start_at: %s", err) + } + } + if job.EndAt != nil { + if err = d.Set("end_at", job.EndAt.String()); err != nil { + return fmt.Errorf("Error setting end_at: %s", err) + } + } + if err = d.Set("duration", job.Duration); err != nil { + return fmt.Errorf("Error setting duration: %s", err) + } + + if job.Status != nil { + err = d.Set("status", dataSourceJobFlattenStatus(*job.Status)) + if err != nil { + return fmt.Errorf("Error setting status %s", err) + } + } + + if job.Data != nil { + err = d.Set("data", dataSourceJobFlattenData(*job.Data)) + if err != nil { + return fmt.Errorf("Error setting data %s", err) + } + } + if err = d.Set("targets_ini", job.TargetsIni); err != nil { + return fmt.Errorf("Error setting targets_ini: %s", err) + } + + if job.Bastion != nil { + err = d.Set("bastion", dataSourceJobFlattenBastion(*job.Bastion)) + if err != nil { + return fmt.Errorf("Error setting bastion %s", err) + } + } + + if job.LogSummary != nil { + err = d.Set("job_log_summary", dataSourceJobFlattenLogSummary(*job.LogSummary)) + if err != nil { + return fmt.Errorf("Error setting job_log_summary %s", err) + } + } + if err = d.Set("log_store_url", job.LogStoreURL); err != nil { + return fmt.Errorf("Error setting log_store_url: %s", err) + } + if err = d.Set("state_store_url", job.StateStoreURL); err != nil { + return fmt.Errorf("Error setting state_store_url: %s", err) + } + if err = d.Set("results_url", job.ResultsURL); err != nil { + return fmt.Errorf("Error setting results_url: %s", err) + } + if job.UpdatedAt != nil { + if err = d.Set("updated_at", job.UpdatedAt.String()); err != nil { + return fmt.Errorf("Error setting updated_at: %s", err) + } + } + + return nil +} + +func dataSourceJobFlattenInputs(result []schematicsv1.VariableData) (inputs []map[string]interface{}) { + for _, inputsItem := range result { + inputs = append(inputs, dataSourceJobInputsToMap(inputsItem)) + } + + return inputs +} + +func dataSourceJobInputsToMap(inputsItem schematicsv1.VariableData) (inputsMap map[string]interface{}) { + inputsMap = map[string]interface{}{} + + if inputsItem.Name != nil { + inputsMap["name"] = inputsItem.Name + } + if inputsItem.Value != nil { + inputsMap["value"] = 
inputsItem.Value + } + if inputsItem.Metadata != nil { + metadataList := []map[string]interface{}{} + metadataMap := dataSourceJobInputsMetadataToMap(*inputsItem.Metadata) + metadataList = append(metadataList, metadataMap) + inputsMap["metadata"] = metadataList + } + if inputsItem.Link != nil { + inputsMap["link"] = inputsItem.Link + } + + return inputsMap +} + +func dataSourceJobInputsMetadataToMap(metadataItem schematicsv1.VariableMetadata) (metadataMap map[string]interface{}) { + metadataMap = map[string]interface{}{} + + if metadataItem.Type != nil { + metadataMap["type"] = metadataItem.Type + } + if metadataItem.Aliases != nil { + metadataMap["aliases"] = metadataItem.Aliases + } + if metadataItem.Description != nil { + metadataMap["description"] = metadataItem.Description + } + if metadataItem.DefaultValue != nil { + metadataMap["default_value"] = metadataItem.DefaultValue + } + if metadataItem.Secure != nil { + metadataMap["secure"] = metadataItem.Secure + } + if metadataItem.Immutable != nil { + metadataMap["immutable"] = metadataItem.Immutable + } + if metadataItem.Hidden != nil { + metadataMap["hidden"] = metadataItem.Hidden + } + if metadataItem.Options != nil { + metadataMap["options"] = metadataItem.Options + } + if metadataItem.MinValue != nil { + metadataMap["min_value"] = metadataItem.MinValue + } + if metadataItem.MaxValue != nil { + metadataMap["max_value"] = metadataItem.MaxValue + } + if metadataItem.MinLength != nil { + metadataMap["min_length"] = metadataItem.MinLength + } + if metadataItem.MaxLength != nil { + metadataMap["max_length"] = metadataItem.MaxLength + } + if metadataItem.Matches != nil { + metadataMap["matches"] = metadataItem.Matches + } + if metadataItem.Position != nil { + metadataMap["position"] = metadataItem.Position + } + if metadataItem.GroupBy != nil { + metadataMap["group_by"] = metadataItem.GroupBy + } + if metadataItem.Source != nil { + metadataMap["source"] = metadataItem.Source + } + + return metadataMap +} + +func dataSourceJobOutputsMetadataToMap(metadataItem schematicsv1.VariableMetadata) (metadataMap map[string]interface{}) { + metadataMap = map[string]interface{}{} + + if metadataItem.Type != nil { + metadataMap["type"] = metadataItem.Type + } + if metadataItem.Aliases != nil { + metadataMap["aliases"] = metadataItem.Aliases + } + if metadataItem.Description != nil { + metadataMap["description"] = metadataItem.Description + } + if metadataItem.DefaultValue != nil { + metadataMap["default_value"] = metadataItem.DefaultValue + } + if metadataItem.Secure != nil { + metadataMap["secure"] = metadataItem.Secure + } + if metadataItem.Immutable != nil { + metadataMap["immutable"] = metadataItem.Immutable + } + if metadataItem.Hidden != nil { + metadataMap["hidden"] = metadataItem.Hidden + } + if metadataItem.Options != nil { + metadataMap["options"] = metadataItem.Options + } + if metadataItem.MinValue != nil { + metadataMap["min_value"] = metadataItem.MinValue + } + if metadataItem.MaxValue != nil { + metadataMap["max_value"] = metadataItem.MaxValue + } + if metadataItem.MinLength != nil { + metadataMap["min_length"] = metadataItem.MinLength + } + if metadataItem.MaxLength != nil { + metadataMap["max_length"] = metadataItem.MaxLength + } + if metadataItem.Matches != nil { + metadataMap["matches"] = metadataItem.Matches + } + if metadataItem.Position != nil { + metadataMap["position"] = metadataItem.Position + } + if metadataItem.GroupBy != nil { + metadataMap["group_by"] = metadataItem.GroupBy + } + if metadataItem.Source != nil { + 
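+// NOTE: every SDK field is a pointer and is nil-checked before being copied
+// into state, so only attributes actually present in the Schematics API
+// response are written to the flattened map.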
metadataMap["source"] = metadataItem.Source + } + + return metadataMap +} + +func dataSourceJobFlattenSettings(result []schematicsv1.VariableData) (settings []map[string]interface{}) { + for _, settingsItem := range result { + settings = append(settings, dataSourceJobSettingsToMap(settingsItem)) + } + + return settings +} + +func dataSourceJobSettingsToMap(settingsItem schematicsv1.VariableData) (settingsMap map[string]interface{}) { + settingsMap = map[string]interface{}{} + + if settingsItem.Name != nil { + settingsMap["name"] = settingsItem.Name + } + if settingsItem.Value != nil { + settingsMap["value"] = settingsItem.Value + } + if settingsItem.Metadata != nil { + metadataList := []map[string]interface{}{} + metadataMap := dataSourceJobSettingsMetadataToMap(*settingsItem.Metadata) + metadataList = append(metadataList, metadataMap) + settingsMap["metadata"] = metadataList + } + if settingsItem.Link != nil { + settingsMap["link"] = settingsItem.Link + } + + return settingsMap +} + +func dataSourceJobSettingsMetadataToMap(metadataItem schematicsv1.VariableMetadata) (metadataMap map[string]interface{}) { + metadataMap = map[string]interface{}{} + + if metadataItem.Type != nil { + metadataMap["type"] = metadataItem.Type + } + if metadataItem.Aliases != nil { + metadataMap["aliases"] = metadataItem.Aliases + } + if metadataItem.Description != nil { + metadataMap["description"] = metadataItem.Description + } + if metadataItem.DefaultValue != nil { + metadataMap["default_value"] = metadataItem.DefaultValue + } + if metadataItem.Secure != nil { + metadataMap["secure"] = metadataItem.Secure + } + if metadataItem.Immutable != nil { + metadataMap["immutable"] = metadataItem.Immutable + } + if metadataItem.Hidden != nil { + metadataMap["hidden"] = metadataItem.Hidden + } + if metadataItem.Options != nil { + metadataMap["options"] = metadataItem.Options + } + if metadataItem.MinValue != nil { + metadataMap["min_value"] = metadataItem.MinValue + } + if metadataItem.MaxValue != nil { + metadataMap["max_value"] = metadataItem.MaxValue + } + if metadataItem.MinLength != nil { + metadataMap["min_length"] = metadataItem.MinLength + } + if metadataItem.MaxLength != nil { + metadataMap["max_length"] = metadataItem.MaxLength + } + if metadataItem.Matches != nil { + metadataMap["matches"] = metadataItem.Matches + } + if metadataItem.Position != nil { + metadataMap["position"] = metadataItem.Position + } + if metadataItem.GroupBy != nil { + metadataMap["group_by"] = metadataItem.GroupBy + } + if metadataItem.Source != nil { + metadataMap["source"] = metadataItem.Source + } + + return metadataMap +} + +func dataSourceJobFlattenStatus(result schematicsv1.JobStatus) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceJobStatusToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceJobStatusToMap(statusItem schematicsv1.JobStatus) (statusMap map[string]interface{}) { + statusMap = map[string]interface{}{} + + if statusItem.ActionJobStatus != nil { + actionJobStatusList := []map[string]interface{}{} + actionJobStatusMap := dataSourceJobStatusActionJobStatusToMap(*statusItem.ActionJobStatus) + actionJobStatusList = append(actionJobStatusList, actionJobStatusMap) + statusMap["action_job_status"] = actionJobStatusList + } + + return statusMap +} + +func dataSourceJobStatusActionJobStatusToMap(actionJobStatusItem schematicsv1.JobStatusAction) (actionJobStatusMap map[string]interface{}) { + actionJobStatusMap = 
map[string]interface{}{} + + if actionJobStatusItem.ActionName != nil { + actionJobStatusMap["action_name"] = actionJobStatusItem.ActionName + } + if actionJobStatusItem.StatusCode != nil { + actionJobStatusMap["status_code"] = actionJobStatusItem.StatusCode + } + if actionJobStatusItem.StatusMessage != nil { + actionJobStatusMap["status_message"] = actionJobStatusItem.StatusMessage + } + if actionJobStatusItem.BastionStatusCode != nil { + actionJobStatusMap["bastion_status_code"] = actionJobStatusItem.BastionStatusCode + } + if actionJobStatusItem.BastionStatusMessage != nil { + actionJobStatusMap["bastion_status_message"] = actionJobStatusItem.BastionStatusMessage + } + if actionJobStatusItem.TargetsStatusCode != nil { + actionJobStatusMap["targets_status_code"] = actionJobStatusItem.TargetsStatusCode + } + if actionJobStatusItem.TargetsStatusMessage != nil { + actionJobStatusMap["targets_status_message"] = actionJobStatusItem.TargetsStatusMessage + } + if actionJobStatusItem.UpdatedAt != nil { + actionJobStatusMap["updated_at"] = actionJobStatusItem.UpdatedAt.String() + } + + return actionJobStatusMap +} + +func dataSourceJobFlattenData(result schematicsv1.JobData) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceJobDataToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceJobDataToMap(dataItem schematicsv1.JobData) (dataMap map[string]interface{}) { + dataMap = map[string]interface{}{} + + if dataItem.JobType != nil { + dataMap["job_type"] = dataItem.JobType + } + if dataItem.ActionJobData != nil { + actionJobDataList := []map[string]interface{}{} + actionJobDataMap := dataSourceJobDataActionJobDataToMap(*dataItem.ActionJobData) + actionJobDataList = append(actionJobDataList, actionJobDataMap) + dataMap["action_job_data"] = actionJobDataList + } + + return dataMap +} + +func dataSourceJobDataActionJobDataToMap(actionJobDataItem schematicsv1.JobDataAction) (actionJobDataMap map[string]interface{}) { + actionJobDataMap = map[string]interface{}{} + + if actionJobDataItem.ActionName != nil { + actionJobDataMap["action_name"] = actionJobDataItem.ActionName + } + if actionJobDataItem.Inputs != nil { + inputsList := []map[string]interface{}{} + for _, inputsItem := range actionJobDataItem.Inputs { + inputsList = append(inputsList, dataSourceJobActionJobDataInputsToMap(inputsItem)) + } + actionJobDataMap["inputs"] = inputsList + } + if actionJobDataItem.Outputs != nil { + outputsList := []map[string]interface{}{} + for _, outputsItem := range actionJobDataItem.Outputs { + outputsList = append(outputsList, dataSourceJobActionJobDataOutputsToMap(outputsItem)) + } + actionJobDataMap["outputs"] = outputsList + } + if actionJobDataItem.Settings != nil { + settingsList := []map[string]interface{}{} + for _, settingsItem := range actionJobDataItem.Settings { + settingsList = append(settingsList, dataSourceJobActionJobDataSettingsToMap(settingsItem)) + } + actionJobDataMap["settings"] = settingsList + } + if actionJobDataItem.UpdatedAt != nil { + actionJobDataMap["updated_at"] = actionJobDataItem.UpdatedAt.String() + } + + return actionJobDataMap +} + +func dataSourceJobActionJobDataInputsToMap(inputsItem schematicsv1.VariableData) (inputsMap map[string]interface{}) { + inputsMap = map[string]interface{}{} + + if inputsItem.Name != nil { + inputsMap["name"] = inputsItem.Name + } + if inputsItem.Value != nil { + inputsMap["value"] = inputsItem.Value + } + if inputsItem.Metadata != nil { + metadataList := 
[]map[string]interface{}{} + metadataMap := dataSourceJobInputsMetadataToMap(*inputsItem.Metadata) + metadataList = append(metadataList, metadataMap) + inputsMap["metadata"] = metadataList + } + if inputsItem.Link != nil { + inputsMap["link"] = inputsItem.Link + } + + return inputsMap +} + +func dataSourceJobActionJobDataOutputsToMap(outputsItem schematicsv1.VariableData) (outputsMap map[string]interface{}) { + outputsMap = map[string]interface{}{} + + if outputsItem.Name != nil { + outputsMap["name"] = outputsItem.Name + } + if outputsItem.Value != nil { + outputsMap["value"] = outputsItem.Value + } + if outputsItem.Metadata != nil { + metadataList := []map[string]interface{}{} + metadataMap := dataSourceJobOutputsMetadataToMap(*outputsItem.Metadata) + metadataList = append(metadataList, metadataMap) + outputsMap["metadata"] = metadataList + } + if outputsItem.Link != nil { + outputsMap["link"] = outputsItem.Link + } + + return outputsMap +} + +func dataSourceJobActionJobDataSettingsToMap(settingsItem schematicsv1.VariableData) (settingsMap map[string]interface{}) { + settingsMap = map[string]interface{}{} + + if settingsItem.Name != nil { + settingsMap["name"] = settingsItem.Name + } + if settingsItem.Value != nil { + settingsMap["value"] = settingsItem.Value + } + if settingsItem.Metadata != nil { + metadataList := []map[string]interface{}{} + metadataMap := dataSourceJobSettingsMetadataToMap(*settingsItem.Metadata) + metadataList = append(metadataList, metadataMap) + settingsMap["metadata"] = metadataList + } + if settingsItem.Link != nil { + settingsMap["link"] = settingsItem.Link + } + + return settingsMap +} + +func dataSourceJobFlattenBastion(result schematicsv1.TargetResourceset) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceJobBastionToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceJobBastionToMap(bastionItem schematicsv1.TargetResourceset) (bastionMap map[string]interface{}) { + bastionMap = map[string]interface{}{} + + if bastionItem.Name != nil { + bastionMap["name"] = bastionItem.Name + } + if bastionItem.Type != nil { + bastionMap["type"] = bastionItem.Type + } + if bastionItem.Description != nil { + bastionMap["description"] = bastionItem.Description + } + if bastionItem.ResourceQuery != nil { + bastionMap["resource_query"] = bastionItem.ResourceQuery + } + if bastionItem.CredentialRef != nil { + bastionMap["credential_ref"] = bastionItem.CredentialRef + } + if bastionItem.ID != nil { + bastionMap["id"] = bastionItem.ID + } + if bastionItem.CreatedAt != nil { + bastionMap["created_at"] = bastionItem.CreatedAt.String() + } + if bastionItem.CreatedBy != nil { + bastionMap["created_by"] = bastionItem.CreatedBy + } + if bastionItem.UpdatedAt != nil { + bastionMap["updated_at"] = bastionItem.UpdatedAt.String() + } + if bastionItem.UpdatedBy != nil { + bastionMap["updated_by"] = bastionItem.UpdatedBy + } + if bastionItem.SysLock != nil { + sysLockList := []map[string]interface{}{} + sysLockMap := dataSourceJobBastionSysLockToMap(*bastionItem.SysLock) + sysLockList = append(sysLockList, sysLockMap) + bastionMap["sys_lock"] = sysLockList + } + if bastionItem.ResourceIds != nil { + bastionMap["resource_ids"] = bastionItem.ResourceIds + } + + return bastionMap +} + +func dataSourceJobBastionSysLockToMap(sysLockItem schematicsv1.SystemLock) (sysLockMap map[string]interface{}) { + sysLockMap = map[string]interface{}{} + + if sysLockItem.SysLocked != nil { + 
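+// NOTE: timestamp fields such as SysLockedAt below are serialized with
+// .String(), since the schema exposes them as TypeString; the SDK's date-time
+// type renders as an RFC3339-style string (an assumption about the exact format).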
sysLockMap["sys_locked"] = sysLockItem.SysLocked + } + if sysLockItem.SysLockedBy != nil { + sysLockMap["sys_locked_by"] = sysLockItem.SysLockedBy + } + if sysLockItem.SysLockedAt != nil { + sysLockMap["sys_locked_at"] = sysLockItem.SysLockedAt.String() + } + + return sysLockMap +} + +func dataSourceJobFlattenLogSummary(result schematicsv1.JobLogSummary) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceJobLogSummaryToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceJobLogSummaryToMap(logSummaryItem schematicsv1.JobLogSummary) (logSummaryMap map[string]interface{}) { + logSummaryMap = map[string]interface{}{} + + if logSummaryItem.JobID != nil { + logSummaryMap["job_id"] = logSummaryItem.JobID + } + if logSummaryItem.JobType != nil { + logSummaryMap["job_type"] = logSummaryItem.JobType + } + if logSummaryItem.LogStartAt != nil { + logSummaryMap["log_start_at"] = logSummaryItem.LogStartAt.String() + } + if logSummaryItem.LogAnalyzedTill != nil { + logSummaryMap["log_analyzed_till"] = logSummaryItem.LogAnalyzedTill.String() + } + if logSummaryItem.ElapsedTime != nil { + logSummaryMap["elapsed_time"] = logSummaryItem.ElapsedTime + } + if logSummaryItem.LogErrors != nil { + logErrorsList := []map[string]interface{}{} + for _, logErrorsItem := range logSummaryItem.LogErrors { + logErrorsList = append(logErrorsList, dataSourceJobLogSummaryLogErrorsToMap(logErrorsItem)) + } + logSummaryMap["log_errors"] = logErrorsList + } + if logSummaryItem.RepoDownloadJob != nil { + repoDownloadJobList := []map[string]interface{}{} + repoDownloadJobMap := dataSourceJobLogSummaryRepoDownloadJobToMap(*logSummaryItem.RepoDownloadJob) + repoDownloadJobList = append(repoDownloadJobList, repoDownloadJobMap) + logSummaryMap["repo_download_job"] = repoDownloadJobList + } + if logSummaryItem.ActionJob != nil { + actionJobList := []map[string]interface{}{} + actionJobMap := dataSourceJobLogSummaryActionJobToMap(*logSummaryItem.ActionJob) + actionJobList = append(actionJobList, actionJobMap) + logSummaryMap["action_job"] = actionJobList + } + + return logSummaryMap +} + +func dataSourceJobLogSummaryLogErrorsToMap(logErrorsItem schematicsv1.JobLogSummaryLogErrorsItem) (logErrorsMap map[string]interface{}) { + logErrorsMap = map[string]interface{}{} + + if logErrorsItem.ErrorCode != nil { + logErrorsMap["error_code"] = logErrorsItem.ErrorCode + } + if logErrorsItem.ErrorMsg != nil { + logErrorsMap["error_msg"] = logErrorsItem.ErrorMsg + } + if logErrorsItem.ErrorCount != nil { + logErrorsMap["error_count"] = logErrorsItem.ErrorCount + } + + return logErrorsMap +} + +func dataSourceJobLogSummaryRepoDownloadJobToMap(repoDownloadJobItem schematicsv1.JobLogSummaryRepoDownloadJob) (repoDownloadJobMap map[string]interface{}) { + repoDownloadJobMap = map[string]interface{}{} + + if repoDownloadJobItem.ScannedFileCount != nil { + repoDownloadJobMap["scanned_file_count"] = repoDownloadJobItem.ScannedFileCount + } + if repoDownloadJobItem.QuarantinedFileCount != nil { + repoDownloadJobMap["quarantined_file_count"] = repoDownloadJobItem.QuarantinedFileCount + } + if repoDownloadJobItem.DetectedFiletype != nil { + repoDownloadJobMap["detected_filetype"] = repoDownloadJobItem.DetectedFiletype + } + if repoDownloadJobItem.InputsCount != nil { + repoDownloadJobMap["inputs_count"] = repoDownloadJobItem.InputsCount + } + if repoDownloadJobItem.OutputsCount != nil { + repoDownloadJobMap["outputs_count"] = repoDownloadJobItem.OutputsCount + } 
+ + return repoDownloadJobMap +} + +func dataSourceJobLogSummaryActionJobToMap(actionJobItem schematicsv1.JobLogSummaryActionJob) (actionJobMap map[string]interface{}) { + actionJobMap = map[string]interface{}{} + + if actionJobItem.TargetCount != nil { + actionJobMap["target_count"] = actionJobItem.TargetCount + } + if actionJobItem.TaskCount != nil { + actionJobMap["task_count"] = actionJobItem.TaskCount + } + if actionJobItem.PlayCount != nil { + actionJobMap["play_count"] = actionJobItem.PlayCount + } + if actionJobItem.Recap != nil { + recapList := []map[string]interface{}{} + recapMap := dataSourceJobActionJobRecapToMap(*actionJobItem.Recap) + recapList = append(recapList, recapMap) + actionJobMap["recap"] = recapList + } + + return actionJobMap +} + +func dataSourceJobActionJobRecapToMap(recapItem schematicsv1.JobLogSummaryActionJobRecap) (recapMap map[string]interface{}) { + recapMap = map[string]interface{}{} + + if recapItem.Target != nil { + recapMap["target"] = recapItem.Target + } + if recapItem.Ok != nil { + recapMap["ok"] = recapItem.Ok + } + if recapItem.Changed != nil { + recapMap["changed"] = recapItem.Changed + } + if recapItem.Failed != nil { + recapMap["failed"] = recapItem.Failed + } + if recapItem.Skipped != nil { + recapMap["skipped"] = recapItem.Skipped + } + if recapItem.Unreachable != nil { + recapMap["unreachable"] = recapItem.Unreachable + } + + return recapMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_output.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_output.go new file mode 100644 index 00000000000..d83898c2384 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_output.go @@ -0,0 +1,144 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/json" + "fmt" + "log" + "time" + + "github.com/IBM/schematics-go-sdk/schematicsv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMSchematicsOutput() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMSchematicsOutputRead, + + Schema: map[string]*schema.Schema{ + "workspace_id": { + Type: schema.TypeString, + Required: true, + Description: "The ID of the workspace for which you want to retrieve output values. 
To find the workspace ID, use the `GET /workspaces` API.", + }, + "template_id": { + Type: schema.TypeString, + Required: true, + Description: "The ID of the template.", + }, + "output_values": { + Type: schema.TypeMap, + Computed: true, + }, + "output_json": { + Type: schema.TypeString, + Optional: true, + Description: "The JSON output as a string.", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this Workspace.", + }, + }, + } +} + +func dataSourceIBMSchematicsOutputRead(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + workspaceID := d.Get("workspace_id").(string) + templateID := d.Get("template_id").(string) + + getWorkspaceOutputsOptions := &schematicsv1.GetWorkspaceOutputsOptions{} + + getWorkspaceOutputsOptions.SetWID(workspaceID) + + outputValuesList, response, err := schematicsClient.GetWorkspaceOutputs(getWorkspaceOutputsOptions) + if err != nil { + log.Printf("[DEBUG] GetWorkspaceOutputs failed %s\n%s", err, response) + return err + } + + var outputJSON string + items := make(map[string]interface{}) + found := false + for _, fields := range outputValuesList { + if *fields.ID == templateID { + output := fields.OutputValues + found = true + outputByte, err := json.MarshalIndent(output, "", "") + if err != nil { + return err + } + outputJSON = string(outputByte[:]) + for _, value := range output { + for key, val := range value.(map[string]interface{}) { + val2 := val.(map[string]interface{})["value"] + items[key] = val2 + } + } + } + } + + if !found { + return fmt.Errorf("Error while fetching template ID in workspace: %s", workspaceID) + } + d.Set("output_json", outputJSON) + d.SetId(fmt.Sprintf("%s/%s", workspaceID, templateID)) + d.Set("output_values", Flatten(items)) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + + d.Set(ResourceControllerURL, controller+"/schematics") + + return nil +} + +// dataSourceIBMSchematicsOutputID returns a reasonable ID for the list.
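+// NOTE: the function below is not referenced by dataSourceIBMSchematicsOutputRead
+// above, which instead derives its ID from "<workspace_id>/<template_id>"; a
+// time.Now-based ID would force a fresh read on every refresh.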
+func dataSourceIBMSchematicsOutputID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceOutputValuesListFlattenOutputValues(result []schematicsv1.OutputValuesItem) (outputValues interface{}) { + for _, outputValuesItem := range result { + outputValues = dataSourceOutputValuesListOutputValuesToMap(outputValuesItem) + } + + return outputValues +} + +func dataSourceOutputValuesListOutputValuesToMap(outputValuesItem schematicsv1.OutputValuesItem) (outputValuesMap map[string]interface{}) { + outputValuesMap = map[string]interface{}{} + + if outputValuesItem.Folder != nil { + outputValuesMap["folder"] = outputValuesItem.Folder + } + if outputValuesItem.ID != nil { + outputValuesMap["id"] = outputValuesItem.ID + } + + m := []Map{} + + for _, outputValues := range outputValuesItem.OutputValues { + m = append(m, Flatten(outputValues.(map[string]interface{}))) + } + + if outputValuesItem.OutputValues != nil { + outputValuesMap["output_values"] = m + } + if outputValuesItem.ValueType != nil { + outputValuesMap["value_type"] = outputValuesItem.ValueType + } + + return outputValuesMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_state.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_state.go new file mode 100644 index 00000000000..75707c5f1ae --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_state.go @@ -0,0 +1,102 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/schematics-go-sdk/schematicsv1" +) + +func dataSourceIBMSchematicsState() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMSchematicsStateRead, + + Schema: map[string]*schema.Schema{ + "workspace_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The ID of the workspace for which you want to retrieve the Terraform statefile. To find the workspace ID, use the `GET /v1/workspaces` API.", + }, + "template_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The ID of the Terraform template for which you want to retrieve the Terraform statefile. When you create a workspace, the Terraform template that your workspace points to is assigned a unique ID. 
To find this ID, use the `GET /v1/workspaces` API and review the `template_data.id` value.", + }, + "state_store": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "state_store_json": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this workspace", + }, + }, + } +} + +func dataSourceIBMSchematicsStateRead(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + getWorkspaceTemplateStateOptions := &schematicsv1.GetWorkspaceTemplateStateOptions{} + + getWorkspaceTemplateStateOptions.SetWID(d.Get("workspace_id").(string)) + getWorkspaceTemplateStateOptions.SetTID(d.Get("template_id").(string)) + + _, response, err := schematicsClient.GetWorkspaceTemplateStateWithContext(context.TODO(), getWorkspaceTemplateStateOptions) + if err != nil { + log.Printf("[DEBUG] GetWorkspaceTemplateStateWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(dataSourceIBMSchematicsStateID(d)) + + var stateStore map[string]interface{} + json.Unmarshal(response.Result.(json.RawMessage), &stateStore) + + b := bytes.NewReader(response.Result.(json.RawMessage)) + + decoder := json.NewDecoder(b) + decoder.UseNumber() + decoder.Decode(&stateStore) + + statestr := fmt.Sprintf("%v", stateStore) + d.Set("state_store", statestr) + + stateByte, err := json.MarshalIndent(stateStore, "", "") + if err != nil { + return err + } + + stateStoreJSON := string(stateByte[:]) + d.Set("state_store_json", stateStoreJSON) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/schematics") + + return nil +} + +// dataSourceIBMSchematicsStateID returns a reasonable ID for the list. +func dataSourceIBMSchematicsStateID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_workspace.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_workspace.go new file mode 100644 index 00000000000..d5ef5332acf --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_schematics_workspace.go @@ -0,0 +1,928 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/schematics-go-sdk/schematicsv1" +) + +func dataSourceIBMSchematicsWorkspace() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMSchematicsWorkspaceRead, + + Schema: map[string]*schema.Schema{ + "workspace_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The ID of the workspace for which you want to retrieve detailed information. To find the workspace ID, use the `GET /v1/workspaces` API.", + }, + "applied_shareddata_ids": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of applied shared dataset id.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "catalog_ref": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Information about the software template that you chose from the IBM Cloud catalog. 
This information is returned for IBM Cloud catalog offerings only.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dry_run": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Dry run.", + }, + "item_icon_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL to the icon of the software template in the IBM Cloud catalog.", + }, + "item_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The ID of the software template that you chose to install from the IBM Cloud catalog. This software is provisioned with Schematics.", + }, + "item_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The name of the software that you chose to install from the IBM Cloud catalog.", + }, + "item_readme_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL to the readme file of the software template in the IBM Cloud catalog.", + }, + "item_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL to the software template in the IBM Cloud catalog.", + }, + "launch_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL to the dashboard to access your software.", + }, + "offering_version": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The version of the software template that you chose to install from the IBM Cloud catalog.", + }, + }, + }, + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The timestamp when the workspace was created.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user ID that created the workspace.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Workspace CRN.", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The description of the workspace.", + }, + "last_health_check_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The timestamp when the last health check was performed by Schematics.", + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IBM Cloud location where your workspace was provisioned.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The name of the workspace.", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource group the workspace was provisioned in.", + }, + "runtime_data": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Information about the provisioning engine, state file, and runtime logs.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "engine_cmd": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The command that was used to apply the Terraform template or IBM Cloud catalog software template.", + }, + "engine_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The provisioning engine that was used to apply the Terraform template or IBM Cloud catalog software template.", + }, + "engine_version": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The version of the provisioning engine that was used.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The ID 
that was assigned to your Terraform template or IBM Cloud catalog software template.", + }, + "log_store_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL to access the logs that were created during the creation, update, or deletion of your IBM Cloud resources.", + }, + "output_values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of Output values.", + Elem: &schema.Schema{ + Type: schema.TypeMap, + }, + }, + "resources": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "List of resources.", + Elem: &schema.Schema{ + Type: schema.TypeMap, + }, + }, + "state_store_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL where the Terraform statefile (`terraform.tfstate`) is stored. You can use the statefile to find an overview of IBM Cloud resources that were created by Schematics. Schematics uses the statefile as an inventory list to determine future create, update, or deletion actions.", + }, + }, + }, + }, + "shared_data": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Information that is shared across templates in IBM Cloud catalog offerings. This information is not provided when you create a workspace from your own Terraform template.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The ID of the cluster where you want to provision the resources of all IBM Cloud catalog templates that are included in the catalog offering.", + }, + "cluster_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Target cluster name.", + }, + "entitlement_keys": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The entitlement key that you want to use to install IBM Cloud entitled software.", + Elem: &schema.Schema{ + Type: schema.TypeMap, + }, + }, + "namespace": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The Kubernetes namespace or OpenShift project where the resources of all IBM Cloud catalog templates that are included in the catalog offering are deployed into.", + }, + "region": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IBM Cloud region that you want to use for the resources of all IBM Cloud catalog templates that are included in the catalog offering.", + }, + "resource_group_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The ID of the resource group that you want to use for the resources of all IBM Cloud catalog templates that are included in the catalog offering.", + }, + }, + }, + }, + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The status of the workspace. **Active**: After you successfully ran your infrastructure code by applying your Terraform execution plan, the state of your workspace changes to `Active`. **Connecting**: Schematics tries to connect to the template in your source repo. If successfully connected, the template is downloaded and metadata, such as input parameters, is extracted. After the template is downloaded, the state of the workspace changes to `Scanning`. **Draft**: The workspace is created without a reference to a GitHub or GitLab repository. **Failed**: If errors occur during the execution of your infrastructure code in IBM Cloud Schematics, your workspace status is set to `Failed`. 
**Inactive**: The Terraform template was scanned successfully and the workspace creation is complete. You can now start running Schematics plan and apply actions to provision the IBM Cloud resources that you specified in your template. If you have an `Active` workspace and decide to remove all your resources, your workspace is set to `Inactive` after all your resources are removed. **In progress**: When you instruct IBM Cloud Schematics to run your infrastructure code by applying your Terraform execution plan, the status of your workspace changes to `In progress`. **Scanning**: The download of the Terraform template is complete and vulnerability scanning started. If the scan is successful, the workspace state changes to `Inactive`. If errors in your template are found, the state changes to `Template Error`. **Stopped**: The Schematics plan, apply, or destroy action was cancelled manually. **Template Error**: The Schematics template contains errors and cannot be processed.",
+			},
+			"tags": &schema.Schema{
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "A list of tags that are associated with the workspace.",
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+			},
+			"template_env_settings": &schema.Schema{
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "List of environment values.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The name of the variable.",
+						},
+						"value": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Enter the value as a string for the primitive types such as `bool`, `number`, `string`, and `HCL` format for the complex variables, as you provide in a `.tfvars` file. **You need to enter escaped string of `HCL` format for the complex variable value**. For more information about how to declare variables in a Terraform configuration file and provide values to Schematics, see [Providing values for the declared variables](/docs/schematics?topic=schematics-create-tf-config#declare-variable).",
+						},
+						"secure": &schema.Schema{
+							Type:        schema.TypeBool,
+							Computed:    true,
+							Description: "If set to `true`, the value of your input variable is protected and not returned in your API response.",
+						},
+						"hidden": &schema.Schema{
+							Type:        schema.TypeBool,
+							Computed:    true,
+							Description: "If set to `true`, the value of your input variable is protected and not returned in your API response.",
+						},
+					},
+				},
+			},
+			"template_git_folder": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The subfolder in your GitHub or GitLab repository where your Terraform template is stored. If your template is stored in the root directory, `.` is returned.",
+			},
+			"template_init_state_file": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Init state file.",
+			},
+			"template_type": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The Terraform version that was used to run your Terraform code.",
+			},
+			"template_uninstall_script_name": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Uninstall script name.",
+			},
+			"template_values": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "A list of variable values that you want to apply during the Helm chart installation. The list must be provided in JSON format, such as `\"autoscaling: enabled: true minReplicas: 2\"`.
The values that you define here override the default Helm chart values. This field is supported only for IBM Cloud catalog offerings that are provisioned by using the Terraform Helm provider.",
+			},
+			"template_values_metadata": &schema.Schema{
+				Type:        schema.TypeList,
+				Optional:    true,
+				Computed:    true,
+				Description: "A list of input variables that are associated with the workspace.",
+				Elem:        &schema.Schema{Type: schema.TypeMap},
+			},
+			"template_inputs": &schema.Schema{
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "Information about the input variables that your template uses.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"description": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The description of your input variable.",
+						},
+						"name": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The name of the variable.",
+						},
+						"secure": &schema.Schema{
+							Type:        schema.TypeBool,
+							Computed:    true,
+							Description: "If set to `true`, the value of your input variable is protected and not returned in your API response.",
+						},
+						"type": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "`Terraform v0.11` supports the `string`, `list`, and `map` data types. For more information about the syntax, see [Configuring input variables](https://www.terraform.io/docs/configuration-0-11/variables.html).
`Terraform v0.12` additionally supports `bool`, `number` and complex data types such as `list(type)`, `map(type)`, `object({attribute name=type,..})`, `set(type)`, `tuple([type])`. For more information about the syntax for complex data types, see [Configuring variables](https://www.terraform.io/docs/configuration/variables.html#type-constraints).",
+						},
+						"use_default": &schema.Schema{
+							Type:        schema.TypeBool,
+							Computed:    true,
+							Description: "The variable uses its default value and is not overridden.",
+						},
+						"value": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Enter the value as a string for the primitive types such as `bool`, `number`, `string`, and `HCL` format for the complex variables, as you provide in a `.tfvars` file. **You need to enter escaped string of `HCL` format for the complex variable value**. For more information about how to declare variables in a Terraform configuration file and provide values to Schematics, see [Providing values for the declared variables](/docs/schematics?topic=schematics-create-tf-config#declare-variable).",
+						},
+					},
+				},
+			},
+			"template_ref": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Workspace template ref.",
+			},
+			"template_git_branch": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The branch in GitHub where your Terraform template is stored.",
+			},
+			"template_git_full_url": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Full repo URL.",
+			},
+			"template_git_has_uploadedgitrepotar": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Computed:    true,
+				Description: "Has uploaded git repo tar.",
+			},
+			"template_git_release": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The release tag in GitHub of your Terraform template.",
+			},
+			"template_git_repo_sha_value": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Repo SHA value.",
+			},
+			"template_git_repo_url": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL to the repository where the IBM Cloud catalog software template is stored.",
+			},
+			"template_git_url": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL to the GitHub or GitLab repository where your Terraform template is stored.",
+			},
+
+			/*"template_type": &schema.Schema{
+				Type:        schema.TypeList,
+				Optional:    true,
+				Description: "List of Workspace type.",
+				Elem:        &schema.Schema{Type: schema.TypeString},
+			},*/
+			"updated_at": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The timestamp when the workspace was last updated.",
+			},
+			"updated_by": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The user ID that updated the workspace.",
+			},
+			"is_frozen": {
+				Type:       schema.TypeBool,
+				Computed:   true,
+				Deprecated: "use frozen instead",
+			},
+			"frozen": &schema.Schema{
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "If set to true, the workspace is frozen and changes to the workspace are disabled.",
+			},
+			"frozen_at": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The timestamp when the workspace was frozen.",
+			},
+			"frozen_by": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The user ID that froze the workspace.",
+			},
+			"is_locked": &schema.Schema{
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "If set to true, the workspace is locked and
disabled for changes.", + Deprecated: "Use locked instead", + }, + "locked": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, the workspace is locked and disabled for changes.", + }, + "locked_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user ID that initiated a resource-related action, such as applying or destroying resources, that locked the workspace.", + }, + "locked_time": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The timestamp when the workspace was locked.", + }, + "status_code": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The success or error code that was returned for the last plan, apply, or destroy action that ran against your workspace.", + }, + "status_msg": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The success or error message that was returned for the last plan, apply, or destroy action that ran against your workspace.", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this workspace", + }, + }, + } +} + +func dataSourceIBMSchematicsWorkspaceRead(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + getWorkspaceOptions := &schematicsv1.GetWorkspaceOptions{} + + getWorkspaceOptions.SetWID(d.Get("workspace_id").(string)) + + workspaceResponse, response, err := schematicsClient.GetWorkspaceWithContext(context.TODO(), getWorkspaceOptions) + if err != nil { + log.Printf("[DEBUG] GetWorkspaceWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*workspaceResponse.ID) + if err = d.Set("applied_shareddata_ids", workspaceResponse.AppliedShareddataIds); err != nil { + return fmt.Errorf("Error setting applied_shareddata_ids: %s", err) + } + + if workspaceResponse.CatalogRef != nil { + err = d.Set("catalog_ref", dataSourceWorkspaceResponseFlattenCatalogRef(*workspaceResponse.CatalogRef)) + if err != nil { + return fmt.Errorf("Error setting catalog_ref %s", err) + } + } + if err = d.Set("created_at", workspaceResponse.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + if err = d.Set("created_by", workspaceResponse.CreatedBy); err != nil { + return fmt.Errorf("Error setting created_by: %s", err) + } + if err = d.Set("crn", workspaceResponse.Crn); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("description", workspaceResponse.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("last_health_check_at", workspaceResponse.LastHealthCheckAt.String()); err != nil { + return fmt.Errorf("Error setting last_health_check_at: %s", err) + } + if err = d.Set("location", workspaceResponse.Location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if err = d.Set("name", workspaceResponse.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("resource_group", workspaceResponse.ResourceGroup); err != nil { + return fmt.Errorf("Error setting resource_group: %s", err) + } + + if workspaceResponse.RuntimeData != nil { + err = d.Set("runtime_data", dataSourceWorkspaceResponseFlattenRuntimeData(workspaceResponse.RuntimeData)) + if err != nil { + return fmt.Errorf("Error setting runtime_data 
%s", err) + } + } + + if workspaceResponse.SharedData != nil { + err = d.Set("shared_data", dataSourceWorkspaceResponseFlattenSharedData(*workspaceResponse.SharedData)) + if err != nil { + return fmt.Errorf("Error setting shared_data %s", err) + } + } + if err = d.Set("status", workspaceResponse.Status); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + if err = d.Set("tags", workspaceResponse.Tags); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + + if workspaceResponse.TemplateData != nil { + templateData := dataSourceWorkspaceResponseFlattenTemplateData(workspaceResponse.TemplateData) + + if err = d.Set("template_env_settings", templateData[0]["env_values"]); err != nil { + return fmt.Errorf("Error reading env_values: %s", err) + } + if err = d.Set("template_git_folder", templateData[0]["folder"]); err != nil { + return fmt.Errorf("Error reading folder: %s", err) + } + if err = d.Set("template_init_state_file", templateData[0]["init_state_file"]); err != nil { + return fmt.Errorf("Error reading init_state_file: %s", err) + } + if err = d.Set("template_type", templateData[0]["type"]); err != nil { + return fmt.Errorf("Error reading type: %s", err) + } + if err = d.Set("template_uninstall_script_name", templateData[0]["uninstall_script_name"]); err != nil { + return fmt.Errorf("Error reading uninstall_script_name: %s", err) + } + if err = d.Set("template_values", templateData[0]["values"]); err != nil { + return fmt.Errorf("Error reading values: %s", err) + } + if err = d.Set("template_values_metadata", templateData[0]["values_metadata"]); err != nil { + return fmt.Errorf("Error reading values_metadata: %s", err) + } + if err = d.Set("template_inputs", templateData[0]["variablestore"]); err != nil { + return fmt.Errorf("Error reading variablestore: %s", err) + } + } + if err = d.Set("template_ref", workspaceResponse.TemplateRef); err != nil { + return fmt.Errorf("Error setting template_ref: %s", err) + } + + if workspaceResponse.TemplateRepo != nil { + templateRepoMap := dataSourceWorkspaceResponseFlattenTemplateRepo(*workspaceResponse.TemplateRepo) + if err = d.Set("template_git_branch", templateRepoMap[0]["branch"]); err != nil { + return fmt.Errorf("Error reading branch: %s", err) + } + if err = d.Set("template_git_release", templateRepoMap[0]["release"]); err != nil { + return fmt.Errorf("Error reading release: %s", err) + } + if err = d.Set("template_git_repo_sha_value", templateRepoMap[0]["repo_sha_value"]); err != nil { + return fmt.Errorf("Error reading repo_sha_value: %s", err) + } + if err = d.Set("template_git_repo_url", templateRepoMap[0]["repo_url"]); err != nil { + return fmt.Errorf("Error reading repo_url: %s", err) + } + if err = d.Set("template_git_url", templateRepoMap[0]["url"]); err != nil { + return fmt.Errorf("Error reading url: %s", err) + } + if err = d.Set("template_git_has_uploadedgitrepotar", templateRepoMap[0]["has_uploadedgitrepotar"]); err != nil { + return fmt.Errorf("Error reading has_uploadedgitrepotar: %s", err) + } + } + /*if err = d.Set("type", workspaceResponse.Type); err != nil { + return fmt.Errorf("Error setting type: %s", err) + }*/ + if workspaceResponse.UpdatedAt != nil { + if err = d.Set("updated_at", workspaceResponse.UpdatedAt.String()); err != nil { + return fmt.Errorf("Error setting updated_at: %s", err) + } + } + if err = d.Set("updated_by", workspaceResponse.UpdatedBy); err != nil { + return fmt.Errorf("Error setting updated_by: %s", err) + } + + if workspaceResponse.WorkspaceStatus != nil { + 
workspaceStatusMap := dataSourceWorkspaceResponseFlattenWorkspaceStatus(*workspaceResponse.WorkspaceStatus) + if err = d.Set("is_frozen", workspaceStatusMap[0]["frozen"]); err != nil { + return fmt.Errorf("Error reading frozen: %s", err) + } + if err = d.Set("frozen", workspaceStatusMap[0]["frozen"]); err != nil { + return fmt.Errorf("Error reading frozen: %s", err) + } + if err = d.Set("frozen_at", workspaceStatusMap[0]["frozen_at"]); err != nil { + return fmt.Errorf("Error reading frozen_at: %s", err) + } + if err = d.Set("frozen_by", workspaceStatusMap[0]["frozen_by"]); err != nil { + return fmt.Errorf("Error reading frozen_by: %s", err) + } + if err = d.Set("is_locked", workspaceStatusMap[0]["locked"]); err != nil { + return fmt.Errorf("Error reading locked: %s", err) + } + if err = d.Set("locked", workspaceStatusMap[0]["locked"]); err != nil { + return fmt.Errorf("Error reading locked: %s", err) + } + if err = d.Set("locked_by", workspaceStatusMap[0]["locked_by"]); err != nil { + return fmt.Errorf("Error reading locked_by: %s", err) + } + if err = d.Set("locked_time", workspaceStatusMap[0]["locked_time"]); err != nil { + return fmt.Errorf("Error reading locked_time: %s", err) + } + } + + if workspaceResponse.WorkspaceStatusMsg != nil { + workspaceStatusMsgMap := dataSourceWorkspaceResponseFlattenWorkspaceStatusMsg(*workspaceResponse.WorkspaceStatusMsg) + if err = d.Set("status_code", workspaceStatusMsgMap[0]["status_code"]); err != nil { + return fmt.Errorf("Error reading status_code: %s", err) + } + if err = d.Set("status_msg", workspaceStatusMsgMap[0]["status_msg"]); err != nil { + return fmt.Errorf("Error reading status_msg: %s", err) + } + } + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/schematics") + + return nil +} + +func dataSourceWorkspaceResponseFlattenCatalogRef(result schematicsv1.CatalogRef) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceWorkspaceResponseCatalogRefToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceWorkspaceResponseCatalogRefToMap(catalogRefItem schematicsv1.CatalogRef) (catalogRefMap map[string]interface{}) { + catalogRefMap = map[string]interface{}{} + + if catalogRefItem.DryRun != nil { + catalogRefMap["dry_run"] = catalogRefItem.DryRun + } + if catalogRefItem.ItemIconURL != nil { + catalogRefMap["item_icon_url"] = catalogRefItem.ItemIconURL + } + if catalogRefItem.ItemID != nil { + catalogRefMap["item_id"] = catalogRefItem.ItemID + } + if catalogRefItem.ItemName != nil { + catalogRefMap["item_name"] = catalogRefItem.ItemName + } + if catalogRefItem.ItemReadmeURL != nil { + catalogRefMap["item_readme_url"] = catalogRefItem.ItemReadmeURL + } + if catalogRefItem.ItemURL != nil { + catalogRefMap["item_url"] = catalogRefItem.ItemURL + } + if catalogRefItem.LaunchURL != nil { + catalogRefMap["launch_url"] = catalogRefItem.LaunchURL + } + if catalogRefItem.OfferingVersion != nil { + catalogRefMap["offering_version"] = catalogRefItem.OfferingVersion + } + + return catalogRefMap +} + +func dataSourceWorkspaceResponseFlattenRuntimeData(result []schematicsv1.TemplateRunTimeDataResponse) (runtimeData []map[string]interface{}) { + for _, runtimeDataItem := range result { + runtimeData = append(runtimeData, dataSourceWorkspaceResponseRuntimeDataToMap(runtimeDataItem)) + } + + return runtimeData +} + +func dataSourceWorkspaceResponseRuntimeDataToMap(runtimeDataItem 
schematicsv1.TemplateRunTimeDataResponse) (runtimeDataMap map[string]interface{}) { + runtimeDataMap = map[string]interface{}{} + + if runtimeDataItem.EngineCmd != nil { + runtimeDataMap["engine_cmd"] = runtimeDataItem.EngineCmd + } + if runtimeDataItem.EngineName != nil { + runtimeDataMap["engine_name"] = runtimeDataItem.EngineName + } + if runtimeDataItem.EngineVersion != nil { + runtimeDataMap["engine_version"] = runtimeDataItem.EngineVersion + } + if runtimeDataItem.ID != nil { + runtimeDataMap["id"] = runtimeDataItem.ID + } + if runtimeDataItem.LogStoreURL != nil { + runtimeDataMap["log_store_url"] = runtimeDataItem.LogStoreURL + } + if runtimeDataItem.OutputValues != nil { + runtimeDataMap["output_values"] = runtimeDataItem.OutputValues + } + if runtimeDataItem.Resources != nil { + runtimeDataMap["resources"] = runtimeDataItem.Resources + } + if runtimeDataItem.StateStoreURL != nil { + runtimeDataMap["state_store_url"] = runtimeDataItem.StateStoreURL + } + + return runtimeDataMap +} + +func dataSourceWorkspaceResponseFlattenSharedData(result schematicsv1.SharedTargetDataResponse) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceWorkspaceResponseSharedDataToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceWorkspaceResponseSharedDataToMap(sharedDataItem schematicsv1.SharedTargetDataResponse) (sharedDataMap map[string]interface{}) { + sharedDataMap = map[string]interface{}{} + + if sharedDataItem.ClusterID != nil { + sharedDataMap["cluster_id"] = sharedDataItem.ClusterID + } + if sharedDataItem.ClusterName != nil { + sharedDataMap["cluster_name"] = sharedDataItem.ClusterName + } + if sharedDataItem.EntitlementKeys != nil { + sharedDataMap["entitlement_keys"] = sharedDataItem.EntitlementKeys + } + if sharedDataItem.Namespace != nil { + sharedDataMap["namespace"] = sharedDataItem.Namespace + } + if sharedDataItem.Region != nil { + sharedDataMap["region"] = sharedDataItem.Region + } + if sharedDataItem.ResourceGroupID != nil { + sharedDataMap["resource_group_id"] = sharedDataItem.ResourceGroupID + } + + return sharedDataMap +} + +func dataSourceWorkspaceResponseFlattenTemplateData(result []schematicsv1.TemplateSourceDataResponse) (templateData []map[string]interface{}) { + for _, templateDataItem := range result { + templateData = append(templateData, dataSourceWorkspaceResponseTemplateDataToMap(templateDataItem)) + } + + return templateData +} + +func dataSourceWorkspaceResponseTemplateDataToMap(templateDataItem schematicsv1.TemplateSourceDataResponse) (templateDataMap map[string]interface{}) { + templateDataMap = map[string]interface{}{} + + if templateDataItem.EnvValues != nil { + envValuesList := []map[string]interface{}{} + for _, envValuesItem := range templateDataItem.EnvValues { + envValuesList = append(envValuesList, dataSourceWorkspaceResponseTemplateDataEnvValuesToMap(envValuesItem)) + } + templateDataMap["env_values"] = envValuesList + } + if templateDataItem.Folder != nil { + templateDataMap["folder"] = templateDataItem.Folder + } + if templateDataItem.HasGithubtoken != nil { + templateDataMap["has_githubtoken"] = templateDataItem.HasGithubtoken + } + if templateDataItem.ID != nil { + templateDataMap["id"] = templateDataItem.ID + } + if templateDataItem.Type != nil { + templateDataMap["type"] = templateDataItem.Type + } + if templateDataItem.UninstallScriptName != nil { + templateDataMap["uninstall_script_name"] = templateDataItem.UninstallScriptName + } + if 
templateDataItem.Values != nil { + templateDataMap["values"] = templateDataItem.Values + } + if templateDataItem.ValuesMetadata != nil { + templateDataMap["values_metadata"] = templateDataItem.ValuesMetadata + } + if templateDataItem.ValuesURL != nil { + templateDataMap["values_url"] = templateDataItem.ValuesURL + } + if templateDataItem.Variablestore != nil { + variablestoreList := []map[string]interface{}{} + for _, variablestoreItem := range templateDataItem.Variablestore { + variablestoreList = append(variablestoreList, dataSourceWorkspaceResponseTemplateDataVariablestoreToMap(variablestoreItem)) + } + templateDataMap["variablestore"] = variablestoreList + } + + return templateDataMap +} + +func dataSourceWorkspaceResponseTemplateDataEnvValuesToMap(envValuesItem schematicsv1.EnvVariableResponse) (envValuesMap map[string]interface{}) { + envValuesMap = map[string]interface{}{} + + if envValuesItem.Hidden != nil { + envValuesMap["hidden"] = *envValuesItem.Hidden + } + if envValuesItem.Name != nil { + envValuesMap["name"] = envValuesItem.Name + } + if envValuesItem.Secure != nil { + envValuesMap["secure"] = *envValuesItem.Secure + } + if envValuesItem.Value != nil { + envValuesMap["value"] = envValuesItem.Value + } + + return envValuesMap +} + +func dataSourceWorkspaceResponseTemplateDataVariablestoreToMap(variablestoreItem schematicsv1.WorkspaceVariableResponse) (variablestoreMap map[string]interface{}) { + variablestoreMap = map[string]interface{}{} + + if variablestoreItem.Description != nil { + variablestoreMap["description"] = variablestoreItem.Description + } + if variablestoreItem.Name != nil { + variablestoreMap["name"] = variablestoreItem.Name + } + if variablestoreItem.Secure != nil { + variablestoreMap["secure"] = variablestoreItem.Secure + } + if variablestoreItem.Type != nil { + variablestoreMap["type"] = variablestoreItem.Type + } + if variablestoreItem.Value != nil { + variablestoreMap["value"] = variablestoreItem.Value + } + + return variablestoreMap +} + +func dataSourceWorkspaceResponseFlattenTemplateRepo(result schematicsv1.TemplateRepoResponse) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceWorkspaceResponseTemplateRepoToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceWorkspaceResponseTemplateRepoToMap(templateRepoItem schematicsv1.TemplateRepoResponse) (templateRepoMap map[string]interface{}) { + templateRepoMap = map[string]interface{}{} + + if templateRepoItem.Branch != nil { + templateRepoMap["branch"] = templateRepoItem.Branch + } + if templateRepoItem.FullURL != nil { + templateRepoMap["full_url"] = templateRepoItem.FullURL + } + if templateRepoItem.HasUploadedgitrepotar != nil { + templateRepoMap["has_uploadedgitrepotar"] = templateRepoItem.HasUploadedgitrepotar + } + if templateRepoItem.Release != nil { + templateRepoMap["release"] = templateRepoItem.Release + } + if templateRepoItem.RepoShaValue != nil { + templateRepoMap["repo_sha_value"] = templateRepoItem.RepoShaValue + } + if templateRepoItem.RepoURL != nil { + templateRepoMap["repo_url"] = templateRepoItem.RepoURL + } + if templateRepoItem.URL != nil { + templateRepoMap["url"] = templateRepoItem.URL + } + + return templateRepoMap +} + +func dataSourceWorkspaceResponseFlattenWorkspaceStatus(result schematicsv1.WorkspaceStatusResponse) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceWorkspaceResponseWorkspaceStatusToMap(result) + finalList = 
append(finalList, finalMap) + + return finalList +} + +func dataSourceWorkspaceResponseWorkspaceStatusToMap(workspaceStatusItem schematicsv1.WorkspaceStatusResponse) (workspaceStatusMap map[string]interface{}) { + workspaceStatusMap = map[string]interface{}{} + + if workspaceStatusItem.Frozen != nil { + workspaceStatusMap["frozen"] = workspaceStatusItem.Frozen + } + if workspaceStatusItem.FrozenAt != nil { + workspaceStatusMap["frozen_at"] = workspaceStatusItem.FrozenAt.String() + } + if workspaceStatusItem.FrozenBy != nil { + workspaceStatusMap["frozen_by"] = workspaceStatusItem.FrozenBy + } + if workspaceStatusItem.Locked != nil { + workspaceStatusMap["locked"] = workspaceStatusItem.Locked + } + if workspaceStatusItem.LockedBy != nil { + workspaceStatusMap["locked_by"] = workspaceStatusItem.LockedBy + } + if workspaceStatusItem.LockedTime != nil { + workspaceStatusMap["locked_time"] = workspaceStatusItem.LockedTime.String() + } + + return workspaceStatusMap +} + +func dataSourceWorkspaceResponseFlattenWorkspaceStatusMsg(result schematicsv1.WorkspaceStatusMessage) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceWorkspaceResponseWorkspaceStatusMsgToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceWorkspaceResponseWorkspaceStatusMsgToMap(workspaceStatusMsgItem schematicsv1.WorkspaceStatusMessage) (workspaceStatusMsgMap map[string]interface{}) { + workspaceStatusMsgMap = map[string]interface{}{} + + if workspaceStatusMsgItem.StatusCode != nil { + workspaceStatusMsgMap["status_code"] = workspaceStatusMsgItem.StatusCode + } + if workspaceStatusMsgItem.StatusMsg != nil { + workspaceStatusMsgMap["status_msg"] = workspaceStatusMsgItem.StatusMsg + } + + return workspaceStatusMsgMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_secrets_manager_secret.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_secrets_manager_secret.go new file mode 100644 index 00000000000..499925b1628 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_secrets_manager_secret.go @@ -0,0 +1,427 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/IBM/secrets-manager-go-sdk/secretsmanagerv1" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMSecretsManagerSecret() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMSecretsManagerSecretRead, + + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Secrets Manager instance GUID", + }, + "secret_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeDataSourceValidator("ibm_secrets_manager_secret", "secret_type"), + Description: "The secret type. Supported options include: arbitrary, iam_credentials, username_password.", + }, + "secret_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The v4 UUID that uniquely identifies the secret.", + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeDataSourceValidator("ibm_secrets_manager_secret", "endpoint_type"), + Description: "Endpoint Type. 
'public' or 'private'", + Default: "public", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The metadata that describes the resource array.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "collection_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resources in the resource array.", + }, + "collection_total": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The number of elements in the resource array.", + }, + }, + }, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The MIME type that represents the secret.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A human-readable alias to assign to your secret.To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret.", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "An extended description of your secret.To protect your privacy, do not use personal data, such as your name or location, as a description for your secret.", + }, + "secret_group_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The v4 UUID that uniquely identifies the secret group to assign to this secret.If you omit this parameter, your secret is assigned to the `default` secret group.", + }, + "labels": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Labels that you can use to filter for secrets in your instance.Up to 30 labels can be created. Labels can be between 2-30 characters, including spaces. Special characters not permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character (|).To protect your privacy, do not use personal data, such as your name or location, as a label for your secret.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "state": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The secret state based on NIST SP 800-57. States are integers and correspond to the Pre-activation = 0, Active = 1, Suspended = 2, Deactivated = 3, and Destroyed = 5 values.", + }, + "state_description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A text representation of the secret state.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The Cloud Resource Name (CRN) that uniquely identifies your Secrets Manager resource.", + }, + "creation_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date the secret was created. The date format follows RFC 3339.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for the entity that created the secret.", + }, + "last_update_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Updates when the actual secret is modified. 
The date format follows RFC 3339.", + }, + "versions": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "An array that contains metadata for each secret version.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The ID of the secret version.", + }, + "creation_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date that the version of the secret was created.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for the entity that created the secret.", + }, + "auto_rotated": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether the version of the secret was created by automatic rotation.", + }, + }, + }, + }, + "expiration_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date the secret material expires. The date format follows RFC 3339.You can set an expiration date on supported secret types at their creation. If you create a secret without specifying an expiration date, the secret does not expire. The `expiration_date` field is supported for the following secret types:- `arbitrary`- `username_password`.", + }, + "payload": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "The new secret data to assign to an `arbitrary` secret.", + }, + "secret_data": &schema.Schema{ + Type: schema.TypeMap, + Sensitive: true, + Computed: true, + Description: "The secret data object", + }, + "username": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "The username to assign to this secret.", + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "The password to assign to this secret.", + }, + "next_rotation_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date that the secret is scheduled for automatic rotation.The service automatically creates a new version of the secret on its next rotation date. This field exists only for secrets that can be auto-rotated and have an existing rotation policy.", + }, + "ttl": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time-to-live (TTL) or lease duration to assign to generated credentials.For `iam_credentials` secrets, the TTL defines for how long each generated API key remains valid. The value can be either an integer that specifies the number of seconds, or the string representation of a duration, such as `120m` or `24h`.", + }, + "access_groups": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The access groups that define the capabilities of the service ID and API key that are generated for an`iam_credentials` secret.**Tip:** To find the ID of an access group, go to **Manage > Access (IAM) > Access groups** in the IBM Cloud console. Select the access group to inspect, and click **Details** to view its ID.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "api_key": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "The API key that is generated for this secret.After the secret reaches the end of its lease (see the `ttl` field), the API key is deleted automatically. 
If you want to continue to use the same API key for future read operations, see the `reuse_api_key` field.",
+			},
+			"service_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The service ID under which the API key (see the `api_key` field) is created. This service ID is added to the access groups that you assign for this secret.",
+			},
+			"reuse_api_key": &schema.Schema{
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "(IAM credentials) Reuse the service ID and API key for future read operations.",
+			},
+		},
+	}
+}
+
+func datasourceIBMSecretsManagerSecretValidator() *ResourceValidator {
+	// Start with an empty slice; the validation entries are appended below.
+	validateSchema := make([]ValidateSchema, 0)
+	secretType := "arbitrary, iam_credentials, username_password"
+	endpointType := "public, private"
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 "secret_type",
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              secretType})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 "endpoint_type",
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Optional:                   true,
+			AllowedValues:              endpointType})
+
+	ibmSecretsManagerSecretdatasourceValidator := ResourceValidator{ResourceName: "ibm_secrets_manager_secret", Schema: validateSchema}
+	return &ibmSecretsManagerSecretdatasourceValidator
+}
+
+func dataSourceIBMSecretsManagerSecretRead(d *schema.ResourceData, meta interface{}) error {
+	bluemixSession, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return err
+	}
+	region := bluemixSession.Config.Region
+
+	secretsManagerClient, err := meta.(ClientSession).SecretsManagerV1()
+	if err != nil {
+		return err
+	}
+	rControllerClient, err := meta.(ClientSession).ResourceControllerAPIV2()
+	if err != nil {
+		return err
+	}
+
+	instanceID := d.Get("instance_id").(string)
+	endpointType := d.Get("endpoint_type").(string)
+	var smEndpointURL string
+
+	rControllerAPI := rControllerClient.ResourceServiceInstanceV2()
+
+	instanceData, err := rControllerAPI.GetInstance(instanceID)
+	if err != nil {
+		return err
+	}
+	instanceCRN := instanceData.Crn.String()
+
+	crnData := strings.Split(instanceCRN, ":")
+
+	if crnData[4] == "secrets-manager" {
+		if endpointType == "private" {
+			smEndpointURL = "https://" + instanceID + ".private." + region + ".secrets-manager.appdomain.cloud" // private endpoints use the <instance-id>.private.<region> host
+		} else {
+			smEndpointURL = "https://" + instanceID + "."
+ region + ".secrets-manager.appdomain.cloud" + } + smUrl := envFallBack([]string{"IBMCLOUD_SECRETS_MANAGER_API_ENDPOINT"}, smEndpointURL) + secretsManagerClient.Service.Options.URL = smUrl + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + secretType := d.Get("secret_type").(string) + secretID := d.Get("secret_id").(string) + getSecretOptions := &secretsmanagerv1.GetSecretOptions{ + SecretType: &secretType, + ID: &secretID, + } + + getSecret, response, err := secretsManagerClient.GetSecret(getSecretOptions) + if err != nil { + log.Printf("[DEBUG] GetSecret failed %s\n%s", err, response) + return err + } + + d.SetId(dataSourceIBMSecretsManagerSecretID(d)) + + if getSecret.Metadata != nil { + err = d.Set("metadata", dataSourceGetSecretFlattenMetadata(*getSecret.Metadata)) + if err != nil { + return fmt.Errorf("Error setting metadata %s", err) + } + } + + if getSecret.Resources != nil { + for _, resourcesItem := range getSecret.Resources { + if ritem, ok := resourcesItem.(*secretsmanagerv1.SecretResource); ok { + if ritem.Type != nil { + d.Set("type", *ritem.Type) + } + if ritem.Name != nil { + d.Set("name", *ritem.Name) + } + if ritem.Description != nil { + d.Set("description", *ritem.Description) + } + if ritem.SecretGroupID != nil { + d.Set("secret_group_id", *ritem.SecretGroupID) + } + if ritem.Labels != nil { + d.Set("labels", ritem.Labels) + } + if ritem.State != nil { + d.Set("state", *ritem.State) + } + if ritem.StateDescription != nil { + d.Set("state_description", *ritem.StateDescription) + } + if ritem.CRN != nil { + d.Set("crn", *ritem.CRN) + } + if ritem.CreationDate != nil { + d.Set("creation_date", (*ritem.CreationDate).String()) + } + if ritem.CreatedBy != nil { + d.Set("created_by", *ritem.CreatedBy) + } + if ritem.LastUpdateDate != nil { + d.Set("last_update_date", (*ritem.LastUpdateDate).String()) + } + if ritem.Versions != nil { + versionsList := []map[string]interface{}{} + for _, versionsItem := range ritem.Versions { + versionsList = append(versionsList, dataSourceGetSecretResourcesVersionsToMap(versionsItem)) + } + d.Set("versions", versionsList) + } + if ritem.SecretData != nil { + secretData := ritem.SecretData.(map[string]interface{}) + d.Set("secret_data", secretData) + if *ritem.SecretType == "username_password" { + d.Set("username", secretData["username"].(string)) + d.Set("password", secretData["password"].(string)) + } else if *ritem.SecretType == "arbitrary" { + d.Set("payload", secretData["payload"].(string)) + } + } + if ritem.NextRotationDate != nil { + d.Set("next_rotation_date", (*ritem.NextRotationDate).String()) + } + if ritem.TTL != nil { + d.Set("ttl", fmt.Sprintf("%v", ritem.TTL)) + } + if ritem.AccessGroups != nil { + d.Set("access_groups", ritem.AccessGroups) + } + if ritem.APIKey != nil { + d.Set("api_key", *ritem.APIKey) + } + if ritem.ServiceID != nil { + d.Set("service_id", *ritem.ServiceID) + } + if ritem.ReuseAPIKey != nil { + d.Set("reuse_api_key", *ritem.ReuseAPIKey) + } + } + } + } + + return nil +} + +// dataSourceIBMSecretsManagerSecretID returns a reasonable ID for the list. 
+func dataSourceIBMSecretsManagerSecretID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceGetSecretFlattenMetadata(result secretsmanagerv1.CollectionMetadata) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceGetSecretMetadataToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceGetSecretMetadataToMap(metadataItem secretsmanagerv1.CollectionMetadata) (metadataMap map[string]interface{}) { + metadataMap = map[string]interface{}{} + + if metadataItem.CollectionType != nil { + metadataMap["collection_type"] = metadataItem.CollectionType + } + if metadataItem.CollectionTotal != nil { + metadataMap["collection_total"] = metadataItem.CollectionTotal + } + + return metadataMap +} + +func dataSourceGetSecretResourcesVersionsToMap(versionsItem secretsmanagerv1.SecretVersion) (versionsMap map[string]interface{}) { + versionsMap = map[string]interface{}{} + + if versionsItem.ID != nil { + versionsMap["id"] = *versionsItem.ID + } + if versionsItem.CreationDate != nil { + versionsMap["creation_date"] = (*versionsItem.CreationDate).String() + } + if versionsItem.CreatedBy != nil { + versionsMap["created_by"] = *versionsItem.CreatedBy + } + if versionsItem.AutoRotated != nil { + versionsMap["auto_rotated"] = *versionsItem.AutoRotated + } + + return versionsMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_secrets_manager_secrets.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_secrets_manager_secrets.go new file mode 100644 index 00000000000..ca2320cbfe5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_secrets_manager_secrets.go @@ -0,0 +1,494 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/IBM/secrets-manager-go-sdk/secretsmanagerv1" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMSecretsManagerSecrets() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMSecretsManagerSecretsRead, + + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Secrets Manager instance GUID", + }, + "secret_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeDataSourceValidator("ibm_secrets_manager_secrets", "secret_type"), + Description: "The secret type. Supported options include: arbitrary, iam_credentials, username_password.", + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeDataSourceValidator("ibm_secrets_manager_secrets", "endpoint_type"), + Description: "Endpoint Type. 
'public' or 'private'", + Default: "public", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The metadata that describes the resource array.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "collection_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resources in the resource array.", + }, + "collection_total": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The number of elements in the resource array.", + }, + }, + }, + }, + "secrets": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "A collection of secret resources.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The MIME type that represents the secret.", + }, + "secret_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The v4 UUID that uniquely identifies the secret.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A human-readable alias to assign to your secret.To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret.", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "An extended description of your secret.To protect your privacy, do not use personal data, such as your name or location, as a description for your secret.", + }, + "secret_group_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The v4 UUID that uniquely identifies the secret group to assign to this secret.If you omit this parameter, your secret is assigned to the `default` secret group.", + }, + "labels": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Labels that you can use to filter for secrets in your instance.Up to 30 labels can be created. Labels can be between 2-30 characters, including spaces. Special characters not permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character (|).To protect your privacy, do not use personal data, such as your name or location, as a label for your secret.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "state": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The secret state based on NIST SP 800-57. States are integers and correspond to the Pre-activation = 0, Active = 1, Suspended = 2, Deactivated = 3, and Destroyed = 5 values.", + }, + "state_description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A text representation of the secret state.", + }, + "secret_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The secret type.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The Cloud Resource Name (CRN) that uniquely identifies your Secrets Manager resource.", + }, + "creation_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date the secret was created. The date format follows RFC 3339.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for the entity that created the secret.", + }, + "last_update_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Updates when the actual secret is modified. 
The date format follows RFC 3339.", + }, + "versions": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "An array that contains metadata for each secret version.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The ID of the secret version.", + }, + "creation_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date that the version of the secret was created.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for the entity that created the secret.", + }, + "auto_rotated": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether the version of the secret was created by automatic rotation.", + }, + }, + }, + }, + "expiration_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date the secret material expires. The date format follows RFC 3339.You can set an expiration date on supported secret types at their creation. If you create a secret without specifying an expiration date, the secret does not expire. The `expiration_date` field is supported for the following secret types:- `arbitrary`- `username_password`.", + }, + "payload": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "The new secret data to assign to an `arbitrary` secret.", + }, + "secret_data": &schema.Schema{ + Type: schema.TypeMap, + Sensitive: true, + Computed: true, + Description: "The secret data object", + }, + "username": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "The username to assign to this secret.", + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "The password to assign to this secret.", + }, + "next_rotation_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date that the secret is scheduled for automatic rotation.The service automatically creates a new version of the secret on its next rotation date. This field exists only for secrets that can be auto-rotated and have an existing rotation policy.", + }, + "ttl": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time-to-live (TTL) or lease duration to assign to generated credentials.For `iam_credentials` secrets, the TTL defines for how long each generated API key remains valid. The value can be either an integer that specifies the number of seconds, or the string representation of a duration, such as `120m` or `24h`.", + }, + "access_groups": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The access groups that define the capabilities of the service ID and API key that are generated for an`iam_credentials` secret.**Tip:** To find the ID of an access group, go to **Manage > Access (IAM) > Access groups** in the IBM Cloud console. Select the access group to inspect, and click **Details** to view its ID.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "api_key": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "The API key that is generated for this secret.After the secret reaches the end of its lease (see the `ttl` field), the API key is deleted automatically. 
If you want to continue to use the same API key for future read operations, see the `reuse_api_key` field.",
+					},
+					"service_id": &schema.Schema{
+						Type:        schema.TypeString,
+						Computed:    true,
+						Description: "The service ID under which the API key (see the `api_key` field) is created. This service ID is added to the access groups that you assign for this secret.",
+					},
+					"reuse_api_key": &schema.Schema{
+						Type:        schema.TypeBool,
+						Computed:    true,
+						Description: "(IAM credentials) Reuse the service ID and API key for future read operations.",
+					},
+				},
+			},
+		},
+	},
+	}
+}
+
+func datasourceIBMSecretsManagerSecretsValidator() *ResourceValidator {
+
+	validateSchema := make([]ValidateSchema, 0, 2)
+	secretType := "arbitrary, iam_credentials, username_password"
+	endpointType := "public, private"
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 "secret_type",
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Optional:                   true,
+			AllowedValues:              secretType})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 "endpoint_type",
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Optional:                   true,
+			AllowedValues:              endpointType})
+
+	ibmSecretsManagerSecretsdatasourceValidator := ResourceValidator{ResourceName: "ibm_secrets_manager_secrets", Schema: validateSchema}
+	return &ibmSecretsManagerSecretsdatasourceValidator
+}
+
+func dataSourceIBMSecretsManagerSecretsRead(d *schema.ResourceData, meta interface{}) error {
+	bluemixSession, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return err
+	}
+	region := bluemixSession.Config.Region
+
+	secretsManagerClient, err := meta.(ClientSession).SecretsManagerV1()
+	if err != nil {
+		return err
+	}
+	rControllerClient, err := meta.(ClientSession).ResourceControllerAPIV2()
+	if err != nil {
+		return err
+	}
+
+	instanceID := d.Get("instance_id").(string)
+	endpointType := d.Get("endpoint_type").(string)
+	var smEndpointURL string
+
+	rControllerAPI := rControllerClient.ResourceServiceInstanceV2()
+
+	instanceData, err := rControllerAPI.GetInstance(instanceID)
+	if err != nil {
+		return err
+	}
+	instanceCRN := instanceData.Crn.String()
+
+	crnData := strings.Split(instanceCRN, ":")
+
+	if crnData[4] == "secrets-manager" {
+		if endpointType == "private" {
+			smEndpointURL = "https://" + instanceID + ".private." + region + ".secrets-manager.appdomain.cloud"
+		} else {
+			smEndpointURL = "https://" + instanceID + "." + region + ".secrets-manager.appdomain.cloud"
+		}
+		smUrl := envFallBack([]string{"IBMCLOUD_SECRETS_MANAGER_API_ENDPOINT"}, smEndpointURL)
+		secretsManagerClient.Service.Options.URL = smUrl
+	} else {
+		return fmt.Errorf("Invalid or unsupported service instance")
+	}
+
+	listAllSecretsOptions := &secretsmanagerv1.ListAllSecretsOptions{}
+
+	listSecrets, response, err := secretsManagerClient.ListAllSecretsWithContext(context.TODO(), listAllSecretsOptions)
+	if err != nil {
+		log.Printf("[DEBUG] ListAllSecretsWithContext failed %s\n%s", err, response)
+		return err
+	}
+
+	// Use the provided filter argument and construct a new list with only the requested resource(s)
+	var matchResources []secretsmanagerv1.SecretResourceIntf
+	var secretType string
+	var suppliedFilter bool
+
+	if v, ok := d.GetOk("secret_type"); ok {
+		secretType = v.(string)
+		suppliedFilter = true
+		for _, data := range listSecrets.Resources {
+			if rdata, ok := data.(*secretsmanagerv1.SecretResource); ok {
+				if *rdata.SecretType == secretType {
+					matchResources = append(matchResources, data)
+				}
+			}
+		}
+	} else {
+		matchResources = listSecrets.Resources
+	}
+	listSecrets.Resources = matchResources
+
+	if len(listSecrets.Resources) == 0 {
+		return fmt.Errorf("no secrets found with secret_type %q; omit the filter or specify a different secret_type", secretType)
+	}
+
+	if suppliedFilter {
+		d.SetId(secretType)
+	} else {
+		d.SetId(dataSourceIBMSecretsManagerSecretsID(d))
+	}
+
+	if listSecrets.Metadata != nil {
+		err = d.Set("metadata", dataSourceListSecretsFlattenMetadata(*listSecrets.Metadata))
+		if err != nil {
+			return fmt.Errorf("Error setting metadata %s", err)
+		}
+	}
+
+	if listSecrets.Resources != nil {
+		err = d.Set("secrets", dataSourceListSecretsFlattenResources(listSecrets.Resources))
+		if err != nil {
+			return fmt.Errorf("Error setting secrets %s", err)
+		}
+	}
+
+	return nil
+}
+
+// dataSourceIBMSecretsManagerSecretsID returns a reasonable ID for the list.
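+// The ID is the current UTC timestamp, so it changes on every read and only
+// serves as a placeholder in state. A stable alternative (a hypothetical
+// sketch, not what this data source does) could combine the instance ID with
+// the applied filter:
+//
+//	func stableSecretsListID(instanceID, secretType string) string {
+//		if secretType == "" {
+//			return instanceID
+//		}
+//		return instanceID + "/" + secretType
+//	}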
+func dataSourceIBMSecretsManagerSecretsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceListSecretsFlattenMetadata(result secretsmanagerv1.CollectionMetadata) (finalList []map[string]interface{}) { + finalList = []map[string]interface{}{} + finalMap := dataSourceListSecretsMetadataToMap(result) + finalList = append(finalList, finalMap) + + return finalList +} + +func dataSourceListSecretsMetadataToMap(metadataItem secretsmanagerv1.CollectionMetadata) (metadataMap map[string]interface{}) { + metadataMap = map[string]interface{}{} + + if metadataItem.CollectionType != nil { + metadataMap["collection_type"] = metadataItem.CollectionType + } + if metadataItem.CollectionTotal != nil { + metadataMap["collection_total"] = metadataItem.CollectionTotal + } + + return metadataMap +} + +func dataSourceListSecretsFlattenResources(result []secretsmanagerv1.SecretResourceIntf) (resources []map[string]interface{}) { + for _, resourcesItem := range result { + if ritem, ok := resourcesItem.(*secretsmanagerv1.SecretResource); ok { + resources = append(resources, dataSourceListSecretsResourcesToMap(*ritem)) + } + } + + return resources +} + +func dataSourceListSecretsResourcesToMap(resourcesItem secretsmanagerv1.SecretResource) (resourcesMap map[string]interface{}) { + resourcesMap = map[string]interface{}{} + + if resourcesItem.Type != nil { + resourcesMap["type"] = *resourcesItem.Type + } + if resourcesItem.ID != nil { + resourcesMap["secret_id"] = *resourcesItem.ID + } + if resourcesItem.Name != nil { + resourcesMap["name"] = *resourcesItem.Name + } + if resourcesItem.Description != nil { + resourcesMap["description"] = *resourcesItem.Description + } + if resourcesItem.SecretGroupID != nil { + resourcesMap["secret_group_id"] = *resourcesItem.SecretGroupID + } + if resourcesItem.Labels != nil { + resourcesMap["labels"] = resourcesItem.Labels + } + if resourcesItem.State != nil { + resourcesMap["state"] = *resourcesItem.State + } + if resourcesItem.StateDescription != nil { + resourcesMap["state_description"] = *resourcesItem.StateDescription + } + if resourcesItem.SecretType != nil { + resourcesMap["secret_type"] = *resourcesItem.SecretType + } + if resourcesItem.CRN != nil { + resourcesMap["crn"] = *resourcesItem.CRN + } + if resourcesItem.CreationDate != nil { + resourcesMap["creation_date"] = (*resourcesItem.CreationDate).String() + } + if resourcesItem.CreatedBy != nil { + resourcesMap["created_by"] = *resourcesItem.CreatedBy + } + if resourcesItem.LastUpdateDate != nil { + resourcesMap["last_update_date"] = (*resourcesItem.LastUpdateDate).String() + } + if resourcesItem.Versions != nil { + versionsList := []map[string]interface{}{} + for _, versionsItem := range resourcesItem.Versions { + versionsList = append(versionsList, dataSourceListSecretsResourcesVersionsToMap(versionsItem)) + } + resourcesMap["versions"] = versionsList + } + if resourcesItem.ExpirationDate != nil { + resourcesMap["expiration_date"] = (*resourcesItem.ExpirationDate).String() + } + if resourcesItem.Payload != nil { + resourcesMap["payload"] = *resourcesItem.Payload + } + if resourcesItem.SecretData != nil { + secretData := resourcesItem.SecretData.(map[string]interface{}) + resourcesMap["secret_data"] = secretData + if *resourcesItem.SecretType == "username_password" { + resourcesMap["username"] = secretData["username"].(string) + resourcesMap["password"] = secretData["password"].(string) + } else if *resourcesItem.SecretType == "arbitrary" { + resourcesMap["payload"] = 
secretData["payload"].(string) + } + } + if resourcesItem.NextRotationDate != nil { + resourcesMap["next_rotation_date"] = (*resourcesItem.NextRotationDate).String() + } + if resourcesItem.TTL != nil { + resourcesMap["ttl"] = fmt.Sprintf("%v", resourcesItem.TTL) + } + if resourcesItem.AccessGroups != nil { + resourcesMap["access_groups"] = resourcesItem.AccessGroups + } + if resourcesItem.APIKey != nil { + resourcesMap["api_key"] = *resourcesItem.APIKey + } + if resourcesItem.ServiceID != nil { + resourcesMap["service_id"] = *resourcesItem.ServiceID + } + if resourcesItem.ReuseAPIKey != nil { + resourcesMap["reuse_api_key"] = *resourcesItem.ReuseAPIKey + } + + return resourcesMap +} + +func dataSourceListSecretsResourcesVersionsToMap(versionsItem secretsmanagerv1.SecretVersion) (versionsMap map[string]interface{}) { + versionsMap = map[string]interface{}{} + + if versionsItem.ID != nil { + versionsMap["id"] = *versionsItem.ID + } + if versionsItem.CreationDate != nil { + versionsMap["creation_date"] = (*versionsItem.CreationDate).String() + } + if versionsItem.CreatedBy != nil { + versionsMap["created_by"] = *versionsItem.CreatedBy + } + if versionsItem.AutoRotated != nil { + versionsMap["auto_rotated"] = *versionsItem.AutoRotated + } + + return versionsMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_security_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_security_group.go new file mode 100644 index 00000000000..6d5b1b1c028 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_security_group.go @@ -0,0 +1,99 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMSecurityGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMSecurityGroupRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "The name of the security group", + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The description of the security group", + }, + "most_recent": &schema.Schema{ + Description: "If true and multiple entries are found, the most recently created group is used. " + + "If false, an error is returned", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func dataSourceIBMSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + name := d.Get("name").(string) + mostRecent := d.Get("most_recent").(bool) + + filters := filter.New(filter.Path("securityGroups.name").Eq(name)) + if v, ok := d.GetOk("description"); ok { + filters = append(filters, filter.Path("securityGroups.description").Eq(v.(string))) + } + + groups, err := services.GetAccountService(sess). + Filter(filter.Build( + filters..., + )). 
+		GetSecurityGroups()
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving Security group: %s", err)
+	}
+	if len(groups) == 0 {
+		return fmt.Errorf("No security group found with name [%s]", name)
+	}
+
+	var sg datatypes.Network_SecurityGroup
+	if len(groups) > 1 {
+		if mostRecent {
+			sg = mostRecentSecurityGroup(groups)
+		} else {
+			return fmt.Errorf(
+				"More than one security group found with name matching [%s]. "+
+					"Either set 'most_recent' to true in your "+
+					"configuration to force the most recent security group "+
+					"to be used, or ensure that the name and/or description is unique", name)
+		}
+	} else {
+		sg = groups[0]
+	}
+	d.SetId(fmt.Sprintf("%d", *sg.Id))
+	d.Set("description", sg.Description)
+	return nil
+}
+
+type securityGroups []datatypes.Network_SecurityGroup
+
+func (sgs securityGroups) Len() int { return len(sgs) }
+
+func (sgs securityGroups) Swap(i, j int) { sgs[i], sgs[j] = sgs[j], sgs[i] }
+
+func (sgs securityGroups) Less(i, j int) bool {
+	return sgs[i].CreateDate.Before(sgs[j].CreateDate.Time)
+}
+
+func mostRecentSecurityGroup(sgs securityGroups) datatypes.Network_SecurityGroup {
+	sortedKeys := sgs
+	sort.Sort(sortedKeys)
+	return sortedKeys[len(sortedKeys)-1]
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_service_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_service_instance.go
new file mode 100644
index 00000000000..221001d4ad2
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_service_instance.go
@@ -0,0 +1,91 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMServiceInstance() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMServiceInstanceRead,
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Description: "Service instance name, for example speech_to_text",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+
+			"space_guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The guid of the space in which the instance is present",
+			},
+
+			"credentials": {
+				Description: "The service broker-provided credentials to use this service.",
+				Type:        schema.TypeMap,
+				Sensitive:   true,
+				Computed:    true,
+			},
+
+			"service_keys": {
+				Description: "Service keys associated with the service instance",
+				Type:        schema.TypeList,
+				Computed:    true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The service key name",
+						},
+						"credentials": {
+							Type:        schema.TypeMap,
+							Computed:    true,
+							Sensitive:   true,
+							Description: "The service key credential details, such as port and username",
+						},
+					},
+				},
+			},
+
+			"service_plan_guid": {
+				Description: "The unique identifier of the service offering plan type",
+				Type:        schema.TypeString,
+				Computed:    true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMServiceInstanceRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	siAPI := cfClient.ServiceInstances()
+	name := d.Get("name").(string)
+	spaceGUID := d.Get("space_guid").(string)
+	inst, err := siAPI.FindByNameInSpace(spaceGUID, name)
+	if err != nil {
+		return err
+	}
+
+	serviceInstance, err := siAPI.Get(inst.GUID, 1)
+	if err != nil {
+		return fmt.Errorf("Error retrieving service: %s", err)
+	}
+
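+	// Persist the instance GUID as the Terraform ID, then expose the
+	// credentials, service keys, and plan GUID through the schema.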
+	d.SetId(serviceInstance.Metadata.GUID)
+	serviceKeys := serviceInstance.Entity.ServiceKeys
+	d.Set("credentials", Flatten(serviceInstance.Entity.Credentials))
+	d.Set("service_keys", flattenServiceInstanceCredentials(serviceKeys))
+	d.Set("service_plan_guid", serviceInstance.Entity.ServicePlanGUID)
+
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_service_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_service_key.go
new file mode 100644
index 00000000000..71ca3647602
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_service_key.go
@@ -0,0 +1,67 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceIBMServiceKey() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMServiceKeyRead,
+
+		Schema: map[string]*schema.Schema{
+			"credentials": {
+				Description: "Credentials associated with the key",
+				Sensitive:   true,
+				Type:        schema.TypeMap,
+				Computed:    true,
+			},
+			"name": {
+				Description: "The name of the service key",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+			"service_instance_name": {
+				Description: "Service instance name, for example speech_to_text",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+			"space_guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The guid of the space in which the service instance is present",
+			},
+		},
+	}
+}
+
+func dataSourceIBMServiceKeyRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	siAPI := cfClient.ServiceInstances()
+	skAPI := cfClient.ServiceKeys()
+	serviceInstanceName := d.Get("service_instance_name").(string)
+	spaceGUID := d.Get("space_guid").(string)
+	name := d.Get("name").(string)
+	inst, err := siAPI.FindByNameInSpace(spaceGUID, serviceInstanceName)
+	if err != nil {
+		return err
+	}
+	serviceInstance, err := siAPI.Get(inst.GUID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving service: %s", err)
+	}
+	serviceKey, err := skAPI.FindByName(serviceInstance.Metadata.GUID, name)
+	if err != nil {
+		return fmt.Errorf("Error retrieving service key: %s", err)
+	}
+	d.SetId(serviceKey.GUID)
+	d.Set("credentials", Flatten(serviceKey.Credentials))
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_service_plan.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_service_plan.go
new file mode 100644
index 00000000000..a3d174f5e19
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_service_plan.go
@@ -0,0 +1,53 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMServicePlan() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMServicePlanRead, + + Schema: map[string]*schema.Schema{ + "service": { + Description: "Service name for example, cloudantNoSQLDB", + Type: schema.TypeString, + Required: true, + }, + + "plan": { + Description: "The plan type ex- shared ", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMServicePlanRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + soffAPI := cfClient.ServiceOfferings() + spAPI := cfClient.ServicePlans() + + service := d.Get("service").(string) + plan := d.Get("plan").(string) + serviceOff, err := soffAPI.FindByLabel(service) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + servicePlan, err := spAPI.FindPlanInServiceOffering(serviceOff.GUID, plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + + d.SetId(servicePlan.GUID) + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_space.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_space.go new file mode 100644 index 00000000000..6f320652c1f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_space.go @@ -0,0 +1,106 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMSpace() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMSpaceRead, + + Schema: map[string]*schema.Schema{ + "space": { + Description: "Space name, for example dev", + Type: schema.TypeString, + Optional: true, + Deprecated: "use name instead", + ExactlyOneOf: []string{"space", "name"}, + }, + "name": { + Description: "Space name, for example dev", + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"space", "name"}, + }, + "org": { + Description: "The org this space belongs to", + Type: schema.TypeString, + Required: true, + }, + "auditors": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The IBMID of the users who have auditor role in this space, ex - user@example.com", + }, + "managers": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The IBMID of the users who have manager role in this space, ex - user@example.com", + }, + "developers": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The IBMID of the users who have developer role in this space, ex - user@example.com", + }, + }, + } +} + +func dataSourceIBMSpaceRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + orgAPI := cfClient.Organizations() + spaceAPI := cfClient.Spaces() + var space string + if v, ok := d.GetOk("name"); ok { + space = v.(string) + } + if v, ok := d.GetOk("space"); ok { + space = v.(string) + } + + org := d.Get("org").(string) + + orgFields, err := orgAPI.FindByName(org, BluemixRegion) + if err != nil { + return fmt.Errorf("Error retrieving org: %s", err) + } 
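+
+	// Resolve the space by name within the org; BluemixRegion scopes both
+	// lookups to the region the provider session was configured with.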
+ spaceFields, err := spaceAPI.FindByNameInOrg(orgFields.GUID, space, BluemixRegion) + if err != nil { + return fmt.Errorf("Error retrieving space: %s", err) + } + + spaceGUID := spaceFields.GUID + d.SetId(spaceGUID) + + auditors, err := spaceAPI.ListAuditors(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving auditors in the space: %s", err) + } + + managers, err := spaceAPI.ListManagers(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving managers in the space: %s", err) + } + + developers, err := spaceAPI.ListDevelopers(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving developers in space: %s", err) + } + + d.Set("auditors", flattenSpaceRoleUsers(auditors)) + d.Set("managers", flattenSpaceRoleUsers(managers)) + d.Set("developers", flattenSpaceRoleUsers(developers)) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_gateway.go new file mode 100644 index 00000000000..34e4e3a147a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_gateway.go @@ -0,0 +1,206 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/networking-go-sdk/transitgatewayapisv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + tgConnName = "name" + tgConnections = "connections" +) + +func dataSourceIBMTransitGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMTransitGatewayRead, + + Schema: map[string]*schema.Schema{ + tgName: { + Type: schema.TypeString, + Required: true, + Description: "The Transit Gateway identifier", + ValidateFunc: InvokeValidator("ibm_tg_gateway", tgName), + }, + tgCrn: { + Type: schema.TypeString, + Computed: true, + }, + tgLocation: { + Type: schema.TypeString, + Computed: true, + }, + tgCreatedAt: { + Type: schema.TypeString, + Computed: true, + }, + tgGlobal: { + Type: schema.TypeBool, + Computed: true, + }, + tgStatus: { + Type: schema.TypeString, + Computed: true, + }, + tgUpdatedAt: { + Type: schema.TypeString, + Computed: true, + }, + tgResourceGroup: { + Type: schema.TypeString, + Computed: true, + }, + tgConnections: { + Type: schema.TypeList, + Description: "Collection of transit gateway connections", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + ID: { + Type: schema.TypeString, + Computed: true, + }, + tgNetworkAccountID: { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the account which owns the network that is being connected. 
Generally only used if the network is in a different account than the gateway.", + }, + tgNetworkId: { + Type: schema.TypeString, + Computed: true, + }, + tgConnName: { + Type: schema.TypeString, + Computed: true, + }, + tgNetworkType: { + Type: schema.TypeString, + Computed: true, + }, + tgConectionCreatedAt: { + Type: schema.TypeString, + Computed: true, + }, + tgConnectionStatus: { + Type: schema.TypeString, + Computed: true, + }, + tgUpdatedAt: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMTransitGatewayRead(d *schema.ResourceData, meta interface{}) error { + + client, err := transitgatewayClient(meta) + if err != nil { + return err + } + + listTransitGatewaysOptionsModel := &transitgatewayapisv1.ListTransitGatewaysOptions{} + listTransitGateways, response, err := client.ListTransitGateways(listTransitGatewaysOptionsModel) + if err != nil { + return fmt.Errorf("Error while listing transit gateways %s\n%s", err, response) + } + + gwName := d.Get(tgName).(string) + var foundGateway bool + for _, tgw := range listTransitGateways.TransitGateways { + + if *tgw.Name == gwName { + d.SetId(*tgw.ID) + d.Set(tgCrn, tgw.Crn) + d.Set(tgName, tgw.Name) + d.Set(tgLocation, tgw.Location) + d.Set(tgCreatedAt, tgw.CreatedAt.String()) + + if tgw.UpdatedAt != nil { + d.Set(tgUpdatedAt, tgw.UpdatedAt.String()) + } + d.Set(tgGlobal, tgw.Global) + d.Set(tgStatus, tgw.Status) + + if tgw.ResourceGroup != nil { + rg := tgw.ResourceGroup + d.Set(tgResourceGroup, *rg.ID) + } + foundGateway = true + } + } + + if !foundGateway { + return fmt.Errorf( + "Couldn't find any gateway with the specified name: (%s)", gwName) + } + + return dataSourceIBMTransitGatewayConnectionsRead(d, meta) + +} +func dataSourceIBMTransitGatewayConnectionsRead(d *schema.ResourceData, meta interface{}) error { + + client, err := transitgatewayClient(meta) + if err != nil { + return err + } + listTransitGatewayConnectionsOptions := &transitgatewayapisv1.ListTransitGatewayConnectionsOptions{} + tgGatewayId := d.Id() + log.Println("tgGatewayId: ", tgGatewayId) + + listTransitGatewayConnectionsOptions.SetTransitGatewayID(tgGatewayId) + listTGConnections, response, err := client.ListTransitGatewayConnections(listTransitGatewayConnectionsOptions) + if err != nil { + return fmt.Errorf("Error while listing transit gateway connections %s\n%s", err, response) + } + connections := make([]map[string]interface{}, 0) + + for _, instance := range listTGConnections.Connections { + tgConn := map[string]interface{}{} + + if instance.ID != nil { + tgConn[ID] = *instance.ID + } + if instance.Name != nil { + tgConn[tgConnName] = *instance.Name + } + if instance.NetworkType != nil { + tgConn[tgNetworkType] = *instance.NetworkType + } + + if instance.NetworkID != nil { + tgConn[tgNetworkId] = *instance.NetworkID + } + if instance.NetworkAccountID != nil { + tgConn[tgNetworkAccountID] = *instance.NetworkAccountID + } + + if instance.CreatedAt != nil { + tgConn[tgConectionCreatedAt] = instance.CreatedAt.String() + + } + if instance.UpdatedAt != nil { + tgConn[tgUpdatedAt] = instance.UpdatedAt.String() + + } + if instance.Status != nil { + tgConn[tgConnectionStatus] = *instance.Status + } + + connections = append(connections, tgConn) + + } + d.Set(tgConnections, connections) + + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_gateways.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_gateways.go new file mode 100644 index 
00000000000..5ba58e34aac --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_gateways.go @@ -0,0 +1,112 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/networking-go-sdk/transitgatewayapisv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceIBMTransitGateways() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceIBMTransitGatewaysRead, + Schema: map[string]*schema.Schema{ + tgGateways: { + Type: schema.TypeList, + Description: "Collection of transit gateways", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + tgID: { + Type: schema.TypeString, + Computed: true, + }, + tgCrn: { + Type: schema.TypeString, + Computed: true, + }, + tgName: { + Type: schema.TypeString, + Computed: true, + }, + tgLocation: { + Type: schema.TypeString, + Computed: true, + }, + tgCreatedAt: { + Type: schema.TypeString, + Computed: true, + }, + tgGlobal: { + Type: schema.TypeBool, + Computed: true, + }, + tgStatus: { + Type: schema.TypeString, + Computed: true, + }, + tgUpdatedAt: { + Type: schema.TypeString, + Computed: true, + }, + tgResourceGroup: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMTransitGatewaysRead(d *schema.ResourceData, meta interface{}) error { + + client, err := transitgatewayClient(meta) + if err != nil { + return err + } + + listTransitGatewaysOptionsModel := &transitgatewayapisv1.ListTransitGatewaysOptions{} + listTransitGateways, response, err := client.ListTransitGateways(listTransitGatewaysOptionsModel) + if err != nil { + return fmt.Errorf("Error while listing transit gateways %s\n%s", err, response) + } + + tgws := make([]map[string]interface{}, 0) + for _, instance := range listTransitGateways.TransitGateways { + + transitgateway := map[string]interface{}{} + transitgateway[tgID] = instance.ID + transitgateway[tgName] = instance.Name + transitgateway[tgCreatedAt] = instance.CreatedAt.String() + transitgateway[tgLocation] = instance.Location + transitgateway[tgStatus] = instance.Status + + if instance.UpdatedAt != nil { + transitgateway[tgUpdatedAt] = instance.UpdatedAt.String() + } + transitgateway[tgGlobal] = instance.Global + transitgateway[tgCrn] = instance.Crn + + if instance.ResourceGroup != nil { + rg := instance.ResourceGroup + transitgateway[tgResourceGroup] = *rg.ID + } + + tgws = append(tgws, transitgateway) + } + d.Set(tgGateways, tgws) + d.SetId(dataSourceIBMTransitGatewaysID(d)) + return nil +} + +// dataSourceIBMTransitGatewaysID returns a reasonable ID for a transit gateways list. +func dataSourceIBMTransitGatewaysID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_location.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_location.go new file mode 100644 index 00000000000..f4188780fa4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_location.go @@ -0,0 +1,102 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/networking-go-sdk/transitgatewayapisv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + tgLocalConnectionLocations = "local_connection_locations" + tgLocationsDisplayName = "display_name" +) + +func dataSourceIBMTransitGatewaysLocation() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMTransitGatewaysLocationRead, + Schema: map[string]*schema.Schema{ + tgLocationsName: { + Type: schema.TypeString, + Required: true, + Description: "Name of the Location.", + }, + tgLocationsType: { + Type: schema.TypeString, + Computed: true, + Description: "The type of the location, determining is this a multi-zone region, a single data center, or a point of presence.", + }, + + tgLocationsBillingLoc: { + Type: schema.TypeString, + Computed: true, + Description: "The geographical location of this location, used for billing purposes.", + }, + tgLocalConnectionLocations: { + Type: schema.TypeList, + Computed: true, + Description: "The set of network locations that are considered local for this Transit Gateway location.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + tgLocationsName: { + Type: schema.TypeString, + Computed: true, + Description: "Name of the Location.", + }, + tgLocationsDisplayName: { + Type: schema.TypeString, + Computed: true, + Description: "A descriptive display name for the location.", + }, + tgLocationsType: { + Type: schema.TypeString, + Computed: true, + Description: "The type of the location, determining is this a multi-zone region, a single data center, or a point of presence.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMTransitGatewaysLocationRead(d *schema.ResourceData, meta interface{}) error { + + client, err := transitgatewayClient(meta) + if err != nil { + return err + } + + detailGatewayLocationOptionsModel := &transitgatewayapisv1.GetGatewayLocationOptions{} + locName := d.Get(tgLocationsName).(string) + detailGatewayLocationOptionsModel.Name = &locName + detailTransitGatewayLocation, response, err := client.GetGatewayLocation(detailGatewayLocationOptionsModel) + if err != nil { + return fmt.Errorf("Error while fetching transit gateway detailed location: %s\n%s", err, response) + } + + if detailTransitGatewayLocation != nil { + d.SetId(dataSourceIBMTransitGatewaysLocationID(d)) + d.Set(tgLocationsType, *detailTransitGatewayLocation.Type) + d.Set(tgLocationsBillingLoc, *detailTransitGatewayLocation.BillingLocation) + tgConnLocationsCol := make([]map[string]interface{}, 0) + for _, instance := range detailTransitGatewayLocation.LocalConnectionLocations { + tgConnLocation := map[string]interface{}{} + tgConnLocation[tgLocationsName] = *instance.Name + tgConnLocation[tgLocationsDisplayName] = *instance.DisplayName + tgConnLocation[tgLocationsType] = *instance.Type + tgConnLocationsCol = append(tgConnLocationsCol, tgConnLocation) + } + d.Set(tgLocalConnectionLocations, tgConnLocationsCol) + } + return nil +} + +// dataSourceIBMTransitGatewaysLocationID returns a reasonable ID for a transit gateways location list. 
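+// As elsewhere in this package, the ID is simply the current UTC timestamp,
+// so it is a state placeholder rather than a stable, service-side identifier.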
+func dataSourceIBMTransitGatewaysLocationID(d *schema.ResourceData) string {
+	return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_locations.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_locations.go
new file mode 100644
index 00000000000..d38cf4c0796
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/data_source_ibm_tg_locations.go
@@ -0,0 +1,85 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/IBM/networking-go-sdk/transitgatewayapisv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	tgLocations           = "locations"
+	tgLocationsName       = "name"
+	tgLocationsBillingLoc = "billing_location"
+	tgLocationsType       = "type"
+)
+
+func dataSourceIBMTransitGatewaysLocations() *schema.Resource {
+
+	return &schema.Resource{
+		Read: dataSourceIBMTransitGatewaysLocationsRead,
+		Schema: map[string]*schema.Schema{
+			tgLocations: {
+				Type:        schema.TypeList,
+				Description: "Collection of Transit Gateway locations",
+				Computed:    true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						tgLocationsName: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Name of the Location.",
+						},
+						tgLocationsType: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The type of the location, determining whether this is a multi-zone region, a single data center, or a point of presence.",
+						},
+						tgLocationsBillingLoc: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The geographical region of this location, used for billing purposes.",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func dataSourceIBMTransitGatewaysLocationsRead(d *schema.ResourceData, meta interface{}) error {
+
+	client, err := transitgatewayClient(meta)
+	if err != nil {
+		return err
+	}
+
+	listTransitGatewayLocationsOptionsModel := &transitgatewayapisv1.ListGatewayLocationsOptions{}
+	listTransitGatewayLocations, response, err := client.ListGatewayLocations(listTransitGatewayLocationsOptionsModel)
+	if err != nil {
+		return fmt.Errorf("Error while fetching transit gateways locations: %s\n%s", err, response)
+	}
+
+	tgLocationsCol := make([]map[string]interface{}, 0)
+	for _, instance := range listTransitGatewayLocations.Locations {
+
+		transitgatewayLoc := map[string]interface{}{}
+		transitgatewayLoc[tgLocationsName] = instance.Name
+		transitgatewayLoc[tgLocationsType] = instance.Type
+		transitgatewayLoc[tgLocationsBillingLoc] = instance.BillingLocation
+
+		tgLocationsCol = append(tgLocationsCol, transitgatewayLoc)
+	}
+	d.Set(tgLocations, tgLocationsCol)
+	d.SetId(dataSourceIBMTransitGatewaysLocationsID(d))
+	return nil
+}
+
+// dataSourceIBMTransitGatewaysLocationsID returns a reasonable ID for a transit gateways locations list.
+func dataSourceIBMTransitGatewaysLocationsID(d *schema.ResourceData) string {
+	return time.Now().UTC().String()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/diff_supress_funcs.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/diff_supress_funcs.go
new file mode 100644
index 00000000000..845a4480ffa
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/diff_supress_funcs.go
@@ -0,0 +1,41 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"encoding/json"
+	"log"
+	"reflect"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func suppressEquivalentJSON(k, old, new string, d *schema.ResourceData) bool {
+
+	if old == "" {
+		return false
+	}
+	var oldObj, newObj []map[string]interface{}
+	err := json.Unmarshal([]byte(old), &oldObj)
+	if err != nil {
+		log.Printf("Error unmarshalling old json :: %s", err.Error())
+		return false
+	}
+	err = json.Unmarshal([]byte(new), &newObj)
+	if err != nil {
+		log.Printf("Error unmarshalling new json :: %s", err.Error())
+		return false
+	}
+
+	oldm := make(map[interface{}]interface{})
+	newm := make(map[interface{}]interface{})
+
+	for _, m := range oldObj {
+		oldm[m["key"]] = m["value"]
+	}
+	for _, m := range newObj {
+		newm[m["key"]] = m["value"]
+	}
+	return reflect.DeepEqual(oldm, newm)
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/flatten.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/flatten.go
new file mode 100644
index 00000000000..21f0bd1e48e
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/flatten.go
@@ -0,0 +1,80 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+)
+
+// Flatten takes a structure and turns it into a flat map[string]string.
+//
+// Within the "thing" parameter, only primitive values are allowed. Structs are
+// not supported. Therefore, it can only be slices, maps, primitives, and
+// any combination of those together.
+//
+// See the tests for examples of what inputs are turned into.
+func Flatten(thing map[string]interface{}) Map {
+	result := make(map[string]string)
+	for k, raw := range thing {
+		val := reflect.ValueOf(raw)
+		if val.IsValid() {
+			flatten(result, k, val)
+		}
+	}
+
+	return Map(result)
+}
+
+func flatten(result map[string]string, prefix string, v reflect.Value) {
+	if v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Bool:
+		if v.Bool() {
+			result[prefix] = "true"
+		} else {
+			result[prefix] = "false"
+		}
+	case reflect.Int:
+		result[prefix] = fmt.Sprintf("%d", v.Int())
+	case reflect.Map:
+		flattenMap(result, prefix, v)
+	case reflect.Slice:
+		flattenSlice(result, prefix, v)
+	// Go switch cases do not fall through; both float kinds are listed in one
+	// case so float32 values are formatted instead of silently dropped.
+	case reflect.Float32, reflect.Float64:
+		result[prefix] = fmt.Sprint(v)
+	case reflect.String:
+		result[prefix] = v.String()
+	default:
+		log.Printf("Unknown: %v", v)
+	}
+}
+
+func flattenMap(result map[string]string, prefix string, v reflect.Value) {
+	for _, k := range v.MapKeys() {
+		if k.Kind() == reflect.Interface {
+			k = k.Elem()
+		}
+
+		if k.Kind() != reflect.String {
+			panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k))
+		}
+
+		flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k))
+	}
+}
+
+func flattenSlice(result map[string]string, prefix string, v reflect.Value) {
+	prefix = prefix + "."
+
+	result[prefix+"#"] = fmt.Sprintf("%d", v.Len())
+	for i := 0; i < v.Len(); i++ {
+		flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i))
+	}
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode/hashcode.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode/hashcode.go
new file mode 100644
index 00000000000..20d789b82cc
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode/hashcode.go
@@ -0,0 +1,46 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package hashcode
+
+import (
+	"bytes"
+	"fmt"
+	"hash/crc32"
+)
+
+// String hashes a string to a unique hashcode.
+//
+// Deprecated: This will be removed in v2 without replacement. If you need
+// its functionality, you can copy it, import crc32 directly, or reference the
+// v1 package.
+//
+// crc32 returns a uint32, but for our use we need a non-negative integer.
+// Here we cast to an int and invert it if the result is negative.
+func String(s string) int {
+	v := int(crc32.ChecksumIEEE([]byte(s)))
+	if v >= 0 {
+		return v
+	}
+	if -v >= 0 {
+		return -v
+	}
+	// v == MinInt
+	return 0
+}
+
+// Strings hashes a list of strings to a unique hashcode.
+//
+// Deprecated: This will be removed in v2 without replacement. If you need
+// its functionality, you can copy it, import crc32 directly, or reference the
+// v1 package.
+func Strings(strings []string) string {
+	var buf bytes.Buffer
+
+	for _, s := range strings {
+		buf.WriteString(fmt.Sprintf("%s-", s))
+	}
+
+	return fmt.Sprintf("%d", String(buf.String()))
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/mutexkv/mutexkv.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/mutexkv/mutexkv.go
new file mode 100644
index 00000000000..77c00134292
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/mutexkv/mutexkv.go
@@ -0,0 +1,66 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package mutexkv
+
+import (
+	"log"
+	"sync"
+)
+
+// MutexKV is a simple key/value store for arbitrary mutexes. It can be used to
+// serialize changes across arbitrary collaborators that share knowledge of the
+// keys they must serialize on.
+//
+// Deprecated: This will be removed in v2 without replacement. If you need
+// its functionality, you can copy it or reference the v1 package.
+//
+// The initial use case is to let aws_security_group_rule resources serialize
+// their access to individual security groups based on SG ID.
+type MutexKV struct {
+	lock  sync.Mutex
+	store map[string]*sync.Mutex
+}
+
+// Lock the mutex for the given key. Caller is responsible for calling Unlock
+// for the same key.
+//
+// Deprecated: This will be removed in v2 without replacement. If you need
+// its functionality, you can copy it or reference the v1 package.
+func (m *MutexKV) Lock(key string) {
+	log.Printf("[DEBUG] Locking %q", key)
+	m.get(key).Lock()
+	log.Printf("[DEBUG] Locked %q", key)
+}
+
+// Unlock the mutex for the given key. Caller must have called Lock for the same key first.
+//
+// Deprecated: This will be removed in v2 without replacement. If you need
+// its functionality, you can copy it or reference the v1 package.
+func (m *MutexKV) Unlock(key string) {
+	log.Printf("[DEBUG] Unlocking %q", key)
+	m.get(key).Unlock()
+	log.Printf("[DEBUG] Unlocked %q", key)
+}
+
+// Returns a mutex for the given key, no guarantee of its lock status
+func (m *MutexKV) get(key string) *sync.Mutex {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	mutex, ok := m.store[key]
+	if !ok {
+		mutex = &sync.Mutex{}
+		m.store[key] = mutex
+	}
+	return mutex
+}
+
+// NewMutexKV returns a properly initialized MutexKV.
+//
+// Deprecated: This will be removed in v2 without replacement. If you need
+// its functionality, you can copy it or reference the v1 package.
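+//
+// A minimal usage sketch (hypothetical caller; keys are arbitrary strings,
+// typically resource identifiers):
+//
+//	mkv := mutexkv.NewMutexKV()
+//	mkv.Lock(id)
+//	defer mkv.Unlock(id)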
+func NewMutexKV() *MutexKV { + return &MutexKV{ + store: make(map[string]*sync.Mutex), + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/map.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/map.go new file mode 100644 index 00000000000..153ae70d691 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/map.go @@ -0,0 +1,85 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "strings" +) + +// Map is a wrapper around map[string]string that provides some helpers +// above it that assume the map is in the format that flatmap expects +// (the result of Flatten). +// +// All modifying functions such as Delete are done in-place unless +// otherwise noted. +type Map map[string]string + +// Contains returns true if the map contains the given key. +func (m Map) Contains(key string) bool { + for _, k := range m.Keys() { + if k == key { + return true + } + } + + return false +} + +// Delete deletes a key out of the map with the given prefix. +func (m Map) Delete(prefix string) { + for k, _ := range m { + match := k == prefix + if !match { + if !strings.HasPrefix(k, prefix) { + continue + } + + if k[len(prefix):len(prefix)+1] != "." { + continue + } + } + + delete(m, k) + } +} + +// Keys returns all of the top-level keys in this map +func (m Map) Keys() []string { + ks := make(map[string]struct{}) + for k, _ := range m { + idx := strings.Index(k, ".") + if idx == -1 { + idx = len(k) + } + + ks[k[:idx]] = struct{}{} + } + + result := make([]string, 0, len(ks)) + for k, _ := range ks { + result = append(result, k) + } + + return result +} + +// Merge merges the contents of the other Map into this one. +// +// This merge is smarter than a simple map iteration because it +// will fully replace arrays and other complex structures that +// are present in this map with the other map's. For example, if +// this map has a 3 element "foo" list, and m2 has a 2 element "foo" +// list, then the result will be that m has a 2 element "foo" +// list. +func (m Map) Merge(m2 Map) { + for _, prefix := range m2.Keys() { + m.Delete(prefix) + + for k, v := range m2 { + if strings.HasPrefix(k, prefix) { + m[k] = v + } + } + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/provider.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/provider.go new file mode 100644 index 00000000000..99ef1716f0f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/provider.go @@ -0,0 +1,815 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "os" + "sync" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/mutexkv" +) + +// This is a global MutexKV for use within this plugin. +var ibmMutexKV = mutexkv.NewMutexKV() + +// Provider returns a terraform.ResourceProvider. 
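+// Provider arguments are optional in configuration; most fall back to the
+// environment variables named in their MultiEnvDefaultFunc (for example,
+// ibmcloud_api_key falls back to IC_API_KEY and then IBMCLOUD_API_KEY).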
+func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "bluemix_api_key": { + Type: schema.TypeString, + Optional: true, + Description: "The Bluemix API Key", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"BM_API_KEY", "BLUEMIX_API_KEY"}, nil), + Deprecated: "This field is deprecated please use ibmcloud_api_key", + }, + "bluemix_timeout": { + Type: schema.TypeInt, + Optional: true, + Description: "The timeout (in seconds) to set for any Bluemix API calls made.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"BM_TIMEOUT", "BLUEMIX_TIMEOUT"}, nil), + Deprecated: "This field is deprecated please use ibmcloud_timeout", + }, + "ibmcloud_api_key": { + Type: schema.TypeString, + Optional: true, + Description: "The IBM Cloud API Key", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IC_API_KEY", "IBMCLOUD_API_KEY"}, nil), + }, + "ibmcloud_timeout": { + Type: schema.TypeInt, + Optional: true, + Description: "The timeout (in seconds) to set for any IBM Cloud API calls made.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IC_TIMEOUT", "IBMCLOUD_TIMEOUT"}, 60), + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: "The IBM cloud Region (for example 'us-south').", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IC_REGION", "IBMCLOUD_REGION", "BM_REGION", "BLUEMIX_REGION"}, "us-south"), + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Description: "The IBM cloud Region zone (for example 'us-south-1') for power resources.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IC_ZONE", "IBMCLOUD_ZONE"}, ""), + }, + "resource_group": { + Type: schema.TypeString, + Optional: true, + Description: "The Resource group id.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IC_RESOURCE_GROUP", "IBMCLOUD_RESOURCE_GROUP", "BM_RESOURCE_GROUP", "BLUEMIX_RESOURCE_GROUP"}, ""), + }, + "softlayer_api_key": { + Type: schema.TypeString, + Optional: true, + Description: "The SoftLayer API Key", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"SL_API_KEY", "SOFTLAYER_API_KEY"}, nil), + Deprecated: "This field is deprecated please use iaas_classic_api_key", + }, + "softlayer_username": { + Type: schema.TypeString, + Optional: true, + Description: "The SoftLayer user name", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"SL_USERNAME", "SOFTLAYER_USERNAME"}, nil), + Deprecated: "This field is deprecated please use iaas_classic_username", + }, + "softlayer_endpoint_url": { + Type: schema.TypeString, + Optional: true, + Description: "The Softlayer Endpoint", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"SL_ENDPOINT_URL", "SOFTLAYER_ENDPOINT_URL"}, nil), + Deprecated: "This field is deprecated please use iaas_classic_endpoint_url", + }, + "softlayer_timeout": { + Type: schema.TypeInt, + Optional: true, + Description: "The timeout (in seconds) to set for any SoftLayer API calls made.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"SL_TIMEOUT", "SOFTLAYER_TIMEOUT"}, nil), + Deprecated: "This field is deprecated please use iaas_classic_timeout", + }, + "iaas_classic_api_key": { + Type: schema.TypeString, + Optional: true, + Description: "The Classic Infrastructure API Key", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IAAS_CLASSIC_API_KEY"}, nil), + }, + "iaas_classic_username": { + Type: schema.TypeString, + Optional: true, + Description: "The Classic Infrastructure API user name", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IAAS_CLASSIC_USERNAME"}, nil), + }, + 
"iaas_classic_endpoint_url": { + Type: schema.TypeString, + Optional: true, + Description: "The Classic Infrastructure Endpoint", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IAAS_CLASSIC_ENDPOINT_URL"}, "https://api.softlayer.com/rest/v3"), + }, + "iaas_classic_timeout": { + Type: schema.TypeInt, + Optional: true, + Description: "The timeout (in seconds) to set for any Classic Infrastructure API calls made.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IAAS_CLASSIC_TIMEOUT"}, 60), + }, + "max_retries": { + Type: schema.TypeInt, + Optional: true, + Description: "The retry count to set for API calls.", + DefaultFunc: schema.EnvDefaultFunc("MAX_RETRIES", 10), + }, + "function_namespace": { + Type: schema.TypeString, + Optional: true, + Description: "The IBM Cloud Function namespace", + DefaultFunc: schema.EnvDefaultFunc("FUNCTION_NAMESPACE", nil), + Deprecated: "This field will be deprecated soon", + }, + "riaas_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "The next generation infrastructure service endpoint url.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"RIAAS_ENDPOINT"}, nil), + Deprecated: "This field is deprecated use generation", + }, + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: "Generation of Virtual Private Cloud. Default is 2", + //DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IC_GENERATION", "IBMCLOUD_GENERATION"}, nil), + Deprecated: "The generation field is deprecated and will be removed after couple of releases", + }, + "iam_token": { + Type: schema.TypeString, + Optional: true, + Description: "IAM Authentication token", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IC_IAM_TOKEN", "IBMCLOUD_IAM_TOKEN"}, nil), + }, + "iam_refresh_token": { + Type: schema.TypeString, + Optional: true, + Description: "IAM Authentication refresh token", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IC_IAM_REFRESH_TOKEN", "IBMCLOUD_IAM_REFRESH_TOKEN"}, nil), + }, + "visibility": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private", "public-and-private"}), + Description: "Visibility of the provider if it is private or public.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"IC_VISIBILITY", "IBMCLOUD_VISIBILITY"}, "public"), + }, + }, + + DataSourcesMap: map[string]*schema.Resource{ + "ibm_api_gateway": dataSourceIBMApiGateway(), + "ibm_account": dataSourceIBMAccount(), + "ibm_app": dataSourceIBMApp(), + "ibm_app_domain_private": dataSourceIBMAppDomainPrivate(), + "ibm_app_domain_shared": dataSourceIBMAppDomainShared(), + "ibm_app_route": dataSourceIBMAppRoute(), + "ibm_function_action": dataSourceIBMFunctionAction(), + "ibm_function_package": dataSourceIBMFunctionPackage(), + "ibm_function_rule": dataSourceIBMFunctionRule(), + "ibm_function_trigger": dataSourceIBMFunctionTrigger(), + "ibm_function_namespace": dataSourceIBMFunctionNamespace(), + "ibm_certificate_manager_certificates": dataIBMCertificateManagerCertificates(), + "ibm_certificate_manager_certificate": dataIBMCertificateManagerCertificate(), + "ibm_cis": dataSourceIBMCISInstance(), + "ibm_cis_dns_records": dataSourceIBMCISDNSRecords(), + "ibm_cis_certificates": dataIBMCISCertificates(), + "ibm_cis_global_load_balancers": dataSourceIBMCISGlbs(), + "ibm_cis_origin_pools": dataSourceIBMCISOriginPools(), + "ibm_cis_healthchecks": dataSourceIBMCISHealthChecks(), + "ibm_cis_domain": dataSourceIBMCISDomain(), + "ibm_cis_firewall": dataIBMCISFirewallsRecord(), + 
"ibm_cis_cache_settings": dataSourceIBMCISCacheSetting(), + "ibm_cis_waf_packages": dataSourceIBMCISWAFPackages(), + "ibm_cis_range_apps": dataSourceIBMCISRangeApps(), + "ibm_cis_custom_certificates": dataSourceIBMCISCustomCertificates(), + "ibm_cis_rate_limit": dataSourceIBMCISRateLimit(), + "ibm_cis_ip_addresses": dataSourceIBMCISIP(), + "ibm_cis_waf_groups": dataSourceIBMCISWAFGroups(), + "ibm_cis_edge_functions_actions": dataSourceIBMCISEdgeFunctionsActions(), + "ibm_cis_edge_functions_triggers": dataSourceIBMCISEdgeFunctionsTriggers(), + "ibm_cis_custom_pages": dataSourceIBMCISCustomPages(), + "ibm_cis_page_rules": dataSourceIBMCISPageRules(), + "ibm_cis_waf_rules": dataSourceIBMCISWAFRules(), + "ibm_database": dataSourceIBMDatabaseInstance(), + "ibm_compute_bare_metal": dataSourceIBMComputeBareMetal(), + "ibm_compute_image_template": dataSourceIBMComputeImageTemplate(), + "ibm_compute_placement_group": dataSourceIBMComputePlacementGroup(), + "ibm_compute_ssh_key": dataSourceIBMComputeSSHKey(), + "ibm_compute_vm_instance": dataSourceIBMComputeVmInstance(), + "ibm_container_addons": datasourceIBMContainerAddOns(), + "ibm_container_alb": dataSourceIBMContainerALB(), + "ibm_container_alb_cert": dataSourceIBMContainerALBCert(), + "ibm_container_bind_service": dataSourceIBMContainerBindService(), + "ibm_container_cluster": dataSourceIBMContainerCluster(), + "ibm_container_cluster_config": dataSourceIBMContainerClusterConfig(), + "ibm_container_cluster_versions": dataSourceIBMContainerClusterVersions(), + "ibm_container_cluster_worker": dataSourceIBMContainerClusterWorker(), + "ibm_container_vpc_cluster_alb": dataSourceIBMContainerVPCClusterALB(), + "ibm_container_vpc_alb": dataSourceIBMContainerVPCClusterALB(), + "ibm_container_vpc_cluster": dataSourceIBMContainerVPCCluster(), + "ibm_container_vpc_cluster_worker": dataSourceIBMContainerVPCClusterWorker(), + "ibm_container_vpc_cluster_worker_pool": dataSourceIBMContainerVpcClusterWorkerPool(), + "ibm_container_vpc_worker_pool": dataSourceIBMContainerVpcClusterWorkerPool(), + "ibm_container_worker_pool": dataSourceIBMContainerWorkerPool(), + "ibm_cr_namespaces": dataIBMContainerRegistryNamespaces(), + "ibm_cos_bucket": dataSourceIBMCosBucket(), + "ibm_cos_bucket_object": dataSourceIBMCosBucketObject(), + "ibm_dns_domain_registration": dataSourceIBMDNSDomainRegistration(), + "ibm_dns_domain": dataSourceIBMDNSDomain(), + "ibm_dns_secondary": dataSourceIBMDNSSecondary(), + "ibm_event_streams_topic": dataSourceIBMEventStreamsTopic(), + "ibm_iam_access_group": dataSourceIBMIAMAccessGroup(), + "ibm_iam_account_settings": dataSourceIBMIAMAccountSettings(), + "ibm_iam_auth_token": dataSourceIBMIAMAuthToken(), + "ibm_iam_role_actions": datasourceIBMIAMRoleAction(), + "ibm_iam_users": dataSourceIBMIAMUsers(), + "ibm_iam_roles": datasourceIBMIAMRole(), + "ibm_iam_user_policy": dataSourceIBMIAMUserPolicy(), + "ibm_iam_user_profile": dataSourceIBMIAMUserProfile(), + "ibm_iam_service_id": dataSourceIBMIAMServiceID(), + "ibm_iam_service_policy": dataSourceIBMIAMServicePolicy(), + "ibm_iam_api_key": dataSourceIbmIamApiKey(), + "ibm_is_dedicated_host": dataSourceIbmIsDedicatedHost(), + "ibm_is_dedicated_hosts": dataSourceIbmIsDedicatedHosts(), + "ibm_is_dedicated_host_profile": dataSourceIbmIsDedicatedHostProfile(), + "ibm_is_dedicated_host_profiles": dataSourceIbmIsDedicatedHostProfiles(), + "ibm_is_dedicated_host_group": dataSourceIbmIsDedicatedHostGroup(), + "ibm_is_dedicated_host_groups": dataSourceIbmIsDedicatedHostGroups(), + 
"ibm_is_dedicated_host_disk": dataSourceIbmIsDedicatedHostDisk(), + "ibm_is_dedicated_host_disks": dataSourceIbmIsDedicatedHostDisks(), + "ibm_is_floating_ip": dataSourceIBMISFloatingIP(), + "ibm_is_flow_logs": dataSourceIBMISFlowLogs(), + "ibm_is_image": dataSourceIBMISImage(), + "ibm_is_images": dataSourceIBMISImages(), + "ibm_is_endpoint_gateway_targets": dataSourceIBMISEndpointGatewayTargets(), + "ibm_is_instance_group": dataSourceIBMISInstanceGroup(), + "ibm_is_instance_group_memberships": dataSourceIBMISInstanceGroupMemberships(), + "ibm_is_instance_group_membership": dataSourceIBMISInstanceGroupMembership(), + "ibm_is_instance_group_manager": dataSourceIBMISInstanceGroupManager(), + "ibm_is_instance_group_managers": dataSourceIBMISInstanceGroupManagers(), + "ibm_is_instance_group_manager_policies": dataSourceIBMISInstanceGroupManagerPolicies(), + "ibm_is_instance_group_manager_policy": dataSourceIBMISInstanceGroupManagerPolicy(), + "ibm_is_instance_group_manager_action": dataSourceIBMISInstanceGroupManagerAction(), + "ibm_is_instance_group_manager_actions": dataSourceIBMISInstanceGroupManagerActions(), + "ibm_is_virtual_endpoint_gateways": dataSourceIBMISEndpointGateways(), + "ibm_is_virtual_endpoint_gateway_ips": dataSourceIBMISEndpointGatewayIPs(), + "ibm_is_virtual_endpoint_gateway": dataSourceIBMISEndpointGateway(), + "ibm_is_instance_templates": dataSourceIBMISInstanceTemplates(), + "ibm_is_instance_profile": dataSourceIBMISInstanceProfile(), + "ibm_is_instance_profiles": dataSourceIBMISInstanceProfiles(), + "ibm_is_instance": dataSourceIBMISInstance(), + "ibm_is_instances": dataSourceIBMISInstances(), + "ibm_is_instance_disk": dataSourceIbmIsInstanceDisk(), + "ibm_is_instance_disks": dataSourceIbmIsInstanceDisks(), + "ibm_is_lb": dataSourceIBMISLB(), + "ibm_is_lb_profiles": dataSourceIBMISLbProfiles(), + "ibm_is_lbs": dataSourceIBMISLBS(), + "ibm_is_public_gateway": dataSourceIBMISPublicGateway(), + "ibm_is_public_gateways": dataSourceIBMISPublicGateways(), + "ibm_is_region": dataSourceIBMISRegion(), + "ibm_is_ssh_key": dataSourceIBMISSSHKey(), + "ibm_is_subnet": dataSourceIBMISSubnet(), + "ibm_is_subnets": dataSourceIBMISSubnets(), + "ibm_is_subnet_reserved_ip": dataSourceIBMISReservedIP(), + "ibm_is_subnet_reserved_ips": dataSourceIBMISReservedIPs(), + "ibm_is_security_group": dataSourceIBMISSecurityGroup(), + "ibm_is_security_group_target": dataSourceIBMISSecurityGroupTarget(), + "ibm_is_security_group_targets": dataSourceIBMISSecurityGroupTargets(), + "ibm_is_volume": dataSourceIBMISVolume(), + "ibm_is_volume_profile": dataSourceIBMISVolumeProfile(), + "ibm_is_volume_profiles": dataSourceIBMISVolumeProfiles(), + "ibm_is_vpc": dataSourceIBMISVPC(), + "ibm_is_vpn_gateways": dataSourceIBMISVPNGateways(), + "ibm_is_vpn_gateway_connections": dataSourceIBMISVPNGatewayConnections(), + "ibm_is_vpc_default_routing_table": dataSourceIBMISVPCDefaultRoutingTable(), + "ibm_is_vpc_routing_tables": dataSourceIBMISVPCRoutingTables(), + "ibm_is_vpc_routing_table_routes": dataSourceIBMISVPCRoutingTableRoutes(), + "ibm_is_zone": dataSourceIBMISZone(), + "ibm_is_zones": dataSourceIBMISZones(), + "ibm_is_operating_system": dataSourceIBMISOperatingSystem(), + "ibm_is_operating_systems": dataSourceIBMISOperatingSystems(), + "ibm_lbaas": dataSourceIBMLbaas(), + "ibm_network_vlan": dataSourceIBMNetworkVlan(), + "ibm_org": dataSourceIBMOrg(), + "ibm_org_quota": dataSourceIBMOrgQuota(), + "ibm_kp_key": dataSourceIBMkey(), + "ibm_kms_key_rings": dataSourceIBMKMSkeyRings(), + "ibm_kms_keys": 
dataSourceIBMKMSkeys(), + "ibm_pn_application_chrome": dataSourceIBMPNApplicationChrome(), + "ibm_app_config_environment": dataSourceIbmAppConfigEnvironment(), + "ibm_app_config_environments": dataSourceIbmAppConfigEnvironments(), + "ibm_app_config_feature": dataSourceIbmAppConfigFeature(), + "ibm_app_config_features": dataSourceIbmAppConfigFeatures(), + "ibm_kms_key": dataSourceIBMKMSkey(), + "ibm_resource_quota": dataSourceIBMResourceQuota(), + "ibm_resource_group": dataSourceIBMResourceGroup(), + "ibm_resource_instance": dataSourceIBMResourceInstance(), + "ibm_resource_key": dataSourceIBMResourceKey(), + "ibm_security_group": dataSourceIBMSecurityGroup(), + "ibm_service_instance": dataSourceIBMServiceInstance(), + "ibm_service_key": dataSourceIBMServiceKey(), + "ibm_service_plan": dataSourceIBMServicePlan(), + "ibm_space": dataSourceIBMSpace(), + + // Added for Schematics + "ibm_schematics_workspace": dataSourceIBMSchematicsWorkspace(), + "ibm_schematics_output": dataSourceIBMSchematicsOutput(), + "ibm_schematics_state": dataSourceIBMSchematicsState(), + "ibm_schematics_action": dataSourceIBMSchematicsAction(), + "ibm_schematics_job": dataSourceIBMSchematicsJob(), + + // Added for Power Resources + + "ibm_pi_key": dataSourceIBMPIKey(), + "ibm_pi_image": dataSourceIBMPIImage(), + "ibm_pi_instance": dataSourceIBMPIInstance(), + "ibm_pi_tenant": dataSourceIBMPITenant(), + "ibm_pi_network": dataSourceIBMPINetwork(), + "ibm_pi_volume": dataSourceIBMPIVolume(), + "ibm_pi_instance_volumes": dataSourceIBMPIInstanceVolumes(), + "ibm_pi_public_network": dataSourceIBMPIPublicNetwork(), + "ibm_pi_images": dataSourceIBMPIImages(), + "ibm_pi_instance_ip": dataSourceIBMPIInstanceIP(), + "ibm_pi_instance_snapshots": dataSourceIBMPISnapshots(), + "ibm_pi_pvm_snapshots": dataSourceIBMPISnapshot(), + "ibm_pi_network_port": dataSourceIBMPINetworkPort(), + "ibm_pi_cloud_instance": dataSourceIBMPICloudInstance(), + "ibm_pi_catalog_images": dataSourceIBMPICatalogImages(), + + // Added for private dns zones + + "ibm_dns_zones": dataSourceIBMPrivateDNSZones(), + "ibm_dns_permitted_networks": dataSourceIBMPrivateDNSPermittedNetworks(), + "ibm_dns_resource_records": dataSourceIBMPrivateDNSResourceRecords(), + "ibm_dns_glb_monitors": dataSourceIBMPrivateDNSGLBMonitors(), + "ibm_dns_glb_pools": dataSourceIBMPrivateDNSGLBPools(), + "ibm_dns_glbs": dataSourceIBMPrivateDNSGLBs(), + + // Added for Direct Link + + "ibm_dl_gateways": dataSourceIBMDLGateways(), + "ibm_dl_offering_speeds": dataSourceIBMDLOfferingSpeeds(), + "ibm_dl_port": dataSourceIBMDirectLinkPort(), + "ibm_dl_ports": dataSourceIBMDirectLinkPorts(), + "ibm_dl_gateway": dataSourceIBMDLGateway(), + "ibm_dl_locations": dataSourceIBMDLLocations(), + "ibm_dl_routers": dataSourceIBMDLRouters(), + "ibm_dl_provider_ports": dataSourceIBMDirectLinkProviderPorts(), + "ibm_dl_provider_gateways": dataSourceIBMDirectLinkProviderGateways(), + + //Added for Transit Gateway + "ibm_tg_gateway": dataSourceIBMTransitGateway(), + "ibm_tg_gateways": dataSourceIBMTransitGateways(), + "ibm_tg_locations": dataSourceIBMTransitGatewaysLocations(), + "ibm_tg_location": dataSourceIBMTransitGatewaysLocation(), + + //Added for BSS Enterprise + "ibm_enterprises": dataSourceIbmEnterprises(), + "ibm_enterprise_account_groups": dataSourceIbmEnterpriseAccountGroups(), + "ibm_enterprise_accounts": dataSourceIbmEnterpriseAccounts(), + + //Added for Secrets Manager + "ibm_secrets_manager_secrets": dataSourceIBMSecretsManagerSecrets(), + "ibm_secrets_manager_secret": 
dataSourceIBMSecretsManagerSecret(), + + // Catalog related resources + "ibm_cm_catalog": dataSourceIBMCmCatalog(), + "ibm_cm_offering": dataSourceIBMCmOffering(), + "ibm_cm_version": dataSourceIBMCmVersion(), + "ibm_cm_offering_instance": dataSourceIBMCmOfferingInstance(), + + //Added for Resource Tag + "ibm_resource_tag": dataSourceIBMResourceTag(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "ibm_api_gateway_endpoint": resourceIBMApiGatewayEndPoint(), + "ibm_api_gateway_endpoint_subscription": resourceIBMApiGatewayEndpointSubscription(), + "ibm_app": resourceIBMApp(), + "ibm_app_domain_private": resourceIBMAppDomainPrivate(), + "ibm_app_domain_shared": resourceIBMAppDomainShared(), + "ibm_app_route": resourceIBMAppRoute(), + "ibm_function_action": resourceIBMFunctionAction(), + "ibm_function_package": resourceIBMFunctionPackage(), + "ibm_function_rule": resourceIBMFunctionRule(), + "ibm_function_trigger": resourceIBMFunctionTrigger(), + "ibm_function_namespace": resourceIBMFunctionNamespace(), + "ibm_cis": resourceIBMCISInstance(), + "ibm_database": resourceIBMDatabaseInstance(), + "ibm_certificate_manager_import": resourceIBMCertificateManagerImport(), + "ibm_certificate_manager_order": resourceIBMCertificateManagerOrder(), + "ibm_cis_domain": resourceIBMCISDomain(), + "ibm_cis_domain_settings": resourceIBMCISSettings(), + "ibm_cis_firewall": resourceIBMCISFirewallRecord(), + "ibm_cis_range_app": resourceIBMCISRangeApp(), + "ibm_cis_healthcheck": resourceIBMCISHealthCheck(), + "ibm_cis_origin_pool": resourceIBMCISPool(), + "ibm_cis_global_load_balancer": resourceIBMCISGlb(), + "ibm_cis_certificate_upload": resourceIBMCISCertificateUpload(), + "ibm_cis_dns_record": resourceIBMCISDnsRecord(), + "ibm_cis_dns_records_import": resourceIBMCISDNSRecordsImport(), + "ibm_cis_rate_limit": resourceIBMCISRateLimit(), + "ibm_cis_page_rule": resourceIBMCISPageRule(), + "ibm_cis_edge_functions_action": resourceIBMCISEdgeFunctionsAction(), + "ibm_cis_edge_functions_trigger": resourceIBMCISEdgeFunctionsTrigger(), + "ibm_cis_tls_settings": resourceIBMCISTLSSettings(), + "ibm_cis_waf_package": resourceIBMCISWAFPackage(), + "ibm_cis_routing": resourceIBMCISRouting(), + "ibm_cis_waf_group": resourceIBMCISWAFGroup(), + "ibm_cis_cache_settings": resourceIBMCISCacheSettings(), + "ibm_cis_custom_page": resourceIBMCISCustomPage(), + "ibm_cis_waf_rule": resourceIBMCISWAFRule(), + "ibm_cis_certificate_order": resourceIBMCISCertificateOrder(), + "ibm_compute_autoscale_group": resourceIBMComputeAutoScaleGroup(), + "ibm_compute_autoscale_policy": resourceIBMComputeAutoScalePolicy(), + "ibm_compute_bare_metal": resourceIBMComputeBareMetal(), + "ibm_compute_dedicated_host": resourceIBMComputeDedicatedHost(), + "ibm_compute_monitor": resourceIBMComputeMonitor(), + "ibm_compute_placement_group": resourceIBMComputePlacementGroup(), + "ibm_compute_provisioning_hook": resourceIBMComputeProvisioningHook(), + "ibm_compute_ssh_key": resourceIBMComputeSSHKey(), + "ibm_compute_ssl_certificate": resourceIBMComputeSSLCertificate(), + "ibm_compute_user": resourceIBMComputeUser(), + "ibm_compute_vm_instance": resourceIBMComputeVmInstance(), + "ibm_container_addons": resourceIBMContainerAddOns(), + "ibm_container_alb": resourceIBMContainerALB(), + "ibm_container_api_key_reset": resourceIBMContainerAPIKeyReset(), + "ibm_container_vpc_alb": resourceIBMContainerVpcALB(), + "ibm_container_vpc_worker_pool": resourceIBMContainerVpcWorkerPool(), + "ibm_container_vpc_cluster": resourceIBMContainerVpcCluster(), + 
"ibm_container_alb_cert": resourceIBMContainerALBCert(), + "ibm_container_cluster": resourceIBMContainerCluster(), + "ibm_container_cluster_feature": resourceIBMContainerClusterFeature(), + "ibm_container_bind_service": resourceIBMContainerBindService(), + "ibm_container_worker_pool": resourceIBMContainerWorkerPool(), + "ibm_container_worker_pool_zone_attachment": resourceIBMContainerWorkerPoolZoneAttachment(), + "ibm_cr_namespace": resourceIBMCrNamespace(), + "ibm_cr_retention_policy": resourceIBMCrRetentionPolicy(), + "ibm_ob_logging": resourceIBMObLogging(), + "ibm_ob_monitoring": resourceIBMObMonitoring(), + "ibm_cos_bucket": resourceIBMCOSBucket(), + "ibm_cos_bucket_object": resourceIBMCOSBucketObject(), + "ibm_dns_domain": resourceIBMDNSDomain(), + "ibm_dns_domain_registration_nameservers": resourceIBMDNSDomainRegistrationNameservers(), + "ibm_dns_secondary": resourceIBMDNSSecondary(), + "ibm_dns_record": resourceIBMDNSRecord(), + "ibm_event_streams_topic": resourceIBMEventStreamsTopic(), + "ibm_firewall": resourceIBMFirewall(), + "ibm_firewall_policy": resourceIBMFirewallPolicy(), + "ibm_iam_access_group": resourceIBMIAMAccessGroup(), + "ibm_iam_account_settings": resourceIbmIamAccountSettings(), + "ibm_iam_custom_role": resourceIBMIAMCustomRole(), + "ibm_iam_access_group_dynamic_rule": resourceIBMIAMDynamicRule(), + "ibm_iam_access_group_members": resourceIBMIAMAccessGroupMembers(), + "ibm_iam_access_group_policy": resourceIBMIAMAccessGroupPolicy(), + "ibm_iam_authorization_policy": resourceIBMIAMAuthorizationPolicy(), + "ibm_iam_authorization_policy_detach": resourceIBMIAMAuthorizationPolicyDetach(), + "ibm_iam_user_policy": resourceIBMIAMUserPolicy(), + "ibm_iam_user_settings": resourceIBMUserSettings(), + "ibm_iam_service_id": resourceIBMIAMServiceID(), + "ibm_iam_service_api_key": resourceIBMIAMServiceAPIKey(), + "ibm_iam_service_policy": resourceIBMIAMServicePolicy(), + "ibm_iam_user_invite": resourceIBMUserInvite(), + "ibm_iam_api_key": resourceIbmIamApiKey(), + "ibm_ipsec_vpn": resourceIBMIPSecVPN(), + "ibm_is_dedicated_host": resourceIbmIsDedicatedHost(), + "ibm_is_dedicated_host_group": resourceIbmIsDedicatedHostGroup(), + "ibm_is_dedicated_host_disk_management": resourceIBMISDedicatedHostDiskManagement(), + "ibm_is_floating_ip": resourceIBMISFloatingIP(), + "ibm_is_flow_log": resourceIBMISFlowLog(), + "ibm_is_instance": resourceIBMISInstance(), + "ibm_is_instance_disk_management": resourceIBMISInstanceDiskManagement(), + "ibm_is_instance_group": resourceIBMISInstanceGroup(), + "ibm_is_instance_group_membership": resourceIBMISInstanceGroupMembership(), + "ibm_is_instance_group_manager": resourceIBMISInstanceGroupManager(), + "ibm_is_instance_group_manager_policy": resourceIBMISInstanceGroupManagerPolicy(), + "ibm_is_instance_group_manager_action": resourceIBMISInstanceGroupManagerAction(), + "ibm_is_virtual_endpoint_gateway": resourceIBMISEndpointGateway(), + "ibm_is_virtual_endpoint_gateway_ip": resourceIBMISEndpointGatewayIP(), + "ibm_is_instance_template": resourceIBMISInstanceTemplate(), + "ibm_is_ike_policy": resourceIBMISIKEPolicy(), + "ibm_is_ipsec_policy": resourceIBMISIPSecPolicy(), + "ibm_is_lb": resourceIBMISLB(), + "ibm_is_lb_listener": resourceIBMISLBListener(), + "ibm_is_lb_listener_policy": resourceIBMISLBListenerPolicy(), + "ibm_is_lb_listener_policy_rule": resourceIBMISLBListenerPolicyRule(), + "ibm_is_lb_pool": resourceIBMISLBPool(), + "ibm_is_lb_pool_member": resourceIBMISLBPoolMember(), + "ibm_is_network_acl": resourceIBMISNetworkACL(), + 
"ibm_is_public_gateway": resourceIBMISPublicGateway(), + "ibm_is_security_group": resourceIBMISSecurityGroup(), + "ibm_is_security_group_rule": resourceIBMISSecurityGroupRule(), + "ibm_is_security_group_target": resourceIBMISSecurityGroupTarget(), + "ibm_is_security_group_network_interface_attachment": resourceIBMISSecurityGroupNetworkInterfaceAttachment(), + "ibm_is_subnet": resourceIBMISSubnet(), + "ibm_is_subnet_reserved_ip": resourceIBMISReservedIP(), + "ibm_is_subnet_network_acl_attachment": resourceIBMISSubnetNetworkACLAttachment(), + "ibm_is_ssh_key": resourceIBMISSSHKey(), + "ibm_is_volume": resourceIBMISVolume(), + "ibm_is_vpn_gateway": resourceIBMISVPNGateway(), + "ibm_is_vpn_gateway_connection": resourceIBMISVPNGatewayConnection(), + "ibm_is_vpc": resourceIBMISVPC(), + "ibm_is_vpc_address_prefix": resourceIBMISVpcAddressPrefix(), + "ibm_is_vpc_route": resourceIBMISVpcRoute(), + "ibm_is_vpc_routing_table": resourceIBMISVPCRoutingTable(), + "ibm_is_vpc_routing_table_route": resourceIBMISVPCRoutingTableRoute(), + "ibm_is_image": resourceIBMISImage(), + "ibm_lb": resourceIBMLb(), + "ibm_lbaas": resourceIBMLbaas(), + "ibm_lbaas_health_monitor": resourceIBMLbaasHealthMonitor(), + "ibm_lbaas_server_instance_attachment": resourceIBMLbaasServerInstanceAttachment(), + "ibm_lb_service": resourceIBMLbService(), + "ibm_lb_service_group": resourceIBMLbServiceGroup(), + "ibm_lb_vpx": resourceIBMLbVpx(), + "ibm_lb_vpx_ha": resourceIBMLbVpxHa(), + "ibm_lb_vpx_service": resourceIBMLbVpxService(), + "ibm_lb_vpx_vip": resourceIBMLbVpxVip(), + "ibm_multi_vlan_firewall": resourceIBMMultiVlanFirewall(), + "ibm_network_gateway": resourceIBMNetworkGateway(), + "ibm_network_gateway_vlan_association": resourceIBMNetworkGatewayVlanAttachment(), + "ibm_network_interface_sg_attachment": resourceIBMNetworkInterfaceSGAttachment(), + "ibm_network_public_ip": resourceIBMNetworkPublicIp(), + "ibm_network_vlan": resourceIBMNetworkVlan(), + "ibm_network_vlan_spanning": resourceIBMNetworkVlanSpan(), + "ibm_object_storage_account": resourceIBMObjectStorageAccount(), + "ibm_org": resourceIBMOrg(), + "ibm_pn_application_chrome": resourceIBMPNApplicationChrome(), + "ibm_app_config_environment": resourceIbmAppConfigEnvironment(), + "ibm_app_config_feature": resourceIbmIbmAppConfigFeature(), + "ibm_kms_key": resourceIBMKmskey(), + "ibm_kms_key_alias": resourceIBMKmskeyAlias(), + "ibm_kms_key_rings": resourceIBMKmskeyRings(), + "ibm_kp_key": resourceIBMkey(), + "ibm_resource_group": resourceIBMResourceGroup(), + "ibm_resource_instance": resourceIBMResourceInstance(), + "ibm_resource_key": resourceIBMResourceKey(), + "ibm_security_group": resourceIBMSecurityGroup(), + "ibm_security_group_rule": resourceIBMSecurityGroupRule(), + "ibm_service_instance": resourceIBMServiceInstance(), + "ibm_service_key": resourceIBMServiceKey(), + "ibm_space": resourceIBMSpace(), + "ibm_storage_evault": resourceIBMStorageEvault(), + "ibm_storage_block": resourceIBMStorageBlock(), + "ibm_storage_file": resourceIBMStorageFile(), + "ibm_subnet": resourceIBMSubnet(), + "ibm_dns_reverse_record": resourceIBMDNSReverseRecord(), + "ibm_ssl_certificate": resourceIBMSSLCertificate(), + "ibm_cdn": resourceIBMCDN(), + "ibm_hardware_firewall_shared": resourceIBMFirewallShared(), + + //Added for Power Colo + + "ibm_pi_key": resourceIBMPIKey(), + "ibm_pi_volume": resourceIBMPIVolume(), + "ibm_pi_network": resourceIBMPINetwork(), + "ibm_pi_instance": resourceIBMPIInstance(), + "ibm_pi_operations": resourceIBMPIIOperations(), + "ibm_pi_volume_attach": 
resourceIBMPIVolumeAttach(), + "ibm_pi_capture": resourceIBMPICapture(), + "ibm_pi_image": resourceIBMPIImage(), + "ibm_pi_network_port": resourceIBMPINetworkPort(), + "ibm_pi_snapshot": resourceIBMPISnapshot(), + "ibm_pi_network_port_attach": resourceIBMPINetworkPortAttach(), + + //Private DNS related resources + "ibm_dns_zone": resourceIBMPrivateDNSZone(), + "ibm_dns_permitted_network": resourceIBMPrivateDNSPermittedNetwork(), + "ibm_dns_resource_record": resourceIBMPrivateDNSResourceRecord(), + "ibm_dns_glb_monitor": resourceIBMPrivateDNSGLBMonitor(), + "ibm_dns_glb_pool": resourceIBMPrivateDNSGLBPool(), + "ibm_dns_glb": resourceIBMPrivateDNSGLB(), + + //Direct Link related resources + "ibm_dl_gateway": resourceIBMDLGateway(), + "ibm_dl_virtual_connection": resourceIBMDLGatewayVC(), + "ibm_dl_provider_gateway": resourceIBMDLProviderGateway(), + //Added for Transit Gateway + "ibm_tg_gateway": resourceIBMTransitGateway(), + "ibm_tg_connection": resourceIBMTransitGatewayConnection(), + + //Catalog related resources + "ibm_cm_offering_instance": resourceIBMCmOfferingInstance(), + "ibm_cm_catalog": resourceIBMCmCatalog(), + "ibm_cm_offering": resourceIBMCmOffering(), + "ibm_cm_version": resourceIBMCmVersion(), + + //Added for enterprise + "ibm_enterprise": resourceIbmEnterprise(), + "ibm_enterprise_account_group": resourceIbmEnterpriseAccountGroup(), + "ibm_enterprise_account": resourceIbmEnterpriseAccount(), + + //Added for Schematics + "ibm_schematics_workspace": resourceIBMSchematicsWorkspace(), + "ibm_schematics_action": resourceIBMSchematicsAction(), + "ibm_schematics_job": resourceIBMSchematicsJob(), + + //Added for Resource Tag + "ibm_resource_tag": resourceIBMResourceTag(), + }, + + ConfigureFunc: providerConfigure, + } +} + +var globalValidatorDict ValidatorDict +var initOnce sync.Once + +// Validator returns the global validator dictionary; it is built exactly once. +func Validator() ValidatorDict { + initOnce.Do(func() { + globalValidatorDict = ValidatorDict{ + ResourceValidatorDictionary: map[string]*ResourceValidator{ + "ibm_iam_account_settings": resourceIBMIAMAccountSettingsValidator(), + "ibm_iam_custom_role": resourceIBMIAMCustomRoleValidator(), + "ibm_cis_healthcheck": resourceIBMCISHealthCheckValidator(), + "ibm_cis_rate_limit": resourceIBMCISRateLimitValidator(), + "ibm_cis": resourceIBMCISValidator(), + "ibm_cis_domain_settings": resourceIBMCISDomainSettingValidator(), + "ibm_cis_tls_settings": resourceIBMCISTLSSettingsValidator(), + "ibm_cis_routing": resourceIBMCISRoutingValidator(), + "ibm_cis_page_rule": resourceCISPageRuleValidator(), + "ibm_cis_waf_package": resourceIBMCISWAFPackageValidator(), + "ibm_cis_waf_group": resourceIBMCISWAFGroupValidator(), + "ibm_cis_certificate_upload": resourceCISCertificateUploadValidator(), + "ibm_cis_cache_settings": resourceIBMCISCacheSettingsValidator(), + "ibm_cis_custom_page": resourceIBMCISCustomPageValidator(), + "ibm_cis_firewall": resourceIBMCISFirewallValidator(), + "ibm_cis_range_app": resourceIBMCISRangeAppValidator(), + "ibm_cis_waf_rule": resourceIBMCISWAFRuleValidator(), + "ibm_cis_certificate_order": resourceIBMCISCertificateOrderValidator(), + "ibm_cr_namespace": resourceIBMCrNamespaceValidator(), + "ibm_tg_gateway": resourceIBMTGValidator(), + "ibm_app_config_feature": resourceIbmAppConfigFeatureValidator(), + "ibm_tg_connection": resourceIBMTransitGatewayConnectionValidator(), + "ibm_dl_virtual_connection": resourceIBMdlGatewayVCValidator(), + "ibm_dl_gateway": resourceIBMDLGatewayValidator(), + "ibm_dl_provider_gateway": resourceIBMDLProviderGatewayValidator(),
+ "ibm_database": resourceIBMICDValidator(), + "ibm_function_package": resourceIBMFuncPackageValidator(), + "ibm_function_action": resourceIBMFuncActionValidator(), + "ibm_function_rule": resourceIBMFuncRuleValidator(), + "ibm_function_trigger": resourceIBMFuncTriggerValidator(), + "ibm_function_namespace": resourceIBMFuncNamespaceValidator(), + "ibm_is_dedicated_host_group": resourceIbmIsDedicatedHostGroupValidator(), + "ibm_is_dedicated_host": resourceIbmIsDedicatedHostValidator(), + "ibm_is_dedicated_host_disk_management": resourceIBMISDedicatedHostDiskManagementValidator(), + "ibm_is_flow_log": resourceIBMISFlowLogValidator(), + "ibm_is_instance_group": resourceIBMISInstanceGroupValidator(), + "ibm_is_instance_group_membership": resourceIBMISInstanceGroupMembershipValidator(), + "ibm_is_instance_group_manager": resourceIBMISInstanceGroupManagerValidator(), + "ibm_is_instance_group_manager_policy": resourceIBMISInstanceGroupManagerPolicyValidator(), + "ibm_is_instance_group_manager_action": resourceIBMISInstanceGroupManagerActionValidator(), + "ibm_is_floating_ip": resourceIBMISFloatingIPValidator(), + "ibm_is_ike_policy": resourceIBMISIKEValidator(), + "ibm_is_image": resourceIBMISImageValidator(), + "ibm_is_instance": resourceIBMISInstanceValidator(), + "ibm_is_instance_disk_management": resourceIBMISInstanceDiskManagementValidator(), + "ibm_is_ipsec_policy": resourceIBMISIPSECValidator(), + "ibm_is_lb_listener_policy_rule": resourceIBMISLBListenerPolicyRuleValidator(), + "ibm_is_lb_listener_policy": resourceIBMISLBListenerPolicyValidator(), + "ibm_is_lb_listener": resourceIBMISLBListenerValidator(), + "ibm_is_lb_pool": resourceIBMISLBPoolValidator(), + "ibm_is_lb": resourceIBMISLBValidator(), + "ibm_is_network_acl": resourceIBMISNetworkACLValidator(), + "ibm_is_public_gateway": resourceIBMISPublicGatewayValidator(), + "ibm_is_security_group_target": resourceIBMISSecurityGroupTargetValidator(), + "ibm_is_security_group_rule": resourceIBMISSecurityGroupRuleValidator(), + "ibm_is_security_group": resourceIBMISSecurityGroupValidator(), + "ibm_is_ssh_key": resourceIBMISSHKeyValidator(), + "ibm_is_subnet": resourceIBMISSubnetValidator(), + "ibm_is_subnet_reserved_ip": resourceIBMISSubnetReservedIPValidator(), + "ibm_is_volume": resourceIBMISVolumeValidator(), + "ibm_is_address_prefix": resourceIBMISAddressPrefixValidator(), + "ibm_is_route": resourceIBMISRouteValidator(), + "ibm_is_vpc": resourceIBMISVPCValidator(), + "ibm_is_vpc_routing_table": resourceIBMISVPCRoutingTableValidator(), + "ibm_is_vpc_routing_table_route": resourceIBMISVPCRoutingTableRouteValidator(), + "ibm_is_vpn_gateway_connection": resourceIBMISVPNGatewayConnectionValidator(), + "ibm_is_vpn_gateway": resourceIBMISVPNGatewayValidator(), + "ibm_kms_key_rings": resourceIBMKeyRingValidator(), + "ibm_dns_glb_monitor": resourceIBMPrivateDNSGLBMonitorValidator(), + "ibm_dns_glb_pool": resourceIBMPrivateDNSGLBPoolValidator(), + "ibm_schematics_action": resourceIBMSchematicsActionValidator(), + "ibm_schematics_job": resourceIBMSchematicsJobValidator(), + "ibm_schematics_workspace": resourceIBMSchematicsWorkspaceValidator(), + "ibm_resource_instance": resourceIBMResourceInstanceValidator(), + "ibm_is_virtual_endpoint_gateway": resourceIBMISEndpointGatewayValidator(), + "ibm_container_vpc_cluster": resourceIBMContainerVpcClusterValidator(), + "ibm_container_cluster": resourceIBMContainerClusterValidator(), + "ibm_resource_tag": resourceIBMResourceTagValidator(), + }, + DataSourceValidatorDictionary: map[string]*ResourceValidator{ + 
"ibm_is_subnet": dataSourceIBMISSubnetValidator(), + "ibm_dl_offering_speeds": datasourceIBMDLOfferingSpeedsValidator(), + "ibm_dl_routers": datasourceIBMDLRoutersValidator(), + "ibm_is_vpc": dataSourceIBMISVpcValidator(), + "ibm_is_volume": dataSourceIBMISVolumeValidator(), + "ibm_secrets_manager_secret": datasourceIBMSecretsManagerSecretValidator(), + "ibm_secrets_manager_secrets": datasourceIBMSecretsManagerSecretsValidator(), + }, + } + }) + return globalValidatorDict +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + var bluemixAPIKey string + var bluemixTimeout int + var iamToken, iamRefreshToken string + if key, ok := d.GetOk("bluemix_api_key"); ok { + bluemixAPIKey = key.(string) + } + if key, ok := d.GetOk("ibmcloud_api_key"); ok { + bluemixAPIKey = key.(string) + } + if itoken, ok := d.GetOk("iam_token"); ok { + iamToken = itoken.(string) + } + if rtoken, ok := d.GetOk("iam_refresh_token"); ok { + iamRefreshToken = rtoken.(string) + } + var softlayerUsername, softlayerAPIKey, softlayerEndpointUrl string + var softlayerTimeout int + if username, ok := d.GetOk("softlayer_username"); ok { + softlayerUsername = username.(string) + } + if username, ok := d.GetOk("iaas_classic_username"); ok { + softlayerUsername = username.(string) + } + if apikey, ok := d.GetOk("softlayer_api_key"); ok { + softlayerAPIKey = apikey.(string) + } + if apikey, ok := d.GetOk("iaas_classic_api_key"); ok { + softlayerAPIKey = apikey.(string) + } + if endpoint, ok := d.GetOk("softlayer_endpoint_url"); ok { + softlayerEndpointUrl = endpoint.(string) + } + if endpoint, ok := d.GetOk("iaas_classic_endpoint_url"); ok { + softlayerEndpointUrl = endpoint.(string) + } + if tm, ok := d.GetOk("softlayer_timeout"); ok { + softlayerTimeout = tm.(int) + } + if tm, ok := d.GetOk("iaas_classic_timeout"); ok { + softlayerTimeout = tm.(int) + } + + if tm, ok := d.GetOk("bluemix_timeout"); ok { + bluemixTimeout = tm.(int) + } + if tm, ok := d.GetOk("ibmcloud_timeout"); ok { + bluemixTimeout = tm.(int) + } + var visibility string + if v, ok := d.GetOk("visibility"); ok { + visibility = v.(string) + } + + resourceGrp := d.Get("resource_group").(string) + region := d.Get("region").(string) + zone := d.Get("zone").(string) + retryCount := d.Get("max_retries").(int) + wskNameSpace := d.Get("function_namespace").(string) + riaasEndPoint := d.Get("riaas_endpoint").(string) + + wskEnvVal, err := schema.EnvDefaultFunc("FUNCTION_NAMESPACE", "")() + if err != nil { + return nil, err + } + //Set environment variable to be used in DiffSupressFunction + if wskEnvVal.(string) == "" { + os.Setenv("FUNCTION_NAMESPACE", wskNameSpace) + } + + config := Config{ + BluemixAPIKey: bluemixAPIKey, + Region: region, + ResourceGroup: resourceGrp, + BluemixTimeout: time.Duration(bluemixTimeout) * time.Second, + SoftLayerTimeout: time.Duration(softlayerTimeout) * time.Second, + SoftLayerUserName: softlayerUsername, + SoftLayerAPIKey: softlayerAPIKey, + RetryCount: retryCount, + SoftLayerEndpointURL: softlayerEndpointUrl, + RetryDelay: RetryAPIDelay, + FunctionNameSpace: wskNameSpace, + RiaasEndPoint: riaasEndPoint, + IAMToken: iamToken, + IAMRefreshToken: iamRefreshToken, + Zone: zone, + Visibility: visibility, + //PowerServiceInstance: powerServiceInstance, + } + + return config.ClientSession() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/qualified_name.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/qualified_name.go new file mode 100644 index 00000000000..95a67fce55b --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/qualified_name.go @@ -0,0 +1,182 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "errors" + "fmt" + "os" + "strings" +) + +type QualifiedName struct { + namespace string // namespace. does not include leading '/'. may be "" (i.e. default namespace) + packageName string // package. may be "". does not include leading/trailing '/' + entity string // entity. should not be "" + EntityName string // pkg+entity +} + +// Imported code from openwhisk cli https://github.com/apache/incubator-openwhisk/tree/26146368f1dd07f817062e662db64c73a8d486d6/tools/cli/go-whisk-cli/commands +/////////////////////////// +// QualifiedName Methods // +/////////////////////////// + +// GetFullQualifiedName() returns a full qualified name in proper string format +// from qualifiedName with proper syntax. +// Example: /namespace/[package/]entity +func (qualifiedName *QualifiedName) GetFullQualifiedName() string { + output := []string{} + + if len(qualifiedName.GetNamespace()) > 0 { + output = append(output, "/", qualifiedName.GetNamespace(), "/") + } + if len(qualifiedName.GetPackageName()) > 0 { + output = append(output, qualifiedName.GetPackageName(), "/") + } + output = append(output, qualifiedName.GetEntity()) + + return strings.Join(output, "") +} + +// GetPackageName() returns the package name from qualifiedName without a +// leading '/' +func (qualifiedName *QualifiedName) GetPackageName() string { + return qualifiedName.packageName +} + +// GetEntityName() returns the entity name ([package/]entity) of qualifiedName +// without a leading '/' +func (qualifiedName *QualifiedName) GetEntityName() string { + return qualifiedName.EntityName +} + +// GetEntity() returns the name of entity in qualifiedName without a leading '/' +func (qualifiedName *QualifiedName) GetEntity() string { + return qualifiedName.entity +} + +// GetNamespace() returns the name of the namespace in qualifiedName without +// a leading '/' +func (qualifiedName *QualifiedName) GetNamespace() string { + return qualifiedName.namespace +} + +// NewQualifiedName(name) initializes and constructs a (possibly fully qualified) +// QualifiedName struct. +// +// NOTE: If the given qualified name is None, then this is a default qualified +// name and it is resolved from properties. +// NOTE: If the namespace is missing from the qualified name, the namespace +// is also resolved from the property file. +// +// Examples: +// foo => qualifiedName {namespace: "_", entityName: foo} +// pkg/foo => qualifiedName {namespace: "_", entityName: pkg/foo} +// /ns/foo => qualifiedName {namespace: ns, entityName: foo} +// /ns/pkg/foo => qualifiedName {namespace: ns, entityName: pkg/foo} +func NewQualifiedName(name string) (*QualifiedName, error) { + qualifiedName := new(QualifiedName) + + // If name has a preceding delimiter (/), or if it has two delimiters with a + // leading non-empty string, then it contains a namespace. Otherwise the name + // does not specify a namespace, so default the namespace to the namespace + // value set in the properties file; if that is not set, use "_" + name = addLeadSlash(name) + parts := strings.Split(name, "/") + if strings.HasPrefix(name, "/") { + qualifiedName.namespace = parts[1] + + if len(parts) < 2 || len(parts) > 4 { + return qualifiedName, qualifiedNameNotSpecifiedErr() + } + + for i := 1; i < len(parts); i++ { + if len(parts[i]) == 0 || parts[i] == "." 
{ + return qualifiedName, qualifiedNameNotSpecifiedErr() + } + } + + qualifiedName.EntityName = strings.Join(parts[2:], "/") + if len(parts) == 4 { + qualifiedName.packageName = parts[2] + } + qualifiedName.entity = parts[len(parts)-1] + } else { + if len(name) == 0 || name == "." { + return qualifiedName, qualifiedNameNotSpecifiedErr() + } + + qualifiedName.entity = parts[len(parts)-1] + if len(parts) == 2 { + qualifiedName.packageName = parts[0] + } + qualifiedName.EntityName = name + qualifiedName.namespace = getNamespaceFromProp() + } + + return qualifiedName, nil +} + +///////////////////// +// Error Functions // +///////////////////// + +// qualifiedNameNotSpecifiedErr() returns a generic whisk error for +// invalid qualified names detected while building a new +// QualifiedName struct. +func qualifiedNameNotSpecifiedErr() error { + return errors.New("A valid qualified name must be specified.") +} + +// NewQualifiedNameError(entityName, err) returns a specific whisk error +// for invalid qualified names. +func NewQualifiedNameError(entityName string, err error) error { + errorMsg := fmt.Sprintf("%s is not a valid qualified name: %s", entityName, err) + return errors.New(errorMsg) +} + +/////////////////////////// +// Helper/Misc Functions // +/////////////////////////// + +// addLeadSlash(name) returns a (possibly fully qualified) resource name, +// inserting a leading '/' if it is of 3 parts (namespace/package/action) +// and lacking the leading '/'. +func addLeadSlash(name string) string { + parts := strings.Split(name, "/") + if len(parts) == 3 && parts[0] != "" { + name = "/" + name + } + return name +} + +// getNamespaceFromProp() returns the namespace from the FUNCTION_NAMESPACE +// environment variable; it is empty when the variable is not set. +func getNamespaceFromProp() string { + namespace := os.Getenv("FUNCTION_NAMESPACE") + return namespace +} + +// getQualifiedName(name, namespace) returns a fully qualified name given a +// (possibly fully qualified) resource name and optional namespace. +// +// Examples: +// (foo, None) => /_/foo +// (pkg/foo, None) => /_/pkg/foo +// (foo, ns) => /ns/foo +// (/ns/pkg/foo, None) => /ns/pkg/foo +// (/ns/pkg/foo, otherns) => /ns/pkg/foo +func getQualifiedName(name string, namespace string) string { + name = addLeadSlash(name) + if strings.HasPrefix(name, "/") { + return name + } else if strings.HasPrefix(namespace, "/") { + return fmt.Sprintf("%s/%s", namespace, name) + } else { + if len(namespace) == 0 { + namespace = getNamespaceFromProp() + } + return fmt.Sprintf("/%s/%s", namespace, name) + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_api_gateway_endpoint.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_api_gateway_endpoint.go new file mode 100644 index 00000000000..b3a63aadd3c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_api_gateway_endpoint.go @@ -0,0 +1,428 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "io/ioutil" + "path" + "strings" + + apigatewaysdk "github.com/IBM/apigateway-go-sdk" + "github.com/ghodss/yaml" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMApiGatewayEndPoint() *schema.Resource { + + return &schema.Resource{ + Create: resourceIBMApiGatewayEndPointCreate, + Read: resourceIBMApiGatewayEndPointGet, + Update: resourceIBMApiGatewayEndPointUpdate, + Delete: resourceIBMApiGatewayEndPointDelete, + Importer: &schema.ResourceImporter{}, + Exists: resourceIBMApiGatewayEndPointExists, + Schema: map[string]*schema.Schema{ + "service_instance_crn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "API Gateway service instance CRN", + }, + "open_api_doc_name": { + Type: schema.TypeString, + Required: true, + Description: "JSON file path", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Endpoint name", + }, + "routes": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Description: "Invokable routes for an endpoint", + }, + "managed": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Managed indicates if endpoint is online or offline.", + }, + "shared": { + Type: schema.TypeBool, + Computed: true, + Description: "The Shared status of an endpoint", + }, + "base_path": { + Type: schema.TypeString, + Computed: true, + Description: "Base path of an endpoint", + }, + "provider_id": { + Type: schema.TypeString, + Optional: true, + Default: "user-defined", + Description: "Provider ID of an endpoint. Allowable values are user-defined and whisk", + }, + "endpoint_id": { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint ID", + }, + "type": { + Type: schema.TypeString, + Optional: true, + Default: "unshare", + Description: "Action type of an endpoint. Allowable values are share, unshare, manage, unmanage", + }, + }, + } +} + +func resourceIBMApiGatewayEndPointCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return err + } + payload := &apigatewaysdk.CreateEndpointOptions{} + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + payload.Authorization = &oauthtoken + + serviceInstanceCrn := d.Get("service_instance_crn").(string) + payload.ServiceInstanceCrn = &serviceInstanceCrn + payload.ParentCrn = &serviceInstanceCrn + + var name string + if v, ok := d.GetOk("name"); ok && v != nil { + name = v.(string) + payload.Name = &name + } + + openAPIDocName := d.Get("open_api_doc_name").(string) + var document []byte + // set to true as a placeholder for a logic control switch + if true { + ext := path.Ext(openAPIDocName) + if strings.ToLower(ext) == ".json" { + data, err := ioutil.ReadFile(openAPIDocName) + if err != nil { + fmt.Println("Error reading file", err) + return err + } + document = data + } else if strings.ToLower(ext) == ".yaml" || strings.ToLower(ext) == ".yml" { + data, err := ioutil.ReadFile(openAPIDocName) + if err != nil { + fmt.Println("Error reading file", err) + return err + } + y2j, yErr := yaml.YAMLToJSON(data) + if yErr != nil { + fmt.Println("Error parsing yaml file", yErr) + return yErr + } + document = y2j + } else { + return fmt.Errorf("File extension type must be json or yaml") + + } + 
} + payload.OpenApiDoc = string(document) + + var managed bool + if m, ok := d.GetOk("managed"); ok && m != nil { + managed = m.(bool) + payload.Managed = &managed + } + var routes []string + if r, ok := d.GetOk("routes"); ok && r != nil { + // a TypeList comes back from the ResourceData as []interface{}, so convert element by element + for _, route := range r.([]interface{}) { + routes = append(routes, route.(string)) + } + payload.Routes = routes + } + + result, response, err := endpointservice.CreateEndpoint(payload) + if err != nil { + return fmt.Errorf("Error creating Endpoint: %s,%s", err, response) + } + + // the Terraform ID is a composite of the service instance CRN and the artifact ID, separated by "//" + d.SetId(fmt.Sprintf("%s//%s", *result.ServiceInstanceCrn, *result.ArtifactID)) + + return resourceIBMApiGatewayEndPointGet(d, meta) +} + +func resourceIBMApiGatewayEndPointGet(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return err + } + + parts := d.Id() + partslist := strings.Split(parts, "//") + + serviceInstanceCrn := partslist[0] + apiID := partslist[1] + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + + payload := apigatewaysdk.GetEndpointOptions{ + ServiceInstanceCrn: &serviceInstanceCrn, + ID: &apiID, + Authorization: &oauthtoken, + } + result, response, err := endpointservice.GetEndpoint(&payload) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Endpoint: %s\n%s", err, response) + } + d.Set("service_instance_crn", serviceInstanceCrn) + d.Set("endpoint_id", apiID) + if result.Routes != nil { + d.Set("routes", result.Routes) + } + if result.Name != nil { + d.Set("name", result.Name) + } + if result.Managed != nil { + d.Set("managed", result.Managed) + } + d.Set("provider_id", result.ProviderID) + d.Set("shared", result.Shared) + d.Set("base_path", result.BasePath) + return nil +} + +func resourceIBMApiGatewayEndPointUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return err + } + //payload for updating endpoint + payload := &apigatewaysdk.UpdateEndpointOptions{} + + parts := d.Id() + partslist := strings.Split(parts, "//") + serviceInstanceCrn := partslist[0] + apiID := partslist[1] + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + payload.Authorization = &oauthtoken + + payload.ID = &apiID + payload.NewArtifactID = &apiID + + payload.ServiceInstanceCrn = &serviceInstanceCrn + payload.NewParentCrn = &serviceInstanceCrn + payload.NewServiceInstanceCrn = &serviceInstanceCrn + + name := d.Get("name").(string) + payload.NewName = &name + + managed := d.Get("managed").(bool) + + openAPIDocName := d.Get("open_api_doc_name").(string) + var document []byte + // set to true as a placeholder for a logic control switch + if true { + ext := path.Ext(openAPIDocName) + if strings.ToLower(ext) == ".json" { + data, err := ioutil.ReadFile(openAPIDocName) + if err != nil { + fmt.Println("Error reading file", err) + return err + } + document = data + } else if strings.ToLower(ext) == ".yaml" || strings.ToLower(ext) == ".yml" { + data, err := ioutil.ReadFile(openAPIDocName) + if err != nil { + fmt.Println("Error reading file", err) + return err + } + y2j, yErr := yaml.YAMLToJSON(data) + if yErr != nil { + fmt.Println("Error parsing yaml file", yErr) + return yErr + } + document = y2j + } else { + return 
fmt.Errorf("File extension type must be json or yaml") + + } + } + payload.NewOpenApiDoc = string(document) + + //payload for updating action of endpoint + actionPayload := &apigatewaysdk.EndpointActionsOptions{} + + actionPayload.ServiceInstanceCrn = &serviceInstanceCrn + actionPayload.Authorization = &oauthtoken + + actionPayload.ID = &apiID + providerID := d.Get("provider_id").(string) + actionPayload.ProviderID = &providerID + + actionType := d.Get("type").(string) + actionPayload.Type = &actionType + + update := false + + if d.HasChange("name") { + name := d.Get("name").(string) + payload.NewName = &name + update = true + } + if d.HasChange("provider_id") { + providerID := d.Get("provider_id").(string) + actionPayload.ProviderID = &providerID + } + if d.HasChange("type") { + actionType := d.Get("type").(string) + + if managed == false && actionType == "share" { + return fmt.Errorf("Endpoint %s not managed", apiID) + } + actionPayload.Type = &actionType + + _, response, err := endpointservice.EndpointActions(actionPayload) + if err != nil { + return fmt.Errorf("Error updating Endpoint Action: %s,%s", err, response) + } + } + + if d.HasChange("open_api_doc_name") { + openAPIDocName := d.Get("open_api_doc_name").(string) + var document []byte + // set to true as placeholder for logic control swtich + if true { + ext := path.Ext(openAPIDocName) + if strings.ToLower(ext) == ".json" { + data, err := ioutil.ReadFile(openAPIDocName) + if err != nil { + fmt.Println("Error uploading file", err) + return err + } + document = data + } else if strings.ToLower(ext) == ".yaml" || strings.ToLower(ext) == ".yml" { + data, err := ioutil.ReadFile(openAPIDocName) + if err != nil { + fmt.Println("Error uploading file", err) + return err + } + y2j, yErr := yaml.YAMLToJSON(data) + if yErr != nil { + fmt.Println("Error parsing yaml file", err) + return err + } + document = y2j + } else { + return fmt.Errorf("File extension type must be json or yaml") + + } + } + payload.NewOpenApiDoc = string(document) + update = true + } + if d.HasChange("routes") { + routes := d.Get("routes").([]string) + payload.NewRoutes = routes + update = true + } + if update { + _, response, err := endpointservice.UpdateEndpoint(payload) + if err != nil { + return fmt.Errorf("Error updating Endpoint: %s,%s", err, response) + } + } + return resourceIBMApiGatewayEndPointGet(d, meta) +} +func resourceIBMApiGatewayEndPointDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return err + } + + parts := d.Id() + partslist := strings.Split(parts, "//") + serviceInstanceCrn := partslist[0] + apiID := partslist[1] + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + + payload := apigatewaysdk.DeleteEndpointOptions{ + ServiceInstanceCrn: &serviceInstanceCrn, + ID: &apiID, + Authorization: &oauthtoken, + } + + response, err := endpointservice.DeleteEndpoint(&payload) + + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error deleting Endpoint: %s\n%s", err, response) + } + d.SetId("") + + return nil +} + +func resourceIBMApiGatewayEndPointExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return false, err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err 
!= nil { + return false, err + } + + parts := d.Id() + partslist := strings.Split(parts, "//") + serviceInstanceCrn := partslist[0] + apiID := partslist[1] + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + + payload := apigatewaysdk.GetEndpointOptions{ + ServiceInstanceCrn: &serviceInstanceCrn, + ID: &apiID, + Authorization: &oauthtoken, + } + _, response, err := endpointservice.GetEndpoint(&payload) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_api_gateway_endpoint_subscription.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_api_gateway_endpoint_subscription.go new file mode 100644 index 00000000000..2161e23c488 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_api_gateway_endpoint_subscription.go @@ -0,0 +1,290 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + + apigatewaysdk "github.com/IBM/apigateway-go-sdk" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMApiGatewayEndpointSubscription() *schema.Resource { + + return &schema.Resource{ + Create: resourceIBMApiGatewayEndpointSubscriptionCreate, + Read: resourceIBMApiGatewayEndpointSubscriptionGet, + Update: resourceIBMApiGatewayEndpointSubscriptionUpdate, + Delete: resourceIBMApiGatewayEndpointSubscriptionDelete, + Importer: &schema.ResourceImporter{}, + Exists: resourceIBMApiGatewayEndpointSubscriptionExists, + Schema: map[string]*schema.Schema{ + "artifact_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Endpoint ID", + }, + "client_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Subscription ID; the API key that is used to create the subscription", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Subscription name", + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"external", "internal"}), + Description: "Subscription type. 
Allowable values are external, internal", + }, + "client_secret": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ConflictsWith: []string{"generate_secret"}, + Description: "Client secret of a subscription", + }, + "generate_secret": { + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"client_secret"}, + Description: "Indicates if the client secret has to be autogenerated", + }, + "secret_provided": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates if a client secret is provided to the subscription or not", + }, + }, + } +} +func resourceIBMApiGatewayEndpointSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return err + } + payload := &apigatewaysdk.CreateSubscriptionOptions{} + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + payload.Authorization = &oauthtoken + + artifactID := d.Get("artifact_id").(string) + payload.ArtifactID = &artifactID + + var clientID string + if c, ok := d.GetOk("client_id"); ok && c != nil { + clientID = c.(string) + payload.ClientID = &clientID + } + var name string + if v, ok := d.GetOk("name"); ok && v != nil { + name = v.(string) + payload.Name = &name + } + var shareType string + if v, ok := d.GetOk("type"); ok && v != nil { + shareType = v.(string) + // the subscription API calls internal subscriptions "bluemix"; map the friendlier name before sending + if shareType == "internal" { + shareType = "bluemix" + } + payload.Type = &shareType + } + var clientSecret string + if v, ok := d.GetOk("client_secret"); ok && v != nil { + clientSecret = v.(string) + payload.ClientSecret = &clientSecret + } + var generateSecret bool + if g, ok := d.GetOk("generate_secret"); ok && g != nil { + generateSecret = g.(bool) + payload.GenerateSecret = &generateSecret + } + + result, response, err := endpointservice.CreateSubscription(payload) + if err != nil { + return fmt.Errorf("Error creating Subscription: %s %s", err, response) + } + d.SetId(fmt.Sprintf("%s//%s", *result.ArtifactID, *result.ClientID)) + + return resourceIBMApiGatewayEndpointSubscriptionGet(d, meta) +} + +func resourceIBMApiGatewayEndpointSubscriptionGet(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return err + } + + parts := d.Id() + partslist := strings.Split(parts, "//") + artifactID := partslist[0] + clientID := partslist[1] + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + + payload := apigatewaysdk.GetSubscriptionOptions{ + ArtifactID: &artifactID, + ID: &clientID, + Authorization: &oauthtoken, + } + result, response, err := endpointservice.GetSubscription(&payload) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Subscription: %s\n%s", err, response) + } + d.Set("artifact_id", result.ArtifactID) + d.Set("client_id", result.ClientID) + // map the API's "bluemix" value back to the documented "internal" value + if *result.Type == "bluemix" { + *result.Type = "internal" + } + d.Set("type", result.Type) + if result.Name != nil { + d.Set("name", result.Name) + } + d.Set("secret_provided", result.SecretProvided) + return nil +} + +func resourceIBMApiGatewayEndpointSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := 
meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return err + } + payload := &apigatewaysdk.UpdateSubscriptionOptions{} + + parts := d.Id() + partslist := strings.Split(parts, "//") + artifactID := partslist[0] + clientID := partslist[1] + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + payload.Authorization = &oauthtoken + + payload.ID = &clientID + payload.NewClientID = &clientID + + payload.ArtifactID = &artifactID + payload.NewArtifactID = &artifactID + + name := d.Get("name").(string) + payload.NewName = &name + + update := false + + if d.HasChange("name") { + name := d.Get("name").(string) + payload.NewName = &name + update = true + } + if d.HasChange("client_secret") { + clientSecret := d.Get("client_secret").(string) + secretpayload := &apigatewaysdk.AddSubscriptionSecretOptions{ + Authorization: &oauthtoken, + ArtifactID: &artifactID, + ID: &clientID, + ClientSecret: &clientSecret, + } + _, SecretResponse, err := endpointservice.AddSubscriptionSecret(secretpayload) + if err != nil { + return fmt.Errorf("Error Adding Secret to Subscription: %s,%s", err, SecretResponse) + } + } + if update { + _, response, err := endpointservice.UpdateSubscription(payload) + if err != nil { + return fmt.Errorf("Error updating Subscription: %s,%s", err, response) + } + } + return resourceIBMApiGatewayEndpointSubscriptionGet(d, meta) +} + +func resourceIBMApiGatewayEndpointSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return err + } + parts := d.Id() + partslist := strings.Split(parts, "//") + artifactID := partslist[0] + clientID := partslist[1] + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + + payload := apigatewaysdk.DeleteSubscriptionOptions{ + ArtifactID: &artifactID, + ID: &clientID, + Authorization: &oauthtoken, + } + response, err := endpointservice.DeleteSubscription(&payload) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error deleting Subscription: %s\n%s", err, response) + } + d.SetId("") + + return nil +} + +func resourceIBMApiGatewayEndpointSubscriptionExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return false, err + } + endpointservice, err := meta.(ClientSession).APIGateway() + if err != nil { + return false, err + } + parts := d.Id() + partslist := strings.Split(parts, "//") + artifactID := partslist[0] + clientID := partslist[1] + + oauthtoken := sess.Config.IAMAccessToken + oauthtoken = strings.Replace(oauthtoken, "Bearer ", "", -1) + + payload := apigatewaysdk.GetSubscriptionOptions{ + ArtifactID: &artifactID, + ID: &clientID, + Authorization: &oauthtoken, + } + _, response, err := endpointservice.GetSubscription(&payload) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app.go new file mode 100644 index 00000000000..5737c658cf3 --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app.go @@ -0,0 +1,591 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + v2 "github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + homedir "github.com/mitchellh/go-homedir" +) + +func resourceIBMApp() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMAppCreate, + Read: resourceIBMAppRead, + Update: resourceIBMAppUpdate, + Delete: resourceIBMAppDelete, + Exists: resourceIBMAppExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the app", + }, + "memory": { + Description: "The amount of memory each instance should have. In megabytes.", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "instances": { + Description: "The number of instances", + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "disk_quota": { + Description: "The maximum amount of disk available to an instance of an app. In megabytes.", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "space_guid": { + Description: "Define the space GUID to which the app belongs", + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "buildpack": { + Description: "Buildpack to build the app. 3 options: a) Blank means autodetection; b) A Git URL pointing to a buildpack; c) Name of an installed buildpack.", + Type: schema.TypeString, + Optional: true, + }, + "environment_json": { + Description: "Key/value pairs of all the environment variables to run in your app. 
Does not include any system or service variables.", + Type: schema.TypeMap, + Optional: true, + }, + "route_guid": { + Description: "Define the route GUIDs that should be bound to the application.", + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "service_instance_guid": { + Description: "Define the service instance GUIDs that should be bound to this application.", + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "app_path": { + Description: "Define the path of the zip file of the application.", + Type: schema.TypeString, + Required: true, + }, + "app_version": { + Description: "Version of the application", + Type: schema.TypeString, + Optional: true, + }, + "command": { + Description: "The initial command for the app", + Type: schema.TypeString, + Optional: true, + }, + "wait_time_minutes": { + Description: "Define the timeout, in minutes, to wait for the app instances to start/update/restage etc.", + Type: schema.TypeInt, + Optional: true, + Default: 20, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "health_check_http_endpoint": { + Description: "Endpoint called to determine if the app is healthy.", + Type: schema.TypeString, + Optional: true, + }, + "health_check_type": { + Description: "Type of health check to perform.", + Type: schema.TypeString, + Optional: true, + Default: "port", + ValidateFunc: validateAllowedStringValue([]string{"port", "process"}), + }, + "health_check_timeout": { + Description: "Timeout in seconds for health checking of a staged app when starting up.", + Type: schema.TypeInt, + Optional: true, + }, + }, + } +} + +func resourceIBMAppCreate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + appAPI := cfClient.Apps() + name := d.Get("name").(string) + spaceGUID := d.Get("space_guid").(string) + healthCheckType := d.Get("health_check_type").(string) + + appCreatePayload := v2.AppRequest{ + Name: helpers.String(name), + SpaceGUID: helpers.String(spaceGUID), + HealthCheckType: helpers.String(healthCheckType), + } + + if memory, ok := d.GetOk("memory"); ok { + appCreatePayload.Memory = memory.(int) + } + + if instances, ok := d.GetOk("instances"); ok { + appCreatePayload.Instances = instances.(int) + } + + if diskQuota, ok := d.GetOk("disk_quota"); ok { + appCreatePayload.DiskQuota = diskQuota.(int) + } + + if buildpack, ok := d.GetOk("buildpack"); ok { + appCreatePayload.BuildPack = helpers.String(buildpack.(string)) + } + + if environmentJSON, ok := d.GetOk("environment_json"); ok { + appCreatePayload.EnvironmentJSON = helpers.Map(environmentJSON.(map[string]interface{})) + + } + + if command, ok := d.GetOk("command"); ok { + appCreatePayload.Command = helpers.String(command.(string)) + } + + if healthChkEndpoint, ok := d.GetOk("health_check_http_endpoint"); ok { + appCreatePayload.HealthCheckHTTPEndpoint = helpers.String(healthChkEndpoint.(string)) + } + + if healthChkTimeout, ok := d.GetOk("health_check_timeout"); ok { + appCreatePayload.HealthCheckTimeout = healthChkTimeout.(int) + } + + _, err = appAPI.FindByName(spaceGUID, name) + if err == nil { + return fmt.Errorf("%s already exists in the given space %s", name, spaceGUID) + } + + log.Println("[INFO] Creating Cloud Foundry Application") + app, err := appAPI.Create(appCreatePayload) + if err != nil { + 
return fmt.Errorf("Error creating app: %s", err) + } + + appGUID := app.Metadata.GUID + log.Println("[INFO] Cloud Foundary Application is created successfully") + + d.SetId(appGUID) + + if v, ok := d.Get("route_guid").(*schema.Set); ok && v.Len() > 0 { + log.Println("[INFO] Bind the route with cloud foundary application") + for _, routeID := range v.List() { + _, err := appAPI.BindRoute(appGUID, routeID.(string)) + if err != nil { + return fmt.Errorf("Error binding route %s to app: %s", routeID.(string), err) + } + } + } + if v, ok := d.Get("service_instance_guid").(*schema.Set); ok && v.Len() > 0 { + sbAPI := cfClient.ServiceBindings() + for _, svcID := range v.List() { + req := v2.ServiceBindingRequest{ + ServiceInstanceGUID: svcID.(string), + AppGUID: appGUID, + } + _, err := sbAPI.Create(req) + if err != nil { + return fmt.Errorf("Error binding service instance %s to app: %s", svcID.(string), err) + } + } + } + log.Println("[INFO] Upload the app bits to the cloud foundary application") + applicationZip, err := processAppZipPath(d.Get("app_path").(string)) + if err != nil { + return err + } + + _, err = appAPI.Upload(appGUID, applicationZip) + if err != nil { + return fmt.Errorf("Error uploading app bits: %s", err) + } + + err = restartApp(appGUID, d, meta) + if err != nil { + return err + } + log.Printf("[INFO] Application: %s has started successfully", name) + return resourceIBMAppRead(d, meta) +} + +func resourceIBMAppRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + appAPI := cfClient.Apps() + appGUID := d.Id() + + appData, err := appAPI.Get(appGUID) + if err != nil { + return fmt.Errorf("Error retrieving app details %s : %s", appGUID, err) + } + + d.SetId(appData.Metadata.GUID) + d.Set("name", appData.Entity.Name) + d.Set("memory", appData.Entity.Memory) + d.Set("instances", appData.Entity.Instances) + d.Set("space_guid", appData.Entity.SpaceGUID) + d.Set("disk_quota", appData.Entity.DiskQuota) + d.Set("buildpack", appData.Entity.BuildPack) + d.Set("environment_json", Flatten(appData.Entity.EnvironmentJSON)) + d.Set("command", appData.Entity.Command) + d.Set("health_check_type", appData.Entity.HealthCheckType) + d.Set("health_check_http_endpoint", appData.Entity.HealthCheckHTTPEndpoint) + d.Set("health_check_timeout", appData.Entity.HealthCheckTimeout) + + route, err := appAPI.ListRoutes(appGUID) + if err != nil { + return err + } + if len(route) > 0 { + d.Set("route_guid", flattenRoute(route)) + } + + svcBindings, err := appAPI.ListServiceBindings(appGUID) + if err != nil { + return err + } + if len(svcBindings) > 0 { + d.Set("service_instance_guid", flattenServiceBindings(svcBindings)) + } + + return nil + +} + +func resourceIBMAppUpdate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + appAPI := cfClient.Apps() + appGUID := d.Id() + + appUpdatePayload := v2.AppRequest{} + restartRequired := false + restageRequired := false + + waitTimeout := time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute + + if d.HasChange("name") { + appUpdatePayload.Name = helpers.String(d.Get("name").(string)) + } + + if d.HasChange("memory") { + appUpdatePayload.Memory = d.Get("memory").(int) + } + + if d.HasChange("instances") { + appUpdatePayload.Instances = d.Get("instances").(int) + } + + if d.HasChange("disk_quota") { + appUpdatePayload.DiskQuota = d.Get("disk_quota").(int) + } + + if d.HasChange("buildpack") { + 
+		appUpdatePayload.BuildPack = helpers.String(d.Get("buildpack").(string))
+		restageRequired = true
+	}
+
+	if d.HasChange("command") {
+		appUpdatePayload.Command = helpers.String(d.Get("command").(string))
+		restartRequired = true
+	}
+
+	if d.HasChange("environment_json") {
+		appUpdatePayload.EnvironmentJSON = helpers.Map(d.Get("environment_json").(map[string]interface{}))
+		restageRequired = true
+	}
+
+	if d.HasChange("health_check_type") {
+		appUpdatePayload.HealthCheckType = helpers.String(d.Get("health_check_type").(string))
+		restartRequired = true
+	}
+
+	if d.HasChange("health_check_http_endpoint") {
+		appUpdatePayload.HealthCheckHTTPEndpoint = helpers.String(d.Get("health_check_http_endpoint").(string))
+		restartRequired = true
+	}
+
+	if d.HasChange("health_check_timeout") {
+		appUpdatePayload.HealthCheckTimeout = d.Get("health_check_timeout").(int)
+		restartRequired = true
+	}
+
+	log.Println("[INFO] Updating Cloud Foundry application")
+
+	_, err = appAPI.Update(appGUID, appUpdatePayload)
+	if err != nil {
+		return fmt.Errorf("Error updating application: %s", err)
+	}
+	//TODO find the digest of the zip and avoid the upload if it is the same
+	if d.HasChange("app_path") || d.HasChange("app_version") {
+		appZipLoc, err := processAppZipPath(d.Get("app_path").(string))
+		if err != nil {
+			return err
+		}
+		log.Println("[DEBUG] Uploading application bits")
+		_, err = appAPI.Upload(appGUID, appZipLoc)
+		if err != nil {
+			return fmt.Errorf("Error uploading app: %s", err)
+		}
+		restartRequired = true
+	}
+
+	err = updateRouteGUID(appGUID, appAPI, d)
+	if err != nil {
+		return err
+	}
+
+	restage, err := updateServiceInstanceGUID(appGUID, d, meta)
+	if err != nil {
+		return err
+	}
+	if restage {
+		restageRequired = true
+	}
+
+	/*Wait if any previous staging is going on
+	log.Println("[INFO] Waiting to see whether any previous staging is in progress")
+	state, err := appAPI.WaitForAppStatus(v2.AppStagedState, appGUID, waitTimeout)
+	if waitTimeout != 0 && (err != nil || state == v2.AppPendingState) {
+		return fmt.Errorf("The application is still in %s from the last operation. Please try again after some time by increasing the timeout value %q", state, err)
+	}*/
+
+	//If both restage and restart are required then restage alone is enough, as it starts everything over
+	if restageRequired {
+		log.Println("[INFO] Restaging the application since buildpack, environment or service bindings have changed")
+		err := restageApp(appGUID, d, meta)
+		if err != nil {
+			return err
+		}
+	} else if restartRequired {
+		err := restartApp(appGUID, d, meta)
+		if err != nil {
+			return err
+		}
+	} else {
+		//In case only memory/disk etc. are updated, the cloud controller destroys the current instances
+		//and spins up new ones, so we wait until they come up again
+		state, err := appAPI.WaitForInstanceStatus(v2.AppRunningState, appGUID, waitTimeout)
+		if waitTimeout != 0 && (err != nil || state != v2.AppRunningState) {
+			return fmt.Errorf("All application instances aren't %s, the current status is %s, %q", v2.AppRunningState, state, err)
+		}
+	}
+
+	return resourceIBMAppRead(d, meta)
+}
+
+func resourceIBMAppDelete(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	appAPI := cfClient.Apps()
+	id := d.Id()
+
+	err = appAPI.Delete(id, false, true)
+	if err != nil {
+		return fmt.Errorf("Error deleting app: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMAppExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return false, err
+	}
+	appAPI := cfClient.Apps()
+	id := d.Id()
+
+	app, err := appAPI.Get(id)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+
+	return app.Metadata.GUID == id, nil
+}
+
+func updateRouteGUID(appGUID string, appAPI v2.Apps, d *schema.ResourceData) (err error) {
+	if d.HasChange("route_guid") {
+		ors, nrs := d.GetChange("route_guid")
+		or := ors.(*schema.Set)
+		nr := nrs.(*schema.Set)
+
+		remove := expandStringList(or.Difference(nr).List())
+		add := expandStringList(nr.Difference(or).List())
+
+		if len(add) > 0 {
+			for i := range add {
+				_, err = appAPI.BindRoute(appGUID, add[i])
+				if err != nil {
+					return fmt.Errorf("Error while binding route %q to application %s: %q", add[i], appGUID, err)
+				}
+			}
+		}
+		if len(remove) > 0 {
+			for i := range remove {
+				err = appAPI.UnBindRoute(appGUID, remove[i])
+				if err != nil {
+					return fmt.Errorf("Error while un-binding route %q from application %s: %q", remove[i], appGUID, err)
+				}
+			}
+		}
+	}
+	return
+}
+
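+// updateRouteGUID above and updateServiceInstanceGUID below share the same
+// pattern: diff the old and new sets from state, bind what was added and
+// unbind what was removed. As an illustration (placeholder GUIDs), changing
+//
+//   route_guid = ["r1-guid", "r2-guid"]
+// to
+//   route_guid = ["r2-guid", "r3-guid"]
+//
+// binds "r3-guid" and unbinds "r1-guid", while "r2-guid" is left untouched.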
+func updateServiceInstanceGUID(appGUID string, d *schema.ResourceData, meta interface{}) (restageRequired bool, err error) {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return false, err
+	}
+	appAPI := cfClient.Apps()
+	sbAPI := cfClient.ServiceBindings()
+	if d.HasChange("service_instance_guid") {
+		oss, nss := d.GetChange("service_instance_guid")
+		os := oss.(*schema.Set)
+		ns := nss.(*schema.Set)
+		remove := expandStringList(os.Difference(ns).List())
+		add := expandStringList(ns.Difference(os).List())
+
+		if len(add) > 0 {
+			for i := range add {
+				sbPayload := v2.ServiceBindingRequest{
+					ServiceInstanceGUID: add[i],
+					AppGUID:             appGUID,
+				}
+				_, err = sbAPI.Create(sbPayload)
+				if err != nil {
+					err = fmt.Errorf("Error while binding service instance %s to application %s: %q", add[i], appGUID, err)
+					return
+				}
+				restageRequired = true
+			}
+		}
+		if len(remove) > 0 {
+			var appFilters, svcFilters string
+			var bindings []v2.ServiceBinding
+			appFilters, err = new(v2.Filter).Name("app_guid").Eq(appGUID).Build()
+			if err != nil {
+				return
+			}
+			svcFilters, err = new(v2.Filter).Name("service_instance_guid").In(remove...).Build()
+			if err != nil {
+				return
+			}
+			bindings, err = sbAPI.List(appFilters, svcFilters)
+			if err != nil {
+				return
+			}
+			sbIds := make([]string, len(bindings))
+			for i, sb := range bindings {
+				sbIds[i] = sb.GUID
+			}
+			err = appAPI.DeleteServiceBindings(appGUID, sbIds...)
+			if err != nil {
+				err = fmt.Errorf("Error while un-binding service instances %s from application %s: %q", remove, appGUID, err)
+				return
+			}
+		}
+	}
+	return
+}
+
+func restartApp(appGUID string, d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	appAPI := cfClient.Apps()
+
+	appUpdatePayload := v2.AppRequest{
+		State: helpers.String(v2.AppStoppedState),
+	}
+	log.Println("[INFO] Stopping Application")
+	_, err = appAPI.Update(appGUID, appUpdatePayload)
+	if err != nil {
+		return fmt.Errorf("Error updating application status to %s %s", v2.AppStoppedState, err)
+	}
+	waitTimeout := time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute
+	log.Println("[INFO] Starting Application")
+	status, err := appAPI.Start(appGUID, waitTimeout)
+	if err != nil {
+		return fmt.Errorf("Error while starting application : %s", err)
+	}
+	if waitTimeout != 0 {
+		return checkAppStatus(status)
+	}
+	return nil
+}
+
+func restageApp(appGUID string, d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	appAPI := cfClient.Apps()
+
+	log.Println("[INFO] Restaging Application")
+	waitTimeout := time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute
+	status, err := appAPI.Restage(appGUID, waitTimeout)
+	if err != nil {
+		return fmt.Errorf("Error while restaging application : %s", err)
+	}
+	if waitTimeout != 0 {
+		return checkAppStatus(status)
+	}
+	return nil
+}
+
+func checkAppStatus(status *v2.AppState) error {
+	if status.PackageState != v2.AppStagedState {
+		return fmt.Errorf("Application couldn't be staged, the current status is %s", status.PackageState)
+	}
+	if status.InstanceState != v2.AppRunningState {
+		return fmt.Errorf("All application instances aren't %s, the current status is %s", v2.AppRunningState, status.InstanceState)
+	}
+	return nil
+}
+
+func processAppZipPath(path string) (string, error) {
+	applicationZip, err := homedir.Expand(path)
+	if err != nil {
+		return path, fmt.Errorf("The home directory in the given path %s couldn't be expanded", path)
+	}
+	if !helpers.FileExists(applicationZip) {
+		return path, fmt.Errorf("The given app path %s doesn't exist", path)
+	}
+	return applicationZip, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_config_environment.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_config_environment.go
new file mode 100644
index 00000000000..3d8e4ba49cd
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_config_environment.go
@@ -0,0 +1,239 @@
+// Copyright IBM Corp. 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func resourceIbmAppConfigEnvironment() *schema.Resource {
+	return &schema.Resource{
+		Read:     resourceEnvironmentRead,
+		Create:   resourceEnvironmentCreate,
+		Update:   resourceEnvironmentUpdate,
+		Delete:   resourceEnvironmentDelete,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "GUID of the App Configuration service.
Get it from the service instance credentials section of the dashboard.", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Environment name.", + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + Description: "Environment Id.", + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Environment description", + }, + "tags": { + Type: schema.TypeString, + Optional: true, + Description: "Tags associated with the environment", + }, + "color_code": { + Type: schema.TypeString, + Optional: true, + Description: "Color code to distinguish the environment.", + }, + "created_time": { + Type: schema.TypeString, + Computed: true, + Description: "Creation time of the environment.", + }, + "updated_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last modified time of the environment data.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "Environment URL.", + }, + }, + } +} + +func getAppConfigClient(meta interface{}, guid string) (*appconfigurationv1.AppConfigurationV1, error) { + appconfigClient, err := meta.(ClientSession).AppConfigurationV1() + if err != nil { + return nil, err + } + bluemixSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return nil, err + } + appConfigURL := fmt.Sprintf("https://%s.apprapp.cloud.ibm.com/apprapp/feature/v1/instances/%s", bluemixSession.Config.Region, guid) + url := envFallBack([]string{"IBMCLOUD_APP_CONFIG_API_ENDPOINT"}, appConfigURL) + appconfigClient.Service.Options.URL = url + return appconfigClient, nil +} + +func resourceEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { + guid := d.Get("guid").(string) + appconfigClient, err := getAppConfigClient(meta, guid) + if err != nil { + return err + } + options := &appconfigurationv1.CreateEnvironmentOptions{} + + options.SetName(d.Get("name").(string)) + options.SetEnvironmentID(d.Get("environment_id").(string)) + if _, ok := d.GetOk("description"); ok { + options.SetDescription(d.Get("description").(string)) + } + if _, ok := d.GetOk("tags"); ok { + options.SetTags(d.Get("tags").(string)) + } + if _, ok := d.GetOk("color_code"); ok { + options.SetColorCode(d.Get("color_code").(string)) + } + _, response, err := appconfigClient.CreateEnvironment(options) + + if err != nil { + return fmt.Errorf("[DEBUG] CreateEnvironment failed %s\n%s", err, response) + } + d.SetId(fmt.Sprintf("%s/%s", guid, *options.EnvironmentID)) + + return resourceEnvironmentRead(d, meta) +} + +func resourceEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { + if ok := d.HasChanges("name", "tags", "color_code", "description"); ok { + parts, err := idParts(d.Id()) + if err != nil { + return nil + } + appconfigClient, err := getAppConfigClient(meta, parts[0]) + if err != nil { + return err + } + + options := &appconfigurationv1.UpdateEnvironmentOptions{} + + options.SetName(d.Get("name").(string)) + options.SetEnvironmentID(d.Get("environment_id").(string)) + if _, ok := d.GetOk("description"); ok { + options.SetDescription(d.Get("description").(string)) + } + if _, ok := d.GetOk("tags"); ok { + options.SetTags(d.Get("tags").(string)) + } + if _, ok := d.GetOk("color_code"); ok { + options.SetColorCode(d.Get("color_code").(string)) + } + + _, response, err := appconfigClient.UpdateEnvironment(options) + if err != nil { + return fmt.Errorf("[DEBUG] UpdateEnvironment failed %s\n%s", err, response) + } + return resourceEnvironmentRead(d, meta) + } + return nil +} + 
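+// A minimal usage sketch for this resource (assuming it is registered as
+// "ibm_app_config_environment", per the file name); the instance GUID is an
+// illustrative placeholder:
+//
+//   resource "ibm_app_config_environment" "dev" {
+//     guid           = "00000000-0000-0000-0000-000000000000"
+//     environment_id = "dev"
+//     name           = "Dev"
+//     description    = "Development environment"
+//     tags           = "development"
+//     color_code     = "#FDD13A"
+//   }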
+func resourceEnvironmentRead(d *schema.ResourceData, meta interface{}) error { + parts, err := idParts(d.Id()) + if err != nil { + return nil + } + appconfigClient, err := getAppConfigClient(meta, parts[0]) + if err != nil { + return err + } + + options := &appconfigurationv1.GetEnvironmentOptions{} + + options.SetExpand(true) + options.SetEnvironmentID(parts[1]) + + result, response, err := appconfigClient.GetEnvironment(options) + + if err != nil { + return fmt.Errorf("[DEBUG] GetEnvironment failed %s\n%s", err, response) + } + d.Set("guid", parts[0]) + if result.Name != nil { + if err = d.Set("name", result.Name); err != nil { + return fmt.Errorf("error setting name: %s", err) + } + } + if result.EnvironmentID != nil { + if err = d.Set("environment_id", result.EnvironmentID); err != nil { + return fmt.Errorf("error setting environment_id: %s", err) + } + } + if result.Description != nil { + if err = d.Set("description", result.Description); err != nil { + return fmt.Errorf("error setting description: %s", err) + } + } + if result.Tags != nil { + if err = d.Set("tags", result.Tags); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + } + if result.ColorCode != nil { + if err = d.Set("color_code", result.ColorCode); err != nil { + return fmt.Errorf("error setting color_code: %s", err) + } + } + if result.CreatedTime != nil { + if err = d.Set("created_time", result.CreatedTime.String()); err != nil { + return fmt.Errorf("error setting created_time: %s", err) + } + } + if result.UpdatedTime != nil { + if err = d.Set("updated_time", result.UpdatedTime.String()); err != nil { + return fmt.Errorf("error setting updated_time: %s", err) + } + } + if result.Href != nil { + if err = d.Set("href", result.Href); err != nil { + return fmt.Errorf("error setting href: %s", err) + } + } + return nil +} + +func resourceEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { + parts, err := idParts(d.Id()) + if err != nil { + return nil + } + + appconfigClient, err := getAppConfigClient(meta, parts[0]) + if err != nil { + return err + } + + options := &appconfigurationv1.DeleteEnvironmentOptions{} + options.SetEnvironmentID(parts[1]) + + response, err := appconfigClient.DeleteEnvironment(options) + + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("[DEBUG] DeleteEnvironment failed %s\n%s", err, response) + } + d.SetId("") + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_config_feature.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_config_feature.go new file mode 100644 index 00000000000..3b74e558d2e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_config_feature.go @@ -0,0 +1,517 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+	"github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1"
+	"github.com/IBM/go-sdk-core/v5/core"
+)
+
+func resourceIbmIbmAppConfigFeature() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIbmIbmAppConfigFeatureCreate,
+		Read:     resourceIbmIbmAppConfigFeatureRead,
+		Update:   resourceIbmIbmAppConfigFeatureUpdate,
+		Delete:   resourceIbmIbmAppConfigFeatureDelete,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "GUID of the App Configuration service. Get it from the service instance credentials section of the dashboard.",
+			},
+			"environment_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Environment Id.",
+			},
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Feature name.",
+			},
+			"feature_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Feature id.",
+			},
+			"type": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: InvokeValidator("ibm_app_config_feature", "type"),
+				Description:  "Type of the feature (BOOLEAN, STRING, NUMERIC).",
+			},
+			"enabled_value": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Value of the feature when it is enabled. The value can be BOOLEAN, STRING or a NUMERIC value as per the `type` attribute.",
+			},
+			"disabled_value": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Value of the feature when it is disabled. The value can be BOOLEAN, STRING or a NUMERIC value as per the `type` attribute.",
+			},
+			"description": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Feature description.",
+			},
+			"tags": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Tags associated with the feature.",
+			},
+			"segment_rules": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				Description: "Specify the targeting rules that are used to set different feature flag values for different segments.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"rules": {
+							Type:        schema.TypeList,
+							Required:    true,
+							Description: "Rules array.",
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"segments": {
+										Type:        schema.TypeList,
+										Required:    true,
+										Description: "List of segment ids that are used for targeting using the rule.",
+										Elem:        &schema.Schema{Type: schema.TypeString},
+									},
+								},
+							},
+						},
+						"value": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Value to be used for evaluation for this rule. The value can be Boolean, String or a Numeric value as per the `type` attribute.",
+						},
+						"order": {
+							Type:        schema.TypeInt,
+							Required:    true,
+							Description: "Order of the rule, used during evaluation. The evaluation is performed in the order defined and the value associated with the first matching rule is used for evaluation.",
+						},
+					},
+				},
+			},
+			"collections": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				Description: "List of collection ids representing the collections that are associated with the specified feature flag.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"collection_id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Collection id.",
+						},
+					},
+				},
+			},
+			"segment_exists": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Denotes if the targeting rules are specified for the feature flag.",
+			},
+			"enabled": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "The state of the feature flag.",
+			},
+			"created_time": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Creation time of the feature flag.",
+			},
+			"updated_time": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Last modified time of the feature flag data.",
+			},
+			"href": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Feature flag URL.",
+			},
+		},
+	}
+}
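+// A minimal usage sketch for this resource (assuming it is registered as
+// "ibm_app_config_feature", matching the validator below); GUIDs and ids are
+// illustrative placeholders:
+//
+//   resource "ibm_app_config_feature" "flag" {
+//     guid           = "00000000-0000-0000-0000-000000000000"
+//     environment_id = "dev"
+//     name           = "Dark mode"
+//     feature_id     = "dark-mode"
+//     type           = "BOOLEAN"
+//     enabled_value  = "true"
+//     disabled_value = "false"
+//
+//     collections {
+//       collection_id = "web-app"
+//     }
+//   }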
+
+func resourceIbmIbmAppConfigFeatureCreate(d *schema.ResourceData, meta interface{}) error {
+	guid := d.Get("guid").(string)
+	appconfigClient, err := getAppConfigClient(meta, guid)
+	if err != nil {
+		return err
+	}
+	options := &appconfigurationv1.CreateFeatureOptions{}
+	options.SetType(d.Get("type").(string))
+	options.SetName(d.Get("name").(string))
+	options.SetFeatureID(d.Get("feature_id").(string))
+	options.SetEnabledValue(d.Get("enabled_value").(string))
+	options.SetEnvironmentID(d.Get("environment_id").(string))
+	options.SetDisabledValue(d.Get("disabled_value").(string))
+
+	if _, ok := d.GetOk("description"); ok {
+		options.SetDescription(d.Get("description").(string))
+	}
+	if _, ok := d.GetOk("tags"); ok {
+		options.SetTags(d.Get("tags").(string))
+	}
+
+	if _, ok := d.GetOk("segment_rules"); ok {
+		var segmentRules []appconfigurationv1.SegmentRule
+		for _, e := range d.Get("segment_rules").([]interface{}) {
+			value := e.(map[string]interface{})
+			segmentRulesItem, err := resourceIbmAppConfigFeatureMapToSegmentRule(d, value)
+			if err != nil {
+				return err
+			}
+			segmentRules = append(segmentRules, segmentRulesItem)
+		}
+		options.SetSegmentRules(segmentRules)
+	}
+	if _, ok := d.GetOk("collections"); ok {
+		var collections []appconfigurationv1.CollectionRef
+		for _, e := range d.Get("collections").([]interface{}) {
+			value := e.(map[string]interface{})
+			collectionsItem := resourceIbmAppConfigFeatureMapToCollectionRef(value)
+			collections = append(collections, collectionsItem)
+		}
+		options.SetCollections(collections)
+	}
+
+	feature, response, err := appconfigClient.CreateFeature(options)
+
+	if err != nil {
+		log.Printf("CreateFeature failed %s\n%s", err, response)
+		return err
+	}
+	d.SetId(fmt.Sprintf("%s/%s/%s", guid, *options.EnvironmentID, *feature.FeatureID))
+	return resourceIbmIbmAppConfigFeatureRead(d, meta)
+}
+
+func resourceIbmIbmAppConfigFeatureUpdate(d *schema.ResourceData, meta interface{}) error {
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return nil
+	}
+	appconfigClient, err := getAppConfigClient(meta, parts[0])
+	if err != nil {
+		return err
+	}
+
+	options := &appconfigurationv1.UpdateFeatureOptions{}
+	options.SetEnvironmentID(parts[1])
+	options.SetFeatureID(parts[2])
+
+	if ok := d.HasChanges("name", "enabled_value", "disabled_value", "description", "tags", "segment_rules", "collections"); ok {
+
options.SetName(d.Get("name").(string)) + options.SetEnabledValue(d.Get("enabled_value").(string)) + options.SetDisabledValue(d.Get("disabled_value").(string)) + + if _, ok := d.GetOk("description"); ok { + options.SetDescription(d.Get("description").(string)) + } + if _, ok := d.GetOk("tags"); ok { + options.SetTags(d.Get("tags").(string)) + } + if _, ok := d.GetOk("segment_rules"); ok { + var segmentRules []appconfigurationv1.SegmentRule + for _, e := range d.Get("segment_rules").([]interface{}) { + value := e.(map[string]interface{}) + segmentRulesItem, err := resourceIbmAppConfigFeatureMapToSegmentRule(d, value) + if err != nil { + return err + } + segmentRules = append(segmentRules, segmentRulesItem) + } + options.SetSegmentRules(segmentRules) + } + if _, ok := d.GetOk("collections"); ok { + var collections []appconfigurationv1.CollectionRef + for _, e := range d.Get("collections").([]interface{}) { + value := e.(map[string]interface{}) + collectionsItem := resourceIbmAppConfigFeatureMapToCollectionRef(value) + collections = append(collections, collectionsItem) + } + options.SetCollections(collections) + } + + _, response, err := appconfigClient.UpdateFeature(options) + if err != nil { + log.Printf("[DEBUG] UpdateFeature %s\n%s", err, response) + return err + } + return resourceIbmIbmAppConfigFeatureRead(d, meta) + } + return nil +} + +func resourceIbmIbmAppConfigFeatureRead(d *schema.ResourceData, meta interface{}) error { + parts, err := idParts(d.Id()) + if err != nil { + return nil + } + appconfigClient, err := getAppConfigClient(meta, parts[0]) + if err != nil { + return err + } + + options := &appconfigurationv1.GetFeatureOptions{} + options.SetEnvironmentID(parts[1]) + options.SetFeatureID(parts[2]) + + result, response, err := appconfigClient.GetFeature(options) + if err != nil { + return fmt.Errorf("[DEBUG] GetFeature failed %s\n%s", err, response) + } + + d.Set("guid", parts[0]) + d.Set("environment_id", parts[1]) + if result.Name != nil { + if err = d.Set("name", result.Name); err != nil { + return fmt.Errorf("error setting name: %s", err) + } + } + if result.FeatureID != nil { + if err = d.Set("feature_id", result.FeatureID); err != nil { + return fmt.Errorf("error setting feature_id: %s", err) + } + } + if result.Type != nil { + if err = d.Set("type", result.Type); err != nil { + return fmt.Errorf("error setting type: %s", err) + } + } + if result.Description != nil { + if err = d.Set("description", result.Description); err != nil { + return fmt.Errorf("error setting description: %s", err) + } + + } + if result.Tags != nil { + if err = d.Set("tags", result.Tags); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + } + + if result.SegmentRules != nil { + segmentRules := []map[string]interface{}{} + for _, segmentRulesItem := range result.SegmentRules { + segmentRulesItemMap := resourceIbmAppConfigFeatureSegmentRuleToMap(segmentRulesItem) + segmentRules = append(segmentRules, segmentRulesItemMap) + } + if err = d.Set("segment_rules", segmentRules); err != nil { + return fmt.Errorf("error setting segment_rules: %s", err) + } + } + if result.Collections != nil { + collections := []map[string]interface{}{} + for _, collectionsItem := range result.Collections { + collectionsItemMap := resourceIbmAppConfigFeatureCollectionRefToMap(collectionsItem) + collections = append(collections, collectionsItemMap) + } + if err = d.Set("collections", collections); err != nil { + return fmt.Errorf("error setting collections: %s", err) + } + } + if result.SegmentExists != nil 
{ + if err = d.Set("segment_exists", result.SegmentExists); err != nil { + return fmt.Errorf("error setting segment_exists: %s", err) + } + } + if result.CreatedTime != nil { + if err = d.Set("created_time", result.CreatedTime.String()); err != nil { + return fmt.Errorf("error setting created_time: %s", err) + } + } + if result.UpdatedTime != nil { + if err = d.Set("updated_time", result.UpdatedTime.String()); err != nil { + return fmt.Errorf("error setting updated_time: %s", err) + } + } + if result.Href != nil { + if err = d.Set("href", result.Href); err != nil { + return fmt.Errorf("error setting href: %s", err) + } + } + if result.Enabled != nil { + if err = d.Set("enabled", result.Enabled); err != nil { + return fmt.Errorf("error setting enabled: %s", err) + } + } + + if result.EnabledValue != nil { + enabledValue := result.EnabledValue + + switch enabledValue.(interface{}).(type) { + case string: + d.Set("enabled_value", enabledValue.(string)) + case float64: + d.Set("enabled_value", fmt.Sprintf("%v", enabledValue)) + case bool: + d.Set("enabled_value", strconv.FormatBool(enabledValue.(bool))) + } + } + + if result.DisabledValue != nil { + disabledValue := result.DisabledValue + + switch disabledValue.(interface{}).(type) { + case string: + d.Set("disabled_value", disabledValue.(string)) + case float64: + d.Set("disabled_value", fmt.Sprintf("%v", disabledValue)) + case bool: + d.Set("disabled_value", strconv.FormatBool(disabledValue.(bool))) + } + } + return nil +} + +func resourceIbmIbmAppConfigFeatureDelete(d *schema.ResourceData, meta interface{}) error { + parts, err := idParts(d.Id()) + if err != nil { + return nil + } + appconfigClient, err := getAppConfigClient(meta, parts[0]) + if err != nil { + return err + } + + options := &appconfigurationv1.DeleteFeatureOptions{} + options.SetEnvironmentID(parts[1]) + options.SetFeatureID(parts[2]) + + response, err := appconfigClient.DeleteFeature(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("[DEBUG] DeleteFeature failed %s\n%s", err, response) + } + + d.SetId("") + + return nil +} + +func resourceIbmAppConfigFeatureValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "type", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: "BOOLEAN, NUMERIC, STRING", + }, + ) + + resourceValidator := ResourceValidator{ResourceName: "ibm_app_config_feature", Schema: validateSchema} + return &resourceValidator +} + +// output +func resourceIbmAppConfigFeatureSegmentRuleToMap(segmentRule appconfigurationv1.SegmentRule) map[string]interface{} { + segmentRuleMap := map[string]interface{}{} + + rules := []map[string]interface{}{} + for _, rulesItem := range segmentRule.Rules { + rulesItemMap := resourceIbmAppConfigFeatureRuleToMap(rulesItem) + rules = append(rules, rulesItemMap) + } + + segmentRuleMap["rules"] = rules + segmentRuleMap["order"] = intValue(segmentRule.Order) + + segmentValue := segmentRule.Value + switch segmentValue.(interface{}).(type) { + case string: + segmentRuleMap["value"] = segmentValue.(string) + case float64: + segmentRuleMap["value"] = fmt.Sprintf("%v", segmentValue) + case bool: + segmentRuleMap["value"] = strconv.FormatBool(segmentValue.(bool)) + } + + return segmentRuleMap +} + +func resourceIbmAppConfigFeatureRuleToMap(rule appconfigurationv1.TargetSegments) map[string]interface{} { + 
ruleMap := map[string]interface{}{} + ruleMap["segments"] = rule.Segments + return ruleMap +} + +func resourceIbmAppConfigFeatureCollectionRefToMap(collectionRef appconfigurationv1.CollectionRef) map[string]interface{} { + collectionRefMap := map[string]interface{}{} + collectionRefMap["collection_id"] = collectionRef.CollectionID + collectionRefMap["name"] = collectionRef.Name + return collectionRefMap +} + +// input +func resourceIbmAppConfigFeatureMapToSegmentRule(d *schema.ResourceData, segmentRuleMap map[string]interface{}) (appconfigurationv1.SegmentRule, error) { + segmentRule := appconfigurationv1.SegmentRule{} + + rules := []appconfigurationv1.TargetSegments{} + for _, rulesItem := range segmentRuleMap["rules"].([]interface{}) { + rulesItemModel := resourceIbmAppConfigFeatureMapToRule(rulesItem.(map[string]interface{})) + rules = append(rules, rulesItemModel) + } + segmentRule.Rules = rules + + segmentRule.Order = core.Int64Ptr(int64(segmentRuleMap["order"].(int))) + + ruleValue := segmentRuleMap["value"].(string) + switch d.Get("type").(string) { + case "STRING": + segmentRule.Value = ruleValue + case "NUMERIC": + v, err := strconv.ParseFloat(ruleValue, 64) + if err != nil { + return segmentRule, fmt.Errorf("'value' parameter in 'segment_rules' has wrong value: %s", err) + } + segmentRule.Value = v + case "BOOLEAN": + if ruleValue == "false" { + segmentRule.Value = false + } else if ruleValue == "true" { + segmentRule.Value = true + } else { + return segmentRule, fmt.Errorf("'value' parameter in 'segment_rules' has wrong value") + } + } + + return segmentRule, nil +} + +func resourceIbmAppConfigFeatureMapToRule(ruleMap map[string]interface{}) appconfigurationv1.TargetSegments { + rule := appconfigurationv1.TargetSegments{} + + segments := []string{} + for _, segmentsItem := range ruleMap["segments"].([]interface{}) { + segments = append(segments, segmentsItem.(string)) + } + rule.Segments = segments + + return rule +} + +func resourceIbmAppConfigFeatureMapToCollectionRef(collectionRefMap map[string]interface{}) appconfigurationv1.CollectionRef { + collectionRef := appconfigurationv1.CollectionRef{} + collectionRef.CollectionID = core.StringPtr(collectionRefMap["collection_id"].(string)) + + return collectionRef +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_domain_private.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_domain_private.go new file mode 100644 index 00000000000..96ae38867bb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_domain_private.go @@ -0,0 +1,130 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	v2 "github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func resourceIBMAppDomainPrivate() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMAppDomainPrivateCreate,
+		Read:     resourceIBMAppDomainPrivateRead,
+		Update:   resourceIBMAppDomainPrivateUpdate,
+		Delete:   resourceIBMAppDomainPrivateDelete,
+		Exists:   resourceIBMAppDomainPrivateExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				Description:  "The name of the domain",
+				ValidateFunc: validateDomainName,
+			},
+
+			"org_guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The organization that owns the domain.",
+			},
+			"tags": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+		},
+	}
+}
+
+func resourceIBMAppDomainPrivateCreate(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	name := d.Get("name").(string)
+	orgGUID := d.Get("org_guid").(string)
+
+	params := v2.PrivateDomainRequest{
+		Name:    name,
+		OrgGUID: orgGUID,
+	}
+
+	prdomain, err := cfClient.PrivateDomains().Create(params)
+	if err != nil {
+		return fmt.Errorf("Error creating private domain: %s", err)
+	}
+
+	d.SetId(prdomain.Metadata.GUID)
+
+	return resourceIBMAppDomainPrivateRead(d, meta)
+}
+
+func resourceIBMAppDomainPrivateUpdate(d *schema.ResourceData, meta interface{}) error {
+	//Only tags can change, and they are only stored locally, so there is nothing to validate or send to the real API at this point
+	return nil
+}
+
+func resourceIBMAppDomainPrivateRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	prdomainGUID := d.Id()
+
+	prdomain, err := cfClient.PrivateDomains().Get(prdomainGUID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving private domain: %s", err)
+	}
+	d.Set("name", prdomain.Entity.Name)
+	d.Set("org_guid", prdomain.Entity.OwningOrganizationGUID)
+
+	return nil
+}
+
+func resourceIBMAppDomainPrivateDelete(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+
+	prdomainGUID := d.Id()
+
+	err = cfClient.PrivateDomains().Delete(prdomainGUID, false)
+	if err != nil {
+		return fmt.Errorf("Error deleting private domain: %s", err)
+	}
+
+	d.SetId("")
+
+	return nil
+}
+
+func resourceIBMAppDomainPrivateExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return false, err
+	}
+	prdomainGUID := d.Id()
+
+	prdomain, err := cfClient.PrivateDomains().Get(prdomainGUID)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+
+	return prdomain.Metadata.GUID == prdomainGUID, nil
+}
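+// A minimal usage sketch for this resource (assuming it is registered as
+// "ibm_app_domain_private", per the file name); the org GUID is an
+// illustrative placeholder:
+//
+//   resource "ibm_app_domain_private" "example" {
+//     name     = "internal.example.com"
+//     org_guid = "00000000-0000-0000-0000-000000000000"
+//     tags     = ["env:dev"]
+//   }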
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_domain_shared.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_domain_shared.go
new file mode 100644
index 00000000000..c33441baaf2
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_domain_shared.go
@@ -0,0 +1,131 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	v2 "github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func resourceIBMAppDomainShared() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMAppDomainSharedCreate,
+		Read:     resourceIBMAppDomainSharedRead,
+		Update:   resourceIBMAppDomainSharedUpdate,
+		Delete:   resourceIBMAppDomainSharedDelete,
+		Exists:   resourceIBMAppDomainSharedExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				Description:  "The name of the domain",
+				ValidateFunc: validateDomainName,
+			},
+
+			"router_group_guid": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "The guid of the router group.",
+			},
+
+			"tags": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+		},
+	}
+}
+
+func resourceIBMAppDomainSharedCreate(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	name := d.Get("name").(string)
+	routerGroupGUID := d.Get("router_group_guid").(string)
+
+	params := v2.SharedDomainRequest{
+		Name:            name,
+		RouterGroupGUID: routerGroupGUID,
+	}
+
+	shdomain, err := cfClient.SharedDomains().Create(params)
+	if err != nil {
+		return fmt.Errorf("Error creating shared domain: %s", err)
+	}
+
+	d.SetId(shdomain.Metadata.GUID)
+
+	return resourceIBMAppDomainSharedRead(d, meta)
+}
+
+func resourceIBMAppDomainSharedUpdate(d *schema.ResourceData, meta interface{}) error {
+	//Only tags can change, and they are only stored locally, so there is nothing to validate or send to the real API at this point
+	return nil
+}
+
+func resourceIBMAppDomainSharedRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	shdomainGUID := d.Id()
+
+	shdomain, err := cfClient.SharedDomains().Get(shdomainGUID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving shared domain: %s", err)
+	}
+	d.Set("name", shdomain.Entity.Name)
+	d.Set("router_group_guid", shdomain.Entity.RouterGroupGUID)
+
+	return nil
+}
+
+func resourceIBMAppDomainSharedDelete(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+
+	shdomainGUID := d.Id()
+
+	err = cfClient.SharedDomains().Delete(shdomainGUID, false)
+	if err != nil {
+		return fmt.Errorf("Error deleting shared domain: %s", err)
+	}
+
+	d.SetId("")
+
+	return nil
+}
+
+func resourceIBMAppDomainSharedExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return false, err
+	}
+	shdomainGUID := d.Id()
+
+	shdomain, err := cfClient.SharedDomains().Get(shdomainGUID)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+
+	return shdomain.Metadata.GUID == shdomainGUID, nil
+}
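+// A minimal usage sketch for this resource (assuming it is registered as
+// "ibm_app_domain_shared", per the file name); the router group GUID is an
+// illustrative placeholder and is only needed for TCP routing:
+//
+//   resource "ibm_app_domain_shared" "example" {
+//     name              = "shared.example.com"
+//     router_group_guid = "00000000-0000-0000-0000-000000000000"
+//   }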
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_route.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_route.go
new file mode 100644
index 00000000000..d4441c62564
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_app_route.go
@@ -0,0 +1,191 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+
+	v2 "github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2"
+	"github.com/IBM-Cloud/bluemix-go/helpers"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func resourceIBMAppRoute() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMAppRouteCreate,
+		Read:     resourceIBMAppRouteRead,
+		Update:   resourceIBMAppRouteUpdate,
+		Delete:   resourceIBMAppRouteDelete,
+		Exists:   resourceIBMAppRouteExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"host": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "The host portion of the route. Required for shared domains.",
+			},
+
+			"space_guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The guid of the associated space",
+			},
+
+			"domain_guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The guid of the associated domain",
+			},
+
+			"port": {
+				Description:  "The port of the route. Supported for domains of TCP router groups only.",
+				Optional:     true,
+				Type:         schema.TypeInt,
+				ValidateFunc: validateRoutePort,
+			},
+
+			"path": {
+				Description:  "The path for a route as raw text. Paths must be between 2 and 128 characters, must start with a forward slash '/', and must not contain a '?'.",
+				Optional:     true,
+				Type:         schema.TypeString,
+				ValidateFunc: validateRoutePath,
+			},
+
+			"tags": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+		},
+	}
+}
+
+func resourceIBMAppRouteCreate(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+
+	spaceGUID := d.Get("space_guid").(string)
+	domainGUID := d.Get("domain_guid").(string)
+
+	params := v2.RouteRequest{
+		SpaceGUID:  spaceGUID,
+		DomainGUID: domainGUID,
+	}
+
+	if host, ok := d.GetOk("host"); ok {
+		params.Host = host.(string)
+	}
+
+	if port, ok := d.GetOk("port"); ok {
+		params.Port = helpers.Int(port.(int))
+	}
+
+	if path, ok := d.GetOk("path"); ok {
+		params.Path = path.(string)
+	}
+
+	route, err := cfClient.Routes().Create(params)
+	if err != nil {
+		return fmt.Errorf("Error creating route: %s", err)
+	}
+
+	d.SetId(route.Metadata.GUID)
+
+	return resourceIBMAppRouteRead(d, meta)
+}
+
+func resourceIBMAppRouteRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	routeGUID := d.Id()
+
+	route, err := cfClient.Routes().Get(routeGUID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving route: %s", err)
+	}
+
+	d.Set("host", route.Entity.Host)
+	d.Set("space_guid", route.Entity.SpaceGUID)
+	d.Set("domain_guid", route.Entity.DomainGUID)
+	if route.Entity.Port != nil {
+		d.Set("port", route.Entity.Port)
+	}
+	d.Set("path", route.Entity.Path)
+
+	return nil
+}
+
+func resourceIBMAppRouteUpdate(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+
+	routeGUID := d.Id()
+	params := v2.RouteUpdateRequest{}
+
+
if d.HasChange("host") { + params.Host = helpers.String(d.Get("host").(string)) + } + + if d.HasChange("port") { + params.Port = helpers.Int(d.Get("port").(int)) + } + + if d.HasChange("path") { + params.Path = helpers.String(d.Get("path").(string)) + } + + _, err = cfClient.Routes().Update(routeGUID, params) + if err != nil { + return fmt.Errorf("Error updating route: %s", err) + } + return resourceIBMAppRouteRead(d, meta) +} + +func resourceIBMAppRouteDelete(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + routeGUID := d.Id() + + err = cfClient.Routes().Delete(routeGUID, false) + if err != nil { + return fmt.Errorf("Error deleting route: %s", err) + } + + d.SetId("") + + return nil +} +func resourceIBMAppRouteExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + routeGUID := d.Id() + + route, err := cfClient.Routes().Get(routeGUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return route.Metadata.GUID == routeGUID, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cdn.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cdn.go new file mode 100644 index 00000000000..3df61c55675 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cdn.go @@ -0,0 +1,609 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +const str string = ".cdn.appdomain.cloud" + +func resourceIBMCDN() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCDNCreate, + Read: resourceIBMCDNRead, + Update: resourceIBMCDNUpdate, + Delete: resourceIBMCDNDelete, + Exists: resourceIBMCDNExists, + + Schema: map[string]*schema.Schema{ + "host_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Host name", + }, + "vendor_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "akamai", + ForceNew: true, + Description: "Vendor name", + }, + + "origin_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "HOST_SERVER", + ForceNew: true, + ValidateFunc: validateAllowedStringValue([]string{"HOST_SERVER", "OBJECT_STORAGE"}), + Description: "Origin type info", + }, + "origin_address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "origin address info", + }, + "bucket_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Bucket name", + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "HTTP", + ForceNew: true, + ValidateFunc: validateAllowedStringValue([]string{"HTTP", "HTTPS", "HTTP_AND_HTTPS"}), + Description: "Protocol name", + }, + "http_port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 80, + Description: "HTTP port number", + }, + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Status info of 
the CDN instance", + }, + "https_port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 443, + Description: "HTTPS port number", + }, + "cname": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return false + } + + if strings.Compare(n+str, o) == 0 || (n == "" && o != "") { + return true + } + return false + }, + Description: "cname info", + }, + "header": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Header info", + }, + "respect_headers": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "respect headers info", + }, + "file_extension": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "File extension info", + }, + "certificate_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"SHARED_SAN_CERT", "WILDCARD_CERT"}), + ForceNew: true, + Description: "Certificate type", + }, + "cache_key_query_rule": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"include-all", "ignore-all", "ignore: space separated query-args", "include: space separated query-args"}), + Default: "include-all", + Description: "query rule info", + }, + "performance_configuration": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "General web delivery", + ForceNew: true, + Description: "performance configuration info", + }, + "path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/*", + ForceNew: true, + Description: "Path details", + }, + }, + } +} + +func resourceIBMCDNCreate(d *schema.ResourceData, meta interface{}) error { + ///create session + sess := meta.(ClientSession).SoftLayerSession() + ///get the value of all the parameters + domain := d.Get("host_name").(string) + vendorname := d.Get("vendor_name").(string) + origintype := d.Get("origin_type").(string) + originaddress := d.Get("origin_address").(string) + protocol := d.Get("protocol").(string) + httpport := d.Get("http_port").(int) + httpsport := d.Get("https_port").(int) + bucketname := d.Get("bucket_name").(string) + path := d.Get("path").(string) + header := d.Get("header").(string) + cachekeyqueryrule := d.Get("cache_key_query_rule").(string) + performanceconfiguration := d.Get("performance_configuration").(string) + respectheaders := d.Get("respect_headers").(bool) + var rHeader = "0" + if respectheaders { + rHeader = "1" + } + cname := d.Get("cname").(string) + certificateType := d.Get("certificate_type").(string) + if name, ok := d.GetOk("cname"); ok { + cname = name.(string) + str + } + + ///creat an object of CDN service + service := services.GetNetworkCdnMarketplaceConfigurationMappingService(sess) + //////pass the parameters to create domain mapping + if origintype == "OBJECT_STORAGE" && protocol == "HTTP" { + receipt1, err := service.CreateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Cname: sl.String(cname), + Protocol: sl.String(protocol), + HttpPort: sl.Int(httpport), + OriginType: sl.String(origintype), + BucketName: sl.String(bucketname), + Header: sl.String(header), + RespectHeaders: sl.String(rHeader), + CacheKeyQueryRule: 
sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + if err != nil { + return fmt.Errorf("Error creating CDN: %s", err) + } + + d.SetId(*receipt1[0].UniqueId) + id, err := strconv.Atoi((d.Id())) + result1, err := service.VerifyDomainMapping(&id) + log.Print("The status of domain mapping ", result1) + return resourceIBMCDNRead(d, meta) + + } + if origintype == "OBJECT_STORAGE" && protocol == "HTTPS" { + receipt2, err := service.CreateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Cname: sl.String(cname), + Protocol: sl.String(protocol), + HttpsPort: sl.Int(httpsport), + OriginType: sl.String(origintype), + BucketName: sl.String(bucketname), + Header: sl.String(header), + RespectHeaders: sl.String(rHeader), + CertificateType: sl.String(certificateType), + CacheKeyQueryRule: sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + if err != nil { + return fmt.Errorf("Error creating CDN: %s", err) + } + + d.SetId(*receipt2[0].UniqueId) + id, err := strconv.Atoi((d.Id())) + result2, err := service.VerifyDomainMapping(&id) + log.Print("The status of domain mapping ", result2) + return resourceIBMCDNRead(d, meta) + } + if origintype == "OBJECT_STORAGE" && protocol == "HTTP_AND_HTTPS" { + receipt3, err := service.CreateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Cname: sl.String(cname), + Protocol: sl.String(protocol), + HttpPort: sl.Int(httpport), + HttpsPort: sl.Int(httpsport), + OriginType: sl.String(origintype), + BucketName: sl.String(bucketname), + Header: sl.String(header), + RespectHeaders: sl.String(rHeader), + CertificateType: sl.String(certificateType), + CacheKeyQueryRule: sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + if err != nil { + return fmt.Errorf("Error creating CDN: %s", err) + } + + d.SetId(*receipt3[0].UniqueId) + id, err := strconv.Atoi((d.Id())) + result3, err := service.VerifyDomainMapping(&id) + log.Print("The status of domain mapping ", result3) + return resourceIBMCDNRead(d, meta) + } + if origintype == "HOST_SERVER" && protocol == "HTTP" { + receipt4, err := service.CreateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Cname: sl.String(cname), + Protocol: sl.String(protocol), + HttpPort: sl.Int(httpport), + OriginType: sl.String(origintype), + Header: sl.String(header), + RespectHeaders: sl.String(rHeader), + CacheKeyQueryRule: sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + if err != nil { + return fmt.Errorf("Error creating CDN: %s", err) + } + + d.SetId(*receipt4[0].UniqueId) + id, err := strconv.Atoi((d.Id())) + result4, err := service.VerifyDomainMapping(&id) + log.Print("The status of domain mapping ", result4) + return resourceIBMCDNRead(d, meta) + } + if origintype == "HOST_SERVER" && protocol == "HTTPS" { + receipt5, err := service.CreateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: 
sl.String(vendorname),
+			Domain:                   sl.String(domain),
+			Path:                     sl.String(path),
+			Cname:                    sl.String(cname),
+			Protocol:                 sl.String(protocol),
+			HttpsPort:                sl.Int(httpsport),
+			OriginType:               sl.String(origintype),
+			Header:                   sl.String(header),
+			RespectHeaders:           sl.String(rHeader),
+			CertificateType:          sl.String(certificateType),
+			CacheKeyQueryRule:        sl.String(cachekeyqueryrule),
+			PerformanceConfiguration: sl.String(performanceconfiguration),
+		})
+		if err != nil {
+			return fmt.Errorf("Error creating CDN: %s", err)
+		}
+
+		d.SetId(*receipt5[0].UniqueId)
+		id, err := strconv.Atoi((d.Id()))
+		result5, err := service.VerifyDomainMapping(&id)
+		log.Print("The status of domain mapping ", result5)
+		return resourceIBMCDNRead(d, meta)
+	}
+	if origintype == "HOST_SERVER" && protocol == "HTTP_AND_HTTPS" {
+		receipt6, err := service.CreateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{
+			Origin:                   sl.String(originaddress),
+			VendorName:               sl.String(vendorname),
+			Domain:                   sl.String(domain),
+			Path:                     sl.String(path),
+			Cname:                    sl.String(cname),
+			Protocol:                 sl.String(protocol),
+			HttpPort:                 sl.Int(httpport),
+			HttpsPort:                sl.Int(httpsport),
+			OriginType:               sl.String(origintype),
+			Header:                   sl.String(header),
+			RespectHeaders:           sl.String(rHeader),
+			CertificateType:          sl.String(certificateType),
+			CacheKeyQueryRule:        sl.String(cachekeyqueryrule),
+			PerformanceConfiguration: sl.String(performanceconfiguration),
+		})
+		if err != nil {
+			return fmt.Errorf("Error creating CDN: %s", err)
+		}
+
+		d.SetId(*receipt6[0].UniqueId)
+		id, err := strconv.Atoi((d.Id()))
+		result6, err := service.VerifyDomainMapping(&id)
+		log.Print("The status of domain mapping ", result6)
+		return resourceIBMCDNRead(d, meta)
+	}
+
+	return nil
+}
+
+func resourceIBMCDNRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkCdnMarketplaceConfigurationMappingService(sess)
+	cdnId := sl.String(d.Id())
+	// Read the remote mapping and refresh the local state from it. The error
+	// must be checked before the result is dereferenced.
+	read, err := service.ListDomainMappingByUniqueId(cdnId)
+	if err != nil {
+		return fmt.Errorf("Error reading CDN mapping: %s", err)
+	}
+	if len(read) == 0 {
+		d.SetId("")
+		return nil
+	}
+	// Keys passed to d.Set must match the schema attribute names.
+	d.Set("origin_address", *read[0].OriginHost)
+	d.Set("vendor_name", *read[0].VendorName)
+	d.Set("host_name", *read[0].Domain)
+	d.Set("header", *read[0].Header)
+	d.Set("cname", *read[0].Cname)
+	d.Set("origin_type", *read[0].OriginType)
+	d.Set("status", *read[0].Status)
+	if *read[0].OriginType == "OBJECT_STORAGE" {
+		d.Set("bucket_name", *read[0].BucketName)
+	}
+	if *read[0].Protocol == "HTTP" || *read[0].Protocol == "HTTP_AND_HTTPS" {
+		d.Set("http_port", *read[0].HttpPort)
+	}
+	if *read[0].Protocol == "HTTPS" || *read[0].Protocol == "HTTP_AND_HTTPS" {
+		d.Set("https_port", *read[0].HttpsPort)
+	}
+	d.Set("protocol", *read[0].Protocol)
+	d.Set("respect_headers", *read[0].RespectHeaders)
+	d.Set("certificate_type", *read[0].CertificateType)
+	d.Set("cache_key_query_rule", *read[0].CacheKeyQueryRule)
+	d.Set("path", *read[0].Path)
+	d.Set("performance_configuration", *read[0].PerformanceConfiguration)
+	return nil
+}
+
+func resourceIBMCDNUpdate(d *schema.ResourceData, meta interface{}) error {
+	// The API takes the full mapping on update, so changed and unchanged
+	// attributes are read back and resubmitted together.
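+	// Illustrative Terraform usage for this resource; the attribute names come
+	// from the schema above, while the values (vendor, origin, port) are
+	// placeholders rather than defaults:
+	//
+	//   resource "ibm_cdn" "example" {
+	//     host_name      = "www.example.com"
+	//     vendor_name    = "akamai"
+	//     origin_type    = "HOST_SERVER"
+	//     origin_address = "203.0.113.10"
+	//     protocol       = "HTTP"
+	//     http_port      = 80
+	//   }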
+ sess := meta.(ClientSession).SoftLayerSession() + domain := d.Get("host_name").(string) + vendorname := d.Get("vendor_name").(string) + origintype := d.Get("origin_type").(string) + originaddress := d.Get("origin_address").(string) + protocol := d.Get("protocol").(string) + httpport := d.Get("http_port").(int) + httpsport := d.Get("https_port").(int) + path := d.Get("path").(string) + cname := d.Get("cname").(string) + header := d.Get("header").(string) + bucketname := d.Get("bucket_name").(string) + var fileextension string + if v, ok := d.GetOk("file_extension"); ok { + fileextension = v.(string) + } else { + fileextension = "" + } + respectheaders := d.Get("respect_headers").(bool) + var rHeader = "0" + if respectheaders { + rHeader = "1" + } + certificateType := d.Get("certificate_type").(string) + cachekeyqueryrule := d.Get("cache_key_query_rule").(string) + performanceconfiguration := d.Get("performance_configuration").(string) + uniqueId := d.Id() + service := services.GetNetworkCdnMarketplaceConfigurationMappingService(sess) + ///pass the changed as well as unchanged parameters to update the resource. + + if origintype == "HOST_SERVER" && protocol == "HTTP_AND_HTTPS" { + update1, err := service.UpdateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Protocol: sl.String(protocol), + Cname: sl.String(cname), + HttpPort: sl.Int(httpport), + HttpsPort: sl.Int(httpsport), + OriginType: sl.String(origintype), + RespectHeaders: sl.String(rHeader), + Header: sl.String(header), + UniqueId: sl.String(uniqueId), + CertificateType: sl.String(certificateType), + CacheKeyQueryRule: sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + ///Print the response of the requested service. + log.Print("Response for cdn update: ", update1) + + if err != nil { + log.Println(err) + } + return resourceIBMCDNRead(d, meta) + } + + if origintype == "HOST_SERVER" && protocol == "HTTPS" { + update2, err := service.UpdateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Protocol: sl.String(protocol), + Cname: sl.String(cname), + HttpsPort: sl.Int(httpsport), + OriginType: sl.String(origintype), + RespectHeaders: sl.String(rHeader), + Header: sl.String(header), + UniqueId: sl.String(uniqueId), + CertificateType: sl.String(certificateType), + CacheKeyQueryRule: sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + ///Print the response of the requested service. 
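+		// NOTE: update errors below are only logged, not returned, so the
+		// follow-up Read reflects whatever state the API actually applied.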
+ log.Print("Response for cdn update: ", update2) + if err != nil { + log.Println(err) + } + return resourceIBMCDNRead(d, meta) + + } + + if origintype == "HOST_SERVER" && protocol == "HTTP" { + update3, err := service.UpdateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Protocol: sl.String(protocol), + Cname: sl.String(cname), + HttpPort: sl.Int(httpport), + OriginType: sl.String(origintype), + RespectHeaders: sl.String(rHeader), + Header: sl.String(header), + UniqueId: sl.String(uniqueId), + CacheKeyQueryRule: sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + ///Print the response of the requested service. + log.Print("Response for cdn update: ", update3) + if err != nil { + log.Println(err) + } + return resourceIBMCDNRead(d, meta) + + } + + if origintype == "OBJECT_STORAGE" && protocol == "HTTP_AND_HTTPS" { + update4, err := service.UpdateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Protocol: sl.String(protocol), + Cname: sl.String(cname), + HttpPort: sl.Int(httpport), + HttpsPort: sl.Int(httpsport), + OriginType: sl.String(origintype), + RespectHeaders: sl.String(rHeader), + BucketName: sl.String(bucketname), + Header: sl.String(header), + FileExtension: sl.String(fileextension), + UniqueId: sl.String(uniqueId), + CertificateType: sl.String(certificateType), + CacheKeyQueryRule: sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + ///Print the response of the requested service. + log.Print("Response for cdn update: ", update4) + if err != nil { + log.Println(err) + } + return resourceIBMCDNRead(d, meta) + } + + if origintype == "OBJECT_STORAGE" && protocol == "HTTPS" { + update5, err := service.UpdateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Protocol: sl.String(protocol), + Cname: sl.String(cname), + HttpsPort: sl.Int(httpsport), + OriginType: sl.String(origintype), + RespectHeaders: sl.String(rHeader), + BucketName: sl.String(bucketname), + Header: sl.String(header), + FileExtension: sl.String(fileextension), + UniqueId: sl.String(uniqueId), + CertificateType: sl.String(certificateType), + CacheKeyQueryRule: sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + ///Print the response of the requested service. 
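+		// Unlike the HOST_SERVER branches above, the OBJECT_STORAGE branches
+		// also send BucketName and FileExtension with the update.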
+ log.Print("Response for cdn update: ", update5) + if err != nil { + log.Println(err) + } + return resourceIBMCDNRead(d, meta) + } + + if origintype == "OBJECT_STORAGE" && protocol == "HTTP" { + update6, err := service.UpdateDomainMapping(&datatypes.Container_Network_CdnMarketplace_Configuration_Input{ + Origin: sl.String(originaddress), + VendorName: sl.String(vendorname), + Domain: sl.String(domain), + Path: sl.String(path), + Protocol: sl.String(protocol), + Cname: sl.String(cname), + HttpPort: sl.Int(httpport), + OriginType: sl.String(origintype), + RespectHeaders: sl.String(rHeader), + BucketName: sl.String(bucketname), + Header: sl.String(header), + FileExtension: sl.String(fileextension), + UniqueId: sl.String(uniqueId), + CacheKeyQueryRule: sl.String(cachekeyqueryrule), + PerformanceConfiguration: sl.String(performanceconfiguration), + }) + ///Print the response of the requested service. + log.Print("Response for cdn update: ", update6) + if err != nil { + log.Println(err) + } + return resourceIBMCDNRead(d, meta) + } + + return nil +} + +func resourceIBMCDNDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkCdnMarketplaceConfigurationMappingService(sess) + + cdnId := sl.String(d.Id()) + ///pass the id to delete the resource. + delete, err := service.DeleteDomainMapping(cdnId) + if err != nil { + log.Println(err) + return err + } + ///print the delete response + log.Print("Delete response is : ", delete) + d.SetId("") + return nil +} + +func resourceIBMCDNExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkCdnMarketplaceConfigurationMappingService(sess) + cdnId := sl.String(d.Id()) + ///check if the resource exists with the given id. + exists, err := service.ListDomainMappingByUniqueId(cdnId) + ///Print the response for exist request. + log.Print("Exists response is : ", exists) + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 || len(exists) == 0 { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving CDN mapping info: %s", err) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_certificate_manager_import.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_certificate_manager_import.go new file mode 100644 index 00000000000..92c4d8cac94 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_certificate_manager_import.go @@ -0,0 +1,228 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/IBM-Cloud/bluemix-go/models"
+)
+
+func resourceIBMCertificateManagerImport() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMCertificateManagerImportCertificate,
+		Read:     resourceIBMCertificateManagerGet,
+		Update:   resourceIBMCertificateManagerUpdate,
+		Importer: &schema.ResourceImporter{},
+		Delete:   resourceIBMCertificateManagerDelete,
+		Exists:   resourceIBMCertificateManagerExists,
+		Schema: map[string]*schema.Schema{
+			"certificate_manager_instance_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Instance ID of the certificate manager resource",
+			},
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Name of the certificate",
+			},
+			"data": {
+				Type:        schema.TypeMap,
+				Required:    true,
+				Description: "Certificate data",
+			},
+			"description": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Description of the certificate instance",
+			},
+			"issuer": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Certificate issuer info",
+			},
+			"begins_on": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "Certificate validity start date",
+			},
+			"expires_on": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "Certificate expiry date",
+			},
+			"imported": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"status": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"has_previous": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"key_algorithm": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"algorithm": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceIBMCertificateManagerImportCertificate(d *schema.ResourceData, meta interface{}) error {
+
+	cmService, err := meta.(ClientSession).CertificateManagerAPI()
+	if err != nil {
+		return err
+	}
+
+	instanceID := d.Get("certificate_manager_instance_id").(string)
+	importData := models.Data{}
+	name := d.Get("name").(string)
+	description := d.Get("description").(string)
+
+	if certificateimpdata, ok := d.GetOk("data"); ok && certificateimpdata != nil {
+		datainfo := certificateimpdata.(map[string]interface{})
+		if content, ok := datainfo["content"]; ok && content != nil {
+			importData.Content = content.(string)
+		}
+		if privkey, ok := datainfo["priv_key"]; ok && privkey != nil {
+			importData.Privatekey = privkey.(string)
+		}
+		if intermediate, ok := datainfo["intermediate"]; ok && intermediate != nil {
+			importData.IntermediateCertificate = intermediate.(string)
+		}
+	}
+
+	client := cmService.Certificate()
+	payload := models.CertificateImportData{Name: name, Description: description, Data: importData}
+
+	result, importCertError := client.ImportCertificate(instanceID, payload)
+	if importCertError != nil {
+		return importCertError
+	}
+	d.SetId(result.ID)
+	return resourceIBMCertificateManagerUpdate(d, meta)
+}
+func resourceIBMCertificateManagerGet(d *schema.ResourceData, meta interface{}) error {
+	cmService, err := meta.(ClientSession).CertificateManagerAPI()
+	if err != nil {
+		return err
+	}
+	certID := d.Id()
+	certificatedata, err := cmService.Certificate().GetCertData(certID)
+	if err != nil {
+		return err
+	}
+
+	cminstanceid := strings.Split(certID, ":certificate:")
+	d.Set("certificate_manager_instance_id", cminstanceid[0]+"::")
+	d.Set("name", certificatedata.Name)
+	d.Set("description", certificatedata.Description)
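+	// The certificate payload is written back into the "data" map; the
+	// private key and intermediate entries are only set when present.
+	if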
certificatedata.Data != nil { + data := map[string]interface{}{ + "content": certificatedata.Data.Content, + } + if certificatedata.Data.Privatekey != "" { + data["priv_key"] = certificatedata.Data.Privatekey + } + if certificatedata.Data.IntermediateCertificate != "" { + data["intermediate"] = certificatedata.Data.IntermediateCertificate + } + d.Set("data", data) + } + d.Set("begins_on", certificatedata.BeginsOn) + d.Set("expires_on", certificatedata.ExpiresOn) + d.Set("status", certificatedata.Status) + d.Set("issuer", certificatedata.Issuer) + d.Set("imported", certificatedata.Imported) + d.Set("has_previous", certificatedata.HasPrevious) + d.Set("key_algorithm", certificatedata.KeyAlgorithm) + d.Set("algorithm", certificatedata.Algorithm) + + return nil +} + +func resourceIBMCertificateManagerUpdate(d *schema.ResourceData, meta interface{}) error { + cmService, err := meta.(ClientSession).CertificateManagerAPI() + if err != nil { + return err + } + certID := d.Id() + client := cmService.Certificate() + if d.HasChange("name") || d.HasChange("description") { + name := d.Get("name").(string) + description := d.Get("description").(string) + payload := models.CertificateMetadataUpdate{Name: name, Description: description} + + importCertError := client.UpdateCertificateMetaData(certID, payload) + if importCertError != nil { + return importCertError + } + } + if d.HasChange("data") { + importData := models.Data{} + if certificateimpdata, ok := d.GetOk("data"); ok && certificateimpdata != nil { + datainfo := certificateimpdata.(map[string]interface{}) + if content, ok := datainfo["content"]; ok && content != nil { + importData.Content = content.(string) + } + if privkey, ok := datainfo["priv_key"]; ok && privkey != nil { + importData.Privatekey = privkey.(string) + } + if intermediate, ok := datainfo["intermediate"]; ok && intermediate != nil { + importData.IntermediateCertificate = intermediate.(string) + } + } + payload := models.CertificateReimportData{Content: importData.Content, Privatekey: importData.Privatekey, IntermediateCertificate: importData.IntermediateCertificate} + _, reImportCertError := client.ReimportCertificate(certID, payload) + if reImportCertError != nil { + return reImportCertError + } + } + return resourceIBMCertificateManagerGet(d, meta) +} +func resourceIBMCertificateManagerDelete(d *schema.ResourceData, meta interface{}) error { + cmService, err := meta.(ClientSession).CertificateManagerAPI() + if err != nil { + return err + } + certID := d.Id() + err = cmService.Certificate().DeleteCertificate(certID) + if err != nil { + return fmt.Errorf("Error deleting Certificate: %s", err) + } + d.SetId("") + + return nil +} + +func resourceIBMCertificateManagerExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cmService, err := meta.(ClientSession).CertificateManagerAPI() + if err != nil { + return false, err + } + client := cmService.Certificate() + certID := d.Id() + + _, err = client.GetCertData(certID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_certificate_manager_order.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_certificate_manager_order.go new file mode 100644 index 00000000000..b62ba8077ce --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_certificate_manager_order.go @@ -0,0 +1,328 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/IBM-Cloud/bluemix-go/models"
+)
+
+func resourceIBMCertificateManagerOrder() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMCertificateManagerOrderCertificate,
+		Read:     resourceIBMCertificateManagerRead,
+		Update:   resourceIBMCertificateManagerRenew,
+		Importer: &schema.ResourceImporter{},
+		Delete:   resourceIBMCertificateManagerDelete,
+		Exists:   resourceIBMCertificateManagerExists,
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Update: schema.DefaultTimeout(10 * time.Minute),
+		},
+		Schema: map[string]*schema.Schema{
+			"certificate_manager_instance_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Certificate manager instance ID",
+			},
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Certificate name",
+			},
+			"domains": {
+				Type:        schema.TypeList,
+				Required:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				ForceNew:    true,
+				Description: "List of domain names",
+			},
+			"rotate_keys": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "Keys are rotated if set to true",
+			},
+			"renew_certificate": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "Invokes renew functionality",
+			},
+			"description": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Certificate description",
+			},
+			"domain_validation_method": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Default:     "dns-01",
+				Description: "Domain validation method",
+			},
+			"dns_provider_instance_crn": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "DNS provider instance CRN",
+			},
+			"issuer": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Certificate issuer info",
+			},
+			"key_algorithm": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				Default:      "rsaEncryption 2048 bit",
+				Description:  "Key algorithm info",
+				ValidateFunc: validateAllowedStringValue([]string{"rsaEncryption 2048 bit", "rsaEncryption 4096 bit"}),
+			},
+			"auto_renew_enabled": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+			"algorithm": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Algorithm info",
+			},
+			"begins_on": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "Certificate validity start date",
+			},
+			"expires_on": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "Certificate expiry date",
+			},
+			"imported": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Set to true if the certificate is imported",
+			},
+			"status": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Status of the certificate",
+			},
+			"has_previous": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Indicates whether the certificate has a previous version",
+			},
+			"issuance_info": {
+				Type:     schema.TypeMap,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceIBMCertificateManagerOrderCertificate(d *schema.ResourceData, meta interface{}) error {
+
+	cmService, err := meta.(ClientSession).CertificateManagerAPI()
+	if err != nil {
+		return err
+	}
+
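+	// Collect the order arguments; optional fields fall back to their schema
+	// defaults and the domains list is flattened below.
+	instanceID :=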
d.Get("certificate_manager_instance_id").(string) + name := d.Get("name").(string) + var description string + if desc, ok := d.GetOk("description"); ok { + description = desc.(string) + } + domainValidationMethod := d.Get("domain_validation_method").(string) + + var dnsProviderInstanceCrn string + if dnsInsCrn, ok := d.GetOk("dns_provider_instance_crn"); ok { + dnsProviderInstanceCrn = dnsInsCrn.(string) + } + + keyAlgorithm := d.Get("key_algorithm").(string) + autoRenew := d.Get("auto_renew_enabled").(bool) + + var domainList = make([]string, 0) + if domains, ok := d.GetOk("domains"); ok { + for _, domain := range domains.([]interface{}) { + domainList = append(domainList, fmt.Sprintf("%v", domain)) + } + } + client := cmService.Certificate() + payload := models.CertificateOrderData{Name: name, Description: description, Domains: domainList, DomainValidationMethod: domainValidationMethod, DNSProviderInstanceCrn: dnsProviderInstanceCrn, KeyAlgorithm: keyAlgorithm, AutoRenewEnabled: autoRenew} + result, err := client.OrderCertificate(instanceID, payload) + if err != nil { + return err + } + d.SetId(result.ID) + + _, err = waitForCertificateOrder(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for Ordering Certificate (%s) to be succeeded: %s", d.Id(), err) + } + + return resourceIBMCertificateManagerRead(d, meta) +} +func resourceIBMCertificateManagerRead(d *schema.ResourceData, meta interface{}) error { + cmService, err := meta.(ClientSession).CertificateManagerAPI() + if err != nil { + return err + } + certID := d.Id() + certificatedata, err := cmService.Certificate().GetMetaData(certID) + if err != nil { + return err + } + cminstanceid := strings.Split(certID, ":certificate:") + d.Set("certificate_manager_instance_id", cminstanceid[0]+"::") + d.Set("name", certificatedata.Name) + d.Set("domains", certificatedata.Domains) + d.Set("domain_validation_method", "dns-01") + d.Set("rotate_keys", certificatedata.RotateKeys) + d.Set("description", certificatedata.Description) + d.Set("begins_on", certificatedata.BeginsOn) + d.Set("expires_on", certificatedata.ExpiresOn) + d.Set("imported", certificatedata.Imported) + d.Set("status", certificatedata.Status) + d.Set("algorithm", certificatedata.Algorithm) + d.Set("key_algorithm", certificatedata.KeyAlgorithm) + d.Set("issuer", certificatedata.Issuer) + d.Set("has_previous", certificatedata.HasPrevious) + d.Set("auto_renew_enabled", certificatedata.OrderPolicy.AutoRenewEnabled) + + if certificatedata.IssuanceInfo != nil { + issuanceinfo := map[string]interface{}{} + if certificatedata.IssuanceInfo.Status != "" { + issuanceinfo["status"] = certificatedata.IssuanceInfo.Status + } + if certificatedata.IssuanceInfo.Code != "" { + issuanceinfo["code"] = certificatedata.IssuanceInfo.Code + } + if certificatedata.IssuanceInfo.AdditionalInfo != "" { + issuanceinfo["additional_info"] = certificatedata.IssuanceInfo.AdditionalInfo + } + if certificatedata.IssuanceInfo.OrderedOn != 0 { + order := certificatedata.IssuanceInfo.OrderedOn + orderedOn := strconv.FormatInt(order, 10) + issuanceinfo["ordered_on"] = orderedOn + } + d.Set("issuance_info", issuanceinfo) + } + return nil +} + +func resourceIBMCertificateManagerRenew(d *schema.ResourceData, meta interface{}) error { + cmService, err := meta.(ClientSession).CertificateManagerAPI() + if err != nil { + return err + } + certID := d.Id() + client := cmService.Certificate() + + if d.Get("renew_certificate").(bool) == true { + rotateKeys := d.Get("rotate_keys").(bool) + payload := 
models.CertificateRenewData{RotateKeys: rotateKeys} + + _, err := client.RenewCertificate(certID, payload) + if err != nil { + return err + } + } + if d.HasChange("name") || d.HasChange("description") { + name := d.Get("name").(string) + description := d.Get("description").(string) + payload := models.CertificateMetadataUpdate{Name: name, Description: description} + + err := client.UpdateCertificateMetaData(certID, payload) + if err != nil { + return err + } + } + if d.HasChange("auto_renew_enabled") { + autoRenew := d.Get("auto_renew_enabled").(bool) + payload := models.OrderPolicy{AutoRenewEnabled: autoRenew} + + _, err := client.UpdateOrderPolicy(certID, payload) + if err != nil { + return err + } + } + _, err = waitForCertificateRenew(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for Renew Certificate (%s) to be succeeded: %s", d.Id(), err) + } + return resourceIBMCertificateManagerRead(d, meta) +} +func waitForCertificateOrder(d *schema.ResourceData, meta interface{}) (interface{}, error) { + cmService, err := meta.(ClientSession).CertificateManagerAPI() + if err != nil { + return false, err + } + certID := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"valid"}, + Refresh: func() (interface{}, string, error) { + getcert, err := cmService.Certificate().GetMetaData(certID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return nil, "", fmt.Errorf("The certificate %s does not exist anymore: %v", d.Id(), err) + } + return nil, "", err + } + if getcert.Status == "failed" { + return getcert, getcert.Status, fmt.Errorf("The certificate %s failed: %v", d.Id(), err) + } + return getcert, getcert.Status, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 60 * time.Second, + MinTimeout: 60 * time.Second, + } + + return stateConf.WaitForState() +} +func waitForCertificateRenew(d *schema.ResourceData, meta interface{}) (interface{}, error) { + cmService, err := meta.(ClientSession).CertificateManagerAPI() + if err != nil { + return false, err + } + certID := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"valid"}, + Refresh: func() (interface{}, string, error) { + getcert, err := cmService.Certificate().GetMetaData(certID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return nil, "", fmt.Errorf("The certificate %s does not exist anymore: %v", d.Id(), err) + } + return nil, "", err + } + if getcert.Status == "failed" { + return getcert, getcert.Status, fmt.Errorf("The certificate %s failed: %v", d.Id(), err) + } + return getcert, getcert.Status, nil + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 60 * time.Second, + MinTimeout: 60 * time.Second, + } + + return stateConf.WaitForState() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis.go new file mode 100644 index 00000000000..0502da831ad --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis.go @@ -0,0 +1,566 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "net/url" + "os" + "strings" + "time" + + rc "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/models" +) + +const ( + cisInstanceSuccessStatus = "active" + cisInstanceProgressStatus = "in progress" + cisInstanceProvisioningStatus = "provisioning" + cisInstanceInactiveStatus = "inactive" + cisInstanceFailStatus = "failed" + cisInstanceRemovedStatus = "removed" + cisInstanceReclamation = "pending_reclamation" +) + +func resourceIBMCISInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCISInstanceCreate, + Read: resourceIBMCISInstanceRead, + Update: resourceIBMCISInstanceUpdate, + Delete: resourceIBMCISInstanceDelete, + Exists: resourceIBMCISInstanceExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "A name for the resource instance", + }, + + "service": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the Cloud Internet Services offering", + }, + + "plan": { + Type: schema.TypeString, + Required: true, + Description: "The plan type of the service", + }, + + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "Unique identifier of resource instance", + }, + + "location": { + Description: "The location where the instance available", + Required: true, + ForceNew: true, + Type: schema.TypeString, + }, + + "resource_group_id": { + Description: "The resource group id", + Optional: true, + ForceNew: true, + Type: schema.TypeString, + Computed: true, + }, + + "parameters": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Arbitrary parameters to pass. 
Must be a JSON object", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_cis", "tag")}, + Set: schema.HashString, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Status of resource instance", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about the resource", + }, + }, + } +} + +func resourceIBMCISValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmCISResourceValidator := ResourceValidator{ResourceName: "ibm_cis", Schema: validateSchema} + return &ibmCISResourceValidator +} + +// Replace with func wrapper for resourceIBMResourceInstanceCreate specifying serviceName := "internet-svcs" +func resourceIBMCISInstanceCreate(d *schema.ResourceData, meta interface{}) error { + + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + serviceName := "internet-svcs" + plan := d.Get("plan").(string) + name := d.Get("name").(string) + location := d.Get("location").(string) + + rsInst := rc.CreateResourceInstanceOptions{ + Name: &name, + } + + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + serviceOff, err := rsCatRepo.FindByName(serviceName, true) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + servicePlan, err := rsCatRepo.GetServicePlanID(serviceOff[0], plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + rsInst.ResourcePlanID = &servicePlan + + deployments, err := rsCatRepo.ListDeployments(servicePlan) + if err != nil { + return fmt.Errorf("Error retrieving deployment for plan %s : %s", plan, err) + } + if len(deployments) == 0 { + return fmt.Errorf("No deployment found for service plan : %s", plan) + } + deployments, supportedLocations := filterCISDeployments(deployments, location) + + if len(deployments) == 0 { + locationList := make([]string, 0, len(supportedLocations)) + for l := range supportedLocations { + locationList = append(locationList, l) + } + return fmt.Errorf("No deployment found for service plan %s at location %s.\nValid location(s) are: %q.", plan, location, locationList) + } + + rsInst.Target = &deployments[0].CatalogCRN + + if rsGrpID, ok := d.GetOk("resource_group_id"); ok { + rg := rsGrpID.(string) + rsInst.ResourceGroup = &rg + } else { + defaultRg, err := defaultResourceGroup(meta) + if err != nil { + return err + } + rsInst.ResourceGroup = &defaultRg + } + + if parameters, ok := d.GetOk("parameters"); ok { + rsInst.Parameters = 
parameters.(map[string]interface{}) + } + + instance, response, err := rsConClient.CreateResourceInstance(&rsInst) + if err != nil { + return fmt.Errorf("Error creating resource instance: %s %s", err, response) + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk("tags"); ok || v != "" { + oldList, newList := d.GetChange("tags") + err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on create of ibm cis (%s) tags: %s", d.Id(), err) + } + } + + // Moved d.SetId(instance.ID) to after waiting for resource to finish creation. Otherwise Terraform initates depedent tasks too early. + // Original flow had SetId here as its required as input to waitForCISInstanceCreate + + _, err = waitForCISInstanceCreate(d, meta, *instance.ID) + if err != nil { + return fmt.Errorf( + "Error waiting for create resource instance (%s) to be succeeded: %s", d.Id(), err) + } + + d.SetId(*instance.ID) + + return resourceIBMCISInstanceRead(d, meta) +} + +func resourceIBMCISInstanceRead(d *schema.ResourceData, meta interface{}) error { + + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + + instanceID := d.Id() + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if strings.Contains(err.Error(), "Object not found") || + strings.Contains(err.Error(), "status code: 404") { + log.Printf("[WARN] Removing record from state because it's not found via the API") + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving resource instance: %s %s", err, response) + } + if strings.Contains(*instance.State, "removed") { + log.Printf("[WARN] Removing instance from TF state because it's now in removed state") + d.SetId("") + return nil + } + tags, err := GetTagsUsingCRN(meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on get of ibm cis tags (%s) tags: %s", d.Id(), err) + } + d.Set("tags", tags) + d.Set("name", *instance.Name) + d.Set("status", *instance.State) + d.Set("resource_group_id", *instance.ResourceGroupID) + d.Set("parameters", Flatten(instance.Parameters)) + if instance.CRN != nil { + location := strings.Split(*instance.CRN, ":") + if len(location) > 5 { + d.Set("location", location[5]) + } + } + d.Set("guid", *instance.GUID) + + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + d.Set("service", "internet-svcs") + + servicePlan, err := rsCatRepo.GetServicePlanName(*instance.ResourcePlanID) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + d.Set("plan", servicePlan) + + d.Set(ResourceName, *instance.Name) + d.Set(ResourceCRN, *instance.CRN) + d.Set(ResourceStatus, *instance.State) + d.Set(ResourceGroupName, *instance.ResourceGroupCRN) + + rcontroller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, rcontroller+"/internet-svcs/"+url.QueryEscape(*instance.CRN)) + + return nil +} + +func resourceIBMCISInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + + instanceID := d.Id() + + updateReq := rc.UpdateResourceInstanceOptions{ + ID: &instanceID, + } + if d.HasChange("name") { + name := d.Get("name").(string) + updateReq.Name = &name + } + + if d.HasChange("plan") { + plan := d.Get("plan").(string) + service 
:= d.Get("service").(string) + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + serviceOff, err := rsCatRepo.FindByName(service, true) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + servicePlan, err := rsCatRepo.GetServicePlanID(serviceOff[0], plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + + updateReq.ResourcePlanID = &servicePlan + + } + + if d.HasChange("tags") { + oldList, newList := d.GetChange("tags") + err = UpdateTagsUsingCRN(oldList, newList, meta, instanceID) + if err != nil { + log.Printf( + "Error on update of CIS (%s) tags: %s", d.Id(), err) + } + } + + _, response, err := rsConClient.UpdateResourceInstance(&updateReq) + if err != nil { + return fmt.Errorf("Error updating resource instance: %s %s", err, response) + } + + _, err = waitForCISInstanceUpdate(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for update resource instance (%s) to be succeeded: %s", d.Id(), err) + } + + return resourceIBMCISInstanceRead(d, meta) +} + +func resourceIBMCISInstanceDelete(d *schema.ResourceData, meta interface{}) error { + + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + id := d.Id() + recursive := true + deleteReq := rc.DeleteResourceInstanceOptions{ + ID: &id, + Recursive: &recursive, + } + response, err := rsConClient.DeleteResourceInstance(&deleteReq) + if err != nil { + // If prior delete occurs, instance is not immediately deleted, but remains in "removed" state" + // RC 410 with "Gone" returned as error + if strings.Contains(err.Error(), "Gone") || + strings.Contains(err.Error(), "status code: 410") { + log.Printf("[WARN] Resource instance already deleted %s\n %s", err, response) + err = nil + } else { + return fmt.Errorf("Error deleting resource instance: %s %s", err, response) + } + } + + _, err = waitForCISInstanceDelete(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for resource instance (%s) to be deleted: %s", d.Id(), err) + } + + d.SetId("") + + return nil +} +func resourceIBMCISInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s %s", err, response) + } + if instance != nil && (strings.Contains(*instance.State, "removed") || strings.Contains(*instance.State, cisInstanceReclamation)) { + log.Printf("[WARN] Removing instance from state because it's in removed or pending_reclamation state") + d.SetId("") + return false, nil + } + + return *instance.ID == instanceID, nil +} + +func waitForCISInstanceCreate(d *schema.ResourceData, meta interface{}, instanceID string) (interface{}, error) { + + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + //instanceID := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{cisInstanceProgressStatus, cisInstanceInactiveStatus, cisInstanceProvisioningStatus}, + Target: []string{cisInstanceSuccessStatus}, + Refresh: 
func() (interface{}, string, error) { + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return nil, "", fmt.Errorf("The resource instance %s does not exist anymore: %v %s", d.Id(), err, response) + } + return nil, "", err + } + if *instance.State == cisInstanceFailStatus { + return instance, *instance.State, fmt.Errorf("The resource instance %s failed: %v %s", d.Id(), err, response) + } + return instance, *instance.State, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func waitForCISInstanceUpdate(d *schema.ResourceData, meta interface{}) (interface{}, error) { + + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{cisInstanceProgressStatus, cisInstanceInactiveStatus}, + Target: []string{cisInstanceSuccessStatus}, + Refresh: func() (interface{}, string, error) { + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return nil, "", fmt.Errorf("The resource instance %s does not exist anymore: %v %s", d.Id(), err, response) + } + return nil, "", err + } + if *instance.State == cisInstanceFailStatus { + return instance, *instance.State, fmt.Errorf("The resource instance %s failed: %v %s", d.Id(), err, response) + } + return instance, *instance.State, nil + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func waitForCISInstanceDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) { + + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + stateConf := &resource.StateChangeConf{ + Pending: []string{cisInstanceProgressStatus, cisInstanceInactiveStatus, cisInstanceSuccessStatus}, + Target: []string{cisInstanceRemovedStatus, cisInstanceReclamation}, + Refresh: func() (interface{}, string, error) { + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return instance, cisInstanceSuccessStatus, nil + } + return nil, "", err + } + if *instance.State == cisInstanceFailStatus { + return instance, *instance.State, fmt.Errorf("The resource instance %s failed to delete: %v %s", d.Id(), err, response) + } + return instance, *instance.State, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func filterCISDeployments(deployments []models.ServiceDeployment, location string) ([]models.ServiceDeployment, map[string]bool) { + supportedDeployments := []models.ServiceDeployment{} + supportedLocations := make(map[string]bool) + for _, d := range deployments { + if d.Metadata.RCCompatible { + deploymentLocation := d.Metadata.Deployment.Location + supportedLocations[deploymentLocation] = true + if 
deploymentLocation == location { + supportedDeployments = append(supportedDeployments, d) + } + } + } + return supportedDeployments, supportedLocations +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_cache_settings.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_cache_settings.go new file mode 100644 index 00000000000..5396c484061 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_cache_settings.go @@ -0,0 +1,357 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISCacheSettings = "ibm_cis_cache_settings" + cisCacheSettingsCachingLevel = "caching_level" + cisCacheSettingsBrowserExpiration = "browser_expiration" + cisCacheSettingsDevelopmentMode = "development_mode" + cisCacheSettingsQueryStringSort = "query_string_sort" + cisCachePurgeAll = "purge_all" + cisCachePurgeByURLs = "purge_by_urls" + cisCachePurgeByCacheTags = "purge_by_tags" + cisCachePurgeByHosts = "purge_by_hosts" + cisCacheSettingsOnOffValidatorID = "on_off_validator_id" + cisCacheServeStaleContent = "serve_stale_content" +) + +func resourceIBMCISCacheSettings() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS instance crn", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisCacheSettingsCachingLevel: { + Type: schema.TypeString, + Description: "Cache level setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator(ibmCISCacheSettings, + cisCacheSettingsCachingLevel), + }, + cisCacheServeStaleContent: { + Type: schema.TypeString, + Description: "Serve Stale Content ", + Default: "on", + Optional: true, + ValidateFunc: InvokeValidator(ibmCISCacheSettings, + cisCacheServeStaleContent), + }, + cisCacheSettingsBrowserExpiration: { + Type: schema.TypeInt, + Description: "Browser Expiration setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator(ibmCISCacheSettings, + cisCacheSettingsBrowserExpiration), + }, + cisCacheSettingsDevelopmentMode: { + Type: schema.TypeString, + Description: "Development mode setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator(ibmCISCacheSettings, + cisCacheSettingsOnOffValidatorID), + }, + cisCacheSettingsQueryStringSort: { + Type: schema.TypeString, + Description: "Query String sort setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator(ibmCISCacheSettings, + cisCacheSettingsOnOffValidatorID), + }, + cisCachePurgeAll: { + Type: schema.TypeBool, + Description: "Purge all setting", + Optional: true, + ConflictsWith: []string{ + cisCachePurgeByURLs, + cisCachePurgeByCacheTags, + cisCachePurgeByHosts}, + }, + cisCachePurgeByURLs: { + Type: schema.TypeList, + Description: "Purge by URLs", + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + ConflictsWith: []string{ + cisCachePurgeAll, + cisCachePurgeByCacheTags, + cisCachePurgeByHosts}, + }, + cisCachePurgeByCacheTags: { + Type: schema.TypeList, + Description: "Purge by tags", + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + ConflictsWith: []string{ + cisCachePurgeAll, + 
cisCachePurgeByURLs, + cisCachePurgeByHosts}, + }, + cisCachePurgeByHosts: { + Type: schema.TypeList, + Description: "Purge by hosts", + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + ConflictsWith: []string{ + cisCachePurgeAll, + cisCachePurgeByURLs, + cisCachePurgeByCacheTags, + }, + }, + }, + Create: resourceCISCacheSettingsUpdate, + Read: resourceCISCacheSettingsRead, + Update: resourceCISCacheSettingsUpdate, + Delete: resourceCISCacheSettingsDelete, + Importer: &schema.ResourceImporter{}, + } +} + +func resourceIBMCISCacheSettingsValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + browserCacheTTL := "0, 30, 60, 300, 1200, 1800, 3600, 7200, 10800, 14400," + + "18000, 28800, 43200, 57600, 72000, 86400, 172800, 259200, 345600, 432000," + + "691200, 1382400, 2073600, 2678400, 5356800, 16070400, 31536000" + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisCacheSettingsOnOffValidatorID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: "on, off"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisCacheSettingsBrowserExpiration, + ValidateFunctionIdentifier: ValidateAllowedIntValue, + Type: TypeInt, + Optional: true, + AllowedValues: browserCacheTTL}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisCacheServeStaleContent, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Optional: true, + AllowedValues: "on, off"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisCacheSettingsCachingLevel, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: "basic, simplified, aggressive"}) + ibmCISCacheSettingsResourceValidator := ResourceValidator{ + ResourceName: ibmCISCacheSettings, + Schema: validateSchema} + return &ibmCISCacheSettingsResourceValidator +} + +func resourceCISCacheSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisCacheClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + + if d.HasChange(cisCacheSettingsCachingLevel) || + d.HasChange(cisCacheSettingsBrowserExpiration) || + d.HasChange(cisCacheSettingsDevelopmentMode) || + d.HasChange(cisCacheSettingsQueryStringSort) || + d.HasChange(cisCachePurgeAll) || + d.HasChange(cisCachePurgeByURLs) || + d.HasChange(cisCachePurgeByCacheTags) || + d.HasChange(cisCachePurgeByHosts) || + d.HasChange(cisCacheServeStaleContent) { + + // Caching Level Setting + if value, ok := d.GetOk(cisCacheSettingsCachingLevel); ok { + opt := cisClient.NewUpdateCacheLevelOptions() + opt.SetValue(value.(string)) + _, resp, err := cisClient.UpdateCacheLevel(opt) + if err != nil { + log.Printf("Update caching level failed : %v\n", resp) + return err + } + } + // Serve Stale Content Setting + if value, ok := d.GetOk(cisCacheServeStaleContent); ok { + opt := cisClient.NewUpdateServeStaleContentOptions() + opt.SetValue(value.(string)) + _, resp, err := cisClient.UpdateServeStaleContent(opt) + if err != nil { + log.Printf("Update Serve Stale Content Setting failed : %v\n", resp) + return err + } + } + + // Browser Expiration setting + if value, ok := d.GetOk(cisCacheSettingsBrowserExpiration); ok { + opt := 
cisClient.NewUpdateBrowserCacheTtlOptions()
+			opt.SetValue(int64(value.(int)))
+			_, resp, err := cisClient.UpdateBrowserCacheTTL(opt)
+			if err != nil {
+				log.Printf("Update browser expiration setting failed : %v\n", resp)
+				return err
+			}
+		}
+
+		// development mode setting
+		if value, ok := d.GetOk(cisCacheSettingsDevelopmentMode); ok {
+			opt := cisClient.NewUpdateDevelopmentModeOptions()
+			opt.SetValue(value.(string))
+			_, resp, err := cisClient.UpdateDevelopmentMode(opt)
+			if err != nil {
+				log.Printf("Update development mode setting failed : %v\n", resp)
+				return err
+			}
+		}
+		// Query string sort setting
+		if value, ok := d.GetOk(cisCacheSettingsQueryStringSort); ok {
+			opt := cisClient.NewUpdateQueryStringSortOptions()
+			opt.SetValue(value.(string))
+			_, resp, err := cisClient.UpdateQueryStringSort(opt)
+			if err != nil {
+				log.Printf("Update query string sort setting failed : %v\n", resp)
+				return err
+			}
+		}
+
+		if value, ok := d.GetOkExists(cisCachePurgeAll); ok {
+			if value.(bool) {
+				opt := cisClient.NewPurgeAllOptions()
+				result, response, err := cisClient.PurgeAll(opt)
+				if err != nil {
+					log.Printf("Purge all failed : %v", response)
+					return err
+				}
+				log.Printf("Purge all successful : %s", *result.Result.ID)
+			}
+		}
+		if value, ok := d.GetOk(cisCachePurgeByURLs); ok {
+			urls := expandStringList(value.([]interface{}))
+			opt := cisClient.NewPurgeByUrlsOptions()
+			opt.SetFiles(urls)
+			_, response, err := cisClient.PurgeByUrls(opt)
+			if err != nil {
+				log.Printf("Purge by URLs failed : %v", response)
+				return err
+			}
+		}
+		if value, ok := d.GetOk(cisCachePurgeByCacheTags); ok {
+			cacheTags := expandStringList(value.([]interface{}))
+			opt := cisClient.NewPurgeByCacheTagsOptions()
+			opt.SetTags(cacheTags)
+			result, response, err := cisClient.PurgeByCacheTags(opt)
+			if err != nil {
+				log.Printf("Purge by cache tags failed : %v", response)
+				return err
+			}
+			log.Printf("Purge by tags successful : %s", *result.Result.ID)
+
+		}
+		if value, ok := d.GetOk(cisCachePurgeByHosts); ok {
+			hosts := expandStringList(value.([]interface{}))
+			opt := cisClient.NewPurgeByHostsOptions()
+			opt.SetHosts(hosts)
+			result, response, err := cisClient.PurgeByHosts(opt)
+			if err != nil {
+				log.Printf("Purge by hosts failed : %v", response)
+				return err
+			}
+			log.Printf("Purge by hosts successful : %s", *result.Result.ID)
+		}
+	}
+	d.SetId(convertCisToTfTwoVar(zoneID, crn))
+	return resourceCISCacheSettingsRead(d, meta)
+}
+
+func resourceCISCacheSettingsRead(d *schema.ResourceData, meta interface{}) error {
+	cisClient, err := meta.(ClientSession).CisCacheClientSession()
+	if err != nil {
+		return err
+	}
+	zoneID, crn, _ := convertTftoCisTwoVar(d.Id())
+	cisClient.Crn = core.StringPtr(crn)
+	cisClient.ZoneID = core.StringPtr(zoneID)
+
+	// Caching Level Setting
+	cacheLevel, resp, err := cisClient.GetCacheLevel(cisClient.NewGetCacheLevelOptions())
+	if err != nil {
+		log.Printf("Get caching level setting failed : %v\n", resp)
+		return err
+	}
+
+	// Serve Stale Content setting
+	servestaleContent, resp, err := cisClient.GetServeStaleContent(cisClient.NewGetServeStaleContentOptions())
+	if err != nil {
+		log.Printf("Get serve stale content setting failed : %v\n", resp)
+		return err
+	}
+
+	// Browser Expiration setting
+	browserCacheTTL, resp, err := cisClient.GetBrowserCacheTTL(
+		cisClient.NewGetBrowserCacheTtlOptions())
+	if err != nil {
+		log.Printf("Get browser expiration setting failed : %v\n", resp)
+		return err
+	}
+
+	// development mode setting
+	devMode, resp, err :=
cisClient.GetDevelopmentMode( + cisClient.NewGetDevelopmentModeOptions()) + if err != nil { + log.Printf("Get development mode setting failed : %v", resp) + return err + } + + // Query string sort setting + queryStringSort, resp, err := cisClient.GetQueryStringSort( + cisClient.NewGetQueryStringSortOptions()) + if err != nil { + log.Printf("Get query string sort setting failed : %v", resp) + return err + } + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisCacheSettingsBrowserExpiration, *browserCacheTTL.Result.Value) + d.Set(cisCacheSettingsCachingLevel, *cacheLevel.Result.Value) + d.Set(cisCacheSettingsDevelopmentMode, *devMode.Result.Value) + d.Set(cisCacheSettingsQueryStringSort, *queryStringSort.Result.Value) + d.Set(cisCacheServeStaleContent, *servestaleContent.Result.Value) + return nil +} + +func resourceCISCacheSettingsDelete(d *schema.ResourceData, meta interface{}) error { + // Nothing to delete on CIS resource + d.SetId("") + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_certificate_order.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_certificate_order.go new file mode 100644 index 00000000000..3ef9ac5c740 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_certificate_order.go @@ -0,0 +1,227 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISCertificateOrder = "ibm_cis_certificate_order" + cisCertificateOrderID = "certificate_id" + cisCertificateOrderHosts = "hosts" + cisCertificateOrderType = "type" + cisCertificateOrderTypeDedicated = "dedicated" + cisCertificateOrderStatus = "status" + cisCertificateOrderDeleted = "deleted" + cisCertificateOrderDeletePending = "deleting" +) + +func resourceIBMCISCertificateOrder() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCISCertificateOrderCreate, + Update: resourceIBMCISCertificateOrderRead, + Read: resourceIBMCISCertificateOrderRead, + Delete: resourceIBMCISCertificateOrderDelete, + Exists: resourceIBMCISCertificateOrderExist, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS object id or CRN", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisCertificateOrderID: { + Type: schema.TypeString, + Description: "certificate id", + Computed: true, + }, + cisCertificateOrderType: { + Type: schema.TypeString, + Description: "certificate type", + Optional: true, + Default: cisCertificateOrderTypeDedicated, + }, + cisCertificateOrderHosts: { + Type: schema.TypeList, + Description: "Hosts which certificate need to be ordered", + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + cisCertificateOrderStatus: { + Type: schema.TypeString, + Description: "certificate status", + Computed: true, + }, + }, + } +} + +func resourceIBMCISCertificateOrderValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisCertificateOrderType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: 
TypeString, + Required: true, + AllowedValues: cisCertificateOrderTypeDedicated}) + + cisCertificateOrderValidator := ResourceValidator{ + ResourceName: ibmCISCertificateOrder, + Schema: validateSchema} + return &cisCertificateOrderValidator +} + +func resourceIBMCISCertificateOrderCreate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + certType := d.Get(cisCertificateOrderType).(string) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + hosts := d.Get(cisCertificateOrderHosts) + hostsList := expandStringList(hosts.([]interface{})) + opt := cisClient.NewOrderCertificateOptions() + opt.SetType(certType) + opt.SetHosts(hostsList) + + result, resp, err := cisClient.OrderCertificate(opt) + if err != nil { + log.Printf("Certificate order failed: %v", resp) + return err + } + + d.SetId(convertCisToTfThreeVar(*result.Result.ID, zoneID, crn)) + return resourceIBMCISCertificateOrderRead(d, meta) +} + +func resourceIBMCISCertificateOrderRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + certificateID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + log.Println("Error in reading certificate id") + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewGetCustomCertificateOptions(certificateID) + result, resp, err := cisClient.GetCustomCertificate(opt) + if err != nil { + log.Printf("Certificate read failed: %v", resp) + return err + } + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisCertificateOrderID, result.Result.ID) + d.Set(cisCertificateOrderType, cisCertificateOrderTypeDedicated) + d.Set(cisCertificateOrderHosts, flattenStringList(result.Result.Hosts)) + d.Set(cisCertificateOrderStatus, result.Result.Status) + return nil +} + +func resourceIBMCISCertificateOrderDelete(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + certificateID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + log.Println("Error in reading certificate id") + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewDeleteCertificateOptions(certificateID) + resp, err := cisClient.DeleteCertificate(opt) + if err != nil { + log.Printf("Certificate delete failed: %v", resp) + return err + } + + _, err = waitForCISCertificateOrderDelete(d, meta) + if err != nil { + return err + } + + return nil +} + +func resourceIBMCISCertificateOrderExist(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return false, err + } + certificateID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + log.Println("Error in reading certificate id") + return false, err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewGetCustomCertificateOptions(certificateID) + _, response, err := cisClient.GetCustomCertificate(opt) + if err != nil { + if response != nil && response.StatusCode == 400 { + log.Printf("Certificate is not found") + return false, nil + 
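+      // A 400 from GetCustomCertificate is treated as "certificate gone"
+      // rather than a hard error, so the resource is dropped from state
+      // instead of failing the refresh.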
} + log.Printf("Get Certificate failed: %v", response) + return false, err + } + return true, nil +} + +func waitForCISCertificateOrderDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return nil, err + } + certificateID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + log.Println("Error in reading certificate id") + return nil, err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewGetCustomCertificateOptions(certificateID) + stateConf := &resource.StateChangeConf{ + Pending: []string{cisCertificateOrderDeletePending}, + Target: []string{cisCertificateOrderDeleted}, + Refresh: func() (interface{}, string, error) { + _, detail, err := cisClient.GetCustomCertificate(opt) + if err != nil { + if detail != nil && detail.StatusCode == 400 { + return detail, cisCertificateOrderDeleted, nil + } + return nil, "", err + } + return detail, cisCertificateOrderDeletePending, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + PollInterval: 10 * time.Second, + } + + return stateConf.WaitForState() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_certificate_upload.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_certificate_upload.go new file mode 100644 index 00000000000..ced28999096 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_certificate_upload.go @@ -0,0 +1,358 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "strings" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + cissslv1 "github.com/IBM/networking-go-sdk/sslcertificateapiv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISCertificateUpload = "ibm_cis_certificate_upload" + cisCertificateUploadCertificate = "certificate" + cisCertificateUploadPrivateKey = "private_key" + cisCertificateUploadBundleMethod = "bundle_method" + cisCertificateUploadGeoRestrictions = "geo_restrictions" + cisCertificateUploadCustomCertID = "custom_cert_id" + cisCertificateUploadStatus = "status" + cisCertificateUploadPriority = "priority" + cisCertificateUploadHosts = "hosts" + cisCertificateUploadIssuer = "issuer" + cisCertificateUploadSignature = "signature" + cisCertificateUploadUploadedOn = "uploaded_on" + cisCertificateUploadModifiedOn = "modified_on" + cisCertificateUploadExpiresOn = "expires_on" + cisCertificateUploadUbiquitous = "ubiquitous" + cisCertificateUploadDeletePending = "deleting" + cisCertificateUploadDeleted = "deleted" +) + +func resourceIBMCISCertificateUpload() *schema.Resource { + return &schema.Resource{ + Create: resourceCISCertificateUploadCreate, + Read: resourceCISCertificateUploadRead, + Update: resourceCISCertificateUploadUpdate, + Delete: resourceCISCertificateUploadDelete, + Exists: resourceCISCertificateUploadExists, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS instance crn", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisCertificateUploadCustomCertID: { + Type: 
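+        // Computed: populated from the upload response, not user input.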
schema.TypeString, + Computed: true, + }, + cisCertificateUploadCertificate: { + Type: schema.TypeString, + Description: "Certificate key", + Required: true, + Sensitive: true, + }, + cisCertificateUploadPrivateKey: { + Type: schema.TypeString, + Description: "Certificate private key", + Required: true, + Sensitive: true, + }, + cisCertificateUploadBundleMethod: { + Type: schema.TypeString, + Description: "Certificate bundle method", + Optional: true, + Default: cisCertificateUploadUbiquitous, + ValidateFunc: InvokeValidator( + ibmCISCertificateUpload, + cisCertificateUploadBundleMethod), + }, + cisCertificateUploadHosts: { + Type: schema.TypeList, + Computed: true, + Description: "hosts which the certificate uploaded to", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + cisCertificateUploadPriority: { + Type: schema.TypeInt, + Description: "Certificate priority", + Optional: true, + Computed: true, + }, + cisCertificateUploadStatus: { + Type: schema.TypeString, + Description: "certificate status", + Computed: true, + }, + cisCertificateUploadIssuer: { + Type: schema.TypeString, + Description: "certificate issuer", + Computed: true, + }, + cisCertificateUploadSignature: { + Type: schema.TypeString, + Description: "certificate signature", + Computed: true, + }, + cisCertificateUploadUploadedOn: { + Type: schema.TypeString, + Description: "certificate uploaded date", + Computed: true, + }, + cisCertificateUploadModifiedOn: { + Type: schema.TypeString, + Description: "certificate modified date", + Computed: true, + }, + cisCertificateUploadExpiresOn: { + Type: schema.TypeString, + Description: "certificate expires date", + Computed: true, + }, + }, + } +} + +func resourceCISCertificateUploadValidator() *ResourceValidator { + bundleMethod := "ubiquitous, optimal, force" + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisCertificateUploadBundleMethod, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: bundleMethod}) + + cisCertificateUploadValidator := ResourceValidator{ResourceName: ibmCISCertificateUpload, Schema: validateSchema} + return &cisCertificateUploadValidator +} + +func resourceCISCertificateUploadCreate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + // upload certificate + opt := cisClient.NewUploadCustomCertificateOptions() + opt.SetCertificate(d.Get(cisCertificateUploadCertificate).(string)) + opt.SetPrivateKey(d.Get(cisCertificateUploadPrivateKey).(string)) + if v, ok := d.GetOk(cisCertificateUploadBundleMethod); ok { + opt.SetBundleMethod(v.(string)) + } + + result, response, err := cisClient.UploadCustomCertificate(opt) + if err != nil { + log.Printf("Upload custom certificate failed: %v", response) + return err + } + certID := *result.Result.ID + d.SetId(convertCisToTfThreeVar(certID, zoneID, crn)) + + // change priority of certificate + certsList := []cissslv1.CertPriorityReqCertificatesItem{} + id := certID + var priority int64 + if v, ok := d.GetOk(cisCertificateUploadPriority); ok { + priority = int64(v.(int)) + certsItem, _ := cisClient.NewCertPriorityReqCertificatesItem(id, priority) + certsList = append(certsList, *certsItem) + 
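+    // Priority changes go through a separate API call that accepts a list
+    // of certificate/priority pairs; a single-item list is sent here.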
priorityOpt := cisClient.NewChangeCertificatePriorityOptions() + priorityOpt.SetCertificates(certsList) + priorityResponse, err := cisClient.ChangeCertificatePriority(priorityOpt) + if err != nil { + log.Printf("Change certificate priority failed: %v", priorityResponse) + return err + } + } + + return resourceCISCertificateUploadRead(d, meta) +} +func resourceCISCertificateUploadRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + + certID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewGetCustomCertificateOptions(certID) + result, response, err := cisClient.GetCustomCertificate(opt) + if err != nil { + log.Printf("Get custom certificate failed: %v", response) + return err + } + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisCertificateUploadCustomCertID, result.Result.ID) + d.Set(cisCertificateUploadBundleMethod, result.Result.BundleMethod) + d.Set(cisCertificateUploadIssuer, result.Result.Issuer) + d.Set(cisCertificateUploadHosts, flattenStringList(result.Result.Hosts)) + d.Set(cisCertificateUploadSignature, result.Result.Signature) + d.Set(cisCertificateUploadPriority, result.Result.Priority) + d.Set(cisCertificateUploadStatus, result.Result.Status) + d.Set(cisCertificateUploadUploadedOn, result.Result.UploadedOn) + d.Set(cisCertificateUploadModifiedOn, result.Result.ModifiedOn) + d.Set(cisCertificateUploadExpiresOn, result.Result.ExpiresOn) + return nil +} +func resourceCISCertificateUploadUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + + certID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + if d.HasChange(cisCertificateUploadBundleMethod) { + + opt := cisClient.NewUpdateCustomCertificateOptions(certID) + opt.SetCertificate(d.Get(cisCertificateUploadCertificate).(string)) + opt.SetPrivateKey(d.Get(cisCertificateUploadPrivateKey).(string)) + if v, ok := d.GetOk(cisCertificateUploadBundleMethod); ok { + opt.SetBundleMethod(v.(string)) + } + _, response, err := cisClient.UpdateCustomCertificate(opt) + if err != nil { + log.Printf("Update custom certificate failed: %v", response) + return err + } + } + + if d.HasChange(cisCertificateUploadPriority) { + // change priority of certificate + certsList := []cissslv1.CertPriorityReqCertificatesItem{} + id := certID + var priority int64 + if v, ok := d.GetOk(cisCertificateUploadPriority); ok { + priority = int64(v.(int)) + certsItem, _ := cisClient.NewCertPriorityReqCertificatesItem(id, priority) + certsList = append(certsList, *certsItem) + priorityOpt := cisClient.NewChangeCertificatePriorityOptions() + priorityOpt.SetCertificates(certsList) + _, err := cisClient.ChangeCertificatePriority(priorityOpt) + if err != nil { + log.Printf("Change certificate priority failed: %v", err) + return err + } + } + } + return resourceCISCertificateUploadRead(d, meta) +} + +func resourceCISCertificateUploadDelete(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + + certID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return err + } + 
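+  // The resource ID encodes certID:zoneID:crn; the CRN and zone
+  // identifier must be set on the client before any zone-scoped call.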
cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewDeleteCustomCertificateOptions(certID) + _, err = cisClient.DeleteCustomCertificate(opt) + if err != nil { + log.Printf("Delete custom certificate failed: %v", err) + return err + } + _, err = waitForCISCertificateUploadDelete(d, meta) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceCISCertificateUploadExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return false, err + } + + certID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return false, err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewGetCustomCertificateOptions(certID) + _, detail, err := cisClient.GetCustomCertificate(opt) + if err != nil { + if detail != nil && strings.Contains(err.Error(), "Invalid certificate") { + return false, nil + } + log.Printf("Get custom certificate failed: %v", err) + return false, err + } + return true, nil +} + +func waitForCISCertificateUploadDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return nil, err + } + + certID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return nil, err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewGetCustomCertificateOptions(certID) + stateConf := &resource.StateChangeConf{ + Pending: []string{cisCertificateUploadDeletePending}, + Target: []string{cisCertificateUploadDeleted}, + Refresh: func() (interface{}, string, error) { + _, detail, err := cisClient.GetCustomCertificate(opt) + if err != nil { + if detail != nil && strings.Contains(err.Error(), "Invalid certificate") { + return detail, cisCertificateUploadDeleted, nil + } + return nil, "", err + } + return detail, cisCertificateUploadDeletePending, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 5 * time.Second, + PollInterval: 5 * time.Second, + } + + return stateConf.WaitForState() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_custom_page.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_custom_page.go new file mode 100644 index 00000000000..0e455eb5c74 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_custom_page.go @@ -0,0 +1,185 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISCustomPage = "ibm_cis_custom_page" + cisCustomPageIdentifier = "page_id" + cisCustomPageURL = "url" + cisCustomPageState = "state" + cisCustomPageStateDefault = "default" + cisCustomPageStateCustomized = "customized" + cisCustomPageDesc = "description" + cisCustomPageRequiredTokens = "required_tokens" + cisCustomPagePreviewTarget = "preview_target" + cisCustomPageCreatedOn = "created_on" + cisCustomPageModifiedOn = "modified_on" +) + +func resourceIBMCISCustomPage() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS instance crn", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisCustomPageIdentifier: { + Type: schema.TypeString, + Description: "Custom page identifier", + ForceNew: true, + Required: true, + ValidateFunc: InvokeValidator(ibmCISCustomPage, + cisCustomPageIdentifier), + }, + cisCustomPageURL: { + Type: schema.TypeString, + Description: "Custom page url", + Required: true, + }, + cisCustomPageState: { + Type: schema.TypeString, + Description: "Custom page state", + Computed: true, + }, + cisCustomPageDesc: { + Type: schema.TypeString, + Description: "Free text", + Computed: true, + }, + cisCustomPageRequiredTokens: { + Type: schema.TypeList, + Description: "Custom page state", + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + cisCustomPagePreviewTarget: { + Type: schema.TypeString, + Description: "Custom page preview target", + Computed: true, + }, + cisCustomPageCreatedOn: { + Type: schema.TypeString, + Description: "Custom page created date", + Computed: true, + }, + cisCustomPageModifiedOn: { + Type: schema.TypeString, + Description: "Custom page modified date", + Computed: true, + }, + }, + Create: resourceCISCustomPageUpdate, + Read: resourceCISCustomPageRead, + Update: resourceCISCustomPageUpdate, + Delete: resourceCISCustomPageDelete, + Importer: &schema.ResourceImporter{}, + } +} + +func resourceIBMCISCustomPageValidator() *ResourceValidator { + customPageIDs := "basic_challenge, waf_challenge, waf_block, ratelimit_block," + + "country_challenge, ip_block, under_attack, 500_errors, 1000_errors, always_online" + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisCustomPageIdentifier, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: customPageIDs}) + ibmCISCustomPageResourceValidator := ResourceValidator{ + ResourceName: ibmCISCustomPage, + Schema: validateSchema} + return &ibmCISCustomPageResourceValidator +} + +func resourceCISCustomPageUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisCustomPageClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + pageID := d.Get(cisCustomPageIdentifier).(string) + + if d.HasChange(cisCustomPageURL) { + + url := d.Get(cisCustomPageURL).(string) + state := cisCustomPageStateDefault + if len(url) > 0 { + state = 
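+    // A non-empty URL marks the page "customized"; an empty URL reverts
+    // it to the built-in "default" page.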
cisCustomPageStateCustomized + } + opt := cisClient.NewUpdateZoneCustomPageOptions(pageID) + opt.SetURL(url) + opt.SetState(state) + + result, response, err := cisClient.UpdateZoneCustomPage(opt) + if err != nil { + log.Printf("Update custom page failed : %v", response) + return err + } + d.SetId(convertCisToTfThreeVar(*result.Result.ID, zoneID, crn)) + } + return resourceCISCustomPageRead(d, meta) +} + +func resourceCISCustomPageRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisCustomPageClientSession() + if err != nil { + return err + } + pageID, zoneID, crn, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewGetZoneCustomPageOptions(pageID) + + result, response, err := cisClient.GetZoneCustomPage(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + log.Printf("Custom Page has some error: %v", response) + d.SetId("") + return nil + } + log.Printf("Get custom page failed : %v", response) + return err + } + + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisCustomPageIdentifier, result.Result.ID) + d.Set(cisCustomPageURL, result.Result.URL) + d.Set(cisCustomPageState, result.Result.State) + d.Set(cisCustomPageRequiredTokens, flattenStringList(result.Result.RequiredTokens)) + d.Set(cisCustomPageDesc, result.Result.Description) + d.Set(cisCustomPagePreviewTarget, result.Result.PreviewTarget) + d.Set(cisCustomPageCreatedOn, (*result.Result.CreatedOn).String()) + d.Set(cisCustomPageModifiedOn, (*result.Result.ModifiedOn).String()) + return nil +} + +func resourceCISCustomPageDelete(d *schema.ResourceData, meta interface{}) error { + // Nothing to delete on CIS resource + d.SetId("") + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_dns_record.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_dns_record.go new file mode 100644 index 00000000000..6590e3a5448 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_dns_record.go @@ -0,0 +1,932 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisID = "cis_id" + cisDomainID = "domain_id" + cisZoneName = "zone_name" + cisDNSRecordID = "record_id" + cisDNSRecordCreatedOn = "created_on" + cisDNSRecordModifiedOn = "modified_on" + cisDNSRecordName = "name" + cisDNSRecordType = "type" + cisDNSRecordContent = "content" + cisDNSRecordProxiable = "proxiable" + cisDNSRecordProxied = "proxied" + cisDNSRecordTTL = "ttl" + cisDNSRecordPriority = "priority" + cisDNSRecordData = "data" +) + +// Constants associated with the DNS Record Type property. +// dns record type. 
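+// For illustration only (not part of the original source): assuming this
+// resource is registered as "ibm_cis_dns_record", as the file name
+// suggests, a minimal configuration exercising the simple-record path
+// might look like:
+//
+//   resource "ibm_cis_dns_record" "example" {
+//     cis_id    = ibm_cis.instance.id
+//     domain_id = ibm_cis_domain.example.id
+//     name      = "www"
+//     type      = "A"
+//     content   = "192.0.2.1"
+//     ttl       = 900
+//   }
+//
+// The attribute names mirror the schema constants defined below.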
+const (
+  cisDNSRecordTypeA     = "A"
+  cisDNSRecordTypeAAAA  = "AAAA"
+  cisDNSRecordTypeCAA   = "CAA"
+  cisDNSRecordTypeCNAME = "CNAME"
+  cisDNSRecordTypeLOC   = "LOC"
+  cisDNSRecordTypeMX    = "MX"
+  cisDNSRecordTypeNS    = "NS"
+  cisDNSRecordTypeSPF   = "SPF"
+  cisDNSRecordTypeSRV   = "SRV"
+  cisDNSRecordTypeTXT   = "TXT"
+  cisDNSRecordTypePTR   = "PTR"
+)
+
+func resourceIBMCISDnsRecord() *schema.Resource {
+  return &schema.Resource{
+    Create:   resourceIBMCISDnsRecordCreate,
+    Read:     resourceIBMCISDnsRecordRead,
+    Update:   resourceIBMCISDnsRecordUpdate,
+    Delete:   resourceIBMCISDnsRecordDelete,
+    Exists:   resourceIBMCISDnsRecordExist,
+    Importer: &schema.ResourceImporter{},
+
+    Schema: map[string]*schema.Schema{
+      cisID: {
+        Type:        schema.TypeString,
+        Description: "CIS object id or CRN",
+        Required:    true,
+      },
+      cisDomainID: {
+        Type:             schema.TypeString,
+        Description:      "Associated CIS domain",
+        Required:         true,
+        DiffSuppressFunc: suppressDomainIDDiff,
+      },
+      cisZoneName: {
+        Type:        schema.TypeString,
+        Description: "zone name",
+        Computed:    true,
+      },
+      cisDNSRecordName: {
+        Type:     schema.TypeString,
+        Optional: true,
+        StateFunc: func(i interface{}) string {
+          return strings.ToLower(i.(string))
+        },
+        DiffSuppressFunc: suppressNameDiff,
+        Description:      "DNS record name",
+      },
+      cisDNSRecordType: {
+        Type:        schema.TypeString,
+        Required:    true,
+        Description: "Record type",
+      },
+      cisDNSRecordContent: {
+        Type:             schema.TypeString,
+        Optional:         true,
+        ConflictsWith:    []string{cisDNSRecordData},
+        DiffSuppressFunc: suppressContentDiff,
+        Description:      "DNS record content",
+      },
+      cisDNSRecordData: {
+        Type:          schema.TypeMap,
+        Optional:      true,
+        ConflictsWith: []string{cisDNSRecordContent},
+      },
+      cisDNSRecordPriority: {
+        Type:             schema.TypeInt,
+        Optional:         true,
+        DiffSuppressFunc: suppressPriority,
+        Description:      "Priority Value",
+      },
+      cisDNSRecordProxied: {
+        Default:     false,
+        Optional:    true,
+        Type:        schema.TypeBool,
+        Description: "Boolean value: true if proxied, else false",
+      },
+      cisDNSRecordTTL: {
+        Optional:    true,
+        Type:        schema.TypeInt,
+        Default:     1,
+        Description: "TTL value",
+      },
+      cisDNSRecordCreatedOn: {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+      cisDNSRecordModifiedOn: {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+      cisDNSRecordProxiable: {
+        Type:     schema.TypeBool,
+        Computed: true,
+      },
+      cisDNSRecordID: {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+    },
+  }
+}
+
+func resourceIBMCISDnsRecordCreate(d *schema.ResourceData, meta interface{}) error {
+
+  sess, err := meta.(ClientSession).CisDNSRecordClientSession()
+  if err != nil {
+    log.Printf("Error: %s", err)
+    return err
+  }
+  var (
+    crn            string
+    zoneID         string
+    recordName     string
+    recordType     string
+    recordContent  string
+    recordPriority int
+    ttl            int
+    ok             interface{}
+    data           interface{}
+    v              interface{}
+    recordData     map[string]interface{}
+  )
+  // session options
+  crn = d.Get(cisID).(string)
+  zoneID, _, err = convertTftoCisTwoVar(d.Get(cisDomainID).(string))
+  sess.Crn = core.StringPtr(crn)
+  sess.ZoneIdentifier = core.StringPtr(zoneID)
+
+  // Input options
+  opt := sess.NewCreateDnsRecordOptions()
+
+  // set record type
+  recordType = d.Get(cisDNSRecordType).(string)
+  opt.SetType(recordType)
+  // set ttl value
+  ttl = d.Get(cisDNSRecordTTL).(int)
+  opt.SetTTL(int64(ttl))
+
+  switch recordType {
+  // A, AAAA, CNAME, SPF, TXT, NS, PTR records inputs
+  case cisDNSRecordTypeA,
+    cisDNSRecordTypeAAAA,
+    cisDNSRecordTypeCNAME,
+    cisDNSRecordTypeSPF,
+    cisDNSRecordTypeTXT,
+    cisDNSRecordTypeNS,
+    cisDNSRecordTypePTR:
+    // set record name & content
+    recordName = d.Get(cisDNSRecordName).(string)
+    opt.SetName(recordName)
+    recordContent = d.Get(cisDNSRecordContent).(string)
+    opt.SetContent(recordContent)
+
+  // MX Record inputs
+  case cisDNSRecordTypeMX:
+    // set record name, content and priority
+    recordName = d.Get(cisDNSRecordName).(string)
+    opt.SetName(recordName)
+    recordContent = d.Get(cisDNSRecordContent).(string)
+    opt.SetContent(recordContent)
+    recordPriority = d.Get(cisDNSRecordPriority).(int)
+    opt.SetPriority(int64(recordPriority))
+
+  // LOC Record inputs
+  case cisDNSRecordTypeLOC:
+    // set record name
+    recordName = d.Get(cisDNSRecordName).(string)
+    opt.SetName(recordName)
+    data, ok = d.GetOk(cisDNSRecordData)
+    if ok == false {
+      log.Printf("Error in getting data")
+      return err
+    }
+    recordData = make(map[string]interface{}, 0)
+    var dataMap map[string]interface{} = data.(map[string]interface{})
+
+    // altitude
+    v, ok = strconv.ParseFloat(dataMap["altitude"].(string), 64)
+    if ok != nil {
+      return fmt.Errorf("data input error")
+    }
+    recordData["altitude"] = v
+
+    // lat_degrees
+    v, ok = strconv.Atoi(dataMap["lat_degrees"].(string))
+    if ok != nil {
+      return fmt.Errorf("data input error")
+    }
+    recordData["lat_degrees"] = v
+
+    // lat_direction
+    recordData["lat_direction"] = dataMap["lat_direction"].(string)
+
+    // long_direction
+    recordData["long_direction"] = dataMap["long_direction"].(string)
+
+    // lat_minutes
+    v, ok = strconv.Atoi(dataMap["lat_minutes"].(string))
+    if ok != nil {
+      return fmt.Errorf("data input error")
+    }
+    recordData["lat_minutes"] = v
+
+    // lat_seconds
+    v, ok = strconv.ParseFloat(dataMap["lat_seconds"].(string), 64)
+    if ok != nil {
+      return fmt.Errorf("data input error")
+    }
+    recordData["lat_seconds"] = v
+
+    // long_degrees
+    v, ok := strconv.Atoi(dataMap["long_degrees"].(string))
+    if ok != nil {
+      return ok
+    }
+    recordData["long_degrees"] = v
+
+    // long_minutes
+    v, ok = strconv.Atoi(dataMap["long_minutes"].(string))
+    if ok != nil {
+      return ok
+    }
+    recordData["long_minutes"] = v
+
+    // long_seconds
+    i, ok := strconv.ParseFloat(dataMap["long_seconds"].(string), 64)
+    if ok != nil {
+      return ok
+    }
+    recordData["long_seconds"] = i
+
+    // precision_horz
+    i, ok = strconv.ParseFloat(dataMap["precision_horz"].(string), 64)
+    if ok != nil {
+      return ok
+    }
+    recordData["precision_horz"] = i
+
+    // precision_vert
+    i, ok = strconv.ParseFloat(dataMap["precision_vert"].(string), 64)
+    if ok != nil {
+      return ok
+    }
+    recordData["precision_vert"] = i
+
+    // size
+    i, ok = strconv.ParseFloat(dataMap["size"].(string), 64)
+    if ok != nil {
+      return ok
+    }
+    recordData["size"] = i
+
+    opt.SetData(recordData)
+
+  // CAA Record inputs
+  case cisDNSRecordTypeCAA:
+    // set record name
+    recordName = d.Get(cisDNSRecordName).(string)
+    opt.SetName(recordName)
+    data, ok = d.GetOk(cisDNSRecordData)
+    if ok == false {
+      log.Printf("Error in getting data")
+      return err
+    }
+    recordData = make(map[string]interface{}, 0)
+    var dataMap map[string]interface{} = data.(map[string]interface{})
+
+    // tag
+    v := dataMap["tag"].(string)
+    recordData["tag"] = v
+
+    // value
+    v = dataMap["value"].(string)
+    recordData["value"] = v
+
+    opt.SetData(recordData)
+
+  // SRV record input
+  case cisDNSRecordTypeSRV:
+    data, ok = d.GetOk(cisDNSRecordData)
+    if ok == false {
+      log.Printf("Error in getting data")
+      return err
+    }
+    recordData = make(map[string]interface{}, 0)
+    var dataMap map[string]interface{} = data.(map[string]interface{})
+
+    // name
+    v := dataMap["name"].(string)
+    recordData["name"] = v
+
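+    // The remaining SRV fields (target, proto, service, port, priority,
+    // weight) are copied into the same data map below; the numeric
+    // values arrive as strings and are parsed first.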
// target + v = dataMap["target"].(string) + recordData["target"] = v + + // proto + v = dataMap["proto"].(string) + recordData["proto"] = v + + // service + v = dataMap["service"].(string) + recordData["service"] = v + opt.SetData(recordData) + + // port + s, ok := strconv.Atoi(dataMap["port"].(string)) + if ok != nil { + return ok + } + recordData["port"] = s + + // priority + s, ok = strconv.Atoi(dataMap["priority"].(string)) + if ok != nil { + return ok + } + recordData["priority"] = s + + // weight + s, ok = strconv.Atoi(dataMap["weight"].(string)) + if ok != nil { + return ok + } + recordData["weight"] = s + opt.SetData(recordData) + + default: + name, nameOk := d.GetOk("name") + if nameOk { + opt.SetName(name.(string)) + } + content, contentOk := d.GetOk("content") + if contentOk { + opt.SetContent(content.(string)) + } + + data, dataOk := d.GetOk("data") + + newDataMap := make(map[string]interface{}) + + if dataOk { + for id, content := range data.(map[string]interface{}) { + newData, err := transformToIBMCISDnsData(recordType, id, content) + if err != nil { + return err + } else if newData == nil { + continue + } + newDataMap[id] = newData + } + + opt.SetData(newDataMap) + } + + if contentOk == dataOk { + return fmt.Errorf( + "either 'content' (present: %t) or 'data' (present: %t) must be provided", + contentOk, dataOk) + } + + if priority, ok := d.GetOk("priority"); ok { + opt.SetPriority(priority.(int64)) + } + if ttl, ok := d.GetOk("ttl"); ok { + opt.SetTTL(int64(ttl.(int))) + } + + } + + result, response, err := sess.CreateDnsRecord(opt) + if err != nil { + log.Printf("Error creating dns record: %s, error %s", response, err) + return err + } + + d.SetId(convertCisToTfThreeVar(*result.Result.ID, zoneID, crn)) + return resourceIBMCISDnsRecordUpdate(d, meta) + +} + +func resourceIBMCISDnsRecordRead(d *schema.ResourceData, meta interface{}) error { + var ( + crn string + zoneID string + recordID string + ) + sess, err := meta.(ClientSession).CisDNSRecordClientSession() + if err != nil { + return err + } + + recordID, zoneID, crn, _ = convertTfToCisThreeVar(d.Id()) + if err != nil { + return err + } + sess.Crn = core.StringPtr(crn) + sess.ZoneIdentifier = core.StringPtr(zoneID) + + opt := sess.NewGetDnsRecordOptions(recordID) + result, response, err := sess.GetDnsRecord(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("Error reading dns record: %s", response) + return err + } + + d.Set(cisID, crn) + d.Set(cisDomainID, *result.Result.ZoneID) + d.Set(cisDNSRecordID, *result.Result.ID) + d.Set(cisZoneName, *result.Result.ZoneName) + d.Set(cisDNSRecordCreatedOn, *result.Result.CreatedOn) + d.Set(cisDNSRecordModifiedOn, *result.Result.ModifiedOn) + d.Set(cisDNSRecordName, *result.Result.Name) + d.Set(cisDNSRecordType, *result.Result.Type) + if result.Result.Content != nil { + d.Set(cisDNSRecordContent, *result.Result.Content) + } + d.Set(cisDNSRecordProxiable, *result.Result.Proxiable) + d.Set(cisDNSRecordProxied, *result.Result.Proxied) + d.Set(cisDNSRecordTTL, *result.Result.TTL) + if result.Result.Priority != nil { + d.Set(cisDNSRecordPriority, *result.Result.Priority) + } + if result.Result.Data != nil { + d.Set(cisDNSRecordData, flattenData(result.Result.Data, *result.Result.ZoneName)) + } + return nil +} + +func resourceIBMCISDnsRecordUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).CisDNSRecordClientSession() + if err != nil { + log.Printf("Error: %s", err) + return 
err + } + var ( + recordID string + crn string + zoneID string + recordName string + recordType string + recordContent string + recordPriority int + ttl int + ok bool + proxied bool + data interface{} + recordData map[string]interface{} + ) + // session options + recordID, zoneID, crn, err = convertTfToCisThreeVar(d.Id()) + if err != nil { + log.Println("Error in reading record id") + return err + } + sess.Crn = core.StringPtr(crn) + sess.ZoneIdentifier = core.StringPtr(zoneID) + + // Input options + opt := sess.NewUpdateDnsRecordOptions(recordID) + + if d.HasChange(cisDNSRecordName) || + d.HasChange(cisDNSRecordType) || + d.HasChange(cisDNSRecordContent) || + d.HasChange(cisDNSRecordProxiable) || + d.HasChange(cisDNSRecordProxied) || + d.HasChange(cisDNSRecordTTL) || + d.HasChange(cisDNSRecordPriority) || + d.HasChange(cisDNSRecordData) { + + // set record type + recordType = d.Get(cisDNSRecordType).(string) + opt.SetType(recordType) + // set ttl value + ttl = d.Get(cisDNSRecordTTL).(int) + opt.SetTTL(int64(ttl)) + + // set proxied + proxied = d.Get(cisDNSRecordProxied).(bool) + opt.SetProxied(proxied) + + switch recordType { + // A, AAAA, CNAME, SPF, TXT, NS, PTR records inputs + case cisDNSRecordTypeA, + cisDNSRecordTypeAAAA, + cisDNSRecordTypeCNAME, + cisDNSRecordTypeSPF, + cisDNSRecordTypeTXT, + cisDNSRecordTypeNS, + cisDNSRecordTypePTR: + // set record name & content + recordName = d.Get(cisDNSRecordName).(string) + opt.SetName(recordName) + recordContent = d.Get(cisDNSRecordContent).(string) + opt.SetContent(recordContent) + + // MX Record inputs + case cisDNSRecordTypeMX: + + // set record name + recordName = d.Get(cisDNSRecordName).(string) + opt.SetName(recordName) + + // set content + recordContent = d.Get(cisDNSRecordContent).(string) + opt.SetContent(recordContent) + + // set priority + recordPriority = d.Get(cisDNSRecordPriority).(int) + opt.SetPriority(int64(recordPriority)) + + // LOC Record inputs + case cisDNSRecordTypeLOC: + + // set record name + recordName = d.Get(cisDNSRecordName).(string) + opt.SetName(recordName) + + data, ok = d.GetOk(cisDNSRecordData) + if ok == false { + log.Printf("Error in getting data") + return err + } + recordData = make(map[string]interface{}, 0) + var dataMap map[string]interface{} = data.(map[string]interface{}) + + // altitude + v, ok := strconv.ParseFloat(dataMap["altitude"].(string), 64) + if ok != nil { + return ok + } + recordData["altitude"] = v + + // lat_degrees + i, ok := strconv.Atoi(dataMap["lat_degrees"].(string)) + if ok != nil { + return ok + } + recordData["lat_degrees"] = i + + // lat_direction + recordData["lat_direction"] = dataMap["lat_direction"].(string) + + // long_direction + recordData["long_direction"] = dataMap["long_direction"].(string) + + // lat_minutes + i, ok = strconv.Atoi(dataMap["lat_minutes"].(string)) + if ok != nil { + return ok + } + recordData["lat_minutes"] = i + + // lat_seconds + v, ok = strconv.ParseFloat(dataMap["lat_seconds"].(string), 64) + if ok != nil { + return ok + } + recordData["lat_seconds"] = v + + // long_degrees + i, ok = strconv.Atoi(dataMap["long_degrees"].(string)) + if ok != nil { + return ok + } + recordData["long_degrees"] = i + + // long_minutes + i, ok = strconv.Atoi(dataMap["long_minutes"].(string)) + if ok != nil { + return ok + } + recordData["long_minutes"] = i + + // long_seconds + v, ok = strconv.ParseFloat(dataMap["long_seconds"].(string), 64) + if ok != nil { + return ok + } + recordData["long_seconds"] = v + + // percision_horz + v, ok = 
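+      // precision_horz, precision_vert and size are float fields, parsed
+      // with strconv.ParseFloat like the *_seconds values above.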
strconv.ParseFloat(dataMap["precision_horz"].(string), 64) + if ok != nil { + return ok + } + recordData["precision_horz"] = v + + // precision_vert + v, ok = strconv.ParseFloat(dataMap["precision_vert"].(string), 64) + if ok != nil { + return ok + } + recordData["precision_vert"] = v + + // size + v, ok = strconv.ParseFloat(dataMap["size"].(string), 64) + if ok != nil { + return ok + } + recordData["size"] = v + + opt.SetData(recordData) + + // CAA Record inputs + case cisDNSRecordTypeCAA: + + // set record name + recordName = d.Get(cisDNSRecordName).(string) + opt.SetName(recordName) + data, ok = d.GetOk(cisDNSRecordData) + if ok == false { + log.Printf("Error in getting data") + return err + } + recordData = make(map[string]interface{}, 0) + var dataMap map[string]interface{} = data.(map[string]interface{}) + + // tag + v := dataMap["tag"].(string) + recordData["tag"] = v + + // value + v = dataMap["value"].(string) + recordData["value"] = v + + opt.SetData(recordData) + + // SRV record input + case cisDNSRecordTypeSRV: + data, ok = d.GetOk(cisDNSRecordData) + if ok == false { + log.Printf("Error in getting data") + return err + } + recordData = make(map[string]interface{}, 0) + var dataMap map[string]interface{} = data.(map[string]interface{}) + + // name + v := dataMap["name"].(string) + recordData["name"] = v + + // target + v = dataMap["target"].(string) + recordData["target"] = v + + // proto + v = dataMap["proto"].(string) + recordData["proto"] = v + + // service + v = dataMap["service"].(string) + recordData["service"] = v + opt.SetData(recordData) + + // port + s, ok := strconv.Atoi(dataMap["port"].(string)) + if ok != nil { + return ok + } + recordData["port"] = s + + // priority + s, ok = strconv.Atoi(dataMap["priority"].(string)) + if ok != nil { + return ok + } + recordData["priority"] = s + + // weight + s, ok = strconv.Atoi(dataMap["weight"].(string)) + if ok != nil { + return ok + } + recordData["weight"] = s + opt.SetData(recordData) + default: + if d.HasChange(cisDNSRecordName) || + d.HasChange(cisDNSRecordContent) || + d.HasChange(cisDNSRecordProxied) || + d.HasChange(cisDNSRecordTTL) || + d.HasChange(cisDNSRecordPriority) || + d.HasChange(cisDNSRecordData) { + + if name, ok := d.Get(cisDNSRecordName).(string); ok { + opt.SetName(name) + } + content, contentOk := d.GetOk(cisDNSRecordContent) + if contentOk { + opt.SetContent(content.(string)) + } + proxied, proxiedOk := d.GetOk(cisDNSRecordProxied) + ttl, ttlOK := d.GetOk(cisDNSRecordTTL) + if proxiedOk { + opt.SetProxied(proxied.(bool)) + } + if ttlOK { + opt.SetTTL(int64(ttl.(int))) + } + if ttl != 1 && proxied == true { + return fmt.Errorf("To enable proxy TTL should be Automatic %s", + "i.e it should be set to 1. 
For the values other than Automatic, proxy should be disabled.")
+        }
+        priority, priorityOk := d.GetOk(cisDNSRecordPriority)
+        if priorityOk {
+          opt.SetPriority(priority.(int64))
+        }
+
+        data, dataOk := d.GetOk(cisDNSRecordData)
+        newDataMap := make(map[string]interface{})
+        if dataOk {
+          for id, content := range data.(map[string]interface{}) {
+            newData, err := transformToIBMCISDnsData(recordType, id, content)
+            if err != nil {
+              return err
+            } else if newData == nil {
+              continue
+            }
+            newDataMap[id] = newData
+          }
+
+          opt.SetData(newDataMap)
+        }
+        if contentOk == dataOk {
+          return fmt.Errorf(
+            "either 'content' (present: %t) or 'data' (present: %t) must be provided",
+            contentOk, dataOk)
+        }
+      }
+    }
+
+    result, response, err := sess.UpdateDnsRecord(opt)
+    if err != nil {
+      log.Printf("Error updating dns record: %s, error %s", response, err)
+      return err
+    }
+    log.Printf("record id: %s", *result.Result.ID)
+  }
+  return resourceIBMCISDnsRecordRead(d, meta)
+}
+
+func resourceIBMCISDnsRecordDelete(d *schema.ResourceData, meta interface{}) error {
+  var (
+    crn      string
+    zoneID   string
+    recordID string
+  )
+  sess, err := meta.(ClientSession).CisDNSRecordClientSession()
+  if err != nil {
+    log.Printf("Error: %s", err)
+    return err
+  }
+  // session options
+  recordID, zoneID, crn, _ = convertTfToCisThreeVar(d.Id())
+  if err != nil {
+    log.Println("Error in reading input")
+    return err
+  }
+  sess.Crn = core.StringPtr(crn)
+  sess.ZoneIdentifier = core.StringPtr(zoneID)
+
+  delOpt := sess.NewDeleteDnsRecordOptions(recordID)
+  _, response, err := sess.DeleteDnsRecord(delOpt)
+
+  if err != nil && !strings.Contains(err.Error(), "Request failed with status code: 404") {
+    log.Printf("Error deleting dns record %s: %s", recordID, response)
+    return err
+  }
+  d.SetId("")
+  return nil
+}
+
+func resourceIBMCISDnsRecordExist(d *schema.ResourceData, meta interface{}) (bool, error) {
+  sess, err := meta.(ClientSession).CisDNSRecordClientSession()
+  if err != nil {
+    log.Printf("session creation failed: %s", err)
+    return false, err
+  }
+
+  // session options
+  recordID, zoneID, crn, _ := convertTfToCisThreeVar(d.Id())
+  if err != nil {
+    log.Println("Error in reading input")
+    return false, err
+  }
+
+  sess.Crn = core.StringPtr(crn)
+  sess.ZoneIdentifier = core.StringPtr(zoneID)
+
+  opt := sess.NewGetDnsRecordOptions(recordID)
+  _, response, err := sess.GetDnsRecord(opt)
+  if err != nil {
+    if response != nil && response.StatusCode == 404 {
+      log.Printf("DNS record is not found")
+      return false, nil
+    }
+    log.Printf("get DNS record failed")
+    return false, err
+  }
+  return true, nil
+}
+
+var dnsTypeIntFields = []string{
+  "algorithm",
+  "key_tag",
+  "type",
+  "usage",
+  "selector",
+  "matching_type",
+  "weight",
+  "priority",
+  "port",
+  "long_degrees",
+  "lat_degrees",
+  "long_minutes",
+  "lat_minutes",
+  "protocol",
+  "digest_type",
+  "order",
+  "preference",
+}
+
+var dnsTypeFloatFields = []string{
+  "size",
+  "altitude",
+  "precision_horz",
+  "precision_vert",
+  "long_seconds",
+  "lat_seconds",
+}
+
+func suppressPriority(k, old, new string, d *schema.ResourceData) bool {
+  // Priority is only meaningful for MX and URI records.
+  recordType := d.Get("type").(string)
+  return recordType != "MX" && recordType != "URI"
+}
+
+func suppressNameDiff(k, old, new string, d *schema.ResourceData) bool {
+  // CIS concatenates the record name with the domain, so only compare the name part.
+  if strings.SplitN(old, ".", 2)[0] == strings.SplitN(new, ".", 2)[0] {
+    return true
+  }
+  // If the name is "@", it is replaced by the domain name, so skip the check.
+  if new == "@" {
+    return true
+  }
+
+  return false
+}
+func suppressContentDiff(k, old, new string, d *schema.ResourceData) bool {
+  return new == "" && old != ""
+}
+
+func suppressDataDiff(k, old, new string, d *schema.ResourceData) bool {
+  // Truncate after the first "." and compare only the leading label.
+  return strings.SplitN(old, ".", 2)[0] == strings.SplitN(new, ".", 2)[0]
+}
+
+func suppressDomainIDDiff(k, old, new string, d *schema.ResourceData) bool {
+  // TF concatenates domain_id with cis_id, so check whether the domain_id
+  // part of the new value matches the stored domain_id.
+  return strings.Split(new, ":")[0] == old
+}
+func flattenData(inVal interface{}, zone string) map[string]string {
+  outVal := make(map[string]string)
+  if inVal == nil {
+    return outVal
+  }
+  for k, v := range inVal.(map[string]interface{}) {
+    strValue := fmt.Sprintf("%v", v)
+    if k == "name" {
+      strValue = strings.Replace(strValue, "."+zone, "", -1)
+    }
+    outVal[k] = strValue
+  }
+  return outVal
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_dns_records_import.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_dns_records_import.go
new file mode 100644
index 00000000000..aac3fcbf4ce
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_dns_records_import.go
@@ -0,0 +1,112 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+  "log"
+  "os"
+  "strconv"
+  "strings"
+
+  "github.com/IBM/go-sdk-core/v4/core"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+  cisDNSRecordsImportFile               = "file"
+  cisDNSRecordsImportTotalRecordsParsed = "total_records_parsed"
+  cisDNSRecordsImportRecordsAdded       = "records_added"
+)
+
+func resourceIBMCISDNSRecordsImport() *schema.Resource {
+  return &schema.Resource{
+    Schema: map[string]*schema.Schema{
+      cisID: {
+        Type:        schema.TypeString,
+        Description: "CIS instance crn",
+        Required:    true,
+      },
+      cisDomainID: {
+        Type:             schema.TypeString,
+        Description:      "Associated CIS domain",
+        Required:         true,
+        DiffSuppressFunc: suppressDomainIDDiff,
+      },
+      cisDNSRecordsImportFile: {
+        Type:        schema.TypeString,
+        Description: "File to import",
+        Required:    true,
+        ForceNew:    true,
+      },
+      cisDNSRecordsImportTotalRecordsParsed: {
+        Type:        schema.TypeInt,
+        Description: "total records parsed",
+        Computed:    true,
+      },
+      cisDNSRecordsImportRecordsAdded: {
+        Type:        schema.TypeInt,
+        Description: "added records count",
+        Computed:    true,
+      },
+    },
+
+    Create:   resourceCISDNSRecordsImportUpdate,
+    Read:     resourceCISDNSRecordsImportRead,
+    Update:   resourceCISDNSRecordsImportRead,
+    Delete:   resourceCISDNSRecordsImportDelete,
+    Importer: &schema.ResourceImporter{},
+  }
+}
+func resourceCISDNSRecordsImportUpdate(d *schema.ResourceData, meta interface{}) error {
+  cisClient, err := meta.(ClientSession).CisDNSRecordBulkClientSession()
+  if err != nil {
+    return err
+  }
+
+  crn := d.Get(cisID).(string)
+  zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string))
+  cisClient.Crn = core.StringPtr(crn)
+  cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+  file := d.Get(cisDNSRecordsImportFile).(string)
+
+  f, err := os.Open(file)
+  if err != nil {
+    return err
+  }
+  opt := cisClient.NewPostDnsRecordsBulkOptions()
+  opt.SetFile(f)
+  result, response, err := cisClient.PostDnsRecordsBulk(opt)
+  if err != nil {
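+    // On failure the raw HTTP response is logged for diagnosis before
+    // the error is returned.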
log.Printf("Error importing dns records: %v", response) + return err + } + id := fmt.Sprintf("%v:%v:%s:%s:%s", *result.Result.TotalRecordsParsed, + *result.Result.RecsAdded, file, zoneID, crn) + d.SetId(id) + + return nil + +} + +func resourceCISDNSRecordsImportRead(d *schema.ResourceData, meta interface{}) error { + idSplitStr := strings.SplitN(d.Id(), ":", 5) + parsed, _ := strconv.Atoi(idSplitStr[0]) + added, _ := strconv.Atoi(idSplitStr[1]) + file := idSplitStr[2] + zoneID := idSplitStr[3] + crn := idSplitStr[4] + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisDNSRecordsImportFile, file) + d.Set(cisDNSRecordsImportTotalRecordsParsed, parsed) + d.Set(cisDNSRecordsImportRecordsAdded, added) + return nil +} + +func resourceCISDNSRecordsImportDelete(d *schema.ResourceData, meta interface{}) error { + // Nothing to delete on CIS DNS Record import resource + d.SetId("") + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_domain.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_domain.go new file mode 100644 index 00000000000..63962ccb14b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_domain.go @@ -0,0 +1,170 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisDomain = "domain" + cisDomainPaused = "paused" + cisDomainStatus = "status" + cisDomainNameServers = "name_servers" + cisDomainOriginalNameServers = "original_name_servers" +) + +func resourceIBMCISDomain() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS object id", + Required: true, + }, + cisDomain: { + Type: schema.TypeString, + Description: "CISzone - Domain", + Required: true, + }, + cisDomainPaused: { + Type: schema.TypeBool, + Computed: true, + }, + cisDomainStatus: { + Type: schema.TypeString, + Computed: true, + }, + cisDomainNameServers: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + cisDomainOriginalNameServers: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + cisDomainID: { + Type: schema.TypeString, + Computed: true, + }, + }, + Create: resourceCISdomainCreate, + Read: resourceCISdomainRead, + Exists: resourceCISdomainExists, + Update: resourceCISdomainUpdate, + Delete: resourceCISdomainDelete, + Importer: &schema.ResourceImporter{}, + } +} + +func resourceCISdomainCreate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisZonesV1ClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + cisClient.Crn = core.StringPtr(crn) + zoneName := d.Get(cisDomain).(string) + + opt := cisClient.NewCreateZoneOptions() + opt.SetName(zoneName) + result, resp, err := cisClient.CreateZone(opt) + if err != nil { + log.Printf("CreateZones Failed %s", resp) + return err + } + d.SetId(convertCisToTfTwoVar(*result.Result.ID, crn)) + return resourceCISdomainRead(d, meta) +} + +func resourceCISdomainRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisZonesV1ClientSession() + if err != nil { + return err + } + + zoneID, crn, err := convertTftoCisTwoVar(d.Id()) + if err != nil { + return err + } + 
cisClient.Crn = core.StringPtr(crn) + opt := cisClient.NewGetZoneOptions(zoneID) + result, resp, err := cisClient.GetZone(opt) + if err != nil { + log.Printf("[WARN] Error getting zone %v\n", resp) + return err + } + d.Set(cisID, crn) + d.Set(cisDomainID, result.Result.ID) + d.Set(cisDomain, result.Result.Name) + d.Set(cisDomainStatus, result.Result.Status) + d.Set(cisDomainPaused, result.Result.Paused) + d.Set(cisDomainNameServers, result.Result.NameServers) + d.Set(cisDomainOriginalNameServers, result.Result.OriginalNameServers) + + return nil +} +func resourceCISdomainExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisZonesV1ClientSession() + if err != nil { + return false, err + } + + zoneID, crn, err := convertTftoCisTwoVar(d.Id()) + log.Println("resource exist :", d.Id()) + if err != nil { + return false, err + } + log.Println("resource exist :", d.Id()) + cisClient.Crn = core.StringPtr(crn) + opt := cisClient.NewGetZoneOptions(zoneID) + _, resp, err := cisClient.GetZone(opt) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + log.Printf("[WARN] zone is not found") + return false, nil + } + log.Printf("[WARN] Error getting zone %v\n", resp) + return false, err + } + return true, nil +} + +func resourceCISdomainUpdate(d *schema.ResourceData, meta interface{}) error { + return resourceCISdomainRead(d, meta) +} + +func resourceCISdomainDelete(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisZonesV1ClientSession() + if err != nil { + return err + } + + zoneID, crn, err := convertTftoCisTwoVar(d.Id()) + log.Println("resource delete :", d.Id()) + + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + opt := cisClient.NewGetZoneOptions(zoneID) + _, resp, err := cisClient.GetZone(opt) + if err != nil { + log.Printf("[WARN] Error getting zone %v\n", resp) + return err + } + delOpt := cisClient.NewDeleteZoneOptions(zoneID) + _, resp, err = cisClient.DeleteZone(delOpt) + if err != nil { + log.Printf("[ERR] Error deleting zone %v\n", resp) + return err + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_domain_settings.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_domain_settings.go new file mode 100644 index 00000000000..13e965d6503 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_domain_settings.go @@ -0,0 +1,1257 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISDomainSettings = "ibm_cis_domain_settings" + cisDomainSettingsDNSSEC = "dnssec" + cisDomainSettingsWAF = "waf" + cisDomainSettingsSSL = "ssl" + cisDomainSettingsCertificateStatus = "certificate_status" + cisDomainSettingsMinTLSVersion = "min_tls_version" + cisDomainSettingsCNAMEFlattening = "cname_flattening" + cisDomainSettingsOpportunisticEncryption = "opportunistic_encryption" + cisDomainSettingsAutomaticHTPSRewrites = "automatic_https_rewrites" + cisDomainSettingsAlwaysUseHTTPS = "always_use_https" + cisDomainSettingsIPv6 = "ipv6" + cisDomainSettingsBrowserCheck = "browser_check" + cisDomainSettingsHotlinkProtection = "hotlink_protection" + cisDomainSettingsHTTP2 = "http2" + cisDomainSettingsImageLoadOptimization = "image_load_optimization" + cisDomainSettingsImageSizeOptimization = "image_size_optimization" + cisDomainSettingsIPGeoLocation = "ip_geolocation" + cisDomainSettingsOriginErrorPagePassThru = "origin_error_page_pass_thru" + cisDomainSettingsBrotli = "brotli" + cisDomainSettingsPseudoIPv4 = "pseudo_ipv4" + cisDomainSettingsPrefetchPreload = "prefetch_preload" + cisDomainSettingsResponseBuffering = "response_buffering" + cisDomainSettingsScriptLoadOptimisation = "script_load_optimization" + cisDomainSettingsServerSideExclude = "server_side_exclude" + cisDomainSettingsTLSClientAuth = "tls_client_auth" + cisDomainSettingsTrueClientIPHeader = "true_client_ip_header" + cisDomainSettingsWebSockets = "websockets" + cisDomainSettingsChallengeTTL = "challenge_ttl" + cisDomainSettingsMinify = "minify" + cisDomainSettingsMinifyCSS = "css" + cisDomainSettingsMinifyHTML = "html" + cisDomainSettingsMinifyJS = "js" + cisDomainSettingsSecurityHeader = "security_header" + cisDomainSettingsSecurityHeaderEnabled = "enabled" + cisDomainSettingsSecurityHeaderMaxAge = "max_age" + cisDomainSettingsSecurityHeaderIncludeSubdomains = "include_subdomains" + cisDomainSettingsSecurityHeaderNoSniff = "nosniff" + cisDomainSettingsMobileRedirect = "mobile_redirect" + cisDomainSettingsMobileRedirectStatus = "status" + cisDomainSettingsMobileRedirectMobileSubdomain = "mobile_subdomain" + cisDomainSettingsMobileRedirectStripURI = "strip_uri" + cisDomainSettingsMaxUpload = "max_upload" + cisDomainSettingsCipher = "cipher" + cisDomainSettingsONOFFValidatorID = "on_off" + cisDomainSettingsActiveDisableValidatorID = "active_disable" + cisDomainSettingsSSLSettingValidatorID = "ssl_setting" + cisDomainSettingsTLSVersionValidatorID = "tls_version" + cisDomainSettingsCNAMEFlattenValidatorID = "cname_flatten" + cisDomainSettingsImgSizeOptimizeValidatorID = "img_size_optimize" + cisDomainSettingsPseudoIPv4ValidatorID = "psuedo_ipv4" + cisDomainSettingsChallengeTTLValidatorID = "challenge_ttl" + cisDomainSettingsMaxUploadValidatorID = "max_upload" + cisDomainSettingsCipherValidatorID = "cipher" +) + +func resourceIBMCISSettings() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS instance crn", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisDomainSettingsDNSSEC: { + Type: schema.TypeString, + Description: "DNS Sec setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + 
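+        // "active_disable" style validator: dnssec is toggled with
+        // active/disabled values rather than the on/off values used by
+        // most settings below (the exact allowed values are defined with
+        // the validator, outside this hunk).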
ibmCISDomainSettings, + cisDomainSettingsActiveDisableValidatorID), + }, + cisDomainSettingsWAF: { + Type: schema.TypeString, + Description: "WAF setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsSSL: { + Type: schema.TypeString, + Description: "SSL/TLS setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsSSLSettingValidatorID), + }, + cisDomainSettingsCertificateStatus: { + Type: schema.TypeString, + Description: "Certificate status", + Computed: true, + Deprecated: "This field is deprecated", + }, + cisDomainSettingsMinTLSVersion: { + Type: schema.TypeString, + Description: "Minimum version of TLS required", + Optional: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsTLSVersionValidatorID), + Default: "1.1", + }, + cisDomainSettingsCNAMEFlattening: { + Type: schema.TypeString, + Description: "cname_flattening setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsCNAMEFlattenValidatorID), + }, + cisDomainSettingsOpportunisticEncryption: { + Type: schema.TypeString, + Description: "opportunistic_encryption setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsAutomaticHTPSRewrites: { + Type: schema.TypeString, + Description: "automatic_https_rewrites setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsAlwaysUseHTTPS: { + Type: schema.TypeString, + Description: "always_use_https setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsIPv6: { + Type: schema.TypeString, + Description: "ipv6 setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsBrowserCheck: { + Type: schema.TypeString, + Description: "browser_check setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsHotlinkProtection: { + Type: schema.TypeString, + Description: "hotlink_protection setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsHTTP2: { + Type: schema.TypeString, + Description: "http2 setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsImageLoadOptimization: { + Type: schema.TypeString, + Description: "image_load_optimization setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsImageSizeOptimization: { + Type: schema.TypeString, + Description: "image_size_optimization setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsImgSizeOptimizeValidatorID), + }, + cisDomainSettingsIPGeoLocation: { + Type: schema.TypeString, + Description: "ip_geolocation setting", + Optional: true, + Computed: true, + 
ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsOriginErrorPagePassThru: { + Type: schema.TypeString, + Description: "origin_error_page_pass_thru setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsBrotli: { + Type: schema.TypeString, + Description: "brotli setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsPseudoIPv4: { + Type: schema.TypeString, + Description: "pseudo_ipv4 setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsPseudoIPv4ValidatorID), + }, + cisDomainSettingsPrefetchPreload: { + Type: schema.TypeString, + Description: "prefetch_preload setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsResponseBuffering: { + Type: schema.TypeString, + Description: "response_buffering setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsScriptLoadOptimisation: { + Type: schema.TypeString, + Description: "script_load_optimization setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsServerSideExclude: { + Type: schema.TypeString, + Description: "server_side_exclude setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsTLSClientAuth: { + Type: schema.TypeString, + Description: "tls_client_auth setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsTrueClientIPHeader: { + Type: schema.TypeString, + Description: "true_client_ip_header setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsWebSockets: { + Type: schema.TypeString, + Description: "websockets setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsChallengeTTL: { + Type: schema.TypeInt, + Description: "Challenge TTL setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsChallengeTTLValidatorID), + }, + cisDomainSettingsMaxUpload: { + Type: schema.TypeInt, + Description: "Maximum upload", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsMaxUploadValidatorID), + }, + cisDomainSettingsCipher: { + Type: schema.TypeSet, + Description: "Cipher settings", + Optional: true, + Computed: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsCipherValidatorID), + }, + }, + cisDomainSettingsMinify: { + Type: schema.TypeList, + Description: "Minify setting", + Optional: true, + Computed: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
cisDomainSettingsMinifyCSS: { + Type: schema.TypeString, + Description: "Minify CSS setting", + Required: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsMinifyHTML: { + Type: schema.TypeString, + Description: "Minify HTML setting", + Required: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsMinifyJS: { + Type: schema.TypeString, + Description: "Minify JS setting", + Required: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + }, + }, + }, + cisDomainSettingsSecurityHeader: { + Type: schema.TypeList, + Description: "Security Header Setting", + Optional: true, + Computed: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisDomainSettingsSecurityHeaderEnabled: { + Type: schema.TypeBool, + Description: "security header enabled/disabled", + Required: true, + }, + cisDomainSettingsSecurityHeaderIncludeSubdomains: { + Type: schema.TypeBool, + Description: "security header subdomain included or not", + Required: true, + }, + cisDomainSettingsSecurityHeaderMaxAge: { + Type: schema.TypeInt, + Description: "security header max age", + Required: true, + }, + cisDomainSettingsSecurityHeaderNoSniff: { + Type: schema.TypeBool, + Description: "security header no sniff", + Required: true, + }, + }, + }, + }, + cisDomainSettingsMobileRedirect: { + Type: schema.TypeList, + Optional: true, + Computed: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisDomainSettingsMobileRedirectStatus: { + Type: schema.TypeString, + Description: "mobile redirect status", + Required: true, + ValidateFunc: InvokeValidator( + ibmCISDomainSettings, + cisDomainSettingsONOFFValidatorID), + }, + cisDomainSettingsMobileRedirectMobileSubdomain: { + Type: schema.TypeString, + Description: "Mobile redirect subdomain", + Optional: true, + Computed: true, + }, + cisDomainSettingsMobileRedirectStripURI: { + Type: schema.TypeBool, + Description: "mobile redirect strip URI", + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + + Create: resourceCISSettingsUpdate, + Read: resourceCISSettingsRead, + Update: resourceCISSettingsUpdate, + Delete: resourceCISSettingsDelete, + Importer: &schema.ResourceImporter{}, + } +} + +func resourceIBMCISDomainSettingValidator() *ResourceValidator { + + sslSetting := "off, flexible, full, strict, origin_pull" + tlsVersion := "1.1, 1.2, 1.3, 1.4" + cnameFlatten := "flatten_at_root, flatten_all, flatten_none" + imgSizeOptimize := "lossless, off, lossy" + pseudoIPv4 := "overwrite_header, off, add_header" + challengeTTL := "300, 900, 1800, 2700, 3600, 7200, 10800, 14400, 28800, 57600, 86400, 604800, 2592000, 31536000" + maxUpload := "100, 125, 150, 175, 200, 225, 250, 275, 300, 325, 350, 375, 400, 425, 450, 475, 500" + cipher := "ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-ECDSA-CHACHA20-POLY1305, ECDHE-RSA-AES128-GCM-SHA256,ECDHE-RSA-CHACHA20-POLY1305, ECDHE-ECDSA-AES128-SHA256, ECDHE-ECDSA-AES128-SHA, ECDHE-RSA-AES128-SHA256, ECDHE-RSA-AES128-SHA, AES128-GCM-SHA256, AES128-SHA256, AES128-SHA, ECDHE-ECDSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-SHA384, ECDHE-RSA-AES256-GCM-SHA384, ECDHE-RSA-AES256-SHA384, ECDHE-RSA-AES256-SHA, AES256-GCM-SHA384, AES256-SHA256, AES256-SHA, DES-CBC3-SHA" + + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + 
Identifier: cisDomainSettingsONOFFValidatorID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: "on, off"}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisDomainSettingsActiveDisableValidatorID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: "active, disabled"}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisDomainSettingsSSLSettingValidatorID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: sslSetting}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisDomainSettingsTLSVersionValidatorID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: tlsVersion}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisDomainSettingsCNAMEFlattenValidatorID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: cnameFlatten}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisDomainSettingsImgSizeOptimizeValidatorID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: imgSizeOptimize}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisDomainSettingsPseudoIPv4ValidatorID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: pseudoIPv4}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisDomainSettingsChallengeTTLValidatorID, + ValidateFunctionIdentifier: ValidateAllowedIntValue, + Type: TypeInt, + Optional: true, + AllowedValues: challengeTTL}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisDomainSettingsMaxUploadValidatorID, + ValidateFunctionIdentifier: ValidateAllowedIntValue, + Type: TypeInt, + Optional: true, + AllowedValues: maxUpload}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisDomainSettingsCipherValidatorID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: cipher}) + ibmCISDomainSettingResourceValidator := ResourceValidator{ + ResourceName: ibmCISDomainSettings, + Schema: validateSchema} + return &ibmCISDomainSettingResourceValidator +} + +var settingsList = []string{ + cisDomainSettingsDNSSEC, + cisDomainSettingsWAF, + cisDomainSettingsSSL, + cisDomainSettingsMinTLSVersion, + cisDomainSettingsCNAMEFlattening, + cisDomainSettingsOpportunisticEncryption, + cisDomainSettingsAutomaticHTPSRewrites, + cisDomainSettingsAlwaysUseHTTPS, + cisDomainSettingsIPv6, + cisDomainSettingsBrowserCheck, + cisDomainSettingsHotlinkProtection, + cisDomainSettingsHTTP2, + cisDomainSettingsImageLoadOptimization, + cisDomainSettingsImageSizeOptimization, + cisDomainSettingsIPGeoLocation, + cisDomainSettingsOriginErrorPagePassThru, + cisDomainSettingsBrotli, + cisDomainSettingsPseudoIPv4, + cisDomainSettingsPrefetchPreload, + cisDomainSettingsResponseBuffering, + cisDomainSettingsScriptLoadOptimisation, + cisDomainSettingsServerSideExclude, + cisDomainSettingsTLSClientAuth, + cisDomainSettingsTrueClientIPHeader, + cisDomainSettingsWebSockets, + cisDomainSettingsChallengeTTL, + cisDomainSettingsMinify, + cisDomainSettingsSecurityHeader, + 
cisDomainSettingsMobileRedirect, + cisDomainSettingsMaxUpload, + cisDomainSettingsCipher, +} + +func resourceCISSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisDomainSettingsClientSession() + if err != nil { + return err + } + + cisID := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(cisID) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + for _, item := range settingsList { + var err error + var resp *core.DetailedResponse + + switch item { + case cisDomainSettingsDNSSEC: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateZoneDnssecOptions() + opt.SetStatus(v.(string)) + _, resp, err = cisClient.UpdateZoneDnssec(opt) + } + } + case cisDomainSettingsWAF: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateWebApplicationFirewallOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateWebApplicationFirewall(opt) + } + } + case cisDomainSettingsSSL: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(cisID) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewChangeSslSettingOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.ChangeSslSetting(opt) + } + } + + case cisDomainSettingsMinTLSVersion: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateMinTlsVersionOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateMinTlsVersion(opt) + } + } + case cisDomainSettingsCNAMEFlattening: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateZoneCnameFlatteningOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateZoneCnameFlattening(opt) + } + } + case cisDomainSettingsOpportunisticEncryption: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateOpportunisticEncryptionOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateOpportunisticEncryption(opt) + } + } + case cisDomainSettingsAutomaticHTPSRewrites: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateAutomaticHttpsRewritesOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateAutomaticHttpsRewrites(opt) + } + } + case cisDomainSettingsAlwaysUseHTTPS: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateAlwaysUseHttpsOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateAlwaysUseHttps(opt) + } + } + case cisDomainSettingsIPv6: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateIpv6Options() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateIpv6(opt) + } + } + case cisDomainSettingsBrowserCheck: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateBrowserCheckOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateBrowserCheck(opt) + } + } + case cisDomainSettingsHotlinkProtection: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateHotlinkProtectionOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateHotlinkProtection(opt) + } + } + case cisDomainSettingsHTTP2: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateHttp2Options() + 
opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateHttp2(opt) + } + } + case cisDomainSettingsImageLoadOptimization: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateImageLoadOptimizationOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateImageLoadOptimization(opt) + } + } + case cisDomainSettingsImageSizeOptimization: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateImageSizeOptimizationOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateImageSizeOptimization(opt) + } + } + case cisDomainSettingsIPGeoLocation: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateIpGeolocationOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateIpGeolocation(opt) + } + } + case cisDomainSettingsOriginErrorPagePassThru: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateEnableErrorPagesOnOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateEnableErrorPagesOn(opt) + } + } + case cisDomainSettingsPseudoIPv4: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdatePseudoIpv4Options() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdatePseudoIpv4(opt) + } + } + case cisDomainSettingsPrefetchPreload: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdatePrefetchPreloadOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdatePrefetchPreload(opt) + } + } + case cisDomainSettingsResponseBuffering: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateResponseBufferingOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateResponseBuffering(opt) + } + } + case cisDomainSettingsScriptLoadOptimisation: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateScriptLoadOptimizationOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateScriptLoadOptimization(opt) + } + } + case cisDomainSettingsServerSideExclude: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateServerSideExcludeOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateServerSideExclude(opt) + } + } + case cisDomainSettingsTLSClientAuth: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateTlsClientAuthOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateTlsClientAuth(opt) + } + } + case cisDomainSettingsTrueClientIPHeader: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateTrueClientIpOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateTrueClientIp(opt) + } + } + case cisDomainSettingsWebSockets: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateWebSocketsOptions() + opt.SetValue(v.(string)) + _, resp, err = cisClient.UpdateWebSockets(opt) + } + } + case cisDomainSettingsChallengeTTL: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateChallengeTtlOptions() + opt.SetValue(int64(v.(int))) + _, resp, err = cisClient.UpdateChallengeTTL(opt) + } + } + case cisDomainSettingsMaxUpload: + if d.HasChange(item) { + if v, ok := d.GetOk(item); ok { + opt := cisClient.NewUpdateMaxUploadOptions() + opt.SetValue(int64(v.(int))) + _, resp, err = cisClient.UpdateMaxUpload(opt) + } + } + case cisDomainSettingsCipher: + if d.HasChange(item) { + if v, ok := 
d.GetOk(item); ok {
+					cipherValue := expandStringList(v.(*schema.Set).List())
+					opt := cisClient.NewUpdateCiphersOptions()
+					opt.SetValue(cipherValue)
+					_, resp, err = cisClient.UpdateCiphers(opt)
+				}
+			}
+		case cisDomainSettingsMinify:
+			if d.HasChange(item) {
+				if v, ok := d.GetOk(item); ok {
+					dataMap := v.([]interface{})[0].(map[string]interface{})
+					css := dataMap[cisDomainSettingsMinifyCSS].(string)
+					html := dataMap[cisDomainSettingsMinifyHTML].(string)
+					js := dataMap[cisDomainSettingsMinifyJS].(string)
+					// Use a distinct error variable so the outer err is not
+					// shadowed and the UpdateMinify result is still checked below.
+					minifyVal, minifyErr := cisClient.NewMinifySettingValue(css, html, js)
+					if minifyErr != nil {
+						log.Println("Invalid minify setting values")
+						return minifyErr
+					}
+					opt := cisClient.NewUpdateMinifyOptions()
+					opt.SetValue(minifyVal)
+					_, resp, err = cisClient.UpdateMinify(opt)
+				}
+			}
+		case cisDomainSettingsSecurityHeader:
+			if d.HasChange(item) {
+				if v, ok := d.GetOk(item); ok {
+					dataMap := v.([]interface{})[0].(map[string]interface{})
+					enabled := dataMap[cisDomainSettingsSecurityHeaderEnabled].(bool)
+					nosniff := dataMap[cisDomainSettingsSecurityHeaderNoSniff].(bool)
+					includeSubdomain := dataMap[cisDomainSettingsSecurityHeaderIncludeSubdomains].(bool)
+					maxAge := int64(dataMap[cisDomainSettingsSecurityHeaderMaxAge].(int))
+					securityVal, securityErr := cisClient.NewSecurityHeaderSettingValueStrictTransportSecurity(
+						enabled, maxAge, includeSubdomain, nosniff)
+					if securityErr != nil {
+						log.Println("Invalid security header setting values")
+						return securityErr
+					}
+					securityOpt, securityErr := cisClient.NewSecurityHeaderSettingValue(securityVal)
+					if securityErr != nil {
+						log.Println("Invalid security header setting options")
+						return securityErr
+					}
+					opt := cisClient.NewUpdateSecurityHeaderOptions()
+					opt.SetValue(securityOpt)
+					_, resp, err = cisClient.UpdateSecurityHeader(opt)
+				}
+			}
+		case cisDomainSettingsMobileRedirect:
+			if d.HasChange(item) {
+				if v, ok := d.GetOk(item); ok {
+					dataMap := v.([]interface{})[0].(map[string]interface{})
+					status := dataMap[cisDomainSettingsMobileRedirectStatus].(string)
+					mobileSubdomain := dataMap[cisDomainSettingsMobileRedirectMobileSubdomain].(string)
+					stripURI := dataMap[cisDomainSettingsMobileRedirectStripURI].(bool)
+					mobileOpt, mobileErr := cisClient.NewMobileRedirecSettingValue(status, mobileSubdomain, stripURI)
+					if mobileErr != nil {
+						log.Println("Invalid mobile redirect options")
+						return mobileErr
+					}
+					opt := cisClient.NewUpdateMobileRedirectOptions()
+					opt.SetValue(mobileOpt)
+					_, resp, err = cisClient.UpdateMobileRedirect(opt)
+				}
+			}
+		}
+		if err != nil {
+			if resp != nil && resp.StatusCode == 405 {
+				log.Printf("[WARN] Update %s : %s", item, err)
+				continue
+			}
+			log.Printf("Update settings failed on %s, %v\n", item, resp)
+			return err
+		}
+	}
+	d.SetId(convertCisToTfTwoVar(zoneID, cisID))
+	return resourceCISSettingsRead(d, meta)
+}
+
+func resourceCISSettingsRead(d *schema.ResourceData, meta interface{}) error {
+	cisClient, err := meta.(ClientSession).CisDomainSettingsClientSession()
+	if err != nil {
+		return err
+	}
+
+	zoneID, crn, _ := convertTftoCisTwoVar(d.Id())
+	cisClient.Crn = core.StringPtr(crn)
+	cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+	for _, item := range settingsList {
+		var settingErr error
+		var settingResponse *core.DetailedResponse
+		switch item {
+		case cisDomainSettingsDNSSEC:
+			opt := cisClient.NewGetZoneDnssecOptions()
+			result, resp, err := cisClient.GetZoneDnssec(opt)
+			if err == nil {
+				d.Set(cisDomainSettingsDNSSEC, result.Result.Status)
+			}
+			settingResponse = resp
+			settingErr = err
+
+		case cisDomainSettingsWAF:
+			opt :=
cisClient.NewGetWebApplicationFirewallOptions() + result, resp, err := cisClient.GetWebApplicationFirewall(opt) + if err == nil { + d.Set(cisDomainSettingsWAF, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsSSL: + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewGetSslSettingOptions() + result, resp, err := cisClient.GetSslSetting(opt) + if err == nil { + d.Set(cisDomainSettingsSSL, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsBrotli: + cisClient, err := meta.(ClientSession).CisAPI() + if err != nil { + return err + } + settingsResult, err := cisClient.Settings().GetSetting(crn, zoneID, item) + if err == nil { + settingsObj := *settingsResult + d.Set(item, settingsObj.Value) + } + settingErr = err + + case cisDomainSettingsMinTLSVersion: + opt := cisClient.NewGetMinTlsVersionOptions() + result, resp, err := cisClient.GetMinTlsVersion(opt) + if err == nil { + d.Set(cisDomainSettingsMinTLSVersion, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsCNAMEFlattening: + opt := cisClient.NewGetZoneCnameFlatteningOptions() + result, resp, err := cisClient.GetZoneCnameFlattening(opt) + if err == nil { + d.Set(cisDomainSettingsCNAMEFlattening, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsOpportunisticEncryption: + opt := cisClient.NewGetOpportunisticEncryptionOptions() + result, resp, err := cisClient.GetOpportunisticEncryption(opt) + if err == nil { + d.Set(cisDomainSettingsOpportunisticEncryption, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsAutomaticHTPSRewrites: + opt := cisClient.NewGetAutomaticHttpsRewritesOptions() + result, resp, err := cisClient.GetAutomaticHttpsRewrites(opt) + if err == nil { + d.Set(cisDomainSettingsAutomaticHTPSRewrites, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsAlwaysUseHTTPS: + opt := cisClient.NewGetAlwaysUseHttpsOptions() + result, resp, err := cisClient.GetAlwaysUseHttps(opt) + if err == nil { + d.Set(cisDomainSettingsAlwaysUseHTTPS, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsIPv6: + opt := cisClient.NewGetIpv6Options() + result, resp, err := cisClient.GetIpv6(opt) + if err == nil { + d.Set(cisDomainSettingsIPv6, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsBrowserCheck: + opt := cisClient.NewGetBrowserCheckOptions() + result, resp, err := cisClient.GetBrowserCheck(opt) + if err == nil { + d.Set(cisDomainSettingsBrowserCheck, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsHotlinkProtection: + opt := cisClient.NewGetHotlinkProtectionOptions() + result, resp, err := cisClient.GetHotlinkProtection(opt) + if err == nil { + d.Set(cisDomainSettingsHotlinkProtection, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsHTTP2: + opt := cisClient.NewGetHttp2Options() + result, resp, err := cisClient.GetHttp2(opt) + if err == nil { + d.Set(cisDomainSettingsHTTP2, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsImageLoadOptimization: + opt := cisClient.NewGetImageLoadOptimizationOptions() + 
result, resp, err := cisClient.GetImageLoadOptimization(opt) + if err == nil { + d.Set(cisDomainSettingsImageLoadOptimization, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsImageSizeOptimization: + opt := cisClient.NewGetImageSizeOptimizationOptions() + result, resp, err := cisClient.GetImageSizeOptimization(opt) + if err == nil { + d.Set(cisDomainSettingsImageSizeOptimization, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsIPGeoLocation: + opt := cisClient.NewGetIpGeolocationOptions() + result, resp, err := cisClient.GetIpGeolocation(opt) + if err == nil { + d.Set(cisDomainSettingsIPGeoLocation, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsOriginErrorPagePassThru: + opt := cisClient.NewGetEnableErrorPagesOnOptions() + result, resp, err := cisClient.GetEnableErrorPagesOn(opt) + if err == nil { + d.Set(cisDomainSettingsOriginErrorPagePassThru, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsPseudoIPv4: + opt := cisClient.NewGetPseudoIpv4Options() + result, resp, err := cisClient.GetPseudoIpv4(opt) + if err == nil { + d.Set(cisDomainSettingsPseudoIPv4, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsPrefetchPreload: + opt := cisClient.NewGetPrefetchPreloadOptions() + result, resp, err := cisClient.GetPrefetchPreload(opt) + if err == nil { + d.Set(cisDomainSettingsPrefetchPreload, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsResponseBuffering: + opt := cisClient.NewGetResponseBufferingOptions() + result, resp, err := cisClient.GetResponseBuffering(opt) + if err == nil { + d.Set(cisDomainSettingsResponseBuffering, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsScriptLoadOptimisation: + opt := cisClient.NewGetScriptLoadOptimizationOptions() + result, resp, err := cisClient.GetScriptLoadOptimization(opt) + if err == nil { + d.Set(cisDomainSettingsScriptLoadOptimisation, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsServerSideExclude: + opt := cisClient.NewGetServerSideExcludeOptions() + result, resp, err := cisClient.GetServerSideExclude(opt) + if err == nil { + d.Set(cisDomainSettingsServerSideExclude, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsTLSClientAuth: + opt := cisClient.NewGetTlsClientAuthOptions() + result, resp, err := cisClient.GetTlsClientAuth(opt) + if err == nil { + d.Set(cisDomainSettingsTLSClientAuth, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsTrueClientIPHeader: + opt := cisClient.NewGetTrueClientIpOptions() + result, resp, err := cisClient.GetTrueClientIp(opt) + if err == nil { + d.Set(cisDomainSettingsTrueClientIPHeader, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsWebSockets: + opt := cisClient.NewGetWebSocketsOptions() + result, resp, err := cisClient.GetWebSockets(opt) + if err == nil { + d.Set(cisDomainSettingsWebSockets, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsChallengeTTL: + opt := cisClient.NewGetChallengeTtlOptions() + result, resp, err := cisClient.GetChallengeTTL(opt) + if err == nil { + d.Set(cisDomainSettingsChallengeTTL, result.Result.Value) + } + settingResponse = resp + 
settingErr = err + + case cisDomainSettingsMaxUpload: + opt := cisClient.NewGetMaxUploadOptions() + result, resp, err := cisClient.GetMaxUpload(opt) + if err == nil { + d.Set(cisDomainSettingsMaxUpload, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsCipher: + opt := cisClient.NewGetCiphersOptions() + result, resp, err := cisClient.GetCiphers(opt) + if err == nil { + d.Set(cisDomainSettingsCipher, result.Result.Value) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsMinify: + opt := cisClient.NewGetMinifyOptions() + result, resp, err := cisClient.GetMinify(opt) + if err == nil { + minify := result.Result.Value + value := map[string]string{ + cisDomainSettingsMinifyCSS: *minify.Css, + cisDomainSettingsMinifyHTML: *minify.HTML, + cisDomainSettingsMinifyJS: *minify.Js, + } + d.Set(cisDomainSettingsMinify, []interface{}{value}) + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsSecurityHeader: + opt := cisClient.NewGetSecurityHeaderOptions() + result, resp, err := cisClient.GetSecurityHeader(opt) + if err == nil { + + if result.Result.Value != nil && result.Result.Value.StrictTransportSecurity != nil { + + securityHeader := result.Result.Value.StrictTransportSecurity + value := map[string]interface{}{} + if securityHeader.Enabled != nil { + value[cisDomainSettingsSecurityHeaderEnabled] = *securityHeader.Enabled + } + if securityHeader.Nosniff != nil { + value[cisDomainSettingsSecurityHeaderNoSniff] = *securityHeader.Nosniff + } + if securityHeader.IncludeSubdomains != nil { + value[cisDomainSettingsSecurityHeaderIncludeSubdomains] = *securityHeader.IncludeSubdomains + } + if securityHeader.MaxAge != nil { + value[cisDomainSettingsSecurityHeaderMaxAge] = *securityHeader.MaxAge + } + d.Set(cisDomainSettingsSecurityHeader, []interface{}{value}) + } + } + settingResponse = resp + settingErr = err + + case cisDomainSettingsMobileRedirect: + opt := cisClient.NewGetMobileRedirectOptions() + result, resp, err := cisClient.GetMobileRedirect(opt) + if err == nil { + if result.Result.Value != nil { + + value := result.Result.Value + + uri := map[string]interface{}{} + if value.MobileSubdomain != nil { + uri[cisDomainSettingsMobileRedirectMobileSubdomain] = *value.MobileSubdomain + } + if value.Status != nil { + uri[cisDomainSettingsMobileRedirectStatus] = *value.Status + } + if value.StripURI != nil { + uri[cisDomainSettingsMobileRedirectStripURI] = *value.StripURI + } + d.Set(cisDomainSettingsMobileRedirect, []interface{}{uri}) + } + } + settingResponse = resp + settingErr = err + } + + if settingErr != nil { + if settingResponse != nil && settingResponse.StatusCode == 405 { + log.Printf("[WARN] Get %s. : %s", item, settingErr) + continue + } + log.Printf("Get settings failed on %s, %v\n", item, settingErr) + return settingErr + } + } + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + return nil +} + +func resourceCISSettingsDelete(d *schema.ResourceData, meta interface{}) error { + // Nothing to delete on CIS resource + d.SetId("") + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_edge_functions_action.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_edge_functions_action.go new file mode 100644 index 00000000000..cea892d19c4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_edge_functions_action.go @@ -0,0 +1,166 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"strings"
+
+	"github.com/IBM/go-sdk-core/v4/core"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	cisEdgeFunctionsActionActionName = "action_name"
+	cisEdgeFunctionsActionScript     = "script"
+)
+
+func resourceIBMCISEdgeFunctionsAction() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMCISEdgeFunctionsActionCreate,
+		Read:     resourceIBMCISEdgeFunctionsActionRead,
+		Update:   resourceIBMCISEdgeFunctionsActionUpdate,
+		Delete:   resourceIBMCISEdgeFunctionsActionDelete,
+		Exists:   resourceIBMCISEdgeFunctionsActionExists,
+		Importer: &schema.ResourceImporter{},
+		Schema: map[string]*schema.Schema{
+			cisID: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "CIS Instance CRN",
+			},
+			cisDomainID: {
+				Type:             schema.TypeString,
+				Required:         true,
+				Description:      "CIS Domain ID",
+				DiffSuppressFunc: suppressDomainIDDiff,
+			},
+			cisEdgeFunctionsActionActionName: {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Edge function action script name",
+			},
+			cisEdgeFunctionsActionScript: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Edge function action script",
+			},
+		},
+	}
+}
+
+func resourceIBMCISEdgeFunctionsActionCreate(d *schema.ResourceData, meta interface{}) error {
+	cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession()
+	if err != nil {
+		return err
+	}
+
+	crn := d.Get(cisID).(string)
+	zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string))
+	cisClient.Crn = core.StringPtr(crn)
+	cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+	scriptName := d.Get(cisEdgeFunctionsActionActionName).(string)
+	script := d.Get(cisEdgeFunctionsActionScript).(string)
+	r := ioutil.NopCloser(strings.NewReader(script))
+	opt := cisClient.NewUpdateEdgeFunctionsActionOptions(scriptName)
+	opt.SetEdgeFunctionsAction(r)
+
+	_, _, err = cisClient.UpdateEdgeFunctionsAction(opt)
+	if err != nil {
+		return fmt.Errorf("Error: %v", err)
+	}
+	d.SetId(convertCisToTfThreeVar(scriptName, zoneID, crn))
+	return resourceIBMCISEdgeFunctionsActionRead(d, meta)
+}
+
+func resourceIBMCISEdgeFunctionsActionUpdate(d *schema.ResourceData, meta interface{}) error {
+	if d.HasChange(cisEdgeFunctionsActionScript) {
+		return resourceIBMCISEdgeFunctionsActionCreate(d, meta)
+	}
+
+	return resourceIBMCISEdgeFunctionsActionRead(d, meta)
+}
+
+func resourceIBMCISEdgeFunctionsActionRead(d *schema.ResourceData, meta interface{}) error {
+	cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession()
+	if err != nil {
+		return err
+	}
+
+	scriptName, zoneID, crn, err := convertTfToCisThreeVar(d.Id())
+	cisClient.Crn = core.StringPtr(crn)
+	cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+	opt := cisClient.NewGetEdgeFunctionsActionOptions(scriptName)
+	result, resp, err := cisClient.GetEdgeFunctionsAction(opt)
+	if err != nil {
+		return fmt.Errorf("Error: %v", resp)
+	}
+
+	// read script content
+	content := []byte{}
+	p := make([]byte, 8)
+	for {
+		n, err := result.Read(p)
+		content = append(content, p[:n]...)
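+		// Stop once the reader reports EOF or returns no more bytes; any
+		// bytes read alongside the final io.EOF were already appended above.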
+ if err == io.EOF || n < 1 { + break + } + } + err = result.Close() + if err != nil { + return fmt.Errorf("Error in closing reader") + } + + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisEdgeFunctionsActionActionName, scriptName) + d.Set(cisEdgeFunctionsActionScript, string(content)) + return nil +} + +func resourceIBMCISEdgeFunctionsActionExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession() + if err != nil { + return false, fmt.Errorf("Error in creating CIS object") + } + + scriptName, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewGetEdgeFunctionsActionOptions(scriptName) + _, response, err := cisClient.GetEdgeFunctionsAction(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + log.Printf("Edge functions action script is not found") + return false, nil + } + return false, fmt.Errorf("Error: %v", response) + } + return true, nil +} + +func resourceIBMCISEdgeFunctionsActionDelete(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession() + if err != nil { + return fmt.Errorf("Error in creating CIS object") + } + + scriptName, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewDeleteEdgeFunctionsActionOptions(scriptName) + _, response, err := cisClient.DeleteEdgeFunctionsAction(opt) + if err != nil { + return fmt.Errorf("Error in edge function action script deletion: %v", response) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_edge_functions_trigger.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_edge_functions_trigger.go new file mode 100644 index 00000000000..7f8e35a4e03 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_edge_functions_trigger.go @@ -0,0 +1,180 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/IBM/go-sdk-core/v4/core"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	cisEdgeFunctionsTriggerID                   = "trigger_id"
+	cisEdgeFunctionsTriggerPattern              = "pattern_url"
+	cisEdgeFunctionsTriggerActionName           = "action_name"
+	cisEdgeFunctionsTriggerRequestLimitFailOpen = "request_limit_fail_open"
+)
+
+func resourceIBMCISEdgeFunctionsTrigger() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMCISEdgeFunctionsTriggerCreate,
+		Read:     resourceIBMCISEdgeFunctionsTriggerRead,
+		Update:   resourceIBMCISEdgeFunctionsTriggerUpdate,
+		Delete:   resourceIBMCISEdgeFunctionsTriggerDelete,
+		Exists:   resourceIBMCISEdgeFunctionsTriggerExists,
+		Importer: &schema.ResourceImporter{},
+		Schema: map[string]*schema.Schema{
+			cisID: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "CIS Instance CRN",
+			},
+			cisDomainID: {
+				Type:             schema.TypeString,
+				Required:         true,
+				Description:      "CIS Domain ID",
+				DiffSuppressFunc: suppressDataDiff,
+			},
+			cisEdgeFunctionsTriggerID: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "CIS Edge Functions trigger route ID",
+			},
+			cisEdgeFunctionsTriggerPattern: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Edge function trigger pattern",
+			},
+			cisEdgeFunctionsTriggerActionName: {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Edge function trigger action name",
+			},
+			cisEdgeFunctionsTriggerRequestLimitFailOpen: {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Edge function trigger request limit fail open",
+			},
+		},
+	}
+}
+
+func resourceIBMCISEdgeFunctionsTriggerCreate(d *schema.ResourceData, meta interface{}) error {
+	cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession()
+	if err != nil {
+		return err
+	}
+
+	crn := d.Get(cisID).(string)
+	zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string))
+	cisClient.Crn = core.StringPtr(crn)
+	cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+	opt := cisClient.NewCreateEdgeFunctionsTriggerOptions()
+	if action, ok := d.GetOk(cisEdgeFunctionsTriggerActionName); ok {
+		opt.SetScript(action.(string))
+	}
+	pattern := d.Get(cisEdgeFunctionsTriggerPattern).(string)
+	opt.SetPattern(pattern)
+
+	result, _, err := cisClient.CreateEdgeFunctionsTrigger(opt)
+	if err != nil {
+		return fmt.Errorf("Error creating edge function trigger route : %v", err)
+	}
+	d.SetId(convertCisToTfThreeVar(*result.Result.ID, zoneID, crn))
+	return resourceIBMCISEdgeFunctionsTriggerRead(d, meta)
+}
+
+func resourceIBMCISEdgeFunctionsTriggerUpdate(d *schema.ResourceData, meta interface{}) error {
+	cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession()
+	if err != nil {
+		return err
+	}
+
+	routeID, zoneID, crn, err := convertTfToCisThreeVar(d.Id())
+	cisClient.Crn = core.StringPtr(crn)
+	cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+	if d.HasChange(cisEdgeFunctionsTriggerActionName) ||
+		d.HasChange(cisEdgeFunctionsTriggerPattern) {
+		opt := cisClient.NewUpdateEdgeFunctionsTriggerOptions(routeID)
+
+		if action, ok := d.GetOk(cisEdgeFunctionsTriggerActionName); ok {
+			opt.SetScript(action.(string))
+		}
+		pattern := d.Get(cisEdgeFunctionsTriggerPattern).(string)
+		opt.SetPattern(pattern)
+
+		_, _, err := cisClient.UpdateEdgeFunctionsTrigger(opt)
+		if err != nil {
+			return fmt.Errorf("Error updating edge function trigger route : %v", err)
+		}
+	}
+	return resourceIBMCISEdgeFunctionsTriggerRead(d, meta)
+}
+
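+// Note on state IDs: the trigger route is tracked in Terraform state under a
+// composite ID built with convertCisToTfThreeVar and unpacked again with
+// convertTfToCisThreeVar. A minimal sketch of the round trip, assuming the
+// colon-separated encoding these helpers use (the IDs below are hypothetical):
+//
+//	id := convertCisToTfThreeVar("route-1", "zone-1", "crn:v1:bluemix:public:internet-svcs:...")
+//	// id == "route-1:zone-1:crn:v1:bluemix:public:internet-svcs:..."
+//	routeID, zoneID, crn, _ := convertTfToCisThreeVar(id)
+//	// routeID == "route-1", zoneID == "zone-1", crn == the original CRN
+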
+func resourceIBMCISEdgeFunctionsTriggerRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession() + if err != nil { + return err + } + + routeID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewGetEdgeFunctionsTriggerOptions(routeID) + result, resp, err := cisClient.GetEdgeFunctionsTrigger(opt) + if err != nil { + return fmt.Errorf("Error: %v", resp) + } + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisEdgeFunctionsTriggerID, routeID) + d.Set(cisEdgeFunctionsTriggerActionName, result.Result.Script) + d.Set(cisEdgeFunctionsTriggerPattern, result.Result.Pattern) + d.Set(cisEdgeFunctionsTriggerRequestLimitFailOpen, result.Result.RequestLimitFailOpen) + return nil +} + +func resourceIBMCISEdgeFunctionsTriggerExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession() + if err != nil { + return false, err + } + + routeID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewGetEdgeFunctionsTriggerOptions(routeID) + _, response, err := cisClient.GetEdgeFunctionsTrigger(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + log.Printf("Edge functions trigger route is not found") + return false, nil + } + return false, fmt.Errorf("Error: %v", response) + } + return true, nil +} + +func resourceIBMCISEdgeFunctionsTriggerDelete(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisEdgeFunctionClientSession() + if err != nil { + return fmt.Errorf("Error in creating CIS object") + } + + routeID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewDeleteEdgeFunctionsTriggerOptions(routeID) + _, response, err := cisClient.DeleteEdgeFunctionsTrigger(opt) + if err != nil { + return fmt.Errorf("Error in edge function trigger route deletion: %v", response) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_firewall.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_firewall.go new file mode 100644 index 00000000000..8ea2f9572e3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_firewall.go @@ -0,0 +1,817 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/IBM/go-sdk-core/v4/core"
+	cislockdownv1 "github.com/IBM/networking-go-sdk/zonelockdownv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	ibmCISFirewall                                 = "ibm_cis_firewall"
+	cisFirewallType                                = "firewall_type"
+	cisFirewallTypeLockdowns                       = "lockdowns"
+	cisFirewallTypeAccessRules                     = "access_rules"
+	cisFirewallTypeUARules                         = "ua_rules"
+	cisFirewallLockdown                            = "lockdown"
+	cisFirewallLockdownID                          = "lockdown_id"
+	cisFirewallLockdownName                        = "name"
+	cisFirewallLockdownPaused                      = "paused"
+	cisFirewallLockdownDesc                        = "description"
+	cisFirewallLockdownPriority                    = "priority"
+	cisFirewallLockdownURLs                        = "urls"
+	cisFirewallLockdownConfigurations              = "configurations"
+	cisFirewallLockdownConfigurationsTarget        = "target"
+	cisFirewallLockdownConfigurationsTargetIP      = "ip"
+	cisFirewallLockdownConfigurationsTargetIPRange = "ip_range"
+	cisFirewallLockdownConfigurationsValue         = "value"
+	cisFirewallAccessRule                          = "access_rule"
+	cisFirewallAccessRuleID                        = "access_rule_id"
+	cisFirewallAccessRuleMode                      = "mode"
+	cisFirewallAccessRuleModeBlock                 = "block"
+	cisFirewallAccessRuleModeChallenge             = "challenge"
+	cisFirewallAccessRuleModeWhitelist             = "whitelist"
+	cisFirewallAccessRuleModeJSChallenge           = "js_challenge"
+	cisFirewallAccessRuleNotes                     = "notes"
+	cisFirewallAccessRuleConfiguration             = "configuration"
+	cisFirewallAccessRuleConfigurationTarget       = "target"
+	cisFirewallAccessRuleConfigurationValue        = "value"
+	cisFirewallUARule                              = "ua_rule"
+	cisFirewallUARuleID                            = "ua_rule_id"
+	cisFirewallUARulePaused                        = "paused"
+	cisFirewallUARuleDesc                          = "description"
+	cisFirewallUARuleMode                          = "mode"
+	cisFirewallUARuleModeBlock                     = "block"
+	cisFirewallUARuleModeChallenge                 = "challenge"
+	cisFirewallUARuleModeJSChallenge               = "js_challenge"
+	cisFirewallUARuleConfiguration                 = "configuration"
+	cisFirewallUARuleConfigurationTarget           = "target"
+	cisFirewallUARuleConfigurationTargetUA         = "ua"
+	cisFirewallUARuleConfigurationValue            = "value"
+)
+
+func resourceIBMCISFirewallRecord() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMCISFirewallRecordCreate,
+		Read:     resourceIBMCISFirewallRecordRead,
+		Update:   resourceIBMCISFirewallRecordUpdate,
+		Delete:   resourceIBMCISFirewallRecordDelete,
+		Exists:   resourceIBMCISFirewallRecordExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			cisID: {
+				Type:        schema.TypeString,
+				Description: "CIS instance CRN",
+				Required:    true,
+				ForceNew:    true,
+			},
+			cisDomainID: {
+				Type:             schema.TypeString,
+				Description:      "Associated CIS domain",
+				Required:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: suppressDomainIDDiff,
+			},
+			cisFirewallType: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				Description:  "Type of firewall. Allowable values are access_rules, ua_rules, lockdowns",
+				ValidateFunc: InvokeValidator(ibmCISFirewall, cisFirewallType),
+			},
+
+			cisFirewallLockdown: {
+				Type:     schema.TypeList,
+				Optional: true,
+				MaxItems: 1,
+				ExactlyOneOf: []string{
+					cisFirewallLockdown,
+					cisFirewallAccessRule,
+					cisFirewallUARule},
+				Description: "Lockdown Data",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						cisFirewallLockdownID: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "firewall identifier",
+						},
+						cisFirewallLockdownPaused: {
+							Type:        schema.TypeBool,
+							Optional:    true,
+							Description: "Whether the firewall rule is paused",
+						},
+						cisFirewallLockdownDesc: {
+							Type:     schema.TypeString,
+							Optional:
true, + Description: "description", + }, + cisFirewallLockdownPriority: { + Type: schema.TypeInt, + Optional: true, + Description: "Firewall priority", + }, + cisFirewallLockdownURLs: { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "URL in which firewall rule is applied", + }, + cisFirewallLockdownConfigurations: { + Type: schema.TypeList, + Required: true, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallLockdownConfigurationsTarget: { + Type: schema.TypeString, + Required: true, + Description: "Target type", + ValidateFunc: InvokeValidator( + ibmCISFirewall, + cisFirewallLockdownConfigurationsTarget), + }, + cisFirewallLockdownConfigurationsValue: { + Type: schema.TypeString, + Required: true, + Description: "Target value", + }, + }, + }, + }, + }, + }, + }, + cisFirewallAccessRule: { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{ + cisFirewallLockdown, + cisFirewallAccessRule, + cisFirewallUARule}, + Description: "Access Rule Data", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallAccessRuleID: { + Type: schema.TypeString, + Computed: true, + Description: "access rule firewall identifier", + }, + cisFirewallAccessRuleNotes: { + Type: schema.TypeString, + Optional: true, + Description: "description", + }, + cisFirewallAccessRuleMode: { + Type: schema.TypeString, + Required: true, + Description: "Access rule mode", + ValidateFunc: InvokeValidator(ibmCISFirewall, cisFirewallAccessRuleMode), + }, + cisFirewallAccessRuleConfiguration: { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallUARuleConfigurationTarget: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Target type", + ValidateFunc: InvokeValidator(ibmCISFirewall, + cisFirewallAccessRuleConfigurationTarget), + }, + cisFirewallUARuleConfigurationValue: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Target value", + }, + }, + }, + }, + }, + }, + }, + cisFirewallUARule: { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{ + cisFirewallLockdown, + cisFirewallAccessRule, + cisFirewallUARule}, + Description: "User Agent Rule Data", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallUARuleID: { + Type: schema.TypeString, + Computed: true, + Description: "User Agent firewall identifier", + }, + cisFirewallUARulePaused: { + Type: schema.TypeBool, + Optional: true, + Description: "Rule whether paused or not", + }, + cisFirewallUARuleDesc: { + Type: schema.TypeString, + Optional: true, + Description: "description", + }, + cisFirewallUARuleMode: { + Type: schema.TypeString, + Required: true, + Description: "user agent rule mode", + ValidateFunc: InvokeValidator(ibmCISFirewall, cisFirewallUARuleMode), + }, + cisFirewallUARuleConfiguration: { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisFirewallUARuleConfigurationTarget: { + Type: schema.TypeString, + Required: true, + Description: "Target type", + ValidateFunc: InvokeValidator(ibmCISFirewall, + cisFirewallUARuleConfigurationTarget), + }, + cisFirewallUARuleConfigurationValue: { + Type: schema.TypeString, + Required: true, + Description: "Target value", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func 
resourceIBMCISFirewallValidator() *ResourceValidator {
+	firewallTypes := "access_rules, ua_rules, lockdowns"
+	validateSchema := make([]ValidateSchema, 1)
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisFirewallType,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              firewallTypes})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisFirewallLockdownConfigurationsTarget,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              "ip, ip_range"})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisFirewallAccessRuleConfigurationTarget,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              "ip, ip_range, asn, country"})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisFirewallUARuleConfigurationTarget,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              "ua"})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisFirewallAccessRuleMode,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              "block, challenge, whitelist, js_challenge"})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisFirewallUARuleMode,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              "block, challenge, js_challenge"})
+	cisFirewallValidator := ResourceValidator{ResourceName: ibmCISFirewall, Schema: validateSchema}
+	return &cisFirewallValidator
+}
+
+func resourceIBMCISFirewallRecordCreate(d *schema.ResourceData, meta interface{}) error {
+	crn := d.Get(cisID).(string)
+	zoneID, _, _ := convertTftoCisTwoVar(d.Get(cisDomainID).(string))
+	firewallType := d.Get(cisFirewallType).(string)
+
+	if firewallType == cisFirewallTypeLockdowns {
+		// Firewall Type : Lockdowns
+
+		cisClient, err := meta.(ClientSession).CisLockdownClientSession()
+		if err != nil {
+			return err
+		}
+		lockdown := d.Get(cisFirewallLockdown).([]interface{})[0].(map[string]interface{})
+
+		opt := cisClient.NewCreateZoneLockdownRuleOptions()
+		// not able to check bool variable availability
+		v, _ := lockdown[cisFirewallLockdownPaused]
+		opt.SetPaused(v.(bool))
+		if v, ok := lockdown[cisFirewallLockdownDesc]; ok && v.(string) != "" {
+			opt.SetDescription(v.(string))
+		}
+		if v, ok := lockdown[cisFirewallLockdownPriority]; ok && v.(int) > 0 {
+			opt.SetPriority(int64(v.(int)))
+		}
+		urls := expandStringList(lockdown[cisFirewallLockdownURLs].([]interface{}))
+		configurations, err := expandLockdownsTypeConfiguration(
+			lockdown[cisFirewallLockdownConfigurations].([]interface{}))
+		if err != nil {
+			return err
+		}
+		opt.SetUrls(urls)
+		opt.SetConfigurations(configurations)
+
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+		result, response, err := cisClient.CreateZoneLockdownRule(opt)
+		if err != nil {
+			log.Printf("Create zone firewall lockdown failed: %v", response)
+			return err
+		}
+		d.SetId(convertCisToTfFourVar(firewallType, *result.Result.ID, zoneID, crn))
+
+	} else if firewallType == cisFirewallTypeAccessRules {
+
+		cisClient, err := meta.(ClientSession).CisAccessRuleClientSession()
+		if err != nil {
+			return err
+		}
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
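+		// Firewall Type : Zone Access firewall rules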
+func resourceIBMCISFirewallRecordCreate(d *schema.ResourceData, meta interface{}) error {
+	crn := d.Get(cisID).(string)
+	zoneID, _, _ := convertTftoCisTwoVar(d.Get(cisDomainID).(string))
+	firewallType := d.Get(cisFirewallType).(string)
+
+	if firewallType == cisFirewallTypeLockdowns {
+		// Firewall Type : Lockdowns
+
+		cisClient, err := meta.(ClientSession).CisLockdownClientSession()
+		if err != nil {
+			return err
+		}
+		lockdown := d.Get(cisFirewallLockdown).([]interface{})[0].(map[string]interface{})
+
+		opt := cisClient.NewCreateZoneLockdownRuleOptions()
+		// TypeBool's zero value (false) is indistinguishable from "unset" via
+		// GetOk, so read the attribute unconditionally.
+		v, _ := lockdown[cisFirewallLockdownPaused]
+		opt.SetPaused(v.(bool))
+		if v, ok := lockdown[cisFirewallLockdownDesc]; ok && v.(string) != "" {
+			opt.SetDescription(v.(string))
+		}
+		if v, ok := lockdown[cisFirewallLockdownPriority]; ok && v.(int) > 0 {
+			opt.SetPriority(int64(v.(int)))
+		}
+		urls := expandStringList(lockdown[cisFirewallLockdownURLs].([]interface{}))
+		configurations, err := expandLockdownsTypeConfiguration(
+			lockdown[cisFirewallLockdownConfigurations].([]interface{}))
+		if err != nil {
+			return err
+		}
+		opt.SetUrls(urls)
+		opt.SetConfigurations(configurations)
+
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+		result, response, err := cisClient.CreateZoneLockdownRule(opt)
+		if err != nil {
+			log.Printf("Create zone firewall lockdown failed: %v", response)
+			return err
+		}
+		d.SetId(convertCisToTfFourVar(firewallType, *result.Result.ID, zoneID, crn))
+
+	} else if firewallType == cisFirewallTypeAccessRules {
+
+		cisClient, err := meta.(ClientSession).CisAccessRuleClientSession()
+		if err != nil {
+			return err
+		}
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+		accessRule := d.Get(cisFirewallAccessRule).([]interface{})[0].(map[string]interface{})
+
+		mode := accessRule[cisFirewallAccessRuleMode].(string)
+
+		configList := accessRule[cisFirewallAccessRuleConfiguration].([]interface{})
+
+		config := configList[0].(map[string]interface{})
+		target := config[cisFirewallAccessRuleConfigurationTarget].(string)
+		value := config[cisFirewallAccessRuleConfigurationValue].(string)
+
+		configOpt, err := cisClient.NewZoneAccessRuleInputConfiguration(target, value)
+		if err != nil {
+			log.Printf("Error in firewall type %s input: %s", firewallType, err)
+			return err
+		}
+
+		opt := cisClient.NewCreateZoneAccessRuleOptions()
+		opt.SetMode(mode)
+		opt.SetConfiguration(configOpt)
+		if v, ok := accessRule[cisFirewallAccessRuleNotes]; ok && v.(string) != "" {
+			opt.SetNotes(v.(string))
+		}
+
+		result, response, err := cisClient.CreateZoneAccessRule(opt)
+		if err != nil {
+			log.Printf("Create zone firewall access rule failed: %v", response)
+			return err
+		}
+		d.SetId(convertCisToTfFourVar(firewallType, *result.Result.ID, zoneID, crn))
+
+	} else if firewallType == cisFirewallTypeUARules {
+		cisClient, err := meta.(ClientSession).CisUARuleClientSession()
+		if err != nil {
+			return err
+		}
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+		uaRule := d.Get(cisFirewallUARule).([]interface{})[0].(map[string]interface{})
+
+		mode := uaRule[cisFirewallUARuleMode].(string)
+		configList := uaRule[cisFirewallUARuleConfiguration].([]interface{})
+		if len(configList) > 1 {
+			return fmt.Errorf("only one configuration is allowed for %s type", firewallType)
+		}
+		config := configList[0].(map[string]interface{})
+		target := config[cisFirewallUARuleConfigurationTarget].(string)
+		value := config[cisFirewallUARuleConfigurationValue].(string)
+
+		configOpt, err := cisClient.NewUseragentRuleInputConfiguration(target, value)
+		if err != nil {
+			log.Printf("Error in firewall type %s input: %s", firewallType, err)
+			return err
+		}
+
+		opt := cisClient.NewCreateZoneUserAgentRuleOptions()
+		opt.SetMode(mode)
+		opt.SetConfiguration(configOpt)
+
+		if v, ok := uaRule[cisFirewallUARuleDesc]; ok && v.(string) != "" {
+			opt.SetDescription(v.(string))
+		}
+		// TypeBool presence cannot be checked via GetOk; read unconditionally.
+		v, _ := uaRule[cisFirewallUARulePaused]
+		opt.SetPaused(v.(bool))
+
+		result, response, err := cisClient.CreateZoneUserAgentRule(opt)
+		if err != nil {
+			log.Printf("Create zone user agent rule failed: %v", response)
+			return err
+		}
+		d.SetId(convertCisToTfFourVar(firewallType, *result.Result.ID, zoneID, crn))
+	}
+
+	return resourceIBMCISFirewallRecordRead(d, meta)
+}
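The "not able to check bool availability" workaround above exists because GetOk treats false as "unset" for TypeBool. The SDK does ship GetOkExists for exactly this case; the sketch below shows that alternative. The flattened key path "lockdown.0.paused" is an assumption for illustration, and GetOkExists is deprecated upstream but still the only presence check for booleans in this SDK version.

package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// pausedFromConfig is the alternative to reading the bool straight out of the
// block's map: GetOkExists reports presence even when the value is false.
// Returns (value, wasSetInConfig).
func pausedFromConfig(d *schema.ResourceData) (bool, bool) {
	v, exists := d.GetOkExists("lockdown.0.paused") // deprecated, but distinguishes unset from false
	if !exists {
		return false, false
	}
	return v.(bool), true
}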
+func resourceIBMCISFirewallRecordRead(d *schema.ResourceData, meta interface{}) error {
+	firewallType, lockdownID, zoneID, crn, _ := convertTfToCisFourVar(d.Id())
+
+	if firewallType == cisFirewallTypeLockdowns {
+		// Firewall Type : Lockdowns
+		cisClient, err := meta.(ClientSession).CisLockdownClientSession()
+		if err != nil {
+			return err
+		}
+
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+		opt := cisClient.NewGetLockdownOptions(lockdownID)
+
+		result, response, err := cisClient.GetLockdown(opt)
+		if err != nil {
+			log.Printf("Get zone firewall lockdown failed: %v", response)
+			return err
+		}
+		lockdownList := []interface{}{}
+		lockdown := map[string]interface{}{}
+		lockdown[cisFirewallLockdownID] = *result.Result.ID
+		lockdown[cisFirewallLockdownPaused] = *result.Result.Paused
+		lockdown[cisFirewallLockdownURLs] = flattenStringList(result.Result.Urls)
+		lockdown[cisFirewallLockdownConfigurations] =
+			flattenLockdownsTypeConfiguration(result.Result.Configurations)
+		if result.Result.Description != nil {
+			lockdown[cisFirewallLockdownDesc] = *result.Result.Description
+		}
+		if result.Result.Priority != nil {
+			lockdown[cisFirewallLockdownPriority] = *result.Result.Priority
+		}
+		lockdownList = append(lockdownList, lockdown)
+		d.Set(cisFirewallLockdown, lockdownList)
+
+	} else if firewallType == cisFirewallTypeAccessRules {
+
+		// Firewall Type : Zone Access firewall rules
+		cisClient, err := meta.(ClientSession).CisAccessRuleClientSession()
+		if err != nil {
+			return err
+		}
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+		opt := cisClient.NewGetZoneAccessRuleOptions(lockdownID)
+
+		result, response, err := cisClient.GetZoneAccessRule(opt)
+		if err != nil {
+			log.Printf("Get zone firewall access rule failed: %v", response)
+			return err
+		}
+
+		config := map[string]interface{}{}
+		configList := []interface{}{}
+		config[cisFirewallAccessRuleConfigurationTarget] = *result.Result.Configuration.Target
+		config[cisFirewallAccessRuleConfigurationValue] = *result.Result.Configuration.Value
+		configList = append(configList, config)
+
+		accessRuleList := []interface{}{}
+		accessRule := map[string]interface{}{}
+		accessRule[cisFirewallAccessRuleID] = *result.Result.ID
+		if result.Result.Notes != nil {
+			accessRule[cisFirewallAccessRuleNotes] = *result.Result.Notes
+		}
+		accessRule[cisFirewallAccessRuleMode] = *result.Result.Mode
+		accessRule[cisFirewallAccessRuleConfiguration] = configList
+		accessRuleList = append(accessRuleList, accessRule)
+		d.Set(cisFirewallAccessRule, accessRuleList)
+
+	} else if firewallType == cisFirewallTypeUARules {
+		// Firewall Type: User Agent access rules
+		cisClient, err := meta.(ClientSession).CisUARuleClientSession()
+		if err != nil {
+			return err
+		}
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+		opt := cisClient.NewGetUserAgentRuleOptions(lockdownID)
+		result, response, err := cisClient.GetUserAgentRule(opt)
+		if err != nil {
+			log.Printf("Get zone user agent rule failed: %v", response)
+			return err
+		}
+
+		config := map[string]interface{}{}
+		configList := []interface{}{}
+		config[cisFirewallUARuleConfigurationTarget] = *result.Result.Configuration.Target
+		config[cisFirewallUARuleConfigurationValue] = *result.Result.Configuration.Value
+		configList = append(configList, config)
+
+		uaRuleList := []interface{}{}
+		uaRule := map[string]interface{}{}
+		uaRule[cisFirewallUARuleID] = *result.Result.ID
+		uaRule[cisFirewallUARulePaused] = *result.Result.Paused
+		uaRule[cisFirewallUARuleMode] = *result.Result.Mode
+		uaRule[cisFirewallUARuleConfiguration] = configList
+		if result.Result.Description != nil {
+			uaRule[cisFirewallUARuleDesc] = *result.Result.Description
+		}
+		uaRuleList = append(uaRuleList, uaRule)
+		d.Set(cisFirewallUARule, uaRuleList)
+	}
+	d.Set(cisID, crn)
+	d.Set(cisDomainID, zoneID)
+	d.Set(cisFirewallType, firewallType)
+	return nil
+}
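Read guards some pointer fields (Description, Priority, Notes) but dereferences others unconditionally. A generic nil-safe dereference helper would make the pattern uniform; this is only a sketch, and it needs Go 1.18+ generics, which is newer than the toolchain this vendored code likely targets, so the inline guards above remain the practical choice.

package example

// deref returns the value a pointer refers to, or the zero value when nil.
// Usage sketch: paused := deref(result.Result.Paused)
func deref[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}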
+func resourceIBMCISFirewallRecordUpdate(d *schema.ResourceData, meta interface{}) error {
+
+	firewallType, lockdownID, zoneID, crn, _ := convertTfToCisFourVar(d.Id())
+
+	if d.HasChange(cisFirewallLockdown) ||
+		d.HasChange(cisFirewallAccessRule) ||
+		d.HasChange(cisFirewallUARule) {
+
+		if firewallType == cisFirewallTypeLockdowns {
+			// Firewall Type : Lockdowns
+			lockdown := d.Get(cisFirewallLockdown).([]interface{})[0].(map[string]interface{})
+
+			cisClient, err := meta.(ClientSession).CisLockdownClientSession()
+			if err != nil {
+				return err
+			}
+
+			opt := cisClient.NewUpdateLockdownRuleOptions(lockdownID)
+			// TypeBool presence cannot be checked via GetOk; read unconditionally.
+			v, _ := lockdown[cisFirewallLockdownPaused]
+			opt.SetPaused(v.(bool))
+			if v, ok := lockdown[cisFirewallLockdownDesc]; ok && v.(string) != "" {
+				opt.SetDescription(v.(string))
+			}
+			if v, ok := lockdown[cisFirewallLockdownPriority]; ok && v.(int) > 0 {
+				opt.SetPriority(int64(v.(int)))
+			}
+			urls := expandStringList(lockdown[cisFirewallLockdownURLs].([]interface{}))
+			configurations, err := expandLockdownsTypeConfiguration(lockdown[cisFirewallLockdownConfigurations].([]interface{}))
+			if err != nil {
+				return err
+			}
+			opt.SetUrls(urls)
+			opt.SetConfigurations(configurations)
+
+			cisClient.Crn = core.StringPtr(crn)
+			cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+			_, response, err := cisClient.UpdateLockdownRule(opt)
+			if err != nil {
+				log.Printf("Update zone firewall lockdown failed: %v", response)
+				return err
+			}
+
+		} else if firewallType == cisFirewallTypeAccessRules {
+
+			accessRule := d.Get(cisFirewallAccessRule).([]interface{})[0].(map[string]interface{})
+
+			// Firewall Type : Zone Access firewall rules
+			cisClient, err := meta.(ClientSession).CisAccessRuleClientSession()
+			if err != nil {
+				return err
+			}
+			cisClient.Crn = core.StringPtr(crn)
+			cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+			mode := accessRule[cisFirewallAccessRuleMode].(string)
+			// Only mode and notes are updatable here; the configuration block
+			// is not sent on update.
+			opt := cisClient.NewUpdateZoneAccessRuleOptions(lockdownID)
+			if v, ok := accessRule[cisFirewallAccessRuleNotes]; ok && v.(string) != "" {
+				opt.SetNotes(v.(string))
+			}
+			opt.SetMode(mode)
+
+			_, response, err := cisClient.UpdateZoneAccessRule(opt)
+			if err != nil {
+				log.Printf("Update zone firewall access rule failed: %v", response)
+				return err
+			}
+
+		} else if firewallType == cisFirewallTypeUARules {
+			// Firewall Type: User Agent access rules
+			uaRule := d.Get(cisFirewallUARule).([]interface{})[0].(map[string]interface{})
+			cisClient, err := meta.(ClientSession).CisUARuleClientSession()
+			if err != nil {
+				return err
+			}
+			cisClient.Crn = core.StringPtr(crn)
+			cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+			mode := uaRule[cisFirewallUARuleMode].(string)
+			config := uaRule[cisFirewallUARuleConfiguration].([]interface{})[0].(map[string]interface{})
+			target := config[cisFirewallUARuleConfigurationTarget].(string)
+			value := config[cisFirewallUARuleConfigurationValue].(string)
+
+			configOpt, err := cisClient.NewUseragentRuleInputConfiguration(target, value)
+			if err != nil {
+				log.Printf("Error in firewall type %s input: %s", firewallType, err)
+				return err
+			}
+
+			opt := cisClient.NewUpdateUserAgentRuleOptions(lockdownID)
+			opt.SetMode(mode)
+			opt.SetConfiguration(configOpt)
+
+			if v, ok := uaRule[cisFirewallUARuleDesc]; ok && v.(string) != "" {
+				opt.SetDescription(v.(string))
+			}
+			// TypeBool presence cannot be checked via GetOk; read unconditionally.
+			v, _ := uaRule[cisFirewallUARulePaused]
+			opt.SetPaused(v.(bool))
+
+			_, response, err := cisClient.UpdateUserAgentRule(opt)
+			if err != nil {
+				log.Printf("Update zone user agent rule failed: %v", response)
+				return err
+			}
+		}
+
+	}
+	return resourceIBMCISFirewallRecordRead(d, meta)
+}
+func resourceIBMCISFirewallRecordDelete(d *schema.ResourceData, meta interface{}) error {
+	firewallType, lockdownID, zoneID, crn, _ := convertTfToCisFourVar(d.Id())
+
+	if firewallType == cisFirewallTypeLockdowns {
+		// Firewall Type : Lockdowns
+		cisClient, err := meta.(ClientSession).CisLockdownClientSession()
+		if err != nil {
+			return err
+		}
+
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+		opt := cisClient.NewDeleteZoneLockdownRuleOptions(lockdownID)
+
+		_, response, err := cisClient.DeleteZoneLockdownRule(opt)
+		if err != nil {
+			log.Printf("Delete zone firewall lockdown failed: %v", response)
+			return err
+		}
+
+	} else if firewallType == cisFirewallTypeAccessRules {
+
+		// Firewall Type : Zone Access firewall rules
+		cisClient, err := meta.(ClientSession).CisAccessRuleClientSession()
+		if err != nil {
+			return err
+		}
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+		opt := cisClient.NewDeleteZoneAccessRuleOptions(lockdownID)
+
+		_, response, err := cisClient.DeleteZoneAccessRule(opt)
+		if err != nil {
+			log.Printf("Delete zone firewall access rule failed: %v", response)
+			return err
+		}
+
+	} else if firewallType == cisFirewallTypeUARules {
+		// Firewall Type: User Agent access rules
+		cisClient, err := meta.(ClientSession).CisUARuleClientSession()
+		if err != nil {
+			return err
+		}
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+		opt := cisClient.NewDeleteZoneUserAgentRuleOptions(lockdownID)
+		_, response, err := cisClient.DeleteZoneUserAgentRule(opt)
+		if err != nil {
+			log.Printf("Delete zone user agent rule failed: %v", response)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func resourceIBMCISFirewallRecordExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	firewallType, lockdownID, zoneID, crn, _ := convertTfToCisFourVar(d.Id())
+
+	if firewallType == cisFirewallTypeLockdowns {
+		// Firewall Type : Lockdowns
+		cisClient, err := meta.(ClientSession).CisLockdownClientSession()
+		if err != nil {
+			return false, err
+		}
+
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+		opt := cisClient.NewGetLockdownOptions(lockdownID)
+
+		_, response, err := cisClient.GetLockdown(opt)
+		if err != nil {
+			if response != nil && response.StatusCode == 404 {
+				log.Printf("Zone firewall lockdown not found")
+				return false, nil
+			}
+			log.Printf("Get zone firewall lockdown failed: %v", response)
+			return false, err
+		}
+
+	} else if firewallType == cisFirewallTypeAccessRules {
+
+		// Firewall Type : Zone Access firewall rules
+		cisClient, err := meta.(ClientSession).CisAccessRuleClientSession()
+		if err != nil {
+			return false, err
+		}
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+		opt := cisClient.NewGetZoneAccessRuleOptions(lockdownID)
+
+		_, response, err := cisClient.GetZoneAccessRule(opt)
+		if err != nil {
+			if response != nil && response.StatusCode == 404 {
+				log.Printf("Zone firewall access rule not found")
+				return false, nil
+			}
+			log.Printf("Get zone firewall access rule failed: %v", response)
+			return false, err
+		}
+
+	} else if firewallType == cisFirewallTypeUARules {
+		// Firewall Type: User Agent access rules
+		cisClient, err := meta.(ClientSession).CisUARuleClientSession()
+		if err != nil {
+			return false, err
+		}
+		cisClient.Crn = core.StringPtr(crn)
+		cisClient.ZoneIdentifier = core.StringPtr(zoneID)
+
+		opt := cisClient.NewGetUserAgentRuleOptions(lockdownID)
+		_, response, err := cisClient.GetUserAgentRule(opt)
+		if err != nil {
+			if response != nil && response.StatusCode == 404 {
+				log.Printf("Zone firewall user agent rule not found")
+				return false, nil
+			}
+			log.Printf("Get zone user agent rule 
failed: %v", response) + return false, err + } + + } + + return true, nil +} + +func expandLockdownsTypeConfiguration(lockdownConfigs []interface{}) ([]cislockdownv1.LockdownInputConfigurationsItem, error) { + var configListOutput = make([]cislockdownv1.LockdownInputConfigurationsItem, 0) + + for _, lockdownConfig := range lockdownConfigs { + configMap, _ := lockdownConfig.(map[string]interface{}) + target := configMap[cisFirewallLockdownConfigurationsTarget].(string) + value := configMap[cisFirewallLockdownConfigurationsValue].(string) + configOutput := cislockdownv1.LockdownInputConfigurationsItem{ + Target: core.StringPtr(target), + Value: core.StringPtr(value), + } + configListOutput = append(configListOutput, configOutput) + } + return configListOutput, nil +} + +func flattenLockdownsTypeConfiguration(lockdownConfigs []cislockdownv1.LockdownObjectConfigurationsItem) interface{} { + configListOutput := []interface{}{} + + for _, lockdownConfig := range lockdownConfigs { + configOutput := map[string]string{} + configOutput[cisFirewallLockdownConfigurationsTarget] = *lockdownConfig.Target + configOutput[cisFirewallLockdownConfigurationsValue] = *lockdownConfig.Value + configListOutput = append(configListOutput, configOutput) + } + return configListOutput +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_global_load_balancer.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_global_load_balancer.go new file mode 100644 index 00000000000..a7c3816fcac --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_global_load_balancer.go @@ -0,0 +1,405 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisGLBID = "glb_id" + cisGLBName = "name" + cisGLBFallbackPoolID = "fallback_pool_id" + cisGLBDefaultPoolIDs = "default_pool_ids" + cisGLBDesc = "description" + cisGLBProxied = "proxied" + cisGLBTTL = "ttl" + cisGLBSessionAffinity = "session_affinity" + cisGLBEnabled = "enabled" + cisGLBPopPools = "pop_pools" + cisGLBPopPoolsPop = "pop" + cisGLBPopPoolsPoolIDs = "pool_ids" + cisGLBRegionPools = "region_pools" + cisGLBRegionPoolsRegion = "region" + cisGLBRegionPoolsPoolIDs = "pool_ids" + cisGLBCreatedOn = "created_on" + cisGLBModifiedOn = "modified_on" +) + +func resourceIBMCISGlb() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS instance crn", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisGLBID: { + Type: schema.TypeString, + Description: "global load balancer id", + Computed: true, + }, + cisGLBName: { + Type: schema.TypeString, + Description: "name", + Required: true, + }, + cisGLBFallbackPoolID: { + Type: schema.TypeString, + Description: "fallback pool ID", + Required: true, + }, + cisGLBDefaultPoolIDs: { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "List of default Pool IDs", + }, + cisGLBDesc: { + Type: schema.TypeString, + Optional: true, + Description: "Description for the load balancer instance", + }, + cisGLBTTL: { + Type: schema.TypeInt, + Optional: true, + Default: 60, + ConflictsWith: 
[]string{"proxied"}, + Description: "TTL value", // this is set to zero regardless of config when proxied=true + + }, + cisGLBProxied: { + Type: schema.TypeBool, + Optional: true, + Default: false, + ConflictsWith: []string{cisGLBTTL}, + Description: "set to true if proxy needs to be enabled", + }, + cisGLBSessionAffinity: { + Type: schema.TypeString, + Optional: true, + Default: "none", + // Set to cookie when proxy=true + ValidateFunc: validateAllowedStringValue([]string{"none", "cookie"}), + Description: "Session affinity info", + }, + cisGLBEnabled: { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "set to true of LB needs to enabled", + }, + cisGLBPopPools: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisGLBPopPoolsPop: { + Type: schema.TypeString, + Required: true, + Description: "pop pools region", + }, + + cisGLBPopPoolsPoolIDs: { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + + cisGLBRegionPools: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisGLBRegionPoolsRegion: { + Type: schema.TypeString, + Required: true, + }, + + cisGLBRegionPoolsPoolIDs: { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + cisGLBCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Load balancer creation date", + }, + cisGLBModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Load balancer modified date", + }, + }, + + Create: resourceCISGlbCreate, + Read: resourceCISGlbRead, + Update: resourceCISGlbUpdate, + Delete: resourceCISGlbDelete, + Importer: &schema.ResourceImporter{}, + } +} + +func resourceCISGlbCreate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisGLBClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + tfDefaultPoolIds := expandStringList(d.Get(cisGLBDefaultPoolIDs).(*schema.Set).List()) + defaultPoolIds, _, err := convertTfToCisTwoVarSlice(tfDefaultPoolIds) + fbPoolID := d.Get(cisGLBFallbackPoolID).(string) + fallbackPool, _, err := convertTftoCisTwoVar(fbPoolID) + + opt := cisClient.NewCreateLoadBalancerOptions() + opt.SetName(d.Get(cisGLBName).(string)) + opt.SetDefaultPools(defaultPoolIds) + opt.SetFallbackPool(fallbackPool) + opt.SetProxied(d.Get(cisGLBProxied).(bool)) + opt.SetSessionAffinity(d.Get(cisGLBSessionAffinity).(string)) + + if description, ok := d.GetOk(cisGLBDesc); ok { + opt.SetDescription(description.(string)) + } + if ttl, ok := d.GetOk(cisGLBTTL); ok { + opt.SetTTL(int64(ttl.(int))) + } + if regionPools, ok := d.GetOk(cisGLBRegionPools); ok { + expandedRegionPools, err := expandGeoPools(regionPools, cisGLBRegionPoolsRegion) + if err != nil { + return err + } + opt.SetRegionPools(expandedRegionPools) + } + if popPools, ok := d.GetOk(cisGLBPopPools); ok { + expandedPopPools, err := expandGeoPools(popPools, cisGLBPopPoolsPop) + if err != nil { + return err + } + opt.SetPopPools(expandedPopPools) + } + + result, resp, err := cisClient.CreateLoadBalancer(opt) + if err != nil { + log.Printf("Create GLB failed %s\n", resp) + return err + } + d.SetId(convertCisToTfThreeVar(*result.Result.ID, zoneID, crn)) + 
return resourceCISGlbUpdate(d, meta) +} + +func resourceCISGlbRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisGLBClientSession() + if err != nil { + return err + } + + // Extract CIS Ids from TF Id + glbID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return err + } + + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewGetLoadBalancerSettingsOptions(glbID) + + result, resp, err := cisClient.GetLoadBalancerSettings(opt) + if err != nil { + log.Printf("[WARN] GLB Read failed: %v\n", resp) + return err + } + glbObj := result.Result + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisGLBID, glbObj.ID) + d.Set(cisGLBName, glbObj.Name) + d.Set(cisGLBDefaultPoolIDs, convertCisToTfTwoVarSlice(glbObj.DefaultPools, crn)) + d.Set(cisGLBDesc, glbObj.Description) + d.Set(cisGLBFallbackPoolID, convertCisToTfTwoVar(*glbObj.FallbackPool, crn)) + d.Set(cisGLBTTL, glbObj.TTL) + d.Set(cisGLBProxied, glbObj.Proxied) + d.Set(cisGLBEnabled, glbObj.Enabled) + d.Set(cisGLBSessionAffinity, glbObj.SessionAffinity) + flattenPopPools := flattenPools( + glbObj.PopPools, cisGLBPopPoolsPop, crn) + d.Set(cisGLBPopPools, flattenPopPools) + flattenRegionPools := flattenPools( + glbObj.RegionPools, cisGLBRegionPoolsRegion, crn) + d.Set(cisGLBRegionPools, flattenRegionPools) + + return nil +} + +func resourceCISGlbUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisGLBClientSession() + if err != nil { + return err + } + // Extract CIS Ids from TF Id + glbID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + if d.HasChange(cisGLBName) || d.HasChange(cisGLBDefaultPoolIDs) || + d.HasChange(cisGLBFallbackPoolID) || d.HasChange(cisGLBProxied) || + d.HasChange(cisGLBSessionAffinity) || d.HasChange(cisGLBDesc) || + d.HasChange(cisGLBTTL) || d.HasChange(cisGLBEnabled) || + d.HasChange(cisGLBPopPools) || d.HasChange(cisGLBRegionPools) { + + tfDefaultPools := expandStringList(d.Get(cisGLBDefaultPoolIDs).(*schema.Set).List()) + defaultPoolIds, _, err := convertTfToCisTwoVarSlice(tfDefaultPools) + fbPoolID := d.Get(cisGLBFallbackPoolID).(string) + fallbackPool, _, _ := convertTftoCisTwoVar(fbPoolID) + + opt := cisClient.NewEditLoadBalancerOptions(glbID) + opt.SetName(d.Get(cisGLBName).(string)) + opt.SetProxied(d.Get(cisGLBProxied).(bool)) + opt.SetSessionAffinity(d.Get(cisGLBSessionAffinity).(string)) + opt.SetDefaultPools(defaultPoolIds) + opt.SetFallbackPool(fallbackPool) + if description, ok := d.GetOk(cisGLBDesc); ok { + opt.SetDescription(description.(string)) + } + if ttl, ok := d.GetOk(cisGLBTTL); ok { + opt.SetTTL(int64(ttl.(int))) + } + if enabled, ok := d.GetOk(cisGLBEnabled); ok { + opt.SetEnabled(enabled.(bool)) + } + if regionPools, ok := d.GetOk(cisGLBRegionPools); ok { + expandedRegionPools, err := expandGeoPools(regionPools, cisGLBRegionPoolsRegion) + if err != nil { + return err + } + opt.SetRegionPools(expandedRegionPools) + } + if popPools, ok := d.GetOk(cisGLBPopPools); ok { + expandedPopPools, err := expandGeoPools(popPools, cisGLBPopPoolsPop) + if err != nil { + return err + } + opt.SetPopPools(expandedPopPools) + } + + _, resp, err := cisClient.EditLoadBalancer(opt) + if err != nil { + log.Printf("[WARN] Error updating GLB %v\n", resp) + return err + } + } + + return resourceCISGlbRead(d, 
meta) +} + +func resourceCISGlbDelete(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisGLBClientSession() + if err != nil { + return err + } + // Extract CIS Ids from TF Id + glbID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewDeleteLoadBalancerOptions(glbID) + + result, resp, err := cisClient.DeleteLoadBalancer(opt) + if err != nil { + log.Printf("[WARN] Error deleting GLB %v\n", resp) + return err + } + log.Printf("Deletion successful : %s", *result.Result.ID) + return nil +} + +func resourceCISGlbExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisGLBClientSession() + if err != nil { + return false, err + } + // Extract CIS Ids from TF Id + glbID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return false, err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewGetLoadBalancerSettingsOptions(glbID) + + _, response, err := cisClient.GetLoadBalancerSettings(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + log.Printf("global load balancer does not exist.") + return false, nil + } + log.Printf("[WARN] Error getting GLB %v\n", response) + return false, err + } + return true, nil +} + +func expandGeoPools(pool interface{}, geoType string) (map[string][]string, error) { + pools := pool.(*schema.Set).List() + expandPool := make(map[string][]string) + for _, v := range pools { + locationConfig := v.(map[string]interface{}) + location := locationConfig[geoType].(string) + if _, p := expandPool[location]; !p { + geoPools := expandStringList(locationConfig[cisGLBRegionPoolsPoolIDs].([]interface{})) + expandPool[location], _, _ = convertTfToCisTwoVarSlice(geoPools) + } else { + return nil, fmt.Errorf("duplicate entry specified for %s pool in location %q. "+ + "each location must only be specified once", geoType, location) + } + } + return expandPool, nil +} + +func flattenPools(pools interface{}, geoType string, cisID string) []interface{} { + result := make([]interface{}, 0) + for k, v := range pools.(map[string]interface{}) { + poolIds := convertCisToTfTwoVarSlice(expandStringList(v.([]interface{})), cisID) + pool := map[string]interface{}{ + geoType: k, + cisGLBPopPoolsPoolIDs: poolIds, + } + result = append(result, pool) + } + return result +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_healthcheck.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_healthcheck.go new file mode 100644 index 00000000000..231177edec0 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_healthcheck.go @@ -0,0 +1,478 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISHealthCheck = "ibm_cis_healthcheck" + cisGLBHealthCheckID = "monitor_id" + cisGLBHealthCheckPath = "path" + cisGLBHealthCheckPort = "port" + cisGLBHealthCheckExpectedBody = "expected_body" + cisGLBHealthCheckExpectedCodes = "expected_codes" + cisGLBHealthCheckDesc = "description" + cisGLBHealthCheckType = "type" + cisGLBHealthCheckMethod = "method" + cisGLBHealthCheckTimeout = "timeout" + cisGLBHealthCheckRetries = "retries" + cisGLBHealthCheckInterval = "interval" + cisGLBHealthCheckFollowRedirects = "follow_redirects" + cisGLBHealthCheckAllowInsecure = "allow_insecure" + cisGLBHealthCheckCreatedOn = "create_on" + cisGLBHealthCheckModifiedOn = "modified_on" + cisGLBHealthCheckHeaders = "headers" + cisGLBHealthCheckHeadersHeader = "header" + cisGLBHealthCheckHeadersValues = "values" +) + +func resourceIBMCISHealthCheck() *schema.Resource { + return &schema.Resource{ + + Create: resourceCISHealthCheckCreate, + Read: resourceCISHealthCheckRead, + Update: resourceCISHealthCheckUpdate, + Delete: resourceCISHealthCheckDelete, + Exists: resourceCISHealthCheckExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS instance crn", + Required: true, + }, + cisGLBHealthCheckID: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Monitor/Health check id", + }, + cisGLBHealthCheckPath: { + Type: schema.TypeString, + Description: "path", + Optional: true, + Default: "/", + ValidateFunc: validateURLPath, + }, + cisGLBHealthCheckExpectedBody: { + Type: schema.TypeString, + Description: "expected_body", + Optional: true, + }, + cisGLBHealthCheckExpectedCodes: { + Type: schema.TypeString, + Description: "expected_codes", + Optional: true, + }, + cisGLBHealthCheckDesc: { + Type: schema.TypeString, + Description: "description", + Default: " ", + Optional: true, + }, + cisGLBHealthCheckType: { + Type: schema.TypeString, + Description: "type", + Optional: true, + Default: "http", + ValidateFunc: InvokeValidator(ibmCISHealthCheck, cisGLBHealthCheckType), + }, + cisGLBHealthCheckMethod: { + Type: schema.TypeString, + Description: "method", + Optional: true, + Default: "GET", + ValidateFunc: InvokeValidator(ibmCISHealthCheck, cisGLBHealthCheckMethod), + }, + cisGLBHealthCheckTimeout: { + Type: schema.TypeInt, + Description: "timeout", + Optional: true, + Default: 5, + ValidateFunc: InvokeValidator(ibmCISHealthCheck, cisGLBHealthCheckTimeout), + }, + cisGLBHealthCheckRetries: { + Type: schema.TypeInt, + Description: "retries", + Optional: true, + Default: 2, + ValidateFunc: InvokeValidator(ibmCISHealthCheck, cisGLBHealthCheckRetries), + }, + cisGLBHealthCheckInterval: { + Type: schema.TypeInt, + Description: "interval", + Optional: true, + Default: 60, + ValidateFunc: InvokeValidator(ibmCISHealthCheck, cisGLBHealthCheckInterval), + }, + cisGLBHealthCheckFollowRedirects: { + Type: schema.TypeBool, + Description: "follow_redirects", + Default: false, + Optional: true, + }, + cisGLBHealthCheckAllowInsecure: { + Type: schema.TypeBool, + Description: "allow_insecure", + Optional: true, + Default: false, + }, + cisGLBHealthCheckCreatedOn: { + Type: schema.TypeString, + Computed: true, + }, + cisGLBHealthCheckModifiedOn: { + Type: schema.TypeString, + Computed: true, + }, + cisGLBHealthCheckPort: { + Type: 
schema.TypeInt,
+				Description:  "port number",
+				Computed:     true,
+				Optional:     true,
+				ValidateFunc: InvokeValidator(ibmCISHealthCheck, cisGLBHealthCheckPort),
+			},
+			cisGLBHealthCheckHeaders: {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						cisGLBHealthCheckHeadersHeader: {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+
+						cisGLBHealthCheckHeadersValues: {
+							Type:     schema.TypeSet,
+							Required: true,
+							Elem: &schema.Schema{
+								Type: schema.TypeString,
+							},
+						},
+					},
+				},
+				Set: hashByMapKey(cisGLBHealthCheckHeadersHeader),
+			},
+		},
+	}
+}
+
+func resourceIBMCISHealthCheckValidator() *ResourceValidator {
+	healthCheckTypes := "http, https, tcp"
+	methods := "GET, HEAD"
+
+	validateSchema := make([]ValidateSchema, 0)
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisGLBHealthCheckType,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              healthCheckTypes})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisGLBHealthCheckMethod,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              methods})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisGLBHealthCheckTimeout,
+			ValidateFunctionIdentifier: IntBetween,
+			Type:                       TypeInt,
+			Required:                   true,
+			MinValue:                   "1",
+			MaxValue:                   "10"})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisGLBHealthCheckRetries,
+			ValidateFunctionIdentifier: IntBetween,
+			Type:                       TypeInt,
+			Required:                   true,
+			MinValue:                   "1",
+			MaxValue:                   "3"})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisGLBHealthCheckInterval,
+			ValidateFunctionIdentifier: IntBetween,
+			Type:                       TypeInt,
+			Required:                   true,
+			MinValue:                   "5",
+			MaxValue:                   "3600"})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 cisGLBHealthCheckPort,
+			ValidateFunctionIdentifier: IntBetween,
+			Type:                       TypeInt,
+			Required:                   true,
+			MinValue:                   "1",
+			MaxValue:                   "65535"})
+	cisHealthCheckValidator := ResourceValidator{ResourceName: ibmCISHealthCheck, Schema: validateSchema}
+	return &cisHealthCheckValidator
+}
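For reference, each IntBetween entry above ultimately expresses the same constraint as the SDK's stock validators. A minimal sketch of the `timeout` field written with helper/validation directly, bypassing the provider's InvokeValidator indirection (the wiring shown here is illustrative, not the provider's registration mechanism):

package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)

// timeoutSchema: what the ValidateSchema entry for `timeout` boils down to.
var timeoutSchema = &schema.Schema{
	Type:         schema.TypeInt,
	Optional:     true,
	Default:      5,
	ValidateFunc: validation.IntBetween(1, 10),
}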
+func resourceCISHealthCheckCreate(d *schema.ResourceData, meta interface{}) error {
+	sess, err := meta.(ClientSession).CisGLBHealthCheckClientSession()
+	if err != nil {
+		return err
+	}
+
+	crn := d.Get(cisID).(string)
+	sess.Crn = core.StringPtr(crn)
+
+	opt := sess.NewCreateLoadBalancerMonitorOptions()
+
+	if monType, ok := d.GetOk(cisGLBHealthCheckType); ok {
+		opt.SetType(monType.(string))
+	}
+	if expCodes, ok := d.GetOk(cisGLBHealthCheckExpectedCodes); ok {
+		opt.SetExpectedCodes(expCodes.(string))
+	}
+	if expBody, ok := d.GetOk(cisGLBHealthCheckExpectedBody); ok {
+		opt.SetExpectedBody(expBody.(string))
+	}
+	if monPath, ok := d.GetOk(cisGLBHealthCheckPath); ok {
+		opt.SetPath(monPath.(string))
+	}
+	if description, ok := d.GetOk(cisGLBHealthCheckDesc); ok {
+		opt.SetDescription(description.(string))
+	}
+	if method, ok := d.GetOk(cisGLBHealthCheckMethod); ok {
+		opt.SetMethod(method.(string))
+	}
+	if timeout, ok := d.GetOk(cisGLBHealthCheckTimeout); ok {
+		opt.SetTimeout(int64(timeout.(int)))
+	}
+	if retries, ok := d.GetOk(cisGLBHealthCheckRetries); ok {
+		opt.SetRetries(int64(retries.(int)))
+	}
+	if interval, ok := d.GetOk(cisGLBHealthCheckInterval); ok {
+		opt.SetInterval(int64(interval.(int)))
+	}
+	if followRedirects, ok := d.GetOk(cisGLBHealthCheckFollowRedirects); ok {
+		opt.SetFollowRedirects(followRedirects.(bool))
+	}
+	if allowInsecure, ok := d.GetOk(cisGLBHealthCheckAllowInsecure); ok {
+		opt.SetAllowInsecure(allowInsecure.(bool))
+	}
+	if port, ok := d.GetOk(cisGLBHealthCheckPort); ok {
+		opt.SetPort(int64(port.(int)))
+	}
+	if header, ok := d.GetOk(cisGLBHealthCheckHeaders); ok {
+		opt.SetHeader(expandLoadBalancerMonitorHeader(header))
+	}
+
+	result, resp, err := sess.CreateLoadBalancerMonitor(opt)
+	if err != nil {
+		log.Printf("create global load balancer health check failed %s", resp)
+		return err
+	}
+	log.Printf("health check monitor created successfully : %s", *result.Result.ID)
+	d.SetId(convertCisToTfTwoVar(*result.Result.ID, crn))
+	return resourceCISHealthCheckRead(d, meta)
+}
+
+func resourceCISHealthCheckRead(d *schema.ResourceData, meta interface{}) error {
+	sess, err := meta.(ClientSession).CisGLBHealthCheckClientSession()
+	if err != nil {
+		return err
+	}
+
+	monitorID, crn, err := convertTftoCisTwoVar(d.Id())
+	if err != nil {
+		return err
+	}
+	sess.Crn = core.StringPtr(crn)
+
+	opt := sess.NewGetLoadBalancerMonitorOptions(monitorID)
+
+	result, resp, err := sess.GetLoadBalancerMonitor(opt)
+	if err != nil {
+		log.Printf("Error reading global load balancer health check detail: %s", resp)
+		return err
+	}
+	d.Set(cisGLBHealthCheckID, result.Result.ID)
+	d.Set(cisID, crn)
+	d.Set(cisGLBHealthCheckDesc, result.Result.Description)
+	d.Set(cisGLBHealthCheckPath, result.Result.Path)
+	d.Set(cisGLBHealthCheckExpectedBody, result.Result.ExpectedBody)
+	d.Set(cisGLBHealthCheckExpectedCodes, result.Result.ExpectedCodes)
+	d.Set(cisGLBHealthCheckType, result.Result.Type)
+	d.Set(cisGLBHealthCheckMethod, result.Result.Method)
+	d.Set(cisGLBHealthCheckTimeout, result.Result.Timeout)
+	d.Set(cisGLBHealthCheckRetries, result.Result.Retries)
+	d.Set(cisGLBHealthCheckInterval, result.Result.Interval)
+	d.Set(cisGLBHealthCheckFollowRedirects, result.Result.FollowRedirects)
+	d.Set(cisGLBHealthCheckAllowInsecure, result.Result.AllowInsecure)
+	d.Set(cisGLBHealthCheckPort, result.Result.Port)
+	d.Set(cisGLBHealthCheckCreatedOn, result.Result.CreatedOn)
+	d.Set(cisGLBHealthCheckModifiedOn, result.Result.ModifiedOn)
+	if err := d.Set(cisGLBHealthCheckHeaders, flattenLoadBalancerMonitorHeader(result.Result.Header)); err != nil {
+		log.Printf("[WARN] Error setting header for load balancer monitor %q: %s", d.Id(), err)
+	}
+
+	return nil
+}
+func resourceCISHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess, err := meta.(ClientSession).CisGLBHealthCheckClientSession()
+	if err != nil {
+		return err
+	}
+
+	monitorID, crn, err := convertTftoCisTwoVar(d.Id())
+	if err != nil {
+		return err
+	}
+	sess.Crn = core.StringPtr(crn)
+
+	opt := sess.NewEditLoadBalancerMonitorOptions(monitorID)
+	if d.HasChange(cisGLBHealthCheckType) ||
+		d.HasChange(cisGLBHealthCheckDesc) ||
+		d.HasChange(cisGLBHealthCheckPort) ||
+		d.HasChange(cisGLBHealthCheckExpectedCodes) ||
+		d.HasChange(cisGLBHealthCheckExpectedBody) ||
+		d.HasChange(cisGLBHealthCheckMethod) ||
+		d.HasChange(cisGLBHealthCheckTimeout) ||
+		d.HasChange(cisGLBHealthCheckRetries) ||
+		d.HasChange(cisGLBHealthCheckInterval) ||
+		d.HasChange(cisGLBHealthCheckFollowRedirects) ||
+		d.HasChange(cisGLBHealthCheckAllowInsecure) ||
+		d.HasChange(cisGLBHealthCheckHeaders) {
+		if monType, ok := d.GetOk(cisGLBHealthCheckType); ok {
+			opt.SetType(monType.(string))
+		}
+		if expCodes, ok := d.GetOk(cisGLBHealthCheckExpectedCodes); ok {
+			opt.SetExpectedCodes(expCodes.(string))
+		}
+		if expBody, ok := d.GetOk(cisGLBHealthCheckExpectedBody); ok {
+			opt.SetExpectedBody(expBody.(string))
+		}
+		if monPath, ok := d.GetOk(cisGLBHealthCheckPath); ok {
+			opt.SetPath(monPath.(string))
+		}
+		if description, ok := d.GetOk(cisGLBHealthCheckDesc); ok {
+			opt.SetDescription(description.(string))
+		}
+		if method, ok := d.GetOk(cisGLBHealthCheckMethod); ok {
+			opt.SetMethod(method.(string))
+		}
+		if timeout, ok := d.GetOk(cisGLBHealthCheckTimeout); ok {
+			opt.SetTimeout(int64(timeout.(int)))
+		}
+		if retries, ok := d.GetOk(cisGLBHealthCheckRetries); ok {
+			opt.SetRetries(int64(retries.(int)))
+		}
+		if interval, ok := d.GetOk(cisGLBHealthCheckInterval); ok {
+			opt.SetInterval(int64(interval.(int)))
+		}
+		if followRedirects, ok := d.GetOk(cisGLBHealthCheckFollowRedirects); ok {
+			opt.SetFollowRedirects(followRedirects.(bool))
+		}
+		if allowInsecure, ok := d.GetOk(cisGLBHealthCheckAllowInsecure); ok {
+			opt.SetAllowInsecure(allowInsecure.(bool))
+		}
+		if port, ok := d.GetOk(cisGLBHealthCheckPort); ok {
+			opt.SetPort(int64(port.(int)))
+		}
+		if header, ok := d.GetOk(cisGLBHealthCheckHeaders); ok {
+			opt.SetHeader(expandLoadBalancerMonitorHeader(header))
+		}
+		result, resp, err := sess.EditLoadBalancerMonitor(opt)
+		if err != nil {
+			log.Printf("Error updating global load balancer health check detail: %s", resp)
+			return err
+		}
+		log.Printf("Monitor update successful : %s", *result.Result.ID)
+	}
+
+	return resourceCISHealthCheckRead(d, meta)
+}
+
+func resourceCISHealthCheckDelete(d *schema.ResourceData, meta interface{}) error {
+	sess, err := meta.(ClientSession).CisGLBHealthCheckClientSession()
+	if err != nil {
+		return err
+	}
+
+	monitorID, crn, err := convertTftoCisTwoVar(d.Id())
+	if err != nil {
+		return err
+	}
+	sess.Crn = core.StringPtr(crn)
+
+	opt := sess.NewDeleteLoadBalancerMonitorOptions(monitorID)
+
+	result, resp, err := sess.DeleteLoadBalancerMonitor(opt)
+	if err != nil {
+		log.Printf("Error deleting global load balancer health check detail: %s", resp)
+		return err
+	}
+	log.Printf("Monitor ID: %s", *result.Result.ID)
+	return nil
+}
+
+func resourceCISHealthCheckExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess, err := meta.(ClientSession).CisGLBHealthCheckClientSession()
+	if err != nil {
+		return false, err
+	}
+
+	monitorID, crn, err := convertTftoCisTwoVar(d.Id())
+	if err != nil {
+		return false, err
+	}
+	sess.Crn = core.StringPtr(crn)
+
+	opt := sess.NewGetLoadBalancerMonitorOptions(monitorID)
+
+	result, response, err := sess.GetLoadBalancerMonitor(opt)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			log.Printf("global load balancer health check does not exist.")
+			return false, nil
+		}
+		log.Printf("Error : %s", response)
+		return false, err
+	}
+	log.Printf("global load balancer health check exists: %s", *result.Result.ID)
+	return true, nil
+}
+
+func hashByMapKey(key string) func(v interface{}) int {
+	return func(v interface{}) int {
+		m := v.(map[string]interface{})
+		return schema.HashString(m[key])
+	}
+}
+
+func expandLoadBalancerMonitorHeader(cfgSet interface{}) map[string][]string {
+	header := make(map[string][]string)
+	cfgList := cfgSet.(*schema.Set).List()
+	for _, item := range cfgList {
+		cfg := item.(map[string]interface{})
+		header[cfg[cisGLBHealthCheckHeadersHeader].(string)] =
+			expandStringList(cfg[cisGLBHealthCheckHeadersValues].(*schema.Set).List())
+	}
+	return header
+}
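Why hash the headers set by the "header" key alone? It gives each header name exactly one slot in the set, so editing a header's values replaces the element in place instead of spawning a duplicate. A minimal sketch of the same idea ("header" key hard-coded here for illustration):

package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// headerHash mirrors hashByMapKey: element identity comes from the header
// name only, never from the values.
func headerHash(v interface{}) int {
	m := v.(map[string]interface{})
	return schema.HashString(m["header"])
}

// sameSlot reports whether two header blocks would collide in the set,
// i.e. whether they describe the same header name.
func sameSlot(a, b map[string]interface{}) bool {
	return headerHash(a) == headerHash(b)
}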
*schema.Set {
+	flattened := make([]interface{}, 0)
+	for k, v := range header {
+		cfg := map[string]interface{}{
+			cisGLBHealthCheckHeadersHeader: k,
+			cisGLBHealthCheckHeadersValues: schema.NewSet(schema.HashString, flattenStringList(v)),
+		}
+		flattened = append(flattened, cfg)
+	}
+	return schema.NewSet(hashByMapKey(cisGLBHealthCheckHeadersHeader), flattened)
+}
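The expand/flatten pair above should be lossless for header data. A sketch of that round trip with plain maps and slices standing in for the provider helpers (the SDK set plumbing is stripped out, so these stand-ins are illustrative only):

package example

import "reflect"

// flattenHeader is a stand-in for flattenLoadBalancerMonitorHeader.
func flattenHeader(h map[string][]string) []map[string]interface{} {
	out := []map[string]interface{}{}
	for k, v := range h {
		out = append(out, map[string]interface{}{"header": k, "values": v})
	}
	return out
}

// expandHeader is a stand-in for expandLoadBalancerMonitorHeader.
func expandHeader(items []map[string]interface{}) map[string][]string {
	h := map[string][]string{}
	for _, item := range items {
		h[item["header"].(string)] = item["values"].([]string)
	}
	return h
}

// roundTripIsLossless: flatten then expand reproduces the input
// (set-element ordering aside).
func roundTripIsLossless(in map[string][]string) bool {
	return reflect.DeepEqual(in, expandHeader(flattenHeader(in)))
}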
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_origin_pool.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_origin_pool.go
new file mode 100644
index 00000000000..0cbe161b2f5
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_origin_pool.go
@@ -0,0 +1,391 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"log"
+
+	"github.com/IBM/go-sdk-core/v4/core"
+	"github.com/IBM/networking-go-sdk/globalloadbalancerpoolsv0"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	cisGLBPoolID                   = "pool_id"
+	cisGLBPoolName                 = "name"
+	cisGLBPoolRegions              = "check_regions"
+	cisGLBPoolDesc                 = "description"
+	cisGLBPoolEnabled              = "enabled"
+	cisGLBPoolMinimumOrigins       = "minimum_origins"
+	cisGLBPoolMonitor              = "monitor"
+	cisGLBPoolNotificationEMail    = "notification_email"
+	cisGLBPoolOrigins              = "origins"
+	cisGLBPoolHealth               = "health"
+	cisGLBPoolHealthy              = "healthy"
+	cisGLBPoolCreatedOn            = "created_on"
+	cisGLBPoolModifiedOn           = "modified_on"
+	cisGLBPoolOriginsName          = "name"
+	cisGLBPoolOriginsAddress       = "address"
+	cisGLBPoolOriginsEnabled       = "enabled"
+	cisGLBPoolOriginsHealthy       = "healthy"
+	cisGLBPoolOriginsWeight        = "weight"
+	cisGLBPoolOriginsDisabledAt    = "disabled_at"
+	cisGLBPoolOriginsFailureReason = "failure_reason"
+)
+
+func resourceIBMCISPool() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			cisID: {
+				Type:        schema.TypeString,
+				Description: "CIS instance crn",
+				Required:    true,
+			},
+			cisGLBPoolID: {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			cisGLBPoolName: {
+				Type:        schema.TypeString,
+				Description: "name",
+				Required:    true,
+			},
+			cisGLBPoolRegions: {
+				Type:     schema.TypeSet,
+				Required: true,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+				Description: "List of regions",
+			},
+			cisGLBPoolDesc: {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Description of the CIS Origin Pool",
+			},
+			cisGLBPoolEnabled: {
+				Type:        schema.TypeBool,
+				Required:    true,
+				Description: "Boolean value set to true if cis origin pool needs to be enabled",
+			},
+			cisGLBPoolMinimumOrigins: {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Default:     1,
+				Description: "Minimum number of Origins",
+			},
+			cisGLBPoolMonitor: {
+				Type:             schema.TypeString,
+				Optional:         true,
+				Description:      "Monitor value",
+				DiffSuppressFunc: suppressDomainIDDiff,
+			},
+			cisGLBPoolNotificationEMail: {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Email address configured to receive the notifications",
+			},
+			cisGLBPoolOrigins: {
+				Type:        schema.TypeSet,
+				Required:    true,
+				Description: "Origins info",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						cisGLBPoolOriginsName: {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						cisGLBPoolOriginsAddress: {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						cisGLBPoolOriginsEnabled: {
+							Type:     schema.TypeBool,
+							Required: true,
+						},
+						cisGLBPoolOriginsWeight: {
+							Type:     schema.TypeFloat,
+							Default:  1,
+							Optional: true,
+						},
+						cisGLBPoolOriginsHealthy: {
+							Type:     schema.TypeBool,
+							Computed: true,
+						},
+						cisGLBPoolOriginsDisabledAt: {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						cisGLBPoolOriginsFailureReason: {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+					},
+				},
+			},
+			cisGLBPoolHealth: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Health info",
+			},
+			cisGLBPoolHealthy: {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Health status",
+			},
+			cisGLBPoolCreatedOn: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Creation date info",
+			},
+			cisGLBPoolModifiedOn: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Modified date info",
+			},
+		},
+
+		Create:   resourceCISPoolCreate,
+		Read:     resourceCISPoolRead,
+		Update:   resourceCISPoolUpdate,
+		Delete:   resourceCISPoolDelete,
+		Exists:   resourceCISPoolExists,
+		Importer: &schema.ResourceImporter{},
+	}
+}
+
+func resourceCISPoolCreate(d *schema.ResourceData, meta interface{}) error {
+	var regions []string
+	cisClient, err := meta.(ClientSession).CisGLBPoolClientSession()
+	if err != nil {
+		return err
+	}
+
+	crn := d.Get(cisID).(string)
+	cisClient.Crn = core.StringPtr(crn)
+	name := d.Get(cisGLBPoolName).(string)
+	origins := d.Get(cisGLBPoolOrigins).(*schema.Set).List()
+	checkRegions := d.Get(cisGLBPoolRegions).(*schema.Set).List()
+
+	for _, region := range checkRegions {
+		regions = append(regions, region.(string))
+	}
+
+	glbOrigins := []globalloadbalancerpoolsv0.LoadBalancerPoolReqOriginsItem{}
+
+	for _, origin := range origins {
+		orig := origin.(map[string]interface{})
+		glbOrigin := globalloadbalancerpoolsv0.LoadBalancerPoolReqOriginsItem{
+			Name:    core.StringPtr(orig[cisGLBPoolOriginsName].(string)),
+			Address: core.StringPtr(orig[cisGLBPoolOriginsAddress].(string)),
+			Enabled: core.BoolPtr(orig[cisGLBPoolOriginsEnabled].(bool)),
+			Weight:  core.Float64Ptr(orig[cisGLBPoolOriginsWeight].(float64)),
+		}
+		glbOrigins = append(glbOrigins, glbOrigin)
+	}
+
+	opt := cisClient.NewCreateLoadBalancerPoolOptions()
+	opt.SetName(name)
+	opt.SetCheckRegions(regions)
+	opt.SetOrigins(glbOrigins)
+	opt.SetEnabled(d.Get(cisGLBPoolEnabled).(bool))
+
+	if notifEmail, ok := d.GetOk(cisGLBPoolNotificationEMail); ok {
+		opt.SetNotificationEmail(notifEmail.(string))
+	}
+	if monitor, ok := d.GetOk(cisGLBPoolMonitor); ok {
+		monitorID, _, _ := convertTftoCisTwoVar(monitor.(string))
+		opt.SetMonitor(monitorID)
+	}
+	if minOrigins, ok := d.GetOk(cisGLBPoolMinimumOrigins); ok {
+		opt.SetMinimumOrigins(int64(minOrigins.(int)))
+	}
+	if description, ok := d.GetOk(cisGLBPoolDesc); ok {
+		opt.SetDescription(description.(string))
+	}
+
+	result, resp, err := cisClient.CreateLoadBalancerPool(opt)
+	if err != nil {
+		log.Printf("[WARN] Create GLB Pools failed %s\n", resp)
+		return err
+	}
+	// Set unique TF Id from concatenated CIS Ids
+	d.SetId(convertCisToTfTwoVar(*result.Result.ID, crn))
+	return resourceCISPoolRead(d, meta)
+}
+func resourceCISPoolRead(d *schema.ResourceData, meta interface{}) error {
+	cisClient, err := meta.(ClientSession).CisGLBPoolClientSession()
+	if err != nil {
+		return err
+	}
+	poolID, crn, err := convertTftoCisTwoVar(d.Id())
+	if err != nil {
+		return err
+	}
+	cisClient.Crn = core.StringPtr(crn)
+	opt := cisClient.NewGetLoadBalancerPoolOptions(poolID)
+	result, resp, err := cisClient.GetLoadBalancerPool(opt)
+	if err != nil {
+		log.Printf("[WARN] Get GLB Pool failed %s\n", resp)
+		return err
+	}
+
+	poolObj := *result.Result
+	d.Set(cisID, crn)
+	d.Set(cisGLBPoolID, poolObj.ID)
+	d.Set(cisGLBPoolName, poolObj.Name)
+	d.Set(cisGLBPoolOrigins, flattenOrigins(poolObj.Origins))
+	d.Set(cisGLBPoolRegions, poolObj.CheckRegions)
+	d.Set(cisGLBPoolDesc, poolObj.Description)
+	d.Set(cisGLBPoolEnabled, poolObj.Enabled)
+	d.Set(cisGLBPoolNotificationEMail, poolObj.NotificationEmail)
+	d.Set(cisGLBPoolHealthy, poolObj.Healthy)
+	d.Set(cisGLBPoolMinimumOrigins, poolObj.MinimumOrigins)
+	d.Set(cisGLBPoolCreatedOn, poolObj.CreatedOn)
+	d.Set(cisGLBPoolModifiedOn, poolObj.ModifiedOn)
+	if poolObj.Monitor != nil {
+		d.Set(cisGLBPoolMonitor, *poolObj.Monitor)
+	}
+	return nil
+}
+
+func resourceCISPoolUpdate(d *schema.ResourceData, meta interface{}) error {
+	cisClient, err := meta.(ClientSession).CisGLBPoolClientSession()
+	if err != nil {
+		return err
+	}
+	poolID, crn, err := convertTftoCisTwoVar(d.Id())
+	if err != nil {
+		return err
+	}
+	cisClient.Crn = core.StringPtr(crn)
+	if d.HasChange(cisGLBPoolName) ||
+		d.HasChange(cisGLBPoolOrigins) ||
+		d.HasChange(cisGLBPoolRegions) ||
+		d.HasChange(cisGLBPoolNotificationEMail) ||
+		d.HasChange(cisGLBPoolMonitor) ||
+		d.HasChange(cisGLBPoolEnabled) ||
+		d.HasChange(cisGLBPoolMinimumOrigins) ||
+		d.HasChange(cisGLBPoolDesc) {
+
+		opt := cisClient.NewEditLoadBalancerPoolOptions(poolID)
+		if monitor, ok := d.GetOk(cisGLBPoolMonitor); ok {
+			monitorID, _, _ := convertTftoCisTwoVar(monitor.(string))
+			opt.SetMonitor(monitorID)
+		}
+
+		if name, ok := d.GetOk(cisGLBPoolName); ok {
+			opt.SetName(name.(string))
+		}
+		if origins, ok := d.GetOk(cisGLBPoolOrigins); ok {
+			glbOrigins := []globalloadbalancerpoolsv0.LoadBalancerPoolReqOriginsItem{}
+
+			for _, origin := range origins.(*schema.Set).List() {
+				orig := origin.(map[string]interface{})
+				glbOrigin := globalloadbalancerpoolsv0.LoadBalancerPoolReqOriginsItem{
+					Name:    core.StringPtr(orig[cisGLBPoolOriginsName].(string)),
+					Address: core.StringPtr(orig[cisGLBPoolOriginsAddress].(string)),
+					Enabled: core.BoolPtr(orig[cisGLBPoolOriginsEnabled].(bool)),
+					Weight:  core.Float64Ptr(orig[cisGLBPoolOriginsWeight].(float64)),
+				}
+				glbOrigins = append(glbOrigins, glbOrigin)
+			}
+			opt.SetOrigins(glbOrigins)
+		}
+		if checkregions, ok := d.GetOk(cisGLBPoolRegions); ok {
+			checkRegions := checkregions.(*schema.Set).List()
+			var regions []string
+			for _, region := range checkRegions {
+				regions = append(regions, region.(string))
+			}
+			opt.SetCheckRegions(regions)
+		}
+		if notEmail, ok := d.GetOk(cisGLBPoolNotificationEMail); ok {
+			opt.SetNotificationEmail(notEmail.(string))
+		}
+
+		if enabled, ok := d.GetOk(cisGLBPoolEnabled); ok {
+			opt.SetEnabled(enabled.(bool))
+		}
+		if minOrigins, ok := d.GetOk(cisGLBPoolMinimumOrigins); ok {
+			opt.SetMinimumOrigins(int64(minOrigins.(int)))
+		}
+		if description, ok := d.GetOk(cisGLBPoolDesc); ok {
+			opt.SetDescription(description.(string))
+		}
+		_, resp, err := cisClient.EditLoadBalancerPool(opt)
+		if err != nil {
+			log.Printf("[WARN] Edit GLB Pool failed %v\n", resp)
+			return err
+		}
+	}
+	return resourceCISPoolRead(d, meta)
+}
+
+func resourceCISPoolDelete(d *schema.ResourceData, meta interface{}) error {
+	cisClient, err := meta.(ClientSession).CisGLBPoolClientSession()
+	if err != nil {
+		return err
+	}
+	poolID, crn, err := convertTftoCisTwoVar(d.Id())
+	if err != nil {
+		return err
+	}
+	cisClient.Crn = core.StringPtr(crn)
+	opt := cisClient.NewDeleteLoadBalancerPoolOptions(poolID)
+	result, resp, err := cisClient.DeleteLoadBalancerPool(opt)
+	if err != nil {
+		log.Printf("[WARN] Delete GLB Pools failed %s\n", resp)
+		return err
+	}
+	log.Printf("Pool %s deleted successfully.", *result.Result.ID)
+	return nil
+}
+
+func resourceCISPoolExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	cisClient, err := meta.(ClientSession).CisGLBPoolClientSession()
+	if err != nil {
+		return false, err
+	}
+	poolID, crn, err := convertTftoCisTwoVar(d.Id())
+	if err != nil {
+		return false, err
+	}
+	cisClient.Crn = core.StringPtr(crn)
+	opt := cisClient.NewGetLoadBalancerPoolOptions(poolID)
+	result, response, err := cisClient.GetLoadBalancerPool(opt)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			log.Printf("global load balancer pool does not exist.")
+			return false, nil
+		}
+		log.Printf("Error : %s", response)
+		return false, err
+	}
+	log.Printf("global load balancer pool exists: %s", *result.Result.ID)
+	return true, nil
+}
+
+// flattenOrigins maps the SDK origin items into the Terraform state shape.
+func flattenOrigins(list []globalloadbalancerpoolsv0.LoadBalancerPoolPackOriginsItem) []map[string]interface{} {
+	origins := []map[string]interface{}{}
+	for _, origin := range list {
+		l := map[string]interface{}{
+			cisGLBPoolOriginsName:    origin.Name,
+			cisGLBPoolOriginsAddress: origin.Address,
+			cisGLBPoolOriginsEnabled: origin.Enabled,
+			cisGLBPoolOriginsHealthy: origin.Healthy,
+			cisGLBPoolOriginsWeight:  origin.Weight,
+		}
+		if origin.DisabledAt != nil {
+			l[cisGLBPoolOriginsDisabledAt] = *origin.DisabledAt
+		}
+		if origin.FailureReason != nil {
+			l[cisGLBPoolOriginsFailureReason] = *origin.FailureReason
+		}
+		origins = append(origins, l)
+	}
+	return origins
+}
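Every Exists function in these files repeats the same shape: a 404 means the remote object is gone, so return false with no error and let Terraform plan a re-creation; any other failure is surfaced. A sketch of that pattern factored into one helper (the helper itself is illustrative; the provider keeps it inline). Call it as `_, resp, err := cisClient.GetLoadBalancerPool(opt); return existsFromResponse(resp, err)`.

package example

import (
	"log"

	"github.com/IBM/go-sdk-core/v4/core"
)

// existsFromResponse maps an SDK (response, err) pair onto the Exists
// contract: 404 -> (false, nil) so the resource is dropped from state and
// re-created on the next apply; other errors propagate.
func existsFromResponse(response *core.DetailedResponse, err error) (bool, error) {
	if err != nil {
		if response != nil && response.StatusCode == 404 {
			log.Printf("resource not found; it will be recreated on next apply")
			return false, nil
		}
		return false, err
	}
	return true, nil
}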
log.Printf("Pool %s deleted successfully.", *result.Result.ID) + return nil +} + +func resourceCISPoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisGLBPoolClientSession() + if err != nil { + return false, err + } + poolID, cisID, err := convertTftoCisTwoVar(d.Id()) + if err != nil { + return false, err + } + cisClient.Crn = core.StringPtr(cisID) + opt := cisClient.NewGetLoadBalancerPoolOptions(poolID) + result, response, err := cisClient.GetLoadBalancerPool(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + log.Printf("global load balancer pool does not exist.") + return false, nil + } + log.Printf("Error : %s", response) + return false, err + } + log.Printf("global load balancer pool exist : %s", *result.Result.ID) + return true, nil +} + +// Cloud Internet Services +func flattenOrigins(list []globalloadbalancerpoolsv0.LoadBalancerPoolPackOriginsItem) []map[string]interface{} { + origins := []map[string]interface{}{} + for _, origin := range list { + l := map[string]interface{}{ + cisGLBPoolOriginsName: origin.Name, + cisGLBPoolOriginsAddress: origin.Address, + cisGLBPoolOriginsEnabled: origin.Enabled, + cisGLBPoolOriginsHealthy: origin.Healthy, + cisGLBPoolOriginsWeight: origin.Weight, + } + if origin.DisabledAt != nil { + l[cisGLBPoolOriginsDisabledAt] = *origin.DisabledAt + } + if origin.FailureReason != nil { + l[cisGLBPoolOriginsFailureReason] = *origin.FailureReason + } + origins = append(origins, l) + } + return origins +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_page_rule.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_page_rule.go new file mode 100644 index 00000000000..e0c56c53c7b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_page_rule.go @@ -0,0 +1,424 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + "strconv" + + "github.com/IBM/go-sdk-core/v4/core" + cispagerulev1 "github.com/IBM/networking-go-sdk/pageruleapiv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISPageRule = "ibm_cis_page_rule" + cisPageRuleID = "rule_id" + cisPageRuleTargets = "targets" + cisPageRuleTargetsConstraint = "constraint" + cisPageRuleTargetsConstraintOperator = "operator" + cisPageRuleTargetsConstraintValue = "value" + cisPageRuleTargetsTarget = "target" + cisPageRuleActions = "actions" + cisPageRuleActionsID = "id" + cisPageRuleActionsValue = "value" + cisPageRuleActionsValueURL = "url" + cisPageRuleActionsValueStatusCode = "status_code" + cisPageRulePriority = "priority" + cisPageRuleStatus = "status" + cisPageRuleActionsIDForwardingURL = "forwarding_url" + cisPageRuleActionsIDEdgeCacheTTL = "edge_cache_ttl" + cisPageRuleActionsIDBrowserCacheTTL = "browser_cache_ttl" + cisPageRuleActionsIDDisableSecurity = "disable_security" + cisPageRuleActionsIDAlwaysUseHTTPS = "always_use_https" +) + +func resourceIBMCISPageRule() *schema.Resource { + return &schema.Resource{ + Create: resourceCISPageRuleCreate, + Read: resourceCISPageRuleRead, + Update: resourceCISPageRuleUpdate, + Delete: resourceCISPageRuleDelete, + Exists: resourceCISPageRuleExists, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS instance crn", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisPageRuleID: { + Type: schema.TypeString, + Computed: true, + }, + cisPageRulePriority: { + Type: schema.TypeInt, + Description: "Page rule priority", + Optional: true, + Default: 1, + }, + cisPageRuleStatus: { + Type: schema.TypeString, + Description: "Page Rule status", + Optional: true, + Default: "disabled", + ValidateFunc: InvokeValidator( + ibmCISPageRule, cisPageRuleStatus), + }, + cisPageRuleTargets: { + Type: schema.TypeSet, + Description: "Page rule targets", + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisPageRuleTargetsTarget: { + Type: schema.TypeString, + Required: true, + Description: "Page rule target url", + }, + cisPageRuleTargetsConstraint: { + Type: schema.TypeList, + Required: true, + Description: "Page rule constraint", + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisPageRuleTargetsConstraintOperator: { + Type: schema.TypeString, + Required: true, + Description: "Constraint operator", + }, + cisPageRuleTargetsConstraintValue: { + Type: schema.TypeString, + Required: true, + Description: "Constraint value", + }, + }, + }, + }, + }, + }, + }, + cisPageRuleActions: { + Type: schema.TypeSet, + Description: "Page rule actions", + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisPageRuleActionsID: { + Type: schema.TypeString, + Required: true, + Description: "Page rule target url", + ValidateFunc: InvokeValidator( + ibmCISPageRule, cisPageRuleActionsID), + }, + cisPageRuleActionsValue: { + Type: schema.TypeString, + Optional: true, + Description: "Page rule target url", + }, + cisPageRuleActionsValueURL: { + Type: schema.TypeString, + Optional: true, + Description: "Page rule actions value url", + }, + cisPageRuleActionsValueStatusCode: { + Type: schema.TypeInt, + Optional: true, + Description: "Page rule 
actions status code", + }, + }, + }, + }, + }, + } +} + +func resourceCISPageRuleValidator() *ResourceValidator { + actions := "disable_security, always_use_https, always_online, ssl, browser_cache_ttl, " + + "security_level, cache_level, edge_cache_ttl, bypass_cache_on_cookie, " + + "browser_check, server_side_exclude, serve_stale_content, email_obfuscation, " + + "automatic_https_rewrites, opportunistic_encryption, ip_geolocation, " + + "explicit_cache_control, cache_deception_armor, waf, forwarding_url, " + + "host_header_override, resolve_override, cache_on_cookie, disable_apps, " + + "disable_performance, image_load_optimization, origin_error_page_pass_thru, " + + "response_buffering, image_size_optimization, script_load_optimization, " + + "true_client_ip_header, sort_query_string_for_cache, respect_strong_etag" + status := "active, disabled" + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisPageRuleActionsID, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: actions}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisPageRuleStatus, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: status}) + cisPageRuleValidator := ResourceValidator{ResourceName: ibmCISPageRule, Schema: validateSchema} + return &cisPageRuleValidator +} + +func resourceCISPageRuleCreate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisPageRuleClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + + targets := expandCISPageRuleTargets(d.Get(cisPageRuleTargets)) + actions := expandCISPageRuleActions(d.Get(cisPageRuleActions)) + + opt := cisClient.NewCreatePageRuleOptions() + opt.SetTargets(targets) + opt.SetActions(actions) + if value, ok := d.GetOk(cisPageRulePriority); ok { + opt.SetPriority(int64(value.(int))) + } + if value, ok := d.GetOk(cisPageRuleStatus); ok { + opt.SetStatus(value.(string)) + } + + result, response, err := cisClient.CreatePageRule(opt) + if err != nil { + log.Printf("Create page rule failed: %v", response) + return err + } + d.SetId(convertCisToTfThreeVar(*result.Result.ID, zoneID, crn)) + return resourceCISPageRuleRead(d, meta) +} +func resourceCISPageRuleRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisPageRuleClientSession() + if err != nil { + return err + } + + ruleID, zoneID, crn, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + + opt := cisClient.NewGetPageRuleOptions(ruleID) + result, response, err := cisClient.GetPageRule(opt) + if err != nil { + log.Printf("Get page rule failed: %v", response) + return err + } + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisPageRuleID, result.Result.ID) + d.Set(cisPageRulePriority, result.Result.Priority) + d.Set(cisPageRuleStatus, result.Result.Status) + d.Set(cisPageRuleTargets, flattenCISPageRuleTargets(result.Result.Targets)) + d.Set(cisPageRuleActions, flattenCISPageRuleActions(result.Result.Actions)) + return nil +} +func resourceCISPageRuleUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := 
meta.(ClientSession).CisPageRuleClientSession() + if err != nil { + return err + } + + ruleID, zoneID, crn, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + + if d.HasChange(cisPageRuleTargets) || + d.HasChange(cisPageRuleActions) || + d.HasChange(cisPageRulePriority) || + d.HasChange(cisPageRuleStatus) { + + targets := expandCISPageRuleTargets(d.Get(cisPageRuleTargets)) + actions := expandCISPageRuleActions(d.Get(cisPageRuleActions)) + + opt := cisClient.NewUpdatePageRuleOptions(ruleID) + opt.SetTargets(targets) + opt.SetActions(actions) + if value, ok := d.GetOk(cisPageRulePriority); ok { + opt.SetPriority(int64(value.(int))) + } + if value, ok := d.GetOk(cisPageRuleStatus); ok { + opt.SetStatus(value.(string)) + } + + _, response, err := cisClient.UpdatePageRule(opt) + if err != nil { + log.Printf("Update page rule failed: %v", response) + return err + } + } + return resourceCISPageRuleRead(d, meta) +} + +func resourceCISPageRuleDelete(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisPageRuleClientSession() + if err != nil { + return err + } + + ruleID, zoneID, crn, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + opt := cisClient.NewDeletePageRuleOptions(ruleID) + _, response, err := cisClient.DeletePageRule(opt) + if err != nil { + log.Printf("Delete page rule failed: %v", response) + return err + } + return nil +} + +func resourceCISPageRuleExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisPageRuleClientSession() + if err != nil { + return false, err + } + + ruleID, zoneID, crn, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + + opt := cisClient.NewGetPageRuleOptions(ruleID) + _, response, err := cisClient.GetPageRule(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + log.Printf("Page rule does not exist.") + return false, nil + } + log.Printf("Get page rule failed: %v", response) + return false, err + } + return true, nil +} + +func expandCISPageRuleTargets(targets interface{}) []cispagerulev1.TargetsItem { + targetsInput := targets.(*schema.Set).List() + targetsOutput := make([]cispagerulev1.TargetsItem, 0) + for _, instance := range targetsInput { + targetsItem := instance.(map[string]interface{}) + targetsTarget := targetsItem[cisPageRuleTargetsTarget].(string) + targetsConstraint := targetsItem[cisPageRuleTargetsConstraint].([]interface{})[0].(map[string]interface{}) + targetsConstraintOperator := targetsConstraint[cisPageRuleTargetsConstraintOperator].(string) + targetsConstraintValue := targetsConstraint[cisPageRuleTargetsConstraintValue].(string) + targetsConstraintOpt := cispagerulev1.TargetsItemConstraint{ + Operator: &targetsConstraintOperator, + Value: &targetsConstraintValue, + } + targetItemOpt := cispagerulev1.TargetsItem{ + Target: &targetsTarget, + Constraint: &targetsConstraintOpt, + } + targetsOutput = append(targetsOutput, targetItemOpt) + } + return targetsOutput +} + +func expandCISPageRuleActions(actions interface{}) []cispagerulev1.PageRulesBodyActionsItemIntf { + actionsInput := actions.(*schema.Set).List() + + actionsOutput := make([]cispagerulev1.PageRulesBodyActionsItemIntf, 0) + for _, action := range actionsInput { + instance := action.(map[string]interface{}) + id := instance[cisPageRuleActionsID].(string) + var 
value interface{} + switch id { + case cisPageRuleActionsIDDisableSecurity, + cisPageRuleActionsIDAlwaysUseHTTPS: + actionItem := &cispagerulev1.PageRulesBodyActionsItem{ + ID: &id, + } + actionsOutput = append(actionsOutput, actionItem) + case cisPageRuleActionsIDBrowserCacheTTL, + cisPageRuleActionsIDEdgeCacheTTL: + valueStr := instance[cisPageRuleActionsValue].(string) + value, _ = strconv.ParseInt(valueStr, 10, 64) + actionItem := &cispagerulev1.PageRulesBodyActionsItem{ + ID: &id, + Value: &value, + } + actionsOutput = append(actionsOutput, actionItem) + case cisPageRuleActionsIDForwardingURL: + forwardingURL := instance[cisPageRuleActionsValueURL].(string) + statusCode := instance[cisPageRuleActionsValueStatusCode].(int) + value = cispagerulev1.ActionsForwardingUrlValue{ + URL: &forwardingURL, + StatusCode: core.Int64Ptr(int64(statusCode)), + } + actionItem := &cispagerulev1.PageRulesBodyActionsItem{ + ID: &id, + Value: &value, + } + actionsOutput = append(actionsOutput, actionItem) + default: + value = instance[cisPageRuleActionsValue] + actionItem := &cispagerulev1.PageRulesBodyActionsItem{ + ID: &id, + Value: &value, + } + actionsOutput = append(actionsOutput, actionItem) + } + } + return actionsOutput +} + +func flattenCISPageRuleTargets(targets []cispagerulev1.TargetsItem) interface{} { + targetsOutput := make([]interface{}, 0) + + for _, item := range targets { + targetItemOutput := map[string]interface{}{} + constraints := []interface{}{} + constraint := map[string]interface{}{} + // flatten constraint + constraint[cisPageRuleTargetsConstraintOperator] = *item.Constraint.Operator + constraint[cisPageRuleTargetsConstraintValue] = *item.Constraint.Value + constraints = append(constraints, constraint) + + // flatten target item + targetItemOutput[cisPageRuleTargetsConstraint] = constraints + targetItemOutput[cisPageRuleTargetsTarget] = *item.Target + + targetsOutput = append(targetsOutput, targetItemOutput) + } + return targetsOutput +} + +func flattenCISPageRuleActions(actions []cispagerulev1.PageRulesBodyActionsItemIntf) interface{} { + actionsOutput := make([]interface{}, 0) + + for _, instance := range actions { + actionItemOutput := map[string]interface{}{} + item := instance.(*cispagerulev1.PageRulesBodyActionsItem) + actionItemOutput[cisPageRuleActionsID] = *item.ID + if *item.ID == cisPageRuleActionsIDForwardingURL { + value := item.Value.(map[string]interface{}) + actionItemOutput[cisPageRuleActionsValueURL] = value[cisPageRuleActionsValueURL] + actionItemOutput[cisPageRuleActionsValueStatusCode] = value[cisPageRuleActionsValueStatusCode] + } else { + actionItemOutput[cisPageRuleActionsValue] = item.Value + } + actionsOutput = append(actionsOutput, actionItemOutput) + } + return actionsOutput +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_range_app.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_range_app.go new file mode 100644 index 00000000000..0ab63ee4030 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_range_app.go @@ -0,0 +1,413 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
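A minimal Terraform usage sketch of ibm_cis_page_rule built from the schema in the file above; the target URL, "matches" constraint operator, and the referenced ibm_cis / ibm_cis_domain resources are illustrative assumptions, not confirmed by this diff:

    resource "ibm_cis_page_rule" "example" {
      cis_id    = ibm_cis.instance.id
      domain_id = ibm_cis_domain.example.id
      priority  = 1
      status    = "active"

      targets {
        target = "www.example.com/old/*"
        constraint {
          operator = "matches"
          value    = "www.example.com/old/*"
        }
      }

      actions {
        id          = "forwarding_url"
        url         = "https://www.example.com/new/$1"
        status_code = 301
      }
    }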
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/go-sdk-core/v4/core" + cisrangeappv1 "github.com/IBM/networking-go-sdk/rangeapplicationsv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISRangeApp = "ibm_cis_range_app" + cisRangeAppID = "app_id" + cisRangeAppProtocol = "protocol" + cisRangeAppDNS = "dns" + cisRangeAppDNSType = "dns_type" + cisRangeAppOriginDirect = "origin_direct" + cisRangeAppOriginDNS = "origin_dns" + cisRangeAppOriginPort = "origin_port" + cisRangeAppIPFirewall = "ip_firewall" + cisRangeAppProxyProtocol = "proxy_protocol" + cisRangeAppProxyProtocolOff = "off" + cisRangeAppProxyProtocolV1 = "v1" + cisRangeAppProxyProtocolV2 = "v2" + cisRangeAppProxyProtocolSimple = "simple" + cisRangeAppEdgeIPsType = "edge_ips_type" + cisRangeAppEdgeIPsTypeDynamic = "dynamic" + cisRangeAppEdgeIPsConnectivity = "edge_ips_connectivity" + cisRangeAppEdgeIPsConnectivityIPv4 = "ipv4" + cisRangeAppEdgeIPsConnectivityIPv6 = "ipv6" + cisRangeAppEdgeIPsConnectivityAll = "all" + cisRangeAppTrafficType = "traffic_type" + cisRangeAppTrafficTypeDirect = "direct" + cisRangeAppTrafficTypeHTTP = "http" + cisRangeAppTrafficTypeHTTPS = "https" + cisRangeAppTLS = "tls" + cisRangeAppTLSOff = "off" + cisRangeAppTLSFlexible = "flexible" + cisRangeAppTLSFull = "full" + cisRangeAppTLSStrict = "strict" + cisRangeAppCreatedOn = "created_on" + cisRangeAppModifiedOn = "modified_on" +) + +func resourceIBMCISRangeApp() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCISRangeAppCreate, + Read: resourceIBMCISRangeAppRead, + Update: resourceIBMCISRangeAppUpdate, + Delete: resourceIBMCISRangeAppDelete, + Exists: resourceIBMCISRangeAppExists, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Instance CRN", + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Domain ID", + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisRangeAppID: { + Type: schema.TypeString, + Computed: true, + Description: "Application identifier", + }, + cisRangeAppProtocol: { + Type: schema.TypeString, + Required: true, + Description: "Defines the protocol and port for this application", + }, + cisRangeAppDNS: { + Type: schema.TypeString, + Required: true, + Description: "Name of the DNS record for this application", + }, + cisRangeAppDNSType: { + Type: schema.TypeString, + Required: true, + Description: "Type of the DNS record for this application", + }, + cisRangeAppOriginDirect: { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: []string{cisRangeAppOriginDirect, cisRangeAppOriginDNS}, + Description: "IP address and port of the origin for this Range application.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + cisRangeAppOriginDNS: { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{cisRangeAppOriginDirect, cisRangeAppOriginDNS}, + Description: "DNS record pointing to the origin for this Range application.", + }, + cisRangeAppOriginPort: { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{cisRangeAppOriginDirect}, + Description: "Port at the origin that listens to traffic", + }, + cisRangeAppIPFirewall: { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the IP Firewall for this application. 
Only available for TCP applications.", + }, + cisRangeAppProxyProtocol: { + Type: schema.TypeString, + Optional: true, + Description: "Allows for the true client IP to be passed to the service.", + ValidateFunc: InvokeValidator(ibmCISRangeApp, cisRangeAppProxyProtocol), + }, + cisRangeAppEdgeIPsType: { + Type: schema.TypeString, + Optional: true, + Default: cisRangeAppEdgeIPsTypeDynamic, + Description: "The type of edge IP configuration.", + ValidateFunc: InvokeValidator(ibmCISRangeApp, cisRangeAppEdgeIPsType), + }, + cisRangeAppEdgeIPsConnectivity: { + Type: schema.TypeString, + Optional: true, + Default: cisRangeAppEdgeIPsConnectivityAll, + Description: "Specifies the IP version.", + ValidateFunc: InvokeValidator(ibmCISRangeApp, cisRangeAppEdgeIPsConnectivity), + }, + cisRangeAppTrafficType: { + Type: schema.TypeString, + Optional: true, + Default: cisRangeAppTrafficTypeDirect, + Description: "Configure how traffic is handled at the edge.", + ValidateFunc: InvokeValidator(ibmCISRangeApp, cisRangeAppTrafficType), + }, + cisRangeAppTLS: { + Type: schema.TypeString, + Optional: true, + Default: cisRangeAppTLSOff, + Description: "Configure if and how TLS connections are terminated at the edge.", + ValidateFunc: InvokeValidator(ibmCISRangeApp, cisRangeAppTLS), + }, + cisRangeAppCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "created on date", + }, + cisRangeAppModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "modified on date", + }, + }, + } +} +func resourceIBMCISRangeAppValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + proxyProtocol := "off, v1, v2, simple" + connectivity := "ipv4, ipv6, all" + trafficType := "direct, http, https" + tls := "off, flexible, full, strict" + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRangeAppProxyProtocol, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: proxyProtocol}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRangeAppEdgeIPsConnectivity, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: connectivity}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRangeAppEdgeIPsType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: "dynamic"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRangeAppTrafficType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: trafficType}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRangeAppTLS, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: tls}) + + ibmCISRangeAppResourceValidator := ResourceValidator{ResourceName: ibmCISRangeApp, Schema: validateSchema} + return &ibmCISRangeAppResourceValidator +} +func resourceIBMCISRangeAppCreate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRangeAppClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + protocol := d.Get(cisRangeAppProtocol).(string) + dns := d.Get(cisRangeAppDNS).(string) + dnsType := 
d.Get(cisRangeAppDNSType).(string) + + dnsOpt := &cisrangeappv1.RangeAppReqDns{ + Type: &dnsType, + Name: &dns, + } + + opt := cisClient.NewCreateRangeAppOptions(protocol, dnsOpt) + + if v, ok := d.GetOk(cisRangeAppOriginDirect); ok { + opt.SetOriginDirect(expandStringList(v.([]interface{}))) + } + if v, ok := d.GetOk(cisRangeAppOriginDNS); ok { + originDNSOpt := &cisrangeappv1.RangeAppReqOriginDns{ + Name: core.StringPtr(v.(string)), + } + opt.SetOriginDns(originDNSOpt) + } + if v, ok := d.GetOk(cisRangeAppOriginPort); ok { + opt.SetOriginPort(int64(v.(int))) + } + if v, ok := d.GetOkExists(cisRangeAppIPFirewall); ok { + opt.SetIpFirewall(v.(bool)) + } + if v, ok := d.GetOk(cisRangeAppProxyProtocol); ok { + opt.SetProxyProtocol(v.(string)) + } + edgeIPsOpt := &cisrangeappv1.RangeAppReqEdgeIps{ + Type: core.StringPtr(cisRangeAppEdgeIPsTypeDynamic), + Connectivity: core.StringPtr(cisRangeAppEdgeIPsConnectivityAll), + } + if v, ok := d.GetOk(cisRangeAppEdgeIPsType); ok { + edgeIPsOpt.Type = core.StringPtr(v.(string)) + } + if v, ok := d.GetOk(cisRangeAppEdgeIPsConnectivity); ok { + edgeIPsOpt.Connectivity = core.StringPtr(v.(string)) + } + // attach the edge IP configuration to the request + opt.SetEdgeIps(edgeIPsOpt) + if v, ok := d.GetOk(cisRangeAppTrafficType); ok { + opt.SetTrafficType(v.(string)) + } + if v, ok := d.GetOk(cisRangeAppTLS); ok { + opt.SetTls(v.(string)) + } + + result, resp, err := cisClient.CreateRangeApp(opt) + if err != nil { + return fmt.Errorf("Failed to create range application: %v", resp) + } + d.SetId(convertCisToTfThreeVar(*result.Result.ID, zoneID, crn)) + return resourceIBMCISRangeAppRead(d, meta) +} + +func resourceIBMCISRangeAppRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRangeAppClientSession() + if err != nil { + return err + } + + rangeAppID, zoneID, crn, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewGetRangeAppOptions(rangeAppID) + result, resp, err := cisClient.GetRangeApp(opt) + if err != nil { + return fmt.Errorf("Failed to read range application: %v", resp) + } + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisRangeAppID, result.Result.ID) + d.Set(cisRangeAppProtocol, result.Result.Protocol) + d.Set(cisRangeAppDNSType, result.Result.Dns.Type) + d.Set(cisRangeAppDNS, result.Result.Dns.Name) + d.Set(cisRangeAppOriginDirect, flattenStringList(result.Result.OriginDirect)) + d.Set(cisRangeAppProxyProtocol, result.Result.ProxyProtocol) + d.Set(cisRangeAppIPFirewall, result.Result.IpFirewall) + d.Set(cisRangeAppTrafficType, result.Result.TrafficType) + d.Set(cisRangeAppEdgeIPsType, result.Result.EdgeIps.Type) + d.Set(cisRangeAppEdgeIPsConnectivity, result.Result.EdgeIps.Connectivity) + d.Set(cisRangeAppTLS, result.Result.Tls) + d.Set(cisRangeAppCreatedOn, result.Result.CreatedOn.String()) + d.Set(cisRangeAppModifiedOn, result.Result.ModifiedOn.String()) + return nil +} + +func resourceIBMCISRangeAppUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRangeAppClientSession() + if err != nil { + return err + } + + if d.HasChange(cisRangeAppOriginDirect) || + d.HasChange(cisRangeAppOriginDNS) || + d.HasChange(cisRangeAppOriginPort) || + d.HasChange(cisRangeAppIPFirewall) || + d.HasChange(cisRangeAppProxyProtocol) || + d.HasChange(cisRangeAppEdgeIPsType) || + d.HasChange(cisRangeAppEdgeIPsConnectivity) || + d.HasChange(cisRangeAppTLS) || + d.HasChange(cisRangeAppTrafficType) { + + rangeAppID, zoneID, crn, _ := 
convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + protocol := d.Get(cisRangeAppProtocol).(string) + dns := d.Get(cisRangeAppDNS).(string) + dnsType := d.Get(cisRangeAppDNSType).(string) + + dnsOpt := &cisrangeappv1.RangeAppReqDns{ + Type: &dnsType, + Name: &dns, + } + + opt := cisClient.NewUpdateRangeAppOptions(rangeAppID, protocol, dnsOpt) + + if v, ok := d.GetOk(cisRangeAppOriginDirect); ok { + opt.SetOriginDirect(expandStringList(v.([]interface{}))) + } + if v, ok := d.GetOk(cisRangeAppOriginDNS); ok { + originDNSOpt := &cisrangeappv1.RangeAppReqOriginDns{ + Name: core.StringPtr(v.(string)), + } + opt.SetOriginDns(originDNSOpt) + } + if v, ok := d.GetOk(cisRangeAppOriginPort); ok { + opt.SetOriginPort(int64(v.(int))) + } + if v, ok := d.GetOkExists(cisRangeAppIPFirewall); ok { + opt.SetIpFirewall(v.(bool)) + } + if v, ok := d.GetOk(cisRangeAppProxyProtocol); ok { + opt.SetProxyProtocol(v.(string)) + } + edgeIPsOpt := &cisrangeappv1.RangeAppReqEdgeIps{ + Type: core.StringPtr(cisRangeAppEdgeIPsTypeDynamic), + Connectivity: core.StringPtr(cisRangeAppEdgeIPsConnectivityAll), + } + if v, ok := d.GetOk(cisRangeAppEdgeIPsType); ok { + edgeIPsOpt.Type = core.StringPtr(v.(string)) + } + if v, ok := d.GetOk(cisRangeAppEdgeIPsConnectivity); ok { + edgeIPsOpt.Connectivity = core.StringPtr(v.(string)) + } + // attach the edge IP configuration to the request + opt.SetEdgeIps(edgeIPsOpt) + if v, ok := d.GetOk(cisRangeAppTrafficType); ok { + opt.SetTrafficType(v.(string)) + } + if v, ok := d.GetOk(cisRangeAppTLS); ok { + opt.SetTls(v.(string)) + } + _, resp, err := cisClient.UpdateRangeApp(opt) + if err != nil { + return fmt.Errorf("Failed to update range application: %v", resp) + } + } + return resourceIBMCISRangeAppRead(d, meta) +} + +func resourceIBMCISRangeAppDelete(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRangeAppClientSession() + if err != nil { + return err + } + + rangeAppID, zoneID, cisID, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(cisID) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewDeleteRangeAppOptions(rangeAppID) + _, resp, err := cisClient.DeleteRangeApp(opt) + if err != nil { + return fmt.Errorf("Failed to delete range application: %v", resp) + } + return nil +} + +func resourceIBMCISRangeAppExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisRangeAppClientSession() + if err != nil { + return false, err + } + rangeAppID, zoneID, cisID, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(cisID) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewGetRangeAppOptions(rangeAppID) + _, resp, err := cisClient.GetRangeApp(opt) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + log.Println("range application is not found") + return false, nil + } + return false, fmt.Errorf("Failed to get existing range application: %v", err) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_rate_limit.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_rate_limit.go new file mode 100644 index 00000000000..0116036721b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_rate_limit.go @@ -0,0 +1,791 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
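For reference, a hedged usage sketch of ibm_cis_range_app assembled from the schema above; the "tcp/22" protocol string, origin address, and referenced resources are illustrative assumptions, not confirmed by this diff:

    resource "ibm_cis_range_app" "example" {
      cis_id        = ibm_cis.instance.id
      domain_id     = ibm_cis_domain.example.id
      protocol      = "tcp/22"
      dns           = "ssh.example.com"
      dns_type      = "CNAME"
      origin_direct = ["tcp://203.0.113.1:22"]
      traffic_type  = "direct"
      tls           = "off"
    }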
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/IBM/networking-go-sdk/zoneratelimitsv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + cisRLThreshold = "threshold" + cisRLPeriod = "period" + cisRLDescription = "description" + cisRLTimeout = "timeout" + cisRLBody = "body" + cisRLURL = "url" +) + +func resourceIBMCISRateLimit() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCISRateLimitCreate, + Read: resourceIBMCISRateLimitRead, + Update: resourceIBMCISRateLimitUpdate, + Delete: resourceIBMCISRateLimitDelete, + Exists: resourceIBMCISRateLimitExists, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + "cis_id": { + Type: schema.TypeString, + Required: true, + Description: "CIS Instance CRN", + }, + "domain_id": { + Type: schema.TypeString, + Required: true, + Description: "CIS Domain ID", + DiffSuppressFunc: suppressDomainIDDiff, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether this rate limiting rule is currently disabled.", + }, + cisRLDescription: { + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", cisRLDescription), + Description: "A note that you can use to describe the reason for a rate limiting rule.", + }, + "bypass": { + Type: schema.TypeList, + Optional: true, + Description: "Bypass URL", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Default: "url", + Description: "bypass URL name", + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: "bypass URL value", + }, + }, + }, + }, + cisRLThreshold: { + Type: schema.TypeInt, + Required: true, + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", cisRLThreshold), + Description: "Rate Limiting Threshold", + }, + cisRLPeriod: { + Type: schema.TypeInt, + Required: true, + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", cisRLPeriod), + Description: "Rate Limiting Period", + }, + "correlate": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Ratelimiting Correlate", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "by": { + Type: schema.TypeString, + Optional: true, + Default: "nat", + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", "by"), + Description: "Whether to enable NAT based rate limiting", + }, + }, + }, + }, + "action": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Description: "Rate Limiting Action", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", "mode"), + Description: "Type of action performed. Valid values are: 'simulate', 'ban', 'challenge', 'js_challenge'.", + }, + "timeout": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", cisRLTimeout), + Description: "The time to perform the mitigation action. 
Timeout must be the same or greater than the period.", + }, + "response": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Rate Limiting Action Response", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", "content_type"), + Description: "Custom content-type and body to return. It must be one of following 'text/plain', 'text/xml', 'application/json'.", + }, + "body": { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", cisRLBody), + Description: "The body to return. The content here must conform to the 'content_type'", + }, + }, + }, + }, + }, + }, + }, + "match": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Description: "Rate Limiting Match", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "request": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MinItems: 1, + MaxItems: 1, + Description: "Rate Limiting Match Request", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "methods": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Description: "HTTP Methods of matching request. It can be one or many. Example methods 'POST', 'PUT'", + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", "methods"), + }, + }, + "schemes": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Description: "HTTP Schemes of matching request. It can be one or many. Example schemes 'HTTP', 'HTTPS'.", + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", "schemes"), + }, + }, + "url": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "URL pattern of matching request", + ValidateFunc: InvokeValidator("ibm_cis_rate_limit", cisRLURL), + }, + }, + }, + }, + "response": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Description: "Rate Limiting Response", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeSet, + Optional: true, + Description: "HTTP Status Codes of matching response. It can be one or many. Example status codes '403', '401'", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + "origin_traffic": { + Type: schema.TypeBool, + Optional: true, + Description: "Origin Traffic of matching response.", + }, + "headers": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: "The name of the response header to match.", + }, + "op": { + Type: schema.TypeString, + Optional: true, + Description: "The operator when matching. 
Valid values are 'eq' and 'ne'.", + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: "The value of the header, which is exactly matched.", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "rule_id": { + Type: schema.TypeString, + Computed: true, + Description: "Rate Limit rule Id", + }, + }, + } +} +func resourceIBMCISRateLimitValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + byValues := "nat" + modeValues := "simulate, ban, challenge, js_challenge" + ctypeValues := "text/plain, text/xml, application/json" + methodValues := "GET, POST, PUT, DELETE, PATCH, HEAD, _ALL_" + schemeValues := "HTTP, HTTPS, _ALL_" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRLDescription, + ValidateFunctionIdentifier: StringLenBetween, + Type: TypeString, + Optional: true, + MinValueLength: 0, + MaxValueLength: 1024}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRLThreshold, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + Required: true, + MinValue: "1", + MaxValue: "1000000"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRLPeriod, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + Required: true, + MinValue: "1", + MaxValue: "86400"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "by", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: byValues}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "mode", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: modeValues}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "content_type", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: ctypeValues}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "methods", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: methodValues}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "schemes", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: schemeValues}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRLTimeout, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + Optional: true, + MinValue: "1", + MaxValue: "86400"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRLBody, + ValidateFunctionIdentifier: StringLenBetween, + Type: TypeString, + Optional: true, + MinValueLength: 0, + MaxValueLength: 10240}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRLURL, + ValidateFunctionIdentifier: StringLenBetween, + Type: TypeString, + Optional: true, + MinValueLength: 0, + MaxValueLength: 1024}) + + ibmCISRateLimitResourceValidator := ResourceValidator{ResourceName: "ibm_cis_rate_limit", Schema: validateSchema} + return &ibmCISRateLimitResourceValidator +} +func resourceIBMCISRateLimitCreate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRLClientSession() + if err != nil { + return err + } + + cisID := d.Get("cis_id").(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get("domain_id").(string)) + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(cisID) + 
cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + // payload to create a rate limit rule + opt := cisClient.NewCreateZoneRateLimitsOptions() + opt.SetThreshold(int64(d.Get(cisRLThreshold).(int))) + opt.SetPeriod(int64(d.Get(cisRLPeriod).(int))) + + if description, ok := d.GetOk(cisRLDescription); ok { + opt.SetDescription(description.(string)) + } + + if disabled, ok := d.GetOk("disabled"); ok { + opt.SetDisabled(disabled.(bool)) + } + + action, err := expandRateLimitAction(d) + if err != nil { + return fmt.Errorf("Error getting action from expandRateLimitAction: %s", err) + } + opt.SetAction(action) + + match, err := expandRateLimitMatch(d) + if err != nil { + return fmt.Errorf("Error getting match from expandRateLimitMatch: %s", err) + } + opt.SetMatch(match) + + correlate, err := expandRateLimitCorrelate(d) + if err == nil { + opt.SetCorrelate(correlate) + } + + byPass, err := expandRateLimitBypass(d) + if err != nil { + return fmt.Errorf("Error getting bypass from expandRateLimitBypass: %s", err) + } + opt.SetBypass(byPass) + + // creating rate limit rule + result, resp, err := cisClient.CreateZoneRateLimits(opt) + if err != nil { + return fmt.Errorf("Failed to create RateLimit: %v", resp) + } + record := result.Result + d.SetId(convertCisToTfThreeVar(*record.ID, zoneID, cisID)) + return resourceIBMCISRateLimitRead(d, meta) +} + +func resourceIBMCISRateLimitRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRLClientSession() + if err != nil { + return err + } + recordID, zoneID, cisID, err := convertTfToCisThreeVar(d.Id()) + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(cisID) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewGetRateLimitOptions(recordID) + result, resp, err := cisClient.GetRateLimit(opt) + if err != nil { + return fmt.Errorf("Failed to read RateLimit: %v", resp) + } + + rule := result.Result + d.Set("cis_id", cisID) + d.Set("domain_id", zoneID) + d.Set("rule_id", recordID) + d.Set("disabled", rule.Disabled) + d.Set(cisRLDescription, rule.Description) + d.Set(cisRLThreshold, rule.Threshold) + d.Set(cisRLPeriod, rule.Period) + d.Set("action", flattenRateLimitAction(rule.Action)) + d.Set("match", flattenRateLimitMatch(rule.Match)) + d.Set("correlate", flattenRateLimitCorrelate(rule.Correlate)) + d.Set("bypass", flattenRateLimitByPass(rule.Bypass)) + + return nil +} + +func resourceIBMCISRateLimitUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRLClientSession() + if err != nil { + return err + } + + recordID, zoneID, cisID, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(cisID) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + opt := cisClient.NewUpdateRateLimitOptions(recordID) + if d.HasChange("disabled") || + d.HasChange(cisRLThreshold) || + d.HasChange(cisRLPeriod) || + d.HasChange(cisRLDescription) || + d.HasChange("action") || + d.HasChange("match") || + d.HasChange("correlate") || + d.HasChange("bypass") { + + opt.SetThreshold(int64(d.Get(cisRLThreshold).(int))) + opt.SetPeriod(int64(d.Get(cisRLPeriod).(int))) + + if description, ok := d.GetOk(cisRLDescription); ok { + opt.SetDescription(description.(string)) + } + + if disabled, ok := d.GetOk("disabled"); ok { + opt.SetDisabled(disabled.(bool)) + } + + action, err := expandRateLimitAction(d) + if err != nil { + return fmt.Errorf("Error getting action from expandRateLimitAction: %s", err) + } + opt.SetAction(action) + + match, err := 
expandRateLimitMatch(d) + if err != nil { + return fmt.Errorf("Error getting match from expandRateLimitMatch: %s", err) + } + opt.SetMatch(match) + + correlate, err := expandRateLimitCorrelate(d) + if err == nil { + opt.SetCorrelate(correlate) + } + + byPass, err := expandRateLimitBypass(d) + if err != nil { + return fmt.Errorf("Error getting bypass from expandRateLimitBypass: %s", err) + } + opt.SetBypass(byPass) + _, resp, err := cisClient.UpdateRateLimit(opt) + if err != nil { + return fmt.Errorf("Failed to update RateLimit: %v", resp) + } + } + d.SetId(convertCisToTfThreeVar(recordID, zoneID, cisID)) + return resourceIBMCISRateLimitRead(d, meta) +} + +func resourceIBMCISRateLimitDelete(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRLClientSession() + if err != nil { + return err + } + + recordID, zoneID, cisID, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(cisID) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewDeleteZoneRateLimitOptions(recordID) + _, resp, err := cisClient.DeleteZoneRateLimit(opt) + if err != nil { + return fmt.Errorf("Failed to delete RateLimit: %v", resp) + } + return nil +} + +func resourceIBMCISRateLimitExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cisClient, err := meta.(ClientSession).CisRLClientSession() + if err != nil { + return false, err + } + recordID, zoneID, cisID, _ := convertTfToCisThreeVar(d.Id()) + cisClient.Crn = core.StringPtr(cisID) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewGetRateLimitOptions(recordID) + _, resp, err := cisClient.GetRateLimit(opt) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + log.Println("ratelimit is not found") + return false, nil + } + return false, fmt.Errorf("Failed to get existing RateLimit: %v", err) + } + return true, nil +} + +func expandRateLimitAction(d *schema.ResourceData) ( + action *zoneratelimitsv1.RatelimitInputAction, err error) { + action = &zoneratelimitsv1.RatelimitInputAction{} + actionRecord := d.Get("action").([]interface{})[0].(map[string]interface{}) + mode := actionRecord["mode"].(string) + timeout := actionRecord["timeout"].(int) + if timeout == 0 { + if mode == "simulate" || mode == "ban" { + return action, fmt.Errorf("For the 'simulate' and 'ban' modes, timeout must be set; the valid range for timeout is 10 - 86400") + } + } else { + if mode == "challenge" || mode == "js_challenge" { + return action, fmt.Errorf("Timeout field is only valid for 'simulate' and 'ban' modes") + } + } + action.Mode = core.StringPtr(mode) + action.Timeout = core.Int64Ptr(int64(timeout)) + + if _, ok := actionRecord["response"]; ok && len(actionRecord["response"].([]interface{})) > 0 { + actionResponse := actionRecord["response"].([]interface{})[0].(map[string]interface{}) + action.Response = &zoneratelimitsv1.RatelimitInputActionResponse{ + ContentType: core.StringPtr(actionResponse["content_type"].(string)), + Body: core.StringPtr(actionResponse["body"].(string)), + } + } + + return action, nil +} + +func expandRateLimitMatch(d *schema.ResourceData) (match *zoneratelimitsv1.RatelimitInputMatch, err error) { + match = new(zoneratelimitsv1.RatelimitInputMatch) + m := d.Get("match") + if len(m.([]interface{})) == 0 { + // Match Request is a mandatory property. So, setting a default if none is provided. 
+ match.Request = &zoneratelimitsv1.RatelimitInputMatchRequest{ + Methods: []string{"_ALL_"}, + Schemes: []string{"_ALL_"}, + URL: core.StringPtr("*"), + } + return match, nil + } + matchRecord := m.([]interface{})[0].(map[string]interface{}) + + if matchReqRecord, ok := matchRecord["request"]; ok && len(matchReqRecord.([]interface{})) > 0 { + matchRequestRecord := matchReqRecord.([]interface{})[0].(map[string]interface{}) + + url := matchRequestRecord["url"].(string) + // If url is not provided, then set it with * + if len(url) == 0 { + url = "*" + } + matchRequest := &zoneratelimitsv1.RatelimitInputMatchRequest{ + URL: core.StringPtr(url), + } + if methodsRecord, ok := matchRequestRecord["methods"]; ok { + methods := make([]string, methodsRecord.(*schema.Set).Len()) + for i, m := range methodsRecord.(*schema.Set).List() { + methods[i] = m.(string) + } + matchRequest.Methods = methods + } + if schemesRecord, ok := matchRequestRecord["schemes"]; ok { + schemes := make([]string, schemesRecord.(*schema.Set).Len()) + for i, s := range schemesRecord.(*schema.Set).List() { + schemes[i] = s.(string) + } + matchRequest.Schemes = schemes + } + + match.Request = matchRequest + } + if matchResRecord, ok := matchRecord["response"]; ok && len(matchResRecord.([]interface{})) > 0 { + matchResponseRecord := matchResRecord.([]interface{})[0].(map[string]interface{}) + matchResponse := &zoneratelimitsv1.RatelimitInputMatchResponse{} + if statusRecord, ok := matchResponseRecord["status"]; ok { + status := make([]int64, statusRecord.(*schema.Set).Len()) + for i, s := range statusRecord.(*schema.Set).List() { + status[i] = int64(s.(int)) + } + matchResponse.Status = status + } + if originRecord, ok := matchResponseRecord["origin_traffic"]; ok { + originTraffic := originRecord.(bool) + matchResponse.OriginTraffic = &originTraffic + } + if headersRecord, ok := matchResponseRecord["headers"]; ok && len(headersRecord.([]interface{})) > 0 { + matchResponseHeaders := headersRecord.([]interface{}) + + responseHeaders := make([]zoneratelimitsv1.RatelimitInputMatchResponseHeadersItem, 0) + + for _, h := range matchResponseHeaders { + header := h.(map[string]interface{}) + headerRecord := zoneratelimitsv1.RatelimitInputMatchResponseHeadersItem{} + headerRecord.Name = core.StringPtr(header["name"].(string)) + headerRecord.Op = core.StringPtr(header["op"].(string)) + headerRecord.Value = core.StringPtr(header["value"].(string)) + responseHeaders = append(responseHeaders, headerRecord) + } + matchResponse.HeadersVar = responseHeaders + + } + match.Response = matchResponse + } + + return match, nil +} + +func expandRateLimitCorrelate(d *schema.ResourceData) ( + correlate *zoneratelimitsv1.RatelimitInputCorrelate, err error) { + correlate = &zoneratelimitsv1.RatelimitInputCorrelate{} + c, ok := d.GetOk("correlate") + if !ok { + err = fmt.Errorf("correlate field is empty") + return &zoneratelimitsv1.RatelimitInputCorrelate{}, err + } + correlateRecord := c.([]interface{})[0].(map[string]interface{}) + correlate.By = core.StringPtr(correlateRecord["by"].(string)) + + return correlate, nil +} + +func expandRateLimitBypass(d *schema.ResourceData) ( + byPass []zoneratelimitsv1.RatelimitInputBypassItem, err error) { + b, ok := d.GetOk("bypass") + if !ok { + return + } + byPassKV := b.([]interface{}) + + byPassRecord := make([]zoneratelimitsv1.RatelimitInputBypassItem, 0) + + for _, kv := range byPassKV { + keyValue, _ := kv.(map[string]interface{}) + + byPassKeyValue := zoneratelimitsv1.RatelimitInputBypassItem{} + 
byPassKeyValue.Name = core.StringPtr(keyValue["name"].(string)) + byPassKeyValue.Value = core.StringPtr(keyValue["value"].(string)) + byPassRecord = append(byPassRecord, byPassKeyValue) + } + byPass = byPassRecord + + return byPass, nil +} + +func flattenRateLimitAction(action *zoneratelimitsv1.RatelimitObjectAction) []map[string]interface{} { + actionRecord := map[string]interface{}{ + "mode": *action.Mode, + "timeout": *action.Timeout, + } + + if action.Response != nil { + actionResponseRecord := *action.Response + actionResponse := map[string]interface{}{ + "content_type": *actionResponseRecord.ContentType, + "body": *actionResponseRecord.Body, + } + actionRecord["response"] = []map[string]interface{}{actionResponse} + } + return []map[string]interface{}{actionRecord} +} + +func flattenRateLimitMatch(match *zoneratelimitsv1.RatelimitObjectMatch) []map[string]interface{} { + matchRecord := map[string]interface{}{} + matchRecord["request"] = flattenRateLimitMatchRequest(*match.Request) + if match.Response != nil { + matchRecord["response"] = flattenRateLimitMatchResponse(*match.Response) + } + + return []map[string]interface{}{matchRecord} +} + +func flattenRateLimitMatchRequest(request zoneratelimitsv1.RatelimitObjectMatchRequest) []map[string]interface{} { + + requestRecord := map[string]interface{}{} + methods := make([]string, 0) + for _, m := range request.Methods { + methods = append(methods, m) + } + requestRecord["methods"] = methods + schemes := make([]string, 0) + for _, s := range request.Schemes { + schemes = append(schemes, s) + } + requestRecord["schemes"] = schemes + + requestRecord["url"] = *request.URL + return []map[string]interface{}{requestRecord} +} + +func flattenRateLimitMatchResponse(response zoneratelimitsv1.RatelimitObjectMatchResponse) []interface{} { + responseRecord := map[string]interface{}{} + flag := false + if response.OriginTraffic != nil { + responseRecord["origin_traffic"] = *response.OriginTraffic + flag = true + } + + if len(response.Status) > 0 { + statuses := make([]int64, 0) + for _, s := range response.Status { + statuses = append(statuses, s) + } + responseRecord["status"] = statuses + flag = true + } + + if len(response.HeadersVar) > 0 { + headers := make([]map[string]interface{}, 0) + for _, h := range response.HeadersVar { + header := map[string]interface{}{} + header["name"] = h.Name + header["op"] = h.Op + header["value"] = h.Value + headers = append(headers, header) + + } + responseRecord["headers"] = headers + flag = true + } + if flag { + return []interface{}{responseRecord} + } + return []interface{}{} +} +func flattenRateLimitCorrelate(correlate *zoneratelimitsv1.RatelimitObjectCorrelate) []map[string]interface{} { + if correlate == nil { + return []map[string]interface{}{} + } + correlateRecord := map[string]interface{}{} + if *correlate.By != "" { + correlateRecord["by"] = *correlate.By + } + return []map[string]interface{}{correlateRecord} +} + +func flattenRateLimitByPass(byPass []zoneratelimitsv1.RatelimitObjectBypassItem) []map[string]interface{} { + byPassRecord := make([]map[string]interface{}, 0, len(byPass)) + if len(byPass) > 0 { + for _, b := range byPass { + byPassKV := map[string]interface{}{ + "name": *b.Name, + "value": *b.Value, + } + byPassRecord = append(byPassRecord, byPassKV) + } + } + return byPassRecord +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_routing.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_routing.go new file mode 100644 
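A minimal usage sketch of ibm_cis_rate_limit built from the schema above (the threshold/period values and match URL are illustrative; per the schema note, the action timeout must be at least the period):

    resource "ibm_cis_rate_limit" "example" {
      cis_id    = ibm_cis.instance.id
      domain_id = ibm_cis_domain.example.id
      threshold = 20
      period    = 900

      match {
        request {
          url     = "*.example.com/login"
          schemes = ["HTTPS"]
          methods = ["POST"]
        }
      }

      action {
        mode    = "ban"
        timeout = 43200
      }
    }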
index 00000000000..2f04dc3f3e9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_routing.go @@ -0,0 +1,114 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISRouting = "ibm_cis_routing" + cisRoutingSmartRouting = "smart_routing" +) + +func resourceIBMCISRouting() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCISRoutingUpdate, + Read: resourceIBMCISRoutingRead, + Update: resourceIBMCISRoutingUpdate, + Delete: resourceIBMCISRoutingDelete, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Instance CRN", + }, + cisDomainID: { + Type: schema.TypeString, + Required: true, + Description: "CIS Domain ID", + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisRoutingSmartRouting: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Smart Routing value", + ValidateFunc: InvokeValidator(ibmCISRouting, cisRoutingSmartRouting), + }, + }, + } +} + +func resourceIBMCISRoutingValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + smartRoutingValues := "on, off" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisRoutingSmartRouting, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: smartRoutingValues}) + ibmCISRoutingValidator := ResourceValidator{ResourceName: ibmCISRouting, Schema: validateSchema} + return &ibmCISRoutingValidator +} + +func resourceIBMCISRoutingUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRoutingClientSession() + if err != nil { + return err + } + + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + if d.HasChange(cisRoutingSmartRouting) { + smartRoutingValue := d.Get(cisRoutingSmartRouting).(string) + opt := cisClient.NewUpdateSmartRoutingOptions() + opt.SetValue(smartRoutingValue) + _, response, err := cisClient.UpdateSmartRouting(opt) + if err != nil { + log.Printf("Update smart route setting failed: %v", response) + return err + } + } + + d.SetId(convertCisToTfTwoVar(zoneID, crn)) + return resourceIBMCISRoutingRead(d, meta) +} + +func resourceIBMCISRoutingRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisRoutingClientSession() + if err != nil { + return err + } + zoneID, crn, err := convertTftoCisTwoVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewGetSmartRoutingOptions() + result, response, err := cisClient.GetSmartRouting(opt) + if err != nil { + log.Printf("Get smart route setting failed: %v", response) + return err + } + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisRoutingSmartRouting, *result.Result.Value) + return nil +} + +func resourceIBMCISRoutingDelete(d *schema.ResourceData, meta interface{}) error { + // Nothing to delete on CIS resource + d.SetId("") + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_tls_settings.go 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_tls_settings.go new file mode 100644 index 00000000000..f3b29c73b5d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_tls_settings.go @@ -0,0 +1,203 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "log" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmCISTLSSettings = "ibm_cis_tls_settings" + cisTLSSettingsUniversalSSL = "universal_ssl" + cisTLSSettingsTLS12Only = "tls_1_2_only" + cisTLSSettingsTLS13 = "tls_1_3" + cisTLSSettingsMinTLSVersion = "min_tls_version" +) + +func resourceIBMCISTLSSettings() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + cisID: { + Type: schema.TypeString, + Description: "CIS instance crn", + Required: true, + }, + cisDomainID: { + Type: schema.TypeString, + Description: "Associated CIS domain", + Required: true, + DiffSuppressFunc: suppressDomainIDDiff, + }, + cisTLSSettingsUniversalSSL: { + Type: schema.TypeBool, + Description: "Universal SSL setting", + Optional: true, + Computed: true, + }, + cisTLSSettingsTLS13: { + Type: schema.TypeString, + Description: "TLS 1.3 setting", + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator(ibmCISTLSSettings, cisTLSSettingsTLS13), + DiffSuppressFunc: suppressTLS13Diff, + }, + cisTLSSettingsMinTLSVersion: { + Type: schema.TypeString, + Description: "Minimum version of TLS required", + Optional: true, + ValidateFunc: InvokeValidator(ibmCISTLSSettings, cisTLSSettingsMinTLSVersion), + Default: "1.1", + }, + }, + Create: resourceCISTLSSettingsUpdate, + Read: resourceCISTLSSettingsRead, + Update: resourceCISTLSSettingsUpdate, + Delete: resourceCISTLSSettingsDelete, + Importer: &schema.ResourceImporter{}, + } +} + +func resourceIBMCISTLSSettingsValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisTLSSettingsTLS13, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: "on, off, zrt"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: cisTLSSettingsMinTLSVersion, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: "1.1, 1.2, 1.3, 1.4"}) + ibmCISTLSSettingsResourceValidator := ResourceValidator{ + ResourceName: ibmCISTLSSettings, + Schema: validateSchema} + return &ibmCISTLSSettingsResourceValidator +} + +func resourceCISTLSSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + crn := d.Get(cisID).(string) + zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string)) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + if d.HasChange(cisTLSSettingsTLS12Only) || + d.HasChange(cisTLSSettingsTLS13) || + d.HasChange(cisTLSSettingsUniversalSSL) || + d.HasChange(cisTLSSettingsMinTLSVersion) { + + // TLS 1.3 setting + if tls13, ok := d.GetOk(cisTLSSettingsTLS13); ok { + opt := cisClient.NewChangeTls13SettingOptions() + opt.SetValue(tls13.(string)) + _, resp, err := cisClient.ChangeTls13Setting(opt) + if err != nil { + log.Printf("Update TLS 1.3 setting Failed : %v\n", resp) + return err + } + } + + // Universal SSL 
setting + if universalSSL, ok := d.GetOkExists(cisTLSSettingsUniversalSSL); ok { + opt := cisClient.NewChangeUniversalCertificateSettingOptions() + opt.SetEnabled(universalSSL.(bool)) + resp, err := cisClient.ChangeUniversalCertificateSetting(opt) + if err != nil { + log.Printf("Update universal ssl setting Failed : %v\n", resp) + return err + } + } + + // Minimum TLS version + if minTLSVer, ok := d.GetOk(cisTLSSettingsMinTLSVersion); ok { + cisClient, err := meta.(ClientSession).CisDomainSettingsClientSession() + if err != nil { + return err + } + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + opt := cisClient.NewUpdateMinTlsVersionOptions() + opt.SetValue(minTLSVer.(string)) + _, resp, err := cisClient.UpdateMinTlsVersion(opt) + if err != nil { + log.Printf("Update minimum TLS version setting Failed : %v\n", resp) + return err + } + } + } + d.SetId(convertCisToTfTwoVar(zoneID, crn)) + return resourceCISTLSSettingsRead(d, meta) +} + +func resourceCISTLSSettingsRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisSSLClientSession() + if err != nil { + return err + } + zoneID, crn, _ := convertTftoCisTwoVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneIdentifier = core.StringPtr(zoneID) + + // TLS 1.3 setting + tls13Result, resp, err := cisClient.GetTls13Setting(cisClient.NewGetTls13SettingOptions()) + if err != nil { + log.Printf("Get TLS 1.3 setting failed : %v\n", resp) + return err + } + + // Universal SSL setting + universalSSLResult, resp, err := cisClient.GetUniversalCertificateSetting( + cisClient.NewGetUniversalCertificateSettingOptions()) + if err != nil { + log.Printf("Get universal SSL setting failed : %v\n", resp) + return err + } + + // Minimum TLS version setting + minTLSClient, err := meta.(ClientSession).CisDomainSettingsClientSession() + if err != nil { + return err + } + minTLSClient.Crn = core.StringPtr(crn) + minTLSClient.ZoneIdentifier = core.StringPtr(zoneID) + minTLSVerResult, resp, err := minTLSClient.GetMinTlsVersion( + minTLSClient.NewGetMinTlsVersionOptions()) + if err != nil { + log.Printf("Min TLS Version setting get request failed : %v", resp) + return err + } + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisTLSSettingsTLS13, tls13Result.Result.Value) + d.Set(cisTLSSettingsUniversalSSL, universalSSLResult.Result.Enabled) + d.Set(cisTLSSettingsMinTLSVersion, minTLSVerResult.Result.Value) + return nil +} + +func resourceCISTLSSettingsDelete(d *schema.ResourceData, meta interface{}) error { + // Nothing to delete on CIS resource + d.SetId("") + return nil +} + +func suppressTLS13Diff(k, old, new string, d *schema.ResourceData) bool { + // if we enable TLS 1.3, it gives zrt in output. + if old == "zrt" && new == "on" { + return true + } + return false +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_waf_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_waf_group.go new file mode 100644 index 00000000000..c538a1cf38e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_waf_group.go @@ -0,0 +1,164 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
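A short usage sketch of ibm_cis_tls_settings using the attribute names defined in the file above (values and resource references are illustrative):

    resource "ibm_cis_tls_settings" "example" {
      cis_id          = ibm_cis.instance.id
      domain_id       = ibm_cis_domain.example.id
      universal_ssl   = true
      tls_1_3         = "on"
      min_tls_version = "1.2"
    }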
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "log"
+
+  "github.com/IBM/go-sdk-core/v4/core"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+  ibmCISWAFGroup                = "ibm_cis_waf_group"
+  cisWAFGroupID                 = "group_id"
+  cisWAFGroupPackageID          = "package_id"
+  cisWAFGroupMode               = "mode"
+  cisWAFGroupName               = "name"
+  cisWAFGroupRulesCount         = "rules_count"
+  cisWAFGroupModifiedRulesCount = "modified_rules_count"
+  cisWAFGroupDesc               = "description"
+)
+
+func resourceIBMCISWAFGroup() *schema.Resource {
+  return &schema.Resource{
+    Create:   resourceIBMCISWAFGroupUpdate,
+    Read:     resourceIBMCISWAFGroupRead,
+    Update:   resourceIBMCISWAFGroupUpdate,
+    Delete:   resourceIBMCISWAFGroupDelete,
+    Importer: &schema.ResourceImporter{},
+    Schema: map[string]*schema.Schema{
+      cisID: {
+        Type:        schema.TypeString,
+        Required:    true,
+        Description: "CIS Instance CRN",
+      },
+      cisDomainID: {
+        Type:             schema.TypeString,
+        Required:         true,
+        Description:      "CIS Domain ID",
+        DiffSuppressFunc: suppressDomainIDDiff,
+      },
+      cisWAFGroupPackageID: {
+        Type:             schema.TypeString,
+        Required:         true,
+        Description:      "WAF Rule package id",
+        DiffSuppressFunc: suppressDomainIDDiff,
+      },
+      cisWAFGroupID: {
+        Type:             schema.TypeString,
+        Required:         true,
+        ForceNew:         true,
+        Description:      "WAF Rule group id",
+        DiffSuppressFunc: suppressDomainIDDiff,
+      },
+      cisWAFGroupMode: {
+        Type:         schema.TypeString,
+        Required:     true,
+        Description:  "WAF Rule group mode on/off",
+        ValidateFunc: InvokeValidator(ibmCISWAFGroup, cisWAFGroupMode),
+      },
+      cisWAFGroupName: {
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "WAF Rule group name",
+      },
+      cisWAFGroupDesc: {
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "WAF Rule group description",
+      },
+      cisWAFGroupRulesCount: {
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "WAF Rule group rules count",
+      },
+      cisWAFGroupModifiedRulesCount: {
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "WAF Rule group modified rules count",
+      },
+    },
+  }
+}
+
+func resourceIBMCISWAFGroupValidator() *ResourceValidator {
+
+  validateSchema := make([]ValidateSchema, 1)
+  mode := "on, off"
+
+  validateSchema = append(validateSchema,
+    ValidateSchema{
+      Identifier:                 cisWAFGroupMode,
+      ValidateFunctionIdentifier: ValidateAllowedStringValue,
+      Type:                       TypeString,
+      Required:                   true,
+      AllowedValues:              mode})
+  ibmCISWAFGroupValidator := ResourceValidator{ResourceName: ibmCISWAFGroup, Schema: validateSchema}
+  return &ibmCISWAFGroupValidator
+}
+
+func resourceIBMCISWAFGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+  cisClient, err := meta.(ClientSession).CisWAFGroupClientSession()
+  if err != nil {
+    return err
+  }
+
+  crn := d.Get(cisID).(string)
+  zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string))
+  cisClient.Crn = core.StringPtr(crn)
+  cisClient.ZoneID = core.StringPtr(zoneID)
+  packageID, _, _, _ := convertTfToCisThreeVar(d.Get(cisWAFGroupPackageID).(string))
+  groupID := d.Get(cisWAFGroupID).(string)
+
+  if d.HasChange(cisWAFGroupMode) {
+    mode := d.Get(cisWAFGroupMode).(string)
+    opt := cisClient.NewUpdateWafRuleGroupOptions(packageID, groupID)
+    opt.SetMode(mode)
+    _, response, err := cisClient.UpdateWafRuleGroup(opt)
+    if err != nil {
+      log.Printf("Update waf rule group mode failed: %v", response)
+      return err
+    }
+  }
+  d.SetId(convertCisToTfFourVar(groupID, packageID, zoneID, crn))
+  return resourceIBMCISWAFGroupRead(d, meta)
+}
+
+func resourceIBMCISWAFGroupRead(d *schema.ResourceData, meta interface{}) error {
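+  // The resource ID packs the group ID, package ID, zone ID and CIS instance CRN
+  // into a single string; unpack it before querying the WAF group.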
+  cisClient, err := meta.(ClientSession).CisWAFGroupClientSession()
+  if err != nil {
+    return err
+  }
+  groupID, packageID, zoneID, crn, err := convertTfToCisFourVar(d.Id())
+  cisClient.Crn = core.StringPtr(crn)
+  cisClient.ZoneID = core.StringPtr(zoneID)
+  opt := cisClient.NewGetWafRuleGroupOptions(packageID, groupID)
+  result, response, err := cisClient.GetWafRuleGroup(opt)
+  if err != nil {
+    if response != nil && response.StatusCode == 404 {
+      log.Printf("WAF group is not found!")
+      d.SetId("")
+      return nil
+    }
+    log.Printf("Get waf rule group setting failed: %v", response)
+    return err
+  }
+  d.Set(cisID, crn)
+  d.Set(cisDomainID, zoneID)
+  d.Set(cisWAFGroupID, groupID)
+  d.Set(cisWAFGroupPackageID, result.Result.PackageID)
+  d.Set(cisWAFGroupMode, result.Result.Mode)
+  d.Set(cisWAFGroupName, result.Result.Name)
+  d.Set(cisWAFGroupDesc, result.Result.Description)
+  d.Set(cisWAFGroupModifiedRulesCount, result.Result.ModifiedRulesCount)
+  d.Set(cisWAFGroupRulesCount, result.Result.RulesCount)
+  return nil
+}
+
+func resourceIBMCISWAFGroupDelete(d *schema.ResourceData, meta interface{}) error {
+  // Nothing to delete on CIS WAF Group resource
+  d.SetId("")
+  return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_waf_package.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_waf_package.go
new file mode 100644
index 00000000000..305edc503d7
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_waf_package.go
@@ -0,0 +1,172 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "log"
+
+  "github.com/IBM/go-sdk-core/v4/core"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+  ibmCISWAFPackage           = "ibm_cis_waf_package"
+  cisWAFPackageID            = "package_id"
+  cisWAFPackageName          = "name"
+  cisWAFPackageDescription   = "description"
+  cisWAFPackageDetectionMode = "detection_mode"
+  cisWAFPackageSensitivity   = "sensitivity"
+  cisWAFPackageActionMode    = "action_mode"
+)
+
+func resourceIBMCISWAFPackage() *schema.Resource {
+  return &schema.Resource{
+    Create:   resourceIBMCISWAFPackageUpdate,
+    Read:     resourceIBMCISWAFPackageRead,
+    Update:   resourceIBMCISWAFPackageUpdate,
+    Delete:   resourceIBMCISWAFPackageDelete,
+    Importer: &schema.ResourceImporter{},
+    Schema: map[string]*schema.Schema{
+      cisID: {
+        Type:        schema.TypeString,
+        Required:    true,
+        Description: "CIS Instance CRN",
+      },
+      cisDomainID: {
+        Type:             schema.TypeString,
+        Required:         true,
+        Description:      "CIS Domain ID",
+        DiffSuppressFunc: suppressDomainIDDiff,
+      },
+      cisWAFPackageID: {
+        Type:             schema.TypeString,
+        Required:         true,
+        ForceNew:         true,
+        Description:      "WAF package ID",
+        DiffSuppressFunc: suppressDomainIDDiff,
+      },
+      cisWAFPackageName: {
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "WAF package name",
+      },
+      cisWAFPackageDetectionMode: {
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "WAF package detection mode",
+      },
+      cisWAFPackageSensitivity: {
+        Type:        schema.TypeString,
+        Required:    true,
+        Description: "WAF package sensitivity",
+        ValidateFunc: InvokeValidator(
+          ibmCISWAFPackage, cisWAFPackageSensitivity),
+      },
+      cisWAFPackageActionMode: {
+        Type:        schema.TypeString,
+        Required:    true,
+        Description: "WAF package action mode",
+        ValidateFunc: InvokeValidator(
+          ibmCISWAFPackage, cisWAFPackageActionMode),
+      },
+      cisWAFPackageDescription: {
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "WAF package description",
+      },
+    },
+  }
+}
+
+func resourceIBMCISWAFPackageValidator() *ResourceValidator {
+
+  validateSchema := make([]ValidateSchema, 1)
+  sensitivity := "high, medium, low, off"
+  actionMode := "simulate, block, challenge"
+
+  validateSchema = append(validateSchema,
+    ValidateSchema{
+      Identifier:                 cisWAFPackageSensitivity,
+      ValidateFunctionIdentifier: ValidateAllowedStringValue,
+      Type:                       TypeString,
+      Required:                   true,
+      AllowedValues:              sensitivity})
+  validateSchema = append(validateSchema,
+    ValidateSchema{
+      Identifier:                 cisWAFPackageActionMode,
+      ValidateFunctionIdentifier: ValidateAllowedStringValue,
+      Type:                       TypeString,
+      Required:                   true,
+      AllowedValues:              actionMode})
+  ibmCISWAFPackageValidator := ResourceValidator{ResourceName: ibmCISWAFPackage, Schema: validateSchema}
+  return &ibmCISWAFPackageValidator
+}
+
+func resourceIBMCISWAFPackageUpdate(d *schema.ResourceData, meta interface{}) error {
+  cisClient, err := meta.(ClientSession).CisWAFPackageClientSession()
+  if err != nil {
+    return err
+  }
+
+  crn := d.Get(cisID).(string)
+  zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string))
+  packageID, _, _, err := convertTfToCisThreeVar(d.Get(cisWAFPackageID).(string))
+  cisClient.Crn = core.StringPtr(crn)
+  cisClient.ZoneID = core.StringPtr(zoneID)
+
+  if d.HasChange(cisWAFPackageSensitivity) ||
+    d.HasChange(cisWAFPackageActionMode) {
+    opt := cisClient.NewUpdateWafPackageOptions(packageID)
+    if v, ok := d.GetOk(cisWAFPackageSensitivity); ok {
+      opt.SetSensitivity(v.(string))
+    }
+    if v, ok := d.GetOk(cisWAFPackageActionMode); ok {
+      opt.SetActionMode(v.(string))
+    }
+    result, response, err := cisClient.UpdateWafPackage(opt)
+    if err != nil {
+      log.Printf("Update waf package setting failed: %v", response)
+      return err
+    }
+    d.SetId(convertCisToTfThreeVar(*result.Result.ID, zoneID, crn))
+  }
+
+  return resourceIBMCISWAFPackageRead(d, meta)
+}
+
+func resourceIBMCISWAFPackageRead(d *schema.ResourceData, meta interface{}) error {
+  cisClient, err := meta.(ClientSession).CisWAFPackageClientSession()
+  if err != nil {
+    return err
+  }
+  packageID, zoneID, crn, err := convertTfToCisThreeVar(d.Id())
+  cisClient.Crn = core.StringPtr(crn)
+  cisClient.ZoneID = core.StringPtr(zoneID)
+  opt := cisClient.NewGetWafPackageOptions(packageID)
+  result, response, err := cisClient.GetWafPackage(opt)
+  if err != nil {
+    if response != nil && response.StatusCode == 404 {
+      log.Printf("WAF package is not found!")
+      d.SetId("")
+      return nil
+    }
+    log.Printf("Get waf package setting failed: %v", response)
+    return err
+  }
+  d.Set(cisID, crn)
+  d.Set(cisDomainID, zoneID)
+  d.Set(cisWAFPackageID, result.Result.ID)
+  d.Set(cisWAFPackageName, result.Result.Name)
+  d.Set(cisWAFPackageDetectionMode, result.Result.DetectionMode)
+  d.Set(cisWAFPackageActionMode, result.Result.ActionMode)
+  d.Set(cisWAFPackageSensitivity, result.Result.Sensitivity)
+  d.Set(cisWAFPackageDescription, result.Result.Description)
+  return nil
+}
+
+func resourceIBMCISWAFPackageDelete(d *schema.ResourceData, meta interface{}) error {
+  // Nothing to delete on CIS WAF Package resource
+  d.SetId("")
+  return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_waf_rule.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_waf_rule.go
new file mode 100644
index 00000000000..a64f7efe477
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cis_waf_rule.go
@@ -0,0 +1,211 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "log"
+
+  "github.com/IBM/go-sdk-core/v4/core"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+  ibmCISWAFRule          = "ibm_cis_waf_rule"
+  cisWAFRuleID           = "rule_id"
+  cisWAFRuleDesc         = "description"
+  cisWAFRulePriority     = "priority"
+  cisWAFRulePackageID    = "package_id"
+  cisWAFRuleGroup        = "group"
+  cisWAFRuleGroupID      = "id"
+  cisWAFRuleGroupName    = "name"
+  cisWAFRuleMode         = "mode"
+  cisWAFRuleModeOn       = "on"
+  cisWAFRuleModeOff      = "off"
+  cisWAFRuleAllowedModes = "allowed_modes"
+)
+
+func resourceIBMCISWAFRule() *schema.Resource {
+  return &schema.Resource{
+    Create:   resourceIBMCISWAFRuleUpdate,
+    Read:     resourceIBMCISWAFRuleRead,
+    Update:   resourceIBMCISWAFRuleUpdate,
+    Delete:   resourceIBMCISWAFRuleDelete,
+    Importer: &schema.ResourceImporter{},
+    Schema: map[string]*schema.Schema{
+      cisID: {
+        Type:        schema.TypeString,
+        Required:    true,
+        Description: "CIS Instance CRN",
+      },
+      cisDomainID: {
+        Type:             schema.TypeString,
+        Required:         true,
+        Description:      "CIS Domain ID",
+        DiffSuppressFunc: suppressDomainIDDiff,
+      },
+      cisWAFRuleID: {
+        Type:        schema.TypeString,
+        Required:    true,
+        ForceNew:    true,
+        Description: "CIS WAF Rule id",
+      },
+      cisWAFRulePackageID: {
+        Type:        schema.TypeString,
+        Required:    true,
+        ForceNew:    true,
+        Description: "CIS WAF Rule package id",
+      },
+      cisWAFRuleMode: {
+        Type:         schema.TypeString,
+        Required:     true,
+        Description:  "CIS WAF Rule mode",
+        ValidateFunc: InvokeValidator(ibmCISWAFRule, cisWAFRuleMode),
+      },
+      cisWAFRuleDesc: {
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "CIS WAF Rule description",
+      },
+      cisWAFRulePriority: {
+        Type:        schema.TypeInt,
+        Computed:    true,
+        Description: "CIS WAF Rule Priority",
+      },
+      cisWAFRuleGroup: {
+        Type:        schema.TypeList,
+        Computed:    true,
+        Description: "CIS WAF Rule group",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            cisWAFRuleGroupID: {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "waf rule group id",
+            },
+            cisWAFRuleGroupName: {
+              Type:        schema.TypeString,
+              Computed:    true,
+              Description: "waf rule group name",
+            },
+          },
+        },
+      },
+      cisWAFRuleAllowedModes: {
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "CIS WAF Rule allowed modes",
+        Elem: &schema.Schema{
+          Type: schema.TypeString,
+        },
+      },
+    },
+  }
+}
+
+func resourceIBMCISWAFRuleValidator() *ResourceValidator {
+
+  validateSchema := make([]ValidateSchema, 1)
+  modes := "on, off, default, disable, simulate, block, challenge"
+
+  validateSchema = append(validateSchema,
+    ValidateSchema{
+      Identifier:                 cisWAFRuleMode,
+      ValidateFunctionIdentifier: ValidateAllowedStringValue,
+      Type:                       TypeString,
+      Required:                   true,
+      AllowedValues:              modes})
+  ibmCISWAFRuleValidator := ResourceValidator{ResourceName: ibmCISWAFRule, Schema: validateSchema}
+  return &ibmCISWAFRuleValidator
+}
+
+func resourceIBMCISWAFRuleUpdate(d *schema.ResourceData, meta interface{}) error {
+  cisClient, err := meta.(ClientSession).CisWAFRuleClientSession()
+  if err != nil {
+    return err
+  }
+
+  crn := d.Get(cisID).(string)
+  zoneID, _, err := convertTftoCisTwoVar(d.Get(cisDomainID).(string))
+  cisClient.Crn = core.StringPtr(crn)
+  cisClient.ZoneID = core.StringPtr(zoneID)
+  ruleID := d.Get(cisWAFRuleID).(string)
+  packageID, _, _, _ := convertTfToCisThreeVar(d.Get(cisWAFRulePackageID).(string))
+
+  if d.HasChange(cisWAFRuleMode) {
+    mode := d.Get(cisWAFRuleMode).(string)
+
+    getOpt := cisClient.NewGetWafRuleOptions(packageID, 
ruleID) + getResult, getResponse, err := cisClient.GetWafRule(getOpt) + if err != nil { + log.Printf("Get WAF rule setting failed: %v", getResponse) + return err + } + getMode := *getResult.Result.Mode + updateOpt := cisClient.NewUpdateWafRuleOptions(packageID, ruleID) + + // Mode differs based on OWASP and CIS + if getMode == cisWAFRuleModeOn || getMode == cisWAFRuleModeOff { + + owaspOpt, _ := cisClient.NewWafRuleBodyOwasp(mode) + updateOpt.SetOwasp(owaspOpt) + + } else { + + cisOpt, _ := cisClient.NewWafRuleBodyCis(mode) + updateOpt.SetCis(cisOpt) + + } + _, response, err := cisClient.UpdateWafRule(updateOpt) + if err != nil { + log.Printf("Update WAF rule setting failed: %v", response) + return err + } + } + + d.SetId(convertCisToTfFourVar(ruleID, packageID, zoneID, crn)) + return resourceIBMCISWAFRuleRead(d, meta) +} + +func resourceIBMCISWAFRuleRead(d *schema.ResourceData, meta interface{}) error { + cisClient, err := meta.(ClientSession).CisWAFRuleClientSession() + if err != nil { + return err + } + ruleID, packageID, zoneID, crn, err := convertTfToCisFourVar(d.Id()) + cisClient.Crn = core.StringPtr(crn) + cisClient.ZoneID = core.StringPtr(zoneID) + opt := cisClient.NewGetWafRuleOptions(packageID, ruleID) + result, response, err := cisClient.GetWafRule(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + log.Printf("WAF Rule is not found!") + d.SetId("") + return nil + } + log.Printf("Get waf rule setting failed: %v", response) + return err + } + groups := []interface{}{} + group := map[string]interface{}{} + group[cisWAFRuleGroupID] = *result.Result.Group.ID + group[cisWAFRuleGroupName] = *result.Result.Group.Name + groups = append(groups, group) + + d.Set(cisID, crn) + d.Set(cisDomainID, zoneID) + d.Set(cisWAFRuleID, ruleID) + d.Set(cisWAFRulePackageID, packageID) + d.Set(cisWAFRuleDesc, *result.Result.Description) + d.Set(cisWAFRulePriority, *result.Result.Priority) + d.Set(cisWAFRuleGroup, groups) + d.Set(cisWAFRuleMode, *result.Result.Mode) + d.Set(cisWAFRuleAllowedModes, flattenStringList(result.Result.AllowedModes)) + return nil +} + +func resourceIBMCISWAFRuleDelete(d *schema.ResourceData, meta interface{}) error { + // Nothing to delete on CIS WAF rule resource + d.SetId("") + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_catalog.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_catalog.go new file mode 100644 index 00000000000..51265133a86 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_catalog.go @@ -0,0 +1,165 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+  "github.com/IBM/platform-services-go-sdk/catalogmanagementv1"
+)
+
+func resourceIBMCmCatalog() *schema.Resource {
+  return &schema.Resource{
+    Create:   resourceIBMCmCatalogCreate,
+    Read:     resourceIBMCmCatalogRead,
+    Delete:   resourceIBMCmCatalogDelete,
+    Importer: &schema.ResourceImporter{},
+
+    Schema: map[string]*schema.Schema{
+      "label": &schema.Schema{
+        Type:        schema.TypeString,
+        Required:    true,
+        ForceNew:    true,
+        Description: "Display Name in the requested language.",
+      },
+      "short_description": &schema.Schema{
+        Type:        schema.TypeString,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "Description in the requested language.",
+      },
+      "catalog_icon_url": &schema.Schema{
+        Type:        schema.TypeString,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "URL for an icon associated with this catalog.",
+      },
+      "tags": &schema.Schema{
+        Type:        schema.TypeList,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "List of tags associated with this catalog.",
+        Elem:        &schema.Schema{Type: schema.TypeString},
+      },
+      "url": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "The url for this specific catalog.",
+      },
+      "crn": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "CRN associated with the catalog.",
+      },
+      "offerings_url": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "URL path to offerings.",
+      },
+    },
+  }
+}
+
+func resourceIBMCmCatalogCreate(d *schema.ResourceData, meta interface{}) error {
+  catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1()
+  if err != nil {
+    return err
+  }
+
+  createCatalogOptions := &catalogmanagementv1.CreateCatalogOptions{}
+
+  if _, ok := d.GetOk("label"); ok {
+    createCatalogOptions.SetLabel(d.Get("label").(string))
+  }
+  if _, ok := d.GetOk("short_description"); ok {
+    createCatalogOptions.SetShortDescription(d.Get("short_description").(string))
+  }
+  if _, ok := d.GetOk("catalog_icon_url"); ok {
+    createCatalogOptions.SetCatalogIconURL(d.Get("catalog_icon_url").(string))
+  }
+  if tags, ok := d.GetOk("tags"); ok {
+    // d.Get on a TypeList yields []interface{}; a direct []string assertion would panic.
+    createCatalogOptions.SetTags(expandStringList(tags.([]interface{})))
+  }
+
+  catalog, response, err := catalogManagementClient.CreateCatalog(createCatalogOptions)
+  if err != nil {
+    log.Printf("[DEBUG] CreateCatalog failed %s\n%s", err, response)
+    return err
+  }
+
+  d.SetId(*catalog.ID)
+
+  return resourceIBMCmCatalogRead(d, meta)
+}
+
+func resourceIBMCmCatalogRead(d *schema.ResourceData, meta interface{}) error {
+  catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1()
+  if err != nil {
+    return err
+  }
+  log.Printf("[DEBUG] client is a nil pointer: %v\n", catalogManagementClient == nil)
+
+  getCatalogOptions := &catalogmanagementv1.GetCatalogOptions{}
+
+  getCatalogOptions.SetCatalogIdentifier(d.Id())
+
+  catalog, response, err := catalogManagementClient.GetCatalog(getCatalogOptions)
+  if err != nil {
+    if response != nil && response.StatusCode == 404 {
+      d.SetId("")
+      return nil
+    }
+    log.Printf("[DEBUG] GetCatalog failed %s\n%s", err, response)
+    return err
+  }
+  if err = d.Set("label", catalog.Label); err != nil {
+    return fmt.Errorf("Error setting label: %s", err)
+  }
+  if err = d.Set("short_description", catalog.ShortDescription); err != nil {
+    return fmt.Errorf("Error setting short_description: %s", err)
+  }
+  if err = d.Set("catalog_icon_url", catalog.CatalogIconURL); err != nil {
+    return fmt.Errorf("Error setting catalog_icon_url: %s", err)
+  }
+  if catalog.Tags != nil {
+    if err = d.Set("tags", catalog.Tags); err != nil {
+      return fmt.Errorf("Error setting tags: %s", err)
+    }
+  }
+  if err = d.Set("url", catalog.URL); err != nil {
+    return fmt.Errorf("Error setting url: %s", err)
+  }
+  if err = d.Set("crn", catalog.CRN); err != nil {
+    return fmt.Errorf("Error setting crn: %s", err)
+  }
+  if err = d.Set("offerings_url", catalog.OfferingsURL); err != nil {
+    return fmt.Errorf("Error setting offerings_url: %s", err)
+  }
+
+  return nil
+}
+
+func resourceIBMCmCatalogDelete(d *schema.ResourceData, meta interface{}) error {
+  catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1()
+  if err != nil {
+    return err
+  }
+
+  deleteCatalogOptions := &catalogmanagementv1.DeleteCatalogOptions{}
+
+  deleteCatalogOptions.SetCatalogIdentifier(d.Id())
+
+  response, err := catalogManagementClient.DeleteCatalog(deleteCatalogOptions)
+  if err != nil {
+    log.Printf("[DEBUG] DeleteCatalog failed %s\n%s", err, response)
+    return err
+  }
+
+  d.SetId("")
+
+  return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_offering.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_offering.go
new file mode 100644
index 00000000000..0df61dc2f43
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_offering.go
@@ -0,0 +1,316 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+  "github.com/IBM/platform-services-go-sdk/catalogmanagementv1"
+)
+
+func resourceIBMCmOffering() *schema.Resource {
+  return &schema.Resource{
+    Create:   resourceIBMCmOfferingCreate,
+    Read:     resourceIBMCmOfferingRead,
+    Delete:   resourceIBMCmOfferingDelete,
+    Importer: &schema.ResourceImporter{},
+
+    Schema: map[string]*schema.Schema{
+      "offering_id": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "The id of this offering.",
+      },
+      "url": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "The url for this specific offering.",
+      },
+      "crn": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "The crn for this specific offering.",
+      },
+      "label": &schema.Schema{
+        Type:        schema.TypeString,
+        Required:    true,
+        Description: "Display Name in the requested language.",
+        ForceNew:    true,
+      },
+      "name": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "The programmatic name of this offering.",
+      },
+      "offering_icon_url": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "URL for an icon associated with this offering.",
+      },
+      "offering_docs_url": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "URL for additional docs associated with this offering.",
+      },
+      "offering_support_url": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "URL to be displayed in the Consumption UI for getting support on this offering.",
+      },
+      "tags": &schema.Schema{
+        Type:        schema.TypeList,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "List of tags associated with this offering.",
+        Elem:        &schema.Schema{Type: schema.TypeString},
+      },
+      "short_description": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "Short description in the requested 
language.", + }, + "long_description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Long description in the requested language.", + }, + "permit_request_ibm_public_publish": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Is it permitted to request publishing to IBM or Public.", + }, + "ibm_publish_approved": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates if this offering has been approved for use by all IBMers.", + }, + "public_publish_approved": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates if this offering has been approved for use by all IBM Cloud users.", + }, + "public_original_crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The original offering CRN that this publish entry came from.", + }, + "publish_public_crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The crn of the public catalog entry of this offering.", + }, + "portal_approval_record": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The portal's approval record ID.", + }, + "portal_ui_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The portal UI URL.", + }, + "catalog_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The id of the catalog containing this offering.", + }, + "catalog_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The name of the catalog.", + }, + "disclaimer": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A disclaimer for this offering.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Determine if this offering should be displayed in the Consumption UI.", + }, + "repo_info": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Repository info for offerings.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "token": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Token for private repos.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Public or enterprise GitHub.", + }, + }, + }, + }, + }, + } +} + +func resourceIBMCmOfferingCreate(d *schema.ResourceData, meta interface{}) error { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + + createOfferingOptions := catalogManagementClient.NewCreateOfferingOptions(d.Get("catalog_id").(string)) + + if _, ok := d.GetOk("label"); ok { + createOfferingOptions.SetLabel(d.Get("label").(string)) + } + if _, ok := d.GetOk("offering_icon_url"); ok { + createOfferingOptions.SetOfferingIconURL(d.Get("offering_icon_url").(string)) + } + if _, ok := d.GetOk("offering_docs_url"); ok { + createOfferingOptions.SetOfferingDocsURL(d.Get("offering_docs_url").(string)) + } + if _, ok := d.GetOk("offering_support_url"); ok { + createOfferingOptions.SetOfferingSupportURL(d.Get("offering_support_url").(string)) + } + if tags, ok := d.GetOk("tags"); ok { + list := expandStringList(tags.([]interface{})) + createOfferingOptions.SetTags(list) + + } + + offering, response, err := catalogManagementClient.CreateOffering(createOfferingOptions) + if err != nil { + log.Printf("[DEBUG] CreateOffering failed %s\n%s", err, response) + return err + } + + d.SetId(*offering.ID) + + return 
resourceIBMCmOfferingRead(d, meta) +} + +func resourceIBMCmOfferingRead(d *schema.ResourceData, meta interface{}) error { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + + getOfferingOptions := &catalogmanagementv1.GetOfferingOptions{} + + getOfferingOptions.SetCatalogIdentifier(d.Get("catalog_id").(string)) + getOfferingOptions.SetOfferingID(d.Id()) + + offering, response, err := catalogManagementClient.GetOffering(getOfferingOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetOffering failed %s\n%s", err, response) + return err + } + if err = d.Set("url", offering.URL); err != nil { + return fmt.Errorf("Error setting url: %s", err) + } + if err = d.Set("crn", offering.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("label", offering.Label); err != nil { + return fmt.Errorf("Error setting label: %s", err) + } + if err = d.Set("name", offering.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("offering_icon_url", offering.OfferingIconURL); err != nil { + return fmt.Errorf("Error setting offering_icon_url: %s", err) + } + if err = d.Set("offering_docs_url", offering.OfferingDocsURL); err != nil { + return fmt.Errorf("Error setting offering_docs_url: %s", err) + } + if err = d.Set("offering_support_url", offering.OfferingSupportURL); err != nil { + return fmt.Errorf("Error setting offering_support_url: %s", err) + } + if err = d.Set("short_description", offering.ShortDescription); err != nil { + return fmt.Errorf("Error setting short_description: %s", err) + } + if err = d.Set("long_description", offering.LongDescription); err != nil { + return fmt.Errorf("Error setting long_description: %s", err) + } + if err = d.Set("permit_request_ibm_public_publish", offering.PermitRequestIBMPublicPublish); err != nil { + return fmt.Errorf("Error setting permit_request_ibm_public_publish: %s", err) + } + if err = d.Set("ibm_publish_approved", offering.IBMPublishApproved); err != nil { + return fmt.Errorf("Error setting ibm_publish_approved: %s", err) + } + if err = d.Set("public_publish_approved", offering.PublicPublishApproved); err != nil { + return fmt.Errorf("Error setting public_publish_approved: %s", err) + } + if err = d.Set("public_original_crn", offering.PublicOriginalCRN); err != nil { + return fmt.Errorf("Error setting public_original_crn: %s", err) + } + if err = d.Set("publish_public_crn", offering.PublishPublicCRN); err != nil { + return fmt.Errorf("Error setting publish_public_crn: %s", err) + } + if err = d.Set("portal_approval_record", offering.PortalApprovalRecord); err != nil { + return fmt.Errorf("Error setting portal_approval_record: %s", err) + } + if err = d.Set("portal_ui_url", offering.PortalUIURL); err != nil { + return fmt.Errorf("Error setting portal_ui_url: %s", err) + } + if err = d.Set("catalog_id", offering.CatalogID); err != nil { + return fmt.Errorf("Error setting catalog_id: %s", err) + } + if err = d.Set("catalog_name", offering.CatalogName); err != nil { + return fmt.Errorf("Error setting catalog_name: %s", err) + } + if err = d.Set("disclaimer", offering.Disclaimer); err != nil { + return fmt.Errorf("Error setting disclaimer: %s", err) + } + if err = d.Set("hidden", offering.Hidden); err != nil { + return fmt.Errorf("Error setting hidden: %s", err) + } + if offering.RepoInfo != nil { + repoInfoMap := 
resourceIBMCmOfferingRepoInfoToMap(*offering.RepoInfo) + if err = d.Set("repo_info", []map[string]interface{}{repoInfoMap}); err != nil { + return fmt.Errorf("Error setting repo_info: %s", err) + } + } + + return nil +} + +func resourceIBMCmOfferingRepoInfoToMap(repoInfo catalogmanagementv1.RepoInfo) map[string]interface{} { + repoInfoMap := map[string]interface{}{} + + repoInfoMap["token"] = repoInfo.Token + repoInfoMap["type"] = repoInfo.Type + + return repoInfoMap +} + +func resourceIBMCmOfferingDelete(d *schema.ResourceData, meta interface{}) error { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + + deleteOfferingOptions := &catalogmanagementv1.DeleteOfferingOptions{} + + deleteOfferingOptions.SetCatalogIdentifier(d.Get("catalog_id").(string)) + deleteOfferingOptions.SetOfferingID(d.Id()) + + response, err := catalogManagementClient.DeleteOffering(deleteOfferingOptions) + if err != nil { + log.Printf("[DEBUG] DeleteOfferingWithContext failed %s\n%s", err, response) + return err + } + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_offering_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_offering_instance.go new file mode 100644 index 00000000000..b040b3635fd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_offering_instance.go @@ -0,0 +1,339 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + + "github.com/IBM/platform-services-go-sdk/catalogmanagementv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMCmOfferingInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCmOfferingInstanceCreate, + Read: resourceIBMCmOfferingInstanceRead, + Update: resourceIBMCmOfferingInstanceUpdate, + Delete: resourceIBMCmOfferingInstanceDelete, + Exists: resourceIBMCmOfferingInstanceExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "url reference to this object.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "platform CRN for this instance.", + }, + "_rev": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Cloudant Revision for this instance", + }, + "label": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "the label for this instance.", + }, + "catalog_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Catalog ID this instance was created from.", + }, + "offering_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Offering ID this instance was created from.", + }, + "kind_format": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "the format this instance has (helm, operator, ova...).", + }, + "version": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The version this instance was installed from (not version id).", + }, + "cluster_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Cluster ID.", + }, + "cluster_region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Cluster region (e.g., us-south).", + }, + "cluster_namespaces": 
&schema.Schema{
+        Type:        schema.TypeList,
+        Required:    true,
+        Description: "List of target namespaces to install into.",
+        Elem:        &schema.Schema{Type: schema.TypeString},
+      },
+      "cluster_all_namespaces": &schema.Schema{
+        Type:        schema.TypeBool,
+        Required:    true,
+        Description: "designate to install into all namespaces.",
+      },
+      "schematics_workspace_id": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "id of the schematics workspace, for offerings installed through schematics",
+      },
+      "resource_group_id": &schema.Schema{
+        Type:        schema.TypeString,
+        Optional:    true,
+        Description: "id of the resource group",
+      },
+    },
+  }
+}
+
+func resourceIBMCmOfferingInstanceCreate(d *schema.ResourceData, meta interface{}) error {
+  catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1()
+  if err != nil {
+    return err
+  }
+
+  rsConClient, err := meta.(ClientSession).BluemixSession()
+  if err != nil {
+    return err
+  }
+
+  createOfferingInstanceOptions := &catalogmanagementv1.CreateOfferingInstanceOptions{}
+
+  schemID, isfound := os.LookupEnv("IC_SCHEMATICS_WORKSPACE_ID")
+  if isfound {
+    createOfferingInstanceOptions.SetSchematicsWorkspaceID(schemID)
+  }
+  createOfferingInstanceOptions.SetXAuthRefreshToken(rsConClient.Config.IAMRefreshToken)
+  if _, ok := d.GetOk("label"); ok {
+    createOfferingInstanceOptions.SetLabel(d.Get("label").(string))
+  }
+  if _, ok := d.GetOk("catalog_id"); ok {
+    createOfferingInstanceOptions.SetCatalogID(d.Get("catalog_id").(string))
+  }
+  if _, ok := d.GetOk("offering_id"); ok {
+    createOfferingInstanceOptions.SetOfferingID(d.Get("offering_id").(string))
+  }
+  if _, ok := d.GetOk("kind_format"); ok {
+    createOfferingInstanceOptions.SetKindFormat(d.Get("kind_format").(string))
+  }
+  if _, ok := d.GetOk("version"); ok {
+    createOfferingInstanceOptions.SetVersion(d.Get("version").(string))
+  }
+  if _, ok := d.GetOk("cluster_id"); ok {
+    createOfferingInstanceOptions.SetClusterID(d.Get("cluster_id").(string))
+  }
+  if _, ok := d.GetOk("cluster_region"); ok {
+    createOfferingInstanceOptions.SetClusterRegion(d.Get("cluster_region").(string))
+  }
+  if ns, ok := d.GetOk("cluster_namespaces"); ok {
+    list := expandStringList(ns.([]interface{}))
+    createOfferingInstanceOptions.SetClusterNamespaces(list)
+  }
+  if _, ok := d.GetOk("cluster_all_namespaces"); ok {
+    createOfferingInstanceOptions.SetClusterAllNamespaces(d.Get("cluster_all_namespaces").(bool))
+  }
+  if _, ok := d.GetOk("resource_group_id"); ok {
+    createOfferingInstanceOptions.SetResourceGroupID(d.Get("resource_group_id").(string))
+  }
+
+  offeringInstance, response, err := catalogManagementClient.CreateOfferingInstance(createOfferingInstanceOptions)
+  if err != nil {
+    log.Printf("[DEBUG] CreateOfferingInstance failed %s\n%s", err, response)
+    return err
+  }
+
+  d.SetId(*offeringInstance.ID)
+
+  log.Printf("[INFO] Offering instance of type %q was created on cluster %q", *createOfferingInstanceOptions.KindFormat, *createOfferingInstanceOptions.ClusterID)
+
+  return resourceIBMCmOfferingInstanceRead(d, meta)
+}
+
+func resourceIBMCmOfferingInstanceRead(d *schema.ResourceData, meta interface{}) error {
+  catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1()
+  if err != nil {
+    return err
+  }
+
+  getOfferingInstanceOptions := &catalogmanagementv1.GetOfferingInstanceOptions{}
+
+  getOfferingInstanceOptions.SetInstanceIdentifier(d.Id())
+
+  offeringInstance, response, err := catalogManagementClient.GetOfferingInstance(getOfferingInstanceOptions)
+  if err != 
nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetOfferingInstance failed %s\n%s", err, response) + return err + } + + if err = d.Set("url", offeringInstance.URL); err != nil { + return fmt.Errorf("Error setting url: %s", err) + } + if err = d.Set("crn", offeringInstance.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("_rev", offeringInstance.Rev); err != nil { + return fmt.Errorf("Error setting _rev: %s", err) + } + if err = d.Set("label", offeringInstance.Label); err != nil { + return fmt.Errorf("Error setting label: %s", err) + } + if err = d.Set("catalog_id", offeringInstance.CatalogID); err != nil { + return fmt.Errorf("Error setting catalog_id: %s", err) + } + if err = d.Set("offering_id", offeringInstance.OfferingID); err != nil { + return fmt.Errorf("Error setting offering_id: %s", err) + } + if err = d.Set("kind_format", offeringInstance.KindFormat); err != nil { + return fmt.Errorf("Error setting kind_format: %s", err) + } + if err = d.Set("version", offeringInstance.Version); err != nil { + return fmt.Errorf("Error setting version: %s", err) + } + if err = d.Set("cluster_id", offeringInstance.ClusterID); err != nil { + return fmt.Errorf("Error setting cluster_id: %s", err) + } + if err = d.Set("cluster_region", offeringInstance.ClusterRegion); err != nil { + return fmt.Errorf("Error setting cluster_region: %s", err) + } + if offeringInstance.ClusterNamespaces != nil { + if err = d.Set("cluster_namespaces", offeringInstance.ClusterNamespaces); err != nil { + return fmt.Errorf("Error setting cluster_namespaces: %s", err) + } + } + if err = d.Set("cluster_all_namespaces", offeringInstance.ClusterAllNamespaces); err != nil { + return fmt.Errorf("Error setting cluster_all_namespaces: %s", err) + } + if err = d.Set("schematics_workspace_id", offeringInstance.SchematicsWorkspaceID); err != nil { + return fmt.Errorf("Error setting schematics_workspace_id: %s", err) + } + + return nil +} + +func resourceIBMCmOfferingInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + + getOfferingInstanceOptions := &catalogmanagementv1.GetOfferingInstanceOptions{} + + getOfferingInstanceOptions.SetInstanceIdentifier(d.Id()) + + offeringInstance, response, err := catalogManagementClient.GetOfferingInstance(getOfferingInstanceOptions) + if err != nil { + log.Printf("[DEBUG] Failed to retrieve rev %s\n%s", err, response) + return err + } + + rsConClient, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + putOfferingInstanceOptions := &catalogmanagementv1.PutOfferingInstanceOptions{} + + putOfferingInstanceOptions.SetInstanceIdentifier(d.Id()) + putOfferingInstanceOptions.SetID(d.Id()) + putOfferingInstanceOptions.SetXAuthRefreshToken(rsConClient.Config.IAMRefreshToken) + putOfferingInstanceOptions.SetRev(*offeringInstance.Rev) + if _, ok := d.GetOk("label"); ok { + putOfferingInstanceOptions.SetLabel(d.Get("label").(string)) + } + if _, ok := d.GetOk("catalog_id"); ok { + putOfferingInstanceOptions.SetCatalogID(d.Get("catalog_id").(string)) + } + if _, ok := d.GetOk("offering_id"); ok { + putOfferingInstanceOptions.SetOfferingID(d.Get("offering_id").(string)) + } + if _, ok := d.GetOk("kind_format"); ok { + putOfferingInstanceOptions.SetKindFormat(d.Get("kind_format").(string)) + } + if _, ok := d.GetOk("version"); ok { + 
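+    // "version" is required in the schema, so it is always forwarded on update.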
putOfferingInstanceOptions.SetVersion(d.Get("version").(string)) + } + if _, ok := d.GetOk("cluster_id"); ok { + putOfferingInstanceOptions.SetClusterID(d.Get("cluster_id").(string)) + } + if _, ok := d.GetOk("cluster_region"); ok { + putOfferingInstanceOptions.SetClusterRegion(d.Get("cluster_region").(string)) + } + if ns, ok := d.GetOk("cluster_namespaces"); ok { + list := expandStringList(ns.([]interface{})) + putOfferingInstanceOptions.SetClusterNamespaces(list) + } + if _, ok := d.GetOk("cluster_all_namespaces"); ok { + putOfferingInstanceOptions.SetClusterAllNamespaces(d.Get("cluster_all_namespaces").(bool)) + } + if _, ok := d.GetOk("resource_group_id"); ok { + putOfferingInstanceOptions.SetResourceGroupID(d.Get("resource_group_id").(string)) + } + + _, response, err = catalogManagementClient.PutOfferingInstance(putOfferingInstanceOptions) + if err != nil { + log.Printf("[DEBUG] PutOfferingInstance failed %s\n%s", err, response) + return err + } + + return resourceIBMCmOfferingInstanceRead(d, meta) +} + +func resourceIBMCmOfferingInstanceDelete(d *schema.ResourceData, meta interface{}) error { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return err + } + rsConClient, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + deleteOfferingInstanceOptions := &catalogmanagementv1.DeleteOfferingInstanceOptions{} + + deleteOfferingInstanceOptions.SetInstanceIdentifier(d.Id()) + deleteOfferingInstanceOptions.SetXAuthRefreshToken(rsConClient.Config.IAMRefreshToken) + + response, err := catalogManagementClient.DeleteOfferingInstance(deleteOfferingInstanceOptions) + if err != nil { + log.Printf("[DEBUG] DeleteOfferingInstance failed %s\n%s", err, response) + return err + } + + d.SetId("") + + return nil +} + +func resourceIBMCmOfferingInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1() + if err != nil { + return false, err + } + + getOfferingInstanceOptions := &catalogmanagementv1.GetOfferingInstanceOptions{} + + getOfferingInstanceOptions.SetInstanceIdentifier(d.Id()) + + offeringInstance, response, err := catalogManagementClient.GetOfferingInstance(getOfferingInstanceOptions) + if err != nil { + log.Printf("[DEBUG] GetOfferingInstance failed %s\n%s", err, response) + return false, err + } + + return *offeringInstance.ID == d.Id(), nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_version.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_version.go new file mode 100644 index 00000000000..430b3b41290 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cm_version.go @@ -0,0 +1,235 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+  "github.com/IBM/platform-services-go-sdk/catalogmanagementv1"
+)
+
+func resourceIBMCmVersion() *schema.Resource {
+  return &schema.Resource{
+    Create:   resourceIBMCmVersionCreate,
+    Read:     resourceIBMCmVersionRead,
+    Delete:   resourceIBMCmVersionDelete,
+    Importer: &schema.ResourceImporter{},
+
+    Schema: map[string]*schema.Schema{
+      "catalog_identifier": &schema.Schema{
+        Type:        schema.TypeString,
+        Required:    true,
+        ForceNew:    true,
+        Description: "Catalog identifier.",
+      },
+      "offering_id": &schema.Schema{
+        Type:        schema.TypeString,
+        Required:    true,
+        ForceNew:    true,
+        Description: "Offering identification.",
+      },
+      "tags": &schema.Schema{
+        Type:        schema.TypeList,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "Tags array.",
+        Elem:        &schema.Schema{Type: schema.TypeString},
+      },
+      "target_kinds": &schema.Schema{
+        Type:        schema.TypeList,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "Target kinds. Current valid values are 'iks', 'roks', 'vcenter', and 'terraform'.",
+        Elem:        &schema.Schema{Type: schema.TypeString},
+      },
+      "content": &schema.Schema{
+        Type:        schema.TypeString,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "byte array representing the content to be imported. Only supported for OVA images at this time.",
+      },
+      "zipurl": &schema.Schema{
+        Type:        schema.TypeString,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "URL path to zip location. If not specified, must provide content in the body of this call.",
+      },
+      "target_version": &schema.Schema{
+        Type:        schema.TypeString,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "The semver value for this new version, if not found in the zip url package content.",
+      },
+      "crn": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "Version's CRN.",
+      },
+      "version": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "Version of content type.",
+      },
+      "sha": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "hash of the content.",
+      },
+      "created": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "The date and time this version was created.",
+      },
+      "updated": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "The date and time this version was last updated.",
+      },
+      "catalog_id": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "Catalog ID.",
+      },
+      "kind_id": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "Kind ID.",
+      },
+      "repo_url": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "Content's repo URL.",
+      },
+      "source_url": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "Content's source URL (e.g git repo).",
+      },
+      "tgz_url": &schema.Schema{
+        Type:        schema.TypeString,
+        Computed:    true,
+        Description: "File used to on-board this version.",
+      },
+    },
+  }
+}
+
+func resourceIBMCmVersionCreate(d *schema.ResourceData, meta interface{}) error {
+  catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1()
+  if err != nil {
+    return err
+  }
+
+  importOfferingVersionOptions := catalogManagementClient.NewImportOfferingVersionOptions(d.Get("catalog_identifier").(string), d.Get("offering_id").(string))
+
+  if tags, ok := d.GetOk("tags"); ok {
+    // d.Get on a TypeList yields []interface{}; a direct []string assertion would panic.
+    importOfferingVersionOptions.SetTags(expandStringList(tags.([]interface{})))
+  }
+  if _, ok := 
d.GetOk("target_kinds"); ok {
+    list := expandStringList(d.Get("target_kinds").([]interface{}))
+    importOfferingVersionOptions.SetTargetKinds(list)
+
+  }
+  if _, ok := d.GetOk("content"); ok {
+    importOfferingVersionOptions.SetContent([]byte(d.Get("content").(string)))
+  }
+  if _, ok := d.GetOk("zipurl"); ok {
+    importOfferingVersionOptions.SetZipurl(d.Get("zipurl").(string))
+  }
+  if _, ok := d.GetOk("target_version"); ok {
+    importOfferingVersionOptions.SetTargetVersion(d.Get("target_version").(string))
+  }
+
+  offering, response, err := catalogManagementClient.ImportOfferingVersion(importOfferingVersionOptions)
+
+  if err != nil {
+    log.Printf("[DEBUG] ImportOfferingVersion failed %s\n%s", err, response)
+    return err
+  }
+
+  versionLocator := *offering.Kinds[0].Versions[0].VersionLocator
+
+  d.SetId(versionLocator)
+
+  return resourceIBMCmVersionRead(d, meta)
+}
+
+func resourceIBMCmVersionRead(d *schema.ResourceData, meta interface{}) error {
+  catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1()
+  if err != nil {
+    return err
+  }
+
+  getVersionOptions := &catalogmanagementv1.GetVersionOptions{}
+
+  getVersionOptions.SetVersionLocID(d.Id())
+
+  offering, response, err := catalogManagementClient.GetVersion(getVersionOptions)
+  if err != nil {
+    if response != nil && response.StatusCode == 404 {
+      d.SetId("")
+      return nil
+    }
+    log.Printf("[DEBUG] GetVersion failed %s\n%s", err, response)
+    return err
+  }
+  // Only dereference the offering after the error check; a failed lookup returns a nil result.
+  version := offering.Kinds[0].Versions[0]
+
+  if err = d.Set("crn", version.CRN); err != nil {
+    return fmt.Errorf("Error setting crn: %s", err)
+  }
+  if err = d.Set("version", version.Version); err != nil {
+    return fmt.Errorf("Error setting version: %s", err)
+  }
+  if err = d.Set("sha", version.Sha); err != nil {
+    return fmt.Errorf("Error setting sha: %s", err)
+  }
+  if err = d.Set("created", version.Created.String()); err != nil {
+    return fmt.Errorf("Error setting created: %s", err)
+  }
+  if err = d.Set("updated", version.Updated.String()); err != nil {
+    return fmt.Errorf("Error setting updated: %s", err)
+  }
+  if err = d.Set("catalog_id", version.CatalogID); err != nil {
+    return fmt.Errorf("Error setting catalog_id: %s", err)
+  }
+  if err = d.Set("kind_id", version.KindID); err != nil {
+    return fmt.Errorf("Error setting kind_id: %s", err)
+  }
+  if err = d.Set("repo_url", version.RepoURL); err != nil {
+    return fmt.Errorf("Error setting repo_url: %s", err)
+  }
+  if err = d.Set("source_url", version.SourceURL); err != nil {
+    return fmt.Errorf("Error setting source_url: %s", err)
+  }
+  if err = d.Set("tgz_url", version.TgzURL); err != nil {
+    return fmt.Errorf("Error setting tgz_url: %s", err)
+  }
+
+  return nil
+}
+
+func resourceIBMCmVersionDelete(d *schema.ResourceData, meta interface{}) error {
+  catalogManagementClient, err := meta.(ClientSession).CatalogManagementV1()
+  if err != nil {
+    return err
+  }
+
+  deleteVersionOptions := &catalogmanagementv1.DeleteVersionOptions{}
+  deleteVersionOptions.SetVersionLocID(d.Id())
+
+  response, err := catalogManagementClient.DeleteVersion(deleteVersionOptions)
+  if err != nil {
+    log.Printf("[DEBUG] DeleteVersion failed %s\n%s", err, response)
+    return err
+  }
+
+  d.SetId("")
+
+  return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_autoscale_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_autoscale_group.go
new file mode 100644
index 00000000000..46daf93767e
--- /dev/null
+++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_autoscale_group.go @@ -0,0 +1,687 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const HEALTH_CHECK_TYPE_HTTP_CUSTOM = "HTTP-CUSTOM" + +var IBMComputeAutoScaleGroupObjectMask = []string{ + "id", + "name", + "minimumMemberCount", + "maximumMemberCount", + "cooldown", + "status[keyName]", + "regionalGroup[id,name]", + "terminationPolicy[keyName]", + "virtualGuestMemberTemplate[blockDeviceTemplateGroup,primaryNetworkComponent[networkVlan[id]],primaryBackendNetworkComponent[networkVlan[id]]]", + "loadBalancers[id,port,virtualServerId,healthCheck[id]]", + "networkVlans[id,networkVlanId,networkVlan[vlanNumber,primaryRouter[hostname]]]", + "loadBalancers[healthCheck[healthCheckTypeId,type[keyname],attributes[value,type[id,keyname]]]]", +} + +func resourceIBMComputeAutoScaleGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeAutoScaleGroupCreate, + Read: resourceIBMComputeAutoScaleGroupRead, + Update: resourceIBMComputeAutoScaleGroupUpdate, + Delete: resourceIBMComputeAutoScaleGroupDelete, + Exists: resourceIBMComputeAutoScaleGroupExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name", + }, + + "regional_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "regional group", + }, + + "minimum_member_count": { + Type: schema.TypeInt, + Required: true, + Description: "Minimum member count", + }, + + "maximum_member_count": { + Type: schema.TypeInt, + Required: true, + Description: "Maximum member count", + }, + + "cooldown": { + Type: schema.TypeInt, + Required: true, + Description: "Cooldown value", + }, + + "termination_policy": { + Type: schema.TypeString, + Required: true, + Description: "Termination policy", + }, + + "virtual_server_id": { + Type: schema.TypeInt, + Optional: true, + Description: "virtual server ID", + }, + + "port": { + Type: schema.TypeInt, + Optional: true, + Description: "Port number", + }, + + "health_check": { + Type: schema.TypeMap, + Optional: true, + }, + + // This has to be a TypeList, because TypeMap does not handle non-primitive + // members properly. + "virtual_guest_member_template": { + Type: schema.TypeList, + Required: true, + Elem: getModifiedVirtualGuestResource(), + Description: "Virtual guest member template", + }, + + "network_vlan_ids": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + Description: "List of network VLAN ids", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags", + }, + }, + } +} + +// Returns a modified version of the virtual guest resource, with all members set to ForceNew = false. 
+// Otherwise a modified template parameter unnecessarily forces scale group drop/create +func getModifiedVirtualGuestResource() *schema.Resource { + + r := resourceIBMComputeVmInstance() + // wait_time_minutes is only used in virtual_guest resource. + delete(r.Schema, "wait_time_minutes") + + for _, elem := range r.Schema { + elem.ForceNew = false + elem.ConflictsWith = []string{} + } + + return r +} + +// Helper method to parse healthcheck data in the resource schema format to the SoftLayer datatypes +func buildHealthCheckFromResourceData(d map[string]interface{}) (datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, error) { + healthCheckOpts := datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{ + Type: &datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type{ + Keyname: sl.String(d["type"].(string)), + }, + } + + if *healthCheckOpts.Type.Keyname == HEALTH_CHECK_TYPE_HTTP_CUSTOM { + // Validate and apply type-specific fields + healthCheckMethod, ok := d["custom_method"] + if !ok { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{}, errors.New("\"custom_method\" is required when HTTP-CUSTOM healthcheck is specified") + } + + healthCheckRequest, ok := d["custom_request"] + if !ok { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{}, errors.New("\"custom_request\" is required when HTTP-CUSTOM healthcheck is specified") + } + + healthCheckResponse, ok := d["custom_response"] + if !ok { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{}, errors.New("\"custom_response\" is required when HTTP-CUSTOM healthcheck is specified") + } + + // HTTP-CUSTOM values are represented as an array of SoftLayer_Health_Check_Attributes + healthCheckOpts.Attributes = []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute{ + { + Type: &datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type{ + Keyname: sl.String("HTTP_CUSTOM_TYPE"), + }, + Value: sl.String(healthCheckMethod.(string)), + }, + { + Type: &datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type{ + Keyname: sl.String("LOCATION"), + }, + Value: sl.String(healthCheckRequest.(string)), + }, + { + Type: &datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type{ + Keyname: sl.String("EXPECTED_RESPONSE"), + }, + Value: sl.String(healthCheckResponse.(string)), + }, + } + } + + return healthCheckOpts, nil +} + +// Helper method to parse network vlan information in the resource schema format to the SoftLayer datatypes +func buildScaleVlansFromResourceData(v interface{}, meta interface{}) ([]datatypes.Scale_Network_Vlan, error) { + vlanIds := v.([]interface{}) + scaleNetworkVlans := make([]datatypes.Scale_Network_Vlan, 0, len(vlanIds)) + + for _, iVlanId := range vlanIds { + vlanId := iVlanId.(int) + scaleNetworkVlans = append( + scaleNetworkVlans, + datatypes.Scale_Network_Vlan{NetworkVlanId: &vlanId}, + ) + } + + return scaleNetworkVlans, nil +} + +func getVirtualGuestTemplate(vGuestTemplateList []interface{}, meta interface{}) (datatypes.Virtual_Guest, error) { + if len(vGuestTemplateList) != 1 { + return datatypes.Virtual_Guest{}, + errors.New("Only one virtual_guest_member_template can be provided") + } + + // Retrieve the map of virtual_guest_member_template attributes + vGuestMap := vGuestTemplateList[0].(map[string]interface{}) + + // Create an empty 
ResourceData instance for an IBM_Compute_VM_Instance resource
+	vGuestResourceData := resourceIBMComputeVmInstance().Data(nil)
+
+	// For each item in the map, call Set on the ResourceData. This handles
+	// validation and yields a completed ResourceData object
+	for k, v := range vGuestMap {
+		log.Printf("[DEBUG] virtual_guest_member_template %s: %#v", k, v)
+		err := vGuestResourceData.Set(k, v)
+		if err != nil {
+			return datatypes.Virtual_Guest{},
+				fmt.Errorf("Error while parsing virtual_guest_member_template values: %s", err)
+		}
+	}
+	dc := vGuestResourceData.Get("datacenter").(string)
+	publicVlan := vGuestResourceData.Get("public_vlan_id").(int)
+	privateVlan := vGuestResourceData.Get("private_vlan_id").(int)
+	quote_id := 0
+	// Get the virtual guest creation template from the completed resource data object
+	vgs, err := getVirtualGuestTemplateFromResourceData(vGuestResourceData, meta, dc, publicVlan, privateVlan, quote_id)
+	return vgs[0], err
+}
+
+func resourceIBMComputeAutoScaleGroupCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	scaleGroupServiceNoRetry := services.GetScaleGroupService(sess.SetRetries(0))
+
+	virtualGuestTemplateOpts, err := getVirtualGuestTemplate(d.Get("virtual_guest_member_template").([]interface{}), meta)
+	if err != nil {
+		return fmt.Errorf("Error while parsing virtual_guest_member_template values: %s", err)
+	}
+
+	scaleNetworkVlans, err := buildScaleVlansFromResourceData(d.Get("network_vlan_ids").(*schema.Set).List(), meta)
+	if err != nil {
+		return fmt.Errorf("Error while parsing network vlan values: %s", err)
+	}
+
+	locationGroupRegionalId, err := getLocationGroupRegionalId(sess, d.Get("regional_group").(string))
+	if err != nil {
+		return err
+	}
+
+	// Build up our creation options
+	opts := datatypes.Scale_Group{
+		Name:                       sl.String(d.Get("name").(string)),
+		Cooldown:                   sl.Int(d.Get("cooldown").(int)),
+		MinimumMemberCount:         sl.Int(d.Get("minimum_member_count").(int)),
+		MaximumMemberCount:         sl.Int(d.Get("maximum_member_count").(int)),
+		SuspendedFlag:              sl.Bool(false),
+		VirtualGuestMemberTemplate: &virtualGuestTemplateOpts,
+		NetworkVlans:               scaleNetworkVlans,
+		RegionalGroupId:            &locationGroupRegionalId,
+	}
+
+	opts.TerminationPolicy = &datatypes.Scale_Termination_Policy{
+		KeyName: sl.String(d.Get("termination_policy").(string)),
+	}
+
+	opts.LoadBalancers, err = buildLoadBalancers(d)
+	if err != nil {
+		return fmt.Errorf("Error creating Scale Group: %s", err)
+	}
+
+	res, err := scaleGroupServiceNoRetry.CreateObject(&opts)
+	if err != nil {
+		return fmt.Errorf("Error creating Scale Group: %s", err)
+	}
+
+	d.SetId(strconv.Itoa(*res.Id))
+	log.Printf("[INFO] Scale Group ID: %d", *res.Id)
+
+	time.Sleep(60 * time.Second)
+
+	// wait for scale group to become active
+	_, err = waitForActiveStatus(d, meta)
+
+	if err != nil {
+		return fmt.Errorf("Error waiting for scale group (%s) to become active: %s", d.Id(), err)
+	}
+
+	return resourceIBMComputeAutoScaleGroupRead(d, meta)
+}
+
+func buildLoadBalancers(d *schema.ResourceData, ids ...int) ([]datatypes.Scale_LoadBalancer, error) {
+	isLoadBalancerEmpty := true
+	loadBalancers := []datatypes.Scale_LoadBalancer{{}}
+
+	if virtualServerId, ok := d.GetOk("virtual_server_id"); ok {
+		isLoadBalancerEmpty = false
+		loadBalancers[0].VirtualServerId = sl.Int(virtualServerId.(int))
+		if len(ids) > 0 {
+			loadBalancers[0].Id = sl.Int(ids[0])
+		}
+	}
+
+	if healthCheck, ok := d.GetOk("health_check"); ok {
+		isLoadBalancerEmpty = false
+		healthCheckOpts, err := 
buildHealthCheckFromResourceData(healthCheck.(map[string]interface{})) + if err != nil { + return []datatypes.Scale_LoadBalancer{}, fmt.Errorf("Error while parsing health check options: %s", err) + } + loadBalancers[0].HealthCheck = &healthCheckOpts + } + + if port, ok := d.GetOk("port"); ok { + isLoadBalancerEmpty = false + loadBalancers[0].Port = sl.Int(port.(int)) + } + + if isLoadBalancerEmpty { + return []datatypes.Scale_LoadBalancer{}, nil + } else { + return loadBalancers, nil + } +} + +func resourceIBMComputeAutoScaleGroupRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetScaleGroupService(sess) + + groupId, _ := strconv.Atoi(d.Id()) + + slGroupObj, err := service.Id(groupId).Mask(strings.Join(IBMComputeAutoScaleGroupObjectMask, ",")).GetObject() + if err != nil { + // If the scale group is somehow already destroyed, mark as successfully gone + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving autoscale Group: %s", err) + } + + d.Set("name", slGroupObj.Name) + if slGroupObj.RegionalGroup != nil && slGroupObj.RegionalGroup.Name != nil { + d.Set("regional_group", slGroupObj.RegionalGroup.Name) + } + d.Set("minimum_member_count", slGroupObj.MinimumMemberCount) + d.Set("maximum_member_count", slGroupObj.MaximumMemberCount) + d.Set("cooldown", slGroupObj.Cooldown) + d.Set("status", slGroupObj.Status.KeyName) + d.Set("termination_policy", slGroupObj.TerminationPolicy.KeyName) + if len(slGroupObj.LoadBalancers) > 0 { + d.Set("virtual_server_id", slGroupObj.LoadBalancers[0].VirtualServerId) + d.Set("port", slGroupObj.LoadBalancers[0].Port) + + // Health Check + healthCheckObj := slGroupObj.LoadBalancers[0].HealthCheck + currentHealthCheck := d.Get("health_check").(map[string]interface{}) + + currentHealthCheck["type"] = *healthCheckObj.Type.Keyname + + if *healthCheckObj.Type.Keyname == HEALTH_CHECK_TYPE_HTTP_CUSTOM { + for _, elem := range healthCheckObj.Attributes { + switch *elem.Type.Keyname { + case "HTTP_CUSTOM_TYPE": + currentHealthCheck["custom_method"] = *elem.Value + case "LOCATION": + currentHealthCheck["custom_request"] = *elem.Value + case "EXPECTED_RESPONSE": + currentHealthCheck["custom_response"] = *elem.Value + } + } + } + + d.Set("health_check", currentHealthCheck) + } + + // Network Vlans + vlanIds := make([]int, len(slGroupObj.NetworkVlans)) + for i, vlan := range slGroupObj.NetworkVlans { + vlanIds[i] = *vlan.NetworkVlanId + } + d.Set("network_vlan_ids", vlanIds) + + virtualGuestTemplate := populateMemberTemplateResourceData(*slGroupObj.VirtualGuestMemberTemplate) + d.Set("virtual_guest_member_template", virtualGuestTemplate) + + return nil +} + +func populateMemberTemplateResourceData(template datatypes.Virtual_Guest) []map[string]interface{} { + + d := make(map[string]interface{}) + + d["hostname"] = *template.Hostname + d["domain"] = *template.Domain + d["datacenter"] = *template.Datacenter.Name + d["network_speed"] = *template.NetworkComponents[0].MaxSpeed + d["cores"] = *template.StartCpus + d["memory"] = *template.MaxMemory + d["private_network_only"] = *template.PrivateNetworkOnlyFlag + d["hourly_billing"] = *template.HourlyBillingFlag + d["local_disk"] = *template.LocalDiskFlag + + // Guard against nil values for optional fields in virtual_guest resource + d["dedicated_acct_host_only"] = sl.Get(template.DedicatedAccountHostOnlyFlag) + d["os_reference_code"] = 
sl.Get(template.OperatingSystemReferenceCode)
+	d["post_install_script_uri"] = sl.Get(template.PostInstallScriptUri)
+
+	if template.PrimaryNetworkComponent != nil && template.PrimaryNetworkComponent.NetworkVlan != nil {
+		d["public_vlan_id"] = sl.Get(template.PrimaryNetworkComponent.NetworkVlan.Id)
+	}
+
+	if template.PrimaryBackendNetworkComponent != nil && template.PrimaryBackendNetworkComponent.NetworkVlan != nil {
+		d["private_vlan_id"] = sl.Get(template.PrimaryBackendNetworkComponent.NetworkVlan.Id)
+	}
+	if template.BlockDeviceTemplateGroup != nil {
+		d["image_id"] = sl.Get(template.BlockDeviceTemplateGroup.GlobalIdentifier)
+	}
+
+	if len(template.UserData) > 0 {
+		d["user_metadata"] = *template.UserData[0].Value
+	}
+
+	sshKeys := make([]interface{}, 0, len(template.SshKeys))
+	for _, elem := range template.SshKeys {
+		sshKeys = append(sshKeys, *elem.Id)
+	}
+	d["ssh_key_ids"] = sshKeys
+
+	disks := make([]interface{}, 0, len(template.BlockDevices))
+	for _, elem := range template.BlockDevices {
+		disks = append(disks, *elem.DiskImage.Capacity)
+	}
+	d["disks"] = disks
+
+	return []map[string]interface{}{d}
+}
+
+func resourceIBMComputeAutoScaleGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+
+	sess := meta.(ClientSession).SoftLayerSession()
+	scaleGroupService := services.GetScaleGroupService(sess)
+	scaleNetworkVlanService := services.GetScaleNetworkVlanService(sess)
+	scaleLoadBalancerService := services.GetScaleLoadBalancerService(sess)
+	scaleGroupServiceNoRetry := services.GetScaleGroupService(sess.SetRetries(0))
+
+	groupId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID. Must be an integer: %s", err)
+	}
+
+	// Fetch the complete object from SoftLayer, update with current values from the configuration, and send the
+	// whole thing back to SoftLayer (effectively, a PUT)
+	groupObj, err := scaleGroupService.Id(groupId).Mask(strings.Join(IBMComputeAutoScaleGroupObjectMask, ",")).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving autoscale_group resource: %s", err)
+	}
+
+	groupObj.Name = sl.String(d.Get("name").(string))
+	groupObj.MinimumMemberCount = sl.Int(d.Get("minimum_member_count").(int))
+	groupObj.MaximumMemberCount = sl.Int(d.Get("maximum_member_count").(int))
+	groupObj.Cooldown = sl.Int(d.Get("cooldown").(int))
+	groupObj.TerminationPolicy.KeyName = sl.String(d.Get("termination_policy").(string))
+
+	currentLoadBalancers := groupObj.LoadBalancers
+	if len(currentLoadBalancers) > 0 {
+		groupObj.LoadBalancers, err = buildLoadBalancers(d, *currentLoadBalancers[0].Id)
+	} else {
+		groupObj.LoadBalancers, err = buildLoadBalancers(d)
+	}
+	if err != nil {
+		return fmt.Errorf("Error updating Scale Group: %s", err)
+	}
+
+	if d.HasChange("network_vlan_ids") {
+		// Vlans require special handling:
+		//
+		// 1. Delete all of the group's existing scale_network_vlans
+		// 2. Pass the updated list of vlans to the Scale_Group.editObject function.
+		//    SoftLayer re-creates the associations from that list.
+
+		_, newValue := d.GetChange("network_vlan_ids")
+		newIds := newValue.(*schema.Set).List()
+
+		// Delete all Vlans
+		oldScaleVlans, err := scaleGroupService.
+			Id(groupId).
+ GetNetworkVlans() + if err != nil { + return fmt.Errorf("Could not retrieve current vlans for scale group (%d): %s", groupId, err) + } + + for _, oldScaleVlan := range oldScaleVlans { + _, err := scaleNetworkVlanService.Id(*oldScaleVlan.Id).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting scale network vlan %d: %s", *oldScaleVlan.Id, err) + } + } + + // Parse the new list of vlans into the appropriate input structure + scaleVlans, err := buildScaleVlansFromResourceData(newIds, meta) + + if err != nil { + return fmt.Errorf("Unable to parse network vlan options: %s", err) + } + + groupObj.NetworkVlans = scaleVlans + } + + if d.HasChange("virtual_guest_member_template") { + virtualGuestTemplateOpts, err := getVirtualGuestTemplate(d.Get("virtual_guest_member_template").([]interface{}), meta) + if err != nil { + return fmt.Errorf("Unable to parse virtual guest member template options: %s", err) + } + + groupObj.VirtualGuestMemberTemplate = &virtualGuestTemplateOpts + + } + _, err = scaleGroupServiceNoRetry.Id(groupId).EditObject(&groupObj) + if err != nil { + return fmt.Errorf("Error received while editing autoscale_group: %s", err) + } + + // wait for scale group to become active + _, err = waitForActiveStatus(d, meta) + + if err != nil { + return fmt.Errorf("Error waiting for scale group (%s) to become active: %s", d.Id(), err) + } + + // Delete a load balancer if there is the load balancer in a scale group + // and a request doesn't have virtual_server_id, port, and health_check. + if len(currentLoadBalancers) > 0 && len(groupObj.LoadBalancers) <= 0 { + _, err = scaleLoadBalancerService.Id(*currentLoadBalancers[0].Id).DeleteObject() + if err != nil { + return fmt.Errorf("Error received while deleting loadbalancers: %s", err) + } + } + + return nil +} + +func resourceIBMComputeAutoScaleGroupDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + scaleGroupService := services.GetScaleGroupService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting scale group: %s", err) + } + + log.Printf("[INFO] Deleting scale group: %d", id) + _, err = scaleGroupService.Id(id).ForceDeleteObject() + if err != nil { + return fmt.Errorf("Error deleting scale group: %s", err) + } + + d.SetId("") + + return nil +} + +func waitForActiveStatus(d *schema.ResourceData, meta interface{}) (interface{}, error) { + sess := meta.(ClientSession).SoftLayerSession() + scaleGroupService := services.GetScaleGroupService(sess) + + log.Printf("Waiting for scale group (%s) to become active", d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return nil, fmt.Errorf("The scale group ID %s must be numeric", d.Id()) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"BUSY", "SCALING", "SUSPENDED"}, + Target: []string{"ACTIVE"}, + Refresh: func() (interface{}, string, error) { + // get the status of the scale group + result, err := scaleGroupService.Id(id).Mask("status.keyName,minimumMemberCount," + + "virtualGuestMembers[virtualGuest[primaryBackendIpAddress,primaryIpAddress,privateNetworkOnlyFlag,fullyQualifiedDomainName]]"). + GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("The scale group %d does not exist anymore: %s", id, err) + } + + return result, "BUSY", nil // Retry + } + + status := "BUSY" + + // Return "BUSY" if member VMs don't have ip addresses. 
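+			// A member counts as ready only once its private (backend) IP is
+			// assigned and, unless the guest is private-network-only, its
+			// public IP as well; until then the refresh keeps returning "BUSY".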
+			for _, scaleMemberVirtualGuest := range result.VirtualGuestMembers {
+				// Checking primary backend IP address.
+				if scaleMemberVirtualGuest.VirtualGuest.PrimaryBackendIpAddress == nil {
+					log.Printf("The member VM of the scale group does not have a private IP yet. Hostname: %s",
+						*scaleMemberVirtualGuest.VirtualGuest.FullyQualifiedDomainName)
+					return result, status, nil
+				}
+
+				// Checking primary IP address.
+				if !(*scaleMemberVirtualGuest.VirtualGuest.PrivateNetworkOnlyFlag) &&
+					scaleMemberVirtualGuest.VirtualGuest.PrimaryIpAddress == nil {
+					log.Printf("The member VM of the scale group does not have a public IP yet. Hostname: %s",
+						*scaleMemberVirtualGuest.VirtualGuest.FullyQualifiedDomainName)
+					return result, status, nil
+				}
+			}
+			if result.Status.KeyName != nil {
+				status = *result.Status.KeyName
+				log.Printf("The status of scale group with id (%d) is (%s)", id, *result.Status.KeyName)
+			} else {
+				log.Printf("Could not get the status of scale group with id (%d). Retrying...", id)
+			}
+
+			return result, status, nil
+		},
+		Timeout:    120 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func resourceIBMComputeAutoScaleGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+
+	sess := meta.(ClientSession).SoftLayerSession()
+	scaleGroupService := services.GetScaleGroupService(sess)
+
+	groupId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := scaleGroupService.Id(groupId).Mask("id").GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok {
+			if apiErr.StatusCode == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+	return result.Id != nil && *result.Id == groupId, nil
+}
+
+func getLocationGroupRegionalId(sess *session.Session, locationGroupRegionalName string) (int, error) {
+	locationGroupRegionals, err := services.GetLocationGroupRegionalService(sess).
+		Mask("id,name").
+		// FIXME: Someday, filters may actually work in SoftLayer
+		//Filter(filter.Build(
+		//	filter.Path("name").Eq(locationGroupRegionalName))).
+		//Limit(1).
+		GetAllObjects()
+
+	if err != nil {
+		return -1, err
+	}
+
+	if len(locationGroupRegionals) < 1 {
+		return -1, fmt.Errorf("Invalid location group regional: %s", locationGroupRegionalName)
+	}
+
+	for _, locationGroupRegional := range locationGroupRegionals {
+		if *locationGroupRegional.Name == locationGroupRegionalName {
+			return *locationGroupRegional.Id, nil
+		}
+	}
+
+	return -1, fmt.Errorf("Invalid regional_group: %s", locationGroupRegionalName)
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_autoscale_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_autoscale_policy.go
new file mode 100644
index 00000000000..a916a56d588
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_autoscale_policy.go
@@ -0,0 +1,563 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode" +) + +const ( + IBMComputeTimeFormat = string("2006-01-02T15:04:05-07:00") + IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_RESOURCE_USE = 1 + IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_REPEATING = 2 + IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_ONE_TIME = 3 +) + +var IBMComputeAutoScalePolicyObjectMask = []string{ + "cooldown", + "id", + "name", + "scaleActions", + "scaleGroupId", + "oneTimeTriggers", + "repeatingTriggers", + "resourceUseTriggers.watches", + "triggers", +} + +func resourceIBMComputeAutoScalePolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeAutoScalePolicyCreate, + Read: resourceIBMComputeAutoScalePolicyRead, + Update: resourceIBMComputeAutoScalePolicyUpdate, + Delete: resourceIBMComputeAutoScalePolicyDelete, + Exists: resourceIBMComputeAutoScalePolicyExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name", + }, + "scale_type": { + Type: schema.TypeString, + Required: true, + Description: "scale type", + }, + "scale_amount": { + Type: schema.TypeInt, + Required: true, + Description: "Scale amount", + }, + "cooldown": { + Type: schema.TypeInt, + Optional: true, + Description: "cooldown value", + }, + "scale_group_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "scale group ID", + }, + "triggers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + }, + + // Conditionally-required fields, based on value of "type" + "watches": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "metric": { + Type: schema.TypeString, + Required: true, + }, + "operator": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "period": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: resourceIBMComputeAutoScalePolicyHandlerHash, + }, + + "date": { + Type: schema.TypeString, + Optional: true, + }, + + "schedule": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceIBMComputeAutoScalePolicyTriggerHash, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags", + }, + }, + } +} + +func resourceIBMComputeAutoScalePolicyCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetScalePolicyService(sess.SetRetries(0)) + + var err error + + // Build up creation options + opts := datatypes.Scale_Policy{ + Name: sl.String(d.Get("name").(string)), + ScaleGroupId: sl.Int(d.Get("scale_group_id").(int)), + Cooldown: sl.Int(d.Get("cooldown").(int)), + } + + if *opts.Cooldown < 0 || *opts.Cooldown > 864000 { + return fmt.Errorf("Error retrieving scalePolicy: %s", "cooldown must be 
between 0 seconds and 10 days.")
+	}
+
+	opts.ScaleActions = []datatypes.Scale_Policy_Action_Scale{{
+		Amount:    sl.Int(d.Get("scale_amount").(int)),
+		ScaleType: sl.String(d.Get("scale_type").(string)),
+	},
+	}
+	opts.ScaleActions[0].TypeId = sl.Int(1)
+
+	if *opts.ScaleActions[0].Amount <= 0 {
+		return fmt.Errorf("Error retrieving scalePolicy: %s", "scale_amount should be greater than 0.")
+	}
+	if *opts.ScaleActions[0].ScaleType != "ABSOLUTE" && *opts.ScaleActions[0].ScaleType != "RELATIVE" && *opts.ScaleActions[0].ScaleType != "PERCENT" {
+		return fmt.Errorf("Error retrieving scalePolicy: %s", "scale_type should be ABSOLUTE, RELATIVE, or PERCENT.")
+	}
+
+	if _, ok := d.GetOk("triggers"); ok {
+		err = validateTriggerTypes(d)
+		if err != nil {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", err)
+		}
+
+		opts.OneTimeTriggers, err = prepareOneTimeTriggers(d)
+		if err != nil {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", err)
+		}
+
+		opts.RepeatingTriggers, err = prepareRepeatingTriggers(d)
+		if err != nil {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", err)
+		}
+
+		opts.ResourceUseTriggers, err = prepareResourceUseTriggers(d)
+		if err != nil {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", err)
+		}
+	}
+
+	res, err := service.CreateObject(&opts)
+	if err != nil {
+		return fmt.Errorf("Error creating Scale Policy: %s", err)
+	}
+
+	d.SetId(strconv.Itoa(*res.Id))
+	log.Printf("[INFO] Scale Policy ID: %d", *res.Id)
+
+	return resourceIBMComputeAutoScalePolicyRead(d, meta)
+}
+
+func resourceIBMComputeAutoScalePolicyRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetScalePolicyService(sess)
+
+	scalePolicyId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid scale policy ID, must be an integer: %s", err)
+	}
+
+	log.Printf("[INFO] Reading Scale Policy: %d", scalePolicyId)
+	scalePolicy, err := service.Id(scalePolicyId).Mask(strings.Join(IBMComputeAutoScalePolicyObjectMask, ";")).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving Scale Policy: %s", err)
+	}
+
+	d.Set("name", scalePolicy.Name)
+	d.Set("cooldown", scalePolicy.Cooldown)
+	d.Set("scale_group_id", scalePolicy.ScaleGroupId)
+	d.Set("scale_type", scalePolicy.ScaleActions[0].ScaleType)
+	d.Set("scale_amount", scalePolicy.ScaleActions[0].Amount)
+	triggers := make([]map[string]interface{}, 0)
+	triggers = append(triggers, readOneTimeTriggers(scalePolicy.OneTimeTriggers)...)
+	triggers = append(triggers, readRepeatingTriggers(scalePolicy.RepeatingTriggers)...)
+	triggers = append(triggers, readResourceUseTriggers(scalePolicy.ResourceUseTriggers)...)
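+	// All three trigger variants are flattened into a single list before being
+	// stored in the "triggers" set, whose hash function keys on the trigger
+	// type plus that type's specific fields (date, schedule, or watches).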
+
+	d.Set("triggers", triggers)
+
+	return nil
+}
+
+func resourceIBMComputeAutoScalePolicyUpdate(d *schema.ResourceData, meta interface{}) error {
+
+	sess := meta.(ClientSession).SoftLayerSession()
+	scalePolicyService := services.GetScalePolicyService(sess)
+	scalePolicyTriggerService := services.GetScalePolicyTriggerService(sess)
+	scalePolicyServiceNoRetry := services.GetScalePolicyService(sess.SetRetries(0))
+
+	scalePolicyId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid scale policy ID, must be an integer: %s", err)
+	}
+
+	scalePolicy, err := scalePolicyService.Id(scalePolicyId).Mask(strings.Join(IBMComputeAutoScalePolicyObjectMask, ";")).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving scalePolicy: %s", err)
+	}
+
+	var template datatypes.Scale_Policy
+
+	template.Id = sl.Int(scalePolicyId)
+
+	if d.HasChange("name") {
+		template.Name = sl.String(d.Get("name").(string))
+	}
+
+	if d.HasChange("scale_type") || d.HasChange("scale_amount") {
+		template.ScaleActions = make([]datatypes.Scale_Policy_Action_Scale, 1)
+		template.ScaleActions[0].Id = scalePolicy.ScaleActions[0].Id
+		template.ScaleActions[0].TypeId = sl.Int(1)
+	}
+
+	if d.HasChange("scale_type") {
+		template.ScaleActions[0].ScaleType = sl.String(d.Get("scale_type").(string))
+		if *template.ScaleActions[0].ScaleType != "ABSOLUTE" && *template.ScaleActions[0].ScaleType != "RELATIVE" && *template.ScaleActions[0].ScaleType != "PERCENT" {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", "scale_type should be ABSOLUTE, RELATIVE, or PERCENT.")
+		}
+	}
+
+	if d.HasChange("scale_amount") {
+		template.ScaleActions[0].Amount = sl.Int(d.Get("scale_amount").(int))
+		if *template.ScaleActions[0].Amount <= 0 {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", "scale_amount should be greater than 0.")
+		}
+	}
+
+	if d.HasChange("cooldown") {
+		template.Cooldown = sl.Int(d.Get("cooldown").(int))
+		if *template.Cooldown <= 0 || *template.Cooldown > 864000 {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", "cooldown must be between 0 seconds and 10 days.")
+		}
+	}
+
+	if _, ok := d.GetOk("triggers"); ok {
+		template.OneTimeTriggers, err = prepareOneTimeTriggers(d)
+		if err != nil {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", err)
+		}
+		template.RepeatingTriggers, err = prepareRepeatingTriggers(d)
+		if err != nil {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", err)
+		}
+		template.ResourceUseTriggers, err = prepareResourceUseTriggers(d)
+		if err != nil {
+			return fmt.Errorf("Error retrieving scalePolicy: %s", err)
+		}
+	}
+
+	for _, triggerList := range scalePolicy.Triggers {
+		log.Printf("[INFO] Deleting trigger %d", *triggerList.Id)
+		_, err = scalePolicyTriggerService.Id(*triggerList.Id).DeleteObject()
+		if err != nil {
+			return fmt.Errorf("Error deleting trigger %d: %s", *triggerList.Id, err)
+		}
+	}
+
+	time.Sleep(60 * time.Second)
+	log.Printf("[INFO] Updating scale policy: %d", scalePolicyId)
+	_, err = scalePolicyServiceNoRetry.Id(scalePolicyId).EditObject(&template)
+
+	if err != nil {
+		return fmt.Errorf("Error updating scale policy: %s", err)
+	}
+
+	return nil
+}
+
+func resourceIBMComputeAutoScalePolicyDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetScalePolicyService(sess)
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error deleting scale policy: %s", err)
+	}
+
+	log.Printf("[INFO] Deleting scale policy: %d", id)
+	_, err = service.Id(id).DeleteObject()
+	if err != nil {
+		return fmt.Errorf("Error deleting scale policy: %s", err)
+	}
+
d.SetId("") + + return nil +} + +func resourceIBMComputeAutoScalePolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetScalePolicyService(sess) + + policyId, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + result, err := service.Id(policyId).Mask("id").GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return result.Id != nil && *result.Id == policyId, nil + +} + +func validateTriggerTypes(d *schema.ResourceData) error { + triggerLists := d.Get("triggers").(*schema.Set).List() + for _, triggerList := range triggerLists { + trigger := triggerList.(map[string]interface{}) + trigger_type := trigger["type"].(string) + if trigger_type != "ONE_TIME" && trigger_type != "REPEATING" && trigger_type != "RESOURCE_USE" { + return fmt.Errorf("Invalid trigger type: %s", trigger_type) + } + } + return nil +} + +func prepareOneTimeTriggers(d *schema.ResourceData) ([]datatypes.Scale_Policy_Trigger_OneTime, error) { + triggerLists := d.Get("triggers").(*schema.Set).List() + triggers := make([]datatypes.Scale_Policy_Trigger_OneTime, 0) + + portalTimeZone := time.FixedZone("PortalTimeZone", -5*60*60) + + for _, triggerList := range triggerLists { + trigger := triggerList.(map[string]interface{}) + + if trigger["type"].(string) == "ONE_TIME" { + var oneTimeTrigger datatypes.Scale_Policy_Trigger_OneTime + oneTimeTrigger.TypeId = sl.Int(IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_ONE_TIME) + timeStampString := trigger["date"].(string) + + // Use UTC time zone for a terraform configuration + isUTC := strings.HasSuffix(timeStampString, "+00:00") + if !isUTC { + return nil, errors.New("The time zone should be an UTC(+00:00).") + } + + timeStamp, err := time.Parse(IBMComputeTimeFormat, timeStampString) + if err != nil { + return nil, err + } + oneTimeTrigger.Date = &datatypes.Time{Time: timeStamp.In(portalTimeZone)} + triggers = append(triggers, oneTimeTrigger) + } + } + return triggers, nil +} + +func prepareRepeatingTriggers(d *schema.ResourceData) ([]datatypes.Scale_Policy_Trigger_Repeating, error) { + triggerLists := d.Get("triggers").(*schema.Set).List() + triggers := make([]datatypes.Scale_Policy_Trigger_Repeating, 0) + for _, triggerList := range triggerLists { + trigger := triggerList.(map[string]interface{}) + + if trigger["type"].(string) == "REPEATING" { + var repeatingTrigger datatypes.Scale_Policy_Trigger_Repeating + repeatingTrigger.TypeId = sl.Int(IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_REPEATING) + repeatingTrigger.Schedule = sl.String(trigger["schedule"].(string)) + triggers = append(triggers, repeatingTrigger) + } + } + return triggers, nil +} + +func prepareResourceUseTriggers(d *schema.ResourceData) ([]datatypes.Scale_Policy_Trigger_ResourceUse, error) { + triggerLists := d.Get("triggers").(*schema.Set).List() + triggers := make([]datatypes.Scale_Policy_Trigger_ResourceUse, 0) + for _, triggerList := range triggerLists { + trigger := triggerList.(map[string]interface{}) + + if trigger["type"].(string) == "RESOURCE_USE" { + var resourceUseTrigger datatypes.Scale_Policy_Trigger_ResourceUse + var err error + resourceUseTrigger.TypeId = sl.Int(IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_RESOURCE_USE) + resourceUseTrigger.Watches, err = prepareWatches(trigger["watches"].(*schema.Set)) + 
+			if err != nil {
+				return nil, err
+			}
+			triggers = append(triggers, resourceUseTrigger)
+		}
+	}
+	return triggers, nil
+}
+
+func prepareWatches(d *schema.Set) ([]datatypes.Scale_Policy_Trigger_ResourceUse_Watch, error) {
+	watchLists := d.List()
+	watches := make([]datatypes.Scale_Policy_Trigger_ResourceUse_Watch, 0)
+	for _, watchItem := range watchLists {
+		var watch datatypes.Scale_Policy_Trigger_ResourceUse_Watch
+		watchMap := watchItem.(map[string]interface{})
+
+		watch.Metric = sl.String(watchMap["metric"].(string))
+		if *watch.Metric != "host.cpu.percent" && *watch.Metric != "host.network.backend.in.rate" && *watch.Metric != "host.network.backend.out.rate" && *watch.Metric != "host.network.frontend.in.rate" && *watch.Metric != "host.network.frontend.out.rate" {
+			return nil, fmt.Errorf("Invalid metric : %s", *watch.Metric)
+		}
+
+		watch.Operator = sl.String(watchMap["operator"].(string))
+		if *watch.Operator != ">" && *watch.Operator != "<" {
+			return nil, fmt.Errorf("Invalid operator : %s", *watch.Operator)
+		}
+
+		watch.Period = sl.Int(watchMap["period"].(int))
+		if *watch.Period <= 0 {
+			return nil, errors.New("period should be greater than 0.")
+		}
+
+		watch.Value = sl.String(watchMap["value"].(string))
+
+		// Autoscale only supports the EWMA algorithm.
+		watch.Algorithm = sl.String("EWMA")
+
+		watches = append(watches, watch)
+	}
+	return watches, nil
+}
+
+func readOneTimeTriggers(list []datatypes.Scale_Policy_Trigger_OneTime) []map[string]interface{} {
+	triggers := make([]map[string]interface{}, 0, len(list))
+	UTCZone, _ := time.LoadLocation("UTC")
+
+	for _, trigger := range list {
+		t := make(map[string]interface{})
+		t["id"] = *trigger.Id
+		t["type"] = "ONE_TIME"
+		t["date"] = trigger.Date.In(UTCZone).Format(IBMComputeTimeFormat)
+		triggers = append(triggers, t)
+	}
+	return triggers
+}
+
+func readRepeatingTriggers(list []datatypes.Scale_Policy_Trigger_Repeating) []map[string]interface{} {
+	triggers := make([]map[string]interface{}, 0, len(list))
+	for _, trigger := range list {
+		t := make(map[string]interface{})
+		t["id"] = *trigger.Id
+		t["type"] = "REPEATING"
+		t["schedule"] = *trigger.Schedule
+		triggers = append(triggers, t)
+	}
+	return triggers
+}
+
+func readResourceUseTriggers(list []datatypes.Scale_Policy_Trigger_ResourceUse) []map[string]interface{} {
+	triggers := make([]map[string]interface{}, 0, len(list))
+	for _, trigger := range list {
+		t := make(map[string]interface{})
+		t["id"] = *trigger.Id
+		t["type"] = "RESOURCE_USE"
+		t["watches"] = schema.NewSet(resourceIBMComputeAutoScalePolicyHandlerHash,
+			readResourceUseWatches(trigger.Watches))
+		triggers = append(triggers, t)
+	}
+	return triggers
+}
+
+func readResourceUseWatches(list []datatypes.Scale_Policy_Trigger_ResourceUse_Watch) []interface{} {
+	watches := make([]interface{}, 0, len(list))
+	for _, watch := range list {
+		w := make(map[string]interface{})
+		w["id"] = *watch.Id
+		w["metric"] = *watch.Metric
+		w["operator"] = *watch.Operator
+		w["period"] = *watch.Period
+		w["value"] = *watch.Value
+		watches = append(watches, w)
+	}
+	return watches
+}
+
+func resourceIBMComputeAutoScalePolicyTriggerHash(v interface{}) int {
+	var buf bytes.Buffer
+	trigger := v.(map[string]interface{})
+	if trigger["type"].(string) == "ONE_TIME" {
+		buf.WriteString(fmt.Sprintf("%s-", trigger["type"].(string)))
+		buf.WriteString(fmt.Sprintf("%s-", trigger["date"].(string)))
+	}
+	if trigger["type"].(string) == "REPEATING" {
+		buf.WriteString(fmt.Sprintf("%s-", trigger["type"].(string)))
+		
buf.WriteString(fmt.Sprintf("%s-", trigger["schedule"].(string))) + } + if trigger["type"].(string) == "RESOURCE_USE" { + buf.WriteString(fmt.Sprintf("%s-", trigger["type"].(string))) + for _, watchList := range trigger["watches"].(*schema.Set).List() { + watch := watchList.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", watch["metric"].(string))) + buf.WriteString(fmt.Sprintf("%s-", watch["operator"].(string))) + buf.WriteString(fmt.Sprintf("%s-", watch["value"].(string))) + buf.WriteString(fmt.Sprintf("%d-", watch["period"].(int))) + } + } + return hashcode.String(buf.String()) +} + +func resourceIBMComputeAutoScalePolicyHandlerHash(v interface{}) int { + var buf bytes.Buffer + watch := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", watch["metric"].(string))) + buf.WriteString(fmt.Sprintf("%s-", watch["operator"].(string))) + buf.WriteString(fmt.Sprintf("%s-", watch["value"].(string))) + buf.WriteString(fmt.Sprintf("%d-", watch["period"].(int))) + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_bare_metal.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_bare_metal.go new file mode 100644 index 00000000000..7192171feae --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_bare_metal.go @@ -0,0 +1,1602 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMComputeBareMetal() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeBareMetalCreate, + Read: resourceIBMComputeBareMetalRead, + Update: resourceIBMComputeBareMetalUpdate, + Delete: resourceIBMComputeBareMetalDelete, + Exists: resourceIBMComputeBareMetalExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "hostname": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DefaultFunc: genID, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + // FIXME: Work around another bug in terraform. + // When a default function is used with an optional property, + // terraform will always execute it on apply, even when the property + // already has a value in the state for it. This causes a false diff. + // Making the property Computed:true does not make a difference. 
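+					// Two generated hostnames are treated as equal here, so the
+					// default function's fresh value does not surface as a diff.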
+					if strings.HasPrefix(o, "terraformed-") && strings.HasPrefix(n, "terraformed-") {
+						return true
+					}
+
+					return o == n
+				},
+				Description: "Host name",
+			},
+
+			"domain": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Domain name",
+			},
+
+			"ssh_key_ids": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeInt},
+				ForceNew:    true,
+				Description: "SSH key IDs list",
+			},
+
+			"user_metadata": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "User metadata info",
+			},
+
+			"notes": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Optional notes info",
+			},
+
+			"file_storage_ids": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeInt},
+				Set: func(v interface{}) int {
+					return v.(int)
+				},
+			},
+
+			"block_storage_ids": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeInt},
+				Set: func(v interface{}) int {
+					return v.(int)
+				},
+			},
+
+			"post_install_script_uri": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				Default:          nil,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			"tags": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+
+			// Hourly only
+			"fixed_config_preset": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+				Description:      "Fixed config preset value",
+			},
+
+			// Hourly only
+			"os_reference_code": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				Computed:         true,
+				ForceNew:         true,
+				ConflictsWith:    []string{"image_template_id"},
+				DiffSuppressFunc: applyOnce,
+				Description:      "OS reference code value",
+			},
+
+			"image_template_id": {
+				Type:          schema.TypeInt,
+				Optional:      true,
+				ForceNew:      true,
+				ConflictsWith: []string{"os_reference_code"},
+				Description:   "OS image template ID",
+			},
+
+			"datacenter": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+
+			"network_speed": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Default:     100,
+				ForceNew:    true,
+				Description: "Network speed in Mbps",
+			},
+
+			"hourly_billing": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     true,
+				ForceNew:    true,
+				Description: "Enables hourly billing",
+			},
+
+			"private_network_only": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				ForceNew:    true,
+				Description: "Only a private network is configured if true",
+			},
+
+			"tcp_monitoring": {
+				Type:             schema.TypeBool,
+				Optional:         true,
+				Default:          false,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+				Description:      "TCP monitoring is enabled if set to true",
+			},
+
+			"redundant_power_supply": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+
+			"software_guard_extensions": {
+				Type:             schema.TypeBool,
+				Optional:         true,
+				Default:          false,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly only
+			"package_key_name": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly only
+			"process_key_name": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly only
+			"os_key_name": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly only
+			"gpu_key_name": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly only
+			"gpu_secondary_key_name": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly only
+			"disk_key_names": {
+				Type:             schema.TypeList,
+				Optional:         true,
+				ForceNew:         true,
+				Elem:             &schema.Schema{Type: schema.TypeString},
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly/Hourly only
+			"redundant_network": {
+				Type:             schema.TypeBool,
+				Optional:         true,
+				Default:          false,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly/Hourly only
+			"unbonded_network": {
+				Type:             schema.TypeBool,
+				Optional:         true,
+				Default:          false,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly only. For controlling datacenter restricted port speed
+			"restricted_network": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+				ForceNew: true,
+			},
+
+			"extended_hardware_testing": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+				ForceNew: true,
+			},
+
+			// Monthly only
+			"public_bandwidth": {
+				Type:             schema.TypeInt,
+				Optional:         true,
+				Computed:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Monthly only
+			"memory": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				ForceNew: true,
+				// Sometimes memory comes back different. Since this resource is immutable at this point
+				// and memory can't really be updated, suppress the change until we figure out how to handle it
+				DiffSuppressFunc: applyOnce,
+				Computed:         true,
+			},
+			// Monthly only
+			"storage_groups": {
+				Type:     schema.TypeList,
+				Optional: true,
+				ForceNew: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"array_type_id": {
+							Type:        schema.TypeInt,
+							Required:    true,
+							Description: "Array type ID",
+						},
+						"hard_drives": {
+							Type:        schema.TypeList,
+							Elem:        &schema.Schema{Type: schema.TypeInt},
+							Required:    true,
+							Description: "Hard drives list",
+						},
+						"array_size": {
+							Type:        schema.TypeInt,
+							Optional:    true,
+							Description: "Array size of hard drives list",
+						},
+						"partition_template_id": {
+							Type:        schema.TypeInt,
+							Optional:    true,
+							Description: "Partition template ID",
+						},
+					},
+				},
+				DiffSuppressFunc: applyOnce,
+			},
+
+			// Quote based provisioning only
+			"quote_id": {
+				Type:             schema.TypeInt,
+				Optional:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: applyOnce,
+				Description:      "Quote ID for Quote based provisioning",
+			},
+
+			// Quote based provisioning, Monthly
+			"public_vlan_id": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+
+			// Quote based provisioning, Monthly
+			"public_subnet": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+
+			// Quote based provisioning, Monthly
+			"private_vlan_id": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+
+			// Quote based provisioning, Monthly
+			"private_subnet": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+
+			"public_ipv4_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"public_ipv4_address_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"private_ipv4_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"private_ipv4_address_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"secondary_ip_count": {
+				Type:         schema.TypeInt,
+				Optional:     true,
+				ForceNew:     true,
+				ValidateFunc: validateSecondaryIPCount,
+				DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {
+					// secondary_ip_count is only used when the bare metal server is created.
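+					// Suppress the diff once the resource exists in state; the
+					// count cannot be changed in place after provisioning.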
+					if d.State() == nil {
+						return false
+					}
+					return true
+				},
+				Description: "Secondary IP addresses count",
+			},
+			"secondary_ip_addresses": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"ipv6_enabled": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				ForceNew:    true,
+				Default:     false,
+				Description: "Boolean value, true if IPv6 is enabled",
+			},
+			"ipv6_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"ipv6_address_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"ipv6_static_enabled": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				ForceNew:    true,
+				Default:     false,
+				Description: "Boolean value, true if static IPv6 is enabled",
+			},
+
+			"global_identifier": &schema.Schema{
+				Description: "The unique global identifier of the bare metal server",
+				Type:        schema.TypeString,
+				Computed:    true,
+			},
+		},
+	}
+}
+
+func getBareMetalOrderFromResourceData(d *schema.ResourceData, meta interface{}) (datatypes.Hardware, error) {
+	dc := datatypes.Location{
+		Name: sl.String(d.Get("datacenter").(string)),
+	}
+
+	networkComponent := datatypes.Network_Component{
+		MaxSpeed: sl.Int(d.Get("network_speed").(int)),
+	}
+
+	hardware := datatypes.Hardware{
+		Hostname:               sl.String(d.Get("hostname").(string)),
+		Domain:                 sl.String(d.Get("domain").(string)),
+		HourlyBillingFlag:      sl.Bool(d.Get("hourly_billing").(bool)),
+		PrivateNetworkOnlyFlag: sl.Bool(d.Get("private_network_only").(bool)),
+		Datacenter:             &dc,
+		NetworkComponents:      []datatypes.Network_Component{networkComponent},
+		PostInstallScriptUri:   sl.String(d.Get("post_install_script_uri").(string)),
+		BareMetalInstanceFlag:  sl.Int(1),
+		FixedConfigurationPreset: &datatypes.Product_Package_Preset{
+			KeyName: sl.String(d.Get("fixed_config_preset").(string)),
+		},
+	}
+
+	if operatingSystemReferenceCode, ok := d.GetOk("os_reference_code"); ok {
+		hardware.OperatingSystemReferenceCode = sl.String(operatingSystemReferenceCode.(string))
+	}
+
+	public_vlan_id := d.Get("public_vlan_id").(int)
+	if public_vlan_id > 0 {
+		hardware.PrimaryNetworkComponent = &datatypes.Network_Component{
+			NetworkVlan: &datatypes.Network_Vlan{Id: sl.Int(public_vlan_id)},
+		}
+	}
+
+	private_vlan_id := d.Get("private_vlan_id").(int)
+	if private_vlan_id > 0 {
+		hardware.PrimaryBackendNetworkComponent = &datatypes.Network_Component{
+			NetworkVlan: &datatypes.Network_Vlan{Id: sl.Int(private_vlan_id)},
+		}
+	}
+
+	if public_subnet, ok := d.GetOk("public_subnet"); ok {
+		subnet := public_subnet.(string)
+		subnetID, err := getSubnetID(subnet, meta)
+		if err != nil {
+			return hardware, fmt.Errorf("Error determining id for subnet %s: %s", subnet, err)
+		}
+
+		hardware.PrimaryNetworkComponent.NetworkVlan.PrimarySubnetId = sl.Int(subnetID)
+	}
+
+	if private_subnet, ok := d.GetOk("private_subnet"); ok {
+		subnet := private_subnet.(string)
+		subnetID, err := getSubnetID(subnet, meta)
+		if err != nil {
+			return hardware, fmt.Errorf("Error determining id for subnet %s: %s", subnet, err)
+		}
+
+		hardware.PrimaryBackendNetworkComponent.NetworkVlan.PrimarySubnetId = sl.Int(subnetID)
+	}
+
+	if userMetadata, ok := d.GetOk("user_metadata"); ok {
+		hardware.UserData = []datatypes.Hardware_Attribute{
+			{Value: sl.String(userMetadata.(string))},
+		}
+	}
+
+	// Get configured ssh_keys
+	ssh_key_ids := d.Get("ssh_key_ids").([]interface{})
+	if len(ssh_key_ids) > 0 {
+		hardware.SshKeys = make([]datatypes.Security_Ssh_Key, 0, len(ssh_key_ids))
+		for _, ssh_key_id := range ssh_key_ids {
+			hardware.SshKeys = 
append(hardware.SshKeys, datatypes.Security_Ssh_Key{ + Id: sl.Int(ssh_key_id.(int)), + }) + } + } + + return hardware, nil +} + +func resourceIBMComputeBareMetalCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + hwService := services.GetHardwareService(sess) + var order datatypes.Container_Product_Order + var err error + quote_id := d.Get("quote_id").(int) + hardware := datatypes.Hardware{ + Hostname: sl.String(d.Get("hostname").(string)), + Domain: sl.String(d.Get("domain").(string)), + } + + if quote_id > 0 { + // Build a bare metal template from the quote. + order, err = services.GetBillingOrderQuoteService(sess). + Id(quote_id).GetRecalculatedOrderContainer(nil, sl.Bool(false)) + if err != nil { + return fmt.Errorf( + "Encountered problem trying to get the bare metal order template from quote: %s", err) + } + order.Quantity = sl.Int(1) + order.Hardware = make([]datatypes.Hardware, 0, 1) + order.Hardware = append( + order.Hardware, + hardware, + ) + } else if _, ok := d.GetOk("fixed_config_preset"); ok { + // Build an hourly bare metal server template using fixed_config_preset. + hardware, err = getBareMetalOrderFromResourceData(d, meta) + if err != nil { + return err + } + order, err = services.GetHardwareService(sess).GenerateOrderTemplate(&hardware) + if err != nil { + return fmt.Errorf( + "Encountered problem trying to get the bare metal order template: %s", err) + } + items, err := product.GetPackageProducts(sess, *order.PackageId, productItemMaskWithPriceLocationGroupID) + if err != nil { + return err + } + redundantNetwork := d.Get("redundant_network").(bool) + unbondedNetwork := d.Get("unbonded_network").(bool) + + if redundantNetwork || unbondedNetwork { + // Remove network price + prices := make([]datatypes.Product_Item_Price, len(order.Prices)) + i := 0 + for _, p := range order.Prices { + if !strings.Contains(*p.Item.Description, "Network Uplink") { + prices[i] = p + i++ + } + } + portSpeed, err := findNetworkItemPriceId(items, d) + if err != nil { + return err + } + prices[i] = portSpeed + order.Prices = prices + } + err = setMonthlyHourlyCommonOrder(d, items, &order) + if err != nil { + return err + } + + } else { + // Build a monthly bare metal server template + order, err = getMonthlyBareMetalOrder(d, meta) + if err != nil { + return fmt.Errorf( + "Encountered problem trying to get the custom bare metal order template: %s", err) + } + } + + order, err = setCommonBareMetalOrderOptions(d, meta, order) + if err != nil { + return fmt.Errorf( + "Encountered problem trying to configure bare metal server options: %s", err) + } + + log.Println("[INFO] Ordering bare metal server") + orderReceipt, err := services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder(&order, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error ordering bare metal server: %s\n%+v\n", err, order) + } + + gID := *orderReceipt.OrderDetails.Hardware[0].GlobalIdentifier + + log.Printf("[INFO] Bare Metal Server ID: %s", d.Id()) + log.Printf("[INFO] Bare Metal Server global ID: %s", gID) + + // wait for machine availability + bm, err := waitForBareMetalProvision(&hardware, d, meta, gID) + if err != nil { + return fmt.Errorf( + "Error waiting for bare metal server (%s) to become ready: %s", d.Id(), err) + } + + id := *bm.(datatypes.Hardware).Id + d.SetId(fmt.Sprintf("%d", id)) + + // Set tags + if _, ok := d.GetOk("tags"); ok { + err = setHardwareTags(id, d, meta) + if err != nil { + return err + } + } + + var storageIds []int + if 
storageIdsSet := d.Get("file_storage_ids").(*schema.Set); len(storageIdsSet.List()) > 0 { + storageIds = expandIntList(storageIdsSet.List()) + + } + if storageIdsSet := d.Get("block_storage_ids").(*schema.Set); len(storageIdsSet.List()) > 0 { + storageIds = append(storageIds, expandIntList(storageIdsSet.List())...) + } + if len(storageIds) > 0 { + err := addAccessToStorageList(hwService.Id(id), id, storageIds, meta) + if err != nil { + return err + } + } + + // Set notes + if d.Get("notes").(string) != "" { + err = setHardwareNotes(id, d, meta) + if err != nil { + return err + } + } + + return resourceIBMComputeBareMetalRead(d, meta) +} + +func resourceIBMComputeBareMetalRead(d *schema.ResourceData, meta interface{}) error { + service := services.GetHardwareService(meta.(ClientSession).SoftLayerSession()) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(id).Mask( + "hostname,domain,globalIdentifier," + + "primaryIpAddress,primaryBackendIpAddress,privateNetworkOnlyFlag," + + "notes,userData[value],tagReferences[id,tag[name]]," + + "allowedNetworkStorage[id,nasType]," + + "hourlyBillingFlag," + + "datacenter[id,name,longName]," + + "primaryNetworkComponent[primarySubnet[networkVlan[id,primaryRouter,vlanNumber],id]," + + "primaryIpAddressRecord[id]," + + "primaryVersion6IpAddressRecord[subnet,id]]," + + "primaryBackendNetworkComponent[primarySubnet[networkVlan[id,primaryRouter,vlanNumber],id]," + + "primaryIpAddressRecord[id]," + + "maxSpeed,redundancyEnabledFlag]," + + "memoryCapacity,powerSupplyCount," + + "operatingSystem[softwareLicense[softwareDescription[referenceCode]]]", + ).GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving bare metal server: %s", err) + } + + d.Set("hostname", *result.Hostname) + d.Set("domain", *result.Domain) + d.Set("global_identifier", result.GlobalIdentifier) + + if result.Datacenter != nil { + d.Set("datacenter", *result.Datacenter.Name) + } + + d.Set("network_speed", *result.PrimaryNetworkComponent.MaxSpeed) + if result.PrimaryIpAddress != nil { + d.Set("public_ipv4_address", *result.PrimaryIpAddress) + } + if result.PrimaryNetworkComponent.PrimaryIpAddressRecord != nil { + d.Set("public_ipv4_address_id", *result.PrimaryNetworkComponent.PrimaryIpAddressRecord.Id) + } + d.Set("private_ipv4_address", *result.PrimaryBackendIpAddress) + d.Set("private_ipv4_address_id", + *result.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord.Id) + + d.Set("private_network_only", *result.PrivateNetworkOnlyFlag) + d.Set("hourly_billing", *result.HourlyBillingFlag) + + if result.PrimaryNetworkComponent.PrimarySubnet != nil { + d.Set("public_vlan_id", *result.PrimaryNetworkComponent.PrimarySubnet.NetworkVlan.Id) + d.Set("public_subnet", *result.PrimaryNetworkComponent.PrimarySubnet.Id) + } + + if result.PrimaryBackendNetworkComponent.PrimarySubnet != nil { + d.Set("private_vlan_id", *result.PrimaryBackendNetworkComponent.PrimarySubnet.NetworkVlan.Id) + d.Set("private_subnet", *result.PrimaryBackendNetworkComponent.PrimarySubnet.Id) + } + + userData := result.UserData + if len(userData) > 0 && userData[0].Value != nil { + d.Set("user_metadata", *userData[0].Value) + } + + d.Set("notes", sl.Get(result.Notes, nil)) + d.Set("memory", *result.MemoryCapacity) + + d.Set("redundant_power_supply", false) + + if *result.PowerSupplyCount == 2 { + d.Set("redundant_power_supply", true) + } + + d.Set("redundant_network", false) + d.Set("unbonded_network", false) + + 
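// Query the active backend network components: more than two active
+	// components indicates a bonded pair of private uplinks, reported as
+	// redundant or unbonded depending on the redundancy flag.
+	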
backendNetworkComponent, err := service.Filter( + filter.Build( + filter.Path("backendNetworkComponents.status").Eq("ACTIVE"), + ), + ).Id(id).GetBackendNetworkComponents() + + if err != nil { + return fmt.Errorf("Error retrieving bare metal server network: %s", err) + } + + if len(backendNetworkComponent) > 2 && result.PrimaryBackendNetworkComponent != nil { + if *result.PrimaryBackendNetworkComponent.RedundancyEnabledFlag { + d.Set("redundant_network", true) + } else { + d.Set("unbonded_network", true) + } + } + + if result.OperatingSystem != nil && + result.OperatingSystem.SoftwareLicense != nil && + result.OperatingSystem.SoftwareLicense.SoftwareDescription != nil && + result.OperatingSystem.SoftwareLicense.SoftwareDescription.ReferenceCode != nil { + d.Set("os_reference_code", *result.OperatingSystem.SoftwareLicense.SoftwareDescription.ReferenceCode) + } + + tagReferences := result.TagReferences + tagReferencesLen := len(tagReferences) + if tagReferencesLen > 0 { + tags := make([]string, 0, tagReferencesLen) + for _, tagRef := range tagReferences { + tags = append(tags, *tagRef.Tag.Name) + } + d.Set("tags", tags) + } + + storages := result.AllowedNetworkStorage + if len(storages) > 0 { + d.Set("block_storage_ids", flattenBlockStorageID(storages)) + d.Set("file_storage_ids", flattenFileStorageID(storages)) + } + + connInfo := map[string]string{"type": "ssh"} + if !*result.PrivateNetworkOnlyFlag && result.PrimaryIpAddress != nil { + connInfo["host"] = *result.PrimaryIpAddress + } else { + connInfo["host"] = *result.PrimaryBackendIpAddress + } + d.SetConnInfo(connInfo) + + d.Set("ipv6_enabled", false) + if result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord != nil { + d.Set("ipv6_enabled", true) + d.Set("ipv6_address", *result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.IpAddress) + d.Set("ipv6_address_id", *result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.Id) + } + err = readSecondaryIPAddresses(d, meta, result.PrimaryIpAddress) + return err + +} + +func resourceIBMComputeBareMetalUpdate(d *schema.ResourceData, meta interface{}) error { + id, _ := strconv.Atoi(d.Id()) + service := services.GetHardwareService(meta.(ClientSession).SoftLayerSession()) + + if d.HasChange("tags") { + err := setHardwareTags(id, d, meta) + if err != nil { + return err + } + } + + if d.HasChange("notes") { + err := setHardwareNotes(id, d, meta) + if err != nil { + return err + } + } + err := modifyStorageAccess(service.Id(id), id, meta, d) + if err != nil { + return err + } + + return nil +} + +func resourceIBMComputeBareMetalDelete(d *schema.ResourceData, meta interface{}) error { + return deleteHardware(d, meta) +} + +func deleteHardware(d dataRetriever, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetHardwareService(sess) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + _, err = waitForNoBareMetalActiveTransactions(id, meta) + if err != nil { + return fmt.Errorf("Error deleting bare metal server while waiting for zero active transactions: %s", err) + } + + billingItem, err := service.Id(id).GetBillingItem() + if err != nil { + return fmt.Errorf("Error getting billing item for bare metal server: %s", err) + } + + // Monthly bare metal servers only support an anniversary date cancellation option. 
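+	// The first CancelItem argument controls immediate cancellation: hourly
+	// servers are cancelled right away, monthly servers on their anniversary
+	// date; the remaining arguments cancel associated items and record a
+	// reason and customer note.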
+	billingItemService := services.GetBillingItemService(sess)
+	_, err = billingItemService.Id(*billingItem.Id).CancelItem(
+		sl.Bool(d.Get("hourly_billing").(bool)), sl.Bool(true), sl.String("No longer required"), sl.String("Please cancel this server"),
+	)
+	if err != nil {
+		return fmt.Errorf("Error canceling the bare metal server (%d): %s", id, err)
+	}
+
+	return nil
+}
+
+func resourceIBMComputeBareMetalExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	service := services.GetHardwareService(meta.(ClientSession).SoftLayerSession())
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(id).GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); !ok || apiErr.StatusCode != 404 {
+			return false, fmt.Errorf("Error trying to retrieve the Bare Metal server: %s", err)
+		}
+	}
+
+	return result.Id != nil && *result.Id == id, nil
+}
+
+// Bare metal creation does not return a bare metal object with an Id.
+// We have to wait for the provision date to become available on the server
+// matching the hostname and domain.
+// http://sldn.softlayer.com/blog/bpotter/ordering-bare-metal-servers-using-softlayer-api
+func waitForBareMetalProvision(hw *datatypes.Hardware, d *schema.ResourceData, meta interface{}, globalIdentifier string) (interface{}, error) {
+	hostname := *hw.Hostname
+	domain := *hw.Domain
+	log.Printf("Waiting for server (%s.%s) to be provisioned", hostname, domain)
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"retry", "pending"},
+		Target:  []string{"provisioned"},
+		Refresh: func() (interface{}, string, error) {
+			sess := meta.(ClientSession).SoftLayerSession()
+			service := services.GetAccountService(sess)
+			bms, err := service.Filter(
+				filter.Build(
+					filter.Path("hardware.globalIdentifier").Eq(globalIdentifier))).Mask("id,provisionDate").GetHardware()
+			if err != nil {
+				return false, "retry", nil
+			}
+
+			if len(bms) == 0 || bms[0].ProvisionDate == nil {
+				return datatypes.Hardware{}, "pending", nil
+			}
+			// Check secondary IP address availability.
+			if d.Get("secondary_ip_count").(int) > 0 {
+				log.Println("Refreshing secondary IPs state.")
+				secondarySubnetResult, err := services.GetAccountService(sess).
+					Mask("ipAddresses[id,ipAddress]").
+					Filter(filter.Build(filter.Path("publicSubnets.endPointIpAddress.hardware.id").Eq(bms[0].Id))).
+ GetPublicSubnets() + if err != nil { + return nil, "", fmt.Errorf("Error retrieving secondary ip address: %s", err) + } + if len(secondarySubnetResult) == 0 { + return datatypes.Hardware{}, "pending", nil + } + } + + return bms[0], "provisioned", nil + + }, + Timeout: 24 * time.Hour, + Delay: 10 * time.Second, + MinTimeout: 1 * time.Minute, + NotFoundChecks: 24 * 60, + } + + return stateConf.WaitForState() +} + +func waitForNoBareMetalActiveTransactions(id int, meta interface{}) (interface{}, error) { + log.Printf("Waiting for server (%d) to have zero active transactions", id) + service := services.GetHardwareServerService(meta.(ClientSession).SoftLayerSession()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", "active"}, + Target: []string{"idle"}, + Refresh: func() (interface{}, string, error) { + bm, err := service.Id(id).Mask("id,activeTransactionCount").GetObject() + if err != nil { + return false, "retry", nil + } + + if bm.ActiveTransactionCount != nil && *bm.ActiveTransactionCount == 0 { + return bm, "idle", nil + } + return bm, "active", nil + + }, + Timeout: 24 * time.Hour, + Delay: 10 * time.Second, + MinTimeout: 1 * time.Minute, + NotFoundChecks: 24 * 60, + } + + return stateConf.WaitForState() +} + +func setHardwareTags(id int, d dataRetriever, meta interface{}) error { + service := services.GetHardwareService(meta.(ClientSession).SoftLayerSession()) + + tags := getTags(d) + _, err := service.Id(id).SetTags(sl.String(tags)) + if err != nil { + return fmt.Errorf("Could not set tags on bare metal server %d", id) + } + + return nil +} + +func setHardwareNotes(id int, d dataRetriever, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetHardwareServerService(sess) + + result, err := service.Id(id).GetObject() + if err != nil { + return err + } + + result.Notes = sl.String(d.Get("notes").(string)) + + _, err = service.Id(id).EditObject(&result) + if err != nil { + return err + } + + return nil +} + +// Returns a price from an item list. 
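+// A price matches when the item's category code and key name agree with the
+// arguments; the optional capacity argument further restricts the match to prices
+// whose CORE/PROCESSOR capacity restriction range covers that value.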
+// Example usage: getItemPriceId(items, 'server', 'INTEL_XEON_2690_2_60')
+func getItemPriceId(items []datatypes.Product_Item, categoryCode string, keyName string, capacity ...int) (datatypes.Product_Item_Price, error) {
+	availableItems := ""
+	for _, item := range items {
+		for _, itemCategory := range item.Categories {
+			if *itemCategory.CategoryCode == categoryCode {
+				availableItems = availableItems + *item.KeyName + " ( " + *item.Description + " ) , "
+				if *item.KeyName == keyName {
+					for _, price := range item.Prices {
+						capacityMin := -1
+						capacityMax := -1
+						var err error
+
+						if price.CapacityRestrictionMinimum != nil && price.CapacityRestrictionMaximum != nil && (*price.CapacityRestrictionType == "CORE" || *price.CapacityRestrictionType == "PROCESSOR") {
+							capacityMin, err = strconv.Atoi(*price.CapacityRestrictionMinimum)
+							if err != nil {
+								return datatypes.Product_Item_Price{}, err
+							}
+							capacityMax, err = strconv.Atoi(*price.CapacityRestrictionMaximum)
+							if err != nil {
+								return datatypes.Product_Item_Price{}, err
+							}
+						}
+						for _, category := range price.Categories {
+							if *category.CategoryCode == categoryCode && price.LocationGroupId == nil && !*price.BareMetalReservedCapacityFlag {
+								if len(capacity) > 0 && capacityMin != -1 && capacityMax != -1 {
+									if capacity[0] >= capacityMin && capacity[0] <= capacityMax {
+										return datatypes.Product_Item_Price{Id: price.Id}, nil
+									}
+								} else {
+									return datatypes.Product_Item_Price{Id: price.Id}, nil
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	return datatypes.Product_Item_Price{},
+		fmt.Errorf("Could not find the matching item with category code %s and key name %s. Available item(s): %s", categoryCode, keyName, availableItems)
+}
+
+func getMonthlyBareMetalOrder(d *schema.ResourceData, meta interface{}) (datatypes.Container_Product_Order, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	// Validate attributes for monthly bare metal server ordering.
+	if d.Get("hourly_billing").(bool) {
+		return datatypes.Container_Product_Order{}, fmt.Errorf("Monthly bare metal servers only support monthly billing.")
+	}
+
+	model, ok := d.GetOk("package_key_name")
+	if !ok {
+		return datatypes.Container_Product_Order{}, fmt.Errorf("The attribute 'package_key_name' is not defined.")
+	}
+
+	datacenter, ok := d.GetOk("datacenter")
+	if !ok {
+		return datatypes.Container_Product_Order{}, fmt.Errorf("The attribute 'datacenter' is not defined.")
+	}
+
+	osKeyName, ok := d.GetOk("os_key_name")
+	if !ok {
+		return datatypes.Container_Product_Order{}, fmt.Errorf("The attribute 'os_key_name' is not defined.")
+	}
+
+	dc, err := location.GetDatacenterByName(sess, datacenter.(string), "id")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	// 1. Find a package id using the monthly bare metal package key name.
+	pkg, err := getPackageByModel(sess, model.(string))
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	if pkg.Id == nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	// 2. Get all prices for the package
+	items, err := product.GetPackageProducts(sess, *pkg.Id, productItemMaskWithPriceLocationGroupID)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+	// 3. Build price items
+	server, err := getItemPriceId(items, "server", d.Get("process_key_name").(string))
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	restrictionType := getCapacityRestrictionType(items, "os", osKeyName.(string))
+	coreCapacity := getCoreCapacity(items, "server", d.Get("process_key_name").(string), restrictionType)
+
+	var os datatypes.Product_Item_Price
+	if coreCapacity == -1 {
+		os, err = getItemPriceId(items, "os", osKeyName.(string))
+	} else {
+		os, err = getItemPriceId(items, "os", osKeyName.(string), coreCapacity)
+	}
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	ram, err := findMemoryItemPriceId(items, d)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	portSpeed, err := findNetworkItemPriceId(items, d)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	order := datatypes.Container_Product_Order{
+		Quantity: sl.Int(1),
+		Hardware: []datatypes.Hardware{{
+			Hostname: sl.String(d.Get("hostname").(string)),
+			Domain:   sl.String(d.Get("domain").(string)),
+		},
+		},
+		Location:  sl.String(strconv.Itoa(*dc.Id)),
+		PackageId: pkg.Id,
+		Prices: []datatypes.Product_Item_Price{
+			server,
+			os,
+			ram,
+			portSpeed,
+		},
+	}
+
+	if d.Get("tcp_monitoring").(bool) {
+		monitoring, err := getItemPriceId(items, "monitoring", "MONITORING_HOST_PING_AND_TCP_SERVICE")
+		if err != nil {
+			return datatypes.Container_Product_Order{}, err
+		}
+		order.Prices = append(order.Prices, monitoring)
+	}
+
+	order = addCommonDefaultPrices(d, meta, order, items)
+
+	// Add optional price ids.
+	// Add public bandwidth
+	privateNetworkOnly := d.Get("private_network_only").(bool)
+	if publicBandwidth, ok := d.GetOk("public_bandwidth"); ok || privateNetworkOnly {
+		publicBandwidthStr := "BANDWIDTH_" + strconv.Itoa(publicBandwidth.(int)) + "_GB"
+		bandwidth, err := getItemPriceId(items, "bandwidth", publicBandwidthStr)
+		if err != nil {
+			return datatypes.Container_Product_Order{}, err
+		}
+		order.Prices = append(order.Prices, bandwidth)
+	}
+
+	// Add prices of disks.
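+	// Note: disk key names are matched positionally; the i-th entry uses the
+	// slot-indexed category code "disk<i>" (disk0, disk1, ...).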
+ disks := d.Get("disk_key_names").([]interface{}) + diskLen := len(disks) + if diskLen > 0 { + for i, disk := range disks { + diskPrice, err := getItemPriceId(items, "disk"+strconv.Itoa(i), disk.(string)) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + order.Prices = append(order.Prices, diskPrice) + } + } + + if _, ok := d.GetOk("storage_groups"); ok { + order.StorageGroups = getStorageGroupsFromResourceData(d) + diskController, err := getItemPriceId(items, "disk_controller", "DISK_CONTROLLER_RAID") + if err != nil { + return datatypes.Container_Product_Order{}, err + } + order.Prices = append(order.Prices, diskController) + } + + err = setMonthlyHourlyCommonOrder(d, items, &order) + if err != nil { + return order, err + } + return order, nil +} + +func setMonthlyHourlyCommonOrder(d *schema.ResourceData, items []datatypes.Product_Item, order *datatypes.Container_Product_Order) error { + if d.Get("redundant_power_supply").(bool) { + powerSupply, err := getItemPriceId(items, "power_supply", "REDUNDANT_POWER_SUPPLY") + if err != nil { + return err + } + order.Prices = append(order.Prices, powerSupply) + } + if d.Get("software_guard_extensions").(bool) { + sgx, err := getItemPriceId(items, "software_guard_extensions", "SOFTWARE_GUARD_EXTENSIONS") + if err != nil { + return err + } + order.Prices = append(order.Prices, sgx) + } + if gpu0, ok := d.GetOk("gpu_key_name"); ok { + gpu0Price, err := getItemPriceId(items, "gpu0", gpu0.(string)) + if err != nil { + return err + } + order.Prices = append(order.Prices, gpu0Price) + } + + if gpu1, ok := d.GetOk("gpu_secondary_key_name"); ok { + gpu1Price, err := getItemPriceId(items, "gpu1", gpu1.(string)) + if err != nil { + return err + } + order.Prices = append(order.Prices, gpu1Price) + } + + secondaryIPCount := d.Get("secondary_ip_count").(int) + privateNetworkOnly := d.Get("private_network_only").(bool) + if secondaryIPCount > 0 { + if privateNetworkOnly { + return fmt.Errorf("Unable to configure public secondary addresses with a private_network_only option") + } + keyName := strconv.Itoa(secondaryIPCount) + "_PUBLIC_IP_ADDRESSES" + + price, err := getItemPriceId(items, "sec_ip_addresses", keyName) + if err != nil { + return err + } + order.Prices = append(order.Prices, price) + } + + if d.Get("ipv6_enabled").(bool) { + if privateNetworkOnly { + return fmt.Errorf("Unable to configure a public IPv6 address with a private_network_only option") + } + keyName := "1_IPV6_ADDRESS" + + price, err := getItemPriceId(items, "pri_ipv6_addresses", keyName) + if err != nil { + return err + } + order.Prices = append(order.Prices, price) + } + + if d.Get("ipv6_static_enabled").(bool) { + if privateNetworkOnly { + return fmt.Errorf("Unable to configure a public static IPv6 address with a private_network_only option") + } + keyName := "64_BLOCK_STATIC_PUBLIC_IPV6_ADDRESSES" + + price, err := getItemPriceId(items, "static_ipv6_addresses", keyName) + if err != nil { + return err + } + order.Prices = append(order.Prices, price) + } + return nil +} + +// Set common parameters for server ordering. 
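+// It applies extended hardware testing, VLAN/subnet placement, user metadata,
+// SSH keys, an optional image template ID, and a post-install script URI.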
+func setCommonBareMetalOrderOptions(d *schema.ResourceData, meta interface{}, order datatypes.Container_Product_Order) (datatypes.Container_Product_Order, error) { + + extendedHardwareTesting := d.Get("extended_hardware_testing").(bool) + order.ExtendedHardwareTesting = sl.Bool(extendedHardwareTesting) + + public_vlan_id := d.Get("public_vlan_id").(int) + if public_vlan_id > 0 { + order.Hardware[0].PrimaryNetworkComponent = &datatypes.Network_Component{ + NetworkVlan: &datatypes.Network_Vlan{Id: sl.Int(public_vlan_id)}, + } + } + + private_vlan_id := d.Get("private_vlan_id").(int) + if private_vlan_id > 0 { + order.Hardware[0].PrimaryBackendNetworkComponent = &datatypes.Network_Component{ + NetworkVlan: &datatypes.Network_Vlan{Id: sl.Int(private_vlan_id)}, + } + } + + if public_subnet, ok := d.GetOk("public_subnet"); ok { + subnet := public_subnet.(string) + subnetId, err := getSubnetId(subnet, meta) + if err != nil { + return datatypes.Container_Product_Order{}, fmt.Errorf("Error determining id for subnet %s: %s", subnet, err) + } + + order.Hardware[0].PrimaryNetworkComponent.NetworkVlan.PrimarySubnetId = sl.Int(subnetId) + } + + if private_subnet, ok := d.GetOk("private_subnet"); ok { + subnet := private_subnet.(string) + subnetId, err := getSubnetId(subnet, meta) + if err != nil { + return datatypes.Container_Product_Order{}, fmt.Errorf("Error determining id for subnet %s: %s", subnet, err) + } + + order.Hardware[0].PrimaryBackendNetworkComponent.NetworkVlan.PrimarySubnetId = sl.Int(subnetId) + } + + if userMetadata, ok := d.GetOk("user_metadata"); ok { + order.Hardware[0].UserData = []datatypes.Hardware_Attribute{ + {Value: sl.String(userMetadata.(string))}, + } + } + + // Get configured ssh_keys + ssh_key_ids := d.Get("ssh_key_ids").([]interface{}) + if len(ssh_key_ids) > 0 { + order.SshKeys = make([]datatypes.Container_Product_Order_SshKeys, 0, len(ssh_key_ids)) + for _, ssh_key_id := range ssh_key_ids { + sshKeyA := make([]int, 1) + sshKeyA[0] = ssh_key_id.(int) + order.SshKeys = append(order.SshKeys, datatypes.Container_Product_Order_SshKeys{ + SshKeyIds: sshKeyA, + }) + } + } + + // Set image template id if it exists + if rawImageTemplateId, ok := d.GetOk("image_template_id"); ok { + imageTemplateId := rawImageTemplateId.(int) + order.ImageTemplateId = sl.Int(imageTemplateId) + } + + if postInstallURI, ok := d.GetOk("post_install_script_uri"); ok { + postInstallURIA := make([]string, 1) + postInstallURIA[0] = postInstallURI.(string) + order.ProvisionScripts = postInstallURIA + } + + return order, nil +} + +// Find price item using network options +func findNetworkItemPriceId(items []datatypes.Product_Item, d dataRetriever) (datatypes.Product_Item_Price, error) { + networkSpeed := d.Get("network_speed").(int) + redundantNetwork := d.Get("redundant_network").(bool) + unbondedNetwork := d.Get("unbonded_network").(bool) + restrictedNetwork := d.Get("restricted_network").(bool) + privateNetworkOnly := d.Get("private_network_only").(bool) + + networkSpeedStr := "_MBPS_" + redundantNetworkStr := "" + unbondedNetworkStr := "" + restrictedNetworkStr := "" + + if networkSpeed < 1000 { + networkSpeedStr = strconv.Itoa(networkSpeed) + networkSpeedStr + } else { + networkSpeedStr = strconv.Itoa(networkSpeed/1000) + "_GBPS" + } + if redundantNetwork { + redundantNetworkStr = "_REDUNDANT" + } + + if unbondedNetwork { + unbondedNetworkStr = "_UNBONDED" + } + + if restrictedNetwork { + restrictedNetworkStr = "_NON_DATACENTER_RESTRICTED" + } + + for _, item := range items { + for _, itemCategory 
:= range item.Categories { + if *itemCategory.CategoryCode == "port_speed" && + strings.HasPrefix(*item.KeyName, networkSpeedStr) && + strings.Contains(*item.KeyName, redundantNetworkStr) && + strings.Contains(*item.KeyName, restrictedNetworkStr) && + strings.Contains(*item.KeyName, unbondedNetworkStr) { + if (privateNetworkOnly && strings.Contains(*item.KeyName, "_PUBLIC_PRIVATE")) || + (!privateNetworkOnly && !strings.Contains(*item.KeyName, "_PUBLIC_PRIVATE")) || + (!unbondedNetwork && strings.Contains(*item.KeyName, "_UNBONDED")) || + (!redundantNetwork && strings.Contains(*item.KeyName, "_REDUNDANT")) || + (!restrictedNetwork && strings.Contains(*item.KeyName, "_DATACENTER_RESTRICTED")) { + break + } + for _, price := range item.Prices { + if price.LocationGroupId == nil && !*price.BareMetalReservedCapacityFlag { + return datatypes.Product_Item_Price{Id: price.Id}, nil + } + } + } + } + } + return datatypes.Product_Item_Price{}, + fmt.Errorf("Could not find the network with %s, %s, %s, and private_network_only = %t", + networkSpeedStr, redundantNetworkStr, unbondedNetworkStr, privateNetworkOnly) +} + +// Find memory price item using memory size. +func findMemoryItemPriceId(items []datatypes.Product_Item, d dataRetriever) (datatypes.Product_Item_Price, error) { + memory := d.Get("memory").(int) + availableMemories := "" + + for _, item := range items { + for _, itemCategory := range item.Categories { + if *itemCategory.CategoryCode == "ram" { + availableMemories = availableMemories + *item.KeyName + "(" + *item.Description + ")" + ", " + if int(*item.Capacity) == memory { + for _, price := range item.Prices { + if price.LocationGroupId == nil && !*price.BareMetalReservedCapacityFlag { + return datatypes.Product_Item_Price{Id: price.Id}, nil + } + } + } + } + } + } + + return datatypes.Product_Item_Price{}, + fmt.Errorf("Could not find the price item for %d GB memory. Available items are %s", memory, availableMemories) +} + +// Find a bare metal package object using a package key name +func getPackageByModel(sess *session.Session, model string) (datatypes.Product_Package, error) { + objectMask := "id,keyName,name,description,isActive,type[keyName]" + service := services.GetProductPackageService(sess) + availableModels := "" + + // Get package id + packages, err := service.Mask(objectMask). + Filter( + filter.Build( + filter.Path("type.keyName").Eq("BARE_METAL_CPU"), + ), + ).GetAllObjects() + if err != nil { + return datatypes.Product_Package{}, err + } + + for _, pkg := range packages { + availableModels = availableModels + *pkg.KeyName + if pkg.Description != nil { + availableModels = availableModels + " ( " + *pkg.Description + " ), " + } else { + availableModels = availableModels + ", " + } + if *pkg.KeyName == model { + return pkg, nil + } + } + + return datatypes.Product_Package{}, fmt.Errorf("No custom bare metal package key name for %s. 
Available package key name(s): %s", model, availableModels)
+}
+
+func getStorageGroupsFromResourceData(d dataRetriever) []datatypes.Container_Product_Order_Storage_Group {
+	storageGroupLists := d.Get("storage_groups").([]interface{})
+	storageGroups := make([]datatypes.Container_Product_Order_Storage_Group, 0)
+
+	for _, storageGroupList := range storageGroupLists {
+		storageGroup := storageGroupList.(map[string]interface{})
+		var storageGroupObj datatypes.Container_Product_Order_Storage_Group
+		storageGroupObj.ArrayTypeId = sl.Int(storageGroup["array_type_id"].(int))
+		hardDrives := storageGroup["hard_drives"].([]interface{})
+		storageGroupObj.HardDrives = make([]int, 0, len(hardDrives))
+		for _, hardDrive := range hardDrives {
+			storageGroupObj.HardDrives = append(storageGroupObj.HardDrives, hardDrive.(int))
+		}
+		arraySize := storageGroup["array_size"].(int)
+		if arraySize > 0 {
+			storageGroupObj.ArraySize = sl.Float(float64(arraySize))
+		}
+		partitionTemplateId := storageGroup["partition_template_id"].(int)
+		if partitionTemplateId > 0 {
+			storageGroupObj.PartitionTemplateId = sl.Int(partitionTemplateId)
+		}
+		storageGroups = append(storageGroups, storageGroupObj)
+	}
+	return storageGroups
+}
+
+// Use this function for attributes that should only be applied at resource creation time.
+func applyOnce(k, o, n string, d *schema.ResourceData) bool {
+	if len(d.Id()) == 0 {
+		return false
+	}
+	return true
+}
+
+func addCommonDefaultPrices(d *schema.ResourceData, meta interface{}, order datatypes.Container_Product_Order, items []datatypes.Product_Item) datatypes.Container_Product_Order {
+	if !d.Get("tcp_monitoring").(bool) {
+		monExists, monitoring := getCommonItemPriceID(items, "monitoring", "MONITORING_HOST_PING")
+		if monExists {
+			order.Prices = append(order.Prices, monitoring)
+		}
+	}
+
+	priExists, priIPAddress := getCommonItemPriceID(items, "pri_ip_addresses", "1_IP_ADDRESS")
+	if priExists {
+		order.Prices = append(order.Prices, priIPAddress)
+	}
+
+	remotExists, remoteManagement := getCommonItemPriceID(items, "remote_management", "REBOOT_KVM_OVER_IP")
+	if remotExists {
+		order.Prices = append(order.Prices, remoteManagement)
+	}
+
+	vpnExists, vpnManagement := getCommonItemPriceID(items, "vpn_management", "UNLIMITED_SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT")
+	if vpnExists {
+		order.Prices = append(order.Prices, vpnManagement)
+	}
+
+	notificationExists, notification := getCommonItemPriceID(items, "notification", "NOTIFICATION_EMAIL_AND_TICKET")
+	if notificationExists {
+		order.Prices = append(order.Prices, notification)
+	}
+
+	resExists, response := getCommonItemPriceID(items, "response", "AUTOMATED_NOTIFICATION")
+	if resExists {
+		order.Prices = append(order.Prices, response)
+	}
+
+	vulExists, vulnerabilityScanner := getCommonItemPriceID(items, "vulnerability_scanner", "NESSUS_VULNERABILITY_ASSESSMENT_REPORTING")
+	if vulExists {
+		order.Prices = append(order.Prices, vulnerabilityScanner)
+	}
+
+	sapExists, sapCertified := getCommonItemPriceID(items, "sap_certified_server", "SAP_HANA_NETWEAVER_CERTIFIED_SERVERS")
+	if sapExists {
+		order.Prices = append(order.Prices, sapCertified)
+	}
+
+	if _, ok := d.GetOk("storage_groups"); !ok {
+		diskExists, diskController := getCommonItemPriceID(items, "disk_controller", "DISK_CONTROLLER_NONRAID")
+		if diskExists {
+			order.Prices = append(order.Prices, diskController)
+		}
+	}
+
+	return order
+}
+
+// Returns a common default price from an item list.
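+// Unlike getItemPriceId, a missing match is reported through the boolean result
+// rather than an error, so callers can treat these default prices as optional.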
+// Example usage: getCommonItemPriceID(items, 'server', 'INTEL_XEON_2690_2_60')
+func getCommonItemPriceID(items []datatypes.Product_Item, categoryCode string, keyName string) (bool, datatypes.Product_Item_Price) {
+	availableItems := ""
+	for _, item := range items {
+		for _, itemCategory := range item.Categories {
+			if *itemCategory.CategoryCode == categoryCode {
+				availableItems = availableItems + *item.KeyName + " ( " + *item.Description + " ) , "
+				if *item.KeyName == keyName {
+					for _, price := range item.Prices {
+						for _, category := range price.Categories {
+							if *category.CategoryCode == categoryCode && price.LocationGroupId == nil {
+								return true, datatypes.Product_Item_Price{Id: price.Id}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	return false, datatypes.Product_Item_Price{}
+}
+
+func getCoreCapacity(items []datatypes.Product_Item, categoryCode string, keyName string, restrictionType string) int {
+	availableItems := ""
+	for _, item := range items {
+		for _, itemCategory := range item.Categories {
+			if *itemCategory.CategoryCode == categoryCode {
+				availableItems = availableItems + *item.KeyName + " ( " + *item.Description + " ) , "
+				if *item.KeyName == keyName {
+					if restrictionType == "PROCESSOR" && item.TotalProcessorCapacity != nil {
+						return *item.TotalProcessorCapacity
+					}
+					if restrictionType == "CORE" && item.TotalPhysicalCoreCapacity != nil {
+						return *item.TotalPhysicalCoreCapacity
+					}
+				}
+			}
+		}
+	}
+	return -1
+}
+
+func getCapacityRestrictionType(items []datatypes.Product_Item, categoryCode string, keyName string) string {
+	for _, item := range items {
+		for _, itemCategory := range item.Categories {
+			if *itemCategory.CategoryCode == categoryCode {
+				if *item.KeyName == keyName {
+					for _, price := range item.Prices {
+						if price.CapacityRestrictionType != nil {
+							return *price.CapacityRestrictionType
+						}
+					}
+				}
+			}
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_dedicated_host.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_dedicated_host.go
new file mode 100644
index 00000000000..a10b2d07839
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_dedicated_host.go
@@ -0,0 +1,329 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/helpers/hardware"
+	"github.com/softlayer/softlayer-go/helpers/location"
+	"github.com/softlayer/softlayer-go/helpers/product"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+var dedicatedHostPackageType = "DEDICATED_HOST"
+
+func resourceIBMComputeDedicatedHost() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMComputeDedicatedHostCreate,
+		Read:     resourceIBMComputeDedicatedHostRead,
+		Delete:   resourceIBMComputeDedicatedHostDelete,
+		Exists:   resourceIBMComputeDedicatedHostExists,
+		Update:   resourceIBMComputeDedicatedHostUpdate,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"hostname": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The host name of the dedicated host.",
+			},
+			"domain": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The domain of the dedicated host.",
+			},
+			"datacenter": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The data center in which the dedicated host is to be provisioned.",
+			},
+			"flavor": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Default:     "56_CORES_X_242_RAM_X_1_4_TB",
+				ForceNew:    true,
+				Description: "The flavor of the dedicated host.",
+			},
+			"hourly_billing": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     true,
+				ForceNew:    true,
+				Description: "The billing type for the dedicated host.",
+			},
+			"router_hostname": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The hostname of the primary router that the dedicated host is associated with.",
+			},
+			"cpu_count": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The capacity that the dedicated host's CPU allocation is restricted to.",
+			},
+			"disk_capacity": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The capacity that the dedicated host's disk allocation is restricted to.",
+			},
+			"memory_capacity": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The capacity that the dedicated host's memory allocation is restricted to.",
+			},
+			"wait_time_minutes": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Default:  90,
+			},
+			"tags": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+		},
+	}
+}
+
+func resourceIBMComputeDedicatedHostCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	pkg, err := product.GetPackageByType(sess, dedicatedHostPackageType)
+	if err != nil {
+		return err
+	}
+
+	datacenter := d.Get("datacenter").(string)
+	router := d.Get("router_hostname").(string)
+	flavor := d.Get("flavor").(string)
+
+	// Lookup the data center ID
+	dc, err := location.GetDatacenterByName(sess, datacenter)
+	if err != nil {
+		return fmt.Errorf("No data centers matching %s could be found", datacenter)
+	}
+
+	rt, err := hardware.GetRouterByName(sess, router, "id")
+	if err != nil {
+		return fmt.Errorf("Error creating dedicated host: %s", err)
+	}
+
+	primaryBackendNetworkComponent := datatypes.Network_Component{
+		Router: &datatypes.Hardware{
+			Id: rt.Id,
+		},
+	}
+
+	
hardware := datatypes.Hardware{ + Hostname: sl.String(d.Get("hostname").(string)), + Domain: sl.String(d.Get("domain").(string)), + HourlyBillingFlag: sl.Bool(d.Get("hourly_billing").(bool)), + PrimaryBackendNetworkComponent: &primaryBackendNetworkComponent, + } + + // 2. Get all prices for the package + productItems, err := product.GetPackageProducts(sess, *pkg.Id, productItemMaskWithPriceLocationGroupID) + if err != nil { + return err + } + + priceItems := []datatypes.Product_Item_Price{} + for _, item := range productItems { + if *item.KeyName == flavor { + for _, price := range item.Prices { + if price.LocationGroupId == nil { + priceItem := datatypes.Product_Item_Price{ + Id: price.Id, + } + priceItems = append(priceItems, priceItem) + break + } + } + + } + + } + + productOrderContainer := datatypes.Container_Product_Order_Virtual_DedicatedHost{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Location: sl.String(strconv.Itoa(*dc.Id)), + Prices: priceItems, + Quantity: sl.Int(1), + UseHourlyPricing: sl.Bool(true), + }, + } + + productOrderContainer.Hardware = make([]datatypes.Hardware, 0, 1) + productOrderContainer.Hardware = append( + productOrderContainer.Hardware, + hardware, + ) + + log.Println("[INFO] Creating dedicated host") + + //verify order + _, err = services.GetProductOrderService(sess.SetRetries(0)). + VerifyOrder(&productOrderContainer) + if err != nil { + return fmt.Errorf("Error during creation of dedicated host: %s", err) + } + //place order + _, err = services.GetProductOrderService(sess.SetRetries(0)). + PlaceOrder(&productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of dedicated host: %s", err) + } + + // wait for machine availability + dedicated, err := findDedicatedHostByOrderID(&hardware, d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for dedicated host (%s) to become ready: %s", d.Id(), err) + } + + id := *dedicated.(datatypes.Virtual_DedicatedHost).Id + d.SetId(fmt.Sprintf("%d", id)) + return resourceIBMComputeDedicatedHostRead(d, meta) +} + +func resourceIBMComputeDedicatedHostRead(d *schema.ResourceData, meta interface{}) error { + service := services.GetVirtualDedicatedHostService(meta.(ClientSession).SoftLayerSession()) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(id).Mask( + "name,cpuCount,datacenter,memoryCapacity,diskCapacity,backendRouter[hostname]", + ).GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving dedicated host: %s", err) + } + + d.Set("hostname", result.Name) + d.Set("datacenter", result.Datacenter.Name) + d.Set("cpu_count", result.CpuCount) + d.Set("disk_capacity", result.DiskCapacity) + d.Set("memory_capacity", result.MemoryCapacity) + d.Set("router_hostname", result.BackendRouter.Hostname) + return nil +} + +func resourceIBMComputeDedicatedHostUpdate(d *schema.ResourceData, meta interface{}) error { + + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetVirtualDedicatedHostService(sess.SetRetries(0)) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(id).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving dedicated host: %s", err) + } + + if d.HasChange("hostname") { + result.Name = sl.String(d.Get("hostname").(string)) + _, err = service.Id(id).EditObject(&result) + if err 
!= nil {
+			return fmt.Errorf("Couldn't update dedicated host: %s", err)
+		}
+	}
+	return resourceIBMComputeDedicatedHostRead(d, meta)
+}
+
+func resourceIBMComputeDedicatedHostDelete(d *schema.ResourceData, meta interface{}) error {
+	service := services.GetVirtualDedicatedHostService(meta.(ClientSession).SoftLayerSession())
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	ok, err := service.Id(id).DeleteObject()
+	if err != nil {
+		return fmt.Errorf("Error deleting dedicated host: %s", err)
+	}
+
+	if !ok {
+		return fmt.Errorf(
+			"API reported it was unsuccessful in removing the dedicated host '%d'", id)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMComputeDedicatedHostExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	service := services.GetVirtualDedicatedHostService(meta.(ClientSession).SoftLayerSession())
+	dedicatedID, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(dedicatedID).GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok {
+			if apiErr.StatusCode == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+
+	return result.Id != nil && *result.Id == dedicatedID, nil
+}
+
+func findDedicatedHostByOrderID(d *datatypes.Hardware, r *schema.ResourceData, meta interface{}) (interface{}, error) {
+	hostname := *d.Hostname
+
+	log.Printf("Waiting for dedicated host (%s) to be provisioned", hostname)
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"retry", "pending"},
+		Target:  []string{"provisioned"},
+		Refresh: func() (interface{}, string, error) {
+			service := services.GetAccountService(meta.(ClientSession).SoftLayerSession())
+			dedicatedHosts, err := service.Filter(
+				filter.Build(
+					filter.Path("dedicatedHosts.name").Eq(hostname),
+				),
+			).Mask("id,createDate").GetDedicatedHosts()
+			if err != nil {
+				return false, "retry", nil
+			}
+
+			if len(dedicatedHosts) == 0 || dedicatedHosts[0].CreateDate == nil {
+				return datatypes.Hardware{}, "pending", nil
+			}
+			return dedicatedHosts[0], "provisioned", nil
+		},
+		Timeout:    time.Duration(r.Get("wait_time_minutes").(int)) * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 1 * time.Minute,
+	}
+
+	return stateConf.WaitForState()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_monitor.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_monitor.go
new file mode 100644
index 00000000000..4160347aa6a
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_monitor.go
@@ -0,0 +1,303 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMComputeMonitor() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeMonitorCreate, + Read: resourceIBMComputeMonitorRead, + Update: resourceIBMComputeMonitorUpdate, + Delete: resourceIBMComputeMonitorDelete, + Exists: resourceIBMComputeMonitorExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "guest_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Guest ID", + }, + + "ip_address": { + Type: schema.TypeString, + Optional: true, + Description: "IP Address", + }, + + "query_type_id": { + Type: schema.TypeInt, + Required: true, + Description: "Query Type ID", + }, + + "response_action_id": { + Type: schema.TypeInt, + Required: true, + Description: "Response action ID", + }, + "wait_cycles": { + Type: schema.TypeInt, + Optional: true, + Description: "wait cycles count", + }, + "notified_users": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + Description: "List of users notified", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags", + }, + }, + } +} + +func resourceIBMComputeMonitorCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + virtualGuestService := services.GetVirtualGuestService(sess) + monitorService := services.GetNetworkMonitorVersion1QueryHostService(sess.SetRetries(0)) + + guestId := d.Get("guest_id").(int) + ipAddress := d.Get("ip_address").(string) + if ipAddress == "" { + virtualGuest, err := virtualGuestService.Id(guestId).GetObject() + if err != nil { + return fmt.Errorf("Error looking up virtual guest %d: %s", guestId, err) + } + + if virtualGuest.PrimaryIpAddress == nil { + return fmt.Errorf( + "No primary ip address found for virtual guest %d. 
Please specify it.", guestId)
+		}
+
+		ipAddress = *virtualGuest.PrimaryIpAddress
+	}
+
+	// Build up our creation options
+	opts := datatypes.Network_Monitor_Version1_Query_Host{
+		GuestId:          &guestId,
+		IpAddress:        &ipAddress,
+		QueryTypeId:      sl.Int(d.Get("query_type_id").(int)),
+		ResponseActionId: sl.Int(d.Get("response_action_id").(int)),
+	}
+	if wait_cycles, ok := d.GetOk("wait_cycles"); ok {
+		opts.WaitCycles = sl.Int(wait_cycles.(int))
+	}
+
+	// Create a monitor
+	res, err := monitorService.CreateObject(&opts)
+	if err != nil {
+		return fmt.Errorf("Error creating Basic Monitor: %s", err)
+	}
+
+	d.SetId(strconv.Itoa(*res.Id))
+	log.Printf("[INFO] Basic Monitor Id: %d", *res.Id)
+
+	err = createNotifications(d, meta, guestId)
+	if err != nil {
+		return err
+	}
+
+	return resourceIBMComputeMonitorRead(d, meta)
+}
+
+func createNotifications(d *schema.ResourceData, meta interface{}, guestId int) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	virtualGuestService := services.GetVirtualGuestService(sess)
+	notificationService := services.GetUserCustomerNotificationVirtualGuestService(sess.SetRetries(0))
+
+	// Create a user notification
+	// This represents a link between a monitored guest instance and a user account
+	notificationLinks, err := virtualGuestService.Id(guestId).GetMonitoringUserNotification()
+	if err != nil {
+		return fmt.Errorf("Error looking up user notifications for virtual guest %d", guestId)
+	}
+
+	userNotificationOpts := datatypes.User_Customer_Notification_Virtual_Guest{
+		GuestId: &guestId,
+	}
+	notifiedUsers := d.Get("notified_users").(*schema.Set)
+	for _, userId := range notifiedUsers.List() {
+		userNotificationOpts.UserId = sl.Int(userId.(int))
+		// Don't create the notification object if one already exists for the same user and VM
+		if !notificationExists(notificationLinks, userId.(int)) {
+			_, err := notificationService.CreateObject(&userNotificationOpts)
+			if err != nil {
+				return fmt.Errorf("Error creating notification for userID %d: %v", *userNotificationOpts.UserId, err)
+			}
+		}
+	}
+
+	return nil
+}
+
+func notificationExists(notificationLinks []datatypes.User_Customer_Notification_Virtual_Guest, userId int) bool {
+	for _, link := range notificationLinks {
+		if *link.UserId == userId {
+			return true
+		}
+	}
+
+	return false
+}
+
+func resourceIBMComputeMonitorRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkMonitorVersion1QueryHostService(sess)
+	virtualGuestService := services.GetVirtualGuestService(sess)
+
+	basicMonitorId, _ := strconv.Atoi(d.Id())
+
+	basicMonitor, err := service.Id(basicMonitorId).GetObject()
+	if err != nil {
+		// If the monitor is somehow already destroyed, mark it as
+		// successfully gone
+		if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+
+		return fmt.Errorf("Error retrieving Basic Monitor: %s", err)
+	}
+
+	guestId := *basicMonitor.GuestId
+
+	d.Set("guest_id", guestId)
+	d.Set("ip_address", strings.TrimSpace(*basicMonitor.IpAddress))
+	d.Set("query_type_id", basicMonitor.QueryTypeId)
+	d.Set("response_action_id", basicMonitor.ResponseActionId)
+	d.Set("wait_cycles", basicMonitor.WaitCycles)
+
+	notificationLinks, err := virtualGuestService.Id(guestId).GetMonitoringUserNotification()
+	if err != nil {
+		return fmt.Errorf("Error looking up user notifications for virtual guest %d", guestId)
+	}
+
+	notificationUserIds := schema.NewSet(func(v interface{}) int { return v.(int) },
make([]interface{}, 0, len(notificationLinks)))
+	for _, notificationLink := range notificationLinks {
+		notificationUserIds.Add(*notificationLink.UserId)
+	}
+
+	// Only check that the notified user IDs we already track are still present in
+	// SoftLayer; if any is missing, replace the list with the current remote set.
+	knownNotifiedUserIds := d.Get("notified_users").(*schema.Set)
+	if knownNotifiedUserIds != nil && knownNotifiedUserIds.Len() > 0 {
+		notifiedUserIds := notificationUserIds.List()
+		for _, knownNotifiedUserId := range knownNotifiedUserIds.List() {
+			match := false
+			for _, notifiedUserId := range notifiedUserIds {
+				if knownNotifiedUserId.(int) == notifiedUserId.(int) {
+					match = true
+					break
+				}
+			}
+
+			if !match {
+				d.Set("notified_users", notificationUserIds.List())
+				break
+			}
+		}
+	}
+
+	return nil
+}
+
+func resourceIBMComputeMonitorUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	serviceNoRetry := services.GetNetworkMonitorVersion1QueryHostService(sess.SetRetries(0))
+	service := services.GetNetworkMonitorVersion1QueryHostService(sess)
+
+	basicMonitorId, _ := strconv.Atoi(d.Id())
+	guestId := d.Get("guest_id").(int)
+
+	basicMonitor, err := service.Id(basicMonitorId).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving Basic Monitor: %s", err)
+	}
+	if d.HasChange("query_type_id") {
+		basicMonitor.QueryTypeId = sl.Int(d.Get("query_type_id").(int))
+	}
+	if d.HasChange("response_action_id") {
+		basicMonitor.ResponseActionId = sl.Int(d.Get("response_action_id").(int))
+	}
+	if d.HasChange("wait_cycles") {
+		basicMonitor.WaitCycles = sl.Int(d.Get("wait_cycles").(int))
+	}
+
+	_, err = serviceNoRetry.Id(basicMonitorId).EditObject(&basicMonitor)
+	if err != nil {
+		return fmt.Errorf("Error editing Basic Monitor: %s", err)
+	}
+
+	// Will only create notification objects for user/VM relationships that
+	// don't exist yet.
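+	// (createNotifications consults notificationExists and skips user IDs that
+	// already have a notification link for this guest.)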
+	err = createNotifications(d, meta, guestId)
+	if err != nil {
+		return err
+	}
+
+	return resourceIBMComputeMonitorRead(d, meta)
+}
+
+func resourceIBMComputeMonitorDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkMonitorVersion1QueryHostService(sess)
+
+	// Delete the basic monitor
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	log.Printf("[INFO] Deleting Basic Monitor: %d", id)
+	_, err = service.Id(id).DeleteObject()
+	if err != nil {
+		return fmt.Errorf("Error deleting Basic Monitor: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMComputeMonitorExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkMonitorVersion1QueryHostService(sess)
+
+	basicMonitorId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(basicMonitorId).GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok {
+			if apiErr.StatusCode == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error retrieving basic monitor info: %s", err)
+	}
+	return result.Id != nil && *result.Id == basicMonitorId, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_placement_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_placement_group.go
new file mode 100644
index 00000000000..b8989905341
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_placement_group.go
@@ -0,0 +1,242 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/filter"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMComputePlacementGroup() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMComputePlacementGroupCreate,
+		Read:     resourceIBMComputePlacementGroupRead,
+		Update:   resourceIBMComputePlacementGroupUpdate,
+		Delete:   resourceIBMComputePlacementGroupDelete,
+		Exists:   resourceIBMComputePlacementGroupExists,
+		Importer: &schema.ResourceImporter{},
+
+		Timeouts: &schema.ResourceTimeout{
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"datacenter": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Datacenter name",
+			},
+
+			"pod": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
+					return strings.TrimSpace(old) == strings.TrimSpace(new)
+				},
+				Description: "Pod name",
+			},
+
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Name",
+			},
+
+			"rule": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				Default:      "SPREAD",
+				ForceNew:     true,
+				ValidateFunc: validateAllowedStringValue([]string{"SPREAD"}),
+				Description:  "Rule info",
+			},
+
+			"tags": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Set:         schema.HashString,
+				Description: "List of tags",
+			},
+		},
+	}
+}
+
+func 
resourceIBMComputePlacementGroupCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + name := d.Get("name").(string) + datacenter := d.Get("datacenter").(string) + pod := d.Get("pod").(string) + podName := datacenter + "." + pod + PodService := services.GetNetworkPodService(sess) + podMask := `backendRouterId,name` + rule := d.Get("rule").(string) + + // 1.Getting the router ID + routerids, err := PodService.Filter(filter.Path("datacenterName").Eq(datacenter).Build()).Mask(podMask).GetAllObjects() + if err != nil { + return fmt.Errorf("Encountered problem trying to get the router ID: %s", err) + } + var routerid int + for _, iterate := range routerids { + if *iterate.Name == podName { + routerid = *iterate.BackendRouterId + } + } + + ruleService := services.GetVirtualPlacementGroupRuleService(sess) + ruleObject, err := ruleService.Id(1). + Mask("id,name"). + Filter(filter.Path("name").Eq(rule).Build()).GetObject() + if err != nil { + return fmt.Errorf("Encountered problem trying to get the placement group rule ID: %s", err) + } + + opts := datatypes.Virtual_PlacementGroup{ + Name: sl.String(name), + BackendRouterId: &routerid, + RuleId: ruleObject.Id, + } + + service := services.GetVirtualPlacementGroupService(sess) + + pgrp, err := service.CreateObject(&opts) + if err != nil { + return fmt.Errorf("Error creating Placement Group: %s", err) + } + + d.SetId(strconv.Itoa(*pgrp.Id)) + log.Printf("[INFO] Placement Group ID: %d", *pgrp.Id) + + return resourceIBMComputePlacementGroupRead(d, meta) +} + +func resourceIBMComputePlacementGroupRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetVirtualPlacementGroupService(sess) + + pgrpID, _ := strconv.Atoi(d.Id()) + + pgrp, err := service.Id(pgrpID).Mask("id,name,rule[name],backendRouter[hostname,datacenter[name]]").GetObject() + if err != nil { + if err, ok := err.(sl.Error); ok { + if err.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + } + return fmt.Errorf("Error retrieving Placement Group: %s", err) + } + + d.Set("name", pgrp.Name) + d.Set("datacenter", pgrp.BackendRouter.Datacenter.Name) + pod := strings.SplitAfter(*pgrp.BackendRouter.Hostname, ".")[0] + r, _ := regexp.Compile("[0-9]{2}") + pod = "pod" + r.FindString(pod) + d.Set("pod", pod) + d.Set("rule", pgrp.Rule.Name) + + return nil +} + +func resourceIBMComputePlacementGroupUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetVirtualPlacementGroupService(sess.SetRetries(0)) + + pgrpID, _ := strconv.Atoi(d.Id()) + + opts := datatypes.Virtual_PlacementGroup{} + + if d.HasChange("name") { + opts.Name = sl.String(d.Get("name").(string)) + _, err := service.Id(pgrpID).EditObject(&opts) + + if err != nil { + return fmt.Errorf("Error editing Placement Group: %s", err) + } + } + + return nil +} + +func resourceIBMComputePlacementGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetVirtualPlacementGroupService(sess) + + pgrpID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(pgrpID).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: 
%s", err) + } + return result.Id != nil && *result.Id == pgrpID, nil +} + +func resourceIBMComputePlacementGroupDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetVirtualPlacementGroupService(sess) + + pgrpID, err := strconv.Atoi(d.Id()) + log.Printf("[INFO] Deleting Placement Group: %d", pgrpID) + + const ( + noVms = "There are no vms on the Placement Group" + vmsStillOnPlacementGroup = "VMs are still present on the Placement Group" + ) + + //Wait till all the VMs are disconnected before trying to delete + stateConf := &resource.StateChangeConf{ + Target: []string{noVms}, + Pending: []string{vmsStillOnPlacementGroup}, + Timeout: d.Timeout(schema.TimeoutDelete), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + Refresh: func() (interface{}, string, error) { + vms, err := service.Id(pgrpID).GetGuests() + if err != nil { + log.Printf("[ERROR] Received error while fetching virtual guests on placement group to see if placement group can be cancelled now: %#v", err) + return vms, "Error", err + } + if len(vms) != 0 { + return vms, vmsStillOnPlacementGroup, nil + } + return vms, noVms, nil + }, + } + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + _, err = service.Id(pgrpID).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting Placement Group: %s", err) + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_provisioning_hook.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_provisioning_hook.go new file mode 100644 index 00000000000..fba929e1153 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_provisioning_hook.go @@ -0,0 +1,152 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"strconv"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMComputeProvisioningHook() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMComputeProvisioningHookCreate,
+		Read:     resourceIBMComputeProvisioningHookRead,
+		Update:   resourceIBMComputeProvisioningHookUpdate,
+		Delete:   resourceIBMComputeProvisioningHookDelete,
+		Exists:   resourceIBMComputeProvisioningHookExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Provisioning hook name",
+			},
+
+			"uri": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "URI of the hook",
+			},
+
+			"tags": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Set:         schema.HashString,
+				Description: "Tags associated with resource",
+			},
+		},
+	}
+}
+
+func resourceIBMComputeProvisioningHookCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetProvisioningHookService(sess.SetRetries(0))
+
+	opts := datatypes.Provisioning_Hook{
+		Name: sl.String(d.Get("name").(string)),
+		Uri:  sl.String(d.Get("uri").(string)),
+	}
+
+	hook, err := service.CreateObject(&opts)
+	if err != nil {
+		return fmt.Errorf("Error creating Provisioning Hook: %s", err)
+	}
+
+	d.SetId(strconv.Itoa(*hook.Id))
+	log.Printf("[INFO] Provisioning Hook ID: %d", *hook.Id)
+
+	return resourceIBMComputeProvisioningHookRead(d, meta)
+}
+
+func resourceIBMComputeProvisioningHookRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetProvisioningHookService(sess)
+
+	hookId, _ := strconv.Atoi(d.Id())
+
+	hook, err := service.Id(hookId).GetObject()
+	if err != nil {
+		if err, ok := err.(sl.Error); ok {
+			if err.StatusCode == http.StatusNotFound {
+				d.SetId("")
+				return nil
+			}
+		}
+		return fmt.Errorf("Error retrieving Provisioning Hook: %s", err)
+	}
+
+	d.Set("name", hook.Name)
+	d.Set("uri", hook.Uri)
+
+	return nil
+}
+
+func resourceIBMComputeProvisioningHookUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetProvisioningHookService(sess.SetRetries(0))
+
+	hookId, _ := strconv.Atoi(d.Id())
+
+	opts := datatypes.Provisioning_Hook{}
+
+	if d.HasChange("name") {
+		opts.Name = sl.String(d.Get("name").(string))
+	}
+
+	if d.HasChange("uri") {
+		opts.Uri = sl.String(d.Get("uri").(string))
+	}
+
+	opts.TypeId = sl.Int(1)
+	_, err := service.Id(hookId).EditObject(&opts)
+	if err != nil {
+		return fmt.Errorf("Error editing Provisioning Hook: %s", err)
+	}
+	return nil
+}
+
+func resourceIBMComputeProvisioningHookDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetProvisioningHookService(sess)
+
+	hookId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	log.Printf("[INFO] Deleting Provisioning Hook: %d", hookId)
+	_, err = service.Id(hookId).DeleteObject()
+	if err != nil {
+		return fmt.Errorf("Error deleting Provisioning Hook: %s", err)
+	}
+
+	return nil
+}
+
+func resourceIBMComputeProvisioningHookExists(d *schema.ResourceData, meta interface{}) 
(bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetProvisioningHookService(sess)
+
+	hookId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(hookId).GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok {
+			if apiErr.StatusCode == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+	return result.Id != nil && *result.Id == hookId, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_ssh_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_ssh_key.go
new file mode 100644
index 00000000000..f68924bbc82
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_ssh_key.go
@@ -0,0 +1,243 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMComputeSSHKey() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMComputeSSHKeyCreate,
+		Read:     resourceIBMComputeSSHKeyRead,
+		Update:   resourceIBMComputeSSHKeyUpdate,
+		Delete:   resourceIBMComputeSSHKeyDelete,
+		Exists:   resourceIBMComputeSSHKeyExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"label": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "SSH Key label",
+			},
+
+			"public_key": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
+					return strings.TrimSpace(old) == strings.TrimSpace(new)
+				},
+				Description: "Public key info",
+			},
+
+			"fingerprint": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "SSH key fingerprint",
+			},
+
+			"notes": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Default:     nil,
+				Description: "Additional notes",
+			},
+
+			"tags": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Set:         schema.HashString,
+				Description: "List of tags for the resource",
+			},
+		},
+	}
+}
+
+func resourceIBMComputeSSHKeyCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetSecuritySshKeyService(sess)
+
+	// First check if the key exists by fingerprint
+	// If so, set the Id (and fingerprint), but update notes and label (if any)
+	key := d.Get("public_key").(string)
+	label := d.Get("label").(string)
+
+	fingerprint, err := computeSSHKeyFingerprint(key)
+	if err != nil {
+		return err
+	}
+
+	keys, err := services.GetAccountService(sess).
+		Filter(filter.Path("sshKeys.fingerprint").Eq(fingerprint).Build()).
+ GetSshKeys() + if err == nil && len(keys) > 0 { + slKey := keys[0] + id := *slKey.Id + slKey.Id = nil + d.SetId(fmt.Sprintf("%d", id)) + d.Set("fingerprint", fingerprint) + editKey := false + + notes := d.Get("notes").(string) + if notes != "" && (slKey.Notes == nil || notes != *slKey.Notes) { + slKey.Notes = sl.String(notes) + editKey = true + } else if slKey.Notes != nil { + d.Set("notes", *slKey.Notes) + } + + if label != *slKey.Label { + slKey.Label = sl.String(label) + editKey = true + } + + if editKey { + _, err = service.Id(id).EditObject(&slKey) + return err + } + + return nil + } // End of "Import" + + // Build up our creation options + opts := datatypes.Security_Ssh_Key{ + Label: sl.String(label), + Key: sl.String(key), + } + + if notes, ok := d.GetOk("notes"); ok { + opts.Notes = sl.String(notes.(string)) + } + + res, err := service.CreateObject(&opts) + if err != nil { + return fmt.Errorf("Error creating SSH Key: %s", err) + } + + d.SetId(strconv.Itoa(*res.Id)) + log.Printf("[INFO] SSH Key: %d", *res.Id) + + return resourceIBMComputeSSHKeyRead(d, meta) +} + +func resourceIBMComputeSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecuritySshKeyService(sess) + + keyID, _ := strconv.Atoi(d.Id()) + key, err := service.Id(keyID).GetObject() + if err != nil { + // If the key is somehow already destroyed, mark as + // successfully gone + if err, ok := err.(sl.Error); ok && err.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving SSH key: %s", err) + } + + d.Set("label", key.Label) + d.Set("public_key", key.Key) + d.Set("fingerprint", key.Fingerprint) + d.Set("notes", key.Notes) + return nil +} + +func resourceIBMComputeSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecuritySshKeyService(sess) + + keyID, _ := strconv.Atoi(d.Id()) + + key, err := service.Id(keyID).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving SSH key: %s", err) + } + + if d.HasChange("label") { + key.Label = sl.String(d.Get("label").(string)) + } + + if d.HasChange("notes") { + key.Notes = sl.String(d.Get("notes").(string)) + } + + _, err = service.Id(keyID).EditObject(&key) + if err != nil { + return fmt.Errorf("Error editing SSH key: %s", err) + } + return resourceIBMComputeSSHKeyRead(d, meta) +} + +func resourceIBMComputeSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecuritySshKeyService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + log.Printf("[INFO] Deleting SSH key: %d", id) + _, err = service.Id(id).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting SSH key: %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMComputeSSHKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecuritySshKeyService(sess) + + keyID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(keyID).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return result.Id != nil
&& *result.Id == keyID, nil +} + +func computeSSHKeyFingerprint(key string) (fingerprint string, err error) { + parts := strings.Fields(key) + if len(parts) < 2 { + return "", fmt.Errorf("Invalid public key specified: %s\nPlease check the value of public_key", key) + } + k, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return "", fmt.Errorf("Error decoding the public key: %s\nPlease check the value of public_key", err) + } + fp := sha256.Sum256(k) + prints := make([]string, len(fp)) + for i, b := range fp { + prints[i] = fmt.Sprintf("%02x", b) + } + fingerprint = strings.Join(prints, ":") + return +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_ssl_certificate.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_ssl_certificate.go new file mode 100644 index 00000000000..babe12ec975 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_ssl_certificate.go @@ -0,0 +1,224 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMComputeSSLCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeSSLCertificateCreate, + Read: resourceIBMComputeSSLCertificateRead, + Update: resourceIBMComputeSSLCertificateUpdate, + Delete: resourceIBMComputeSSLCertificateDelete, + Exists: resourceIBMComputeSSLCertificateExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "certificate": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: normalizeCert, + Description: "SSL Certificate", + }, + + "intermediate_certificate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: normalizeCert, + Description: "Intermediate certificate value", + }, + + "private_key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + StateFunc: normalizeCert, + Description: "SSL Private Key", + }, + + "common_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Common name", + }, + + "organization_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Organization name", + }, + + "validity_begin": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Validity begins from", + }, + + "validity_days": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "Validity days", + }, + + "validity_end": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Validity ends before", + }, + + "key_size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "SSL key size", + }, + + "create_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "certificate creation date", + }, + + "modify_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "certificate modification date", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Tags set for resource", + }, + }, + } +} + +func
resourceIBMComputeSSLCertificateCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateService(sess.SetRetries(0)) + + template := datatypes.Security_Certificate{ + Certificate: sl.String(d.Get("certificate").(string)), + IntermediateCertificate: sl.String(d.Get("intermediate_certificate").(string)), + PrivateKey: sl.String(d.Get("private_key").(string)), + } + + log.Printf("[INFO] Creating Security Certificate") + + cert, err := service.CreateObject(&template) + + if err != nil { + return fmt.Errorf("Error creating Security Certificate: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *cert.Id)) + + return resourceIBMComputeSSLCertificateRead(d, meta) +} + +func resourceIBMComputeSSLCertificateRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + cert, err := service.Id(id).GetObject() + + if err != nil { + return fmt.Errorf("Unable to get Security Certificate: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *cert.Id)) + d.Set("certificate", *cert.Certificate) + if cert.IntermediateCertificate != nil { + d.Set("intermediate_certificate", *cert.IntermediateCertificate) + } + if cert.PrivateKey != nil { + d.Set("private_key", *cert.PrivateKey) + } + d.Set("common_name", *cert.CommonName) + d.Set("organization_name", *cert.OrganizationName) + validityBegin := *cert.ValidityBegin + d.Set("validity_begin", validityBegin.String()) + d.Set("validity_days", *cert.ValidityDays) + validityEnd := *cert.ValidityEnd + d.Set("validity_end", validityEnd.String()) + d.Set("key_size", *cert.KeySize) + createDate := *cert.CreateDate + d.Set("create_date", createDate.String()) + modifyDate := *cert.ModifyDate + d.Set("modify_date", modifyDate.String()) + + return nil +} + +func resourceIBMComputeSSLCertificateUpdate(d *schema.ResourceData, meta interface{}) error { + // Only tags can change, and tags are tracked locally, so there is no real API call to make here + return nil +} + +func resourceIBMComputeSSLCertificateDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateService(sess) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + _, err = service.Id(id).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting Security Certificate %d: %s", id, err) + } + + return nil +} + +func resourceIBMComputeSSLCertificateExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + cert, err := service.Id(id).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return cert.Id != nil && *cert.Id == id, nil +} + +func normalizeCert(cert interface{}) string { + if cert == nil || cert == (*string)(nil) { + return "" + } + + switch cert.(type) { + case string: + return strings.TrimSpace(cert.(string)) + default: + return "" + } +} diff --git
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_user.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_user.go new file mode 100644 index 00000000000..5a2a7ab4795 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_user.go @@ -0,0 +1,542 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const userCustomerCancelStatus = 1021 + +func resourceIBMComputeUser() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeUserCreate, + Read: resourceIBMComputeUserRead, + Update: resourceIBMComputeUserUpdate, + Delete: resourceIBMComputeUserDelete, + Exists: resourceIBMComputeUserExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "username": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "user name", + }, + "first_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "First name of the user", + }, + "last_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Last name of the user", + }, + "email": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "email address of the user", + }, + "company_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "company name", + }, + "address1": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Address info of the user", + }, + "address2": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Address info of the user", + }, + "city": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "City name", + }, + "state": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "State name", + }, + "country": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Country name", + }, + "timezone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "time zone info", + }, + "user_status": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "ACTIVE", + Description: "user status info", + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Sensitive: true, + StateFunc: func(v interface{}) string { + hash := sha256.Sum256([]byte(v.(string))) + return hex.EncodeToString(hash[:]) + }, + Description: "password for the user", + }, + "permissions": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "set of permissions assigned to the user", + }, + "has_api_key": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "API Key info of the user", + }, + "api_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Sensitive: true, + Description: "API key for the user", + }, + "ibm_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "IBM ID of the user", + }, + "tags": { + Type: schema.TypeSet, +
Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Tags set for the resources", + }, + }, + } +} + +// Create a SoftLayer_User_Customer_CustomerPermission_Permission object from the given string input +func makePermission(p string) datatypes.User_Customer_CustomerPermission_Permission { + return datatypes.User_Customer_CustomerPermission_Permission{ + KeyName: &p, + } +} + +// Convert a "set" of permission strings to a list of SoftLayer_User_Customer_CustomerPermission_Permissions +func getPermissions(d *schema.ResourceData) []datatypes.User_Customer_CustomerPermission_Permission { + permissionsSet := d.Get("permissions").(*schema.Set) + + if permissionsSet.Len() == 0 { + return nil + } + + permissions := make([]datatypes.User_Customer_CustomerPermission_Permission, 0, permissionsSet.Len()) + for _, elem := range permissionsSet.List() { + permission := makePermission(elem.(string)) + + permissions = append(permissions, permission) + } + return permissions +} + +func resourceIBMComputeUserCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetUserCustomerService(sess) + serviceNoRetry := services.GetUserCustomerService(sess.SetRetries(0)) + + timezoneID, err := getTimezoneIDByName(sess, d.Get("timezone").(string)) + if err != nil { + return err + } + + userStatusID, err := getUserStatusIDByName(sess, d.Get("user_status").(string)) + if err != nil { + return err + } + + // Build up our creation options + opts := datatypes.User_Customer{ + FirstName: sl.String(d.Get("first_name").(string)), + LastName: sl.String(d.Get("last_name").(string)), + Email: sl.String(d.Get("email").(string)), + CompanyName: sl.String(d.Get("company_name").(string)), + Address1: sl.String(d.Get("address1").(string)), + City: sl.String(d.Get("city").(string)), + State: sl.String(d.Get("state").(string)), + Country: sl.String(d.Get("country").(string)), + TimezoneId: &timezoneID, + UserStatusId: &userStatusID, + } + + if address2, ok := d.GetOk("address2"); ok { + opts.Address2 = sl.String(address2.(string)) + } + + if username, ok := d.GetOk("username"); ok { + opts.Username = sl.String(username.(string)) + } + + pass := sl.String(d.Get("password").(string)) + if *pass == "" { + pass = nil + } + + res, err := serviceNoRetry.CreateObject(&opts, pass, nil) + + if err != nil { + return fmt.Errorf("Error creating IBM Cloud User: %s", err) + } + + d.SetId(strconv.Itoa(*res.Id)) + log.Printf("[INFO] IBM Cloud User: %d", *res.Id) + + permissions := getPermissions(d) + + defaultPortalPermissions := []datatypes.User_Customer_CustomerPermission_Permission{ + {KeyName: sl.String("ACCESS_ALL_GUEST")}, + {KeyName: sl.String("ACCESS_ALL_HARDWARE")}, + } + + log.Printf("Replacing default portal permissions assigned by IBM Cloud with those specified in config") + + // Set the instance ID for the service to act on + service = service.Id(*res.Id) + + _, err = service.RemoveBulkPortalPermission(defaultPortalPermissions, sl.Bool(true)) + if err != nil { + return fmt.Errorf("Error removing default portal permissions for IBM Cloud User: %s", err) + } + + _, err = service.AddBulkPortalPermission(permissions) + if err != nil { + return fmt.Errorf("Error setting portal permissions for IBM Cloud User: %s", err) + } + + create_api_key_flag := d.Get("has_api_key").(bool) + if create_api_key_flag { + // We have to create the API key only if the flag is true. 
If 'false' we do not + // take the delete action on the API key, as this is the create new user method, + // and not the edit method. + _, err = service.AddApiAuthenticationKey() + if err != nil { + return fmt.Errorf("Error creating API key: %s", err) + } + } + + return resourceIBMComputeUserRead(d, meta) +} + +func resourceIBMComputeUserRead(d *schema.ResourceData, meta interface{}) error { + service := services.GetUserCustomerService(meta.(ClientSession).SoftLayerSession()) + userID, _ := strconv.Atoi(d.Id()) + + mask := strings.Join([]string{ + "id", + "username", + "email", + "firstName", + "lastName", + "companyName", + "address1", + "address2", + "city", + "state", + "country", + "timezone.shortName", + "userStatus.keyName", + "permissions.keyName", + "apiAuthenticationKeys.authenticationKey", + "openIdConnectUserName", + }, ";") + + sluserObj, err := service.Id(userID).Mask(mask).GetObject() + if err != nil { + // If the key is somehow already destroyed, mark as + // successfully gone + if strings.Contains(err.Error(), "404 Not Found") { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving IBM Cloud User: %s", err) + } + + d.Set("username", sluserObj.Username) + d.Set("email", sluserObj.Email) + d.Set("first_name", sluserObj.FirstName) + d.Set("last_name", sluserObj.LastName) + d.Set("company_name", sluserObj.CompanyName) + d.Set("address1", sluserObj.Address1) + d.Set("address2", sluserObj.Address2) + d.Set("city", sluserObj.City) + d.Set("state", sluserObj.State) + d.Set("country", sluserObj.Country) + d.Set("timezone", sluserObj.Timezone.ShortName) + d.Set("user_status", sluserObj.UserStatus.KeyName) + + permissions := make([]string, 0, len(sluserObj.Permissions)) + for _, elem := range sluserObj.Permissions { + permissions = append(permissions, *elem.KeyName) + } + d.Set("permissions", permissions) + + // If present, extract the api key from the SoftLayer response and set the field in the resource + if len(sluserObj.ApiAuthenticationKeys) > 0 { + d.Set("api_key", sluserObj.ApiAuthenticationKeys[0].AuthenticationKey) // as its a computed field + d.Set("has_api_key", true) + } else { + d.Set("api_key", "") + d.Set("has_api_key", false) + } + + if sluserObj.OpenIdConnectUserName != nil { + d.Set("ibm_id", sluserObj.OpenIdConnectUserName) + } + + return nil +} + +func resourceIBMComputeUserUpdate(d *schema.ResourceData, meta interface{}) error { + + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetUserCustomerService(sess) + serviceNoRetry := services.GetUserCustomerService(sess.SetRetries(0)) + + sluid, _ := strconv.Atoi(d.Id()) + + mask := strings.Join([]string{ + "id", + "username", + "email", + "firstName", + "lastName", + "companyName", + "address1", + "address2", + "city", + "state", + "country", + "timezone.shortName", + "userStatus.keyName", + "permissions.keyName", + "apiAuthenticationKeys.authenticationKey", + "apiAuthenticationKeys.id", + }, ";") + + service = service.Id(sluid) + userObj, err := service.Mask(mask).GetObject() + + // Some fields cannot be updated such as username. Computed fields also cannot be updated + // by explicitly providing a value. So only update the fields that are editable. 
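+ // The user record is fetched with the mask above and then edited in place, so any + // field that is not explicitly overwritten below is sent back to the API unchanged + // (a read-modify-write update).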
+ // Password changes can also not be fully automated, and are not supported + if err != nil { + return fmt.Errorf("Error retrieving ibm_compute_user: %s", err) + } + + if d.HasChange("first_name") { + userObj.FirstName = sl.String(d.Get("first_name").(string)) + } + if d.HasChange("last_name") { + userObj.LastName = sl.String(d.Get("last_name").(string)) + } + if d.HasChange("email") { + userObj.Email = sl.String(d.Get("email").(string)) + } + if d.HasChange("company_name") { + userObj.CompanyName = sl.String(d.Get("company_name").(string)) + } + if d.HasChange("address1") { + userObj.Address1 = sl.String(d.Get("address1").(string)) + } + if d.HasChange("address2") { + userObj.Address2 = sl.String(d.Get("address2").(string)) + } + if d.HasChange("city") { + userObj.City = sl.String(d.Get("city").(string)) + } + if d.HasChange("state") { + userObj.State = sl.String(d.Get("state").(string)) + } + if d.HasChange("country") { + userObj.Country = sl.String(d.Get("country").(string)) + } + if d.HasChange("timezone") { + tzID, err := getTimezoneIDByName(sess, d.Get("timezone").(string)) + if err != nil { + return err + } + userObj.TimezoneId = &tzID + } + if d.HasChange("user_status") { + userStatusID, err := getUserStatusIDByName(sess, d.Get("user_status").(string)) + if err != nil { + return err + } + userObj.UserStatusId = &userStatusID + } + + _, err = serviceNoRetry.Id(sluid).EditObject(&userObj) + if err != nil { + return fmt.Errorf("Error received while editing ibm_compute_user: %s", err) + } + + if d.HasChange("permissions") { + old, new := d.GetChange("permissions") + + // 1. Remove old permissions no longer appearing in the new set + // 2. Add new permissions not already granted + + remove := old.(*schema.Set).Difference(new.(*schema.Set)).List() + add := new.(*schema.Set).Difference(old.(*schema.Set)).List() + + oldPermissions := make([]datatypes.User_Customer_CustomerPermission_Permission, 0, len(remove)) + newPermissions := make([]datatypes.User_Customer_CustomerPermission_Permission, 0, len(add)) + + for _, elem := range remove { + oldPermissions = append(oldPermissions, makePermission(elem.(string))) + } + + for _, elem := range add { + newPermissions = append(newPermissions, makePermission(elem.(string))) + } + + // 'remove' all old permissions + _, err = service.RemoveBulkPortalPermission(oldPermissions, sl.Bool(true)) + if err != nil { + return fmt.Errorf("Error received while removing old permissions from ibm_compute_user: %s", err) + } + + // 'add' new permission set + _, err = service.AddBulkPortalPermission(newPermissions) + if err != nil { + return fmt.Errorf("Error received while assigning new permissions to ibm_compute_user: %s", err) + } + } + + if d.HasChange("has_api_key") { + // If true, create an API key if none exists. It's a no-op if an API key already exists. + // If false, delete the API key if one exists. It's a no-op if no API key exists. + api_key_flag := d.Get("has_api_key").(bool) + + // Get the current keys. + keys := userObj.ApiAuthenticationKeys + + // Create a key if flag is true, and a key does not already exist. + if api_key_flag { + if len(keys) == 0 { // means key does not exist, so create one. + key, err := service.AddApiAuthenticationKey() + if err != nil { + return fmt.Errorf("Error creating API key while editing ibm_compute_user resource: %s", err) + } + + d.Set("api_key", key) + } else { + d.Set("api_key", keys[0].AuthenticationKey) // as api_key is a computed field + } + } else { + // If false, then delete the key if there was one.
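+ // Note: RemoveApiAuthenticationKey reports failure both through its error return + // and through the returned success flag, so both are checked below.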
+ if len(keys) > 0 { + success, err := service.RemoveApiAuthenticationKey(keys[0].Id) + if err != nil { + return fmt.Errorf("Error deleting API key while editing ibm_compute_user resource: %s", err) + } + + if !success { + return fmt.Errorf( + "The API reported removal of the api key was not successful for %s", + d.Get("email").(string), + ) + } + } + d.Set("api_key", nil) + } + } + return nil +} + +func resourceIBMComputeUserDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetUserCustomerService(sess) + + id, _ := strconv.Atoi(d.Id()) + + user := datatypes.User_Customer{ + UserStatusId: sl.Int(userCustomerCancelStatus), + } + + log.Printf("[INFO] Deleting IBM Cloud user: %d", id) + _, err := service.Id(id).EditObject(&user) + if err != nil { + return fmt.Errorf("Error deleting IBM Cloud user: %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMComputeUserExists(d *schema.ResourceData, meta interface{}) (bool, error) { + service := services.GetUserCustomerService(meta.(ClientSession).SoftLayerSession()) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(id).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return result.Id != nil && *result.Id == id, nil +} + +func getTimezoneIDByName(sess *session.Session, shortName string) (int, error) { + zones, err := services.GetLocaleTimezoneService(sess). + Mask("id,shortName"). + GetAllObjects() + + if err != nil { + return -1, err + } + + for _, zone := range zones { + if *zone.ShortName == shortName { + return *zone.Id, nil + } + } + + return -1, fmt.Errorf("Timezone %s could not be found", shortName) + +} + +func getUserStatusIDByName(sess *session.Session, name string) (int, error) { + statuses, err := services.GetUserCustomerStatusService(sess). + Mask("id,keyName"). + GetAllObjects() + + if err != nil { + return -1, err + } + + for _, status := range statuses { + if *status.KeyName == name { + return *status.Id, nil + } + } + + return -1, fmt.Errorf("User status %s could not be found", name) + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_vm_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_vm_instance.go new file mode 100644 index 00000000000..df39c82bdb1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_compute_vm_instance.go @@ -0,0 +1,2084 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "log" + "math" + "strconv" + "strings" + "time" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/helpers/virtual" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +type storageIds []int + +func (s storageIds) Storages(meta interface{}) ([]datatypes.Network_Storage, error) { + storageService := services.GetNetworkStorageService(meta.(ClientSession).SoftLayerSession()) + storages := make([]datatypes.Network_Storage, len(s)) + + for i, id := range s { + var err error + storages[i], err = storageService.Id(id).GetObject() + if err != nil { + return nil, err + } + } + return storages, nil +} + +const ( + staticIPRouted = "STATIC_IP_ROUTED" + + upgradeTransaction = "UPGRADE" + pendingUpgrade = "pending_upgrade" + inProgressUpgrade = "upgrade_started" + + activeTransaction = "active" + idleTransaction = "idle" + + virtualGuestAvailable = "available" + virtualGuestProvisioning = "provisioning" + + networkStorageMassAccessControlModificationException = "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification" + retryDelayForModifyingStorageAccess = 10 * time.Second +) + +func resourceIBMComputeVmInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeVmInstanceCreate, + Read: resourceIBMComputeVmInstanceRead, + Update: resourceIBMComputeVmInstanceUpdate, + Delete: resourceIBMComputeVmInstanceDelete, + Exists: resourceIBMComputeVmInstanceExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "hostname": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"bulk_vms"}, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + // FIXME: Work around another bug in terraform. + // When a default function is used with an optional property, + // terraform will always execute it on apply, even when the property + // already has a value in the state for it. This causes a false diff. + // Making the property Computed:true does not make a difference. + if strings.HasPrefix(o, "terraformed-") && strings.HasPrefix(n, "terraformed-") { + return true + } + return o == n + }, + }, + + "domain": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"bulk_vms"}, + }, + + "bulk_vms": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 2, + ConflictsWith: []string{"hostname", "domain"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hostname": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + // FIXME: Work around another bug in terraform. 
+ // When a default function is used with an optional property, + // terraform will always execute it on apply, even when the property + // already has a value in the state for it. This causes a false diff. + // Making the property Computed:true does not make a difference. + if strings.HasPrefix(o, "terraformed-") && strings.HasPrefix(n, "terraformed-") { + return true + } + return o == n + }, + }, + + "domain": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + Set: resourceIBMBulkVMHostHash, + }, + + "os_reference_code": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if strings.HasSuffix(n, "_LATEST") { + t := strings.Trim(n, "_LATEST") + if strings.Contains(o, t) { + return true + } + } + return o == n + }, + ConflictsWith: []string{"image_id"}, + }, + + "hourly_billing": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "private_network_only": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "datacenter": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"datacenter_choice"}, + }, + + "datacenter_choice": { + Type: schema.TypeList, + Description: "The user provided datacenter options", + Optional: true, + ConflictsWith: []string{"datacenter", "public_vlan_id", "private_vlan_id", "placement_group_name", "placement_group_id"}, + Elem: &schema.Schema{Type: schema.TypeMap}, + }, + + "placement_group_name": { + Type: schema.TypeString, + Description: "The placement group name", + Optional: true, + ForceNew: true, + ConflictsWith: []string{"datacenter_choice", "dedicated_acct_host_only", "dedicated_host_name", "dedicated_host_id", "placement_group_id"}, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + _, ok := d.GetOk("placement_group_id") + return new == "" && ok + }, + }, + + "placement_group_id": { + Type: schema.TypeInt, + Description: "The placement group id", + Optional: true, + ForceNew: true, + ConflictsWith: []string{"datacenter_choice", "dedicated_acct_host_only", "dedicated_host_name", "dedicated_host_id", "placement_group_name"}, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + _, ok := d.GetOk("placement_group_name") + return new == "0" && ok + }, + }, + + "flavor_key_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Flavor key name used to provision vm.", + ConflictsWith: []string{"cores", "memory"}, + }, + + "cores": { + + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"flavor_key_name"}, + }, + + "memory": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + memoryInMB := float64(v.(int)) + + // Validate memory to match gigs format + remaining := math.Mod(memoryInMB, 1024) + if remaining > 0 { + suggested := math.Ceil(memoryInMB/1024) * 1024 + errors = append(errors, fmt.Errorf( + "Invalid 'memory' value %d megabytes, must be a multiple of 1024 (e.g. 
use %d)", int(memoryInMB), int(suggested))) + } + + return + }, + ConflictsWith: []string{"flavor_key_name"}, + }, + + "dedicated_acct_host_only": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"dedicated_host_name", "dedicated_host_id", "placement_group_id", "placement_group_name"}, + }, + + "dedicated_host_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"dedicated_acct_host_only", "dedicated_host_id", "placement_group_name", "placement_group_id"}, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + _, ok := d.GetOk("dedicated_host_id") + return new == "" && ok + }, + }, + + "dedicated_host_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"dedicated_acct_host_only", "dedicated_host_name", "placement_group_name", "placement_group_id"}, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + _, ok := d.GetOk("dedicated_host_name") + return new == "0" && ok + }, + }, + + "transient": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"dedicated_acct_host_only", "dedicated_host_name", "dedicated_host_id", "cores", "memory", "public_bandwidth_limited", "public_bandwidth_unlimited"}, + }, + + "public_vlan_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"datacenter_choice"}, + }, + "public_interface_id": { + Type: schema.TypeInt, + Computed: true, + }, + "public_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "public_subnet_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "public_security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + ForceNew: true, + MaxItems: 5, + }, + + "private_vlan_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"datacenter_choice"}, + }, + "private_interface_id": { + Type: schema.TypeInt, + Computed: true, + }, + "private_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "private_subnet_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "private_security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + ForceNew: true, + MaxItems: 5, + }, + + "disks": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "network_speed": { + Type: schema.TypeInt, + Optional: true, + Default: 100, + }, + + "ipv4_address": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv4_address_private": { + Type: schema.TypeString, + Computed: true, + }, + + "ip_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "ip_address_id_private": { + Type: schema.TypeInt, + Computed: true, + }, + + "ipv6_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "ipv6_static_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "ipv6_address": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv6_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + // SoftLayer does not support public_ipv6_subnet 
configuration in vm creation. So, public_ipv6_subnet + // is defined as a computed parameter. + "public_ipv6_subnet": { + Type: schema.TypeString, + Computed: true, + }, + + "public_ipv6_subnet_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "secondary_ip_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validateSecondaryIPCount, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + // secondary_ip_count is only used when a virtual_guest resource is created. + if d.State() == nil { + return false + } + return true + }, + }, + + "secondary_ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ssh_key_ids": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + + "file_storage_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + + "block_storage_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + "user_metadata": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "notes": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateNotes, + }, + + "local_disk": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "post_install_script_uri": { + Type: schema.TypeString, + Optional: true, + Default: nil, + ForceNew: true, + }, + + "image_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"os_reference_code"}, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "wait_time_minutes": { + Type: schema.TypeInt, + Optional: true, + Deprecated: "This field is deprecated. 
Use timeouts block instead", + Default: 90, + }, + // Monthly only + // Limited BandWidth + "public_bandwidth_limited": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: applyOnce, + ConflictsWith: []string{"private_network_only", "public_bandwidth_unlimited"}, + ValidateFunc: validatePublicBandwidth, + }, + + // Monthly only + // Unlimited BandWidth + "public_bandwidth_unlimited": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + DiffSuppressFunc: applyOnce, + ConflictsWith: []string{"private_network_only", "public_bandwidth_limited"}, + }, + + "evault": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + DiffSuppressFunc: applyOnce, + }, + + // Quote based provisioning only + "quote_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Quote ID for Quote based provisioning", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + }, + } +} + +type vmMember map[string]interface{} + +func getSubnetID(subnet string, meta interface{}) (int, error) { + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + + subnetInfo := strings.Split(subnet, "/") + if len(subnetInfo) != 2 { + return 0, fmt.Errorf( + "Unable to parse the provided subnet: %s", subnet) + } + + networkIdentifier := subnetInfo[0] + cidr := subnetInfo[1] + + subnets, err := service. + Mask("id"). + Filter( + filter.Build( + filter.Path("subnets.cidr").Eq(cidr), + filter.Path("subnets.networkIdentifier").Eq(networkIdentifier), + ), + ). + GetSubnets() + + if err != nil { + return 0, fmt.Errorf("Error looking up Subnet: %s", err) + } + + if len(subnets) < 1 { + return 0, fmt.Errorf( + "Unable to locate a subnet matching the provided subnet: %s", subnet) + } + + return *subnets[0].Id, nil +} + +func resourceIBMBulkVMHostHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", + m["hostname"].(string))) + + return hashcode.String(buf.String()) +} + +func getNameForBlockDevice(i int) string { + // skip 1, which is reserved for the swap disk. + // so we get 0, 2, 3, 4, 5 ... + if i == 0 { + return "0" + } + + return strconv.Itoa(i + 1) +} + +func getNameForBlockDeviceWithFlavor(i int) string { + // skip 0, which is taken from flavor. + // skip 1, which is reserved for the swap disk. + // so we get 2, 3, 4, 5 ... 
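+ // For example, the configured data disks map to devices as: + // disks.0 -> device "2", disks.1 -> device "3", and so on.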
+ + return strconv.Itoa(i + 2) +} + +func getBlockDevices(d *schema.ResourceData) []datatypes.Virtual_Guest_Block_Device { + numBlocks := d.Get("disks.#").(int) + if numBlocks == 0 { + return nil + } + blocks := make([]datatypes.Virtual_Guest_Block_Device, 0, numBlocks) + for i := 0; i < numBlocks; i++ { + var name string + blockRef := fmt.Sprintf("disks.%d", i) + if _, ok := d.GetOk("flavor_key_name"); ok { + name = getNameForBlockDeviceWithFlavor(i) + } else { + name = getNameForBlockDevice(i) + } + capacity := d.Get(blockRef).(int) + block := datatypes.Virtual_Guest_Block_Device{ + Device: &name, + DiskImage: &datatypes.Virtual_Disk_Image{ + Capacity: &capacity, + }, + } + blocks = append(blocks, block) + } + + return blocks +} + +func expandSecurityGroupBindings(securityGroupsList []interface{}) ([]datatypes.Virtual_Network_SecurityGroup_NetworkComponentBinding, error) { + if len(securityGroupsList) == 0 { + return nil, nil + } + sgBindings := make([]datatypes.Virtual_Network_SecurityGroup_NetworkComponentBinding, + len(securityGroupsList)) + for i, v := range securityGroupsList { + sgid := v.(int) + sgBindings[i] = datatypes.Virtual_Network_SecurityGroup_NetworkComponentBinding{ + SecurityGroupId: sl.Int(sgid), + } + } + return sgBindings, nil +} + +func getVirtualGuestTemplateFromResourceData(d *schema.ResourceData, meta interface{}, datacenter string, publicVlanID, privateVlanID, quote_id int) ([]datatypes.Virtual_Guest, error) { + + dc := datatypes.Location{ + Name: sl.String(datacenter), + } + // FIXME: Work around bug in terraform (?) + // For properties that have a default value set and a diff suppress function, + // it is not using the default value. + networkSpeed := d.Get("network_speed").(int) + if networkSpeed == 0 { + networkSpeed = resourceIBMComputeVmInstance().Schema["network_speed"].Default.(int) + } + + networkComponent := datatypes.Virtual_Guest_Network_Component{ + MaxSpeed: &networkSpeed, + } + members := []vmMember{} + bulkVMs := d.Get("bulk_vms").(*schema.Set).List() + if len(bulkVMs) > 0 { + for _, vm := range bulkVMs { + members = append(members, vm.(map[string]interface{})) + } + } else { + member := vmMember{ + "hostname": d.Get("hostname").(string), + "domain": d.Get("domain").(string), + } + members = append(members, member) + } + + vms := make([]datatypes.Virtual_Guest, 0) + for _, member := range members { + opts := datatypes.Virtual_Guest{ + Hostname: sl.String(member["hostname"].(string)), + Domain: sl.String(member["domain"].(string)), + HourlyBillingFlag: sl.Bool(d.Get("hourly_billing").(bool)), + PrivateNetworkOnlyFlag: sl.Bool(d.Get("private_network_only").(bool)), + Datacenter: &dc, + NetworkComponents: []datatypes.Virtual_Guest_Network_Component{networkComponent}, + BlockDevices: getBlockDevices(d), + LocalDiskFlag: sl.Bool(d.Get("local_disk").(bool)), + PostInstallScriptUri: sl.String(d.Get("post_install_script_uri").(string)), + } + + if placementGroupID, ok := d.GetOk("placement_group_id"); ok { + grpID := placementGroupID.(int) + service := services.GetVirtualPlacementGroupService(meta.(ClientSession).SoftLayerSession()) + grp, err := service.Id(grpID).Mask("id,name,backendRouter[datacenter[name]]").GetObject() + if err != nil { + return vms, fmt.Errorf("Error looking up placement group: %s", err) + } + + opts.PlacementGroupId = sl.Int(*grp.Id) + + } else if placementGroupName, ok := d.GetOk("placement_group_name"); ok { + grpName := placementGroupName.(string) + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + 
groups, err := service. + Mask("id,name,backendRouter[hostname,datacenter[name]]"). + Filter(filter.Path("placementGroup.name").Eq(grpName).Build()). + GetPlacementGroups() + + if err != nil { + return vms, fmt.Errorf("Error looking up placement group '%s': %s", grpName, err) + } + grps := []datatypes.Virtual_PlacementGroup{} + for _, g := range groups { + if grpName == *g.Name { + grps = append(grps, g) + } + } + if len(grps) == 0 { + return vms, fmt.Errorf("Error looking up placement group '%s'", grpName) + } + grp := grps[0] + + opts.PlacementGroupId = sl.Int(*grp.Id) + } + + if startCPUs, ok := d.GetOk("cores"); ok { + opts.StartCpus = sl.Int(startCPUs.(int)) + } + if maxMemory, ok := d.GetOk("memory"); ok { + opts.MaxMemory = sl.Int(maxMemory.(int)) + } + + if flavor, ok := d.GetOk("flavor_key_name"); ok { + flavorComponent := datatypes.Virtual_Guest_SupplementalCreateObjectOptions{ + FlavorKeyName: sl.String(flavor.(string)), + } + opts.SupplementalCreateObjectOptions = &flavorComponent + } + + if dedicatedAcctHostOnly, ok := d.GetOk("dedicated_acct_host_only"); ok { + opts.DedicatedAccountHostOnlyFlag = sl.Bool(dedicatedAcctHostOnly.(bool)) + } else if dedicatedHostID, ok := d.GetOk("dedicated_host_id"); ok { + opts.DedicatedHost = &datatypes.Virtual_DedicatedHost{ + Id: sl.Int(dedicatedHostID.(int)), + } + } else if dedicatedHostName, ok := d.GetOk("dedicated_host_name"); ok { + hostName := dedicatedHostName.(string) + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + hosts, err := service. + Mask("id"). + Filter(filter.Path("dedicatedHosts.name").Eq(hostName).Build()). + GetDedicatedHosts() + + if err != nil { + return vms, fmt.Errorf("Error looking up dedicated host '%s': %s", hostName, err) + } else if len(hosts) == 0 { + return vms, fmt.Errorf("Error looking up dedicated host '%s'", hostName) + } + + opts.DedicatedHost = &hosts[0] + } + + if transientFlag, ok := d.GetOk("transient"); ok { + if !*opts.HourlyBillingFlag || *opts.LocalDiskFlag { + return vms, fmt.Errorf("Unable to provision a transient instance with hourly_billing set to false or local_disk set to true") + } + opts.TransientGuestFlag = sl.Bool(transientFlag.(bool)) + } + + if quote_id == 0 { + + if imgID, ok := d.GetOk("image_id"); ok { + imageID := imgID.(int) + service := services. + GetVirtualGuestBlockDeviceTemplateGroupService(meta.(ClientSession).SoftLayerSession()) + + image, err := service. + Mask("id,globalIdentifier").Id(imageID).
+ GetObject() + if err != nil { + return vms, fmt.Errorf("Error looking up image %d: %s", imageID, err) + } else if image.GlobalIdentifier == nil { + return vms, fmt.Errorf( + "Image template %d does not have a global identifier", imageID) + } + + opts.BlockDeviceTemplateGroup = &datatypes.Virtual_Guest_Block_Device_Template_Group{ + GlobalIdentifier: image.GlobalIdentifier, + } + } + + } + + if operatingSystemReferenceCode, ok := d.GetOk("os_reference_code"); ok { + opts.OperatingSystemReferenceCode = sl.String(operatingSystemReferenceCode.(string)) + } + + publicSubnet := d.Get("public_subnet").(string) + privateSubnet := d.Get("private_subnet").(string) + + primaryNetworkComponent := datatypes.Virtual_Guest_Network_Component{ + NetworkVlan: &datatypes.Network_Vlan{}, + } + + usePrimaryNetworkComponent := false + + if publicVlanID > 0 { + primaryNetworkComponent.NetworkVlan.Id = &publicVlanID + usePrimaryNetworkComponent = true + } + + // Apply public subnet if provided + if publicSubnet != "" { + primarySubnetID, err := getSubnetID(publicSubnet, meta) + if err != nil { + return vms, fmt.Errorf("Error creating virtual guest: %s", err) + } + primaryNetworkComponent.NetworkVlan.PrimarySubnetId = &primarySubnetID + usePrimaryNetworkComponent = true + } + + // Apply security groups if provided + publicSecurityGroupIDList := d.Get("public_security_group_ids").(*schema.Set).List() + sgb, err := expandSecurityGroupBindings(publicSecurityGroupIDList) + if err != nil { + return vms, err + } + if sgb != nil { + primaryNetworkComponent.SecurityGroupBindings = sgb + usePrimaryNetworkComponent = true + } + + if usePrimaryNetworkComponent { + opts.PrimaryNetworkComponent = &primaryNetworkComponent + } + + primaryBackendNetworkComponent := datatypes.Virtual_Guest_Network_Component{ + NetworkVlan: &datatypes.Network_Vlan{}, + } + + usePrimaryBackendNetworkComponent := false + + if privateVlanID > 0 { + primaryBackendNetworkComponent.NetworkVlan.Id = &privateVlanID + usePrimaryBackendNetworkComponent = true + } + + // Apply private subnet if provided + if privateSubnet != "" { + primarySubnetID, err := getSubnetID(privateSubnet, meta) + if err != nil { + return vms, fmt.Errorf("Error creating virtual guest: %s", err) + } + primaryBackendNetworkComponent.NetworkVlan.PrimarySubnetId = &primarySubnetID + usePrimaryBackendNetworkComponent = true + } + + // Apply security groups if provided + privateSecurityGroupIDList := d.Get("private_security_group_ids").(*schema.Set).List() + sgb, err = expandSecurityGroupBindings(privateSecurityGroupIDList) + if err != nil { + return vms, err + } + if sgb != nil { + primaryBackendNetworkComponent.SecurityGroupBindings = sgb + usePrimaryBackendNetworkComponent = true + } + + if usePrimaryBackendNetworkComponent { + opts.PrimaryBackendNetworkComponent = &primaryBackendNetworkComponent + } + + if userData, ok := d.GetOk("user_metadata"); ok { + opts.UserData = []datatypes.Virtual_Guest_Attribute{ + { + Value: sl.String(userData.(string)), + }, + } + } + + if quote_id == 0 { + + // Get configured ssh_keys + sshKeySet := d.Get("ssh_key_ids").(*schema.Set) + sshKeys := sshKeySet.List() + sshKeyLen := len(sshKeys) + if sshKeyLen > 0 { + opts.SshKeys = make([]datatypes.Security_Ssh_Key, 0, sshKeyLen) + for _, sshKey := range sshKeys { + opts.SshKeys = append(opts.SshKeys, datatypes.Security_Ssh_Key{ + Id: sl.Int(sshKey.(int)), + }) + } + } + } + + vms = append(vms, opts) + } + + return vms, nil +} + +func resourceIBMComputeVmInstanceCreate(d *schema.ResourceData, meta 
interface{}) error { + + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetVirtualGuestService(sess) + + var id int + var receipt datatypes.Container_Product_Order_Receipt + + var err1 error + var err error + + var dcName string + var retryOptions []interface{} + if dc, ok := d.GetOk("datacenter"); ok { + dcName = dc.(string) + } + + quote_id := d.Get("quote_id").(int) + + if options, ok := d.GetOk("datacenter_choice"); ok { + retryOptions = options.([]interface{}) + } + + if dcName == "" && len(retryOptions) == 0 { + return fmt.Errorf("Provide either `datacenter` or `datacenter_choice`") + } + + if (d.Get("hostname").(string) == "" || d.Get("domain").(string) == "") && len(d.Get("bulk_vms").(*schema.Set).List()) == 0 { + return fmt.Errorf("Provide either `hostname` and `domain` or `bulk_vms`") + } + + if dcName != "" { + publicVlan := 0 + privateVlan := 0 + if v, ok := d.GetOk("public_vlan_id"); ok { + publicVlan = v.(int) + + } + if v, ok := d.GetOk("private_vlan_id"); ok { + privateVlan = v.(int) + + } + + receipt, err1 = placeOrder(d, meta, dcName, publicVlan, privateVlan, quote_id) + } else if len(retryOptions) > 0 { + + err := validateDatacenterOption(retryOptions, []string{"datacenter", "public_vlan_id", "private_vlan_id"}) + if err != nil { + return err + } + for _, option := range retryOptions { + if option == nil { + return fmt.Errorf("Provide a valid `datacenter_choice`") + } + center := option.(map[string]interface{}) + var publicVlan, privateVlan int + var name string + + if v, ok := center["datacenter"]; ok { + name = v.(string) + } else { + return fmt.Errorf("Missing datacenter in `datacenter_choice`") + } + + if v, ok := center["public_vlan_id"]; ok { + publicVlan, _ = strconv.Atoi(v.(string)) + } + if v, ok := center["private_vlan_id"]; ok { + privateVlan, _ = strconv.Atoi(v.(string)) + } + + receipt, err1 = placeOrder(d, meta, name, publicVlan, privateVlan, quote_id) + if err1 == nil { + break + + } + } + } + + if err1 != nil { + return fmt.Errorf("Error ordering virtual guest: %s", err1) + } + + var idStrings []string + if quote_id > 0 { + vmId := fmt.Sprintf("%d", *receipt.OrderDetails.VirtualGuests[0].Id) + idStrings = append(idStrings, vmId) + d.SetId(vmId) + + } else if len(receipt.OrderDetails.OrderContainers) > 1 { + for i := 0; i < len(receipt.OrderDetails.OrderContainers); i++ { + idStrings = append(idStrings, fmt.Sprintf("%d", *receipt.OrderDetails.OrderContainers[i].VirtualGuests[0].Id)) + } + d.SetId(strings.Join(idStrings, "/")) + } else { + vmId := fmt.Sprintf("%d", *receipt.OrderDetails.OrderContainers[0].VirtualGuests[0].Id) + idStrings = append(idStrings, vmId) + d.SetId(vmId) + } + log.Printf("[INFO] Virtual Machine ID: %s", d.Id()) + for _, str := range idStrings { + id, err = strconv.Atoi(str) + if err != nil { + return err + } + // Set tags + tags := getTags(d) + if tags != "" { + //Try setting only when it is non empty as we are creating virtual guest + err = setGuestTags(id, tags, meta) + if err != nil { + return err + } + } + + var storageIds []int + if fileStorageSet := d.Get("file_storage_ids").(*schema.Set); len(fileStorageSet.List()) > 0 { + storageIds = expandIntList(fileStorageSet.List()) + + } + if blockStorageSet := d.Get("block_storage_ids").(*schema.Set); len(blockStorageSet.List()) > 0 { + storageIds = append(storageIds, expandIntList(blockStorageSet.List())...) 
+ } + // file and block storage IDs are combined so one bulk authorization request + // can grant the new guest access to all of them. + if len(storageIds) > 0 { + err := addAccessToStorageList(service.Id(id), id, storageIds, meta) + if err != nil { + return err + } + } + + // Set notes + err = setNotes(id, d, meta) + if err != nil { + return err + } + + // wait for machine availability + + _, err = WaitForVirtualGuestAvailable(id, d, meta) + + if err != nil { + return fmt.Errorf( + "Error waiting for virtual machine (%s) to become ready: %s", d.Id(), err) + } + } + + return resourceIBMComputeVmInstanceRead(d, meta) +} + +func resourceIBMComputeVmInstanceRead(d *schema.ResourceData, meta interface{}) error { + service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession()) + parts, err := vmIdParts(d.Id()) + if err != nil { + return err + } + id, err := strconv.Atoi(parts[0]) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(id).Mask( + "hostname,domain,blockDevices[diskImage],startCpus,maxMemory,dedicatedAccountHostOnlyFlag,operatingSystemReferenceCode,blockDeviceTemplateGroup[id],transientGuestFlag," + + "billingItem[orderItem[preset[keyName]]]," + + "primaryIpAddress,primaryBackendIpAddress,privateNetworkOnlyFlag," + + "hourlyBillingFlag,localDiskFlag," + + "allowedNetworkStorage[id,nasType]," + + "notes,userData[value],tagReferences[id,tag[name]]," + + "datacenter[id,name,longName]," + + "sshKeys,status[keyName,name]," + + "primaryNetworkComponent[networkVlan[id],subnets," + + "primaryVersion6IpAddressRecord[subnet,guestNetworkComponentBinding[ipAddressId]]," + + "primaryIpAddressRecord[subnet,guestNetworkComponentBinding[ipAddressId]]," + + "securityGroupBindings[securityGroup]]," + + "primaryBackendNetworkComponent[networkVlan[id]," + + "primaryIpAddressRecord[subnet,guestNetworkComponentBinding[ipAddressId]]," + + "securityGroupBindings[securityGroup]],evaultNetworkStorage[capacityGb]", + ).GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving virtual guest: %s", err) + } + + if len(parts) == 1 { + d.Set("hostname", *result.Hostname) + d.Set("domain", *result.Domain) + } else { + members := make([]vmMember, 0) + for _, part := range parts { + vmId, err := strconv.Atoi(part) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + vmResult, err := service.Id(vmId).Mask( + "hostname,domain", + ).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving virtual guest: %s", err) + } + member := vmMember{ + "hostname": *vmResult.Hostname, + "domain": *vmResult.Domain, + } + members = append(members, member) + } + } + + keyName, ok := sl.GrabOk(result, "BillingItem.OrderItem.Preset.KeyName") + if ok { + d.Set("flavor_key_name", keyName) + } + + if result.BlockDeviceTemplateGroup != nil { + d.Set("image_id", result.BlockDeviceTemplateGroup.Id) + } else { + if result.OperatingSystemReferenceCode != nil { + d.Set("os_reference_code", result.OperatingSystemReferenceCode) + } + } + + if result.Datacenter != nil { + d.Set("datacenter", *result.Datacenter.Name) + } + + if result.DedicatedHost != nil { + d.Set("dedicated_host_id", *result.DedicatedHost.Id) + d.Set("dedicated_host_name", *result.DedicatedHost.Name) + } + + if result.PlacementGroup != nil { + d.Set("placement_group_id", *result.PlacementGroup.Id) + d.Set("placement_group_name", *result.PlacementGroup.Name) + } + + d.Set( + "network_speed", + sl.Grab( + result, + "PrimaryBackendNetworkComponent.MaxSpeed", + d.Get("network_speed").(int), + ), + ) + if result.OperatingSystemReferenceCode != nil && strings.HasPrefix(*result.OperatingSystemReferenceCode, "WIN") { + d.Set("disks",
flattenDisksForWindows(result)) + } else { + d.Set("disks", flattenDisks(result)) + } + d.Set("cores", *result.StartCpus) + d.Set("memory", *result.MaxMemory) + d.Set("dedicated_acct_host_only", *result.DedicatedAccountHostOnlyFlag) + d.Set("transient", *result.TransientGuestFlag) + d.Set("ipv4_address", result.PrimaryIpAddress) + d.Set("ipv4_address_private", result.PrimaryBackendIpAddress) + if result.PrimaryNetworkComponent != nil && result.PrimaryNetworkComponent.PrimaryIpAddressRecord != nil { + d.Set("ip_address_id", *result.PrimaryNetworkComponent.PrimaryIpAddressRecord.GuestNetworkComponentBinding.IpAddressId) + } + if result.PrimaryNetworkComponent != nil { + d.Set("public_interface_id", result.PrimaryNetworkComponent.Id) + } + if result.PrimaryBackendNetworkComponent != nil && result.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord != nil { + d.Set("ip_address_id_private", + *result.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord.GuestNetworkComponentBinding.IpAddressId) + } + if result.PrimaryBackendNetworkComponent != nil { + d.Set("private_interface_id", result.PrimaryBackendNetworkComponent.Id) + } + d.Set("private_network_only", *result.PrivateNetworkOnlyFlag) + d.Set("hourly_billing", *result.HourlyBillingFlag) + d.Set("local_disk", *result.LocalDiskFlag) + + if result.PrimaryNetworkComponent != nil && result.PrimaryNetworkComponent.NetworkVlan != nil { + d.Set("public_vlan_id", *result.PrimaryNetworkComponent.NetworkVlan.Id) + } + + if result.PrimaryBackendNetworkComponent != nil && result.PrimaryBackendNetworkComponent.NetworkVlan != nil { + d.Set("private_vlan_id", *result.PrimaryBackendNetworkComponent.NetworkVlan.Id) + } + + if result.PrimaryNetworkComponent != nil && result.PrimaryNetworkComponent.PrimaryIpAddressRecord != nil { + publicSubnet := result.PrimaryNetworkComponent.PrimaryIpAddressRecord.Subnet + d.Set( + "public_subnet", + fmt.Sprintf("%s/%d", *publicSubnet.NetworkIdentifier, *publicSubnet.Cidr), + ) + d.Set("public_subnet_id", result.PrimaryNetworkComponent.PrimaryIpAddressRecord.SubnetId) + } + + if result.PrimaryNetworkComponent != nil && result.PrimaryNetworkComponent.SecurityGroupBindings != nil { + var sgs []int + for _, sg := range result.PrimaryNetworkComponent.SecurityGroupBindings { + sgs = append(sgs, *sg.SecurityGroup.Id) + } + d.Set("public_security_group_ids", sgs) + } + + if result.PrimaryBackendNetworkComponent != nil && result.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord != nil { + privateSubnet := result.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord.Subnet + d.Set( + "private_subnet", + fmt.Sprintf("%s/%d", *privateSubnet.NetworkIdentifier, *privateSubnet.Cidr), + ) + d.Set("private_subnet_id", result.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord.SubnetId) + + } + + if result.PrimaryBackendNetworkComponent != nil && result.PrimaryBackendNetworkComponent.SecurityGroupBindings != nil { + var sgs []int + for _, sg := range result.PrimaryBackendNetworkComponent.SecurityGroupBindings { + sgs = append(sgs, *sg.SecurityGroup.Id) + } + d.Set("private_security_group_ids", sgs) + } + + d.Set("ipv6_enabled", false) + d.Set("ipv6_static_enabled", false) + if result.PrimaryNetworkComponent != nil && result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord != nil { + d.Set("ipv6_enabled", true) + d.Set("ipv6_address", *result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.IpAddress) + d.Set("ipv6_address_id", 
*result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.GuestNetworkComponentBinding.IpAddressId)
+		publicSubnet := result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.Subnet
+		log.Printf("[DEBUG] public IPv6 subnet: %s/%d, subnet id: %v",
+			*publicSubnet.NetworkIdentifier, *publicSubnet.Cidr,
+			result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.SubnetId)
+		d.Set(
+			"public_ipv6_subnet",
+			fmt.Sprintf("%s/%d", *publicSubnet.NetworkIdentifier, *publicSubnet.Cidr),
+		)
+		d.Set("public_ipv6_subnet_id", result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.SubnetId)
+	}
+	if result.PrimaryNetworkComponent != nil {
+		for _, subnet := range result.PrimaryNetworkComponent.Subnets {
+			if *subnet.SubnetType == "STATIC_IP_ROUTED_6" {
+				d.Set("ipv6_static_enabled", true)
+			}
+		}
+	}
+
+	userData := result.UserData
+
+	if len(userData) > 0 {
+		d.Set("user_metadata", userData[0].Value)
+	}
+
+	d.Set("notes", sl.Get(result.Notes, nil))
+
+	tagReferences := result.TagReferences
+	tagReferencesLen := len(tagReferences)
+	if tagReferencesLen > 0 {
+		tags := make([]string, 0, tagReferencesLen)
+		for _, tagRef := range tagReferences {
+			tags = append(tags, *tagRef.Tag.Name)
+		}
+		d.Set("tags", tags)
+	}
+
+	storages := result.AllowedNetworkStorage
+	d.Set("block_storage_ids", flattenBlockStorageID(storages))
+	d.Set("file_storage_ids", flattenFileStorageID(storages))
+
+	sshKeys := result.SshKeys
+	if len(sshKeys) > 0 {
+		d.Set("ssh_key_ids", flattenSSHKeyIDs(sshKeys))
+	}
+
+	// Set connection info
+	connInfo := map[string]string{"type": "ssh"}
+	if !*result.PrivateNetworkOnlyFlag && result.PrimaryIpAddress != nil {
+		connInfo["host"] = *result.PrimaryIpAddress
+	} else if result.PrimaryBackendIpAddress != nil {
+		connInfo["host"] = *result.PrimaryBackendIpAddress
+	}
+	d.SetConnInfo(connInfo)
+
+	if _, ok := sl.GrabOk(result, "EvaultNetworkStorage"); ok {
+		if len(result.EvaultNetworkStorage) > 0 {
+			d.Set("evault", result.EvaultNetworkStorage[0].CapacityGb)
+		}
+	}
+	d.Set(ResourceControllerURL, fmt.Sprintf("https://cloud.ibm.com/gen1/infrastructure/virtual-server/%s/details#main", d.Id()))
+	d.Set(ResourceName, *result.Hostname)
+	d.Set(ResourceStatus, *result.Status.Name)
+	err = readSecondaryIPAddresses(d, meta, result.PrimaryIpAddress)
+	return err
+}
+
+func readSecondaryIPAddresses(d *schema.ResourceData, meta interface{}, primaryIPAddress *string) error {
+	d.Set("secondary_ip_addresses", nil)
+	if primaryIPAddress != nil {
+		secondarySubnetResult, err := services.GetAccountService(meta.(ClientSession).SoftLayerSession()).
+			Mask("ipAddresses[id,ipAddress],subnetType").
+			Filter(filter.Build(filter.Path("publicSubnets.endPointIpAddress.ipAddress").Eq(*primaryIPAddress))).
+			GetPublicSubnets()
+		if err != nil {
+			log.Printf("Error getting secondary IP addresses: %s", err)
+		}
+
+		secondaryIps := make([]string, 0)
+		for _, subnet := range secondarySubnetResult {
+			// Count static secondary IP addresses.
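+			// Illustrative note: the account's public subnets can include
+			// several subnet types; only those whose subnetType matches
+			// staticIPRouted contribute their IPs to `secondary_ip_addresses`.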
+			if *subnet.SubnetType == staticIPRouted {
+				for _, ipAddressObj := range subnet.IpAddresses {
+					secondaryIps = append(secondaryIps, *ipAddressObj.IpAddress)
+				}
+			}
+		}
+		if len(secondaryIps) > 0 {
+			d.Set("secondary_ip_addresses", secondaryIps)
+			d.Set("secondary_ip_count", len(secondaryIps))
+		}
+	}
+	return nil
+}
+func resourceIBMComputeVmInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
+
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetVirtualGuestService(sess)
+
+	parts, err := vmIdParts(d.Id())
+	if err != nil {
+		return err
+	}
+	id, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(id).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving virtual guest: %s", err)
+	}
+
+	isChanged := false
+
+	// Update the "hostname" and "domain" fields if present and changed.
+	// These are the only fields that can be edited in place.
+	if d.HasChange("hostname") || d.HasChange("domain") {
+		result.Hostname = sl.String(d.Get("hostname").(string))
+		result.Domain = sl.String(d.Get("domain").(string))
+		isChanged = true
+	}
+
+	if d.HasChange("notes") {
+		result.Notes = sl.String(d.Get("notes").(string))
+		isChanged = true
+	}
+
+	if isChanged {
+		_, err = service.Id(id).EditObject(&result)
+		if err != nil {
+			return fmt.Errorf("Couldn't update virtual guest: %s", err)
+		}
+	}
+
+	// Update tags
+	if d.HasChange("tags") {
+		tags := getTags(d)
+		err := setGuestTags(id, tags, meta)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = modifyStorageAccess(service.Id(id), id, meta, d)
+	if err != nil {
+		return err
+	}
+
+	// Upgrade "cores", "memory" and "network_speed" if provided and changed
+	upgradeOptions := map[string]float64{}
+	if d.HasChange("cores") {
+		upgradeOptions[product.CPUCategoryCode] = float64(d.Get("cores").(int))
+	}
+
+	if d.HasChange("memory") {
+		memoryInMB := float64(d.Get("memory").(int))
+
+		// Convert memory to GB, as SoftLayer only allows upgrading RAM in GB.
+		// Must already be validated at this step.
+		upgradeOptions[product.MemoryCategoryCode] = float64(int(memoryInMB / 1024))
+	}
+
+	if d.HasChange("network_speed") {
+		upgradeOptions[product.NICSpeedCategoryCode] = float64(d.Get("network_speed").(int))
+	}
+
+	if d.HasChange("disks") {
+		oldDisks, newDisks := d.GetChange("disks")
+		oldDisk := oldDisks.([]interface{})
+		newDisk := newDisks.([]interface{})
+
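+		// Illustrative example (assumed values): upgradeOptions maps SoftLayer
+		// product category codes to target quantities, so with
+		// product.CPUCategoryCode = "guest_core" it might hold
+		//   {"guest_core": 4, "ram": 8, "port_speed": 1000}
+		// The disk handling below adds entries under "guest_diskN" keys.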
+		// Removing disks is not supported for now.
+		if len(oldDisk) > len(newDisk) {
+			return fmt.Errorf("Removing drives is not supported.")
+		}
+
+		var diskName string
+		// Update existing disks if their size changed. On flavor-based guests
+		// the first disk appears to be the system disk, so additional disks
+		// are keyed guest_disk1..N; otherwise numbering starts at guest_disk0.
+		for i := 0; i < len(oldDisk); i++ {
+			if newDisk[i].(int) != oldDisk[i].(int) {
+
+				if _, ok := d.GetOk("flavor_key_name"); ok {
+					diskName = fmt.Sprintf("guest_disk%d", i+1)
+				} else {
+					diskName = fmt.Sprintf("guest_disk%d", i)
+				}
+				capacity := newDisk[i].(int)
+				upgradeOptions[diskName] = float64(capacity)
+			}
+		}
+		// Add new disks
+		for i := len(oldDisk); i < len(newDisk); i++ {
+			if _, ok := d.GetOk("flavor_key_name"); ok {
+				diskName = fmt.Sprintf("guest_disk%d", i+1)
+			} else {
+				diskName = fmt.Sprintf("guest_disk%d", i)
+			}
+			capacity := newDisk[i].(int)
+			upgradeOptions[diskName] = float64(capacity)
+		}
+
+	}
+
+	if len(upgradeOptions) > 0 || d.HasChange("flavor_key_name") {
+
+		if _, ok := d.GetOk("flavor_key_name"); ok {
+			presetKeyName := d.Get("flavor_key_name").(string)
+			_, err = virtual.UpgradeVirtualGuestWithPreset(sess.SetRetries(0), &result, presetKeyName, upgradeOptions)
+			if err != nil {
+				return fmt.Errorf("Couldn't upgrade virtual guest: %s", err)
+			}
+
+		} else {
+			_, err = virtual.UpgradeVirtualGuest(sess.SetRetries(0), &result, upgradeOptions)
+			if err != nil {
+				return fmt.Errorf("Couldn't upgrade virtual guest: %s", err)
+			}
+		}
+
+		// Wait for SoftLayer to start upgrading...
+		_, err = WaitForUpgradeTransactionsToAppear(d, meta)
+		if err != nil {
+			return err
+		}
+		// Wait for upgrade transactions to finish
+		_, err = WaitForNoActiveTransactions(id, d, d.Timeout(schema.TimeoutUpdate), meta)
+		if err != nil {
+			return err
+		}
+
+	}
+
+	return resourceIBMComputeVmInstanceRead(d, meta)
+}
+
+func modifyStorageAccess(sam storageAccessModifier, deviceID int, meta interface{}, d *schema.ResourceData) error {
+	var remove, add []int
+	if d.HasChange("file_storage_ids") {
+		o, n := d.GetChange("file_storage_ids")
+		os := o.(*schema.Set)
+		ns := n.(*schema.Set)
+
+		remove = expandIntList(os.Difference(ns).List())
+		add = expandIntList(ns.Difference(os).List())
+	}
+	if d.HasChange("block_storage_ids") {
+		o, n := d.GetChange("block_storage_ids")
+		os := o.(*schema.Set)
+		ns := n.(*schema.Set)
+
+		remove = append(remove, expandIntList(os.Difference(ns).List())...)
+		add = append(add, expandIntList(ns.Difference(os).List())...)
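+		// Worked example (assumed IDs): old = {10, 20}, new = {20, 30}
+		// leaves remove = [10] and add = [30].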
+	}
+
+	if len(add) > 0 {
+		err := addAccessToStorageList(sam, deviceID, add, meta)
+		if err != nil {
+			return err
+		}
+	}
+	if len(remove) > 0 {
+		err := removeAccessToStorageList(sam, deviceID, remove, meta)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func resourceIBMComputeVmInstanceDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetVirtualGuestService(sess)
+	parts, err := vmIdParts(d.Id())
+	if err != nil {
+		return err
+	}
+	for _, part := range parts {
+		id, err := strconv.Atoi(part)
+		if err != nil {
+			return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+		}
+
+		_, err = WaitForNoActiveTransactions(id, d, d.Timeout(schema.TimeoutDelete), meta)
+
+		if err != nil {
+			return fmt.Errorf("Error deleting virtual guest, couldn't wait for zero active transactions: %s", err)
+		}
+		err = detachSecurityGroupNetworkComponentBindings(d, meta, id)
+		if err != nil {
+			return err
+		}
+		ok, err := service.Id(id).DeleteObject()
+		if err != nil {
+			return fmt.Errorf("Error deleting virtual guest: %s", err)
+		}
+
+		if !ok {
+			return fmt.Errorf(
+				"API reported it was unsuccessful in removing the virtual guest '%d'", id)
+		}
+	}
+
+	return nil
+}
+
+func detachSecurityGroupNetworkComponentBindings(d *schema.ResourceData, meta interface{}, id int) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetVirtualGuestService(sess)
+	publicSgIDs := d.Get("public_security_group_ids").(*schema.Set).List()
+	privateSgIDS := d.Get("private_security_group_ids").(*schema.Set).List()
+	if len(publicSgIDs) == 0 && len(privateSgIDS) == 0 {
+		log.Println("No security groups specified, hence no detachment required before delete operation")
+		return nil
+	}
+	vsi, err := service.Id(id).Mask(
+		"primaryNetworkComponent[id,securityGroupBindings[securityGroupId,networkComponentId]]," +
+			"primaryBackendNetworkComponent[id,securityGroupBindings[securityGroupId,networkComponentId]]",
+	).GetObject()
+
+	if err != nil {
+		return err
+	}
+	sgService := services.GetNetworkSecurityGroupService(sess)
+	// Detach security groups before deleting: the VSI delete takes some time,
+	// and if a security group referenced by the VSI is destroyed in the same
+	// Terraform run while the delete is still in progress, that destroy fails.
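+	// Worked example (assumed IDs): with public_security_group_ids = [100]
+	// and the primary network component (id 7) bound to security group 100,
+	// the loop below calls DetachNetworkComponents([]int{7}) on group 100.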
+ for _, v := range publicSgIDs { + sgID := v.(int) + for _, v := range vsi.PrimaryNetworkComponent.SecurityGroupBindings { + if sgID == *v.SecurityGroupId { + _, err := sgService.Id(sgID).DetachNetworkComponents([]int{*v.NetworkComponentId}) + if err != nil { + return err + } + } + } + } + for _, v := range privateSgIDS { + sgID := v.(int) + for _, v := range vsi.PrimaryBackendNetworkComponent.SecurityGroupBindings { + if sgID == *v.SecurityGroupId { + _, err := sgService.Id(sgID).DetachNetworkComponents([]int{*v.NetworkComponentId}) + if err != nil { + return err + } + } + } + } + return nil +} + +//genID generates a random string to be used for the optional +//hostname +func genID() (interface{}, error) { + numBytes := 8 + bytes := make([]byte, numBytes) + n, err := rand.Reader.Read(bytes) + if err != nil { + return nil, err + } + + if n != numBytes { + return nil, errors.New("generated insufficient random bytes") + } + + hexStr := hex.EncodeToString(bytes) + return fmt.Sprintf("terraformed-%s", hexStr), nil +} + +// WaitForUpgradeTransactionsToAppear Wait for upgrade transactions +func WaitForUpgradeTransactionsToAppear(d *schema.ResourceData, meta interface{}) (interface{}, error) { + log.Printf("Waiting for server (%s) to have upgrade transactions", d.Id()) + + parts, err := vmIdParts(d.Id()) + if err != nil { + return nil, err + } + id, err := strconv.Atoi(parts[0]) + if err != nil { + return nil, fmt.Errorf("The instance ID %s must be numeric", d.Id()) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", pendingUpgrade}, + Target: []string{inProgressUpgrade}, + Refresh: func() (interface{}, string, error) { + service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession()) + transactions, err := service.Id(id).GetActiveTransactions() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Couldn't fetch active transactions: %s", err) + } + return false, "retry", nil + } + for _, transaction := range transactions { + if strings.Contains(*transaction.TransactionStatus.Name, upgradeTransaction) { + return transactions, inProgressUpgrade, nil + } + } + return transactions, pendingUpgrade, nil + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 5 * time.Second, + } + + return stateConf.WaitForState() +} + +// WaitForNoActiveTransactions Wait for no active transactions +func WaitForNoActiveTransactions(id int, d *schema.ResourceData, timeout time.Duration, meta interface{}) (interface{}, error) { + log.Printf("Waiting for server (%s) to have zero active transactions", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", activeTransaction}, + Target: []string{idleTransaction}, + Refresh: func() (interface{}, string, error) { + service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession()) + transactions, err := service.Id(id).GetActiveTransactions() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", nil + } + return false, "retry", fmt.Errorf("Couldn't get active transactions: %s", err) + } + if len(transactions) == 0 { + return transactions, idleTransaction, nil + } + return transactions, activeTransaction, nil + }, + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +// WaitForVirtualGuestAvailable Waits for virtual guest creation +func WaitForVirtualGuestAvailable(id int, d 
*schema.ResourceData, meta interface{}) (interface{}, error) { + log.Printf("Waiting for server (%s) to be available.", d.Id()) + sess := meta.(ClientSession).SoftLayerSession() + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", virtualGuestProvisioning}, + Target: []string{virtualGuestAvailable}, + Refresh: virtualGuestStateRefreshFunc(sess, id, d), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func virtualGuestStateRefreshFunc(sess *session.Session, instanceID int, d *schema.ResourceData) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + // Check active transactions + publicNetwork := !d.Get("private_network_only").(bool) + service := services.GetVirtualGuestService(sess) + result, err := service.Id(instanceID).Mask("activeTransaction,primaryBackendIpAddress,primaryIpAddress").GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Error retrieving virtual guest: %s", err) + } + return false, "retry", nil + } + // Check active transactions + log.Println("Checking active transactions.") + if result.ActiveTransaction != nil { + return result, virtualGuestProvisioning, nil + } + + // Check Primary IP address availability. + log.Println("Checking primary backend IP address.") + if result.PrimaryBackendIpAddress == nil { + return result, virtualGuestProvisioning, nil + } + + log.Println("Checking primary IP address.") + if publicNetwork && result.PrimaryIpAddress == nil { + return result, virtualGuestProvisioning, nil + } + + // Check Secondary IP address availability. + if d.Get("secondary_ip_count").(int) > 0 { + log.Println("Refreshing secondary IPs state.") + secondarySubnetResult, err := services.GetAccountService(sess). + Mask("ipAddresses[id,ipAddress]"). + Filter(filter.Build(filter.Path("publicSubnets.endPointIpAddress.virtualGuest.id").Eq(fmt.Sprintf("%d", instanceID)))). 
+ GetPublicSubnets() + if err != nil { + return nil, "", fmt.Errorf("Error retrieving secondary ip address: %s", err) + } + if len(secondarySubnetResult) == 0 { + return result, virtualGuestProvisioning, nil + } + } + + return result, virtualGuestAvailable, nil + } +} + +func resourceIBMComputeVmInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession()) + parts, err := vmIdParts(d.Id()) + if err != nil { + return false, err + } + guestID, err := strconv.Atoi(parts[0]) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(guestID).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return result.Id != nil && *result.Id == guestID, nil +} + +func getTags(d dataRetriever) string { + tagSet := d.Get("tags").(*schema.Set) + + if tagSet.Len() == 0 { + return "" + } + + tags := make([]string, 0, tagSet.Len()) + for _, elem := range tagSet.List() { + tag := elem.(string) + tags = append(tags, tag) + } + return strings.Join(tags, ",") +} + +func setGuestTags(id int, tags string, meta interface{}) error { + service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession()) + _, err := service.Id(id).SetTags(sl.String(tags)) + if err != nil { + return fmt.Errorf("Could not set tags on virtual guest %d", id) + } + return nil +} + +type storageAccessModifier interface { + AllowAccessToNetworkStorageList([]datatypes.Network_Storage) (resp bool, err error) + RemoveAccessToNetworkStorageList([]datatypes.Network_Storage) (resp bool, err error) +} + +func addAccessToStorageList(sam storageAccessModifier, deviceID int, ids storageIds, meta interface{}) error { + s, err := ids.Storages(meta) + if err != nil { + return err + } + for { + _, err := sam.AllowAccessToNetworkStorageList(s) + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.Exception == networkStorageMassAccessControlModificationException { + log.Printf("[DEBUG] Allow access to storage failed with error %q. Will retry again after %q", err, retryDelayForModifyingStorageAccess) + time.Sleep(retryDelayForModifyingStorageAccess) + continue + } + return fmt.Errorf("Could not authorize Device %d, access to the following storages %q, %q", deviceID, ids, err) + } + log.Printf("[INFO] Device authorized to access %q", ids) + break + } + return nil +} + +func removeAccessToStorageList(sam storageAccessModifier, deviceID int, ids storageIds, meta interface{}) error { + s, err := ids.Storages(meta) + if err != nil { + return err + } + for { + _, err := sam.RemoveAccessToNetworkStorageList(s) + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.Exception == networkStorageMassAccessControlModificationException { + log.Printf("[DEBUG] Remove access to storage failed with error %q. 
Will retry again after %q", err, retryDelayForModifyingStorageAccess)
+				time.Sleep(retryDelayForModifyingStorageAccess)
+				continue
+			}
+			return fmt.Errorf("Could not remove Device %d, access to the following storages %q, %q", deviceID, ids, err)
+		}
+		log.Printf("[INFO] Device's access to %q has been removed", ids)
+		break
+	}
+	return nil
+}
+
+func setNotes(id int, d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetVirtualGuestService(sess)
+
+	if notes := d.Get("notes").(string); notes != "" {
+		result, err := service.Id(id).GetObject()
+		if err != nil {
+			return fmt.Errorf("Error retrieving virtual guest: %s", err)
+		}
+
+		result.Notes = sl.String(notes)
+
+		_, err = service.Id(id).EditObject(&result)
+		if err != nil {
+			return fmt.Errorf("Could not set note on virtual guest %d", id)
+		}
+	}
+
+	return nil
+}
+
+func placeOrder(d *schema.ResourceData, meta interface{}, name string, publicVlanID, privateVlanID, quote_id int) (datatypes.Container_Product_Order_Receipt, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetVirtualGuestService(sess)
+
+	options, err := getVirtualGuestTemplateFromResourceData(d, meta, name, publicVlanID, privateVlanID, quote_id)
+	if err != nil {
+		return datatypes.Container_Product_Order_Receipt{}, err
+	}
+	guestOrders := make([]datatypes.Container_Product_Order, 0)
+	var template datatypes.Container_Product_Order
+	if quote_id > 0 {
+		// Build a virtual instance template from the quote.
+		template, err = services.GetBillingOrderQuoteService(sess).
+			Id(quote_id).GetRecalculatedOrderContainer(nil, sl.Bool(false))
+		if err != nil {
+			return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf(
+				"Encountered problem trying to get the virtual machine order template from quote: %s", err)
+		}
+		template.Quantity = sl.Int(1)
+		template.ComplexType = sl.String("SoftLayer_Container_Product_Order_Virtual_Guest")
+		template.VirtualGuests = make([]datatypes.Virtual_Guest, 0, 1)
+		template.VirtualGuests = append(
+			template.VirtualGuests,
+			options[0],
+		)
+		// Get configured ssh_keys
+		sshKeySet := d.Get("ssh_key_ids").(*schema.Set)
+		sshKeys := sshKeySet.List()
+		sshKeyLen := len(sshKeys)
+		if sshKeyLen > 0 {
+			sshKeyA := make([]int, sshKeyLen)
+			template.SshKeys = make([]datatypes.Container_Product_Order_SshKeys, 0, sshKeyLen)
+			for i, sshKey := range sshKeys {
+				sshKeyA[i] = sshKey.(int)
+			}
+			template.SshKeys = append(template.SshKeys, datatypes.Container_Product_Order_SshKeys{
+				SshKeyIds: sshKeyA,
+			})
+		}
+		if rawImageTemplateId, ok := d.GetOk("image_id"); ok {
+			imageTemplateId := rawImageTemplateId.(int)
+			template.ImageTemplateId = sl.Int(imageTemplateId)
+		}
+
+		if postInstallURI, ok := d.GetOk("post_install_script_uri"); ok {
+			postInstallURIA := make([]string, 1)
+			postInstallURIA[0] = postInstallURI.(string)
+			template.ProvisionScripts = postInstallURIA
+		}
+
+		guestOrders = append(guestOrders, template)
+		order := &datatypes.Container_Product_Order{
+			OrderContainers: guestOrders,
+		}
+		receipt, err1 := services.GetBillingOrderQuoteService(sess).
+			Id(quote_id).PlaceOrder(order)
+		return receipt, err1
+	}
+	for i := 0; i < len(options); i++ {
+		opts := options[i]
+
+		log.Println("[INFO] Creating virtual machine")
+
+		// Build an order template with a custom image.
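+		// GenerateOrderTemplate appears to require an operating system
+		// reference code to price the order, so a throwaway OS (UBUNTU_LATEST)
+		// is substituted while the template is generated; its prices are
+		// stripped again below and the custom image is restored on the guest.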
+		if opts.BlockDevices != nil && opts.BlockDeviceTemplateGroup != nil {
+			bd := *opts.BlockDeviceTemplateGroup
+			opts.BlockDeviceTemplateGroup = nil
+			opts.OperatingSystemReferenceCode = sl.String("UBUNTU_LATEST")
+			template, err = service.GenerateOrderTemplate(&opts)
+			if err != nil {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Error generating order template: %s", err)
+			}
+
+			// Remove temporary OS from actual order
+			prices := make([]datatypes.Product_Item_Price, len(template.Prices))
+			i := 0
+			for _, p := range template.Prices {
+				if !strings.Contains(*p.Item.Description, "Ubuntu") {
+					prices[i] = p
+					i++
+				}
+			}
+			template.Prices = prices[:i]
+
+			template.ImageTemplateId = sl.Int(d.Get("image_id").(int))
+			template.VirtualGuests[0].BlockDeviceTemplateGroup = &bd
+			template.VirtualGuests[0].OperatingSystemReferenceCode = nil
+		} else {
+			// Build an order template with os_reference_code
+			template, err = service.GenerateOrderTemplate(&opts)
+			if err != nil {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Error generating order template: %s", err)
+			}
+		}
+
+		items, err := product.GetPackageProducts(sess, *template.PackageId, productItemMaskWithPriceLocationGroupID)
+		if err != nil {
+			return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Error fetching package products: %s", err)
+		}
+
+		privateNetworkOnly := d.Get("private_network_only").(bool)
+
+		secondaryIPCount := d.Get("secondary_ip_count").(int)
+		if secondaryIPCount > 0 {
+			if privateNetworkOnly {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Unable to configure public secondary addresses with a private_network_only option")
+			}
+			keyName := strconv.Itoa(secondaryIPCount) + "_PUBLIC_IP_ADDRESSES"
+			price, err := getItemPriceId(items, "sec_ip_addresses", keyName)
+			if err != nil {
+				return datatypes.Container_Product_Order_Receipt{}, err
+			}
+			template.Prices = append(template.Prices, price)
+		}
+
+		if d.Get("ipv6_enabled").(bool) {
+			if privateNetworkOnly {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Unable to configure a public IPv6 address with a private_network_only option")
+			}
+			price, err := getItemPriceId(items, "pri_ipv6_addresses", "1_IPV6_ADDRESS")
+			if err != nil {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Error getting price for the IPv6 address: %s", err)
+			}
+			template.Prices = append(template.Prices, price)
+		}
+
+		if d.Get("ipv6_static_enabled").(bool) {
+			if privateNetworkOnly {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Unable to configure a public static IPv6 address with a private_network_only option")
+			}
+			price, err := getItemPriceId(items, "static_ipv6_addresses", "64_BLOCK_STATIC_PUBLIC_IPV6_ADDRESSES")
+			if err != nil {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Error getting price for static IPv6 addresses: %s", err)
+			}
+			template.Prices = append(template.Prices, price)
+		}
+
+		// Add optional price ids.
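+		// Illustrative (assumed key names): optional prices are looked up by
+		// catalog key, e.g. "BANDWIDTH_500_GB" for public_bandwidth_limited =
+		// 500, or "EVAULT_40_GB" for evault = 40.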
+		// Add public bandwidth limited
+		if publicBandwidth, ok := d.GetOk("public_bandwidth_limited"); ok {
+			if *opts.HourlyBillingFlag {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Unable to configure public bandwidth when hourly_billing is true")
+			}
+			// Remove default bandwidth price
+			prices := make([]datatypes.Product_Item_Price, len(template.Prices))
+			i := 0
+			for _, p := range template.Prices {
+				item := p.Item
+				if item != nil {
+					if strings.Contains(*item.Description, "Bandwidth") {
+						continue
+					}
+				}
+				prices[i] = p
+				i++
+			}
+			template.Prices = prices[:i]
+			keyName := "BANDWIDTH_" + strconv.Itoa(publicBandwidth.(int)) + "_GB"
+			price, err := getItemPriceId(items, "bandwidth", keyName)
+			if err != nil {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Error getting price for bandwidth: %s", err)
+			}
+			template.Prices = append(template.Prices, price)
+		}
+
+		// Add public bandwidth unlimited
+		publicUnlimitedBandwidth := d.Get("public_bandwidth_unlimited").(bool)
+		if publicUnlimitedBandwidth {
+			if *opts.HourlyBillingFlag {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Unable to configure public bandwidth when hourly_billing is true")
+			}
+			networkSpeed := d.Get("network_speed").(int)
+			if networkSpeed != 100 {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Network speed must be 100 Mbps to configure public bandwidth unlimited")
+			}
+			// Remove default bandwidth price
+			prices := make([]datatypes.Product_Item_Price, len(template.Prices))
+			i := 0
+			for _, p := range template.Prices {
+				item := p.Item
+				if item != nil {
+					if strings.Contains(*item.Description, "Bandwidth") {
+						continue
+					}
+				}
+				prices[i] = p
+				i++
+			}
+			template.Prices = prices[:i]
+			price, err := getItemPriceId(items, "bandwidth", "BANDWIDTH_UNLIMITED_100_MBPS_UPLINK")
+			if err != nil {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Error getting price for unlimited bandwidth: %s", err)
+			}
+			template.Prices = append(template.Prices, price)
+		}
+
+		if evault, ok := d.GetOk("evault"); ok {
+			if *opts.HourlyBillingFlag {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Unable to configure an evault when hourly_billing is true")
+			}
+
+			keyName := "EVAULT_" + strconv.Itoa(evault.(int)) + "_GB"
+			price, err := getItemPriceId(items, "evault", keyName)
+			if err != nil {
+				return datatypes.Container_Product_Order_Receipt{}, fmt.Errorf("Error getting price for evault: %s", err)
+			}
+			template.Prices = append(template.Prices, price)
+		}
+		// GenerateOrderTemplate omits UserData, subnet, and maxSpeed, so configure virtual_guest.
+		template.VirtualGuests[0] = opts
+		guestOrders = append(guestOrders, template)
+
+	}
+	order := &datatypes.Container_Product_Order{
+		OrderContainers: guestOrders,
+	}
+
+	orderService := services.GetProductOrderService(sess.SetRetries(0))
+	receipt, err1 := orderService.PlaceOrder(order, sl.Bool(false))
+	return receipt, err1
+
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_addons.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_addons.go
new file mode 100644
index 00000000000..ed9f967551d
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_addons.go
@@ -0,0 +1,468 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode" +) + +func resourceIBMContainerAddOns() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMContainerAddOnsCreate, + Read: resourceIBMContainerAddOnsRead, + Update: resourceIBMContainerAddOnsUpdate, + Delete: resourceIBMContainerAddOnsDelete, + Exists: resourceIBMContainerAddOnsExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + Description: "Cluster Name or ID", + }, + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + ForceNew: true, + Computed: true, + }, + "addons": { + Type: schema.TypeSet, + Required: true, + Set: resourceIBMContainerAddonsHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The addon name such as 'istio'.", + }, + "version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: false, + Description: "The addon version, omit the version if you wish to use the default version.", + }, + "allowed_upgrade_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The versions that the addon can be upgraded to", + }, + "deprecated": { + Type: schema.TypeBool, + Computed: true, + Description: "Determines if this addon version is deprecated", + }, + "health_state": { + Type: schema.TypeString, + Computed: true, + Description: "The health state for this addon, a short indication (e.g. critical, pending)", + }, + "health_status": { + Type: schema.TypeString, + Computed: true, + Description: "The health status for this addon, provides a description of the state (e.g. 
error message)",
+				},
+				"min_kube_version": {
+					Type:        schema.TypeString,
+					Computed:    true,
+					Description: "The minimum kubernetes version for this addon.",
+				},
+				"min_ocp_version": {
+					Type:        schema.TypeString,
+					Computed:    true,
+					Description: "The minimum OpenShift version for this addon.",
+				},
+				"supported_kube_range": {
+					Type:        schema.TypeString,
+					Computed:    true,
+					Description: "The supported kubernetes version range for this addon.",
+				},
+				"target_version": {
+					Type:        schema.TypeString,
+					Computed:    true,
+					Description: "The addon target version.",
+				},
+				"vlan_spanning_required": {
+					Type:        schema.TypeBool,
+					Computed:    true,
+					Description: "VLAN spanning required for multi-zone clusters",
+				},
+			},
+		},
+	},
+	}
+}
+func resourceIBMContainerAddOnsCreate(d *schema.ResourceData, meta interface{}) error {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+	addOnAPI := csClient.AddOns()
+
+	targetEnv, err := getClusterTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+	cluster := d.Get("cluster").(string)
+	existingAddons, err := addOnAPI.GetAddons(cluster, targetEnv)
+	if err != nil {
+		fmt.Printf("[ WARN ] Error getting addons: %s\n", err)
+	}
+
+	payload, err := expandAddOns(d, meta, cluster, targetEnv, existingAddons)
+	if err != nil {
+		return fmt.Errorf("Error in getting addons from expandAddOns %s", err)
+	}
+	payload.Enable = true
+	_, err = addOnAPI.ConfigureAddons(cluster, &payload, targetEnv)
+	if err != nil {
+		return err
+	}
+	_, err = waitForContainerAddOns(d, meta, cluster, schema.TimeoutCreate)
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for Enabling Addon (%s) : %s", d.Id(), err)
+	}
+	d.SetId(cluster)
+
+	return resourceIBMContainerAddOnsRead(d, meta)
+}
+func expandAddOns(d *schema.ResourceData, meta interface{}, cluster string, targetEnv v1.ClusterTargetHeader, existingAddons []v1.AddOn) (addOns v1.ConfigureAddOns, err error) {
+	addOnSet := d.Get("addons").(*schema.Set).List()
+	if len(existingAddons) == 0 {
+		for _, aoSet := range addOnSet {
+			ao, _ := aoSet.(map[string]interface{})
+			addOn := v1.AddOn{
+				Name: ao["name"].(string),
+			}
+			if ao["version"] != nil {
+				addOn.Version = ao["version"].(string)
+			}
+			addOns.AddonsList = append(addOns.AddonsList, addOn)
+		}
+	} else {
+		for _, aoSet := range addOnSet {
+			ao, _ := aoSet.(map[string]interface{})
+			exist := false
+			for _, existAddon := range existingAddons {
+				if existAddon.Name == ao["name"].(string) {
+					exist = true
+					if existAddon.Version != ao["version"].(string) {
+						err := updateAddOnVersion(d, meta, ao, cluster, targetEnv)
+						if err != nil {
+							return addOns, err
+						}
+					}
+				}
+			}
+			if !exist {
+				addOn := v1.AddOn{
+					Name: ao["name"].(string),
+				}
+				if ao["version"] != nil {
+					addOn.Version = ao["version"].(string)
+				}
+				addOns.AddonsList = append(addOns.AddonsList, addOn)
+			}
+		}
+	}
+
+	return addOns, nil
+}
+func updateAddOnVersion(d *schema.ResourceData, meta interface{}, u map[string]interface{}, cluster string, targetEnv v1.ClusterTargetHeader) error {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+	addOnAPI := csClient.AddOns()
+
+	update := v1.AddOn{
+		Name: u["name"].(string),
+	}
+	if u["version"].(string) != "" {
+		update.Version = u["version"].(string)
+	}
+	updateList := v1.ConfigureAddOns{}
+	updateList.AddonsList = append(updateList.AddonsList, update)
+	updateList.Update = true
+	_, err = addOnAPI.ConfigureAddons(cluster,
&updateList, targetEnv)
+	if err != nil {
+		return err
+	}
+	if !d.IsNewResource() {
+		_, err = waitForContainerAddOns(d, meta, cluster, schema.TimeoutUpdate)
+		if err != nil {
+			return fmt.Errorf(
+				"Error waiting for Updating Addon (%s) : %s", d.Id(), err)
+		}
+	}
+
+	return nil
+}
+func resourceIBMContainerAddOnsRead(d *schema.ResourceData, meta interface{}) error {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+	addOnAPI := csClient.AddOns()
+
+	targetEnv, err := getClusterTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+	cluster := d.Id()
+
+	result, err := addOnAPI.GetAddons(cluster, targetEnv)
+	if err != nil {
+		return err
+	}
+	d.Set("cluster", cluster)
+	addOns, err := flattenAddOns(result)
+	if err != nil {
+		fmt.Printf("Error flattening addons list: %s\n", err)
+	}
+	d.Set("resource_group_id", targetEnv.ResourceGroup)
+	d.Set("addons", addOns)
+	return nil
+}
+func flattenAddOns(result []v1.AddOn) (resp *schema.Set, err error) {
+	addOns := []interface{}{}
+	for _, addOn := range result {
+		record := map[string]interface{}{}
+		record["name"] = addOn.Name
+		record["version"] = addOn.Version
+		if len(addOn.AllowedUpgradeVersion) > 0 {
+			record["allowed_upgrade_versions"] = addOn.AllowedUpgradeVersion
+		}
+		// The address of a struct field is never nil, so these values can be
+		// set unconditionally.
+		record["deprecated"] = addOn.Deprecated
+		record["health_state"] = addOn.HealthState
+		record["health_status"] = addOn.HealthStatus
+		if addOn.MinKubeVersion != "" {
+			record["min_kube_version"] = addOn.MinKubeVersion
+		}
+		if addOn.MinOCPVersion != "" {
+			record["min_ocp_version"] = addOn.MinOCPVersion
+		}
+		if addOn.SupportedKubeRange != "" {
+			record["supported_kube_range"] = addOn.SupportedKubeRange
+		}
+		if addOn.TargetVersion != "" {
+			record["target_version"] = addOn.TargetVersion
+		}
+		record["vlan_spanning_required"] = addOn.VlanSpanningRequired
+		addOns = append(addOns, record)
+	}
+
+	return schema.NewSet(resourceIBMContainerAddonsHash, addOns), nil
+}
+func resourceIBMContainerAddOnsUpdate(d *schema.ResourceData, meta interface{}) error {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+	addOnAPI := csClient.AddOns()
+
+	targetEnv, err := getClusterTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+	cluster := d.Id()
+
+	if d.HasChange("addons") && !d.IsNewResource() {
+		oldList, newList := d.GetChange("addons")
+		if oldList == nil {
+			oldList = new(schema.Set)
+		}
+		if newList == nil {
+			newList = new(schema.Set)
+		}
+		os := oldList.(*schema.Set)
+		ns := newList.(*schema.Set)
+		for _, nA := range ns.List() {
+			newPack := nA.(map[string]interface{})
+			for _, oA := range os.List() {
+				oldPack := oA.(map[string]interface{})
+				if (strings.Compare(newPack["name"].(string), oldPack["name"].(string)) == 0) && (strings.Compare(newPack["version"].(string), oldPack["version"].(string)) != 0) && (newPack["version"].(string) != "") {
+					err := updateAddOnVersion(d, meta, newPack, cluster, targetEnv)
+					if err != nil {
+						return err
+					}
+					ns.Remove(nA)
+					os.Remove(oA)
+				}
+			}
+		}
+		remove := os.Difference(ns).List()
+		add := ns.Difference(os).List()
+
+		if len(add) > 0 {
+			addOnParams := v1.ConfigureAddOns{}
+			for _, addon := range add {
+				newAddon := addon.(map[string]interface{})
+				addOnParam := v1.AddOn{
+					Name: newAddon["name"].(string),
+				}
+				if newAddon["version"] != nil {
+					addOnParam.Version = newAddon["version"].(string)
+ } + addOnParams.AddonsList = append(addOnParams.AddonsList, addOnParam) + + } + addOnParams.Enable = true + _, err = addOnAPI.ConfigureAddons(cluster, &addOnParams, targetEnv) + if err != nil { + return err + } + _, err = waitForContainerAddOns(d, meta, cluster, schema.TimeoutCreate) + if err != nil { + return fmt.Errorf( + "Error waiting for Enabling Addon (%s) : %s", d.Id(), err) + } + } + if len(remove) > 0 { + addOnParams := v1.ConfigureAddOns{} + for _, addOn := range remove { + oldAddOn := addOn.(map[string]interface{}) + addOnParam := v1.AddOn{ + Name: oldAddOn["name"].(string), + } + if oldAddOn["version"] != nil { + addOnParam.Version = oldAddOn["version"].(string) + } + addOnParams.AddonsList = append(addOnParams.AddonsList, addOnParam) + } + addOnParams.Enable = false + _, err = addOnAPI.ConfigureAddons(cluster, &addOnParams, targetEnv) + if err != nil { + return err + } + } + } + + return resourceIBMContainerAddOnsRead(d, meta) +} +func resourceIBMContainerAddOnsDelete(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + addOnAPI := csClient.AddOns() + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + cluster := d.Id() + payload, err := expandAddOns(d, meta, cluster, targetEnv, nil) + if err != nil { + return fmt.Errorf("Error in getting addons from expandAddOns %s", err) + } + + payload.Enable = false + _, err = addOnAPI.ConfigureAddons(cluster, &payload, targetEnv) + if err != nil { + return err + } + + return nil +} +func waitForContainerAddOns(d *schema.ResourceData, meta interface{}, cluster, timeout string) (interface{}, error) { + addOnClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return false, err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending", "updating", ""}, + Target: []string{"normal", "warning", "critical", "available"}, + Refresh: func() (interface{}, string, error) { + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return nil, "", err + } + addOns, err := addOnClient.AddOns().GetAddons(cluster, targetEnv) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return nil, "", fmt.Errorf("The resource addons %s does not exist anymore: %v", d.Id(), err) + } + return nil, "", err + } + for _, addOn := range addOns { + if addOn.HealthState == "pending" || addOn.HealthState == "updating" || addOn.HealthState == "" { + return addOns, addOn.HealthState, nil + } + } + return addOns, "available", nil + }, + Timeout: d.Timeout(timeout), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} +func resourceIBMContainerAddOnsExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return false, err + } + addOnAPI := csClient.AddOns() + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return false, err + } + cluster := d.Id() + + _, err = addOnAPI.GetAddons(cluster, targetEnv) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return true, nil +} + +func resourceIBMContainerAddonsHash(v interface{}) int { + var buf bytes.Buffer + a := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", 
a["name"].(string)))
+	buf.WriteString(fmt.Sprintf("%s-", a["version"].(string)))
+
+	return hashcode.String(buf.String())
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_alb.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_alb.go
new file mode 100644
index 00000000000..59aeecd7d43
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_alb.go
@@ -0,0 +1,311 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+	v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1"
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+)
+
+func resourceIBMContainerALB() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMContainerALBCreate,
+		Read:     resourceIBMContainerALBRead,
+		Update:   resourceIBMContainerALBUpdate,
+		Delete:   resourceIBMContainerALBDelete,
+		Importer: &schema.ResourceImporter{},
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(60 * time.Minute),
+			Update: schema.DefaultTimeout(60 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"alb_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "ALB ID",
+			},
+			"alb_type": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "ALB type",
+			},
+			"cluster": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Cluster ID",
+			},
+			"user_ip": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				ForceNew:    true,
+				Description: "IP assigned by the user",
+			},
+			"enable": {
+				Type:          schema.TypeBool,
+				Optional:      true,
+				Computed:      true,
+				ConflictsWith: []string{"disable_deployment"},
+				Description:   "Set to true if ALB needs to be enabled",
+			},
+			"disable_deployment": {
+				Type:          schema.TypeBool,
+				Optional:      true,
+				Computed:      true,
+				ForceNew:      true,
+				ConflictsWith: []string{"enable"},
+				Description:   "Set to true if ALB needs to be disabled",
+			},
+			"name": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "ALB name",
+			},
+			"zone": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "ALB zone",
+			},
+			"region": {
+				Type:       schema.TypeString,
+				Optional:   true,
+				Deprecated: "This field is deprecated",
+			},
+		},
+	}
+}
+
+func resourceIBMContainerALBCreate(d *schema.ResourceData, meta interface{}) error {
+	albClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+	var userIP string
+	var enable, disableDeployment bool
+	albID := d.Get("alb_id").(string)
+	if v, ok := d.GetOkExists("enable"); ok {
+		enable = v.(bool)
+	} else if v, ok := d.GetOkExists("disable_deployment"); ok {
+		disableDeployment = v.(bool)
+	} else {
+		return fmt.Errorf("Provide either `enable` or `disable_deployment`")
+	}
+
+	numOfInstances := "2"
+	if v, ok := d.GetOk("user_ip"); ok {
+		userIP = v.(string)
+	}
+	params := v1.ALBConfig{
+		ALBID:          albID,
+		Enable:         enable,
+		NumOfInstances: numOfInstances,
+	}
+	if userIP != "" {
+		params.ALBIP = userIP
+	}
+
+	_, err = waitForClusterAvailable(d, meta, albID)
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for cluster resources availability (%s) : %s", d.Id(), err)
+	}
+
+	albAPI := albClient.Albs()
+	targetEnv, err := getAlbTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+	err = albAPI.ConfigureALB(albID, params,
disableDeployment, targetEnv)
+	if err != nil {
+		return err
+	}
+	d.SetId(albID)
+	_, err = waitForContainerALB(d, meta, albID, schema.TimeoutCreate, enable, disableDeployment)
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for create resource alb (%s) : %s", d.Id(), err)
+	}
+
+	return resourceIBMContainerALBRead(d, meta)
+}
+
+func resourceIBMContainerALBRead(d *schema.ResourceData, meta interface{}) error {
+	albClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	albID := d.Id()
+
+	albAPI := albClient.Albs()
+	targetEnv, err := getAlbTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+	albConfig, err := albAPI.GetALB(albID, targetEnv)
+	if err != nil {
+		return err
+	}
+
+	d.Set("alb_type", albConfig.ALBType)
+	d.Set("cluster", albConfig.ClusterID)
+	d.Set("name", albConfig.Name)
+	d.Set("enable", albConfig.Enable)
+	d.Set("disable_deployment", albConfig.DisableDeployment)
+	d.Set("replicas", albConfig.NumOfInstances)
+	d.Set("resize", albConfig.Resize)
+	d.Set("user_ip", albConfig.ALBIP)
+	d.Set("zone", albConfig.Zone)
+	return nil
+}
+
+func resourceIBMContainerALBUpdate(d *schema.ResourceData, meta interface{}) error {
+	albClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+	albAPI := albClient.Albs()
+
+	if d.HasChange("enable") {
+		enable := d.Get("enable").(bool)
+		disableDeployment := d.Get("disable_deployment").(bool)
+		albID := d.Id()
+		params := v1.ALBConfig{
+			ALBID:  albID,
+			Enable: enable,
+		}
+
+		targetEnv, err := getAlbTargetHeader(d, meta)
+		if err != nil {
+			return err
+		}
+
+		_, err = waitForClusterAvailable(d, meta, albID)
+		if err != nil {
+			return fmt.Errorf(
+				"Error waiting for cluster resources availability (%s) : %s", d.Id(), err)
+		}
+
+		err = albAPI.ConfigureALB(albID, params, disableDeployment, targetEnv)
+		if err != nil {
+			return err
+		}
+		_, err = waitForContainerALB(d, meta, albID, schema.TimeoutUpdate, enable, disableDeployment)
+		if err != nil {
+			return fmt.Errorf(
+				"Error waiting for updating resource alb (%s) : %s", d.Id(), err)
+		}
+
+	}
+	return resourceIBMContainerALBRead(d, meta)
+}
+
+func waitForContainerALB(d *schema.ResourceData, meta interface{}, albID, timeout string, enable, disableDeployment bool) (interface{}, error) {
+	albClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return false, err
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"pending"},
+		Target:  []string{"active"},
+		Refresh: func() (interface{}, string, error) {
+			targetEnv, err := getAlbTargetHeader(d, meta)
+			if err != nil {
+				return nil, "", err
+			}
+			alb, err := albClient.Albs().GetALB(albID, targetEnv)
+			if err != nil {
+				if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 {
+					return nil, "", fmt.Errorf("The resource alb %s does not exist anymore: %v", d.Id(), err)
+				}
+				return nil, "", err
+			}
+			if enable {
+				if !alb.Enable {
+					return alb, "pending", nil
+				}
+			} else if disableDeployment {
+				if alb.Enable {
+					return alb, "pending", nil
+				}
+			}
+			return alb, "active", nil
+		},
+		Timeout:    d.Timeout(timeout),
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func resourceIBMContainerALBDelete(d *schema.ResourceData, meta interface{}) error {
+	// The ALB itself is managed with its cluster, so deleting this resource
+	// only removes it from the Terraform state.
+	d.SetId("")
+
+	return nil
+}
+
+// waitForClusterAvailable waits for the cluster's workers to be available
+func waitForClusterAvailable(d *schema.ResourceData, meta interface{}, albID string) (interface{}, error) {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return nil, err
+	}
+
+	target, err := getAlbTargetHeader(d, meta)
+	if err != nil {
+		return nil, err
+	}
+
+	albConfig, err := csClient.Albs().GetALB(albID, target)
+	if err != nil {
+		return nil, err
+	}
+
+	ClusterID := albConfig.ClusterID
+
+	log.Printf("Waiting for the workers of cluster (%s) to be available.", ClusterID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"retry", workerProvisioning},
+		Target:     []string{workerNormal},
+		Refresh:    workerStateRefreshFunc(csClient.Workers(), ClusterID, target),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+func getAlbTargetHeader(d *schema.ResourceData, meta interface{}) (v1.ClusterTargetHeader, error) {
+	var region string
+	if v, ok := d.GetOk("region"); ok {
+		region = v.(string)
+	}
+
+	sess, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return v1.ClusterTargetHeader{}, err
+	}
+
+	if region == "" {
+		region = sess.Config.Region
+	}
+
+	targetEnv := v1.ClusterTargetHeader{
+		Region: region,
+	}
+
+	return targetEnv, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_alb_cert.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_alb_cert.go
new file mode 100644
index 00000000000..3e51caff48c
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_alb_cert.go
@@ -0,0 +1,370 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+	v2 "github.com/IBM-Cloud/bluemix-go/api/container/containerv2"
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+)
+
+func resourceIBMContainerALBCert() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMContainerALBCertCreate,
+		Read:     resourceIBMContainerALBCertRead,
+		Update:   resourceIBMContainerALBCertUpdate,
+		Delete:   resourceIBMContainerALBCertDelete,
+		Exists:   resourceIBMContainerALBCertExists,
+		Importer: &schema.ResourceImporter{},
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Update: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"cert_crn": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    false,
+				Description: "Certificate CRN ID",
+			},
+			"cluster_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Cluster ID",
+			},
+			"secret_name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Secret name",
+			},
+			"namespace": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Default:     "ibm-cert-store",
+				ForceNew:    true,
+				Description: "Namespace of the secret",
+			},
+			"persistence": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Description: "Persistence of secret",
+			},
+			"domain_name": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Domain name",
+			},
+			"expires_on": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Certificate expiration date",
+			},
+			"issuer_name": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Certificate issuer name",
+				Deprecated:  "This field is deprecated and is not
available in v2 version of ingress api", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Secret Status", + }, + "cloud_cert_instance_id": { + Type: schema.TypeString, + Computed: true, + Description: "cloud cert instance ID", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "This field is deprecated", + Description: "region name", + }, + }, + } +} + +func resourceIBMContainerALBCertCreate(d *schema.ResourceData, meta interface{}) error { + ingressClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + certCRN := d.Get("cert_crn").(string) + cluster := d.Get("cluster_id").(string) + secretName := d.Get("secret_name").(string) + namespace := d.Get("namespace").(string) + + params := v2.SecretCreateConfig{ + CRN: certCRN, + Cluster: cluster, + Name: secretName, + Namespace: namespace, + } + // params.State = "update_false" + if v, ok := d.GetOk("persistence"); ok { + params.Persistence = v.(bool) + } + + ingressAPI := ingressClient.Ingresses() + response, err := ingressAPI.CreateIngressSecret(params) + + if err != nil { + return err + } + d.SetId(fmt.Sprintf("%s/%s/%s", cluster, secretName, response.Namespace)) + _, err = waitForContainerALBCert(d, meta, schema.TimeoutCreate) + if err != nil { + return fmt.Errorf( + "Error waiting for create resource alb cert (%s) : %s", d.Id(), err) + } + + return resourceIBMContainerALBCertRead(d, meta) +} + +func resourceIBMContainerALBCertRead(d *schema.ResourceData, meta interface{}) error { + ingressClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + clusterID := parts[0] + secretName := parts[1] + namespace := "ibm-cert-store" + if len(parts) > 2 && len(parts[2]) > 0 { + namespace = parts[2] + } + + ingressAPI := ingressClient.Ingresses() + ingressSecretConfig, err := ingressAPI.GetIngressSecret(clusterID, secretName, namespace) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("%s/%s/%s", clusterID, secretName, namespace)) + d.Set("cluster_id", ingressSecretConfig.Cluster) + d.Set("secret_name", ingressSecretConfig.Name) + d.Set("namespace", ingressSecretConfig.Namespace) + d.Set("cert_crn", ingressSecretConfig.CRN) + instancecrn := strings.Split(ingressSecretConfig.CRN, ":certificate:") + d.Set("cloud_cert_instance_id", fmt.Sprintf("%s::", instancecrn[0])) + d.Set("domain_name", ingressSecretConfig.Domain) + d.Set("expires_on", ingressSecretConfig.ExpiresOn) + d.Set("status", ingressSecretConfig.Status) + d.Set("persistence", ingressSecretConfig.Persistence) + + return nil +} + +func resourceIBMContainerALBCertDelete(d *schema.ResourceData, meta interface{}) error { + ingressClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + ingressAPI := ingressClient.Ingresses() + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + clusterID := parts[0] + secretName := parts[1] + namespace := "ibm-cert-store" + if len(parts) > 2 && len(parts[2]) > 0 { + namespace = parts[2] + } + params := v2.SecretDeleteConfig{ + Cluster: clusterID, + Name: secretName, + Namespace: namespace, + } + + err = ingressAPI.DeleteIngressSecret(params) + if err != nil { + return err + } + _, albCertDeletionError := waitForALBCertDelete(d, meta, schema.TimeoutDelete) + if albCertDeletionError != nil { + return albCertDeletionError + } + d.SetId("") + return nil +} + +func waitForALBCertDelete(d 
*schema.ResourceData, meta interface{}, timeout string) (interface{}, error) { + ingressClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + clusterID := parts[0] + secretName := parts[1] + namespace := "ibm-cert-store" + if len(parts) > 2 && len(parts[2]) > 0 { + namespace = parts[2] + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{"deleted"}, + Refresh: func() (interface{}, string, error) { + + secret, err := ingressClient.Ingresses().GetIngressSecret(clusterID, secretName, namespace) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return secret, "deleted", nil + } + return nil, "", err + } + if secret.Status != "deleted" { + return secret, "deleting", nil + } + return secret, "deleted", nil + }, + Timeout: d.Timeout(timeout), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func resourceIBMContainerALBCertUpdate(d *schema.ResourceData, meta interface{}) error { + ingressClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + cluster := parts[0] + secretName := parts[1] + namespace := "ibm-cert-store" + if len(parts) > 2 && len(parts[2]) > 0 { + namespace = parts[2] + } + + if d.HasChange("cert_crn") { + crn := d.Get("cert_crn").(string) + params := v2.SecretUpdateConfig{ + CRN: crn, + Cluster: cluster, + Name: secretName, + Namespace: namespace, + } + // params.State = "update_true" + + ingressAPI := ingressClient.Ingresses() + _, err = ingressAPI.UpdateIngressSecret(params) + if err != nil { + return err + } + + _, err = waitForContainerALBCert(d, meta, schema.TimeoutUpdate) + if err != nil { + return fmt.Errorf( + "Error waiting for updating resource alb cert (%s) : %s", d.Id(), err) + } + } + return resourceIBMContainerALBCertRead(d, meta) +} + +func resourceIBMContainerALBCertExists(d *schema.ResourceData, meta interface{}) (bool, error) { + ingressClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return false, err + } + + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + clusterID := parts[0] + secretName := parts[1] + namespace := "ibm-cert-store" + if len(parts) > 2 && len(parts[2]) > 0 { + namespace = parts[2] + } + + ingressAPI := ingressClient.Ingresses() + ingressSecretConfig, err := ingressAPI.GetIngressSecret(clusterID, secretName, namespace) + + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return ingressSecretConfig.Cluster == clusterID && ingressSecretConfig.Name == secretName, nil +} + +func waitForContainerALBCert(d *schema.ResourceData, meta interface{}, timeout string) (interface{}, error) { + ingressClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + clusterID := parts[0] + secretName := parts[1] + namespace := "ibm-cert-store" + if len(parts) > 2 && len(parts[2]) > 0 { + namespace = parts[2] + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating"}, + Target: []string{"done"}, + Refresh: func() (interface{}, string, error) { + + alb, err 
:= ingressClient.Ingresses().GetIngressSecret(clusterID, secretName, namespace) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return alb, "creating", nil + } + return nil, "", err + } + if alb.Status != "created" { + if strings.Contains(alb.Status, "failed") { + return alb, "failed", fmt.Errorf("The resource alb cert %s failed: %v", d.Id(), err) + } + + if alb.Status == "updated" { + return alb, "done", nil + } + return alb, "creating", nil + } + return alb, "done", nil + }, + Timeout: d.Timeout(timeout), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_api_key_reset.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_api_key_reset.go new file mode 100644 index 00000000000..d604b5cbd21 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_api_key_reset.go @@ -0,0 +1,79 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMContainerAPIKeyReset() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMContainerAPIKeyResetUpdate, + Read: resourceIBMContainerAPIKeyResetRead, + Update: resourceIBMContainerAPIKeyResetUpdate, + Delete: resourceIBMContainerAPIKeyResetdelete, + + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Region which api key has to be reset", + }, + "resource_group_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "ID of Resource Group", + }, + "reset_api_key": { + Type: schema.TypeInt, + Optional: true, + Description: "Determines if apikey has to be reset or not", + Default: 1, + }, + }, + } +} + +func resourceIBMContainerAPIKeyResetUpdate(d *schema.ResourceData, meta interface{}) error { + + if d.HasChange("reset_api_key") { + apikeyClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + apikeyAPI := apikeyClient.Apikeys() + region := d.Get("region").(string) + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + targetEnv.Region = region + err = apikeyAPI.ResetApiKey(targetEnv) + if err != nil { + return err + } + if targetEnv.ResourceGroup == "" { + defaultRg, err := defaultResourceGroup(meta) + if err != nil { + return err + } + targetEnv.ResourceGroup = defaultRg + } + + d.SetId(fmt.Sprintf("%s/%s", region, targetEnv.ResourceGroup)) + } + + return nil +} +func resourceIBMContainerAPIKeyResetRead(d *schema.ResourceData, meta interface{}) error { + return nil +} +func resourceIBMContainerAPIKeyResetdelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_bind_service.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_bind_service.go new file mode 100644 index 00000000000..7cb555e763d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_bind_service.go @@ -0,0 +1,236 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" +) + +func resourceIBMContainerBindService() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMContainerBindServiceCreate, + Read: resourceIBMContainerBindServiceRead, + Update: resourceIBMContainerBindServiceUpdate, + Delete: resourceIBMContainerBindServiceDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "cluster_name_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "Cluster name or ID", + }, + "service_instance_name": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + ConflictsWith: []string{"service_instance_id"}, + Description: "serivice instance name", + }, + "service_instance_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + ConflictsWith: []string{"service_instance_name"}, + Description: "Service instance ID", + }, + "namespace_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "namespace ID", + }, + + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "space_guid": { + Description: "The bluemix space guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "account_guid": { + Description: "The bluemix account guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Key info", + }, + "role": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Role info", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: "The cluster region", + Deprecated: "This field is deprecated", + }, + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + ForceNew: true, + DiffSuppressFunc: applyOnce, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags for the resource", + }, + }, + } +} + +func getClusterTargetHeader(d *schema.ResourceData, meta interface{}) (v1.ClusterTargetHeader, error) { + _, err := meta.(ClientSession).BluemixSession() + if err != nil { + return v1.ClusterTargetHeader{}, err + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return v1.ClusterTargetHeader{}, err + } + accountID := userDetails.userAccount + + targetEnv := v1.ClusterTargetHeader{ + AccountID: accountID, + } + + resourceGroup := "" + if v, ok := d.GetOk("resource_group_id"); ok { + resourceGroup = v.(string) + targetEnv.ResourceGroup = resourceGroup + } + return targetEnv, nil +} + +func resourceIBMContainerBindServiceCreate(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + clusterNameID := d.Get("cluster_name_id").(string) + namespaceID := d.Get("namespace_id").(string) + var serviceInstanceNameID string + if serviceInstanceName, ok := d.GetOk("service_instance_name"); ok { + serviceInstanceNameID = 
serviceInstanceName.(string) + } else if serviceInstanceID, ok := d.GetOk("service_instance_id"); ok { + serviceInstanceNameID = serviceInstanceID.(string) + } else { + return fmt.Errorf("Please set either service_instance_name or service_instance_id") + } + + bindService := v1.ServiceBindRequest{ + ClusterNameOrID: clusterNameID, + ServiceInstanceNameOrID: serviceInstanceNameID, + NamespaceID: namespaceID, + } + + if v, ok := d.GetOk("key"); ok { + bindService.ServiceKeyGUID = v.(string) + } + + if v, ok := d.GetOk("role"); ok { + bindService.Role = v.(string) + } + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + _, err = csClient.Clusters().BindService(bindService, targetEnv) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("%s/%s/%s", clusterNameID, serviceInstanceNameID, namespaceID)) + + return resourceIBMContainerBindServiceRead(d, meta) +} + +func resourceIBMContainerBindServiceUpdate(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceIBMContainerBindServiceRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + clusterNameID := parts[0] + serviceInstanceNameID := parts[1] + namespaceID := parts[2] + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + + boundService, err := csClient.Clusters().FindServiceBoundToCluster(clusterNameID, serviceInstanceNameID, namespaceID, targetEnv) + if err != nil { + return err + } + d.Set("namespace_id", boundService.Namespace) + + d.Set("service_instance_name", boundService.ServiceName) + d.Set("service_instance_id", boundService.ServiceID) + //d.Set(key, boundService.ServiceKeyName) + //d.Set(key, boundService.ServiceName) + return nil +} + +func resourceIBMContainerBindServiceDelete(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + clusterNameID := parts[0] + serviceInstanceNameID := parts[1] + namespace := parts[2] + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + + err = csClient.Clusters().UnBindService(clusterNameID, namespace, serviceInstanceNameID, targetEnv) + if err != nil { + return fmt.Errorf("Error unbinding service: %s", err) + } + return nil +} + +//Pure Aramda API not available, we can still find by using k8s api +/* +func resourceIBMContainerBindServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + +}*/ diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_cluster.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_cluster.go new file mode 100644 index 00000000000..afcd01218dd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_cluster.go @@ -0,0 +1,1631 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" + "github.com/IBM-Cloud/bluemix-go/bmxerror" +) + +const ( + clusterNormal = "normal" + clusterDeletePending = "deleting" + clusterDeleted = "deleted" + workerNormal = "normal" + subnetNormal = "normal" + workerReadyState = "Ready" + workerDeleteState = "deleted" + workerDeletePending = "deleting" + + versionUpdating = "updating" + clusterProvisioning = "provisioning" + workerProvisioning = "provisioning" + subnetProvisioning = "provisioning" + + hardwareShared = "shared" + hardwareDedicated = "dedicated" + isolationPublic = "public" + isolationPrivate = "private" + + defaultWorkerPool = "default" + computeWorkerPool = "compute" + gatewayWorkerpool = "gateway" +) + +const PUBLIC_SUBNET_TYPE = "public" + +func resourceIBMContainerCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMContainerClusterCreate, + Read: resourceIBMContainerClusterRead, + Update: resourceIBMContainerClusterUpdate, + Delete: resourceIBMContainerClusterDelete, + Exists: resourceIBMContainerClusterExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(45 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The cluster name", + }, + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The datacenter where this cluster will be deployed", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + Computed: true, + Description: "The cluster region", + }, + + "kms_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Enables KMS on a given cluster", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "ID of the KMS instance to use to encrypt the cluster.", + }, + "crk_id": { + Type: schema.TypeString, + Required: true, + Description: "ID of the customer root key.", + }, + "private_endpoint": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Set to true to use the KMS private service endpoint.", + }, + }, + }, + }, + + "worker_num": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Number of worker nodes", + ValidateFunc: validateWorkerNum, + Deprecated: "This field is deprecated", + }, + + "default_pool_size": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + Description: "The size of the default worker pool", + ValidateFunc: validateWorkerNum, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of labels for the default worker pool", + }, + + "workers_info": { + Type: schema.TypeList, + 
Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "pool_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Description: "The IDs of the worker nodes", + }, + + "disk_encryption": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: "Disk encryption is enabled if set to true.", + }, + + "kube_version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return false + } + new := strings.Split(n, ".") + old := strings.Split(o, ".") + + if strings.Compare(new[0]+"."+strings.Split(new[1], "_")[0], old[0]+"."+strings.Split(old[1], "_")[0]) == 0 { + return true + } + return false + }, + Description: "Kubernetes version info", + }, + + "patch_version": { + Type: schema.TypeString, + Optional: true, + Description: "Kubernetes patch version", + }, + + "retry_patch_version": { + Type: schema.TypeInt, + Optional: true, + Description: "Argument which helps to retry the patch version updates on worker nodes. Increment the value to retry the patch updates if the previous apply fails", + }, + + "update_all_workers": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Updates all the worker nodes if set to true", + }, + + "machine_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "Machine type", + }, + + "hardware": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{hardwareShared, hardwareDedicated}), + Description: "Hardware type", + }, + + "billing": { + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + DiffSuppressFunc: applyOnce, + }, + "public_vlan_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: nil, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return false + } + if o != "" && n == "" { + return true + } + return false + }, + Description: "Public VLAN ID", + }, + + "private_vlan_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: nil, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return false + } + if o != "" && n == "" { + return true + } + return false + }, + Description: "Private VLAN ID", + }, + "entitlement": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: applyOnce, + Description: "Entitlement option reduces additional OCP license cost in OpenShift clusters", + }, + + "wait_for_worker_update": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Wait for worker node to update during kube version update.", + }, + "wait_till": { + Type: schema.TypeString, + Optional: true, + Default: ingressReady, + DiffSuppressFunc: applyOnce, + ValidateFunc: validation.StringInSlice([]string{masterNodeReady, oneWorkerNodeReady, ingressReady}, true), + Description: "wait_till can be configured for Master Ready, One Worker Ready, or Ingress Ready", + }, + "service_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Custom subnet CIDR to provide private IP addresses for services", + Computed: true, + }, + + "pod_subnet": { + Type: schema.TypeString, + Optional: 
true, + ForceNew: true, + Description: "Custom subnet CIDR to provide private IP addresses for pods", + Computed: true, + }, + + "ingress_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "ingress_secret": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "no_subnet": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + DiffSuppressFunc: applyOnce, + Description: "Boolean value set to true when subnet creation is not required.", + }, + "is_trusted": { + Type: schema.TypeBool, + Optional: true, + Deprecated: "This field is deprecated", + DiffSuppressFunc: applyOnce, + }, + "server_url": { + Type: schema.TypeString, + Computed: true, + }, + + "subnet_id": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of subnet IDs", + }, + "webhook": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "level": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"slack"}), + }, + "url": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "force_delete_storage": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Force the removal of a cluster and its persistent storage. Deleted data cannot be recovered", + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + Computed: true, + DiffSuppressFunc: applyOnce, + }, + + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "space_guid": { + Description: "The bluemix space guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "account_guid": { + Description: "The bluemix account guid this cluster belongs to", + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is deprecated", + }, + "wait_time_minutes": { + Type: schema.TypeInt, + Optional: true, + Deprecated: "This field is deprecated", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_container_cluster", "tag")}, + Set: resourceIBMVPCHash, + Description: "Tags for the resource", + }, + + "worker_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "machine_type": { + Type: schema.TypeString, + Computed: true, + }, + "size_per_zone": { + Type: schema.TypeInt, + Computed: true, + }, + "hardware": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + }, + "zones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Computed: true, + }, + "private_vlan": { + Type: schema.TypeString, + Computed: true, + }, + "public_vlan": { + Type: schema.TypeString, + Computed: true, + }, + "worker_count": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + 
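+ // "albs" is a computed-only view of the cluster's Ingress application load balancers; it is populated from the ALB API during read (see the ListClusterALBs/flattenAlbs calls in resourceIBMContainerClusterRead below).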
"albs": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "alb_type": { + Type: schema.TypeString, + Computed: true, + }, + "enable": { + Type: schema.TypeBool, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "num_of_instances": { + Type: schema.TypeString, + Computed: true, + }, + "alb_ip": { + Type: schema.TypeString, + Computed: true, + }, + "resize": { + Type: schema.TypeBool, + Computed: true, + }, + "disable_deployment": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "public_service_endpoint": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "private_service_endpoint": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + }, + "public_service_endpoint_url": { + Type: schema.TypeString, + Computed: true, + }, + + "private_service_endpoint_url": { + Type: schema.TypeString, + Computed: true, + }, + "gateway_enabled": { + Type: schema.TypeBool, + Optional: true, + DiffSuppressFunc: applyOnce, + Default: false, + Description: "Set true for gateway enabled clusters", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "CRN of resource instance", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMContainerClusterValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmContainerClusterResourceValidator := ResourceValidator{ResourceName: "ibm_container_cluster", Schema: validateSchema} + return &ibmContainerClusterResourceValidator +} + +func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { + + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + name := d.Get("name").(string) + datacenter := d.Get("datacenter").(string) + machineType := d.Get("machine_type").(string) + publicVlanID := d.Get("public_vlan_id").(string) + privateVlanID := d.Get("private_vlan_id").(string) + noSubnet := d.Get("no_subnet").(bool) + diskEncryption := d.Get("disk_encryption").(bool) + defaultPoolSize := d.Get("default_pool_size").(int) + gatewayEnabled := d.Get("gateway_enabled").(bool) + hardware := d.Get("hardware").(string) + switch strings.ToLower(hardware) { + case hardwareDedicated: + hardware = isolationPrivate + case hardwareShared: + hardware = isolationPublic + } + + params := v1.ClusterCreateRequest{ + Name: name, + Datacenter: datacenter, + WorkerNum: defaultPoolSize, + 
MachineType: machineType, + PublicVlan: publicVlanID, + PrivateVlan: privateVlanID, + NoSubnet: noSubnet, + Isolation: hardware, + DiskEncryption: diskEncryption, + } + + // Update params with Entitlement option if provided + if v, ok := d.GetOk("entitlement"); ok { + params.DefaultWorkerPoolEntitlement = v.(string) + } + if v, ok := d.GetOk("pod_subnet"); ok { + params.PodSubnet = v.(string) + } + if v, ok := d.GetOk("service_subnet"); ok { + params.ServiceSubnet = v.(string) + } + + if gatewayEnabled { + if v, ok := d.GetOkExists("private_service_endpoint"); ok { + if v.(bool) { + params.PrivateEndpointEnabled = v.(bool) + params.GatewayEnabled = gatewayEnabled + } else { + return fmt.Errorf("set private_service_endpoint to true for gateway_enabled clusters") + } + } else { + return fmt.Errorf("set private_service_endpoint to true for gateway_enabled clusters") + } + } + if v, ok := d.GetOk("kube_version"); ok { + params.MasterVersion = v.(string) + } + if v, ok := d.GetOkExists("private_service_endpoint"); ok { + params.PrivateEndpointEnabled = v.(bool) + } + if v, ok := d.GetOkExists("public_service_endpoint"); ok { + params.PublicEndpointEnabled = v.(bool) + } + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + + cls, err := csClient.Clusters().Create(params, targetEnv) + if err != nil { + return err + } + d.SetId(cls.ID) + + _, err = waitForClusterMasterAvailable(d, meta) + if err != nil { + return err + } + if d.Get("wait_till").(string) == oneWorkerNodeReady { + _, err = waitForClusterOneWorkerAvailable(d, meta) + if err != nil { + return err + } + } + d.Set("force_delete_storage", d.Get("force_delete_storage").(bool)) + + return resourceIBMContainerClusterUpdate(d, meta) +} + +func resourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + wrkAPI := csClient.Workers() + workerPoolsAPI := csClient.WorkerPools() + albsAPI := csClient.Albs() + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + + clusterID := d.Id() + cls, err := csClient.Clusters().Find(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving armada cluster: %s", err) + } + + workerFields, err := wrkAPI.List(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + workerCount := 0 + workers := []map[string]string{} + for _, w := range workerFields { + var worker = map[string]string{ + "id": w.ID, + "version": strings.Split(w.KubeVersion, "_")[0], + "pool_name": w.PoolName, + } + workers = append(workers, worker) + if w.PoolID == "" && w.PoolName == "" { + workerCount = workerCount + 1 + } + } + + d.Set("worker_num", workerCount) + + workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv) + if err != nil { + return err + } + var poolName string + var poolContains bool + + if len(workerPools) > 0 && workerPoolContains(workerPools, defaultWorkerPool) { + poolName = defaultWorkerPool + poolContains = true + } else if len(workerPools) > 0 && workerPoolContains(workerPools, computeWorkerPool) && workerPoolContains(workerPools, gatewayWorkerpool) { + poolName = computeWorkerPool + poolContains = true + } + if poolContains { + workersByPool, err := wrkAPI.ListByWorkerPool(clusterID, poolName, false, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers of default worker pool for cluster: %s", err) + } + + // to get the 
private and public vlan IDs of the gateway enabled cluster. + if poolName == computeWorkerPool { + gatewayWorkersByPool, err := wrkAPI.ListByWorkerPool(clusterID, gatewayWorkerpool, false, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers of default worker pool for cluster: %s", err) + } + d.Set("public_vlan_id", gatewayWorkersByPool[0].PublicVlan) + d.Set("private_vlan_id", gatewayWorkersByPool[0].PrivateVlan) + } else { + d.Set("public_vlan_id", workersByPool[0].PublicVlan) + d.Set("private_vlan_id", workersByPool[0].PrivateVlan) + } + d.Set("machine_type", strings.Split(workersByPool[0].MachineType, ".encrypted")[0]) + d.Set("datacenter", cls.DataCenter) + if workersByPool[0].MachineType != "free" { + if strings.HasSuffix(workersByPool[0].MachineType, ".encrypted") { + d.Set("disk_encryption", true) + } else { + d.Set("disk_encryption", false) + } + } + + if len(workersByPool) > 0 { + hardware := workersByPool[0].Isolation + switch strings.ToLower(hardware) { + case "": + hardware = hardwareShared + case isolationPrivate: + hardware = hardwareDedicated + case isolationPublic: + hardware = hardwareShared + } + d.Set("hardware", hardware) + } + + defaultWorkerPool, err := workerPoolsAPI.GetWorkerPool(clusterID, poolName, targetEnv) + if err != nil { + return err + } + d.Set("labels", IgnoreSystemLabels(defaultWorkerPool.Labels)) + zones := defaultWorkerPool.Zones + for _, zone := range zones { + if zone.ID == cls.DataCenter { + d.Set("default_pool_size", zone.WorkerCount) + break + } + } + d.Set("worker_pools", flattenWorkerPools(workerPools)) + } + + albs, err := albsAPI.ListClusterALBs(clusterID, targetEnv) + if err != nil && !strings.Contains(err.Error(), "The specified cluster is a lite cluster.") && !strings.Contains(err.Error(), "This operation is not supported for your cluster's version.") && !strings.Contains(err.Error(), "The specified cluster is a free cluster.") { + + return fmt.Errorf("Error retrieving alb's of the cluster %s: %s", clusterID, err) + } + + d.Set("name", cls.Name) + d.Set("server_url", cls.ServerURL) + d.Set("ingress_hostname", cls.IngressHostname) + d.Set("ingress_secret", cls.IngressSecretName) + d.Set("region", cls.Region) + d.Set("service_subnet", cls.ServiceSubnet) + d.Set("pod_subnet", cls.PodSubnet) + d.Set("subnet_id", d.Get("subnet_id").(*schema.Set)) + d.Set("workers_info", workers) + if strings.HasSuffix(cls.MasterKubeVersion, "_openshift") { + d.Set("kube_version", strings.Split(cls.MasterKubeVersion, "_")[0]+"_openshift") + } else { + d.Set("kube_version", strings.Split(cls.MasterKubeVersion, "_")[0]) + } + d.Set("albs", flattenAlbs(albs, "all")) + d.Set("resource_group_id", cls.ResourceGroupID) + d.Set("public_service_endpoint", cls.PublicServiceEndpointEnabled) + d.Set("private_service_endpoint", cls.PrivateServiceEndpointEnabled) + d.Set("public_service_endpoint_url", cls.PublicServiceEndpointURL) + d.Set("private_service_endpoint_url", cls.PrivateServiceEndpointURL) + d.Set("crn", cls.CRN) + tags, err := GetTagsUsingCRN(meta, cls.CRN) + if err != nil { + log.Printf( + "An error occured during reading of instance (%s) tags : %s", d.Id(), err) + } + d.Set("tags", tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/kubernetes/clusters") + d.Set(ResourceName, cls.Name) + d.Set(ResourceCRN, cls.CRN) + d.Set(ResourceStatus, cls.State) + d.Set(ResourceGroupName, cls.ResourceGroupName) + return nil +} + +func resourceIBMContainerClusterUpdate(d 
*schema.ResourceData, meta interface{}) error { + + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + + subnetAPI := csClient.Subnets() + whkAPI := csClient.WebHooks() + wrkAPI := csClient.Workers() + clusterAPI := csClient.Clusters() + kmsAPI := csClient.Kms() + + clusterID := d.Id() + + if (d.HasChange("kube_version") || d.HasChange("update_all_workers") || d.HasChange("patch_version") || d.HasChange("retry_patch_version")) && !d.IsNewResource() { + if d.HasChange("kube_version") { + var masterVersion string + if v, ok := d.GetOk("kube_version"); ok { + masterVersion = v.(string) + } + params := v1.ClusterUpdateParam{ + Action: "update", + Force: true, + Version: masterVersion, + } + err := clusterAPI.Update(clusterID, params, targetEnv) + if err != nil { + return err + } + _, err = WaitForClusterVersionUpdate(d, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for cluster (%s) version to be updated: %s", d.Id(), err) + } + } + // "update_all_workers" defaults to false; set it to true when all worker nodes are to be updated + // with major and minor updates. + updateAllWorkers := d.Get("update_all_workers").(bool) + if updateAllWorkers || d.HasChange("patch_version") || d.HasChange("retry_patch_version") { + patchVersion := d.Get("patch_version").(string) + workerFields, err := wrkAPI.List(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + cluster, err := clusterAPI.Find(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving cluster %s: %s", clusterID, err) + } + + waitForWorkerUpdate := d.Get("wait_for_worker_update").(bool) + + for _, w := range workerFields { + /*kubeversion update is done if + 1. There is a change in the Major.Minor version + 2. 
There is a change in patch_version & the target kube patch version and patch_version are the same + */ + if strings.Split(w.KubeVersion, "_")[0] != strings.Split(cluster.MasterKubeVersion, "_")[0] || (strings.Split(w.KubeVersion, ".")[2] != patchVersion && strings.Split(w.TargetVersion, ".")[2] == patchVersion) { + params := v1.WorkerUpdateParam{ + Action: "update", + } + err = wrkAPI.Update(clusterID, w.ID, params, targetEnv) + if err != nil { + d.Set("patch_version", nil) + return fmt.Errorf("Error updating worker %s: %s", w.ID, err) + } + if waitForWorkerUpdate { + _, err = WaitForWorkerAvailable(d, meta, targetEnv) + if err != nil { + d.Set("patch_version", nil) + return fmt.Errorf( + "Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) + } + } + } + } + } + } + + if d.HasChange("kms_config") { + kmsConfig := v1.KmsEnableReq{} + kmsConfig.Cluster = clusterID + targetEnv := v1.ClusterHeader{} + if kms, ok := d.GetOk("kms_config"); ok { + + kmsConfiglist := kms.([]interface{}) + + for _, l := range kmsConfiglist { + kmsMap, _ := l.(map[string]interface{}) + + //instance_id - Required field + instanceID := kmsMap["instance_id"].(string) + kmsConfig.Kms = instanceID + + //crk_id - Required field + crk := kmsMap["crk_id"].(string) + kmsConfig.Crk = crk + + //Read event - as it's optional, check for existence + if privateEndpoint := kmsMap["private_endpoint"]; privateEndpoint != nil { + endpoint := privateEndpoint.(bool) + kmsConfig.PrivateEndpoint = endpoint + } + } + } + + err := kmsAPI.EnableKms(kmsConfig, targetEnv) + if err != nil { + log.Printf( + "An error occurred during EnableKms (cluster: %s) error: %s", d.Id(), err) + return err + } + } + + if d.HasChange("force_delete_storage") { + var forceDeleteStorage bool + if v, ok := d.GetOk("force_delete_storage"); ok { + forceDeleteStorage = v.(bool) + } + d.Set("force_delete_storage", forceDeleteStorage) + } + + if d.HasChange("default_pool_size") && !d.IsNewResource() { + workerPoolsAPI := csClient.WorkerPools() + workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv) + if err != nil { + return err + } + var poolName string + var poolContains bool + + if len(workerPools) > 0 && workerPoolContains(workerPools, defaultWorkerPool) { + poolName = defaultWorkerPool + + poolContains = true + } else if len(workerPools) > 0 && workerPoolContains(workerPools, computeWorkerPool) && workerPoolContains(workerPools, gatewayWorkerpool) { + poolName = computeWorkerPool + poolContains = true + } + if poolContains { + poolSize := d.Get("default_pool_size").(int) + err = workerPoolsAPI.ResizeWorkerPool(clusterID, poolName, poolSize, targetEnv) + if err != nil { + return fmt.Errorf( + "Error updating the default_pool_size %d: %s", poolSize, err) + } + + _, err = WaitForWorkerAvailable(d, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) + } + } else { + return fmt.Errorf( + "The default worker pool does not exist. 
Use ibm_container_worker_pool and ibm_container_worker_pool_zone attachment resources to make changes to your cluster, such as adding zones, adding worker nodes, or updating worker nodes..") + } + } + + if d.HasChange("labels") { + workerPoolsAPI := csClient.WorkerPools() + workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv) + if err != nil { + return err + } + var poolName string + var poolContains bool + + if len(workerPools) > 0 && workerPoolContains(workerPools, defaultWorkerPool) { + poolName = defaultWorkerPool + poolContains = true + } else if len(workerPools) > 0 && workerPoolContains(workerPools, computeWorkerPool) && workerPoolContains(workerPools, gatewayWorkerpool) { + poolName = computeWorkerPool + poolContains = true + } + if poolContains { + labels := make(map[string]string) + if l, ok := d.GetOk("labels"); ok { + for k, v := range l.(map[string]interface{}) { + labels[k] = v.(string) + } + } + err = workerPoolsAPI.UpdateLabelsWorkerPool(clusterID, poolName, labels, targetEnv) + if err != nil { + return fmt.Errorf( + "Error updating the labels %s", err) + } + + _, err = WaitForWorkerAvailable(d, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) + } + } else { + return fmt.Errorf( + "The default worker pool does not exist. Use ibm_container_worker_pool and ibm_container_worker_pool_zone attachment resources to make changes to your cluster, such as adding zones, adding worker nodes, or updating worker nodes..") + } + } + + if d.HasChange("worker_num") { + old, new := d.GetChange("worker_num") + oldCount := old.(int) + newCount := new.(int) + if newCount > oldCount { + count := newCount - oldCount + machineType := d.Get("machine_type").(string) + publicVlanID := d.Get("public_vlan_id").(string) + privateVlanID := d.Get("private_vlan_id").(string) + hardware := d.Get("hardware").(string) + switch strings.ToLower(hardware) { + case hardwareDedicated: + hardware = isolationPrivate + case hardwareShared: + hardware = isolationPublic + } + params := v1.WorkerParam{ + WorkerNum: count, + MachineType: machineType, + PublicVlan: publicVlanID, + PrivateVlan: privateVlanID, + Isolation: hardware, + } + wrkAPI.Add(clusterID, params, targetEnv) + } else if oldCount > newCount { + count := oldCount - newCount + workerFields, err := wrkAPI.List(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + for i := 0; i < count; i++ { + err := wrkAPI.Delete(clusterID, workerFields[i].ID, targetEnv) + if err != nil { + return fmt.Errorf( + "Error deleting workers of cluster (%s): %s", d.Id(), err) + } + } + } + + _, err = WaitForWorkerAvailable(d, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) + } + } + + if d.HasChange("workers_info") { + oldWorkers, newWorkers := d.GetChange("workers_info") + oldWorker := oldWorkers.([]interface{}) + newWorker := newWorkers.([]interface{}) + for _, nW := range newWorker { + newPack := nW.(map[string]interface{}) + for _, oW := range oldWorker { + oldPack := oW.(map[string]interface{}) + if strings.Compare(newPack["version"].(string), oldPack["version"].(string)) != 0 { + cluster, err := clusterAPI.Find(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving cluster %s: %s", clusterID, err) + } + if newPack["version"].(string) != strings.Split(cluster.MasterKubeVersion, "_")[0] { + return 
fmt.Errorf("Worker version %s should match the master kube version %s", newPack["version"].(string), strings.Split(cluster.MasterKubeVersion, "_")[0]) + } + params := v1.WorkerUpdateParam{ + Action: "update", + } + err = wrkAPI.Update(clusterID, oldPack["id"].(string), params, targetEnv) + if err != nil { + return fmt.Errorf("Error updating worker %s: %s", oldPack["id"].(string), err) + } + + _, err = WaitForWorkerAvailable(d, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) + } + } + } + } + + } + + //TODO put webhooks can't deleted in the error message if such case is observed in the chnages + if d.HasChange("webhook") { + oldHooks, newHooks := d.GetChange("webhook") + oldHook := oldHooks.([]interface{}) + newHook := newHooks.([]interface{}) + for _, nH := range newHook { + newPack := nH.(map[string]interface{}) + exists := false + for _, oH := range oldHook { + oldPack := oH.(map[string]interface{}) + if (strings.Compare(newPack["level"].(string), oldPack["level"].(string)) == 0) && (strings.Compare(newPack["type"].(string), oldPack["type"].(string)) == 0) && (strings.Compare(newPack["url"].(string), oldPack["url"].(string)) == 0) { + exists = true + } + } + if !exists { + webhook := v1.WebHook{ + Level: newPack["level"].(string), + Type: newPack["type"].(string), + URL: newPack["url"].(string), + } + + whkAPI.Add(clusterID, webhook, targetEnv) + } + } + } + //TODO put subnet can't deleted in the error message if such case is observed in the chnages + var publicSubnetAdded bool + noSubnet := d.Get("no_subnet").(bool) + publicVlanID := d.Get("public_vlan_id").(string) + if noSubnet == false && publicVlanID != "" { + publicSubnetAdded = true + } + if d.HasChange("subnet_id") { + oldSubnets, newSubnets := d.GetChange("subnet_id") + oldSubnet := oldSubnets.(*schema.Set) + newSubnet := newSubnets.(*schema.Set) + rem := oldSubnet.Difference(newSubnet).List() + if len(rem) > 0 { + return fmt.Errorf("Subnet(s) %v cannot be deleted", rem) + } + metro := d.Get("datacenter").(string) + //from datacenter retrive the metro for filtering the subnets + metro = metro[0:3] + subnets, err := subnetAPI.List(targetEnv, metro) + if err != nil { + return err + } + for _, nS := range newSubnet.List() { + exists := false + for _, oS := range oldSubnet.List() { + if strings.Compare(nS.(string), oS.(string)) == 0 { + exists = true + } + } + if !exists { + err := subnetAPI.AddSubnet(clusterID, nS.(string), targetEnv) + if err != nil { + return err + } + subnet := getSubnet(subnets, nS.(string)) + if subnet.Type == PUBLIC_SUBNET_TYPE { + publicSubnetAdded = true + } + } + } + } + if publicSubnetAdded && d.Get("wait_till").(string) == ingressReady { + _, err = WaitForSubnetAvailable(d, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for initializing ingress hostname and secret: %s", err) + } + } + + v := os.Getenv("IC_ENV_TAGS") + if d.HasChange("tags") || v != "" { + oldList, newList := d.GetChange("tags") + cluster, err := clusterAPI.Find(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving cluster %s: %s", clusterID, err) + } + err = UpdateTagsUsingCRN(oldList, newList, meta, cluster.CRN) + if err != nil { + log.Printf( + "An error occured during update of instance (%s) tags: %s", clusterID, err) + } + + } + + return resourceIBMContainerClusterRead(d, meta) +} + +func getID(d *schema.ResourceData, meta interface{}, clusterID string, oldWorkers []interface{}, workerInfo 
[]map[string]string) (string, error) { + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return "", err + } + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return "", err + } + workerFields, err := csClient.Workers().List(clusterID, targetEnv) + if err != nil { + return "", err + } + for _, wF := range workerFields { + exists := false + for _, oW := range oldWorkers { + oldPack := oW.(map[string]interface{}) + if strings.Compare(wF.ID, oldPack["id"].(string)) == 0 || strings.Compare(wF.State, "deleted") == 0 { + exists = true + } + } + if !exists { + for i := 0; i < len(workerInfo); i++ { + pack := workerInfo[i] + exists = exists || (strings.Compare(wF.ID, pack["id"]) == 0) + } + if !exists { + return wF.ID, nil + } + } + } + + return "", fmt.Errorf("Unable to get ID of worker") +} + +func resourceIBMContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { + + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return err + } + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + clusterID := d.Id() + forceDeleteStorage := d.Get("force_delete_storage").(bool) + err = csClient.Clusters().Delete(clusterID, targetEnv, forceDeleteStorage) + if err != nil { + return fmt.Errorf("Error deleting cluster: %s", err) + } + _, err = waitForClusterDelete(d, meta) + if err != nil { + return err + } + return nil +} + +func waitForClusterDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) { + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return nil, err + } + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + clusterID := d.Id() + stateConf := &resource.StateChangeConf{ + Pending: []string{clusterDeletePending}, + Target: []string{clusterDeleted}, + Refresh: func() (interface{}, string, error) { + cluster, err := csClient.Clusters().Find(clusterID, targetEnv) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && (apiErr.StatusCode() == 404) { + return cluster, clusterDeleted, nil + } + return nil, "", err + } + return cluster, clusterDeletePending, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 60 * time.Second, + MinTimeout: 10 * time.Second, + PollInterval: 60 * time.Second, + } + + return stateConf.WaitForState() +} + +// WaitForClusterAvailable Waits for cluster creation +func WaitForClusterAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + log.Printf("Waiting for cluster (%s) to be available.", d.Id()) + id := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", clusterProvisioning}, + Target: []string{clusterNormal}, + Refresh: clusterStateRefreshFunc(csClient.Clusters(), id, target), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func clusterStateRefreshFunc(client v1.Clusters, instanceID string, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + clusterFields, err := client.FindWithOutShowResourcesCompatible(instanceID, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving cluster: %s", err) + } + // Check active transactions + log.Println("Checking cluster") + //Check for cluster state to be normal + 
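+ // Any state other than "normal" is reported as clusterProvisioning, so the waiter keeps polling until the cluster settles or the create timeout elapses.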
log.Println("Checking cluster state", strings.Compare(clusterFields.State, clusterNormal)) + if strings.Compare(clusterFields.State, clusterNormal) != 0 { + return clusterFields, clusterProvisioning, nil + } + return clusterFields, clusterNormal, nil + } +} + +// waitForClusterMasterAvailable Waits for cluster creation +func waitForClusterMasterAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return nil, err + } + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + clusterID := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{deployRequested, deployInProgress}, + Target: []string{ready}, + Refresh: func() (interface{}, string, error) { + clusterFields, err := csClient.Clusters().FindWithOutShowResourcesCompatible(clusterID, targetEnv) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving cluster: %s", err) + } + + if clusterFields.MasterStatus == ready { + return clusterFields, ready, nil + } + return clusterFields, deployInProgress, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +// waitForClusterOneWorkerAvailable Waits for cluster creation +func waitForClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return nil, err + } + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + clusterID := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", "deploying", "provisioning"}, + Target: []string{normal}, + Refresh: func() (interface{}, string, error) { + + workerPoolsAPI := csClient.WorkerPools() + workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv) + if err != nil { + return nil, "", err + } + var poolName string + var poolContains bool + + if len(workerPools) > 0 && workerPoolContains(workerPools, defaultWorkerPool) { + poolName = defaultWorkerPool + poolContains = true + } else if len(workerPools) > 0 && workerPoolContains(workerPools, computeWorkerPool) && workerPoolContains(workerPools, gatewayWorkerpool) { + poolName = computeWorkerPool + poolContains = true + } + if poolContains { + wrkAPI := csClient.Workers() + workersByPool, err := wrkAPI.ListByWorkerPool(clusterID, poolName, false, targetEnv) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving workers of default worker pool for cluster: %s", err) + } + if len(workersByPool) == 0 { + return workersByPool, "provisioning", nil + } + for _, worker := range workersByPool { + + if worker.State == normal { + return workersByPool, normal, nil + } + } + return workersByPool, "deploying", nil + } + return nil, normal, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +// WaitForWorkerAvailable Waits for worker creation +func WaitForWorkerAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + log.Printf("Waiting for worker of the cluster (%s) to be available.", d.Id()) + id := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", workerProvisioning}, + Target: 
[]string{workerNormal}, + Refresh: workerStateRefreshFunc(csClient.Workers(), id, target), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func workerStateRefreshFunc(client v1.Workers, instanceID string, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + workerFields, err := client.List(instanceID, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + log.Println("Checking workers...") + //A worker has two fields, State and Status, so check both + for _, e := range workerFields { + if strings.Contains(e.KubeVersion, "pending") || strings.Compare(e.State, workerNormal) != 0 || strings.Compare(e.Status, workerReadyState) != 0 { + if strings.Compare(e.State, "deleted") != 0 { + return workerFields, workerProvisioning, nil + } + } + } + return workerFields, workerNormal, nil + } +} + +func WaitForClusterCreation(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + log.Printf("Waiting for cluster (%s) to be available.", d.Id()) + ClusterID := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", clusterProvisioning}, + Target: []string{clusterNormal}, + Refresh: func() (interface{}, string, error) { + workerFields, err := csClient.Workers().List(ClusterID, target) + log.Println("Total workers: ", len(workerFields)) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + log.Println("Checking workers...") + //verify that at least a single node is in the normal state + for _, e := range workerFields { + log.Println("Worker node status: ", e.State) + if e.State == workerNormal { + return workerFields, workerNormal, nil + } + } + return workerFields, workerProvisioning, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func WaitForSubnetAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + log.Printf("Waiting for the Ingress subdomain and secret to be assigned.") + id := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", workerProvisioning}, + Target: []string{workerNormal}, + Refresh: subnetStateRefreshFunc(csClient.Clusters(), id, d, target), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func subnetStateRefreshFunc(client v1.Clusters, instanceID string, d *schema.ResourceData, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + cluster, err := client.FindWithOutShowResourcesCompatible(instanceID, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving cluster: %s", err) + } + if cluster.IngressHostname == "" || cluster.IngressSecretName == "" { + return cluster, subnetProvisioning, nil + } + return cluster, subnetNormal, nil + } +} + +// WaitForClusterVersionUpdate waits for the cluster version to be updated +func WaitForClusterVersionUpdate(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { + 
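+ // Unlike the other waiters, this one requires the refresh function to report the target state on several consecutive polls (ContinuousTargetOccurence below) before the master version update is treated as complete.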
csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + log.Printf("Waiting for cluster (%s) version to be updated.", d.Id()) + id := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", versionUpdating}, + Target: []string{clusterNormal}, + Refresh: clusterVersionRefreshFunc(csClient.Clusters(), id, d, target), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 20 * time.Second, + MinTimeout: 10 * time.Second, + ContinuousTargetOccurence: 5, + } + + return stateConf.WaitForState() +} + +func clusterVersionRefreshFunc(client v1.Clusters, instanceID string, d *schema.ResourceData, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + clusterFields, err := client.FindWithOutShowResourcesCompatible(instanceID, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving cluster: %s", err) + } + // Check active transactions + kubeversion := d.Get("kube_version").(string) + log.Println("Checking cluster version", clusterFields.MasterKubeVersion, d.Get("kube_version").(string)) + if strings.Contains(clusterFields.MasterKubeVersion, "pending") { + return clusterFields, versionUpdating, nil + } else if !strings.Contains(clusterFields.MasterKubeVersion, kubeversion) { + return clusterFields, versionUpdating, nil + } + return clusterFields, clusterNormal, nil + } +} + +func resourceIBMContainerClusterExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return false, err + } + targetEnv, err := getClusterTargetHeader(d, meta) + if err != nil { + return false, err + } + clusterID := d.Id() + cls, err := csClient.Clusters().FindWithOutShowResourcesCompatible(clusterID, targetEnv) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 && strings.Contains(apiErr.Description(), "The specified cluster could not be found") { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return cls.ID == clusterID, nil +} + +func getSubnet(subnets []v1.Subnet, subnetId string) v1.Subnet { + for _, subnet := range subnets { + if subnet.ID == subnetId { + return subnet + } + } + return v1.Subnet{} +} + +func workerPoolContains(workerPools []v1.WorkerPoolResponse, pool string) bool { + for _, workerPool := range workerPools { + if workerPool.Name == pool { + return true + } + } + return false +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_cluster_feature.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_cluster_feature.go new file mode 100644 index 00000000000..45321b6b18e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_cluster_feature.go @@ -0,0 +1,353 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
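+// resource_ibm_container_cluster_feature.go implements the
+// ibm_container_cluster_feature resource, which toggles the public and
+// private service endpoints of an existing cluster and can refresh the API
+// servers and reload all worker nodes afterwards.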
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+  "log"
+  "time"
+
+  "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+  v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1"
+)
+
+const (
+  enablePrivateSECmdAction = "enablePrivateServiceEndpoint"
+  enablePublicSECmdAction  = "enablePublicServiceEndpoint"
+  disablePublicSECmdAction = "disablePublicServiceEndpoint"
+  reloadAction             = "reload"
+)
+
+func resourceIBMContainerClusterFeature() *schema.Resource {
+  return &schema.Resource{
+    Create:   resourceIBMContainerClusterFeatureCreate,
+    Read:     resourceIBMContainerClusterFeatureRead,
+    Update:   resourceIBMContainerClusterFeatureUpdate,
+    Delete:   resourceIBMContainerClusterFeatureDelete,
+    Importer: &schema.ResourceImporter{},
+    Timeouts: &schema.ResourceTimeout{
+      Create: schema.DefaultTimeout(90 * time.Minute),
+      Update: schema.DefaultTimeout(90 * time.Minute),
+    },
+
+    Schema: map[string]*schema.Schema{
+
+      "cluster": {
+        Type:        schema.TypeString,
+        Required:    true,
+        ForceNew:    true,
+        Description: "Cluster name or ID",
+      },
+
+      "public_service_endpoint": {
+        Type:     schema.TypeBool,
+        Optional: true,
+        Computed: true,
+      },
+
+      "private_service_endpoint": {
+        Type:     schema.TypeBool,
+        Optional: true,
+        Computed: true,
+      },
+
+      "public_service_endpoint_url": {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+
+      "refresh_api_servers": {
+        Type:        schema.TypeBool,
+        Optional:    true,
+        Default:     true,
+        Description: "Set to true to refresh the API servers of the Kubernetes cluster",
+      },
+
+      "reload_workers": {
+        Type:        schema.TypeBool,
+        Optional:    true,
+        Default:     true,
+        Description: "Set to true to reload the worker nodes",
+      },
+
+      "private_service_endpoint_url": {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+
+      "resource_group_id": {
+        Type:             schema.TypeString,
+        Optional:         true,
+        Description:      "ID of the resource group.",
+        Computed:         true,
+        DiffSuppressFunc: applyOnce,
+      },
+    },
+  }
+}
+
+func resourceIBMContainerClusterFeatureCreate(d *schema.ResourceData, meta interface{}) error {
+
+  cluster := d.Get("cluster").(string)
+  var isOptionSet bool
+
+  if v, ok := d.GetOkExists("private_service_endpoint"); ok {
+    if v.(bool) {
+      err := updateCluster(cluster, enablePrivateSECmdAction, d.Timeout(schema.TimeoutCreate), d, meta)
+      if err != nil {
+        return err
+      }
+    } else {
+      return fmt.Errorf("The `private_service_endpoint` cannot be disabled")
+    }
+    d.SetId(cluster)
+    err := reloadCluster(cluster, d.Timeout(schema.TimeoutCreate), d, meta)
+    if err != nil {
+      return err
+    }
+    isOptionSet = true
+  }
+
+  if v, ok := d.GetOkExists("public_service_endpoint"); ok {
+    var cmd string
+    if v.(bool) {
+      cmd = enablePublicSECmdAction
+    } else {
+      cmd = disablePublicSECmdAction
+    }
+    log.Printf("Started public service endpoint update with action %s", cmd)
+    err := updateCluster(cluster, cmd, d.Timeout(schema.TimeoutCreate), d, meta)
+    if err != nil {
+      return err
+    }
+    d.SetId(cluster)
+    err = reloadCluster(cluster, d.Timeout(schema.TimeoutCreate), d, meta)
+    if err != nil {
+      return err
+    }
+    isOptionSet = true
+  }
+
+  if !isOptionSet {
+    return fmt.Errorf("Provide either `public_service_endpoint` or `private_service_endpoint` or both.")
+  }
+  return resourceIBMContainerClusterFeatureRead(d, meta)
+}
+
+func reloadCluster(cluster string, timeout time.Duration, d *schema.ResourceData, meta interface{}) error {
+  csClient, err := meta.(ClientSession).ContainerAPI()
+  if err != nil {
+    return err
+  }
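+  // reloadCluster applies the endpoint change to the running cluster: it
+  // optionally refreshes the API servers (refresh_api_servers) and, when
+  // reload_workers is set, reloads every worker node, waiting for the
+  // cluster and its workers to settle before and after the reload command.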
+ targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + if v, ok := d.GetOkExists("refresh_api_servers"); ok { + if v.(bool) { + err = csClient.Clusters().RefreshAPIServers(cluster, targetEnv) + if err != nil { + return err + } + } + } + if v, ok := d.GetOkExists("reload_workers"); ok { + if v.(bool) { + log.Printf("Waiting for cluster (%s) to be available.", cluster) + _, err = WaitForClusterAvailableForFeatureUpdate(cluster, timeout, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for cluster (%s) to become ready: %s", cluster, err) + } + log.Printf("Waiting for workers (%s) to be available.", cluster) + _, err = WaitForWorkerAvailableForFeatureUpdate(cluster, timeout, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of cluster (%s) to become ready: %s", cluster, err) + } + params := v1.UpdateWorkerCommand{ + Action: reloadAction, + } + workerFields, err := csClient.Workers().List(cluster, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + workers := make([]string, len(workerFields)) + for i, worker := range workerFields { + workers[i] = worker.ID + } + err = csClient.Clusters().UpdateClusterWorkers(cluster, workers, params, targetEnv) + if err != nil { + return err + } + _, err = WaitForClusterAvailableForFeatureUpdate(cluster, timeout, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for cluster (%s) to become ready: %s", d.Id(), err) + } + _, err = WaitForWorkerAvailableForFeatureUpdate(cluster, timeout, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) + } + } + } + + return nil +} + +func updateCluster(cluster, actionCmd string, timeout time.Duration, d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + params := v1.ClusterUpdateParam{ + Action: actionCmd, + } + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + log.Printf("Waiting for cluster (%s) to be available.", cluster) + _, err = WaitForClusterAvailableForFeatureUpdate(cluster, timeout, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for cluster (%s) to become ready: %s", d.Id(), err) + } + log.Printf("Waiting for workers (%s) to be available.", cluster) + _, err = WaitForWorkerAvailableForFeatureUpdate(cluster, timeout, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) + } + log.Printf("Calling update with action cmd %s", actionCmd) + err = csClient.Clusters().Update(cluster, params, targetEnv) + if err != nil { + return err + } + log.Printf("success with action cmd %s", actionCmd) + + return nil +} + +func resourceIBMContainerClusterFeatureRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + clusterID := d.Id() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + cls, err := csClient.Clusters().Find(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving armada cluster: %s", err) + } + + d.Set("cluster", clusterID) + d.Set("public_service_endpoint", cls.PublicServiceEndpointEnabled) + d.Set("private_service_endpoint_url", cls.PrivateServiceEndpointURL) + 
d.Set("public_service_endpoint_url", cls.PublicServiceEndpointURL) + d.Set("private_service_endpoint", cls.PrivateServiceEndpointEnabled) + + return nil +} + +func resourceIBMContainerClusterFeatureDelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + return nil +} + +func resourceIBMContainerClusterFeatureUpdate(d *schema.ResourceData, meta interface{}) error { + + cluster := d.Get("cluster").(string) + var isOptionSet bool + if d.HasChange("private_service_endpoint") { + if v, ok := d.GetOkExists("private_service_endpoint"); ok { + if v.(bool) { + err := updateCluster(cluster, enablePrivateSECmdAction, d.Timeout(schema.TimeoutUpdate), d, meta) + if err != nil { + return err + } + } else { + return fmt.Errorf("The `private_service_endpoint` can not be disabled") + } + err := reloadCluster(cluster, d.Timeout(schema.TimeoutUpdate), d, meta) + if err != nil { + return err + } + isOptionSet = true + } + } + if d.HasChange("public_service_endpoint") { + if v, ok := d.GetOkExists("public_service_endpoint"); ok { + var cmd string + if v.(bool) { + cmd = enablePublicSECmdAction + } else { + cmd = disablePublicSECmdAction + } + err := updateCluster(cluster, cmd, d.Timeout(schema.TimeoutUpdate), d, meta) + if err != nil { + return err + } + err = reloadCluster(cluster, d.Timeout(schema.TimeoutUpdate), d, meta) + if err != nil { + return err + } + isOptionSet = true + } + } + + if !isOptionSet { + return fmt.Errorf("Provide either `public_service_endpoint` or `private_service_endpoint` or both.") + } + + return resourceIBMContainerClusterFeatureRead(d, meta) +} + +// WaitForClusterAvailable Waits for cluster creation +func WaitForClusterAvailableForFeatureUpdate(cluster string, timeout time.Duration, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + log.Printf("Waiting for cluster (%s) to be available.", cluster) + id := cluster + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", clusterProvisioning}, + Target: []string{clusterNormal}, + Refresh: clusterStateRefreshFunc(csClient.Clusters(), id, target), + Timeout: timeout, + Delay: 60 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func WaitForWorkerAvailableForFeatureUpdate(cluster string, timeout time.Duration, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + log.Printf("Waiting for worker of the cluster (%s) to be available.", cluster) + id := cluster + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", workerProvisioning}, + Target: []string{workerNormal}, + Refresh: workerStateRefreshFunc(csClient.Workers(), id, target), + Timeout: timeout, + Delay: 60 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_vpc_alb.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_vpc_alb.go new file mode 100644 index 00000000000..648d9b6e39c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_vpc_alb.go @@ -0,0 +1,317 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + v2 "github.com/IBM-Cloud/bluemix-go/api/container/containerv2" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMContainerVpcALB() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMContainerVpcALBCreate, + Read: resourceIBMContainerVpcALBRead, + Update: resourceIBMContainerVpcALBUpdate, + Delete: resourceIBMContainerVpcALBDelete, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "alb_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ALB ID", + }, + "alb_type": { + Type: schema.TypeString, + Computed: true, + Description: "Type of the ALB", + }, + "cluster": { + Type: schema.TypeString, + Computed: true, + Description: "cluster id", + }, + "enable": { + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"disable_deployment"}, + Description: "Enable the ALB instance in the cluster", + }, + "disable_deployment": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"enable"}, + Description: "Disable the ALB instance in the cluster", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "ALB name", + }, + "load_balancer_hostname": { + Type: schema.TypeString, + Computed: true, + Description: "Load balancer host name", + }, + "resize": { + Type: schema.TypeBool, + Computed: true, + Description: "boolean value to resize the albs", + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: "ALB state", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Status of the ALB", + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Description: "Zone info.", + }, + }, + } +} + +func resourceIBMContainerVpcALBCreate(d *schema.ResourceData, meta interface{}) error { + albClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + var enable, disableDeployment bool + albID := d.Get("alb_id").(string) + if v, ok := d.GetOkExists("enable"); ok { + enable = v.(bool) + } else if v, ok := d.GetOkExists("disable_deployment"); ok { + disableDeployment = v.(bool) + } else { + return fmt.Errorf("Provide either `enable` or `disable_deployment`") + } + + _, err = waitForVpcClusterAvailable(d, meta, albID, schema.TimeoutCreate) + if err != nil { + return fmt.Errorf( + "Error waiting for cluster resource availabilty (%s) : %s", d.Id(), err) + } + + params := v2.AlbConfig{ + AlbID: albID, + Enable: enable, + } + + albAPI := albClient.Albs() + targetEnv := v2.ClusterTargetHeader{} + if err != nil { + return err + } + + if enable { + err = albAPI.EnableAlb(params, targetEnv) + if err != nil { + return err + } + } else { + err = albAPI.DisableAlb(params, targetEnv) + if err != nil { + return err + } + } + + d.SetId(albID) + _, err = waitForVpcContainerALB(d, meta, albID, schema.TimeoutCreate, enable, disableDeployment) + if err != nil { + return fmt.Errorf( + "Error waiting for create resource alb (%s) : %s", d.Id(), err) + } + + return resourceIBMContainerVpcALBRead(d, meta) +} + +func resourceIBMContainerVpcALBRead(d *schema.ResourceData, meta interface{}) error { + albClient, 
err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + albID := d.Id() + + albAPI := albClient.Albs() + targetEnv := v2.ClusterTargetHeader{} + + albConfig, err := albAPI.GetAlb(albID, targetEnv) + if err != nil { + return err + } + + d.Set("alb_type", albConfig.AlbType) + d.Set("cluster", albConfig.Cluster) + d.Set("name", albConfig.Name) + d.Set("enable", albConfig.Enable) + d.Set("disable_deployment", albConfig.DisableDeployment) + d.Set("alb_id", albID) + d.Set("resize", albConfig.Resize) + d.Set("zone", albConfig.ZoneAlb) + d.Set("status", albConfig.Status) + d.Set("state", albConfig.State) + d.Set("load_balancer_hostname", albConfig.LoadBalancerHostname) + + return nil +} + +func resourceIBMContainerVpcALBUpdate(d *schema.ResourceData, meta interface{}) error { + albClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + albAPI := albClient.Albs() + + if d.HasChange("enable") { + enable := d.Get("enable").(bool) + disableDeployment := d.Get("disable_deployment").(bool) + albID := d.Id() + + _, err = waitForVpcClusterAvailable(d, meta, albID, schema.TimeoutCreate) + if err != nil { + return fmt.Errorf( + "Error waiting for cluster resource availabilty (%s) : %s", d.Id(), err) + } + + params := v2.AlbConfig{ + AlbID: albID, + Enable: enable, + } + + targetEnv := v2.ClusterTargetHeader{} + + if enable { + err = albAPI.EnableAlb(params, targetEnv) + if err != nil { + return err + } + } else { + err = albAPI.DisableAlb(params, targetEnv) + if err != nil { + return err + } + } + + _, err = waitForVpcContainerALB(d, meta, albID, schema.TimeoutUpdate, enable, disableDeployment) + if err != nil { + return fmt.Errorf( + "Error waiting for updating resource alb (%s) : %s", d.Id(), err) + } + + } + return resourceIBMContainerVpcALBRead(d, meta) +} + +func waitForVpcContainerALB(d *schema.ResourceData, meta interface{}, albID, timeout string, enable, disableDeployment bool) (interface{}, error) { + albClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return false, err + } + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"active"}, + Refresh: func() (interface{}, string, error) { + targetEnv := v2.ClusterTargetHeader{} + alb, err := albClient.Albs().GetAlb(albID, targetEnv) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return nil, "", fmt.Errorf("The resource alb %s does not exist anymore: %v", d.Id(), err) + } + return nil, "", err + } + if enable { + if alb.Enable == false { + return alb, "pending", nil + } + } else if disableDeployment { + if alb.Enable == true { + return alb, "pending", nil + } + } + return alb, "active", nil + }, + Timeout: d.Timeout(timeout), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func resourceIBMContainerVpcALBDelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + + return nil +} + +func waitForVpcClusterAvailable(d *schema.ResourceData, meta interface{}, albID, timeout string) (interface{}, error) { + albClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return false, err + } + createStateConf := &resource.StateChangeConf{ + Pending: []string{deployRequested, deployInProgress}, + Target: []string{ready}, + Refresh: func() (interface{}, string, error) { + targetEnv := v2.ClusterTargetHeader{} + albInfo, err := albClient.Albs().GetAlb(albID, targetEnv) + if err == nil { + cluster 
:= albInfo.Cluster
+        workerPools, err := albClient.WorkerPools().ListWorkerPools(cluster, targetEnv)
+        if err != nil {
+          return workerPools, deployInProgress, err
+        }
+        for _, wpool := range workerPools {
+          workers, err := albClient.Workers().ListByWorkerPool(cluster, wpool.ID, false, targetEnv)
+          if err != nil {
+            return wpool, deployInProgress, err
+          }
+          healthCounter := 0
+
+          for _, worker := range workers {
+            log.Println("worker: ", worker.ID)
+            log.Println("worker health state: ", worker.Health.State)
+
+            if worker.Health.State == normal {
+              healthCounter++
+            }
+          }
+          if healthCounter != len(workers) {
+            log.Println("not all worker nodes are in the normal state")
+            return wpool, deployInProgress, nil
+          }
+        }
+      } else {
+        log.Println("ALB info not available")
+        return albInfo, deployInProgress, err
+      }
+      return albInfo, ready, nil
+    },
+    Timeout:    d.Timeout(timeout),
+    Delay:      10 * time.Second,
+    MinTimeout: 5 * time.Second,
+  }
+  return createStateConf.WaitForState()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_vpc_cluster.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_vpc_cluster.go
new file mode 100644
index 00000000000..078f46ca89b
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_vpc_cluster.go
@@ -0,0 +1,1363 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+  "log"
+  "os"
+  "strings"
+  "time"
+
+  "github.com/IBM/go-sdk-core/v3/core"
+  "github.com/IBM/vpc-go-sdk/vpcclassicv1"
+  "github.com/IBM/vpc-go-sdk/vpcv1"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+
+  v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1"
+  v2 "github.com/IBM-Cloud/bluemix-go/api/container/containerv2"
+  "github.com/IBM-Cloud/bluemix-go/bmxerror"
+)
+
+const (
+  deployRequested    = "Deploy requested"
+  deployInProgress   = "Deploy in progress"
+  ready              = "Ready"
+  normal             = "normal"
+  masterNodeReady    = "MasterNodeReady"
+  oneWorkerNodeReady = "OneWorkerNodeReady"
+  ingressReady       = "IngressReady"
+)
+
+func resourceIBMContainerVpcCluster() *schema.Resource {
+  return &schema.Resource{
+    Create:   resourceIBMContainerVpcClusterCreate,
+    Read:     resourceIBMContainerVpcClusterRead,
+    Update:   resourceIBMContainerVpcClusterUpdate,
+    Delete:   resourceIBMContainerVpcClusterDelete,
+    Exists:   resourceIBMContainerVpcClusterExists,
+    Importer: &schema.ResourceImporter{},
+
+    CustomizeDiff: customdiff.Sequence(
+      func(diff *schema.ResourceDiff, v interface{}) error {
+        return resourceTagsCustomizeDiff(diff)
+      },
+    ),
+
+    Schema: map[string]*schema.Schema{
+
+      "flavor": {
+        Type:        schema.TypeString,
+        Required:    true,
+        ForceNew:    true,
+        Description: "Cluster nodes flavor",
+      },
+
+      "name": {
+        Type:        schema.TypeString,
+        Required:    true,
+        ForceNew:    true,
+        Description: "The cluster name",
+      },
+
+      "vpc_id": {
+        Type:        schema.TypeString,
+        Required:    true,
+        ForceNew:    true,
+        Description: "The VPC ID where the cluster is created",
+      },
+
+      "kms_config": {
+        Type:        schema.TypeList,
+        Optional:    true,
+        MaxItems:    1,
+        Description: "Enables KMS on a given cluster",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "instance_id": {
+              Type:        schema.TypeString,
+              Required:    true,
+              Description: "ID of the KMS instance to use to encrypt the cluster.",
+            },
+            "crk_id": {
+              Type:        schema.TypeString,
+              Required:    true,
+              Description: "ID of the customer root key.",
+            },
+            "private_endpoint": {
+              Type:        schema.TypeBool,
+              Optional:    true,
+              Default:     false,
+              Description: "Specify this option to use the KMS private service endpoint.",
+            },
+          },
+        },
+      },
+
+      "zones": {
+        Type:        schema.TypeSet,
+        Required:    true,
+        Description: "Zone info",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "name": {
+              Type:        schema.TypeString,
+              Required:    true,
+              Description: "Zone for the worker pool in a multizone cluster",
+            },
+
+            "subnet_id": {
+              Type:        schema.TypeString,
+              Required:    true,
+              Description: "The VPC subnet to assign the cluster",
+            },
+          },
+        },
+      },
+      // Optional arguments in cluster creation
+
+      "kube_version": {
+        Type:     schema.TypeString,
+        Computed: true,
+        Optional: true,
+        DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {
+          if o == "" {
+            return false
+          }
+          new := strings.Split(n, ".")
+          old := strings.Split(o, ".")
+
+          if strings.Compare(new[0]+"."+strings.Split(new[1], "_")[0], old[0]+"."+strings.Split(old[1], "_")[0]) == 0 {
+            return true
+          }
+          return false
+        },
+        Description: "Kubernetes version",
+      },
+
+      "update_all_workers": {
+        Type:        schema.TypeBool,
+        Optional:    true,
+        Default:     false,
+        Description: "Updates all the worker nodes if set to true",
+      },
+
+      "patch_version": {
+        Type:        schema.TypeString,
+        Optional:    true,
+        Description: "Kubernetes patch version",
+      },
+
+      "retry_patch_version": {
+        Type:        schema.TypeInt,
+        Optional:    true,
+        Description: "Argument which helps to retry the patch version updates on worker nodes. Increment the value to retry the patch updates if the previous apply fails",
+      },
+
+      "wait_for_worker_update": {
+        Type:        schema.TypeBool,
+        Optional:    true,
+        Default:     true,
+        Description: "Wait for worker node to update during kube version update.",
+      },
+
+      "service_subnet": {
+        Type:        schema.TypeString,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "Custom subnet CIDR to provide private IP addresses for services",
+        Computed:    true,
+      },
+
+      "pod_subnet": {
+        Type:        schema.TypeString,
+        Optional:    true,
+        ForceNew:    true,
+        Description: "Custom subnet CIDR to provide private IP addresses for pods",
+        Computed:    true,
+      },
+
+      "worker_count": {
+        Type:        schema.TypeInt,
+        Optional:    true,
+        Default:     1,
+        Description: "Number of worker nodes in the cluster",
+      },
+
+      "worker_labels": {
+        Type:        schema.TypeMap,
+        Optional:    true,
+        Computed:    true,
+        Elem:        &schema.Schema{Type: schema.TypeString},
+        Description: "Labels for default worker pool",
+      },
+
+      "disable_public_service_endpoint": {
+        Type:        schema.TypeBool,
+        Optional:    true,
+        Default:     false,
+        Description: "Boolean value true if Public service endpoint to be disabled",
+      },
+
+      "tags": {
+        Type:        schema.TypeSet,
+        Optional:    true,
+        Computed:    true,
+        Elem:        &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_container_vpc_cluster", "tag")},
+        Set:         resourceIBMVPCHash,
+        Description: "List of tags for the resources",
+      },
+
+      "wait_till": {
+        Type:             schema.TypeString,
+        Optional:         true,
+        Default:          ingressReady,
+        DiffSuppressFunc: applyOnce,
+        ValidateFunc:     validation.StringInSlice([]string{masterNodeReady, oneWorkerNodeReady, ingressReady}, true),
+        Description:      "wait_till can be configured for Master Ready, One worker Ready or Ingress Ready",
+      },
+
+      "entitlement": {
+        Type:             schema.TypeString,
+        Optional:         true,
+        DiffSuppressFunc: applyOnce,
+        Description:      "Entitlement option
reduces additional OCP Licence cost in Openshift Clusters", + }, + + "cos_instance_crn": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: applyOnce, + Description: "A standard cloud object storage instance CRN to back up the internal registry in your OpenShift on VPC Gen 2 cluster", + }, + + "force_delete_storage": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Force the removal of a cluster and its persistent storage. Deleted data cannot be recovered", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", + }, + + //Get Cluster info Request + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "master_status": { + Type: schema.TypeString, + Computed: true, + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: "ID of the resource group.", + }, + + "master_url": { + Type: schema.TypeString, + Computed: true, + }, + "albs": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "alb_type": { + Type: schema.TypeString, + Computed: true, + }, + "enable": { + Type: schema.TypeBool, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "load_balancer_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "resize": { + Type: schema.TypeBool, + Computed: true, + }, + "disable_deployment": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "public_service_endpoint_url": { + Type: schema.TypeString, + Computed: true, + }, + + "private_service_endpoint_url": { + Type: schema.TypeString, + Computed: true, + }, + + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "CRN of resource instance", + }, + + "ingress_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "ingress_secret": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + }, + } +} + +func resourceIBMContainerVpcClusterValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmContainerVpcClusteresourceValidator := ResourceValidator{ResourceName: "ibm_container_vpc_cluster", Schema: validateSchema} + return &ibmContainerVpcClusteresourceValidator +} + +func resourceIBMContainerVpcClusterCreate(d *schema.ResourceData, meta interface{}) error { + 
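+  // The target VPC generation is not part of the resource schema; it is
+  // derived from the account's infrastructure generation below, so Gen 1
+  // accounts provision through the "vpc-classic" provider and all others
+  // through "vpc-gen2".
+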
+ var vpcProvider string + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + if userDetails.generation == 1 { + vpcProvider = "vpc-classic" + } else { + vpcProvider = "vpc-gen2" + } + + csClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + disablePublicServiceEndpoint := d.Get("disable_public_service_endpoint").(bool) + name := d.Get("name").(string) + var kubeVersion string + if v, ok := d.GetOk("kube_version"); ok { + kubeVersion = v.(string) + } + podSubnet := d.Get("pod_subnet").(string) + serviceSubnet := d.Get("service_subnet").(string) + vpcID := d.Get("vpc_id").(string) + flavor := d.Get("flavor").(string) + workerCount := d.Get("worker_count").(int) + + // timeoutStage will define the timeout stage + var timeoutStage string + if v, ok := d.GetOk("wait_till"); ok { + timeoutStage = v.(string) + } + + var zonesList = make([]v2.Zone, 0) + + if res, ok := d.GetOk("zones"); ok { + zones := res.(*schema.Set).List() + for _, e := range zones { + r, _ := e.(map[string]interface{}) + if ID, subnetID := r["name"], r["subnet_id"]; ID != nil && subnetID != nil { + zoneParam := v2.Zone{} + zoneParam.ID, zoneParam.SubnetID = ID.(string), subnetID.(string) + zonesList = append(zonesList, zoneParam) + } + + } + } + + workerpool := v2.WorkerPoolConfig{ + VpcID: vpcID, + Flavor: flavor, + WorkerCount: workerCount, + Zones: zonesList, + } + + if l, ok := d.GetOk("worker_labels"); ok { + labels := make(map[string]string) + for k, v := range l.(map[string]interface{}) { + labels[k] = v.(string) + } + workerpool.Labels = labels + } + + params := v2.ClusterCreateRequest{ + DisablePublicServiceEndpoint: disablePublicServiceEndpoint, + Name: name, + KubeVersion: kubeVersion, + PodSubnet: podSubnet, + ServiceSubnet: serviceSubnet, + WorkerPools: workerpool, + Provider: vpcProvider, + } + + // Update params with Entitlement option if provided + if v, ok := d.GetOk("entitlement"); ok { + params.DefaultWorkerPoolEntitlement = v.(string) + } + + // Update params with Cloud Object Store instance CRN id option if provided + if v, ok := d.GetOk("cos_instance_crn"); ok { + params.CosInstanceCRN = v.(string) + } + + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + + cls, err := csClient.Clusters().Create(params, targetEnv) + + if err != nil { + return err + } + d.SetId(cls.ID) + switch strings.ToLower(timeoutStage) { + + case strings.ToLower(masterNodeReady): + _, err = waitForVpcClusterMasterAvailable(d, meta) + if err != nil { + return err + } + + case strings.ToLower(oneWorkerNodeReady): + _, err = waitForVpcClusterOneWorkerAvailable(d, meta) + if err != nil { + return err + } + + case strings.ToLower(ingressReady): + _, err = waitForVpcClusterIngressAvailable(d, meta) + if err != nil { + return err + } + + } + return resourceIBMContainerVpcClusterUpdate(d, meta) + +} + +func resourceIBMContainerVpcClusterUpdate(d *schema.ResourceData, meta interface{}) error { + + csClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + + clusterID := d.Id() + + v := os.Getenv("IC_ENV_TAGS") + if d.HasChange("tags") || v != "" { + oldList, newList := d.GetChange("tags") + cluster, err := csClient.Clusters().GetCluster(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving cluster %s: %s", clusterID, err) + } + err = UpdateTagsUsingCRN(oldList, 
newList, meta, cluster.CRN)
+    if err != nil {
+      log.Printf(
+        "An error occurred during update of instance (%s) tags: %s", clusterID, err)
+    }
+  }
+
+  if d.HasChange("kms_config") {
+    kmsConfig := v2.KmsEnableReq{}
+    kmsConfig.Cluster = clusterID
+    targetEnv := v2.ClusterHeader{}
+    if kms, ok := d.GetOk("kms_config"); ok {
+
+      kmsConfiglist := kms.([]interface{})
+
+      for _, l := range kmsConfiglist {
+        kmsMap, _ := l.(map[string]interface{})
+
+        // instance_id - Required field
+        instanceID := kmsMap["instance_id"].(string)
+        kmsConfig.Kms = instanceID
+
+        // crk_id - Required field
+        crk := kmsMap["crk_id"].(string)
+        kmsConfig.Crk = crk
+
+        // private_endpoint - optional field, so check for existence
+        if privateEndpoint := kmsMap["private_endpoint"]; privateEndpoint != nil {
+          endpoint := privateEndpoint.(bool)
+          kmsConfig.PrivateEndpoint = endpoint
+        }
+      }
+    }
+
+    err := csClient.Kms().EnableKms(kmsConfig, targetEnv)
+    if err != nil {
+      log.Printf(
+        "An error occurred during EnableKms (cluster: %s) error: %s", d.Id(), err)
+      return err
+    }
+
+  }
+
+  if (d.HasChange("kube_version") || d.HasChange("update_all_workers") || d.HasChange("patch_version") || d.HasChange("retry_patch_version")) && !d.IsNewResource() {
+
+    if d.HasChange("kube_version") {
+      ClusterClient, err := meta.(ClientSession).ContainerAPI()
+      if err != nil {
+        return err
+      }
+      var masterVersion string
+      if v, ok := d.GetOk("kube_version"); ok {
+        masterVersion = v.(string)
+      }
+      params := v1.ClusterUpdateParam{
+        Action:  "update",
+        Force:   true,
+        Version: masterVersion,
+      }
+
+      Env, err := getClusterTargetHeader(d, meta)
+
+      if err != nil {
+        return err
+      }
+      Error := ClusterClient.Clusters().Update(clusterID, params, Env)
+      if Error != nil {
+        return Error
+      }
+      _, err = WaitForVpcClusterVersionUpdate(d, meta, targetEnv)
+      if err != nil {
+        return fmt.Errorf(
+          "Error waiting for cluster (%s) version to be updated: %s", d.Id(), err)
+      }
+    }
+
+    csClient, err := meta.(ClientSession).VpcContainerAPI()
+    if err != nil {
+      return err
+    }
+    targetEnv, err := getVpcClusterTargetHeader(d, meta)
+    if err != nil {
+      return err
+    }
+
+    clusterID := d.Id()
+    cls, err := csClient.Clusters().GetCluster(clusterID, targetEnv)
+    if err != nil {
+      return fmt.Errorf("Error retrieving container vpc cluster: %s", err)
+    }
+
+    // Update the worker nodes after master node kube-version is updated.
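+    // Workers are replaced one at a time. After each ReplaceWokerNode call the
+    // code (1) waits for the old node to disappear, (2) waits for the pool to
+    // return to its previous size, (3) diffs the worker list against
+    // workersInfo to find the new node's ID, and (4) waits for that node to
+    // reach the master's version in a normal state.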
+ // workers will store the existing workers info to identify the replaced node + workersInfo := make(map[string]int, 0) + + updateAllWorkers := d.Get("update_all_workers").(bool) + if updateAllWorkers || d.HasChange("patch_version") || d.HasChange("retry_patch_version") { + + patchVersion := d.Get("patch_version").(string) + workers, err := csClient.Workers().ListWorkers(clusterID, false, targetEnv) + if err != nil { + d.Set("patch_version", nil) + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + + for index, worker := range workers { + workersInfo[worker.ID] = index + } + workersCount := len(workers) + + waitForWorkerUpdate := d.Get("wait_for_worker_update").(bool) + + for _, worker := range workers { + // check if change is present in MAJOR.MINOR version or in PATCH version + if strings.Split(worker.KubeVersion.Actual, "_")[0] != strings.Split(cls.MasterKubeVersion, "_")[0] || (strings.Split(worker.KubeVersion.Actual, ".")[2] != patchVersion && patchVersion == strings.Split(worker.KubeVersion.Target, ".")[2]) { + _, err := csClient.Workers().ReplaceWokerNode(clusterID, worker.ID, targetEnv) + // As API returns http response 204 NO CONTENT, error raised will be exempted. + if err != nil && !strings.Contains(err.Error(), "EmptyResponseBody") { + d.Set("patch_version", nil) + return fmt.Errorf("Error replacing the worker node from the cluster: %s", err) + } + + if waitForWorkerUpdate { + //1. wait for worker node to delete + _, deleteError := waitForWorkerNodetoDelete(d, meta, targetEnv, worker.ID) + if deleteError != nil { + d.Set("patch_version", nil) + return fmt.Errorf("Worker node - %s is failed to replace", worker.ID) + } + + //2. wait for new workerNode + _, newWorkerError := waitForNewWorker(d, meta, targetEnv, workersCount) + if newWorkerError != nil { + d.Set("patch_version", nil) + return fmt.Errorf("Failed to spawn new worker node") + } + + //3. Get new worker node ID and update the map + newWorkerID, index, newNodeError := getNewWorkerID(d, meta, targetEnv, workersInfo) + if newNodeError != nil { + d.Set("patch_version", nil) + return fmt.Errorf("Unable to find the new worker node info") + } + + delete(workersInfo, worker.ID) + workersInfo[newWorkerID] = index + + //4. 
wait for the worker's version update and normal state
+            _, Err := WaitForVpcClusterWokersVersionUpdate(d, meta, targetEnv, cls.MasterKubeVersion, newWorkerID)
+            if Err != nil {
+              d.Set("patch_version", nil)
+              return fmt.Errorf(
+                "Error waiting for cluster (%s) worker nodes kube version to be updated: %s", d.Id(), Err)
+            }
+          }
+        }
+      }
+    }
+  }
+
+  if d.HasChange("worker_labels") && !d.IsNewResource() {
+    labels := make(map[string]string)
+    if l, ok := d.GetOk("worker_labels"); ok {
+      for k, v := range l.(map[string]interface{}) {
+        labels[k] = v.(string)
+      }
+    }
+
+    ClusterClient, err := meta.(ClientSession).ContainerAPI()
+    if err != nil {
+      return err
+    }
+    Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup}
+
+    err = ClusterClient.WorkerPools().UpdateLabelsWorkerPool(clusterID, "default", labels, Env)
+    if err != nil {
+      return fmt.Errorf(
+        "Error updating the labels: %s", err)
+    }
+  }
+
+  if d.HasChange("worker_count") && !d.IsNewResource() {
+    count := d.Get("worker_count").(int)
+    ClusterClient, err := meta.(ClientSession).ContainerAPI()
+    if err != nil {
+      return err
+    }
+    Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup}
+
+    err = ClusterClient.WorkerPools().ResizeWorkerPool(clusterID, "default", count, Env)
+    if err != nil {
+      return fmt.Errorf(
+        "Error updating the worker_count %d: %s", count, err)
+    }
+  }
+  if d.HasChange("zones") && !d.IsNewResource() {
+    oldList, newList := d.GetChange("zones")
+    if oldList == nil {
+      oldList = new(schema.Set)
+    }
+    if newList == nil {
+      newList = new(schema.Set)
+    }
+    os := oldList.(*schema.Set)
+    ns := newList.(*schema.Set)
+    remove := os.Difference(ns).List()
+    add := ns.Difference(os).List()
+    if len(add) > 0 {
+      for _, zone := range add {
+        newZone := zone.(map[string]interface{})
+        zoneParam := v2.WorkerPoolZone{
+          Cluster:      clusterID,
+          Id:           newZone["name"].(string),
+          SubnetID:     newZone["subnet_id"].(string),
+          WorkerPoolID: "default",
+        }
+        err = csClient.WorkerPools().CreateWorkerPoolZone(zoneParam, targetEnv)
+        if err != nil {
+          return fmt.Errorf("Error adding zone to container vpc cluster: %s", err)
+        }
+        _, err = WaitForWorkerPoolAvailable(d, meta, clusterID, "default", d.Timeout(schema.TimeoutCreate), targetEnv)
+        if err != nil {
+          return fmt.Errorf(
+            "Error waiting for workerpool (%s) to become ready: %s", d.Id(), err)
+        }
+
+      }
+    }
+    if len(remove) > 0 {
+      for _, zone := range remove {
+        oldZone := zone.(map[string]interface{})
+        ClusterClient, err := meta.(ClientSession).ContainerAPI()
+        if err != nil {
+          return err
+        }
+        Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup}
+        err = ClusterClient.WorkerPools().RemoveZone(clusterID, oldZone["name"].(string), "default", Env)
+        if err != nil {
+          return fmt.Errorf("Error removing zone from container vpc cluster: %s", err)
+        }
+        _, err = WaitForV2WorkerZoneDeleted(clusterID, "default", oldZone["name"].(string), meta, d.Timeout(schema.TimeoutDelete), targetEnv)
+        if err != nil {
+          return fmt.Errorf(
+            "Error waiting for deleting workers of worker pool (%s) of cluster (%s): %s", "default", clusterID, err)
+        }
+      }
+    }
+  }
+
+  if d.HasChange("force_delete_storage") {
+    var forceDeleteStorage bool
+    if v, ok := d.GetOk("force_delete_storage"); ok {
+      forceDeleteStorage = v.(bool)
+    }
+    d.Set("force_delete_storage", forceDeleteStorage)
+  }
+
+  return resourceIBMContainerVpcClusterRead(d, meta)
+}
+func WaitForV2WorkerZoneDeleted(clusterNameOrID, workerPoolNameOrID, zone string, meta interface{}, timeout time.Duration, target v2.ClusterTargetHeader) (interface{}, error) {
+  csClient, err := meta.(ClientSession).VpcContainerAPI()
+  if err != nil {
+    return nil, err
+  }
+  stateConf := &resource.StateChangeConf{
+    Pending:    []string{"deleting"},
+    Target:     []string{workerDeleteState},
+    Refresh:    workerPoolV2ZoneDeleteStateRefreshFunc(csClient.Workers(), clusterNameOrID, workerPoolNameOrID, zone, target),
+    Timeout:    timeout,
+    Delay:      10 * time.Second,
+    MinTimeout: 10 * time.Second,
+  }
+
+  return stateConf.WaitForState()
+}
+func workerPoolV2ZoneDeleteStateRefreshFunc(client v2.Workers, instanceID, workerPoolNameOrID, zone string, target v2.ClusterTargetHeader) resource.StateRefreshFunc {
+  return func() (interface{}, string, error) {
+    workerFields, err := client.ListByWorkerPool(instanceID, workerPoolNameOrID, true, target)
+    if err != nil {
+      return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err)
+    }
+    // A worker has two fields, State and Status, so check both of them
+    for _, e := range workerFields {
+      if e.Location == zone {
+        if strings.Compare(e.LifeCycle.ActualState, "deleted") != 0 {
+          return workerFields, "deleting", nil
+        }
+      }
+    }
+    return workerFields, workerDeleteState, nil
+  }
+}
+func resourceIBMContainerVpcClusterRead(d *schema.ResourceData, meta interface{}) error {
+
+  csClient, err := meta.(ClientSession).VpcContainerAPI()
+  if err != nil {
+    return err
+  }
+  albsAPI := csClient.Albs()
+
+  targetEnv, err := getVpcClusterTargetHeader(d, meta)
+  if err != nil {
+    return err
+  }
+
+  clusterID := d.Id()
+  cls, err := csClient.Clusters().GetCluster(clusterID, targetEnv)
+  if err != nil {
+    return fmt.Errorf("Error retrieving container vpc cluster: %s", err)
+  }
+
+  workerPool, err := csClient.WorkerPools().GetWorkerPool(clusterID, "default", targetEnv)
+  if err != nil {
+    return fmt.Errorf("Error retrieving default worker pool of the cluster %s: %s", clusterID, err)
+  }
+
+  var zones = make([]map[string]interface{}, 0)
+  for _, zone := range workerPool.Zones {
+    for _, subnet := range zone.Subnets {
+      if subnet.Primary {
+        zoneInfo := map[string]interface{}{
+          "name":      zone.ID,
+          "subnet_id": subnet.ID,
+        }
+        zones = append(zones, zoneInfo)
+      }
+    }
+  }
+
+  albs, err := albsAPI.ListClusterAlbs(clusterID, targetEnv)
+  if err != nil && !strings.Contains(err.Error(), "This operation is not supported for your cluster's version.") {
+    return fmt.Errorf("Error retrieving ALBs of the cluster %s: %s", clusterID, err)
+  }
+
+  d.Set("name", cls.Name)
+  d.Set("crn", cls.CRN)
+  d.Set("master_status", cls.Lifecycle.MasterStatus)
+  d.Set("zones", zones)
+  if strings.HasSuffix(cls.MasterKubeVersion, "_openshift") {
+    d.Set("kube_version", strings.Split(cls.MasterKubeVersion, "_")[0]+"_openshift")
+  } else {
+    d.Set("kube_version", strings.Split(cls.MasterKubeVersion, "_")[0])
+  }
+  d.Set("worker_count", workerPool.WorkerCount)
+  d.Set("worker_labels", IgnoreSystemLabels(workerPool.Labels))
+  if cls.Vpcs != nil {
+    d.Set("vpc_id", cls.Vpcs[0])
+  }
+  d.Set("master_url", cls.MasterURL)
+  d.Set("flavor", workerPool.Flavor)
+  d.Set("service_subnet", cls.ServiceSubnet)
+  d.Set("pod_subnet", cls.PodSubnet)
+  d.Set("state", cls.State)
+  d.Set("ingress_hostname", cls.Ingress.HostName)
+  d.Set("ingress_secret", cls.Ingress.SecretName)
+  d.Set("albs", flattenVpcAlbs(albs, "all"))
+  d.Set("resource_group_id", cls.ResourceGroupID)
+  d.Set("public_service_endpoint_url", cls.ServiceEndpoints.PublicServiceEndpointURL)
+  d.Set("private_service_endpoint_url", cls.ServiceEndpoints.PrivateServiceEndpointURL)
+  if cls.ServiceEndpoints.PublicServiceEndpointURL != "" {
+    d.Set("disable_public_service_endpoint", false)
+  } else {
+    d.Set("disable_public_service_endpoint", true)
+  }
+
+  tags, err := GetTagsUsingCRN(meta, cls.CRN)
+  if err != nil {
+    log.Printf(
+      "An error occurred during reading of instance (%s) tags: %s", d.Id(), err)
+  }
+  d.Set("tags", tags)
+  controller, err := getBaseController(meta)
+  if err != nil {
+    return err
+  }
+  d.Set(ResourceControllerURL, controller+"/kubernetes/clusters")
+  d.Set(ResourceName, cls.Name)
+  d.Set(ResourceCRN, cls.CRN)
+  d.Set(ResourceStatus, cls.State)
+  d.Set(ResourceGroupName, cls.ResourceGroupName)
+
+  return nil
+}
+
+func resourceIBMContainerVpcClusterDelete(d *schema.ResourceData, meta interface{}) error {
+
+  targetEnv, err := getVpcClusterTargetHeader(d, meta)
+  if err != nil {
+    return err
+  }
+  csClient, err := meta.(ClientSession).VpcContainerAPI()
+  if err != nil {
+    return err
+  }
+  clusterID := d.Id()
+
+  var zonesList = make([]v2.Zone, 0)
+
+  if res, ok := d.GetOk("zones"); ok {
+    zones := res.(*schema.Set).List()
+    for _, e := range zones {
+      r, _ := e.(map[string]interface{})
+      if ID, subnetID := r["name"], r["subnet_id"]; ID != nil && subnetID != nil {
+        zoneParam := v2.Zone{}
+        zoneParam.ID, zoneParam.SubnetID = ID.(string), subnetID.(string)
+        zonesList = append(zonesList, zoneParam)
+      }
+
+    }
+  }
+  var region = ""
+  if len(zonesList) > 0 {
+    splitZone := strings.Split(zonesList[0].ID, "-")
+    region = splitZone[0] + "-" + splitZone[1]
+  }
+
+  bxsession, err := meta.(ClientSession).BluemixSession()
+  if err != nil {
+    return err
+  }
+
+  var authenticator *core.BearerTokenAuthenticator
+  if strings.HasPrefix(bxsession.Config.IAMAccessToken, "Bearer") {
+    authenticator = &core.BearerTokenAuthenticator{
+      BearerToken: bxsession.Config.IAMAccessToken[7:],
+    }
+  } else {
+    authenticator = &core.BearerTokenAuthenticator{
+      BearerToken: bxsession.Config.IAMAccessToken,
+    }
+  }
+
+  forceDeleteStorage := d.Get("force_delete_storage").(bool)
+  err = csClient.Clusters().Delete(clusterID, targetEnv, forceDeleteStorage)
+  if err != nil {
+    return fmt.Errorf("Error deleting cluster: %s", err)
+  }
+  _, err = waitForVpcClusterDelete(d, meta)
+  if err != nil {
+    return err
+  }
+
+  if region != "" {
+    userDetails, err := meta.(ClientSession).BluemixUserDetails()
+    if err != nil {
+      return err
+    }
+    if userDetails.generation == 1 {
+      vpcclassicurl := fmt.Sprintf("https://%s.iaas.cloud.ibm.com/v1", region)
+      vpcclassicoptions := &vpcclassicv1.VpcClassicV1Options{
+        URL:           envFallBack([]string{"IBMCLOUD_IS_API_ENDPOINT"}, vpcclassicurl),
+        Authenticator: authenticator,
+      }
+      sess1, err := vpcclassicv1.NewVpcClassicV1(vpcclassicoptions)
+      if err != nil {
+        log.Println("error creating vpcclassic session", err)
+      }
+      listlbOptions := &vpcclassicv1.ListLoadBalancersOptions{}
+      lbs, response, err1 := sess1.ListLoadBalancers(listlbOptions)
+      if err1 != nil {
+        log.Printf("Error retrieving vpc load balancers: %s\n%s", err1, response)
+      }
+      if lbs != nil && lbs.LoadBalancers != nil && len(lbs.LoadBalancers) > 0 {
+        for _, lb := range lbs.LoadBalancers {
+          if strings.Contains(*lb.Name, clusterID) {
+            log.Println("Deleting Load Balancer", *lb.Name)
+            id := *lb.ID
+            _, err = isWaitForClassicLBDeleted(sess1, id, d.Timeout(schema.TimeoutDelete))
+            if err != nil {
+              log.Printf("Error waiting for vpc load balancer to be deleted: %s\n", err)
+
+            }
+          }
+        }
+      }
+    } else {
+      vpcurl := fmt.Sprintf("https://%s.iaas.cloud.ibm.com/v1", region)
+      vpcoptions := &vpcv1.VpcV1Options{
+        URL:           envFallBack([]string{"IBMCLOUD_IS_NG_API_ENDPOINT"}, vpcurl),
+        Authenticator: authenticator,
+      }
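+      // As in the classic branch above, cluster deletion can orphan VPC load
+      // balancers whose names embed the cluster ID; list them and wait for
+      // any matching load balancer to finish deleting.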
+      sess1, err := vpcv1.NewVpcV1(vpcoptions)
+      if err != nil {
+        log.Println("error creating vpc session", err)
+      }
+      listlbOptions := &vpcv1.ListLoadBalancersOptions{}
+      lbs, response, err1 := sess1.ListLoadBalancers(listlbOptions)
+      if err1 != nil {
+        log.Printf("Error retrieving vpc load balancers: %s\n%s", err1, response)
+      }
+      if lbs != nil && lbs.LoadBalancers != nil && len(lbs.LoadBalancers) > 0 {
+        for _, lb := range lbs.LoadBalancers {
+          if strings.Contains(*lb.Name, clusterID) {
+            log.Println("Deleting Load Balancer", *lb.Name)
+            id := *lb.ID
+            _, err = isWaitForLBDeleted(sess1, id, d.Timeout(schema.TimeoutDelete))
+            if err != nil {
+              log.Printf("Error waiting for vpc load balancer to be deleted: %s\n", err)
+
+            }
+          }
+        }
+      }
+    }
+  }
+  return nil
+}
+
+func waitForVpcClusterDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) {
+  targetEnv, err := getVpcClusterTargetHeader(d, meta)
+  if err != nil {
+    return nil, err
+  }
+  csClient, err := meta.(ClientSession).VpcContainerAPI()
+  if err != nil {
+    return nil, err
+  }
+  clusterID := d.Id()
+  deleteStateConf := &resource.StateChangeConf{
+    Pending: []string{clusterDeletePending},
+    Target:  []string{clusterDeleted},
+    Refresh: func() (interface{}, string, error) {
+      cluster, err := csClient.Clusters().GetCluster(clusterID, targetEnv)
+      if err != nil {
+        if apiErr, ok := err.(bmxerror.RequestFailure); ok && (apiErr.StatusCode() == 404) {
+          return cluster, clusterDeleted, nil
+        }
+        return nil, "", err
+      }
+      return cluster, clusterDeletePending, nil
+    },
+    Timeout:      d.Timeout(schema.TimeoutDelete),
+    Delay:        10 * time.Second,
+    MinTimeout:   5 * time.Second,
+    PollInterval: 5 * time.Second,
+  }
+
+  return deleteStateConf.WaitForState()
+}
+
+func waitForVpcClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
+  targetEnv, err := getVpcClusterTargetHeader(d, meta)
+  if err != nil {
+    return nil, err
+  }
+  csClient, err := meta.(ClientSession).VpcContainerAPI()
+  if err != nil {
+    return nil, err
+  }
+  clusterID := d.Id()
+  createStateConf := &resource.StateChangeConf{
+    Pending: []string{deployRequested, deployInProgress},
+    Target:  []string{normal},
+    Refresh: func() (interface{}, string, error) {
+      workers, err := csClient.Workers().ListByWorkerPool(clusterID, "default", false, targetEnv)
+      if err != nil {
+        return workers, deployInProgress, err
+      }
+      if len(workers) == 0 {
+        return workers, deployInProgress, nil
+      }
+
+      for _, worker := range workers {
+        log.Println("worker: ", worker.ID)
+        log.Println("worker health state: ", worker.Health.State)
+
+        if worker.Health.State == normal {
+          return workers, normal, nil
+        }
+      }
+      return workers, deployInProgress, nil
+
+    },
+    Timeout:                   d.Timeout(schema.TimeoutCreate),
+    Delay:                     10 * time.Second,
+    MinTimeout:                5 * time.Second,
+    ContinuousTargetOccurence: 5,
+  }
+  return createStateConf.WaitForState()
+}
+
+func waitForVpcClusterMasterAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
+  targetEnv, err := getVpcClusterTargetHeader(d, meta)
+  if err != nil {
+    return nil, err
+  }
+  csClient, err := meta.(ClientSession).VpcContainerAPI()
+  if err != nil {
+    return nil, err
+  }
+  clusterID := d.Id()
+  createStateConf := &resource.StateChangeConf{
+    Pending: []string{deployRequested, deployInProgress},
+    Target:  []string{ready},
+    Refresh: func() (interface{}, string, error) {
+      clusterInfo, clusterInfoErr := csClient.Clusters().GetCluster(clusterID, targetEnv)
+
+      if clusterInfoErr != nil {
+        return clusterInfo, deployInProgress, clusterInfoErr
+      }
+
+      if clusterInfo.Lifecycle.MasterStatus == ready {
+        return clusterInfo, ready, nil
+      }
+      return clusterInfo, deployInProgress, nil
+
+    },
+    Timeout:                   d.Timeout(schema.TimeoutCreate),
+    Delay:                     10 * time.Second,
+    MinTimeout:                5 * time.Second,
+    ContinuousTargetOccurence: 5,
+  }
+  return createStateConf.WaitForState()
+}
+
+func waitForVpcClusterIngressAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
+  targetEnv, err := getVpcClusterTargetHeader(d, meta)
+  if err != nil {
+    return nil, err
+  }
+  csClient, err := meta.(ClientSession).VpcContainerAPI()
+  if err != nil {
+    return nil, err
+  }
+  clusterID := d.Id()
+  createStateConf := &resource.StateChangeConf{
+    Pending: []string{deployRequested, deployInProgress},
+    Target:  []string{ready},
+    Refresh: func() (interface{}, string, error) {
+      clusterInfo, clusterInfoErr := csClient.Clusters().GetCluster(clusterID, targetEnv)
+
+      if clusterInfoErr != nil {
+        return clusterInfo, deployInProgress, clusterInfoErr
+      }
+
+      if clusterInfo.Ingress.HostName != "" {
+        return clusterInfo, ready, nil
+      }
+      return clusterInfo, deployInProgress, nil
+
+    },
+    Timeout:                   d.Timeout(schema.TimeoutCreate),
+    Delay:                     10 * time.Second,
+    MinTimeout:                5 * time.Second,
+    ContinuousTargetOccurence: 5,
+  }
+  return createStateConf.WaitForState()
+}
+
+func getVpcClusterTargetHeader(d *schema.ResourceData, meta interface{}) (v2.ClusterTargetHeader, error) {
+  targetEnv := v2.ClusterTargetHeader{}
+  var resourceGroup string
+  if rg, ok := d.GetOk("resource_group_id"); ok {
+    resourceGroup = rg.(string)
+    targetEnv.ResourceGroup = resourceGroup
+  }
+
+  return targetEnv, nil
+}
+
+func resourceIBMContainerVpcClusterExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+
+  csClient, err := meta.(ClientSession).VpcContainerAPI()
+  if err != nil {
+    return false, err
+  }
+  targetEnv, err := getVpcClusterTargetHeader(d, meta)
+  if err != nil {
+    return false, err
+  }
+  clusterID := d.Id()
+  cls, err := csClient.Clusters().GetCluster(clusterID, targetEnv)
+  if err != nil {
+    if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+      if apiErr.StatusCode() == 404 && strings.Contains(apiErr.Description(), "The specified cluster could not be found") {
+        return false, nil
+      }
+    }
+    return false, fmt.Errorf("Error communicating with the API: %s", err)
+  }
+  return cls.ID == clusterID, nil
+}
+
+// WaitForVpcClusterVersionUpdate waits for the cluster master version update to complete
+func WaitForVpcClusterVersionUpdate(d *schema.ResourceData, meta interface{}, target v2.ClusterTargetHeader) (interface{}, error) {
+  csClient, err := meta.(ClientSession).VpcContainerAPI()
+  if err != nil {
+    return nil, err
+  }
+  log.Printf("Waiting for cluster (%s) version to be updated.", d.Id())
+  id := d.Id()
+
+  stateConf := &resource.StateChangeConf{
+    Pending:                   []string{"retry", versionUpdating},
+    Target:                    []string{clusterNormal},
+    Refresh:                   vpcClusterVersionRefreshFunc(csClient.Clusters(), id, d, target),
+    Timeout:                   d.Timeout(schema.TimeoutUpdate),
+    Delay:                     10 * time.Second,
+    MinTimeout:                10 * time.Second,
+    ContinuousTargetOccurence: 5,
+  }
+
+  return stateConf.WaitForState()
+}
+
+func vpcClusterVersionRefreshFunc(client v2.Clusters, instanceID string, d *schema.ResourceData, target v2.ClusterTargetHeader) resource.StateRefreshFunc {
+  return func() (interface{}, string, error) {
+    cls, err := client.GetCluster(instanceID, target)
+    if err != nil {
+      return nil, "retry", fmt.Errorf("Error retrieving container vpc cluster: %s", err)
+    }
+
+    // Check for active transactions
+func vpcClusterVersionRefreshFunc(client v2.Clusters, instanceID string, d *schema.ResourceData, target v2.ClusterTargetHeader) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		cls, err := client.GetCluster(instanceID, target)
+		if err != nil {
+			return nil, "retry", fmt.Errorf("Error retrieving container vpc cluster: %s", err)
+		}
+
+		// Check active transactions
+		log.Println("Checking cluster version", cls.MasterKubeVersion, d.Get("kube_version").(string))
+		if strings.Contains(cls.MasterKubeVersion, "(pending)") {
+			return cls, versionUpdating, nil
+		}
+		return cls, clusterNormal, nil
+	}
+}
+
+// WaitForVpcClusterWokersVersionUpdate waits for the worker version to be updated
+func WaitForVpcClusterWokersVersionUpdate(d *schema.ResourceData, meta interface{}, target v2.ClusterTargetHeader, masterVersion, workerID string) (interface{}, error) {
+	csClient, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return nil, err
+	}
+
+	log.Printf("Waiting for worker (%s) version to be updated.", workerID)
+	clusterID := d.Id()
+	stateConf := &resource.StateChangeConf{
+		Pending:                   []string{"retry", versionUpdating},
+		Target:                    []string{workerNormal},
+		Refresh:                   vpcClusterWorkersVersionRefreshFunc(csClient.Workers(), workerID, clusterID, d, target, masterVersion),
+		Timeout:                   d.Timeout(schema.TimeoutUpdate),
+		Delay:                     10 * time.Second,
+		MinTimeout:                10 * time.Second,
+		ContinuousTargetOccurence: 5,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func vpcClusterWorkersVersionRefreshFunc(client v2.Workers, workerID, clusterID string, d *schema.ResourceData, target v2.ClusterTargetHeader, masterVersion string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		worker, err := client.Get(clusterID, workerID, target)
+		if err != nil {
+			return nil, "retry", fmt.Errorf("Error retrieving worker of container vpc cluster: %s", err)
+		}
+		// Check active updates
+		if worker.Health.State == "normal" && strings.Split(worker.KubeVersion.Actual, "_")[0] == strings.Split(masterVersion, "_")[0] {
+			return worker, workerNormal, nil
+		}
+		return worker, versionUpdating, nil
+	}
+}
+
+func waitForWorkerNodetoDelete(d *schema.ResourceData, meta interface{}, targetEnv v2.ClusterTargetHeader, workerID string) (interface{}, error) {
+
+	csClient, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return nil, err
+	}
+
+	clusterID := d.Id()
+	deleteStateConf := &resource.StateChangeConf{
+		Pending: []string{workerDeletePending},
+		Target:  []string{workerDeleteState},
+		Refresh: func() (interface{}, string, error) {
+			worker, err := csClient.Workers().Get(clusterID, workerID, targetEnv)
+			if err != nil {
+				return worker, workerDeletePending, nil
+			}
+			if worker.LifeCycle.ActualState == "deleted" {
+				return worker, workerDeleteState, nil
+			}
+			return worker, workerDeletePending, nil
+		},
+		Timeout:      d.Timeout(schema.TimeoutDelete),
+		Delay:        10 * time.Second,
+		MinTimeout:   5 * time.Second,
+		PollInterval: 5 * time.Second,
+	}
+	return deleteStateConf.WaitForState()
+}
+
+func waitForNewWorker(d *schema.ResourceData, meta interface{}, targetEnv v2.ClusterTargetHeader, workersCount int) (interface{}, error) {
+	csClient, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return nil, err
+	}
+
+	clusterID := d.Id()
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"creating"},
+		Target:  []string{"created"},
+		Refresh: func() (interface{}, string, error) {
+			workers, err := csClient.Workers().ListWorkers(clusterID, false, targetEnv)
+			if err != nil {
+				return workers, "", fmt.Errorf("Error in retrieving the list of worker nodes")
+			}
+			if len(workers) == workersCount {
+				return workers, "created", nil
+			}
+			return workers, "creating", nil
+		},
+		Timeout:      d.Timeout(schema.TimeoutDelete),
+		Delay:        10 * time.Second,
+		MinTimeout:   5 * time.Second,
+		PollInterval: 5 * time.Second,
+	}
+	return stateConf.WaitForState()
+}
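+
+// Worker replacement proceeds in two steps: waitForNewWorker blocks until the
+// pool is back at its expected size, then getNewWorkerID picks out the worker
+// whose ID was not seen before. Illustrative sketch only (workersInfo and its
+// capture point are hypothetical):
+//
+//	workersInfo := map[string]int{} // worker ID -> index, captured pre-replace
+//	if _, err := waitForNewWorker(d, meta, targetEnv, len(workersInfo)); err == nil {
+//		newID, index, err := getNewWorkerID(d, meta, targetEnv, workersInfo)
+//		_, _, _ = newID, index, err
+//	}
+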
+func getNewWorkerID(d *schema.ResourceData, meta interface{}, targetEnv v2.ClusterTargetHeader, workersInfo map[string]int) (string, int, error) {
+	csClient, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return "", -1, err
+	}
+
+	clusterID := d.Id()
+
+	workers, err := csClient.Workers().ListWorkers(clusterID, false, targetEnv)
+	if err != nil {
+		return "", -1, fmt.Errorf("Error in retrieving the list of worker nodes")
+	}
+
+	for index, worker := range workers {
+		if _, ok := workersInfo[worker.ID]; !ok {
+			log.Println("found new replaced node: ", worker.ID)
+			return worker.ID, index, nil
+		}
+	}
+	return "", -1, fmt.Errorf("no new node found")
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_vpc_worker_pool.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_vpc_worker_pool.go
new file mode 100644
index 00000000000..832981bc952
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_vpc_worker_pool.go
@@ -0,0 +1,515 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+	v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1"
+	v2 "github.com/IBM-Cloud/bluemix-go/api/container/containerv2"
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+)
+
+const (
+	workerDesired = "deployed"
+)
+
+func resourceIBMContainerVpcWorkerPool() *schema.Resource {
+
+	return &schema.Resource{
+		Create:   resourceIBMContainerVpcWorkerPoolCreate,
+		Update:   resourceIBMContainerVpcWorkerPoolUpdate,
+		Read:     resourceIBMContainerVpcWorkerPoolRead,
+		Delete:   resourceIBMContainerVpcWorkerPoolDelete,
+		Exists:   resourceIBMContainerVpcWorkerPoolExists,
+		Importer: &schema.ResourceImporter{},
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(90 * time.Minute),
+			Delete: schema.DefaultTimeout(90 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"cluster": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Cluster name",
+			},
+
+			"flavor": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "cluster node flavor",
+			},
+
+			"worker_pool_name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "worker pool name",
+			},
+
+			"zones": {
+				Type:        schema.TypeSet,
+				Required:    true,
+				Description: "Zones info",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "zone name",
+						},
+
+						"subnet_id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "subnet ID",
+						},
+					},
+				},
+			},
+
+			"labels": {
+				Type:        schema.TypeMap,
+				Optional:    true,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "Labels",
+			},
+
+			"resource_group_id": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "ID of the resource group.",
+				ForceNew:    true,
+			},
+			"vpc_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The VPC ID where the cluster is",
+				ForceNew:    true,
+			},
+			"worker_count": {
+				Type:        schema.TypeInt,
+				Required:    true,
+				Description: "The number of workers",
+			},
+			"entitlement": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				DiffSuppressFunc: applyOnce,
+				Description:      "Entitlement option reduces additional OCP license cost in OpenShift clusters",
+			},
+			ResourceControllerURL: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Resource Controller URL",
+			},
+		},
+	}
+}
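+
+// The resource ID for a worker pool is the two-part string
+// "<cluster>/<worker_pool_id>" (see d.SetId in Create below); Read, Delete
+// and Exists split it back apart with idParts.
+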
Description: "Entitlement option reduces additional OCP Licence cost in Openshift Clusters", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "Resource Controller URL", + }, + }, + } +} + +func resourceIBMContainerVpcWorkerPoolCreate(d *schema.ResourceData, meta interface{}) error { + + wpClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + clusterNameorID := d.Get("cluster").(string) + var zonei []interface{} + + zone := []v2.Zone{} + + if res, ok := d.GetOk("zones"); ok { + zonei = res.(*schema.Set).List() + for _, e := range zonei { + r, _ := e.(map[string]interface{}) + zoneParam := v2.Zone{ + ID: r["name"].(string), + SubnetID: r["subnet_id"].(string), + } + zone = append(zone, zoneParam) + } + + } + + // for _, e := range d.Get("zones").(*schema.Set).List() { + // value := e.(map[string]interface{}) + // id := value["id"].(string) + // subnetid := value["subnet_id"].(string) + + // } + + workerPoolConfig := v2.WorkerPoolConfig{ + Name: d.Get("worker_pool_name").(string), + VpcID: d.Get("vpc_id").(string), + Flavor: d.Get("flavor").(string), + WorkerCount: d.Get("worker_count").(int), + Zones: zone, + } + + if l, ok := d.GetOk("labels"); ok { + labels := make(map[string]string) + for k, v := range l.(map[string]interface{}) { + labels[k] = v.(string) + } + workerPoolConfig.Labels = labels + } + params := v2.WorkerPoolRequest{ + WorkerPoolConfig: workerPoolConfig, + Cluster: clusterNameorID, + } + + // Update workerpoolConfig with Entitlement option if provided + if v, ok := d.GetOk("entitlement"); ok { + workerPoolConfig.Entitlement = v.(string) + } + + workerPoolsAPI := wpClient.WorkerPools() + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + + res, err := workerPoolsAPI.CreateWorkerPool(params, targetEnv) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", clusterNameorID, res.ID)) + + //wait for workerpool availability + _, err = WaitForWorkerPoolAvailable(d, meta, clusterNameorID, res.ID, d.Timeout(schema.TimeoutCreate), targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workerpool (%s) to become ready: %s", d.Id(), err) + } + + return resourceIBMContainerVpcWorkerPoolUpdate(d, meta) +} + +func resourceIBMContainerVpcWorkerPoolUpdate(d *schema.ResourceData, meta interface{}) error { + + if d.HasChange("labels") && !d.IsNewResource() { + clusterNameOrID := d.Get("cluster").(string) + workerPoolName := d.Get("worker_pool_name").(string) + + labels := make(map[string]string) + if l, ok := d.GetOk("labels"); ok { + for k, v := range l.(map[string]interface{}) { + labels[k] = v.(string) + } + } + + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + ClusterClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup} + + err = ClusterClient.WorkerPools().UpdateLabelsWorkerPool(clusterNameOrID, workerPoolName, labels, Env) + if err != nil { + return fmt.Errorf( + "Error updating the labels: %s", err) + } + } + + if d.HasChange("worker_count") { + clusterNameOrID := d.Get("cluster").(string) + workerPoolName := d.Get("worker_pool_name").(string) + count := d.Get("worker_count").(int) + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + ClusterClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + Env := 
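+
+// Update handles label changes, worker_count resizes, and zone
+// attach/detach. Zone changes are computed as a set difference, roughly:
+//
+//	os := oldList.(*schema.Set)
+//	ns := newList.(*schema.Set)
+//	remove := os.Difference(ns).List() // zones only in the old set
+//	add := ns.Difference(os).List()    // zones only in the new set
+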
+func resourceIBMContainerVpcWorkerPoolUpdate(d *schema.ResourceData, meta interface{}) error {
+
+	if d.HasChange("labels") && !d.IsNewResource() {
+		clusterNameOrID := d.Get("cluster").(string)
+		workerPoolName := d.Get("worker_pool_name").(string)
+
+		labels := make(map[string]string)
+		if l, ok := d.GetOk("labels"); ok {
+			for k, v := range l.(map[string]interface{}) {
+				labels[k] = v.(string)
+			}
+		}
+
+		targetEnv, err := getVpcClusterTargetHeader(d, meta)
+		if err != nil {
+			return err
+		}
+		ClusterClient, err := meta.(ClientSession).ContainerAPI()
+		if err != nil {
+			return err
+		}
+		Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup}
+
+		err = ClusterClient.WorkerPools().UpdateLabelsWorkerPool(clusterNameOrID, workerPoolName, labels, Env)
+		if err != nil {
+			return fmt.Errorf(
+				"Error updating the labels: %s", err)
+		}
+	}
+
+	if d.HasChange("worker_count") {
+		clusterNameOrID := d.Get("cluster").(string)
+		workerPoolName := d.Get("worker_pool_name").(string)
+		count := d.Get("worker_count").(int)
+		targetEnv, err := getVpcClusterTargetHeader(d, meta)
+		if err != nil {
+			return err
+		}
+		ClusterClient, err := meta.(ClientSession).ContainerAPI()
+		if err != nil {
+			return err
+		}
+		Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup}
+
+		err = ClusterClient.WorkerPools().ResizeWorkerPool(clusterNameOrID, workerPoolName, count, Env)
+		if err != nil {
+			return fmt.Errorf(
+				"Error updating the worker_count %d: %s", count, err)
+		}
+	}
+
+	if d.HasChange("zones") && !d.IsNewResource() {
+		clusterID := d.Get("cluster").(string)
+		workerPoolName := d.Get("worker_pool_name").(string)
+		targetEnv, err := getVpcClusterTargetHeader(d, meta)
+		if err != nil {
+			return err
+		}
+		oldList, newList := d.GetChange("zones")
+		if oldList == nil {
+			oldList = new(schema.Set)
+		}
+		if newList == nil {
+			newList = new(schema.Set)
+		}
+		os := oldList.(*schema.Set)
+		ns := newList.(*schema.Set)
+		remove := os.Difference(ns).List()
+		add := ns.Difference(os).List()
+		if len(add) > 0 {
+			csClient, err := meta.(ClientSession).VpcContainerAPI()
+			if err != nil {
+				return err
+			}
+			for _, zone := range add {
+				newZone := zone.(map[string]interface{})
+				zoneParam := v2.WorkerPoolZone{
+					Cluster:      clusterID,
+					Id:           newZone["name"].(string),
+					SubnetID:     newZone["subnet_id"].(string),
+					WorkerPoolID: workerPoolName,
+				}
+				err = csClient.WorkerPools().CreateWorkerPoolZone(zoneParam, targetEnv)
+				if err != nil {
+					return fmt.Errorf("Error adding zone to container vpc cluster: %s", err)
+				}
+				_, err = WaitForWorkerPoolAvailable(d, meta, clusterID, workerPoolName, d.Timeout(schema.TimeoutCreate), targetEnv)
+				if err != nil {
+					return fmt.Errorf(
+						"Error waiting for workerpool (%s) to become ready: %s", d.Id(), err)
+				}
+			}
+		}
+		if len(remove) > 0 {
+			for _, zone := range remove {
+				oldZone := zone.(map[string]interface{})
+				ClusterClient, err := meta.(ClientSession).ContainerAPI()
+				if err != nil {
+					return err
+				}
+				Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup}
+				err = ClusterClient.WorkerPools().RemoveZone(clusterID, oldZone["name"].(string), workerPoolName, Env)
+				if err != nil {
+					return fmt.Errorf("Error deleting zone from container vpc cluster: %s", err)
+				}
+				_, err = WaitForV2WorkerZoneDeleted(clusterID, workerPoolName, oldZone["name"].(string), meta, d.Timeout(schema.TimeoutDelete), targetEnv)
+				if err != nil {
+					return fmt.Errorf(
+						"Error waiting for deleting workers of worker pool (%s) of cluster (%s): %s", workerPoolName, clusterID, err)
+				}
+			}
+		}
+	}
+	return resourceIBMContainerVpcWorkerPoolRead(d, meta)
+}
+
+func resourceIBMContainerVpcWorkerPoolRead(d *schema.ResourceData, meta interface{}) error {
+	wpClient, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return err
+	}
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+	cluster := parts[0]
+	workerPoolID := parts[1]
+
+	workerPoolsAPI := wpClient.WorkerPools()
+	targetEnv, err := getVpcClusterTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+
+	workerPool, err := workerPoolsAPI.GetWorkerPool(cluster, workerPoolID, targetEnv)
+	if err != nil {
+		return err
+	}
+
+	var zones = make([]map[string]interface{}, 0)
+	for _, zone := range workerPool.Zones {
+		for _, subnet := range zone.Subnets {
+			zoneInfo := map[string]interface{}{
+				"name":      zone.ID,
+				"subnet_id": subnet.ID,
+			}
+			zones = append(zones, zoneInfo)
+		}
+	}
+
+	cls, err := wpClient.Clusters().GetCluster(cluster, targetEnv)
+	if err != nil {
+		return fmt.Errorf("Error retrieving container vpc cluster: %s", err)
+	}
+
+	d.Set("worker_pool_name", workerPool.PoolName)
+	d.Set("flavor", workerPool.Flavor)
+	d.Set("worker_count", workerPool.WorkerCount)
+	// d.Set("provider", workerPool.Provider)
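+	// IgnoreSystemLabels is assumed to filter out service-managed labels so
+	// they do not show up as spurious diffs on the user-specified label map.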
d.Set("labels", IgnoreSystemLabels(workerPool.Labels)) + d.Set("zones", zones) + d.Set("resource_group_id", cls.ResourceGroupID) + d.Set("cluster", cluster) + d.Set("vpc_id", workerPool.VpcID) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/kubernetes/clusters") + return nil +} + +func resourceIBMContainerVpcWorkerPoolDelete(d *schema.ResourceData, meta interface{}) error { + wpClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + clusterNameorID := parts[0] + workerPoolNameorID := parts[1] + + workerPoolsAPI := wpClient.WorkerPools() + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + + err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) + if err != nil { + return err + } + _, err = WaitForVpcWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutDelete), targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + } + d.SetId("") + return nil +} + +func resourceIBMContainerVpcWorkerPoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { + wpClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + cluster := parts[0] + workerPoolID := parts[1] + + workerPoolsAPI := wpClient.WorkerPools() + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return false, err + } + + workerPool, err := workerPoolsAPI.GetWorkerPool(cluster, workerPoolID, targetEnv) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 && strings.Contains(apiErr.Description(), "The specified worker pool could not be found") { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return workerPool.ID == workerPoolID, nil +} + +// WaitForWorkerPoolAvailable Waits for worker creation +func WaitForWorkerPoolAvailable(d *schema.ResourceData, meta interface{}, clusterNameOrID, workerPoolNameOrID string, timeout time.Duration, target v2.ClusterTargetHeader) (interface{}, error) { + wpClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return nil, err + } + log.Printf("Waiting for workerpool (%s) to be available.", d.Id()) + // id := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"provision_pending"}, + Target: []string{workerDesired}, + Refresh: vpcWorkerPoolStateRefreshFunc(wpClient.Workers(), clusterNameOrID, workerPoolNameOrID, target), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func vpcWorkerPoolStateRefreshFunc(client v2.Workers, instanceID string, workerPoolNameOrID string, target v2.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + workerFields, err := client.ListByWorkerPool(instanceID, "", false, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + // Check active transactions + //Check for worker state to be deployed + //Done worker has two fields desiredState and actualState , so check for those 2 + for _, e := range workerFields { + if e.PoolName == 
workerPoolNameOrID || e.PoolID == workerPoolNameOrID { + if strings.Compare(e.LifeCycle.ActualState, "deployed") != 0 { + log.Printf("worker: %s state: %s", e.ID, e.LifeCycle.ActualState) + return workerFields, "provision_pending", nil + } + } + } + return workerFields, workerDesired, nil + } +} + +func WaitForVpcWorkerDelete(clusterNameOrID, workerPoolNameOrID string, meta interface{}, timeout time.Duration, target v2.ClusterTargetHeader) (interface{}, error) { + wpClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return nil, err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{workerDeleteState}, + Refresh: vpcworkerPoolDeleteStateRefreshFunc(wpClient.Workers(), clusterNameOrID, workerPoolNameOrID, target), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func vpcworkerPoolDeleteStateRefreshFunc(client v2.Workers, instanceID, workerPoolNameOrID string, target v2.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + workerFields, err := client.ListByWorkerPool(instanceID, "", true, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + //Done worker has two fields desiredState and actualState , so check for those 2 + for _, e := range workerFields { + if e.PoolName == workerPoolNameOrID || e.PoolID == workerPoolNameOrID { + if strings.Compare(e.LifeCycle.ActualState, "deleted") != 0 { + log.Printf("Deleting worker %s", e.ID) + return workerFields, "deleting", nil + } + } + } + return workerFields, workerDeleteState, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_worker_pool.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_worker_pool.go new file mode 100644 index 00000000000..81f19906919 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_worker_pool.go @@ -0,0 +1,470 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" + "github.com/IBM-Cloud/bluemix-go/bmxerror" +) + +func resourceIBMContainerWorkerPool() *schema.Resource { + + return &schema.Resource{ + Create: resourceIBMContainerWorkerPoolCreate, + Read: resourceIBMContainerWorkerPoolRead, + Update: resourceIBMContainerWorkerPoolUpdate, + Delete: resourceIBMContainerWorkerPoolDelete, + Exists: resourceIBMContainerWorkerPoolExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Update: schema.DefaultTimeout(90 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Cluster name", + }, + + "machine_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "worker nodes machine type", + }, + + "worker_pool_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "worker pool name", + }, + + "size_per_zone": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateSizePerZone, + Description: "Number of nodes per zone", + }, + + "entitlement": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: applyOnce, + Description: "Entitlement option reduces additional OCP Licence cost in Openshift Clusters", + }, + + "hardware": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: hardwareShared, + ValidateFunc: validateAllowedStringValue([]string{hardwareShared, hardwareDedicated}), + Description: "Hardware type", + }, + + "disk_encryption": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + Description: "worker node disk encrypted if set to true", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "worker pool state", + }, + + "zones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Computed: true, + }, + + "private_vlan": { + Type: schema.TypeString, + Computed: true, + }, + + "public_vlan": { + Type: schema.TypeString, + Computed: true, + }, + + "worker_count": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "list of labels to worker pool", + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The worker pool region", + Deprecated: "This field is deprecated", + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + ForceNew: true, + DiffSuppressFunc: applyOnce, + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", + }, + }, + } +} + +func resourceIBMContainerWorkerPoolCreate(d *schema.ResourceData, meta interface{}) error { + + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + clusterNameorID := d.Get("cluster").(string) + + workerPoolConfig := v1.WorkerPoolConfig{ + Name: d.Get("worker_pool_name").(string), + Size: 
d.Get("size_per_zone").(int), + MachineType: d.Get("machine_type").(string), + } + if v, ok := d.GetOk("hardware"); ok { + hardware := v.(string) + switch strings.ToLower(hardware) { + case "": // do nothing + case hardwareDedicated: + hardware = isolationPrivate + case hardwareShared: + hardware = isolationPublic + } + workerPoolConfig.Isolation = hardware + } + if l, ok := d.GetOk("labels"); ok { + labels := make(map[string]string) + for k, v := range l.(map[string]interface{}) { + labels[k] = v.(string) + } + workerPoolConfig.Labels = labels + } + + // Update workerpoolConfig with Entitlement option if provided + if v, ok := d.GetOk("entitlement"); ok { + workerPoolConfig.Entitlement = v.(string) + } + + params := v1.WorkerPoolRequest{ + WorkerPoolConfig: workerPoolConfig, + DiskEncryption: d.Get("disk_encryption").(bool), + } + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + + res, err := workerPoolsAPI.CreateWorkerPool(clusterNameorID, params, targetEnv) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", clusterNameorID, res.ID)) + + return resourceIBMContainerWorkerPoolRead(d, meta) +} + +func resourceIBMContainerWorkerPoolRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + cluster := parts[0] + workerPoolID := parts[1] + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + + workerPool, err := workerPoolsAPI.GetWorkerPool(cluster, workerPoolID, targetEnv) + if err != nil { + return err + } + + machineType := workerPool.MachineType + d.Set("worker_pool_name", workerPool.Name) + d.Set("machine_type", strings.Split(machineType, ".encrypted")[0]) + d.Set("size_per_zone", workerPool.Size) + hardware := workerPool.Isolation + switch strings.ToLower(hardware) { + case "": + hardware = hardwareShared + case isolationPrivate: + hardware = hardwareDedicated + case isolationPublic: + hardware = hardwareShared + } + d.Set("hardware", hardware) + d.Set("state", workerPool.State) + d.Set("labels", IgnoreSystemLabels(workerPool.Labels)) + d.Set("zones", flattenZones(workerPool.Zones)) + d.Set("cluster", cluster) + if strings.Contains(machineType, "encrypted") { + d.Set("disk_encryption", true) + } else { + d.Set("disk_encryption", false) + } + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/kubernetes/clusters") + return nil +} + +func resourceIBMContainerWorkerPoolUpdate(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + clusterNameorID := parts[0] + workerPoolNameorID := parts[1] + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + + if d.HasChange("size_per_zone") { + err = workerPoolsAPI.ResizeWorkerPool(clusterNameorID, workerPoolNameorID, d.Get("size_per_zone").(int), targetEnv) + if err != nil { + return err + } + + _, err = WaitForWorkerNormal(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutUpdate), targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of worker pool (%s) of cluster (%s) to become 
ready: %s", workerPoolNameorID, clusterNameorID, err) + } + } + if d.HasChange("labels") { + labels := make(map[string]string) + if l, ok := d.GetOk("labels"); ok { + for k, v := range l.(map[string]interface{}) { + labels[k] = v.(string) + } + } + err = workerPoolsAPI.UpdateLabelsWorkerPool(clusterNameorID, workerPoolNameorID, labels, targetEnv) + if err != nil { + return err + } + + _, err = WaitForWorkerNormal(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutUpdate), targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of worker pool (%s) of cluster (%s) to become ready: %s", workerPoolNameorID, clusterNameorID, err) + } + } + + return resourceIBMContainerWorkerPoolRead(d, meta) +} + +func resourceIBMContainerWorkerPoolDelete(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + clusterNameorID := parts[0] + workerPoolNameorID := parts[1] + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + + err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) + if err != nil { + return err + } + _, err = WaitForWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutUpdate), targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + } + return nil +} + +func resourceIBMContainerWorkerPoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + cluster := parts[0] + workerPoolID := parts[1] + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return false, err + } + + workerPool, err := workerPoolsAPI.GetWorkerPool(cluster, workerPoolID, targetEnv) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 && strings.Contains(apiErr.Description(), "The specified worker pool could not be found") { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return workerPool.ID == workerPoolID, nil +} + +func WaitForWorkerNormal(clusterNameOrID, workerPoolNameOrID string, meta interface{}, timeout time.Duration, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", workerProvisioning}, + Target: []string{workerNormal}, + Refresh: workerPoolStateRefreshFunc(csClient.Workers(), clusterNameOrID, workerPoolNameOrID, target), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func workerPoolStateRefreshFunc(client v1.Workers, instanceID, workerPoolNameOrID string, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + workerFields, err := client.ListByWorkerPool(instanceID, workerPoolNameOrID, false, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + //Done worker has two fields 
State and Status , so check for those 2 + for _, e := range workerFields { + if strings.Contains(e.KubeVersion, "pending") || strings.Compare(e.State, workerNormal) != 0 || strings.Compare(e.Status, workerReadyState) != 0 { + if strings.Compare(e.State, "deleted") != 0 { + return workerFields, workerProvisioning, nil + } + } + } + return workerFields, workerNormal, nil + } +} + +func WaitForWorkerDelete(clusterNameOrID, workerPoolNameOrID string, meta interface{}, timeout time.Duration, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{workerDeleteState}, + Refresh: workerPoolDeleteStateRefreshFunc(csClient.Workers(), clusterNameOrID, workerPoolNameOrID, target), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func workerPoolDeleteStateRefreshFunc(client v1.Workers, instanceID, workerPoolNameOrID string, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + workerFields, err := client.ListByWorkerPool(instanceID, "", true, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + //Done worker has two fields State and Status , so check for those 2 + for _, e := range workerFields { + if e.PoolName == workerPoolNameOrID || e.PoolID == workerPoolNameOrID { + if strings.Compare(e.State, "deleted") != 0 { + return workerFields, "deleting", nil + } + } + } + return workerFields, workerDeleteState, nil + } +} + +func getWorkerPoolTargetHeader(d *schema.ResourceData, meta interface{}) (v1.ClusterTargetHeader, error) { + + _, err := meta.(ClientSession).BluemixSession() + if err != nil { + return v1.ClusterTargetHeader{}, err + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return v1.ClusterTargetHeader{}, err + } + accountID := userDetails.userAccount + + targetEnv := v1.ClusterTargetHeader{ + AccountID: accountID, + } + + resourceGroup := "" + if v, ok := d.GetOk("resource_group_id"); ok { + resourceGroup = v.(string) + targetEnv.ResourceGroup = resourceGroup + } + return targetEnv, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_worker_pool_zone_attachment.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_worker_pool_zone_attachment.go new file mode 100644 index 00000000000..4fad9b814bd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_container_worker_pool_zone_attachment.go @@ -0,0 +1,434 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + "time" + + v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMContainerWorkerPoolZoneAttachment() *schema.Resource { + + return &schema.Resource{ + Create: resourceIBMContainerWorkerPoolZoneAttachmentCreate, + Read: resourceIBMContainerWorkerPoolZoneAttachmentRead, + Update: resourceIBMContainerWorkerPoolZoneAttachmentUpdate, + Delete: resourceIBMContainerWorkerPoolZoneAttachmentDelete, + Exists: resourceIBMContainerWorkerPoolZoneAttachmentExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Zone name", + }, + + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "cluster name or ID", + }, + + "worker_pool": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Workerpool name", + }, + + "private_vlan_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "public_vlan_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + ForceNew: true, + DiffSuppressFunc: applyOnce, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The zone region", + Deprecated: "This field is deprecated", + }, + + "worker_count": { + Type: schema.TypeInt, + Computed: true, + }, + "wait_till_albs": { + Type: schema.TypeBool, + Optional: true, + Default: true, + DiffSuppressFunc: applyOnce, + Description: "wait_till_albs can be configured to wait for albs during the worker pool zone attachment.", + }, + }, + } +} + +func resourceIBMContainerWorkerPoolZoneAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + zone := d.Get("zone").(string) + var privateVLAN, publicVLAN string + if v, ok := d.GetOk("private_vlan_id"); ok { + privateVLAN = v.(string) + } + + if v, ok := d.GetOk("public_vlan_id"); ok { + publicVLAN = v.(string) + } + + if publicVLAN != "" && privateVLAN == "" { + return fmt.Errorf( + "A private_vlan_id must be specified if a public_vlan_id is specified.") + } + + workerPoolZoneNetwork := v1.WorkerPoolZoneNetwork{ + PrivateVLAN: privateVLAN, + PublicVLAN: publicVLAN, + } + + workerPoolZone := v1.WorkerPoolZone{ + ID: zone, + WorkerPoolZoneNetwork: workerPoolZoneNetwork, + } + + cluster := d.Get("cluster").(string) + workerPool := d.Get("worker_pool").(string) + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + + err = workerPoolsAPI.AddZone(cluster, workerPool, workerPoolZone, targetEnv) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("%s/%s/%s", cluster, workerPool, zone)) + + _, err = WaitForWorkerZoneNormal(cluster, workerPool, zone, meta, d.Timeout(schema.TimeoutUpdate), targetEnv) + if err != nil { + return fmt.Errorf( + 
"Error waiting for workers of worker pool (%s) of cluster (%s) to become ready: %s", workerPool, cluster, err) + } + + var waitTillALBs bool + if v, ok := d.GetOk("wait_till_albs"); ok { + waitTillALBs = v.(bool) + } + + if waitTillALBs { + _, err = waitForWorkerZoneALB(cluster, zone, meta, d.Timeout(schema.TimeoutUpdate), targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for ALBs in zone (%s) of cluster (%s) to become ready: %s", zone, cluster, err) + } + } + + return resourceIBMContainerWorkerPoolZoneAttachmentRead(d, meta) + +} + +func resourceIBMContainerWorkerPoolZoneAttachmentRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + cluster := parts[0] + workerPool := parts[1] + zoneName := parts[2] + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + + workerPoolRes, err := workerPoolsAPI.GetWorkerPool(cluster, workerPool, targetEnv) + if err != nil { + return err + } + zones := workerPoolRes.Zones + + for _, zone := range zones { + if zone.ID == zoneName { + d.Set("public_vlan_id", zone.PublicVLAN) + d.Set("private_vlan_id", zone.PrivateVLAN) + d.Set("worker_count", zone.WorkerCount) + d.Set("zone", zone.ID) + d.Set("cluster", cluster) + d.Set("worker_pool", workerPool) + + break + } + } + + return nil +} + +func resourceIBMContainerWorkerPoolZoneAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + workerPoolsAPI := csClient.WorkerPools() + + if d.HasChange("private_vlan_id") || d.HasChange("public_vlan_id") { + privateVLAN := d.Get("private_vlan_id").(string) + publicVLAN := d.Get("public_vlan_id").(string) + if publicVLAN != "" && privateVLAN == "" { + return fmt.Errorf( + "A private VLAN must be specified if a public VLAN is specified.") + } + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + cluster := parts[0] + workerPool := parts[1] + zone := parts[2] + err = workerPoolsAPI.UpdateZoneNetwork(cluster, zone, workerPool, privateVLAN, publicVLAN, targetEnv) + if err != nil { + return err + } + } + + return resourceIBMContainerWorkerPoolZoneAttachmentRead(d, meta) +} + +func resourceIBMContainerWorkerPoolZoneAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + cluster := parts[0] + workerPool := parts[1] + zone := parts[2] + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + err = workerPoolsAPI.RemoveZone(cluster, zone, workerPool, targetEnv) + if err != nil { + return err + } + _, err = WaitForWorkerZoneDeleted(cluster, workerPool, zone, meta, d.Timeout(schema.TimeoutDelete), targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for deleting workers of worker pool (%s) of cluster (%s): %s", workerPool, cluster, err) + } + + return nil +} + +func resourceIBMContainerWorkerPoolZoneAttachmentExists(d *schema.ResourceData, meta interface{}) (bool, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return false, err + } 
+ parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + cluster := parts[0] + workerPoolID := parts[1] + zoneID := parts[2] + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return false, err + } + + workerPool, err := workerPoolsAPI.GetWorkerPool(cluster, workerPoolID, targetEnv) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + zones := workerPool.Zones + var zone v1.WorkerPoolZoneResponse + for _, z := range zones { + if z.ID == zoneID { + zone = z + } + } + return zone.ID == zoneID, nil +} + +func WaitForWorkerZoneNormal(clusterNameOrID, workerPoolNameOrID, zone string, meta interface{}, timeout time.Duration, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", workerProvisioning}, + Target: []string{workerNormal}, + Refresh: workerPoolZoneStateRefreshFunc(csClient.Workers(), clusterNameOrID, workerPoolNameOrID, zone, target), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func workerPoolZoneStateRefreshFunc(client v1.Workers, instanceID, workerPoolNameOrID, zone string, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + workerFields, err := client.ListByWorkerPool(instanceID, workerPoolNameOrID, false, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + //Done worker has two fields State and Status , so check for those 2 + for _, e := range workerFields { + if e.Location == zone { + if strings.Contains(e.KubeVersion, "pending") || strings.Compare(e.State, workerNormal) != 0 || strings.Compare(e.Status, workerReadyState) != 0 { + if strings.Compare(e.State, "deleted") != 0 { + return workerFields, workerProvisioning, nil + } + } + } + } + return workerFields, workerNormal, nil + } +} + +func WaitForWorkerZoneDeleted(clusterNameOrID, workerPoolNameOrID, zone string, meta interface{}, timeout time.Duration, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{workerDeleteState}, + Refresh: workerPoolZoneDeleteStateRefreshFunc(csClient.Workers(), clusterNameOrID, workerPoolNameOrID, zone, target), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func workerPoolZoneDeleteStateRefreshFunc(client v1.Workers, instanceID, workerPoolNameOrID, zone string, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + workerFields, err := client.ListByWorkerPool(instanceID, workerPoolNameOrID, true, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + //Done worker has two fields State and Status , so check for those 2 + for _, e := range workerFields { + if e.Location == zone { + if strings.Compare(e.State, "deleted") != 0 { + return workerFields, "deleting", nil + } + } + } + return workerFields, workerDeleteState, nil + } +} 
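+
+// waitForWorkerZoneALB blocks until the newly attached zone reports both a
+// private and a public ALB. Conceptually, the refresh function below reduces
+// to this sketch (illustrative only; albsInZone is a hypothetical helper):
+//
+//	albs, _ := client.ListClusterALBs(cluster, target)
+//	private, public := albsInZone(albs, zone)
+//	if len(private) > 0 && len(public) > 0 {
+//		return albs, "ready", nil
+//	}
+//	return albs, "pending", nil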
+ +func waitForWorkerZoneALB(clusterNameOrID, zone string, meta interface{}, timeout time.Duration, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"ready"}, + Refresh: workerZoneALBStateRefreshFunc(csClient.Albs(), clusterNameOrID, zone, target), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func workerZoneALBStateRefreshFunc(client v1.Albs, instanceID, zone string, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + // Get all ALBs associated with cluster + albs, err := client.ListClusterALBs(instanceID, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving ALBs for cluster: %s", err) + } + + privateALBsByZone := []v1.ALBConfig{} + publicALBsByZone := []v1.ALBConfig{} + + // Find ALBs by zone and type + for _, alb := range albs { + if alb.Zone == zone { + if alb.ALBType == "private" { + privateALBsByZone = append(privateALBsByZone, alb) + } + if alb.ALBType == "public" { + publicALBsByZone = append(publicALBsByZone, alb) + } + } + } + + // Ready if both private and public ALBs are present + if len(privateALBsByZone) > 0 && len(publicALBsByZone) > 0 { + return albs, "ready", nil + } + + return albs, "pending", nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cos_bucket.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cos_bucket.go new file mode 100644 index 00000000000..b28e04362f8 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cos_bucket.go @@ -0,0 +1,1135 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go-config/resourceconfigurationv1"
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam"
+	token "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
+	"github.com/IBM/ibm-cos-sdk-go/aws/session"
+	"github.com/IBM/ibm-cos-sdk-go/service/s3"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+var singleSiteLocation = []string{
+	"ams03", "che01", "hkg02", "mel01", "mex01",
+	"mil01", "mon01", "osl01", "par01", "sjc04", "sao01",
+	"seo01", "sng01", "tor01",
+}
+
+var regionLocation = []string{
+	"au-syd", "ca-tor", "eu-de", "eu-gb", "jp-tok", "jp-osa", "us-east", "us-south",
+}
+
+var crossRegionLocation = []string{
+	"us", "eu", "ap",
+}
+
+var storageClass = []string{
+	"standard", "vault", "cold", "flex", "smart",
+}
+
+const (
+	keyAlgorithm = "AES256"
+)
+
+func resourceIBMCOSBucket() *schema.Resource {
+	return &schema.Resource{
+		Read:     resourceIBMCOSBucketRead,
+		Create:   resourceIBMCOSBucketCreate,
+		Update:   resourceIBMCOSBucketUpdate,
+		Delete:   resourceIBMCOSBucketDelete,
+		Exists:   resourceIBMCOSBucketExists,
+		Importer: &schema.ResourceImporter{},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(60 * time.Minute),
+			Update: schema.DefaultTimeout(20 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+		Schema: map[string]*schema.Schema{
+			"bucket_name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "COS Bucket name",
+			},
+			"resource_instance_id": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				Description:  "resource instance ID",
+				ValidateFunc: validateRegexp(`^crn:.+:.+:.+:.+:.+:a\/[0-9a-f]{32}:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\:\:$`),
+			},
+			"crn": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "CRN of resource instance",
+			},
+			"key_protect": {
+				Type:        schema.TypeString,
+				ForceNew:    true,
+				Optional:    true,
+				Description: "CRN of the key you want to use for data-at-rest encryption",
+			},
+			"single_site_location": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ValidateFunc:  validateAllowedStringValue(singleSiteLocation),
+				ForceNew:      true,
+				ConflictsWith: []string{"region_location", "cross_region_location"},
+				Description:   "single site location info",
+			},
+			"region_location": {
+				Type:     schema.TypeString,
+				Optional: true,
+				//ValidateFunc: validateAllowedStringValue(regionLocation),
+				ForceNew:      true,
+				ConflictsWith: []string{"cross_region_location", "single_site_location"},
+				Description:   "Region location info.",
+			},
+			"cross_region_location": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ValidateFunc:  validateAllowedStringValue(crossRegionLocation),
+				ForceNew:      true,
+				ConflictsWith: []string{"region_location", "single_site_location"},
+				Description:   "Cross region location info",
+			},
+			"storage_class": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validateAllowedStringValue(storageClass),
+				ForceNew:     true,
+				Description:  "Storage class info",
+			},
+			"endpoint_type": {
+				Type:             schema.TypeString,
+				Optional:         true,
+				ValidateFunc:     validateAllowedStringValue([]string{"public", "private"}),
+				Description:      "public or private",
+				DiffSuppressFunc: applyOnce,
+				Default:          "public",
+			},
+			"s3_endpoint_public": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Public endpoint for the COS bucket",
+			},
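+			// endpoint_type above selects which of these computed endpoints the
+			// provider uses when talking to the bucket (see the Update function).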
"s3_endpoint_private": { + Type: schema.TypeString, + Computed: true, + Description: "Private endpoint for the COS bucket", + }, + "allowed_ip": { + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of IPv4 or IPv6 addresses ", + }, + "activity_tracking": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Enables sending log data to Activity Tracker and LogDNA to provide visibility into object read and write events", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "read_data_events": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If set to true, all object read events will be sent to Activity Tracker.", + }, + "write_data_events": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If set to true, all object write events will be sent to Activity Tracker.", + }, + "activity_tracker_crn": { + Type: schema.TypeString, + Required: true, + Description: "The instance of Activity Tracker that will receive object event data", + }, + }, + }, + }, + "metrics_monitoring": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Enables sending metrics to IBM Cloud Monitoring.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "usage_metrics_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Usage metrics will be sent to the monitoring service.", + }, + "request_metrics_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Request metrics will be sent to the monitoring service.", + }, + "metrics_monitoring_crn": { + Type: schema.TypeString, + Required: true, + Description: "Instance of IBM Cloud Monitoring that will receive the bucket metrics.", + }, + }, + }, + }, + "archive_rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Enable configuration archive_rule (glacier/accelerated) to COS Bucket after a defined period of time", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Unique identifier for the rule.Archive rules allow you to set a specific time frame after which objects transition to the archive. Set Rule ID for cos bucket", + }, + "enable": { + Type: schema.TypeBool, + Required: true, + Description: "Enable or disable an archive rule for a bucket", + }, + "days": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateAllowedRangeInt(0, 3650), + Description: "Specifies the number of days when the specific rule action takes effect.", + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"GLACIER", "ACCELERATED", "Glacier", "Accelerated", "glacier", "accelerated"}), + DiffSuppressFunc: caseDiffSuppress, + Description: "Specifies the storage class/archive type to which you want the object to transition. It can be Glacier or Accelerated", + }, + }, + }, + }, + "expire_rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1000, + Description: "Enable configuration expire_rule to COS Bucket after a defined period of time", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Unique identifier for the rule.Expire rules allow you to set a specific time frame after which objects are deleted. 
Set Rule ID for cos bucket", + }, + "enable": { + Type: schema.TypeBool, + Required: true, + Description: "Enable or disable an expire rule for a bucket", + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The rule applies to any objects with keys that match this prefix", + }, + "days": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateAllowedRangeInt(0, 3650), + Description: "Specifies the number of days when the specific rule action takes effect.", + }, + }, + }, + }, + "retention_rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "A retention policy is enabled at the IBM Cloud Object Storage bucket level. Minimum, maximum and default retention period are defined by this policy and apply to all objects in the bucket.", + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateAllowedRangeInt(0, 365243), + Description: "If an object is stored in the bucket without specifying a custom retention period.", + ForceNew: false, + }, + "maximum": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateAllowedRangeInt(0, 365243), + Description: "Maximum duration of time an object can be kept unmodified in the bucket.", + ForceNew: false, + }, + "minimum": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateAllowedRangeInt(0, 365243), + Description: "Minimum duration of time an object must be kept unmodified in the bucket", + ForceNew: false, + }, + "permanent": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Enable or disable the permanent retention policy on the bucket", + }, + }, + }, + }, + "object_versioning": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"retention_rule", "expire_rule"}, + Description: "Protect objects from accidental deletion or overwrites. Versioning allows you to keep multiple versions of an object protecting from unintentional data loss.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Enable or suspend the versioning for objects in the bucket", + }, + }, + }, + }, + "force_delete": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "COS buckets need to be empty before they can be deleted. 
force_delete option empty the bucket and delete it.", + }, + }, + } +} + +func archiveRuleList(archiveList []interface{}) []*s3.LifecycleRule { + var archive_status, archiveStorageClass, rule_id string + var days int64 + var rules []*s3.LifecycleRule + + for _, l := range archiveList { + archiveMap, _ := l.(map[string]interface{}) + //Rule ID + if rule_idSet, exist := archiveMap["rule_id"]; exist { + id := rule_idSet.(string) + rule_id = id + } + + //Status Enable/Disable + if archive_statusSet, exist := archiveMap["enable"]; exist { + archiveStatusEnabled := archive_statusSet.(bool) + if archiveStatusEnabled == true { + archive_status = "Enabled" + } else { + archive_status = "Disabled" + } + } + //Days + if daysarchiveSet, exist := archiveMap["days"]; exist { + daysarchive := int64(daysarchiveSet.(int)) + days = daysarchive + } + //Archive Type + if archiveStorgaeClassSet, exist := archiveMap["type"]; exist { + archiveType := archiveStorgaeClassSet.(string) + archiveStorageClass = archiveType + } + + archive_rule := s3.LifecycleRule{ + ID: aws.String(rule_id), + Status: aws.String(archive_status), + Filter: &s3.LifecycleRuleFilter{}, + Transitions: []*s3.Transition{ + { + Days: aws.Int64(days), + StorageClass: aws.String(archiveStorageClass), + }, + }, + } + + rules = append(rules, &archive_rule) + } + return rules +} + +func expireRuleList(expireList []interface{}) []*s3.LifecycleRule { + var expire_prefix, expire_status, rule_id string + var days int64 + var rules []*s3.LifecycleRule + + for _, l := range expireList { + expireMap, _ := l.(map[string]interface{}) + //Rule ID + if rule_idSet, exist := expireMap["rule_id"]; exist { + id := rule_idSet.(string) + rule_id = id + } + + //Status Enable/Disable + if expire_statusSet, exist := expireMap["enable"]; exist { + archiveStatusEnabled := expire_statusSet.(bool) + if archiveStatusEnabled == true { + expire_status = "Enabled" + } else { + expire_status = "Disabled" + } + } + //Days + if daysexpireSet, exist := expireMap["days"]; exist { + daysexpire := int64(daysexpireSet.(int)) + days = daysexpire + } + //Expire Prefix + if expirePrefixClassSet, exist := expireMap["prefix"]; exist { + expire_prefix = expirePrefixClassSet.(string) + } + + expire_rule := s3.LifecycleRule{ + ID: aws.String(rule_id), + Status: aws.String(expire_status), + Filter: &s3.LifecycleRuleFilter{ + Prefix: aws.String(expire_prefix), + }, + Expiration: &s3.LifecycleExpiration{ + Days: aws.Int64(days), + }, + } + + rules = append(rules, &expire_rule) + } + return rules +} + +func resourceIBMCOSBucketUpdate(d *schema.ResourceData, meta interface{}) error { + var s3Conf *aws.Config + rsConClient, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + bucketName := parseBucketId(d.Id(), "bucketName") + serviceID := parseBucketId(d.Id(), "serviceID") + endpointType := parseBucketId(d.Id(), "endpointType") + apiEndpoint, apiEndpointPrivate := selectCosApi(parseBucketId(d.Id(), "apiType"), parseBucketId(d.Id(), "bLocation")) + if endpointType == "private" { + apiEndpoint = apiEndpointPrivate + } + authEndpoint, err := rsConClient.Config.EndpointLocator.IAMEndpoint() + if err != nil { + return err + } + authEndpointPath := fmt.Sprintf("%s%s", authEndpoint, "/identity/token") + apiKey := rsConClient.Config.BluemixAPIKey + if apiKey != "" { + s3Conf = aws.NewConfig().WithEndpoint(envFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint)).WithCredentials(ibmiam.NewStaticCredentials(aws.NewConfig(), authEndpointPath, apiKey, 
+	iamAccessToken := rsConClient.Config.IAMAccessToken
+	if iamAccessToken != "" {
+		initFunc := func() (*token.Token, error) {
+			return &token.Token{
+				AccessToken:  rsConClient.Config.IAMAccessToken,
+				RefreshToken: rsConClient.Config.IAMRefreshToken,
+				TokenType:    "Bearer",
+				ExpiresIn:    int64((time.Hour * 248).Seconds()) * -1,
+				Expiration:   time.Now().Add(-1 * time.Hour).Unix(),
+			}, nil
+		}
+		s3Conf = aws.NewConfig().WithEndpoint(envFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint)).WithCredentials(ibmiam.NewCustomInitFuncCredentials(aws.NewConfig(), initFunc, authEndpointPath, serviceID)).WithS3ForcePathStyle(true)
+	}
+	s3Sess := session.Must(session.NewSession())
+	s3Client := s3.New(s3Sess, s3Conf)
+
+	//// Update the lifecycle configuration (archive or expire rules)
+	if d.HasChange("archive_rule") || d.HasChange("expire_rule") {
+		var archive, archive_ok = d.GetOk("archive_rule")
+		var expire, expire_ok = d.GetOk("expire_rule")
+		var rules []*s3.LifecycleRule
+		if archive_ok || expire_ok {
+			if expire_ok {
+				rules = append(rules, expireRuleList(expire.([]interface{}))...)
+			}
+			if archive_ok {
+				rules = append(rules, archiveRuleList(archive.([]interface{}))...)
+			}
+
+			lInput := &s3.PutBucketLifecycleConfigurationInput{
+				Bucket: aws.String(bucketName),
+				LifecycleConfiguration: &s3.LifecycleConfiguration{
+					Rules: rules,
+				},
+			}
+			_, err := s3Client.PutBucketLifecycleConfiguration(lInput)
+			if err != nil {
+				return fmt.Errorf("failed to update the lifecycle configuration on COS bucket %s, %v", bucketName, err)
+			}
+
+		} else {
+			DelInput := &s3.DeleteBucketLifecycleInput{
+				Bucket: aws.String(bucketName),
+			}
+
+			delarchive, _ := s3Client.DeleteBucketLifecycleRequest(DelInput)
+			err := delarchive.Send()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	//// Update the retention policy
+	if d.HasChange("retention_rule") {
+		var defaultretention, minretention, maxretention int64
+		var permanentretention bool
+		if retention, ok := d.GetOk("retention_rule"); ok {
+			retentionList := retention.([]interface{})
+			if len(retentionList) > 1 {
+				return fmt.Errorf("Cannot have more than one retention policy")
+			}
+			for _, l := range retentionList {
+				retentionMap, _ := l.(map[string]interface{})
+				// Default days
+				if defaultretentionSet, exist := retentionMap["default"]; exist {
+					defaultdays := int64(defaultretentionSet.(int))
+					defaultretention = defaultdays
+				}
+				// Maximum days
+				if maxretentionSet, exist := retentionMap["maximum"]; exist {
+					maxdays := int64(maxretentionSet.(int))
+					maxretention = maxdays
+				}
+				// Minimum days
+				if minretentionSet, exist := retentionMap["minimum"]; exist {
+					mindays := int64(minretentionSet.(int))
+					minretention = mindays
+				}
+				// Permanent retention enable/disable
+				if permanentretentionSet, exist := retentionMap["permanent"]; exist {
+					permanentretention = permanentretentionSet.(bool)
+				}
+			}
+			// PUT bucket protection configuration
+			pInput := &s3.PutBucketProtectionConfigurationInput{
+				Bucket: aws.String(bucketName),
+				ProtectionConfiguration: &s3.ProtectionConfiguration{
+					DefaultRetention: &s3.BucketProtectionDefaultRetention{
+						Days: aws.Int64(defaultretention),
+					},
+					MaximumRetention: &s3.BucketProtectionMaximumRetention{
+						Days: aws.Int64(maxretention),
+					},
+					MinimumRetention: &s3.BucketProtectionMinimumRetention{
+						Days: aws.Int64(minretention),
+					},
+					Status:                   aws.String("Retention"),
+					EnablePermanentRetention: aws.Bool(permanentretention),
+				},
+			}
+			_, err := s3Client.PutBucketProtectionConfiguration(pInput)
+			if err != nil {
return fmt.Errorf("failed to update the retention rule on COS bucket %s, %v", bucketName, err) + } + } + } + + //update the object versioning (object versioning) + if d.HasChange("object_versioning") { + versioningConf := &s3.VersioningConfiguration{} + if versioning, ok := d.GetOk("object_versioning"); ok { + versioningList := versioning.([]interface{}) + for _, l := range versioningList { + versioningMap, _ := l.(map[string]interface{}) + //Status Enable/Disable + if object_versioning_statusSet, exist1 := versioningMap["enable"]; exist1 { + versioningStatusEnabled := object_versioning_statusSet.(bool) + if versioningStatusEnabled == true { + versioningConf.Status = aws.String("Enabled") + } else { + versioningConf.Status = aws.String("Suspended") + } + } + } + } else { + versioningConf.Status = aws.String("Suspended") + } + // PUT BUCKET Object Versioning + input := &s3.PutBucketVersioningInput{ + Bucket: aws.String(bucketName), + VersioningConfiguration: versioningConf, + } + _, err := s3Client.PutBucketVersioning(input) + if err != nil { + return fmt.Errorf("failed to update the object versioning on COS bucket %s, %v", bucketName, err) + } + } + + sess, err := meta.(ClientSession).CosConfigV1API() + if err != nil { + return err + } + if endpointType == "private" { + sess.SetServiceURL("https://config.private.cloud-object-storage.cloud.ibm.com/v1") + } + + hasChanged := false + updateBucketConfigOptions := &resourceconfigurationv1.UpdateBucketConfigOptions{} + + //BucketName + bucketName = d.Get("bucket_name").(string) + updateBucketConfigOptions.Bucket = &bucketName + + if d.HasChange("allowed_ip") { + firewall := &resourceconfigurationv1.Firewall{} + var ips = make([]string, 0) + if ip, ok := d.GetOk("allowed_ip"); ok && ip != nil { + for _, i := range ip.([]interface{}) { + ips = append(ips, i.(string)) + } + firewall.AllowedIp = ips + } else { + firewall.AllowedIp = []string{} + } + hasChanged = true + updateBucketConfigOptions.Firewall = firewall + } + + if d.HasChange("activity_tracking") { + activityTracker := &resourceconfigurationv1.ActivityTracking{} + if activity, ok := d.GetOk("activity_tracking"); ok { + activitylist := activity.([]interface{}) + for _, l := range activitylist { + activityMap, _ := l.(map[string]interface{}) + + //Read event - as its optional check for existence + if readEvent := activityMap["read_data_events"]; readEvent != nil { + readSet := readEvent.(bool) + activityTracker.ReadDataEvents = &readSet + } + + //Write Event - as its optional check for existence + if writeEvent := activityMap["write_data_events"]; writeEvent != nil { + writeSet := writeEvent.(bool) + activityTracker.WriteDataEvents = &writeSet + } + + //crn - Required field + crn := activityMap["activity_tracker_crn"].(string) + activityTracker.ActivityTrackerCrn = &crn + } + } + hasChanged = true + updateBucketConfigOptions.ActivityTracking = activityTracker + } + + if d.HasChange("metrics_monitoring") { + metricsMonitor := &resourceconfigurationv1.MetricsMonitoring{} + if metrics, ok := d.GetOk("metrics_monitoring"); ok { + metricslist := metrics.([]interface{}) + for _, l := range metricslist { + metricsMap, _ := l.(map[string]interface{}) + + //metrics enabled - as its optional check for existence + if metricsSet := metricsMap["usage_metrics_enabled"]; metricsSet != nil { + metrics := metricsSet.(bool) + metricsMonitor.UsageMetricsEnabled = &metrics + } + // request metrics enabled - as its optional check for existence + if metricsSet := metricsMap["request_metrics_enabled"]; metricsSet 
!= nil { + metrics := metricsSet.(bool) + metricsMonitor.RequestMetricsEnabled = &metrics + } + //crn - Required field + crn := metricsMap["metrics_monitoring_crn"].(string) + metricsMonitor.MetricsMonitoringCrn = &crn + } + } + hasChanged = true + updateBucketConfigOptions.MetricsMonitoring = metricsMonitor + } + + if hasChanged { + response, err := sess.UpdateBucketConfig(updateBucketConfigOptions) + if err != nil { + return fmt.Errorf("Error Update COS Bucket: %s\n%s", err, response) + } + } + + return resourceIBMCOSBucketRead(d, meta) +} + +func resourceIBMCOSBucketRead(d *schema.ResourceData, meta interface{}) error { + var s3Conf *aws.Config + rsConClient, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + bucketName := parseBucketId(d.Id(), "bucketName") + serviceID := parseBucketId(d.Id(), "serviceID") + endpointType := parseBucketId(d.Id(), "endpointType") + apiEndpoint, apiEndpointPrivate := selectCosApi(parseBucketId(d.Id(), "apiType"), parseBucketId(d.Id(), "bLocation")) + if endpointType == "private" { + apiEndpoint = apiEndpointPrivate + } + apiEndpoint = envFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint) + authEndpoint, err := rsConClient.Config.EndpointLocator.IAMEndpoint() + if err != nil { + return err + } + authEndpointPath := fmt.Sprintf("%s%s", authEndpoint, "/identity/token") + apiKey := rsConClient.Config.BluemixAPIKey + if apiKey != "" { + s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewStaticCredentials(aws.NewConfig(), authEndpointPath, apiKey, serviceID)).WithS3ForcePathStyle(true) + } + iamAccessToken := rsConClient.Config.IAMAccessToken + if iamAccessToken != "" && apiKey == "" { + initFunc := func() (*token.Token, error) { + return &token.Token{ + AccessToken: rsConClient.Config.IAMAccessToken, + RefreshToken: rsConClient.Config.IAMRefreshToken, + TokenType: "Bearer", + ExpiresIn: int64((time.Hour * 248).Seconds()) * -1, + Expiration: time.Now().Add(-1 * time.Hour).Unix(), + }, nil + } + s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewCustomInitFuncCredentials(aws.NewConfig(), initFunc, authEndpointPath, serviceID)).WithS3ForcePathStyle(true) + } + s3Sess := session.Must(session.NewSession()) + s3Client := s3.New(s3Sess, s3Conf) + + headInput := &s3.HeadBucketInput{ + Bucket: aws.String(bucketName), + } + err = s3Client.WaitUntilBucketExists(headInput) + if err != nil { + return fmt.Errorf("failed waiting for bucket %s to be created, %v", + bucketName, err) + } + + bucketOutput, err := s3Client.ListBucketsExtended(&s3.ListBucketsExtendedInput{}) + + if err != nil { + return err + } + var bLocationConstraint string + for _, b := range bucketOutput.Buckets { + if *b.Name == bucketName { + bLocationConstraint = *b.LocationConstraint + } + } + + singleSiteLocationRegex, err := regexp.Compile("^[a-z]{3}[0-9][0-9]-[a-z]{4,8}$") + if err != nil { + return err + } + regionLocationRegex, err := regexp.Compile("^[a-z]{2}-[a-z]{2,5}-[a-z]{4,8}$") + if err != nil { + return err + } + crossRegionLocationRegex, err := regexp.Compile("^[a-z]{2}-[a-z]{4,8}$") + if err != nil { + return err + } + + if singleSiteLocationRegex.MatchString(bLocationConstraint) { + d.Set("single_site_location", strings.Split(bLocationConstraint, "-")[0]) + d.Set("storage_class", strings.Split(bLocationConstraint, "-")[1]) + } + if regionLocationRegex.MatchString(bLocationConstraint) { + d.Set("region_location", fmt.Sprintf("%s-%s", strings.Split(bLocationConstraint, "-")[0], 
strings.Split(bLocationConstraint, "-")[1]))
+		d.Set("storage_class", strings.Split(bLocationConstraint, "-")[2])
+	}
+	if crossRegionLocationRegex.MatchString(bLocationConstraint) {
+		d.Set("cross_region_location", strings.Split(bLocationConstraint, "-")[0])
+		d.Set("storage_class", strings.Split(bLocationConstraint, "-")[1])
+	}
+
+	bucketCRN := fmt.Sprintf("%s:%s:%s", strings.Replace(serviceID, "::", "", -1), "bucket", bucketName)
+	d.Set("crn", bucketCRN)
+	d.Set("resource_instance_id", serviceID)
+	d.Set("bucket_name", bucketName)
+	d.Set("s3_endpoint_public", apiEndpoint)
+	d.Set("s3_endpoint_private", apiEndpointPrivate)
+	if endpointType != "" {
+		d.Set("endpoint_type", endpointType)
+	}
+
+	getBucketConfigOptions := &resourceconfigurationv1.GetBucketConfigOptions{
+		Bucket: &bucketName,
+	}
+
+	sess, err := meta.(ClientSession).CosConfigV1API()
+	if err != nil {
+		return err
+	}
+	if endpointType == "private" {
+		sess.SetServiceURL("https://config.private.cloud-object-storage.cloud.ibm.com/v1")
+	}
+
+	bucketPtr, response, err := sess.GetBucketConfig(getBucketConfigOptions)
+	if err != nil {
+		return fmt.Errorf("Error getting bucket configuration: %s\n%s", err, response)
+	}
+
+	if bucketPtr != nil {
+
+		if bucketPtr.Firewall != nil {
+			d.Set("allowed_ip", flattenStringList(bucketPtr.Firewall.AllowedIp))
+		}
+		if bucketPtr.ActivityTracking != nil {
+			d.Set("activity_tracking", flattenActivityTrack(bucketPtr.ActivityTracking))
+		}
+		if bucketPtr.MetricsMonitoring != nil {
+			d.Set("metrics_monitoring", flattenMetricsMonitor(bucketPtr.MetricsMonitoring))
+		}
+	}
+	// Read the lifecycle configuration (archive & expiration)
+
+	gInput := &s3.GetBucketLifecycleConfigurationInput{
+		Bucket: aws.String(bucketName),
+	}
+
+	lifecycleptr, err := s3Client.GetBucketLifecycleConfiguration(gInput)
+
+	if (err != nil && !strings.Contains(err.Error(), "NoSuchLifecycleConfiguration: The lifecycle configuration does not exist")) && (err != nil && bucketPtr != nil && bucketPtr.Firewall != nil && !strings.Contains(err.Error(), "AccessDenied: Access Denied")) {
+		return err
+	}
+
+	if lifecycleptr != nil {
+		archiveRules := archiveRuleGet(lifecycleptr.Rules)
+		expireRules := expireRuleGet(lifecycleptr.Rules)
+		if len(archiveRules) > 0 {
+			d.Set("archive_rule", archiveRules)
+		}
+		if len(expireRules) > 0 {
+			d.Set("expire_rule", expireRules)
+		}
+	}
+
+	// Read the retention rule
+	retentionInput := &s3.GetBucketProtectionConfigurationInput{
+		Bucket: aws.String(bucketName),
+	}
+	retentionptr, err := s3Client.GetBucketProtectionConfiguration(retentionInput)
+
+	if err != nil && bucketPtr != nil && bucketPtr.Firewall != nil && !strings.Contains(err.Error(), "AccessDenied: Access Denied") {
+		return err
+	}
+
+	if retentionptr != nil {
+		retentionRules := retentionRuleGet(retentionptr.ProtectionConfiguration)
+		if len(retentionRules) > 0 {
+			d.Set("retention_rule", retentionRules)
+		}
+	}
+
+	// Read object versioning
+	versionInput := &s3.GetBucketVersioningInput{
+		Bucket: aws.String(bucketName),
+	}
+	versionPtr, err := s3Client.GetBucketVersioning(versionInput)
+
+	if err != nil && bucketPtr != nil && bucketPtr.Firewall != nil && !strings.Contains(err.Error(), "AccessDenied: Access Denied") {
+		return err
+	}
+	if versionPtr != nil {
+		versioningData := flattenCosObejctVersioning(versionPtr)
+		if len(versioningData) > 0 {
+			d.Set("object_versioning", versioningData)
+		} else {
+			d.Set("object_versioning", nil)
+		}
+	}
+	return nil
+}
+
+func resourceIBMCOSBucketCreate(d *schema.ResourceData, meta interface{}) error {
+	var s3Conf *aws.Config
+	rsConClient, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return err
+	}
+	bucketName := d.Get("bucket_name").(string)
+	storageClass := d.Get("storage_class").(string)
+	var bLocation string
+	var apiType string
+	serviceID := d.Get("resource_instance_id").(string)
+
+	if bucketLocation, ok := d.GetOk("cross_region_location"); ok {
+		bLocation = bucketLocation.(string)
+		apiType = "crl"
+	}
+	if bucketLocation, ok := d.GetOk("region_location"); ok {
+		bLocation = bucketLocation.(string)
+		apiType = "rl"
+	}
+	if bucketLocation, ok := d.GetOk("single_site_location"); ok {
+		bLocation = bucketLocation.(string)
+		apiType = "ssl"
+	}
+	if bLocation == "" {
+		return fmt.Errorf("Provide one of `cross_region_location`, `region_location`, or `single_site_location`")
+	}
+	lConstraint := fmt.Sprintf("%s-%s", bLocation, storageClass)
+	var endpointType = d.Get("endpoint_type").(string)
+	apiEndpoint, privateApiEndpoint := selectCosApi(apiType, bLocation)
+	if endpointType == "private" {
+		apiEndpoint = privateApiEndpoint
+	}
+	apiEndpoint = envFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint)
+	if apiEndpoint == "" {
+		return fmt.Errorf("The endpoint doesn't exist for the given location %s and endpoint type %s", bLocation, endpointType)
+	}
+	create := &s3.CreateBucketInput{
+		Bucket: aws.String(bucketName),
+		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+			LocationConstraint: aws.String(lConstraint),
+		},
+	}
+
+	if keyprotect, ok := d.GetOk("key_protect"); ok {
+		create.IBMSSEKPCustomerRootKeyCrn = aws.String(keyprotect.(string))
+		create.IBMSSEKPEncryptionAlgorithm = aws.String(keyAlgorithm)
+	}
+
+	authEndpoint, err := rsConClient.Config.EndpointLocator.IAMEndpoint()
+	if err != nil {
+		return err
+	}
+	authEndpointPath := fmt.Sprintf("%s%s", authEndpoint, "/identity/token")
+	apiKey := rsConClient.Config.BluemixAPIKey
+	if apiKey != "" {
+		s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewStaticCredentials(aws.NewConfig(), authEndpointPath, apiKey, serviceID)).WithS3ForcePathStyle(true)
+	}
+	iamAccessToken := rsConClient.Config.IAMAccessToken
+	if iamAccessToken != "" && apiKey == "" {
+		initFunc := func() (*token.Token, error) {
+			return &token.Token{
+				AccessToken:  rsConClient.Config.IAMAccessToken,
+				RefreshToken: rsConClient.Config.IAMRefreshToken,
+				TokenType:    "Bearer",
+				ExpiresIn:    int64((time.Hour * 248).Seconds()) * -1,
+				Expiration:   time.Now().Add(-1 * time.Hour).Unix(),
+			}, nil
+		}
+		s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewCustomInitFuncCredentials(aws.NewConfig(), initFunc, authEndpointPath, serviceID)).WithS3ForcePathStyle(true)
+	}
+
+	s3Sess := session.Must(session.NewSession())
+	s3Client := s3.New(s3Sess, s3Conf)
+
+	_, err = s3Client.CreateBucket(create)
+	if err != nil {
+		return err
+	}
+	// Generate a synthetic ID that encodes everything needed to look up the bucket via the S3 API
+	bucketID := fmt.Sprintf("%s:%s:%s:meta:%s:%s:%s", strings.Replace(serviceID, "::", "", -1), "bucket", bucketName, apiType, bLocation, endpointType)
+	d.SetId(bucketID)
+
+	return resourceIBMCOSBucketUpdate(d, meta)
+
+}
+
+func resourceIBMCOSBucketDelete(d *schema.ResourceData, meta interface{}) error {
+	var s3Conf *aws.Config
+	rsConClient, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return err
+	}
+	bucketName := parseBucketId(d.Id(), "bucketName")
+	serviceID := d.Get("resource_instance_id").(string)
+	var bLocation string
+	var apiType string
bucketLocation, ok := d.GetOk("cross_region_location"); ok { + bLocation = bucketLocation.(string) + apiType = "crl" + } + if bucketLocation, ok := d.GetOk("region_location"); ok { + bLocation = bucketLocation.(string) + apiType = "rl" + } + if bucketLocation, ok := d.GetOk("single_site_location"); ok { + bLocation = bucketLocation.(string) + apiType = "ssl" + } + endpointType := parseBucketId(d.Id(), "endpointType") + apiEndpoint, apiEndpointPrivate := selectCosApi(apiType, bLocation) + if endpointType == "private" { + apiEndpoint = apiEndpointPrivate + } + apiEndpoint = envFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint) + if apiEndpoint == "" { + return fmt.Errorf("The endpoint doesn't exists for given location %s and endpoint type %s", bLocation, endpointType) + } + authEndpoint, err := rsConClient.Config.EndpointLocator.IAMEndpoint() + if err != nil { + return err + } + authEndpointPath := fmt.Sprintf("%s%s", authEndpoint, "/identity/token") + + apiKey := rsConClient.Config.BluemixAPIKey + if apiKey != "" { + s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewStaticCredentials(aws.NewConfig(), authEndpointPath, apiKey, serviceID)).WithS3ForcePathStyle(true) + } + iamAccessToken := rsConClient.Config.IAMAccessToken + if iamAccessToken != "" && apiKey == "" { + initFunc := func() (*token.Token, error) { + return &token.Token{ + AccessToken: rsConClient.Config.IAMAccessToken, + RefreshToken: rsConClient.Config.IAMRefreshToken, + TokenType: "Bearer", + ExpiresIn: int64((time.Hour * 248).Seconds()) * -1, + Expiration: time.Now().Add(-1 * time.Hour).Unix(), + }, nil + } + s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewCustomInitFuncCredentials(aws.NewConfig(), initFunc, authEndpointPath, serviceID)).WithS3ForcePathStyle(true) + } + + s3Sess := session.Must(session.NewSession()) + s3Client := s3.New(s3Sess, s3Conf) + + delete := &s3.DeleteBucketInput{ + Bucket: aws.String(bucketName), + } + _, err = s3Client.DeleteBucket(delete) + + if err != nil && strings.Contains(err.Error(), "BucketNotEmpty") { + if delbucket, ok := d.GetOk("force_delete"); ok { + if delbucket.(bool) { + // Use a S3 service client that can handle multiple slashes in URIs. + // While ibm_cos_bucket_object resources cannot create these object + // keys, other AWS services and applications using the COS Bucket can. + + // bucket may have things delete them + log.Printf("[DEBUG] COS Bucket attempting to forceDelete %+v", err) + + // Delete everything including locked objects. + // Don't ignore any object errors or we could recurse infinitely. 
+				err = deleteAllCOSObjectVersions(s3Client, bucketName, "", false, false)
+
+				if err != nil {
+					return fmt.Errorf("error COS Bucket force_delete: %s", err)
+				}
+
+				// This line recurses until all objects are deleted or an error is returned
+				return resourceIBMCOSBucketDelete(d, meta)
+			}
+		}
+	}
+	if err != nil {
+		return fmt.Errorf("error deleting COS Bucket (%s): %s", d.Id(), err)
+	}
+
+	return nil
+}
+
+func resourceIBMCOSBucketExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	var s3Conf *aws.Config
+	rsConClient, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return false, err
+	}
+
+	bucketName := parseBucketId(d.Id(), "bucketName")
+	serviceID := parseBucketId(d.Id(), "serviceID")
+	endpointType := parseBucketId(d.Id(), "endpointType")
+	apiEndpoint, apiEndpointPrivate := selectCosApi(parseBucketId(d.Id(), "apiType"), parseBucketId(d.Id(), "bLocation"))
+	if endpointType == "private" {
+		apiEndpoint = apiEndpointPrivate
+	}
+	apiEndpoint = envFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint)
+	if apiEndpoint == "" {
+		return false, fmt.Errorf("The endpoint doesn't exist for the given endpoint type %s", endpointType)
+	}
+	authEndpoint, err := rsConClient.Config.EndpointLocator.IAMEndpoint()
+	if err != nil {
+		return false, err
+	}
+	authEndpointPath := fmt.Sprintf("%s%s", authEndpoint, "/identity/token")
+
+	apiKey := rsConClient.Config.BluemixAPIKey
+	if apiKey != "" {
+		s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewStaticCredentials(aws.NewConfig(), authEndpointPath, apiKey, serviceID)).WithS3ForcePathStyle(true)
+	}
+	iamAccessToken := rsConClient.Config.IAMAccessToken
+	if iamAccessToken != "" && apiKey == "" {
+		initFunc := func() (*token.Token, error) {
+			return &token.Token{
+				AccessToken:  rsConClient.Config.IAMAccessToken,
+				RefreshToken: rsConClient.Config.IAMRefreshToken,
+				TokenType:    "Bearer",
+				ExpiresIn:    int64((time.Hour * 248).Seconds()) * -1,
+				Expiration:   time.Now().Add(-1 * time.Hour).Unix(),
+			}, nil
+		}
+		s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewCustomInitFuncCredentials(aws.NewConfig(), initFunc, authEndpointPath, serviceID)).WithS3ForcePathStyle(true)
+	}
+
+	s3Sess := session.Must(session.NewSession())
+	s3Client := s3.New(s3Sess, s3Conf)
+
+	bucketList, err := s3Client.ListBuckets(&s3.ListBucketsInput{})
+	if err != nil {
+		return false, err
+	}
+	for _, bucket := range bucketList.Buckets {
+		if *bucket.Name == bucketName {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+func selectCosApi(apiType string, bLocation string) (string, string) {
+	if apiType == "crl" {
+		return fmt.Sprintf("s3.%s.cloud-object-storage.appdomain.cloud", bLocation), fmt.Sprintf("s3.private.%s.cloud-object-storage.appdomain.cloud", bLocation)
+	}
+	if apiType == "rl" {
+		return fmt.Sprintf("s3.%s.cloud-object-storage.appdomain.cloud", bLocation), fmt.Sprintf("s3.private.%s.cloud-object-storage.appdomain.cloud", bLocation)
+	}
+	if apiType == "ssl" {
+		return fmt.Sprintf("s3.%s.cloud-object-storage.appdomain.cloud", bLocation), fmt.Sprintf("s3.private.%s.cloud-object-storage.appdomain.cloud", bLocation)
+	}
+	return "", ""
+}
+
+func parseBucketId(id string, info string) string {
+	crn := strings.Split(id, ":meta:")[0]
+	meta := strings.Split(id, ":meta:")[1]
+
+	if info == "bucketName" {
+		return strings.Split(crn, ":bucket:")[1]
+	}
+	if info == "serviceID" {
+		return fmt.Sprintf("%s::", strings.Split(crn, ":bucket:")[0])
+	}
+	if info == "apiType" {
+		return 
strings.Split(meta, ":")[0] + } + if info == "bLocation" { + return strings.Split(meta, ":")[1] + } + if info == "endpointType" { + s := strings.Split(meta, ":") + if len(s) > 2 { + return strings.Split(meta, ":")[2] + } + return "" + + } + return "" +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cos_bucket_object.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cos_bucket_object.go new file mode 100644 index 00000000000..5a77e536f91 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cos_bucket_object.go @@ -0,0 +1,619 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "log" + "os" + "regexp" + "strings" + "time" + + bxsession "github.com/IBM-Cloud/bluemix-go/session" + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam" + token "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token" + "github.com/IBM/ibm-cos-sdk-go/aws/session" + "github.com/IBM/ibm-cos-sdk-go/service/s3" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMCOSBucketObject() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCOSBucketObjectCreate, + Read: resourceIBMCOSBucketObjectRead, + Update: resourceIBMCOSBucketObjectUpdate, + Delete: resourceIBMCOSBucketObjectDelete, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "body": { + Type: schema.TypeString, + Computed: true, + Description: "COS object body", + }, + "bucket_crn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "COS bucket CRN", + }, + "bucket_location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "COS bucket location", + }, + "content": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"content_base64", "content_file"}, + Description: "COS object content", + }, + "content_base64": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"content", "content_file"}, + Description: "COS object content in base64 encoding", + }, + "content_file": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"content", "content_base64"}, + Description: "COS object content file path", + }, + "content_length": { + Type: schema.TypeInt, + Computed: true, + Description: "COS object content length", + }, + "content_type": { + Type: schema.TypeString, + Computed: true, + Description: "COS object content type", + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private", "direct"}), + Description: "COS endpoint type: public, private, direct", + Default: "public", + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "COS object MD5 hexdigest", + }, + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "COS object key", + }, + "last_modified": { + Type: schema.TypeString, + Computed: true, + Description: "COS object last modified date", + }, + "version_id": { + Type: schema.TypeString, + Computed: true, + }, + 
"force_delete": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "COS buckets need to be empty before they can be deleted. force_delete option empty the bucket and delete it.", + }, + }, + } +} + +func resourceIBMCOSBucketObjectCreate(d *schema.ResourceData, m interface{}) error { + bucketCRN := d.Get("bucket_crn").(string) + bucketName := strings.Split(bucketCRN, ":bucket:")[1] + instanceCRN := fmt.Sprintf("%s::", strings.Split(bucketCRN, ":bucket:")[0]) + + bucketLocation := d.Get("bucket_location").(string) + endpointType := d.Get("endpoint_type").(string) + + bxSession, err := m.(ClientSession).BluemixSession() + if err != nil { + return err + } + + s3Client, err := getS3Client(bxSession, bucketLocation, endpointType, instanceCRN) + if err != nil { + return err + } + + objectKey := d.Get("key").(string) + + // This check is to make sure new create does not + // overwrite objects that is not managed by Terraform + exists, err := objectExists(s3Client, bucketName, objectKey) + if err != nil { + return err + } + if exists { + return fmt.Errorf("error COS bucket (%s) object (%s) already exists", bucketName, objectKey) + } + + var body io.ReadSeeker + + if v, ok := d.GetOk("content"); ok { + content := v.(string) + body = bytes.NewReader([]byte(content)) + } else if v, ok := d.GetOk("content_base64"); ok { + content := v.(string) + contentRaw, err := base64.StdEncoding.DecodeString(content) + if err != nil { + return fmt.Errorf("error decoding content_base64: %s", err) + } + body = bytes.NewReader(contentRaw) + } else if v, ok := d.GetOk("content_file"); ok { + path := v.(string) + file, err := os.Open(path) + if err != nil { + return fmt.Errorf("error opening COS object file (%s): %s", path, err) + } + + body = file + defer func() { + err := file.Close() + if err != nil { + log.Printf("[WARN] Failed closing COS object file (%s): %s", path, err) + } + }() + } + + putInput := &s3.PutObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(objectKey), + Body: body, + } + + if _, err := s3Client.PutObject(putInput); err != nil { + return fmt.Errorf("error putting object (%s) in COS bucket (%s): %s", objectKey, bucketName, err) + } + + objectID := getObjectId(bucketCRN, objectKey, bucketLocation) + d.SetId(objectID) + + return resourceIBMCOSBucketObjectRead(d, m) +} + +func resourceIBMCOSBucketObjectRead(d *schema.ResourceData, m interface{}) error { + objectID := d.Id() + + bucketCRN := parseObjectId(objectID, "bucketCRN") + bucketName := parseObjectId(objectID, "bucketName") + bucketLocation := parseObjectId(objectID, "bucketLocation") + instanceCRN := parseObjectId(objectID, "instanceCRN") + endpointType := d.Get("endpoint_type").(string) + + d.Set("bucket_crn", bucketCRN) + d.Set("bucket_location", bucketLocation) + + bxSession, err := m.(ClientSession).BluemixSession() + if err != nil { + return err + } + + s3Client, err := getS3Client(bxSession, bucketLocation, endpointType, instanceCRN) + if err != nil { + return err + } + + objectKey := parseObjectId(objectID, "objectKey") + headInput := &s3.HeadObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(objectKey), + } + + out, err := s3Client.HeadObject(headInput) + if err != nil { + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { + d.SetId("") // Set state back to empty for terraform refresh + } + return fmt.Errorf("failed getting COS bucket (%s) object (%s): %w", bucketName, objectKey, err) + } + + log.Printf("[DEBUG] Received COS object: %s", out) + + 
d.Set("content_length", out.ContentLength) + d.Set("content_type", out.ContentType) + d.Set("etag", strings.Trim(aws.StringValue(out.ETag), `"`)) + if out.LastModified != nil { + d.Set("last_modified", out.LastModified.Format(time.RFC1123)) + } else { + d.Set("last_modified", "") + } + + if isContentTypeAllowed(out.ContentType) { + getInput := s3.GetObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(objectKey), + } + out, err := s3Client.GetObject(&getInput) + if err != nil { + return fmt.Errorf("failed getting COS object: %w", err) + } + + buf := new(bytes.Buffer) + bytesRead, err := buf.ReadFrom(out.Body) + if err != nil { + return fmt.Errorf("failed reading content of COS bucket (%s) object (%s): %w", bucketName, objectKey, err) + } + log.Printf("[INFO] Saving %d bytes from COS bucket (%s) object (%s)", bytesRead, bucketName, objectKey) + d.Set("body", buf.String()) + } else { + contentType := "" + if out.ContentType == nil { + contentType = "" + } else { + contentType = aws.StringValue(out.ContentType) + } + + log.Printf("[INFO] Ignoring body of COS bucket (%s) object (%s) with Content-Type %q", bucketName, objectKey, contentType) + } + + d.Set("key", objectKey) + d.Set("version_id", out.VersionId) + + return nil +} + +func resourceIBMCOSBucketObjectUpdate(d *schema.ResourceData, m interface{}) error { + if d.HasChanges("content", "content_base64", "content_file", "etag") { + bucketCRN := d.Get("bucket_crn").(string) + bucketName := strings.Split(bucketCRN, ":bucket:")[1] + instanceCRN := fmt.Sprintf("%s::", strings.Split(bucketCRN, ":bucket:")[0]) + + bucketLocation := d.Get("bucket_location").(string) + endpointType := d.Get("endpoint_type").(string) + + bxSession, err := m.(ClientSession).BluemixSession() + if err != nil { + return err + } + + s3Client, err := getS3Client(bxSession, bucketLocation, endpointType, instanceCRN) + if err != nil { + return err + } + + var body io.ReadSeeker + + if v, ok := d.GetOk("content"); ok { + content := v.(string) + body = bytes.NewReader([]byte(content)) + } else if v, ok := d.GetOk("content_base64"); ok { + content := v.(string) + contentRaw, err := base64.StdEncoding.DecodeString(content) + if err != nil { + return fmt.Errorf("error decoding content_base64: %s", err) + } + body = bytes.NewReader(contentRaw) + } else if v, ok := d.GetOk("content_file"); ok { + path := v.(string) + file, err := os.Open(path) + if err != nil { + return fmt.Errorf("error opening COS object file (%s): %s", path, err) + } + + body = file + defer func() { + err := file.Close() + if err != nil { + log.Printf("[WARN] Failed closing COS object file (%s): %s", path, err) + } + }() + } + + objectKey := d.Get("key").(string) + + putInput := &s3.PutObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(objectKey), + Body: body, + } + + if _, err := s3Client.PutObject(putInput); err != nil { + return fmt.Errorf("error putting object (%s) in COS bucket (%s): %s", objectKey, bucketName, err) + } + + objectID := getObjectId(bucketCRN, objectKey, bucketLocation) + d.SetId(objectID) + } + + return resourceIBMCOSBucketObjectRead(d, m) +} + +func resourceIBMCOSBucketObjectDelete(d *schema.ResourceData, m interface{}) error { + bucketCRN := d.Get("bucket_crn").(string) + bucketName := strings.Split(bucketCRN, ":bucket:")[1] + instanceCRN := fmt.Sprintf("%s::", strings.Split(bucketCRN, ":bucket:")[0]) + + bucketLocation := d.Get("bucket_location").(string) + endpointType := d.Get("endpoint_type").(string) + + bxSession, err := m.(ClientSession).BluemixSession() 
+	if err != nil {
+		return err
+	}
+
+	s3Client, err := getS3Client(bxSession, bucketLocation, endpointType, instanceCRN)
+	if err != nil {
+		return err
+	}
+	objectKey := d.Get("key").(string)
+
+	if _, ok := d.GetOk("version_id"); ok {
+		err = deleteAllCOSObjectVersions(s3Client, bucketName, objectKey, d.Get("force_delete").(bool), false)
+	} else {
+		err = deleteCOSObjectVersion(s3Client, bucketName, objectKey, "", false)
+	}
+
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func getCosEndpoint(bucketLocation string, endpointType string) string {
+	if bucketLocation != "" {
+		switch endpointType {
+		case "public":
+			return fmt.Sprintf("s3.%s.cloud-object-storage.appdomain.cloud", bucketLocation)
+		case "private":
+			return fmt.Sprintf("s3.private.%s.cloud-object-storage.appdomain.cloud", bucketLocation)
+		case "direct":
+			return fmt.Sprintf("s3.direct.%s.cloud-object-storage.appdomain.cloud", bucketLocation)
+		default:
+			return fmt.Sprintf("s3.%s.cloud-object-storage.appdomain.cloud", bucketLocation)
+		}
+	}
+	return ""
+}
+
+func getS3Client(bxSession *bxsession.Session, bucketLocation string, endpointType string, instanceCRN string) (*s3.S3, error) {
+	var s3Conf *aws.Config
+
+	apiEndpoint := getCosEndpoint(bucketLocation, endpointType)
+	apiEndpoint = envFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint)
+	if apiEndpoint == "" {
+		return nil, fmt.Errorf("the endpoint doesn't exist for the given location %s and endpoint type %s", bucketLocation, endpointType)
+	}
+
+	authEndpoint, err := bxSession.Config.EndpointLocator.IAMEndpoint()
+	if err != nil {
+		return nil, err
+	}
+	authEndpointPath := fmt.Sprintf("%s%s", authEndpoint, "/identity/token")
+	apiKey := bxSession.Config.BluemixAPIKey
+	if apiKey != "" {
+		s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewStaticCredentials(aws.NewConfig(), authEndpointPath, apiKey, instanceCRN)).WithS3ForcePathStyle(true)
+	}
+	iamAccessToken := bxSession.Config.IAMAccessToken
+	if iamAccessToken != "" {
+		initFunc := func() (*token.Token, error) {
+			return &token.Token{
+				AccessToken:  bxSession.Config.IAMAccessToken,
+				RefreshToken: bxSession.Config.IAMRefreshToken,
+				TokenType:    "Bearer",
+				ExpiresIn:    int64((time.Hour * 248).Seconds()) * -1,
+				Expiration:   time.Now().Add(-1 * time.Hour).Unix(),
+			}, nil
+		}
+		s3Conf = aws.NewConfig().WithEndpoint(apiEndpoint).WithCredentials(ibmiam.NewCustomInitFuncCredentials(aws.NewConfig(), initFunc, authEndpointPath, instanceCRN)).WithS3ForcePathStyle(true)
+	}
+	s3Sess := session.Must(session.NewSession())
+	return s3.New(s3Sess, s3Conf), nil
+}
+
+// This is to prevent potential issues w/ binary files
+// and generally unprintable characters
+// See https://github.com/hashicorp/terraform/pull/3858#issuecomment-156856738
+func isContentTypeAllowed(contentType *string) bool {
+	if contentType == nil {
+		return false
+	}
+
+	allowedContentTypes := []*regexp.Regexp{
+		regexp.MustCompile("^text/.+"),
+		regexp.MustCompile("^application/json$"),
+	}
+
+	for _, r := range allowedContentTypes {
+		if r.MatchString(*contentType) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func objectExists(s3Client *s3.S3, bucketName string, objectKey string) (bool, error) {
+	headInput := &s3.HeadObjectInput{
+		Bucket: aws.String(bucketName),
+		Key:    aws.String(objectKey),
+	}
+	_, err := s3Client.HeadObject(headInput)
+	if err != nil {
+		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+			return false, nil
+		}
+		return false, err
+	}
+
+	return true, nil
+}
+
+func 
getObjectId(bucketCRN string, objectKey string, bucketLocation string) string { + return fmt.Sprintf("%s:object:%s:location:%s", bucketCRN, objectKey, bucketLocation) +} + +func parseObjectId(id string, info string) string { + splitID := strings.Split(id, ":object:") + bucketCRN := splitID[0] + + if info == "instanceCRN" { + return fmt.Sprintf("%s::", strings.Split(bucketCRN, ":bucket:")[0]) + } + + if info == "bucketCRN" { + return bucketCRN + } + + if info == "bucketName" { + return strings.Split(bucketCRN, ":bucket:")[1] + } + + if info == "objectKey" { + return strings.Split(splitID[1], ":location:")[0] + } + + if info == "bucketLocation" { + return strings.Split(splitID[1], ":location:")[1] + } + + return parseBucketId(splitID[0], info) +} + +// deleteAllCOSObjectVersions deletes all versions of a specified key from an COS bucket. +// If key is empty then all versions of all objects are deleted. +func deleteAllCOSObjectVersions(conn *s3.S3, bucketName, key string, force, ignoreObjectErrors bool) error { + input := &s3.ListObjectVersionsInput{ + Bucket: aws.String(bucketName), + } + if key != "" { + input.Prefix = aws.String(key) + } + + var lastErr error + err := conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, objectVersion := range page.Versions { + objectKey := aws.StringValue(objectVersion.Key) + objectVersionID := aws.StringValue(objectVersion.VersionId) + log.Printf("[INFO] Object {%s} Version Id {%s}: ", objectKey, objectVersionID) + + if key != "" && key != objectKey { + continue + } + + err := deleteCOSObjectVersion(conn, bucketName, objectKey, objectVersionID, force) + + if err != nil { + if strings.Contains(err.Error(), "AccessDenied") && force { + // Remove any legal hold. + _, err := conn.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucketName), + Key: objectVersion.Key, + VersionId: objectVersion.VersionId, + }) + + if err != nil { + log.Printf("[ERROR] Error getting COS Bucket (%s) Object (%s) Version (%s) metadata: %s", bucketName, objectKey, objectVersionID, err) + lastErr = err + continue + } + + // AccessDenied for another reason. + lastErr = fmt.Errorf("AccessDenied deleting COS Bucket (%s) Object (%s) Version: %s", bucketName, objectKey, objectVersionID) + continue + } + } + } + + return !lastPage + }) + + if err != nil { + return err + } + + if lastErr != nil { + if !ignoreObjectErrors { + return fmt.Errorf("error deleting at least one object version, last error: %s", lastErr) + } + + lastErr = nil + } + + err = conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, deleteMarker := range page.DeleteMarkers { + deleteMarkerKey := aws.StringValue(deleteMarker.Key) + deleteMarkerVersionID := aws.StringValue(deleteMarker.VersionId) + + if key != "" && key != deleteMarkerKey { + continue + } + + // Delete markers have no object lock protections. + err := deleteCOSObjectVersion(conn, bucketName, deleteMarkerKey, deleteMarkerVersionID, false) + + if err != nil { + lastErr = err + } + } + + return !lastPage + }) + + if err != nil { + return err + } + + if lastErr != nil { + if !ignoreObjectErrors { + return fmt.Errorf("error deleting at least one object delete marker, last error: %s", lastErr) + } + + lastErr = nil + } + + return nil +} + +// deleteCOSObjectVersion deletes a specific bucket object version. 
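+// An empty version string targets the object without a VersionId, which on a
+// version-enabled bucket typically creates a delete marker (standard S3 semantics)
+// rather than removing data.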
+func deleteCOSObjectVersion(conn *s3.S3, b, k, v string, force bool) error { + input := &s3.DeleteObjectInput{ + Bucket: aws.String(b), + Key: aws.String(k), + } + + if v != "" { + input.VersionId = aws.String(v) + } + + log.Printf("[INFO] Deleting COS Bucket (%s) Object (%s) Version: %s", b, k, v) + _, err := conn.DeleteObject(input) + + if err != nil { + log.Printf("[WARN] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", b, k, v, err) + } + + return err +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cr_namespace.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cr_namespace.go new file mode 100644 index 00000000000..f619d24446e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cr_namespace.go @@ -0,0 +1,222 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/container-registry-go-sdk/containerregistryv1" +) + +func resourceIBMCrNamespace() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCrNamespaceCreate, + Read: resourceIBMCrNamespaceRead, + Update: resourceIBMCrNamespaceUpdate, + Delete: resourceIBMCrNamespaceDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: InvokeValidator("ibm_cr_namespace", "name"), + Description: "The name of the namespace.", + }, + "resource_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "The ID of the resource group that the namespace will be created within.", + }, + "tags": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Optional: true, + Description: "List of tags", + }, + "account": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IBM Cloud account that owns the namespace.", + }, + "created_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "When the namespace was created.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "If the namespace has been assigned to a resource group, this is the IBM Cloud CRN representing the namespace.", + }, + "resource_created_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "When the namespace was assigned to a resource group.", + }, + "updated_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "When the namespace was last updated.", + }, + // HAND-ADDED DEPRECATED FIELDS, TO BE DELETED IN FUTURE + "created_on": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "When the namespace was created.", + Deprecated: "This field is deprecated", + }, + "updated_on": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "When the namespace was last updated.", + Deprecated: "This field is deprecated", + }, + }, + } +} + +func resourceIBMCrNamespaceValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "name", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: 
`^[a-z0-9]+[a-z0-9_-]+[a-z0-9]+$`,
+			MinValueLength:             4,
+			MaxValueLength:             30,
+		},
+	)
+
+	resourceValidator := ResourceValidator{ResourceName: "ibm_cr_namespace", Schema: validateSchema}
+	return &resourceValidator
+}
+
+func resourceIBMCrNamespaceCreate(d *schema.ResourceData, meta interface{}) error {
+	containerRegistryClient, err := meta.(ClientSession).ContainerRegistryV1()
+	if err != nil {
+		return err
+	}
+
+	createNamespaceOptions := &containerregistryv1.CreateNamespaceOptions{}
+
+	createNamespaceOptions.SetName(d.Get("name").(string))
+	if _, ok := d.GetOk("resource_group_id"); ok {
+		createNamespaceOptions.SetXAuthResourceGroup(d.Get("resource_group_id").(string))
+	} else {
+		defaultRg, err := defaultResourceGroup(meta)
+		if err != nil {
+			return err
+		}
+		createNamespaceOptions.SetXAuthResourceGroup(defaultRg)
+	}
+
+	namespace, response, err := containerRegistryClient.CreateNamespaceWithContext(context.TODO(), createNamespaceOptions)
+	if err != nil {
+		log.Printf("[DEBUG] CreateNamespaceWithContext failed %s\n%s", err, response)
+		return err
+	}
+
+	d.SetId(*namespace.Namespace)
+
+	return resourceIBMCrNamespaceRead(d, meta)
+}
+
+func resourceIBMCrNamespaceRead(d *schema.ResourceData, meta interface{}) error {
+	containerRegistryClient, err := meta.(ClientSession).ContainerRegistryV1()
+	if err != nil {
+		return err
+	}
+
+	listNamespaceDetailsOptions := &containerregistryv1.ListNamespaceDetailsOptions{}
+
+	namespaceDetailsList, response, err := containerRegistryClient.ListNamespaceDetailsWithContext(context.TODO(), listNamespaceDetailsOptions)
+
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		log.Printf("[DEBUG] ListNamespaceDetailsWithContext failed %s\n%s", err, response)
+		return err
+	}
+
+	var namespaceDetails containerregistryv1.NamespaceDetails
+	for _, namespaceDetails = range namespaceDetailsList {
+		if *namespaceDetails.Name == d.Id() {
+			break
+		}
+	}
+	if namespaceDetails.Name == nil || *namespaceDetails.Name != d.Id() {
+		d.SetId("")
+		return nil
+	}
+
+	if err = d.Set("name", namespaceDetails.Name); err != nil {
+		return fmt.Errorf("Error setting name: %s", err)
+	}
+	if err = d.Set("resource_group_id", namespaceDetails.ResourceGroup); err != nil {
+		return fmt.Errorf("Error setting resource_group_id: %s", err)
+	}
+	if err = d.Set("account", namespaceDetails.Account); err != nil {
+		return fmt.Errorf("Error setting account: %s", err)
+	}
+	if err = d.Set("created_date", namespaceDetails.CreatedDate); err != nil {
+		return fmt.Errorf("Error setting created_date: %s", err)
+	}
+	if err = d.Set("crn", namespaceDetails.CRN); err != nil {
+		return fmt.Errorf("Error setting crn: %s", err)
+	}
+	if err = d.Set("resource_created_date", namespaceDetails.ResourceCreatedDate); err != nil {
+		return fmt.Errorf("Error setting resource_created_date: %s", err)
+	}
+	if err = d.Set("updated_date", namespaceDetails.UpdatedDate); err != nil {
+		return fmt.Errorf("Error setting updated_date: %s", err)
+	}
+	// HAND-ADDED DEPRECATED FIELDS, TO BE DELETED IN FUTURE
+	if err = d.Set("updated_on", namespaceDetails.UpdatedDate); err != nil {
+		return fmt.Errorf("Error setting updated_on: %s", err)
+	}
+	if err = d.Set("created_on", namespaceDetails.CreatedDate); err != nil {
+		return fmt.Errorf("Error setting created_on: %s", err)
+	}
+
+	return nil
+}
+
+// Dummy update method just for local tags
+func resourceIBMCrNamespaceUpdate(d *schema.ResourceData, meta interface{}) error {
+	return resourceIBMCrNamespaceRead(d, 
meta) +} + +func resourceIBMCrNamespaceDelete(d *schema.ResourceData, meta interface{}) error { + containerRegistryClient, err := meta.(ClientSession).ContainerRegistryV1() + if err != nil { + return err + } + + deleteNamespaceOptions := &containerregistryv1.DeleteNamespaceOptions{} + + deleteNamespaceOptions.SetName(d.Id()) + + response, err := containerRegistryClient.DeleteNamespaceWithContext(context.TODO(), deleteNamespaceOptions) + if err != nil { + log.Printf("[DEBUG] DeleteNamespaceWithContext failed %s\n%s", err, response) + return err + } + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cr_retention_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cr_retention_policy.go new file mode 100644 index 00000000000..37ba8d6cecb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_cr_retention_policy.go @@ -0,0 +1,166 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/container-registry-go-sdk/containerregistryv1" +) + +func resourceIBMCrRetentionPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMCrRetentionPolicyCreate, + Read: resourceIBMCrRetentionPolicyRead, + Update: resourceIBMCrRetentionPolicyUpdate, + Delete: resourceIBMCrRetentionPolicyDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "namespace": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The namespace to which the retention policy is attached.", + }, + "images_per_repo": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + Description: "Determines how many images will be retained for each repository when the retention policy is executed. The value -1 denotes 'Unlimited' (all images are retained).", + }, + "retain_untagged": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Determines if untagged images are retained when executing the retention policy. 
This is false by default, meaning untagged images are deleted when the policy is executed.",
+			},
+		},
+	}
+}
+
+func resourceIBMCrRetentionPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+	containerRegistryClient, err := meta.(ClientSession).ContainerRegistryV1()
+	if err != nil {
+		return err
+	}
+
+	setRetentionPolicyOptions := &containerregistryv1.SetRetentionPolicyOptions{}
+
+	setRetentionPolicyOptions.SetNamespace(d.Get("namespace").(string))
+	setRetentionPolicyOptions.SetImagesPerRepo(int64(d.Get("images_per_repo").(int)))
+	if _, ok := d.GetOk("retain_untagged"); ok {
+		setRetentionPolicyOptions.SetRetainUntagged(d.Get("retain_untagged").(bool))
+	}
+
+	response, err := containerRegistryClient.SetRetentionPolicyWithContext(context.TODO(), setRetentionPolicyOptions)
+	if err != nil {
+		log.Printf("[DEBUG] SetRetentionPolicyWithContext failed %s\n%s", err, response)
+		return err
+	}
+
+	d.SetId(d.Get("namespace").(string))
+
+	return resourceIBMCrRetentionPolicyRead(d, meta)
+}
+
+func resourceIBMCrRetentionPolicyRead(d *schema.ResourceData, meta interface{}) error {
+	containerRegistryClient, err := meta.(ClientSession).ContainerRegistryV1()
+	if err != nil {
+		return err
+	}
+
+	getRetentionPolicyOptions := &containerregistryv1.GetRetentionPolicyOptions{}
+
+	getRetentionPolicyOptions.SetNamespace(d.Id())
+
+	retentionPolicy, response, err := containerRegistryClient.GetRetentionPolicyWithContext(context.TODO(), getRetentionPolicyOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		log.Printf("[DEBUG] GetRetentionPolicyWithContext failed %s\n%s", err, response)
+		return err
+	}
+
+	// A retention policy "does not exist" if `imagesPerRepo` is -1 and `retainUntagged` is true
+	if *retentionPolicy.ImagesPerRepo == -1 && *retentionPolicy.RetainUntagged {
+		d.SetId("")
+		return nil
+	}
+
+	if err = d.Set("namespace", retentionPolicy.Namespace); err != nil {
+		return fmt.Errorf("Error setting namespace: %s", err)
+	}
+	if err = d.Set("images_per_repo", intValue(retentionPolicy.ImagesPerRepo)); err != nil {
+		return fmt.Errorf("Error setting images_per_repo: %s", err)
+	}
+	if err = d.Set("retain_untagged", retentionPolicy.RetainUntagged); err != nil {
+		return fmt.Errorf("Error setting retain_untagged: %s", err)
+	}
+
+	return nil
+}
+
+func resourceIBMCrRetentionPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
+	containerRegistryClient, err := meta.(ClientSession).ContainerRegistryV1()
+	if err != nil {
+		return err
+	}
+
+	setRetentionPolicyOptions := &containerregistryv1.SetRetentionPolicyOptions{}
+
+	setRetentionPolicyOptions.SetNamespace(d.Id())
+
+	hasChange := false
+
+	if d.HasChange("namespace") {
+		setRetentionPolicyOptions.SetNamespace(d.Get("namespace").(string))
+		hasChange = true
+	}
+	if d.HasChange("images_per_repo") {
+		setRetentionPolicyOptions.SetImagesPerRepo(int64(d.Get("images_per_repo").(int)))
+		hasChange = true
+	}
+	if d.HasChange("retain_untagged") {
+		setRetentionPolicyOptions.SetRetainUntagged(d.Get("retain_untagged").(bool))
+		hasChange = true
+	}
+
+	if hasChange {
+		response, err := containerRegistryClient.SetRetentionPolicyWithContext(context.TODO(), setRetentionPolicyOptions)
+		if err != nil {
+			log.Printf("[DEBUG] SetRetentionPolicyWithContext failed %s\n%s", err, response)
+			return err
+		}
+	}
+
+	return resourceIBMCrRetentionPolicyRead(d, meta)
+}
+
+func resourceIBMCrRetentionPolicyDelete(d *schema.ResourceData, meta interface{}) error {
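+	// "Deleting" resets the policy to its defaults (images_per_repo = -1,
+	// retain_untagged = true), which the Read function above treats as no policy.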
containerRegistryClient, err := meta.(ClientSession).ContainerRegistryV1() + if err != nil { + return err + } + + setRetentionPolicyOptions := &containerregistryv1.SetRetentionPolicyOptions{} + + setRetentionPolicyOptions.SetNamespace(d.Id()) + setRetentionPolicyOptions.SetImagesPerRepo(-1) + setRetentionPolicyOptions.SetRetainUntagged(true) + + response, err := containerRegistryClient.SetRetentionPolicyWithContext(context.TODO(), setRetentionPolicyOptions) + if err != nil { + log.Printf("[DEBUG] SetRetentionPolicyWithContext failed %s\n%s", err, response) + return err + } + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_database.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_database.go new file mode 100644 index 00000000000..b21c543e8b4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_database.go @@ -0,0 +1,2096 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "os" + "strings" + "time" + + rc "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + // "github.com/IBM-Cloud/bluemix-go/api/globaltagging/globaltaggingv3" + "github.com/IBM-Cloud/bluemix-go/api/icd/icdv4" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/models" +) + +const ( + databaseInstanceSuccessStatus = "active" + databaseInstanceProvisioningStatus = "provisioning" + databaseInstanceProgressStatus = "in progress" + databaseInstanceInactiveStatus = "inactive" + databaseInstanceFailStatus = "failed" + databaseInstanceRemovedStatus = "removed" + databaseInstanceReclamation = "pending_reclamation" +) + +const ( + databaseTaskSuccessStatus = "completed" + databaseTaskProgressStatus = "running" + databaseTaskFailStatus = "failed" +) + +type CsEntry struct { + Name string + Password string + String string + Composed string + CertName string + CertBase64 string + Hosts []struct { + HostName string `json:"hostname"` + Port int `json:"port"` + } + Scheme string + QueryOptions map[string]interface{} + Path string + Database string +} + +func resourceIBMDatabaseInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMDatabaseInstanceCreate, + Read: resourceIBMDatabaseInstanceRead, + Update: resourceIBMDatabaseInstanceUpdate, + Delete: resourceIBMDatabaseInstanceDelete, + Exists: resourceIBMDatabaseInstanceExists, + CustomizeDiff: resourceIBMDatabaseInstanceDiff, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "Resource instance name for example, my Database instance", + Type: schema.TypeString, + Required: true, + }, + + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The id of the resource group in which the Database instance is present", + }, + + "location": { + Description: "The location or the region in which Database instance exists", + Type: schema.TypeString, + Required: true, + }, + + 
"service": { + Description: "The name of the Cloud Internet database service", + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"databases-for-etcd", "databases-for-postgresql", "databases-for-redis", "databases-for-elasticsearch", "databases-for-mongodb", "messages-for-rabbitmq", "databases-for-mysql"}), + }, + "plan": { + Description: "The plan type of the Database instance", + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"standard"}), + }, + + "status": { + Description: "The resource instance status", + Type: schema.TypeString, + Computed: true, + }, + + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "Unique identifier of resource instance", + }, + + "adminuser": { + Description: "The admin user id for the instance", + Type: schema.TypeString, + Computed: true, + }, + "adminpassword": { + Description: "The admin user password for the instance", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(10, 32), + Sensitive: true, + // DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // return true + // }, + }, + "version": { + Description: "The database version to provision if specified", + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "members_memory_allocation_mb": { + Description: "Memory allocation required for cluster", + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"node_count", "node_memory_allocation_mb", "node_disk_allocation_mb", "node_cpu_allocation_count"}, + }, + "members_disk_allocation_mb": { + Description: "Disk allocation required for cluster", + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"node_count", "node_memory_allocation_mb", "node_disk_allocation_mb", "node_cpu_allocation_count"}, + }, + "members_cpu_allocation_count": { + Description: "CPU allocation required for cluster", + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"node_count", "node_memory_allocation_mb", "node_disk_allocation_mb", "node_cpu_allocation_count"}, + }, + "node_count": { + Description: "Total number of nodes in the cluster", + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count"}, + }, + "node_memory_allocation_mb": { + Description: "Memory allocation per node", + Type: schema.TypeInt, + Optional: true, + Computed: true, + + ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count"}, + }, + "node_disk_allocation_mb": { + Description: "Disk allocation per node", + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count"}, + }, + "node_cpu_allocation_count": { + Description: "CPU allocation per node", + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count"}, + }, + "plan_validation": { + Description: "For elasticsearch and postgres perform database parameter validation during the plan phase. 
Otherwise, database parameter validation happens in apply phase.", + Type: schema.TypeBool, + Optional: true, + Default: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return true + } + return false + }, + }, + "service_endpoints": { + Description: "Types of the service endpoints. Possible values are 'public', 'private', 'public-and-private'.", + Type: schema.TypeString, + Optional: true, + Default: "public", + ValidateFunc: validateAllowedStringValue([]string{"public", "private", "public-and-private"}), + }, + "backup_id": { + Description: "The CRN of backup source database", + Type: schema.TypeString, + Optional: true, + }, + "remote_leader_id": { + Description: "The CRN of leader database", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: applyOnce, + }, + "key_protect_instance": { + Description: "The CRN of Key protect instance", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "key_protect_key": { + Description: "The CRN of Key protect key", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "backup_encryption_key_crn": { + Description: "The Backup Encryption Key CRN", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_database", "tag")}, + Set: resourceIBMVPCHash, + }, + "point_in_time_recovery_deployment_id": { + Description: "The CRN of source instance", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: applyOnce, + }, + "point_in_time_recovery_time": { + Description: "The point in time recovery time stamp of the deployed instance", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: applyOnce, + }, + "users": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Description: "User name", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(5, 32), + }, + "password": { + Description: "User password", + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validation.StringLenBetween(10, 32), + }, + }, + }, + }, + "connectionstrings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Description: "User name", + Type: schema.TypeString, + Computed: true, + }, + "composed": { + Description: "Connection string", + Type: schema.TypeString, + Computed: true, + }, + "scheme": { + Description: "DB scheme", + Type: schema.TypeString, + Computed: true, + }, + "certname": { + Description: "Certificate Name", + Type: schema.TypeString, + Computed: true, + }, + "certbase64": { + Description: "Certificate in base64 encoding", + Type: schema.TypeString, + Computed: true, + }, + "password": { + Description: "Password", + Type: schema.TypeString, + Computed: true, + }, + "queryoptions": { + Description: "DB query options", + Type: schema.TypeString, + Computed: true, + }, + "database": { + Description: "DB name", + Type: schema.TypeString, + Computed: true, + }, + "path": { + Description: "DB path", + Type: schema.TypeString, + Computed: true, + }, + "hosts": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hostname": { + Description: "DB host name", + Type: schema.TypeString, + Computed: true, + }, + "port": { + Description: "DB port", + Type: 
schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "whitelist": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Description: "Whitelist IP address in CIDR notation", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateCIDR, + }, + "description": { + Description: "Unique whitelist entry description", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 32), + }, + }, + }, + }, + "groups": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group_id": { + Description: "Scaling group name", + Type: schema.TypeString, + Computed: true, + }, + "count": { + Description: "Count of scaling groups for the instance", + Type: schema.TypeInt, + Computed: true, + }, + "memory": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "units": { + Type: schema.TypeString, + Computed: true, + Description: "The units memory is allocated in.", + }, + "allocation_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The current memory allocation for a group instance", + }, + "minimum_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum memory size for a group instance", + }, + "step_size_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The step size memory increases or decreases in.", + }, + "is_adjustable": { + Type: schema.TypeBool, + Computed: true, + Description: "Is the memory size adjustable.", + }, + "can_scale_down": { + Type: schema.TypeBool, + Computed: true, + Description: "Can memory scale down as well as up.", + }, + }, + }, + }, + "cpu": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "units": { + Type: schema.TypeString, + Computed: true, + Description: "The units CPUs are allocated in.", + }, + "allocation_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The current cpu allocation count", + }, + "minimum_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum number of cpus allowed", + }, + "step_size_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of CPUs allowed to step up or down by", + }, + "is_adjustable": { + Type: schema.TypeBool, + Computed: true, + Description: "Are the number of CPUs adjustable", + }, + "can_scale_down": { + Type: schema.TypeBool, + Computed: true, + Description: "Can the number of CPUs be scaled down as well as up", + }, + }, + }, + }, + "disk": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "units": { + Type: schema.TypeString, + Computed: true, + Description: "The units disk is allocated in", + }, + "allocation_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The current disk allocation", + }, + "minimum_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum disk size allowed", + }, + "step_size_mb": { + Type: schema.TypeInt, + Computed: true, + Description: "The step size disk increases or decreases in", + }, + "is_adjustable": { + Type: schema.TypeBool, + Computed: true, + Description: "Is the disk size adjustable", + }, + "can_scale_down": { + Type: schema.TypeBool, + Computed: true, + Description: "Can the disk size be scaled down as well as up", + }, + }, + }, + }, + }, + }, + }, + "auto_scaling": { + Type: schema.TypeList, + Description: "ICD 
Auto Scaling", + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeList, + Description: "Disk Auto Scaling", + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "capacity_enabled": { + Description: "Auto Scaling Scalar: Capacity Enabled", + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "free_space_less_than_percent": { + Description: "Auto Scaling Scalar: Capacity Free Space Less Than Percent", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "io_enabled": { + Description: "Auto Scaling Scalar: IO Utilization Enabled", + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "io_over_period": { + Description: "Auto Scaling Scalar: IO Utilization Over Period", + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "io_above_percent": { + Description: "Auto Scaling Scalar: IO Utilization Above Percent", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_increase_percent": { + Description: "Auto Scaling Rate: Increase Percent", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_period_seconds": { + Description: "Auto Scaling Rate: Period Seconds", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_limit_mb_per_member": { + Description: "Auto Scaling Rate: Limit mb per member", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_units": { + Description: "Auto Scaling Rate: Units ", + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "memory": { + Type: schema.TypeList, + Description: "Memory Auto Scaling", + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "io_enabled": { + Description: "Auto Scaling Scalar: IO Utilization Enabled", + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "io_over_period": { + Description: "Auto Scaling Scalar: IO Utilization Over Period", + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "io_above_percent": { + Description: "Auto Scaling Scalar: IO Utilization Above Percent", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_increase_percent": { + Description: "Auto Scaling Rate: Increase Percent", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_period_seconds": { + Description: "Auto Scaling Rate: Period Seconds", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_limit_mb_per_member": { + Description: "Auto Scaling Rate: Limit mb per member", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_units": { + Description: "Auto Scaling Rate: Units ", + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "cpu": { + Type: schema.TypeList, + Description: "CPU Auto Scaling", + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rate_increase_percent": { + Description: "Auto Scaling Rate: Increase Percent", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_period_seconds": { + Description: "Auto Scaling Rate: Period Seconds", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rate_limit_count_per_member": { + Description: "Auto Scaling Rate: Limit count per number", + Type: schema.TypeInt, + Optional: true, + 
Computed: true, + }, + "rate_units": { + Description: "Auto Scaling Rate: Units ", + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about the resource", + }, + }, + } +} +func resourceIBMICDValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmICDResourceValidator := ResourceValidator{ResourceName: "ibm_database", Schema: validateSchema} + return &ibmICDResourceValidator +} + +type Params struct { + Version string `json:"version,omitempty"` + KeyProtectKey string `json:"key_protect_key,omitempty"` + BackUpEncryptionCRN string `json:"backup_encryption_key_crn,omitempty"` + Memory int `json:"members_memory_allocation_mb,omitempty"` + Disk int `json:"members_disk_allocation_mb,omitempty"` + CPU int `json:"members_cpu_allocation_count,omitempty"` + KeyProtectInstance string `json:"key_protect_instance,omitempty"` + ServiceEndpoints string `json:"service-endpoints,omitempty"` + BackupID string `json:"backup-id,omitempty"` + RemoteLeaderID string `json:"remote_leader_id,omitempty"` + PITRDeploymentID string `json:"point_in_time_recovery_deployment_id,omitempty"` + PITRTimeStamp string `json:"point_in_time_recovery_time,omitempty"` +} + +func getDatabaseServiceDefaults(service string, meta interface{}) (*icdv4.Group, error) { + icdClient, err := meta.(ClientSession).ICDAPI() + if err != nil { + return nil, fmt.Errorf("Error getting database client settings: %s", err) + } + + var dbType string + if strings.HasPrefix(service, "messages-for-") { + dbType = service[len("messages-for-"):] + } else { + dbType = service[len("databases-for-"):] + } + + groupDefaults, err := icdClient.Groups().GetDefaultGroups(dbType) + if err != nil { + return nil, fmt.Errorf("ICD API is down for plan validation, set plan_validation=false") + } + return &groupDefaults.Groups[0], nil +} + +func getInitialNodeCount(d *schema.ResourceData, meta interface{}) (int, error) { + service := d.Get("service").(string) + planPhase := d.Get("plan_validation").(bool) + if planPhase { + groupDefaults, err := getDatabaseServiceDefaults(service, meta) + if err != nil { + return 0, err + } + return groupDefaults.Members.MinimumCount, nil + } else { + if service == "databases-for-elasticsearch" { + return 3, nil + } + return 2, nil + } +} + +type GroupLimit struct { + Units string + Allocation int + Minimum int + Maximum int + StepSize int + IsAdjustable bool + CanScaleDown bool +} + +func checkGroupValue(name string, limits GroupLimit, divider int, diff *schema.ResourceDiff) error { + if diff.HasChange(name) { + oldSetting, newSetting := diff.GetChange(name) + old := 
oldSetting.(int) + new := newSetting.(int) + + if new < limits.Minimum/divider || new > limits.Maximum/divider || new%(limits.StepSize/divider) != 0 { + return fmt.Errorf("%s must be >= %d and <= %d in increments of %d", name, limits.Minimum/divider, limits.Maximum/divider, limits.StepSize/divider) + } + if old != new && !limits.IsAdjustable { + return fmt.Errorf("%s can not change value after create", name) + } + if new < old && !limits.CanScaleDown { + return fmt.Errorf("%s can not scale down from %d to %d", name, old, new) + } + return nil + } + return nil +} + +type CountLimit struct { + Units string + AllocationCount int + MinimumCount int + MaximumCount int + StepSizeCount int + IsAdjustable bool + CanScaleDown bool +} + +func checkCountValue(name string, limits CountLimit, divider int, diff *schema.ResourceDiff) error { + groupLimit := GroupLimit{ + Units: limits.Units, + Allocation: limits.AllocationCount, + Minimum: limits.MinimumCount, + Maximum: limits.MaximumCount, + StepSize: limits.StepSizeCount, + IsAdjustable: limits.IsAdjustable, + CanScaleDown: limits.CanScaleDown, + } + return checkGroupValue(name, groupLimit, divider, diff) +} + +type MbLimit struct { + Units string + AllocationMb int + MinimumMb int + MaximumMb int + StepSizeMb int + IsAdjustable bool + CanScaleDown bool +} + +func checkMbValue(name string, limits MbLimit, divider int, diff *schema.ResourceDiff) error { + groupLimit := GroupLimit{ + Units: limits.Units, + Allocation: limits.AllocationMb, + Minimum: limits.MinimumMb, + Maximum: limits.MaximumMb, + StepSize: limits.StepSizeMb, + IsAdjustable: limits.IsAdjustable, + CanScaleDown: limits.CanScaleDown, + } + return checkGroupValue(name, groupLimit, divider, diff) +} + +func resourceIBMDatabaseInstanceDiff(diff *schema.ResourceDiff, meta interface{}) error { + + err := resourceTagsCustomizeDiff(diff) + if err != nil { + return err + } + + service := diff.Get("service").(string) + if service == "databases-for-postgresql" || service == "databases-for-elasticsearch" { + planPhase := diff.Get("plan_validation").(bool) + + if planPhase { + + groupDefaults, err := getDatabaseServiceDefaults(service, meta) + if err != nil { + return err + } + + err = checkMbValue("members_memory_allocation_mb", MbLimit(groupDefaults.Memory), 1, diff) + if err != nil { + return err + } + + err = checkMbValue("members_disk_allocation_mb", MbLimit(groupDefaults.Disk), 1, diff) + if err != nil { + return err + } + + err = checkCountValue("members_cpu_allocation_count", CountLimit(groupDefaults.Cpu), 1, diff) + if err != nil { + return err + } + + err = checkCountValue("node_count", CountLimit(groupDefaults.Members), 1, diff) + if err != nil { + return err + } + + var divider = groupDefaults.Members.MinimumCount + err = checkMbValue("node_memory_allocation_mb", MbLimit(groupDefaults.Memory), divider, diff) + if err != nil { + return err + } + + err = checkMbValue("node_disk_allocation_mb", MbLimit(groupDefaults.Disk), divider, diff) + if err != nil { + return err + } + + if diff.HasChange("node_cpu_allocation_count") { + err = checkCountValue("node_cpu_allocation_count", CountLimit(groupDefaults.Cpu), divider, diff) + if err != nil { + return err + } + } else if diff.HasChange("node_count") { + _, newSetting := diff.GetChange("node_count") + min := groupDefaults.Cpu.MinimumCount / divider + if newSetting != min { + return fmt.Errorf("node_cpu_allocation_count must be set when node_count is greater than the minimum %d", min) + } + } + } + } else if diff.HasChange("node_count") || 
diff.HasChange("node_memory_allocation_mb") || diff.HasChange("node_disk_allocation_mb") || diff.HasChange("node_cpu_allocation_count") { + return fmt.Errorf("node_count, node_memory_allocation_mb, node_disk_allocation_mb, node_cpu_allocation_count only supported for postgresql and elasticsearch") + } + + return nil +} + +// Replace with func wrapper for resourceIBMResourceInstanceCreate specifying serviceName := "database......." +func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + + serviceName := d.Get("service").(string) + plan := d.Get("plan").(string) + name := d.Get("name").(string) + location := d.Get("location").(string) + + rsInst := rc.CreateResourceInstanceOptions{ + Name: &name, + } + + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + serviceOff, err := rsCatRepo.FindByName(serviceName, true) + if err != nil { + return fmt.Errorf("Error retrieving database service offering: %s", err) + } + + servicePlan, err := rsCatRepo.GetServicePlanID(serviceOff[0], plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + rsInst.ResourcePlanID = &servicePlan + + deployments, err := rsCatRepo.ListDeployments(servicePlan) + if err != nil { + return fmt.Errorf("Error retrieving deployment for plan %s : %s", plan, err) + } + if len(deployments) == 0 { + return fmt.Errorf("No deployment found for service plan : %s", plan) + } + deployments, supportedLocations := filterDatabaseDeployments(deployments, location) + + if len(deployments) == 0 { + locationList := make([]string, 0, len(supportedLocations)) + for l := range supportedLocations { + locationList = append(locationList, l) + } + return fmt.Errorf("No deployment found for service plan %s at location %s.\nValid location(s) are: %q.", plan, location, locationList) + } + catalogCRN := deployments[0].CatalogCRN + rsInst.Target = &catalogCRN + + if rsGrpID, ok := d.GetOk("resource_group_id"); ok { + rgID := rsGrpID.(string) + rsInst.ResourceGroup = &rgID + } else { + defaultRg, err := defaultResourceGroup(meta) + if err != nil { + return err + } + rsInst.ResourceGroup = &defaultRg + } + + initialNodeCount, err := getInitialNodeCount(d, meta) + if err != nil { + return err + } + + params := Params{} + if memory, ok := d.GetOk("members_memory_allocation_mb"); ok { + params.Memory = memory.(int) + } + if memory, ok := d.GetOk("node_memory_allocation_mb"); ok { + params.Memory = memory.(int) * initialNodeCount + } + if disk, ok := d.GetOk("members_disk_allocation_mb"); ok { + params.Disk = disk.(int) + } + if disk, ok := d.GetOk("node_disk_allocation_mb"); ok { + params.Disk = disk.(int) * initialNodeCount + } + if cpu, ok := d.GetOk("members_cpu_allocation_count"); ok { + params.CPU = cpu.(int) + } + if cpu, ok := d.GetOk("node_cpu_allocation_count"); ok { + params.CPU = cpu.(int) * initialNodeCount + } + if version, ok := d.GetOk("version"); ok { + params.Version = version.(string) + } + if keyProtect, ok := d.GetOk("key_protect_key"); ok { + params.KeyProtectKey = keyProtect.(string) + } + if keyProtectInstance, ok := d.GetOk("key_protect_instance"); ok { + params.KeyProtectInstance = keyProtectInstance.(string) + } + if backupID, ok := d.GetOk("backup_id"); ok { + params.BackupID = backupID.(string) + } + if backUpEncryptionKey, ok := d.GetOk("backup_encryption_key_crn"); ok { + 
params.BackUpEncryptionCRN = backUpEncryptionKey.(string) + } + if remoteLeader, ok := d.GetOk("remote_leader_id"); ok { + params.RemoteLeaderID = remoteLeader.(string) + } + if pitrID, ok := d.GetOk("point_in_time_recovery_deployment_id"); ok { + params.PITRDeploymentID = pitrID.(string) + } + if pitrTime, ok := d.GetOk("point_in_time_recovery_time"); ok { + params.PITRTimeStamp = pitrTime.(string) + } + serviceEndpoint := d.Get("service_endpoints").(string) + params.ServiceEndpoints = serviceEndpoint + parameters, _ := json.Marshal(params) + var raw map[string]interface{} + json.Unmarshal(parameters, &raw) + //paramString := string(parameters[:]) + rsInst.Parameters = raw + + instance, response, err := rsConClient.CreateResourceInstance(&rsInst) + if err != nil { + return fmt.Errorf("Error creating database instance: %s %s", err, response) + } + + // Moved d.SetId(instance.ID) to after waiting for resource to finish creation. Otherwise Terraform initiates dependent tasks too early. + // Original flow had SetId here as it's required as input to waitForDatabaseInstanceCreate + + _, err = waitForDatabaseInstanceCreate(d, meta, *instance.ID) + if err != nil { + return fmt.Errorf( + "Error waiting for create database instance (%s) to complete: %s", d.Id(), err) + } + + d.SetId(*instance.ID) + + if node_count, ok := d.GetOk("node_count"); ok { + if initialNodeCount != node_count { + icdClient, err := meta.(ClientSession).ICDAPI() + if err != nil { + return fmt.Errorf("Error getting database client settings: %s", err) + } + + err = horizontalScale(d, meta, icdClient) + if err != nil { + return err + } + } + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk("tags"); ok || v != "" { + oldList, newList := d.GetChange("tags") + err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on create of ibm database (%s) tags: %s", d.Id(), err) + } + } + + icdId := EscapeUrlParm(*instance.ID) + icdClient, err := meta.(ClientSession).ICDAPI() + if err != nil { + return fmt.Errorf("Error getting database client settings: %s", err) + } + + if pw, ok := d.GetOk("adminpassword"); ok { + adminPassword := pw.(string) + cdb, err := icdClient.Cdbs().GetCdb(icdId) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return fmt.Errorf("The database instance was not found in the region set for the Provider, or the default of us-south. Specify the correct region in the provider definition, or create a provider alias for the correct region. 
%v", err) + } + return fmt.Errorf("Error getting database config while updating adminpassword for: %s with error %s\n", icdId, err) + } + + userParams := icdv4.UserReq{ + User: icdv4.User{ + Password: adminPassword, + }, + } + task, err := icdClient.Users().UpdateUser(icdId, cdb.AdminUser, userParams) + if err != nil { + return fmt.Errorf("Error updating database admin password: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for update of database (%s) admin password task to complete: %s", icdId, err) + } + } + + if wl, ok := d.GetOk("whitelist"); ok { + whitelist := expandWhitelist(wl.(*schema.Set)) + for _, wlEntry := range whitelist { + whitelistReq := icdv4.WhitelistReq{ + WhitelistEntry: icdv4.WhitelistEntry{ + Address: wlEntry.Address, + Description: wlEntry.Description, + }, + } + task, err := icdClient.Whitelists().CreateWhitelist(icdId, whitelistReq) + if err != nil { + return fmt.Errorf("Error updating database whitelist entry: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for update of database (%s) whitelist task to complete: %s", icdId, err) + } + } + } + if cpuRecord, ok := d.GetOk("auto_scaling.0.cpu"); ok { + params := icdv4.AutoscalingSetGroup{} + cpuBody, err := expandICDAutoScalingGroup(d, cpuRecord, "cpu") + if err != nil { + return fmt.Errorf("Error in getting cpuBody from expandICDAutoScalingGroup %s", err) + } + params.Autoscaling.CPU = &cpuBody + task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) + if err != nil { + return fmt.Errorf("Error updating database cpu auto_scaling group: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) cpu auto_scaling group update task to complete: %s", icdId, err) + } + + } + if diskRecord, ok := d.GetOk("auto_scaling.0.disk"); ok { + params := icdv4.AutoscalingSetGroup{} + diskBody, err := expandICDAutoScalingGroup(d, diskRecord, "disk") + if err != nil { + return fmt.Errorf("Error in getting diskBody from expandICDAutoScalingGroup %s", err) + } + params.Autoscaling.Disk = &diskBody + task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) + if err != nil { + return fmt.Errorf("Error updating database disk auto_scaling group: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) disk auto_scaling group update task to complete: %s", icdId, err) + } + + } + if memoryRecord, ok := d.GetOk("auto_scaling.0.memory"); ok { + params := icdv4.AutoscalingSetGroup{} + memoryBody, err := expandICDAutoScalingGroup(d, memoryRecord, "memory") + if err != nil { + return fmt.Errorf("Error in getting memoryBody from expandICDAutoScalingGroup %s", err) + } + params.Autoscaling.Memory = &memoryBody + task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) + if err != nil { + return fmt.Errorf("Error updating database memory auto_scaling group: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) memory auto_scaling group update task to complete: %s", icdId, err) + } + + } + + if userlist, ok := 
d.GetOk("users"); ok { + users := expandUsers(userlist.(*schema.Set)) + for _, user := range users { + userReq := icdv4.UserReq{ + User: icdv4.User{ + UserName: user.UserName, + Password: user.Password, + }, + } + task, err := icdClient.Users().CreateUser(icdId, userReq) + if err != nil { + return fmt.Errorf("Error updating database user (%s) entry: %s", user.UserName, err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for update of database (%s) user (%s) create task to complete: %s", icdId, user.UserName, err) + } + } + } + + return resourceIBMDatabaseInstanceRead(d, meta) +} + +func resourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + + instanceID := d.Id() + connectionEndpoint := "public" + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if strings.Contains(err.Error(), "Object not found") || + strings.Contains(err.Error(), "status code: 404") { + log.Printf("[WARN] Removing record from state because it's not found via the API") + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving resource instance: %s %s", err, response) + } + if strings.Contains(*instance.State, "removed") { + log.Printf("[WARN] Removing instance from TF state because it's now in removed state") + d.SetId("") + return nil + } + + tags, err := GetTagsUsingCRN(meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on get of ibm Database tags (%s) tags: %s", d.Id(), err) + } + d.Set("tags", tags) + d.Set("name", *instance.Name) + d.Set("status", *instance.State) + d.Set("resource_group_id", *instance.ResourceGroupID) + if instance.CRN != nil { + location := strings.Split(*instance.CRN, ":") + if len(location) > 5 { + d.Set("location", location[5]) + } + } + d.Set("guid", *instance.GUID) + + if instance.Parameters != nil { + if endpoint, ok := instance.Parameters["service-endpoints"]; ok { + if endpoint == "private" { + connectionEndpoint = "private" + } + d.Set("service_endpoints", endpoint) + } + + } + + d.Set(ResourceName, *instance.Name) + d.Set(ResourceCRN, *instance.CRN) + d.Set(ResourceStatus, *instance.State) + d.Set(ResourceGroupName, *instance.ResourceGroupCRN) + + rcontroller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, rcontroller+"/services/"+url.QueryEscape(*instance.CRN)) + + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + serviceOff, err := rsCatRepo.GetServiceName(*instance.ResourceID) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + d.Set("service", serviceOff) + + servicePlan, err := rsCatRepo.GetServicePlanName(*instance.ResourcePlanID) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + d.Set("plan", servicePlan) + + icdClient, err := meta.(ClientSession).ICDAPI() + if err != nil { + return fmt.Errorf("Error getting database client settings: %s", err) + } + + icdId := EscapeUrlParm(instanceID) + cdb, err := icdClient.Cdbs().GetCdb(icdId) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return fmt.Errorf("The database instance was not found in the region set for the 
Provider. Specify the correct region in the provider definition. %v", err) + } + return fmt.Errorf("Error getting database config for: %s with error %s\n", icdId, err) + } + d.Set("adminuser", cdb.AdminUser) + d.Set("version", cdb.Version) + + groupList, err := icdClient.Groups().GetGroups(icdId) + if err != nil { + return fmt.Errorf("Error getting database groups: %s", err) + } + d.Set("groups", flattenIcdGroups(groupList)) + d.Set("node_count", groupList.Groups[0].Members.AllocationCount) + + d.Set("members_memory_allocation_mb", groupList.Groups[0].Memory.AllocationMb) + d.Set("node_memory_allocation_mb", groupList.Groups[0].Memory.AllocationMb/groupList.Groups[0].Members.AllocationCount) + + d.Set("members_disk_allocation_mb", groupList.Groups[0].Disk.AllocationMb) + d.Set("node_disk_allocation_mb", groupList.Groups[0].Disk.AllocationMb/groupList.Groups[0].Members.AllocationCount) + + d.Set("members_cpu_allocation_count", groupList.Groups[0].Cpu.AllocationCount) + d.Set("node_cpu_allocation_count", groupList.Groups[0].Cpu.AllocationCount/groupList.Groups[0].Members.AllocationCount) + + autoScalingGroup, err := icdClient.AutoScaling().GetAutoScaling(icdId, "member") + if err != nil { + return fmt.Errorf("Error getting database auto scaling group: %s", err) + } + d.Set("auto_scaling", flattenICDAutoScalingGroup(autoScalingGroup)) + + whitelist, err := icdClient.Whitelists().GetWhitelist(icdId) + if err != nil { + return fmt.Errorf("Error getting database whitelist: %s", err) + } + d.Set("whitelist", flattenWhitelist(whitelist)) + + var connectionStrings []CsEntry + //ICD does not implement a GetUsers API. Users populated from tf configuration. + tfusers := d.Get("users").(*schema.Set) + users := expandUsers(tfusers) + user := icdv4.User{ + UserName: cdb.AdminUser, + } + users = append(users, user) + for _, user := range users { + userName := user.UserName + csEntry, err := getConnectionString(d, userName, connectionEndpoint, meta) + if err != nil { + return fmt.Errorf("Error getting user connection string for user (%s): %s", userName, err) + } + connectionStrings = append(connectionStrings, csEntry) + } + d.Set("connectionstrings", flattenConnectionStrings(connectionStrings)) + + return nil +} + +func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + + instanceID := d.Id() + updateReq := rc.UpdateResourceInstanceOptions{ + ID: &instanceID, + } + update := false + if d.HasChange("name") { + name := d.Get("name").(string) + updateReq.Name = &name + update = true + } + if d.HasChange("service_endpoints") { + params := Params{} + params.ServiceEndpoints = d.Get("service_endpoints").(string) + parameters, _ := json.Marshal(params) + var raw map[string]interface{} + json.Unmarshal(parameters, &raw) + updateReq.Parameters = raw + update = true + } + + if update { + _, response, err := rsConClient.UpdateResourceInstance(&updateReq) + if err != nil { + return fmt.Errorf("Error updating resource instance: %s %s", err, response) + } + + _, err = waitForDatabaseInstanceUpdate(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for update of resource instance (%s) to complete: %s", d.Id(), err) + } + + } + + if d.HasChange("tags") { + + oldList, newList := d.GetChange("tags") + err = UpdateTagsUsingCRN(oldList, newList, meta, instanceID) + if err != nil { + log.Printf( + "Error on update of Database (%s) tags: %s", d.Id(), err) + } + } + + icdClient, err := 
meta.(ClientSession).ICDAPI() + if err != nil { + return fmt.Errorf("Error getting database client settings: %s", err) + } + icdId := EscapeUrlParm(instanceID) + + if d.HasChange("node_count") { + err = horizontalScale(d, meta, icdClient) + if err != nil { + return err + } + } + + if d.HasChange("members_memory_allocation_mb") || d.HasChange("members_disk_allocation_mb") || d.HasChange("members_cpu_allocation_count") || d.HasChange("node_memory_allocation_mb") || d.HasChange("node_disk_allocation_mb") || d.HasChange("node_cpu_allocation_count") { + params := icdv4.GroupReq{} + if d.HasChange("members_memory_allocation_mb") { + memory := d.Get("members_memory_allocation_mb").(int) + memoryReq := icdv4.MemoryReq{AllocationMb: memory} + params.GroupBdy.Memory = &memoryReq + } + if d.HasChange("node_memory_allocation_mb") || d.HasChange("node_count") { + memory := d.Get("node_memory_allocation_mb").(int) + count := d.Get("node_count").(int) + memoryReq := icdv4.MemoryReq{AllocationMb: memory * count} + params.GroupBdy.Memory = &memoryReq + } + if d.HasChange("members_disk_allocation_mb") { + disk := d.Get("members_disk_allocation_mb").(int) + diskReq := icdv4.DiskReq{AllocationMb: disk} + params.GroupBdy.Disk = &diskReq + } + if d.HasChange("node_disk_allocation_mb") || d.HasChange("node_count") { + disk := d.Get("node_disk_allocation_mb").(int) + count := d.Get("node_count").(int) + diskReq := icdv4.DiskReq{AllocationMb: disk * count} + params.GroupBdy.Disk = &diskReq + } + if d.HasChange("members_cpu_allocation_count") { + cpu := d.Get("members_cpu_allocation_count").(int) + cpuReq := icdv4.CpuReq{AllocationCount: cpu} + params.GroupBdy.Cpu = &cpuReq + } + if d.HasChange("node_cpu_allocation_count") || d.HasChange("node_count") { + cpu := d.Get("node_cpu_allocation_count").(int) + count := d.Get("node_count").(int) + CpuReq := icdv4.CpuReq{AllocationCount: cpu * count} + params.GroupBdy.Cpu = &CpuReq + } + task, err := icdClient.Groups().UpdateGroup(icdId, "member", params) + if err != nil { + return fmt.Errorf("Error updating database scaling group: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) scaling group update task to complete: %s", icdId, err) + } + } + + if d.HasChange("auto_scaling.0.cpu") { + cpuRecord := d.Get("auto_scaling.0.cpu") + params := icdv4.AutoscalingSetGroup{} + cpuBody, err := expandICDAutoScalingGroup(d, cpuRecord, "cpu") + if err != nil { + return fmt.Errorf("Error in getting cpuBody from expandICDAutoScalingGroup %s", err) + } + params.Autoscaling.CPU = &cpuBody + task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) + if err != nil { + return fmt.Errorf("Error updating database cpu auto_scaling group: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) cpu auto_scaling group update task to complete: %s", icdId, err) + } + + } + if d.HasChange("auto_scaling.0.disk") { + diskRecord := d.Get("auto_scaling.0.disk") + params := icdv4.AutoscalingSetGroup{} + diskBody, err := expandICDAutoScalingGroup(d, diskRecord, "disk") + if err != nil { + return fmt.Errorf("Error in getting diskBody from expandICDAutoScalingGroup %s", err) + } + params.Autoscaling.Disk = &diskBody + task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) + if err != nil { + return fmt.Errorf("Error updating 
database disk auto_scaling group: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) disk auto_scaling group update task to complete: %s", icdId, err) + } + + } + if d.HasChange("auto_scaling.0.memory") { + memoryRecord := d.Get("auto_scaling.0.memory") + params := icdv4.AutoscalingSetGroup{} + memoryBody, err := expandICDAutoScalingGroup(d, memoryRecord, "memory") + if err != nil { + return fmt.Errorf("Error in getting memoryBody from expandICDAutoScalingGroup %s", err) + } + params.Autoscaling.Memory = &memoryBody + task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) + if err != nil { + return fmt.Errorf("Error updating database memory auto_scaling group: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) memory auto_scaling group update task to complete: %s", icdId, err) + } + + } + + if d.HasChange("adminpassword") { + adminUser := d.Get("adminuser").(string) + password := d.Get("adminpassword").(string) + userParams := icdv4.UserReq{ + User: icdv4.User{ + Password: password, + }, + } + task, err := icdClient.Users().UpdateUser(icdId, adminUser, userParams) + if err != nil { + return fmt.Errorf("Error updating database admin password: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) admin password update task to complete: %s", icdId, err) + } + } + + if d.HasChange("whitelist") { + oldList, newList := d.GetChange("whitelist") + if oldList == nil { + oldList = new(schema.Set) + } + if newList == nil { + newList = new(schema.Set) + } + os := oldList.(*schema.Set) + ns := newList.(*schema.Set) + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + if len(add) > 0 { + for _, entry := range add { + newEntry := entry.(map[string]interface{}) + wlEntry := icdv4.WhitelistEntry{ + Address: newEntry["address"].(string), + Description: newEntry["description"].(string), + } + whitelistReq := icdv4.WhitelistReq{ + WhitelistEntry: wlEntry, + } + task, err := icdClient.Whitelists().CreateWhitelist(icdId, whitelistReq) + if err != nil { + return fmt.Errorf("Error updating database whitelist entry %v : %s", wlEntry.Address, err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) whitelist create task to complete for entry %s : %s", icdId, wlEntry.Address, err) + } + + } + + } + + if len(remove) > 0 { + for _, entry := range remove { + newEntry := entry.(map[string]interface{}) + wlEntry := icdv4.WhitelistEntry{ + Address: newEntry["address"].(string), + Description: newEntry["description"].(string), + } + ipAddress := wlEntry.Address + task, err := icdClient.Whitelists().DeleteWhitelist(icdId, ipAddress) + if err != nil { + return fmt.Errorf("Error deleting database whitelist entry: %s", err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) whitelist delete task to complete for ipAddress %s : %s", icdId, ipAddress, err) + } + + } + } + } + + if d.HasChange("users") { + oldList, newList := d.GetChange("users") + if oldList == nil { + oldList = 
new(schema.Set) + } + if newList == nil { + newList = new(schema.Set) + } + os := oldList.(*schema.Set) + ns := newList.(*schema.Set) + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + if len(add) > 0 { + for _, entry := range add { + newEntry := entry.(map[string]interface{}) + userEntry := icdv4.User{ + UserName: newEntry["name"].(string), + Password: newEntry["password"].(string), + } + userReq := icdv4.UserReq{ + User: userEntry, + } + task, err := icdClient.Users().CreateUser(icdId, userReq) + if err != nil { + // ICD does not report if error was due to user already being defined. Check if can + // successfully update password by itself. + userParams := icdv4.UserReq{ + User: icdv4.User{ + Password: newEntry["password"].(string), + }, + } + task, err := icdClient.Users().UpdateUser(icdId, newEntry["name"].(string), userParams) + if err != nil { + return fmt.Errorf("Error updating database user (%s) password: %s", newEntry["name"].(string), err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) user (%s) password update task to complete: %s", icdId, newEntry["name"].(string), err) + } + } else { + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) user (%s) create task to complete: %s", icdId, newEntry["name"].(string), err) + } + } + } + + } + + if len(remove) > 0 { + for _, entry := range remove { + newEntry := entry.(map[string]interface{}) + userEntry := icdv4.User{ + UserName: newEntry["name"].(string), + Password: newEntry["password"].(string), + } + user := userEntry.UserName + task, err := icdClient.Users().DeleteUser(icdId, user) + if err != nil { + return fmt.Errorf("Error deleting database user (%s) entry: %s", user, err) + } + _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) user (%s) delete task to complete: %s", icdId, user, err) + } + } + } + } + + return resourceIBMDatabaseInstanceRead(d, meta) +} + +func horizontalScale(d *schema.ResourceData, meta interface{}, icdClient icdv4.ICDServiceAPI) error { + params := icdv4.GroupReq{} + + icdId := EscapeUrlParm(d.Id()) + + members := d.Get("node_count").(int) + membersReq := icdv4.MembersReq{AllocationCount: members} + params.GroupBdy.Members = &membersReq + + //task, err := icdClient.Groups().UpdateGroup(icdId, "member", params) + _, err := icdClient.Groups().UpdateGroup(icdId, "member", params) + + if err != nil { + return fmt.Errorf("Error updating database scaling group: %s", err) + } + + //_, err = waitForDatabaseTaskCompleteDuration(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) + //if err != nil { + // return fmt.Errorf( + // "Error waiting for database (%s) scaling group update task to complete: %s", icdId, err) + //} + + _, err = waitForDatabaseInstanceUpdate(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for database (%s) horizontal scale to complete: %s", d.Id(), err) + } + + return nil +} + +func getConnectionString(d *schema.ResourceData, userName, connectionEndpoint string, meta interface{}) (CsEntry, error) { + csEntry := CsEntry{} + icdClient, err := meta.(ClientSession).ICDAPI() + if err != nil { + return csEntry, fmt.Errorf("Error getting database client settings: %s", err) + } + + icdId := d.Id() + connection, err := 
icdClient.Connections().GetConnection(icdId, userName, connectionEndpoint) + if err != nil { + return csEntry, fmt.Errorf("Error getting database user connection string via ICD API: %s", err) + } + + service := d.Get("service") + dbConnection := icdv4.Uri{} + switch service { + case "databases-for-postgresql": + dbConnection = connection.Postgres + case "databases-for-redis": + dbConnection = connection.Rediss + case "databases-for-mongodb": + dbConnection = connection.Mongo + // case "databases-for-mysql": + // dbConnection = connection.Mysql + case "databases-for-elasticsearch": + dbConnection = connection.Https + case "databases-for-etcd": + dbConnection = connection.Grpc + case "messages-for-rabbitmq": + dbConnection = connection.Amqps + default: + return csEntry, fmt.Errorf("Unrecognised database type during connection string lookup: %s", service) + } + + csEntry = CsEntry{ + Name: userName, + Password: "", + // Populate only first 'composed' connection string as an example + Composed: dbConnection.Composed[0], + CertName: dbConnection.Certificate.Name, + CertBase64: dbConnection.Certificate.CertificateBase64, + Hosts: dbConnection.Hosts, + Scheme: dbConnection.Scheme, + Path: dbConnection.Path, + QueryOptions: dbConnection.QueryOptions.(map[string]interface{}), + } + // Postgres DB name is of type string, Redis is json.Number, others are nil + if dbConnection.Database != nil { + switch v := dbConnection.Database.(type) { + default: + return csEntry, fmt.Errorf("Unexpected data type: %T", v) + case json.Number: + csEntry.Database = dbConnection.Database.(json.Number).String() + case string: + csEntry.Database = dbConnection.Database.(string) + } + } else { + csEntry.Database = "" + } + return csEntry, nil +} + +func resourceIBMDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + id := d.Id() + recursive := true + deleteReq := rc.DeleteResourceInstanceOptions{ + Recursive: &recursive, + ID: &id, + } + response, err := rsConClient.DeleteResourceInstance(&deleteReq) + if err != nil { + // If prior delete occurs, instance is not immediately deleted, but remains in "removed" state" + // RC 410 with "Gone" returned as error + if strings.Contains(err.Error(), "Gone") || + strings.Contains(err.Error(), "status code: 410") { + log.Printf("[WARN] Resource instance already deleted %s\n ", err) + err = nil + } else { + return fmt.Errorf("Error deleting resource instance: %s %s ", err, response) + } + } + + _, err = waitForDatabaseInstanceDelete(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for resource instance (%s) to be deleted: %s", d.Id(), err) + } + + d.SetId("") + + return nil +} +func resourceIBMDatabaseInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s %s", err, response) + } + if instance != nil && (strings.Contains(*instance.State, "removed") || strings.Contains(*instance.State, databaseInstanceReclamation)) { + log.Printf("[WARN] Removing instance from state 
because it's in removed or pending_reclamation state") + d.SetId("") + return false, nil + } + + return *instance.ID == instanceID, nil +} + +func waitForDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}, instanceID string) (interface{}, error) { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{databaseInstanceProgressStatus, databaseInstanceInactiveStatus, databaseInstanceProvisioningStatus}, + Target: []string{databaseInstanceSuccessStatus}, + Refresh: func() (interface{}, string, error) { + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return nil, "", fmt.Errorf("The resource instance %s does not exist anymore: %v %s", d.Id(), err, response) + } + return nil, "", err + } + if *instance.State == databaseInstanceFailStatus { + return *instance, *instance.State, fmt.Errorf("The resource instance %s failed: %v %s", d.Id(), err, response) + } + return *instance, *instance.State, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func waitForDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) (interface{}, error) { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{databaseInstanceProgressStatus, databaseInstanceInactiveStatus}, + Target: []string{databaseInstanceSuccessStatus}, + Refresh: func() (interface{}, string, error) { + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return nil, "", fmt.Errorf("The resource instance %s does not exist anymore: %v %s", d.Id(), err, response) + } + return nil, "", err + } + if *instance.State == databaseInstanceFailStatus { + return *instance, *instance.State, fmt.Errorf("The resource instance %s failed: %v %s", d.Id(), err, response) + } + return *instance, *instance.State, nil + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func waitForDatabaseTaskComplete(taskId string, d *schema.ResourceData, meta interface{}, t time.Duration) (bool, error) { + icdClient, err := meta.(ClientSession).ICDAPI() + if err != nil { + return false, fmt.Errorf("Error getting database client settings: %s", err) + } + delayDuration := 5 * time.Second + + timeout := time.After(t) + delay := time.Tick(delayDuration) + innerTask := icdv4.Task{} + + for { + select { + case <-timeout: + return false, fmt.Errorf("[Error] Timed out waiting for database task to complete") + case <-delay: + innerTask, err = icdClient.Tasks().GetTask(EscapeUrlParm(taskId)) + if err != nil { + return false, fmt.Errorf("The ICD Get task on database update errored: %v", err) + } + if innerTask.Status == "failed" { + return false, fmt.Errorf("[Error] Database task failed") + } + // Completed status could be returned as "" due to interaction between bluemix-go and icd task response + // Otherwise the task is running or queued + if 
innerTask.Status == "completed" || innerTask.Status == "" { + return true, nil + } + } + } +} + +func waitForDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + stateConf := &resource.StateChangeConf{ + Pending: []string{databaseInstanceProgressStatus, databaseInstanceInactiveStatus, databaseInstanceSuccessStatus}, + Target: []string{databaseInstanceRemovedStatus, databaseInstanceReclamation}, + Refresh: func() (interface{}, string, error) { + rsInst := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + instance, response, err := rsConClient.GetResourceInstance(&rsInst) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return instance, databaseInstanceSuccessStatus, nil + } + return nil, "", err + } + if *instance.State == databaseInstanceFailStatus { + return instance, *instance.State, fmt.Errorf("The resource instance %s failed to delete: %v %s", d.Id(), err, response) + } + return *instance, *instance.State, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func filterDatabaseDeployments(deployments []models.ServiceDeployment, location string) ([]models.ServiceDeployment, map[string]bool) { + supportedDeployments := []models.ServiceDeployment{} + supportedLocations := make(map[string]bool) + for _, d := range deployments { + if d.Metadata.RCCompatible { + deploymentLocation := d.Metadata.Deployment.Location + supportedLocations[deploymentLocation] = true + if deploymentLocation == location { + supportedDeployments = append(supportedDeployments, d) + } + } + } + return supportedDeployments, supportedLocations +} + +func expandICDAutoScalingGroup(d *schema.ResourceData, asRecord interface{}, asType string) (asgBody icdv4.ASGBody, err error) { + + asgRecord := asRecord.([]interface{})[0].(map[string]interface{}) + asgCapacity := icdv4.CapacityBody{} + if _, ok := asgRecord["capacity_enabled"]; ok { + asgCapacity.Enabled = asgRecord["capacity_enabled"].(bool) + asgBody.Scalers.Capacity = &asgCapacity + } + if _, ok := asgRecord["free_space_less_than_percent"]; ok { + asgCapacity.FreeSpaceLessThanPercent = asgRecord["free_space_less_than_percent"].(int) + asgBody.Scalers.Capacity = &asgCapacity + } + + // IO Payload + asgIO := icdv4.IOBody{} + if _, ok := asgRecord["io_enabled"]; ok { + asgIO.Enabled = asgRecord["io_enabled"].(bool) + asgBody.Scalers.IO = &asgIO + } + if _, ok := asgRecord["io_over_period"]; ok { + asgIO.OverPeriod = asgRecord["io_over_period"].(string) + asgBody.Scalers.IO = &asgIO + } + if _, ok := asgRecord["io_above_percent"]; ok { + asgIO.AbovePercent = asgRecord["io_above_percent"].(int) + asgBody.Scalers.IO = &asgIO + } + + // Rate Payload + asgRate := icdv4.RateBody{} + if _, ok := asgRecord["rate_increase_percent"]; ok { + asgRate.IncreasePercent = asgRecord["rate_increase_percent"].(int) + asgBody.Rate = asgRate + } + if _, ok := asgRecord["rate_period_seconds"]; ok { + asgRate.PeriodSeconds = asgRecord["rate_period_seconds"].(int) + asgBody.Rate = asgRate + } + if _, ok := asgRecord["rate_limit_mb_per_member"]; ok { + asgRate.LimitMBPerMember = asgRecord["rate_limit_mb_per_member"].(int) + asgBody.Rate = asgRate + } + if _, ok := asgRecord["rate_limit_count_per_member"]; ok { + asgRate.LimitCountPerMember = 
asgRecord["rate_limit_count_per_member"].(int) + asgBody.Rate = asgRate + } + if _, ok := asgRecord["rate_units"]; ok { + asgRate.Units = asgRecord["rate_units"].(string) + asgBody.Rate = asgRate + } + + return asgBody, nil +} + +func flattenICDAutoScalingGroup(autoScalingGroup icdv4.AutoscalingGetGroup) []map[string]interface{} { + result := make([]map[string]interface{}, 0) + + memorys := make([]map[string]interface{}, 0) + memory := make(map[string]interface{}) + + if autoScalingGroup.Autoscaling.Memory.Scalers.IO != nil { + memoryIO := *autoScalingGroup.Autoscaling.Memory.Scalers.IO + memory["io_enabled"] = memoryIO.Enabled + memory["io_over_period"] = memoryIO.OverPeriod + memory["io_above_percent"] = memoryIO.AbovePercent + } + if &autoScalingGroup.Autoscaling.Memory.Rate != nil { + ip, _ := autoScalingGroup.Autoscaling.Memory.Rate.IncreasePercent.Float64() + memory["rate_increase_percent"] = int(ip) + memory["rate_period_seconds"] = autoScalingGroup.Autoscaling.Memory.Rate.PeriodSeconds + lmp, _ := autoScalingGroup.Autoscaling.Memory.Rate.LimitMBPerMember.Float64() + memory["rate_limit_mb_per_member"] = int(lmp) + memory["rate_units"] = autoScalingGroup.Autoscaling.Memory.Rate.Units + } + memorys = append(memorys, memory) + + cpus := make([]map[string]interface{}, 0) + cpu := make(map[string]interface{}) + + if &autoScalingGroup.Autoscaling.CPU.Rate != nil { + + ip, _ := autoScalingGroup.Autoscaling.CPU.Rate.IncreasePercent.Float64() + cpu["rate_increase_percent"] = int(ip) + cpu["rate_period_seconds"] = autoScalingGroup.Autoscaling.CPU.Rate.PeriodSeconds + cpu["rate_limit_count_per_member"] = autoScalingGroup.Autoscaling.CPU.Rate.LimitCountPerMember + cpu["rate_units"] = autoScalingGroup.Autoscaling.CPU.Rate.Units + } + cpus = append(cpus, cpu) + + disks := make([]map[string]interface{}, 0) + disk := make(map[string]interface{}) + if autoScalingGroup.Autoscaling.Disk.Scalers.Capacity != nil { + diskCapacity := *autoScalingGroup.Autoscaling.Disk.Scalers.Capacity + disk["capacity_enabled"] = diskCapacity.Enabled + disk["free_space_less_than_percent"] = diskCapacity.FreeSpaceLessThanPercent + } + if autoScalingGroup.Autoscaling.Disk.Scalers.IO != nil { + diskIO := *autoScalingGroup.Autoscaling.Disk.Scalers.IO + disk["io_enabled"] = diskIO.Enabled + disk["io_over_period"] = diskIO.OverPeriod + disk["io_above_percent"] = diskIO.AbovePercent + } + if &autoScalingGroup.Autoscaling.Disk.Rate != nil { + + ip, _ := autoScalingGroup.Autoscaling.Disk.Rate.IncreasePercent.Float64() + disk["rate_increase_percent"] = int(ip) + disk["rate_period_seconds"] = autoScalingGroup.Autoscaling.Disk.Rate.PeriodSeconds + lpm, _ := autoScalingGroup.Autoscaling.Disk.Rate.LimitMBPerMember.Float64() + disk["rate_limit_mb_per_member"] = int(lpm) + disk["rate_units"] = autoScalingGroup.Autoscaling.Disk.Rate.Units + } + + disks = append(disks, disk) + as := map[string]interface{}{ + "memory": memorys, + "cpu": cpus, + "disk": disks, + } + result = append(result, as) + return result +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_gateway.go new file mode 100644 index 00000000000..035c8a70713 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_gateway.go @@ -0,0 +1,927 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "strings" + "time" + + "github.com/IBM/networking-go-sdk/directlinkv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + dlLoaRejectReason = "loa_reject_reason" + dlCustomerName = "customer_name" + dlCarrierName = "carrier_name" + dlResourceGroup = "resource_group" + dlBgpAsn = "bgp_asn" + dlBgpBaseCidr = "bgp_base_cidr" + dlBgpCerCidr = "bgp_cer_cidr" + dlBgpIbmCidr = "bgp_ibm_cidr" + dlCrossConnectRouter = "cross_connect_router" + dlGlobal = "global" + dlLocationName = "location_name" + dlName = "name" + dlSpeedMbps = "speed_mbps" + dlOperationalStatus = "operational_status" + dlBgpStatus = "bgp_status" + dlLinkStatus = "link_status" + dlType = "type" + dlCrn = "crn" + dlCreatedAt = "created_at" + dlMetered = "metered" + dlLocationDisplayName = "location_display_name" + dlBgpIbmAsn = "bgp_ibm_asn" + dlCompletionNoticeRejectReason = "completion_notice_reject_reason" + dlPort = "port" + dlProviderAPIManaged = "provider_api_managed" + dlVlan = "vlan" + dlTags = "tags" + dlActive = "active" + dlFallbackCak = "fallback_cak" + dlPrimaryCak = "primary_cak" + dlSakExpiryTime = "sak_expiry_time" + dlWindowSize = "window_size" + dlMacSecConfig = "macsec_config" + dlCipherSuite = "cipher_suite" + dlConfidentialityOffset = "confidentiality_offset" + dlCryptographicAlgorithm = "cryptographic_algorithm" + dlKeyServerPriority = "key_server_priority" + dlMacSecConfigStatus = "status" + dlChangeRequest = "change_request" + dlGatewayProvisioning = "configuring" + dlGatewayProvisioningDone = "provisioned" + dlGatewayProvisioningRejected = "create_rejected" +) + +func resourceIBMDLGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMdlGatewayCreate, + Read: resourceIBMdlGatewayRead, + Delete: resourceIBMdlGatewayDelete, + Exists: resourceIBMdlGatewayExists, + Update: resourceIBMdlGatewayUpdate, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + dlBgpAsn: { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "BGP ASN", + }, + dlBgpBaseCidr: { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + DiffSuppressFunc: applyOnce, + Description: "BGP base CIDR", + }, + dlPort: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: "Gateway port", + ConflictsWith: []string{"location_name", "cross_connect_router", "carrier_name", "customer_name"}, + }, + + dlCrossConnectRouter: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Cross connect router", + }, + dlGlobal: { + Type: schema.TypeBool, + Required: true, + ForceNew: false, + Description: "Gateways with global routing (true) can connect to networks outside their associated region", + }, + dlLocationName: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: "Gateway location", + }, + dlMetered: { + Type: schema.TypeBool, + Required: true, + ForceNew: false, + 
Description: "Metered billing option", + }, + dlName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + Description: "The unique user-defined name for this gateway", + ValidateFunc: InvokeValidator("ibm_dl_gateway", dlName), + // ValidateFunc: validateRegexpLen(1, 63, "^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$"), + }, + dlCarrierName: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Carrier name", + // ValidateFunc: validateRegexpLen(1, 128, "^[a-z][A-Z][0-9][ -_]$"), + }, + dlCustomerName: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Customer name", + // ValidateFunc: validateRegexpLen(1, 128, "^[a-z][A-Z][0-9][ -_]$"), + }, + dlSpeedMbps: { + Type: schema.TypeInt, + Required: true, + ForceNew: false, + Description: "Gateway speed in megabits per second", + }, + dlType: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Gateway type", + ValidateFunc: InvokeValidator("ibm_dl_gateway", dlType), + // ValidateFunc: validateAllowedStringValue([]string{"dedicated", "connect"}), + }, + dlMacSecConfig: { + Type: schema.TypeList, + MinItems: 0, + MaxItems: 1, + Optional: true, + ForceNew: false, + Description: "MACsec configuration information", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + dlActive: { + Type: schema.TypeBool, + Required: true, + ForceNew: false, + Description: "Indicate whether MACsec protection should be active (true) or inactive (false) for this MACsec enabled gateway", + }, + dlPrimaryCak: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + Description: "Desired primary connectivity association key. Keys for a MACsec configuration must have names with an even number of characters from [0-9a-fA-F]", + }, + dlFallbackCak: { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Description: "Fallback connectivity association key. 
Keys used for MACsec configuration must have names with an even number of characters from [0-9a-fA-F]", + }, + dlWindowSize: { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Default: 148809600, + Description: "Replay protection window size", + }, + dlActiveCak: { + Type: schema.TypeString, + Computed: true, + Description: "Active connectivity association key.", + }, + dlSakExpiryTime: { + Type: schema.TypeInt, + Computed: true, + Description: "Secure Association Key (SAK) expiry time in seconds", + }, + dlCipherSuite: { + Type: schema.TypeString, + Computed: true, + Description: "SAK cipher suite", + }, + dlConfidentialityOffset: { + Type: schema.TypeInt, + Computed: true, + Description: "Confidentiality Offset", + }, + dlCryptographicAlgorithm: { + Type: schema.TypeString, + Computed: true, + Description: "Cryptographic Algorithm", + }, + dlKeyServerPriority: { + Type: schema.TypeInt, + Computed: true, + Description: "Key Server Priority", + }, + dlMacSecConfigStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The current status of MACsec on the device for this gateway", + }, + dlSecurityPolicy: { + Type: schema.TypeString, + Computed: true, + Description: "Packets without MACsec headers are not dropped when security_policy is should_secure.", + }, + }, + }, + }, + dlBgpCerCidr: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "BGP customer edge router CIDR", + }, + dlLoaRejectReason: { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: false, + Description: "Loa reject reason", + }, + dlBgpIbmCidr: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "BGP IBM CIDR", + }, + dlResourceGroup: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Gateway resource group", + }, + + dlOperationalStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway operational status", + }, + dlProviderAPIManaged: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether gateway was created through a provider portal", + }, + dlVlan: { + Type: schema.TypeInt, + Computed: true, + Description: "VLAN allocated for this gateway", + }, + dlBgpIbmAsn: { + Type: schema.TypeInt, + Computed: true, + Description: "IBM BGP ASN", + }, + + dlBgpStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway BGP status", + }, + dlChangeRequest: { + Type: schema.TypeString, + Computed: true, + Description: "Changes pending approval for provider managed Direct Link Connect gateways", + }, + dlCompletionNoticeRejectReason: { + Type: schema.TypeString, + Computed: true, + Description: "Reason for completion notice rejection", + }, + dlCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time resource was created", + }, + dlCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN (Cloud Resource Name) of this gateway", + }, + dlLinkStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway link status", + }, + dlLocationDisplayName: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway location long name", + }, + dlTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_dl_gateway", "tag")}, + Set: resourceIBMVPCHash, + Description: "Tags for the direct link gateway", + }, + 
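+			// The remaining attributes are computed, read-only metadata that
+			// the provider populates once the gateway exists.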
ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMDLGatewayValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 2) + dlTypeAllowedValues := "dedicated, connect" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: dlType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: dlTypeAllowedValues}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: dlName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISDLGatewayResourceValidator := ResourceValidator{ResourceName: "ibm_dl_gateway", Schema: validateSchema} + return &ibmISDLGatewayResourceValidator +} + +func directlinkClient(meta interface{}) (*directlinkv1.DirectLinkV1, error) { + sess, err := meta.(ClientSession).DirectlinkV1API() + return sess, err +} + +func resourceIBMdlGatewayCreate(d *schema.ResourceData, meta interface{}) error { + directLink, err := directlinkClient(meta) + if err != nil { + return err + } + dtype := d.Get(dlType).(string) + createGatewayOptionsModel := &directlinkv1.CreateGatewayOptions{} + name := d.Get(dlName).(string) + speed := int64(d.Get(dlSpeedMbps).(int)) + global := d.Get(dlGlobal).(bool) + bgpAsn := int64(d.Get(dlBgpAsn).(int)) + metered := d.Get(dlMetered).(bool) + + if dtype == "dedicated" { + var crossConnectRouter, carrierName, locationName, customerName string + if _, ok := d.GetOk(dlCarrierName); ok { + carrierName = d.Get(dlCarrierName).(string) + // gatewayTemplateModel.CarrierName = &carrierName + } else { + err = fmt.Errorf("Error creating gateway, %s is a required field", dlCarrierName) + log.Printf("%s is a required field", dlCarrierName) + return err + } + if _, ok := d.GetOk(dlCrossConnectRouter); ok { + crossConnectRouter = d.Get(dlCrossConnectRouter).(string) + // gatewayTemplateModel.CrossConnectRouter = &crossConnectRouter + } else { + err = fmt.Errorf("Error creating gateway, %s is a required field", dlCrossConnectRouter) + log.Printf("%s is a required field", dlCrossConnectRouter) + return err + } + if _, ok := d.GetOk(dlLocationName); ok { + locationName = d.Get(dlLocationName).(string) + //gatewayTemplateModel.LocationName = &locationName + } else { + err = fmt.Errorf("Error creating gateway, %s is a required field", dlLocationName) + log.Printf("%s is a required field", dlLocationName) + return err + } + if _, ok := d.GetOk(dlCustomerName); ok { + customerName = d.Get(dlCustomerName).(string) + 
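+			// carrier_name, cross_connect_router, location_name and customer_name
+			// are optional in the schema because they do not apply to type=connect
+			// gateways; for dedicated gateways they are therefore checked
+			// explicitly above.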
//gatewayTemplateModel.CustomerName = &customerName + } else { + err = fmt.Errorf("Error creating gateway, %s is a required field", dlCustomerName) + log.Printf("%s is a required field", dlCustomerName) + return err + } + gatewayDedicatedTemplateModel, _ := directLink.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, name, speed, dtype, carrierName, crossConnectRouter, customerName, locationName) + + if _, ok := d.GetOk(dlBgpIbmCidr); ok { + bgpIbmCidr := d.Get(dlBgpIbmCidr).(string) + gatewayDedicatedTemplateModel.BgpIbmCidr = &bgpIbmCidr + + } + if _, ok := d.GetOk(dlBgpCerCidr); ok { + bgpCerCidr := d.Get(dlBgpCerCidr).(string) + gatewayDedicatedTemplateModel.BgpCerCidr = &bgpCerCidr + + } + if _, ok := d.GetOk(dlResourceGroup); ok { + resourceGroup := d.Get(dlResourceGroup).(string) + gatewayDedicatedTemplateModel.ResourceGroup = &directlinkv1.ResourceGroupIdentity{ID: &resourceGroup} + + } + if _, ok := d.GetOk(dlBgpBaseCidr); ok { + bgpBaseCidr := d.Get(dlBgpBaseCidr).(string) + gatewayDedicatedTemplateModel.BgpBaseCidr = &bgpBaseCidr + } + if _, ok := d.GetOk(dlMacSecConfig); ok { + // Construct an instance of the GatewayMacsecConfigTemplate model + gatewayMacsecConfigTemplateModel := new(directlinkv1.GatewayMacsecConfigTemplate) + activebool := d.Get("macsec_config.0.active").(bool) + gatewayMacsecConfigTemplateModel.Active = &activebool + + // Construct an instance of the GatewayMacsecCak model + gatewayMacsecCakModel := new(directlinkv1.GatewayMacsecConfigTemplatePrimaryCak) + primaryCakstr := d.Get("macsec_config.0.primary_cak").(string) + gatewayMacsecCakModel.Crn = &primaryCakstr + gatewayMacsecConfigTemplateModel.PrimaryCak = gatewayMacsecCakModel + + if fallbackCak, ok := d.GetOk("macsec_config.0.fallback_cak"); ok { + // Construct an instance of the GatewayMacsecCak model + gatewayMacsecCakModel := new(directlinkv1.GatewayMacsecConfigTemplateFallbackCak) + fallbackCakstr := fallbackCak.(string) + gatewayMacsecCakModel.Crn = &fallbackCakstr + gatewayMacsecConfigTemplateModel.FallbackCak = gatewayMacsecCakModel + } + if windowSize, ok := d.GetOk("macsec_config.0.window_size"); ok { + windowSizeint := int64(windowSize.(int)) + gatewayMacsecConfigTemplateModel.WindowSize = &windowSizeint + } + gatewayDedicatedTemplateModel.MacsecConfig = gatewayMacsecConfigTemplateModel + } + createGatewayOptionsModel.GatewayTemplate = gatewayDedicatedTemplateModel + + } else if dtype == "connect" { + var portID string + if _, ok := d.GetOk(dlPort); ok { + portID = d.Get(dlPort).(string) + } + if portID != "" { + portIdentity, _ := directLink.NewGatewayPortIdentity(portID) + gatewayConnectTemplateModel, _ := directLink.NewGatewayTemplateGatewayTypeConnectTemplate(bgpAsn, global, metered, name, speed, dtype, portIdentity) + + if _, ok := d.GetOk(dlBgpIbmCidr); ok { + bgpIbmCidr := d.Get(dlBgpIbmCidr).(string) + gatewayConnectTemplateModel.BgpIbmCidr = &bgpIbmCidr + + } + if _, ok := d.GetOk(dlBgpBaseCidr); ok { + bgpBaseCidr := d.Get(dlBgpBaseCidr).(string) + gatewayConnectTemplateModel.BgpBaseCidr = &bgpBaseCidr + } + if _, ok := d.GetOk(dlBgpCerCidr); ok { + bgpCerCidr := d.Get(dlBgpCerCidr).(string) + gatewayConnectTemplateModel.BgpCerCidr = &bgpCerCidr + + } + if _, ok := d.GetOk(dlResourceGroup); ok { + resourceGroup := d.Get(dlResourceGroup).(string) + gatewayConnectTemplateModel.ResourceGroup = &directlinkv1.ResourceGroupIdentity{ID: &resourceGroup} + + } + createGatewayOptionsModel.GatewayTemplate = gatewayConnectTemplateModel + + } else { + err = fmt.Errorf("Error 
creating direct link connect gateway, %s is a required field", dlPort) + return err + } + } + + gateway, response, err := directLink.CreateGateway(createGatewayOptionsModel) + if err != nil { + return fmt.Errorf("[DEBUG] Create Direct Link Gateway (%s) err %s\n%s", dtype, err, response) + } + d.SetId(*gateway.ID) + + log.Printf("[INFO] Created Direct Link Gateway (%s Template) : %s", dtype, *gateway.ID) + if dtype == "connect" { + getPortOptions := directLink.NewGetPortOptions(*gateway.Port.ID) + port, response, err := directLink.GetPort(getPortOptions) + if err != nil { + return fmt.Errorf("[ERROR] Error getting port %s %s", response, err) + } + if port != nil && port.ProviderName != nil && !strings.Contains(strings.ToLower(*port.ProviderName), "netbond") && !strings.Contains(strings.ToLower(*port.ProviderName), "megaport") { + _, err = isWaitForDirectLinkAvailable(directLink, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + } + + } + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(dlTags); ok || v != "" { + oldList, newList := d.GetChange(dlTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *gateway.Crn) + if err != nil { + log.Printf( + "Error on create of resource direct link gateway %s (%s) tags: %s", dtype, d.Id(), err) + } + } + + return resourceIBMdlGatewayRead(d, meta) +} + +func resourceIBMdlGatewayRead(d *schema.ResourceData, meta interface{}) error { + dtype := d.Get(dlType).(string) + log.Printf("[INFO] Inside resourceIBMdlGatewayRead: %s", dtype) + + directLink, err := directlinkClient(meta) + if err != nil { + return err + } + + ID := d.Id() + + getOptions := &directlinkv1.GetGatewayOptions{ + ID: &ID, + } + log.Printf("[INFO] Calling getgateway api: %s", dtype) + + instance, response, err := directLink.GetGateway(getOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Direct Link Gateway (%s Template): %s\n%s", dtype, err, response) + } + if instance.Name != nil { + d.Set(dlName, *instance.Name) + } + if instance.Crn != nil { + d.Set(dlCrn, *instance.Crn) + } + if instance.BgpAsn != nil { + d.Set(dlBgpAsn, *instance.BgpAsn) + } + if instance.BgpIbmCidr != nil { + d.Set(dlBgpIbmCidr, *instance.BgpIbmCidr) + } + if instance.BgpIbmAsn != nil { + d.Set(dlBgpIbmAsn, *instance.BgpIbmAsn) + } + if instance.Metered != nil { + d.Set(dlMetered, *instance.Metered) + } + if instance.CrossConnectRouter != nil { + d.Set(dlCrossConnectRouter, *instance.CrossConnectRouter) + } + if instance.BgpBaseCidr != nil { + d.Set(dlBgpBaseCidr, *instance.BgpBaseCidr) + } + if instance.BgpCerCidr != nil { + d.Set(dlBgpCerCidr, *instance.BgpCerCidr) + } + if instance.ProviderApiManaged != nil { + d.Set(dlProviderAPIManaged, *instance.ProviderApiManaged) + } + if instance.Type != nil { + d.Set(dlType, *instance.Type) + } + if instance.SpeedMbps != nil { + d.Set(dlSpeedMbps, *instance.SpeedMbps) + } + if instance.OperationalStatus != nil { + d.Set(dlOperationalStatus, *instance.OperationalStatus) + } + if instance.BgpStatus != nil { + d.Set(dlBgpStatus, *instance.BgpStatus) + } + if instance.CompletionNoticeRejectReason != nil { + d.Set(dlCompletionNoticeRejectReason, *instance.CompletionNoticeRejectReason) + } + if instance.LocationName != nil { + d.Set(dlLocationName, *instance.LocationName) + } + if instance.LocationDisplayName != nil { + d.Set(dlLocationDisplayName, *instance.LocationDisplayName) + } + if instance.Vlan != nil { + d.Set(dlVlan, *instance.Vlan) + } + if 
instance.Global != nil { + d.Set(dlGlobal, *instance.Global) + } + if instance.Port != nil { + d.Set(dlPort, *instance.Port.ID) + } + if instance.LinkStatus != nil { + d.Set(dlLinkStatus, *instance.LinkStatus) + } + if instance.CreatedAt != nil { + d.Set(dlCreatedAt, instance.CreatedAt.String()) + } + if dtype == "dedicated" { + if instance.MacsecConfig != nil { + macsecList := make([]map[string]interface{}, 0) + currentMacSec := map[string]interface{}{} + // Construct an instance of the GatewayMacsecConfigTemplate model + gatewayMacsecConfigTemplateModel := instance.MacsecConfig + if gatewayMacsecConfigTemplateModel.Active != nil { + currentMacSec[dlActive] = *gatewayMacsecConfigTemplateModel.Active + } + if gatewayMacsecConfigTemplateModel.ActiveCak != nil { + if gatewayMacsecConfigTemplateModel.ActiveCak.Crn != nil { + currentMacSec[dlActiveCak] = *gatewayMacsecConfigTemplateModel.ActiveCak.Crn + } + } + if gatewayMacsecConfigTemplateModel.PrimaryCak != nil { + currentMacSec[dlPrimaryCak] = *gatewayMacsecConfigTemplateModel.PrimaryCak.Crn + } + if gatewayMacsecConfigTemplateModel.FallbackCak != nil { + if gatewayMacsecConfigTemplateModel.FallbackCak.Crn != nil { + currentMacSec[dlFallbackCak] = *gatewayMacsecConfigTemplateModel.FallbackCak.Crn + } + } + if gatewayMacsecConfigTemplateModel.SakExpiryTime != nil { + currentMacSec[dlSakExpiryTime] = *gatewayMacsecConfigTemplateModel.SakExpiryTime + } + if gatewayMacsecConfigTemplateModel.SecurityPolicy != nil { + currentMacSec[dlSecurityPolicy] = *gatewayMacsecConfigTemplateModel.SecurityPolicy + } + if gatewayMacsecConfigTemplateModel.WindowSize != nil { + currentMacSec[dlWindowSize] = *gatewayMacsecConfigTemplateModel.WindowSize + } + if gatewayMacsecConfigTemplateModel.CipherSuite != nil { + currentMacSec[dlCipherSuite] = *gatewayMacsecConfigTemplateModel.CipherSuite + } + if gatewayMacsecConfigTemplateModel.ConfidentialityOffset != nil { + currentMacSec[dlConfidentialityOffset] = *gatewayMacsecConfigTemplateModel.ConfidentialityOffset + } + if gatewayMacsecConfigTemplateModel.CryptographicAlgorithm != nil { + currentMacSec[dlCryptographicAlgorithm] = *gatewayMacsecConfigTemplateModel.CryptographicAlgorithm + } + if gatewayMacsecConfigTemplateModel.KeyServerPriority != nil { + currentMacSec[dlKeyServerPriority] = *gatewayMacsecConfigTemplateModel.KeyServerPriority + } + if gatewayMacsecConfigTemplateModel.Status != nil { + currentMacSec[dlMacSecConfigStatus] = *gatewayMacsecConfigTemplateModel.Status + } + macsecList = append(macsecList, currentMacSec) + d.Set(dlMacSecConfig, macsecList) + } + } + if instance.ChangeRequest != nil { + gatewayChangeRequestIntf := instance.ChangeRequest + gatewayChangeRequest := gatewayChangeRequestIntf.(*directlinkv1.GatewayChangeRequest) + d.Set(dlChangeRequest, *gatewayChangeRequest.Type) + } + tags, err := GetTagsUsingCRN(meta, *instance.Crn) + if err != nil { + log.Printf( + "Error on get of resource direct link gateway (%s) tags: %s", d.Id(), err) + } + d.Set(dlTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/interconnectivity/direct-link") + d.Set(ResourceName, *instance.Name) + d.Set(ResourceCRN, *instance.Crn) + d.Set(ResourceStatus, *instance.OperationalStatus) + if instance.ResourceGroup != nil { + rg := instance.ResourceGroup + d.Set(dlResourceGroup, *rg.ID) + d.Set(ResourceGroupName, *rg.ID) + } + + return nil +} +func isWaitForDirectLinkAvailable(client *directlinkv1.DirectLinkV1, id string, timeout 
time.Duration) (interface{}, error) { + log.Printf("Waiting for direct link (%s) to be provisioned.", id) + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", dlGatewayProvisioning}, + Target: []string{dlGatewayProvisioningDone, ""}, + Refresh: isDirectLinkRefreshFunc(client, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + return stateConf.WaitForState() +} +func isDirectLinkRefreshFunc(client *directlinkv1.DirectLinkV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getOptions := &directlinkv1.GetGatewayOptions{ + ID: &id, + } + instance, response, err := client.GetGateway(getOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Direct Link: %s\n%s", err, response) + } + if *instance.OperationalStatus == "provisioned" || *instance.OperationalStatus == "failed" || *instance.OperationalStatus == "create_rejected" { + return instance, dlGatewayProvisioningDone, nil + } + return instance, dlGatewayProvisioning, nil + } +} + +func resourceIBMdlGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + + directLink, err := directlinkClient(meta) + if err != nil { + return err + } + + ID := d.Id() + getOptions := &directlinkv1.GetGatewayOptions{ + ID: &ID, + } + instance, detail, err := directLink.GetGateway(getOptions) + + if err != nil { + log.Printf("Error fetching Direct Link Gateway :%s", detail) + return err + } + + updateGatewayOptionsModel := &directlinkv1.UpdateGatewayOptions{} + updateGatewayOptionsModel.ID = &ID + dtype := *instance.Type + + if d.HasChange(dlTags) { + oldList, newList := d.GetChange(dlTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.Crn) + if err != nil { + log.Printf( + "Error on update of resource direct link gateway (%s) tags: %s", *instance.ID, err) + } + } + + if d.HasChange(dlName) { + name := d.Get(dlName).(string) + updateGatewayOptionsModel.Name = &name + } + if d.HasChange(dlSpeedMbps) { + speed := int64(d.Get(dlSpeedMbps).(int)) + updateGatewayOptionsModel.SpeedMbps = &speed + } + /* + NOTE: Operational Status cannot be maintained in terraform. The status keeps changing automatically in server side. + Hence, cannot be maintained in terraform. + Operational Status and LoaRejectReason are linked. + Hence, a user cannot update through terraform. 
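+	   The commented-out block below is retained to show which fields would be
+	   involved if the API ever allowed these to be set client-side.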
+ + if d.HasChange(dlOperationalStatus) { + if _, ok := d.GetOk(dlOperationalStatus); ok { + operStatus := d.Get(dlOperationalStatus).(string) + updateGatewayOptionsModel.OperationalStatus = &operStatus + } + if _, ok := d.GetOk(dlLoaRejectReason); ok { + loaRejectReason := d.Get(dlLoaRejectReason).(string) + updateGatewayOptionsModel.LoaRejectReason = &loaRejectReason + } + } + */ + if d.HasChange(dlGlobal) { + global := d.Get(dlGlobal).(bool) + updateGatewayOptionsModel.Global = &global + } + if d.HasChange(dlMetered) { + metered := d.Get(dlMetered).(bool) + updateGatewayOptionsModel.Metered = &metered + } + if dtype == "dedicated" { + if d.HasChange(dlMacSecConfig) && !d.IsNewResource() { + // Construct an instance of the GatewayMacsecConfigTemplate model + gatewayMacsecConfigTemplatePatchModel := new(directlinkv1.GatewayMacsecConfigPatchTemplate) + if d.HasChange("macsec_config.0.active") { + activebool := d.Get("macsec_config.0.active").(bool) + gatewayMacsecConfigTemplatePatchModel.Active = &activebool + } + if d.HasChange("macsec_config.0.primary_cak") { + // Construct an instance of the GatewayMacsecCak model + gatewayMacsecCakModel := new(directlinkv1.GatewayMacsecConfigPatchTemplatePrimaryCak) + primaryCakstr := d.Get("macsec_config.0.primary_cak").(string) + gatewayMacsecCakModel.Crn = &primaryCakstr + gatewayMacsecConfigTemplatePatchModel.PrimaryCak = gatewayMacsecCakModel + } + if d.HasChange("macsec_config.0.fallback_cak") { + // Construct an instance of the GatewayMacsecCak model + gatewayMacsecCakModel := new(directlinkv1.GatewayMacsecConfigPatchTemplateFallbackCak) + if _, ok := d.GetOk("macsec_config.0.fallback_cak"); ok { + fallbackCakstr := d.Get("macsec_config.0.fallback_cak").(string) + gatewayMacsecCakModel.Crn = &fallbackCakstr + gatewayMacsecConfigTemplatePatchModel.FallbackCak = gatewayMacsecCakModel + } else { + fallbackCakstr := "" + gatewayMacsecCakModel.Crn = &fallbackCakstr + } + gatewayMacsecConfigTemplatePatchModel.FallbackCak = gatewayMacsecCakModel + } + if d.HasChange("macsec_config.0.window_size") { + if _, ok := d.GetOk("macsec_config.0.window_size"); ok { + windowSizeint := int64(d.Get("macsec_config.0.window_size").(int)) + gatewayMacsecConfigTemplatePatchModel.WindowSize = &windowSizeint + } + } + updateGatewayOptionsModel.MacsecConfig = gatewayMacsecConfigTemplatePatchModel + } else { + updateGatewayOptionsModel.MacsecConfig = nil + } + } + _, response, err := directLink.UpdateGateway(updateGatewayOptionsModel) + if err != nil { + log.Printf("[DEBUG] Update Direct Link Gateway err %s\n%s", err, response) + return err + } + + return resourceIBMdlGatewayRead(d, meta) +} + +func resourceIBMdlGatewayDelete(d *schema.ResourceData, meta interface{}) error { + + directLink, err := directlinkClient(meta) + if err != nil { + return err + } + + ID := d.Id() + delOptions := &directlinkv1.DeleteGatewayOptions{ + ID: &ID, + } + response, err := directLink.DeleteGateway(delOptions) + + if err != nil && response.StatusCode != 404 { + log.Printf("Error deleting Direct Link Gateway : %s", response) + return err + } + + d.SetId("") + return nil +} + +func resourceIBMdlGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) { + directLink, err := directlinkClient(meta) + if err != nil { + return false, err + } + + ID := d.Id() + + getOptions := &directlinkv1.GetGatewayOptions{ + ID: &ID, + } + _, response, err := directLink.GetGateway(getOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return false, nil + } 
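+	// Any error other than the 404 handled above is surfaced to Terraform
+	// instead of being treated as "resource gone".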
+ return false, fmt.Errorf("Error Getting Direct Link Gateway : %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_gateway_virtual_connection.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_gateway_virtual_connection.go new file mode 100644 index 00000000000..a4418788d7a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_gateway_virtual_connection.go @@ -0,0 +1,317 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/IBM/networking-go-sdk/directlinkv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + dlGatewaysVirtualConnections = "gateway_vcs" + dlVCNetworkAccount = "network_account" + dlVCNetworkId = "network_id" + dlVCName = "name" + dlVCType = "type" + dlVCCreatedAt = "created_at" + dlVCStatus = "status" + dlGatewayId = "gateway" + ID = "id" + dlVirtualConnectionId = "virtual_connection_id" +) + +func resourceIBMDLGatewayVC() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMdlGatewayVCCreate, + Read: resourceIBMdlGatewayVCRead, + Delete: resourceIBMdlGatewayVCDelete, + Exists: resourceIBMdlGatewayVCExists, + Update: resourceIBMdlGatewayVCUpdate, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + dlGatewayId: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The Direct Link gateway identifier", + }, + dlVCType: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: InvokeValidator("ibm_dl_virtual_connection", dlVCType), + Description: "The type of virtual connection.Allowable values (classic,vpc)", + }, + dlVCName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_dl_virtual_connection", dlVCName), + Description: "The user-defined name for this virtual connection. Virtualconnection names are unique within a gateway. This is the name of thevirtual connection itself, the network being connected may have its ownname attribute", + }, + dlVCNetworkId: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Unique identifier of the target network. For type=vpc virtual connections this is the CRN of the target VPC. 
This field does not apply to type=classic connections.",
+			},
+			dlVCCreatedAt: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The date and time resource was created",
+			},
+			dlVCStatus: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Status of the virtual connection. Possible values: [pending, attached, approval_pending, rejected, expired, deleting, detached_by_network_pending, detached_by_network]",
+			},
+
+			dlVCNetworkAccount: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "For virtual connections across two different IBM Cloud Accounts network_account indicates the account that owns the target network.",
+			},
+			dlVirtualConnectionId: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The Direct Gateway virtual connection identifier",
+			},
+
+			RelatedCRN: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The crn of the Direct link gateway",
+			},
+		},
+	}
+}
+func resourceIBMdlGatewayVCValidator() *ResourceValidator {
+
+	// build with zero length so that no empty ValidateSchema entries precede
+	// the appended ones
+	validateSchema := make([]ValidateSchema, 0)
+	vcType := "classic, vpc"
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 dlVCType,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              vcType})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 dlVCName,
+			ValidateFunctionIdentifier: ValidateRegexpLen,
+			Type:                       TypeString,
+			Optional:                   true,
+			Regexp:                     `^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$`,
+			MinValueLength:             1,
+			MaxValueLength:             63})
+
+	ibmDLGatewayVCResourceValidator := ResourceValidator{ResourceName: "ibm_dl_virtual_connection", Schema: validateSchema}
+
+	return &ibmDLGatewayVCResourceValidator
+}
+func resourceIBMdlGatewayVCCreate(d *schema.ResourceData, meta interface{}) error {
+	directLink, err := directlinkClient(meta)
+	if err != nil {
+		return err
+	}
+
+	createGatewayVCOptions := &directlinkv1.CreateGatewayVirtualConnectionOptions{}
+
+	gatewayId := d.Get(dlGatewayId).(string)
+	createGatewayVCOptions.SetGatewayID(gatewayId)
+	vcName := d.Get(dlVCName).(string)
+	createGatewayVCOptions.SetName(vcName)
+	vcType := d.Get(dlVCType).(string)
+	createGatewayVCOptions.SetType(vcType)
+
+	if _, ok := d.GetOk(dlVCNetworkId); ok {
+		vcNetworkId := d.Get(dlVCNetworkId).(string)
+		createGatewayVCOptions.SetNetworkID(vcNetworkId)
+	}
+
+	gatewayVC, response, err := directLink.CreateGatewayVirtualConnection(createGatewayVCOptions)
+	if err != nil {
+		log.Printf("[DEBUG] Create Direct Link Gateway (Dedicated) Virtual connection err %s\n%s", err, response)
+		return err
+	}
+
+	d.SetId(fmt.Sprintf("%s/%s", gatewayId, *gatewayVC.ID))
+	d.Set(dlVirtualConnectionId, *gatewayVC.ID)
+	return resourceIBMdlGatewayVCRead(d, meta)
+}
+
+func resourceIBMdlGatewayVCRead(d *schema.ResourceData, meta interface{}) error {
+
+	directLink, err := directlinkClient(meta)
+	if err != nil {
+		return err
+	}
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+
+	gatewayId := parts[0]
+	ID := parts[1]
+
+	getGatewayVirtualConnectionOptions := &directlinkv1.GetGatewayVirtualConnectionOptions{}
+	getGatewayVirtualConnectionOptions.SetGatewayID(gatewayId)
+	getGatewayVirtualConnectionOptions.SetID(ID)
+	instance, response, err := directLink.GetGatewayVirtualConnection(getGatewayVirtualConnectionOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error Getting Direct Link Gateway Connection (%s): %s\n%s", ID, err, response)
+	}
+
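+	// The resource ID is the composite "<gateway_id>/<virtual_connection_id>"
+	// written by Create; idParts above splits it back into those two parts.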
if instance.Name != nil { + d.Set(dlVCName, *instance.Name) + } + if instance.Type != nil { + d.Set(dlVCType, *instance.Type) + } + if instance.NetworkAccount != nil { + d.Set(dlVCNetworkAccount, *instance.NetworkAccount) + } + if instance.NetworkID != nil { + d.Set(dlVCNetworkId, *instance.NetworkID) + } + if instance.CreatedAt != nil { + d.Set(dlVCCreatedAt, instance.CreatedAt.String()) + } + if instance.Status != nil { + d.Set(dlVCStatus, *instance.Status) + } + d.Set(dlVirtualConnectionId, *instance.ID) + d.Set(dlGatewayId, gatewayId) + getGatewayOptions := &directlinkv1.GetGatewayOptions{ + ID: &gatewayId, + } + dlgw, response, err := directLink.GetGateway(getGatewayOptions) + if err != nil { + return fmt.Errorf("Error Getting Direct Link Gateway (Dedicated Template): %s\n%s", err, response) + } + d.Set(RelatedCRN, *dlgw.Crn) + return nil +} + +func resourceIBMdlGatewayVCUpdate(d *schema.ResourceData, meta interface{}) error { + + directLink, err := directlinkClient(meta) + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + gatewayId := parts[0] + ID := parts[1] + + getVCOptions := &directlinkv1.GetGatewayVirtualConnectionOptions{ + ID: &ID, + } + getVCOptions.SetGatewayID(gatewayId) + _, detail, err := directLink.GetGatewayVirtualConnection(getVCOptions) + + if err != nil { + log.Printf("Error fetching Direct Link Gateway (Dedicated Template) Virtual Connection:%s", detail) + return err + } + + updateGatewayVCOptions := &directlinkv1.UpdateGatewayVirtualConnectionOptions{} + updateGatewayVCOptions.ID = &ID + updateGatewayVCOptions.SetGatewayID(gatewayId) + if d.HasChange(dlName) { + if d.Get(dlName) != nil { + name := d.Get(dlName).(string) + updateGatewayVCOptions.Name = &name + } + } + + _, response, err := directLink.UpdateGatewayVirtualConnection(updateGatewayVCOptions) + if err != nil { + log.Printf("[DEBUG] Update Direct Link Gateway (Dedicated) Virtual Connection err %s\n%s", err, response) + return err + } + + return resourceIBMdlGatewayVCRead(d, meta) +} + +func resourceIBMdlGatewayVCDelete(d *schema.ResourceData, meta interface{}) error { + + directLink, err := directlinkClient(meta) + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + gatewayId := parts[0] + ID := parts[1] + delVCOptions := &directlinkv1.DeleteGatewayVirtualConnectionOptions{ + ID: &ID, + } + delVCOptions.SetGatewayID(gatewayId) + response, err := directLink.DeleteGatewayVirtualConnection(delVCOptions) + + if err != nil && response.StatusCode != 404 { + log.Printf("Error deleting Direct Link Gateway (Dedicated Template) Virtual Connection: %s", response) + return err + } + + d.SetId("") + return nil +} + +func resourceIBMdlGatewayVCExists(d *schema.ResourceData, meta interface{}) (bool, error) { + directLink, err := directlinkClient(meta) + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + + gatewayId := parts[0] + ID := parts[1] + + getVCOptions := &directlinkv1.GetGatewayVirtualConnectionOptions{ + ID: &ID, + } + getVCOptions.SetGatewayID(gatewayId) + _, response, err := directLink.GetGatewayVirtualConnection(getVCOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return false, nil + } + return false, fmt.Errorf("Error Getting Direct Link Gateway (Dedicated Template) Virtual Connection: %s\n%s", err, response) + } + + if response.StatusCode == 404 { + d.SetId("") + return false, nil + } + 
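+	// NOTE: this second 404 check is defensive; a 404 normally surfaces as an
+	// error and is already handled in the branch above.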
return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_provider_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_provider_gateway.go new file mode 100644 index 00000000000..f5ac848f371 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_provider_gateway.go @@ -0,0 +1,408 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + customerAccountID = "customer_account_id" +) + +func resourceIBMDLProviderGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMdlProviderGatewayCreate, + Read: resourceIBMdlProviderGatewayRead, + Delete: resourceIBMdlProviderGatewayDelete, + Exists: resourceIBMdlProviderGatewayExists, + Update: resourceIBMdlProviderGatewayUpdate, + Importer: &schema.ResourceImporter{}, + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + dlBgpAsn: { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "BGP ASN", + }, + dlBgpCerCidr: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "BGP customer edge router CIDR", + }, + dlBgpIbmCidr: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "BGP IBM CIDR", + }, + dlPort: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Gateway port", + }, + dlCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time resource was created", + }, + dlName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + Description: "The unique user-defined name for this gateway", + ValidateFunc: InvokeValidator("ibm_dl_provider_gateway", dlName), + // ValidateFunc: validateRegexpLen(1, 63, "^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$"), + }, + + dlSpeedMbps: { + Type: schema.TypeInt, + Required: true, + ForceNew: false, + Description: "Gateway speed in megabits per second", + }, + customerAccountID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Customer IBM Cloud account ID for the new gateway. 
A gateway object containing the pending create request will become available in the specified account.", + ValidateFunc: InvokeValidator("ibm_dl_provider_gateway", customerAccountID), + }, + dlOperationalStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Gateway operational status", + }, + dlType: { + Type: schema.TypeString, + Description: "Gateway type", + Computed: true, + }, + dlProviderAPIManaged: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether gateway was created through a provider portal", + }, + dlVlan: { + Type: schema.TypeInt, + Computed: true, + Description: "VLAN allocated for this gateway", + }, + dlCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN (Cloud Resource Name) of this gateway", + }, + dlTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_dl_provider_gateway", "tag")}, + Set: resourceIBMVPCHash, + Description: "Tags for the direct link gateway", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMDLProviderGatewayValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 2) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: customerAccountID, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^[0-9a-f]+$`, + MinValueLength: 1, + MaxValueLength: 32}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: dlName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISDLGatewayResourceValidator := ResourceValidator{ResourceName: "ibm_dl_provider_gateway", Schema: validateSchema} + return &ibmISDLGatewayResourceValidator +} + +func resourceIBMdlProviderGatewayCreate(d *schema.ResourceData, meta interface{}) error { + directLink, err := directlinkProviderClient(meta) + if err != nil { + return err + } + name := d.Get(dlName).(string) + speed := int64(d.Get(dlSpeedMbps).(int)) + custAccountID := d.Get(customerAccountID).(string) + bgpAsn := int64(d.Get(dlBgpAsn).(int)) + var portID string + portID = d.Get(dlPort).(string) + portIdentity, _ := directLink.NewProviderGatewayPortIdentity(portID) + gatewayOptions := directLink.NewCreateProviderGatewayOptions(bgpAsn, custAccountID, name, portIdentity, speed) + if _, ok := d.GetOk(dlBgpIbmCidr); ok { + bgpIbmCidr := d.Get(dlBgpIbmCidr).(string) + gatewayOptions.BgpIbmCidr = &bgpIbmCidr + + } + if _, ok := 
d.GetOk(dlBgpCerCidr); ok { + bgpCerCidr := d.Get(dlBgpCerCidr).(string) + gatewayOptions.BgpCerCidr = &bgpCerCidr + + } + + gateway, response, err := directLink.CreateProviderGateway(gatewayOptions) + if err != nil { + log.Printf("[DEBUG] Create Direct Link Provider Gateway err %s\n%s", err, response) + return err + } + d.SetId(*gateway.ID) + + log.Printf("[INFO] Created Direct Link Provider Gateway : %s", *gateway.ID) + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(dlTags); ok || v != "" { + oldList, newList := d.GetChange(dlTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *gateway.Crn) + if err != nil { + log.Printf( + "Error on create of resource direct link Provider gateway (%s) tags: %s", d.Id(), err) + } + } + + return resourceIBMdlProviderGatewayRead(d, meta) +} + +func resourceIBMdlProviderGatewayRead(d *schema.ResourceData, meta interface{}) error { + dtype := d.Get(dlType).(string) + log.Printf("[INFO] Inside resourceIBMdlGatewayRead: %s", dtype) + + directLink, err := directlinkProviderClient(meta) + if err != nil { + return err + } + + ID := d.Id() + + getOptions := directLink.NewGetProviderGatewayOptions(ID) + + log.Printf("[INFO] Calling getgateway api: %s", dtype) + + instance, response, err := directLink.GetProviderGateway(getOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Direct Link Gateway (%s Template): %s\n%s", dtype, err, response) + } + if instance.ID != nil { + d.Set("id", *instance.ID) + } + if instance.Name != nil { + d.Set(dlName, *instance.Name) + } + if instance.Crn != nil { + d.Set(dlCrn, *instance.Crn) + } + if instance.BgpAsn != nil { + d.Set(dlBgpAsn, *instance.BgpAsn) + } + if instance.BgpIbmCidr != nil { + d.Set(dlBgpIbmCidr, *instance.BgpIbmCidr) + } + if instance.BgpIbmAsn != nil { + d.Set(dlBgpIbmAsn, *instance.BgpIbmAsn) + } + + if instance.BgpCerCidr != nil { + d.Set(dlBgpCerCidr, *instance.BgpCerCidr) + } + if instance.ProviderApiManaged != nil { + d.Set(dlProviderAPIManaged, *instance.ProviderApiManaged) + } + if instance.Type != nil { + d.Set(dlType, *instance.Type) + } + if instance.SpeedMbps != nil { + d.Set(dlSpeedMbps, *instance.SpeedMbps) + } + if instance.OperationalStatus != nil { + d.Set(dlOperationalStatus, *instance.OperationalStatus) + } + if instance.BgpStatus != nil { + d.Set(dlBgpStatus, *instance.BgpStatus) + } + + if instance.Vlan != nil { + d.Set(dlVlan, *instance.Vlan) + } + if instance.CustomerAccountID != nil { + d.Set(customerAccountID, *instance.CustomerAccountID) + } + if instance.Port != nil { + d.Set(dlPort, *instance.Port.ID) + } + + if instance.CreatedAt != nil { + d.Set(dlCreatedAt, instance.CreatedAt.String()) + } + tags, err := GetTagsUsingCRN(meta, *instance.Crn) + if err != nil { + log.Printf( + "Error on get of resource direct link gateway (%s) tags: %s", d.Id(), err) + } + d.Set(dlTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/interconnectivity/direct-link") + d.Set(ResourceName, *instance.Name) + d.Set(ResourceCRN, *instance.Crn) + d.Set(ResourceStatus, *instance.OperationalStatus) + return nil +} + +func resourceIBMdlProviderGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + + directLink, err := directlinkProviderClient(meta) + if err != nil { + return err + } + + ID := d.Id() + getOptions := directLink.NewGetProviderGatewayOptions(ID) + + log.Printf("[INFO] Calling getgateway provider api") + + 
instance, response, err := directLink.GetProviderGateway(getOptions)
+	if err != nil {
+		log.Printf("[DEBUG] Get Direct Link Provider Gateway err %s\n%s", err, response)
+		return err
+	}
+
+	updateGatewayOptionsModel := directLink.NewUpdateProviderGatewayOptions(ID)
+
+	if d.HasChange(dlTags) {
+		oldList, newList := d.GetChange(dlTags)
+		err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.Crn)
+		if err != nil {
+			log.Printf(
+				"Error on update of resource direct link gateway dedicated (%s) tags: %s", *instance.ID, err)
+		}
+	}
+
+	if d.HasChange(dlName) {
+		name := d.Get(dlName).(string)
+		updateGatewayOptionsModel.Name = &name
+	}
+	if d.HasChange(dlSpeedMbps) {
+		speed := int64(d.Get(dlSpeedMbps).(int))
+		updateGatewayOptionsModel.SpeedMbps = &speed
+	}
+
+	_, response, err = directLink.UpdateProviderGateway(updateGatewayOptionsModel)
+	if err != nil {
+		log.Printf("[DEBUG] Update Direct Link Provider Gateway err %s\n%s", err, response)
+		return err
+	}
+
+	return resourceIBMdlProviderGatewayRead(d, meta)
+}
+
+func resourceIBMdlProviderGatewayDelete(d *schema.ResourceData, meta interface{}) error {
+
+	directLink, err := directlinkProviderClient(meta)
+	if err != nil {
+		return err
+	}
+
+	ID := d.Id()
+	delOptions := directLink.NewDeleteProviderGatewayOptions(ID)
+	_, response, err := directLink.DeleteProviderGateway(delOptions)
+	if err != nil {
+		// a 404 means the gateway is already gone; any other error is fatal
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		log.Printf("Error deleting Direct Link Provider Gateway: %s %s ", response, err)
+		return err
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMdlProviderGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	directLink, err := directlinkProviderClient(meta)
+	if err != nil {
+		return false, err
+	}
+
+	ID := d.Id()
+
+	getOptions := directLink.NewGetProviderGatewayOptions(ID)
+	_, response, err := directLink.GetProviderGateway(getOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return false, nil
+		}
+		return false, fmt.Errorf("Error Getting Direct Link Provider Gateway: %s\n%s", err, response)
+	}
+	return true, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_domain.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_domain.go
new file mode 100644
index 00000000000..b3140c1f413
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_domain.go
@@ -0,0 +1,220 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMDNSDomain() *schema.Resource {
+	return &schema.Resource{
+		Exists:   resourceIBMDNSDomainExists,
+		Create:   resourceIBMDNSDomainCreate,
+		Read:     resourceIBMDNSDomainRead,
+		Update:   resourceIBMDNSDomainUpdate,
+		Delete:   resourceIBMDNSDomainDelete,
+		Importer: &schema.ResourceImporter{},
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "DNS name",
+			},
+
+			"serial": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "DNS serial info",
+			},
+
+			"update_date": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "DNS update date",
+			},
+
+			"target": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "DNS target info",
+			},
+
+			"tags": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Set:         schema.HashString,
+				Description: "Tags associated with the resource.",
+			},
+		},
+	}
+}
+
+func resourceIBMDNSDomainCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetDnsDomainService(sess.SetRetries(0))
+
+	// prepare creation parameters
+	opts := datatypes.Dns_Domain{
+		Name: sl.String(d.Get("name").(string)),
+	}
+
+	opts.ResourceRecords = []datatypes.Dns_Domain_ResourceRecord{}
+
+	if targetString, ok := d.GetOk("target"); ok {
+		opts.ResourceRecords = []datatypes.Dns_Domain_ResourceRecord{
+			{
+				Data: sl.String(targetString.(string)),
+				Host: sl.String("@"),
+				Ttl:  sl.Int(86400),
+				Type: sl.String("a"),
+			},
+		}
+	}
+
+	// create Dns_Domain object
+	response, err := service.CreateObject(&opts)
+	if err != nil {
+		return fmt.Errorf("Error creating Dns Domain: %s", err)
+	}
+
+	// populate id
+	id := *response.Id
+	d.SetId(strconv.Itoa(id))
+	log.Printf("[INFO] Created Dns Domain: %d", id)
+
+	// read remote state
+	return resourceIBMDNSDomainRead(d, meta)
+}
+
+func resourceIBMDNSDomainRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetDnsDomainService(sess)
+
+	dnsId, _ := strconv.Atoi(d.Id())
+
+	// retrieve remote object state
+	dns_domain, err := service.Id(dnsId).Mask(
+		"id,name,updateDate,resourceRecords",
+	).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving Dns Domain %d: %s", dnsId, err)
+	}
+
+	// populate fields
+	d.Set("name", dns_domain.Name)
+	d.Set("serial", sl.Get(dns_domain.Serial, nil))
+	d.Set("update_date", sl.Get(dns_domain.UpdateDate.String(), nil))
+
+	// find a record with host @; that will have the current target.
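+	// (Create seeds at most one such record, with type "a" and host "@",
+	// when `target` is set, so the first match is the managed target.)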
+	for _, record := range dns_domain.ResourceRecords {
+		if *record.Type == "a" && *record.Host == "@" {
+			d.Set("target", *record.Data)
+			break
+		}
+	}
+
+	return nil
+}
+
+func resourceIBMDNSDomainUpdate(d *schema.ResourceData, meta interface{}) error {
+	// If the target has been updated, find the corresponding dns record and update its data
+
+	sess := meta.(ClientSession).SoftLayerSession()
+	domainService := services.GetDnsDomainService(sess)
+	service := services.GetDnsDomainResourceRecordService(sess.SetRetries(0))
+
+	domainId, _ := strconv.Atoi(d.Id())
+
+	if !d.HasChange("target") { // target is the only editable field
+		return nil
+	}
+
+	newTarget := d.Get("target").(string)
+
+	// retrieve domain state
+	domain, err := domainService.Id(domainId).Mask(
+		"id,name,updateDate,resourceRecords",
+	).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving DNS resource %d: %s", domainId, err)
+	}
+
+	// find a record with host @; that will have the current target.
+	// Track whether a match was found so an unrelated record is never edited.
+	var record datatypes.Dns_Domain_ResourceRecord
+	found := false
+	for _, r := range domain.ResourceRecords {
+		if *r.Type == "a" && *r.Host == "@" {
+			record = r
+			found = true
+			break
+		}
+	}
+
+	if !found || record.Id == nil {
+		return fmt.Errorf("Could not find DNS target record for domain %s (%d)",
+			sl.Get(domain.Name), sl.Get(domain.Id))
+	}
+
+	record.Data = sl.String(newTarget)
+
+	_, err = service.Id(*record.Id).EditObject(&record)
+
+	if err != nil {
+		return fmt.Errorf("Error editing DNS target record for domain %s (%d): %s",
+			sl.Get(domain.Name), sl.Get(domain.Id), err)
+	}
+
+	return nil
+}
+
+func resourceIBMDNSDomainDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetDnsDomainService(sess)
+
+	dnsId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error deleting Dns Domain: %s", err)
+	}
+
+	log.Printf("[INFO] Deleting Dns Domain: %d", dnsId)
+	result, err := service.Id(dnsId).DeleteObject()
+	if err != nil {
+		return fmt.Errorf("Error deleting Dns Domain: %s", err)
+	}
+
+	if !result {
+		return errors.New("Error deleting Dns Domain")
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMDNSDomainExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetDnsDomainService(sess)
+
+	dnsId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(dnsId).GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok {
+			if apiErr.StatusCode == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error retrieving domain info: %s", err)
+	}
+	return result.Id != nil && *result.Id == dnsId, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_domain_registration_nameservers.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_domain_registration_nameservers.go
new file mode 100644
index 00000000000..514d8f4612d
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_domain_registration_nameservers.go
@@ -0,0 +1,268 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + //"errors" + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/services" +) + +func resourceIBMDNSDomainRegistrationNameservers() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMDNSDomainRegistrationNSCreate, + Read: resourceIBMDNSDomainRegistrationNSRead, + Update: resourceIBMDNSDomainRegistrationNSUpdate, + Delete: resourceIBMDNSDomainRegistrationNSDelete, + Schema: map[string]*schema.Schema{ + "dns_registration_id": { + Type: schema.TypeString, + Required: true, + Description: "DNS registration ID", + }, + "name_servers": &schema.Schema{ + Description: "Custom name servers for the domain registration", + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "original_name_servers": &schema.Schema{ + Description: "Save of name servers prior to update", + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func resourceIBMDNSDomainRegistrationNSCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + nService := services.GetDnsDomainRegistrationService(sess) + dnsId, _ := strconv.Atoi(d.Get("dns_registration_id").(string)) + newNameServers := d.Get("name_servers").(*schema.Set).List() + + // Get existing name servers as these will be replaced later + dns_domain_nameservers, err := nService.Id(dnsId). + Mask("nameservers.name"). + GetDomainNameservers() + + if err != nil { + return fmt.Errorf("Error retrieving domain Registration NSCreate: %s", err) + } + + if len(dns_domain_nameservers) == 0 { + return fmt.Errorf("No domain found with id NSCreate [%d]", dnsId) + } + oldNameServers := make([]string, len(dns_domain_nameservers[0].Nameservers)) + for i, elem := range dns_domain_nameservers[0].Nameservers { + oldNameServers[i] = *elem.Name + } + // + log.Printf("Original DNS registration name servers %s\n", oldNameServers) + + // New NS to add, if not found in old list + var addNs []string + for _, newNs := range newNameServers { + found := false + for _, oldNs := range oldNameServers { + log.Printf("old %s, new %s", oldNs, newNs) + + if oldNs == newNs { + + found = true + break + } + } + if found == false { + addNs = append(addNs, newNs.(string)) + } + } + log.Printf("Name servers to add %v\n", addNs) + + // if no name servers to add then, we already have the correct name servers. + // So return at this point. + if len(addNs) == 0 { + d.SetId(fmt.Sprintf("%d", dnsId)) + return resourceIBMDNSDomainRegistrationNSRead(d, meta) + } + + nsUnlock_res, err := nService.Id(dnsId). + UnlockDomain() + if err != nil || nsUnlock_res != true { + return fmt.Errorf("Error unlocking domain registration record: %s", err) + } + + nsAdd_res := false + nsAdd_res, err = nService.Id(dnsId). + AddNameserversToDomain(addNs) + + if err != nil || nsAdd_res != true { + return fmt.Errorf("Error Adding name servers to record: %s", err) + } + + // old NS to delete, if not found in new list + var delNs []string + for _, oldNs := range oldNameServers { + found := false + for _, newNs := range newNameServers { + log.Printf("old %s, new %s", oldNs, newNs.(string)) + if oldNs == newNs { + found = true + break + } + } + if found == false { + delNs = append(delNs, oldNs) + } + } + + log.Printf("Name servers to delete %v\n", delNs) + + nsDel_res := false + nsDel_res, err = nService.Id(dnsId). 
+		RemoveNameserversFromDomain(delNs)
+
+	if err != nil || nsDel_res != true {
+		return fmt.Errorf("Error Deleting name servers from record: %s", err)
+	}
+
+	_, _ = nService.Id(dnsId).LockDomain()
+	// Ignore lock errors as they do not impact the operation
+
+	// save the original name servers now as not available on read
+	d.SetId(fmt.Sprintf("%d", dnsId))
+	d.Set("original_name_servers", oldNameServers)
+	return resourceIBMDNSDomainRegistrationNSRead(d, meta)
+}
+
+func resourceIBMDNSDomainRegistrationNSRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	dnsId, _ := strconv.Atoi(d.Id())
+
+	nService := services.GetDnsDomainRegistrationService(sess)
+	dns_domain_nameservers, err := nService.Id(dnsId).
+		Mask("nameservers.name").
+		GetDomainNameservers()
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving domain registration NSRead: %s", err)
+	}
+
+	if len(dns_domain_nameservers) == 0 {
+		return fmt.Errorf("No domain found with id [%d]", dnsId)
+	}
+
+	log.Printf("list %v\n", dns_domain_nameservers)
+	ns := make([]string, len(dns_domain_nameservers[0].Nameservers))
+	for i, elem := range dns_domain_nameservers[0].Nameservers {
+		ns[i] = *elem.Name
+	}
+
+	log.Printf("names %v\n", ns)
+
+	d.SetId(fmt.Sprintf("%d", dnsId))
+	d.Set("name_servers", ns)
+	return nil
+}
+
+// No update on IBM Cloud; name server changes are applied in create and delete
+func resourceIBMDNSDomainRegistrationNSUpdate(d *schema.ResourceData, meta interface{}) error {
+	return nil
+}
+
+// Delete does not remove the registration; it restores the original name servers
+func resourceIBMDNSDomainRegistrationNSDelete(d *schema.ResourceData, meta interface{}) error {
+	// Exact reverse of create to restore name servers back to original values
+	sess := meta.(ClientSession).SoftLayerSession()
+	nService := services.GetDnsDomainRegistrationService(sess)
+	dnsId, _ := strconv.Atoi(d.Get("dns_registration_id").(string))
+	currentNameServers := d.Get("name_servers").(*schema.Set).List()
+	originalNameServers := d.Get("original_name_servers").(*schema.Set).List()
+
+	// original NS to restore, if not found in current list
+	var addNs []string
+	for _, originalNs := range originalNameServers {
+		found := false
+		for _, currentNs := range currentNameServers {
+			log.Printf("current %s, original %s", currentNs, originalNs)
+
+			if currentNs == originalNs {
+
+				found = true
+				break
+			}
+		}
+		if found == false {
+			addNs = append(addNs, originalNs.(string))
+		}
+	}
+	log.Printf("Name servers to restore %v\n", addNs)
+
+	// if no name servers to add then, we already have the correct name servers.
+	// So return at this point.
+	if len(addNs) == 0 {
+		d.SetId(fmt.Sprintf("%d", dnsId))
+		return resourceIBMDNSDomainRegistrationNSRead(d, meta)
+	}
+
+	nsUnlock_res, err := nService.Id(dnsId).
+		UnlockDomain()
+	if err != nil || nsUnlock_res != true {
+		return fmt.Errorf("Error unlocking domain registration record: %s", err)
+	}
+
+	nsAdd_res := false
+	nsAdd_res, err = nService.Id(dnsId).
+		AddNameserversToDomain(addNs)
+
+	if err != nil || nsAdd_res != true {
+		return fmt.Errorf("Error Adding name servers to record: %s", err)
+	}
+
+	// current NS to delete, if not found in original list
+	var delNs []string
+	for _, currentNs := range currentNameServers {
+		found := false
+		for _, originalNs := range originalNameServers {
+			log.Printf("current %s, original %s", currentNs, originalNs)
+			if currentNs == originalNs {
+				found = true
+				break
+			}
+		}
+		if found == false {
+			delNs = append(delNs, currentNs.(string))
+		}
+	}
+
+	log.Printf("Name servers to delete %v\n", delNs)
+
+	nsDel_res := false
+	nsDel_res, err = nService.Id(dnsId).
+		RemoveNameserversFromDomain(delNs)
+
+	if err != nil || nsDel_res != true {
+		return fmt.Errorf("Error Deleting name servers from record: %s", err)
+	}
+
+	_, _ = nService.Id(dnsId).LockDomain()
+	// Ignore lock errors as they do not impact the operation
+
+	d.SetId("")
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_record.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_record.go
new file mode 100644
index 00000000000..75f69b9a63b
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_record.go
@@ -0,0 +1,455 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+var allowedDomainRecordTypes = []string{
+	"a", "aaaa", "cname", "mx", "ns", "ptr", "soa", "spf", "srv", "txt",
+}
+var ipv6Regexp *regexp.Regexp
+var upcaseRegexp *regexp.Regexp
+
+func init() {
+	ipv6Regexp, _ = regexp.Compile(
+		"[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:" +
+			"[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}",
+	)
+	upcaseRegexp, _ = regexp.Compile("[A-Z]")
+}
+
+func resourceIBMDNSRecord() *schema.Resource {
+	return &schema.Resource{
+		Exists:   resourceIBMDNSRecordExists,
+		Create:   resourceIBMDNSRecordCreate,
+		Read:     resourceIBMDNSRecordRead,
+		Update:   resourceIBMDNSRecordUpdate,
+		Delete:   resourceIBMDNSRecordDelete,
+		Importer: &schema.ResourceImporter{},
+		Schema: map[string]*schema.Schema{
+			"data": {
+				Type:     schema.TypeString,
+				Required: true,
+				ValidateFunc: func(val interface{}, field string) (warnings []string, errors []error) {
+					value := val.(string)
+					if ipv6Regexp.MatchString(value) && upcaseRegexp.MatchString(value) {
+						errors = append(
+							errors,
+							fmt.Errorf(
+								"IPv6 addresses in the data property cannot have upper case letters: %s",
+								value,
+							),
+						)
+					}
+					return
+				},
+				Description: "DNS record data",
+			},
+
+			"domain_id": {
+				Type:        schema.TypeInt,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Domain ID of dns record instance",
+			},
+
+			"expire": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Computed:    true,
+				Description: "DNS record expiry info",
+			},
+
+			"host": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Hostname",
+			},
+
+			"mx_priority": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Default:     0,
+				Description: "MX record priority",
+			},
+
+			"refresh": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Computed:    true,
+				Description: "refresh rate",
+			},
+
+			"responsible_person": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "Responsible person for DNS record",
+			},
+
+			"retry": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Computed:    true,
+				Description: "Retry count",
+			},
+
+			"minimum_ttl": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Computed:    true,
+				Description: "Minimum TTL configuration",
+			},
+
+			"ttl": {
+				Type:     schema.TypeInt,
+				Required: true,
+				DefaultFunc: func() (interface{}, error) {
+					return 86400, nil
+				},
+				Description: "TTL configuration",
+			},
+
+			"type": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				ValidateFunc: func(val interface{}, field string) (warnings []string, errors []error) {
+					value := val.(string)
+					for _, rtype := range allowedDomainRecordTypes {
+						if value == rtype {
+							return
+						}
+					}
+
+					errors = append(
+						errors,
+						fmt.Errorf("%s is not one of the valid domain record types: %s",
+							value, strings.Join(allowedDomainRecordTypes, ", "),
+						),
+					)
+					return
+				},
+				Description: "DNS record type",
+			},
+
+			"service": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "service info",
+			},
+
+			"protocol": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "protocol info",
+			},
+
+			"port": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Description: "port number",
+			},
+
+			"priority": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Default:     0,
+				Description: "priority info",
+			},
+
+			"weight": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Default:     0,
+				Description: "weight info",
+			},
+
+			"tags": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Set:         schema.HashString,
+				Description: "tags set for the resource",
+			},
+		},
+	}
+}
+
+// Creates DNS Domain Resource Record
+// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/createObject
+func resourceIBMDNSRecordCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetDnsDomainResourceRecordService(sess.SetRetries(0))
+
+	opts := datatypes.Dns_Domain_ResourceRecord{
+		Data:     sl.String(d.Get("data").(string)),
+		DomainId: sl.Int(d.Get("domain_id").(int)),
+		Host:     sl.String(d.Get("host").(string)),
+		Ttl:      sl.Int(d.Get("ttl").(int)),
+		Type:     sl.String(d.Get("type").(string)),
+	}
+
+	if expire, ok := d.GetOk("expire"); ok {
+		opts.Expire = sl.Int(expire.(int))
+	}
+
+	if minimum, ok := d.GetOk("minimum_ttl"); ok {
+		opts.Minimum = sl.Int(minimum.(int))
+	}
+
+	mxPriority := d.Get("mx_priority")
+	opts.MxPriority = sl.Int(mxPriority.(int))
+
+	if refresh, ok := d.GetOk("refresh"); ok {
+		opts.Refresh = sl.Int(refresh.(int))
+	}
+
+	if responsiblePerson, ok := d.GetOk("responsible_person"); ok {
+		opts.ResponsiblePerson = sl.String(responsiblePerson.(string))
+	}
+
+	if retry, ok := d.GetOk("retry"); ok {
+		opts.Retry = sl.Int(retry.(int))
+	}
+
+	optsSrv := datatypes.Dns_Domain_ResourceRecord_SrvType{
+		Dns_Domain_ResourceRecord: opts,
+	}
+	if *opts.Type == "srv" {
+		if serviceName, ok := d.GetOk("service"); ok {
+			optsSrv.Service = sl.String(serviceName.(string))
+		}
+
+		if protocol, ok := d.GetOk("protocol"); ok {
+			optsSrv.Protocol = sl.String(protocol.(string))
+		}
+
+		optsSrv.Priority = sl.Int(d.Get("priority").(int))
+
+		optsSrv.Weight = sl.Int(d.Get("weight").(int))
+
+		if port, ok := d.GetOk("port"); ok {
+			optsSrv.Port = sl.Int(port.(int))
+		}
+	}
+
+	log.Printf("[INFO] Creating DNS Resource %s Record for '%d' dns domain", *opts.Type, d.Get("domain_id").(int))
+
+	var err error
+	var id int
+	if *opts.Type == "srv" {
+		var record 
datatypes.Dns_Domain_ResourceRecord_SrvType + serviceSrv := services.GetDnsDomainResourceRecordSrvTypeService(sess) + record, err = serviceSrv.CreateObject(&optsSrv) + if record.Id != nil { + id = *record.Id + } + } else { + var record datatypes.Dns_Domain_ResourceRecord + record, err = service.CreateObject(&opts) + if record.Id != nil { + id = *record.Id + } + } + + if err != nil { + return fmt.Errorf("Error creating DNS Resource %s Record: %s", *opts.Type, err) + } + + d.SetId(fmt.Sprintf("%d", id)) + + log.Printf("[INFO] Dns Resource %s Record ID: %s", *opts.Type, d.Id()) + + return resourceIBMDNSRecordRead(d, meta) +} + +// Reads DNS Domain Resource Record from SL system +// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/getObject +func resourceIBMDNSRecordRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainResourceRecordService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + result, err := service.Id(id).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving DNS Resource Record: %s", err) + } + + // Required fields + d.Set("data", *result.Data) + d.Set("domain_id", *result.DomainId) + d.Set("host", *result.Host) + d.Set("type", *result.Type) + d.Set("ttl", *result.Ttl) + + // Optional fields + d.Set("expire", sl.Get(result.Expire, nil)) + d.Set("minimum_ttl", sl.Get(result.Minimum, nil)) + d.Set("mx_priority", sl.Get(result.MxPriority, nil)) + d.Set("responsible_person", sl.Get(result.ResponsiblePerson, nil)) + d.Set("refresh", sl.Get(result.Refresh, nil)) + d.Set("retry", sl.Get(result.Retry, nil)) + + if *result.Type == "srv" { + d.Set("service", sl.Get(result.Service, nil)) + d.Set("protocol", sl.Get(result.Protocol, nil)) + d.Set("port", sl.Get(result.Port, nil)) + d.Set("priority", sl.Get(result.Priority, nil)) + d.Set("weight", sl.Get(result.Weight, nil)) + } + + return nil +} + +// Updates DNS Domain Resource Record in SL system +// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/editObject +func resourceIBMDNSRecordUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainResourceRecordService(sess) + serviceNoRetry := services.GetDnsDomainResourceRecordService(sess.SetRetries(0)) + + recordId, _ := strconv.Atoi(d.Id()) + record, err := service.Id(recordId).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving DNS Resource Record: %s", err) + } + + recordType := d.Get("type").(string) + + if data, ok := d.GetOk("data"); ok && d.HasChange("data") { + record.Data = sl.String(data.(string)) + } + + if domain_id, ok := d.GetOk("domain_id"); ok && d.HasChange("domain_id") { + record.DomainId = sl.Int(domain_id.(int)) + } + + if host, ok := d.GetOk("host"); ok && d.HasChange("host") { + record.Host = sl.String(host.(string)) + } + + if ttl, ok := d.GetOk("ttl"); ok && d.HasChange("ttl") { + record.Ttl = sl.Int(ttl.(int)) + } + + if expire, ok := d.GetOk("expire"); ok && d.HasChange("expire") { + record.Expire = sl.Int(expire.(int)) + } + + if minimum_ttl, ok := d.GetOk("minimum_ttl"); ok && d.HasChange("minimum_ttl") { + record.Minimum = sl.Int(minimum_ttl.(int)) + } + + if d.HasChange("mx_priority") { + record.MxPriority = sl.Int(d.Get("mx_priority").(int)) + } + + if refresh, ok := d.GetOk("refresh"); ok && d.HasChange("refresh") { + 
record.Refresh = sl.Int(refresh.(int)) + } + + if contact_email, ok := d.GetOk("responsible_person"); ok && d.HasChange("responsible_person") { + record.ResponsiblePerson = sl.String(contact_email.(string)) + } + + if retry, ok := d.GetOk("retry"); ok && d.HasChange("retry") { + record.Retry = sl.Int(retry.(int)) + } + + recordSrv := datatypes.Dns_Domain_ResourceRecord_SrvType{ + Dns_Domain_ResourceRecord: record, + } + if recordType == "srv" { + if service, ok := d.GetOk("service"); ok && d.HasChange("service") { + recordSrv.Service = sl.String(service.(string)) + } + + if d.HasChange("priority") { + recordSrv.Priority = sl.Int(d.Get("priority").(int)) + } + + if protocol, ok := d.GetOk("protocol"); ok && d.HasChange("protocol") { + recordSrv.Protocol = sl.String(protocol.(string)) + } + + if port, ok := d.GetOk("port"); ok && d.HasChange("port") { + recordSrv.Port = sl.Int(port.(int)) + } + + if d.HasChange("weight") { + recordSrv.Weight = sl.Int(d.Get("weight").(int)) + } + } + + if recordType == "srv" { + _, err = services.GetDnsDomainResourceRecordSrvTypeService(sess.SetRetries(0)). + Id(recordId).EditObject(&recordSrv) + } else { + _, err = serviceNoRetry.Id(recordId).EditObject(&record) + } + + if err != nil { + return fmt.Errorf("Error editing DNS Resource %s Record %d: %s", recordType, recordId, err) + } + + return nil +} + +// Deletes DNS Domain Resource Record in SL system +// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/deleteObject +func resourceIBMDNSRecordDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainResourceRecordService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + _, err = service.Id(id).DeleteObject() + + if err != nil { + return fmt.Errorf("Error deleting DNS Resource Record: %s", err) + } + + return nil +} + +// Exists function is called by refresh +// if the entity is absent - it is deleted from the .tfstate file +func resourceIBMDNSRecordExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainResourceRecordService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + record, err := service.Id(id).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving domain record info: %s", err) + } + return record.Id != nil && *record.Id == id, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_reverse_record.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_reverse_record.go new file mode 100644 index 00000000000..8ba9e422ccb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_reverse_record.go @@ -0,0 +1,148 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMDNSReverseRecord() *schema.Resource {
+	return &schema.Resource{
+		Exists:   resourceIBMDNSREVERSERecordExists,
+		Create:   resourceIBMDNSREVERSERecordCreate,
+		Read:     resourceIBMDNSREVERSERecordRead,
+		Update:   resourceIBMDNSREVERSERecordUpdate,
+		Delete:   resourceIBMDNSREVERSERecordDelete,
+		Importer: &schema.ResourceImporter{},
+		Schema: map[string]*schema.Schema{
+			"ipaddress": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "IP Address",
+			},
+			"hostname": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Host name",
+			},
+			"ttl": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				DefaultFunc: func() (interface{}, error) {
+					return 604800, nil
+				},
+				Description: "TTL value",
+			},
+		},
+	}
+}
+
+// Creates DNS Domain Reverse Record
+// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain/CreatePtrRecord
+func resourceIBMDNSREVERSERecordCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetDnsDomainService(sess.SetRetries(0))
+	Data := sl.String(d.Get("hostname").(string))
+	Ttl := sl.Int(d.Get("ttl").(int))
+	Ipaddress := sl.String(d.Get("ipaddress").(string))
+	var id int
+	record, err := service.CreatePtrRecord(Ipaddress, Data, Ttl)
+	if err != nil {
+		return fmt.Errorf("Error creating DNS Reverse Record: %s", err)
+	}
+	if record.Id != nil {
+		id = *record.Id
+	}
+	d.SetId(fmt.Sprintf("%d", id))
+	log.Printf("[INFO] Dns Reverse %s ", d.Id())
+	return resourceIBMDNSREVERSERecordRead(d, meta)
+}
+
+// Reads DNS Domain Reverse Record from SL system
+// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/getObject
+func resourceIBMDNSREVERSERecordRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetDnsDomainResourceRecordService(sess)
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	_, nexterr := service.Id(id).GetObject()
+	if nexterr != nil {
+		return fmt.Errorf("Error retrieving DNS Reverse Record: %s", nexterr)
+	}
+	return nil
+}
+
+// Updates DNS Domain Reverse Record in SL system
+// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/editObject
+func resourceIBMDNSREVERSERecordUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetDnsDomainResourceRecordService(sess)
+	serviceNoRetry := services.GetDnsDomainResourceRecordService(sess.SetRetries(0))
+	recordId, _ := strconv.Atoi(d.Id())
+	record, err := service.Id(recordId).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving DNS Reverse Record: %s", err)
+	}
+	if data, ok := d.GetOk("hostname"); ok && d.HasChange("hostname") {
+		record.Data = sl.String(data.(string))
+	}
+	if ttl, ok := d.GetOk("ttl"); ok && d.HasChange("ttl") {
+		record.Ttl = sl.Int(ttl.(int))
+	}
+	record.IsGatewayAddress = nil
+	_, err = serviceNoRetry.Id(recordId).EditObject(&record)
+	if err != nil {
+		return fmt.Errorf("Error editing DNS Reverse Record %d: %s", recordId, err)
+	}
+	return nil
+}
+
+// Deletes DNS Domain Reverse Record in SL system
+// 
https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/deleteObject +func resourceIBMDNSREVERSERecordDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainResourceRecordService(sess) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + _, err = service.Id(id).DeleteObject() + + if err != nil { + return fmt.Errorf("Error deleting DNS Reverse Record: %s", err) + } + return nil +} + +// Exists function is called by refresh +// if the entity is absent - it is deleted from the .tfstate file +func resourceIBMDNSREVERSERecordExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainResourceRecordService(sess) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + record, err := service.Id(id).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving domain reverse record info: %s", err) + } + return record.Id != nil && *record.Id == id, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_secondary.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_secondary.go new file mode 100644 index 00000000000..1a756745f75 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dns_secondary.go @@ -0,0 +1,179 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "errors" + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMDNSSecondary() *schema.Resource { + return &schema.Resource{ + Exists: resourceIBMDNSSecondaryExists, + Create: resourceIBMDNSSecondaryCreate, + Read: resourceIBMDNSSecondaryRead, + Update: resourceIBMDNSSecondaryUpdate, + Delete: resourceIBMDNSSecondaryDelete, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + "master_ip_address": { + Type: schema.TypeString, + Required: true, + Description: "Master IP Address", + }, + + "transfer_frequency": { + Type: schema.TypeInt, + Required: true, + Description: "Transfer frequency value", + }, + + "zone_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Zone name", + }, + + "status_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Status ID", + }, + + "status_text": { + Type: schema.TypeString, + Computed: true, + Description: "Status text", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags", + }, + }, + } +} + +func resourceIBMDNSSecondaryCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsSecondaryService(sess) + + // prepare creation parameters + opts := datatypes.Dns_Secondary{ + MasterIpAddress: sl.String(d.Get("master_ip_address").(string)), + TransferFrequency: sl.Int(d.Get("transfer_frequency").(int)), + ZoneName: 
sl.String(d.Get("zone_name").(string)), + } + + // create Dns_Secondary object + response, err := service.CreateObject(&opts) + if err != nil { + return fmt.Errorf("Error creating Dns Secondary Zone: %s", err) + } + + // populate id + id := *response.Id + d.SetId(strconv.Itoa(id)) + log.Printf("[INFO] Created Dns Secondary Zone: %d", id) + + // read remote state + return resourceIBMDNSSecondaryRead(d, meta) +} + +func resourceIBMDNSSecondaryRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsSecondaryService(sess) + + dnsId, _ := strconv.Atoi(d.Id()) + + // retrieve remote object state + dns_domain_secondary, err := service.Id(dnsId).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving Dns Secondary Zone %d: %s", dnsId, err) + } + + // populate fields + d.Set("master_ip_address", *dns_domain_secondary.MasterIpAddress) + d.Set("transfer_frequency", *dns_domain_secondary.TransferFrequency) + d.Set("zone_name", *dns_domain_secondary.ZoneName) + d.Set("status_id", *dns_domain_secondary.StatusId) + d.Set("status_text", *dns_domain_secondary.StatusText) + + return nil +} + +func resourceIBMDNSSecondaryUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + domainId, _ := strconv.Atoi(d.Id()) + hasChange := false + + opts := datatypes.Dns_Secondary{} + if d.HasChange("master_ip_address") { + opts.MasterIpAddress = sl.String(d.Get("master_ip_address").(string)) + hasChange = true + } + + if d.HasChange("transfer_frequency") { + opts.TransferFrequency = sl.Int(d.Get("transfer_frequency").(int)) + hasChange = true + } + + if hasChange { + service := services.GetDnsSecondaryService(sess) + _, err := service.Id(domainId).EditObject(&opts) + + if err != nil { + return fmt.Errorf("Error editing DNS secondary zone (%d): %s", domainId, err) + } + } + + return nil +} + +func resourceIBMDNSSecondaryDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsSecondaryService(sess) + + dnsId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting Dns Secondary Zone: %s", err) + } + + log.Printf("[INFO] Deleting Dns Secondary Zone: %d", dnsId) + result, err := service.Id(dnsId).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting Dns Secondary Zone: %s", err) + } + + if !result { + return errors.New("Error deleting Dns Secondary Zone") + } + + d.SetId("") + return nil +} + +func resourceIBMDNSSecondaryExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsSecondaryService(sess) + + dnsId, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(dnsId).GetObject() + return err == nil && result.Id != nil && *result.Id == dnsId, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_enterprise.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_enterprise.go new file mode 100644 index 00000000000..0e35dad54c4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_enterprise.go @@ -0,0 +1,231 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/enterprisemanagementv1" +) + +func resourceIbmEnterprise() *schema.Resource { + return &schema.Resource{ + Create: resourceIbmEnterpriseCreate, + Read: resourceIbmEnterpriseRead, + Update: resourceIbmEnterpriseUpdate, + Delete: resourceIbmEnterpriseDelete, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "source_account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The ID of the account that is used to create the enterprise.", + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The name of the enterprise. This field must have 3 - 60 characters.", + ValidateFunc: validateAllowedEnterpriseNameValue(), + }, + "primary_contact_iam_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The IAM ID of the enterprise primary contact, such as `IBMid-0123ABC`. The IAM ID must already exist.", + }, + "domain": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A domain or subdomain for the enterprise, such as `example.com` or `my.example.com`.", + }, + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL of the enterprise.", + }, + "enterprise_account_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The enterprise account ID.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The Cloud Resource Name (CRN) of the enterprise.", + }, + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The state of the enterprise.", + }, + "primary_contact_email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The email of the primary contact of the enterprise.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time stamp at which the enterprise was created.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the user or service that created the enterprise.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The time stamp at which the enterprise was last updated.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the user or service that updated the enterprise.", + }, + }, + } +} + +func resourceIbmEnterpriseCreate(d *schema.ResourceData, meta interface{}) error { + enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1() + if err != nil { + return err + } + createEnterpriseOptions := &enterprisemanagementv1.CreateEnterpriseOptions{} + createEnterpriseOptions.SetSourceAccountID(d.Get("source_account_id").(string)) + createEnterpriseOptions.SetName(d.Get("name").(string)) + createEnterpriseOptions.SetPrimaryContactIamID(d.Get("primary_contact_iam_id").(string)) + if _, ok := d.GetOk("domain"); ok { + createEnterpriseOptions.SetDomain(d.Get("domain").(string)) + } + createEnterpriseResponse, response, err := 
enterpriseManagementClient.CreateEnterpriseWithContext(context.TODO(), createEnterpriseOptions) + if err != nil { + log.Printf("[DEBUG] CreateEnterpriseWithContext failed %s\n%s", err, response) + return err + } + d.SetId(*createEnterpriseResponse.EnterpriseID) + return resourceIbmEnterpriseRead(d, meta) +} + +func resourceIbmEnterpriseRead(d *schema.ResourceData, meta interface{}) error { + enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1() + if err != nil { + return err + } + + getEnterpriseOptions := &enterprisemanagementv1.GetEnterpriseOptions{} + + getEnterpriseOptions.SetEnterpriseID(d.Id()) + + enterprise, response, err := enterpriseManagementClient.GetEnterpriseWithContext(context.TODO(), getEnterpriseOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetEnterpriseWithContext failed %s\n%s", err, response) + return err + } + + //if err = d.Set("source_account_id", enterprise.); err != nil { + // return fmt.Errorf("Error setting source_account_id: %s", err) + //} + if err = d.Set("name", enterprise.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("primary_contact_iam_id", enterprise.PrimaryContactIamID); err != nil { + return fmt.Errorf("Error setting primary_contact_iam_id: %s", err) + } + if err = d.Set("domain", enterprise.Domain); err != nil { + return fmt.Errorf("Error setting domain: %s", err) + } + if err = d.Set("url", enterprise.URL); err != nil { + return fmt.Errorf("Error setting url: %s", err) + } + if err = d.Set("enterprise_account_id", enterprise.EnterpriseAccountID); err != nil { + return fmt.Errorf("Error setting enterprise_account_id: %s", err) + } + if err = d.Set("crn", enterprise.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("state", enterprise.State); err != nil { + return fmt.Errorf("Error setting state: %s", err) + } + if err = d.Set("primary_contact_email", enterprise.PrimaryContactEmail); err != nil { + return fmt.Errorf("Error setting primary_contact_email: %s", err) + } + if err = d.Set("created_at", enterprise.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + if err = d.Set("created_by", enterprise.CreatedBy); err != nil { + return fmt.Errorf("Error setting created_by: %s", err) + } + if err = d.Set("updated_at", enterprise.UpdatedAt.String()); err != nil { + return fmt.Errorf("Error setting updated_at: %s", err) + } + if err = d.Set("updated_by", enterprise.UpdatedBy); err != nil { + return fmt.Errorf("Error setting updated_by: %s", err) + } + + return nil +} + +func resourceIbmEnterpriseUpdate(d *schema.ResourceData, meta interface{}) error { + enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1() + if err != nil { + return err + } + + updateEnterpriseOptions := &enterprisemanagementv1.UpdateEnterpriseOptions{} + + updateEnterpriseOptions.SetEnterpriseID(d.Id()) + + hasChange := false + + //if d.HasChange("source_account_id") { + // + // updateEnterpriseOptions.SetSourceAccountID(d.Get("source_account_id").(string)) + // hasChange = true + //} + if d.HasChange("name") { + updateEnterpriseOptions.SetName(d.Get("name").(string)) + hasChange = true + } + if d.HasChange("primary_contact_iam_id") { + updateEnterpriseOptions.SetPrimaryContactIamID(d.Get("primary_contact_iam_id").(string)) + hasChange = true + } + if d.HasChange("domain") { + 
updateEnterpriseOptions.SetDomain(d.Get("domain").(string)) + hasChange = true + } + + if hasChange { + response, err := enterpriseManagementClient.UpdateEnterpriseWithContext(context.TODO(), updateEnterpriseOptions) + if err != nil { + log.Printf("[DEBUG] UpdateEnterpriseWithContext failed %s\n%s", err, response) + return err + } + } + + return resourceIbmEnterpriseRead(d, meta) +} + +func resourceIbmEnterpriseDelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_enterprise_account.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_enterprise_account.go new file mode 100644 index 00000000000..45988c41b68 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_enterprise_account.go @@ -0,0 +1,305 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "errors" + "fmt" + + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/enterprisemanagementv1" +) + +func resourceIbmEnterpriseAccount() *schema.Resource { + return &schema.Resource{ + Create: resourceIbmEnterpriseAccountCreate, + Read: resourceIbmEnterpriseAccountRead, + Update: resourceIbmEnterpriseAccountUpdate, + Delete: resourceIbmEnterpriseAccountDelete, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "parent": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The CRN of the parent under which the account will be created. The parent can be an existing account group or the enterprise itself.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The name of the account. This field must have 3 - 60 characters.", + ForceNew: true, + ValidateFunc: validateAllowedEnterpriseNameValue(), + }, + "owner_iam_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The IAM ID of the account owner, such as `IBMid-0123ABC`. 
The IAM ID must already exist.",
+				ForceNew:    true,
+			},
+			"url": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL of the account.",
+			},
+			"crn": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The Cloud Resource Name (CRN) of the account.",
+			},
+			"enterprise_account_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "The enterprise account ID.",
+			},
+			"enterprise_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "The enterprise ID that the account is a part of.",
+			},
+			"account_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "The source account ID of the account to be imported.",
+			},
+			"enterprise_path": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The path from the enterprise to this particular account.",
+			},
+			"state": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The state of the account.",
+			},
+			"paid": &schema.Schema{
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "The type of account - whether it is free or paid.",
+			},
+			"owner_email": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The email address of the owner of the account.",
+			},
+			"is_enterprise_account": &schema.Schema{
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "The flag to indicate whether the account is an enterprise account or not.",
+			},
+			"created_at": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The time stamp at which the account was created.",
+			},
+			"created_by": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The IAM ID of the user or service that created the account.",
+			},
+			"updated_at": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The time stamp at which the account was last updated.",
+			},
+			"updated_by": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The IAM ID of the user or service that updated the account.",
+			},
+		},
+	}
+}
+
+func checkImportAccount(d *schema.ResourceData) bool {
+	_, hasAccountID := d.GetOk("account_id")
+	_, hasEnterpriseID := d.GetOk("enterprise_id")
+	return hasAccountID && hasEnterpriseID
+}
+
+func checkCreateAccount(d *schema.ResourceData) bool {
+	_, hasParent := d.GetOk("parent")
+	_, hasName := d.GetOk("name")
+	_, hasOwnerIamID := d.GetOk("owner_iam_id")
+	return hasParent && hasName && hasOwnerIamID
+}
+
+func resourceIbmEnterpriseAccountCreate(d *schema.ResourceData, meta interface{}) error {
+	enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1()
+	if err != nil {
+		return err
+	}
+
+	if checkImportAccount(d) {
+		importAccountToEnterpriseOptions := &enterprisemanagementv1.ImportAccountToEnterpriseOptions{}
+		importAccountToEnterpriseOptions.SetEnterpriseID(d.Get("enterprise_id").(string))
+		importAccountToEnterpriseOptions.SetAccountID(d.Get("account_id").(string))
+		response, err := enterpriseManagementClient.ImportAccountToEnterpriseWithContext(context.TODO(), importAccountToEnterpriseOptions)
+		if err != nil {
+			log.Printf("[DEBUG] ImportAccountToEnterpriseWithContext failed %s\n%s", err, response)
+			return err
+		}
+		d.SetId(d.Get("account_id").(string))
+	} else if checkCreateAccount(d) {
+		createAccountOptions := &enterprisemanagementv1.CreateAccountOptions{}
+		createAccountOptions.SetParent(d.Get("parent").(string))
+		createAccountOptions.SetName(d.Get("name").(string))
+		createAccountOptions.SetOwnerIamID(d.Get("owner_iam_id").(string))
+		createAccountResponse, response, err := enterpriseManagementClient.CreateAccountWithContext(context.TODO(), createAccountOptions)
+		if err != nil {
+			log.Printf("[DEBUG] CreateAccountWithContext failed %s\n%s", err, response)
+			return err
+		}
+		d.SetId(*createAccountResponse.AccountID)
+	} else {
+		err := errors.New("Required parameters are missing. " +
+			"Provide parent, name, and owner_iam_id to create a new account in the enterprise, " +
+			"or enterprise_id and account_id to import an existing account into the enterprise.")
+		return err
+	}
+	return resourceIbmEnterpriseAccountRead(d, meta)
+}
+
+func resourceIbmEnterpriseAccountRead(d *schema.ResourceData, meta interface{}) error {
+	enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1()
+	if err != nil {
+		return err
+	}
+
+	getAccountOptions := &enterprisemanagementv1.GetAccountOptions{}
+
+	getAccountOptions.SetAccountID(d.Id())
+
+	account, response, err := enterpriseManagementClient.GetAccountWithContext(context.TODO(), getAccountOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		log.Printf("[DEBUG] GetAccountWithContext failed %s\n%s", err, response)
+		return err
+	}
+
+	if err = d.Set("parent", account.Parent); err != nil {
+		return fmt.Errorf("Error setting parent: %s", err)
+	}
+	if err = d.Set("name", account.Name); err != nil {
+		return fmt.Errorf("Error setting name: %s", err)
+	}
+	if err = d.Set("owner_iam_id", account.OwnerIamID); err != nil {
+		return fmt.Errorf("Error setting owner_iam_id: %s", err)
+	}
+	if err = d.Set("account_id", account.ID); err != nil {
+		return fmt.Errorf("Error setting account_id: %s", err)
+	}
+	if err = d.Set("url", account.URL); err != nil {
+		return fmt.Errorf("Error setting url: %s", err)
+	}
+
+	if err = d.Set("crn", account.CRN); err != nil {
+		return fmt.Errorf("Error setting crn: %s", err)
+	}
+	if err = d.Set("enterprise_account_id", account.EnterpriseAccountID); err != nil {
+		return fmt.Errorf("Error setting enterprise_account_id: %s", err)
+	}
+	if err = d.Set("enterprise_id", account.EnterpriseID); err != nil {
+		return fmt.Errorf("Error setting enterprise_id: %s", err)
+	}
+	if err = d.Set("enterprise_path", account.EnterprisePath); err != nil {
+		return fmt.Errorf("Error setting enterprise_path: %s", err)
+	}
+	if err = d.Set("state", account.State); err != nil {
+		return fmt.Errorf("Error setting state: %s", err)
+	}
+	if err = d.Set("paid", account.Paid); err != nil {
+		return fmt.Errorf("Error setting paid: %s", err)
+	}
+	if err = d.Set("owner_email", account.OwnerEmail); err != nil {
+		return fmt.Errorf("Error setting owner_email: %s", err)
+	}
+	if err = d.Set("is_enterprise_account", account.IsEnterpriseAccount); err != nil {
+		return fmt.Errorf("Error setting is_enterprise_account: %s", err)
+	}
+	if err = d.Set("created_at", account.CreatedAt.String()); err != nil {
+		return fmt.Errorf("Error setting created_at: %s", err)
+	}
+	if err = d.Set("created_by", account.CreatedBy); err != nil {
+		return fmt.Errorf("Error setting created_by: %s", err)
+	}
+	if account.UpdatedAt != nil {
+		if err = d.Set("updated_at", account.UpdatedAt.String()); err != nil {
+			return fmt.Errorf("Error setting updated_at: 
%s", err) + } + } + if account.UpdatedBy != nil { + if err = d.Set("updated_by", account.UpdatedBy); err != nil { + return fmt.Errorf("Error setting updated_by: %s", err) + } + } + return nil +} + +func resourceIbmEnterpriseAccountUpdate(d *schema.ResourceData, meta interface{}) error { + enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1() + if err != nil { + return err + } + + updateAccountOptions := &enterprisemanagementv1.UpdateAccountOptions{} + + updateAccountOptions.SetAccountID(d.Id()) + + hasChange := false + + if d.HasChange("parent") { + updateAccountOptions.SetParent(d.Get("parent").(string)) + hasChange = true + } + /** Removed as update call requires only parent **/ + //if d.HasChange("name") { + // + // updateAccountOptions.SetName(d.Get("name").(string)) + // hasChange = true + //} + //if d.HasChange("owner_iam_id") { + // updateAccountOptions.SetOwnerIamID(d.Get("owner_iam_id").(string)) + // hasChange = true + //} + + if hasChange { + response, err := enterpriseManagementClient.UpdateAccountWithContext(context.TODO(), updateAccountOptions) + if err != nil { + log.Printf("[DEBUG] UpdateAccountWithContext failed %s\n%s", err, response) + return err + } + } + + return resourceIbmEnterpriseAccountRead(d, meta) +} + +func resourceIbmEnterpriseAccountDelete(d *schema.ResourceData, meta interface{}) error { + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_enterprise_account_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_enterprise_account_group.go new file mode 100644 index 00000000000..14c3027485c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_enterprise_account_group.go @@ -0,0 +1,239 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 +package ibm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/enterprisemanagementv1" +) + +func resourceIbmEnterpriseAccountGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceIbmEnterpriseAccountGroupCreate, + Read: resourceIbmEnterpriseAccountGroupRead, + Update: resourceIbmEnterpriseAccountGroupUpdate, + Delete: resourceIbmEnterpriseAccountGroupDelete, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "parent": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The CRN of the parent under which the account group will be created. The parent can be an existing account group or the enterprise itself.", + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The name of the account group. This field must have 3 - 60 characters.", + ValidateFunc: validateAllowedEnterpriseNameValue(), + }, + "primary_contact_iam_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The IAM ID of the primary contact for this account group, such as `IBMid-0123ABC`. 
The IAM ID must already exist.",
+			},
+			"url": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL of the account group.",
+			},
+			"crn": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The Cloud Resource Name (CRN) of the account group.",
+			},
+			"enterprise_account_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The enterprise account ID.",
+			},
+			"enterprise_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The enterprise ID that the account group is a part of.",
+			},
+			"enterprise_path": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The path from the enterprise to this particular account group.",
+			},
+			"state": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The state of the account group.",
+			},
+			"primary_contact_email": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The email address of the primary contact of the account group.",
+			},
+			"created_at": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The time stamp at which the account group was created.",
+			},
+			"created_by": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The IAM ID of the user or service that created the account group.",
+			},
+			"updated_at": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The time stamp at which the account group was last updated.",
+			},
+			"updated_by": &schema.Schema{
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The IAM ID of the user or service that updated the account group.",
+			},
+		},
+	}
+}
+
+func resourceIbmEnterpriseAccountGroupCreate(d *schema.ResourceData, meta interface{}) error {
+	enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1()
+	if err != nil {
+		return err
+	}
+
+	createAccountGroupOptions := &enterprisemanagementv1.CreateAccountGroupOptions{}
+
+	createAccountGroupOptions.SetParent(d.Get("parent").(string))
+	createAccountGroupOptions.SetName(d.Get("name").(string))
+	createAccountGroupOptions.SetPrimaryContactIamID(d.Get("primary_contact_iam_id").(string))
+
+	createAccountGroupResponse, response, err := enterpriseManagementClient.CreateAccountGroupWithContext(context.TODO(), createAccountGroupOptions)
+	if err != nil {
+		log.Printf("[DEBUG] CreateAccountGroupWithContext failed %s\n%s", err, response)
+		return err
+	}
+
+	d.SetId(*createAccountGroupResponse.AccountGroupID)
+
+	return resourceIbmEnterpriseAccountGroupRead(d, meta)
+}
+
+func resourceIbmEnterpriseAccountGroupRead(d *schema.ResourceData, meta interface{}) error {
+	enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1()
+	if err != nil {
+		return err
+	}
+
+	getAccountGroupOptions := &enterprisemanagementv1.GetAccountGroupOptions{}
+
+	getAccountGroupOptions.SetAccountGroupID(d.Id())
+
+	accountGroup, response, err := enterpriseManagementClient.GetAccountGroupWithContext(context.TODO(), getAccountGroupOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		log.Printf("[DEBUG] GetAccountGroupWithContext failed %s\n%s", err, response)
+		return err
+	}
+	if err = d.Set("parent", accountGroup.Parent); err != nil {
+		return fmt.Errorf("Error setting parent: %s", err)
+	}
+	if err = d.Set("name", accountGroup.Name); err != nil 
{ + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("primary_contact_iam_id", accountGroup.PrimaryContactIamID); err != nil { + return fmt.Errorf("Error setting primary_contact_iam_id: %s", err) + } + if err = d.Set("url", accountGroup.URL); err != nil { + return fmt.Errorf("Error setting url: %s", err) + } + if err = d.Set("crn", accountGroup.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("enterprise_account_id", accountGroup.EnterpriseAccountID); err != nil { + return fmt.Errorf("Error setting enterprise_account_id: %s", err) + } + if err = d.Set("enterprise_id", accountGroup.EnterpriseID); err != nil { + return fmt.Errorf("Error setting enterprise_id: %s", err) + } + if err = d.Set("enterprise_path", accountGroup.EnterprisePath); err != nil { + return fmt.Errorf("Error setting enterprise_path: %s", err) + } + if err = d.Set("state", accountGroup.State); err != nil { + return fmt.Errorf("Error setting state: %s", err) + } + if err = d.Set("primary_contact_email", accountGroup.PrimaryContactEmail); err != nil { + return fmt.Errorf("Error setting primary_contact_email: %s", err) + } + if err = d.Set("created_at", accountGroup.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + if err = d.Set("created_by", accountGroup.CreatedBy); err != nil { + return fmt.Errorf("Error setting created_by: %s", err) + } + if accountGroup.UpdatedAt != nil { + if err = d.Set("updated_at", accountGroup.UpdatedAt.String()); err != nil { + return fmt.Errorf("Error setting updated_at: %s", err) + } + } + if accountGroup.UpdatedBy != nil { + if err = d.Set("updated_by", accountGroup.UpdatedBy); err != nil { + return fmt.Errorf("Error setting updated_by: %s", err) + } + } + + return nil +} + +func resourceIbmEnterpriseAccountGroupUpdate(d *schema.ResourceData, meta interface{}) error { + enterpriseManagementClient, err := meta.(ClientSession).EnterpriseManagementV1() + if err != nil { + return err + } + + updateAccountGroupOptions := &enterprisemanagementv1.UpdateAccountGroupOptions{} + + updateAccountGroupOptions.SetAccountGroupID(d.Id()) + + hasChange := false + + // if d.HasChange("parent") { + // updateAccountGroupOptions.SetParent(d.Get("parent").(string)) + // hasChange = true + // } + if d.HasChange("name") { + updateAccountGroupOptions.SetName(d.Get("name").(string)) + hasChange = true + } + if d.HasChange("primary_contact_iam_id") { + updateAccountGroupOptions.SetPrimaryContactIamID(d.Get("primary_contact_iam_id").(string)) + hasChange = true + } + + if hasChange { + response, err := enterpriseManagementClient.UpdateAccountGroupWithContext(context.TODO(), updateAccountGroupOptions) + if err != nil { + log.Printf("[DEBUG] UpdateAccountGroupWithContext failed %s\n%s", err, response) + return err + } + } + + return resourceIbmEnterpriseAccountGroupRead(d, meta) +} + +func resourceIbmEnterpriseAccountGroupDelete(d *schema.ResourceData, meta interface{}) error { + + // The Enterprise Management API does not support deleting account groups, + // so the resource is only removed from the Terraform state. + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_event_streams_topic.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_event_streams_topic.go new file mode 100644 index 00000000000..17775199899 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_event_streams_topic.go @@ -0,0 +1,316 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "strings" + + "github.com/Shopify/sarama" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + defaultReplicationFactor = 3 + defaultCleanupPolicy = "delete" + defaultRetentionBytes = 1073741824 // 1 GB + defaultRetentionMs = 86400000 // 24 hours + defaultSegmentBytes = 536870912 // 512 MB +) + +var ( + brokerVersion = sarama.V2_3_0_0 + allowedTopicConfigs = []string{ + "cleanup.policy", + "retention.ms", + "retention.bytes", + "segment.ms", + "segment.bytes", + "segment.index.bytes", + } + defaultConfigs = map[string]interface{}{ + "cleanup.policy": defaultCleanupPolicy, + "retention.ms": defaultRetentionMs, + "retention.bytes": defaultRetentionBytes, + "segment.bytes": defaultSegmentBytes, + } +) + +func resourceIBMEventStreamsTopic() *schema.Resource { + return &schema.Resource{ + Exists: resourceIBMEventStreamsTopicExists, + Create: resourceIBMEventStreamsTopicCreate, + Read: resourceIBMEventStreamsTopicRead, + Update: resourceIBMEventStreamsTopicUpdate, + Delete: resourceIBMEventStreamsTopicDelete, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + "resource_instance_id": &schema.Schema{ + Type: schema.TypeString, + Description: "The CRN of the Event Streams instance", + Required: true, + }, + "kafka_http_url": { + Type: schema.TypeString, + Computed: true, + Description: "API endpoint for interacting with Event Streams REST API", + }, + "kafka_brokers_sasl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Kafka broker addresses for interacting with Kafka native API", + }, + "name": { + Type: schema.TypeString, + Description: "The name of the topic", + Required: true, + }, + "partitions": { + Type: schema.TypeInt, + Description: "The number of partitions", + Optional: true, + Default: 1, + }, + "config": { + Type: schema.TypeMap, + Description: "The configuration parameters of a topic", + Optional: true, + }, + }, + } +} + +// clientPool maintains a Kafka admin client for each instance.
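+// NOTE: entries are currently only written by createSaramaAdminClient, which
+// always dials a fresh connection rather than reusing a pooled client. The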
+// key is instance's CRN +var clientPool = map[string]sarama.ClusterAdmin{} + +func resourceIBMEventStreamsTopicExists(d *schema.ResourceData, meta interface{}) (bool, error) { + adminClient, _, err := createSaramaAdminClient(d, meta) + if err != nil { + log.Printf("[DEBUG] resourceIBMEventStreamsTopicExists createSaramaAdminClient err %s", err) + return false, err + } + topicName := d.Get("name").(string) + topics, err := adminClient.DescribeTopics([]string{topicName}) + if err != nil || len(topics) != 1 { + log.Printf("[DEBUG] resourceIBMEventStreamsTopicExists DescribeTopics err %s", err) + return false, err + } + log.Printf("[INFO] resourceIBMEventStreamsTopicExists topic %s exists", topicName) + return true, nil +} + +func resourceIBMEventStreamsTopicCreate(d *schema.ResourceData, meta interface{}) error { + adminClient, instanceCRN, err := createSaramaAdminClient(d, meta) + if err != nil { + log.Printf("[DEBUG] resourceIBMEventStreamsTopicCreate createSaramaAdminClient err %s", err) + return err + } + topicName := d.Get("name").(string) + partitions := d.Get("partitions").(int) + config := d.Get("config").(map[string]interface{}) + topicDetail := sarama.TopicDetail{ + NumPartitions: int32(partitions), + ReplicationFactor: int16(defaultReplicationFactor), + ConfigEntries: config2TopicDetail(config), + } + err = adminClient.CreateTopic(topicName, &topicDetail, false) + if err != nil { + log.Printf("[DEBUG] resourceIBMEventStreamsTopicCreate CreateTopic err %s", err) + return err + } + log.Printf("[INFO] resourceIBMEventStreamsTopicCreate CreateTopic: topic is %s, detail is %v", topicName, topicDetail) + d.SetId(getTopicID(instanceCRN, topicName)) + return resourceIBMEventStreamsTopicRead(d, meta) +} + +func resourceIBMEventStreamsTopicRead(d *schema.ResourceData, meta interface{}) error { + adminClient, instanceCRN, err := createSaramaAdminClient(d, meta) + if err != nil { + log.Printf("[DEBUG] resourceIBMEventStreamsTopicRead createSaramaAdminClient err %s", err) + return err + } + topicID := d.Id() + topicName := getTopicName(topicID) + topics, err := adminClient.ListTopics() + if err != nil { + log.Printf("[DEBUG] resourceIBMEventStreamsTopicRead ListTopics err %s", err) + return err + } + for name, detail := range topics { + if name == topicName { + d.Set("resource_instance_id", instanceCRN) + d.Set("name", name) + d.Set("partitions", detail.NumPartitions) + if config := d.Get("config"); config != nil { + savedConfig := map[string]*string{} + for k := range config.(map[string]interface{}) { + if value, ok := detail.ConfigEntries[k]; ok { + savedConfig[k] = value + } + } + d.Set("config", topicDetail2Config(savedConfig)) + } + return nil + } + } + log.Printf("[INFO] resourceIBMEventStreamsTopicRead topic %s does not exist", topicName) + d.SetId("") + return nil +} + +func resourceIBMEventStreamsTopicUpdate(d *schema.ResourceData, meta interface{}) error { + adminClient, _, err := createSaramaAdminClient(d, meta) + if err != nil { + log.Printf("[DEBUG] resourceIBMEventStreamsTopicUpdate createSaramaAdminClient err %s", err) + return err + } + topicName := d.Get("name").(string) + if d.HasChange("partitions") { + oi, ni := d.GetChange("partitions") + oldPartitions := oi.(int) + newPartitions := ni.(int) + log.Printf("[INFO]resourceIBMEventStreamsTopicUpdate Updating partitions from %d to %d", oldPartitions, newPartitions) + err = adminClient.CreatePartitions(topicName, int32(newPartitions), nil, false) + if err != nil { + log.Printf("[DEBUG]resourceIBMEventStreamsTopicUpdate CreatePartitions err
%s", err) + return err + } + d.Set("partitions", int32(newPartitions)) + log.Printf("[INFO]resourceIBMEventStreamsTopicUpdate partitions is set to %d", newPartitions) + } + if d.HasChange("config") { + config := d.Get("config").(map[string]interface{}) + configEntries := config2TopicDetail(config) + err = adminClient.AlterConfig(sarama.TopicResource, topicName, configEntries, false) + if err != nil { + log.Printf("[DEBUG]resourceIBMEventStreamsTopicUpdate AlterConfig err %s", err) + return err + } + d.Set("config", topicDetail2Config(configEntries)) + log.Printf("[INFO]resourceIBMEventStreamsTopicUpdate config is set to %v", topicDetail2Config(configEntries)) + } + return resourceIBMEventStreamsTopicRead(d, meta) +} + +func resourceIBMEventStreamsTopicDelete(d *schema.ResourceData, meta interface{}) error { + adminClient, _, err := createSaramaAdminClient(d, meta) + if err != nil { + log.Printf("[DEBUG] resourceIBMEventStreamsTopicDelete createSaramaAdminClient err %s", err) + return err + } + topicName := d.Get("name").(string) + err = adminClient.DeleteTopic(topicName) + if err != nil { + log.Printf("[DEBUG] resourceIBMEventStreamsTopicDelete DeleteTopic err %s", err) + return err + } + d.SetId("") + log.Printf("[INFO]resourceIBMEventStreamsTopicDelete topic %s deleted", topicName) + return nil +} + +func createSaramaAdminClient(d *schema.ResourceData, meta interface{}) (sarama.ClusterAdmin, string, error) { + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + log.Printf("[DEBUG] createSaramaAdminClient BluemixSession err %s", err) + return nil, "", err + } + apiKey := bxSession.Config.BluemixAPIKey + if len(apiKey) == 0 { + log.Printf("[DEBUG] createSaramaAdminClient BluemixAPIKey is empty") + return nil, "", fmt.Errorf("failed to get IBM cloud API key") + } + rsConClient, err := meta.(ClientSession).ResourceControllerAPI() + if err != nil { + log.Printf("[DEBUG] createSaramaAdminClient ResourceControllerAPI err %s", err) + return nil, "", err + } + rcAPI := rsConClient.ResourceServiceInstance() + instanceCRN := d.Get("resource_instance_id").(string) + if len(instanceCRN) == 0 { + topicID := d.Id() + if len(topicID) == 0 || !strings.Contains(topicID, ":") { + log.Printf("[DEBUG] createSaramaAdminClient resource_instance_id is missing") + return nil, "", fmt.Errorf("resource_instance_id is required") + } + instanceCRN = getInstanceCRN(topicID) + } + instance, err := rcAPI.GetInstance(instanceCRN) + if err != nil { + log.Printf("[DEBUG] createSaramaAdminClient GetInstance err %s", err) + return nil, "", err + } + if instance.Extensions == nil { + log.Printf("[DEBUG] createSaramaAdminClient instance %s extension is nil", instance.ID) + return nil, "", fmt.Errorf("instance %s extension is nil", instance.ID) + } + adminURL := instance.Extensions["kafka_http_url"].(string) + d.Set("kafka_http_url", adminURL) + log.Printf("[INFO] createSaramaAdminClient kafka_http_url is set to %s", adminURL) + brokerAddress := expandStringList(instance.Extensions["kafka_brokers_sasl"].([]interface{})) + d.Set("kafka_brokers_sasl", brokerAddress) + log.Printf("[INFO] createSaramaAdminClient kafka_brokers_sasl is set to %s", brokerAddress) + tenantID := strings.TrimPrefix(strings.Split(adminURL, ".")[0], "https://") + + config := sarama.NewConfig() + config.ClientID, _ = os.Hostname() + config.Net.SASL.Enable = true + if tenantID != "" && tenantID != "admin" { + config.Net.SASL.AuthIdentity = tenantID + } + config.Net.SASL.User = "token" + config.Net.SASL.Password = apiKey + 
config.Net.TLS.Enable = true + config.Version = brokerVersion + adminClient, err := sarama.NewClusterAdmin(brokerAddress, config) + if err != nil { + log.Printf("[DEBUG] createSaramaAdminClient NewClusterAdmin err %s", err) + return nil, "", err + } + clientPool[instanceCRN] = adminClient + log.Printf("[INFO] createSaramaAdminClient instance %s 's client is initialized", instanceCRN) + return adminClient, instanceCRN, nil +} + +func topicDetail2Config(topicConfigEntries map[string]*string) map[string]*string { + configs := map[string]*string{} + for key, value := range topicConfigEntries { + if indexOf(key, allowedTopicConfigs) != -1 { + configs[key] = value + } + } + return configs +} + +func config2TopicDetail(config map[string]interface{}) map[string]*string { + configEntries := make(map[string]*string) + for key, value := range config { + switch value := value.(type) { + case string: + configEntries[key] = &value + } + } + return configEntries +} + +func getTopicID(instanceCRN string, topicName string) string { + crnSegments := strings.Split(instanceCRN, ":") + crnSegments[8] = "topic" + crnSegments[9] = topicName + return strings.Join(crnSegments, ":") +} + +func getTopicName(topicID string) string { + return strings.Split(topicID, ":")[9] +} + +func getInstanceCRN(topicID string) string { + crnSegments := strings.Split(topicID, ":") + crnSegments[8] = "" + crnSegments[9] = "" + return strings.Join(crnSegments, ":") +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_firewall.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_firewall.go new file mode 100644 index 00000000000..ad56ba59c5d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_firewall.go @@ -0,0 +1,384 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strconv" + + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + FwHardwareDedicatedPackageType = "ADDITIONAL_SERVICES_FIREWALL" + + vlanMask = "firewallNetworkComponents,networkVlanFirewall.billingItem.orderItem.order.id,dedicatedFirewallFlag" + + ",firewallGuestNetworkComponents,firewallInterfaces,firewallRules,highAvailabilityFirewallFlag" + fwMask = "id,datacenter,primaryIpAddress,networkVlan.highAvailabilityFirewallFlag,managementCredentials,tagReferences[id,tag[name]]" + multiVlanMask = "id,name,networkFirewall[id,customerManagedFlag,datacenter.name,billingItem[orderItem.order.id,activeChildren[categoryCode, description,id]],managementCredentials,firewallType],publicIpAddress.ipAddress,publicIpv6Address.ipAddress,publicVlan[id,primaryRouter.hostname],privateVlan[id,primaryRouter.hostname],privateIpAddress.ipAddress,insideVlans[id],memberCount,status.keyName" +) + +func resourceIBMFirewall() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMFirewallCreate, + Read: resourceIBMFirewallRead, + Update: resourceIBMFirewallUpdate, + Delete: resourceIBMFirewallDelete, + Exists: resourceIBMFirewallExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "firewall_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "HARDWARE_FIREWALL_DEDICATED", + ValidateFunc: validateAllowedStringValue([]string{ + "HARDWARE_FIREWALL_DEDICATED", + "FORTIGATE_SECURITY_APPLIANCE", + }), + Description: "Firewall type", + }, + + "ha_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: "set to true if High availability is enabled", + }, + "public_vlan_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Public VLAN ID", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags for the firewall", + }, + "location": { + Type: schema.TypeString, + Computed: true, + Description: "Location info", + }, + "primary_ip": { + Type: schema.TypeString, + Computed: true, + Description: "Primary IP address", + }, + "username": { + Type: schema.TypeString, + Computed: true, + Description: "User name", + }, + "password": { + Type: schema.TypeString, + Computed: true, + Description: "Password for the given User", + }, + }, + } +} + +func resourceIBMFirewallCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + keyName := "HARDWARE_FIREWALL_DEDICATED" + firewallType := d.Get("firewall_type").(string) + haEnabled := d.Get("ha_enabled").(bool) + if haEnabled { + if firewallType == "HARDWARE_FIREWALL_DEDICATED" { + keyName = "HARDWARE_FIREWALL_HIGH_AVAILABILITY" + } else { + keyName = "FORTIGATE_SECURITY_APPLIANCE_HIGH_AVAILABILITY" + } + } else { + keyName = firewallType + } + + publicVlanId := d.Get("public_vlan_id").(int) + + pkg, err := product.GetPackageByType(sess, FwHardwareDedicatedPackageType) + if err != nil { + return err + } + + // Get all 
prices for ADDITIONAL_SERVICES_FIREWALL with the given capacity + productItems, err := product.GetPackageProducts(sess, *pkg.Id) + if err != nil { + return err + } + + // Select only those product items with a matching keyname + targetItems := []datatypes.Product_Item{} + for _, item := range productItems { + if *item.KeyName == keyName { + targetItems = append(targetItems, item) + } + } + + if len(targetItems) == 0 { + return fmt.Errorf("No product items matching %s could be found", keyName) + } + + productOrderContainer := datatypes.Container_Product_Order_Network_Protection_Firewall_Dedicated{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Prices: []datatypes.Product_Item_Price{ + { + Id: targetItems[0].Prices[0].Id, + }, + }, + Quantity: sl.Int(1), + }, + VlanId: sl.Int(publicVlanId), + } + + log.Println("[INFO] Creating dedicated hardware firewall") + + receipt, err := services.GetProductOrderService(sess.SetRetries(0)). + PlaceOrder(&productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall: %s", err) + } + vlan, _, _, err := findDedicatedFirewallByOrderId(sess, *receipt.OrderId, d) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall: %s", err) + } + + id := *vlan.NetworkVlanFirewall.Id + d.SetId(fmt.Sprintf("%d", id)) + d.Set("ha_enabled", *vlan.HighAvailabilityFirewallFlag) + d.Set("public_vlan_id", *vlan.Id) + + log.Printf("[INFO] Firewall ID: %s", d.Id()) + + // Set tags + tags := getTags(d) + if tags != "" { + //Try setting only when it is non empty as we are creating Firewall + err = setFirewallTags(id, tags, meta) + if err != nil { + return err + } + } + + return resourceIBMFirewallRead(d, meta) +} + +func resourceIBMFirewallRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + fwID, _ := strconv.Atoi(d.Id()) + + fw, err := services.GetNetworkVlanFirewallService(sess). + Id(fwID). + Mask(fwMask). 
+ GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving firewall information: %s", err) + } + + d.Set("public_vlan_id", *fw.NetworkVlan.Id) + d.Set("ha_enabled", *fw.NetworkVlan.HighAvailabilityFirewallFlag) + d.Set("location", *fw.Datacenter.Name) + d.Set("primary_ip", *fw.PrimaryIpAddress) + if fw.ManagementCredentials != nil { + d.Set("username", *fw.ManagementCredentials.Username) + d.Set("password", *fw.ManagementCredentials.Password) + } + + tagRefs := fw.TagReferences + tagRefsLen := len(tagRefs) + if tagRefsLen > 0 { + tags := make([]string, tagRefsLen, tagRefsLen) + for i, tagRef := range tagRefs { + tags[i] = *tagRef.Tag.Name + } + d.Set("tags", tags) + } + + return nil +} + +func resourceIBMFirewallUpdate(d *schema.ResourceData, meta interface{}) error { + + fwID, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid firewall ID, must be an integer: %s", err) + } + + // Update tags + if d.HasChange("tags") { + tags := getTags(d) + err := setFirewallTags(fwID, tags, meta) + if err != nil { + return err + } + } + return resourceIBMFirewallRead(d, meta) +} + +func resourceIBMFirewallDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + fwService := services.GetNetworkVlanFirewallService(sess) + + fwID, _ := strconv.Atoi(d.Id()) + + // Get billing item associated with the firewall + billingItem, err := fwService.Id(fwID).GetBillingItem() + + if err != nil { + return fmt.Errorf("Error while looking up billing item associated with the firewall: %s", err) + } + + if billingItem.Id == nil { + return fmt.Errorf("Error while looking up billing item associated with the firewall: No billing item for ID:%d", fwID) + } + + success, err := services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService() + if err != nil { + return err + } + + if !success { + return fmt.Errorf("SoftLayer reported an unsuccessful cancellation") + } + + return nil +} + +func resourceIBMFirewallExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + fwID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + _, err = services.GetNetworkVlanFirewallService(sess). + Id(fwID). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error retrieving firewall information: %s", err) + } + + return true, nil +} + +func findDedicatedFirewallByOrderId(sess *session.Session, orderId int, d *schema.ResourceData) (datatypes.Network_Vlan, datatypes.Network_Gateway, datatypes.Product_Upgrade_Request, error) { + filterPath := "networkVlans.networkVlanFirewall.billingItem.orderItem.order.id" + multivlanfilterpath := "networkGateways.networkFirewall.billingItem.orderItem.order.id" + var vlans []datatypes.Network_Vlan + var err error + var firewalls []datatypes.Network_Gateway + var upgraderequest datatypes.Product_Upgrade_Request + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + + if ok := d.HasChange("addon_configuration") && !d.IsNewResource(); ok { + fwID, _ := strconv.Atoi(d.Id()) + upgraderequest, err = services.GetNetworkVlanFirewallService(sess). + Id(fwID). + Mask("status"). 
+ GetUpgradeRequest() + if err != nil { + return datatypes.Product_Upgrade_Request{}, "", err + } + } else if _, ok := d.GetOk("pod"); ok { + firewalls, err = services.GetAccountService(sess). + Filter(filter.Build( + filter.Path(multivlanfilterpath). + Eq(strconv.Itoa(orderId)))). + Mask(multiVlanMask). + GetNetworkGateways() + if err != nil { + return datatypes.Network_Gateway{}, "", err + } + } else { + vlans, err = services.GetAccountService(sess). + Filter(filter.Build( + filter.Path(filterPath). + Eq(strconv.Itoa(orderId)))). + Mask(vlanMask). + GetNetworkVlans() + if err != nil { + return datatypes.Network_Vlan{}, "", err + } + } + status, ok := sl.GrabOk(upgraderequest, "Status.Name") + if ok && status == "Complete" { + return upgraderequest, "complete", nil + } else if len(vlans) == 1 { + return vlans[0], "complete", nil + } else if len(firewalls) == 1 { + return firewalls[0], "complete", nil + } else if len(vlans) == 0 || len(firewalls) == 0 || *upgraderequest.Status.Name != "Complete" { + return datatypes.Network_Vlan{}, "pending", nil + } + return nil, "", fmt.Errorf("Expected one dedicated firewall: %s", err) + }, + Timeout: 2 * time.Hour, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + NotFoundChecks: 24 * 60, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Vlan{}, datatypes.Network_Gateway{}, datatypes.Product_Upgrade_Request{}, err + } + if ok := d.HasChange("addon_configuration") && !d.IsNewResource(); ok { + if result, ok := pendingResult.(datatypes.Product_Upgrade_Request); ok { + return datatypes.Network_Vlan{}, datatypes.Network_Gateway{}, result, nil + } + return datatypes.Network_Vlan{}, datatypes.Network_Gateway{}, datatypes.Product_Upgrade_Request{}, + fmt.Errorf("Something went wrong while upgrading '%d'", orderId) + } else if _, ok := d.GetOk("pod"); ok { + if result, ok := pendingResult.(datatypes.Network_Gateway); ok { + return datatypes.Network_Vlan{}, result, datatypes.Product_Upgrade_Request{}, nil + } + return datatypes.Network_Vlan{}, datatypes.Network_Gateway{}, datatypes.Product_Upgrade_Request{}, + fmt.Errorf("Cannot find Dedicated Firewall with order id '%d'", orderId) + } + var result, ok = pendingResult.(datatypes.Network_Vlan) + + if ok { + return result, datatypes.Network_Gateway{}, datatypes.Product_Upgrade_Request{}, nil + } + + return datatypes.Network_Vlan{}, datatypes.Network_Gateway{}, datatypes.Product_Upgrade_Request{}, + fmt.Errorf("Cannot find Dedicated Firewall with order id '%d'", orderId) +} + +func setFirewallTags(id int, tags string, meta interface{}) error { + service := services.GetNetworkVlanFirewallService(meta.(ClientSession).SoftLayerSession()) + _, err := service.Id(id).SetTags(sl.String(tags)) + if err != nil { + return fmt.Errorf("Could not set tags on firewall %d", id) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_firewall_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_firewall_policy.go new file mode 100644 index 00000000000..7521db04017 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_firewall_policy.go @@ -0,0 +1,353 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "net" + "strconv" + "strings" + + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + aclMask = "name,firewallInterfaces[name,firewallContextAccessControlLists]" +) + +func resourceIBMFirewallPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMFirewallPolicyCreate, + Read: resourceIBMFirewallPolicyRead, + Update: resourceIBMFirewallPolicyUpdate, + Delete: resourceIBMFirewallPolicyDelete, + Exists: resourceIBMFirewallPolicyExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "firewall_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Firewall ID", + }, + + "rules": { + Type: schema.TypeList, + Required: true, + Description: "Policy rules info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeString, + Required: true, + }, + "src_ip_address": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + newSrcIpAddress := net.ParseIP(n) + return newSrcIpAddress != nil && (newSrcIpAddress.String() == net.ParseIP(o).String()) + }, + }, + "src_ip_cidr": { + Type: schema.TypeInt, + Required: true, + }, + "dst_ip_address": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + newDstIpAddress := net.ParseIP(n) + return newDstIpAddress != nil && (newDstIpAddress.String() == net.ParseIP(o).String()) + }, + }, + "dst_ip_cidr": { + Type: schema.TypeInt, + Required: true, + }, + // ICMP, GRE, AH, and ESP don't require port ranges. 
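+ // For example, a rule permitting HTTPS traffic would use protocol = "tcp"
+ // with dst_port_range_start/dst_port_range_end = 443, while an ICMP rule
+ // can leave both port fields unset.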
+ "dst_port_range_start": { + Type: schema.TypeInt, + Optional: true, + }, + "dst_port_range_end": { + Type: schema.TypeInt, + Optional: true, + }, + "protocol": { + Type: schema.TypeString, + Required: true, + }, + "notes": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags", + }, + }, + } +} + +func prepareRules(d *schema.ResourceData) []datatypes.Network_Firewall_Update_Request_Rule { + ruleList := d.Get("rules").([]interface{}) + rules := make([]datatypes.Network_Firewall_Update_Request_Rule, 0) + for i, ruleItem := range ruleList { + ruleMap := ruleItem.(map[string]interface{}) + var rule datatypes.Network_Firewall_Update_Request_Rule + rule.OrderValue = sl.Int(i + 1) + rule.Action = sl.String(ruleMap["action"].(string)) + rule.SourceIpAddress = sl.String(ruleMap["src_ip_address"].(string)) + rule.SourceIpCidr = sl.Int(ruleMap["src_ip_cidr"].(int)) + rule.DestinationIpAddress = sl.String(ruleMap["dst_ip_address"].(string)) + rule.DestinationIpCidr = sl.Int(ruleMap["dst_ip_cidr"].(int)) + + if ruleMap["dst_port_range_start"] != nil { + rule.DestinationPortRangeStart = sl.Int(ruleMap["dst_port_range_start"].(int)) + } + if ruleMap["dst_port_range_end"] != nil { + rule.DestinationPortRangeEnd = sl.Int(ruleMap["dst_port_range_end"].(int)) + } + + rule.Protocol = sl.String(ruleMap["protocol"].(string)) + if len(ruleMap["notes"].(string)) > 0 { + rule.Notes = sl.String(ruleMap["notes"].(string)) + } + + if strings.Contains(*rule.SourceIpAddress, ":") || strings.Contains(*rule.DestinationIpAddress, ":") { + rule.Version = sl.Int(6) + } + rules = append(rules, rule) + } + return rules +} + +func getFirewallContextAccessControlListId(fwId int, sess *session.Session) (int, error) { + service := services.GetNetworkVlanFirewallService(sess) + vlan, err := service.Id(fwId).Mask(aclMask).GetNetworkVlans() + + if err != nil { + return 0, err + } + + for _, fwInterface := range vlan[0].FirewallInterfaces { + if fwInterface.Name != nil && + *fwInterface.Name == "outside" && + len(fwInterface.FirewallContextAccessControlLists) > 0 && + fwInterface.FirewallContextAccessControlLists[0].Id != nil { + return *fwInterface.FirewallContextAccessControlLists[0].Id, nil + } + } + return 0, fmt.Errorf("No firewallContextAccessControlListId.") +} + +func resourceIBMFirewallPolicyCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + fwId := d.Get("firewall_id").(int) + rules := prepareRules(d) + + fwContextACLId, err := getFirewallContextAccessControlListId(fwId, sess) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall rules: %s", err) + } + + ruleTemplate := datatypes.Network_Firewall_Update_Request{ + FirewallContextAccessControlListId: sl.Int(fwContextACLId), + Rules: rules, + } + + log.Println("[INFO] Creating dedicated hardware firewall rules") + + _, err = services.GetNetworkFirewallUpdateRequestService(sess.SetRetries(0)).CreateObject(&ruleTemplate) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall rules: %s", err) + } + + d.SetId(strconv.Itoa(fwId)) + + log.Printf("[INFO] Firewall rules ID: %s", d.Id()) + log.Printf("[INFO] Wait one minute for applying the rules.") + time.Sleep(time.Minute) + + return resourceIBMFirewallPolicyRead(d, meta) +} + +func resourceIBMFirewallPolicyRead(d 
*schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + fwRulesID, _ := strconv.Atoi(d.Id()) + + fw, err := services.GetNetworkVlanFirewallService(sess). + Id(fwRulesID). + Mask("rules"). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving firewall rules: %s", err) + } + + rules := make([]map[string]interface{}, 0, len(fw.Rules)) + for _, rule := range fw.Rules { + r := make(map[string]interface{}) + r["action"] = *rule.Action + r["src_ip_address"] = *rule.SourceIpAddress + r["src_ip_cidr"] = *rule.SourceIpCidr + r["dst_ip_address"] = *rule.DestinationIpAddress + r["dst_ip_cidr"] = *rule.DestinationIpCidr + if rule.DestinationPortRangeStart != nil { + r["dst_port_range_start"] = *rule.DestinationPortRangeStart + } + if rule.DestinationPortRangeEnd != nil { + r["dst_port_range_end"] = *rule.DestinationPortRangeEnd + } + r["protocol"] = *rule.Protocol + //Check if notes is not nil + if rule.Notes != nil { + r["notes"] = *rule.Notes + } + rules = append(rules, r) + } + + d.Set("firewall_id", fwRulesID) + d.Set("rules", rules) + + return nil +} + +func appendAnyOpenRule(rules []datatypes.Network_Firewall_Update_Request_Rule, protocol string) []datatypes.Network_Firewall_Update_Request_Rule { + ruleAnyOpen := datatypes.Network_Firewall_Update_Request_Rule{ + OrderValue: sl.Int(len(rules) + 1), + Action: sl.String("permit"), + SourceIpAddress: sl.String("any"), + DestinationIpAddress: sl.String("any"), + DestinationPortRangeStart: sl.Int(1), + DestinationPortRangeEnd: sl.Int(65535), + Protocol: sl.String(protocol), + Notes: sl.String("terraform-default-anyopen-" + protocol), + } + ruleAnyOpenIpv6 := datatypes.Network_Firewall_Update_Request_Rule{ + OrderValue: sl.Int(len(rules) + 1), + Action: sl.String("permit"), + SourceIpAddress: sl.String("any"), + DestinationIpAddress: sl.String("any"), + DestinationPortRangeStart: sl.Int(1), + DestinationPortRangeEnd: sl.Int(65535), + Protocol: sl.String(protocol), + Notes: sl.String("terraform-default-anyopen-" + protocol + "-ipv6"), + Version: sl.Int(6), + } + + return append(rules, ruleAnyOpen, ruleAnyOpenIpv6) +} + +func resourceIBMFirewallPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + fwId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid firewall ID, must be an integer: %s", err) + } + rules := prepareRules(d) + + fwContextACLId, err := getFirewallContextAccessControlListId(fwId, sess) + if err != nil { + return fmt.Errorf("Error during updating of dedicated hardware firewall rules: %s", err) + } + + ruleTemplate := datatypes.Network_Firewall_Update_Request{ + FirewallContextAccessControlListId: sl.Int(fwContextACLId), + Rules: rules, + } + + log.Println("[INFO] Updating dedicated hardware firewall rules") + + _, err = services.GetNetworkFirewallUpdateRequestService(sess.SetRetries(0)).CreateObject(&ruleTemplate) + if err != nil { + return fmt.Errorf("Error during updating of dedicated hardware firewall rules: %s", err) + } + time.Sleep(time.Minute) + + return resourceIBMFirewallPolicyRead(d, meta) +} + +func resourceIBMFirewallPolicyDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + fwId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid firewall ID, must be an integer: %s", err) + } + + fwContextACLId, err := getFirewallContextAccessControlListId(fwId, sess) + if err != nil { + return 
fmt.Errorf("Error during deleting of dedicated hardware firewall rules: %s", err) + } + + ruleTemplate := datatypes.Network_Firewall_Update_Request{ + FirewallContextAccessControlListId: sl.Int(fwContextACLId), + } + + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "tcp") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "udp") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "icmp") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "gre") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "pptp") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "ah") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "esp") + + log.Println("[INFO] Deleting dedicated hardware firewall rules") + + _, err = services.GetNetworkFirewallUpdateRequestService(sess.SetRetries(0)).CreateObject(&ruleTemplate) + if err != nil { + return fmt.Errorf("Error during deleting of dedicated hardware firewall rules: %s", err) + } + time.Sleep(time.Minute) + + return nil +} + +func resourceIBMFirewallPolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + fwRulesID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + fw, err := services.GetNetworkVlanFirewallService(sess). + Id(fwRulesID). + Mask("rules"). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving firewall rules: %s", err) + } + + if len(fw.Rules) == 0 { + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_firewall_shared.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_firewall_shared.go new file mode 100644 index 00000000000..526a03cb006 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_firewall_shared.go @@ -0,0 +1,286 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + FwHardwarePackageType = "ADDITIONAL_SERVICES_FIREWALL" +) + +func resourceIBMFirewallShared() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMFirewallSharedCreate, + Read: resourceIBMFirewallSharedRead, + Delete: resourceIBMFirewallSharedDelete, + Exists: resourceIBMFirewallSharedExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "billing_item_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Billing Item ID", + }, + "firewall_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAllowedStringValue([]string{"10MBPS_HARDWARE_FIREWALL", "20MBPS_HARDWARE_FIREWALL", "100MBPS_HARDWARE_FIREWALL", "1000MBPS_HARDWARE_FIREWALL", "200MBPS_HARDWARE_FIREWALL", "2000MBPS_HARDWARE_FIREWALL"}), + Description: "Firewall type", + }, + "virtual_instance_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"hardware_instance_id"}, + Description: "Virtual instance ID", + }, + "hardware_instance_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"virtual_instance_id"}, + Description: "Hardware instance ID", + }, + }, + } +} + +// keyName is in between:[10MBPS_HARDWARE_FIREWALL, 20MBPS_HARDWARE_FIREWALL, +// 100MBPS_HARDWARE_FIREWALL, 1000MBPS_HARDWARE_FIREWALL] +func resourceIBMFirewallSharedCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + keyName := d.Get("firewall_type").(string) + + var virtualId, hardwareId int + if vID, ok := d.GetOk("virtual_instance_id"); ok { + virtualId = vID.(int) + } + + if hID, ok := d.GetOk("hardware_instance_id"); ok { + hardwareId = hID.(int) + } + + if virtualId == 0 && hardwareId == 0 { + return fmt.Errorf("Provide either `virtual_instance_id` or `hardware_instance_id`") + } + + //var productOrderContainer *string + pkg, err := product.GetPackageByType(sess, FwHardwarePackageType) + if err != nil { + return err + } + + // Get all prices for ADDITIONAL_SERVICES_FIREWALL with the given capacity + productItems, err := product.GetPackageProducts(sess, *pkg.Id) + if err != nil { + return err + } + + // Select only those product items with a matching keyname + targetItems := []datatypes.Product_Item{} + for _, item := range productItems { + if *item.KeyName == keyName { + targetItems = append(targetItems, item) + } + } + + if len(targetItems) == 0 { + return fmt.Errorf("No product items matching %s could be found", keyName) + } + + masked := "id,firewallServiceComponent[id,status]" + if virtualId > 0 { + productOrderContainer := datatypes.Container_Product_Order_Network_Protection_Firewall{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Prices: []datatypes.Product_Item_Price{ + { + Id: targetItems[0].Prices[0].Id, + }, + }, + Quantity: sl.Int(1), + VirtualGuests: []datatypes.Virtual_Guest{{ + Id: 
sl.Int(virtualId), + }, + }, + }, + } + _, err := services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder(&productOrderContainer, sl.Bool(false)) + if err != nil { + return err + } + + log.Printf("[INFO] Wait 30 seconds before fetching the firewall/device.") + time.Sleep(time.Second * 30) + service := services.GetVirtualGuestService(sess) + stateConf := &resource.StateChangeConf{ + Target: []string{"completed"}, + Pending: []string{"pending"}, + Refresh: func() (interface{}, string, error) { + result, err := service.Id(virtualId).Mask(masked).GetObject() + if err != nil { + return nil, "", err + } + status, ok := sl.GrabOk(result, "FirewallServiceComponent.Status") + if ok && status == "bypass" { + return result, "completed", nil + } + return result, "pending", nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + NotFoundChecks: 24 * 60, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + result, err := service.Id(virtualId).Mask(masked).GetObject() + if err != nil { + return fmt.Errorf("Error during creation of hardware firewall: %s", err) + } + idd := *result.FirewallServiceComponent.Id + log.Print(idd) + d.SetId(fmt.Sprintf("%d", idd)) + + } + if hardwareId > 0 { + productOrderContainer := datatypes.Container_Product_Order_Network_Protection_Firewall{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Prices: []datatypes.Product_Item_Price{ + { + Id: targetItems[0].Prices[0].Id, + }, + }, + Quantity: sl.Int(1), + Hardware: []datatypes.Hardware{{ + Id: sl.Int(hardwareId), + }, + }, + }, + } + _, err := services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder(&productOrderContainer, sl.Bool(false)) + if err != nil { + return err + } + + log.Printf("[INFO] Wait 30 seconds before fetching the firewall/device.") + time.Sleep(time.Second * 30) + + service := services.GetHardwareService(sess) + stateConf := &resource.StateChangeConf{ + Target: []string{"completed"}, + Pending: []string{"pending"}, + Refresh: func() (interface{}, string, error) { + result, err := service.Id(hardwareId).Mask(masked).GetObject() + if err != nil { + return nil, "", err + } + status, ok := sl.GrabOk(result, "FirewallServiceComponent.Status") + if ok && status == "bypass" { + return result, "completed", nil + } + return result, "pending", nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + NotFoundChecks: 24 * 60, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + resultNew, err := service.Id(hardwareId).Mask(masked).GetObject() + if err != nil { + return fmt.Errorf("Error during creation of hardware firewall: %s", err) + } + idd2 := *resultNew.FirewallServiceComponent.Id + + d.SetId(fmt.Sprintf("%d", idd2)) + log.Print(idd2) + + } + log.Println("[INFO] Creating hardware firewall shared") + + return resourceIBMFirewallSharedRead(d, meta) +} + +func resourceIBMFirewallSharedRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + firewall_type := (d.Get("firewall_type").(string)) + d.Set("firewall_type", firewall_type) + + fservice := services.GetNetworkComponentFirewallService(sess) + + fwID, _ := strconv.Atoi(d.Id()) + + data, err := fservice.Id(fwID).Mask("billingItem.id").GetObject() + if err != nil { + return fmt.Errorf("Error retrieving hardware firewall information: %s", err) + } + d.Set("billing_item_id", *data.BillingItem.Id) + + 
return nil +} + +// detach hardware firewall from a particular machine +func resourceIBMFirewallSharedDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + idd2 := (d.Get("billing_item_id")).(int) + + success, err := services.GetBillingItemService(sess).Id(idd2).CancelService() + log.Print(success) + if err != nil { + return err + } + + if !success { + return fmt.Errorf("SoftLayer reported an unsuccessful cancellation") + } + return nil +} + +// exists method +func resourceIBMFirewallSharedExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + fservice := services.GetNetworkComponentFirewallService(sess) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid firewall ID, must be an integer: %s", err) + } + response, err := fservice.Id(id).GetObject() + + if err != nil { + log.Printf("error fetching the firewall resource: %s", err) + return false, err + } + log.Print(response) + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_action.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_action.go new file mode 100644 index 00000000000..d16657c6143 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_action.go @@ -0,0 +1,571 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "net/http" + "os" + "strings" + + "github.com/apache/openwhisk-client-go/whisk" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + funcActionName = "name" + funcActionNamespace = "namespace" + funcActionUsrDefAnnots = "user_defined_annotations" + funcActionUsrDefParams = "user_defined_parameters" +) + +func resourceIBMFunctionAction() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMFunctionActionCreate, + Read: resourceIBMFunctionActionRead, + Update: resourceIBMFunctionActionUpdate, + Delete: resourceIBMFunctionActionDelete, + Exists: resourceIBMFunctionActionExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + funcActionName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of action.", + ValidateFunc: InvokeValidator("ibm_function_action", funcActionName), + }, + funcActionNamespace: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "IBM Cloud function namespace.", + ValidateFunc: InvokeValidator("ibm_function_action", funcActionNamespace), + }, + "limits": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "timeout": { + Type: schema.TypeInt, + Optional: true, + Default: 60000, + Description: "The timeout LIMIT in milliseconds after which the action is terminated.", + }, + "memory": { + Type: schema.TypeInt, + Optional: true, + Default: 256, + Description: "The maximum memory LIMIT in MB for the action (default 256).", + }, + "log_size": { + Type: schema.TypeInt, + Optional: true, + Default: 10, + Description: "The maximum log size LIMIT in MB for the action.", + }, + }, + }, + }, + "exec": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: "Execution info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image": { + Type: schema.TypeString, + Optional: true, + Description: "Container image name when kind is 'blackbox'.", + ConflictsWith:
[]string{"exec.0.components"}, + }, + "init": { + Type: schema.TypeString, + Optional: true, + Description: "Optional zipfile reference.", + ConflictsWith: []string{"exec.0.image", "exec.0.components"}, + }, + "code": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "The code to execute.", + ConflictsWith: []string{"exec.0.components", "exec.0.code_path"}, + }, + "code_path": { + Type: schema.TypeString, + Optional: true, + Description: "The file path of code to execute.", + ConflictsWith: []string{"exec.0.components", "exec.0.code"}, + }, + "kind": { + Type: schema.TypeString, + Required: true, + Description: "The type of action. Possible values can be found here (https://cloud.ibm.com/docs/openwhisk?topic=cloud-functions-runtimes)", + }, + "main": { + Type: schema.TypeString, + Optional: true, + Description: "The name of the action entry point (function or fully-qualified method name when applicable).", + ConflictsWith: []string{"exec.0.image", "exec.0.components"}, + }, + "components": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The List of fully qualified action.", + ConflictsWith: []string{"exec.0.image", "exec.0.code", "exec.0.code_path"}, + }, + }, + }, + }, + "publish": { + Type: schema.TypeBool, + Optional: true, + Description: "Action visibilty.", + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: "Semantic version of the item.", + }, + funcActionUsrDefAnnots: { + Type: schema.TypeString, + Optional: true, + Default: "[]", + Description: "Annotation values in KEY VALUE format.", + ValidateFunc: InvokeValidator("ibm_function_action", funcActionUsrDefAnnots), + DiffSuppressFunc: suppressEquivalentJSON, + StateFunc: func(v interface{}) string { + json, _ := normalizeJSONString(v) + return json + }, + }, + funcActionUsrDefParams: { + Type: schema.TypeString, + Optional: true, + Default: "[]", + Description: "Parameters values in KEY VALUE format. 
Parameter bindings included in the context passed to the action.", + ValidateFunc: InvokeValidator("ibm_function_action", funcActionUsrDefParams), + DiffSuppressFunc: suppressEquivalentJSON, + StateFunc: func(v interface{}) string { + json, _ := normalizeJSONString(v) + return json + }, + }, + "annotations": { + Type: schema.TypeString, + Computed: true, + Description: "All annotations set on action by user and those set by the IBM Cloud Function backend/API.", + }, + "parameters": { + Type: schema.TypeString, + Computed: true, + Description: "All parameters set on action by user and those set by the IBM Cloud Function backend/API.", + }, + "action_id": { + Type: schema.TypeString, + Computed: true, + }, + "target_endpoint_url": { + Type: schema.TypeString, + Computed: true, + Description: "Action target endpoint URL.", + }, + }, + } +} + +func resourceIBMFuncActionValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcActionName, + ValidateFunctionIdentifier: ValidateRegexp, + Type: TypeString, + Regexp: `^[^/*][a-zA-Z0-9/_@.-]`, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcActionNamespace, + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcActionUsrDefAnnots, + ValidateFunctionIdentifier: ValidateJSONString, + Type: TypeString, + Default: "[]", + Optional: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcActionUsrDefParams, + ValidateFunctionIdentifier: ValidateJSONString, + Type: TypeString, + Optional: true}) + + ibmFuncActionResourceValidator := ResourceValidator{ResourceName: "ibm_function_action", Schema: validateSchema} + return &ibmFuncActionResourceValidator +} + +func resourceIBMFunctionActionCreate(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + namespace := d.Get("namespace").(string) + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + actionService := wskClient.Actions + name := d.Get("name").(string) + + var qualifiedName = new(QualifiedName) + + if qualifiedName, err = NewQualifiedName(name); err != nil { + return NewQualifiedNameError(name, err) + } + + payload := whisk.Action{ + Name: qualifiedName.GetEntityName(), + Namespace: namespace, + } + + exec := d.Get("exec").([]interface{}) + payload.Exec = expandExec(exec) + + userDefinedAnnotations := d.Get("user_defined_annotations").(string) + payload.Annotations, err = expandAnnotations(userDefinedAnnotations) + if err != nil { + return err + } + + userDefinedParameters := d.Get("user_defined_parameters").(string) + payload.Parameters, err = expandParameters(userDefinedParameters) + if err != nil { + return err + } + + if v, ok := d.GetOk("limits"); ok { + payload.Limits = expandLimits(v.([]interface{})) + } + + if publish, ok := d.GetOk("publish"); ok { + p := publish.(bool) + payload.Publish = &p + } + + log.Println("[INFO] Creating IBM Cloud Function Action") + _, _, err = actionService.Insert(&payload, true) + + if err != nil { + return fmt.Errorf("Error creating IBM Cloud Function Action: %s", err) + } + 
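// The resource ID is stored as "<namespace>:<action name>" so that read,
+ // update, delete, and import can split it back apart (see cfIdParts).
+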
d.SetId(fmt.Sprintf("%s:%s", namespace, qualifiedName.GetEntityName())) + + return resourceIBMFunctionActionRead(d, meta) +} + +func resourceIBMFunctionActionRead(d *schema.ResourceData, meta interface{}) error { + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := "" + actionID := "" + if len(parts) == 2 { + namespace = parts[0] + actionID = parts[1] + } else { + namespace = os.Getenv("FUNCTION_NAMESPACE") + actionID = parts[0] + d.SetId(fmt.Sprintf("%s:%s", namespace, actionID)) + } + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + actionService := wskClient.Actions + action, _, err := actionService.Get(actionID, true) + if err != nil { + return fmt.Errorf("Error retrieving IBM Cloud Function Action %s : %s", actionID, err) + } + d.Set("namespace", namespace) + d.Set("limits", flattenLimits(action.Limits)) + d.Set("exec", flattenExec(action.Exec, d)) + d.Set("publish", action.Publish) + d.Set("version", action.Version) + d.Set("action_id", action.Name) + annotations, err := flattenAnnotations(action.Annotations) + if err != nil { + return err + } + + d.Set("annotations", annotations) + parameters, err := flattenParameters(action.Parameters) + if err != nil { + return err + } + d.Set("parameters", parameters) + + temp := strings.Split(action.Namespace, "/") + pkgName := "" + if len(temp) == 2 { + pkgName = temp[1] + d.Set("name", fmt.Sprintf("%s/%s", pkgName, action.Name)) + c, err := whisk.NewClient(http.DefaultClient, &whisk.Config{ + Namespace: wskClient.Namespace, + AuthToken: wskClient.AuthToken, + Host: wskClient.Host, + AdditionalHeaders: wskClient.AdditionalHeaders, + }) + + pkg, _, err := c.Packages.Get(pkgName) + if err != nil { + return fmt.Errorf("Error retrieving package IBM Cloud Function package %s : %s", pkgName, err) + } + + userAnnotations, err := flattenAnnotations(filterInheritedAnnotations(pkg.Annotations, action.Annotations)) + if err != nil { + return err + } + + d.Set("user_defined_annotations", userAnnotations) + userParameters, err := flattenParameters(filterInheritedParameters(pkg.Parameters, action.Parameters)) + if err != nil { + return err + } + d.Set("user_defined_parameters", userParameters) + } else { + d.Set("name", action.Name) + userDefinedAnnotations, err := filterActionAnnotations(action.Annotations) + if err != nil { + return err + } + d.Set("user_defined_annotations", userDefinedAnnotations) + + userDefinedParameters, err := filterActionParameters(action.Parameters) + if err != nil { + return err + } + d.Set("user_defined_parameters", userDefinedParameters) + } + + targetUrl, err := action.ActionURL(wskClient.Config.Host, "/api", wskClient.Config.Version, pkgName) + if err != nil { + log.Printf( + "Error creating target endpoint URL for action (%s) targetURL : %s", d.Id(), err) + + } + d.Set("target_endpoint_url", targetUrl) + + return nil +} + +func resourceIBMFunctionActionUpdate(d *schema.ResourceData, meta interface{}) error { + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := parts[0] + actionID := parts[1] + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != 
nil { + return err + } + + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + actionService := wskClient.Actions + + var qualifiedName = new(QualifiedName) + + if qualifiedName, err = NewQualifiedName(actionID); err != nil { + return NewQualifiedNameError(actionID, err) + } + + payload := whisk.Action{ + Name: qualifiedName.GetEntityName(), + Namespace: namespace, + } + + ischanged := false + + if d.HasChange("publish") { + p := d.Get("publish").(bool) + payload.Publish = &p + } + + if d.HasChange("user_defined_parameters") { + var err error + payload.Parameters, err = expandParameters(d.Get("user_defined_parameters").(string)) + if err != nil { + return err + } + ischanged = true + } + + if d.HasChange("user_defined_annotations") { + var err error + payload.Annotations, err = expandAnnotations(d.Get("user_defined_annotations").(string)) + if err != nil { + return err + } + ischanged = true + } + + if d.HasChange("exec") { + exec := d.Get("exec").([]interface{}) + payload.Exec = expandExec(exec) + ischanged = true + } + + if d.HasChange("limits") { + limits := d.Get("limits").([]interface{}) + payload.Limits = expandLimits(limits) + ischanged = true + } + + if ischanged { + log.Println("[INFO] Update IBM Cloud Function Action") + _, _, err = actionService.Insert(&payload, true) + if err != nil { + return fmt.Errorf("Error updating IBM Cloud Function Action: %s", err) + } + } + + return resourceIBMFunctionActionRead(d, meta) +} + +func resourceIBMFunctionActionDelete(d *schema.ResourceData, meta interface{}) error { + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := parts[0] + actionID := parts[1] + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + actionService := wskClient.Actions + + _, err = actionService.Delete(actionID) + if err != nil { + return fmt.Errorf("Error deleting IBM Cloud Function Action: %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMFunctionActionExists(d *schema.ResourceData, meta interface{}) (bool, error) { + parts, err := cfIdParts(d.Id()) + if err != nil { + return false, err + } + + namespace := "" + actionID := "" + if len(parts) >= 2 { + namespace = parts[0] + actionID = parts[1] + } else { + namespace = os.Getenv("FUNCTION_NAMESPACE") + actionID = parts[0] + d.SetId(fmt.Sprintf("%s:%s", namespace, actionID)) + } + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return false, err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return false, err + } + + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return false, err + + } + + actionService := wskClient.Actions + + action, resp, err := actionService.Get(actionID, true) + if err != nil { + if resp.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error communicating with IBM Cloud Function Client : %s", err) + } + + temp := strings.Split(action.Namespace, "/") + var name string + + if len(temp) == 2 { + name = fmt.Sprintf("%s/%s", temp[1], action.Name) + } else { + name = action.Name + } + + return name == actionID, nil +} diff --git 
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_namespace.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_namespace.go new file mode 100644 index 00000000000..68f936ed4bc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_namespace.go @@ -0,0 +1,213 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM-Cloud/bluemix-go/api/functions" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + funcNamespaceName = "name" + funcNamespaceResGrpId = "resource_group_id" + funcNamespaceResPlanId = "resource_plan_id" + funcNamespaceDesc = "description" + funcNamespaceLoc = "location" +) + +func resourceIBMFunctionNamespace() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMFunctionNamespaceCreate, + Read: resourceIBMFunctionNamespaceRead, + Update: resourceIBMFunctionNamespaceUpdate, + Delete: resourceIBMFunctionNamespaceDelete, + Exists: resourceIBMFunctionNamespaceExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + funcNamespaceName: { + Type: schema.TypeString, + Required: true, + Description: "Name of namespace.", + ValidateFunc: InvokeValidator("ibm_function_namespace", funcNamespaceName), + }, + funcNamespaceDesc: { + Type: schema.TypeString, + Optional: true, + Description: "Namespace Description.", + }, + funcNamespaceResGrpId: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Resource Group ID.", + ValidateFunc: InvokeValidator("ibm_function_namespace", funcNamespaceResGrpId), + }, + funcNamespaceLoc: { + Type: schema.TypeString, + Computed: true, + Description: "Namespace Location.", + }, + }, + } +} + +func resourceIBMFuncNamespaceValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcNamespaceName, + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString, + Required: true}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcNamespaceResGrpId, + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString, + Required: true}) + + ibmFuncNamespaceResourceValidator := ResourceValidator{ResourceName: "ibm_function_namespace", Schema: validateSchema} + return &ibmFuncNamespaceResourceValidator +} + +func resourceIBMFunctionNamespaceCreate(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + createNamespaceOptions := functions.CreateNamespaceOptions{} + + name := d.Get(funcNamespaceName).(string) + createNamespaceOptions.Name = &name + resourceGroupID := d.Get(funcNamespaceResGrpId).(string) + createNamespaceOptions.ResourceGroupID = &resourceGroupID + resourcePlanID := "functions-base-plan" + createNamespaceOptions.ResourcePlanID = &resourcePlanID + + if _, ok := d.GetOk(funcNamespaceDesc); ok { + description := d.Get(funcNamespaceDesc).(string) + createNamespaceOptions.Description = &description + } + + namespace, err := functionNamespaceAPI.Namespaces().CreateNamespace(createNamespaceOptions) + if err != nil { + return fmt.Errorf("Error Creating Namespace: %s", err) + } + + d.SetId(*namespace.ID) + log.Printf("[INFO] Created namespace (IAM) : %s", *namespace.Name) + + return 
resourceIBMFunctionNamespaceRead(d, meta) +} + +func resourceIBMFunctionNamespaceRead(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + id := d.Id() + + getOptions := functions.GetNamespaceOptions{ + ID: &id, + } + instance, err := functionNamespaceAPI.Namespaces().GetNamespace(getOptions) + if err != nil { + d.SetId("") + return nil + } + + if instance.ID != nil { + d.SetId(*instance.ID) + } + if instance.Name != nil { + d.Set(funcNamespaceName, *instance.Name) + } + + if instance.ResourceGroupID != nil { + d.Set(funcNamespaceResGrpId, *instance.ResourceGroupID) + } + + if instance.Location != nil { + d.Set(funcNamespaceLoc, *instance.Location) + } + if instance.Description != nil { + d.Set(funcNamespaceDesc, *instance.Description) + } + + return nil +} + +func resourceIBMFunctionNamespaceUpdate(d *schema.ResourceData, meta interface{}) error { + nsClient, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + ID := d.Id() + updateNamespaceOptions := functions.UpdateNamespaceOptions{} + if d.HasChange(funcNamespaceName) { + name := d.Get(funcNamespaceName).(string) + updateNamespaceOptions.Name = &name + } + + if d.HasChange(funcNamespaceDesc) { + description := d.Get(funcNamespaceDesc).(string) + updateNamespaceOptions.Description = &description + } + + updateNamespaceOptions.ID = &ID + namespace, err := nsClient.Namespaces().UpdateNamespace(updateNamespaceOptions) + if err != nil { + return fmt.Errorf("Error Updating Namespace: %s", err) + } + + log.Printf("[INFO] Updated namespace (IAM) : %s", *namespace.Name) + + return resourceIBMFunctionNamespaceRead(d, meta) +} + +func resourceIBMFunctionNamespaceDelete(d *schema.ResourceData, meta interface{}) error { + nsClient, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + ID := d.Id() + _, err = nsClient.Namespaces().DeleteNamespace(ID) + if err != nil { + return fmt.Errorf("Error Deleting Namespace: %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMFunctionNamespaceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + nsClient, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return false, err + } + + ID := d.Id() + + getOptions := functions.GetNamespaceOptions{ + ID: &ID, + } + _, err = nsClient.Namespaces().GetNamespace(getOptions) + if err != nil { + d.SetId("") + return false, fmt.Errorf("Error Getting Namespace (IAM): %s", err) + } + + return true, nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_package.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_package.go new file mode 100644 index 00000000000..99b215cd56b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_package.go @@ -0,0 +1,459 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
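+// A minimal usage sketch for the package resource defined in this file; the
+// resource label, names, and annotation values are illustrative assumptions,
+// not taken from this change:
+//
+//   resource "ibm_function_package" "example" {
+//     name      = "my-package"
+//     namespace = "my-namespace"
+//
+//     user_defined_annotations = jsonencode([
+//       { key = "description", value = "sample package" },
+//     ])
+//   }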
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "net/http" + "os" + "strings" + + "github.com/apache/openwhisk-client-go/whisk" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + funcPkgNamespace = "namespace" + funcPkgName = "name" + funcPkgUsrDefAnnots = "user_defined_annotations" + funcPkgUsrDefParams = "user_defined_parameters" + funcPkgBindPkgName = "bind_package_name" +) + +func resourceIBMFunctionPackage() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMFunctionPackageCreate, + Read: resourceIBMFunctionPackageRead, + Update: resourceIBMFunctionPackageUpdate, + Delete: resourceIBMFunctionPackageDelete, + Exists: resourceIBMFunctionPackageExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + funcPkgNamespace: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "IBM Cloud function namespace.", + ValidateFunc: InvokeValidator("ibm_function_package", funcPkgNamespace), + }, + funcPkgName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of package.", + ValidateFunc: InvokeValidator("ibm_function_package", funcPkgName), + }, + "publish": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Package visibility.", + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: "Semantic version of the item.", + }, + funcPkgUsrDefAnnots: { + Type: schema.TypeString, + Optional: true, + Description: "Annotation values in KEY VALUE format.", + Default: "[]", + ValidateFunc: InvokeValidator("ibm_function_package", funcPkgUsrDefAnnots), + DiffSuppressFunc: suppressEquivalentJSON, + StateFunc: func(v interface{}) string { + json, _ := normalizeJSONString(v) + return json + }, + }, + funcPkgUsrDefParams: { + Type: schema.TypeString, + Optional: true, + Description: "Parameters values in KEY VALUE format. 
Parameter bindings included in the context passed to the package.", + ValidateFunc: InvokeValidator("ibm_function_package", funcPkgUsrDefParams), + Default: "[]", + DiffSuppressFunc: suppressEquivalentJSON, + StateFunc: func(v interface{}) string { + json, _ := normalizeJSONString(v) + return json + }, + }, + "annotations": { + Type: schema.TypeString, + Computed: true, + Description: "All annotations set on package by user and those set by the IBM Cloud Function backend/API.", + }, + "parameters": { + Type: schema.TypeString, + Computed: true, + Description: "All parameters set on package by user and those set by the IBM Cloud Function backend/API.", + }, + funcPkgBindPkgName: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Name of package to be bound.", + ValidateFunc: InvokeValidator("ibm_function_package", funcPkgBindPkgName), + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return false + } + if strings.Compare(n, o) == 0 { + return true + } + return false + }, + }, + "package_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMFuncPackageValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcPkgName, + ValidateFunctionIdentifier: ValidateRegexp, + Type: TypeString, + Regexp: `\A([\w]|[\w][\w@ .-]*[\w@.-]+)\z`, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcPkgNamespace, + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcPkgUsrDefAnnots, + ValidateFunctionIdentifier: ValidateJSONString, + Type: TypeString, + Default: "[]", + Optional: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcPkgBindPkgName, + ValidateFunctionIdentifier: ValidateBindedPackageName, + Type: TypeString, + Optional: true}) + + ibmFuncPackageResourceValidator := ResourceValidator{ResourceName: "ibm_function_package", Schema: validateSchema} + return &ibmFuncPackageResourceValidator +} + +func resourceIBMFunctionPackageCreate(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + namespace := d.Get("namespace").(string) + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + packageService := wskClient.Packages + + name := d.Get("name").(string) + + var qualifiedName = new(QualifiedName) + + if qualifiedName, err = NewQualifiedName(name); err != nil { + return NewQualifiedNameError(name, err) + } + + payload := whisk.Package{ + Name: qualifiedName.GetEntityName(), + Namespace: qualifiedName.GetNamespace(), + } + + userDefinedAnnotations := d.Get("user_defined_annotations").(string) + payload.Annotations, err = expandAnnotations(userDefinedAnnotations) + if err != nil { + return err + } + + userDefinedParameters := d.Get("user_defined_parameters").(string) + payload.Parameters, err = expandParameters(userDefinedParameters) + if err != nil { + return err + } + + if publish, ok := d.GetOk("publish"); ok { + p := publish.(bool) + payload.Publish = &p + } + + if v, ok := d.GetOk("bind_package_name"); ok { + var 
BindingQualifiedName = new(QualifiedName) + if BindingQualifiedName, err = NewQualifiedName(v.(string)); err != nil { + return NewQualifiedNameError(v.(string), err) + } + BindingPayload := whisk.Binding{ + Name: BindingQualifiedName.GetEntityName(), + Namespace: BindingQualifiedName.GetNamespace(), + } + payload.Binding = &BindingPayload + } + + log.Println("[INFO] Creating IBM Cloud Function package") + result, _, err := packageService.Insert(&payload, false) + if err != nil { + return fmt.Errorf("Error creating IBM Cloud Function package: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", namespace, result.Name)) + + return resourceIBMFunctionPackageRead(d, meta) +} + +func resourceIBMFunctionPackageRead(d *schema.ResourceData, meta interface{}) error { + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := "" + packageID := "" + if len(parts) == 2 { + namespace = parts[0] + packageID = parts[1] + } else { + namespace = os.Getenv("FUNCTION_NAMESPACE") + packageID = parts[0] + d.SetId(fmt.Sprintf("%s:%s", namespace, packageID)) + } + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + packageService := wskClient.Packages + + pkg, _, err := packageService.Get(packageID) + if err != nil { + return fmt.Errorf("Error retrieving IBM Cloud Function package %s : %s", packageID, err) + } + d.Set("package_id", pkg.Name) + d.Set("name", pkg.Name) + d.Set("namespace", namespace) + d.Set("publish", pkg.Publish) + d.Set("version", pkg.Version) + annotations, err := flattenAnnotations(pkg.Annotations) + if err != nil { + return err + } + d.Set("annotations", annotations) + parameters, err := flattenParameters(pkg.Parameters) + if err != nil { + return err + } + d.Set("parameters", parameters) + if isEmpty(*pkg.Binding) { + + d.Set("user_defined_annotations", annotations) + d.Set("user_defined_parameters", parameters) + + } else { + d.Set("bind_package_name", fmt.Sprintf("/%s/%s", pkg.Binding.Namespace, pkg.Binding.Name)) + c, err := whisk.NewClient(http.DefaultClient, &whisk.Config{ + Namespace: pkg.Binding.Namespace, + AuthToken: wskClient.AuthToken, + Host: wskClient.Host, + AdditionalHeaders: wskClient.AdditionalHeaders, + }) + if err != nil { + return err + } + boundPkg, _, err := c.Packages.Get(pkg.Binding.Name) + if err != nil { + return fmt.Errorf("Error retrieving bound IBM Cloud Function package %s : %s", pkg.Binding.Name, err) + } + + userAnnotations, err := flattenAnnotations(filterInheritedAnnotations(boundPkg.Annotations, pkg.Annotations)) + if err != nil { + return err + } + d.Set("user_defined_annotations", userAnnotations) + + userParameters, err := flattenParameters(filterInheritedParameters(boundPkg.Parameters, pkg.Parameters)) + if err != nil { + return err + } + d.Set("user_defined_parameters", userParameters) + } + + return nil +} + +func resourceIBMFunctionPackageUpdate(d *schema.ResourceData, meta interface{}) error { + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := parts[0] + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, 
functionNamespaceAPI) + if err != nil { + return err + + } + + packageService := wskClient.Packages + + var qualifiedName = new(QualifiedName) + + if qualifiedName, err = NewQualifiedName(d.Get("name").(string)); err != nil { + return NewQualifiedNameError(d.Get("name").(string), err) + } + + payload := whisk.Package{ + Name: qualifiedName.GetEntityName(), + Namespace: qualifiedName.GetNamespace(), + } + ischanged := false + if d.HasChange("publish") { + p := d.Get("publish").(bool) + payload.Publish = &p + ischanged = true + } + + if d.HasChange("user_defined_parameters") { + var err error + payload.Parameters, err = expandParameters(d.Get("user_defined_parameters").(string)) + if err != nil { + return err + } + ischanged = true + } + + if d.HasChange("user_defined_annotations") { + var err error + payload.Annotations, err = expandAnnotations(d.Get("user_defined_annotations").(string)) + if err != nil { + return err + } + ischanged = true + } + + if ischanged { + log.Println("[INFO] Update IBM Cloud Function Package") + _, _, err = packageService.Insert(&payload, true) + if err != nil { + return fmt.Errorf("Error updating IBM Cloud Function Package: %s", err) + } + } + + return resourceIBMFunctionPackageRead(d, meta) +} + +func resourceIBMFunctionPackageDelete(d *schema.ResourceData, meta interface{}) error { + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := parts[0] + packageID := parts[1] + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + packageService := wskClient.Packages + + _, err = packageService.Delete(packageID) + if err != nil { + return fmt.Errorf("Error deleting IBM Cloud Function Package: %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMFunctionPackageExists(d *schema.ResourceData, meta interface{}) (bool, error) { + parts, err := cfIdParts(d.Id()) + if err != nil { + return false, err + } + + namespace := "" + packageID := "" + if len(parts) == 2 { + namespace = parts[0] + packageID = parts[1] + } else { + namespace = os.Getenv("FUNCTION_NAMESPACE") + packageID = parts[0] + d.SetId(fmt.Sprintf("%s:%s", namespace, packageID)) + } + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return false, err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return false, err + } + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return false, err + + } + + packageService := wskClient.Packages + + pkg, resp, err := packageService.Get(packageID) + if err != nil { + if resp.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error communicating with IBM Cloud Function Client : %s", err) + } + + return pkg.Name == packageID, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_rule.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_rule.go new file mode 100644 index 00000000000..7223b6cee46 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_rule.go @@ -0,0 +1,371 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
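+// A minimal usage sketch for the rule resource defined in this file; the
+// resource labels and the namespace are illustrative assumptions:
+//
+//   resource "ibm_function_rule" "example" {
+//     name         = "my-rule"
+//     namespace    = "my-namespace"
+//     trigger_name = ibm_function_trigger.example.name
+//     action_name  = ibm_function_action.example.name
+//   }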
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "strings" + + "github.com/apache/openwhisk-client-go/whisk" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + funcRuleNamespace = "namespace" + funcRuleName = "name" +) + +func resourceIBMFunctionRule() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMFunctionRuleCreate, + Read: resourceIBMFunctionRuleRead, + Update: resourceIBMFunctionRuleUpdate, + Delete: resourceIBMFunctionRuleDelete, + Exists: resourceIBMFunctionRuleExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + funcRuleNamespace: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "IBM Cloud function namespace.", + ValidateFunc: InvokeValidator("ibm_function_rule", funcRuleNamespace), + }, + funcRuleName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of rule.", + ValidateFunc: InvokeValidator("ibm_function_rule", funcRuleName), + }, + "trigger_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of trigger.", + }, + "action_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of action.", + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + newParts := strings.Split(n, "/") + oldParts := strings.Split(o, "/") + newActionName := newParts[len(newParts)-1] + oldActionName := oldParts[len(oldParts)-1] + + if o == "" { + return false + } + if strings.HasPrefix(n, "/_") { + temp := strings.Replace(n, "/_", "/"+d.Get("namespace").(string), 1) + if strings.Compare(temp, o) == 0 { + return true + } + if strings.Compare(oldActionName, newActionName) == 0 { + return true + } + + } + if !strings.HasPrefix(n, "/") { + if strings.HasPrefix(o, "/"+d.Get("namespace").(string)) { + return true + } + if strings.Compare(oldActionName, newActionName) == 0 { + return true + } + } + return false + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Status of the rule.", + }, + "publish": { + Type: schema.TypeBool, + Computed: true, + Description: "Rule visibility.", + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: "Semantic version of the item.", + }, + "rule_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMFuncRuleValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcRuleName, + ValidateFunctionIdentifier: ValidateRegexp, + Type: TypeString, + Regexp: `\A([\w]|[\w][\w@ .-]*[\w@.-]+)\z`, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcRuleNamespace, + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString, + Required: true}) + + ibmFuncRuleResourceValidator := ResourceValidator{ResourceName: "ibm_function_rule", Schema: validateSchema} + return &ibmFuncRuleResourceValidator +} + +func resourceIBMFunctionRuleCreate(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + namespace := d.Get("namespace").(string) + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + ruleService := wskClient.Rules + + name := 
d.Get("name").(string) + + var qualifiedName = new(QualifiedName) + if qualifiedName, err = NewQualifiedName(name); err != nil { + return NewQualifiedNameError(name, err) + } + trigger := d.Get("trigger_name").(string) + action := d.Get("action_name").(string) + + triggerName := getQualifiedName(trigger, wskClient.Config.Namespace) + actionName := getQualifiedName(action, wskClient.Config.Namespace) + payload := whisk.Rule{ + Name: qualifiedName.GetEntityName(), + Namespace: qualifiedName.GetNamespace(), + Trigger: triggerName, + Action: actionName, + } + log.Println("[INFO] Creating IBM Cloud Function rule") + result, _, err := ruleService.Insert(&payload, false) + if err != nil { + return fmt.Errorf("Error creating IBM Cloud Function rule: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", namespace, result.Name)) + + return resourceIBMFunctionRuleRead(d, meta) +} + +func resourceIBMFunctionRuleRead(d *schema.ResourceData, meta interface{}) error { + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := "" + ruleID := "" + if len(parts) == 2 { + namespace = parts[0] + ruleID = parts[1] + } else { + namespace = os.Getenv("FUNCTION_NAMESPACE") + ruleID = parts[0] + d.SetId(fmt.Sprintf("%s:%s", namespace, ruleID)) + } + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + ruleService := wskClient.Rules + rule, _, err := ruleService.Get(ruleID) + if err != nil { + return fmt.Errorf("Error retrieving IBM Cloud Function rule %s : %s", ruleID, err) + } + + d.Set("rule_id", rule.Name) + d.Set("name", rule.Name) + d.Set("publish", rule.Publish) + d.Set("namespace", namespace) + d.Set("version", rule.Version) + d.Set("status", rule.Status) + + path := rule.Action.(map[string]interface{})["path"] + d.Set("trigger_name", rule.Trigger.(map[string]interface{})["name"]) + actionName := rule.Action.(map[string]interface{})["name"] + d.Set("action_name", fmt.Sprintf("/%s/%s", path, actionName)) + d.SetId(fmt.Sprintf("%s:%s", namespace, rule.Name)) + return nil +} + +func resourceIBMFunctionRuleUpdate(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := parts[0] + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + ruleService := wskClient.Rules + + var qualifiedName = new(QualifiedName) + + if qualifiedName, err = NewQualifiedName(d.Get("name").(string)); err != nil { + return NewQualifiedNameError(d.Get("name").(string), err) + } + + payload := whisk.Rule{ + Name: qualifiedName.GetEntityName(), + Namespace: qualifiedName.GetNamespace(), + } + ischanged := false + + if d.HasChange("trigger_name") { + trigger := d.Get("trigger_name").(string) + payload.Trigger = getQualifiedName(trigger, wskClient.Config.Namespace) + ischanged = true + } + + if d.HasChange("action_name") { + action := d.Get("action_name").(string) + payload.Action = getQualifiedName(action, wskClient.Config.Namespace) + ischanged = true + } + + if ischanged { + 
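+ // An update is sent as a full overwrite (Insert with overwrite=true); the
+ // rule is then explicitly set back to "active" below, since an overwritten
+ // rule is not guaranteed to remain enabled.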
log.Println("[INFO] Update IBM Cloud Function Rule") + result, _, err := ruleService.Insert(&payload, true) + if err != nil { + return fmt.Errorf("Error updating IBM Cloud Function Rule: %s", err) + } + _, _, err = ruleService.SetState(result.Name, "active") + if err != nil { + return fmt.Errorf("Error updating IBM Cloud Function Rule: %s", err) + } + } + + return resourceIBMFunctionRuleRead(d, meta) +} + +func resourceIBMFunctionRuleDelete(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := parts[0] + ruleID := parts[1] + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + ruleService := wskClient.Rules + + _, err = ruleService.Delete(ruleID) + if err != nil { + return fmt.Errorf("Error deleting IBM Cloud Function Rule: %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMFunctionRuleExists(d *schema.ResourceData, meta interface{}) (bool, error) { + parts, err := cfIdParts(d.Id()) + if err != nil { + return false, err + } + + namespace := "" + ruleID := "" + if len(parts) == 2 { + namespace = parts[0] + ruleID = parts[1] + } else { + namespace = os.Getenv("FUNCTION_NAMESPACE") + ruleID = parts[0] + d.SetId(fmt.Sprintf("%s:%s", namespace, ruleID)) + } + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return false, err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return false, err + } + + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return false, err + + } + + ruleService := wskClient.Rules + + rule, resp, err := ruleService.Get(ruleID) + if err != nil { + if resp.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error communicating with IBM Cloud Function Client : %s", err) + } + return rule.Name == ruleID, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_trigger.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_trigger.go new file mode 100644 index 00000000000..a32deddc93e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_function_trigger.go @@ -0,0 +1,557 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
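+// A minimal usage sketch for the trigger resource defined in this file; the
+// resource label, namespace, and alarm feed values are illustrative
+// assumptions:
+//
+//   resource "ibm_function_trigger" "example" {
+//     name      = "my-trigger"
+//     namespace = "my-namespace"
+//
+//     feed {
+//       name       = "/whisk.system/alarms/alarm"
+//       parameters = jsonencode([
+//         { key = "cron", value = "0 */2 * * *" },
+//       ])
+//     }
+//   }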
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "net/http" + "os" + + "github.com/apache/openwhisk-client-go/whisk" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + funcTriggerNamespace = "namespace" + funcTriggerName = "name" + funcTriggerParams = "parameters" + funcTriggerUsrDefAnnots = "user_defined_annotations" + funcTriggerUsrDefParams = "user_defined_parameters" + + feedLifeCycleEvent = "lifecycleEvent" + feedTriggerName = "triggerName" + feedAuthKey = "authKey" + feedCreate = "CREATE" + feedDelete = "DELETE" +) + +func resourceIBMFunctionTrigger() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMFunctionTriggerCreate, + Read: resourceIBMFunctionTriggerRead, + Update: resourceIBMFunctionTriggerUpdate, + Delete: resourceIBMFunctionTriggerDelete, + Exists: resourceIBMFunctionTriggerExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + funcTriggerNamespace: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "IBM Cloud function namespace.", + ValidateFunc: InvokeValidator("ibm_function_trigger", funcTriggerNamespace), + }, + funcTriggerName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of Trigger.", + ValidateFunc: InvokeValidator("ibm_function_trigger", funcTriggerName), + }, + "feed": { + Type: schema.TypeList, + ForceNew: true, + Optional: true, + MaxItems: 1, + Description: "Trigger feed", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Trigger feed ACTION_NAME.", + }, + funcTriggerParams: { + Type: schema.TypeString, + Optional: true, + Default: "[]", + Description: "Parameters values in KEY VALUE format. Parameter bindings included in the context passed to the action invoke.", + ValidateFunc: InvokeValidator("ibm_function_trigger", funcTriggerParams), + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" && n == "" { + return false + } + if o == "[]" { + return true + } + return false + }, + StateFunc: func(v interface{}) string { + json, _ := normalizeJSONString(v) + return json + }, + }, + }, + }, + }, + "publish": { + Type: schema.TypeBool, + Computed: true, + Description: "Trigger visibility.", + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: "Semantic version of the item.", + }, + funcTriggerUsrDefAnnots: { + Type: schema.TypeString, + Optional: true, + Description: "Annotation values in KEY VALUE format.", + Default: "[]", + ValidateFunc: InvokeValidator("ibm_function_trigger", funcTriggerUsrDefAnnots), + DiffSuppressFunc: suppressEquivalentJSON, + StateFunc: func(v interface{}) string { + json, _ := normalizeJSONString(v) + return json + }, + }, + funcTriggerUsrDefParams: { + Type: schema.TypeString, + Optional: true, + Default: "[]", + Description: "Parameters values in KEY VALUE format. 
Parameter bindings included in the context passed to the trigger.", + ValidateFunc: InvokeValidator("ibm_function_trigger", funcTriggerUsrDefParams), + DiffSuppressFunc: suppressEquivalentJSON, + StateFunc: func(v interface{}) string { + json, _ := normalizeJSONString(v) + return json + }, + }, + "annotations": { + Type: schema.TypeString, + Computed: true, + Description: "All annotations set on trigger by user and those set by the IBM Cloud Function backend/API.", + }, + "parameters": { + Type: schema.TypeString, + Computed: true, + Description: "All parameters set on trigger by user and those set by the IBM Cloud Function backend/API.", + }, + "trigger_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMFuncTriggerValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcTriggerName, + ValidateFunctionIdentifier: ValidateRegexp, + Type: TypeString, + Regexp: `\A([\w]|[\w][\w@ .-]*[\w@.-]+)\z`, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcTriggerNamespace, + ValidateFunctionIdentifier: ValidateNoZeroValues, + Type: TypeString, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcTriggerUsrDefAnnots, + ValidateFunctionIdentifier: ValidateJSONString, + Type: TypeString, + Default: "[]", + Optional: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcTriggerUsrDefParams, + ValidateFunctionIdentifier: ValidateJSONString, + Type: TypeString, + Optional: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: funcTriggerParams, + ValidateFunctionIdentifier: ValidateJSONString, + Type: TypeString, + Default: "[]", + Optional: true}) + + ibmFuncTriggerResourceValidator := ResourceValidator{ResourceName: "ibm_function_trigger", Schema: validateSchema} + return &ibmFuncTriggerResourceValidator +} + +func resourceIBMFunctionTriggerCreate(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + namespace := d.Get("namespace").(string) + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + triggerService := wskClient.Triggers + feed := false + feedPayload := map[string]interface{}{} + name := d.Get("name").(string) + + var qualifiedName = new(QualifiedName) + + if qualifiedName, err = NewQualifiedName(name); err != nil { + return NewQualifiedNameError(name, err) + } + + payload := whisk.Trigger{ + Name: qualifiedName.GetEntityName(), + Namespace: qualifiedName.GetNamespace(), + } + + userDefinedAnnotations := d.Get("user_defined_annotations").(string) + payload.Annotations, err = expandAnnotations(userDefinedAnnotations) + if err != nil { + return err + } + + userDefinedParameters := d.Get("user_defined_parameters").(string) + payload.Parameters, err = expandParameters(userDefinedParameters) + if err != nil { + return err + } + + if v, ok := d.GetOk("feed"); ok { + feed = true + value := v.([]interface{})[0].(map[string]interface{}) + feedKeyValue := whisk.KeyValue{ + Key: "feed", + Value: value["name"], + } + feedArray := make([]whisk.KeyValue, 0, 1) + feedArray = append(feedArray, feedKeyValue) + payload.Annotations = 
payload.Annotations.AppendKeyValueArr(feedArray) + } + + log.Println("[INFO] Creating IBM Cloud Function trigger") + result, _, err := triggerService.Insert(&payload, false) + if err != nil { + return fmt.Errorf("Error creating IBM Cloud Function trigger: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", namespace, result.Name)) + + if feed { + feed := d.Get("feed").([]interface{})[0].(map[string]interface{}) + actionName := feed["name"].(string) + parameters := feed["parameters"].(string) + var err error + feedParameters, err := expandParameters(parameters) + if err != nil { + return err + } + for _, value := range feedParameters { + feedPayload[value.Key] = value.Value + } + var feedQualifiedName = new(QualifiedName) + + if feedQualifiedName, err = NewQualifiedName(actionName); err != nil { + _, _, delerr := triggerService.Delete(name) + if delerr != nil { + return fmt.Errorf("Error creating IBM Cloud Function trigger with feed: %s", err) + } + return NewQualifiedNameError(actionName, err) + } + + feedPayload[feedLifeCycleEvent] = feedCreate + feedPayload[feedAuthKey] = wskClient.Config.AuthToken + feedPayload[feedTriggerName] = fmt.Sprintf("/%s/%s", qualifiedName.GetNamespace(), name) + + c, err := whisk.NewClient(http.DefaultClient, &whisk.Config{ + AuthToken: wskClient.AuthToken, + Host: wskClient.Host, + AdditionalHeaders: wskClient.AdditionalHeaders, + }) + + if feedQualifiedName.GetNamespace() != namespace { + c.Config.Namespace = feedQualifiedName.GetNamespace() + } + actionService := c.Actions + _, _, err = actionService.Invoke(feedQualifiedName.GetEntityName(), feedPayload, true, true) + if err != nil { + _, _, delerr := triggerService.Delete(name) + if delerr != nil { + return fmt.Errorf("Error creating IBM Cloud Function trigger with feed: %s", err) + } + d.SetId("") + return fmt.Errorf("Error creating IBM Cloud Function trigger with feed: %s", err) + } + } + + d.SetId(fmt.Sprintf("%s:%s", namespace, result.Name)) + + return resourceIBMFunctionTriggerRead(d, meta) +} + +func resourceIBMFunctionTriggerRead(d *schema.ResourceData, meta interface{}) error { + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := "" + triggerID := "" + if len(parts) == 2 { + namespace = parts[0] + triggerID = parts[1] + } else { + namespace = os.Getenv("FUNCTION_NAMESPACE") + triggerID = parts[0] + d.SetId(fmt.Sprintf("%s:%s", namespace, triggerID)) + } + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + triggerService := wskClient.Triggers + + trigger, _, err := triggerService.Get(triggerID) + if err != nil { + return fmt.Errorf("Error retrieving IBM Cloud Function Trigger %s : %s", triggerID, err) + } + d.Set("trigger_id", trigger.Name) + d.Set("namespace", namespace) + d.Set("name", trigger.Name) + d.Set("publish", trigger.Publish) + d.Set("version", trigger.Version) + annotations, err := flattenAnnotations(trigger.Annotations) + if err != nil { + return err + } + d.Set("annotations", annotations) + parameters, err := flattenParameters(trigger.Parameters) + if err != nil { + return err + } + d.Set("parameters", parameters) + d.Set("user_defined_parameters", parameters) + + userDefinedAnnotations, err := filterTriggerAnnotations(trigger.Annotations) + if err != nil { + return err + 
} + d.Set("user_defined_annotations", userDefinedAnnotations) + + found := trigger.Annotations.FindKeyValue("feed") + + if found >= 0 { + d.Set("feed", flattenFeed(trigger.Annotations.GetValue("feed").(string))) + } + + return nil +} + +func resourceIBMFunctionTriggerUpdate(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + + namespace := parts[0] + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + triggerService := wskClient.Triggers + + name := d.Get("name").(string) + + var qualifiedName = new(QualifiedName) + + if qualifiedName, err = NewQualifiedName(name); err != nil { + return NewQualifiedNameError(name, err) + } + + payload := whisk.Trigger{ + Name: qualifiedName.GetEntityName(), + Namespace: qualifiedName.GetNamespace(), + } + ischanged := false + + if d.HasChange("user_defined_parameters") { + var err error + payload.Parameters, err = expandParameters(d.Get("user_defined_parameters").(string)) + if err != nil { + return err + } + ischanged = true + } + + if d.HasChange("user_defined_annotations") { + var err error + payload.Annotations, err = expandAnnotations(d.Get("user_defined_annotations").(string)) + if err != nil { + return err + } + ischanged = true + } + + if ischanged { + log.Println("[INFO] Update IBM Cloud Function Trigger") + + _, _, err = triggerService.Insert(&payload, true) + if err != nil { + return fmt.Errorf("Error updating IBM Cloud Function Trigger: %s", err) + } + } + + return resourceIBMFunctionTriggerRead(d, meta) +} + +func resourceIBMFunctionTriggerDelete(d *schema.ResourceData, meta interface{}) error { + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + parts, err := cfIdParts(d.Id()) + if err != nil { + return err + } + namespace := parts[0] + triggerID := parts[1] + + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return err + + } + + triggerService := wskClient.Triggers + var qualifiedName = new(QualifiedName) + if qualifiedName, err = NewQualifiedName(triggerID); err != nil { + return NewQualifiedNameError(triggerID, err) + } + trigger, _, err := triggerService.Get(triggerID) + if err != nil { + return fmt.Errorf("Error retrieving IBM Cloud Function Trigger %s : %s", triggerID, err) + } + found := trigger.Annotations.FindKeyValue("feed") + if found >= 0 { + actionName := trigger.Annotations.GetValue("feed").(string) + var feedQualifiedName = new(QualifiedName) + + if feedQualifiedName, err = NewQualifiedName(actionName); err != nil { + return NewQualifiedNameError(actionName, err) + } + + feedPayload := map[string]interface{}{ + feedLifeCycleEvent: feedDelete, + feedAuthKey: wskClient.Config.AuthToken, + feedTriggerName: fmt.Sprintf("/%s/%s", qualifiedName.GetNamespace(), triggerID), + } + + c, err := whisk.NewClient(http.DefaultClient, &whisk.Config{ + AuthToken: wskClient.AuthToken, + Host: wskClient.Host, + AdditionalHeaders: wskClient.AdditionalHeaders, + }) + if feedQualifiedName.GetNamespace() != namespace { + c.Config.Namespace = 
feedQualifiedName.GetNamespace() + } + + actionService := c.Actions + _, _, err = actionService.Invoke(feedQualifiedName.GetEntityName(), feedPayload, true, true) + if err != nil { + return fmt.Errorf("Error deleting IBM Cloud Function trigger with feed: %s", err) + + } + } + + _, _, err = triggerService.Delete(triggerID) + if err != nil { + return fmt.Errorf("Error deleting IBM Cloud Function Trigger: %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMFunctionTriggerExists(d *schema.ResourceData, meta interface{}) (bool, error) { + parts, err := cfIdParts(d.Id()) + if err != nil { + return false, err + } + + namespace := "" + triggerID := "" + if len(parts) == 2 { + namespace = parts[0] + triggerID = parts[1] + } else { + namespace = os.Getenv("FUNCTION_NAMESPACE") + triggerID = parts[0] + d.SetId(fmt.Sprintf("%s:%s", namespace, triggerID)) + } + + functionNamespaceAPI, err := meta.(ClientSession).FunctionIAMNamespaceAPI() + if err != nil { + return false, err + } + + bxSession, err := meta.(ClientSession).BluemixSession() + if err != nil { + return false, err + } + + wskClient, err := setupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI) + if err != nil { + return false, err + + } + + triggerService := wskClient.Triggers + trigger, resp, err := triggerService.Get(triggerID) + if err != nil { + if resp.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error communicating with IBM Cloud Function Client : %s", err) + } + return trigger.Name == triggerID, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group.go new file mode 100644 index 00000000000..982339345ed --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group.go @@ -0,0 +1,171 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
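+// A minimal usage sketch for the access group resource defined in this file
+// (names are illustrative):
+//
+//   resource "ibm_iam_access_group" "example" {
+//     name        = "my-access-group"
+//     description = "Example access group"
+//   }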
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2" + "github.com/IBM-Cloud/bluemix-go/models" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMIAMAccessGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMAccessGroupCreate, + Read: resourceIBMIAMAccessGroupRead, + Update: resourceIBMIAMAccessGroupUpdate, + Delete: resourceIBMIAMAccessGroupDelete, + Exists: resourceIBMIAMAccessGroupExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the access group", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Description of the access group", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "version": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMIAMAccessGroupCreate(d *schema.ResourceData, meta interface{}) error { + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + + request := models.AccessGroupV2{ + AccessGroup: models.AccessGroup{ + Name: d.Get("name").(string), + }, + } + + if des, ok := d.GetOk("description"); ok { + request.Description = des.(string) + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + agrp, err := iamuumClient.AccessGroup().Create(request, userDetails.userAccount) + if err != nil { + return fmt.Errorf("Error creating access group: %s", err) + } + + d.SetId(agrp.ID) + + return resourceIBMIAMAccessGroupRead(d, meta) +} + +func resourceIBMIAMAccessGroupRead(d *schema.ResourceData, meta interface{}) error { + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + agrpID := d.Id() + + agrp, version, err := iamuumClient.AccessGroup().Get(agrpID) + if err != nil { + return fmt.Errorf("Error retrieving access group: %s", err) + } + + d.Set("name", agrp.Name) + d.Set("description", agrp.Description) + d.Set("version", version) + + return nil +} + +func resourceIBMIAMAccessGroupUpdate(d *schema.ResourceData, meta interface{}) error { + + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + agrpID := d.Id() + + hasChange := false + updateReq := iamuumv2.AccessGroupUpdateRequest{} + + if d.HasChange("name") { + updateReq.Name = d.Get("name").(string) + hasChange = true + } + + if d.HasChange("description") { + updateReq.Description = d.Get("description").(string) + hasChange = true + } + + if hasChange { + _, err = iamuumClient.AccessGroup().Update(agrpID, updateReq, d.Get("version").(string)) + if err != nil { + return fmt.Errorf("Error updating access group: %s", err) + } + } + + return resourceIBMIAMAccessGroupRead(d, meta) + +} + +func resourceIBMIAMAccessGroupDelete(d *schema.ResourceData, meta interface{}) error { + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + + agID := d.Id() + + err = iamuumClient.AccessGroup().Delete(agID, true) + if err != nil { + return fmt.Errorf("Error deleting access group: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceIBMIAMAccessGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { + iamuumClient, err := 
meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return false, err + } + agID := d.Id() + + agrp, _, err := iamuumClient.AccessGroup().Get(agID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return agrp.ID == agID, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group_dynamic_rule.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group_dynamic_rule.go new file mode 100644 index 00000000000..b2159aba45e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group_dynamic_rule.go @@ -0,0 +1,255 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + + "github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMIAMDynamicRule() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMDynamicRuleCreate, + Read: resourceIBMIAMDynamicRuleRead, + Update: resourceIBMIAMDynamicRuleUpdate, + Delete: resourceIBMIAMDynamicRuleDelete, + Exists: resourceIBMIAMDynamicRuleExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "access_group_id": { + Type: schema.TypeString, + Required: true, + Description: "Unique identifier of the access group", + }, + + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the Rule", + }, + "expiration": { + Type: schema.TypeInt, + Required: true, + Description: "The expiration in hours", + ValidateFunc: validatePortRange(1, 24), + }, + "identity_provider": { + Type: schema.TypeString, + Required: true, + Description: "The realm name or identity provider URL", + }, + "conditions": { + Type: schema.TypeList, + Required: true, + Description: "Conditions info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "claim": { + Type: schema.TypeString, + Required: true, + }, + "operator": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"EQUALS", "EQUALS_IGNORE_CASE", "IN", "NOT_EQUALS_IGNORE_CASE", "NOT_EQUALS", "CONTAINS"}), + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "rule_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the rule", + }, + }, + } +} + +func resourceIBMIAMDynamicRuleCreate(d *schema.ResourceData, meta interface{}) error { + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + + grpID := d.Get("access_group_id").(string) + name := d.Get("name").(string) + realm := d.Get("identity_provider").(string) + expiration := d.Get("expiration").(int) + + var cond []interface{} + condition := []iamuumv2.Condition{} + if res, ok := d.GetOk("conditions"); ok { + cond = res.([]interface{}) + for _, e := range cond { + r, _ := e.(map[string]interface{}) + conditionParam := iamuumv2.Condition{ + Claim: r["claim"].(string), + Operator: r["operator"].(string), + Value: fmt.Sprintf("\"%s\"", r["value"].(string)), + } + condition = append(condition, conditionParam) + } + } + + createRuleReq := iamuumv2.CreateRuleRequest{ + Name: name, + RealmName: realm, + Expiration: expiration, + Conditions: condition, + } + 
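+ // The request assembled above mirrors the schema: each condition value is
+ // wrapped in JSON quotes before it is sent. A hypothetical configuration
+ // that would produce such a request (all values illustrative):
+ //
+ //   resource "ibm_iam_access_group_dynamic_rule" "example" {
+ //     name              = "my-dynamic-rule"
+ //     access_group_id   = ibm_iam_access_group.example.id
+ //     expiration        = 8
+ //     identity_provider = "https://idp.example.com/SAML2"
+ //
+ //     conditions {
+ //       claim    = "blueGroups"
+ //       operator = "CONTAINS"
+ //       value    = "test-group"
+ //     }
+ //   }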
response, err := iamuumClient.DynamicRule().Create(grpID, createRuleReq) + if err != nil { + return err + } + ruleID := response.RuleID + d.SetId(fmt.Sprintf("%s/%s", grpID, ruleID)) + + return resourceIBMIAMDynamicRuleRead(d, meta) +} + +func resourceIBMIAMDynamicRuleRead(d *schema.ResourceData, meta interface{}) error { + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + grpID := parts[0] + ruleID := parts[1] + + rules, _, err := iamuumClient.DynamicRule().Get(grpID, ruleID) + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf("Error retrieving access group Rules: %s", err) + } else if err != nil && strings.Contains(err.Error(), "404") { + d.SetId("") + + return nil + } + + d.Set("access_group_id", grpID) + d.Set("name", rules.Name) + d.Set("expiration", rules.Expiration) + d.Set("identity_provider", rules.RealmName) + d.Set("conditions", flattenConditions(rules.Conditions)) + d.Set("rule_id", rules.RuleID) + + return nil +} + +func resourceIBMIAMDynamicRuleUpdate(d *schema.ResourceData, meta interface{}) error { + + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + grpID := parts[0] + ruleID := parts[1] + _, etag, err := iamuumClient.DynamicRule().Get(grpID, ruleID) + if err != nil { + return fmt.Errorf("Error retrieving access group Rules: %s", err) + } + + name := d.Get("name").(string) + realm := d.Get("identity_provider").(string) + expiration := d.Get("expiration").(int) + + var cond []interface{} + condition := []iamuumv2.Condition{} + if res, ok := d.GetOk("conditions"); ok { + cond = res.([]interface{}) + for _, e := range cond { + r, _ := e.(map[string]interface{}) + conditionParam := iamuumv2.Condition{ + Claim: r["claim"].(string), + Operator: r["operator"].(string), + Value: fmt.Sprintf("\"%s\"", r["value"].(string)), + } + condition = append(condition, conditionParam) + } + } + + createRuleReq := iamuumv2.CreateRuleRequest{ + Name: name, + RealmName: realm, + Expiration: expiration, + Conditions: condition, + } + _, err = iamuumClient.DynamicRule().Replace(grpID, ruleID, createRuleReq, etag) + if err != nil { + return err + } + + return resourceIBMIAMDynamicRuleRead(d, meta) + +} + +func resourceIBMIAMDynamicRuleDelete(d *schema.ResourceData, meta interface{}) error { + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + grpID := parts[0] + ruleID := parts[1] + + err = iamuumClient.DynamicRule().Delete(grpID, ruleID) + if err != nil && !strings.Contains(err.Error(), "404") { + return err + } + + d.SetId("") + + return nil +} + +func resourceIBMIAMDynamicRuleExists(d *schema.ResourceData, meta interface{}) (bool, error) { + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return false, err + } + + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + grpID := parts[0] + ruleID := parts[1] + + rules, _, err := iamuumClient.DynamicRule().Get(grpID, ruleID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return rules.AccessGroupID == grpID, nil +} diff --git 
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group_members.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group_members.go new file mode 100644 index 00000000000..d48a976246f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group_members.go @@ -0,0 +1,340 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2" + "github.com/IBM-Cloud/bluemix-go/crn" + "github.com/IBM-Cloud/bluemix-go/models" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMIAMAccessGroupMembers() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMAccessGroupMembersCreate, + Read: resourceIBMIAMAccessGroupMembersRead, + Update: resourceIBMIAMAccessGroupMembersUpdate, + Delete: resourceIBMIAMAccessGroupMembersDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "access_group_id": { + Type: schema.TypeString, + Required: true, + Description: "Unique identifier of the access group", + }, + + "ibm_ids": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "iam_service_ids": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "members": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iam_id": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceIBMIAMAccessGroupMembersCreate(d *schema.ResourceData, meta interface{}) error { + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + + grpID := d.Get("access_group_id").(string) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + accountID := userDetails.userAccount + + var userids, serviceids []string + + users := expandStringList(d.Get("ibm_ids").(*schema.Set).List()) + services := expandStringList(d.Get("iam_service_ids").(*schema.Set).List()) + + if len(users) == 0 && len(services) == 0 { + return fmt.Errorf("Provide either `ibm_ids` or `iam_service_ids`") + + } + + userids, err = flattenUserIds(accountID, users, meta) + if err != nil { + return err + } + + serviceids, err = flattenServiceIds(services, meta) + if err != nil { + return err + } + + request := prepareMemberAddRequest(userids, serviceids) + + _, err = iamuumClient.AccessGroupMember().Add(grpID, request) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", grpID, time.Now().UTC().String())) + + return resourceIBMIAMAccessGroupMembersRead(d, meta) +} + +func resourceIBMIAMAccessGroupMembersRead(d *schema.ResourceData, meta interface{}) error { + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + grpID := parts[0] + + members, err := iamuumClient.AccessGroupMember().List(grpID) + if err != nil { + return fmt.Errorf("Error retrieving access group members: %s", err) + } + + d.Set("access_group_id", grpID) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + accountID := userDetails.userAccount + + userManagement, err := 
meta.(ClientSession).UserManagementAPI()
+	if err != nil {
+		return err
+	}
+	client := userManagement.UserInvite()
+	res, err := client.ListUsers(accountID)
+	if err != nil {
+		return err
+	}
+
+	iamClient, err := meta.(ClientSession).IAMAPI()
+	if err != nil {
+		return err
+	}
+
+	boundTo := crn.New(userDetails.cloudName, userDetails.cloudType)
+	boundTo.ScopeType = crn.ScopeAccount
+	boundTo.Scope = userDetails.userAccount
+
+	serviceIDs, err := iamClient.ServiceIds().List(boundTo.String())
+	if err != nil {
+		return err
+	}
+
+	d.Set("members", flattenAccessGroupMembers(members, res, serviceIDs))
+
+	return nil
+}
+
+func resourceIBMIAMAccessGroupMembersUpdate(d *schema.ResourceData, meta interface{}) error {
+
+	iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2()
+	if err != nil {
+		return err
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+
+	grpID := parts[0]
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+
+	accountID := userDetails.userAccount
+
+	var removeUsers, addUsers, removeServiceids, addServiceids []string
+	o, n := d.GetChange("ibm_ids")
+	ou := o.(*schema.Set)
+	nu := n.(*schema.Set)
+
+	removeUsers = expandStringList(ou.Difference(nu).List())
+	addUsers = expandStringList(nu.Difference(ou).List())
+
+	os, ns := d.GetChange("iam_service_ids")
+	osi := os.(*schema.Set)
+	nsi := ns.(*schema.Set)
+
+	removeServiceids = expandStringList(osi.Difference(nsi).List())
+	addServiceids = expandStringList(nsi.Difference(osi).List())
+
+	// Skip membership changes while the resource is still being created;
+	// the guard applies to both user and service ID additions/removals.
+	if (len(addUsers) > 0 || len(addServiceids) > 0) && !d.IsNewResource() {
+		var userids, serviceids []string
+		userids, err = flattenUserIds(accountID, addUsers, meta)
+		if err != nil {
+			return err
+		}
+
+		serviceids, err = flattenServiceIds(addServiceids, meta)
+		if err != nil {
+			return err
+		}
+		request := prepareMemberAddRequest(userids, serviceids)
+
+		_, err = iamuumClient.AccessGroupMember().Add(grpID, request)
+		if err != nil {
+			return err
+		}
+
+	}
+	if (len(removeUsers) > 0 || len(removeServiceids) > 0) && !d.IsNewResource() {
+		iamClient, err := meta.(ClientSession).IAMAPI()
+		if err != nil {
+			return err
+		}
+		for _, u := range removeUsers {
+			ibmUniqueId, err := getIBMUniqueId(accountID, u, meta)
+			if err != nil {
+				return err
+			}
+			err = iamuumClient.AccessGroupMember().Remove(grpID, ibmUniqueId)
+			if err != nil {
+				return err
+			}
+
+		}
+
+		for _, s := range removeServiceids {
+			serviceID, err := iamClient.ServiceIds().Get(s)
+			if err != nil {
+				return err
+			}
+			err = iamuumClient.AccessGroupMember().Remove(grpID, serviceID.IAMID)
+			if err != nil {
+				return err
+			}
+
+		}
+	}
+
+	return resourceIBMIAMAccessGroupMembersRead(d, meta)
+
+}
+
+func resourceIBMIAMAccessGroupMembersDelete(d *schema.ResourceData, meta interface{}) error {
+	iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2()
+	if err != nil {
+		return err
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+
+	grpID := parts[0]
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+
+	users := expandStringList(d.Get("ibm_ids").(*schema.Set).List())
+
+	for _, name := range users {
+
+		ibmUniqueID, err := getIBMUniqueId(userDetails.userAccount, name, meta)
+		if err != nil {
+			return err
+		}
+		err = iamuumClient.AccessGroupMember().Remove(grpID, ibmUniqueID)
+		if err != nil {
+			return err
+		}
+
+	}
+
+	services := expandStringList(d.Get("iam_service_ids").(*schema.Set).List())
+
+	for _, id := range services {
+		serviceID, err
:= getServiceID(id, meta) + if err != nil { + return err + } + err = iamuumClient.AccessGroupMember().Remove(grpID, serviceID.IAMID) + if err != nil { + return err + } + } + + d.SetId("") + + return nil +} + +func prepareMemberAddRequest(userIds, serviceIds []string) (req iamuumv2.AddGroupMemberRequestV2) { + req.Members = make([]models.AccessGroupMemberV2, len(userIds)+len(serviceIds)) + var i = 0 + for _, id := range userIds { + req.Members[i] = models.AccessGroupMemberV2{ + ID: id, + Type: iamuumv2.AccessGroupMemberUser, + } + i++ + } + + for _, id := range serviceIds { + req.Members[i] = models.AccessGroupMemberV2{ + ID: id, + Type: iamuumv2.AccessGroupMemberService, + } + i++ + } + return +} + +func getServiceID(id string, meta interface{}) (models.ServiceID, error) { + + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return models.ServiceID{}, err + } + serviceID, err := iamClient.ServiceIds().Get(id) + if err != nil { + return models.ServiceID{}, err + } + + return serviceID, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group_policy.go new file mode 100644 index 00000000000..6a0221a6917 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_access_group_policy.go @@ -0,0 +1,426 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMIAMAccessGroupPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMAccessGroupPolicyCreate, + Read: resourceIBMIAMAccessGroupPolicyRead, + Update: resourceIBMIAMAccessGroupPolicyUpdate, + Delete: resourceIBMIAMAccessGroupPolicyDelete, + Exists: resourceIBMIAMAccessGroupPolicyExists, + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + resources, resourceAttributes, err := importAccessGroupPolicy(d, meta) + if err != nil { + return nil, fmt.Errorf("Error reading resource ID: %s", err) + } + d.Set("resources", resources) + d.Set("resource_attributes", resourceAttributes) + return []*schema.ResourceData{d}, nil + }, + }, + + Schema: map[string]*schema.Schema{ + "access_group_id": { + Type: schema.TypeString, + Required: true, + Description: "ID of access group", + ForceNew: true, + }, + + "roles": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Role names of the policy definition", + }, + + "resources": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"account_management", "resource_attributes"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Optional: true, + Description: "Service name of the policy definition", + }, + + "resource_instance_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of resource instance of the policy definition", + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Description: "Region of the policy definition", + }, + + "resource_type": { + Type: schema.TypeString, + Optional: true, + 
Description: "Resource type of the policy definition", + }, + + "resource": { + Type: schema.TypeString, + Optional: true, + Description: "Resource of the policy definition", + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + }, + + "attributes": { + Type: schema.TypeMap, + Optional: true, + Description: "Set resource attributes in the form of 'name=value,name=value....", + Elem: schema.TypeString, + }, + }, + }, + }, + + "resource_attributes": { + Type: schema.TypeSet, + Optional: true, + Description: "Set resource attributes.", + ConflictsWith: []string{"resources", "account_management"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of attribute.", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "Value of attribute.", + }, + "operator": { + Type: schema.TypeString, + Optional: true, + Default: "stringEquals", + Description: "Operator of attribute.", + }, + }, + }, + }, + "account_management": { + Type: schema.TypeBool, + Default: false, + Optional: true, + Description: "Give access to all account management services", + ConflictsWith: []string{"resources", "resource_attributes"}, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "version": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMIAMAccessGroupPolicyCreate(d *schema.ResourceData, meta interface{}) error { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + accessGroupId := d.Get("access_group_id").(string) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + var policyOptions iampolicymanagementv1.CreatePolicyOptions + policyOptions, err = generatePolicyOptions(d, meta) + if err != nil { + return err + } + + // Keep configuring the policy options by adding subject part + accessGroupIdSubject := &iampolicymanagementv1.PolicySubject{ + Attributes: []iampolicymanagementv1.SubjectAttribute{ + { + Name: core.StringPtr("access_group_id"), + Value: &accessGroupId, + }, + }, + } + + accountIdResourceAttribute := &iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("accountId"), + Value: &userDetails.userAccount, + } + + policyResource := &iampolicymanagementv1.PolicyResource{ + Attributes: append(policyOptions.Resources[0].Attributes, *accountIdResourceAttribute), + } + + createPolicyOptions := iamPolicyManagementClient.NewCreatePolicyOptions( + "access", + []iampolicymanagementv1.PolicySubject{*accessGroupIdSubject}, + policyOptions.Roles, + []iampolicymanagementv1.PolicyResource{*policyResource}, + ) + + accessGroupPolicy, res, err := iamPolicyManagementClient.CreatePolicy(createPolicyOptions) + if err != nil || accessGroupPolicy == nil { + return fmt.Errorf("Error creating access group policy: %s\n%s", err, res) + } + + d.SetId(fmt.Sprintf("%s/%s", accessGroupId, *accessGroupPolicy.ID)) + + getPolicyOptions := &iampolicymanagementv1.GetPolicyOptions{ + PolicyID: accessGroupPolicy.ID, + } + + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + var err error + policy, res, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions) + if err != nil || policy == nil { + if res != nil && res.StatusCode == 404 { + return resource.RetryableError(err) + } + return 
resource.NonRetryableError(err) + } + return nil + }) + + if isResourceTimeoutError(err) { + _, res, err = iamPolicyManagementClient.GetPolicy(getPolicyOptions) + } + if err != nil { + return fmt.Errorf("Error fetching access group policy: %s\n%s", err, res) + } + + return resourceIBMIAMAccessGroupPolicyRead(d, meta) +} + +func resourceIBMIAMAccessGroupPolicyRead(d *schema.ResourceData, meta interface{}) error { + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + accessGroupId := parts[0] + accessGroupPolicyId := parts[1] + + getPolicyOptions := &iampolicymanagementv1.GetPolicyOptions{ + PolicyID: &accessGroupPolicyId, + } + + accessGroupPolicy, res, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions) + if err != nil { + return fmt.Errorf("Error retrieving access group policy: %s\n%s", err, res) + } + + retrievedAttribute := getSubjectAttribute("access_group_id", accessGroupPolicy.Subjects[0]) + if accessGroupId != *retrievedAttribute { + return fmt.Errorf("Policy %s does not belong to access group %s, retrievedAttr: %s", accessGroupPolicyId, accessGroupId, *retrievedAttribute) + } + + d.Set("access_group_id", accessGroupId) + roles := make([]string, len(accessGroupPolicy.Roles)) + for i, role := range accessGroupPolicy.Roles { + roles[i] = *role.DisplayName + } + d.Set("roles", roles) + d.Set("version", res.Headers.Get("ETag")) + + if _, ok := d.GetOk("resources"); ok { + d.Set("resources", flattenPolicyResource(accessGroupPolicy.Resources)) + } + if _, ok := d.GetOk("resource_attributes"); ok { + d.Set("resource_attributes", flattenPolicyResourceAttributes(accessGroupPolicy.Resources)) + } + if len(accessGroupPolicy.Resources) > 0 { + if *getResourceAttribute("serviceType", accessGroupPolicy.Resources[0]) == "service" { + d.Set("account_management", false) + } + if *getResourceAttribute("serviceType", accessGroupPolicy.Resources[0]) == "platform_service" { + d.Set("account_management", true) + } + } + + return nil +} + +func resourceIBMIAMAccessGroupPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + if d.HasChange("roles") || d.HasChange("resources") || d.HasChange("resource_attributes") || d.HasChange("account_management") { + parts, err := idParts(d.Id()) + if err != nil { + return err + } + accessGroupId := parts[0] + accessGroupPolicyId := parts[1] + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + var policyOptions iampolicymanagementv1.CreatePolicyOptions + policyOptions, err = generatePolicyOptions(d, meta) + if err != nil { + return err + } + + accessGroupIdSubject := &iampolicymanagementv1.PolicySubject{ + Attributes: []iampolicymanagementv1.SubjectAttribute{ + { + Name: core.StringPtr("access_group_id"), + Value: &accessGroupId, + }, + }, + } + + accountIdResourceAttribute := &iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("accountId"), + Value: &userDetails.userAccount, + } + + policyResource := &iampolicymanagementv1.PolicyResource{ + Attributes: append(policyOptions.Resources[0].Attributes, *accountIdResourceAttribute), + } + + updatePolicyOptions := iamPolicyManagementClient.NewUpdatePolicyOptions( + accessGroupPolicyId, + d.Get("version").(string), + "access", + []iampolicymanagementv1.PolicySubject{*accessGroupIdSubject}, + 
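+		// "version" carries the ETag captured by the read above; the policy
+		// service uses it as an If-Match precondition so an update based on
+		// a stale local copy is rejected rather than silently overwritten.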
policyOptions.Roles, + []iampolicymanagementv1.PolicyResource{*policyResource}, + ) + + _, res, err := iamPolicyManagementClient.UpdatePolicy(updatePolicyOptions) + if err != nil { + return fmt.Errorf("Error updating access group policy: %s\n%s", err, res) + } + } + + return resourceIBMIAMAccessGroupPolicyRead(d, meta) +} + +func resourceIBMIAMAccessGroupPolicyDelete(d *schema.ResourceData, meta interface{}) error { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + accessGroupPolicyId := parts[1] + + deletePolicyOptions := iamPolicyManagementClient.NewDeletePolicyOptions( + accessGroupPolicyId, + ) + + res, err := iamPolicyManagementClient.DeletePolicy(deletePolicyOptions) + if err != nil { + return fmt.Errorf("Error deleting access group policy: %s\n%s", err, res) + } + + d.SetId("") + + return nil +} + +func resourceIBMIAMAccessGroupPolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + + accessGroupPolicyId := parts[1] + + getPolicyOptions := iamPolicyManagementClient.NewGetPolicyOptions( + accessGroupPolicyId, + ) + + accessGroupPolicy, res, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions) + if err != nil { + if res != nil && res.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error communicating with the API: %s\n%s", err, res) + } + + tempID := fmt.Sprintf("%s/%s", *getSubjectAttribute("access_group_id", accessGroupPolicy.Subjects[0]), *accessGroupPolicy.ID) + + return tempID == d.Id(), nil +} +func importAccessGroupPolicy(d *schema.ResourceData, meta interface{}) (interface{}, interface{}, error) { + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return nil, nil, err + } + + parts, err := idParts(d.Id()) + if err != nil { + return nil, nil, err + } + accgrpPolicyID := parts[1] + + getPolicyOptions := iamPolicyManagementClient.NewGetPolicyOptions( + accgrpPolicyID, + ) + + accessGroupPolicy, res, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions) + if err != nil { + return nil, nil, fmt.Errorf("Error retrieving access group policy: %s\n%s", err, res) + } + + resources := flattenPolicyResource(accessGroupPolicy.Resources) + resource_attributes := flattenPolicyResourceAttributes(accessGroupPolicy.Resources) + + return resources, resource_attributes, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_account_settings.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_account_settings.go new file mode 100644 index 00000000000..0b395501cf1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_account_settings.go @@ -0,0 +1,342 @@ +// Copyright IBM Corp. 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/iamidentityv1" +) + +const ( + accountSettings = "ibm_iam_account_settings" + restrictCreateServiceId = "restrict_create_service_id" + restrictCreateApiKey = "restrict_create_platform_apikey" + mfa = "mfa" +) + +func resourceIbmIamAccountSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceIbmIamAccountSettingsCreate, + Read: resourceIbmIamAccountSettingsRead, + Update: resourceIbmIamAccountSettingsUpdate, + Delete: resourceIbmIamAccountSettingsDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "include_history": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Defines if the entity history is included in the response.", + }, + "restrict_create_service_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator(accountSettings, restrictCreateServiceId), + Description: "Defines whether or not creating a Service Id is access controlled. Valid values: * RESTRICTED - to apply access control * NOT_RESTRICTED - to remove access control * NOT_SET - to 'unset' a previous set value.", + }, + "restrict_create_platform_apikey": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator(accountSettings, restrictCreateApiKey), + Description: "Defines whether or not creating platform API keys is access controlled. Valid values: * RESTRICTED - to apply access control * NOT_RESTRICTED - to remove access control * NOT_SET - to 'unset' a previous set value.", + }, + "allowed_ip_addresses": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Defines the IP addresses and subnets from which IAM tokens can be created for the account.", + }, + "entity_tag": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Version of the account settings.", + }, + "mfa": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator(accountSettings, mfa), + Description: "Defines the MFA trait for the account. Valid values: * NONE - No MFA trait set * TOTP - For all non-federated IBMId users * TOTP4ALL - For all users * LEVEL1 - Email-based MFA for all users * LEVEL2 - TOTP-based MFA for all users * LEVEL3 - U2F MFA for all users.", + }, + "if_match": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "*", + Description: "Version of the account settings to be updated. Specify the version that you retrieved as entity_tag (ETag header) when reading the account. This value helps identifying parallel usage of this API. Pass * to indicate to update any version available. 
This might result in stale updates.",
+			},
+			"history": &schema.Schema{
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "History of the Account Settings.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"timestamp": &schema.Schema{
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Timestamp when the action was triggered.",
+						},
+						"iam_id": &schema.Schema{
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "IAM ID of the identity which triggered the action.",
+						},
+						"iam_id_account": &schema.Schema{
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Account of the identity which triggered the action.",
+						},
+						"action": &schema.Schema{
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Action of the history entry.",
+						},
+						"params": &schema.Schema{
+							Type:        schema.TypeList,
+							Required:    true,
+							Description: "Params of the history entry.",
+							Elem:        &schema.Schema{Type: schema.TypeString},
+						},
+						"message": &schema.Schema{
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Message which summarizes the executed action.",
+						},
+					},
+				},
+			},
+			"session_expiration_in_seconds": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "Defines the session expiration in seconds for the account. Valid values: * Any whole number between '900' and '86400' * NOT_SET - To unset account setting and use service default.",
+			},
+			"session_invalidation_in_seconds": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "Defines the period of time in seconds in which a session will be invalidated due to inactivity. Valid values: * Any whole number between '900' and '7200' * NOT_SET - To unset account setting and use service default.",
+			},
+			"max_sessions_per_identity": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "Defines the max allowed sessions per identity required by the account. Valid values: * Any whole number greater than '0' * NOT_SET - To unset account setting and use service default.",
+			},
+		},
+	}
+}
+
+func resourceIBMIAMAccountSettingsValidator() *ResourceValidator {
+	validateSchema := make([]ValidateSchema, 1)
+
+	restrict_values := "RESTRICTED, NOT_RESTRICTED, NOT_SET"
+	mfa_values := "NONE, TOTP, TOTP4ALL, LEVEL1, LEVEL2, LEVEL3"
+
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 restrictCreateServiceId,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              restrict_values})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 restrictCreateApiKey,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              restrict_values})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 mfa,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   true,
+			AllowedValues:              mfa_values})
+
+	ibmIAMAccountSettingsValidator := ResourceValidator{ResourceName: "ibm_iam_account_settings", Schema: validateSchema}
+	return &ibmIAMAccountSettingsValidator
+}
+
+func resourceIbmIamAccountSettingsCreate(d *schema.ResourceData, meta interface{}) error {
+	iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API()
+	if err != nil {
+		return err
+	}
+
+	getAccountSettingsOptions := &iamidentityv1.GetAccountSettingsOptions{}
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+	getAccountSettingsOptions.SetAccountID(userDetails.userAccount)
+	if _, ok := d.GetOk("include_history"); ok {
+		getAccountSettingsOptions.SetIncludeHistory(d.Get("include_history").(bool))
+	}
+
+	accountSettingsResponse, response, err := iamIdentityClient.GetAccountSettings(getAccountSettingsOptions)
+	if err != nil {
+		log.Printf("[DEBUG] GetAccountSettings failed %s\n%s", err, response)
+		return err
+	}
+
+	d.SetId(fmt.Sprintf("%s", *accountSettingsResponse.AccountID))
+
+	return resourceIbmIamAccountSettingsUpdate(d, meta)
+}
+
+func resourceIbmIamAccountSettingsRead(d *schema.ResourceData, meta interface{}) error {
+	iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API()
+	if err != nil {
+		return err
+	}
+
+	getAccountSettingsOptions := &iamidentityv1.GetAccountSettingsOptions{}
+
+	getAccountSettingsOptions.SetAccountID(d.Id())
+	getAccountSettingsOptions.SetIncludeHistory(d.Get("include_history").(bool))
+
+	accountSettingsResponse, response, err := iamIdentityClient.GetAccountSettings(getAccountSettingsOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		log.Printf("[DEBUG] GetAccountSettings failed %s\n%s", err, response)
+		return err
+	}
+
+	if err = d.Set("restrict_create_service_id", accountSettingsResponse.RestrictCreateServiceID); err != nil {
+		return fmt.Errorf("Error setting restrict_create_service_id: %s", err)
+	}
+	if err = d.Set("restrict_create_platform_apikey", accountSettingsResponse.RestrictCreatePlatformApikey); err != nil {
+		return fmt.Errorf("Error setting restrict_create_platform_apikey: %s", err)
+	}
+	if err = d.Set("allowed_ip_addresses", accountSettingsResponse.AllowedIPAddresses); err != nil {
+		return fmt.Errorf("Error setting allowed_ip_addresses: %s", err)
+	}
+	if err = d.Set("entity_tag", accountSettingsResponse.EntityTag); err != nil {
+		return fmt.Errorf("Error setting entity_tag: %s", err)
+	}
+	if err = d.Set("mfa",
accountSettingsResponse.Mfa); err != nil { + return fmt.Errorf("Error setting mfa: %s", err) + } + if accountSettingsResponse.History != nil { + history := []map[string]interface{}{} + for _, historyItem := range accountSettingsResponse.History { + historyItemMap := resourceIbmIamAccountSettingsEnityHistoryRecordToMap(historyItem) + history = append(history, historyItemMap) + } + if err = d.Set("history", history); err != nil { + return fmt.Errorf("Error setting history: %s", err) + } + } + if err = d.Set("session_expiration_in_seconds", accountSettingsResponse.SessionExpirationInSeconds); err != nil { + return fmt.Errorf("Error setting session_expiration_in_seconds: %s", err) + } + if err = d.Set("session_invalidation_in_seconds", accountSettingsResponse.SessionInvalidationInSeconds); err != nil { + return fmt.Errorf("Error setting session_invalidation_in_seconds: %s", err) + } + if err = d.Set("max_sessions_per_identity", accountSettingsResponse.MaxSessionsPerIdentity); err != nil { + return fmt.Errorf("Error setting max_sessions_per_identity: %s", err) + } + + return nil +} + +func resourceIbmIamAccountSettingsEnityHistoryRecordToMap(enityHistoryRecord iamidentityv1.EnityHistoryRecord) map[string]interface{} { + enityHistoryRecordMap := map[string]interface{}{} + + enityHistoryRecordMap["timestamp"] = enityHistoryRecord.Timestamp + enityHistoryRecordMap["iam_id"] = enityHistoryRecord.IamID + enityHistoryRecordMap["iam_id_account"] = enityHistoryRecord.IamIDAccount + enityHistoryRecordMap["action"] = enityHistoryRecord.Action + enityHistoryRecordMap["params"] = enityHistoryRecord.Params + enityHistoryRecordMap["message"] = enityHistoryRecord.Message + + return enityHistoryRecordMap +} + +func resourceIbmIamAccountSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + + updateAccountSettingsOptions := &iamidentityv1.UpdateAccountSettingsOptions{} + + updateAccountSettingsOptions.SetAccountID(d.Id()) + updateAccountSettingsOptions.SetIfMatch(d.Get("if_match").(string)) + + hasChange := false + + if d.HasChange("allowed_ip_addresses") { + allowed_ip_addresses_str := d.Get("allowed_ip_addresses").(string) + updateAccountSettingsOptions.SetAllowedIPAddresses(allowed_ip_addresses_str) + hasChange = true + } + + if d.HasChange("restrict_create_service_id") { + restrict_create_service_id_str := d.Get("restrict_create_service_id").(string) + updateAccountSettingsOptions.SetRestrictCreateServiceID(restrict_create_service_id_str) + hasChange = true + } + + if d.HasChange("restrict_create_platform_apikey") { + restrict_create_platform_apikey_str := d.Get("restrict_create_platform_apikey").(string) + updateAccountSettingsOptions.SetRestrictCreatePlatformApikey(restrict_create_platform_apikey_str) + hasChange = true + } + + if d.HasChange("mfa") { + mfa_str := d.Get("mfa").(string) + updateAccountSettingsOptions.SetMfa(mfa_str) + hasChange = true + } + + if d.HasChange("session_expiration_in_seconds") { + session_expiration_in_seconds_str := d.Get("session_expiration_in_seconds").(string) + updateAccountSettingsOptions.SetSessionExpirationInSeconds(session_expiration_in_seconds_str) + hasChange = true + } + + if d.HasChange("session_invalidation_in_seconds") { + session_invalidation_in_seconds_str := d.Get("session_invalidation_in_seconds").(string) + updateAccountSettingsOptions.SetSessionInvalidationInSeconds(session_invalidation_in_seconds_str) + hasChange = true + } + + if 
d.HasChange("max_sessions_per_identity") { + max_sessions_per_identity_str := d.Get("max_sessions_per_identity").(string) + updateAccountSettingsOptions.SetMaxSessionsPerIdentity(max_sessions_per_identity_str) + hasChange = true + } + + if hasChange { + _, response, err := iamIdentityClient.UpdateAccountSettings(updateAccountSettingsOptions) + if err != nil { + log.Printf("[DEBUG] UpdateAccountSettings failed %s\n%s", err, response) + return err + } + } + + return resourceIbmIamAccountSettingsRead(d, meta) +} + +func resourceIbmIamAccountSettingsDelete(d *schema.ResourceData, meta interface{}) error { + + // DELETE NOT SUPPORTED + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_api_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_api_key.go new file mode 100644 index 00000000000..a7913c763b2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_api_key.go @@ -0,0 +1,256 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/platform-services-go-sdk/iamidentityv1" +) + +func resourceIbmIamApiKey() *schema.Resource { + return &schema.Resource{ + Create: resourceIbmIamApiKeyCreate, + Read: resourceIbmIamApiKeyRead, + Update: resourceIbmIamApiKeyUpdate, + Delete: resourceIbmIamApiKeyDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Name of the API key. The name is not checked for uniqueness. Therefore multiple names with the same value can exist. Access is done via the UUID of the API key.", + }, + "iam_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The iam_id that this API key authenticates.", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The optional description of the API key. The 'description' property is only available if a description was provided during a create of an API key.", + }, + "account_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The account ID of the API key.", + }, + "apikey": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Sensitive: true, + Description: "You can optionally passthrough the API key value for this API key. If passed, NO validation of that apiKey value is done, i.e. the value can be non-URL safe. If omitted, the API key management will create an URL safe opaque API key value. The value of the API key is checked for uniqueness. Please ensure enough variations when passing in this value.", + }, + "store_value": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Send true or false to set whether the API key value is retrievable in the future by using the Get details of an API key request. If you create an API key for a user, you must specify `false` or omit the value. We don't allow storing of API keys for users.", + }, + "entity_lock": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "false", + Description: "Indicates if the API key is locked for further write operations. 
False by default.", + }, + "apikey_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Unique identifier of this API Key.", + }, + "entity_tag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Version of the API Key details object. You need to specify this value when updating the API key to avoid stale updates.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Cloud Resource Name of the item. Example Cloud Resource Name: 'crn:v1:bluemix:public:iam-identity:us-south:a/myaccount::apikey:1234-9012-5678'.", + }, + "locked": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "The API key cannot be changed if set to true.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "If set contains a date time string of the creation date in ISO format.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "IAM ID of the user or service which created the API key.", + }, + "modified_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "If set contains a date time string of the last modification date in ISO format.", + }, + }, + } +} + +func resourceIbmIamApiKeyCreate(d *schema.ResourceData, meta interface{}) error { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + + createApiKeyOptions := &iamidentityv1.CreateAPIKeyOptions{} + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + iamID := userDetails.userID + accountID := userDetails.userAccount + + createApiKeyOptions.SetName(d.Get("name").(string)) + createApiKeyOptions.SetIamID(iamID) + createApiKeyOptions.SetAccountID(accountID) + + if _, ok := d.GetOk("description"); ok { + createApiKeyOptions.SetDescription(d.Get("description").(string)) + } + if _, ok := d.GetOk("apikey"); ok { + createApiKeyOptions.SetApikey(d.Get("apikey").(string)) + } + if _, ok := d.GetOk("store_value"); ok { + createApiKeyOptions.SetStoreValue(d.Get("store_value").(bool)) + } + if _, ok := d.GetOk("locked"); ok { + createApiKeyOptions.SetEntityLock(d.Get("locked").(string)) + } + + apiKey, response, err := iamIdentityClient.CreateAPIKey(createApiKeyOptions) + if err != nil { + log.Printf("[DEBUG] CreateApiKey failed %s\n%s", err, response) + return err + } + + d.SetId(*apiKey.ID) + d.Set("apikey", *apiKey.Apikey) + + if keyfile, ok := d.GetOk("file"); ok { + if err := saveToFile(apiKey, keyfile.(string)); err != nil { + log.Printf("Error writing API Key Details to file: %s", err) + } + } + + return resourceIbmIamApiKeyRead(d, meta) +} + +func resourceIbmIamApiKeyRead(d *schema.ResourceData, meta interface{}) error { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + + getApiKeyOptions := &iamidentityv1.GetAPIKeyOptions{} + + getApiKeyOptions.SetID(d.Id()) + + apiKey, response, err := iamIdentityClient.GetAPIKey(getApiKeyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetApiKey failed %s\n%s", err, response) + return err + } + + if err = d.Set("name", apiKey.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("iam_id", apiKey.IamID); err != nil { + return fmt.Errorf("Error setting iam_id: %s", err) + } + if err = d.Set("description", apiKey.Description); err != 
nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("account_id", apiKey.AccountID); err != nil { + return fmt.Errorf("Error setting account_id: %s", err) + } + if err = d.Set("apikey", apiKey.Apikey); err != nil { + return fmt.Errorf("Error setting apikey: %s", err) + } + if err = d.Set("locked", apiKey.Locked); err != nil { + return fmt.Errorf("Error setting entity_lock: %s", err) + } + if err = d.Set("apikey_id", apiKey.ID); err != nil { + return fmt.Errorf("Error setting id: %s", err) + } + if err = d.Set("entity_tag", apiKey.EntityTag); err != nil { + return fmt.Errorf("Error setting entity_tag: %s", err) + } + if err = d.Set("crn", apiKey.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("locked", apiKey.Locked); err != nil { + return fmt.Errorf("Error setting locked: %s", err) + } + if err = d.Set("created_at", apiKey.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + if err = d.Set("created_by", apiKey.CreatedBy); err != nil { + return fmt.Errorf("Error setting created_by: %s", err) + } + if err = d.Set("modified_at", apiKey.ModifiedAt.String()); err != nil { + return fmt.Errorf("Error setting modified_at: %s", err) + } + + return nil +} + +func resourceIbmIamApiKeyUpdate(d *schema.ResourceData, meta interface{}) error { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + + updateApiKeyOptions := &iamidentityv1.UpdateAPIKeyOptions{} + + updateApiKeyOptions.SetIfMatch("*") + updateApiKeyOptions.SetID(d.Id()) + updateApiKeyOptions.SetName(d.Get("name").(string)) + if _, ok := d.GetOk("description"); ok { + updateApiKeyOptions.SetDescription(d.Get("description").(string)) + } + _, response, err := iamIdentityClient.UpdateAPIKey(updateApiKeyOptions) + if err != nil { + log.Printf("[DEBUG] UpdateApiKey failed %s\n%s", err, response) + return err + } + + return resourceIbmIamApiKeyRead(d, meta) +} + +func resourceIbmIamApiKeyDelete(d *schema.ResourceData, meta interface{}) error { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + + deleteApiKeyOptions := &iamidentityv1.DeleteAPIKeyOptions{} + + deleteApiKeyOptions.SetID(d.Id()) + + response, err := iamIdentityClient.DeleteAPIKey(deleteApiKeyOptions) + if err != nil { + log.Printf("[DEBUG] DeleteApiKey failed %s\n%s", err, response) + return err + } + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_authorization_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_authorization_policy.go new file mode 100644 index 00000000000..67aa8577842 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_authorization_policy.go @@ -0,0 +1,342 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM-Cloud/bluemix-go/models" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMIAMAuthorizationPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMAuthorizationPolicyCreate, + Read: resourceIBMIAMAuthorizationPolicyRead, + Update: resourceIBMIAMAuthorizationPolicyUpdate, + Delete: resourceIBMIAMAuthorizationPolicyDelete, + Exists: resourceIBMIAMAuthorizationPolicyExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "source_service_name": { + Type: schema.TypeString, + Required: true, + Description: "The source service name", + ForceNew: true, + }, + + "target_service_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The target service name", + }, + + "roles": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Role names of the policy definition", + }, + + "source_resource_instance_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The source resource instance Id", + }, + + "target_resource_instance_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The target resource instance Id", + }, + + "source_resource_group_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The source resource group Id", + }, + + "target_resource_group_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The target resource group Id", + }, + + "source_resource_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Resource type of source service", + }, + + "target_resource_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Resource type of target service", + }, + + "source_service_account": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Account GUID of source service", + }, + + "version": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMIAMAuthorizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { + sourceServiceName := d.Get("source_service_name").(string) + targetServiceName := d.Get("target_service_name").(string) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + iampapClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + sourceServiceAccount := userDetails.userAccount + + if account, ok := d.GetOk("source_service_account"); ok { + sourceServiceAccount = account.(string) + } + + accountIdSubjectAttribute := &iampolicymanagementv1.SubjectAttribute{ + Name: core.StringPtr("accountId"), + Value: &sourceServiceAccount, + } + serviceNameSubjectAttribute := &iampolicymanagementv1.SubjectAttribute{ + Name: core.StringPtr("serviceName"), + Value: &sourceServiceName, + } + + policySubject := &iampolicymanagementv1.PolicySubject{ + Attributes: []iampolicymanagementv1.SubjectAttribute{*accountIdSubjectAttribute, *serviceNameSubjectAttribute}, + } + + accountIDResourceAttribute := &iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("accountId"), + Value: 
core.StringPtr(userDetails.userAccount), + Operator: core.StringPtr("stringEquals"), + } + + serviceNameResourceAttribute := &iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceName"), + Value: core.StringPtr(targetServiceName), + Operator: core.StringPtr("stringEquals"), + } + + policyResource := &iampolicymanagementv1.PolicyResource{ + Attributes: []iampolicymanagementv1.ResourceAttribute{*accountIDResourceAttribute, *serviceNameResourceAttribute}, + } + + if sID, ok := d.GetOk("source_resource_instance_id"); ok { + serviceInstanceSubjectAttribute := iampolicymanagementv1.SubjectAttribute{ + Name: core.StringPtr("serviceInstance"), + Value: core.StringPtr(sID.(string)), + } + policySubject.Attributes = append(policySubject.Attributes, serviceInstanceSubjectAttribute) + } + + if tID, ok := d.GetOk("target_resource_instance_id"); ok { + serviceInstanceResourceAttribute := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceInstance"), + Value: core.StringPtr(tID.(string)), + } + policyResource.Attributes = append(policyResource.Attributes, serviceInstanceResourceAttribute) + } + + if sType, ok := d.GetOk("source_resource_type"); ok { + resourceTypeSubjectAttribute := iampolicymanagementv1.SubjectAttribute{ + Name: core.StringPtr("resourceType"), + Value: core.StringPtr(sType.(string)), + } + policySubject.Attributes = append(policySubject.Attributes, resourceTypeSubjectAttribute) + } + + if tType, ok := d.GetOk("target_resource_type"); ok { + resourceTypeResourceAttribute := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("resourceType"), + Value: core.StringPtr(tType.(string)), + } + policyResource.Attributes = append(policyResource.Attributes, resourceTypeResourceAttribute) + } + + if sResGrpID, ok := d.GetOk("source_resource_group_id"); ok { + resourceGroupSubjectAttribute := iampolicymanagementv1.SubjectAttribute{ + Name: core.StringPtr("resourceGroupId"), + Value: core.StringPtr(sResGrpID.(string)), + } + policySubject.Attributes = append(policySubject.Attributes, resourceGroupSubjectAttribute) + } + + if tResGrpID, ok := d.GetOk("target_resource_group_id"); ok { + resourceGroupResourceAttribute := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("resourceGroupId"), + Value: core.StringPtr(tResGrpID.(string)), + } + policyResource.Attributes = append(policyResource.Attributes, resourceGroupResourceAttribute) + } + + roles, err := getAuthorizationRolesByName(expandStringList(d.Get("roles").([]interface{})), sourceServiceName, targetServiceName, meta) + if err != nil { + return err + } + + createPolicyOptions := iampapClient.NewCreatePolicyOptions( + "authorization", + []iampolicymanagementv1.PolicySubject{*policySubject}, + roles, + []iampolicymanagementv1.PolicyResource{*policyResource}, + ) + authPolicy, _, err := iampapClient.CreatePolicy(createPolicyOptions) + + if err != nil { + return fmt.Errorf("Error creating authorization policy: %s", err) + } + + d.SetId(*authPolicy.ID) + + return resourceIBMIAMAuthorizationPolicyRead(d, meta) +} + +func resourceIBMIAMAuthorizationPolicyRead(d *schema.ResourceData, meta interface{}) error { + + iampapClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + getPolicyOptions := &iampolicymanagementv1.GetPolicyOptions{ + PolicyID: core.StringPtr(d.Id()), + } + + authorizationPolicy, _, err := iampapClient.GetPolicy(getPolicyOptions) + if err != nil { + return fmt.Errorf("Error retrieving authorizationPolicy: %s", err) + } + roles 
:= make([]string, len(authorizationPolicy.Roles))
+	for i, role := range authorizationPolicy.Roles {
+		roles[i] = *role.DisplayName
+	}
+	d.Set("roles", roles)
+	source := authorizationPolicy.Subjects[0]
+	target := authorizationPolicy.Resources[0]
+	d.Set("source_service_name", getSubjectAttribute("serviceName", source))
+	d.Set("target_service_name", getResourceAttribute("serviceName", target))
+	d.Set("source_resource_instance_id", getSubjectAttribute("serviceInstance", source))
+	d.Set("target_resource_instance_id", getResourceAttribute("serviceInstance", target))
+	d.Set("source_resource_type", getSubjectAttribute("resourceType", source))
+	d.Set("target_resource_type", getResourceAttribute("resourceType", target))
+	d.Set("source_service_account", getSubjectAttribute("accountId", source))
+	d.Set("source_resource_group_id", getSubjectAttribute("resourceGroupId", source))
+	d.Set("target_resource_group_id", getResourceAttribute("resourceGroupId", target))
+	return nil
+}
+
+// Returns nil because the IBM Cloud IAM authorization policy API does not support updates.
+func resourceIBMIAMAuthorizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
+	return nil
+}
+
+func resourceIBMIAMAuthorizationPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+	iampapClient, err := meta.(ClientSession).IAMPolicyManagementV1API()
+	if err != nil {
+		return err
+	}
+
+	authorizationPolicyID := d.Id()
+
+	deletePolicyOptions := &iampolicymanagementv1.DeletePolicyOptions{
+		PolicyID: core.StringPtr(authorizationPolicyID),
+	}
+	_, err = iampapClient.DeletePolicy(deletePolicyOptions)
+	if err != nil {
+		log.Printf(
+			"Error deleting authorization policy: %s", err)
+	}
+
+	d.SetId("")
+
+	return nil
+}
+
+func resourceIBMIAMAuthorizationPolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	iampapClient, err := meta.(ClientSession).IAMPolicyManagementV1API()
+	if err != nil {
+		return false, err
+	}
+
+	getPolicyOptions := &iampolicymanagementv1.GetPolicyOptions{
+		PolicyID: core.StringPtr(d.Id()),
+	}
+	authorizationPolicy, _, err := iampapClient.GetPolicy(getPolicyOptions)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+
+	return *authorizationPolicy.ID == d.Id(), nil
+}
+
+// TODO: Refactor to remove ListAuthorizationRoles (which lives in bluemix-go-sdk) ?
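+// getAuthorizationRolesByName resolves role display names to PolicyRole
+// models: it lists the roles supported for the given source/target service
+// pair, converts them to the policy model, and then filters that list by
+// the requested names.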
+func getAuthorizationRolesByName(roleNames []string, sourceServiceName string, targetServiceName string, meta interface{}) ([]iampolicymanagementv1.PolicyRole, error) { + + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return []iampolicymanagementv1.PolicyRole{}, err + } + + iamRepo := iamClient.ServiceRoles() + roles, err := iamRepo.ListAuthorizationRoles(sourceServiceName, targetServiceName) + convertedRoles := convertRoleModels(roles) + if err != nil { + return []iampolicymanagementv1.PolicyRole{}, err + } + + filteredRoles := []iampolicymanagementv1.PolicyRole{} + filteredRoles, err = getRolesFromRoleNames(roleNames, convertedRoles) + if err != nil { + return []iampolicymanagementv1.PolicyRole{}, err + } + return filteredRoles, nil +} + +// ConvertRoleModels will transform role models returned from "/v1/roles" to the model used by policy +func convertRoleModels(roles []models.PolicyRole) []iampolicymanagementv1.PolicyRole { + results := make([]iampolicymanagementv1.PolicyRole, len(roles)) + for i, r := range roles { + results[i] = iampolicymanagementv1.PolicyRole{ + RoleID: core.StringPtr(r.ID.String()), + DisplayName: core.StringPtr(r.DisplayName), + } + } + return results +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_authorization_policy_detach.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_authorization_policy_detach.go new file mode 100644 index 00000000000..c9d70fbbaf4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_authorization_policy_detach.go @@ -0,0 +1,67 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMIAMAuthorizationPolicyDetach() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMAuthorizationPolicyDetachCreate, + Read: resourceIBMIAMAuthorizationPolicyDetachRead, + Delete: resourceIBMIAMAuthorizationPolicyDetachDelete, + Exists: resourceIBMIAMAuthorizationPolicyDetachExists, + + Schema: map[string]*schema.Schema{ + "authorization_policy_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Authorization policy ID", + }, + }, + } +} + +func resourceIBMIAMAuthorizationPolicyDetachCreate(d *schema.ResourceData, meta interface{}) error { + iampapClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + policyID := d.Get("authorization_policy_id").(string) + + deletePolicyOptions := iampapClient.NewDeletePolicyOptions( + policyID, + ) + _, err = iampapClient.DeletePolicy(deletePolicyOptions) + if err != nil { + return fmt.Errorf("Error detaching authorization policy: %s", err) + } + + d.SetId(time.Now().UTC().String()) + + return resourceIBMIAMAuthorizationPolicyDetachRead(d, meta) +} + +func resourceIBMIAMAuthorizationPolicyDetachRead(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceIBMIAMAuthorizationPolicyDetachDelete(d *schema.ResourceData, meta interface{}) error { + + d.SetId("") + + return nil +} + +func resourceIBMIAMAuthorizationPolicyDetachExists(d *schema.ResourceData, meta interface{}) (bool, error) { + if d.Id() == "" { + return false, nil + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_custom_role.go 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_custom_role.go new file mode 100644 index 00000000000..35691905304 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_custom_role.go @@ -0,0 +1,283 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + iamCRDisplayName = "display_name" + iamCRName = "name" + iamCRDescription = "description" + iamCRActions = "actions" + iamCRServiceName = "service" +) + +func resourceIBMIAMCustomRole() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMCustomRoleCreate, + Read: resourceIBMIAMCustomRoleRead, + Update: resourceIBMIAMCustomRoleUpdate, + Delete: resourceIBMIAMCustomRoleDelete, + Exists: resourceIBMIAMCustomRoleExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + iamCRDisplayName: { + Type: schema.TypeString, + Required: true, + Description: "Display Name of the Custom Role", + ValidateFunc: InvokeValidator("ibm_iam_custom_role", iamCRDisplayName), + }, + + iamCRName: { + Type: schema.TypeString, + Required: true, + Description: "The name of the custom Role", + ForceNew: true, + ValidateFunc: InvokeValidator("ibm_iam_custom_role", iamCRName), + }, + iamCRDescription: { + Type: schema.TypeString, + Optional: true, + Description: "The description of the role", + ValidateFunc: InvokeValidator("ibm_iam_custom_role", iamCRDescription), + }, + iamCRServiceName: { + Type: schema.TypeString, + Required: true, + Description: "The Service Name", + ForceNew: true, + }, + iamCRActions: { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The actions of the role", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "crn of the Custom Role", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about the resource", + }, + }, + } +} + +func resourceIBMIAMCustomRoleValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: iamCRName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Z]{1}[A-Za-z0-9]{0,29}$`, + MinValueLength: 1, + MaxValueLength: 30}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: iamCRDisplayName, + ValidateFunctionIdentifier: StringLenBetween, + Type: TypeString, + Optional: true, + MinValueLength: 1, + MaxValueLength: 50}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: iamCRDescription, + ValidateFunctionIdentifier: StringLenBetween, + Type: TypeString, + Optional: true, + MinValueLength: 1, + MaxValueLength: 250}) + + ibmIAMCustomRoleResourceValidator := ResourceValidator{ResourceName: "ibm_iam_custom_role", Schema: validateSchema} + return &ibmIAMCustomRoleResourceValidator +} + +func resourceIBMIAMCustomRoleCreate(d *schema.ResourceData, meta interface{}) error { + 
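+	// Create flow: read the configured fields, then register the role in the
+	// caller's account scoped to a single service. "name" and "service" are
+	// ForceNew in the schema above, so changing either recreates the role.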
iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + displayName := d.Get(iamCRDisplayName).(string) + name := d.Get(iamCRName).(string) + description := d.Get(iamCRDescription).(string) + serviceName := d.Get(iamCRServiceName).(string) + actionList := expandStringList(d.Get(iamCRActions).([]interface{})) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + roleOptions := &iampolicymanagementv1.CreateRoleOptions{ + DisplayName: &displayName, + Actions: actionList, + Name: &name, + AccountID: &userDetails.userAccount, + ServiceName: &serviceName, + Description: &description, + } + + role, response, err := iamPolicyManagementClient.CreateRole(roleOptions) + if err != nil || role == nil { + return fmt.Errorf("Error creating Custom Roles: %s\n%s", err, response) + } + + d.SetId(*role.ID) + + return resourceIBMIAMCustomRoleRead(d, meta) +} + +func resourceIBMIAMCustomRoleRead(d *schema.ResourceData, meta interface{}) error { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + roleID := d.Id() + roleOptions := &iampolicymanagementv1.GetRoleOptions{ + RoleID: &roleID, + } + + role, response, err := iamPolicyManagementClient.GetRole(roleOptions) + if err != nil || role == nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving Custom Roles: %s\n%s", err, response) + } + + d.Set(iamCRDisplayName, role.DisplayName) + d.Set(iamCRName, role.Name) + d.Set(iamCRDescription, role.Description) + d.Set(iamCRServiceName, role.ServiceName) + d.Set(iamCRActions, role.Actions) + d.Set("crn", role.CRN) + + d.Set(ResourceName, role.Name) + d.Set(ResourceCRN, role.CRN) + rcontroller, err := getBaseController(meta) + if err != nil { + return err + } + + d.Set(ResourceControllerURL, rcontroller+"/iam/roles") + + return nil +} + +func resourceIBMIAMCustomRoleUpdate(d *schema.ResourceData, meta interface{}) error { + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + roleID := d.Id() + + updatedDescription := d.Get(iamCRDescription).(string) + updatedActions := expandStringList(d.Get(iamCRActions).([]interface{})) + updatedDisplayName := d.Get(iamCRDisplayName).(string) + + if d.HasChange("display_name") || d.HasChange("description") || d.HasChange("actions") { + roleGetOptions := &iampolicymanagementv1.GetRoleOptions{ + RoleID: &roleID, + } + + role, response, err := iamPolicyManagementClient.GetRole(roleGetOptions) + if err != nil || role == nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving Custom Roles: %s\n%s", err, response) + } + + roleETag := response.Headers.Get("ETag") + roleUpdateOptions := &iampolicymanagementv1.UpdateRoleOptions{ + RoleID: &roleID, + IfMatch: &roleETag, + DisplayName: &updatedDisplayName, + Description: &updatedDescription, + Actions: updatedActions, + } + + _, response, err = iamPolicyManagementClient.UpdateRole(roleUpdateOptions) + if err != nil { + return fmt.Errorf("Error updating Custom Roles: %s\n%s", err, response) + } + } + + return resourceIBMIAMCustomRoleRead(d, meta) +} + +func resourceIBMIAMCustomRoleDelete(d *schema.ResourceData, meta interface{}) error { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + 
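+ // Deletion tolerates a role that is already gone: a 404 from DeleteRole is ignored below, keeping repeated destroys idempotent.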
+ roleID := d.Id() + roleDeleteOptions := &iampolicymanagementv1.DeleteRoleOptions{ + RoleID: &roleID, + } + + response, err := iamPolicyManagementClient.DeleteRole(roleDeleteOptions) + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf("Error deleting Custom Roles: %s\n%s", err, response) + } + + d.SetId("") + + return nil +} + +func resourceIBMIAMCustomRoleExists(d *schema.ResourceData, meta interface{}) (bool, error) { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return false, err + } + roleID := d.Id() + + roleGetOptions := &iampolicymanagementv1.GetRoleOptions{ + RoleID: &roleID, + } + + role, response, err := iamPolicyManagementClient.GetRole(roleGetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error retrieving Custom Roles: %s\n%s", err, response) + } + + return *role.ID == roleID, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_service_api_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_service_api_key.go new file mode 100644 index 00000000000..21b215330c2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_service_api_key.go @@ -0,0 +1,364 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "strconv" + + "github.com/IBM/platform-services-go-sdk/iamidentityv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + homedir "github.com/mitchellh/go-homedir" +) + +func resourceIBMIAMServiceAPIKey() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMServiceAPIkeyCreate, + Read: resourceIBMIAMServiceAPIKeyRead, + Update: resourceIBMIAMServiceAPIKeyUpdate, + Delete: resourceIBMIAMServiceAPIKeyDelete, + Exists: resourceIBMIAMServiceAPIKeyExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the Service API key", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: " description of the API key", + }, + + "iam_service_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The service iam_id that this API key authenticates", + }, + + "account_id": { + Type: schema.TypeString, + Computed: true, + Description: "The account ID of the API key", + }, + + "apikey": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Sensitive: true, + ForceNew: true, + Description: "API key value for this API key", + }, + + "locked": { + Type: schema.TypeBool, + Optional: true, + DiffSuppressFunc: applyOnce, + Description: "The API key cannot be changed if set to true", + }, + + "store_value": { + Type: schema.TypeBool, + Optional: true, + DiffSuppressFunc: applyOnce, + Description: "Boolean value deciding whether API key value is retrievable in the future", + }, + + "file": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: applyOnce, + Description: "File where api key is to be stored", + }, + + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "crn of the Service API Key", + }, + + "entity_tag": { + Type: schema.TypeString, + Computed: true, + Description: "Version of the API Key details object", + }, + + "created_by": { + Type: 
schema.TypeString, + Computed: true, + Description: "IAM ID of the service which created the API key", + }, + + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time Service API Key was created", + }, + + "modified_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time Service API Key was modified", + }, + }, + } +} + +type APIKey struct { + Name string + Description string + Apikey string + CreatedAt string + Locked bool +} + +func resourceIBMIAMServiceAPIkeyCreate(d *schema.ResourceData, meta interface{}) error { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + + name := d.Get("name").(string) + iamID := d.Get("iam_service_id").(string) + + createAPIKeyOptions := &iamidentityv1.CreateAPIKeyOptions{ + Name: &name, + IamID: &iamID, + } + + if des, ok := d.GetOk("description"); ok { + desString := des.(string) + createAPIKeyOptions.Description = &desString + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + createAPIKeyOptions.AccountID = &userDetails.userAccount + + if key, ok := d.GetOk("apikey"); ok { + apikeyString := key.(string) + createAPIKeyOptions.Apikey = &apikeyString + } + + if strvalue, ok := d.GetOk("store_value"); ok { + value := strvalue.(bool) + createAPIKeyOptions.StoreValue = &value + } + + if lock, ok := d.GetOk("locked"); ok { + elockstr := strconv.FormatBool(lock.(bool)) + createAPIKeyOptions.EntityLock = &elockstr + } + + apiKey, response, err := iamIdentityClient.CreateAPIKey(createAPIKeyOptions) + if err != nil || apiKey == nil { + return fmt.Errorf("[DEBUG] Service API Key creation Error: %s\n%s", err, response) + } + + d.SetId(*apiKey.ID) + d.Set("apikey", *apiKey.Apikey) + + if keyfile, ok := d.GetOk("file"); ok { + if err := saveToFile(apiKey, keyfile.(string)); err != nil { + log.Printf("Error writing API Key Details to file: %s", err) + } + } + + return resourceIBMIAMServiceAPIKeyRead(d, meta) +} + +func resourceIBMIAMServiceAPIKeyRead(d *schema.ResourceData, meta interface{}) error { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + apiKeyID := d.Id() + + getAPIKeyOptions := &iamidentityv1.GetAPIKeyOptions{ + ID: &apiKeyID, + } + + apiKey, response, err := iamIdentityClient.GetAPIKey(getAPIKeyOptions) + if err != nil || apiKey == nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("[DEBUG] Error retrieving Service API Key: %s\n%s", err, response) + } + if apiKey.Name != nil { + d.Set("name", *apiKey.Name) + } + if apiKey.IamID != nil { + d.Set("iam_service_id", *apiKey.IamID) + } + if apiKey.Description != nil { + d.Set("description", *apiKey.Description) + } + if apiKey.AccountID != nil { + d.Set("account_id", *apiKey.AccountID) + } + if apiKey.Apikey != nil && *apiKey.Apikey != "" { + d.Set("apikey", *apiKey.Apikey) + } + if apiKey.CRN != nil { + d.Set("crn", *apiKey.CRN) + } + if apiKey.EntityTag != nil { + d.Set("entity_tag", *apiKey.EntityTag) + } + if apiKey.Locked != nil { + d.Set("locked", *apiKey.Locked) + } + if apiKey.CreatedBy != nil { + d.Set("created_by", *apiKey.CreatedBy) + } + if apiKey.CreatedAt != nil { + d.Set("created_at", apiKey.CreatedAt.String()) + } + if apiKey.ModifiedAt != nil { + d.Set("modified_at", apiKey.ModifiedAt.String()) + } + + return nil +} + +func resourceIBMIAMServiceAPIKeyUpdate(d *schema.ResourceData, meta interface{}) error { + + iamIdentityClient,
err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + apiKeyID := d.Id() + + getAPIKeyOptions := &iamidentityv1.GetAPIKeyOptions{ + ID: &apiKeyID, + } + + apiKey, resp, err := iamIdentityClient.GetAPIKey(getAPIKeyOptions) + if err != nil || apiKey == nil { + return fmt.Errorf("[DEBUG] Error retrieving Service API Key: %s\n%s", err, resp) + } + + updateAPIKeyOptions := &iamidentityv1.UpdateAPIKeyOptions{ + ID: &apiKeyID, + IfMatch: apiKey.EntityTag, + } + + hasChange := false + + if d.HasChange("name") { + namestr := d.Get("name").(string) + updateAPIKeyOptions.Name = &namestr + hasChange = true + } + + if d.HasChange("description") { + desc := d.Get("description").(string) + updateAPIKeyOptions.Description = &desc + hasChange = true + } + if hasChange { + _, response, err := iamIdentityClient.UpdateAPIKey(updateAPIKeyOptions) + if err != nil { + return fmt.Errorf("[DEBUG] Error updating Service API Key: %s\n%s", err, response) + } + } + + return resourceIBMIAMServiceAPIKeyRead(d, meta) + +} + +func resourceIBMIAMServiceAPIKeyDelete(d *schema.ResourceData, meta interface{}) error { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return err + } + apiKeyID := d.Id() + + getAPIKeyOptions := &iamidentityv1.GetAPIKeyOptions{ + ID: &apiKeyID, + } + + _, response, err := iamIdentityClient.GetAPIKey(getAPIKeyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("[DEBUG] Error retrieving Service API Key: %s\n%s", err, response) + } + + deleteAPIKeyOptions := &iamidentityv1.DeleteAPIKeyOptions{ + ID: &apiKeyID, + } + + resp, err := iamIdentityClient.DeleteAPIKey(deleteAPIKeyOptions) + if err != nil { + return fmt.Errorf("[DEBUG] Error deleting Service API Key: %s\n%s", err, resp) + } + d.SetId("") + + return nil +} + +func resourceIBMIAMServiceAPIKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + iamIdentityClient, err := meta.(ClientSession).IAMIdentityV1API() + if err != nil { + return false, err + } + apiKeyID := d.Id() + + getAPIKeyOptions := &iamidentityv1.GetAPIKeyOptions{ + ID: &apiKeyID, + } + + apiKey, response, err := iamIdentityClient.GetAPIKey(getAPIKeyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error retrieving Service API Key: %s\n%s", err, response) + } + return *apiKey.ID == apiKeyID, nil +} + +func saveToFile(apiKey *iamidentityv1.APIKey, filePath string) error { + outputFilePath, err := homedir.Expand(filePath) + if err != nil { + return fmt.Errorf("Error generating API Key file path: %s", err) + } + + key := &APIKey{ + Name: *apiKey.Name, + Apikey: *apiKey.Apikey, + CreatedAt: apiKey.CreatedAt.String(), + Locked: *apiKey.Locked, + } + if apiKey.Description != nil { + key.Description = *apiKey.Description + } else { + key.Description = "" + } + + out, err := json.MarshalIndent(key, "", "\t") + if err != nil { + return fmt.Errorf("Error marshalling API Key details: %s", err) + } + + err = ioutil.WriteFile(outputFilePath, out, 0666) + if err == nil { + log.Println("Successfully saved API key information to ", outputFilePath) + } + + return err +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_service_id.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_service_id.go new file mode 100644 index 00000000000..73832c4346f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_service_id.go @@ -0,0 +1,233 @@ +// Copyright IBM
Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + + "github.com/IBM-Cloud/bluemix-go/models" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/crn" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMIAMServiceID() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMServiceIDCreate, + Read: resourceIBMIAMServiceIDRead, + Update: resourceIBMIAMServiceIDUpdate, + Delete: resourceIBMIAMServiceIDDelete, + Exists: resourceIBMIAMServiceIDExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the serviceID", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Description of the serviceID", + }, + + "version": { + Type: schema.TypeString, + Computed: true, + Description: "version of the serviceID", + }, + + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "crn of the serviceID", + }, + + "iam_id": { + Type: schema.TypeString, + Computed: true, + Description: "The IAM ID of the serviceID", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceIBMIAMServiceIDCreate(d *schema.ResourceData, meta interface{}) error { + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return err + } + name := d.Get("name").(string) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + boundTo := crn.New(userDetails.cloudName, userDetails.cloudType) + boundTo.ScopeType = crn.ScopeAccount + boundTo.Scope = userDetails.userAccount + + request := models.ServiceID{ + Name: name, + BoundTo: boundTo.String(), + } + + if des, ok := d.GetOk("description"); ok { + request.Description = des.(string) + } + + serviceID, err := iamClient.ServiceIds().Create(request) + if err != nil { + return fmt.Errorf("Error creating serviceID: %s", err) + } + + d.SetId(serviceID.UUID) + + return resourceIBMIAMServiceIDRead(d, meta) +} + +func resourceIBMIAMServiceIDRead(d *schema.ResourceData, meta interface{}) error { + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return err + } + serviceIDUUID := d.Id() + + serviceID, err := iamClient.ServiceIds().Get(serviceIDUUID) + if err != nil { + return fmt.Errorf("Error retrieving serviceID: %s", err) + } + + d.Set("name", serviceID.Name) + d.Set("description", serviceID.Description) + d.Set("crn", serviceID.CRN) + d.Set("version", serviceID.Version) + d.Set("iam_id", serviceID.IAMID) + + return nil +} + +func resourceIBMIAMServiceIDUpdate(d *schema.ResourceData, meta interface{}) error { + + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return err + } + serviceIDUUID := d.Id() + + hasChange := false + updateReq := models.ServiceID{} + + if d.HasChange("name") { + updateReq.Name = d.Get("name").(string) + hasChange = true + } + + if d.HasChange("description") { + updateReq.Description = d.Get("description").(string) + hasChange = true + } + + if hasChange { + _, err = iamClient.ServiceIds().Update(serviceIDUUID, updateReq, "*") + if err != nil { + return fmt.Errorf("Error updating serviceID: %s", err) + } + } + + return resourceIBMIAMServiceIDRead(d, meta) + +} + +func resourceIBMIAMServiceIDDelete(d *schema.ResourceData, meta interface{}) error { + iamClient, err :=
meta.(ClientSession).IAMAPI() + if err != nil { + return err + } + + serviceIDUUID := d.Id() + + err = iamClient.ServiceIds().Delete(serviceIDUUID) + if err != nil { + return fmt.Errorf("Error deleting serviceID: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceIBMIAMServiceIDExists(d *schema.ResourceData, meta interface{}) (bool, error) { + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return false, err + } + serviceIDUUID := d.Id() + + serviceID, err := iamClient.ServiceIds().Get(serviceIDUUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return serviceID.UUID == serviceIDUUID, nil +} + +func CloudName(region models.Region) string { + regionID := region.ID + if regionID == "" { + return "" + } + + splits := strings.Split(regionID, ":") + if len(splits) != 3 { + return "" + } + + customer := splits[0] + if customer != "ibm" { + return customer + } + + deployment := splits[1] + switch { + case deployment == "yp": + return "bluemix" + case strings.HasPrefix(deployment, "ys"): + return "staging" + default: + return "" + } +} + +func CloudType(region models.Region) string { + return region.Type +} + +func GenerateBoundToCRN(region models.Region, accountID string) crn.CRN { + var boundTo crn.CRN + if region.Type == "dedicated" { + // cname and ctype are hard coded for dedicated + boundTo = crn.New("bluemix", "public") + } else { + boundTo = crn.New(CloudName(region), CloudType(region)) + } + + boundTo.ScopeType = crn.ScopeAccount + boundTo.Scope = accountID + return boundTo +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_service_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_service_policy.go new file mode 100644 index 00000000000..ab21e5d05ab --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_service_policy.go @@ -0,0 +1,482 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + "time" + + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" +) + +func resourceIBMIAMServicePolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMServicePolicyCreate, + Read: resourceIBMIAMServicePolicyRead, + Update: resourceIBMIAMServicePolicyUpdate, + Delete: resourceIBMIAMServicePolicyDelete, + Exists: resourceIBMIAMServicePolicyExists, + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + resources, resourceAttributes, err := importServicePolicy(d, meta) + if err != nil { + return nil, fmt.Errorf("Error reading resource ID: %s", err) + } + d.Set("resources", resources) + d.Set("resource_attributes", resourceAttributes) + return []*schema.ResourceData{d}, nil + }, + }, + + Schema: map[string]*schema.Schema{ + "iam_service_id": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"iam_service_id", "iam_id"}, + Description: "UUID of ServiceID", + ForceNew: true, + }, + "iam_id": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"iam_service_id", "iam_id"}, + Description: "IAM ID of ServiceID", + ForceNew: true, + }, + "roles": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Role names of the policy definition", + }, + + "resources": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"account_management", "resource_attributes"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Optional: true, + Description: "Service name of the policy definition", + }, + + "resource_instance_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of resource instance of the policy definition", + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Description: "Region of the policy definition", + }, + + "resource_type": { + Type: schema.TypeString, + Optional: true, + Description: "Resource type of the policy definition", + }, + + "resource": { + Type: schema.TypeString, + Optional: true, + Description: "Resource of the policy definition", + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + }, + + "attributes": { + Type: schema.TypeMap, + Optional: true, + Description: "Set resource attributes in the form of 'name=value,name=value....", + Elem: schema.TypeString, + }, + }, + }, + }, + + "resource_attributes": { + Type: schema.TypeSet, + Optional: true, + Description: "Set resource attributes.", + ConflictsWith: []string{"resources", "account_management"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of attribute.", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "Value of attribute.", + }, + "operator": { + Type: schema.TypeString, + Optional: true, + Default: "stringEquals", + Description: "Operator of attribute.", + }, + }, + }, + }, + "account_management": { + Type: schema.TypeBool, + Default: false, + Optional: true, + Description: "Give access to all account management 
services", + ConflictsWith: []string{"resources", "resource_attributes"}, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceIBMIAMServicePolicyCreate(d *schema.ResourceData, meta interface{}) error { + + var iamID string + if v, ok := d.GetOk("iam_service_id"); ok && v != nil { + serviceIDUUID := v.(string) + + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return err + } + serviceID, err := iamClient.ServiceIds().Get(serviceIDUUID) + if err != nil { + return err + } + iamID = serviceID.IAMID + } + if v, ok := d.GetOk("iam_id"); ok && v != nil { + iamID = v.(string) + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + policyOptions, err := generatePolicyOptions(d, meta) + if err != nil { + return err + } + + subjectAttribute := &iampolicymanagementv1.SubjectAttribute{ + Name: core.StringPtr("iam_id"), + Value: &iamID, + } + + policySubjects := &iampolicymanagementv1.PolicySubject{ + Attributes: []iampolicymanagementv1.SubjectAttribute{*subjectAttribute}, + } + + accountIDResourceAttribute := &iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("accountId"), + Value: core.StringPtr(userDetails.userAccount), + Operator: core.StringPtr("stringEquals"), + } + + policyResources := iampolicymanagementv1.PolicyResource{ + Attributes: append(policyOptions.Resources[0].Attributes, *accountIDResourceAttribute), + } + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + createPolicyOptions := iamPolicyManagementClient.NewCreatePolicyOptions( + "access", + []iampolicymanagementv1.PolicySubject{*policySubjects}, + policyOptions.Roles, + []iampolicymanagementv1.PolicyResource{policyResources}, + ) + + servicePolicy, _, err := iamPolicyManagementClient.CreatePolicy(createPolicyOptions) + + if err != nil { + return fmt.Errorf("Error creating servicePolicy: %s", err) + } + if v, ok := d.GetOk("iam_service_id"); ok && v != nil { + serviceIDUUID := v.(string) + d.SetId(fmt.Sprintf("%s/%s", serviceIDUUID, *servicePolicy.ID)) + } else if v, ok := d.GetOk("iam_id"); ok && v != nil { + iamID := v.(string) + d.SetId(fmt.Sprintf("%s/%s", iamID, *servicePolicy.ID)) + } + + getPolicyOptions := iamPolicyManagementClient.NewGetPolicyOptions( + *servicePolicy.ID, + ) + + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + var err error + _, _, err = iamPolicyManagementClient.GetPolicy(getPolicyOptions) + + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + } + return nil + }) + + if isResourceTimeoutError(err) { + _, _, err = iamPolicyManagementClient.GetPolicy(getPolicyOptions) + } + if err != nil { + return fmt.Errorf("error fetching service policy: %w", err) + } + + return resourceIBMIAMServicePolicyRead(d, meta) +} + +func resourceIBMIAMServicePolicyRead(d *schema.ResourceData, meta interface{}) error { + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + serviceIDUUID := parts[0] + servicePolicyID := parts[1] + + getPolicyOptions := iamPolicyManagementClient.NewGetPolicyOptions( + servicePolicyID, + ) + servicePolicy, _, err := 
iamPolicyManagementClient.GetPolicy(getPolicyOptions) + if err != nil { + return fmt.Errorf("Error retrieving servicePolicy: %s", err) + } + if strings.HasPrefix(serviceIDUUID, "iam-") { + d.Set("iam_id", serviceIDUUID) + } else { + d.Set("iam_service_id", serviceIDUUID) + } + + roles := make([]string, len(servicePolicy.Roles)) + for i, role := range servicePolicy.Roles { + roles[i] = *role.DisplayName + } + d.Set("roles", roles) + + if _, ok := d.GetOk("resources"); ok { + d.Set("resources", flattenPolicyResource(servicePolicy.Resources)) + } + if _, ok := d.GetOk("resource_attributes"); ok { + d.Set("resource_attributes", flattenPolicyResourceAttributes(servicePolicy.Resources)) + } + if len(servicePolicy.Resources) > 0 { + if *getResourceAttribute("serviceType", servicePolicy.Resources[0]) == "service" { + d.Set("account_management", false) + } + if *getResourceAttribute("serviceType", servicePolicy.Resources[0]) == "platform_service" { + d.Set("account_management", true) + } + } + + return nil +} + +func resourceIBMIAMServicePolicyUpdate(d *schema.ResourceData, meta interface{}) error { + + if d.HasChange("roles") || d.HasChange("resources") || d.HasChange("resource_attributes") || d.HasChange("account_management") { + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + servicePolicyID := parts[1] + + var iamID string + if v, ok := d.GetOk("iam_service_id"); ok && v != nil { + serviceIDUUID := v.(string) + + iamClient, err := meta.(ClientSession).IAMAPI() + if err != nil { + return err + } + serviceID, err := iamClient.ServiceIds().Get(serviceIDUUID) + if err != nil { + return err + } + iamID = serviceID.IAMID + } + if v, ok := d.GetOk("iam_id"); ok && v != nil { + iamID = v.(string) + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + createPolicyOptions, err := generatePolicyOptions(d, meta) + if err != nil { + return err + } + + accountIDResourceAttribute := &iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("accountId"), + Value: core.StringPtr(userDetails.userAccount), + Operator: core.StringPtr("stringEquals"), + } + + policyResources := iampolicymanagementv1.PolicyResource{ + Attributes: append(createPolicyOptions.Resources[0].Attributes, *accountIDResourceAttribute), + } + + subjectAttribute := &iampolicymanagementv1.SubjectAttribute{ + Name: core.StringPtr("iam_id"), + Value: &iamID, + } + policySubjects := &iampolicymanagementv1.PolicySubject{ + Attributes: []iampolicymanagementv1.SubjectAttribute{*subjectAttribute}, + } + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + getPolicyOptions := iamPolicyManagementClient.NewGetPolicyOptions( + servicePolicyID, + ) + policy, response, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions) + if err != nil || policy == nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error retrieving Policy: %s\n%s", err, response) + } + + servicePolicyETag := response.Headers.Get("ETag") + updatePolicyOptions := iamPolicyManagementClient.NewUpdatePolicyOptions( + servicePolicyID, + servicePolicyETag, + "access", + []iampolicymanagementv1.PolicySubject{*policySubjects}, + createPolicyOptions.Roles, + []iampolicymanagementv1.PolicyResource{policyResources}, + ) + + _, _, err = iamPolicyManagementClient.UpdatePolicy(updatePolicyOptions) + if err != nil { + return fmt.Errorf("Error updating service policy: %s", err) + } + + } + + return 
resourceIBMIAMServicePolicyRead(d, meta) + +} + +func resourceIBMIAMServicePolicyDelete(d *schema.ResourceData, meta interface{}) error { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + servicePolicyID := parts[1] + + deletePolicyOptions := iamPolicyManagementClient.NewDeletePolicyOptions( + servicePolicyID, + ) + + _, err = iamPolicyManagementClient.DeletePolicy(deletePolicyOptions) + if err != nil { + return fmt.Errorf("Error deleting service policy: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceIBMIAMServicePolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + serviceIDUUID := parts[0] + servicePolicyID := parts[1] + + getPolicyOptions := iamPolicyManagementClient.NewGetPolicyOptions( + servicePolicyID, + ) + + servicePolicy, _, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + tempID := fmt.Sprintf("%s/%s", serviceIDUUID, *servicePolicy.ID) + + return tempID == d.Id(), nil +} + +func importServicePolicy(d *schema.ResourceData, meta interface{}) (interface{}, interface{}, error) { + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return nil, nil, err + } + parts, err := idParts(d.Id()) + if err != nil { + return nil, nil, err + } + servicePolicyID := parts[1] + getPolicyOptions := iamPolicyManagementClient.NewGetPolicyOptions( + servicePolicyID, + ) + servicePolicy, _, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions) + if err != nil { + return nil, nil, fmt.Errorf("Error retrieving servicePolicy: %s", err) + } + resources := flattenPolicyResource(servicePolicy.Resources) + resource_attributes := flattenPolicyResourceAttributes(servicePolicy.Resources) + return resources, resource_attributes, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_user_invite.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_user_invite.go new file mode 100644 index 00000000000..7fe6d3dc0f5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_user_invite.go @@ -0,0 +1,1105 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1" + v2 "github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + // MEMBER ... + MEMBER = "MEMEBER" + // ACCESS ... 
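+ // ACCESS is the policy type attached to invited-user IAM policies; the role name constants that follow are matched case-insensitively when building Cloud Foundry org and space roles, and NOACCESS through SUPERUSER name the classic-infrastructure permission sets defined below.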
+ ACCESS = "access" + NOACCESS = "noacess" + VIEWONLY = "viewonly" + BASICUSER = "basicuser" + SUPERUSER = "superuser" + MANAGER = "manager" + AUDITOR = "auditor" + BILLINGMANANGER = "billingmanager" + DEVELOPER = "developer" +) + +var viewOnly = []string{ + "HARDWARE_VIEW", + "BANDWIDTH_MANAGE", + "LICENSE_VIEW", + "CDN_BANDWIDTH_VIEW", + "VIRTUAL_GUEST_VIEW", + "DEDICATED_HOST_VIEW", +} + +var noAccess = make([]string, 0) + +var basicUser = []string{"HARDWARE_VIEW", + "USER_MANAGE", + "BANDWIDTH_MANAGE", + "DNS_MANAGE", + "REMOTE_MANAGEMENT", + "MONITORING_MANAGE", + "LICENSE_VIEW", + "IP_ADD", + "PORT_CONTROL", + "LOADBALANCER_MANAGE", + "FIREWALL_MANAGE", + "SOFTWARE_FIREWALL_MANAGE", + "ANTI_MALWARE_MANAGE", + "HOST_ID_MANAGE", + "VULN_SCAN_MANAGE", + "NTF_SUBSCRIBER_MANAGE", + "CDN_BANDWIDTH_VIEW", + "VIRTUAL_GUEST_VIEW", + "NETWORK_MESSAGE_DELIVERY_MANAGE", + "FIREWALL_RULE_MANAGE", + "DEDICATED_HOST_VIEW", +} + +var superUser = []string{"HARDWARE_VIEW", + "VIEW_CUSTOMER_SOFTWARE_PASSWORD", + "NETWORK_TUNNEL_MANAGE", + "CUSTOMER_POST_PROVISION_SCRIPT_MANAGEMENT", + "VIEW_CPANEL", + "VIEW_PLESK", + "VIEW_HELM", + "VIEW_URCHIN", + "ADD_SERVICE_STORAGE", + "USER_MANAGE", + "SERVER_ADD", + "SERVER_UPGRADE", + "SERVER_CANCEL", + "SERVICE_ADD", + "SERVICE_UPGRADE", + "SERVICE_CANCEL", + "BANDWIDTH_MANAGE", + "DNS_MANAGE", + "REMOTE_MANAGEMENT", + "MONITORING_MANAGE", + "SERVER_RELOAD", + "LICENSE_VIEW", + "IP_ADD", + "LOCKBOX_MANAGE", + "NAS_MANAGE", + "PORT_CONTROL", + "LOADBALANCER_MANAGE", + "FIREWALL_MANAGE", + "SOFTWARE_FIREWALL_MANAGE", + "ANTI_MALWARE_MANAGE", + "HOST_ID_MANAGE", + "VULN_SCAN_MANAGE", + "NTF_SUBSCRIBER_MANAGE", + "NETWORK_VLAN_SPANNING", + "CDN_ACCOUNT_MANAGE", + "CDN_FILE_MANAGE", + "CDN_BANDWIDTH_VIEW", + "NETWORK_ROUTE_MANAGE", + "VIRTUAL_GUEST_VIEW", + "INSTANCE_UPGRADE", + "HOSTNAME_EDIT", + "NETWORK_MESSAGE_DELIVERY_MANAGE", + "USER_EVENT_LOG_VIEW", + "VPN_MANAGE", + "VIEW_QUANTASTOR", + "DATACENTER_ACCESS", + "DATACENTER_ROOM_ACCESS", + "CUSTOMER_SSH_KEY_MANAGEMENT", + "FIREWALL_RULE_MANAGE", + "PUBLIC_IMAGE_MANAGE", + "SECURITY_CERTIFICATE_VIEW", + "SECURITY_CERTIFICATE_MANAGE", + "GATEWAY_MANAGE", + "SCALE_GROUP_MANAGE", + "SAML_AUTHENTICATION_MANAGE", + "MANAGE_SECURITY_GROUPS", + "PUBLIC_NETWORK_COMPUTE", + "DEDICATED_HOST_VIEW", +} + +var permissionSets = map[string][]string{NOACCESS: noAccess, VIEWONLY: viewOnly, + BASICUSER: basicUser, SUPERUSER: superUser} + +func resourceIBMUserInvite() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMIAMInviteUsers, + Read: resourceIBMIAMGetUsers, + Update: resourceIBMIAMUpdateUserProfile, + Delete: resourceIBMIAMRemoveUser, + Exists: resourceIBMIAMGetUserProfileExists, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + + "users": { + Description: "List of ibm id or email of user", + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "access_groups": { + Description: "access group ids to associate the inviting user", + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "iam_policy": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "roles": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Role names of the policy definition", + }, + + "resources": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Optional: true, + Description: "Service name of the policy definition", + }, + + "resource_instance_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of resource instance of the policy definition", + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Description: "Region of the policy definition", + }, + + "resource_type": { + Type: schema.TypeString, + Optional: true, + Description: "Resource type of the policy definition", + }, + + "resource": { + Type: schema.TypeString, + Optional: true, + Description: "Resource of the policy definition", + }, + + "resource_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the resource group.", + }, + + "attributes": { + Type: schema.TypeMap, + Optional: true, + Description: "Set resource attributes in the form of 'name=value,name=value....", + Elem: schema.TypeString, + }, + }, + }, + }, + "account_management": { + Type: schema.TypeBool, + Default: false, + Optional: true, + Description: "Give access to all account management services", + }, + }, + }, + }, + "number_of_invited_users": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of users invited to an account", + }, + "invited_users": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "user_id": { + Description: "ibm id or email of user", + Type: schema.TypeString, + Computed: true, + }, + + "user_policies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + + "roles": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Role names of the policy definition", + }, + + "resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Computed: true, + Description: "Service name of the policy definition", + }, + + "resource_instance_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of resource instance of the policy definition", + }, + + "region": { + Type: schema.TypeString, + Computed: true, + Description: "Region of the policy definition", + }, + + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "Resource type of the policy definition", + }, + + "resource": { + Type: schema.TypeString, + Computed: true, + Description: "Resource of the policy definition", + }, + + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the resource group.", + }, + + "attributes": { + Type: schema.TypeMap, + Computed: true, + Description: "Set resource attributes in the form of 'name=value,name=value....", + Elem: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + + "access_groups": { + Description: "access group ids to associate the inviting user", + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "name": { + Description: "Name of the access group", + Type: schema.TypeString, + Computed: true, + }, + + "policies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "id": { + Type: schema.TypeString, + Computed: true, + }, + + "roles": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, 
+ Description: "Role names of the policy definition", + }, + + "resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Computed: true, + Description: "Service name of the policy definition", + }, + + "resource_instance_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of resource instance of the policy definition", + }, + + "region": { + Type: schema.TypeString, + Computed: true, + Description: "Region of the policy definition", + }, + + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "Resource type of the policy definition", + }, + + "resource": { + Type: schema.TypeString, + Computed: true, + Description: "Resource of the policy definition", + }, + + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the resource group.", + }, + + "attributes": { + Type: schema.TypeMap, + Computed: true, + Description: "Set resource attributes in the form of 'name=value,name=value....", + Elem: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "classic_infra_roles": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "permission_set": { + Type: schema.TypeString, + Optional: true, + Description: "permission set for claasic infrastructure", + ValidateFunc: validateAllowedStringValue([]string{NOACCESS, VIEWONLY, BASICUSER, SUPERUSER}), + }, + + "permissions": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of permissions for claasic infrastructure", + }, + }, + }, + }, + "cloud_foundry_roles": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "organization_guid": { + Type: schema.TypeString, + Required: true, + Description: "GUID of Organization", + }, + + "org_roles": { + Type: schema.TypeList, + Required: true, + Description: "roles to be assigned to user in given space", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "spaces": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "space_guid": { + Type: schema.TypeString, + Required: true, + Description: "GUID of space", + }, + + "space_roles": { + Type: schema.TypeList, + Required: true, + Description: "roles to be assigned to user in given space", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceIBMIAMInviteUsers(d *schema.ResourceData, meta interface{}) error { + userManagement, err := meta.(ClientSession).UserManagementAPI() + if err != nil { + return err + } + client := userManagement.UserInvite() + + usersSet := d.Get("users").(*schema.Set) + usersList := flattenUsersSet(usersSet) + users := make([]v2.User, 0) + for _, user := range usersList { + users = append(users, v2.User{Email: user, AccountRole: MEMBER}) + } + if len(users) == 0 { + return fmt.Errorf("Users email not provided") + } + var accessGroups = make([]string, 0) + if data, ok := d.GetOk("access_groups"); ok { + for _, accessGroup := range data.([]interface{}) { + accessGroups = append(accessGroups, fmt.Sprintf("%v", accessGroup)) + } + } + + var accessPolicies []v2.UserPolicy + if accessPolicyData, ok := d.GetOk("iam_policy"); ok { + accessPolicies, err = getPolicies(d, meta, accessPolicyData.([]interface{})) + if err != nil { + 
log.Println("IAM Acess policy: ", err.Error()) + return err + } + } + + inviteUserPayload := v2.UserInvite{} + log.Println(inviteUserPayload) + inviteUserPayload.Users = users + if len(accessGroups) != 0 { + inviteUserPayload.AccessGroup = accessGroups + } + if len(accessPolicies) != 0 { + inviteUserPayload.IAMPolicy = accessPolicies + } + + if infraPermissions := getInfraPermissions(d, meta); len(infraPermissions) != 0 { + inviteUserPayload.InfrastructureRoles = &v2.InfraPermissions{Permissions: infraPermissions} + } + orgRoles, err := getCloudFoundryRoles(d, meta) + if err != nil { + return err + } + if len(orgRoles) != 0 { + inviteUserPayload.OrganizationRoles = orgRoles + } + + accountID, err := getAccountID(d, meta) + if err != nil { + return err + } + + _, InviteUserError := client.InviteUsers(accountID, inviteUserPayload) + if InviteUserError != nil { + return InviteUserError + } + d.SetId(time.Now().UTC().String()) + return resourceIBMIAMUpdateUserProfile(d, meta) +} + +func resourceIBMIAMGetUsers(d *schema.ResourceData, meta interface{}) error { + userManagement, err := meta.(ClientSession).UserManagementAPI() + if err != nil { + return err + } + Client := userManagement.UserInvite() + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + iamuumClient, err := meta.(ClientSession).IAMUUMAPIV2() + if err != nil { + return err + } + accountID, err := getAccountID(d, meta) + if err != nil { + return err + } + res, err := Client.ListUsers(accountID) + if err != nil { + return err + } + users := make([]string, 0) + invitedUsers := make([]map[string]interface{}, 0, len(res)) + + for _, user := range res { + + if user.AccountID != accountID { + users = append(users, user.Email) + } + /****** For each user ******************* + 1) user_id + 2) user_level_policies + 3) List of access groups + > Name of access group + > acees group level policies + ********************************************/ + //Get User level IAM policies + policyList, _, err := iamPolicyManagementClient.ListPolicies(&iampolicymanagementv1.ListPoliciesOptions{ + AccountID: core.StringPtr(accountID), + IamID: core.StringPtr(user.IamID), + Type: core.StringPtr("access"), + }) + policies := policyList.Policies + + if err != nil { + return fmt.Errorf("Error retrieving user policies: %s", err) + } + userPolicies := make([]map[string]interface{}, 0, len(policies)) + for _, policy := range policies { + //populate ploicy Roles + roles := make([]string, len(policy.Roles)) + for i, role := range policy.Roles { + roles[i] = *role.DisplayName + } + //populate policy resources + resources := flattenPolicyResource(policy.Resources) + p := map[string]interface{}{ + "id": policy.ID, + "roles": roles, + "resources": resources, + } + userPolicies = append(userPolicies, p) + } + + // Get AccessGroups associated with user + retreivedGroups, err := iamuumClient.AccessGroup().List(accountID, user.IamID) + if err != nil { + return fmt.Errorf("Error retrieving access groups: %s", err) + } + + accGroupList := make([]map[string]interface{}, 0, len(retreivedGroups)) + //Get the policies for each access group + for _, grpData := range retreivedGroups { + policyList, _, err := iamPolicyManagementClient.ListPolicies(&iampolicymanagementv1.ListPoliciesOptions{ + AccountID: core.StringPtr(accountID), + AccessGroupID: core.StringPtr(user.IamID), + }) + accgrpPolicy := policyList.Policies + if err != nil { + return fmt.Errorf("Error retrieving access group policy: %s", err) + } + + //Fetch 
access group policies + grpPolicies := make([]map[string]interface{}, 0, len(accgrpPolicy)) + for _, policy := range accgrpPolicy { + //populate policy Roles + roles := make([]string, len(policy.Roles)) + for i, role := range policy.Roles { + roles[i] = *role.DisplayName + } + //populate policy resources + resources := flattenPolicyResource(policy.Resources) + p := map[string]interface{}{ + "id": policy.ID, + "roles": roles, + "resources": resources, + } + grpPolicies = append(grpPolicies, p) + } + //populate name & policies of an access group + agInfo := map[string]interface{}{ + "name": grpData.Name, + "policies": grpPolicies, + } + //add agInfo to list of access groups + accGroupList = append(accGroupList, agInfo) + } + userInfo := map[string]interface{}{ + "user_id": user.Email, + "user_policies": userPolicies, + "access_groups": accGroupList, + } + invitedUsers = append(invitedUsers, userInfo) + } + //set the number of users in an account + d.Set("number_of_invited_users", len(res)-1) + d.Set("invited_users", invitedUsers) + return nil +} + +func resourceIBMIAMUpdateUserProfile(d *schema.ResourceData, meta interface{}) error { + // validate change + userManagement, err := meta.(ClientSession).UserManagementAPI() + if err != nil { + return err + } + Client := userManagement.UserInvite() + + if d.HasChange("users") { + //var removedUsers, addedUsers []string + accountID, err := getAccountID(d, meta) + if err != nil { + return err + } + ousrs, nusrs := d.GetChange("users") + old := ousrs.(*schema.Set) + new := nusrs.(*schema.Set) + + removed := expandStringList(old.Difference(new).List()) + added := expandStringList(new.Difference(old).List()) + + //Update the added users + if len(added) > 0 { + users := make([]v2.User, 0) + for _, user := range added { + users = append(users, v2.User{Email: user, AccountRole: MEMBER}) + } + if len(users) == 0 { + return fmt.Errorf("Users email not provided") + } + + var accessPolicies []v2.UserPolicy + if accessPolicyData, ok := d.GetOk("iam_policy"); ok { + accessPolicies, err = getPolicies(d, meta, accessPolicyData.([]interface{})) + if err != nil { + log.Println("IAM access policy: ", err.Error()) + return err + } + } + + var accessGroups = make([]string, 0) + if data, ok := d.GetOk("access_groups"); ok { + for _, accessGroup := range data.([]interface{}) { + accessGroups = append(accessGroups, fmt.Sprintf("%v", accessGroup)) + } + } + + infraPermissions := getInfraPermissions(d, meta) + orgRoles, err := getCloudFoundryRoles(d, meta) + if err != nil { + return err + } + + inviteUserPayload := v2.UserInvite{} + + inviteUserPayload.Users = users + if len(accessGroups) != 0 { + inviteUserPayload.AccessGroup = accessGroups + } + if len(accessPolicies) != 0 { + inviteUserPayload.IAMPolicy = accessPolicies + } + if len(infraPermissions) != 0 { + inviteUserPayload.InfrastructureRoles = &v2.InfraPermissions{Permissions: infraPermissions} + } + if len(orgRoles) != 0 { + inviteUserPayload.OrganizationRoles = orgRoles + } + _, InviteUserError := Client.InviteUsers(accountID, inviteUserPayload) + if InviteUserError != nil { + return InviteUserError + } + } + + //Update the removed users + if len(removed) > 0 { + for _, user := range removed { + IAMID, err := getUserIAMID(d, meta, user) + if err != nil { + return fmt.Errorf("User's IAM ID not found: %s", err.Error()) + } + Err := Client.RemoveUsers(accountID, IAMID) + if Err != nil { + log.Println("Failed to remove user: ", user) + return Err + } + } + } + + } + return resourceIBMIAMGetUsers(d, meta) +} + +func
resourceIBMIAMRemoveUser(d *schema.ResourceData, meta interface{}) error { + userManagement, err := meta.(ClientSession).UserManagementAPI() + if err != nil { + return err + } + Client := userManagement.UserInvite() + + accountID, err := getAccountID(d, meta) + if err != nil { + return err + } + + usersSet := d.Get("users").(*schema.Set) + usersList := flattenUsersSet(usersSet) + for _, user := range usersList { + IAMID, err := getUserIAMID(d, meta, user) + + if err != nil { + return fmt.Errorf("User's IAM ID not found: %s", err.Error()) + } + Err := Client.RemoveUsers(accountID, IAMID) + if Err != nil { + return Err + } + } + return nil +} + +func resourceIBMIAMGetUserProfileExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userManagement, err := meta.(ClientSession).UserManagementAPI() + if err != nil { + return false, err + } + Client := userManagement.UserInvite() + + accountID, err := getAccountID(d, meta) + if err != nil { + return false, err + } + + usersSet := d.Get("users").(*schema.Set) + usersList := flattenUsersSet(usersSet) + + res, err := Client.ListUsers(accountID) + if err != nil { + return false, err + } + for _, user := range usersList { + isFound := false + for _, userInfo := range res { + if strings.Compare(userInfo.Email, user) == 0 { + isFound = true + } + } + if !isFound { + return false, nil + } + } + return true, nil +} + +// getAccountID returns accountID +func getAccountID(d *schema.ResourceData, meta interface{}) (string, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return "", err + } + return userDetails.userAccount, nil +} + +// getUserIAMID ... +func getUserIAMID(d *schema.ResourceData, meta interface{}, user string) (string, error) { + userManagement, err := meta.(ClientSession).UserManagementAPI() + if err != nil { + return "", err + } + Client := userManagement.UserInvite() + + accountID, err := getAccountID(d, meta) + if err != nil { + return "", err + } + + res, err := Client.ListUsers(accountID) + if err != nil { + return "", err + } + + for _, userInfo := range res { + if strings.Compare(userInfo.Email, user) == 0 { + return userInfo.IamID, nil + } + } + return "", nil + +} + +func getInfraPermissions(d *schema.ResourceData, meta interface{}) []string { + var infraPermissions = make([]string, 0) + if data, ok := d.GetOk("classic_infra_roles"); ok { + for _, resource := range data.([]interface{}) { + d := resource.(map[string]interface{}) + if permissions, ok := d["permissions"]; ok && permissions != nil { + for _, value := range permissions.([]interface{}) { + infraPermissions = append(infraPermissions, fmt.Sprintf("%v", value)) + } + } + if permissionSet, ok := d["permission_set"]; ok && permissionSet != nil { + if permissions, ok := permissionSets[permissionSet.(string)]; ok { + for _, permission := range permissions { + infraPermissions = append(infraPermissions, permission) + } + } + } + } + return infraPermissions + } + return infraPermissions +} + +// getPolicies ... 
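+// getPolicies converts each iam_policy block into a v2.UserPolicy: the resources entries are flattened into IAM resource attributes, account_management switches the serviceType attribute between "platform_service" and "service", the caller's account ID is always appended, and the requested role names are resolved against the IAM Policy Management role list for the target service.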
+func getPolicies(d *schema.ResourceData, meta interface{}, policies []interface{}) ([]v2.UserPolicy, error) { + var policyList = make([]v2.UserPolicy, 0) + for _, policy := range policies { + p := policy.(map[string]interface{}) + var serviceName string + resourceAttributes := []iampolicymanagementv1.ResourceAttribute{} + policyResource := iampolicymanagementv1.PolicyResource{} + + if res, ok := p["resources"]; ok { + resources := res.([]interface{}) + for _, resource := range resources { + r, _ := resource.(map[string]interface{}) + if r, ok := r["service"]; ok && r != nil { + serviceName = r.(string) + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceName"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["resource_instance_id"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceInstance"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["region"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("region"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["resource_type"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("resourceType"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["resource"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("resource"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["resource_group_id"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("resourceGroupId"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["attributes"]; ok { + for k, v := range r.(map[string]interface{}) { + resourceAttributes = setResourceAttribute(core.StringPtr(k), core.StringPtr(v.(string)), resourceAttributes) + } + } + + } + } + + if accountManagement, ok := p["account_management"]; ok && accountManagement.(bool) { + serviceTypeResourceAttribute := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceType"), + Value: core.StringPtr("platform_service"), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, serviceTypeResourceAttribute) + } + + if len(resourceAttributes) == 0 { + serviceTypeResourceAttribute := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceType"), + Value: core.StringPtr("service"), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, serviceTypeResourceAttribute) + } + + accountID, err := getAccountID(d, meta) + if err != nil { + return policyList, err + } + + accountIDResourceAttribute :=
iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("accountId"), + Value: core.StringPtr(accountID), + Operator: core.StringPtr("stringEquals"), + } + + resourceAttributes = append(resourceAttributes, accountIDResourceAttribute) + + policyResource.Attributes = resourceAttributes + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return policyList, err + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return policyList, err + } + + listRoleOptions := &iampolicymanagementv1.ListRolesOptions{ + AccountID: &userDetails.userAccount, + ServiceName: &serviceName, + } + + roleList, _, err := iamPolicyManagementClient.ListRoles(listRoleOptions) + roles := mapRoleListToPolicyRoles(*roleList) + + if err != nil { + return policyList, err + } + var policyRoles = make([]iampolicymanagementv1.PolicyRole, 0) + if userRoles, ok := p["roles"]; ok { + policyRoles, err = getRolesFromRoleNames(expandStringList(userRoles.([]interface{})), roles) + if err != nil { + return policyList, err + } + } + + policyList = append(policyList, v2.UserPolicy{Roles: convertIPMRolesToV1(policyRoles), Resources: convertIPMResourcesToV1(policyResource), Type: ACCESS}) + } + return policyList, nil +} + +func convertIPMRolesToV1(roles []iampolicymanagementv1.PolicyRole) []iampapv1.Role { + results := make([]iampapv1.Role, len(roles)) + for i, r := range roles { + results[i] = iampapv1.Role{ + RoleID: *r.RoleID, + } + } + return results +} + +func convertIPMResourcesToV1(resource iampolicymanagementv1.PolicyResource) []iampapv1.Resource { + attributes := make([]iampapv1.Attribute, len(resource.Attributes)) + for i, a := range resource.Attributes { + attributes[i] = iampapv1.Attribute{ + Name: *a.Name, + Value: *a.Value, + } + } + return []iampapv1.Resource{iampapv1.Resource{Attributes: attributes}} +} + +// getCloudFoundryRoles ... 
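The two helpers above bridge the newer iampolicymanagementv1 policy types back to the legacy iampapv1 payload that the v2 user-invite API still consumes; note that only Name and Value survive the conversion, so the stringEquals operator is implicit on the v1 side. A standalone sketch (not part of the vendored file) that re-inlines convertIPMResourcesToV1 so it compiles on its own and makes the mapping visible:

package main

import (
	"fmt"

	"github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv1"
	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/IBM/platform-services-go-sdk/iampolicymanagementv1"
)

// copy of the helper from the diff, reproduced here so the sketch is self-contained
func convertIPMResourcesToV1(resource iampolicymanagementv1.PolicyResource) []iampapv1.Resource {
	attributes := make([]iampapv1.Attribute, len(resource.Attributes))
	for i, a := range resource.Attributes {
		// the Operator field is intentionally dropped by the conversion
		attributes[i] = iampapv1.Attribute{Name: *a.Name, Value: *a.Value}
	}
	return []iampapv1.Resource{{Attributes: attributes}}
}

func main() {
	in := iampolicymanagementv1.PolicyResource{
		Attributes: []iampolicymanagementv1.ResourceAttribute{{
			Name:     core.StringPtr("serviceName"),
			Value:    core.StringPtr("cloud-object-storage"), // hypothetical service
			Operator: core.StringPtr("stringEquals"),
		}},
	}
	fmt.Printf("%+v\n", convertIPMResourcesToV1(in))
}

Running it prints a single-element resource list whose attributes mirror the input names and values.

+// getCloudFoundryRoles ...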
+func getCloudFoundryRoles(d *schema.ResourceData, meta interface{}) ([]v2.OrgRole, error) { + cloudFoundryRoles := make([]v2.OrgRole, 0) + if data, ok := d.GetOk("cloud_foundry_roles"); ok { + sess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return nil, err + } + usersSet := d.Get("users").(*schema.Set) + usersList := flattenUsersSet(usersSet) + for _, d := range data.([]interface{}) { + orgRole := v2.OrgRole{} + role := d.(map[string]interface{}) + orgRole.ID = role["organization_guid"].(string) + orgRole.Region = sess.Config.Region + orgRole.Users = usersList + for _, r := range role["org_roles"].([]interface{}) { + switch strings.ToLower(r.(string)) { + case AUDITOR: + orgRole.Auditors = usersList + case BILLINGMANANGER: + orgRole.BillingManagers = usersList + case MANAGER: + orgRole.Managers = usersList + } + } + if spaces, ok := role["spaces"]; ok { + for _, s := range spaces.([]interface{}) { + spaceInfo := v2.Space{} + space := s.(map[string]interface{}) + if spaceroles, ok := space["space_roles"]; ok { + for _, r := range spaceroles.([]interface{}) { + role := r.(string) + switch strings.ToLower(role) { + case AUDITOR: + spaceInfo.Auditors = usersList + case DEVELOPER: + spaceInfo.Developers = usersList + case MANAGER: + spaceInfo.Managers = usersList + } + + } + } + if spaceName, ok := space["space_guid"]; ok { + spaceInfo.ID = spaceName.(string) + } + orgRole.Spaces = append(orgRole.Spaces, spaceInfo) + } + } + cloudFoundryRoles = append(cloudFoundryRoles, orgRole) + + } + } + return cloudFoundryRoles, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_user_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_user_policy.go new file mode 100644 index 00000000000..43304067f6d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_user_policy.go @@ -0,0 +1,444 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/IBM/go-sdk-core/v5/core"
+	"github.com/IBM/platform-services-go-sdk/iampolicymanagementv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+)
+
+func resourceIBMIAMUserPolicy() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceIBMIAMUserPolicyCreate,
+		Read:   resourceIBMIAMUserPolicyRead,
+		Update: resourceIBMIAMUserPolicyUpdate,
+		Delete: resourceIBMIAMUserPolicyDelete,
+		Exists: resourceIBMIAMUserPolicyExists,
+		Importer: &schema.ResourceImporter{
+			State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+				resources, resourceAttributes, err := importUserPolicy(d, meta)
+				if err != nil {
+					return nil, fmt.Errorf("Error reading resource ID: %s", err)
+				}
+				d.Set("resources", resources)
+				d.Set("resource_attributes", resourceAttributes)
+				return []*schema.ResourceData{d}, nil
+			},
+		},
+		Schema: map[string]*schema.Schema{
+
+			"ibm_id": {
+				Description: "The IBM ID or email of the user",
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+			},
+			"roles": {
+				Type:        schema.TypeList,
+				Required:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "Role names of the policy definition",
+			},
+
+			"resources": {
+				Type:          schema.TypeList,
+				Optional:      true,
+				MaxItems:      1,
+				ConflictsWith: []string{"account_management", "resource_attributes"},
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"service": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "Service name of the policy definition",
+						},
+
+						"resource_instance_id": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "ID of resource instance of the policy definition",
+						},
+
+						"region": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "Region of the policy definition",
+						},
+
+						"resource_type": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "Resource type of the policy definition",
+						},
+
+						"resource": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "Resource of the policy definition",
+						},
+
+						"resource_group_id": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "ID of the resource group.",
+						},
+
+						"attributes": {
+							Type:        schema.TypeMap,
+							Optional:    true,
+							Description: "Set resource attributes in the form of 'name=value,name=value...'",
+							Elem:        schema.TypeString,
+						},
+					},
+				},
+			},
+
+			"resource_attributes": {
+				Type:          schema.TypeSet,
+				Optional:      true,
+				Description:   "Set resource attributes.",
+				ConflictsWith: []string{"resources", "account_management"},
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Name of attribute.",
+						},
+						"value": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Value of attribute.",
+						},
+						"operator": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Default:     "stringEquals",
+							Description: "Operator of attribute.",
+						},
+					},
+				},
+			},
+			"account_management": {
+				Type:          schema.TypeBool,
+				Default:       false,
+				Optional:      true,
+				Description:   "Give access to all account management services",
+				ConflictsWith: []string{"resources", "resource_attributes"},
+			},
+
+			"tags": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+		},
+	}
+}
+
+func resourceIBMIAMUserPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+	iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API()
+	if err != nil {
+		return err
+	}
+
+	policyOptions, err := generatePolicyOptions(d, meta)
+	if err != nil {
+		return err
+	}
+
+	userEmail := d.Get("ibm_id").(string)
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+
+	accountID := userDetails.userAccount
+
+	ibmUniqueID, err := getIBMUniqueId(accountID, userEmail, meta)
+	if err != nil {
+		return err
+	}
+
+	subjectAttribute := &iampolicymanagementv1.SubjectAttribute{
+		Name:  core.StringPtr("iam_id"),
+		Value: &ibmUniqueID,
+	}
+
+	policySubjects := &iampolicymanagementv1.PolicySubject{
+		Attributes: []iampolicymanagementv1.SubjectAttribute{*subjectAttribute},
+	}
+
+	accountIDResourceAttribute := &iampolicymanagementv1.ResourceAttribute{
+		Name:     core.StringPtr("accountId"),
+		Value:    core.StringPtr(accountID),
+		Operator: core.StringPtr("stringEquals"),
+	}
+
+	policyResources := iampolicymanagementv1.PolicyResource{
+		Attributes: append(policyOptions.Resources[0].Attributes, *accountIDResourceAttribute),
+	}
+
+	createPolicyOptions := iamPolicyManagementClient.NewCreatePolicyOptions(
+		"access",
+		[]iampolicymanagementv1.PolicySubject{*policySubjects},
+		policyOptions.Roles,
+		[]iampolicymanagementv1.PolicyResource{policyResources},
+	)
+
+	userPolicy, _, err := iamPolicyManagementClient.CreatePolicy(createPolicyOptions)
+	if err != nil {
+		return err
+	}
+
+	d.SetId(fmt.Sprintf("%s/%s", userEmail, *userPolicy.ID))
+
+	getPolicyOptions := &iampolicymanagementv1.GetPolicyOptions{
+		PolicyID: userPolicy.ID,
+	}
+
+	// The policy may not be immediately readable after creation, so retry on 404.
+	err = resource.Retry(5*time.Minute, func() *resource.RetryError {
+		_, _, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions)
+		if err != nil {
+			if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 {
+				return resource.RetryableError(err)
+			}
+			return resource.NonRetryableError(err)
+		}
+		return nil
+	})
+
+	if isResourceTimeoutError(err) {
+		_, _, err = iamPolicyManagementClient.GetPolicy(getPolicyOptions)
+	}
+	if err != nil {
+		return fmt.Errorf("error fetching user policy: %w", err)
+	}
+
+	return resourceIBMIAMUserPolicyRead(d, meta)
+}
+
+func resourceIBMIAMUserPolicyRead(d *schema.ResourceData, meta interface{}) error {
+	iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API()
+	if err != nil {
+		return err
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+	userEmail := parts[0]
+	userPolicyID := parts[1]
+
+	getPolicyOptions := &iampolicymanagementv1.GetPolicyOptions{
+		PolicyID: core.StringPtr(userPolicyID),
+	}
+
+	userPolicy, _, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions)
+	if err != nil {
+		return err
+	}
+	d.Set("ibm_id", userEmail)
+	roles := make([]string, len(userPolicy.Roles))
+	for i, role := range userPolicy.Roles {
+		roles[i] = *role.DisplayName
+	}
+	d.Set("roles", roles)
+	if _, ok := d.GetOk("resources"); ok {
+		d.Set("resources", flattenPolicyResource(userPolicy.Resources))
+	}
+	if _, ok := d.GetOk("resource_attributes"); ok {
+		d.Set("resource_attributes", flattenPolicyResourceAttributes(userPolicy.Resources))
+	}
+	if len(userPolicy.Resources) > 0 {
+		if *getResourceAttribute("serviceType", userPolicy.Resources[0]) == "service" {
d.Set("account_management", false) + } + if *getResourceAttribute("serviceType", userPolicy.Resources[0]) == "platform_service" { + d.Set("account_management", true) + } + } + return nil +} + +func resourceIBMIAMUserPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + if d.HasChange("roles") || d.HasChange("resources") || d.HasChange("resource_attributes") || d.HasChange("account_management") { + parts, err := idParts(d.Id()) + if err != nil { + return err + } + userEmail := parts[0] + userPolicyID := parts[1] + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + accountID := userDetails.userAccount + + createPolicyOptions, err := generatePolicyOptions(d, meta) + if err != nil { + return err + } + + ibmUniqueID, err := getIBMUniqueId(accountID, userEmail, meta) + if err != nil { + return err + } + + accountIDResourceAttribute := &iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("accountId"), + Value: core.StringPtr(accountID), + Operator: core.StringPtr("stringEquals"), + } + + policyResources := iampolicymanagementv1.PolicyResource{ + Attributes: append(createPolicyOptions.Resources[0].Attributes, *accountIDResourceAttribute), + } + + subjectAttribute := &iampolicymanagementv1.SubjectAttribute{ + Name: core.StringPtr("iam_id"), + Value: &ibmUniqueID, + } + policySubjects := &iampolicymanagementv1.PolicySubject{ + Attributes: []iampolicymanagementv1.SubjectAttribute{*subjectAttribute}, + } + + getPolicyOptions := &iampolicymanagementv1.GetPolicyOptions{ + PolicyID: &userPolicyID, + } + policy, response, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions) + if err != nil || policy == nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error retrieving Policy: %s\n%s", err, response) + } + + userPolicyETag := response.Headers.Get("ETag") + updatePolicyOptions := iamPolicyManagementClient.NewUpdatePolicyOptions( + userPolicyID, + userPolicyETag, + "access", + []iampolicymanagementv1.PolicySubject{*policySubjects}, + createPolicyOptions.Roles, + []iampolicymanagementv1.PolicyResource{policyResources}, + ) + + policy, _, err = iamPolicyManagementClient.UpdatePolicy(updatePolicyOptions) + if err != nil { + return fmt.Errorf("Error updating user policy: %s", err) + } + } + return resourceIBMIAMUserPolicyRead(d, meta) +} + +func resourceIBMIAMUserPolicyDelete(d *schema.ResourceData, meta interface{}) error { + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + userPolicyID := parts[1] + + deletePolicyOptions := iamPolicyManagementClient.NewDeletePolicyOptions( + userPolicyID, + ) + _, err = iamPolicyManagementClient.DeletePolicy(deletePolicyOptions) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceIBMIAMUserPolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return false, err + } + + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + userEmail := parts[0] + userPolicyID := parts[1] + + getPolicyOptions := iamPolicyManagementClient.NewGetPolicyOptions( + userPolicyID, + ) + + userPolicy, _, err := 
iamPolicyManagementClient.GetPolicy(getPolicyOptions)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+
+	tempID := fmt.Sprintf("%s/%s", userEmail, *userPolicy.ID)
+
+	return tempID == d.Id(), nil
+}
+
+func importUserPolicy(d *schema.ResourceData, meta interface{}) (interface{}, interface{}, error) {
+	iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API()
+	if err != nil {
+		return nil, nil, err
+	}
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return nil, nil, err
+	}
+	userPolicyID := parts[1]
+
+	getPolicyOptions := iamPolicyManagementClient.NewGetPolicyOptions(
+		userPolicyID,
+	)
+	userPolicy, _, err := iamPolicyManagementClient.GetPolicy(getPolicyOptions)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Error retrieving User Policy: %s", err)
+	}
+	resources := flattenPolicyResource(userPolicy.Resources)
+	resourceAttributes := flattenPolicyResourceAttributes(userPolicy.Resources)
+	return resources, resourceAttributes, nil
+}
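This resource stores its Terraform ID as `<ibm_id>/<policy_id>` (see the d.SetId call in the create function above); read, delete, and import all recover the pieces with idParts. A minimal sketch of that round-trip, assuming idParts is essentially a "/" split (its actual definition lives elsewhere in this package):

package main

import (
	"fmt"
	"strings"
)

// stand-in for the package's idParts helper, assumed to be a plain "/" split
func idParts(id string) ([]string, error) {
	parts := strings.Split(id, "/")
	if len(parts) < 2 {
		return nil, fmt.Errorf("malformed ID %q: expected <ibm_id>/<policy_id>", id)
	}
	return parts, nil
}

func main() {
	// mirrors d.SetId(fmt.Sprintf("%s/%s", userEmail, *userPolicy.ID)) above
	id := fmt.Sprintf("%s/%s", "user@example.com", "policy-1234") // hypothetical values
	parts, err := idParts(id)
	if err != nil {
		panic(err)
	}
	fmt.Println("ibm_id:", parts[0], "policy_id:", parts[1])
}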
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_user_settings.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_user_settings.go new file mode 100644 index 00000000000..6ee750643f9 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_iam_user_settings.go @@ -0,0 +1,221 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"strings"
+
+	v2 "github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	iamUserSettingIamID              = "iam_id"
+	iamUserSettingAllowedIPAddresses = "allowed_ip_addresses"
+)
+
+func resourceIBMUserSettings() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMIAMUserSettingsCreate,
+		Read:     resourceIBMIAMUserSettingsRead,
+		Update:   resourceIBMIAMUserSettingsUpdate,
+		Delete:   resourceIBMIAMUserSettingsDelete,
+		Exists:   resourceIBMIAMUserSettingsExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+
+			iamUserSettingIamID: {
+				Description: "The user's IAM ID or email",
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+			},
+
+			iamUserSettingAllowedIPAddresses: {
+				Type:        schema.TypeList,
+				Optional:    true,
+				ForceNew:    false,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "List of allowed IPv4 or IPv6 addresses",
+			},
+		},
+	}
+}
+
+func resourceIBMIAMUserSettingsCreate(d *schema.ResourceData, meta interface{}) error {
+	userManagement, err := meta.(ClientSession).UserManagementAPI()
+	if err != nil {
+		return err
+	}
+	client := userManagement.UserInvite()
+
+	userEmail := d.Get(iamUserSettingIamID).(string)
+
+	// Read from Bluemix UserConfig
+	accountID, err := getUserAccountID(d, meta)
+	if err != nil {
+		return err
+	}
+
+	iamID, err := getIBMUniqueId(accountID, userEmail, meta)
+	if err != nil {
+		return err
+	}
+
+	userSettingsPayload := v2.UserSettingOptions{}
+
+	if ip, ok := d.GetOk(iamUserSettingAllowedIPAddresses); ok && ip != nil {
+		var ips = make([]string, 0)
+		for _, i := range ip.([]interface{}) {
+			ips = append(ips, i.(string))
+		}
+		ipStr := strings.Join(ips, ",")
+		userSettingsPayload.AllowedIPAddresses = ipStr
+	}
+
+	_, UserSettingError := client.ManageUserSettings(accountID, iamID, userSettingsPayload)
+	if UserSettingError != nil && !strings.Contains(UserSettingError.Error(), "EmptyResponseBody") {
+		return fmt.Errorf("Error occurred while applying user settings: %s", UserSettingError)
+	}
+
+	d.SetId(userEmail)
+
+	return resourceIBMIAMUserSettingsRead(d, meta)
+}
+
+func getUserAccountID(d *schema.ResourceData, meta interface{}) (string, error) {
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return "", err
+	}
+	return userDetails.userAccount, nil
+}
+
+func resourceIBMIAMUserSettingsRead(d *schema.ResourceData, meta interface{}) error {
+	userManagement, err := meta.(ClientSession).UserManagementAPI()
+	if err != nil {
+		return err
+	}
+	client := userManagement.UserInvite()
+
+	accountID, err := getUserAccountID(d, meta)
+	if err != nil {
+		return err
+	}
+
+	iamID, err := getIBMUniqueId(accountID, d.Id(), meta)
+	if err != nil {
+		return err
+	}
+
+	UserSettings, UserSettingError := client.GetUserSettings(accountID, iamID)
+	if UserSettingError != nil {
+		return UserSettingError
+	}
+
+	iplist := strings.Split(UserSettings.AllowedIPAddresses, ",")
+	d.Set(iamUserSettingAllowedIPAddresses, iplist)
+
+	return nil
+}
+
+func resourceIBMIAMUserSettingsUpdate(d *schema.ResourceData, meta interface{}) error {
+	userManagement, err := meta.(ClientSession).UserManagementAPI()
+	if err != nil {
+		return err
+	}
+	client := userManagement.UserInvite()
+
+	accountID, err := getUserAccountID(d, meta)
+	if err != nil {
+		return err
+	}
+
+	iamID, err := getIBMUniqueId(accountID, d.Id(), meta)
+	if err != nil {
+		return err
+	}
+
+	hasChanged := false
+
+	userSettingPayload := v2.UserSettingOptions{}
+
+	if d.HasChange(iamUserSettingAllowedIPAddresses) {
+		if ip, ok := d.GetOk(iamUserSettingAllowedIPAddresses); ok && ip != nil {
+			var ips = make([]string, 0)
+			for _, i := range ip.([]interface{}) {
+				ips = append(ips, i.(string))
+			}
+			ipStr := strings.Join(ips, ",")
+			userSettingPayload.AllowedIPAddresses = ipStr
+		}
+		hasChanged = true
+	}
+
+	if hasChanged {
+		_, UserSettingError := client.ManageUserSettings(accountID, iamID, userSettingPayload)
+		if UserSettingError != nil && !strings.Contains(UserSettingError.Error(), "EmptyResponseBody") {
+			return fmt.Errorf("Error occurred while updating user settings: %s", UserSettingError)
+		}
+	}
+
+	return resourceIBMIAMUserSettingsRead(d, meta)
+}
+
+func resourceIBMIAMUserSettingsDelete(d *schema.ResourceData, meta interface{}) error {
+	userManagement, err := meta.(ClientSession).UserManagementAPI()
+	if err != nil {
+		return err
+	}
+	client := userManagement.UserInvite()
+
+	accountID, err := getUserAccountID(d, meta)
+	if err != nil {
+		return err
+	}
+
+	iamID, err := getIBMUniqueId(accountID, d.Id(), meta)
+	if err != nil {
+		return err
+	}
+
+	// Clearing the settings is done by sending an empty payload
+	userSettingPayload := v2.UserSettingOptions{}
+
+	_, UserSettingError := client.ManageUserSettings(accountID, iamID, userSettingPayload)
+	if UserSettingError != nil && !strings.Contains(UserSettingError.Error(), "EmptyResponseBody") {
+		return fmt.Errorf("Error occurred while removing user settings: %s", UserSettingError)
+	}
+
+	return nil
+}
+
+func resourceIBMIAMUserSettingsExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	userManagement, err := meta.(ClientSession).UserManagementAPI()
+	if err != nil {
+		return false, err
+	}
+	client := userManagement.UserInvite()
+
+	accountID, err := getUserAccountID(d, meta)
+	if err != nil {
+		return false, err
+	}
+
+	iamID, err := getIBMUniqueId(accountID, d.Id(), meta)
+	if err != nil {
+		return false, err
+	}
+
+	_, settingErr := client.GetUserSettings(accountID, iamID)
+	if settingErr != nil {
+		return false, settingErr
+	}
+	return true, nil
+}
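The user-settings API models the IP allow-list as a single comma-separated string (UserSettingOptions.AllowedIPAddresses), while the Terraform schema exposes a list; the create and read paths above simply Join and Split. A small self-contained sketch of the round-trip:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// schema list -> API payload, as in resourceIBMIAMUserSettingsCreate
	allowed := []string{"192.0.2.10", "2001:db8::1"} // hypothetical addresses
	payload := strings.Join(allowed, ",")

	// API payload -> schema list, as in resourceIBMIAMUserSettingsRead
	roundTripped := strings.Split(payload, ",")
	fmt.Println(payload, roundTripped)
}

One subtlety worth noting: strings.Split of an empty payload yields a one-element slice containing the empty string, so callers that need a truly empty list have to special-case it.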
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ipsec_vpn.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ipsec_vpn.go new file mode 100644 index 00000000000..bf78f434ba4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ipsec_vpn.go @@ -0,0 +1,513 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/helpers/location"
+	"github.com/softlayer/softlayer-go/helpers/product"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMIPSecVPN() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMIPSecVpnCreate,
+		Read:     resourceIBMIPSecVPNRead,
+		Delete:   resourceIBMIPSecVPNDelete,
+		Update:   resourceIBMIPSecVPNUpdate,
+		Exists:   resourceIBMIPSecVPNExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"datacenter": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Datacenter name",
+			},
+			"internal_peer_ip_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"phase_one": {
+				Type:     schema.TypeList,
+				MinItems: 1,
+				MaxItems: 1,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"authentication": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Default:      "MD5",
+							ValidateFunc: validateAuthProtocol,
+						},
+						"encryption": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Default:      "3DES",
+							ValidateFunc: validateEncyptionProtocol,
+						},
+						"diffie_hellman_group": {
+							Type:         schema.TypeInt,
+							Optional:     true,
+							Default:      2,
+							ValidateFunc: validateDiffieHellmanGroup,
+						},
+						"keylife": {
+							Type:         schema.TypeInt,
+							Optional:     true,
+							Default:      14400,
+							ValidateFunc: validatekeylife,
+						},
+					},
+				},
+			},
+			"phase_two": {
+				Type:     schema.TypeList,
+				Optional: true,
+				MinItems: 1,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"authentication": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Default:      "MD5",
+							ValidateFunc: validateAuthProtocol,
+						},
+						"encryption": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Default:      "3DES",
+							ValidateFunc: validateEncyptionProtocol,
+						},
+						"diffie_hellman_group": {
+							Type:         schema.TypeInt,
+							Optional:     true,
+							Default:      2,
+							ValidateFunc: validateDiffieHellmanGroup,
+						},
+						"keylife": {
+							Type:         schema.TypeInt,
+							Optional:     true,
+							Default:      3600,
+							ValidateFunc: validatekeylife,
+						},
+					},
+				},
+			},
+			"address_translation": { // parameters for creating an address translation
+				Type:     schema.TypeList,
+				MinItems: 1,
+				MaxItems: 1,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"remote_ip_adress": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"internal_ip_adress": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"notes": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+					},
+				},
+			},
+			"preshared_key": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Preshared Key data",
+			},
+			"customer_peer_ip": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Customer Peer IP Address",
+			},
+			"internal_subnet_id": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Description: "Internal subnet ID value",
+			},
+			"remote_subnet_id": { // customer subnet ID; at least one customer subnet is needed to apply the configuration
+				Type:          schema.TypeInt,
+				Optional:      true,
+				ConflictsWith: []string{"remote_subnet"},
+				Description:   "Remote subnet ID value",
+			},
+			"remote_subnet": { // parameters for creating a customer subnet; specify only one of remote_subnet and remote_subnet_id
+				Type:          schema.TypeList,
+				MinItems:      1,
+				MaxItems:      1,
+				Optional:      true,
+				ConflictsWith: []string{"remote_subnet_id"},
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"remote_ip_adress": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"remote_ip_cidr": {
+							Type:         schema.TypeString,
+							ValidateFunc: validateCIDR,
+							Required:     true,
+						},
+						"account_id": {
+							Type:     schema.TypeInt,
+							Optional: true,
+						},
+					},
+				},
+			},
+			"service_subnet_id": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Description: "Service subnet ID value",
+			},
+		},
+	}
+}
+
+const (
+	ipsecMask = "billingItem.orderItem.order.id,serviceSubnets,staticRouteSubnets"
+)
+
+func resourceIBMIPSecVpnCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	datacenter := d.Get("datacenter").(string)
+	dc, err := location.GetDatacenterByName(sess, datacenter, "id")
+	if err != nil {
+		return fmt.Errorf("Datacenter %s not found: %s", datacenter, err)
+	}
+	locationid := strconv.Itoa(*dc.Id)
+	packageid := 0
+	locationservice := services.GetLocationService(sess)
+	// store all the price groups the datacenter belongs to
+	priceidds, err := locationservice.Id(*dc.Id).GetPriceGroups()
+	if err != nil {
+		return fmt.Errorf("Error retrieving price groups for datacenter %s: %s", datacenter, err)
+	}
+	var listofpriceids []int
+	for _, priceidd := range priceidds {
+		listofpriceids = append(listofpriceids, *priceidd.Id)
+	}
+	actualpriceid, err := product.GetPriceIDByPackageIdandLocationGroups(sess, listofpriceids, 0, "IPSEC - Standard")
+	if err != nil {
+		return fmt.Errorf("Error retrieving the price ID for IPSEC - Standard: %s", err)
+	}
+	priceItems := []datatypes.Product_Item_Price{}
+	priceItem := datatypes.Product_Item_Price{
+		Id: &actualpriceid,
+	}
+	priceItems = append(priceItems, priceItem)
+	IPSecOrder := datatypes.Container_Product_Order_Network_Tunnel_Ipsec{
+		Container_Product_Order: datatypes.Container_Product_Order{
+			PackageId: sl.Int(packageid),
+			Prices:    priceItems,
+			Quantity:  sl.Int(1),
+			Location:  &locationid,
+		},
+	}
+	// Calling verify order
+	_, err = services.GetProductOrderService(sess.SetRetries(0)).
+		VerifyOrder(&IPSecOrder)
+	if err != nil {
+		return fmt.Errorf("Error during Verify order for Creating: %s", err)
+	}
+
+	// Calling place order
+	receipt, err := services.GetProductOrderService(sess.SetRetries(0)).
+		PlaceOrder(&IPSecOrder, sl.Bool(false))
+	if err != nil {
+		return fmt.Errorf("Error during Place order for Creating: %s", err)
+	}
+	vpn, err := findIPSecVpnByOrderID(sess, *receipt.OrderId, d)
+	if err != nil {
+		return fmt.Errorf("Error during creation of IPSec VPN: %s", err)
+	}
+	id := *vpn.Id
+	d.SetId(fmt.Sprintf("%d", id))
+	log.Printf("[INFO] IPSec VPN ID: %s", d.Id())
+	return resourceIBMIPSecVPNUpdate(d, meta)
+}
+
+func findIPSecVpnByOrderID(sess *session.Session, orderID int, d *schema.ResourceData) (datatypes.Network_Tunnel_Module_Context, error) {
+	filterPath := "networkTunnelContexts.billingItem.orderItem.order.id"
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"pending"},
+		Target:  []string{"complete"},
+		Refresh: func() (interface{}, string, error) {
+			vpn, err := services.GetAccountService(sess).
+				Filter(filter.Build(
+					filter.Path(filterPath).
+						Eq(strconv.Itoa(orderID)))).
+				Mask(ipsecMask).
+				GetNetworkTunnelContexts()
+			if err != nil {
+				return datatypes.Network_Tunnel_Module_Context{}, "", err
+			}
+
+			if len(vpn) == 1 {
+				return vpn[0], "complete", nil
+			} else if len(vpn) == 0 {
+				return datatypes.Network_Tunnel_Module_Context{}, "pending", nil
+			}
+			return nil, "", fmt.Errorf("Expected one IPSec VPN for order id '%d', found %d", orderID, len(vpn))
+		},
+		Timeout:        2 * time.Hour,
+		Delay:          10 * time.Second,
+		MinTimeout:     10 * time.Second,
+		NotFoundChecks: 24 * 60,
+	}
+
+	pendingResult, err := stateConf.WaitForState()
+	if err != nil {
+		return datatypes.Network_Tunnel_Module_Context{}, err
+	}
+	if result, ok := pendingResult.(datatypes.Network_Tunnel_Module_Context); ok {
+		return result, nil
+	}
+
+	return datatypes.Network_Tunnel_Module_Context{},
+		fmt.Errorf("Cannot find IPSec Vpn with order id '%d'", orderID)
+}
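findIPSecVpnByOrderID above relies on the plugin SDK's StateChangeConf to poll until the ordered tunnel context shows up under the account. A generic, self-contained sketch of that polling pattern (the states and result here are stand-ins, not the real API):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

func main() {
	tries := 0
	stateConf := &resource.StateChangeConf{
		Pending: []string{"pending"},
		Target:  []string{"complete"},
		// Refresh returns (result, state, err); WaitForState keeps calling it
		// until the state reaches Target, an error is returned, or Timeout elapses.
		Refresh: func() (interface{}, string, error) {
			tries++
			if tries < 3 {
				return struct{}{}, "pending", nil // simulated "not ready yet"
			}
			return "order-1234", "complete", nil // simulated final result
		},
		Timeout:    time.Minute,
		Delay:      time.Second,
		MinTimeout: time.Second,
	}
	result, err := stateConf.WaitForState()
	fmt.Println(result, err)
}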
+func resourceIBMIPSecVPNRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	vpnID, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	vpn, err := services.GetNetworkTunnelModuleContextService(sess).
+		Id(vpnID).Mask(ipsecMask).
+		GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving IPSec VPN information: %s", err)
+	}
+	d.Set("name", *vpn.Name)
+	d.Set("internal_peer_ip_address", *vpn.InternalPeerIpAddress)
+	if vpn.Datacenter != nil {
+		d.Set("datacenter", *vpn.Datacenter.Name)
+	}
+	d.Set("phase_one", flattenPhaseOneAttributes(&vpn))
+	d.Set("phase_two", flattenPhaseTwoAttributes(&vpn))
+	if vpn.AddressTranslations != nil {
+		d.Set("address_translation", flattenaddressTranslation(&vpn, vpnID))
+	}
+	if vpn.CustomerSubnets != nil {
+		d.Set("remote_subnet", flattenremoteSubnet(&vpn))
+	}
+	if vpn.PresharedKey != nil {
+		d.Set("preshared_key", *vpn.PresharedKey)
+	}
+	if vpn.CustomerPeerIpAddress != nil {
+		d.Set("customer_peer_ip", *vpn.CustomerPeerIpAddress)
+	}
+	if len(vpn.InternalSubnets) > 0 {
+		d.Set("internal_subnet_id", *vpn.InternalSubnets[0].Id)
+	}
+	if len(vpn.CustomerSubnets) > 0 {
+		d.Set("remote_subnet_id", *vpn.CustomerSubnets[0].Id)
+	}
+	if len(vpn.ServiceSubnets) > 0 {
+		d.Set("service_subnet_id", *vpn.ServiceSubnets[0].Id)
+	}
+	return nil
+}
+
+func resourceIBMIPSecVPNExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	fwID, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	_, err = services.GetNetworkTunnelModuleContextService(sess).
+		Id(fwID).
+		GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {
+			return false, nil
+		}
+		return false, fmt.Errorf("Error retrieving IPSec VPN information: %s", err)
+	}
+
+	return true, nil
+}
+
+func resourceIBMIPSecVPNDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	vpnService := services.GetNetworkTunnelModuleContextService(sess)
+
+	vpnID, _ := strconv.Atoi(d.Id())
+
+	// Get the billing item associated with the IPSec VPN
+	billingItem, err := vpnService.Id(vpnID).GetBillingItem()
+	if err != nil {
+		return fmt.Errorf("Error while looking up billing item associated with the IPSec VPN: %s", err)
+	}
+
+	if billingItem.Id == nil {
+		return fmt.Errorf("Error while looking up billing item associated with the IPSec VPN: No billing item for ID:%d", vpnID)
+	}
+
+	success, err := services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService()
+	if err != nil {
+		return err
+	}
+
+	if !success {
+		return fmt.Errorf("SoftLayer reported an unsuccessful cancellation")
+	}
+
+	return nil
+}
+
+func resourceIBMIPSecVPNUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	vpnID, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	vpn, err := services.GetNetworkTunnelModuleContextService(sess).
+		Id(vpnID).Mask(ipsecMask).
+		GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving IPSec VPN information: %s", err)
+	}
+
+	hasEdit := false
+	if d.HasChange("phase_one") {
+		for _, e := range d.Get("phase_one").([]interface{}) {
+			value := e.(map[string]interface{})
+			auth := value["authentication"].(string)
+			vpn.PhaseOneAuthentication = &auth
+			encryption := value["encryption"].(string)
+			vpn.PhaseOneEncryption = &encryption
+			diffiehellman := value["diffie_hellman_group"].(int)
+			vpn.PhaseOneDiffieHellmanGroup = &diffiehellman
+			keylife := value["keylife"].(int)
+			vpn.PhaseOneKeylife = &keylife
+		}
+		hasEdit = true
+	}
+	if d.HasChange("phase_two") {
+		for _, e := range d.Get("phase_two").([]interface{}) {
+			value := e.(map[string]interface{})
+			auth := value["authentication"].(string)
+			vpn.PhaseTwoAuthentication = &auth
+			encryption := value["encryption"].(string)
+			vpn.PhaseTwoEncryption = &encryption
+			diffiehellman := value["diffie_hellman_group"].(int)
+			vpn.PhaseTwoDiffieHellmanGroup = &diffiehellman
+			keylife := value["keylife"].(int)
+			vpn.PhaseTwoKeylife = &keylife
+		}
+		hasEdit = true
+	}
+	if d.HasChange("preshared_key") {
+		presharedkey := d.Get("preshared_key").(string)
+		vpn.PresharedKey = &presharedkey
+		hasEdit = true
+	}
+	if d.HasChange("customer_peer_ip") {
+		customeripaddr := d.Get("customer_peer_ip").(string)
+		vpn.CustomerPeerIpAddress = &customeripaddr
+		hasEdit = true
+	}
+	if hasEdit {
+		_, err = services.GetNetworkTunnelModuleContextService(sess).Id(vpnID).EditObject(&vpn)
+		if err != nil {
+			return fmt.Errorf("SoftLayer reported an unsuccessful edit: %s", err)
+		}
+	}
+	if d.HasChange("internal_subnet_id") {
+		subnetid := d.Get("internal_subnet_id").(int)
+		_, err = services.GetNetworkTunnelModuleContextService(sess).Id(vpnID).AddPrivateSubnetToNetworkTunnel(&subnetid)
+		if err != nil {
+			return fmt.Errorf("Error adding the private subnet to the network tunnel: %s", err)
+		}
+	}
+	if d.HasChange("remote_subnet_id") {
+		subnetid := d.Get("remote_subnet_id").(int)
+		_, err = services.GetNetworkTunnelModuleContextService(sess).Id(vpnID).AddCustomerSubnetToNetworkTunnel(&subnetid)
+		if err != nil {
+			return fmt.Errorf("Error adding the customer subnet to the network tunnel: %s", err)
+		}
+	}
+	if d.HasChange("service_subnet_id") {
+		subnetid := d.Get("service_subnet_id").(int)
+		_, err = services.GetNetworkTunnelModuleContextService(sess).Id(vpnID).AddServiceSubnetToNetworkTunnel(&subnetid)
+		if err != nil {
+			return fmt.Errorf("Error adding the service subnet to the network tunnel: %s", err)
+		}
+	}
+	if d.HasChange("address_translation") {
+		addresstranslation := datatypes.Network_Tunnel_Module_Context_Address_Translation{}
+		for _, e := range d.Get("address_translation").([]interface{}) {
+			value := e.(map[string]interface{})
+			customerIP := value["remote_ip_adress"].(string)
+			addresstranslation.CustomerIpAddress = &customerIP
+			internalIP := value["internal_ip_adress"].(string)
+			addresstranslation.InternalIpAddress = &internalIP
+			notes := value["notes"].(string)
+			addresstranslation.Notes = &notes
+		}
+		_, err = services.GetNetworkTunnelModuleContextService(sess).Id(vpnID).CreateAddressTranslation(&addresstranslation)
+		if err != nil {
+			return fmt.Errorf("Unable to create the address translation: %s", err)
+		}
+	}
+	if d.HasChange("remote_subnet") {
+		for _, e := range d.Get("remote_subnet").([]interface{}) {
+			remoteSubnet := datatypes.Network_Customer_Subnet{}
+			value := e.(map[string]interface{})
+			customerIP := value["remote_ip_adress"].(string)
+			s := strings.Split(customerIP, "/")
+			ip, cidr := s[0], s[1]
+			actualcidr, _ := strconv.Atoi(cidr)
+			accountID := value["account_id"].(int)
+			remoteSubnet.NetworkIdentifier = &ip
+			remoteSubnet.Cidr = &actualcidr
+			remoteSubnet.AccountId = &accountID
+			subnet, err := services.GetNetworkCustomerSubnetService(sess).Id(vpnID).CreateObject(&remoteSubnet)
+			if err != nil {
+				return fmt.Errorf("An error occurred creating the customer subnet resource: %s", err)
+			}
+			_, err = services.GetNetworkTunnelModuleContextService(sess).Id(vpnID).AddCustomerSubnetToNetworkTunnel(subnet.Id)
+			if err != nil {
+				return fmt.Errorf("An error occurred adding the customer subnet to the network tunnel module: %s", err)
+			}
+		}
+	}
+	if _, ok := d.GetOk("remote_subnet_id"); ok {
+		_, err = services.GetNetworkTunnelModuleContextService(sess).Id(vpnID).ApplyConfigurationsToDevice()
+		if err != nil {
+			return fmt.Errorf("Error applying the configuration: %s", err)
+		}
+	} else if _, ok := d.GetOk("remote_subnet"); ok {
+		_, err = services.GetNetworkTunnelModuleContextService(sess).Id(vpnID).ApplyConfigurationsToDevice()
+		if err != nil {
+			return fmt.Errorf("Error applying the configuration: %s", err)
+		}
+	}
+
+	return resourceIBMIPSecVPNRead(d, meta)
+}
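The SoftLayer-backed resources in this file map an HTTP 404 from the API to "resource gone" rather than a hard failure (see resourceIBMIPSecVPNExists above). A compact sketch of that convention, using only the sl.Error field the file itself relies on:

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/sl"
)

// exists interprets an API error the way the Exists functions above do:
// a 404 means the object is simply gone; anything else is a real error.
func exists(err error) (bool, error) {
	if err == nil {
		return true, nil
	}
	if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {
		return false, nil // deleted out of band; not an error
	}
	return false, err
}

func main() {
	fmt.Println(exists(sl.Error{StatusCode: 404})) // false <nil>
}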
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_dedicated_host.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_dedicated_host.go new file mode 100644 index 00000000000..2e3b2dc26ac --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_dedicated_host.go @@ -0,0 +1,780 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+	"github.com/IBM/go-sdk-core/v5/core"
+	"github.com/IBM/vpc-go-sdk/vpcv1"
+)
+
+const (
+	isDedicatedHostStable     = "stable"
+	isDedicatedHostDeleting   = "deleting"
+	isDedicatedHostDeleteDone = "done"
+	isDedicatedHostFailed     = "failed"
+
+	isDedicatedHostUpdating             = "updating"
+	isDedicatedHostProvisioningDone     = "done"
+	isDedicatedHostWaiting              = "waiting"
+	isDedicatedHostSuspended            = "suspended"
+	isDedicatedHostActionStatusStopping = "stopping"
+	isDedicatedHostActionStatusStopped  = "stopped"
+	isDedicatedHostStatusPending        = "pending"
+	isDedicatedHostStatusRunning        = "running"
+	isDedicatedHostStatusFailed         = "failed"
+)
+
+func resourceIbmIsDedicatedHost() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIbmIsDedicatedHostCreate,
+		Read:     resourceIbmIsDedicatedHostRead,
+		Update:   resourceIbmIsDedicatedHostUpdate,
+		Delete:   resourceIbmIsDedicatedHostDelete,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"instance_placement_enabled": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     true,
+				Description: "If set to true, instances can be placed on this dedicated host.",
+			},
+			"name": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				Computed:     true,
+				ValidateFunc: InvokeValidator("ibm_is_dedicated_host", "name"),
+				Description:  "The unique user-defined name for this dedicated host. If unspecified, the name will be a hyphenated list of randomly-selected words.",
+			},
+			"profile": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The globally unique name of the dedicated host profile to use for this dedicated host.",
+			},
+			"resource_group": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Computed:    true,
+				Description: "The unique identifier for the resource group to use. If unspecified, the account's [default resource group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used.",
+			},
+			"host_group": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The unique identifier of the dedicated host group for this dedicated host.",
+			},
+			"available_memory": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The amount of memory in gibibytes that is currently available for instances.",
+			},
+			"available_vcpu": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "The available VCPU for the dedicated host.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"architecture": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The VCPU architecture.",
+						},
+						"count": {
+							Type:        schema.TypeInt,
+							Computed:    true,
+							Description: "The number of VCPUs assigned.",
+						},
+					},
+				},
+			},
+			"created_at": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The date and time that the dedicated host was created.",
+			},
+			"crn": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The CRN for this dedicated host.",
+			},
+			"disks": &schema.Schema{
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "Collection of the dedicated host's disks.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"available": &schema.Schema{
+							Type:        schema.TypeInt,
+							Computed:    true,
+							Description: "The remaining space left for instance placement in GB (gigabytes).",
+						},
+						"created_at": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The date and time that the disk was created.",
+						},
+						"href": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The URL for this disk.",
+						},
+						"id": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The unique identifier for this disk.",
+						},
+						"instance_disks": &schema.Schema{
+							Type:        schema.TypeList,
+							Computed:    true,
+							Description: "Instance disks that are on this dedicated host disk.",
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"deleted": &schema.Schema{
+										Type:        schema.TypeList,
+										Computed:    true,
+										Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.",
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"more_info": &schema.Schema{
+													Type:        schema.TypeString,
+													Computed:    true,
+													Description: "Link to documentation about deleted resources.",
+												},
+											},
+										},
+									},
+									"href": &schema.Schema{
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "The URL for this instance disk.",
+									},
+									"id": &schema.Schema{
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "The unique identifier for this instance disk.",
+									},
+									"name": &schema.Schema{
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "The user-defined name for this disk.",
+									},
+									"resource_type": &schema.Schema{
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "The resource type.",
+									},
+								},
+							},
+						},
+						"interface_type": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The disk interface used for attaching the disk. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.",
+						},
+						"lifecycle_state": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The lifecycle state of this dedicated host disk.",
+						},
+						"name": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The user-defined or system-provided name for this disk.",
+						},
+						"provisionable": &schema.Schema{
+							Type:        schema.TypeBool,
+							Computed:    true,
+							Description: "Indicates whether this dedicated host disk is available for instance disk creation.",
+						},
+						"resource_type": &schema.Schema{
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The type of resource referenced.",
+						},
+						"size": &schema.Schema{
+							Type:        schema.TypeInt,
+							Computed:    true,
+							Description: "The size of the disk in GB (gigabytes).",
+						},
+						"supported_instance_interface_types": &schema.Schema{
+							Type:        schema.TypeList,
+							Computed:    true,
+							Description: "The instance disk interfaces supported for this dedicated host disk.",
+							Elem:        &schema.Schema{Type: schema.TypeString},
+						},
+					},
+				},
+			},
+			"href": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL for this dedicated host.",
+			},
+			"instances": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "Array of instances that are allocated to this dedicated host.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"crn": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The CRN for this virtual server instance.",
+						},
+						"deleted": {
+							Type:        schema.TypeList,
+							Computed:    true,
+							Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.",
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"more_info": {
+										Type:        schema.TypeString,
+										Computed:    true,
+										Description: "Link to documentation about deleted resources.",
+									},
+								},
+							},
+						},
+						"href": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The URL for this virtual server instance.",
+						},
+						"id": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The unique identifier for this virtual server instance.",
+						},
+						"name": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The user-defined name for this virtual server instance (and default system hostname).",
+						},
+					},
+				},
+			},
+			"lifecycle_state": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The lifecycle state of the dedicated host resource.",
+			},
+			"memory": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The total amount of memory in gibibytes for this host.",
+			},
+			"provisionable": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Indicates whether this dedicated host is available for instance creation.",
+			},
+			"resource_type": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The type of resource referenced.",
+			},
+			"socket_count": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The total number of sockets for this host.",
+			},
+			"state": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The administrative state of the dedicated host. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the dedicated host on which the unexpected property value was encountered.",
+			},
+			"supported_instance_profiles": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "Array of instance profiles that can be used by instances placed on this dedicated host.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"href": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The URL for this virtual server instance profile.",
+						},
+						"name": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The globally unique name for this virtual server instance profile.",
+						},
+					},
+				},
+			},
+			"vcpu": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "The total VCPU of the dedicated host.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"architecture": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The VCPU architecture.",
+						},
+						"count": {
+							Type:        schema.TypeInt,
+							Computed:    true,
+							Description: "The number of VCPUs assigned.",
+						},
+					},
+				},
+			},
+			"zone": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The globally unique name of the zone this dedicated host resides in.",
+			},
+		},
+	}
+}
+
+func resourceIbmIsDedicatedHostValidator() *ResourceValidator {
+	validateSchema := make([]ValidateSchema, 0)
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 "name",
+			ValidateFunctionIdentifier: ValidateRegexpLen,
+			Type:                       TypeString,
+			Optional:                   true,
+			Regexp:                     `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`,
+			MinValueLength:             1,
+			MaxValueLength:             63,
+		})
+
+	resourceValidator := ResourceValidator{ResourceName: "ibm_is_dedicated_host", Schema: validateSchema}
+	return &resourceValidator
+}
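The validator above accepts lowercase names built from letters, digits, and hyphens that start with a letter and do not end with a hyphen. A quick standalone sketch exercising the same regular expression:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// same expression as the "name" ValidateSchema above
	re := regexp.MustCompile(`^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`)
	for _, name := range []string{"dh-test-01", "Dh-Test", "dh-", "a"} {
		fmt.Printf("%-10s valid=%v\n", name, re.MatchString(name))
	}
	// prints: dh-test-01 true, Dh-Test false, dh- false, a true
}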
+func resourceIbmIsDedicatedHostCreate(d *schema.ResourceData, meta interface{}) error {
+	vpcClient, err := meta.(ClientSession).VpcV1API()
+	if err != nil {
+		return err
+	}
+	createDedicatedHostOptions := &vpcv1.CreateDedicatedHostOptions{}
+	dedicatedHostPrototype := vpcv1.DedicatedHostPrototype{}
+
+	if dhname, ok := d.GetOk("name"); ok {
+		namestr := dhname.(string)
+		dedicatedHostPrototype.Name = &namestr
+	}
+	if insplacementenabled, ok := d.GetOk("instance_placement_enabled"); ok {
+		insplacementenabledbool := insplacementenabled.(bool)
+		dedicatedHostPrototype.InstancePlacementEnabled = &insplacementenabledbool
+	}
+
+	if dhprofile, ok := d.GetOk("profile"); ok {
+		dhprofilename := dhprofile.(string)
+		dedicatedHostProfileIdentity := vpcv1.DedicatedHostProfileIdentity{
+			Name: &dhprofilename,
+		}
+		dedicatedHostPrototype.Profile = &dedicatedHostProfileIdentity
+	}
+
+	if dhgroup, ok := d.GetOk("host_group"); ok {
+		dhgroupid := dhgroup.(string)
+		dedicatedHostGroupIdentity := vpcv1.DedicatedHostGroupIdentity{
+			ID: &dhgroupid,
+		}
+		dedicatedHostPrototype.Group = &dedicatedHostGroupIdentity
+	}
+
+	if resgroup, ok := d.GetOk("resource_group"); ok {
+		resgroupid := resgroup.(string)
+		resourceGroupIdentity := vpcv1.ResourceGroupIdentity{
+			ID: &resgroupid,
+		}
+		dedicatedHostPrototype.ResourceGroup = &resourceGroupIdentity
+	}
+
+	createDedicatedHostOptions.SetDedicatedHostPrototype(&dedicatedHostPrototype)
+
+	dedicatedHost, response, err := vpcClient.CreateDedicatedHostWithContext(context.TODO(), createDedicatedHostOptions)
+	if err != nil {
+		log.Printf("[DEBUG] CreateDedicatedHostWithContext failed %s\n%s", err, response)
+		return err
+	}
+
+	d.SetId(*dedicatedHost.ID)
+
+	_, err = isWaitForDedicatedHostAvailable(vpcClient, d.Id(), d.Timeout(schema.TimeoutCreate), d)
+	if err != nil {
+		return err
+	}
+
+	return resourceIbmIsDedicatedHostRead(d, meta)
+}
+
+func resourceIbmIsDedicatedHostRead(d *schema.ResourceData, meta interface{}) error {
+	vpcClient, err := meta.(ClientSession).VpcV1API()
+	if err != nil {
+		return err
+	}
+
+	getDedicatedHostOptions := &vpcv1.GetDedicatedHostOptions{}
+	getDedicatedHostOptions.SetID(d.Id())
+
+	dedicatedHost, response, err := vpcClient.GetDedicatedHostWithContext(context.TODO(), getDedicatedHostOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		log.Printf("[DEBUG] GetDedicatedHostWithContext failed %s\n%s", err, response)
+		return err
+	}
+
+	if err = d.Set("available_memory", intValue(dedicatedHost.AvailableMemory)); err != nil {
+		return fmt.Errorf("Error setting available_memory: %s", err)
+	}
+	availableVcpuMap := resourceIbmIsDedicatedHostVCPUToMap(*dedicatedHost.AvailableVcpu)
+	if err = d.Set("available_vcpu", []map[string]interface{}{availableVcpuMap}); err != nil {
+		return fmt.Errorf("Error setting available_vcpu: %s", err)
+	}
+	if err = d.Set("created_at", dedicatedHost.CreatedAt.String()); err != nil {
+		return fmt.Errorf("Error setting created_at: %s", err)
+	}
+	if err = d.Set("crn", dedicatedHost.CRN); err != nil {
+		return fmt.Errorf("Error setting crn: %s", err)
+	}
+	disks := []map[string]interface{}{}
+	for _, disksItem := range dedicatedHost.Disks {
+		disksItemMap := resourceIbmIsDedicatedHostDedicatedHostDiskToMap(disksItem)
+		disks = append(disks, disksItemMap)
+	}
+	if err = d.Set("disks", disks); err != nil {
+		return fmt.Errorf("Error setting disks: %s", err)
+	}
+	if err = d.Set("host_group", *dedicatedHost.Group.ID); err != nil {
+		return fmt.Errorf("Error setting host_group: %s", err)
+	}
+	if err = d.Set("href", dedicatedHost.Href); err != nil {
+		return fmt.Errorf("Error setting href: %s", err)
+	}
+	if err = d.Set("instance_placement_enabled", dedicatedHost.InstancePlacementEnabled); err != nil {
+		return fmt.Errorf("Error setting instance_placement_enabled: %s", err)
+	}
+	instances := []map[string]interface{}{}
+	for _, instancesItem := range dedicatedHost.Instances {
+		instancesItemMap := resourceIbmIsDedicatedHostInstanceReferenceToMap(instancesItem)
+		instances = append(instances, instancesItemMap)
+	}
+	if err = d.Set("instances", instances); err != nil {
+		return fmt.Errorf("Error setting instances: %s", err)
+	}
+	if err = d.Set("lifecycle_state", dedicatedHost.LifecycleState); err != nil {
+		return fmt.Errorf("Error setting lifecycle_state: %s", err)
+	}
+	if err = d.Set("memory", intValue(dedicatedHost.Memory)); err != nil {
+		return fmt.Errorf("Error setting memory: %s", err)
+	}
+	if err = d.Set("name", dedicatedHost.Name); err != nil {
+		return fmt.Errorf("Error setting name: %s", err)
+	}
+	if err = d.Set("profile", *dedicatedHost.Profile.Name); err != nil {
+		return fmt.Errorf("Error setting profile: %s", err)
+	}
+	if err = d.Set("provisionable", dedicatedHost.Provisionable); err != nil {
+		return fmt.Errorf("Error setting provisionable: %s", err)
+	}
+	if err = d.Set("resource_group", *dedicatedHost.ResourceGroup.ID); err != nil {
+		return fmt.Errorf("Error setting resource_group: %s", err)
+	}
+	if err = d.Set("resource_type", dedicatedHost.ResourceType); err != nil {
+		return fmt.Errorf("Error setting resource_type: %s", err)
+	}
+	if err = d.Set("socket_count", intValue(dedicatedHost.SocketCount)); err != nil {
+		return fmt.Errorf("Error setting socket_count: %s", err)
+	}
+	if err = d.Set("state", dedicatedHost.State); err != nil {
+		return fmt.Errorf("Error setting state: %s", err)
+	}
+	supportedInstanceProfiles := []map[string]interface{}{}
+	for _, supportedInstanceProfilesItem := range dedicatedHost.SupportedInstanceProfiles {
+		supportedInstanceProfilesItemMap := resourceIbmIsDedicatedHostInstanceProfileReferenceToMap(supportedInstanceProfilesItem)
+		supportedInstanceProfiles = append(supportedInstanceProfiles, supportedInstanceProfilesItemMap)
+	}
+	if err = d.Set("supported_instance_profiles", supportedInstanceProfiles); err != nil {
+		return fmt.Errorf("Error setting supported_instance_profiles: %s", err)
+	}
+	vcpuMap := resourceIbmIsDedicatedHostVCPUToMap(*dedicatedHost.Vcpu)
+	if err = d.Set("vcpu", []map[string]interface{}{vcpuMap}); err != nil {
+		return fmt.Errorf("Error setting vcpu: %s", err)
+	}
+	if err = d.Set("zone", *dedicatedHost.Zone.Name); err != nil {
+		return fmt.Errorf("Error setting zone: %s", err)
+	}
+
+	return nil
+}
+
+func resourceIbmIsDedicatedHostVCPUToMap(vCPU vpcv1.Vcpu) map[string]interface{} {
+	vCPUMap := map[string]interface{}{}
+
+	vCPUMap["architecture"] = vCPU.Architecture
+	vCPUMap["count"] = intValue(vCPU.Count)
+
+	return vCPUMap
+}
+
+func resourceIbmIsDedicatedHostInstanceReferenceToMap(instanceReference vpcv1.InstanceReference) map[string]interface{} {
+	instanceReferenceMap := map[string]interface{}{}
+
+	instanceReferenceMap["crn"] = instanceReference.CRN
+	if instanceReference.Deleted != nil {
+		DeletedMap := resourceIbmIsDedicatedHostInstanceReferenceDeletedToMap(*instanceReference.Deleted)
+		instanceReferenceMap["deleted"] = []map[string]interface{}{DeletedMap}
+	}
+	instanceReferenceMap["href"] = instanceReference.Href
+	instanceReferenceMap["id"] = instanceReference.ID
+	instanceReferenceMap["name"] = instanceReference.Name
+
+	return instanceReferenceMap
+}
+
+func resourceIbmIsDedicatedHostInstanceReferenceDeletedToMap(instanceReferenceDeleted vpcv1.InstanceReferenceDeleted) map[string]interface{} {
+	instanceReferenceDeletedMap := map[string]interface{}{}
+
+	instanceReferenceDeletedMap["more_info"] = instanceReferenceDeleted.MoreInfo
+
+	return instanceReferenceDeletedMap
+}
+
+func resourceIbmIsDedicatedHostInstanceProfileReferenceToMap(instanceProfileReference vpcv1.InstanceProfileReference) map[string]interface{} {
+	instanceProfileReferenceMap := map[string]interface{}{}
+
+	instanceProfileReferenceMap["href"] = instanceProfileReference.Href
+	instanceProfileReferenceMap["name"] = instanceProfileReference.Name
+
+	return instanceProfileReferenceMap
+}
d.Get("host_group").(interface{}) + hasChange = true + } + + if hasChange { + updateDedicatedHostOptions.SetDedicatedHostPatch(dedicatedHostPrototypemap) + _, response, err := vpcClient.UpdateDedicatedHostWithContext(context.TODO(), updateDedicatedHostOptions) + if err != nil { + log.Printf("[DEBUG] UpdateDedicatedHostWithContext fails %s\n%s", err, response) + return err + } + } + + return resourceIbmIsDedicatedHostRead(d, meta) +} + +func resourceIbmIsDedicatedHostDelete(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + getDedicatedHostOptions := &vpcv1.GetDedicatedHostOptions{} + + getDedicatedHostOptions.SetID(d.Id()) + + dedicatedHost, response, err := vpcClient.GetDedicatedHostWithContext(context.TODO(), getDedicatedHostOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetDedicatedHostWithContext failed %s\n%s", err, response) + return err + } + if dedicatedHost != nil && dedicatedHost.LifecycleState != nil && *dedicatedHost.LifecycleState != isDedicatedHostSuspended && *dedicatedHost.LifecycleState != isDedicatedHostFailed { + + updateDedicatedHostOptions := &vpcv1.UpdateDedicatedHostOptions{} + dedicatedHostPrototypeMap := map[string]interface{}{} + dedicatedHostPrototypeMap["instance_placement_enabled"] = core.BoolPtr(false) + updateDedicatedHostOptions.SetID(d.Id()) + updateDedicatedHostOptions.SetDedicatedHostPatch(dedicatedHostPrototypeMap) + _, updateresponse, err := vpcClient.UpdateDedicatedHostWithContext(context.TODO(), updateDedicatedHostOptions) + if err != nil { + log.Printf("[DEBUG] Failed disabling instance placement %s\n%s", err, updateresponse) + return err + } + } + deleteDedicatedHostOptions := &vpcv1.DeleteDedicatedHostOptions{} + + deleteDedicatedHostOptions.SetID(d.Id()) + + response, err = vpcClient.DeleteDedicatedHostWithContext(context.TODO(), deleteDedicatedHostOptions) + if err != nil { + log.Printf("[DEBUG] DeleteDedicatedHostWithContext failed %s\n%s", err, response) + return err + } + _, err = isWaitForDedicatedHostDelete(vpcClient, d, d.Id()) + if err != nil { + return err + } + + d.SetId("") + + return nil +} + +func isWaitForDedicatedHostDelete(instanceC *vpcv1.VpcV1, d *schema.ResourceData, id string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isDedicatedHostDeleting, isDedicatedHostStable}, + Target: []string{isDedicatedHostDeleteDone, ""}, + Refresh: func() (interface{}, string, error) { + getdhoptions := &vpcv1.GetDedicatedHostOptions{ + ID: &id, + } + dedicatedhost, response, err := instanceC.GetDedicatedHost(getdhoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return dedicatedhost, isDedicatedHostDeleteDone, nil + } + return nil, "", fmt.Errorf("Error getting dedicated Host: %s\n%s", err, response) + } + if *dedicatedhost.State == isDedicatedHostFailed { + return dedicatedhost, *dedicatedhost.State, fmt.Errorf("The Dedicated host %s failed to delete: %v", d.Id(), err) + } + return dedicatedhost, isDedicatedHostDeleting, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isWaitForDedicatedHostAvailable(instanceC *vpcv1.VpcV1, id string, timeout time.Duration, d *schema.ResourceData) (interface{}, error) { + log.Printf("Waiting for dedicated host (%s) to be available.", id) + + stateConf := 
&resource.StateChangeConf{ + Pending: []string{isDedicatedHostStatusPending, isDedicatedHostUpdating, isDedicatedHostWaiting}, + Target: []string{isDedicatedHostFailed, isDedicatedHostStable, isDedicatedHostSuspended}, + Refresh: isDedicatedHostRefreshFunc(instanceC, id, d), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isDedicatedHostRefreshFunc(instanceC *vpcv1.VpcV1, id string, d *schema.ResourceData) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getinsOptions := &vpcv1.GetDedicatedHostOptions{ + ID: &id, + } + dhost, response, err := instanceC.GetDedicatedHost(getinsOptions) + if dhost == nil || err != nil { + return nil, "", fmt.Errorf("Error getting dedicated host : %s\n%s", err, response) + } + d.Set("state", *dhost.State) + d.Set("lifecycle_state", *dhost.LifecycleState) + + if *dhost.LifecycleState == isDedicatedHostSuspended || *dhost.LifecycleState == isDedicatedHostFailed { + + return dhost, *dhost.LifecycleState, fmt.Errorf("status of dedicated host is %s : \n%s", *dhost.LifecycleState, response) + + } + return dhost, *dhost.LifecycleState, nil + } +} + +func resourceIbmIsDedicatedHostDedicatedHostDiskToMap(dedicatedHostDisk vpcv1.DedicatedHostDisk) map[string]interface{} { + dedicatedHostDiskMap := map[string]interface{}{} + + dedicatedHostDiskMap["available"] = intValue(dedicatedHostDisk.Available) + dedicatedHostDiskMap["created_at"] = dedicatedHostDisk.CreatedAt.String() + dedicatedHostDiskMap["href"] = dedicatedHostDisk.Href + dedicatedHostDiskMap["id"] = dedicatedHostDisk.ID + instanceDisks := []map[string]interface{}{} + for _, instanceDisksItem := range dedicatedHostDisk.InstanceDisks { + instanceDisksItemMap := resourceIbmIsDedicatedHostInstanceDiskReferenceToMap(instanceDisksItem) + instanceDisks = append(instanceDisks, instanceDisksItemMap) + // TODO: handle InstanceDisks of type TypeList -- list of non-primitive, not model items + } + dedicatedHostDiskMap["instance_disks"] = instanceDisks + dedicatedHostDiskMap["interface_type"] = dedicatedHostDisk.InterfaceType + if dedicatedHostDisk.LifecycleState != nil { + dedicatedHostDiskMap["lifecycle_state"] = dedicatedHostDisk.LifecycleState + } + dedicatedHostDiskMap["name"] = dedicatedHostDisk.Name + dedicatedHostDiskMap["provisionable"] = dedicatedHostDisk.Provisionable + dedicatedHostDiskMap["resource_type"] = dedicatedHostDisk.ResourceType + dedicatedHostDiskMap["size"] = intValue(dedicatedHostDisk.Size) + dedicatedHostDiskMap["supported_instance_interface_types"] = dedicatedHostDisk.SupportedInstanceInterfaceTypes + + return dedicatedHostDiskMap +} + +func resourceIbmIsDedicatedHostInstanceDiskReferenceToMap(instanceDiskReference vpcv1.InstanceDiskReference) map[string]interface{} { + instanceDiskReferenceMap := map[string]interface{}{} + + if instanceDiskReference.Deleted != nil { + DeletedMap := resourceIbmIsDedicatedHostInstanceDiskReferenceDeletedToMap(*instanceDiskReference.Deleted) + instanceDiskReferenceMap["deleted"] = []map[string]interface{}{DeletedMap} + } + instanceDiskReferenceMap["href"] = instanceDiskReference.Href + instanceDiskReferenceMap["id"] = instanceDiskReference.ID + instanceDiskReferenceMap["name"] = instanceDiskReference.Name + instanceDiskReferenceMap["resource_type"] = instanceDiskReference.ResourceType + + return instanceDiskReferenceMap +} + +func resourceIbmIsDedicatedHostInstanceDiskReferenceDeletedToMap(instanceDiskReferenceDeleted vpcv1.InstanceDiskReferenceDeleted) 
map[string]interface{} { + instanceDiskReferenceDeletedMap := map[string]interface{}{} + + instanceDiskReferenceDeletedMap["more_info"] = instanceDiskReferenceDeleted.MoreInfo + + return instanceDiskReferenceDeletedMap + } diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_dedicated_host_disk_management.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_dedicated_host_disk_management.go new file mode 100644 index 00000000000..84566dcbadf --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_dedicated_host_disk_management.go @@ -0,0 +1,155 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMISDedicatedHostDiskManagement() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMisDedicatedHostDiskManagementCreate, + Read: resourceIBMisDedicatedHostDiskManagementRead, + Update: resourceIBMisDedicatedHostDiskManagementUpdate, + Delete: resourceIBMisDedicatedHostDiskManagementDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "dedicated_host": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the dedicated host for which disks have to be managed", + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Description: "Disk information that has to be updated.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The unique identifier for this disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_dedicated_host_disk_management", "name"), + Description: "The user-defined name for this disk. 
The disk will be updated with this new name", + }, + }, + }, + }, + }, + } +} + +func resourceIBMISDedicatedHostDiskManagementValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "name", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + + ibmISDedicatedHostDiskManagementValidator := ResourceValidator{ResourceName: "ibm_is_dedicated_host_disk_management", Schema: validateSchema} + return &ibmISDedicatedHostDiskManagementValidator +} + +func resourceIBMisDedicatedHostDiskManagementCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + dedicatedhost := d.Get("dedicated_host").(string) + disks := d.Get("disks") + diskUpdate := disks.([]interface{}) + + for _, disk := range diskUpdate { + diskItem := disk.(map[string]interface{}) + namestr := diskItem["name"].(string) + diskid := diskItem["id"].(string) + + updateDedicatedHostDiskOptions := &vpcv1.UpdateDedicatedHostDiskOptions{} + updateDedicatedHostDiskOptions.SetDedicatedHostID(dedicatedhost) + updateDedicatedHostDiskOptions.SetID(diskid) + dedicatedHostDiskPatchModel := &vpcv1.DedicatedHostDiskPatch{ + Name: &namestr, + } + + dedicatedHostDiskPatch, err := dedicatedHostDiskPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for DedicatedHostDiskPatch: %s", err) + } + updateDedicatedHostDiskOptions.SetDedicatedHostDiskPatch(dedicatedHostDiskPatch) + + _, _, err = sess.UpdateDedicatedHostDisk(updateDedicatedHostDiskOptions) + if err != nil { + return fmt.Errorf("Error calling UpdateDedicatedHostDisk: %s", err) + } + + } + d.SetId(dedicatedhost) + return resourceIBMisDedicatedHostDiskManagementRead(d, meta) +} + +func resourceIBMisDedicatedHostDiskManagementUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if d.HasChange("disks") && !d.IsNewResource() { + + disks := d.Get("disks") + diskUpdate := disks.([]interface{}) + + for _, disk := range diskUpdate { + diskItem := disk.(map[string]interface{}) + namestr := diskItem["name"].(string) + diskid := diskItem["id"].(string) + updateDedicatedHostDiskOptions := &vpcv1.UpdateDedicatedHostDiskOptions{} + updateDedicatedHostDiskOptions.SetDedicatedHostID(d.Id()) + updateDedicatedHostDiskOptions.SetID(diskid) + dedicatedHostDiskPatchModel := &vpcv1.DedicatedHostDiskPatch{ + Name: &namestr, + } + + dedicatedHostDiskPatch, err := dedicatedHostDiskPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for DedicatedHostDiskPatch: %s", err) + } + updateDedicatedHostDiskOptions.SetDedicatedHostDiskPatch(dedicatedHostDiskPatch) + + _, response, err := sess.UpdateDedicatedHostDisk(updateDedicatedHostDiskOptions) + if err != nil { + return fmt.Errorf("Error updating dedicated host disk: %s %s", err, response) + } + + } + + } + return resourceIBMisDedicatedHostDiskManagementRead(d, meta) +} + +func resourceIBMisDedicatedHostDiskManagementDelete(d *schema.ResourceData, meta interface{}) error { + + d.SetId("") + return nil +} + +func resourceIBMisDedicatedHostDiskManagementRead(d *schema.ResourceData, meta interface{}) error { + + d.Set("dedicated_host", d.Id()) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_dedicated_host_group.go 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_dedicated_host_group.go new file mode 100644 index 00000000000..cdd305773fb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_dedicated_host_group.go @@ -0,0 +1,453 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func resourceIbmIsDedicatedHostGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceIbmIsDedicatedHostGroupCreate, + Read: resourceIbmIsDedicatedHostGroupRead, + Update: resourceIbmIsDedicatedHostGroupUpdate, + Delete: resourceIbmIsDedicatedHostGroupDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "class": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The dedicated host profile class for hosts in this group.", + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: InvokeValidator("ibm_is_dedicated_host_group", "family"), + Description: "The dedicated host profile family for hosts in this group.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator("ibm_is_dedicated_host_group", "name"), + Description: "The unique user-defined name for this dedicated host group. If unspecified, the name will be a hyphenated list of randomly-selected words.", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: "The unique identifier of the resource group to use. 
If unspecified, the account's [default resource group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used.", + }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The globally unique name of the zone this dedicated host group will reside in.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the dedicated host group was created.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host group.", + }, + "dedicated_hosts": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The dedicated hosts that are in this dedicated host group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host.", + }, + "deleted": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted and provides some supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this dedicated host.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for this dedicated host. 
If unspecified, the name will be a hyphenated list of randomly-selected words.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + }, + }, + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host group.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of resource referenced.", + }, + "supported_instance_profiles": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of instance profiles that can be used by instances placed on this dedicated host group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this virtual server instance profile.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this virtual server instance profile.", + }, + }, + }, + }, + }, + } +} + +func resourceIbmIsDedicatedHostGroupValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "family", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Optional: true, + AllowedValues: "balanced, compute, memory", + }) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "name", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63, + }) + + resourceValidator := ResourceValidator{ResourceName: "ibm_is_dedicated_host_group", Schema: validateSchema} + return &resourceValidator +} + +func resourceIbmIsDedicatedHostGroupCreate(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + createDedicatedHostGroupOptions := &vpcv1.CreateDedicatedHostGroupOptions{} + + if class, ok := d.GetOk("class"); ok { + createDedicatedHostGroupOptions.SetClass(class.(string)) + } + if family, ok := d.GetOk("family"); ok { + createDedicatedHostGroupOptions.SetFamily(family.(string)) + } + if name, ok := d.GetOk("name"); ok { + createDedicatedHostGroupOptions.SetName(name.(string)) + } + if resgroup, ok := d.GetOk("resource_group"); ok { + resgroupstr := resgroup.(string) + resourceGroup := vpcv1.ResourceGroupIdentity{ + ID: &resgroupstr, + } + createDedicatedHostGroupOptions.SetResourceGroup(&resourceGroup) + } + if zone, ok := d.GetOk("zone"); ok { + zonestr := zone.(string) + zoneidentity := vpcv1.ZoneIdentity{ + Name: &zonestr, + } + createDedicatedHostGroupOptions.SetZone(&zoneidentity) + } + + dedicatedHostGroup, response, err := vpcClient.CreateDedicatedHostGroupWithContext(context.TODO(), createDedicatedHostGroupOptions) + if err != nil { + log.Printf("[DEBUG] CreateDedicatedHostGroupWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*dedicatedHostGroup.ID) + + return resourceIbmIsDedicatedHostGroupRead(d, meta) +} + +func resourceIbmIsDedicatedHostGroupMapToResourceGroupIdentity(resourceGroupIdentityMap map[string]interface{}) vpcv1.ResourceGroupIdentity { + resourceGroupIdentity := vpcv1.ResourceGroupIdentity{} + + if resourceGroupIdentityMap["id"] != nil { + resourceGroupIdentity.ID = core.StringPtr(resourceGroupIdentityMap["id"].(string)) + } + + 
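+ // Identity fields are pointers in the SDK, so ID was only set above when
+ // the schema map actually carried a value; otherwise the identity is left
+ // empty rather than sending an empty string to the API.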
return resourceGroupIdentity +} + +func resourceIbmIsDedicatedHostGroupMapToResourceGroupIdentityByID(resourceGroupIdentityByIDMap map[string]interface{}) vpcv1.ResourceGroupIdentityByID { + resourceGroupIdentityByID := vpcv1.ResourceGroupIdentityByID{} + + resourceGroupIdentityByID.ID = core.StringPtr(resourceGroupIdentityByIDMap["id"].(string)) + + return resourceGroupIdentityByID +} + +func resourceIbmIsDedicatedHostGroupRead(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + getDedicatedHostGroupOptions := &vpcv1.GetDedicatedHostGroupOptions{} + + getDedicatedHostGroupOptions.SetID(d.Id()) + + dedicatedHostGroup, response, err := vpcClient.GetDedicatedHostGroupWithContext(context.TODO(), getDedicatedHostGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetDedicatedHostGroupWithContext failed %s\n%s", err, response) + return err + } + + if err = d.Set("class", dedicatedHostGroup.Class); err != nil { + return fmt.Errorf("Error setting class: %s", err) + } + if err = d.Set("family", dedicatedHostGroup.Family); err != nil { + return fmt.Errorf("Error setting family: %s", err) + } + if err = d.Set("name", dedicatedHostGroup.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if dedicatedHostGroup.ResourceGroup != nil { + resourceGroupID := *dedicatedHostGroup.ResourceGroup.ID + if err = d.Set("resource_group", resourceGroupID); err != nil { + return fmt.Errorf("Error setting resource_group: %s", err) + } + } + if dedicatedHostGroup.Zone != nil { + zoneName := *dedicatedHostGroup.Zone.Name + if err = d.Set("zone", zoneName); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + } + if err = d.Set("created_at", dedicatedHostGroup.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + if err = d.Set("crn", dedicatedHostGroup.CRN); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + dedicatedHosts := []map[string]interface{}{} + for _, dedicatedHostsItem := range dedicatedHostGroup.DedicatedHosts { + dedicatedHostsItemMap := resourceIbmIsDedicatedHostGroupDedicatedHostReferenceToMap(dedicatedHostsItem) + dedicatedHosts = append(dedicatedHosts, dedicatedHostsItemMap) + } + if err = d.Set("dedicated_hosts", dedicatedHosts); err != nil { + return fmt.Errorf("Error setting dedicated_hosts: %s", err) + } + if err = d.Set("href", dedicatedHostGroup.Href); err != nil { + return fmt.Errorf("Error setting href: %s", err) + } + if err = d.Set("resource_type", dedicatedHostGroup.ResourceType); err != nil { + return fmt.Errorf("Error setting resource_type: %s", err) + } + supportedInstanceProfiles := []map[string]interface{}{} + for _, supportedInstanceProfilesItem := range dedicatedHostGroup.SupportedInstanceProfiles { + supportedInstanceProfilesItemMap := resourceIbmIsDedicatedHostGroupInstanceProfileReferenceToMap(supportedInstanceProfilesItem) + supportedInstanceProfiles = append(supportedInstanceProfiles, supportedInstanceProfilesItemMap) + } + if err = d.Set("supported_instance_profiles", supportedInstanceProfiles); err != nil { + return fmt.Errorf("Error setting supported_instance_profiles: %s", err) + } + + return nil +} + +func resourceIbmIsDedicatedHostGroupResourceGroupIdentityToMap(resourceGroupIdentity vpcv1.ResourceGroupIdentity) map[string]interface{} { + resourceGroupIdentityMap := map[string]interface{}{} + + 
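+ // Inverse of the MapTo helper above: flatten the SDK identity model back
+ // into the primitive map shape that Terraform keeps in state.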
resourceGroupIdentityMap["id"] = resourceGroupIdentity.ID + + return resourceGroupIdentityMap +} + +func resourceIbmIsDedicatedHostGroupResourceGroupIdentityByIDToMap(resourceGroupIdentityByID vpcv1.ResourceGroupIdentityByID) map[string]interface{} { + resourceGroupIdentityByIDMap := map[string]interface{}{} + + resourceGroupIdentityByIDMap["id"] = resourceGroupIdentityByID.ID + + return resourceGroupIdentityByIDMap +} + +func resourceIbmIsDedicatedHostGroupZoneIdentityToMap(zoneIdentity vpcv1.ZoneIdentity) map[string]interface{} { + zoneIdentityMap := map[string]interface{}{} + + zoneIdentityMap["name"] = zoneIdentity.Name + zoneIdentityMap["href"] = zoneIdentity.Href + + return zoneIdentityMap +} + +func resourceIbmIsDedicatedHostGroupZoneIdentityByNameToMap(zoneIdentityByName vpcv1.ZoneIdentityByName) map[string]interface{} { + zoneIdentityByNameMap := map[string]interface{}{} + + zoneIdentityByNameMap["name"] = zoneIdentityByName.Name + + return zoneIdentityByNameMap +} + +func resourceIbmIsDedicatedHostGroupZoneIdentityByHrefToMap(zoneIdentityByHref vpcv1.ZoneIdentityByHref) map[string]interface{} { + zoneIdentityByHrefMap := map[string]interface{}{} + + zoneIdentityByHrefMap["href"] = zoneIdentityByHref.Href + + return zoneIdentityByHrefMap +} + +func resourceIbmIsDedicatedHostGroupDedicatedHostReferenceToMap(dedicatedHostReference vpcv1.DedicatedHostReference) map[string]interface{} { + dedicatedHostReferenceMap := map[string]interface{}{} + + dedicatedHostReferenceMap["crn"] = dedicatedHostReference.CRN + if dedicatedHostReference.Deleted != nil { + DeletedMap := resourceIbmIsDedicatedHostGroupDedicatedHostReferenceDeletedToMap(*dedicatedHostReference.Deleted) + dedicatedHostReferenceMap["deleted"] = []map[string]interface{}{DeletedMap} + } + dedicatedHostReferenceMap["href"] = dedicatedHostReference.Href + dedicatedHostReferenceMap["id"] = dedicatedHostReference.ID + dedicatedHostReferenceMap["name"] = dedicatedHostReference.Name + dedicatedHostReferenceMap["resource_type"] = dedicatedHostReference.ResourceType + + return dedicatedHostReferenceMap +} + +func resourceIbmIsDedicatedHostGroupDedicatedHostReferenceDeletedToMap(dedicatedHostReferenceDeleted vpcv1.DedicatedHostReferenceDeleted) map[string]interface{} { + dedicatedHostReferenceDeletedMap := map[string]interface{}{} + + dedicatedHostReferenceDeletedMap["more_info"] = dedicatedHostReferenceDeleted.MoreInfo + + return dedicatedHostReferenceDeletedMap +} + +func resourceIbmIsDedicatedHostGroupInstanceProfileReferenceToMap(instanceProfileReference vpcv1.InstanceProfileReference) map[string]interface{} { + instanceProfileReferenceMap := map[string]interface{}{} + + instanceProfileReferenceMap["href"] = instanceProfileReference.Href + instanceProfileReferenceMap["name"] = instanceProfileReference.Name + + return instanceProfileReferenceMap +} + +func resourceIbmIsDedicatedHostGroupUpdate(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + updateDedicatedHostGroupOptions := &vpcv1.UpdateDedicatedHostGroupOptions{} + + updateDedicatedHostGroupOptions.SetID(d.Id()) + + hasChange := false + + if d.HasChange("name") { + groupnamestr := d.Get("name").(string) + dedicatedHostGroupPatchModel := vpcv1.DedicatedHostGroupPatch{ + Name: &groupnamestr, + } + dedicatedHostGroupPatch, err := dedicatedHostGroupPatchModel.AsPatch() + if err != nil { + log.Printf("[DEBUG] Error calling asPatch for DedicatedHostGroupPatch: %s", err) + return err + } + 
updateDedicatedHostGroupOptions.DedicatedHostGroupPatch = dedicatedHostGroupPatch + hasChange = true + } + + if hasChange { + _, response, err := vpcClient.UpdateDedicatedHostGroupWithContext(context.TODO(), updateDedicatedHostGroupOptions) + if err != nil { + log.Printf("[DEBUG] UpdateDedicatedHostGroupWithContext failed %s\n%s", err, response) + return err + } + } + + return resourceIbmIsDedicatedHostGroupRead(d, meta) +} + +func resourceIbmIsDedicatedHostGroupDelete(d *schema.ResourceData, meta interface{}) error { + vpcClient, err := meta.(ClientSession).VpcV1API() + if err != nil { + return err + } + + getDedicatedHostGroupOptions := &vpcv1.GetDedicatedHostGroupOptions{} + + getDedicatedHostGroupOptions.SetID(d.Id()) + + _, response, err := vpcClient.GetDedicatedHostGroupWithContext(context.TODO(), getDedicatedHostGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetDedicatedHostGroupWithContext failed %s\n%s", err, response) + return err + } + + deleteDedicatedHostGroupOptions := &vpcv1.DeleteDedicatedHostGroupOptions{} + + deleteDedicatedHostGroupOptions.SetID(d.Id()) + + response, err = vpcClient.DeleteDedicatedHostGroupWithContext(context.TODO(), deleteDedicatedHostGroupOptions) + if err != nil { + log.Printf("[DEBUG] DeleteDedicatedHostGroupWithContext failed %s\n%s", err, response) + return err + } + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_floating_ip.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_floating_ip.go new file mode 100644 index 00000000000..0fa5045b74b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_floating_ip.go @@ -0,0 +1,826 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
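Every resource in this diff that waits on an asynchronous lifecycle transition (the dedicated hosts above, the floating IPs and flow logs below) uses the same polling idiom: a resource.StateChangeConf with Pending/Target state lists and a Refresh function that maps a GET result, including a 404, onto a state string. The following is a minimal, self-contained sketch of that idiom; waitForGone and the inline fetch closure are illustrative names rather than provider code, and the sketch shortens the 10-second Delay/MinTimeout the provider uses so it runs quickly.

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// waitForGone condenses isWaitForDedicatedHostDelete / isWaitForFloatingIPDeleted:
// keep polling fetch while it reports a pending state, and stop once it reports
// a terminal one or the timeout elapses.
func waitForGone(fetch resource.StateRefreshFunc, timeout time.Duration) (interface{}, error) {
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"deleting"}, // states that keep the loop polling
		Target:     []string{"done", ""}, // terminal states that end the wait
		Refresh:    fetch,
		Timeout:    timeout,
		Delay:      time.Second, // the provider uses 10 * time.Second here
		MinTimeout: time.Second, // and here
	}
	return stateConf.WaitForState()
}

func main() {
	polls := 0
	// Stand-in refresh function; the real resources call GetDedicatedHost or
	// GetFloatingIP here and translate a 404 response into the terminal state.
	fetch := func() (interface{}, string, error) {
		polls++
		if polls < 3 {
			return struct{}{}, "deleting", nil
		}
		return struct{}{}, "done", nil
	}
	if _, err := waitForGone(fetch, time.Minute); err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	fmt.Printf("reached terminal state after %d polls\n", polls)
}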
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isFloatingIPAddress = "address" + isFloatingIPName = "name" + isFloatingIPStatus = "status" + isFloatingIPZone = "zone" + isFloatingIPTarget = "target" + isFloatingIPResourceGroup = "resource_group" + isFloatingIPTags = "tags" + + isFloatingIPPending = "pending" + isFloatingIPAvailable = "available" + isFloatingIPDeleting = "deleting" + isFloatingIPDeleted = "done" +) + +func resourceIBMISFloatingIP() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISFloatingIPCreate, + Read: resourceIBMISFloatingIPRead, + Update: resourceIBMISFloatingIPUpdate, + Delete: resourceIBMISFloatingIPDelete, + Exists: resourceIBMISFloatingIPExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + isFloatingIPAddress: { + Type: schema.TypeString, + Computed: true, + Description: "Floating IP address", + }, + + isFloatingIPName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_floating_ip", isFloatingIPName), + Description: "Name of the floating IP", + }, + + isFloatingIPStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Floating IP status", + }, + + isFloatingIPZone: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + ConflictsWith: []string{isFloatingIPTarget}, + Description: "Zone name", + }, + + isFloatingIPTarget: { + Type: schema.TypeString, + ForceNew: false, + Optional: true, + Computed: true, + ConflictsWith: []string{isFloatingIPZone}, + Description: "Target info", + }, + + isFloatingIPResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "Resource group info", + }, + + isFloatingIPTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_floating_ip", "tag")}, + Set: resourceIBMVPCHash, + Description: "Floating IP tags", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func classicVpcClient(meta interface{}) (*vpcclassicv1.VpcClassicV1, error) { + sess, err := meta.(ClientSession).VpcClassicV1API() + return sess, err +} + +func vpcClient(meta interface{}) (*vpcv1.VpcV1, 
error) { + sess, err := meta.(ClientSession).VpcV1API() + return sess, err +} + +func resourceIBMISFloatingIPValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isFloatingIPName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISFloatingIPResourceValidator := ResourceValidator{ResourceName: "ibm_is_floating_ip", Schema: validateSchema} + return &ibmISFloatingIPResourceValidator +} + +func resourceIBMISFloatingIPCreate(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + name := d.Get(isFloatingIPName).(string) + if userDetails.generation == 1 { + err := classicFipCreate(d, meta, name) + if err != nil { + return err + } + } else { + err := fipCreate(d, meta, name) + if err != nil { + return err + } + } + + return resourceIBMISFloatingIPRead(d, meta) +} +func classicFipCreate(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + floatingIPPrototype := &vpcclassicv1.FloatingIPPrototype{ + Name: &name, + } + zone, target := "", "" + + if zn, ok := d.GetOk(isFloatingIPZone); ok { + zone = zn.(string) + floatingIPPrototype.Zone = &vpcclassicv1.ZoneIdentity{ + Name: &zone, + } + } + + if tgt, ok := d.GetOk(isFloatingIPTarget); ok { + target = tgt.(string) + floatingIPPrototype.Target = &vpcclassicv1.FloatingIPByTargetNetworkInterfaceIdentity{ + ID: &target, + } + } + + if zone == "" && target == "" { + return fmt.Errorf("%s or %s need to be provided", isFloatingIPZone, isFloatingIPTarget) + } + + createFloatingIPOptions := &vpcclassicv1.CreateFloatingIPOptions{ + FloatingIPPrototype: floatingIPPrototype, + } + floatingip, response, err := sess.CreateFloatingIP(createFloatingIPOptions) + if err != nil { + return fmt.Errorf("[DEBUG] Floating IP err %s\n%s", err, response) + } + d.SetId(*floatingip.ID) + log.Printf("[INFO] Floating IP : %s[%s]", *floatingip.ID, *floatingip.Address) + _, err = isWaitForClassicInstanceFloatingIP(sess, d.Id(), d) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isFloatingIPTags); ok || v != "" { + oldList, newList := d.GetChange(isFloatingIPTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *floatingip.CRN) + if err != nil { + log.Printf( + "Error on create of vpc Floating IP (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func fipCreate(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + floatingIPPrototype := &vpcv1.FloatingIPPrototype{ + Name: &name, + } + zone, target := "", "" + if zn, ok := d.GetOk(isFloatingIPZone); ok { + zone = zn.(string) + floatingIPPrototype.Zone = &vpcv1.ZoneIdentity{ + Name: &zone, + } + } + + if tgt, ok := d.GetOk(isFloatingIPTarget); ok { + target = tgt.(string) + floatingIPPrototype.Target = &vpcv1.FloatingIPByTargetNetworkInterfaceIdentity{ + ID: &target, + } + } + + if zone == "" && target == "" { + return fmt.Errorf("%s or %s need to be 
provided", isFloatingIPZone, isFloatingIPTarget) + } + + if rgrp, ok := d.GetOk(isFloatingIPResourceGroup); ok { + rg := rgrp.(string) + floatingIPPrototype.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + + createFloatingIPOptions := &vpcv1.CreateFloatingIPOptions{ + FloatingIPPrototype: floatingIPPrototype, + } + + floatingip, response, err := sess.CreateFloatingIP(createFloatingIPOptions) + if err != nil { + return fmt.Errorf("[DEBUG] Floating IP err %s\n%s", err, response) + } + d.SetId(*floatingip.ID) + log.Printf("[INFO] Floating IP : %s[%s]", *floatingip.ID, *floatingip.Address) + _, err = isWaitForInstanceFloatingIP(sess, d.Id(), d) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isFloatingIPTags); ok || v != "" { + oldList, newList := d.GetChange(isFloatingIPTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *floatingip.CRN) + if err != nil { + log.Printf( + "Error on create of vpc Floating IP (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func resourceIBMISFloatingIPRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + id := d.Id() + if userDetails.generation == 1 { + err := classicFipGet(d, meta, id) + if err != nil { + return err + } + } else { + err := fipGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicFipGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getFloatingIPOptions := &vpcclassicv1.GetFloatingIPOptions{ + ID: &id, + } + floatingip, response, err := sess.GetFloatingIP(getFloatingIPOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Floating IP (%s): %s\n%s", id, err, response) + + } + d.Set(isFloatingIPName, *floatingip.Name) + d.Set(isFloatingIPAddress, *floatingip.Address) + d.Set(isFloatingIPStatus, *floatingip.Status) + d.Set(isFloatingIPZone, *floatingip.Zone.Name) + target, ok := floatingip.Target.(*vpcclassicv1.FloatingIPTarget) + if ok { + d.Set(isFloatingIPTarget, target.ID) + } + tags, err := GetTagsUsingCRN(meta, *floatingip.CRN) + if err != nil { + log.Printf( + "Error on get of vpc Floating IP (%s) tags: %s", d.Id(), err) + } + d.Set(isFloatingIPTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/floatingIPs") + d.Set(ResourceName, *floatingip.Name) + d.Set(ResourceCRN, *floatingip.CRN) + d.Set(ResourceStatus, *floatingip.Status) + return nil +} + +func fipGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getFloatingIPOptions := &vpcv1.GetFloatingIPOptions{ + ID: &id, + } + floatingip, response, err := sess.GetFloatingIP(getFloatingIPOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Floating IP (%s): %s\n%s", id, err, response) + + } + d.Set(isFloatingIPName, *floatingip.Name) + d.Set(isFloatingIPAddress, *floatingip.Address) + d.Set(isFloatingIPStatus, *floatingip.Status) + d.Set(isFloatingIPZone, *floatingip.Zone.Name) + target, ok := floatingip.Target.(*vpcv1.FloatingIPTarget) + if ok { + d.Set(isFloatingIPTarget, target.ID) + } + tags, err := GetTagsUsingCRN(meta, *floatingip.CRN) + if 
err != nil { + log.Printf( + "Error on get of vpc Floating IP (%s) tags: %s", d.Id(), err) + } + d.Set(isFloatingIPTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/floatingIPs") + d.Set(ResourceName, *floatingip.Name) + d.Set(ResourceCRN, *floatingip.CRN) + d.Set(ResourceStatus, *floatingip.Status) + if floatingip.ResourceGroup != nil { + d.Set(ResourceGroupName, *floatingip.ResourceGroup.Name) + d.Set(isFloatingIPResourceGroup, *floatingip.ResourceGroup.ID) + } + return nil +} + +func resourceIBMISFloatingIPUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + id := d.Id() + if userDetails.generation == 1 { + err := classicFipUpdate(d, meta, id) + if err != nil { + return err + } + } else { + err := fipUpdate(d, meta, id) + if err != nil { + return err + } + } + return resourceIBMISFloatingIPRead(d, meta) +} + +func classicFipUpdate(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isFloatingIPTags) { + options := &vpcclassicv1.GetFloatingIPOptions{ + ID: &id, + } + fip, response, err := sess.GetFloatingIP(options) + if err != nil { + return fmt.Errorf("Error getting Floating IP: %s\n%s", err, response) + } + oldList, newList := d.GetChange(isFloatingIPTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *fip.CRN) + if err != nil { + log.Printf( + "Error on update of vpc Floating IP (%s) tags: %s", id, err) + } + } + hasChanged := false + options := &vpcclassicv1.UpdateFloatingIPOptions{ + ID: &id, + } + floatingIPPatchModel := &vpcclassicv1.FloatingIPPatch{} + if d.HasChange(isFloatingIPName) { + name := d.Get(isFloatingIPName).(string) + floatingIPPatchModel.Name = &name + hasChanged = true + floatingIPPatch, err := floatingIPPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for FloatingIPPatch: %s", err) + } + options.FloatingIPPatch = floatingIPPatch + } + + if d.HasChange(isFloatingIPTarget) { + target := d.Get(isFloatingIPTarget).(string) + floatingIPPatchModel.Target = &vpcclassicv1.FloatingIPPatchTargetNetworkInterfaceIdentity{ + ID: &target, + } + hasChanged = true + floatingIPPatch, err := floatingIPPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for FloatingIPPatch: %s", err) + } + options.FloatingIPPatch = floatingIPPatch + } + + if hasChanged { + _, response, err := sess.UpdateFloatingIP(options) + if err != nil { + return fmt.Errorf("Error updating vpc Floating IP: %s\n%s", err, response) + } + } + return nil +} + +func fipUpdate(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isFloatingIPTags) { + options := &vpcv1.GetFloatingIPOptions{ + ID: &id, + } + fip, response, err := sess.GetFloatingIP(options) + if err != nil { + return fmt.Errorf("Error getting Floating IP: %s\n%s", err, response) + } + oldList, newList := d.GetChange(isFloatingIPTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *fip.CRN) + if err != nil { + log.Printf( + "Error on update of vpc Floating IP (%s) tags: %s", id, err) + } + } + hasChanged := false + options := &vpcv1.UpdateFloatingIPOptions{ + ID: &id, + } + floatingIPPatchModel := &vpcv1.FloatingIPPatch{} + if d.HasChange(isFloatingIPName) { + name := 
d.Get(isFloatingIPName).(string) + floatingIPPatchModel.Name = &name + hasChanged = true + floatingIPPatch, err := floatingIPPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for FloatingIPPatch: %s", err) + } + options.FloatingIPPatch = floatingIPPatch + } + + if d.HasChange(isFloatingIPTarget) { + target := d.Get(isFloatingIPTarget).(string) + floatingIPPatchModel.Target = &vpcv1.FloatingIPPatchTargetNetworkInterfaceIdentity{ + ID: &target, + } + hasChanged = true + floatingIPPatch, err := floatingIPPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for floatingIPPatch: %s", err) + } + options.FloatingIPPatch = floatingIPPatch + } + if hasChanged { + _, response, err := sess.UpdateFloatingIP(options) + if err != nil { + return fmt.Errorf("Error updating vpc Floating IP: %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISFloatingIPDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicFipDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := fipDelete(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicFipDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getFloatingIpOptions := &vpcclassicv1.GetFloatingIPOptions{ + ID: &id, + } + _, response, err := sess.GetFloatingIP(getFloatingIpOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + + return fmt.Errorf("Error Getting Floating IP (%s): %s\n%s", id, err, response) + + } + + options := &vpcclassicv1.DeleteFloatingIPOptions{ + ID: &id, + } + response, err = sess.DeleteFloatingIP(options) + if err != nil { + return fmt.Errorf("Error Deleting Floating IP : %s\n%s", err, response) + } + _, err = isWaitForClassicFloatingIPDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func fipDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getFloatingIpOptions := &vpcv1.GetFloatingIPOptions{ + ID: &id, + } + _, response, err := sess.GetFloatingIP(getFloatingIpOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + + return fmt.Errorf("Error Getting Floating IP (%s): %s\n%s", id, err, response) + } + + options := &vpcv1.DeleteFloatingIPOptions{ + ID: &id, + } + response, err = sess.DeleteFloatingIP(options) + if err != nil { + return fmt.Errorf("Error Deleting Floating IP : %s\n%s", err, response) + } + _, err = isWaitForFloatingIPDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceIBMISFloatingIPExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + + if userDetails.generation == 1 { + exists, err := classicFipExists(d, meta, id) + return exists, err + } else { + exists, err := fipExists(d, meta, id) + return exists, err + } +} + +func classicFipExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getFloatingIpOptions := 
&vpcclassicv1.GetFloatingIPOptions{ + ID: &id, + } + _, response, err := sess.GetFloatingIP(getFloatingIpOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting floating IP: %s\n%s", err, response) + } + return true, nil +} + +func fipExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getFloatingIpOptions := &vpcv1.GetFloatingIPOptions{ + ID: &id, + } + _, response, err := sess.GetFloatingIP(getFloatingIpOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting floating IP: %s\n%s", err, response) + } + return true, nil +} + +func isWaitForClassicFloatingIPDeleted(fip *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for FloatingIP (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isFloatingIPPending, isFloatingIPDeleting}, + Target: []string{"", isFloatingIPDeleted}, + Refresh: isClassicFloatingIPDeleteRefreshFunc(fip, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicFloatingIPDeleteRefreshFunc(fip *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getfipoptions := &vpcclassicv1.GetFloatingIPOptions{ + ID: &id, + } + FloatingIP, response, err := fip.GetFloatingIP(getfipoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return FloatingIP, isFloatingIPDeleted, nil + } + return FloatingIP, "", fmt.Errorf("Error Getting Floating IP: %s\n%s", err, response) + } + return FloatingIP, isFloatingIPDeleting, err + } +} + +func isWaitForFloatingIPDeleted(fip *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for FloatingIP (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isFloatingIPPending, isFloatingIPDeleting}, + Target: []string{"", isFloatingIPDeleted}, + Refresh: isFloatingIPDeleteRefreshFunc(fip, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isFloatingIPDeleteRefreshFunc(fip *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getfipoptions := &vpcv1.GetFloatingIPOptions{ + ID: &id, + } + FloatingIP, response, err := fip.GetFloatingIP(getfipoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return FloatingIP, isFloatingIPDeleted, nil + } + return FloatingIP, "", fmt.Errorf("Error Getting Floating IP: %s\n%s", err, response) + } + return FloatingIP, isFloatingIPDeleting, err + } +} + +func isWaitForClassicInstanceFloatingIP(floatingipC *vpcclassicv1.VpcClassicV1, id string, d *schema.ResourceData) (interface{}, error) { + log.Printf("Waiting for floating IP (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isFloatingIPPending}, + Target: []string{isFloatingIPAvailable, ""}, + Refresh: isClassicInstanceFloatingIPRefreshFunc(floatingipC, id), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return 
stateConf.WaitForState() +} + +func isClassicInstanceFloatingIPRefreshFunc(floatingipC *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getfipoptions := &vpcclassicv1.GetFloatingIPOptions{ + ID: &id, + } + instance, response, err := floatingipC.GetFloatingIP(getfipoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Floating IP for the instance: %s\n%s", err, response) + } + + if *instance.Status == "available" { + return instance, isFloatingIPAvailable, nil + } + + return instance, isFloatingIPPending, nil + } +} + +func isWaitForInstanceFloatingIP(floatingipC *vpcv1.VpcV1, id string, d *schema.ResourceData) (interface{}, error) { + log.Printf("Waiting for floating IP (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isFloatingIPPending}, + Target: []string{isFloatingIPAvailable, ""}, + Refresh: isInstanceFloatingIPRefreshFunc(floatingipC, id), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isInstanceFloatingIPRefreshFunc(floatingipC *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getfipoptions := &vpcv1.GetFloatingIPOptions{ + ID: &id, + } + instance, response, err := floatingipC.GetFloatingIP(getfipoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Floating IP for the instance: %s\n%s", err, response) + } + + if *instance.Status == "available" { + return instance, isFloatingIPAvailable, nil + } + + return instance, isFloatingIPPending, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_flow_log.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_flow_log.go new file mode 100644 index 00000000000..a68cf217df7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_flow_log.go @@ -0,0 +1,420 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
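The flow log resource that follows, like fipUpdate and the dedicated host update paths above, funnels every change through a typed patch model and AsPatch. As a concrete illustration, this short sketch builds the same kind of patch body that fipUpdate attaches to UpdateFloatingIPOptions; buildFloatingIPPatch and its values are illustrative, not provider code.

package main

import (
	"fmt"

	"github.com/IBM/vpc-go-sdk/vpcv1"
)

// buildFloatingIPPatch mirrors the patch construction in fipUpdate: set only
// the fields that changed on the typed model, then let AsPatch emit the map
// that UpdateFloatingIPOptions.FloatingIPPatch expects.
func buildFloatingIPPatch(name string) (map[string]interface{}, error) {
	patchModel := &vpcv1.FloatingIPPatch{Name: &name}
	// Only explicitly set fields survive AsPatch, so the resulting update
	// leaves every other attribute of the floating IP untouched.
	return patchModel.AsPatch()
}

func main() {
	patch, err := buildFloatingIPPatch("demo-fip")
	if err != nil {
		fmt.Println("AsPatch failed:", err)
		return
	}
	fmt.Println(patch) // map[name:demo-fip]
}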
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isFlowLogName = "name" + isFlowLogActive = "active" + isFlowLogStorageBucket = "storage_bucket" + isFlowLogStorageBucketEndPoint = "endpoint" + isFlowLogTarget = "target" + isFlowLogResourceGroup = "resource_group" + isFlowLogTargetType = "resource_type" + isFlowLogCreatedAt = "created_at" + isFlowLogCrn = "crn" + isFlowLogLifecycleState = "lifecycle_state" + isFlowLogHref = "href" + isFlowLogAutoDelete = "auto_delete" + isFlowLogVpc = "vpc" + isFlowLogTags = "tags" +) + +func resourceIBMISFlowLog() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISFlowLogCreate, + Read: resourceIBMISFlowLogRead, + Update: resourceIBMISFlowLogUpdate, + Delete: resourceIBMISFlowLogDelete, + Exists: resourceIBMISFlowLogExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + isFlowLogName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + Description: "Flow Log Collector name", + ValidateFunc: InvokeValidator("ibm_is_flow_log", isFlowLogName), + }, + + isFlowLogStorageBucket: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The Cloud Object Storage bucket name where the collected flows will be logged", + }, + + isFlowLogTarget: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The target id that the flow log collector is to collect flow logs", + }, + + isFlowLogActive: { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Indicates whether this collector is active", + }, + + isFlowLogResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "The resource group of flow log", + }, + + isFlowLogCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this flow log collector", + }, + + isFlowLogHref: { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this flow log collector", + }, + + isFlowLogCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time flow log was created", + }, + + isFlowLogVpc: { + Type: schema.TypeString, + Computed: true, + Description: "The VPC this flow log collector is associated with", + }, + + isFlowLogAutoDelete: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, this flow log collector will be automatically deleted when the target is deleted", + }, + + isFlowLogLifecycleState: { + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of the flow log collector", + }, + + isFlowLogTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_flow_log", "tag")}, + Set: resourceIBMVPCHash, + Description: "Tags for the VPC Flow logs", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the 
IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMISFlowLogValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isFlowLogName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISFlowLogValidator := ResourceValidator{ResourceName: "ibm_is_flow_log", Schema: validateSchema} + return &ibmISFlowLogValidator +} + +func resourceIBMISFlowLogCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + createFlowLogCollectorOptionsModel := &vpcv1.CreateFlowLogCollectorOptions{} + name := d.Get(isFlowLogName).(string) + createFlowLogCollectorOptionsModel.Name = &name + if _, ok := d.GetOk(isFlowLogResourceGroup); ok { + group := d.Get(isFlowLogResourceGroup).(string) + resourceGroupIdentityModel := new(vpcv1.ResourceGroupIdentityByID) + resourceGroupIdentityModel.ID = &group + createFlowLogCollectorOptionsModel.ResourceGroup = resourceGroupIdentityModel + } + + if v, ok := d.GetOkExists(isFlowLogActive); ok { + active := v.(bool) + createFlowLogCollectorOptionsModel.Active = &active + } + + target := d.Get(isFlowLogTarget).(string) + FlowLogCollectorTargetModel := &vpcv1.FlowLogCollectorTargetPrototype{} + FlowLogCollectorTargetModel.ID = &target + createFlowLogCollectorOptionsModel.Target = FlowLogCollectorTargetModel + + bucketname := d.Get(isFlowLogStorageBucket).(string) + cloudObjectStorageBucketIdentityModel := new(vpcv1.CloudObjectStorageBucketIdentityByName) + cloudObjectStorageBucketIdentityModel.Name = &bucketname + createFlowLogCollectorOptionsModel.StorageBucket = cloudObjectStorageBucketIdentityModel + + flowlogCollector, response, err := sess.CreateFlowLogCollector(createFlowLogCollectorOptionsModel) + if err != nil { + return fmt.Errorf("Create Flow Log Collector err %s\n%s", err, response) + } + d.SetId(*flowlogCollector.ID) + + log.Printf("Flow log collector : %s", *flowlogCollector.ID) + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isFlowLogTags); ok || v != "" { + oldList, newList := d.GetChange(isFlowLogTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *flowlogCollector.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc flow log (%s) tags: %s", d.Id(), err) + } + } + + return resourceIBMISFlowLogRead(d, meta) +} + +func resourceIBMISFlowLogRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + ID := d.Id() + + getOptions := &vpcv1.GetFlowLogCollectorOptions{ + ID: &ID, + } + 
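+ // Unlike the dedicated host resources above, a 404 on this GET surfaces
+ // as an error rather than clearing the ID; the not-found case for this
+ // resource is handled in resourceIBMISFlowLogExists below.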
flowlogCollector, response, err := sess.GetFlowLogCollector(getOptions) + if err != nil { + return fmt.Errorf("Error Getting Flow Log Collector: %s\n%s", err, response) + } + + if flowlogCollector.Name != nil { + d.Set(isFlowLogName, *flowlogCollector.Name) + } + + if flowlogCollector.Active != nil { + d.Set(isFlowLogActive, *flowlogCollector.Active) + } + + if flowlogCollector.CreatedAt != nil { + d.Set(isFlowLogCreatedAt, flowlogCollector.CreatedAt.String()) + } + + if flowlogCollector.Href != nil { + d.Set(isFlowLogHref, *flowlogCollector.Href) + } + + if flowlogCollector.CRN != nil { + d.Set(isFlowLogCrn, *flowlogCollector.CRN) + } + + if flowlogCollector.LifecycleState != nil { + d.Set(isFlowLogLifecycleState, *flowlogCollector.LifecycleState) + } + + if flowlogCollector.VPC != nil { + d.Set(isFlowLogVpc, *flowlogCollector.VPC.ID) + } + + if flowlogCollector.Target != nil { + targetIntf := flowlogCollector.Target + target := targetIntf.(*vpcv1.FlowLogCollectorTarget) + d.Set(isFlowLogTarget, *target.ID) + } + + if flowlogCollector.StorageBucket != nil { + bucket := flowlogCollector.StorageBucket + d.Set(isFlowLogStorageBucket, *bucket.Name) + } + + tags, err := GetTagsUsingCRN(meta, *flowlogCollector.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc flow log (%s) tags: %s", d.Id(), err) + } + d.Set(isFlowLogTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/flowLogs") + d.Set(ResourceName, *flowlogCollector.Name) + d.Set(ResourceCRN, *flowlogCollector.CRN) + d.Set(ResourceStatus, *flowlogCollector.LifecycleState) + + if flowlogCollector.ResourceGroup != nil { + d.Set(isFlowLogResourceGroup, *flowlogCollector.ResourceGroup.ID) + d.Set(ResourceGroupName, *flowlogCollector.ResourceGroup.ID) + } + + return nil +} + +func resourceIBMISFlowLogUpdate(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + ID := d.Id() + + getOptions := &vpcv1.GetFlowLogCollectorOptions{ + ID: &ID, + } + flowlogCollector, response, err := sess.GetFlowLogCollector(getOptions) + if err != nil { + return fmt.Errorf("Error Getting Flow Log Collector: %s\n%s", err, response) + } + + if d.HasChange(isFlowLogTags) { + oldList, newList := d.GetChange(isFlowLogTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *flowlogCollector.CRN) + if err != nil { + log.Printf( + "Error on update of resource flow log (%s) tags: %s", *flowlogCollector.ID, err) + } + } + + if d.HasChange(isFlowLogActive) || d.HasChange(isFlowLogName) { + active := d.Get(isFlowLogActive).(bool) + name := d.Get(isFlowLogName).(string) + updoptions := &vpcv1.UpdateFlowLogCollectorOptions{ + ID: &ID, + } + flowLogCollectorPatchModel := &vpcv1.FlowLogCollectorPatch{ + Active: &active, + Name: &name, + } + flowLogCollectorPatch, err := flowLogCollectorPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for FlowLogCollectorPatch: %s", err) + } + updoptions.FlowLogCollectorPatch = flowLogCollectorPatch + _, response, err = sess.UpdateFlowLogCollector(updoptions) + if err != nil { + return fmt.Errorf("Error updating flow log collector:%s\n%s", err, response) + } + } + + return resourceIBMISFlowLogRead(d, meta) +} + +func resourceIBMISFlowLogDelete(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + ID := d.Id() + delOptions := &vpcv1.DeleteFlowLogCollectorOptions{ + ID: 
&ID, + } + response, err := sess.DeleteFlowLogCollector(delOptions) + + if err != nil && response.StatusCode != 404 { + return fmt.Errorf("Error deleting flow log collector: %s\n%s", err, response) + } + + d.SetId("") + return nil +} + +func resourceIBMISFlowLogExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + + ID := d.Id() + + getOptions := &vpcv1.GetFlowLogCollectorOptions{ + ID: &ID, + } + _, response, err := sess.GetFlowLogCollector(getOptions) + if err != nil && response.StatusCode != 404 { + return false, fmt.Errorf("Error Getting Flow Log Collector: %s\n%s", err, response) + } + if response.StatusCode == 404 { + d.SetId("") + return false, nil + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_ike_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_ike_policy.go new file mode 100644 index 00000000000..0639df4fbfb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_ike_policy.go @@ -0,0 +1,661 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isIKEName = "name" + isIKEAuthenticationAlg = "authentication_algorithm" + isIKEEncryptionAlg = "encryption_algorithm" + isIKEDhGroup = "dh_group" + isIKEVERSION = "ike_version" + isIKEKeyLifeTime = "key_lifetime" + isIKEResourceGroup = "resource_group" + isIKENegotiationMode = "negotiation_mode" + isIKEVPNConnections = "vpn_connections" + isIKEVPNConnectionName = "name" + isIKEVPNConnectionId = "id" + isIKEVPNConnectionHref = "href" + isIKEHref = "href" +) + +func resourceIBMISIKEPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISIKEPolicyCreate, + Read: resourceIBMISIKEPolicyRead, + Update: resourceIBMISIKEPolicyUpdate, + Delete: resourceIBMISIKEPolicyDelete, + Exists: resourceIBMISIKEPolicyExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + isIKEName: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_ike_policy", isIKEName), + Description: "IKE name", + }, + + isIKEAuthenticationAlg: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_ike_policy", isIKEAuthenticationAlg), + Description: "Authentication algorithm type", + }, + + isIKEEncryptionAlg: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_ike_policy", isIKEEncryptionAlg), + Description: "Encryption algorithm type", + }, + + isIKEDhGroup: { + Type: schema.TypeInt, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_ike_policy", isIKEDhGroup), + Description: "IKE DH group", + }, + + isIKEResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "IKE resource group ID", + }, + + isIKEKeyLifeTime: { + Type: schema.TypeInt, + Optional: true, + Default: 28800, + ValidateFunc: validateKeyLifeTime, + Description: "IKE Key lifetime", + }, + + isIKEVERSION: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_ike_policy", isIKEVERSION), + Description: "IKE version", + }, + + isIKENegotiationMode: { + Type: schema.TypeString, + Computed: true, + Description: "IKE 
negotiation mode", + }, + + isIKEHref: { + Type: schema.TypeString, + Computed: true, + Description: "IKE href value", + }, + + isIKEVPNConnections: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isIKEVPNConnectionName: { + Type: schema.TypeString, + Computed: true, + }, + isIKEVPNConnectionId: { + Type: schema.TypeString, + Computed: true, + }, + isIKEVPNConnectionHref: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMISIKEValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + authentication_algorithm := "md5, sha1, sha256, sha512" + encryption_algorithm := "triple_des, aes128, aes256" + dh_group := "2, 5, 14, 19" + ike_version := "1, 2" + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isIKEName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isIKEAuthenticationAlg, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: authentication_algorithm}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isIKEEncryptionAlg, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: encryption_algorithm}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isIKEDhGroup, + ValidateFunctionIdentifier: ValidateAllowedIntValue, + Type: TypeInt, + Required: true, + AllowedValues: dh_group}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isIKEVERSION, + ValidateFunctionIdentifier: ValidateAllowedIntValue, + Type: TypeInt, + Optional: true, + AllowedValues: ike_version}) + + ibmISIKEResourceValidator := ResourceValidator{ResourceName: "ibm_is_ike_policy", Schema: validateSchema} + return &ibmISIKEResourceValidator +} + +func resourceIBMISIKEPolicyCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + log.Printf("[DEBUG] IKE Policy create") + name := d.Get(isIKEName).(string) + authenticationAlg := d.Get(isIKEAuthenticationAlg).(string) + encryptionAlg := d.Get(isIKEEncryptionAlg).(string) + dhGroup := int64(d.Get(isIKEDhGroup).(int)) + + if userDetails.generation == 1 { + err := classicIkepCreate(d, meta, authenticationAlg, encryptionAlg, name, dhGroup) + if err != nil { + return err + } + } else { + err := ikepCreate(d, meta, authenticationAlg, encryptionAlg, name, dhGroup) + if err != nil { + return err + } + } + return resourceIBMISIKEPolicyRead(d, meta) +} + +func classicIkepCreate(d *schema.ResourceData, meta interface{}, authenticationAlg, encryptionAlg, name string, dhGroup int64) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := 
&vpcclassicv1.CreateIkePolicyOptions{ + AuthenticationAlgorithm: &authenticationAlg, + EncryptionAlgorithm: &encryptionAlg, + DhGroup: &dhGroup, + Name: &name, + } + + if keylt, ok := d.GetOk(isIKEKeyLifeTime); ok { + keyLifetime := int64(keylt.(int)) + options.KeyLifetime = &keyLifetime + } else { + keyLifetime := int64(28800) + options.KeyLifetime = &keyLifetime + } + + if ikev, ok := d.GetOk(isIKEVERSION); ok { + ikeVersion := int64(ikev.(int)) + options.IkeVersion = &ikeVersion + } + + if rgrp, ok := d.GetOk(isIKEResourceGroup); ok { + rg := rgrp.(string) + options.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{ + ID: &rg, + } + } + + ike, response, err := sess.CreateIkePolicy(options) + if err != nil { + return fmt.Errorf("[DEBUG] ike policy err %s\n%s", err, response) + } + d.SetId(*ike.ID) + log.Printf("[INFO] ike policy : %s", *ike.ID) + return nil +} + +func ikepCreate(d *schema.ResourceData, meta interface{}, authenticationAlg, encryptionAlg, name string, dhGroup int64) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.CreateIkePolicyOptions{ + AuthenticationAlgorithm: &authenticationAlg, + EncryptionAlgorithm: &encryptionAlg, + DhGroup: &dhGroup, + Name: &name, + } + + if keylt, ok := d.GetOk(isIKEKeyLifeTime); ok { + keyLifetime := int64(keylt.(int)) + options.KeyLifetime = &keyLifetime + } else { + keyLifetime := int64(28800) + options.KeyLifetime = &keyLifetime + } + + if ikev, ok := d.GetOk(isIKEVERSION); ok { + ikeVersion := int64(ikev.(int)) + options.IkeVersion = &ikeVersion + } + + if rgrp, ok := d.GetOk(isIKEResourceGroup); ok { + rg := rgrp.(string) + options.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + ike, response, err := sess.CreateIkePolicy(options) + if err != nil { + return fmt.Errorf("[DEBUG] ike policy err %s\n%s", err, response) + } + d.SetId(*ike.ID) + log.Printf("[INFO] ike policy : %s", *ike.ID) + return nil +} + +func resourceIBMISIKEPolicyRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicIkepGet(d, meta, id) + if err != nil { + return err + } + } else { + err := ikepGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicIkepGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getikepoptions := &vpcclassicv1.GetIkePolicyOptions{ + ID: &id, + } + ike, response, err := sess.GetIkePolicy(getikepoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting IKE Policy(%s): %s\n%s", id, err, response) + } + + d.Set(isIKEName, *ike.Name) + d.Set(isIKEAuthenticationAlg, *ike.AuthenticationAlgorithm) + d.Set(isIKEEncryptionAlg, *ike.EncryptionAlgorithm) + if ike.ResourceGroup != nil { + d.Set(isIKEResourceGroup, *ike.ResourceGroup.ID) + } else { + d.Set(isIKEResourceGroup, nil) + } + if ike.KeyLifetime != nil { + d.Set(isIKEKeyLifeTime, *ike.KeyLifetime) + } + d.Set(isIKEHref, *ike.Href) + d.Set(isIKENegotiationMode, *ike.NegotiationMode) + d.Set(isIKEVERSION, *ike.IkeVersion) + d.Set(isIKEDhGroup, *ike.DhGroup) + connList := make([]map[string]interface{}, 0) + if ike.Connections != nil && len(ike.Connections) > 0 { + for _, connection := range ike.Connections { + conn := map[string]interface{}{} + conn[isIKEVPNConnectionName] = 
*connection.Name + conn[isIKEVPNConnectionId] = *connection.ID + conn[isIKEVPNConnectionHref] = *connection.Href + connList = append(connList, conn) + } + } + d.Set(isIKEVPNConnections, connList) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/ikepolicies") + d.Set(ResourceName, *ike.Name) + if ike.ResourceGroup != nil { + rsMangClient, err := meta.(ClientSession).ResourceManagementAPIv2() + if err != nil { + return err + } + grp, err := rsMangClient.ResourceGroup().Get(*ike.ResourceGroup.ID) + if err != nil { + return err + } + d.Set(ResourceGroupName, grp.Name) + } + return nil +} + +func ikepGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getikepoptions := &vpcv1.GetIkePolicyOptions{ + ID: &id, + } + ike, response, err := sess.GetIkePolicy(getikepoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting IKE Policy(%s): %s\n%s", id, err, response) + } + + d.Set(isIKEName, *ike.Name) + d.Set(isIKEAuthenticationAlg, *ike.AuthenticationAlgorithm) + d.Set(isIKEEncryptionAlg, *ike.EncryptionAlgorithm) + if ike.ResourceGroup != nil { + d.Set(isIKEResourceGroup, *ike.ResourceGroup.ID) + d.Set(ResourceGroupName, *ike.ResourceGroup.Name) + } else { + d.Set(isIKEResourceGroup, nil) + } + if ike.KeyLifetime != nil { + d.Set(isIKEKeyLifeTime, *ike.KeyLifetime) + } + d.Set(isIKEHref, *ike.Href) + d.Set(isIKENegotiationMode, *ike.NegotiationMode) + d.Set(isIKEVERSION, *ike.IkeVersion) + d.Set(isIKEDhGroup, *ike.DhGroup) + connList := make([]map[string]interface{}, 0) + if ike.Connections != nil && len(ike.Connections) > 0 { + for _, connection := range ike.Connections { + conn := map[string]interface{}{} + conn[isIKEVPNConnectionName] = *connection.Name + conn[isIKEVPNConnectionId] = *connection.ID + conn[isIKEVPNConnectionHref] = *connection.Href + connList = append(connList, conn) + } + } + d.Set(isIKEVPNConnections, connList) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/ikepolicies") + d.Set(ResourceName, *ike.Name) + return nil +} + +func resourceIBMISIKEPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + + if userDetails.generation == 1 { + err := classicIkepUpdate(d, meta, id) + if err != nil { + return err + } + } else { + err := ikepUpdate(d, meta, id) + if err != nil { + return err + } + } + return resourceIBMISIKEPolicyRead(d, meta) +} + +func classicIkepUpdate(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.UpdateIkePolicyOptions{ + ID: &id, + } + if d.HasChange(isIKEName) || d.HasChange(isIKEAuthenticationAlg) || d.HasChange(isIKEEncryptionAlg) || d.HasChange(isIKEDhGroup) || d.HasChange(isIKEVERSION) || d.HasChange(isIKEKeyLifeTime) { + name := d.Get(isIKEName).(string) + authenticationAlg := d.Get(isIKEAuthenticationAlg).(string) + encryptionAlg := d.Get(isIKEEncryptionAlg).(string) + keyLifetime := int64(d.Get(isIKEKeyLifeTime).(int)) + dhGroup := int64(d.Get(isIKEDhGroup).(int)) + ikeVersion := int64(d.Get(isIKEVERSION).(int)) + + ikePolicyPatchModel := &vpcclassicv1.IkePolicyPatch{} + 
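+ // Any change to one of the six mutable IKE attributes re-sends all of
+ // them: the patch model is filled from the current configuration and
+ // AsPatch serializes it into the map form UpdateIkePolicy expects.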
ikePolicyPatchModel.Name = &name + ikePolicyPatchModel.AuthenticationAlgorithm = &authenticationAlg + ikePolicyPatchModel.EncryptionAlgorithm = &encryptionAlg + ikePolicyPatchModel.KeyLifetime = &keyLifetime + ikePolicyPatchModel.DhGroup = &dhGroup + ikePolicyPatchModel.IkeVersion = &ikeVersion + ikePolicyPatch, err := ikePolicyPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for ikePolicyPatch: %s", err) + } + options.IkePolicyPatch = ikePolicyPatch + + _, response, err := sess.UpdateIkePolicy(options) + if err != nil { + return fmt.Errorf("Error on update of IKE Policy(%s): %s\n%s", id, err, response) + } + } + return nil +} + +func ikepUpdate(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.UpdateIkePolicyOptions{ + ID: &id, + } + if d.HasChange(isIKEName) || d.HasChange(isIKEAuthenticationAlg) || d.HasChange(isIKEEncryptionAlg) || d.HasChange(isIKEDhGroup) || d.HasChange(isIKEVERSION) || d.HasChange(isIKEKeyLifeTime) { + name := d.Get(isIKEName).(string) + authenticationAlg := d.Get(isIKEAuthenticationAlg).(string) + encryptionAlg := d.Get(isIKEEncryptionAlg).(string) + keyLifetime := int64(d.Get(isIKEKeyLifeTime).(int)) + dhGroup := int64(d.Get(isIKEDhGroup).(int)) + ikeVersion := int64(d.Get(isIKEVERSION).(int)) + + ikePolicyPatchModel := &vpcv1.IkePolicyPatch{} + ikePolicyPatchModel.Name = &name + ikePolicyPatchModel.AuthenticationAlgorithm = &authenticationAlg + ikePolicyPatchModel.EncryptionAlgorithm = &encryptionAlg + ikePolicyPatchModel.KeyLifetime = &keyLifetime + ikePolicyPatchModel.DhGroup = &dhGroup + ikePolicyPatchModel.IkeVersion = &ikeVersion + ikePolicyPatch, err := ikePolicyPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for IkePolicyPatch: %s", err) + } + options.IkePolicyPatch = ikePolicyPatch + + _, response, err := sess.UpdateIkePolicy(options) + if err != nil { + return fmt.Errorf("Error on update of IKE Policy(%s): %s\n%s", id, err, response) + } + } + return nil +} + +func resourceIBMISIKEPolicyDelete(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicIkepDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := ikepDelete(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicIkepDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getikepoptions := &vpcclassicv1.GetIkePolicyOptions{ + ID: &id, + } + _, response, err := sess.GetIkePolicy(getikepoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + + return fmt.Errorf("Error getting IKE Policy(%s): %s\n%s", id, err, response) + } + + deleteIkePolicyOptions := &vpcclassicv1.DeleteIkePolicyOptions{ + ID: &id, + } + response, err = sess.DeleteIkePolicy(deleteIkePolicyOptions) + if err != nil { + return fmt.Errorf("Error Deleting IKE Policy(%s): %s\n%s", id, err, response) + } + d.SetId("") + return nil +} + +func ikepDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getikepoptions := &vpcv1.GetIkePolicyOptions{ + ID: &id, + } + _, response, err := sess.GetIkePolicy(getikepoptions) + if err != nil { + if response != 
nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting IKE Policy(%s): %s\n%s", id, err, response) + } + + deleteIkePolicyOptions := &vpcv1.DeleteIkePolicyOptions{ + ID: &id, + } + response, err = sess.DeleteIkePolicy(deleteIkePolicyOptions) + if err != nil { + return fmt.Errorf("Error Deleting IKE Policy(%s): %s\n%s", id, err, response) + } + d.SetId("") + return nil +} + +func resourceIBMISIKEPolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + + if userDetails.generation == 1 { + exists, err := classicikepExists(d, meta, id) + return exists, err + } else { + exists, err := ikepExists(d, meta, id) + return exists, err + } +} + +func classicikepExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + options := &vpcclassicv1.GetIkePolicyOptions{ + ID: &id, + } + _, response, err := sess.GetIkePolicy(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting IKE Policy(%s): %s\n%s", id, err, response) + } + + return true, nil +} + +func ikepExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + options := &vpcv1.GetIkePolicyOptions{ + ID: &id, + } + _, response, err := sess.GetIkePolicy(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting IKE Policy(%s): %s\n%s", id, err, response) + } + + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_image.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_image.go new file mode 100644 index 00000000000..a69c0afa2d6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_image.go @@ -0,0 +1,832 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isImageHref = "href" + isImageName = "name" + isImageTags = "tags" + isImageOperatingSystem = "operating_system" + isImageStatus = "status" + isImageVisibility = "visibility" + isImageFile = "file" + isImageMinimumProvisionedSize = "size" + + isImageResourceGroup = "resource_group" + isImageEncryptedDataKey = "encrypted_data_key" + isImageEncryptionKey = "encryption_key" + isImageEncryption = "encryption" + isImageCheckSum = "checksum" + + isImageProvisioning = "provisioning" + isImageProvisioningDone = "done" + isImageDeleting = "deleting" + isImageDeleted = "done" +) + +func resourceIBMISImage() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISImageCreate, + Read: resourceIBMISImageRead, + Update: resourceIBMISImageUpdate, + Delete: resourceIBMISImageDelete, + Exists: resourceIBMISImageExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + isImageHref: { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: applyOnce, + Description: "Image Href value", + }, + + isImageName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_image", isImageName), + Description: "Image name", + }, + + isImageEncryptedDataKey: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "A base64-encoded, encrypted representation of the key that was used to encrypt the data for this image", + }, + isImageEncryptionKey: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The CRN of the Key Protect Root Key or Hyper Protect Crypto Service Root Key for this resource", + }, + isImageTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_image", "tag")}, + Set: resourceIBMVPCHash, + Description: "Tags for the image", + }, + + isImageOperatingSystem: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Image Operating system", + }, + + isImageEncryption: { + Type: schema.TypeString, + Computed: true, + Description: "The type of encryption used on the image", + }, + isImageStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of this image", + }, + + isImageMinimumProvisionedSize: { + Type: schema.TypeInt, + Computed: true, + Description: "The minimum size (in gigabytes) of a volume onto which this image may be provisioned", + }, + + isImageVisibility: { + Type: schema.TypeString, + Computed: true, + Description: "Whether the image is publicly visible or private to the account", + }, + + isImageFile: { + Type: schema.TypeInt, + Computed: true, + Description: "Details for the stored image file", + }, + + isImageResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + 
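+ // ForceNew: a change to the resource group recreates the image rather
+ // than updating it in place.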
Optional: true, + Computed: true, + Description: "The resource group for this image", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + isImageCheckSum: { + Type: schema.TypeString, + Computed: true, + Description: "The SHA256 checksum of this image", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMISImageValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isImageName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + ibmISImageResourceValidator := ResourceValidator{ResourceName: "ibm_is_image", Schema: validateSchema} + return &ibmISImageResourceValidator +} + +func resourceIBMISImageCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + log.Printf("[DEBUG] Image create") + href := d.Get(isImageHref).(string) + name := d.Get(isImageName).(string) + operatingSystem := d.Get(isImageOperatingSystem).(string) + + if userDetails.generation == 1 { + err := classicImgCreate(d, meta, href, name, operatingSystem) + if err != nil { + return err + } + } else { + err := imgCreate(d, meta, href, name, operatingSystem) + if err != nil { + return err + } + } + return resourceIBMISImageRead(d, meta) +} + +func classicImgCreate(d *schema.ResourceData, meta interface{}, href, name, operatingSystem string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + imagePrototype := &vpcclassicv1.ImagePrototype{ + Name: &name, + File: &vpcclassicv1.ImageFilePrototype{ + Href: &href, + }, + OperatingSystem: &vpcclassicv1.OperatingSystemIdentity{ + Name: &operatingSystem, + }, + } + if rgrp, ok := d.GetOk(isImageResourceGroup); ok { + rg := rgrp.(string) + imagePrototype.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{ + ID: &rg, + } + } + options := &vpcclassicv1.CreateImageOptions{ + ImagePrototype: imagePrototype, + } + + image, response, err := sess.CreateImage(options) + if err != nil { + return fmt.Errorf("[DEBUG] Image creation err %s\n%s", err, response) + } + d.SetId(*image.ID) + log.Printf("[INFO] Image ID : %s", *image.ID) + _, err = isWaitForClassicImageAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isImageTags); ok || v != "" { + oldList, newList := d.GetChange(isImageTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *image.CRN) + if err != nil { + log.Printf( + 
"Error on create of resource vpc image (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func imgCreate(d *schema.ResourceData, meta interface{}, href, name, operatingSystem string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + imagePrototype := &vpcv1.ImagePrototype{ + Name: &name, + File: &vpcv1.ImageFilePrototype{ + Href: &href, + }, + OperatingSystem: &vpcv1.OperatingSystemIdentity{ + Name: &operatingSystem, + }, + } + if encryptionKey, ok := d.GetOk(isImageEncryptionKey); ok { + encryptionKeyStr := encryptionKey.(string) + // Construct an instance of the EncryptionKeyReference model + encryptionKeyReferenceModel := new(vpcv1.EncryptionKeyIdentity) + encryptionKeyReferenceModel.CRN = &encryptionKeyStr + imagePrototype.EncryptionKey = encryptionKeyReferenceModel + } + if encDataKey, ok := d.GetOk(isImageEncryptedDataKey); ok { + encDataKeyStr := encDataKey.(string) + imagePrototype.EncryptedDataKey = &encDataKeyStr + } + if rgrp, ok := d.GetOk(isImageResourceGroup); ok { + rg := rgrp.(string) + imagePrototype.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + options := &vpcv1.CreateImageOptions{ + ImagePrototype: imagePrototype, + } + + image, response, err := sess.CreateImage(options) + if err != nil { + return fmt.Errorf("[DEBUG] Image creation err %s\n%s", err, response) + } + d.SetId(*image.ID) + log.Printf("[INFO] Image ID : %s", *image.ID) + _, err = isWaitForImageAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isImageTags); ok || v != "" { + oldList, newList := d.GetChange(isImageTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *image.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc image (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func isWaitForClassicImageAvailable(imageC *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for image (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isImageProvisioning}, + Target: []string{isImageProvisioningDone, ""}, + Refresh: isClassicImageRefreshFunc(imageC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicImageRefreshFunc(imageC *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getimgoptions := &vpcclassicv1.GetImageOptions{ + ID: &id, + } + image, response, err := imageC.GetImage(getimgoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Image: %s\n%s", err, response) + } + + if *image.Status == "available" || *image.Status == "failed" { + return image, isImageProvisioningDone, nil + } + + return image, isImageProvisioning, nil + } +} +func isWaitForImageAvailable(imageC *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for image (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isImageProvisioning}, + Target: []string{isImageProvisioningDone, ""}, + Refresh: isImageRefreshFunc(imageC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isImageRefreshFunc(imageC *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getimgoptions := 
&vpcv1.GetImageOptions{ + ID: &id, + } + image, response, err := imageC.GetImage(getimgoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Image: %s\n%s", err, response) + } + + if *image.Status == "available" || *image.Status == "failed" { + return image, isImageProvisioningDone, nil + } + + return image, isImageProvisioning, nil + } +} + +func resourceIBMISImageUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + id := d.Id() + name := "" + hasChanged := false + + if d.HasChange(isImageName) { + name = d.Get(isImageName).(string) + hasChanged = true + } + if userDetails.generation == 1 { + err := classicImgUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } else { + err := imgUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } + return resourceIBMISImageRead(d, meta) +} + +func classicImgUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isImageTags) { + options := &vpcclassicv1.GetImageOptions{ + ID: &id, + } + image, response, err := sess.GetImage(options) + if err != nil { + return fmt.Errorf("Error getting Image: %s\n%s", err, response) + } + oldList, newList := d.GetChange(isImageTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *image.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc Image (%s) tags: %s", id, err) + } + } + if hasChanged { + options := &vpcclassicv1.UpdateImageOptions{ + ID: &id, + } + imagePatchModel := &vpcclassicv1.ImagePatch{ + Name: &name, + } + imagePatch, err := imagePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for ImagePatch: %s", err) + } + options.ImagePatch = imagePatch + + _, response, err := sess.UpdateImage(options) + if err != nil { + return fmt.Errorf("Error on update of resource vpc Image: %s\n%s", err, response) + } + } + return nil +} + +func imgUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isImageTags) { + options := &vpcv1.GetImageOptions{ + ID: &id, + } + image, response, err := sess.GetImage(options) + if err != nil { + return fmt.Errorf("Error getting Image: %s\n%s", err, response) + } + oldList, newList := d.GetChange(isImageTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *image.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc Image (%s) tags: %s", id, err) + } + } + if hasChanged { + options := &vpcv1.UpdateImageOptions{ + ID: &id, + } + imagePatchModel := &vpcv1.ImagePatch{ + Name: &name, + } + imagePatch, err := imagePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for ImagePatch: %s", err) + } + options.ImagePatch = imagePatch + _, response, err := sess.UpdateImage(options) + if err != nil { + return fmt.Errorf("Error on update of resource vpc Image: %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISImageRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + id := d.Id() + if userDetails.generation == 1 { + err := classicImgGet(d, meta, id) + if err != nil { + return err + } + } else { + err := imgGet(d, meta, id) + if err != nil { + return err + } + } + 
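+ // Both generation-specific readers write their results straight into
+ // the Terraform state, so there is nothing left to set here.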
return nil +} + +func classicImgGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.GetImageOptions{ + ID: &id, + } + image, response, err := sess.GetImage(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Image (%s): %s\n%s", id, err, response) + } + // d.Set(isImageArchitecure, image.Architecture) + d.Set(isImageMinimumProvisionedSize, *image.MinimumProvisionedSize) + d.Set(isImageName, *image.Name) + d.Set(isImageOperatingSystem, *image.OperatingSystem.Name) + // d.Set(isImageFormat, image.Format) + d.Set(isImageFile, *image.File.Size) + d.Set(isImageHref, *image.Href) + d.Set(isImageStatus, *image.Status) + d.Set(isImageVisibility, *image.Visibility) + tags, err := GetTagsUsingCRN(meta, *image.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Image (%s) tags: %s", d.Id(), err) + } + d.Set(isImageTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/compute/image") + d.Set(ResourceName, *image.Name) + d.Set(ResourceStatus, *image.Status) + d.Set(ResourceCRN, *image.CRN) + if image.ResourceGroup != nil { + d.Set(isImageResourceGroup, *image.ResourceGroup.ID) + rsMangClient, err := meta.(ClientSession).ResourceManagementAPIv2() + if err != nil { + return err + } + grp, err := rsMangClient.ResourceGroup().Get(*image.ResourceGroup.ID) + if err != nil { + return err + } + d.Set(ResourceGroupName, grp.Name) + } + return nil +} + +func imgGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.GetImageOptions{ + ID: &id, + } + image, response, err := sess.GetImage(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Image (%s): %s\n%s", id, err, response) + } + // d.Set(isImageArchitecure, image.Architecture) + d.Set(isImageMinimumProvisionedSize, *image.MinimumProvisionedSize) + d.Set(isImageName, *image.Name) + d.Set(isImageOperatingSystem, *image.OperatingSystem.Name) + // d.Set(isImageFormat, image.Format) + d.Set(isImageFile, *image.File.Size) + d.Set(isImageHref, *image.Href) + d.Set(isImageStatus, *image.Status) + d.Set(isImageVisibility, *image.Visibility) + if image.Encryption != nil { + d.Set(isImageEncryption, *image.Encryption) + } + if image.EncryptionKey != nil { + d.Set(isImageEncryptionKey, *image.EncryptionKey.CRN) + } + if image.File != nil && image.File.Checksums != nil { + d.Set(isImageCheckSum, *image.File.Checksums.Sha256) + } + tags, err := GetTagsUsingCRN(meta, *image.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Image (%s) tags: %s", d.Id(), err) + } + d.Set(isImageTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/compute/image") + d.Set(ResourceName, *image.Name) + d.Set(ResourceStatus, *image.Status) + d.Set(ResourceCRN, *image.CRN) + if image.ResourceGroup != nil { + d.Set(isImageResourceGroup, *image.ResourceGroup.ID) + } + return nil +} + +func resourceIBMISImageDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation 
== 1 { + err := classicImgDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := imgDelete(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicImgDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getImageOptions := &vpcclassicv1.GetImageOptions{ + ID: &id, + } + _, response, err := sess.GetImage(getImageOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting Image (%s): %s\n%s", id, err, response) + } + + options := &vpcclassicv1.DeleteImageOptions{ + ID: &id, + } + response, err = sess.DeleteImage(options) + if err != nil { + return fmt.Errorf("Error Deleting Image : %s\n%s", err, response) + } + _, err = isWaitForClassicImageDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func imgDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getImageOptions := &vpcv1.GetImageOptions{ + ID: &id, + } + _, response, err := sess.GetImage(getImageOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting Image (%s): %s\n%s", id, err, response) + } + + options := &vpcv1.DeleteImageOptions{ + ID: &id, + } + response, err = sess.DeleteImage(options) + if err != nil { + return fmt.Errorf("Error Deleting Image : %s\n%s", err, response) + } + _, err = isWaitForImageDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func isWaitForClassicImageDeleted(imageC *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for image (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isImageDeleting}, + Target: []string{"", isImageDeleted}, + Refresh: isClassicImageDeleteRefreshFunc(imageC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicImageDeleteRefreshFunc(imageC *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getimgoptions := &vpcclassicv1.GetImageOptions{ + ID: &id, + } + image, response, err := imageC.GetImage(getimgoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return image, isImageDeleted, nil + } + return image, "", fmt.Errorf("Error Getting Image: %s\n%s", err, response) + } + return image, isImageDeleting, err + } +} +func isWaitForImageDeleted(imageC *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for image (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isImageDeleting}, + Target: []string{"", isImageDeleted}, + Refresh: isImageDeleteRefreshFunc(imageC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isImageDeleteRefreshFunc(imageC *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getimgoptions := &vpcv1.GetImageOptions{ + ID: &id, + } + image, response, err := imageC.GetImage(getimgoptions) 
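+ // A 404 on this read is the success signal: the image is gone, so the
+ // state machine can report the deleted target state.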
+ if err != nil { + if response != nil && response.StatusCode == 404 { + return image, isImageDeleted, nil + } + return image, "", fmt.Errorf("Error Getting Image: %s\n%s", err, response) + } + return image, isImageDeleting, err + } +} +func resourceIBMISImageExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + + if userDetails.generation == 1 { + exists, err := classicImgExists(d, meta, id) + return exists, err + } else { + exists, err := imgExists(d, meta, id) + return exists, err + } +} + +func classicImgExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + options := &vpcclassicv1.GetImageOptions{ + ID: &id, + } + _, response, err := sess.GetImage(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Image: %s\n%s", err, response) + } + return true, nil +} + +func imgExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + options := &vpcv1.GetImageOptions{ + ID: &id, + } + _, response, err := sess.GetImage(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Image: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance.go new file mode 100644 index 00000000000..e6d1013351c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance.go @@ -0,0 +1,2595 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isInstanceName = "name" + isInstanceKeys = "keys" + isInstanceTags = "tags" + isInstanceNetworkInterfaces = "network_interfaces" + isInstancePrimaryNetworkInterface = "primary_network_interface" + isInstanceNicName = "name" + isInstanceProfile = "profile" + isInstanceNicPortSpeed = "port_speed" + isInstanceNicAllowIPSpoofing = "allow_ip_spoofing" + isInstanceNicPrimaryIpv4Address = "primary_ipv4_address" + isInstanceNicPrimaryIpv6Address = "primary_ipv6_address" + isInstanceNicSecondaryAddress = "secondary_addresses" + isInstanceNicSecurityGroups = "security_groups" + isInstanceNicSubnet = "subnet" + isInstanceNicFloatingIPs = "floating_ips" + isInstanceUserData = "user_data" + isInstanceVolumes = "volumes" + isInstanceVPC = "vpc" + isInstanceZone = "zone" + isInstanceBootVolume = "boot_volume" + isInstanceVolAttName = "name" + isInstanceVolAttVolume = "volume" + isInstanceVolAttVolAutoDelete = "auto_delete_volume" + isInstanceVolAttVolCapacity = "capacity" + isInstanceVolAttVolIops = "iops" + isInstanceVolAttVolName = "name" + isInstanceVolAttVolBillingTerm = "billing_term" + isInstanceVolAttVolEncryptionKey = "encryption_key" + isInstanceVolAttVolType = "type" + isInstanceVolAttVolProfile = "profile" + isInstanceImage = "image" + isInstanceCPU = "vcpu" + isInstanceCPUArch = "architecture" + isInstanceCPUCores = "cores" + isInstanceCPUCount = "count" + isInstanceGpu = "gpu" + isInstanceGpuCores = "cores" + isInstanceGpuCount = "count" + isInstanceGpuManufacturer = "manufacturer" + isInstanceGpuMemory = "memory" + isInstanceGpuModel = "model" + isInstanceMemory = "memory" + isInstanceDisks = "disks" + isInstanceDedicatedHost = "dedicated_host" + isInstanceStatus = "status" + + isEnableCleanDelete = "wait_before_delete" + isInstanceProvisioning = "provisioning" + isInstanceProvisioningDone = "done" + isInstanceAvailable = "available" + isInstanceDeleting = "deleting" + isInstanceDeleteDone = "done" + isInstanceFailed = "failed" + + isInstanceActionStatusStopping = "stopping" + isInstanceActionStatusStopped = "stopped" + isInstanceStatusPending = "pending" + isInstanceStatusRunning = "running" + isInstanceStatusFailed = "failed" + + isInstanceBootName = "name" + isInstanceBootSize = "size" + isInstanceBootIOPS = "iops" + isInstanceBootEncryption = "encryption" + isInstanceBootProfile = "profile" + + isInstanceVolumeAttachments = "volume_attachments" + isInstanceVolumeAttaching = "attaching" + isInstanceVolumeAttached = "attached" + isInstanceVolumeDetaching = "detaching" + isInstanceResourceGroup = "resource_group" + + isPlacementTargetDedicatedHost = "dedicated_host" + isPlacementTargetDedicatedHostGroup = "dedicated_host_group" + isInstancePlacementTarget = "placement_target" +) + +func resourceIBMISInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMisInstanceCreate, + Read: resourceIBMisInstanceRead, + Update: resourceIBMisInstanceUpdate, + Delete: resourceIBMisInstanceDelete, + Exists: resourceIBMisInstanceExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 
* time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + isInstanceName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_instance", isInstanceName), + Description: "Instance name", + }, + + isInstanceVPC: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "VPC ID", + }, + + isInstanceZone: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "Zone name", + }, + + isInstanceProfile: { + Type: schema.TypeString, + ForceNew: false, + Required: true, + Description: "Profile info", + }, + + isInstanceKeys: { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + DiffSuppressFunc: applyOnce, + Description: "SSH key IDs for the instance", + }, + + isInstanceTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_instance", "tag")}, + Set: resourceIBMVPCHash, + Description: "List of tags for the instance", + }, + + isEnableCleanDelete: { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Stops the instance before deleting it and waits until the deletion is complete", + }, + + isInstanceVolumeAttachments: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "volume_id": { + Type: schema.TypeString, + Computed: true, + }, + "volume_name": { + Type: schema.TypeString, + Computed: true, + }, + "volume_crn": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + isInstancePrimaryNetworkInterface: { + Type: schema.TypeList, + MinItems: 1, + MaxItems: 1, + Required: true, + Description: "Primary network interface info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + isInstanceNicAllowIPSpoofing: { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Indicates whether IP spoofing is allowed on this interface.", + }, + isInstanceNicName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + isInstanceNicPortSpeed: { + Type: schema.TypeInt, + Optional: true, + DiffSuppressFunc: applyOnce, + Deprecated: "This field is deprecated", + }, + isInstanceNicPrimaryIpv4Address: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + }, + isInstanceNicSecurityGroups: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + isInstanceNicSubnet: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + isInstanceNetworkInterfaces: { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + isInstanceNicAllowIPSpoofing: { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Indicates whether IP spoofing is allowed on this interface.", + }, + isInstanceNicName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + 
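+ // The secondary interface schema below mirrors the primary one above,
+ // minus the deprecated port_speed field.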
isInstanceNicPrimaryIpv4Address: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + }, + isInstanceNicSecurityGroups: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + isInstanceNicSubnet: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + isInstanceUserData: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "User data given for the instance", + }, + + isInstanceImage: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "image name", + }, + + isInstanceBootVolume: { + Type: schema.TypeList, + DiffSuppressFunc: applyOnce, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceBootName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + isInstanceBootEncryption: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + isInstanceBootSize: { + Type: schema.TypeInt, + Computed: true, + }, + isInstanceBootIOPS: { + Type: schema.TypeInt, + Computed: true, + }, + isInstanceBootProfile: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + isInstanceVolumes: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of volumes", + }, + + isInstanceVolAttVolAutoDelete: { + Type: schema.TypeBool, + Optional: true, + Description: "Auto delete volume along with instance", + }, + + isInstanceResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "Instance resource group", + }, + + isPlacementTargetDedicatedHost: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{isPlacementTargetDedicatedHostGroup}, + Description: "Unique Identifier of the Dedicated Host where the instance will be placed", + }, + + isPlacementTargetDedicatedHostGroup: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{isPlacementTargetDedicatedHost}, + Description: "Unique Identifier of the Dedicated Host Group where the instance will be placed", + }, + + isInstanceCPU: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceCPUArch: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceCPUCount: { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + + isInstanceGpu: { + Type: schema.TypeList, + Computed: true, + Deprecated: "This field is deprecated", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceGpuCores: { + Type: schema.TypeInt, + Computed: true, + }, + isInstanceGpuCount: { + Type: schema.TypeInt, + Computed: true, + }, + isInstanceGpuMemory: { + Type: schema.TypeInt, + Computed: true, + }, + isInstanceGpuManufacturer: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceGpuModel: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + isInstanceMemory: { + Type: schema.TypeInt, + Computed: true, + Description: "Instance memory", + }, + + isInstanceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "instance status", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + 
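+ // The Resource* attributes below are the computed convenience fields
+ // this provider exposes across its resources (see the flow log and
+ // image schemas earlier in this change for the same set).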
ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource group in which the resource is provisioned", + }, + + "force_recovery_time": { + Description: "Timeout, in minutes, after which the instance start/stop action is forcibly retried.", + Type: schema.TypeInt, + Optional: true, + }, + isInstanceDisks: &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of the instance's disks.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the disk was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this instance disk.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance disk.", + }, + "interface_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The disk interface used for attaching the disk. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this disk.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The size of the disk in GB (gigabytes).", + }, + }, + }, + }, + }, + } +} + +func resourceIBMISInstanceValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 0) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isInstanceName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISInstanceValidator := ResourceValidator{ResourceName: "ibm_is_instance", Schema: validateSchema} + return &ibmISInstanceValidator +} + +func classicInstanceCreate(d *schema.ResourceData, meta interface{}, profile, name, vpcID, zone, image string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + instanceproto := &vpcclassicv1.InstancePrototype{ + Image: &vpcclassicv1.ImageIdentity{ + ID: &image, + }, + Zone: &vpcclassicv1.ZoneIdentity{ + Name: &zone, + }, + Profile: &vpcclassicv1.InstanceProfileIdentity{ + Name: &profile, + }, + Name: &name, + VPC: &vpcclassicv1.VPCIdentity{ + ID: &vpcID, + }, + } + + if boot, ok := d.GetOk(isInstanceBootVolume); ok { + bootvol := boot.([]interface{})[0].(map[string]interface{}) + var volTemplate = &vpcclassicv1.VolumePrototypeInstanceByImageContext{} + name, ok
:= bootvol[isInstanceBootName] + namestr := name.(string) + if ok { + volTemplate.Name = &namestr + } + enc, ok := bootvol[isInstanceBootEncryption] + encstr := enc.(string) + if ok && encstr != "" { + volTemplate.EncryptionKey = &vpcclassicv1.EncryptionKeyIdentity{ + CRN: &encstr, + } + } + volcap := 100 + volcapint64 := int64(volcap) + volprof := "general-purpose" + volTemplate.Capacity = &volcapint64 + volTemplate.Profile = &vpcclassicv1.VolumeProfileIdentity{ + Name: &volprof, + } + + deletebool := true + instanceproto.BootVolumeAttachment = &vpcclassicv1.VolumeAttachmentPrototypeInstanceByImageContext{ + DeleteVolumeOnInstanceDelete: &deletebool, + Volume: volTemplate, + } + } + + if primnicintf, ok := d.GetOk(isInstancePrimaryNetworkInterface); ok { + primnic := primnicintf.([]interface{})[0].(map[string]interface{}) + subnetintf, _ := primnic[isInstanceNicSubnet] + subnetintfstr := subnetintf.(string) + var primnicobj = &vpcclassicv1.NetworkInterfacePrototype{} + primnicobj.Subnet = &vpcclassicv1.SubnetIdentity{ + ID: &subnetintfstr, + } + name, ok := primnic[isInstanceNicName] + namestr := name.(string) + if ok { + primnicobj.Name = &namestr + } + ipv4, _ := primnic[isInstanceNicPrimaryIpv4Address] + ipv4str := ipv4.(string) + if ipv4str != "" { + primnicobj.PrimaryIpv4Address = &ipv4str + } + secgrpintf, ok := primnic[isInstanceNicSecurityGroups] + if ok { + secgrpSet := secgrpintf.(*schema.Set) + if secgrpSet.Len() != 0 { + var secgrpobjs = make([]vpcclassicv1.SecurityGroupIdentityIntf, secgrpSet.Len()) + for i, secgrpIntf := range secgrpSet.List() { + secgrpIntfstr := secgrpIntf.(string) + secgrpobjs[i] = &vpcclassicv1.SecurityGroupIdentity{ + ID: &secgrpIntfstr, + } + } + primnicobj.SecurityGroups = secgrpobjs + } + } + + instanceproto.PrimaryNetworkInterface = primnicobj + } + + if nicsintf, ok := d.GetOk(isInstanceNetworkInterfaces); ok { + nics := nicsintf.([]interface{}) + var intfs []vpcclassicv1.NetworkInterfacePrototype + for _, resource := range nics { + nic := resource.(map[string]interface{}) + nwInterface := &vpcclassicv1.NetworkInterfacePrototype{} + subnetintf, _ := nic[isInstanceNicSubnet] + subnetintfstr := subnetintf.(string) + nwInterface.Subnet = &vpcclassicv1.SubnetIdentity{ + ID: &subnetintfstr, + } + name, ok := nic[isInstanceNicName] + namestr := name.(string) + if ok && namestr != "" { + nwInterface.Name = &namestr + } + ipv4, _ := nic[isInstanceNicPrimaryIpv4Address] + ipv4str := ipv4.(string) + if ipv4str != "" { + nwInterface.PrimaryIpv4Address = &ipv4str + } + secgrpintf, ok := nic[isInstanceNicSecurityGroups] + if ok { + secgrpSet := secgrpintf.(*schema.Set) + if secgrpSet.Len() != 0 { + var secgrpobjs = make([]vpcclassicv1.SecurityGroupIdentityIntf, secgrpSet.Len()) + for i, secgrpIntf := range secgrpSet.List() { + secgrpIntfstr := secgrpIntf.(string) + secgrpobjs[i] = &vpcclassicv1.SecurityGroupIdentity{ + ID: &secgrpIntfstr, + } + } + nwInterface.SecurityGroups = secgrpobjs + } + } + intfs = append(intfs, *nwInterface) + } + instanceproto.NetworkInterfaces = intfs + } + + keySet := d.Get(isInstanceKeys).(*schema.Set) + if keySet.Len() != 0 { + keyobjs := make([]vpcclassicv1.KeyIdentityIntf, keySet.Len()) + for i, key := range keySet.List() { + keystr := key.(string) + keyobjs[i] = &vpcclassicv1.KeyIdentity{ + ID: &keystr, + } + } + instanceproto.Keys = keyobjs + } + + if userdata, ok := d.GetOk(isInstanceUserData); ok { + userdatastr := userdata.(string) + instanceproto.UserData = &userdatastr + } + + if grp, ok := 
d.GetOk(isInstanceResourceGroup); ok { + grpstr := grp.(string) + instanceproto.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{ + ID: &grpstr, + } + + } + + options := &vpcclassicv1.CreateInstanceOptions{ + InstancePrototype: instanceproto, + } + instance, response, err := sess.CreateInstance(options) + if err != nil { + log.Printf("[DEBUG] Instance err %s\n%s", err, response) + return err + } + d.SetId(*instance.ID) + + log.Printf("[INFO] Instance : %s", *instance.ID) + d.Set(isInstanceStatus, instance.Status) + + _, err = isWaitForClassicInstanceAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate), d) + if err != nil { + return err + } + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isInstanceTags); ok || v != "" { + oldList, newList := d.GetChange(isInstanceTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc instance (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func instanceCreate(d *schema.ResourceData, meta interface{}, profile, name, vpcID, zone, image string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + instanceproto := &vpcv1.InstancePrototype{ + Image: &vpcv1.ImageIdentity{ + ID: &image, + }, + Zone: &vpcv1.ZoneIdentity{ + Name: &zone, + }, + Profile: &vpcv1.InstanceProfileIdentity{ + Name: &profile, + }, + Name: &name, + VPC: &vpcv1.VPCIdentity{ + ID: &vpcID, + }, + } + + if dHostIdInf, ok := d.GetOk(isPlacementTargetDedicatedHost); ok { + dHostIdStr := dHostIdInf.(string) + dHostPlacementTarget := &vpcv1.InstancePlacementTargetPrototypeDedicatedHostIdentity{ + ID: &dHostIdStr, + } + instanceproto.PlacementTarget = dHostPlacementTarget + } + + if dHostGrpIdInf, ok := d.GetOk(isPlacementTargetDedicatedHostGroup); ok { + dHostGrpIdStr := dHostGrpIdInf.(string) + dHostGrpPlacementTarget := &vpcv1.InstancePlacementTargetPrototypeDedicatedHostGroupIdentity{ + ID: &dHostGrpIdStr, + } + instanceproto.PlacementTarget = dHostGrpPlacementTarget + } + + if boot, ok := d.GetOk(isInstanceBootVolume); ok { + bootvol := boot.([]interface{})[0].(map[string]interface{}) + var volTemplate = &vpcv1.VolumePrototypeInstanceByImageContext{} + name, ok := bootvol[isInstanceBootName] + namestr := name.(string) + if ok { + volTemplate.Name = &namestr + } + enc, ok := bootvol[isInstanceBootEncryption] + encstr := enc.(string) + if ok && encstr != "" { + volTemplate.EncryptionKey = &vpcv1.EncryptionKeyIdentity{ + CRN: &encstr, + } + } + volcap := 100 + volcapint64 := int64(volcap) + volprof := "general-purpose" + volTemplate.Capacity = &volcapint64 + volTemplate.Profile = &vpcv1.VolumeProfileIdentity{ + Name: &volprof, + } + deletebool := true + instanceproto.BootVolumeAttachment = &vpcv1.VolumeAttachmentPrototypeInstanceByImageContext{ + DeleteVolumeOnInstanceDelete: &deletebool, + Volume: volTemplate, + } + } + + if primnicintf, ok := d.GetOk(isInstancePrimaryNetworkInterface); ok { + primnic := primnicintf.([]interface{})[0].(map[string]interface{}) + subnetintf, _ := primnic[isInstanceNicSubnet] + subnetintfstr := subnetintf.(string) + var primnicobj = &vpcv1.NetworkInterfacePrototype{} + primnicobj.Subnet = &vpcv1.SubnetIdentity{ + ID: &subnetintfstr, + } + name, _ := primnic[isInstanceNicName] + namestr := name.(string) + if namestr != "" { + primnicobj.Name = &namestr + } + ipv4, _ := primnic[isInstanceNicPrimaryIpv4Address] + ipv4str := ipv4.(string) + if ipv4str != "" { + primnicobj.PrimaryIpv4Address = &ipv4str + } + allowIPSpoofing, ok :=
primnic[isInstanceNicAllowIPSpoofing] + allowIPSpoofingbool := allowIPSpoofing.(bool) + if ok { + primnicobj.AllowIPSpoofing = &allowIPSpoofingbool + } + secgrpintf, ok := primnic[isInstanceNicSecurityGroups] + if ok { + secgrpSet := secgrpintf.(*schema.Set) + if secgrpSet.Len() != 0 { + var secgrpobjs = make([]vpcv1.SecurityGroupIdentityIntf, secgrpSet.Len()) + for i, secgrpIntf := range secgrpSet.List() { + secgrpIntfstr := secgrpIntf.(string) + secgrpobjs[i] = &vpcv1.SecurityGroupIdentity{ + ID: &secgrpIntfstr, + } + } + primnicobj.SecurityGroups = secgrpobjs + } + } + instanceproto.PrimaryNetworkInterface = primnicobj + } + + if nicsintf, ok := d.GetOk(isInstanceNetworkInterfaces); ok { + nics := nicsintf.([]interface{}) + var intfs []vpcv1.NetworkInterfacePrototype + for _, resource := range nics { + nic := resource.(map[string]interface{}) + nwInterface := &vpcv1.NetworkInterfacePrototype{} + subnetintf, _ := nic[isInstanceNicSubnet] + subnetintfstr := subnetintf.(string) + nwInterface.Subnet = &vpcv1.SubnetIdentity{ + ID: &subnetintfstr, + } + name, ok := nic[isInstanceNicName] + namestr := name.(string) + if ok && namestr != "" { + nwInterface.Name = &namestr + } + ipv4, _ := nic[isInstanceNicPrimaryIpv4Address] + ipv4str := ipv4.(string) + if ipv4str != "" { + nwInterface.PrimaryIpv4Address = &ipv4str + } + allowIPSpoofing, ok := nic[isInstanceNicAllowIPSpoofing] + allowIPSpoofingbool := allowIPSpoofing.(bool) + if ok { + nwInterface.AllowIPSpoofing = &allowIPSpoofingbool + } + secgrpintf, ok := nic[isInstanceNicSecurityGroups] + if ok { + secgrpSet := secgrpintf.(*schema.Set) + if secgrpSet.Len() != 0 { + var secgrpobjs = make([]vpcv1.SecurityGroupIdentityIntf, secgrpSet.Len()) + for i, secgrpIntf := range secgrpSet.List() { + secgrpIntfstr := secgrpIntf.(string) + secgrpobjs[i] = &vpcv1.SecurityGroupIdentity{ + ID: &secgrpIntfstr, + } + } + nwInterface.SecurityGroups = secgrpobjs + } + } + intfs = append(intfs, *nwInterface) + } + instanceproto.NetworkInterfaces = intfs + } + + keySet := d.Get(isInstanceKeys).(*schema.Set) + if keySet.Len() != 0 { + keyobjs := make([]vpcv1.KeyIdentityIntf, keySet.Len()) + for i, key := range keySet.List() { + keystr := key.(string) + keyobjs[i] = &vpcv1.KeyIdentity{ + ID: &keystr, + } + } + instanceproto.Keys = keyobjs + } + + if userdata, ok := d.GetOk(isInstanceUserData); ok { + userdatastr := userdata.(string) + instanceproto.UserData = &userdatastr + } + + if grp, ok := d.GetOk(isInstanceResourceGroup); ok { + grpstr := grp.(string) + instanceproto.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &grpstr, + } + + } + + options := &vpcv1.CreateInstanceOptions{ + InstancePrototype: instanceproto, + } + + instance, response, err := sess.CreateInstance(options) + if err != nil { + log.Printf("[DEBUG] Instance err %s\n%s", err, response) + return err + } + d.SetId(*instance.ID) + + log.Printf("[INFO] Instance : %s", *instance.ID) + d.Set(isInstanceStatus, instance.Status) + + _, err = isWaitForInstanceAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate), d) + if err != nil { + return err + } + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isInstanceTags); ok || v != "" { + oldList, newList := d.GetChange(isInstanceTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc instance (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func resourceIBMisInstanceCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := 
meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + profile := d.Get(isInstanceProfile).(string) + name := d.Get(isInstanceName).(string) + vpcID := d.Get(isInstanceVPC).(string) + zone := d.Get(isInstanceZone).(string) + image := d.Get(isInstanceImage).(string) + + if userDetails.generation == 1 { + err := classicInstanceCreate(d, meta, profile, name, vpcID, zone, image) + if err != nil { + return err + } + } else { + err := instanceCreate(d, meta, profile, name, vpcID, zone, image) + if err != nil { + return err + } + } + + return resourceIBMisInstanceUpdate(d, meta) +} + +func isWaitForClassicInstanceAvailable(instanceC *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration, d *schema.ResourceData) (interface{}, error) { + log.Printf("Waiting for instance (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isInstanceProvisioning}, + Target: []string{isInstanceStatusRunning, "available", "failed", ""}, + Refresh: isClassicInstanceRefreshFunc(instanceC, id, d), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isWaitForInstanceAvailable(instanceC *vpcv1.VpcV1, id string, timeout time.Duration, d *schema.ResourceData) (interface{}, error) { + log.Printf("Waiting for instance (%s) to be available.", id) + + communicator := make(chan interface{}) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isInstanceProvisioning}, + Target: []string{isInstanceStatusRunning, "available", "failed", ""}, + Refresh: isInstanceRefreshFunc(instanceC, id, d, communicator), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + if v, ok := d.GetOk("force_recovery_time"); ok { + forceTimeout := v.(int) + go isRestartStartAction(instanceC, id, d, forceTimeout, communicator) + } + return stateConf.WaitForState() +} + +func isClassicInstanceRefreshFunc(instanceC *vpcclassicv1.VpcClassicV1, id string, d *schema.ResourceData) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getinsOptions := &vpcclassicv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + + d.Set(isInstanceStatus, *instance.Status) + + if *instance.Status == "available" || *instance.Status == "failed" || *instance.Status == "running" { + return instance, *instance.Status, nil + } + + return instance, isInstanceProvisioning, nil + } +} + +func isInstanceRefreshFunc(instanceC *vpcv1.VpcV1, id string, d *schema.ResourceData, communicator chan interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getinsOptions := &vpcv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + d.Set(isInstanceStatus, *instance.Status) + + select { + case data := <-communicator: + return nil, "", data.(error) + default: + fmt.Println("no message sent") + } + + if *instance.Status == "available" || *instance.Status == "failed" || *instance.Status == "running" { + // signal isRestartStartAction() to stop + close(communicator) + return instance, *instance.Status, nil + + } + return instance, isInstanceProvisioning, nil + } +} + +func isRestartStartAction(instanceC *vpcv1.VpcV1, id string, d
*schema.ResourceData, forceTimeout int, communicator chan interface{}) { + subticker := time.NewTicker(time.Duration(forceTimeout) * time.Minute) + //subticker := time.NewTicker(time.Duration(forceTimeout) * time.Second) + for { + select { + + case <-subticker.C: + log.Println("Instance is still in starting state; forcing a retry by restarting the instance.") + actiontype := "stop" + createinsactoptions := &vpcv1.CreateInstanceActionOptions{ + InstanceID: &id, + Type: &actiontype, + } + _, response, err := instanceC.CreateInstanceAction(createinsactoptions) + if err != nil { + communicator <- fmt.Errorf("Error retrying instance action stop: %s\n%s", err, response) + return + } + waitTimeout := time.Duration(1) * time.Minute + _, _ = isWaitForInstanceActionStop(instanceC, waitTimeout, id, d) + actiontype = "start" + createinsactoptions = &vpcv1.CreateInstanceActionOptions{ + InstanceID: &id, + Type: &actiontype, + } + _, response, err = instanceC.CreateInstanceAction(createinsactoptions) + if err != nil { + communicator <- fmt.Errorf("Error retrying instance action start: %s\n%s", err, response) + return + } + case <-communicator: + // the refresh func reached its target; stop this goroutine + subticker.Stop() + return + + } + } +} +func resourceIBMisInstanceRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + ID := d.Id() + if userDetails.generation == 1 { + err := classicInstanceGet(d, meta, ID) + if err != nil { + return err + } + } else { + err := instanceGet(d, meta, ID) + if err != nil { + return err + } + } + return nil +} + +func classicInstanceGet(d *schema.ResourceData, meta interface{}, id string) error { + instanceC, err := classicVpcClient(meta) + if err != nil { + return err + } + getinsOptions := &vpcclassicv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + d.Set(isInstanceName, *instance.Name) + if instance.Profile != nil { + d.Set(isInstanceProfile, *instance.Profile.Name) + } + cpuList := make([]map[string]interface{}, 0) + if instance.Vcpu != nil { + currentCPU := map[string]interface{}{} + currentCPU[isInstanceCPUArch] = *instance.Vcpu.Architecture + currentCPU[isInstanceCPUCount] = *instance.Vcpu.Count + cpuList = append(cpuList, currentCPU) + } + d.Set(isInstanceCPU, cpuList) + + d.Set(isInstanceMemory, *instance.Memory) + gpuList := make([]map[string]interface{}, 0) + // if instance.Gpu != nil { + // currentGpu := map[string]interface{}{} + // currentGpu[isInstanceGpuManufacturer] = instance.Gpu.Manufacturer + // currentGpu[isInstanceGpuModel] = instance.Gpu.Model + // currentGpu[isInstanceGpuCores] = instance.Gpu.Cores + // currentGpu[isInstanceGpuCount] = instance.Gpu.Count + // currentGpu[isInstanceGpuMemory] = instance.Gpu.Memory + // gpuList = append(gpuList, currentGpu) + + // } + d.Set(isInstanceGpu, gpuList) + + if instance.PrimaryNetworkInterface != nil { + primaryNicList := make([]map[string]interface{}, 0) + currentPrimNic := map[string]interface{}{} + currentPrimNic["id"] = *instance.PrimaryNetworkInterface.ID + currentPrimNic[isInstanceNicName] = *instance.PrimaryNetworkInterface.Name + currentPrimNic[isInstanceNicPrimaryIpv4Address] = *instance.PrimaryNetworkInterface.PrimaryIpv4Address + getnicoptions :=
&vpcclassicv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: instance.PrimaryNetworkInterface.ID, + } + insnic, response, err := instanceC.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentPrimNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentPrimNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + + primaryNicList = append(primaryNicList, currentPrimNic) + d.Set(isInstancePrimaryNetworkInterface, primaryNicList) + } + + if instance.NetworkInterfaces != nil { + interfacesList := make([]map[string]interface{}, 0) + for _, intfc := range instance.NetworkInterfaces { + if *intfc.ID != *instance.PrimaryNetworkInterface.ID { + currentNic := map[string]interface{}{} + currentNic["id"] = *intfc.ID + currentNic[isInstanceNicName] = *intfc.Name + currentNic[isInstanceNicPrimaryIpv4Address] = *intfc.PrimaryIpv4Address + getnicoptions := &vpcclassicv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: intfc.ID, + } + insnic, response, err := instanceC.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + interfacesList = append(interfacesList, currentNic) + + } + } + + d.Set(isInstanceNetworkInterfaces, interfacesList) + } + + if instance.Image != nil { + d.Set(isInstanceImage, *instance.Image.ID) + } + + d.Set(isInstanceStatus, *instance.Status) + d.Set(isInstanceVPC, *instance.VPC.ID) + d.Set(isInstanceZone, *instance.Zone.Name) + + var volumes []string + volumes = make([]string, 0) + if instance.VolumeAttachments != nil { + for _, volume := range instance.VolumeAttachments { + if volume.Volume != nil && *volume.Volume.ID != *instance.BootVolumeAttachment.Volume.ID { + volumes = append(volumes, *volume.Volume.ID) + } + } + } + d.Set(isInstanceVolumes, newStringSet(schema.HashString, volumes)) + if instance.VolumeAttachments != nil { + volList := make([]map[string]interface{}, 0) + for _, volume := range instance.VolumeAttachments { + vol := map[string]interface{}{} + if volume.Volume != nil { + vol["id"] = *volume.ID + vol["volume_id"] = *volume.Volume.ID + vol["name"] = *volume.Name + vol["volume_name"] = *volume.Volume.Name + vol["volume_crn"] = *volume.Volume.CRN + volList = append(volList, vol) + } + } + d.Set(isInstanceVolumeAttachments, volList) + } + if instance.BootVolumeAttachment != nil { + bootVolList := make([]map[string]interface{}, 0) + bootVol := map[string]interface{}{} + bootVol[isInstanceBootName] = *instance.BootVolumeAttachment.Name + // getvolattoptions := &vpcclassicv1.GetVolumeAttachmentOptions{ + // InstanceID: &ID, + // ID: instance.BootVolumeAttachment.Volume.ID, + // } + // vol, _, err := instanceC.GetVolumeAttachment(getvolattoptions) + // if err != nil { + // return fmt.Errorf("Error while retrieving boot volume %s for instance %s: %v", 
getvolattoptions.ID, d.Id(), err) + // } + if instance.BootVolumeAttachment.Volume.CRN != nil { + bootVol[isInstanceBootEncryption] = *instance.BootVolumeAttachment.Volume.CRN + } + // bootVol[isInstanceBootSize] = instance.BootVolumeAttachment.Capacity + // bootVol[isInstanceBootIOPS] = instance.BootVolumeAttachment.Iops + // bootVol[isInstanceBootProfile] = instance.BootVolumeAttachment.Name + bootVolList = append(bootVolList, bootVol) + + d.Set(isInstanceBootVolume, bootVolList) + } + tags, err := GetTagsUsingCRN(meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Instance (%s) tags: %s", d.Id(), err) + } + d.Set(isInstanceTags, tags) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/compute/vs") + d.Set(ResourceName, instance.Name) + d.Set(ResourceCRN, instance.CRN) + d.Set(ResourceStatus, instance.Status) + if instance.ResourceGroup != nil { + d.Set(isInstanceResourceGroup, instance.ResourceGroup.ID) + d.Set(ResourceGroupName, instance.ResourceGroup.ID) + } + return nil +} + +func instanceGet(d *schema.ResourceData, meta interface{}, id string) error { + instanceC, err := vpcClient(meta) + if err != nil { + return err + } + getinsOptions := &vpcv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + d.Set(isInstanceName, *instance.Name) + if instance.Profile != nil { + d.Set(isInstanceProfile, *instance.Profile.Name) + } + cpuList := make([]map[string]interface{}, 0) + if instance.Vcpu != nil { + currentCPU := map[string]interface{}{} + currentCPU[isInstanceCPUArch] = *instance.Vcpu.Architecture + currentCPU[isInstanceCPUCount] = *instance.Vcpu.Count + cpuList = append(cpuList, currentCPU) + } + d.Set(isInstanceCPU, cpuList) + + d.Set(isInstanceMemory, *instance.Memory) + gpuList := make([]map[string]interface{}, 0) + // if instance.Gpu != nil { + // currentGpu := map[string]interface{}{} + // currentGpu[isInstanceGpuManufacturer] = instance.Gpu.Manufacturer + // currentGpu[isInstanceGpuModel] = instance.Gpu.Model + // currentGpu[isInstanceGpuCores] = instance.Gpu.Cores + // currentGpu[isInstanceGpuCount] = instance.Gpu.Count + // currentGpu[isInstanceGpuMemory] = instance.Gpu.Memory + // gpuList = append(gpuList, currentGpu) + + // } + d.Set(isInstanceGpu, gpuList) + + if instance.PrimaryNetworkInterface != nil { + primaryNicList := make([]map[string]interface{}, 0) + currentPrimNic := map[string]interface{}{} + currentPrimNic["id"] = *instance.PrimaryNetworkInterface.ID + currentPrimNic[isInstanceNicName] = *instance.PrimaryNetworkInterface.Name + currentPrimNic[isInstanceNicPrimaryIpv4Address] = *instance.PrimaryNetworkInterface.PrimaryIpv4Address + getnicoptions := &vpcv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: instance.PrimaryNetworkInterface.ID, + } + insnic, response, err := instanceC.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentPrimNic[isInstanceNicAllowIPSpoofing] = *insnic.AllowIPSpoofing + currentPrimNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = 
append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentPrimNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + + primaryNicList = append(primaryNicList, currentPrimNic) + d.Set(isInstancePrimaryNetworkInterface, primaryNicList) + } + + if instance.NetworkInterfaces != nil { + interfacesList := make([]map[string]interface{}, 0) + for _, intfc := range instance.NetworkInterfaces { + if *intfc.ID != *instance.PrimaryNetworkInterface.ID { + currentNic := map[string]interface{}{} + currentNic["id"] = *intfc.ID + currentNic[isInstanceNicName] = *intfc.Name + currentNic[isInstanceNicPrimaryIpv4Address] = *intfc.PrimaryIpv4Address + getnicoptions := &vpcv1.GetInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: intfc.ID, + } + insnic, response, err := instanceC.GetInstanceNetworkInterface(getnicoptions) + if err != nil { + return fmt.Errorf("Error getting network interfaces attached to the instance %s\n%s", err, response) + } + currentNic[isInstanceNicAllowIPSpoofing] = *insnic.AllowIPSpoofing + currentNic[isInstanceNicSubnet] = *insnic.Subnet.ID + if len(insnic.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(insnic.SecurityGroups); i++ { + secgrpList = append(secgrpList, string(*(insnic.SecurityGroups[i].ID))) + } + currentNic[isInstanceNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + interfacesList = append(interfacesList, currentNic) + + } + } + + d.Set(isInstanceNetworkInterfaces, interfacesList) + } + + if instance.Image != nil { + d.Set(isInstanceImage, *instance.Image.ID) + } + + d.Set(isInstanceStatus, *instance.Status) + d.Set(isInstanceVPC, *instance.VPC.ID) + d.Set(isInstanceZone, *instance.Zone.Name) + + var volumes []string + volumes = make([]string, 0) + if instance.VolumeAttachments != nil { + for _, volume := range instance.VolumeAttachments { + if volume.Volume != nil && *volume.Volume.ID != *instance.BootVolumeAttachment.Volume.ID { + volumes = append(volumes, *volume.Volume.ID) + } + } + } + d.Set(isInstanceVolumes, newStringSet(schema.HashString, volumes)) + if instance.VolumeAttachments != nil { + volList := make([]map[string]interface{}, 0) + for _, volume := range instance.VolumeAttachments { + vol := map[string]interface{}{} + if volume.Volume != nil { + vol["id"] = *volume.ID + vol["volume_id"] = *volume.Volume.ID + vol["name"] = *volume.Name + vol["volume_name"] = *volume.Volume.Name + vol["volume_crn"] = *volume.Volume.CRN + volList = append(volList, vol) + } + } + d.Set(isInstanceVolumeAttachments, volList) + } + if instance.BootVolumeAttachment != nil { + bootVolList := make([]map[string]interface{}, 0) + bootVol := map[string]interface{}{} + if instance.BootVolumeAttachment.Volume != nil { + bootVol[isInstanceBootName] = *instance.BootVolumeAttachment.Volume.Name + options := &vpcv1.GetVolumeOptions{ + ID: instance.BootVolumeAttachment.Volume.ID, + } + vol, response, err := instanceC.GetVolume(options) + if err != nil { + log.Printf("Error Getting Boot Volume (%s): %s\n%s", id, err, response) + } + if vol != nil { + bootVol[isInstanceBootSize] = *vol.Capacity + bootVol[isInstanceBootIOPS] = *vol.Iops + bootVol[isInstanceBootProfile] = *vol.Profile.Name + if vol.EncryptionKey != nil { + bootVol[isInstanceBootEncryption] = *vol.EncryptionKey.CRN + } + } + } + bootVolList = append(bootVolList, bootVol) + d.Set(isInstanceBootVolume, bootVolList) + } + tags, err := GetTagsUsingCRN(meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Instance 
(%s) tags: %s", d.Id(), err) + } + d.Set(isInstanceTags, tags) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/compute/vs") + d.Set(ResourceName, *instance.Name) + d.Set(ResourceCRN, *instance.CRN) + d.Set(ResourceStatus, *instance.Status) + if instance.ResourceGroup != nil { + d.Set(isInstanceResourceGroup, *instance.ResourceGroup.ID) + d.Set(ResourceGroupName, *instance.ResourceGroup.Name) + } + + if instance.Disks != nil { + disks := []map[string]interface{}{} + for _, disksItem := range instance.Disks { + disksItemMap := resourceIbmIsInstanceInstanceDiskToMap(disksItem) + disks = append(disks, disksItemMap) + } + if err = d.Set(isInstanceDisks, disks); err != nil { + return fmt.Errorf("Error setting disks: %s", err) + } + } + return nil +} + +func classicInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + instanceC, err := classicVpcClient(meta) + if err != nil { + return err + } + id := d.Id() + if d.HasChange(isInstanceVolumes) { + ovs, nvs := d.GetChange(isInstanceVolumes) + ov := ovs.(*schema.Set) + nv := nvs.(*schema.Set) + + remove := expandStringList(ov.Difference(nv).List()) + add := expandStringList(nv.Difference(ov).List()) + + if len(add) > 0 { + for i := range add { + createvolattoptions := &vpcclassicv1.CreateInstanceVolumeAttachmentOptions{ + InstanceID: &id, + Volume: &vpcclassicv1.VolumeIdentity{ + ID: &add[i], + }, + } + vol, response, err := instanceC.CreateInstanceVolumeAttachment(createvolattoptions) + if err != nil { + return fmt.Errorf("Error while attaching volume %q for instance %s\n%s: %q", add[i], d.Id(), err, response) + } + _, err = isWaitForClassicInstanceVolumeAttached(instanceC, d, id, *vol.ID) + if err != nil { + return err + } + } + + } + if len(remove) > 0 { + for i := range remove { + listvolattoptions := &vpcclassicv1.ListInstanceVolumeAttachmentsOptions{ + InstanceID: &id, + } + vols, _, err := instanceC.ListInstanceVolumeAttachments(listvolattoptions) + if err != nil { + return err + } + for _, vol := range vols.VolumeAttachments { + if *vol.Volume.ID == remove[i] { + delvolattoptions := &vpcclassicv1.DeleteInstanceVolumeAttachmentOptions{ + InstanceID: &id, + ID: vol.ID, + } + response, err := instanceC.DeleteInstanceVolumeAttachment(delvolattoptions) + if err != nil { + return fmt.Errorf("Error while removing volume %q for instance %s\n%s: %q", remove[i], d.Id(), err, response) + } + _, err = isWaitForClassicInstanceVolumeDetached(instanceC, d, d.Id(), *vol.ID) + if err != nil { + return err + } + break + } + } + } + } + } + + if d.HasChange("primary_network_interface.0.security_groups") && !d.IsNewResource() { + ovs, nvs := d.GetChange("primary_network_interface.0.security_groups") + ov := ovs.(*schema.Set) + nv := nvs.(*schema.Set) + remove := expandStringList(ov.Difference(nv).List()) + add := expandStringList(nv.Difference(ov).List()) + if len(add) > 0 { + networkID := d.Get("primary_network_interface.0.id").(string) + for i := range add { + createsgnicoptions := &vpcclassicv1.AddSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &add[i], + ID: &networkID, + } + _, response, err := instanceC.AddSecurityGroupNetworkInterface(createsgnicoptions) + if err != nil { + return fmt.Errorf("Error while creating security group %q for primary network interface of instance %s\n%s: %q", add[i], d.Id(), err, response) + } + _, err = isWaitForClassicInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err 
+ } + } + + } + if len(remove) > 0 { + networkID := d.Get("primary_network_interface.0.id").(string) + for i := range remove { + deletesgnicoptions := &vpcclassicv1.RemoveSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &remove[i], + ID: &networkID, + } + response, err := instanceC.RemoveSecurityGroupNetworkInterface(deletesgnicoptions) + if err != nil { + return fmt.Errorf("Error while removing security group %q for primary network interface of instance %s\n%s: %q", remove[i], d.Id(), err, response) + } + _, err = isWaitForClassicInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + } + } + } + + // if d.HasChange("primary_network_interface.0.name") && !d.IsNewResource() { + // newName := d.Get("primary_network_interface.0.name").(string) + // networkID := d.Get("primary_network_interface.0.id").(string) + // _, err := instanceC.UpdateInterface(d.Id(), networkID, newName, 0) + // if err != nil { + // return fmt.Errorf("Error while updating name %s for primary network interface of instance %s: %q", newName, d.Id(), err) + // } + // _, err = isWaitForInstanceAvailable(instanceC, d.Id(), d) + // if err != nil { + // return err + // } + // } + + if d.HasChange(isInstanceNetworkInterfaces) && !d.IsNewResource() { + nics := d.Get(isInstanceNetworkInterfaces).([]interface{}) + for i := range nics { + securitygrpKey := fmt.Sprintf("network_interfaces.%d.security_groups", i) + // networkNameKey := fmt.Sprintf("network_interfaces.%d.name", i) + if d.HasChange(securitygrpKey) { + ovs, nvs := d.GetChange(securitygrpKey) + ov := ovs.(*schema.Set) + nv := nvs.(*schema.Set) + remove := expandStringList(ov.Difference(nv).List()) + add := expandStringList(nv.Difference(ov).List()) + if len(add) > 0 { + networkIDKey := fmt.Sprintf("network_interfaces.%d.id", i) + networkID := d.Get(networkIDKey).(string) + for i := range add { + createsgnicoptions := &vpcclassicv1.AddSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &add[i], + ID: &networkID, + } + _, response, err := instanceC.AddSecurityGroupNetworkInterface(createsgnicoptions) + if err != nil { + return fmt.Errorf("Error while creating security group %q for network interface of instance %s\n%s: %q", add[i], d.Id(), err, response) + } + _, err = isWaitForClassicInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + } + + } + if len(remove) > 0 { + networkIDKey := fmt.Sprintf("network_interfaces.%d.id", i) + networkID := d.Get(networkIDKey).(string) + for i := range remove { + deletesgnicoptions := &vpcclassicv1.RemoveSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &remove[i], + ID: &networkID, + } + response, err := instanceC.RemoveSecurityGroupNetworkInterface(deletesgnicoptions) + if err != nil { + return fmt.Errorf("Error while removing security group %q for network interface of instance %s\n%s: %q", remove[i], d.Id(), err, response) + } + _, err = isWaitForClassicInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + } + } + + } + + // if d.HasChange(networkNameKey) { + // newName := d.Get(networkNameKey).(string) + // networkIDKey := fmt.Sprintf("network_interfaces.%d.id", i) + // networkID := d.Get(networkIDKey).(string) + // _, err := instanceC.UpdateInterface(d.Id(), networkID, newName, 0) + // if err != nil { + // return fmt.Errorf("Error while updating name %s for network interface %s of instance %s: %q", newName, networkID, d.Id(), err) + // } + // _, err = 
isWaitForInstanceAvailable(instanceC, d.Id(), d) + // if err != nil { + // return err + // } + // } + } + + } + + if d.HasChange(isInstanceName) { + name := d.Get(isInstanceName).(string) + updnetoptions := &vpcclassicv1.UpdateInstanceOptions{ + ID: &id, + } + + instancePatchModel := &vpcclassicv1.InstancePatch{ + Name: &name, + } + instancePatch, err := instancePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstancePatch: %s", err) + } + updnetoptions.InstancePatch = instancePatch + + _, _, err = instanceC.UpdateInstance(updnetoptions) + if err != nil { + return err + } + } + + if d.HasChange(isInstanceTags) { + getinsOptions := &vpcclassicv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + log.Printf("Error Getting Instance: %s\n%s", err, response) + } + oldList, newList := d.GetChange(isInstanceTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc Instance (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func instanceUpdate(d *schema.ResourceData, meta interface{}) error { + instanceC, err := vpcClient(meta) + if err != nil { + return err + } + id := d.Id() + if d.HasChange(isInstanceVolumes) { + ovs, nvs := d.GetChange(isInstanceVolumes) + ov := ovs.(*schema.Set) + nv := nvs.(*schema.Set) + + remove := expandStringList(ov.Difference(nv).List()) + add := expandStringList(nv.Difference(ov).List()) + + var volautoDelete bool + if volumeautodeleteIntf, ok := d.GetOk(isInstanceVolAttVolAutoDelete); ok && volumeautodeleteIntf != nil { + volautoDelete = volumeautodeleteIntf.(bool) + } + + if len(add) > 0 { + for i := range add { + createvolattoptions := &vpcv1.CreateInstanceVolumeAttachmentOptions{ + InstanceID: &id, + Volume: &vpcv1.VolumeAttachmentPrototypeVolume{ + ID: &add[i], + }, + DeleteVolumeOnInstanceDelete: &volautoDelete, + } + vol, _, err := instanceC.CreateInstanceVolumeAttachment(createvolattoptions) + if err != nil { + return fmt.Errorf("Error while attaching volume %q for instance %s: %q", add[i], d.Id(), err) + } + _, err = isWaitForInstanceVolumeAttached(instanceC, d, id, *vol.ID) + if err != nil { + return err + } + } + + } + if len(remove) > 0 { + for i := range remove { + listvolattoptions := &vpcv1.ListInstanceVolumeAttachmentsOptions{ + InstanceID: &id, + } + vols, _, err := instanceC.ListInstanceVolumeAttachments(listvolattoptions) + if err != nil { + return err + } + for _, vol := range vols.VolumeAttachments { + if *vol.Volume.ID == remove[i] { + delvolattoptions := &vpcv1.DeleteInstanceVolumeAttachmentOptions{ + InstanceID: &id, + ID: vol.ID, + } + _, err := instanceC.DeleteInstanceVolumeAttachment(delvolattoptions) + if err != nil { + return fmt.Errorf("Error while removing volume %q for instance %s: %q", remove[i], d.Id(), err) + } + _, err = isWaitForInstanceVolumeDetached(instanceC, d, d.Id(), *vol.ID) + if err != nil { + return err + } + break + } + } + } + } + } + + if d.HasChange("primary_network_interface.0.security_groups") && !d.IsNewResource() { + ovs, nvs := d.GetChange("primary_network_interface.0.security_groups") + ov := ovs.(*schema.Set) + nv := nvs.(*schema.Set) + remove := expandStringList(ov.Difference(nv).List()) + add := expandStringList(nv.Difference(ov).List()) + if len(add) > 0 { + networkID := d.Get("primary_network_interface.0.id").(string) + for i := range add { + createsgnicoptions := &vpcv1.AddSecurityGroupNetworkInterfaceOptions{ +
SecurityGroupID: &add[i], + ID: &networkID, + } + _, response, err := instanceC.AddSecurityGroupNetworkInterface(createsgnicoptions) + if err != nil { + return fmt.Errorf("Error while creating security group %q for primary network interface of instance %s\n%s: %q", add[i], d.Id(), err, response) + } + _, err = isWaitForInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + } + + } + if len(remove) > 0 { + networkID := d.Get("primary_network_interface.0.id").(string) + for i := range remove { + deletesgnicoptions := &vpcv1.RemoveSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &remove[i], + ID: &networkID, + } + response, err := instanceC.RemoveSecurityGroupNetworkInterface(deletesgnicoptions) + if err != nil { + return fmt.Errorf("Error while removing security group %q for primary network interface of instance %s\n%s: %q", remove[i], d.Id(), err, response) + } + _, err = isWaitForInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + } + } + } + + if (d.HasChange("primary_network_interface.0.allow_ip_spoofing") || d.HasChange("primary_network_interface.0.name")) && !d.IsNewResource() { + newName := d.Get("primary_network_interface.0.name").(string) + networkID := d.Get("primary_network_interface.0.id").(string) + allowIPSpoofing := d.Get("primary_network_interface.0.allow_ip_spoofing").(bool) + updatepnicfoptions := &vpcv1.UpdateInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: &networkID, + } + + networkInterfacePatchModel := &vpcv1.NetworkInterfacePatch{ + Name: &newName, + AllowIPSpoofing: &allowIPSpoofing, + } + networkInterfacePatch, err := networkInterfacePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for NetworkInterfacePatch: %s", err) + } + updatepnicfoptions.NetworkInterfacePatch = networkInterfacePatch + + _, response, err := instanceC.UpdateInstanceNetworkInterface(updatepnicfoptions) + if err != nil { + return fmt.Errorf("Error while updating name %s for primary network interface of instance %s\n%s: %q", newName, d.Id(), err, response) + } + _, err = isWaitForInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + } + + if d.HasChange(isInstanceNetworkInterfaces) && !d.IsNewResource() { + nics := d.Get(isInstanceNetworkInterfaces).([]interface{}) + for i := range nics { + securitygrpKey := fmt.Sprintf("network_interfaces.%d.security_groups", i) + networkNameKey := fmt.Sprintf("network_interfaces.%d.name", i) + ipSpoofingKey := fmt.Sprintf("network_interfaces.%d.allow_ip_spoofing", i) + if d.HasChange(securitygrpKey) { + ovs, nvs := d.GetChange(securitygrpKey) + ov := ovs.(*schema.Set) + nv := nvs.(*schema.Set) + remove := expandStringList(ov.Difference(nv).List()) + add := expandStringList(nv.Difference(ov).List()) + if len(add) > 0 { + networkIDKey := fmt.Sprintf("network_interfaces.%d.id", i) + networkID := d.Get(networkIDKey).(string) + for i := range add { + createsgnicoptions := &vpcv1.AddSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &add[i], + ID: &networkID, + } + _, response, err := instanceC.AddSecurityGroupNetworkInterface(createsgnicoptions) + if err != nil { + return fmt.Errorf("Error while creating security group %q for network interface of instance %s\n%s: %q", add[i], d.Id(), err, response) + } + _, err = isWaitForInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + } + + } + if 
len(remove) > 0 { + networkIDKey := fmt.Sprintf("network_interfaces.%d.id", i) + networkID := d.Get(networkIDKey).(string) + for i := range remove { + deletesgnicoptions := &vpcv1.RemoveSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &remove[i], + ID: &networkID, + } + response, err := instanceC.RemoveSecurityGroupNetworkInterface(deletesgnicoptions) + if err != nil { + return fmt.Errorf("Error while removing security group %q for network interface of instance %s\n%s: %q", remove[i], d.Id(), err, response) + } + _, err = isWaitForInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + } + } + + } + + if d.HasChange(networkNameKey) || d.HasChange(ipSpoofingKey) { + newName := d.Get(networkNameKey).(string) + networkIDKey := fmt.Sprintf("network_interfaces.%d.id", i) + networkID := d.Get(networkIDKey).(string) + ipSpoofing := d.Get(ipSpoofingKey).(bool) + updatepnicfoptions := &vpcv1.UpdateInstanceNetworkInterfaceOptions{ + InstanceID: &id, + ID: &networkID, + } + + instancePatchModel := &vpcv1.NetworkInterfacePatch{ + Name: &newName, + AllowIPSpoofing: &ipSpoofing, + } + networkInterfacePatch, err := instancePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for NetworkInterfacePatch: %s", err) + } + updatepnicfoptions.NetworkInterfacePatch = networkInterfacePatch + + _, response, err := instanceC.UpdateInstanceNetworkInterface(updatepnicfoptions) + if err != nil { + return fmt.Errorf("Error while updating name %s for network interface of instance %s\n%s: %q", newName, d.Id(), err, response) + } + if err != nil { + return err + } + } + } + + } + + if d.HasChange(isInstanceName) { + name := d.Get(isInstanceName).(string) + updnetoptions := &vpcv1.UpdateInstanceOptions{ + ID: &id, + } + + instancePatchModel := &vpcv1.InstancePatch{ + Name: &name, + } + instancePatch, err := instancePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstancePatch: %s", err) + } + updnetoptions.InstancePatch = instancePatch + + _, _, err = instanceC.UpdateInstance(updnetoptions) + if err != nil { + return err + } + } + + if d.HasChange(isInstanceProfile) && !d.IsNewResource() { + + getinsOptions := &vpcv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Instance (%s): %s\n%s", id, err, response) + } + + if instance != nil && *instance.Status == "running" { + actiontype := "stop" + createinsactoptions := &vpcv1.CreateInstanceActionOptions{ + InstanceID: &id, + Type: &actiontype, + } + _, response, err = instanceC.CreateInstanceAction(createinsactoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Creating Instance Action: %s\n%s", err, response) + } + _, err = isWaitForInstanceActionStop(instanceC, d.Timeout(schema.TimeoutUpdate), id, d) + if err != nil { + return err + } + } + + updnetoptions := &vpcv1.UpdateInstanceOptions{ + ID: &id, + } + + instanceProfile := d.Get(isInstanceProfile).(string) + profile := &vpcv1.InstancePatchProfile{ + Name: &instanceProfile, + } + instancePatchModel := &vpcv1.InstancePatch{ + Profile: profile, + } + instancePatch, err := instancePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstancePatch: %s", err) + } + updnetoptions.InstancePatch = instancePatch + + 
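+ // The instance has been stopped above (if it was running); apply the profile patch, then start it again and wait until it is available.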
_, response, err = instanceC.UpdateInstance(updnetoptions) + if err != nil { + return fmt.Errorf("Error in UpdateInstancePatch: %s\n%s", err, response) + } + + actiontype := "start" + createinsactoptions := &vpcv1.CreateInstanceActionOptions{ + InstanceID: &id, + Type: &actiontype, + } + _, response, err = instanceC.CreateInstanceAction(createinsactoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Creating Instance Action: %s\n%s", err, response) + } + _, err = isWaitForInstanceAvailable(instanceC, d.Id(), d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + + } + + getinsOptions := &vpcv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + return fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + if d.HasChange(isInstanceTags) { + oldList, newList := d.GetChange(isInstanceTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc Instance (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func resourceIBMisInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + if userDetails.generation == 1 { + err := classicInstanceUpdate(d, meta) + if err != nil { + return err + } + } else { + err := instanceUpdate(d, meta) + if err != nil { + return err + } + } + + return resourceIBMisInstanceRead(d, meta) +} + +func classicInstanceDelete(d *schema.ResourceData, meta interface{}, id string) error { + instanceC, err := classicVpcClient(meta) + if err != nil { + return err + } + + getinsOptions := &vpcclassicv1.GetInstanceOptions{ + ID: &id, + } + _, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Instance (%s): %s\n%s", id, err, response) + } + actiontype := "stop" + createinsactoptions := &vpcclassicv1.CreateInstanceActionOptions{ + InstanceID: &id, + Type: &actiontype, + } + _, response, err = instanceC.CreateInstanceAction(createinsactoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Creating Instance Action: %s\n%s", err, response) + } + _, err = isWaitForClassicInstanceActionStop(instanceC, d, meta, id) + if err != nil { + return err + } + listvolattoptions := &vpcclassicv1.ListInstanceVolumeAttachmentsOptions{ + InstanceID: &id, + } + vols, response, err := instanceC.ListInstanceVolumeAttachments(listvolattoptions) + if err != nil { + return fmt.Errorf("Error Listing volume attachments to the instance: %s\n%s", err, response) + } + bootvolid := "" + for _, vol := range vols.VolumeAttachments { + if *vol.Type == "data" { + delvolattoptions := &vpcclassicv1.DeleteInstanceVolumeAttachmentOptions{ + InstanceID: &id, + ID: vol.ID, + } + _, err := instanceC.DeleteInstanceVolumeAttachment(delvolattoptions) + if err != nil { + return fmt.Errorf("Error while removing volume attachment %q for instance %s: %q", *vol.ID, d.Id(), err) + } + _, err = isWaitForClassicInstanceVolumeDetached(instanceC, d, d.Id(), *vol.ID) + if err != nil { + return err + } + } + if *vol.Type == "boot" { + bootvolid = *vol.Volume.ID + } + } + deleteinstanceOptions := &vpcclassicv1.DeleteInstanceOptions{ + ID: &id, + } + _, err = 
instanceC.DeleteInstance(deleteinstanceOptions) + if err != nil { + return err + } + _, err = isWaitForClassicInstanceDelete(instanceC, d, d.Id()) + if err != nil { + return err + } + if _, ok := d.GetOk(isInstanceBootVolume); ok { + _, err = isWaitForClassicVolumeDeleted(instanceC, bootvolid, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + } + return nil +} + +func instanceDelete(d *schema.ResourceData, meta interface{}, id string) error { + instanceC, err := vpcClient(meta) + if err != nil { + return err + } + + cleanDelete := d.Get(isEnableCleanDelete).(bool) + getinsOptions := &vpcv1.GetInstanceOptions{ + ID: &id, + } + _, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Instance (%s): %s\n%s", id, err, response) + } + + bootvolid := "" + + if cleanDelete { + actiontype := "stop" + createinsactoptions := &vpcv1.CreateInstanceActionOptions{ + InstanceID: &id, + Type: &actiontype, + } + _, response, err = instanceC.CreateInstanceAction(createinsactoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Creating Instance Action: %s\n%s", err, response) + } + _, err = isWaitForInstanceActionStop(instanceC, d.Timeout(schema.TimeoutDelete), id, d) + if err != nil { + return err + } + listvolattoptions := &vpcv1.ListInstanceVolumeAttachmentsOptions{ + InstanceID: &id, + } + vols, response, err := instanceC.ListInstanceVolumeAttachments(listvolattoptions) + if err != nil { + return fmt.Errorf("Error Listing volume attachments to the instance: %s\n%s", err, response) + } + + for _, vol := range vols.VolumeAttachments { + if *vol.Type == "data" { + delvolattoptions := &vpcv1.DeleteInstanceVolumeAttachmentOptions{ + InstanceID: &id, + ID: vol.ID, + } + _, err := instanceC.DeleteInstanceVolumeAttachment(delvolattoptions) + if err != nil { + return fmt.Errorf("Error while removing volume Attachment %q for instance %s: %q", *vol.ID, d.Id(), err) + } + _, err = isWaitForInstanceVolumeDetached(instanceC, d, d.Id(), *vol.ID) + if err != nil { + return err + } + } + if *vol.Type == "boot" { + bootvolid = *vol.Volume.ID + } + } + } + deleteinstanceOptions := &vpcv1.DeleteInstanceOptions{ + ID: &id, + } + _, err = instanceC.DeleteInstance(deleteinstanceOptions) + if err != nil { + return err + } + if cleanDelete { + _, err = isWaitForInstanceDelete(instanceC, d, d.Id()) + if err != nil { + return err + } + if _, ok := d.GetOk(isInstanceBootVolume); ok { + _, err = isWaitForVolumeDeleted(instanceC, bootvolid, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + } + } + return nil +} + +func resourceIBMisInstanceDelete(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicInstanceDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := instanceDelete(d, meta, id) + if err != nil { + return err + } + } + + d.SetId("") + return nil +} + +func classicInstanceExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + instanceC, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getinsOptions := &vpcclassicv1.GetInstanceOptions{ + ID: &id, + } + _, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + if response != nil && 
response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + return true, nil +} + +func instanceExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + instanceC, err := vpcClient(meta) + if err != nil { + return false, err + } + getinsOptions := &vpcv1.GetInstanceOptions{ + ID: &id, + } + _, response, err := instanceC.GetInstance(getinsOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + return true, nil +} + +func resourceIBMisInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + if userDetails.generation == 1 { + exists, err := classicInstanceExists(d, meta, id) + return exists, err + } else { + exists, err := instanceExists(d, meta, id) + return exists, err + } +} + +func isWaitForClassicInstanceDelete(instanceC *vpcclassicv1.VpcClassicV1, d *schema.ResourceData, id string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isInstanceDeleting, isInstanceAvailable}, + Target: []string{isInstanceDeleteDone, ""}, + Refresh: func() (interface{}, string, error) { + getinsoptions := &vpcclassicv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return instance, isInstanceDeleteDone, nil + } + return nil, "", fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + if *instance.Status == isInstanceFailed { + return instance, *instance.Status, fmt.Errorf("The instance %s failed to delete: %v", d.Id(), err) + } + return instance, isInstanceDeleting, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isWaitForInstanceDelete(instanceC *vpcv1.VpcV1, d *schema.ResourceData, id string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isInstanceDeleting, isInstanceAvailable}, + Target: []string{isInstanceDeleteDone, ""}, + Refresh: func() (interface{}, string, error) { + getinsoptions := &vpcv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return instance, isInstanceDeleteDone, nil + } + return nil, "", fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + if *instance.Status == isInstanceFailed { + return instance, *instance.Status, fmt.Errorf("The instance %s failed to delete: %v", d.Id(), err) + } + return instance, isInstanceDeleting, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} +func isWaitForClassicInstanceActionStop(instanceC *vpcclassicv1.VpcClassicV1, d *schema.ResourceData, meta interface{}, id string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isInstanceStatusRunning, isInstanceStatusPending, isInstanceActionStatusStopping}, + Target: []string{isInstanceActionStatusStopped, isInstanceStatusFailed, ""}, + Refresh: func() (interface{}, string, error) { + getinsoptions := &vpcclassicv1.GetInstanceOptions{ + ID: &id, + } + 
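+ // Poll the instance and return its current status; the surrounding StateChangeConf waits until isInstanceActionStatusStopped or isInstanceStatusFailed is reached.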
instance, response, err := instanceC.GetInstance(getinsoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + if *instance.Status == isInstanceStatusFailed { + return instance, *instance.Status, fmt.Errorf("The instance %s failed to stop: %v", d.Id(), err) + } + return instance, *instance.Status, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} +func isWaitForInstanceActionStop(instanceC *vpcv1.VpcV1, timeout time.Duration, id string, d *schema.ResourceData) (interface{}, error) { + communicator := make(chan interface{}) + stateConf := &resource.StateChangeConf{ + Pending: []string{isInstanceStatusRunning, isInstanceStatusPending, isInstanceActionStatusStopping}, + Target: []string{isInstanceActionStatusStopped, isInstanceStatusFailed, ""}, + Refresh: func() (interface{}, string, error) { + getinsoptions := &vpcv1.GetInstanceOptions{ + ID: &id, + } + instance, response, err := instanceC.GetInstance(getinsoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + select { + case data := <-communicator: + return nil, "", data.(error) + default: + // no failure reported by isRestartStopAction yet; keep polling + } + if *instance.Status == isInstanceStatusFailed { + // let isRestartStopAction() know that it should stop retrying + close(communicator) + return instance, *instance.Status, fmt.Errorf("The instance %s failed to stop: %v", id, err) + } + return instance, *instance.Status, nil + }, + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + if v, ok := d.GetOk("force_recovery_time"); ok { + forceTimeout := v.(int) + go isRestartStopAction(instanceC, id, d, forceTimeout, communicator) + } + + return stateConf.WaitForState() +} + +func isRestartStopAction(instanceC *vpcv1.VpcV1, id string, d *schema.ResourceData, forceTimeout int, communicator chan interface{}) { + subticker := time.NewTicker(time.Duration(forceTimeout) * time.Minute) + for { + select { + + case <-subticker.C: + log.Println("Instance is still in stopping state, retrying the stop action") + actiontype := "stop" + createinsactoptions := &vpcv1.CreateInstanceActionOptions{ + InstanceID: &id, + Type: &actiontype, + } + _, response, err := instanceC.CreateInstanceAction(createinsactoptions) + if err != nil { + communicator <- fmt.Errorf("Error retrying instance action stop: %s\n%s", err, response) + return + } + case <-communicator: + // the refresh func reached its target; stop retrying + subticker.Stop() + return + + } + } +} + +func isWaitForClassicInstanceVolumeAttached(instanceC *vpcclassicv1.VpcClassicV1, d *schema.ResourceData, id, volID string) (interface{}, error) { + log.Printf("Waiting for instance volume (%s) to be attached.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isInstanceVolumeAttaching}, + Target: []string{isInstanceVolumeAttached, ""}, + Refresh: isClassicInstanceVolumeRefreshFunc(instanceC, id, volID), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicInstanceVolumeRefreshFunc(instanceC *vpcclassicv1.VpcClassicV1, id, volID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getvolattoptions := &vpcclassicv1.GetInstanceVolumeAttachmentOptions{ +
InstanceID: &id, + ID: &volID, + } + vol, response, err := instanceC.GetInstanceVolumeAttachment(getvolattoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Attaching volume: %s\n%s", err, response) + } + + if *vol.Status == isInstanceVolumeAttached { + return vol, isInstanceVolumeAttached, nil + } + + return vol, isInstanceVolumeAttaching, nil + } +} + +func isWaitForInstanceVolumeAttached(instanceC *vpcv1.VpcV1, d *schema.ResourceData, id, volID string) (interface{}, error) { + log.Printf("Waiting for instance volume (%s) to be attached.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isInstanceVolumeAttaching}, + Target: []string{isInstanceVolumeAttached, ""}, + Refresh: isInstanceVolumeRefreshFunc(instanceC, id, volID), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isInstanceVolumeRefreshFunc(instanceC *vpcv1.VpcV1, id, volID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getvolattoptions := &vpcv1.GetInstanceVolumeAttachmentOptions{ + InstanceID: &id, + ID: &volID, + } + vol, response, err := instanceC.GetInstanceVolumeAttachment(getvolattoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Attaching volume: %s\n%s", err, response) + } + + if *vol.Status == isInstanceVolumeAttached { + return vol, isInstanceVolumeAttached, nil + } + + return vol, isInstanceVolumeAttaching, nil + } +} + +func isWaitForClassicInstanceVolumeDetached(instanceC *vpcclassicv1.VpcClassicV1, d *schema.ResourceData, id, volID string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isInstanceVolumeAttached, isInstanceVolumeDetaching}, + Target: []string{isInstanceDeleteDone, ""}, + Refresh: func() (interface{}, string, error) { + getvolattoptions := &vpcclassicv1.GetInstanceVolumeAttachmentOptions{ + InstanceID: &id, + ID: &volID, + } + vol, response, err := instanceC.GetInstanceVolumeAttachment(getvolattoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return vol, isInstanceDeleteDone, nil + } + return nil, "", fmt.Errorf("Error Detaching volume: %s\n%s", err, response) + } + if *vol.Status == isInstanceFailed { + return vol, *vol.Status, fmt.Errorf("The instance %s failed to detach volume %s: %v", d.Id(), volID, err) + } + return vol, isInstanceVolumeDetaching, nil + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isWaitForInstanceVolumeDetached(instanceC *vpcv1.VpcV1, d *schema.ResourceData, id, volID string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isInstanceVolumeAttached, isInstanceVolumeDetaching}, + Target: []string{isInstanceDeleteDone, ""}, + Refresh: func() (interface{}, string, error) { + getvolattoptions := &vpcv1.GetInstanceVolumeAttachmentOptions{ + InstanceID: &id, + ID: &volID, + } + vol, response, err := instanceC.GetInstanceVolumeAttachment(getvolattoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return vol, isInstanceDeleteDone, nil + } + return nil, "", fmt.Errorf("Error Detaching volume: %s\n%s", err, response) + } + if *vol.Status == isInstanceFailed { + return vol, *vol.Status, fmt.Errorf("The instance %s failed to detach volume %s: %v", d.Id(), volID, err) + } + return vol, isInstanceVolumeDetaching, nil + }, + Timeout: d.Timeout(schema.TimeoutUpdate),
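+ // Check the attachment every 10 seconds, after an initial 10 second delay, until it is gone.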
+ Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func resourceIbmIsInstanceInstanceDiskToMap(instanceDisk vpcv1.InstanceDisk) map[string]interface{} { + instanceDiskMap := map[string]interface{}{} + + instanceDiskMap["created_at"] = instanceDisk.CreatedAt.String() + instanceDiskMap["href"] = instanceDisk.Href + instanceDiskMap["id"] = instanceDisk.ID + instanceDiskMap["interface_type"] = instanceDisk.InterfaceType + instanceDiskMap["name"] = instanceDisk.Name + instanceDiskMap["resource_type"] = instanceDisk.ResourceType + instanceDiskMap["size"] = intValue(instanceDisk.Size) + + return instanceDiskMap +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_disk_management.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_disk_management.go new file mode 100644 index 00000000000..8fbffdf061d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_disk_management.go @@ -0,0 +1,158 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMISInstanceDiskManagement() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMisInstanceDiskManagementCreate, + Read: resourceIBMisInstanceDiskManagementRead, + Update: resourceIBMisInstanceDiskManagementUpdate, + Delete: resourceIBMisInstanceDiskManagementDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the instance whose disks have to be managed", + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Description: "Disk information that has to be updated.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The unique identifier for this instance disk.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_instance_disk_management", "name"), + Description: "The user-defined name for this disk. 
The disk will be updated with this new name", + }, + }, + }, + }, + }, + } +} + +func resourceIBMISInstanceDiskManagementValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "name", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + + ibmISInstanceDiskManagementValidator := ResourceValidator{ResourceName: "ibm_is_instance_disk_management", Schema: validateSchema} + return &ibmISInstanceDiskManagementValidator +} + +func resourceIBMisInstanceDiskManagementCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + instance := d.Get("instance").(string) + disks := d.Get("disks") + diskUpdate := disks.([]interface{}) + + for _, disk := range diskUpdate { + diskItem := disk.(map[string]interface{}) + + namestr := diskItem["name"].(string) + diskid := diskItem["id"].(string) + + updateInstanceDiskOptions := &vpcv1.UpdateInstanceDiskOptions{} + updateInstanceDiskOptions.SetInstanceID(instance) + updateInstanceDiskOptions.SetID(diskid) + instanceDiskPatchModel := &vpcv1.InstanceDiskPatch{ + Name: &namestr, + } + + instanceDiskPatch, err := instanceDiskPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstanceDiskPatch: %s", err) + } + updateInstanceDiskOptions.SetInstanceDiskPatch(instanceDiskPatch) + + _, response, err := sess.UpdateInstanceDisk(updateInstanceDiskOptions) + if err != nil { + return fmt.Errorf("Error calling UpdateInstanceDisk: %s %s", err, response) + } + + } + d.SetId(instance) + return resourceIBMisInstanceDiskManagementRead(d, meta) +} + +func resourceIBMisInstanceDiskManagementUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if d.HasChange("disks") && !d.IsNewResource() { + + disks := d.Get("disks") + diskUpdate := disks.([]interface{}) + + for _, disk := range diskUpdate { + diskItem := disk.(map[string]interface{}) + namestr := diskItem["name"].(string) + diskid := diskItem["id"].(string) + + updateInstanceDiskOptions := &vpcv1.UpdateInstanceDiskOptions{} + updateInstanceDiskOptions.SetInstanceID(d.Id()) + updateInstanceDiskOptions.SetID(diskid) + instanceDiskPatchModel := &vpcv1.InstanceDiskPatch{ + Name: &namestr, + } + + instanceDiskPatch, err := instanceDiskPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstanceDiskPatch: %s", err) + } + updateInstanceDiskOptions.SetInstanceDiskPatch(instanceDiskPatch) + + _, _, err = sess.UpdateInstanceDisk(updateInstanceDiskOptions) + if err != nil { + return fmt.Errorf("Error updating instance disk: %s", err) + } + + } + } + return resourceIBMisInstanceDiskManagementRead(d, meta) +} + +func resourceIBMisInstanceDiskManagementDelete(d *schema.ResourceData, meta interface{}) error { + + d.SetId("") + return nil +} + +func resourceIBMisInstanceDiskManagementRead(d *schema.ResourceData, meta interface{}) error { + + d.Set("instance", d.Id()) + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group.go new file mode 100644 index 00000000000..00b326fe776 --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group.go @@ -0,0 +1,565 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "strings" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + // SCALING ... + SCALING = "scaling" + // HEALTHY ... + HEALTHY = "healthy" + // DELETING ... + DELETING = "deleting" +) + +func resourceIBMISInstanceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISInstanceGroupCreate, + Read: resourceIBMISInstanceGroupRead, + Update: resourceIBMISInstanceGroupUpdate, + Delete: resourceIBMISInstanceGroupDelete, + Exists: resourceIBMISInstanceGroupExists, + Importer: &schema.ResourceImporter{}, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group", "name"), + Description: "The user-defined name for this instance group", + }, + + "instance_template": { + Type: schema.TypeString, + Required: true, + Description: "instance template ID", + }, + + "instance_count": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: InvokeValidator("ibm_is_instance_group", "instance_count"), + Description: "The number of instances in the instance group", + }, + + "resource_group": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Resource group ID", + }, + + "subnets": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + Description: "list of subnet IDs", + }, + + "application_port": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group", "application_port"), + Description: "Used by the instance group when scaling up instances to supply the port for the load balancer pool member.", + }, + + "load_balancer": { + Type: schema.TypeString, + Optional: true, + Description: "load balancer ID", + }, + + "load_balancer_pool": { + Type: schema.TypeString, + Optional: true, + Description: "load balancer pool ID", + }, + + "managers": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Description: "list of managers associated with the instance group", + }, + + "instances": { + Type: schema.TypeInt, + Computed: true, + Description: "number of instances in the instance group", + }, + + "vpc": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the VPC the instance group is in", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Instance group status - deleting, healthy, scaling, unhealthy", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_instance_group", "tag")}, + Set: resourceIBMVPCHash, + Description: "List of tags for instance group", + }, + }, + } +} + +func
resourceIBMISInstanceGroupValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "name", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "instance_count", + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "0", + MaxValue: "1000"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "application_port", + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "65535"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISInstanceGroupResourceValidator := ResourceValidator{ResourceName: "ibm_is_instance_group", Schema: validateSchema} + return &ibmISInstanceGroupResourceValidator +} + +func resourceIBMISInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { + + name := d.Get("name").(string) + instanceTemplate := d.Get("instance_template").(string) + + subnets := d.Get("subnets") + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + var subnetIDs []vpcv1.SubnetIdentityIntf + for _, s := range subnets.([]interface{}) { + subnet := s.(string) + subnetIDs = append(subnetIDs, &vpcv1.SubnetIdentity{ID: &subnet}) + } + + instanceGroupOptions := vpcv1.CreateInstanceGroupOptions{ + InstanceTemplate: &vpcv1.InstanceTemplateIdentity{ + ID: &instanceTemplate, + }, + Subnets: subnetIDs, + Name: &name, + } + + var membershipCount int + if v, ok := d.GetOk("instance_count"); ok { + membershipCount = v.(int) + mc := int64(membershipCount) + instanceGroupOptions.MembershipCount = &mc + } + + if v, ok := d.GetOk("load_balancer"); ok { + lbID := v.(string) + instanceGroupOptions.LoadBalancer = &vpcv1.LoadBalancerIdentity{ID: &lbID} + } + + if v, ok := d.GetOk("load_balancer_pool"); ok { + lbPoolID := v.(string) + instanceGroupOptions.LoadBalancerPool = &vpcv1.LoadBalancerPoolIdentity{ID: &lbPoolID} + } + + if v, ok := d.GetOk("resource_group"); ok { + resourceGroup := v.(string) + instanceGroupOptions.ResourceGroup = &vpcv1.ResourceGroupIdentity{ID: &resourceGroup} + } + + if v, ok := d.GetOk("application_port"); ok { + applicationPort := int64(v.(int)) + instanceGroupOptions.ApplicationPort = &applicationPort + } + + instanceGroup, response, err := sess.CreateInstanceGroup(&instanceGroupOptions) + if err != nil || instanceGroup == nil { + return fmt.Errorf("Error Creating InstanceGroup: %s\n%s", err, response) + } + d.SetId(*instanceGroup.ID) + + _, healthError := waitForHealthyInstanceGroup(d.Id(), meta, d.Timeout(schema.TimeoutCreate)) + if healthError != nil { + return healthError + } + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk("tags"); ok || v != "" { + oldList, newList := d.GetChange("tags") + err = UpdateTagsUsingCRN(oldList, newList, meta, *instanceGroup.CRN) + if err != nil { + log.Printf( + "Error on create of instance group (%s) tags: %s", d.Id(), err) + } + } + + return resourceIBMISInstanceGroupRead(d, meta) + +} + +func resourceIBMISInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + var changed
bool + instanceGroupUpdateOptions := vpcv1.UpdateInstanceGroupOptions{} + instanceGroupPatchModel := vpcv1.InstanceGroupPatch{} + + if d.HasChange("tags") { + instanceGroupID := d.Id() + getInstanceGroupOptions := vpcv1.GetInstanceGroupOptions{ID: &instanceGroupID} + instanceGroup, response, err := sess.GetInstanceGroup(&getInstanceGroupOptions) + if err != nil || instanceGroup == nil { + return fmt.Errorf("Error getting instance group: %s\n%s", err, response) + } + oldList, newList := d.GetChange("tags") + err = UpdateTagsUsingCRN(oldList, newList, meta, *instanceGroup.CRN) + if err != nil { + log.Printf( + "Error on update of instance group (%s) tags: %s", d.Id(), err) + } + } + + if d.HasChange("name") { + name := d.Get("name").(string) + instanceGroupPatchModel.Name = &name + changed = true + } + + if d.HasChange("instance_template") { + instanceTemplate := d.Get("instance_template").(string) + instanceGroupPatchModel.InstanceTemplate = &vpcv1.InstanceTemplateIdentity{ + ID: &instanceTemplate, + } + changed = true + } + + if d.HasChange("instance_count") { + membershipCount := d.Get("instance_count").(int) + mc := int64(membershipCount) + instanceGroupPatchModel.MembershipCount = &mc + changed = true + } + + if d.HasChange("subnets") { + subnets := d.Get("subnets") + var subnetIDs []vpcv1.SubnetIdentityIntf + for _, s := range subnets.([]interface{}) { + subnet := s.(string) + subnetIDs = append(subnetIDs, &vpcv1.SubnetIdentity{ID: &subnet}) + } + instanceGroupPatchModel.Subnets = subnetIDs + changed = true + } + + if d.HasChange("application_port") || d.HasChange("load_balancer") || d.HasChange("load_balancer_pool") { + applicationPort := int64(d.Get("application_port").(int)) + lbID := d.Get("load_balancer").(string) + lbPoolID := d.Get("load_balancer_pool").(string) + instanceGroupPatchModel.ApplicationPort = &applicationPort + instanceGroupPatchModel.LoadBalancer = &vpcv1.LoadBalancerIdentity{ID: &lbID} + instanceGroupPatchModel.LoadBalancerPool = &vpcv1.LoadBalancerPoolIdentity{ID: &lbPoolID} + changed = true + } + + if changed { + instanceGroupID := d.Id() + instanceGroupUpdateOptions.ID = &instanceGroupID + instanceGroupPatch, err := instanceGroupPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstanceGroupPatch: %s", err) + } + instanceGroupUpdateOptions.InstanceGroupPatch = instanceGroupPatch + _, response, err := sess.UpdateInstanceGroup(&instanceGroupUpdateOptions) + if err != nil { + return fmt.Errorf("Error Updating InstanceGroup: %s\n%s", err, response) + } + + // wait for instance group health update with update timeout configured. 
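+ // A membership count, subnet, or load balancer change kicks off scaling, so block until the group reports healthy again.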
+ _, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutUpdate)) + if healthError != nil { + return healthError + } + } + return resourceIBMISInstanceGroupRead(d, meta) +} + +func resourceIBMISInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + instanceGroupID := d.Id() + getInstanceGroupOptions := vpcv1.GetInstanceGroupOptions{ID: &instanceGroupID} + instanceGroup, response, err := sess.GetInstanceGroup(&getInstanceGroupOptions) + if err != nil || instanceGroup == nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting InstanceGroup: %s\n%s", err, response) + } + d.Set("name", *instanceGroup.Name) + d.Set("instance_template", *instanceGroup.InstanceTemplate.ID) + d.Set("instances", *instanceGroup.MembershipCount) + d.Set("resource_group", *instanceGroup.ResourceGroup.ID) + if instanceGroup.ApplicationPort != nil { + d.Set("application_port", *instanceGroup.ApplicationPort) + } + + subnets := make([]string, 0) + + for i := 0; i < len(instanceGroup.Subnets); i++ { + subnets = append(subnets, string(*(instanceGroup.Subnets[i].ID))) + } + if instanceGroup.LoadBalancerPool != nil { + d.Set("load_balancer_pool", *instanceGroup.LoadBalancerPool.ID) + } + d.Set("subnets", subnets) + managers := make([]string, 0) + + for i := 0; i < len(instanceGroup.Managers); i++ { + managers = append(managers, string(*(instanceGroup.Managers[i].ID))) + } + d.Set("managers", managers) + + d.Set("status", *instanceGroup.Status) + d.Set("vpc", *instanceGroup.VPC.ID) + tags, err := GetTagsUsingCRN(meta, *instanceGroup.CRN) + if err != nil { + log.Printf( + "Error on get of instance group (%s) tags: %s", d.Id(), err) + } + d.Set("tags", tags) + return nil +} + +func getLBStatus(sess *vpcv1.VpcV1, lbId string) (string, error) { + getlboptions := &vpcv1.GetLoadBalancerOptions{ + ID: &lbId, + } + lb, response, err := sess.GetLoadBalancer(getlboptions) + if err != nil || lb == nil { + return "", fmt.Errorf("Error Getting Load Balancer: %s\n%s", err, response) + } + return *lb.ProvisioningStatus, nil +} + +func resourceIBMISInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + instanceGroupID := d.Id() + + // Before we delete the instance group, we need to + // know whether the attached load balancer is in an active state + + // First, get the instance group + igOpts := vpcv1.GetInstanceGroupOptions{ID: &instanceGroupID} + instanceGroup, response, err := sess.GetInstanceGroup(&igOpts) + if err != nil || instanceGroup == nil { + if response != nil && response.StatusCode == 404 { + return fmt.Errorf("Instance Group with id [%s] not found", instanceGroupID) + } + return fmt.Errorf("Internal Error fetching info for instance group [%s]", instanceGroupID) + } + // In order to delete the instance group, its membership count must first be updated to 0 + zeroMembers := int64(0) + instanceGroupUpdateOptions := vpcv1.UpdateInstanceGroupOptions{} + instanceGroupPatchModel := vpcv1.InstanceGroupPatch{} + + instanceGroupPatchModel.MembershipCount = &zeroMembers + instanceGroupPatch, err := instanceGroupPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstanceGroupPatch: %s", err) + } + + instanceGroupUpdateOptions.ID = &instanceGroupID + instanceGroupUpdateOptions.InstanceGroupPatch = instanceGroupPatch + _, response, err =
sess.UpdateInstanceGroup(&instanceGroupUpdateOptions) + if err != nil { + return fmt.Errorf("Error updating instance group's membership count to 0: %s\n%s", err, response) + } + _, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutUpdate)) + if healthError != nil { + return healthError + } + + // If a load balancer is attached, make sure it is active before deleting the group + if instanceGroup.LoadBalancerPool != nil { + loadBalancerPool := *instanceGroup.LoadBalancerPool.Href + // The sixth component is the Load Balancer ID + loadBalancerID := strings.Split(loadBalancerPool, "/")[5] + + // Now check if the load balancer is in active state or not + lbStatus, err := getLBStatus(sess, loadBalancerID) + if err != nil { + return err + } + if lbStatus != "active" { + log.Printf("Load Balancer [%s] is not active, waiting for it to become active\n", loadBalancerID) + _, err := isWaitForLBAvailable(sess, loadBalancerID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + lbStatus, err = getLBStatus(sess, loadBalancerID) + if err != nil { + return err + } + if lbStatus != "active" { + return fmt.Errorf("LoadBalancer [%s] is not active yet; current Load Balancer status is [%s]", loadBalancerID, lbStatus) + } + } + } + + deleteInstanceGroupOptions := vpcv1.DeleteInstanceGroupOptions{ID: &instanceGroupID} + response, err = sess.DeleteInstanceGroup(&deleteInstanceGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Deleting the InstanceGroup: %s\n%s", err, response) + } + + _, deleteError := waitForInstanceGroupDelete(d, meta) + if deleteError != nil { + return deleteError + } + return nil +} + +func resourceIBMISInstanceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + instanceGroupID := d.Id() + getInstanceGroupOptions := vpcv1.GetInstanceGroupOptions{ID: &instanceGroupID} + _, response, err := sess.GetInstanceGroup(&getInstanceGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error Getting InstanceGroup: %s\n%s", err, response) + } + return true, nil +} + +func waitForHealthyInstanceGroup(instanceGroupID string, meta interface{}, timeout time.Duration) (interface{}, error) { + sess, err := vpcClient(meta) + if err != nil { + return nil, err + } + + getInstanceGroupOptions := vpcv1.GetInstanceGroupOptions{ID: &instanceGroupID} + + healthStateConf := &resource.StateChangeConf{ + Pending: []string{SCALING}, + Target: []string{HEALTHY}, + Refresh: func() (interface{}, string, error) { + instanceGroup, response, err := sess.GetInstanceGroup(&getInstanceGroupOptions) + if err != nil || instanceGroup == nil { + return nil, SCALING, fmt.Errorf("Error Getting InstanceGroup: %s\n%s", err, response) + } + log.Println("Status: ", *instanceGroup.Status) + + if *instanceGroup.Status == "" { + return instanceGroup, SCALING, nil + } + return instanceGroup, *instanceGroup.Status, nil + }, + Timeout: timeout, + Delay: 20 * time.Second, + MinTimeout: 5 * time.Second, + PollInterval: 10 * time.Second, + } + + return healthStateConf.WaitForState() + +} + +func waitForInstanceGroupDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) { + healthStateConf := &resource.StateChangeConf{ + Pending: []string{HEALTHY}, + Target: []string{DELETING}, + Refresh: func() (interface{}, string, error) { + resp, err :=
resourceIBMISInstanceGroupExists(d, meta) + if resp { + return resp, HEALTHY, nil + } + return resp, DELETING, err + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 20 * time.Second, + MinTimeout: 5 * time.Second, + PollInterval: 10 * time.Second, + } + + return healthStateConf.WaitForState() + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_manager.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_manager.go new file mode 100644 index 00000000000..c9f073f3099 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_manager.go @@ -0,0 +1,455 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMISInstanceGroupManager() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISInstanceGroupManagerCreate, + Read: resourceIBMISInstanceGroupManagerRead, + Update: resourceIBMISInstanceGroupManagerUpdate, + Delete: resourceIBMISInstanceGroupManagerDelete, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager", "name"), + Description: "instance group manager name", + }, + + "enable_manager": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "enable instance group manager", + }, + + "instance_group": { + Type: schema.TypeString, + Required: true, + Description: "instance group ID", + }, + + "manager_type": { + Type: schema.TypeString, + Optional: true, + Default: "autoscale", + ForceNew: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager", "manager_type"), + Description: "The type of instance group manager.", + }, + + "aggregation_window": { + Type: schema.TypeInt, + Optional: true, + Default: 90, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager", "aggregation_window"), + Description: "The time window in seconds to aggregate metrics prior to evaluation", + }, + + "cooldown": { + Type: schema.TypeInt, + Optional: true, + Default: 300, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager", "cooldown"), + Description: "The duration of time in seconds to pause further scale actions after scaling has taken place", + }, + + "max_membership_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager", "max_membership_count"), + Description: "The maximum number of members in a managed instance group", + }, + + "min_membership_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager", "min_membership_count"), + Description: "The minimum number of members in a managed instance group", + }, + + "manager_id": { + Type: schema.TypeString, + Computed: true, + Description: "instance group manager ID", + }, + + "policies": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Description: "list of policies associated with the instance group manager", + }, + + "actions": {
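+ // Computed list of the scheduled actions attached to this manager, populated on read.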
+ Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_action": { + Type: schema.TypeString, + Computed: true, + }, + "instance_group_manager_action_name": { + Type: schema.TypeString, + Computed: true, + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceIBMISInstanceGroupManagerValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + managerType := "autoscale, scheduled" + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "name", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9]|[0-9][-a-z0-9]*([a-z]|[-a-z][-a-z0-9]*[a-z0-9]))$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "manager_type", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: managerType}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "aggregation_window", + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "90", + MaxValue: "600"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "cooldown", + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "120", + MaxValue: "3600"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "max_membership_count", + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "1000"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "min_membership_count", + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "1000"}) + + ibmISInstanceGroupManagerResourceValidator := ResourceValidator{ResourceName: "ibm_is_instance_group_manager", Schema: validateSchema} + return &ibmISInstanceGroupManagerResourceValidator +} + +func resourceIBMISInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { + + instanceGroupID := d.Get("instance_group").(string) + managerType := d.Get("manager_type").(string) + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + if managerType == "scheduled" { + instanceGroupManagerPrototype := vpcv1.InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype{} + instanceGroupManagerPrototype.ManagerType = &managerType + + if v, ok := d.GetOk("name"); ok { + name := v.(string) + instanceGroupManagerPrototype.Name = &name + } + + if v, ok := d.GetOk("enable_manager"); ok { + enableManager := v.(bool) + instanceGroupManagerPrototype.ManagementEnabled = &enableManager + } + + createInstanceGroupManagerOptions := vpcv1.CreateInstanceGroupManagerOptions{ + InstanceGroupID: &instanceGroupID, + InstanceGroupManagerPrototype: &instanceGroupManagerPrototype, + } + instanceGroupManagerIntf, response, err := sess.CreateInstanceGroupManager(&createInstanceGroupManagerOptions) + if err != nil || instanceGroupManagerIntf == nil { + return fmt.Errorf("Error creating InstanceGroup manager: %s\n%s", err, response) + } + instanceGroupManager := instanceGroupManagerIntf.(*vpcv1.InstanceGroupManager) + d.SetId(fmt.Sprintf("%s/%s", instanceGroupID, *instanceGroupManager.ID)) + + } else { + + instanceGroupManagerPrototype := vpcv1.InstanceGroupManagerPrototypeInstanceGroupManagerAutoScalePrototype{} + instanceGroupManagerPrototype.ManagerType = &managerType + + if 
v, ok := d.GetOk("name"); ok { + name := v.(string) + instanceGroupManagerPrototype.Name = &name + } + + if v, ok := d.GetOk("enable_manager"); ok { + enableManager := v.(bool) + instanceGroupManagerPrototype.ManagementEnabled = &enableManager + } + + if v, ok := d.GetOk("aggregation_window"); ok { + aggregationWindow := int64(v.(int)) + instanceGroupManagerPrototype.AggregationWindow = &aggregationWindow + } + + if v, ok := d.GetOk("cooldown"); ok { + cooldown := int64(v.(int)) + instanceGroupManagerPrototype.Cooldown = &cooldown + } + + if v, ok := d.GetOk("min_membership_count"); ok { + minMembershipCount := int64(v.(int)) + instanceGroupManagerPrototype.MinMembershipCount = &minMembershipCount + } + + if v, ok := d.GetOk("max_membership_count"); ok { + maxMembershipCount := int64(v.(int)) + instanceGroupManagerPrototype.MaxMembershipCount = &maxMembershipCount + } + + createInstanceGroupManagerOptions := vpcv1.CreateInstanceGroupManagerOptions{ + InstanceGroupID: &instanceGroupID, + InstanceGroupManagerPrototype: &instanceGroupManagerPrototype, + } + + _, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutCreate)) + if healthError != nil { + return healthError + } + + instanceGroupManagerIntf, response, err := sess.CreateInstanceGroupManager(&createInstanceGroupManagerOptions) + if err != nil || instanceGroupManagerIntf == nil { + return fmt.Errorf("Error creating InstanceGroup manager: %s\n%s", err, response) + } + instanceGroupManager := instanceGroupManagerIntf.(*vpcv1.InstanceGroupManager) + + d.SetId(fmt.Sprintf("%s/%s", instanceGroupID, *instanceGroupManager.ID)) + + } + + return resourceIBMISInstanceGroupManagerRead(d, meta) + +} + +func resourceIBMISInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + managerType := d.Get("manager_type").(string) + + var changed bool + updateInstanceGroupManagerOptions := vpcv1.UpdateInstanceGroupManagerOptions{} + instanceGroupManagerPatchModel := &vpcv1.InstanceGroupManagerPatch{} + + if d.HasChange("name") { + name := d.Get("name").(string) + instanceGroupManagerPatchModel.Name = &name + changed = true + } + if managerType == "autoscale" { + if d.HasChange("aggregation_window") { + aggregationWindow := int64(d.Get("aggregation_window").(int)) + instanceGroupManagerPatchModel.AggregationWindow = &aggregationWindow + changed = true + } + + if d.HasChange("cooldown") { + cooldown := int64(d.Get("cooldown").(int)) + instanceGroupManagerPatchModel.Cooldown = &cooldown + changed = true + } + + if d.HasChange("max_membership_count") { + maxMembershipCount := int64(d.Get("max_membership_count").(int)) + instanceGroupManagerPatchModel.MaxMembershipCount = &maxMembershipCount + changed = true + } + + if d.HasChange("min_membership_count") { + minMembershipCount := int64(d.Get("min_membership_count").(int)) + instanceGroupManagerPatchModel.MinMembershipCount = &minMembershipCount + changed = true + } + } + + if d.HasChange("enable_manager") { + enableManager := d.Get("enable_manager").(bool) + instanceGroupManagerPatchModel.ManagementEnabled = &enableManager + changed = true + } + + if changed { + parts, err := idParts(d.Id()) + if err != nil { + return err + } + instanceGroupID := parts[0] + instanceGroupManagerID := parts[1] + updateInstanceGroupManagerOptions.ID = &instanceGroupManagerID + updateInstanceGroupManagerOptions.InstanceGroupID = &instanceGroupID + instanceGroupManagerPatch, err := 
instanceGroupManagerPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstanceGroupManagerPatch: %s", err) + } + updateInstanceGroupManagerOptions.InstanceGroupManagerPatch = instanceGroupManagerPatch + + _, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutUpdate)) + if healthError != nil { + return healthError + } + + _, response, err := sess.UpdateInstanceGroupManager(&updateInstanceGroupManagerOptions) + if err != nil { + return fmt.Errorf("Error updating InstanceGroup manager: %s\n%s", err, response) + } + } + return resourceIBMISInstanceGroupManagerRead(d, meta) +} + +func resourceIBMISInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + instanceGroupID := parts[0] + instanceGroupManagerID := parts[1] + + getInstanceGroupManagerOptions := vpcv1.GetInstanceGroupManagerOptions{ + ID: &instanceGroupManagerID, + InstanceGroupID: &instanceGroupID, + } + instanceGroupManagerIntf, response, err := sess.GetInstanceGroupManager(&getInstanceGroupManagerOptions) + if err != nil || instanceGroupManagerIntf == nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting InstanceGroup Manager: %s\n%s", err, response) + } + instanceGroupManager := instanceGroupManagerIntf.(*vpcv1.InstanceGroupManager) + + managerType := *instanceGroupManager.ManagerType + + if managerType == "scheduled" { + d.Set("name", *instanceGroupManager.Name) + d.Set("enable_manager", *instanceGroupManager.ManagementEnabled) + d.Set("manager_id", instanceGroupManagerID) + d.Set("instance_group", instanceGroupID) + d.Set("manager_type", *instanceGroupManager.ManagerType) + + } else { + + d.Set("name", *instanceGroupManager.Name) + d.Set("aggregation_window", *instanceGroupManager.AggregationWindow) + d.Set("cooldown", *instanceGroupManager.Cooldown) + d.Set("max_membership_count", *instanceGroupManager.MaxMembershipCount) + d.Set("min_membership_count", *instanceGroupManager.MinMembershipCount) + d.Set("enable_manager", *instanceGroupManager.ManagementEnabled) + d.Set("manager_id", instanceGroupManagerID) + d.Set("instance_group", instanceGroupID) + d.Set("manager_type", *instanceGroupManager.ManagerType) + + } + + actions := make([]map[string]interface{}, 0) + if instanceGroupManager.Actions != nil { + for _, action := range instanceGroupManager.Actions { + actn := map[string]interface{}{ + "instance_group_manager_action": *action.ID, + "instance_group_manager_action_name": *action.Name, + "resource_type": *action.ResourceType, + } + actions = append(actions, actn) + } + } + d.Set("actions", actions) + + policies := make([]string, 0) + + for i := 0; i < len(instanceGroupManager.Policies); i++ { + policies = append(policies, string(*(instanceGroupManager.Policies[i].ID))) + } + d.Set("policies", policies) + return nil +} + +func resourceIBMISInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + instanceGroupID := parts[0] + instanceGroupManagerID := parts[1] + + deleteInstanceGroupManagerOptions := vpcv1.DeleteInstanceGroupManagerOptions{ + ID: &instanceGroupManagerID, + InstanceGroupID: &instanceGroupID, + } + + _, healthError := waitForHealthyInstanceGroup(instanceGroupID, 
meta, d.Timeout(schema.TimeoutDelete)) + if healthError != nil { + return healthError + } + + response, err := sess.DeleteInstanceGroupManager(&deleteInstanceGroupManagerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Deleting the InstanceGroup Manager: %s\n%s", err, response) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_manager_action.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_manager_action.go new file mode 100644 index 00000000000..34d7a8821a5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_manager_action.go @@ -0,0 +1,536 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/go-openapi/strfmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMISInstanceGroupManagerAction() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISInstanceGroupManagerActionCreate, + Read: resourceIBMISInstanceGroupManagerActionRead, + Update: resourceIBMISInstanceGroupManagerActionUpdate, + Delete: resourceIBMISInstanceGroupManagerActionDelete, + Exists: resourceIBMISInstanceGroupManagerActionExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager_action", "name"), + Description: "instance group manager action name", + }, + + "action_id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance group manager action ID", + }, + + "instance_group": { + Type: schema.TypeString, + Required: true, + Description: "instance group ID", + }, + + "instance_group_manager": { + Type: schema.TypeString, + Required: true, + Description: "Instance group manager ID of type scheduled", + }, + + "run_at": { + Type: schema.TypeString, + Optional: true, + Description: "The date and time the scheduled action will run.", + ConflictsWith: []string{"cron_spec"}, + }, + + "cron_spec": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager_action", "cron_spec"), + Description: "The cron specification for a recurring scheduled action. 
Actions can be applied a maximum of one time within a 5 min period.", + ConflictsWith: []string{"run_at"}, + }, + + "membership_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager_action", "membership_count"), + Description: "The number of members the instance group should have at the scheduled time.", + ConflictsWith: []string{"target_manager", "max_membership_count", "min_membership_count"}, + }, + + "max_membership_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager_action", "max_membership_count"), + Description: "The maximum number of members in a managed instance group", + ConflictsWith: []string{"membership_count"}, + RequiredWith: []string{"target_manager", "min_membership_count"}, + }, + + "min_membership_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager_action", "min_membership_count"), + Description: "The minimum number of members in a managed instance group", + ConflictsWith: []string{"membership_count"}, + }, + + "target_manager": { + Type: schema.TypeString, + Optional: true, + Description: "The unique identifier for this instance group manager of type autoscale.", + ConflictsWith: []string{"membership_count"}, + RequiredWith: []string{"min_membership_count", "max_membership_count"}, + }, + + "target_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Instance group manager name of type autoscale.", + }, + + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the instance group action: `active`: Action is ready to be run; `completed`: Action was completed successfully; `failed`: Action could not be completed successfully; `incompatible`: Action parameters are not compatible with the group or manager; `omitted`: Action was not applied because this action's manager was disabled.", + }, + "updated_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the instance group manager action was modified.", + }, + "action_type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of action for the instance group.", + }, + + "last_applied_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time the scheduled action was last applied. If empty the action has never been applied.", + }, + "next_run_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time the scheduled action will next run. 
If empty the system is currently calculating the next run time.", + }, + "auto_delete": { + Type: schema.TypeBool, + Computed: true, + }, + "auto_delete_timeout": { + Type: schema.TypeInt, + Computed: true, + }, + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the instance group manager action was created.", + }, + }, + } +} + +func resourceIBMISInstanceGroupManagerActionValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "name", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9]|[0-9][-a-z0-9]*([a-z]|[-a-z][-a-z0-9]*[a-z0-9]))$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "max_membership_count", + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "1000"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "min_membership_count", + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "1000"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "cron_spec", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Regexp: `^((((\d+,)+\d+|([\d\*]+(\/|-)\d+)|\d+|\*) ?){5,7})$`, + MinValueLength: 9, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "membership_count", + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "0", + MaxValue: "100"}) + + ibmISInstanceGroupManagerResourceValidator := ResourceValidator{ResourceName: "ibm_is_instance_group_manager_action", Schema: validateSchema} + return &ibmISInstanceGroupManagerResourceValidator +} + +func resourceIBMISInstanceGroupManagerActionCreate(d *schema.ResourceData, meta interface{}) error { + instanceGroupID := d.Get("instance_group").(string) + instancegroupmanagerscheduledID := d.Get("instance_group_manager").(string) + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + instanceGroupManagerActionOptions := vpcv1.CreateInstanceGroupManagerActionOptions{} + instanceGroupManagerActionOptions.InstanceGroupID = &instanceGroupID + instanceGroupManagerActionOptions.InstanceGroupManagerID = &instancegroupmanagerscheduledID + + instanceGroupManagerActionPrototype := vpcv1.InstanceGroupManagerActionPrototype{} + + if v, ok := d.GetOk("name"); ok { + name := v.(string) + instanceGroupManagerActionPrototype.Name = &name + } + + if v, ok := d.GetOk("run_at"); ok { + runat := v.(string) + datetime, err := strfmt.ParseDateTime(runat) + if err != nil { + return fmt.Errorf("error converting run_at to datetime format: %s", err) + } + instanceGroupManagerActionPrototype.RunAt = &datetime + } + + if v, ok := d.GetOk("cron_spec"); ok { + cronSpec := v.(string) + instanceGroupManagerActionPrototype.CronSpec = &cronSpec + } + + if v, ok := d.GetOk("membership_count"); ok { + membershipCount := int64(v.(int)) + instanceGroupManagerScheduledActionGroupPrototype := vpcv1.InstanceGroupManagerScheduledActionGroupPrototype{} + instanceGroupManagerScheduledActionGroupPrototype.MembershipCount = &membershipCount + instanceGroupManagerActionPrototype.Group = &instanceGroupManagerScheduledActionGroupPrototype + } + + instanceGroupManagerScheduledActionByManagerManager :=
vpcv1.InstanceGroupManagerScheduledActionManagerPrototype{} + if v, ok := d.GetOk("min_membership_count"); ok { + minmembershipCount := int64(v.(int)) + instanceGroupManagerScheduledActionByManagerManager.MinMembershipCount = &minmembershipCount + } + + if v, ok := d.GetOk("max_membership_count"); ok { + maxmembershipCount := int64(v.(int)) + instanceGroupManagerScheduledActionByManagerManager.MaxMembershipCount = &maxmembershipCount + } + + if v, ok := d.GetOk("target_manager"); ok { + instanceGroupManagerAutoScale := v.(string) + instanceGroupManagerScheduledActionByManagerManager.ID = &instanceGroupManagerAutoScale + instanceGroupManagerActionPrototype.Manager = &instanceGroupManagerScheduledActionByManagerManager + } + + instanceGroupManagerActionOptions.InstanceGroupManagerActionPrototype = &instanceGroupManagerActionPrototype + + _, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutCreate)) + if healthError != nil { + return healthError + } + + instanceGroupManagerActionIntf, response, err := sess.CreateInstanceGroupManagerAction(&instanceGroupManagerActionOptions) + if err != nil || instanceGroupManagerActionIntf == nil { + return fmt.Errorf("error creating InstanceGroup manager Action: %s\n%s", err, response) + } + instanceGroupManagerAction := instanceGroupManagerActionIntf.(*vpcv1.InstanceGroupManagerAction) + d.SetId(fmt.Sprintf("%s/%s/%s", instanceGroupID, instancegroupmanagerscheduledID, *instanceGroupManagerAction.ID)) + + return resourceIBMISInstanceGroupManagerActionRead(d, meta) + +} + +func resourceIBMISInstanceGroupManagerActionUpdate(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + var changed bool + instanceGroupManagerActionPatchModel := &vpcv1.InstanceGroupManagerActionPatch{} + + if d.HasChange("name") { + name := d.Get("name").(string) + instanceGroupManagerActionPatchModel.Name = &name + changed = true + } + + if d.HasChange("cron_spec") { + cronspec := d.Get("cron_spec").(string) + instanceGroupManagerActionPatchModel.CronSpec = &cronspec + changed = true + } + + if d.HasChange("run_at") { + runat := d.Get("run_at").(string) + datetime, err := strfmt.ParseDateTime(runat) + if err != nil { + return fmt.Errorf("error converting run_at to datetime format: %s", err) + } + instanceGroupManagerActionPatchModel.RunAt = &datetime + changed = true + } + + if d.HasChange("membership_count") { + membershipCount := int64(d.Get("membership_count").(int)) + instanceGroupManagerScheduledActionGroupPatch := vpcv1.InstanceGroupManagerActionGroupPatch{} + instanceGroupManagerScheduledActionGroupPatch.MembershipCount = &membershipCount + instanceGroupManagerActionPatchModel.Group = &instanceGroupManagerScheduledActionGroupPatch + changed = true + } + + instanceGroupManagerScheduledActionByManagerPatchManager := vpcv1.InstanceGroupManagerActionManagerPatch{} + + if d.HasChange("min_membership_count") { + minmembershipCount := int64(d.Get("min_membership_count").(int)) + instanceGroupManagerScheduledActionByManagerPatchManager.MinMembershipCount = &minmembershipCount + changed = true + } + + if d.HasChange("max_membership_count") { + maxmembershipCount := int64(d.Get("max_membership_count").(int)) + instanceGroupManagerScheduledActionByManagerPatchManager.MaxMembershipCount = &maxmembershipCount + changed = true + } + instanceGroupManagerActionPatchModel.Manager = &instanceGroupManagerScheduledActionByManagerPatchManager + + if changed { + + parts, err := idParts(d.Id())
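+ // The resource ID is a composite of the form instanceGroupID/instanceGroupManagerID/actionID.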
+ if err != nil { + return err + } + + instanceGroupID := parts[0] + instancegroupmanagerscheduledID := parts[1] + instanceGroupManagerActionID := parts[2] + + updateInstanceGroupManagerActionOptions := &vpcv1.UpdateInstanceGroupManagerActionOptions{} + updateInstanceGroupManagerActionOptions.InstanceGroupID = &instanceGroupID + updateInstanceGroupManagerActionOptions.InstanceGroupManagerID = &instancegroupmanagerscheduledID + updateInstanceGroupManagerActionOptions.ID = &instanceGroupManagerActionID + + instanceGroupManagerActionPatch, err := instanceGroupManagerActionPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("error calling asPatch for instanceGroupManagerActionPatch: %s", err) + } + updateInstanceGroupManagerActionOptions.InstanceGroupManagerActionPatch = instanceGroupManagerActionPatch + + _, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutUpdate)) + if healthError != nil { + return healthError + } + _, response, err := sess.UpdateInstanceGroupManagerAction(updateInstanceGroupManagerActionOptions) + if err != nil { + return fmt.Errorf("error updating InstanceGroup manager action: %s\n%s", err, response) + } + } + return resourceIBMISInstanceGroupManagerActionRead(d, meta) +} + +func resourceIBMISInstanceGroupManagerActionRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + instanceGroupID := parts[0] + instancegroupmanagerscheduledID := parts[1] + instanceGroupManagerActionID := parts[2] + + getInstanceGroupManagerActionOptions := &vpcv1.GetInstanceGroupManagerActionOptions{ + InstanceGroupID: &instanceGroupID, + InstanceGroupManagerID: &instancegroupmanagerscheduledID, + ID: &instanceGroupManagerActionID, + } + + instanceGroupManagerActionIntf, response, err := sess.GetInstanceGroupManagerAction(getInstanceGroupManagerActionOptions) + if err != nil || instanceGroupManagerActionIntf == nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("error Getting InstanceGroup Manager Action: %s\n%s", err, response) + } + instanceGroupManagerAction := instanceGroupManagerActionIntf.(*vpcv1.InstanceGroupManagerAction) + if err = d.Set("auto_delete", *instanceGroupManagerAction.AutoDelete); err != nil { + return fmt.Errorf("error setting auto_delete: %s", err) + } + + if err = d.Set("auto_delete_timeout", intValue(instanceGroupManagerAction.AutoDeleteTimeout)); err != nil { + return fmt.Errorf("error setting auto_delete_timeout: %s", err) + } + if err = d.Set("created_at", instanceGroupManagerAction.CreatedAt.String()); err != nil { + return fmt.Errorf("error setting created_at: %s", err) + } + + if err = d.Set("action_id", *instanceGroupManagerAction.ID); err != nil { + return fmt.Errorf("error setting action_id: %s", err) + } + + if err = d.Set("name", *instanceGroupManagerAction.Name); err != nil { + return fmt.Errorf("error setting name: %s", err) + } + if err = d.Set("resource_type", *instanceGroupManagerAction.ResourceType); err != nil { + return fmt.Errorf("error setting resource_type: %s", err) + } + if err = d.Set("status", *instanceGroupManagerAction.Status); err != nil { + return fmt.Errorf("error setting status: %s", err) + } + if err = d.Set("updated_at", instanceGroupManagerAction.UpdatedAt.String()); err != nil { + return fmt.Errorf("error setting updated_at: %s", err) + } + if err = d.Set("action_type",
*instanceGroupManagerAction.ActionType); err != nil { + return fmt.Errorf("error setting action_type: %s", err) + } + + if instanceGroupManagerAction.CronSpec != nil { + if err = d.Set("cron_spec", *instanceGroupManagerAction.CronSpec); err != nil { + return fmt.Errorf("error setting cron_spec: %s", err) + } + } + + if instanceGroupManagerAction.LastAppliedAt != nil { + if err = d.Set("last_applied_at", instanceGroupManagerAction.LastAppliedAt.String()); err != nil { + return fmt.Errorf("error setting last_applied_at: %s", err) + } + } + + if instanceGroupManagerAction.NextRunAt != nil { + if err = d.Set("next_run_at", instanceGroupManagerAction.NextRunAt.String()); err != nil { + return fmt.Errorf("error setting next_run_at: %s", err) + } + } + + instanceGroupManagerScheduledActionGroupGroup := instanceGroupManagerAction.Group + if instanceGroupManagerScheduledActionGroupGroup != nil && instanceGroupManagerScheduledActionGroupGroup.MembershipCount != nil { + d.Set("membership_count", intValue(instanceGroupManagerScheduledActionGroupGroup.MembershipCount)) + } + instanceGroupManagerScheduledActionManagerManagerInt := instanceGroupManagerAction.Manager + if instanceGroupManagerScheduledActionManagerManagerInt != nil { + instanceGroupManagerScheduledActionManagerManager := instanceGroupManagerScheduledActionManagerManagerInt.(*vpcv1.InstanceGroupManagerScheduledActionManager) + if instanceGroupManagerScheduledActionManagerManager != nil && instanceGroupManagerScheduledActionManagerManager.ID != nil { + + if instanceGroupManagerScheduledActionManagerManager.MaxMembershipCount != nil { + d.Set("max_membership_count", intValue(instanceGroupManagerScheduledActionManagerManager.MaxMembershipCount)) + } + d.Set("min_membership_count", intValue(instanceGroupManagerScheduledActionManagerManager.MinMembershipCount)) + d.Set("target_manager_name", *instanceGroupManagerScheduledActionManagerManager.Name) + d.Set("target_manager", *instanceGroupManagerScheduledActionManagerManager.ID) + } + } + + return nil +} + +func resourceIBMISInstanceGroupManagerActionDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + instanceGroupID := parts[0] + instancegroupmanagerscheduledID := parts[1] + instanceGroupManagerActionID := parts[2] + + deleteInstanceGroupManagerActionOptions := &vpcv1.DeleteInstanceGroupManagerActionOptions{} + deleteInstanceGroupManagerActionOptions.InstanceGroupID = &instanceGroupID + deleteInstanceGroupManagerActionOptions.InstanceGroupManagerID = &instancegroupmanagerscheduledID + deleteInstanceGroupManagerActionOptions.ID = &instanceGroupManagerActionID + + _, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutDelete)) + if healthError != nil { + return healthError + } + + response, err := sess.DeleteInstanceGroupManagerAction(deleteInstanceGroupManagerActionOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("error Deleting the InstanceGroup Manager Action: %s\n%s", err, response) + } + return nil +} + +func resourceIBMISInstanceGroupManagerActionExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + instanceGroupID := parts[0] + instancegroupmanagerscheduledID := parts[1] + 
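The resource above identifies an action by the same three-part composite ID (`instanceGroupID/instanceGroupManagerID/actionID`) that the read, delete, and exists handlers parse with `idParts`. For context, a configuration sketch: the `cron_spec`, `run_at`, `membership_count`, `min_membership_count`, `max_membership_count`, and `target_manager` argument names all appear in the handlers above, but the `instance_group` and `instance_group_manager` argument names are assumed here by analogy with the policy resource later in this diff, and all `var.*` values are placeholders.

```hcl
# Sketch only; parent-reference argument names are assumed, IDs are placeholders.
resource "ibm_is_instance_group_manager_action" "scheduled_scale" {
  name                   = "nightly-scale-up"
  instance_group         = var.instance_group_id         # assumed argument name
  instance_group_manager = var.instance_group_manager_id # assumed argument name

  # Recurring action; a one-shot action would instead set
  # run_at = "2022-06-01T06:00:00Z" (RFC 3339, parsed via strfmt.ParseDateTime).
  cron_spec = "0 6 * * *"

  # Manager-scoped form: retargets an autoscale manager's bounds. A group-scoped
  # action would set membership_count instead of these three arguments.
  target_manager       = var.autoscale_manager_id
  min_membership_count = 2
  max_membership_count = 10
}
```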
instanceGroupManagerActionID := parts[2] + + getInstanceGroupManagerActionOptions := &vpcv1.GetInstanceGroupManagerActionOptions{ + InstanceGroupID: &instanceGroupID, + InstanceGroupManagerID: &instancegroupmanagerscheduledID, + ID: &instanceGroupManagerActionID, + } + + _, response, err := sess.GetInstanceGroupManagerAction(getInstanceGroupManagerActionOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("error Getting InstanceGroup Manager Action: %s\n%s", err, response) + } + + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_manager_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_manager_policy.go new file mode 100644 index 00000000000..f37d3844a0a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_manager_policy.go @@ -0,0 +1,320 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMISInstanceGroupManagerPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISInstanceGroupManagerPolicyCreate, + Read: resourceIBMISInstanceGroupManagerPolicyRead, + Update: resourceIBMISInstanceGroupManagerPolicyUpdate, + Delete: resourceIBMISInstanceGroupManagerPolicyDelete, + Exists: resourceIBMISInstanceGroupManagerPolicyExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager_policy", "name"), + Description: "instance group manager policy name", + }, + + "instance_group": { + Type: schema.TypeString, + Required: true, + Description: "instance group ID", + }, + + "instance_group_manager": { + Type: schema.TypeString, + Required: true, + Description: "Instance group manager ID", + }, + + "metric_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager_policy", "metric_type"), + Description: "The type of metric to be evaluated", + }, + + "metric_value": { + Type: schema.TypeInt, + Required: true, + Description: "The metric value to be evaluated", + }, + + "policy_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_manager_policy", "policy_type"), + Description: "The type of Policy for the Instance Group", + }, + + "policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "The Policy ID", + }, + }, + } +} + +func resourceIBMISInstanceGroupManagerPolicyValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + metricTypes := "cpu,memory,network_in,network_out" + policyType := "target" + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "name", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "metric_type", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: metricTypes}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: 
"policy_type", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: policyType}) + + ibmISInstanceGroupManagerPolicyResourceValidator := ResourceValidator{ResourceName: "ibm_is_instance_group_manager_policy", Schema: validateSchema} + return &ibmISInstanceGroupManagerPolicyResourceValidator +} + +func resourceIBMISInstanceGroupManagerPolicyCreate(d *schema.ResourceData, meta interface{}) error { + instanceGroupID := d.Get("instance_group").(string) + instanceGroupManagerID := d.Get("instance_group_manager").(string) + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + instanceGroupManagerPolicyPrototype := vpcv1.InstanceGroupManagerPolicyPrototype{} + + name := d.Get("name").(string) + metricType := d.Get("metric_type").(string) + metricValue := int64(d.Get("metric_value").(int)) + policyType := d.Get("policy_type").(string) + + instanceGroupManagerPolicyPrototype.Name = &name + instanceGroupManagerPolicyPrototype.MetricType = &metricType + instanceGroupManagerPolicyPrototype.MetricValue = &metricValue + instanceGroupManagerPolicyPrototype.PolicyType = &policyType + + createInstanceGroupManagerPolicyOptions := vpcv1.CreateInstanceGroupManagerPolicyOptions{ + InstanceGroupID: &instanceGroupID, + InstanceGroupManagerID: &instanceGroupManagerID, + InstanceGroupManagerPolicyPrototype: &instanceGroupManagerPolicyPrototype, + } + + isInsGrpKey := "Instance_Group_Key_" + instanceGroupID + ibmMutexKV.Lock(isInsGrpKey) + defer ibmMutexKV.Unlock(isInsGrpKey) + + _, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutCreate)) + if healthError != nil { + return healthError + } + + data, response, err := sess.CreateInstanceGroupManagerPolicy(&createInstanceGroupManagerPolicyOptions) + if err != nil || data == nil { + return fmt.Errorf("Error Creating InstanceGroup Manager Policy: %s\n%s", err, response) + } + instanceGroupManagerPolicy := data.(*vpcv1.InstanceGroupManagerPolicy) + + d.SetId(fmt.Sprintf("%s/%s/%s", instanceGroupID, instanceGroupManagerID, *instanceGroupManagerPolicy.ID)) + + return resourceIBMISInstanceGroupManagerPolicyRead(d, meta) + +} + +func resourceIBMISInstanceGroupManagerPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + var changed bool + updateInstanceGroupManagerPolicyOptions := vpcv1.UpdateInstanceGroupManagerPolicyOptions{} + instanceGroupManagerPolicyPatchModel := &vpcv1.InstanceGroupManagerPolicyPatch{} + if d.HasChange("name") { + name := d.Get("name").(string) + instanceGroupManagerPolicyPatchModel.Name = &name + changed = true + } + + if d.HasChange("metric_type") { + metricType := d.Get("metric_type").(string) + instanceGroupManagerPolicyPatchModel.MetricType = &metricType + changed = true + } + + if d.HasChange("metric_value") { + metricValue := int64(d.Get("metric_value").(int)) + instanceGroupManagerPolicyPatchModel.MetricValue = &metricValue + changed = true + } + + if changed { + parts, err := idParts(d.Id()) + if err != nil { + return err + } + instanceGroupID := parts[0] + instanceGroupManagerID := parts[1] + instanceGroupManagerPolicyID := parts[2] + + updateInstanceGroupManagerPolicyOptions.ID = &instanceGroupManagerPolicyID + updateInstanceGroupManagerPolicyOptions.InstanceGroupID = &instanceGroupID + updateInstanceGroupManagerPolicyOptions.InstanceGroupManagerID = &instanceGroupManagerID + + isInsGrpKey := "Instance_Group_Key_" + instanceGroupID + 
ibmMutexKV.Lock(isInsGrpKey)
+		defer ibmMutexKV.Unlock(isInsGrpKey)
+
+		_, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutUpdate))
+		if healthError != nil {
+			return healthError
+		}
+
+		instanceGroupManagerPolicyPatch, err := instanceGroupManagerPolicyPatchModel.AsPatch()
+		if err != nil {
+			return fmt.Errorf("Error calling asPatch for InstanceGroupManagerPolicyPatch: %s", err)
+		}
+		updateInstanceGroupManagerPolicyOptions.InstanceGroupManagerPolicyPatch = instanceGroupManagerPolicyPatch
+
+		_, response, err := sess.UpdateInstanceGroupManagerPolicy(&updateInstanceGroupManagerPolicyOptions)
+		if err != nil {
+			return fmt.Errorf("Error Updating InstanceGroup Manager Policy: %s\n%s", err, response)
+		}
+	}
+	return resourceIBMISInstanceGroupManagerPolicyRead(d, meta)
+}
+
+func resourceIBMISInstanceGroupManagerPolicyRead(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+	instanceGroupID := parts[0]
+	instanceGroupManagerID := parts[1]
+	instanceGroupManagerPolicyID := parts[2]
+
+	getInstanceGroupManagerPolicyOptions := vpcv1.GetInstanceGroupManagerPolicyOptions{
+		ID:                     &instanceGroupManagerPolicyID,
+		InstanceGroupID:        &instanceGroupID,
+		InstanceGroupManagerID: &instanceGroupManagerID,
+	}
+	data, response, err := sess.GetInstanceGroupManagerPolicy(&getInstanceGroupManagerPolicyOptions)
+	if err != nil || data == nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error Getting InstanceGroup Manager Policy: %s\n%s", err, response)
+	}
+	instanceGroupManagerPolicy := data.(*vpcv1.InstanceGroupManagerPolicy)
+	d.Set("name", *instanceGroupManagerPolicy.Name)
+	d.Set("metric_value", instanceGroupManagerPolicy.MetricValue)
+	d.Set("metric_type", instanceGroupManagerPolicy.MetricType)
+	d.Set("policy_type", instanceGroupManagerPolicy.PolicyType)
+	d.Set("policy_id", instanceGroupManagerPolicyID)
+	d.Set("instance_group", instanceGroupID)
+	d.Set("instance_group_manager", instanceGroupManagerID)
+
+	return nil
+}
+
+func resourceIBMISInstanceGroupManagerPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+	instanceGroupID := parts[0]
+	instanceGroupManagerID := parts[1]
+	instanceGroupManagerPolicyID := parts[2]
+
+	deleteInstanceGroupManagerPolicyOptions := vpcv1.DeleteInstanceGroupManagerPolicyOptions{
+		ID:                     &instanceGroupManagerPolicyID,
+		InstanceGroupManagerID: &instanceGroupManagerID,
+		InstanceGroupID:        &instanceGroupID,
+	}
+
+	isInsGrpKey := "Instance_Group_Key_" + instanceGroupID
+	ibmMutexKV.Lock(isInsGrpKey)
+	defer ibmMutexKV.Unlock(isInsGrpKey)
+
+	_, healthError := waitForHealthyInstanceGroup(instanceGroupID, meta, d.Timeout(schema.TimeoutDelete))
+	if healthError != nil {
+		return healthError
+	}
+
+	response, err := sess.DeleteInstanceGroupManagerPolicy(&deleteInstanceGroupManagerPolicyOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error Deleting the InstanceGroup Manager Policy: %s\n%s", err, response)
+	}
+	return nil
+}
+
+func resourceIBMISInstanceGroupManagerPolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return false, err
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return false, err
+	}
+
+	if len(parts) != 3 {
+		return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of instanceGroupID/instanceGroupManagerID/instanceGroupManagerPolicyID", d.Id())
+	}
+	instanceGroupID := parts[0]
+	instanceGroupManagerID := parts[1]
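For reference, a configuration sketch for this policy resource. Every argument name is taken from the schema above, and the allowed values come from the validator lists (`metricTypes` and `policyType`); the `var.*` IDs are placeholders.

```hcl
resource "ibm_is_instance_group_manager_policy" "cpu_target" {
  name                   = "cpu-target-policy"
  instance_group         = var.instance_group_id         # placeholder
  instance_group_manager = var.instance_group_manager_id # placeholder
  metric_type            = "cpu"    # allowed: cpu, memory, network_in, network_out
  metric_value           = 70
  policy_type            = "target" # the only value the validator accepts
}

# Import uses the same composite ID the Exists handler checks:
#   terraform import ibm_is_instance_group_manager_policy.cpu_target \
#     <instanceGroupID>/<instanceGroupManagerID>/<instanceGroupManagerPolicyID>
```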
instanceGroupManagerPolicyID := parts[2] + + getInstanceGroupManagerPolicyOptions := vpcv1.GetInstanceGroupManagerPolicyOptions{ + ID: &instanceGroupManagerPolicyID, + InstanceGroupManagerID: &instanceGroupManagerID, + InstanceGroupID: &instanceGroupID, + } + + _, response, err := sess.GetInstanceGroupManagerPolicy(&getInstanceGroupManagerPolicyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error Getting InstanceGroup Manager Policy: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_membership.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_membership.go new file mode 100644 index 00000000000..e68c063e64e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_group_membership.go @@ -0,0 +1,297 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isInstanceGroupMembership = "instance_group_membership" + isInstanceGroup = "instance_group" + isInstanceGroupMembershipName = "name" + isInstanceGroupMemershipActionDelete = "action_delete" + isInstanceGroupMemershipDeleteInstanceOnMembershipDelete = "delete_instance_on_membership_delete" + isInstanceGroupMemershipInstance = "instance" + isInstanceGroupMemershipInstanceName = "name" + isInstanceGroupMemershipInstanceTemplate = "instance_template" + isInstanceGroupMemershipInstanceTemplateName = "name" + isInstanceGroupMembershipCrn = "crn" + isInstanceGroupMembershipVirtualServerInstance = "virtual_server_instance" + isInstanceGroupMembershipLoadBalancerPoolMember = "load_balancer_pool_member" + isInstanceGroupMembershipStatus = "status" +) + +func resourceIBMISInstanceGroupMembership() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISInstanceGroupMembershipUpdate, + Read: resourceIBMISInstanceGroupMembershipRead, + Update: resourceIBMISInstanceGroupMembershipUpdate, + Delete: resourceIBMISInstanceGroupMembershipDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + isInstanceGroup: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_membership", isInstanceGroup), + Description: "The instance group identifier.", + }, + isInstanceGroupMembership: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_membership", isInstanceGroupMembership), + Description: "The unique identifier for this instance group membership.", + }, + isInstanceGroupMembershipName: { + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_instance_group_membership", isInstanceGroupMembershipName), + Description: "The user-defined name for this instance group membership. Names must be unique within the instance group.", + }, + isInstanceGroupMemershipActionDelete: { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "The delete flag for this instance group membership. 
Must be set to true to delete instance group membership.", + }, + isInstanceGroupMemershipDeleteInstanceOnMembershipDelete: { + Type: schema.TypeBool, + Computed: true, + Description: "If set to true, when deleting the membership the instance will also be deleted.", + }, + isInstanceGroupMemershipInstance: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceGroupMembershipCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this virtual server instance.", + }, + isInstanceGroupMembershipVirtualServerInstance: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this virtual server instance.", + }, + isInstanceGroupMemershipInstanceName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this virtual server instance (and default system hostname).", + }, + }, + }, + }, + isInstanceGroupMemershipInstanceTemplate: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceGroupMembershipCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this instance template.", + }, + isInstanceGroupMemershipInstanceTemplate: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this instance template.", + }, + isInstanceGroupMemershipInstanceTemplateName: { + Type: schema.TypeString, + Computed: true, + Description: "The unique user-defined name for this instance template.", + }, + }, + }, + }, + isInstanceGroupMembershipLoadBalancerPoolMember: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this load balancer pool member.", + }, + isInstanceGroupMembershipStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the instance group membership- `deleting`: Membership is deleting dependent resources- `failed`: Membership was unable to maintain dependent resources- `healthy`: Membership is active and serving in the group- `pending`: Membership is waiting for dependent resources- `unhealthy`: Membership has unhealthy dependent resources.", + }, + }, + } +} + +func resourceIBMISInstanceGroupMembershipValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isInstanceGroupMembershipName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isInstanceGroup, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^[-0-9a-z_]+$`, + MinValueLength: 1, + MaxValueLength: 64}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isInstanceGroupMembership, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^[-0-9a-z_]+$`, + MinValueLength: 1, + MaxValueLength: 64}) + ibmISInstanceGroupMembershipResourceValidator := ResourceValidator{ResourceName: "ibm_is_instance_group_membership", Schema: validateSchema} + return &ibmISInstanceGroupMembershipResourceValidator +} + +func resourceIBMISInstanceGroupMembershipUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + instanceGroupID := 
d.Get(isInstanceGroup).(string) + instanceGroupMembershipID := d.Get(isInstanceGroupMembership).(string) + + getInstanceGroupMembershipOptions := vpcv1.GetInstanceGroupMembershipOptions{ + ID: &instanceGroupMembershipID, + InstanceGroupID: &instanceGroupID, + } + + instanceGroupMembership, response, err := sess.GetInstanceGroupMembership(&getInstanceGroupMembershipOptions) + if err != nil || instanceGroupMembership == nil { + return fmt.Errorf("Error Getting InstanceGroup Membership: %s\n%s", err, response) + } + d.SetId(fmt.Sprintf("%s/%s", instanceGroupID, instanceGroupMembershipID)) + + if v, ok := d.GetOk(isInstanceGroupMemershipActionDelete); ok { + actionDelete := v.(bool) + if actionDelete { + return resourceIBMISInstanceGroupMembershipDelete(d, meta) + } + } + + if v, ok := d.GetOk(isInstanceGroupMembershipName); ok { + name := v.(string) + if name != *instanceGroupMembership.Name { + + updateInstanceGroupMembershipOptions := vpcv1.UpdateInstanceGroupMembershipOptions{} + instanceGroupMembershipPatchModel := &vpcv1.InstanceGroupMembershipPatch{} + instanceGroupMembershipPatchModel.Name = &name + + updateInstanceGroupMembershipOptions.ID = &instanceGroupMembershipID + updateInstanceGroupMembershipOptions.InstanceGroupID = &instanceGroupID + instanceGroupMembershipPatch, err := instanceGroupMembershipPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstanceGroupMembershipPatch: %s", err) + } + updateInstanceGroupMembershipOptions.InstanceGroupMembershipPatch = instanceGroupMembershipPatch + _, response, err := sess.UpdateInstanceGroupMembership(&updateInstanceGroupMembershipOptions) + if err != nil { + return fmt.Errorf("Error updating InstanceGroup Membership: %s\n%s", err, response) + } + } + } + return resourceIBMISInstanceGroupMembershipRead(d, meta) +} + +func resourceIBMISInstanceGroupMembershipRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + instanceGroupID := parts[0] + instanceGroupMembershipID := parts[1] + + getInstanceGroupMembershipOptions := vpcv1.GetInstanceGroupMembershipOptions{ + ID: &instanceGroupMembershipID, + InstanceGroupID: &instanceGroupID, + } + instanceGroupMembership, response, err := sess.GetInstanceGroupMembership(&getInstanceGroupMembershipOptions) + if err != nil || instanceGroupMembership == nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting InstanceGroup Membership: %s\n%s", err, response) + } + d.Set(isInstanceGroupMemershipDeleteInstanceOnMembershipDelete, *instanceGroupMembership.DeleteInstanceOnMembershipDelete) + d.Set(isInstanceGroupMembership, *instanceGroupMembership.ID) + d.Set(isInstanceGroupMembershipStatus, *instanceGroupMembership.Status) + + instances := make([]map[string]interface{}, 0) + if instanceGroupMembership.Instance != nil { + instance := map[string]interface{}{ + isInstanceGroupMembershipCrn: *instanceGroupMembership.Instance.CRN, + isInstanceGroupMembershipVirtualServerInstance: *instanceGroupMembership.Instance.ID, + isInstanceGroupMemershipInstanceName: *instanceGroupMembership.Instance.Name, + } + instances = append(instances, instance) + } + d.Set(isInstanceGroupMemershipInstance, instances) + + instance_templates := make([]map[string]interface{}, 0) + if instanceGroupMembership.InstanceTemplate != nil { + instance_template := map[string]interface{}{ + 
isInstanceGroupMembershipCrn: *instanceGroupMembership.InstanceTemplate.CRN, + isInstanceGroupMemershipInstanceTemplate: *instanceGroupMembership.InstanceTemplate.ID, + isInstanceGroupMemershipInstanceTemplateName: *instanceGroupMembership.InstanceTemplate.Name, + } + instance_templates = append(instance_templates, instance_template) + } + d.Set(isInstanceGroupMemershipInstanceTemplate, instance_templates) + + if instanceGroupMembership.PoolMember != nil && instanceGroupMembership.PoolMember.ID != nil { + d.Set(isInstanceGroupMembershipLoadBalancerPoolMember, *instanceGroupMembership.PoolMember.ID) + } + return nil +} + +func resourceIBMISInstanceGroupMembershipDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + instanceGroupID := parts[0] + instanceGroupMembershipID := parts[1] + + deleteInstanceGroupMembershipOptions := vpcv1.DeleteInstanceGroupMembershipOptions{ + ID: &instanceGroupMembershipID, + InstanceGroupID: &instanceGroupID, + } + response, err := sess.DeleteInstanceGroupMembership(&deleteInstanceGroupMembershipOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Deleting the InstanceGroup Membership: %s\n%s", err, response) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_template.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_template.go new file mode 100644 index 00000000000..36566753ee3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_instance_template.go @@ -0,0 +1,889 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
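Before the instance template file continues, a usage sketch for the membership resource above. Note that `Create` is wired to the update handler, so the resource adopts an existing membership rather than creating one, and setting `action_delete = true` routes an apply through the delete handler; whether the backing instance is removed as well depends on the computed `delete_instance_on_membership_delete` flag. IDs below are placeholders.

```hcl
# Sketch only; IDs are placeholders.
resource "ibm_is_instance_group_membership" "member" {
  instance_group            = var.instance_group_id            # placeholder
  instance_group_membership = var.instance_group_membership_id # placeholder
  name                      = "member-0"
  action_delete             = false # true deletes the membership on apply
}
```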
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isInstanceTemplateBootVolume = "boot_volume" + isInstanceTemplateVolAttVolAutoDelete = "auto_delete" + isInstanceTemplateVolAttVol = "volume" + isInstanceTemplateVolAttachmentName = "name" + isInstanceTemplateVolAttVolPrototype = "volume_prototype" + isInstanceTemplateVolAttVolCapacity = "capacity" + isInstanceTemplateVolAttVolIops = "iops" + isInstanceTemplateVolAttVolName = "name" + isInstanceTemplateVolAttVolBillingTerm = "billing_term" + isInstanceTemplateVolAttVolEncryptionKey = "encryption_key" + isInstanceTemplateVolAttVolType = "type" + isInstanceTemplateVolAttVolProfile = "profile" + isInstanceTemplateProvisioning = "provisioning" + isInstanceTemplateProvisioningDone = "done" + isInstanceTemplateAvailable = "available" + isInstanceTemplateDeleting = "deleting" + isInstanceTemplateDeleteDone = "done" + isInstanceTemplateFailed = "failed" + isInstanceTemplateBootName = "name" + isInstanceTemplateBootSize = "size" + isInstanceTemplateBootIOPS = "iops" + isInstanceTemplateBootEncryption = "encryption" + isInstanceTemplateBootProfile = "profile" + isInstanceTemplateVolumeAttaching = "attaching" + isInstanceTemplateVolumeAttached = "attached" + isInstanceTemplateVolumeDetaching = "detaching" + isInstanceTemplatePlacementTarget = "placement_target" + isInstanceTemplateDedicatedHost = "dedicated_host" + isInstanceTemplateDedicatedHostGroup = "dedicated_host_group" + isInstanceTemplateResourceType = "resource_type" + isInstanceTemplateVolumeDeleteOnInstanceDelete = "delete_volume_on_instance_delete" +) + +func resourceIBMISInstanceTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMisInstanceTemplateCreate, + Read: resourceIBMisInstanceTemplateRead, + Update: resourceIBMisInstanceTemplateUpdate, + Delete: resourceIBMisInstanceTemplateDelete, + Exists: resourceIBMisInstanceTemplateExists, + Importer: &schema.ResourceImporter{}, + + CustomizeDiff: customdiff.All( + customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceVolumeAttachmentValidate(diff) + }), + ), + + Schema: map[string]*schema.Schema{ + isInstanceTemplateName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: validateISName, + Description: "Instance Template name", + }, + + isInstanceTemplateVPC: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "VPC id", + }, + + isInstanceTemplateZone: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "Zone name", + }, + + isInstanceTemplateProfile: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "Profile info", + }, + + isInstanceTemplateKeys: { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + DiffSuppressFunc: applyOnce, + Description: "SSH key Ids for the instance template", + }, + + isInstanceTemplateVolumeAttachments: { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplateVolumeDeleteOnInstanceDelete: { + Type: schema.TypeBool, + Required: true, + 
Description: "If set to true, when deleting the instance the volume will also be deleted.", + }, + isInstanceTemplateVolAttachmentName: { + Type: schema.TypeString, + Required: true, + Description: "The user-defined name for this volume attachment.", + }, + isInstanceTemplateVolAttVol: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The unique identifier for this volume.", + }, + isInstanceTemplateVolAttVolPrototype: { + Type: schema.TypeList, + MaxItems: 1, + MinItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplateVolAttVolIops: { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The maximum I/O operations per second (IOPS) for the volume.", + }, + isInstanceTemplateVolAttVolProfile: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The globally unique name for the volume profile to use for this volume.", + }, + isInstanceTemplateVolAttVolCapacity: { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating volumes may expand in the future.", + }, + isInstanceTemplateVolAttVolEncryptionKey: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The CRN of the [Key Protect Root Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.", + }, + }, + }, + }, + }, + }, + }, + + isInstanceTemplatePrimaryNetworkInterface: { + Type: schema.TypeList, + MinItems: 1, + MaxItems: 1, + Required: true, + Description: "Primary Network interface info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplateNicAllowIPSpoofing: { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + isInstanceTemplateNicName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + isInstanceTemplateNicPrimaryIpv4Address: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + isInstanceTemplateNicSecurityGroups: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + isInstanceTemplateNicSubnet: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + isInstanceTemplateNetworkInterfaces: { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplateNicAllowIPSpoofing: { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + isInstanceTemplateNicName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + isInstanceTemplateNicPrimaryIpv4Address: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + isInstanceTemplateNicSecurityGroups: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + isInstanceTemplateNicSubnet: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + isInstanceTemplateUserData: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "User data given for the instance", + }, + + isInstanceTemplateImage: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: 
"image name", + }, + + isInstanceTemplateBootVolume: { + Type: schema.TypeList, + DiffSuppressFunc: applyOnce, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isInstanceTemplateBootName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + isInstanceTemplateBootEncryption: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + isInstanceTemplateBootSize: { + Type: schema.TypeInt, + Computed: true, + }, + isInstanceTemplateBootProfile: { + Type: schema.TypeString, + Computed: true, + }, + isInstanceTemplateVolumeDeleteOnInstanceDelete: { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + }, + }, + }, + + isPlacementTargetDedicatedHost: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{isPlacementTargetDedicatedHostGroup}, + Description: "Unique Identifier of the Dedicated Host where the instance will be placed", + }, + + isPlacementTargetDedicatedHostGroup: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{isPlacementTargetDedicatedHost}, + Description: "Unique Identifier of the Dedicated Host Group where the instance will be placed", + }, + + isInstanceTemplatePlacementTarget: { + Type: schema.TypeList, + Computed: true, + Description: "The placement restrictions to use for the virtual server instance.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this dedicated host.", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this dedicated host.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this dedicated host.", + }, + }, + }, + }, + + isInstanceTemplateResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "Instance template resource group", + }, + }, + } +} + +func resourceIBMisInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { + profile := d.Get(isInstanceTemplateProfile).(string) + name := d.Get(isInstanceTemplateName).(string) + vpcID := d.Get(isInstanceTemplateVPC).(string) + zone := d.Get(isInstanceTemplateZone).(string) + image := d.Get(isInstanceTemplateImage).(string) + + err := instanceTemplateCreate(d, meta, profile, name, vpcID, zone, image) + if err != nil { + return err + } + + return resourceIBMisInstanceTemplateRead(d, meta) +} + +func resourceIBMisInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { + ID := d.Id() + err := instanceTemplateGet(d, meta, ID) + if err != nil { + return err + } + return nil +} + +func resourceIBMisInstanceTemplateDelete(d *schema.ResourceData, meta interface{}) error { + + ID := d.Id() + + err := instanceTemplateDelete(d, meta, ID) + if err != nil { + return err + } + return nil +} + +func resourceIBMisInstanceTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + + err := instanceTemplateUpdate(d, meta) + if err != nil { + return err + } + return resourceIBMisInstanceTemplateRead(d, meta) +} + +func resourceIBMisInstanceTemplateExists(d *schema.ResourceData, meta interface{}) (bool, error) { + ID := d.Id() + ok, err := instanceTemplateExists(d, meta, ID) + if err != nil { + return false, err + } + return ok, err +} + +func instanceTemplateCreate(d *schema.ResourceData, meta interface{}, profile, name, vpcID, zone, image string) error { + sess, 
err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	instanceproto := &vpcv1.InstanceTemplatePrototype{
+		Image: &vpcv1.ImageIdentity{
+			ID: &image,
+		},
+		Zone: &vpcv1.ZoneIdentity{
+			Name: &zone,
+		},
+		Profile: &vpcv1.InstanceProfileIdentity{
+			Name: &profile,
+		},
+		Name: &name,
+		VPC: &vpcv1.VPCIdentity{
+			ID: &vpcID,
+		},
+	}
+
+	if dHostIdInf, ok := d.GetOk(isPlacementTargetDedicatedHost); ok {
+		dHostIdStr := dHostIdInf.(string)
+		dHostPlacementTarget := &vpcv1.InstancePlacementTargetPrototypeDedicatedHostIdentity{
+			ID: &dHostIdStr,
+		}
+		instanceproto.PlacementTarget = dHostPlacementTarget
+	}
+
+	if dHostGrpIdInf, ok := d.GetOk(isPlacementTargetDedicatedHostGroup); ok {
+		dHostGrpIdStr := dHostGrpIdInf.(string)
+		dHostGrpPlacementTarget := &vpcv1.InstancePlacementTargetPrototypeDedicatedHostGroupIdentity{
+			ID: &dHostGrpIdStr,
+		}
+		instanceproto.PlacementTarget = dHostGrpPlacementTarget
+	}
+
+	// BOOT VOLUME ATTACHMENT for instance template
+	if boot, ok := d.GetOk(isInstanceTemplateBootVolume); ok {
+		bootvol := boot.([]interface{})[0].(map[string]interface{})
+		var volTemplate = &vpcv1.VolumePrototypeInstanceByImageContext{}
+		name, ok := bootvol[isInstanceTemplateBootName]
+		namestr := name.(string)
+		if ok {
+			volTemplate.Name = &namestr
+		}
+
+		volcap := 100
+		volcapint64 := int64(volcap)
+		volprof := "general-purpose"
+		volTemplate.Capacity = &volcapint64
+		volTemplate.Profile = &vpcv1.VolumeProfileIdentity{
+			Name: &volprof,
+		}
+
+		if encryption, ok := bootvol[isInstanceTemplateBootEncryption]; ok {
+			bootEncryption := encryption.(string)
+			if bootEncryption != "" {
+				volTemplate.EncryptionKey = &vpcv1.EncryptionKeyIdentity{
+					CRN: &bootEncryption,
+				}
+			}
+		}
+
+		var deleteVolumeOption bool
+		if deleteVolume, ok := bootvol[isInstanceTemplateVolumeDeleteOnInstanceDelete]; ok {
+			deleteVolumeOption = deleteVolume.(bool)
+		}
+
+		instanceproto.BootVolumeAttachment = &vpcv1.VolumeAttachmentPrototypeInstanceByImageContext{
+			DeleteVolumeOnInstanceDelete: &deleteVolumeOption,
+			Volume:                       volTemplate,
+		}
+	}
+
+	// Handle volume attachments
+	if volsintf, ok := d.GetOk(isInstanceTemplateVolumeAttachments); ok {
+		vols := volsintf.([]interface{})
+		var intfs []vpcv1.VolumeAttachmentPrototypeInstanceContext
+		for _, resource := range vols {
+			vol := resource.(map[string]interface{})
+			volInterface := &vpcv1.VolumeAttachmentPrototypeInstanceContext{}
+			deleteVolBool := vol[isInstanceTemplateVolumeDeleteOnInstanceDelete].(bool)
+			volInterface.DeleteVolumeOnInstanceDelete = &deleteVolBool
+			attachmentnamestr := vol[isInstanceTemplateVolAttachmentName].(string)
+			volInterface.Name = &attachmentnamestr
+			volIdStr := vol[isInstanceTemplateVolAttVol].(string)
+
+			if volIdStr != "" {
+				volInterface.Volume = &vpcv1.VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentity{
+					ID: &volIdStr,
+				}
+			} else {
+				newvolintf := vol[isInstanceTemplateVolAttVolPrototype].([]interface{})[0]
+				newvol := newvolintf.(map[string]interface{})
+				profileName := newvol[isInstanceTemplateVolAttVolProfile].(string)
+				capacity := int64(newvol[isInstanceTemplateVolAttVolCapacity].(int))
+
+				volPrototype := &vpcv1.VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext{
+					Profile: &vpcv1.VolumeProfileIdentity{
+						Name: &profileName,
+					},
+					Capacity: &capacity,
+				}
+				iops := int64(newvol[isInstanceTemplateVolAttVolIops].(int))
+				encryptionKey := newvol[isInstanceTemplateVolAttVolEncryptionKey].(string)
+
+				if iops != 0 {
+					volPrototype.Iops = &iops
+				}
+
+				if encryptionKey != "" {
volPrototype.EncryptionKey = &vpcv1.EncryptionKeyIdentity{ + CRN: &encryptionKey, + } + } + volInterface.Volume = volPrototype + } + + intfs = append(intfs, *volInterface) + } + instanceproto.VolumeAttachments = intfs + } + + // Handle primary network interface + if primnicintf, ok := d.GetOk(isInstanceTemplatePrimaryNetworkInterface); ok { + primnic := primnicintf.([]interface{})[0].(map[string]interface{}) + subnetintf, _ := primnic[isInstanceTemplateNicSubnet] + subnetintfstr := subnetintf.(string) + var primnicobj = &vpcv1.NetworkInterfacePrototype{} + primnicobj.Subnet = &vpcv1.SubnetIdentity{ + ID: &subnetintfstr, + } + + if name, ok := primnic[isInstanceTemplateNicName]; ok { + namestr := name.(string) + if namestr != "" { + primnicobj.Name = &namestr + } + } + allowIPSpoofing, ok := primnic[isInstanceTemplateNicAllowIPSpoofing] + allowIPSpoofingbool := allowIPSpoofing.(bool) + if ok { + primnicobj.AllowIPSpoofing = &allowIPSpoofingbool + } + + secgrpintf, ok := primnic[isInstanceTemplateNicSecurityGroups] + if ok { + secgrpSet := secgrpintf.(*schema.Set) + if secgrpSet.Len() != 0 { + var secgrpobjs = make([]vpcv1.SecurityGroupIdentityIntf, secgrpSet.Len()) + for i, secgrpIntf := range secgrpSet.List() { + secgrpIntfstr := secgrpIntf.(string) + secgrpobjs[i] = &vpcv1.SecurityGroupIdentity{ + ID: &secgrpIntfstr, + } + } + primnicobj.SecurityGroups = secgrpobjs + } + } + instanceproto.PrimaryNetworkInterface = primnicobj + + if IPAddress, ok := primnic[isInstanceTemplateNicPrimaryIpv4Address]; ok { + if PrimaryIpv4Address := IPAddress.(string); PrimaryIpv4Address != "" { + primnicobj.PrimaryIpv4Address = &PrimaryIpv4Address + } + } + } + + // Handle additional network interface + if nicsintf, ok := d.GetOk(isInstanceTemplateNetworkInterfaces); ok { + nics := nicsintf.([]interface{}) + var intfs []vpcv1.NetworkInterfacePrototype + for _, resource := range nics { + nic := resource.(map[string]interface{}) + nwInterface := &vpcv1.NetworkInterfacePrototype{} + subnetintf, _ := nic[isInstanceTemplateNicSubnet] + subnetintfstr := subnetintf.(string) + nwInterface.Subnet = &vpcv1.SubnetIdentity{ + ID: &subnetintfstr, + } + + name, ok := nic[isInstanceTemplateNicName] + namestr := name.(string) + if ok && namestr != "" { + nwInterface.Name = &namestr + } + allowIPSpoofing, ok := nic[isInstanceTemplateNicAllowIPSpoofing] + allowIPSpoofingbool := allowIPSpoofing.(bool) + if ok { + nwInterface.AllowIPSpoofing = &allowIPSpoofingbool + } + secgrpintf, ok := nic[isInstanceTemplateNicSecurityGroups] + if ok { + secgrpSet := secgrpintf.(*schema.Set) + if secgrpSet.Len() != 0 { + var secgrpobjs = make([]vpcv1.SecurityGroupIdentityIntf, secgrpSet.Len()) + for i, secgrpIntf := range secgrpSet.List() { + secgrpIntfstr := secgrpIntf.(string) + secgrpobjs[i] = &vpcv1.SecurityGroupIdentity{ + ID: &secgrpIntfstr, + } + } + nwInterface.SecurityGroups = secgrpobjs + } + } + if IPAddress, ok := nic[isInstanceTemplateNicPrimaryIpv4Address]; ok { + if PrimaryIpv4Address := IPAddress.(string); PrimaryIpv4Address != "" { + nwInterface.PrimaryIpv4Address = &PrimaryIpv4Address + } + } + intfs = append(intfs, *nwInterface) + } + instanceproto.NetworkInterfaces = intfs + } + + // Handle SSH Keys + keySet := d.Get(isInstanceTemplateKeys).(*schema.Set) + if keySet.Len() != 0 { + keyobjs := make([]vpcv1.KeyIdentityIntf, keySet.Len()) + for i, key := range keySet.List() { + keystr := key.(string) + keyobjs[i] = &vpcv1.KeyIdentity{ + ID: &keystr, + } + } + instanceproto.Keys = keyobjs + } + + // Handle user data + if 
userdata, ok := d.GetOk(isInstanceTemplateUserData); ok { + userdatastr := userdata.(string) + instanceproto.UserData = &userdatastr + } + + // handle resource group + if grp, ok := d.GetOk(isInstanceTemplateResourceGroup); ok { + grpstr := grp.(string) + instanceproto.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &grpstr, + } + + } + + options := &vpcv1.CreateInstanceTemplateOptions{ + InstanceTemplatePrototype: instanceproto, + } + + instanceIntf, response, err := sess.CreateInstanceTemplate(options) + if err != nil { + return fmt.Errorf("Error creating InstanceTemplate: %s\n%s", err, response) + } + instance := instanceIntf.(*vpcv1.InstanceTemplate) + d.SetId(*instance.ID) + return nil +} + +func instanceTemplateGet(d *schema.ResourceData, meta interface{}, ID string) error { + instanceC, err := vpcClient(meta) + if err != nil { + return err + } + getinsOptions := &vpcv1.GetInstanceTemplateOptions{ + ID: &ID, + } + instanceIntf, response, err := instanceC.GetInstanceTemplate(getinsOptions) + if err != nil { + return fmt.Errorf("Error Getting Instance template: %s\n%s", err, response) + } + instance := instanceIntf.(*vpcv1.InstanceTemplate) + d.Set(isInstanceTemplateName, *instance.Name) + if instance.Profile != nil { + instanceProfileIntf := instance.Profile + identity := instanceProfileIntf.(*vpcv1.InstanceProfileIdentity) + d.Set(isInstanceTemplateProfile, *identity.Name) + } + + var placementTargetMap map[string]interface{} + if instance.PlacementTarget != nil { + placementTargetMap = resourceIbmIsInstanceTemplateInstancePlacementTargetPrototypeToMap(*instance.PlacementTarget.(*vpcv1.InstancePlacementTargetPrototype)) + } + if err = d.Set(isInstanceTemplatePlacementTarget, []map[string]interface{}{placementTargetMap}); err != nil { + return fmt.Errorf("Error setting placement_target: %s", err) + } + + if instance.PrimaryNetworkInterface != nil { + primaryNicList := make([]map[string]interface{}, 0) + currentPrimNic := map[string]interface{}{} + currentPrimNic[isInstanceTemplateNicName] = *instance.PrimaryNetworkInterface.Name + if instance.PrimaryNetworkInterface.PrimaryIpv4Address != nil { + currentPrimNic[isInstanceTemplateNicPrimaryIpv4Address] = *instance.PrimaryNetworkInterface.PrimaryIpv4Address + } + subInf := instance.PrimaryNetworkInterface.Subnet + subnetIdentity := subInf.(*vpcv1.SubnetIdentity) + currentPrimNic[isInstanceTemplateNicSubnet] = *subnetIdentity.ID + if instance.PrimaryNetworkInterface.AllowIPSpoofing != nil { + currentPrimNic[isInstanceTemplateNicAllowIPSpoofing] = *instance.PrimaryNetworkInterface.AllowIPSpoofing + } + if len(instance.PrimaryNetworkInterface.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(instance.PrimaryNetworkInterface.SecurityGroups); i++ { + secGrpInf := instance.PrimaryNetworkInterface.SecurityGroups[i] + subnetIdentity := secGrpInf.(*vpcv1.SecurityGroupIdentity) + secgrpList = append(secgrpList, string(*subnetIdentity.ID)) + } + currentPrimNic[isInstanceTemplateNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + primaryNicList = append(primaryNicList, currentPrimNic) + d.Set(isInstanceTemplatePrimaryNetworkInterface, primaryNicList) + } + + if instance.NetworkInterfaces != nil { + interfacesList := make([]map[string]interface{}, 0) + for _, intfc := range instance.NetworkInterfaces { + currentNic := map[string]interface{}{} + currentNic[isInstanceTemplateNicName] = *intfc.Name + if intfc.PrimaryIpv4Address != nil { + currentNic[isInstanceTemplateNicPrimaryIpv4Address] = 
*intfc.PrimaryIpv4Address + } + if intfc.AllowIPSpoofing != nil { + currentNic[isInstanceTemplateNicAllowIPSpoofing] = *intfc.AllowIPSpoofing + } + subInf := intfc.Subnet + subnetIdentity := subInf.(*vpcv1.SubnetIdentity) + currentNic[isInstanceTemplateNicSubnet] = *subnetIdentity.ID + if len(intfc.SecurityGroups) != 0 { + secgrpList := []string{} + for i := 0; i < len(intfc.SecurityGroups); i++ { + secGrpInf := intfc.SecurityGroups[i] + subnetIdentity := secGrpInf.(*vpcv1.SecurityGroupIdentity) + secgrpList = append(secgrpList, string(*subnetIdentity.ID)) + } + currentNic[isInstanceTemplateNicSecurityGroups] = newStringSet(schema.HashString, secgrpList) + } + interfacesList = append(interfacesList, currentNic) + } + d.Set(isInstanceTemplateNetworkInterfaces, interfacesList) + } + + if instance.Image != nil { + imageInf := instance.Image + imageIdentity := imageInf.(*vpcv1.ImageIdentity) + d.Set(isInstanceTemplateImage, *imageIdentity.ID) + } + vpcInf := instance.VPC + vpcRef := vpcInf.(*vpcv1.VPCIdentity) + d.Set(isInstanceTemplateVPC, vpcRef.ID) + zoneInf := instance.Zone + zone := zoneInf.(*vpcv1.ZoneIdentity) + d.Set(isInstanceTemplateZone, *zone.Name) + + interfacesList := make([]map[string]interface{}, 0) + if instance.VolumeAttachments != nil { + for _, volume := range instance.VolumeAttachments { + volumeAttach := map[string]interface{}{} + volumeAttach[isInstanceTemplateVolAttName] = *volume.Name + volumeAttach[isInstanceTemplateDeleteVolume] = *volume.DeleteVolumeOnInstanceDelete + newVolumeArr := []map[string]interface{}{} + newVolume := map[string]interface{}{} + volumeIntf := volume.Volume + volumeInst := volumeIntf.(*vpcv1.VolumeAttachmentVolumePrototypeInstanceContext) + if volumeInst.ID != nil { + volumeAttach[isInstanceTemplateVolAttVol] = *volumeInst.ID + } + + if volumeInst.Capacity != nil { + newVolume[isInstanceTemplateVolAttVolCapacity] = *volumeInst.Capacity + } + if volumeInst.Profile != nil { + profile := volumeInst.Profile.(*vpcv1.VolumeProfileIdentity) + newVolume[isInstanceTemplateVolAttVolProfile] = profile.Name + } + + if volumeInst.Iops != nil { + newVolume[isInstanceTemplateVolAttVolIops] = *volumeInst.Iops + } + if volumeInst.EncryptionKey != nil { + encryptionKey := volumeInst.EncryptionKey.(*vpcv1.EncryptionKeyIdentity) + newVolume[isInstanceTemplateVolAttVolEncryptionKey] = *encryptionKey.CRN + } + if len(newVolume) > 0 { + newVolumeArr = append(newVolumeArr, newVolume) + } + volumeAttach[isInstanceTemplateVolAttVolPrototype] = newVolumeArr + interfacesList = append(interfacesList, volumeAttach) + } + d.Set(isInstanceTemplateVolumeAttachments, interfacesList) + } + if instance.BootVolumeAttachment != nil { + bootVolList := make([]map[string]interface{}, 0) + bootVol := map[string]interface{}{} + bootVol[isInstanceTemplateDeleteVolume] = *instance.BootVolumeAttachment.DeleteVolumeOnInstanceDelete + if instance.BootVolumeAttachment.Volume != nil { + volumeIntf := instance.BootVolumeAttachment.Volume + bootVol[isInstanceTemplateBootName] = volumeIntf.Name + bootVol[isInstanceTemplateBootSize] = volumeIntf.Capacity + if volumeIntf.Profile != nil { + volProfIntf := volumeIntf.Profile + volProfInst := volProfIntf.(*vpcv1.VolumeProfileIdentity) + bootVol[isInstanceTemplateBootProfile] = volProfInst.Name + } + if volumeIntf.EncryptionKey != nil { + volEncryption := volumeIntf.EncryptionKey + volEncryptionIntf := volEncryption.(*vpcv1.EncryptionKeyIdentity) + bootVol[isInstanceTemplateBootEncryption] = volEncryptionIntf.CRN + } + } + + bootVolList = 
append(bootVolList, bootVol) + d.Set(isInstanceTemplateBootVolume, bootVolList) + } + + if instance.ResourceGroup != nil { + d.Set(isInstanceTemplateResourceGroup, instance.ResourceGroup.ID) + } + return nil +} + +func resourceIbmIsInstanceTemplateInstancePlacementTargetPrototypeToMap(instancePlacementTargetPrototype vpcv1.InstancePlacementTargetPrototype) map[string]interface{} { + instancePlacementTargetPrototypeMap := map[string]interface{}{} + + instancePlacementTargetPrototypeMap["id"] = instancePlacementTargetPrototype.ID + instancePlacementTargetPrototypeMap["crn"] = instancePlacementTargetPrototype.CRN + instancePlacementTargetPrototypeMap["href"] = instancePlacementTargetPrototype.Href + + return instancePlacementTargetPrototypeMap +} + +func instanceTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + instanceC, err := vpcClient(meta) + if err != nil { + return err + } + ID := d.Id() + + if d.HasChange(isInstanceName) { + name := d.Get(isInstanceTemplateName).(string) + updnetoptions := &vpcv1.UpdateInstanceTemplateOptions{ + ID: &ID, + } + + instanceTemplatePatchModel := &vpcv1.InstanceTemplatePatch{ + Name: &name, + } + instanceTemplatePatch, err := instanceTemplatePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for InstanceTemplatePatch: %s", err) + } + updnetoptions.InstanceTemplatePatch = instanceTemplatePatch + + _, _, err = instanceC.UpdateInstanceTemplate(updnetoptions) + if err != nil { + return err + } + } + return nil +} + +func instanceTemplateDelete(d *schema.ResourceData, meta interface{}, ID string) error { + instanceC, err := vpcClient(meta) + if err != nil { + return err + } + + deleteinstanceTemplateOptions := &vpcv1.DeleteInstanceTemplateOptions{ + ID: &ID, + } + _, err = instanceC.DeleteInstanceTemplate(deleteinstanceTemplateOptions) + if err != nil { + return err + } + return nil +} + +func instanceTemplateExists(d *schema.ResourceData, meta interface{}, ID string) (bool, error) { + instanceC, err := vpcClient(meta) + if err != nil { + return false, err + } + getinsOptions := &vpcv1.GetInstanceTemplateOptions{ + ID: &ID, + } + _, response, err := instanceC.GetInstanceTemplate(getinsOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error Getting InstanceTemplate: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_ipsec_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_ipsec_policy.go new file mode 100644 index 00000000000..7791f05f32f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_ipsec_policy.go @@ -0,0 +1,636 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
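Pulling the pieces of the template resource together: only `name` is honored on update (everything else is ForceNew), and the create path above assembles the optional blocks for boot volume (capacity and profile are fixed in code at 100 GB / "general-purpose"; only name, encryption key, and the delete flag come from the config), secondary volume attachments, additional network interfaces, and dedicated host placement. A consolidated configuration sketch; profile and zone values are illustrative and `var.*` IDs are placeholders.

```hcl
resource "ibm_is_instance_template" "example" {
  name    = "example-template"
  image   = var.image_id # placeholder
  profile = "bx2-2x8"    # illustrative profile name
  vpc     = var.vpc_id   # placeholder
  zone    = "us-south-1" # illustrative zone
  keys    = [var.ssh_key_id]

  primary_network_interface {
    subnet          = var.subnet_id
    security_groups = [var.security_group_id]
  }

  network_interfaces {
    name   = "eth1"
    subnet = var.secondary_subnet_id
  }

  boot_volume {
    name                             = "example-boot"
    delete_volume_on_instance_delete = true
  }

  volume_attachments {
    name                             = "data"
    delete_volume_on_instance_delete = true
    volume_prototype {
      profile  = "general-purpose"
      capacity = 250
    }
  }

  # Mutually exclusive placement targets (ConflictsWith in the schema);
  # argument names assumed from the isPlacementTarget* constants.
  dedicated_host = var.dedicated_host_id
}
```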
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/IBM/vpc-go-sdk/vpcclassicv1"
+	"github.com/IBM/vpc-go-sdk/vpcv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	isIpSecName              = "name"
+	isIpSecAuthenticationAlg = "authentication_algorithm"
+	isIpSecEncryptionAlg     = "encryption_algorithm"
+	isIpSecPFS               = "pfs"
+	isIpSecKeyLifeTime       = "key_lifetime"
+	isIPSecResourceGroup     = "resource_group"
+	isIPSecEncapsulationMode = "encapsulation_mode"
+	isIPSecTransformProtocol = "transform_protocol"
+	isIPSecVPNConnections    = "vpn_connections"
+	isIPSecVPNConnectionName = "name"
+	isIPSecVPNConnectionId   = "id"
+	isIPSecVPNConnectionHref = "href"
+)
+
+func resourceIBMISIPSecPolicy() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMISIPSecPolicyCreate,
+		Read:     resourceIBMISIPSecPolicyRead,
+		Update:   resourceIBMISIPSecPolicyUpdate,
+		Delete:   resourceIBMISIPSecPolicyDelete,
+		Exists:   resourceIBMISIPSecPolicyExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			isIpSecName: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: InvokeValidator("ibm_is_ipsec_policy", isIpSecName),
+				Description:  "IPSEC name",
+			},
+
+			isIpSecAuthenticationAlg: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: InvokeValidator("ibm_is_ipsec_policy", isIpSecAuthenticationAlg),
+				Description:  "Authentication algorithm",
+			},
+
+			isIpSecEncryptionAlg: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: InvokeValidator("ibm_is_ipsec_policy", isIpSecEncryptionAlg),
+				Description:  "Encryption algorithm",
+			},
+
+			isIpSecPFS: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: InvokeValidator("ibm_is_ipsec_policy", isIpSecPFS),
+				Description:  "PFS info",
+			},
+
+			isIPSecResourceGroup: {
+				Type:        schema.TypeString,
+				ForceNew:    true,
+				Optional:    true,
+				Computed:    true,
+				Description: "Resource group info",
+			},
+
+			isIpSecKeyLifeTime: {
+				Type:         schema.TypeInt,
+				Optional:     true,
+				Default:      3600,
+				ValidateFunc: validateKeyLifeTime,
+				Description:  "IPSEC key lifetime",
+			},
+
+			isIPSecEncapsulationMode: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "IPSEC encapsulation mode",
+			},
+
+			isIPSecTransformProtocol: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "IPSEC transform protocol",
+			},
+
+			isIPSecVPNConnections: {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						isIPSecVPNConnectionName: {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						isIPSecVPNConnectionId: {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						isIPSecVPNConnectionHref: {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+					},
+				},
+			},
+			ResourceControllerURL: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
+			},
+
+			ResourceName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The name of the resource",
+			},
+
+			ResourceCRN: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The CRN of the resource",
+			},
+
+			ResourceGroupName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The resource group name in which resource is provisioned",
+			},
+		},
+	}
+}
+
+func resourceIBMISIPSECValidator() *ResourceValidator {
+
+	validateSchema := make([]ValidateSchema, 1)
+	authentication_algorithm := "md5, sha1, sha256, sha512"
+	encryption_algorithm :=
"triple_des, aes128, aes256" + pfs := "disabled, group_2, group_5, group_14" + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isIpSecName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isIpSecAuthenticationAlg, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: authentication_algorithm}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isIpSecEncryptionAlg, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: encryption_algorithm}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isIpSecPFS, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: pfs}) + + ibmISIPSECResourceValidator := ResourceValidator{ResourceName: "ibm_is_ipsec_policy", Schema: validateSchema} + return &ibmISIPSECResourceValidator +} + +func resourceIBMISIPSecPolicyCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + log.Printf("[DEBUG] Ip Sec create") + name := d.Get(isIpSecName).(string) + authenticationAlg := d.Get(isIpSecAuthenticationAlg).(string) + encryptionAlg := d.Get(isIpSecEncryptionAlg).(string) + pfs := d.Get(isIpSecPFS).(string) + + if userDetails.generation == 1 { + err := classicIpsecpCreate(d, meta, authenticationAlg, encryptionAlg, name, pfs) + if err != nil { + return err + } + } else { + err := ipsecpCreate(d, meta, authenticationAlg, encryptionAlg, name, pfs) + if err != nil { + return err + } + } + return resourceIBMISIPSecPolicyRead(d, meta) +} + +func classicIpsecpCreate(d *schema.ResourceData, meta interface{}, authenticationAlg, encryptionAlg, name, pfs string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.CreateIpsecPolicyOptions{ + AuthenticationAlgorithm: &authenticationAlg, + EncryptionAlgorithm: &encryptionAlg, + Pfs: &pfs, + Name: &name, + } + + if keylt, ok := d.GetOk(isIpSecKeyLifeTime); ok { + keyLifetime := int64(keylt.(int)) + options.KeyLifetime = &keyLifetime + } else { + keyLifetime := int64(3600) + options.KeyLifetime = &keyLifetime + } + + if rgrp, ok := d.GetOk(isIPSecResourceGroup); ok { + rg := rgrp.(string) + options.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{ + ID: &rg, + } + } + ipSec, response, err := sess.CreateIpsecPolicy(options) + if err != nil { + return fmt.Errorf("[DEBUG] ipSec policy err %s\n%s", err, response) + } + d.SetId(*ipSec.ID) + log.Printf("[INFO] ipSec policy : %s", *ipSec.ID) + return nil +} + +func ipsecpCreate(d *schema.ResourceData, meta interface{}, authenticationAlg, encryptionAlg, name, pfs string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.CreateIpsecPolicyOptions{ + AuthenticationAlgorithm: &authenticationAlg, + EncryptionAlgorithm: &encryptionAlg, + Pfs: &pfs, + Name: &name, + } + + if keylt, ok := d.GetOk(isIpSecKeyLifeTime); ok { + keyLifetime := int64(keylt.(int)) + options.KeyLifetime = &keyLifetime + } else { + keyLifetime := int64(3600) + options.KeyLifetime = &keyLifetime + } + + if rgrp, ok := d.GetOk(isIPSecResourceGroup); ok { + rg := rgrp.(string) + 
options.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + ipSec, response, err := sess.CreateIpsecPolicy(options) + if err != nil { + return fmt.Errorf("[DEBUG] ipSec policy err %s\n%s", err, response) + } + d.SetId(*ipSec.ID) + log.Printf("[INFO] ipSec policy : %s", *ipSec.ID) + return nil +} + +func resourceIBMISIPSecPolicyRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicIpsecpGet(d, meta, id) + if err != nil { + return err + } + } else { + err := ipsecpGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicIpsecpGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getIpsecPolicyOptions := &vpcclassicv1.GetIpsecPolicyOptions{ + ID: &id, + } + ipSec, response, err := sess.GetIpsecPolicy(getIpsecPolicyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting IPSEC Policy(%s): %s\n%s", id, err, response) + } + + d.Set(isIpSecName, *ipSec.Name) + d.Set(isIpSecAuthenticationAlg, *ipSec.AuthenticationAlgorithm) + d.Set(isIpSecEncryptionAlg, *ipSec.EncryptionAlgorithm) + if ipSec.ResourceGroup != nil { + d.Set(isIPSecResourceGroup, *ipSec.ResourceGroup.ID) + } else { + d.Set(isIPSecResourceGroup, nil) + } + d.Set(isIpSecPFS, *ipSec.Pfs) + if ipSec.KeyLifetime != nil { + d.Set(isIpSecKeyLifeTime, *ipSec.KeyLifetime) + } + d.Set(isIPSecEncapsulationMode, *ipSec.EncapsulationMode) + d.Set(isIPSecTransformProtocol, *ipSec.TransformProtocol) + + connList := make([]map[string]interface{}, 0) + if ipSec.Connections != nil && len(ipSec.Connections) > 0 { + for _, connection := range ipSec.Connections { + conn := map[string]interface{}{} + conn[isIPSecVPNConnectionName] = *connection.Name + conn[isIPSecVPNConnectionId] = *connection.ID + conn[isIPSecVPNConnectionHref] = *connection.Href + connList = append(connList, conn) + } + } + d.Set(isIPSecVPNConnections, connList) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/ipsecpolicies") + d.Set(ResourceName, *ipSec.Name) + // d.Set(ResourceCRN, *ipSec.Crn) + if ipSec.ResourceGroup != nil { + rsMangClient, err := meta.(ClientSession).ResourceManagementAPIv2() + if err != nil { + return err + } + grp, err := rsMangClient.ResourceGroup().Get(*ipSec.ResourceGroup.ID) + if err != nil { + return err + } + d.Set(ResourceGroupName, grp.Name) + } + return nil +} + +func ipsecpGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getIpsecPolicyOptions := &vpcv1.GetIpsecPolicyOptions{ + ID: &id, + } + ipSec, response, err := sess.GetIpsecPolicy(getIpsecPolicyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting IPSEC Policy(%s): %s\n%s", id, err, response) + } + d.Set(isIpSecName, *ipSec.Name) + d.Set(isIpSecAuthenticationAlg, *ipSec.AuthenticationAlgorithm) + d.Set(isIpSecEncryptionAlg, *ipSec.EncryptionAlgorithm) + if ipSec.ResourceGroup != nil { + d.Set(isIPSecResourceGroup, *ipSec.ResourceGroup.ID) + d.Set(ResourceGroupName, *ipSec.ResourceGroup.Name) + } else { + d.Set(isIPSecResourceGroup, nil) + } + d.Set(isIpSecPFS, 
*ipSec.Pfs) + if ipSec.KeyLifetime != nil { + d.Set(isIpSecKeyLifeTime, *ipSec.KeyLifetime) + } + d.Set(isIPSecEncapsulationMode, *ipSec.EncapsulationMode) + d.Set(isIPSecTransformProtocol, *ipSec.TransformProtocol) + + connList := make([]map[string]interface{}, 0) + if ipSec.Connections != nil && len(ipSec.Connections) > 0 { + for _, connection := range ipSec.Connections { + conn := map[string]interface{}{} + conn[isIPSecVPNConnectionName] = *connection.Name + conn[isIPSecVPNConnectionId] = *connection.ID + conn[isIPSecVPNConnectionHref] = *connection.Href + connList = append(connList, conn) + } + } + d.Set(isIPSecVPNConnections, connList) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/ipsecpolicies") + d.Set(ResourceName, *ipSec.Name) + // d.Set(ResourceCRN, *ipSec.Crn) + return nil +} + +func resourceIBMISIPSecPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicIpsecpUpdate(d, meta, id) + if err != nil { + return err + } + } else { + err := ipsecpUpdate(d, meta, id) + if err != nil { + return err + } + } + return resourceIBMISIPSecPolicyRead(d, meta) +} + +func classicIpsecpUpdate(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + options := &vpcclassicv1.UpdateIpsecPolicyOptions{ + ID: &id, + } + if d.HasChange(isIpSecName) || d.HasChange(isIpSecAuthenticationAlg) || d.HasChange(isIpSecEncryptionAlg) || d.HasChange(isIpSecPFS) || d.HasChange(isIpSecKeyLifeTime) { + name := d.Get(isIpSecName).(string) + authenticationAlg := d.Get(isIpSecAuthenticationAlg).(string) + encryptionAlg := d.Get(isIpSecEncryptionAlg).(string) + pfs := d.Get(isIpSecPFS).(string) + keyLifetime := int64(d.Get(isIpSecKeyLifeTime).(int)) + + ipsecPolicyPatchModel := &vpcclassicv1.IPsecPolicyPatch{ + Name: &name, + AuthenticationAlgorithm: &authenticationAlg, + EncryptionAlgorithm: &encryptionAlg, + Pfs: &pfs, + KeyLifetime: &keyLifetime, + } + ipsecPolicyPatch, err := ipsecPolicyPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for IPsecPolicyPatch: %s", err) + } + options.IPsecPolicyPatch = ipsecPolicyPatch + + _, response, err := sess.UpdateIpsecPolicy(options) + if err != nil { + return fmt.Errorf("Error on update of IPSEC Policy(%s): %s\n%s", id, err, response) + } + } + return nil +} + +func ipsecpUpdate(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + options := &vpcv1.UpdateIpsecPolicyOptions{ + ID: &id, + } + if d.HasChange(isIpSecName) || d.HasChange(isIpSecAuthenticationAlg) || d.HasChange(isIpSecEncryptionAlg) || d.HasChange(isIpSecPFS) || d.HasChange(isIpSecKeyLifeTime) { + name := d.Get(isIpSecName).(string) + authenticationAlg := d.Get(isIpSecAuthenticationAlg).(string) + encryptionAlg := d.Get(isIpSecEncryptionAlg).(string) + pfs := d.Get(isIpSecPFS).(string) + keyLifetime := int64(d.Get(isIpSecKeyLifeTime).(int)) + + ipsecPolicyPatchModel := &vpcv1.IPsecPolicyPatch{ + Name: &name, + AuthenticationAlgorithm: &authenticationAlg, + EncryptionAlgorithm: &encryptionAlg, + Pfs: &pfs, + KeyLifetime: &keyLifetime, + } + ipsecPolicyPatch, err := ipsecPolicyPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for 
IPsecPolicyPatch: %s", err) + } + options.IPsecPolicyPatch = ipsecPolicyPatch + + _, response, err := sess.UpdateIpsecPolicy(options) + if err != nil { + return fmt.Errorf("Error on update of IPSEC Policy(%s): %s\n%s", id, err, response) + } + } + return nil +} + +func resourceIBMISIPSecPolicyDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicIpsecpDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := ipsecpDelete(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicIpsecpDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getIpsecPolicyOptions := &vpcclassicv1.GetIpsecPolicyOptions{ + ID: &id, + } + _, response, err := sess.GetIpsecPolicy(getIpsecPolicyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting IPSEC Policy(%s): %s\n%s", id, err, response) + } + + deleteIpsecPolicyOptions := &vpcclassicv1.DeleteIpsecPolicyOptions{ + ID: &id, + } + response, err = sess.DeleteIpsecPolicy(deleteIpsecPolicyOptions) + if err != nil { + return fmt.Errorf("Error Deleting IPSEC Policy(%s): %s\n%s", id, err, response) + } + d.SetId("") + return nil +} + +func ipsecpDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getIpsecPolicyOptions := &vpcv1.GetIpsecPolicyOptions{ + ID: &id, + } + _, response, err := sess.GetIpsecPolicy(getIpsecPolicyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting IPSEC Policy(%s): %s\n%s", id, err, response) + } + deleteIpsecPolicyOptions := &vpcv1.DeleteIpsecPolicyOptions{ + ID: &id, + } + response, err = sess.DeleteIpsecPolicy(deleteIpsecPolicyOptions) + if err != nil { + return fmt.Errorf("Error Deleting IPSEC Policy(%s): %s\n%s", id, err, response) + } + d.SetId("") + return nil +} + +func resourceIBMISIPSecPolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + + if userDetails.generation == 1 { + exists, err := classicIpsecpExists(d, meta, id) + return exists, err + } else { + exists, err := ipsecpExists(d, meta, id) + return exists, err + } +} + +func classicIpsecpExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getIpsecPolicyOptions := &vpcclassicv1.GetIpsecPolicyOptions{ + ID: &id, + } + _, response, err := sess.GetIpsecPolicy(getIpsecPolicyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting IPSEC Policy(%s): %s\n%s", id, err, response) + } + return true, nil +} + +func ipsecpExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getIpsecPolicyOptions := &vpcv1.GetIpsecPolicyOptions{ + ID: &id, + } + _, response, err := sess.GetIpsecPolicy(getIpsecPolicyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return 
false, fmt.Errorf("Error getting IPSEC Policy(%s): %s\n%s", id, err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb.go new file mode 100644 index 00000000000..1df0128676a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb.go @@ -0,0 +1,1049 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isLBName = "name" + isLBStatus = "status" + isLBTags = "tags" + isLBType = "type" + isLBSubnets = "subnets" + isLBHostName = "hostname" + isLBPublicIPs = "public_ips" + isLBPrivateIPs = "private_ips" + isLBListeners = "listeners" + isLBPools = "pools" + isLBOperatingStatus = "operating_status" + isLBDeleting = "deleting" + isLBDeleted = "done" + isLBProvisioning = "provisioning" + isLBProvisioningDone = "done" + isLBResourceGroup = "resource_group" + isLBProfile = "profile" + isLBLogging = "logging" + isLBSecurityGroups = "security_groups" + isLBSecurityGroupsSupported = "security_group_supported" +) + +func resourceIBMISLB() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISLBCreate, + Read: resourceIBMISLBRead, + Update: resourceIBMISLBUpdate, + Delete: resourceIBMISLBDelete, + Exists: resourceIBMISLBExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + + isLBName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_lb", isLBName), + Description: "Load Balancer name", + }, + + isLBType: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: "public", + ValidateFunc: InvokeValidator("ibm_is_lb", isLBType), + Description: "Load Balancer type", + }, + + isLBStatus: { + Type: schema.TypeString, + Computed: true, + }, + + isLBOperatingStatus: { + Type: schema.TypeString, + Computed: true, + }, + + isLBPublicIPs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + isLBPrivateIPs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + isLBSubnets: { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Load Balancer subnets list", + }, + + isLBSecurityGroups: { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Load Balancer securitygroups list", + ConflictsWith: []string{isLBProfile}, + }, + + isLBSecurityGroupsSupported: { + Type: schema.TypeBool, + Computed: true, + Description: "Security Group Supported for this Load Balancer", + }, + + isLBProfile: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: 
true, + Description: "The profile to use for this load balancer.", + ValidateFunc: InvokeValidator("ibm_is_lb", isLBProfile), + ConflictsWith: []string{isLBLogging}, + }, + + isLBTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_lb", "tag")}, + Set: resourceIBMVPCHash, + }, + + isLBResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + }, + + isLBHostName: { + Type: schema.TypeString, + Computed: true, + }, + + isLBLogging: { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Logging of Load Balancer", + ConflictsWith: []string{isLBProfile}, + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMISLBValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + lbtype := "public, private" + isLBProfileAllowedValues := "network-fixed" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: lbtype}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBProfile, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: false, + AllowedValues: isLBProfileAllowedValues}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISLBResourceValidator := ResourceValidator{ResourceName: "ibm_is_lb", Schema: validateSchema} + return &ibmISLBResourceValidator +} + +func resourceIBMISLBCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + name := d.Get(isLBName).(string) + subnets := d.Get(isLBSubnets).(*schema.Set) + + var isLogging bool + if lbLogging, ok := d.GetOk(isLBLogging); ok { + isLogging = lbLogging.(bool) + } + + var securityGroups *schema.Set + if sg, ok := d.GetOk(isLBSecurityGroups); ok { + securityGroups = sg.(*schema.Set) + } + + // subnets := expandStringList((d.Get(isLBSubnets).(*schema.Set)).List()) + var lbType, rg string + isPublic := true + if types, ok := d.GetOk(isLBType); ok { + lbType = types.(string) + } + + if lbType == "private" { + isPublic = false + } + + if grp, ok := d.GetOk(isLBResourceGroup); ok { + rg = grp.(string) + } + + if userDetails.generation == 1 { + err := classicLBCreate(d, meta, name, lbType, rg, subnets, isPublic) + if err != nil { + return err + } + } else { + err := lbCreate(d, meta, name, lbType, rg, subnets, isPublic, isLogging, securityGroups) + if err != nil { + return err + 
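+ // NOTE: both generation paths converge on the read below, which populates computed attributes such as hostname and the public/private IP lists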
} + } + return resourceIBMISLBRead(d, meta) +} + +func classicLBCreate(d *schema.ResourceData, meta interface{}, name, lbType, rg string, subnets *schema.Set, isPublic bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.CreateLoadBalancerOptions{ + IsPublic: &isPublic, + Name: &name, + } + if subnets.Len() != 0 { + subnetobjs := make([]vpcclassicv1.SubnetIdentityIntf, subnets.Len()) + for i, subnet := range subnets.List() { + subnetstr := subnet.(string) + subnetobjs[i] = &vpcclassicv1.SubnetIdentity{ + ID: &subnetstr, + } + } + options.Subnets = subnetobjs + } + if rg != "" { + options.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{ + ID: &rg, + } + } + + lb, response, err := sess.CreateLoadBalancer(options) + if err != nil { + return fmt.Errorf("Error while creating Load Balancer err %s\n%s", err, response) + } + d.SetId(*lb.ID) + log.Printf("[INFO] Load Balancer : %s", *lb.ID) + _, err = isWaitForClassicLBAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isLBTags); ok || v != "" { + oldList, newList := d.GetChange(isLBTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *lb.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc Load Balancer (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func lbCreate(d *schema.ResourceData, meta interface{}, name, lbType, rg string, subnets *schema.Set, isPublic bool, isLogging bool, securityGroups *schema.Set) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + options := &vpcv1.CreateLoadBalancerOptions{ + IsPublic: &isPublic, + Name: &name, + } + + if subnets.Len() != 0 { + subnetobjs := make([]vpcv1.SubnetIdentityIntf, subnets.Len()) + for i, subnet := range subnets.List() { + subnetstr := subnet.(string) + subnetobjs[i] = &vpcv1.SubnetIdentity{ + ID: &subnetstr, + } + } + options.Subnets = subnetobjs + } + + if securityGroups != nil && securityGroups.Len() != 0 { + securityGroupobjs := make([]vpcv1.SecurityGroupIdentityIntf, securityGroups.Len()) + for i, securityGroup := range securityGroups.List() { + securityGroupstr := securityGroup.(string) + securityGroupobjs[i] = &vpcv1.SecurityGroupIdentity{ + ID: &securityGroupstr, + } + } + options.SecurityGroups = securityGroupobjs + } + + if rg != "" { + options.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + + if _, ok := d.GetOk(isLBProfile); ok { + profile := d.Get(isLBProfile).(string) + // Construct an instance of the LoadBalancerPoolIdentityByName model + loadBalancerProfileIdentityModel := new(vpcv1.LoadBalancerProfileIdentityByName) + loadBalancerProfileIdentityModel.Name = &profile + options.Profile = loadBalancerProfileIdentityModel + } else { + + dataPath := &vpcv1.LoadBalancerLoggingDatapath{ + Active: &isLogging, + } + loadBalancerLogging := &vpcv1.LoadBalancerLogging{ + Datapath: dataPath, + } + options.Logging = loadBalancerLogging + } + + lb, response, err := sess.CreateLoadBalancer(options) + if err != nil { + return fmt.Errorf("Error while creating Load Balancer err %s\n%s", err, response) + } + d.SetId(*lb.ID) + log.Printf("[INFO] Load Balancer : %s", *lb.ID) + _, err = isWaitForLBAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isLBTags); ok || v != "" { + oldList, newList := d.GetChange(isLBTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, 
*lb.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc Load Balancer (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func resourceIBMISLBRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicLBGet(d, meta, id) + if err != nil { + return err + } + } else { + err := lbGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicLBGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &id, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting Load Balancer : %s\n%s", err, response) + } + d.Set(isLBName, *lb.Name) + if *lb.IsPublic { + d.Set(isLBType, "public") + } else { + d.Set(isLBType, "private") + } + d.Set(isLBStatus, *lb.ProvisioningStatus) + d.Set(isLBOperatingStatus, *lb.OperatingStatus) + publicIpList := make([]string, 0) + if lb.PublicIps != nil { + for _, ip := range lb.PublicIps { + if ip.Address != nil { + pubip := *ip.Address + publicIpList = append(publicIpList, pubip) + } + } + } + d.Set(isLBPublicIPs, publicIpList) + privateIpList := make([]string, 0) + if lb.PrivateIps != nil { + for _, ip := range lb.PrivateIps { + if ip.Address != nil { + prip := *ip.Address + privateIpList = append(privateIpList, prip) + } + } + } + d.Set(isLBPrivateIPs, privateIpList) + if lb.Subnets != nil { + subnetList := make([]string, 0) + for _, subnet := range lb.Subnets { + if subnet.ID != nil { + sub := *subnet.ID + subnetList = append(subnetList, sub) + } + } + d.Set(isLBSubnets, subnetList) + } + d.Set(isLBResourceGroup, *lb.ResourceGroup.ID) + d.Set(isLBHostName, *lb.Hostname) + tags, err := GetTagsUsingCRN(meta, *lb.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Load Balancer (%s) tags: %s", d.Id(), err) + } + d.Set(isLBTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/loadBalancers") + d.Set(ResourceName, *lb.Name) + if lb.ResourceGroup != nil { + d.Set(ResourceGroupName, *lb.ResourceGroup.ID) + } + return nil +} + +func lbGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &id, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting Load Balancer : %s\n%s", err, response) + } + d.Set(isLBName, *lb.Name) + if *lb.IsPublic { + d.Set(isLBType, "public") + } else { + d.Set(isLBType, "private") + } + d.Set(isLBStatus, *lb.ProvisioningStatus) + d.Set(isLBOperatingStatus, *lb.OperatingStatus) + publicIpList := make([]string, 0) + if lb.PublicIps != nil { + for _, ip := range lb.PublicIps { + if ip.Address != nil { + pubip := *ip.Address + publicIpList = append(publicIpList, pubip) + } + } + } + d.Set(isLBPublicIPs, publicIpList) + privateIpList := make([]string, 0) + if lb.PrivateIps != nil { + for _, ip := range lb.PrivateIps { + if ip.Address != nil { + prip := 
*ip.Address + privateIpList = append(privateIpList, prip) + } + } + } + d.Set(isLBPrivateIPs, privateIpList) + if lb.Subnets != nil { + subnetList := make([]string, 0) + for _, subnet := range lb.Subnets { + if subnet.ID != nil { + sub := *subnet.ID + subnetList = append(subnetList, sub) + } + } + d.Set(isLBSubnets, subnetList) + } + + d.Set(isLBSecurityGroupsSupported, false) + if lb.SecurityGroups != nil { + securitygroupList := make([]string, 0) + for _, SecurityGroup := range lb.SecurityGroups { + if SecurityGroup.ID != nil { + securityGroupID := *SecurityGroup.ID + securitygroupList = append(securitygroupList, securityGroupID) + } + } + d.Set(isLBSecurityGroups, securitygroupList) + d.Set(isLBSecurityGroupsSupported, true) + } + + if lb.Profile != nil { + profile := lb.Profile + if profile.Name != nil { + d.Set(isLBProfile, *lb.Profile.Name) + } + } else { + if lb.Logging != nil && lb.Logging.Datapath != nil && lb.Logging.Datapath.Active != nil { + d.Set(isLBLogging, *lb.Logging.Datapath.Active) + } + } + + d.Set(isLBResourceGroup, *lb.ResourceGroup.ID) + d.Set(isLBHostName, *lb.Hostname) + tags, err := GetTagsUsingCRN(meta, *lb.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc Load Balancer (%s) tags: %s", d.Id(), err) + } + d.Set(isLBTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/loadBalancers") + d.Set(ResourceName, *lb.Name) + if lb.ResourceGroup != nil { + d.Set(ResourceGroupName, *lb.ResourceGroup.ID) + } + return nil +} + +func resourceIBMISLBUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + name := "" + isLogging := false + hasChanged := false + hasChangedLog := false + var remove, add []string + hasChangedSecurityGroups := false + + if d.HasChange(isLBName) { + name = d.Get(isLBName).(string) + hasChanged = true + } + if d.HasChange(isLBLogging) { + isLogging = d.Get(isLBLogging).(bool) + hasChangedLog = true + } + if d.HasChange(isLBSecurityGroups) { + o, n := d.GetChange(isLBSecurityGroups) + oSecurityGroups := o.(*schema.Set) + nSecurityGroups := n.(*schema.Set) + remove = expandStringList(oSecurityGroups.Difference(nSecurityGroups).List()) + add = expandStringList(nSecurityGroups.Difference(oSecurityGroups).List()) + hasChangedSecurityGroups = true + } + + if userDetails.generation == 1 { + err := classicLBUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } else { + err := lbUpdate(d, meta, id, name, hasChanged, isLogging, hasChangedLog, hasChangedSecurityGroups, remove, add) + if err != nil { + return err + } + } + + return resourceIBMISLBRead(d, meta) +} +func classicLBUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isLBTags) { + getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &id, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error getting Load Balancer : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isLBTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *lb.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc Load Balancer (%s) tags: %s", d.Id(), err) + } + } + if hasChanged { + updateLoadBalancerOptions := 
&vpcclassicv1.UpdateLoadBalancerOptions{ + ID: &id, + } + + loadBalancerPatchModel := &vpcclassicv1.LoadBalancerPatch{ + Name: &name, + } + loadBalancerPatch, err := loadBalancerPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for LoadBalancerPatch: %s", err) + } + updateLoadBalancerOptions.LoadBalancerPatch = loadBalancerPatch + + _, response, err := sess.UpdateLoadBalancer(updateLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Updating vpc Load Balancer : %s\n%s", err, response) + } + } + return nil +} + +func lbUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool, isLogging bool, hasChangedLog bool, hasChangedSecurityGroups bool, remove, add []string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isLBTags) { + getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &id, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error getting Load Balancer : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isLBTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *lb.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc Load Balancer (%s) tags: %s", d.Id(), err) + } + } + if hasChanged { + updateLoadBalancerOptions := &vpcv1.UpdateLoadBalancerOptions{ + ID: &id, + } + loadBalancerPatchModel := &vpcv1.LoadBalancerPatch{ + Name: &name, + } + loadBalancerPatch, err := loadBalancerPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for LoadBalancerPatch: %s", err) + } + updateLoadBalancerOptions.LoadBalancerPatch = loadBalancerPatch + + _, response, err := sess.UpdateLoadBalancer(updateLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Updating vpc Load Balancer : %s\n%s", err, response) + } + } + if hasChangedLog { + updateLoadBalancerOptions := &vpcv1.UpdateLoadBalancerOptions{ + ID: &id, + } + dataPath := &vpcv1.LoadBalancerLoggingDatapath{ + Active: &isLogging, + } + loadBalancerLogging := &vpcv1.LoadBalancerLogging{ + Datapath: dataPath, + } + loadBalancerPatchModel := &vpcv1.LoadBalancerPatch{ + Logging: loadBalancerLogging, + } + loadBalancerPatch, err := loadBalancerPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for LoadBalancerPatch: %s", err) + } + updateLoadBalancerOptions.LoadBalancerPatch = loadBalancerPatch + + _, response, err := sess.UpdateLoadBalancer(updateLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Updating vpc Load Balancer : %s\n%s", err, response) + } + } + + if hasChangedSecurityGroups { + if len(add) > 0 { + for _, d := range add { + createSecurityGroupTargetBindingOptions := &vpcv1.CreateSecurityGroupTargetBindingOptions{} + createSecurityGroupTargetBindingOptions.SecurityGroupID = &d + createSecurityGroupTargetBindingOptions.ID = &id + _, response, err := sess.CreateSecurityGroupTargetBinding(createSecurityGroupTargetBindingOptions) + if err != nil { + return fmt.Errorf("Error while creating Security Group Target Binding %s\n%s", err, response) + } + } + } + if len(remove) > 0 { + for _, d := range remove { + getSecurityGroupTargetOptions := &vpcv1.GetSecurityGroupTargetOptions{ + SecurityGroupID: &d, + ID: &id, + } + _, response, err := sess.GetSecurityGroupTarget(getSecurityGroupTargetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + continue + } + return fmt.Errorf("Error Getting Security Group Target for this load 
balancer (%s): %s\n%s", d, err, response) + } + deleteSecurityGroupTargetBindingOptions := sess.NewDeleteSecurityGroupTargetBindingOptions(d, id) + response, err = sess.DeleteSecurityGroupTargetBinding(deleteSecurityGroupTargetBindingOptions) + if err != nil { + return fmt.Errorf("Error Deleting Security Group Target for this load balancer : %s\n%s", err, response) + } + } + } + } + return nil +} + +func resourceIBMISLBDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicLBDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := lbDelete(d, meta, id) + if err != nil { + return err + } + } + + d.SetId("") + return nil +} + +func classicLBDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &id, + } + _, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting vpc load balancer(%s): %s\n%s", id, err, response) + } + + deleteLoadBalancerOptions := &vpcclassicv1.DeleteLoadBalancerOptions{ + ID: &id, + } + response, err = sess.DeleteLoadBalancer(deleteLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Deleting vpc load balancer : %s\n%s", err, response) + } + _, err = isWaitForClassicLBDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func lbDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &id, + } + _, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting vpc load balancer(%s): %s\n%s", id, err, response) + } + + deleteLoadBalancerOptions := &vpcv1.DeleteLoadBalancerOptions{ + ID: &id, + } + response, err = sess.DeleteLoadBalancer(deleteLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Deleting vpc load balancer : %s\n%s", err, response) + } + _, err = isWaitForLBDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func isWaitForClassicLBDeleted(lbc *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBDeleting}, + Target: []string{isLBDeleted, "failed"}, + Refresh: isClassicLBDeleteRefreshFunc(lbc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicLBDeleteRefreshFunc(lbc *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &id, + } + lb, response, err := lbc.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return lb, isLBDeleted, nil + } + return nil, "failed", 
fmt.Errorf("The vpc load balancer %s failed to delete: %s\n%s", id, err, response) + } + return lb, isLBDeleting, nil + } +} + +func isWaitForLBDeleted(lbc *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBDeleting}, + Target: []string{isLBDeleted, "failed"}, + Refresh: isLBDeleteRefreshFunc(lbc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLBDeleteRefreshFunc(lbc *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &id, + } + lb, response, err := lbc.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return lb, isLBDeleted, nil + } + return nil, "failed", fmt.Errorf("The vpc load balancer %s failed to delete: %s\n%s", id, err, response) + } + return lb, isLBDeleting, nil + } +} + +func resourceIBMISLBExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + if userDetails.generation == 1 { + exists, err := classicLBExists(d, meta, id) + return exists, err + } else { + exists, err := lbExists(d, meta, id) + return exists, err + } +} + +func classicLBExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &id, + } + _, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting vpc load balancer: %s\n%s", err, response) + } + return true, nil +} + +func lbExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &id, + } + _, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting vpc load balancer: %s\n%s", err, response) + } + return true, nil +} + +func isWaitForLBAvailable(sess *vpcv1.VpcV1, lbId string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for load balancer (%s) to be available.", lbId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBProvisioning, "update_pending"}, + Target: []string{isLBProvisioningDone, ""}, + Refresh: isLBRefreshFunc(sess, lbId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLBRefreshFunc(sess *vpcv1.VpcV1, lbId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getlboptions := &vpcv1.GetLoadBalancerOptions{ + ID: &lbId, + } + lb, response, err := sess.GetLoadBalancer(getlboptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response) + } + + if *lb.ProvisioningStatus == "active" || *lb.ProvisioningStatus == "failed" { + return lb, isLBProvisioningDone, nil + } 
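+ // not yet active or failed; keep reporting isLBProvisioning so the waiter continues polling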
+ + return lb, isLBProvisioning, nil + } +} + +func isWaitForClassicLBAvailable(sess *vpcclassicv1.VpcClassicV1, lbId string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for load balancer (%s) to be available.", lbId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBProvisioning}, + Target: []string{isLBProvisioningDone, ""}, + Refresh: isClassicLBRefreshFunc(sess, lbId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicLBRefreshFunc(sess *vpcclassicv1.VpcClassicV1, lbId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getlboptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &lbId, + } + lb, response, err := sess.GetLoadBalancer(getlboptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response) + } + + if *lb.ProvisioningStatus == "active" || *lb.ProvisioningStatus == "failed" { + return lb, isLBProvisioningDone, nil + } + + return lb, isLBProvisioning, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_listener.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_listener.go new file mode 100644 index 00000000000..36e11963a3e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_listener.go @@ -0,0 +1,942 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isLBListenerLBID = "lb" + isLBListenerPort = "port" + isLBListenerProtocol = "protocol" + isLBListenerCertificateInstance = "certificate_instance" + isLBListenerConnectionLimit = "connection_limit" + isLBListenerDefaultPool = "default_pool" + isLBListenerStatus = "status" + isLBListenerDeleting = "deleting" + isLBListenerDeleted = "done" + isLBListenerProvisioning = "provisioning" + isLBListenerAcceptProxyProtocol = "accept_proxy_protocol" + isLBListenerProvisioningDone = "done" + isLBListenerID = "listener_id" +) + +func resourceIBMISLBListener() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISLBListenerCreate, + Read: resourceIBMISLBListenerRead, + Update: resourceIBMISLBListenerUpdate, + Delete: resourceIBMISLBListenerDelete, + Exists: resourceIBMISLBListenerExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + isLBListenerLBID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Loadbalancer listener ID", + }, + + isLBListenerPort: { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateLBListenerPort, + Description: "Loadbalancer listener port", + }, + + isLBListenerProtocol: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_lb_listener", isLBListenerProtocol), + Description: "Loadbalancer protocol", + }, + + isLBListenerCertificateInstance: { + Type: schema.TypeString, + Optional: true, + Description: "certificate instance for the 
Loadbalancer", + }, + + isLBListenerAcceptProxyProtocol: { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "Listener will forward proxy protocol", + }, + + isLBListenerConnectionLimit: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateLBListenerConnectionLimit, + Description: "Connection limit for Loadbalancer", + }, + + isLBListenerDefaultPool: { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return false + } + // if state file entry and tf file entry matches + if strings.Compare(n, o) == 0 { + return true + } + + if strings.Contains(n, "/") { + new := strings.Split(n, "/") + if strings.Compare(new[1], o) == 0 { + return true + } + } + + return false + }, + Description: "Loadbalancer default pool info", + }, + + isLBListenerStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Loadbalancer listener status", + }, + + isLBListenerID: { + Type: schema.TypeString, + Computed: true, + }, + + RelatedCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the LB resource", + }, + }, + } +} + +func resourceIBMISLBListenerValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + protocol := "https, http, tcp" + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBListenerProtocol, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: protocol}) + + ibmISLBListenerResourceValidator := ResourceValidator{ResourceName: "ibm_is_lb_listener", Schema: validateSchema} + return &ibmISLBListenerResourceValidator +} + +func resourceIBMISLBListenerCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + log.Printf("[DEBUG] LB Listener create") + lbID := d.Get(isLBListenerLBID).(string) + port := int64(d.Get(isLBListenerPort).(int)) + protocol := d.Get(isLBListenerProtocol).(string) + acceptProxyProtocol := d.Get(isLBListenerAcceptProxyProtocol).(bool) + var defPool, certificateCRN string + if pool, ok := d.GetOk(isLBListenerDefaultPool); ok { + lbPool, err := getPoolId(pool.(string)) + if err != nil { + return err + } + defPool = lbPool + } + + if crn, ok := d.GetOk(isLBListenerCertificateInstance); ok { + certificateCRN = crn.(string) + } + + var connLimit int64 + + if limit, ok := d.GetOk(isLBListenerConnectionLimit); ok { + connLimit = int64(limit.(int)) + } + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + if userDetails.generation == 1 { + err := classicLBListenerCreate(d, meta, lbID, protocol, defPool, certificateCRN, port, connLimit) + if err != nil { + return err + } + } else { + err := lbListenerCreate(d, meta, lbID, protocol, defPool, certificateCRN, port, connLimit, acceptProxyProtocol) + if err != nil { + return err + } + } + return resourceIBMISLBListenerRead(d, meta) +} + +func classicLBListenerCreate(d *schema.ResourceData, meta interface{}, lbID, protocol, defPool, certificateCRN string, port, connLimit int64) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.CreateLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + Port: &port, + Protocol: &protocol, + } + if defPool != "" { + options.DefaultPool = &vpcclassicv1.LoadBalancerPoolIdentity{ + ID: &defPool, + } + } + if certificateCRN 
!= "" { + options.CertificateInstance = &vpcclassicv1.CertificateInstanceIdentity{ + CRN: &certificateCRN, + } + } + if connLimit > int64(0) { + options.ConnectionLimit = &connLimit + } + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + lbListener, response, err := sess.CreateLoadBalancerListener(options) + if err != nil { + return fmt.Errorf("Error while creating Load Balancer Listener err %s\n%s", err, response) + } + d.SetId(fmt.Sprintf("%s/%s", lbID, *lbListener.ID)) + _, err = isWaitForClassicLBListenerAvailable(sess, lbID, *lbListener.ID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer listener(%s) to become ready: %s", d.Id(), err) + } + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", lbID, err) + } + + log.Printf("[INFO] Load balancer Listener : %s", *lbListener.ID) + return nil +} + +func lbListenerCreate(d *schema.ResourceData, meta interface{}, lbID, protocol, defPool, certificateCRN string, port, connLimit int64, acceptProxyProtocol bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.CreateLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + Port: &port, + Protocol: &protocol, + AcceptProxyProtocol: &acceptProxyProtocol, + } + if defPool != "" { + options.DefaultPool = &vpcv1.LoadBalancerPoolIdentity{ + ID: &defPool, + } + } + if certificateCRN != "" { + options.CertificateInstance = &vpcv1.CertificateInstanceIdentity{ + CRN: &certificateCRN, + } + } + if connLimit > int64(0) { + options.ConnectionLimit = &connLimit + } + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + lbListener, response, err := sess.CreateLoadBalancerListener(options) + if err != nil { + return fmt.Errorf("Error while creating Load Balancer Listener err %s\n%s", err, response) + } + d.SetId(fmt.Sprintf("%s/%s", lbID, *lbListener.ID)) + _, err = isWaitForLBListenerAvailable(sess, lbID, *lbListener.ID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer listener(%s) to become ready: %s", d.Id(), err) + } + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", lbID, err) + } + + log.Printf("[INFO] Load balancer Listener : %s", *lbListener.ID) + return nil +} + +func isWaitForClassicLBListenerAvailable(sess *vpcclassicv1.VpcClassicV1, lbID, lbListenerID string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for load balancer Listener(%s) to be available.", lbListenerID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBListenerProvisioning, "create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBListenerProvisioningDone, ""}, + Refresh: isClassicLBListenerRefreshFunc(sess, lbID, lbListenerID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicLBListenerRefreshFunc(sess *vpcclassicv1.VpcClassicV1, lbID, lbListenerID string) resource.StateRefreshFunc {
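+ // poll the listener and report "done" once its provisioning status settles on active or failed; otherwise surface the raw status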
+ return func() (interface{}, string, error) { + + getLoadBalancerListenerOptions := &vpcclassicv1.GetLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + ID: &lbListenerID, + } + lblis, response, err := sess.GetLoadBalancerListener(getLoadBalancerListenerOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Load Balancer Listener: %s\n%s", err, response) + } + + if *lblis.ProvisioningStatus == "active" || *lblis.ProvisioningStatus == "failed" { + return lblis, isLBListenerProvisioningDone, nil + } + + return lblis, *lblis.ProvisioningStatus, nil + } +} + +func isWaitForLBListenerAvailable(sess *vpcv1.VpcV1, lbID, lbListenerID string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for load balancer Listener(%s) to be available.", lbListenerID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBListenerProvisioning, "create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBListenerProvisioningDone, ""}, + Refresh: isLBListenerRefreshFunc(sess, lbID, lbListenerID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLBListenerRefreshFunc(sess *vpcv1.VpcV1, lbID, lbListenerID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getLoadBalancerListenerOptions := &vpcv1.GetLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + ID: &lbListenerID, + } + lblis, response, err := sess.GetLoadBalancerListener(getLoadBalancerListenerOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Load Balancer Listener: %s\n%s", err, response) + } + + if *lblis.ProvisioningStatus == "active" || *lblis.ProvisioningStatus == "failed" { + return lblis, isLBListenerProvisioningDone, nil + } + + return lblis, *lblis.ProvisioningStatus, nil + } +} + +func resourceIBMISLBListenerRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + lbID := parts[0] + lbListenerID := parts[1] + + if userDetails.generation == 1 { + err := classicLBListenerGet(d, meta, lbID, lbListenerID) + if err != nil { + return err + } + } else { + err := lbListenerGet(d, meta, lbID, lbListenerID) + if err != nil { + return err + } + } + + return nil +} + +func classicLBListenerGet(d *schema.ResourceData, meta interface{}, lbID, lbListenerID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getLoadBalancerListenerOptions := &vpcclassicv1.GetLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + ID: &lbListenerID, + } + lbListener, response, err := sess.GetLoadBalancerListener(getLoadBalancerListenerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Load Balancer Listener : %s\n%s", err, response) + } + d.Set(isLBListenerLBID, lbID) + d.Set(isLBListenerPort, *lbListener.Port) + d.Set(isLBListenerProtocol, *lbListener.Protocol) + d.Set(isLBListenerID, lbListenerID) + if lbListener.DefaultPool != nil { + d.Set(isLBListenerDefaultPool, *lbListener.DefaultPool.ID) + } + if lbListener.CertificateInstance != nil { + d.Set(isLBListenerCertificateInstance, *lbListener.CertificateInstance.CRN) + } + if lbListener.ConnectionLimit != nil { + d.Set(isLBListenerConnectionLimit, *lbListener.ConnectionLimit) + } + d.Set(isLBListenerStatus, 
*lbListener.ProvisioningStatus) + getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &lbID, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response) + } + d.Set(RelatedCRN, *lb.CRN) + return nil +} + +func lbListenerGet(d *schema.ResourceData, meta interface{}, lbID, lbListenerID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getLoadBalancerListenerOptions := &vpcv1.GetLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + ID: &lbListenerID, + } + lbListener, response, err := sess.GetLoadBalancerListener(getLoadBalancerListenerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Load Balancer Listener : %s\n%s", err, response) + } + d.Set(isLBListenerLBID, lbID) + d.Set(isLBListenerPort, *lbListener.Port) + d.Set(isLBListenerProtocol, *lbListener.Protocol) + d.Set(isLBListenerAcceptProxyProtocol, *lbListener.AcceptProxyProtocol) + d.Set(isLBListenerID, lbListenerID) + if lbListener.DefaultPool != nil { + d.Set(isLBListenerDefaultPool, *lbListener.DefaultPool.ID) + } + if lbListener.CertificateInstance != nil { + d.Set(isLBListenerCertificateInstance, *lbListener.CertificateInstance.CRN) + } + if lbListener.ConnectionLimit != nil { + d.Set(isLBListenerConnectionLimit, *lbListener.ConnectionLimit) + } + d.Set(isLBListenerStatus, *lbListener.ProvisioningStatus) + getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &lbID, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response) + } + d.Set(RelatedCRN, *lb.CRN) + return nil +} + +func resourceIBMISLBListenerUpdate(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + lbID := parts[0] + lbListenerID := parts[1] + + if userDetails.generation == 1 { + err := classicLBListenerUpdate(d, meta, lbID, lbListenerID) + if err != nil { + return err + } + } else { + err := lbListenerUpdate(d, meta, lbID, lbListenerID) + if err != nil { + return err + } + } + + return resourceIBMISLBListenerRead(d, meta) +} + +func classicLBListenerUpdate(d *schema.ResourceData, meta interface{}, lbID, lbListenerID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + hasChanged := false + var certificateInstance, defPool, protocol string + var connLimit, port int64 + updateLoadBalancerListenerOptions := &vpcclassicv1.UpdateLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + ID: &lbListenerID, + } + loadBalancerListenerPatchModel := &vpcclassicv1.LoadBalancerListenerPatch{} + + if d.HasChange(isLBListenerCertificateInstance) { + certificateInstance = d.Get(isLBListenerCertificateInstance).(string) + loadBalancerListenerPatchModel.CertificateInstance = &vpcclassicv1.CertificateInstanceIdentity{ + CRN: &certificateInstance, + } + hasChanged = true + } + + if d.HasChange(isLBListenerDefaultPool) { + lbpool, err := getPoolId(d.Get(isLBListenerDefaultPool).(string)) + if err != nil { + return err + } + defPool = lbpool + loadBalancerListenerPatchModel.DefaultPool = &vpcclassicv1.LoadBalancerPoolIdentity{ + ID: &defPool, + } + hasChanged = true + } + if d.HasChange(isLBListenerPort) { + port = 
int64(d.Get(isLBListenerPort).(int)) + loadBalancerListenerPatchModel.Port = &port + hasChanged = true + } + + if d.HasChange(isLBListenerProtocol) { + protocol = d.Get(isLBListenerProtocol).(string) + loadBalancerListenerPatchModel.Protocol = &protocol + hasChanged = true + } + + if d.HasChange(isLBListenerConnectionLimit) { + connLimit = int64(d.Get(isLBListenerConnectionLimit).(int)) + loadBalancerListenerPatchModel.ConnectionLimit = &connLimit + hasChanged = true + } + + if hasChanged { + loadBalancerListenerPatch, err := loadBalancerListenerPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for LoadBalancerListenerPatch: %s", err) + } + updateLoadBalancerListenerOptions.LoadBalancerListenerPatch = loadBalancerListenerPatch + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + _, response, err := sess.UpdateLoadBalancerListener(updateLoadBalancerListenerOptions) + if err != nil { + return fmt.Errorf("Error Updating Load Balancer Listener : %s\n%s", err, response) + } + + _, err = isWaitForClassicLBListenerAvailable(sess, lbID, lbListenerID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer listener(%s) to become ready: %s", d.Id(), err) + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", lbID, err) + } + } + return nil +} + +func lbListenerUpdate(d *schema.ResourceData, meta interface{}, lbID, lbListenerID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + hasChanged := false + var certificateInstance, defPool, protocol string + var connLimit, port int64 + updateLoadBalancerListenerOptions := &vpcv1.UpdateLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + ID: &lbListenerID, + } + + loadBalancerListenerPatchModel := &vpcv1.LoadBalancerListenerPatch{} + + if d.HasChange(isLBListenerCertificateInstance) { + certificateInstance = d.Get(isLBListenerCertificateInstance).(string) + loadBalancerListenerPatchModel.CertificateInstance = &vpcv1.CertificateInstanceIdentity{ + CRN: &certificateInstance, + } + hasChanged = true + } + + if d.HasChange(isLBListenerDefaultPool) { + lbpool, err := getPoolId(d.Get(isLBListenerDefaultPool).(string)) + if err != nil { + return err + } + defPool = lbpool + loadBalancerListenerPatchModel.DefaultPool = &vpcv1.LoadBalancerPoolIdentity{ + ID: &defPool, + } + hasChanged = true + } + if d.HasChange(isLBListenerPort) { + port = int64(d.Get(isLBListenerPort).(int)) + loadBalancerListenerPatchModel.Port = &port + hasChanged = true + } + + if d.HasChange(isLBListenerProtocol) { + protocol = d.Get(isLBListenerProtocol).(string) + loadBalancerListenerPatchModel.Protocol = &protocol + hasChanged = true + } + + if d.HasChange(isLBListenerAcceptProxyProtocol) { + acceptProxyProtocol := d.Get(isLBListenerAcceptProxyProtocol).(bool) + loadBalancerListenerPatchModel.AcceptProxyProtocol = &acceptProxyProtocol + hasChanged = true + } + + if d.HasChange(isLBListenerConnectionLimit) { + connLimit = int64(d.Get(isLBListenerConnectionLimit).(int)) + loadBalancerListenerPatchModel.ConnectionLimit = &connLimit + hasChanged = true + } + + if hasChanged { + 
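// Only the fields flagged above by d.HasChange are serialized into the patch sent to UpdateLoadBalancerListener.
+        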
loadBalancerListenerPatch, err := loadBalancerListenerPatchModel.AsPatch()
+        if err != nil {
+            return fmt.Errorf("Error calling asPatch for LoadBalancerListenerPatch: %s", err)
+        }
+        updateLoadBalancerListenerOptions.LoadBalancerListenerPatch = loadBalancerListenerPatch
+
+        isLBKey := "load_balancer_key_" + lbID
+        ibmMutexKV.Lock(isLBKey)
+        defer ibmMutexKV.Unlock(isLBKey)
+
+        _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate))
+        if err != nil {
+            return fmt.Errorf(
+                "Error checking for load balancer (%s) is active: %s", lbID, err)
+        }
+        _, response, err := sess.UpdateLoadBalancerListener(updateLoadBalancerListenerOptions)
+        if err != nil {
+            return fmt.Errorf("Error Updating Load Balancer Listener : %s\n%s", err, response)
+        }
+
+        _, err = isWaitForLBListenerAvailable(sess, lbID, lbListenerID, d.Timeout(schema.TimeoutUpdate))
+        if err != nil {
+            return fmt.Errorf(
+                "Error waiting for load balancer listener(%s) to become ready: %s", d.Id(), err)
+        }
+
+        _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate))
+        if err != nil {
+            return fmt.Errorf(
+                "Error waiting for load balancer (%s) to become ready: %s", lbID, err)
+        }
+    }
+    return nil
+}
+
+func resourceIBMISLBListenerDelete(d *schema.ResourceData, meta interface{}) error {
+    userDetails, err := meta.(ClientSession).BluemixUserDetails()
+    if err != nil {
+        return err
+    }
+    parts, err := idParts(d.Id())
+    if err != nil {
+        return err
+    }
+
+    lbID := parts[0]
+    lbListenerID := parts[1]
+
+    isLBKey := "load_balancer_key_" + lbID
+    ibmMutexKV.Lock(isLBKey)
+    defer ibmMutexKV.Unlock(isLBKey)
+
+    if userDetails.generation == 1 {
+        err := classicLBListenerDelete(d, meta, lbID, lbListenerID)
+        if err != nil {
+            return err
+        }
+    } else {
+        err := lbListenerDelete(d, meta, lbID, lbListenerID)
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func classicLBListenerDelete(d *schema.ResourceData, meta interface{}, lbID, lbListenerID string) error {
+    sess, err := classicVpcClient(meta)
+    if err != nil {
+        return err
+    }
+    getLoadBalancerListenerOptions := &vpcclassicv1.GetLoadBalancerListenerOptions{
+        LoadBalancerID: &lbID,
+        ID:             &lbListenerID,
+    }
+    _, response, err := sess.GetLoadBalancerListener(getLoadBalancerListenerOptions)
+
+    if err != nil {
+        if response != nil && response.StatusCode == 404 {
+            d.SetId("")
+            return nil
+        }
+        return fmt.Errorf("Error Getting vpc load balancer listener(%s): %s\n%s", lbListenerID, err, response)
+    }
+    _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete))
+    if err != nil {
+        return fmt.Errorf(
+            "Error checking for load balancer (%s) is active: %s", lbID, err)
+    }
+    deleteLoadBalancerListenerOptions := &vpcclassicv1.DeleteLoadBalancerListenerOptions{
+        LoadBalancerID: &lbID,
+        ID:             &lbListenerID,
+    }
+    response, err = sess.DeleteLoadBalancerListener(deleteLoadBalancerListenerOptions)
+    if err != nil {
+        return fmt.Errorf("Error Deleting Load Balancer Listener : %s\n%s", err, response)
+    }
+    _, err = isWaitForClassicLBListenerDeleted(sess, lbID, lbListenerID, d.Timeout(schema.TimeoutDelete))
+    if err != nil {
+        return err
+    }
+    _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete))
+    if err != nil {
+        return fmt.Errorf(
+            "Error waiting for load balancer (%s) to be active: %s", lbID, err)
+    }
+
+    d.SetId("")
+    return nil
+}
+
+func lbListenerDelete(d *schema.ResourceData, meta interface{}, lbID, lbListenerID string) error {
+    sess, err := vpcClient(meta)
+    if err != nil {
+        return err
+    }
+    // Confirm the listener still exists before deleting; a 404 means it is already gone.
+    getLoadBalancerListenerOptions := &vpcv1.GetLoadBalancerListenerOptions{
+        LoadBalancerID: &lbID,
+        ID:             &lbListenerID,
+    }
+    _, response, err := sess.GetLoadBalancerListener(getLoadBalancerListenerOptions)
+
+    if err != nil {
+        if response != nil && response.StatusCode == 404 {
+            d.SetId("")
+            return nil
+        }
+        return fmt.Errorf("Error Getting vpc load balancer listener(%s): %s\n%s", lbListenerID, err, response)
+    }
+    _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete))
+    if err != nil {
+        return fmt.Errorf(
+            "Error checking for load balancer (%s) is active: %s", lbID, err)
+    }
+    deleteLoadBalancerListenerOptions := &vpcv1.DeleteLoadBalancerListenerOptions{
+        LoadBalancerID: &lbID,
+        ID:             &lbListenerID,
+    }
+    response, err = sess.DeleteLoadBalancerListener(deleteLoadBalancerListenerOptions)
+    if err != nil {
+        return fmt.Errorf("Error Deleting Load Balancer Listener : %s\n%s", err, response)
+    }
+    _, err = isWaitForLBListenerDeleted(sess, lbID, lbListenerID, d.Timeout(schema.TimeoutDelete))
+    if err != nil {
+        return err
+    }
+    _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete))
+    if err != nil {
+        return fmt.Errorf(
+            "Error waiting for load balancer (%s) to be active: %s", lbID, err)
+    }
+
+    d.SetId("")
+    return nil
+}
+
+func isWaitForClassicLBListenerDeleted(lbc *vpcclassicv1.VpcClassicV1, lbID, lbListenerID string, timeout time.Duration) (interface{}, error) {
+    log.Printf("Waiting for (%s) to be deleted.", lbListenerID)
+
+    stateConf := &resource.StateChangeConf{
+        Pending:    []string{"retry", isLBListenerDeleting, "delete_pending"},
+        Target:     []string{isLBListenerDeleted, ""},
+        Refresh:    isClassicLBListenerDeleteRefreshFunc(lbc, lbID, lbListenerID),
+        Timeout:    timeout,
+        Delay:      10 * time.Second,
+        MinTimeout: 10 * time.Second,
+    }
+
+    return stateConf.WaitForState()
+}
+
+func isClassicLBListenerDeleteRefreshFunc(lbc *vpcclassicv1.VpcClassicV1, lbID, lbListenerID string) resource.StateRefreshFunc {
+    return func() (interface{}, string, error) {
+        getLoadBalancerListenerOptions := &vpcclassicv1.GetLoadBalancerListenerOptions{
+            LoadBalancerID: &lbID,
+            ID:             &lbListenerID,
+        }
+        lbLis, response, err := lbc.GetLoadBalancerListener(getLoadBalancerListenerOptions)
+        if err != nil {
+            if response != nil && response.StatusCode == 404 {
+                return lbLis, isLBListenerDeleted, nil
+            }
+            return nil, "", fmt.Errorf("The vpc load balancer listener %s failed to delete: %s\n%s", lbListenerID, err, response)
+        }
+        return lbLis, isLBListenerDeleting, nil
+    }
+}
+
+func isWaitForLBListenerDeleted(lbc *vpcv1.VpcV1, lbID, lbListenerID string, timeout time.Duration) (interface{}, error) {
+    log.Printf("Waiting for (%s) to be deleted.", lbListenerID)
+
+    stateConf := &resource.StateChangeConf{
+        Pending:    []string{"retry", isLBListenerDeleting, "delete_pending"},
+        Target:     []string{isLBListenerDeleted, ""},
+        Refresh:    isLBListenerDeleteRefreshFunc(lbc, lbID, lbListenerID),
+        Timeout:    timeout,
+        Delay:      10 * time.Second,
+        MinTimeout: 10 * time.Second,
+    }
+
+    return stateConf.WaitForState()
+}
+
+func isLBListenerDeleteRefreshFunc(lbc *vpcv1.VpcV1, lbID, lbListenerID string) resource.StateRefreshFunc {
+    return func() (interface{}, string, error) {
+        getLoadBalancerListenerOptions := &vpcv1.GetLoadBalancerListenerOptions{
+            LoadBalancerID: &lbID,
+            ID:             &lbListenerID,
+        }
+        lbLis, response, err := lbc.GetLoadBalancerListener(getLoadBalancerListenerOptions)
+        if err != nil {
+            if response != nil && response.StatusCode == 404 {
+                return lbLis, isLBListenerDeleted, nil
+            }
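+            // Any other polling error is terminal and aborts the wait.
+            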
return nil, "", fmt.Errorf("The vpc load balancer listener %s failed to delete: %s\n%s", lbListenerID, err, response) + } + return lbLis, isLBListenerDeleting, nil + } +} + +func resourceIBMISLBListenerExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + if len(parts) != 2 { + return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of lbID/lbListenerID", d.Id()) + } + lbID := parts[0] + lbListenerID := parts[1] + + if userDetails.generation == 1 { + exists, err := classicLBListenerExists(d, meta, lbID, lbListenerID) + return exists, err + } else { + exists, err := lbListenerExists(d, meta, lbID, lbListenerID) + return exists, err + } +} + +func classicLBListenerExists(d *schema.ResourceData, meta interface{}, lbID, lbListenerID string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + + getLoadBalancerListenerOptions := &vpcclassicv1.GetLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + ID: &lbListenerID, + } + _, response, err := sess.GetLoadBalancerListener(getLoadBalancerListenerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Load balancer Listener: %s\n%s", err, response) + } + return true, nil +} + +func lbListenerExists(d *schema.ResourceData, meta interface{}, lbID, lbListenerID string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + + getLoadBalancerListenerOptions := &vpcv1.GetLoadBalancerListenerOptions{ + LoadBalancerID: &lbID, + ID: &lbListenerID, + } + _, response, err := sess.GetLoadBalancerListener(getLoadBalancerListenerOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Load balancer Listener: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_listener_policy.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_listener_policy.go new file mode 100644 index 00000000000..4cd082aae15 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_listener_policy.go @@ -0,0 +1,1463 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "reflect" + "strings" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isLBListenerPolicyLBID = "lb" + isLBListenerPolicyListenerID = "listener" + isLBListenerPolicyAction = "action" + isLBListenerPolicyPriority = "priority" + isLBListenerPolicyName = "name" + isLBListenerPolicyID = "policy_id" + isLBListenerPolicyRules = "rules" + isLBListenerPolicyRulesInfo = "rule_info" + isLBListenerPolicyTargetID = "target_id" + isLBListenerPolicyTargetHTTPStatusCode = "target_http_status_code" + isLBListenerPolicyTargetURL = "target_url" + isLBListenerPolicyStatus = "provisioning_status" + isLBListenerPolicyRuleID = "rule_id" + isLBListenerPolicyAvailable = "active" + isLBListenerPolicyFailed = "failed" + isLBListenerPolicyPending = "pending" + isLBListenerPolicyDeleting = "deleting" + isLBListenerPolicyDeleted = "done" + isLBListenerPolicyRetry = "retry" + isLBListenerPolicyRuleCondition = "condition" + isLBListenerPolicyRuleType = "type" + isLBListenerPolicyRuleValue = "value" + isLBListenerPolicyRuleField = "field" + isLBListenerPolicyProvisioning = "provisioning" + isLBListenerPolicyProvisioningDone = "done" +) + +func resourceIBMISLBListenerPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISLBListenerPolicyCreate, + Read: resourceIBMISLBListenerPolicyRead, + Update: resourceIBMISLBListenerPolicyUpdate, + Delete: resourceIBMISLBListenerPolicyDelete, + Exists: resourceIBMISLBListenerPolicyExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + isLBListenerPolicyLBID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Load Balancer Listener Policy", + }, + + isLBListenerPolicyListenerID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return false + } + // if state file entry and tf file entry matches + if strings.Compare(n, o) == 0 { + return true + } + + if strings.Contains(n, "/") { + + //Split lbID/listenerID and fetch listenerID + new := strings.Split(n, "/") + + if strings.Compare(new[1], o) == 0 { + return true + } + } + + return false + }, + Description: "Listener ID", + }, + + isLBListenerPolicyAction: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: InvokeValidator("ibm_is_lb_listener_policy", isLBListenerPolicyAction), + Description: "Policy Action", + }, + + isLBListenerPolicyPriority: { + Type: schema.TypeInt, + Required: true, + ForceNew: false, + ValidateFunc: validateLBListenerPolicyPriority, + Description: "Listener Policy Priority", + }, + + isLBListenerPolicyName: { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: InvokeValidator("ibm_is_lb_listener_policy", isLBListenerPolicyName), + Description: "Policy name", + }, + + isLBListenerPolicyID: { + Type: schema.TypeString, + Computed: true, + Description: "Listener Policy ID", + }, + + isLBListenerPolicyRules: { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Policy Rules", 
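+            // Each rule entry below mirrors the SDK's LoadBalancerListenerPolicyRulePrototype: condition, type, value and an optional field.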
+            Elem: &schema.Resource{
+                Schema: map[string]*schema.Schema{
+                    isLBListenerPolicyRuleCondition: {
+                        Type:         schema.TypeString,
+                        Required:     true,
+                        ValidateFunc: InvokeValidator("ibm_is_lb_listener_policy_rule", isLBListenerPolicyRulecondition),
+                        Description:  "Condition of the rule",
+                    },
+
+                    isLBListenerPolicyRuleType: {
+                        Type:         schema.TypeString,
+                        Required:     true,
+                        ValidateFunc: InvokeValidator("ibm_is_lb_listener_policy_rule", isLBListenerPolicyRuleType),
+                        Description:  "Type of the rule",
+                    },
+
+                    isLBListenerPolicyRuleValue: {
+                        Type:         schema.TypeString,
+                        Required:     true,
+                        ValidateFunc: validateStringLength,
+                        Description:  "Value to be matched for rule condition",
+                    },
+
+                    isLBListenerPolicyRuleField: {
+                        Type:         schema.TypeString,
+                        Optional:     true,
+                        ValidateFunc: validateStringLength,
+                        Description:  "HTTP header field. This is only applicable to the header rule type.",
+                    },
+
+                    isLBListenerPolicyRuleID: {
+                        Type:        schema.TypeString,
+                        Computed:    true,
+                        Description: "Rule ID",
+                    },
+                },
+            },
+        },
+
+        isLBListenerPolicyTargetID: {
+            Type:     schema.TypeString,
+            ForceNew: false,
+            Optional: true,
+            DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {
+                if o == "" {
+                    return false
+                }
+                // if the state file entry and the tf file entry match
+                if strings.Compare(n, o) == 0 {
+                    return true
+                }
+
+                if strings.Contains(n, "/") {
+                    // Split lbID/poolID and fetch the poolID
+                    new := strings.Split(n, "/")
+
+                    if strings.Compare(new[1], o) == 0 {
+                        return true
+                    }
+                }
+
+                return false
+            },
+            Description: "Listener Policy Target ID",
+        },
+
+        isLBListenerPolicyTargetHTTPStatusCode: {
+            Type:        schema.TypeInt,
+            ForceNew:    false,
+            Optional:    true,
+            Description: "Listener Policy target HTTP status code.",
+        },
+
+        isLBListenerPolicyTargetURL: {
+            Type:        schema.TypeString,
+            ForceNew:    false,
+            Optional:    true,
+            Description: "Policy Target URL",
+        },
+
+        isLBListenerPolicyStatus: {
+            Type:        schema.TypeString,
+            Computed:    true,
+            Description: "Listener Policy status",
+        },
+
+        RelatedCRN: {
+            Type:        schema.TypeString,
+            Computed:    true,
+            Description: "The crn of the LB resource",
+        },
+    },
+    }
+}
+
+func resourceIBMISLBListenerPolicyValidator() *ResourceValidator {
+
+    validateSchema := make([]ValidateSchema, 1)
+    action := "forward, redirect, reject"
+    validateSchema = append(validateSchema,
+        ValidateSchema{
+            Identifier:                 isLBListenerPolicyName,
+            ValidateFunctionIdentifier: ValidateRegexpLen,
+            Type:                       TypeString,
+            Required:                   true,
+            Regexp:                     `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`,
+            MinValueLength:             1,
+            MaxValueLength:             63})
+    validateSchema = append(validateSchema,
+        ValidateSchema{
+            Identifier:                 isLBListenerPolicyAction,
+            ValidateFunctionIdentifier: ValidateAllowedStringValue,
+            Type:                       TypeString,
+            Required:                   true,
+            AllowedValues:              action})
+
+    ibmISLBListenerPolicyResourceValidator := ResourceValidator{ResourceName: "ibm_is_lb_listener_policy", Schema: validateSchema}
+    return &ibmISLBListenerPolicyResourceValidator
+}
+
+func resourceIBMISLBListenerPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+    userDetails, err := meta.(ClientSession).BluemixUserDetails()
+    if err != nil {
+        return err
+    }
+
+    // Get the load balancer ID
+    lbID := d.Get(isLBListenerPolicyLBID).(string)
+
+    // The user can set the listener id as the combination lbID/listenerID; parse out the listenerID
+    listenerID, err := getListenerID(d.Get(isLBListenerPolicyListenerID).(string))
+    if err != nil {
+        return err
+    }
+
+    action := d.Get(isLBListenerPolicyAction).(string)
+    priority := 
int64(d.Get(isLBListenerPolicyPriority).(int)) + + //user-defined name for this policy. + var name string + if n, ok := d.GetOk(isLBListenerPolicyName); ok { + name = n.(string) + } + + if userDetails.generation == 1 { + err := classicLbListenerPolicyCreate(d, meta, lbID, listenerID, action, name, priority) + if err != nil { + return err + } + } else { + err := lbListenerPolicyCreate(d, meta, lbID, listenerID, action, name, priority) + if err != nil { + return err + } + } + + return resourceIBMISLBListenerPolicyRead(d, meta) +} + +func getListenerID(id string) (string, error) { + if strings.Contains(id, "/") { + parts, err := idParts(id) + if err != nil { + return "", err + } + + return parts[1], nil + } else { + return id, nil + } +} + +func classicLbListenerPolicyCreate(d *schema.ResourceData, meta interface{}, lbID, listenerID, action, name string, priority int64) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + // When `action` is `forward`, `LoadBalancerPoolIdentity` is required to specify which + // pool the load balancer forwards the traffic to. When `action` is `redirect`, + // `LoadBalancerListenerPolicyRedirectURLPrototype` is required to specify the url and + // http status code used in the redirect response. + + actionChk := d.Get(isLBListenerPolicyAction) + tID, targetIDSet := d.GetOk(isLBListenerPolicyTargetID) + statusCode, statusSet := d.GetOk(isLBListenerPolicyTargetHTTPStatusCode) + url, urlSet := d.GetOk(isLBListenerPolicyTargetURL) + + var target vpcclassicv1.LoadBalancerListenerPolicyTargetPrototypeIntf + + if actionChk.(string) == "forward" { + if targetIDSet { + + //User can set the poolId as combination of lbID/poolID, if so parse the string & get the poolID + id, err := getPoolID(tID.(string)) + if err != nil { + return err + } + + //id := lbPoolID.(string) + target = &vpcclassicv1.LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity{ + ID: &id, + } + } else { + return fmt.Errorf("When action is forward please specify target_id") + } + } else if actionChk.(string) == "redirect" { + + urlPrototype := vpcclassicv1.LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype{} + + if statusSet { + sc := int64(statusCode.(int)) + urlPrototype.HTTPStatusCode = &sc + } else { + return fmt.Errorf("When action is redirect please specify target_http_status_code") + } + + if urlSet { + link := url.(string) + urlPrototype.URL = &link + } else { + return fmt.Errorf("When action is redirect please specify target_url") + } + + target = &urlPrototype + } + + rulesInfo := make([]vpcclassicv1.LoadBalancerListenerPolicyRulePrototype, 0) + if rules, rulesSet := d.GetOk(isLBListenerPolicyRules); rulesSet { + policyRules := rules.([]interface{}) + for _, rule := range policyRules { + rulex := rule.(map[string]interface{}) + + //condition, type and value are mandatory params + var condition string + if rulex[isLBListenerPolicyRuleCondition] != nil { + condition = rulex[isLBListenerPolicyRuleCondition].(string) + } + + var ty string + if rulex[isLBListenerPolicyRuleType] != nil { + ty = rulex[isLBListenerPolicyRuleType].(string) + } + + var value string + if rulex[isLBListenerPolicyRuleValue] != nil { + value = rulex[isLBListenerPolicyRuleValue].(string) + } + + field := rulex[isLBListenerPolicyRuleField].(string) + + r := vpcclassicv1.LoadBalancerListenerPolicyRulePrototype{ + Condition: &condition, + Field: &field, + Type: &ty, + Value: &value, + } + + rulesInfo = append(rulesInfo, r) + } + } + + options := 
&vpcclassicv1.CreateLoadBalancerListenerPolicyOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + Action: &action, + Priority: &priority, + Name: &name, + Target: target, + Rules: rulesInfo, + } + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + _, err = isWaitForClassicLbAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err) + } + + policy, response, err := sess.CreateLoadBalancerListenerPolicy(options) + if err != nil { + return fmt.Errorf("Error while creating lb listener policy for LB %s: Error %v Response %v", lbID, err, *response) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", lbID, listenerID, *(policy.ID))) + + _, err = isWaitForClassicLbListenerPolicyAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + return nil +} + +func getPoolID(id string) (string, error) { + if strings.Contains(id, "/") { + parts, err := idParts(id) + if err != nil { + return "", err + } + + return parts[1], nil + } + return id, nil + +} + +func isWaitForClassicLbAvailable(vpc *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBListenerProvisioning, "create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBProvisioningDone}, + Refresh: isLbClassicRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLbClassicRefreshFunc(vpc *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getLbOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &id, + } + + lb, _, err := vpc.GetLoadBalancer(getLbOptions) + if err != nil { + return nil, "", err + } + + if *(lb.ProvisioningStatus) == isLBListenerPolicyAvailable || *lb.ProvisioningStatus == isLBListenerPolicyFailed { + return lb, isLBProvisioningDone, nil + } + + return lb, isLBProvisioning, nil + } +} + +func isWaitForClassicLbListenerPolicyAvailable(vpc *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBListenerProvisioning, "create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBListenerPolicyProvisioningDone}, + Refresh: isLbListenerPolicyClassicRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLbListenerPolicyClassicRefreshFunc(vpc *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + parts, err := idParts(id) + if err != nil { + return nil, "", err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + + getLbListenerPolicyOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + ID: &policyID, + } + + policy, _, err := vpc.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions) + + if err != nil { + return policy, "", err + } + + if *policy.ProvisioningStatus == isLBListenerPolicyAvailable || *policy.ProvisioningStatus == isLBListenerPolicyFailed { + return policy, isLBListenerProvisioningDone, nil + } + + return policy, *policy.ProvisioningStatus, nil + } +} + +func 
lbListenerPolicyCreate(d *schema.ResourceData, meta interface{}, lbID, listenerID, action, name string, priority int64) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + // When `action` is `forward`, `LoadBalancerPoolIdentity` is required to specify which + // pool the load balancer forwards the traffic to. When `action` is `redirect`, + // `LoadBalancerListenerPolicyRedirectURLPrototype` is required to specify the url and + // http status code used in the redirect response. + actionChk := d.Get(isLBListenerPolicyAction) + tID, targetIDSet := d.GetOk(isLBListenerPolicyTargetID) + statusCode, statusSet := d.GetOk(isLBListenerPolicyTargetHTTPStatusCode) + url, urlSet := d.GetOk(isLBListenerPolicyTargetURL) + + var target vpcv1.LoadBalancerListenerPolicyTargetPrototypeIntf + + if actionChk.(string) == "forward" { + if targetIDSet { + + //User can set the poolId as combination of lbID/poolID, if so parse the string & get the poolID + id, err := getPoolID(tID.(string)) + if err != nil { + return err + } + + target = &vpcv1.LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity{ + ID: &id, + } + } else { + return fmt.Errorf("When action is forward please specify target_id") + } + } else if actionChk.(string) == "redirect" { + + urlPrototype := vpcv1.LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype{} + + if statusSet { + sc := int64(statusCode.(int)) + urlPrototype.HTTPStatusCode = &sc + } else { + return fmt.Errorf("When action is redirect please specify target_http_status_code") + } + + if urlSet { + link := url.(string) + urlPrototype.URL = &link + } else { + return fmt.Errorf("When action is redirect please specify target_url") + } + + target = &urlPrototype + } + + //Read Rules + rulesInfo := make([]vpcv1.LoadBalancerListenerPolicyRulePrototype, 0) + if rules, rulesSet := d.GetOk(isLBListenerPolicyRules); rulesSet { + policyRules := rules.([]interface{}) + for _, rule := range policyRules { + rulex := rule.(map[string]interface{}) + + //condition, type and value are mandatory params + var condition string + if rulex[isLBListenerPolicyRuleCondition] != nil { + condition = rulex[isLBListenerPolicyRuleCondition].(string) + } + + var ty string + if rulex[isLBListenerPolicyRuleType] != nil { + ty = rulex[isLBListenerPolicyRuleType].(string) + } + + var value string + if rulex[isLBListenerPolicyRuleValue] != nil { + value = rulex[isLBListenerPolicyRuleValue].(string) + } + + field := rulex[isLBListenerPolicyRuleField].(string) + + r := vpcv1.LoadBalancerListenerPolicyRulePrototype{ + Condition: &condition, + Field: &field, + Type: &ty, + Value: &value, + } + + rulesInfo = append(rulesInfo, r) + } + } + + options := &vpcv1.CreateLoadBalancerListenerPolicyOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + Action: &action, + Priority: &priority, + Target: target, + Name: &name, + Rules: rulesInfo, + } + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + _, err = isWaitForLbAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err) + } + + policy, response, err := sess.CreateLoadBalancerListenerPolicy(options) + if err != nil { + return fmt.Errorf("Error while creating lb listener policy for LB %s: Error %v Response %v", lbID, err, *response) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", lbID, listenerID, *(policy.ID))) + + _, err = 
isWaitForLbListenerPolicyAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + return nil +} + +func isWaitForLbAvailable(vpc *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBListenerPolicyPending}, + Target: []string{isLBProvisioningDone}, + Refresh: isLbRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLbRefreshFunc(vpc *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getLbOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &id, + } + + lb, _, err := vpc.GetLoadBalancer(getLbOptions) + if err != nil { + return nil, "", err + } + + if *(lb.ProvisioningStatus) == isLBListenerPolicyAvailable { + return lb, isLBProvisioningDone, nil + } + + return lb, isLBProvisioning, nil + } +} + +func isWaitForLbListenerPolicyAvailable(vpc *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBListenerProvisioning, "create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBListenerProvisioningDone}, + Refresh: isLbListenerPolicyRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLbListenerPolicyRefreshFunc(vpc *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + parts, err := idParts(id) + if err != nil { + return nil, "", err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + + getLbListenerPolicyOptions := &vpcv1.GetLoadBalancerListenerPolicyOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + ID: &policyID, + } + + policy, _, err := vpc.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions) + + if err != nil { + return policy, "", err + } + + if *policy.ProvisioningStatus == isLBListenerPolicyAvailable || *policy.ProvisioningStatus == isLBListenerPolicyFailed { + return policy, isLBListenerProvisioningDone, nil + } + + return policy, *policy.ProvisioningStatus, nil + } +} + +func resourceIBMISLBListenerPolicyRead(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + ID := d.Id() + parts, err := idParts(ID) + if err != nil { + return err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + + if userDetails.generation == 1 { + err := classicLbListenerPolicyGet(d, meta, lbID, listenerID, policyID) + if err != nil { + return err + } + } else { + err := lbListenerPolicyGet(d, meta, lbID, listenerID, policyID) + if err != nil { + return err + } + } + return nil +} + +func resourceIBMISLBListenerPolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + ID := d.Id() + if userDetails.generation == 1 { + exists, err := classicLbListenerPolicyExists(d, meta, ID) + return exists, err + } else { + exists, err := lbListenerPolicyExists(d, meta, ID) + return exists, err + } +} + +func classicLbListenerPolicyExists(d *schema.ResourceData, meta interface{}, ID string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + + //Retrieve lbID, listenerID and 
policyID + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + + //populate lblistenerpolicyOPtions + getLbListenerPolicyOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + ID: &policyID, + } + + //Getting lb listener policy + _, response, err := sess.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions) + + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Load balancer policy: %s\n%s", err, response) + } + + return true, nil +} + +func lbListenerPolicyExists(d *schema.ResourceData, meta interface{}, ID string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + if len(parts) != 3 { + return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of lbID/listenerID/policyID", d.Id()) + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + + getLbListenerPolicyOptions := &vpcv1.GetLoadBalancerListenerPolicyOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + ID: &policyID, + } + + //Getting lb listener policy + _, response, err := sess.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions) + + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Load balancer policy: %s\n%s", err, response) + } + return true, nil +} +func resourceIBMISLBListenerPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + + if userDetails.generation == 1 { + + err := classicLbListenerPolicyUpdate(d, meta, lbID, listenerID, policyID) + if err != nil { + return err + } + } else { + + err := lbListenerPolicyUpdate(d, meta, lbID, listenerID, policyID) + if err != nil { + return err + } + } + + return resourceIBMISLBListenerPolicyRead(d, meta) +} + +func classicLbListenerPolicyUpdate(d *schema.ResourceData, meta interface{}, lbID, listenerID, ID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + hasChanged := false + updatePolicyOptions := vpcclassicv1.UpdateLoadBalancerListenerPolicyOptions{} + updatePolicyOptions.LoadBalancerID = &lbID + updatePolicyOptions.ListenerID = &listenerID + updatePolicyOptions.ID = &ID + + loadBalancerListenerPolicyPatchModel := &vpcclassicv1.LoadBalancerListenerPolicyPatch{} + + if d.HasChange(isLBListenerPolicyName) { + policy := d.Get(isLBListenerPolicyName).(string) + loadBalancerListenerPolicyPatchModel.Name = &policy + hasChanged = true + } + + if d.HasChange(isLBListenerPolicyPriority) { + prio := d.Get(isLBListenerPolicyPriority).(int) + priority := int64(prio) + loadBalancerListenerPolicyPatchModel.Priority = &priority + hasChanged = true + } + + var target vpcclassicv1.LoadBalancerListenerPolicyTargetPatchIntf + + //If Action is forward and TargetID is changed, set the target to pool ID + if d.Get(isLBListenerPolicyAction).(string) == "forward" && d.HasChange(isLBListenerPolicyTargetID) { + + //User can set the poolId as combination of lbID/poolID, if so parse the string & get the poolID + id, err := 
getPoolID(d.Get(isLBListenerPolicyTargetID).(string))
+        if err != nil {
+            return err
+        }
+
+        target = &vpcclassicv1.LoadBalancerListenerPolicyTargetPatch{
+            ID: &id,
+        }
+
+        loadBalancerListenerPolicyPatchModel.Target = target
+        hasChanged = true
+    } else if d.Get(isLBListenerPolicyAction).(string) == "redirect" {
+        // If the action is redirect and either the status code or the URL changed, patch accordingly
+        // using LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch.
+
+        redirectPatch := vpcclassicv1.LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch{}
+
+        targetChange := false
+        if d.HasChange(isLBListenerPolicyTargetHTTPStatusCode) {
+            status := d.Get(isLBListenerPolicyTargetHTTPStatusCode).(int)
+            sc := int64(status)
+            redirectPatch.HTTPStatusCode = &sc
+            hasChanged = true
+            targetChange = true
+        }
+
+        if d.HasChange(isLBListenerPolicyTargetURL) {
+            url := d.Get(isLBListenerPolicyTargetURL).(string)
+            redirectPatch.URL = &url
+            hasChanged = true
+            targetChange = true
+        }
+
+        // Update the target only if either the status code or the URL changed
+        if targetChange {
+            target = &redirectPatch
+            loadBalancerListenerPolicyPatchModel.Target = target
+        }
+    }
+
+    isLBKey := "load_balancer_key_" + lbID
+    ibmMutexKV.Lock(isLBKey)
+    defer ibmMutexKV.Unlock(isLBKey)
+
+    if hasChanged {
+        loadBalancerListenerPolicyPatch, err := loadBalancerListenerPolicyPatchModel.AsPatch()
+        if err != nil {
+            return fmt.Errorf("Error calling asPatch for LoadBalancerListenerPolicyPatch: %s", err)
+        }
+        updatePolicyOptions.LoadBalancerListenerPolicyPatch = loadBalancerListenerPolicyPatch
+        _, err = isWaitForClassicLbAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate))
+        if err != nil {
+            return fmt.Errorf(
+                "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err)
+        }
+        _, response, err := sess.UpdateLoadBalancerListenerPolicy(&updatePolicyOptions)
+        if err != nil {
+            return fmt.Errorf("Error Updating Load Balancer Listener Policy: %s\n%s", err, response)
+        }
+
+        _, err = isWaitForClassicLbListenerPolicyAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate))
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func lbListenerPolicyUpdate(d *schema.ResourceData, meta interface{}, lbID, listenerID, ID string) error {
+    sess, err := vpcClient(meta)
+    if err != nil {
+        return err
+    }
+    hasChanged := false
+    updatePolicyOptions := vpcv1.UpdateLoadBalancerListenerPolicyOptions{}
+    updatePolicyOptions.LoadBalancerID = &lbID
+    updatePolicyOptions.ListenerID = &listenerID
+    updatePolicyOptions.ID = &ID
+
+    loadBalancerListenerPolicyPatchModel := &vpcv1.LoadBalancerListenerPolicyPatch{}
+
+    if d.HasChange(isLBListenerPolicyName) {
+        policy := d.Get(isLBListenerPolicyName).(string)
+        loadBalancerListenerPolicyPatchModel.Name = &policy
+        hasChanged = true
+    }
+
+    if d.HasChange(isLBListenerPolicyPriority) {
+        prio := d.Get(isLBListenerPolicyPriority).(int)
+        priority := int64(prio)
+        loadBalancerListenerPolicyPatchModel.Priority = &priority
+        hasChanged = true
+    }
+
+    var target vpcv1.LoadBalancerListenerPolicyTargetPatchIntf
+    // If the action is forward and the target ID changed, point the patch at the new pool ID
+    if d.Get(isLBListenerPolicyAction).(string) == "forward" && d.HasChange(isLBListenerPolicyTargetID) {
+
+        // The user can set the poolId as the combination lbID/poolID; if so, parse out the poolID
+        id, err := getPoolID(d.Get(isLBListenerPolicyTargetID).(string))
+        if err != nil {
+            return err
+        }
+        target = &vpcv1.LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity{
+            ID: &id,
+        }
+
+        loadBalancerListenerPolicyPatchModel.Target = target
+        hasChanged = true
+    } else if d.Get(isLBListenerPolicyAction).(string) == "redirect" {
+        // If the action is redirect and either the status code or the URL changed, patch accordingly
+        // using LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch.
+
+        redirectPatch := vpcv1.LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch{}
+
+        targetChange := false
+        if d.HasChange(isLBListenerPolicyTargetHTTPStatusCode) {
+            status := d.Get(isLBListenerPolicyTargetHTTPStatusCode).(int)
+            sc := int64(status)
+            redirectPatch.HTTPStatusCode = &sc
+            hasChanged = true
+            targetChange = true
+        }
+
+        if d.HasChange(isLBListenerPolicyTargetURL) {
+            url := d.Get(isLBListenerPolicyTargetURL).(string)
+            redirectPatch.URL = &url
+            hasChanged = true
+            targetChange = true
+        }
+
+        // Update the target only if either the status code or the URL changed
+        if targetChange {
+            target = &redirectPatch
+            loadBalancerListenerPolicyPatchModel.Target = target
+        }
+    }
+
+    if hasChanged {
+        loadBalancerListenerPolicyPatch, err := loadBalancerListenerPolicyPatchModel.AsPatch()
+        if err != nil {
+            return fmt.Errorf("Error calling asPatch for LoadBalancerListenerPolicyPatch: %s", err)
+        }
+        updatePolicyOptions.LoadBalancerListenerPolicyPatch = loadBalancerListenerPolicyPatch
+        isLBKey := "load_balancer_key_" + lbID
+        ibmMutexKV.Lock(isLBKey)
+        defer ibmMutexKV.Unlock(isLBKey)
+
+        _, err = isWaitForLbAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate))
+        if err != nil {
+            return fmt.Errorf(
+                "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err)
+        }
+        _, response, err := sess.UpdateLoadBalancerListenerPolicy(&updatePolicyOptions)
+        if err != nil {
+            return fmt.Errorf("Error Updating Load Balancer Listener Policy: %s\n%s", err, response)
+        }
+
+        _, err = isWaitForLbListenerPolicyAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate))
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func resourceIBMISLBListenerPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+
+    userDetails, err := meta.(ClientSession).BluemixUserDetails()
+    if err != nil {
+        return err
+    }
+
+    // Retrieve the lbID, listenerID and policyID
+    parts, err := idParts(d.Id())
+    if err != nil {
+        return err
+    }
+
+    lbID := parts[0]
+    listenerID := parts[1]
+    policyID := parts[2]
+
+    isLBKey := "load_balancer_key_" + lbID
+    ibmMutexKV.Lock(isLBKey)
+    defer ibmMutexKV.Unlock(isLBKey)
+
+    if userDetails.generation == 1 {
+        err := classicLbListenerPolicyDelete(d, meta, lbID, listenerID, policyID)
+        if err != nil {
+            return err
+        }
+    } else {
+        err := lbListenerPolicyDelete(d, meta, lbID, listenerID, policyID)
+        if err != nil {
+            return err
+        }
+    }
+    d.SetId("")
+    return nil
+
+}
+
+func classicLbListenerPolicyDelete(d *schema.ResourceData, meta interface{}, lbID, listenerID, ID string) error {
+    sess, err := classicVpcClient(meta)
+    if err != nil {
+        return err
+    }
+
+    // Getting policy options
+    getLbListenerPolicyOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyOptions{
+        LoadBalancerID: &lbID,
+        ListenerID:     &listenerID,
+        ID:             &ID,
+    }
+
+    // Getting lb listener policy; a 404 means it is already gone
+    _, response, err := sess.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions)
+    if err != nil {
+        if response != nil && response.StatusCode == 404 {
+            d.SetId("")
+            return nil
+        }
+        return fmt.Errorf("Error in classicLbListenerPolicyDelete: %s\n%s", err, response)
+    }
+
+    deleteLbListenerPolicyOptions := &vpcclassicv1.DeleteLoadBalancerListenerPolicyOptions{
+        LoadBalancerID: &lbID,
+        ListenerID:     &listenerID,
+        ID:             &ID,
+    }
+
+    _, err = isWaitForClassicLbAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate))
+    if err != nil {
+        return fmt.Errorf(
+            "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err)
+    }
+
+    response, err = sess.DeleteLoadBalancerListenerPolicy(deleteLbListenerPolicyOptions)
+    if err != nil {
+        return fmt.Errorf("Error in classicLbListenerPolicyDelete: %s\n%s", err, response)
+    }
+    _, err = isWaitForLbListenerPolicyClassicDeleted(sess, d.Id(), d.Timeout(schema.TimeoutDelete))
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
+func lbListenerPolicyDelete(d *schema.ResourceData, meta interface{}, lbID, listenerID, ID string) error {
+
+    sess, err := vpcClient(meta)
+    if err != nil {
+        return err
+    }
+    // Getting policy options
+    getLbListenerPolicyOptions := &vpcv1.GetLoadBalancerListenerPolicyOptions{
+        LoadBalancerID: &lbID,
+        ListenerID:     &listenerID,
+        ID:             &ID,
+    }
+
+    // Getting lb listener policy; a 404 means it is already gone, any other error is fatal
+    _, response, err := sess.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions)
+    if err != nil {
+        if response != nil && response.StatusCode == 404 {
+            d.SetId("")
+            return nil
+        }
+        return fmt.Errorf("Error in lbListenerPolicyDelete: %s\n%s", err, response)
+    }
+
+    deleteLbListenerPolicyOptions := &vpcv1.DeleteLoadBalancerListenerPolicyOptions{
+        LoadBalancerID: &lbID,
+        ListenerID:     &listenerID,
+        ID:             &ID,
+    }
+
+    _, err = isWaitForLbAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate))
+    if err != nil {
+        return fmt.Errorf(
+            "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err)
+    }
+
+    response, err = sess.DeleteLoadBalancerListenerPolicy(deleteLbListenerPolicyOptions)
+    if err != nil {
+        return fmt.Errorf("Error in lbListenerPolicyDelete: %s\n%s", err, response)
+    }
+    _, err = isWaitForLbListenerPolicyDeleted(sess, d.Id(), d.Timeout(schema.TimeoutDelete))
+    if err != nil {
+        return err
+    }
+    return nil
+}
+func isWaitForLbListenerPolicyDeleted(vpc *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) {
+
+    stateConf := &resource.StateChangeConf{
+        Pending:    []string{isLBListenerPolicyRetry, isLBListenerPolicyDeleting},
+        Target:     []string{isLBListenerPolicyFailed, isLBListenerPolicyDeleted},
+        Refresh:    isLbListenerPolicyDeleteRefreshFunc(vpc, id),
+        Timeout:    timeout,
+        Delay:      10 * time.Second,
+        MinTimeout: 10 * time.Second,
+    }
+
+    return stateConf.WaitForState()
+}
+
+func isLbListenerPolicyDeleteRefreshFunc(vpc *vpcv1.VpcV1, id string) resource.StateRefreshFunc {
+    return func() (interface{}, string, error) {
+
+        // Retrieve the lbID, listenerID and policyID
+        parts, err := idParts(id)
+        if err != nil {
+            return nil, isLBListenerPolicyFailed, nil
+        }
+
+        lbID := parts[0]
+        listenerID := parts[1]
+        policyID := parts[2]
+
+        getLbListenerPolicyOptions := &vpcv1.GetLoadBalancerListenerPolicyOptions{
+            LoadBalancerID: &lbID,
+            ListenerID:     &listenerID,
+            ID:             &policyID,
+        }
+
+        // Poll the lb listener policy until it disappears
+        policy, response, err := vpc.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions)
+
+        if err != nil {
+            if response != nil && response.StatusCode == 404 {
+                return policy, isLBListenerPolicyDeleted, nil
+            }
+            return nil, isLBListenerPolicyFailed, err
+        }
+        return policy, isLBListenerPolicyDeleting, err
+    }
+}
+
+func classicLbListenerPolicyGet(d *schema.ResourceData, meta interface{}, lbID, listenerID, id string) error {
+    sess, err := classicVpcClient(meta)
+    if err != nil {
+        return err
+    }
+
+    // Getting policy options
+    getLbListenerPolicyOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyOptions{
+        LoadBalancerID: &lbID,
+        ListenerID:     &listenerID,
+        ID:             &id,
+    }
+
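+    // The 404 check below clears the resource ID so Terraform drops the policy from state instead of failing the read.
+    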
// Getting lb listener policy
+    policy, response, err := sess.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions)
+    if err != nil {
+        if response != nil && response.StatusCode == 404 {
+            d.SetId("")
+            return nil
+        }
+        return fmt.Errorf("Error in classicLbListenerPolicyGet : %s\n%s", err, response)
+    }
+
+    d.Set(isLBListenerPolicyLBID, lbID)
+    d.Set(isLBListenerPolicyListenerID, listenerID)
+    d.Set(isLBListenerPolicyAction, policy.Action)
+    d.Set(isLBListenerPolicyID, id)
+    d.Set(isLBListenerPolicyPriority, policy.Priority)
+    d.Set(isLBListenerPolicyName, policy.Name)
+    d.Set(isLBListenerPolicyStatus, policy.ProvisioningStatus)
+
+    if policy.Rules != nil {
+        rulesSet := make([]interface{}, 0)
+        for _, rule := range policy.Rules {
+            getLbListenerPolicyRulesOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyRuleOptions{
+                LoadBalancerID: &lbID,
+                ListenerID:     &listenerID,
+                ID:             rule.ID,
+                PolicyID:       &id,
+            }
+            ruleInfo, response, err := sess.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRulesOptions)
+            if err != nil {
+                return fmt.Errorf("Error in classicLbListenerPolicyGet rule: %s\n%s", err, response)
+            }
+
+            r := map[string]interface{}{
+                isLBListenerPolicyRuleID:        *ruleInfo.ID,
+                isLBListenerPolicyRuleCondition: *ruleInfo.Condition,
+                isLBListenerPolicyRuleType:      *ruleInfo.Type,
+                isLBListenerPolicyRuleField:     *ruleInfo.Field,
+                isLBListenerPolicyRuleValue:     *ruleInfo.Value,
+            }
+            rulesSet = append(rulesSet, r)
+        }
+        d.Set(isLBListenerPolicyRulesInfo, rulesSet)
+    }
+
+    // `LoadBalancerPoolReference` is in the response if `action` is `forward`.
+    // `LoadBalancerListenerPolicyRedirectURL` is in the response if `action` is `redirect`.
+
+    if *(policy.Action) == "forward" {
+        if reflect.TypeOf(policy.Target).String() == "*vpcclassicv1.LoadBalancerListenerPolicyTargetLoadBalancerPoolReference" {
+            target, ok := policy.Target.(*vpcclassicv1.LoadBalancerListenerPolicyTargetLoadBalancerPoolReference)
+            if ok {
+                d.Set(isLBListenerPolicyTargetID, target.ID)
+            }
+        }
+
+    } else if *(policy.Action) == "redirect" {
+        if reflect.TypeOf(policy.Target).String() == "*vpcclassicv1.LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL" {
+            target, ok := policy.Target.(*vpcclassicv1.LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL)
+            if ok {
+                d.Set(isLBListenerPolicyTargetURL, target.URL)
+                d.Set(isLBListenerPolicyTargetHTTPStatusCode, target.HTTPStatusCode)
+            }
+        }
+    }
+
+    getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{
+        ID: &lbID,
+    }
+    lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions)
+    if err != nil {
+        return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response)
+    }
+    d.Set(RelatedCRN, *lb.CRN)
+
+    return nil
+}
+
+func lbListenerPolicyGet(d *schema.ResourceData, meta interface{}, lbID, listenerID, id string) error {
+
+    sess, err := vpcClient(meta)
+    if err != nil {
+        return err
+    }
+
+    // Getting policy options
+    getLbListenerPolicyOptions := &vpcv1.GetLoadBalancerListenerPolicyOptions{
+        LoadBalancerID: &lbID,
+        ListenerID:     &listenerID,
+        ID:             &id,
+    }
+
+    // Getting lb listener policy
+    policy, response, err := sess.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions)
+    if err != nil {
+        if response != nil && response.StatusCode == 404 {
+            d.SetId("")
+            return nil
+        }
+        return err
+    }
+
+    // set the argument values
+    d.Set(isLBListenerPolicyLBID, lbID)
+    d.Set(isLBListenerPolicyListenerID, listenerID)
+    d.Set(isLBListenerPolicyAction, policy.Action)
+    d.Set(isLBListenerPolicyID, id)
+    d.Set(isLBListenerPolicyPriority, policy.Priority)
+    d.Set(isLBListenerPolicyName, policy.Name)
+    d.Set(isLBListenerPolicyStatus, policy.ProvisioningStatus)
+
+    // Set rules: the rule references returned by the SDK expose only the ID and Href
+    // (not condition, type, value or field), so only the rule ID is recorded here.
+    if policy.Rules != nil {
+        policyRules := policy.Rules
+        rulesInfo := make([]map[string]interface{}, 0)
+        for _, index := range policyRules {
+
+            l := map[string]interface{}{
+                isLBListenerPolicyRuleID: index.ID,
+            }
+            rulesInfo = append(rulesInfo, l)
+        }
+        d.Set(isLBListenerPolicyRules, rulesInfo)
+    }
+
+    // `LoadBalancerPoolReference` is in the response if `action` is `forward`.
+    // `LoadBalancerListenerPolicyRedirectURL` is in the response if `action` is `redirect`.
+
+    if *(policy.Action) == "forward" {
+        if reflect.TypeOf(policy.Target).String() == "*vpcv1.LoadBalancerListenerPolicyTargetLoadBalancerPoolReference" {
+            target, ok := policy.Target.(*vpcv1.LoadBalancerListenerPolicyTargetLoadBalancerPoolReference)
+            if ok {
+                d.Set(isLBListenerPolicyTargetID, target.ID)
+            }
+        }
+
+    } else if *(policy.Action) == "redirect" {
+        if reflect.TypeOf(policy.Target).String() == "*vpcv1.LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL" {
+            target, ok := policy.Target.(*vpcv1.LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL)
+            if ok {
+                d.Set(isLBListenerPolicyTargetURL, target.URL)
+                d.Set(isLBListenerPolicyTargetHTTPStatusCode, target.HTTPStatusCode)
+            }
+        }
+    }
+
+    getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{
+        ID: &lbID,
+    }
+    lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions)
+    if err != nil {
+        return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response)
+    }
+    d.Set(RelatedCRN, *lb.CRN)
+
+    return nil
+}
+
+func isWaitForLbListenerPolicyClassicDeleted(vpc *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) {
+
+    stateConf := &resource.StateChangeConf{
+        Pending:    []string{isLBListenerPolicyRetry, isLBListenerPolicyDeleting, "delete_pending"},
+        Target:     []string{isLBListenerPolicyFailed, isLBListenerPolicyDeleted},
+        Refresh:    isLbListenerPolicyClassicDeleteRefreshFunc(vpc, id),
+        Timeout:    timeout,
+        Delay:      10 * time.Second,
+        MinTimeout: 10 * time.Second,
+    }
+
+    return stateConf.WaitForState()
+}
+
+func isLbListenerPolicyClassicDeleteRefreshFunc(vpc *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc {
+    return func() (interface{}, string, error) {
+
+        // Retrieve the lbID, listenerID and policyID
+        parts, err := idParts(id)
+        if err != nil {
+            return nil, isLBListenerPolicyFailed, nil
+        }
+
+        lbID := parts[0]
+        listenerID := parts[1]
+        policyID := parts[2]
+
+        getLbListenerPolicyOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyOptions{
+            LoadBalancerID: &lbID,
+            ListenerID:     &listenerID,
+            ID:             &policyID,
+        }
+
+        // Getting lb listener policy
+        policy, response, err := vpc.GetLoadBalancerListenerPolicy(getLbListenerPolicyOptions)
+
+        if err != nil {
+            if response != nil && response.StatusCode == 404 {
+                return policy, isLBListenerPolicyDeleted, nil
+            }
+
+            return nil, isLBListenerPolicyFailed, err
+        }
+
+        return policy, isLBListenerPolicyDeleting, err
+    }
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_listener_policy_rule.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_listener_policy_rule.go
new file mode 100644
index 00000000000..cf1cb28e81c
--- /dev/null
+++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_listener_policy_rule.go @@ -0,0 +1,1103 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +const ( + isLBListenerPolicyRuleLBID = "lb" + isLBListenerPolicyRuleListenerID = "listener" + isLBListenerPolicyRulePolicyID = "policy" + isLBListenerPolicyRuleid = "rule" + isLBListenerPolicyRulecondition = "condition" + isLBListenerPolicyRuletype = "type" + isLBListenerPolicyRulevalue = "value" + isLBListenerPolicyRulefield = "field" + isLBListenerPolicyRuleStatus = "provisioning_status" + isLBListenerPolicyRuleAvailable = "active" + isLBListenerPolicyRuleFailed = "failed" + isLBListenerPolicyRulePending = "pending" + isLBListenerPolicyRuleDeleting = "deleting" + isLBListenerPolicyRuleDeleted = "done" + isLBListenerPolicyRuleRetry = "retry" + isLBListenerPolicyRuleProvisioning = "provisioning" + isLBListenerPolicyRuleProvisioningDone = "done" +) + +func resourceIBMISLBListenerPolicyRule() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISLBListenerPolicyRuleCreate, + Read: resourceIBMISLBListenerPolicyRuleRead, + Update: resourceIBMISLBListenerPolicyRuleUpdate, + Delete: resourceIBMISLBListenerPolicyRuleDelete, + Exists: resourceIBMISLBListenerPolicyRuleExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + isLBListenerPolicyRuleLBID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Loadbalancer ID", + }, + + isLBListenerPolicyRuleListenerID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return false + } + // if state file entry and tf file entry matches + if strings.Compare(n, o) == 0 { + return true + } + + if strings.Contains(n, "/") { + //Split lbID/listenerID and fetch listenerID + new := strings.Split(n, "/") + if strings.Compare(new[1], o) == 0 { + return true + } + } + + return false + }, + Description: "Listener ID.", + }, + + isLBListenerPolicyRulePolicyID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if o == "" { + return false + } + // if state file entry and tf file entry matches + if strings.Compare(n, o) == 0 { + return true + } + + if strings.Contains(n, "/") { + //Split lbID/listenerID and fetch listenerID + new := strings.Split(n, "/") + if strings.Compare(new[2], o) == 0 { + return true + } + } + + return false + }, + Description: "Listener Policy ID", + }, + + isLBListenerPolicyRulecondition: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_lb_listener_policy_rule", isLBListenerPolicyRulecondition), + Description: "Condition info of the rule.", + }, + + isLBListenerPolicyRuletype: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_lb_listener_policy_rule", isLBListenerPolicyRuletype), + Description: "Policy rule type.", + }, + + 
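// value is compared against the request attribute selected by type; field applies only to the header rule type.
+        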
isLBListenerPolicyRulevalue: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateStringLength, + Description: "Policy rule value.", + }, + + isLBListenerPolicyRulefield: { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateStringLength, + }, + + isLBListenerPolicyRuleid: { + Type: schema.TypeString, + Computed: true, + }, + + isLBListenerPolicyStatus: { + Type: schema.TypeString, + Computed: true, + }, + + RelatedCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the LB resource", + }, + }, + } +} + +func resourceIBMISLBListenerPolicyRuleValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + condition := "contains, equals, matches_regex" + ruletype := "header, hostname, path, body, query" + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBListenerPolicyRulecondition, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: condition}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBListenerPolicyRuletype, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: ruletype}) + + ibmISLBListenerPolicyRuleResourceValidator := ResourceValidator{ResourceName: "ibm_is_lb_listener_policy_rule", Schema: validateSchema} + return &ibmISLBListenerPolicyRuleResourceValidator +} + +func resourceIBMISLBListenerPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + //Read lb, listener, policy IDs + var field string + lbID := d.Get(isLBListenerPolicyRuleLBID).(string) + listenerID, err := getLbListenerID(d.Get(isLBListenerPolicyRuleListenerID).(string)) + if err != nil { + return err + } + + policyID, err := getLbPolicyID(d.Get(isLBListenerPolicyRulePolicyID).(string)) + if err != nil { + return err + } + + condition := d.Get(isLBListenerPolicyRulecondition).(string) + ty := d.Get(isLBListenerPolicyRuletype).(string) + value := d.Get(isLBListenerPolicyRulevalue).(string) + if n, ok := d.GetOk(isLBListenerPolicyRulefield); ok { + field = n.(string) + } + + if userDetails.generation == 1 { + err := classicLbListenerPolicyRuleCreate(d, meta, lbID, listenerID, policyID, condition, ty, value, field) + if err != nil { + return err + } + } else { + err := lbListenerPolicyRuleCreate(d, meta, lbID, listenerID, policyID, condition, ty, value, field) + if err != nil { + return err + } + } + + return resourceIBMISLBListenerPolicyRuleRead(d, meta) +} + +func getLbListenerID(id string) (string, error) { + if strings.Contains(id, "/") { + parts, err := idParts(id) + if err != nil { + return "", err + } + + return parts[1], nil + } else { + return id, nil + } +} + +func getLbPolicyID(id string) (string, error) { + if strings.Contains(id, "/") { + parts, err := idParts(id) + if err != nil { + return "", err + } + + return parts[2], nil + } else { + return id, nil + } +} + +func classicLbListenerPolicyRuleCreate(d *schema.ResourceData, meta interface{}, lbID, listenerID, policyID, condition, ty, value, field string) error { + sess, err := classicVpcSdkClient(meta) + if err != nil { + return err + } + + options := &vpcclassicv1.CreateLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + Condition: &condition, + Type: &ty, + Value: &value, + Field: &field, + } + + isLBKey := 
"load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + _, err = isWaitForClassicLoadbalancerAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err) + } + + rule, response, err := sess.CreateLoadBalancerListenerPolicyRule(options) + if err != nil { + return fmt.Errorf("Error while creating lb listener policy for LB %s: Error %v Response %v", lbID, err, *response) + } + + d.SetId(fmt.Sprintf("%s/%s/%s/%s", lbID, listenerID, policyID, *(rule.ID))) + + _, err = isWaitForClassicLbListenerPolicyRuleAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + return nil +} + +func isWaitForClassicLoadbalancerAvailable(vpc *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBListenerPolicyRuleProvisioning, "create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBProvisioningDone}, + Refresh: isLoadbalancerClassicRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLoadbalancerClassicRefreshFunc(vpc *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getLbOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &id, + } + + lb, _, err := vpc.GetLoadBalancer(getLbOptions) + if err != nil { + return nil, "", err + } + + if *(lb.ProvisioningStatus) == isLBListenerPolicyAvailable || *lb.ProvisioningStatus == isLBListenerPolicyFailed { + return lb, isLBProvisioningDone, nil + } + + return lb, isLBProvisioning, nil + } +} + +func isWaitForClassicLbListenerPolicyRuleAvailable(vpc *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBListenerPolicyRuleProvisioning, "create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBListenerPolicyProvisioningDone}, + Refresh: isLbListenerPolicyRuleClassicRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLbListenerPolicyRuleClassicRefreshFunc(vpc *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + parts, err := idParts(id) + if err != nil { + return nil, "", err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + ruleID := parts[3] + + getLbListenerPolicyRuleOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ruleID, + } + + rule, _, err := vpc.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + + if err != nil { + return rule, "", err + } + + if *rule.ProvisioningStatus == isLBListenerPolicyAvailable || *rule.ProvisioningStatus == isLBListenerPolicyFailed { + return rule, isLBListenerProvisioningDone, nil + } + + return rule, *rule.ProvisioningStatus, nil + } +} + +func vpcSdkClient(meta interface{}) (*vpcv1.VpcV1, error) { + sess, err := meta.(ClientSession).VpcV1API() + return sess, err +} + +func lbListenerPolicyRuleCreate(d *schema.ResourceData, meta interface{}, lbID, listenerID, policyID, condition, ty, value, field string) error { + + sess, err := 
vpcSdkClient(meta) + if err != nil { + return err + } + + options := &vpcv1.CreateLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + Condition: &condition, + Type: &ty, + Value: &value, + Field: &field, + } + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + _, err = isWaitForLoadbalancerAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err) + } + + rule, response, err := sess.CreateLoadBalancerListenerPolicyRule(options) + if err != nil { + return fmt.Errorf("Error while creating lb listener policy for LB %s: Error %v Response %v", lbID, err, *response) + } + + d.SetId(fmt.Sprintf("%s/%s/%s/%s", lbID, listenerID, policyID, *(rule.ID))) + + _, err = isWaitForLbListenerPolicyRuleAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + return nil +} + +func isWaitForLoadbalancerAvailable(vpc *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBListenerPolicyRulePending}, + Target: []string{isLBProvisioningDone}, + Refresh: isLoadbalancerRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLoadbalancerRefreshFunc(vpc *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getLbOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &id, + } + + lb, _, err := vpc.GetLoadBalancer(getLbOptions) + if err != nil { + return nil, "", err + } + + if *(lb.ProvisioningStatus) == isLBListenerPolicyAvailable { + return lb, isLBProvisioningDone, nil + } + + return lb, isLBProvisioning, nil + } +} + +func isWaitForLbListenerPolicyRuleAvailable(vpc *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isLBListenerPolicyRuleProvisioning, "create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBListenerPolicyRuleProvisioningDone}, + Refresh: isLbListenerPolicyRuleRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLbListenerPolicyRuleRefreshFunc(vpc *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + parts, err := idParts(id) + if err != nil { + return nil, "", err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + ruleID := parts[3] + + getLbListenerPolicyRuleOptions := &vpcv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ruleID, + } + + rule, _, err := vpc.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + + if err != nil { + return rule, "", err + } + + if *rule.ProvisioningStatus == isLBListenerPolicyRuleAvailable || *rule.ProvisioningStatus == isLBListenerPolicyRuleFailed { + return rule, isLBListenerPolicyRuleProvisioningDone, nil + } + + return rule, *rule.ProvisioningStatus, nil + } +} + +func resourceIBMISLBListenerPolicyRuleRead(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + ID := d.Id() + parts, err := idParts(ID) + if 
err != nil { + return err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + ruleID := parts[3] + + if userDetails.generation == 1 { + err := classicLbListenerPolicyRuleGet(d, meta, lbID, listenerID, policyID, ruleID) + if err != nil { + return err + } + } else { + err := lbListenerPolicyRuleGet(d, meta, lbID, listenerID, policyID, ruleID) + if err != nil { + return err + } + } + return nil +} + +func resourceIBMISLBListenerPolicyRuleExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + ID := d.Id() + if userDetails.generation == 1 { + exists, err := classicLbListenerPolicyRuleExists(d, meta, ID) + return exists, err + } else { + exists, err := lbListenerPolicyRuleExists(d, meta, ID) + return exists, err + } +} + +func classicLbListenerPolicyRuleExists(d *schema.ResourceData, meta interface{}, ID string) (bool, error) { + sess, err := classicVpcSdkClient(meta) + if err != nil { + return false, err + } + + //Retrieve lbID, listenerID and policyID + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + ruleID := parts[3] + + //populate lblistenerpolicyOPtions + getLbListenerPolicyRuleOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ruleID, + } + + //Getting lb listener policy + _, response, err := sess.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting policy: %s\n%s", err, response) + } + return true, nil +} + +func lbListenerPolicyRuleExists(d *schema.ResourceData, meta interface{}, ID string) (bool, error) { + sess, err := vpcSdkClient(meta) + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + if len(parts) != 4 { + return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of lbID/listenerID/policyID/ruleID", d.Id()) + } + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + ruleID := parts[3] + + getLbListenerPolicyRuleOptions := &vpcv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ruleID, + } + + //Getting lb listener policy + _, response, err := sess.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting policy: %s\n%s", err, response) + } + return true, nil +} +func resourceIBMISLBListenerPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + ruleID := parts[3] + + if userDetails.generation == 1 { + + err := classicLbListenerPolicyRuleUpdate(d, meta, lbID, listenerID, policyID, ruleID) + if err != nil { + return err + } + } else { + + err := lbListenerPolicyRuleUpdate(d, meta, lbID, listenerID, policyID, ruleID) + if err != nil { + return err + } + } + + return resourceIBMISLBListenerPolicyRuleRead(d, meta) +} + +func 
classicLbListenerPolicyRuleUpdate(d *schema.ResourceData, meta interface{}, lbID, listenerID, policyID, ID string) error { + sess, err := classicVpcSdkClient(meta) + if err != nil { + return err + } + + hasChanged := false + updatePolicyRuleOptions := vpcclassicv1.UpdateLoadBalancerListenerPolicyRuleOptions{} + updatePolicyRuleOptions.LoadBalancerID = &lbID + updatePolicyRuleOptions.ListenerID = &listenerID + updatePolicyRuleOptions.PolicyID = &policyID + updatePolicyRuleOptions.ID = &ID + + loadBalancerListenerPolicyRulePatchModel := &vpcclassicv1.LoadBalancerListenerPolicyRulePatch{} + + if d.HasChange(isLBListenerPolicyRulecondition) { + condition := d.Get(isLBListenerPolicyRulecondition).(string) + loadBalancerListenerPolicyRulePatchModel.Condition = &condition + hasChanged = true + } + + if d.HasChange(isLBListenerPolicyRuletype) { + ty := d.Get(isLBListenerPolicyRuletype).(string) + loadBalancerListenerPolicyRulePatchModel.Type = &ty + hasChanged = true + } + + if d.HasChange(isLBListenerPolicyRulevalue) { + value := d.Get(isLBListenerPolicyRulevalue).(string) + loadBalancerListenerPolicyRulePatchModel.Value = &value + hasChanged = true + } + + if d.HasChange(isLBListenerPolicyRulefield) { + field := d.Get(isLBListenerPolicyRulefield).(string) + loadBalancerListenerPolicyRulePatchModel.Field = &field + hasChanged = true + } + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + if hasChanged { + loadBalancerListenerPolicyRulePatch, err := loadBalancerListenerPolicyRulePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for LoadBalancerListenerPolicyRulePatch: %s", err) + } + updatePolicyRuleOptions.LoadBalancerListenerPolicyRulePatch = loadBalancerListenerPolicyRulePatch + + _, err = isWaitForClassicLoadbalancerAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err) + } + _, response, err := sess.UpdateLoadBalancerListenerPolicyRule(&updatePolicyRuleOptions) + if err != nil { + return fmt.Errorf("Error Getting Instance: %s\n%s", err, response) + } + + _, err = isWaitForClassicLbListenerPolicyRuleAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + } + return nil +} + +func lbListenerPolicyRuleUpdate(d *schema.ResourceData, meta interface{}, lbID, listenerID, policyID, ID string) error { + sess, err := vpcSdkClient(meta) + if err != nil { + return err + } + hasChanged := false + updatePolicyRuleOptions := vpcv1.UpdateLoadBalancerListenerPolicyRuleOptions{} + updatePolicyRuleOptions.LoadBalancerID = &lbID + updatePolicyRuleOptions.ListenerID = &listenerID + updatePolicyRuleOptions.PolicyID = &policyID + updatePolicyRuleOptions.ID = &ID + + loadBalancerListenerPolicyRulePatchModel := &vpcv1.LoadBalancerListenerPolicyRulePatch{} + + if d.HasChange(isLBListenerPolicyRulecondition) { + condition := d.Get(isLBListenerPolicyRulecondition).(string) + loadBalancerListenerPolicyRulePatchModel.Condition = &condition + hasChanged = true + } + + if d.HasChange(isLBListenerPolicyRuletype) { + ty := d.Get(isLBListenerPolicyRuletype).(string) + loadBalancerListenerPolicyRulePatchModel.Type = &ty + hasChanged = true + } + + if d.HasChange(isLBListenerPolicyRulevalue) { + value := d.Get(isLBListenerPolicyRulevalue).(string) + loadBalancerListenerPolicyRulePatchModel.Value = &value + hasChanged = true + } + + if d.HasChange(isLBListenerPolicyRulefield) { + field := 
d.Get(isLBListenerPolicyRulefield).(string) + loadBalancerListenerPolicyRulePatchModel.Field = &field + hasChanged = true + } + + if hasChanged { + loadBalancerListenerPolicyRulePatch, err := loadBalancerListenerPolicyRulePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for LoadBalancerListenerPolicyRulePatch: %s", err) + } + updatePolicyRuleOptions.LoadBalancerListenerPolicyRulePatch = loadBalancerListenerPolicyRulePatch + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + _, err = isWaitForLoadbalancerAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "LB-LP Error checking for load balancer (%s) is active: %s", lbID, err) + } + + _, response, err := sess.UpdateLoadBalancerListenerPolicyRule(&updatePolicyRuleOptions) + if err != nil { + return fmt.Errorf("Error Updating in policy : %s\n%s", err, response) + } + + _, err = isWaitForLbListenerPolicyRuleAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + } + return nil +} + +func resourceIBMISLBListenerPolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + //Retrieve lbId, listenerId and policyID + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + ruleID := parts[3] + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + if userDetails.generation == 1 { + err := classicLbListenerPolicyRuleDelete(d, meta, lbID, listenerID, policyID, ruleID) + if err != nil { + return err + } + } else { + err := lbListenerPolicyRuleDelete(d, meta, lbID, listenerID, policyID, ruleID) + if err != nil { + return err + } + } + + d.SetId("") + return nil + +} + +func classicLbListenerPolicyRuleDelete(d *schema.ResourceData, meta interface{}, lbID, listenerID, policyID, ID string) error { + sess, err := classicVpcSdkClient(meta) + if err != nil { + return err + } + + //Getting rule optins + getLbListenerPolicyRuleOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ID, + } + + //Getting lb listener policy + _, response, err := sess.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error in classicLbListenerPolicyGet : %s\n%s", err, response) + } + + deleteLbListenerPolicyRuleOptions := &vpcclassicv1.DeleteLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ID, + } + + response, err = sess.DeleteLoadBalancerListenerPolicyRule(deleteLbListenerPolicyRuleOptions) + + if err != nil { + return fmt.Errorf("Error in classicLbListenerPolicyRuleDelete: %s\n%s", err, response) + } + _, err = isWaitForLbListenerPolicyRuleClassicDeleted(sess, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + return nil +} + +func lbListenerPolicyRuleDelete(d *schema.ResourceData, meta interface{}, lbID, listenerID, policyID, ID string) error { + + sess, err := vpcSdkClient(meta) + if err != nil { + return err + } + + //Getting rule optins + getLbListenerPolicyRuleOptions := &vpcv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, 
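+		// The option fields here are filled from the four-part composite
+		// Terraform ID ("lbID/listenerID/policyID/ruleID") parsed by idParts in
+		// the Delete wrapper above:
+		// parts[0] -> LoadBalancerID, parts[1] -> ListenerID,
+		// parts[2] -> PolicyID, parts[3] -> ID.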
+ ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ID, + } + + //Getting lb listener policy + _, response, err := sess.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error in LbListenerPolicyGet : %s\n%s", err, response) + } + + deleteLbListenerPolicyRuleOptions := &vpcv1.DeleteLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ID, + } + response, err = sess.DeleteLoadBalancerListenerPolicyRule(deleteLbListenerPolicyRuleOptions) + if err != nil { + return fmt.Errorf("Error in lbListenerPolicyRuleDelete: %s\n%s", err, response) + } + _, err = isWaitForLbListnerPolicyRuleDeleted(sess, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + return nil +} +func isWaitForLbListnerPolicyRuleDeleted(vpc *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBListenerPolicyRuleRetry, isLBListenerPolicyRuleDeleting}, + Target: []string{isLBListenerPolicyRuleDeleted, isLBListenerPolicyRuleFailed}, + Refresh: isLbListenerPolicyRuleDeleteRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLbListenerPolicyRuleDeleteRefreshFunc(vpc *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + //Retrieve lbId, listenerId and policyID + parts, err := idParts(id) + if err != nil { + return nil, isLBListenerPolicyFailed, nil + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + ruleID := parts[3] + + getLbListenerPolicyRuleOptions := &vpcv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ruleID, + } + + //Getting lb listener policy + rule, response, err := vpc.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + + if err != nil { + if response != nil && response.StatusCode == 404 { + return rule, isLBListenerPolicyRuleDeleted, nil + } + return rule, isLBListenerPolicyRuleFailed, err + } + return nil, isLBListenerPolicyRuleDeleting, err + } +} + +func classicVpcSdkClient(meta interface{}) (*vpcclassicv1.VpcClassicV1, error) { + sess, err := meta.(ClientSession).VpcClassicV1API() + return sess, err +} + +func classicLbListenerPolicyRuleGet(d *schema.ResourceData, meta interface{}, lbID, listenerID, policyID, id string) error { + sess, err := classicVpcSdkClient(meta) + if err != nil { + return err + } + + //Getting rule optins + getLbListenerPolicyRuleOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &id, + } + + //Getting lb listener policy + rule, response, err := sess.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error in classicLbListenerPolicyGet : %s\n%s", err, response) + } + + d.Set(isLBListenerPolicyRuleLBID, lbID) + d.Set(isLBListenerPolicyRuleListenerID, listenerID) + d.Set(isLBListenerPolicyRulePolicyID, policyID) + d.Set(isLBListenerPolicyRuleid, id) + d.Set(isLBListenerPolicyRulecondition, rule.Condition) + d.Set(isLBListenerPolicyRuletype, rule.Type) + d.Set(isLBListenerPolicyRulevalue, 
rule.Value) + d.Set(isLBListenerPolicyRulefield, rule.Field) + d.Set(isLBListenerPolicyRuleStatus, rule.ProvisioningStatus) + getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &lbID, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response) + } + d.Set(RelatedCRN, *lb.CRN) + + return nil +} + +func lbListenerPolicyRuleGet(d *schema.ResourceData, meta interface{}, lbID, listenerID, policyID, id string) error { + + sess, err := vpcSdkClient(meta) + if err != nil { + return err + } + + //Getting rule optins + getLbListenerPolicyRuleOptions := &vpcv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &id, + } + + //Getting lb listener policy + rule, response, err := sess.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return err + } + + //set the argument values + d.Set(isLBListenerPolicyRuleLBID, lbID) + d.Set(isLBListenerPolicyRuleListenerID, listenerID) + d.Set(isLBListenerPolicyRulePolicyID, policyID) + d.Set(isLBListenerPolicyRuleid, id) + d.Set(isLBListenerPolicyRulecondition, rule.Condition) + d.Set(isLBListenerPolicyRuletype, rule.Type) + d.Set(isLBListenerPolicyRulevalue, rule.Value) + d.Set(isLBListenerPolicyRulefield, rule.Field) + d.Set(isLBListenerPolicyRuleStatus, rule.ProvisioningStatus) + getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &lbID, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response) + } + d.Set(RelatedCRN, *lb.CRN) + + return nil +} + +func isWaitForLbListenerPolicyRuleClassicDeleted(vpc *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBListenerPolicyRuleRetry, isLBListenerPolicyRuleDeleting, "delete_pending"}, + Target: []string{isLBListenerPolicyRuleDeleted, isLBListenerPolicyRuleFailed}, + Refresh: isLbListenerPolicyRuleClassicDeleteRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLbListenerPolicyRuleClassicDeleteRefreshFunc(vpc *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + //Retrieve lbId and listenerId + parts, err := idParts(id) + if err != nil { + return nil, isLBListenerPolicyFailed, nil + } + + lbID := parts[0] + listenerID := parts[1] + policyID := parts[2] + ruleID := parts[3] + + getLbListenerPolicyRuleOptions := &vpcclassicv1.GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: &lbID, + ListenerID: &listenerID, + PolicyID: &policyID, + ID: &ruleID, + } + + //Getting lb listener policy + rule, response, err := vpc.GetLoadBalancerListenerPolicyRule(getLbListenerPolicyRuleOptions) + //failed := isLBListenerPolicyRuleFailed + if err != nil { + if response != nil && response.StatusCode == 404 { + return rule, isLBListenerPolicyRuleDeleted, nil + } + return nil, isLBListenerPolicyRuleFailed, err + } + return rule, isLBListenerPolicyRuleDeleting, err + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_pool.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_pool.go new 
file mode 100644 index 00000000000..c6e0f2c36c4 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_pool.go @@ -0,0 +1,1094 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isLBPoolName = "name" + isLBID = "lb" + isLBPoolAlgorithm = "algorithm" + isLBPoolProtocol = "protocol" + isLBPoolHealthDelay = "health_delay" + isLBPoolHealthRetries = "health_retries" + isLBPoolHealthTimeout = "health_timeout" + isLBPoolHealthType = "health_type" + isLBPoolHealthMonitorURL = "health_monitor_url" + isLBPoolHealthMonitorPort = "health_monitor_port" + isLBPoolSessPersistenceType = "session_persistence_type" + isLBPoolSessPersistenceCookieName = "session_persistence_cookie_name" + isLBPoolProvisioningStatus = "provisioning_status" + isLBPoolProxyProtocol = "proxy_protocol" + isLBPoolActive = "active" + isLBPoolCreatePending = "create_pending" + isLBPoolUpdatePending = "update_pending" + isLBPoolDeletePending = "delete_pending" + isLBPoolMaintainancePending = "maintenance_pending" + isLBPoolFailed = "failed" + isLBPoolDeleteDone = "deleted" + isLBPool = "pool_id" +) + +func resourceIBMISLBPool() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISLBPoolCreate, + Read: resourceIBMISLBPoolRead, + Update: resourceIBMISLBPoolUpdate, + Delete: resourceIBMISLBPoolDelete, + Exists: resourceIBMISLBPoolExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + isLBPoolName: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_lb_pool", isLBPoolName), + Description: "Load Balancer Pool name", + }, + + isLBID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Load Balancer ID", + }, + + isLBPoolAlgorithm: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_lb_pool", isLBPoolAlgorithm), + Description: "Load Balancer Pool algorithm", + }, + + isLBPoolProtocol: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_lb_pool", isLBPoolProtocol), + Description: "Load Balancer Protocol", + }, + + isLBPoolHealthDelay: { + Type: schema.TypeInt, + Required: true, + Description: "Load Balancer health delay time period", + }, + + isLBPoolHealthRetries: { + Type: schema.TypeInt, + Required: true, + Description: "Load Balancer health retry count", + }, + + isLBPoolHealthTimeout: { + Type: schema.TypeInt, + Required: true, + Description: "Load Balancer health timeout interval", + }, + + isLBPoolHealthType: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_lb_pool", isLBPoolHealthType), + Description: "Load Balancer health type", + }, + + isLBPoolHealthMonitorURL: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Health monitor URL of the LB Pool", + }, + + isLBPoolHealthMonitorPort: { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Health monitor port of the LB Pool", + }, + + isLBPoolSessPersistenceType: { 
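+			// Editorial note: the validator registered below restricts this
+			// attribute to "source_ip". The companion cookie-name attribute is
+			// effectively write-only here; both Read functions in this file
+			// leave its d.Set call commented out, so remote drift in the cookie
+			// name is not detected.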
Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_lb_pool", isLBPoolSessPersistenceType), + Description: "Load Balancer Pool session persisence type.", + }, + + isLBPoolSessPersistenceCookieName: { + Type: schema.TypeString, + Optional: true, + Description: "Load Balancer Pool session persisence cookie name", + }, + + isLBPoolProvisioningStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Status of the LB Pool", + }, + + isLBPoolProxyProtocol: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator("ibm_is_lb_pool", isLBPoolProxyProtocol), + Description: "PROXY protocol setting for this pool", + }, + + isLBPool: { + Type: schema.TypeString, + Computed: true, + Description: "The LB Pool id", + }, + + RelatedCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the LB resource", + }, + }, + } +} + +func resourceIBMISLBPoolValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + algorithm := "round_robin, weighted_round_robin, least_connections" + protocol := "http, tcp, https" + persistanceType := "source_ip" + proxyProtocol := "disabled, v1, v2" + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBPoolName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBPoolAlgorithm, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: algorithm}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBPoolProtocol, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: protocol}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBPoolHealthType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: protocol}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBPoolProxyProtocol, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: proxyProtocol}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isLBPoolSessPersistenceType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: persistanceType}) + + ibmISLBPoolResourceValidator := ResourceValidator{ResourceName: "ibm_is_lb_pool", Schema: validateSchema} + return &ibmISLBPoolResourceValidator +} + +func resourceIBMISLBPoolCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + log.Printf("[DEBUG] LB Pool create") + name := d.Get(isLBPoolName).(string) + lbID := d.Get(isLBID).(string) + algorithm := d.Get(isLBPoolAlgorithm).(string) + protocol := d.Get(isLBPoolProtocol).(string) + healthDelay := int64(d.Get(isLBPoolHealthDelay).(int)) + maxRetries := int64(d.Get(isLBPoolHealthRetries).(int)) + healthTimeOut := int64(d.Get(isLBPoolHealthTimeout).(int)) + healthType := d.Get(isLBPoolHealthType).(string) + + var spType, cName, healthMonitorURL, pProtocol string + var healthMonitorPort int64 + if pt, ok := d.GetOk(isLBPoolSessPersistenceType); ok { + spType = pt.(string) + } + + if cn, ok := 
d.GetOk(isLBPoolSessPersistenceCookieName); ok { + cName = cn.(string) + } + if pp, ok := d.GetOk(isLBPoolProxyProtocol); ok { + pProtocol = pp.(string) + } + + if hmu, ok := d.GetOk(isLBPoolHealthMonitorURL); ok { + healthMonitorURL = hmu.(string) + } + + if hmp, ok := d.GetOk(isLBPoolHealthMonitorPort); ok { + healthMonitorPort = int64(hmp.(int)) + } + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + if userDetails.generation == 1 { + err := classicLBPoolCreate(d, meta, name, lbID, algorithm, protocol, healthType, spType, cName, healthMonitorURL, healthDelay, maxRetries, healthTimeOut, healthMonitorPort) + if err != nil { + return err + } + } else { + err := lbPoolCreate(d, meta, name, lbID, algorithm, protocol, healthType, spType, cName, healthMonitorURL, pProtocol, healthDelay, maxRetries, healthTimeOut, healthMonitorPort) + if err != nil { + return err + } + } + return resourceIBMISLBPoolRead(d, meta) +} + +func classicLBPoolCreate(d *schema.ResourceData, meta interface{}, name, lbID, algorithm, protocol, healthType, spType, cName, healthMonitorURL string, healthDelay, maxRetries, healthTimeOut, healthMonitorPort int64) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + options := &vpcclassicv1.CreateLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + Algorithm: &algorithm, + Protocol: &protocol, + Name: &name, + HealthMonitor: &vpcclassicv1.LoadBalancerPoolHealthMonitorPrototype{ + Delay: &healthDelay, + MaxRetries: &maxRetries, + Timeout: &healthTimeOut, + Type: &healthType, + }, + } + if healthMonitorURL != "" { + options.HealthMonitor.URLPath = &healthMonitorURL + } + if healthMonitorPort > int64(0) { + options.HealthMonitor.Port = &healthMonitorPort + } + if spType != "" { + options.SessionPersistence = &vpcclassicv1.LoadBalancerPoolSessionPersistencePrototype{ + Type: &spType, + } + } + lbPool, response, err := sess.CreateLoadBalancerPool(options) + if err != nil { + return fmt.Errorf("[DEBUG] lbpool create err: %s\n%s", err, response) + } + + d.SetId(fmt.Sprintf("%s/%s", lbID, *lbPool.ID)) + log.Printf("[INFO] lbpool : %s", *lbPool.ID) + + _, err = isWaitForClassicLBPoolActive(sess, lbID, *lbPool.ID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", *lbPool.ID, err) + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + return nil +} + +func lbPoolCreate(d *schema.ResourceData, meta interface{}, name, lbID, algorithm, protocol, healthType, spType, cName, healthMonitorURL, pProtocol string, healthDelay, maxRetries, healthTimeOut, healthMonitorPort int64) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + options := &vpcv1.CreateLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + Algorithm: &algorithm, + Protocol: &protocol, + Name: &name, + HealthMonitor: &vpcv1.LoadBalancerPoolHealthMonitorPrototype{ + Delay: &healthDelay, + MaxRetries: &maxRetries, + 
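+			// Delay, MaxRetries, Timeout and Type are always sent with the
+			// prototype; URLPath and Port are attached conditionally below,
+			// mirroring the Optional+Computed health_monitor_url and
+			// health_monitor_port schema attributes.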
Timeout: &healthTimeOut, + Type: &healthType, + }, + } + if healthMonitorURL != "" { + options.HealthMonitor.URLPath = &healthMonitorURL + } + if healthMonitorPort > int64(0) { + options.HealthMonitor.Port = &healthMonitorPort + } + if spType != "" { + options.SessionPersistence = &vpcv1.LoadBalancerPoolSessionPersistencePrototype{ + Type: &spType, + } + } + if pProtocol != "" { + options.ProxyProtocol = &pProtocol + } + lbPool, response, err := sess.CreateLoadBalancerPool(options) + if err != nil { + return fmt.Errorf("[DEBUG] lbpool create err: %s\n%s", err, response) + } + + d.SetId(fmt.Sprintf("%s/%s", lbID, *lbPool.ID)) + log.Printf("[INFO] lbpool : %s", *lbPool.ID) + + _, err = isWaitForLBPoolActive(sess, lbID, *lbPool.ID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", *lbPool.ID, err) + } + + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + return nil +} + +func resourceIBMISLBPoolRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + lbID := parts[0] + lbPoolID := parts[1] + + if userDetails.generation == 1 { + err := classicLBPoolGet(d, meta, lbID, lbPoolID) + if err != nil { + return err + } + } else { + err := lbPoolGet(d, meta, lbID, lbPoolID) + if err != nil { + return err + } + } + return nil +} + +func classicLBPoolGet(d *schema.ResourceData, meta interface{}, lbID, lbPoolID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getLoadBalancerPoolOptions := &vpcclassicv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + lbPool, response, err := sess.GetLoadBalancerPool(getLoadBalancerPoolOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Load Balancer Pool : %s\n%s", err, response) + } + + d.Set(isLBPoolName, *lbPool.Name) + d.Set(isLBPool, lbPoolID) + d.Set(isLBID, lbID) + d.Set(isLBPoolAlgorithm, *lbPool.Algorithm) + d.Set(isLBPoolProtocol, *lbPool.Protocol) + d.Set(isLBPoolHealthDelay, *lbPool.HealthMonitor.Delay) + d.Set(isLBPoolHealthRetries, *lbPool.HealthMonitor.MaxRetries) + d.Set(isLBPoolHealthTimeout, *lbPool.HealthMonitor.Timeout) + if lbPool.HealthMonitor.Type != nil { + d.Set(isLBPoolHealthType, *lbPool.HealthMonitor.Type) + } + if lbPool.HealthMonitor.URLPath != nil { + d.Set(isLBPoolHealthMonitorURL, *lbPool.HealthMonitor.URLPath) + } + if lbPool.HealthMonitor.Port != nil { + d.Set(isLBPoolHealthMonitorPort, *lbPool.HealthMonitor.Port) + } + if lbPool.SessionPersistence != nil { + d.Set(isLBPoolSessPersistenceType, *lbPool.SessionPersistence.Type) + // d.Set(isLBPoolSessPersistenceCookieName, *lbPool.SessionPersistence.CookieName) + } + d.Set(isLBPoolProvisioningStatus, *lbPool.ProvisioningStatus) + getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{ + ID: &lbID, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response) + } + d.Set(RelatedCRN, *lb.CRN) + return nil +} + +func lbPoolGet(d *schema.ResourceData, meta interface{}, lbID, lbPoolID string) error { + sess, err := vpcClient(meta) + if err 
!= nil { + return err + } + getLoadBalancerPoolOptions := &vpcv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + + lbPool, response, err := sess.GetLoadBalancerPool(getLoadBalancerPoolOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Load Balancer Pool : %s\n%s", err, response) + } + d.Set(isLBPoolName, *lbPool.Name) + d.Set(isLBPool, lbPoolID) + d.Set(isLBID, lbID) + d.Set(isLBPoolAlgorithm, *lbPool.Algorithm) + d.Set(isLBPoolProtocol, *lbPool.Protocol) + d.Set(isLBPoolHealthDelay, *lbPool.HealthMonitor.Delay) + d.Set(isLBPoolHealthRetries, *lbPool.HealthMonitor.MaxRetries) + d.Set(isLBPoolHealthTimeout, *lbPool.HealthMonitor.Timeout) + if lbPool.HealthMonitor.Type != nil { + d.Set(isLBPoolHealthType, *lbPool.HealthMonitor.Type) + } + if lbPool.HealthMonitor.URLPath != nil { + d.Set(isLBPoolHealthMonitorURL, *lbPool.HealthMonitor.URLPath) + } + if lbPool.HealthMonitor.Port != nil { + d.Set(isLBPoolHealthMonitorPort, *lbPool.HealthMonitor.Port) + } + if lbPool.SessionPersistence != nil { + d.Set(isLBPoolSessPersistenceType, *lbPool.SessionPersistence.Type) + // d.Set(isLBPoolSessPersistenceCookieName, *lbPool.SessionPersistence.CookieName) + } + d.Set(isLBPoolProvisioningStatus, *lbPool.ProvisioningStatus) + d.Set(isLBPoolProxyProtocol, *lbPool.ProxyProtocol) + getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{ + ID: &lbID, + } + lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response) + } + d.Set(RelatedCRN, *lb.CRN) + return nil +} + +func resourceIBMISLBPoolUpdate(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + lbID := parts[0] + lbPoolID := parts[1] + + if userDetails.generation == 1 { + err := classicLBPoolUpdate(d, meta, lbID, lbPoolID) + if err != nil { + return err + } + } else { + err := lbPoolUpdate(d, meta, lbID, lbPoolID) + if err != nil { + return err + } + } + return resourceIBMISLBPoolRead(d, meta) +} + +func classicLBPoolUpdate(d *schema.ResourceData, meta interface{}, lbID, lbPoolID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + hasChanged := false + + updateLoadBalancerPoolOptions := &vpcclassicv1.UpdateLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + + loadBalancerPoolPatchModel := &vpcclassicv1.LoadBalancerPoolPatch{} + + if d.HasChange(isLBPoolHealthDelay) || d.HasChange(isLBPoolHealthRetries) || + d.HasChange(isLBPoolHealthTimeout) || d.HasChange(isLBPoolHealthType) || d.HasChange(isLBPoolHealthMonitorURL) || d.HasChange(isLBPoolHealthMonitorPort) { + + delay := int64(d.Get(isLBPoolHealthDelay).(int)) + maxretries := int64(d.Get(isLBPoolHealthRetries).(int)) + timeout := int64(d.Get(isLBPoolHealthTimeout).(int)) + healthtype := d.Get(isLBPoolHealthType).(string) + urlpath := d.Get(isLBPoolHealthMonitorURL).(string) + healthMonitorTemplate := &vpcclassicv1.LoadBalancerPoolHealthMonitorPatch{ + Delay: &delay, + MaxRetries: &maxretries, + Timeout: &timeout, + Type: &healthtype, + URLPath: &urlpath, + } + port := int64(d.Get(isLBPoolHealthMonitorPort).(int)) + if port > int64(0) { + healthMonitorTemplate.Port = &port + } + + loadBalancerPoolPatchModel.HealthMonitor = healthMonitorTemplate + hasChanged = 
true + } + + if d.HasChange(isLBPoolSessPersistenceType) || d.HasChange(isLBPoolSessPersistenceCookieName) { + sesspersistancetype := d.Get(isLBPoolSessPersistenceType).(string) + sessionPersistence := &vpcclassicv1.LoadBalancerPoolSessionPersistencePatch{ + Type: &sesspersistancetype, + } + loadBalancerPoolPatchModel.SessionPersistence = sessionPersistence + hasChanged = true + } + + if d.HasChange(isLBPoolName) || d.HasChange(isLBPoolAlgorithm) || d.HasChange(isLBPoolProtocol) || hasChanged { + name := d.Get(isLBPoolName).(string) + algorithm := d.Get(isLBPoolAlgorithm).(string) + protocol := d.Get(isLBPoolProtocol).(string) + + loadBalancerPoolPatchModel.Algorithm = &algorithm + loadBalancerPoolPatchModel.Name = &name + loadBalancerPoolPatchModel.Protocol = &protocol + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + _, err := isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + _, err = isWaitForClassicLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + LoadBalancerPoolPatch, err := loadBalancerPoolPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for LoadBalancerPoolPatch: %s", err) + } + updateLoadBalancerPoolOptions.LoadBalancerPoolPatch = LoadBalancerPoolPatch + + _, response, err := sess.UpdateLoadBalancerPool(updateLoadBalancerPoolOptions) + if err != nil { + return fmt.Errorf("Error Updating Load Balancer Pool : %s\n%s", err, response) + } + + _, err = isWaitForClassicLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + } + return nil +} + +func lbPoolUpdate(d *schema.ResourceData, meta interface{}, lbID, lbPoolID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + hasChanged := false + + updateLoadBalancerPoolOptions := &vpcv1.UpdateLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + + loadBalancerPoolPatchModel := &vpcv1.LoadBalancerPoolPatch{} + + if d.HasChange(isLBPoolHealthDelay) || d.HasChange(isLBPoolHealthRetries) || + d.HasChange(isLBPoolHealthTimeout) || d.HasChange(isLBPoolHealthType) || d.HasChange(isLBPoolHealthMonitorURL) || d.HasChange(isLBPoolHealthMonitorPort) { + + delay := int64(d.Get(isLBPoolHealthDelay).(int)) + maxretries := int64(d.Get(isLBPoolHealthRetries).(int)) + timeout := int64(d.Get(isLBPoolHealthTimeout).(int)) + healthtype := d.Get(isLBPoolHealthType).(string) + urlpath := d.Get(isLBPoolHealthMonitorURL).(string) + healthMonitorTemplate := &vpcv1.LoadBalancerPoolHealthMonitorPatch{ + Delay: &delay, + MaxRetries: &maxretries, + Timeout: &timeout, + Type: &healthtype, + URLPath: &urlpath, + } + port := int64(d.Get(isLBPoolHealthMonitorPort).(int)) + if port > int64(0) { + healthMonitorTemplate.Port = &port + } + loadBalancerPoolPatchModel.HealthMonitor = healthMonitorTemplate + hasChanged = true + } + + if d.HasChange(isLBPoolSessPersistenceType) || d.HasChange(isLBPoolSessPersistenceCookieName) { + 
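+	// This and the following HasChange blocks fold changed fields into a
+	// single LoadBalancerPoolPatch; hasChanged gates the API call so an empty
+	// PATCH is never sent. A minimal sketch of the SDK patch pattern used
+	// here (illustrative values only, not a definitive implementation):
+	//
+	//	patchModel := &vpcv1.LoadBalancerPoolPatch{Name: &name}
+	//	patch, err := patchModel.AsPatch() // yields map[string]interface{}
+	//	if err == nil {
+	//		opts.LoadBalancerPoolPatch = patch
+	//		_, _, err = sess.UpdateLoadBalancerPool(opts)
+	//	}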
sesspersistancetype := d.Get(isLBPoolSessPersistenceType).(string) + sessionPersistence := &vpcv1.LoadBalancerPoolSessionPersistencePatch{ + Type: &sesspersistancetype, + } + loadBalancerPoolPatchModel.SessionPersistence = sessionPersistence + hasChanged = true + } + + if d.HasChange(isLBPoolProxyProtocol) { + proxyProtocol := d.Get(isLBPoolProxyProtocol).(string) + loadBalancerPoolPatchModel.ProxyProtocol = &proxyProtocol + hasChanged = true + } + + if d.HasChange(isLBPoolName) || d.HasChange(isLBPoolAlgorithm) || d.HasChange(isLBPoolProtocol) || hasChanged { + name := d.Get(isLBPoolName).(string) + algorithm := d.Get(isLBPoolAlgorithm).(string) + protocol := d.Get(isLBPoolProtocol).(string) + + loadBalancerPoolPatchModel.Algorithm = &algorithm + loadBalancerPoolPatchModel.Name = &name + loadBalancerPoolPatchModel.Protocol = &protocol + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + _, err := isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + _, err = isWaitForLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + LoadBalancerPoolPatch, err := loadBalancerPoolPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for LoadBalancerPoolPatch: %s", err) + } + updateLoadBalancerPoolOptions.LoadBalancerPoolPatch = LoadBalancerPoolPatch + + _, response, err := sess.UpdateLoadBalancerPool(updateLoadBalancerPoolOptions) + if err != nil { + return fmt.Errorf("Error Updating Load Balancer Pool : %s\n%s", err, response) + } + + _, err = isWaitForLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + } + return nil +} + +func resourceIBMISLBPoolDelete(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + lbID := parts[0] + lbPoolID := parts[1] + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + if userDetails.generation == 1 { + err := classicLBPoolDelete(d, meta, lbID, lbPoolID) + if err != nil { + return err + } + } else { + err := lbPoolDelete(d, meta, lbID, lbPoolID) + if err != nil { + return err + } + } + return nil +} + +func classicLBPoolDelete(d *schema.ResourceData, meta interface{}, lbID, lbPoolID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getLoadBalancerPoolOptions := &vpcclassicv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + _, response, err := sess.GetLoadBalancerPool(getLoadBalancerPoolOptions) + + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting vpc load balancer pool(%s): %s\n%s", lbPoolID, err, response) + } + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error 
checking for load balancer (%s) is active: %s", lbID, err) + } + + _, err = isWaitForClassicLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + deleteLoadBalancerPoolOptions := &vpcclassicv1.DeleteLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + response, err = sess.DeleteLoadBalancerPool(deleteLoadBalancerPoolOptions) + if err != nil { + return fmt.Errorf("Error Deleting Load Balancer Pool : %s\n%s", err, response) + } + _, err = isWaitForClassicLBPoolDeleted(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is deleted: %s", lbPoolID, err) + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + d.SetId("") + return nil +} + +func lbPoolDelete(d *schema.ResourceData, meta interface{}, lbID, lbPoolID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getLoadBalancerPoolOptions := &vpcv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + _, response, err := sess.GetLoadBalancerPool(getLoadBalancerPoolOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting vpc load balancer pool(%s): %s\n%s", lbPoolID, err, response) + } + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + _, err = isWaitForLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + deleteLoadBalancerPoolOptions := &vpcv1.DeleteLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + response, err = sess.DeleteLoadBalancerPool(deleteLoadBalancerPoolOptions) + if err != nil { + return fmt.Errorf("Error Deleting Load Balancer Pool : %s\n%s", err, response) + } + _, err = isWaitForLBPoolDeleted(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is deleted: %s", lbPoolID, err) + } + + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + d.SetId("") + return nil +} + +func resourceIBMISLBPoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + if len(parts) != 2 { + return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of lbID/lbPoolID", d.Id()) + } + + lbID := parts[0] + lbPoolID := parts[1] + + if userDetails.generation == 1 { + exists, err := classicLBPoolExists(d, meta, lbID, lbPoolID) + return exists, err + } else { + exists, err := lbPoolExists(d, meta, lbID, lbPoolID) + return exists, err + } +} + +func classicLBPoolExists(d *schema.ResourceData, meta interface{}, lbID, lbPoolID string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err 
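+	// (Exists contract: below, a 404 from GetLoadBalancerPool maps to
+	// (false, nil) so Terraform removes the resource from state, while any
+	// other error is returned to the caller unchanged.)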
+ } + + getLoadBalancerPoolOptions := &vpcclassicv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + _, response, err := sess.GetLoadBalancerPool(getLoadBalancerPoolOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Load balancer pool: %s\n%s", err, response) + } + return true, nil +} + +func lbPoolExists(d *schema.ResourceData, meta interface{}, lbID, lbPoolID string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + + getLoadBalancerPoolOptions := &vpcv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbID, + ID: &lbPoolID, + } + _, response, err := sess.GetLoadBalancerPool(getLoadBalancerPoolOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Load balancer pool: %s\n%s", err, response) + } + return true, nil +} + +func isWaitForLBPoolActive(sess *vpcv1.VpcV1, lbId, lbPoolId string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for load balancer pool (%s) to be available.", lbPoolId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBPoolCreatePending, isLBPoolUpdatePending, isLBPoolMaintainancePending}, + Target: []string{isLBPoolActive, ""}, + Refresh: isLBPoolRefreshFunc(sess, lbId, lbPoolId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLBPoolRefreshFunc(sess *vpcv1.VpcV1, lbId, lbPoolId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getlbpOptions := &vpcv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbId, + ID: &lbPoolId, + } + lbPool, response, err := sess.GetLoadBalancerPool(getlbpOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Load Balancer Pool: %s\n%s", err, response) + } + + if *lbPool.ProvisioningStatus == isLBPoolActive || *lbPool.ProvisioningStatus == isLBPoolFailed { + return lbPool, isLBPoolActive, nil + } + + return lbPool, *lbPool.ProvisioningStatus, nil + } +} + +func isWaitForClassicLBPoolActive(sess *vpcclassicv1.VpcClassicV1, lbId, lbPoolId string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for load balancer pool (%s) to be available.", lbPoolId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBPoolCreatePending, isLBPoolUpdatePending, isLBPoolMaintainancePending}, + Target: []string{isLBPoolActive, ""}, + Refresh: isClassicLBPoolRefreshFunc(sess, lbId, lbPoolId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicLBPoolRefreshFunc(sess *vpcclassicv1.VpcClassicV1, lbId, lbPoolId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getlbpOptions := &vpcclassicv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbId, + ID: &lbPoolId, + } + lbPool, response, err := sess.GetLoadBalancerPool(getlbpOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Load Balancer Pool: %s\n%s", err, response) + } + + if *lbPool.ProvisioningStatus == isLBPoolActive || *lbPool.ProvisioningStatus == isLBPoolFailed { + return lbPool, isLBPoolActive, nil + } + + return lbPool, *lbPool.ProvisioningStatus, nil + } +} + +func isWaitForClassicLBPoolDeleted(lbc *vpcclassicv1.VpcClassicV1, lbId, lbPoolId string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting 
for (%s) to be deleted.", lbPoolId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBPoolUpdatePending, isLBPoolMaintainancePending, isLBPoolDeletePending}, + Target: []string{isLBPoolDeleteDone, ""}, + Refresh: isClassicLBPoolDeleteRefreshFunc(lbc, lbId, lbPoolId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicLBPoolDeleteRefreshFunc(lbc *vpcclassicv1.VpcClassicV1, lbId, lbPoolId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getlbpOptions := &vpcclassicv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbId, + ID: &lbPoolId, + } + lbPool, response, err := lbc.GetLoadBalancerPool(getlbpOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return lbPool, isLBPoolDeleteDone, nil + } + return nil, "", fmt.Errorf("The vpc load balancer pool %s failed to delete: %s\n%s", lbPoolId, err, response) + } + return lbPool, isLBPoolDeletePending, nil + } +} + +func isWaitForLBPoolDeleted(lbc *vpcv1.VpcV1, lbId, lbPoolId string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for (%s) to be deleted.", lbPoolId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBPoolUpdatePending, isLBPoolMaintainancePending, isLBPoolDeletePending}, + Target: []string{isLBPoolDeleteDone, ""}, + Refresh: isLBPoolDeleteRefreshFunc(lbc, lbId, lbPoolId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLBPoolDeleteRefreshFunc(lbc *vpcv1.VpcV1, lbId, lbPoolId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getlbpOptions := &vpcv1.GetLoadBalancerPoolOptions{ + LoadBalancerID: &lbId, + ID: &lbPoolId, + } + lbPool, response, err := lbc.GetLoadBalancerPool(getlbpOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return lbPool, isLBPoolDeleteDone, nil + } + return nil, "", fmt.Errorf("The vpc load balancer pool %s failed to delete: %s\n%s", lbPoolId, err, response) + } + return lbPool, isLBPoolDeletePending, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_pool_member.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_pool_member.go new file mode 100644 index 00000000000..7c018e63b32 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_lb_pool_member.go @@ -0,0 +1,995 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
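+// resource_ibm_is_lb_pool_member manages a single member of a VPC load
+// balancer pool, with separate code paths for classic (generation 1) and
+// generation 2 API clients.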
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/IBM/vpc-go-sdk/vpcclassicv1"
+	"github.com/IBM/vpc-go-sdk/vpcv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	isLBPoolID                       = "pool"
+	isLBPoolMemberPort               = "port"
+	isLBPoolMemberTargetAddress      = "target_address"
+	isLBPoolMemberTargetID           = "target_id"
+	isLBPoolMemberWeight             = "weight"
+	isLBPoolMemberProvisioningStatus = "provisioning_status"
+	isLBPoolMemberHealth             = "health"
+	isLBPoolMemberHref               = "href"
+	isLBPoolMemberDeletePending      = "delete_pending"
+	isLBPoolMemberDeleted            = "done"
+	isLBPoolMemberActive             = "active"
+	isLBPoolUpdating                 = "updating"
+)
+
+func resourceIBMISLBPoolMember() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMISLBPoolMemberCreate,
+		Read:     resourceIBMISLBPoolMemberRead,
+		Update:   resourceIBMISLBPoolMemberUpdate,
+		Delete:   resourceIBMISLBPoolMemberDelete,
+		Exists:   resourceIBMISLBPoolMemberExists,
+		Importer: &schema.ResourceImporter{},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Update: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			isLBPoolID: {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				// Suppress the diff when the configuration references the
+				// composite "lbID/poolID" form while state stores the bare
+				// pool ID.
+				DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {
+					if o == "" {
+						return false
+					}
+					// state entry and config entry match exactly
+					if strings.Compare(n, o) == 0 {
+						return true
+					}
+
+					if strings.Contains(n, "/") {
+						new := strings.Split(n, "/")
+						if strings.Compare(new[1], o) == 0 {
+							return true
+						}
+					}
+
+					return false
+				},
+				Description: "Load balancer pool ID",
+			},
+
+			isLBID: {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Load balancer ID",
+			},
+
+			isLBPoolMemberPort: {
+				Type:        schema.TypeInt,
+				Required:    true,
+				Description: "Load balancer pool member port",
+			},
+
+			isLBPoolMemberTargetAddress: {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ExactlyOneOf: []string{isLBPoolMemberTargetAddress, isLBPoolMemberTargetID},
+				Description:  "Load balancer pool member target address",
+			},
+
+			isLBPoolMemberTargetID: {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ExactlyOneOf: []string{isLBPoolMemberTargetAddress, isLBPoolMemberTargetID},
+				Description:  "Load balancer pool member target ID",
+			},
+
+			isLBPoolMemberWeight: {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Computed:    true,
+				Description: "Load balancer pool member weight",
+			},
+
+			isLBPoolMemberProvisioningStatus: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Load balancer pool member provisioning status",
+			},
+
+			isLBPoolMemberHealth: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "LB pool member health",
+			},
+
+			isLBPoolMemberHref: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "LB pool member href value",
+			},
+
+			RelatedCRN: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The CRN of the LB resource",
+			},
+		},
+	}
+}
+
+func resourceIBMISLBPoolMemberCreate(d *schema.ResourceData, meta interface{}) error {
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+	log.Printf("[DEBUG] LB pool member create")
+	lbPoolID, err := getPoolId(d.Get(isLBPoolID).(string))
+	if err != nil {
+		return err
+	}
+
+	lbID := d.Get(isLBID).(string)
+	port := d.Get(isLBPoolMemberPort).(int)
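+	// The schema stores port and weight as plain ints, while the VPC SDK
+	// option structs expect *int64, so both are converted before the
+	// request payload is built.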
+ port64 := int64(port) + + var weight int64 + if w, ok := d.GetOk(isLBPoolMemberWeight); ok { + weight = int64(w.(int)) + } + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + if userDetails.generation == 1 { + targetAddress := d.Get(isLBPoolMemberTargetAddress).(string) + err := classiclbpMemberCreate(d, meta, lbID, lbPoolID, targetAddress, port64, weight) + if err != nil { + return err + } + } else { + err := lbpMemberCreate(d, meta, lbID, lbPoolID, port64, weight) + if err != nil { + return err + } + } + + return resourceIBMISLBPoolMemberRead(d, meta) +} + +func classiclbpMemberCreate(d *schema.ResourceData, meta interface{}, lbID, lbPoolID, targetAddress string, port, weight int64) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + _, err = isWaitForClassicLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + options := &vpcclassicv1.CreateLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + Port: &port, + Target: &vpcclassicv1.LoadBalancerPoolMemberTargetPrototype{ + Address: &targetAddress, + }, + } + if weight > int64(0) { + options.Weight = &weight + } + lbPoolMember, response, err := sess.CreateLoadBalancerPoolMember(options) + if err != nil { + return fmt.Errorf("[DEBUG] lbpool member create err: %s\n%s", err, response) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", lbID, lbPoolID, *lbPoolMember.ID)) + log.Printf("[INFO] lbpool member : %s", *lbPoolMember.ID) + + _, err = isWaitForClassicLBPoolMemberAvailable(sess, lbID, lbPoolID, *lbPoolMember.ID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + _, err = isWaitForClassicLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + return nil +} + +func lbpMemberCreate(d *schema.ResourceData, meta interface{}, lbID, lbPoolID string, port, weight int64) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + _, err = isWaitForLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + options := &vpcv1.CreateLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + Port: &port, + } + + if _, ok := d.GetOk(isLBPoolMemberTargetAddress); ok { + targetAddress := d.Get(isLBPoolMemberTargetAddress).(string) + target := &vpcv1.LoadBalancerPoolMemberTargetPrototype{ + Address: &targetAddress, + } + options.Target = target + } else { + targetID := d.Get(isLBPoolMemberTargetID).(string) + target := &vpcv1.LoadBalancerPoolMemberTargetPrototype{ + ID: &targetID, + } + options.Target = target + } + + if weight 
> int64(0) { + options.Weight = &weight + } + lbPoolMember, response, err := sess.CreateLoadBalancerPoolMember(options) + if err != nil { + return fmt.Errorf("[DEBUG] lbpool member create err: %s\n%s", err, response) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", lbID, lbPoolID, *lbPoolMember.ID)) + log.Printf("[INFO] lbpool member : %s", *lbPoolMember.ID) + + _, err = isWaitForLBPoolMemberAvailable(sess, lbID, lbPoolID, *lbPoolMember.ID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + _, err = isWaitForLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + return nil +} + +func isWaitForClassicLBPoolMemberAvailable(lbc *vpcclassicv1.VpcClassicV1, lbID, lbPoolID, lbPoolMemID string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for load balancer pool member(%s) to be available.", lbPoolMemID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBPoolMemberActive, ""}, + Refresh: isClassicLBPoolMemberRefreshFunc(lbc, lbID, lbPoolID, lbPoolMemID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicLBPoolMemberRefreshFunc(lbc *vpcclassicv1.VpcClassicV1, lbID, lbPoolID, lbPoolMemID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getlbpmoptions := &vpcclassicv1.GetLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: &lbPoolMemID, + } + lbPoolMem, response, err := lbc.GetLoadBalancerPoolMember(getlbpmoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Load Balancer Pool Member: %s\n%s", err, response) + } + + if *lbPoolMem.ProvisioningStatus == isLBPoolMemberActive { + return lbPoolMem, *lbPoolMem.ProvisioningStatus, nil + } + + return lbPoolMem, *lbPoolMem.ProvisioningStatus, nil + } +} + +func isWaitForLBPoolMemberAvailable(lbc *vpcv1.VpcV1, lbID, lbPoolID, lbPoolMemID string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for load balancer pool member(%s) to be available.", lbPoolMemID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"create_pending", "update_pending", "maintenance_pending"}, + Target: []string{isLBPoolMemberActive, ""}, + Refresh: isLBPoolMemberRefreshFunc(lbc, lbID, lbPoolID, lbPoolMemID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isLBPoolMemberRefreshFunc(lbc *vpcv1.VpcV1, lbID, lbPoolID, lbPoolMemID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getlbpmoptions := &vpcv1.GetLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: &lbPoolMemID, + } + lbPoolMem, response, err := lbc.GetLoadBalancerPoolMember(getlbpmoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Load Balancer Pool Member: %s\n%s", err, response) + } + + if *lbPoolMem.ProvisioningStatus == isLBPoolMemberActive { + return lbPoolMem, *lbPoolMem.ProvisioningStatus, nil + } + + return lbPoolMem, *lbPoolMem.ProvisioningStatus, nil + } +} + +func resourceIBMISLBPoolMemberRead(d 
*schema.ResourceData, meta interface{}) error {
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+
+	// The composite resource ID has the form "lbID/poolID/memberID".
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+
+	if len(parts) < 3 {
+		return fmt.Errorf(
+			"The ID should contain the load balancer ID, load balancer pool ID and load balancer pool member ID")
+	}
+
+	lbID := parts[0]
+	lbPoolID := parts[1]
+	lbPoolMemID := parts[2]
+
+	if userDetails.generation == 1 {
+		err := classiclbpmemberGet(d, meta, lbID, lbPoolID, lbPoolMemID)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := lbpmemberGet(d, meta, lbID, lbPoolID, lbPoolMemID)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func classiclbpmemberGet(d *schema.ResourceData, meta interface{}, lbID, lbPoolID, lbPoolMemID string) error {
+	sess, err := classicVpcClient(meta)
+	if err != nil {
+		return err
+	}
+	getlbpmoptions := &vpcclassicv1.GetLoadBalancerPoolMemberOptions{
+		LoadBalancerID: &lbID,
+		PoolID:         &lbPoolID,
+		ID:             &lbPoolMemID,
+	}
+	lbPoolMem, response, err := sess.GetLoadBalancerPoolMember(getlbpmoptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error Getting Load Balancer Pool Member: %s\n%s", err, response)
+	}
+
+	d.Set(isLBPoolID, lbPoolID)
+	d.Set(isLBID, lbID)
+	d.Set(isLBPoolMemberPort, *lbPoolMem.Port)
+
+	targetaddress := lbPoolMem.Target.(*vpcclassicv1.LoadBalancerPoolMemberTarget)
+	d.Set(isLBPoolMemberTargetAddress, *targetaddress.Address)
+	d.Set(isLBPoolMemberWeight, *lbPoolMem.Weight)
+	d.Set(isLBPoolMemberProvisioningStatus, *lbPoolMem.ProvisioningStatus)
+	d.Set(isLBPoolMemberHealth, *lbPoolMem.Health)
+	d.Set(isLBPoolMemberHref, *lbPoolMem.Href)
+	getLoadBalancerOptions := &vpcclassicv1.GetLoadBalancerOptions{
+		ID: &lbID,
+	}
+	lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions)
+	if err != nil {
+		return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response)
+	}
+	d.Set(RelatedCRN, *lb.CRN)
+	return nil
+}
+
+func lbpmemberGet(d *schema.ResourceData, meta interface{}, lbID, lbPoolID, lbPoolMemID string) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	getlbpmoptions := &vpcv1.GetLoadBalancerPoolMemberOptions{
+		LoadBalancerID: &lbID,
+		PoolID:         &lbPoolID,
+		ID:             &lbPoolMemID,
+	}
+	lbPoolMem, response, err := sess.GetLoadBalancerPoolMember(getlbpmoptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error Getting Load Balancer Pool Member: %s\n%s", err, response)
+	}
+	d.Set(isLBPoolID, lbPoolID)
+	d.Set(isLBID, lbID)
+	d.Set(isLBPoolMemberPort, *lbPoolMem.Port)
+
+	target := lbPoolMem.Target.(*vpcv1.LoadBalancerPoolMemberTarget)
+	if target.Address != nil {
+		d.Set(isLBPoolMemberTargetAddress, *target.Address)
+	}
+	if target.ID != nil {
+		d.Set(isLBPoolMemberTargetID, *target.ID)
+	}
+	d.Set(isLBPoolMemberWeight, *lbPoolMem.Weight)
+	d.Set(isLBPoolMemberProvisioningStatus, *lbPoolMem.ProvisioningStatus)
+	d.Set(isLBPoolMemberHealth, *lbPoolMem.Health)
+	d.Set(isLBPoolMemberHref, *lbPoolMem.Href)
+	getLoadBalancerOptions := &vpcv1.GetLoadBalancerOptions{
+		ID: &lbID,
+	}
+	lb, response, err := sess.GetLoadBalancer(getLoadBalancerOptions)
+	if err != nil {
+		return fmt.Errorf("Error Getting Load Balancer : %s\n%s", err, response)
+	}
+	d.Set(RelatedCRN, *lb.CRN)
+	return nil
+}
+
+func resourceIBMISLBPoolMemberUpdate(d *schema.ResourceData, meta interface{}) error {
+	userDetails, err
:= meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + lbID := parts[0] + lbPoolID := parts[1] + lbPoolMemID := parts[2] + + if userDetails.generation == 1 { + err := classiclbpmemberUpdate(d, meta, lbID, lbPoolID, lbPoolMemID) + if err != nil { + return err + } + } else { + err := lbpmemberUpdate(d, meta, lbID, lbPoolID, lbPoolMemID) + if err != nil { + return err + } + } + + return resourceIBMISLBPoolMemberRead(d, meta) +} +func classiclbpmemberUpdate(d *schema.ResourceData, meta interface{}, lbID, lbPoolID, lbPoolMemID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + if d.HasChange(isLBPoolMemberTargetAddress) || d.HasChange(isLBPoolMemberPort) || d.HasChange(isLBPoolMemberWeight) { + port := int64(d.Get(isLBPoolMemberPort).(int)) + targetAddress := d.Get(isLBPoolMemberTargetAddress).(string) + weight := int64(d.Get(isLBPoolMemberWeight).(int)) + + isLBKey := "load_balancer_key_" + lbID + ibmMutexKV.Lock(isLBKey) + defer ibmMutexKV.Unlock(isLBKey) + + _, err = isWaitForClassicLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForClassicLBPoolMemberAvailable(sess, lbID, lbPoolID, lbPoolMemID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + updatelbpmoptions := &vpcclassicv1.UpdateLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: &lbPoolMemID, + } + + loadBalancerPoolMemberPatchModel := &vpcclassicv1.LoadBalancerPoolMemberPatch{ + Port: &port, + Target: &vpcclassicv1.LoadBalancerPoolMemberTargetPrototype{ + Address: &targetAddress, + }, + Weight: &weight, + } + + loadBalancerPoolMemberPatch, err := loadBalancerPoolMemberPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for LoadBalancerPoolMemberPatch: %s", err) + } + updatelbpmoptions.LoadBalancerPoolMemberPatch = loadBalancerPoolMemberPatch + + _, response, err := sess.UpdateLoadBalancerPoolMember(updatelbpmoptions) + if err != nil { + return fmt.Errorf("Error Updating Load Balancer Pool Member: %s\n%s", err, response) + } + _, err = isWaitForClassicLBPoolMemberAvailable(sess, lbID, lbPoolID, lbPoolMemID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + _, err = isWaitForClassicLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + } + return nil +} + +func lbpmemberUpdate(d *schema.ResourceData, meta interface{}, lbID, lbPoolID, lbPoolMemID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + if d.HasChange(isLBPoolMemberTargetID) || d.HasChange(isLBPoolMemberTargetAddress) || d.HasChange(isLBPoolMemberPort) || d.HasChange(isLBPoolMemberWeight) { + + port := int64(d.Get(isLBPoolMemberPort).(int)) + weight := int64(d.Get(isLBPoolMemberWeight).(int)) + + isLBKey := "load_balancer_key_" 
+ lbID
+		ibmMutexKV.Lock(isLBKey)
+		defer ibmMutexKV.Unlock(isLBKey)
+
+		_, err = isWaitForLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutUpdate))
+		if err != nil {
+			return fmt.Errorf(
+				"Error checking for load balancer pool (%s) is active: %s", lbPoolID, err)
+		}
+
+		_, err = isWaitForLBPoolMemberAvailable(sess, lbID, lbPoolID, lbPoolMemID, d.Timeout(schema.TimeoutUpdate))
+		if err != nil {
+			return err
+		}
+
+		_, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate))
+		if err != nil {
+			return fmt.Errorf(
+				"Error checking for load balancer (%s) is active: %s", lbID, err)
+		}
+
+		updatelbpmoptions := &vpcv1.UpdateLoadBalancerPoolMemberOptions{
+			LoadBalancerID: &lbID,
+			PoolID:         &lbPoolID,
+			ID:             &lbPoolMemID,
+		}
+
+		loadBalancerPoolMemberPatchModel := &vpcv1.LoadBalancerPoolMemberPatch{
+			Port:   &port,
+			Weight: &weight,
+		}
+
+		if _, ok := d.GetOk(isLBPoolMemberTargetAddress); ok {
+			targetAddress := d.Get(isLBPoolMemberTargetAddress).(string)
+			target := &vpcv1.LoadBalancerPoolMemberTargetPrototype{
+				Address: &targetAddress,
+			}
+			loadBalancerPoolMemberPatchModel.Target = target
+		} else {
+			targetID := d.Get(isLBPoolMemberTargetID).(string)
+			target := &vpcv1.LoadBalancerPoolMemberTargetPrototype{
+				ID: &targetID,
+			}
+			loadBalancerPoolMemberPatchModel.Target = target
+		}
+
+		loadBalancerPoolMemberPatch, err := loadBalancerPoolMemberPatchModel.AsPatch()
+		if err != nil {
+			return fmt.Errorf("Error calling asPatch for LoadBalancerPoolMemberPatch: %s", err)
+		}
+		updatelbpmoptions.LoadBalancerPoolMemberPatch = loadBalancerPoolMemberPatch
+
+		_, response, err := sess.UpdateLoadBalancerPoolMember(updatelbpmoptions)
+		if err != nil {
+			return fmt.Errorf("Error Updating Load Balancer Pool Member: %s\n%s", err, response)
+		}
+		_, err = isWaitForLBPoolMemberAvailable(sess, lbID, lbPoolID, lbPoolMemID, d.Timeout(schema.TimeoutUpdate))
+		if err != nil {
+			return err
+		}
+
+		_, err = isWaitForLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutUpdate))
+		if err != nil {
+			return fmt.Errorf(
+				"Error checking for load balancer pool (%s) is active: %s", lbPoolID, err)
+		}
+
+		_, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutUpdate))
+		if err != nil {
+			return fmt.Errorf(
+				"Error checking for load balancer (%s) is active: %s", lbID, err)
+		}
+	}
+	return nil
+}
+
+func resourceIBMISLBPoolMemberDelete(d *schema.ResourceData, meta interface{}) error {
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+
+	lbID := parts[0]
+	lbPoolID := parts[1]
+	lbPoolMemID := parts[2]
+
+	isLBKey := "load_balancer_key_" + lbID
+	ibmMutexKV.Lock(isLBKey)
+	defer ibmMutexKV.Unlock(isLBKey)
+
+	if userDetails.generation == 1 {
+		err := classiclbpmemberDelete(d, meta, lbID, lbPoolID, lbPoolMemID)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := lbpmemberDelete(d, meta, lbID, lbPoolID, lbPoolMemID)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func classiclbpmemberDelete(d *schema.ResourceData, meta interface{}, lbID, lbPoolID, lbPoolMemID string) error {
+	sess, err := classicVpcClient(meta)
+	if err != nil {
+		return err
+	}
+	getlbpmoptions := &vpcclassicv1.GetLoadBalancerPoolMemberOptions{
+		LoadBalancerID: &lbID,
+		PoolID:         &lbPoolID,
+		ID:             &lbPoolMemID,
+	}
+	_, response, err := sess.GetLoadBalancerPoolMember(getlbpmoptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
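+			// (a 404 on the lookup above means the member is already gone;
+			// the delete is treated as a successful no-op)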
} + return fmt.Errorf("Error Getting Load Balancer Pool Member: %s\n%s", err, response) + } + _, err = isWaitForClassicLBPoolMemberAvailable(sess, lbID, lbPoolID, lbPoolMemID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + _, err = isWaitForClassicLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + dellbpmoptions := &vpcclassicv1.DeleteLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: &lbPoolMemID, + } + response, err = sess.DeleteLoadBalancerPoolMember(dellbpmoptions) + if err != nil { + return fmt.Errorf("Error Deleting Load Balancer Pool Member: %s\n%s", err, response) + } + + _, err = isWaitForClassicLBPoolMemberDeleted(sess, lbID, lbPoolID, lbPoolMemID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + _, err = isWaitForClassicLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForClassicLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + d.SetId("") + return nil +} + +func lbpmemberDelete(d *schema.ResourceData, meta interface{}, lbID, lbPoolID, lbPoolMemID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getlbpmoptions := &vpcv1.GetLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: &lbPoolMemID, + } + _, response, err := sess.GetLoadBalancerPoolMember(getlbpmoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Load Balancer Pool Member: %s\n%s", err, response) + } + _, err = isWaitForLBPoolMemberAvailable(sess, lbID, lbPoolID, lbPoolMemID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + _, err = isWaitForLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + dellbpmoptions := &vpcv1.DeleteLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: &lbPoolMemID, + } + response, err = sess.DeleteLoadBalancerPoolMember(dellbpmoptions) + if err != nil { + return fmt.Errorf("Error Deleting Load Balancer Pool Member: %s\n%s", err, response) + } + + _, err = isWaitForLBPoolMemberDeleted(sess, lbID, lbPoolID, lbPoolMemID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + _, err = isWaitForLBPoolActive(sess, lbID, lbPoolID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer pool (%s) is active: %s", lbPoolID, err) + } + + _, err = isWaitForLBAvailable(sess, lbID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", lbID, err) + } + + 
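+	// All of the waiters above have settled, so the member can safely be
+	// removed from Terraform state.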
d.SetId("") + return nil +} +func isWaitForClassicLBPoolMemberDeleted(lbc *vpcclassicv1.VpcClassicV1, lbID, lbPoolID, lbPoolMemID string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for (%s) to be deleted.", lbPoolMemID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBPoolMemberDeletePending}, + Target: []string{isLBPoolMemberDeleted, ""}, + Refresh: isDeleteClassicLBPoolMemberRefreshFunc(lbc, lbID, lbPoolID, lbPoolMemID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} +func isWaitForLBPoolMemberDeleted(lbc *vpcv1.VpcV1, lbID, lbPoolID, lbPoolMemID string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for (%s) to be deleted.", lbPoolMemID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isLBPoolMemberDeletePending}, + Target: []string{isLBPoolMemberDeleted, ""}, + Refresh: isDeleteLBPoolMemberRefreshFunc(lbc, lbID, lbPoolID, lbPoolMemID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isDeleteClassicLBPoolMemberRefreshFunc(lbc *vpcclassicv1.VpcClassicV1, lbID, lbPoolID, lbPoolMemID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getlbpmoptions := &vpcclassicv1.GetLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: &lbPoolMemID, + } + lbPoolMem, response, err := lbc.GetLoadBalancerPoolMember(getlbpmoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return lbPoolMem, isLBPoolMemberDeleted, nil + } + return nil, "", fmt.Errorf("Error Deleting Load balancer pool member: %s\n%s", err, response) + } + return lbPoolMem, isLBPoolMemberDeletePending, nil + } +} + +func isDeleteLBPoolMemberRefreshFunc(lbc *vpcv1.VpcV1, lbID, lbPoolID, lbPoolMemID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + getlbpmoptions := &vpcv1.GetLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: &lbPoolMemID, + } + lbPoolMem, response, err := lbc.GetLoadBalancerPoolMember(getlbpmoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return lbPoolMem, isLBPoolMemberDeleted, nil + } + return nil, "", fmt.Errorf("Error Deleting Load balancer pool member: %s\n%s", err, response) + } + return lbPoolMem, isLBPoolMemberDeletePending, nil + } +} + +func resourceIBMISLBPoolMemberExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + if len(parts) != 3 { + return false, fmt.Errorf( + "The id should contain loadbalancer Id, loadbalancer pool Id and loadbalancer poolmemebr Id") + } + + lbID := parts[0] + lbPoolID := parts[1] + lbPoolMemID := parts[2] + + if userDetails.generation == 1 { + exists, err := classiclbpmemberExists(d, meta, lbID, lbPoolID, lbPoolMemID) + return exists, err + } else { + exists, err := lbpmemberExists(d, meta, lbID, lbPoolID, lbPoolMemID) + return exists, err + } +} + +func classiclbpmemberExists(d *schema.ResourceData, meta interface{}, lbID, lbPoolID, lbPoolMemID string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + + getlbpmoptions := &vpcclassicv1.GetLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: 
&lbPoolMemID, + } + _, response, err := sess.GetLoadBalancerPoolMember(getlbpmoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Load balancer pool member: %s\n%s", err, response) + } + return true, nil +} + +func lbpmemberExists(d *schema.ResourceData, meta interface{}, lbID, lbPoolID, lbPoolMemID string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + + getlbpmoptions := &vpcv1.GetLoadBalancerPoolMemberOptions{ + LoadBalancerID: &lbID, + PoolID: &lbPoolID, + ID: &lbPoolMemID, + } + _, response, err := sess.GetLoadBalancerPoolMember(getlbpmoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Load balancer pool member: %s\n%s", err, response) + } + return true, nil +} + +func getPoolId(id string) (string, error) { + if strings.Contains(id, "/") { + parts, err := idParts(id) + if err != nil { + return "", err + } + + return parts[1], nil + } else { + return id, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_networkacls.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_networkacls.go new file mode 100644 index 00000000000..58a6f92f159 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_networkacls.go @@ -0,0 +1,1377 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "container/list" + "fmt" + "log" + "os" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isNetworkACLName = "name" + isNetworkACLRules = "rules" + isNetworkACLSubnets = "subnets" + isNetworkACLRuleID = "id" + isNetworkACLRuleName = "name" + isNetworkACLRuleAction = "action" + isNetworkACLRuleIPVersion = "ip_version" + isNetworkACLRuleSource = "source" + isNetworkACLRuleDestination = "destination" + isNetworkACLRuleDirection = "direction" + isNetworkACLRuleProtocol = "protocol" + isNetworkACLRuleICMP = "icmp" + isNetworkACLRuleICMPCode = "code" + isNetworkACLRuleICMPType = "type" + isNetworkACLRuleTCP = "tcp" + isNetworkACLRuleUDP = "udp" + isNetworkACLRulePortMax = "port_max" + isNetworkACLRulePortMin = "port_min" + isNetworkACLRuleSourcePortMax = "source_port_max" + isNetworkACLRuleSourcePortMin = "source_port_min" + isNetworkACLVPC = "vpc" + isNetworkACLResourceGroup = "resource_group" + isNetworkACLTags = "tags" + isNetworkACLCRN = "crn" +) + +func resourceIBMISNetworkACL() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISNetworkACLCreate, + Read: resourceIBMISNetworkACLRead, + Update: resourceIBMISNetworkACLUpdate, + Delete: resourceIBMISNetworkACLDelete, + Exists: resourceIBMISNetworkACLExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + isNetworkACLName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: 
InvokeValidator("ibm_is_network_acl", isNetworkACLName), + Description: "Network ACL name", + }, + isNetworkACLVPC: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "Network ACL VPC name", + }, + isNetworkACLResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "Resource group ID for the network ACL", + }, + isNetworkACLTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_network_acl", "tag")}, + Set: resourceIBMVPCHash, + Description: "List of tags", + }, + + isNetworkACLCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + isNetworkACLRules: { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isNetworkACLRuleID: { + Type: schema.TypeString, + Computed: true, + }, + isNetworkACLRuleName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleName), + }, + isNetworkACLRuleAction: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleAction), + }, + isNetworkACLRuleIPVersion: { + Type: schema.TypeString, + Computed: true, + }, + isNetworkACLRuleSource: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleSource), + }, + isNetworkACLRuleDestination: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleDestination), + }, + isNetworkACLRuleDirection: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + Description: "Direction of traffic to enforce, either inbound or outbound", + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleDirection), + }, + isNetworkACLSubnets: { + Type: schema.TypeInt, + Computed: true, + }, + isNetworkACLRuleICMP: { + Type: schema.TypeList, + MinItems: 0, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isNetworkACLRuleICMPCode: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleICMPCode), + }, + isNetworkACLRuleICMPType: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleICMPType), + }, + }, + }, + }, + + isNetworkACLRuleTCP: { + Type: schema.TypeList, + MinItems: 0, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isNetworkACLRulePortMax: { + Type: schema.TypeInt, + Optional: true, + Default: 65535, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRulePortMax), + }, + isNetworkACLRulePortMin: { + Type: 
schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRulePortMin), + }, + isNetworkACLRuleSourcePortMax: { + Type: schema.TypeInt, + Optional: true, + Default: 65535, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleSourcePortMax), + }, + isNetworkACLRuleSourcePortMin: { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleSourcePortMin), + }, + }, + }, + }, + + isNetworkACLRuleUDP: { + Type: schema.TypeList, + MinItems: 0, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isNetworkACLRulePortMax: { + Type: schema.TypeInt, + Optional: true, + Default: 65535, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRulePortMax), + }, + isNetworkACLRulePortMin: { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRulePortMin), + }, + isNetworkACLRuleSourcePortMax: { + Type: schema.TypeInt, + Optional: true, + Default: 65535, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleSourcePortMax), + }, + isNetworkACLRuleSourcePortMin: { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: InvokeValidator("ibm_is_network_acl", isNetworkACLRuleSourcePortMin), + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceIBMISNetworkACLValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + direction := "inbound, outbound" + action := "allow, deny" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRuleAction, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: action}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRuleDirection, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: direction}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRuleName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRuleDestination, + ValidateFunctionIdentifier: ValidateIPorCIDR, + Type: TypeString, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRuleSource, + ValidateFunctionIdentifier: ValidateIPorCIDR, + Type: TypeString, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRuleICMPType, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "0", + MaxValue: "254"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRuleICMPCode, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "0", + MaxValue: "255"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRulePortMin, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: 
"65535"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRulePortMax, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "65535"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRuleSourcePortMin, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "65535"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isNetworkACLRuleSourcePortMax, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "65535"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISNetworkACLResourceValidator := ResourceValidator{ResourceName: "ibm_is_network_acl", Schema: validateSchema} + return &ibmISNetworkACLResourceValidator +} + +func resourceIBMISNetworkACLCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := d.Get(isNetworkACLName).(string) + + if userDetails.generation == 1 { + err := classicNwaclCreate(d, meta, name) + if err != nil { + return err + } + } else { + err := nwaclCreate(d, meta, name) + if err != nil { + return err + } + } + return resourceIBMISNetworkACLRead(d, meta) + +} + +func classicNwaclCreate(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + nwaclTemplate := &vpcclassicv1.NetworkACLPrototype{ + Name: &name, + } + + var rules []interface{} + if rls, ok := d.GetOk(isNetworkACLRules); ok { + rules = rls.([]interface{}) + } + err = validateInlineRules(rules) + if err != nil { + return err + } + + options := &vpcclassicv1.CreateNetworkACLOptions{ + NetworkACLPrototype: nwaclTemplate, + } + + nwacl, response, err := sess.CreateNetworkACL(options) + if err != nil { + return fmt.Errorf("[DEBUG]Error while creating Network ACL err %s\n%s", err, response) + } + d.SetId(*nwacl.ID) + log.Printf("[INFO] Network ACL : %s", *nwacl.ID) + nwaclid := *nwacl.ID + + //Remove default rules + err = classicClearRules(sess, nwaclid) + if err != nil { + return err + } + + err = classicCreateInlineRules(sess, nwaclid, rules) + if err != nil { + return err + } + return nil +} + +func nwaclCreate(d *schema.ResourceData, meta interface{}, name string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + var vpc, rg string + if vpcID, ok := d.GetOk(isNetworkACLVPC); ok { + vpc = vpcID.(string) + } else { + return fmt.Errorf("Required parameter vpc is not set") + } + + nwaclTemplate := &vpcv1.NetworkACLPrototype{ + Name: &name, + VPC: &vpcv1.VPCIdentity{ + ID: &vpc, + }, + } + + if grp, ok := d.GetOk(isNetworkACLResourceGroup); ok { + rg = grp.(string) + nwaclTemplate.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + // validate each rule before attempting to create the ACL + var rules []interface{} + if rls, ok := d.GetOk(isNetworkACLRules); ok { + rules = rls.([]interface{}) + } + err = validateInlineRules(rules) + if err != nil { + return err + } + + options := &vpcv1.CreateNetworkACLOptions{ + NetworkACLPrototype: nwaclTemplate, + } + + nwacl, response, err := sess.CreateNetworkACL(options) + if err != nil { + return fmt.Errorf("[DEBUG]Error while creating Network ACL err 
%s\n%s", err, response) + } + d.SetId(*nwacl.ID) + log.Printf("[INFO] Network ACL : %s", *nwacl.ID) + nwaclid := *nwacl.ID + + //Remove default rules + err = clearRules(sess, nwaclid) + if err != nil { + return err + } + + err = createInlineRules(sess, nwaclid, rules) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isNetworkACLTags); ok || v != "" { + oldList, newList := d.GetChange(isNetworkACLTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *nwacl.CRN) + if err != nil { + log.Printf( + "Error on create of resource network acl (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func resourceIBMISNetworkACLRead(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicNwaclGet(d, meta, id) + if err != nil { + return err + } + } else { + err := nwaclGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicNwaclGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getNetworkAclOptions := &vpcclassicv1.GetNetworkACLOptions{ + ID: &id, + } + nwacl, response, err := sess.GetNetworkACL(getNetworkAclOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting Network ACL(%s) : %s\n%s", id, err, response) + } + d.Set(isNetworkACLName, *nwacl.Name) + d.Set(isNetworkACLSubnets, len(nwacl.Subnets)) + + rules := make([]interface{}, 0) + if len(nwacl.Rules) > 0 { + for _, rulex := range nwacl.Rules { + log.Println("[DEBUG] Type of the Rule", reflect.TypeOf(rulex)) + rule := make(map[string]interface{}) + switch reflect.TypeOf(rulex).String() { + case "*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp": + { + rulex := rulex.(*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp) + rule[isNetworkACLRuleID] = *rulex.ID + rule[isNetworkACLRuleName] = *rulex.Name + rule[isNetworkACLRuleAction] = *rulex.Action + rule[isNetworkACLRuleIPVersion] = *rulex.IPVersion + rule[isNetworkACLRuleSource] = *rulex.Source + rule[isNetworkACLRuleDestination] = *rulex.Destination + rule[isNetworkACLRuleDirection] = *rulex.Direction + rule[isNetworkACLRuleTCP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleUDP] = make([]map[string]int, 0, 0) + icmp := make([]map[string]int, 1, 1) + if rulex.Code != nil && rulex.Type != nil { + icmp[0] = map[string]int{ + isNetworkACLRuleICMPCode: int(*rulex.Code), + isNetworkACLRuleICMPType: int(*rulex.Type), + } + } + rule[isNetworkACLRuleICMP] = icmp + } + case "*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp": + { + rulex := rulex.(*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp) + rule[isNetworkACLRuleID] = *rulex.ID + rule[isNetworkACLRuleName] = *rulex.Name + rule[isNetworkACLRuleAction] = *rulex.Action + rule[isNetworkACLRuleIPVersion] = *rulex.IPVersion + rule[isNetworkACLRuleSource] = *rulex.Source + rule[isNetworkACLRuleDestination] = *rulex.Destination + rule[isNetworkACLRuleDirection] = *rulex.Direction + if *rulex.Protocol == "tcp" { + rule[isNetworkACLRuleICMP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleUDP] = make([]map[string]int, 0, 0) + tcp := make([]map[string]int, 1, 1) + tcp[0] = map[string]int{ + isNetworkACLRuleSourcePortMax: checkNetworkACLNil(rulex.SourcePortMax), + isNetworkACLRuleSourcePortMin: 
checkNetworkACLNil(rulex.SourcePortMin), + } + tcp[0][isNetworkACLRulePortMax] = checkNetworkACLNil(rulex.PortMax) + tcp[0][isNetworkACLRulePortMin] = checkNetworkACLNil(rulex.PortMin) + rule[isNetworkACLRuleTCP] = tcp + } else if *rulex.Protocol == "udp" { + rule[isNetworkACLRuleICMP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleTCP] = make([]map[string]int, 0, 0) + udp := make([]map[string]int, 1, 1) + udp[0] = map[string]int{ + isNetworkACLRuleSourcePortMax: checkNetworkACLNil(rulex.SourcePortMax), + isNetworkACLRuleSourcePortMin: checkNetworkACLNil(rulex.SourcePortMin), + } + udp[0][isNetworkACLRulePortMax] = checkNetworkACLNil(rulex.PortMax) + udp[0][isNetworkACLRulePortMin] = checkNetworkACLNil(rulex.PortMin) + rule[isNetworkACLRuleUDP] = udp + } + } + case "*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolAll": + { + rulex := rulex.(*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolAll) + rule[isNetworkACLRuleID] = *rulex.ID + rule[isNetworkACLRuleName] = *rulex.Name + rule[isNetworkACLRuleAction] = *rulex.Action + rule[isNetworkACLRuleIPVersion] = *rulex.IPVersion + rule[isNetworkACLRuleSource] = *rulex.Source + rule[isNetworkACLRuleDestination] = *rulex.Destination + rule[isNetworkACLRuleDirection] = *rulex.Direction + rule[isNetworkACLRuleICMP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleTCP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleUDP] = make([]map[string]int, 0, 0) + } + } + rules = append(rules, rule) + } + } + d.Set(isNetworkACLRules, rules) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/acl") + d.Set(ResourceName, *nwacl.Name) + // d.Set(ResourceCRN, *nwacl.Crn) + return nil +} + +func nwaclGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getNetworkAclOptions := &vpcv1.GetNetworkACLOptions{ + ID: &id, + } + nwacl, response, err := sess.GetNetworkACL(getNetworkAclOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting Network ACL(%s) : %s\n%s", id, err, response) + } + d.Set(isNetworkACLName, *nwacl.Name) + d.Set(isNetworkACLVPC, *nwacl.VPC.ID) + if nwacl.ResourceGroup != nil { + d.Set(isNetworkACLResourceGroup, *nwacl.ResourceGroup.ID) + d.Set(ResourceGroupName, *nwacl.ResourceGroup.Name) + } + tags, err := GetTagsUsingCRN(meta, *nwacl.CRN) + if err != nil { + log.Printf( + "Error on get of resource network acl (%s) tags: %s", d.Id(), err) + } + d.Set(isNetworkACLTags, tags) + d.Set(isNetworkACLCRN, *nwacl.CRN) + rules := make([]interface{}, 0) + if len(nwacl.Rules) > 0 { + for _, rulex := range nwacl.Rules { + log.Println("[DEBUG] Type of the Rule", reflect.TypeOf(rulex)) + rule := make(map[string]interface{}) + rule[isNetworkACLSubnets] = len(nwacl.Subnets) + switch reflect.TypeOf(rulex).String() { + case "*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp": + { + rulex := rulex.(*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp) + rule[isNetworkACLRuleID] = *rulex.ID + rule[isNetworkACLRuleName] = *rulex.Name + rule[isNetworkACLRuleAction] = *rulex.Action + rule[isNetworkACLRuleIPVersion] = *rulex.IPVersion + rule[isNetworkACLRuleSource] = *rulex.Source + rule[isNetworkACLRuleDestination] = *rulex.Destination + rule[isNetworkACLRuleDirection] = *rulex.Direction + rule[isNetworkACLRuleTCP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleUDP] = 
make([]map[string]int, 0, 0) + icmp := make([]map[string]int, 1, 1) + if rulex.Code != nil && rulex.Type != nil { + icmp[0] = map[string]int{ + isNetworkACLRuleICMPCode: int(*rulex.Code), + isNetworkACLRuleICMPType: int(*rulex.Type), + } + } + rule[isNetworkACLRuleICMP] = icmp + } + case "*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp": + { + rulex := rulex.(*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp) + rule[isNetworkACLRuleID] = *rulex.ID + rule[isNetworkACLRuleName] = *rulex.Name + rule[isNetworkACLRuleAction] = *rulex.Action + rule[isNetworkACLRuleIPVersion] = *rulex.IPVersion + rule[isNetworkACLRuleSource] = *rulex.Source + rule[isNetworkACLRuleDestination] = *rulex.Destination + rule[isNetworkACLRuleDirection] = *rulex.Direction + if *rulex.Protocol == "tcp" { + rule[isNetworkACLRuleICMP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleUDP] = make([]map[string]int, 0, 0) + tcp := make([]map[string]int, 1, 1) + tcp[0] = map[string]int{ + isNetworkACLRuleSourcePortMax: checkNetworkACLNil(rulex.SourcePortMax), + isNetworkACLRuleSourcePortMin: checkNetworkACLNil(rulex.SourcePortMin), + } + tcp[0][isNetworkACLRulePortMax] = checkNetworkACLNil(rulex.DestinationPortMax) + tcp[0][isNetworkACLRulePortMin] = checkNetworkACLNil(rulex.DestinationPortMin) + rule[isNetworkACLRuleTCP] = tcp + } else if *rulex.Protocol == "udp" { + rule[isNetworkACLRuleICMP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleTCP] = make([]map[string]int, 0, 0) + udp := make([]map[string]int, 1, 1) + udp[0] = map[string]int{ + isNetworkACLRuleSourcePortMax: checkNetworkACLNil(rulex.SourcePortMax), + isNetworkACLRuleSourcePortMin: checkNetworkACLNil(rulex.SourcePortMin), + } + udp[0][isNetworkACLRulePortMax] = checkNetworkACLNil(rulex.DestinationPortMax) + udp[0][isNetworkACLRulePortMin] = checkNetworkACLNil(rulex.DestinationPortMin) + rule[isNetworkACLRuleUDP] = udp + } + } + case "*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolAll": + { + rulex := rulex.(*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolAll) + rule[isNetworkACLRuleID] = *rulex.ID + rule[isNetworkACLRuleName] = *rulex.Name + rule[isNetworkACLRuleAction] = *rulex.Action + rule[isNetworkACLRuleIPVersion] = *rulex.IPVersion + rule[isNetworkACLRuleSource] = *rulex.Source + rule[isNetworkACLRuleDestination] = *rulex.Destination + rule[isNetworkACLRuleDirection] = *rulex.Direction + rule[isNetworkACLRuleICMP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleTCP] = make([]map[string]int, 0, 0) + rule[isNetworkACLRuleUDP] = make([]map[string]int, 0, 0) + } + } + rules = append(rules, rule) + } + } + d.Set(isNetworkACLRules, rules) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/acl") + d.Set(ResourceName, *nwacl.Name) + // d.Set(ResourceCRN, *nwacl.Crn) + return nil +} + +func resourceIBMISNetworkACLUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + + name := "" + hasChanged := false + + if d.HasChange(isNetworkACLName) { + name = d.Get(isNetworkACLName).(string) + hasChanged = true + } + + if userDetails.generation == 1 { + err := classicNwaclUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } else { + err := nwaclUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } + return resourceIBMISNetworkACLRead(d, meta) +} + +func classicNwaclUpdate(d *schema.ResourceData, meta 
interface{}, id, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + rules := d.Get(isNetworkACLRules).([]interface{}) + if hasChanged { + updateNetworkAclOptions := &vpcclassicv1.UpdateNetworkACLOptions{ + ID: &id, + } + networkACLPatchModel := &vpcclassicv1.NetworkACLPatch{ + Name: &name, + } + networkACLPatch, err := networkACLPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for NetworkACLPatch: %s", err) + } + updateNetworkAclOptions.NetworkACLPatch = networkACLPatch + + _, response, err := sess.UpdateNetworkACL(updateNetworkAclOptions) + if err != nil { + return fmt.Errorf("Error Updating Network ACL(%s) : %s\n%s", id, err, response) + } + } + if d.HasChange(isNetworkACLRules) { + err := validateInlineRules(rules) + if err != nil { + return err + } + //Delete all existing rules + err = classicClearRules(sess, id) + if err != nil { + return err + } + //Create the rules as per the def + err = classicCreateInlineRules(sess, id, rules) + if err != nil { + return err + } + } + return nil +} + +func nwaclUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + rules := d.Get(isNetworkACLRules).([]interface{}) + if hasChanged { + updateNetworkACLOptions := &vpcv1.UpdateNetworkACLOptions{ + ID: &id, + } + networkACLPatchModel := &vpcv1.NetworkACLPatch{ + Name: &name, + } + networkACLPatch, err := networkACLPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for NetworkACLPatch: %s", err) + } + updateNetworkACLOptions.NetworkACLPatch = networkACLPatch + _, response, err := sess.UpdateNetworkACL(updateNetworkACLOptions) + if err != nil { + return fmt.Errorf("Error Updating Network ACL(%s) : %s\n%s", id, err, response) + } + } + if d.HasChange(isNetworkACLTags) { + oldList, newList := d.GetChange(isNetworkACLTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, d.Get(isNetworkACLCRN).(string)) + if err != nil { + log.Printf( + "Error on update of resource network acl (%s) tags: %s", d.Id(), err) + } + } + if d.HasChange(isNetworkACLRules) { + err := validateInlineRules(rules) + if err != nil { + return err + } + //Delete all existing rules + err = clearRules(sess, id) + if err != nil { + return err + } + //Create the rules as per the def + err = createInlineRules(sess, id, rules) + if err != nil { + return err + } + } + return nil +} + +func resourceIBMISNetworkACLDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicNwaclDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := nwaclDelete(d, meta, id) + if err != nil { + return err + } + } + + d.SetId("") + return nil +} + +func classicNwaclDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getNetworkAclOptions := &vpcclassicv1.GetNetworkACLOptions{ + ID: &id, + } + _, response, err := sess.GetNetworkACL(getNetworkAclOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Network ACL (%s): %s\n%s", id, err, response) + } + + deleteNetworkAclOptions := &vpcclassicv1.DeleteNetworkACLOptions{ + ID: &id, + } + response, err = 
sess.DeleteNetworkACL(deleteNetworkAclOptions)
+  if err != nil {
+    return fmt.Errorf("Error Deleting Network ACL : %s\n%s", err, response)
+  }
+  d.SetId("")
+  return nil
+}
+
+func nwaclDelete(d *schema.ResourceData, meta interface{}, id string) error {
+  sess, err := vpcClient(meta)
+  if err != nil {
+    return err
+  }
+
+  getNetworkAclOptions := &vpcv1.GetNetworkACLOptions{
+    ID: &id,
+  }
+  _, response, err := sess.GetNetworkACL(getNetworkAclOptions)
+  if err != nil {
+    if response != nil && response.StatusCode == 404 {
+      d.SetId("")
+      return nil
+    }
+    return fmt.Errorf("Error Getting Network ACL (%s): %s\n%s", id, err, response)
+  }
+
+  deleteNetworkAclOptions := &vpcv1.DeleteNetworkACLOptions{
+    ID: &id,
+  }
+  response, err = sess.DeleteNetworkACL(deleteNetworkAclOptions)
+  if err != nil {
+    return fmt.Errorf("Error Deleting Network ACL : %s\n%s", err, response)
+  }
+  d.SetId("")
+  return nil
+}
+
+func resourceIBMISNetworkACLExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+  userDetails, err := meta.(ClientSession).BluemixUserDetails()
+  if err != nil {
+    return false, err
+  }
+  id := d.Id()
+  if userDetails.generation == 1 {
+    exists, err := classicNwaclExists(d, meta, id)
+    return exists, err
+  } else {
+    exists, err := nwaclExists(d, meta, id)
+    return exists, err
+  }
+}
+
+func classicNwaclExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) {
+  sess, err := classicVpcClient(meta)
+  if err != nil {
+    return false, err
+  }
+  getNetworkAclOptions := &vpcclassicv1.GetNetworkACLOptions{
+    ID: &id,
+  }
+  _, response, err := sess.GetNetworkACL(getNetworkAclOptions)
+  if err != nil {
+    if response != nil && response.StatusCode == 404 {
+      return false, nil
+    }
+    return false, fmt.Errorf("Error getting Network ACL: %s\n%s", err, response)
+  }
+  return true, nil
+}
+
+func nwaclExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) {
+  sess, err := vpcClient(meta)
+  if err != nil {
+    return false, err
+  }
+  getNetworkAclOptions := &vpcv1.GetNetworkACLOptions{
+    ID: &id,
+  }
+  _, response, err := sess.GetNetworkACL(getNetworkAclOptions)
+  if err != nil {
+    if response != nil && response.StatusCode == 404 {
+      return false, nil
+    }
+    return false, fmt.Errorf("Error getting Network ACL: %s\n%s", err, response)
+  }
+  return true, nil
+}
+
+func sortclassicrules(rules []*vpcclassicv1.NetworkACLRuleItem) *list.List {
+  sortedrules := list.New()
+  for _, rule := range rules {
+    if rule.Before == nil {
+      sortedrules.PushBack(rule)
+    } else {
+      inserted := false
+      for e := sortedrules.Front(); e != nil; e = e.Next() {
+        rulex := e.Value.(*vpcclassicv1.NetworkACLRuleItem)
+        // Compare the rule IDs by value: the SDK models expose them as
+        // *string, so a plain pointer comparison would never match.
+        if rulex.ID != nil && rule.Before.ID != nil && *rulex.ID == *rule.Before.ID {
+          sortedrules.InsertAfter(rule, e)
+          inserted = true
+          break
+        }
+      }
+      // If we didn't find the rule this one belongs before yet, just put it
+      // at the head of the list.
+      if !inserted {
+        sortedrules.PushFront(rule)
+      }
+    }
+  }
+  return sortedrules
+}
+
+func checkNetworkACLNil(ptr *int64) int {
+  if ptr == nil {
+    return 0
+  }
+  return int(*ptr)
+}
+
+func classicClearRules(nwaclC *vpcclassicv1.VpcClassicV1, nwaclid string) error {
+  start := ""
+  allrecs := []vpcclassicv1.NetworkACLRuleItemIntf{}
+  for {
+    listNetworkAclRulesOptions := &vpcclassicv1.ListNetworkACLRulesOptions{
+      NetworkACLID: &nwaclid,
+    }
+    if start != "" {
+      listNetworkAclRulesOptions.Start = &start
+    }
+    rawrules, response, err := nwaclC.ListNetworkACLRules(listNetworkAclRulesOptions)
+    if err != nil {
+      return fmt.Errorf("Error Listing network ACL rules : %s\n%s", err, response)
+    }
+    start =
GetNext(rawrules.Next) + allrecs = append(allrecs, rawrules.Rules...) + if start == "" { + break + } + } + + for _, rule := range allrecs { + deleteNetworkAclRuleOptions := &vpcclassicv1.DeleteNetworkACLRuleOptions{ + NetworkACLID: &nwaclid, + } + switch reflect.TypeOf(rule).String() { + case "*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp": + rule := rule.(*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp) + deleteNetworkAclRuleOptions.ID = rule.ID + case "*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp": + rule := rule.(*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp) + deleteNetworkAclRuleOptions.ID = rule.ID + case "*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolAll": + rule := rule.(*vpcclassicv1.NetworkACLRuleItemNetworkACLRuleProtocolAll) + deleteNetworkAclRuleOptions.ID = rule.ID + } + + response, err := nwaclC.DeleteNetworkACLRule(deleteNetworkAclRuleOptions) + if err != nil { + return fmt.Errorf("Error Deleting network ACL rule : %s\n%s", err, response) + } + } + return nil +} + +func clearRules(nwaclC *vpcv1.VpcV1, nwaclid string) error { + start := "" + allrecs := []vpcv1.NetworkACLRuleItemIntf{} + for { + listNetworkAclRulesOptions := &vpcv1.ListNetworkACLRulesOptions{ + NetworkACLID: &nwaclid, + } + if start != "" { + listNetworkAclRulesOptions.Start = &start + } + rawrules, response, err := nwaclC.ListNetworkACLRules(listNetworkAclRulesOptions) + if err != nil { + return fmt.Errorf("Error Listing network ACL rules : %s\n%s", err, response) + } + start = GetNext(rawrules.Next) + allrecs = append(allrecs, rawrules.Rules...) + if start == "" { + break + } + } + + for _, rule := range allrecs { + deleteNetworkAclRuleOptions := &vpcv1.DeleteNetworkACLRuleOptions{ + NetworkACLID: &nwaclid, + } + switch reflect.TypeOf(rule).String() { + case "*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp": + rule := rule.(*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp) + deleteNetworkAclRuleOptions.ID = rule.ID + case "*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp": + rule := rule.(*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp) + deleteNetworkAclRuleOptions.ID = rule.ID + case "*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolAll": + rule := rule.(*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolAll) + deleteNetworkAclRuleOptions.ID = rule.ID + } + + response, err := nwaclC.DeleteNetworkACLRule(deleteNetworkAclRuleOptions) + if err != nil { + return fmt.Errorf("Error Deleting network ACL rule : %s\n%s", err, response) + } + } + return nil +} + +func validateInlineRules(rules []interface{}) error { + for _, rule := range rules { + rulex := rule.(map[string]interface{}) + action := rulex[isNetworkACLRuleAction].(string) + if (action != "allow") && (action != "deny") { + return fmt.Errorf("Invalid action. 
valid values are allow|deny") + } + + direction := rulex[isNetworkACLRuleDirection].(string) + direction = strings.ToLower(direction) + + icmp := len(rulex[isNetworkACLRuleICMP].([]interface{})) > 0 + tcp := len(rulex[isNetworkACLRuleTCP].([]interface{})) > 0 + udp := len(rulex[isNetworkACLRuleUDP].([]interface{})) > 0 + + if (icmp && tcp) || (icmp && udp) || (tcp && udp) { + return fmt.Errorf("Only one of icmp|tcp|udp can be defined per rule") + } + + } + return nil +} + +func classicCreateInlineRules(nwaclC *vpcclassicv1.VpcClassicV1, nwaclid string, rules []interface{}) error { + before := "" + + for i := 0; i <= len(rules)-1; i++ { + rulex := rules[i].(map[string]interface{}) + + name := rulex[isNetworkACLRuleName].(string) + source := rulex[isNetworkACLRuleSource].(string) + destination := rulex[isNetworkACLRuleDestination].(string) + action := rulex[isNetworkACLRuleAction].(string) + direction := rulex[isNetworkACLRuleDirection].(string) + icmp := rulex[isNetworkACLRuleICMP].([]interface{}) + tcp := rulex[isNetworkACLRuleTCP].([]interface{}) + udp := rulex[isNetworkACLRuleUDP].([]interface{}) + icmptype := int64(-1) + icmpcode := int64(-1) + minport := int64(-1) + maxport := int64(-1) + sourceminport := int64(-1) + sourcemaxport := int64(-1) + protocol := "all" + + ruleTemplate := &vpcclassicv1.NetworkACLRulePrototype{ + Action: &action, + Destination: &destination, + Direction: &direction, + Source: &source, + Name: &name, + } + + if before != "" { + ruleTemplate.Before = &vpcclassicv1.NetworkACLRuleBeforePrototype{ + ID: &before, + } + } + + if len(icmp) > 0 { + protocol = "icmp" + ruleTemplate.Protocol = &protocol + if !isNil(icmp[0]) { + icmpval := icmp[0].(map[string]interface{}) + if val, ok := icmpval[isNetworkACLRuleICMPType]; ok { + icmptype = int64(val.(int)) + ruleTemplate.Type = &icmptype + } + if val, ok := icmpval[isNetworkACLRuleICMPCode]; ok { + icmpcode = int64(val.(int)) + ruleTemplate.Code = &icmpcode + } + } + } else if len(tcp) > 0 { + protocol = "tcp" + ruleTemplate.Protocol = &protocol + tcpval := tcp[0].(map[string]interface{}) + if val, ok := tcpval[isNetworkACLRulePortMin]; ok { + minport = int64(val.(int)) + ruleTemplate.PortMin = &minport + } + if val, ok := tcpval[isNetworkACLRulePortMax]; ok { + maxport = int64(val.(int)) + ruleTemplate.PortMax = &maxport + } + if val, ok := tcpval[isNetworkACLRuleSourcePortMin]; ok { + sourceminport = int64(val.(int)) + ruleTemplate.SourcePortMin = &sourceminport + } + if val, ok := tcpval[isNetworkACLRuleSourcePortMax]; ok { + sourcemaxport = int64(val.(int)) + ruleTemplate.SourcePortMax = &sourcemaxport + } + } else if len(udp) > 0 { + protocol = "udp" + ruleTemplate.Protocol = &protocol + udpval := udp[0].(map[string]interface{}) + if val, ok := udpval[isNetworkACLRulePortMin]; ok { + minport = int64(val.(int)) + ruleTemplate.PortMin = &minport + } + if val, ok := udpval[isNetworkACLRulePortMax]; ok { + maxport = int64(val.(int)) + ruleTemplate.PortMax = &maxport + } + if val, ok := udpval[isNetworkACLRuleSourcePortMin]; ok { + sourceminport = int64(val.(int)) + ruleTemplate.SourcePortMin = &sourceminport + } + if val, ok := udpval[isNetworkACLRuleSourcePortMax]; ok { + sourcemaxport = int64(val.(int)) + ruleTemplate.SourcePortMax = &sourcemaxport + } + } + if protocol == "all" { + ruleTemplate.Protocol = &protocol + } + + createNetworkAclRuleOptions := &vpcclassicv1.CreateNetworkACLRuleOptions{ + NetworkACLID: &nwaclid, + NetworkACLRulePrototype: ruleTemplate, + } + _, response, err := 
nwaclC.CreateNetworkACLRule(createNetworkAclRuleOptions) + if err != nil { + return fmt.Errorf("Error Creating network ACL rule : %s\n%s", err, response) + } + } + return nil +} + +func createInlineRules(nwaclC *vpcv1.VpcV1, nwaclid string, rules []interface{}) error { + before := "" + + for i := 0; i <= len(rules)-1; i++ { + rulex := rules[i].(map[string]interface{}) + + name := rulex[isNetworkACLRuleName].(string) + source := rulex[isNetworkACLRuleSource].(string) + destination := rulex[isNetworkACLRuleDestination].(string) + action := rulex[isNetworkACLRuleAction].(string) + direction := rulex[isNetworkACLRuleDirection].(string) + icmp := rulex[isNetworkACLRuleICMP].([]interface{}) + tcp := rulex[isNetworkACLRuleTCP].([]interface{}) + udp := rulex[isNetworkACLRuleUDP].([]interface{}) + icmptype := int64(-1) + icmpcode := int64(-1) + minport := int64(-1) + maxport := int64(-1) + sourceminport := int64(-1) + sourcemaxport := int64(-1) + protocol := "all" + + ruleTemplate := &vpcv1.NetworkACLRulePrototype{ + Action: &action, + Destination: &destination, + Direction: &direction, + Source: &source, + Name: &name, + } + + if before != "" { + ruleTemplate.Before = &vpcv1.NetworkACLRuleBeforePrototype{ + ID: &before, + } + } + + if len(icmp) > 0 { + protocol = "icmp" + ruleTemplate.Protocol = &protocol + if !isNil(icmp[0]) { + icmpval := icmp[0].(map[string]interface{}) + if val, ok := icmpval[isNetworkACLRuleICMPType]; ok { + icmptype = int64(val.(int)) + ruleTemplate.Type = &icmptype + } + if val, ok := icmpval[isNetworkACLRuleICMPCode]; ok { + icmpcode = int64(val.(int)) + ruleTemplate.Code = &icmpcode + } + } + } else if len(tcp) > 0 { + protocol = "tcp" + ruleTemplate.Protocol = &protocol + tcpval := tcp[0].(map[string]interface{}) + if val, ok := tcpval[isNetworkACLRulePortMin]; ok { + minport = int64(val.(int)) + ruleTemplate.DestinationPortMin = &minport + } + if val, ok := tcpval[isNetworkACLRulePortMax]; ok { + maxport = int64(val.(int)) + ruleTemplate.DestinationPortMax = &maxport + } + if val, ok := tcpval[isNetworkACLRuleSourcePortMin]; ok { + sourceminport = int64(val.(int)) + ruleTemplate.SourcePortMin = &sourceminport + } + if val, ok := tcpval[isNetworkACLRuleSourcePortMax]; ok { + sourcemaxport = int64(val.(int)) + ruleTemplate.SourcePortMax = &sourcemaxport + } + } else if len(udp) > 0 { + protocol = "udp" + ruleTemplate.Protocol = &protocol + udpval := udp[0].(map[string]interface{}) + if val, ok := udpval[isNetworkACLRulePortMin]; ok { + minport = int64(val.(int)) + ruleTemplate.DestinationPortMin = &minport + } + if val, ok := udpval[isNetworkACLRulePortMax]; ok { + maxport = int64(val.(int)) + ruleTemplate.DestinationPortMax = &maxport + } + if val, ok := udpval[isNetworkACLRuleSourcePortMin]; ok { + sourceminport = int64(val.(int)) + ruleTemplate.SourcePortMin = &sourceminport + } + if val, ok := udpval[isNetworkACLRuleSourcePortMax]; ok { + sourcemaxport = int64(val.(int)) + ruleTemplate.SourcePortMax = &sourcemaxport + } + } + if protocol == "all" { + ruleTemplate.Protocol = &protocol + } + + createNetworkAclRuleOptions := &vpcv1.CreateNetworkACLRuleOptions{ + NetworkACLID: &nwaclid, + NetworkACLRulePrototype: ruleTemplate, + } + _, response, err := nwaclC.CreateNetworkACLRule(createNetworkAclRuleOptions) + if err != nil { + return fmt.Errorf("Error Creating network ACL rule : %s\n%s", err, response) + } + } + return nil +} + +func isNil(i interface{}) bool { + return i == nil || reflect.ValueOf(i).IsNil() +} diff --git 
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_public_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_public_gateway.go new file mode 100644 index 00000000000..befc44fd3a7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_public_gateway.go @@ -0,0 +1,799 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isPublicGatewayName = "name" + isPublicGatewayFloatingIP = "floating_ip" + isPublicGatewayStatus = "status" + isPublicGatewayVPC = "vpc" + isPublicGatewayZone = "zone" + isPublicGatewayFloatingIPAddress = "address" + isPublicGatewayTags = "tags" + + isPublicGatewayProvisioning = "provisioning" + isPublicGatewayProvisioningDone = "available" + isPublicGatewayDeleting = "deleting" + isPublicGatewayDeleted = "done" + + isPublicGatewayResourceGroup = "resource_group" +) + +func resourceIBMISPublicGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISPublicGatewayCreate, + Read: resourceIBMISPublicGatewayRead, + Update: resourceIBMISPublicGatewayUpdate, + Delete: resourceIBMISPublicGatewayDelete, + Exists: resourceIBMISPublicGatewayExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + isPublicGatewayName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_public_gateway", isPublicGatewayName), + Description: "Name of the Public gateway instance", + }, + + isPublicGatewayFloatingIP: { + Type: schema.TypeMap, + Optional: true, + Computed: true, + DiffSuppressFunc: applyOnce, + }, + + isPublicGatewayStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Public gateway instance status", + }, + + isPublicGatewayResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "Public gateway resource group info", + }, + + isPublicGatewayVPC: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "Public gateway VPC info", + }, + + isPublicGatewayZone: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "Public gateway zone info", + }, + + isPublicGatewayTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_public_gateway", "tag")}, + Set: resourceIBMVPCHash, + Description: "Service tags for the public gateway instance", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, 
+ Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMISPublicGatewayValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isPublicGatewayName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISPublicGatewayResourceValidator := ResourceValidator{ResourceName: "ibm_is_public_gateway", Schema: validateSchema} + return &ibmISPublicGatewayResourceValidator +} + +func resourceIBMISPublicGatewayCreate(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + name := d.Get(isPublicGatewayName).(string) + vpc := d.Get(isPublicGatewayVPC).(string) + zone := d.Get(isPublicGatewayZone).(string) + + if userDetails.generation == 1 { + err := classicPgwCreate(d, meta, name, vpc, zone) + if err != nil { + return err + } + } else { + err := pgwCreate(d, meta, name, vpc, zone) + if err != nil { + return err + } + } + return resourceIBMISPublicGatewayRead(d, meta) +} + +func classicPgwCreate(d *schema.ResourceData, meta interface{}, name, vpc, zone string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + options := &vpcclassicv1.CreatePublicGatewayOptions{ + Name: &name, + VPC: &vpcclassicv1.VPCIdentity{ + ID: &vpc, + }, + Zone: &vpcclassicv1.ZoneIdentity{ + Name: &zone, + }, + } + floatingipID := "" + floatingipadd := "" + if floatingipdataIntf, ok := d.GetOk(isPublicGatewayFloatingIP); ok && floatingipdataIntf != nil { + fip := &vpcclassicv1.PublicGatewayFloatingIPPrototype{} + floatingipdata := floatingipdataIntf.(map[string]interface{}) + if floatingipidintf, ok := floatingipdata["id"]; ok && floatingipidintf != nil { + floatingipID = floatingipidintf.(string) + fip.ID = &floatingipID + } + if floatingipaddintf, ok := floatingipdata[isPublicGatewayFloatingIPAddress]; ok && floatingipaddintf != nil { + floatingipadd = floatingipaddintf.(string) + fip.Address = &floatingipadd + } + options.FloatingIP = fip + } + + publicgw, response, err := sess.CreatePublicGateway(options) + if err != nil { + return fmt.Errorf("Error while creating Public Gateway %s\n%s", err, response) + } + d.SetId(*publicgw.ID) + log.Printf("[INFO] PublicGateway : %s", *publicgw.ID) + + _, err = isWaitForClassicPublicGatewayAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isPublicGatewayTags); ok || v != "" { + oldList, newList := d.GetChange(isPublicGatewayTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *publicgw.CRN) + if err != nil { + log.Printf( + "Error on create of vpc public gateway (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func pgwCreate(d *schema.ResourceData, meta interface{}, name, vpc, zone string) error { + 
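// Gen2 create path: build the public gateway prototype from the resource
+ // arguments (the floating IP and resource group blocks are optional), create
+ // the gateway, wait for it to report the "available" status, and then
+ // reconcile any user or environment ("IC_ENV_TAGS") tags against its CRN.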
sess, err := vpcClient(meta) + if err != nil { + return err + } + + options := &vpcv1.CreatePublicGatewayOptions{ + Name: &name, + VPC: &vpcv1.VPCIdentity{ + ID: &vpc, + }, + Zone: &vpcv1.ZoneIdentity{ + Name: &zone, + }, + } + floatingipID := "" + floatingipadd := "" + if floatingipdataIntf, ok := d.GetOk(isPublicGatewayFloatingIP); ok && floatingipdataIntf != nil { + fip := &vpcv1.PublicGatewayFloatingIPPrototype{} + floatingipdata := floatingipdataIntf.(map[string]interface{}) + if floatingipidintf, ok := floatingipdata["id"]; ok && floatingipidintf != nil { + floatingipID = floatingipidintf.(string) + fip.ID = &floatingipID + } + if floatingipaddintf, ok := floatingipdata[isPublicGatewayFloatingIPAddress]; ok && floatingipaddintf != nil { + floatingipadd = floatingipaddintf.(string) + fip.Address = &floatingipadd + } + options.FloatingIP = fip + } + if grp, ok := d.GetOk(isPublicGatewayResourceGroup); ok { + rg := grp.(string) + options.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + + publicgw, response, err := sess.CreatePublicGateway(options) + if err != nil { + return fmt.Errorf("Error while creating Public Gateway %s\n%s", err, response) + } + d.SetId(*publicgw.ID) + log.Printf("[INFO] PublicGateway : %s", *publicgw.ID) + + _, err = isWaitForPublicGatewayAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isPublicGatewayTags); ok || v != "" { + oldList, newList := d.GetChange(isPublicGatewayTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *publicgw.CRN) + if err != nil { + log.Printf( + "Error on create of vpc public gateway (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func isWaitForClassicPublicGatewayAvailable(publicgwC *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for public gateway (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isPublicGatewayProvisioning}, + Target: []string{isPublicGatewayProvisioningDone, ""}, + Refresh: isClassicPublicGatewayRefreshFunc(publicgwC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicPublicGatewayRefreshFunc(publicgwC *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getPublicGatewayOptions := &vpcclassicv1.GetPublicGatewayOptions{ + ID: &id, + } + publicgw, response, err := publicgwC.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + return nil, "", fmt.Errorf("Error getting Public Gateway : %s\n%s", err, response) + } + + if *publicgw.Status == isPublicGatewayProvisioningDone { + return publicgw, isPublicGatewayProvisioningDone, nil + } + + return publicgw, isPublicGatewayProvisioning, nil + } +} + +func isWaitForPublicGatewayAvailable(publicgwC *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for public gateway (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isPublicGatewayProvisioning}, + Target: []string{isPublicGatewayProvisioningDone, ""}, + Refresh: isPublicGatewayRefreshFunc(publicgwC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isPublicGatewayRefreshFunc(publicgwC *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() 
(interface{}, string, error) { + getPublicGatewayOptions := &vpcv1.GetPublicGatewayOptions{ + ID: &id, + } + publicgw, response, err := publicgwC.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + return nil, "", fmt.Errorf("Error getting Public Gateway : %s\n%s", err, response) + } + + if *publicgw.Status == isPublicGatewayProvisioningDone { + return publicgw, isPublicGatewayProvisioningDone, nil + } + + return publicgw, isPublicGatewayProvisioning, nil + } +} + +func resourceIBMISPublicGatewayRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicPgwGet(d, meta, id) + if err != nil { + return err + } + } else { + err := pgwGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicPgwGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getPublicGatewayOptions := &vpcclassicv1.GetPublicGatewayOptions{ + ID: &id, + } + publicgw, response, err := sess.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting Public Gateway : %s\n%s", err, response) + } + d.Set(isPublicGatewayName, *publicgw.Name) + if publicgw.FloatingIP != nil { + floatIP := map[string]interface{}{ + "id": *publicgw.FloatingIP.ID, + isPublicGatewayFloatingIPAddress: *publicgw.FloatingIP.Address, + } + d.Set(isPublicGatewayFloatingIP, floatIP) + + } + d.Set(isPublicGatewayStatus, *publicgw.Status) + d.Set(isPublicGatewayZone, *publicgw.Zone.Name) + d.Set(isPublicGatewayVPC, *publicgw.VPC.ID) + tags, err := GetTagsUsingCRN(meta, *publicgw.CRN) + if err != nil { + log.Printf( + "Error on get of vpc public gateway (%s) tags: %s", id, err) + } + d.Set(isPublicGatewayTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/publicGateways") + d.Set(ResourceName, *publicgw.Name) + d.Set(ResourceCRN, *publicgw.CRN) + d.Set(ResourceStatus, *publicgw.Status) + return nil +} + +func pgwGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getPublicGatewayOptions := &vpcv1.GetPublicGatewayOptions{ + ID: &id, + } + publicgw, response, err := sess.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting Public Gateway : %s\n%s", err, response) + } + d.Set(isPublicGatewayName, *publicgw.Name) + if publicgw.FloatingIP != nil { + floatIP := map[string]interface{}{ + "id": *publicgw.FloatingIP.ID, + isPublicGatewayFloatingIPAddress: *publicgw.FloatingIP.Address, + } + d.Set(isPublicGatewayFloatingIP, floatIP) + + } + d.Set(isPublicGatewayStatus, *publicgw.Status) + d.Set(isPublicGatewayZone, *publicgw.Zone.Name) + d.Set(isPublicGatewayVPC, *publicgw.VPC.ID) + tags, err := GetTagsUsingCRN(meta, *publicgw.CRN) + if err != nil { + log.Printf( + "Error on get of vpc public gateway (%s) tags: %s", id, err) + } + d.Set(isPublicGatewayTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/publicGateways") + d.Set(ResourceName, *publicgw.Name) + d.Set(ResourceCRN, 
*publicgw.CRN) + d.Set(ResourceStatus, *publicgw.Status) + if publicgw.ResourceGroup != nil { + d.Set(isPublicGatewayResourceGroup, *publicgw.ResourceGroup.ID) + d.Set(ResourceGroupName, *publicgw.ResourceGroup.Name) + } + return nil +} + +func resourceIBMISPublicGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + + name := "" + hasChanged := false + + if d.HasChange(isPublicGatewayName) { + name = d.Get(isPublicGatewayName).(string) + hasChanged = true + } + if userDetails.generation == 1 { + err := classicPgwUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } else { + err := pgwUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } + return resourceIBMISPublicGatewayRead(d, meta) +} + +func classicPgwUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isPublicGatewayTags) { + getPublicGatewayOptions := &vpcclassicv1.GetPublicGatewayOptions{ + ID: &id, + } + publicgw, response, err := sess.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + return fmt.Errorf("Error getting Public Gateway : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isPublicGatewayTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *publicgw.CRN) + if err != nil { + log.Printf( + "Error on update of resource Public Gateway (%s) tags: %s", id, err) + } + } + if hasChanged { + updatePublicGatewayOptions := &vpcclassicv1.UpdatePublicGatewayOptions{ + ID: &id, + } + + PublicGatewayPatchModel := &vpcclassicv1.PublicGatewayPatch{ + Name: &name, + } + PublicGatewayPatch, err := PublicGatewayPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for PublicGatewayPatch: %s", err) + } + updatePublicGatewayOptions.PublicGatewayPatch = PublicGatewayPatch + + _, response, err := sess.UpdatePublicGateway(updatePublicGatewayOptions) + if err != nil { + return fmt.Errorf("Error Updating Public Gateway : %s\n%s", err, response) + } + } + return nil +} + +func pgwUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isPublicGatewayTags) { + getPublicGatewayOptions := &vpcv1.GetPublicGatewayOptions{ + ID: &id, + } + publicgw, response, err := sess.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + return fmt.Errorf("Error getting Public Gateway : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isPublicGatewayTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *publicgw.CRN) + if err != nil { + log.Printf( + "Error on update of resource Public Gateway (%s) tags: %s", id, err) + } + } + if hasChanged { + updatePublicGatewayOptions := &vpcv1.UpdatePublicGatewayOptions{ + ID: &id, + } + PublicGatewayPatchModel := &vpcv1.PublicGatewayPatch{ + Name: &name, + } + PublicGatewayPatch, err := PublicGatewayPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for PublicGatewayPatch: %s", err) + } + updatePublicGatewayOptions.PublicGatewayPatch = PublicGatewayPatch + _, response, err := sess.UpdatePublicGateway(updatePublicGatewayOptions) + if err != nil { + return fmt.Errorf("Error Updating Public Gateway : %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISPublicGatewayDelete(d *schema.ResourceData, 
meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicPgwDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := pgwDelete(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicPgwDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getPublicGatewayOptions := &vpcclassicv1.GetPublicGatewayOptions{ + ID: &id, + } + _, response, err := sess.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Public Gateway (%s): %s\n%s", id, err, response) + } + + deletePublicGatewayOptions := &vpcclassicv1.DeletePublicGatewayOptions{ + ID: &id, + } + response, err = sess.DeletePublicGateway(deletePublicGatewayOptions) + if err != nil { + return fmt.Errorf("Error Deleting Public Gateway : %s\n%s", err, response) + } + _, err = isWaitForClassicPublicGatewayDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func pgwDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getPublicGatewayOptions := &vpcv1.GetPublicGatewayOptions{ + ID: &id, + } + _, response, err := sess.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Public Gateway (%s): %s\n%s", id, err, response) + } + + deletePublicGatewayOptions := &vpcv1.DeletePublicGatewayOptions{ + ID: &id, + } + response, err = sess.DeletePublicGateway(deletePublicGatewayOptions) + if err != nil { + return fmt.Errorf("Error Deleting Public Gateway : %s\n%s", err, response) + } + _, err = isWaitForPublicGatewayDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func isWaitForClassicPublicGatewayDeleted(pg *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for public gateway (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isPublicGatewayDeleting}, + Target: []string{isPublicGatewayDeleted, ""}, + Refresh: isClassicPublicGatewayDeleteRefreshFunc(pg, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicPublicGatewayDeleteRefreshFunc(pg *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getPublicGatewayOptions := &vpcclassicv1.GetPublicGatewayOptions{ + ID: &id, + } + pgw, response, err := pg.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return pgw, isPublicGatewayDeleted, nil + } + return nil, "", fmt.Errorf("The Public Gateway %s failed to delete: %s\n%s", id, err, response) + } + return pgw, isPublicGatewayDeleting, nil + } +} + +func isWaitForPublicGatewayDeleted(pg *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for public gateway (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: 
[]string{"retry", isPublicGatewayDeleting}, + Target: []string{isPublicGatewayDeleted, ""}, + Refresh: isPublicGatewayDeleteRefreshFunc(pg, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isPublicGatewayDeleteRefreshFunc(pg *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getPublicGatewayOptions := &vpcv1.GetPublicGatewayOptions{ + ID: &id, + } + pgw, response, err := pg.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return pgw, isPublicGatewayDeleted, nil + } + return nil, "", fmt.Errorf("The Public Gateway %s failed to delete: %s\n%s", id, err, response) + } + return pgw, isPublicGatewayDeleting, nil + } +} + +func resourceIBMISPublicGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + if userDetails.generation == 1 { + exists, err := classicPgwExists(d, meta, id) + return exists, err + } else { + exists, err := pgwExists(d, meta, id) + return exists, err + } +} + +func classicPgwExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getPublicGatewayOptions := &vpcclassicv1.GetPublicGatewayOptions{ + ID: &id, + } + _, response, err := sess.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Public Gateway: %s\n%s", err, response) + } + return true, nil +} + +func pgwExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getPublicGatewayOptions := &vpcv1.GetPublicGatewayOptions{ + ID: &id, + } + _, response, err := sess.GetPublicGateway(getPublicGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Public Gateway: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group.go new file mode 100644 index 00000000000..26e33c04b96 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group.go @@ -0,0 +1,789 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+  "log"
+  "os"
+  "reflect"
+
+  "github.com/IBM/vpc-go-sdk/vpcclassicv1"
+  "github.com/IBM/vpc-go-sdk/vpcv1"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+  isSecurityGroupName = "name"
+  isSecurityGroupVPC = "vpc"
+  isSecurityGroupRules = "rules"
+  isSecurityGroupResourceGroup = "resource_group"
+  isSecurityGroupTags = "tags"
+  isSecurityGroupCRN = "crn"
+)
+
+func resourceIBMISSecurityGroup() *schema.Resource {
+
+  return &schema.Resource{
+    Create: resourceIBMISSecurityGroupCreate,
+    Read: resourceIBMISSecurityGroupRead,
+    Update: resourceIBMISSecurityGroupUpdate,
+    Delete: resourceIBMISSecurityGroupDelete,
+    Exists: resourceIBMISSecurityGroupExists,
+    Importer: &schema.ResourceImporter{},
+
+    CustomizeDiff: customdiff.Sequence(
+      func(diff *schema.ResourceDiff, v interface{}) error {
+        return resourceTagsCustomizeDiff(diff)
+      },
+    ),
+
+    Schema: map[string]*schema.Schema{
+
+      isSecurityGroupName: {
+        Type: schema.TypeString,
+        Optional: true,
+        Computed: true,
+        Description: "Security group name",
+        ValidateFunc: InvokeValidator("ibm_is_security_group", isSecurityGroupName),
+      },
+      isSecurityGroupVPC: {
+        Type: schema.TypeString,
+        Required: true,
+        Description: "Security group's VPC id",
+        ForceNew: true,
+      },
+
+      isSecurityGroupTags: {
+        Type: schema.TypeSet,
+        Optional: true,
+        Computed: true,
+        Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_security_group", "tag")},
+        Set: resourceIBMVPCHash,
+        Description: "List of tags",
+      },
+
+      isSecurityGroupCRN: {
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The crn of the resource",
+      },
+
+      isSecurityGroupRules: {
+        Type: schema.TypeList,
+        Computed: true,
+        Description: "Security Rules",
+        Elem: &schema.Resource{
+          Schema: makeIBMISSecurityRuleSchema(),
+        },
+      },
+
+      isSecurityGroupResourceGroup: {
+        Type: schema.TypeString,
+        Optional: true,
+        Computed: true,
+        ForceNew: true,
+        Description: "Resource Group ID",
+      },
+
+      ResourceControllerURL: {
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
+      },
+
+      ResourceName: {
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The name of the resource",
+      },
+
+      ResourceCRN: {
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The crn of the resource",
+      },
+
+      ResourceGroupName: {
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The resource group name in which resource is provisioned",
+      },
+    },
+  }
+}
+
+func resourceIBMISSecurityGroupValidator() *ResourceValidator {
+
+  validateSchema := make([]ValidateSchema, 1)
+  validateSchema = append(validateSchema,
+    ValidateSchema{
+      Identifier: isSecurityGroupName,
+      ValidateFunctionIdentifier: ValidateRegexpLen,
+      Type: TypeString,
+      Required: true,
+      Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`,
+      MinValueLength: 1,
+      MaxValueLength: 63})
+
+  validateSchema = append(validateSchema,
+    ValidateSchema{
+      Identifier: "tag",
+      ValidateFunctionIdentifier: ValidateRegexpLen,
+      Type: TypeString,
+      Optional: true,
+      Regexp: `^[A-Za-z0-9:_ .-]+$`,
+      MinValueLength: 1,
+      MaxValueLength: 128})
+
+  ibmISSecurityGroupResourceValidator := ResourceValidator{ResourceName: "ibm_is_security_group", Schema: validateSchema}
+  return &ibmISSecurityGroupResourceValidator
+}
+
+func
resourceIBMISSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + vpc := d.Get(isSecurityGroupVPC).(string) + if userDetails.generation == 1 { + err := classicSgCreate(d, meta, vpc) + if err != nil { + return err + } + } else { + err := sgCreate(d, meta, vpc) + if err != nil { + return err + } + } + return resourceIBMISSecurityGroupRead(d, meta) +} + +func classicSgCreate(d *schema.ResourceData, meta interface{}, vpc string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + createSecurityGroupOptions := &vpcclassicv1.CreateSecurityGroupOptions{ + VPC: &vpcclassicv1.VPCIdentity{ + ID: &vpc, + }, + } + var rg, name string + if grp, ok := d.GetOk(isSecurityGroupResourceGroup); ok { + rg = grp.(string) + createSecurityGroupOptions.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{ + ID: &rg, + } + } + if nm, ok := d.GetOk(isSecurityGroupName); ok { + name = nm.(string) + createSecurityGroupOptions.Name = &name + } + + sg, response, err := sess.CreateSecurityGroup(createSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error while creating Security Group %s\n%s", err, response) + } + d.SetId(*sg.ID) + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isSecurityGroupTags); ok || v != "" { + oldList, newList := d.GetChange(isSecurityGroupTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *sg.CRN) + if err != nil { + log.Printf("Error while creating Security Group tags %s\n%s", *sg.ID, err) + } + } + return nil +} + +func sgCreate(d *schema.ResourceData, meta interface{}, vpc string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + createSecurityGroupOptions := &vpcv1.CreateSecurityGroupOptions{ + VPC: &vpcv1.VPCIdentity{ + ID: &vpc, + }, + } + var rg, name string + if grp, ok := d.GetOk(isSecurityGroupResourceGroup); ok { + rg = grp.(string) + createSecurityGroupOptions.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + if nm, ok := d.GetOk(isSecurityGroupName); ok { + name = nm.(string) + createSecurityGroupOptions.Name = &name + } + sg, response, err := sess.CreateSecurityGroup(createSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error while creating Security Group %s\n%s", err, response) + } + d.SetId(*sg.ID) + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isSecurityGroupTags); ok || v != "" { + oldList, newList := d.GetChange(isSecurityGroupTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *sg.CRN) + if err != nil { + log.Printf( + "Error while creating Security Group tags : %s\n%s", *sg.ID, err) + } + } + return nil +} + +func resourceIBMISSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicSgGet(d, meta, id) + if err != nil { + return err + } + } else { + err := sgGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicSgGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getSecurityGroupOptions := &vpcclassicv1.GetSecurityGroupOptions{ + ID: &id, + } + group, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting 
Security Group : %s\n%s", err, response) + } + tags, err := GetTagsUsingCRN(meta, *group.CRN) + if err != nil { + log.Printf( + "Error getting Security Group tags : %s\n%s", d.Id(), err) + } + d.Set(isSecurityGroupTags, tags) + d.Set(isSecurityGroupCRN, *group.CRN) + d.Set(isSecurityGroupName, *group.Name) + d.Set(isSecurityGroupVPC, *group.VPC.ID) + rules := make([]map[string]interface{}, 0) + if len(group.Rules) > 0 { + for _, rule := range group.Rules { + switch reflect.TypeOf(rule).String() { + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + rule := rule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + r := make(map[string]interface{}) + if rule.Code != nil { + r[isSecurityGroupRuleCode] = int(*rule.Code) + } + if rule.Type != nil { + r[isSecurityGroupRuleType] = int(*rule.Type) + } + r[isSecurityGroupRuleDirection] = *rule.Direction + r[isSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isSecurityGroupRuleProtocol] = *rule.Protocol + } + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + rule := rule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + r := make(map[string]interface{}) + r[isSecurityGroupRuleDirection] = *rule.Direction + r[isSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isSecurityGroupRuleProtocol] = *rule.Protocol + } + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + rule := rule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + r := make(map[string]interface{}) + if rule.PortMin != nil { + r[isSecurityGroupRulePortMin] = int(*rule.PortMin) + } + if rule.PortMax != nil { + r[isSecurityGroupRulePortMax] = int(*rule.PortMax) + } + r[isSecurityGroupRuleDirection] = *rule.Direction + r[isSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isSecurityGroupRuleProtocol] = *rule.Protocol + } + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + } + } + } + d.Set(isSecurityGroupRules, rules) + d.SetId(*group.ID) + if group.ResourceGroup != nil { + d.Set(isSecurityGroupResourceGroup, group.ResourceGroup.ID) + rsMangClient, err := meta.(ClientSession).ResourceManagementAPIv2() + if err != nil { + return err + } + grp, err := 
rsMangClient.ResourceGroup().Get(*group.ResourceGroup.ID) + if err != nil { + return err + } + d.Set(ResourceGroupName, grp.Name) + } + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/securityGroups") + d.Set(ResourceName, *group.Name) + d.Set(ResourceCRN, *group.CRN) + return nil +} + +func sgGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getSecurityGroupOptions := &vpcv1.GetSecurityGroupOptions{ + ID: &id, + } + group, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting Security Group : %s\n%s", err, response) + } + tags, err := GetTagsUsingCRN(meta, *group.CRN) + if err != nil { + log.Printf( + "Error getting Security Group tags : %s\n%s", d.Id(), err) + } + d.Set(isSecurityGroupTags, tags) + d.Set(isSecurityGroupCRN, *group.CRN) + d.Set(isSecurityGroupName, *group.Name) + d.Set(isSecurityGroupVPC, *group.VPC.ID) + rules := make([]map[string]interface{}, 0) + if len(group.Rules) > 0 { + for _, rule := range group.Rules { + switch reflect.TypeOf(rule).String() { + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + rule := rule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + r := make(map[string]interface{}) + if rule.Code != nil { + r[isSecurityGroupRuleCode] = int(*rule.Code) + } + if rule.Type != nil { + r[isSecurityGroupRuleType] = int(*rule.Type) + } + r[isSecurityGroupRuleDirection] = *rule.Direction + r[isSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isSecurityGroupRuleProtocol] = *rule.Protocol + } + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + rule := rule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + r := make(map[string]interface{}) + r[isSecurityGroupRuleDirection] = *rule.Direction + r[isSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isSecurityGroupRuleProtocol] = *rule.Protocol + } + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + rule := rule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + r := make(map[string]interface{}) + if rule.PortMin != nil { + r[isSecurityGroupRulePortMin] = int(*rule.PortMin) + } + if rule.PortMax != nil { + r[isSecurityGroupRulePortMax] = int(*rule.PortMax) + } + r[isSecurityGroupRuleDirection] = *rule.Direction + r[isSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isSecurityGroupRuleProtocol] = *rule.Protocol + } + remote, ok := 
rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + } + } + } + d.Set(isSecurityGroupRules, rules) + d.SetId(*group.ID) + if group.ResourceGroup != nil { + d.Set(isSecurityGroupResourceGroup, group.ResourceGroup.ID) + d.Set(ResourceGroupName, group.ResourceGroup.Name) + } + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/securityGroups") + d.Set(ResourceName, *group.Name) + d.Set(ResourceCRN, *group.CRN) + return nil +} + +func resourceIBMISSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + name := "" + hasChanged := false + + if d.HasChange(isSecurityGroupTags) { + oldList, newList := d.GetChange(isSecurityGroupTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, d.Get(isSecurityGroupCRN).(string)) + if err != nil { + log.Printf( + "Error Updating Security Group tags: %s\n%s", d.Id(), err) + } + } + + if d.HasChange(isSecurityGroupName) { + name = d.Get(isSecurityGroupName).(string) + hasChanged = true + } else { + return resourceIBMISSecurityGroupRead(d, meta) + } + if userDetails.generation == 1 { + err := classicSgUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } else { + err := sgUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } + return resourceIBMISSecurityGroupRead(d, meta) +} + +func classicSgUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if hasChanged { + updateSecurityGroupOptions := &vpcclassicv1.UpdateSecurityGroupOptions{ + ID: &id, + } + securityGroupPatchModel := &vpcclassicv1.SecurityGroupPatch{ + Name: &name, + } + securityGroupPatch, err := securityGroupPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for SecurityGroupPatch: %s", err) + } + updateSecurityGroupOptions.SecurityGroupPatch = securityGroupPatch + _, response, err := sess.UpdateSecurityGroup(updateSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error Updating Security Group : %s\n%s", err, response) + } + } + return nil +} + +func sgUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if hasChanged { + updateSecurityGroupOptions := &vpcv1.UpdateSecurityGroupOptions{ + ID: &id, + } + securityGroupPatchModel := &vpcv1.SecurityGroupPatch{ + Name: &name, + } + securityGroupPatch, err := securityGroupPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for SecurityGroupPatch: %s", err) + } + updateSecurityGroupOptions.SecurityGroupPatch = securityGroupPatch + _, response, err := sess.UpdateSecurityGroup(updateSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error Updating Security Group : %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := 
meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicSgDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := sgDelete(d, meta, id) + if err != nil { + return err + } + } + d.SetId("") + return nil +} + +func classicSgDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getSecurityGroupOptions := &vpcclassicv1.GetSecurityGroupOptions{ + ID: &id, + } + _, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Security Group (%s): %s\n%s", id, err, response) + } + + deleteSecurityGroupOptions := &vpcclassicv1.DeleteSecurityGroupOptions{ + ID: &id, + } + response, err = sess.DeleteSecurityGroup(deleteSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error Deleting Security Group : %s\n%s", err, response) + } + d.SetId("") + return nil +} + +func sgDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getSecurityGroupOptions := &vpcv1.GetSecurityGroupOptions{ + ID: &id, + } + _, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Security Group (%s): %s\n%s", id, err, response) + } + + deleteSecurityGroupOptions := &vpcv1.DeleteSecurityGroupOptions{ + ID: &id, + } + response, err = sess.DeleteSecurityGroup(deleteSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error Deleting Security Group : %s\n%s", err, response) + } + d.SetId("") + return nil +} + +func resourceIBMISSecurityGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + if userDetails.generation == 1 { + exists, err := classicSgExists(d, meta, id) + return exists, err + } else { + exists, err := sgExists(d, meta, id) + return exists, err + } +} + +func classicSgExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getSecurityGroupOptions := &vpcclassicv1.GetSecurityGroupOptions{ + ID: &id, + } + _, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Security Group: %s\n%s", err, response) + } + return true, nil +} + +func sgExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getSecurityGroupOptions := &vpcv1.GetSecurityGroupOptions{ + ID: &id, + } + _, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Security Group: %s\n%s", err, response) + } + return true, nil +} + +func makeIBMISSecurityRuleSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + + isSecurityGroupRuleDirection: { + Type: schema.TypeString, + Computed: true, + Description: "Direction of traffic to enforce, either inbound or 
outbound", + }, + + isSecurityGroupRuleIPVersion: { + Type: schema.TypeString, + Computed: true, + Description: "IP version: ipv4 or ipv6", + }, + + isSecurityGroupRuleRemote: { + Type: schema.TypeString, + Computed: true, + Description: "Security group id: an IP address, a CIDR block, or a single security group identifier", + }, + + isSecurityGroupRuleType: { + Type: schema.TypeInt, + Computed: true, + }, + + isSecurityGroupRuleCode: { + Type: schema.TypeInt, + Computed: true, + }, + + isSecurityGroupRulePortMin: { + Type: schema.TypeInt, + Computed: true, + }, + + isSecurityGroupRulePortMax: { + Type: schema.TypeInt, + Computed: true, + }, + + isSecurityGroupRuleProtocol: { + Type: schema.TypeString, + Computed: true, + }, + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group_network_interface_attachment.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group_network_interface_attachment.go new file mode 100644 index 00000000000..c3a653dd7d2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group_network_interface_attachment.go @@ -0,0 +1,504 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isSGNICAGroupId = "security_group" + isSGNICANicId = "network_interface" + isSGNICAInstanceNwInterfaceID = "instance_network_interface" + isSGNICAName = "name" + isSGNICAPortSpeed = "port_speed" + isSGNICAPrimaryIPV4Address = "primary_ipv4_address" + isSGNICASecondaryAddresses = "secondary_address" + isSGNICASecurityGroups = "security_groups" + isSGNICASecurityGroupCRN = "crn" + isSGNICASecurityGroupID = "id" + isSGNICASecurityGroupName = "name" + isSGNICAStatus = "status" + isSGNICASubnet = "subnet" + isSGNICAType = "type" + isSGNICAFloatingIps = "floating_ips" + isSGNICAFloatingIpID = "id" + isSGNICAFloatingIpName = "name" + isSGNICAFloatingIpCRN = "crn" +) + +func resourceIBMISSecurityGroupNetworkInterfaceAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISSecurityGroupNetworkInterfaceAttachmentCreate, + Read: resourceIBMISSecurityGroupNetworkInterfaceAttachmentRead, + Delete: resourceIBMISSecurityGroupNetworkInterfaceAttachmentDelete, + Exists: resourceIBMISSecurityGroupNetworkInterfaceAttachmentExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + isSGNICAGroupId: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "security group network interface attachment group ID", + }, + isSGNICANicId: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "security group network interface attachment NIC ID", + }, + isSGNICAName: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment name", + }, + isSGNICAInstanceNwInterfaceID: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment network interface ID", + }, + isSGNICAPortSpeed: { + Type: schema.TypeInt, + Computed: true, + Description: "security group network interface attachment port speed", + }, + isSGNICAPrimaryIPV4Address: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment Primary IPV4 address", 
+ }, + isSGNICASecondaryAddresses: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "security group network interface attachment secondary address", + }, + isSGNICAStatus: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment status", + }, + isSGNICASubnet: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment subnet", + }, + isSGNICAType: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment type", + }, + isSGNICAFloatingIps: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isSGNICAFloatingIpID: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment floating IP ID", + }, + isSGNICAFloatingIpName: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment floating IP name", + }, + isSGNICAFloatingIpCRN: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment floating IP CRN", + }, + }, + }, + }, + isSGNICASecurityGroups: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isSGNICASecurityGroupID: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment security group ID", + }, + isSGNICASecurityGroupCRN: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment security group CRN", + }, + isSGNICASecurityGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "security group network interface attachment security group name", + }, + }, + }, + }, + + RelatedCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the Security Group", + }, + }, + } +} + +func resourceIBMISSecurityGroupNetworkInterfaceAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + sgID := d.Get(isSGNICAGroupId).(string) + nicID := d.Get(isSGNICANicId).(string) + + if userDetails.generation == 1 { + err := classicSgnicCreate(d, meta, sgID, nicID) + if err != nil { + return err + } + } else { + err := sgnicCreate(d, meta, sgID, nicID) + if err != nil { + return err + } + } + return resourceIBMISSecurityGroupNetworkInterfaceAttachmentRead(d, meta) + +} + +func classicSgnicCreate(d *schema.ResourceData, meta interface{}, sgID, nicID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.AddSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + _, response, err := sess.AddSecurityGroupNetworkInterface(options) + if err != nil { + return fmt.Errorf("Error while creating SecurityGroup NetworkInterface Binding %s\n%s", err, response) + } + d.SetId(fmt.Sprintf("%s/%s", sgID, nicID)) + return nil +} + +func sgnicCreate(d *schema.ResourceData, meta interface{}, sgID, nicID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.AddSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + _, response, err := sess.AddSecurityGroupNetworkInterface(options) + if err != nil { + return fmt.Errorf("Error while creating 
SecurityGroup NetworkInterface Binding %s\n%s", err, response) + } + d.SetId(fmt.Sprintf("%s/%s", sgID, nicID)) + return nil +} + +func resourceIBMISSecurityGroupNetworkInterfaceAttachmentRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + sgID := parts[0] + nicID := parts[1] + if userDetails.generation == 1 { + err := classicSgnicGet(d, meta, sgID, nicID) + if err != nil { + return err + } + } else { + err := sgnicGet(d, meta, sgID, nicID) + if err != nil { + return err + } + } + return nil +} + +func classicSgnicGet(d *schema.ResourceData, meta interface{}, sgID, nicID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getSecurityGroupNetworkInterfaceOptions := &vpcclassicv1.GetSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + instanceNic, response, err := sess.GetSecurityGroupNetworkInterface(getSecurityGroupNetworkInterfaceOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting NetworkInterface(%s) for the SecurityGroup (%s) : %s\n%s", nicID, sgID, err, response) + } + d.Set(isSGNICAGroupId, sgID) + d.Set(isSGNICANicId, nicID) + d.Set(isSGNICAInstanceNwInterfaceID, *instanceNic.ID) + d.Set(isSGNICAName, *instanceNic.Name) + d.Set(isSGNICAPortSpeed, *instanceNic.PortSpeed) + d.Set(isSGNICAPrimaryIPV4Address, *instanceNic.PrimaryIpv4Address) + // d.Set(isSGNICAStatus, *instanceNic.Status) + d.Set(isSGNICAType, *instanceNic.Type) + if instanceNic.Subnet != nil { + d.Set(isSGNICASubnet, *instanceNic.Subnet.ID) + } + sgs := make([]map[string]interface{}, len(instanceNic.SecurityGroups)) + for i, sgObj := range instanceNic.SecurityGroups { + sg := make(map[string]interface{}) + sg[isSGNICASecurityGroupCRN] = *sgObj.CRN + sg[isSGNICASecurityGroupID] = *sgObj.ID + sg[isSGNICASecurityGroupName] = *sgObj.Name + sgs[i] = sg + } + d.Set(isSGNICASecurityGroups, sgs) + + fps := make([]map[string]interface{}, len(instanceNic.FloatingIps)) + for i, fpObj := range instanceNic.FloatingIps { + fp := make(map[string]interface{}) + fp[isSGNICAFloatingIpCRN] = fpObj.CRN + fp[isSGNICAFloatingIpID] = *fpObj.ID + fp[isSGNICAFloatingIpName] = *fpObj.Name + fps[i] = fp + } + d.Set(isSGNICAFloatingIps, fps) + + // d.Set(isSGNICASecondaryAddresses, *instanceNic.SecondaryAddresses) + getSecurityGroupOptions := &vpcclassicv1.GetSecurityGroupOptions{ + ID: &sgID, + } + sg, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error Getting Security Group : %s\n%s", err, response) + } + d.Set(RelatedCRN, *sg.CRN) + return nil +} + +func sgnicGet(d *schema.ResourceData, meta interface{}, sgID, nicID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getSecurityGroupNetworkInterfaceOptions := &vpcv1.GetSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + instanceNic, response, err := sess.GetSecurityGroupNetworkInterface(getSecurityGroupNetworkInterfaceOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting NetworkInterface(%s) for the SecurityGroup (%s) : %s\n%s", nicID, sgID, err, response) + } + d.Set(isSGNICAGroupId, sgID) + d.Set(isSGNICANicId, nicID) + 
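+ // Mirror the fetched network interface into Terraform state; sgID and nicID were parsed from the composed "sgID/nicID" resource ID by the Read function.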
d.Set(isSGNICAInstanceNwInterfaceID, *instanceNic.ID) + d.Set(isSGNICAName, *instanceNic.Name) + d.Set(isSGNICAPortSpeed, *instanceNic.PortSpeed) + d.Set(isSGNICAPrimaryIPV4Address, *instanceNic.PrimaryIpv4Address) + d.Set(isSGNICAStatus, *instanceNic.Status) + d.Set(isSGNICAType, *instanceNic.Type) + if instanceNic.Subnet != nil { + d.Set(isSGNICASubnet, *instanceNic.Subnet.ID) + } + sgs := make([]map[string]interface{}, len(instanceNic.SecurityGroups)) + for i, sgObj := range instanceNic.SecurityGroups { + sg := make(map[string]interface{}) + sg[isSGNICASecurityGroupCRN] = *sgObj.CRN + sg[isSGNICASecurityGroupID] = *sgObj.ID + sg[isSGNICASecurityGroupName] = *sgObj.Name + sgs[i] = sg + } + d.Set(isSGNICASecurityGroups, sgs) + + fps := make([]map[string]interface{}, len(instanceNic.FloatingIps)) + for i, fpObj := range instanceNic.FloatingIps { + fp := make(map[string]interface{}) + fp[isSGNICAFloatingIpCRN] = fpObj.CRN + fp[isSGNICAFloatingIpID] = *fpObj.ID + fp[isSGNICAFloatingIpName] = *fpObj.Name + fps[i] = fp + } + d.Set(isSGNICAFloatingIps, fps) + + // d.Set(isSGNICASecondaryAddresses, *instanceNic.SecondaryAddresses) + getSecurityGroupOptions := &vpcv1.GetSecurityGroupOptions{ + ID: &sgID, + } + sg, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error Getting Security Group : %s\n%s", err, response) + } + d.Set(RelatedCRN, *sg.CRN) + return nil +} + +func resourceIBMISSecurityGroupNetworkInterfaceAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + sgID := parts[0] + nicID := parts[1] + if userDetails.generation == 1 { + err := classicSgnicDelete(d, meta, sgID, nicID) + if err != nil { + return err + } + } else { + err := sgnicDelete(d, meta, sgID, nicID) + if err != nil { + return err + } + } + d.SetId("") + return nil +} + +func classicSgnicDelete(d *schema.ResourceData, meta interface{}, sgID, nicID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getSecurityGroupNetworkInterfaceOptions := &vpcclassicv1.GetSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + _, response, err := sess.GetSecurityGroupNetworkInterface(getSecurityGroupNetworkInterfaceOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting NetworkInterface(%s) for the SecurityGroup (%s) : %s\n%s", nicID, sgID, err, response) + } + + removeSecurityGroupNetworkInterfaceOptions := &vpcclassicv1.RemoveSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + response, err = sess.RemoveSecurityGroupNetworkInterface(removeSecurityGroupNetworkInterfaceOptions) + if err != nil { + return fmt.Errorf("Error Deleting NetworkInterface(%s) for the SecurityGroup (%s) : %s\n%s", nicID, sgID, err, response) + } + d.SetId("") + return nil +} + +func sgnicDelete(d *schema.ResourceData, meta interface{}, sgID, nicID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getSecurityGroupNetworkInterfaceOptions := &vpcv1.GetSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + _, response, err := sess.GetSecurityGroupNetworkInterface(getSecurityGroupNetworkInterfaceOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + 
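+ // A 404 on this pre-delete lookup means the attachment is already gone; clear the ID so Terraform treats the delete as complete.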
d.SetId("") + return nil + } + return fmt.Errorf("Error getting NetworkInterface(%s) for the SecurityGroup (%s) : %s\n%s", nicID, sgID, err, response) + } + + removeSecurityGroupNetworkInterfaceOptions := &vpcv1.RemoveSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + response, err = sess.RemoveSecurityGroupNetworkInterface(removeSecurityGroupNetworkInterfaceOptions) + if err != nil { + return fmt.Errorf("Error Deleting NetworkInterface(%s) for the SecurityGroup (%s) : %s\n%s", nicID, sgID, err, response) + } + d.SetId("") + return nil +} + +func resourceIBMISSecurityGroupNetworkInterfaceAttachmentExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + if len(parts) != 2 { + return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of sgID/nicID", d.Id()) + } + sgID := parts[0] + nicID := parts[1] + if userDetails.generation == 1 { + exists, err := classicSgnicExists(d, meta, sgID, nicID) + return exists, err + } else { + exists, err := sgnicExists(d, meta, sgID, nicID) + return exists, err + } +} + +func classicSgnicExists(d *schema.ResourceData, meta interface{}, sgID, nicID string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getSecurityGroupNetworkInterfaceOptions := &vpcclassicv1.GetSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + _, response, err := sess.GetSecurityGroupNetworkInterface(getSecurityGroupNetworkInterfaceOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting NetworkInterface(%s) for the SecurityGroup (%s) : %s\n%s", nicID, sgID, err, response) + } + return true, nil +} + +func sgnicExists(d *schema.ResourceData, meta interface{}, sgID, nicID string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getSecurityGroupNetworkInterfaceOptions := &vpcv1.GetSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: &sgID, + ID: &nicID, + } + _, response, err := sess.GetSecurityGroupNetworkInterface(getSecurityGroupNetworkInterfaceOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting NetworkInterface(%s) for the SecurityGroup (%s) : %s\n%s", nicID, sgID, err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group_rule.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group_rule.go new file mode 100644 index 00000000000..bcc8faf3f32 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group_rule.go @@ -0,0 +1,1142 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "reflect" + "strings" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isSecurityGroupRuleCode = "code" + isSecurityGroupRuleDirection = "direction" + isSecurityGroupRuleIPVersion = "ip_version" + isSecurityGroupRuleIPVersionDefault = "ipv4" + isSecurityGroupRulePortMax = "port_max" + isSecurityGroupRulePortMin = "port_min" + isSecurityGroupRuleProtocolICMP = "icmp" + isSecurityGroupRuleProtocolTCP = "tcp" + isSecurityGroupRuleProtocolUDP = "udp" + isSecurityGroupRuleProtocol = "protocol" + isSecurityGroupRuleRemote = "remote" + isSecurityGroupRuleType = "type" + isSecurityGroupID = "group" + isSecurityGroupRuleID = "rule_id" +) + +func resourceIBMISSecurityGroupRule() *schema.Resource { + + return &schema.Resource{ + Create: resourceIBMISSecurityGroupRuleCreate, + Read: resourceIBMISSecurityGroupRuleRead, + Update: resourceIBMISSecurityGroupRuleUpdate, + Delete: resourceIBMISSecurityGroupRuleDelete, + Exists: resourceIBMISSecurityGroupRuleExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + isSecurityGroupID: { + Type: schema.TypeString, + Required: true, + Description: "Security group id", + ForceNew: true, + }, + + isSecurityGroupRuleID: { + Type: schema.TypeString, + Computed: true, + Description: "Rule id", + }, + + isSecurityGroupRuleDirection: { + Type: schema.TypeString, + Required: true, + Description: "Direction of traffic to enforce, either inbound or outbound", + ValidateFunc: InvokeValidator("ibm_is_security_group_rule", isSecurityGroupRuleDirection), + }, + + isSecurityGroupRuleIPVersion: { + Type: schema.TypeString, + Optional: true, + Description: "IP version: ipv4 or ipv6", + Default: isSecurityGroupRuleIPVersionDefault, + ValidateFunc: InvokeValidator("ibm_is_security_group_rule", isSecurityGroupRuleIPVersion), + }, + + isSecurityGroupRuleRemote: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Security group id: an IP address, a CIDR block, or a single security group identifier", + }, + + isSecurityGroupRuleProtocolICMP: { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + MinItems: 1, + ConflictsWith: []string{isSecurityGroupRuleProtocolTCP, isSecurityGroupRuleProtocolUDP}, + Description: "protocol=icmp", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isSecurityGroupRuleType: { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_security_group_rule", isSecurityGroupRuleType), + }, + isSecurityGroupRuleCode: { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_security_group_rule", isSecurityGroupRuleCode), + }, + }, + }, + }, + + isSecurityGroupRuleProtocolTCP: { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + MinItems: 1, + ForceNew: true, + Description: "protocol=tcp", + ConflictsWith: []string{isSecurityGroupRuleProtocolUDP, isSecurityGroupRuleProtocolICMP}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isSecurityGroupRulePortMin: { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Default: 1, + ValidateFunc: InvokeValidator("ibm_is_security_group_rule", isSecurityGroupRulePortMin), + }, + isSecurityGroupRulePortMax: { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Default: 65535, + ValidateFunc: 
InvokeValidator("ibm_is_security_group_rule", isSecurityGroupRulePortMax), + }, + }, + }, + }, + + isSecurityGroupRuleProtocolUDP: { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + MinItems: 1, + Description: "protocol=udp", + ConflictsWith: []string{isSecurityGroupRuleProtocolTCP, isSecurityGroupRuleProtocolICMP}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isSecurityGroupRulePortMin: { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Default: 1, + ValidateFunc: InvokeValidator("ibm_is_security_group_rule", isSecurityGroupRulePortMin), + }, + isSecurityGroupRulePortMax: { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Default: 65535, + ValidateFunc: InvokeValidator("ibm_is_security_group_rule", isSecurityGroupRulePortMax), + }, + }, + }, + }, + + RelatedCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the Security Group", + }, + isSecurityGroupRuleProtocol: { + Type: schema.TypeString, + Computed: true, + Description: "The Security Group Rule Protocol", + }, + }, + } +} + +func resourceIBMISSecurityGroupRuleValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + direction := "inbound, outbound" + ip_version := "ipv4, ipv6" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSecurityGroupRuleDirection, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: direction}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSecurityGroupRuleIPVersion, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: ip_version}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSecurityGroupRuleType, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "0", + MaxValue: "254"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSecurityGroupRuleCode, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "0", + MaxValue: "255"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSecurityGroupRulePortMin, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "65535"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSecurityGroupRulePortMax, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "65535"}) + + ibmISSecurityGroupRuleResourceValidator := ResourceValidator{ResourceName: "ibm_is_security_group_rule", Schema: validateSchema} + return &ibmISSecurityGroupRuleResourceValidator +} + +func resourceIBMISSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + if userDetails.generation == 1 { + err := classicSgRuleCreate(d, meta) + if err != nil { + return err + } + } else { + err := sgRuleCreate(d, meta) + if err != nil { + return err + } + } + return err +} + +func classicSgRuleCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + parsed, sgTemplate, _, err := parseIBMISClassicSecurityGroupRuleDictionary(d, "create", sess) + if err != nil { + return err + } + isSecurityGroupRuleKey := "security_group_rule_key_" + parsed.secgrpID + ibmMutexKV.Lock(isSecurityGroupRuleKey) + defer 
ibmMutexKV.Unlock(isSecurityGroupRuleKey) + + options := &vpcclassicv1.CreateSecurityGroupRuleOptions{ + SecurityGroupID: &parsed.secgrpID, + SecurityGroupRulePrototype: sgTemplate, + } + + rule, response, err := sess.CreateSecurityGroupRule(options) + if err != nil { + return fmt.Errorf("Error while creating Security Group Rule %s\n%s", err, response) + } + switch reflect.TypeOf(rule).String() { + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + sgrule := rule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + d.Set(isSecurityGroupRuleID, *sgrule.ID) + tfID := makeTerraformRuleID(parsed.secgrpID, *sgrule.ID) + d.SetId(tfID) + } + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + sgrule := rule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + d.Set(isSecurityGroupRuleID, *sgrule.ID) + tfID := makeTerraformRuleID(parsed.secgrpID, *sgrule.ID) + d.SetId(tfID) + } + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + sgrule := rule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + d.Set(isSecurityGroupRuleID, *sgrule.ID) + tfID := makeTerraformRuleID(parsed.secgrpID, *sgrule.ID) + d.SetId(tfID) + } + } + return nil +} + +func sgRuleCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + parsed, sgTemplate, _, err := parseIBMISSecurityGroupRuleDictionary(d, "create", sess) + if err != nil { + return err + } + isSecurityGroupRuleKey := "security_group_rule_key_" + parsed.secgrpID + ibmMutexKV.Lock(isSecurityGroupRuleKey) + defer ibmMutexKV.Unlock(isSecurityGroupRuleKey) + + options := &vpcv1.CreateSecurityGroupRuleOptions{ + SecurityGroupID: &parsed.secgrpID, + SecurityGroupRulePrototype: sgTemplate, + } + + rule, response, err := sess.CreateSecurityGroupRule(options) + if err != nil { + return fmt.Errorf("Error while creating Security Group Rule %s\n%s", err, response) + } + switch reflect.TypeOf(rule).String() { + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + sgrule := rule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + d.Set(isSecurityGroupRuleID, *sgrule.ID) + tfID := makeTerraformRuleID(parsed.secgrpID, *sgrule.ID) + d.SetId(tfID) + } + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + sgrule := rule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + d.Set(isSecurityGroupRuleID, *sgrule.ID) + tfID := makeTerraformRuleID(parsed.secgrpID, *sgrule.ID) + d.SetId(tfID) + } + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + sgrule := rule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + d.Set(isSecurityGroupRuleID, *sgrule.ID) + tfID := makeTerraformRuleID(parsed.secgrpID, *sgrule.ID) + d.SetId(tfID) + } + } + return nil +} + +func resourceIBMISSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + secgrpID, ruleID, err := parseISTerraformID(d.Id()) + if err != nil { + return err + } + if userDetails.generation == 1 { + err := classicSgRuleGet(d, meta, secgrpID, ruleID) + if err != nil { + return err + } + } else { + err := sgRuleGet(d, meta, secgrpID, ruleID) + if err != nil { + return err + } + } + return nil +} + +func classicSgRuleGet(d *schema.ResourceData, meta interface{}, secgrpID, ruleID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + 
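+ // Fetch the rule by security group ID and rule ID, both recovered from the composed "secgrpID.ruleID" Terraform ID.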
getSecurityGroupRuleOptions := &vpcclassicv1.GetSecurityGroupRuleOptions{ + SecurityGroupID: &secgrpID, + ID: &ruleID, + } + sgrule, response, err := sess.GetSecurityGroupRule(getSecurityGroupRuleOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Security Group Rule (%s): %s\n%s", ruleID, err, response) + } + + d.Set(isSecurityGroupID, secgrpID) + getSecurityGroupOptions := &vpcclassicv1.GetSecurityGroupOptions{ + ID: &secgrpID, + } + sg, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error Getting Security Group : %s\n%s", err, response) + } + d.Set(RelatedCRN, *sg.CRN) + switch reflect.TypeOf(sgrule).String() { + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + d.Set(isSecurityGroupRuleID, *rule.ID) + tfID := makeTerraformRuleID(secgrpID, *rule.ID) + d.SetId(tfID) + d.Set(isSecurityGroupRuleIPVersion, *rule.IPVersion) + d.Set(isSecurityGroupRuleProtocol, *rule.Protocol) + icmpProtocol := map[string]interface{}{} + + if rule.Type != nil { + icmpProtocol["type"] = *rule.Type + } + if rule.Code != nil { + icmpProtocol["code"] = *rule.Code + } + protocolList := make([]map[string]interface{}, 0) + protocolList = append(protocolList, icmpProtocol) + d.Set(isSecurityGroupRuleProtocolICMP, protocolList) + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + d.Set(isSecurityGroupRuleRemote, remote.ID) + } else if remote.Address != nil { + d.Set(isSecurityGroupRuleRemote, remote.Address) + } else if remote.CIDRBlock != nil { + d.Set(isSecurityGroupRuleRemote, remote.CIDRBlock) + } + } + } + } + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + d.Set(isSecurityGroupRuleID, *rule.ID) + tfID := makeTerraformRuleID(secgrpID, *rule.ID) + d.SetId(tfID) + d.Set(isSecurityGroupRuleIPVersion, *rule.IPVersion) + d.Set(isSecurityGroupRuleProtocol, *rule.Protocol) + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + d.Set(isSecurityGroupRuleRemote, remote.ID) + } else if remote.Address != nil { + d.Set(isSecurityGroupRuleRemote, remote.Address) + } else if remote.CIDRBlock != nil { + d.Set(isSecurityGroupRuleRemote, remote.CIDRBlock) + } + } + } + } + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + d.Set(isSecurityGroupRuleID, *rule.ID) + tfID := makeTerraformRuleID(secgrpID, *rule.ID) + d.SetId(tfID) + d.Set(isSecurityGroupRuleIPVersion, *rule.IPVersion) + d.Set(isSecurityGroupRuleProtocol, *rule.Protocol) + tcpProtocol := map[string]interface{}{} + + if rule.PortMin != nil { + tcpProtocol["port_min"] = *rule.PortMin + } + if rule.PortMax != nil { + tcpProtocol["port_max"] = *rule.PortMax + } + protocolList := make([]map[string]interface{}, 0) + protocolList = append(protocolList, tcpProtocol) + if *rule.Protocol == isSecurityGroupRuleProtocolTCP { + d.Set(isSecurityGroupRuleProtocolTCP, protocolList) + } else { + d.Set(isSecurityGroupRuleProtocolUDP, protocolList) + } + remote, ok := 
rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + d.Set(isSecurityGroupRuleRemote, remote.ID) + } else if remote.Address != nil { + d.Set(isSecurityGroupRuleRemote, remote.Address) + } else if remote.CIDRBlock != nil { + d.Set(isSecurityGroupRuleRemote, remote.CIDRBlock) + } + } + } + } + } + return nil +} + +func sgRuleGet(d *schema.ResourceData, meta interface{}, secgrpID, ruleID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getSecurityGroupRuleOptions := &vpcv1.GetSecurityGroupRuleOptions{ + SecurityGroupID: &secgrpID, + ID: &ruleID, + } + sgrule, response, err := sess.GetSecurityGroupRule(getSecurityGroupRuleOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Security Group Rule (%s): %s\n%s", ruleID, err, response) + } + d.Set(isSecurityGroupID, secgrpID) + getSecurityGroupOptions := &vpcv1.GetSecurityGroupOptions{ + ID: &secgrpID, + } + sg, response, err := sess.GetSecurityGroup(getSecurityGroupOptions) + if err != nil { + return fmt.Errorf("Error Getting Security Group : %s\n%s", err, response) + } + d.Set(RelatedCRN, *sg.CRN) + switch reflect.TypeOf(sgrule).String() { + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + d.Set(isSecurityGroupRuleID, *rule.ID) + tfID := makeTerraformRuleID(secgrpID, *rule.ID) + d.SetId(tfID) + d.Set(isSecurityGroupRuleIPVersion, *rule.IPVersion) + d.Set(isSecurityGroupRuleProtocol, *rule.Protocol) + icmpProtocol := map[string]interface{}{} + + if rule.Type != nil { + icmpProtocol["type"] = *rule.Type + } + if rule.Code != nil { + icmpProtocol["code"] = *rule.Code + } + protocolList := make([]map[string]interface{}, 0) + protocolList = append(protocolList, icmpProtocol) + d.Set(isSecurityGroupRuleProtocolICMP, protocolList) + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + d.Set(isSecurityGroupRuleRemote, remote.ID) + } else if remote.Address != nil { + d.Set(isSecurityGroupRuleRemote, remote.Address) + } else if remote.CIDRBlock != nil { + d.Set(isSecurityGroupRuleRemote, remote.CIDRBlock) + } + } + } + } + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + d.Set(isSecurityGroupRuleID, *rule.ID) + tfID := makeTerraformRuleID(secgrpID, *rule.ID) + d.SetId(tfID) + d.Set(isSecurityGroupRuleIPVersion, *rule.IPVersion) + d.Set(isSecurityGroupRuleProtocol, *rule.Protocol) + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + d.Set(isSecurityGroupRuleRemote, remote.ID) + } else if remote.Address != nil { + d.Set(isSecurityGroupRuleRemote, remote.Address) + } else if remote.CIDRBlock != nil { + d.Set(isSecurityGroupRuleRemote, remote.CIDRBlock) + } + } + } + } + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + d.Set(isSecurityGroupRuleID, *rule.ID) + tfID := makeTerraformRuleID(secgrpID, *rule.ID) + d.SetId(tfID) + d.Set(isSecurityGroupRuleIPVersion, *rule.IPVersion) + d.Set(isSecurityGroupRuleProtocol, *rule.Protocol) + tcpProtocol := 
map[string]interface{}{} + + if rule.PortMin != nil { + tcpProtocol["port_min"] = *rule.PortMin + } + if rule.PortMax != nil { + tcpProtocol["port_max"] = *rule.PortMax + } + protocolList := make([]map[string]interface{}, 0) + protocolList = append(protocolList, tcpProtocol) + if *rule.Protocol == isSecurityGroupRuleProtocolTCP { + d.Set(isSecurityGroupRuleProtocolTCP, protocolList) + } else { + d.Set(isSecurityGroupRuleProtocolUDP, protocolList) + } + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + d.Set(isSecurityGroupRuleRemote, remote.ID) + } else if remote.Address != nil { + d.Set(isSecurityGroupRuleRemote, remote.Address) + } else if remote.CIDRBlock != nil { + d.Set(isSecurityGroupRuleRemote, remote.CIDRBlock) + } + } + } + } + } + return nil +} + +func resourceIBMISSecurityGroupRuleUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + if userDetails.generation == 1 { + err := classicSgRuleUpdate(d, meta) + if err != nil { + return err + } + } else { + err := sgRuleUpdate(d, meta) + if err != nil { + return err + } + } + return resourceIBMISSecurityGroupRuleRead(d, meta) +} + +func classicSgRuleUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + parsed, _, sgTemplate, err := parseIBMISClassicSecurityGroupRuleDictionary(d, "update", sess) + if err != nil { + return err + } + isSecurityGroupRuleKey := "security_group_rule_key_" + parsed.secgrpID + ibmMutexKV.Lock(isSecurityGroupRuleKey) + defer ibmMutexKV.Unlock(isSecurityGroupRuleKey) + securityGroupRulePatchBody, _ := sgTemplate.AsPatch() + updateSecurityGroupRuleOptions := &vpcclassicv1.UpdateSecurityGroupRuleOptions{ + SecurityGroupID: &parsed.secgrpID, + ID: &parsed.ruleID, + SecurityGroupRulePatch: securityGroupRulePatchBody, + } + _, response, err := sess.UpdateSecurityGroupRule(updateSecurityGroupRuleOptions) + if err != nil { + return fmt.Errorf("Error Updating Security Group Rule : %s\n%s", err, response) + } + return nil +} + +func sgRuleUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + parsed, _, sgTemplate, err := parseIBMISSecurityGroupRuleDictionary(d, "update", sess) + if err != nil { + return err + } + isSecurityGroupRuleKey := "security_group_rule_key_" + parsed.secgrpID + ibmMutexKV.Lock(isSecurityGroupRuleKey) + defer ibmMutexKV.Unlock(isSecurityGroupRuleKey) + + updateSecurityGroupRuleOptions := sgTemplate + _, response, err := sess.UpdateSecurityGroupRule(updateSecurityGroupRuleOptions) + if err != nil { + return fmt.Errorf("Error Updating Security Group Rule : %s\n%s", err, response) + } + return nil +} + +func resourceIBMISSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + secgrpID, ruleID, err := parseISTerraformID(d.Id()) + if err != nil { + return err + } + + isSecurityGroupRuleKey := "security_group_rule_key_" + secgrpID + ibmMutexKV.Lock(isSecurityGroupRuleKey) + defer ibmMutexKV.Unlock(isSecurityGroupRuleKey) + + if userDetails.generation == 1 { + err := classicSgRuleDelete(d, meta, secgrpID, ruleID) + if err != nil { + return err + } + } else { + err := sgRuleDelete(d, meta, secgrpID, ruleID) + if err != nil { + return 
err + } + } + d.SetId("") + return nil +} + +func classicSgRuleDelete(d *schema.ResourceData, meta interface{}, secgrpID, ruleID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getSecurityGroupRuleOptions := &vpcclassicv1.GetSecurityGroupRuleOptions{ + SecurityGroupID: &secgrpID, + ID: &ruleID, + } + _, response, err := sess.GetSecurityGroupRule(getSecurityGroupRuleOptions) + + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Security Group Rule (%s): %s\n%s", ruleID, err, response) + } + + deleteSecurityGroupRuleOptions := &vpcclassicv1.DeleteSecurityGroupRuleOptions{ + SecurityGroupID: &secgrpID, + ID: &ruleID, + } + response, err = sess.DeleteSecurityGroupRule(deleteSecurityGroupRuleOptions) + if err != nil { + return fmt.Errorf("Error Deleting Security Group Rule : %s\n%s", err, response) + } + d.SetId("") + return nil +} + +func sgRuleDelete(d *schema.ResourceData, meta interface{}, secgrpID, ruleID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getSecurityGroupRuleOptions := &vpcv1.GetSecurityGroupRuleOptions{ + SecurityGroupID: &secgrpID, + ID: &ruleID, + } + _, response, err := sess.GetSecurityGroupRule(getSecurityGroupRuleOptions) + + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Security Group Rule (%s): %s\n%s", ruleID, err, response) + } + + deleteSecurityGroupRuleOptions := &vpcv1.DeleteSecurityGroupRuleOptions{ + SecurityGroupID: &secgrpID, + ID: &ruleID, + } + response, err = sess.DeleteSecurityGroupRule(deleteSecurityGroupRuleOptions) + if err != nil { + return fmt.Errorf("Error Deleting Security Group Rule : %s\n%s", err, response) + } + d.SetId("") + return nil +} + +func resourceIBMISSecurityGroupRuleExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + secgrpID, ruleID, err := parseISTerraformID(d.Id()) + if err != nil { + return false, err + } + if userDetails.generation == 1 { + exists, err := classicSgRuleExists(d, meta, secgrpID, ruleID) + return exists, err + } else { + exists, err := sgRuleExists(d, meta, secgrpID, ruleID) + return exists, err + } +} + +func classicSgRuleExists(d *schema.ResourceData, meta interface{}, secgrpID, ruleID string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getSecurityGroupRuleOptions := &vpcclassicv1.GetSecurityGroupRuleOptions{ + SecurityGroupID: &secgrpID, + ID: &ruleID, + } + _, response, err := sess.GetSecurityGroupRule(getSecurityGroupRuleOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Security Group Rule (%s): %s\n%s", ruleID, err, response) + } + return true, nil +} + +func sgRuleExists(d *schema.ResourceData, meta interface{}, secgrpID, ruleID string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getSecurityGroupRuleOptions := &vpcv1.GetSecurityGroupRuleOptions{ + SecurityGroupID: &secgrpID, + ID: &ruleID, + } + _, response, err := sess.GetSecurityGroupRule(getSecurityGroupRuleOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Security Group Rule (%s): %s\n%s", ruleID, 
err, response) + } + return true, nil +} + +func parseISTerraformID(s string) (string, string, error) { + segments := strings.Split(s, ".") + if len(segments) != 2 { + return "", "", fmt.Errorf("invalid terraform Id %s (incorrect number of segments)", s) + } + if segments[0] == "" || segments[1] == "" { + return "", "", fmt.Errorf("invalid terraform Id %s (one or more empty segments)", s) + } + return segments[0], segments[1], nil +} + +type parsedIBMISSecurityGroupRuleDictionary struct { + // After parsing, unused string fields are set to + // "" and unused int64 fields will be set to -1. + // This ("" for unused strings and -1 for unused int64s) + // is expected by our riaas API client. + secgrpID string + ruleID string + direction string + ipversion string + remote string + remoteAddress string + remoteCIDR string + remoteSecGrpID string + protocol string + icmpType int64 + icmpCode int64 + portMin int64 + portMax int64 +} + +func inferRemoteSecurityGroup(s string) (address, cidr, id string, err error) { + if isSecurityGroupAddress(s) { + address = s + return + } else if isSecurityGroupCIDR(s) { + cidr = s + return + } else { + id = s + return + } +} + +func parseIBMISClassicSecurityGroupRuleDictionary(d *schema.ResourceData, tag string, sess *vpcclassicv1.VpcClassicV1) (*parsedIBMISSecurityGroupRuleDictionary, *vpcclassicv1.SecurityGroupRulePrototype, *vpcclassicv1.SecurityGroupRulePatch, error) { + parsed := &parsedIBMISSecurityGroupRuleDictionary{} + sgTemplate := &vpcclassicv1.SecurityGroupRulePrototype{} + sgTemplateUpdate := &vpcclassicv1.SecurityGroupRulePatch{} + var err error + parsed.icmpType = -1 + parsed.icmpCode = -1 + parsed.portMin = -1 + parsed.portMax = -1 + + parsed.secgrpID, parsed.ruleID, err = parseISTerraformID(d.Id()) + if err != nil { + parsed.secgrpID = d.Get(isSecurityGroupID).(string) + } + + parsed.direction = d.Get(isSecurityGroupRuleDirection).(string) + sgTemplate.Direction = &parsed.direction + sgTemplateUpdate.Direction = &parsed.direction + + if version, ok := d.GetOk(isSecurityGroupRuleIPVersion); ok { + parsed.ipversion = version.(string) + sgTemplate.IPVersion = &parsed.ipversion + sgTemplateUpdate.IPVersion = &parsed.ipversion + } else { + parsed.ipversion = "IPv4" + sgTemplate.IPVersion = &parsed.ipversion + sgTemplateUpdate.IPVersion = &parsed.ipversion + } + + parsed.remote = "" + if pr, ok := d.GetOk(isSecurityGroupRuleRemote); ok { + parsed.remote = pr.(string) + } + parsed.remoteAddress = "" + parsed.remoteCIDR = "" + parsed.remoteSecGrpID = "" + err = nil + if parsed.remote != "" { + parsed.remoteAddress, parsed.remoteCIDR, parsed.remoteSecGrpID, err = inferRemoteSecurityGroup(parsed.remote) + remoteTemplate := &vpcclassicv1.SecurityGroupRuleRemotePrototype{} + remoteTemplateUpdate := &vpcclassicv1.SecurityGroupRuleRemotePatch{} + if parsed.remoteAddress != "" { + remoteTemplate.Address = &parsed.remoteAddress + remoteTemplateUpdate.Address = &parsed.remoteAddress + } else if parsed.remoteCIDR != "" { + remoteTemplate.CIDRBlock = &parsed.remoteCIDR + remoteTemplateUpdate.CIDRBlock = &parsed.remoteCIDR + } else if parsed.remoteSecGrpID != "" { + remoteTemplate.ID = &parsed.remoteSecGrpID + remoteTemplateUpdate.ID = &parsed.remoteSecGrpID + } + sgTemplate.Remote = remoteTemplate + sgTemplateUpdate.Remote = remoteTemplateUpdate + } + if err != nil { + return nil, nil, nil, err + } + parsed.protocol = "all" + + if icmpInterface, ok := d.GetOk("icmp"); ok { + if icmpInterface.([]interface{})[0] != nil { + haveType := false + icmp := 
icmpInterface.([]interface{})[0].(map[string]interface{}) + if value, ok := icmp["type"]; ok { + parsed.icmpType = int64(value.(int)) + haveType = true + } + if value, ok := icmp["code"]; ok { + if !haveType { + return nil, nil, nil, fmt.Errorf("icmp code requires icmp type") + } + parsed.icmpCode = int64(value.(int)) + } + } + parsed.protocol = "icmp" + if icmpInterface.([]interface{})[0] == nil { + parsed.icmpType = 0 + parsed.icmpCode = 0 + } else { + sgTemplate.Type = &parsed.icmpType + sgTemplate.Code = &parsed.icmpCode + } + sgTemplate.Protocol = &parsed.protocol + sgTemplateUpdate.Type = &parsed.icmpType + sgTemplateUpdate.Code = &parsed.icmpCode + } + for _, prot := range []string{"tcp", "udp"} { + if tcpInterface, ok := d.GetOk(prot); ok { + if tcpInterface.([]interface{})[0] != nil { + haveMin := false + haveMax := false + ports := tcpInterface.([]interface{})[0].(map[string]interface{}) + if value, ok := ports["port_min"]; ok { + parsed.portMin = int64(value.(int)) + haveMin = true + } + if value, ok := ports["port_max"]; ok { + parsed.portMax = int64(value.(int)) + haveMax = true + } + + // If only min or max is set, ensure that both min and max are set to the same value + if haveMin && !haveMax { + parsed.portMax = parsed.portMin + } + if haveMax && !haveMin { + parsed.portMin = parsed.portMax + } + } + parsed.protocol = prot + sgTemplate.Protocol = &parsed.protocol + if tcpInterface.([]interface{})[0] == nil { + parsed.portMax = 65535 + parsed.portMin = 1 + } + sgTemplate.PortMax = &parsed.portMax + sgTemplate.PortMin = &parsed.portMin + sgTemplateUpdate.PortMax = &parsed.portMax + sgTemplateUpdate.PortMin = &parsed.portMin + } + } + + if parsed.protocol == "all" { + sgTemplate.Protocol = &parsed.protocol + sgTemplateUpdate.Protocol = &parsed.protocol + } + // log.Printf("[DEBUG] parse tag=%s\n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v ", + // tag, parsed.secgrpID, parsed.ruleID, parsed.direction, parsed.ipversion, parsed.protocol, parsed.remoteAddress, + // parsed.remoteCIDR, parsed.remoteSecGrpID, parsed.icmpType, parsed.icmpCode, parsed.portMin, parsed.portMax) + return parsed, sgTemplate, sgTemplateUpdate, nil +} + +func parseIBMISSecurityGroupRuleDictionary(d *schema.ResourceData, tag string, sess *vpcv1.VpcV1) (*parsedIBMISSecurityGroupRuleDictionary, *vpcv1.SecurityGroupRulePrototype, *vpcv1.UpdateSecurityGroupRuleOptions, error) { + parsed := &parsedIBMISSecurityGroupRuleDictionary{} + sgTemplate := &vpcv1.SecurityGroupRulePrototype{} + sgTemplateUpdate := &vpcv1.UpdateSecurityGroupRuleOptions{} + var err error + parsed.icmpType = -1 + parsed.icmpCode = -1 + parsed.portMin = -1 + parsed.portMax = -1 + + parsed.secgrpID, parsed.ruleID, err = parseISTerraformID(d.Id()) + if err != nil { + parsed.secgrpID = d.Get(isSecurityGroupID).(string) + } else { + sgTemplateUpdate.SecurityGroupID = &parsed.secgrpID + sgTemplateUpdate.ID = &parsed.ruleID + } + + securityGroupRulePatchModel := &vpcv1.SecurityGroupRulePatch{} + + parsed.direction = d.Get(isSecurityGroupRuleDirection).(string) + sgTemplate.Direction = &parsed.direction + securityGroupRulePatchModel.Direction = &parsed.direction + + if version, ok := d.GetOk(isSecurityGroupRuleIPVersion); ok { + parsed.ipversion = version.(string) + sgTemplate.IPVersion = &parsed.ipversion + securityGroupRulePatchModel.IPVersion = &parsed.ipversion + } else { + parsed.ipversion = "IPv4" + sgTemplate.IPVersion = &parsed.ipversion + securityGroupRulePatchModel.IPVersion = &parsed.ipversion + } + + 
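+ // The user-supplied remote is polymorphic: inferRemoteSecurityGroup classifies it as an IP address (e.g. "10.240.0.4"), a CIDR block (e.g. "10.240.0.0/24"), or otherwise a security group ID; the literals here are only illustrative.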
parsed.remote = "" + if pr, ok := d.GetOk(isSecurityGroupRuleRemote); ok { + parsed.remote = pr.(string) + } + parsed.remoteAddress = "" + parsed.remoteCIDR = "" + parsed.remoteSecGrpID = "" + err = nil + if parsed.remote != "" { + parsed.remoteAddress, parsed.remoteCIDR, parsed.remoteSecGrpID, err = inferRemoteSecurityGroup(parsed.remote) + remoteTemplate := &vpcv1.SecurityGroupRuleRemotePrototype{} + remoteTemplateUpdate := &vpcv1.SecurityGroupRuleRemotePatch{} + if parsed.remoteAddress != "" { + remoteTemplate.Address = &parsed.remoteAddress + remoteTemplateUpdate.Address = &parsed.remoteAddress + } else if parsed.remoteCIDR != "" { + remoteTemplate.CIDRBlock = &parsed.remoteCIDR + remoteTemplateUpdate.CIDRBlock = &parsed.remoteCIDR + } else if parsed.remoteSecGrpID != "" { + remoteTemplate.ID = &parsed.remoteSecGrpID + remoteTemplateUpdate.ID = &parsed.remoteSecGrpID + } + sgTemplate.Remote = remoteTemplate + securityGroupRulePatchModel.Remote = remoteTemplateUpdate + } + if err != nil { + return nil, nil, nil, err + } + parsed.protocol = "all" + + if icmpInterface, ok := d.GetOk("icmp"); ok { + if icmpInterface.([]interface{})[0] != nil { + haveType := false + icmp := icmpInterface.([]interface{})[0].(map[string]interface{}) + if value, ok := icmp["type"]; ok { + parsed.icmpType = int64(value.(int)) + haveType = true + } + if value, ok := icmp["code"]; ok { + if !haveType { + return nil, nil, nil, fmt.Errorf("icmp code requires icmp type") + } + parsed.icmpCode = int64(value.(int)) + } + } + parsed.protocol = "icmp" + if icmpInterface.([]interface{})[0] == nil { + parsed.icmpType = 0 + parsed.icmpCode = 0 + } else { + sgTemplate.Type = &parsed.icmpType + sgTemplate.Code = &parsed.icmpCode + } + sgTemplate.Protocol = &parsed.protocol + securityGroupRulePatchModel.Type = &parsed.icmpType + securityGroupRulePatchModel.Code = &parsed.icmpCode + } + for _, prot := range []string{"tcp", "udp"} { + if tcpInterface, ok := d.GetOk(prot); ok { + if tcpInterface.([]interface{})[0] != nil { + haveMin := false + haveMax := false + ports := tcpInterface.([]interface{})[0].(map[string]interface{}) + if value, ok := ports["port_min"]; ok { + parsed.portMin = int64(value.(int)) + haveMin = true + } + if value, ok := ports["port_max"]; ok { + parsed.portMax = int64(value.(int)) + haveMax = true + } + + // If only min or max is set, ensure that both min and max are set to the same value + if haveMin && !haveMax { + parsed.portMax = parsed.portMin + } + if haveMax && !haveMin { + parsed.portMin = parsed.portMax + } + } + parsed.protocol = prot + sgTemplate.Protocol = &parsed.protocol + if tcpInterface.([]interface{})[0] == nil { + parsed.portMax = 65535 + parsed.portMin = 1 + } + sgTemplate.PortMax = &parsed.portMax + sgTemplate.PortMin = &parsed.portMin + securityGroupRulePatchModel.PortMax = &parsed.portMax + securityGroupRulePatchModel.PortMin = &parsed.portMin + } + } + if parsed.protocol == "all" { + sgTemplate.Protocol = &parsed.protocol + } + securityGroupRulePatch, err := securityGroupRulePatchModel.AsPatch() + if err != nil { + return nil, nil, nil, fmt.Errorf("Error calling asPatch for SecurityGroupRulePatch: %s", err) + } + sgTemplateUpdate.SecurityGroupRulePatch = securityGroupRulePatch + // log.Printf("[DEBUG] parse tag=%s\n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v \n\t%v ", + // tag, parsed.secgrpID, parsed.ruleID, parsed.direction, parsed.ipversion, parsed.protocol, parsed.remoteAddress, + // parsed.remoteCIDR, parsed.remoteSecGrpID, parsed.icmpType, 
parsed.icmpCode, parsed.portMin, parsed.portMax) + return parsed, sgTemplate, sgTemplateUpdate, nil +} + +func makeTerraformRuleID(id1, id2 string) string { + // Include both group and rule id to create a unique Terraform id. As a bonus, + // we can extract the group id as needed for API calls such as READ. + return id1 + "." + id2 +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group_target.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group_target.go new file mode 100644 index 00000000000..61031407427 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_security_group_target.go @@ -0,0 +1,205 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isSecurityGroupTargetID = "target" + isSecurityGroupResourceType = "resource_type" +) + +func resourceIBMISSecurityGroupTarget() *schema.Resource { + + return &schema.Resource{ + Create: resourceIBMISSecurityGroupTargetCreate, + Read: resourceIBMISSecurityGroupTargetRead, + Delete: resourceIBMISSecurityGroupTargetDelete, + Exists: resourceIBMISSecurityGroupTargetExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "security_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Security group id", + }, + + isSecurityGroupTargetID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "security group target identifier", + ValidateFunc: InvokeValidator("ibm_is_security_group_target", isSecurityGroupTargetID), + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Security group target name", + }, + + isSecurityGroupResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Resource Type", + }, + }, + } +} + +func resourceIBMISSecurityGroupTargetValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSecurityGroupTargetID, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^[-0-9a-z_]+$`, + MinValueLength: 1, + MaxValueLength: 64}, + ValidateSchema{ + Identifier: "security_group", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^[-0-9a-z_]+$`, + MinValueLength: 1, + MaxValueLength: 64}) + + ibmISSecurityGroupResourceValidator := ResourceValidator{ResourceName: "ibm_is_security_group_target", Schema: validateSchema} + return &ibmISSecurityGroupResourceValidator +} + +func resourceIBMISSecurityGroupTargetCreate(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + securityGroupID := d.Get("security_group").(string) + targetID := d.Get(isSecurityGroupTargetID).(string) + + createSecurityGroupTargetBindingOptions := &vpcv1.CreateSecurityGroupTargetBindingOptions{} + createSecurityGroupTargetBindingOptions.SecurityGroupID = &securityGroupID + createSecurityGroupTargetBindingOptions.ID = &targetID + + sg, response, err := sess.CreateSecurityGroupTargetBinding(createSecurityGroupTargetBindingOptions) + if err != nil || sg == nil { + return fmt.Errorf("error while creating Security Group Target Binding %s\n%s", err, response) + } + 
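+ // The SDK returns the new binding as an interface value; assert the concrete *vpcv1.SecurityGroupTargetReference to read its ID and compose the "securityGroupID/targetID" Terraform ID.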
sgtarget := sg.(*vpcv1.SecurityGroupTargetReference) + d.SetId(fmt.Sprintf("%s/%s", securityGroupID, *sgtarget.ID)) + return resourceIBMISSecurityGroupTargetRead(d, meta) +} + +func resourceIBMISSecurityGroupTargetRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := vpcClient(meta) + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + securityGroupID := parts[0] + securityGroupTargetID := parts[1] + + getSecurityGroupTargetOptions := &vpcv1.GetSecurityGroupTargetOptions{ + SecurityGroupID: &securityGroupID, + ID: &securityGroupTargetID, + } + + data, response, err := sess.GetSecurityGroupTarget(getSecurityGroupTargetOptions) + if err != nil || data == nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("error getting Security Group Target : %s\n%s", err, response) + } + + target := data.(*vpcv1.SecurityGroupTargetReference) + d.Set("name", *target.Name) + d.Set(isSecurityGroupResourceType, *target.ResourceType) + + return nil +} + +func resourceIBMISSecurityGroupTargetDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + securityGroupID := parts[0] + securityGroupTargetID := parts[1] + + getSecurityGroupTargetOptions := &vpcv1.GetSecurityGroupTargetOptions{ + SecurityGroupID: &securityGroupID, + ID: &securityGroupTargetID, + } + _, response, err := sess.GetSecurityGroupTarget(getSecurityGroupTargetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("error Getting Security Group Targets (%s): %s\n%s", securityGroupID, err, response) + } + deleteSecurityGroupTargetBindingOptions := sess.NewDeleteSecurityGroupTargetBindingOptions(securityGroupID, securityGroupTargetID) + response, err = sess.DeleteSecurityGroupTargetBinding(deleteSecurityGroupTargetBindingOptions) + if err != nil { + return fmt.Errorf("error Deleting Security Group Targets : %s\n%s", err, response) + } + d.SetId("") + return nil +} + +func resourceIBMISSecurityGroupTargetExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + securityGroupID := parts[0] + securityGroupTargetID := parts[1] + + getSecurityGroupTargetOptions := &vpcv1.GetSecurityGroupTargetOptions{ + SecurityGroupID: &securityGroupID, + ID: &securityGroupTargetID, + } + + _, response, err := sess.GetSecurityGroupTarget(getSecurityGroupTargetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return false, nil + } + return false, fmt.Errorf("error getting Security Group Target : %s\n%s", err, response) + } + return true, nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_ssh_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_ssh_key.go new file mode 100644 index 00000000000..b55778c7d6d --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_ssh_key.go @@ -0,0 +1,580 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isKeyName = "name" + isKeyPublicKey = "public_key" + isKeyType = "type" + isKeyFingerprint = "fingerprint" + isKeyLength = "length" + isKeyTags = "tags" + isKeyResourceGroup = "resource_group" +) + +func resourceIBMISSSHKey() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISSSHKeyCreate, + Read: resourceIBMISSSHKeyRead, + Update: resourceIBMISSSHKeyUpdate, + Delete: resourceIBMISSSHKeyDelete, + Exists: resourceIBMISSSHKeyExists, + Importer: &schema.ResourceImporter{}, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + isKeyName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_security_group", isKeyName), + Description: "SSH Key name", + }, + + isKeyPublicKey: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "SSH Public key data", + }, + + isKeyType: { + Type: schema.TypeString, + Computed: true, + Description: "Key type", + }, + + isKeyFingerprint: { + Type: schema.TypeString, + Computed: true, + Description: "SSH key Fingerprint info", + }, + + isKeyLength: { + Type: schema.TypeInt, + Computed: true, + Description: "SSH key Length", + }, + isKeyTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_ssh_key", "tag")}, + Set: resourceIBMVPCHash, + Description: "List of tags for SSH key", + }, + + isKeyResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "Resource group ID", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMISSHKeyValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isKeyName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISSSHKeyResourceValidator := ResourceValidator{ResourceName: "ibm_is_ssh_key", Schema: validateSchema} + return &ibmISSSHKeyResourceValidator +} + +func resourceIBMISSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return 
err + } + log.Printf("[DEBUG] Key create") + name := d.Get(isKeyName).(string) + publickey := d.Get(isKeyPublicKey).(string) + + if userDetails.generation == 1 { + err := classicKeyCreate(d, meta, name, publickey) + if err != nil { + return err + } + } else { + err := keyCreate(d, meta, name, publickey) + if err != nil { + return err + } + } + return resourceIBMISSSHKeyRead(d, meta) +} + +func classicKeyCreate(d *schema.ResourceData, meta interface{}, name, publickey string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.CreateKeyOptions{ + PublicKey: &publickey, + Name: &name, + } + + if rgrp, ok := d.GetOk(isKeyResourceGroup); ok { + rg := rgrp.(string) + options.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{ + ID: &rg, + } + } + + key, response, err := sess.CreateKey(options) + if err != nil { + return fmt.Errorf("[DEBUG] Create SSH Key %s\n%s", err, response) + } + d.SetId(*key.ID) + log.Printf("[INFO] Key : %s", *key.ID) + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isKeyTags); ok || v != "" { + oldList, newList := d.GetChange(isKeyTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *key.CRN) + if err != nil { + log.Printf( + "Error on create of vpc SSH Key (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func keyCreate(d *schema.ResourceData, meta interface{}, name, publickey string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.CreateKeyOptions{ + PublicKey: &publickey, + Name: &name, + } + + if rgrp, ok := d.GetOk(isKeyResourceGroup); ok { + rg := rgrp.(string) + options.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + + key, response, err := sess.CreateKey(options) + if err != nil { + return fmt.Errorf("[DEBUG] Create SSH Key %s\n%s", err, response) + } + d.SetId(*key.ID) + log.Printf("[INFO] Key : %s", *key.ID) + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isKeyTags); ok || v != "" { + oldList, newList := d.GetChange(isKeyTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *key.CRN) + if err != nil { + log.Printf( + "Error on create of vpc SSH Key (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func resourceIBMISSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + id := d.Id() + if userDetails.generation == 1 { + err := classicKeyGet(d, meta, id) + if err != nil { + return err + } + } else { + err := keyGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicKeyGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.GetKeyOptions{ + ID: &id, + } + key, response, err := sess.GetKey(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting SSH Key (%s): %s\n%s", id, err, response) + } + d.Set(isKeyName, *key.Name) + d.Set(isKeyPublicKey, *key.PublicKey) + d.Set(isKeyType, *key.Type) + d.Set(isKeyFingerprint, *key.Fingerprint) + d.Set(isKeyLength, *key.Length) + tags, err := GetTagsUsingCRN(meta, *key.CRN) + if err != nil { + log.Printf( + "Error on get of vpc SSH Key (%s) tags: %s", d.Id(), err) + } + d.Set(isKeyTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/compute/sshKeys") + 
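+	// The dashboard link differs by VPC generation: this classic reader uses
+	// "/vpc/compute/sshKeys", while keyGet below points the same base
+	// controller URL at "/vpc-ext/compute/sshKeys".
+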
d.Set(ResourceName, *key.Name) + d.Set(ResourceCRN, *key.CRN) + if key.ResourceGroup != nil { + d.Set(ResourceGroupName, *key.ResourceGroup.ID) + d.Set(isKeyResourceGroup, *key.ResourceGroup.ID) + } + return nil +} + +func keyGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.GetKeyOptions{ + ID: &id, + } + key, response, err := sess.GetKey(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting SSH Key (%s): %s\n%s", id, err, response) + } + d.Set(isKeyName, *key.Name) + d.Set(isKeyPublicKey, *key.PublicKey) + d.Set(isKeyType, *key.Type) + d.Set(isKeyFingerprint, *key.Fingerprint) + d.Set(isKeyLength, *key.Length) + tags, err := GetTagsUsingCRN(meta, *key.CRN) + if err != nil { + log.Printf( + "Error on get of vpc SSH Key (%s) tags: %s", d.Id(), err) + } + d.Set(isKeyTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/compute/sshKeys") + d.Set(ResourceName, *key.Name) + d.Set(ResourceCRN, *key.CRN) + if key.ResourceGroup != nil { + d.Set(ResourceGroupName, *key.ResourceGroup.Name) + d.Set(isKeyResourceGroup, *key.ResourceGroup.ID) + } + return nil +} + +func resourceIBMISSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + id := d.Id() + name := "" + hasChanged := false + + if d.HasChange(isKeyName) { + name = d.Get(isKeyName).(string) + hasChanged = true + } + + if userDetails.generation == 1 { + err := classicKeyUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } else { + err := keyUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } + return resourceIBMISSSHKeyRead(d, meta) +} + +func classicKeyUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isKeyTags) { + options := &vpcclassicv1.GetKeyOptions{ + ID: &id, + } + key, response, err := sess.GetKey(options) + if err != nil { + return fmt.Errorf("Error getting SSH Key : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isKeyTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *key.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc SSH Key (%s) tags: %s", id, err) + } + } + if hasChanged { + options := &vpcclassicv1.UpdateKeyOptions{ + ID: &id, + } + keyPatchModel := &vpcclassicv1.KeyPatch{ + Name: &name, + } + keyPatch, err := keyPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for KeyPatch: %s", err) + } + options.KeyPatch = keyPatch + _, response, err := sess.UpdateKey(options) + if err != nil { + return fmt.Errorf("Error updating vpc SSH Key: %s\n%s", err, response) + } + } + return nil +} + +func keyUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isKeyTags) { + options := &vpcv1.GetKeyOptions{ + ID: &id, + } + key, response, err := sess.GetKey(options) + if err != nil { + return fmt.Errorf("Error getting SSH Key : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isKeyTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *key.CRN) + if err != nil { + log.Printf( 
+ "Error on update of resource vpc SSH Key (%s) tags: %s", id, err) + } + } + if hasChanged { + options := &vpcv1.UpdateKeyOptions{ + ID: &id, + } + keyPatchModel := &vpcv1.KeyPatch{ + Name: &name, + } + keyPatch, err := keyPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for KeyPatch: %s", err) + } + options.KeyPatch = keyPatch + _, response, err := sess.UpdateKey(options) + if err != nil { + return fmt.Errorf("Error updating vpc SSH Key: %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicKeyDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := keyDelete(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicKeyDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getKeyOptions := &vpcclassicv1.GetKeyOptions{ + ID: &id, + } + _, response, err := sess.GetKey(getKeyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting SSH Key (%s): %s\n%s", id, err, response) + } + + options := &vpcclassicv1.DeleteKeyOptions{ + ID: &id, + } + response, err = sess.DeleteKey(options) + if err != nil { + return fmt.Errorf("Error Deleting SSH Key : %s\n%s", err, response) + } + d.SetId("") + return nil +} + +func keyDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getKeyOptions := &vpcv1.GetKeyOptions{ + ID: &id, + } + _, response, err := sess.GetKey(getKeyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting SSH Key (%s): %s\n%s", id, err, response) + } + + options := &vpcv1.DeleteKeyOptions{ + ID: &id, + } + response, err = sess.DeleteKey(options) + if err != nil { + return fmt.Errorf("Error Deleting SSH Key : %s\n%s", err, response) + } + d.SetId("") + return nil +} + +func resourceIBMISSSHKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + + if userDetails.generation == 1 { + exists, err := classicKeyExists(d, meta, id) + return exists, err + } else { + exists, err := keyExists(d, meta, id) + return exists, err + } +} + +func classicKeyExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + options := &vpcclassicv1.GetKeyOptions{ + ID: &id, + } + _, response, err := sess.GetKey(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting SSH Key: %s\n%s", err, response) + } + + return true, nil +} + +func keyExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + options := &vpcv1.GetKeyOptions{ + ID: &id, + } + _, response, err := sess.GetKey(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting SSH Key: %s\n%s", err, response) + } + return true, nil 
+} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_subnet.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_subnet.go new file mode 100644 index 00000000000..f85da491e5a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_subnet.go @@ -0,0 +1,1041 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isSubnetIpv4CidrBlock = "ipv4_cidr_block" + isSubnetIpv6CidrBlock = "ipv6_cidr_block" + isSubnetTotalIpv4AddressCount = "total_ipv4_address_count" + isSubnetIPVersion = "ip_version" + isSubnetName = "name" + isSubnetTags = "tags" + isSubnetCRN = "crn" + isSubnetNetworkACL = "network_acl" + isSubnetPublicGateway = "public_gateway" + isSubnetStatus = "status" + isSubnetVPC = "vpc" + isSubnetZone = "zone" + isSubnetAvailableIpv4AddressCount = "available_ipv4_address_count" + isSubnetResourceGroup = "resource_group" + + isSubnetProvisioning = "provisioning" + isSubnetProvisioningDone = "done" + isSubnetDeleting = "deleting" + isSubnetDeleted = "done" + isSubnetRoutingTableID = "routing_table" +) + +func resourceIBMISSubnet() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISSubnetCreate, + Read: resourceIBMISSubnetRead, + Update: resourceIBMISSubnetUpdate, + Delete: resourceIBMISSubnetDelete, + Exists: resourceIBMISSubnetExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + isSubnetIpv4CidrBlock: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + ConflictsWith: []string{isSubnetTotalIpv4AddressCount}, + ValidateFunc: InvokeValidator("ibm_is_subnet", isSubnetIpv4CidrBlock), + Description: "IPV4 subnet - CIDR block", + }, + + isSubnetIpv6CidrBlock: { + Type: schema.TypeString, + Computed: true, + Description: "IPV6 subnet - CIDR block", + }, + + isSubnetAvailableIpv4AddressCount: { + Type: schema.TypeInt, + Computed: true, + Description: "The number of IPv4 addresses in this subnet that are not in-use, and have not been reserved by the user or the provider.", + }, + + isSubnetTotalIpv4AddressCount: { + Type: schema.TypeInt, + ForceNew: true, + Optional: true, + Computed: true, + ConflictsWith: []string{isSubnetIpv4CidrBlock}, + Description: "The total number of IPv4 addresses in this subnet.", + }, + isSubnetIPVersion: { + Type: schema.TypeString, + ForceNew: true, + Default: "ipv4", + Optional: true, + ValidateFunc: validateIPVersion, + Description: "The IP version(s) to support for this subnet.", + }, + + isSubnetName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_subnet", isSubnetName), + Description: "Subnet name", + }, + + isSubnetTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: 
&schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_subnet", "tag")}, + Set: resourceIBMVPCHash, + Description: "List of tags", + }, + + isSubnetCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + isSubnetNetworkACL: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: false, + Description: "The network ACL for this subnet", + }, + + isSubnetPublicGateway: { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Description: "Public Gateway of the subnet", + }, + + isSubnetStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the subnet", + }, + + isSubnetVPC: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "VPC instance ID", + }, + + isSubnetZone: { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "Subnet zone info", + }, + + isSubnetResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "The resource group for this subnet", + }, + isSubnetRoutingTableID: { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + Description: "routing table id that is associated with the subnet", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMISSubnetValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSubnetName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isSubnetIpv4CidrBlock, + ValidateFunctionIdentifier: ValidateCIDRAddress, + Type: TypeString, + ForceNew: true, + Optional: true}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISSubnetResourceValidator := ResourceValidator{ResourceName: "ibm_is_subnet", Schema: validateSchema} + return &ibmISSubnetResourceValidator +} + +func resourceIBMISSubnetCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + name := d.Get(isSubnetName).(string) + vpc := d.Get(isSubnetVPC).(string) + zone := d.Get(isSubnetZone).(string) + + ipv4cidr := "" + if cidr, ok := d.GetOk(isSubnetIpv4CidrBlock); ok { + ipv4cidr = cidr.(string) + } + ipv4addrcount64 := int64(0) + ipv4addrcount := 0 + if ipv4addrct, ok := d.GetOk(isSubnetTotalIpv4AddressCount); ok { + ipv4addrcount = ipv4addrct.(int) + ipv4addrcount64 = 
int64(ipv4addrcount) + } + if ipv4cidr == "" && ipv4addrcount == 0 { + return fmt.Errorf("%s or %s need to be provided", isSubnetIpv4CidrBlock, isSubnetTotalIpv4AddressCount) + } + + if ipv4cidr != "" && ipv4addrcount != 0 { + return fmt.Errorf("only one of %s or %s needs to be provided", isSubnetIpv4CidrBlock, isSubnetTotalIpv4AddressCount) + } + isSubnetKey := "subnet_key_" + vpc + "_" + zone + ibmMutexKV.Lock(isSubnetKey) + defer ibmMutexKV.Unlock(isSubnetKey) + + acl := "" + if nwacl, ok := d.GetOk(isSubnetNetworkACL); ok { + acl = nwacl.(string) + } + + gw := "" + if pgw, ok := d.GetOk(isSubnetPublicGateway); ok { + gw = pgw.(string) + } + + // route table association related + rtID := "" + if rt, ok := d.GetOk(isSubnetRoutingTableID); ok { + rtID = rt.(string) + } + if userDetails.generation == 1 { + err := classicSubnetCreate(d, meta, name, vpc, zone, ipv4cidr, acl, gw, ipv4addrcount64) + if err != nil { + return err + } + } else { + err := subnetCreate(d, meta, name, vpc, zone, ipv4cidr, acl, gw, rtID, ipv4addrcount64) + if err != nil { + return err + } + } + + return resourceIBMISSubnetRead(d, meta) +} + +func classicSubnetCreate(d *schema.ResourceData, meta interface{}, name, vpc, zone, ipv4cidr, acl, gw string, ipv4addrcount64 int64) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + subnetTemplate := &vpcclassicv1.SubnetPrototype{ + Name: &name, + VPC: &vpcclassicv1.VPCIdentity{ + ID: &vpc, + }, + Zone: &vpcclassicv1.ZoneIdentity{ + Name: &zone, + }, + } + if ipv4cidr != "" { + subnetTemplate.Ipv4CIDRBlock = &ipv4cidr + } + if ipv4addrcount64 != int64(0) { + subnetTemplate.TotalIpv4AddressCount = &ipv4addrcount64 + } + if gw != "" { + subnetTemplate.PublicGateway = &vpcclassicv1.PublicGatewayIdentity{ + ID: &gw, + } + } + + if acl != "" { + subnetTemplate.NetworkACL = &vpcclassicv1.NetworkACLIdentity{ + ID: &acl, + } + } + //create a subnet + createSubnetOptions := &vpcclassicv1.CreateSubnetOptions{ + SubnetPrototype: subnetTemplate, + } + subnet, response, err := sess.CreateSubnet(createSubnetOptions) + if err != nil { + log.Printf("[DEBUG] Subnet err %s\n%s", err, response) + return fmt.Errorf("Error while creating Subnet %s\n%s", err, response) + } + d.SetId(*subnet.ID) + log.Printf("[INFO] Subnet : %s", *subnet.ID) + _, err = isWaitForClassicSubnetAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isSubnetTags); ok || v != "" { + oldList, newList := d.GetChange(isSubnetTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *subnet.CRN) + if err != nil { + log.Printf( + "Error on create of resource subnet (%s) tags: %s", d.Id(), err) + } + } + + return nil +} + +func subnetCreate(d *schema.ResourceData, meta interface{}, name, vpc, zone, ipv4cidr, acl, gw, rtID string, ipv4addrcount64 int64) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + subnetTemplate := &vpcv1.SubnetPrototype{ + Name: &name, + VPC: &vpcv1.VPCIdentity{ + ID: &vpc, + }, + Zone: &vpcv1.ZoneIdentity{ + Name: &zone, + }, + } + if ipv4cidr != "" { + subnetTemplate.Ipv4CIDRBlock = &ipv4cidr + } + if ipv4addrcount64 != int64(0) { + subnetTemplate.TotalIpv4AddressCount = &ipv4addrcount64 + } + if gw != "" { + subnetTemplate.PublicGateway = &vpcv1.PublicGatewayIdentity{ + ID: &gw, + } + } + + if acl != "" { + subnetTemplate.NetworkACL = &vpcv1.NetworkACLIdentity{ + ID: &acl, + } + } + if rtID != "" { + rt := rtID + subnetTemplate.RoutingTable = 
&vpcv1.RoutingTableIdentity{ + ID: &rt, + } + } + rg := "" + if grp, ok := d.GetOk(isSubnetResourceGroup); ok { + rg = grp.(string) + subnetTemplate.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + //create a subnet + createSubnetOptions := &vpcv1.CreateSubnetOptions{ + SubnetPrototype: subnetTemplate, + } + subnet, response, err := sess.CreateSubnet(createSubnetOptions) + if err != nil { + log.Printf("[DEBUG] Subnet err %s\n%s", err, response) + return fmt.Errorf("Error while creating Subnet %s\n%s", err, response) + } + d.SetId(*subnet.ID) + log.Printf("[INFO] Subnet : %s", *subnet.ID) + _, err = isWaitForSubnetAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isSubnetTags); ok || v != "" { + oldList, newList := d.GetChange(isSubnetTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *subnet.CRN) + if err != nil { + log.Printf( + "Error on create of resource subnet (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func isWaitForClassicSubnetAvailable(subnetC *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for subnet (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isSubnetProvisioning}, + Target: []string{isSubnetProvisioningDone, ""}, + Refresh: isClassicSubnetRefreshFunc(subnetC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicSubnetRefreshFunc(subnetC *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getSubnetOptions := &vpcclassicv1.GetSubnetOptions{ + ID: &id, + } + subnet, response, err := subnetC.GetSubnet(getSubnetOptions) + if err != nil { + return nil, "", fmt.Errorf("Error getting Subnet : %s\n%s", err, response) + } + + if *subnet.Status == "available" || *subnet.Status == "failed" { + return subnet, isSubnetProvisioningDone, nil + } + + return subnet, isSubnetProvisioning, nil + } +} + +func isWaitForSubnetAvailable(subnetC *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for subnet (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isSubnetProvisioning}, + Target: []string{isSubnetProvisioningDone, ""}, + Refresh: isSubnetRefreshFunc(subnetC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isSubnetRefreshFunc(subnetC *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getSubnetOptions := &vpcv1.GetSubnetOptions{ + ID: &id, + } + subnet, response, err := subnetC.GetSubnet(getSubnetOptions) + if err != nil { + return nil, "", fmt.Errorf("Error getting Subnet : %s\n%s", err, response) + } + + if *subnet.Status == "available" || *subnet.Status == "failed" { + return subnet, isSubnetProvisioningDone, nil + } + + return subnet, isSubnetProvisioning, nil + } +} + +func resourceIBMISSubnetRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicSubnetGet(d, meta, id) + if err != nil { + return err + } + } else { + err := subnetGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} +func 
classicSubnetGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getSubnetOptions := &vpcclassicv1.GetSubnetOptions{ + ID: &id, + } + subnet, response, err := sess.GetSubnet(getSubnetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Subnet (%s): %s\n%s", id, err, response) + } + d.Set(isSubnetName, *subnet.Name) + d.Set(isSubnetIpv4CidrBlock, *subnet.Ipv4CIDRBlock) + // d.Set(isSubnetIpv6CidrBlock, *subnet.IPV6CidrBlock) + d.Set(isSubnetAvailableIpv4AddressCount, *subnet.AvailableIpv4AddressCount) + d.Set(isSubnetTotalIpv4AddressCount, *subnet.TotalIpv4AddressCount) + if subnet.NetworkACL != nil { + d.Set(isSubnetNetworkACL, *subnet.NetworkACL.ID) + } + if subnet.PublicGateway != nil { + d.Set(isSubnetPublicGateway, *subnet.PublicGateway.ID) + } else { + d.Set(isSubnetPublicGateway, nil) + } + d.Set(isSubnetStatus, *subnet.Status) + d.Set(isSubnetZone, *subnet.Zone.Name) + d.Set(isSubnetVPC, *subnet.VPC.ID) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + tags, err := GetTagsUsingCRN(meta, *subnet.CRN) + if err != nil { + log.Printf( + "Error on get of resource subnet (%s) tags: %s", d.Id(), err) + } + d.Set(isSubnetTags, tags) + d.Set(isSubnetCRN, *subnet.CRN) + d.Set(ResourceControllerURL, controller+"/vpc/network/subnets") + d.Set(ResourceName, *subnet.Name) + d.Set(ResourceCRN, *subnet.CRN) + d.Set(ResourceStatus, *subnet.Status) + return nil +} + +func subnetGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getSubnetOptions := &vpcv1.GetSubnetOptions{ + ID: &id, + } + subnet, response, err := sess.GetSubnet(getSubnetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Subnet (%s): %s\n%s", id, err, response) + } + d.Set(isSubnetName, *subnet.Name) + d.Set(isSubnetIPVersion, *subnet.IPVersion) + d.Set(isSubnetIpv4CidrBlock, *subnet.Ipv4CIDRBlock) + // d.Set(isSubnetIpv6CidrBlock, *subnet.IPV6CidrBlock) + d.Set(isSubnetAvailableIpv4AddressCount, *subnet.AvailableIpv4AddressCount) + d.Set(isSubnetTotalIpv4AddressCount, *subnet.TotalIpv4AddressCount) + if subnet.NetworkACL != nil { + d.Set(isSubnetNetworkACL, *subnet.NetworkACL.ID) + } + if subnet.PublicGateway != nil { + d.Set(isSubnetPublicGateway, *subnet.PublicGateway.ID) + } else { + d.Set(isSubnetPublicGateway, nil) + } + if subnet.RoutingTable != nil { + d.Set(isSubnetRoutingTableID, *subnet.RoutingTable.ID) + } else { + d.Set(isSubnetRoutingTableID, nil) + } + d.Set(isSubnetStatus, *subnet.Status) + d.Set(isSubnetZone, *subnet.Zone.Name) + d.Set(isSubnetVPC, *subnet.VPC.ID) + + controller, err := getBaseController(meta) + if err != nil { + return err + } + tags, err := GetTagsUsingCRN(meta, *subnet.CRN) + if err != nil { + log.Printf( + "Error on get of resource subnet (%s) tags: %s", d.Id(), err) + } + d.Set(isSubnetTags, tags) + d.Set(isSubnetCRN, *subnet.CRN) + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/subnets") + d.Set(ResourceName, *subnet.Name) + d.Set(ResourceCRN, *subnet.CRN) + d.Set(ResourceStatus, *subnet.Status) + if subnet.ResourceGroup != nil { + d.Set(isSubnetResourceGroup, *subnet.ResourceGroup.ID) + d.Set(ResourceGroupName, *subnet.ResourceGroup.Name) + } + return nil +} + +func resourceIBMISSubnetUpdate(d 
*schema.ResourceData, meta interface{}) error {
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+	id := d.Id()
+	if d.HasChange(isSubnetTags) {
+		oldList, newList := d.GetChange(isSubnetTags)
+		err = UpdateTagsUsingCRN(oldList, newList, meta, d.Get(isSubnetCRN).(string))
+		if err != nil {
+			log.Printf(
+				"Error on update of resource subnet (%s) tags: %s", d.Id(), err)
+		}
+	}
+	if userDetails.generation == 1 {
+		err := classicSubnetUpdate(d, meta, id)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := subnetUpdate(d, meta, id)
+		if err != nil {
+			return err
+		}
+	}
+	return resourceIBMISSubnetRead(d, meta)
+}
+
+func classicSubnetUpdate(d *schema.ResourceData, meta interface{}, id string) error {
+	sess, err := classicVpcClient(meta)
+	if err != nil {
+		return err
+	}
+	hasChanged := false
+	name := ""
+	acl := ""
+	updateSubnetOptions := &vpcclassicv1.UpdateSubnetOptions{}
+	// Start from an empty patch so only fields that actually changed are sent.
+	subnetPatchModel := &vpcclassicv1.SubnetPatch{}
+	if d.HasChange(isSubnetName) {
+		name = d.Get(isSubnetName).(string)
+		subnetPatchModel.Name = &name
+		hasChanged = true
+	}
+	if d.HasChange(isSubnetNetworkACL) {
+		acl = d.Get(isSubnetNetworkACL).(string)
+		subnetPatchModel.NetworkACL = &vpcclassicv1.NetworkACLIdentity{
+			ID: &acl,
+		}
+		hasChanged = true
+	}
+	if d.HasChange(isSubnetPublicGateway) {
+		gw := d.Get(isSubnetPublicGateway).(string)
+		if gw == "" {
+			unsetSubnetPublicGatewayOptions := &vpcclassicv1.UnsetSubnetPublicGatewayOptions{
+				ID: &id,
+			}
+			response, err := sess.UnsetSubnetPublicGateway(unsetSubnetPublicGatewayOptions)
+			if err != nil {
+				return fmt.Errorf("Error Detaching the public gateway attached to the subnet : %s\n%s", err, response)
+			}
+			_, err = isWaitForClassicSubnetAvailable(sess, d.Id(), d.Timeout(schema.TimeoutUpdate))
+			if err != nil {
+				return err
+			}
+		} else {
+			setSubnetPublicGatewayOptions := &vpcclassicv1.SetSubnetPublicGatewayOptions{
+				ID: &id,
+				PublicGatewayIdentity: &vpcclassicv1.PublicGatewayIdentity{
+					ID: &gw,
+				},
+			}
+			_, response, err := sess.SetSubnetPublicGateway(setSubnetPublicGatewayOptions)
+			if err != nil {
+				return fmt.Errorf("Error Attaching public gateway to the subnet : %s\n%s", err, response)
+			}
+			_, err = isWaitForClassicSubnetAvailable(sess, d.Id(), d.Timeout(schema.TimeoutUpdate))
+			if err != nil {
+				return err
+			}
+		}
+	}
+	if hasChanged {
+		subnetPatch, err := subnetPatchModel.AsPatch()
+		if err != nil {
+			return fmt.Errorf("Error calling asPatch for SubnetPatch: %s", err)
+		}
+		updateSubnetOptions.SubnetPatch = subnetPatch
+		updateSubnetOptions.ID = &id
+		_, response, err := sess.UpdateSubnet(updateSubnetOptions)
+		if err != nil {
+			return fmt.Errorf("Error Updating Subnet : %s\n%s", err, response)
+		}
+	}
+	return nil
+}
+
+func subnetUpdate(d *schema.ResourceData, meta interface{}, id string) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	hasChanged := false
+	name := ""
+	acl := ""
+	updateSubnetOptions := &vpcv1.UpdateSubnetOptions{}
+	subnetPatchModel := &vpcv1.SubnetPatch{}
+	if d.HasChange(isSubnetName) {
+		name = d.Get(isSubnetName).(string)
+		subnetPatchModel.Name = &name
+		hasChanged = true
+	}
+	if d.HasChange(isSubnetNetworkACL) {
+		acl = d.Get(isSubnetNetworkACL).(string)
+		subnetPatchModel.NetworkACL = &vpcv1.NetworkACLIdentity{
+			ID: &acl,
+		}
+		hasChanged = true
+	}
+	if d.HasChange(isSubnetPublicGateway) {
+		gw := d.Get(isSubnetPublicGateway).(string)
+		if gw == "" {
+			unsetSubnetPublicGatewayOptions :=
&vpcv1.UnsetSubnetPublicGatewayOptions{ + ID: &id, + } + response, err := sess.UnsetSubnetPublicGateway(unsetSubnetPublicGatewayOptions) + if err != nil { + return fmt.Errorf("Error Detaching the public gateway attached to the subnet : %s\n%s", err, response) + } + _, err = isWaitForSubnetAvailable(sess, d.Id(), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } else { + setSubnetPublicGatewayOptions := &vpcv1.SetSubnetPublicGatewayOptions{ + ID: &id, + PublicGatewayIdentity: &vpcv1.PublicGatewayIdentity{ + ID: &gw, + }, + } + _, response, err := sess.SetSubnetPublicGateway(setSubnetPublicGatewayOptions) + if err != nil { + return fmt.Errorf("Error Attaching public gateway to the subnet : %s\n%s", err, response) + } + _, err = isWaitForSubnetAvailable(sess, d.Id(), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } + if d.HasChange(isSubnetRoutingTableID) { + hasChanged = true + rtID := d.Get(isSubnetRoutingTableID).(string) + // Construct an instance of the RoutingTableIdentityByID model + routingTableIdentityModel := new(vpcv1.RoutingTableIdentityByID) + routingTableIdentityModel.ID = &rtID + subnetPatchModel.RoutingTable = routingTableIdentityModel + /*rt := &vpcv1.RoutingTableIdentity{ + ID: corev3.StringPtr(rtID), + } + setSubnetRoutingTableBindingOptions := sess.NewReplaceSubnetRoutingTableOptions(id, rt) + setSubnetRoutingTableBindingOptions.SetRoutingTableIdentity(rt) + setSubnetRoutingTableBindingOptions.SetID(id) + _, _, err = sess.ReplaceSubnetRoutingTable(setSubnetRoutingTableBindingOptions) + if err != nil { + log.Printf("SetSubnetRoutingTableBinding eroor: %s", err) + return err + }*/ + } + if hasChanged { + subnetPatch, err := subnetPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for SubnetPatch: %s", err) + } + updateSubnetOptions.SubnetPatch = subnetPatch + updateSubnetOptions.ID = &id + _, response, err := sess.UpdateSubnet(updateSubnetOptions) + if err != nil { + return fmt.Errorf("Error Updating Subnet : %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISSubnetDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicSubnetDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := subnetDelete(d, meta, id) + if err != nil { + return err + } + } + + d.SetId("") + return nil +} + +func classicSubnetDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getSubnetOptions := &vpcclassicv1.GetSubnetOptions{ + ID: &id, + } + subnet, response, err := sess.GetSubnet(getSubnetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting Subnet (%s): %s\n%s", id, err, response) + } + if subnet.PublicGateway != nil { + unsetSubnetPublicGatewayOptions := &vpcclassicv1.UnsetSubnetPublicGatewayOptions{ + ID: &id, + } + _, err = sess.UnsetSubnetPublicGateway(unsetSubnetPublicGatewayOptions) + if err != nil { + return err + } + _, err = isWaitForClassicSubnetAvailable(sess, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + } + deleteSubnetOptions := &vpcclassicv1.DeleteSubnetOptions{ + ID: &id, + } + response, err = sess.DeleteSubnet(deleteSubnetOptions) + if err != nil { + return fmt.Errorf("Error Deleting Subnet : %s\n%s", err, 
response) + } + _, err = isWaitForClassicSubnetDeleted(sess, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func subnetDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getSubnetOptions := &vpcv1.GetSubnetOptions{ + ID: &id, + } + subnet, response, err := sess.GetSubnet(getSubnetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting Subnet (%s): %s\n%s", id, err, response) + } + if subnet.PublicGateway != nil { + unsetSubnetPublicGatewayOptions := &vpcv1.UnsetSubnetPublicGatewayOptions{ + ID: &id, + } + _, err = sess.UnsetSubnetPublicGateway(unsetSubnetPublicGatewayOptions) + if err != nil { + return err + } + _, err = isWaitForSubnetAvailable(sess, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + } + deleteSubnetOptions := &vpcv1.DeleteSubnetOptions{ + ID: &id, + } + response, err = sess.DeleteSubnet(deleteSubnetOptions) + if err != nil { + return fmt.Errorf("Error Deleting Subnet : %s\n%s", err, response) + } + _, err = isWaitForSubnetDeleted(sess, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func isWaitForClassicSubnetDeleted(subnetC *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for subnet (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isSubnetDeleting}, + Target: []string{isSubnetDeleted, ""}, + Refresh: isClassicSubnetDeleteRefreshFunc(subnetC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicSubnetDeleteRefreshFunc(subnetC *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getSubnetOptions := &vpcclassicv1.GetSubnetOptions{ + ID: &id, + } + subnet, response, err := subnetC.GetSubnet(getSubnetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return subnet, isSubnetDeleted, nil + } + return subnet, "", fmt.Errorf("The Subnet %s failed to delete: %s\n%s", id, err, response) + } + return subnet, isSubnetDeleting, err + } +} + +func isWaitForSubnetDeleted(subnetC *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for subnet (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isSubnetDeleting}, + Target: []string{isSubnetDeleted, ""}, + Refresh: isSubnetDeleteRefreshFunc(subnetC, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isSubnetDeleteRefreshFunc(subnetC *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getSubnetOptions := &vpcv1.GetSubnetOptions{ + ID: &id, + } + subnet, response, err := subnetC.GetSubnet(getSubnetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return subnet, isSubnetDeleted, nil + } + if response != nil && strings.Contains(err.Error(), "please detach all network interfaces from subnet before deleting it") { + return subnet, isSubnetDeleting, nil + } + return subnet, "", fmt.Errorf("The Subnet %s failed to 
delete: %s\n%s", id, err, response) + } + return subnet, isSubnetDeleting, err + } +} + +func resourceIBMISSubnetExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + if userDetails.generation == 1 { + exists, err := classicSubnetExists(d, meta, id) + return exists, err + } else { + exists, err := subnetExists(d, meta, id) + return exists, err + } +} + +func classicSubnetExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getsubnetOptions := &vpcclassicv1.GetSubnetOptions{ + ID: &id, + } + _, response, err := sess.GetSubnet(getsubnetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Subnet: %s\n%s", err, response) + } + return true, nil +} + +func subnetExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getsubnetOptions := &vpcv1.GetSubnetOptions{ + ID: &id, + } + _, response, err := sess.GetSubnet(getsubnetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Subnet: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_subnet_network_acl_attachment.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_subnet_network_acl_attachment.go new file mode 100644 index 00000000000..b2ffc43db0b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_subnet_network_acl_attachment.go @@ -0,0 +1,438 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isSubnetID = "subnet" + isNetworkACLID = "network_acl" +) + +func resourceIBMISSubnetNetworkACLAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISSubnetNetworkACLAttachmentCreate, + Read: resourceIBMISSubnetNetworkACLAttachmentRead, + Update: resourceIBMISSubnetNetworkACLAttachmentUpdate, + Delete: resourceIBMISSubnetNetworkACLAttachmentDelete, + Exists: resourceIBMISSubnetNetworkACLAttachmentExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + isSubnetID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The subnet identifier", + }, + + isNetworkACLID: { + Type: schema.TypeString, + Required: true, + Description: "The unique identifier of network ACL", + }, + + isNetworkACLName: { + Type: schema.TypeString, + Computed: true, + Description: "Network ACL name", + }, + + isNetworkACLVPC: { + Type: schema.TypeString, + Computed: true, + Description: "Network ACL VPC", + }, + + isNetworkACLResourceGroup: { + Type: schema.TypeString, + Computed: true, + Description: "Resource group ID for the network ACL", + }, + + isNetworkACLRules: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isNetworkACLRuleID: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this Network ACL rule", + }, + isNetworkACLRuleName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this rule", + }, + isNetworkACLRuleAction: { + Type: schema.TypeString, + Computed: true, + Description: "Whether to allow or deny matching traffic", + }, + isNetworkACLRuleIPVersion: { + Type: schema.TypeString, + Computed: true, + Description: "The IP version for this rule", + }, + isNetworkACLRuleSource: { + Type: schema.TypeString, + Computed: true, + Description: "The source CIDR block", + }, + isNetworkACLRuleDestination: { + Type: schema.TypeString, + Computed: true, + Description: "The destination CIDR block", + }, + isNetworkACLRuleDirection: { + Type: schema.TypeString, + Computed: true, + Description: "Direction of traffic to enforce, either inbound or outbound", + }, + isNetworkACLRuleICMP: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isNetworkACLRuleICMPCode: { + Type: schema.TypeInt, + Computed: true, + Description: "The ICMP traffic code to allow", + }, + isNetworkACLRuleICMPType: { + Type: schema.TypeInt, + Computed: true, + Description: "The ICMP traffic type to allow", + }, + }, + }, + }, + + isNetworkACLRuleTCP: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isNetworkACLRulePortMax: { + Type: schema.TypeInt, + Computed: true, + Description: "The inclusive upper bound of TCP destination port range", + }, + isNetworkACLRulePortMin: { + Type: schema.TypeInt, + Computed: true, + Description: "The inclusive lower bound of TCP destination port range", + }, + isNetworkACLRuleSourcePortMax: { + Type: schema.TypeInt, + Computed: true, + Description: "The 
inclusive upper bound of TCP source port range", + }, + isNetworkACLRuleSourcePortMin: { + Type: schema.TypeInt, + Computed: true, + Description: "The inclusive lower bound of TCP source port range", + }, + }, + }, + }, + + isNetworkACLRuleUDP: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isNetworkACLRulePortMax: { + Type: schema.TypeInt, + Computed: true, + Description: "The inclusive upper bound of UDP destination port range", + }, + isNetworkACLRulePortMin: { + Type: schema.TypeInt, + Computed: true, + Description: "The inclusive lower bound of UDP destination port range", + }, + isNetworkACLRuleSourcePortMax: { + Type: schema.TypeInt, + Computed: true, + Description: "The inclusive upper bound of UDP source port range", + }, + isNetworkACLRuleSourcePortMin: { + Type: schema.TypeInt, + Computed: true, + Description: "The inclusive lower bound of UDP source port range", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceIBMISSubnetNetworkACLAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + subnet := d.Get(isSubnetID).(string) + networkACL := d.Get(isNetworkACLID).(string) + + // Construct an instance of the NetworkACLIdentityByID model + networkACLIdentityModel := new(vpcv1.NetworkACLIdentityByID) + networkACLIdentityModel.ID = &networkACL + + // Construct an instance of the ReplaceSubnetNetworkACLOptions model + replaceSubnetNetworkACLOptionsModel := new(vpcv1.ReplaceSubnetNetworkACLOptions) + replaceSubnetNetworkACLOptionsModel.ID = &subnet + replaceSubnetNetworkACLOptionsModel.NetworkACLIdentity = networkACLIdentityModel + resultACL, response, err := sess.ReplaceSubnetNetworkACL(replaceSubnetNetworkACLOptionsModel) + + if err != nil { + log.Printf("[DEBUG] Error while attaching a network ACL to a subnet %s\n%s", err, response) + return fmt.Errorf("Error while attaching a network ACL to a subnet %s\n%s", err, response) + } + d.SetId(subnet) + log.Printf("[INFO] Network ACL : %s", *resultACL.ID) + log.Printf("[INFO] Subnet ID : %s", subnet) + + return resourceIBMISSubnetNetworkACLAttachmentRead(d, meta) +} + +func resourceIBMISSubnetNetworkACLAttachmentRead(d *schema.ResourceData, meta interface{}) error { + id := d.Id() + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getSubnetNetworkACLOptionsModel := &vpcv1.GetSubnetNetworkACLOptions{ + ID: &id, + } + nwacl, response, err := sess.GetSubnetNetworkACL(getSubnetNetworkACLOptionsModel) + + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting subnet's (%s) attached network ACL: %s\n%s", id, err, response) + } + d.Set(isNetworkACLName, *nwacl.Name) + d.Set(isNetworkACLVPC, *nwacl.VPC.ID) + if nwacl.ResourceGroup != nil { + d.Set(isNetworkACLResourceGroup, *nwacl.ResourceGroup.ID) + } + + rules := make([]interface{}, 0) + if len(nwacl.Rules) > 0 { + for _, rulex := range nwacl.Rules { + log.Println("[DEBUG] Type of the Rule", reflect.TypeOf(rulex)) + rule := make(map[string]interface{}) + switch reflect.TypeOf(rulex).String() { + case "*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp": + { + rulex := rulex.(*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolIcmp) + rule[isNetworkACLRuleID] = *rulex.ID + rule[isNetworkACLRuleName] = *rulex.Name + rule[isNetworkACLRuleAction] = *rulex.Action + rule[isNetworkACLRuleIPVersion] = *rulex.IPVersion + rule[isNetworkACLRuleSource] = 
*rulex.Source
+					rule[isNetworkACLRuleDestination] = *rulex.Destination
+					rule[isNetworkACLRuleDirection] = *rulex.Direction
+					rule[isNetworkACLRuleTCP] = make([]map[string]int, 0, 0)
+					rule[isNetworkACLRuleUDP] = make([]map[string]int, 0, 0)
+					icmp := make([]map[string]int, 1, 1)
+					if rulex.Code != nil && rulex.Type != nil {
+						icmp[0] = map[string]int{
+							isNetworkACLRuleICMPCode: int(*rulex.Code),
+							// type and code are separate ICMP fields
+							isNetworkACLRuleICMPType: int(*rulex.Type),
+						}
+					}
+					rule[isNetworkACLRuleICMP] = icmp
+				}
+			case "*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp":
+				{
+					rulex := rulex.(*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolTcpudp)
+					rule[isNetworkACLRuleID] = *rulex.ID
+					rule[isNetworkACLRuleName] = *rulex.Name
+					rule[isNetworkACLRuleAction] = *rulex.Action
+					rule[isNetworkACLRuleIPVersion] = *rulex.IPVersion
+					rule[isNetworkACLRuleSource] = *rulex.Source
+					rule[isNetworkACLRuleDestination] = *rulex.Destination
+					rule[isNetworkACLRuleDirection] = *rulex.Direction
+					if *rulex.Protocol == "tcp" {
+						rule[isNetworkACLRuleICMP] = make([]map[string]int, 0, 0)
+						rule[isNetworkACLRuleUDP] = make([]map[string]int, 0, 0)
+						tcp := make([]map[string]int, 1, 1)
+						tcp[0] = map[string]int{
+							isNetworkACLRuleSourcePortMax: checkNetworkACLNil(rulex.SourcePortMax),
+							isNetworkACLRuleSourcePortMin: checkNetworkACLNil(rulex.SourcePortMin),
+						}
+						tcp[0][isNetworkACLRulePortMax] = checkNetworkACLNil(rulex.DestinationPortMax)
+						tcp[0][isNetworkACLRulePortMin] = checkNetworkACLNil(rulex.DestinationPortMin)
+						rule[isNetworkACLRuleTCP] = tcp
+					} else if *rulex.Protocol == "udp" {
+						rule[isNetworkACLRuleICMP] = make([]map[string]int, 0, 0)
+						rule[isNetworkACLRuleTCP] = make([]map[string]int, 0, 0)
+						udp := make([]map[string]int, 1, 1)
+						udp[0] = map[string]int{
+							isNetworkACLRuleSourcePortMax: checkNetworkACLNil(rulex.SourcePortMax),
+							isNetworkACLRuleSourcePortMin: checkNetworkACLNil(rulex.SourcePortMin),
+						}
+						udp[0][isNetworkACLRulePortMax] = checkNetworkACLNil(rulex.DestinationPortMax)
+						udp[0][isNetworkACLRulePortMin] = checkNetworkACLNil(rulex.DestinationPortMin)
+						rule[isNetworkACLRuleUDP] = udp
+					}
+				}
+			case "*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolAll":
+				{
+					rulex := rulex.(*vpcv1.NetworkACLRuleItemNetworkACLRuleProtocolAll)
+					rule[isNetworkACLRuleID] = *rulex.ID
+					rule[isNetworkACLRuleName] = *rulex.Name
+					rule[isNetworkACLRuleAction] = *rulex.Action
+					rule[isNetworkACLRuleIPVersion] = *rulex.IPVersion
+					rule[isNetworkACLRuleSource] = *rulex.Source
+					rule[isNetworkACLRuleDestination] = *rulex.Destination
+					rule[isNetworkACLRuleDirection] = *rulex.Direction
+					rule[isNetworkACLRuleICMP] = make([]map[string]int, 0, 0)
+					rule[isNetworkACLRuleTCP] = make([]map[string]int, 0, 0)
+					rule[isNetworkACLRuleUDP] = make([]map[string]int, 0, 0)
+				}
+			}
+			rules = append(rules, rule)
+		}
+	}
+	d.Set(isNetworkACLRules, rules)
+	return nil
+}
+
+func resourceIBMISSubnetNetworkACLAttachmentUpdate(d *schema.ResourceData, meta interface{}) error {
+
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	if d.HasChange(isNetworkACLID) {
+		subnet := d.Get(isSubnetID).(string)
+		networkACL := d.Get(isNetworkACLID).(string)
+
+		// Construct an instance of the NetworkACLIdentityByID model
+		networkACLIdentityModel := new(vpcv1.NetworkACLIdentityByID)
+		networkACLIdentityModel.ID = &networkACL
+
+		// Construct an instance of the ReplaceSubnetNetworkACLOptions model
+		replaceSubnetNetworkACLOptionsModel := new(vpcv1.ReplaceSubnetNetworkACLOptions)
+		replaceSubnetNetworkACLOptionsModel.ID = &subnet
+
replaceSubnetNetworkACLOptionsModel.NetworkACLIdentity = networkACLIdentityModel + resultACL, response, err := sess.ReplaceSubnetNetworkACL(replaceSubnetNetworkACLOptionsModel) + + if err != nil { + log.Printf("[DEBUG] Error while attaching a network ACL to a subnet %s\n%s", err, response) + return fmt.Errorf("Error while attaching a network ACL to a subnet %s\n%s", err, response) + } + log.Printf("[INFO] Updated subnet %s with Network ACL : %s", subnet, *resultACL.ID) + + d.SetId(subnet) + return resourceIBMISSubnetNetworkACLAttachmentRead(d, meta) + } + + return resourceIBMISSubnetNetworkACLAttachmentRead(d, meta) +} + +func resourceIBMISSubnetNetworkACLAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + id := d.Id() + sess, err := vpcClient(meta) + if err != nil { + return err + } + // Set the subnet with VPC default network ACL + getSubnetOptions := &vpcv1.GetSubnetOptions{ + ID: &id, + } + subnet, response, err := sess.GetSubnet(getSubnetOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Subnet (%s): %s\n%s", id, err, response) + } + // Fetch VPC + vpcID := *subnet.VPC.ID + + getvpcOptions := &vpcv1.GetVPCOptions{ + ID: &vpcID, + } + vpc, response, err := sess.GetVPC(getvpcOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting VPC : %s\n%s", err, response) + } + + // Fetch default network ACL + if vpc.DefaultNetworkACL != nil { + log.Printf("[DEBUG] vpc default network acl is not null :%s", *vpc.DefaultNetworkACL.ID) + // Construct an instance of the NetworkACLIdentityByID model + networkACLIdentityModel := new(vpcv1.NetworkACLIdentityByID) + networkACLIdentityModel.ID = vpc.DefaultNetworkACL.ID + + // Construct an instance of the ReplaceSubnetNetworkACLOptions model + replaceSubnetNetworkACLOptionsModel := new(vpcv1.ReplaceSubnetNetworkACLOptions) + replaceSubnetNetworkACLOptionsModel.ID = &id + replaceSubnetNetworkACLOptionsModel.NetworkACLIdentity = networkACLIdentityModel + resultACL, response, err := sess.ReplaceSubnetNetworkACL(replaceSubnetNetworkACLOptionsModel) + + if err != nil { + log.Printf("[DEBUG] Error while attaching a network ACL to a subnet %s\n%s", err, response) + return fmt.Errorf("Error while attaching a network ACL to a subnet %s\n%s", err, response) + } + log.Printf("[INFO] Updated subnet %s with VPC default Network ACL : %s", id, *resultACL.ID) + } else { + log.Printf("[DEBUG] vpc default network acl is null") + } + + d.SetId("") + return nil +} + +func resourceIBMISSubnetNetworkACLAttachmentExists(d *schema.ResourceData, meta interface{}) (bool, error) { + id := d.Id() + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getSubnetNetworkACLOptionsModel := &vpcv1.GetSubnetNetworkACLOptions{ + ID: &id, + } + _, response, err := sess.GetSubnetNetworkACL(getSubnetNetworkACLOptionsModel) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting subnet's attached network ACL: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_subnet_reserved_ip.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_subnet_reserved_ip.go new file mode 100644 index 00000000000..0525755eecf --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_subnet_reserved_ip.go @@ -0,0 +1,303 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isReservedIPProvisioning = "provisioning" + isReservedIPProvisioningDone = "done" + isReservedIP = "reserved_ip" + isReservedIPTarget = "target" +) + +func resourceIBMISReservedIP() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISReservedIPCreate, + Read: resourceIBMISReservedIPRead, + Update: resourceIBMISReservedIPUpdate, + Delete: resourceIBMISReservedIPDelete, + Exists: resourceIBMISReservedIPExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + /* + Request Parameters + ================== + These are the mandatory request parameters + */ + isSubNetID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The subnet identifier.", + }, + isReservedIPAutoDelete: { + Type: schema.TypeBool, + Default: nil, + Computed: true, + Optional: true, + Description: "If set to true, this reserved IP will be automatically deleted", + }, + isReservedIPName: { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: InvokeValidator("ibm_is_subnet_reserved_ip", isReservedIPName), + Description: "The user-defined or system-provided name for this reserved IP.", + }, + isReservedIPTarget: { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "The unique identifier for the target.", + }, + /* + Response Parameters + =================== + All of these are computed and a user doesn't need to provide + these from outside.
+ */ + + isReservedIPAddress: { + Type: schema.TypeString, + Computed: true, + Description: "The address for this reserved IP.", + }, + isReservedIP: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier of the reserved IP.", + }, + isReservedIPCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the reserved IP was created.", + }, + isReservedIPhref: { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this reserved IP.", + }, + isReservedIPOwner: { + Type: schema.TypeString, + Computed: true, + Description: "The owner of a reserved IP, defining whether it is managed by the user or the provider.", + }, + isReservedIPType: { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + } +} +func resourceIBMISSubnetReservedIPValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isReservedIPName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + + ibmISSubnetReservedIPCResourceValidator := ResourceValidator{ResourceName: "ibm_is_subnet_reserved_ip", Schema: validateSchema} + return &ibmISSubnetReservedIPCResourceValidator +} + +// resourceIBMISReservedIPCreate Creates a reserved IP given a subnet ID +func resourceIBMISReservedIPCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + subnetID := d.Get(isSubNetID).(string) + options := sess.NewCreateSubnetReservedIPOptions(subnetID) + + nameStr := "" + if name, ok := d.GetOk(isReservedIPName); ok { + nameStr = name.(string) + } + if nameStr != "" { + options.Name = &nameStr + } + + autoDeleteBool := d.Get(isReservedIPAutoDelete).(bool) + options.AutoDelete = &autoDeleteBool + if t, ok := d.GetOk(isReservedIPTarget); ok { + targetId := t.(string) + options.Target = &vpcv1.ReservedIPTargetPrototype{ + ID: &targetId, + } + } + rip, response, err := sess.CreateSubnetReservedIP(options) + if err != nil || response == nil || rip == nil { + return fmt.Errorf("Error creating the reserved IP: %s\n%s", err, response) + } + + // Set id for the reserved IP as combination of subnet ID and reserved IP ID + d.SetId(fmt.Sprintf("%s/%s", subnetID, *rip.ID)) + + return resourceIBMISReservedIPRead(d, meta) +} + +func resourceIBMISReservedIPRead(d *schema.ResourceData, meta interface{}) error { + rip, err := get(d, meta) + if err != nil { + return err + } + + allIDs, err := idParts(d.Id()) + if err != nil { + return fmt.Errorf("The ID cannot be split into subnet ID and reserved IP ID.
%s", err) + } + subnetID := allIDs[0] + + if rip != nil { + d.Set(isReservedIPAddress, *rip.Address) + d.Set(isReservedIP, *rip.ID) + d.Set(isSubNetID, subnetID) + d.Set(isReservedIPAutoDelete, *rip.AutoDelete) + d.Set(isReservedIPCreatedAt, (*rip.CreatedAt).String()) + d.Set(isReservedIPhref, *rip.Href) + d.Set(isReservedIPName, *rip.Name) + d.Set(isReservedIPOwner, *rip.Owner) + d.Set(isReservedIPType, *rip.ResourceType) + if rip.Target != nil { + target, ok := rip.Target.(*vpcv1.ReservedIPTarget) + if ok { + d.Set(isReservedIPTarget, target.ID) + } + } + } + return nil +} + +func resourceIBMISReservedIPUpdate(d *schema.ResourceData, meta interface{}) error { + + // For updating the name + nameChanged := d.HasChange(isReservedIPName) + autoDeleteChanged := d.HasChange(isReservedIPAutoDelete) + + if nameChanged || autoDeleteChanged { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + allIDs, err := idParts(d.Id()) + if err != nil { + return err + } + subnetID := allIDs[0] + reservedIPID := allIDs[1] + + options := &vpcv1.UpdateSubnetReservedIPOptions{ + SubnetID: &subnetID, + ID: &reservedIPID, + } + + patch := new(vpcv1.ReservedIPPatch) + + if nameChanged { + name := d.Get(isReservedIPName).(string) + patch.Name = core.StringPtr(name) + } + + if autoDeleteChanged { + autoDelete := d.Get(isReservedIPAutoDelete).(bool) + patch.AutoDelete = core.BoolPtr(autoDelete) + } + + reservedIPPatch, err := patch.AsPatch() + if err != nil { + return fmt.Errorf("Error updating the reserved IP %s", err) + } + + options.ReservedIPPatch = reservedIPPatch + + _, response, err := sess.UpdateSubnetReservedIP(options) + if err != nil { + return fmt.Errorf("Error updating the reserved IP %s\n%s", err, response) + } + } + return resourceIBMISReservedIPRead(d, meta) +} + +func resourceIBMISReservedIPDelete(d *schema.ResourceData, meta interface{}) error { + + rip, err := get(d, meta) + if err != nil { + return err + } + if err == nil && rip == nil { + // If there is no such reserved IP, it can not be deleted + return nil + } + + sess, err := vpcClient(meta) + if err != nil { + return err + } + allIDs, err := idParts(d.Id()) + if err != nil { + return err + } + subnetID := allIDs[0] + reservedIPID := allIDs[1] + deleteOptions := sess.NewDeleteSubnetReservedIPOptions(subnetID, reservedIPID) + response, err := sess.DeleteSubnetReservedIP(deleteOptions) + if err != nil || response == nil { + return fmt.Errorf("Error deleting the reserverd ip %s in subnet %s, %s\n%s", reservedIPID, subnetID, err, response) + } + d.SetId("") + return nil +} + +func resourceIBMISReservedIPExists(d *schema.ResourceData, meta interface{}) (bool, error) { + rip, err := get(d, meta) + if err != nil { + return false, err + } + if err == nil && rip == nil { + return false, nil + } + return true, nil +} + +// get is a generic function that gets the reserved ip given subnet id and reserved ip +func get(d *schema.ResourceData, meta interface{}) (*vpcv1.ReservedIP, error) { + sess, err := vpcClient(meta) + if err != nil { + return nil, err + } + allIDs, err := idParts(d.Id()) + subnetID := allIDs[0] + reservedIPID := allIDs[1] + options := sess.NewGetSubnetReservedIPOptions(subnetID, reservedIPID) + rip, response, err := sess.GetSubnetReservedIP(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil, nil + } + return nil, fmt.Errorf("Error Getting Reserved IP : %s\n%s", err, response) + } + return rip, nil +} diff --git 
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_virtual_endpoint_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_virtual_endpoint_gateway.go new file mode 100644 index 00000000000..5ca911d544e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_virtual_endpoint_gateway.go @@ -0,0 +1,424 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/IBM/go-sdk-core/core" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isVirtualEndpointGatewayName = "name" + isVirtualEndpointGatewayResourceType = "resource_type" + isVirtualEndpointGatewayResourceGroupID = "resource_group" + isVirtualEndpointGatewayCreatedAt = "created_at" + isVirtualEndpointGatewayIPs = "ips" + isVirtualEndpointGatewayIPsID = "id" + isVirtualEndpointGatewayIPsAddress = "address" + isVirtualEndpointGatewayIPsName = "name" + isVirtualEndpointGatewayIPsSubnet = "subnet" + isVirtualEndpointGatewayIPsResourceType = "resource_type" + isVirtualEndpointGatewayHealthState = "health_state" + isVirtualEndpointGatewayLifecycleState = "lifecycle_state" + isVirtualEndpointGatewayTarget = "target" + isVirtualEndpointGatewayTargetName = "name" + isVirtualEndpointGatewayTargetCRN = "crn" + isVirtualEndpointGatewayTargetResourceType = "resource_type" + isVirtualEndpointGatewayVpcID = "vpc" + isVirtualEndpointGatewayTags = "tags" +) + +func resourceIBMISEndpointGateway() *schema.Resource { + targetNameFmt := fmt.Sprintf("%s.0.%s", isVirtualEndpointGatewayTarget, isVirtualEndpointGatewayTargetName) + targetCRNFmt := fmt.Sprintf("%s.0.%s", isVirtualEndpointGatewayTarget, isVirtualEndpointGatewayTargetCRN) + return &schema.Resource{ + Create: resourceIBMisVirtualEndpointGatewayCreate, + Read: resourceIBMisVirtualEndpointGatewayRead, + Update: resourceIBMisVirtualEndpointGatewayUpdate, + Delete: resourceIBMisVirtualEndpointGatewayDelete, + Exists: resourceIBMisVirtualEndpointGatewayExists, + Importer: &schema.ResourceImporter{}, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateISName, + Description: "Endpoint gateway name", + }, + isVirtualEndpointGatewayResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway resource type", + }, + isVirtualEndpointGatewayResourceGroupID: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: "The resource group id", + }, + isVirtualEndpointGatewayCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway created date and time", + }, + isVirtualEndpointGatewayHealthState: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway health state", + }, + isVirtualEndpointGatewayLifecycleState: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway lifecycle state", + }, 
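+ // Editor's note (illustrative aside, not part of this change): targetNameFmt
+ // and targetCRNFmt computed above expand to "target.0.name" and "target.0.crn",
+ // and feed the AtLeastOneOf constraints on the target block below. With
+ // terraform-plugin-sdk, AtLeastOneOf references to attributes nested in a
+ // single-item TypeList block must use this zero-indexed address form, e.g.:
+ //
+ //	"name": &schema.Schema{
+ //		Type:         schema.TypeString,
+ //		Optional:     true,
+ //		AtLeastOneOf: []string{"target.0.name", "target.0.crn"},
+ //	}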
+ isVirtualEndpointGatewayIPs: { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Endpoint gateway IPs", + DiffSuppressFunc: applyOnce, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayIPsID: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The IPs id", + }, + isVirtualEndpointGatewayIPsName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The IPs name", + }, + isVirtualEndpointGatewayIPsSubnet: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The Subnet id", + }, + isVirtualEndpointGatewayIPsResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "The VPC Resource Type", + }, + }, + }, + }, + isVirtualEndpointGatewayTarget: { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Description: "Endpoint gateway target", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayTargetName: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: []string{ + targetNameFmt, + targetCRNFmt, + }, + Description: "The target name", + }, + isVirtualEndpointGatewayTargetCRN: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: []string{ + targetNameFmt, + targetCRNFmt, + }, + Description: "The target crn", + }, + isVirtualEndpointGatewayTargetResourceType: { + Type: schema.TypeString, + Required: true, + Description: "The target resource type", + }, + }, + }, + }, + isVirtualEndpointGatewayVpcID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC id", + }, + isVirtualEndpointGatewayTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_virtual_endpoint_gateway", "tag")}, + Set: resourceIBMVPCHash, + Description: "List of tags for VPE", + }, + }, + } +} + +func resourceIBMISEndpointGatewayValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmEndpointGatewayResourceValidator := ResourceValidator{ResourceName: "ibm_is_virtual_endpoint_gateway", Schema: validateSchema} + return &ibmEndpointGatewayResourceValidator +} + +func resourceIBMisVirtualEndpointGatewayCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + name := d.Get(isVirtualEndpointGatewayName).(string) + + // target option + targetOpt := &vpcv1.EndpointGatewayTargetPrototype{} + targetNameFmt := fmt.Sprintf("%s.0.%s", isVirtualEndpointGatewayTarget, isVirtualEndpointGatewayTargetName) + targetCRNFmt := fmt.Sprintf("%s.0.%s", isVirtualEndpointGatewayTarget, isVirtualEndpointGatewayTargetCRN) + targetResourceTypeFmt := fmt.Sprintf("%s.0.%s", isVirtualEndpointGatewayTarget, isVirtualEndpointGatewayTargetResourceType) + targetOpt.ResourceType = core.StringPtr(d.Get(targetResourceTypeFmt).(string)) + if v, ok := d.GetOk(targetNameFmt); ok { + targetOpt.Name = core.StringPtr(v.(string)) + } + if v, ok := d.GetOk(targetCRNFmt); ok { + targetOpt.CRN = core.StringPtr(v.(string)) + } + + // vpc option + vpcID := d.Get(isVirtualEndpointGatewayVpcID).(string) + vpcOpt := 
&vpcv1.VPCIdentity{ + ID: core.StringPtr(vpcID), + } + + // update option + opt := sess.NewCreateEndpointGatewayOptions(targetOpt, vpcOpt) + opt.SetName(name) + opt.SetTarget(targetOpt) + opt.SetVPC(vpcOpt) + + // IPs option + if ips, ok := d.GetOk(isVirtualEndpointGatewayIPs); ok { + opt.SetIps(expandIPs(ips.([]interface{}))) + } + + // Resource group option + if resourceGroup, ok := d.GetOk(isVirtualEndpointGatewayResourceGroupID); ok { + resourceGroupID := resourceGroup.(string) + + resourceGroupOpt := &vpcv1.ResourceGroupIdentity{ + ID: core.StringPtr(resourceGroupID), + } + opt.SetResourceGroup(resourceGroupOpt) + + } + result, response, err := sess.CreateEndpointGateway(opt) + if err != nil { + log.Printf("Create Endpoint Gateway failed: %v", response) + return err + } + + d.SetId(*result.ID) + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isVirtualEndpointGatewayTags); ok || v != "" { + oldList, newList := d.GetChange(isVirtualEndpointGatewayTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *result.CRN) + if err != nil { + log.Printf( + "Error on create of VPE (%s) tags: %s", d.Id(), err) + } + } + return resourceIBMisVirtualEndpointGatewayRead(d, meta) +} + +func resourceIBMisVirtualEndpointGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + if d.HasChange(isVirtualEndpointGatewayName) { + name := d.Get(isVirtualEndpointGatewayName).(string) + + // create option + endpointGatewayPatchModel := new(vpcv1.EndpointGatewayPatch) + endpointGatewayPatchModel.Name = core.StringPtr(name) + endpointGatewayPatchModelAsPatch, _ := endpointGatewayPatchModel.AsPatch() + opt := sess.NewUpdateEndpointGatewayOptions(d.Id(), endpointGatewayPatchModelAsPatch) + _, response, err := sess.UpdateEndpointGateway(opt) + if err != nil { + log.Printf("Update Endpoint Gateway failed: %v", response) + return err + } + + } + if d.HasChange(isVirtualEndpointGatewayTags) { + opt := sess.NewGetEndpointGatewayOptions(d.Id()) + result, response, err := sess.GetEndpointGateway(opt) + if err != nil { + return fmt.Errorf("Error getting VPE: %s\n%s", err, response) + } + oldList, newList := d.GetChange(isVirtualEndpointGatewayTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *result.CRN) + if err != nil { + log.Printf( + "Error on update of VPE (%s) tags: %s", d.Id(), err) + } + } + return resourceIBMisVirtualEndpointGatewayRead(d, meta) +} + +func resourceIBMisVirtualEndpointGatewayRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + // read option + opt := sess.NewGetEndpointGatewayOptions(d.Id()) + result, response, err := sess.GetEndpointGateway(opt) + if err != nil { + log.Printf("Get Endpoint Gateway failed: %v", response) + return err + } + d.Set(isVirtualEndpointGatewayName, result.Name) + d.Set(isVirtualEndpointGatewayHealthState, result.HealthState) + d.Set(isVirtualEndpointGatewayCreatedAt, result.CreatedAt.String()) + d.Set(isVirtualEndpointGatewayLifecycleState, result.LifecycleState) + d.Set(isVirtualEndpointGatewayResourceType, result.ResourceType) + d.Set(isVirtualEndpointGatewayIPs, flattenIPs(result.Ips)) + d.Set(isVirtualEndpointGatewayResourceGroupID, result.ResourceGroup.ID) + d.Set(isVirtualEndpointGatewayTarget, + flattenEndpointGatewayTarget(result.Target.(*vpcv1.EndpointGatewayTarget))) + d.Set(isVirtualEndpointGatewayVpcID, result.VPC.ID) + tags, err := GetTagsUsingCRN(meta, *result.CRN) + if err != nil { + log.Printf( + 
"Error on get of VPE (%s) tags: %s", d.Id(), err) + } + d.Set(isVirtualEndpointGatewayTags, tags) + return nil +} + +func resourceIBMisVirtualEndpointGatewayDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + opt := sess.NewDeleteEndpointGatewayOptions(d.Id()) + response, err := sess.DeleteEndpointGateway(opt) + if err != nil { + log.Printf("Delete Endpoint Gateway failed: %v", response) + } + return nil +} + +func resourceIBMisVirtualEndpointGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + + opt := sess.NewGetEndpointGatewayOptions(d.Id()) + _, response, err := sess.GetEndpointGateway(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + log.Printf("Endpoint Gateway does not exist.") + return false, nil + } + log.Printf("Error : %s", response) + return false, err + } + return true, nil +} + +func expandIPs(ipsSet []interface{}) (ipsOptions []vpcv1.EndpointGatewayReservedIPIntf) { + ipsList := ipsSet + for _, item := range ipsList { + ips := item.(map[string]interface{}) + // IPs option + ipsID := ips[isVirtualEndpointGatewayIPsID].(string) + ipsName := ips[isVirtualEndpointGatewayIPsName].(string) + + // IPs subnet option + ipsSubnetID := ips[isVirtualEndpointGatewayIPsSubnet].(string) + + ipsSubnetOpt := &vpcv1.SubnetIdentity{ + ID: &ipsSubnetID, + } + + ipsOpt := &vpcv1.EndpointGatewayReservedIP{ + ID: core.StringPtr(ipsID), + Name: core.StringPtr(ipsName), + Subnet: ipsSubnetOpt, + } + ipsOptions = append(ipsOptions, ipsOpt) + } + return ipsOptions +} + +func flattenIPs(ipsList []vpcv1.ReservedIPReference) interface{} { + ipsListOutput := make([]interface{}, 0) + for _, item := range ipsList { + ips := make(map[string]interface{}, 0) + ips[isVirtualEndpointGatewayIPsID] = *item.ID + ips[isVirtualEndpointGatewayIPsName] = *item.Name + ips[isVirtualEndpointGatewayIPsResourceType] = *item.ResourceType + + ipsListOutput = append(ipsListOutput, ips) + } + return ipsListOutput +} + +func flattenEndpointGatewayTarget(target *vpcv1.EndpointGatewayTarget) interface{} { + targetSlice := []interface{}{} + targetOutput := map[string]string{} + if target == nil { + return targetOutput + } + if target.Name != nil { + targetOutput[isVirtualEndpointGatewayTargetName] = *target.Name + } + if target.CRN != nil { + targetOutput[isVirtualEndpointGatewayTargetCRN] = *target.CRN + } + if target.ResourceType != nil { + targetOutput[isVirtualEndpointGatewayTargetResourceType] = *target.ResourceType + } + targetSlice = append(targetSlice, targetOutput) + return targetSlice +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_virtual_endpoint_gateway_ip.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_virtual_endpoint_gateway_ip.go new file mode 100644 index 00000000000..a967af6d020 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_virtual_endpoint_gateway_ip.go @@ -0,0 +1,220 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isVirtualEndpointGatewayID = "gateway" + isVirtualEndpointGatewayIPID = "reserved_ip" + isVirtualEndpointGatewayIPName = "name" + isVirtualEndpointGatewayIPAddress = "address" + isVirtualEndpointGatewayIPResourceType = "resource_type" + isVirtualEndpointGatewayIPAutoDelete = "auto_delete" + isVirtualEndpointGatewayIPCreatedAt = "created_at" + isVirtualEndpointGatewayIPTarget = "target" + isVirtualEndpointGatewayIPTargetID = "id" + isVirtualEndpointGatewayIPTargetName = "name" + isVirtualEndpointGatewayIPTargetResourceType = "resource_type" +) + +func resourceIBMISEndpointGatewayIP() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMisVirtualEndpointGatewayIPCreate, + Read: resourceIBMisVirtualEndpointGatewayIPRead, + Delete: resourceIBMisVirtualEndpointGatewayIPDelete, + Exists: resourceIBMisVirtualEndpointGatewayIPExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Endpoint gateway ID", + }, + isVirtualEndpointGatewayIPID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Endpoint gateway IP id", + }, + isVirtualEndpointGatewayIPName: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP name", + }, + isVirtualEndpointGatewayIPResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP resource type", + }, + isVirtualEndpointGatewayIPCreatedAt: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP created date and time", + }, + isVirtualEndpointGatewayIPAutoDelete: { + Type: schema.TypeBool, + Computed: true, + Description: "Endpoint gateway IP auto delete", + }, + isVirtualEndpointGatewayIPAddress: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway IP address", + }, + isVirtualEndpointGatewayIPTarget: { + Type: schema.TypeList, + Computed: true, + Description: "Endpoint gateway detail", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVirtualEndpointGatewayIPTargetID: { + Type: schema.TypeString, + Computed: true, + Description: "The IPs target id", + }, + isVirtualEndpointGatewayIPTargetName: { + Type: schema.TypeString, + Computed: true, + Description: "The IPs target name", + }, + isVirtualEndpointGatewayIPTargetResourceType: { + Type: schema.TypeString, + Computed: true, + Description: "Endpoint gateway resource type", + }, + }, + }, + }, + }, + } +} + +func resourceIBMisVirtualEndpointGatewayIPCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + gatewayID := d.Get(isVirtualEndpointGatewayID).(string) + ipID := d.Get(isVirtualEndpointGatewayIPID).(string) + opt := sess.NewAddEndpointGatewayIPOptions(gatewayID, ipID) + _, response, err := sess.AddEndpointGatewayIP(opt) + if err != nil { + log.Printf("Add Endpoint Gateway failed: %v", response) + return err + } + d.SetId(fmt.Sprintf("%s/%s", gatewayID, ipID)) + return resourceIBMisVirtualEndpointGatewayIPRead(d, meta) +} + +func 
resourceIBMisVirtualEndpointGatewayIPRead(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + gatewayID := parts[0] + ipID := parts[1] + opt := sess.NewGetEndpointGatewayIPOptions(gatewayID, ipID) + result, response, err := sess.GetEndpointGatewayIP(opt) + if err != nil { + log.Printf("Get Endpoint Gateway IP failed: %v", response) + return err + } + d.Set(isVirtualEndpointGatewayIPID, result.ID) + d.Set(isVirtualEndpointGatewayIPName, result.Name) + d.Set(isVirtualEndpointGatewayIPAddress, result.Address) + d.Set(isVirtualEndpointGatewayIPCreatedAt, (result.CreatedAt).String()) + d.Set(isVirtualEndpointGatewayIPResourceType, result.ResourceType) + d.Set(isVirtualEndpointGatewayIPAutoDelete, result.AutoDelete) + d.Set(isVirtualEndpointGatewayIPTarget, + flattenEndpointGatewayIPTarget(result.Target.(*vpcv1.ReservedIPTarget))) + return nil +} + +func resourceIBMisVirtualEndpointGatewayIPDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + gatewayID := parts[0] + ipID := parts[1] + opt := sess.NewRemoveEndpointGatewayIPOptions(gatewayID, ipID) + response, err := sess.RemoveEndpointGatewayIP(opt) + if err != nil && (response == nil || response.StatusCode != 404) { + log.Printf("Remove Endpoint Gateway IP failed: %v", response) + return err + } + d.SetId("") + return nil +} + +func resourceIBMisVirtualEndpointGatewayIPExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + if len(parts) != 2 { + return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of gatewayID/ipID", d.Id()) + } + gatewayID := parts[0] + ipID := parts[1] + opt := sess.NewGetEndpointGatewayIPOptions(gatewayID, ipID) + _, response, err := sess.GetEndpointGatewayIP(opt) + if err != nil { + if response != nil && response.StatusCode == 404 { + log.Printf("Endpoint Gateway IP does not exist.") + return false, nil + } + log.Printf("Error : %s", response) + return false, err + } + return true, nil +} + +func flattenEndpointGatewayIPTarget(target *vpcv1.ReservedIPTarget) interface{} { + targetSlice := []interface{}{} + targetOutput := map[string]string{} + if target == nil { + return targetOutput + } + if target.ID != nil { + targetOutput[isVirtualEndpointGatewayIPTargetID] = *target.ID + } + if target.Name != nil { + targetOutput[isVirtualEndpointGatewayIPTargetName] = *target.Name + } + if target.ResourceType != nil { + targetOutput[isVirtualEndpointGatewayIPTargetResourceType] = *target.ResourceType + } + targetSlice = append(targetSlice, targetOutput) + return targetSlice +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_volume.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_volume.go new file mode 100644 index 00000000000..527196ecf86 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_volume.go @@ -0,0 +1,861 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
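+ // Editor's note (illustrative aside, not part of this change): the create and
+ // delete paths below block on resource.StateChangeConf waiters that poll
+ // GetVolume until the status settles. A minimal sketch of the waiter shape,
+ // assuming a vpcv1 client named sess and a volume id:
+ //
+ //	stateConf := &resource.StateChangeConf{
+ //		Pending: []string{"provisioning"},
+ //		Target:  []string{"available"},
+ //		Refresh: func() (interface{}, string, error) {
+ //			vol, _, err := sess.GetVolume(&vpcv1.GetVolumeOptions{ID: &id})
+ //			if err != nil {
+ //				return nil, "", err
+ //			}
+ //			return vol, *vol.Status, nil
+ //		},
+ //		Timeout:    10 * time.Minute,
+ //		Delay:      10 * time.Second,
+ //		MinTimeout: 10 * time.Second,
+ //	}
+ //	if _, err := stateConf.WaitForState(); err != nil {
+ //		// the volume did not reach "available" within the timeout
+ //	}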
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isVolumeName = "name" + isVolumeProfileName = "profile" + isVolumeZone = "zone" + isVolumeEncryptionKey = "encryption_key" + isVolumeCapacity = "capacity" + isVolumeIops = "iops" + isVolumeCrn = "crn" + isVolumeTags = "tags" + isVolumeStatus = "status" + isVolumeStatusReasons = "status_reasons" + isVolumeStatusReasonsCode = "code" + isVolumeStatusReasonsMessage = "message" + isVolumeDeleting = "deleting" + isVolumeDeleted = "done" + isVolumeProvisioning = "provisioning" + isVolumeProvisioningDone = "done" + isVolumeResourceGroup = "resource_group" +) + +func resourceIBMISVolume() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISVolumeCreate, + Read: resourceIBMISVolumeRead, + Update: resourceIBMISVolumeUpdate, + Delete: resourceIBMISVolumeDelete, + Exists: resourceIBMISVolumeExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + + isVolumeName: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_is_volume", isVolumeName), + Description: "Volume name", + }, + + isVolumeProfileName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Volume profile name", + }, + + isVolumeZone: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Zone name", + }, + + isVolumeEncryptionKey: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Volume encryption key info", + }, + + isVolumeCapacity: { + Type: schema.TypeInt, + Optional: true, + Default: 100, + ForceNew: true, + Description: "Volume capacity value", + }, + isVolumeResourceGroup: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Resource group name", + }, + isVolumeIops: { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + Description: "IOPS value for the Volume", + }, + isVolumeCrn: { + Type: schema.TypeString, + Computed: true, + Description: "CRN value for the volume instance", + }, + isVolumeStatus: { + Type: schema.TypeString, + Computed: true, + Description: "Volume status", + }, + + isVolumeStatusReasons: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVolumeStatusReasonsCode: { + Type: schema.TypeString, + Computed: true, + Description: "A snake case string succinctly identifying the status reason", + }, + + isVolumeStatusReasonsMessage: { + Type: schema.TypeString, + Computed: true, + Description: "An explanation of the status reason", + }, + }, + }, + }, + + isVolumeTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_volume", "tag")}, + Set: resourceIBMVPCHash, + Description: "Tags for the volume instance", + }, + + ResourceControllerURL: 
{ + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + }, + } +} + +func resourceIBMISVolumeValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVolumeName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISVolumeResourceValidator := ResourceValidator{ResourceName: "ibm_is_volume", Schema: validateSchema} + return &ibmISVolumeResourceValidator +} + +func resourceIBMISVolumeCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + volName := d.Get(isVolumeName).(string) + profile := d.Get(isVolumeProfileName).(string) + zone := d.Get(isVolumeZone).(string) + var volCapacity int64 + if capacity, ok := d.GetOk(isVolumeCapacity); ok { + volCapacity = int64(capacity.(int)) + } else { + volCapacity = 100 + } + if userDetails.generation == 1 { + err := classicVolCreate(d, meta, volName, profile, zone, volCapacity) + if err != nil { + return err + } + } else { + err := volCreate(d, meta, volName, profile, zone, volCapacity) + if err != nil { + return err + } + } + return resourceIBMISVolumeRead(d, meta) +} + +func classicVolCreate(d *schema.ResourceData, meta interface{}, volName, profile, zone string, volCapacity int64) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.CreateVolumeOptions{ + VolumePrototype: &vpcclassicv1.VolumePrototype{ + Name: &volName, + Capacity: &volCapacity, + Zone: &vpcclassicv1.ZoneIdentity{ + Name: &zone, + }, + Profile: &vpcclassicv1.VolumeProfileIdentity{ + Name: &profile, + }, + }, + } + volTemplate := options.VolumePrototype.(*vpcclassicv1.VolumePrototype) + + if key, ok := d.GetOk(isVolumeEncryptionKey); ok { + encryptionKey := key.(string) + volTemplate.EncryptionKey = &vpcclassicv1.EncryptionKeyIdentity{ + CRN: &encryptionKey, + } + } + + if rgrp, ok := d.GetOk(isVolumeResourceGroup); ok { + rg := rgrp.(string) + volTemplate.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{ + ID: &rg, + } + } + + if i, ok := d.GetOk(isVolumeIops); ok { + iops := int64(i.(int)) + volTemplate.Iops = &iops + } + + vol, response, err := sess.CreateVolume(options) + if err != nil { + return fmt.Errorf("[DEBUG] Create volume err %s\n%s", err, response) + } + d.SetId(*vol.ID) + log.Printf("[INFO] Volume : %s", *vol.ID) + _, err = isWaitForClassicVolumeAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := 
os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isVolumeTags); ok || v != "" { + oldList, newList := d.GetChange(isVolumeTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *vol.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc volume (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func volCreate(d *schema.ResourceData, meta interface{}, volName, profile, zone string, volCapacity int64) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.CreateVolumeOptions{ + VolumePrototype: &vpcv1.VolumePrototype{ + Name: &volName, + Capacity: &volCapacity, + Zone: &vpcv1.ZoneIdentity{ + Name: &zone, + }, + Profile: &vpcv1.VolumeProfileIdentity{ + Name: &profile, + }, + }, + } + volTemplate := options.VolumePrototype.(*vpcv1.VolumePrototype) + + if key, ok := d.GetOk(isVolumeEncryptionKey); ok { + encryptionKey := key.(string) + volTemplate.EncryptionKey = &vpcv1.EncryptionKeyIdentity{ + CRN: &encryptionKey, + } + } + + if rgrp, ok := d.GetOk(isVolumeResourceGroup); ok { + rg := rgrp.(string) + volTemplate.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + + if i, ok := d.GetOk(isVolumeIops); ok { + iops := int64(i.(int)) + volTemplate.Iops = &iops + } + + vol, response, err := sess.CreateVolume(options) + if err != nil { + return fmt.Errorf("[DEBUG] Create volume err %s\n%s", err, response) + } + d.SetId(*vol.ID) + log.Printf("[INFO] Volume : %s", *vol.ID) + _, err = isWaitForVolumeAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isVolumeTags); ok || v != "" { + oldList, newList := d.GetChange(isVolumeTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *vol.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc volume (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func resourceIBMISVolumeRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + id := d.Id() + if userDetails.generation == 1 { + err := classicVolGet(d, meta, id) + if err != nil { + return err + } + } else { + err := volGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicVolGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.GetVolumeOptions{ + ID: &id, + } + vol, response, err := sess.GetVolume(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Volume (%s): %s\n%s", id, err, response) + } + d.SetId(*vol.ID) + d.Set(isVolumeName, *vol.Name) + d.Set(isVolumeProfileName, *vol.Profile.Name) + d.Set(isVolumeZone, *vol.Zone.Name) + if vol.EncryptionKey != nil { + d.Set(isVolumeEncryptionKey, *vol.EncryptionKey.CRN) + } + d.Set(isVolumeIops, *vol.Iops) + d.Set(isVolumeCapacity, *vol.Capacity) + d.Set(isVolumeCrn, *vol.CRN) + d.Set(isVolumeStatus, *vol.Status) + tags, err := GetTagsUsingCRN(meta, *vol.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc volume (%s) tags: %s", d.Id(), err) + } + d.Set(isVolumeTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/storage/storageVolumes") + d.Set(ResourceName, *vol.Name) + d.Set(ResourceCRN, *vol.CRN) + d.Set(ResourceStatus, *vol.Status) + if 
vol.ResourceGroup != nil { + d.Set(ResourceGroupName, *vol.ResourceGroup.ID) + d.Set(isVolumeResourceGroup, *vol.ResourceGroup.ID) + } + return nil +} + +func volGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.GetVolumeOptions{ + ID: &id, + } + vol, response, err := sess.GetVolume(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Volume (%s): %s\n%s", id, err, response) + } + d.SetId(*vol.ID) + d.Set(isVolumeName, *vol.Name) + d.Set(isVolumeProfileName, *vol.Profile.Name) + d.Set(isVolumeZone, *vol.Zone.Name) + if vol.EncryptionKey != nil { + d.Set(isVolumeEncryptionKey, vol.EncryptionKey.CRN) + } + d.Set(isVolumeIops, *vol.Iops) + d.Set(isVolumeCapacity, *vol.Capacity) + d.Set(isVolumeCrn, *vol.CRN) + d.Set(isVolumeStatus, *vol.Status) + //set the status reasons + if vol.StatusReasons != nil { + statusReasonsList := make([]map[string]interface{}, 0) + for _, sr := range vol.StatusReasons { + currentSR := map[string]interface{}{} + if sr.Code != nil && sr.Message != nil { + currentSR[isVolumeStatusReasonsCode] = *sr.Code + currentSR[isVolumeStatusReasonsMessage] = *sr.Message + statusReasonsList = append(statusReasonsList, currentSR) + } + } + d.Set(isVolumeStatusReasons, statusReasonsList) + } + tags, err := GetTagsUsingCRN(meta, *vol.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc volume (%s) tags: %s", d.Id(), err) + } + d.Set(isVolumeTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc-ext/storage/storageVolumes") + d.Set(ResourceName, *vol.Name) + d.Set(ResourceCRN, *vol.CRN) + d.Set(ResourceStatus, *vol.Status) + if vol.ResourceGroup != nil { + d.Set(ResourceGroupName, *vol.ResourceGroup.Name) + d.Set(isVolumeResourceGroup, *vol.ResourceGroup.ID) + } + return nil +} + +func resourceIBMISVolumeUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + id := d.Id() + name := "" + hasChanged := false + + if d.HasChange(isVolumeName) { + name = d.Get(isVolumeName).(string) + hasChanged = true + } + + if userDetails.generation == 1 { + err := classicVolUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } else { + err := volUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } + return resourceIBMISVolumeRead(d, meta) +} + +func classicVolUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isVolumeTags) { + options := &vpcclassicv1.GetVolumeOptions{ + ID: &id, + } + vol, response, err := sess.GetVolume(options) + if err != nil { + return fmt.Errorf("Error getting Volume : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isVolumeTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *vol.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc volume (%s) tags: %s", id, err) + } + } + if hasChanged { + options := &vpcclassicv1.UpdateVolumeOptions{ + ID: &id, + } + volumePatchModel := &vpcclassicv1.VolumePatch{ + Name: &name, + } + volumePatch, err := volumePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for VolumePatch: %s", err) + } + options.VolumePatch 
= volumePatch + _, response, err := sess.UpdateVolume(options) + if err != nil { + return fmt.Errorf("Error updating vpc volume: %s\n%s", err, response) + } + } + return nil +} + +func volUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isVolumeTags) { + options := &vpcv1.GetVolumeOptions{ + ID: &id, + } + vol, response, err := sess.GetVolume(options) + if err != nil { + return fmt.Errorf("Error getting Volume : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isVolumeTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *vol.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc volume (%s) tags: %s", id, err) + } + } + if hasChanged { + options := &vpcv1.UpdateVolumeOptions{ + ID: &id, + } + volumePatchModel := &vpcv1.VolumePatch{ + Name: &name, + } + volumePatch, err := volumePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for VolumePatch: %s", err) + } + options.VolumePatch = volumePatch + _, response, err := sess.UpdateVolume(options) + if err != nil { + return fmt.Errorf("Error updating vpc volume: %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISVolumeDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicVolDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := volDelete(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicVolDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getvoloptions := &vpcclassicv1.GetVolumeOptions{ + ID: &id, + } + _, response, err := sess.GetVolume(getvoloptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting Volume (%s): %s\n%s", id, err, response) + } + + options := &vpcclassicv1.DeleteVolumeOptions{ + ID: &id, + } + response, err = sess.DeleteVolume(options) + if err != nil { + return fmt.Errorf("Error Deleting Volume : %s\n%s", err, response) + } + _, err = isWaitForClassicVolumeDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func volDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getvoloptions := &vpcv1.GetVolumeOptions{ + ID: &id, + } + _, response, err := sess.GetVolume(getvoloptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting Volume (%s): %s\n%s", id, err, response) + } + + options := &vpcv1.DeleteVolumeOptions{ + ID: &id, + } + response, err = sess.DeleteVolume(options) + if err != nil { + return fmt.Errorf("Error Deleting Volume : %s\n%s", err, response) + } + _, err = isWaitForVolumeDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func isWaitForClassicVolumeDeleted(vol *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVolumeDeleting}, + Target: []string{"done", ""}, + Refresh: 
isClassicVolumeDeleteRefreshFunc(vol, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicVolumeDeleteRefreshFunc(vol *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + volgetoptions := &vpcclassicv1.GetVolumeOptions{ + ID: &id, + } + vol, response, err := vol.GetVolume(volgetoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return vol, isVolumeDeleted, nil + } + return vol, "", fmt.Errorf("Error Getting Volume: %s\n%s", err, response) + } + return vol, isVolumeDeleting, err + } +} + +func isWaitForVolumeDeleted(vol *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVolumeDeleting}, + Target: []string{"done", ""}, + Refresh: isVolumeDeleteRefreshFunc(vol, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isVolumeDeleteRefreshFunc(vol *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + volgetoptions := &vpcv1.GetVolumeOptions{ + ID: &id, + } + vol, response, err := vol.GetVolume(volgetoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return vol, isVolumeDeleted, nil + } + return vol, "", fmt.Errorf("Error Getting Volume: %s\n%s", err, response) + } + return vol, isVolumeDeleting, err + } +} + +func resourceIBMISVolumeExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + + if userDetails.generation == 1 { + exists, err := classicVolExists(d, meta, id) + return exists, err + } else { + exists, err := volExists(d, meta, id) + return exists, err + } +} + +func classicVolExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + options := &vpcclassicv1.GetVolumeOptions{ + ID: &id, + } + _, response, err := sess.GetVolume(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Volume: %s\n%s", err, response) + } + return true, nil +} + +func volExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + options := &vpcv1.GetVolumeOptions{ + ID: &id, + } + _, response, err := sess.GetVolume(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Volume: %s\n%s", err, response) + } + return true, nil +} + +func isWaitForClassicVolumeAvailable(client *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for Volume (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVolumeProvisioning}, + Target: []string{isVolumeProvisioningDone, ""}, + Refresh: isClassicVolumeRefreshFunc(client, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicVolumeRefreshFunc(client *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc 
{ + return func() (interface{}, string, error) { + volgetoptions := &vpcclassicv1.GetVolumeOptions{ + ID: &id, + } + vol, response, err := client.GetVolume(volgetoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting volume: %s\n%s", err, response) + } + + if *vol.Status == "available" { + return vol, isVolumeProvisioningDone, nil + } + + return vol, isVolumeProvisioning, nil + } +} + +func isWaitForVolumeAvailable(client *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for Volume (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVolumeProvisioning}, + Target: []string{isVolumeProvisioningDone, ""}, + Refresh: isVolumeRefreshFunc(client, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isVolumeRefreshFunc(client *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + volgetoptions := &vpcv1.GetVolumeOptions{ + ID: &id, + } + vol, response, err := client.GetVolume(volgetoptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting volume: %s\n%s", err, response) + } + + if *vol.Status == "available" { + return vol, isVolumeProvisioningDone, nil + } + + return vol, isVolumeProvisioning, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc.go new file mode 100644 index 00000000000..54b917b6508 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc.go @@ -0,0 +1,1472 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "fmt" + "log" + "os" + "reflect" + "strings" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode" +) + +const ( + isVPCDefaultNetworkACL = "default_network_acl" + isVPCDefaultSecurityGroup = "default_security_group" + isVPCDefaultRoutingTable = "default_routing_table" + isVPCName = "name" + isVPCDefaultNetworkACLName = "default_network_acl_name" + isVPCDefaultSecurityGroupName = "default_security_group_name" + isVPCDefaultRoutingTableName = "default_routing_table_name" + isVPCResourceGroup = "resource_group" + isVPCStatus = "status" + isVPCDeleting = "deleting" + isVPCDeleted = "done" + isVPCTags = "tags" + isVPCClassicAccess = "classic_access" + isVPCAvailable = "available" + isVPCFailed = "failed" + isVPCPending = "pending" + isVPCAddressPrefixManagement = "address_prefix_management" + cseSourceAddresses = "cse_source_addresses" + subnetsList = "subnets" + totalIPV4AddressCount = "total_ipv4_address_count" + availableIPV4AddressCount = "available_ipv4_address_count" + isVPCCRN = "crn" + isVPCSecurityGroupList = "security_group" + isVPCSecurityGroupName = "group_name" + isVPCSgRules = "rules" + isVPCSecurityGroupRuleID = "rule_id" + isVPCSecurityGroupRuleDirection = "direction" + isVPCSecurityGroupRuleIPVersion = "ip_version" + isVPCSecurityGroupRuleRemote = "remote" + isVPCSecurityGroupRuleType = "type" + isVPCSecurityGroupRuleCode = "code" + isVPCSecurityGroupRulePortMax = 
"port_max" + isVPCSecurityGroupRulePortMin = "port_min" + isVPCSecurityGroupRuleProtocol = "protocol" + isVPCSecurityGroupID = "group_id" +) + +func resourceIBMISVPC() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISVPCCreate, + Read: resourceIBMISVPCRead, + Update: resourceIBMISVPCUpdate, + Delete: resourceIBMISVPCDelete, + Exists: resourceIBMISVPCExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + isVPCAddressPrefixManagement: { + Type: schema.TypeString, + Optional: true, + Default: "auto", + DiffSuppressFunc: applyOnce, + ValidateFunc: InvokeValidator("ibm_is_vpc", isVPCAddressPrefixManagement), + Description: "Address Prefix management value", + }, + + isVPCDefaultNetworkACL: { + Type: schema.TypeString, + Optional: true, + Default: nil, + Computed: true, + Deprecated: "This field is deprecated", + Description: "Default network ACL", + }, + + isVPCDefaultRoutingTable: { + Type: schema.TypeString, + Computed: true, + Description: "Default routing table associated with VPC", + }, + + isVPCClassicAccess: { + Type: schema.TypeBool, + ForceNew: true, + Default: false, + Optional: true, + Description: "Set to true if classic access needs to enabled to VPC", + }, + + isVPCName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_vpc", isVPCName), + Description: "VPC name", + }, + + isVPCDefaultNetworkACLName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator("ibm_is_vpc", isVPCDefaultNetworkACLName), + Description: "Default Network ACL name", + }, + + isVPCDefaultSecurityGroupName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator("ibm_is_vpc", isVPCDefaultSecurityGroupName), + Description: "Default security group name", + }, + + isVPCDefaultRoutingTableName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator("ibm_is_vpc", isVPCDefaultRoutingTableName), + Description: "Default routing table name", + }, + + isVPCResourceGroup: { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: "Resource group info", + }, + + isVPCStatus: { + Type: schema.TypeString, + Computed: true, + Description: "VPC status", + }, + + isVPCDefaultSecurityGroup: { + Type: schema.TypeString, + Computed: true, + Description: "Security group associated with VPC", + }, + isVPCTags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_vpc", "tag")}, + Set: resourceIBMVPCHash, + Description: "List of tags", + }, + + isVPCCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + 
Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + + cseSourceAddresses: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + Description: "Cloud service endpoint IP Address", + }, + + "zone_name": { + Type: schema.TypeString, + Computed: true, + Description: "Location info of CSE Address", + }, + }, + }, + }, + + isVPCSecurityGroupList: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isVPCSecurityGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "Security group name", + }, + + isVPCSecurityGroupID: { + Type: schema.TypeString, + Computed: true, + Description: "Security group id", + }, + + isSecurityGroupRules: { + Type: schema.TypeList, + Computed: true, + Description: "Security Rules", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + isVPCSecurityGroupRuleID: { + Type: schema.TypeString, + Computed: true, + Description: "Rule ID", + }, + + isVPCSecurityGroupRuleDirection: { + Type: schema.TypeString, + Computed: true, + Description: "Direction of traffic to enforce, either inbound or outbound", + }, + + isVPCSecurityGroupRuleIPVersion: { + Type: schema.TypeString, + Computed: true, + Description: "IP version: ipv4 or ipv6", + }, + + isVPCSecurityGroupRuleRemote: { + Type: schema.TypeString, + Computed: true, + Description: "Security group id: an IP address, a CIDR block, or a single security group identifier", + }, + + isVPCSecurityGroupRuleType: { + Type: schema.TypeInt, + Computed: true, + }, + + isVPCSecurityGroupRuleCode: { + Type: schema.TypeInt, + Computed: true, + }, + + isVPCSecurityGroupRulePortMin: { + Type: schema.TypeInt, + Computed: true, + }, + + isVPCSecurityGroupRulePortMax: { + Type: schema.TypeInt, + Computed: true, + }, + + isVPCSecurityGroupRuleProtocol: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + + subnetsList: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "subnet name", + }, + + "id": { + Type: schema.TypeString, + Computed: true, + Description: "subnet ID", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "subnet status", + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Description: "subnet location", + }, + + totalIPV4AddressCount: { + Type: schema.TypeInt, + Computed: true, + Description: "Total IPv4 address count in the subnet", + }, + + availableIPV4AddressCount: { + Type: schema.TypeInt, + Computed: true, + Description: "Available IPv4 address count in the subnet", + }, + }, + }, + }, + }, + } +} + +func resourceIBMISVPCValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + address_prefix_management := "auto, manual" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCAddressPrefixManagement, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Optional: true, + Default: "auto", + AllowedValues: address_prefix_management}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCName, + ValidateFunctionIdentifier:
ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCDefaultNetworkACLName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCDefaultSecurityGroupName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCDefaultRoutingTableName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmISVPCResourceValidator := ResourceValidator{ResourceName: "ibm_is_vpc", Schema: validateSchema} + return &ibmISVPCResourceValidator +} + +func resourceIBMISVPCCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + log.Printf("[DEBUG] VPC create") + name := d.Get(isVPCName).(string) + apm := "" + rg := "" + isClassic := false + + if addprefixmgmt, ok := d.GetOk(isVPCAddressPrefixManagement); ok { + apm = addprefixmgmt.(string) + } + if classic, ok := d.GetOk(isVPCClassicAccess); ok { + isClassic = classic.(bool) + } + + if grp, ok := d.GetOk(isVPCResourceGroup); ok { + rg = grp.(string) + } + if userDetails.generation == 1 { + err := classicVpcCreate(d, meta, name, apm, rg, isClassic) + if err != nil { + return err + } + } else { + err := vpcCreate(d, meta, name, apm, rg, isClassic) + if err != nil { + return err + } + } + return resourceIBMISVPCRead(d, meta) +} + +func classicVpcCreate(d *schema.ResourceData, meta interface{}, name, apm, rg string, isClassic bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.CreateVPCOptions{ + Name: &name, + } + if rg != "" { + options.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{ + ID: &rg, + } + } + if apm != "" { + options.AddressPrefixManagement = &apm + } + options.ClassicAccess = &isClassic + + vpc, response, err := sess.CreateVPC(options) + if err != nil { + return fmt.Errorf("Error while creating VPC err %s\n%s", err, response) + } + d.SetId(*vpc.ID) + log.Printf("[INFO] VPC : %s", *vpc.ID) + _, err = isWaitForClassicVPCAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isVPCTags); ok || v != "" { + oldList, newList := d.GetChange(isVPCTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *vpc.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func isWaitForClassicVPCAvailable(vpc *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for VPC (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: 
[]string{isVPCPending}, + Target: []string{isVPCAvailable, isVPCFailed}, + Refresh: isClassicVPCRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicVPCRefreshFunc(vpc *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getvpcOptions := &vpcclassicv1.GetVPCOptions{ + ID: &id, + } + vpc, response, err := vpc.GetVPC(getvpcOptions) + if err != nil { + return nil, isVPCFailed, fmt.Errorf("Error getting VPC : %s\n%s", err, response) + } + + if *vpc.Status == isVPCAvailable || *vpc.Status == isVPCFailed { + return vpc, *vpc.Status, nil + } + + return vpc, isVPCPending, nil + } +} + +func vpcCreate(d *schema.ResourceData, meta interface{}, name, apm, rg string, isClassic bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.CreateVPCOptions{ + Name: &name, + } + if rg != "" { + options.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &rg, + } + } + if apm != "" { + options.AddressPrefixManagement = &apm + } + options.ClassicAccess = &isClassic + + vpc, response, err := sess.CreateVPC(options) + if err != nil { + return fmt.Errorf("Error while creating VPC %s ", beautifyError(err, response)) + } + d.SetId(*vpc.ID) + + if defaultSGName, ok := d.GetOk(isVPCDefaultSecurityGroupName); ok { + sgNameUpdate(sess, *vpc.DefaultSecurityGroup.ID, defaultSGName.(string)) + } + + if defaultRTName, ok := d.GetOk(isVPCDefaultRoutingTableName); ok { + rtNameUpdate(sess, *vpc.ID, *vpc.DefaultRoutingTable.ID, defaultRTName.(string)) + } + + if defaultACLName, ok := d.GetOk(isVPCDefaultNetworkACLName); ok { + nwaclNameUpdate(sess, *vpc.DefaultNetworkACL.ID, defaultACLName.(string)) + } + + log.Printf("[INFO] VPC : %s", *vpc.ID) + _, err = isWaitForVPCAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk(isVPCTags); ok || v != "" { + oldList, newList := d.GetChange(isVPCTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *vpc.CRN) + if err != nil { + log.Printf( + "Error on create of resource vpc (%s) tags: %s", d.Id(), err) + } + } + return nil +} + +func isWaitForVPCAvailable(vpc *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for VPC (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{isVPCPending}, + Target: []string{isVPCAvailable, isVPCFailed}, + Refresh: isVPCRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isVPCRefreshFunc(vpc *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getvpcOptions := &vpcv1.GetVPCOptions{ + ID: &id, + } + vpc, response, err := vpc.GetVPC(getvpcOptions) + if err != nil { + return nil, isVPCFailed, fmt.Errorf("Error getting VPC : %s\n%s", err, response) + } + + if *vpc.Status == isVPCAvailable || *vpc.Status == isVPCFailed { + return vpc, *vpc.Status, nil + } + + return vpc, isVPCPending, nil + } +} + +func resourceIBMISVPCRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicVpcGet(d, meta, id) + if err != nil { + return err + } + } else { + err := vpcGet(d, meta, id) + if 
err != nil { + return err + } + } + return nil +} + +func classicVpcGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getvpcOptions := &vpcclassicv1.GetVPCOptions{ + ID: &id, + } + vpc, response, err := sess.GetVPC(getvpcOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting VPC : %s\n%s", err, response) + } + + d.Set(isVPCName, *vpc.Name) + d.Set(isVPCClassicAccess, *vpc.ClassicAccess) + d.Set(isVPCStatus, *vpc.Status) + if vpc.DefaultNetworkACL != nil { + log.Printf("[DEBUG] vpc default network acl is not null :%s", *vpc.DefaultNetworkACL.ID) + d.Set(isVPCDefaultNetworkACL, *vpc.DefaultNetworkACL.ID) + } else { + log.Printf("[DEBUG] vpc default network acl is null") + d.Set(isVPCDefaultNetworkACL, nil) + } + if vpc.DefaultSecurityGroup != nil { + d.Set(isVPCDefaultSecurityGroup, *vpc.DefaultSecurityGroup.ID) + } else { + d.Set(isVPCDefaultSecurityGroup, nil) + } + tags, err := GetTagsUsingCRN(meta, *vpc.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc (%s) tags: %s", d.Id(), err) + } + d.Set(isVPCTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(isVPCCRN, *vpc.CRN) + d.Set(ResourceControllerURL, controller+"/vpc/network/vpcs") + d.Set(ResourceName, *vpc.Name) + d.Set(ResourceCRN, *vpc.CRN) + d.Set(ResourceStatus, *vpc.Status) + if vpc.ResourceGroup != nil { + d.Set(isVPCResourceGroup, *vpc.ResourceGroup.ID) + d.Set(ResourceGroupName, *vpc.ResourceGroup.ID) + } + //set the cse ip addresses info + if vpc.CseSourceIps != nil { + cseSourceIpsList := make([]map[string]interface{}, 0) + for _, sourceIP := range vpc.CseSourceIps { + currentCseSourceIp := map[string]interface{}{} + if sourceIP.IP != nil { + currentCseSourceIp["address"] = *sourceIP.IP.Address + currentCseSourceIp["zone_name"] = *sourceIP.Zone.Name + cseSourceIpsList = append(cseSourceIpsList, currentCseSourceIp) + } + } + d.Set(cseSourceAddresses, cseSourceIpsList) + } + // set the subnets list + start := "" + allrecs := []vpcclassicv1.Subnet{} + for { + options := &vpcclassicv1.ListSubnetsOptions{} + if start != "" { + options.Start = &start + } + s, response, err := sess.ListSubnets(options) + if err != nil { + return fmt.Errorf("Error Fetching subnets %s\n%s", err, response) + } + start = GetNext(s.Next) + allrecs = append(allrecs, s.Subnets...) 
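+ // NOTE (editorial, illustrative only): the loop above implements the VPC
+ // API's token-based pagination: request a page, collect its records, then
+ // feed the `start` token extracted from the `next` link back into the next
+ // request until no token remains. A generic sketch of the same pattern,
+ // where fetchPage is a hypothetical stand-in for ListSubnets plus GetNext:
+ //
+ // func listAll(fetchPage func(start string) (items []string, next string, err error)) ([]string, error) {
+ //     all := []string{}
+ //     start := ""
+ //     for {
+ //         items, next, err := fetchPage(start)
+ //         if err != nil {
+ //             return nil, err
+ //         }
+ //         all = append(all, items...)
+ //         if next == "" {
+ //             return all, nil
+ //         }
+ //         start = next
+ //     }
+ // }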
+ if start == "" { + break + } + } + subnetsInfo := make([]map[string]interface{}, 0) + for _, subnet := range allrecs { + if *subnet.VPC.ID == d.Id() { + l := map[string]interface{}{ + "name": *subnet.Name, + "id": *subnet.ID, + "status": *subnet.Status, + "zone": *subnet.Zone.Name, + totalIPV4AddressCount: *subnet.TotalIpv4AddressCount, + availableIPV4AddressCount: *subnet.AvailableIpv4AddressCount, + } + subnetsInfo = append(subnetsInfo, l) + } + } + d.Set(subnetsList, subnetsInfo) + + //Set Security group list + + listSgOptions := &vpcclassicv1.ListSecurityGroupsOptions{} + sgs, _, err := sess.ListSecurityGroups(listSgOptions) + if err != nil { + return err + } + + securityGroupList := make([]map[string]interface{}, 0) + + for _, group := range sgs.SecurityGroups { + + if *group.VPC.ID == d.Id() { + g := make(map[string]interface{}) + + g[isVPCSecurityGroupName] = *group.Name + g[isVPCSecurityGroupID] = *group.ID + + rules := make([]map[string]interface{}, 0) + for _, sgrule := range group.Rules { + switch reflect.TypeOf(sgrule).String() { + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + r := make(map[string]interface{}) + if rule.Code != nil { + r[isVPCSecurityGroupRuleCode] = int(*rule.Code) + } + if rule.Type != nil { + r[isVPCSecurityGroupRuleType] = int(*rule.Type) + } + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + + rules = append(rules, r) + } + + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + r := make(map[string]interface{}) + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + + case "*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + rule := sgrule.(*vpcclassicv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + r := make(map[string]interface{}) + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.PortMin != nil { + r[isVPCSecurityGroupRulePortMin] = int(*rule.PortMin) + } + if rule.PortMax != nil { + r[isVPCSecurityGroupRulePortMax] = int(*rule.PortMax) + } + + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + + r[isVPCSecurityGroupRuleID] = *rule.ID + 
remote, ok := rule.Remote.(*vpcclassicv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + + rules = append(rules, r) + } + } + } + g[isVPCSgRules] = rules + securityGroupList = append(securityGroupList, g) + } + } + + d.Set(isVPCSecurityGroupList, securityGroupList) + return nil +} + +func vpcGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getvpcOptions := &vpcv1.GetVPCOptions{ + ID: &id, + } + vpc, response, err := sess.GetVPC(getvpcOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting VPC : %s\n%s", err, response) + } + + d.Set(isVPCName, *vpc.Name) + d.Set(isVPCClassicAccess, *vpc.ClassicAccess) + d.Set(isVPCStatus, *vpc.Status) + if vpc.DefaultNetworkACL != nil { + log.Printf("[DEBUG] vpc default network acl is not null :%s", *vpc.DefaultNetworkACL.ID) + d.Set(isVPCDefaultNetworkACL, *vpc.DefaultNetworkACL.ID) + d.Set(isVPCDefaultNetworkACLName, *vpc.DefaultNetworkACL.Name) + } else { + log.Printf("[DEBUG] vpc default network acl is null") + d.Set(isVPCDefaultNetworkACL, nil) + } + if vpc.DefaultSecurityGroup != nil { + d.Set(isVPCDefaultSecurityGroup, *vpc.DefaultSecurityGroup.ID) + d.Set(isVPCDefaultSecurityGroupName, *vpc.DefaultSecurityGroup.Name) + } else { + d.Set(isVPCDefaultSecurityGroup, nil) + } + if vpc.DefaultRoutingTable != nil { + d.Set(isVPCDefaultRoutingTable, *vpc.DefaultRoutingTable.ID) + d.Set(isVPCDefaultRoutingTableName, *vpc.DefaultRoutingTable.Name) + } + tags, err := GetTagsUsingCRN(meta, *vpc.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc (%s) tags: %s", d.Id(), err) + } + d.Set(isVPCTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(isVPCCRN, *vpc.CRN) + d.Set(ResourceControllerURL, controller+"/vpc-ext/network/vpcs") + d.Set(ResourceName, *vpc.Name) + d.Set(ResourceCRN, *vpc.CRN) + d.Set(ResourceStatus, *vpc.Status) + if vpc.ResourceGroup != nil { + d.Set(isVPCResourceGroup, *vpc.ResourceGroup.ID) + d.Set(ResourceGroupName, *vpc.ResourceGroup.Name) + } + //set the cse ip addresses info + if vpc.CseSourceIps != nil { + cseSourceIpsList := make([]map[string]interface{}, 0) + for _, sourceIP := range vpc.CseSourceIps { + currentCseSourceIp := map[string]interface{}{} + if sourceIP.IP != nil { + currentCseSourceIp["address"] = *sourceIP.IP.Address + currentCseSourceIp["zone_name"] = *sourceIP.Zone.Name + cseSourceIpsList = append(cseSourceIpsList, currentCseSourceIp) + } + } + d.Set(cseSourceAddresses, cseSourceIpsList) + } + // set the subnets list + start := "" + allrecs := []vpcv1.Subnet{} + for { + options := &vpcv1.ListSubnetsOptions{} + if start != "" { + options.Start = &start + } + s, response, err := sess.ListSubnets(options) + if err != nil { + return fmt.Errorf("Error Fetching subnets %s\n%s", err, response) + } + start = GetNext(s.Next) + allrecs = append(allrecs, s.Subnets...) 
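+ // NOTE (editorial, illustrative only): GetNext is a provider helper that
+ // pulls the `start` query parameter out of the collection's `next` page
+ // href. Assuming the href carries the token as ?start=... (nextToken and
+ // its signature are hypothetical; requires net/url), an equivalent
+ // stand-alone helper would be:
+ //
+ // func nextToken(href *string) string {
+ //     if href == nil {
+ //         return ""
+ //     }
+ //     u, err := url.Parse(*href)
+ //     if err != nil {
+ //         return ""
+ //     }
+ //     return u.Query().Get("start")
+ // }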
+ if start == "" { + break + } + } + subnetsInfo := make([]map[string]interface{}, 0) + for _, subnet := range allrecs { + if *subnet.VPC.ID == d.Id() { + l := map[string]interface{}{ + "name": *subnet.Name, + "id": *subnet.ID, + "status": *subnet.Status, + "zone": *subnet.Zone.Name, + totalIPV4AddressCount: *subnet.TotalIpv4AddressCount, + availableIPV4AddressCount: *subnet.AvailableIpv4AddressCount, + } + subnetsInfo = append(subnetsInfo, l) + } + } + d.Set(subnetsList, subnetsInfo) + + //Set Security group list + + listSgOptions := &vpcv1.ListSecurityGroupsOptions{} + sgs, _, err := sess.ListSecurityGroups(listSgOptions) + if err != nil { + return err + } + + securityGroupList := make([]map[string]interface{}, 0) + + for _, group := range sgs.SecurityGroups { + if *group.VPC.ID == d.Id() { + g := make(map[string]interface{}) + + g[isVPCSecurityGroupName] = *group.Name + g[isVPCSecurityGroupID] = *group.ID + + rules := make([]map[string]interface{}, 0) + for _, sgrule := range group.Rules { + switch reflect.TypeOf(sgrule).String() { + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp": + { + rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + r := make(map[string]interface{}) + if rule.Code != nil { + r[isVPCSecurityGroupRuleCode] = int(*rule.Code) + } + if rule.Type != nil { + r[isVPCSecurityGroupRuleType] = int(*rule.Type) + } + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + + rules = append(rules, r) + } + + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll": + { + rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) + r := make(map[string]interface{}) + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok { + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + + case "*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp": + { + rule := sgrule.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + r := make(map[string]interface{}) + r[isVPCSecurityGroupRuleDirection] = *rule.Direction + r[isVPCSecurityGroupRuleIPVersion] = *rule.IPVersion + if rule.PortMin != nil { + r[isVPCSecurityGroupRulePortMin] = int(*rule.PortMin) + } + if rule.PortMax != nil { + r[isVPCSecurityGroupRulePortMax] = int(*rule.PortMax) + } + + if rule.Protocol != nil { + r[isVPCSecurityGroupRuleProtocol] = *rule.Protocol + } + + r[isVPCSecurityGroupRuleID] = *rule.ID + remote, ok := rule.Remote.(*vpcv1.SecurityGroupRuleRemote) + if ok 
{ + if remote != nil && reflect.ValueOf(remote).IsNil() == false { + if remote.ID != nil { + r[isVPCSecurityGroupRuleRemote] = remote.ID + } else if remote.Address != nil { + r[isVPCSecurityGroupRuleRemote] = remote.Address + } else if remote.CIDRBlock != nil { + r[isVPCSecurityGroupRuleRemote] = remote.CIDRBlock + } + } + } + rules = append(rules, r) + } + } + } + g[isVPCSgRules] = rules + securityGroupList = append(securityGroupList, g) + } + } + + d.Set(isVPCSecurityGroupList, securityGroupList) + return nil +} + +func resourceIBMISVPCUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + + name := "" + hasChanged := false + + if d.HasChange(isVPCName) { + name = d.Get(isVPCName).(string) + hasChanged = true + } + if userDetails.generation == 1 { + err := classicVpcUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } else { + err := vpcUpdate(d, meta, id, name, hasChanged) + if err != nil { + return err + } + } + return resourceIBMISVPCRead(d, meta) +} + +func classicVpcUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isVPCTags) { + getvpcOptions := &vpcclassicv1.GetVPCOptions{ + ID: &id, + } + vpc, response, err := sess.GetVPC(getvpcOptions) + if err != nil { + return fmt.Errorf("Error getting VPC : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isVPCTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *vpc.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc (%s) tags: %s", id, err) + } + } + if hasChanged { + updateVpcOptions := &vpcclassicv1.UpdateVPCOptions{ + ID: &id, + } + vpcPatchModel := &vpcclassicv1.VPCPatch{ + Name: &name, + } + vpcPatch, err := vpcPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for VPCPatch: %s", err) + } + updateVpcOptions.VPCPatch = vpcPatch + _, response, err := sess.UpdateVPC(updateVpcOptions) + if err != nil { + return fmt.Errorf("Error Updating VPC : %s\n%s", err, response) + } + } + return nil +} + +func vpcUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if d.HasChange(isVPCTags) { + getvpcOptions := &vpcv1.GetVPCOptions{ + ID: &id, + } + vpc, response, err := sess.GetVPC(getvpcOptions) + if err != nil { + return fmt.Errorf("Error getting VPC : %s\n%s", err, response) + } + oldList, newList := d.GetChange(isVPCTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *vpc.CRN) + if err != nil { + log.Printf( + "Error on update of resource vpc (%s) tags: %s", d.Id(), err) + } + } + + if d.HasChange(isVPCDefaultSecurityGroupName) { + if defaultSGName, ok := d.GetOk(isVPCDefaultSecurityGroupName); ok { + sgNameUpdate(sess, d.Get(isVPCDefaultSecurityGroup).(string), defaultSGName.(string)) + } + } + if d.HasChange(isVPCDefaultRoutingTableName) { + if defaultRTName, ok := d.GetOk(isVPCDefaultRoutingTableName); ok { + rtNameUpdate(sess, id, d.Get(isVPCDefaultRoutingTable).(string), defaultRTName.(string)) + } + } + if d.HasChange(isVPCDefaultNetworkACLName) { + if defaultACLName, ok := d.GetOk(isVPCDefaultNetworkACLName); ok { + nwaclNameUpdate(sess, d.Get(isVPCDefaultNetworkACL).(string), defaultACLName.(string)) + } + } + + if hasChanged { + updateVpcOptions := &vpcv1.UpdateVPCOptions{ + ID: &id, + } + 
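+ // NOTE (editorial, illustrative only): updates go through the SDK's
+ // patch-model flow rather than a full replace: only fields explicitly set
+ // on the VPCPatch below are serialized by AsPatch() into the
+ // map[string]interface{} PATCH body, so attributes left nil are not
+ // clobbered server-side. The steps that follow, in miniature:
+ //
+ // patch, err := (&vpcv1.VPCPatch{Name: &name}).AsPatch()
+ // if err != nil { /* handle */ }
+ // updateVpcOptions.VPCPatch = patch
+ // _, _, err = sess.UpdateVPC(updateVpcOptions)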
vpcPatchModel := &vpcv1.VPCPatch{ + Name: &name, + } + vpcPatch, err := vpcPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for VPCPatch: %s", err) + } + updateVpcOptions.VPCPatch = vpcPatch + _, response, err := sess.UpdateVPC(updateVpcOptions) + if err != nil { + return fmt.Errorf("Error Updating VPC : %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISVPCDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + id := d.Id() + if userDetails.generation == 1 { + err := classicVpcDelete(d, meta, id) + if err != nil { + return err + } + } else { + err := vpcDelete(d, meta, id) + if err != nil { + return err + } + } + d.SetId("") + return nil +} + +func classicVpcDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getVpcOptions := &vpcclassicv1.GetVPCOptions{ + ID: &id, + } + _, response, err := sess.GetVPC(getVpcOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + + return fmt.Errorf("Error Getting VPC (%s): %s\n%s", id, err, response) + + } + + deletevpcOptions := &vpcclassicv1.DeleteVPCOptions{ + ID: &id, + } + response, err = sess.DeleteVPC(deletevpcOptions) + if err != nil { + return fmt.Errorf("Error Deleting VPC : %s\n%s", err, response) + } + _, err = isWaitForClassicVPCDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func vpcDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getVpcOptions := &vpcv1.GetVPCOptions{ + ID: &id, + } + _, response, err := sess.GetVPC(getVpcOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting VPC (%s): %s\n%s", id, err, response) + } + + deletevpcOptions := &vpcv1.DeleteVPCOptions{ + ID: &id, + } + response, err = sess.DeleteVPC(deletevpcOptions) + if err != nil { + return fmt.Errorf("Error Deleting VPC : %s\n%s", err, response) + } + _, err = isWaitForVPCDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func isWaitForClassicVPCDeleted(vpc *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for VPC (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVPCDeleting}, + Target: []string{isVPCDeleted, isVPCFailed}, + Refresh: isClassicVPCDeleteRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicVPCDeleteRefreshFunc(vpc *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getvpcOptions := &vpcclassicv1.GetVPCOptions{ + ID: &id, + } + vpc, response, err := vpc.GetVPC(getvpcOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return vpc, isVPCDeleted, nil + } + return nil, isVPCFailed, fmt.Errorf("The VPC %s failed to delete: %s\n%s", id, err, response) + } + + return vpc, isVPCDeleting, nil + } +} + +func isWaitForVPCDeleted(vpc *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + 
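+ // NOTE (editorial, illustrative only): deletion is confirmed by polling.
+ // The refresh func below keeps reporting the "deleting" state and maps a
+ // 404 from GetVPC to the terminal "deleted" state, which is what the
+ // StateChangeConf targets. The same wait-until-gone pattern as a minimal
+ // sketch (waitGone and fetch are hypothetical):
+ //
+ // func waitGone(fetch func() (gone bool, err error), timeout time.Duration) error {
+ //     deadline := time.Now().Add(timeout)
+ //     for time.Now().Before(deadline) {
+ //         gone, err := fetch()
+ //         if err != nil {
+ //             return err
+ //         }
+ //         if gone {
+ //             return nil
+ //         }
+ //         time.Sleep(10 * time.Second)
+ //     }
+ //     return fmt.Errorf("timed out waiting for deletion")
+ // }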
log.Printf("Waiting for VPC (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVPCDeleting}, + Target: []string{isVPCDeleted, isVPCFailed}, + Refresh: isVPCDeleteRefreshFunc(vpc, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isVPCDeleteRefreshFunc(vpc *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] delete function here") + getvpcOptions := &vpcv1.GetVPCOptions{ + ID: &id, + } + vpc, response, err := vpc.GetVPC(getvpcOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return vpc, isVPCDeleted, nil + } + return nil, isVPCFailed, fmt.Errorf("The VPC %s failed to delete: %s\n%s", id, err, response) + } + + return vpc, isVPCDeleting, nil + } +} + +func resourceIBMISVPCExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + if userDetails.generation == 1 { + exists, err := classicVpcExists(d, meta, id) + return exists, err + } else { + exists, err := vpcExists(d, meta, id) + return exists, err + } +} + +func classicVpcExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getvpcOptions := &vpcclassicv1.GetVPCOptions{ + ID: &id, + } + _, response, err := sess.GetVPC(getvpcOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting VPC: %s\n%s", err, response) + } + + return true, nil +} + +func vpcExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getvpcOptions := &vpcv1.GetVPCOptions{ + ID: &id, + } + _, response, err := sess.GetVPC(getvpcOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting VPC: %s\n%s", err, response) + } + return true, nil +} + +func resourceIBMVPCHash(v interface{}) int { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("%s", + strings.ToLower(v.(string)))) + return hashcode.String(buf.String()) +} + +func nwaclNameUpdate(sess *vpcv1.VpcV1, id, name string) error { + updateNetworkACLOptions := &vpcv1.UpdateNetworkACLOptions{ + ID: &id, + } + networkACLPatchModel := &vpcv1.NetworkACLPatch{ + Name: &name, + } + networkACLPatch, err := networkACLPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for NetworkACLPatch: %s", err) + } + updateNetworkACLOptions.NetworkACLPatch = networkACLPatch + _, response, err := sess.UpdateNetworkACL(updateNetworkACLOptions) + if err != nil { + return fmt.Errorf("Error Updating Network ACL(%s) name : %s\n%s", id, err, response) + } + return nil +} + +func sgNameUpdate(sess *vpcv1.VpcV1, id, name string) error { + updateSecurityGroupOptions := &vpcv1.UpdateSecurityGroupOptions{ + ID: &id, + } + securityGroupPatchModel := &vpcv1.SecurityGroupPatch{ + Name: &name, + } + securityGroupPatch, err := securityGroupPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for SecurityGroupPatch: %s", err) + } + updateSecurityGroupOptions.SecurityGroupPatch = securityGroupPatch + _, response, err := sess.UpdateSecurityGroup(updateSecurityGroupOptions) + if 
err != nil { + return fmt.Errorf("Error Updating Security Group name : %s\n%s", err, response) + } + return nil +} + +func rtNameUpdate(sess *vpcv1.VpcV1, vpcID, id, name string) error { + updateVpcRoutingTableOptions := new(vpcv1.UpdateVPCRoutingTableOptions) + updateVpcRoutingTableOptions.VPCID = &vpcID + updateVpcRoutingTableOptions.ID = &id + routingTablePatchModel := new(vpcv1.RoutingTablePatch) + routingTablePatchModel.Name = &name + routingTablePatchModelAsPatch, asPatchErr := routingTablePatchModel.AsPatch() + if asPatchErr != nil { + return fmt.Errorf("Error calling asPatch for RoutingTablePatchModel: %s", asPatchErr) + } + updateVpcRoutingTableOptions.RoutingTablePatch = routingTablePatchModelAsPatch + _, response, err := sess.UpdateVPCRoutingTable(updateVpcRoutingTableOptions) + if err != nil { + return fmt.Errorf("Error Updating Routing table name %s\n%s", err, response) + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_address_prefix.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_address_prefix.go new file mode 100644 index 00000000000..5ec2dcfe110 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_address_prefix.go @@ -0,0 +1,555 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isVPCAddressPrefixPrefixName = "name" + isVPCAddressPrefixZoneName = "zone" + isVPCAddressPrefixCIDR = "cidr" + isVPCAddressPrefixVPCID = "vpc" + isVPCAddressPrefixHasSubnets = "has_subnets" + isVPCAddressPrefixDefault = "is_default" +) + +func resourceIBMISVpcAddressPrefix() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISVpcAddressPrefixCreate, + Read: resourceIBMISVpcAddressPrefixRead, + Update: resourceIBMISVpcAddressPrefixUpdate, + Delete: resourceIBMISVpcAddressPrefixDelete, + Exists: resourceIBMISVpcAddressPrefixExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + isVPCAddressPrefixPrefixName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_address_prefix", isVPCAddressPrefixPrefixName), + Description: "Name", + }, + isVPCAddressPrefixZoneName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Zone name", + }, + + isVPCAddressPrefixCIDR: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: InvokeValidator("ibm_is_address_prefix", isVPCAddressPrefixCIDR), + Description: "CIDR address prefix", + }, + isVPCAddressPrefixDefault: { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Is default prefix for this zone in this VPC", + }, + + isVPCAddressPrefixVPCID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "VPC id", + }, + + isVPCAddressPrefixHasSubnets: { + Type: schema.TypeBool, + Computed: true, + Description: "Boolean value, set to true if the VPC instance has subnets", + }, + + RelatedCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the VPC resource", + }, + }, + } +} + +func resourceIBMISAddressPrefixValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier:
isVPCAddressPrefixPrefixName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCRouteDestinationCIDR, + ValidateFunctionIdentifier: ValidateCIDRAddress, + Type: TypeString, + ForceNew: true, + Required: true}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCAddressPrefixCIDR, + ValidateFunctionIdentifier: ValidateOverlappingAddress, + Type: TypeString, + ForceNew: true, + Required: true}) + + ibmISAddressPrefixResourceValidator := ResourceValidator{ResourceName: "ibm_is_address_prefix", Schema: validateSchema} + return &ibmISAddressPrefixResourceValidator +} + +func resourceIBMISVpcAddressPrefixCreate(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + isDefault := false + prefixName := d.Get(isVPCAddressPrefixPrefixName).(string) + zoneName := d.Get(isVPCAddressPrefixZoneName).(string) + cidr := d.Get(isVPCAddressPrefixCIDR).(string) + vpcID := d.Get(isVPCAddressPrefixVPCID).(string) + if isDefaultPrefix, ok := d.GetOk(isVPCAddressPrefixDefault); ok { + isDefault = isDefaultPrefix.(bool) + } + + isVPCAddressPrefixKey := "vpc_address_prefix_key_" + vpcID + ibmMutexKV.Lock(isVPCAddressPrefixKey) + defer ibmMutexKV.Unlock(isVPCAddressPrefixKey) + + if userDetails.generation == 1 { + err := classicVpcAddressPrefixCreate(d, meta, prefixName, zoneName, cidr, vpcID) + if err != nil { + return err + } + } else { + err := vpcAddressPrefixCreate(d, meta, prefixName, zoneName, cidr, vpcID, isDefault) + if err != nil { + return err + } + } + return resourceIBMISVpcAddressPrefixRead(d, meta) +} + +func classicVpcAddressPrefixCreate(d *schema.ResourceData, meta interface{}, name, zone, cidr, vpcID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.CreateVPCAddressPrefixOptions{ + Name: &name, + VPCID: &vpcID, + CIDR: &cidr, + Zone: &vpcclassicv1.ZoneIdentity{ + Name: &zone, + }, + } + addrPrefix, response, err := sess.CreateVPCAddressPrefix(options) + if err != nil { + return fmt.Errorf("Error while creating VPC Address Prefix %s\n%s", err, response) + } + + addrPrefixID := *addrPrefix.ID + + d.SetId(fmt.Sprintf("%s/%s", vpcID, addrPrefixID)) + return nil +} + +func vpcAddressPrefixCreate(d *schema.ResourceData, meta interface{}, name, zone, cidr, vpcID string, isDefault bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.CreateVPCAddressPrefixOptions{ + Name: &name, + VPCID: &vpcID, + CIDR: &cidr, + IsDefault: &isDefault, + Zone: &vpcv1.ZoneIdentity{ + Name: &zone, + }, + } + addrPrefix, response, err := sess.CreateVPCAddressPrefix(options) + if err != nil { + return fmt.Errorf("Error while creating VPC Address Prefix %s\n%s", err, response) + } + + addrPrefixID := *addrPrefix.ID + d.SetId(fmt.Sprintf("%s/%s", vpcID, addrPrefixID)) + return nil +} + +func resourceIBMISVpcAddressPrefixRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + vpcID := parts[0] + addrPrefixID := parts[1] + if userDetails.generation == 1 { + err := classicVpcAddressPrefixGet(d, meta, vpcID, addrPrefixID) + if err 
!= nil { + return err + } + } else { + err := vpcAddressPrefixGet(d, meta, vpcID, addrPrefixID) + if err != nil { + return err + } + } + + return nil +} + +func classicVpcAddressPrefixGet(d *schema.ResourceData, meta interface{}, vpcID, addrPrefixID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getvpcAddressPrefixOptions := &vpcclassicv1.GetVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + addrPrefix, response, err := sess.GetVPCAddressPrefix(getvpcAddressPrefixOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting VPC Address Prefix (%s): %s\n%s", addrPrefixID, err, response) + } + d.Set(isVPCAddressPrefixVPCID, vpcID) + d.Set(isVPCAddressPrefixPrefixName, *addrPrefix.Name) + if addrPrefix.Zone != nil { + d.Set(isVPCAddressPrefixZoneName, *addrPrefix.Zone.Name) + } + d.Set(isVPCAddressPrefixCIDR, *addrPrefix.CIDR) + d.Set(isVPCAddressPrefixHasSubnets, *addrPrefix.HasSubnets) + getVPCOptions := &vpcclassicv1.GetVPCOptions{ + ID: &vpcID, + } + vpc, response, err := sess.GetVPC(getVPCOptions) + if err != nil { + return fmt.Errorf("Error Getting VPC : %s\n%s", err, response) + } + d.Set(RelatedCRN, *vpc.CRN) + + return nil +} + +func vpcAddressPrefixGet(d *schema.ResourceData, meta interface{}, vpcID, addrPrefixID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getvpcAddressPrefixOptions := &vpcv1.GetVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + addrPrefix, response, err := sess.GetVPCAddressPrefix(getvpcAddressPrefixOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting VPC Address Prefix (%s): %s\n%s", addrPrefixID, err, response) + } + d.Set(isVPCAddressPrefixVPCID, vpcID) + d.Set(isVPCAddressPrefixDefault, *addrPrefix.IsDefault) + d.Set(isVPCAddressPrefixPrefixName, *addrPrefix.Name) + if addrPrefix.Zone != nil { + d.Set(isVPCAddressPrefixZoneName, *addrPrefix.Zone.Name) + } + d.Set(isVPCAddressPrefixCIDR, *addrPrefix.CIDR) + d.Set(isVPCAddressPrefixHasSubnets, *addrPrefix.HasSubnets) + getVPCOptions := &vpcv1.GetVPCOptions{ + ID: &vpcID, + } + vpc, response, err := sess.GetVPC(getVPCOptions) + if err != nil { + return fmt.Errorf("Error Getting VPC : %s\n%s", err, response) + } + d.Set(RelatedCRN, *vpc.CRN) + + return nil +} + +func resourceIBMISVpcAddressPrefixUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + name := "" + isDefault := false + hasNameChanged := false + hasIsDefaultChanged := false + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + vpcID := parts[0] + addrPrefixID := parts[1] + + isVPCAddressPrefixKey := "vpc_address_prefix_key_" + vpcID + ibmMutexKV.Lock(isVPCAddressPrefixKey) + defer ibmMutexKV.Unlock(isVPCAddressPrefixKey) + + if d.HasChange(isVPCAddressPrefixPrefixName) { + name = d.Get(isVPCAddressPrefixPrefixName).(string) + hasNameChanged = true + } + if d.HasChange(isVPCAddressPrefixDefault) { + isDefault = d.Get(isVPCAddressPrefixDefault).(bool) + hasIsDefaultChanged = true + } + if userDetails.generation == 1 { + err := classicVpcAddressPrefixUpdate(d, meta, vpcID, addrPrefixID, name, hasNameChanged) + if err != nil { + return err + } + } else { + err := vpcAddressPrefixUpdate(d, meta, vpcID, addrPrefixID, name, isDefault, 
hasNameChanged, hasIsDefaultChanged) + if err != nil { + return err + } + } + + return resourceIBMISVpcAddressPrefixRead(d, meta) +} + +func classicVpcAddressPrefixUpdate(d *schema.ResourceData, meta interface{}, vpcID, addrPrefixID, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if hasChanged { + updatevpcAddressPrefixoptions := &vpcclassicv1.UpdateVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + addressPrefixPatchModel := &vpcclassicv1.AddressPrefixPatch{ + Name: &name, + } + addressPrefixPatch, err := addressPrefixPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for AddressPrefixPatch: %s", err) + } + updatevpcAddressPrefixoptions.AddressPrefixPatch = addressPrefixPatch + _, response, err := sess.UpdateVPCAddressPrefix(updatevpcAddressPrefixoptions) + if err != nil { + return fmt.Errorf("Error Updating VPC Address Prefix: %s\n%s", err, response) + } + } + return nil +} + +func vpcAddressPrefixUpdate(d *schema.ResourceData, meta interface{}, vpcID, addrPrefixID, name string, isDefault, hasNameChanged, hasIsDefaultChanged bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if hasNameChanged || hasIsDefaultChanged { + updatevpcAddressPrefixoptions := &vpcv1.UpdateVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + + addressPrefixPatchModel := &vpcv1.AddressPrefixPatch{} + if hasNameChanged { + addressPrefixPatchModel.Name = &name + } + if hasIsDefaultChanged { + addressPrefixPatchModel.IsDefault = &isDefault + } + addressPrefixPatch, err := addressPrefixPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for AddressPrefixPatch: %s", err) + } + updatevpcAddressPrefixoptions.AddressPrefixPatch = addressPrefixPatch + _, response, err := sess.UpdateVPCAddressPrefix(updatevpcAddressPrefixoptions) + if err != nil { + return fmt.Errorf("Error Updating VPC Address Prefix: %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISVpcAddressPrefixDelete(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + vpcID := parts[0] + addrPrefixID := parts[1] + + isVPCAddressPrefixKey := "vpc_address_prefix_key_" + vpcID + ibmMutexKV.Lock(isVPCAddressPrefixKey) + defer ibmMutexKV.Unlock(isVPCAddressPrefixKey) + + if userDetails.generation == 1 { + err := classicVpcAddressPrefixDelete(d, meta, vpcID, addrPrefixID) + if err != nil { + return err + } + } else { + err := vpcAddressPrefixDelete(d, meta, vpcID, addrPrefixID) + if err != nil { + return err + } + } + + d.SetId("") + return nil +} + +func classicVpcAddressPrefixDelete(d *schema.ResourceData, meta interface{}, vpcID, addrPrefixID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getvpcAddressPrefixOptions := &vpcclassicv1.GetVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + _, response, err := sess.GetVPCAddressPrefix(getvpcAddressPrefixOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting VPC Address Prefix (%s): %s\n%s", addrPrefixID, err, response) + } + deletevpcAddressPrefixOptions := &vpcclassicv1.DeleteVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + response, err = 
sess.DeleteVPCAddressPrefix(deletevpcAddressPrefixOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Deleting VPC Address Prefix (%s): %s\n%s", addrPrefixID, err, response) + } + d.SetId("") + return nil +} + +func vpcAddressPrefixDelete(d *schema.ResourceData, meta interface{}, vpcID, addrPrefixID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getvpcAddressPrefixOptions := &vpcv1.GetVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + _, response, err := sess.GetVPCAddressPrefix(getvpcAddressPrefixOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting VPC Address Prefix (%s): %s\n%s", addrPrefixID, err, response) + } + + deletevpcAddressPrefixOptions := &vpcv1.DeleteVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + response, err = sess.DeleteVPCAddressPrefix(deletevpcAddressPrefixOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Deleting VPC Address Prefix (%s): %s\n%s", addrPrefixID, err, response) + } + d.SetId("") + return nil +} + +func resourceIBMISVpcAddressPrefixExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if len(parts) != 2 { + return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of vpcID/addrPrefixID", d.Id()) + } + if err != nil { + return false, err + } + vpcID := parts[0] + addrPrefixID := parts[1] + + if userDetails.generation == 1 { + exists, err := classicVpcAddressPrefixExists(d, meta, vpcID, addrPrefixID) + return exists, err + } else { + exists, err := vpcAddressPrefixExists(d, meta, vpcID, addrPrefixID) + return exists, err + } +} + +func classicVpcAddressPrefixExists(d *schema.ResourceData, meta interface{}, vpcID, addrPrefixID string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getvpcAddressPrefixOptions := &vpcclassicv1.GetVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + _, response, err := sess.GetVPCAddressPrefix(getvpcAddressPrefixOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting VPC Address Prefix: %s\n%s", err, response) + } + return true, nil +} + +func vpcAddressPrefixExists(d *schema.ResourceData, meta interface{}, vpcID, addrPrefixID string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getvpcAddressPrefixOptions := &vpcv1.GetVPCAddressPrefixOptions{ + VPCID: &vpcID, + ID: &addrPrefixID, + } + _, response, err := sess.GetVPCAddressPrefix(getvpcAddressPrefixOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting VPC Address Prefix: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_route.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_route.go new file mode 100644 index 00000000000..b7d1e1240b1 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_route.go @@ -0,0 +1,664 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isVPCRouteName = "name" + isVPCRouteState = "status" + isVPCRouteNextHop = "next_hop" + isVPCRouteDestinationCIDR = "destination" + isVPCRouteLocation = "zone" + isVPCRouteVPCID = "vpc" + + isRouteStatusPending = "pending" + isRouteStatusUpdating = "updating" + isRouteStatusStable = "stable" + isRouteStatusFailed = "failed" + + isRouteStatusDeleting = "deleting" + isRouteStatusDeleted = "deleted" +) + +func resourceIBMISVpcRoute() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISVpcRouteCreate, + Read: resourceIBMISVpcRouteRead, + Update: resourceIBMISVpcRouteUpdate, + Delete: resourceIBMISVpcRouteDelete, + Exists: resourceIBMISVpcRouteExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + isVPCRouteName: { + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: InvokeValidator("ibm_is_route", isVPCRouteName), + Description: "VPC route name", + }, + isVPCRouteLocation: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "VPC route location", + }, + + isVPCRouteDestinationCIDR: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: InvokeValidator("ibm_is_route", isVPCRouteDestinationCIDR), + Description: "VPC route destination CIDR value", + }, + + isVPCRouteState: { + Type: schema.TypeString, + Computed: true, + }, + + isVPCRouteVPCID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "VPC instance ID", + }, + + isVPCRouteNextHop: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "VPC route next hop value", + }, + + RelatedCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the VPC resource", + }, + }, + } +} + +func resourceIBMISRouteValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCRouteName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPCRouteDestinationCIDR, + ValidateFunctionIdentifier: ValidateCIDRAddress, + Type: TypeString, + ForceNew: true, + Required: true}) + + ibmISRouteResourceValidator := ResourceValidator{ResourceName: "ibm_is_route", Schema: validateSchema} + return &ibmISRouteResourceValidator +} + +func resourceIBMISVpcRouteCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + routeName := d.Get(isVPCRouteName).(string) + zoneName := d.Get(isVPCRouteLocation).(string) + cidr := d.Get(isVPCRouteDestinationCIDR).(string) + vpcID := d.Get(isVPCRouteVPCID).(string) + nextHop := d.Get(isVPCRouteNextHop).(string) + if userDetails.generation == 1 { + err := classicVpcRouteCreate(d, meta, routeName, zoneName, cidr, vpcID, nextHop) + if err != nil { + return err + } + } else { + err := 
vpcRouteCreate(d, meta, routeName, zoneName, cidr, vpcID, nextHop) + if err != nil { + return err + } + } + return resourceIBMISVpcRouteRead(d, meta) +} + +func classicVpcRouteCreate(d *schema.ResourceData, meta interface{}, routeName, zoneName, cidr, vpcID, nextHop string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + createRouteOptions := &vpcclassicv1.CreateVPCRouteOptions{ + VPCID: &vpcID, + Destination: &cidr, + Name: &routeName, + NextHop: &vpcclassicv1.RouteNextHopPrototype{ + Address: &nextHop, + }, + Zone: &vpcclassicv1.ZoneIdentity{ + Name: &zoneName, + }, + } + route, response, err := sess.CreateVPCRoute(createRouteOptions) + if err != nil { + return fmt.Errorf("Error while creating VPC Route %s\n%s", err, response) + } + routeID := *route.ID + + d.SetId(fmt.Sprintf("%s/%s", vpcID, routeID)) + + _, err = isWaitForClassicRouteStable(sess, d, vpcID, routeID) + if err != nil { + return err + } + return nil +} + +func vpcRouteCreate(d *schema.ResourceData, meta interface{}, routeName, zoneName, cidr, vpcID, nextHop string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + createRouteOptions := &vpcv1.CreateVPCRouteOptions{ + VPCID: &vpcID, + Destination: &cidr, + Name: &routeName, + NextHop: &vpcv1.RouteNextHopPrototype{ + Address: &nextHop, + }, + Zone: &vpcv1.ZoneIdentity{ + Name: &zoneName, + }, + } + route, response, err := sess.CreateVPCRoute(createRouteOptions) + if err != nil { + return fmt.Errorf("Error while creating VPC Route err %s\n%s", err, response) + } + routeID := *route.ID + + d.SetId(fmt.Sprintf("%s/%s", vpcID, routeID)) + + _, err = isWaitForRouteStable(sess, d, vpcID, routeID) + if err != nil { + return err + } + return nil +} + +func isWaitForClassicRouteStable(sess *vpcclassicv1.VpcClassicV1, d *schema.ResourceData, vpcID, routeID string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isRouteStatusPending, isRouteStatusUpdating}, + Target: []string{isRouteStatusStable, isRouteStatusFailed}, + Refresh: func() (interface{}, string, error) { + getVpcRouteOptions := &vpcclassicv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + route, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + return route, "", fmt.Errorf("Error Getting VPC Route: %s\n%s", err, response) + } + + if *route.LifecycleState == "stable" || *route.LifecycleState == "failed" { + return route, *route.LifecycleState, nil + } + return route, *route.LifecycleState, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isWaitForRouteStable(sess *vpcv1.VpcV1, d *schema.ResourceData, vpcID, routeID string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{isRouteStatusPending, isRouteStatusUpdating}, + Target: []string{isRouteStatusStable, isRouteStatusFailed}, + Refresh: func() (interface{}, string, error) { + getVpcRouteOptions := &vpcv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + route, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + return route, "", fmt.Errorf("Error Getting VPC Route: %s\n%s", err, response) + } + + if *route.LifecycleState == "stable" || *route.LifecycleState == "failed" { + return route, *route.LifecycleState, nil + } + return route, *route.LifecycleState, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * 
time.Second, + } + + return stateConf.WaitForState() +} + +func resourceIBMISVpcRouteRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + vpcID := parts[0] + routeID := parts[1] + if userDetails.generation == 1 { + err := classicVpcRouteGet(d, meta, vpcID, routeID) + if err != nil { + return err + } + } else { + err := vpcRouteGet(d, meta, vpcID, routeID) + if err != nil { + return err + } + } + return nil +} + +func classicVpcRouteGet(d *schema.ResourceData, meta interface{}, vpcID, routeID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getVpcRouteOptions := &vpcclassicv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + route, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting VPC Route (%s): %s\n%s", routeID, err, response) + } + d.Set(isVPCRouteVPCID, vpcID) + d.Set(isVPCRouteName, route.Name) + if route.Zone != nil { + d.Set(isVPCRouteLocation, *route.Zone.Name) + } + d.Set(isVPCRouteDestinationCIDR, *route.Destination) + nexthop := route.NextHop.(*vpcclassicv1.RouteNextHop) + d.Set(isVPCRouteNextHop, *nexthop.Address) + d.Set(isVPCRouteState, *route.LifecycleState) + getVPCOptions := &vpcclassicv1.GetVPCOptions{ + ID: &vpcID, + } + vpc, response, err := sess.GetVPC(getVPCOptions) + if err != nil { + return fmt.Errorf("Error Getting VPC : %s\n%s", err, response) + } + d.Set(RelatedCRN, *vpc.CRN) + return nil +} + +func vpcRouteGet(d *schema.ResourceData, meta interface{}, vpcID, routeID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + getVpcRouteOptions := &vpcv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + route, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting VPC Route (%s): %s\n%s", routeID, err, response) + } + d.Set(isVPCRouteVPCID, vpcID) + d.Set(isVPCRouteName, route.Name) + if route.Zone != nil { + d.Set(isVPCRouteLocation, *route.Zone.Name) + } + d.Set(isVPCRouteDestinationCIDR, *route.Destination) + nexthop := route.NextHop.(*vpcv1.RouteNextHop) + d.Set(isVPCRouteNextHop, *nexthop.Address) + d.Set(isVPCRouteState, *route.LifecycleState) + getVPCOptions := &vpcv1.GetVPCOptions{ + ID: &vpcID, + } + vpc, response, err := sess.GetVPC(getVPCOptions) + if err != nil { + return fmt.Errorf("Error Getting VPC : %s\n%s", err, response) + } + d.Set(RelatedCRN, *vpc.CRN) + + return nil +} + +func resourceIBMISVpcRouteUpdate(d *schema.ResourceData, meta interface{}) error { + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + name := "" + hasChanged := false + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + vpcID := parts[0] + routeID := parts[1] + if d.HasChange(isVPCRouteName) { + name = d.Get(isVPCRouteName).(string) + hasChanged = true + } + + if userDetails.generation == 1 { + err := classicVpcRouteUpdate(d, meta, vpcID, routeID, name, hasChanged) + if err != nil { + return err + } + } else { + err := vpcRouteUpdate(d, meta, vpcID, routeID, name, hasChanged) + if err != nil { + return err + } + } + + return resourceIBMISVpcRouteRead(d, meta) +} + +func 
classicVpcRouteUpdate(d *schema.ResourceData, meta interface{}, vpcID, routeID, name string, hasChanged bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + if hasChanged { + updateVpcRouteOptions := &vpcclassicv1.UpdateVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + routePatchModel := &vpcclassicv1.RoutePatch{ + Name: &name, + } + routePatch, err := routePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for RoutePatch: %s", err) + } + updateVpcRouteOptions.RoutePatch = routePatch + _, response, err := sess.UpdateVPCRoute(updateVpcRouteOptions) + if err != nil { + return fmt.Errorf("Error Updating VPC Route: %s\n%s", err, response) + } + } + return nil +} + +func vpcRouteUpdate(d *schema.ResourceData, meta interface{}, vpcID, routeID, name string, hasChanged bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + if hasChanged { + updateVpcRouteOptions := &vpcv1.UpdateVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + routePatchModel := &vpcv1.RoutePatch{ + Name: &name, + } + routePatch, err := routePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for RoutePatch: %s", err) + } + updateVpcRouteOptions.RoutePatch = routePatch + _, response, err := sess.UpdateVPCRoute(updateVpcRouteOptions) + if err != nil { + return fmt.Errorf("Error Updating VPC Route: %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISVpcRouteDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + vpcID := parts[0] + routeID := parts[1] + if userDetails.generation == 1 { + err := classicVpcRouteDelete(d, meta, vpcID, routeID) + if err != nil { + return err + } + } else { + err := vpcRouteDelete(d, meta, vpcID, routeID) + if err != nil { + return err + } + } + + d.SetId("") + return nil +} + +func classicVpcRouteDelete(d *schema.ResourceData, meta interface{}, vpcID, routeID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + + getVpcRouteOptions := &vpcclassicv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + _, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting VPC Route (%s): %s\n%s", routeID, err, response) + } + deleteRouteOptions := &vpcclassicv1.DeleteVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + response, err = sess.DeleteVPCRoute(deleteRouteOptions) + if err != nil { + return fmt.Errorf("Error Deleting VPC Route: %s\n%s", err, response) + } + _, err = isWaitForClassicVPCRouteDeleted(sess, vpcID, routeID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func vpcRouteDelete(d *schema.ResourceData, meta interface{}, vpcID, routeID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getVpcRouteOptions := &vpcv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + _, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting VPC Route (%s): %s\n%s", routeID, err, response) + } + deleteRouteOptions := &vpcv1.DeleteVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + response, err = 
sess.DeleteVPCRoute(deleteRouteOptions) + if err != nil { + return fmt.Errorf("Error Deleting VPC Route: %s\n%s", err, response) + } + _, err = isWaitForVPCRouteDeleted(sess, vpcID, routeID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func isWaitForClassicVPCRouteDeleted(sess *vpcclassicv1.VpcClassicV1, vpcID, routeID string, timeout time.Duration) (interface{}, error) { + + log.Printf("Waiting for VPC Route (%s) to be deleted.", routeID) + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isRouteStatusDeleting}, + Target: []string{isRouteStatusDeleted, isRouteStatusFailed}, + Refresh: func() (interface{}, string, error) { + getVpcRouteOptions := &vpcclassicv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + route, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return route, isRouteStatusDeleted, nil + } + return route, isRouteStatusDeleting, fmt.Errorf("The VPC route %s failed to delete: %s\n%s", routeID, err, response) + } + + return route, isRouteStatusDeleting, nil + }, + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isWaitForVPCRouteDeleted(sess *vpcv1.VpcV1, vpcID, routeID string, timeout time.Duration) (interface{}, error) { + + log.Printf("Waiting for VPC Route (%s) to be deleted.", routeID) + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isRouteStatusDeleting}, + Target: []string{isRouteStatusDeleted, isRouteStatusFailed}, + Refresh: func() (interface{}, string, error) { + getVpcRouteOptions := &vpcv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + route, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return route, isRouteStatusDeleted, nil + } + return route, isRouteStatusDeleting, fmt.Errorf("The VPC route %s failed to delete: %s\n%s", routeID, err, response) + } + return route, isRouteStatusDeleting, nil + }, + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func resourceIBMISVpcRouteExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + if len(parts) != 2 { + return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of vpcID/routeID", d.Id()) + } + vpcID := parts[0] + routeID := parts[1] + if userDetails.generation == 1 { + exists, err := classicVpcRouteExists(d, meta, vpcID, routeID) + return exists, err + } else { + exists, err := vpcRouteExists(d, meta, vpcID, routeID) + return exists, err + } +} + +func classicVpcRouteExists(d *schema.ResourceData, meta interface{}, vpcID, routeID string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getVpcRouteOptions := &vpcclassicv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + _, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting VPC Route: %s\n%s", err, response) + } + return true, nil +} + +func vpcRouteExists(d *schema.ResourceData, meta interface{}, vpcID, routeID string) (bool, error) { + sess, err := 
vpcClient(meta) + if err != nil { + return false, err + } + getVpcRouteOptions := &vpcv1.GetVPCRouteOptions{ + VPCID: &vpcID, + ID: &routeID, + } + _, response, err := sess.GetVPCRoute(getVpcRouteOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting VPC Route: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_routing_table.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_routing_table.go new file mode 100644 index 00000000000..8ae2d572ac3 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_routing_table.go @@ -0,0 +1,324 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + rtID = "routing_table" + rtVpcID = "vpc" + rtName = "name" + rtRouteDirectLinkIngress = "route_direct_link_ingress" + rtRouteTransitGatewayIngress = "route_transit_gateway_ingress" + rtRouteVPCZoneIngress = "route_vpc_zone_ingress" + rtCreateAt = "created_at" + rtHref = "href" + rtIsDefault = "is_default" + rtResourceType = "resource_type" + rtLifecycleState = "lifecycle_state" + rtSubnets = "subnets" + rtDestination = "destination" + rtAction = "action" + rtNextHop = "next_hop" + rtZone = "zone" + rtOrigin = "origin" +) + +func resourceIBMISVPCRoutingTable() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISVPCRoutingTableCreate, + Read: resourceIBMISVPCRoutingTableRead, + Update: resourceIBMISVPCRoutingTableUpdate, + Delete: resourceIBMISVPCRoutingTableDelete, + Exists: resourceIBMISVPCRoutingTableExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + rtVpcID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC identifier.", + }, + rtRouteDirectLinkIngress: { + Type: schema.TypeBool, + ForceNew: false, + Default: false, + Optional: true, + Description: "If set to true, this routing table will be used to route traffic that originates from Direct Link to this VPC.", + }, + rtRouteTransitGatewayIngress: { + Type: schema.TypeBool, + ForceNew: false, + Default: false, + Optional: true, + Description: "If set to true, this routing table will be used to route traffic that originates from Transit Gateway to this VPC.", + }, + rtRouteVPCZoneIngress: { + Type: schema.TypeBool, + ForceNew: false, + Default: false, + Optional: true, + Description: "If set to true, this routing table will be used to route traffic that originates from subnets in other zones in this VPC.", + }, + rtName: { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: InvokeValidator("ibm_is_vpc_routing_table", rtName), + Description: "The user-defined name for this routing table.", + }, + rtID: { + Type: schema.TypeString, + Computed: true, + Description: "The routing table identifier.", + }, + rtHref: { + Type: schema.TypeString, + Computed: true, + Description: "Routing table Href", + }, + rtResourceType: { + Type: 
schema.TypeString,
+				Computed:    true,
+				Description: "Routing table Resource Type",
+			},
+			rtCreateAt: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Routing table Created At",
+			},
+			rtLifecycleState: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Routing table Lifecycle State",
+			},
+			rtIsDefault: {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Indicates whether this is the default routing table for this VPC",
+			},
+			rtSubnets: {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						rtName: {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Subnet name",
+						},
+						"id": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Subnet ID",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func resourceIBMISVPCRoutingTableValidator() *ResourceValidator {
+
+	// Start from an empty slice; make([]ValidateSchema, 2) would leave two
+	// zero-valued entries ahead of the validators appended below.
+	validateSchema := make([]ValidateSchema, 0)
+	actionAllowedValues := "delegate, delegate_vpc, deliver, drop"
+
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 rtName,
+			ValidateFunctionIdentifier: ValidateRegexpLen,
+			Type:                       TypeString,
+			Required:                   false,
+			Regexp:                     `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`,
+			MinValueLength:             1,
+			MaxValueLength:             63})
+
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 rtAction,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   false,
+			AllowedValues:              actionAllowedValues})
+
+	ibmISVPCRoutingTableValidator := ResourceValidator{ResourceName: "ibm_is_vpc_routing_table", Schema: validateSchema}
+	return &ibmISVPCRoutingTableValidator
+}
+
+func resourceIBMISVPCRoutingTableCreate(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	vpcID := d.Get(rtVpcID).(string)
+	// Use a distinct local name so the rtName schema constant is not shadowed.
+	routingTableName := d.Get(rtName).(string)
+
+	createVpcRoutingTableOptions := sess.NewCreateVPCRoutingTableOptions(vpcID)
+	createVpcRoutingTableOptions.SetName(routingTableName)
+	if _, ok := d.GetOk(rtRouteDirectLinkIngress); ok {
+		routeDirectLinkIngress := d.Get(rtRouteDirectLinkIngress).(bool)
+		createVpcRoutingTableOptions.RouteDirectLinkIngress = &routeDirectLinkIngress
+	}
+	if _, ok := d.GetOk(rtRouteTransitGatewayIngress); ok {
+		routeTransitGatewayIngress := d.Get(rtRouteTransitGatewayIngress).(bool)
+		createVpcRoutingTableOptions.RouteTransitGatewayIngress = &routeTransitGatewayIngress
+	}
+	if _, ok := d.GetOk(rtRouteVPCZoneIngress); ok {
+		routeVPCZoneIngress := d.Get(rtRouteVPCZoneIngress).(bool)
+		createVpcRoutingTableOptions.RouteVPCZoneIngress = &routeVPCZoneIngress
+	}
+	routeTable, response, err := sess.CreateVPCRoutingTable(createVpcRoutingTableOptions)
+	if err != nil {
+		log.Printf("[DEBUG] Create VPC Routing table err %s\n%s", err, response)
+		return err
+	}
+
+	d.SetId(fmt.Sprintf("%s/%s", vpcID, *routeTable.ID))
+
+	return resourceIBMISVPCRoutingTableRead(d, meta)
+}
+
+func resourceIBMISVPCRoutingTableRead(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	idSet := strings.Split(d.Id(), "/")
+	getVpcRoutingTableOptions := sess.NewGetVPCRoutingTableOptions(idSet[0], idSet[1])
+	routeTable, response, err := sess.GetVPCRoutingTable(getVpcRoutingTableOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error Getting VPC Routing table: %s\n%s", err, response)
+	}
+
+	d.Set(rtID, routeTable.ID)
+	d.Set(rtName, routeTable.Name)
+
+	d.Set(rtHref, routeTable.Href)
+	d.Set(rtLifecycleState, routeTable.LifecycleState)
+	d.Set(rtCreateAt, routeTable.CreatedAt.String())
+	d.Set(rtResourceType, routeTable.ResourceType)
+	d.Set(rtRouteDirectLinkIngress, routeTable.RouteDirectLinkIngress)
+	d.Set(rtRouteTransitGatewayIngress, routeTable.RouteTransitGatewayIngress)
+	d.Set(rtRouteVPCZoneIngress, routeTable.RouteVPCZoneIngress)
+	d.Set(rtIsDefault, routeTable.IsDefault)
+
+	subnets := make([]map[string]interface{}, 0)
+
+	for _, s := range routeTable.Subnets {
+		subnet := make(map[string]interface{})
+		subnet[ID] = *s.ID
+		subnet["name"] = *s.Name
+		subnets = append(subnets, subnet)
+	}
+
+	d.Set(rtSubnets, subnets)
+
+	return nil
+}
+
+func resourceIBMISVPCRoutingTableUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	idSet := strings.Split(d.Id(), "/")
+	updateVpcRoutingTableOptions := new(vpcv1.UpdateVPCRoutingTableOptions)
+	updateVpcRoutingTableOptions.VPCID = &idSet[0]
+	updateVpcRoutingTableOptions.ID = &idSet[1]
+	// Construct an instance of the RoutingTablePatch model
+	routingTablePatchModel := new(vpcv1.RoutingTablePatch)
+
+	if d.HasChange(rtName) {
+		name := d.Get(rtName).(string)
+		routingTablePatchModel.Name = core.StringPtr(name)
+	}
+	if d.HasChange(rtRouteDirectLinkIngress) {
+		routeDirectLinkIngress := d.Get(rtRouteDirectLinkIngress).(bool)
+		routingTablePatchModel.RouteDirectLinkIngress = core.BoolPtr(routeDirectLinkIngress)
+	}
+	if d.HasChange(rtRouteTransitGatewayIngress) {
+		routeTransitGatewayIngress := d.Get(rtRouteTransitGatewayIngress).(bool)
+		routingTablePatchModel.RouteTransitGatewayIngress = core.BoolPtr(routeTransitGatewayIngress)
+	}
+	if d.HasChange(rtRouteVPCZoneIngress) {
+		routeVPCZoneIngress := d.Get(rtRouteVPCZoneIngress).(bool)
+		routingTablePatchModel.RouteVPCZoneIngress = core.BoolPtr(routeVPCZoneIngress)
+	}
+	routingTablePatchModelAsPatch, asPatchErr := routingTablePatchModel.AsPatch()
+	if asPatchErr != nil {
+		return fmt.Errorf("Error calling asPatch for RoutingTablePatchModel: %s", asPatchErr)
+	}
+	updateVpcRoutingTableOptions.RoutingTablePatch = routingTablePatchModelAsPatch
+	_, response, err := sess.UpdateVPCRoutingTable(updateVpcRoutingTableOptions)
+	if err != nil {
+		log.Printf("[DEBUG] Update VPC Routing table err %s\n%s", err, response)
+		return err
+	}
+	return resourceIBMISVPCRoutingTableRead(d, meta)
+}
+
+func resourceIBMISVPCRoutingTableDelete(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	idSet := strings.Split(d.Id(), "/")
+
+	deleteTableOptions := sess.NewDeleteVPCRoutingTableOptions(idSet[0], idSet[1])
+	response, err := sess.DeleteVPCRoutingTable(deleteTableOptions)
+	// Guard against a nil response (for example, on transport errors) before
+	// checking the status code.
+	if err != nil && (response == nil || response.StatusCode != 404) {
+		log.Printf("Error deleting VPC Routing table : %s", response)
+		return err
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMISVPCRoutingTableExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return false, err
+	}
+
+	idSet := strings.Split(d.Id(), "/")
+	if len(idSet) != 2 {
+		return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of vpcID/routingTableID", d.Id())
+	}
+	getVpcRoutingTableOptions := sess.NewGetVPCRoutingTableOptions(idSet[0], idSet[1])
+	_, response, err := sess.GetVPCRoutingTable(getVpcRoutingTableOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return false, nil
+		}
+		
return false, fmt.Errorf("Error Getting VPC Routing table : %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_routing_table_route.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_routing_table_route.go new file mode 100644 index 00000000000..6f481b6b280 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpc_routing_table_route.go @@ -0,0 +1,308 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "net" + "strings" + "time" + + "github.com/IBM/go-sdk-core/core" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + rID = "route_id" + rDestination = "destination" + rAction = "action" + rNextHop = "next_hop" + rName = "name" + rZone = "zone" +) + +func resourceIBMISVPCRoutingTableRoute() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISVPCRoutingTableRouteCreate, + Read: resourceIBMISVPCRoutingTableRouteRead, + Update: resourceIBMISVPCRoutingTableRouteUpdate, + Delete: resourceIBMISVPCRoutingTableRouteDelete, + Exists: resourceIBMISVPCRoutingTableRouteExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + rtID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The routing table identifier.", + }, + rtVpcID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC identifier.", + }, + rDestination: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The destination of the route.", + }, + rZone: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The zone to apply the route to. Traffic from subnets in this zone will be subject to this route.", + }, + rNextHop: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "If action is deliver, the next hop that packets will be delivered to. 
For other action values, its address will be 0.0.0.0.",
+			},
+			rAction: {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				Default:      "deliver",
+				Description:  "The action to perform with a packet matching the route.",
+				ValidateFunc: InvokeValidator("ibm_is_vpc_routing_table_route", rAction),
+			},
+			rName: {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     false,
+				Computed:     true,
+				Description:  "The user-defined name for this route.",
+				ValidateFunc: InvokeValidator("ibm_is_vpc_routing_table_route", rName),
+			},
+			rID: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The routing table route identifier.",
+			},
+			rtHref: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Routing table route Href",
+			},
+			rtCreateAt: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Routing table route Created At",
+			},
+			rtLifecycleState: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Routing table route Lifecycle State",
+			},
+			rtOrigin: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The origin of this route.",
+			},
+		},
+	}
+}
+
+func resourceIBMISVPCRoutingTableRouteValidator() *ResourceValidator {
+
+	// Start from an empty slice; make([]ValidateSchema, 2) would leave two
+	// zero-valued entries ahead of the validators appended below. The name
+	// validator is registered under rName, the identifier the schema's
+	// ValidateFunc looks up.
+	validateSchema := make([]ValidateSchema, 0)
+	actionAllowedValues := "delegate, delegate_vpc, deliver, drop"
+
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 rName,
+			ValidateFunctionIdentifier: ValidateRegexpLen,
+			Type:                       TypeString,
+			Required:                   false,
+			Regexp:                     `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`,
+			MinValueLength:             1,
+			MaxValueLength:             63})
+
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 rAction,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   false,
+			AllowedValues:              actionAllowedValues})
+
+	ibmVPCRoutingTableRouteValidator := ResourceValidator{ResourceName: "ibm_is_vpc_routing_table_route", Schema: validateSchema}
+	return &ibmVPCRoutingTableRouteValidator
+}
+
+func resourceIBMISVPCRoutingTableRouteCreate(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	vpcID := d.Get(rtVpcID).(string)
+	tableID := d.Get(rtID).(string)
+	destination := d.Get(rDestination).(string)
+	zone := d.Get(rZone).(string)
+	z := &vpcv1.ZoneIdentityByName{
+		Name: core.StringPtr(zone),
+	}
+
+	// NewCreateVPCRoutingTableRouteOptions records the destination and zone,
+	// so no further Set calls are needed for them.
+	createVpcRoutingTableRouteOptions := sess.NewCreateVPCRoutingTableRouteOptions(vpcID, tableID, destination, z)
+
+	if add, ok := d.GetOk(rNextHop); ok {
+		item := add.(string)
+		if net.ParseIP(item) == nil {
+			// Not an IP address: treat the value as a VPN gateway connection ID.
+			nhConnectionID := &vpcv1.RouteNextHopPrototypeVPNGatewayConnectionIdentity{
+				ID: core.StringPtr(item),
+			}
+			createVpcRoutingTableRouteOptions.SetNextHop(nhConnectionID)
+		} else {
+			nh := &vpcv1.RouteNextHopPrototypeRouteNextHopIP{
+				Address: core.StringPtr(item),
+			}
+			createVpcRoutingTableRouteOptions.SetNextHop(nh)
+		}
+	}
+
+	if action, ok := d.GetOk(rAction); ok {
+		routeAction := action.(string)
+		createVpcRoutingTableRouteOptions.SetAction(routeAction)
+	}
+
+	if name, ok := d.GetOk(rName); ok {
+		routeName := name.(string)
+		createVpcRoutingTableRouteOptions.SetName(routeName)
+	}
+
+	route, response, err := sess.CreateVPCRoutingTableRoute(createVpcRoutingTableRouteOptions)
+	if err != nil {
+		log.Printf("[DEBUG] Create VPC Routing table route err %s\n%s", err, response)
+		return err
+	}
+
+	d.SetId(fmt.Sprintf("%s/%s/%s", vpcID, tableID, *route.ID))
+	d.Set(rID, *route.ID)
+	return resourceIBMISVPCRoutingTableRouteRead(d, meta)
+}
+
+func resourceIBMISVPCRoutingTableRouteRead(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	idSet := strings.Split(d.Id(), "/")
+	getVpcRoutingTableRouteOptions := sess.NewGetVPCRoutingTableRouteOptions(idSet[0], idSet[1], idSet[2])
+	route, response, err := sess.GetVPCRoutingTableRoute(getVpcRoutingTableRouteOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error Getting VPC Routing table route: %s\n%s", err, response)
+	}
+
+	d.Set(rID, *route.ID)
+	d.Set(rName, *route.Name)
+	d.Set(rDestination, *route.Destination)
+	if route.NextHop != nil {
+		nexthop := route.NextHop.(*vpcv1.RouteNextHop)
+		if nexthop.Address != nil {
+			d.Set(rNextHop, *nexthop.Address)
+		}
+		if nexthop.ID != nil {
+			d.Set(rNextHop, *nexthop.ID)
+		}
+	}
+	if route.Zone != nil {
+		d.Set(rZone, *route.Zone.Name)
+	}
+	d.Set(rtHref, route.Href)
+	d.Set(rtLifecycleState, route.LifecycleState)
+	d.Set(rtCreateAt, route.CreatedAt.String())
+
+	return nil
+}
+
+func resourceIBMISVPCRoutingTableRouteUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	idSet := strings.Split(d.Id(), "/")
+	if d.HasChange(rName) {
+		routePatch := make(map[string]interface{})
+		updateVpcRoutingTableRouteOptions := sess.NewUpdateVPCRoutingTableRouteOptions(idSet[0], idSet[1], idSet[2], routePatch)
+
+		// Construct an instance of the RoutePatch model
+		routePatchModel := new(vpcv1.RoutePatch)
+		name := d.Get(rName).(string)
+		routePatchModel.Name = &name
+		routePatchModelAsPatch, patchErr := routePatchModel.AsPatch()
+
+		if patchErr != nil {
+			return fmt.Errorf("Error calling asPatch for VPC Routing Table Route Patch: %s", patchErr)
+		}
+
+		updateVpcRoutingTableRouteOptions.RoutePatch = routePatchModelAsPatch
+		_, response, err := sess.UpdateVPCRoutingTableRoute(updateVpcRoutingTableRouteOptions)
+		if err != nil {
+			log.Printf("[DEBUG] Update VPC Routing table route err %s\n%s", err, response)
+			return err
+		}
+	}
+
+	return resourceIBMISVPCRoutingTableRouteRead(d, meta)
+}
+
+func resourceIBMISVPCRoutingTableRouteDelete(d *schema.ResourceData, meta interface{}) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	idSet := strings.Split(d.Id(), "/")
+	deleteVpcRoutingTableRouteOptions := sess.NewDeleteVPCRoutingTableRouteOptions(idSet[0], idSet[1], idSet[2])
+	response, err := sess.DeleteVPCRoutingTableRoute(deleteVpcRoutingTableRouteOptions)
+	// Guard against a nil response (for example, on transport errors) before
+	// checking the status code.
+	if err != nil && (response == nil || response.StatusCode != 404) {
+		log.Printf("Error deleting VPC Routing table route : %s", response)
+		return err
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMISVPCRoutingTableRouteExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return false, err
+	}
+
+	idSet := strings.Split(d.Id(), "/")
+	if len(idSet) != 3 {
+		return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of vpcID/routingTableID/routeID", d.Id())
+	}
+	getVpcRoutingTableRouteOptions := sess.NewGetVPCRoutingTableRouteOptions(idSet[0], idSet[1], idSet[2])
+	_, response, err := sess.GetVPCRoutingTableRoute(getVpcRoutingTableRouteOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return false, nil
+		}
+		return false, fmt.Errorf("Error Getting VPC Routing table route: %s\n%s", err, response)
+	}
+	return true, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpn_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpn_gateway.go
new file mode 100644
index 00000000000..e0afc5293df
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpn_gateway.go
@@ -0,0 +1,883 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"time"
+
+	"github.com/IBM/vpc-go-sdk/vpcclassicv1"
+	"github.com/IBM/vpc-go-sdk/vpcv1"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/customdiff"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	isVPNGatewayName              = "name"
+	isVPNGatewayResourceGroup     = "resource_group"
+	isVPNGatewayMode              = "mode"
+	isVPNGatewayTags              = "tags"
+	isVPNGatewaySubnet            = "subnet"
+	isVPNGatewayStatus            = "status"
+	isVPNGatewayDeleting          = "deleting"
+	isVPNGatewayDeleted           = "done"
+	isVPNGatewayProvisioning      = "provisioning"
+	isVPNGatewayProvisioningDone  = "done"
+	isVPNGatewayPublicIPAddress   = "public_ip_address"
+	isVPNGatewayMembers           = "members"
+	isVPNGatewayCreatedAt         = "created_at"
+	isVPNGatewayPublicIPAddress2  = "public_ip_address2"
+	isVPNGatewayPrivateIPAddress  = "private_ip_address"
+	isVPNGatewayPrivateIPAddress2 = "private_ip_address2"
+)
+
+func resourceIBMISVPNGateway() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMISVPNGatewayCreate,
+		Read:     resourceIBMISVPNGatewayRead,
+		Update:   resourceIBMISVPNGatewayUpdate,
+		Delete:   resourceIBMISVPNGatewayDelete,
+		Exists:   resourceIBMISVPNGatewayExists,
+		Importer: &schema.ResourceImporter{},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		CustomizeDiff: customdiff.Sequence(
+			func(diff *schema.ResourceDiff, v interface{}) error {
+				return resourceTagsCustomizeDiff(diff)
+			},
+		),
+
+		Schema: map[string]*schema.Schema{
+
+			isVPNGatewayName: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     false,
+				ValidateFunc: InvokeValidator("ibm_is_vpn_gateway", isVPNGatewayName),
+				Description:  "VPN Gateway instance name",
+			},
+
+			isVPNGatewaySubnet: {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "VPNGateway subnet info",
+			},
+
+			isVPNGatewayResourceGroup: {
+				Type:        schema.TypeString,
+				ForceNew:    true,
+				Optional:    true,
+				Computed:    true,
+				Description: "The resource group for this VPN gateway",
+			},
+
+			isVPNGatewayStatus: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The status of the VPN gateway",
+			},
+
+			isVPNGatewayPublicIPAddress: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The public IP address assigned to the VPN gateway member.",
+			},
+
+			isVPNGatewayPublicIPAddress2: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The second public IP address assigned to the VPN gateway member.",
+			},
+
+			isVPNGatewayPrivateIPAddress: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The Private IP address assigned to the VPN gateway member.",
+			},
+
+			isVPNGatewayPrivateIPAddress2: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The Second Private IP address assigned to the VPN gateway member.",
+			},
+
+			isVPNGatewayTags: {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_vpn_gateway", "tag")},
+				Set:         resourceIBMVPCHash,
+				Description: "VPN Gateway tags list",
+			},
+
+			ResourceControllerURL: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
+			},
+
+			ResourceName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The name of the resource",
+			},
+
+			ResourceCRN: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The crn of the resource",
+			},
+
+			ResourceStatus: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The status of the resource",
+			},
+
+			ResourceGroupName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The resource group name in which resource is provisioned",
+			},
+			isVPNGatewayCreatedAt: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Created Time of the VPN Gateway",
+			},
+			isVPNGatewayMode: {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				Default:      "route",
+				ValidateFunc: InvokeValidator("ibm_is_vpn_gateway", isVPNGatewayMode),
+				Description:  "Mode of the VPN gateway (route or policy)",
+			},
+
+			isVPNGatewayMembers: {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "Collection of VPN gateway members",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"address": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The public IP address assigned to the VPN gateway member",
+						},
+
+						"private_address": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The private IP address assigned to the VPN gateway member",
+						},
+
+						"role": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The high availability role assigned to the VPN gateway member",
+						},
+
+						"status": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The status of the VPN gateway member",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func resourceIBMISVPNGatewayValidator() *ResourceValidator {
+
+	modeCheckTypes := "route,policy"
+	// Start from an empty slice; make([]ValidateSchema, 2) would leave two
+	// zero-valued entries ahead of the three validators appended below.
+	validateSchema := make([]ValidateSchema, 0)
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 isVPNGatewayName,
+			ValidateFunctionIdentifier: ValidateRegexpLen,
+			Type:                       TypeString,
+			Required:                   true,
+			Regexp:                     `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`,
+			MinValueLength:             1,
+			MaxValueLength:             63})
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 isVPNGatewayMode,
+			ValidateFunctionIdentifier: ValidateAllowedStringValue,
+			Type:                       TypeString,
+			Required:                   false,
+			AllowedValues:              modeCheckTypes})
+
+	validateSchema = append(validateSchema,
+		ValidateSchema{
+			Identifier:                 "tag",
+			ValidateFunctionIdentifier: ValidateRegexpLen,
+			Type:                       TypeString,
+			Optional:                   true,
+			Regexp:                     `^[A-Za-z0-9:_ .-]+$`,
+			MinValueLength:             1,
+			MaxValueLength:             128})
+
+	ibmISVPNGatewayResourceValidator := ResourceValidator{ResourceName: "ibm_is_vpn_gateway", Schema: validateSchema}
+	return &ibmISVPNGatewayResourceValidator
+}
+
+func resourceIBMISVPNGatewayCreate(d *schema.ResourceData, meta interface{}) error {
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+	log.Printf("[DEBUG] VPNGateway create")
+	name := d.Get(isVPNGatewayName).(string)
+	subnetID := d.Get(isVPNGatewaySubnet).(string)
+	mode := d.Get(isVPNGatewayMode).(string)
+
+	if userDetails.generation == 1 {
+		err := classicVpngwCreate(d, meta, name, subnetID)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := vpngwCreate(d, meta, name, subnetID, mode)
+		if err != nil {
+			return err
+		}
+	}
+	return resourceIBMISVPNGatewayRead(d, meta)
+}
+
+func classicVpngwCreate(d *schema.ResourceData, meta interface{}, name, subnetID string) error {
+	sess, err := classicVpcClient(meta)
+	if err != nil {
+		return err
+	}
+	vpnGatewayPrototype := &vpcclassicv1.VPNGatewayPrototype{
+		Subnet: &vpcclassicv1.SubnetIdentity{
+			ID: &subnetID,
+		},
+		Name: &name,
+	}
+	options := &vpcclassicv1.CreateVPNGatewayOptions{
+		VPNGatewayPrototype: vpnGatewayPrototype,
+	}
+
+	if rgrp, ok := d.GetOk(isVPNGatewayResourceGroup); ok {
+		rg := rgrp.(string)
+		vpnGatewayPrototype.ResourceGroup = &vpcclassicv1.ResourceGroupIdentity{
+			ID: &rg,
+		}
+	}
+
+	vpnGatewayIntf, response, err := sess.CreateVPNGateway(options)
+	if err != nil {
+		return fmt.Errorf("Error creating vpc VPN Gateway %s\n%s", err, response)
+	}
+	vpnGateway := vpnGatewayIntf.(*vpcclassicv1.VPNGateway)
+	_, err = isWaitForClassicVpnGatewayAvailable(sess, *vpnGateway.ID, d.Timeout(schema.TimeoutCreate))
+	if err != nil {
+		return err
+	}
+
+	d.SetId(*vpnGateway.ID)
+	log.Printf("[INFO] VPNGateway : %s", *vpnGateway.ID)
+
+	v := os.Getenv("IC_ENV_TAGS")
+	if _, ok := d.GetOk(isVPNGatewayTags); ok || v != "" {
+		oldList, newList := d.GetChange(isVPNGatewayTags)
+		err = UpdateTagsUsingCRN(oldList, newList, meta, *vpnGateway.CRN)
+		if err != nil {
+			log.Printf(
+				"Error on create of resource vpc VPN Gateway (%s) tags: %s", d.Id(), err)
+		}
+	}
+	return nil
+}
+
+func vpngwCreate(d *schema.ResourceData, meta interface{}, name, subnetID, mode string) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	vpnGatewayPrototype := &vpcv1.VPNGatewayPrototype{
+		Subnet: &vpcv1.SubnetIdentity{
+			ID: &subnetID,
+		},
+		Name: &name,
+		Mode: &mode,
+	}
+	options := &vpcv1.CreateVPNGatewayOptions{
+		VPNGatewayPrototype: vpnGatewayPrototype,
+	}
+
+	if rgrp, ok := d.GetOk(isVPNGatewayResourceGroup); ok {
+		rg := rgrp.(string)
+		vpnGatewayPrototype.ResourceGroup = &vpcv1.ResourceGroupIdentity{
+			ID: &rg,
+		}
+	}
+
+	vpnGatewayIntf, response, err := sess.CreateVPNGateway(options)
+	if err != nil {
+		return fmt.Errorf("Error creating vpc VPN Gateway %s\n%s", err, response)
+	}
+	vpnGateway := vpnGatewayIntf.(*vpcv1.VPNGateway)
+
+	_, err = isWaitForVpnGatewayAvailable(sess, *vpnGateway.ID, d.Timeout(schema.TimeoutCreate))
+	if err != nil {
+		return err
+	}
+
+	d.SetId(*vpnGateway.ID)
+	log.Printf("[INFO] VPNGateway : %s", *vpnGateway.ID)
+
+	v := os.Getenv("IC_ENV_TAGS")
+	if _, ok := d.GetOk(isVPNGatewayTags); ok || v != "" {
+		oldList, newList := d.GetChange(isVPNGatewayTags)
+		err = UpdateTagsUsingCRN(oldList, newList, meta, *vpnGateway.CRN)
+		if err != nil {
+			log.Printf(
+				"Error on create of resource vpc VPN Gateway (%s) tags: %s", d.Id(), err)
+		}
+	}
+	return nil
+}
+
+func isWaitForClassicVpnGatewayAvailable(vpnGateway *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) {
+	log.Printf("Waiting for vpn gateway (%s) to be available.", id)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"retry", isVPNGatewayProvisioning},
+		Target:     []string{isVPNGatewayProvisioningDone, ""},
+		Refresh:    isClassicVpnGatewayRefreshFunc(vpnGateway, id),
+		Timeout:    timeout,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func isClassicVpnGatewayRefreshFunc(vpnGateway *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) 
{ + getVpnGatewayOptions := &vpcclassicv1.GetVPNGatewayOptions{ + ID: &id, + } + vpnGatewayIntf, response, err := vpnGateway.GetVPNGateway(getVpnGatewayOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Vpn Gateway: %s\n%s", err, response) + } + vpnGateway := vpnGatewayIntf.(*vpcclassicv1.VPNGateway) + + if *vpnGateway.Status == "available" || *vpnGateway.Status == "failed" || *vpnGateway.Status == "running" { + return vpnGateway, isVPNGatewayProvisioningDone, nil + } + + return vpnGateway, isVPNGatewayProvisioning, nil + } +} + +func isWaitForVpnGatewayAvailable(vpnGateway *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for vpn gateway (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVPNGatewayProvisioning}, + Target: []string{isVPNGatewayProvisioningDone, ""}, + Refresh: isVpnGatewayRefreshFunc(vpnGateway, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isVpnGatewayRefreshFunc(vpnGateway *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{ + ID: &id, + } + vpnGatewayIntf, response, err := vpnGateway.GetVPNGateway(getVpnGatewayOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Vpn Gateway: %s\n%s", err, response) + } + vpnGateway := vpnGatewayIntf.(*vpcv1.VPNGateway) + + if *vpnGateway.Status == "available" || *vpnGateway.Status == "failed" || *vpnGateway.Status == "running" { + return vpnGateway, isVPNGatewayProvisioningDone, nil + } + + return vpnGateway, isVPNGatewayProvisioning, nil + } +} + +func resourceIBMISVPNGatewayRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + id := d.Id() + if userDetails.generation == 1 { + err := classicVpngwGet(d, meta, id) + if err != nil { + return err + } + } else { + err := vpngwGet(d, meta, id) + if err != nil { + return err + } + } + return nil +} + +func classicVpngwGet(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getVpnGatewayOptions := &vpcclassicv1.GetVPNGatewayOptions{ + ID: &id, + } + vpnGatewayIntf, response, err := sess.GetVPNGateway(getVpnGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Vpn Gateway (%s): %s\n%s", id, err, response) + } + vpnGateway := vpnGatewayIntf.(*vpcclassicv1.VPNGateway) + d.Set(isVPNGatewayName, *vpnGateway.Name) + d.Set(isVPNGatewaySubnet, *vpnGateway.Subnet.ID) + d.Set(isVPNGatewayStatus, *vpnGateway.Status) + members := []vpcclassicv1.VPNGatewayMember{} + for _, member := range vpnGateway.Members { + members = append(members, member) + } + if len(members) > 0 { + d.Set(isVPNGatewayPublicIPAddress, *members[0].PublicIP.Address) + } + if len(members) > 1 { + d.Set(isVPNGatewayPublicIPAddress2, *members[1].PublicIP.Address) + } + tags, err := GetTagsUsingCRN(meta, *vpnGateway.CRN) + if err != nil { + log.Printf( + "Error on get of resource vpc VPN Gateway (%s) tags: %s", d.Id(), err) + } + d.Set(isVPNGatewayTags, tags) + controller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, controller+"/vpc/network/vpngateways") + d.Set(ResourceName, 
*vpnGateway.Name)
+	d.Set(ResourceCRN, *vpnGateway.CRN)
+	d.Set(ResourceStatus, *vpnGateway.Status)
+	if vpnGateway.ResourceGroup != nil {
+		d.Set(ResourceGroupName, *vpnGateway.ResourceGroup.ID)
+		d.Set(isVPNGatewayResourceGroup, *vpnGateway.ResourceGroup.ID)
+	}
+	return nil
+}
+
+func vpngwGet(d *schema.ResourceData, meta interface{}, id string) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{
+		ID: &id,
+	}
+	vpnGatewayIntf, response, err := sess.GetVPNGateway(getVpnGatewayOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error Getting Vpn Gateway (%s): %s\n%s", id, err, response)
+	}
+	vpnGateway := vpnGatewayIntf.(*vpcv1.VPNGateway)
+
+	d.Set(isVPNGatewayName, *vpnGateway.Name)
+	d.Set(isVPNGatewaySubnet, *vpnGateway.Subnet.ID)
+	d.Set(isVPNGatewayStatus, *vpnGateway.Status)
+	members := vpnGateway.Members
+	if len(members) > 0 {
+		d.Set(isVPNGatewayPublicIPAddress, *members[0].PublicIP.Address)
+		if members[0].PrivateIP != nil && members[0].PrivateIP.Address != nil {
+			d.Set(isVPNGatewayPrivateIPAddress, *members[0].PrivateIP.Address)
+		}
+	}
+	if len(members) > 1 {
+		d.Set(isVPNGatewayPublicIPAddress2, *members[1].PublicIP.Address)
+		if members[1].PrivateIP != nil && members[1].PrivateIP.Address != nil {
+			d.Set(isVPNGatewayPrivateIPAddress2, *members[1].PrivateIP.Address)
+		}
+
+	}
+	tags, err := GetTagsUsingCRN(meta, *vpnGateway.CRN)
+	if err != nil {
+		log.Printf(
+			"Error on get of resource vpc VPN Gateway (%s) tags: %s", d.Id(), err)
+	}
+	d.Set(isVPNGatewayTags, tags)
+	controller, err := getBaseController(meta)
+	if err != nil {
+		return err
+	}
+	d.Set(ResourceControllerURL, controller+"/vpc/network/vpngateways")
+	d.Set(ResourceName, *vpnGateway.Name)
+	d.Set(ResourceCRN, *vpnGateway.CRN)
+	d.Set(ResourceStatus, *vpnGateway.Status)
+	if vpnGateway.ResourceGroup != nil {
+		d.Set(ResourceGroupName, *vpnGateway.ResourceGroup.Name)
+		d.Set(isVPNGatewayResourceGroup, *vpnGateway.ResourceGroup.ID)
+	}
+	d.Set(isVPNGatewayMode, *vpnGateway.Mode)
+	if vpnGateway.Members != nil {
+		vpcMembersIpsList := make([]map[string]interface{}, 0)
+		for _, memberIP := range vpnGateway.Members {
+			// Every member is recorded, including one that has no public IP yet.
+			currentMemberIP := map[string]interface{}{}
+			if memberIP.PublicIP != nil {
+				currentMemberIP["address"] = *memberIP.PublicIP.Address
+			}
+			if memberIP.PrivateIP != nil {
+				currentMemberIP["private_address"] = *memberIP.PrivateIP.Address
+			}
+			currentMemberIP["role"] = *memberIP.Role
+			currentMemberIP["status"] = *memberIP.Status
+			vpcMembersIpsList = append(vpcMembersIpsList, currentMemberIP)
+		}
+		d.Set(isVPNGatewayMembers, vpcMembersIpsList)
+	}
+	if vpnGateway.CreatedAt != nil {
+		d.Set(isVPNGatewayCreatedAt, (vpnGateway.CreatedAt).String())
+	}
+	return nil
+}
+
+func resourceIBMISVPNGatewayUpdate(d *schema.ResourceData, meta interface{}) error {
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+
+	id := d.Id()
+	name := ""
+	hasChanged := false
+
+	if d.HasChange(isVPNGatewayName) {
+		name = d.Get(isVPNGatewayName).(string)
+		hasChanged = true
+	}
+
+	if userDetails.generation == 1 {
+		err := classicVpngwUpdate(d, meta, id, name, hasChanged)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := vpngwUpdate(d, meta, id, name, hasChanged)
+		if err != nil {
+			return err
+		}
+	}
+	return resourceIBMISVPNGatewayRead(d, meta)
+}
+
+func classicVpngwUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error {
+	sess, err := classicVpcClient(meta)
+	if err != nil {
+		return err
+	}
+	if d.HasChange(isVPNGatewayTags) {
+		getVpnGatewayOptions := &vpcclassicv1.GetVPNGatewayOptions{
+			ID: &id,
+		}
+		vpnGatewayIntf, response, err := sess.GetVPNGateway(getVpnGatewayOptions)
+		if err != nil {
+			return fmt.Errorf("Error getting Vpn Gateway: %s\n%s", err, response)
+		}
+		vpnGateway := vpnGatewayIntf.(*vpcclassicv1.VPNGateway)
+
+		oldList, newList := d.GetChange(isVPNGatewayTags)
+		err = UpdateTagsUsingCRN(oldList, newList, meta, *vpnGateway.CRN)
+		if err != nil {
+			log.Printf(
+				"Error on update of resource vpc Vpn Gateway (%s) tags: %s", id, err)
+		}
+	}
+	if hasChanged {
+		options := &vpcclassicv1.UpdateVPNGatewayOptions{
+			ID: &id,
+		}
+		vpnGatewayPatchModel := &vpcclassicv1.VPNGatewayPatch{
+			Name: &name,
+		}
+		vpnGatewayPatch, err := vpnGatewayPatchModel.AsPatch()
+		if err != nil {
+			return fmt.Errorf("Error calling asPatch for VPNGatewayPatch: %s", err)
+		}
+		options.VPNGatewayPatch = vpnGatewayPatch
+		_, response, err := sess.UpdateVPNGateway(options)
+		if err != nil {
+			return fmt.Errorf("Error updating vpc Vpn Gateway: %s\n%s", err, response)
+		}
+	}
+	return nil
+}
+
+func vpngwUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error {
+	sess, err := vpcClient(meta)
+	if err != nil {
+		return err
+	}
+	if d.HasChange(isVPNGatewayTags) {
+		getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{
+			ID: &id,
+		}
+		vpnGatewayIntf, response, err := sess.GetVPNGateway(getVpnGatewayOptions)
+		if err != nil {
+			return fmt.Errorf("Error getting Vpn Gateway: %s\n%s", err, response)
+		}
+		vpnGateway := vpnGatewayIntf.(*vpcv1.VPNGateway)
+
+		oldList, newList := d.GetChange(isVPNGatewayTags)
+		err = UpdateTagsUsingCRN(oldList, newList, meta, *vpnGateway.CRN)
+		if err != nil {
+			log.Printf(
+				"Error on update of resource vpc Vpn Gateway (%s) tags: %s", id, err)
+		}
+	}
+	if hasChanged {
+		options := &vpcv1.UpdateVPNGatewayOptions{
+			ID: &id,
+		}
+		vpnGatewayPatchModel := &vpcv1.VPNGatewayPatch{
+			Name: &name,
+		}
+		vpnGatewayPatch, err := vpnGatewayPatchModel.AsPatch()
+		if err != nil {
+			return fmt.Errorf("Error calling asPatch for VPNGatewayPatch: %s", err)
+		}
+		options.VPNGatewayPatch = vpnGatewayPatch
+		_, response, err := sess.UpdateVPNGateway(options)
+		if err != nil {
+			return fmt.Errorf("Error updating vpc Vpn Gateway: %s\n%s", err, response)
+		}
+	}
+	return nil
+}
+
+func resourceIBMISVPNGatewayDelete(d *schema.ResourceData, meta interface{}) error {
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+	id := d.Id()
+	if userDetails.generation == 1 {
+		err := classicVpngwDelete(d, meta, id)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := vpngwDelete(d, meta, id)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func classicVpngwDelete(d *schema.ResourceData, meta interface{}, id string) error {
+	sess, err := classicVpcClient(meta)
+	if err != nil {
+		return err
+	}
+
+	getVpnGatewayOptions := &vpcclassicv1.GetVPNGatewayOptions{
+		ID: &id,
+	}
+	_, response, err := sess.GetVPNGateway(getVpnGatewayOptions)
+	if err != nil {
+		if response != nil && response.StatusCode == 404 {
+			return nil
+		}
+		return fmt.Errorf("Error Getting Vpn Gateway (%s): %s\n%s", id, err, response)
+	}
+
+	options := &vpcclassicv1.DeleteVPNGatewayOptions{
+		ID: &id,
+	}
+	response, err = 
sess.DeleteVPNGateway(options) + if err != nil { + return fmt.Errorf("Error Deleting Vpn Gateway : %s\n%s", err, response) + } + _, err = isWaitForClassicVpnGatewayDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func vpngwDelete(d *schema.ResourceData, meta interface{}, id string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{ + ID: &id, + } + _, response, err := sess.GetVPNGateway(getVpnGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return nil + } + return fmt.Errorf("Error Getting Vpn Gateway (%s): %s\n%s", id, err, response) + } + + options := &vpcv1.DeleteVPNGatewayOptions{ + ID: &id, + } + response, err = sess.DeleteVPNGateway(options) + if err != nil { + return fmt.Errorf("Error Deleting Vpn Gateway : %s\n%s", err, response) + } + _, err = isWaitForVpnGatewayDeleted(sess, id, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func isWaitForClassicVpnGatewayDeleted(vpnGateway *vpcclassicv1.VpcClassicV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for VPNGateway (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVPNGatewayDeleting}, + Target: []string{isVPNGatewayDeleted, ""}, + Refresh: isClassicVpnGatewayDeleteRefreshFunc(vpnGateway, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isClassicVpnGatewayDeleteRefreshFunc(vpnGateway *vpcclassicv1.VpcClassicV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getVpnGatewayOptions := &vpcclassicv1.GetVPNGatewayOptions{ + ID: &id, + } + vpngw, response, err := vpnGateway.GetVPNGateway(getVpnGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return "", isVPNGatewayDeleted, nil + } + return "", "", fmt.Errorf("Error Getting Vpn Gateway: %s\n%s", err, response) + } + return vpngw, isVPNGatewayDeleting, err + } +} + +func isWaitForVpnGatewayDeleted(vpnGateway *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for VPNGateway (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVPNGatewayDeleting}, + Target: []string{isVPNGatewayDeleted, ""}, + Refresh: isVpnGatewayDeleteRefreshFunc(vpnGateway, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isVpnGatewayDeleteRefreshFunc(vpnGateway *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{ + ID: &id, + } + vpngw, response, err := vpnGateway.GetVPNGateway(getVpnGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return "", isVPNGatewayDeleted, nil + } + return "", "", fmt.Errorf("Error Getting Vpn Gateway: %s\n%s", err, response) + } + return vpngw, isVPNGatewayDeleting, err + } +} + +func resourceIBMISVPNGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + id := d.Id() + if userDetails.generation == 1 { + exists, err := classicVpngwExists(d, meta, id) + return exists, err + } else { 
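+		// Generation 2 accounts use the current VPC API client; generation 1
+		// is handled by the classic client above.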
+ exists, err := vpngwExists(d, meta, id) + return exists, err + } +} + +func classicVpngwExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + getVpnGatewayOptions := &vpcclassicv1.GetVPNGatewayOptions{ + ID: &id, + } + _, response, err := sess.GetVPNGateway(getVpnGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Vpn Gatewa: %s\n%s", err, response) + } + return true, nil +} + +func vpngwExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{ + ID: &id, + } + _, response, err := sess.GetVPNGateway(getVpnGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Vpn Gatewa: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpn_gateway_connections.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpn_gateway_connections.go new file mode 100644 index 00000000000..072b0f61afa --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpn_gateway_connections.go @@ -0,0 +1,992 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/IBM/vpc-go-sdk/vpcclassicv1" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + isVPNGatewayConnectionAdminStateup = "admin_state_up" + isVPNGatewayConnectionAdminAuthenticationmode = "authentication_mode" + isVPNGatewayConnectionName = "name" + isVPNGatewayConnectionVPNGateway = "vpn_gateway" + isVPNGatewayConnection = "gateway_connection" + isVPNGatewayConnectionPeerAddress = "peer_address" + isVPNGatewayConnectionPreSharedKey = "preshared_key" + isVPNGatewayConnectionLocalCIDRS = "local_cidrs" + isVPNGatewayConnectionPeerCIDRS = "peer_cidrs" + isVPNGatewayConnectionIKEPolicy = "ike_policy" + isVPNGatewayConnectionIPSECPolicy = "ipsec_policy" + isVPNGatewayConnectionDeadPeerDetectionAction = "action" + isVPNGatewayConnectionDeadPeerDetectionInterval = "interval" + isVPNGatewayConnectionDeadPeerDetectionTimeout = "timeout" + isVPNGatewayConnectionStatus = "status" + isVPNGatewayConnectionDeleting = "deleting" + isVPNGatewayConnectionDeleted = "done" + isVPNGatewayConnectionProvisioning = "provisioning" + isVPNGatewayConnectionProvisioningDone = "done" + isVPNGatewayConnectionMode = "mode" + isVPNGatewayConnectionTunnels = "tunnels" + isVPNGatewayConnectionResourcetype = "resource_type" + isVPNGatewayConnectionCreatedat = "created_at" +) + +func resourceIBMISVPNGatewayConnection() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMISVPNGatewayConnectionCreate, + Read: resourceIBMISVPNGatewayConnectionRead, + Update: resourceIBMISVPNGatewayConnectionUpdate, + Delete: resourceIBMISVPNGatewayConnectionDelete, + Exists: resourceIBMISVPNGatewayConnectionExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + 
isVPNGatewayConnectionName: {
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: InvokeValidator("ibm_is_vpn_gateway_connection", isVPNGatewayConnectionName),
+ Description: "VPN Gateway connection name",
+ },
+
+ isVPNGatewayConnectionVPNGateway: {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "Identifier of the VPN gateway this connection belongs to",
+ },
+
+ isVPNGatewayConnectionPeerAddress: {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "VPN gateway connection peer address",
+ },
+
+ isVPNGatewayConnectionPreSharedKey: {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "VPN gateway connection preshared key",
+ },
+
+ isVPNGatewayConnectionAdminStateup: {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ Description: "VPN gateway connection admin state",
+ },
+
+ isVPNGatewayConnectionLocalCIDRS: {
+ Type: schema.TypeSet,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ Description: "VPN gateway connection local CIDRs",
+ },
+
+ isVPNGatewayConnectionPeerCIDRS: {
+ Type: schema.TypeSet,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ Description: "VPN gateway connection peer CIDRs",
+ },
+
+ isVPNGatewayConnectionDeadPeerDetectionAction: {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "restart",
+ ValidateFunc: InvokeValidator("ibm_is_vpn_gateway_connection", isVPNGatewayConnectionDeadPeerDetectionAction),
+ Description: "Dead peer detection action",
+ },
+ isVPNGatewayConnectionDeadPeerDetectionInterval: {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 2,
+ ValidateFunc: InvokeValidator("ibm_is_vpn_gateway_connection", isVPNGatewayConnectionDeadPeerDetectionInterval),
+ Description: "Dead peer detection interval",
+ },
+ isVPNGatewayConnectionDeadPeerDetectionTimeout: {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 10,
+ ValidateFunc: InvokeValidator("ibm_is_vpn_gateway_connection", isVPNGatewayConnectionDeadPeerDetectionTimeout),
+ Description: "Dead peer detection timeout",
+ },
+
+ isVPNGatewayConnectionIPSECPolicy: {
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "IPsec policy for the VPN gateway connection",
+ },
+
+ isVPNGatewayConnectionIKEPolicy: {
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "VPN gateway connection IKE Policy",
+ },
+
+ isVPNGatewayConnection: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The unique identifier for this VPN gateway connection",
+ },
+
+ isVPNGatewayConnectionStatus: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "VPN gateway connection status",
+ },
+
+ RelatedCRN: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The crn of the VPN Gateway resource",
+ },
+
+ isVPNGatewayConnectionAdminAuthenticationmode: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The authentication mode",
+ },
+
+ isVPNGatewayConnectionResourcetype: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The resource type",
+ },
+
+ isVPNGatewayConnectionCreatedat: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The date and time that this VPN gateway connection was created",
+ },
+
+ isVPNGatewayConnectionMode: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The mode of the VPN gateway",
+ },
+
+ isVPNGatewayConnectionTunnels: {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The VPN tunnel configuration for this VPN gateway
connection (in static route mode)", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + Description: "The IP address of the VPN gateway member in which the tunnel resides", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the VPN Tunnel", + }, + }, + }, + }, + }, + } +} + +func resourceIBMISVPNGatewayConnectionValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + action := "restart, clear, hold, none" + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPNGatewayConnectionName, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`, + MinValueLength: 1, + MaxValueLength: 63}) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPNGatewayConnectionDeadPeerDetectionAction, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Optional: true, + AllowedValues: action}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPNGatewayConnectionDeadPeerDetectionInterval, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "1", + MaxValue: "86399"}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: isVPNGatewayConnectionDeadPeerDetectionTimeout, + ValidateFunctionIdentifier: IntBetween, + Type: TypeInt, + MinValue: "2", + MaxValue: "86399"}) + + ibmISVPNGatewayConnectionResourceValidator := ResourceValidator{ResourceName: "ibm_is_vpn_gateway_connection", Schema: validateSchema} + return &ibmISVPNGatewayConnectionResourceValidator +} + +func resourceIBMISVPNGatewayConnectionCreate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + log.Printf("[DEBUG] VPNGatewayConnection create") + name := d.Get(isVPNGatewayConnectionName).(string) + gatewayID := d.Get(isVPNGatewayConnectionVPNGateway).(string) + peerAddress := d.Get(isVPNGatewayConnectionPeerAddress).(string) + prephasedKey := d.Get(isVPNGatewayConnectionPreSharedKey).(string) + + stateUp := false + if _, ok := d.GetOk(isVPNGatewayConnectionAdminStateup); ok { + stateUp = d.Get(isVPNGatewayConnectionAdminStateup).(bool) + } + var interval, timeout int64 + if intvl, ok := d.GetOk(isVPNGatewayConnectionDeadPeerDetectionInterval); ok { + interval = int64(intvl.(int)) + } else { + interval = 30 + } + + if tout, ok := d.GetOk(isVPNGatewayConnectionDeadPeerDetectionTimeout); ok { + timeout = int64(tout.(int)) + } else { + timeout = 120 + } + var action string + if act, ok := d.GetOk(isVPNGatewayConnectionDeadPeerDetectionAction); ok { + action = act.(string) + } else { + action = "none" + } + + if userDetails.generation == 1 { + err := classicVpngwconCreate(d, meta, name, gatewayID, peerAddress, prephasedKey, action, interval, timeout, stateUp) + if err != nil { + return err + } + } else { + err := vpngwconCreate(d, meta, name, gatewayID, peerAddress, prephasedKey, action, interval, timeout, stateUp) + if err != nil { + return err + } + } + return resourceIBMISVPNGatewayConnectionRead(d, meta) +} + +func classicVpngwconCreate(d *schema.ResourceData, meta interface{}, name, gatewayID, peerAddress, prephasedKey, action string, interval, timeout int64, stateUp bool) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + vpnGatewayConnectionPrototypeModel := 
&vpcclassicv1.VPNGatewayConnectionPrototype{ + PeerAddress: &peerAddress, + Psk: &prephasedKey, + AdminStateUp: &stateUp, + DeadPeerDetection: &vpcclassicv1.VPNGatewayConnectionDpdPrototype{ + Action: &action, + Interval: &interval, + Timeout: &timeout, + }, + Name: &name, + } + options := &vpcclassicv1.CreateVPNGatewayConnectionOptions{ + VPNGatewayID: &gatewayID, + VPNGatewayConnectionPrototype: vpnGatewayConnectionPrototypeModel, + } + + if _, ok := d.GetOk(isVPNGatewayConnectionLocalCIDRS); ok { + localCidrs := expandStringList((d.Get(isVPNGatewayConnectionLocalCIDRS).(*schema.Set)).List()) + vpnGatewayConnectionPrototypeModel.LocalCIDRs = localCidrs + } + if _, ok := d.GetOk(isVPNGatewayConnectionPeerCIDRS); ok { + peerCidrs := expandStringList((d.Get(isVPNGatewayConnectionPeerCIDRS).(*schema.Set)).List()) + vpnGatewayConnectionPrototypeModel.PeerCIDRs = peerCidrs + } + + var ikePolicyIdentity, ipsecPolicyIdentity string + + if ikePolicy, ok := d.GetOk(isVPNGatewayConnectionIKEPolicy); ok { + ikePolicyIdentity = ikePolicy.(string) + vpnGatewayConnectionPrototypeModel.IkePolicy = &vpcclassicv1.IkePolicyIdentity{ + ID: &ikePolicyIdentity, + } + } + if ipsecPolicy, ok := d.GetOk(isVPNGatewayConnectionIPSECPolicy); ok { + ipsecPolicyIdentity = ipsecPolicy.(string) + vpnGatewayConnectionPrototypeModel.IpsecPolicy = &vpcclassicv1.IPsecPolicyIdentity{ + ID: &ipsecPolicyIdentity, + } + } + + vpnGatewayConnectionIntf, response, err := sess.CreateVPNGatewayConnection(options) + if err != nil { + return fmt.Errorf("[DEBUG] Create VPN Gateway Connection err %s\n%s", err, response) + } + vpnGatewayConnection := vpnGatewayConnectionIntf.(*vpcclassicv1.VPNGatewayConnection) + d.SetId(fmt.Sprintf("%s/%s", gatewayID, *vpnGatewayConnection.ID)) + log.Printf("[INFO] VPNGatewayConnection : %s/%s", gatewayID, *vpnGatewayConnection.ID) + return nil +} + +func vpngwconCreate(d *schema.ResourceData, meta interface{}, name, gatewayID, peerAddress, prephasedKey, action string, interval, timeout int64, stateUp bool) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + + vpnGatewayConnectionPrototypeModel := &vpcv1.VPNGatewayConnectionPrototype{ + PeerAddress: &peerAddress, + Psk: &prephasedKey, + AdminStateUp: &stateUp, + DeadPeerDetection: &vpcv1.VPNGatewayConnectionDpdPrototype{ + Action: &action, + Interval: &interval, + Timeout: &timeout, + }, + Name: &name, + } + options := &vpcv1.CreateVPNGatewayConnectionOptions{ + VPNGatewayID: &gatewayID, + VPNGatewayConnectionPrototype: vpnGatewayConnectionPrototypeModel, + } + + if _, ok := d.GetOk(isVPNGatewayConnectionLocalCIDRS); ok { + localCidrs := expandStringList((d.Get(isVPNGatewayConnectionLocalCIDRS).(*schema.Set)).List()) + vpnGatewayConnectionPrototypeModel.LocalCIDRs = localCidrs + } + if _, ok := d.GetOk(isVPNGatewayConnectionPeerCIDRS); ok { + peerCidrs := expandStringList((d.Get(isVPNGatewayConnectionPeerCIDRS).(*schema.Set)).List()) + vpnGatewayConnectionPrototypeModel.PeerCIDRs = peerCidrs + } + + var ikePolicyIdentity, ipsecPolicyIdentity string + + if ikePolicy, ok := d.GetOk(isVPNGatewayConnectionIKEPolicy); ok { + ikePolicyIdentity = ikePolicy.(string) + vpnGatewayConnectionPrototypeModel.IkePolicy = &vpcv1.IkePolicyIdentity{ + ID: &ikePolicyIdentity, + } + } else { + vpnGatewayConnectionPrototypeModel.IkePolicy = nil + } + if ipsecPolicy, ok := d.GetOk(isVPNGatewayConnectionIPSECPolicy); ok { + ipsecPolicyIdentity = ipsecPolicy.(string) + vpnGatewayConnectionPrototypeModel.IpsecPolicy = &vpcv1.IPsecPolicyIdentity{ + ID: 
&ipsecPolicyIdentity, + } + } else { + vpnGatewayConnectionPrototypeModel.IpsecPolicy = nil + } + + vpnGatewayConnectionIntf, response, err := sess.CreateVPNGatewayConnection(options) + if err != nil { + return fmt.Errorf("[DEBUG] Create VPN Gateway Connection err %s\n%s", err, response) + } + vpnGatewayConnection := vpnGatewayConnectionIntf.(*vpcv1.VPNGatewayConnection) + d.SetId(fmt.Sprintf("%s/%s", gatewayID, *vpnGatewayConnection.ID)) + log.Printf("[INFO] VPNGatewayConnection : %s/%s", gatewayID, *vpnGatewayConnection.ID) + return nil +} + +func resourceIBMISVPNGatewayConnectionRead(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + gID := parts[0] + gConnID := parts[1] + + if userDetails.generation == 1 { + err := classicVpngwconGet(d, meta, gID, gConnID) + if err != nil { + return err + } + } else { + err := vpngwconGet(d, meta, gID, gConnID) + if err != nil { + return err + } + } + return nil +} + +func classicVpngwconGet(d *schema.ResourceData, meta interface{}, gID, gConnID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + options := &vpcclassicv1.GetVPNGatewayConnectionOptions{ + VPNGatewayID: &gID, + ID: &gConnID, + } + vpnGatewayConnectionIntf, response, err := sess.GetVPNGatewayConnection(options) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Vpn Gateway Connection (%s): %s\n%s", gConnID, err, response) + } + vpnGatewayConnection := vpnGatewayConnectionIntf.(*vpcclassicv1.VPNGatewayConnection) + d.Set(isVPNGatewayConnectionName, *vpnGatewayConnection.Name) + d.Set(isVPNGatewayConnectionVPNGateway, gID) + d.Set(isVPNGatewayConnectionAdminStateup, *vpnGatewayConnection.AdminStateUp) + d.Set(isVPNGatewayConnectionPeerAddress, *vpnGatewayConnection.PeerAddress) + d.Set(isVPNGatewayConnectionPreSharedKey, *vpnGatewayConnection.Psk) + d.Set(isVPNGatewayConnectionLocalCIDRS, flattenStringList(vpnGatewayConnection.LocalCIDRs)) + d.Set(isVPNGatewayConnectionPeerCIDRS, flattenStringList(vpnGatewayConnection.PeerCIDRs)) + if vpnGatewayConnection.IkePolicy != nil { + d.Set(isVPNGatewayConnectionIKEPolicy, *vpnGatewayConnection.IkePolicy.ID) + } + if vpnGatewayConnection.IpsecPolicy != nil { + d.Set(isVPNGatewayConnectionIPSECPolicy, *vpnGatewayConnection.IpsecPolicy.ID) + } + d.Set(isVPNGatewayConnectionDeadPeerDetectionAction, *vpnGatewayConnection.DeadPeerDetection.Action) + d.Set(isVPNGatewayConnectionDeadPeerDetectionInterval, *vpnGatewayConnection.DeadPeerDetection.Interval) + d.Set(isVPNGatewayConnectionDeadPeerDetectionTimeout, *vpnGatewayConnection.DeadPeerDetection.Timeout) + getVPNGatewayOptions := &vpcclassicv1.GetVPNGatewayOptions{ + ID: &gID, + } + vpngatewayIntf, response, err := sess.GetVPNGateway(getVPNGatewayOptions) + if err != nil { + return fmt.Errorf("Error Getting VPN Gateway : %s\n%s", err, response) + } + vpngateway := vpngatewayIntf.(*vpcclassicv1.VPNGateway) + + d.Set(RelatedCRN, *vpngateway.CRN) + return nil +} + +func vpngwconGet(d *schema.ResourceData, meta interface{}, gID, gConnID string) error { + sess, err := vpcClient(meta) + if err != nil { + return err + } + options := &vpcv1.GetVPNGatewayConnectionOptions{ + VPNGatewayID: &gID, + ID: &gConnID, + } + vpnGatewayConnectionIntf, response, err := sess.GetVPNGatewayConnection(options) + if err != nil { + if 
response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Vpn Gateway Connection (%s): %s\n%s", gConnID, err, response) + } + d.Set(isVPNGatewayConnection, gConnID) + vpnGatewayConnection := vpnGatewayConnectionIntf.(*vpcv1.VPNGatewayConnection) + d.Set(isVPNGatewayConnectionName, *vpnGatewayConnection.Name) + d.Set(isVPNGatewayConnectionVPNGateway, gID) + d.Set(isVPNGatewayConnectionAdminStateup, *vpnGatewayConnection.AdminStateUp) + d.Set(isVPNGatewayConnectionPeerAddress, *vpnGatewayConnection.PeerAddress) + d.Set(isVPNGatewayConnectionPreSharedKey, *vpnGatewayConnection.Psk) + + if vpnGatewayConnection.LocalCIDRs != nil { + d.Set(isVPNGatewayConnectionLocalCIDRS, flattenStringList(vpnGatewayConnection.LocalCIDRs)) + } + if vpnGatewayConnection.PeerCIDRs != nil { + d.Set(isVPNGatewayConnectionPeerCIDRS, flattenStringList(vpnGatewayConnection.PeerCIDRs)) + } + if vpnGatewayConnection.IkePolicy != nil { + d.Set(isVPNGatewayConnectionIKEPolicy, *vpnGatewayConnection.IkePolicy.ID) + } + if vpnGatewayConnection.IpsecPolicy != nil { + d.Set(isVPNGatewayConnectionIPSECPolicy, *vpnGatewayConnection.IpsecPolicy.ID) + } + if vpnGatewayConnection.AuthenticationMode != nil { + d.Set(isVPNGatewayConnectionAdminAuthenticationmode, *vpnGatewayConnection.AuthenticationMode) + } + if vpnGatewayConnection.Status != nil { + d.Set(isVPNGatewayConnectionStatus, *vpnGatewayConnection.Status) + } + if vpnGatewayConnection.ResourceType != nil { + d.Set(isVPNGatewayConnectionResourcetype, *vpnGatewayConnection.ResourceType) + } + if vpnGatewayConnection.CreatedAt != nil { + d.Set(isVPNGatewayConnectionCreatedat, vpnGatewayConnection.CreatedAt.String()) + } + + if vpnGatewayConnection.Mode != nil { + d.Set(isVPNGatewayConnectionMode, *vpnGatewayConnection.Mode) + } + vpcTunnelsList := make([]map[string]interface{}, 0) + if vpnGatewayConnection.Tunnels != nil { + for _, vpcTunnel := range vpnGatewayConnection.Tunnels { + currentTunnel := map[string]interface{}{} + if vpcTunnel.PublicIP != nil { + publicIP := *vpcTunnel.PublicIP + currentTunnel["address"] = *publicIP.Address + } + if vpcTunnel.Status != nil { + currentTunnel["status"] = *vpcTunnel.Status + } + vpcTunnelsList = append(vpcTunnelsList, currentTunnel) + } + } + d.Set(isVPNGatewayConnectionTunnels, vpcTunnelsList) + + d.Set(isVPNGatewayConnectionDeadPeerDetectionAction, *vpnGatewayConnection.DeadPeerDetection.Action) + d.Set(isVPNGatewayConnectionDeadPeerDetectionInterval, *vpnGatewayConnection.DeadPeerDetection.Interval) + d.Set(isVPNGatewayConnectionDeadPeerDetectionTimeout, *vpnGatewayConnection.DeadPeerDetection.Timeout) + getVPNGatewayOptions := &vpcv1.GetVPNGatewayOptions{ + ID: &gID, + } + vpngatewayIntf, response, err := sess.GetVPNGateway(getVPNGatewayOptions) + if err != nil { + return fmt.Errorf("Error Getting VPN Gateway : %s\n%s", err, response) + } + vpngateway := vpngatewayIntf.(*vpcv1.VPNGateway) + d.Set(RelatedCRN, *vpngateway.CRN) + return nil +} + +func resourceIBMISVPNGatewayConnectionUpdate(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + hasChanged := false + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + gID := parts[0] + gConnID := parts[1] + + if userDetails.generation == 1 { + err := classicVpngwconUpdate(d, meta, gID, gConnID, hasChanged) + if err != nil { + return err + } + } else { + err := vpngwconUpdate(d, meta, gID, gConnID, hasChanged) + if 
err != nil {
+ return err
+ }
+ }
+ return resourceIBMISVPNGatewayConnectionRead(d, meta)
+}
+
+func classicVpngwconUpdate(d *schema.ResourceData, meta interface{}, gID, gConnID string, hasChanged bool) error {
+ sess, err := classicVpcClient(meta)
+ if err != nil {
+ return err
+ }
+
+ updateVpnGatewayConnectionOptions := &vpcclassicv1.UpdateVPNGatewayConnectionOptions{
+ VPNGatewayID: &gID,
+ ID: &gConnID,
+ }
+ vpnGatewayConnectionPatchModel := &vpcclassicv1.VPNGatewayConnectionPatch{}
+ if d.HasChange(isVPNGatewayConnectionName) {
+ name := d.Get(isVPNGatewayConnectionName).(string)
+ vpnGatewayConnectionPatchModel.Name = &name
+ hasChanged = true
+ }
+
+ if d.HasChange(isVPNGatewayConnectionPeerAddress) {
+ peerAddress := d.Get(isVPNGatewayConnectionPeerAddress).(string)
+ vpnGatewayConnectionPatchModel.PeerAddress = &peerAddress
+ hasChanged = true
+ }
+
+ if d.HasChange(isVPNGatewayConnectionPreSharedKey) {
+ psk := d.Get(isVPNGatewayConnectionPreSharedKey).(string)
+ vpnGatewayConnectionPatchModel.Psk = &psk
+ hasChanged = true
+ }
+
+ if d.HasChange(isVPNGatewayConnectionDeadPeerDetectionAction) || d.HasChange(isVPNGatewayConnectionDeadPeerDetectionInterval) || d.HasChange(isVPNGatewayConnectionDeadPeerDetectionTimeout) {
+ action := d.Get(isVPNGatewayConnectionDeadPeerDetectionAction).(string)
+ interval := int64(d.Get(isVPNGatewayConnectionDeadPeerDetectionInterval).(int))
+ timeout := int64(d.Get(isVPNGatewayConnectionDeadPeerDetectionTimeout).(int))
+ // Construct the DPD prototype before assigning it: the patch model's
+ // DeadPeerDetection field starts out nil, so writing through it
+ // directly would panic. This mirrors the vpcv1 path below.
+ vpnGatewayConnectionPatchModel.DeadPeerDetection = &vpcclassicv1.VPNGatewayConnectionDpdPrototype{
+ Action: &action,
+ Interval: &interval,
+ Timeout: &timeout,
+ }
+ hasChanged = true
+ }
+
+ if d.HasChange(isVPNGatewayConnectionIKEPolicy) {
+ ikePolicyIdentity := d.Get(isVPNGatewayConnectionIKEPolicy).(string)
+ vpnGatewayConnectionPatchModel.IkePolicy = &vpcclassicv1.IkePolicyIdentity{
+ ID: &ikePolicyIdentity,
+ }
+ hasChanged = true
+ } else {
+ vpnGatewayConnectionPatchModel.IkePolicy = nil
+ }
+
+ if d.HasChange(isVPNGatewayConnectionIPSECPolicy) {
+ ipsecPolicyIdentity := d.Get(isVPNGatewayConnectionIPSECPolicy).(string)
+ vpnGatewayConnectionPatchModel.IpsecPolicy = &vpcclassicv1.IPsecPolicyIdentity{
+ ID: &ipsecPolicyIdentity,
+ }
+ hasChanged = true
+ } else {
+ vpnGatewayConnectionPatchModel.IpsecPolicy = nil
+ }
+
+ if d.HasChange(isVPNGatewayConnectionAdminStateup) {
+ adminStateUp := d.Get(isVPNGatewayConnectionAdminStateup).(bool)
+ vpnGatewayConnectionPatchModel.AdminStateUp = &adminStateUp
+ hasChanged = true
+ }
+
+ if hasChanged {
+ vpnGatewayConnectionPatch, err := vpnGatewayConnectionPatchModel.AsPatch()
+ if err != nil {
+ return fmt.Errorf("Error calling asPatch for VPNGatewayConnectionPatch: %s", err)
+ }
+ updateVpnGatewayConnectionOptions.VPNGatewayConnectionPatch = vpnGatewayConnectionPatch
+ _, response, err := sess.UpdateVPNGatewayConnection(updateVpnGatewayConnectionOptions)
+ if err != nil {
+ return fmt.Errorf("Error updating Vpn Gateway Connection: %s\n%s", err, response)
+ }
+ }
+ return nil
+}
+
+func vpngwconUpdate(d *schema.ResourceData, meta interface{}, gID, gConnID string, hasChanged bool) error {
+ sess, err := vpcClient(meta)
+ if err != nil {
+ return err
+ }
+
+ updateVpnGatewayConnectionOptions := &vpcv1.UpdateVPNGatewayConnectionOptions{
+ VPNGatewayID: &gID,
+ ID: &gConnID,
+ }
+ vpnGatewayConnectionPatchModel := &vpcv1.VPNGatewayConnectionPatch{}
+ if d.HasChange(isVPNGatewayConnectionName) {
+ name :=
d.Get(isVPNGatewayConnectionName).(string) + vpnGatewayConnectionPatchModel.Name = &name + hasChanged = true + } + + if d.HasChange(isVPNGatewayConnectionPeerAddress) { + peerAddress := d.Get(isVPNGatewayConnectionPeerAddress).(string) + vpnGatewayConnectionPatchModel.PeerAddress = &peerAddress + hasChanged = true + } + + if d.HasChange(isVPNGatewayConnectionPreSharedKey) { + psk := d.Get(isVPNGatewayConnectionPreSharedKey).(string) + vpnGatewayConnectionPatchModel.Psk = &psk + hasChanged = true + } + + if d.HasChange(isVPNGatewayConnectionDeadPeerDetectionAction) || d.HasChange(isVPNGatewayConnectionDeadPeerDetectionInterval) || d.HasChange(isVPNGatewayConnectionDeadPeerDetectionTimeout) { + action := d.Get(isVPNGatewayConnectionDeadPeerDetectionAction).(string) + interval := int64(d.Get(isVPNGatewayConnectionDeadPeerDetectionInterval).(int)) + timeout := int64(d.Get(isVPNGatewayConnectionDeadPeerDetectionTimeout).(int)) + + // Construct an instance of the VPNGatewayConnectionDpdPrototype model + vpnGatewayConnectionDpdPrototypeModel := new(vpcv1.VPNGatewayConnectionDpdPrototype) + vpnGatewayConnectionDpdPrototypeModel.Action = &action + vpnGatewayConnectionDpdPrototypeModel.Interval = &interval + vpnGatewayConnectionDpdPrototypeModel.Timeout = &timeout + vpnGatewayConnectionPatchModel.DeadPeerDetection = vpnGatewayConnectionDpdPrototypeModel + hasChanged = true + } + + if d.HasChange(isVPNGatewayConnectionIKEPolicy) { + ikePolicyIdentity := d.Get(isVPNGatewayConnectionIKEPolicy).(string) + vpnGatewayConnectionPatchModel.IkePolicy = &vpcv1.IkePolicyIdentity{ + ID: &ikePolicyIdentity, + } + hasChanged = true + } else { + vpnGatewayConnectionPatchModel.IkePolicy = nil + } + + if d.HasChange(isVPNGatewayConnectionIPSECPolicy) { + ipsecPolicyIdentity := d.Get(isVPNGatewayConnectionIPSECPolicy).(string) + vpnGatewayConnectionPatchModel.IpsecPolicy = &vpcv1.IPsecPolicyIdentity{ + ID: &ipsecPolicyIdentity, + } + hasChanged = true + } else { + vpnGatewayConnectionPatchModel.IpsecPolicy = nil + } + + if d.HasChange(isVPNGatewayConnectionAdminStateup) { + adminStateUp := d.Get(isVPNGatewayConnectionAdminStateup).(bool) + vpnGatewayConnectionPatchModel.AdminStateUp = &adminStateUp + hasChanged = true + } + + if hasChanged { + vpnGatewayConnectionPatch, err := vpnGatewayConnectionPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("Error calling asPatch for VPNGatewayConnectionPatch: %s", err) + } + updateVpnGatewayConnectionOptions.VPNGatewayConnectionPatch = vpnGatewayConnectionPatch + _, response, err := sess.UpdateVPNGatewayConnection(updateVpnGatewayConnectionOptions) + if err != nil { + return fmt.Errorf("Error updating Vpn Gateway Connection: %s\n%s", err, response) + } + } + return nil +} + +func resourceIBMISVPNGatewayConnectionDelete(d *schema.ResourceData, meta interface{}) error { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + gID := parts[0] + gConnID := parts[1] + + if userDetails.generation == 1 { + err := classicVpngwconDelete(d, meta, gID, gConnID) + if err != nil { + return err + } + } else { + err := vpngwconDelete(d, meta, gID, gConnID) + if err != nil { + return err + } + } + return nil +} + +func classicVpngwconDelete(d *schema.ResourceData, meta interface{}, gID, gConnID string) error { + sess, err := classicVpcClient(meta) + if err != nil { + return err + } + getVpnGatewayConnectionOptions := &vpcclassicv1.GetVPNGatewayConnectionOptions{ + 
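// NOTE (editor): deletion follows the provider's usual three-step idiom --
// GET first so a 404 can be treated as "already gone", then DELETE, then
// poll until the API stops returning the connection. A condensed sketch of
// the idiom implemented below (getOpts/delOpts are stand-in names for the
// options structs built here):
//
//	_, resp, err := sess.GetVPNGatewayConnection(getOpts)
//	if err != nil {
//		if resp != nil && resp.StatusCode == 404 {
//			d.SetId("")
//			return nil // nothing left to delete
//		}
//		return err
//	}
//	if _, err := sess.DeleteVPNGatewayConnection(delOpts); err != nil {
//		return err
//	}
//	_, err = isWaitForClassicVPNGatewayConnectionDeleted(sess, gID, gConnID, timeout)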
VPNGatewayID: &gID,
+ ID: &gConnID,
+ }
+ _, response, err := sess.GetVPNGatewayConnection(getVpnGatewayConnectionOptions)
+
+ if err != nil {
+ if response != nil && response.StatusCode == 404 {
+ d.SetId("")
+ return nil
+ }
+ return fmt.Errorf("Error Getting Vpn Gateway Connection (%s): %s\n%s", gConnID, err, response)
+ }
+ deleteVpnGatewayConnectionOptions := &vpcclassicv1.DeleteVPNGatewayConnectionOptions{
+ VPNGatewayID: &gID,
+ ID: &gConnID,
+ }
+ response, err = sess.DeleteVPNGatewayConnection(deleteVpnGatewayConnectionOptions)
+ if err != nil {
+ return fmt.Errorf("Error Deleting Vpn Gateway Connection: %s\n%s", err, response)
+ }
+
+ _, err = isWaitForClassicVPNGatewayConnectionDeleted(sess, gID, gConnID, d.Timeout(schema.TimeoutDelete))
+ if err != nil {
+ return fmt.Errorf(
+ "Error checking whether Vpn Gateway Connection (%s) is deleted: %s", gConnID, err)
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func vpngwconDelete(d *schema.ResourceData, meta interface{}, gID, gConnID string) error {
+ sess, err := vpcClient(meta)
+ if err != nil {
+ return err
+ }
+ getVpnGatewayConnectionOptions := &vpcv1.GetVPNGatewayConnectionOptions{
+ VPNGatewayID: &gID,
+ ID: &gConnID,
+ }
+ _, response, err := sess.GetVPNGatewayConnection(getVpnGatewayConnectionOptions)
+
+ if err != nil {
+ if response != nil && response.StatusCode == 404 {
+ d.SetId("")
+ return nil
+ }
+ return fmt.Errorf("Error Getting Vpn Gateway Connection (%s): %s\n%s", gConnID, err, response)
+ }
+
+ deleteVpnGatewayConnectionOptions := &vpcv1.DeleteVPNGatewayConnectionOptions{
+ VPNGatewayID: &gID,
+ ID: &gConnID,
+ }
+ response, err = sess.DeleteVPNGatewayConnection(deleteVpnGatewayConnectionOptions)
+ if err != nil {
+ return fmt.Errorf("Error Deleting Vpn Gateway Connection: %s\n%s", err, response)
+ }
+
+ _, err = isWaitForVPNGatewayConnectionDeleted(sess, gID, gConnID, d.Timeout(schema.TimeoutDelete))
+ if err != nil {
+ return fmt.Errorf(
+ "Error checking whether Vpn Gateway Connection (%s) is deleted: %s", gConnID, err)
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func isWaitForClassicVPNGatewayConnectionDeleted(vpnGatewayConnection *vpcclassicv1.VpcClassicV1, gID, gConnID string, timeout time.Duration) (interface{}, error) {
+ log.Printf("Waiting for VPNGatewayConnection (%s) to be deleted.", gConnID)
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"retry", isVPNGatewayConnectionDeleting},
+ Target: []string{"", isVPNGatewayConnectionDeleted},
+ Refresh: isClassicVPNGatewayConnectionDeleteRefreshFunc(vpnGatewayConnection, gID, gConnID),
+ Timeout: timeout,
+ Delay: 10 * time.Second,
+ MinTimeout: 10 * time.Second,
+ }
+
+ return stateConf.WaitForState()
+}
+
+func isClassicVPNGatewayConnectionDeleteRefreshFunc(vpnGatewayConnection *vpcclassicv1.VpcClassicV1, gID, gConnID string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ getVpnGatewayConnectionOptions := &vpcclassicv1.GetVPNGatewayConnectionOptions{
+ VPNGatewayID: &gID,
+ ID: &gConnID,
+ }
+ vpngwcon, response, err := vpnGatewayConnection.GetVPNGatewayConnection(getVpnGatewayConnectionOptions)
+ if err != nil {
+ if response != nil && response.StatusCode == 404 {
+ return "", isVPNGatewayConnectionDeleted, nil
+ }
+ return "", "", fmt.Errorf("The Vpn Gateway Connection %s failed to delete: %s\n%s", gConnID, err, response)
+ }
+ return vpngwcon, isVPNGatewayConnectionDeleting, nil
+ }
+}
+
+func isWaitForVPNGatewayConnectionDeleted(vpnGatewayConnection *vpcv1.VpcV1, gID, gConnID string, timeout time.Duration) (interface{}, error) {
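// NOTE (editor): resource.StateChangeConf polls the Refresh function until it
// reports one of the Target states or the timeout elapses. The refresh
// functions in this file map a 404 to isVPNGatewayConnectionDeleted ("done"),
// which ends the wait once the API no longer knows the connection. Minimal
// sketch of the contract, using the state constants defined at the top of
// this file:
//
//	stateConf := &resource.StateChangeConf{
//		Pending: []string{isVPNGatewayConnectionDeleting}, // "deleting"
//		Target:  []string{isVPNGatewayConnectionDeleted},  // "done"
//		Refresh: func() (interface{}, string, error) {
//			// return (object, currentState, err); a non-nil err aborts the wait
//			return "", isVPNGatewayConnectionDeleted, nil
//		},
//		Timeout:    timeout,
//		Delay:      10 * time.Second,
//		MinTimeout: 10 * time.Second,
//	}
//	_, err := stateConf.WaitForState()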
+ log.Printf("Waiting for VPNGatewayConnection (%s) to be deleted.", gConnID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isVPNGatewayConnectionDeleting}, + Target: []string{"", isVPNGatewayConnectionDeleted}, + Refresh: isVPNGatewayConnectionDeleteRefreshFunc(vpnGatewayConnection, gID, gConnID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isVPNGatewayConnectionDeleteRefreshFunc(vpnGatewayConnection *vpcv1.VpcV1, gID, gConnID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getVpnGatewayConnectionOptions := &vpcv1.GetVPNGatewayConnectionOptions{ + VPNGatewayID: &gID, + ID: &gConnID, + } + vpngwcon, response, err := vpnGatewayConnection.GetVPNGatewayConnection(getVpnGatewayConnectionOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return "", isVPNGatewayConnectionDeleted, nil + } + return "", "", fmt.Errorf("The Vpn Gateway Connection %s failed to delete: %s\n%s", gConnID, err, response) + } + return vpngwcon, isVPNGatewayConnectionDeleting, nil + } +} + +func resourceIBMISVPNGatewayConnectionExists(d *schema.ResourceData, meta interface{}) (bool, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + if len(parts) != 2 { + return false, fmt.Errorf("Incorrect ID %s: ID should be a combination of gID/gConnID", d.Id()) + } + + gID := parts[0] + gConnID := parts[1] + if userDetails.generation == 1 { + exists, err := classicVpngwconExists(d, meta, gID, gConnID) + return exists, err + } else { + exists, err := vpngwconExists(d, meta, gID, gConnID) + return exists, err + } +} + +func classicVpngwconExists(d *schema.ResourceData, meta interface{}, gID, gConnID string) (bool, error) { + sess, err := classicVpcClient(meta) + if err != nil { + return false, err + } + + getVpnGatewayConnectionOptions := &vpcclassicv1.GetVPNGatewayConnectionOptions{ + VPNGatewayID: &gID, + ID: &gConnID, + } + _, response, err := sess.GetVPNGatewayConnection(getVpnGatewayConnectionOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Vpn Gateway Connection: %s\n%s", err, response) + } + return true, nil +} + +func vpngwconExists(d *schema.ResourceData, meta interface{}, gID, gConnID string) (bool, error) { + sess, err := vpcClient(meta) + if err != nil { + return false, err + } + + getVpnGatewayConnectionOptions := &vpcv1.GetVPNGatewayConnectionOptions{ + VPNGatewayID: &gID, + ID: &gConnID, + } + _, response, err := sess.GetVPNGatewayConnection(getVpnGatewayConnectionOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error getting Vpn Gateway Connection: %s\n%s", err, response) + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kms_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kms_key.go new file mode 100644 index 00000000000..d7c3573dd1c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kms_key.go @@ -0,0 +1,756 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + "net/url" + "strconv" + "strings" + "time" + + kp "github.com/IBM/keyprotect-go-client" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMKmskey() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMKmsKeyCreate, + Read: resourceIBMKmsKeyRead, + Update: resourceIBMKmsKeyUpdate, + Delete: resourceIBMKmsKeyDelete, + Exists: resourceIBMKmsKeyExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Key protect or hpcs instance GUID", + }, + "key_ring_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "default", + Description: "Key Ring for the Key", + }, + "key_id": { + Type: schema.TypeString, + Computed: true, + Description: "Key ID", + }, + "key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Key name", + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: "type of service hs-crypto or kms", + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private"}), + Description: "public or private", + ForceNew: true, + Default: "public", + }, + "standard_key": { + Type: schema.TypeBool, + Default: false, + Optional: true, + ForceNew: true, + Description: "Standard key type", + }, + "payload": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "encrypted_nonce": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Only for imported root key", + }, + "iv_value": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Only for imported root key", + }, + "force_delete": { + Type: schema.TypeBool, + Optional: true, + Description: "set to true to force delete the key", + ForceNew: false, + Default: false, + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "Crn of the key", + }, + "expiration_date": { + Type: schema.TypeString, + Optional: true, + Description: "The date the key material expires. The date format follows RFC 3339. You can set an expiration date on any key on its creation. A key moves into the Deactivated state within one hour past its expiration date, if one is assigned. 
If you create a key without specifying an expiration date, the key does not expire", + ForceNew: true, + }, + "policies": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Creates or updates one or more policies for the specified key", + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rotation": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: []string{"policies.0.rotation", "policies.0.dual_auth_delete"}, + Description: "Specifies the key rotation time interval in months, with a minimum of 1, and a maximum of 12", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The v4 UUID used to uniquely identify the policy resource, as specified by RFC 4122.", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "Cloud Resource Name (CRN) that uniquely identifies your cloud resources.", + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for the resource that created the policy.", + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + Description: "The date the policy was created. The date format follows RFC 3339.", + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for the resource that updated the policy.", + }, + "last_update_date": { + Type: schema.TypeString, + Computed: true, + Description: "Updates when the policy is replaced or modified. The date format follows RFC 3339.", + }, + "interval_month": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateAllowedRangeInt(1, 12), + Description: "Specifies the key rotation time interval in months", + }, + }, + }, + }, + "dual_auth_delete": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: []string{"policies.0.rotation", "policies.0.dual_auth_delete"}, + Description: "Data associated with the dual authorization delete policy.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The v4 UUID used to uniquely identify the policy resource, as specified by RFC 4122.", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "Cloud Resource Name (CRN) that uniquely identifies your cloud resources.", + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for the resource that created the policy.", + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + Description: "The date the policy was created. The date format follows RFC 3339.", + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for the resource that updated the policy.", + }, + "last_update_date": { + Type: schema.TypeString, + Computed: true, + Description: "Updates when the policy is replaced or modified. 
The date format follows RFC 3339.", + }, + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "If set to true, Key Protect enables a dual authorization policy on a single key.", + }, + }, + }, + }, + }, + }, + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about the resource", + }, + }, + } +} + +func resourceIBMKmsKeyCreate(d *schema.ResourceData, meta interface{}) error { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + + rContollerClient, err := meta.(ClientSession).ResourceControllerAPIV2() + if err != nil { + return err + } + + instanceID := d.Get("instance_id").(string) + endpointType := d.Get("endpoint_type").(string) + + rContollerApi := rContollerClient.ResourceServiceInstanceV2() + + instanceData, err := rContollerApi.GetInstance(instanceID) + if err != nil { + return err + } + instanceCRN := instanceData.Crn.String() + crnData := strings.Split(instanceCRN, ":") + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.Contains(kpAPI.Config.BaseURL, "private") { + kmsEndpURL := strings.SplitAfter(kpAPI.Config.BaseURL, "https://") + if len(kmsEndpURL) == 2 { + kmsEndpointURL := kmsEndpURL[0] + "private." 
+ kmsEndpURL[1] + u, err := url.Parse(kmsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing kms EndpointURL") + } + kpAPI.URL = u + } else { + return fmt.Errorf("Error in Kms EndPoint URL ") + } + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + kpAPI.Config.InstanceID = instanceID + + kpAPI.Config.KeyRing = d.Get("key_ring_id").(string) + + name := d.Get("key_name").(string) + standardKey := d.Get("standard_key").(bool) + + var expiration *time.Time + if es, ok := d.GetOk("expiration_date"); ok { + expiration_string := es.(string) + // parse string to required time format + expiration_time, err := time.Parse(time.RFC3339, expiration_string) + if err != nil { + return fmt.Errorf("Invalid time format (the date format follows RFC 3339): %s", err) + } + expiration = &expiration_time + } else { + expiration = nil + } + + var keyCRN string + if standardKey { + if v, ok := d.GetOk("payload"); ok { + //import standard key + payload := v.(string) + stkey, err := kpAPI.CreateImportedStandardKey(context.Background(), name, expiration, payload) + if err != nil { + return fmt.Errorf( + "Error while creating standard key with payload: %s", err) + } + keyCRN = stkey.CRN + d.SetId(keyCRN) + + } else { + //create standard key + stkey, err := kpAPI.CreateStandardKey(context.Background(), name, expiration) + if err != nil { + return fmt.Errorf( + "Error while creating standard key: %s", err) + } + keyCRN = stkey.CRN + d.SetId(keyCRN) + + } + } else { + if v, ok := d.GetOk("payload"); ok { + payload := v.(string) + encryptedNonce := d.Get("encrypted_nonce").(string) + iv := d.Get("iv_value").(string) + stkey, err := kpAPI.CreateImportedRootKey(context.Background(), name, expiration, payload, encryptedNonce, iv) + if err != nil { + return fmt.Errorf( + "Error while creating Root key with payload: %s", err) + } + keyCRN = stkey.CRN + d.SetId(keyCRN) + + } else { + stkey, err := kpAPI.CreateRootKey(context.Background(), name, expiration) + if err != nil { + return fmt.Errorf( + "Error while creating Root key: %s", err) + } + keyCRN = stkey.CRN + d.SetId(keyCRN) + } + } + return resourceIBMKmsKeyUpdate(d, meta) +} + +func resourceIBMKmsKeyRead(d *schema.ResourceData, meta interface{}) error { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + crn := d.Id() + crnData := strings.Split(crn, ":") + endpointType := crnData[3] + instanceID := crnData[len(crnData)-3] + keyid := crnData[len(crnData)-1] + + var instanceType string + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + instanceType = "hs-crypto" + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + instanceType = "kms" + if endpointType == "private" { + if !strings.Contains(kpAPI.Config.BaseURL, "private") { + kmsEndpURL := strings.SplitAfter(kpAPI.Config.BaseURL, "https://") + if len(kmsEndpURL) == 2 { + kmsEndpointURL := kmsEndpURL[0] + "private." 
+ kmsEndpURL[1] + u, err := url.Parse(kmsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing kms EndpointURL") + } + kpAPI.URL = u + } else { + return fmt.Errorf("Error in Kms EndPoint URL ") + } + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + kpAPI.Config.InstanceID = instanceID + // keyid := d.Id() + key, err := kpAPI.GetKey(context.Background(), keyid) + if err != nil { + return fmt.Errorf("Get Key failed with error: %s", err) + } + + policies, err := kpAPI.GetPolicies(context.Background(), keyid) + if err != nil && !strings.Contains(fmt.Sprint(err), "Unauthorized: The user does not have access to the specified resource") { + return fmt.Errorf("Failed to read policies: %s", err) + } + if len(policies) == 0 { + log.Printf("No Policy Configurations read\n") + } else { + d.Set("policies", flattenKeyPolicies(policies)) + } + d.Set("instance_id", instanceID) + d.Set("key_id", keyid) + d.Set("standard_key", key.Extractable) + d.Set("payload", key.Payload) + d.Set("encrypted_nonce", key.EncryptedNonce) + d.Set("iv_value", key.IV) + d.Set("key_name", key.Name) + d.Set("crn", key.CRN) + d.Set("endpoint_type", endpointType) + d.Set("type", instanceType) + d.Set("force_delete", d.Get("force_delete").(bool)) + d.Set("key_ring_id", key.KeyRingID) + if key.Expiration != nil { + expiration := key.Expiration + d.Set("expiration_date", expiration.Format(time.RFC3339)) + } else { + d.Set("expiration_date", "") + } + d.Set(ResourceName, key.Name) + d.Set(ResourceCRN, key.CRN) + state := key.State + d.Set(ResourceStatus, strconv.Itoa(state)) + rcontroller, err := getBaseController(meta) + if err != nil { + return err + } + id := key.ID + crn1 := strings.TrimSuffix(key.CRN, ":key:"+id) + + d.Set(ResourceControllerURL, rcontroller+"/services/kms/"+url.QueryEscape(crn1)+"%3A%3A") + + return nil + +} + +func resourceIBMKmsKeyUpdate(d *schema.ResourceData, meta interface{}) error { + + if d.HasChange("force_delete") { + d.Set("force_delete", d.Get("force_delete").(bool)) + } + if d.HasChange("policies") { + + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + + rContollerClient, err := meta.(ClientSession).ResourceControllerAPIV2() + if err != nil { + return err + } + + instanceID := d.Get("instance_id").(string) + endpointType := d.Get("endpoint_type").(string) + + rContollerApi := rContollerClient.ResourceServiceInstanceV2() + + instanceData, err := rContollerApi.GetInstance(instanceID) + if err != nil { + return err + } + instanceCRN := instanceData.Crn.String() + crnData := strings.Split(instanceCRN, ":") + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.Contains(kpAPI.Config.BaseURL, "private") { + kmsEndpURL := strings.SplitAfter(kpAPI.Config.BaseURL, "https://") + if len(kmsEndpURL) == 2 { + kmsEndpointURL := kmsEndpURL[0] + "private." 
+ kmsEndpURL[1] + u, err := url.Parse(kmsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing kms EndpointURL") + } + kpAPI.URL = u + } else { + return fmt.Errorf("Error in Kms EndPoint URL ") + } + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + kpAPI.Config.InstanceID = instanceID + + crn := d.Id() + crnData = strings.Split(crn, ":") + key_id := crnData[len(crnData)-1] + + err = handlePolicies(d, kpAPI, meta, key_id) + if err != nil { + resourceIBMKmsKeyRead(d, meta) + return fmt.Errorf("Could not create policies: %s", err) + } + } + return resourceIBMKmsKeyRead(d, meta) + +} + +func resourceIBMKmsKeyDelete(d *schema.ResourceData, meta interface{}) error { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + crn := d.Id() + crnData := strings.Split(crn, ":") + endpointType := crnData[3] + instanceID := crnData[len(crnData)-3] + keyid := crnData[len(crnData)-1] + kpAPI.Config.InstanceID = instanceID + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.Contains(kpAPI.Config.BaseURL, "private") { + kmsEndpURL := strings.SplitAfter(kpAPI.Config.BaseURL, "https://") + if len(kmsEndpURL) == 2 { + kmsEndpointURL := kmsEndpURL[0] + "private." 
+ kmsEndpURL[1] + u, err := url.Parse(kmsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing kms EndpointURL") + } + kpAPI.URL = u + } else { + return fmt.Errorf("Error in Kms EndPoint URL ") + } + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + force := d.Get("force_delete").(bool) + f := kp.ForceOpt{ + Force: force, + } + + _, err1 := kpAPI.DeleteKey(context.Background(), keyid, kp.ReturnRepresentation, f) + if err1 != nil { + return fmt.Errorf( + "Error while deleting: %s", err1) + } + d.SetId("") + return nil + +} + +func resourceIBMKmsKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return false, err + } + + crn := d.Id() + crnData := strings.Split(crn, ":") + endpointType := crnData[3] + instanceID := crnData[len(crnData)-3] + keyid := crnData[len(crnData)-1] + kpAPI.Config.InstanceID = instanceID + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return false, err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return false, err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return false, fmt.Errorf("Error Parsing hpcs EndpointURL") + + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.Contains(kpAPI.Config.BaseURL, "private") { + kmsEndpURL := strings.SplitAfter(kpAPI.Config.BaseURL, "https://") + if len(kmsEndpURL) == 2 { + kmsEndpointURL := kmsEndpURL[0] + "private." 
+ kmsEndpURL[1] + u, err := url.Parse(kmsEndpointURL) + if err != nil { + return false, fmt.Errorf("Error Parsing kms EndpointURL") + } + kpAPI.URL = u + } else { + return false, fmt.Errorf("Error in Kms EndPoint URL ") + } + } + } + } else { + return false, fmt.Errorf("Invalid or unsupported service Instance") + } + + _, err = kpAPI.GetKey(context.Background(), keyid) + if err != nil { + kpError := err.(*kp.Error) + if kpError.StatusCode == 404 { + return false, nil + } + return false, err + } + return true, nil + +} + +func handlePolicies(d *schema.ResourceData, kpAPI *kp.Client, meta interface{}, key_id string) error { + var setRotation, setDualAuthDelete, dualAuthEnable bool + var rotationInterval int + + if policyInfo, ok := d.GetOk("policies"); ok { + + policyDataList := policyInfo.([]interface{}) + policyData := policyDataList[0].(map[string]interface{}) + + if rpd, ok := policyData["rotation"]; ok { + rpdList := rpd.([]interface{}) + if len(rpdList) != 0 { + rotationInterval = rpdList[0].(map[string]interface{})["interval_month"].(int) + setRotation = true + } + } + if dadp, ok := policyData["dual_auth_delete"]; ok { + dadpList := dadp.([]interface{}) + if len(dadpList) != 0 { + dualAuthEnable = dadpList[0].(map[string]interface{})["enabled"].(bool) + setDualAuthDelete = true + } + } + + _, err := kpAPI.SetPolicies(context.Background(), key_id, setRotation, rotationInterval, setDualAuthDelete, dualAuthEnable) + if err != nil { + return fmt.Errorf("Error while creating policies: %s", err) + } + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kms_key_alias.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kms_key_alias.go new file mode 100644 index 00000000000..7a1543c9fb7 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kms_key_alias.go @@ -0,0 +1,249 @@ +package ibm + +import ( + "context" + "fmt" + "net/url" + "strings" + + kp "github.com/IBM/keyprotect-go-client" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMKmskeyAlias() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMKmsKeyAliasCreate, + Delete: resourceIBMKmsKeyAliasDelete, + Read: resourceIBMKmsKeyAliasRead, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Key ID", + ForceNew: true, + }, + "alias": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Key protect or hpcs key alias name", + }, + "key_id": { + Type: schema.TypeString, + Required: true, + Description: "Key ID", + ForceNew: true, + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private"}), + Description: "public or private", + ForceNew: true, + Default: "public", + }, + }, + } +} + +func resourceIBMKmsKeyAliasCreate(d *schema.ResourceData, meta interface{}) error { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + rContollerClient, err := meta.(ClientSession).ResourceControllerAPIV2() + if err != nil { + return err + } + + instanceID := d.Get("instance_id").(string) + endpointType := d.Get("endpoint_type").(string) + + rContollerAPI := rContollerClient.ResourceServiceInstanceV2() + + instanceData, err := rContollerAPI.GetInstance(instanceID) + if err != nil { + return err + } + instanceCRN := instanceData.Crn.String() + crnData := 
strings.Split(instanceCRN, ":") + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.HasPrefix(kpAPI.Config.BaseURL, "private") { + kpAPI.Config.BaseURL = "private." + kpAPI.Config.BaseURL + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + kpAPI.Config.InstanceID = instanceID + + aliasName := d.Get("alias").(string) + keyID := d.Get("key_id").(string) + stkey, err := kpAPI.CreateKeyAlias(context.Background(), aliasName, keyID) + if err != nil { + return fmt.Errorf( + "Error while creating alias name for the key: %s", err) + } + key, err := kpAPI.GetKey(context.Background(), stkey.KeyID) + if err != nil { + return fmt.Errorf("Get Key failed with error: %s", err) + } + d.SetId(fmt.Sprintf("%s:alias:%s", stkey.Alias, key.CRN)) + + return resourceIBMKmsKeyAliasRead(d, meta) +} + +func resourceIBMKmsKeyAliasRead(d *schema.ResourceData, meta interface{}) error { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + id := strings.Split(d.Id(), ":alias:") + crn := id[1] + crnData := strings.Split(crn, ":") + endpointType := crnData[3] + instanceID := crnData[len(crnData)-3] + keyid := crnData[len(crnData)-1] + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.HasPrefix(kpAPI.Config.BaseURL, "private") { + kpAPI.Config.BaseURL = "private." 
+ kpAPI.Config.BaseURL + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + kpAPI.Config.InstanceID = instanceID + key, err := kpAPI.GetKey(context.Background(), keyid) + if err != nil { + kpError := err.(*kp.Error) + if kpError.StatusCode == 404 { + d.SetId("") + return nil + } else { + return fmt.Errorf("Get Key failed with error: %s", err) + } + } + d.Set("alias", id[0]) + d.Set("key_id", key.ID) + d.Set("instance_id", instanceID) + d.Set("endpoint_type", endpointType) + + return nil +} + +func resourceIBMKmsKeyAliasDelete(d *schema.ResourceData, meta interface{}) error { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + id := strings.Split(d.Id(), ":alias:") + crn := id[1] + crnData := strings.Split(crn, ":") + endpointType := crnData[3] + instanceID := crnData[len(crnData)-3] + keyid := crnData[len(crnData)-1] + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.HasPrefix(kpAPI.Config.BaseURL, "private") { + kpAPI.Config.BaseURL = "private." + kpAPI.Config.BaseURL + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + kpAPI.Config.InstanceID = instanceID + err1 := kpAPI.DeleteKeyAlias(context.Background(), id[0], keyid) + if err1 != nil { + kpError := err1.(*kp.Error) + if kpError.StatusCode == 404 { + return nil + } else { + return fmt.Errorf(" failed to Destroy alias with error: %s", err1) + } + } + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kms_key_rings.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kms_key_rings.go new file mode 100644 index 00000000000..2a5a5c3600e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kms_key_rings.go @@ -0,0 +1,269 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
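// NOTE (editor): ibm_kms_key_rings stores its ID as the composite
// "<keyRingID>:keyRing:<instanceCRN>"; Read and Delete split on the literal
// ":keyRing:" marker and then parse the CRN positionally, as in
// resource_ibm_kms_key.go. Minimal sketch of the round trip:
//
//	d.SetId(fmt.Sprintf("%s:keyRing:%s", keyRingID, instanceCRN))
//	...
//	id := strings.Split(d.Id(), ":keyRing:")
//	keyRingID, instanceCRN := id[0], id[1]
//
// One caveat worth flagging: this file (like the key alias resource above)
// derives the private endpoint by checking strings.HasPrefix(Config.BaseURL,
// "private") and then prepending "private." to the whole BaseURL. Since
// BaseURL normally carries the "https://" scheme, the scheme-aware rebuild
// used in resource_ibm_kms_key.go (SplitAfter on "https://") appears to be
// the intended form; treat the variant here with care.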
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "net/url" + "strings" + + kp "github.com/IBM/keyprotect-go-client" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMKmskeyRings() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMKmsKeyRingCreate, + Delete: resourceIBMKmsKeyRingDelete, + Read: resourceIBMKmsKeyRingRead, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Key protect Instance GUID", + ForceNew: true, + }, + "key_ring_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "User defined unique ID for the key ring", + ValidateFunc: InvokeValidator("ibm_kms_key_rings", "key_ring_id"), + }, + "endpoint_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private"}), + Description: "public or private", + ForceNew: true, + Default: "public", + }, + }, + } +} + +func resourceIBMKeyRingValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "key_ring_id", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^[a-zA-Z0-9-]*$`, + MinValueLength: 2, + MaxValueLength: 100}) + + ibmKeyRingResourceValidator := ResourceValidator{ResourceName: "ibm_kms_key_rings", Schema: validateSchema} + return &ibmKeyRingResourceValidator +} + +func resourceIBMKmsKeyRingCreate(d *schema.ResourceData, meta interface{}) error { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + rContollerClient, err := meta.(ClientSession).ResourceControllerAPIV2() + if err != nil { + return err + } + + instanceID := d.Get("instance_id").(string) + endpointType := d.Get("endpoint_type").(string) + keyRingID := d.Get("key_ring_id").(string) + + rContollerAPI := rContollerClient.ResourceServiceInstanceV2() + + instanceData, err := rContollerAPI.GetInstance(instanceID) + if err != nil { + return err + } + instanceCRN := instanceData.Crn.String() + crnData := strings.Split(instanceCRN, ":") + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.HasPrefix(kpAPI.Config.BaseURL, "private") { + kpAPI.Config.BaseURL = "private." 
+ kpAPI.Config.BaseURL + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + kpAPI.Config.InstanceID = instanceID + + err = kpAPI.CreateKeyRing(context.Background(), keyRingID) + if err != nil { + return fmt.Errorf( + "Error while creating key ring : %s", err) + } + var keyRing string + keyRings, err2 := kpAPI.GetKeyRings(context.Background()) + if err2 != nil { + return fmt.Errorf( + "Error while fetching key ring : %s", err2) + } + for _, v := range keyRings.KeyRings { + if v.ID == keyRingID { + keyRing = v.ID + break + } + } + + d.SetId(fmt.Sprintf("%s:keyRing:%s", keyRing, instanceCRN)) + + return resourceIBMKmsKeyRingRead(d, meta) +} + +func resourceIBMKmsKeyRingRead(d *schema.ResourceData, meta interface{}) error { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + id := strings.Split(d.Id(), ":keyRing:") + crn := id[1] + crnData := strings.Split(crn, ":") + endpointType := crnData[3] + instanceID := crnData[len(crnData)-3] + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.HasPrefix(kpAPI.Config.BaseURL, "private") { + kpAPI.Config.BaseURL = "private." + kpAPI.Config.BaseURL + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + kpAPI.Config.InstanceID = instanceID + _, err = kpAPI.GetKeyRings(context.Background()) + if err != nil { + kpError := err.(*kp.Error) + if kpError.StatusCode == 404 || kpError.StatusCode == 409 { + d.SetId("") + return nil + } + return fmt.Errorf("Get Key Rings failed with error: %s", err) + } + + d.Set("instance_id", instanceID) + d.Set("endpoint_type", endpointType) + d.Set("key_ring_id", id[0]) + return nil +} + +func resourceIBMKmsKeyRingDelete(d *schema.ResourceData, meta interface{}) error { + kpAPI, err := meta.(ClientSession).keyManagementAPI() + if err != nil { + return err + } + id := strings.Split(d.Id(), ":keyRing:") + crn := id[1] + crnData := strings.Split(crn, ":") + endpointType := crnData[3] + instanceID := crnData[len(crnData)-3] + + var hpcsEndpointURL string + + if crnData[4] == "hs-crypto" { + hpcsEndpointAPI, err := meta.(ClientSession).HpcsEndpointAPI() + if err != nil { + return err + } + + resp, err := hpcsEndpointAPI.Endpoint().GetAPIEndpoint(instanceID) + if err != nil { + return err + } + + if endpointType == "public" { + hpcsEndpointURL = "https://" + resp.Kms.Public + "/api/v2/keys" + } else { + hpcsEndpointURL = "https://" + resp.Kms.Private + "/api/v2/keys" + } + + u, err := url.Parse(hpcsEndpointURL) + if err != nil { + return fmt.Errorf("Error Parsing hpcs EndpointURL") + + } + kpAPI.URL = u + } else if crnData[4] == "kms" { + if endpointType == "private" { + if !strings.HasPrefix(kpAPI.Config.BaseURL, "private") { + kpAPI.Config.BaseURL = "private." 
+ kpAPI.Config.BaseURL + } + } + } else { + return fmt.Errorf("Invalid or unsupported service Instance") + } + + kpAPI.Config.InstanceID = instanceID + err1 := kpAPI.DeleteKeyRing(context.Background(), id[0]) + if err1 != nil { + kpError := err1.(*kp.Error) + if kpError.StatusCode == 404 || kpError.StatusCode == 409 { + return nil + } else { + return fmt.Errorf(" failed to Destroy key ring with error: %s", err1) + } + } + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kp_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kp_key.go new file mode 100644 index 00000000000..6b26048cbdc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_kp_key.go @@ -0,0 +1,276 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + kp "github.com/IBM/keyprotect-go-client" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMkey() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMKeyCreate, + Read: resourceIBMKeyRead, + Update: resourceIBMKeyUpdate, + Delete: resourceIBMKeyDelete, + Exists: resourceIBMKeyExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "key_protect_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Key protect instance ID", + }, + "key_id": { + Type: schema.TypeString, + Computed: true, + Description: "Key ID", + }, + "key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Key name", + }, + "standard_key": { + Type: schema.TypeBool, + Default: false, + Optional: true, + ForceNew: true, + Description: "Standard key type", + }, + "payload": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "force_delete": { + Type: schema.TypeBool, + Optional: true, + Description: "set to true to force delete the key", + ForceNew: false, + Default: false, + }, + "encrypted_nonce": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Only for imported root key", + }, + "iv_value": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Only for imported root key", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "Crn of the key", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about the resource", + }, + }, + } +} + +func resourceIBMKeyCreate(d *schema.ResourceData, meta interface{}) error { + api, err := meta.(ClientSession).keyProtectAPI() + if err != nil { + return err + } + instanceID := d.Get("key_protect_id").(string) + api.Config.InstanceID = 
instanceID + name := d.Get("key_name").(string) + standardKey := d.Get("standard_key").(bool) + + var keyCRN string + if standardKey { + if v, ok := d.GetOk("payload"); ok { + //import standard key + payload := v.(string) + stkey, err := api.CreateImportedStandardKey(context.Background(), name, nil, payload) + if err != nil { + return fmt.Errorf( + "Error while creating standard key: %s", err) + } + keyCRN = stkey.CRN + } else { + //create standard key + stkey, err := api.CreateStandardKey(context.Background(), name, nil) + if err != nil { + return fmt.Errorf( + "Error while creating standard key: %s", err) + } + keyCRN = stkey.CRN + } + d.SetId(keyCRN) + } else { + if v, ok := d.GetOk("payload"); ok { + payload := v.(string) + encryptedNonce := d.Get("encrypted_nonce").(string) + iv := d.Get("iv_value").(string) + stkey, err := api.CreateImportedRootKey(context.Background(), name, nil, payload, encryptedNonce, iv) + if err != nil { + return fmt.Errorf( + "Error while creating Root key: %s", err) + } + keyCRN = stkey.CRN + } else { + stkey, err := api.CreateRootKey(context.Background(), name, nil) + if err != nil { + return fmt.Errorf( + "Error while creating Root key: %s", err) + } + keyCRN = stkey.CRN + } + + d.SetId(keyCRN) + + } + d.Set("force_delete", d.Get("force_delete").(bool)) + + return resourceIBMKeyRead(d, meta) +} + +func resourceIBMKeyRead(d *schema.ResourceData, meta interface{}) error { + api, err := meta.(ClientSession).keyProtectAPI() + if err != nil { + return err + } + crn := d.Id() + crnData := strings.Split(crn, ":") + + instanceID := crnData[len(crnData)-3] + keyid := crnData[len(crnData)-1] + api.Config.InstanceID = instanceID + // keyid := d.Id() + key, err := api.GetKey(context.Background(), keyid) + if err != nil { + return fmt.Errorf( + "Get Key failed with error: %s", err) + } + d.Set("key_id", keyid) + d.Set("standard_key", key.Extractable) + d.Set("payload", key.Payload) + d.Set("encrypted_nonce", key.EncryptedNonce) + d.Set("iv_value", key.IV) + d.Set("key_name", key.Name) + d.Set("crn", key.CRN) + + d.Set(ResourceName, key.Name) + d.Set(ResourceCRN, key.CRN) + + state := key.State + d.Set(ResourceStatus, strconv.Itoa(state)) + + rcontroller, err := getBaseController(meta) + if err != nil { + return err + } + id := key.ID + crn1 := strings.TrimSuffix(key.CRN, ":key:"+id) + + d.Set(ResourceControllerURL, rcontroller+"/services/kms/"+url.QueryEscape(crn1)+"%3A%3A") + + return nil + +} + +func resourceIBMKeyUpdate(d *schema.ResourceData, meta interface{}) error { + + if d.HasChange("force_delete") { + d.Set("force_delete", d.Get("force_delete").(bool)) + } + return resourceIBMKeyRead(d, meta) + +} + +func resourceIBMKeyDelete(d *schema.ResourceData, meta interface{}) error { + api, err := meta.(ClientSession).keyProtectAPI() + if err != nil { + return err + } + crn := d.Id() + crnData := strings.Split(crn, ":") + + instanceID := crnData[len(crnData)-3] + keyid := crnData[len(crnData)-1] + api.Config.InstanceID = instanceID + force := d.Get("force_delete").(bool) + f := kp.ForceOpt{ + Force: force, + } + _, err1 := api.DeleteKey(context.Background(), keyid, kp.ReturnRepresentation, f) + if err1 != nil { + return fmt.Errorf( + "Error while deleting: %s", err1) + } + d.SetId("") + return nil + +} + +func resourceIBMKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + api, err := meta.(ClientSession).keyProtectAPI() + if err != nil { + return false, err + } + crn := d.Id() + crnData := strings.Split(crn, ":") + + instanceID := 
crnData[len(crnData)-3] + keyid := crnData[len(crnData)-1] + api.Config.InstanceID = instanceID + // keyid := d.Id() + _, err = api.GetKey(context.Background(), keyid) + if err != nil { + kpError := err.(*kp.Error) + if kpError.StatusCode == 404 { + return false, nil + } + return false, err + } + return true, nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb.go new file mode 100644 index 00000000000..6760d05996a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb.go @@ -0,0 +1,490 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + LB_LARGE_150000_CONNECTIONS = 150000 + LB_SMALL_15000_CONNECTIONS = 15000 + + LbLocalPackageType = "ADDITIONAL_SERVICES_LOAD_BALANCER" + + lbMask = "id,dedicatedFlag,connectionLimit,ipAddressId,securityCertificateId,highAvailabilityFlag," + + "sslEnabledFlag,sslActiveFlag,loadBalancerHardware[datacenter[name]],ipAddress[ipAddress,subnetId],billingItem[upgradeItems[capacity]]" +) + +func resourceIBMLb() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbCreate, + Read: resourceIBMLbRead, + Update: resourceIBMLbUpdate, + Delete: resourceIBMLbDelete, + Exists: resourceIBMLbExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "connections": { + Type: schema.TypeInt, + Required: true, + Description: "Connections value", + }, + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Datacenter name info", + }, + "ha_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: "true if High availability is enabled", + }, + "security_certificate_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Security certificate ID", + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "subnet_id": { + Type: schema.TypeInt, + Computed: true, + }, + "dedicated": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + Description: "Boolena value true if Load balncer is dedicated type", + }, + "ssl_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "ssl_offload": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "boolean value true if SSL offload is enabled", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Tags associated with resource", + }, + + "hostname": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMLbCreate(d *schema.ResourceData, meta interface{}) error { + + sess := meta.(ClientSession).SoftLayerSession() + + connections := d.Get("connections").(int) + haEnabled := 
d.Get("ha_enabled").(bool) + dedicated := d.Get("dedicated").(bool) + + var categoryCode string + + // SoftLayer capacities don't match the published capacities as seen in the local lb + // ordering screen in the customer portal. Terraform exposes the published capacities. + // Create a translation map for those cases where the published capacity does not + // equal the actual actual capacity on the product_item. + capacities := map[int]float64{ + 15000: 65000.0, + 150000: 130000.0, + } + + var capacity float64 + if c, ok := capacities[connections]; !ok { + capacity = float64(connections) + } else { + capacity = c + } + + var keyFormatter string + if dedicated { + // Dedicated local LB always comes with SSL support + d.Set("ssl_enabled", true) + categoryCode = product.DedicatedLoadBalancerCategoryCode + if haEnabled { + keyFormatter = "DEDICATED_LOAD_BALANCER_WITH_HIGH_AVAILABILITY_AND_SSL_%d_CONNECTIONS" + } else { + keyFormatter = "LOAD_BALANCER_DEDICATED_WITH_SSL_OFFLOAD_%d_CONNECTIONS" + } + } else { + if d.Get("ha_enabled").(bool) { + return fmt.Errorf("High Availability is not supported for shared local load balancers") + } + categoryCode = product.ProxyLoadBalancerCategoryCode + if _, ok := d.GetOk("security_certificate_id"); ok { + d.Set("ssl_enabled", true) + keyFormatter = "LOAD_BALANCER_%d_VIP_CONNECTIONS_WITH_SSL_OFFLOAD" + } else { + d.Set("ssl_enabled", false) + keyFormatter = "LOAD_BALANCER_%d_VIP_CONNECTIONS" + } + } + + keyName := fmt.Sprintf(keyFormatter, connections) + + pkg, err := product.GetPackageByType(sess, LbLocalPackageType) + if err != nil { + return err + } + + // Get all prices for ADDITIONAL_SERVICE_LOAD_BALANCER with the given capacity + productItems, err := product.GetPackageProducts(sess, *pkg.Id) + if err != nil { + return err + } + + // Select only those product items with a matching keyname + targetItems := []datatypes.Product_Item{} + for _, item := range productItems { + if *item.KeyName == keyName { + targetItems = append(targetItems, item) + } + } + + if len(targetItems) == 0 { + return fmt.Errorf("No product items matching %s could be found", keyName) + } + + //select prices with the required capacity + prices := product.SelectProductPricesByCategory( + targetItems, + map[string]float64{ + categoryCode: capacity, + }, + ) + + // Lookup the datacenter ID + dc, err := location.GetDatacenterByName(sess, d.Get("datacenter").(string)) + + productOrderContainer := datatypes.Container_Product_Order_Network_LoadBalancer{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Location: sl.String(strconv.Itoa(*dc.Id)), + Prices: prices[:1], + Quantity: sl.Int(1), + }, + } + + log.Println("[INFO] Creating load balancer") + + receipt, err := services.GetProductOrderService(sess.SetRetries(0)). 
+ PlaceOrder(&productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of load balancer: %s", err) + } + + loadBalancer, err := findLoadBalancerByOrderId(sess, *receipt.OrderId, dedicated, d) + if err != nil { + return fmt.Errorf("Error during creation of load balancer: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *loadBalancer.Id)) + d.Set("connections", getConnectionLimit(*loadBalancer.ConnectionLimit)) + d.Set("datacenter", loadBalancer.LoadBalancerHardware[0].Datacenter.Name) + d.Set("ip_address", loadBalancer.IpAddress.IpAddress) + d.Set("subnet_id", loadBalancer.IpAddress.SubnetId) + d.Set("ha_enabled", loadBalancer.HighAvailabilityFlag) + + log.Printf("[INFO] Load Balancer ID: %s", d.Id()) + + return resourceIBMLbUpdate(d, meta) +} + +func resourceIBMLbUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipID, _ := strconv.Atoi(d.Id()) + + certID := d.Get("security_certificate_id").(int) + + err := setLocalLBSecurityCert(sess, vipID, certID) + if err != nil { + return fmt.Errorf("Update load balancer failed: %s", err) + } + + if d.HasChange("connections") { + vip, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID). + Mask(lbMask). + GetObject() + if err != nil { + return err + } + ors, nrs := d.GetChange("connections") + oldValue := ors.(int) + newValue := nrs.(int) + + if oldValue > 0 { + if *vip.DedicatedFlag { + return fmt.Errorf("Error Updating load balancer connection limit: Upgrade for dedicated loadbalancer is not supported") + } + if vip.BillingItem.UpgradeItems[0].Capacity != nil { + validUpgradeValue := vip.BillingItem.UpgradeItems[0].Capacity + if newValue == int(*validUpgradeValue) { + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID).UpgradeConnectionLimit() + if err != nil { + return fmt.Errorf("Error Updating load balancer connection limit: %s", err) + } + } else { + + return fmt.Errorf("Error Updating load balancer connection limit : Valid value to which connection limit can be upgraded is : %d ", int(*validUpgradeValue)) + + } + + } else { + return fmt.Errorf("Error Updating load balancer connection limit: No upgrade available, already it has maximum connection limit") + } + } + + } + + if d.HasChange("ssl_offload") && !d.IsNewResource() { + + if d.Get("ssl_offload").(bool) { + + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID).StartSsl() + if err != nil { + return fmt.Errorf("Error starting ssl acceleration for load balancer : %s", err) + } + + } else { + + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID).StopSsl() + if err != nil { + return fmt.Errorf("Error stopping ssl acceleration for load balancer : %s", err) + } + + } + } + + return resourceIBMLbRead(d, meta) +} + +func resourceIBMLbRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + vipID, _ := strconv.Atoi(d.Id()) + + vip, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID). + Mask(lbMask). 
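+		// lbMask (defined at the top of this file) fetches the connection
+		// limit, HA/SSL flags, datacenter, VIP address and billing item in
+		// one call, so Read can populate every computed attribute from a
+		// single GetObject round trip.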
+ GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving load balancer: %s", err) + } + + d.Set("connections", getConnectionLimit(*vip.ConnectionLimit)) + d.Set("datacenter", vip.LoadBalancerHardware[0].Datacenter.Name) + d.Set("ip_address", vip.IpAddress.IpAddress) + d.Set("subnet_id", vip.IpAddress.SubnetId) + d.Set("ha_enabled", vip.HighAvailabilityFlag) + d.Set("dedicated", vip.DedicatedFlag) + d.Set("ssl_enabled", vip.SslEnabledFlag) + d.Set("ssl_offload", vip.SslActiveFlag) + // Optional fields. Guard against nil pointer dereferences + d.Set("security_certificate_id", sl.Get(vip.SecurityCertificateId, nil)) + d.Set("hostname", vip.LoadBalancerHardware[0].Hostname) + return nil +} + +func resourceIBMLbDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + vipService := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess) + vipID, _ := strconv.Atoi(d.Id()) + + certID := d.Get("security_certificate_id").(int) + + if certID > 0 { + err := setLocalLBSecurityCert(sess, vipID, 0) + if err != nil { + return fmt.Errorf("Remove certificate before deleting load balancer failed: %s", err) + } + + } + + var billingItem datatypes.Billing_Item_Network_LoadBalancer + var err error + + // Get billing item associated with the load balancer + if d.Get("dedicated").(bool) { + billingItem, err = vipService. + Id(vipID). + GetDedicatedBillingItem() + } else { + billingItem.Billing_Item, err = vipService. + Id(vipID). + GetBillingItem() + } + + if err != nil { + return fmt.Errorf("Error while looking up billing item associated with the load balancer: %s", err) + } + + if billingItem.Id == nil { + return fmt.Errorf("Error while looking up billing item associated with the load balancer: No billing item for ID:%d", vipID) + } + success, err := services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService() + if err != nil { + return err + } + + if !success { + return fmt.Errorf("SoftLayer reported an unsuccessful cancellation") + } + + return nil +} + +func resourceIBMLbExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + vipID, _ := strconv.Atoi(d.Id()) + + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID). + Mask("id"). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return true, nil +} + +/* When requesting 15000 SL creates between 15000 and 150000. 
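+   (SoftLayer reports the actual provisioned capacity rather than the
+   published figure, so getConnectionLimit below buckets the reported value
+   back into the published 15000/150000 tiers and Terraform does not see a
+   spurious diff.)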
When requesting 150000 SL creates >= 150000 */ +func getConnectionLimit(connectionLimit int) int { + if connectionLimit >= LB_LARGE_150000_CONNECTIONS { + return LB_LARGE_150000_CONNECTIONS + } else if connectionLimit >= LB_SMALL_15000_CONNECTIONS && + connectionLimit < LB_LARGE_150000_CONNECTIONS { + return LB_SMALL_15000_CONNECTIONS + } else { + return connectionLimit + } +} + +func findLoadBalancerByOrderId(sess *session.Session, orderId int, dedicated bool, d *schema.ResourceData) (datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, error) { + var filterPath string + if dedicated { + filterPath = "adcLoadBalancers.dedicatedBillingItem.orderItem.order.id" + } else { + filterPath = "adcLoadBalancers.billingItem.orderItem.order.id" + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + lbs, err := services.GetAccountService(sess). + Filter(filter.Build( + filter.Path(filterPath). + Eq(strconv.Itoa(orderId)))). + Mask(lbMask). + GetAdcLoadBalancers() + if err != nil { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{}, "", err + } + + if len(lbs) == 1 { + return lbs[0], "complete", nil + } else if len(lbs) == 0 { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{}, "pending", nil + } else { + return nil, "", fmt.Errorf("Expected one load balancer: %s", err) + } + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + NotFoundChecks: 24 * 60, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{}, err + } + + var result, ok = pendingResult.(datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) + + if ok { + return result, nil + } + + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{}, + fmt.Errorf("Cannot find Application Delivery Controller Load Balancer with order id '%d'", orderId) +} + +func setLocalLBSecurityCert(sess *session.Session, vipID int, certID int) error { + var vip struct { + SecurityCertificateId *int `json:"securityCertificateId"` + } + + var success bool + + if certID == 0 { + vip.SecurityCertificateId = nil + } else { + vip.SecurityCertificateId = &certID + } + + // In order to send a null value, need to invoke DoRequest directly with a custom struct + err := sess.DoRequest( + "SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", + "editObject", + []interface{}{&vip}, + &sl.Options{Id: &vipID}, + &success, + ) + + if !success && err == nil { + return fmt.Errorf("Unable to remove ssl security certificate from load balancer") + } + + return err +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_service.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_service.go new file mode 100644 index 00000000000..35cc8f6d685 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_service.go @@ -0,0 +1,376 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "time" + + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMLbService() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbServiceCreate, + Read: resourceIBMLbServiceRead, + Update: resourceIBMLbServiceUpdate, + Delete: resourceIBMLbServiceDelete, + Exists: resourceIBMLbServiceExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "service_group_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "service group ID", + }, + "ip_address_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "IP Address ID", + }, + "port": { + Type: schema.TypeInt, + Required: true, + Description: "Port number", + }, + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Boolean value true, if enabled else false", + }, + "health_check_type": { + Type: schema.TypeString, + Required: true, + Description: "health check type", + }, + "weight": { + Type: schema.TypeInt, + Required: true, + Description: "Weight value", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Tags for the resource", + }, + }, + } +} + +func resourceIBMLbServiceCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + // SoftLayer Local LBs consist of a multi-level hierarchy of types. + // (virtualIpAddress -> []virtualServer -> []serviceGroup -> []service) + + // Using the service group ID provided in the config, find the IDs of the + // respective virtualServer and virtualIpAddress + sgID := d.Get("service_group_id").(int) + serviceGroup, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService(sess). + Id(sgID). + Mask("id,routingMethodId,routingTypeId,virtualServer[id,allocation,port,virtualIpAddress[id]]"). 
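+		// The nested mask walks up the hierarchy (serviceGroup ->
+		// virtualServer -> virtualIpAddress) to collect the parent IDs that
+		// must be echoed back when the VIP tree is edited below.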
+ GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving load balancer service group from SoftLayer, %s", err) + } + + // Store the IDs for later use + vsID := *serviceGroup.VirtualServer.Id + vipID := *serviceGroup.VirtualServer.VirtualIpAddress.Id + + // Convert the health check type name to an ID + healthCheckTypeId, err := getHealthCheckTypeId(sess, d.Get("health_check_type").(string)) + if err != nil { + return err + } + + // The API only exposes edit capability at the root of the tree (virtualIpAddress), + // so need to send the full structure from the root down to the node to be added or + // modified + vip := datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{ + + VirtualServers: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{{ + Id: &vsID, + Allocation: serviceGroup.VirtualServer.Allocation, + Port: serviceGroup.VirtualServer.Port, + + ServiceGroups: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group{{ + Id: &sgID, + RoutingMethodId: serviceGroup.RoutingMethodId, + RoutingTypeId: serviceGroup.RoutingTypeId, + + Services: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service{{ + Enabled: sl.Int(1), + Port: sl.Int(d.Get("port").(int)), + IpAddressId: sl.Int(d.Get("ip_address_id").(int)), + + HealthChecks: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{{ + HealthCheckTypeId: &healthCheckTypeId, + }}, + + GroupReferences: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference{{ + Weight: sl.Int(d.Get("weight").(int)), + }}, + }}, + }}, + }}, + } + + log.Println("[INFO] Creating load balancer service") + + err = updateLoadBalancerService(sess.SetRetries(0), vipID, &vip) + + if err != nil { + return fmt.Errorf("Error creating load balancer service: %s", err) + } + + // Retrieve the newly created object, to obtain its ID + svcs, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService(sess). + Id(sgID). + Mask("mask[id,port,ipAddressId]"). + Filter(filter.New( + filter.Path("services.port").Eq(d.Get("port")), + filter.Path("services.ipAddressId").Eq(d.Get("ip_address_id"))).Build()). + GetServices() + + if err != nil || len(svcs) == 0 { + return fmt.Errorf("Error retrieving load balancer: %s", err) + } + + d.SetId(strconv.Itoa(*svcs[0].Id)) + + log.Printf("[INFO] Load Balancer Service ID: %s", d.Id()) + + return resourceIBMLbServiceRead(d, meta) +} + +func resourceIBMLbServiceUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + // Using the ID stored in the config, find the IDs of the respective + // serviceGroup, virtualServer and virtualIpAddress + svcID, _ := strconv.Atoi(d.Id()) + svc, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess). + Id(svcID). + Mask("id,serviceGroup[id,routingTypeId,routingMethodId,virtualServer[id,allocation,port,virtualIpAddress[id]]]"). 
+ GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving load balancer service group from SoftLayer, %s", err) + } + + // Store the IDs for later use + sgID := *svc.ServiceGroup.Id + vsID := *svc.ServiceGroup.VirtualServer.Id + vipID := *svc.ServiceGroup.VirtualServer.VirtualIpAddress.Id + + // Convert the health check type name to an ID + healthCheckTypeId, err := getHealthCheckTypeId(sess, d.Get("health_check_type").(string)) + if err != nil { + return err + } + + // The API only exposes edit capability at the root of the tree (virtualIpAddress), + // so need to send the full structure from the root down to the node to be added or + // modified + vip := datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{ + + VirtualServers: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{{ + Id: &vsID, + Allocation: svc.ServiceGroup.VirtualServer.Allocation, + Port: svc.ServiceGroup.VirtualServer.Port, + + ServiceGroups: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group{{ + Id: &sgID, + RoutingMethodId: svc.ServiceGroup.RoutingMethodId, + RoutingTypeId: svc.ServiceGroup.RoutingTypeId, + + Services: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service{{ + Id: &svcID, + Enabled: sl.Int(1), + Port: sl.Int(d.Get("port").(int)), + IpAddressId: sl.Int(d.Get("ip_address_id").(int)), + + HealthChecks: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{{ + HealthCheckTypeId: &healthCheckTypeId, + }}, + + GroupReferences: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference{{ + Weight: sl.Int(d.Get("weight").(int)), + }}, + }}, + }}, + }}, + } + + log.Println("[INFO] Updating load balancer service") + + err = updateLoadBalancerService(sess.SetRetries(0), vipID, &vip) + + if err != nil { + return fmt.Errorf("Error updating load balancer service: %s", err) + } + + return resourceIBMLbServiceRead(d, meta) +} + +func resourceIBMLbServiceRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + svcID, _ := strconv.Atoi(d.Id()) + + svc, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess). + Id(svcID). + Mask("ipAddressId,enabled,port,healthChecks[type[keyname]],groupReferences[weight],serviceGroup[id]"). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving service: %s", err) + } + + d.Set("ip_address_id", svc.IpAddressId) + d.Set("port", svc.Port) + d.Set("health_check_type", svc.HealthChecks[0].Type.Keyname) + d.Set("weight", svc.GroupReferences[0].Weight) + d.Set("enabled", (*svc.Enabled == 1)) + d.Set("service_group_id", svc.ServiceGroup.Id) + + return nil +} + +func resourceIBMLbServiceDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + svcID, _ := strconv.Atoi(d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess). + Id(svcID). 
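+			// DeleteObject is wrapped in a StateChangeConf: the "busy" errors
+			// matched below return "pending" so the delete is retried until it
+			// succeeds or the 10 minute timeout expires.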
+ DeleteObject() + + if apiErr, ok := err.(sl.Error); ok { + switch { + case apiErr.Exception == "SoftLayer_Exception_Network_Timeout" || + strings.Contains(apiErr.Message, "There was a problem saving your configuration to the load balancer.") || + strings.Contains(apiErr.Message, "The selected group could not be removed from the load balancer.") || + strings.Contains(apiErr.Message, "The resource '480' is already in use."): + // The LB is busy with another transaction. Retry + return false, "pending", nil + case apiErr.StatusCode == 404 || // 404 - service was deleted on the previous attempt + strings.Contains(apiErr.Message, "Unable to find object with id"): // xmlrpc returns 200 instead of 404 + return true, "complete", nil + default: + // Any other error is unexpected. Abort + return false, "", err + } + } + + return true, "complete", nil + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + + if err != nil { + return fmt.Errorf("Error deleting service: %s", err) + } + + return nil +} + +func resourceIBMLbServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + svcID, _ := strconv.Atoi(d.Id()) + + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess). + Id(svcID). + Mask("id"). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return true, nil +} + +func getHealthCheckTypeId(sess *session.Session, healthCheckTypeName string) (int, error) { + healthCheckTypes, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckTypeService(sess). + Mask("id"). + Filter(filter.Build( + filter.Path("keyname").Eq(healthCheckTypeName))). + Limit(1). + GetAllObjects() + + if err != nil { + return -1, err + } + + if len(healthCheckTypes) < 1 { + return -1, fmt.Errorf("Invalid health check type: %s", healthCheckTypeName) + } + + return *healthCheckTypes[0].Id, nil +} + +func updateLoadBalancerService(sess *session.Session, vipID int, vip *datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID). + EditObject(vip) + + if apiErr, ok := err.(sl.Error); ok { + // The LB is busy with another transaction. Retry + if apiErr.Exception == "SoftLayer_Exception_Network_Timeout" || + strings.Contains(apiErr.Message, "There was a problem saving your configuration to the load balancer.") || + strings.Contains(apiErr.Message, "The selected group could not be removed from the load balancer.") || + strings.Contains(apiErr.Message, "The resource '480' is already in use.") { + return false, "pending", nil + } + + // Any other error is unexpected. 
Abort + return false, "", err + } + + return true, "complete", nil + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_service_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_service_group.go new file mode 100644 index 00000000000..c800ce11ade --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_service_group.go @@ -0,0 +1,327 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + "strconv" + + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMLbServiceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbServiceGroupCreate, + Read: resourceIBMLbServiceGroupRead, + Update: resourceIBMLbServiceGroupUpdate, + Delete: resourceIBMLbServiceGroupDelete, + Exists: resourceIBMLbServiceGroupExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "virtual_server_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Virtual server ID", + }, + "service_group_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Service group ID", + }, + "load_balancer_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Loadbalancer ID", + }, + "allocation": { + Type: schema.TypeInt, + Required: true, + Description: "Allocation type", + }, + "port": { + Type: schema.TypeInt, + Required: true, + Description: "Port number", + }, + "routing_method": { + Type: schema.TypeString, + Required: true, + Description: "Routing method", + }, + "routing_type": { + Type: schema.TypeString, + Required: true, + Description: "Routing type", + }, + "timeout": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateLBTimeout, + Description: "Timeout value", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags", + }, + }, + } +} + +func resourceIBMLbServiceGroupCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipID := d.Get("load_balancer_id").(int) + + routingMethodID, err := getRoutingMethodId(sess, d.Get("routing_method").(string)) + if err != nil { + return err + } + + routingTypeID, err := getRoutingTypeId(sess, d.Get("routing_type").(string)) + if err != nil { + return err + } + + timeout := d.Get("timeout").(int) + + vip := datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{ + + VirtualServers: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{{ + Allocation: sl.Int(d.Get("allocation").(int)), + Port: sl.Int(d.Get("port").(int)), + ServiceGroups: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group{{ + RoutingMethodId: &routingMethodID, + RoutingTypeId: &routingTypeID, + }}, + }}, + } + + if timeout > 0 { + vip.VirtualServers[0].ServiceGroups[0].Timeout = sl.Int(timeout) + } 
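+	// timeout is optional; it is only attached when configured (> 0), since
+	// zero is treated as the "not set" sentinel for this attribute here.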
+ + log.Println("[INFO] Creating load balancer service group") + + err = updateLoadBalancerService(sess.SetRetries(0), vipID, &vip) + + if err != nil { + return fmt.Errorf("Error creating load balancer service group: %s", err) + } + + // Retrieve the newly created object, to obtain its ID + vs, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID). + Filter(filter.New(filter.Path("virtualServers.port").Eq(d.Get("port"))).Build()). + Mask("id,serviceGroups[id]"). + GetVirtualServers() + + if err != nil { + return fmt.Errorf("Error retrieving load balancer: %s", err) + } + + d.SetId(strconv.Itoa(*vs[0].Id)) + d.Set("service_group_id", vs[0].ServiceGroups[0].Id) + + log.Printf("[INFO] Load Balancer Service Group ID: %s", d.Id()) + + return resourceIBMLbServiceGroupRead(d, meta) +} +func resourceIBMLbServiceGroupUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipID := d.Get("load_balancer_id").(int) + vsID, _ := strconv.Atoi(d.Id()) + sgID := d.Get("service_group_id").(int) + + routingMethodId, err := getRoutingMethodId(sess, d.Get("routing_method").(string)) + if err != nil { + return err + } + + routingTypeId, err := getRoutingTypeId(sess, d.Get("routing_type").(string)) + if err != nil { + return err + } + + vip := datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{ + + VirtualServers: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{{ + Id: &vsID, + Allocation: sl.Int(d.Get("allocation").(int)), + Port: sl.Int(d.Get("port").(int)), + + ServiceGroups: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group{{ + Id: &sgID, + RoutingMethodId: &routingMethodId, + RoutingTypeId: &routingTypeId, + }}, + }}, + } + + if d.HasChange("timeout") { + timeout := d.Get("timeout").(int) + if timeout > 0 { + vip.VirtualServers[0].ServiceGroups[0].Timeout = sl.Int(timeout) + } + + } + + log.Println("[INFO] Updating load balancer service group") + + err = updateLoadBalancerService(sess.SetRetries(0), vipID, &vip) + + if err != nil { + return fmt.Errorf("Error creating load balancer service group: %s", err) + } + + return resourceIBMLbServiceGroupRead(d, meta) +} + +func resourceIBMLbServiceGroupRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vsID, _ := strconv.Atoi(d.Id()) + + vs, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService(sess). + Id(vsID). + Mask("allocation,port,serviceGroups[id,routingMethod[keyname],routingType[keyname], timeout],virtualIpAddressId"). 
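+		// d.Id() holds the virtual *server* ID; the nested mask also exposes
+		// the service group hanging off it, which Read mirrors back into the
+		// computed service_group_id attribute.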
+ GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving load balancer: %s", err) + } + + d.Set("allocation", vs.Allocation) + d.Set("port", vs.Port) + d.Set("routing_method", vs.ServiceGroups[0].RoutingMethod.Keyname) + d.Set("routing_type", vs.ServiceGroups[0].RoutingType.Keyname) + d.Set("load_balancer_id", vs.VirtualIpAddressId) + d.Set("service_group_id", vs.ServiceGroups[0].Id) + d.Set("timeout", vs.ServiceGroups[0].Timeout) + + return nil +} + +func resourceIBMLbServiceGroupDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vsID, _ := strconv.Atoi(d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService(sess). + Id(vsID). + DeleteObject() + + if apiErr, ok := err.(sl.Error); ok { + switch { + case apiErr.Exception == "SoftLayer_Exception_Network_Timeout" || + strings.Contains(apiErr.Message, "There was a problem saving your configuration to the load balancer.") || + strings.Contains(apiErr.Message, "The selected group could not be removed from the load balancer.") || + strings.Contains(apiErr.Message, "An error has occurred while processing your request.") || + strings.Contains(apiErr.Message, "The resource '480' is already in use."): + // The LB is busy with another transaction. Retry + return false, "pending", nil + case apiErr.StatusCode == 404: + // 404 - service was deleted on the previous attempt + return true, "complete", nil + default: + // Any other error is unexpected. Abort + return false, "", err + } + } + + return true, "complete", nil + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + + if err != nil { + return fmt.Errorf("Error deleting service: %s", err) + } + + return nil +} + +func resourceIBMLbServiceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + vsID, _ := strconv.Atoi(d.Id()) + + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService(sess). + Id(vsID). + Mask("id"). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return true, nil +} + +func getRoutingTypeId(sess *session.Session, routingTypeName string) (int, error) { + routingTypes, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerRoutingTypeService(sess). + Mask("id"). + Filter(filter.Build( + filter.Path("keyname").Eq(routingTypeName))). + Limit(1). + GetAllObjects() + + if err != nil { + return -1, err + } + + if len(routingTypes) < 1 { + return -1, fmt.Errorf("Invalid routing type: %s", routingTypeName) + } + + return *routingTypes[0].Id, nil +} + +func getRoutingMethodId(sess *session.Session, routingMethodName string) (int, error) { + routingMethods, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerRoutingMethodService(sess). + Mask("id"). + Filter(filter.Build( + filter.Path("keyname").Eq(routingMethodName))). + Limit(1). 
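+		// Keyname -> ID lookup: filter server-side on the keyname and cap the
+		// result at one row, so GetAllObjects returns at most a single match
+		// (same pattern as getRoutingTypeId and getHealthCheckTypeId).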
+ GetAllObjects() + + if err != nil { + return -1, err + } + + if len(routingMethods) < 1 { + return -1, fmt.Errorf("Invalid routing method: %s", routingMethodName) + } + + return *routingMethods[0].Id, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx.go new file mode 100644 index 00000000000..b3c2d833e5b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx.go @@ -0,0 +1,627 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "errors" + "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + PACKAGE_ID_APPLICATION_DELIVERY_CONTROLLER = 192 + DELIMITER = "_" +) + +func resourceIBMLbVpx() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbVpxCreate, + Read: resourceIBMLbVpxRead, + Update: resourceIBMLbVpxUpdate, + Delete: resourceIBMLbVpxDelete, + Exists: resourceIBMLbVpxExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Name", + }, + + "type": { + Type: schema.TypeString, + Computed: true, + Description: "Type of the VPX", + }, + + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Datacenter name", + }, + + "speed": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Speed value", + }, + + "version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "version info", + }, + + "plan": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Plan info", + }, + + "ip_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "IP address count", + }, + + "public_vlan_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Piblic VLAN id", + }, + + "public_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: "Public subnet", + }, + + "private_vlan_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Private VLAN id", + }, + + "private_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: "Private subnet", + }, + + "vip_pool": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of VIP ids", + }, + + "management_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: "management IP address", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of the tags", + }, + }, + } +} + +func getSubnetId(subnet string, meta interface{}) (int, error) { + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + + 
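+	// The subnet argument arrives as "<network>/<cidr>", e.g. "10.40.30.0/26"
+	// (illustrative); the two halves feed the networkIdentifier/cidr filters.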
subnetInfo := strings.Split(subnet, "/") + if len(subnetInfo) != 2 { + return 0, fmt.Errorf( + "Unable to parse the provided subnet: %s", subnet) + } + + networkIdentifier := subnetInfo[0] + cidr := subnetInfo[1] + + subnets, err := service. + Mask("id"). + Filter( + filter.Build( + filter.Path("subnets.cidr").Eq(cidr), + filter.Path("subnets.networkIdentifier").Eq(networkIdentifier), + ), + ). + GetSubnets() + + if err != nil { + return 0, fmt.Errorf("Error looking up Subnet: %s", err) + } + + if len(subnets) < 1 { + return 0, fmt.Errorf( + "Unable to locate a subnet matching the provided subnet: %s", subnet) + } + + return *subnets[0].Id, nil +} + +func getVPXVersion(id int, sess *session.Session) (string, error) { + service := services.GetNetworkApplicationDeliveryControllerService(sess) + getObjectResult, err := service.Id(id).Mask("description").GetObject() + + if err != nil { + return "", fmt.Errorf("Error retrieving VPX version: %s", err) + } + + return strings.Split(*getObjectResult.Description, " ")[3], nil +} + +func getVPXPriceItemKeyName(version string, speed int, plan string) string { + name := "NETSCALER_VPX" + speedMeasurements := "MBPS" + + floatVersion, err := strconv.ParseFloat(version, 10) + if err != nil { + return ("Invalid Version :" + version) + } + + newVersion := strconv.FormatFloat(floatVersion, 'f', -1, 64) + + versionReplaced := strings.Replace(newVersion, ".", DELIMITER, -1) + + speedString := strconv.Itoa(speed) + speedMeasurements + + return strings.Join([]string{name, versionReplaced, speedString, strings.ToUpper(plan)}, DELIMITER) +} + +func getPublicIpItemKeyName(ipCount int) string { + + var name string + + if ipCount == 1 { + name = "STATIC_PUBLIC_IP_ADDRESS" + } else { + name = "STATIC_PUBLIC_IP_ADDRESSES" + } + ipCountString := strconv.Itoa(ipCount) + + return strings.Join([]string{ipCountString, name}, DELIMITER) +} + +func findVPXPriceItems(version string, speed int, plan string, ipCount int, meta interface{}) ([]datatypes.Product_Item_Price, error) { + sess := meta.(ClientSession).SoftLayerSession() + + // Get VPX package type. 
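+	// Price selection below is by key name: getVPXPriceItemKeyName builds
+	// e.g. "NETSCALER_VPX_10_5_200MBPS_STANDARD" for version "10.5", speed
+	// 200 and plan "standard", and getPublicIpItemKeyName builds e.g.
+	// "2_STATIC_PUBLIC_IP_ADDRESSES" for ip_count 2 (illustrative values).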
+ productPackage, err := product.GetPackageByType(sess, "ADDITIONAL_SERVICES_APPLICATION_DELIVERY_APPLIANCE") + if err != nil { + return []datatypes.Product_Item_Price{}, err + } + + // Get VPX product items + items, err := product.GetPackageProducts(sess, *productPackage.Id) + if err != nil { + return []datatypes.Product_Item_Price{}, err + } + + // Get VPX and static IP items + nadcKey := getVPXPriceItemKeyName(version, speed, plan) + ipKey := getPublicIpItemKeyName(ipCount) + + var nadcItemPrice, ipItemPrice datatypes.Product_Item_Price + + for _, item := range items { + itemKey := item.KeyName + if strings.Contains(*itemKey, nadcKey) { + nadcItemPrice = item.Prices[0] + } + if *itemKey == ipKey { + ipItemPrice = item.Prices[0] + } + } + + var errorMessages []string + + if nadcItemPrice.Id == nil { + errorMessages = append(errorMessages, "VPX version, speed or plan have incorrect values") + } + + if ipItemPrice.Id == nil { + errorMessages = append(errorMessages, "IP quantity value is incorrect") + } + + if len(errorMessages) > 0 { + err = errors.New(strings.Join(errorMessages, "\n")) + return []datatypes.Product_Item_Price{}, err + } + + return []datatypes.Product_Item_Price{ + { + Id: nadcItemPrice.Id, + }, + { + Id: ipItemPrice.Id, + }, + }, nil +} + +func findVPXByOrderId(orderId int, meta interface{}) (datatypes.Network_Application_Delivery_Controller, error) { + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + vpxs, err := service. + Filter( + filter.Build( + filter.Path("applicationDeliveryControllers.billingItem.orderItem.order.id").Eq(orderId), + ), + ).GetApplicationDeliveryControllers() + if err != nil { + return datatypes.Network_Application_Delivery_Controller{}, "", err + } + + if len(vpxs) == 1 { + return vpxs[0], "complete", nil + } else if len(vpxs) == 0 { + return datatypes.Network_Application_Delivery_Controller{}, "pending", nil + } else { + return nil, "", fmt.Errorf("Expected one VPX: %s", err) + } + }, + Timeout: 45 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Application_Delivery_Controller{}, err + } + + var result, ok = pendingResult.(datatypes.Network_Application_Delivery_Controller) + + if ok { + return result, nil + } + + return datatypes.Network_Application_Delivery_Controller{}, + fmt.Errorf("Cannot find Application Delivery Controller with order id '%d'", orderId) +} + +func prepareHardwareOptions(d *schema.ResourceData, meta interface{}) ([]datatypes.Hardware, error) { + hardwareOpts := make([]datatypes.Hardware, 1) + publicVlanId := d.Get("public_vlan_id").(int) + publicSubnet := d.Get("public_subnet").(string) + + if publicVlanId > 0 || len(publicSubnet) > 0 { + hardwareOpts[0].PrimaryNetworkComponent = &datatypes.Network_Component{} + } + + if publicVlanId > 0 { + hardwareOpts[0].PrimaryNetworkComponent.NetworkVlanId = &publicVlanId + } + + if len(publicSubnet) > 0 { + primarySubnetId, err := getSubnetId(publicSubnet, meta) + if err != nil { + return nil, fmt.Errorf("Error creating network application delivery controller: %s", err) + } + hardwareOpts[0].PrimaryNetworkComponent.NetworkVlan = &datatypes.Network_Vlan{ + PrimarySubnetId: &primarySubnetId, + } + } + + privateVlanId := d.Get("private_vlan_id").(int) + privateSubnet := 
d.Get("private_subnet").(string) + if privateVlanId > 0 || len(privateSubnet) > 0 { + hardwareOpts[0].PrimaryBackendNetworkComponent = &datatypes.Network_Component{} + } + + if privateVlanId > 0 { + hardwareOpts[0].PrimaryBackendNetworkComponent.NetworkVlanId = &privateVlanId + } + + if len(privateSubnet) > 0 { + primarySubnetId, err := getSubnetId(privateSubnet, meta) + if err != nil { + return nil, fmt.Errorf("Error creating network application delivery controller: %s", err) + } + hardwareOpts[0].PrimaryBackendNetworkComponent.NetworkVlan = &datatypes.Network_Vlan{ + PrimarySubnetId: &primarySubnetId, + } + } + return hardwareOpts, nil +} + +func resourceIBMLbVpxCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + NADCService := services.GetNetworkApplicationDeliveryControllerService(sess) + productOrderService := services.GetProductOrderService(sess.SetRetries(0)) + var err error + + opts := datatypes.Container_Product_Order{ + PackageId: sl.Int(PACKAGE_ID_APPLICATION_DELIVERY_CONTROLLER), + Quantity: sl.Int(1), + } + + opts.Prices, err = findVPXPriceItems( + d.Get("version").(string), + d.Get("speed").(int), + d.Get("plan").(string), + d.Get("ip_count").(int), + meta) + + if err != nil { + return fmt.Errorf("Error Cannot find Application Delivery Controller prices '%s'.", err) + } + + datacenter := d.Get("datacenter").(string) + + if len(datacenter) > 0 { + datacenter, err := location.GetDatacenterByName(sess, datacenter, "id") + if err != nil { + return fmt.Errorf("Error creating network application delivery controller: %s", err) + } + opts.Location = sl.String(strconv.Itoa(*datacenter.Id)) + } + + opts.Hardware, err = prepareHardwareOptions(d, meta) + if err != nil { + return fmt.Errorf("Error Cannot get hardware options '%s'.", err) + } + + log.Println("[INFO] Creating network application delivery controller") + + receipt, err := productOrderService.PlaceOrder(&opts, sl.Bool(false)) + + if err != nil { + return fmt.Errorf("Error creating network application delivery controller: %s", err) + } + + // Wait VPX provisioning + VPX, err := findVPXByOrderId(*receipt.OrderId, meta) + + if err != nil { + return fmt.Errorf("Error creating network application delivery controller: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *VPX.Id)) + + log.Printf("[INFO] Netscaler VPX ID: %s", d.Id()) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + // Wait Virtual IP provisioning + IsVipReady := false + + for vipWaitCount := 0; vipWaitCount < 270; vipWaitCount++ { + getObjectResult, err := NADCService.Id(id).Mask("subnets[ipAddresses],password[password]").GetObject() + if err != nil { + return fmt.Errorf("Error retrieving network application delivery controller: %s", err) + } + + ipCount := 0 + if getObjectResult.Password != nil && getObjectResult.Password.Password != nil && len(*getObjectResult.Password.Password) > 0 && + getObjectResult.Subnets != nil && len(getObjectResult.Subnets) > 0 && getObjectResult.Subnets[0].IpAddresses != nil { + ipCount = len(getObjectResult.Subnets[0].IpAddresses) + } + if ipCount > 0 { + IsVipReady = true + break + } + log.Printf("[INFO] Wait 10 seconds for Virtual IP provisioning on Netscaler VPX ID: %d", id) + time.Sleep(time.Second * 10) + } + + if !IsVipReady { + return fmt.Errorf("Failed to create VIPs for Netscaler VPX ID: %d", id) + } + + // Wait while VPX service is initializing. 
GetLoadBalancers() internally calls the REST API of VPX and returns
+ // an error "Could not connect to host" if the REST API is not available.
+ IsRESTReady := false
+
+ for restWaitCount := 0; restWaitCount < 270; restWaitCount++ {
+ _, err := NADCService.Id(id).GetLoadBalancers()
+ // GetLoadBalancers returns an error "There was a problem processing the reply from the
+ // application tier. Please contact development." if the VPX version is 10.5.
+ if err == nil || !strings.Contains(err.Error(), "Could not connect to host") {
+ IsRESTReady = true
+ break
+ }
+ log.Printf("[INFO] Wait 10 seconds for VPX(%d) REST Service", id)
+ time.Sleep(time.Second * 10)
+ }
+
+ if !IsRESTReady {
+ return fmt.Errorf("Failed to initialize VPX REST Service for Netscaler VPX ID: %d", id)
+ }
+
+ // Wait additional buffer time for VPX service.
+ time.Sleep(time.Minute)
+
+ return resourceIBMLbVpxRead(d, meta)
+}
+
+func resourceIBMLbVpxRead(d *schema.ResourceData, meta interface{}) error {
+ sess := meta.(ClientSession).SoftLayerSession()
+
+ service := services.GetNetworkApplicationDeliveryControllerService(sess)
+ id, err := strconv.Atoi(d.Id())
+ if err != nil {
+ return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+ }
+
+ getObjectResult, err := service.
+ Id(id).
+ Mask("id,name,type[name],datacenter,networkVlans[primaryRouter],networkVlans[primarySubnets],subnets[ipAddresses],description,managementIpAddress").
+ GetObject()
+
+ if err != nil {
+ return fmt.Errorf("Error retrieving network application delivery controller: %s", err)
+ }
+
+ d.Set("name", *getObjectResult.Name)
+ d.Set("type", *getObjectResult.Type.Name)
+ if getObjectResult.Datacenter != nil {
+ d.Set("datacenter", *getObjectResult.Datacenter.Name)
+ }
+
+ for _, vlan := range getObjectResult.NetworkVlans {
+ if vlan.PrimaryRouter != nil && *vlan.PrimaryRouter.Hostname != "" {
+ isFcr := strings.HasPrefix(*vlan.PrimaryRouter.Hostname, "fcr")
+ isBcr := strings.HasPrefix(*vlan.PrimaryRouter.Hostname, "bcr")
+ if isFcr {
+ d.Set("public_vlan_id", *vlan.Id)
+ if vlan.PrimarySubnets != nil && len(vlan.PrimarySubnets) > 0 {
+ ipAddress := *vlan.PrimarySubnets[0].NetworkIdentifier
+ d.Set(
+ "public_subnet",
+ fmt.Sprintf("%s/%d", ipAddress, *vlan.PrimarySubnets[0].Cidr),
+ )
+ }
+ }
+
+ if isBcr {
+ d.Set("private_vlan_id", *vlan.Id)
+ if vlan.PrimarySubnets != nil && len(vlan.PrimarySubnets) > 0 {
+ ipAddress := *vlan.PrimarySubnets[0].NetworkIdentifier
+ d.Set(
+ "private_subnet",
+ fmt.Sprintf("%s/%d", ipAddress, *vlan.PrimarySubnets[0].Cidr),
+ )
+ }
+ }
+ }
+ }
+
+ vips := make([]string, 0)
+ ipCount := 0
+ for i, subnet := range getObjectResult.Subnets {
+ for _, ipAddressObj := range subnet.IpAddresses {
+ vips = append(vips, *ipAddressObj.IpAddress)
+ if i == 0 {
+ ipCount++
+ }
+ }
+ }
+
+ d.Set("vip_pool", vips)
+ d.Set("ip_count", ipCount)
+ d.Set("management_ip_address", *getObjectResult.ManagementIpAddress)
+
+ description := *getObjectResult.Description
+ r, _ := regexp.Compile(" [0-9]+Mbps")
+ speedStr := r.FindString(description)
+ r, _ = regexp.Compile("[0-9]+")
+ speed, err := strconv.Atoi(r.FindString(speedStr))
+ if err == nil && speed > 0 {
+ d.Set("speed", speed)
+ }
+
+ r, _ = regexp.Compile(" VPX [0-9]+\\.[0-9]+ ")
+ versionStr := r.FindString(description)
+ r, _ = regexp.Compile("[0-9]+\\.[0-9]+")
+ version := r.FindString(versionStr)
+ if version != "" {
+ d.Set("version", version)
+ }
+
+ r, _ = regexp.Compile(" [A-Za-z]+$")
+ planStr := r.FindString(description)
+ r, _ = regexp.Compile("[A-Za-z]+$")
+
plan := r.FindString(planStr) + if plan != "" { + d.Set("plan", plan) + } + + return nil +} + +func resourceIBMLbVpxUpdate(d *schema.ResourceData, meta interface{}) error { + //Only tags are updated and that too locally hence nothing to validate and update in terms of real API at this point + return nil +} + +func resourceIBMLbVpxDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkApplicationDeliveryControllerService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + billingItem, err := service.Id(id).GetBillingItem() + if err != nil { + return fmt.Errorf("Error deleting network application delivery controller: %s", err) + } + + if *billingItem.Id > 0 { + billingItemService := services.GetBillingItemService(sess) + deleted, err := billingItemService.Id(*billingItem.Id).CancelService() + if err != nil { + return fmt.Errorf("Error deleting network application delivery controller: %s", err) + } + + if deleted { + return nil + } + } + + return nil +} + +func resourceIBMLbVpxExists(d *schema.ResourceData, meta interface{}) (bool, error) { + service := services.GetNetworkApplicationDeliveryControllerService(meta.(ClientSession).SoftLayerSession()) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + nadc, err := service.Mask("id").Id(id).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return nadc.Id != nil && *nadc.Id == id, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx_ha.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx_ha.go new file mode 100644 index 00000000000..7f6fec96aac --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx_ha.go @@ -0,0 +1,355 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/minsikl/netscaler-nitro-go/client" + dt "github.com/minsikl/netscaler-nitro-go/datatypes" + "github.com/minsikl/netscaler-nitro-go/op" +) + +func resourceIBMLbVpxHa() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbVpxHaCreate, + Read: resourceIBMLbVpxHaRead, + Update: resourceIBMLbVpxHaUpdate, + Delete: resourceIBMLbVpxHaDelete, + Exists: resourceIBMLbVpxHaExists, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + + "primary_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "primary ID", + }, + "secondary_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Secondary ID", + }, + "stay_secondary": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "Boolean value for stay secondary", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Tags set for the resource", + }, + }, + } +} + +func configureHA(nClient1 *client.NitroClient, nClient2 *client.NitroClient, staySecondary bool) error { + // 1. 
VPX2 : Sync password + systemuserReq2 := dt.SystemuserReq{ + Systemuser: &dt.Systemuser{ + Username: op.String("root"), + Password: op.String(nClient1.Password), + }, + } + err := nClient2.Update(&systemuserReq2) + if err != nil { + return err + } + nClient2.Password = nClient1.Password + + // 2. VPX1 : Register hanode + hanodeReq1 := dt.HanodeReq{ + Hanode: &dt.Hanode{ + Id: op.String("2"), + Ipaddress: op.String(nClient2.IpAddress), + }, + } + + err = nClient1.Add(&hanodeReq1) + if err != nil { + return err + } + + // Wait 5 secs to make VPX1 a primary node. + time.Sleep(time.Second * 5) + + // 3. VPX2 : Register hanode + hanodeReq2 := dt.HanodeReq{ + Hanode: &dt.Hanode{ + Id: op.String("2"), + Ipaddress: op.String(nClient1.IpAddress), + }, + } + err = nClient2.Add(&hanodeReq2) + if err != nil { + return err + } + + // 4. VPX2 : Update STAYSECONDARY + stay := dt.HanodeReq{Hanode: &dt.Hanode{}} + if staySecondary { + stay.Hanode.Hastatus = op.String("STAYSECONDARY") + } else { + stay.Hanode.Hastatus = op.String("ENABLE") + } + err = nClient2.Update(&stay) + if err != nil { + return err + } + + // 5. VPX1 : Register rpcnode + nsrpcnode1 := dt.NsrpcnodeReq{ + Nsrpcnode: &dt.Nsrpcnode{ + Ipaddress: op.String(nClient1.IpAddress), + Password: op.String(nClient1.Password), + }, + } + err = nClient1.Update(&nsrpcnode1) + if err != nil { + return err + } + nsrpcnode1.Nsrpcnode.Ipaddress = op.String(nClient2.IpAddress) + err = nClient1.Update(&nsrpcnode1) + if err != nil { + return err + } + + // 6. VPX2 : Register rpcnode + nsrpcnode2 := dt.NsrpcnodeReq{ + Nsrpcnode: &dt.Nsrpcnode{ + Ipaddress: op.String(nClient1.IpAddress), + Password: op.String(nClient1.Password), + }, + } + err = nClient2.Update(&nsrpcnode2) + if err != nil { + return err + } + nsrpcnode2.Nsrpcnode.Ipaddress = op.String(nClient2.IpAddress) + err = nClient2.Update(&nsrpcnode2) + if err != nil { + return err + } + + // 7. VPX1 : Sync files + hafiles := dt.HafilesReq{ + Hafiles: &dt.Hafiles{ + Mode: []string{"all"}, + }, + } + err = nClient1.Add(&hafiles, "action=sync") + if err != nil { + return err + } + + return nil +} + +func deleteHA(nClient1 *client.NitroClient, nClient2 *client.NitroClient) error { + // 1. VPX2 : Delete hanode + err := nClient2.Delete(&dt.HanodeReq{}, "2") + if err != nil { + return err + } + + // 2. 
VPX1 : Delete hanode
+ err = nClient1.Delete(&dt.HanodeReq{}, "2")
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func parseHAId(id string) (int, int, error) {
+ if len(id) < 1 {
+ return 0, 0, fmt.Errorf("Failed to parse id : Unable to get netscaler Ids")
+ }
+ idList := strings.Split(id, ":")
+ if len(idList) != 2 || len(idList[0]) < 1 || len(idList[1]) < 1 {
+ return 0, 0, fmt.Errorf("Failed to parse id : Invalid HA ID")
+ }
+ primaryId, err := strconv.Atoi(idList[0])
+ if err != nil {
+ return 0, 0, fmt.Errorf("Failed to parse id : Unable to get a primaryId %s", err)
+ }
+ secondaryId, err := strconv.Atoi(idList[1])
+ if err != nil {
+ return 0, 0, fmt.Errorf("Failed to parse id : Unable to get a secondaryId %s", err)
+ }
+ return primaryId, secondaryId, nil
+}
+
+func resourceIBMLbVpxHaCreate(d *schema.ResourceData, meta interface{}) error {
+ primaryId := d.Get("primary_id").(int)
+ secondaryId := d.Get("secondary_id").(int)
+ staySecondary := false
+ if stay, ok := d.GetOk("stay_secondary"); ok {
+ staySecondary = stay.(bool)
+ }
+
+ nClientPrimary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+ if err != nil {
+ return fmt.Errorf("Error getting primary netscaler information ID: %d", primaryId)
+ }
+
+ nClientSecondary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), secondaryId)
+ if err != nil {
+ return fmt.Errorf("Error getting secondary netscaler information ID: %d", secondaryId)
+ }
+
+ err = configureHA(nClientPrimary, nClientSecondary, staySecondary)
+ if err != nil {
+ return fmt.Errorf("Error configuring HA %s", err.Error())
+ }
+
+ d.SetId(fmt.Sprintf("%d:%d", primaryId, secondaryId))
+
+ log.Printf("[INFO] Netscaler HA ID: %s", d.Id())
+
+ return resourceIBMLbVpxHaRead(d, meta)
+}
+
+func resourceIBMLbVpxHaRead(d *schema.ResourceData, meta interface{}) error {
+ primaryId, secondaryId, err := parseHAId(d.Id())
+ if err != nil {
+ return fmt.Errorf("Error reading HA %s", err.Error())
+ }
+
+ nClientPrimary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+ if err != nil {
+ return fmt.Errorf("Error getting primary netscaler information ID: %d", primaryId)
+ }
+
+ nClientSecondary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), secondaryId)
+ if err != nil {
+ return fmt.Errorf("Error getting secondary netscaler information ID: %d", secondaryId)
+ }
+
+ nClientSecondary.Password = nClientPrimary.Password
+
+ res := dt.HanodeRes{}
+ err = nClientSecondary.Get(&res, "")
+ if err != nil {
+ fmt.Printf("Error getting hanode information : %s", err.Error())
+ }
+ staySecondary := false
+ if *res.Hanode[0].Hastatus == "STAYSECONDARY" {
+ staySecondary = true
+ }
+
+ d.Set("primary_id", primaryId)
+ d.Set("secondary_id", secondaryId)
+ d.Set("stay_secondary", staySecondary)
+
+ return nil
+}
+
+func resourceIBMLbVpxHaUpdate(d *schema.ResourceData, meta interface{}) error {
+ primaryId, secondaryId, err := parseHAId(d.Id())
+ if err != nil {
+ return fmt.Errorf("Error updating HA %s", err.Error())
+ }
+
+ nClientPrimary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+ if err != nil {
+ return fmt.Errorf("Error getting primary netscaler information ID: %d", primaryId)
+ }
+
+ nClientSecondary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), secondaryId)
+ if err != nil {
+ return fmt.Errorf("Error getting secondary netscaler information ID: %d", secondaryId)
+ }
+
+ nClientSecondary.Password = nClientPrimary.Password
+
+ staySecondary := false
+ if
stay, ok := d.GetOk("stay_secondary"); ok {
+ staySecondary = stay.(bool)
+ }
+
+ stay := dt.HanodeReq{Hanode: &dt.Hanode{}}
+ if staySecondary {
+ stay.Hanode.Hastatus = op.String("STAYSECONDARY")
+ } else {
+ stay.Hanode.Hastatus = op.String("ENABLE")
+ }
+
+ err = nClientSecondary.Update(&stay)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func resourceIBMLbVpxHaDelete(d *schema.ResourceData, meta interface{}) error {
+ primaryId, secondaryId, err := parseHAId(d.Id())
+ if err != nil {
+ return fmt.Errorf("Error deleting HA %s", err.Error())
+ }
+ nClientPrimary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+ if err != nil {
+ return fmt.Errorf("Error getting primary netscaler information ID: %d", primaryId)
+ }
+ nClientSecondary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), secondaryId)
+ if err != nil {
+ return fmt.Errorf("Error getting secondary netscaler information ID: %d", secondaryId)
+ }
+
+ secondaryPassword := nClientSecondary.Password
+ nClientSecondary.Password = nClientPrimary.Password
+ err = deleteHA(nClientPrimary, nClientSecondary)
+ if err != nil {
+ return fmt.Errorf("Error deleting HA %s", err.Error())
+ }
+
+ // Restore password of the secondary VPX
+ systemuserReq := dt.SystemuserReq{
+ Systemuser: &dt.Systemuser{
+ Username: op.String("root"),
+ Password: op.String(secondaryPassword),
+ },
+ }
+ err = nClientSecondary.Update(&systemuserReq)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func resourceIBMLbVpxHaExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ primaryId, _, err := parseHAId(d.Id())
+ if err != nil {
+ return false, fmt.Errorf("Error reading HA %s", err.Error())
+ }
+
+ nClientPrimary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+ if err != nil {
+ return false, fmt.Errorf("Error getting primary netscaler information ID in Exists: %d", primaryId)
+ }
+
+ res := dt.HanodeRes{}
+ err = nClientPrimary.Get(&res, "")
+ if err != nil {
+ return false, fmt.Errorf("Error getting hanode information in Exists: %s", err.Error())
+ }
+
+ if len(res.Hanode) < 2 {
+ return false, nil
+ }
+
+ return true, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx_service.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx_service.go new file mode 100644 index 00000000000..77e638efff2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx_service.go @@ -0,0 +1,699 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
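+//
+// NOTE: a VPX service is tracked with a composite Terraform ID of the form
+// "<nadcId>:<vipName>:<serviceName>" (see parseServiceId below); the VIP it
+// belongs to uses "<nadcId>:<vipName>". For example (hypothetical values), a
+// service "web80" on VIP "test_vip" of NADC 12345 would get the ID
+// "12345:test_vip:web80". Every CRUD entry point first resolves the VPX
+// version via getVPXVersion and dispatches to the *101 functions (SoftLayer
+// API) for VPX 10.1, or to the *105 functions (Nitro API) otherwise.
+//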
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + dt "github.com/minsikl/netscaler-nitro-go/datatypes" + "github.com/minsikl/netscaler-nitro-go/op" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/helpers/network" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +var ( + // Healthcheck mapping tables + healthCheckMapFromSLtoVPX105 = map[string]string{ + "HTTP": "http", + "TCP": "tcp", + "ICMP": "ping", + "icmp": "ping", + "DNS": "dns", + } + + healthCheckMapFromVPX105toSL = map[string]string{ + "http": "HTTP", + "tcp": "TCP", + "ping": "ICMP", + "dns": "DNS", + } +) + +func resourceIBMLbVpxService() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbVpxServiceCreate, + Read: resourceIBMLbVpxServiceRead, + Update: resourceIBMLbVpxServiceUpdate, + Delete: resourceIBMLbVpxServiceDelete, + Exists: resourceIBMLbVpxServiceExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "vip_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "VIP id", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "name", + }, + + "destination_ip_address": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Destination IP Address", + }, + + "destination_port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Destination Port number", + }, + + "weight": { + Type: schema.TypeInt, + Required: true, + Description: "Weight value", + }, + + "connection_limit": { + Type: schema.TypeInt, + Required: true, + Description: "Number of connections limit", + }, + + "health_check": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if strings.ToUpper(o) == strings.ToUpper(n) { + return true + } + return false + }, + Description: "Health check info", + }, + + "usip": { + Type: schema.TypeString, + Optional: true, + Default: "NO", + Description: "usip info", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "list of tags associated with the resource", + }, + }, + } +} + +func parseServiceId(id string) (string, int, string, error) { + parts := strings.Split(id, ":") + vipId := parts[1] + nacdId, err := strconv.Atoi(parts[0]) + if err != nil { + return "", -1, "", fmt.Errorf("Error parsing vip id: %s", err) + } + + serviceName := "" + if len(parts) > 2 { + serviceName = parts[2] + } + + return vipId, nacdId, serviceName, nil +} + +func updateVpxService(sess *session.Session, nadcId int, lbVip *datatypes.Network_LoadBalancer_VirtualIpAddress) (bool, error) { + service := services.GetNetworkApplicationDeliveryControllerService(sess) + serviceName := *lbVip.Services[0].Name + successFlag := true + var err error + for count := 0; count < 10; count++ { + successFlag, err = service.Id(nadcId).UpdateLiveLoadBalancer(lbVip) + log.Printf("[INFO] Updating LoadBalancer Service %s successFlag : %t", serviceName, successFlag) + + if err != nil && strings.Contains(err.Error(), "Operation already in progress") { + log.Printf("[INFO] Updating LoadBalancer Service %s Error : %s. 
Retry in 10 secs", serviceName, err.Error()) + time.Sleep(time.Second * 10) + continue + } + + break + } + return successFlag, err +} + +func resourceIBMLbVpxServiceCreate(d *schema.ResourceData, meta interface{}) error { + vipId := d.Get("vip_id").(string) + _, nadcId, _, err := parseServiceId(vipId) + + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error creating Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceCreate101(d, meta) + } + + return resourceIBMLbVpxServiceCreate105(d, meta) +} + +func resourceIBMLbVpxServiceRead(d *schema.ResourceData, meta interface{}) error { + _, nadcId, _, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error Reading Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceRead101(d, meta) + } + + return resourceIBMLbVpxServiceRead105(d, meta) +} + +func resourceIBMLbVpxServiceUpdate(d *schema.ResourceData, meta interface{}) error { + _, nadcId, _, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error updating Virtual IP Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error updating Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceUpdate101(d, meta) + } + + return resourceIBMLbVpxServiceUpdate105(d, meta) +} + +func resourceIBMLbVpxServiceDelete(d *schema.ResourceData, meta interface{}) error { + _, nadcId, _, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceDelete101(d, meta) + } + + return resourceIBMLbVpxServiceDelete105(d, meta) +} + +func resourceIBMLbVpxServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + _, nadcId, _, err := parseServiceId(d.Id()) + if err != nil { + return false, fmt.Errorf("Error in exists: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return false, fmt.Errorf("Error in exists: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceExists101(d, meta) + } + + return resourceIBMLbVpxServiceExists105(d, meta) +} + +func resourceIBMLbVpxServiceCreate101(d *schema.ResourceData, meta interface{}) error { + + sess := meta.(ClientSession).SoftLayerSession() + + vipId := d.Get("vip_id").(string) + vipName, nadcId, _, err := parseServiceId(vipId) + serviceName := d.Get("name").(string) + + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + lb_services := []datatypes.Network_LoadBalancer_Service{ + { + Name: sl.String(d.Get("name").(string)), + DestinationIpAddress: sl.String(d.Get("destination_ip_address").(string)), + DestinationPort: sl.Int(d.Get("destination_port").(int)), + Weight: sl.Int(d.Get("weight").(int)), + HealthCheck: sl.String(d.Get("health_check").(string)), + ConnectionLimit: 
sl.Int(d.Get("connection_limit").(int)), + }, + } + + lbVip := &datatypes.Network_LoadBalancer_VirtualIpAddress{ + Name: sl.String(vipName), + Services: lb_services, + } + + // Check if there is an existed loadbalancer service which has same name. + log.Printf("[INFO] Creating LoadBalancer Service Name %s validation", serviceName) + + _, err = network.GetNadcLbVipServiceByName(sess, nadcId, vipName, serviceName) + if err == nil { + return fmt.Errorf("Error creating LoadBalancer Service: The service name '%s' is already used.", + serviceName) + } + + log.Printf("[INFO] Creating LoadBalancer Service %s", serviceName) + + successFlag, err := updateVpxService(sess.SetRetries(0), nadcId, lbVip) + + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service: %s", err) + } + + if !successFlag { + return errors.New("Error creating LoadBalancer Service") + } + + d.SetId(fmt.Sprintf("%s:%s", vipId, serviceName)) + + return resourceIBMLbVpxServiceRead(d, meta) +} + +func resourceIBMLbVpxServiceCreate105(d *schema.ResourceData, meta interface{}) error { + + sess := meta.(ClientSession).SoftLayerSession() + + vipId := d.Get("vip_id").(string) + vipName, nadcId, _, err := parseServiceId(vipId) + serviceName := d.Get("name").(string) + + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + nClient, err := getNitroClient(sess, nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + // Create a service + svcReq := dt.ServiceReq{ + Service: &dt.Service{ + Name: op.String(d.Get("name").(string)), + Ip: op.String(d.Get("destination_ip_address").(string)), + Port: op.Int(d.Get("destination_port").(int)), + Maxclient: op.String(strconv.Itoa(d.Get("connection_limit").(int))), + Usip: op.String(d.Get("usip").(string)), + }, + } + + // Get serviceType of a virtual server + vip := dt.LbvserverRes{} + err = nClient.Get(&vip, vipName) + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service : %s", err) + } + + if vip.Lbvserver[0].ServiceType != nil { + svcReq.Service.ServiceType = vip.Lbvserver[0].ServiceType + } else { + return fmt.Errorf("Error creating LoadBalancer : type of VIP '%s' is null.", vipName) + } + + // SSL offload + if *svcReq.Service.ServiceType == "SSL" { + *svcReq.Service.ServiceType = "HTTP" + } + + log.Printf("[INFO] Creating LoadBalancer Service %s", serviceName) + + // Add the service + err = nClient.Add(&svcReq) + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service: %s", err) + } + + // Bind the virtual server and the service + lbvserverServiceBindingReq := dt.LbvserverServiceBindingReq{ + LbvserverServiceBinding: &dt.LbvserverServiceBinding{ + Name: op.String(vipName), + ServiceName: op.String(serviceName), + }, + } + + err = nClient.Add(&lbvserverServiceBindingReq) + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service: %s", err) + } + + // Bind Health_check monitor + healthCheck := d.Get("health_check").(string) + if len(healthCheckMapFromSLtoVPX105[healthCheck]) > 0 { + healthCheck = healthCheckMapFromSLtoVPX105[healthCheck] + } + + serviceLbmonitorBindingReq := dt.ServiceLbmonitorBindingReq{ + ServiceLbmonitorBinding: &dt.ServiceLbmonitorBinding{ + Name: op.String(serviceName), + MonitorName: op.String(healthCheck), + }, + } + + err = nClient.Add(&serviceLbmonitorBindingReq) + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", vipId, serviceName)) + + return 
resourceIBMLbVpxServiceRead(d, meta) +} + +func resourceIBMLbVpxServiceRead101(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipName, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + lbService, err := network.GetNadcLbVipServiceByName(sess, nadcId, vipName, serviceName) + if err != nil { + return fmt.Errorf("Unable to get load balancer service %s: %s", serviceName, err) + } + + d.Set("vip_id", strconv.Itoa(nadcId)+":"+vipName) + d.Set("name", *lbService.Name) + d.Set("destination_ip_address", *lbService.DestinationIpAddress) + d.Set("destination_port", *lbService.DestinationPort) + d.Set("weight", *lbService.Weight) + d.Set("health_check", *lbService.HealthCheck) + d.Set("connection_limit", *lbService.ConnectionLimit) + + return nil +} + +func resourceIBMLbVpxServiceRead105(d *schema.ResourceData, meta interface{}) error { + vipName, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + // Read a service + + svc := dt.ServiceRes{} + err = nClient.Get(&svc, serviceName) + if err != nil { + fmt.Printf("Error getting service information : %s", err.Error()) + } + d.Set("vip_id", strconv.Itoa(nadcId)+":"+vipName) + d.Set("name", *svc.Service[0].Name) + d.Set("destination_ip_address", *svc.Service[0].Ipaddress) + d.Set("destination_port", *svc.Service[0].Port) + d.Set("usip", *svc.Service[0].Usip) + + maxClientStr, err := strconv.Atoi(*svc.Service[0].Maxclient) + if err == nil { + d.Set("connection_limit", maxClientStr) + } + + // Read a monitor information + healthCheck := dt.ServiceLbmonitorBindingRes{} + err = nClient.Get(&healthCheck, serviceName) + if err != nil { + fmt.Printf("Error getting service information : %s", err.Error()) + } + if healthCheck.ServiceLbmonitorBinding[0].MonitorName != nil { + healthCheck := *healthCheck.ServiceLbmonitorBinding[0].MonitorName + if len(healthCheckMapFromVPX105toSL[healthCheck]) > 0 { + healthCheck = healthCheckMapFromVPX105toSL[healthCheck] + } + d.Set("health_check", healthCheck) + } + + return nil +} + +func resourceIBMLbVpxServiceUpdate101(d *schema.ResourceData, meta interface{}) error { + + sess := meta.(ClientSession).SoftLayerSession() + + vipName, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + lbService, err := network.GetNadcLbVipServiceByName(sess, nadcId, vipName, serviceName) + if err != nil { + return fmt.Errorf("Unable to get load balancer service: %s", err) + } + + // copy current service + template := datatypes.Network_LoadBalancer_Service(*lbService) + + if data, ok := d.GetOk("name"); ok { + template.Name = sl.String(data.(string)) + } + if data, ok := d.GetOk("destination_ip_address"); ok { + template.DestinationIpAddress = sl.String(data.(string)) + } + if data, ok := d.GetOk("destination_port"); ok { + template.DestinationPort = sl.Int(data.(int)) + } + if data, ok := d.GetOk("weight"); ok { + template.Weight = sl.Int(data.(int)) + } + if data, ok := d.GetOk("health_check"); ok { + template.HealthCheck = sl.String(data.(string)) + } + if data, ok := d.GetOk("connection_limit"); ok { + template.ConnectionLimit = sl.Int(data.(int)) + } + + lbVip := 
&datatypes.Network_LoadBalancer_VirtualIpAddress{ + Name: sl.String(vipName), + Services: []datatypes.Network_LoadBalancer_Service{ + template}, + } + + successFlag, err := updateVpxService(sess.SetRetries(0), nadcId, lbVip) + + if err != nil { + return fmt.Errorf("Error updating LoadBalancer Service: %s", err) + } + + if !successFlag { + return errors.New("Error updating LoadBalancer Service") + } + + return nil +} + +func resourceIBMLbVpxServiceUpdate105(d *schema.ResourceData, meta interface{}) error { + _, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + // Update a service + svcReq := dt.ServiceReq{ + Service: &dt.Service{ + Name: op.String(d.Get("name").(string)), + }, + } + + updateFlag := false + + if d.HasChange("health_check") { + healthCheck := dt.ServiceLbmonitorBindingRes{} + err = nClient.Get(&healthCheck, serviceName) + if err != nil { + fmt.Printf("Error getting service information : %s", err.Error()) + } + monitorName := healthCheck.ServiceLbmonitorBinding[0].MonitorName + if monitorName != nil && *monitorName != "tcp-default" { + // Delete previous health_check + err = nClient.Delete(&dt.ServiceLbmonitorBindingReq{}, serviceName, "args=monitor_name:"+*monitorName) + if err != nil { + return fmt.Errorf("Error deleting monitor %s: %s", *monitorName, err) + } + } + + // Add a new health_check + monitor := d.Get("health_check").(string) + if len(healthCheckMapFromSLtoVPX105[monitor]) > 0 { + monitor = healthCheckMapFromSLtoVPX105[monitor] + } + + serviceLbmonitorBindingReq := dt.ServiceLbmonitorBindingReq{ + ServiceLbmonitorBinding: &dt.ServiceLbmonitorBinding{ + Name: op.String(serviceName), + MonitorName: op.String(monitor), + }, + } + + err = nClient.Add(&serviceLbmonitorBindingReq) + if err != nil { + return fmt.Errorf("Error adding a monitor: %s", err) + } + } + + if d.HasChange("connection_limit") { + svcReq.Service.Maxclient = op.String(strconv.Itoa(d.Get("connection_limit").(int))) + updateFlag = true + } + + if d.HasChange("usip") { + svcReq.Service.Usip = op.String(d.Get("usip").(string)) + updateFlag = true + } + + log.Printf("[INFO] Updating LoadBalancer Service %s", serviceName) + + if updateFlag { + err = nClient.Update(&svcReq) + } + + if err != nil { + return fmt.Errorf("Error updating LoadBalancer Service: %s", err) + } + + return nil +} + +func resourceIBMLbVpxServiceDelete101(d *schema.ResourceData, meta interface{}) error { + vipName, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkApplicationDeliveryControllerService(sess) + + lbSvc := datatypes.Network_LoadBalancer_Service{ + Name: sl.String(serviceName), + Vip: &datatypes.Network_LoadBalancer_VirtualIpAddress{ + Name: sl.String(vipName), + }, + } + + for count := 0; count < 10; count++ { + err = service.Id(nadcId).DeleteLiveLoadBalancerService(&lbSvc) + log.Printf("[INFO] Deleting Loadbalancer service %s", serviceName) + + if err != nil && + (strings.Contains(err.Error(), "Operation already in progress") || + strings.Contains(err.Error(), "Internal Error")) { + log.Printf("[INFO] Deleting Loadbalancer service Error : %s. 
Retry in 10 secs", err.Error()) + time.Sleep(time.Second * 10) + continue + } + + if err != nil && + (strings.Contains(err.Error(), "No Service") || + strings.Contains(err.Error(), "Unable to find object with unknown identifier of")) { + log.Printf("[INFO] Deleting Loadbalancer service %s Error : %s ", serviceName, err.Error()) + err = nil + } + + break + } + + if err != nil { + return fmt.Errorf("Error deleting LoadBalancer Service %s: %s", serviceName, err) + } + + return nil +} + +func resourceIBMLbVpxServiceDelete105(d *schema.ResourceData, meta interface{}) error { + _, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + // Delete a service + err = nClient.Delete(&dt.ServiceReq{}, serviceName) + if err != nil { + return fmt.Errorf("Error deleting service %s: %s", serviceName, err) + } + + return nil +} + +func resourceIBMLbVpxServiceExists101(d *schema.ResourceData, meta interface{}) (bool, error) { + vipName, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return false, fmt.Errorf("Error parsing vip id: %s", err) + } + lbService, err := network.GetNadcLbVipServiceByName(meta.(ClientSession).SoftLayerSession(), nadcId, vipName, serviceName) + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return *lbService.Name == serviceName, nil +} + +func resourceIBMLbVpxServiceExists105(d *schema.ResourceData, meta interface{}) (bool, error) { + _, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return false, fmt.Errorf("Error parsing vip id: %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return false, fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + svc := dt.ServiceRes{} + err = nClient.Get(&svc, serviceName) + if err != nil && strings.Contains(err.Error(), "No Service") { + return false, nil + } else if err != nil { + return false, fmt.Errorf("Unable to get load balancer service %s: %s", serviceName, err) + } + + return *svc.Service[0].Name == serviceName, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx_vip.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx_vip.go new file mode 100644 index 00000000000..abd44aeadc6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lb_vpx_vip.go @@ -0,0 +1,808 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
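+//
+// NOTE: VIP resources use the composite ID "<nadcId>:<vipName>" (see parseId
+// below). For VPX 10.5, the single SoftLayer load-balancing method code is
+// expanded into a Nitro (persistencetype, lbmethod) pair via
+// lbMethodMapFromSLtoVPX105, e.g. "pi-lc" -> {"SOURCEIP", "LEASTCONNECTION"},
+// and collapsed back on read via lbMethodMapFromVPX105toSL.
+//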
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/base64" + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/helpers/network" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" + + "github.com/minsikl/netscaler-nitro-go/client" + dt "github.com/minsikl/netscaler-nitro-go/datatypes" + "github.com/minsikl/netscaler-nitro-go/op" +) + +const ( + VPX_VERSION_10_1 = "10.1" +) + +var ( + // Load balancing algorithm mapping tables + + lbMethodMapFromSLtoVPX105 = map[string][2]string{ + "rr": {"NONE", "ROUNDROBIN"}, + "sr": {"NONE", "LEASTRESPONSETIME"}, + "lc": {"NONE", "LEASTCONNECTION"}, + "pi": {"SOURCEIP", "ROUNDROBIN"}, + "pi-sr": {"SOURCEIP", "LEASTRESPONSETIME"}, + "pi-lc": {"SOURCEIP", "LEASTCONNECTION"}, + "ic": {"COOKIEINSERT", "ROUNDROBIN"}, + "ic-sr": {"COOKIEINSERT", "LEASTRESPONSETIME"}, + "ic-lc": {"COOKIEINSERT", "LEASTCONNECTION"}, + } + + lbMethodMapFromVPX105toSL = map[[2]string]string{ + {"NONE", "ROUNDROBIN"}: "rr", + {"NONE", "LEASTRESPONSETIME"}: "sr", + {"NONE", "LEASTCONNECTION"}: "lc", + {"SOURCEIP", "ROUNDROBIN"}: "pi", + {"SOURCEIP", "LEASTRESPONSETIME"}: "pi-sr", + {"SOURCEIP", "LEASTCONNECTION"}: "pi-lc", + {"COOKIEINSERT", "ROUNDROBIN"}: "ic", + {"COOKIEINSERT", "LEASTRESPONSETIME"}: "ic-sr", + {"COOKIEINSERT", "LEASTCONNECTION"}: "ic-lc", + } +) + +func resourceIBMLbVpxVip() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbVpxVipCreate, + Read: resourceIBMLbVpxVipRead, + Update: resourceIBMLbVpxVipUpdate, + Delete: resourceIBMLbVpxVipDelete, + Exists: resourceIBMLbVpxVipExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "nad_controller_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "NAD controller ID", + }, + + "load_balancing_method": { + Type: schema.TypeString, + Required: true, + Description: "Load balancing method", + }, + + "persistence": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Persistance value", + }, + + // name field is actually used as an ID in SoftLayer + // http://sldn.softlayer.com/reference/services/SoftLayer_Network_Application_Delivery_Controller/updateLiveLoadBalancer + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name", + ForceNew: true, + }, + + "source_port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Source Port number", + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Type", + }, + + // security_certificate_id is only acceptable with SSL type + "security_certificate_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "security certificate ID", + }, + + "virtual_ip_address": { + Type: schema.TypeString, + Required: true, + Description: "Virtual IP address", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags", + }, + }, + } +} + +func resourceIBMLbVpxVipCreate(d *schema.ResourceData, meta interface{}) error { + version, err := getVPXVersion(d.Get("nad_controller_id").(int), meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error creating Virtual Ip Address: %s", 
err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipCreate101(d, meta) + } + + return resourceIBMLbVpxVipCreate105(d, meta) +} + +func resourceIBMLbVpxVipRead(d *schema.ResourceData, meta interface{}) error { + nadcId, _, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("Error Reading Virtual IP Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error Reading Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipRead101(d, meta) + } + + return resourceIBMLbVpxVipRead105(d, meta) +} + +func resourceIBMLbVpxVipUpdate(d *schema.ResourceData, meta interface{}) error { + nadcId, _, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("Error updating Virtual IP Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error updating Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipUpdate101(d, meta) + } + + return resourceIBMLbVpxVipUpdate105(d, meta) +} + +func resourceIBMLbVpxVipDelete(d *schema.ResourceData, meta interface{}) error { + nadcId, _, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipDelete101(d, meta) + } + + return resourceIBMLbVpxVipDelete105(d, meta) +} + +func resourceIBMLbVpxVipExists(d *schema.ResourceData, meta interface{}) (bool, error) { + nadcId, _, err := parseId(d.Id()) + if err != nil { + return false, fmt.Errorf("Error in exists: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return false, fmt.Errorf("Error in exists: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipExists101(d, meta) + } + + return resourceIBMLbVpxVipExists105(d, meta) +} + +func parseId(id string) (int, string, error) { + if len(id) < 1 { + return 0, "", fmt.Errorf("Failed to parse id %s: Unable to get a VIP ID", id) + } + + idList := strings.Split(id, ":") + if len(idList) != 2 || len(idList[0]) < 1 || len(idList[1]) < 1 { + return 0, "", fmt.Errorf("Failed to parse id %s: Invalid VIP ID", id) + } + + nadcId, err := strconv.Atoi(idList[0]) + if err != nil { + return 0, "", fmt.Errorf("Failed to parse id : Unable to get a VIP ID %s", err) + } + + vipName := idList[1] + return nadcId, vipName, nil +} + +func resourceIBMLbVpxVipCreate101(d *schema.ResourceData, meta interface{}) error { + if _, ok := d.GetOk("security_certificate_id"); ok { + return fmt.Errorf("Error creating Virtual Ip Address: security_certificate_id is not supported with VPX 10.1.") + } + + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkApplicationDeliveryControllerService(sess.SetRetries(0)) + + nadcId := d.Get("nad_controller_id").(int) + vipName := d.Get("name").(string) + + template := datatypes.Network_LoadBalancer_VirtualIpAddress{ + LoadBalancingMethod: sl.String(d.Get("load_balancing_method").(string)), + Name: sl.String(vipName), + SourcePort: sl.Int(d.Get("source_port").(int)), + Type: sl.String(d.Get("type").(string)), + VirtualIpAddress: 
sl.String(d.Get("virtual_ip_address").(string)),
+ }
+
+ log.Printf("[INFO] Creating Virtual Ip Address %s", *template.VirtualIpAddress)
+
+ var err error
+ var successFlag bool
+
+ for count := 0; count < 10; count++ {
+ successFlag, err = service.Id(nadcId).CreateLiveLoadBalancer(&template)
+ log.Printf("[INFO] Creating Virtual Ip Address %s successFlag : %t", *template.VirtualIpAddress, successFlag)
+
+ if err != nil && strings.Contains(err.Error(), "already exists") {
+ log.Printf("[INFO] Creating Virtual Ip Address %s error : %s. Ignore the error.", *template.VirtualIpAddress, err.Error())
+ successFlag = true
+ err = nil
+ break
+ }
+
+ if err != nil && strings.Contains(err.Error(), "Operation already in progress") {
+ log.Printf("[INFO] Creating Virtual Ip Address %s error : %s. Retry in 10 secs", *template.VirtualIpAddress, err.Error())
+ time.Sleep(time.Second * 10)
+ continue
+ }
+
+ break
+ }
+
+ if err != nil {
+ return fmt.Errorf("Error creating Virtual Ip Address: %s", err)
+ }
+
+ if !successFlag {
+ return errors.New("Error creating Virtual Ip Address")
+ }
+
+ d.SetId(fmt.Sprintf("%d:%s", nadcId, vipName))
+
+ log.Printf("[INFO] Netscaler VPX VIP ID: %s", d.Id())
+
+ return resourceIBMLbVpxVipRead(d, meta)
+}
+
+func resourceIBMLbVpxVipCreate105(d *schema.ResourceData, meta interface{}) error {
+ nadcId := d.Get("nad_controller_id").(int)
+ nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId)
+ if err != nil {
+ return fmt.Errorf("Error getting netscaler information ID: %d", nadcId)
+ }
+
+ vipName := d.Get("name").(string)
+ vipType := d.Get("type").(string)
+ securityCertificateId := d.Get("security_certificate_id").(int)
+
+ lbvserverReq := dt.LbvserverReq{
+ Lbvserver: &dt.Lbvserver{
+ Name: op.String(vipName),
+ Ipv46: op.String(d.Get("virtual_ip_address").(string)),
+ Port: op.Int(d.Get("source_port").(int)),
+ ServiceType: op.String(vipType),
+ },
+ }
+
+ if len(d.Get("persistence").(string)) > 0 {
+ // "persistence" maps to Persistencetype, matching resourceIBMLbVpxVipUpdate105 below.
+ lbvserverReq.Lbvserver.Persistencetype = op.String(d.Get("persistence").(string))
+ }
+ lbMethodPair := lbMethodMapFromSLtoVPX105[d.Get("load_balancing_method").(string)]
+ if len(lbMethodPair[1]) > 0 {
+ if len(lbMethodPair[0]) > 0 {
+ lbvserverReq.Lbvserver.Persistencetype = &lbMethodPair[0]
+ } else {
+ lbvserverReq.Lbvserver.Persistencetype = op.String("NONE")
+ }
+ lbvserverReq.Lbvserver.Lbmethod = &lbMethodPair[1]
+ }
+
+ log.Printf("[INFO] Creating Virtual Ip Address %s", *lbvserverReq.Lbvserver.Ipv46)
+
+ // security_certificate_id is only available when type is 'SSL'
+ if securityCertificateId > 0 && vipType != "SSL" {
+ return fmt.Errorf("Error creating VIP : security_certificate_id is only available when type is 'SSL'")
+ } else if securityCertificateId == 0 && vipType == "SSL" {
+ return fmt.Errorf("Error creating VIP : 'SSL' type requires security_certificate_id.")
+ }
+
+ // Create a virtual server
+ err = nClient.Add(&lbvserverReq)
+ if err != nil {
+ return err
+ }
+
+ // Configure security_certificate for SSL Offload.
+ if vipType == "SSL" {
+ // Delete the previous security certificate.
+ deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+
+ err = configureSecurityCertificate(nClient, meta.(ClientSession).SoftLayerSession(), vipName, securityCertificateId)
+
+ if err != nil {
+ // Rollback VIP creation and return an error.
+ resourceIBMLbVpxVipDelete105(d, meta) + return err + } + } + + d.SetId(fmt.Sprintf("%d:%s", nadcId, vipName)) + + log.Printf("[INFO] Netscaler VPX VIP ID: %s", d.Id()) + + return resourceIBMLbVpxVipRead(d, meta) +} + +func resourceIBMLbVpxVipRead101(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("ibm_lb_vpx : %s", err) + } + + vip, err := network.GetNadcLbVipByName(sess, nadcId, vipName) + if err != nil { + return fmt.Errorf("ibm_lb_vpx : while looking up a virtual ip address : %s", err) + } + + d.Set("nad_controller_id", nadcId) + if vip.LoadBalancingMethod != nil { + d.Set("load_balancing_method", *vip.LoadBalancingMethod) + } + + if vip.Name != nil { + d.Set("name", *vip.Name) + } + + if vip.SourcePort != nil { + d.Set("source_port", *vip.SourcePort) + } + + if vip.Type != nil { + d.Set("type", *vip.Type) + } + + if vip.VirtualIpAddress != nil { + d.Set("virtual_ip_address", *vip.VirtualIpAddress) + } + + return nil +} + +func resourceIBMLbVpxVipRead105(d *schema.ResourceData, meta interface{}) error { + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("ibm_lb_vpx : %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + // Read a virtual server + vip := dt.LbvserverRes{} + err = nClient.Get(&vip, vipName) + if err != nil { + fmt.Printf("Error getting VIP information : %s", err.Error()) + } + + d.Set("nad_controller_id", nadcId) + if vip.Lbvserver[0].Lbmethod != nil { + d.Set("load_balancing_method", *vip.Lbvserver[0].Lbmethod) + } + + if vip.Lbvserver[0].Name != nil { + d.Set("name", *vip.Lbvserver[0].Name) + } + + if vip.Lbvserver[0].Port != nil { + d.Set("source_port", *vip.Lbvserver[0].Port) + } + + if vip.Lbvserver[0].ServiceType != nil { + d.Set("type", *vip.Lbvserver[0].ServiceType) + } + + if vip.Lbvserver[0].Persistencetype != nil { + if *vip.Lbvserver[0].Persistencetype == "NONE" { + d.Set("persistence", nil) + } else { + d.Set("persistence", *vip.Lbvserver[0].Persistencetype) + } + } + + lbMethod := lbMethodMapFromVPX105toSL[[2]string{*vip.Lbvserver[0].Persistencetype, *vip.Lbvserver[0].Lbmethod}] + if len(lbMethod) > 0 { + d.Set("load_balancing_method", lbMethod) + } + + if vip.Lbvserver[0].Ipv46 != nil { + d.Set("virtual_ip_address", *vip.Lbvserver[0].Ipv46) + } + + // Read a security certificate information + securityCertificateId, err := getSecurityCertificateId(nClient, vipName) + if err == nil { + d.Set("security_certificate_id", securityCertificateId) + } else { + if _, ok := d.GetOk("security_certificate_id"); ok { + d.Set("security_certificate_id", 0) + } + } + + return nil +} + +func resourceIBMLbVpxVipUpdate101(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkApplicationDeliveryControllerService(sess.SetRetries(0)) + + nadcId := d.Get("nad_controller_id").(int) + template := datatypes.Network_LoadBalancer_VirtualIpAddress{ + Name: sl.String(d.Get("name").(string)), + } + + if d.HasChange("load_balancing_method") { + template.LoadBalancingMethod = sl.String(d.Get("load_balancing_method").(string)) + } + + if d.HasChange("virtual_ip_address") { + template.VirtualIpAddress = sl.String(d.Get("virtual_ip_address").(string)) + } + + var err error + + for count := 0; count < 10; count++ { 
+ var successFlag bool + successFlag, err = service.Id(nadcId).UpdateLiveLoadBalancer(&template) + log.Printf("[INFO] Updating Virtual Ip Address successFlag : %t", successFlag) + + if err != nil && strings.Contains(err.Error(), "Operation already in progress") { + log.Printf("[INFO] Updating Virtual Ip Address error : %s. Retry in 10 secs", err.Error()) + time.Sleep(time.Second * 10) + continue + } + + break + } + + if err != nil { + return fmt.Errorf("Error updating Virtual Ip Address: %s", err) + } + + return resourceIBMLbVpxVipRead(d, meta) +} + +func resourceIBMLbVpxVipUpdate105(d *schema.ResourceData, meta interface{}) error { + nadcId := d.Get("nad_controller_id").(int) + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + lbvserverReq := dt.LbvserverReq{ + Lbvserver: &dt.Lbvserver{ + Name: op.String(d.Get("name").(string)), + }, + } + + if d.HasChange("load_balancing_method") || d.HasChange("persistence") { + lbvserverReq.Lbvserver.Persistencetype = op.String(d.Get("persistence").(string)) + lbvserverReq.Lbvserver.Lbmethod = op.String(d.Get("load_balancing_method").(string)) + + lbMethodPair := lbMethodMapFromSLtoVPX105[d.Get("load_balancing_method").(string)] + if len(lbMethodPair[1]) > 0 { + if len(lbMethodPair[0]) > 0 { + lbvserverReq.Lbvserver.Persistencetype = &lbMethodPair[0] + } else { + lbvserverReq.Lbvserver.Persistencetype = op.String("NONE") + } + lbvserverReq.Lbvserver.Lbmethod = &lbMethodPair[1] + } + } + + if d.HasChange("virtual_ip_address") { + lbvserverReq.Lbvserver.Ipv46 = sl.String(d.Get("virtual_ip_address").(string)) + } + + // Update the virtual server + err = nClient.Update(&lbvserverReq) + if err != nil { + return fmt.Errorf("Error updating Virtual Ip Address: " + err.Error()) + } + + return resourceIBMLbVpxVipRead(d, meta) +} + +func resourceIBMLbVpxVipDelete101(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkApplicationDeliveryControllerService(sess) + + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("ibm_lb_vpx : %s", err) + } + + for count := 0; count < 10; count++ { + var successFlag bool + successFlag, err = service.Id(nadcId).DeleteLiveLoadBalancer( + &datatypes.Network_LoadBalancer_VirtualIpAddress{Name: sl.String(vipName)}, + ) + log.Printf("[INFO] Deleting Virtual Ip Address %s successFlag : %t", vipName, successFlag) + + if err != nil && + (strings.Contains(err.Error(), "Operation already in progress") || + strings.Contains(err.Error(), "No Service")) { + log.Printf("[INFO] Deleting Virtual Ip Address %s Error : %s Retry in 10 secs", vipName, err.Error()) + time.Sleep(time.Second * 10) + continue + } + + // Check if the resource is already deleted. + if err != nil && strings.Contains(err.Error(), "Unable to find object with unknown identifier of") { + log.Printf("[INFO] Deleting Virtual Ip Address %s Error : %s . 
Ignore the error.", vipName, err.Error()) + err = nil + } + + break + } + + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address %s: %s", vipName, err) + } + + return nil +} + +func resourceIBMLbVpxVipDelete105(d *schema.ResourceData, meta interface{}) error { + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("ibm_lb_vpx : %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address %s: %s", vipName, err) + } + + // Delete a virtual server + err = nClient.Delete(&dt.LbvserverReq{}, vipName) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address %s: %s", vipName, err) + } + + // Delete a security certificate + securityCertificateId, err := getSecurityCertificateId(nClient, vipName) + if err == nil { + deleteSecurityCertificate(nClient, vipName, securityCertificateId) + } + + return nil +} + +func resourceIBMLbVpxVipExists101(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return false, fmt.Errorf("ibm_lb_vpx : %s", err) + } + + vip, err := network.GetNadcLbVipByName(sess, nadcId, vipName) + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return vip != nil && *vip.Name == vipName, nil +} + +func resourceIBMLbVpxVipExists105(d *schema.ResourceData, meta interface{}) (bool, error) { + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return false, fmt.Errorf("ibm_lb_vpx : %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return false, err + } + + // Read a virtual server + vip := dt.LbvserverRes{} + err = nClient.Get(&vip, vipName) + + if err != nil && strings.Contains(err.Error(), "No such resource") { + return false, nil + } else if err != nil { + return false, err + } + + return true, nil +} + +func getNitroClient(sess *session.Session, nadcId int) (*client.NitroClient, error) { + service := services.GetNetworkApplicationDeliveryControllerService(sess) + nadc, err := service.Id(nadcId).Mask("managementIpAddress,password[password]").GetObject() + if err != nil { + return nil, fmt.Errorf("Error retrieving netscaler: %s", err) + } + return client.NewNitroClient("http", *nadc.ManagementIpAddress, dt.CONFIG, + "root", *nadc.Password.Password, true), nil +} + +func configureSecurityCertificate(nClient *client.NitroClient, sess *session.Session, vipName string, securityCertificateId int) error { + // Read security_certificate + service := services.GetSecurityCertificateService(sess) + cert, err := service.Id(securityCertificateId).GetObject() + + if err != nil { + return fmt.Errorf("Unable to get Security Certificate: %s", err) + } + + certName := vipName + "_" + strconv.Itoa(securityCertificateId) + certFileName := certName + ".cert" + keyFileName := certName + ".key" + + // Delete previous security certificate + deleteSecurityCertificate(nClient, vipName, securityCertificateId) + + // Upload security_certificate + certReq := dt.SystemfileReq{ + Systemfile: &dt.Systemfile{ + Filename: op.String(certFileName), + Filecontent: op.String(base64.StdEncoding.EncodeToString([]byte(*cert.Certificate))), + Filelocation: op.String("/nsconfig/ssl/"), + Fileencoding: op.String("BASE64"), + }, + 
+	}
+
+	err = nClient.Add(&certReq)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+
+	keyReq := dt.SystemfileReq{
+		Systemfile: &dt.Systemfile{
+			Filename:     op.String(keyFileName),
+			Filecontent:  op.String(base64.StdEncoding.EncodeToString([]byte(*cert.PrivateKey))),
+			Filelocation: op.String("/nsconfig/ssl/"),
+			Fileencoding: op.String("BASE64"),
+		},
+	}
+
+	err = nClient.Add(&keyReq)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+
+	// Enable SSL
+
+	sslFeature := dt.NsfeatureReq{
+		Nsfeature: &dt.Nsfeature{
+			Feature: []string{"ssl"},
+		},
+	}
+
+	err = nClient.Enable(&sslFeature, true)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+
+	// Register SSL
+
+	sslCertKey := dt.SslcertkeyReq{
+		Sslcertkey: &dt.Sslcertkey{
+			Certkey: op.String(certName),
+			Cert:    op.String(certFileName),
+			Key:     op.String(keyFileName),
+		},
+	}
+
+	err = nClient.Add(&sslCertKey)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+
+	// Bind security_certificate
+
+	sslBind := dt.SslvserverSslcertkeyBindingReq{
+		SslvserverSslcertkeyBinding: &dt.SslvserverSslcertkeyBinding{
+			Vservername: op.String(vipName),
+			Certkeyname: op.String(certName),
+		},
+	}
+
+	err = nClient.Add(&sslBind)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+	return nil
+}
+
+func deleteSecurityCertificate(nClient *client.NitroClient, vipName string, securityCertificateId int) {
+	certName := vipName + "_" + strconv.Itoa(securityCertificateId)
+	certFileName := certName + ".cert"
+	keyFileName := certName + ".key"
+
+	// Delete sslvserversslcertkeybinding
+	nClient.Delete(&dt.SslvserverSslcertkeyBindingReq{}, vipName, "args=certkeyname:"+certName)
+
+	// Delete sslcertkey
+	nClient.Delete(&dt.SslcertkeyReq{}, certName)
+
+	// Delete cert
+	nClient.Delete(&dt.SystemfileReq{}, certFileName, "args=fileLocation:"+"%2Fnsconfig%2Fssl%2F")
+
+	// Delete key
+	nClient.Delete(&dt.SystemfileReq{}, keyFileName, "args=fileLocation:"+"%2Fnsconfig%2Fssl%2F")
+}
+
+func getSecurityCertificateId(nClient *client.NitroClient, vipName string) (int, error) {
+	securityCertificateId := 0
+	res := dt.SslcertkeyRes{}
+	err := nClient.Get(&res, "")
+	if err != nil {
+		return 0, fmt.Errorf("Error getting securityCertificateId information: %s", err.Error())
+	}
+
+	// The certkey name is composed of `vipName`_`securityCertificateId`.
+	for _, sslCertKey := range res.Sslcertkey {
+		sslCertKeyArr := strings.Split(*sslCertKey.Certkey, "_")
+		if len(sslCertKeyArr) < 2 || !strings.HasPrefix(*sslCertKey.Certkey, vipName+"_") {
+			continue
+		}
+
+		securityCertificateId, err = strconv.Atoi(sslCertKeyArr[len(sslCertKeyArr)-1])
+		if err != nil {
+			continue
+		} else {
+			return securityCertificateId, nil
+		}
+	}
+	return 0, fmt.Errorf("Error getting securityCertificateId information: No security certificate for %s", vipName)
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lbaas.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lbaas.go
new file mode 100644
index 00000000000..7ffd541072f
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lbaas.go
@@ -0,0 +1,633 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/helpers/product"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+var packageType = "LOAD_BALANCER_AS_A_SERVICE"
+var lbMethodToId = make(map[string]string)
+var lbIdToMethod = make(map[string]string)
+
+const (
+	lbActive        = "ACTIVE"
+	lbPending       = "CREATE_PENDING"
+	lbUpdatePending = "UPDATE_PENDING"
+	lbOnline        = "ONLINE"
+	lbDeletePending = "DELETE_PENDING"
+	lbDeleted       = "DELETED"
+)
+
+const NOT_FOUND = "SoftLayer_Exception_Network_LBaaS_ObjectNotFound"
+
+const productItemMaskWithPriceLocationGroupID = "id,categories,capacity,description,units,keyName,prices[id,categories[id,name,categoryCode],locationGroupId,capacityRestrictionMaximum,capacityRestrictionMinimum,capacityRestrictionType,bareMetalReservedCapacityFlag],totalPhysicalCoreCapacity,totalProcessorCapacity"
+
+func init() {
+
+	lbMethodToId = map[string]string{
+		"round_robin":          "ROUNDROBIN",
+		"weighted_round_robin": "WEIGHTED_RR",
+		"least_connection":     "LEASTCONNECTION",
+	}
+	for k, v := range lbMethodToId {
+		lbIdToMethod[v] = k
+	}
+}
+
+func resourceIBMLbaas() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMLbaasCreate,
+		Read:     resourceIBMLbaasRead,
+		Delete:   resourceIBMLbaasDelete,
+		Exists:   resourceIBMLbaasExists,
+		Update:   resourceIBMLbaasUpdate,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The load balancer's name.",
+			},
+			"description": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Description of a load balancer.",
+			},
+			"type": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				Default:      "PUBLIC",
+				ForceNew:     true,
+				Description:  "Specifies if a load balancer is public or private",
+				ValidateFunc: validateAllowedStringValue([]string{"PUBLIC", "PRIVATE"}),
+			},
+			"datacenter": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"subnets": {
+				Type:        schema.TypeList,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The subnet where this Load Balancer will be provisioned.",
+				Elem:        &schema.Schema{Type: schema.TypeInt},
+				MinItems:    1,
+				MaxItems:    1,
+			},
+			"status": {
+				Type:        schema.TypeString,
+				Description: "The operation status 'ONLINE' or 'OFFLINE' of a load balancer.",
+				Computed:    true,
+			},
+			"vip": {
+				Type:        schema.TypeString,
+				Description: "The virtual ip address of this load balancer",
+				Computed:    true,
+			},
+			"use_system_public_ip_pool": {
+				Type:             schema.TypeBool,
+				Optional:         true,
+				Computed:         true,
+				DiffSuppressFunc: applyOnce,
+				Description:      "For a public load balancer, specifies whether the public IP address is allocated from the system public IP pool or from a public subnet.",
+			},
+			"protocols": {
+				Type:        schema.TypeSet,
+				Description: "Protocols to be assigned to this load balancer.",
+				Optional:    true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"frontend_protocol": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Frontend protocol, one of 'TCP', 'HTTP', 'HTTPS'.",
+							ValidateFunc: validateAllowedStringValue([]string{"HTTP", "HTTPS", "TCP"}),
+						},
+						"frontend_port": {
+							Type:         schema.TypeInt,
+							Required:     true,
+							Description:  "Frontend Protocol port number. Should be in range (1, 65535)",
+							ValidateFunc: validatePortRange(1, 65535),
+						},
+						"backend_protocol": {
+							Type:         schema.TypeString,
+							Required:     true,
+							Description:  "Backend protocol, one of 'TCP', 'HTTP', 'HTTPS'.",
+							ValidateFunc: validateAllowedStringValue([]string{"HTTP", "HTTPS", "TCP"}),
+						},
+						"backend_port": {
+							Type:         schema.TypeInt,
+							Required:     true,
+							Description:  "Backend Protocol port number. Should be in range (1, 65535)",
+							ValidateFunc: validatePortRange(1, 65535),
+						},
+						"load_balancing_method": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							ValidateFunc: validateAllowedStringValue([]string{"round_robin", "weighted_round_robin", "least_connection"}),
+							Default:      "round_robin",
+							Description:  "Load balancing algorithm: 'round_robin', 'weighted_round_robin', 'least_connection'",
+						},
+						"session_stickiness": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Description:  "Session stickiness. Valid values are SOURCE_IP and HTTP_COOKIE",
+							ValidateFunc: validateAllowedStringValue([]string{"SOURCE_IP", "HTTP_COOKIE"}),
+						},
+						"max_conn": {
+							Type:         schema.TypeInt,
+							Optional:     true,
+							Description:  "Number of connections the listener can accept. Should be between 1 and 64000",
+							ValidateFunc: validateMaxConn,
+						},
+						"tls_certificate_id": {
+							Type:        schema.TypeInt,
+							Optional:    true,
+							Description: "This references the SSL/TLS certificate for a protocol",
+						},
+						"protocol_id": {
+							Type:        schema.TypeString,
+							Description: "The UUID of a load balancer protocol",
+							Computed:    true,
+						},
+					},
+				},
+				Set: resourceIBMLBProtocolHash,
+			},
+			"ssl_ciphers": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+				//ValidateFunc: validateAllowedStringValue([]string{"ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES256-SHA384", "AES256-GCM-SHA384", "AES256-SHA256", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-SHA256", "AES128-GCM-SHA256", "AES128-SHA256"}),
+			},
+			"wait_time_minutes": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Default:  90,
+			},
+			"health_monitors": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"protocol": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"port": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"interval": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"max_retries": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"timeout": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"url_path": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"monitor_id": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+					},
+				},
+			},
+
+			ResourceControllerURL: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
+			},
+			ResourceName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The name of the resource",
+			},
+			ResourceStatus: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The status of the resource",
+			},
+		},
+	}
+}
+
+func resourceIBMLbaasCreate(d *schema.ResourceData, meta interface{}) error {
+
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	// Find price items
+	productOrderContainer, err := buildLbaasLBProductOrderContainer(d, sess)
+	if err != nil {
+		return fmt.Errorf("Error creating Load balancer: %s", err)
%s", err) + } + log.Println("[INFO] Creating Load Balancer") + + //verify order + _, err = services.GetProductOrderService(sess). + VerifyOrder(productOrderContainer) + if err != nil { + return fmt.Errorf("Error during creation of Load balancer: %s", err) + } + //place order + _, err = services.GetProductOrderService(sess.SetRetries(0)). + PlaceOrder(productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of Load balancer: %s", err) + } + + name := d.Get("name").(string) + + lbaasLB, err := findLbaasLBByOrderId(sess, name, d) + if err != nil { + return fmt.Errorf("Error during creation of Load balancer: %s", err) + } + + d.SetId(*lbaasLB.Uuid) + + return resourceIBMLbaasUpdate(d, meta) +} + +func resourceIBMLbaasRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkLBaaSLoadBalancerService(sess) + + result, err := service.Mask("datacenter,members,listeners.defaultPool,listeners.defaultPool.sessionAffinity,listeners.defaultPool.healthMonitor,healthMonitors,sslCiphers[name],useSystemPublicIpPool,isPublic,name,description,operatingStatus,address").GetLoadBalancer(sl.String(d.Id())) + if err != nil { + return fmt.Errorf("Error retrieving load balancer: %s", err) + } + + var lbType string + if *result.IsPublic == 1 { + lbType = "PUBLIC" + } else { + lbType = "PRIVATE" + } + //TODO THis is public subnet and we need to set the private subnet + //subnets := [1]int{*result.IpAddress.SubnetId} + //d.Set("subnets", subnets) + d.Set("name", result.Name) + d.Set("description", result.Description) + d.Set("datacenter", result.Datacenter.Name) + d.Set("type", lbType) + d.Set("status", result.OperatingStatus) + d.Set("vip", result.Address) + d.Set("health_monitors", flattenHealthMonitors(result.Listeners)) + d.Set("protocols", flattenProtocols(result.Listeners)) + d.Set("ssl_ciphers", flattenSSLCiphers(result.SslCiphers)) + if *result.UseSystemPublicIpPool == 1 { + d.Set("use_system_public_ip_pool", true) + } else { + d.Set("use_system_public_ip_pool", false) + } + d.Set(ResourceControllerURL, fmt.Sprintf("https://cloud.ibm.com/classic/network/loadbalancing/cloud/details/%s#Overview", d.Id())) + d.Set(ResourceName, *result.Name) + d.Set(ResourceStatus, *result.OperatingStatus) + + return nil +} + +func resourceIBMLbaasUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkLBaaSLoadBalancerService(sess.SetRetries(0)) + + if d.HasChange("description") { + _, err := service.UpdateLoadBalancer(sl.String(d.Id()), sl.String(d.Get("description").(string))) + if err != nil { + return err + } + } + listenerService := services.GetNetworkLBaaSListenerService(sess.SetRetries(0)) + if d.HasChange("protocols") { + o, n := d.GetChange("protocols") + os := o.(*schema.Set) + ns := n.(*schema.Set) + + add, err := expandProtocols(ns.Difference(os).List()) + if err != nil { + return err + } + rem := os.Difference(ns).List() + removeList := make([]string, len(rem), len(rem)) + for i, remove := range rem { + data := remove.(map[string]interface{}) + if v, ok := data["protocol_id"]; ok && v.(string) != "" { + removeList[i] = v.(string) + } + } + if len(removeList) > 0 { + _, err := listenerService.DeleteLoadBalancerProtocols(sl.String(d.Id()), removeList) + if err != nil { + return fmt.Errorf("Error removing protocols: %#v", err) + } + _, err = waitForLbaasLBAvailable(d, meta) + if err != nil { + return fmt.Errorf( + 
"Error waiting for load balancer (%s) to become ready: %s", d.Id(), err) + } + } + + if len(add) > 0 { + _, err := listenerService.UpdateLoadBalancerProtocols(sl.String(d.Id()), add) + if err != nil { + return fmt.Errorf("Error adding protocols: %#v", err) + } + _, err = waitForLbaasLBAvailable(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", d.Id(), err) + } + } + + } + if d.HasChange("ssl_ciphers") { + if v, ok := d.GetOk("ssl_ciphers"); ok && v.(*schema.Set).Len() > 0 { + service := services.GetNetworkLBaaSLoadBalancerService(sess.SetRetries(0)) + supportedCiphers, err := services.GetNetworkLBaaSSSLCipherService(sess).Mask("id,name").GetAllObjects() + if err != nil { + return fmt.Errorf("Error retreving list of ssl ciphers: %#v", err) + } + ciphers := make([]int, v.(*schema.Set).Len()) + for i, v := range v.(*schema.Set).List() { + for _, c := range supportedCiphers { + if v == *c.Name { + ciphers[i] = *c.Id + break + } + } + } + _, err = service.UpdateSslCiphers(sl.String(d.Id()), ciphers) + if err != nil { + return fmt.Errorf("Error updating ssl ciphers: %#v", err) + } + _, err = waitForLbaasLBAvailable(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", d.Id(), err) + } + + } + + } + + return resourceIBMLbaasRead(d, meta) +} + +func resourceIBMLbaasDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkLBaaSLoadBalancerService(sess) + + _, err := service.CancelLoadBalancer(sl.String(d.Id())) + if err != nil { + if strings.Contains(err.Error(), "DELETE_PENDING") { + log.Println("Deletion is already in progress, probably from previous runs") + } else { + return fmt.Errorf("Error deleting load balancer: %s", err) + } + } + _, err = waitForLbaasLBDelete(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to be deleted: %s", d.Id(), err) + } + d.SetId("") + return nil +} + +func resourceIBMLbaasExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkLBaaSLoadBalancerService(sess) + + result, err := service.GetLoadBalancer(sl.String(d.Id())) + if err != nil { + if apiErr, ok := err.(sl.Error); ok && (apiErr.StatusCode == 404 || apiErr.Exception == NOT_FOUND) { + return false, nil + } + return false, fmt.Errorf("Error retrieving load balancer: %s", err) + } + return result.Uuid != nil && *result.Uuid == d.Id(), nil +} + +func buildLbaasLBProductOrderContainer(d *schema.ResourceData, sess *session.Session) (*datatypes.Container_Product_Order_Network_LoadBalancer_AsAService, error) { + // 1. Get a package + name := d.Get("name").(string) + subnets := d.Get("subnets").([]interface{}) + lbType := d.Get("type").(string) + + subnetsParam := []datatypes.Network_Subnet{} + for _, subnet := range subnets { + subnetItem := datatypes.Network_Subnet{ + Id: sl.Int(subnet.(int)), + } + subnetsParam = append(subnetsParam, subnetItem) + } + + pkg, err := product.GetPackageByType(sess, packageType) + if err != nil { + return nil, err + } + + // 2. 
Get all prices for the package + productItems, err := product.GetPackageProducts(sess, *pkg.Id, productItemMaskWithPriceLocationGroupID) + if err != nil { + return &datatypes.Container_Product_Order_Network_LoadBalancer_AsAService{}, err + } + + priceItems := []datatypes.Product_Item_Price{} + for _, item := range productItems { + for _, price := range item.Prices { + if price.LocationGroupId == nil && !*price.BareMetalReservedCapacityFlag { + priceItem := datatypes.Product_Item_Price{ + Id: price.Id, + } + priceItems = append(priceItems, priceItem) + break + } + } + } + + productOrderContainer := datatypes.Container_Product_Order_Network_LoadBalancer_AsAService{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Prices: priceItems, + Quantity: sl.Int(1), + UseHourlyPricing: sl.Bool(true), + }, + Name: sl.String(name), + Subnets: subnetsParam, + } + if d, ok := d.GetOk("description"); ok { + productOrderContainer.Description = sl.String(d.(string)) + } + + if lbType == "PRIVATE" { + productOrderContainer.IsPublic = sl.Bool(false) + } + if publicIPPool, ok := d.GetOkExists("use_system_public_ip_pool"); ok { + productOrderContainer.UseSystemPublicIpPool = sl.Bool(publicIPPool.(bool)) + } + + return &productOrderContainer, nil +} + +func findLbaasLBByOrderId(sess *session.Session, name string, d *schema.ResourceData) (*datatypes.Network_LBaaS_LoadBalancer, error) { + + isIDSet := false + stateConf := &resource.StateChangeConf{ + Pending: []string{lbPending}, + Target: []string{lbActive}, + Refresh: func() (interface{}, string, error) { + /*lb, err := services.GetAccountService(sess). + Filter(filter.Path("loadbalancer.billingItem.orderItem.order.id"). + Eq(strconv.Itoa(orderId)).Build()). + Mask("id,activeTransaction"). 
+				GetLoadBalancer()*/
+			// TODO: This is a temporary workaround that finds the LBaaS object by name. Get the LBaaS object from the order id instead.
+			lb, err := services.GetNetworkLBaaSLoadBalancerService(sess).Filter(filter.Build(
+				filter.Path("name").Eq(name))).GetAllObjects()
+			if err != nil {
+				return nil, "", err
+			}
+			if len(lb) == 1 {
+				if *lb[0].ProvisioningStatus == lbActive && *lb[0].OperatingStatus == lbOnline {
+					return lb[0], lbActive, nil
+				}
+				if !isIDSet && lb[0].Uuid != nil {
+					d.SetId(*lb[0].Uuid)
+					isIDSet = true
+				}
+				return lb[0], lbPending, nil
+			}
+			return nil, lbPending, nil
+		},
+		Timeout:        time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute,
+		Delay:          60 * time.Second,
+		MinTimeout:     3 * time.Second,
+		PollInterval:   60 * time.Second,
+		NotFoundChecks: 40,
+	}
+
+	pendingResult, err := stateConf.WaitForState()
+
+	if err != nil {
+		return nil, err
+	}
+
+	if result, ok := pendingResult.(datatypes.Network_LBaaS_LoadBalancer); ok {
+		return &result, nil
+	}
+
+	return nil,
+		fmt.Errorf("Cannot find a load balancer with name '%s'", name)
+}
+
+func waitForLbaasLBAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkLBaaSLoadBalancerService(sess)
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{lbUpdatePending},
+		Target:  []string{lbActive},
+		Refresh: func() (interface{}, string, error) {
+			lb, err := service.GetLoadBalancer(sl.String(d.Id()))
+			if err != nil {
+				if apiErr, ok := err.(sl.Error); ok && (apiErr.StatusCode == 404 || apiErr.Exception == NOT_FOUND) {
+					return nil, "", fmt.Errorf("The load balancer %s does not exist anymore: %v", d.Id(), err)
+				}
+				return nil, "", err
+			}
+			if *lb.ProvisioningStatus == lbActive && *lb.OperatingStatus == lbOnline {
+				return lb, lbActive, nil
+			}
+			return lb, lbUpdatePending, nil
+		},
+		Timeout:        time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute,
+		Delay:          60 * time.Second,
+		MinTimeout:     3 * time.Second,
+		PollInterval:   60 * time.Second,
+		NotFoundChecks: 40,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func waitForLbaasLBDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkLBaaSLoadBalancerService(sess)
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{lbDeletePending},
+		Target:  []string{lbDeleted},
+		Refresh: func() (interface{}, string, error) {
+			lb, err := service.GetLoadBalancer(sl.String(d.Id()))
+			if err != nil {
+				if apiErr, ok := err.(sl.Error); ok && (apiErr.StatusCode == 404 || apiErr.Exception == NOT_FOUND) {
+					return lb, lbDeleted, nil
+				}
+				return datatypes.Network_LBaaS_LoadBalancer{}, "", err
+			}
+			return lb, lbDeletePending, nil
+		},
+		Timeout:      time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute,
+		Delay:        60 * time.Second,
+		MinTimeout:   10 * time.Second,
+		PollInterval: 60 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func resourceIBMLBProtocolHash(v interface{}) int {
+	var buf bytes.Buffer
+	m := v.(map[string]interface{})
+	buf.WriteString(fmt.Sprintf("%s-",
+		m["frontend_protocol"].(string)))
+	buf.WriteString(fmt.Sprintf("%d-", m["frontend_port"].(int)))
+	buf.WriteString(fmt.Sprintf("%s-",
+		m["backend_protocol"].(string)))
+	buf.WriteString(fmt.Sprintf("%d-", m["backend_port"].(int)))
+
+	if v, ok := m["tls_certificate_id"]; ok && v.(int) != 0 {
+		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
+	}
+
+	return hashcode.String(buf.String())
+}
+
+func
resourceIBMLBMemberHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", + m["private_ip_address"].(string))) + + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lbaas_health_monitor.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lbaas_health_monitor.go new file mode 100644 index 00000000000..39b7a65317b --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lbaas_health_monitor.go @@ -0,0 +1,197 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMLbaasHealthMonitor() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbaasHealthMonitorCreate, + Read: resourceIBMLbaasHealthMonitorRead, + Delete: resourceIBMLbaasHealthMonitorDelete, + Update: resourceIBMLbaasHealthMonitorUpdate, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "protocol": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"HTTP", "HTTPS", "TCP"}), + Description: "Protocol value", + }, + "port": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validatePortRange(1, 65535), + Description: "Port number", + }, + "interval": { + Type: schema.TypeInt, + Optional: true, + Default: 5, + ValidateFunc: validateInterval, + Description: "Interval value", + }, + "max_retries": { + Type: schema.TypeInt, + Optional: true, + Default: 2, + ValidateFunc: validateMaxRetries, + Description: "Maximum retry counts", + }, + "timeout": { + Type: schema.TypeInt, + Optional: true, + Default: 2, + ValidateFunc: validateTimeout, + Description: "Timeout in seconds", + }, + "url_path": { + Type: schema.TypeString, + Optional: true, + Default: "/", + ValidateFunc: validateURLPath, + Description: "URL Path", + }, + "monitor_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Monitor ID", + }, + "lbaas_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "LBAAS id", + }, + }, + } +} + +func resourceIBMLbaasHealthMonitorCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + healthMonitorService := services.GetNetworkLBaaSHealthMonitorService(sess.SetRetries(0)) + + lbaasID := d.Get("lbaas_id").(string) + healthMonitors := make([]datatypes.Network_LBaaS_LoadBalancerHealthMonitorConfiguration, 0, 1) + healthMonitor := datatypes.Network_LBaaS_LoadBalancerHealthMonitorConfiguration{ + BackendPort: sl.Int(d.Get("port").(int)), + BackendProtocol: sl.String(d.Get("protocol").(string)), + HealthMonitorUuid: sl.String(d.Get("monitor_id").(string)), + Interval: sl.Int(d.Get("interval").(int)), + Timeout: sl.Int(d.Get("timeout").(int)), + MaxRetries: sl.Int(d.Get("max_retries").(int)), + UrlPath: sl.String(d.Get("url_path").(string)), + } + + healthMonitors = append(healthMonitors, healthMonitor) + + _, err := waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", d.Id(), err) + } + + _, err = healthMonitorService.UpdateLoadBalancerHealthMonitors(sl.String(lbaasID), 
healthMonitors) + if err != nil { + return fmt.Errorf("Error adding health monitors: %#v", err) + } + _, err = waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", d.Id(), err) + } + d.SetId(fmt.Sprintf("%s/%s", lbaasID, d.Get("monitor_id").(string))) + return resourceIBMLbaasHealthMonitorRead(d, meta) +} + +func resourceIBMLbaasHealthMonitorRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkLBaaSLoadBalancerService(sess) + parts, err := idParts(d.Id()) + if err != nil { + return err + } + lbaasID := parts[0] + monitorID := parts[1] + + result, err := service.Mask("listeners.defaultPool.healthMonitor").GetLoadBalancer(sl.String(lbaasID)) + if err != nil { + return fmt.Errorf("Error retrieving load balancer: %s", err) + } + for _, i := range result.Listeners { + if monitorID == *i.DefaultPool.HealthMonitor.Uuid { + d.Set("protocol", *i.Protocol) + d.Set("port", *i.DefaultPool.ProtocolPort) + d.Set("interval", *i.DefaultPool.HealthMonitor.Interval) + d.Set("max_retries", *i.DefaultPool.HealthMonitor.MaxRetries) + d.Set("timeout", *i.DefaultPool.HealthMonitor.Timeout) + if i.DefaultPool.HealthMonitor.UrlPath != nil && *i.Protocol == "HTTP" { + d.Set("url_path", *i.DefaultPool.HealthMonitor.UrlPath) + } + + break + } + } + + return nil +} + +func resourceIBMLbaasHealthMonitorUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + healthMonitorService := services.GetNetworkLBaaSHealthMonitorService(sess.SetRetries(0)) + parts, err := idParts(d.Id()) + if err != nil { + return err + } + lbaasID := parts[0] + monitorID := parts[1] + + if d.HasChange("interval") || d.HasChange("timeout") || d.HasChange("max_retries") || d.HasChange("url_path") { + healthMonitors := make([]datatypes.Network_LBaaS_LoadBalancerHealthMonitorConfiguration, 0, 1) + healthMonitor := datatypes.Network_LBaaS_LoadBalancerHealthMonitorConfiguration{ + BackendPort: sl.Int(d.Get("port").(int)), + BackendProtocol: sl.String(d.Get("protocol").(string)), + HealthMonitorUuid: sl.String(monitorID), + Interval: sl.Int(d.Get("interval").(int)), + Timeout: sl.Int(d.Get("timeout").(int)), + MaxRetries: sl.Int(d.Get("max_retries").(int)), + UrlPath: sl.String(d.Get("url_path").(string)), + } + + healthMonitors = append(healthMonitors, healthMonitor) + + _, err = waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", d.Id(), err) + } + + _, err := healthMonitorService.UpdateLoadBalancerHealthMonitors(sl.String(lbaasID), healthMonitors) + if err != nil { + return fmt.Errorf("Error adding health monitors: %#v", err) + } + _, err = waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", d.Id(), err) + } + } + return resourceIBMLbaasHealthMonitorRead(d, meta) +} + +func resourceIBMLbaasHealthMonitorDelete(d *schema.ResourceData, meta interface{}) error { + fmt.Println("Health monitor is destroyed only when the corresponding protocol is removed") + d.SetId("") + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lbaas_server_instance_attachment.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lbaas_server_instance_attachment.go new file mode 100644 index 00000000000..02f9312b06c --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_lbaas_server_instance_attachment.go @@ -0,0 +1,213 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMLbaasServerInstanceAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbaasServerInstanceAttachmentCreate, + Read: resourceIBMLbaasServerInstanceAttachmentRead, + Delete: resourceIBMLbaasServerInstanceAttachmentDelete, + Exists: resourceIBMLbaasServerInstanceAttachmentExists, + Update: resourceIBMLbaasServerInstanceAttachmentUpdate, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "private_ip_address": { + Type: schema.TypeString, + Description: "The Private IP address of a load balancer member.", + Required: true, + ForceNew: true, + ValidateFunc: validateIP, + }, + "weight": { + Type: schema.TypeInt, + Description: "The weight of a load balancer member.", + Computed: true, + Optional: true, + ValidateFunc: validateWeight, + }, + "lbaas_id": { + Type: schema.TypeString, + Description: "The UUID of a load balancer", + ForceNew: true, + Required: true, + }, + "uuid": { + Type: schema.TypeString, + Description: "The UUID of a load balancer member", + Computed: true, + }, + }, + } +} + +func resourceIBMLbaasServerInstanceAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkLBaaSLoadBalancerService(sess) + memberService := services.GetNetworkLBaaSMemberService(sess) + privateIPAddress := d.Get("private_ip_address").(string) + weight := d.Get("weight").(int) + lbaasId := d.Get("lbaas_id").(string) + p := &datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo{} + p.PrivateIpAddress = sl.String(privateIPAddress) + p.Weight = sl.Int(weight) + members := make([]datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo, 0, 1) + members = append(members, *p) + _, err := waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", d.Id(), err) + } + _, err = memberService.AddLoadBalancerMembers(sl.String(lbaasId), members) + if err != nil { + return fmt.Errorf("Error adding server instances: %#v", err) + } + _, err = waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", d.Id(), err) + } + result, err := service.Mask("members").GetLoadBalancer(sl.String(lbaasId)) + lbaasMembers := result.Members + + for _, member := range lbaasMembers { + if *member.Address == privateIPAddress { + d.SetId(strconv.Itoa(*member.Id)) + } + } + + return resourceIBMLbaasServerInstanceAttachmentRead(d, meta) +} + +func resourceIBMLbaasServerInstanceAttachmentRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + memberService := services.GetNetworkLBaaSMemberService(sess) + id := d.Id() + memId, _ := strconv.Atoi(d.Id()) + member, err := memberService.Id(memId).GetObject() + if err != nil { + return fmt.Errorf( + "Error retrieving load balancer member(%s) : %s", id, err) + } + d.Set("private_ip_address", member.Address) + 
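+	// The remaining attributes are refreshed from the same fetched member, so a
+	// read (including one triggered by terraform import) repopulates weight and uuid.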
d.Set("weight", member.Weight) + d.Set("uuid", member.Uuid) + + return nil +} + +func resourceIBMLbaasServerInstanceAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + memberService := services.GetNetworkLBaaSMemberService(sess) + if d.HasChange("weight") { + weight := d.Get("weight").(int) + lbaasId := d.Get("lbaas_id").(string) + uuid := d.Get("uuid").(string) + privateIpAddress := d.Get("private_ip_address").(string) + + updateParam := &datatypes.Network_LBaaS_Member{} + updateParam.Weight = sl.Int(weight) + updateParam.Uuid = sl.String(uuid) + updateParam.Address = sl.String(privateIpAddress) + members := make([]datatypes.Network_LBaaS_Member, 0, 1) + members = append(members, *updateParam) + _, err := waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", d.Id(), err) + } + _, err = memberService.UpdateLoadBalancerMembers(sl.String(lbaasId), members) + if err != nil { + return fmt.Errorf("Error updating loadbalnacer: %#v", err) + } + _, err = waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", lbaasId, err) + } + + } + + return resourceIBMLbaasServerInstanceAttachmentRead(d, meta) +} + +func resourceIBMLbaasServerInstanceAttachmentExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + memberService := services.GetNetworkLBaaSMemberService(sess) + memId, _ := strconv.Atoi(d.Id()) + result, err := memberService.Id(memId).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && (apiErr.StatusCode == 404 || apiErr.Exception == NOT_FOUND) { + return false, nil + } + return false, fmt.Errorf("Error retrieving load balancer member: %s", err) + } + return result.Id != nil && *result.Id == memId, nil +} + +func resourceIBMLbaasServerInstanceAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + memberService := services.GetNetworkLBaaSMemberService(sess) + lbaasId := d.Get("lbaas_id").(string) + removeList := make([]string, 0, 1) + removeList = append(removeList, d.Get("uuid").(string)) + _, err := waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error checking for load balancer (%s) is active: %s", d.Id(), err) + } + _, err = memberService.DeleteLoadBalancerMembers(sl.String(lbaasId), removeList) + if err != nil { + return fmt.Errorf("Error removing server instances: %#v", err) + } + _, err = waitForLbaasLBActive(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for load balancer (%s) to become ready: %s", d.Id(), err) + } + return nil +} + +func waitForLbaasLBActive(d *schema.ResourceData, meta interface{}) (interface{}, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkLBaaSLoadBalancerService(sess) + lbaasId := d.Get("lbaas_id").(string) + + stateConf := &resource.StateChangeConf{ + Pending: []string{lbUpdatePening}, + Target: []string{lbActive}, + Refresh: func() (interface{}, string, error) { + lb, err := service.GetLoadBalancer(sl.String(lbaasId)) + if err != nil { + if apiErr, ok := err.(sl.Error); ok && (apiErr.StatusCode == 404 || apiErr.Exception == NOT_FOUND) { + return nil, "", fmt.Errorf("The load balancer %s does not exist anymore: %v", d.Id(), err) + } + return nil, "", err + } + if *lb.ProvisioningStatus == lbActive && *lb.OperatingStatus == 
+				return lb, lbActive, nil
+			}
+			return lb, lbUpdatePending, nil
+		},
+		Timeout:        10 * time.Minute,
+		Delay:          60 * time.Second,
+		MinTimeout:     3 * time.Second,
+		PollInterval:   60 * time.Second,
+		NotFoundChecks: 40,
+	}
+
+	return stateConf.WaitForState()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_multi_vlan_firewall.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_multi_vlan_firewall.go
new file mode 100644
index 00000000000..22328c102ff
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_multi_vlan_firewall.go
@@ -0,0 +1,427 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/helpers/location"
+	"github.com/softlayer/softlayer-go/helpers/product"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMMultiVlanFirewall() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMNetworkMultiVlanCreate,
+		Read:     resourceIBMMultiVlanFirewallRead,
+		Delete:   resourceIBMFirewallDelete,
+		Update:   resourceIBMMultiVlanFirewallUpdate,
+		Exists:   resourceIBMMultiVLanFirewallExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"datacenter": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Datacenter name",
+			},
+
+			"pod": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
+					return strings.TrimSpace(old) == strings.TrimSpace(new)
+				},
+				Description: "POD name",
+			},
+
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "name",
+			},
+
+			"public_vlan_id": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "Public VLAN id",
+			},
+
+			"private_vlan_id": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "Private VLAN id",
+			},
+
+			"firewall_type": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validateAllowedStringValue([]string{"FortiGate Firewall Appliance HA Option", "FortiGate Security Appliance"}),
+				Description:  "Firewall type",
+			},
+
+			"public_ip": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Public IP Address",
+			},
+
+			"public_ipv6": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Public IPV6 IP",
+			},
+
+			"private_ip": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Private IP Address",
+			},
+
+			"username": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "User name",
+			},
+
+			"password": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Sensitive:   true,
+				Description: "Password",
+			},
+
+			"addon_configuration": {
+				Type:        schema.TypeList,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Optional:    true,
+				Description: `Add-ons for the firewall: any of [Web Filtering Add-on, NGFW Add-on, AV Add-on], for both standalone and High Availability deployments`,
+			},
+		},
+	}
+}
+
+const (
+	productPackageFilter      = `{"keyName":{"operation":"FIREWALL_APPLIANCE"}}`
+	complexType               = "SoftLayer_Container_Product_Order_Network_Protection_Firewall_Dedicated"
+	productPackageServiceMask = "description,prices.locationGroupId,prices.id"
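+	// Object masks restrict which fields the SoftLayer API returns; the masks
+	// above and below keep the price and gateway lookups lightweight.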
+	mandatoryFirewallType     = "FortiGate Security Appliance"
+	multiVlanMask             = "id,customerManagedFlag,datacenter.name,bandwidthAllocation"
+)
+
+func resourceIBMNetworkMultiVlanCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	name := d.Get("name").(string)
+	FirewallType := d.Get("firewall_type").(string)
+	datacenter := d.Get("datacenter").(string)
+	pod := d.Get("pod").(string)
+	podName := datacenter + "." + pod
+	PodService := services.GetNetworkPodService(sess)
+	podMask := `frontendRouterId,name`
+
+	// 1. Get the router ID
+	routerids, err := PodService.Filter(filter.Path("datacenterName").Eq(datacenter).Build()).Mask(podMask).GetAllObjects()
+	if err != nil {
+		return fmt.Errorf("Encountered problem trying to get the router ID: %s", err)
+	}
+	var routerid int
+	for _, iterate := range routerids {
+		if *iterate.Name == podName {
+			routerid = *iterate.FrontendRouterId
+		}
+	}
+
+	// 2. Get the datacenter id
+	dc, err := location.GetDatacenterByName(sess, datacenter, "id")
+	if err != nil {
+		return fmt.Errorf("Encountered problem trying to get the Datacenter ID: %s", err)
+	}
+	locationservice := services.GetLocationService(sess)
+
+	// 3. Get the price groups that the datacenter belongs to
+	priceidds, _ := locationservice.Id(*dc.Id).GetPriceGroups()
+	var listofpriceids []int
+	// store all the price groups a datacenter belongs to
+	for _, priceidd := range priceidds {
+		listofpriceids = append(listofpriceids, *priceidd.Id)
+	}
+
+	// 4. Get the addons that are specified
+	var addonconfigurations []interface{}
+	if _, ok := d.GetOk("addon_configuration"); ok {
+		addonconfigurations, ok = d.Get("addon_configuration").([]interface{})
+	}
+
+	var actualaddons []string
+	for _, addons := range addonconfigurations {
+		actualaddons = append(actualaddons, addons.(string))
+	}
+	// append the 20000 GB Bandwidth item as it is mandatory
+	actualaddons = append(actualaddons, FirewallType, "20000 GB Bandwidth Allotment")
+	// append the FortiGate Security Appliance as it is a mandatory parameter for placing an order
+	if FirewallType != mandatoryFirewallType {
+		actualaddons = append(actualaddons, mandatoryFirewallType)
+	}
+
+	// 5. Get the price ids of the items which have to be ordered
+	priceItems := []datatypes.Product_Item_Price{}
+	for _, addon := range actualaddons {
+		actualpriceid, err := product.GetPriceIDByPackageIdandLocationGroups(sess, listofpriceids, 863, addon)
+		if err != nil || actualpriceid == 0 {
+			return fmt.Errorf("Encountered problem trying to get priceIds of items which have to be ordered: %s", err)
+		}
+		priceItem := datatypes.Product_Item_Price{
+			Id: &actualpriceid,
+		}
+		priceItems = append(priceItems, priceItem)
+	}
+
+	// 6. Get the package ID
+	productpackageservice, _ := services.GetProductPackageService(sess).Filter(productPackageFilter).Mask(`id`).GetAllObjects()
+	var productid int
+	for _, packageid := range productpackageservice {
+		productid = *packageid.Id
+	}
+
+	// 7. Populate the container which needs to be sent for Verify order and Place order
+	productOrderContainer := datatypes.Container_Product_Order_Network_Protection_Firewall_Dedicated{
+		Container_Product_Order: datatypes.Container_Product_Order{
+			PackageId:   &productid,
+			Prices:      priceItems,
+			Quantity:    sl.Int(1),
+			Location:    &datacenter,
+			ComplexType: sl.String(complexType),
+		},
+		Name:     sl.String(name),
+		RouterId: &routerid,
+	}
+
+	// 8. Call verify order
+	_, err = services.GetProductOrderService(sess.SetRetries(0)).
+ VerifyOrder(&productOrderContainer) + if err != nil { + return fmt.Errorf("Error during Verify order for Creating: %s", err) + } + //9.Calling place order + receipt, err := services.GetProductOrderService(sess.SetRetries(0)). + PlaceOrder(&productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during Place order for Creating: %s", err) + } + _, vlan, _, err := findDedicatedFirewallByOrderId(sess, *receipt.OrderId, d) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall: %s", err) + } + id := *vlan.NetworkFirewall.Id + d.SetId(fmt.Sprintf("%d", id)) + log.Printf("[INFO] Firewall ID: %s", d.Id()) + return resourceIBMMultiVlanFirewallRead(d, meta) +} + +func resourceIBMMultiVlanFirewallRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + fwID, _ := strconv.Atoi(d.Id()) + + firewalls, err := services.GetAccountService(sess). + Filter(filter.Build( + filter.Path("networkGateways.networkFirewall.id"). + Eq(strconv.Itoa(fwID)))). + Mask(multiVlanMask). + GetNetworkGateways() + if err != nil { + return fmt.Errorf("Error retrieving firewall information: %s", err) + } + d.Set("datacenter", *firewalls[0].NetworkFirewall.Datacenter.Name) + if *firewalls[0].NetworkFirewall.CustomerManagedFlag && *firewalls[0].MemberCount == 1 { + d.Set("firewall_type", "FortiGate Security Appliance") + } else if *firewalls[0].NetworkFirewall.CustomerManagedFlag && *firewalls[0].MemberCount > 1 { + d.Set("firewall_type", "FortiGate Firewall Appliance HA Option") + } + addonConfiguration := make([]interface{}, 0, len(firewalls[0].NetworkFirewall.BillingItem.ActiveChildren)) + for _, elem := range firewalls[0].NetworkFirewall.BillingItem.ActiveChildren { + if *elem.Description != "20000 GB Bandwidth Allotment" && *elem.Description != "FortiGate Firewall Appliance HA Option" { + addonConfiguration = append(addonConfiguration, *elem.Description) + } + } + if len(addonConfiguration) > 0 { + d.Set("addon_configuration", addonConfiguration) + } + pod := *firewalls[0].NetworkFirewall.BillingItem.Notes + pod = "pod" + strings.SplitAfter(pod, "pod")[1] + d.Set("pod", &pod) + d.Set("name", *firewalls[0].Name) + d.Set("public_ip", *firewalls[0].PublicIpAddress.IpAddress) + d.Set("public_ipv6", firewalls[0].PublicIpv6Address.IpAddress) + d.Set("private_ip", *firewalls[0].PrivateIpAddress.IpAddress) + d.Set("public_vlan_id", *firewalls[0].PublicVlan.Id) + d.Set("private_vlan_id", *firewalls[0].PrivateVlan.Id) + d.Set("username", *firewalls[0].NetworkFirewall.ManagementCredentials.Username) + d.Set("password", *firewalls[0].NetworkFirewall.ManagementCredentials.Password) + return nil +} + +func resourceIBMMultiVlanFirewallUpdate(d *schema.ResourceData, meta interface{}) error { + if d.HasChange("addon_configuration") { + sess := meta.(ClientSession).SoftLayerSession() + fwID, _ := strconv.Atoi(d.Id()) + old, new := d.GetChange("addon_configuration") + oldaddons := old.([]interface{}) + newaddons := new.([]interface{}) + var oldaddon, newaddon, add []string + for _, v := range oldaddons { + oldaddon = append(oldaddon, v.(string)) + } + for _, v := range newaddons { + newaddon = append(newaddon, v.(string)) + } + // 1. Remove old addons no longer appearing in the new set + // 2. Add new addons not already provisioned + remove := listdifference(oldaddon, newaddon) + add = listdifference(newaddon, oldaddon) + if len(remove) > 0 { + firewalls, err := services.GetAccountService(sess). 
+		Filter(filter.Build(
+			filter.Path("networkGateways.networkFirewall.id").
+				Eq(strconv.Itoa(fwID)))).
+		Mask(multiVlanMask).
+		GetNetworkGateways()
+		if err != nil {
+			return fmt.Errorf("An error occurred while fetching the information of the Multi-Vlan Firewall")
+		}
+		for _, i := range remove {
+			for _, j := range firewalls[0].NetworkFirewall.BillingItem.ActiveChildren {
+				if i == *j.Description {
+					cancelimmediately := true
+					cancelAssociatedBillingItems := false
+					reason := "No longer needed"
+					customerNote := "No longer needed"
+					billingitemservice, err := services.GetBillingItemService(sess).Id(*j.Id).CancelItem(&cancelimmediately, &cancelAssociatedBillingItems, &reason, &customerNote)
+					if err != nil || !billingitemservice {
+						return fmt.Errorf("Error while cancelling the addon")
+					}
+				}
+			}
+		}
+		}
+		if len(add) > 0 {
+			datacentername, ok := d.GetOk("datacenter")
+			if !ok {
+				return fmt.Errorf("The attribute datacenter is not defined")
+			}
+			// 2. Get the datacenter id
+			dc, err := location.GetDatacenterByName(sess, datacentername.(string), "id")
+			if err != nil {
+				return fmt.Errorf("Datacenter not found")
+			}
+			locationservice := services.GetLocationService(sess)
+			// 3. Get the price groups that the datacenter belongs to
+			priceidds, _ := locationservice.Id(*dc.Id).GetPriceGroups()
+			var listofpriceids []int
+			// store all the price groups a datacenter belongs to
+			for _, priceidd := range priceidds {
+				listofpriceids = append(listofpriceids, *priceidd.Id)
+			}
+			priceItems := []datatypes.Product_Item_Price{}
+			for _, addon := range add {
+				actualpriceid, err := product.GetPriceIDByPackageIdandLocationGroups(sess, listofpriceids, 863, addon)
+				if err != nil || actualpriceid == 0 {
+					return fmt.Errorf("The addon or the firewall is not available for the datacenter you have selected. Please enter a different datacenter")
+				}
+				priceItem := datatypes.Product_Item_Price{
+					Id: &actualpriceid,
+				}
+				priceItems = append(priceItems, priceItem)
+			}
+			// 6. Get the package ID
+			productpackageservice, _ := services.GetProductPackageService(sess).Filter(productPackageFilter).Mask(`id`).GetAllObjects()
+			var productid int
+			for _, packageid := range productpackageservice {
+				productid = *packageid.Id
+			}
+			var properties []datatypes.Container_Product_Order_Property
+			t := time.Now()
+			upgradeproductOrderContainer := datatypes.Container_Product_Order_Network_Protection_Firewall_Dedicated_Upgrade{
+				Container_Product_Order_Network_Protection_Firewall_Dedicated: datatypes.Container_Product_Order_Network_Protection_Firewall_Dedicated{
+					Container_Product_Order: datatypes.Container_Product_Order{
+						PackageId:   &productid,
+						Prices:      priceItems,
+						ComplexType: sl.String(complexType),
+						Properties: append(properties, datatypes.Container_Product_Order_Property{
+							Name:  sl.String("MAINTENANCE_WINDOW"),
+							Value: sl.String(time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), 0, t.Location()).UTC().String()),
+						}),
+					},
+				},
+				FirewallId: &fwID,
+			}
+			// 8. Call verify order
+			_, err = services.GetProductOrderService(sess.SetRetries(0)).
+				VerifyOrder(&upgradeproductOrderContainer)
+			if err != nil {
+				return fmt.Errorf("Error during Verify order for Updating: %s", err)
+			}
+
+			// 9. Call place order
+			receipt, err := services.GetProductOrderService(sess.SetRetries(0)).
+ PlaceOrder(&upgradeproductOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during Place order for Updating: %s", err) + } + _, _, _, err = findDedicatedFirewallByOrderId(sess, *receipt.OrderId, d) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall: %s", err) + } + } + } + return resourceIBMMultiVlanFirewallRead(d, meta) +} + +func resourceIBMMultiVLanFirewallExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + fwID, _ := strconv.Atoi(d.Id()) + + firewalls, err := services.GetAccountService(sess). + Filter(filter.Build( + filter.Path("networkGateways.networkFirewall.id"). + Eq(strconv.Itoa(fwID)))). + Mask(multiVlanMask). + GetNetworkGateways() + if err != nil { + return false, fmt.Errorf("Error retrieving firewall information: %s", err) + } + if firewalls[0].NetworkFirewall.BillingItem == nil { + return false, nil + } + return true, nil +} + +//This function takes two lists and returns the difference between the two lists +//listdifference([1,2] [2,3]) = [1] +func listdifference(a, b []string) []string { + mb := map[string]bool{} + for _, x := range b { + mb[x] = true + } + ab := []string{} + for _, x := range a { + if _, ok := mb[x]; !ok { + ab = append(ab, x) + } + } + return ab +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_gateway.go new file mode 100644 index 00000000000..45ff01daba6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_gateway.go @@ -0,0 +1,1129 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "fmt" + "log" + "math/rand" + "reflect" + "strconv" + "strings" + "time" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const highAvailability = "HA" +const GATEWAY_APPLIANCE_CLUSTER = "NETWORK_GATEWAY_APPLIANCE_CLUSTER" + +func resourceIBMNetworkGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMNetworkGatewayCreate, + Read: resourceIBMNetworkGatewayRead, + Update: resourceIBMNetworkGatewayUpdate, + Delete: resourceIBMNetworkGatewayDelete, + Exists: resourceIBMNetworkGatewayExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the gateway", + }, + + "ssh_key_ids": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + ForceNew: true, + DiffSuppressFunc: applyOnce, + }, + + "post_install_script_uri": { + Type: schema.TypeString, + Optional: true, + Default: nil, + ForceNew: true, + DiffSuppressFunc: applyOnce, + }, + + "private_ip_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "private_ipv4_address": { + Type: schema.TypeString, + Computed: true, + }, + "public_ipv4_address": { + Type: 
schema.TypeString,
+				Computed: true,
+			},
+			"private_vlan_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"public_ip_address_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"public_ipv6_address_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"public_vlan_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"status": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"members": {
+				Type:        schema.TypeSet,
+				Description: "The hardware members of this network Gateway",
+				Required:    true,
+				MinItems:    1,
+				MaxItems:    2,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"member_id": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"hostname": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							ForceNew:    true,
+							DefaultFunc: genID,
+							DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {
+								// FIXME: Work around another bug in terraform.
+								// When a default function is used with an optional property,
+								// terraform will always execute it on apply, even when the property
+								// already has a value in the state for it. This causes a false diff.
+								// Making the property Computed:true does not make a difference.
+								if strings.HasPrefix(o, "terraformed-") && strings.HasPrefix(n, "terraformed-") {
+									return true
+								}
+								return o == n
+							},
+						},
+
+						"domain": {
+							Type:     schema.TypeString,
+							Required: true,
+							ForceNew: true,
+						},
+
+						"notes": {
+							Type:     schema.TypeString,
+							Optional: true,
+							ForceNew: true,
+						},
+
+						"datacenter": {
+							Type:     schema.TypeString,
+							Required: true,
+							ForceNew: true,
+						},
+
+						"network_speed": {
+							Type:     schema.TypeInt,
+							Optional: true,
+							Default:  100,
+							ForceNew: true,
+						},
+
+						"tcp_monitoring": {
+							Type:             schema.TypeBool,
+							Optional:         true,
+							Default:          false,
+							ForceNew:         true,
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"package_key_name": {
+							Type:             schema.TypeString,
+							Optional:         true,
+							Default:          "NETWORK_GATEWAY_APPLIANCE",
+							ForceNew:         true,
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"redundant_power_supply": {
+							Type:             schema.TypeBool,
+							Optional:         true,
+							Default:          false,
+							ForceNew:         true,
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"process_key_name": {
+							Type:             schema.TypeString,
+							Optional:         true,
+							ForceNew:         true,
+							Default:          "INTEL_SINGLE_XEON_1270_3_50",
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"os_key_name": {
+							Type:             schema.TypeString,
+							Optional:         true,
+							ForceNew:         true,
+							Default:          "OS_VYATTA_5600_5_X_UP_TO_1GBPS_SUBSCRIPTION_EDITION_64_BIT",
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"redundant_network": {
+							Type:             schema.TypeBool,
+							Optional:         true,
+							Default:          false,
+							ForceNew:         true,
+							DiffSuppressFunc: applyOnce,
+						},
+						"unbonded_network": {
+							Type:             schema.TypeBool,
+							Optional:         true,
+							Default:          false,
+							ForceNew:         true,
+							DiffSuppressFunc: applyOnce,
+						},
+						"tags": {
+							Type:     schema.TypeSet,
+							Optional: true,
+							ForceNew: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+							Set:      schema.HashString,
+						},
+						"public_bandwidth": {
+							Type:             schema.TypeInt,
+							Optional:         true,
+							ForceNew:         true,
+							Default:          20000,
+							DiffSuppressFunc: applyOnce,
+						},
+						"memory": {
+							Type:     schema.TypeInt,
+							Required: true,
+							// Sometimes the memory comes back different from the API.
+							// Since this resource is immutable at this point and memory
+							// can't really be updated, suppress the change until we
+							// figure out how to handle it.
+							DiffSuppressFunc: applyOnce,
+							ForceNew:         true,
+						},
+						"storage_groups": {
+							Type:     schema.TypeList,
+							Optional: true,
+							ForceNew: true,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"array_type_id": {
+										Type:     schema.TypeInt,
+										Required: true,
+									},
+									"hard_drives": {
+										Type:     schema.TypeList,
+										Elem:     &schema.Schema{Type: schema.TypeInt},
+										Required: true,
+									},
+									"array_size": {
+										Type:     schema.TypeInt,
+										Optional: true,
+									},
+									"partition_template_id": {
+										Type:     schema.TypeInt,
+										Optional: true,
+									},
+								},
+							},
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"ssh_key_ids": {
+							Type:             schema.TypeList,
+							Optional:         true,
+							Elem:             &schema.Schema{Type: schema.TypeInt},
+							ForceNew:         true,
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"post_install_script_uri": {
+							Type:             schema.TypeString,
+							Optional:         true,
+							Default:          nil,
+							ForceNew:         true,
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"user_metadata": {
+							Type:     schema.TypeString,
+							Optional: true,
+							ForceNew: true,
+						},
+
+						"disk_key_names": {
+							Type:             schema.TypeList,
+							Optional:         true,
+							ForceNew:         true,
+							Elem:             &schema.Schema{Type: schema.TypeString},
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"public_vlan_id": {
+							Type:             schema.TypeInt,
+							Optional:         true,
+							ForceNew:         true,
+							Computed:         true,
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"private_vlan_id": {
+							Type:             schema.TypeInt,
+							Optional:         true,
+							ForceNew:         true,
+							Computed:         true,
+							DiffSuppressFunc: applyOnce,
+						},
+
+						"public_ipv4_address": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+
+						"private_ipv4_address": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"ipv6_enabled": {
+							Type:     schema.TypeBool,
+							Optional: true,
+							ForceNew: true,
+							Default:  true,
+						},
+
+						"ipv6_address": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"private_network_only": {
+							Type:     schema.TypeBool,
+							Optional: true,
+							Default:  false,
+							ForceNew: true,
+						},
+					},
+				},
+				Set: resourceIBMMemberHostHash,
+			},
+
+			"associated_vlans": {
+				Type:        schema.TypeList,
+				Description: "The VLAN instances associated with this Network Gateway",
+				Computed:    true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"vlan_id": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"network_vlan_id": {
+							Type:        schema.TypeInt,
+							Description: "The Identifier of the VLAN which is associated",
+							Computed:    true,
+						},
+						"bypass": {
+							Type:        schema.TypeBool,
+							Description: "Indicates if the VLAN is in bypass or routed mode",
+							Default:     nil,
+							Computed:    true,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func resourceIBMNetworkGatewayCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	members := []gatewayMember{}
+	for _, v := range d.Get("members").(*schema.Set).List() {
+		m := v.(map[string]interface{})
+		members = append(members, m)
+	}
+
+	if len(members) == 2 {
+		if !areVlanCompatible(members) {
+			return fmt.Errorf("Members should have exactly the same public and private vlan configuration, " +
+				"please check the public_vlan_id and private_vlan_id properties on the individual members")
+		}
+	}
+
+	// Build the order for one member
+	order, err := getMonthlyGatewayOrder(members[0], meta)
+	if err != nil {
+		return fmt.Errorf(
+			"Encountered problem trying to get the Gateway order template: %s", err)
+	}
+	err = setHardwareOptions(members[0], &order.Hardware[0])
+	if err != nil {
+		return fmt.Errorf(
+			"Encountered problem trying to configure Gateway options:
%s", err) + } + + // two members can be ordered together if they have same hardware configuration + // and differ only in hostname, domain, user_metadata, post_install_script_uri etc + sameOrder := canBeOrderedTogether(members) + + // Set SSH Key on main order + ssh_key_ids := d.Get("ssh_key_ids").([]interface{}) + if len(ssh_key_ids) > 0 { + order.SshKeys = make([]datatypes.Container_Product_Order_SshKeys, 0) + ids := make([]int, len(ssh_key_ids)) + for i, ssh_key_id := range ssh_key_ids { + ids[i] = ssh_key_id.(int) + } + order.SshKeys = append(order.SshKeys, datatypes.Container_Product_Order_SshKeys{ + SshKeyIds: ids, + }) + } + // Set post_install_script_uri on main order + if v, ok := d.GetOk("post_install_script_uri"); ok { + order.ProvisionScripts = []string{v.(string)} + } + + var productOrder datatypes.Container_Product_Order + + if sameOrder { + //Ordering HA + order.Quantity = sl.Int(2) + order.Hardware = append(order.Hardware, datatypes.Hardware{ + Hostname: sl.String(members[1]["hostname"].(string)), + Domain: sl.String(members[1]["domain"].(string)), + }) + err = setHardwareOptions(members[1], &order.Hardware[1]) + if err != nil { + return fmt.Errorf( + "Encountered problem trying to configure Gateway options: %s", err) + } + + } + + mSshKeys := make([]datatypes.Container_Product_Order_SshKeys, 0) + for _, h := range order.Hardware { + ids := make([]int, 0) + for _, id := range h.SshKeys { + ids = append(ids, *id.Id) + } + if len(ids) > 0 { + mSshKeys = append(mSshKeys, datatypes.Container_Product_Order_SshKeys{ + SshKeyIds: ids, + }) + } + } + + //Create the Gateway Appliance order + // 1. Find a package id using Gateway package key name. + pkg, err := getPackageByModelGateway(sess, GATEWAY_APPLIANCE_CLUSTER, false) + + if err != nil { + return err + } + + if pkg.Id == nil { + return err + } + + // 2. Get all prices for the package + items, err := product.GetPackageProducts(sess, *pkg.Id, productItemMaskWithPriceLocationGroupID) + if err != nil { + return err + } + + // 3. 
Build price items
+	gwCluster, err := getItemPriceId(items, "gateway_resource_group", "GATEWAY_APPLIANCE_CLUSTER")
+	if err != nil {
+		return err
+	}
+
+	productOrder = datatypes.Container_Product_Order{
+		OrderContainers: []datatypes.Container_Product_Order{
+			{
+				ComplexType: sl.String("SoftLayer_Container_Product_Order_Hardware_Server_Gateway_Appliance"),
+				Quantity:    order.Quantity,
+				PackageId:   order.PackageId,
+				Prices:      order.Prices,
+				Hardware:    order.Hardware,
+				Location:    order.Location,
+			},
+			{
+				ComplexType: sl.String("SoftLayer_Container_Product_Order_Gateway_Appliance_Cluster"),
+				Quantity:    sl.Int(1),
+				PackageId:   pkg.Id,
+				Prices: []datatypes.Product_Item_Price{
+					gwCluster,
+				},
+			},
+		},
+	}
+
+	if len(mSshKeys) > 0 {
+		productOrder.OrderContainers[0].SshKeys = mSshKeys
+	}
+
+	if len(order.SshKeys) > 0 {
+		productOrder.OrderContainers[1].SshKeys = order.SshKeys
+	}
+
+	_, err = services.GetProductOrderService(sess).VerifyOrder(&productOrder)
+	if err != nil {
+		return fmt.Errorf(
+			"Encountered problem trying to verify the order: %s", err)
+	}
+	orderReceipt, err := services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder(&productOrder, sl.Bool(false))
+	if err != nil {
+		return fmt.Errorf(
+			"Encountered problem trying to place the order: %s", err)
+	}
+
+	gID := *orderReceipt.OrderDetails.OrderContainers[0].Hardware[0].GlobalIdentifier
+	bm, err := waitForNetworkGatewayMemberProvision(&order.Hardware[0], meta, gID)
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for Gateway (%s) to become ready: %s", d.Id(), err)
+	}
+
+	id := *bm.(datatypes.Hardware).NetworkGatewayMember.NetworkGatewayId
+	d.SetId(fmt.Sprintf("%d", id))
+	log.Printf("[INFO] Gateway ID: %s", d.Id())
+
+	member1Id := *bm.(datatypes.Hardware).Id
+	members[0]["member_id"] = member1Id
+	log.Printf("[INFO] Member 1 ID: %d", member1Id)
+
+	err = setTagsAndNotes(members[0], meta)
+	if err != nil {
+		return err
+	}
+
+	if sameOrder {
+		// If we ordered HA, wait for the other member as well
+		gID1 := *orderReceipt.OrderDetails.OrderContainers[0].Hardware[1].GlobalIdentifier
+		bm, err := waitForNetworkGatewayMemberProvision(&order.Hardware[1], meta, gID1)
+		if err != nil {
+			return fmt.Errorf(
+				"Error waiting for Gateway (%s) to become ready: %s", d.Id(), err)
+		}
+		member2Id := *bm.(datatypes.Hardware).Id
+		log.Printf("[INFO] Member 2 ID: %d", member2Id)
+		members[1]["member_id"] = member2Id
+		err = setTagsAndNotes(members[1], meta)
+		if err != nil {
+			return err
+		}
+	} else if len(members) == 2 {
+		// Add the new gateway member, which has a different configuration than the first
+		err := addGatewayMember(id, members[1], meta)
+		if err != nil {
+			return err
+		}
+	}
+
+	name := d.Get("name").(string)
+	err = updateGatewayName(id, name, meta)
+	if err != nil {
+		return err
+	}
+
+	return resourceIBMNetworkGatewayRead(d, meta)
+}
+
+func randomString(length int) string {
+	charset :=
+		"abcdefghijklmnopqrstuvwxyz" +
+			"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	var seededRand *rand.Rand = rand.New(
+		rand.NewSource(time.Now().UnixNano()))
+	b := make([]byte, length)
+	for i := range b {
+		b[i] = charset[seededRand.Intn(len(charset))]
+	}
+	return string(b)
+}
+
+func resourceIBMMemberHostHash(v interface{}) int {
+	var buf bytes.Buffer
+	m := v.(map[string]interface{})
+	buf.WriteString(fmt.Sprintf("%s-",
+		m["hostname"].(string)))
+
+	return hashcode.String(buf.String())
+}
+
+func resourceIBMNetworkGatewayRead(d *schema.ResourceData, meta interface{}) error {
+	service :=
services.GetNetworkGatewayService(meta.(ClientSession).SoftLayerSession()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + result, err := service.Id(id).Mask( + "insideVlans,members,status,privateIpAddress[ipAddress],publicIpAddress[ipAddress]," + + "members[hardware],members[hardware[datacenter]]," + + "members[hardware[primaryNetworkComponent[primaryVersion6IpAddressRecord]]],members[hardware[backendNetworkComponents,primaryBackendNetworkComponent[redundancyEnabledFlag]," + + "tagReferences,primaryIpAddress,primaryBackendIpAddress,userData," + + "primaryNetworkComponent[primaryVersion6IpAddressRecord],privateNetworkOnlyFlag," + + "powerSupplyCount,primaryNetworkComponent[networkVlan],memoryCapacity,networkVlans[id,vlanNumber]]]", + ).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving Network Gateway: %s", err) + } + d.Set("name", result.Name) + if result.PrivateIpAddress != nil { + d.Set("private_ipv4_address", result.PrivateIpAddress.IpAddress) + } + if result.PublicIpAddress != nil { + d.Set("public_ipv4_address", result.PublicIpAddress.IpAddress) + } + d.Set("private_ip_address_id", result.PrivateIpAddressId) + d.Set("private_vlan_id", result.PrivateVlanId) + d.Set("public_ip_address_id", result.PublicIpAddressId) + d.Set("public_ipv6_address_id", result.PublicIpv6AddressId) + d.Set("public_vlan_id", result.PublicVlanId) + d.Set("status", result.Status.Name) + d.Set("members", flattenGatewayMembers(d, result.Members)) + d.Set("associated_vlans", flattenGatewayVlans(result.InsideVlans)) + + //Set default connection info + connInfo := map[string]string{"type": "ssh", "user": "vyatta"} + if result.PublicIpAddress != nil { + connInfo["host"] = *result.PublicIpAddress.IpAddress + } else { + connInfo["host"] = *result.PrivateIpAddress.IpAddress + } + d.SetConnInfo(connInfo) + + return nil +} + +func updateGatewayName(id int, name string, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkGatewayService(sess) + _, err := service.Id(id).EditObject(&datatypes.Network_Gateway{ + Name: sl.String(name), + }) + if err != nil { + return fmt.Errorf("Couldn't set the gateway name to %s", name) + } + return err +} + +func addGatewayMember(gwID int, member gatewayMember, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + order, err := getMonthlyGatewayOrder(member, meta) + if err != nil { + return fmt.Errorf( + "Encountered problem trying to get the Gateway order template: %s", err) + } + err = setHardwareOptions(member, &order.Hardware[0]) + if err != nil { + return fmt.Errorf( + "Encountered problem trying to configure Gateway options: %s", err) + } + + haOrder := datatypes.Container_Product_Order_Hardware_Server_Gateway_Appliance{} + haOrder.ContainerIdentifier = order.ContainerIdentifier + haOrder.Hardware = order.Hardware + haOrder.PackageId = order.PackageId + haOrder.Location = order.Location + haOrder.Prices = order.Prices + haOrder.ClusterResourceId = sl.Int(gwID) + haOrder.ClusterOrderType = sl.String(highAvailability) + ssh_key_ids := member.Get("ssh_key_ids").([]interface{}) + if len(ssh_key_ids) > 0 { + sshKeyS := make([]int, len(ssh_key_ids)) + for i, ssh_key_id := range ssh_key_ids { + sshKeyS[i] = ssh_key_id.(int) + } + haOrder.SshKeys = make([]datatypes.Container_Product_Order_SshKeys, 1) + haOrder.SshKeys[0] = datatypes.Container_Product_Order_SshKeys{ + SshKeyIds: sshKeyS, + } + } + + _, err = 
services.GetProductOrderService(sess).VerifyOrder(&haOrder)
+	if err != nil {
+		return fmt.Errorf(
+			"Encountered problem trying to verify the order: %s", err)
+	}
+	orderReceipt, err := services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder(&haOrder, sl.Bool(false))
+	if err != nil {
+		return fmt.Errorf(
+			"Encountered problem trying to place the order: %s", err)
+	}
+
+	gID := *orderReceipt.OrderDetails.Hardware[0].GlobalIdentifier
+
+	bm, err := waitForNetworkGatewayMemberProvision(&order.Hardware[0], meta, gID)
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for Gateway (%d) to become ready: %s", gwID, err)
+	}
+	id := *bm.(datatypes.Hardware).Id
+	log.Printf("[INFO] Newly added member ID: %d", id)
+	member["member_id"] = id
+	err = setTagsAndNotes(member, meta)
+	return err
+}
+
+func resourceIBMNetworkGatewayUpdate(d *schema.ResourceData, meta interface{}) error {
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+	if d.HasChange("name") {
+		gwName := d.Get("name").(string)
+		err := updateGatewayName(id, gwName, meta)
+		if err != nil {
+			return err
+		}
+	}
+	return resourceIBMNetworkGatewayRead(d, meta)
+}
+
+func resourceIBMNetworkGatewayDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+	service := services.GetNetworkGatewayService(sess)
+	gw, err := service.Id(id).Mask("members[hardwareId]").GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving Network Gateway: %s", err)
+	}
+	for _, v := range gw.Members {
+		m := gatewayMember{
+			"member_id": *v.HardwareId,
+		}
+		err := deleteHardware(m, meta)
+		if err != nil {
+			return err
+		}
+	}
+	// Once both hardware members have been deleted, the gateway goes away as well
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMNetworkGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	service := services.GetNetworkGatewayService(meta.(ClientSession).SoftLayerSession())
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(id).GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); !ok || apiErr.StatusCode != 404 {
+			return false, fmt.Errorf("Error trying to retrieve Network Gateway: %s", err)
+		}
+	}
+
+	return result.Id != nil && *result.Id == id, nil
+}
+
+func getMonthlyGatewayOrder(d dataRetriever, meta interface{}) (datatypes.Container_Product_Order, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	// Validate attributes for network gateway ordering.
+	model := d.Get("package_key_name")
+
+	datacenter := d.Get("datacenter")
+
+	osKeyName := d.Get("os_key_name")
+
+	process_key_name := d.Get("process_key_name")
+
+	dc, err := location.GetDatacenterByName(sess, datacenter.(string), "id")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	// 1. Find a package id using the Gateway package key name.
+	pkg, err := getPackageByModelGateway(sess, model.(string), true)
+
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	if pkg.Id == nil {
+		return datatypes.Container_Product_Order{}, fmt.Errorf("No package found with key name %s", model)
+	}
+
+	// 2. Get all prices for the package
+	items, err := product.GetPackageProducts(sess, *pkg.Id, productItemMaskWithPriceLocationGroupID)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	// 3.
Build price items
+	server, err := getItemPriceId(items, "server", process_key_name.(string))
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	os, err := getItemPriceId(items, "os", osKeyName.(string))
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	ram, err := findMemoryItemPriceId(items, d)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	portSpeed, err := findNetworkItemPriceId(items, d)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	monitoring, err := getItemPriceId(items, "monitoring", "MONITORING_HOST_PING")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+	if d.Get("tcp_monitoring").(bool) {
+		monitoring, err = getItemPriceId(items, "monitoring", "MONITORING_HOST_PING_AND_TCP_SERVICE")
+		if err != nil {
+			return datatypes.Container_Product_Order{}, err
+		}
+	}
+	// Other common default options
+	priIpAddress, err := getItemPriceId(items, "pri_ip_addresses", "1_IP_ADDRESS")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	remoteManagement, err := getItemPriceId(items, "remote_management", "REBOOT_KVM_OVER_IP")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+	vpnManagement, err := getItemPriceId(items, "vpn_management", "UNLIMITED_SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	notification, err := getItemPriceId(items, "notification", "NOTIFICATION_EMAIL_AND_TICKET")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+	response, err := getItemPriceId(items, "response", "AUTOMATED_NOTIFICATION")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+	vulnerabilityScanner, err := getItemPriceId(items, "vulnerability_scanner", "NESSUS_VULNERABILITY_ASSESSMENT_REPORTING")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	sriovEnabled, err := getItemPriceId(items, "sriov_enabled", "SRIOV_ENABLED")
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	// Define an order object using basic parameters.
+
+	order := datatypes.Container_Product_Order{
+		ContainerIdentifier: sl.String(d.Get("hostname").(string)),
+		Quantity:            sl.Int(1),
+		Hardware: []datatypes.Hardware{
+			{
+				Hostname: sl.String(d.Get("hostname").(string)),
+				Domain:   sl.String(d.Get("domain").(string)),
+			},
+		},
+		Location:  sl.String(strconv.Itoa(*dc.Id)),
+		PackageId: pkg.Id,
+		Prices: []datatypes.Product_Item_Price{
+			server,
+			os,
+			ram,
+			portSpeed,
+			priIpAddress,
+			remoteManagement,
+			vpnManagement,
+			monitoring,
+			notification,
+			response,
+			vulnerabilityScanner,
+			sriovEnabled,
+		},
+	}
+
+	// Add optional price ids.
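+	// NOTE: Each optional item below follows the same lookup pattern: resolve a
+	// Product_Item_Price from the package items via getItemPriceId using a
+	// category code and an item key name, then append it to order.Prices so it
+	// is included when the order is placed.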
+ // Add public bandwidth + + publicBandwidth := d.Get("public_bandwidth") + publicBandwidthStr := "BANDWIDTH_" + strconv.Itoa(publicBandwidth.(int)) + "_GB" + bandwidth, err := getItemPriceId(items, "bandwidth", publicBandwidthStr) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + order.Prices = append(order.Prices, bandwidth) + privateNetworkOnly := d.Get("private_network_only").(bool) + if d.Get("ipv6_enabled").(bool) { + if privateNetworkOnly { + return datatypes.Container_Product_Order{}, fmt.Errorf("Unable to configure a public IPv6 address with a private_network_only option") + } + keyName := "1_IPV6_ADDRESS" + price, err := getItemPriceId(items, "pri_ipv6_addresses", keyName) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + order.Prices = append(order.Prices, price) + } + + if d.Get("redundant_power_supply").(bool) { + powerSupply, err := getItemPriceId(items, "power_supply", "REDUNDANT_POWER_SUPPLY") + if err != nil { + return datatypes.Container_Product_Order{}, err + } + order.Prices = append(order.Prices, powerSupply) + } + + // Add prices of disks. + disks := d.Get("disk_key_names").([]interface{}) + diskLen := len(disks) + if diskLen > 0 { + for i, disk := range disks { + diskPrice, err := getItemPriceId(items, "disk"+strconv.Itoa(i), disk.(string)) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + order.Prices = append(order.Prices, diskPrice) + } + } + + // Add storage_groups for RAID configuration + diskController, err := getItemPriceId(items, "disk_controller", "DISK_CONTROLLER_NONRAID") + if err != nil { + return datatypes.Container_Product_Order{}, err + } + + if _, ok := d.GetOk("storage_groups"); ok { + order.StorageGroups = getStorageGroupsFromResourceData(d) + diskController, err = getItemPriceId(items, "disk_controller", "DISK_CONTROLLER_RAID") + if err != nil { + return datatypes.Container_Product_Order{}, err + } + } + order.Prices = append(order.Prices, diskController) + + return order, nil +} + +func getPackageByModelGateway(sess *session.Session, model string, isGateway bool) (datatypes.Product_Package, error) { + objectMask := "id,keyName,name,description,isActive,type[keyName],categories[id,name,categoryCode]" + service := services.GetProductPackageService(sess) + availableModels := "" + filterStr := "" + if isGateway { + filterStr = "{\"items\": {\"categories\": {\"categoryCode\": {\"operation\":\"server\"}}},\"type\": {\"keyName\": {\"operation\":\"BARE_METAL_GATEWAY\"}}}" + } else { + filterStr = "{\"type\": {\"keyName\": {\"operation\":\"GATEWAY_RESOURCE_GROUP\"}}}" + } + + // Get package id + packages, err := service.Mask(objectMask). + Filter(filterStr).GetAllObjects() + if err != nil { + return datatypes.Product_Package{}, err + } + for _, pkg := range packages { + availableModels = availableModels + *pkg.KeyName + if pkg.Description != nil { + availableModels = availableModels + " ( " + *pkg.Description + " ), " + } else { + availableModels = availableModels + ", " + } + if *pkg.KeyName == model { + return pkg, nil + } + } + return datatypes.Product_Package{}, fmt.Errorf("No Gateway package key name for %s. 
Available package key names: %s", model, availableModels)
+}
+func setHardwareOptions(m gatewayMember, hardware *datatypes.Hardware) error {
+	public_vlan_id := m.Get("public_vlan_id").(int)
+	if public_vlan_id > 0 {
+		hardware.PrimaryNetworkComponent = &datatypes.Network_Component{
+			NetworkVlan: &datatypes.Network_Vlan{Id: sl.Int(public_vlan_id)},
+		}
+	}
+
+	private_vlan_id := m.Get("private_vlan_id").(int)
+	if private_vlan_id > 0 {
+		hardware.PrimaryBackendNetworkComponent = &datatypes.Network_Component{
+			NetworkVlan: &datatypes.Network_Vlan{Id: sl.Int(private_vlan_id)},
+		}
+	}
+
+	if userMetadata, ok := m.GetOk("user_metadata"); ok {
+		hardware.UserData = []datatypes.Hardware_Attribute{
+			{Value: sl.String(userMetadata.(string))},
+		}
+	}
+
+	if v, ok := m.GetOk("post_install_script_uri"); ok {
+		hardware.PostInstallScriptUri = sl.String(v.(string))
+	}
+
+	// Get configured ssh_keys
+	ssh_key_ids := m.Get("ssh_key_ids").([]interface{})
+	if len(ssh_key_ids) > 0 {
+		hardware.SshKeys = make([]datatypes.Security_Ssh_Key, 0, len(ssh_key_ids))
+		for _, ssh_key_id := range ssh_key_ids {
+			hardware.SshKeys = append(hardware.SshKeys, datatypes.Security_Ssh_Key{
+				Id: sl.Int(ssh_key_id.(int)),
+			})
+		}
+	}
+
+	return nil
+}
+
+// Network gateway and bare metal creation do not return an object with an ID.
+// We have to wait for the provision date to become available on the server that
+// matches the hostname and domain.
+// http://sldn.softlayer.com/blog/bpotter/ordering-bare-metal-servers-using-softlayer-api
+func waitForNetworkGatewayMemberProvision(d *datatypes.Hardware, meta interface{}, globalIdentifier string) (interface{}, error) {
+	hostname := *d.Hostname
+	domain := *d.Domain
+	log.Printf("Waiting for Gateway (%s.%s) to be provisioned", hostname, domain)
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"retry", "pending"},
+		Target:  []string{"provisioned"},
+		Refresh: func() (interface{}, string, error) {
+			service := services.GetAccountService(meta.(ClientSession).SoftLayerSession())
+			bms, err := service.Filter(
+				filter.Build(
+					filter.Path("hardware.globalIdentifier").Eq(globalIdentifier)),
+			).Mask("id,provisionDate,networkGatewayMember[networkGatewayId]").GetHardware()
+			if err != nil {
+				return false, "retry", nil
+			}
+
+			if len(bms) == 0 || bms[0].ProvisionDate == nil {
+				return datatypes.Hardware{}, "pending", nil
+			} else {
+				return bms[0], "provisioned", nil
+			}
+		},
+		Timeout:        24 * time.Hour,
+		Delay:          10 * time.Second,
+		MinTimeout:     1 * time.Minute,
+		NotFoundChecks: 24 * 60,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func setTagsAndNotes(m gatewayMember, meta interface{}) error {
+	err := setHardwareTags(m["member_id"].(int), m, meta)
+	if err != nil {
+		return err
+	}
+
+	if m["notes"].(string) != "" {
+		err := setHardwareNotes(m["member_id"].(int), m, meta)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// New types to reuse functions from other resources that do the same job.
+// Essentially they mimic the schema.ResourceData getter functions.
+type dataRetriever interface {
+	Get(string) interface{}
+	GetOk(string) (interface{}, bool)
+	Id() string
+}
+type gatewayMember map[string]interface{}
+
+func (m gatewayMember) Get(k string) interface{} {
+	if k == "restricted_network" || k == "hourly_billing" {
+		// findNetworkItemPriceId is used from bare metal and that looks for this key.
+		// deleteHardware looks for hourly_billing.
+		// We won't need this once we support those speeds on the gateway.
+		return false
+	}
+	return m[k]
+}
+func (m gatewayMember)
GetOk(k string) (i interface{}, ok bool) {
+	i, ok = m[k]
+	if ok {
+		if k == "storage_groups" {
+			return i, len(i.([]interface{})) > 0
+		}
+		if k == "user_metadata" || k == "post_install_script_uri" {
+			return i, len(i.(string)) > 0
+		}
+	}
+	return
+}
+
+func (m gatewayMember) Id() string {
+	return strconv.Itoa(m["member_id"].(int))
+}
+
+func areVlanCompatible(m []gatewayMember) bool {
+	if m[0]["public_vlan_id"].(int) != m[1]["public_vlan_id"].(int) {
+		return false
+	}
+	if m[0]["private_vlan_id"].(int) != m[1]["private_vlan_id"].(int) {
+		return false
+	}
+	return true
+}
+
+func canBeOrderedTogether(members []gatewayMember) bool {
+	if len(members) != 2 {
+		return false
+	}
+	m1 := members[0]
+	m2 := members[1]
+	for k, v := range m1 {
+		if k == "hostname" ||
+			k == "domain" ||
+			k == "notes" ||
+			k == "tags" ||
+			k == "public_vlan_id" ||
+			k == "private_vlan_id" ||
+			k == "user_metadata" ||
+			k == "post_install_script_uri" {
+			continue
+		}
+
+		// If other hardware configurations are not equal, then the members
+		// can't be ordered together. For example, different memory.
+		if !reflect.DeepEqual(v, m2[k]) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_gateway_vlan_attachment.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_gateway_vlan_attachment.go
new file mode 100644
index 00000000000..2df1c35e206
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_gateway_vlan_attachment.go
@@ -0,0 +1,252 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMNetworkGatewayVlanAttachment() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMNetworkGatewayVlanAttachmentCreate,
+		Read:     resourceIBMNetworkGatewayVlanAttachmentRead,
+		Update:   resourceIBMNetworkGatewayVlanAttachmentUpdate,
+		Delete:   resourceIBMNetworkGatewayVlanAttachmentDelete,
+		Exists:   resourceIBMNetworkGatewayVlanAttachmentExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"gateway_id": {
+				Type:        schema.TypeInt,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Gateway instance ID",
+			},
+			"network_vlan_id": {
+				Type:        schema.TypeInt,
+				Description: "The Identifier of the VLAN to be associated",
+				Required:    true,
+				ForceNew:    true,
+			},
+			"bypass": {
+				Type:        schema.TypeBool,
+				Description: "Indicates if the VLAN should be in bypass or routed mode",
+				Default:     true,
+				Optional:    true,
+			},
+		},
+	}
+}
+
+func resourceIBMNetworkGatewayVlanAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
+	gatewayID := d.Get("gateway_id").(int)
+	networkVlanID := d.Get("network_vlan_id").(int)
+	bypass := d.Get("bypass").(bool)
+
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkGatewayService(sess)
+	vlanService := services.GetNetworkGatewayVlanService(sess)
+	result, err := service.Id(gatewayID).Mask(
+		"insideVlans",
+	).GetObject()
+	if err == nil && len(result.InsideVlans) > 0 {
+		insideVlans := result.InsideVlans
+		for _, i := range insideVlans {
+			if *i.NetworkVlanId == networkVlanID {
+				if bypass !=
*i.BypassFlag {
+					if !bypass {
+						err = vlanService.Id(*i.Id).Unbypass()
+						if err != nil {
+							return err
+						}
+					} else {
+						err = vlanService.Id(*i.Id).Bypass()
+						if err != nil {
+							return err
+						}
+					}
+					_, err = waitForNetworkGatewayActiveState(*i.NetworkGatewayId, meta)
+					if err != nil {
+						return err
+					}
+				}
+				vlan, err := vlanService.Id(*i.Id).GetObject()
+				if err != nil {
+					return fmt.Errorf("Error trying to retrieve Network Gateway Vlan: %s", err)
+				}
+				d.SetId(fmt.Sprintf("%d", *vlan.Id))
+				d.Set("bypass", vlan.BypassFlag)
+				d.Set("gateway_id", vlan.NetworkGatewayId)
+				d.Set("network_vlan_id", vlan.NetworkVlanId)
+				return nil
+			}
+		}
+	}
+
+	vlan := datatypes.Network_Gateway_Vlan{
+		NetworkGatewayId: sl.Int(gatewayID),
+		BypassFlag:       sl.Bool(bypass),
+		NetworkVlanId:    sl.Int(networkVlanID),
+	}
+
+	resp, err := resourceIBMNetworkGatewayVlanAssociate(d, meta, vlan)
+	if err != nil {
+		return err
+	}
+	d.SetId(fmt.Sprintf("%d", *resp.Id))
+	_, err = waitForNetworkGatewayActiveState(gatewayID, meta)
+	if err != nil {
+		return err
+	}
+	return resourceIBMNetworkGatewayVlanAttachmentRead(d, meta)
+}
+
+func resourceIBMNetworkGatewayVlanAttachmentRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	vlan, err := services.GetNetworkGatewayVlanService(sess).Id(id).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error trying to retrieve Network Gateway Vlan: %s", err)
+	}
+	d.Set("gateway_id", vlan.NetworkGatewayId)
+	d.Set("network_vlan_id", vlan.NetworkVlanId)
+	d.Set("bypass", vlan.BypassFlag)
+	return nil
+}
+
+func resourceIBMNetworkGatewayVlanAttachmentUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkGatewayVlanService(sess)
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+	if d.HasChange("bypass") {
+		bypass := d.Get("bypass").(bool)
+		var err error
+		if !bypass {
+			err = service.Id(id).Unbypass()
+			if err != nil {
+				return err
+			}
+		} else {
+			err = service.Id(id).Bypass()
+			if err != nil {
+				return err
+			}
+		}
+		vlan, err := service.Id(id).GetObject()
+		if err != nil {
+			return fmt.Errorf("Error trying to retrieve Network Gateway Vlan: %s", err)
+		}
+		_, err = waitForNetworkGatewayActiveState(*vlan.NetworkGatewayId, meta)
+		if err != nil {
+			return err
+		}
+	}
+
+	return resourceIBMNetworkGatewayVlanAttachmentRead(d, meta)
+}
+
+func resourceIBMNetworkGatewayVlanAttachmentExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	service := services.GetNetworkGatewayVlanService(meta.(ClientSession).SoftLayerSession())
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+	result, err := service.Id(id).GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); !ok || apiErr.StatusCode != 404 {
+			return false, fmt.Errorf("Error trying to retrieve Network Gateway Vlan: %s", err)
+		}
+	}
+	return result.Id != nil && *result.Id == id, nil
+}
+
+func resourceIBMNetworkGatewayVlanAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+	vlan, err := services.GetNetworkGatewayVlanService(meta.(ClientSession).SoftLayerSession()).Id(id).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error trying to retrieve Network Gateway Vlan: %s", err)
+	}
+
+	err = resourceIBMNetworkGatewayVlanDissociate(d, meta)
+	if err != nil {
+		return err
+	}
+
+	_, err = waitForNetworkGatewayActiveState(*vlan.NetworkGatewayId, meta)
+	if err != nil {
+		return err
+	}
+
+	return nil
+
+}
+
+func resourceIBMNetworkGatewayVlanAssociate(d *schema.ResourceData, meta interface{}, vlan datatypes.Network_Gateway_Vlan) (resp datatypes.Network_Gateway_Vlan, err error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	resp, err = services.GetNetworkGatewayVlanService(sess).CreateObject(&vlan)
+	if err != nil {
+		return resp, fmt.Errorf(
+			"Encountered problem trying to associate the VLAN: %s", err)
+	}
+	return resp, nil
+}
+
+func resourceIBMNetworkGatewayVlanDissociate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+	err = services.GetNetworkGatewayVlanService(sess).Id(id).DeleteObject()
+	if err != nil {
+		return fmt.Errorf(
+			"Encountered problem trying to dissociate the VLAN: %s", err)
+	}
+	return nil
+}
+
+func waitForNetworkGatewayActiveState(id int, meta interface{}) (interface{}, error) {
+	log.Printf("Waiting for Gateway (%d) to be active", id)
+	service := services.GetNetworkGatewayService(meta.(ClientSession).SoftLayerSession())
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"updating"},
+		Target:  []string{"Active"},
+		Refresh: func() (interface{}, string, error) {
+			gw, err := service.Id(id).GetStatus()
+			if err != nil {
+				return false, "updating", nil
+			}
+
+			if gw.Name != nil && *gw.Name == "Active" {
+				log.Printf("Gateway (%d) is active", id)
+				return gw, "Active", nil
+			}
+			// Guard against a nil status name before dereferencing it
+			if gw.Name != nil {
+				log.Printf("Gateway (%d) status is %s", id, *gw.Name)
+			}
+			return gw, "updating", nil
+
+		},
+		Timeout:        24 * time.Hour,
+		Delay:          10 * time.Second,
+		MinTimeout:     1 * time.Minute,
+		NotFoundChecks: 24 * 60,
+	}
+
+	return stateConf.WaitForState()
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_interface_sg_attachment.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_interface_sg_attachment.go
new file mode 100644
index 00000000000..f9465d972f7
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_interface_sg_attachment.go
@@ -0,0 +1,247 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/services"
+	slsession "github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMNetworkInterfaceSGAttachment() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceIBMNetworkInterfaceSGAttachmentCreate,
+		Read:   resourceIBMNetworkInterfaceSGAttachmentRead,
+		Delete: resourceIBMNetworkInterfaceSGAttachmentDelete,
+		Exists: resourceIBMNetworkInterfaceSGAttachmentExists,
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(15 * time.Minute),
+		},
+		Schema: map[string]*schema.Schema{
+			"security_group_id": {
+				Type:        schema.TypeInt,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Security group ID",
+			},
+			"network_interface_id": {
+				Type:        schema.TypeInt,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Network interface ID",
+			},
+			"soft_reboot": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     true,
+				ForceNew:    true,
+				Description: "Boolean value; set to true if a soft reboot needs to be done.",
+			},
+		},
+	}
+}
+
+func resourceIBMNetworkInterfaceSGAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
+	mk := "network_interface_sg_attachment_" + strconv.Itoa(d.Get("network_interface_id").(int))
+	ibmMutexKV.Lock(mk)
+	defer ibmMutexKV.Unlock(mk)
+
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+	ncs := services.GetVirtualGuestNetworkComponentService(sess)
+
+	sgID := d.Get("security_group_id").(int)
+	interfaceID := d.Get("network_interface_id").(int)
+	_, err := WaitForVSAvailable(d, meta, d.Timeout(schema.TimeoutCreate))
+	if err != nil {
+		return err
+	}
+
+	_, err = service.Id(sgID).AttachNetworkComponents([]int{interfaceID})
+	if err != nil {
+		return err
+	}
+	d.SetId(fmt.Sprintf("%d_%d", sgID, interfaceID))
+
+	// If the user has not explicitly disabled soft reboot
+	if d.Get("soft_reboot").(bool) {
+		// Check whether a soft reboot is required and only reboot when it is
+		ready, err := ncs.Id(interfaceID).SecurityGroupsReady()
+		if err != nil {
+			return err
+		}
+		if !ready {
+			log.Println("Soft reboot the VSI whose network component is", interfaceID)
+			guest, err := ncs.Id(interfaceID).GetGuest()
+			if err != nil {
+				return fmt.Errorf("Couldn't retrieve the virtual guest on interface %d", interfaceID)
+			}
+			guestService := services.GetVirtualGuestService(sess)
+			ok, err := guestService.Id(*guest.Id).RebootSoft()
+			if err != nil {
+				return err
+			}
+			if !ok {
+				return fmt.Errorf("Couldn't reboot the VSI %d", *guest.Id)
+			}
+			// Wait for the security group to be ready again after the reboot
+			stateConf := &resource.StateChangeConf{
+				Target:  []string{"true"},
+				Pending: []string{"false"},
+				Timeout: d.Timeout(schema.TimeoutCreate),
+				Refresh: securityGroupReadyRefreshStateFunc(sess, interfaceID),
+			}
+			_, err = stateConf.WaitForState()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return resourceIBMNetworkInterfaceSGAttachmentRead(d, meta)
+}
+
+func resourceIBMNetworkInterfaceSGAttachmentRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+	sgID, interfaceID, err := decomposeNetworkSGAttachmentID(d.Id())
+	if err != nil {
+		return err
+	}
+	bindings, err :=
service.Id(sgID).GetNetworkComponentBindings()
+	if err != nil {
+		return err
+	}
+	for _, b := range bindings {
+		if *b.NetworkComponentId == interfaceID {
+			return nil
+		}
+	}
+	return fmt.Errorf("No association found between security group %d and network interface %d", sgID, interfaceID)
+}
+
+func resourceIBMNetworkInterfaceSGAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
+	mk := "network_interface_sg_attachment_" + strconv.Itoa(d.Get("network_interface_id").(int))
+	ibmMutexKV.Lock(mk)
+	defer ibmMutexKV.Unlock(mk)
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+	sgID, interfaceID, err := decomposeNetworkSGAttachmentID(d.Id())
+	if err != nil {
+		return err
+	}
+	_, err = service.Id(sgID).DetachNetworkComponents([]int{interfaceID})
+	if err != nil {
+		return fmt.Errorf("Error detaching network components from Security Group: %s", err)
+	}
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMNetworkInterfaceSGAttachmentExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+
+	sgID, interfaceID, err := decomposeNetworkSGAttachmentID(d.Id())
+	if err != nil {
+		return false, err
+	}
+
+	bindings, err := service.Id(sgID).GetNetworkComponentBindings()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok {
+			if apiErr.StatusCode == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+	for _, b := range bindings {
+		if *b.NetworkComponentId == interfaceID {
+			return true, nil
+		}
+	}
+	return false, fmt.Errorf("No association found between security group %d and network interface %d", sgID, interfaceID)
+}
+
+func decomposeNetworkSGAttachmentID(attachmentID string) (sgID, interfaceID int, err error) {
+	ids := strings.Split(attachmentID, "_")
+	if len(ids) != 2 {
+		return -1, -1, fmt.Errorf("The ibm_network_interface_sg_attachment id must be of the form <security_group_id>_<network_interface_id> but it is %s", attachmentID)
+	}
+	sgID, err = strconv.Atoi(ids[0])
+	if err != nil {
+		return -1, -1, fmt.Errorf("Not a valid security group ID, must be an integer: %s", err)
+	}
+
+	interfaceID, err = strconv.Atoi(ids[1])
+	if err != nil {
+		return -1, -1, fmt.Errorf("Not a valid network interface ID, must be an integer: %s", err)
+	}
+	return
+}
+
+func securityGroupReadyRefreshStateFunc(sess *slsession.Session, ifcID int) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		ncs := services.GetVirtualGuestNetworkComponentService(sess)
+		ready, err := ncs.Id(ifcID).SecurityGroupsReady()
+		if err != nil {
+			return ready, "false", err
+		}
+		log.Printf("SecurityGroupReady status is %t", ready)
+		return ready, strconv.FormatBool(ready), nil
+	}
+}
+
+// WaitForVSAvailable waits for the virtual guest that owns the network
+// interface to become available
+func WaitForVSAvailable(d *schema.ResourceData, meta interface{}, timeout time.Duration) (interface{}, error) {
+	interfaceID := d.Get("network_interface_id").(int)
+	log.Printf("Waiting for server (%d) to be available.", interfaceID)
+	sess := meta.(ClientSession).SoftLayerSession()
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"retry", virtualGuestProvisioning},
+		Target:     []string{virtualGuestAvailable},
+		Refresh:    vsReadyRefreshStateFunc(sess, interfaceID),
+		Timeout:    timeout,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func vsReadyRefreshStateFunc(sess *slsession.Session,
ifcID int) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + ncs := services.GetVirtualGuestNetworkComponentService(sess) + guest, err := ncs.Id(ifcID).GetGuest() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Error retrieving virtual guest: %s", err) + } + return false, "retry", nil + } + guestService := services.GetVirtualGuestService(sess) + ready, err := guestService.Id(*guest.Id).GetStatus() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Error retrieving virtual guest: %s", err) + } + return false, "retry", nil + } + if *ready.KeyName == "ACTIVE" { + readyStatus := *ready.Name + log.Printf("virtual guest status is %q", readyStatus) + return ready, virtualGuestAvailable, nil + } + return ready, virtualGuestProvisioning, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_public_ip.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_public_ip.go new file mode 100644 index 00000000000..74dff492d82 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_public_ip.go @@ -0,0 +1,360 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "net" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + AdditionalServicesGlobalIpAddressesPackageType = "ADDITIONAL_SERVICES_GLOBAL_IP_ADDRESSES" + + GlobalIpMask = "id,ipAddress[ipAddress,id,note],destinationIpAddress[ipAddress]" +) + +func resourceIBMNetworkPublicIp() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMNetworkPublicIpCreate, + Read: resourceIBMNetworkPublicIpRead, + Update: resourceIBMNetworkPublicIpUpdate, + Delete: resourceIBMNetworkPublicIpDelete, + Exists: resourceIBMNetworkPublicIpExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "IP Address", + }, + + "routes_to": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + address := v.(string) + if net.ParseIP(address) == nil { + errors = append(errors, fmt.Errorf("Invalid IP format: %s", address)) + } + return + }, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + newRoutesTo := net.ParseIP(n) + // Return true when n has the appropriate IPv6 format and + // the compressed value of n equals the compressed value of o. 
+ return newRoutesTo != nil && (newRoutesTo.String() == net.ParseIP(o).String()) + }, + Description: "Route info", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags", + }, + "notes": { + Type: schema.TypeString, + Optional: true, + Description: "Additional notes", + }, + }, + } +} + +func resourceIBMNetworkPublicIpCreate(d *schema.ResourceData, meta interface{}) error { + + sess := meta.(ClientSession).SoftLayerSession() + + // Find price items with AdditionalServicesGlobalIpAddresses + productOrderContainer, err := buildGlobalIpProductOrderContainer(d, sess, AdditionalServicesGlobalIpAddressesPackageType) + if err != nil { + // Find price items with AdditionalServices + productOrderContainer, err = buildGlobalIpProductOrderContainer(d, sess, AdditionalServicesPackageType) + if err != nil { + return fmt.Errorf("Error creating network public ip: %s", err) + } + } + + log.Println("[INFO] Creating network public ip") + + receipt, err := services.GetProductOrderService(sess.SetRetries(0)). + PlaceOrder(productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of network public ip: %s", err) + } + + globalIp, err := findGlobalIpByOrderId(sess, *receipt.OrderId, d) + if err != nil { + return fmt.Errorf("Error during creation of network public ip: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *globalIp.Id)) + d.Set("ip_address", *globalIp.IpAddress.IpAddress) + + return resourceIBMNetworkPublicIpUpdate(d, meta) +} + +func resourceIBMNetworkPublicIpRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkSubnetIpAddressGlobalService(sess) + + globalIpId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid network public ip ID, must be an integer: %s", err) + } + + globalIp, err := service.Id(globalIpId).Mask(GlobalIpMask).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving network public Ip: %s", err) + } + + d.Set("ip_address", *globalIp.IpAddress.IpAddress) + if globalIp.DestinationIpAddress != nil { + d.Set("routes_to", *globalIp.DestinationIpAddress.IpAddress) + } + if globalIp.IpAddress.Note != nil { + d.Set("notes", *globalIp.IpAddress.Note) + } + return nil +} + +func resourceIBMNetworkPublicIpUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkSubnetIpAddressGlobalService(sess) + + globalIpId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid network public ip ID, must be an integer: %s", err) + } + + routes_to := d.Get("routes_to").(string) + if strings.Contains(routes_to, ":") && len(routes_to) != 39 { + parts := strings.Split(routes_to, ":") + for x, s := range parts { + if s == "" { + zeroes := 9 - len(parts) + parts[x] = strings.Repeat("0000:", zeroes)[:(zeroes*4)+(zeroes-1)] + } else { + parts[x] = fmt.Sprintf("%04s", s) + } + } + + routes_to = strings.Join(parts, ":") + d.Set("routes_to", routes_to) + } + + _, err = service.Id(globalIpId).Route(sl.String(routes_to)) + if err != nil { + return fmt.Errorf("Error editing network public Ip: %s", err) + } + // Update notes + if d.HasChange("notes") { + publicIp, err := service.Id(globalIpId).Mask(GlobalIpMask).GetObject() + if err != nil { + return fmt.Errorf("Error updating network public Ip: %s", err) + } + err = updatePublicIPNotes(d, sess, publicIp) 
+ if err != nil { + return fmt.Errorf("Error editing network public Ip: %s", err) + } + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + transaction, err := service.Id(globalIpId).GetActiveTransaction() + if err != nil { + return datatypes.Network_Subnet_IpAddress_Global{}, "pending", err + } + if transaction.Id == nil { + return datatypes.Network_Subnet_IpAddress_Global{}, "complete", nil + } + return datatypes.Network_Subnet_IpAddress_Global{}, "pending", nil + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return fmt.Errorf("Error waiting for network public ip destination ip address to become active: %s", err) + } + + if _, ok := pendingResult.(datatypes.Network_Subnet_IpAddress_Global); ok { + return nil + } + + return nil +} + +func resourceIBMNetworkPublicIpDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkSubnetIpAddressGlobalService(sess) + + globalIpId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid network public ip ID, must be an integer: %s", err) + } + + billingItem, err := service.Id(globalIpId).GetBillingItem() + if err != nil { + return fmt.Errorf("Error deleting network public ip: %s", err) + } + + if billingItem.Id == nil { + return nil + } + + _, err = services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService() + + return err +} + +func resourceIBMNetworkPublicIpExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkSubnetIpAddressGlobalService(sess) + + globalIpId, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(globalIpId).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error retrieving network public ip: %s", err) + } + return result.Id != nil && *result.Id == globalIpId, nil +} + +func findGlobalIpByOrderId(sess *session.Session, orderId int, d *schema.ResourceData) (datatypes.Network_Subnet_IpAddress_Global, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + globalIps, err := services.GetAccountService(sess). + Filter(filter.Path("globalIpRecords.billingItem.orderItem.order.id"). + Eq(strconv.Itoa(orderId)).Build()). + Mask("id,ipAddress[ipAddress]"). 
+ GetGlobalIpRecords() + if err != nil { + return datatypes.Network_Subnet_IpAddress_Global{}, "", err + } + + if len(globalIps) == 1 && globalIps[0].IpAddress != nil { + return globalIps[0], "complete", nil + } else if len(globalIps) == 0 || len(globalIps) == 1 { + return datatypes.Network_Subnet_IpAddress_Global{}, "pending", nil + } else { + return nil, "", fmt.Errorf("Expected one network public ip: %s", err) + } + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + NotFoundChecks: 24 * 60, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Subnet_IpAddress_Global{}, err + } + + if result, ok := pendingResult.(datatypes.Network_Subnet_IpAddress_Global); ok { + return result, nil + } + + return datatypes.Network_Subnet_IpAddress_Global{}, + fmt.Errorf("Cannot find network public ip with order id '%d'", orderId) +} + +func buildGlobalIpProductOrderContainer(d *schema.ResourceData, sess *session.Session, packageType string) ( + *datatypes.Container_Product_Order_Network_Subnet, error) { + + // 1. Get a package + pkg, err := product.GetPackageByType(sess, packageType) + if err != nil { + return &datatypes.Container_Product_Order_Network_Subnet{}, err + } + + // 2. Get all prices for the package + productItems, err := product.GetPackageProducts(sess, *pkg.Id) + if err != nil { + return &datatypes.Container_Product_Order_Network_Subnet{}, err + } + + // 3. Find global ip prices + // the following looks for only IPV4 Global Ips only + globalIpKeyname := "GLOBAL_IPV4" + if strings.Contains(d.Get("routes_to").(string), ":") { + globalIpKeyname = "GLOBAL_IPV6" + } + + // 4. Select items with a matching keyname + globalIpItems := []datatypes.Product_Item{} + for _, item := range productItems { + if *item.KeyName == globalIpKeyname { + globalIpItems = append(globalIpItems, item) + } + } + + if len(globalIpItems) == 0 { + return &datatypes.Container_Product_Order_Network_Subnet{}, + fmt.Errorf("No product items matching %s could be found", globalIpKeyname) + } + + productOrderContainer := datatypes.Container_Product_Order_Network_Subnet{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Prices: []datatypes.Product_Item_Price{ + { + Id: globalIpItems[0].Prices[0].Id, + }, + }, + Quantity: sl.Int(1), + }, + } + + return &productOrderContainer, nil +} + +func updatePublicIPNotes(d *schema.ResourceData, sess *session.Session, publicIP datatypes.Network_Subnet_IpAddress_Global) error { + id := *publicIP.IpAddress.Id + notes := d.Get("notes").(string) + + if (publicIP.IpAddress.Note != nil && *publicIP.IpAddress.Note != notes) || (publicIP.IpAddress.Note == nil && notes != "") { + _, err := services.GetNetworkSubnetIpAddressService(sess). + Id(id). + EditObject(&datatypes.Network_Subnet_IpAddress{Note: sl.String(notes)}) + if err != nil { + return fmt.Errorf("Error adding note to network public IP (%d): %s", id, err) + } + } + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_vlan.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_vlan.go new file mode 100644 index 00000000000..59619811df2 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_vlan.go @@ -0,0 +1,527 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/helpers/hardware"
+	"github.com/softlayer/softlayer-go/helpers/location"
+	"github.com/softlayer/softlayer-go/helpers/product"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+const (
+	AdditionalServicesPackageType            = "ADDITIONAL_SERVICES"
+	AdditionalServicesNetworkVlanPackageType = "ADDITIONAL_SERVICES_NETWORK_VLAN"
+
+	VlanMask = "id,name,primaryRouter[datacenter[name]],primaryRouter[hostname],vlanNumber," +
+		"billingItem[recurringFee],guestNetworkComponentCount,subnets[networkIdentifier,cidr,subnetType],tagReferences[id,tag[name]]"
+)
+
+func resourceIBMNetworkVlan() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMNetworkVlanCreate,
+		Read:     resourceIBMNetworkVlanRead,
+		Update:   resourceIBMNetworkVlanUpdate,
+		Delete:   resourceIBMNetworkVlanDelete,
+		Exists:   resourceIBMNetworkVlanExists,
+		Importer: &schema.ResourceImporter{},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+
+			"datacenter": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Datacenter name",
+			},
+			"type": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errs []error) {
+					vlanType := v.(string)
+					if vlanType != "PRIVATE" && vlanType != "PUBLIC" {
+						errs = append(errs, errors.New(
+							"vlan type should be either 'PRIVATE' or 'PUBLIC'"))
+					}
+					return
+				},
+				Description: "VLAN type",
+			},
+
+			"name": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validateVLANName,
+				Description:  "VLAN name",
+			},
+
+			"router_hostname": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Router hostname",
+			},
+
+			"vlan_number": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "VLAN number",
+			},
+			"softlayer_managed": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Set to true if the VLAN is managed by SoftLayer",
+			},
+			"child_resource_count": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "Child resource count",
+			},
+			"subnets": {
+				Type:     schema.TypeSet,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"subnet": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"subnet_type": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"subnet_size": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"gateway": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"cidr": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+					},
+				},
+			},
+			"tags": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Set:         schema.HashString,
+				Description: "List of tags",
+			},
+			ResourceControllerURL: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
+			},
+			ResourceName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The
name of the resource", + }, + }, + } +} + +func resourceIBMNetworkVlanCreate(d *schema.ResourceData, meta interface{}) error { + + sess := meta.(ClientSession).SoftLayerSession() + router := d.Get("router_hostname").(string) + name := d.Get("name").(string) + + vlanType := d.Get("type").(string) + if (vlanType == "PRIVATE" && len(router) > 0 && strings.Contains(router, "fcr")) || + (vlanType == "PUBLIC" && len(router) > 0 && strings.Contains(router, "bcr")) { + return fmt.Errorf("Error creating vlan: mismatch between vlan_type '%s' and router_hostname '%s'", vlanType, router) + } + + // Find price items with AdditionalServicesNetworkVlan + productOrderContainer, err := buildVlanProductOrderContainer(d, sess, AdditionalServicesNetworkVlanPackageType) + if err != nil { + // Find price items with AdditionalServices + productOrderContainer, err = buildVlanProductOrderContainer(d, sess, AdditionalServicesPackageType) + if err != nil { + return fmt.Errorf("Error creating vlan: %s", err) + } + } + + log.Println("[INFO] Creating vlan") + + receipt, err := services.GetProductOrderService(sess.SetRetries(0)). + PlaceOrder(productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of vlan: %s", err) + } + + vlan, err := findVlanByOrderId(sess, *receipt.OrderId, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error finding VLAN order %d: %s", *receipt.OrderId, err) + } + + if len(name) > 0 { + _, err = services.GetNetworkVlanService(sess). + Id(*vlan.Id).EditObject(&datatypes.Network_Vlan{Name: sl.String(name)}) + if err != nil { + return fmt.Errorf("Error updating vlan: %s", err) + } + } + + d.SetId(fmt.Sprintf("%d", *vlan.Id)) + + id := *vlan.Id + // Set tags + tags := getTags(d) + if tags != "" { + //Try setting only when it is non empty as we are creating vlan + err = setVlanTags(id, tags, meta) + if err != nil { + return err + } + } + return resourceIBMNetworkVlanRead(d, meta) +} + +func resourceIBMNetworkVlanRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkVlanService(sess) + + vlanId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid vlan ID, must be an integer: %s", err) + } + + vlan, err := service.Id(vlanId).Mask(VlanMask).GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving vlan: %s", err) + } + + d.Set("vlan_number", *vlan.VlanNumber) + d.Set("child_resource_count", *vlan.GuestNetworkComponentCount) + d.Set("name", sl.Get(vlan.Name, "")) + + if vlan.PrimaryRouter != nil { + d.Set("router_hostname", *vlan.PrimaryRouter.Hostname) + if strings.HasPrefix(*vlan.PrimaryRouter.Hostname, "fcr") { + d.Set("type", "PUBLIC") + } else { + d.Set("type", "PRIVATE") + } + if vlan.PrimaryRouter.Datacenter != nil { + d.Set("datacenter", *vlan.PrimaryRouter.Datacenter.Name) + } + } + + d.Set("softlayer_managed", vlan.BillingItem == nil) + + // Subnets + subnets := make([]map[string]interface{}, 0) + primarySubnets := make([]map[string]interface{}, 0) + validPrimaryType := regexp.MustCompile(`.*PRIMARY.*`) + + for _, elem := range vlan.Subnets { + subnet := make(map[string]interface{}) + if validPrimaryType.MatchString(*elem.SubnetType) { + primarySubnet := map[string]interface{}{ + "subnet": fmt.Sprintf("%s/%d", *elem.NetworkIdentifier, *elem.Cidr), + "subnet_type": *elem.SubnetType, + "subnet_size": 1 << uint(32-*elem.Cidr), + "cidr": *elem.Cidr, + } + if elem.Gateway != nil { + primarySubnet["gateway"] = 
*elem.Gateway
+			}
+			primarySubnets = append(primarySubnets, primarySubnet)
+		}
+		subnet["subnet"] = fmt.Sprintf("%s/%d", *elem.NetworkIdentifier, *elem.Cidr)
+		subnet["subnet_type"] = *elem.SubnetType
+		subnet["subnet_size"] = 1 << (uint)(32-*elem.Cidr)
+		subnet["cidr"] = *elem.Cidr
+		if elem.Gateway != nil {
+			subnet["gateway"] = *elem.Gateway
+		}
+		subnets = append(subnets, subnet)
+	}
+	d.Set("subnets", subnets)
+
+	tagRefs := vlan.TagReferences
+	tagRefsLen := len(tagRefs)
+	if tagRefsLen > 0 {
+		tags := make([]string, tagRefsLen, tagRefsLen)
+		for i, tagRef := range tagRefs {
+			tags[i] = *tagRef.Tag.Name
+		}
+		d.Set("tags", tags)
+	}
+	d.Set(ResourceControllerURL, fmt.Sprintf("https://cloud.ibm.com/classic/network/vlans/%s", d.Id()))
+	d.Set(ResourceName, sl.Get(vlan.Name, ""))
+
+	return nil
+}
+
+func resourceIBMNetworkVlanUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkVlanService(sess)
+
+	vlanId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid vlan ID, must be an integer: %s", err)
+	}
+
+	opts := datatypes.Network_Vlan{}
+
+	isChanged := false
+
+	if d.HasChange("name") {
+		opts.Name = sl.String(d.Get("name").(string))
+		isChanged = true
+	}
+
+	// Update tags
+	if d.HasChange("tags") {
+		tags := getTags(d)
+		err := setVlanTags(vlanId, tags, meta)
+		if err != nil {
+			return err
+		}
+	}
+
+	if isChanged {
+		_, err = service.Id(vlanId).EditObject(&opts)
+
+		if err != nil {
+			return fmt.Errorf("Error updating vlan: %s", err)
+		}
+	}
+
+	return resourceIBMNetworkVlanRead(d, meta)
+}
+
+func resourceIBMNetworkVlanDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkVlanService(sess)
+
+	vlanId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid vlan ID, must be an integer: %s", err)
+	}
+
+	const (
+		noVms          = "There are no vms on the VLAN"
+		vmsStillOnVlan = "VMs are still present on the VLAN"
+	)
+
+	//Wait till all the VMs are disconnected before trying to delete
+	stateConf := &resource.StateChangeConf{
+		Target:     []string{noVms},
+		Pending:    []string{vmsStillOnVlan},
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		MinTimeout: 10 * time.Second,
+		Delay:      30 * time.Second, // Wait 30 secs before starting
+		Refresh: func() (interface{}, string, error) {
+			vms, err := service.Id(vlanId).GetVirtualGuests()
+			if err != nil {
+				log.Printf("[ERROR] Received error while fetching virtual guests on VLAN to see if VLAN can be cancelled now: %#v", err)
+				return vms, "Error", err
+			}
+			if len(vms) != 0 {
+				return vms, vmsStillOnVlan, nil
+			}
+			return vms, noVms, nil
+		},
+	}
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
+
+	billingItem, err := service.Id(vlanId).GetBillingItem()
+	if err != nil {
+		return fmt.Errorf("Error deleting vlan: %s", err)
+	}
+
+	// VLANs which don't have billing items are managed by SoftLayer. They can't be deleted by
+	// users. If a target VLAN doesn't have a billing item, the function will return nil without
+	// errors and only VLAN resource information in a terraform state file will be deleted.
+	// The physical VLAN will be deleted automatically when the VLAN doesn't have any child resources.
+	if billingItem.Id == nil {
+		return nil
+	}
+
+	// If the VLAN has a billing item, the function deletes the billing item and returns so that
+	// the VLAN resource in a terraform state file can be deleted.
+	// The physical VLAN will be deleted automatically when the VLAN doesn't have any child resources.
+	_, err = services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService()
+
+	return err
+}
+
+func resourceIBMNetworkVlanExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkVlanService(sess)
+
+	vlanID, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid vlan ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(vlanID).Mask("id").GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok {
+			if apiErr.StatusCode == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+	return result.Id != nil && *result.Id == vlanID, nil
+}
+
+func findVlanByOrderId(sess *session.Session, orderId int, timeout time.Duration) (datatypes.Network_Vlan, error) {
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"pending"},
+		Target:  []string{"complete"},
+		Refresh: func() (interface{}, string, error) {
+			vlans, err := services.GetAccountService(sess).
+				Filter(filter.Path("networkVlans.billingItem.orderItem.order.id").
+					Eq(strconv.Itoa(orderId)).Build()).
+				Mask("id").
+				GetNetworkVlans()
+			if err != nil {
+				return datatypes.Network_Vlan{}, "", err
+			}
+
+			if len(vlans) == 1 {
+				return vlans[0], "complete", nil
+			} else if len(vlans) == 0 {
+				return []datatypes.Network_Vlan{}, "pending", nil
+			} else {
+				// err is nil on this path; report the unexpected count instead.
+				return nil, "", fmt.Errorf("Expected one vlan, got %d", len(vlans))
+			}
+		},
+		Timeout:        timeout,
+		Delay:          5 * time.Second,
+		MinTimeout:     3 * time.Second,
+		NotFoundChecks: 300,
+	}
+
+	pendingResult, err := stateConf.WaitForState()
+
+	if err != nil {
+		return datatypes.Network_Vlan{}, err
+	}
+
+	if result, ok := pendingResult.(datatypes.Network_Vlan); ok {
+		return result, nil
+	}
+
+	return datatypes.Network_Vlan{},
+		fmt.Errorf("Cannot find vlan with order id '%d'", orderId)
+}
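+
+// A minimal configuration for the ibm_network_vlan resource defined in this
+// file might look as follows (hypothetical values):
+//
+//	resource "ibm_network_vlan" "test_vlan" {
+//	  name       = "test_vlan"
+//	  datacenter = "dal12"
+//	  type       = "PUBLIC"
+//	}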
+
+func buildVlanProductOrderContainer(d *schema.ResourceData, sess *session.Session, packageType string) (
+	*datatypes.Container_Product_Order_Network_Vlan, error) {
+	var rt datatypes.Hardware
+	router := d.Get("router_hostname").(string)
+
+	vlanType := d.Get("type").(string)
+	datacenter := d.Get("datacenter").(string)
+
+	if datacenter == "" {
+		return &datatypes.Container_Product_Order_Network_Vlan{},
+			errors.New("datacenter name is empty")
+	}
+
+	dc, err := location.GetDatacenterByName(sess, datacenter, "id")
+	if err != nil {
+		return &datatypes.Container_Product_Order_Network_Vlan{}, err
+	}
+
+	// 1. Get a package
+	pkg, err := product.GetPackageByType(sess, packageType)
+	if err != nil {
+		return &datatypes.Container_Product_Order_Network_Vlan{}, err
+	}
+
+	// 2. Get all prices for the package
+	productItems, err := product.GetPackageProducts(sess, *pkg.Id)
+	if err != nil {
+		return &datatypes.Container_Product_Order_Network_Vlan{}, err
+	}
+
+	// 3. Find vlan and subnet prices
+	vlanKeyname := vlanType + "_NETWORK_VLAN"
+
+	// 4. Select items with a matching keyname
+	vlanItems := []datatypes.Product_Item{}
+	for _, item := range productItems {
+		if *item.KeyName == vlanKeyname {
+			vlanItems = append(vlanItems, item)
+		}
+	}
+
+	if len(vlanItems) == 0 {
+		return &datatypes.Container_Product_Order_Network_Vlan{},
+			fmt.Errorf("No product items matching %s could be found", vlanKeyname)
+	}
+
+	productOrderContainer := datatypes.Container_Product_Order_Network_Vlan{
+		Container_Product_Order: datatypes.Container_Product_Order{
+			PackageId: pkg.Id,
+			Location:  sl.String(strconv.Itoa(*dc.Id)),
+			Prices: []datatypes.Product_Item_Price{
+				{
+					Id: vlanItems[0].Prices[0].Id,
+				},
+			},
+			Quantity: sl.Int(1),
+		},
+	}
+
+	if len(router) > 0 {
+		rt, err = hardware.GetRouterByName(sess, router, "id")
+		if err != nil {
+			return &datatypes.Container_Product_Order_Network_Vlan{},
+				fmt.Errorf("Error creating vlan: %s", err)
+		}
+		productOrderContainer.RouterId = rt.Id
+	}
+
+	return &productOrderContainer, nil
+}
+
+func setVlanTags(id int, tags string, meta interface{}) error {
+	service := services.GetNetworkVlanService(meta.(ClientSession).SoftLayerSession())
+	_, err := service.Id(id).SetTags(sl.String(tags))
+	if err != nil {
+		return fmt.Errorf("Could not set tags on vlan %d", id)
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_vlan_spanning.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_vlan_spanning.go
new file mode 100644
index 00000000000..94c47250ca1
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_network_vlan_spanning.go
@@ -0,0 +1,101 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"math/rand"
+	"strconv"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/services"
+)
+
+func resourceIBMNetworkVlanSpan() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMNetworkVlanSpanCreate,
+		Read:     resourceIBMNetworkVlanSpanRead,
+		Update:   resourceIBMNetworkVlanSpanUpdate,
+		Delete:   resourceIBMNetworkVlanSpanDelete,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"vlan_spanning": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validateAllowedStringValue([]string{"off", "on"}),
+				Description:  "VLAN Spanning set to On or Off",
+			},
+		},
+	}
+}
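+
+// A minimal configuration for this resource might look as follows
+// (hypothetical; "vlan_spanning" is the only argument, as defined above):
+//
+//	resource "ibm_network_vlan_spanning" "spanning" {
+//	  vlan_spanning = "on"
+//	}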
+
+func resourceIBMNetworkVlanSpanRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetAccountService(sess)
+
+	vlanSpan, err := service.GetNetworkVlanSpan()
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving vlan spanning: %s", err)
+	}
+
+	if *vlanSpan.EnabledFlag {
+		d.Set("vlan_spanning", "on")
+	} else {
+		d.Set("vlan_spanning", "off")
+	}
+
+	return nil
+}
+
+func resourceIBMNetworkVlanSpanCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetAccountService(sess)
+
+	rnd := rand.Intn(8999999) + 1000000
+	vlanSpanning := d.Get("vlan_spanning").(string)
+
+	enabled := vlanSpanning == "on"
+
+	_, err := service.SetVlanSpan(&enabled)
+	if err != nil {
+		return fmt.Errorf("Error setting VLAN Spanning: %s", err)
+	}
+
+	d.SetId(strconv.Itoa(rnd))
+	return resourceIBMNetworkVlanSpanRead(d, meta)
+}
+
+func resourceIBMNetworkVlanSpanUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetAccountService(sess)
+	vlanSpanning := d.Get("vlan_spanning").(string)
+
+	enabled := vlanSpanning == "on"
+
+	_, err := service.SetVlanSpan(&enabled)
+	if err != nil {
+		return fmt.Errorf("Error setting VLAN Spanning: %s", err)
+	}
+
+	return resourceIBMNetworkVlanSpanRead(d, meta)
+}
+
+func resourceIBMNetworkVlanSpanDelete(d *schema.ResourceData, meta interface{}) error {
+	// Leave VLAN Spanning setting in current state
+	d.SetId("")
+
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ob_logging.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ob_logging.go
new file mode 100644
index 00000000000..08e72c73ab5
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ob_logging.go
@@ -0,0 +1,343 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	v2 "github.com/IBM-Cloud/bluemix-go/api/container/containerv2"
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	obLoggingCluster         = "cluster"
+	obLoggingInstanceID      = "instance_id"
+	obLoggingInstanceName    = "instance_name"
+	obLoggingIngestionkey    = "logdna_ingestion_key"
+	obLoggingPrivateEndpoint = "private_endpoint"
+	obLoggingDaemonSetName   = "daemonset_name"
+	obLoggingAgentKey        = "agent_key"
+	obLoggingAgentNamespace  = "agent_namespace"
+	obLoggingCrn             = "crn"
+	obLoggingDiscoveredAgent = "discovered_agent"
+	obLoggingNamespace       = "namespace"
+)
+
+func resourceIBMObLogging() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMLoggingCreate,
+		Read:     resourceIBMLoggingRead,
+		Update:   resourceIBMLoggingUpdate,
+		Delete:   resourceIBMLoggingDelete,
+		Importer: &schema.ResourceImporter{},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Update: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			obLoggingCluster: {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Name or ID of the cluster to be used.",
+			},
+
+			obLoggingInstanceID: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "ID of the LogDNA service instance to latch",
+			},
+
+			obLoggingIngestionkey: {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "LogDNA ingestion key",
+			},
+
+			obLoggingPrivateEndpoint: {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Computed:    true,
+				Description: "Add this option to connect to your LogDNA service instance through the private service endpoint",
+			},
+
+			obLoggingDaemonSetName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Daemon Set Name",
+			},
+
+			obLoggingInstanceName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "LogDNA instance Name",
+			},
+
+			obLoggingAgentKey: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Agent key name",
+			},
+
+			obLoggingAgentNamespace: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Agent Namespace",
+			},
+
+			obLoggingCrn: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "CRN",
+			},
+
+			obLoggingDiscoveredAgent: {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Discovered agent",
+			},
+
+			obLoggingNamespace: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Namespace",
+			},
+		},
+	}
+}
+
+func resourceIBMLoggingCreate(d *schema.ResourceData, meta interface{}) error {
+	client, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	var ingestionkey string
+	var privateEndpoint bool
+
+	//Read cluster ID and logging instanceID
+	clusterName := d.Get(obLoggingCluster).(string)
+	loggingInstanceID := d.Get(obLoggingInstanceID).(string)
+
+	//Read Ingestionkey
+	if iKey, ok := d.GetOk(obLoggingIngestionkey); ok {
+		ingestionkey = iKey.(string)
+	}
+
+	//Read private endpoint
+	if endPoint, ok := d.GetOk(obLoggingPrivateEndpoint); ok {
+		privateEndpoint = endPoint.(bool)
+	}
+
+	//populate logging configure create request
+	params := v2.LoggingCreateRequest{
+		Cluster:         clusterName,
+		IngestionKey:    ingestionkey,
+		LoggingInstance: loggingInstanceID,
+		PrivateEndpoint: privateEndpoint,
+	}
+
+	targetEnv, err := getLoggingTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+
+	var logging v2.LoggingCreateResponse
+	err = resource.Retry(10*time.Minute, func() *resource.RetryError {
+		var err error
+		logging, err = client.Logging().CreateLoggingConfig(params, targetEnv)
+		if err != nil {
+			log.Printf("[DEBUG] logging Instance err %s", err)
+			if strings.Contains(err.Error(), "The user doesn't have enough privileges to perform this action") {
+				return resource.RetryableError(err)
+			}
+			return resource.NonRetryableError(err)
+		}
+
+		return nil
+	})
+
+	if isResourceTimeoutError(err) {
+		logging, err = client.Logging().CreateLoggingConfig(params, targetEnv)
+	}
+	if err != nil {
+		return fmt.Errorf("error latching logging instance to cluster: %w", err)
+	}
+
+	d.SetId(fmt.Sprintf("%s/%s", clusterName, logging.InstanceID))
+
+	return resourceIBMLoggingRead(d, meta)
+}
+
+func getLoggingTargetHeader(d *schema.ResourceData, meta interface{}) (v2.LoggingTargetHeader, error) {
+	_, err := meta.(ClientSession).BluemixSession()
+	if err != nil {
+		return v2.LoggingTargetHeader{}, err
+	}
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return v2.LoggingTargetHeader{}, err
+	}
+	accountID := userDetails.userAccount
+
+	targetEnv := v2.LoggingTargetHeader{
+		AccountID: accountID,
+	}
+
+	return targetEnv, nil
+}
+
+func resourceIBMLoggingRead(d *schema.ResourceData, meta interface{}) error {
+
+	client, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+	clusterName := parts[0]
+	loggingID := parts[1]
+
+	targetEnv, err := getLoggingTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+
+	config, err := client.Logging().GetLoggingConfig(clusterName, loggingID, targetEnv)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				d.SetId("")
+				return nil
+			}
+		}
+		return fmt.Errorf("Error in GetLoggingConfig: %s", err)
+	}
+
+	d.Set(obLoggingPrivateEndpoint, config.PrivateEndpoint)
+	d.Set(obLoggingDaemonSetName, config.DaemonsetName)
+	d.Set(obLoggingInstanceName, config.InstanceName)
+	d.Set(obLoggingAgentKey, config.AgentKey)
+	d.Set(obLoggingAgentNamespace, config.AgentNamespace)
+	d.Set(obLoggingDiscoveredAgent, config.DiscoveredAgent)
+	d.Set(obLoggingCrn, config.CRN)
+	d.Set(obLoggingNamespace, config.Namespace)
+
+	return nil
+}
+
+func resourceIBMLoggingUpdate(d *schema.ResourceData, meta interface{}) error {
+
+	hasChanged := false
+	idChanged := false
+
+	client, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	targetEnv, err := getLoggingTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+
+	loggingUpdateModel := v2.LoggingUpdateRequest{}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+	cluster := parts[0]
+	loggingID := parts[1]
+
+	clusterName := cluster
+	loggingUpdateModel.Cluster = clusterName
+
+	if d.HasChange(obLoggingInstanceID) {
+		hasChanged = true
+		idChanged = true
+		old, new := d.GetChange(obLoggingInstanceID)
+		loggingUpdateModel.Instance = old.(string)
+		loggingUpdateModel.NewInstance = new.(string)
+	} else {
+		loggingUpdateModel.Instance = loggingID
+	}
+
+	if d.HasChange(obLoggingIngestionkey) {
+		key := d.Get(obLoggingIngestionkey).(string)
+		loggingUpdateModel.IngestionKey = key
+		hasChanged = true
+	}
+
+	if d.HasChange(obLoggingPrivateEndpoint) {
+		endpoint := d.Get(obLoggingPrivateEndpoint).(bool)
+		loggingUpdateModel.PrivateEndpoint = endpoint
+		hasChanged = true
+	}
+
+	if hasChanged {
+
+		_, err := client.Logging().UpdateLoggingConfig(loggingUpdateModel, targetEnv)
+		if err != nil {
+			return err
+		} else if idChanged {
+			d.SetId(fmt.Sprintf("%s/%s", clusterName, loggingUpdateModel.NewInstance))
+		}
+	}
+
+	return resourceIBMLoggingRead(d, meta)
+}
+
+func resourceIBMLoggingDelete(d *schema.ResourceData, meta interface{}) error {
+	client, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	targetEnv, err := getLoggingTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+	clusterName := parts[0]
+	loggingID := parts[1]
+
+	//populate the logging configuration delete request
+	params := v2.LoggingDeleteRequest{
+		Cluster:  clusterName,
+		Instance: loggingID,
+	}
+
+	_, err = client.Logging().DeleteLoggingConfig(params, targetEnv)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				d.SetId("")
+				return nil
+			}
+		}
+		return fmt.Errorf("Error in DeleteLoggingConfig: %s", err)
+	}
+	d.SetId("")
+	return nil
+}
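+
+// A minimal configuration latching a LogDNA instance to a cluster might look
+// as follows (hypothetical IDs; "cluster" and "instance_id" are the required
+// arguments defined above):
+//
+//	resource "ibm_ob_logging" "logging" {
+//	  cluster     = "my-cluster-id"
+//	  instance_id = "my-logdna-instance-id"
+//	}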
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ob_monitoring.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ob_monitoring.go
new file mode 100644
index 00000000000..75bcb20181e
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ob_monitoring.go
@@ -0,0 +1,346 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	v2 "github.com/IBM-Cloud/bluemix-go/api/container/containerv2"
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	obMonitoringCluster         = "cluster"
+	obMonitoringInstanceID      = "instance_id"
+	obMonitoringInstanceName    = "instance_name"
+	obMonitoringIngestionkey    = "sysdig_access_key"
+	obMonitoringPrivateEndpoint = "private_endpoint"
+	obMonitoringDaemonSetName   = "daemonset_name"
+	obMonitoringAgentKey        = "agent_key"
+	obMonitoringAgentNamespace  = "agent_namespace"
+	obMonitoringCrn             = "crn"
+	obMonitoringDiscoveredAgent = "discovered_agent"
+	obMonitoringNamespace       = "namespace"
+)
+
+func resourceIBMObMonitoring() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMMonitoringCreate,
+		Read:     resourceIBMMonitoringRead,
+		Update:   resourceIBMMonitoringUpdate,
+		Delete:   resourceIBMMonitoringDelete,
+		Importer: &schema.ResourceImporter{},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Update: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			obMonitoringCluster: {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Name or ID of the cluster to be used.",
+			},
+
+			obMonitoringInstanceID: {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "ID of the Sysdig service instance to latch",
+			},
+
+			obMonitoringIngestionkey: {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "Sysdig ingestion key",
+			},
+
+			obMonitoringPrivateEndpoint: {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Computed:    true,
+				Description: "Add this option to connect to your Sysdig service instance through the private service endpoint",
+			},
+
+			obMonitoringDaemonSetName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Daemon Set Name",
+			},
+
+			obMonitoringInstanceName: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Sysdig instance Name",
+			},
+
+			obMonitoringAgentKey: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Agent key name",
+			},
+
+			obMonitoringAgentNamespace: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Agent Namespace",
+			},
+
+			obMonitoringCrn: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "CRN",
+			},
+
+			obMonitoringDiscoveredAgent: {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Discovered agent",
+			},
+
+			obMonitoringNamespace: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Namespace",
+			},
+		},
+	}
+}
+
+func resourceIBMMonitoringCreate(d *schema.ResourceData, meta interface{}) error {
+
+	client, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	var ingestionkey string
+	var privateEndpoint bool
+
+	//Read cluster ID and sysdig instanceID
+	clusterName := d.Get(obMonitoringCluster).(string)
+	sysdigInstanceID := d.Get(obMonitoringInstanceID).(string)
+
+	//Read Ingestionkey
+	if iKey, ok := d.GetOk(obMonitoringIngestionkey); ok {
+		ingestionkey = iKey.(string)
+	}
+
+	//Read private endpoint
+	if endPoint, ok := d.GetOk(obMonitoringPrivateEndpoint); ok {
+		privateEndpoint = endPoint.(bool)
+	}
+
+	//populate sysdig configure create request
+	params := v2.MonitoringCreateRequest{
+		Cluster:         clusterName,
+		IngestionKey:    
ingestionkey, + SysidigInstance: sysdigInstanceID, + PrivateEndpoint: privateEndpoint, + } + + targetEnv, err := getMonitoringTargetHeader(d, meta) + if err != nil { + return err + } + + var monitoring v2.MonitoringCreateResponse + err = resource.Retry(10*time.Minute, func() *resource.RetryError { + var err error + monitoring, err = client.Monitoring().CreateMonitoringConfig(params, targetEnv) + if err != nil { + log.Printf("[DEBUG] monitoring Instance err %s", err) + if strings.Contains(err.Error(), "The user doesn't have enough privileges to perform this action") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + + return nil + }) + + if isResourceTimeoutError(err) { + monitoring, err = client.Monitoring().CreateMonitoringConfig(params, targetEnv) + } + if err != nil { + return fmt.Errorf("error latching monitoring instance to cluster: %w", err) + } + + d.SetId(fmt.Sprintf("%s/%s", clusterName, monitoring.InstanceID)) + + return resourceIBMMonitoringRead(d, meta) +} + +func getMonitoringTargetHeader(d *schema.ResourceData, meta interface{}) (v2.MonitoringTargetHeader, error) { + _, err := meta.(ClientSession).BluemixSession() + if err != nil { + return v2.MonitoringTargetHeader{}, err + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return v2.MonitoringTargetHeader{}, err + } + accountID := userDetails.userAccount + + targetEnv := v2.MonitoringTargetHeader{ + AccountID: accountID, + } + + return targetEnv, nil +} + +func resourceIBMMonitoringRead(d *schema.ResourceData, meta interface{}) error { + + client, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + clusterName := parts[0] + monitoringID := parts[1] + + targetEnv, err := getMonitoringTargetHeader(d, meta) + if err != nil { + return err + } + + config, err := client.Monitoring().GetMonitoringConfig(clusterName, monitoringID, targetEnv) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + d.SetId("") + return nil + } + } + return fmt.Errorf("Error in GetMonitoringConfig: %s", err) + } + + d.Set(obMonitoringPrivateEndpoint, config.PrivateEndpoint) + d.Set(obMonitoringDaemonSetName, config.DaemonsetName) + d.Set(obMonitoringInstanceName, config.InstanceName) + d.Set(obMonitoringAgentKey, config.AgentKey) + d.Set(obMonitoringAgentNamespace, config.AgentNamespace) + d.Set(obMonitoringDiscoveredAgent, config.DiscoveredAgent) + d.Set(obMonitoringCrn, config.CRN) + d.Set(obMonitoringNamespace, config.Namespace) + + return nil + +} + +func resourceIBMMonitoringUpdate(d *schema.ResourceData, meta interface{}) error { + + hasChanged := false + idChanged := false + + client, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return err + } + + targetEnv, err := getMonitoringTargetHeader(d, meta) + if err != nil { + return err + } + + monitoringUpdateModel := v2.MonitoringUpdateRequest{} + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + cluster := parts[0] + monitoringID := parts[1] + + clusterName := cluster + monitoringUpdateModel.Cluster = clusterName + + //if d.HasChange(obMonitoringInstanceID) && !d.IsNewResource() { + if d.HasChange(obMonitoringInstanceID) { + hasChanged = true + idChanged = true + old, new := d.GetChange(obMonitoringInstanceID) + monitoringUpdateModel.Instance = old.(string) + monitoringUpdateModel.NewInstance = new.(string) + } else { + 
monitoringUpdateModel.Instance = monitoringID
+	}
+
+	if d.HasChange(obMonitoringIngestionkey) {
+		key := d.Get(obMonitoringIngestionkey).(string)
+		monitoringUpdateModel.IngestionKey = key
+		hasChanged = true
+	}
+
+	if d.HasChange(obMonitoringPrivateEndpoint) {
+		endpoint := d.Get(obMonitoringPrivateEndpoint).(bool)
+		monitoringUpdateModel.PrivateEndpoint = endpoint
+		hasChanged = true
+	}
+
+	if hasChanged {
+		_, err := client.Monitoring().UpdateMonitoringConfig(monitoringUpdateModel, targetEnv)
+		if err != nil {
+			return err
+		} else if idChanged {
+			d.SetId(fmt.Sprintf("%s/%s", clusterName, monitoringUpdateModel.NewInstance))
+		}
+	}
+
+	return resourceIBMMonitoringRead(d, meta)
+}
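+
+// A minimal configuration latching a Sysdig instance to a cluster might look
+// as follows (hypothetical IDs; "cluster" and "instance_id" are the required
+// arguments defined above):
+//
+//	resource "ibm_ob_monitoring" "monitoring" {
+//	  cluster     = "my-cluster-id"
+//	  instance_id = "my-sysdig-instance-id"
+//	}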
+
+func resourceIBMMonitoringDelete(d *schema.ResourceData, meta interface{}) error {
+
+	client, err := meta.(ClientSession).VpcContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	targetEnv, err := getMonitoringTargetHeader(d, meta)
+	if err != nil {
+		return err
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+	clusterName := parts[0]
+	monitoringID := parts[1]
+
+	//populate the monitoring configuration delete request
+	params := v2.MonitoringDeleteRequest{
+		Cluster:  clusterName,
+		Instance: monitoringID,
+	}
+
+	_, err = client.Monitoring().DeleteMonitoringConfig(params, targetEnv)
+
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				d.SetId("")
+				return nil
+			}
+		}
+		return fmt.Errorf("Error in DeleteMonitoringConfig: %s", err)
+	}
+	d.SetId("")
+	return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_object_storage_account.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_object_storage_account.go
new file mode 100644
index 00000000000..a666f8b41db
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_object_storage_account.go
@@ -0,0 +1,178 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/helpers/order"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMObjectStorageAccount() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMObjectStorageAccountCreate,
+		Read:     resourceIBMObjectStorageAccountRead,
+		Update:   resourceIBMObjectStorageAccountUpdate,
+		Delete:   resourceIBMObjectStorageAccountDelete,
+		Exists:   resourceIBMObjectStorageAccountExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"local_note": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"tags": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+		},
+	}
+}
+
+func resourceIBMObjectStorageAccountCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	accountService := services.GetAccountService(sess)
+
+	// Check if an object storage account exists
+	objectStorageAccounts, err := accountService.GetHubNetworkStorage()
+	if err != nil {
+		return fmt.Errorf("resource_ibm_object_storage_account: Error on create: %s", err)
+	}
+
+	if len(objectStorageAccounts) == 0 {
+		// Order the account
+		productOrderService := services.GetProductOrderService(sess.SetRetries(0))
+
+		receipt, err := productOrderService.PlaceOrder(&datatypes.Container_Product_Order{
+			Quantity:  sl.Int(1),
+			PackageId: sl.Int(0),
+			Prices: []datatypes.Product_Item_Price{
+				{Id: sl.Int(30920)},
+			},
+		}, sl.Bool(false))
+		if err != nil {
+			return fmt.Errorf(
+				"resource_ibm_object_storage_account: Error ordering account: %s", err)
+		}
+
+		// Wait for the object storage account order to complete.
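+		// Completion is detected by WaitForOrderCompletion below, which polls
+		// the billing order until it reports zero active transactions.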
+		billingOrderItem, err := WaitForOrderCompletion(&receipt, meta)
+		if err != nil {
+			return fmt.Errorf(
+				"Error waiting for object storage account order (%d) to complete: %s", *receipt.OrderId, err)
+		}
+
+		// Get accountName using filter on hub network storage
+		objectStorageAccounts, err = accountService.Filter(
+			filter.Path("billingItem.id").Eq(billingOrderItem.BillingItem.Id).Build(),
+		).GetNetworkStorage()
+		if err != nil {
+			return fmt.Errorf("resource_ibm_object_storage_account: Error retrieving the newly ordered account: %s", err)
+		}
+
+		if len(objectStorageAccounts) == 0 {
+			return fmt.Errorf("resource_ibm_object_storage_account: Failed to create object storage account")
+		}
+	}
+
+	// Get account name and set as the Id
+	d.SetId(*objectStorageAccounts[0].Username)
+	d.Set("name", *objectStorageAccounts[0].Username)
+
+	return nil
+}
+
+func WaitForOrderCompletion(
+	receipt *datatypes.Container_Product_Order_Receipt, meta interface{}) (datatypes.Billing_Order_Item, error) {
+
+	log.Printf("Waiting for billing order %d to have zero active transactions", *receipt.OrderId)
+	var billingOrderItem *datatypes.Billing_Order_Item
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"", "in progress"},
+		Target:  []string{"complete"},
+		Refresh: func() (interface{}, string, error) {
+			var err error
+			var completed bool
+
+			completed, billingOrderItem, err = order.CheckBillingOrderComplete(meta.(ClientSession).SoftLayerSession(), receipt)
+			if err != nil {
+				return nil, "", err
+			}
+
+			if completed {
+				return billingOrderItem, "complete", nil
+			}
+			return billingOrderItem, "in progress", nil
+		},
+		Timeout:    10 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	_, err := stateConf.WaitForState()
+	if err != nil || billingOrderItem == nil {
+		// Guard against dereferencing a nil item when the wait fails or times out.
+		return datatypes.Billing_Order_Item{}, err
+	}
+	return *billingOrderItem, nil
+}
+
+func resourceIBMObjectStorageAccountRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	accountService := services.GetAccountService(sess)
+	accountName := d.Id()
+	d.Set("name", accountName)
+
+	// Check if an object storage account exists
+	objectStorageAccounts, err := accountService.Filter(
+		filter.Path("username").Eq(accountName).Build(),
+	).GetHubNetworkStorage()
+	if err != nil {
+		return fmt.Errorf("resource_ibm_object_storage_account: Error on Read: %s", err)
+	}
+
+	for _, objectStorageAccount := range objectStorageAccounts {
+		if *objectStorageAccount.Username == accountName {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("resource_ibm_object_storage_account: Could not find account %s", accountName)
+}
+
+func resourceIBMObjectStorageAccountUpdate(d *schema.ResourceData, meta interface{}) error {
+	// Nothing to update for now. Not supported.
+	return nil
+}
+
+func resourceIBMObjectStorageAccountDelete(d *schema.ResourceData, meta interface{}) error {
+	// Delete is not supported for now.
+	return nil
+}
+
+func resourceIBMObjectStorageAccountExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	err := resourceIBMObjectStorageAccountRead(d, meta)
+	if err != nil {
+		if strings.Contains(err.Error(), "Could not find account") {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
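+
+// This resource takes no required arguments; a minimal (hypothetical)
+// configuration simply orders, or adopts, the account:
+//
+//	resource "ibm_object_storage_account" "account" {}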
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_org.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_org.go
new file mode 100644
index 00000000000..4314d746fc9
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_org.go
@@ -0,0 +1,366 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+
+	"github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2"
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/IBM-Cloud/bluemix-go/helpers"
+)
+
+var (
+	errManagerRoleAssociation = errors.New("please remove your email from the manager role and try again. " +
+		"This is done to avoid spurious diffs because a user creating an organization gets the manager role by default.")
+
+	errUserRoleAssociation = errors.New("please remove your email from the user role and try again. " +
+		"This is done to avoid spurious diffs because a user creating an organization automatically gets the user role by default.")
+)
+
+func resourceIBMOrg() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMOrgCreate,
+		Read:     resourceIBMOrgRead,
+		Delete:   resourceIBMOrgDelete,
+		Update:   resourceIBMOrgUpdate,
+		Exists:   resourceIBMOrgExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Description: "Org name, for example myorg@domain",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+			"org_quota_definition_guid": {
+				Description: "Org quota guid",
+				Type:        schema.TypeString,
+				Computed:    true,
+				Optional:    true,
+			},
+			"billing_managers": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "The IBMID of the users who will have billing manager role in this org, ex - user@example.com",
+			},
+			"managers": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "The IBMID of the users who will have manager role in this org, ex - user@example.com",
+			},
+			"auditors": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "The IBMID of the users who will have auditor role in this org, ex - user@example.com",
+			},
+			"users": {
+				Type:        schema.TypeSet,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "The IBMID of the users who will have user role in this org, ex - user@example.com",
+			},
+
+			"tags": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+		},
+	}
+}
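+
+// A minimal configuration for this resource might look as follows
+// (hypothetical values; note that the creator's own email must not appear
+// in the role lists, per the errors defined above):
+//
+//	resource "ibm_org" "org" {
+//	  name     = "myorg@domain"
+//	  managers = ["manager@example.com"]
+//	}
+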
+func resourceIBMOrgCreate(d *schema.ResourceData, meta interface{}) error {
+	cfAPI, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	orgAPI := cfAPI.Organizations()
+	orgName := d.Get("name").(string)
+	req := mccpv2.OrgCreateRequest{
+		Name: orgName,
+	}
+	if orgQuotaDefinitionGUID, ok := d.GetOk("org_quota_definition_guid"); ok {
+		req.OrgQuotaDefinitionGUID = orgQuotaDefinitionGUID.(string)
+	}
+	orgFields, err := orgAPI.Create(req)
+	if err != nil {
+		return fmt.Errorf("Error creating organisation: %s", err)
+	}
+	orgGUID := orgFields.Metadata.GUID
+	d.SetId(orgGUID)
+
+	return resourceIBMOrgUpdate(d, meta)
+}
+
+func resourceIBMOrgRead(d *schema.ResourceData, meta interface{}) error {
+	cfAPI, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	orgAPI := cfAPI.Organizations()
+	id := d.Id()
+
+	userDetails, err := meta.(ClientSession).BluemixUserDetails()
+	if err != nil {
+		return err
+	}
+	orgOwnerID := userDetails.userEmail
+	orgFields, err := orgAPI.Get(id)
+	if err != nil {
+		return fmt.Errorf("Error retrieving organisation: %s", err)
+	}
+	d.Set("name", orgFields.Entity.Name)
+	billingManager, err := orgAPI.ListBillingManager(id)
+	if err != nil {
+		return fmt.Errorf("Error retrieving billing manager in the org: %s", err)
+	}
+	managers, err := orgAPI.ListManager(id)
+	if err != nil {
+		return fmt.Errorf("Error retrieving managers in the org: %s", err)
+	}
+	auditors, err := orgAPI.ListAuditors(id)
+	if err != nil {
+		return fmt.Errorf("Error retrieving auditors in the org: %s", err)
+	}
+	users, err := orgAPI.ListUsers(id)
+	if err != nil {
+		return fmt.Errorf("Error retrieving users in the org: %s", err)
+	}
+	if len(auditors) > 0 {
+		d.Set("auditors", flattenOrgRole(auditors, ""))
+	}
+	if len(managers) > 0 {
+		d.Set("managers", flattenOrgRole(managers, orgOwnerID))
+	}
+	if len(billingManager) > 0 {
+		d.Set("billing_managers", flattenOrgRole(billingManager, ""))
+	}
+	if len(users) > 0 {
+		d.Set("users", flattenOrgRole(users, orgOwnerID))
+	}
+	if orgFields.Entity.OrgQuotaDefinitionGUID != "" {
+		d.Set("org_quota_definition_guid", orgFields.Entity.OrgQuotaDefinitionGUID)
+	}
+	return nil
+}
+
+func resourceIBMOrgUpdate(d *schema.ResourceData, meta interface{}) error {
+	cfAPI, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	orgAPI := cfAPI.Organizations()
+	id := d.Id()
+
+	req := mccpv2.OrgUpdateRequest{}
+	if d.HasChange("name") {
+		req.Name = helpers.String(d.Get("name").(string))
+	}
+	_, err = orgAPI.Update(id, req)
+	if err != nil {
+		return fmt.Errorf("Error updating organisation: %s", err)
+	}
+	err = updateOrgBillingManagers(orgAPI, id, d)
+	if err != nil {
+		return err
+	}
+	err = updateOrgManagers(meta, id, d)
+	if err != nil {
+		return err
+	}
+	err = updateOrgAuditors(orgAPI, id, d)
+	if err != nil {
+		return err
+	}
+	err = updateOrgUsers(meta, id, d)
+	if err != nil {
+		return err
+	}
+
+	return resourceIBMOrgRead(d, meta)
+}
+
+func resourceIBMOrgDelete(d *schema.ResourceData, meta interface{}) error {
+	cfAPI, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	orgAPI := cfAPI.Organizations()
+	id := d.Id()
+	err = orgAPI.Delete(id, false)
+	if err != nil {
+		return fmt.Errorf("Error deleting organisation: %s", err)
+	}
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMOrgExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return false, err
+	}
+	id := d.Id()
+	org, err := cfClient.Organizations().Get(id)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+	return org.Metadata.GUID == id, nil
+}
+
+func updateOrgBillingManagers(api mccpv2.Organizations, orgGUID string, d *schema.ResourceData) error {
+	if !d.HasChange("billing_managers") {
+		return nil
+	}
+	var remove, add []string
+	o, n := d.GetChange("billing_managers")
+	os := o.(*schema.Set)
+	ns := n.(*schema.Set)
+	remove = expandStringList(os.Difference(ns).List())
+	add = expandStringList(ns.Difference(os).List())
+	if len(add) > 0 {
+		for _, d := range add {
+			_, err := api.AssociateBillingManager(orgGUID, d)
+			if err != nil {
+				return fmt.Errorf("Error associating billing manager (%s) with org %s : %s", d, orgGUID, err)
+			}
+		}
+	}
+	if len(remove) > 0 {
+		for _, d := range remove {
+			err := api.DisassociateBillingManager(orgGUID, d)
+			if err != nil {
+				return fmt.Errorf("Error dis-associating billing manager (%s) with org %s : %s", d, orgGUID, err)
+			}
+		}
+	}
+	return nil
+}
+
+func updateOrgManagers(meta 
interface{}, orgGUID string, d *schema.ResourceData) error { + if !d.HasChange("managers") { + return nil + } + cfAPI, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + api := cfAPI.Organizations() + + var remove, add []string + o, n := d.GetChange("managers") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove = expandStringList(os.Difference(ns).List()) + add = expandStringList(ns.Difference(os).List()) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + orgOwnerID := userDetails.userEmail + + if len(add) > 0 { + for _, d := range add { + if d == orgOwnerID { + return fmt.Errorf("Error associating user (%s) with manager role, %v", d, errManagerRoleAssociation) + } + _, err := api.AssociateManager(orgGUID, d) + if err != nil { + return fmt.Errorf("Error associating manager (%s) with org %s : %s", d, orgGUID, err) + } + } + } + if len(remove) > 0 { + for _, d := range remove { + err := api.DisassociateManager(orgGUID, d) + if err != nil { + return fmt.Errorf("Error dis-associating manager (%s) with org %s : %s", d, orgGUID, err) + } + } + } + return nil +} +func updateOrgAuditors(api mccpv2.Organizations, orgGUID string, d *schema.ResourceData) error { + if !d.HasChange("auditors") { + return nil + } + var remove, add []string + o, n := d.GetChange("auditors") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove = expandStringList(os.Difference(ns).List()) + add = expandStringList(ns.Difference(os).List()) + if len(add) > 0 { + for _, d := range add { + _, err := api.AssociateAuditor(orgGUID, d) + if err != nil { + return fmt.Errorf("Error associating auditor (%s) with org %s : %s", d, orgGUID, err) + } + } + } + if len(remove) > 0 { + for _, d := range remove { + err := api.DisassociateAuditor(orgGUID, d) + if err != nil { + return fmt.Errorf("Error dis-associating auditor (%s) with org %s : %s", d, orgGUID, err) + } + } + } + return nil +} + +func updateOrgUsers(meta interface{}, orgGUID string, d *schema.ResourceData) error { + if !d.HasChange("users") { + return nil + } + var remove, add []string + o, n := d.GetChange("users") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove = expandStringList(os.Difference(ns).List()) + add = expandStringList(ns.Difference(os).List()) + cfAPI, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + api := cfAPI.Organizations() + if len(add) > 0 { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + orgOwnerID := userDetails.userEmail + for _, d := range add { + if d == orgOwnerID { + return fmt.Errorf("Error associating user (%s) with User role, %v", d, errUserRoleAssociation) + } + _, err := api.AssociateUser(orgGUID, d) + if err != nil { + return fmt.Errorf("Error associating user (%s) with org %s : %s", d, orgGUID, err) + } + } + } + if len(remove) > 0 { + for _, d := range remove { + err := api.DisassociateUser(orgGUID, d) + if err != nil { + return fmt.Errorf("Error dis-associating user (%s) with org %s : %s", d, orgGUID, err) + } + } + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_capture.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_capture.go new file mode 100644 index 00000000000..dc41549a12f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_capture.go @@ -0,0 +1,207 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "errors" + "log" + "time" + + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMPICapture() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPICaptureCreate, + Read: resourceIBMPICaptureRead, + Update: resourceIBMPICaptureUpdate, + Delete: resourceIBMPICaptureDelete, + //Exists: resourceIBMPICaptureExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + Description: " Cloud Instance ID - This is the service_instance_id.", + }, + + helpers.PIInstanceName: { + Type: schema.TypeString, + Required: true, + Description: "Instance Name of the Power VM", + }, + + helpers.PIInstanceCaptureName: { + Type: schema.TypeString, + Required: true, + Description: "Name of the capture to create. Note : this must be unique", + }, + + helpers.PIInstanceCaptureDestination: { + Type: schema.TypeString, + Required: true, + Description: "Name of destination to store the image capture to", + ValidateFunc: validateAllowedStringValue([]string{"image-catalog", "cloud-storage", "both"}), + }, + + helpers.PIInstanceCaptureVolumeIds: { + Type: schema.TypeString, + Optional: true, + Description: "List of volume names that need to be passed in the input", + }, + + helpers.PIInstanceCaptureCloudStorageRegion: { + Type: schema.TypeString, + Optional: true, + Description: "List of Regions to use", + ValidateFunc: validateAllowedStringValue([]string{"us-south", "us-east", "us-de"}), + }, + + helpers.PIInstanceCaptureCloudStorageAccessKey: { + Type: schema.TypeString, + Optional: true, + Description: "Name of Cloud Storage Access Key", + }, + helpers.PIInstanceCaptureCloudStorageSecretKey: { + Type: schema.TypeString, + Optional: true, + Description: "Name of the Cloud Storage Secret Key", + }, + helpers.PIInstanceCaptureCloudStorageImagePath: { + Type: schema.TypeString, + Optional: true, + Description: "Name of the Image Path", + }, + }, + } +} + +func resourceIBMPICaptureCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + name := d.Get(helpers.PIInstanceName).(string) + capturename := d.Get(helpers.PIInstanceCaptureName).(string) + capturedestination := d.Get(helpers.PIInstanceCaptureDestination).(string) + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + + cloudstorageImagePath := d.Get(helpers.PIInstanceCaptureCloudStorageImagePath).(string) + if cloudstorageImagePath == "" { + log.Printf("CloudImagePath is not provided") + + } + + cloudstorageregion := d.Get(helpers.PIInstanceCaptureCloudStorageRegion).(string) + if cloudstorageregion == "" { + log.Printf("CloudStorageRegion is not provided") + } + + client := st.NewIBMPIInstanceClient(sess, powerinstanceid) + + body := &models.PVMInstanceCapture{ + CaptureDestination: ptrToString(capturedestination), + CaptureName: ptrToString(capturename), + CaptureVolumeIds: nil, + CloudStorageAccessKey: "", + CloudStorageImagePath: cloudstorageImagePath, + 
//CloudStorageRegion: ptrToString(cloudstorageregion),
+		CloudStorageSecretKey: "",
+	}
+
+	captureinfo, err := client.CaptureInstanceToImageCatalog(name, powerinstanceid, &p_cloud_p_vm_instances.PcloudPvminstancesCapturePostParams{
+		Body: body,
+	}, createTimeOut)
+	if err != nil {
+		// Surface the underlying API error instead of discarding it.
+		log.Printf("[ERROR] the capture request failed: %s", err)
+		return errors.New("the capture cannot be performed")
+	}
+
+	log.Printf("Printing the data from the capture %+v", &captureinfo)
+
+	// If this is an image catalog then we need to check what the status is
+
+	imageClient := st.NewIBMPIImageClient(sess, powerinstanceid)
+	imagedata, err := imageClient.Get(d.Get(helpers.PIInstanceCaptureName).(string), powerinstanceid)
+
+	if err != nil {
+		return err
+	}
+	log.Printf("Printing the data %s - %s", *imagedata.ImageID, imagedata.State)
+
+	_, err = isWaitForImageCaptureAvailable(client, *imagedata.ImageID, powerinstanceid, d.Timeout(schema.TimeoutCreate))
+	if err != nil {
+		return err
+	}
+
+	//_, err = isWaitForIBMPIVolumeAvailable(client, d.Id(), powerinstanceid, d.Timeout(schema.TimeoutCreate))
+	//if err != nil {
+	//	return err
+	//}
+	return nil
+	//return resourceIBMPIVolumeAttachRead(d, meta)
+
+}
+
+func resourceIBMPICaptureRead(d *schema.ResourceData, meta interface{}) error {
+
+	return nil
+}
+
+func resourceIBMPICaptureUpdate(d *schema.ResourceData, meta interface{}) error {
+
+	sess, _ := meta.(ClientSession).IBMPISession()
+	powerinstanceid := d.Get(helpers.PICloudInstanceId).(string)
+	client := st.NewIBMPIVolumeClient(sess, powerinstanceid)
+
+	name := ""
+	if d.HasChange(helpers.PIVolumeAttachName) {
+		name = d.Get(helpers.PIVolumeAttachName).(string)
+	}
+
+	size := d.Get(helpers.PIVolumeSize).(float64)
+	shareable := d.Get(helpers.PIVolumeShareable).(bool)
+
+	volrequest, err := client.Update(d.Id(), name, size, shareable, powerinstanceid, postTimeOut)
+	if err != nil {
+		return err
+	}
+
+	_, err = isWaitForIBMPIVolumeAvailable(client, *volrequest.VolumeID, powerinstanceid, d.Timeout(schema.TimeoutCreate))
+	if err != nil {
+		return err
+	}
+
+	return resourceIBMPIVolumeRead(d, meta)
+}
+
+func resourceIBMPICaptureDelete(d *schema.ResourceData, meta interface{}) error {
+
+	sess, _ := meta.(ClientSession).IBMPISession()
+	powerinstanceid := d.Get(helpers.PICloudInstanceId).(string)
+	client := st.NewIBMPIVolumeClient(sess, powerinstanceid)
+
+	err := client.Delete(d.Id(), powerinstanceid, deleteTimeOut)
+	if err != nil {
+		return err
+	}
+
+	// wait for power volume states to be back as available. if it's attached it will be in-use
+	d.SetId("")
+	return nil
+}
+
+func isWaitForImageCaptureAvailable(client *st.IBMPIInstanceClient, s string, s2 string, timeout time.Duration) (interface{}, error) {
+
+	return nil, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_image.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_image.go
new file mode 100644
index 00000000000..e5097441294
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_image.go
@@ -0,0 +1,204 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" +) + +func resourceIBMPIImage() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPIImageCreate, + Read: resourceIBMPIImageRead, + Update: resourceIBMPIImageUpdate, + Delete: resourceIBMPIImageDelete, + Exists: resourceIBMPIImageExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + helpers.PIImageName: { + Type: schema.TypeString, + Required: true, + Description: "Image name", + DiffSuppressFunc: applyOnce, + }, + + helpers.PIInstanceImageName: { + Type: schema.TypeString, + Required: true, + Description: "Instance image name", + DiffSuppressFunc: applyOnce, + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + Description: "PI cloud instance ID", + }, + + // Computed Attribute + + "image_id": { + Type: schema.TypeString, + Computed: true, + Description: "Image ID", + }, + }, + } +} + +func resourceIBMPIImageCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + log.Printf("Failed to get the session") + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + name := d.Get(helpers.PIImageName).(string) + imageid := d.Get(helpers.PIInstanceImageName).(string) + + client := st.NewIBMPIImageClient(sess, powerinstanceid) + + imageResponse, err := client.Create(name, imageid, powerinstanceid) + if err != nil { + return err + } + + IBMPIImageID := imageResponse.ImageID + d.SetId(fmt.Sprintf("%s/%s", powerinstanceid, *IBMPIImageID)) + + _, err = isWaitForIBMPIImageAvailable(client, *IBMPIImageID, d.Timeout(schema.TimeoutCreate), powerinstanceid) + if err != nil { + log.Printf("[DEBUG] err %s", err) + return err + } + + return resourceIBMPIImageRead(d, meta) +} + +func resourceIBMPIImageRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + imageC := st.NewIBMPIImageClient(sess, powerinstanceid) + imagedata, err := imageC.Get(parts[1], powerinstanceid) + + if err != nil { + return err + } + + imageid := *imagedata.ImageID + d.Set("image_id", imageid) + d.Set(helpers.PICloudInstanceId, powerinstanceid) + + return nil + +} + +func resourceIBMPIImageUpdate(data *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceIBMPIImageDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + imageC := st.NewIBMPIImageClient(sess, powerinstanceid) + err = imageC.Delete(parts[1], powerinstanceid) + + if err != nil { + return err + } + d.SetId("") + return nil + +} + +func resourceIBMPIImageExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + 
return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + name := parts[1] + powerinstanceid := parts[0] + client := st.NewIBMPIImageClient(sess, powerinstanceid) + + image, err := client.Get(parts[1], powerinstanceid) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return *image.ImageID == name, nil +} + +func isWaitForIBMPIImageAvailable(client *st.IBMPIImageClient, id string, timeout time.Duration, powerinstanceid string) (interface{}, error) { + log.Printf("Waiting for Power Image (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", helpers.PIImageQueStatus}, + Target: []string{helpers.PIImageActiveStatus}, + Refresh: isIBMPIImageRefreshFunc(client, id, powerinstanceid), + Timeout: timeout, + Delay: 20 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isIBMPIImageRefreshFunc(client *st.IBMPIImageClient, id, powerinstanceid string) resource.StateRefreshFunc { + + log.Printf("Calling the isIBMPIImageRefreshFunc Refresh Function....") + return func() (interface{}, string, error) { + image, err := client.Get(id, powerinstanceid) + if err != nil { + return nil, "", err + } + + if image.State == "active" { + + return image, helpers.PIImageActiveStatus, nil + } + + return image, helpers.PIImageQueStatus, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_instance.go new file mode 100644 index 00000000000..91559425f96 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_instance.go @@ -0,0 +1,984 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/base64" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +const ( + createTimeOut = 120 * time.Second + updateTimeOut = 120 * time.Second + postTimeOut = 60 * time.Second + getTimeOut = 60 * time.Second + deleteTimeOut = 60 * time.Second + //Added timeout values for warning and active status + warningTimeOut = 30 * time.Second + activeTimeOut = 2 * time.Minute +) + +func resourceIBMPIInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPIInstanceCreate, + Read: resourceIBMPIInstanceRead, + Update: resourceIBMPIInstanceUpdate, + Delete: resourceIBMPIInstanceDelete, + Exists: resourceIBMPIInstanceExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(120 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + Description: "This is the Power Instance id that is assigned to the account", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "PI instance status", + }, + "migratable": { + Type: schema.TypeBool, + Computed: true, + Description: "set to true to enable migration of the PI instance", + }, + "min_processors": { + Type: schema.TypeFloat, + Computed: true, + Description: "Minimum number of the CPUs", + }, + "min_memory": { + Type: schema.TypeFloat, + Computed: true, + Description: "Minimum memory", + }, + "max_processors": { + Type: schema.TypeFloat, + Computed: true, + Description: "Maximum number of processors", + }, + "max_memory": { + Type: schema.TypeFloat, + Computed: true, + Description: "Maximum memory size", + }, + helpers.PIInstanceNetworkIds: { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of Networks that have been configured for the account", + DiffSuppressFunc: applyOnce, + }, + + helpers.PIInstanceVolumeIds: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + DiffSuppressFunc: applyOnce, + Description: "List of PI volumes", + }, + + helpers.PIInstanceUserData: { + Type: schema.TypeString, + Optional: true, + Description: "Base64 encoded data to be passed in for invoking a cloud init script", + }, + + "addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip": { + Type: schema.TypeString, + Computed: true, + }, + "macaddress": { + Type: schema.TypeString, + Computed: true, + }, + "network_id": { + Type: schema.TypeString, + Computed: true, + }, + "network_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "external_ip": { + Type: schema.TypeString, + Computed: true, + }, + /*"version": { + Type: schema.TypeFloat, + Computed: true, + },*/ + }, + }, + }, + + "health_status": { + Type: schema.TypeString, + 
Computed: true, + Description: "PI Instance health status", + }, + "instance_id": { + Type: schema.TypeString, + Computed: true, + Description: "Instance ID", + }, + "pin_policy": { + Type: schema.TypeString, + Computed: true, + Description: "PIN Policy of the Instance", + }, + helpers.PIInstanceImageName: { + Type: schema.TypeString, + Required: true, + Description: "PI instance image name", + }, + helpers.PIInstanceProcessors: { + Type: schema.TypeFloat, + Required: true, + Description: "Processors count", + }, + helpers.PIInstanceName: { + Type: schema.TypeString, + Required: true, + Description: "PI Instance name", + }, + helpers.PIInstanceProcType: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"dedicated", "shared", "capped"}), + Description: "Instance processor type", + }, + helpers.PIInstanceSSHKeyName: { + Type: schema.TypeString, + Required: true, + Description: "SSH key name", + }, + helpers.PIInstanceMemory: { + Type: schema.TypeFloat, + Required: true, + Description: "Memory size", + }, + helpers.PIInstanceSystemType: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"s922", "e880", "e980"}), + Description: "PI Instance system type", + }, + helpers.PIInstanceReplicants: { + Type: schema.TypeFloat, + Optional: true, + Default: 1.0, + Description: "PI Instance replicas count", + }, + helpers.PIInstanceReplicationPolicy: { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"affinity", "anti-affinity", "none"}), + Default: "none", + Description: "Replication policy for the PI Instance", + }, + helpers.PIInstanceReplicationScheme: { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"prefix", "suffix"}), + Default: "suffix", + Description: "Replication scheme", + }, + helpers.PIInstanceProgress: { + Type: schema.TypeFloat, + Computed: true, + Description: "Progress of the operation", + }, + helpers.PIInstancePinPolicy: { + Type: schema.TypeString, + Optional: true, + Description: "Pin Policy of the instance", + Default: "none", + ValidateFunc: validateAllowedStringValue([]string{"none", "soft", "hard"}), + }, + + // "reboot_for_resource_change": { + // Type: schema.TypeString, + // Optional: true, + // Description: "Flag to be passed for CPU/Memory changes that require a reboot to take effect", + // }, + "operating_system": { + Type: schema.TypeString, + Computed: true, + Description: "Operating System", + }, + "os_type": { + Type: schema.TypeString, + Computed: true, + Description: "OS Type", + }, + helpers.PIInstanceHealthStatus: { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{"OK", "WARNING"}), + Default: "OK", + Description: "Allow the user to set the status of the lpar so that they can connect to it faster", + }, + helpers.PIVirtualCoresAssigned: { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Virtual Cores Assigned to the PVMInstance", + }, + "max_virtual_cores": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum Virtual Cores Assigned to the PVMInstance", + }, + "min_virtual_cores": { + Type: schema.TypeInt, + Computed: true, + Description: "Minimum Virtual Cores Assigned to the PVMInstance", + }, + }, + } +} + +func resourceIBMPIInstanceCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("Now in the PowerVMCreate") + sess, err := meta.(ClientSession).IBMPISession() + if 
err != nil { + return err + } + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + name := d.Get(helpers.PIInstanceName).(string) + sshkey := d.Get(helpers.PIInstanceSSHKeyName).(string) + mem := d.Get(helpers.PIInstanceMemory).(float64) + procs := d.Get(helpers.PIInstanceProcessors).(float64) + systype := d.Get(helpers.PIInstanceSystemType).(string) + networks := expandStringList(d.Get(helpers.PIInstanceNetworkIds).([]interface{})) + var volids []string + if v, ok := d.GetOk(helpers.PIInstanceVolumeIds); ok { + volids = expandStringList((v.(*schema.Set)).List()) + } + var replicants float64 + if r, ok := d.GetOk(helpers.PIInstanceReplicants); ok { + replicants = r.(float64) + } + var replicationpolicy string + if r, ok := d.GetOk(helpers.PIInstanceReplicationPolicy); ok { + replicationpolicy = r.(string) + } + var replicationNamingScheme string + if r, ok := d.GetOk(helpers.PIInstanceReplicationScheme); ok { + replicationNamingScheme = r.(string) + } + imageid := d.Get(helpers.PIInstanceImageName).(string) + processortype := d.Get(helpers.PIInstanceProcType).(string) + + var pinpolicy string + if p, ok := d.GetOk(helpers.PIInstancePinPolicy); ok { + pinpolicy = p.(string) + if pinpolicy == "" { + pinpolicy = "none" + } + } + var instanceReadyStatus string + if r, ok := d.GetOk(helpers.PIInstanceHealthStatus); ok { + instanceReadyStatus = r.(string) + } + + var userData string + if u, ok := d.GetOk(helpers.PIInstanceUserData); ok { + userData = u.(string) + } + err = checkBase64(userData) + if err != nil { + log.Printf("Data is not base64 encoded") + return err + } + + //publicinterface := d.Get(helpers.PIInstancePublicNetwork).(bool) + body := &models.PVMInstanceCreate{ + //NetworkIds: networks, + Processors: &procs, + Memory: &mem, + ServerName: ptrToString(name), + SysType: systype, + KeyPairName: sshkey, + ImageID: ptrToString(imageid), + ProcType: ptrToString(processortype), + Replicants: replicants, + UserData: userData, + ReplicantNamingScheme: ptrToString(replicationNamingScheme), + ReplicantAffinityPolicy: ptrToString(replicationpolicy), + Networks: buildPVMNetworks(networks), + } + if len(volids) > 0 { + body.VolumeIds = volids + } + if d.Get(helpers.PIInstancePinPolicy) == "soft" || d.Get(helpers.PIInstancePinPolicy) == "hard" { + body.PinPolicy = models.PinPolicy(pinpolicy) + } + + var assignedVirtualCores int64 + if a, ok := d.GetOk(helpers.PIVirtualCoresAssigned); ok { + assignedVirtualCores = int64(a.(int)) + body.VirtualCores = &models.VirtualCores{Assigned: &assignedVirtualCores} + } + + client := st.NewIBMPIInstanceClient(sess, powerinstanceid) + pvm, err := client.Create(&p_cloud_p_vm_instances.PcloudPvminstancesPostParams{ + Body: body, + }, powerinstanceid, createTimeOut) + + if err != nil { + return fmt.Errorf("failed to provision %s", err) + } + + var pvminstanceids []string + if replicants > 1 { + log.Printf("We are in a multi create mode") + for i := 0; i < int(replicants); i++ { + truepvmid := (*pvm)[i].PvmInstanceID + pvminstanceids = append(pvminstanceids, fmt.Sprintf("%s", *truepvmid)) + d.SetId(fmt.Sprintf("%s/%s", powerinstanceid, *truepvmid)) + } + d.SetId(strings.Join(pvminstanceids, "/")) + } else { + truepvmid := (*pvm)[0].PvmInstanceID + d.SetId(fmt.Sprintf("%s/%s", powerinstanceid, *truepvmid)) + pvminstanceids = append(pvminstanceids, *truepvmid) + } + + for ids := range pvminstanceids { + _, err = isWaitForPIInstanceAvailable(client, pvminstanceids[ids], d.Timeout(schema.TimeoutCreate), powerinstanceid, instanceReadyStatus) + if err != nil 
{ + return err + } + } + + return resourceIBMPIInstanceRead(d, meta) + +} + +func resourceIBMPIInstanceRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + powerC := st.NewIBMPIInstanceClient(sess, powerinstanceid) + powervmdata, err := powerC.Get(parts[1], powerinstanceid, getTimeOut) + if err != nil { + return fmt.Errorf("failed to get the instance %v", err) + } + + d.Set(helpers.PIInstanceMemory, powervmdata.Memory) + d.Set(helpers.PIInstanceProcessors, powervmdata.Processors) + if powervmdata.Status != nil { + d.Set("status", powervmdata.Status) + } + d.Set(helpers.PIInstanceProcType, powervmdata.ProcType) + if powervmdata.Migratable != nil { + d.Set("migratable", powervmdata.Migratable) + } + if &powervmdata.Minproc != nil { + d.Set("min_processors", powervmdata.Minproc) + } + if &powervmdata.Progress != nil { + d.Set(helpers.PIInstanceProgress, powervmdata.Progress) + } + d.Set(helpers.PICloudInstanceId, powerinstanceid) + if powervmdata.PvmInstanceID != nil { + d.Set("instance_id", powervmdata.PvmInstanceID) + } + d.Set(helpers.PIInstanceName, powervmdata.ServerName) + d.Set(helpers.PIInstanceImageName, powervmdata.ImageID) + var networks []string + networks = make([]string, 0) + if powervmdata.Networks != nil { + for _, n := range powervmdata.Networks { + if n != nil { + networks = append(networks, n.NetworkID) + } + + } + } + d.Set(helpers.PIInstanceNetworkIds, networks) + if powervmdata.VolumeIds != nil { + d.Set(helpers.PIInstanceVolumeIds, powervmdata.VolumeIds) + } + d.Set(helpers.PIInstanceSystemType, powervmdata.SysType) + if &powervmdata.Minmem != nil { + d.Set("min_memory", powervmdata.Minmem) + } + if &powervmdata.Maxproc != nil { + d.Set("max_processors", powervmdata.Maxproc) + } + if &powervmdata.Maxmem != nil { + d.Set("max_memory", powervmdata.Maxmem) + } + if &powervmdata.PinPolicy != nil { + d.Set("pin_policy", powervmdata.PinPolicy) + } + if &powervmdata.OperatingSystem != nil { + d.Set("operating_system", powervmdata.OperatingSystem) + } + if &powervmdata.OsType != nil { + d.Set("os_type", powervmdata.OsType) + } + + if powervmdata.Addresses != nil { + pvmaddress := make([]map[string]interface{}, len(powervmdata.Addresses)) + for i, pvmip := range powervmdata.Addresses { + log.Printf("Now entering the powervm address space....") + + p := make(map[string]interface{}) + if &pvmip.IP != nil { + p["ip"] = pvmip.IP + } + if &pvmip.NetworkName != nil { + p["network_name"] = pvmip.NetworkName + } + if &pvmip.NetworkID != nil { + p["network_id"] = pvmip.NetworkID + } + if &pvmip.MacAddress != nil { + p["macaddress"] = pvmip.MacAddress + } + if &pvmip.Type != nil { + p["type"] = pvmip.Type + } + if &pvmip.ExternalIP != nil { + p["external_ip"] = pvmip.ExternalIP + } + pvmaddress[i] = p + } + d.Set("addresses", pvmaddress) + } + + if powervmdata.Health != nil { + d.Set("health_status", powervmdata.Health.Status) + } + if powervmdata.VirtualCores.Assigned != nil { + d.Set(helpers.PIVirtualCoresAssigned, powervmdata.VirtualCores.Assigned) + } + if &powervmdata.VirtualCores.Max != nil { + d.Set("max_virtual_cores", powervmdata.VirtualCores.Max) + } + if &powervmdata.VirtualCores.Min != nil { + d.Set("min_virtual_cores", powervmdata.VirtualCores.Min) + } + + return nil + +} + +func resourceIBMPIInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + + name := 
d.Get(helpers.PIInstanceName).(string)
+	mem := d.Get(helpers.PIInstanceMemory).(float64)
+	procs := d.Get(helpers.PIInstanceProcessors).(float64)
+	processortype := d.Get(helpers.PIInstanceProcType).(string)
+	assignedVirtualCores := int64(d.Get(helpers.PIVirtualCoresAssigned).(int))
+
+	sess, err := meta.(ClientSession).IBMPISession()
+	if err != nil {
+		return fmt.Errorf("failed to get the session from the IBM Cloud Service: %v", err)
+	}
+	if d.Get("health_status") == "WARNING" {
+		return fmt.Errorf("the operation cannot be performed when the lpar health is in the WARNING state")
+	}
+
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+	powerinstanceid := parts[0]
+	client := st.NewIBMPIInstanceClient(sess, powerinstanceid)
+
+	//if d.HasChange(helpers.PIInstanceName) || d.HasChange(helpers.PIInstanceProcessors) || d.HasChange(helpers.PIInstanceProcType) || d.HasChange(helpers.PIInstancePinPolicy){
+	if d.HasChange(helpers.PIInstanceProcType) {
+
+		// Stop the lpar; a processor type change requires the instance to be off.
+		if d.Get("status") == "SHUTOFF" {
+			log.Printf("The lpar is already in the SHUTOFF state; nothing to stop, moving on.")
+		} else {
+			body := &models.PVMInstanceAction{
+				Action: ptrToString("immediate-shutdown"),
+			}
+			_, err = client.Action(&p_cloud_p_vm_instances.PcloudPvminstancesActionPostParams{Body: body}, parts[1], powerinstanceid, postTimeOut)
+			if err != nil {
+				return fmt.Errorf("failed to perform the stop action on the pvm instance %v", err)
+			}
+
+			_, err = isWaitForPIInstanceStopped(client, parts[1], d.Timeout(schema.TimeoutUpdate), powerinstanceid)
+			if err != nil {
+				return fmt.Errorf("failed to wait for the pvm instance to stop: %v", err)
+			}
+		}
+
+		// Modify
+
+		log.Printf("At this point the lpar should be off.")
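+		// A minimal sketch of the cycle this block performs, using only the calls
+		// already made in this file (the quoted strings are the action names passed
+		// to client.Action above and below):
+		//
+		//   stop:   PVMInstanceAction{Action: "immediate-shutdown"} -> wait for SHUTOFF
+		//   modify: PVMInstanceUpdate{ProcType: processortype}      -> wait for SHUTOFF
+		//   start:  PVMInstanceAction{Action: "start"}              -> wait for ACTIVE / OK
+		log.Printf("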
Executing the Processor Update Change") + updatebody := &models.PVMInstanceUpdate{ProcType: processortype} + _, err = client.Update(parts[1], powerinstanceid, &p_cloud_p_vm_instances.PcloudPvminstancesPutParams{Body: updatebody}, updateTimeOut) + if err != nil { + return fmt.Errorf("failed to perform the modify operation on the pvm instance %v", err) + } + _, err = isWaitForPIInstanceStopped(client, parts[1], d.Timeout(schema.TimeoutUpdate), powerinstanceid) + if err != nil { + return err + } + + // Start + + startbody := &models.PVMInstanceAction{ + Action: ptrToString("start"), + } + _, err = client.Action(&p_cloud_p_vm_instances.PcloudPvminstancesActionPostParams{Body: startbody}, parts[1], powerinstanceid, postTimeOut) + if err != nil { + return fmt.Errorf("failed to perform the start action on the pvm instance %v", err) + } + + _, err = isWaitForPIInstanceAvailable(client, parts[1], d.Timeout(schema.TimeoutUpdate), powerinstanceid, "OK") + if err != nil { + return err + } + + } + + // Start of the change for Memory and Processors + if d.HasChange(helpers.PIVirtualCoresAssigned) { + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + + client := st.NewIBMPIInstanceClient(sess, powerinstanceid) + + body := &models.PVMInstanceUpdate{ + VirtualCores: &models.VirtualCores{Assigned: &assignedVirtualCores}, + } + _, err = client.Update(parts[1], powerinstanceid, &p_cloud_p_vm_instances.PcloudPvminstancesPutParams{Body: body}, updateTimeOut) + if err != nil { + return fmt.Errorf("failed to update the lpar with the change for virtual cores %s", err) + } + _, err = isWaitForPIInstanceAvailable(client, parts[1], d.Timeout(schema.TimeoutUpdate), powerinstanceid, "OK") + if err != nil { + return err + } + } + + if d.HasChange(helpers.PIInstanceMemory) || d.HasChange(helpers.PIInstanceProcessors) { + + maxMemLpar := d.Get("max_memory").(float64) + maxCPULpar := d.Get("max_processors").(float64) + //log.Printf("the required memory is set to [%d] and current max memory is set to [%d] ", int(mem), int(maxMemLpar)) + + if mem > maxMemLpar || procs > maxCPULpar { + log.Printf("Will require a shutdown to perform the change") + + } else { + log.Printf("maxMemLpar is set to %f", maxMemLpar) + log.Printf("maxCPULpar is set to %f", maxCPULpar) + } + + //if d.GetOkExists("reboot_for_resource_change") + + if mem > maxMemLpar || procs > maxCPULpar { + + _, err = performChangeAndReboot(client, parts[1], powerinstanceid, mem, procs) + //_, err = stopLparForResourceChange(client, parts[1], powerinstanceid) + if err != nil { + return fmt.Errorf("failed to perform the operation for the change") + } + + } else { + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + + client := st.NewIBMPIInstanceClient(sess, powerinstanceid) + + body := &models.PVMInstanceUpdate{ + Memory: mem, + ProcType: processortype, + Processors: procs, + ServerName: name, + } + body.VirtualCores = &models.VirtualCores{Assigned: &assignedVirtualCores} + + _, err = client.Update(parts[1], powerinstanceid, &p_cloud_p_vm_instances.PcloudPvminstancesPutParams{Body: body}, updateTimeOut) + if err != nil { + return fmt.Errorf("failed to update the lpar with the change %s", err) + } + _, err = isWaitForPIInstanceAvailable(client, parts[1], d.Timeout(schema.TimeoutUpdate), powerinstanceid, "OK") + if err != nil { + return err + } + + } + + } + + return resourceIBMPIInstanceRead(d, meta) + +} + +func resourceIBMPIInstanceDelete(d *schema.ResourceData, meta 
interface{}) error { + sess, _ := meta.(ClientSession).IBMPISession() + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + client := st.NewIBMPIInstanceClient(sess, powerinstanceid) + err = client.Delete(parts[1], powerinstanceid, deleteTimeOut) + if err != nil { + return fmt.Errorf("failed to perform the delete action on the pvm instance %s", err) + } + + _, err = isWaitForPIInstanceDeleted(client, parts[1], d.Timeout(schema.TimeoutDelete), powerinstanceid) + if err != nil { + return err + } + + d.SetId("") + return nil +} + +// Exists + +func resourceIBMPIInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + log.Printf("Calling the PowerInstance Exists method") + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + powerinstanceid := parts[0] + client := st.NewIBMPIInstanceClient(sess, powerinstanceid) + + instance, err := client.Get(parts[1], powerinstanceid, getTimeOut) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("error communicating with the API: %s", err) + } + + truepvmid := *instance.PvmInstanceID + return truepvmid == parts[1], nil +} + +func isWaitForPIInstanceDeleted(client *st.IBMPIInstanceClient, id string, timeout time.Duration, powerinstanceid string) (interface{}, error) { + + log.Printf("Waiting for (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", helpers.PIInstanceDeleting}, + Target: []string{helpers.PIInstanceNotFound}, + Refresh: isPIInstanceDeleteRefreshFunc(client, id, powerinstanceid), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + Timeout: 10 * time.Minute, + } + + return stateConf.WaitForState() +} + +func isPIInstanceDeleteRefreshFunc(client *st.IBMPIInstanceClient, id, powerinstanceid string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + pvm, err := client.Get(id, powerinstanceid, getTimeOut) + if err != nil { + log.Printf("The power vm does not exist") + return pvm, helpers.PIInstanceNotFound, nil + + } + return pvm, helpers.PIInstanceDeleting, nil + + } +} + +func isWaitForPIInstanceAvailable(client *st.IBMPIInstanceClient, id string, timeout time.Duration, powerinstanceid string, instanceReadyStatus string) (interface{}, error) { + log.Printf("Waiting for PIInstance (%s) to be available and active ", id) + + var queryTimeOut time.Duration + + if instanceReadyStatus == "WARNING" { + queryTimeOut = warningTimeOut + } else { + queryTimeOut = activeTimeOut + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING", helpers.PIInstanceBuilding, helpers.PIInstanceHealthWarning}, + Target: []string{helpers.PIInstanceAvailable, helpers.PIInstanceHealthOk, "ERROR", ""}, + Refresh: isPIInstanceRefreshFunc(client, id, powerinstanceid, instanceReadyStatus), + Delay: 10 * time.Second, + MinTimeout: queryTimeOut, + Timeout: 120 * time.Minute, + } + + return stateConf.WaitForState() +} + +func isPIInstanceRefreshFunc(client *st.IBMPIInstanceClient, id, powerinstanceid, instanceReadyStatus string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + pvm, err := client.Get(id, powerinstanceid, getTimeOut) + if err != nil { + return nil, "", err + } + allowableStatus := instanceReadyStatus + if *pvm.Status == helpers.PIInstanceAvailable && 
(pvm.Health.Status == allowableStatus) { + return pvm, helpers.PIInstanceAvailable, nil + } + if *pvm.Status == "ERROR" { + return pvm, *pvm.Status, fmt.Errorf("Failed to create the lpar") + } + + return pvm, helpers.PIInstanceBuilding, nil + } +} + +func checkBase64(input string) error { + _, err := base64.StdEncoding.DecodeString(input) + if err != nil { + return fmt.Errorf("Failed to check if input is base64 %s", err) + } + return err + +} + +func isWaitForPIInstanceStopped(client *st.IBMPIInstanceClient, id string, timeout time.Duration, powerinstanceid string) (interface{}, error) { + log.Printf("Waiting for PIInstance (%s) to be stopped and powered off ", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"STOPPING", "RESIZE", "VERIFY_RESIZE", helpers.PIInstanceHealthWarning}, + Target: []string{"OK", "SHUTOFF"}, + Refresh: isPIInstanceRefreshFuncOff(client, id, powerinstanceid), + Delay: 10 * time.Second, + MinTimeout: 2 * time.Minute, // This is the time that the client will execute to check the status of the request + Timeout: 30 * time.Minute, + } + + return stateConf.WaitForState() +} + +func isPIInstanceRefreshFuncOff(client *st.IBMPIInstanceClient, id, powerinstanceid string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + log.Printf("Calling the check Refresh status of the pvm [%s] for cloud instance id [%s ]", id, powerinstanceid) + pvm, err := client.Get(id, powerinstanceid, getTimeOut) + if err != nil { + return nil, "", err + } + if *pvm.Status == "SHUTOFF" && pvm.Health.Status == helpers.PIInstanceHealthOk { + return pvm, "SHUTOFF", nil + } + return pvm, "STOPPING", nil + } +} + +func stopLparForResourceChange(client *st.IBMPIInstanceClient, id, powerinstanceid string) (interface{}, error) { + //TODO + + log.Printf("Callin the stop lpar for Resource Change code ..") + body := &models.PVMInstanceAction{ + //Action: ptrToString("stop"), + Action: ptrToString("immediate-shutdown"), + } + _, err := client.Action(&p_cloud_p_vm_instances.PcloudPvminstancesActionPostParams{Body: body}, id, powerinstanceid, postTimeOut) + if err != nil { + return nil, err + } + + _, err = isWaitForPIInstanceStopped(client, id, 30, powerinstanceid) + if err != nil { + return nil, fmt.Errorf("failed to stop the lpar") + } + + return nil, err +} + +// Start the lpar + +func startLparAfterResourceChange(client *st.IBMPIInstanceClient, id, powerinstanceid string) (interface{}, error) { + //TODO + body := &models.PVMInstanceAction{ + Action: ptrToString("start"), + } + _, err := client.Action(&p_cloud_p_vm_instances.PcloudPvminstancesActionPostParams{Body: body}, id, powerinstanceid, postTimeOut) + if err != nil { + return nil, fmt.Errorf("start Action failed on [%s] %s", id, err) + } + + _, err = isWaitForPIInstanceAvailable(client, id, 30, powerinstanceid, "OK") + if err != nil { + return nil, fmt.Errorf("failed to stop the lpar") + } + + return nil, err +} + +// Stop / Modify / Start only when the lpar is off limits + +func performChangeAndReboot(client *st.IBMPIInstanceClient, id, powerinstanceid string, mem, procs float64) (interface{}, error) { + /* + These are the steps + 1. Stop the lpar - Check if the lpar is SHUTOFF + 2. Once the lpar is SHUTOFF - Make the cpu / memory change - DUring this time , you can check for RESIZE and VERIFY_RESIZE as the transition states + 3. If the change is successful , the lpar state will be back in SHUTOFF + 4. 
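(the helper isWaitforPIInstanceUpdate below polls exactly these states: RESIZE / VERIFY_RESIZE until SHUTOFF or ACTIVE)
+	   5. 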
Once the LPAR state is SHUTOFF , initiate the start again and check for ACTIVE + OK + */ + //Execute the stop + + log.Printf("Callin the stop lpar for Resource Change code ..") + stopbody := &models.PVMInstanceAction{ + //Action: ptrToString("stop"), + Action: ptrToString("immediate-shutdown"), + } + + _, err := client.Action(&p_cloud_p_vm_instances.PcloudPvminstancesActionPostParams{Body: stopbody}, id, powerinstanceid, postTimeOut) + if err != nil { + return nil, fmt.Errorf("Stop Action failed on [%s]: %s", id, err) + } + _, err = isWaitForPIInstanceStopped(client, id, 30, powerinstanceid) + if err != nil { + return nil, fmt.Errorf("failed to stop the lpar") + } + + body := &models.PVMInstanceUpdate{ + Memory: mem, + //ProcType: processortype, + Processors: procs, + //ServerName: name, + } + + _, updateErr := client.Update(id, powerinstanceid, &p_cloud_p_vm_instances.PcloudPvminstancesPutParams{Body: body}, updateTimeOut) + if updateErr != nil { + return nil, fmt.Errorf("failed to update the lpar with the change, %s", updateErr) + } + + _, err = isWaitforPIInstanceUpdate(client, id, 30, powerinstanceid) + if err != nil { + return nil, fmt.Errorf("failed to get an update from the Service after the resource change, %s", err) + } + + // Now we can start the lpar + + log.Printf("Calling the start lpar After the Resource Change code ..") + startbody := &models.PVMInstanceAction{ + //Action: ptrToString("stop"), + Action: ptrToString("start"), + } + _, starterr := client.Action(&p_cloud_p_vm_instances.PcloudPvminstancesActionPostParams{Body: startbody}, id, powerinstanceid, postTimeOut) + if starterr != nil { + log.Printf("Start Action failed on [%s]", id) + + return nil, fmt.Errorf("the error from the start is %s", starterr) + } + + _, err = isWaitForPIInstanceAvailable(client, id, 30, powerinstanceid, "OK") + if err != nil { + return nil, fmt.Errorf("failed to stop the lpar %s", err) + } + + return nil, err + +} + +func isWaitforPIInstanceUpdate(client *st.IBMPIInstanceClient, id string, timeout time.Duration, powerinstanceid string) (interface{}, error) { + log.Printf("Waiting for PIInstance (%s) to be SHUTOFF AFTER THE RESIZE Due to DLPAR Operation ", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"RESIZE", "VERIFY_RESIZE"}, + Target: []string{"ACTIVE", "SHUTOFF", helpers.PIInstanceHealthOk}, + Refresh: isPIInstanceShutAfterResourceChange(client, id, powerinstanceid), + Delay: 10 * time.Second, + MinTimeout: 5 * time.Minute, + Timeout: 60 * time.Minute, + } + + return stateConf.WaitForState() +} + +func isPIInstanceShutAfterResourceChange(client *st.IBMPIInstanceClient, id, powerinstanceid string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + pvm, err := client.Get(id, powerinstanceid, getTimeOut) + if err != nil { + return nil, "", err + } + + if *pvm.Status == "SHUTOFF" && pvm.Health.Status == helpers.PIInstanceHealthOk { + log.Printf("The lpar is now off after the resource change...") + return pvm, "SHUTOFF", nil + } + + return pvm, "RESIZE", nil + } +} + +func buildPVMNetworks(networks []string) []*models.PVMInstanceAddNetwork { + var pvmNetworks []*models.PVMInstanceAddNetwork + + for i := 0; i < len(networks); i++ { + pvmInstanceNetwork := &models.PVMInstanceAddNetwork{ + //TODO : Enable the functionality to pass in ip address for the network + IPAddress: "", + NetworkID: ptrToString(string(networks[i])), + } + pvmNetworks = append(pvmNetworks, pvmInstanceNetwork) + + } + return pvmNetworks +} diff --git 
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_key.go new file mode 100644 index 00000000000..3f760dde1fe --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_key.go @@ -0,0 +1,168 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" +) + +func resourceIBMPIKey() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPIKeyCreate, + Read: resourceIBMPIKeyRead, + Update: resourceIBMPIKeyUpdate, + Delete: resourceIBMPIKeyDelete, + Exists: resourceIBMPIKeyExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + helpers.PIKeyName: { + Type: schema.TypeString, + Required: true, + Description: "Key name in the PI instance", + }, + + helpers.PIKey: { + Type: schema.TypeString, + Required: true, + Description: "PI instance key info", + }, + helpers.PIKeyDate: { + Type: schema.TypeString, + Computed: true, + Description: "Date info", + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + Description: "PI cloud instance ID", + }, + + "key_id": { + Type: schema.TypeString, + Computed: true, + Description: "Key ID in the PI instance", + }, + }, + } +} + +func resourceIBMPIKeyCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + name := d.Get(helpers.PIKeyName).(string) + sshkey := d.Get(helpers.PIKey).(string) + client := st.NewIBMPIKeyClient(sess, powerinstanceid) + + sshResponse, _, err := client.Create(name, sshkey, powerinstanceid) + if err != nil { + log.Printf("[DEBUG] err %s", err) + return fmt.Errorf("Failed to create the key %v", err) + + } + + log.Printf("Printing the sshkey %+v", &sshResponse) + + d.SetId(fmt.Sprintf("%s/%s", powerinstanceid, name)) + return resourceIBMPIKeyRead(d, meta) +} + +func resourceIBMPIKeyRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return fmt.Errorf("Failed to obtain the key %v", err) + } + + powerinstanceid := parts[0] + sshkeyC := st.NewIBMPIKeyClient(sess, powerinstanceid) + sshkeydata, err := sshkeyC.Get(parts[1], powerinstanceid) + + if err != nil { + return err + } + + d.Set(helpers.PIKeyName, sshkeydata.Name) + d.Set(helpers.PIKey, sshkeydata.SSHKey) + d.Set(helpers.PIKeyDate, sshkeydata.CreationDate.String()) + d.Set("key_id", sshkeydata.Name) + d.Set(helpers.PICloudInstanceId, powerinstanceid) + + return nil + +} + +func resourceIBMPIKeyUpdate(data *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceIBMPIKeyDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + powerinstanceid := parts[0] + 
sshkeyC := st.NewIBMPIKeyClient(sess, powerinstanceid) + err = sshkeyC.Delete(parts[1], powerinstanceid) + + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceIBMPIKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + name := parts[1] + powerinstanceid := parts[0] + client := st.NewIBMPIKeyClient(sess, powerinstanceid) + + key, err := client.Get(parts[1], powerinstanceid) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return *key.Name == name, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_network.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_network.go new file mode 100644 index 00000000000..901b9a6f8dc --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_network.go @@ -0,0 +1,273 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "net" + "strconv" + "time" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" +) + +func resourceIBMPINetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPINetworkCreate, + Read: resourceIBMPINetworkRead, + Update: resourceIBMPINetworkUpdate, + Delete: resourceIBMPINetworkDelete, + //Exists: resourceIBMPINetworkExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + helpers.PINetworkType: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"vlan", "pub-vlan"}), + Description: "PI network type", + }, + + helpers.PINetworkName: { + Type: schema.TypeString, + Required: true, + Description: "PI network name", + }, + helpers.PINetworkDNS: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of PI network DNS name", + }, + + helpers.PINetworkCidr: { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "PI network CIDR", + }, + + helpers.PINetworkGateway: { + Type: schema.TypeString, + Optional: true, + Description: "PI network gateway", + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + Description: "PI cloud instance ID", + }, + + //Computed Attributes + + "network_id": { + Type: schema.TypeString, + Computed: true, + Description: "PI network ID", + }, + "vlan_id": { + Type: schema.TypeFloat, + Computed: true, + Description: "VLAN Id value", + }, + }, + } +} + +func resourceIBMPINetworkCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + networkname := d.Get(helpers.PINetworkName).(string) + networktype := 
d.Get(helpers.PINetworkType).(string) + networkcidr := d.Get(helpers.PINetworkCidr).(string) + networkdns := expandStringList((d.Get(helpers.PINetworkDNS).(*schema.Set)).List()) + + client := st.NewIBMPINetworkClient(sess, powerinstanceid) + var networkgateway, firstip, lastip string + if networktype == "vlan" { + networkgateway, firstip, lastip = generateIPData(networkcidr) + } + networkResponse, _, err := client.Create(networkname, networktype, networkcidr, networkdns, networkgateway, firstip, lastip, powerinstanceid, postTimeOut) + if err != nil { + return err + } + + IBMPINetworkID := *networkResponse.NetworkID + + d.SetId(fmt.Sprintf("%s/%s", powerinstanceid, IBMPINetworkID)) + + _, err = isWaitForIBMPINetworkAvailable(client, IBMPINetworkID, d.Timeout(schema.TimeoutCreate), powerinstanceid) + if err != nil { + return err + } + + return resourceIBMPINetworkRead(d, meta) +} + +func resourceIBMPINetworkRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + powerinstanceid := parts[0] + networkC := st.NewIBMPINetworkClient(sess, powerinstanceid) + networkdata, err := networkC.Get(parts[1], powerinstanceid, getTimeOut) + if err != nil { + return err + } + + d.Set("network_id", networkdata.NetworkID) + d.Set(helpers.PINetworkCidr, networkdata.Cidr) + d.Set(helpers.PINetworkDNS, networkdata.DNSServers) + d.Set("vlan_id", networkdata.VlanID) + d.Set(helpers.PINetworkName, networkdata.Name) + d.Set(helpers.PINetworkType, networkdata.Type) + d.Set(helpers.PICloudInstanceId, powerinstanceid) + + return nil + +} + +func resourceIBMPINetworkUpdate(data *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceIBMPINetworkDelete(d *schema.ResourceData, meta interface{}) error { + + log.Printf("Calling the network delete functions. 
") + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + networkC := st.NewIBMPINetworkClient(sess, powerinstanceid) + err = networkC.Delete(parts[1], powerinstanceid, deleteTimeOut) + + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceIBMPINetworkExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + powerinstanceid := parts[0] + client := st.NewIBMPINetworkClient(sess, powerinstanceid) + + network, err := client.Get(parts[0], powerinstanceid, getTimeOut) + if err != nil { + + return false, err + } + return *network.NetworkID == parts[1], nil +} + +func isWaitForIBMPINetworkAvailable(client *st.IBMPINetworkClient, id string, timeout time.Duration, powerinstanceid string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", helpers.PINetworkProvisioning}, + Target: []string{"NETWORK_READY"}, + Refresh: isIBMPINetworkRefreshFunc(client, id, powerinstanceid), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isIBMPINetworkRefreshFunc(client *st.IBMPINetworkClient, id, powerinstanceid string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + network, err := client.Get(id, powerinstanceid, getTimeOut) + if err != nil { + return nil, "", err + } + + if &network.VlanID != nil { + return network, "NETWORK_READY", nil + } + + return network, helpers.PINetworkProvisioning, nil + } +} + +func generateIPData(cdir string) (gway, firstip, lastip string) { + _, ipv4Net, err := net.ParseCIDR(cdir) + + if err != nil { + log.Fatal(err) + } + + var subnetToSize = map[string]int{ + "21": 2048, + "22": 1024, + "23": 512, + "24": 256, + "25": 128, + "26": 64, + "27": 32, + "28": 16, + "29": 8, + "30": 4, + "31": 2, + } + + //subnetsize, _ := ipv4Net.Mask.Size() + + gateway, err := cidr.Host(ipv4Net, 1) + if err != nil { + log.Printf("Failed to get the gateway for this cdir passed in %s", cdir) + log.Fatal(err) + } + ad := cidr.AddressCount(ipv4Net) + + convertedad := strconv.FormatUint(ad, 10) + // Powervc in wdc04 has to reserve 3 ip address hence we start from the 4th. This will be the default behaviour + firstusable, err := cidr.Host(ipv4Net, 4) + if err != nil { + log.Fatal(err) + } + lastusable, err := cidr.Host(ipv4Net, subnetToSize[convertedad]-2) + if err != nil { + log.Fatal(err) + } + return gateway.String(), firstusable.String(), lastusable.String() + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_network_port.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_network_port.go new file mode 100644 index 00000000000..15c7cbc3193 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_network_port.go @@ -0,0 +1,228 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_networks" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +func resourceIBMPINetworkPort() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPINetworkPortCreate, + Read: resourceIBMPINetworkPortRead, + Update: resourceIBMPINetworkPortUpdate, + Delete: resourceIBMPINetworkPortDelete, + //Exists: resourceIBMPINetworkExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + helpers.PINetworkName: { + Type: schema.TypeString, + Required: true, + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + }, + + helpers.PINetworkPortDescription: { + Type: schema.TypeString, + Optional: true, + }, + + //Computed Attributes + + helpers.PINetworkPortIPAddress: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "macaddress": { + Type: schema.TypeString, + Computed: true, + }, + "portid": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "public_ip": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMPINetworkPortCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + networkname := d.Get(helpers.PINetworkName).(string) + description := d.Get(helpers.PINetworkPortDescription).(string) + + ipaddress := d.Get(helpers.PINetworkPortIPAddress).(string) + + nwportBody := &models.NetworkPortCreate{Description: description} + + if ipaddress != "" { + log.Printf("IP address provided. 
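Requesting it for the port.")
+		nwportBody.IPAddress = ipaddress
+	}
+
+	// For reference, with an address supplied the request body built above is
+	// equivalent to the following (the IP value is a hypothetical example):
+	//
+	//   &models.NetworkPortCreate{Description: description, IPAddress: "192.168.10.12"}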
") + nwportBody.IPAddress = ipaddress + } + + client := st.NewIBMPINetworkClient(sess, powerinstanceid) + + networkPortResponse, err := client.CreatePort(networkname, powerinstanceid, &p_cloud_networks.PcloudNetworksPortsPostParams{Body: nwportBody}, postTimeOut) + + if err != nil { + return err + } + + log.Printf("Printing the networkresponse %+v", &networkPortResponse) + + IBMPINetworkPortID := *networkPortResponse.PortID + + d.SetId(fmt.Sprintf("%s/%s/%s", powerinstanceid, IBMPINetworkPortID, networkname)) + if err != nil { + log.Printf("[DEBUG] err %s", err) + return err + } + _, err = isWaitForIBMPINetworkPortAvailable(client, IBMPINetworkPortID, d.Timeout(schema.TimeoutCreate), powerinstanceid, networkname) + if err != nil { + return err + } + + return resourceIBMPINetworkPortRead(d, meta) +} + +func resourceIBMPINetworkPortRead(d *schema.ResourceData, meta interface{}) error { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + var powernetworkname string + if len(parts) > 2 { + powernetworkname = parts[2] + } else { + powernetworkname = d.Get(helpers.PINetworkName).(string) + d.SetId(fmt.Sprintf("%s/%s", d.Id(), powernetworkname)) + } + + powerinstanceid := parts[0] + networkC := st.NewIBMPINetworkClient(sess, powerinstanceid) + networkdata, err := networkC.GetPort(powernetworkname, powerinstanceid, parts[1], getTimeOut) + + if err != nil { + return err + } + + d.Set(helpers.PINetworkPortIPAddress, networkdata.IPAddress) + d.Set("macaddress", networkdata.MacAddress) + d.Set("status", networkdata.Status) + d.Set("portid", networkdata.PortID) + d.Set("public_ip", networkdata.ExternalIP) + + return nil + +} + +func resourceIBMPINetworkPortUpdate(data *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceIBMPINetworkPortDelete(d *schema.ResourceData, meta interface{}) error { + + log.Printf("Calling the network delete functions. 
") + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + + if err != nil { + return err + } + var powernetworkname string + if len(parts) > 2 { + powernetworkname = parts[2] + } else { + powernetworkname = d.Get(helpers.PINetworkName).(string) + } + powerinstanceid := parts[0] + client := st.NewIBMPINetworkClient(sess, powerinstanceid) + log.Printf("Calling the client %v", client) + + log.Printf("Calling the delete with the following params delete with cloudinstance -> (%s) and networkid --> (%s) and portid --> (%s) ", powerinstanceid, powernetworkname, parts[1]) + networkdata, err := client.DeletePort(powernetworkname, powerinstanceid, parts[1], deleteTimeOut) + + log.Printf("Response from the deleteport call %v", networkdata) + + if err != nil { + return err + } + d.SetId("") + return nil +} + +func isWaitForIBMPINetworkPortAvailable(client *st.IBMPINetworkClient, id string, timeout time.Duration, powerinstanceid, networkname string) (interface{}, error) { + log.Printf("Waiting for Power Network (%s) that was created for Network Zone (%s) to be available.", id, networkname) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", helpers.PINetworkProvisioning}, + Target: []string{"DOWN"}, + Refresh: isIBMPINetworkPortRefreshFunc(client, id, powerinstanceid, networkname), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Minute, + } + + return stateConf.WaitForState() +} + +func isIBMPINetworkPortRefreshFunc(client *st.IBMPINetworkClient, id, powerinstanceid, networkname string) resource.StateRefreshFunc { + + log.Printf("Calling the IsIBMPINetwork Refresh Function....with the following id (%s) for network port and following id (%s) for network name and waiting for network to be READY", id, networkname) + return func() (interface{}, string, error) { + network, err := client.GetPort(networkname, powerinstanceid, id, getTimeOut) + if err != nil { + return nil, "", err + } + + if &network.PortID != nil { + //if network.State == "available" { + log.Printf(" The port has been created with the following ip address and attached to an instance ") + return network, "DOWN", nil + } + + return network, helpers.PINetworkProvisioning, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_network_port_attach.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_network_port_attach.go new file mode 100644 index 00000000000..8132f206f90 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_network_port_attach.go @@ -0,0 +1,207 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMPINetworkPortAttach() *schema.Resource { + return &schema.Resource{ + + Create: resourceIBMPINetworkPortAttachCreate, + Read: resourceIBMPINetworkPortAttachRead, + Update: resourceIBMPINetworkPortAttachUpdate, + Delete: resourceIBMPINetworkPortAttachDelete, + //Exists: resourceIBMPINetworkExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + + "port_id": { + Type: schema.TypeString, + Required: true, + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + }, + + helpers.PIInstanceName: { + Type: schema.TypeString, + Required: true, + Description: "Instance name to attach the network port to", + }, + + helpers.PINetworkName: { + Type: schema.TypeString, + Required: true, + Description: "Network Name - This is the subnet name in the Cloud instance", + }, + + helpers.PINetworkPortDescription: { + Type: schema.TypeString, + Optional: true, + Description: "A human readable description for this network Port", + Default: "Port Created via Terraform", + }, + "public_ip": { + Type: schema.TypeString, + Computed: true, + }, + }, + } + +} + +func resourceIBMPINetworkPortAttachCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + networkname := d.Get(helpers.PINetworkName).(string) + portid := d.Get("port_id").(string) + instancename := d.Get(helpers.PIInstanceName).(string) + description := d.Get(helpers.PINetworkPortDescription).(string) + client := st.NewIBMPINetworkClient(sess, powerinstanceid) + + log.Printf("Printing the input to the resource powerinstance [%s] and network name [%s] and the portid [%s]", powerinstanceid, networkname, portid) + networkPortResponse, err := client.AttachPort(powerinstanceid, networkname, portid, description, instancename, postTimeOut) + + if err != nil { + return err + } + + log.Printf("Printing the networkresponse %+v", &networkPortResponse) + + IBMPINetworkPortID := *networkPortResponse.PortID + + d.SetId(fmt.Sprintf("%s/%s", powerinstanceid, IBMPINetworkPortID)) + if err != nil { + log.Printf("[DEBUG] err %s", err) + return err + } + _, err = isWaitForIBMPINetworkPortAttachAvailable(client, IBMPINetworkPortID, d.Timeout(schema.TimeoutCreate), powerinstanceid, networkname) + if err != nil { + return err + } + + return resourceIBMPINetworkPortAttachRead(d, meta) +} + +func resourceIBMPINetworkPortAttachRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("Calling ther Network Port Attach Read code") + sess, err := meta.(ClientSession).IBMPISession() + + if err != nil { + fmt.Printf("failed to get a session from the IBM Cloud Service %v", err) + } + + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + powerinstanceid := parts[0] + powernetworkname := d.Get(helpers.PINetworkName).(string) + networkC := st.NewIBMPINetworkClient(sess, powerinstanceid) + networkdata, err := 
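networkC.GetPort(powernetworkname, powerinstanceid, parts[1], getTimeOut)
+
+	// The attachment ID set in Create is the two-part composite
+	// "<cloud_instance_id>/<port_id>"; the subnet name is re-read from the schema
+	// rather than from the ID. A hypothetical example:
+	//
+	//   d.Id() == "e49e...-cloud/7f2a...-port"
+	//   parts[0] -> cloud instance ID, parts[1] -> port ID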
+	if err != nil {
+		return err
+	}
+
+	d.Set("ipaddress", networkdata.IPAddress)
+	d.Set("macaddress", networkdata.MacAddress)
+	d.Set("status", networkdata.Status)
+	d.Set("portid", networkdata.PortID)
+	if networkdata.PvmInstance != nil {
+		d.Set("pvminstance", networkdata.PvmInstance.Href)
+	}
+	d.Set("public_ip", networkdata.ExternalIP)
+
+	return nil
+}
+
+func resourceIBMPINetworkPortAttachUpdate(d *schema.ResourceData, meta interface{}) error {
+	// A port attachment cannot be updated in place.
+	return nil
+}
+
+func resourceIBMPINetworkPortAttachDelete(d *schema.ResourceData, meta interface{}) error {
+	log.Printf("Detaching the network port from the instance")
+
+	sess, err := meta.(ClientSession).IBMPISession()
+	if err != nil {
+		return fmt.Errorf("failed to get a session from the IBM Cloud Service: %v", err)
+	}
+	parts, err := idParts(d.Id())
+	if err != nil {
+		return err
+	}
+
+	powerinstanceid := parts[0]
+	powernetworkname := d.Get(helpers.PINetworkName).(string)
+	portid := d.Get("port_id").(string)
+
+	client := st.NewIBMPINetworkClient(sess, powerinstanceid)
+	log.Printf("Detaching port (%s) from network (%s)", portid, powernetworkname)
+	network, err := client.DetachPort(powerinstanceid, powernetworkname, portid, deleteTimeOut)
+	if err != nil {
+		return err
+	}
+
+	log.Printf("Detach port response: %+v", &network)
+
+	d.SetId("")
+	return nil
+}
+
+func isWaitForIBMPINetworkPortAttachAvailable(client *st.IBMPINetworkClient, id string, timeout time.Duration, powerinstanceid, networkname string) (interface{}, error) {
+	log.Printf("Waiting for the port (%s) on network (%s) to be attached and ACTIVE.", id, networkname)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"retry", helpers.PINetworkProvisioning},
+		Target:     []string{"ACTIVE"},
+		Refresh:    isIBMPINetworkPortAttachRefreshFunc(client, id, powerinstanceid, networkname),
+		Timeout:    timeout,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Minute,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func isIBMPINetworkPortAttachRefreshFunc(client *st.IBMPINetworkClient, id, powerinstanceid, networkname string) resource.StateRefreshFunc {
+
+	log.Printf("Checking the status of port (%s) on network (%s) until it is attached", id, networkname)
+	return func() (interface{}, string, error) {
+		network, err := client.GetPort(networkname, powerinstanceid, id, getTimeOut)
+		if err != nil {
+			return nil, "", err
+		}
+
+		// The port is considered attached once the backend reports the instance
+		// it is bound to.
+		if network.PortID != nil && network.PvmInstance != nil {
+			log.Printf("The port has been created and is attached to an instance")
+			return network, "ACTIVE", nil
+		}
+
+		return network, helpers.PINetworkProvisioning, nil
+	}
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_operations.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_operations.go
new file mode 100644
index 00000000000..b1c7a4f7369
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_operations.go
@@ -0,0 +1,275 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "log" + "time" +) + +/* +Transition states + +The server can go from + +ACTIVE --> SHUTOFF +ACTIVE --> HARD-REBOOT +ACTIVE --> SOFT-REBOOT +SHUTOFF--> ACTIVE + + + + +*/ + +func resourceIBMPIIOperations() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPIOperationsCreate, + Read: resourceIBMPIOperationsRead, + Update: resourceIBMPIOperationsUpdate, + Delete: resourceIBMPIOperationsDelete, + //Exists: resourceIBMPIOperationsExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + Description: "PI Cloud instnce id", + }, + + helpers.PIInstanceOperationStatus: { + Type: schema.TypeString, + Computed: true, + Description: "PI instance operation status", + }, + helpers.PIInstanceOperationServerName: { + Type: schema.TypeString, + Required: true, + Description: "PI instance Operation server name", + }, + + "addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip": { + Type: schema.TypeString, + Computed: true, + }, + "macaddress": { + Type: schema.TypeString, + Computed: true, + }, + "networkid": { + Type: schema.TypeString, + Computed: true, + }, + "networkname": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + helpers.PIInstanceHealthStatus: { + Type: schema.TypeString, + Computed: true, + Description: "PI instance health status", + }, + + helpers.PIInstanceOperationType: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"start", "stop", "hard-reboot", "soft-reboot", "immediate-shutdown"}), + Description: "PI instance operation type", + }, + + helpers.PIInstanceOperationProgress: { + Type: schema.TypeFloat, + Computed: true, + Description: "Progress of the operation", + }, + }, + } +} + +func resourceIBMPIOperationsCreate(d *schema.ResourceData, meta interface{}) error { + + log.Printf("Now in the Power Operations Code") + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + operation := d.Get(helpers.PIInstanceOperationType).(string) + name := d.Get(helpers.PIInstanceOperationServerName).(string) + + body := &models.PVMInstanceAction{Action: ptrToString(operation)} + log.Printf("Calling the IBM PI Operations [ %s ] with on the instance with name [ %s ]", operation, name) + client := st.NewIBMPIInstanceClient(sess, powerinstanceid) + + /* + TODO + To add a check if the action performed is applicable on the current state of the instance + */ + + pvmoperation, err := client.Action(&p_cloud_p_vm_instances.PcloudPvminstancesActionPostParams{ + Body: body, + }, name, powerinstanceid, 30*time.Second) + + if err != nil { + log.Printf("[DEBUG] err %s", err) + return fmt.Errorf("Failed to 
perform the operation on the instance %v", err) + + } else { + log.Printf("Executed the stop operation on the lpar") + } + + log.Printf("Printing the instance info %+v", &pvmoperation) + + if operation == "stop" || operation == "immediate-shutdown" { + var targetStatus = "SHUTOFF" + log.Printf("Calling the check opertion that was invoked [%s] to check for status [ %s ]", operation, targetStatus) + _, err = isWaitForPIInstanceOperationStatus(client, name, d.Timeout(schema.TimeoutCreate), powerinstanceid, operation, targetStatus) + if err != nil { + return err + } else { + log.Printf("Executed the start operation on the lpar") + } + + } + + if operation == "start" || operation == "soft-reboot" || operation == "hard-reboot" { + var targetStatus = "ACTIVE" + log.Printf("Calling the check opertion that was invoked [%s] to check for status [ %s ]", operation, targetStatus) + _, err = isWaitForPIInstanceOperationStatus(client, name, d.Timeout(schema.TimeoutCreate), powerinstanceid, operation, targetStatus) + if err != nil { + return err + } + + } + + return resourceIBMPIOperationsRead(d, meta) +} + +func resourceIBMPIOperationsRead(d *schema.ResourceData, meta interface{}) error { + + log.Printf("Calling the PowerOperations Read code..for instance name %s", d.Get(helpers.PIInstanceOperationServerName).(string)) + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + name := d.Get(helpers.PIInstanceOperationServerName).(string) + powerC := st.NewIBMPIInstanceClient(sess, powerinstanceid) + powervmdata, err := powerC.Get(name, powerinstanceid, getTimeOut) + + if err != nil { + return err + } + + d.Set("status", powervmdata.Status) + d.Set("progress", powervmdata.Progress) + + if powervmdata.Health != nil { + d.Set("healthstatus", powervmdata.Health.Status) + + } + + pvminstanceid := *powervmdata.PvmInstanceID + d.SetId(fmt.Sprintf("%s/%s", powerinstanceid, pvminstanceid)) + + return nil + +} + +func resourceIBMPIOperationsUpdate(d *schema.ResourceData, meta interface{}) error { + + return nil +} + +func resourceIBMPIOperationsDelete(data *schema.ResourceData, meta interface{}) error { + + return nil +} + +// Exists + +func resourceIBMPIOperationsExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return false, err + } + id := d.Id() + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + client := st.NewIBMPIInstanceClient(sess, powerinstanceid) + + instance, err := client.Get(d.Id(), powerinstanceid, getTimeOut) + if err != nil { + + return false, err + } + return instance.PvmInstanceID == &id, nil +} + +func isWaitForPIInstanceOperationStatus(client *st.IBMPIInstanceClient, name string, timeout time.Duration, powerinstanceid, operation, targetstatus string) (interface{}, error) { + + log.Printf("Waiting for the Operation [ %s ] to be performed on the instance with name [ %s ]", operation, name) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "SHUTOFF", "WARNING"}, + Target: []string{targetstatus}, + Refresh: isPIOperationsRefreshFunc(client, name, powerinstanceid, targetstatus), + Delay: 1 * time.Minute, + MinTimeout: 2 * time.Minute, + Timeout: 120 * time.Minute, + } + + return stateConf.WaitForState() + +} + +func isPIOperationsRefreshFunc(client *st.IBMPIInstanceClient, id, powerinstanceid, targetstatus string) resource.StateRefreshFunc { + return func() (interface{}, string, 
error) { + + log.Printf("Waiting for the target status to be [ %s ]", targetstatus) + pvm, err := client.Get(id, powerinstanceid, getTimeOut) + if err != nil { + return nil, "", err + } + + if *pvm.Status == targetstatus && pvm.Health.Status == helpers.PIInstanceHealthOk { + log.Printf("The health status is now ok") + //if *pvm.Status == "active" ; if *pvm.Addresses[0].IP == nil { + return pvm, targetstatus, nil + //} + } + + return pvm, helpers.PIInstanceHealthWarning, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_snapshot.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_snapshot.go new file mode 100644 index 00000000000..579b6445244 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_snapshot.go @@ -0,0 +1,334 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "time" + + "github.com/IBM-Cloud/power-go-client/power/client/p_cloud_p_vm_instances" + "github.com/IBM-Cloud/power-go-client/power/models" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMPISnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPISnapshotCreate, + Read: resourceIBMPISnapshotRead, + Update: resourceIBMPISnapshotUpdate, + Delete: resourceIBMPISnapshotDelete, + Exists: resourceIBMPISnapshotExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + //Snapshots are created at the pvm instance level + + helpers.PISnapshotName: { + Type: schema.TypeString, + Required: true, + Description: "Unique name of the snapshot", + }, + + helpers.PIInstanceName: { + Type: schema.TypeString, + Required: true, + Description: "Instance name / id of the pvm", + }, + + helpers.PIInstanceVolumeIds: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + DiffSuppressFunc: applyOnce, + Description: "List of PI volumes", + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + Description: " Cloud Instance ID - This is the service_instance_id.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Snapshot description", + }, + // Computed Attributes + + helpers.PISnapshot: { + Type: schema.TypeString, + Computed: true, + Description: "Id of the snapshot", + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "last_updated_date": { + Type: schema.TypeString, + Computed: true, + }, + "volume_snapshots": { + Type: schema.TypeMap, + Computed: true, + }, + }, + } +} + +func resourceIBMPISnapshotCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + instanceid := d.Get(helpers.PIInstanceName).(string) + volids := 
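+	// pi_volume_ids is optional: when the set is empty the request body
+	// omits VolumeIds and the snapshot covers every volume attached to
+	// the instance (see the check below).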
expandStringList((d.Get(helpers.PIInstanceVolumeIds).(*schema.Set)).List()) + name := d.Get(helpers.PISnapshotName).(string) + description := d.Get("description").(string) + if d.Get(description) == "" { + description = "Testing from Terraform" + } + + client := st.NewIBMPIInstanceClient(sess, powerinstanceid) + + snapshotBody := &models.SnapshotCreate{Name: &name, Description: description} + + if len(volids) > 0 { + snapshotBody.VolumeIds = volids + } else { + log.Printf("no volumeids provided. Will snapshot the entire instance") + } + + snapshotResponse, err := client.CreatePvmSnapShot(&p_cloud_p_vm_instances.PcloudPvminstancesSnapshotsPostParams{ + Body: snapshotBody, + }, instanceid, powerinstanceid, createTimeOut) + + if err != nil { + log.Printf("[DEBUG] err %s", err) + return err + } + + d.SetId(fmt.Sprintf("%s/%s", powerinstanceid, *snapshotResponse.SnapshotID)) + if err != nil { + log.Printf("[DEBUG] err %s", err) + return fmt.Errorf("failed to get the snapshotid %v", err) + } + + pisnapclient := st.NewIBMPISnapshotClient(sess, powerinstanceid) + _, err = isWaitForPIInstanceSnapshotAvailable(pisnapclient, *snapshotResponse.SnapshotID, d.Timeout(schema.TimeoutCreate), powerinstanceid) + if err != nil { + return err + } + + return resourceIBMPISnapshotRead(d, meta) +} + +func resourceIBMPISnapshotRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("Calling the Snapshot Read function post create") + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + powerinstanceid := parts[0] + snapshot := st.NewIBMPISnapshotClient(sess, powerinstanceid) + snapshotdata, err := snapshot.Get(parts[1], powerinstanceid, getTimeOut) + + if err != nil { + return err + } + + d.Set(helpers.PISnapshotName, snapshotdata.Name) + d.Set(helpers.PISnapshot, *snapshotdata.SnapshotID) + d.Set("status", snapshotdata.Status) + d.Set("creation_date", snapshotdata.CreationDate.String()) + d.Set("volume_snapshots", snapshotdata.VolumeSnapshots) + d.Set("last_update_date", snapshotdata.LastUpdateDate.String()) + + return nil +} + +func resourceIBMPISnapshotUpdate(d *schema.ResourceData, meta interface{}) error { + + log.Printf("Calling the IBM Power Snapshot update call") + sess, _ := meta.(ClientSession).IBMPISession() + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + client := st.NewIBMPISnapshotClient(sess, powerinstanceid) + + if d.HasChange(helpers.PISnapshotName) || d.HasChange("description") { + name := d.Get(helpers.PISnapshotName).(string) + description := d.Get("description").(string) + snapshotBody := &models.SnapshotUpdate{Name: name, Description: description} + + _, err := client.Update(parts[1], powerinstanceid, snapshotBody, 60) + + if err != nil { + return fmt.Errorf("failed to update the snapshot request %v", err) + + } + + _, err = isWaitForPIInstanceSnapshotAvailable(client, parts[1], d.Timeout(schema.TimeoutCreate), powerinstanceid) + if err != nil { + return err + } + } + + return resourceIBMPISnapshotRead(d, meta) +} + +func resourceIBMPISnapshotDelete(d *schema.ResourceData, meta interface{}) error { + + sess, _ := meta.(ClientSession).IBMPISession() + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + + client := st.NewIBMPISnapshotClient(sess, powerinstanceid) + + snapshot, err := client.Get(parts[1], powerinstanceid, getTimeOut) + if err != nil { + return err + } + + log.Printf("The 
snapshot to be deleted is in the following state .. %s", snapshot.Status) + + snapshotdel_err := client.Delete(parts[1], powerinstanceid, deleteTimeOut) + if snapshotdel_err != nil { + return snapshotdel_err + } + + _, err = isWaitForPIInstanceSnapshotDeleted(client, parts[1], d.Timeout(schema.TimeoutDelete), powerinstanceid) + if err != nil { + return err + } + + d.SetId("") + return nil +} +func resourceIBMPISnapshotExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + + powerinstanceid := parts[0] + client := st.NewIBMPISnapshotClient(sess, powerinstanceid) + + snapshotdelete, err := client.Get(parts[1], powerinstanceid, getTimeOut) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + log.Printf("Calling the existing function.. %s", *(snapshotdelete.SnapshotID)) + + volumeid := *snapshotdelete.SnapshotID + return volumeid == parts[1], nil +} + +func isWaitForPIInstanceSnapshotAvailable(client *st.IBMPISnapshotClient, id string, timeout time.Duration, powerinstanceid string) (interface{}, error) { + + log.Printf("Waiting for PIInstance Snapshot (%s) to be available and active ", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"in_progress", "BUILD"}, + Target: []string{"available", "ACTIVE"}, + Refresh: isPIInstanceSnapshotRefreshFunc(client, id, powerinstanceid), + Delay: 30 * time.Second, + MinTimeout: 2 * time.Minute, + Timeout: 60 * time.Minute, + } + + return stateConf.WaitForState() +} + +func isPIInstanceSnapshotRefreshFunc(client *st.IBMPISnapshotClient, id, powerinstanceid string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + snapshotInfo, err := client.Get(id, powerinstanceid, getTimeOut) + if err != nil { + return nil, "", err + } + + //if pvm.Health.Status == helpers.PIInstanceHealthOk { + if snapshotInfo.Status == "available" && snapshotInfo.PercentComplete == 100 { + log.Printf("The snapshot is now available") + return snapshotInfo, "available", nil + + } + return snapshotInfo, "in_progress", nil + } +} + +// Delete Snapshot + +func isWaitForPIInstanceSnapshotDeleted(client *st.IBMPISnapshotClient, id string, timeout time.Duration, powerinstanceid string) (interface{}, error) { + + log.Printf("Waiting for (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", helpers.PIInstanceDeleting}, + Target: []string{"Not Found"}, + Refresh: isPIInstanceSnapshotDeleteRefreshFunc(client, id, powerinstanceid), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + Timeout: 10 * time.Minute, + } + + return stateConf.WaitForState() +} + +func isPIInstanceSnapshotDeleteRefreshFunc(client *st.IBMPISnapshotClient, id, powerinstanceid string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + snapshot, err := client.Get(id, powerinstanceid, getTimeOut) + if err != nil { + log.Printf("The snapshot is not found.") + return snapshot, helpers.PIInstanceNotFound, nil + + } + return snapshot, helpers.PIInstanceNotFound, nil + + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_volume.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_volume.go new file mode 100644 index 
00000000000..81213195009 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_volume.go @@ -0,0 +1,303 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" +) + +const ( + /* Power Volume creation depends on response from PowerVC */ + volPostTimeOut = 180 * time.Second + volGetTimeOut = 180 * time.Second + volDeleteTimeOut = 180 * time.Second +) + +func resourceIBMPIVolume() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPIVolumeCreate, + Read: resourceIBMPIVolumeRead, + Update: resourceIBMPIVolumeUpdate, + Delete: resourceIBMPIVolumeDelete, + Exists: resourceIBMPIVolumeExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "volume_id": { + Type: schema.TypeString, + Computed: true, + Description: "Volume ID", + }, + + helpers.PIVolumeName: { + Type: schema.TypeString, + Required: true, + Description: "Volume Name to create", + }, + + helpers.PIVolumeShareable: { + Type: schema.TypeBool, + Optional: true, + Description: "Flag to indicate if the volume can be shared across multiple instances?", + }, + helpers.PIVolumeSize: { + Type: schema.TypeFloat, + Required: true, + Description: "Size of the volume in GB", + }, + helpers.PIVolumeType: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"ssd", "standard", "tier1", "tier3"}), + Description: "Volume type", + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + Description: " Cloud Instance ID - This is the service_instance_id.", + }, + + // Computed Attributes + + "volume_status": { + Type: schema.TypeString, + Computed: true, + Description: "Volume status", + }, + + "delete_on_termination": { + Type: schema.TypeBool, + Computed: true, + Description: "Should the volume be deleted during termination", + }, + "wwn": { + Type: schema.TypeString, + Computed: true, + Description: "WWN Of the volume", + }, + }, + } +} + +func resourceIBMPIVolumeCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + name := d.Get(helpers.PIVolumeName).(string) + volType := d.Get(helpers.PIVolumeType).(string) + size := float64(d.Get(helpers.PIVolumeSize).(float64)) + var shared bool + if v, ok := d.GetOk(helpers.PIVolumeShareable); ok { + shared = v.(bool) + } + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + + client := st.NewIBMPIVolumeClient(sess, powerinstanceid) + vol, err := client.Create(name, size, volType, shared, powerinstanceid, volPostTimeOut) + if err != nil { + return fmt.Errorf("Failed to Create the volume %v", err) + } + + volumeid := *vol.VolumeID + d.SetId(fmt.Sprintf("%s/%s", powerinstanceid, volumeid)) + + _, err = isWaitForIBMPIVolumeAvailable(client, volumeid, powerinstanceid, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + return resourceIBMPIVolumeRead(d, 
meta) +} + +func resourceIBMPIVolumeRead(d *schema.ResourceData, meta interface{}) error { + sess, _ := meta.(ClientSession).IBMPISession() + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + client := st.NewIBMPIVolumeClient(sess, powerinstanceid) + + vol, err := client.Get(parts[1], powerinstanceid, volGetTimeOut) + if err != nil { + return fmt.Errorf("Failed to get the volume %v", err) + + } + d.Set(helpers.PIVolumeName, vol.Name) + d.Set(helpers.PIVolumeSize, vol.Size) + if &vol.Shareable != nil { + d.Set(helpers.PIVolumeShareable, vol.Shareable) + } + d.Set(helpers.PIVolumeType, vol.DiskType) + if &vol.State != nil { + d.Set("volume_status", vol.State) + } + if &vol.VolumeID != nil { + d.Set("volume_id", vol.VolumeID) + } + if &vol.DeleteOnTermination != nil { + d.Set("delete_on_termination", vol.DeleteOnTermination) + } + if &vol.Wwn != nil { + d.Set("wwn", vol.Wwn) + } + d.Set(helpers.PICloudInstanceId, powerinstanceid) + + return nil +} + +func resourceIBMPIVolumeUpdate(d *schema.ResourceData, meta interface{}) error { + sess, _ := meta.(ClientSession).IBMPISession() + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + client := st.NewIBMPIVolumeClient(sess, powerinstanceid) + name := d.Get(helpers.PIVolumeName).(string) + size := float64(d.Get(helpers.PIVolumeSize).(float64)) + var shareable bool + if v, ok := d.GetOk(helpers.PIVolumeShareable); ok { + shareable = v.(bool) + } + volrequest, err := client.Update(parts[1], name, size, shareable, powerinstanceid, volPostTimeOut) + if err != nil { + return err + } + _, err = isWaitForIBMPIVolumeAvailable(client, *volrequest.VolumeID, powerinstanceid, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + return resourceIBMPIVolumeRead(d, meta) +} + +func resourceIBMPIVolumeDelete(d *schema.ResourceData, meta interface{}) error { + + sess, _ := meta.(ClientSession).IBMPISession() + parts, err := idParts(d.Id()) + if err != nil { + return err + } + powerinstanceid := parts[0] + + client := st.NewIBMPIVolumeClient(sess, powerinstanceid) + voldeleteErr := client.Delete(parts[1], powerinstanceid, deleteTimeOut) + if voldeleteErr != nil { + return voldeleteErr + } + _, err = isWaitForIBMPIVolumeDeleted(client, parts[1], powerinstanceid, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} +func resourceIBMPIVolumeExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + + powerinstanceid := parts[0] + client := st.NewIBMPIVolumeClient(sess, powerinstanceid) + + vol, err := client.Get(parts[1], powerinstanceid, getTimeOut) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + log.Printf("Calling the existing function.. 
%s", *(vol.VolumeID)) + + volumeid := *vol.VolumeID + return volumeid == parts[1], nil +} + +func isWaitForIBMPIVolumeAvailable(client *st.IBMPIVolumeClient, id, powerinstanceid string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for Volume (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", helpers.PIVolumeProvisioning}, + Target: []string{helpers.PIVolumeProvisioningDone}, + Refresh: isIBMPIVolumeRefreshFunc(client, id, powerinstanceid), + Delay: 10 * time.Second, + MinTimeout: 2 * time.Minute, + Timeout: 30 * time.Minute, + } + + return stateConf.WaitForState() +} + +func isIBMPIVolumeRefreshFunc(client *st.IBMPIVolumeClient, id, powerinstanceid string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + vol, err := client.Get(id, powerinstanceid, volGetTimeOut) + if err != nil { + return nil, "", err + } + + if vol.State == "available" { + return vol, helpers.PIVolumeProvisioningDone, nil + } + + return vol, helpers.PIVolumeProvisioning, nil + } +} + +func isWaitForIBMPIVolumeDeleted(client *st.IBMPIVolumeClient, id, powerinstanceid string, timeout time.Duration) (interface{}, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting", helpers.PIVolumeProvisioning}, + Target: []string{"deleted"}, + Refresh: isIBMPIVolumeDeleteRefreshFunc(client, id, powerinstanceid), + Delay: 10 * time.Second, + MinTimeout: 2 * time.Minute, + Timeout: 30 * time.Minute, + } + return stateConf.WaitForState() +} + +func isIBMPIVolumeDeleteRefreshFunc(client *st.IBMPIVolumeClient, id, powerinstanceid string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + vol, err := client.Get(id, powerinstanceid, volGetTimeOut) + if err != nil { + if strings.Contains(err.Error(), "Resource not found") { + return vol, "deleted", nil + } + return nil, "", err + } + if vol == nil { + return vol, "deleted", nil + } + return vol, "deleting", nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_volume_attach.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_volume_attach.go new file mode 100644 index 00000000000..69eb107587f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_pi_volume_attach.go @@ -0,0 +1,228 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "errors" + "fmt" + "log" + "time" + + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + + /* Fix for PowerVC taking time to attach volume depending on load*/ + + attachVolumeTimeOut = 240 * time.Second +) + +func resourceIBMPIVolumeAttach() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPIVolumeAttachCreate, + Read: resourceIBMPIVolumeAttachRead, + Update: resourceIBMPIVolumeAttachUpdate, + Delete: resourceIBMPIVolumeAttachDelete, + //Exists: resourceIBMPowerVolumeExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "volumeattachid": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Optional: true, + Description: "Volume attachment ID", + }, + + helpers.PICloudInstanceId: { + Type: schema.TypeString, + Required: true, + Description: " Cloud Instance ID - This is the service_instance_id.", + }, + + helpers.PIVolumeAttachName: { + Type: schema.TypeString, + Required: true, + Description: "Name of the volume to attach. Note these volumes should have been created", + }, + + helpers.PIInstanceName: { + Type: schema.TypeString, + Required: true, + Description: "PI Instance name", + }, + + helpers.PIVolumeAttachStatus: { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + helpers.PIVolumeShareable: { + Type: schema.TypeBool, + Computed: true, + Optional: true, + }, + }, + } +} + +func resourceIBMPIVolumeAttachCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).IBMPISession() + if err != nil { + return err + } + + name := d.Get(helpers.PIVolumeAttachName).(string) + servername := d.Get(helpers.PIInstanceName).(string) + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + + client := st.NewIBMPIVolumeClient(sess, powerinstanceid) + + volinfo, err := client.Get(name, powerinstanceid, getTimeOut) + + if err != nil { + return fmt.Errorf("The volume [ %s] cannot be attached since it's not available", name) + } + //log.Print("The volume info is %s", volinfo) + + if volinfo.State == "available" || *volinfo.Shareable == true { + log.Printf(" In the current state the volume can be attached to the instance ") + } + + if volinfo.State == "in-use" && *volinfo.Shareable == true { + + log.Printf("Volume State /Status is permitted and hence attaching the volume to the instance") + } + + if volinfo.State == helpers.PIVolumeAllowableAttachStatus && *volinfo.Shareable == false { + + return errors.New("The volume cannot be attached in the current state. The volume must be in the *available* state. 
No other states are permissible") + } + + resp, err := client.Attach(servername, name, powerinstanceid, attachVolumeTimeOut) + + if err != nil { + return err + } + log.Printf("Printing the resp %+v", resp) + + d.SetId(*volinfo.VolumeID) + if err != nil { + log.Printf("[DEBUG] err %s", err) + return err + } + + _, err = isWaitForIBMPIVolumeAttachAvailable(client, d.Id(), powerinstanceid, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + //return nil + return resourceIBMPIVolumeAttachRead(d, meta) +} + +func resourceIBMPIVolumeAttachRead(d *schema.ResourceData, meta interface{}) error { + sess, _ := meta.(ClientSession).IBMPISession() + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + servername := d.Get(helpers.PIInstanceName).(string) + + client := st.NewIBMPIVolumeClient(sess, powerinstanceid) + + vol, err := client.CheckVolumeAttach(powerinstanceid, servername, d.Id(), getTimeOut) + if err != nil { + return err + } + + //d.SetId(vol.ID.String()) + d.Set(helpers.PIVolumeAttachName, vol.Name) + d.Set(helpers.PIVolumeSize, vol.Size) + d.Set(helpers.PIVolumeShareable, vol.Shareable) + return nil +} + +func resourceIBMPIVolumeAttachUpdate(d *schema.ResourceData, meta interface{}) error { + + sess, _ := meta.(ClientSession).IBMPISession() + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + client := st.NewIBMPIVolumeClient(sess, powerinstanceid) + + name := "" + if d.HasChange(helpers.PIVolumeAttachName) { + name = d.Get(helpers.PIVolumeAttachName).(string) + } + + size := float64(d.Get(helpers.PIVolumeSize).(float64)) + shareable := bool(d.Get(helpers.PIVolumeShareable).(bool)) + + volrequest, err := client.Update(d.Id(), name, size, shareable, powerinstanceid, postTimeOut) + if err != nil { + return err + } + + _, err = isWaitForIBMPIVolumeAttachAvailable(client, *volrequest.VolumeID, powerinstanceid, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + return resourceIBMPIVolumeRead(d, meta) +} + +func resourceIBMPIVolumeAttachDelete(d *schema.ResourceData, meta interface{}) error { + + sess, _ := meta.(ClientSession).IBMPISession() + powerinstanceid := d.Get(helpers.PICloudInstanceId).(string) + name := d.Get(helpers.PIVolumeAttachName).(string) + servername := d.Get(helpers.PIInstanceName).(string) + client := st.NewIBMPIVolumeClient(sess, powerinstanceid) + + log.Printf("the id of the volume to detach is%s ", d.Id()) + _, err := client.Detach(servername, name, powerinstanceid, deleteTimeOut) + if err != nil { + return err + } + + // wait for power volume states to be back as available. 
if it's attached it will be in-use + d.SetId("") + return nil +} + +func isWaitForIBMPIVolumeAttachAvailable(client *st.IBMPIVolumeClient, id, powerinstanceid string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for Volume (%s) to be available for attachment", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", helpers.PIVolumeProvisioning}, + Target: []string{helpers.PIVolumeAllowableAttachStatus}, + Refresh: isIBMPIVolumeAttachRefreshFunc(client, id, powerinstanceid), + Delay: 10 * time.Second, + MinTimeout: 2 * time.Minute, + Timeout: 10 * time.Minute, + } + + return stateConf.WaitForState() +} + +func isIBMPIVolumeAttachRefreshFunc(client *st.IBMPIVolumeClient, id, powerinstanceid string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + vol, err := client.Get(id, powerinstanceid, getTimeOut) + if err != nil { + return nil, "", err + } + + if vol.State == "in-use" { + return vol, helpers.PIVolumeAllowableAttachStatus, nil + } + + return vol, helpers.PIVolumeProvisioning, nil + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_glb.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_glb.go new file mode 100644 index 00000000000..d8225e85800 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_glb.go @@ -0,0 +1,370 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/IBM/networking-go-sdk/dnssvcsv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + pdnsGLBName = "name" + pdnsGLBID = "glb_id" + pdnsGLBDescription = "description" + pdnsGLBEnabled = "enabled" + pdnsGLBTTL = "ttl" + pdnsGLBHealth = "health" + pdnsGLBFallbackPool = "fallback_pool" + pdnsGLBDefaultPool = "default_pools" + pdnsGLBAZPools = "az_pools" + pdnsGLBAvailabilityZone = "availability_zone" + pdnsGLBAZPoolsPools = "pools" + pdnsGLBCreatedOn = "created_on" + pdnsGLBModifiedOn = "modified_on" + pdnsGLBDeleting = "deleting" + pdnsGLBDeleted = "done" +) + +func resourceIBMPrivateDNSGLB() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPrivateDNSGLBCreate, + Read: resourceIBMPrivateDNSGLBRead, + Update: resourceIBMPrivateDNSGLBUpdate, + Delete: resourceIBMPrivateDNSGLBDelete, + Exists: resourceIBMPrivateDNSGLBExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + pdnsGLBID: { + Type: schema.TypeString, + Computed: true, + Description: "Load balancer Id", + }, + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The GUID of the private DNS.", + }, + pdnsZoneID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Zone Id", + }, + pdnsGLBName: { + Type: schema.TypeString, + Required: true, + Description: "Name of the load balancer", + DiffSuppressFunc: suppressPDNSGlbNameDiff, + }, + pdnsGLBDescription: { + Type: schema.TypeString, + Optional: true, + Description: "Descriptive text of the load balancer", + }, + pdnsGLBEnabled: { + Type: schema.TypeBool, + Optional: true, + Computed: true, + 
Description: "Whether the load balancer is enabled", + }, + pdnsGLBTTL: { + Type: schema.TypeInt, + Optional: true, + Default: 60, + Description: "Time to live in second", + }, + pdnsGLBHealth: { + Type: schema.TypeString, + Computed: true, + Description: "Healthy state of the load balancer.", + }, + pdnsGLBFallbackPool: { + Type: schema.TypeString, + Required: true, + Description: "The pool ID to use when all other pools are detected as unhealthy", + }, + pdnsGLBDefaultPool: { + Type: schema.TypeList, + Required: true, + Description: "A list of pool IDs ordered by their failover priority", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + pdnsGLBAZPools: { + Type: schema.TypeSet, + Optional: true, + Description: "Map availability zones to pool ID's.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + pdnsGLBAvailabilityZone: { + Type: schema.TypeString, + Required: true, + Description: "Availability zone.", + }, + + pdnsGLBAZPoolsPools: { + Type: schema.TypeList, + Required: true, + Description: "List of load balancer pools", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + pdnsGLBCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Load Balancer creation date", + }, + pdnsGLBModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Load Balancer Modification date", + }, + }, + } +} + +func resourceIBMPrivateDNSGLBCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + instanceID := d.Get(pdnsInstanceID).(string) + zoneID := d.Get(pdnsZoneID).(string) + createlbOptions := sess.NewCreateLoadBalancerOptions(instanceID, zoneID) + + lbname := d.Get(pdnsGLBName).(string) + createlbOptions.SetName(lbname) + createlbOptions.SetFallbackPool(d.Get(pdnsGLBFallbackPool).(string)) + createlbOptions.SetDefaultPools(expandStringList(d.Get(pdnsGLBDefaultPool).([]interface{}))) + + if description, ok := d.GetOk(pdnsGLBDescription); ok { + createlbOptions.SetDescription(description.(string)) + } + if enable, ok := d.GetOkExists(pdnsGLBEnabled); ok { + createlbOptions.SetEnabled(enable.(bool)) + } + if ttl, ok := d.GetOk(pdnsGLBTTL); ok { + createlbOptions.SetTTL(int64(ttl.(int))) + } + + if AZpools, ok := d.GetOk(pdnsGLBAZPools); ok { + expandedAzpools, err := expandPDNSGlbAZPools(AZpools) + if err != nil { + return err + } + createlbOptions.SetAzPools(expandedAzpools) + } + + result, resp, err := sess.CreateLoadBalancer(createlbOptions) + if err != nil { + log.Printf("create global load balancer failed %s", resp) + return err + } + + d.SetId(fmt.Sprintf("%s/%s/%s", instanceID, zoneID, *result.ID)) + return resourceIBMPrivateDNSGLBRead(d, meta) +} + +func resourceIBMPrivateDNSGLBRead(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + idset := strings.Split(d.Id(), "/") + + getlbOptions := sess.NewGetLoadBalancerOptions(idset[0], idset[1], idset[2]) + presponse, resp, err := sess.GetLoadBalancer(getlbOptions) + if err != nil { + return fmt.Errorf("Error fetching pdns GLB :%s\n%s", err, resp) + } + + response := *presponse + d.Set(pdnsInstanceID, idset[0]) + d.Set(pdnsZoneID, idset[1]) + d.Set(pdnsGLBName, response.Name) + d.Set(pdnsGLBID, response.ID) + d.Set(pdnsGLBDescription, response.Description) + d.Set(pdnsGLBEnabled, response.Enabled) + d.Set(pdnsGLBTTL, response.TTL) + d.Set(pdnsGLBHealth, 
response.Health) + d.Set(pdnsGLBFallbackPool, response.FallbackPool) + d.Set(pdnsGLBDefaultPool, response.DefaultPools) + d.Set(pdnsGLBCreatedOn, response.CreatedOn) + d.Set(pdnsGLBModifiedOn, response.ModifiedOn) + d.Set(pdnsGLBAZPools, flattenPDNSGlbAZpool(response.AzPools)) + return nil +} + +func resourceIBMPrivateDNSGLBUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idset := strings.Split(d.Id(), "/") + + updatelbOptions := sess.NewUpdateLoadBalancerOptions(idset[0], idset[1], idset[2]) + + if d.HasChange(pdnsGLBName) || + d.HasChange(pdnsGLBDescription) || + d.HasChange(pdnsGLBEnabled) || + d.HasChange(pdnsGLBTTL) || + d.HasChange(pdnsGLBFallbackPool) || + d.HasChange(pdnsGLBDefaultPool) || + d.HasChange(pdnsGLBAZPools) { + + updatelbOptions.SetName(d.Get(pdnsGLBName).(string)) + updatelbOptions.SetFallbackPool(d.Get(pdnsGLBFallbackPool).(string)) + updatelbOptions.SetDefaultPools(expandStringList(d.Get(pdnsGLBDefaultPool).([]interface{}))) + + if description, ok := d.GetOk(pdnsGLBDescription); ok { + updatelbOptions.SetDescription(description.(string)) + } + if enable, ok := d.GetOkExists(pdnsGLBEnabled); ok { + updatelbOptions.SetEnabled(enable.(bool)) + } + if ttl, ok := d.GetOk(pdnsGLBTTL); ok { + updatelbOptions.SetTTL(int64(ttl.(int))) + } + + if AZpools, ok := d.GetOk(pdnsGLBAZPools); ok { + expandedAzpools, err := expandPDNSGlbAZPools(AZpools) + if err != nil { + return err + } + updatelbOptions.SetAzPools(expandedAzpools) + } + + _, detail, err := sess.UpdateLoadBalancer(updatelbOptions) + if err != nil { + return fmt.Errorf("Error updating pdns GLB :%s\n%s", err, detail) + } + } + + return resourceIBMPrivateDNSGLBRead(d, meta) +} + +func resourceIBMPrivateDNSGLBDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idset := strings.Split(d.Id(), "/") + deletelbOptions := sess.NewDeleteLoadBalancerOptions(idset[0], idset[1], idset[2]) + response, err := sess.DeleteLoadBalancer(deletelbOptions) + if err != nil { + return fmt.Errorf("Error deleting pdns GLB :%s\n%s", err, response) + } + _, err = isWaitForLoadBalancerDeleted(sess, d, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + return nil +} + +func resourceIBMPrivateDNSGLBExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return false, err + } + idset := strings.Split(d.Id(), "/") + getlbOptions := sess.NewGetLoadBalancerOptions(idset[0], idset[1], idset[2]) + _, detail, err := sess.GetLoadBalancer(getlbOptions) + if err != nil { + if detail != nil && detail.StatusCode == 404 { + log.Printf("Get GLB failed with status code 404: %v", detail) + return false, nil + } + log.Printf("Get GLB failed: %v", detail) + return false, err + } + return true, nil +} + +func expandPDNSGlbAZPools(azpool interface{}) ([]dnssvcsv1.LoadBalancerAzPoolsItem, error) { + azpools := azpool.(*schema.Set).List() + expandAZpools := make([]dnssvcsv1.LoadBalancerAzPoolsItem, 0) + for _, v := range azpools { + locationConfig := v.(map[string]interface{}) + avzone := locationConfig[pdnsGLBAvailabilityZone].(string) + pools := expandStringList(locationConfig[pdnsGLBAZPoolsPools].([]interface{})) + aZItem := dnssvcsv1.LoadBalancerAzPoolsItem{ + AvailabilityZone: &avzone, + Pools: pools, + } + expandAZpools = append(expandAZpools, 
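+		// Each set entry corresponds to one az_pools block, e.g. (illustrative
+		// values, not from this changeset):
+		//   az_pools {
+		//     availability_zone = "us-south-1"
+		//     pools             = [ibm_dns_glb_pool.example.pool_id]
+		//   }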
aZItem)
+	}
+	return expandAZpools, nil
+}
+
+func flattenPDNSGlbAZpool(azpool []dnssvcsv1.LoadBalancerAzPoolsItem) interface{} {
+	flattened := make([]interface{}, 0)
+	for _, v := range azpool {
+		cfg := map[string]interface{}{
+			pdnsGLBAvailabilityZone: *v.AvailabilityZone,
+			pdnsGLBAZPoolsPools:     flattenStringList(v.Pools),
+		}
+		flattened = append(flattened, cfg)
+	}
+	return flattened
+}
+
+func suppressPDNSGlbNameDiff(k, old, new string, d *schema.ResourceData) bool {
+	// The service concatenates the load balancer name with the zone domain,
+	// so compare only the leading name segment.
+	return strings.SplitN(old, ".", 2)[0] == strings.SplitN(new, ".", 2)[0]
+}
+
+func isWaitForLoadBalancerDeleted(LoadBalancer *dnssvcsv1.DnsSvcsV1, d *schema.ResourceData, timeout time.Duration) (interface{}, error) {
+	idset := strings.Split(d.Id(), "/")
+	log.Printf("Waiting for PDNS GLB (%s) to be deleted.", idset[2])
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{pdnsGLBDeleting},
+		Target:     []string{pdnsGLBDeleted},
+		Refresh:    isVLoadBalancerDeleteRefreshFunc(LoadBalancer, d),
+		Timeout:    timeout,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func isVLoadBalancerDeleteRefreshFunc(LoadBalancer *dnssvcsv1.DnsSvcsV1, d *schema.ResourceData) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		idset := strings.Split(d.Id(), "/")
+		getlbOptions := LoadBalancer.NewGetLoadBalancerOptions(idset[0], idset[1], idset[2])
+		_, response, err := LoadBalancer.GetLoadBalancer(getlbOptions)
+		if err != nil {
+			if response != nil && response.StatusCode == 404 {
+				return "", pdnsGLBDeleted, nil
+			}
+			return "", "", fmt.Errorf("Error getting PDNS load balancer: %s\n%s", err, response)
+		}
+		return LoadBalancer, pdnsGLBDeleting, nil
+	}
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_glb_monitor.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_glb_monitor.go
new file mode 100644
index 00000000000..426b2ab4a99
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_glb_monitor.go
@@ -0,0 +1,468 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
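+// Illustrative usage only (not part of this changeset): a minimal HCL sketch
+// of the monitor resource defined below. Attribute names follow the literal
+// pdnsGlbMonitor* constants; instance_id is assumed to be the value of the
+// pdnsInstanceID constant defined elsewhere, and all values are placeholders.
+//
+//   resource "ibm_dns_glb_monitor" "example" {
+//     instance_id    = ibm_resource_instance.pdns.guid
+//     name           = "api-health"
+//     type           = "HTTP"
+//     port           = 8080
+//     method         = "GET"
+//     path           = "/health"
+//     expected_codes = "200"
+//   }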
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + "time" + + "github.com/IBM/networking-go-sdk/dnssvcsv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmDNSGlbMonitor = "ibm_dns_glb_monitor" + pdnsGlbMonitorName = "name" + pdnsGlbMonitorID = "monitor_id" + pdnsGlbMonitorDescription = "description" + pdnsGlbMonitorType = "type" + pdnsGlbMonitorPort = "port" + pdnsGlbMonitorInterval = "interval" + pdnsGlbMonitorRetries = "retries" + pdnsGlbMonitorTimeout = "timeout" + pdnsGlbMonitorMethod = "method" + pdnsGlbMonitorPath = "path" + pdnsGlbMonitorAllowInsecure = "allow_insecure" + pdnsGlbMonitorExpectedCodes = "expected_codes" + pdnsGlbMonitorExpectedBody = "expected_body" + pdnsGlbMonitorHeaders = "headers" + pdnsGlbMonitorHeadersName = "name" + pdnsGlbMonitorHeadersValue = "value" + pdnsGlbMonitorCreatedOn = "created_on" + pdnsGlbMonitorModifiedOn = "modified_on" +) + +func resourceIBMPrivateDNSGLBMonitor() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPrivateDNSGLBMonitorCreate, + Read: resourceIBMPrivateDNSGLBMonitorRead, + Update: resourceIBMPrivateDNSGLBMonitorUpdate, + Delete: resourceIBMPrivateDNSGLBMonitorDelete, + Exists: resourceIBMPrivateDNSGLBMonitorExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + pdnsGlbMonitorID: { + Type: schema.TypeString, + Computed: true, + Description: "Monitor Id", + }, + + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Instance Id", + }, + + pdnsGlbMonitorName: { + Type: schema.TypeString, + Required: true, + Description: "The unique identifier of a service instance.", + }, + + pdnsGlbMonitorDescription: { + Type: schema.TypeString, + Optional: true, + Description: "Descriptive text of the load balancer monitor", + }, + + pdnsGlbMonitorType: { + Type: schema.TypeString, + Optional: true, + Default: "HTTP", + ValidateFunc: InvokeValidator(ibmDNSGlbMonitor, pdnsGlbMonitorType), + Description: "The protocol to use for the health check", + }, + + pdnsGlbMonitorPort: { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Port number to connect to for the health check", + }, + + pdnsGlbMonitorInterval: { + Type: schema.TypeInt, + Optional: true, + Default: 60, + Description: "The interval between each health check", + }, + + pdnsGlbMonitorRetries: { + Type: schema.TypeInt, + Optional: true, + Default: 1, + Description: "The number of retries to attempt in case of a timeout before marking the origin as unhealthy", + }, + + pdnsGlbMonitorTimeout: { + Type: schema.TypeInt, + Optional: true, + Default: 5, + Description: "The timeout (in seconds) before marking the health check as failed", + }, + + pdnsGlbMonitorMethod: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: InvokeValidator(ibmDNSGlbMonitor, pdnsGlbMonitorMethod), + Description: "The method to use for the health check", + }, + + pdnsGlbMonitorPath: { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "The endpoint path to health check against", + }, + + pdnsGlbMonitorHeaders: { + Type: schema.TypeSet, + Optional: true, + Description: "The HTTP request headers to send in the health check", + Elem: &schema.Resource{ + Schema: 
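+		// Identity and probe-behaviour fields first, then the HTTP/HTTPS-only
+		// options (method, path, headers, expected codes/body, allow_insecure).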
map[string]*schema.Schema{ + pdnsGlbMonitorHeadersName: { + Type: schema.TypeString, + Description: "The name of HTTP request header", + Required: true, + }, + + pdnsGlbMonitorHeadersValue: { + Type: schema.TypeList, + Description: "The value of HTTP request header", + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + + pdnsGlbMonitorAllowInsecure: { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "Do not validate the certificate when monitor use HTTPS. This parameter is currently only valid for HTTPS monitors.", + }, + + pdnsGlbMonitorExpectedCodes: { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: InvokeValidator(ibmDNSGlbMonitor, pdnsGlbMonitorExpectedCodes), + Description: "The expected HTTP response code or code range of the health check. This parameter is only valid for HTTP and HTTPS", + }, + + pdnsGlbMonitorExpectedBody: { + Type: schema.TypeString, + Optional: true, + Description: "A case-insensitive sub-string to look for in the response body", + }, + + pdnsGlbMonitorCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Monitor creation date", + }, + + pdnsGlbMonitorModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "GLB Monitor Modification date", + }, + }, + } +} + +func resourceIBMPrivateDNSGLBMonitorValidator() *ResourceValidator { + monitorCheckTypes := "HTTP, HTTPS, TCP" + methods := "GET, HEAD" + expectedcode := "200,201,202,203,204,205,206,207,208,226,2xx" + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: pdnsGlbMonitorType, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: monitorCheckTypes}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: pdnsGlbMonitorMethod, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: methods}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: pdnsGlbMonitorExpectedCodes, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: expectedcode}) + dnsMonitorValidator := ResourceValidator{ResourceName: ibmDNSGlbMonitor, Schema: validateSchema} + return &dnsMonitorValidator +} + +func resourceIBMPrivateDNSGLBMonitorCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + instanceID := d.Get(pdnsInstanceID).(string) + createMonitorOptions := sess.NewCreateMonitorOptions(instanceID) + + monitorname := d.Get(pdnsGlbMonitorName).(string) + monitorinterval := int64(d.Get(pdnsGlbMonitorInterval).(int)) + monitorretries := int64(d.Get(pdnsGlbMonitorRetries).(int)) + monitortimeout := int64(d.Get(pdnsGlbMonitorTimeout).(int)) + createMonitorOptions.SetName(monitorname) + createMonitorOptions.SetInterval(monitorinterval) + createMonitorOptions.SetRetries(monitorretries) + createMonitorOptions.SetTimeout(monitortimeout) + if monitordescription, ok := d.GetOk(pdnsGlbMonitorDescription); ok { + createMonitorOptions.SetDescription(monitordescription.(string)) + } + if Mtype, ok := d.GetOk(pdnsGlbMonitorType); ok { + createMonitorOptions.SetType(Mtype.(string)) + } + if Mport, ok := d.GetOk(pdnsGlbMonitorPort); ok { + createMonitorOptions.SetPort(int64(Mport.(int))) + } + if monitorpath, ok := d.GetOk(pdnsGlbMonitorPath); 
ok { + createMonitorOptions.SetPath((monitorpath).(string)) + } + if monitorexpectedcodes, ok := d.GetOk(pdnsGlbMonitorExpectedCodes); ok { + createMonitorOptions.SetExpectedCodes((monitorexpectedcodes).(string)) + } + if monitormethod, ok := d.GetOk(pdnsGlbMonitorMethod); ok { + createMonitorOptions.SetMethod((monitormethod).(string)) + } + if monitorexpectedbody, ok := d.GetOk(pdnsGlbMonitorExpectedBody); ok { + createMonitorOptions.SetExpectedBody((monitorexpectedbody).(string)) + } + if monitorheaders, ok := d.GetOk(pdnsGlbMonitorHeaders); ok { + expandedmonitorheaders, err := expandPDNSGLBMonitorsHeader(monitorheaders) + if err != nil { + return err + } + createMonitorOptions.SetHeadersVar(expandedmonitorheaders) + } + if monitorallowinsecure, ok := d.GetOkExists(pdnsGlbMonitorAllowInsecure); ok { + createMonitorOptions.SetAllowInsecure((monitorallowinsecure).(bool)) + } + + response, detail, err := sess.CreateMonitor(createMonitorOptions) + if err != nil { + return fmt.Errorf("Error creating pdns GLB monitor:%s\n%s", err, detail) + } + + d.SetId(fmt.Sprintf("%s/%s", instanceID, *response.ID)) + return resourceIBMPrivateDNSGLBMonitorRead(d, meta) +} + +func expandPDNSGLBMonitorsHeader(header interface{}) ([]dnssvcsv1.HealthcheckHeader, error) { + headers := header.(*schema.Set).List() + expandheaders := make([]dnssvcsv1.HealthcheckHeader, 0) + for _, v := range headers { + locationConfig := v.(map[string]interface{}) + hname := locationConfig[pdnsGlbMonitorHeadersName].(string) + headers := expandStringList(locationConfig[pdnsGlbMonitorHeadersValue].([]interface{})) + headerItem := dnssvcsv1.HealthcheckHeader{ + Name: &hname, + Value: headers, + } + expandheaders = append(expandheaders, headerItem) + } + return expandheaders, nil +} + +func resourceIBMPrivateDNSGLBMonitorRead(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + idset := strings.Split(d.Id(), "/") + + getMonitorOptions := sess.NewGetMonitorOptions(idset[0], idset[1]) + response, detail, err := sess.GetMonitor(getMonitorOptions) + if err != nil { + return fmt.Errorf("Error fetching pdns GLB Monitor:%s\n%s", err, detail) + } + d.Set(pdnsInstanceID, idset[0]) + d.Set(pdnsGlbMonitorID, response.ID) + d.Set(pdnsGlbMonitorName, response.Name) + d.Set(pdnsGlbMonitorCreatedOn, response.CreatedOn) + d.Set(pdnsGlbMonitorModifiedOn, response.ModifiedOn) + d.Set(pdnsGlbMonitorType, response.Type) + d.Set(pdnsGlbMonitorPort, response.Port) + if response.Path != nil { + d.Set(pdnsGlbMonitorPath, response.Path) + } + if response.Interval != nil { + d.Set(pdnsGlbMonitorInterval, response.Interval) + } + if response.Retries != nil { + d.Set(pdnsGlbMonitorRetries, response.Retries) + } + if response.Timeout != nil { + d.Set(pdnsGlbMonitorTimeout, response.Timeout) + } + if response.Method != nil { + d.Set(pdnsGlbMonitorMethod, response.Method) + } + if response.ExpectedCodes != nil { + d.Set(pdnsGlbMonitorExpectedCodes, response.ExpectedCodes) + } + if response.AllowInsecure != nil { + d.Set(pdnsGlbMonitorAllowInsecure, response.AllowInsecure) + } + if response.Description != nil { + d.Set(pdnsGlbMonitorDescription, response.Description) + } + if response.ExpectedBody != nil { + d.Set(pdnsGlbMonitorExpectedBody, response.ExpectedBody) + } + + d.Set(pdnsGlbMonitorHeaders, flattenDataSourceLoadBalancerHeader(response.HeadersVar)) + + return nil +} + +func flattenDataSourceLoadBalancerHeader(header []dnssvcsv1.HealthcheckHeader) interface{} { + 
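+	// Inverse of expandPDNSGLBMonitorsHeader: converts the SDK's header
+	// structs back into the schema's set form, one element per headers
+	// block, e.g. (illustrative values):
+	//   headers {
+	//     name  = "Host"
+	//     value = ["example.com"]
+	//   }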
flattened := make([]interface{}, 0) + + for _, v := range header { + cfg := map[string]interface{}{ + pdnsGlbMonitorHeadersName: v.Name, + pdnsGlbMonitorHeadersValue: flattenStringList(v.Value), + } + flattened = append(flattened, cfg) + } + return flattened +} + +func resourceIBMPrivateDNSGLBMonitorUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idset := strings.Split(d.Id(), "/") + + // Update PDNS GLB Monitor if attributes has any change + + if d.HasChange(pdnsGlbMonitorName) || + d.HasChange(pdnsGlbMonitorDescription) || + d.HasChange(pdnsGlbMonitorInterval) || + d.HasChange(pdnsGlbMonitorRetries) || + d.HasChange(pdnsGlbMonitorTimeout) || + d.HasChange(pdnsGlbMonitorExpectedBody) || + d.HasChange(pdnsGlbMonitorType) || + d.HasChange(pdnsGlbMonitorPort) || + d.HasChange(pdnsGlbMonitorPath) || + d.HasChange(pdnsGlbMonitorAllowInsecure) || + d.HasChange(pdnsGlbMonitorExpectedCodes) || + d.HasChange(pdnsGlbMonitorHeaders) { + + updateMonitorOptions := sess.NewUpdateMonitorOptions(idset[0], idset[1]) + uname := d.Get(pdnsGlbMonitorName).(string) + udescription := d.Get(pdnsGlbMonitorDescription).(string) + uinterval := int64(d.Get(pdnsGlbMonitorInterval).(int)) + uretries := int64(d.Get(pdnsGlbMonitorRetries).(int)) + utimeout := int64(d.Get(pdnsGlbMonitorTimeout).(int)) + updateMonitorOptions.SetName(uname) + updateMonitorOptions.SetDescription(udescription) + updateMonitorOptions.SetInterval(uinterval) + updateMonitorOptions.SetRetries(uretries) + updateMonitorOptions.SetTimeout(utimeout) + + if Mtype, ok := d.GetOk(pdnsGlbMonitorType); ok { + updateMonitorOptions.SetType(Mtype.(string)) + } + if Mport, ok := d.GetOk(pdnsGlbMonitorPort); ok { + updateMonitorOptions.SetPort(int64(Mport.(int))) + } + if monitorpath, ok := d.GetOk(pdnsGlbMonitorPath); ok { + updateMonitorOptions.SetPath((monitorpath).(string)) + } + if monitorexpectedcodes, ok := d.GetOk(pdnsGlbMonitorExpectedCodes); ok { + updateMonitorOptions.SetExpectedCodes((monitorexpectedcodes).(string)) + } + if monitormethod, ok := d.GetOk(pdnsGlbMonitorMethod); ok { + updateMonitorOptions.SetMethod((monitormethod).(string)) + } + if monitorexpectedbody, ok := d.GetOk(pdnsGlbMonitorExpectedBody); ok { + updateMonitorOptions.SetExpectedBody((monitorexpectedbody).(string)) + } + if monitorheaders, ok := d.GetOk(pdnsGlbMonitorHeaders); ok { + expandedmonitorheaders, err := expandPDNSGLBMonitorsHeader(monitorheaders) + if err != nil { + return err + } + updateMonitorOptions.SetHeadersVar(expandedmonitorheaders) + } + if monitorallowinsecure, ok := d.GetOkExists(pdnsGlbMonitorAllowInsecure); ok { + updateMonitorOptions.SetAllowInsecure((monitorallowinsecure).(bool)) + } + + _, detail, err := sess.UpdateMonitor(updateMonitorOptions) + + if err != nil { + return fmt.Errorf("Error updating pdns GLB Monitor:%s\n%s", err, detail) + } + } + + return resourceIBMPrivateDNSGLBMonitorRead(d, meta) +} + +func resourceIBMPrivateDNSGLBMonitorDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idset := strings.Split(d.Id(), "/") + + DeleteMonitorOptions := sess.NewDeleteMonitorOptions(idset[0], idset[1]) + response, err := sess.DeleteMonitor(DeleteMonitorOptions) + + if err != nil { + return fmt.Errorf("Error deleting pdns GLB Monitor:%s\n%s", err, response) + } + + d.SetId("") + return nil +} + +func 
resourceIBMPrivateDNSGLBMonitorExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return false, err + } + + idset := strings.Split(d.Id(), "/") + + getMonitorOptions := sess.NewGetMonitorOptions(idset[0], idset[1]) + response, detail, err := sess.GetMonitor(getMonitorOptions) + if err != nil { + if response != nil && detail != nil && detail.StatusCode == 404 { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_glb_pool.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_glb_pool.go new file mode 100644 index 00000000000..774cffff230 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_glb_pool.go @@ -0,0 +1,421 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/IBM/go-sdk-core/v3/core" + dns "github.com/IBM/networking-go-sdk/dnssvcsv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + ibmDNSGlbPool = "ibm_dns_glb_pool" + pdnsGlbPoolID = "pool_id" + pdnsGlbPoolName = "name" + pdnsGlbPoolDescription = "description" + pdnsGlbPoolEnabled = "enabled" + pdnsGlbPoolHealth = "health" + pdnsGlbPoolHealthyOriginsThreshold = "healthy_origins_threshold" + pdnsGlbPoolOrigins = "origins" + pdnsGlbPoolOriginsName = "name" + pdnsGlbPoolOriginsDescription = "description" + pdnsGlbPoolOriginsAddress = "address" + pdnsGlbPoolOriginsEnabled = "enabled" + pdnsGlbPoolOriginsHealth = "health" + pdnsGlbPoolOriginsHealthFailureReason = "health_failure_reason" + pdnsGlbPoolMonitor = "monitor" + pdnsGlbPoolChannel = "notification_channel" + pdnsGlbPoolRegion = "healthcheck_region" + pdnsGlbPoolSubnet = "healthcheck_subnets" + pdnsGlbPoolCreatedOn = "created_on" + pdnsGlbPoolModifiedOn = "modified_on" + pdnsGlbPoolDeletePending = "deleting" + pdnsGlbPoolDeleted = "deleted" +) + +func resourceIBMPrivateDNSGLBPool() *schema.Resource { + return &schema.Resource{ + + Create: resourceIBMPrivateDNSGLBPoolCreate, + Read: resourceIBMPrivateDNSGLBPoolRead, + Update: resourceIBMPrivateDNSGLBPoolUpdate, + Delete: resourceIBMPrivateDNSGLBPoolDelete, + Exists: resourceIBMPrivateDNSGLBPoolExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Instance Id", + }, + pdnsGlbPoolID: { + Type: schema.TypeString, + Computed: true, + Description: "Pool Id", + }, + pdnsGlbPoolName: { + Type: schema.TypeString, + Required: true, + Description: "The name of the load balancer pool.", + }, + pdnsGlbPoolDescription: { + Type: schema.TypeString, + Optional: true, + Description: "Descriptive text of the load balancer pool", + }, + pdnsGlbPoolEnabled: { + Type: schema.TypeBool, + Optional: true, + Description: "Whether the load balancer pool is enabled", + }, + pdnsGlbPoolHealth: { + Type: schema.TypeString, + Computed: true, + Description: "The health state of the load balancer pool", + }, + 
pdnsGlbPoolHealthyOriginsThreshold: { + Type: schema.TypeInt, + Optional: true, + Description: "The minimum number of origins that must be healthy for this pool to serve traffic", + }, + pdnsGlbPoolOrigins: { + Type: schema.TypeSet, + Required: true, + Description: "Origins info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + pdnsGlbPoolOriginsName: { + Type: schema.TypeString, + Description: "The name of the origin server.", + Required: true, + }, + pdnsGlbPoolOriginsAddress: { + Type: schema.TypeString, + Description: "The address of the origin server. It can be a hostname or an IP address.", + Required: true, + }, + pdnsGlbPoolOriginsEnabled: { + Type: schema.TypeBool, + Description: "Whether the origin server is enabled.", + Required: true, + }, + pdnsGlbPoolOriginsDescription: { + Type: schema.TypeString, + Description: "Description of the origin server.", + Optional: true, + }, + pdnsGlbPoolOriginsHealth: { + Type: schema.TypeBool, + Description: "Whether the health is `true` or `false`.", + Computed: true, + }, + pdnsGlbPoolOriginsHealthFailureReason: { + Type: schema.TypeString, + Description: "The Reason for health check failure", + Computed: true, + }, + }, + }, + }, + pdnsGlbPoolMonitor: { + Type: schema.TypeString, + Optional: true, + Description: "The ID of the load balancer monitor to be associated to this pool", + }, + pdnsGlbPoolChannel: { + Type: schema.TypeString, + Optional: true, + Description: "The notification channel. It is a webhook URL.", + }, + pdnsGlbPoolRegion: { + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator(ibmDNSGlbPool, pdnsGlbPoolRegion), + Description: "Health check region of VSIs", + }, + pdnsGlbPoolSubnet: { + Type: schema.TypeList, + Optional: true, + Description: "Health check subnet CRNs of VSIs", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + pdnsGlbPoolCreatedOn: { + Type: schema.TypeString, + Description: "The time when a load balancer pool is created.", + Computed: true, + }, + pdnsGlbPoolModifiedOn: { + Type: schema.TypeString, + Description: "The recent time when a load balancer pool is modified.", + Computed: true, + }, + }, + } +} + +func resourceIBMPrivateDNSGLBPoolValidator() *ResourceValidator { + regions := "us-south,us-east,eu-gb,eu-du,au-syd,jp-tok" + + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: pdnsGlbPoolRegion, + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Required: true, + AllowedValues: regions}) + dnsPoolValidator := ResourceValidator{ResourceName: ibmDNSGlbPool, Schema: validateSchema} + return &dnsPoolValidator +} + +func resourceIBMPrivateDNSGLBPoolCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + instanceID := d.Get(pdnsInstanceID).(string) + CreatePoolOptions := sess.NewCreatePoolOptions(instanceID) + + poolname := d.Get(pdnsGlbPoolName).(string) + CreatePoolOptions.SetName(poolname) + + if description, ok := d.GetOk(pdnsGlbPoolDescription); ok { + CreatePoolOptions.SetDescription(description.(string)) + } + if enable, ok := d.GetOk(pdnsGlbPoolEnabled); ok { + CreatePoolOptions.SetEnabled(enable.(bool)) + } + if threshold, ok := d.GetOk(pdnsGlbPoolHealthyOriginsThreshold); ok { + CreatePoolOptions.SetHealthyOriginsThreshold(int64(threshold.(int))) + } + if monitor, ok := d.GetOk(pdnsGlbPoolMonitor); ok { + monitorID, _, _ := 
convertTftoCisTwoVar(monitor.(string)) + CreatePoolOptions.SetMonitor(monitorID) + } + if chanel, ok := d.GetOk(pdnsGlbPoolChannel); ok { + CreatePoolOptions.SetNotificationChannel(chanel.(string)) + } + if region, ok := d.GetOk(pdnsGlbPoolRegion); ok { + CreatePoolOptions.SetHealthcheckRegion(region.(string)) + } + if subnets, ok := d.GetOk(pdnsGlbPoolSubnet); ok { + CreatePoolOptions.SetHealthcheckSubnets(expandStringList(subnets.([]interface{}))) + } + + poolorigins := d.Get(pdnsGlbPoolOrigins).(*schema.Set) + CreatePoolOptions.SetOrigins(expandPDNSGlbPoolOrigins(poolorigins)) + + result, resp, err := sess.CreatePool(CreatePoolOptions) + if err != nil { + log.Printf("create global load balancer pool failed %s", resp) + return err + } + d.SetId(fmt.Sprintf("%s/%s", instanceID, *result.ID)) + + return resourceIBMPrivateDNSGLBPoolRead(d, meta) +} + +func resourceIBMPrivateDNSGLBPoolRead(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + idset := strings.Split(d.Id(), "/") + + getPoolOptions := sess.NewGetPoolOptions(idset[0], idset[1]) + presponse, resp, err := sess.GetPool(getPoolOptions) + if err != nil { + return fmt.Errorf("Error fetching pdns GLB Pool:%s\n%s", err, resp) + } + + response := *presponse + d.Set(pdnsGlbPoolName, response.Name) + d.Set(pdnsGlbPoolID, response.ID) + d.Set(pdnsInstanceID, idset[0]) + d.Set(pdnsGlbPoolDescription, response.Description) + d.Set(pdnsGlbPoolEnabled, response.Enabled) + d.Set(pdnsGlbPoolHealth, response.Health) + d.Set(pdnsGlbPoolHealthyOriginsThreshold, response.HealthyOriginsThreshold) + d.Set(pdnsGlbPoolMonitor, response.Monitor) + d.Set(pdnsGlbPoolChannel, response.NotificationChannel) + d.Set(pdnsGlbPoolRegion, response.HealthcheckRegion) + d.Set(pdnsGlbPoolSubnet, response.HealthcheckSubnets) + d.Set(pdnsGlbPoolCreatedOn, response.CreatedOn) + d.Set(pdnsGlbPoolModifiedOn, response.ModifiedOn) + d.Set(pdnsGlbPoolOrigins, flattenPDNSGlbPoolOrigins(response.Origins)) + + return nil +} + +func flattenPDNSGlbPoolOrigins(list []dns.Origin) []map[string]interface{} { + origins := []map[string]interface{}{} + for _, origin := range list { + l := map[string]interface{}{ + pdnsGlbPoolOriginsName: *origin.Name, + pdnsGlbPoolOriginsAddress: *origin.Address, + pdnsGlbPoolOriginsEnabled: *origin.Enabled, + pdnsGlbPoolOriginsDescription: *origin.Description, + pdnsGlbPoolOriginsHealth: *origin.Health, + pdnsGlbPoolOriginsHealthFailureReason: *origin.HealthFailureReason, + } + origins = append(origins, l) + } + return origins +} + +func resourceIBMPrivateDNSGLBPoolUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idset := strings.Split(d.Id(), "/") + updatePoolOptions := sess.NewUpdatePoolOptions(idset[0], idset[1]) + + if d.HasChange(pdnsGlbPoolName) || + d.HasChange(pdnsGlbPoolDescription) || + d.HasChange(pdnsGlbPoolEnabled) || + d.HasChange(pdnsGlbPoolHealthyOriginsThreshold) || + d.HasChange(pdnsGlbPoolMonitor) || + d.HasChange(pdnsGlbPoolChannel) || + d.HasChange(pdnsGlbPoolRegion) || + d.HasChange(pdnsGlbPoolOrigins) || + d.HasChange(pdnsGlbPoolSubnet) { + if Mname, ok := d.GetOk(pdnsGlbPoolName); ok { + updatePoolOptions.SetName(Mname.(string)) + } + if description, ok := d.GetOk(pdnsGlbPoolDescription); ok { + updatePoolOptions.SetDescription(description.(string)) + } + if enable, ok := d.GetOk(pdnsGlbPoolEnabled); ok { + 
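// d.GetOk only reports attributes that are set in the configuration, so + // unset (zero-value) fields are left out of the update request entirely. + 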
updatePoolOptions.SetEnabled(enable.(bool)) + } + if threshold, ok := d.GetOk(pdnsGlbPoolHealthyOriginsThreshold); ok { + updatePoolOptions.SetHealthyOriginsThreshold(int64(threshold.(int))) + } + if monitor, ok := d.GetOk(pdnsGlbPoolMonitor); ok { + monitorID, _, _ := convertTftoCisTwoVar(monitor.(string)) + updatePoolOptions.SetMonitor(monitorID) + } + if chanel, ok := d.GetOk(pdnsGlbPoolChannel); ok { + updatePoolOptions.SetNotificationChannel(chanel.(string)) + } + if region, ok := d.GetOk(pdnsGlbPoolRegion); ok { + updatePoolOptions.SetHealthcheckRegion(region.(string)) + } + if _, ok := d.GetOk(pdnsGlbPoolSubnet); ok { + updatePoolOptions.SetHealthcheckSubnets(expandStringList(d.Get(pdnsGlbPoolSubnet).([]interface{}))) + } + if _, ok := d.GetOk(pdnsGlbPoolOrigins); ok { + poolorigins := d.Get(pdnsGlbPoolOrigins).(*schema.Set) + updatePoolOptions.SetOrigins(expandPDNSGlbPoolOrigins(poolorigins)) + + } + _, detail, err := sess.UpdatePool(updatePoolOptions) + if err != nil { + return fmt.Errorf("Error updating pdns GLB Pool:%s\n%s", err, detail) + } + } + + return resourceIBMPrivateDNSGLBPoolRead(d, meta) +} + +func resourceIBMPrivateDNSGLBPoolDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idset := strings.Split(d.Id(), "/") + DeletePoolOptions := sess.NewDeletePoolOptions(idset[0], idset[1]) + response, err := sess.DeletePool(DeletePoolOptions) + if err != nil { + return fmt.Errorf("Error deleting pdns GLB Pool:%s\n%s", err, response) + } + _, err = waitForPDNSGlbPoolDelete(d, meta) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceIBMPrivateDNSGLBPoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return false, err + } + + idset := strings.Split(d.Id(), "/") + + getPoolOptions := sess.NewGetPoolOptions(idset[0], idset[1]) + response, detail, err := sess.GetPool(getPoolOptions) + if err != nil { + if response != nil && detail != nil && detail.StatusCode == 404 { + return false, nil + } + return false, err + } + return true, nil +} + +func expandPDNSGlbPoolOrigins(originsList *schema.Set) (origins []dns.OriginInput) { + for _, iface := range originsList.List() { + orig := iface.(map[string]interface{}) + origin := dns.OriginInput{ + Name: core.StringPtr(orig[pdnsGlbPoolOriginsName].(string)), + Address: core.StringPtr(orig[pdnsGlbPoolOriginsAddress].(string)), + Enabled: core.BoolPtr(orig[pdnsGlbPoolOriginsEnabled].(bool)), + Description: core.StringPtr(orig[pdnsGlbPoolOriginsDescription].(string)), + } + origins = append(origins, origin) + } + return +} + +func waitForPDNSGlbPoolDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) { + cisClient, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return nil, err + } + idset := strings.Split(d.Id(), "/") + getPoolOptions := cisClient.NewGetPoolOptions(idset[0], idset[1]) + stateConf := &resource.StateChangeConf{ + Pending: []string{pdnsGlbPoolDeletePending}, + Target: []string{pdnsGlbPoolDeleted}, + Refresh: func() (interface{}, string, error) { + _, detail, err := cisClient.GetPool(getPoolOptions) + if err != nil { + if detail != nil && detail.StatusCode == 404 { + return detail, pdnsGlbPoolDeleted, nil + } + return nil, "", err + } + return detail, pdnsGlbPoolDeletePending, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 60 * time.Second, + MinTimeout: 
10 * time.Second, + PollInterval: 60 * time.Second, + } + + return stateConf.WaitForState() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_permitted_network.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_permitted_network.go new file mode 100644 index 00000000000..31cc9682710 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_permitted_network.go @@ -0,0 +1,197 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + pdnsVpcCRN = "vpc_crn" + pdnsNetworkType = "type" + pdnsPermittedNetworkID = "permitted_network_id" + pdnsPermittedNetworkCreatedOn = "created_on" + pdnsPermittedNetworkModifiedOn = "modified_on" + pdnsPermittedNetworkState = "state" + pdnsPermittedNetwork = "permitted_network" +) + +var allowedNetworkTypes = []string{ + "vpc", +} + +func resourceIBMPrivateDNSPermittedNetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPrivateDNSPermittedNetworkCreate, + Read: resourceIBMPrivateDNSPermittedNetworkRead, + Delete: resourceIBMPrivateDNSPermittedNetworkDelete, + Exists: resourceIBMPrivateDNSPermittedNetworkExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + pdnsPermittedNetworkID: { + Type: schema.TypeString, + Computed: true, + Description: "Network Id", + }, + + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Instance Id", + }, + + pdnsZoneID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Zone Id", + }, + + pdnsNetworkType: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "vpc", + ValidateFunc: validateAllowedStringValue([]string{"vpc"}), + Description: "Network Type", + }, + + pdnsVpcCRN: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "VPC CRN id", + }, + + pdnsPermittedNetworkCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Network creation date", + }, + + pdnsPermittedNetworkModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Network Modification date", + }, + + pdnsPermittedNetworkState: { + Type: schema.TypeString, + Computed: true, + Description: "Network status", + }, + }, + } +} + +func resourceIBMPrivateDNSPermittedNetworkCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + instanceID := d.Get(pdnsInstanceID).(string) + zoneID := d.Get(pdnsZoneID).(string) + vpcCRN := d.Get(pdnsVpcCRN).(string) + nwType := d.Get(pdnsNetworkType).(string) + mk := "private_dns_permitted_network_" + instanceID + zoneID + ibmMutexKV.Lock(mk) + defer ibmMutexKV.Unlock(mk) + + createPermittedNetworkOptions := sess.NewCreatePermittedNetworkOptions(instanceID, zoneID) + permittedNetworkCrn, err := sess.NewPermittedNetworkVpc(vpcCRN) + if err != nil { + return err + } + + createPermittedNetworkOptions.SetPermittedNetwork(permittedNetworkCrn) + createPermittedNetworkOptions.SetType(nwType) + response, detail, err := sess.CreatePermittedNetwork(createPermittedNetworkOptions) + if err 
!= nil { + return fmt.Errorf("Error creating pdns permitted network:%s\n%s", err, detail) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", instanceID, zoneID, *response.ID)) + + return resourceIBMPrivateDNSPermittedNetworkRead(d, meta) +} + +func resourceIBMPrivateDNSPermittedNetworkRead(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idSet := strings.Split(d.Id(), "/") + getPermittedNetworkOptions := sess.NewGetPermittedNetworkOptions(idSet[0], idSet[1], idSet[2]) + response, detail, err := sess.GetPermittedNetwork(getPermittedNetworkOptions) + + if err != nil { + return fmt.Errorf("Error reading pdns permitted network:%s\n%s", err, detail) + } + + d.Set(pdnsInstanceID, idSet[0]) + d.Set(pdnsZoneID, idSet[1]) + d.Set(pdnsPermittedNetworkID, response.ID) + d.Set(pdnsPermittedNetworkCreatedOn, response.CreatedOn) + d.Set(pdnsPermittedNetworkModifiedOn, response.ModifiedOn) + d.Set(pdnsVpcCRN, response.PermittedNetwork.VpcCrn) + d.Set(pdnsNetworkType, response.Type) + d.Set(pdnsPermittedNetworkState, response.State) + + return nil +} + +func resourceIBMPrivateDNSPermittedNetworkDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idSet := strings.Split(d.Id(), "/") + mk := "private_dns_permitted_network_" + idSet[0] + idSet[1] + ibmMutexKV.Lock(mk) + defer ibmMutexKV.Unlock(mk) + deletePermittedNetworkOptions := sess.NewDeletePermittedNetworkOptions(idSet[0], idSet[1], idSet[2]) + _, response, err := sess.DeletePermittedNetwork(deletePermittedNetworkOptions) + + if err != nil { + return fmt.Errorf("Error deleting pdns permitted network:%s\n%s", err, response) + } + + d.SetId("") + return nil +} + +func resourceIBMPrivateDNSPermittedNetworkExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return false, err + } + + idSet := strings.Split(d.Id(), "/") + mk := "private_dns_permitted_network_" + idSet[0] + idSet[1] + ibmMutexKV.Lock(mk) + defer ibmMutexKV.Unlock(mk) + getPermittedNetworkOptions := sess.NewGetPermittedNetworkOptions(idSet[0], idSet[1], idSet[2]) + _, response, err := sess.GetPermittedNetwork(getPermittedNetworkOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_resource_record.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_resource_record.go new file mode 100644 index 00000000000..ac57686f710 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_resource_record.go @@ -0,0 +1,531 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +var allowedPrivateDomainRecordTypes = []string{ + "A", "AAAA", "CNAME", "MX", "PTR", "SRV", "TXT", +} + +const ( + pdnsResourceRecordID = "resource_record_id" + pdnsRecordType = "type" + pdnsRecordTTL = "ttl" + pdnsRecordName = "name" + pdnsRdata = "rdata" + pdnsMxPreference = "preference" + pdnsSrvPort = "port" + pdnsSrvPriority = "priority" + pdnsSrvWeight = "weight" + pdnsSrvProtocol = "protocol" + pdnsSrvService = "service" + pdnsRecordCreatedOn = "created_on" + pdnsRecordModifiedOn = "modified_on" +) + +func caseDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return strings.ToUpper(old) == strings.ToUpper(new) +} + +func resourceIBMPrivateDNSResourceRecord() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPrivateDNSResourceRecordCreate, + Read: resourceIBMPrivateDNSResourceRecordRead, + Update: resourceIBMPrivateDNSResourceRecordUpdate, + Delete: resourceIBMPrivateDNSResourceRecordDelete, + Exists: resourceIBMPrivateDNSResourceRecordExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + pdnsResourceRecordID: { + Type: schema.TypeString, + Computed: true, + Description: "Resource record ID", + }, + + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Instance ID", + }, + + pdnsZoneID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Zone ID", + }, + + pdnsRecordName: { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppressPDNSRecordNameDiff, + Description: "DNS record name", + }, + + pdnsRecordType: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(val interface{}, field string) (warnings []string, errors []error) { + value := val.(string) + for _, rtype := range allowedPrivateDomainRecordTypes { + if value == rtype { + return + } + } + + errors = append( + errors, + fmt.Errorf("%s is not one of the valid domain record types: %s", + value, strings.Join(allowedPrivateDomainRecordTypes, ", "), + ), + ) + return + }, + Description: "DNS record Type", + }, + + pdnsRdata: { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: caseDiffSuppress, + ValidateFunc: func(val interface{}, field string) (warnings []string, errors []error) { + value := val.(string) + if ipv6Regexp.MatchString(value) && upcaseRegexp.MatchString(value) { + errors = append( + errors, + fmt.Errorf( + "IPv6 addresses in the data property cannot have upper case letters: %s", + value, + ), + ) + } + return + }, + Description: "DNS record Data", + }, + + pdnsRecordTTL: { + Type: schema.TypeInt, + Optional: true, + Default: 900, + DefaultFunc: func() (interface{}, error) { + return 900, nil + }, + Description: "DNS record TTL", + }, + + pdnsMxPreference: { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "DNS maximum preference", + }, + + pdnsSrvPort: { + Type: schema.TypeInt, + Optional: true, + Description: "DNS server Port", + }, + + pdnsSrvPriority: { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "DNS server Priority", + }, + + pdnsSrvWeight: { + Type: schema.TypeInt, + Optional: true, + Default: 0, 
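+ // Per RFC 2782, SRV weight biases selection among records of equal priority.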
+ Description: "DNS server weight", + }, + + pdnsSrvService: { + Type: schema.TypeString, + Optional: true, + Description: "Service info", + }, + + pdnsSrvProtocol: { + Type: schema.TypeString, + Optional: true, + Description: "Protocol", + }, + + pdnsRecordCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Creation Data", + }, + + pdnsRecordModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Modification date", + }, + }, + } +} + +func resourceIBMPrivateDNSResourceRecordCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + var ( + instanceID string + zoneID string + recordType string + name string + rdata string + service string + protocol string + ttl int + preference int + port int + priority int + weight int + ) + + instanceID = d.Get(pdnsInstanceID).(string) + zoneID = d.Get(pdnsZoneID).(string) + recordType = d.Get(pdnsRecordType).(string) + name = d.Get(pdnsRecordName).(string) + rdata = d.Get(pdnsRdata).(string) + + if v, ok := d.GetOk(pdnsRecordTTL); ok { + ttl = v.(int) + } + + createResourceRecordOptions := sess.NewCreateResourceRecordOptions(instanceID, zoneID) + createResourceRecordOptions.SetName(name) + createResourceRecordOptions.SetType(recordType) + createResourceRecordOptions.SetTTL(int64(ttl)) + + switch recordType { + case "A": + resourceRecordAData, err := sess.NewResourceRecordInputRdataRdataARecord(rdata) + if err != nil { + return fmt.Errorf("Error creating pdns resource record A data:%s", err) + } + createResourceRecordOptions.SetRdata(resourceRecordAData) + case "AAAA": + resourceRecordAaaaData, err := sess.NewResourceRecordInputRdataRdataAaaaRecord(rdata) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Aaaa data:%s", err) + } + createResourceRecordOptions.SetRdata(resourceRecordAaaaData) + case "CNAME": + resourceRecordCnameData, err := sess.NewResourceRecordInputRdataRdataCnameRecord(rdata) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Cname data:%s", err) + } + createResourceRecordOptions.SetRdata(resourceRecordCnameData) + case "PTR": + resourceRecordPtrData, err := sess.NewResourceRecordInputRdataRdataPtrRecord(rdata) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Ptr data:%s", err) + } + createResourceRecordOptions.SetRdata(resourceRecordPtrData) + case "TXT": + resourceRecordTxtData, err := sess.NewResourceRecordInputRdataRdataTxtRecord(rdata) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Txt data:%s", err) + } + createResourceRecordOptions.SetRdata(resourceRecordTxtData) + case "MX": + if v, ok := d.GetOk(pdnsMxPreference); ok { + preference = v.(int) + } + resourceRecordMxData, err := sess.NewResourceRecordInputRdataRdataMxRecord(rdata, int64(preference)) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Mx data:%s", err) + } + createResourceRecordOptions.SetRdata(resourceRecordMxData) + case "SRV": + if v, ok := d.GetOk(pdnsSrvPort); ok { + port = v.(int) + } + if v, ok := d.GetOk(pdnsSrvPriority); ok { + priority = v.(int) + } + if v, ok := d.GetOk(pdnsSrvWeight); ok { + weight = v.(int) + } + resourceRecordSrvData, err := sess.NewResourceRecordInputRdataRdataSrvRecord(int64(port), int64(priority), rdata, int64(weight)) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Srv data:%s", err) + } + if v, ok := d.GetOk(pdnsSrvService); ok { + service 
= v.(string) + } + if v, ok := d.GetOk(pdnsSrvProtocol); ok { + protocol = v.(string) + } + createResourceRecordOptions.SetRdata(resourceRecordSrvData) + createResourceRecordOptions.SetService(service) + createResourceRecordOptions.SetProtocol(protocol) + } + mk := "private_dns_resource_record_" + instanceID + zoneID + ibmMutexKV.Lock(mk) + defer ibmMutexKV.Unlock(mk) + response, detail, err := sess.CreateResourceRecord(createResourceRecordOptions) + if err != nil { + return fmt.Errorf("Error creating pdns resource record:%s\n%s", err, detail) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", instanceID, zoneID, *response.ID)) + + return resourceIBMPrivateDNSResourceRecordRead(d, meta) +} + +func resourceIBMPrivateDNSResourceRecordRead(d *schema.ResourceData, meta interface{}) error { + idSet := strings.Split(d.Id(), "/") + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + getResourceRecordOptions := sess.NewGetResourceRecordOptions(idSet[0], idSet[1], idSet[2]) + response, detail, err := sess.GetResourceRecord(getResourceRecordOptions) + if err != nil { + return fmt.Errorf("Error reading pdns resource record:%s\n%s", err, detail) + } + + // extract the record name by removing zone details + var recordName string + zone := strings.Split(idSet[1], ":") + name := strings.Split(*response.Name, zone[0]) + name[0] = strings.Trim(name[0], ".") + recordName = name[0] + + if *response.Type == "SRV" { + // "_sip._udp.testsrv" + temp := strings.Split(name[0], ".") + recordName = temp[2] + } + + d.Set(pdnsResourceRecordID, response.ID) + d.Set(pdnsInstanceID, idSet[0]) + d.Set(pdnsZoneID, idSet[1]) + d.Set(pdnsRecordName, recordName) + d.Set(pdnsRecordType, response.Type) + d.Set(pdnsRecordTTL, response.TTL) + d.Set(pdnsRecordCreatedOn, response.CreatedOn) + d.Set(pdnsRecordModifiedOn, response.ModifiedOn) + + if *response.Type == "SRV" { + data := response.Rdata.(map[string]interface{}) + d.Set(pdnsSrvPort, data["port"]) + d.Set(pdnsSrvPriority, data["priority"]) + d.Set(pdnsSrvWeight, data["weight"]) + d.Set(pdnsRdata, data["target"].(string)) + d.Set(pdnsSrvService, response.Service) + d.Set(pdnsSrvProtocol, response.Protocol) + } + + if *response.Type == "MX" { + data := response.Rdata.(map[string]interface{}) + d.Set(pdnsMxPreference, data["preference"]) + d.Set(pdnsRdata, data["exchange"].(string)) + } + if *response.Type == "A" || *response.Type == "AAAA" { + data := response.Rdata.(map[string]interface{}) + d.Set(pdnsRdata, data["ip"].(string)) + } + if *response.Type == "CNAME" { + data := response.Rdata.(map[string]interface{}) + d.Set(pdnsRdata, data["cname"].(string)) + } + if *response.Type == "PTR" { + data := response.Rdata.(map[string]interface{}) + d.Set(pdnsRdata, data["ptrdname"].(string)) + } + if *response.Type == "TXT" { + data := response.Rdata.(map[string]interface{}) + d.Set(pdnsRdata, data["text"].(string)) + } + + return nil +} + +func resourceIBMPrivateDNSResourceRecordUpdate(d *schema.ResourceData, meta interface{}) error { + idSet := strings.Split(d.Id(), "/") + + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + mk := "private_dns_resource_record_" + idSet[0] + idSet[1] + ibmMutexKV.Lock(mk) + defer ibmMutexKV.Unlock(mk) + + updateResourceRecordOptions := sess.NewUpdateResourceRecordOptions(idSet[0], idSet[1], idSet[2]) + + var rdata string + + if d.HasChange(pdnsRecordName) || d.HasChange(pdnsRecordTTL) || d.HasChange(pdnsRdata) || + d.HasChange(pdnsSrvPort) || 
d.HasChange(pdnsSrvPriority) || + d.HasChange(pdnsSrvWeight) || d.HasChange(pdnsSrvService) || + d.HasChange(pdnsSrvProtocol) || d.HasChange(pdnsMxPreference) { + + recordName := d.Get(pdnsRecordName).(string) + recordType := d.Get(pdnsRecordType).(string) + ttl := int64(d.Get(pdnsRecordTTL).(int)) + + if recordType != "PTR" { + updateResourceRecordOptions.SetName(recordName) + } + switch recordType { + case "A": + updateResourceRecordOptions.SetTTL(ttl) + rdata = d.Get(pdnsRdata).(string) + resourceRecordAData, err := sess.NewResourceRecordUpdateInputRdataRdataARecord(rdata) + if err != nil { + return fmt.Errorf("Error creating pdns resource record A data:%s", err) + } + updateResourceRecordOptions.SetRdata(resourceRecordAData) + + case "AAAA": + updateResourceRecordOptions.SetTTL(ttl) + rdata = d.Get(pdnsRdata).(string) + resourceRecordAaaaData, err := sess.NewResourceRecordUpdateInputRdataRdataAaaaRecord(rdata) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Aaaa data:%s", err) + } + updateResourceRecordOptions.SetRdata(resourceRecordAaaaData) + + case "CNAME": + updateResourceRecordOptions.SetTTL(ttl) + rdata = d.Get(pdnsRdata).(string) + resourceRecordCnameData, err := sess.NewResourceRecordUpdateInputRdataRdataCnameRecord(rdata) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Cname data:%s", err) + } + updateResourceRecordOptions.SetRdata(resourceRecordCnameData) + + case "PTR": + updateResourceRecordOptions.SetTTL(ttl) + + case "TXT": + updateResourceRecordOptions.SetTTL(ttl) + rdata = d.Get(pdnsRdata).(string) + resourceRecordTxtData, err := sess.NewResourceRecordUpdateInputRdataRdataTxtRecord(rdata) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Txt data:%s", err) + } + updateResourceRecordOptions.SetRdata(resourceRecordTxtData) + + case "MX": + updateResourceRecordOptions.SetTTL(ttl) + rdata = d.Get(pdnsRdata).(string) + preference := d.Get(pdnsMxPreference).(int) + + resourceRecordMxData, err := sess.NewResourceRecordUpdateInputRdataRdataMxRecord(rdata, int64(preference)) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Mx data:%s", err) + } + updateResourceRecordOptions.SetRdata(resourceRecordMxData) + + case "SRV": + updateResourceRecordOptions.SetTTL(ttl) + rdata = d.Get(pdnsRdata).(string) + port := d.Get(pdnsSrvPort).(int) + priority := d.Get(pdnsSrvPriority).(int) + weight := d.Get(pdnsSrvWeight).(int) + + resourceRecordSrvData, err := sess.NewResourceRecordUpdateInputRdataRdataSrvRecord(int64(port), int64(priority), rdata, int64(weight)) + if err != nil { + return fmt.Errorf("Error creating pdns resource record Srv data:%s", err) + } + updateResourceRecordOptions.SetRdata(resourceRecordSrvData) + + service := d.Get(pdnsSrvService).(string) + protocol := d.Get(pdnsSrvProtocol).(string) + updateResourceRecordOptions.SetService(service) + updateResourceRecordOptions.SetProtocol(protocol) + } + + _, detail, err := sess.UpdateResourceRecord(updateResourceRecordOptions) + if err != nil { + return fmt.Errorf("Error updating pdns resource record:%s\n%s", err, detail) + } + } + + return resourceIBMPrivateDNSResourceRecordRead(d, meta) +} + +func resourceIBMPrivateDNSResourceRecordDelete(d *schema.ResourceData, meta interface{}) error { + idSet := strings.Split(d.Id(), "/") + + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + deleteResourceRecordOptions := sess.NewDeleteResourceRecordOptions(idSet[0], idSet[1], idSet[2]) + mk 
:= "private_dns_resource_record_" + idSet[0] + idSet[1] + ibmMutexKV.Lock(mk) + defer ibmMutexKV.Unlock(mk) + response, err := sess.DeleteResourceRecord(deleteResourceRecordOptions) + if err != nil { + return fmt.Errorf("Error deleting pdns resource record:%s\n%s", err, response) + } + + d.SetId("") + return nil +} + +func resourceIBMPrivateDNSResourceRecordExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return false, err + } + + idSet := strings.Split(d.Id(), "/") + getResourceRecordOptions := sess.NewGetResourceRecordOptions(idSet[0], idSet[1], idSet[2]) + mk := "private_dns_resource_record_" + idSet[0] + idSet[1] + ibmMutexKV.Lock(mk) + defer ibmMutexKV.Unlock(mk) + _, response, err := sess.GetResourceRecord(getResourceRecordOptions) + + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, err + } + return true, nil +} + +func suppressPDNSRecordNameDiff(k, old, new string, d *schema.ResourceData) bool { + // PDNS concantenates name with domain. So just check name is the same + if strings.ToUpper(strings.SplitN(old, ".", 2)[0]) == strings.ToUpper(strings.SplitN(new, ".", 2)[0]) { + return true + } + // If name is @, its replaced by the domain name. So ignore check. + if new == "@" { + return true + } + + return false +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_zones.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_zones.go new file mode 100644 index 00000000000..87813a8ff5f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_private_dns_zones.go @@ -0,0 +1,226 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + pdnsInstanceID = "instance_id" + pdnsZoneName = "name" + pdnsZoneDescription = "description" + pdnsZoneLabel = "label" + pdnsZoneCreatedOn = "created_on" + pdnsZoneModifiedOn = "modified_on" + pdnsZoneState = "state" + pdnsZoneID = "zone_id" +) + +func resourceIBMPrivateDNSZone() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMPrivateDNSZoneCreate, + Read: resourceIBMPrivateDNSZoneRead, + Update: resourceIBMPrivateDNSZoneUpdate, + Delete: resourceIBMPrivateDNSZoneDelete, + Exists: resourceIBMPrivateDNSZoneExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + pdnsInstanceID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Instance ID", + }, + + pdnsZoneID: { + Type: schema.TypeString, + Computed: true, + Description: "Zone ID", + }, + + pdnsZoneName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Zone name", + }, + + pdnsZoneDescription: { + Type: schema.TypeString, + Required: false, + Optional: true, + Description: "Zone description", + }, + + pdnsZoneState: { + Type: schema.TypeString, + Computed: true, + Description: "Zone state", + }, + + pdnsZoneLabel: { + Type: schema.TypeString, + Required: false, + Optional: true, + Description: "Label", + }, + + pdnsZoneCreatedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Creation date", + }, + + pdnsZoneModifiedOn: { + Type: schema.TypeString, + Computed: true, + Description: "Modification date", + }, + }, + } +} + +func resourceIBMPrivateDNSZoneCreate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + var ( + instanceID string + zoneName string + zoneDescription string + zoneLabel string + ) + + instanceID = d.Get(pdnsInstanceID).(string) + zoneName = d.Get(pdnsZoneName).(string) + if v, ok := d.GetOk(pdnsZoneDescription); ok { + zoneDescription = v.(string) + } + if v, ok := d.GetOk(pdnsZoneLabel); ok { + zoneLabel = v.(string) + } + createZoneOptions := sess.NewCreateDnszoneOptions(instanceID) + createZoneOptions.SetName(zoneName) + createZoneOptions.SetDescription(zoneDescription) + createZoneOptions.SetLabel(zoneLabel) + response, detail, err := sess.CreateDnszone(createZoneOptions) + if err != nil { + return fmt.Errorf("Error creating pdns zone:%s\n%s", err, detail) + } + + d.SetId(fmt.Sprintf("%s/%s", *response.InstanceID, *response.ID)) + d.Set(pdnsZoneID, *response.ID) + + return resourceIBMPrivateDNSZoneRead(d, meta) +} + +func resourceIBMPrivateDNSZoneRead(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idSet := strings.Split(d.Id(), "/") + getZoneOptions := sess.NewGetDnszoneOptions(idSet[0], idSet[1]) + response, detail, err := sess.GetDnszone(getZoneOptions) + if err != nil { + return fmt.Errorf("Error fetching pdns zone:%s\n%s", err, detail) + } + + d.Set(pdnsZoneID, response.ID) + d.Set(pdnsInstanceID, response.InstanceID) + d.Set(pdnsZoneName, response.Name) + d.Set(pdnsZoneDescription, response.Description) + 
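// Label, timestamps, and state are computed by the service and mirrored into Terraform state. + 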
d.Set(pdnsZoneLabel, response.Label) + d.Set(pdnsZoneCreatedOn, response.CreatedOn) + d.Set(pdnsZoneModifiedOn, response.ModifiedOn) + d.Set(pdnsZoneState, response.State) + + return nil +} + +func resourceIBMPrivateDNSZoneUpdate(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idSet := strings.Split(d.Id(), "/") + + // Verify that the DNS zone exists + getZoneOptions := sess.NewGetDnszoneOptions(idSet[0], idSet[1]) + _, response, err := sess.GetDnszone(getZoneOptions) + if err != nil { + return fmt.Errorf("Error fetching pdns zone:%s\n%s", err, response) + } + + // Update the DNS zone if any attribute has changed + + if d.HasChange(pdnsZoneLabel) || d.HasChange(pdnsZoneDescription) { + updateZoneOptions := sess.NewUpdateDnszoneOptions(idSet[0], idSet[1]) + description := d.Get(pdnsZoneDescription).(string) + label := d.Get(pdnsZoneLabel).(string) + updateZoneOptions.SetDescription(description) + updateZoneOptions.SetLabel(label) + + _, detail, err := sess.UpdateDnszone(updateZoneOptions) + + if err != nil { + return fmt.Errorf("Error updating pdns zone:%s\n%s", err, detail) + } + } + + return resourceIBMPrivateDNSZoneRead(d, meta) +} + +func resourceIBMPrivateDNSZoneDelete(d *schema.ResourceData, meta interface{}) error { + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return err + } + + idSet := strings.Split(d.Id(), "/") + + deleteZoneOptions := sess.NewDeleteDnszoneOptions(idSet[0], idSet[1]) + response, err := sess.DeleteDnszone(deleteZoneOptions) + if err != nil { + return fmt.Errorf("Error deleting pdns zone:%s\n%s", err, response) + } + + d.SetId("") + return nil +} + +func resourceIBMPrivateDNSZoneExists(d *schema.ResourceData, meta interface{}) (bool, error) { + + sess, err := meta.(ClientSession).PrivateDNSClientSession() + if err != nil { + return false, err + } + + idSet := strings.Split(d.Id(), "/") + getZoneOptions := sess.NewGetDnszoneOptions(idSet[0], idSet[1]) + _, response, err := sess.GetDnszone(getZoneOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_push_notification_chrome.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_push_notification_chrome.go new file mode 100644 index 00000000000..7f72e060156 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_push_notification_chrome.go @@ -0,0 +1,124 @@ +package ibm + +import ( + "fmt" + + "github.com/IBM/push-notifications-go-sdk/pushservicev1" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMPNApplicationChrome() *schema.Resource { + return &schema.Resource{ + Read: resourceApplicationChromeRead, + Create: resourceApplicationChromeCreate, + Update: resourceApplicationChromeUpdate, + Delete: resourceApplicationChromeDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Unique guid of the push notification instance.", + }, + "server_key": { + Type: schema.TypeString, + Required: true, + Description: "A server key that gives the push service authorized access to Google services used for Chrome Web Push.", + }, + "web_site_url": { + Type: schema.TypeString, + Required: true, + Description: "The URL of 
the WebSite / WebApp that should be permitted to subscribe to WebPush.", + }, + }, + } +} + +func resourceApplicationChromeCreate(d *schema.ResourceData, meta interface{}) error { + pnClient, err := meta.(ClientSession).PushServiceV1() + if err != nil { + return err + } + + serverKey := d.Get("server_key").(string) + websiteURL := d.Get("web_site_url").(string) + guid := d.Get("guid").(string) + + _, response, err := pnClient.SaveChromeWebConf(&pushservicev1.SaveChromeWebConfOptions{ + ApplicationID: &guid, + ApiKey: &serverKey, + WebSiteURL: &websiteURL, + }) + + if err != nil { + d.SetId("") + return fmt.Errorf("Error configuring chrome web platform: %s with response code %d", err, response.StatusCode) + } + d.SetId(guid) + + return resourceApplicationChromeRead(d, meta) +} + +func resourceApplicationChromeUpdate(d *schema.ResourceData, meta interface{}) error { + + if d.HasChanges("server_key", "web_site_url") { + return resourceApplicationChromeCreate(d, meta) + } + return nil +} + +func resourceApplicationChromeRead(d *schema.ResourceData, meta interface{}) error { + pnClient, err := meta.(ClientSession).PushServiceV1() + if err != nil { + return err + } + + guid := d.Id() + + chromeWebConf, response, err := pnClient.GetChromeWebConf(&pushservicev1.GetChromeWebConfOptions{ + ApplicationID: &guid, + }) + + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error fetching chrome web platform configuration: %s with response code %d", err, response.StatusCode) + } + + d.SetId(guid) + + if response.StatusCode == 200 { + d.Set("server_key", *chromeWebConf.ApiKey) + d.Set("web_site_url", *chromeWebConf.WebSiteURL) + } + return nil +} + +func resourceApplicationChromeDelete(d *schema.ResourceData, meta interface{}) error { + pnClient, err := meta.(ClientSession).PushServiceV1() + if err != nil { + return err + } + guid := d.Get("guid").(string) + + response, err := pnClient.DeleteChromeWebConf(&pushservicev1.DeleteChromeWebConfOptions{ + ApplicationID: &guid, + }) + + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error deleting chrome web platform configuration: %s with response code %d", err, response.StatusCode) + } + + d.SetId("") + + return nil + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_group.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_group.go new file mode 100644 index 00000000000..44705f15102 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_group.go @@ -0,0 +1,251 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + + rg "github.com/IBM/platform-services-go-sdk/resourcemanagerv2" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMResourceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMResourceGroupCreate, + Read: resourceIBMResourceGroupRead, + Update: resourceIBMResourceGroupUpdate, + Delete: resourceIBMResourceGroupDelete, + Exists: resourceIBMResourceGroupExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the resource group", + }, + "default": { + Description: "Specifies whether it is the default resource group", + Type: schema.TypeBool, + Computed: true, + }, + + "state": { + Type: schema.TypeString, + Description: "State of the resource group", + Computed: true, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "crn": { + Type: schema.TypeString, + Description: "The full CRN associated with the resource group", + Computed: true, + }, + "created_at": { + Type: schema.TypeString, + Description: "The date when the resource group was initially created.", + Computed: true, + }, + "updated_at": { + Type: schema.TypeString, + Description: "The date when the resource group was last updated.", + Computed: true, + }, + "teams_url": { + Type: schema.TypeString, + Description: "The URL to access the team details associated with the resource group.", + Computed: true, + }, + "payment_methods_url": { + Type: schema.TypeString, + Description: "The URL to access the payment methods details associated with the resource group.", + Computed: true, + }, + "quota_url": { + Type: schema.TypeString, + Description: "The URL to access the quota details associated with the resource group.", + Computed: true, + }, + "quota_id": { + Type: schema.TypeString, + Description: "An alpha-numeric value identifying the quota ID associated with the resource group.", + Computed: true, + }, + "resource_linkages": { + Type: schema.TypeSet, + Description: "An array of the resources linked to the resource group", + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + }, + } +} + +func resourceIBMResourceGroupCreate(d *schema.ResourceData, meta interface{}) error { + rMgtClient, err := meta.(ClientSession).ResourceManagerV2API() + if err != nil { + return err + } + name := d.Get("name").(string) + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + accountID := userDetails.userAccount + + resourceGroupCreate := rg.CreateResourceGroupOptions{ + Name: &name, + AccountID: &accountID, + } + + resourceGroup, resp, err := rMgtClient.CreateResourceGroup(&resourceGroupCreate) + if err != nil { + return fmt.Errorf("Error creating resource group: %s with response code %s", err, resp) + } + + d.SetId(*resourceGroup.ID) + + return resourceIBMResourceGroupRead(d, meta) +} + +func resourceIBMResourceGroupRead(d *schema.ResourceData, meta interface{}) error { + rMgtClient, err := meta.(ClientSession).ResourceManagerV2API() + if err != nil { + return err + } + resourceGroupID := d.Id() + resourceGroupGet := rg.GetResourceGroupOptions{ + ID: &resourceGroupID, + } + + resourceGroup, resp, err := rMgtClient.GetResourceGroup(&resourceGroupGet) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + 
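// The group no longer exists (deleted out of band); clearing the ID lets + // Terraform drop it from state and plan a re-create instead of failing. + 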
log.Printf("[WARN] Resource Group is not found") + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving resource group: %swith responce code %s", err, resp) + } + + d.Set("name", *resourceGroup.Name) + if resourceGroup.State != nil { + d.Set("state", *resourceGroup.State) + } + if resourceGroup.Default != nil { + d.Set("default", *resourceGroup.Default) + } + if resourceGroup.CRN != nil { + d.Set("crn", *resourceGroup.CRN) + } + if resourceGroup.CreatedAt != nil { + createdAt := *resourceGroup.CreatedAt + d.Set("created_at", createdAt.String()) + } + if resourceGroup.UpdatedAt != nil { + UpdatedAt := *resourceGroup.UpdatedAt + d.Set("updated_at", UpdatedAt.String()) + } + if resourceGroup.TeamsURL != nil { + d.Set("teams_url", *resourceGroup.TeamsURL) + } + if resourceGroup.PaymentMethodsURL != nil { + d.Set("payment_methods_url", *resourceGroup.PaymentMethodsURL) + } + if resourceGroup.QuotaURL != nil { + d.Set("quota_url", *resourceGroup.QuotaURL) + } + if resourceGroup.QuotaID != nil { + d.Set("quota_id", *resourceGroup.QuotaID) + } + if resourceGroup.ResourceLinkages != nil { + rl := make([]string, 0) + for _, r := range resourceGroup.ResourceLinkages { + rl = append(rl, r.(string)) + } + d.Set("resource_linkages", rl) + } + return nil +} + +func resourceIBMResourceGroupUpdate(d *schema.ResourceData, meta interface{}) error { + rMgtClient, err := meta.(ClientSession).ResourceManagerV2API() + if err != nil { + return err + } + + resourceGroupID := d.Id() + resourceGroupUpdate := rg.UpdateResourceGroupOptions{ + ID: &resourceGroupID, + } + hasChange := false + if d.HasChange("name") { + name := d.Get("name").(string) + resourceGroupUpdate.Name = &name + hasChange = true + } + + if hasChange { + _, resp, err := rMgtClient.UpdateResourceGroup(&resourceGroupUpdate) + if err != nil { + return fmt.Errorf("Error updating resource group: %s with responce code %s", err, resp) + } + + } + return resourceIBMResourceGroupRead(d, meta) +} + +func resourceIBMResourceGroupDelete(d *schema.ResourceData, meta interface{}) error { + rMgtClient, err := meta.(ClientSession).ResourceManagerV2API() + if err != nil { + return err + } + + resourceGroupID := d.Id() + resourceGroupDelete := rg.DeleteResourceGroupOptions{ + ID: &resourceGroupID, + } + + resp, err := rMgtClient.DeleteResourceGroup(&resourceGroupDelete) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + log.Printf("[WARN] Resource Group is not found") + return nil + } + return fmt.Errorf("Error Deleting resource group: %s with responce code %s", err, resp) + } + + d.SetId("") + + return nil +} + +func resourceIBMResourceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { + rMgtClient, err := meta.(ClientSession).ResourceManagerV2API() + if err != nil { + return false, err + } + resourceGroupID := d.Id() + resourceGroupGet := rg.GetResourceGroupOptions{ + ID: &resourceGroupID, + } + + resourceGroup, resp, err := rMgtClient.GetResourceGroup(&resourceGroupGet) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error communicating with the API: %s with responce code %s", err, resp) + } + + return *resourceGroup.ID == resourceGroupID, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_instance.go new file mode 100644 index 00000000000..0cf1d5f64c8 --- /dev/null +++ 
b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_instance.go @@ -0,0 +1,894 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "os" + "strconv" + "strings" + "time" + + rc "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/models" +) + +const ( + rsInstanceSuccessStatus = "active" + rsInstanceProgressStatus = "in progress" + rsInstanceProvisioningStatus = "provisioning" + rsInstanceInactiveStatus = "inactive" + rsInstanceFailStatus = "failed" + rsInstanceRemovedStatus = "removed" + rsInstanceReclamation = "pending_reclamation" +) + +func resourceIBMResourceInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMResourceInstanceCreate, + Read: resourceIBMResourceInstanceRead, + Update: resourceIBMResourceInstanceUpdate, + Delete: resourceIBMResourceInstanceDelete, + Exists: resourceIBMResourceInstanceExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "A name for the resource instance", + }, + + "service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the service offering like cloud-object-storage, kms etc", + }, + + "plan": { + Type: schema.TypeString, + Required: true, + Description: "The plan type of the service", + }, + + "location": { + Description: "The location where the instance available", + Required: true, + ForceNew: true, + Type: schema.TypeString, + }, + + "resource_group_id": { + Description: "The resource group id", + Optional: true, + ForceNew: true, + Type: schema.TypeString, + Computed: true, + }, + + "parameters": { + Type: schema.TypeMap, + Optional: true, + Description: "Arbitrary parameters to pass. Must be a JSON object", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_resource_instance", "tag")}, + Set: resourceIBMVPCHash, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Status of resource instance", + }, + + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "CRN of resource instance", + }, + + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "Guid of resource instance", + }, + + "service_endpoints": { + Description: "Types of the service endpoints. 
Possible values are 'public', 'private', 'public-and-private'.", + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAllowedStringValue([]string{"public", "private", "public-and-private"}), + }, + + "dashboard_url": { + Description: "Dashboard URL to access resource.", + Type: schema.TypeString, + Computed: true, + }, + + "plan_history": { + Description: "The plan history of the instance.", + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_plan_id": { + Type: schema.TypeString, + Computed: true, + }, + "start_date": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "account_id": { + Description: "An alpha-numeric value identifying the account ID.", + Type: schema.TypeString, + Computed: true, + }, + + "resource_group_crn": { + Description: "The long ID (full CRN) of the resource group", + Type: schema.TypeString, + Computed: true, + }, + + "resource_id": { + Description: "The unique ID of the offering", + Type: schema.TypeString, + Computed: true, + }, + + "resource_plan_id": { + Description: "The unique ID of the plan associated with the offering", + Type: schema.TypeString, + Computed: true, + }, + + "target_crn": { + Description: "The full deployment CRN as defined in the global catalog", + Type: schema.TypeString, + Computed: true, + }, + + "state": { + Description: "The current state of the instance.", + Type: schema.TypeString, + Computed: true, + }, + + "type": { + Description: "The type of the instance, e.g. service_instance.", + Type: schema.TypeString, + Computed: true, + }, + + "sub_type": { + Description: "The sub-type of instance, e.g. cfaas .", + Type: schema.TypeString, + Computed: true, + }, + + "allow_cleanup": { + Description: "A boolean that dictates if the resource instance should be deleted (cleaned up) during the processing of a region instance delete call.", + Type: schema.TypeBool, + Computed: true, + }, + + "locked": { + Description: "A boolean that dictates if the resource instance should be deleted (cleaned up) during the processing of a region instance delete call.", + Type: schema.TypeBool, + Computed: true, + }, + + "last_operation": { + Type: schema.TypeMap, + Computed: true, + Description: "The status of the last operation requested on the instance", + }, + + "resource_aliases_url": { + Description: "The relative path to the resource aliases for the instance.", + Type: schema.TypeString, + Computed: true, + }, + + "resource_bindings_url": { + Description: "The relative path to the resource bindings for the instance.", + Type: schema.TypeString, + Computed: true, + }, + + "resource_keys_url": { + Description: "The relative path to the resource keys for the instance.", + Type: schema.TypeString, + Computed: true, + }, + + "created_at": { + Type: schema.TypeString, + Description: "The date when the instance was created.", + Computed: true, + }, + + "created_by": { + Type: schema.TypeString, + Description: "The subject who created the instance.", + Computed: true, + }, + + "update_at": { + Type: schema.TypeString, + Description: "The date when the instance was last updated.", + Computed: true, + }, + + "update_by": { + Type: schema.TypeString, + Description: "The subject who updated the instance.", + Computed: true, + }, + + "deleted_at": { + Type: schema.TypeString, + Description: "The date when the instance was deleted.", + Computed: true, + }, + + "deleted_by": { + Type: schema.TypeString, + Description: "The subject who deleted the 
instance.", + Computed: true, + }, + + "scheduled_reclaim_at": { + Type: schema.TypeString, + Description: "The date when the instance was scheduled for reclamation.", + Computed: true, + }, + + "scheduled_reclaim_by": { + Type: schema.TypeString, + Description: "The subject who initiated the instance reclamation.", + Computed: true, + }, + + "restored_at": { + Type: schema.TypeString, + Description: "The date when the instance under reclamation was restored.", + Computed: true, + }, + + "restored_by": { + Type: schema.TypeString, + Description: "The subject who restored the instance back from reclamation.", + Computed: true, + }, + + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + + ResourceCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of the resource", + }, + + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + + ResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The resource group name in which resource is provisioned", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about the resource", + }, + + "extensions": { + Type: schema.TypeMap, + Computed: true, + Description: "The extended metadata as a map associated with the resource instance.", + }, + }, + } +} + +func resourceIBMResourceInstanceValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 1) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "tag", + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmResourceInstanceResourceValidator := ResourceValidator{ResourceName: "ibm_resource_instance", Schema: validateSchema} + return &ibmResourceInstanceResourceValidator +} + +func resourceIBMResourceInstanceCreate(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + + serviceName := d.Get("service").(string) + plan := d.Get("plan").(string) + name := d.Get("name").(string) + location := d.Get("location").(string) + + rsInst := rc.CreateResourceInstanceOptions{ + Name: &name, + } + + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + serviceOff, err := rsCatRepo.FindByName(serviceName, true) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + if metadata, ok := serviceOff[0].Metadata.(*models.ServiceResourceMetadata); ok { + if !metadata.Service.RCProvisionable { + return fmt.Errorf("%s cannot be provisioned by resource controller", serviceName) + } + } else { + return fmt.Errorf("Cannot create instance of resource %s\nUse 'ibm_service_instance' if the resource is a Cloud Foundry service", serviceName) + } + + servicePlan, err := rsCatRepo.GetServicePlanID(serviceOff[0], plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + rsInst.ResourcePlanID = &servicePlan + + deployments, err := rsCatRepo.ListDeployments(servicePlan) + if err != nil { + return fmt.Errorf("Error retrieving deployment for plan %s : %s", plan, err) + } + if len(deployments) == 0 { + return fmt.Errorf("No deployment found for service plan : 
%s", plan) + } + deployments, supportedLocations := filterDeployments(deployments, location) + + if len(deployments) == 0 { + locationList := make([]string, 0, len(supportedLocations)) + for l := range supportedLocations { + locationList = append(locationList, l) + } + return fmt.Errorf("No deployment found for service plan %s at location %s.\nValid location(s) are: %q.\nUse 'ibm_service_instance' if the service is a Cloud Foundry service.", plan, location, locationList) + } + + rsInst.Target = &deployments[0].CatalogCRN + + if rsGrpID, ok := d.GetOk("resource_group_id"); ok { + rg := rsGrpID.(string) + rsInst.ResourceGroup = &rg + } else { + defaultRg, err := defaultResourceGroup(meta) + if err != nil { + return err + } + rsInst.ResourceGroup = &defaultRg + } + + params := map[string]interface{}{} + + if serviceEndpoints, ok := d.GetOk("service_endpoints"); ok { + params["service-endpoints"] = serviceEndpoints.(string) + } + + if parameters, ok := d.GetOk("parameters"); ok { + temp := parameters.(map[string]interface{}) + for k, v := range temp { + if v == "true" || v == "false" { + b, _ := strconv.ParseBool(v.(string)) + params[k] = b + } else if strings.HasPrefix(v.(string), "[") && strings.HasSuffix(v.(string), "]") { + //transform v.(string) to be []string + arrayString := v.(string) + trimLeft := strings.TrimLeft(arrayString, "[") + trimRight := strings.TrimRight(trimLeft, "]") + array := strings.Split(trimRight, ",") + result := []string{} + for _, a := range array { + result = append(result, strings.Trim(a, "\"")) + } + params[k] = result + } else { + params[k] = v + } + } + + } + + rsInst.Parameters = params + + //Start to create resource instance + instance, resp, err := rsConClient.CreateResourceInstance(&rsInst) + if err != nil { + log.Printf( + "Error when creating resource instance: %s, Instance info NAME->%s, LOCATION->%s, GROUP_ID->%s, PLAN_ID->%s", + err, *rsInst.Name, *rsInst.Target, *rsInst.ResourceGroup, *rsInst.ResourcePlanID) + return fmt.Errorf("Error when creating resource instance: %s with resp code: %s", err, resp) + } + + d.SetId(*instance.ID) + + _, err = waitForResourceInstanceCreate(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for create resource instance (%s) to be succeeded: %s", d.Id(), err) + } + + v := os.Getenv("IC_ENV_TAGS") + if _, ok := d.GetOk("tags"); ok || v != "" { + oldList, newList := d.GetChange("tags") + err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on create of resource instance (%s) tags: %s", d.Id(), err) + } + } + + return resourceIBMResourceInstanceRead(d, meta) +} +func resourceIBMResourceInstanceRead(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + + instanceID := d.Id() + resourceInstanceGet := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + + instance, resp, err := rsConClient.GetResourceInstance(&resourceInstanceGet) + if err != nil { + return fmt.Errorf("Error retrieving resource instance: %s with resp code: %s", err, resp) + } + + tags, err := GetTagsUsingCRN(meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on get of resource instance tags (%s) tags: %s", d.Id(), err) + } + d.Set("tags", tags) + d.Set("name", instance.Name) + d.Set("status", instance.State) + d.Set("resource_group_id", instance.ResourceGroupID) + if instance.CRN != nil { + location := strings.Split(*instance.CRN, ":") + if len(location) > 5 { + d.Set("location", 
location[5]) + } + } + d.Set("crn", instance.CRN) + d.Set("dashboard_url", instance.DashboardURL) + + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + serviceOff, err := rsCatRepo.GetServiceName(*instance.ResourceID) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + d.Set("service", serviceOff) + + d.Set(ResourceName, instance.Name) + d.Set(ResourceCRN, instance.CRN) + d.Set(ResourceStatus, instance.State) + d.Set(ResourceGroupName, instance.ResourceGroupCRN) + + rcontroller, err := getBaseController(meta) + if err != nil { + return err + } + d.Set(ResourceControllerURL, rcontroller+"/services/") + + servicePlan, err := rsCatRepo.GetServicePlanName(*instance.ResourcePlanID) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + d.Set("plan", servicePlan) + d.Set("guid", instance.GUID) + if instance.Parameters != nil { + if endpoint, ok := instance.Parameters["service-endpoints"]; ok { + d.Set("service_endpoints", endpoint) + } + } + + if len(instance.Extensions) == 0 { + d.Set("extensions", instance.Extensions) + } else { + d.Set("extensions", Flatten(instance.Extensions)) + } + d.Set("account_id", instance.AccountID) + d.Set("restored_by", instance.RestoredBy) + if instance.RestoredAt != nil { + d.Set("restored_at", instance.RestoredAt.String()) + } + d.Set("scheduled_reclaim_by", instance.ScheduledReclaimBy) + if instance.ScheduledReclaimAt != nil { + d.Set("scheduled_reclaim_at", instance.ScheduledReclaimAt.String()) + } + if instance.ScheduledReclaimAt != nil { + d.Set("deleted_at", instance.DeletedAt.String()) + } + d.Set("deleted_by", instance.DeletedBy) + if instance.UpdatedAt != nil { + d.Set("update_at", instance.UpdatedAt.String()) + } + if instance.CreatedAt != nil { + d.Set("created_at", instance.CreatedAt.String()) + } + d.Set("update_by", instance.UpdatedBy) + d.Set("created_by", instance.CreatedBy) + d.Set("resource_keys_url", instance.ResourceKeysURL) + d.Set("resource_bindings_url", instance.ResourceBindingsURL) + d.Set("resource_aliases_url", instance.ResourceAliasesURL) + if instance.LastOperation != nil { + d.Set("last_operation", Flatten(instance.LastOperation)) + } + d.Set("locked", instance.Locked) + d.Set("allow_cleanup", instance.AllowCleanup) + d.Set("type", instance.Type) + d.Set("state", instance.State) + d.Set("sub_type", instance.SubType) + d.Set("target_crn", instance.TargetCRN) + d.Set("resource_plan_id", instance.ResourcePlanID) + d.Set("resource_id", instance.ResourceID) + d.Set("resource_group_crn", instance.ResourceGroupCRN) + if instance.PlanHistory != nil { + d.Set("plan_history", flattenPlanHistory(instance.PlanHistory)) + } + + return nil +} + +func resourceIBMResourceInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + + instanceID := d.Id() + + resourceInstanceUpdate := rc.UpdateResourceInstanceOptions{ + ID: &instanceID, + } + if d.HasChange("name") { + name := d.Get("name").(string) + resourceInstanceUpdate.Name = &name + } + + if d.HasChange("plan") { + plan := d.Get("plan").(string) + service := d.Get("service").(string) + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return err + } + rsCatRepo := rsCatClient.ResourceCatalog() + + serviceOff, err := rsCatRepo.FindByName(service, true) + if err != nil { + return fmt.Errorf("Error 
retrieving service offering: %s", err) + } + + servicePlan, err := rsCatRepo.GetServicePlanID(serviceOff[0], plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + + resourceInstanceUpdate.ResourcePlanID = &servicePlan + + } + params := map[string]interface{}{} + + if d.HasChange("service_endpoints") { + endpoint := d.Get("service_endpoints").(string) + params["service-endpoints"] = endpoint + } + + resourceInstanceGet := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + if d.HasChange("parameters") { + instance, resp, err := rsConClient.GetResourceInstance(&resourceInstanceGet) + if err != nil { + return fmt.Errorf("Error retrieving resource instance: %s with resp code: %s", err, resp) + } + + if parameters, ok := d.GetOk("parameters"); ok { + temp := parameters.(map[string]interface{}) + for k, v := range temp { + if v == "true" || v == "false" { + b, _ := strconv.ParseBool(v.(string)) + params[k] = b + } else if strings.HasPrefix(v.(string), "[") && strings.HasSuffix(v.(string), "]") { + //transform v.(string) to be []string + arrayString := v.(string) + trimLeft := strings.TrimLeft(arrayString, "[") + trimRight := strings.TrimRight(trimLeft, "]") + array := strings.Split(trimRight, ",") + result := []string{} + for _, a := range array { + result = append(result, strings.Trim(a, "\"")) + } + params[k] = result + } else { + params[k] = v + } + } + } + serviceEndpoints := d.Get("service_endpoints").(string) + if serviceEndpoints != "" { + endpoint := d.Get("service_endpoints").(string) + params["service-endpoints"] = endpoint + } else if _, ok := instance.Parameters["service-endpoints"]; ok { + params["service-endpoints"] = instance.Parameters["service-endpoints"] + } + + } + if d.HasChange("service_endpoints") || d.HasChange("parameters") { + resourceInstanceUpdate.Parameters = params + } + + instance, resp, err := rsConClient.GetResourceInstance(&resourceInstanceGet) + if err != nil { + return fmt.Errorf("Error Getting resource instance: %s with resp code: %s", err, resp) + } + + if d.HasChange("tags") { + oldList, newList := d.GetChange(isVPCTags) + err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.CRN) + if err != nil { + log.Printf( + "Error on update of resource instance (%s) tags: %s", d.Id(), err) + } + } + + _, resp, err = rsConClient.UpdateResourceInstance(&resourceInstanceUpdate) + if err != nil { + return fmt.Errorf("Error updating resource instance: %s with resp code: %s", err, resp) + } + + _, err = waitForResourceInstanceUpdate(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for update resource instance (%s) to be succeeded: %s", d.Id(), err) + } + + return resourceIBMResourceInstanceRead(d, meta) +} + +func resourceIBMResourceInstanceDelete(d *schema.ResourceData, meta interface{}) error { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + id := d.Id() + recursive := true + resourceInstanceDelete := rc.DeleteResourceInstanceOptions{ + ID: &id, + Recursive: &recursive, + } + + resp, error := rsConClient.DeleteResourceInstance(&resourceInstanceDelete) + if error != nil { + return fmt.Errorf("Error deleting resource instance: %s with resp code: %s", error, resp) + } + + _, err = waitForResourceInstanceDelete(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for resource instance (%s) to be deleted: %s", d.Id(), err) + } + + d.SetId("") + + return nil +} +func resourceIBMResourceInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) 
{ + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + resourceInstanceGet := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + + instance, resp, err := rsConClient.GetResourceInstance(&resourceInstanceGet) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error communicating with the API: %s with resp code: %s", err, resp) + } + if instance != nil && (strings.Contains(*instance.State, "removed") || strings.Contains(*instance.State, rsInstanceReclamation)) { + log.Printf("[WARN] Removing instance from state because it's in removed or pending_reclamation state") + d.SetId("") + return false, nil + } + + return *instance.ID == instanceID, nil +} + +func waitForResourceInstanceCreate(d *schema.ResourceData, meta interface{}) (interface{}, error) { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + resourceInstanceGet := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{rsInstanceProgressStatus, rsInstanceInactiveStatus, rsInstanceProvisioningStatus}, + Target: []string{rsInstanceSuccessStatus}, + Refresh: func() (interface{}, string, error) { + instance, resp, err := rsConClient.GetResourceInstance(&resourceInstanceGet) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + return nil, "", fmt.Errorf("The resource instance %s does not exist anymore: %v", d.Id(), err) + } + return nil, "", fmt.Errorf("Get the resource instance %s failed with resp code: %s, err: %v", d.Id(), resp, err) + } + if *instance.State == rsInstanceFailStatus { + return instance, *instance.State, fmt.Errorf("The resource instance %s failed: %v", d.Id(), err) + } + return instance, *instance.State, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func waitForResourceInstanceUpdate(d *schema.ResourceData, meta interface{}) (interface{}, error) { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + resourceInstanceGet := rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{rsInstanceProgressStatus, rsInstanceInactiveStatus}, + Target: []string{rsInstanceSuccessStatus}, + Refresh: func() (interface{}, string, error) { + instance, resp, err := rsConClient.GetResourceInstance(&resourceInstanceGet) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + return nil, "", fmt.Errorf("The resource instance %s does not exist anymore: %v", d.Id(), err) + } + return nil, "", fmt.Errorf("Get the resource instance %s failed with resp code: %s, err: %v", d.Id(), resp, err) + } + if *instance.State == rsInstanceFailStatus { + return instance, *instance.State, fmt.Errorf("The resource instance %s failed: %v", d.Id(), err) + } + return instance, *instance.State, nil + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func waitForResourceInstanceDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) { + rsConClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + instanceID := d.Id() + resourceInstanceGet 
:= rc.GetResourceInstanceOptions{ + ID: &instanceID, + } + stateConf := &resource.StateChangeConf{ + Pending: []string{rsInstanceProgressStatus, rsInstanceInactiveStatus, rsInstanceSuccessStatus}, + Target: []string{rsInstanceRemovedStatus, rsInstanceReclamation}, + Refresh: func() (interface{}, string, error) { + instance, resp, err := rsConClient.GetResourceInstance(&resourceInstanceGet) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + return instance, rsInstanceSuccessStatus, nil + } + return nil, "", fmt.Errorf("Get the resource instance %s failed with resp code: %s, err: %v", d.Id(), resp, err) + } + if *instance.State == rsInstanceFailStatus { + return instance, *instance.State, fmt.Errorf("The resource instance %s failed to delete: %v", d.Id(), err) + } + return instance, *instance.State, nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func filterDeployments(deployments []models.ServiceDeployment, location string) ([]models.ServiceDeployment, map[string]bool) { + supportedDeployments := []models.ServiceDeployment{} + supportedLocations := make(map[string]bool) + for _, d := range deployments { + if d.Metadata.RCCompatible { + deploymentLocation := d.Metadata.Deployment.Location + supportedLocations[deploymentLocation] = true + if deploymentLocation == location { + supportedDeployments = append(supportedDeployments, d) + } + } + } + return supportedDeployments, supportedLocations +} + +func flattenPlanHistory(keys []rc.PlanHistoryItem) []interface{} { + var out = make([]interface{}, len(keys), len(keys)) + for i, k := range keys { + m := make(map[string]interface{}) + m["resource_plan_id"] = k.ResourcePlanID + m["start_date"] = k.StartDate.String() + out[i] = m + } + return out +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_key.go new file mode 100644 index 00000000000..442fb1b778e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_key.go @@ -0,0 +1,500 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
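+//
+// Example usage (illustrative sketch only; the resource names and the
+// "default" resource group data source below are assumptions, not part of
+// this provider's source):
+//
+//	resource "ibm_resource_instance" "cos" {
+//	  name              = "example-cos"
+//	  service           = "cloud-object-storage"
+//	  plan              = "standard"
+//	  location          = "global"
+//	  resource_group_id = data.ibm_resource_group.default.id
+//	}
+//
+//	resource "ibm_resource_key" "cos_creds" {
+//	  name                 = "example-cos-key"
+//	  role                 = "Writer"
+//	  resource_instance_id = ibm_resource_instance.cos.id
+//	  parameters           = { HMAC = true }
+//	}
+//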
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/json" + "fmt" + "log" + "strconv" + "strings" + "time" + + rc "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" +) + +func resourceIBMResourceKey() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMResourceKeyCreate, + Read: resourceIBMResourceKeyRead, + Update: resourceIBMResourceKeyUpdate, + Delete: resourceIBMResourceKeyDelete, + Exists: resourceIBMResourceKeyExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the resource key", + }, + + "role": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the user role.Valid roles are Writer, Reader, Manager, Administrator, Operator, Viewer, Editor and Custom Roles.", + // ValidateFunc: validateRole, + }, + + "resource_instance_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The id of the resource instance for which to create resource key", + ConflictsWith: []string{"resource_alias_id"}, + }, + + "resource_alias_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The id of the resource alias for which to create resource key", + ConflictsWith: []string{"resource_instance_id"}, + }, + + "parameters": { + Type: schema.TypeMap, + Optional: true, + DiffSuppressFunc: applyOnce, + Description: "Arbitrary parameters to pass. 
Must be a JSON object", + }, + + "credentials": { + Description: "Credentials asociated with the key", + Type: schema.TypeMap, + Sensitive: true, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Status of resource key", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "crn of resource key", + }, + + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "When you create a new key, a globally unique identifier (GUID) is assigned.", + }, + + "url": { + Type: schema.TypeString, + Computed: true, + Description: "When you created a new key, a relative URL path is created identifying the location of the key.", + }, + + "account_id": { + Type: schema.TypeString, + Computed: true, + Description: "An alpha-numeric value identifying the account ID.", + }, + + "resource_group_id": { + Type: schema.TypeString, + Computed: true, + Description: "The short ID of the resource group.", + }, + + "source_crn": { + Type: schema.TypeString, + Computed: true, + Description: "The CRN of resource instance or alias associated to the key.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The state of the key.", + }, + + "iam_compatible": { + Type: schema.TypeBool, + Computed: true, + Description: "Specifies whether the key’s credentials support IAM.", + }, + + "resource_instance_url": { + Type: schema.TypeString, + Computed: true, + Description: "The relative path to the resource.", + }, + + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date when the key was created.", + }, + + "updated_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date when the key was last updated.", + }, + + "deleted_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date when the key was deleted.", + }, + + "created_by": { + Type: schema.TypeString, + Computed: true, + Description: "The subject who created the key.", + }, + + "updated_by": { + Type: schema.TypeString, + Computed: true, + Description: "The subject who updated the key.", + }, + + "deleted_by": { + Type: schema.TypeString, + Computed: true, + Description: "The subject who deleted the key.", + }, + }, + } +} + +func resourceIBMResourceKeyCreate(d *schema.ResourceData, meta interface{}) error { + rsContClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + name := d.Get("name").(string) + role := d.Get("role").(string) + + var instanceID, aliasID string + if insID, ok := d.GetOk("resource_instance_id"); ok { + instanceID = insID.(string) + } + + if aliID, ok := d.GetOk("resource_alias_id"); ok { + aliasID = aliID.(string) + } + + if instanceID == "" && aliasID == "" { + return fmt.Errorf("Provide either `resource_instance_id` or `resource_alias_id`") + } + + keyParameters := rc.ResourceKeyPostParameters{} + + if parameters, ok := d.GetOk("parameters"); ok { + temp := parameters.(map[string]interface{}) + for k, v := range temp { + if v == "true" || v == "false" { + b, _ := strconv.ParseBool(v.(string)) + keyParameters.SetProperty(k, b) + } else { + keyParameters.SetProperty(k, v) + } + } + } + + resourceInstance, sourceCRN, err := getResourceInstanceAndCRN(d, meta) + if err != nil { + return fmt.Errorf("Error creating resource key when get instance and CRN: %s", err) + } + + serviceID := 
resourceInstance.ResourceID + + rsCatClient, err := meta.(ClientSession).ResourceCatalogAPI() + if err != nil { + return fmt.Errorf("Error creating resource key when get ResourceCatalogAPI: %s", err) + } + + service, err := rsCatClient.ResourceCatalog().Get(*serviceID, true) + if err != nil { + return fmt.Errorf("Error creating resource key when get service: %s", err) + } + serviceRole, err := getRoleFromName(role, service.Name, meta) + if err != nil { + return fmt.Errorf("Error creating resource key when get role: %s", err) + } + + keyParameters.SetProperty("role_crn", serviceRole.RoleID) + + resourceKeyCreate := rc.CreateResourceKeyOptions{ + Name: &name, + Source: sourceCRN, + Role: serviceRole.RoleID, + Parameters: &keyParameters, + } + resourceKey, resp, err := rsContClient.CreateResourceKey(&resourceKeyCreate) + if err != nil { + return fmt.Errorf("Error creating resource key: %s with resp code: %s", err, resp) + } + + d.SetId(*resourceKey.ID) + + return resourceIBMResourceKeyRead(d, meta) +} + +func resourceIBMResourceKeyUpdate(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceIBMResourceKeyRead(d *schema.ResourceData, meta interface{}) error { + rsContClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + resourceKeyID := d.Id() + resourceKeyGet := rc.GetResourceKeyOptions{ + ID: &resourceKeyID, + } + + resourceKey, resp, err := rsContClient.GetResourceKey(&resourceKeyGet) + if err != nil || resourceKey == nil { + return fmt.Errorf("Error retrieving resource key: %s with resp : %s", err, resp) + } + var credInterface map[string]interface{} + cred, _ := json.Marshal(resourceKey.Credentials) + json.Unmarshal(cred, &credInterface) + d.Set("credentials", Flatten(credInterface)) + d.Set("name", *resourceKey.Name) + d.Set("status", *resourceKey.State) + if resourceKey.Credentials != nil && resourceKey.Credentials.IamRoleCRN != nil { + roleCrn := *resourceKey.Credentials.IamRoleCRN + roleName := roleCrn[strings.LastIndex(roleCrn, ":")+1:] + + // TODO.S: update client + if strings.Contains(roleCrn, ":customRole:") { + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err == nil { + var resourceCRN string + if resourceKey.CRN != nil { + serviceName := strings.Split(*resourceKey.CRN, ":") + if len(serviceName) > 4 { + resourceCRN = serviceName[4] + } + } + listRoleOptions := &iampolicymanagementv1.ListRolesOptions{ + AccountID: resourceKey.AccountID, + ServiceName: &resourceCRN, + } + roleList, _, err := iamPolicyManagementClient.ListRoles(listRoleOptions) + roles := roleList.CustomRoles + if err == nil && len(roles) > 0 { + for _, role := range roles { + if *role.Name == roleName { + customRoleName := role.DisplayName + d.Set("role", customRoleName) + } + } + } + } + } else { + d.Set("role", roleName) + } + } + + sCrn := *resourceKey.SourceCRN + if sCrn != "" { + d.Set("resource_instance_id", sCrn) + } + + d.Set("crn", *resourceKey.CRN) + + d.Set("guid", *resourceKey.GUID) + d.Set("url", *resourceKey.URL) + d.Set("account_id", *resourceKey.AccountID) + d.Set("resource_group_id", *resourceKey.ResourceGroupID) + d.Set("source_crn", *resourceKey.SourceCRN) + d.Set("state", *resourceKey.State) + d.Set("iam_compatible", *resourceKey.IamCompatible) + d.Set("resource_instance_url", *resourceKey.ResourceInstanceURL) + if resourceKey.CreatedAt != nil { + d.Set("created_at", resourceKey.CreatedAt.String()) + } else { + d.Set("created_at", "") + } + if resourceKey.UpdatedAt != nil { + 
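+		// Timestamps from the Resource Controller SDK are nillable pointers;
+		// guard before calling String() so keys that were never updated or
+		// deleted are stored as empty strings instead of dereferencing nil.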
d.Set("updated_at", resourceKey.UpdatedAt.String()) + } else { + d.Set("updated_at", "") + } + if resourceKey.DeletedAt != nil { + d.Set("deleted_at", resourceKey.DeletedAt.String()) + } else { + d.Set("deleted_at", "") + } + d.Set("created_by", *resourceKey.CreatedBy) + d.Set("updated_by", *resourceKey.UpdatedBy) + d.Set("deleted_by", *resourceKey.DeletedBy) + + return nil +} + +func resourceIBMResourceKeyDelete(d *schema.ResourceData, meta interface{}) error { + rsContClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return err + } + + resourceKeyID := d.Id() + resourceKeyDelete := rc.DeleteResourceKeyOptions{ + ID: &resourceKeyID, + } + + resp, err := rsContClient.DeleteResourceKey(&resourceKeyDelete) + if err != nil { + return fmt.Errorf("Error deleting resource key: %s with resp code: %s", err, resp) + } + + d.SetId("") + + return nil +} + +func resourceIBMResourceKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + rsContClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return false, err + } + resourceKeyID := d.Id() + resourceKeyGet := rc.GetResourceKeyOptions{ + ID: &resourceKeyID, + } + + resourceKey, resp, err := rsContClient.GetResourceKey(&resourceKeyGet) + if err != nil { + if resp != nil && (resp.StatusCode == 404 || resp.StatusCode == 410) { + return false, nil + } + return false, fmt.Errorf("Error communicating with the API: %s with resp code: %s", err, resp) + } + if err == nil && *resourceKey.State == "removed" { + return false, nil + } + + return *resourceKey.ID == resourceKeyID, nil +} + +func getResourceInstanceAndCRN(d *schema.ResourceData, meta interface{}) (*rc.ResourceInstance, *string, error) { + rsContClient, err := meta.(ClientSession).ResourceControllerV2API() + if err != nil { + return nil, nil, err + } + if insID, ok := d.GetOk("resource_instance_id"); ok { + insIdString := insID.(string) + resourceInstanceGet := rc.GetResourceInstanceOptions{ + ID: &insIdString, + } + instance, resp, err := rsContClient.GetResourceInstance(&resourceInstanceGet) + if err != nil { + log.Printf("Error when get resource instance in getResourceInstanceAndCRN: %s with resp code: %s", err, resp) + return nil, nil, err + } + return instance, instance.CRN, nil + } + + aliasID := d.Get("resource_alias_id").(string) + resourceAliasGet := rc.GetResourceAliasOptions{ + ID: &aliasID, + } + alias, resp, err := rsContClient.GetResourceAlias(&resourceAliasGet) + if err != nil { + log.Printf("Error when get resource alias in getResourceInstanceAndCRN: %s with resp code: %s", err, resp) + return nil, nil, err + } + resourceInstanceGet := rc.GetResourceInstanceOptions{ + ID: alias.ResourceInstanceID, + } + instance, resp, err := rsContClient.GetResourceInstance(&resourceInstanceGet) + if err != nil { + log.Printf("Error when get resource instance in getResourceInstanceAndCRN: %s with resp code: %s", err, resp) + return nil, nil, err + } + return instance, instance.CRN, nil + +} + +func getRoleFromName(roleName, serviceName string, meta interface{}) (iampolicymanagementv1.PolicyRole, error) { + + role := iampolicymanagementv1.PolicyRole{} + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + if err != nil { + return role, err + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return role, err + } + + listRoleOptions := &iampolicymanagementv1.ListRolesOptions{ + AccountID: &userDetails.userAccount, + ServiceName: &serviceName, + } + + roleList, _, 
err := iamPolicyManagementClient.ListRoles(listRoleOptions) + if err != nil { + return role, err + } + + roles := mapRoleListToPolicyRoles(*roleList) + + role, err = findRoleByName(roles, roleName) + if err != nil { + return iampolicymanagementv1.PolicyRole{}, err + } + return role, nil + +} + +func findRoleByName(supported []iampolicymanagementv1.PolicyRole, name string) (iampolicymanagementv1.PolicyRole, error) { + for _, role := range supported { + if role.DisplayName != nil { + if *role.DisplayName == name { + role.DisplayName = nil + return role, nil + } + } + } + supportedRoles := getSupportedRolesStr(supported) + return iampolicymanagementv1.PolicyRole{}, bmxerror.New("RoleDoesnotExist", + fmt.Sprintf("%s was not found. Valid roles are %s", name, supportedRoles)) + +} + +func getSupportedRolesStr(supported []iampolicymanagementv1.PolicyRole) string { + rolesStr := "" + for index, role := range supported { + if index != 0 { + rolesStr += ", " + } + if role.DisplayName != nil { + rolesStr += *role.DisplayName + } + } + return rolesStr +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_tag.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_tag.go new file mode 100644 index 00000000000..2f7f8394b32 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_resource_tag.go @@ -0,0 +1,316 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "os" + "regexp" + "strings" + + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM/platform-services-go-sdk/globaltaggingv1" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const ( + resourceID = "resource_id" + tags = "tags" + resourceType = "resource_type" + tagType = "tag_type" + acccountID = "acccount_id" + service = "service" + crnRegex = "^crn:.+:.+:.+:.+:.+:$" +) + +func resourceIBMResourceTag() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMResourceTagCreate, + Read: resourceIBMResourceTagRead, + Update: resourceIBMResourceTagUpdate, + Delete: resourceIBMResourceTagDelete, + Importer: &schema.ResourceImporter{}, + + CustomizeDiff: customdiff.Sequence( + func(diff *schema.ResourceDiff, v interface{}) error { + return resourceTagsCustomizeDiff(diff) + }, + ), + + Schema: map[string]*schema.Schema{ + resourceID: { + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_resource_tag", resourceID), + Description: "CRN of the resource on which the tags should be attached", + }, + tags: { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_resource_tag", tags)}, + Set: resourceIBMVPCHash, + Description: "List of tags associated with resource instance", + }, + resourceType: { + Type: schema.TypeString, + Optional: true, + Description: "Resource type on which the tags should be attached", + }, + tagType: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAllowedStringValue([]string{"service", "access", "user"}), + Description: "Type of the tag. 
Only allowed values are: user, or service or access (default value : user)", + }, + acccountID: { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the account that owns the resources to be tagged (required if tag-type is set to service)", + }, + }, + } +} + +func resourceIBMResourceTagValidator() *ResourceValidator { + + validateSchema := make([]ValidateSchema, 1) + + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: resourceID, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Required: true, + Regexp: `^crn:v1(:[a-zA-Z0-9 \-\._~\*\+,;=!$&'\(\)\/\?#\[\]@]*){8}$|^[0-9]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: tags, + ValidateFunctionIdentifier: ValidateRegexpLen, + Type: TypeString, + Optional: true, + Regexp: `^[A-Za-z0-9:_ .-]+$`, + MinValueLength: 1, + MaxValueLength: 128}) + + ibmResourceTagValidator := ResourceValidator{ResourceName: "ibm_resource_tag", Schema: validateSchema} + return &ibmResourceTagValidator +} + +func resourceIBMResourceTagCreate(d *schema.ResourceData, meta interface{}) error { + var rType, tType string + resources := []globaltaggingv1.Resource{} + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + accountID := userDetails.userAccount + + gtClient, err := meta.(ClientSession).GlobalTaggingAPIv1() + if err != nil { + return fmt.Errorf("Error getting global tagging client settings: %s", err) + } + + resourceID := d.Get(resourceID).(string) + if v, ok := d.GetOk(resourceType); ok && v != nil { + rType = v.(string) + } + + r := globaltaggingv1.Resource{ResourceID: ptrToString(resourceID), ResourceType: ptrToString(rType)} + resources = append(resources, r) + + var add []string + if v, ok := d.GetOk(tags); ok { + tags := v.(*schema.Set) + for _, t := range tags.List() { + add = append(add, fmt.Sprint(t)) + } + } + + schematicTags := os.Getenv("IC_ENV_TAGS") + var envTags []string + if schematicTags != "" { + envTags = strings.Split(schematicTags, ",") + add = append(add, envTags...) 
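+		// Tags injected through the IC_ENV_TAGS environment variable (set by
+		// Schematics) are merged with the tags declared in configuration
+		// before the attach call below.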
+ } + + AttachTagOptions := &globaltaggingv1.AttachTagOptions{} + AttachTagOptions.Resources = resources + AttachTagOptions.TagNames = add + if v, ok := d.GetOk(tagType); ok && v != nil { + tType = v.(string) + AttachTagOptions.TagType = ptrToString(tType) + + if tType == service { + AttachTagOptions.AccountID = ptrToString(accountID) + } + } + + if len(add) > 0 { + _, resp, err := gtClient.AttachTag(AttachTagOptions) + if err != nil { + return fmt.Errorf("Error attaching resource tags >>>> %v : %s", resp, err) + } + } + + crn, err := regexp.Compile(crnRegex) + if err != nil { + return err + } + + if crn.MatchString(resourceID) { + d.SetId(resourceID) + } else { + d.SetId(fmt.Sprintf("%s/%s", resourceID, resourceType)) + } + + return resourceIBMResourceTagRead(d, meta) +} + +func resourceIBMResourceTagRead(d *schema.ResourceData, meta interface{}) error { + var rID, rType, tType string + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + acctID := userDetails.userAccount + + crn, err := regexp.Compile(crnRegex) + if err != nil { + return err + } + + if crn.MatchString(d.Id()) { + rID = d.Id() + } else { + parts, err := vmIdParts(d.Id()) + if err != nil { + return err + } + rID = parts[0] + rType = parts[1] + } + + if v, ok := d.GetOk(tagType); ok && v != nil { + tType = v.(string) + + if tType == service { + d.Set(acccountID, acctID) + } + } + + tagList, err := GetGlobalTagsUsingCRN(meta, rID, resourceType, tType) + if err != nil { + if apierr, ok := err.(bmxerror.RequestFailure); ok && apierr.StatusCode() == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error getting resource tags for: %s with error : %s\n", rID, err) + } + + d.Set(resourceID, rID) + d.Set(resourceType, rType) + d.Set(tags, tagList) + + return nil +} + +func resourceIBMResourceTagUpdate(d *schema.ResourceData, meta interface{}) error { + var rID, rType, tType string + + crn, err := regexp.Compile(crnRegex) + if err != nil { + return err + } + + if crn.MatchString(d.Id()) { + rID = d.Id() + } else { + parts, err := vmIdParts(d.Id()) + if err != nil { + return err + } + rID = parts[0] + rType = parts[1] + } + + if v, ok := d.GetOk(tagType); ok && v != nil { + tType = v.(string) + } + + if _, ok := d.GetOk(tags); ok { + oldList, newList := d.GetChange(tags) + err := UpdateGlobalTagsUsingCRN(oldList, newList, meta, rID, rType, tType) + if err != nil { + return fmt.Errorf( + "Error on create of resource tags: %s", err) + } + } + + return resourceIBMResourceTagRead(d, meta) +} + +func resourceIBMResourceTagDelete(d *schema.ResourceData, meta interface{}) error { + var rID, rType string + + crn, err := regexp.Compile(crnRegex) + if err != nil { + return err + } + + if crn.MatchString(d.Id()) { + rID = d.Id() + } else { + parts, err := vmIdParts(d.Id()) + if err != nil { + return err + } + rID = parts[0] + rType = parts[1] + } + + gtClient, err := meta.(ClientSession).GlobalTaggingAPIv1() + if err != nil { + return fmt.Errorf("Error getting global tagging client settings: %s", err) + } + + var remove []string + removeTags := d.Get(tags).(*schema.Set) + remove = make([]string, len(removeTags.List())) + for i, v := range removeTags.List() { + remove[i] = fmt.Sprint(v) + } + + if len(remove) > 0 { + resources := []globaltaggingv1.Resource{} + r := globaltaggingv1.Resource{ResourceID: ptrToString(rID), ResourceType: ptrToString(rType)} + resources = append(resources, r) + + detachTagOptions := &globaltaggingv1.DetachTagOptions{ + Resources: resources, + TagNames: remove, + 
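+			// TagNames carries every tag currently held in state for this
+			// resource; each one is detached here and then removed outright
+			// by the DeleteTag loop that follows.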
} + + _, resp, err := gtClient.DetachTag(detachTagOptions) + if err != nil { + return fmt.Errorf("Error detaching resource tags %v: %s\n%s", remove, err, resp) + } + for _, v := range remove { + delTagOptions := &globaltaggingv1.DeleteTagOptions{ + TagName: ptrToString(v), + } + _, resp, err := gtClient.DeleteTag(delTagOptions) + if err != nil { + return fmt.Errorf("Error deleting resource tag %v: %s\n%s", v, err, resp) + } + } + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_schematics_action.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_schematics_action.go new file mode 100644 index 00000000000..7dcfa18080f --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_schematics_action.go @@ -0,0 +1,1679 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/go-openapi/strfmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/schematics-go-sdk/schematicsv1" +) + +const ( + actionName = "name" +) + +func resourceIBMSchematicsAction() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMSchematicsActionCreate, + Read: resourceIBMSchematicsActionRead, + Update: resourceIBMSchematicsActionUpdate, + Delete: resourceIBMSchematicsActionDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Action name (unique for an account).", + ValidateFunc: InvokeValidator("ibm_schematics_action", actionName), + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Action description.", + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_schematics_action", "location"), + Description: "List of action locations supported by IBM Cloud Schematics service. **Note** this does not limit the location of the resources provisioned using Schematics.", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Resource-group name for an action. 
By default, action is created in default resource group.", + }, + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Action tags.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "user_state": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "User defined status of the Schematics object.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "User defined states * `draft` Object can be modified, and can be used by jobs run by an author, during execution * `live` Object can be modified, and can be used by jobs during execution * `locked` Object cannot be modified, and can be used by jobs during execution * `disable` Object can be modified, and cannot be used by Jobs during execution.", + }, + "set_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Name of the user who set the state of an Object.", + }, + "set_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "When the user who set the state of an Object.", + }, + }, + }, + }, + "source_readme_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "URL of the `README` file, for the source.", + }, + "source": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Source of templates, playbooks, or controls.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Type of source for the Template.", + }, + "git": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Connection details to Git source.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "git_repo_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "URL to the GIT Repo that can be used to clone the template.", + ValidateFunc: validation.IsURLWithHTTPorHTTPS, + }, + "git_token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Personal Access Token to connect to Git URLs.", + }, + "git_repo_folder": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the folder in the Git Repo, that contains the template.", + }, + "git_release": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the release tag, used to fetch the Git Repo.", + }, + "git_branch": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the branch, used to fetch the Git Repo.", + }, + }, + }, + }, + }, + }, + }, + "source_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_schematics_action", "source_type"), + Description: "Type of source for the Template.", + }, + "command_parameter": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Schematics job command parameter (playbook-name, capsule-name or flow-name).", + }, + "bastion": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Complete target details with the user inputs and the system generated data.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Target name.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: 
"Target type (`cluster`, `vsi`, `icd`, `vpc`).", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Target description.", + }, + "resource_query": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Resource selection query string.", + }, + "credential_ref": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Override credential for each resource. Reference to credentials values, used by all the resources.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Target ID.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Targets creation time.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of the user who created the targets.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Targets updation time.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "E-mail address of user who updated the targets.", + }, + "sys_lock": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "System lock status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sys_locked": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the Workspace locked by the Schematic action ?.", + }, + "sys_locked_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the user who performed the action, that lead to lock the Workspace.", + }, + "sys_locked_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "When the user performed the action that lead to lock the Workspace ?.", + }, + }, + }, + }, + "resource_ids": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Array of the resource IDs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "targets_ini": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Inventory of host and host group for the playbook in `INI` file format. For example, `\"targets_ini\": \"[webserverhost] 172.22.192.6 [dbhost] 172.22.192.5\"`. 
For more information, about an inventory host group syntax, see [Inventory host groups](/docs/schematics?topic=schematics-schematics-cli-reference#schematics-inventory-host-grps).", + }, + "credentials": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "credentials of the Action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "action_inputs": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Input variables for an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "action_outputs": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Output variables for an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "settings": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Environment variables for an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum length of the variable value. 
Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "trigger_record_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "ID to the trigger.", + }, + "state": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Computed state of an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Status of automation (workspace or action).", + }, + "status_job_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Job id reference for this status.", + }, + "status_message": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Automation status message - to be displayed along with the status_code.", + }, + }, + }, + }, + "sys_lock": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "System lock status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sys_locked": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the Workspace locked by the Schematic action ?.", + }, + "sys_locked_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the user who performed the action, that lead to lock the Workspace.", + }, + "sys_locked_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "When the user performed the action that lead to lock the Workspace ?.", + }, + }, + }, + }, + "x_github_token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The personal access token to authenticate with your private GitHub or GitLab repository and access your Terraform template.", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action Cloud Resource Name.", + }, + "account": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action account ID.", + }, + "source_created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action Playbook Source creation time.", + }, + "source_created_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of user who created the Action Playbook Source.", + }, + "source_updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The action playbook updation time.", + }, + "source_updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of user who updated the action playbook source.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action creation time.", + }, + "created_by": &schema.Schema{ + Type: 
schema.TypeString, + Computed: true, + Description: "E-mail address of the user who created an action.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Action updation time.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of the user who updated an action.", + }, + "namespace": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the namespace.", + }, + "playbook_names": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Playbook names retrieved from the repository.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceIBMSchematicsActionValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 0) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "location", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Optional: true, + AllowedValues: "eu-de, eu-gb, us-east, us-south", + }, + ValidateSchema{ + Identifier: "source_type", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Optional: true, + AllowedValues: "external_scm, git_hub, git_hub_enterprise, git_lab, ibm_cloud_catalog, ibm_git_lab, local", + }, + ValidateSchema{ + Identifier: actionName, + ValidateFunctionIdentifier: StringLenBetween, + Type: TypeString, + MinValueLength: 1, + MaxValueLength: 65, + Optional: true, + }) + + resourceValidator := ResourceValidator{ResourceName: "ibm_schematics_action", Schema: validateSchema} + return &resourceValidator +}
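The validator above pins "location" and "source_type" to fixed allowed values and bounds the action name length. As a quick usage sketch of what passes these checks (attribute names come from the schema in this diff; all values are hypothetical):

resource "ibm_schematics_action" "example" {
  name        = "my-ansible-action" # 1-65 characters, per the name validator
  location    = "us-south"          # allowed: eu-de, eu-gb, us-east, us-south
  source_type = "git_hub"           # one of the allowed source_type values
}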
+ +func resourceIBMSchematicsActionCreate(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + createActionOptions := &schematicsv1.CreateActionOptions{} + + if _, ok := d.GetOk("name"); ok { + createActionOptions.SetName(d.Get("name").(string)) + } + if _, ok := d.GetOk("description"); ok { + createActionOptions.SetDescription(d.Get("description").(string)) + } + if _, ok := d.GetOk("location"); ok { + createActionOptions.SetLocation(d.Get("location").(string)) + } + if _, ok := d.GetOk("resource_group"); ok { + createActionOptions.SetResourceGroup(d.Get("resource_group").(string)) + } + if _, ok := d.GetOk("tags"); ok { + createActionOptions.SetTags(expandStringList(d.Get("tags").([]interface{}))) + } + if _, ok := d.GetOk("user_state"); ok { + userStateAttr := d.Get("user_state").([]interface{}) + if len(userStateAttr) > 0 { + userState := resourceIBMSchematicsActionMapToUserState(d.Get("user_state.0").(map[string]interface{})) + createActionOptions.SetUserState(&userState) + } + } + if _, ok := d.GetOk("source_readme_url"); ok { + createActionOptions.SetSourceReadmeURL(d.Get("source_readme_url").(string)) + } + if _, ok := d.GetOk("source"); ok { + sourceAttr := d.Get("source").([]interface{}) + if len(sourceAttr) > 0 { + source := resourceIBMSchematicsActionMapToExternalSource(d.Get("source.0").(map[string]interface{})) + createActionOptions.SetSource(&source) + } + } + if _, ok := d.GetOk("source_type"); ok { + createActionOptions.SetSourceType(d.Get("source_type").(string)) + } + if _, ok := d.GetOk("command_parameter"); ok { + createActionOptions.SetCommandParameter(d.Get("command_parameter").(string)) + } + if _, ok := d.GetOk("bastion"); ok { + bastionAttr := d.Get("bastion").([]interface{}) + if len(bastionAttr) > 0 { + bastion := resourceIBMSchematicsActionMapToTargetResourceset(d.Get("bastion.0").(map[string]interface{})) + createActionOptions.SetBastion(&bastion) + } + } + if _, ok := d.GetOk("targets_ini"); ok { + createActionOptions.SetTargetsIni(d.Get("targets_ini").(string)) + } + if _, ok := d.GetOk("credentials"); ok { + var credentials []schematicsv1.VariableData + for _, e := range d.Get("credentials").([]interface{}) { + value := e.(map[string]interface{}) + credentialsItem := resourceIBMSchematicsActionMapToVariableData(value) + credentials = append(credentials, credentialsItem) + } + createActionOptions.SetCredentials(credentials) + } + if _, ok := d.GetOk("action_inputs"); ok { + var inputs []schematicsv1.VariableData + for _, e := range d.Get("action_inputs").([]interface{}) { + value := e.(map[string]interface{}) + inputsItem := resourceIBMSchematicsActionMapToVariableData(value) + inputs = append(inputs, inputsItem) + } + createActionOptions.SetInputs(inputs) + } + if _, ok := d.GetOk("action_outputs"); ok { + var outputs []schematicsv1.VariableData + for _, e := range d.Get("action_outputs").([]interface{}) { + value := e.(map[string]interface{}) + outputsItem := resourceIBMSchematicsActionMapToVariableData(value) + outputs = append(outputs, outputsItem) + } + createActionOptions.SetOutputs(outputs) + } + if _, ok := d.GetOk("settings"); ok { + var settings []schematicsv1.VariableData + for _, e := range d.Get("settings").([]interface{}) { + value := e.(map[string]interface{}) + settingsItem := resourceIBMSchematicsActionMapToVariableData(value) + settings = append(settings, settingsItem) + } + createActionOptions.SetSettings(settings) + } + if _, ok := d.GetOk("trigger_record_id"); ok { + createActionOptions.SetTriggerRecordID(d.Get("trigger_record_id").(string)) + } + if _, ok := d.GetOk("state"); ok { + stateAttr := d.Get("state").([]interface{}) + if len(stateAttr) > 0 { + state := resourceIBMSchematicsActionMapToActionState(d.Get("state.0").(map[string]interface{})) + createActionOptions.SetState(&state) + } + } + if _, ok := d.GetOk("sys_lock"); ok { + sysLockAttr := d.Get("sys_lock").([]interface{}) + if len(sysLockAttr) > 0 { + sysLock := resourceIBMSchematicsActionMapToSystemLock(d.Get("sys_lock.0").(map[string]interface{})) + createActionOptions.SetSysLock(&sysLock) + } + } + if _, ok := d.GetOk("x_github_token"); ok { + createActionOptions.SetXGithubToken(d.Get("x_github_token").(string)) + } + + action, response, err := schematicsClient.CreateActionWithContext(context.TODO(), createActionOptions) + if err != nil { + log.Printf("[DEBUG] CreateActionWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*action.ID) + + return resourceIBMSchematicsActionRead(d, meta) +}
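resourceIBMSchematicsActionCreate copies each optional top-level attribute into CreateActionOptions only when it is set in the configuration, converting the nested list attributes through the MapTo* helpers that follow. A hedged sketch of a fuller configuration exercising those paths (all values hypothetical):

resource "ibm_schematics_action" "sketch" {
  name              = "install-playbook"
  description       = "Runs an Ansible playbook against the target hosts"
  location          = "us-east"
  resource_group    = "default"
  tags              = ["env:test"]
  command_parameter = "playbook.yml"

  # Each action_inputs block becomes one schematicsv1.VariableData entry.
  action_inputs {
    name  = "target_host"
    value = "10.240.0.4"
    metadata {
      type   = "string"
      secure = false
    }
  }

  # settings entries are converted the same way and passed via SetSettings.
  settings {
    name  = "ANSIBLE_STDOUT_CALLBACK"
    value = "yaml"
  }
}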
+ +func resourceIBMSchematicsActionMapToUserState(userStateMap map[string]interface{}) schematicsv1.UserState { + userState := schematicsv1.UserState{} + + if userStateMap["state"] != nil { + userState.State = core.StringPtr(userStateMap["state"].(string)) + } + if userStateMap["set_by"] != nil { + userState.SetBy = core.StringPtr(userStateMap["set_by"].(string)) + } + if userStateMap["set_at"] != nil { + setAt, err := strfmt.ParseDateTime(userStateMap["set_at"].(string)) + if err == nil { + userState.SetAt = &setAt + } + } + + return userState +} + +func resourceIBMSchematicsActionMapToExternalSource(externalSourceMap map[string]interface{}) schematicsv1.ExternalSource { + externalSource := schematicsv1.ExternalSource{} + + externalSource.SourceType = core.StringPtr(externalSourceMap["source_type"].(string)) + if externalSourceMap["git"] != nil { + externalSourceGit := resourceIBMSchematicsActionMapToExternalSourceGit(externalSourceMap["git"].([]interface{})[0].(map[string]interface{})) + externalSource.Git = &externalSourceGit + } + + return externalSource +} + +func resourceIBMSchematicsActionMapToExternalSourceGit(externalSourceGitMap map[string]interface{}) schematicsv1.ExternalSourceGit { + externalSourceGit := schematicsv1.ExternalSourceGit{} + + if externalSourceGitMap["git_repo_url"] != nil { + externalSourceGit.GitRepoURL = core.StringPtr(externalSourceGitMap["git_repo_url"].(string)) + } + if externalSourceGitMap["git_token"] != nil { + externalSourceGit.GitToken = core.StringPtr(externalSourceGitMap["git_token"].(string)) + } + if externalSourceGitMap["git_repo_folder"] != nil { + externalSourceGit.GitRepoFolder = core.StringPtr(externalSourceGitMap["git_repo_folder"].(string)) + } + if externalSourceGitMap["git_release"] != nil { + externalSourceGit.GitRelease = core.StringPtr(externalSourceGitMap["git_release"].(string)) + } + if externalSourceGitMap["git_branch"] != nil { + externalSourceGit.GitBranch = core.StringPtr(externalSourceGitMap["git_branch"].(string)) + } + + return externalSourceGit +}
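The two helpers above translate the nested source list into the SDK's ExternalSource and ExternalSourceGit structs, copying only the keys present in the map. In configuration terms, the block they consume looks like this (repository URL and branch are hypothetical):

source {
  source_type = "git_hub"
  git {
    git_repo_url = "https://github.com/example-org/ansible-playbooks"
    git_branch   = "main"
  }
}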
+ +func resourceIBMSchematicsActionMapToTargetResourceset(targetResourcesetMap map[string]interface{}) schematicsv1.TargetResourceset { + targetResourceset := schematicsv1.TargetResourceset{} + + if targetResourcesetMap["name"] != nil { + targetResourceset.Name = core.StringPtr(targetResourcesetMap["name"].(string)) + } + if targetResourcesetMap["type"] != nil { + targetResourceset.Type = core.StringPtr(targetResourcesetMap["type"].(string)) + } + if targetResourcesetMap["description"] != nil { + targetResourceset.Description = core.StringPtr(targetResourcesetMap["description"].(string)) + } + if targetResourcesetMap["resource_query"] != nil { + targetResourceset.ResourceQuery = core.StringPtr(targetResourcesetMap["resource_query"].(string)) + } + if targetResourcesetMap["credential_ref"] != nil { + targetResourceset.CredentialRef = core.StringPtr(targetResourcesetMap["credential_ref"].(string)) + } + if targetResourcesetMap["id"] != nil { + targetResourceset.ID = core.StringPtr(targetResourcesetMap["id"].(string)) + } + if targetResourcesetMap["created_at"] != nil { + createdAt, err := strfmt.ParseDateTime(targetResourcesetMap["created_at"].(string)) + if err == nil { + targetResourceset.CreatedAt = &createdAt + } + } + if targetResourcesetMap["created_by"] != nil { + targetResourceset.CreatedBy = core.StringPtr(targetResourcesetMap["created_by"].(string)) + } + if targetResourcesetMap["updated_at"] != nil { + updatedAt, err := strfmt.ParseDateTime(targetResourcesetMap["updated_at"].(string)) + if err == nil { + targetResourceset.UpdatedAt = &updatedAt + } + } + if targetResourcesetMap["updated_by"] != nil { + targetResourceset.UpdatedBy = core.StringPtr(targetResourcesetMap["updated_by"].(string)) + } + if targetResourcesetMap["sys_lock"] != nil && len(targetResourcesetMap["sys_lock"].([]interface{})) != 0 { + sysLock := resourceIBMSchematicsActionMapToSystemLock(targetResourcesetMap["sys_lock"].([]interface{})[0].(map[string]interface{})) + targetResourceset.SysLock = &sysLock + } + if targetResourcesetMap["resource_ids"] != nil { + resourceIds := []string{} + for _, resourceIdsItem := range targetResourcesetMap["resource_ids"].([]interface{}) { + resourceIds = append(resourceIds, resourceIdsItem.(string)) + } + targetResourceset.ResourceIds = resourceIds + } + + return targetResourceset +} + +func resourceIBMSchematicsActionMapToSystemLock(systemLockMap map[string]interface{}) schematicsv1.SystemLock { + systemLock := schematicsv1.SystemLock{} + + if systemLockMap["sys_locked"] != nil { + systemLock.SysLocked = core.BoolPtr(systemLockMap["sys_locked"].(bool)) + } + if systemLockMap["sys_locked_by"] != nil { + systemLock.SysLockedBy = core.StringPtr(systemLockMap["sys_locked_by"].(string)) + } + if systemLockMap["sys_locked_at"] != nil { + sysLockedAt, err := strfmt.ParseDateTime(systemLockMap["sys_locked_at"].(string)) + if err == nil { + systemLock.SysLockedAt = &sysLockedAt + } + } + + return systemLock +}
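resourceIBMSchematicsActionMapToTargetResourceset accepts both the user-settable bastion fields and the system-populated ones (id, created_at, sys_lock, and so on) that the service normally fills in; a configuration would typically supply only the former. A sketch with hypothetical values:

bastion {
  name           = "jump-host"
  type           = "vsi"
  description    = "Bastion used to reach the target hosts"
  resource_query = "region:us-south"
  credential_ref = "bastion-ssh-credential"
}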
+ +func resourceIBMSchematicsActionMapToVariableData(variableDataMap map[string]interface{}) schematicsv1.VariableData { + variableData := schematicsv1.VariableData{} + + if variableDataMap["name"] != nil { + variableData.Name = core.StringPtr(variableDataMap["name"].(string)) + } + if variableDataMap["value"] != nil { + variableData.Value = core.StringPtr(variableDataMap["value"].(string)) + } + if variableDataMap["metadata"] != nil && len(variableDataMap["metadata"].([]interface{})) != 0 { + variableMetaData := resourceIBMSchematicsActionMapToVariableMetadata(variableDataMap["metadata"].([]interface{})[0].(map[string]interface{})) + variableData.Metadata = &variableMetaData + } + if variableDataMap["link"] != nil { + variableData.Link = core.StringPtr(variableDataMap["link"].(string)) + } + + return variableData +} + +func resourceIBMSchematicsActionMapToVariableMetadata(variableMetadataMap map[string]interface{}) schematicsv1.VariableMetadata { + variableMetadata := schematicsv1.VariableMetadata{} + + if variableMetadataMap["type"] != nil { + variableMetadata.Type = core.StringPtr(variableMetadataMap["type"].(string)) + } + if variableMetadataMap["aliases"] != nil { + aliases := []string{} + for _, aliasesItem := range variableMetadataMap["aliases"].([]interface{}) { + aliases = append(aliases, aliasesItem.(string)) + } + variableMetadata.Aliases = aliases + } + if variableMetadataMap["description"] != nil { + variableMetadata.Description = core.StringPtr(variableMetadataMap["description"].(string)) + } + if variableMetadataMap["default_value"] != nil { + variableMetadata.DefaultValue = core.StringPtr(variableMetadataMap["default_value"].(string)) + } + if variableMetadataMap["secure"] != nil { + variableMetadata.Secure = core.BoolPtr(variableMetadataMap["secure"].(bool)) + } + if variableMetadataMap["immutable"] != nil { + variableMetadata.Immutable = core.BoolPtr(variableMetadataMap["immutable"].(bool)) + } + if variableMetadataMap["hidden"] != nil { + variableMetadata.Hidden = core.BoolPtr(variableMetadataMap["hidden"].(bool)) + } + if variableMetadataMap["options"] != nil { + options := []string{} + for _, optionsItem := range variableMetadataMap["options"].([]interface{}) { + options = append(options, optionsItem.(string)) + } + variableMetadata.Options = options + } + if variableMetadataMap["min_value"] != nil { + variableMetadata.MinValue = core.Int64Ptr(int64(variableMetadataMap["min_value"].(int))) + } + if variableMetadataMap["max_value"] != nil { + variableMetadata.MaxValue = core.Int64Ptr(int64(variableMetadataMap["max_value"].(int))) + } + if variableMetadataMap["min_length"] != nil { + variableMetadata.MinLength = core.Int64Ptr(int64(variableMetadataMap["min_length"].(int))) + } + if variableMetadataMap["max_length"] != nil { + variableMetadata.MaxLength = core.Int64Ptr(int64(variableMetadataMap["max_length"].(int))) + } + if variableMetadataMap["matches"] != nil { + variableMetadata.Matches = core.StringPtr(variableMetadataMap["matches"].(string)) + } + if variableMetadataMap["position"] != nil { + variableMetadata.Position = core.Int64Ptr(int64(variableMetadataMap["position"].(int))) + } + if variableMetadataMap["group_by"] != nil { + variableMetadata.GroupBy = core.StringPtr(variableMetadataMap["group_by"].(string)) + } + if variableMetadataMap["source"] != nil { + variableMetadata.Source = core.StringPtr(variableMetadataMap["source"].(string)) + } + + return variableMetadata +} + +func resourceIBMSchematicsActionMapToActionState(actionStateMap map[string]interface{}) schematicsv1.ActionState { + actionState := schematicsv1.ActionState{} + + if actionStateMap["status_code"] != nil { + actionState.StatusCode = core.StringPtr(actionStateMap["status_code"].(string)) + } + if actionStateMap["status_job_id"] != nil { + actionState.StatusJobID = core.StringPtr(actionStateMap["status_job_id"].(string)) + } + if actionStateMap["status_message"] != nil { + actionState.StatusMessage = core.StringPtr(actionStateMap["status_message"].(string)) + } + + return actionState +} + +func resourceIBMSchematicsActionRead(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + getActionOptions := &schematicsv1.GetActionOptions{} + + getActionOptions.SetActionID(d.Id()) + + action, response, err := schematicsClient.GetActionWithContext(context.TODO(), getActionOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetActionWithContext failed %s\n%s", err, response) + return err + } + + if err = d.Set("name", action.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("description", action.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("location", action.Location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if err = d.Set("resource_group", action.ResourceGroup); err != nil { + return fmt.Errorf("Error setting resource_group: %s", err) + } + if action.Tags != nil { + if err = d.Set("tags", action.Tags); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + } + if action.UserState != nil { + userStateMap := resourceIBMSchematicsActionUserStateToMap(*action.UserState) + if err = d.Set("user_state", []map[string]interface{}{userStateMap}); err != nil { + return fmt.Errorf("Error setting user_state: %s", err) + } + } + if err = d.Set("source_readme_url", action.SourceReadmeURL); err != nil { + return fmt.Errorf("Error setting source_readme_url: %s", err) + } + if _, ok := d.GetOk("source"); ok { + if action.Source != nil { + sourceMap := resourceIBMSchematicsActionExternalSourceToMap(*action.Source) + if err = d.Set("source", []map[string]interface{}{sourceMap}); err != nil { + return fmt.Errorf("Error setting source: %s", err) + } + } + } + if err = d.Set("source_type", action.SourceType); err != nil { + return fmt.Errorf("Error setting source_type: %s", err) + } + if err = d.Set("command_parameter", action.CommandParameter); err != nil { + return fmt.Errorf("Error setting command_parameter: %s", err) + } + if _, ok := d.GetOk("bastion"); ok { + if action.Bastion != nil { + bastionMap := resourceIBMSchematicsActionTargetResourcesetToMap(*action.Bastion)
+ if err = d.Set("bastion", []map[string]interface{}{bastionMap}); err != nil { + return fmt.Errorf("Error setting bastion: %s", err) + } + } + } + if err = d.Set("targets_ini", action.TargetsIni); err != nil { + return fmt.Errorf("Error setting targets_ini: %s", err) + } + if action.Credentials != nil { + credentials := []map[string]interface{}{} + for _, credentialsItem := range action.Credentials { + credentialsItemMap := resourceIBMSchematicsActionVariableDataToMap(credentialsItem) + credentials = append(credentials, credentialsItemMap) + } + if err = d.Set("credentials", credentials); err != nil { + return fmt.Errorf("Error setting credentials: %s", err) + } + } + if action.Inputs != nil { + inputs := []map[string]interface{}{} + for _, inputsItem := range action.Inputs { + inputsItemMap := resourceIBMSchematicsActionVariableDataToMap(inputsItem) + inputs = append(inputs, inputsItemMap) + } + if err = d.Set("action_inputs", inputs); err != nil { + return fmt.Errorf("Error setting action_inputs: %s", err) + } + } + if action.Outputs != nil { + outputs := []map[string]interface{}{} + for _, outputsItem := range action.Outputs { + outputsItemMap := resourceIBMSchematicsActionVariableDataToMap(outputsItem) + outputs = append(outputs, outputsItemMap) + } + if err = d.Set("action_outputs", outputs); err != nil { + return fmt.Errorf("Error setting action_outputs: %s", err) + } + } + if action.Settings != nil { + settings := []map[string]interface{}{} + for _, settingsItem := range action.Settings { + settingsItemMap := resourceIBMSchematicsActionVariableDataToMap(settingsItem) + settings = append(settings, settingsItemMap) + } + if err = d.Set("settings", settings); err != nil { + return fmt.Errorf("Error setting settings: %s", err) + } + } + if err = d.Set("trigger_record_id", action.TriggerRecordID); err != nil { + return fmt.Errorf("Error setting trigger_record_id: %s", err) + } + if action.State != nil { + stateMap := resourceIBMSchematicsActionActionStateToMap(*action.State) + if err = d.Set("state", []map[string]interface{}{stateMap}); err != nil { + return fmt.Errorf("Error setting state: %s", err) + } + } + if action.SysLock != nil { + sysLockMap := resourceIBMSchematicsActionSystemLockToMap(*action.SysLock) + if err = d.Set("sys_lock", []map[string]interface{}{sysLockMap}); err != nil { + return fmt.Errorf("Error setting sys_lock: %s", err) + } + } + if err = d.Set("crn", action.Crn); err != nil { + return fmt.Errorf("Error setting crn: %s", err) + } + if err = d.Set("account", action.Account); err != nil { + return fmt.Errorf("Error setting account: %s", err) + } + if action.SourceCreatedAt != nil { + if err = d.Set("source_created_at", action.SourceCreatedAt.String()); err != nil { + return fmt.Errorf("Error setting source_created_at: %s", err) + } + } + if err = d.Set("source_created_by", action.SourceCreatedBy); err != nil { + return fmt.Errorf("Error setting source_created_by: %s", err) + } + if action.SourceUpdatedAt != nil { + if err = d.Set("source_updated_at", action.SourceUpdatedAt.String()); err != nil { + return fmt.Errorf("Error setting source_updated_at: %s", err) + } + } + if err = d.Set("source_updated_by", action.SourceUpdatedBy); err != nil { + return fmt.Errorf("Error setting source_updated_by: %s", err) + } + if action.CreatedAt != nil { + if err = d.Set("created_at", action.CreatedAt.String()); err != nil { + return fmt.Errorf("Error setting created_at: %s", err) + } + } + if err = d.Set("created_by", action.CreatedBy); err != nil { + return fmt.Errorf("Error 
setting created_by: %s", err) + } + if action.UpdatedAt != nil { + if err = d.Set("updated_at", action.UpdatedAt.String()); err != nil { + return fmt.Errorf("Error setting updated_at: %s", err) + } + } + if err = d.Set("updated_by", action.UpdatedBy); err != nil { + return fmt.Errorf("Error setting updated_by: %s", err) + } + if err = d.Set("namespace", action.Namespace); err != nil { + return fmt.Errorf("Error setting namespace: %s", err) + } + if action.PlaybookNames != nil && len(action.PlaybookNames) > 0 { + if err = d.Set("playbook_names", action.PlaybookNames); err != nil { + return fmt.Errorf("Error setting playbook_names: %s", err) + } + } else { + d.Set("playbook_names", []string{}) + } + + return nil +} + +func resourceIBMSchematicsActionUserStateToMap(userState schematicsv1.UserState) map[string]interface{} { + userStateMap := map[string]interface{}{} + + userStateMap["state"] = userState.State + userStateMap["set_by"] = userState.SetBy + userStateMap["set_at"] = userState.SetAt.String() + + return userStateMap +} + +func resourceIBMSchematicsActionExternalSourceToMap(externalSource schematicsv1.ExternalSource) map[string]interface{} { + externalSourceMap := map[string]interface{}{} + + externalSourceMap["source_type"] = externalSource.SourceType + if externalSource.Git != nil { + GitMap := resourceIBMSchematicsActionExternalSourceGitToMap(*externalSource.Git) + externalSourceMap["git"] = []map[string]interface{}{GitMap} + } + + return externalSourceMap +} + +func resourceIBMSchematicsActionExternalSourceGitToMap(externalSourceGit schematicsv1.ExternalSourceGit) map[string]interface{} { + externalSourceGitMap := map[string]interface{}{} + + externalSourceGitMap["git_repo_url"] = externalSourceGit.GitRepoURL + externalSourceGitMap["git_token"] = externalSourceGit.GitToken + externalSourceGitMap["git_repo_folder"] = externalSourceGit.GitRepoFolder + externalSourceGitMap["git_release"] = externalSourceGit.GitRelease + externalSourceGitMap["git_branch"] = externalSourceGit.GitBranch + + return externalSourceGitMap +} + +func resourceIBMSchematicsActionTargetResourcesetToMap(targetResourceset schematicsv1.TargetResourceset) map[string]interface{} { + targetResourcesetMap := map[string]interface{}{} + + targetResourcesetMap["name"] = targetResourceset.Name + targetResourcesetMap["type"] = targetResourceset.Type + targetResourcesetMap["description"] = targetResourceset.Description + targetResourcesetMap["resource_query"] = targetResourceset.ResourceQuery + targetResourcesetMap["credential_ref"] = targetResourceset.CredentialRef + targetResourcesetMap["id"] = targetResourceset.ID + targetResourcesetMap["created_at"] = targetResourceset.CreatedAt.String() + targetResourcesetMap["created_by"] = targetResourceset.CreatedBy + targetResourcesetMap["updated_at"] = targetResourceset.UpdatedAt.String() + targetResourcesetMap["updated_by"] = targetResourceset.UpdatedBy + if targetResourceset.SysLock != nil { + SysLockMap := resourceIBMSchematicsActionSystemLockToMap(*targetResourceset.SysLock) + targetResourcesetMap["sys_lock"] = []map[string]interface{}{SysLockMap} + } + if targetResourceset.ResourceIds != nil { + targetResourcesetMap["resource_ids"] = targetResourceset.ResourceIds + } + + return targetResourcesetMap +} + +func resourceIBMSchematicsActionSystemLockToMap(systemLock schematicsv1.SystemLock) map[string]interface{} { + systemLockMap := map[string]interface{}{} + + systemLockMap["sys_locked"] = systemLock.SysLocked + systemLockMap["sys_locked_by"] = systemLock.SysLockedBy + 
systemLockMap["sys_locked_at"] = systemLock.SysLockedAt.String() + + return systemLockMap +} + +func resourceIBMSchematicsActionVariableDataToMap(variableData schematicsv1.VariableData) map[string]interface{} { + variableDataMap := map[string]interface{}{} + + variableDataMap["name"] = variableData.Name + variableDataMap["value"] = variableData.Value + if variableData.Metadata != nil { + MetadataMap := resourceIBMSchematicsActionVariableMetadataToMap(*variableData.Metadata) + variableDataMap["metadata"] = []map[string]interface{}{MetadataMap} + } + variableDataMap["link"] = variableData.Link + + return variableDataMap +} + +func resourceIBMSchematicsActionVariableMetadataToMap(variableMetadata schematicsv1.VariableMetadata) map[string]interface{} { + variableMetadataMap := map[string]interface{}{} + + variableMetadataMap["type"] = variableMetadata.Type + if variableMetadata.Aliases != nil { + variableMetadataMap["aliases"] = variableMetadata.Aliases + } + variableMetadataMap["description"] = variableMetadata.Description + variableMetadataMap["default_value"] = variableMetadata.DefaultValue + variableMetadataMap["secure"] = variableMetadata.Secure + variableMetadataMap["immutable"] = variableMetadata.Immutable + variableMetadataMap["hidden"] = variableMetadata.Hidden + if variableMetadata.Options != nil { + variableMetadataMap["options"] = variableMetadata.Options + } + variableMetadataMap["min_value"] = intValue(variableMetadata.MinValue) + variableMetadataMap["max_value"] = intValue(variableMetadata.MaxValue) + variableMetadataMap["min_length"] = intValue(variableMetadata.MinLength) + variableMetadataMap["max_length"] = intValue(variableMetadata.MaxLength) + variableMetadataMap["matches"] = variableMetadata.Matches + variableMetadataMap["position"] = intValue(variableMetadata.Position) + variableMetadataMap["group_by"] = variableMetadata.GroupBy + variableMetadataMap["source"] = variableMetadata.Source + + return variableMetadataMap +} + +func resourceIBMSchematicsActionActionStateToMap(actionState schematicsv1.ActionState) map[string]interface{} { + actionStateMap := map[string]interface{}{} + + actionStateMap["status_code"] = actionState.StatusCode + actionStateMap["status_job_id"] = actionState.StatusJobID + actionStateMap["status_message"] = actionState.StatusMessage + + return actionStateMap +} + +func resourceIBMSchematicsActionUpdate(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + updateActionOptions := &schematicsv1.UpdateActionOptions{} + + updateActionOptions.SetActionID(d.Id()) + + hasChange := false + + if d.HasChange("name") { + updateActionOptions.SetName(d.Get("name").(string)) + hasChange = true + } + if d.HasChange("description") { + updateActionOptions.SetDescription(d.Get("description").(string)) + hasChange = true + } + if d.HasChange("location") { + updateActionOptions.SetLocation(d.Get("location").(string)) + hasChange = true + } + if d.HasChange("resource_group") { + updateActionOptions.SetResourceGroup(d.Get("resource_group").(string)) + hasChange = true + } + if d.HasChange("tags") { + updateActionOptions.SetTags(expandStringList(d.Get("tags").([]interface{}))) + hasChange = true + } + if d.HasChange("user_state") { + userStateAttr := d.Get("user_state").([]interface{}) + if len(userStateAttr) > 0 { + userState := resourceIBMSchematicsActionMapToUserState(d.Get("user_state.0").(map[string]interface{})) + updateActionOptions.SetUserState(&userState) + hasChange = 
true + } + } + if d.HasChange("source_readme_url") { + updateActionOptions.SetSourceReadmeURL(d.Get("source_readme_url").(string)) + hasChange = true + } + if d.HasChange("source") { + sourceAttr := d.Get("source").([]interface{}) + if len(sourceAttr) > 0 { + source := resourceIBMSchematicsActionMapToExternalSource(d.Get("source.0").(map[string]interface{})) + updateActionOptions.SetSource(&source) + hasChange = true + } + } + if d.HasChange("source_type") { + updateActionOptions.SetSourceType(d.Get("source_type").(string)) + hasChange = true + } + if d.HasChange("command_parameter") { + updateActionOptions.SetCommandParameter(d.Get("command_parameter").(string)) + hasChange = true + } + if d.HasChange("bastion") { + bastionAttr := d.Get("bastion").([]interface{}) + if len(bastionAttr) > 0 { + bastion := resourceIBMSchematicsActionMapToTargetResourceset(d.Get("bastion.0").(map[string]interface{})) + updateActionOptions.SetBastion(&bastion) + hasChange = true + } + } + if d.HasChange("targets_ini") { + updateActionOptions.SetTargetsIni(d.Get("targets_ini").(string)) + hasChange = true + } + if d.HasChange("credentials") { + var credentials []schematicsv1.VariableData + for _, e := range d.Get("credentials").([]interface{}) { + value := e.(map[string]interface{}) + credentialsItem := resourceIBMSchematicsActionMapToVariableData(value) + credentials = append(credentials, credentialsItem) + } + updateActionOptions.SetCredentials(credentials) + hasChange = true + } + if d.HasChange("action_inputs") { + var inputs []schematicsv1.VariableData + for _, e := range d.Get("action_inputs").([]interface{}) { + value := e.(map[string]interface{}) + inputsItem := resourceIBMSchematicsActionMapToVariableData(value) + inputs = append(inputs, inputsItem) + } + updateActionOptions.SetInputs(inputs) + hasChange = true + } + if d.HasChange("action_outputs") { + var outputs []schematicsv1.VariableData + for _, e := range d.Get("action_outputs").([]interface{}) { + value := e.(map[string]interface{}) + outputsItem := resourceIBMSchematicsActionMapToVariableData(value) + outputs = append(outputs, outputsItem) + } + updateActionOptions.SetOutputs(outputs) + hasChange = true + } + if d.HasChange("settings") { + var settings []schematicsv1.VariableData + for _, e := range d.Get("settings").([]interface{}) { + value := e.(map[string]interface{}) + settingsItem := resourceIBMSchematicsActionMapToVariableData(value) + settings = append(settings, settingsItem) + } + updateActionOptions.SetSettings(settings) + hasChange = true + } + if d.HasChange("trigger_record_id") { + updateActionOptions.SetTriggerRecordID(d.Get("trigger_record_id").(string)) + hasChange = true + } + if d.HasChange("state") { + stateAttr := d.Get("state").([]interface{}) + if len(stateAttr) > 0 { + state := resourceIBMSchematicsActionMapToActionState(d.Get("state.0").(map[string]interface{})) + updateActionOptions.SetState(&state) + hasChange = true + } + } + if d.HasChange("sys_lock") { + sysLockAttr := d.Get("sys_lock").([]interface{}) + if len(sysLockAttr) > 0 { + sysLock := resourceIBMSchematicsActionMapToSystemLock(d.Get("sys_lock.0").(map[string]interface{})) + updateActionOptions.SetSysLock(&sysLock) + hasChange = true + } + } + + if hasChange { + _, response, err := schematicsClient.UpdateActionWithContext(context.TODO(), updateActionOptions) + if err != nil { + log.Printf("[DEBUG] UpdateActionWithContext failed %s\n%s", err, response) + return err + } + } + + return resourceIBMSchematicsActionRead(d, meta) +} + +func 
resourceIBMSchematicsActionDelete(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + deleteActionOptions := &schematicsv1.DeleteActionOptions{} + + deleteActionOptions.SetActionID(d.Id()) + + response, err := schematicsClient.DeleteActionWithContext(context.TODO(), deleteActionOptions) + if err != nil { + log.Printf("[DEBUG] DeleteActionWithContext failed %s\n%s", err, response) + return err + } + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_schematics_job.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_schematics_job.go new file mode 100644 index 00000000000..bcf661f5d7e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_schematics_job.go @@ -0,0 +1,2050 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/go-openapi/strfmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/schematics-go-sdk/schematicsv1" +) + +func resourceIBMSchematicsJob() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMSchematicsJobCreate, + Read: resourceIBMSchematicsJobRead, + Update: resourceIBMSchematicsJobUpdate, + Delete: resourceIBMSchematicsJobDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "command_object": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_schematics_job", "command_object"), + Description: "Name of the Schematics automation resource.", + }, + "command_object_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Job command object ID (`workspace-id, action-id or control-id`).", + }, + "command_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: InvokeValidator("ibm_schematics_job", "command_name"), + Description: "Schematics job command name.", + }, + "command_parameter": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Schematics job command parameter (`playbook-name, capsule-name or flow-name`).", + }, + "command_options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Command line options for the command.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "job_inputs": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Job inputs used by an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + 
Optional: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum length of the variable value. Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "job_env_settings": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Environment variables used by the job while performing an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Optional: 
true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum length of the variable value. Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User defined tags, while running the job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: InvokeValidator("ibm_schematics_job", "location"), + Description: "List of action locations supported by IBM Cloud Schematics service. 
**Note** this does not limit the location of the resources provisioned using Schematics.", + }, + "status": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Job Status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action_job_status": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Action Job Status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Action name.", + }, + "status_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Status of the jobs.", + }, + "status_message": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Action job status message to be displayed along with the `action_status_code`.", + }, + "bastion_status_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Status of the resources.", + }, + "bastion_status_message": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Bastion status message to be displayed along with the `bastion_status_code`.", + }, + "targets_status_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Status of the resources.", + }, + "targets_status_message": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Aggregated status message for all target resources, to be displayed along with the `targets_status_code`.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Job status updation timestamp.", + }, + }, + }, + }, + }, + }, + }, + "data": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Job data.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Type of the job.", + }, + "action_job_data": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Action Job data.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Flow name.", + }, + "inputs": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Input variables data used by an action job.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: 
schema.TypeBool, + Optional: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum length of the variable value. Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "outputs": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Output variables data from an action job.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + 
Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "If true, the variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum length of the variable value. Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "settings": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Environment variables used by all the templates in an action.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the variable.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Value for the variable or reference to the value.", + }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "User editable metadata for the variables.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Type of the variable.", + }, + "aliases": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of aliases for the variable name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Description of the meta data.", + }, + "default_value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Default value for the variable, if the override value is not specified.", + }, + "secure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable secure or sensitive ?.", + }, + "immutable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the variable readonly ?.", + }, + "hidden": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "If true, the 
variable will not be displayed on UI or CLI.", + }, + "options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of possible values for this variable. If type is integer or date, then the array of string will be converted to array of integers or date during runtime.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "min_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum value of the variable. Applicable for integer type.", + }, + "max_value": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum value of the variable. Applicable for integer type.", + }, + "min_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Minimum length of the variable value. Applicable for string type.", + }, + "max_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Maximum length of the variable value. Applicable for string type.", + }, + "matches": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Regex for the variable value.", + }, + "position": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Relative position of this variable in a list.", + }, + "group_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Display name of the group this variable belongs to.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Source of this meta-data.", + }, + }, + }, + }, + "link": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Reference link to the variable value By default the expression will point to self.value.", + }, + }, + }, + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Job status updation timestamp.", + }, + }, + }, + }, + }, + }, + }, + "bastion": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Complete target details with the user inputs and the system generated data.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Target name.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Target type (`cluster`, `vsi`, `icd`, `vpc`).", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Target description.", + }, + "resource_query": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Resource selection query string.", + }, + "credential_ref": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Override credential for each resource. 
Reference to credentials values, used by all the resources.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Target ID.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Targets creation time.", + }, + "created_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "E-mail address of the user who created the targets.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Targets updation time.", + }, + "updated_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "E-mail address of user who updated the targets.", + }, + "sys_lock": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "System lock status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sys_locked": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Is the Workspace locked by the Schematic action ?.", + }, + "sys_locked_by": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the user who performed the action, that lead to lock the Workspace.", + }, + "sys_locked_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "When the user performed the action that lead to lock the Workspace ?.", + }, + }, + }, + }, + "resource_ids": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Array of the resource IDs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "job_log_summary": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Job log summary record.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Workspace ID.", + }, + "job_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Type of Job.", + }, + "log_start_at": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Job log start timestamp.", + }, + "log_analyzed_till": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Job log update timestamp.", + }, + "elapsed_time": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Description: "Job log elapsed time (`log_analyzed_till - log_start_at`).", + }, + "log_errors": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Job log errors.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Error code in the Log.", + }, + "error_msg": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Summary error message in the log.", + }, + "error_count": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Description: "Number of occurrence.", + }, + }, + }, + }, + "repo_download_job": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Repo download Job log summary.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scanned_file_count": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Description: "Number of files 
scanned.", + }, + "quarantined_file_count": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Description: "Number of files quarantined.", + }, + "detected_filetype": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Detected template or data file type.", + }, + "inputs_count": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Number of inputs detected.", + }, + "outputs_count": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Number of outputs detected.", + }, + }, + }, + }, + "action_job": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Flow Job log summary.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_count": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Description: "number of targets or hosts.", + }, + "task_count": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Description: "number of tasks in playbook.", + }, + "play_count": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Description: "number of plays in playbook.", + }, + "recap": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Recap records.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of target or host name.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "ok": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Description: "Number of OK.", + }, + "changed": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Description: "Number of changed.", + }, + "failed": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Description: "Number of failed.", + }, + "skipped": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Description: "Number of skipped.", + }, + "unreachable": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Description: "Number of unreachable.", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job name, uniquely derived from the related action.", + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job description derived from the related action.", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Resource group name derived from the related action.", + }, + "submitted_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job submission time.", + }, + "submitted_by": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "E-mail address of the user who submitted the job.", + }, + "start_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job start time.", + }, + "end_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job end time.", + }, + "duration": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Duration of job execution, for example, `40 sec`.", + }, + "targets_ini": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Inventory of host and host group for the playbook in `INI` file format. 
For example, `\"targets_ini\": \"[webserverhost] 172.22.192.6 [dbhost] 172.22.192.5\"`. For more information about inventory host group syntax, see [Inventory host groups](/docs/schematics?topic=schematics-schematics-cli-reference#schematics-inventory-host-grps).", + }, + "log_store_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job log store URL.", + }, + "state_store_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job state store URL.", + }, + "results_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job results store URL.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Job status update timestamp.", + }, + }, + } +} + +func resourceIBMSchematicsJobValidator() *ResourceValidator { + validateSchema := make([]ValidateSchema, 0) + validateSchema = append(validateSchema, + ValidateSchema{ + Identifier: "command_object", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Optional: true, + AllowedValues: "action, workspace", + }, + ValidateSchema{ + Identifier: "command_name", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Optional: true, + AllowedValues: "ansible_playbook_check, ansible_playbook_run, helm_install, helm_list, helm_show, opa_evaluate, terraform_init, terrform_apply, terrform_destroy, terrform_plan, terrform_refresh, terrform_show, terrform_taint, workspace_apply_flow, workspace_custom_flow, workspace_destroy_flow, workspace_init_flow, workspace_plan_flow, workspace_refresh_flow, workspace_show_flow", + }, + ValidateSchema{ + Identifier: "location", + ValidateFunctionIdentifier: ValidateAllowedStringValue, + Type: TypeString, + Optional: true, + AllowedValues: "eu-de, eu-gb, us-east, us-south", + }) + + resourceValidator := ResourceValidator{ResourceName: "ibm_schematics_job", Schema: validateSchema} + return &resourceValidator +} + +func resourceIBMSchematicsJobCreate(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + session, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + iamRefreshToken := session.Config.IAMRefreshToken + + createJobOptions := &schematicsv1.CreateJobOptions{} + createJobOptions.SetRefreshToken(iamRefreshToken) + + if _, ok := d.GetOk("command_object"); ok { + createJobOptions.SetCommandObject(d.Get("command_object").(string)) + } + if _, ok := d.GetOk("command_object_id"); ok { + createJobOptions.SetCommandObjectID(d.Get("command_object_id").(string)) + } + if _, ok := d.GetOk("command_name"); ok { + createJobOptions.SetCommandName(d.Get("command_name").(string)) + } + if _, ok := d.GetOk("command_parameter"); ok { + createJobOptions.SetCommandParameter(d.Get("command_parameter").(string)) + } + if _, ok := d.GetOk("command_options"); ok { + createJobOptions.SetCommandOptions(expandStringList(d.Get("command_options").([]interface{}))) + } + if _, ok := d.GetOk("job_inputs"); ok { + var inputs []schematicsv1.VariableData + for _, e := range d.Get("job_inputs").([]interface{}) { + value := e.(map[string]interface{}) + inputsItem := resourceIBMSchematicsJobMapToVariableData(value) + inputs = append(inputs, inputsItem) + } + createJobOptions.SetInputs(inputs) + }
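+ // NOTE (editorial sketch, not part of the provider): each job_inputs or job_env_settings + // entry arrives from the Terraform schema as a map[string]interface{}, for example + // map[string]interface{}{"name": "TF_LOG", "value": "debug", "metadata": []interface{}{...}}, + // and resourceIBMSchematicsJobMapToVariableData converts it into a schematicsv1.VariableData. + if _, ok := d.GetOk("job_env_settings"); ok { + var settings []schematicsv1.VariableData + for _, e := range d.Get("job_env_settings").([]interface{}) { + value := 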
e.(map[string]interface{}) + settingsItem := resourceIBMSchematicsJobMapToVariableData(value) + settings = append(settings, settingsItem) + } + createJobOptions.SetSettings(settings) + } + if _, ok := d.GetOk("tags"); ok { + createJobOptions.SetTags(expandStringList(d.Get("tags").([]interface{}))) + } + if _, ok := d.GetOk("location"); ok { + createJobOptions.SetLocation(d.Get("location").(string)) + } + if _, ok := d.GetOk("status"); ok { + statusAttr := d.Get("status").([]interface{}) + if len(statusAttr) > 0 { + status := resourceIBMSchematicsJobMapToJobStatus(d.Get("status.0").(map[string]interface{})) + createJobOptions.SetStatus(&status) + } + } + if _, ok := d.GetOk("data"); ok { + dataAttr := d.Get("data").([]interface{}) + if len(dataAttr) > 0 { + data := resourceIBMSchematicsJobMapToJobData(d.Get("data.0").(map[string]interface{})) + createJobOptions.SetData(&data) + } + } + if _, ok := d.GetOk("bastion"); ok { + bastionAttr := d.Get("bastion").([]interface{}) + if len(bastionAttr) > 0 { + bastion := resourceIBMSchematicsJobMapToTargetResourceset(d.Get("bastion.0").(map[string]interface{})) + createJobOptions.SetBastion(&bastion) + } + } + if _, ok := d.GetOk("job_log_summary"); ok { + jobLogSummaryAttr := d.Get("job_log_summary").([]interface{}) + if len(jobLogSummaryAttr) > 0 { + logSummary := resourceIBMSchematicsJobMapToJobLogSummary(d.Get("job_log_summary.0").(map[string]interface{})) + createJobOptions.SetLogSummary(&logSummary) + } + } + + job, response, err := schematicsClient.CreateJobWithContext(context.TODO(), createJobOptions) + if err != nil { + log.Printf("[DEBUG] CreateJobWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*job.ID) + + return resourceIBMSchematicsJobRead(d, meta) +} + +func resourceIBMSchematicsJobMapToVariableData(variableDataMap map[string]interface{}) schematicsv1.VariableData { + variableData := schematicsv1.VariableData{} + + if variableDataMap["name"] != nil { + variableData.Name = core.StringPtr(variableDataMap["name"].(string)) + } + if variableDataMap["value"] != nil { + variableData.Value = core.StringPtr(variableDataMap["value"].(string)) + } + if variableDataMap["metadata"] != nil && len(variableDataMap["metadata"].([]interface{})) != 0 { + variableMetaData := resourceIBMSchematicsJobMapToVariableMetadata(variableDataMap["metadata"].([]interface{})[0].(map[string]interface{})) + variableData.Metadata = &variableMetaData + } + if variableDataMap["link"] != nil { + variableData.Link = core.StringPtr(variableDataMap["link"].(string)) + } + + return variableData +} + +func resourceIBMSchematicsJobMapToVariableMetadata(variableMetadataMap map[string]interface{}) schematicsv1.VariableMetadata { + variableMetadata := schematicsv1.VariableMetadata{} + + if variableMetadataMap["type"] != nil { + variableMetadata.Type = core.StringPtr(variableMetadataMap["type"].(string)) + } + if variableMetadataMap["aliases"] != nil { + aliases := []string{} + for _, aliasesItem := range variableMetadataMap["aliases"].([]interface{}) { + aliases = append(aliases, aliasesItem.(string)) + } + variableMetadata.Aliases = aliases + } + if variableMetadataMap["description"] != nil { + variableMetadata.Description = core.StringPtr(variableMetadataMap["description"].(string)) + } + if variableMetadataMap["default_value"] != nil { + variableMetadata.DefaultValue = core.StringPtr(variableMetadataMap["default_value"].(string)) + } + if variableMetadataMap["secure"] != nil { + variableMetadata.Secure = core.BoolPtr(variableMetadataMap["secure"].(bool)) 
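+ // NOTE (editorial comment): core.BoolPtr, core.StringPtr, core.Int64Ptr, and + // core.Float64Ptr from the IBM go-sdk-core simply return pointers to their arguments; + // the schematicsv1 models use pointer fields so unset values stay distinguishable + // from zero values.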
+ } + if variableMetadataMap["immutable"] != nil { + variableMetadata.Immutable = core.BoolPtr(variableMetadataMap["immutable"].(bool)) + } + if variableMetadataMap["hidden"] != nil { + variableMetadata.Hidden = core.BoolPtr(variableMetadataMap["hidden"].(bool)) + } + if variableMetadataMap["options"] != nil { + options := []string{} + for _, optionsItem := range variableMetadataMap["options"].([]interface{}) { + options = append(options, optionsItem.(string)) + } + variableMetadata.Options = options + } + if variableMetadataMap["min_value"] != nil { + variableMetadata.MinValue = core.Int64Ptr(int64(variableMetadataMap["min_value"].(int))) + } + if variableMetadataMap["max_value"] != nil { + variableMetadata.MaxValue = core.Int64Ptr(int64(variableMetadataMap["max_value"].(int))) + } + if variableMetadataMap["min_length"] != nil { + variableMetadata.MinLength = core.Int64Ptr(int64(variableMetadataMap["min_length"].(int))) + } + if variableMetadataMap["max_length"] != nil { + variableMetadata.MaxLength = core.Int64Ptr(int64(variableMetadataMap["max_length"].(int))) + } + if variableMetadataMap["matches"] != nil { + variableMetadata.Matches = core.StringPtr(variableMetadataMap["matches"].(string)) + } + if variableMetadataMap["position"] != nil { + variableMetadata.Position = core.Int64Ptr(int64(variableMetadataMap["position"].(int))) + } + if variableMetadataMap["group_by"] != nil { + variableMetadata.GroupBy = core.StringPtr(variableMetadataMap["group_by"].(string)) + } + if variableMetadataMap["source"] != nil { + variableMetadata.Source = core.StringPtr(variableMetadataMap["source"].(string)) + } + + return variableMetadata +} + +func resourceIBMSchematicsJobMapToJobStatus(jobStatusMap map[string]interface{}) schematicsv1.JobStatus { + jobStatus := schematicsv1.JobStatus{} + + if jobStatusMap["action_job_status"] != nil { + actionJobStatus := resourceIBMSchematicsJobMapToJobStatusAction(jobStatusMap["action_job_status"].([]interface{})[0].(map[string]interface{})) + jobStatus.ActionJobStatus = &actionJobStatus + } + + return jobStatus +} + +func resourceIBMSchematicsJobMapToJobStatusAction(jobStatusActionMap map[string]interface{}) schematicsv1.JobStatusAction { + jobStatusAction := schematicsv1.JobStatusAction{} + + if jobStatusActionMap["action_name"] != nil { + jobStatusAction.ActionName = core.StringPtr(jobStatusActionMap["action_name"].(string)) + } + if jobStatusActionMap["status_code"] != nil { + jobStatusAction.StatusCode = core.StringPtr(jobStatusActionMap["status_code"].(string)) + } + if jobStatusActionMap["status_message"] != nil { + jobStatusAction.StatusMessage = core.StringPtr(jobStatusActionMap["status_message"].(string)) + } + if jobStatusActionMap["bastion_status_code"] != nil { + jobStatusAction.BastionStatusCode = core.StringPtr(jobStatusActionMap["bastion_status_code"].(string)) + } + if jobStatusActionMap["bastion_status_message"] != nil { + jobStatusAction.BastionStatusMessage = core.StringPtr(jobStatusActionMap["bastion_status_message"].(string)) + } + if jobStatusActionMap["targets_status_code"] != nil { + jobStatusAction.TargetsStatusCode = core.StringPtr(jobStatusActionMap["targets_status_code"].(string)) + } + if jobStatusActionMap["targets_status_message"] != nil { + jobStatusAction.TargetsStatusMessage = core.StringPtr(jobStatusActionMap["targets_status_message"].(string)) + } + if jobStatusActionMap["updated_at"] != nil { + updatedAt, err := strfmt.ParseDateTime(jobStatusActionMap["updated_at"].(string)) + if err == nil { + jobStatusAction.UpdatedAt = &updatedAt + } + } + + return jobStatusAction +}
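+ +// NOTE (editorial comment): timestamp fields such as updated_at arrive as RFC3339 strings +// (an illustrative value would be "2021-04-30T10:20:30.000Z") and are parsed with +// strfmt.ParseDateTime; the SDK field is set only when parsing succeeds, so empty or +// malformed strings are skipped.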
+func resourceIBMSchematicsJobMapToJobData(jobDataMap map[string]interface{}) schematicsv1.JobData { + jobData := schematicsv1.JobData{} + + jobData.JobType = core.StringPtr(jobDataMap["job_type"].(string)) + if jobDataMap["action_job_data"] != nil { + actionJobData := resourceIBMSchematicsJobMapToJobDataAction(jobDataMap["action_job_data"].([]interface{})[0].(map[string]interface{})) + jobData.ActionJobData = &actionJobData + } + + return jobData +} + +func resourceIBMSchematicsJobMapToJobDataAction(jobDataActionMap map[string]interface{}) schematicsv1.JobDataAction { + jobDataAction := schematicsv1.JobDataAction{} + + if jobDataActionMap["action_name"] != nil { + jobDataAction.ActionName = core.StringPtr(jobDataActionMap["action_name"].(string)) + } + if jobDataActionMap["inputs"] != nil { + inputs := []schematicsv1.VariableData{} + for _, inputsItem := range jobDataActionMap["inputs"].([]interface{}) { + inputsItemModel := resourceIBMSchematicsJobMapToVariableData(inputsItem.(map[string]interface{})) + inputs = append(inputs, inputsItemModel) + } + jobDataAction.Inputs = inputs + } + if jobDataActionMap["outputs"] != nil { + outputs := []schematicsv1.VariableData{} + for _, outputsItem := range jobDataActionMap["outputs"].([]interface{}) { + outputsItemModel := resourceIBMSchematicsJobMapToVariableData(outputsItem.(map[string]interface{})) + outputs = append(outputs, outputsItemModel) + } + jobDataAction.Outputs = outputs + } + if jobDataActionMap["settings"] != nil { + settings := []schematicsv1.VariableData{} + for _, settingsItem := range jobDataActionMap["settings"].([]interface{}) { + settingsItemModel := resourceIBMSchematicsJobMapToVariableData(settingsItem.(map[string]interface{})) + settings = append(settings, settingsItemModel) + } + jobDataAction.Settings = settings + } + if jobDataActionMap["updated_at"] != nil { + updatedAt, err := strfmt.ParseDateTime(jobDataActionMap["updated_at"].(string)) + if err == nil { + jobDataAction.UpdatedAt = &updatedAt + } + } + + return jobDataAction +} + +func resourceIBMSchematicsJobMapToTargetResourceset(targetResourcesetMap map[string]interface{}) schematicsv1.TargetResourceset { + targetResourceset := schematicsv1.TargetResourceset{} + + if targetResourcesetMap["name"] != nil { + targetResourceset.Name = core.StringPtr(targetResourcesetMap["name"].(string)) + } + if targetResourcesetMap["type"] != nil { + targetResourceset.Type = core.StringPtr(targetResourcesetMap["type"].(string)) + } + if targetResourcesetMap["description"] != nil { + targetResourceset.Description = core.StringPtr(targetResourcesetMap["description"].(string)) + } + if targetResourcesetMap["resource_query"] != nil { + targetResourceset.ResourceQuery = core.StringPtr(targetResourcesetMap["resource_query"].(string)) + } + if targetResourcesetMap["credential_ref"] != nil { + targetResourceset.CredentialRef = core.StringPtr(targetResourcesetMap["credential_ref"].(string)) + } + if targetResourcesetMap["id"] != nil { + targetResourceset.ID = core.StringPtr(targetResourcesetMap["id"].(string)) + } + if targetResourcesetMap["created_at"] != nil { + createdAt, err := strfmt.ParseDateTime(targetResourcesetMap["created_at"].(string)) + if err == nil { + targetResourceset.CreatedAt = &createdAt + } + } + if targetResourcesetMap["created_by"] != nil { + targetResourceset.CreatedBy = core.StringPtr(targetResourcesetMap["created_by"].(string)) + } + if targetResourcesetMap["updated_at"] != nil { + updatedAt, err := strfmt.ParseDateTime(targetResourcesetMap["updated_at"].(string)) + if err == nil { + targetResourceset.UpdatedAt = &updatedAt + } + } + if targetResourcesetMap["updated_by"] != nil { + targetResourceset.UpdatedBy = core.StringPtr(targetResourcesetMap["updated_by"].(string)) + } + if targetResourcesetMap["sys_lock"] != nil && len(targetResourcesetMap["sys_lock"].([]interface{})) > 0 { + sysLock := resourceIBMSchematicsJobMapToSystemLock(targetResourcesetMap["sys_lock"].([]interface{})[0].(map[string]interface{})) + targetResourceset.SysLock = &sysLock + } + if targetResourcesetMap["resource_ids"] != nil { + resourceIds := []string{} + for _, resourceIdsItem := range targetResourcesetMap["resource_ids"].([]interface{}) { + resourceIds = append(resourceIds, resourceIdsItem.(string)) + } + targetResourceset.ResourceIds = resourceIds + } + + return targetResourceset +}
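+ +// NOTE (editorial comment): nested blocks declared as schema.TypeList arrive from the +// resource data as a []interface{} whose first element is a map[string]interface{}; that +// is why single-element blocks such as action_job_data and sys_lock are unwrapped with +// .([]interface{})[0].(map[string]interface{}) before being handed to their converters.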
+func resourceIBMSchematicsJobMapToSystemLock(systemLockMap map[string]interface{}) schematicsv1.SystemLock { + systemLock := schematicsv1.SystemLock{} + + if systemLockMap["sys_locked"] != nil { + systemLock.SysLocked = core.BoolPtr(systemLockMap["sys_locked"].(bool)) + } + if systemLockMap["sys_locked_by"] != nil { + systemLock.SysLockedBy = core.StringPtr(systemLockMap["sys_locked_by"].(string)) + } + if systemLockMap["sys_locked_at"] != nil { + sysLockedAt, err := strfmt.ParseDateTime(systemLockMap["sys_locked_at"].(string)) + if err == nil { + systemLock.SysLockedAt = &sysLockedAt + } + } + + return systemLock +} + +func resourceIBMSchematicsJobMapToJobLogSummary(jobLogSummaryMap map[string]interface{}) schematicsv1.JobLogSummary { + jobLogSummary := schematicsv1.JobLogSummary{} + + if jobLogSummaryMap["job_id"] != nil { + jobLogSummary.JobID = core.StringPtr(jobLogSummaryMap["job_id"].(string)) + } + if jobLogSummaryMap["job_type"] != nil { + jobLogSummary.JobType = core.StringPtr(jobLogSummaryMap["job_type"].(string)) + } + if jobLogSummaryMap["log_start_at"] != nil { + logStartAt, err := strfmt.ParseDateTime(jobLogSummaryMap["log_start_at"].(string)) + if err == nil { + jobLogSummary.LogStartAt = &logStartAt + } + } + if jobLogSummaryMap["log_analyzed_till"] != nil { + logAnalyzedTill, err := strfmt.ParseDateTime(jobLogSummaryMap["log_analyzed_till"].(string)) + if err == nil { + jobLogSummary.LogAnalyzedTill = &logAnalyzedTill + } + } + if jobLogSummaryMap["elapsed_time"] != nil { + jobLogSummary.ElapsedTime = core.Float64Ptr(jobLogSummaryMap["elapsed_time"].(float64)) + } + if jobLogSummaryMap["log_errors"] != nil { + logErrors := []schematicsv1.JobLogSummaryLogErrorsItem{} + for _, logErrorsItem := range jobLogSummaryMap["log_errors"].([]interface{}) { + logErrorsItemModel := resourceIBMSchematicsJobMapToJobLogSummaryLogErrorsItem(logErrorsItem.(map[string]interface{})) + logErrors = append(logErrors, logErrorsItemModel) + } + jobLogSummary.LogErrors = logErrors + } + if jobLogSummaryMap["repo_download_job"] != nil { + repoDownloadJob := resourceIBMSchematicsJobMapToJobLogSummaryRepoDownloadJob(jobLogSummaryMap["repo_download_job"].([]interface{})[0].(map[string]interface{})) + jobLogSummary.RepoDownloadJob = &repoDownloadJob + } + if jobLogSummaryMap["action_job"] != nil { + actionJob := resourceIBMSchematicsJobMapToJobLogSummaryActionJob(jobLogSummaryMap["action_job"].([]interface{})[0].(map[string]interface{})) + jobLogSummary.ActionJob = &actionJob + } + + return jobLogSummary +} + +func resourceIBMSchematicsJobMapToJobLogSummaryLogErrorsItem(jobLogSummaryLogErrorsItemMap map[string]interface{}) schematicsv1.JobLogSummaryLogErrorsItem { + jobLogSummaryLogErrorsItem := schematicsv1.JobLogSummaryLogErrorsItem{} + + if jobLogSummaryLogErrorsItemMap["error_code"] != nil { + jobLogSummaryLogErrorsItem.ErrorCode = core.StringPtr(jobLogSummaryLogErrorsItemMap["error_code"].(string)) + } + if jobLogSummaryLogErrorsItemMap["error_msg"] != nil { + jobLogSummaryLogErrorsItem.ErrorMsg = core.StringPtr(jobLogSummaryLogErrorsItemMap["error_msg"].(string)) + } + if jobLogSummaryLogErrorsItemMap["error_count"] != nil { + jobLogSummaryLogErrorsItem.ErrorCount = core.Float64Ptr(jobLogSummaryLogErrorsItemMap["error_count"].(float64)) + } + + return jobLogSummaryLogErrorsItem +}
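+ +// NOTE (editorial comment): counts in the log summary mirror the schema above: fields +// declared schema.TypeFloat arrive as float64, while inputs_count and outputs_count are +// carried as strings; the nil checks simply skip fields that were never set. +func resourceIBMSchematicsJobMapToJobLogSummaryRepoDownloadJob(jobLogSummaryRepoDownloadJobMap map[string]interface{}) schematicsv1.JobLogSummaryRepoDownloadJob { + jobLogSummaryRepoDownloadJob := schematicsv1.JobLogSummaryRepoDownloadJob{} + + if jobLogSummaryRepoDownloadJobMap["scanned_file_count"] != nil { + 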
jobLogSummaryRepoDownloadJob.ScannedFileCount = core.Float64Ptr(jobLogSummaryRepoDownloadJobMap["scanned_file_count"].(float64)) + } + if jobLogSummaryRepoDownloadJobMap["quarantined_file_count"] != nil { + jobLogSummaryRepoDownloadJob.QuarantinedFileCount = core.Float64Ptr(jobLogSummaryRepoDownloadJobMap["quarantined_file_count"].(float64)) + } + if jobLogSummaryRepoDownloadJobMap["detected_filetype"] != nil { + jobLogSummaryRepoDownloadJob.DetectedFiletype = core.StringPtr(jobLogSummaryRepoDownloadJobMap["detected_filetype"].(string)) + } + if jobLogSummaryRepoDownloadJobMap["inputs_count"] != nil { + jobLogSummaryRepoDownloadJob.InputsCount = core.StringPtr(jobLogSummaryRepoDownloadJobMap["inputs_count"].(string)) + } + if jobLogSummaryRepoDownloadJobMap["outputs_count"] != nil { + jobLogSummaryRepoDownloadJob.OutputsCount = core.StringPtr(jobLogSummaryRepoDownloadJobMap["outputs_count"].(string)) + } + + return jobLogSummaryRepoDownloadJob +} + +func resourceIBMSchematicsJobMapToJobLogSummaryActionJob(jobLogSummaryActionJobMap map[string]interface{}) schematicsv1.JobLogSummaryActionJob { + jobLogSummaryActionJob := schematicsv1.JobLogSummaryActionJob{} + + if jobLogSummaryActionJobMap["target_count"] != nil { + jobLogSummaryActionJob.TargetCount = core.Float64Ptr(jobLogSummaryActionJobMap["target_count"].(float64)) + } + if jobLogSummaryActionJobMap["task_count"] != nil { + jobLogSummaryActionJob.TaskCount = core.Float64Ptr(jobLogSummaryActionJobMap["task_count"].(float64)) + } + if jobLogSummaryActionJobMap["play_count"] != nil { + jobLogSummaryActionJob.PlayCount = core.Float64Ptr(jobLogSummaryActionJobMap["play_count"].(float64)) + } + if jobLogSummaryActionJobMap["recap"] != nil { + recap := resourceIBMSchematicsJobMapToJobLogSummaryActionJobRecap(jobLogSummaryActionJobMap["recap"].([]interface{})[0].(map[string]interface{})) + jobLogSummaryActionJob.Recap = &recap + } + + return jobLogSummaryActionJob +} + +func resourceIBMSchematicsJobMapToJobLogSummaryActionJobRecap(jobLogSummaryActionJobRecapMap map[string]interface{}) schematicsv1.JobLogSummaryActionJobRecap { + jobLogSummaryActionJobRecap := schematicsv1.JobLogSummaryActionJobRecap{} + + if jobLogSummaryActionJobRecapMap["target"] != nil { + target := []string{} + for _, targetItem := range jobLogSummaryActionJobRecapMap["target"].([]interface{}) { + target = append(target, targetItem.(string)) + } + jobLogSummaryActionJobRecap.Target = target + } + if jobLogSummaryActionJobRecapMap["ok"] != nil { + jobLogSummaryActionJobRecap.Ok = core.Float64Ptr(jobLogSummaryActionJobRecapMap["ok"].(float64)) + } + if jobLogSummaryActionJobRecapMap["changed"] != nil { + jobLogSummaryActionJobRecap.Changed = core.Float64Ptr(jobLogSummaryActionJobRecapMap["changed"].(float64)) + } + if jobLogSummaryActionJobRecapMap["failed"] != nil { + jobLogSummaryActionJobRecap.Failed = core.Float64Ptr(jobLogSummaryActionJobRecapMap["failed"].(float64)) + } + if jobLogSummaryActionJobRecapMap["skipped"] != nil { + jobLogSummaryActionJobRecap.Skipped = core.Float64Ptr(jobLogSummaryActionJobRecapMap["skipped"].(float64)) + } + if jobLogSummaryActionJobRecapMap["unreachable"] != nil { + jobLogSummaryActionJobRecap.Unreachable = core.Float64Ptr(jobLogSummaryActionJobRecapMap["unreachable"].(float64)) + } + + return jobLogSummaryActionJobRecap +} + +func resourceIBMSchematicsJobRead(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + getJobOptions := 
&schematicsv1.GetJobOptions{} + + getJobOptions.SetJobID(d.Id()) + + job, response, err := schematicsClient.GetJobWithContext(context.TODO(), getJobOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetJobWithContext failed %s\n%s", err, response) + return err + } + + if err = d.Set("command_object", job.CommandObject); err != nil { + return fmt.Errorf("Error setting command_object: %s", err) + } + if err = d.Set("command_object_id", job.CommandObjectID); err != nil { + return fmt.Errorf("Error setting command_object_id: %s", err) + } + if err = d.Set("command_name", job.CommandName); err != nil { + return fmt.Errorf("Error setting command_name: %s", err) + } + if _, ok := d.GetOk("command_parameter"); ok { + if err = d.Set("command_parameter", d.Get("command_parameter").(string)); err != nil { + return fmt.Errorf("Error setting command_parameter: %s", err) + } + } + if job.CommandOptions != nil { + if err = d.Set("command_options", job.CommandOptions); err != nil { + return fmt.Errorf("Error setting command_options: %s", err) + } + } + if job.Inputs != nil { + inputs := []map[string]interface{}{} + for _, inputsItem := range job.Inputs { + inputsItemMap := resourceIBMSchematicsJobVariableDataToMap(inputsItem) + inputs = append(inputs, inputsItemMap) + } + if err = d.Set("job_inputs", inputs); err != nil { + return fmt.Errorf("Error setting job_inputs: %s", err) + } + } + if job.Settings != nil { + settings := []map[string]interface{}{} + for _, settingsItem := range job.Settings { + settingsItemMap := resourceIBMSchematicsJobVariableDataToMap(settingsItem) + settings = append(settings, settingsItemMap) + } + if err = d.Set("job_env_settings", settings); err != nil { + return fmt.Errorf("Error setting job_env_settings: %s", err) + } + } + if job.Tags != nil { + if err = d.Set("tags", job.Tags); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + } + if err = d.Set("location", job.Location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if job.Status != nil { + statusMap := resourceIBMSchematicsJobJobStatusToMap(*job.Status) + if err = d.Set("status", []map[string]interface{}{statusMap}); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + } + if job.Data != nil { + dataMap := resourceIBMSchematicsJobJobDataToMap(*job.Data) + if err = d.Set("data", []map[string]interface{}{dataMap}); err != nil { + return fmt.Errorf("Error setting data: %s", err) + } + } + if job.Bastion != nil { + bastionMap := resourceIBMSchematicsJobTargetResourcesetToMap(*job.Bastion) + if err = d.Set("bastion", []map[string]interface{}{bastionMap}); err != nil { + return fmt.Errorf("Error setting bastion: %s", err) + } + } + if job.LogSummary != nil { + logSummaryMap := resourceIBMSchematicsJobJobLogSummaryToMap(*job.LogSummary) + if err = d.Set("job_log_summary", []map[string]interface{}{logSummaryMap}); err != nil { + return fmt.Errorf("Error setting job_log_summary: %s", err) + } + } + if err = d.Set("name", job.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("description", job.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("resource_group", job.ResourceGroup); err != nil { + return fmt.Errorf("Error setting resource_group: %s", err) + } + if err = d.Set("submitted_at", job.SubmittedAt.String()); err != nil { + return fmt.Errorf("Error setting submitted_at: %s", err) + 
} + if err = d.Set("submitted_by", job.SubmittedBy); err != nil { + return fmt.Errorf("Error setting submitted_by: %s", err) + } + if err = d.Set("start_at", job.StartAt.String()); err != nil { + return fmt.Errorf("Error setting start_at: %s", err) + } + if err = d.Set("end_at", job.EndAt.String()); err != nil { + return fmt.Errorf("Error setting end_at: %s", err) + } + if err = d.Set("duration", job.Duration); err != nil { + return fmt.Errorf("Error setting duration: %s", err) + } + if err = d.Set("targets_ini", job.TargetsIni); err != nil { + return fmt.Errorf("Error setting targets_ini: %s", err) + } + if err = d.Set("log_store_url", job.LogStoreURL); err != nil { + return fmt.Errorf("Error setting log_store_url: %s", err) + } + if err = d.Set("state_store_url", job.StateStoreURL); err != nil { + return fmt.Errorf("Error setting state_store_url: %s", err) + } + if err = d.Set("results_url", job.ResultsURL); err != nil { + return fmt.Errorf("Error setting results_url: %s", err) + } + if err = d.Set("updated_at", job.UpdatedAt.String()); err != nil { + return fmt.Errorf("Error setting updated_at: %s", err) + } + + return nil +} + +func resourceIBMSchematicsJobVariableDataToMap(variableData schematicsv1.VariableData) map[string]interface{} { + variableDataMap := map[string]interface{}{} + + variableDataMap["name"] = variableData.Name + variableDataMap["value"] = variableData.Value + if variableData.Metadata != nil { + MetadataMap := resourceIBMSchematicsJobVariableMetadataToMap(*variableData.Metadata) + variableDataMap["metadata"] = []map[string]interface{}{MetadataMap} + } + variableDataMap["link"] = variableData.Link + + return variableDataMap +} + +func resourceIBMSchematicsJobVariableMetadataToMap(variableMetadata schematicsv1.VariableMetadata) map[string]interface{} { + variableMetadataMap := map[string]interface{}{} + + variableMetadataMap["type"] = variableMetadata.Type + if variableMetadata.Aliases != nil { + variableMetadataMap["aliases"] = variableMetadata.Aliases + } + variableMetadataMap["description"] = variableMetadata.Description + variableMetadataMap["default_value"] = variableMetadata.DefaultValue + variableMetadataMap["secure"] = variableMetadata.Secure + variableMetadataMap["immutable"] = variableMetadata.Immutable + variableMetadataMap["hidden"] = variableMetadata.Hidden + if variableMetadata.Options != nil { + variableMetadataMap["options"] = variableMetadata.Options + } + variableMetadataMap["min_value"] = intValue(variableMetadata.MinValue) + variableMetadataMap["max_value"] = intValue(variableMetadata.MaxValue) + variableMetadataMap["min_length"] = intValue(variableMetadata.MinLength) + variableMetadataMap["max_length"] = intValue(variableMetadata.MaxLength) + variableMetadataMap["matches"] = variableMetadata.Matches + variableMetadataMap["position"] = intValue(variableMetadata.Position) + variableMetadataMap["group_by"] = variableMetadata.GroupBy + variableMetadataMap["source"] = variableMetadata.Source + + return variableMetadataMap +} + +func resourceIBMSchematicsJobJobStatusToMap(jobStatus schematicsv1.JobStatus) map[string]interface{} { + jobStatusMap := map[string]interface{}{} + + if jobStatus.ActionJobStatus != nil { + ActionJobStatusMap := resourceIBMSchematicsJobJobStatusActionToMap(*jobStatus.ActionJobStatus) + jobStatusMap["action_job_status"] = []map[string]interface{}{ActionJobStatusMap} + } + + return jobStatusMap +} + +func resourceIBMSchematicsJobJobStatusActionToMap(jobStatusAction schematicsv1.JobStatusAction) map[string]interface{} { + 
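+ // NOTE (editorial comment): like the other ...ToMap helpers in this file, this function + // is the inverse of its MapTo... counterpart, flattening an SDK model back into the + // schema representation that Read uses to populate the Terraform state.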
jobStatusActionMap := map[string]interface{}{} + + jobStatusActionMap["action_name"] = jobStatusAction.ActionName + jobStatusActionMap["status_code"] = jobStatusAction.StatusCode + jobStatusActionMap["status_message"] = jobStatusAction.StatusMessage + jobStatusActionMap["bastion_status_code"] = jobStatusAction.BastionStatusCode + jobStatusActionMap["bastion_status_message"] = jobStatusAction.BastionStatusMessage + jobStatusActionMap["targets_status_code"] = jobStatusAction.TargetsStatusCode + jobStatusActionMap["targets_status_message"] = jobStatusAction.TargetsStatusMessage + jobStatusActionMap["updated_at"] = jobStatusAction.UpdatedAt.String() + + return jobStatusActionMap +} + +func resourceIBMSchematicsJobJobDataToMap(jobData schematicsv1.JobData) map[string]interface{} { + jobDataMap := map[string]interface{}{} + + jobDataMap["job_type"] = jobData.JobType + if jobData.ActionJobData != nil { + ActionJobDataMap := resourceIBMSchematicsJobJobDataActionToMap(*jobData.ActionJobData) + jobDataMap["action_job_data"] = []map[string]interface{}{ActionJobDataMap} + } + + return jobDataMap +} + +func resourceIBMSchematicsJobJobDataActionToMap(jobDataAction schematicsv1.JobDataAction) map[string]interface{} { + jobDataActionMap := map[string]interface{}{} + + jobDataActionMap["action_name"] = jobDataAction.ActionName + if jobDataAction.Inputs != nil { + inputs := []map[string]interface{}{} + for _, inputsItem := range jobDataAction.Inputs { + inputsItemMap := resourceIBMSchematicsJobVariableDataToMap(inputsItem) + inputs = append(inputs, inputsItemMap) + // TODO: handle Inputs of type TypeList -- list of non-primitive, not model items + } + jobDataActionMap["inputs"] = inputs + } + if jobDataAction.Outputs != nil { + outputs := []map[string]interface{}{} + for _, outputsItem := range jobDataAction.Outputs { + outputsItemMap := resourceIBMSchematicsJobVariableDataToMap(outputsItem) + outputs = append(outputs, outputsItemMap) + // TODO: handle Outputs of type TypeList -- list of non-primitive, not model items + } + jobDataActionMap["outputs"] = outputs + } + if jobDataAction.Settings != nil { + settings := []map[string]interface{}{} + for _, settingsItem := range jobDataAction.Settings { + settingsItemMap := resourceIBMSchematicsJobVariableDataToMap(settingsItem) + settings = append(settings, settingsItemMap) + // TODO: handle Settings of type TypeList -- list of non-primitive, not model items + } + jobDataActionMap["settings"] = settings + } + jobDataActionMap["updated_at"] = jobDataAction.UpdatedAt.String() + + return jobDataActionMap +} + +func resourceIBMSchematicsJobTargetResourcesetToMap(targetResourceset schematicsv1.TargetResourceset) map[string]interface{} { + targetResourcesetMap := map[string]interface{}{} + + targetResourcesetMap["name"] = targetResourceset.Name + targetResourcesetMap["type"] = targetResourceset.Type + targetResourcesetMap["description"] = targetResourceset.Description + targetResourcesetMap["resource_query"] = targetResourceset.ResourceQuery + targetResourcesetMap["credential_ref"] = targetResourceset.CredentialRef + targetResourcesetMap["id"] = targetResourceset.ID + targetResourcesetMap["created_at"] = targetResourceset.CreatedAt.String() + targetResourcesetMap["created_by"] = targetResourceset.CreatedBy + targetResourcesetMap["updated_at"] = targetResourceset.UpdatedAt.String() + targetResourcesetMap["updated_by"] = targetResourceset.UpdatedBy + if targetResourceset.SysLock != nil { + SysLockMap := resourceIBMSchematicsJobSystemLockToMap(*targetResourceset.SysLock) 
+ targetResourcesetMap["sys_lock"] = []map[string]interface{}{SysLockMap} + } + if targetResourceset.ResourceIds != nil { + targetResourcesetMap["resource_ids"] = targetResourceset.ResourceIds + } + + return targetResourcesetMap +} + +func resourceIBMSchematicsJobSystemLockToMap(systemLock schematicsv1.SystemLock) map[string]interface{} { + systemLockMap := map[string]interface{}{} + + systemLockMap["sys_locked"] = systemLock.SysLocked + systemLockMap["sys_locked_by"] = systemLock.SysLockedBy + systemLockMap["sys_locked_at"] = systemLock.SysLockedAt.String() + + return systemLockMap +} + +func resourceIBMSchematicsJobJobLogSummaryToMap(jobLogSummary schematicsv1.JobLogSummary) map[string]interface{} { + jobLogSummaryMap := map[string]interface{}{} + + jobLogSummaryMap["job_id"] = jobLogSummary.JobID + jobLogSummaryMap["job_type"] = jobLogSummary.JobType + jobLogSummaryMap["log_start_at"] = jobLogSummary.LogStartAt.String() + jobLogSummaryMap["log_analyzed_till"] = jobLogSummary.LogAnalyzedTill.String() + jobLogSummaryMap["elapsed_time"] = jobLogSummary.ElapsedTime + if jobLogSummary.LogErrors != nil { + logErrors := []map[string]interface{}{} + for _, logErrorsItem := range jobLogSummary.LogErrors { + logErrorsItemMap := resourceIBMSchematicsJobJobLogSummaryLogErrorsItemToMap(logErrorsItem) + logErrors = append(logErrors, logErrorsItemMap) + // TODO: handle LogErrors of type TypeList -- list of non-primitive, not model items + } + jobLogSummaryMap["log_errors"] = logErrors + } + if jobLogSummary.RepoDownloadJob != nil { + RepoDownloadJobMap := resourceIBMSchematicsJobJobLogSummaryRepoDownloadJobToMap(*jobLogSummary.RepoDownloadJob) + jobLogSummaryMap["repo_download_job"] = []map[string]interface{}{RepoDownloadJobMap} + } + if jobLogSummary.ActionJob != nil { + ActionJobMap := resourceIBMSchematicsJobJobLogSummaryActionJobToMap(*jobLogSummary.ActionJob) + jobLogSummaryMap["action_job"] = []map[string]interface{}{ActionJobMap} + } + + return jobLogSummaryMap +} + +func resourceIBMSchematicsJobJobLogSummaryLogErrorsItemToMap(jobLogSummaryLogErrorsItem schematicsv1.JobLogSummaryLogErrorsItem) map[string]interface{} { + jobLogSummaryLogErrorsItemMap := map[string]interface{}{} + + jobLogSummaryLogErrorsItemMap["error_code"] = jobLogSummaryLogErrorsItem.ErrorCode + jobLogSummaryLogErrorsItemMap["error_msg"] = jobLogSummaryLogErrorsItem.ErrorMsg + jobLogSummaryLogErrorsItemMap["error_count"] = jobLogSummaryLogErrorsItem.ErrorCount + + return jobLogSummaryLogErrorsItemMap +} + +func resourceIBMSchematicsJobJobLogSummaryRepoDownloadJobToMap(jobLogSummaryRepoDownloadJob schematicsv1.JobLogSummaryRepoDownloadJob) map[string]interface{} { + jobLogSummaryRepoDownloadJobMap := map[string]interface{}{} + + jobLogSummaryRepoDownloadJobMap["scanned_file_count"] = jobLogSummaryRepoDownloadJob.ScannedFileCount + jobLogSummaryRepoDownloadJobMap["quarantined_file_count"] = jobLogSummaryRepoDownloadJob.QuarantinedFileCount + jobLogSummaryRepoDownloadJobMap["detected_filetype"] = jobLogSummaryRepoDownloadJob.DetectedFiletype + jobLogSummaryRepoDownloadJobMap["inputs_count"] = jobLogSummaryRepoDownloadJob.InputsCount + jobLogSummaryRepoDownloadJobMap["outputs_count"] = jobLogSummaryRepoDownloadJob.OutputsCount + + return jobLogSummaryRepoDownloadJobMap +} + +func resourceIBMSchematicsJobJobLogSummaryActionJobToMap(jobLogSummaryActionJob schematicsv1.JobLogSummaryActionJob) map[string]interface{} { + jobLogSummaryActionJobMap := map[string]interface{}{} + + jobLogSummaryActionJobMap["target_count"] = 
jobLogSummaryActionJob.TargetCount + jobLogSummaryActionJobMap["task_count"] = jobLogSummaryActionJob.TaskCount + jobLogSummaryActionJobMap["play_count"] = jobLogSummaryActionJob.PlayCount + if jobLogSummaryActionJob.Recap != nil { + RecapMap := resourceIBMSchematicsJobJobLogSummaryActionJobRecapToMap(*jobLogSummaryActionJob.Recap) + jobLogSummaryActionJobMap["recap"] = []map[string]interface{}{RecapMap} + } + + return jobLogSummaryActionJobMap +} + +func resourceIBMSchematicsJobJobLogSummaryActionJobRecapToMap(jobLogSummaryActionJobRecap schematicsv1.JobLogSummaryActionJobRecap) map[string]interface{} { + jobLogSummaryActionJobRecapMap := map[string]interface{}{} + + if jobLogSummaryActionJobRecap.Target != nil { + jobLogSummaryActionJobRecapMap["target"] = jobLogSummaryActionJobRecap.Target + } + jobLogSummaryActionJobRecapMap["ok"] = jobLogSummaryActionJobRecap.Ok + jobLogSummaryActionJobRecapMap["changed"] = jobLogSummaryActionJobRecap.Changed + jobLogSummaryActionJobRecapMap["failed"] = jobLogSummaryActionJobRecap.Failed + jobLogSummaryActionJobRecapMap["skipped"] = jobLogSummaryActionJobRecap.Skipped + jobLogSummaryActionJobRecapMap["unreachable"] = jobLogSummaryActionJobRecap.Unreachable + + return jobLogSummaryActionJobRecapMap +} + +func resourceIBMSchematicsJobUpdate(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + session, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + iamRefreshToken := session.Config.IAMRefreshToken + + replaceJobOptions := &schematicsv1.ReplaceJobOptions{} + + replaceJobOptions.SetJobID(d.Id()) + replaceJobOptions.SetRefreshToken(iamRefreshToken) + + if _, ok := d.GetOk("command_object"); ok { + replaceJobOptions.SetCommandObject(d.Get("command_object").(string)) + } + if _, ok := d.GetOk("command_object_id"); ok { + replaceJobOptions.SetCommandObjectID(d.Get("command_object_id").(string)) + } + if _, ok := d.GetOk("command_name"); ok { + replaceJobOptions.SetCommandName(d.Get("command_name").(string)) + } + if _, ok := d.GetOk("command_parameter"); ok { + replaceJobOptions.SetCommandParameter(d.Get("command_parameter").(string)) + } + if _, ok := d.GetOk("command_options"); ok { + replaceJobOptions.SetCommandOptions(expandStringList(d.Get("command_options").([]interface{}))) + } + if _, ok := d.GetOk("job_inputs"); ok { + var inputs []schematicsv1.VariableData + for _, e := range d.Get("job_inputs").([]interface{}) { + value := e.(map[string]interface{}) + inputsItem := resourceIBMSchematicsJobMapToVariableData(value) + inputs = append(inputs, inputsItem) + } + replaceJobOptions.SetInputs(inputs) + } + if _, ok := d.GetOk("job_env_settings"); ok { + var settings []schematicsv1.VariableData + for _, e := range d.Get("job_env_settings").([]interface{}) { + value := e.(map[string]interface{}) + settingsItem := resourceIBMSchematicsJobMapToVariableData(value) + settings = append(settings, settingsItem) + } + replaceJobOptions.SetSettings(settings) + } + if _, ok := d.GetOk("tags"); ok { + replaceJobOptions.SetTags(expandStringList(d.Get("tags").([]interface{}))) + } + if _, ok := d.GetOk("location"); ok { + replaceJobOptions.SetLocation(d.Get("location").(string)) + } + if _, ok := d.GetOk("status"); ok { + statusAttr := d.Get("status").([]interface{}) + if len(statusAttr) > 0 { + status := resourceIBMSchematicsJobMapToJobStatus(d.Get("status.0").(map[string]interface{})) + replaceJobOptions.SetStatus(&status) + } + } + if 
_, ok := d.GetOk("data"); ok { + dataAttr := d.Get("data").([]interface{}) + if len(dataAttr) > 0 { + data := resourceIBMSchematicsJobMapToJobData(d.Get("data.0").(map[string]interface{})) + replaceJobOptions.SetData(&data) + } + } + if _, ok := d.GetOk("bastion"); ok { + bastionAttr := d.Get("bastion").([]interface{}) + if len(bastionAttr) > 0 { + bastion := resourceIBMSchematicsJobMapToTargetResourceset(d.Get("bastion.0").(map[string]interface{})) + replaceJobOptions.SetBastion(&bastion) + } + } + if _, ok := d.GetOk("job_log_summary"); ok { + jobLogSummaryAttr := d.Get("job_log_summary").([]interface{}) + if len(jobLogSummaryAttr) > 0 { + logSummary := resourceIBMSchematicsJobMapToJobLogSummary(d.Get("job_log_summary.0").(map[string]interface{})) + replaceJobOptions.SetLogSummary(&logSummary) + } + } + + _, response, err := schematicsClient.ReplaceJobWithContext(context.TODO(), replaceJobOptions) + if err != nil { + log.Printf("[DEBUG] ReplaceJobWithContext failed %s\n%s", err, response) + return err + } + + return resourceIBMSchematicsJobRead(d, meta) +} + +func resourceIBMSchematicsJobDelete(d *schema.ResourceData, meta interface{}) error { + + session, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + deleteJobOptions := &schematicsv1.DeleteJobOptions{} + + iamRefreshToken := session.Config.IAMRefreshToken + deleteJobOptions.SetRefreshToken(iamRefreshToken) + + deleteJobOptions.SetJobID(d.Id()) + + response, err := schematicsClient.DeleteJobWithContext(context.TODO(), deleteJobOptions) + if err != nil { + log.Printf("[DEBUG] DeleteJobWithContext failed %s\n%s", err, response) + return err + } + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_schematics_workspace.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_schematics_workspace.go new file mode 100644 index 00000000000..92fc78af1bd --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_schematics_workspace.go @@ -0,0 +1,1512 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/schematics-go-sdk/schematicsv1" + "github.com/go-openapi/strfmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +const ( + schematicsWorkspaceName = "name" + schematicsWorkspaceDescription = "description" + schematicsWorkspaceTemplateType = "template_type" +) + +func resourceIBMSchematicsWorkspace() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMSchematicsWorkspaceCreate, + Read: resourceIBMSchematicsWorkspaceRead, + Update: resourceIBMSchematicsWorkspaceUpdate, + Delete: resourceIBMSchematicsWorkspaceDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "applied_shareddata_ids": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "List of applied shared dataset id.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "catalog_ref": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Information about the software template that you chose from the IBM Cloud catalog. 
This information is returned for IBM Cloud catalog offerings only.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dry_run": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Dry run.", + }, + "item_icon_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The URL to the icon of the software template in the IBM Cloud catalog.", + }, + "item_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The ID of the software template that you chose to install from the IBM Cloud catalog. This software is provisioned with Schematics.", + }, + "item_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The name of the software that you chose to install from the IBM Cloud catalog.", + }, + "item_readme_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The URL to the readme file of the software template in the IBM Cloud catalog.", + }, + "item_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The URL to the software template in the IBM Cloud catalog.", + }, + "launch_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The URL to the dashboard to access your software.", + }, + "offering_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The version of the software template that you chose to install from the IBM Cloud catalog.", + }, + }, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The description of the workspace.", + ValidateFunc: InvokeValidator("ibm_schematics_workspace", schematicsWorkspaceDescription), + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The location where you want to create your Schematics workspace and run Schematics actions. The location that you enter must match the API endpoint that you use. For example, if you use the Frankfurt API endpoint, you must specify `eu-de` as your location. If you use an API endpoint for a geography and you do not specify a location, Schematics determines the location based on availability.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The name of your workspace. The name can be up to 128 characters long and can include alphanumeric characters, spaces, dashes, and underscores. When you create a workspace for your own Terraform template, consider including the microservice component that you set up with your Terraform template and the IBM Cloud environment where you want to deploy your resources in your name.", + ValidateFunc: InvokeValidator("ibm_schematics_workspace", schematicsWorkspaceName), + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The ID of the resource group where you want to provision the workspace.", + }, + "shared_data": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Information that is shared across templates in IBM Cloud catalog offerings. 
This information is not provided when you create a workspace from your own Terraform template.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_created_on": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Cluster created on.", + }, + "cluster_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The ID of the cluster where you want to provision the resources of all IBM Cloud catalog templates that are included in the catalog offering.", + }, + "cluster_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Cluster name.", + }, + "cluster_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Cluster type.", + }, + "entitlement_keys": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "The entitlement key that you want to use to install IBM Cloud entitled software.", + Elem: &schema.Schema{Type: schema.TypeMap}, + }, + "namespace": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The Kubernetes namespace or OpenShift project where the resources of all IBM Cloud catalog templates that are included in the catalog offering are deployed into.", + }, + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The IBM Cloud region that you want to use for the resources of all IBM Cloud catalog templates that are included in the catalog offering.", + }, + "resource_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The ID of the resource group that you want to use for the resources of all IBM Cloud catalog templates that are included in the catalog offering.", + }, + "worker_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "Cluster worker count.", + }, + "worker_machine_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Cluster worker type.", + }, + }, + }, + }, + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "A list of tags that are associated with the workspace.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "template_env_settings": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "A list of environment variables that you want to apply during the execution of a bash script or Terraform action. This field must be provided as a list of key-value pairs, for example, **TF_LOG=debug**. Each entry will be a map with one entry where `key is the environment variable name and value is value`. You can define environment variables for IBM Cloud catalog offerings that are provisioned by using a bash script.", + Elem: &schema.Schema{Type: schema.TypeMap}, + }, + "template_git_folder": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The subfolder in your GitHub or GitLab repository where your Terraform template is stored.", + }, + "template_init_state_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The content of an existing Terraform statefile that you want to import in to your workspace. To get the content of a Terraform statefile for a specific Terraform template in an existing workspace, run `ibmcloud terraform state pull --id --template `.", + }, + "template_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The Terraform version that you want to use to run your Terraform code. 
+      "template_git_folder": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The subfolder in your GitHub or GitLab repository where your Terraform template is stored.",
+      },
+      "template_init_state_file": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The content of an existing Terraform statefile that you want to import in to your workspace. To get the content of a Terraform statefile for a specific Terraform template in an existing workspace, run `ibmcloud terraform state pull --id --template `.",
+      },
+      "template_type": &schema.Schema{
+        Type: schema.TypeString,
+        Required: true,
+        Description: "The Terraform version that you want to use to run your Terraform code. Enter `terraform_v0.12` to use Terraform version 0.12, and `terraform_v0.11` to use Terraform version 0.11. Make sure that your Terraform config files are compatible with the Terraform version that you select.",
+        ValidateFunc: InvokeValidator("ibm_schematics_workspace", schematicsWorkspaceTemplateType),
+      },
+      "template_uninstall_script_name": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "Uninstall script name.",
+      },
+      "template_values": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "A list of variable values that you want to apply during the Helm chart installation. The list must be provided in JSON format, such as `\"autoscaling: enabled: true minReplicas: 2\"`. The values that you define here override the default Helm chart values. This field is supported only for IBM Cloud catalog offerings that are provisioned by using the Terraform Helm provider.",
+      },
+      "template_values_metadata": &schema.Schema{
+        Type: schema.TypeList,
+        Optional: true,
+        Computed: true,
+        Description: "List of values metadata.",
+        Elem: &schema.Schema{Type: schema.TypeMap},
+      },
+      "template_inputs": &schema.Schema{
+        Type: schema.TypeList,
+        Optional: true,
+        Description: "A list of input variables that are associated with the workspace.",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "description": &schema.Schema{
+              Type: schema.TypeString,
+              Optional: true,
+              Description: "The description of your input variable.",
+            },
+            "name": &schema.Schema{
+              Type: schema.TypeString,
+              Required: true,
+              Description: "The name of the variable.",
+            },
+            "secure": &schema.Schema{
+              Type: schema.TypeBool,
+              Optional: true,
+              Description: "If set to `true`, the value of your input variable is protected and not returned in your API response.",
+            },
+            "type": &schema.Schema{
+              Type: schema.TypeString,
+              Required: true,
+              Description: "`Terraform v0.11` supports `string`, `list`, `map` data type. For more information about the syntax, see [Configuring input variables](https://www.terraform.io/docs/configuration-0-11/variables.html). `Terraform v0.12` additionally supports `bool`, `number` and complex data types such as `list(type)`, `map(type)`, `object({attribute name=type,..})`, `set(type)`, `tuple([type])`. For more information about the syntax to use the complex data type, see [Configuring variables](https://www.terraform.io/docs/configuration/variables.html#type-constraints).",
+            },
+            "use_default": &schema.Schema{
+              Type: schema.TypeBool,
+              Optional: true,
+              Description: "If set to `true`, the variable uses its default value and is not overridden.",
+            },
+            "value": &schema.Schema{
+              Type: schema.TypeString,
+              Required: true,
+              Description: "Enter the value as a string for primitive types such as `bool`, `number`, and `string`, and in `HCL` format for complex variables, as you would provide it in a `.tfvars` file. **You need to enter an escaped string in `HCL` format for complex variable values**. For more information about how to declare variables in a Terraform configuration file and provide values to Schematics, see [Providing values for the declared variables](/docs/schematics?topic=schematics-create-tf-config#declare-variable).",
+            },
+          },
+        },
+      },
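+      // Illustrative example (editorial note, not upstream documentation): a
+      // template_inputs entry for a complex variable could look like
+      //   { name = "zones", type = "list(string)", value = "[ \"us-south-1\", \"us-south-2\" ]" }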
+      "template_ref": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "Workspace template ref.",
+      },
+      "template_git_branch": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The branch in GitHub where your Terraform template is stored.",
+      },
+      "template_git_release": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The release tag in GitHub of your Terraform template.",
+      },
+      "template_git_repo_sha_value": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "Repo SHA value.",
+      },
+      "template_git_repo_url": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The URL to the repository where the IBM Cloud catalog software template is stored.",
+      },
+      "template_git_url": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The URL to the GitHub, GitLab, or public Bitbucket repository where your Terraform template is stored. For more information about the syntax, see [Create workspace new](/docs/schematics?topic=schematics-schematics-cli-reference#schematics-workspace-new).",
+        ValidateFunc: validation.IsURLWithHTTPorHTTPS,
+      },
+      "template_git_has_uploadedgitrepotar": &schema.Schema{
+        Type: schema.TypeBool,
+        Optional: true,
+        Computed: true,
+        Description: "Indicates whether a Git repository tar file was uploaded.",
+      },
+      /*"template_type": &schema.Schema{
+        Type: schema.TypeList,
+        Required: true,
+        Description: "List of Workspace type.",
+        Elem: &schema.Schema{Type: schema.TypeString},
+      },*/
+      "frozen": &schema.Schema{
+        Type: schema.TypeBool,
+        Optional: true,
+        Description: "If set to true, the workspace is frozen and changes to the workspace are disabled.",
+      },
+      "frozen_at": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The timestamp when the workspace was frozen.",
+      },
+      "frozen_by": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The user ID that froze the workspace.",
+      },
+      "locked": &schema.Schema{
+        Type: schema.TypeBool,
+        Optional: true,
+        Computed: true,
+        Description: "If set to true, the workspace is locked and disabled for changes.",
+      },
+      "locked_by": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Computed: true,
+        Description: "The user ID that initiated a resource-related action, such as applying or destroying resources, that locked the workspace.",
+      },
+      "locked_time": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Computed: true,
+        Description: "The timestamp when the workspace was locked.",
+      },
+      "x_github_token": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The personal access token to authenticate with your private GitHub or GitLab repository and access your Terraform template.",
+      },
+      "created_at": &schema.Schema{
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The timestamp when the workspace was created.",
+      },
+      "created_by": &schema.Schema{
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The user ID that created the workspace.",
+      },
+      "crn": &schema.Schema{
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "Workspace CRN.",
+      },
+      "last_health_check_at": &schema.Schema{
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The timestamp when the last health check was performed by Schematics.",
+      },
+      "runtime_data": &schema.Schema{
+        Type: schema.TypeList,
+        Computed: true,
+        Description: "Information about the provisioning engine, state file, and runtime logs.",
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "engine_cmd": &schema.Schema{
+              Type: schema.TypeString,
+              Optional: true,
+              Description: "The command that was used to apply the Terraform template or IBM Cloud catalog software template.",
+            },
+            "engine_name": &schema.Schema{
+              Type: schema.TypeString,
+              Optional: true,
+              Description: "The provisioning engine that was used to apply the Terraform template or IBM Cloud catalog software template.",
+            },
+            "engine_version": &schema.Schema{
+              Type: schema.TypeString,
+              Optional: true,
+              Description: "The version of the provisioning engine that was used.",
+            },
+            "id": &schema.Schema{
+              Type: schema.TypeString,
+              Optional: true,
+              Description: "The ID that was assigned to your Terraform template or IBM Cloud catalog software template.",
+            },
+            "log_store_url": &schema.Schema{
+              Type: schema.TypeString,
+              Optional: true,
+              Description: "The URL to access the logs that were created during the creation, update, or deletion of your IBM Cloud resources.",
+            },
+            "output_values": &schema.Schema{
+              Type: schema.TypeList,
+              Optional: true,
+              Description: "List of Output values.",
+              Elem: &schema.Schema{Type: schema.TypeMap},
+            },
+            "resources": &schema.Schema{
+              Type: schema.TypeList,
+              Optional: true,
+              Description: "List of resources.",
+              Elem: &schema.Schema{Type: schema.TypeMap},
+            },
+            "state_store_url": &schema.Schema{
+              Type: schema.TypeString,
+              Optional: true,
+              Description: "The URL where the Terraform statefile (`terraform.tfstate`) is stored. You can use the statefile to find an overview of IBM Cloud resources that were created by Schematics. Schematics uses the statefile as an inventory list to determine future create, update, or deletion actions.",
+            },
+          },
+        },
+      },
+      "status": &schema.Schema{
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The status of the workspace. **Active**: After you successfully ran your infrastructure code by applying your Terraform execution plan, the state of your workspace changes to `Active`. **Connecting**: Schematics tries to connect to the template in your source repo. If successfully connected, the template is downloaded and metadata, such as input parameters, is extracted. After the template is downloaded, the state of the workspace changes to `Scanning`. **Draft**: The workspace is created without a reference to a GitHub or GitLab repository. **Failed**: If errors occur during the execution of your infrastructure code in IBM Cloud Schematics, your workspace status is set to `Failed`. **Inactive**: The Terraform template was scanned successfully and the workspace creation is complete. You can now start running Schematics plan and apply actions to provision the IBM Cloud resources that you specified in your template. If you have an `Active` workspace and decide to remove all your resources, your workspace is set to `Inactive` after all your resources are removed. **In progress**: When you instruct IBM Cloud Schematics to run your infrastructure code by applying your Terraform execution plan, the status of your workspace changes to `In progress`. **Scanning**: The download of the Terraform template is complete and vulnerability scanning started. If the scan is successful, the workspace state changes to `Inactive`. If errors in your template are found, the state changes to `Template Error`. **Stopped**: The Schematics plan, apply, or destroy action was cancelled manually. **Template Error**: The Schematics template contains errors and cannot be processed.",
+      },
+      "updated_at": &schema.Schema{
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The timestamp when the workspace was last updated.",
+      },
+      "updated_by": &schema.Schema{
+        Type: schema.TypeString,
+        Computed: true,
+        Description: "The user ID that updated the workspace.",
+      },
+      "status_code": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The success or error code that was returned for the last plan, apply, or destroy action that ran against your workspace.",
+      },
+      "status_msg": &schema.Schema{
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "The success or error message that was returned for the last plan, apply, or destroy action that ran against your workspace.",
+      },
+    },
+  }
+}
+
+func resourceIBMSchematicsWorkspaceValidator() *ResourceValidator {
+
+  validateSchema := make([]ValidateSchema, 0)
+
+  validateSchema = append(validateSchema,
+    ValidateSchema{
+      Identifier: schematicsWorkspaceName,
+      ValidateFunctionIdentifier: ValidateRegexp,
+      Type: TypeString,
+      Regexp: `^[a-zA-Z0-9][a-zA-Z0-9-_ ]*$`,
+      MinValueLength: 1,
+      MaxValueLength: 128,
+      Required: true})
+  validateSchema = append(validateSchema,
+    ValidateSchema{
+      Identifier: schematicsWorkspaceDescription,
+      ValidateFunctionIdentifier: StringLenBetween,
+      Type: TypeString,
+      MinValueLength: 0,
+      MaxValueLength: 2048,
+      Optional: true})
+  validateSchema = append(validateSchema,
+    ValidateSchema{
+      Identifier: schematicsWorkspaceTemplateType,
+      ValidateFunctionIdentifier: ValidateRegexp,
+      Type: TypeString,
+      Regexp: `^terraform_v0\.(?:11|12|13)(?:\.\d+)?$`,
+      Default: "[]",
+      Optional: true})
+
+  ibmSchematicsWorkspaceResourceValidator := ResourceValidator{ResourceName: "ibm_schematics_workspace", Schema: validateSchema}
+  return &ibmSchematicsWorkspaceResourceValidator
+}
+
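+// Editorial note: per the name regex above, workspace names must begin with an
+// alphanumeric character; for example "my workspace-01" is accepted while
+// "-workspace" is rejected.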
+func resourceIBMSchematicsWorkspaceCreate(d *schema.ResourceData, meta interface{}) error {
+  schematicsClient, err := meta.(ClientSession).SchematicsV1()
+  if err != nil {
+    return err
+  }
+
+  createWorkspaceOptions := &schematicsv1.CreateWorkspaceOptions{}
+
+  if _, ok := d.GetOk("applied_shareddata_ids"); ok {
+    createWorkspaceOptions.SetAppliedShareddataIds(expandStringList(d.Get("applied_shareddata_ids").([]interface{})))
+  }
+  if _, ok := d.GetOk("catalog_ref"); ok {
+    catalogRefAttr := d.Get("catalog_ref").([]interface{})
+    if len(catalogRefAttr) > 0 {
+      catalogRef := resourceIBMSchematicsWorkspaceMapToCatalogRef(d.Get("catalog_ref.0").(map[string]interface{}))
+      createWorkspaceOptions.SetCatalogRef(&catalogRef)
+    }
+  }
+  if _, ok := d.GetOk("description"); ok {
+    createWorkspaceOptions.SetDescription(d.Get("description").(string))
+  }
+  if _, ok := d.GetOk("location"); ok {
+    createWorkspaceOptions.SetLocation(d.Get("location").(string))
+  }
+  if _, ok := d.GetOk("name"); ok {
+    createWorkspaceOptions.SetName(d.Get("name").(string))
+  }
+  if _, ok := d.GetOk("resource_group"); ok {
+    createWorkspaceOptions.SetResourceGroup(d.Get("resource_group").(string))
+  }
+  if _, ok := d.GetOk("shared_data"); ok {
+    sharedDataAttr := d.Get("shared_data").([]interface{})
+    if len(sharedDataAttr) > 0 {
+      sharedData := resourceIBMSchematicsWorkspaceMapToSharedTargetData(d.Get("shared_data.0").(map[string]interface{}))
+      createWorkspaceOptions.SetSharedData(&sharedData)
+    }
+  }
+  if _, ok := d.GetOk("tags"); ok {
+    createWorkspaceOptions.SetTags(expandStringList(d.Get("tags").([]interface{})))
+  }
+
+  var templateData []schematicsv1.TemplateSourceDataRequest
+
+  templateSourceDataRequestMap := map[string]interface{}{}
+  hasTemplateData := false
+
+  if _, ok := d.GetOk("template_env_settings"); ok {
+    templateSourceDataRequestMap["env_values"] = d.Get("template_env_settings").([]interface{})
+    hasTemplateData = true
+  }
+  if _, ok := d.GetOk("template_git_folder"); ok {
+    templateSourceDataRequestMap["folder"] = d.Get("template_git_folder").(string)
+    hasTemplateData = true
+  }
+  if _, ok := d.GetOk("template_init_state_file"); ok {
+    templateSourceDataRequestMap["init_state_file"] = d.Get("template_init_state_file").(string)
+    hasTemplateData = true
+  }
+  if _, ok := d.GetOk("template_type"); ok {
+    templateSourceDataRequestMap["type"] = d.Get("template_type").(string)
+    createWorkspaceOptions.SetType([]string{d.Get("template_type").(string)})
+    hasTemplateData = true
+  }
+  if _, ok := d.GetOk("template_uninstall_script_name"); ok {
+    templateSourceDataRequestMap["uninstall_script_name"] = d.Get("template_uninstall_script_name").(string)
+    hasTemplateData = true
+  }
+  if _, ok := d.GetOk("template_values"); ok {
+    templateSourceDataRequestMap["values"] = d.Get("template_values").(string)
+    hasTemplateData = true
+  }
+  if _, ok := d.GetOk("template_values_metadata"); ok {
+    templateSourceDataRequestMap["values_metadata"] = d.Get("template_values_metadata").([]interface{})
+    hasTemplateData = true
+  }
+  if _, ok := d.GetOk("template_inputs"); ok {
+    templateSourceDataRequestMap["variablestore"] = d.Get("template_inputs").([]interface{})
+    hasTemplateData = true
+  }
+  if hasTemplateData {
+    templateDataItem := resourceIBMSchematicsWorkspaceMapToTemplateSourceDataRequest(templateSourceDataRequestMap)
+    templateData = append(templateData, templateDataItem)
+    createWorkspaceOptions.SetTemplateData(templateData)
+  }
+  if _, ok := d.GetOk("template_ref"); ok {
+    createWorkspaceOptions.SetTemplateRef(d.Get("template_ref").(string))
+  }
+
+  templateRepoRequestMap := map[string]interface{}{}
+  hasTemplateRepo := false
+  if _, ok := d.GetOk("template_git_branch"); ok {
+    templateRepoRequestMap["branch"] = d.Get("template_git_branch").(string)
+    hasTemplateRepo = true
+  }
+  if _, ok := d.GetOk("template_git_release"); ok {
+    templateRepoRequestMap["release"] = d.Get("template_git_release").(string)
+    hasTemplateRepo = true
+  }
+  if _, ok := d.GetOk("template_git_repo_sha_value"); ok {
+    templateRepoRequestMap["repo_sha_value"] = d.Get("template_git_repo_sha_value").(string)
+    hasTemplateRepo = true
+  }
+  if _, ok := d.GetOk("template_git_repo_url"); ok {
+    templateRepoRequestMap["repo_url"] = d.Get("template_git_repo_url").(string)
+    hasTemplateRepo = true
+  }
+  if _, ok := d.GetOk("template_git_url"); ok {
+    templateRepoRequestMap["url"] = d.Get("template_git_url").(string)
+    hasTemplateRepo = true
+  }
+  if _, ok := d.GetOk("template_git_has_uploadedgitrepotar"); ok {
+    templateRepoRequestMap["has_uploadedgitrepotar"] = d.Get("template_git_has_uploadedgitrepotar").(bool)
+    hasTemplateRepo = true
+  }
+  if hasTemplateRepo {
+    templateRepo := resourceIBMSchematicsWorkspaceMapToTemplateRepoRequest(templateRepoRequestMap)
+    createWorkspaceOptions.SetTemplateRepo(&templateRepo)
+  }
+
+  /*if _, ok := d.GetOk("template_type"); ok {
+    createWorkspaceOptions.SetType(expandStringList(d.Get("template_type").([]interface{})))
+  }*/
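+  // The flat `frozen`/`locked` attributes below are packed into a single
+  // WorkspaceStatusRequest before the create call.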
d.Get("frozen").(bool) + hasWorkspaceStatus = true + } + if _, ok := d.GetOk("frozen_at"); ok { + workspaceStatusRequestMap["frozen_at"] = d.Get("frozen_at").(string) + hasWorkspaceStatus = true + } + if _, ok := d.GetOk("frozen_by"); ok { + workspaceStatusRequestMap["frozen_by"] = d.Get("frozen_by").(string) + hasWorkspaceStatus = true + } + if _, ok := d.GetOk("locked"); ok { + workspaceStatusRequestMap["locked"] = d.Get("locked").(bool) + hasWorkspaceStatus = true + } + if _, ok := d.GetOk("locked_by"); ok { + workspaceStatusRequestMap["locked_by"] = d.Get("locked_by").(string) + hasWorkspaceStatus = true + } + if _, ok := d.GetOk("locked_time"); ok { + workspaceStatusRequestMap["locked_time"] = d.Get("locked_time").(string) + hasWorkspaceStatus = true + } + if hasWorkspaceStatus { + workspaceStatus := resourceIBMSchematicsWorkspaceMapToWorkspaceStatusRequest(workspaceStatusRequestMap) + createWorkspaceOptions.SetWorkspaceStatus(&workspaceStatus) + } + + if _, ok := d.GetOk("x_github_token"); ok { + createWorkspaceOptions.SetXGithubToken(d.Get("x_github_token").(string)) + } + + workspaceResponse, response, err := schematicsClient.CreateWorkspaceWithContext(context.TODO(), createWorkspaceOptions) + if err != nil { + log.Printf("[DEBUG] CreateWorkspaceWithContext failed %s\n%s", err, response) + return err + } + + d.SetId(*workspaceResponse.ID) + + return resourceIBMSchematicsWorkspaceRead(d, meta) +} + +func resourceIBMSchematicsWorkspaceMapToCatalogRef(catalogRefMap map[string]interface{}) schematicsv1.CatalogRef { + catalogRef := schematicsv1.CatalogRef{} + + if catalogRefMap["dry_run"] != nil { + catalogRef.DryRun = core.BoolPtr(catalogRefMap["dry_run"].(bool)) + } + if catalogRefMap["item_icon_url"] != nil { + catalogRef.ItemIconURL = core.StringPtr(catalogRefMap["item_icon_url"].(string)) + } + if catalogRefMap["item_id"] != nil { + catalogRef.ItemID = core.StringPtr(catalogRefMap["item_id"].(string)) + } + if catalogRefMap["item_name"] != nil { + catalogRef.ItemName = core.StringPtr(catalogRefMap["item_name"].(string)) + } + if catalogRefMap["item_readme_url"] != nil { + catalogRef.ItemReadmeURL = core.StringPtr(catalogRefMap["item_readme_url"].(string)) + } + if catalogRefMap["item_url"] != nil { + catalogRef.ItemURL = core.StringPtr(catalogRefMap["item_url"].(string)) + } + if catalogRefMap["launch_url"] != nil { + catalogRef.LaunchURL = core.StringPtr(catalogRefMap["launch_url"].(string)) + } + if catalogRefMap["offering_version"] != nil { + catalogRef.OfferingVersion = core.StringPtr(catalogRefMap["offering_version"].(string)) + } + + return catalogRef +} + +func resourceIBMSchematicsWorkspaceMapToSharedTargetData(sharedTargetDataMap map[string]interface{}) schematicsv1.SharedTargetData { + sharedTargetData := schematicsv1.SharedTargetData{} + + if sharedTargetDataMap["cluster_created_on"] != nil { + sharedTargetData.ClusterCreatedOn = core.StringPtr(sharedTargetDataMap["cluster_created_on"].(string)) + } + if sharedTargetDataMap["cluster_id"] != nil { + sharedTargetData.ClusterID = core.StringPtr(sharedTargetDataMap["cluster_id"].(string)) + } + if sharedTargetDataMap["cluster_name"] != nil { + sharedTargetData.ClusterName = core.StringPtr(sharedTargetDataMap["cluster_name"].(string)) + } + if sharedTargetDataMap["cluster_type"] != nil { + sharedTargetData.ClusterType = core.StringPtr(sharedTargetDataMap["cluster_type"].(string)) + } + if sharedTargetDataMap["entitlement_keys"] != nil { + entitlementKeys := []interface{}{} + for _, entitlementKeysItem := range 
sharedTargetDataMap["entitlement_keys"].([]interface{}) { + entitlementKeys = append(entitlementKeys, entitlementKeysItem.(interface{})) + } + sharedTargetData.EntitlementKeys = entitlementKeys + } + if sharedTargetDataMap["namespace"] != nil { + sharedTargetData.Namespace = core.StringPtr(sharedTargetDataMap["namespace"].(string)) + } + if sharedTargetDataMap["region"] != nil { + sharedTargetData.Region = core.StringPtr(sharedTargetDataMap["region"].(string)) + } + if sharedTargetDataMap["resource_group_id"] != nil { + sharedTargetData.ResourceGroupID = core.StringPtr(sharedTargetDataMap["resource_group_id"].(string)) + } + if sharedTargetDataMap["worker_count"] != nil { + sharedTargetData.WorkerCount = core.Int64Ptr(int64(sharedTargetDataMap["worker_count"].(int))) + } + if sharedTargetDataMap["worker_machine_type"] != nil { + sharedTargetData.WorkerMachineType = core.StringPtr(sharedTargetDataMap["worker_machine_type"].(string)) + } + + return sharedTargetData +} + +func resourceIBMSchematicsWorkspaceMapToTemplateSourceDataRequest(templateSourceDataRequestMap map[string]interface{}) schematicsv1.TemplateSourceDataRequest { + templateSourceDataRequest := schematicsv1.TemplateSourceDataRequest{} + + if templateSourceDataRequestMap["env_values"] != nil { + envValues := []interface{}{} + for _, envValuesItem := range templateSourceDataRequestMap["env_values"].([]interface{}) { + envValues = append(envValues, envValuesItem.(interface{})) + } + templateSourceDataRequest.EnvValues = envValues + } + if templateSourceDataRequestMap["folder"] != nil { + templateSourceDataRequest.Folder = core.StringPtr(templateSourceDataRequestMap["folder"].(string)) + } + if templateSourceDataRequestMap["init_state_file"] != nil { + templateSourceDataRequest.InitStateFile = core.StringPtr(templateSourceDataRequestMap["init_state_file"].(string)) + } + if templateSourceDataRequestMap["type"] != nil { + templateSourceDataRequest.Type = core.StringPtr(templateSourceDataRequestMap["type"].(string)) + } + if templateSourceDataRequestMap["uninstall_script_name"] != nil { + templateSourceDataRequest.UninstallScriptName = core.StringPtr(templateSourceDataRequestMap["uninstall_script_name"].(string)) + } + if templateSourceDataRequestMap["values"] != nil { + templateSourceDataRequest.Values = core.StringPtr(templateSourceDataRequestMap["values"].(string)) + } + if templateSourceDataRequestMap["values_metadata"] != nil { + valuesMetadata := []interface{}{} + for _, valuesMetadataItem := range templateSourceDataRequestMap["values_metadata"].([]interface{}) { + valuesMetadata = append(valuesMetadata, valuesMetadataItem.(interface{})) + } + templateSourceDataRequest.ValuesMetadata = valuesMetadata + } + if templateSourceDataRequestMap["variablestore"] != nil { + variablestore := []schematicsv1.WorkspaceVariableRequest{} + for _, variablestoreItem := range templateSourceDataRequestMap["variablestore"].([]interface{}) { + variablestoreItemModel := resourceIBMSchematicsWorkspaceMapToWorkspaceVariableRequest(variablestoreItem.(map[string]interface{})) + variablestore = append(variablestore, variablestoreItemModel) + } + templateSourceDataRequest.Variablestore = variablestore + } + + return templateSourceDataRequest +} + +func resourceIBMSchematicsWorkspaceMapToWorkspaceVariableRequest(workspaceVariableRequestMap map[string]interface{}) schematicsv1.WorkspaceVariableRequest { + workspaceVariableRequest := schematicsv1.WorkspaceVariableRequest{} + + if workspaceVariableRequestMap["description"] != nil { + 
+func resourceIBMSchematicsWorkspaceMapToWorkspaceVariableRequest(workspaceVariableRequestMap map[string]interface{}) schematicsv1.WorkspaceVariableRequest {
+  workspaceVariableRequest := schematicsv1.WorkspaceVariableRequest{}
+
+  if workspaceVariableRequestMap["description"] != nil {
+    workspaceVariableRequest.Description = core.StringPtr(workspaceVariableRequestMap["description"].(string))
+  }
+  if workspaceVariableRequestMap["name"] != nil {
+    workspaceVariableRequest.Name = core.StringPtr(workspaceVariableRequestMap["name"].(string))
+  }
+  if workspaceVariableRequestMap["secure"] != nil {
+    workspaceVariableRequest.Secure = core.BoolPtr(workspaceVariableRequestMap["secure"].(bool))
+  }
+  if workspaceVariableRequestMap["type"] != nil {
+    workspaceVariableRequest.Type = core.StringPtr(workspaceVariableRequestMap["type"].(string))
+  }
+  if workspaceVariableRequestMap["use_default"] != nil {
+    workspaceVariableRequest.UseDefault = core.BoolPtr(workspaceVariableRequestMap["use_default"].(bool))
+  }
+  if workspaceVariableRequestMap["value"] != nil {
+    workspaceVariableRequest.Value = core.StringPtr(workspaceVariableRequestMap["value"].(string))
+  }
+
+  return workspaceVariableRequest
+}
+
+func resourceIBMSchematicsWorkspaceMapToTemplateRepoRequest(templateRepoRequestMap map[string]interface{}) schematicsv1.TemplateRepoRequest {
+  templateRepoRequest := schematicsv1.TemplateRepoRequest{}
+
+  if templateRepoRequestMap["branch"] != nil {
+    templateRepoRequest.Branch = core.StringPtr(templateRepoRequestMap["branch"].(string))
+  }
+  if templateRepoRequestMap["release"] != nil {
+    templateRepoRequest.Release = core.StringPtr(templateRepoRequestMap["release"].(string))
+  }
+  if templateRepoRequestMap["repo_sha_value"] != nil {
+    templateRepoRequest.RepoShaValue = core.StringPtr(templateRepoRequestMap["repo_sha_value"].(string))
+  }
+  if templateRepoRequestMap["repo_url"] != nil {
+    templateRepoRequest.RepoURL = core.StringPtr(templateRepoRequestMap["repo_url"].(string))
+  }
+  if templateRepoRequestMap["url"] != nil {
+    templateRepoRequest.URL = core.StringPtr(templateRepoRequestMap["url"].(string))
+  }
+
+  return templateRepoRequest
+}
+
+func resourceIBMSchematicsWorkspaceMapToTemplateRepoUpdateRequest(templateRepoUpdateRequestMap map[string]interface{}) schematicsv1.TemplateRepoUpdateRequest {
+  templateRepoUpdateRequest := schematicsv1.TemplateRepoUpdateRequest{}
+
+  if templateRepoUpdateRequestMap["branch"] != nil {
+    templateRepoUpdateRequest.Branch = core.StringPtr(templateRepoUpdateRequestMap["branch"].(string))
+  }
+  if templateRepoUpdateRequestMap["release"] != nil {
+    templateRepoUpdateRequest.Release = core.StringPtr(templateRepoUpdateRequestMap["release"].(string))
+  }
+  if templateRepoUpdateRequestMap["repo_sha_value"] != nil {
+    templateRepoUpdateRequest.RepoShaValue = core.StringPtr(templateRepoUpdateRequestMap["repo_sha_value"].(string))
+  }
+  if templateRepoUpdateRequestMap["repo_url"] != nil {
+    templateRepoUpdateRequest.RepoURL = core.StringPtr(templateRepoUpdateRequestMap["repo_url"].(string))
+  }
+  if templateRepoUpdateRequestMap["url"] != nil {
+    templateRepoUpdateRequest.URL = core.StringPtr(templateRepoUpdateRequestMap["url"].(string))
+  }
+
+  return templateRepoUpdateRequest
+}
+
+func resourceIBMSchematicsWorkspaceMapToWorkspaceStatusRequest(workspaceStatusRequestMap map[string]interface{}) schematicsv1.WorkspaceStatusRequest {
+  workspaceStatusRequest := schematicsv1.WorkspaceStatusRequest{}
+
+  if workspaceStatusRequestMap["frozen"] != nil {
+    workspaceStatusRequest.Frozen = core.BoolPtr(workspaceStatusRequestMap["frozen"].(bool))
+  }
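+  // Editorial note: strfmt.ParseDateTime expects an ISO 8601 / RFC 3339
+  // timestamp; values that fail to parse are skipped rather than sent.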
workspaceStatusRequestMap["frozen_by"] != nil { + workspaceStatusRequest.FrozenBy = core.StringPtr(workspaceStatusRequestMap["frozen_by"].(string)) + } + if workspaceStatusRequestMap["locked"] != nil { + workspaceStatusRequest.Locked = core.BoolPtr(workspaceStatusRequestMap["locked"].(bool)) + } + if workspaceStatusRequestMap["locked_by"] != nil { + workspaceStatusRequest.LockedBy = core.StringPtr(workspaceStatusRequestMap["locked_by"].(string)) + } + if workspaceStatusRequestMap["locked_time"] != nil { + lockedTime, err := strfmt.ParseDateTime(workspaceStatusRequestMap["locked_time"].(string)) + if err != nil { + workspaceStatusRequest.LockedTime = &lockedTime + } + } + + return workspaceStatusRequest +} + +func resourceIBMSchematicsWorkspaceMapToWorkspaceStatusUpdateRequest(workspaceStatusUpdateRequestMap map[string]interface{}) schematicsv1.WorkspaceStatusUpdateRequest { + workspaceStatusUpdateRequest := schematicsv1.WorkspaceStatusUpdateRequest{} + + if workspaceStatusUpdateRequestMap["frozen"] != nil { + workspaceStatusUpdateRequest.Frozen = core.BoolPtr(workspaceStatusUpdateRequestMap["frozen"].(bool)) + } + if workspaceStatusUpdateRequestMap["frozen_at"] != nil { + frozenAt := workspaceStatusUpdateRequestMap["frozen_at"].(strfmt.DateTime) + workspaceStatusUpdateRequest.FrozenAt = &frozenAt + } + if workspaceStatusUpdateRequestMap["frozen_by"] != nil { + workspaceStatusUpdateRequest.FrozenBy = core.StringPtr(workspaceStatusUpdateRequestMap["frozen_by"].(string)) + } + if workspaceStatusUpdateRequestMap["locked"] != nil { + workspaceStatusUpdateRequest.Locked = core.BoolPtr(workspaceStatusUpdateRequestMap["locked"].(bool)) + } + if workspaceStatusUpdateRequestMap["locked_by"] != nil { + workspaceStatusUpdateRequest.LockedBy = core.StringPtr(workspaceStatusUpdateRequestMap["locked_by"].(string)) + } + if workspaceStatusUpdateRequestMap["locked_time"] != nil { + lockedTime := workspaceStatusUpdateRequestMap["locked_time"].(strfmt.DateTime) + workspaceStatusUpdateRequest.LockedTime = &lockedTime + } + + return workspaceStatusUpdateRequest +} + +func resourceIBMSchematicsWorkspaceRead(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := meta.(ClientSession).SchematicsV1() + if err != nil { + return err + } + + getWorkspaceOptions := &schematicsv1.GetWorkspaceOptions{} + + getWorkspaceOptions.SetWID(d.Id()) + + workspaceResponse, response, err := schematicsClient.GetWorkspaceWithContext(context.TODO(), getWorkspaceOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetWorkspaceWithContext failed %s\n%s", err, response) + return err + } + + if workspaceResponse.AppliedShareddataIds != nil { + if err = d.Set("applied_shareddata_ids", workspaceResponse.AppliedShareddataIds); err != nil { + return fmt.Errorf("Error setting applied_shareddata_ids: %s", err) + } + } + if workspaceResponse.CatalogRef != nil { + catalogRefMap := resourceIBMSchematicsWorkspaceCatalogRefToMap(*workspaceResponse.CatalogRef) + if err = d.Set("catalog_ref", []map[string]interface{}{catalogRefMap}); err != nil { + return fmt.Errorf("Error setting catalog_ref: %s", err) + } + } + if err = d.Set("description", workspaceResponse.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("location", workspaceResponse.Location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if err = d.Set("name", workspaceResponse.Name); err != nil { + return 
fmt.Errorf("Error setting name: %s", err) + } + if err = d.Set("resource_group", workspaceResponse.ResourceGroup); err != nil { + return fmt.Errorf("Error setting resource_group: %s", err) + } + if _, ok := d.GetOk("shared_data"); ok { + if workspaceResponse.SharedData != nil { + sharedDataMap := resourceIBMSchematicsWorkspaceSharedTargetDataResponseToMap(*workspaceResponse.SharedData) + if err = d.Set("shared_data", []map[string]interface{}{sharedDataMap}); err != nil { + return fmt.Errorf("Error reading shared_data: %s", err) + } + } + } + if workspaceResponse.Tags != nil { + if err = d.Set("tags", workspaceResponse.Tags); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + } + if workspaceResponse.TemplateData != nil { + templateData := []map[string]interface{}{} + for _, templateDataItem := range workspaceResponse.TemplateData { + templateDataItemMap := resourceIBMSchematicsWorkspaceTemplateSourceDataResponseToMap(templateDataItem) + templateData = append(templateData, templateDataItemMap) + } + if err = d.Set("template_env_settings", templateData[0]["env_values"]); err != nil { + return fmt.Errorf("Error reading env_values: %s", err) + } + if err = d.Set("template_git_folder", templateData[0]["folder"]); err != nil { + return fmt.Errorf("Error reading folder: %s", err) + } + if err = d.Set("template_init_state_file", templateData[0]["init_state_file"]); err != nil { + return fmt.Errorf("Error reading init_state_file: %s", err) + } + if err = d.Set("template_type", templateData[0]["type"]); err != nil { + return fmt.Errorf("Error reading type: %s", err) + } + if err = d.Set("template_uninstall_script_name", templateData[0]["uninstall_script_name"]); err != nil { + return fmt.Errorf("Error reading uninstall_script_name: %s", err) + } + if err = d.Set("template_values", templateData[0]["values"]); err != nil { + return fmt.Errorf("Error reading values: %s", err) + } + if err = d.Set("template_values_metadata", templateData[0]["values_metadata"]); err != nil { + return fmt.Errorf("Error reading values_metadata: %s", err) + } + if err = d.Set("template_inputs", templateData[0]["variablestore"]); err != nil { + return fmt.Errorf("Error reading variablestore: %s", err) + } + + } + if err = d.Set("template_ref", workspaceResponse.TemplateRef); err != nil { + return fmt.Errorf("Error setting template_ref: %s", err) + } + if workspaceResponse.TemplateRepo != nil { + templateRepoMap := resourceIBMSchematicsWorkspaceTemplateRepoResponseToMap(*workspaceResponse.TemplateRepo) + if err = d.Set("template_git_branch", templateRepoMap["branch"]); err != nil { + return fmt.Errorf("Error reading branch: %s", err) + } + if err = d.Set("template_git_release", templateRepoMap["release"]); err != nil { + return fmt.Errorf("Error reading release: %s", err) + } + if err = d.Set("template_git_repo_sha_value", templateRepoMap["repo_sha_value"]); err != nil { + return fmt.Errorf("Error reading repo_sha_value: %s", err) + } + if err = d.Set("template_git_repo_url", templateRepoMap["repo_url"]); err != nil { + return fmt.Errorf("Error reading repo_url: %s", err) + } + if err = d.Set("template_git_url", templateRepoMap["url"]); err != nil { + return fmt.Errorf("Error reading url: %s", err) + } + if err = d.Set("template_git_has_uploadedgitrepotar", templateRepoMap["has_uploadedgitrepotar"]); err != nil { + return fmt.Errorf("Error reading has_uploadedgitrepotar: %s", err) + } + } + /*if workspaceResponse.Type != nil { + if err = d.Set("template_type", workspaceResponse.Type); err != nil { + return 
fmt.Errorf("Error reading type: %s", err) + } + }*/ + if workspaceResponse.WorkspaceStatus != nil { + workspaceStatusMap := resourceIBMSchematicsWorkspaceWorkspaceStatusResponseToMap(*workspaceResponse.WorkspaceStatus) + if err = d.Set("frozen", workspaceStatusMap["frozen"]); err != nil { + return fmt.Errorf("Error reading frozen: %s", err) + } + if err = d.Set("frozen_at", workspaceStatusMap["frozen_at"]); err != nil { + return fmt.Errorf("Error reading frozen_at: %s", err) + } + if err = d.Set("frozen_by", workspaceStatusMap["frozen_by"]); err != nil { + return fmt.Errorf("Error reading frozen_by: %s", err) + } + if err = d.Set("locked", workspaceStatusMap["locked"]); err != nil { + return fmt.Errorf("Error reading locked: %s", err) + } + if err = d.Set("locked_by", workspaceStatusMap["locked_by"]); err != nil { + return fmt.Errorf("Error reading locked_by: %s", err) + } + if err = d.Set("locked_time", workspaceStatusMap["locked_time"]); err != nil { + return fmt.Errorf("Error reading locked_time: %s", err) + } + } + if workspaceResponse.CreatedAt != nil { + if err = d.Set("created_at", workspaceResponse.CreatedAt.String()); err != nil { + return fmt.Errorf("Error reading created_at: %s", err) + } + } + if err = d.Set("created_by", workspaceResponse.CreatedBy); err != nil { + return fmt.Errorf("Error setting created_by: %s", err) + } + if err = d.Set("crn", workspaceResponse.Crn); err != nil { + return fmt.Errorf("Error reading crn: %s", err) + } + if workspaceResponse.LastHealthCheckAt != nil { + if err = d.Set("last_health_check_at", workspaceResponse.LastHealthCheckAt.String()); err != nil { + return fmt.Errorf("Error reading last_health_check_at: %s", err) + } + } + if workspaceResponse.RuntimeData != nil { + runtimeData := []map[string]interface{}{} + for _, runtimeDataItem := range workspaceResponse.RuntimeData { + runtimeDataItemMap := resourceIBMSchematicsWorkspaceTemplateRunTimeDataResponseToMap(runtimeDataItem) + runtimeData = append(runtimeData, runtimeDataItemMap) + } + if err = d.Set("runtime_data", runtimeData); err != nil { + return fmt.Errorf("Error setting runtime_data: %s", err) + } + } + if err = d.Set("status", workspaceResponse.Status); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + if workspaceResponse.UpdatedAt != nil { + if err = d.Set("updated_at", workspaceResponse.UpdatedAt.String()); err != nil { + return fmt.Errorf("Error reading updated_at: %s", err) + } + } + if err = d.Set("updated_by", workspaceResponse.UpdatedBy); err != nil { + return fmt.Errorf("Error setting updated_by: %s", err) + } + if workspaceResponse.WorkspaceStatusMsg != nil { + workspaceStatusMsgMap := resourceIBMSchematicsWorkspaceWorkspaceStatusMessageToMap(*workspaceResponse.WorkspaceStatusMsg) + if err = d.Set("status_code", workspaceStatusMsgMap["status_code"]); err != nil { + return fmt.Errorf("Error reading status_code: %s", err) + } + if err = d.Set("status_msg", workspaceStatusMsgMap["status_msg"]); err != nil { + return fmt.Errorf("Error reading status_msg: %s", err) + } + } + + return nil +} + +func resourceIBMSchematicsWorkspaceCatalogRefToMap(catalogRef schematicsv1.CatalogRef) map[string]interface{} { + catalogRefMap := map[string]interface{}{} + + catalogRefMap["dry_run"] = catalogRef.DryRun + catalogRefMap["item_icon_url"] = catalogRef.ItemIconURL + catalogRefMap["item_id"] = catalogRef.ItemID + catalogRefMap["item_name"] = catalogRef.ItemName + catalogRefMap["item_readme_url"] = catalogRef.ItemReadmeURL + catalogRefMap["item_url"] = catalogRef.ItemURL 
+ catalogRefMap["launch_url"] = catalogRef.LaunchURL + catalogRefMap["offering_version"] = catalogRef.OfferingVersion + + return catalogRefMap +} + +func resourceIBMSchematicsWorkspaceSharedTargetDataToMap(sharedTargetData schematicsv1.SharedTargetData) map[string]interface{} { + sharedTargetDataMap := map[string]interface{}{} + + sharedTargetDataMap["cluster_created_on"] = sharedTargetData.ClusterCreatedOn + sharedTargetDataMap["cluster_id"] = sharedTargetData.ClusterID + sharedTargetDataMap["cluster_name"] = sharedTargetData.ClusterName + sharedTargetDataMap["cluster_type"] = sharedTargetData.ClusterType + if sharedTargetData.EntitlementKeys != nil { + entitlementKeys := []interface{}{} + for _, entitlementKeysItem := range sharedTargetData.EntitlementKeys { + entitlementKeys = append(entitlementKeys, entitlementKeysItem) + } + sharedTargetDataMap["entitlement_keys"] = entitlementKeys + } + sharedTargetDataMap["namespace"] = sharedTargetData.Namespace + sharedTargetDataMap["region"] = sharedTargetData.Region + sharedTargetDataMap["resource_group_id"] = sharedTargetData.ResourceGroupID + sharedTargetDataMap["worker_count"] = intValue(sharedTargetData.WorkerCount) + sharedTargetDataMap["worker_machine_type"] = sharedTargetData.WorkerMachineType + + return sharedTargetDataMap +} + +func resourceIBMSchematicsWorkspaceSharedTargetDataResponseToMap(sharedTargetData schematicsv1.SharedTargetDataResponse) map[string]interface{} { + sharedTargetDataResponseMap := map[string]interface{}{} + + sharedTargetDataResponseMap["cluster_id"] = sharedTargetData.ClusterID + sharedTargetDataResponseMap["cluster_name"] = sharedTargetData.ClusterName + if sharedTargetData.EntitlementKeys != nil { + entitlementKeys := []interface{}{} + for _, entitlementKeysItem := range sharedTargetData.EntitlementKeys { + entitlementKeys = append(entitlementKeys, entitlementKeysItem) + } + sharedTargetDataResponseMap["entitlement_keys"] = entitlementKeys + } + sharedTargetDataResponseMap["namespace"] = sharedTargetData.Namespace + sharedTargetDataResponseMap["region"] = sharedTargetData.Region + sharedTargetDataResponseMap["resource_group_id"] = sharedTargetData.ResourceGroupID + + return sharedTargetDataResponseMap +} + +func resourceIBMSchematicsWorkspaceTemplateSourceDataRequestToMap(templateSourceDataRequest schematicsv1.TemplateSourceDataRequest) map[string]interface{} { + templateSourceDataRequestMap := map[string]interface{}{} + + if templateSourceDataRequest.EnvValues != nil { + envValues := []interface{}{} + for _, envValuesItem := range templateSourceDataRequest.EnvValues { + envValues = append(envValues, envValuesItem) + } + templateSourceDataRequestMap["env_values"] = envValues + } + templateSourceDataRequestMap["folder"] = templateSourceDataRequest.Folder + templateSourceDataRequestMap["init_state_file"] = templateSourceDataRequest.InitStateFile + templateSourceDataRequestMap["type"] = templateSourceDataRequest.Type + templateSourceDataRequestMap["uninstall_script_name"] = templateSourceDataRequest.UninstallScriptName + templateSourceDataRequestMap["values"] = templateSourceDataRequest.Values + if templateSourceDataRequest.ValuesMetadata != nil { + valuesMetadata := []interface{}{} + for _, valuesMetadataItem := range templateSourceDataRequest.ValuesMetadata { + valuesMetadata = append(valuesMetadata, valuesMetadataItem) + } + templateSourceDataRequestMap["values_metadata"] = valuesMetadata + } + if templateSourceDataRequest.Variablestore != nil { + variablestore := []map[string]interface{}{} + for _, 
+func resourceIBMSchematicsWorkspaceTemplateSourceDataRequestToMap(templateSourceDataRequest schematicsv1.TemplateSourceDataRequest) map[string]interface{} {
+  templateSourceDataRequestMap := map[string]interface{}{}
+
+  if templateSourceDataRequest.EnvValues != nil {
+    envValues := []interface{}{}
+    for _, envValuesItem := range templateSourceDataRequest.EnvValues {
+      envValues = append(envValues, envValuesItem)
+    }
+    templateSourceDataRequestMap["env_values"] = envValues
+  }
+  templateSourceDataRequestMap["folder"] = templateSourceDataRequest.Folder
+  templateSourceDataRequestMap["init_state_file"] = templateSourceDataRequest.InitStateFile
+  templateSourceDataRequestMap["type"] = templateSourceDataRequest.Type
+  templateSourceDataRequestMap["uninstall_script_name"] = templateSourceDataRequest.UninstallScriptName
+  templateSourceDataRequestMap["values"] = templateSourceDataRequest.Values
+  if templateSourceDataRequest.ValuesMetadata != nil {
+    valuesMetadata := []interface{}{}
+    for _, valuesMetadataItem := range templateSourceDataRequest.ValuesMetadata {
+      valuesMetadata = append(valuesMetadata, valuesMetadataItem)
+    }
+    templateSourceDataRequestMap["values_metadata"] = valuesMetadata
+  }
+  if templateSourceDataRequest.Variablestore != nil {
+    variablestore := []map[string]interface{}{}
+    for _, variablestoreItem := range templateSourceDataRequest.Variablestore {
+      variablestoreItemMap := resourceIBMSchematicsWorkspaceWorkspaceVariableRequestToMap(variablestoreItem)
+      variablestore = append(variablestore, variablestoreItemMap)
+      // TODO: handle Variablestore of type TypeList -- list of non-primitive, not model items
+    }
+    templateSourceDataRequestMap["variablestore"] = variablestore
+  }
+
+  return templateSourceDataRequestMap
+}
+
+func resourceIBMSchematicsWorkspaceTemplateSourceDataResponseToMap(templateSourceDataResponse schematicsv1.TemplateSourceDataResponse) map[string]interface{} {
+  templateSourceDataResponseMap := map[string]interface{}{}
+
+  if templateSourceDataResponse.EnvValues != nil {
+    envValues := []map[string]interface{}{}
+    for _, envValuesItem := range templateSourceDataResponse.EnvValues {
+      flattenedEnvVals := map[string]interface{}{}
+      if envValuesItem.Name != nil {
+        flattenedEnvVals[*envValuesItem.Name] = envValuesItem.Value
+      }
+      envValues = append(envValues, flattenedEnvVals)
+    }
+    templateSourceDataResponseMap["env_values"] = envValues
+  }
+  if templateSourceDataResponse.Type != nil {
+    templateSourceDataResponseMap["type"] = templateSourceDataResponse.Type
+  }
+  templateSourceDataResponseMap["folder"] = templateSourceDataResponse.Folder
+  templateSourceDataResponseMap["uninstall_script_name"] = templateSourceDataResponse.UninstallScriptName
+  templateSourceDataResponseMap["values"] = templateSourceDataResponse.Values
+  if templateSourceDataResponse.ValuesMetadata != nil {
+    valuesMetadata := []interface{}{}
+    for _, valuesMetadataItem := range templateSourceDataResponse.ValuesMetadata {
+      valuesMetadata = append(valuesMetadata, valuesMetadataItem)
+    }
+    templateSourceDataResponseMap["values_metadata"] = valuesMetadata
+  }
+  if templateSourceDataResponse.Variablestore != nil {
+    variablestore := []map[string]interface{}{}
+    for _, variablestoreItem := range templateSourceDataResponse.Variablestore {
+      variablestoreItemMap := resourceIBMSchematicsWorkspaceWorkspaceVariableResponseToMap(variablestoreItem)
+      variablestore = append(variablestore, variablestoreItemMap)
+    }
+    templateSourceDataResponseMap["variablestore"] = variablestore
+  }
+
+  return templateSourceDataResponseMap
+}
+
+func resourceIBMSchematicsWorkspaceWorkspaceVariableRequestToMap(workspaceVariableRequest schematicsv1.WorkspaceVariableRequest) map[string]interface{} {
+  workspaceVariableRequestMap := map[string]interface{}{}
+
+  workspaceVariableRequestMap["description"] = workspaceVariableRequest.Description
+  workspaceVariableRequestMap["name"] = workspaceVariableRequest.Name
+  workspaceVariableRequestMap["secure"] = workspaceVariableRequest.Secure
+  workspaceVariableRequestMap["type"] = workspaceVariableRequest.Type
+  workspaceVariableRequestMap["use_default"] = workspaceVariableRequest.UseDefault
+  workspaceVariableRequestMap["value"] = workspaceVariableRequest.Value
+
+  return workspaceVariableRequestMap
+}
+
+func resourceIBMSchematicsWorkspaceWorkspaceVariableResponseToMap(workspaceVariableResponse schematicsv1.WorkspaceVariableResponse) map[string]interface{} {
+  workspaceVariableRequestMap := map[string]interface{}{}
+
+  workspaceVariableRequestMap["description"] = workspaceVariableResponse.Description
+  workspaceVariableRequestMap["name"] = workspaceVariableResponse.Name
+  workspaceVariableRequestMap["secure"] = workspaceVariableResponse.Secure
+  workspaceVariableRequestMap["type"] = workspaceVariableResponse.Type
+  workspaceVariableRequestMap["value"] = workspaceVariableResponse.Value
+
+  return workspaceVariableRequestMap
+}
+
+func resourceIBMSchematicsWorkspaceTemplateRepoRequestToMap(templateRepoRequest schematicsv1.TemplateRepoRequest) map[string]interface{} {
+  templateRepoRequestMap := map[string]interface{}{}
+
+  templateRepoRequestMap["branch"] = templateRepoRequest.Branch
+  templateRepoRequestMap["release"] = templateRepoRequest.Release
+  templateRepoRequestMap["repo_sha_value"] = templateRepoRequest.RepoShaValue
+  templateRepoRequestMap["repo_url"] = templateRepoRequest.RepoURL
+  templateRepoRequestMap["url"] = templateRepoRequest.URL
+
+  return templateRepoRequestMap
+}
+
+func resourceIBMSchematicsWorkspaceTemplateRepoResponseToMap(templateRepoResponse schematicsv1.TemplateRepoResponse) map[string]interface{} {
+  templateRepoResponseMap := map[string]interface{}{}
+
+  templateRepoResponseMap["branch"] = templateRepoResponse.Branch
+  templateRepoResponseMap["release"] = templateRepoResponse.Release
+  templateRepoResponseMap["repo_sha_value"] = templateRepoResponse.RepoShaValue
+  templateRepoResponseMap["repo_url"] = templateRepoResponse.RepoURL
+  templateRepoResponseMap["url"] = templateRepoResponse.URL
+  templateRepoResponseMap["has_uploadedgitrepotar"] = templateRepoResponse.HasUploadedgitrepotar
+
+  return templateRepoResponseMap
+}
+
+func resourceIBMSchematicsWorkspaceWorkspaceStatusRequestToMap(workspaceStatusRequest schematicsv1.WorkspaceStatusRequest) map[string]interface{} {
+  workspaceStatusRequestMap := map[string]interface{}{}
+
+  workspaceStatusRequestMap["frozen"] = workspaceStatusRequest.Frozen
+  if workspaceStatusRequest.FrozenAt != nil {
+    workspaceStatusRequestMap["frozen_at"] = workspaceStatusRequest.FrozenAt.String()
+  }
+  workspaceStatusRequestMap["frozen_by"] = workspaceStatusRequest.FrozenBy
+  workspaceStatusRequestMap["locked"] = workspaceStatusRequest.Locked
+  workspaceStatusRequestMap["locked_by"] = workspaceStatusRequest.LockedBy
+  if workspaceStatusRequest.LockedTime != nil {
+    workspaceStatusRequestMap["locked_time"] = workspaceStatusRequest.LockedTime.String()
+  }
+
+  return workspaceStatusRequestMap
+}
+
+func resourceIBMSchematicsWorkspaceWorkspaceStatusResponseToMap(workspaceStatusResponse schematicsv1.WorkspaceStatusResponse) map[string]interface{} {
+  workspaceStatusResponseMap := map[string]interface{}{}
+
+  workspaceStatusResponseMap["frozen"] = workspaceStatusResponse.Frozen
+  if workspaceStatusResponse.FrozenAt != nil {
+    workspaceStatusResponseMap["frozen_at"] = workspaceStatusResponse.FrozenAt.String()
+  }
+  workspaceStatusResponseMap["frozen_by"] = workspaceStatusResponse.FrozenBy
+  workspaceStatusResponseMap["locked"] = workspaceStatusResponse.Locked
+  workspaceStatusResponseMap["locked_by"] = workspaceStatusResponse.LockedBy
+  if workspaceStatusResponse.LockedTime != nil {
+    workspaceStatusResponseMap["locked_time"] = workspaceStatusResponse.LockedTime.String()
+  }
+
+  return workspaceStatusResponseMap
+}
+
+func resourceIBMSchematicsWorkspaceTemplateRunTimeDataResponseToMap(templateRunTimeDataResponse schematicsv1.TemplateRunTimeDataResponse) map[string]interface{} {
+  templateRunTimeDataResponseMap := map[string]interface{}{}
+
+  templateRunTimeDataResponseMap["engine_cmd"] = templateRunTimeDataResponse.EngineCmd
+  templateRunTimeDataResponseMap["engine_name"] = templateRunTimeDataResponse.EngineName
+  templateRunTimeDataResponseMap["engine_version"] = templateRunTimeDataResponse.EngineVersion
+  templateRunTimeDataResponseMap["id"] = templateRunTimeDataResponse.ID
+  templateRunTimeDataResponseMap["log_store_url"] = templateRunTimeDataResponse.LogStoreURL
+  if templateRunTimeDataResponse.OutputValues != nil {
+    outputValues := []interface{}{}
+    for _, outputValuesItem := range templateRunTimeDataResponse.OutputValues {
+      outputValues = append(outputValues, outputValuesItem)
+    }
+    templateRunTimeDataResponseMap["output_values"] = outputValues
+  }
+  if templateRunTimeDataResponse.Resources != nil {
+    resources := []interface{}{}
+    for _, resourcesItem := range templateRunTimeDataResponse.Resources {
+      resources = append(resources, resourcesItem)
+    }
+    templateRunTimeDataResponseMap["resources"] = resources
+  }
+  templateRunTimeDataResponseMap["state_store_url"] = templateRunTimeDataResponse.StateStoreURL
+
+  return templateRunTimeDataResponseMap
+}
+
+func resourceIBMSchematicsWorkspaceWorkspaceStatusMessageToMap(workspaceStatusMessage schematicsv1.WorkspaceStatusMessage) map[string]interface{} {
+  workspaceStatusMessageMap := map[string]interface{}{}
+
+  workspaceStatusMessageMap["status_code"] = workspaceStatusMessage.StatusCode
+  workspaceStatusMessageMap["status_msg"] = workspaceStatusMessage.StatusMsg
+
+  return workspaceStatusMessageMap
+}
+
+func resourceIBMSchematicsWorkspaceUpdate(d *schema.ResourceData, meta interface{}) error {
+  schematicsClient, err := meta.(ClientSession).SchematicsV1()
+  if err != nil {
+    return err
+  }
+
+  updateWorkspaceOptions := &schematicsv1.UpdateWorkspaceOptions{}
+
+  updateWorkspaceOptions.SetWID(d.Id())
+
+  hasChange := false
+
+  if d.HasChange("catalog_ref") {
+    catalogRefAttr := d.Get("catalog_ref").([]interface{})
+    if len(catalogRefAttr) > 0 {
+      catalogRef := resourceIBMSchematicsWorkspaceMapToCatalogRef(d.Get("catalog_ref.0").(map[string]interface{}))
+      updateWorkspaceOptions.SetCatalogRef(&catalogRef)
+      hasChange = true
+    }
+  }
+  if d.HasChange("description") {
+    updateWorkspaceOptions.SetDescription(d.Get("description").(string))
+    hasChange = true
+  }
+  if d.HasChange("name") {
+    updateWorkspaceOptions.SetName(d.Get("name").(string))
+    hasChange = true
+  }
+  if d.HasChange("shared_data") {
+    sharedDataAttr := d.Get("shared_data").([]interface{})
+    if len(sharedDataAttr) > 0 {
+      sharedData := resourceIBMSchematicsWorkspaceMapToSharedTargetData(d.Get("shared_data.0").(map[string]interface{}))
+      updateWorkspaceOptions.SetSharedData(&sharedData)
+      hasChange = true
+    }
+  }
+  if d.HasChange("tags") {
+    updateWorkspaceOptions.SetTags(expandStringList(d.Get("tags").([]interface{})))
+    hasChange = true
+  }
+
+  var templateData []schematicsv1.TemplateSourceDataRequest
+
+  templateSourceDataRequestMap := map[string]interface{}{}
+  hasTemplateData := false
+
+  if d.HasChange("template_env_settings") {
+    templateSourceDataRequestMap["env_values"] = d.Get("template_env_settings").([]interface{})
+    hasTemplateData = true
+  }
+  if d.HasChange("template_git_folder") {
+    templateSourceDataRequestMap["folder"] = d.Get("template_git_folder").(string)
+    hasTemplateData = true
+  }
+  if d.HasChange("template_init_state_file") {
+    templateSourceDataRequestMap["init_state_file"] = d.Get("template_init_state_file").(string)
+    hasTemplateData = true
+  }
+  if d.HasChange("template_type") {
+    templateSourceDataRequestMap["type"] = d.Get("template_type").(string)
+    updateWorkspaceOptions.SetType([]string{d.Get("template_type").(string)})
+    hasTemplateData = true
+  }
+  if d.HasChange("template_uninstall_script_name") {
+    templateSourceDataRequestMap["uninstall_script_name"] = d.Get("template_uninstall_script_name").(string)
+    hasTemplateData = true
+  }
= d.Get("template_values").(string) + hasTemplateData = true + } + if d.HasChange("template_values_metadata") { + templateSourceDataRequestMap["values_metadata"] = d.Get("template_values_metadata").([]interface{}) + hasTemplateData = true + } + if d.HasChange("template_inputs") { + templateSourceDataRequestMap["variablestore"] = d.Get("template_inputs").([]interface{}) + hasTemplateData = true + } + if hasTemplateData { + templateDataItem := resourceIBMSchematicsWorkspaceMapToTemplateSourceDataRequest(templateSourceDataRequestMap) + templateData = append(templateData, templateDataItem) + updateWorkspaceOptions.SetTemplateData(templateData) + hasChange = true + } + + templateRepoRequestMap := map[string]interface{}{} + hasTemplateRepo := false + if d.HasChange("template_git_branch") { + templateRepoRequestMap["branch"] = d.Get("template_git_branch").(bool) + hasTemplateRepo = true + } + if d.HasChange("template_git_release") { + templateRepoRequestMap["release"] = d.Get("template_git_release").(string) + hasTemplateRepo = true + } + if d.HasChange("template_git_repo_sha_value") { + templateRepoRequestMap["repo_sha_value"] = d.Get("template_git_repo_sha_value").(string) + hasTemplateRepo = true + } + if d.HasChange("template_git_repo_url") { + templateRepoRequestMap["repo_url"] = d.Get("template_git_repo_url").(string) + hasTemplateRepo = true + } + if d.HasChange("template_git_url") { + templateRepoRequestMap["url"] = d.Get("template_git_url").(string) + hasTemplateRepo = true + } + if d.HasChange("template_git_has_uploadedgitrepotar") { + templateRepoRequestMap["has_uploadedgitrepotar"] = d.Get("template_git_has_uploadedgitrepotar").(string) + hasTemplateRepo = true + } + if hasTemplateRepo { + templateRepo := resourceIBMSchematicsWorkspaceMapToTemplateRepoUpdateRequest(templateRepoRequestMap) + updateWorkspaceOptions.SetTemplateRepo(&templateRepo) + hasChange = true + } + + if d.HasChange("template_type") { + updateWorkspaceOptions.SetType([]string{d.Get("template_type").(string)}) + hasChange = true + } + + workspaceStatusRequestMap := map[string]interface{}{} + workspaceStatus := false + if d.HasChange("frozen") { + workspaceStatusRequestMap["frozen"] = d.Get("frozen").(bool) + workspaceStatus = true + } + if d.HasChange("frozen_at") { + workspaceStatusRequestMap["frozen_at"] = d.Get("frozen_at").(string) + workspaceStatus = true + } + if d.HasChange("frozen_by") { + workspaceStatusRequestMap["frozen_by"] = d.Get("frozen_by").(string) + workspaceStatus = true + } + if d.HasChange("locked") { + workspaceStatusRequestMap["locked"] = d.Get("locked").(bool) + workspaceStatus = true + } + if d.HasChange("locked_by") { + workspaceStatusRequestMap["locked_by"] = d.Get("locked_by").(string) + workspaceStatus = true + } + if d.HasChange("locked_time") { + workspaceStatusRequestMap["locked_time"] = d.Get("locked_time").(string) + workspaceStatus = true + } + if workspaceStatus { + workspaceStatus := resourceIBMSchematicsWorkspaceMapToWorkspaceStatusUpdateRequest(workspaceStatusRequestMap) + updateWorkspaceOptions.SetWorkspaceStatus(&workspaceStatus) + hasChange = true + } + + if hasChange { + _, response, err := schematicsClient.UpdateWorkspaceWithContext(context.TODO(), updateWorkspaceOptions) + if err != nil { + log.Printf("[DEBUG] UpdateWorkspaceWithContext failed %s\n%s", err, response) + return err + } + } + + return resourceIBMSchematicsWorkspaceRead(d, meta) +} + +func resourceIBMSchematicsWorkspaceDelete(d *schema.ResourceData, meta interface{}) error { + schematicsClient, err := 
+func resourceIBMSchematicsWorkspaceDelete(d *schema.ResourceData, meta interface{}) error {
+  schematicsClient, err := meta.(ClientSession).SchematicsV1()
+  if err != nil {
+    return err
+  }
+
+  session, err := meta.(ClientSession).BluemixSession()
+  if err != nil {
+    return err
+  }
+
+  deleteWorkspaceOptions := &schematicsv1.DeleteWorkspaceOptions{}
+
+  deleteWorkspaceOptions.SetWID(d.Id())
+
+  iamRefreshToken := session.Config.IAMRefreshToken
+  deleteWorkspaceOptions.SetRefreshToken(iamRefreshToken)
+
+  _, response, err := schematicsClient.DeleteWorkspaceWithContext(context.TODO(), deleteWorkspaceOptions)
+  if err != nil {
+    log.Printf("[DEBUG] DeleteWorkspaceWithContext failed %s\n%s", err, response)
+    return err
+  }
+
+  d.SetId("")
+
+  return nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_securitygroup.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_securitygroup.go
new file mode 100644
index 00000000000..369a520d36f
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_securitygroup.go
@@ -0,0 +1,182 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+  "fmt"
+  "log"
+  "strconv"
+
+  "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+  "github.com/softlayer/softlayer-go/datatypes"
+  "github.com/softlayer/softlayer-go/filter"
+  "github.com/softlayer/softlayer-go/services"
+  "github.com/softlayer/softlayer-go/sl"
+)
+
+func resourceIBMSecurityGroup() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceIBMSecurityGroupCreate,
+    Read: resourceIBMSecurityGroupRead,
+    Update: resourceIBMSecurityGroupUpdate,
+    Delete: resourceIBMSecurityGroupDelete,
+    Exists: resourceIBMSecurityGroupExists,
+    Importer: &schema.ResourceImporter{},
+
+    Schema: map[string]*schema.Schema{
+      "name": {
+        Type: schema.TypeString,
+        Required: true,
+        ForceNew: false,
+        Description: "Security group name",
+      },
+      "description": {
+        Type: schema.TypeString,
+        Optional: true,
+        Description: "Security group description",
+      },
+    },
+  }
+}
+
+func resourceIBMSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
+  sess := meta.(ClientSession).SoftLayerSession()
+  service := services.GetNetworkSecurityGroupService(sess.SetRetries(0))
+
+  name := d.Get("name").(string)
+  var description string
+  if v, ok := d.GetOk("description"); ok {
+    description = v.(string)
+  } else {
+    description = ""
+  }
+
+  groups, err := services.GetAccountService(sess).
+    Filter(filter.Path("securityGroups.name").Eq(name).Build()).
+    GetSecurityGroups()
+
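+  // Create behaves like an upsert: if a security group with the requested
+  // name already exists, it is adopted and its description reconciled instead
+  // of creating a duplicate group.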
+		GetSecurityGroups()
+
+	if err == nil && len(groups) > 0 {
+		group := groups[0]
+		id := *group.Id
+		d.SetId(fmt.Sprintf("%d", id))
+		editSG := false
+
+		oldDesc := ""
+		if group.Description != nil {
+			oldDesc = *group.Description
+		}
+		if oldDesc != description {
+			group.Description = sl.String(description)
+			editSG = true
+		}
+
+		if editSG {
+			_, err = service.EditObject(&group)
+			return err
+		}
+
+		return nil
+	}
+	sg := &datatypes.Network_SecurityGroup{
+		Name:        sl.String(name),
+		Description: sl.String(description),
+	}
+	res, err := service.CreateObject(sg)
+	if err != nil {
+		return fmt.Errorf("Error creating Security Group: %s", err)
+	}
+
+	d.SetId(strconv.Itoa(*res.Id))
+	log.Printf("[INFO] Security Group: %d", *res.Id)
+
+	return resourceIBMSecurityGroupRead(d, meta)
+}
+
+func resourceIBMSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+
+	groupID, _ := strconv.Atoi(d.Id())
+	group, err := service.Id(groupID).GetObject()
+	if err != nil {
+		// If the group is somehow already destroyed, mark as
+		// successfully gone
+		if err, ok := err.(sl.Error); ok && err.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error retrieving Security Group: %s", err)
+	}
+
+	d.Set("name", group.Name)
+	d.Set("description", group.Description)
+	return nil
+}
+
+func resourceIBMSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+
+	groupID, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	group, err := service.Id(groupID).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving Security Group: %s", err)
+	}
+
+	if d.HasChange("description") {
+		group.Description = sl.String(d.Get("description").(string))
+	}
+
+	if d.HasChange("name") {
+		group.Name = sl.String(d.Get("name").(string))
+	}
+	_, err = service.Id(groupID).EditObject(&group)
+	if err != nil {
+		return fmt.Errorf("Error editing Security Group: %s", err)
+	}
+	return resourceIBMSecurityGroupRead(d, meta)
+}
+
+func resourceIBMSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+
+	groupID, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+	log.Printf("[INFO] Deleting Security Group: %d", groupID)
+	_, err = service.Id(groupID).DeleteObject()
+	if err != nil {
+		return fmt.Errorf("Error deleting Security Group: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMSecurityGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+
+	groupID, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(groupID).GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok {
+			if apiErr.StatusCode == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+	return result.Id != nil && *result.Id == groupID, nil
+}
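+
+// Illustrative usage from a Terraform configuration (hypothetical values,
+// shown here for reviewers; attribute names follow the schema defined above):
+//
+//   resource "ibm_security_group" "allow_ssh" {
+//     name        = "allow-ssh"
+//     description = "Allow inbound SSH"
+//   }
diff --git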
a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_securitygroup_rule.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_securitygroup_rule.go new file mode 100644 index 00000000000..1d179120413 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_securitygroup_rule.go @@ -0,0 +1,316 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMSecurityGroupRule() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMSecurityGroupRuleCreate, + Read: resourceIBMSecurityGroupRuleRead, + Delete: resourceIBMSecurityGroupRuleDelete, + Update: resourceIBMSecurityGroupRuleUpdate, + Exists: resourceIBMSecurityGroupRuleExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "direction": { + Type: schema.TypeString, + Required: true, + Description: "Direction of rule: ingress or egress", + ValidateFunc: validateSecurityRuleDirection, + }, + "ether_type": { + Type: schema.TypeString, + Optional: true, + Description: "IP version IPv4 or IPv6", + Default: "IPv4", + ValidateFunc: validateSecurityRuleEtherType, + }, + "port_range_min": { + Type: schema.TypeInt, + Optional: true, + Description: "Port number minimum range", + }, + "port_range_max": { + Type: schema.TypeInt, + Optional: true, + Description: "Port number max range", + }, + "remote_group_id": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"remote_ip"}, + Description: "remote group ID", + }, + "remote_ip": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"remote_group_id"}, + ValidateFunc: validateRemoteIP, + Description: "Remote IP Address", + }, + "protocol": { + Type: schema.TypeString, + Optional: true, + Description: "icmp, tcp or udp", + ValidateFunc: validateSecurityRuleProtocol, + }, + "security_group_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Security group ID", + }, + }, + } +} + +func findMatchingRule(sgID int, rule *datatypes.Network_SecurityGroup_Rule, + service services.Network_SecurityGroup) (*datatypes.Network_SecurityGroup_Rule, error) { + + var filters []filter.Filter + if rule.PortRangeMax != nil { + filters = append(filters, filter.Path("rules.portRangeMax").Eq(rule.PortRangeMax)) + } + if rule.PortRangeMin != nil { + filters = append(filters, filter.Path("rules.portRangeMin").Eq(rule.PortRangeMin)) + } + + if rule.RemoteGroupId != nil { + filters = append(filters, filter.Path("rules.remoteGroupId").Eq(rule.RemoteGroupId)) + } + + if rule.RemoteIp != nil { + filters = append(filters, filter.Path("rules.remoteIp").Eq(rule.RemoteIp)) + } + + filters = append(filters, filter.Path("rules.direction").Eq(rule.Direction)) + + if rule.Ethertype != nil { + filters = append(filters, filter.Path("rules.ethertype").Eq(rule.Ethertype)) + } + if rule.Protocol != nil { + filters = append(filters, filter.Path("rules.protocol").Eq(rule.Protocol)) + } + + rules, err := service.Filter(filter.Build(filters...)).Id(sgID).GetRules() + if err != nil { + return nil, fmt.Errorf("Error fetching information for Security Group Rule: %s", err) + } + log.Printf("[INFO] rules %v", rules) 
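+	// More than one rule can satisfy these filters; the first match is
+	// returned so that creating an identical rule resolves to the existing
+	// rule instead of producing a duplicate.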
+
+	if len(rules) == 0 {
+		return nil, nil
+	}
+	return &rules[0], nil
+}
+
+func resourceIBMSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+
+	sgID := d.Get("security_group_id").(int)
+
+	sgrule := datatypes.Network_SecurityGroup_Rule{}
+
+	sgrule.Direction = sl.String(d.Get("direction").(string))
+
+	if d.Get("ether_type").(string) != "" {
+		sgrule.Ethertype = sl.String(d.Get("ether_type").(string))
+	}
+
+	if _, ok := d.GetOk("port_range_min"); ok {
+		sgrule.PortRangeMin = sl.Int(d.Get("port_range_min").(int))
+	}
+
+	if _, ok := d.GetOk("port_range_max"); ok {
+		sgrule.PortRangeMax = sl.Int(d.Get("port_range_max").(int))
+	}
+
+	if d.Get("protocol").(string) != "" {
+		sgrule.Protocol = sl.String(d.Get("protocol").(string))
+	}
+
+	if v, ok := d.GetOk("remote_group_id"); ok {
+		sgrule.RemoteGroupId = sl.Int(v.(int))
+	}
+
+	if v, ok := d.GetOk("remote_ip"); ok {
+		sgrule.RemoteIp = sl.String(v.(string))
+	}
+
+	// if only one of min/max is provided, set the other one to the provided value
+	if sgrule.PortRangeMin != nil && sgrule.PortRangeMax == nil {
+		sgrule.PortRangeMax = sgrule.PortRangeMin
+	}
+	if sgrule.PortRangeMax != nil && sgrule.PortRangeMin == nil {
+		sgrule.PortRangeMin = sgrule.PortRangeMax
+	}
+
+	matchingrule, err := findMatchingRule(sgID, &sgrule, service)
+	if err != nil {
+		return err
+	}
+
+	if matchingrule != nil {
+		log.Printf("[INFO] rule exists")
+		d.SetId(fmt.Sprintf("%d", *matchingrule.Id))
+		return nil
+	}
+
+	opts := []datatypes.Network_SecurityGroup_Rule{
+		sgrule,
+	}
+	log.Println("[INFO] creating security group rule")
+	_, err = service.Id(sgID).AddRules(opts)
+	if err != nil {
+		return fmt.Errorf("Error creating Security Group Rule: %s", err)
+	}
+
+	matchingrule, err = findMatchingRule(sgID, &sgrule, service)
+	if err != nil {
+		return err
+	}
+	if matchingrule == nil {
+		return fmt.Errorf("Error creating Security Group Rule: created rule not found")
+	}
+	d.SetId(strconv.Itoa(*matchingrule.Id))
+
+	return resourceIBMSecurityGroupRuleRead(d, meta)
+}
+
+func resourceIBMSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+
+	sgID := d.Get("security_group_id").(int)
+	matchingrules, err := service.Filter(filter.Build(
+		filter.Path("rules.id").Eq(d.Id()))).Id(sgID).GetRules()
+	if err != nil {
+		// If the group is somehow already destroyed, mark as
+		// successfully gone
+		if err, ok := err.(sl.Error); ok && err.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error retrieving Security Group Rule: %s", err)
+	}
+
+	if len(matchingrules) == 0 {
+		d.SetId("")
+		return nil
+	}
+
+	d.Set("direction", matchingrules[0].Direction)
+
+	if matchingrules[0].Ethertype != nil {
+		d.Set("ether_type", matchingrules[0].Ethertype)
+	}
+	if matchingrules[0].PortRangeMin != nil {
+		d.Set("port_range_min", matchingrules[0].PortRangeMin)
+	}
+	if matchingrules[0].PortRangeMax != nil {
+		d.Set("port_range_max", matchingrules[0].PortRangeMax)
+	}
+	if matchingrules[0].Protocol != nil {
+		d.Set("protocol", matchingrules[0].Protocol)
+	}
+
+	if matchingrules[0].RemoteGroupId != nil {
+		d.Set("remote_group_id", matchingrules[0].RemoteGroupId)
+	}
+	if matchingrules[0].RemoteIp != nil {
+		d.Set("remote_ip", matchingrules[0].RemoteIp)
+	}
+	return nil
+}
+
+func resourceIBMSecurityGroupRuleUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+	securityGroupID := d.Get("security_group_id").(int)
+	matchingrules, err := service.Filter(filter.Build(
+		filter.Path("rules.id").Eq(d.Id()))).Id(securityGroupID).GetRules()
+	if err != nil {
+		return fmt.Errorf("Error retrieving Security Group Rule: %s", err)
+	}
+	if len(matchingrules) == 0 {
+		return fmt.Errorf("Error retrieving Security Group Rule: rule %s not found", d.Id())
+	}
+	if d.HasChange("direction") {
+		matchingrules[0].Direction = sl.String(d.Get("direction").(string))
+	}
+	if d.HasChange("ether_type") {
+		matchingrules[0].Ethertype = sl.String(d.Get("ether_type").(string))
+	}
+	if d.HasChange("port_range_min") {
+		matchingrules[0].PortRangeMin = sl.Int(d.Get("port_range_min").(int))
+	}
+	if d.HasChange("port_range_max") {
+		matchingrules[0].PortRangeMax = sl.Int(d.Get("port_range_max").(int))
+	}
+	if d.HasChange("protocol") {
+		matchingrules[0].Protocol = sl.String(d.Get("protocol").(string))
+	}
+	if d.HasChange("remote_group_id") {
+		matchingrules[0].RemoteGroupId = sl.Int(d.Get("remote_group_id").(int))
+	}
+	if d.HasChange("remote_ip") {
+		matchingrules[0].RemoteIp = sl.String(d.Get("remote_ip").(string))
+	}
+	_, err = service.Id(securityGroupID).EditRules([]datatypes.Network_SecurityGroup_Rule{matchingrules[0]})
+	if err != nil {
+		return fmt.Errorf("Couldn't update Security Group Rule: %s", err)
+	}
+	return resourceIBMSecurityGroupRuleRead(d, meta)
+}
+
+func resourceIBMSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+	sgID := d.Get("security_group_id").(int)
+	id, _ := strconv.Atoi(d.Id())
+	_, err := service.Id(sgID).RemoveRules([]int{id})
+	if err != nil {
+		if err, ok := err.(sl.Error); ok && err.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error deleting Security Group Rule: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMSecurityGroupRuleExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSecurityGroupService(sess)
+
+	sgID := d.Get("security_group_id").(int)
+	matchingrules, err := service.Filter(filter.Build(
+		filter.Path("rules.id").Eq(d.Id()))).Id(sgID).GetRules()
+	if err != nil {
+		// If the group is somehow already destroyed, mark as
+		// successfully gone
+		if err, ok := err.(sl.Error); ok && err.StatusCode == 404 {
+			d.SetId("")
+			return false, nil
+		}
+		return false, fmt.Errorf("Error retrieving Security Group Rule: %s", err)
+	}
+
+	if len(matchingrules) == 0 {
+		d.SetId("")
+		return false, nil
+	}
+
+	return true, nil
+}
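+
+// Illustrative usage (hypothetical values): an ingress rule opening TCP/22 on
+// the group above. Attribute names follow the schema defined in this file.
+//
+//   resource "ibm_security_group_rule" "allow_ssh" {
+//     direction         = "ingress"
+//     ether_type        = "IPv4"
+//     port_range_min    = 22
+//     port_range_max    = 22
+//     protocol          = "tcp"
+//     security_group_id = ibm_security_group.allow_ssh.id
+//   }
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_service_instance.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_service_instance.go new file mode 100644 index 00000000000..65726eb9751 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_service_instance.go @@ -0,0 +1,385 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.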
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2"
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/IBM-Cloud/bluemix-go/helpers"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+	svcInstanceSuccessStatus  = "succeeded"
+	svcInstanceProgressStatus = "in progress"
+	svcInstanceFailStatus     = "failed"
+)
+
+func resourceIBMServiceInstance() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMServiceInstanceCreate,
+		Read:     resourceIBMServiceInstanceRead,
+		Update:   resourceIBMServiceInstanceUpdate,
+		Delete:   resourceIBMServiceInstanceDelete,
+		Exists:   resourceIBMServiceInstanceExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "A name for the service instance",
+			},
+
+			"space_guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The GUID of the space in which the instance will be created",
+			},
+
+			"service": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The name of the service offering, for example speech_to_text or text_to_speech",
+			},
+
+			"credentials": {
+				Description: "The service broker-provided credentials to use this service.",
+				Type:        schema.TypeMap,
+				Sensitive:   true,
+				Computed:    true,
+			},
+
+			"service_keys": {
+				Description: "The service keys associated with the service instance",
+				Type:        schema.TypeList,
+				Computed:    true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The service key name",
+						},
+						"credentials": {
+							Type:        schema.TypeMap,
+							Computed:    true,
+							Sensitive:   true,
+							Description: "The service key credential details, such as port and username",
+						},
+					},
+				},
+			},
+
+			"service_plan_guid": {
+				Description: "The unique identifier of the service offering plan type",
+				Computed:    true,
+				Type:        schema.TypeString,
+			},
+
+			"parameters": {
+				Type:        schema.TypeMap,
+				Optional:    true,
+				Description: "Arbitrary parameters to pass along to the service broker.
Must be a JSON object", + }, + + "plan": { + Type: schema.TypeString, + Required: true, + Description: "The plan type of the service", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "wait_time_minutes": { + Description: "Define timeout to wait for the service instances to succeeded/deleted etc.", + Type: schema.TypeInt, + Optional: true, + Default: 10, + }, + "dashboard_url": { + Description: "Dashboard URL to access resource.", + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMServiceInstanceCreate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + serviceName := d.Get("service").(string) + plan := d.Get("plan").(string) + name := d.Get("name").(string) + spaceGUID := d.Get("space_guid").(string) + + svcInst := mccpv2.ServiceInstanceCreateRequest{ + Name: name, + SpaceGUID: spaceGUID, + } + + serviceOff, err := cfClient.ServiceOfferings().FindByLabel(serviceName) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + servicePlan, err := cfClient.ServicePlans().FindPlanInServiceOffering(serviceOff.GUID, plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + svcInst.PlanGUID = servicePlan.GUID + + if parameters, ok := d.GetOk("parameters"); ok { + temp := parameters.(map[string]interface{}) + keyParams := make(map[string]interface{}) + for k, v := range temp { + if v == "true" || v == "false" { + b, _ := strconv.ParseBool(v.(string)) + keyParams[k] = b + + } else { + keyParams[k] = v + } + } + svcInst.Params = keyParams + } + + if _, ok := d.GetOk("tags"); ok { + svcInst.Tags = getServiceTags(d) + } + + service, err := cfClient.ServiceInstances().Create(svcInst) + if err != nil { + return fmt.Errorf("Error creating service: %s", err) + } + + d.SetId(service.Metadata.GUID) + + _, err = waitForServiceInstanceAvailable(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for create service (%s) to be succeeded: %s", d.Id(), err) + } + + return resourceIBMServiceInstanceRead(d, meta) +} + +func resourceIBMServiceInstanceRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + serviceGUID := d.Id() + + service, err := cfClient.ServiceInstances().Get(serviceGUID, 1) + if err != nil { + return fmt.Errorf("Error retrieving service: %s", err) + } + + servicePlanGUID := service.Entity.ServicePlanGUID + d.Set("service_plan_guid", servicePlanGUID) + d.Set("space_guid", service.Entity.SpaceGUID) + serviceKeys := service.Entity.ServiceKeys + d.Set("service_keys", flattenServiceInstanceCredentials(serviceKeys)) + d.Set("credentials", Flatten(service.Entity.Credentials)) + d.Set("tags", service.Entity.Tags) + d.Set("name", service.Entity.Name) + d.Set("dashboard_url", service.Entity.DashboardURL) + + d.Set("plan", service.Entity.ServicePlan.Entity.Name) + + svcOff, err := cfClient.ServiceOfferings().Get(service.Entity.ServicePlan.Entity.ServiceGUID) + if err != nil { + return err + } + d.Set("service", svcOff.Entity.Label) + + return nil +} + +func resourceIBMServiceInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + serviceGUID := d.Id() + + updateReq := mccpv2.ServiceInstanceUpdateRequest{} + if d.HasChange("name") { + updateReq.Name 
= helpers.String(d.Get("name").(string)) + } + + if d.HasChange("plan") { + plan := d.Get("plan").(string) + service := d.Get("service").(string) + serviceOff, err := cfClient.ServiceOfferings().FindByLabel(service) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + servicePlan, err := cfClient.ServicePlans().FindPlanInServiceOffering(serviceOff.GUID, plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + updateReq.PlanGUID = helpers.String(servicePlan.GUID) + + } + + if d.HasChange("parameters") { + updateReq.Params = d.Get("parameters").(map[string]interface{}) + } + + if d.HasChange("tags") { + tags := getServiceTags(d) + updateReq.Tags = tags + } + + _, err = cfClient.ServiceInstances().Update(serviceGUID, updateReq) + if err != nil { + return fmt.Errorf("Error updating service: %s", err) + } + + _, err = waitForServiceInstanceAvailable(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for update service (%s) to be succeeded: %s", d.Id(), err) + } + + return resourceIBMServiceInstanceRead(d, meta) +} + +func resourceIBMServiceInstanceDelete(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + id := d.Id() + + err = cfClient.ServiceInstances().Delete(id, true) + if err != nil { + return fmt.Errorf("Error deleting service: %s", err) + } + + _, err = waitForServiceInstanceDelete(d, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for service (%s) to be deleted: %s", d.Id(), err) + } + + d.SetId("") + + return nil +} +func resourceIBMServiceInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + serviceGUID := d.Id() + + service, err := cfClient.ServiceInstances().Get(serviceGUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return service.Metadata.GUID == serviceGUID, nil +} + +func getServiceTags(d *schema.ResourceData) []string { + tagSet := d.Get("tags").(*schema.Set) + + if tagSet.Len() == 0 { + empty := []string{} + return empty + } + + tags := make([]string, 0, tagSet.Len()) + for _, elem := range tagSet.List() { + tag := elem.(string) + tags = append(tags, tag) + } + return tags +} + +func waitForServiceInstanceAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + serviceGUID := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{svcInstanceProgressStatus}, + Target: []string{svcInstanceSuccessStatus}, + Refresh: func() (interface{}, string, error) { + service, err := cfClient.ServiceInstances().Get(serviceGUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { + return nil, "", fmt.Errorf("The service instance %s does not exist anymore: %v", d.Id(), err) + } + return nil, "", err + } + if service.Entity.LastOperation.State == svcInstanceFailStatus { + return service, service.Entity.LastOperation.State, fmt.Errorf("The service instance %s failed: %v", d.Id(), err) + } + return service, service.Entity.LastOperation.State, nil + }, + Timeout: time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 
10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func waitForServiceInstanceDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return false, err
+	}
+	serviceGUID := d.Id()
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{svcInstanceProgressStatus},
+		Target:  []string{svcInstanceSuccessStatus},
+		Refresh: func() (interface{}, string, error) {
+			service, err := cfClient.ServiceInstances().Get(serviceGUID)
+			if err != nil {
+				if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 {
+					return service, svcInstanceSuccessStatus, nil
+				}
+				return nil, "", err
+			}
+			if service.Entity.LastOperation.State == svcInstanceFailStatus {
+				return service, service.Entity.LastOperation.State, fmt.Errorf("The service instance %s failed to delete", d.Id())
+			}
+			return service, service.Entity.LastOperation.State, nil
+		},
+		Timeout:    time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_service_key.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_service_key.go new file mode 100644 index 00000000000..02a72ed20e6 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_service_key.go @@ -0,0 +1,151 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/IBM-Cloud/bluemix-go/bmxerror"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func resourceIBMServiceKey() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMServiceKeyCreate,
+		Read:     resourceIBMServiceKeyRead,
+		Update:   resourceIBMServiceKeyUpdate,
+		Delete:   resourceIBMServiceKeyDelete,
+		Exists:   resourceIBMServiceKeyExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The name of the service key",
+			},
+
+			"service_instance_guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The GUID of the service instance for which to create the service key",
+			},
+			"parameters": {
+				Type:        schema.TypeMap,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Arbitrary parameters to pass along to the service broker. Must be a JSON object",
+			},
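+			// Note: string values "true"/"false" in parameters are coerced to
+			// booleans in resourceIBMServiceKeyCreate before the key is created.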
Must be a JSON object", + }, + "credentials": { + Description: "Credentials asociated with the key", + Type: schema.TypeMap, + Sensitive: true, + Computed: true, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceIBMServiceKeyCreate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + name := d.Get("name").(string) + serviceInstanceGUID := d.Get("service_instance_guid").(string) + + var keyParams map[string]interface{} + + if parameters, ok := d.GetOk("parameters"); ok { + temp := parameters.(map[string]interface{}) + for k, v := range temp { + if v == "true" || v == "false" { + b, _ := strconv.ParseBool(v.(string)) + keyParams[k] = b + + } else { + keyParams[k] = v + } + } + } + + serviceKey, err := cfClient.ServiceKeys().Create(serviceInstanceGUID, name, keyParams) + if err != nil { + return fmt.Errorf("Error creating service key: %s", err) + } + + d.SetId(serviceKey.Metadata.GUID) + + return resourceIBMServiceKeyRead(d, meta) +} + +func resourceIBMServiceKeyUpdate(d *schema.ResourceData, meta interface{}) error { + //Only tags are updated and that too locally hence nothing to validate and update in terms of real API at this point + return nil +} + +func resourceIBMServiceKeyRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + serviceKeyGUID := d.Id() + + serviceKey, err := cfClient.ServiceKeys().Get(serviceKeyGUID) + if err != nil { + return fmt.Errorf("Error retrieving service key: %s", err) + } + d.Set("credentials", Flatten(serviceKey.Entity.Credentials)) + d.Set("service_instance_guid", serviceKey.Entity.ServiceInstanceGUID) + d.Set("name", serviceKey.Entity.Name) + + return nil +} + +func resourceIBMServiceKeyDelete(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + serviceKeyGUID := d.Id() + + err = cfClient.ServiceKeys().Delete(serviceKeyGUID) + if err != nil { + return fmt.Errorf("Error deleting service key: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceIBMServiceKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + serviceKeyGUID := d.Id() + + serviceKey, err := cfClient.ServiceKeys().Get(serviceKeyGUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return serviceKey.Metadata.GUID == serviceKeyGUID, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_space.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_space.go new file mode 100644 index 00000000000..8811539a70c --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_space.go @@ -0,0 +1,347 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2" + "github.com/IBM-Cloud/bluemix-go/bmxerror" + "github.com/IBM-Cloud/bluemix-go/helpers" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceIBMSpace() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMSpaceCreate, + Read: resourceIBMSpaceRead, + Update: resourceIBMSpaceUpdate, + Delete: resourceIBMSpaceDelete, + Exists: resourceIBMSpaceExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the space", + }, + "org": { + Description: "The org this space belongs to", + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "auditors": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The IBMID of the users who will have auditor role in this space, ex - user@example.com", + }, + "managers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The IBMID of the users who will have manager role in this space, ex - user@example.com", + }, + "developers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The IBMID of the users who will have developer role in this space, ex - user@example.com", + }, + "space_quota": { + Description: "The name of the Space Quota Definition", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceIBMSpaceCreate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + org := d.Get("org").(string) + name := d.Get("name").(string) + + req := mccpv2.SpaceCreateRequest{ + Name: name, + } + + orgFields, err := cfClient.Organizations().FindByName(org, BluemixRegion) + if err != nil { + return fmt.Errorf("Error retrieving org: %s", err) + } + req.OrgGUID = orgFields.GUID + + if spaceQuota, ok := d.GetOk("space_quota"); ok { + quota, err := cfClient.SpaceQuotas().FindByName(spaceQuota.(string), orgFields.GUID) + if err != nil { + return fmt.Errorf("Error retrieving space quota: %s", err) + } + req.SpaceQuotaGUID = quota.GUID + } + + spaceAPI := cfClient.Spaces() + space, err := spaceAPI.Create(req) + if err != nil { + return fmt.Errorf("Error creating space: %s", err) + } + + spaceGUID := space.Metadata.GUID + d.SetId(spaceGUID) + + if developerSet := d.Get("developers").(*schema.Set); len(developerSet.List()) > 0 { + developers := expandStringList(developerSet.List()) + for _, d := range developers { + _, err := spaceAPI.AssociateDeveloper(spaceGUID, d) + if err != nil { + return fmt.Errorf("Error associating developer %s with space %s : %s", d, spaceGUID, err) + } + } + } + + if auditorSet := d.Get("auditors").(*schema.Set); len(auditorSet.List()) > 0 { + auditors := expandStringList(auditorSet.List()) + for _, d := range auditors { + _, err := spaceAPI.AssociateAuditor(spaceGUID, d) + if err != nil { + return fmt.Errorf("Error associating auditor %s with space %s : %s", d, spaceGUID, err) + } + } + + } + if managerSet := d.Get("managers").(*schema.Set); len(managerSet.List()) > 0 { + managers := expandStringList(managerSet.List()) + for _, d 
:= range managers { + _, err := spaceAPI.AssociateManager(spaceGUID, d) + if err != nil { + return fmt.Errorf("Error associating manager %s with space %s : %s", d, spaceGUID, err) + } + } + } + + return resourceIBMSpaceRead(d, meta) +} + +func resourceIBMSpaceRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + spaceGUID := d.Id() + + spaceAPI := cfClient.Spaces() + orgAPI := cfClient.Organizations() + spaceDetails, err := spaceAPI.Get(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving space: %s", err) + } + + auditors, err := spaceAPI.ListAuditors(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving auditors in the space: %s", err) + } + + managers, err := spaceAPI.ListManagers(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving managers in the space: %s", err) + } + + developers, err := spaceAPI.ListDevelopers(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving developers in space: %s", err) + } + + d.Set("auditors", flattenSpaceRoleUsers(auditors)) + d.Set("managers", flattenSpaceRoleUsers(managers)) + d.Set("developers", flattenSpaceRoleUsers(developers)) + + if spaceDetails.Entity.SpaceQuotaGUID != "" { + sqAPI := cfClient.SpaceQuotas() + quota, err := sqAPI.Get(spaceDetails.Entity.SpaceQuotaGUID) + if err != nil { + return fmt.Errorf("Error retrieving quotas details for space: %s", err) + } + d.Set("space_quota", quota.Entity.Name) + } + d.Set("name", spaceDetails.Entity.Name) + org, err := orgAPI.Get(spaceDetails.Entity.OrgGUID) + if err != nil { + return fmt.Errorf("Error retrieving Organization details for space: %s", err) + } + d.Set("org", org.Entity.Name) + return nil +} + +func resourceIBMSpaceUpdate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + id := d.Id() + + req := mccpv2.SpaceUpdateRequest{} + if d.HasChange("name") { + req.Name = helpers.String(d.Get("name").(string)) + } + + api := cfClient.Spaces() + _, err = api.Update(id, req) + if err != nil { + return fmt.Errorf("Error updating space: %s", err) + } + + err = updateAuditors(api, id, d) + if err != nil { + return err + } + err = updateManagers(api, id, d) + if err != nil { + return err + } + err = updateDevelopers(api, id, d) + if err != nil { + return err + } + return resourceIBMSpaceRead(d, meta) +} + +func resourceIBMSpaceDelete(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + id := d.Id() + + err = cfClient.Spaces().Delete(id, false) + if err != nil { + return fmt.Errorf("Error deleting space: %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMSpaceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + id := d.Id() + + space, err := cfClient.Spaces().Get(id) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return space.Metadata.GUID == id, nil +} + +func updateDevelopers(api mccpv2.Spaces, spaceGUID string, d *schema.ResourceData) error { + if !d.HasChange("developers") { + return nil + } + var remove, add []string + o, n := d.GetChange("developers") + os := o.(*schema.Set) + ns := n.(*schema.Set) + + remove = 
expandStringList(os.Difference(ns).List()) + add = expandStringList(ns.Difference(os).List()) + + if len(add) > 0 { + for _, d := range add { + _, err := api.AssociateDeveloper(spaceGUID, d) + if err != nil { + return fmt.Errorf("Error associating developer %s with space %s : %s", d, spaceGUID, err) + } + } + } + if len(remove) > 0 { + for _, d := range remove { + err := api.DisassociateDeveloper(spaceGUID, d) + if err != nil { + return fmt.Errorf("Error dis-associating developer %s with space %s : %s", d, spaceGUID, err) + } + } + } + return nil +} + +func updateManagers(api mccpv2.Spaces, spaceGUID string, d *schema.ResourceData) error { + if !d.HasChange("managers") { + return nil + } + var remove, add []string + o, n := d.GetChange("managers") + os := o.(*schema.Set) + ns := n.(*schema.Set) + + remove = expandStringList(os.Difference(ns).List()) + add = expandStringList(ns.Difference(os).List()) + + if len(add) > 0 { + for _, d := range add { + _, err := api.AssociateManager(spaceGUID, d) + if err != nil { + return fmt.Errorf("Error associating manager %s with space %s : %s", d, spaceGUID, err) + } + } + } + if len(remove) > 0 { + for _, d := range remove { + err := api.DisassociateManager(spaceGUID, d) + if err != nil { + return fmt.Errorf("Error dis-associating manager %s with space %s : %s", d, spaceGUID, err) + } + } + } + return nil +} +func updateAuditors(api mccpv2.Spaces, spaceGUID string, d *schema.ResourceData) error { + if !d.HasChange("auditors") { + return nil + } + var remove, add []string + o, n := d.GetChange("auditors") + os := o.(*schema.Set) + ns := n.(*schema.Set) + + remove = expandStringList(os.Difference(ns).List()) + add = expandStringList(ns.Difference(os).List()) + + if len(add) > 0 { + for _, d := range add { + _, err := api.AssociateAuditor(spaceGUID, d) + if err != nil { + return fmt.Errorf("Error associating auditor %s with space %s : %s", d, spaceGUID, err) + } + } + } + if len(remove) > 0 { + for _, d := range remove { + err := api.DisassociateAuditor(spaceGUID, d) + if err != nil { + return fmt.Errorf("Error dis-associating auditor %s with space %s : %s", d, spaceGUID, err) + } + } + } + return nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ssl_certificate.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ssl_certificate.go new file mode 100644 index 00000000000..b9ed4f5f3de --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_ssl_certificate.go @@ -0,0 +1,854 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/helpers/product"
+	services "github.com/softlayer/softlayer-go/services"
+	session1 "github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+const (
+	AdditionalSSLServicesPackageType            = "ADDITIONAL_SERVICES"
+	AdditionalServicesSSLCertificatePackageType = "ADDITIONAL_SERVICES_SSL_CERTIFICATE"
+
+	SSLMask = "id"
+)
+
+func resourceIBMSSLCertificate() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMSSLCertificateCreate,
+		Read:     resourceIBMSSLCertificateRead,
+		Update:   resourceIBMSSLCertificateUpdate,
+		Delete:   resourceIBMSSLCertificateDelete,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+
+			"server_count": {
+				Type:        schema.TypeInt,
+				Required:    true,
+				Description: "Server count",
+			},
+
+			"server_type": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Server type",
+			},
+
+			"validity_months": {
+				Type:        schema.TypeInt,
+				Required:    true,
+				Description: "Validity of the SSL certificate in months",
+			},
+
+			"ssl_type": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "SSL type",
+			},
+
+			"certificate_signing_request": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Certificate signing request info",
+			},
+
+			"renewal_flag": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     true,
+				Description: "Renewal flag",
+			},
+
+			"order_approver_email_address": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Email address of the approver",
+			},
+
+			"technical_contact_same_as_org_address_flag": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "Technical contact same as org address flag",
+			},
+
+			"administrative_contact_same_as_technical_flag": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "Administrative contact same as technical flag",
+			},
+
+			"billing_contact_same_as_technical_flag": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "Billing contact same as technical flag",
+			},
+
+			"administrative_address_same_as_organization_flag": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "Administrative address same as organization flag",
+			},
+
+			"billing_address_same_as_organization_flag": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "Billing address same as organization flag",
+			},
+
+			"organization_information": {
+				Type:        schema.TypeSet,
+				Required:    true,
+				MaxItems:    1,
+				Description: "Organization information",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+
+						"org_address": {
+							Type:        schema.TypeSet,
+							Required:    true,
+							Description: "Organization address",
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+
+									"org_address_line1": &schema.Schema{
+										Type:     schema.TypeString,
+										Required: true,
+									},
+
+									"org_address_line2": &schema.Schema{
+										Type:     schema.TypeString,
+										Optional: true,
+									},
+
+									"org_city": &schema.Schema{
+										Type:     schema.TypeString,
+										Required: true,
+									},
+
+									"org_country_code": &schema.Schema{
+										Type:     schema.TypeString,
+										Required: true,
+									},
+
+									"org_postal_code": 
&schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "org_state": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "org_organization_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Organization name", + }, + + "org_phone_number": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Organization phone number", + }, + + "org_fax_number": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "technical_contact": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Description: "Technical contact info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "tech_address": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tech_address_line1": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tech_address_line2": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tech_city": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tech_country_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tech_postal_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tech_state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "tech_organization_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "tech_first_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "tech_last_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "tech_email_address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "tech_phone_number": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "tech_fax_number": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tech_title": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "billing_contact": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "billing_address": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "billing_address_line1": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_address_line2": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_city": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_country_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_postal_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "billing_organization_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "billing_first_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_last_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_email_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_phone_number": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_fax_number": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "billing_title": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + 
"administrative_contact": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "admin_address": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_address_line1": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "admin_address_line2": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "admin_city": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + + "admin_country_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "admin_postal_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "admin_state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "admin_organization_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "admin_first_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "admin_last_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "admin_email_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "admin_phone_number": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "admin_fax_number": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "admin_title": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + } +} +func resourceIBMSSLCertificateCreate(d *schema.ResourceData, m interface{}) error { + sess := m.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateRequestService(sess.SetRetries(0)) + sslKeyName := sl.String(d.Get("ssl_type").(string)) + pkg, err := product.GetPackageByType(sess, AdditionalServicesSSLCertificatePackageType) + if err != nil { + return err + } + productItems, err := product.GetPackageProducts(sess, *pkg.Id) + if err != nil { + return err + } + var itemId *int + for _, item := range productItems { + if *item.KeyName == *sslKeyName { + itemId = item.Id + } + } + validCSR, err := service.ValidateCsr(sl.String(d.Get("certificate_signing_request").(string)), sl.Int(d.Get("validity_months").(int)), itemId, sl.String(d.Get("server_type").(string))) + if err != nil { + return fmt.Errorf("Error during validation of CSR: %s", err) + } + if validCSR == true { + productOrderContainer, err := buildSSLProductOrderContainer(d, sess, AdditionalServicesSSLCertificatePackageType) + if err != nil { + // Find price items with AdditionalServices + productOrderContainer, err = buildSSLProductOrderContainer(d, sess, AdditionalSSLServicesPackageType) + if err != nil { + return fmt.Errorf("Error creating SSL certificate: %s", err) + } + } + log.Printf("[INFO] Creating SSL Certificate") + verifiedOrderContainer, err := services.GetProductOrderService(sess).VerifyOrder(productOrderContainer) + if err != nil { + return fmt.Errorf("Order verification failed: %s", err) + } + + servercorecount := verifiedOrderContainer.ServerCoreCount + log.Println(verifiedOrderContainer) + log.Printf("ServerCoreCount: %d", servercorecount) + receipt, err := services.GetProductOrderService(sess).PlaceOrder(productOrderContainer, sl.Bool(false)) + + if err != nil { + return fmt.Errorf("Error during creation of ssl: %s", err) + } + + ssl, err := findSSLByOrderId(sess, *receipt.OrderId) + d.SetId(fmt.Sprintf("%d", *ssl.Id)) + return resourceIBMSSLCertificateRead(d, m) + } else { + log.Println("Provided CSR is not valid.") + 
return fmt.Errorf("Error while validating CSR: %s", err) + } +} + +func resourceIBMSSLCertificateRead(d *schema.ResourceData, m interface{}) error { + sess := m.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateRequestService(sess) + sslId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid SSL ID, must be an integer: %s", err) + } + + ssl, err := service.Id(sslId).Mask(SSLMask).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving SSL: %s", err) + } + d.Set("certificate_signing_request", ssl.CertificateSigningRequest) + return nil +} + +func resourceIBMSSLCertificateUpdate(d *schema.ResourceData, m interface{}) error { + return nil +} + +func resourceIBMSSLCertificateDelete(d *schema.ResourceData, m interface{}) error { + sess := m.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateService(sess) + service1 := services.GetSecurityCertificateRequestService(sess) + sslId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid SSL ID, must be an integer: %s", err) + } + + value, err := service1.Id(sslId).GetObject() + if err != nil { + return fmt.Errorf("Not a valid Object ID: %s", err) + } + sslReqId := value.StatusId + + if *sslReqId == 49 || *sslReqId == 43 { + deleteObject, err := service.Id(sslId).DeleteObject() + if deleteObject == false { + return fmt.Errorf("Error deleting SSL: %s", err) + } else { + d.SetId("") + return nil + } + } else if *sslReqId == 50 { + cancelObject, err := service1.Id(sslId).CancelSslOrder() + if cancelObject == false { + return fmt.Errorf("Error deleting SSL: %s", err) + } else { + d.SetId("") + return nil + } + } else { + d.SetId("") + return nil + } +} + +func normalizedCert(cert interface{}) string { + if cert == nil || cert == (*string)(nil) { + return "" + } + + switch cert.(type) { + case string: + return strings.TrimSpace(cert.(string)) + default: + return "" + } +} + +func buildSSLProductOrderContainer(d *schema.ResourceData, sess *session1.Session, packageType string) (*datatypes.Container_Product_Order_Security_Certificate, error) { + certificateSigningRequest := sl.String(d.Get("certificate_signing_request").(string)) + orderApproverEmailAddress := sl.String(d.Get("order_approver_email_address").(string)) + renewalFlag := sl.Bool(d.Get("renewal_flag").(bool)) + serverCount := sl.Int(d.Get("server_count").(int)) + validityMonths := sl.Int(d.Get("validity_months").(int)) + serverType := sl.String(d.Get("server_type").(string)) + sslType := sl.String(d.Get("ssl_type").(string)) + orgnizationInfoList := d.Get("organization_information").(*schema.Set).List() + var addressline1, addressline2, city, countryCode, state, postalCode, organizationName, phoneNumber, faxNumber string + for _, orgnizationInfo := range orgnizationInfoList { + org_info := orgnizationInfo.(map[string]interface{}) + org_addressList := org_info["org_address"].(*schema.Set).List() + for _, org_address := range org_addressList { + org_addr := org_address.(map[string]interface{}) + addressline1 = org_addr["org_address_line1"].(string) + addressline2 = org_addr["org_address_line2"].(string) + city = org_addr["org_city"].(string) + countryCode = org_addr["org_country_code"].(string) + state = org_addr["org_state"].(string) + postalCode = org_addr["org_postal_code"].(string) + } + organizationName = org_info["org_organization_name"].(string) + phoneNumber = org_info["org_phone_number"].(string) + faxNumber = org_info["org_fax_number"].(string) + } + 
org_address_information := datatypes.Container_Product_Order_Attribute_Address{ + AddressLine1: &addressline1, + AddressLine2: &addressline2, + City: &city, + CountryCode: &countryCode, + PostalCode: &postalCode, + State: &state, + } + org_information := datatypes.Container_Product_Order_Attribute_Organization{ + Address: &org_address_information, + OrganizationName: &organizationName, + PhoneNumber: &phoneNumber, + FaxNumber: &faxNumber, + } + TechInfoList := d.Get("technical_contact").(*schema.Set).List() + var tech_addressline1, tech_addressline2, tech_city, tech_countryCode, tech_state, tech_postalCode, tech_organizationName, tech_phoneNumber, tech_faxNumber, tech_emailAddress, tech_firstName, tech_lastName, tech_title string + for _, technicalcont := range TechInfoList { + tech_contact := technicalcont.(map[string]interface{}) + tect_addressList := tech_contact["tech_address"].(*schema.Set).List() + for _, tech_address := range tect_addressList { + tech_addr := tech_address.(map[string]interface{}) + tech_addressline1 = tech_addr["tech_address_line1"].(string) + tech_addressline2 = tech_addr["tech_address_line2"].(string) + tech_city = tech_addr["tech_city"].(string) + tech_countryCode = tech_addr["tech_country_code"].(string) + tech_state = tech_addr["tech_state"].(string) + tech_postalCode = tech_addr["tech_postal_code"].(string) + } + tech_organizationName = tech_contact["tech_organization_name"].(string) + tech_phoneNumber = tech_contact["tech_phone_number"].(string) + tech_faxNumber = tech_contact["tech_fax_number"].(string) + tech_emailAddress = tech_contact["tech_email_address"].(string) + tech_firstName = tech_contact["tech_first_name"].(string) + tech_lastName = tech_contact["tech_last_name"].(string) + tech_title = tech_contact["tech_title"].(string) + } + tech_address_information := datatypes.Container_Product_Order_Attribute_Address{ + AddressLine1: &tech_addressline1, + AddressLine2: &tech_addressline2, + City: &tech_city, + CountryCode: &tech_countryCode, + PostalCode: &tech_postalCode, + State: &tech_state, + } + techAddressFlag := d.Get("technical_contact_same_as_org_address_flag").(bool) + var technical_contact_attr datatypes.Container_Product_Order_Attribute_Contact + if techAddressFlag { + technical_contact_attr = datatypes.Container_Product_Order_Attribute_Contact{ + Address: &org_address_information, + EmailAddress: &tech_emailAddress, + FirstName: &tech_firstName, + LastName: &tech_lastName, + OrganizationName: &tech_organizationName, + PhoneNumber: &tech_phoneNumber, + FaxNumber: &tech_faxNumber, + Title: &tech_title, + } + } else { + technical_contact_attr = datatypes.Container_Product_Order_Attribute_Contact{ + Address: &tech_address_information, + EmailAddress: &tech_emailAddress, + FirstName: &tech_firstName, + LastName: &tech_lastName, + OrganizationName: &tech_organizationName, + PhoneNumber: &tech_phoneNumber, + FaxNumber: &tech_faxNumber, + Title: &tech_title, + } + } + + administrativeContactList := d.Get("administrative_contact").(*schema.Set).List() + var admin_addressline1, admin_addressline2, admin_city, admin_countryCode, admin_state, admin_postalCode, admin_organizationName, admin_phoneNumber, admin_faxNumber, admin_emailAddress, admin_firstName, admin_lastName, admin_title string + for _, administrativecont := range administrativeContactList { + administrative_contact := administrativecont.(map[string]interface{}) + administrative_addressList := administrative_contact["admin_address"].(*schema.Set).List() + for _, admin_address := range 
administrative_addressList { + admin_addr := admin_address.(map[string]interface{}) + admin_addressline1 = admin_addr["admin_address_line1"].(string) + admin_addressline2 = admin_addr["admin_address_line2"].(string) + admin_city = admin_addr["admin_city"].(string) + admin_countryCode = admin_addr["admin_country_code"].(string) + admin_state = admin_addr["admin_state"].(string) + admin_postalCode = admin_addr["admin_postal_code"].(string) + } + admin_organizationName = administrative_contact["admin_organization_name"].(string) + admin_phoneNumber = administrative_contact["admin_phone_number"].(string) + admin_faxNumber = administrative_contact["admin_fax_number"].(string) + admin_emailAddress = administrative_contact["admin_email_address"].(string) + admin_firstName = administrative_contact["admin_first_name"].(string) + admin_lastName = administrative_contact["admin_last_name"].(string) + admin_title = administrative_contact["admin_title"].(string) + } + administrative_address_information := datatypes.Container_Product_Order_Attribute_Address{ + AddressLine1: &admin_addressline1, + AddressLine2: &admin_addressline2, + City: &admin_city, + CountryCode: &admin_countryCode, + PostalCode: &admin_postalCode, + State: &admin_state, + } + administrativeAddressSameAsOrg := d.Get("administrative_address_same_as_organization_flag").(bool) + var administrative_contact_attr datatypes.Container_Product_Order_Attribute_Contact + if administrativeAddressSameAsOrg { + administrative_contact_attr = datatypes.Container_Product_Order_Attribute_Contact{ + Address: &org_address_information, + EmailAddress: &admin_emailAddress, + FirstName: &admin_firstName, + LastName: &admin_lastName, + OrganizationName: &admin_organizationName, + PhoneNumber: &admin_phoneNumber, + FaxNumber: &admin_faxNumber, + Title: &admin_title, + } + } else { + administrative_contact_attr = datatypes.Container_Product_Order_Attribute_Contact{ + Address: &administrative_address_information, + EmailAddress: &admin_emailAddress, + FirstName: &admin_firstName, + LastName: &admin_lastName, + OrganizationName: &admin_organizationName, + PhoneNumber: &admin_phoneNumber, + FaxNumber: &admin_faxNumber, + Title: &admin_title, + } + } + + billingContactList := d.Get("billing_contact").(*schema.Set).List() + var bill_addressline1, bill_addressline2, bill_city, bill_countryCode, bill_state, bill_postalCode, bill_organizationName, bill_phoneNumber, bill_faxNumber, bill_emailAddress, bill_firstName, bill_lastName, bill_title string + for _, billingcont := range billingContactList { + billing_contact := billingcont.(map[string]interface{}) + billing_addressList := billing_contact["billing_address"].(*schema.Set).List() + for _, billing_address := range billing_addressList { + billing_addr := billing_address.(map[string]interface{}) + bill_addressline1 = billing_addr["billing_address_line1"].(string) + bill_addressline2 = billing_addr["billing_address_line2"].(string) + bill_city = billing_addr["billing_city"].(string) + bill_countryCode = billing_addr["billing_country_code"].(string) + bill_state = billing_addr["billing_state"].(string) + bill_postalCode = billing_addr["billing_postal_code"].(string) + } + bill_organizationName = billing_contact["billing_organization_name"].(string) + bill_phoneNumber = billing_contact["billing_phone_number"].(string) + bill_faxNumber = billing_contact["billing_fax_number"].(string) + bill_emailAddress = billing_contact["billing_email_address"].(string) + bill_firstName = billing_contact["billing_first_name"].(string) + 
bill_lastName = billing_contact["billing_last_name"].(string) + bill_title = billing_contact["billing_title"].(string) + } + billing_address_information := datatypes.Container_Product_Order_Attribute_Address{ + AddressLine1: &bill_addressline1, + AddressLine2: &bill_addressline2, + City: &bill_city, + CountryCode: &bill_countryCode, + PostalCode: &bill_postalCode, + State: &bill_state, + } + billAddressSameAsOrg := d.Get("billing_address_same_as_organization_flag").(bool) + var billing_contact_attr datatypes.Container_Product_Order_Attribute_Contact + if billAddressSameAsOrg { + billing_contact_attr = datatypes.Container_Product_Order_Attribute_Contact{ + Address: &org_address_information, + EmailAddress: &bill_emailAddress, + FirstName: &bill_firstName, + LastName: &bill_lastName, + OrganizationName: &bill_organizationName, + PhoneNumber: &bill_phoneNumber, + FaxNumber: &bill_faxNumber, + Title: &bill_title, + } + } else { + billing_contact_attr = datatypes.Container_Product_Order_Attribute_Contact{ + Address: &billing_address_information, + EmailAddress: &bill_emailAddress, + FirstName: &bill_firstName, + LastName: &bill_lastName, + OrganizationName: &bill_organizationName, + PhoneNumber: &bill_phoneNumber, + FaxNumber: &bill_faxNumber, + Title: &bill_title, + } + } + + administrativeContactSameAsTechnical := d.Get("administrative_contact_same_as_technical_flag").(bool) + billingContactSameAsTechnical := d.Get("billing_contact_same_as_technical_flag").(bool) + if administrativeContactSameAsTechnical { + administrative_contact_attr = technical_contact_attr + } + if billingContactSameAsTechnical { + billing_contact_attr = technical_contact_attr + } + pkg, err := product.GetPackageByType(sess, packageType) + if err != nil { + return &datatypes.Container_Product_Order_Security_Certificate{}, err + } + + productItems, err := product.GetPackageProducts(sess, *pkg.Id) + if err != nil { + return &datatypes.Container_Product_Order_Security_Certificate{}, err + } + sslKeyName := sslType + + sslItems := []datatypes.Product_Item{} + for _, item := range productItems { + if *item.KeyName == *sslKeyName { + sslItems = append(sslItems, item) + } + } + + if len(sslItems) == 0 { + return &datatypes.Container_Product_Order_Security_Certificate{}, + fmt.Errorf("No product items matching %s could be found", *sslKeyName) + } + sslContainer := datatypes.Container_Product_Order_Security_Certificate{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Prices: []datatypes.Product_Item_Price{ + { + Id: sslItems[0].Prices[0].Id, + }, + }, + Quantity: sl.Int(1), + }, + AdministrativeContact: &administrative_contact_attr, + BillingContact: &billing_contact_attr, + CertificateSigningRequest: certificateSigningRequest, + OrderApproverEmailAddress: orderApproverEmailAddress, + OrganizationInformation: &org_information, + RenewalFlag: renewalFlag, + ServerCount: serverCount, + ServerType: serverType, + TechnicalContact: &technical_contact_attr, + ValidityMonths: validityMonths, + } + + return &sslContainer, nil +} + +func findSSLByOrderId(sess *session1.Session, orderId int) (datatypes.Security_Certificate_Request, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + acc := services.GetAccountService(sess) + acc_attr, err := acc.GetAttributes() + if err != nil { + return datatypes.Security_Certificate_Request{}, "", err + } + acc_id := acc_attr[0].AccountId + ssls, err := 
services.GetSecurityCertificateRequestService(sess).Filter(filter.Path("securityCertificateRequest.order.id").Eq(strconv.Itoa(orderId)).Build()).Mask("id").GetSslCertificateRequests(acc_id) + if err != nil { + return datatypes.Security_Certificate_Request{}, "", err + } + + if len(ssls) >= 1 { + return ssls[0], "complete", nil + } else { + return datatypes.Security_Certificate_Request{}, "pending", nil + } + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Security_Certificate_Request{}, err + } + + var result, ok = pendingResult.(datatypes.Security_Certificate_Request) + + if ok { + return result, nil + } + + return datatypes.Security_Certificate_Request{}, + fmt.Errorf("Cannot find SSL with order id '%d'", orderId) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_storage_block.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_storage_block.go new file mode 100644 index 00000000000..525c403e5db --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_storage_block.go @@ -0,0 +1,529 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/helpers/network" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMStorageBlock() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMStorageBlockCreate, + Read: resourceIBMStorageBlockRead, + Update: resourceIBMStorageBlockUpdate, + Delete: resourceIBMStorageBlockDelete, + Exists: resourceIBMStorageBlockExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(45 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Storage block type", + }, + + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Datacenter name", + }, + + "capacity": { + Type: schema.TypeInt, + Required: true, + Description: "Storage block size", + }, + + "iops": { + Type: schema.TypeFloat, + Required: true, + Description: "IOPS value required", + }, + + "volumename": { + Type: schema.TypeString, + Computed: true, + Description: "Volume name", + }, + + "hostname": { + Type: schema.TypeString, + Computed: true, + Description: "Hostname", + }, + + "lunid": { + Type: schema.TypeString, + Computed: true, + Description: "LUN Id", + }, + + "snapshot_capacity": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Snapshot capacity in GB", + }, + + "os_format_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "OS format type", + }, + + "allowed_virtual_guest_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + Description: "List of allowed virtual guest IDs", + }, + + "notes": { + Type: schema.TypeString, + Optional: true, + Description: "Additional 
note info", + }, + //TODO in v0.9.0 + "allowed_virtual_guest_info": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + }, + "host_iqn": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: func(v interface{}) int { + virtualGuest := v.(map[string]interface{}) + return virtualGuest["id"].(int) + }, + Deprecated: "Please use 'allowed_host_info' instead", + }, + + "allowed_hardware_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + Description: "List of allowe hardware IDs", + }, + + //TODO in v0.9.0 + "allowed_hardware_info": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + }, + "host_iqn": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: func(v interface{}) int { + baremetal := v.(map[string]interface{}) + return baremetal["id"].(int) + }, + Deprecated: "Please use 'allowed_host_info' instead", + }, + + "allowed_ip_addresses": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Allowed IP addresses", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of tags associated with the resource", + }, + "hourly_billing": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + Description: "Billing done hourly, if set to true", + }, + "allowed_host_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + }, + "host_iqn": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "target_address": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "List of target Addresses", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + }, + } +} + +func resourceIBMStorageBlockCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + storageType := d.Get("type").(string) + iops := d.Get("iops").(float64) + datacenter := d.Get("datacenter").(string) + capacity := d.Get("capacity").(int) + snapshotCapacity := d.Get("snapshot_capacity").(int) + osFormatType := d.Get("os_format_type").(string) + osType, err := network.GetOsTypeByName(sess, osFormatType) + hourlyBilling := d.Get("hourly_billing").(bool) + + if err != nil { + return err + } + + storageOrderContainer, err := buildStorageProductOrderContainer(sess, storageType, iops, capacity, snapshotCapacity, blockStorage, datacenter, 
hourlyBilling) + if err != nil { + return fmt.Errorf("Error while creating storage: %s", err) + } + + log.Println("[INFO] Creating storage") + + var receipt datatypes.Container_Product_Order_Receipt + + switch storageType { + case enduranceType: + receipt, err = services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder( + &datatypes.Container_Product_Order_Network_Storage_AsAService{ + Container_Product_Order: storageOrderContainer, + OsFormatType: &datatypes.Network_Storage_Iscsi_OS_Type{ + Id: osType.Id, + KeyName: osType.KeyName, + }, + VolumeSize: &capacity, + }, sl.Bool(false)) + case performanceType: + receipt, err = services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder( + &datatypes.Container_Product_Order_Network_Storage_AsAService{ + Container_Product_Order: storageOrderContainer, + OsFormatType: &datatypes.Network_Storage_Iscsi_OS_Type{ + Id: osType.Id, + KeyName: osType.KeyName, + }, + Iops: sl.Int(int(iops)), + VolumeSize: &capacity, + }, sl.Bool(false)) + default: + return fmt.Errorf("Error during creation of storage: Invalid storageType %s", storageType) + } + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + + // Find the storage device + blockStorage, err := findStorageByOrderId(sess, *receipt.OrderId, d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + d.SetId(fmt.Sprintf("%d", *blockStorage.Id)) + + // Wait for storage availability + _, err = WaitForStorageAvailable(d, meta) + + if err != nil { + return fmt.Errorf( + "Error waiting for storage (%s) to become ready: %s", d.Id(), err) + } + + // SoftLayer changes the device ID after completion of provisioning. It is necessary to refresh device ID. + blockStorage, err = findStorageByOrderId(sess, *receipt.OrderId, d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + d.SetId(fmt.Sprintf("%d", *blockStorage.Id)) + + log.Printf("[INFO] Storage ID: %s", d.Id()) + + return resourceIBMStorageBlockUpdate(d, meta) +} + +func resourceIBMStorageBlockRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + storageId, _ := strconv.Atoi(d.Id()) + + storage, err := services.GetNetworkStorageService(sess). + Id(storageId). + Mask(storageDetailMask). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + + storageType := strings.Fields(*storage.StorageType.Description)[0] + + // Calculate IOPS + iops, err := getIops(storage, storageType) + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + + d.Set("type", storageType) + d.Set("capacity", *storage.CapacityGb) + d.Set("volumename", *storage.Username) + d.Set("hostname", *storage.ServiceResourceBackendIpAddress) + d.Set("lunid", *storage.LunId) + d.Set("iops", iops) + if storage.SnapshotCapacityGb != nil { + snapshotCapacity, _ := strconv.Atoi(*storage.SnapshotCapacityGb) + d.Set("snapshot_capacity", snapshotCapacity) + } + + // Parse data center short name from ServiceResourceName. For example, + // if SoftLayer API returns "'serviceResourceName': 'PerfStor Aggr aggr_staasdal0601_p01'", + // the data center short name is "dal06". 
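+ // For example (illustrative only, hypothetical value): regexp.MustCompile("[a-zA-Z]{3}[0-9]{2}").FindString("PerfStor Aggr aggr_staasdal0601_p01") returns "dal06".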
+ r, _ := regexp.Compile("[a-zA-Z]{3}[0-9]{2}") + d.Set("datacenter", r.FindString(*storage.ServiceResourceName)) + + allowedHostInfoList := make([]map[string]interface{}, 0) + + // Read allowed_ip_addresses + allowedIpaddressesList := make([]string, 0, len(storage.AllowedIpAddresses)) + for _, allowedIpaddress := range storage.AllowedIpAddresses { + singleHost := make(map[string]interface{}) + singleHost["id"] = *allowedIpaddress.SubnetId + singleHost["username"] = *allowedIpaddress.AllowedHost.Credential.Username + singleHost["password"] = *allowedIpaddress.AllowedHost.Credential.Password + singleHost["host_iqn"] = *allowedIpaddress.AllowedHost.Name + allowedHostInfoList = append(allowedHostInfoList, singleHost) + allowedIpaddressesList = append(allowedIpaddressesList, *allowedIpaddress.IpAddress) + } + d.Set("allowed_ip_addresses", allowedIpaddressesList) + + // Read allowed_virtual_guest_ids and allowed_host_info + allowedVirtualGuestInfoList := make([]map[string]interface{}, 0) + allowedVirtualGuestIdsList := make([]int, 0, len(storage.AllowedVirtualGuests)) + + for _, allowedVirtualGuest := range storage.AllowedVirtualGuests { + singleVirtualGuest := make(map[string]interface{}) + singleVirtualGuest["id"] = *allowedVirtualGuest.Id + singleVirtualGuest["username"] = *allowedVirtualGuest.AllowedHost.Credential.Username + singleVirtualGuest["password"] = *allowedVirtualGuest.AllowedHost.Credential.Password + singleVirtualGuest["host_iqn"] = *allowedVirtualGuest.AllowedHost.Name + allowedHostInfoList = append(allowedHostInfoList, singleVirtualGuest) + allowedVirtualGuestInfoList = append(allowedVirtualGuestInfoList, singleVirtualGuest) + allowedVirtualGuestIdsList = append(allowedVirtualGuestIdsList, *allowedVirtualGuest.Id) + } + d.Set("allowed_virtual_guest_ids", allowedVirtualGuestIdsList) + d.Set("allowed_virtual_guest_info", allowedVirtualGuestInfoList) + + // Read allowed_hardware_ids and allowed_host_info + allowedHardwareInfoList := make([]map[string]interface{}, 0) + allowedHardwareIdsList := make([]int, 0, len(storage.AllowedHardware)) + for _, allowedHW := range storage.AllowedHardware { + singleHardware := make(map[string]interface{}) + singleHardware["id"] = *allowedHW.Id + singleHardware["username"] = *allowedHW.AllowedHost.Credential.Username + singleHardware["password"] = *allowedHW.AllowedHost.Credential.Password + singleHardware["host_iqn"] = *allowedHW.AllowedHost.Name + allowedHostInfoList = append(allowedHostInfoList, singleHardware) + allowedHardwareInfoList = append(allowedHardwareInfoList, singleHardware) + allowedHardwareIdsList = append(allowedHardwareIdsList, *allowedHW.Id) + } + d.Set("allowed_hardware_ids", allowedHardwareIdsList) + d.Set("allowed_hardware_info", allowedHardwareInfoList) + d.Set("allowed_host_info", allowedHostInfoList) + + if storage.OsType != nil { + d.Set("os_format_type", *storage.OsType.Name) + } + + if storage.Notes != nil { + d.Set("notes", *storage.Notes) + } + + if storage.BillingItem != nil { + d.Set("hourly_billing", storage.BillingItem.HourlyFlag) + } + + d.Set("target_address", storage.IscsiTargetIpAddresses) + d.Set(ResourceControllerURL, fmt.Sprintf("https://cloud.ibm.com/classic/storage/block/%s", d.Id())) + d.Set(ResourceName, *storage.ServiceResourceName) + + return nil +} + +func resourceIBMStorageBlockUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) 
+ } + + storage, err := services.GetNetworkStorageService(sess). + Id(id). + Mask(storageDetailMask). + GetObject() + + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + + // Update allowed_ip_addresses + if d.HasChange("allowed_ip_addresses") { + err := updateAllowedIpAddresses(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_subnets + if d.HasChange("allowed_subnets") { + err := updateAllowedSubnets(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_virtual_guest_ids + if d.HasChange("allowed_virtual_guest_ids") { + err := updateAllowedVirtualGuestIds(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_hardware_ids + if d.HasChange("allowed_hardware_ids") { + err := updateAllowedHardwareIds(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update notes + if d.HasChange("notes") { + err := updateNotes(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + if (d.HasChange("capacity") || d.HasChange("iops")) && !d.IsNewResource() { + size := d.Get("capacity").(int) + iops := d.Get("iops").(float64) + + modifyOrder, err := prepareModifyOrder(sess, storage, iops, size) + if err != nil { + return fmt.Errorf("Error updating storage: %s", err) + } + + _, err = services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder( + &datatypes.Container_Product_Order_Network_Storage_AsAService_Upgrade{ + Container_Product_Order_Network_Storage_AsAService: modifyOrder, + Volume: &datatypes.Network_Storage{ + Id: sl.Int(id), + }, + }, sl.Bool(false)) + // Wait for storage availability + _, err = WaitForStorageUpdate(d, meta) + + if err != nil { + return fmt.Errorf( + "Error waiting for storage (%s) to update: %s", d.Id(), err) + } + } + + return resourceIBMStorageBlockRead(d, meta) +} + +func resourceIBMStorageBlockDelete(d *schema.ResourceData, meta interface{}) error { + return resourceIBMStorageFileDelete(d, meta) +} + +func resourceIBMStorageBlockExists(d *schema.ResourceData, meta interface{}) (bool, error) { + return resourceIBMStorageFileExists(d, meta) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_storage_evault.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_storage_evault.go new file mode 100644 index 00000000000..d9c5b460cd5 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_storage_evault.go @@ -0,0 +1,452 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMStorageEvault() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMStorageEvaultCreate, + Read: resourceIBMStorageEvaultRead, + Update: resourceIBMStorageEvaultUpdate, + Delete: resourceIBMStorageEvaultDelete, + Exists: resourceIBMStorageEvaultExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Datacenter name", + }, + "capacity": { + Type: schema.TypeInt, + Required: true, + Description: "Capacity", + }, + "virtual_instance_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"hardware_instance_id"}, + Description: "Virtual instance ID", + }, + "hardware_instance_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"virtual_instance_id"}, + Description: "Hardware instance ID", + }, + "username": { + Type: schema.TypeString, + Computed: true, + Description: "user name", + }, + "password": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "password", + }, + "service_resource_name": { + Type: schema.TypeString, + Computed: true, + Description: "service resource name", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Tags set for the resource", + }, + }, + } +} + +const ( + evaultPackageFilter = `{"keyName":{"operation":"ADDITIONAL_PRODUCTS"}}` + evaultStorageMask = "id,billingItem.orderItem.order.id" +) + +func resourceIBMStorageEvaultCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + // Find price items + productOrderContainer, err := buildEvaultProductOrderContainer(d, sess) + if err != nil { + return fmt.Errorf("Error creating evault: %s", err) + } + + log.Println("[INFO] Creating Evault") + + receipt, err := services.GetProductOrderService(sess.SetRetries(0)). + PlaceOrder(productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of evault: %s", err) + } + evaultStorage, err := findEvaultStorageByOrderID(d, meta, *receipt.OrderId) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *evaultStorage.Id)) + + // Wait for storage availability + _, err = WaitForEvaultAvailable(d, meta, schema.TimeoutCreate) + + if err != nil { + return fmt.Errorf( + "Error waiting for evault (%s) to become ready: %s", d.Id(), err) + } + + // SoftLayer changes the device ID after completion of provisioning. It is necessary to refresh device ID. 
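+ // (The second lookup below re-resolves the volume by order ID so that d.SetId records the final, post-provisioning device ID.)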
+ evaultStorage, err = findEvaultStorageByOrderID(d, meta, *receipt.OrderId) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + d.SetId(fmt.Sprintf("%d", *evaultStorage.Id)) + + log.Printf("[INFO] Storage ID: %s", d.Id()) + + return resourceIBMStorageEvaultRead(d, meta) +} + +func buildEvaultProductOrderContainer(d *schema.ResourceData, sess *session.Session) ( + *datatypes.Container_Product_Order, error) { + datacenter := d.Get("datacenter").(string) + capacity := d.Get("capacity").(int) + + var virtualID, hardwareID int + if vID, ok := d.GetOk("virtual_instance_id"); ok { + virtualID = vID.(int) + } + + if hID, ok := d.GetOk("hardware_instance_id"); ok { + hardwareID = hID.(int) + } + + if virtualID == 0 && hardwareID == 0 { + return &datatypes.Container_Product_Order{}, fmt.Errorf("Provide either `virtual_instance_id` or `hardware_instance_id`") + } + + /*pkg, err := product.GetPackageByType(sess, "ADDITIONAL_PRODUCTS") + if err != nil { + return nil, err + }*/ + productpackageservice, _ := services.GetProductPackageService(sess).Filter(evaultPackageFilter).Mask(`id`).GetAllObjects() + var productid int + for _, packageid := range productpackageservice { + productid = *packageid.Id + } + + // Lookup the data center ID + dc, err := location.GetDatacenterByName(sess, datacenter) + if err != nil { + return &datatypes.Container_Product_Order{}, + fmt.Errorf("No data centers matching %s could be found", datacenter) + } + + locationservice := services.GetLocationService(sess) + + // Get the price groups that the datacenter belongs to + priceidds, _ := locationservice.Id(*dc.Id).GetPriceGroups() + + var listofpriceids []int + + // Store the IDs of all the price groups the datacenter belongs to + for _, priceidd := range priceidds { + listofpriceids = append(listofpriceids, *priceidd.Id) + } + + description := strconv.Itoa(capacity) + "GB IBM Cloud Backup" + + priceItems := []datatypes.Product_Item_Price{} + actualpriceid, err := product.GetPriceIDByPackageIdandLocationGroups(sess, listofpriceids, 0, description) + if err != nil || actualpriceid == 0 { + return &datatypes.Container_Product_Order{}, fmt.Errorf("The evault with the given capacity is not available for the datacenter you have selected. Please enter a different capacity: %s", err) + } + priceItem := datatypes.Product_Item_Price{ + Id: &actualpriceid, + } + priceItems = append(priceItems, priceItem) + + order := datatypes.Container_Product_Order{ + ComplexType: sl.String("SoftLayer_Container_Product_Order_Network_Storage_Backup_Evault_Vault"), + PackageId: &productid, + Prices: priceItems, + Location: sl.String(strconv.Itoa(*dc.Id)), + } + + if virtualID > 0 { + var guest datatypes.Virtual_Guest + guest.Id = sl.Int(virtualID) + order.VirtualGuests = []datatypes.Virtual_Guest{ + guest, + } + + } else { + var hardware datatypes.Hardware + hardware.Id = sl.Int(hardwareID) + order.Hardware = []datatypes.Hardware{ + hardware, + } + + } + + return &order, nil +} + +func resourceIBMStorageEvaultRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + evaultID, _ := strconv.Atoi(d.Id()) + + evault, err := services.GetNetworkStorageBackupEvaultService(sess). + Id(evaultID).Mask("billingItem[location[name]]"). 
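+ // The object mask pulls in the relational billingItem location used for the "datacenter" attribute; local fields such as capacityGb are returned by default (an assumption about SoftLayer object-mask semantics).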
+ GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving evault information: %s", err) + } + + d.Set("capacity", evault.CapacityGb) + d.Set("datacenter", evault.BillingItem.Location.Name) + + if evault.GuestId != nil { + d.Set("virtual_instance_id", evault.GuestId) + } + + if evault.HardwareId != nil { + d.Set("hardware_instance_id", evault.HardwareId) + } + + d.Set("username", evault.Username) + d.Set("password", evault.Password) + d.Set("service_resource_name", evault.ServiceResourceName) + + return nil +} + +func resourceIBMStorageEvaultUpdate(d *schema.ResourceData, meta interface{}) error { + + if d.HasChange("capacity") && !d.IsNewResource() { + sess := meta.(ClientSession).SoftLayerSession() + + evaultID, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + priceID, err := getEvaultUpgradePriceItem(d, sess) + if err != nil { + return err + } + + _, err = services.GetNetworkStorageBackupEvaultService(sess). + Id(evaultID).UpgradeVolumeCapacity(sl.Int(priceID)) + + if err != nil { + return err + } + + // Wait for storage availability + _, err = WaitForEvaultAvailable(d, meta, schema.TimeoutUpdate) + + if err != nil { + return fmt.Errorf( + "Error waiting for evault upgrade (%s) to become ready: %s", d.Id(), err) + } + + return resourceIBMStorageEvaultRead(d, meta) + } + return nil + +} + +func resourceIBMStorageEvaultDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + evaultService := services.GetNetworkStorageBackupEvaultService(sess) + evaultID, _ := strconv.Atoi(d.Id()) + + // Get billing item associated with the storage + billingItem, err := evaultService.Id(evaultID).GetBillingItem() + + if err != nil { + return fmt.Errorf("Error while looking up billing item associated with the evault: %s", err) + } + + if billingItem.Id == nil { + return fmt.Errorf("Error while looking up billing item associated with the evault: No billing item for ID:%d", evaultID) + } + + success, err := services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService() + if err != nil { + return err + } + + if !success { + return fmt.Errorf("SoftLayer reported an unsuccessful cancellation") + } + return nil +} + +func findEvaultStorageByOrderID(d *schema.ResourceData, meta interface{}, orderId int) (datatypes.Network_Storage, error) { + filterPath := "evaultNetworkStorage.billingItem.orderItem.order.id" + sess := meta.(ClientSession).SoftLayerSession() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + storages, err := services.GetAccountService(sess). + Filter(filter.Build( + filter.Path(filterPath). + Eq(strconv.Itoa(orderId)))). + Mask(storageMask). 
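+ // storageMask ("id,billingItem.orderItem.order.id") keeps this poll lightweight; only the volume ID is needed to confirm the order completed.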
+ GetEvaultNetworkStorage() + if err != nil { + return datatypes.Network_Storage{}, "", err + } + + if len(storages) == 1 { + return storages[0], "complete", nil + } else if len(storages) == 0 { + return datatypes.Network_Storage{}, "pending", nil + } else { + return nil, "", fmt.Errorf("Expected one evault, got %d", len(storages)) + } + + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + NotFoundChecks: 300, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Storage{}, err + } + + var result, ok = pendingResult.(datatypes.Network_Storage) + + if ok { + return result, nil + } + + return datatypes.Network_Storage{}, + fmt.Errorf("Cannot find evault with order id '%d'", orderId) +} + +// Waits for storage provisioning +func WaitForEvaultAvailable(d *schema.ResourceData, meta interface{}, timeout string) (interface{}, error) { + log.Printf("Waiting for evault (%s) to be available.", d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return nil, fmt.Errorf("The evault ID %s must be numeric", d.Id()) + } + sess := meta.(ClientSession).SoftLayerSession() + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", "provisioning"}, + Target: []string{"available"}, + Refresh: func() (interface{}, string, error) { + // Check active transactions + service := services.GetNetworkStorageBackupEvaultService(sess) + result, err := service.Id(id).Mask("activeTransactionCount").GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Error retrieving evault: %s", err) + } + return false, "retry", nil + } + + log.Println("Checking active transactions.") + if *result.ActiveTransactionCount > 0 { + return result, "provisioning", nil + } + + return result, "available", nil + }, + Timeout: d.Timeout(timeout), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func resourceIBMStorageEvaultExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + evaultID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + _, err = services.GetNetworkStorageBackupEvaultService(sess). + Id(evaultID). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error retrieving evault information: %s", err) + } + return true, nil +} + +func getEvaultUpgradePriceItem(d *schema.ResourceData, sess *session.Session) (int, error) { + evaultID, _ := strconv.Atoi(d.Id()) + + evault, err := services.GetNetworkStorageBackupEvaultService(sess). + Id(evaultID).Mask("id, billingItem[id,upgradeItems[prices]]"). + GetObject() + + if err != nil { + return 0, fmt.Errorf("Error retrieving evault information: %s", err) + } + + capacity := d.Get("capacity") + + upgradeItemCount := len(evault.BillingItem.UpgradeItems) + validCapacities := make([]int, upgradeItemCount) + + for i, item := range evault.BillingItem.UpgradeItems { + if int(*item.Capacity) == capacity.(int) { + return *item.Id, nil + } + + validCapacities[i] = int(*item.Capacity) + } + + return 0, fmt.Errorf("The given capacity is not a valid upgrade value. 
Valid capacity upgrades are: %d", validCapacities) + +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_storage_file.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_storage_file.go new file mode 100644 index 00000000000..0674481c787 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_storage_file.go @@ -0,0 +1,1559 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "bytes" + "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/internal/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + storagePackageType = "STORAGE_AS_A_SERVICE" + storageMask = "id,billingItem.orderItem.order.id" + storageDetailMask = "id,billingItem[location],storageTierLevel,provisionedIops,capacityGb,iops,lunId,storageType[keyName,description],username,serviceResourceBackendIpAddress,properties[type]" + + ",serviceResourceName,allowedIpAddresses[id,ipAddress,subnetId,allowedHost[name,credential[username,password]]],allowedSubnets[allowedHost[name,credential[username,password]]],allowedHardware[allowedHost[name,credential[username,password]]],allowedVirtualGuests[id,allowedHost[name,credential[username,password]]],snapshotCapacityGb,osType,notes,billingItem[hourlyFlag],serviceResource[datacenter[name]],schedules[dayOfWeek,hour,minute,retentionCount,type[keyname,name]],iscsiTargetIpAddresses" + itemMask = "id,capacity,description,units,keyName,capacityMinimum,capacityMaximum,prices[id,categories[id,name,categoryCode],capacityRestrictionMinimum,capacityRestrictionMaximum,capacityRestrictionType,locationGroupId],itemCategory[categoryCode]" + enduranceType = "Endurance" + performanceType = "Performance" + fileStorage = "file" + blockStorage = "block" + retryTime = 5 +) + +var ( + // Map IOPS value to endurance storage tier keyName in SoftLayer_Product_Item + enduranceIopsMap = map[float64]string{ + 0.25: "LOW_INTENSITY_TIER", + 2: "READHEAVY_TIER", + 4: "WRITEHEAVY_TIER", + 10: "10_IOPS_PER_GB", + } + + // Map IOPS value to endurance storage tier capacityRestrictionMaximum/capacityRestrictionMinimum in SoftLayer_Product_Item + enduranceCapacityRestrictionMap = map[float64]int{ + 0.25: 100, + 2: 200, + 4: 300, + 10: 1000, + } + + snapshotDay = map[string]string{ + "0": "SUNDAY", + "1": "MONDAY", + "2": "TUESDAY", + "3": "WEDNESDAY", + "4": "THURSDAY", + "5": "FRIDAY", + "6": "SATURDAY", + } +) + +func resourceIBMStorageFile() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMStorageFileCreate, + Read: resourceIBMStorageFileRead, + Update: resourceIBMStorageFileUpdate, + Delete: resourceIBMStorageFileDelete, + Exists: resourceIBMStorageFileExists, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(45 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "type": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateStorageType, + Description: "Storage type", + }, + + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Datacenter name", + }, + + "capacity": { + Type: schema.TypeInt, + Required: true, + Description: "Storage capacity", + }, + + "iops": { + Type: schema.TypeFloat, + Required: true, + Description: "IOPS rate", + }, + + "volumename": { + Type: schema.TypeString, + Computed: true, + Description: "Storage volume name", + }, + + "hostname": { + Type: schema.TypeString, + Computed: true, + Description: "Hostname", + }, + + "snapshot_capacity": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Snapshot capacity", + }, + + "allowed_virtual_guest_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + Description: "Virtual guest ID", + }, + + "allowed_hardware_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + Description: "Hardware ID", + }, + + "allowed_subnets": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Allowed network subnets", + }, + + "allowed_ip_addresses": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Allowed range of IP addresses", + }, + + "notes": { + Type: schema.TypeString, + Optional: true, + Description: "Notes", + }, + + "snapshot_schedule": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 3, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schedule_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateScheduleType, + Description: "Schedule type", + }, + + "retention_count": { + Type: schema.TypeInt, + Required: true, + Description: "Retention count", + }, + + "minute": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateMinute(0, 59), + Description: "Time duration in minutes", + }, + + "hour": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateHour(0, 23), + Description: "Time duration in hours", + }, + + "day_of_week": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateDayOfWeek, + Description: "Day of the week", + }, + + "enable": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + Set: resourceIBMFilSnapshotHash, + }, + "mountpoint": { + Type: schema.TypeString, + Computed: true, + Description: "Storage mount point", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Tags set for the storage volume", + }, + "hourly_billing": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + Description: "Hourly based billing type", + }, + ResourceControllerURL: { + Type: schema.TypeString, + Computed: true, + Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance", + }, + ResourceName: { + Type: schema.TypeString, + Computed: true, + Description: "The name of the resource", + }, + ResourceStatus: { + Type: schema.TypeString, + Computed: true, + Description: "The status of the resource", + }, + }, + } +} + +func resourceIBMStorageFileCreate(d *schema.ResourceData, meta interface{}) error { + 
sess := meta.(ClientSession).SoftLayerSession() + + storageType := d.Get("type").(string) + iops := d.Get("iops").(float64) + datacenter := d.Get("datacenter").(string) + capacity := d.Get("capacity").(int) + snapshotCapacity := d.Get("snapshot_capacity").(int) + hourlyBilling := d.Get("hourly_billing").(bool) + + var ( + storageOrderContainer datatypes.Container_Product_Order + err error + ) + + storageOrderContainer, err = buildStorageProductOrderContainer(sess, storageType, iops, capacity, snapshotCapacity, fileStorage, datacenter, hourlyBilling) + if err != nil { + return fmt.Errorf("Error while creating storage: %s", err) + } + + log.Println("[INFO] Creating storage") + + var receipt datatypes.Container_Product_Order_Receipt + + switch storageType { + case enduranceType: + receipt, err = services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder( + &datatypes.Container_Product_Order_Network_Storage_AsAService{ + Container_Product_Order: storageOrderContainer, + VolumeSize: &capacity, + }, sl.Bool(false)) + case performanceType: + receipt, err = services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder( + &datatypes.Container_Product_Order_Network_Storage_AsAService{ + Container_Product_Order: storageOrderContainer, + VolumeSize: &capacity, + Iops: sl.Int(int(iops)), + }, sl.Bool(false)) + + default: + return fmt.Errorf("Error during creation of storage: Invalid storageType %s", storageType) + } + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + + // Find the storage device + fileStorage, err := findStorageByOrderId(sess, *receipt.OrderId, d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + d.SetId(fmt.Sprintf("%d", *fileStorage.Id)) + + // Wait for storage availability + _, err = WaitForStorageAvailable(d, meta) + + if err != nil { + return fmt.Errorf( + "Error waiting for storage (%s) to become ready: %s", d.Id(), err) + } + + // SoftLayer changes the device ID after completion of provisioning. It is necessary to refresh device ID. + fileStorage, err = findStorageByOrderId(sess, *receipt.OrderId, d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + d.SetId(fmt.Sprintf("%d", *fileStorage.Id)) + + log.Printf("[INFO] Storage ID: %s", d.Id()) + + return resourceIBMStorageFileUpdate(d, meta) +} + +func resourceIBMStorageFileRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + storageId, _ := strconv.Atoi(d.Id()) + + storage, err := services.GetNetworkStorageService(sess). + Id(storageId). + Mask(storageDetailMask + ",volumeStatus"). 
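+ // volumeStatus is appended to the detail mask here so this Read can also populate ResourceStatus further down.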
+ GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + + storageType, err := getStorageTypeFromKeyName(*storage.StorageType.KeyName) + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + + // Calculate IOPS + iops, err := getIops(storage, storageType) + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + d.Set("iops", iops) + + d.Set("type", storageType) + d.Set("capacity", *storage.CapacityGb) + d.Set("volumename", *storage.Username) + d.Set("hostname", *storage.ServiceResourceBackendIpAddress) + + if storage.SnapshotCapacityGb != nil { + snapshotCapacity, _ := strconv.Atoi(*storage.SnapshotCapacityGb) + d.Set("snapshot_capacity", snapshotCapacity) + } + + // Parse data center short name from ServiceResourceName. For example, + // if SoftLayer API returns "'serviceResourceName': 'PerfStor Aggr aggr_staasdal0601_p01'", + // the data center short name is "dal06". + r, _ := regexp.Compile("[a-zA-Z]{3}[0-9]{2}") + d.Set("datacenter", strings.ToLower(r.FindString(*storage.ServiceResourceName))) + // Read allowed_ip_addresses + allowedIpaddressesList := make([]string, 0, len(storage.AllowedIpAddresses)) + for _, allowedIpaddress := range storage.AllowedIpAddresses { + allowedIpaddressesList = append(allowedIpaddressesList, *allowedIpaddress.IpAddress) + } + d.Set("allowed_ip_addresses", allowedIpaddressesList) + + // Read allowed_subnets + allowedSubnetsList := make([]string, 0, len(storage.AllowedSubnets)) + for _, allowedSubnets := range storage.AllowedSubnets { + allowedSubnetsList = append(allowedSubnetsList, *allowedSubnets.NetworkIdentifier+"/"+strconv.Itoa(*allowedSubnets.Cidr)) + } + d.Set("allowed_subnets", allowedSubnetsList) + + // Read allowed_virtual_guest_ids + allowedVirtualGuestIdsList := make([]int, 0, len(storage.AllowedVirtualGuests)) + for _, allowedVirtualGuest := range storage.AllowedVirtualGuests { + allowedVirtualGuestIdsList = append(allowedVirtualGuestIdsList, *allowedVirtualGuest.Id) + } + d.Set("allowed_virtual_guest_ids", allowedVirtualGuestIdsList) + + // Read allowed_hardware_ids + allowedHardwareIdsList := make([]int, 0, len(storage.AllowedHardware)) + for _, allowedHW := range storage.AllowedHardware { + allowedHardwareIdsList = append(allowedHardwareIdsList, *allowedHW.Id) + } + d.Set("allowed_hardware_ids", allowedHardwareIdsList) + + if storage.OsType != nil { + d.Set("os_type", *storage.OsType.Name) + } + + if storage.Notes != nil { + d.Set("notes", *storage.Notes) + } + + mountpoint, err := services.GetNetworkStorageService(sess).Id(storageId).GetFileNetworkMountAddress() + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + d.Set("mountpoint", mountpoint) + + if storage.BillingItem != nil { + d.Set("hourly_billing", storage.BillingItem.HourlyFlag) + } + + schds := make([]interface{}, len(storage.Schedules)) + for i, schd := range storage.Schedules { + s := make(map[string]interface{}) + s["retention_count"], _ = strconv.Atoi(*schd.RetentionCount) + if *schd.Minute != "-1" { + + s["minute"], _ = strconv.Atoi(*schd.Minute) + } + if *schd.Hour != "-1" { + s["hour"], _ = strconv.Atoi(*schd.Hour) + } + if *schd.Active > 0 { + s["enable"], _ = strconv.ParseBool("true") + } else { + s["enable"], _ = strconv.ParseBool("false") + } + + if *schd.DayOfWeek != "-1" { + s["day_of_week"] = snapshotDay[*schd.DayOfWeek] + } + + stype := *schd.Type.Keyname + stype = stype[strings.LastIndex(stype, "_")+1:] 
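+ // e.g. a type keyname such as "SNAPSHOT_WEEKLY" reduces to "WEEKLY" here (assumption: schedule type keynames end with the interval after the last underscore).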
+ s["schedule_type"] = stype + schds[i] = s + } + d.Set("snapshot_schedule", schds) + d.Set(ResourceControllerURL, fmt.Sprintf("https://cloud.ibm.com/classic/storage/file/%s", d.Id())) + + d.Set(ResourceName, *storage.ServiceResourceName) + + d.Set(ResourceStatus, *storage.VolumeStatus) + + return nil +} + +func resourceIBMStorageFileUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + storage, err := services.GetNetworkStorageService(sess). + Id(id). + Mask(storageDetailMask). + GetObject() + + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + + // Update allowed_ip_addresses + if d.HasChange("allowed_ip_addresses") { + err := updateAllowedIpAddresses(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_subnets + if d.HasChange("allowed_subnets") { + err := updateAllowedSubnets(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_virtual_guest_ids + if d.HasChange("allowed_virtual_guest_ids") { + err := updateAllowedVirtualGuestIds(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_hardware_ids + if d.HasChange("allowed_hardware_ids") { + err := updateAllowedHardwareIds(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update notes + if d.HasChange("notes") { + err := updateNotes(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Enable Storage Snapshot Schedule + if d.HasChange("snapshot_schedule") { + err := enableStorageSnapshot(d, sess, storage) + if err != nil { + return fmt.Errorf("Error creating storage snapshot schedule: %s", err) + } + } + + if (d.HasChange("capacity") || d.HasChange("iops")) && !d.IsNewResource() { + size := d.Get("capacity").(int) + iops := d.Get("iops").(float64) + + modifyOrder, err := prepareModifyOrder(sess, storage, iops, size) + if err != nil { + return fmt.Errorf("Error updating storage: %s", err) + } + + _, err = services.GetProductOrderService(sess.SetRetries(0)).PlaceOrder( + &datatypes.Container_Product_Order_Network_Storage_AsAService_Upgrade{ + Container_Product_Order_Network_Storage_AsAService: modifyOrder, + Volume: &datatypes.Network_Storage{ + Id: sl.Int(id), + }, + }, sl.Bool(false)) + // Wait for storage availability + _, err = WaitForStorageUpdate(d, meta) + + if err != nil { + return fmt.Errorf( + "Error waiting for storage (%s) to update: %s", d.Id(), err) + } + } + + return resourceIBMStorageFileRead(d, meta) +} + +func resourceIBMStorageFileDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + storageService := services.GetNetworkStorageService(sess) + storageID, _ := strconv.Atoi(d.Id()) + + // Get billing item associated with the storage + billingItem, err := storageService.Id(storageID).GetBillingItem() + + if err != nil { + return fmt.Errorf("Error while looking up billing item associated with the storage: %s", err) + } + + if billingItem.Id == nil { + return fmt.Errorf("Error while looking up billing item associated with the storage: No billing item for ID:%d", storageID) + } + + success, err 
:= services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService() + if err != nil { + return err + } + + if !success { + return fmt.Errorf("SoftLayer reported an unsuccessful cancellation") + } + return nil +} + +func resourceIBMStorageFileExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + storageID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + _, err = services.GetNetworkStorageService(sess). + Id(storageID). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error retrieving storage information: %s", err) + } + return true, nil +} + +func buildStorageProductOrderContainer( + sess *session.Session, + storageType string, + iops float64, + capacity int, + snapshotCapacity int, + storageProtocol string, + datacenter string, + hourlyBilling bool) (datatypes.Container_Product_Order, error) { + + // Get the package type + pkg, err := product.GetPackageByType(sess, storagePackageType) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + + // Get all prices + productItems, err := product.GetPackageProducts(sess, *pkg.Id, itemMask) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + + // Add IOPS price + targetItemPrices := []datatypes.Product_Item_Price{} + + if storageType == "Performance" { + price, err := getPriceByCategory(productItems, "storage_as_a_service") + if err != nil { + return datatypes.Container_Product_Order{}, err + } + targetItemPrices = append(targetItemPrices, price) + price, err = getPriceByCategory(productItems, "storage_"+storageProtocol) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + targetItemPrices = append(targetItemPrices, price) + + price, err = getSaaSPerformSpacePrice(productItems, capacity) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + targetItemPrices = append(targetItemPrices, price) + + price, err = getSaaSPerformIOPSPrice(productItems, capacity, int(iops)) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + targetItemPrices = append(targetItemPrices, price) + + } else { + + price, err := getPriceByCategory(productItems, "storage_as_a_service") + if err != nil { + return datatypes.Container_Product_Order{}, err + } + targetItemPrices = append(targetItemPrices, price) + price, err = getPriceByCategory(productItems, "storage_"+storageProtocol) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + targetItemPrices = append(targetItemPrices, price) + + price, err = getSaaSEnduranceSpacePrice(productItems, capacity, iops) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + targetItemPrices = append(targetItemPrices, price) + + price, err = getSaaSEnduranceTierPrice(productItems, iops) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + targetItemPrices = append(targetItemPrices, price) + + } + + if snapshotCapacity > 0 { + price, err := getSaaSSnapshotSpacePrice(productItems, snapshotCapacity, iops, storageType) + if err != nil { + return datatypes.Container_Product_Order{}, err + } + targetItemPrices = append(targetItemPrices, price) + + } + + // Lookup the data center ID + dc, err := location.GetDatacenterByName(sess, datacenter) + if err != nil { + return datatypes.Container_Product_Order{}, + fmt.Errorf("No data centers 
matching %s could be found", datacenter) + } + + productOrderContainer := datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Location: sl.String(strconv.Itoa(*dc.Id)), + Prices: targetItemPrices, + Quantity: sl.Int(1), + UseHourlyPricing: sl.Bool(hourlyBilling), + } + + return productOrderContainer, nil +} + +func findStorageByOrderId(sess *session.Session, orderId int, timeout time.Duration) (datatypes.Network_Storage, error) { + filterPath := "networkStorage.billingItem.orderItem.order.id" + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + storage, err := services.GetAccountService(sess). + Filter(filter.Build( + filter.Path(filterPath). + Eq(strconv.Itoa(orderId)))). + Mask(storageMask). + GetNetworkStorage() + if err != nil { + return datatypes.Network_Storage{}, "", err + } + + if len(storage) == 1 { + return storage[0], "complete", nil + } else if len(storage) == 0 { + return datatypes.Network_Storage{}, "pending", nil + } else { + return nil, "", fmt.Errorf("Expected one Storage, got %d", len(storage)) + } + }, + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + NotFoundChecks: 300, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Storage{}, err + } + + var result, ok = pendingResult.(datatypes.Network_Storage) + + if ok { + return result, nil + } + + return datatypes.Network_Storage{}, + fmt.Errorf("Cannot find Storage with order id '%d'", orderId) +} + +// Waits for storage provisioning +func WaitForStorageAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { + log.Printf("Waiting for storage (%s) to be available.", d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return nil, fmt.Errorf("The storage ID %s must be numeric", d.Id()) + } + sess := meta.(ClientSession).SoftLayerSession() + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", "provisioning"}, + Target: []string{"available"}, + Refresh: func() (interface{}, string, error) { + // Check active transactions + service := services.GetNetworkStorageService(sess) + result, err := service.Id(id).Mask("activeTransactionCount").GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Error retrieving storage: %s", err) + } + return false, "retry", nil + } + + log.Println("Checking active transactions.") + if *result.ActiveTransactionCount > 0 { + return result, "provisioning", nil + } + + // Check volume status. 
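+ // (A raw DoRequest is used below so volumeStatus can be matched as a plain string against the two provisioning-complete markers.)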
+ log.Println("Checking volume status.") + resultStr := "" + err = sess.DoRequest( + "SoftLayer_Network_Storage", + "getObject", + nil, + &sl.Options{Id: &id, Mask: "volumeStatus"}, + &resultStr, + ) + if err != nil { + return false, "retry", nil + } + + if !strings.Contains(resultStr, "PROVISION_COMPLETED") && + !strings.Contains(resultStr, "Volume Provisioning has completed") { + return result, "provisioning", nil + } + + return result, "available", nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func getIops(storage datatypes.Network_Storage, storageType string) (float64, error) { + switch storageType { + case enduranceType: + for _, property := range storage.Properties { + if *property.Type.Keyname == "PROVISIONED_IOPS" { + provisionedIops, err := strconv.Atoi(*property.Value) + if err != nil { + return 0, err + } + enduranceIops := float64(provisionedIops / *storage.CapacityGb) + if enduranceIops < 1 { + enduranceIops = 0.25 + } + return enduranceIops, nil + } + } + case performanceType: + if storage.Iops == nil { + return 0, fmt.Errorf("Failed to retrieve iops information") + } + iops, err := strconv.Atoi(*storage.Iops) + if err != nil { + return 0, err + } + return float64(iops), nil + } + return 0, fmt.Errorf("Invalid storage type %s", storageType) +} + +func updateAllowedIpAddresses(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error { + id := *storage.Id + newIps := d.Get("allowed_ip_addresses").(*schema.Set).List() + + // Add new allowed_ip_addresses + for _, newIp := range newIps { + isNewIp := true + for _, oldAllowedIpAddresses := range storage.AllowedIpAddresses { + if newIp.(string) == *oldAllowedIpAddresses.IpAddress { + isNewIp = false + break + } + } + if isNewIp { + ipObject, err := services.GetAccountService(sess). + Filter(filter.Build( + filter.Path("ipAddresses.ipAddress"). + Eq(newIp.(string)))).GetIpAddresses() + if err != nil { + return err + } + if len(ipObject) != 1 { + return fmt.Errorf("Number of IP addresses is %d", len(ipObject)) + } + for { + _, err = services.GetNetworkStorageService(sess). + Id(id). + AllowAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: ipObject[0].Id, + ObjectType: sl.String("SoftLayer_Network_Subnet_IpAddress"), + }, + }) + if err != nil { + if strings.Contains(err.Error(), "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification") { + time.Sleep(retryTime * time.Second) + continue + } + return err + } + break + } + } + } + + // Remove deleted allowed_ip_addresses + for _, oldAllowedIpAddresses := range storage.AllowedIpAddresses { + isDeletedId := true + for _, newIp := range newIps { + if newIp.(string) == *oldAllowedIpAddresses.IpAddress { + isDeletedId = false + break + } + } + if isDeletedId { + for { + _, err := services.GetNetworkStorageService(sess). + Id(id). 
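+ // The removal below mirrors the add path above: mass-access-control conflicts are retried every retryTime seconds.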
+ RemoveAccessFromHostList([]datatypes.Container_Network_Storage_Host{
+ {
+ Id: oldAllowedIpAddresses.Id,
+ ObjectType: sl.String("SoftLayer_Network_Subnet_IpAddress"),
+ },
+ })
+ if err != nil {
+ if strings.Contains(err.Error(), "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification") {
+ time.Sleep(retryTime * time.Second)
+ continue
+ }
+ return err
+ }
+ break
+ }
+ }
+ }
+ return nil
+}
+
+func updateAllowedSubnets(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error {
+ id := *storage.Id
+ newSubnets := d.Get("allowed_subnets").(*schema.Set).List()
+
+ // Add new allowed_subnets
+ for _, newSubnet := range newSubnets {
+ isNewSubnet := true
+ newSubnetArr := strings.Split(newSubnet.(string), "/")
+ newNetworkIdentifier := newSubnetArr[0]
+ newCidr, err := strconv.Atoi(newSubnetArr[1])
+ if err != nil {
+ return err
+ }
+ for _, oldAllowedSubnets := range storage.AllowedSubnets {
+ if newNetworkIdentifier == *oldAllowedSubnets.NetworkIdentifier && newCidr == *oldAllowedSubnets.Cidr {
+ isNewSubnet = false
+ break
+ }
+ }
+ if isNewSubnet {
+ filterStr := fmt.Sprintf("{\"subnets\":{\"networkIdentifier\":{\"operation\":\"%s\"},\"cidr\":{\"operation\":\"%d\"}}}", newNetworkIdentifier, newCidr)
+ subnetObject, err := services.GetAccountService(sess).
+ Filter(filterStr).GetSubnets()
+ if err != nil {
+ return err
+ }
+ if len(subnetObject) != 1 {
+ return fmt.Errorf("Number of subnets is %d", len(subnetObject))
+ }
+ _, err = services.GetNetworkStorageService(sess).
+ Id(id).
+ AllowAccessFromHostList([]datatypes.Container_Network_Storage_Host{
+ {
+ Id: subnetObject[0].Id,
+ ObjectType: sl.String("SoftLayer_Network_Subnet"),
+ },
+ })
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // Remove deleted allowed_subnets
+ for _, oldAllowedSubnets := range storage.AllowedSubnets {
+ isDeletedSubnet := true
+ for _, newSubnet := range newSubnets {
+ newSubnetArr := strings.Split(newSubnet.(string), "/")
+ newNetworkIdentifier := newSubnetArr[0]
+ newCidr, err := strconv.Atoi(newSubnetArr[1])
+ if err != nil {
+ return err
+ }
+
+ if newNetworkIdentifier == *oldAllowedSubnets.NetworkIdentifier && newCidr == *oldAllowedSubnets.Cidr {
+ isDeletedSubnet = false
+ break
+ }
+ }
+ if isDeletedSubnet {
+ _, err := services.GetNetworkStorageService(sess).
+ Id(id).
+ RemoveAccessFromHostList([]datatypes.Container_Network_Storage_Host{
+ {
+ Id: sl.Int(*oldAllowedSubnets.Id),
+ ObjectType: sl.String("SoftLayer_Network_Subnet"),
+ },
+ })
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func updateAllowedVirtualGuestIds(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error {
+ id := *storage.Id
+ newIds := d.Get("allowed_virtual_guest_ids").(*schema.Set).List()
+
+ // Add new allowed_virtual_guest_ids
+ for _, newId := range newIds {
+ isNewId := true
+ for _, oldAllowedVirtualGuest := range storage.AllowedVirtualGuests {
+ if newId.(int) == *oldAllowedVirtualGuest.Id {
+ isNewId = false
+ break
+ }
+ }
+ if isNewId {
+ for {
+ _, err := services.GetNetworkStorageService(sess).
+ Id(id).
+ AllowAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: sl.Int(newId.(int)), + ObjectType: sl.String("SoftLayer_Virtual_Guest"), + }, + }) + if err != nil { + if strings.Contains(err.Error(), "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification") { + time.Sleep(retryTime * time.Second) + continue + } + return err + } + break + } + } + } + + // Remove deleted allowed_virtual_guest_ids + for _, oldAllowedVirtualGuest := range storage.AllowedVirtualGuests { + isDeletedId := true + for _, newId := range newIds { + if newId.(int) == *oldAllowedVirtualGuest.Id { + isDeletedId = false + break + } + } + if isDeletedId { + for { + _, err := services.GetNetworkStorageService(sess). + Id(id). + RemoveAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: sl.Int(*oldAllowedVirtualGuest.Id), + ObjectType: sl.String("SoftLayer_Virtual_Guest"), + }, + }) + if err != nil { + if strings.Contains(err.Error(), "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification") { + time.Sleep(retryTime * time.Second) + continue + } + return err + } + break + } + } + } + return nil +} + +func updateAllowedHardwareIds(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error { + id := *storage.Id + newIds := d.Get("allowed_hardware_ids").(*schema.Set).List() + + // Add new allowed_hardware_ids + for _, newId := range newIds { + isNewId := true + for _, oldAllowedHardware := range storage.AllowedHardware { + if newId.(int) == *oldAllowedHardware.Id { + isNewId = false + break + } + } + if isNewId { + _, err := services.GetNetworkStorageService(sess). + Id(id). + AllowAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: sl.Int(newId.(int)), + ObjectType: sl.String("SoftLayer_Hardware"), + }, + }) + if err != nil { + return err + } + } + } + + // Remove deleted allowed_hardware_ids + for _, oldAllowedHardware := range storage.AllowedHardware { + isDeletedId := true + for _, newId := range newIds { + if newId.(int) == *oldAllowedHardware.Id { + isDeletedId = false + break + } + } + if isDeletedId { + _, err := services.GetNetworkStorageService(sess). + Id(id). + RemoveAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: sl.Int(*oldAllowedHardware.Id), + ObjectType: sl.String("SoftLayer_Hardware"), + }, + }) + if err != nil { + return err + } + } + } + return nil +} + +func enableStorageSnapshot(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error { + id := *storage.Id + for _, e := range d.Get("snapshot_schedule").(*schema.Set).List() { + value := e.(map[string]interface{}) + enable := value["enable"].(bool) + _, err := services.GetNetworkStorageService(sess). + Id(id). + EnableSnapshots(sl.String(value["schedule_type"].(string)), sl.Int(value["retention_count"].(int)), sl.Int(value["minute"].(int)), sl.Int(value["hour"].(int)), sl.String(value["day_of_week"].(string))) + if err != nil { + return err + } + if !enable { + _, err := services.GetNetworkStorageService(sess). + Id(id). + DisableSnapshots(sl.String(value["schedule_type"].(string))) + if err != nil { + return err + } + + } + } + return nil +} + +func updateNotes(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error { + id := *storage.Id + notes := d.Get("notes").(string) + + if (storage.Notes != nil && *storage.Notes != notes) || (storage.Notes == nil && notes != "") { + _, err := services.GetNetworkStorageService(sess). + Id(id). 
+ EditObject(&datatypes.Network_Storage{Notes: sl.String(notes)}) + if err != nil { + return fmt.Errorf("Error adding note to storage (%d): %s", id, err) + } + } + + return nil +} + +func getStorageTypeFromKeyName(key string) (string, error) { + switch key { + case "ENDURANCE_FILE_STORAGE", "ENDURANCE_BLOCK_STORAGE": + return enduranceType, nil + case "PERFORMANCE_FILE_STORAGE", "PERFORMANCE_BLOCK_STORAGE": + return performanceType, nil + } + return "", fmt.Errorf("Couldn't find storage type for key %s", key) +} + +func resourceIBMFilSnapshotHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", + m["schedule_type"].(string))) + buf.WriteString(fmt.Sprintf("%s-", + m["day_of_week"].(string))) + buf.WriteString(fmt.Sprintf("%d-", + m["hour"].(int))) + + buf.WriteString(fmt.Sprintf("%d-", + m["minute"].(int))) + + buf.WriteString(fmt.Sprintf("%d-", + m["retention_count"].(int))) + + return hashcode.String(buf.String()) +} + +func getPrice(prices []datatypes.Product_Item_Price, category, restrictionType string, restrictionValue int) datatypes.Product_Item_Price { + for _, price := range prices { + + if price.LocationGroupId != nil || *price.Categories[0].CategoryCode != category { + continue + } + + if restrictionType != "" && restrictionValue > 0 { + + capacityRestrictionMinimum, _ := strconv.Atoi(*price.CapacityRestrictionMinimum) + capacityRestrictionMaximum, _ := strconv.Atoi(*price.CapacityRestrictionMaximum) + if restrictionType != *price.CapacityRestrictionType || restrictionValue < capacityRestrictionMinimum || restrictionValue > capacityRestrictionMaximum { + continue + } + + } + + return price + + } + + return datatypes.Product_Item_Price{} + +} + +func getPriceByCategory(productItems []datatypes.Product_Item, priceCategory string) (datatypes.Product_Item_Price, error) { + for _, item := range productItems { + price := getPrice(item.Prices, priceCategory, "", 0) + if price.Id != nil { + return price, nil + } + } + + return datatypes.Product_Item_Price{}, + fmt.Errorf("No product items matching with category %s could be found", priceCategory) +} + +func getSaaSPerformSpacePrice(productItems []datatypes.Product_Item, size int) (datatypes.Product_Item_Price, error) { + + for _, item := range productItems { + + category, ok := sl.GrabOk(item, "ItemCategory.CategoryCode") + if ok && category != "performance_storage_space" { + continue + } + if item.CapacityMinimum == nil || item.CapacityMaximum == nil { + continue + } + + capacityMinimum, _ := strconv.Atoi(*item.CapacityMinimum) + capacityMaximum, _ := strconv.Atoi(*item.CapacityMaximum) + + if size < capacityMinimum || + size > capacityMaximum { + continue + } + + keyname := fmt.Sprintf("%d_%d_GBS", capacityMinimum, capacityMaximum) + if item.KeyName == nil || !strings.Contains(*item.KeyName, keyname) { + continue + } + + price := getPrice(item.Prices, "performance_storage_space", "", 0) + if price.Id != nil { + return price, nil + } + } + + return datatypes.Product_Item_Price{}, + fmt.Errorf("Could not find price for performance storage space") + +} + +func getSaaSPerformIOPSPrice(productItems []datatypes.Product_Item, size, iops int) (datatypes.Product_Item_Price, error) { + + for _, item := range productItems { + + category, ok := sl.GrabOk(item, "ItemCategory.CategoryCode") + if ok && category != "performance_storage_iops" { + continue + } + + if item.CapacityMinimum == nil || item.CapacityMaximum == nil { + continue + } + + capacityMinimum, _ := 
strconv.Atoi(*item.CapacityMinimum)
+ capacityMaximum, _ := strconv.Atoi(*item.CapacityMaximum)
+
+ if iops < capacityMinimum ||
+ iops > capacityMaximum {
+ continue
+ }
+
+ price := getPrice(item.Prices, "performance_storage_iops", "STORAGE_SPACE", size)
+ if price.Id != nil {
+ return price, nil
+ }
+ }
+
+ return datatypes.Product_Item_Price{},
+ fmt.Errorf("Could not find price for iops for the given volume")
+}
+
+func getSaaSEnduranceSpacePrice(productItems []datatypes.Product_Item, size int, iops float64) (datatypes.Product_Item_Price, error) {
+ var keyName string
+ if iops != 0.25 {
+ tiers := int(iops)
+ keyName = fmt.Sprintf("STORAGE_SPACE_FOR_%d_IOPS_PER_GB", tiers)
+ } else {
+ keyName = "STORAGE_SPACE_FOR_0_25_IOPS_PER_GB"
+ }
+
+ for _, item := range productItems {
+ if item.KeyName == nil || !strings.Contains(*item.KeyName, keyName) {
+ continue
+ }
+
+ if item.CapacityMinimum == nil || item.CapacityMaximum == nil {
+ continue
+ }
+
+ capacityMinimum, _ := strconv.Atoi(*item.CapacityMinimum)
+ capacityMaximum, _ := strconv.Atoi(*item.CapacityMaximum)
+
+ if size < capacityMinimum ||
+ size > capacityMaximum {
+ continue
+ }
+
+ price := getPrice(item.Prices, "performance_storage_space", "", 0)
+ if price.Id != nil {
+ return price, nil
+ }
+ }
+
+ return datatypes.Product_Item_Price{},
+ fmt.Errorf("Could not find price for endurance storage space")
+}
+
+func getSaaSEnduranceTierPrice(productItems []datatypes.Product_Item, iops float64) (datatypes.Product_Item_Price, error) {
+ targetCapacity := enduranceCapacityRestrictionMap[iops]
+
+ for _, item := range productItems {
+ category, ok := sl.GrabOk(item, "ItemCategory.CategoryCode")
+ if ok && category != "storage_tier_level" {
+ continue
+ }
+
+ if int(*item.Capacity) != targetCapacity {
+ continue
+ }
+
+ price := getPrice(item.Prices, "storage_tier_level", "", 0)
+ if price.Id != nil {
+ return price, nil
+ }
+ }
+
+ return datatypes.Product_Item_Price{},
+ fmt.Errorf("Could not find price for endurance tier level")
+}
+
+func getSaaSSnapshotSpacePrice(productItems []datatypes.Product_Item, size int, iops float64, volumeType string) (datatypes.Product_Item_Price, error) {
+ var targetValue int
+ var targetRestrictionType string
+ if volumeType == "Performance" {
+ targetValue = int(iops)
+ targetRestrictionType = "IOPS"
+ } else {
+ targetValue = enduranceCapacityRestrictionMap[iops]
+ targetRestrictionType = "STORAGE_TIER_LEVEL"
+ }
+
+ for _, item := range productItems {
+ if int(*item.Capacity) != size {
+ continue
+ }
+
+ price := getPrice(item.Prices, "storage_snapshot_space", targetRestrictionType, targetValue)
+ if price.Id != nil {
+ return price, nil
+ }
+ }
+
+ return datatypes.Product_Item_Price{},
+ fmt.Errorf("Could not find price for snapshot space")
+}
+
+func prepareModifyOrder(sess *session.Session, originalVolume datatypes.Network_Storage, newIops float64, newSize int) (datatypes.Container_Product_Order_Network_Storage_AsAService, error) {
+ // Verify that the original volume has not been cancelled
+ if originalVolume.BillingItem == nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, fmt.Errorf("The volume has been cancelled; unable to modify volume.")
+ }
+
+ // Get the appropriate package for the order ('storage_as_a_service' is currently used for modifying volumes)
+ pkg, err := product.GetPackageByType(sess, storagePackageType)
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, err
+ }
+
+ // Get all prices
+ productItems, err := product.GetPackageProducts(sess, *pkg.Id, itemMask)
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, err
+ }
+
+ // Collect the item prices for the order
+ targetItemPrices := []datatypes.Product_Item_Price{}
+ var volumeIsPerformance bool
+ // Based on volume storage type, ensure at least one volume property is being modified,
+ // use current values if some are not specified, and look up price codes for the order
+ volumeStorageType := *originalVolume.StorageType.KeyName
+ if strings.Contains(volumeStorageType, "PERFORMANCE") {
+ volumeIsPerformance = true
+ if newSize == 0 && newIops == 0 {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, fmt.Errorf("A size or IOPS value must be given to modify this performance volume.")
+ }
+ if newSize == 0 {
+ newSize = *originalVolume.CapacityGb
+ } else if newIops == 0 {
+ storageType, err := getStorageTypeFromKeyName(*originalVolume.StorageType.KeyName)
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, fmt.Errorf("Error retrieving storage information: %s", err)
+ }
+ iops, err := getIops(originalVolume, storageType)
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, fmt.Errorf("Error retrieving storage information: %s", err)
+ }
+ newIops = iops
+ if newIops <= 0 {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, fmt.Errorf("Cannot find volume's provisioned IOPS.")
+ }
+ }
+ // Set up the prices array for the order
+ price, err := getPriceByCategory(productItems, "storage_as_a_service")
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, err
+ }
+ targetItemPrices = append(targetItemPrices, price)
+
+ price, err = getSaaSPerformSpacePrice(productItems, newSize)
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, err
+ }
+ targetItemPrices = append(targetItemPrices, price)
+
+ price, err = getSaaSPerformIOPSPrice(productItems, newSize, int(newIops))
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, err
+ }
+ targetItemPrices = append(targetItemPrices, price)
+
+ } else if strings.Contains(volumeStorageType, "ENDURANCE") {
+ volumeIsPerformance = false
+ if newSize == 0 && newIops == 0 {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, fmt.Errorf("A size or IOPS value must be given to modify this endurance volume.")
+ }
+ if newSize == 0 {
+ newSize = *originalVolume.CapacityGb
+ } else if newIops == 0 {
+ newIops, err = findEnduranceTierIopsPerGb(originalVolume)
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, err
+ }
+ }
+ // Set up the prices array for the order
+ price, err := getPriceByCategory(productItems, "storage_as_a_service")
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, err
+ }
+ targetItemPrices = append(targetItemPrices, price)
+ price, err = getSaaSEnduranceSpacePrice(productItems, newSize, newIops)
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, err
+ }
+ targetItemPrices = append(targetItemPrices, price)
+
+ price, err = getSaaSEnduranceTierPrice(productItems, newIops)
+ if err != nil {
+ return datatypes.Container_Product_Order_Network_Storage_AsAService{}, err
+ }
+ targetItemPrices = append(targetItemPrices, price)
+
+ } else {
+ return
datatypes.Container_Product_Order_Network_Storage_AsAService{}, fmt.Errorf("Volume does not have a valid storage type (with an appropriate keyName to indicate the volume is a PERFORMANCE or an ENDURANCE volume).") + } + + modifyOrder := datatypes.Container_Product_Order_Network_Storage_AsAService{ + Container_Product_Order: datatypes.Container_Product_Order{ + ComplexType: sl.String("SoftLayer_Container_Product_Order_Network_Storage_AsAService_Upgrade"), + PackageId: pkg.Id, + Prices: targetItemPrices, + }, + VolumeSize: sl.Int(newSize), + } + + if volumeIsPerformance { + modifyOrder.Iops = sl.Int(int(newIops)) + } + + return modifyOrder, nil +} + +func findEnduranceTierIopsPerGb(originalVolume datatypes.Network_Storage) (iopsPerGB float64, err error) { + tier := *originalVolume.StorageTierLevel + iopsPerGB = 0.25 + + if tier == "LOW_INTENSITY_TIER" { + iopsPerGB = 0.25 + } else if tier == "READHEAVY_TIER" { + iopsPerGB = 2 + } else if tier == "WRITEHEAVY_TIER" { + iopsPerGB = 4 + } else if tier == "10_IOPS_PER_GB" { + iopsPerGB = 10 + } else { + return iopsPerGB, fmt.Errorf("Could not find tier IOPS per GB for this volume") + } + + return iopsPerGB, nil + +} + +// Waits for storage update +func WaitForStorageUpdate(d *schema.ResourceData, meta interface{}) (interface{}, error) { + log.Printf("Waiting for storage (%s) to be updated.", d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return nil, fmt.Errorf("The storage ID %s must be numeric", d.Id()) + } + size := d.Get("capacity").(int) + iops := d.Get("iops").(float64) + sess := meta.(ClientSession).SoftLayerSession() + stateConf := &resource.StateChangeConf{ + Pending: []string{"provisioning"}, + Target: []string{"available"}, + Refresh: func() (interface{}, string, error) { + service := services.GetNetworkStorageService(sess) + result, err := service.Id(id).Mask(storageDetailMask).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Error retrieving storage: %s", err) + } + return result, "provisioning", nil + } + storageType, err := getStorageTypeFromKeyName(*result.StorageType.KeyName) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving storage information: %s", err) + } + temp, err := getIops(result, storageType) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving storage information: %s", err) + } + if *result.CapacityGb == size && iops == float64(temp) { + return result, "available", nil + } + return result, "provisioning", nil + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_subnet.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_subnet.go new file mode 100644 index 00000000000..56e85fcff2a --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_subnet.go @@ -0,0 +1,413 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + SubnetMask = "id,addressSpace,subnetType,version,ipAddressCount," + + "networkIdentifier,cidr,note,endPointIpAddress[ipAddress],networkVlan[id],totalIpAddresses" +) + +var ( + // Map subnet types to product package keyname in SoftLayer_Product_Item + subnetPackageTypeMap = map[string]string{ + "Static": "ADDITIONAL_SERVICES_STATIC_IP_ADDRESSES", + "Portable": "ADDITIONAL_SERVICES_PORTABLE_IP_ADDRESSES", + } +) + +func resourceIBMSubnet() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMSubnetCreate, + Read: resourceIBMSubnetRead, + Update: resourceIBMSubnetUpdate, + Delete: resourceIBMSubnetDelete, + Exists: resourceIBMSubnetExists, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "private": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + Description: "private subnet", + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errs []error) { + typeStr := v.(string) + if typeStr != "Portable" && typeStr != "Static" { + errs = append(errs, errors.New( + "type should be either Portable or Static")) + } + return + }, + Description: "subnet type", + }, + + // IP version 4 or IP version 6 + "ip_version": { + Type: schema.TypeInt, + Optional: true, + Default: 4, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errs []error) { + ipVersion := v.(int) + if ipVersion != 4 && ipVersion != 6 { + errs = append(errs, errors.New( + "ip version should be either 4 or 6")) + } + return + }, + Description: "ip version", + }, + + "capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "number of ip addresses in the subnet", + }, + + // vlan_id should be configured when type is "Portable" + "vlan_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"endpoint_ip"}, + Description: "VLAN ID for the subnet", + }, + + // endpoint_ip should be configured when type is "Static" + "endpoint_ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"vlan_id"}, + Description: "endpoint IP", + }, + + // Provides IP address/cidr format (ex. 
10.10.10.10/28)
+ "subnet_cidr": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "CIDR notation for the subnet",
+ },
+
+ "notes": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "Notes",
+ },
+
+ "tags": {
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ Description: "tags set for the resource",
+ },
+ },
+ }
+}
+
+func resourceIBMSubnetCreate(d *schema.ResourceData, meta interface{}) error {
+ sess := meta.(ClientSession).SoftLayerSession()
+
+ // Find price items with AdditionalServicesSubnetAddresses
+ productOrderContainer, err := buildSubnetProductOrderContainer(d, sess)
+ if err != nil {
+ return fmt.Errorf("Error creating subnet: %s", err)
+ }
+
+ log.Println("[INFO] Creating subnet")
+
+ receipt, err := services.GetProductOrderService(sess.SetRetries(0)).
+ PlaceOrder(productOrderContainer, sl.Bool(false))
+ if err != nil {
+ return fmt.Errorf("Error during creation of subnet: %s", err)
+ }
+
+ Subnet, err := findSubnetByOrderID(sess, *receipt.OrderId, d)
+ if err != nil {
+ return fmt.Errorf("Error during creation of subnet: %s", err)
+ }
+
+ d.SetId(fmt.Sprintf("%d", *Subnet.Id))
+
+ return resourceIBMSubnetUpdate(d, meta)
+}
+
+func resourceIBMSubnetRead(d *schema.ResourceData, meta interface{}) error {
+ sess := meta.(ClientSession).SoftLayerSession()
+ service := services.GetNetworkSubnetService(sess)
+
+ subnetID, err := strconv.Atoi(d.Id())
+ if err != nil {
+ return fmt.Errorf("Not a valid subnet ID, must be an integer: %s", err)
+ }
+
+ subnet, err := service.Id(subnetID).Mask(SubnetMask).GetObject()
+ if err != nil {
+ return fmt.Errorf("Error retrieving a subnet: %s", err)
+ }
+
+ if *subnet.AddressSpace == "PRIVATE" {
+ d.Set("private", true)
+ } else if *subnet.AddressSpace == "PUBLIC" {
+ d.Set("private", false)
+ }
+
+ if subnet.SubnetType == nil {
+ return fmt.Errorf("Invalid subnet type: the subnet type is null")
+ }
+ if strings.Contains(*subnet.SubnetType, "STATIC") {
+ d.Set("type", "Static")
+ } else if strings.Contains(*subnet.SubnetType, "VLAN") {
+ d.Set("type", "Portable")
+ } else {
+ return fmt.Errorf("Invalid subnet type: %s", *subnet.SubnetType)
+ }
+ d.Set("ip_version", *subnet.Version)
+ d.Set("capacity", *subnet.TotalIpAddresses)
+ if *subnet.Version == 6 {
+ d.Set("capacity", 64)
+ }
+ d.Set("subnet_cidr", *subnet.NetworkIdentifier+"/"+strconv.Itoa(*subnet.Cidr))
+ if subnet.Note != nil {
+ d.Set("notes", *subnet.Note)
+ }
+ if subnet.EndPointIpAddress != nil {
+ d.Set("endpoint_ip", *subnet.EndPointIpAddress.IpAddress)
+ }
+ if subnet.NetworkVlan != nil {
+ d.Set("vlan_id", subnet.NetworkVlan.Id)
+ }
+
+ return nil
+}
+
+func resourceIBMSubnetUpdate(d *schema.ResourceData, meta interface{}) error {
+ sess := meta.(ClientSession).SoftLayerSession()
+ service := services.GetNetworkSubnetService(sess)
+
+ subnetID, err := strconv.Atoi(d.Id())
+ if err != nil {
+ return fmt.Errorf("Not a valid subnet ID, must be an integer: %s", err)
+ }
+
+ if d.HasChange("notes") {
+ _, err = service.Id(subnetID).EditNote(sl.String(d.Get("notes").(string)))
+ if err != nil {
+ return fmt.Errorf("Error updating subnet: %s", err)
+ }
+ }
+ return resourceIBMSubnetRead(d, meta)
+}
+
+func resourceIBMSubnetDelete(d *schema.ResourceData, meta interface{}) error {
+ sess := meta.(ClientSession).SoftLayerSession()
+ service := services.GetNetworkSubnetService(sess)
+
+ subnetID, err := strconv.Atoi(d.Id())
+ if
err != nil { + return fmt.Errorf("Not a valid subnet ID, must be an integer: %s", err) + } + + billingItem, err := service.Id(subnetID).GetBillingItem() + if err != nil { + return fmt.Errorf("Error deleting subnet: %s", err) + } + + if billingItem.Id == nil { + return nil + } + _, err = services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService() + if err != nil { + return fmt.Errorf("Error deleting subnet: %s", err) + } + + return err +} + +func resourceIBMSubnetExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkSubnetService(sess) + + subnetID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(subnetID).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("Error retrieving subnet: %s", err) + } + return result.Id != nil && *result.Id == subnetID, nil +} + +func findSubnetByOrderID(sess *session.Session, orderID int, d *schema.ResourceData) (datatypes.Network_Subnet, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + subnets, err := services.GetAccountService(sess). + Filter(filter.Path("subnets.billingItem.orderItem.order.id"). + Eq(strconv.Itoa(orderID)).Build()). + Mask("id,activeTransaction"). + GetSubnets() + if err != nil { + return datatypes.Network_Subnet{}, "", err + } + + if len(subnets) == 1 && subnets[0].ActiveTransaction == nil { + return subnets[0], "complete", nil + } + return datatypes.Network_Subnet{}, "pending", nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + NotFoundChecks: 1440, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Subnet{}, err + } + + if result, ok := pendingResult.(datatypes.Network_Subnet); ok { + return result, nil + } + + return datatypes.Network_Subnet{}, + fmt.Errorf("Cannot find a subnet with order id '%d'", orderID) +} + +func buildSubnetProductOrderContainer(d *schema.ResourceData, sess *session.Session) ( + *datatypes.Container_Product_Order_Network_Subnet, error) { + + // 1. Get a package + typeStr := d.Get("type").(string) + vlanID := d.Get("vlan_id").(int) + private := d.Get("private").(bool) + network := "PUBLIC" + if private { + network = "PRIVATE" + } + + pkg, err := product.GetPackageByType(sess, subnetPackageTypeMap[typeStr]) + if err != nil { + return &datatypes.Container_Product_Order_Network_Subnet{}, err + } + + // 2. Get all prices for the package + productItems, err := product.GetPackageProducts(sess, *pkg.Id) + if err != nil { + return &datatypes.Container_Product_Order_Network_Subnet{}, err + } + + // 3. Select items which have a matching capacity, network, and IP version. 
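+ // For example, a public static IPv4 order would match an item whose
+ // keyName contains both "PUBLIC" and "_IP_" (hypothetically something
+ // like "16_STATIC_PUBLIC_IP_ADDRESSES"); the match below only checks
+ // those two substrings plus the item capacity, so the exact keyName
+ // shape is illustrative.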
+ capacity := d.Get("capacity").(int) + ipVersionStr := "_IP_" + if d.Get("ip_version").(int) == 6 { + ipVersionStr = "_IPV6_" + } + SubnetItems := []datatypes.Product_Item{} + for _, item := range productItems { + if int(*item.Capacity) == d.Get("capacity").(int) && + strings.Contains(*item.KeyName, network) && + strings.Contains(*item.KeyName, ipVersionStr) { + SubnetItems = append(SubnetItems, item) + } + } + + if len(SubnetItems) == 0 { + return &datatypes.Container_Product_Order_Network_Subnet{}, + fmt.Errorf("No product items matching with capacity %d could be found", capacity) + } + + productOrderContainer := datatypes.Container_Product_Order_Network_Subnet{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Prices: []datatypes.Product_Item_Price{ + { + Id: SubnetItems[0].Prices[0].Id, + }, + }, + Quantity: sl.Int(1), + }, + EndPointVlanId: sl.Int(vlanID), + } + + if endpointIP, ok := d.GetOk("endpoint_ip"); ok { + if typeStr != "Static" { + return &datatypes.Container_Product_Order_Network_Subnet{}, + fmt.Errorf("endpoint_ip is only available when type is Static") + } + endpointIPStr := endpointIP.(string) + subnet, err := services.GetNetworkSubnetService(sess).Mask("ipAddresses").GetSubnetForIpAddress(sl.String(endpointIPStr)) + if err != nil { + return &datatypes.Container_Product_Order_Network_Subnet{}, err + } + for _, ipSubnet := range subnet.IpAddresses { + if *ipSubnet.IpAddress == endpointIPStr { + productOrderContainer.EndPointIpAddressId = ipSubnet.Id + } + } + if productOrderContainer.EndPointIpAddressId == nil { + return &datatypes.Container_Product_Order_Network_Subnet{}, + fmt.Errorf("Unable to find an ID of ipAddress: %s", endpointIPStr) + } + } + return &productOrderContainer, nil +} + +func getVlanType(sess *session.Session, vlanID int) (string, error) { + vlan, err := services.GetNetworkVlanService(sess).Id(vlanID).Mask(VlanMask).GetObject() + + if err != nil { + return "", fmt.Errorf("Error retrieving vlan: %s", err) + } + + if vlan.PrimaryRouter != nil { + if strings.HasPrefix(*vlan.PrimaryRouter.Hostname, "fcr") { + return "PUBLIC", nil + } else { + return "PRIVATE", nil + } + } + return "", fmt.Errorf("Unable to determine network") +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_tg_gateway.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_tg_gateway.go new file mode 100644 index 00000000000..77eeb240253 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_tg_gateway.go @@ -0,0 +1,452 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "time"
+
+ "github.com/IBM/networking-go-sdk/transitgatewayapisv1"
+ "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff"
+ "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+ tgGateways = "transit_gateways"
+ tgResourceGroup = "resource_group"
+ tgID = "id"
+ tgCrn = "crn"
+ tgName = "name"
+ tgLocation = "location"
+ tgCreatedAt = "created_at"
+ tgGlobal = "global"
+ tgStatus = "status"
+ tgUpdatedAt = "updated_at"
+ tgGatewayTags = "tags"
+
+ isTransitGatewayProvisioning = "provisioning"
+ isTransitGatewayProvisioningDone = "done"
+ isTransitGatewayDeleting = "deleting"
+ isTransitGatewayDeleted = "done"
+)
+
+func resourceIBMTransitGateway() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceIBMTransitGatewayCreate,
+ Read: resourceIBMTransitGatewayRead,
+ Delete: resourceIBMTransitGatewayDelete,
+ Exists: resourceIBMTransitGatewayExists,
+ Update: resourceIBMTransitGatewayUpdate,
+ Importer: &schema.ResourceImporter{},
+
+ Timeouts: &schema.ResourceTimeout{
+ Create: schema.DefaultTimeout(10 * time.Minute),
+ Delete: schema.DefaultTimeout(10 * time.Minute),
+ Update: schema.DefaultTimeout(10 * time.Minute),
+ },
+
+ CustomizeDiff: customdiff.Sequence(
+ func(diff *schema.ResourceDiff, v interface{}) error {
+ return resourceTagsCustomizeDiff(diff)
+ },
+ ),
+
+ Schema: map[string]*schema.Schema{
+ tgLocation: {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "Location of Transit Gateway Services",
+ },
+
+ tgName: {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: false,
+ Description: "Name of the Transit Gateway",
+ ValidateFunc: InvokeValidator("ibm_tg_gateway", tgName),
+ },
+
+ tgGlobal: {
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: false,
+ Default: false,
+ Description: "Allow global routing for a Transit Gateway. If unspecified, the default value is false",
+ },
+
+ tgGatewayTags: {
+ Type: schema.TypeSet,
+ Optional: true,
+ Computed: true,
+ Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_tg_gateway", "tag")},
+ Set: resourceIBMVPCHash,
+ Description: "Tags for the transit gateway instance",
+ },
+
+ tgResourceGroup: {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ tgCrn: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The crn of the resource",
+ },
+ tgCreatedAt: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The creation time of the resource",
+ },
+ tgUpdatedAt: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The update time of the resource",
+ },
+ tgStatus: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The Status of the resource",
+ },
+
+ ResourceControllerURL: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
+ },
+
+ ResourceName: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The name of the resource",
+ },
+
+ ResourceCRN: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The crn of the resource",
+ },
+
+ ResourceStatus: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The status of the resource",
+ },
+
+ ResourceGroupName: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The resource group name in which resource is provisioned",
+ },
+ },
+ }
+}
+
+func resourceIBMTGValidator() *ResourceValidator {
+
+ validateSchema := make([]ValidateSchema, 1)
+
+ validateSchema = append(validateSchema,
+ ValidateSchema{
+ Identifier: tgName,
+ ValidateFunctionIdentifier: ValidateRegexpLen,
+ Type: TypeString,
+ Required: true,
+ Regexp: `^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$`,
+ MinValueLength: 1,
+ MaxValueLength: 63})
+
+ validateSchema = append(validateSchema,
+ ValidateSchema{
+ Identifier: "tag",
+ ValidateFunctionIdentifier: ValidateRegexpLen,
+ Type: TypeString,
+ Optional: true,
+ Regexp: `^[A-Za-z0-9:_ .-]+$`,
+ MinValueLength: 1,
+ MaxValueLength: 128})
+
+ ibmTGResourceValidator := ResourceValidator{ResourceName: "ibm_tg_gateway", Schema: validateSchema}
+ return &ibmTGResourceValidator
+}
+
+func transitgatewayClient(meta interface{}) (*transitgatewayapisv1.TransitGatewayApisV1, error) {
+ sess, err := meta.(ClientSession).TransitGatewayV1API()
+ return sess, err
+}
+
+func resourceIBMTransitGatewayCreate(d *schema.ResourceData, meta interface{}) error {
+ client, err := transitgatewayClient(meta)
+ if err != nil {
+ return err
+ }
+
+ location := d.Get(tgLocation).(string)
+ name := d.Get(tgName).(string)
+ global := d.Get(tgGlobal).(bool)
+
+ createTransitGatewayOptions := &transitgatewayapisv1.CreateTransitGatewayOptions{}
+
+ createTransitGatewayOptions.Name = &name
+ createTransitGatewayOptions.Location = &location
+ createTransitGatewayOptions.Global = &global
+
+ if rsg, ok := d.GetOk(tgResourceGroup); ok {
+ resourceGroup := rsg.(string)
+ createTransitGatewayOptions.ResourceGroup = &transitgatewayapisv1.ResourceGroupIdentity{ID: &resourceGroup}
+ }
+
+ tgw, response, err := client.CreateTransitGateway(createTransitGatewayOptions)
+
+ if err != nil {
+ log.Printf("[DEBUG] Create Transit Gateway err %s\n%s", err, response)
+ return err
+ }
+ d.SetId(*tgw.ID)
+
+ _, err = isWaitForTransitGatewayAvailable(client, d.Id(), d.Timeout(schema.TimeoutCreate))
+
+ if err != nil {
+ return err
+ }
+
+ v := os.Getenv("IC_ENV_TAGS")
+ if _, ok := d.GetOk(tgGatewayTags); ok || v != "" {
+ oldList, newList := d.GetChange(tgGatewayTags)
+ err = UpdateTagsUsingCRN(oldList, newList, meta, *tgw.Crn)
+ if err != nil {
+ log.Printf(
+ "Error on create of transit gateway (%s) tags: %s", d.Id(), err)
+ }
+ }
+ return resourceIBMTransitGatewayRead(d, meta)
+}
+
+func isWaitForTransitGatewayAvailable(client *transitgatewayapisv1.TransitGatewayApisV1, id string, timeout time.Duration) (interface{}, error) {
+ log.Printf("Waiting for transit gateway (%s) to be available.", id)
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"retry", isTransitGatewayProvisioning},
+ Target: []string{isTransitGatewayProvisioningDone, ""},
+ Refresh: isTransitGatewayRefreshFunc(client, id),
+ Timeout: timeout,
+ Delay: 10 * time.Second,
+ MinTimeout: 10 * time.Second,
+ }
+
+ return stateConf.WaitForState()
+}
+
+func isTransitGatewayRefreshFunc(client *transitgatewayapisv1.TransitGatewayApisV1, id string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ gettgwoptions := &transitgatewayapisv1.GetTransitGatewayOptions{
+ ID: &id,
+ }
+ transitGateway, response, err := client.GetTransitGateway(gettgwoptions)
+ if err != nil {
+ return nil, "", fmt.Errorf("Error Getting Transit Gateway: %s\n%s", err, response)
+ }
+
+ if *transitGateway.Status == "available" || *transitGateway.Status == "failed" {
+ return transitGateway, isTransitGatewayProvisioningDone, nil
+ }
+
+ return transitGateway, isTransitGatewayProvisioning, nil
+ }
+}
+
+func resourceIBMTransitGatewayRead(d *schema.ResourceData, meta interface{}) error {
+ id := d.Id()
+ client, err := transitgatewayClient(meta)
+ if err != nil {
+ return err
+ }
+ tgOptions := &transitgatewayapisv1.GetTransitGatewayOptions{}
+ if id != "" {
+ tgOptions.ID = &id
+ }
+
+ tgw, response, err := client.GetTransitGateway(tgOptions)
+ if err != nil {
+ if response != nil && response.StatusCode == 404 {
+ d.SetId("")
+ return nil
+ }
+ return err
+ }
+
+ d.SetId(*tgw.ID)
+ d.Set(tgCrn, tgw.Crn)
+ d.Set(tgName, tgw.Name)
+ d.Set(tgLocation, tgw.Location)
+ d.Set(tgCreatedAt, tgw.CreatedAt.String())
+
+ if tgw.UpdatedAt != nil {
+ d.Set(tgUpdatedAt, tgw.UpdatedAt.String())
+ }
+ d.Set(tgGlobal, tgw.Global)
+ d.Set(tgStatus, tgw.Status)
+
+ tags, err := GetTagsUsingCRN(meta, *tgw.Crn)
+ if err != nil {
+ log.Printf(
+ "Error on get of transit gateway (%s) tags: %s", d.Id(), err)
+ }
+ d.Set(tgGatewayTags, tags)
+
+ controller, err := getBaseController(meta)
+ if err != nil {
+ return err
+ }
+
+ d.Set(ResourceControllerURL, controller+"/interconnectivity/transit")
+ d.Set(ResourceName, *tgw.Name)
+ d.Set(ResourceCRN, *tgw.Crn)
+ d.Set(ResourceStatus, *tgw.Status)
+ if tgw.ResourceGroup != nil {
+ rg := tgw.ResourceGroup
+ d.Set(tgResourceGroup, *rg.ID)
+ d.Set(ResourceGroupName, *rg.ID)
+ }
+ return nil
+}
+
+func resourceIBMTransitGatewayUpdate(d *schema.ResourceData, meta interface{}) error {
+ client, err := transitgatewayClient(meta)
+
+ if err != nil {
+ return err
+ }
+
+ ID := d.Id()
+ tgOptions := &transitgatewayapisv1.GetTransitGatewayOptions{
+ ID: &ID,
+ }
+ tgw, resp, err := client.GetTransitGateway(tgOptions)
+
+ if err != nil {
+ log.Printf("Error fetching Transit Gateway: %s", resp)
+ return err
+ }
+
+ updateTransitGatewayOptions := &transitgatewayapisv1.UpdateTransitGatewayOptions{}
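+ // Assumption: fields left nil on UpdateTransitGatewayOptions are omitted
+ // from the request body, so the service treats them as unchanged; only
+ // the attributes that actually changed are set below.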
+ updateTransitGatewayOptions.ID = &ID
+ if d.HasChange(tgName) {
+ if tgwname, ok := d.GetOk(tgName); ok {
+ name := tgwname.(string)
+ updateTransitGatewayOptions.Name = &name
+ }
+ }
+ if d.HasChange(tgGlobal) {
+ // Use d.Get rather than d.GetOk so that global can also be updated
+ // back to its zero value (false).
+ global := d.Get(tgGlobal).(bool)
+ updateTransitGatewayOptions.Global = &global
+ }
+ if d.HasChange(tgGatewayTags) {
+ oldList, newList := d.GetChange(tgGatewayTags)
+ err = UpdateTagsUsingCRN(oldList, newList, meta, *tgw.Crn)
+ if err != nil {
+ log.Printf(
+ "Error on update of transit gateway (%s) tags: %s", ID, err)
+ }
+ }
+
+ _, response, err := client.UpdateTransitGateway(updateTransitGatewayOptions)
+ if err != nil {
+ log.Printf("[DEBUG] Update Transit Gateway err %s\n%s", err, response)
+ return err
+ }
+
+ return resourceIBMTransitGatewayRead(d, meta)
+}
+
+func resourceIBMTransitGatewayDelete(d *schema.ResourceData, meta interface{}) error {
+
+ client, err := transitgatewayClient(meta)
+ if err != nil {
+ return err
+ }
+
+ ID := d.Id()
+ delOptions := &transitgatewayapisv1.DeleteTransitGatewayOptions{
+ ID: &ID,
+ }
+ response, err := client.DeleteTransitGateway(delOptions)
+
+ if err != nil {
+ if response != nil && response.StatusCode == 404 {
+ return nil
+ }
+ return fmt.Errorf("Error deleting Transit Gateway (%s): %s\n%s", ID, err, response)
+ }
+ _, err = isWaitForTransitGatewayDeleted(client, ID, d.Timeout(schema.TimeoutDelete))
+ if err != nil {
+ return err
+ }
+ d.SetId("")
+ return nil
+}
+
+func isWaitForTransitGatewayDeleted(client *transitgatewayapisv1.TransitGatewayApisV1, id string, timeout time.Duration) (interface{}, error) {
+ log.Printf("Waiting for transit gateway (%s) to be deleted.", id)
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"retry", isTransitGatewayDeleting},
+ Target: []string{"", isTransitGatewayDeleted},
+ Refresh: isTransitGatewayDeleteRefreshFunc(client, id),
+ Timeout: timeout,
+ Delay: 10 * time.Second,
+ MinTimeout: 10 * time.Second,
+ }
+
+ return stateConf.WaitForState()
+}
+
+func isTransitGatewayDeleteRefreshFunc(client *transitgatewayapisv1.TransitGatewayApisV1, id string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ log.Printf("[DEBUG] Checking if transit gateway (%s) is deleted", id)
+ gettgwoptions := &transitgatewayapisv1.GetTransitGatewayOptions{
+ ID: &id,
+ }
+ transitGateway, response, err := client.GetTransitGateway(gettgwoptions)
+ if err != nil {
+ if response != nil && response.StatusCode == 404 {
+ return transitGateway, isTransitGatewayDeleted, nil
+ }
+ return nil, "", fmt.Errorf("Error Getting Transit Gateway: %s\n%s", err, response)
+ }
+ return transitGateway, isTransitGatewayDeleting, err
+ }
+}
+
+func resourceIBMTransitGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+
+ client, err := transitgatewayClient(meta)
+ if err != nil {
+ return false, err
+ }
+
+ ID := d.Id()
+
+ tgOptions := &transitgatewayapisv1.GetTransitGatewayOptions{}
+ if ID != "" {
+ tgOptions.ID = &ID
+ }
+ _, response, err := client.GetTransitGateway(tgOptions)
+ if err != nil {
+ if response != nil && response.StatusCode == 404 {
+ d.SetId("")
+ return false, nil
+ }
+ return false, fmt.Errorf("Error Getting Transit Gateway: %s\n%s", err, response)
+ }
+
+ return true, nil
+}
diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_tg_gateway_connection.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_tg_gateway_connection.go
new file mode 100644
index 00000000000..1124ffd77e6
--- /dev/null
+++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_tg_gateway_connection.go
@@ -0,0 +1,436 @@
+// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0
+
+package ibm
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/IBM/networking-go-sdk/transitgatewayapisv1"
+ "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+const (
+ tgGatewayConnections = "gateway_connections"
+ tgNetworkId = "network_id"
+ tgNetworkType = "network_type"
+ tgNetworkAccountID = "network_account_id"
+ tgConnectionCreatedAt = "created_at"
+ tgConnectionStatus = "status"
+ tgGatewayId = "gateway"
+ isTransitGatewayConnectionDeleting = "deleting"
+ isTransitGatewayConnectionDetaching = "detaching"
+ isTransitGatewayConnectionDeleted = "detached"
+ isTransitGatewayConnectionPending = "pending"
+ isTransitGatewayConnectionAttached = "attached"
+ tgRequestStatus = "request_status"
+ tgConnectionId = "connection_id"
+)
+
+func resourceIBMTransitGatewayConnection() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceIBMTransitGatewayConnectionCreate,
+ Read: resourceIBMTransitGatewayConnectionRead,
+ Delete: resourceIBMTransitGatewayConnectionDelete,
+ Exists: resourceIBMTransitGatewayConnectionExists,
+ Update: resourceIBMTransitGatewayConnectionUpdate,
+ Importer: &schema.ResourceImporter{},
+
+ Timeouts: &schema.ResourceTimeout{
+ Create: schema.DefaultTimeout(10 * time.Minute),
+ Delete: schema.DefaultTimeout(10 * time.Minute),
+ Update: schema.DefaultTimeout(10 * time.Minute),
+ },
+
+ Schema: map[string]*schema.Schema{
+ tgGatewayId: {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "The Transit Gateway identifier",
+ },
+ tgConnectionId: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The Transit Gateway Connection identifier",
+ },
+ tgNetworkType: {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: InvokeValidator("ibm_tg_connection", tgNetworkType),
+ Description: "Defines what type of network is connected via this connection. Allowable values (classic,vpc)",
+ },
+ tgName: {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: false,
+ ValidateFunc: InvokeValidator("ibm_tg_connection", tgName),
+ Description: "The user-defined name for this transit gateway connection. If unspecified, the name will be the network name (the name of the VPC in the case of network type 'vpc', and the word Classic, in the case of network type 'classic').",
+ },
+ tgNetworkId: {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ Description: "The ID of the network being connected via this connection. This field is required for some types, such as 'vpc'. For network type 'vpc' this is the CRN of the VPC to be connected. This field is required to be unspecified for network type 'classic'.",
+ },
+ tgNetworkAccountID: {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ Description: "The ID of the account which owns the network that is being connected.
Generally only used if the network is in a different account than the gateway.",
+ },
+ tgCreatedAt: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The date and time that this connection was created",
+ },
+ tgUpdatedAt: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The date and time that this connection was last updated",
+ },
+ tgStatus: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The current configuration state of this connection. Possible values: [attached,failed,pending,deleting,detaching,detached]",
+ },
+ tgRequestStatus: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Only visible for cross account connections, this field represents the status of the request to connect the given network between accounts. Possible values: [pending,approved,rejected,expired,detached]",
+ },
+ RelatedCRN: {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The crn of the transit gateway",
+ },
+ },
+ }
+}
+func resourceIBMTransitGatewayConnectionValidator() *ResourceValidator {
+
+ validateSchema := make([]ValidateSchema, 1)
+ networkType := "classic, vpc"
+ validateSchema = append(validateSchema,
+ ValidateSchema{
+ Identifier: tgNetworkType,
+ ValidateFunctionIdentifier: ValidateAllowedStringValue,
+ Type: TypeString,
+ Required: true,
+ AllowedValues: networkType})
+ validateSchema = append(validateSchema,
+ ValidateSchema{
+ Identifier: tgName,
+ ValidateFunctionIdentifier: ValidateRegexpLen,
+ Type: TypeString,
+ Optional: true,
+ Regexp: `^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$`,
+ MinValueLength: 1,
+ MaxValueLength: 63})
+
+ ibmTransitGatewayConnectionResourceValidator := ResourceValidator{ResourceName: "ibm_tg_connection", Schema: validateSchema}
+
+ return &ibmTransitGatewayConnectionResourceValidator
+}
+func resourceIBMTransitGatewayConnectionCreate(d *schema.ResourceData, meta interface{}) error {
+ client, err := transitgatewayClient(meta)
+ if err != nil {
+ return err
+ }
+
+ createTransitGatewayConnectionOptions := &transitgatewayapisv1.CreateTransitGatewayConnectionOptions{}
+
+ gatewayId := d.Get(tgGatewayId).(string)
+ createTransitGatewayConnectionOptions.SetTransitGatewayID(gatewayId)
+
+ if _, ok := d.GetOk(tgName); ok {
+ name := d.Get(tgName).(string)
+ createTransitGatewayConnectionOptions.SetName(name)
+ }
+
+ networkType := d.Get(tgNetworkType).(string)
+ createTransitGatewayConnectionOptions.SetNetworkType(networkType)
+ if _, ok := d.GetOk(tgNetworkId); ok {
+ networkID := d.Get(tgNetworkId).(string)
+ createTransitGatewayConnectionOptions.SetNetworkID(networkID)
+ }
+ if _, ok := d.GetOk(tgNetworkAccountID); ok {
+ networkAccId := d.Get(tgNetworkAccountID).(string)
+ createTransitGatewayConnectionOptions.SetNetworkAccountID(networkAccId)
+ }
+
+ tgConnections, response, err := client.CreateTransitGatewayConnection(createTransitGatewayConnectionOptions)
+ if err != nil {
+ return fmt.Errorf("Create Transit Gateway connection err %s\n%s", err, response)
+ }
+
+ d.SetId(fmt.Sprintf("%s/%s", gatewayId, *tgConnections.ID))
+ d.Set(tgConnectionId, *tgConnections.ID)
+
+ if tgConnections.NetworkAccountID != nil {
+ d.Set(tgNetworkAccountID, *tgConnections.NetworkAccountID)
+ return resourceIBMTransitGatewayConnectionRead(d, meta)
+ }
+ _, err = isWaitForTransitGatewayConnectionAvailable(client, d.Id(), d.Timeout(schema.TimeoutCreate))
+
+ if err != nil {
+ return err
+ }
+ return resourceIBMTransitGatewayConnectionRead(d, meta)
+}
+func isWaitForTransitGatewayConnectionAvailable(client
*transitgatewayapisv1.TransitGatewayApisV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for transit gateway connection (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", isTransitGatewayConnectionPending}, + Target: []string{isTransitGatewayConnectionAttached, ""}, + Refresh: isTransitGatewayConnectionRefreshFunc(client, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} +func isTransitGatewayConnectionRefreshFunc(client *transitgatewayapisv1.TransitGatewayApisV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + parts, err := idParts(id) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Transit Gateway connection: %s", err) + // return err + } + + gatewayId := parts[0] + ID := parts[1] + getTransitGatewayConnectionOptions := &transitgatewayapisv1.GetTransitGatewayConnectionOptions{} + getTransitGatewayConnectionOptions.SetTransitGatewayID(gatewayId) + getTransitGatewayConnectionOptions.SetID(ID) + tgConnection, response, err := client.GetTransitGatewayConnection(getTransitGatewayConnectionOptions) + if err != nil { + return nil, "", fmt.Errorf("Error Getting Transit Gateway Connection (%s): %s\n%s", ID, err, response) + } + if *tgConnection.Status == "attached" || *tgConnection.Status == "failed" { + return tgConnection, isTransitGatewayConnectionAttached, nil + } + + return tgConnection, isTransitGatewayConnectionPending, nil + } +} +func resourceIBMTransitGatewayConnectionRead(d *schema.ResourceData, meta interface{}) error { + + client, err := transitgatewayClient(meta) + if err != nil { + return err + } + parts, err := idParts(d.Id()) + if err != nil { + return err + } + + gatewayId := parts[0] + ID := parts[1] + + getTransitGatewayConnectionOptions := &transitgatewayapisv1.GetTransitGatewayConnectionOptions{} + getTransitGatewayConnectionOptions.SetTransitGatewayID(gatewayId) + getTransitGatewayConnectionOptions.SetID(ID) + instance, response, err := client.GetTransitGatewayConnection(getTransitGatewayConnectionOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error Getting Transit Gateway Connection (%s): %s\n%s", ID, err, response) + } + + if instance.Name != nil { + d.Set(tgName, *instance.Name) + } + if instance.NetworkType != nil { + d.Set(tgNetworkType, *instance.NetworkType) + } + if instance.UpdatedAt != nil { + d.Set(tgUpdatedAt, instance.UpdatedAt.String()) + } + if instance.NetworkID != nil { + d.Set(tgNetworkId, *instance.NetworkID) + } + if instance.CreatedAt != nil { + d.Set(tgCreatedAt, instance.CreatedAt.String()) + } + if instance.Status != nil { + d.Set(tgStatus, *instance.Status) + } + if instance.NetworkAccountID != nil { + d.Set(tgNetworkAccountID, *instance.NetworkAccountID) + } + if instance.RequestStatus != nil { + d.Set(tgRequestStatus, *instance.RequestStatus) + } + d.Set(tgConnectionId, *instance.ID) + d.Set(tgGatewayId, gatewayId) + getTransitGatewayOptions := &transitgatewayapisv1.GetTransitGatewayOptions{ + ID: &gatewayId, + } + tgw, response, err := client.GetTransitGateway(getTransitGatewayOptions) + if err != nil { + return fmt.Errorf("Error Getting Transit Gateway : %s\n%s", err, response) + } + d.Set(RelatedCRN, *tgw.Crn) + + return nil +} + +func resourceIBMTransitGatewayConnectionUpdate(d *schema.ResourceData, meta interface{}) error { + + client, err := 
transitgatewayClient(meta)
+ if err != nil {
+ return err
+ }
+ parts, err := idParts(d.Id())
+ if err != nil {
+ return err
+ }
+
+ gatewayId := parts[0]
+ ID := parts[1]
+
+ getTransitGatewayConnectionOptions := &transitgatewayapisv1.GetTransitGatewayConnectionOptions{
+ ID: &ID,
+ }
+ getTransitGatewayConnectionOptions.SetTransitGatewayID(gatewayId)
+
+ _, response, err := client.GetTransitGatewayConnection(getTransitGatewayConnectionOptions)
+ if err != nil {
+ return fmt.Errorf("Error Getting Transit Gateway Connection: %s\n%s", err, response)
+ }
+
+ updateTransitGatewayConnectionOptions := &transitgatewayapisv1.UpdateTransitGatewayConnectionOptions{}
+ updateTransitGatewayConnectionOptions.ID = &ID
+ updateTransitGatewayConnectionOptions.SetTransitGatewayID(gatewayId)
+ if d.HasChange(tgName) {
+ if d.Get(tgName) != nil {
+ name := d.Get(tgName).(string)
+ updateTransitGatewayConnectionOptions.Name = &name
+ }
+ }
+
+ _, response, err = client.UpdateTransitGatewayConnection(updateTransitGatewayConnectionOptions)
+ if err != nil {
+ return fmt.Errorf("Error in Update Transit Gateway Connection: %s\n%s", err, response)
+ }
+
+ return resourceIBMTransitGatewayConnectionRead(d, meta)
+}
+
+func resourceIBMTransitGatewayConnectionDelete(d *schema.ResourceData, meta interface{}) error {
+
+ client, err := transitgatewayClient(meta)
+ if err != nil {
+ return err
+ }
+ parts, err := idParts(d.Id())
+ if err != nil {
+ return err
+ }
+
+ gatewayId := parts[0]
+ ID := parts[1]
+ deleteTransitGatewayConnectionOptions := &transitgatewayapisv1.DeleteTransitGatewayConnectionOptions{
+ ID: &ID,
+ }
+ deleteTransitGatewayConnectionOptions.SetTransitGatewayID(gatewayId)
+ response, err := client.DeleteTransitGatewayConnection(deleteTransitGatewayConnectionOptions)
+
+ if err != nil {
+ if response != nil && response.StatusCode == 404 {
+ return nil
+ }
+ return fmt.Errorf("Error deleting Transit Gateway Connection (%s): %s\n%s", ID, err, response)
+ }
+ _, err = isWaitForTransitGatewayConnectionDeleted(client, d.Id(), d.Timeout(schema.TimeoutDelete))
+
+ if err != nil {
+ return err
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func isWaitForTransitGatewayConnectionDeleted(client *transitgatewayapisv1.TransitGatewayApisV1, id string, timeout time.Duration) (interface{}, error) {
+ log.Printf("Waiting for transit gateway connection (%s) to be deleted.", id)
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"retry", isTransitGatewayConnectionDeleting, isTransitGatewayConnectionDetaching},
+ Target: []string{"", isTransitGatewayConnectionDeleted},
+ Refresh: isTransitGatewayConnectionDeleteRefreshFunc(client, id),
+ Timeout: timeout,
+ Delay: 10 * time.Second,
+ MinTimeout: 10 * time.Second,
+ }
+
+ return stateConf.WaitForState()
+}
+
+func isTransitGatewayConnectionDeleteRefreshFunc(client *transitgatewayapisv1.TransitGatewayApisV1, id string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ log.Printf("[DEBUG] Checking if transit gateway connection (%s) is deleted", id)
+ parts, err := idParts(id)
+ if err != nil {
+ return nil, "", fmt.Errorf("Error Getting Transit Gateway connection: %s", err)
+ }
+
+ gatewayId := parts[0]
+ ID := parts[1]
+ getTransitGatewayConnectionOptions := &transitgatewayapisv1.GetTransitGatewayConnectionOptions{}
+ getTransitGatewayConnectionOptions.SetTransitGatewayID(gatewayId)
+ getTransitGatewayConnectionOptions.SetID(ID)
+ tgConnection, response, err := client.GetTransitGatewayConnection(getTransitGatewayConnectionOptions)
+
+ if err != nil {
+ if response != nil &&
response.StatusCode == 404 { + return tgConnection, isTransitGatewayConnectionDeleted, nil + } + + return nil, "", fmt.Errorf("Error Getting Transit Gateway Connection (%s): %s\n%s", ID, err, response) + } + return tgConnection, isTransitGatewayConnectionDeleting, err + } +} +func resourceIBMTransitGatewayConnectionExists(d *schema.ResourceData, meta interface{}) (bool, error) { + client, err := transitgatewayClient(meta) + if err != nil { + return false, err + } + parts, err := idParts(d.Id()) + if err != nil { + return false, err + } + + gatewayId := parts[0] + ID := parts[1] + + getTransitGatewayConnectionOptions := &transitgatewayapisv1.GetTransitGatewayConnectionOptions{ + ID: &ID, + } + getTransitGatewayConnectionOptions.SetTransitGatewayID(gatewayId) + _, response, err := client.GetTransitGatewayConnection(getTransitGatewayConnectionOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return false, nil + } + return false, fmt.Errorf("Error Getting Transit Gateway Connection: %s\n%s", err, response) + } + + return true, nil +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/structures.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/structures.go new file mode 100644 index 00000000000..248e469a615 --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/structures.go @@ -0,0 +1,2475 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + b64 "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "net/url" + "os" + "path" + "reflect" + "strconv" + "strings" + + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/ibm-cos-sdk-go-config/resourceconfigurationv1" + "github.com/IBM/ibm-cos-sdk-go/service/s3" + kp "github.com/IBM/keyprotect-go-client" + "github.com/IBM/platform-services-go-sdk/globaltaggingv1" + "github.com/IBM/platform-services-go-sdk/iampolicymanagementv1" + "github.com/apache/openwhisk-client-go/whisk" + "github.com/go-openapi/strfmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/sl" + + "github.com/IBM-Cloud/bluemix-go/api/container/containerv1" + "github.com/IBM-Cloud/bluemix-go/api/container/containerv2" + "github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv1" + "github.com/IBM-Cloud/bluemix-go/api/iamuum/iamuumv2" + "github.com/IBM-Cloud/bluemix-go/api/icd/icdv4" + "github.com/IBM-Cloud/bluemix-go/api/mccp/mccpv2" + "github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/managementv2" + "github.com/IBM-Cloud/bluemix-go/api/schematics" + "github.com/IBM-Cloud/bluemix-go/api/usermanagement/usermanagementv2" + "github.com/IBM-Cloud/bluemix-go/models" +) + +const ( + prodBaseController = "https://cloud.ibm.com" + stageBaseController = "https://test.cloud.ibm.com" + //ResourceControllerURL ... + ResourceControllerURL = "resource_controller_url" + //ResourceName ... + ResourceName = "resource_name" + //ResourceCRN ... + ResourceCRN = "resource_crn" + //ResourceStatus ... + ResourceStatus = "resource_status" + //ResourceGroupName ... + ResourceGroupName = "resource_group_name" + //RelatedCRN ... + RelatedCRN = "related_crn" + SystemIBMLabelPrefix = "ibm-cloud.kubernetes.io/" + KubernetesLabelPrefix = "kubernetes.io/" + K8sLabelPrefix = "k8s.io/" +) + +//HashInt ... 
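+// (the int value serves as its own hash for schema.Set membership)
+//
+// The expand*/flatten* helpers below follow the usual Terraform plugin
+// convention: schema data arrives as []interface{} and is converted to typed
+// Go slices on the way in (expand*) and back again on the way out (flatten*).
+// A minimal illustrative round trip (sketch, not called by the provider):
+//
+//   raw := []interface{}{"a", "b"}   // as read from schema.ResourceData
+//   ss := expandStringList(raw)      // []string{"a", "b"}
+//   out := flattenStringList(ss)     // []interface{}{"a", "b"}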
+func HashInt(v interface{}) int { return v.(int) } + +func expandStringList(input []interface{}) []string { + vs := make([]string, len(input)) + for i, v := range input { + vs[i] = v.(string) + } + return vs +} + +func flattenStringList(list []string) []interface{} { + vs := make([]interface{}, len(list)) + for i, v := range list { + vs[i] = v + } + return vs +} + +func expandIntList(input []interface{}) []int { + vs := make([]int, len(input)) + for i, v := range input { + vs[i] = v.(int) + } + return vs +} + +func flattenIntList(list []int) []interface{} { + vs := make([]interface{}, len(list)) + for i, v := range list { + vs[i] = v + } + return vs +} + +func newStringSet(f schema.SchemaSetFunc, in []string) *schema.Set { + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + out[i] = v + } + return schema.NewSet(f, out) +} + +func flattenRoute(in []mccpv2.Route) *schema.Set { + vs := make([]string, len(in)) + for i, v := range in { + vs[i] = v.GUID + } + return newStringSet(schema.HashString, vs) +} + +func stringSliceToSet(in []string) *schema.Set { + vs := make([]string, len(in)) + for i, v := range in { + vs[i] = v + } + return newStringSet(schema.HashString, vs) +} + +func flattenServiceBindings(in []mccpv2.ServiceBinding) *schema.Set { + vs := make([]string, len(in)) + for i, v := range in { + vs[i] = v.ServiceInstanceGUID + } + return newStringSet(schema.HashString, vs) +} + +func flattenPort(in []int) *schema.Set { + var out = make([]interface{}, len(in)) + for i, v := range in { + out[i] = v + } + return schema.NewSet(HashInt, out) +} + +func flattenFileStorageID(in []datatypes.Network_Storage) *schema.Set { + var out = []interface{}{} + for _, v := range in { + if *v.NasType == "NAS" { + out = append(out, *v.Id) + } + } + return schema.NewSet(HashInt, out) +} + +func flattenBlockStorageID(in []datatypes.Network_Storage) *schema.Set { + var out = []interface{}{} + for _, v := range in { + if *v.NasType == "ISCSI" { + out = append(out, *v.Id) + } + } + return schema.NewSet(HashInt, out) +} + +func flattenSSHKeyIDs(in []datatypes.Security_Ssh_Key) *schema.Set { + var out = []interface{}{} + for _, v := range in { + out = append(out, *v.Id) + } + return schema.NewSet(HashInt, out) +} + +func flattenSpaceRoleUsers(in []mccpv2.SpaceRole) *schema.Set { + var out = []interface{}{} + for _, v := range in { + out = append(out, v.UserName) + } + return schema.NewSet(schema.HashString, out) +} + +func flattenOrgRole(in []mccpv2.OrgRole, excludeUsername string) *schema.Set { + var out = []interface{}{} + for _, v := range in { + if excludeUsername == "" { + out = append(out, v.UserName) + } else { + if v.UserName != excludeUsername { + out = append(out, v.UserName) + } + } + } + return schema.NewSet(schema.HashString, out) +} + +func flattenMapInterfaceVal(m map[string]interface{}) map[string]string { + out := make(map[string]string) + for k, v := range m { + out[k] = fmt.Sprintf("%v", v) + } + return out +} + +func flattenCredentials(creds map[string]interface{}) map[string]string { + return flattenMapInterfaceVal(creds) +} + +func flattenServiceKeyCredentials(creds map[string]interface{}) map[string]string { + return flattenCredentials(creds) +} + +func flattenServiceInstanceCredentials(keys []mccpv2.ServiceKeyFields) []interface{} { + var out = make([]interface{}, len(keys), len(keys)) + for i, k := range keys { + m := make(map[string]interface{}) + m["name"] = k.Entity.Name + m["credentials"] = Flatten(k.Entity.Credentials) + out[i] = m + } + return out +} + +func 
flattenUsersSet(userList *schema.Set) []string { + users := make([]string, 0) + for _, user := range userList.List() { + users = append(users, user.(string)) + } + return users +} + +func expandProtocols(configured []interface{}) ([]datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration, error) { + protocols := make([]datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration, 0, len(configured)) + for _, lRaw := range configured { + data := lRaw.(map[string]interface{}) + p := &datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration{ + FrontendProtocol: sl.String(data["frontend_protocol"].(string)), + BackendProtocol: sl.String(data["backend_protocol"].(string)), + FrontendPort: sl.Int(data["frontend_port"].(int)), + BackendPort: sl.Int(data["backend_port"].(int)), + } + if v, ok := data["session_stickiness"]; ok && v.(string) != "" { + p.SessionType = sl.String(v.(string)) + } + if v, ok := data["max_conn"]; ok && v.(int) != 0 { + p.MaxConn = sl.Int(v.(int)) + } + if v, ok := data["tls_certificate_id"]; ok && v.(int) != 0 { + p.TlsCertificateId = sl.Int(v.(int)) + } + if v, ok := data["load_balancing_method"]; ok { + p.LoadBalancingMethod = sl.String(lbMethodToId[v.(string)]) + } + if v, ok := data["protocol_id"]; ok && v.(string) != "" { + p.ListenerUuid = sl.String(v.(string)) + } + + var isValid bool + if p.TlsCertificateId != nil && *p.TlsCertificateId != 0 { + // validate the protocol is correct + if *p.FrontendProtocol == "HTTPS" { + isValid = true + } + } else { + isValid = true + } + + if isValid { + protocols = append(protocols, *p) + } else { + return protocols, fmt.Errorf("tls_certificate_id may be set only when frontend protocol is 'HTTPS'") + } + + } + return protocols, nil +} + +func expandMembers(configured []interface{}) []datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo { + members := make([]datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo, 0, len(configured)) + for _, lRaw := range configured { + data := lRaw.(map[string]interface{}) + p := &datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo{} + if v, ok := data["private_ip_address"]; ok && v.(string) != "" { + p.PrivateIpAddress = sl.String(v.(string)) + } + if v, ok := data["weight"]; ok && v.(int) != 0 { + p.Weight = sl.Int(v.(int)) + } + + members = append(members, *p) + } + return members +} + +func flattenServerInstances(list []datatypes.Network_LBaaS_Member) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + l := map[string]interface{}{ + "private_ip_address": *i.Address, + "member_id": *i.Uuid, + } + if i.Weight != nil { + l["weight"] = *i.Weight + } + result = append(result, l) + } + return result +} + +func flattenProtocols(list []datatypes.Network_LBaaS_Listener) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + l := map[string]interface{}{ + "frontend_protocol": *i.Protocol, + "frontend_port": *i.ProtocolPort, + "backend_protocol": *i.DefaultPool.Protocol, + "backend_port": *i.DefaultPool.ProtocolPort, + "load_balancing_method": lbIdToMethod[*i.DefaultPool.LoadBalancingAlgorithm], + "protocol_id": *i.Uuid, + } + if i.DefaultPool.SessionAffinity != nil && i.DefaultPool.SessionAffinity.Type != nil && *i.DefaultPool.SessionAffinity.Type != "" { + l["session_stickiness"] = *i.DefaultPool.SessionAffinity.Type + } + if i.ConnectionLimit != nil && *i.ConnectionLimit != 0 { + l["max_conn"] = *i.ConnectionLimit + } + if i.TlsCertificateId != nil && *i.TlsCertificateId != 0 { 
+ l["tls_certificate_id"] = *i.TlsCertificateId + } + result = append(result, l) + } + return result +} + +func flattenVpcWorkerPools(list []containerv2.GetWorkerPoolResponse) []map[string]interface{} { + workerPools := make([]map[string]interface{}, len(list)) + for i, workerPool := range list { + l := map[string]interface{}{ + "id": workerPool.ID, + "name": workerPool.PoolName, + "flavor": workerPool.Flavor, + "worker_count": workerPool.WorkerCount, + "isolation": workerPool.Isolation, + "labels": workerPool.Labels, + "state": workerPool.Lifecycle.ActualState, + } + zones := workerPool.Zones + zonesConfig := make([]map[string]interface{}, len(zones)) + for j, zone := range zones { + z := map[string]interface{}{ + "zone": zone.ID, + "worker_count": zone.WorkerCount, + } + subnets := zone.Subnets + subnetConfig := make([]map[string]interface{}, len(subnets)) + for k, subnet := range subnets { + s := map[string]interface{}{ + "id": subnet.ID, + "primary": subnet.Primary, + } + subnetConfig[k] = s + } + z["subnets"] = subnetConfig + zonesConfig[j] = z + } + l["zones"] = zonesConfig + workerPools[i] = l + } + + return workerPools +} + +func flattenVpcZones(list []containerv2.ZoneResp) []map[string]interface{} { + zones := make([]map[string]interface{}, len(list)) + for i, zone := range list { + l := map[string]interface{}{ + "id": zone.ID, + "subnet_id": flattenSubnets(zone.Subnets), + "worker_count": zone.WorkerCount, + } + zones[i] = l + } + return zones +} +func flattenConditions(list []iamuumv2.Condition) []map[string]interface{} { + conditions := make([]map[string]interface{}, len(list)) + for i, cond := range list { + l := map[string]interface{}{ + "claim": cond.Claim, + "operator": cond.Operator, + "value": strings.ReplaceAll(cond.Value, "\"", ""), + } + conditions[i] = l + } + return conditions +} +func flattenAccessGroupRules(list []iamuumv2.CreateRuleResponse) []map[string]interface{} { + rules := make([]map[string]interface{}, len(list)) + for i, item := range list { + l := map[string]interface{}{ + "name": item.Name, + "expiration": item.Expiration, + "identity_provider": item.RealmName, + "conditions": flattenConditions(item.Conditions), + } + rules[i] = l + } + return rules +} + +func flattenSubnets(list []containerv2.Subnet) []map[string]interface{} { + subs := make([]map[string]interface{}, len(list)) + for i, sub := range list { + l := map[string]interface{}{ + "id": sub.ID, + "worker_count": sub.Primary, + } + subs[i] = l + } + return subs +} + +func flattenZones(list []containerv1.WorkerPoolZoneResponse) []map[string]interface{} { + zones := make([]map[string]interface{}, len(list)) + for i, zone := range list { + l := map[string]interface{}{ + "zone": zone.WorkerPoolZone.ID, + "private_vlan": zone.WorkerPoolZone.WorkerPoolZoneNetwork.PrivateVLAN, + "public_vlan": zone.WorkerPoolZone.WorkerPoolZoneNetwork.PublicVLAN, + "worker_count": zone.WorkerCount, + } + zones[i] = l + } + return zones +} + +func flattenWorkerPools(list []containerv1.WorkerPoolResponse) []map[string]interface{} { + workerPools := make([]map[string]interface{}, len(list)) + for i, workerPool := range list { + l := map[string]interface{}{ + "id": workerPool.ID, + "hardware": workerPool.Isolation, + "name": workerPool.Name, + "machine_type": workerPool.MachineType, + "size_per_zone": workerPool.Size, + "state": workerPool.State, + "labels": workerPool.Labels, + } + zones := workerPool.Zones + zonesConfig := make([]map[string]interface{}, len(zones)) + for j, zone := range zones { + z := 
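+ // Each classic worker-pool zone flattens to a nested map (zone, private and
+ // public VLANs, worker count) collected under the pool's "zones" key.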
map[string]interface{}{ + "zone": zone.ID, + "private_vlan": zone.PrivateVLAN, + "public_vlan": zone.PublicVLAN, + "worker_count": zone.WorkerCount, + } + zonesConfig[j] = z + } + l["zones"] = zonesConfig + workerPools[i] = l + } + + return workerPools +} + +func flattenAlbs(list []containerv1.ALBConfig, filterType string) []map[string]interface{} { + albs := make([]map[string]interface{}, 0) + for _, alb := range list { + if alb.ALBType == filterType || filterType == "all" { + l := map[string]interface{}{ + "id": alb.ALBID, + "name": alb.Name, + "alb_type": alb.ALBType, + "enable": alb.Enable, + "state": alb.State, + "num_of_instances": alb.NumOfInstances, + "alb_ip": alb.ALBIP, + "resize": alb.Resize, + "disable_deployment": alb.DisableDeployment, + } + albs = append(albs, l) + } + } + return albs +} + +func flattenVpcAlbs(list []containerv2.AlbConfig, filterType string) []map[string]interface{} { + albs := make([]map[string]interface{}, 0) + for _, alb := range list { + if alb.AlbType == filterType || filterType == "all" { + l := map[string]interface{}{ + "id": alb.AlbID, + "name": alb.Name, + "alb_type": alb.AlbType, + "enable": alb.Enable, + "state": alb.State, + "resize": alb.Resize, + "disable_deployment": alb.DisableDeployment, + "load_balancer_hostname": alb.LoadBalancerHostname, + } + albs = append(albs, l) + } + } + return albs +} + +func flattenNetworkInterfaces(list []containerv2.Network) []map[string]interface{} { + nwInterfaces := make([]map[string]interface{}, len(list)) + for i, nw := range list { + l := map[string]interface{}{ + "cidr": nw.Cidr, + "ip_address": nw.IpAddress, + "subnet_id": nw.SubnetID, + } + nwInterfaces[i] = l + } + return nwInterfaces +} + +func flattenVlans(list []containerv1.Vlan) []map[string]interface{} { + vlans := make([]map[string]interface{}, len(list)) + for i, vlanR := range list { + subnets := make([]map[string]interface{}, len(vlanR.Subnets)) + for j, subnetR := range vlanR.Subnets { + subnet := make(map[string]interface{}) + subnet["id"] = subnetR.ID + subnet["cidr"] = subnetR.Cidr + subnet["is_byoip"] = subnetR.IsByOIP + subnet["is_public"] = subnetR.IsPublic + ips := make([]string, len(subnetR.Ips)) + for k, ip := range subnetR.Ips { + ips[k] = ip + } + subnet["ips"] = ips + subnets[j] = subnet + } + l := map[string]interface{}{ + "id": vlanR.ID, + "subnets": subnets, + } + vlans[i] = l + } + return vlans +} + +func flattenIcdGroups(grouplist icdv4.GroupList) []map[string]interface{} { + groups := make([]map[string]interface{}, len(grouplist.Groups)) + for i, group := range grouplist.Groups { + memorys := make([]map[string]interface{}, 1) + memory := make(map[string]interface{}) + memory["units"] = group.Memory.Units + memory["allocation_mb"] = group.Memory.AllocationMb + memory["minimum_mb"] = group.Memory.MinimumMb + memory["step_size_mb"] = group.Memory.StepSizeMb + memory["is_adjustable"] = group.Memory.IsAdjustable + memory["can_scale_down"] = group.Memory.CanScaleDown + memorys[0] = memory + + cpus := make([]map[string]interface{}, 1) + cpu := make(map[string]interface{}) + cpu["units"] = group.Cpu.Units + cpu["allocation_count"] = group.Cpu.AllocationCount + cpu["minimum_count"] = group.Cpu.MinimumCount + cpu["step_size_count"] = group.Cpu.StepSizeCount + cpu["is_adjustable"] = group.Cpu.IsAdjustable + cpu["can_scale_down"] = group.Cpu.CanScaleDown + cpus[0] = cpu + + disks := make([]map[string]interface{}, 1) + disk := make(map[string]interface{}) + disk["units"] = group.Disk.Units + disk["allocation_mb"] = group.Disk.AllocationMb 
+ disk["minimum_mb"] = group.Disk.MinimumMb + disk["step_size_mb"] = group.Disk.StepSizeMb + disk["is_adjustable"] = group.Disk.IsAdjustable + disk["can_scale_down"] = group.Disk.CanScaleDown + disks[0] = disk + + l := map[string]interface{}{ + "group_id": group.Id, + "count": group.Count, + "memory": memorys, + "cpu": cpus, + "disk": disks, + } + groups[i] = l + } + return groups +} + +func normalizeJSONString(jsonString interface{}) (string, error) { + var j interface{} + if jsonString == nil || jsonString.(string) == "" { + return "", nil + } + s := jsonString.(string) + err := json.Unmarshal([]byte(s), &j) + if err != nil { + return s, err + } + bytes, err := json.Marshal(j) + if err != nil { + return "", err + } + return string(bytes[:]), nil +} + +func expandAnnotations(annotations string) (whisk.KeyValueArr, error) { + var result whisk.KeyValueArr + dc := json.NewDecoder(strings.NewReader(annotations)) + dc.UseNumber() + err := dc.Decode(&result) + return result, err +} + +func flattenAnnotations(in whisk.KeyValueArr) (string, error) { + b, err := json.Marshal(in) + if err != nil { + return "", err + } + return string(b[:]), nil +} + +func expandParameters(annotations string) (whisk.KeyValueArr, error) { + var result whisk.KeyValueArr + dc := json.NewDecoder(strings.NewReader(annotations)) + dc.UseNumber() + err := dc.Decode(&result) + return result, err +} + +func flattenParameters(in whisk.KeyValueArr) (string, error) { + b, err := json.Marshal(in) + if err != nil { + return "", err + } + return string(b[:]), nil +} + +func expandLimits(l []interface{}) *whisk.Limits { + if len(l) == 0 || l[0] == nil { + return &whisk.Limits{} + } + in := l[0].(map[string]interface{}) + obj := &whisk.Limits{ + Timeout: ptrToInt(in["timeout"].(int)), + Memory: ptrToInt(in["memory"].(int)), + Logsize: ptrToInt(in["log_size"].(int)), + } + return obj +} + +func flattenActivityTrack(in *resourceconfigurationv1.ActivityTracking) []interface{} { + + att := make(map[string]interface{}) + if in != nil { + if in.ReadDataEvents != nil { + att["read_data_events"] = *in.ReadDataEvents + } + if in.WriteDataEvents != nil { + att["write_data_events"] = *in.WriteDataEvents + } + if in.ActivityTrackerCrn != nil { + att["activity_tracker_crn"] = *in.ActivityTrackerCrn + } + } + return []interface{}{att} +} + +func flattenMetricsMonitor(in *resourceconfigurationv1.MetricsMonitoring) []interface{} { + att := make(map[string]interface{}) + if in != nil { + if in.UsageMetricsEnabled != nil { + att["usage_metrics_enabled"] = *in.UsageMetricsEnabled + } + if in.MetricsMonitoringCrn != nil { + att["metrics_monitoring_crn"] = *in.MetricsMonitoringCrn + } + if in.RequestMetricsEnabled != nil { + att["request_metrics_enabled"] = *in.RequestMetricsEnabled + } + } + return []interface{}{att} +} + +func archiveRuleGet(in []*s3.LifecycleRule) []interface{} { + rules := make([]interface{}, 0, len(in)) + for _, r := range in { + // Checking this is not an expire_rule. 
LifeCycle rules are either archive or expire + if r.Expiration == nil { + rule := make(map[string]interface{}) + + if r.Status != nil { + if *r.Status == "Enabled" { + rule["enable"] = true + + } else { + rule["enable"] = false + } + + } + if r.ID != nil { + rule["rule_id"] = *r.ID + } + + for _, transition := range r.Transitions { + if transition.Days != nil { + rule["days"] = int(*transition.Days) + } + if transition.StorageClass != nil { + rule["type"] = *transition.StorageClass + } + } + + rules = append(rules, rule) + } + } + return rules +} + +func expireRuleGet(in []*s3.LifecycleRule) []interface{} { + rules := make([]interface{}, 0, len(in)) + for _, r := range in { + if r.Expiration != nil { + rule := make(map[string]interface{}) + + if r.Status != nil { + if *r.Status == "Enabled" { + rule["enable"] = true + + } else { + rule["enable"] = false + } + } + if r.ID != nil { + rule["rule_id"] = *r.ID + } + + if r.Expiration != nil { + rule["days"] = int(*(r.Expiration).Days) + } + if r.Filter != nil && r.Filter.Prefix != nil { + rule["prefix"] = *(r.Filter).Prefix + } + + rules = append(rules, rule) + } + } + return rules +} + +func retentionRuleGet(in *s3.ProtectionConfiguration) []interface{} { + rules := make([]interface{}, 0, 1) + if in != nil && in.Status != nil && *in.Status == "COMPLIANCE" { + protectConfig := make(map[string]interface{}) + if in.DefaultRetention != nil { + protectConfig["default"] = int(*(in.DefaultRetention).Days) + } + if in.MaximumRetention != nil { + protectConfig["maximum"] = int(*(in.MaximumRetention).Days) + } + if in.MinimumRetention != nil { + protectConfig["minimum"] = int(*(in.MinimumRetention).Days) + } + if in.EnablePermanentRetention != nil { + protectConfig["permanent"] = *in.EnablePermanentRetention + } + rules = append(rules, protectConfig) + } + return rules +} + +func flattenCosObejctVersioning(in *s3.GetBucketVersioningOutput) []interface{} { + versioning := make([]interface{}, 0, 1) + if in != nil { + if in.Status != nil { + att := make(map[string]interface{}) + if *in.Status == "Enabled" { + att["enable"] = true + } else { + att["enable"] = false + } + versioning = append(versioning, att) + } + } + return versioning +} + +func flattenLimits(in *whisk.Limits) []interface{} { + att := make(map[string]interface{}) + if in.Timeout != nil { + att["timeout"] = *in.Timeout + } + if in.Memory != nil { + att["memory"] = *in.Memory + } + if in.Logsize != nil { + att["log_size"] = *in.Logsize + } + return []interface{}{att} +} + +func expandExec(execs []interface{}) *whisk.Exec { + var code string + var document []byte + for _, exec := range execs { + e, _ := exec.(map[string]interface{}) + code_path := e["code_path"].(string) + if code_path != "" { + ext := path.Ext(code_path) + if strings.ToLower(ext) == ".zip" { + data, err := ioutil.ReadFile(code_path) + if err != nil { + log.Println("Error reading file", err) + return &whisk.Exec{} + } + sEnc := b64.StdEncoding.EncodeToString(data) + code = sEnc + } else { + data, err := ioutil.ReadFile(code_path) + if err != nil { + log.Println("Error reading file", err) + return &whisk.Exec{} + } + document = data + code = string(document) + } + } else { + code = e["code"].(string) + } + obj := &whisk.Exec{ + Image: e["image"].(string), + Init: e["init"].(string), + Code: ptrToString(code), + Kind: e["kind"].(string), + Main: e["main"].(string), + Components: expandStringList(e["components"].([]interface{})), + } + return obj + } + + return &whisk.Exec{} +} + +func flattenExec(in *whisk.Exec, d 
*schema.ResourceData) []interface{} { + code_data := 4194304 // length of 'code' parameter should be always <= 4MB data + att := make(map[string]interface{}) + // open-whisk SDK will not return the value for code_path + // Hence using d.GetOk method to setback the code_path value. + if cPath, ok := d.GetOk("exec.0.code_path"); ok { + att["code_path"] = cPath.(string) + } + if in.Image != "" { + att["image"] = in.Image + } + if in.Init != "" { + att["init"] = in.Init + } + if in != nil && in.Code != nil && len(*in.Code) <= code_data { + att["code"] = *in.Code + } + if in.Kind != "" { + att["kind"] = in.Kind + } + if in.Main != "" { + att["main"] = in.Main + } + + if len(in.Components) > 0 { + att["components"] = flattenStringList(in.Components) + } + + return []interface{}{att} +} + +func ptrToInt(i int) *int { + return &i +} + +func ptrToString(s string) *string { + return &s +} + +func intValue(i64 *int64) (i int) { + if i64 != nil { + i = int(*i64) + } + return +} + +func float64Value(f32 *float32) (f float64) { + if f32 != nil { + f = float64(*f32) + } + return +} + +func dateToString(d *strfmt.Date) (s string) { + if d != nil { + s = d.String() + } + return +} + +func dateTimeToString(dt *strfmt.DateTime) (s string) { + if dt != nil { + s = dt.String() + } + return +} + +func filterActionAnnotations(in whisk.KeyValueArr) (string, error) { + noExec := make(whisk.KeyValueArr, 0, len(in)) + for _, v := range in { + if v.Key == "exec" { + continue + } + noExec = append(noExec, v) + } + + return flattenAnnotations(noExec) +} + +func filterActionParameters(in whisk.KeyValueArr) (string, error) { + noAction := make(whisk.KeyValueArr, 0, len(in)) + for _, v := range in { + if v.Key == "_actions" { + continue + } + noAction = append(noAction, v) + } + return flattenParameters(noAction) +} + +func filterInheritedAnnotations(inheritedAnnotations, annotations whisk.KeyValueArr) whisk.KeyValueArr { + userDefinedAnnotations := make(whisk.KeyValueArr, 0) + for _, a := range annotations { + insert := false + if a.Key == "binding" || a.Key == "exec" { + insert = false + break + } + for _, b := range inheritedAnnotations { + if a.Key == b.Key && reflect.DeepEqual(a.Value, b.Value) { + insert = false + break + } + insert = true + } + if insert { + userDefinedAnnotations = append(userDefinedAnnotations, a) + } + } + return userDefinedAnnotations +} + +func filterInheritedParameters(inheritedParameters, parameters whisk.KeyValueArr) whisk.KeyValueArr { + userDefinedParameters := make(whisk.KeyValueArr, 0) + for _, p := range parameters { + insert := false + if p.Key == "_actions" { + insert = false + break + } + for _, b := range inheritedParameters { + if p.Key == b.Key && reflect.DeepEqual(p.Value, b.Value) { + insert = false + break + } + insert = true + } + if insert { + userDefinedParameters = append(userDefinedParameters, p) + } + + } + return userDefinedParameters +} + +func isEmpty(object interface{}) bool { + //First check normal definitions of empty + if object == nil { + return true + } else if object == "" { + return true + } else if object == false { + return true + } + + //Then see if it's a struct + if reflect.ValueOf(object).Kind() == reflect.Struct { + // and create an empty copy of the struct object to compare against + empty := reflect.New(reflect.TypeOf(object)).Elem().Interface() + if reflect.DeepEqual(object, empty) { + return true + } + } + return false +} + +func filterTriggerAnnotations(in whisk.KeyValueArr) (string, error) { + noFeed := make(whisk.KeyValueArr, 0, len(in)) + for _, 
v := range in { + if v.Key == "feed" { + continue + } + noFeed = append(noFeed, v) + } + return flattenParameters(noFeed) +} + +func flattenFeed(feedName string) []interface{} { + att := make(map[string]interface{}) + att["name"] = feedName + att["parameters"] = "[]" + return []interface{}{att} +} + +func flattenGatewayVlans(list []datatypes.Network_Gateway_Vlan) []map[string]interface{} { + vlans := make([]map[string]interface{}, len(list)) + for i, ele := range list { + vlan := make(map[string]interface{}) + vlan["bypass"] = *ele.BypassFlag + vlan["network_vlan_id"] = *ele.NetworkVlanId + vlan["vlan_id"] = *ele.Id + vlans[i] = vlan + } + return vlans +} + +func flattenGatewayMembers(d *schema.ResourceData, list []datatypes.Network_Gateway_Member) []map[string]interface{} { + members := make([]map[string]interface{}, len(list)) + for i, ele := range list { + hardware := *ele.Hardware + member := make(map[string]interface{}) + member["member_id"] = *ele.HardwareId + member["hostname"] = *hardware.Hostname + member["domain"] = *hardware.Domain + if hardware.Notes != nil { + member["notes"] = *hardware.Notes + } + if hardware.Datacenter != nil { + member["datacenter"] = *hardware.Datacenter.Name + } + if hardware.PrimaryNetworkComponent.MaxSpeed != nil { + member["network_speed"] = *hardware.PrimaryNetworkComponent.MaxSpeed + } + member["redundant_network"] = false + member["unbonded_network"] = false + backendNetworkComponent := ele.Hardware.BackendNetworkComponents + + if len(backendNetworkComponent) > 2 && ele.Hardware.PrimaryBackendNetworkComponent != nil { + if *hardware.PrimaryBackendNetworkComponent.RedundancyEnabledFlag { + member["redundant_network"] = true + } else { + member["unbonded_network"] = true + } + } + tagReferences := ele.Hardware.TagReferences + tagReferencesLen := len(tagReferences) + if tagReferencesLen > 0 { + tags := make([]interface{}, 0, tagReferencesLen) + for _, tagRef := range tagReferences { + tags = append(tags, *tagRef.Tag.Name) + } + member["tags"] = schema.NewSet(schema.HashString, tags) + } + + member["redundant_power_supply"] = false + + if *hardware.PowerSupplyCount == 2 { + member["redundant_power_supply"] = true + } + member["memory"] = *hardware.MemoryCapacity + if !(*hardware.PrivateNetworkOnlyFlag) { + member["public_vlan_id"] = *hardware.NetworkVlans[1].Id + } + member["private_vlan_id"] = *hardware.NetworkVlans[0].Id + + if hardware.PrimaryIpAddress != nil { + member["public_ipv4_address"] = *hardware.PrimaryIpAddress + } + if hardware.PrimaryBackendIpAddress != nil { + member["private_ipv4_address"] = *hardware.PrimaryBackendIpAddress + } + member["ipv6_enabled"] = false + if ele.Hardware.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord != nil { + member["ipv6_enabled"] = true + member["ipv6_address"] = *hardware.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.IpAddress + } + + member["private_network_only"] = *hardware.PrivateNetworkOnlyFlag + userData := hardware.UserData + if len(userData) > 0 && userData[0].Value != nil { + member["user_metadata"] = *userData[0].Value + } + members[i] = member + } + return members +} + +func flattenDisks(result datatypes.Virtual_Guest) []int { + var out = make([]int, 0) + + for _, v := range result.BlockDevices { + // skip 1,7 which is reserved for the swap disk and metadata + _, ok := sl.GrabOk(result, "BillingItem.OrderItem.Preset") + if ok { + if *v.Device != "1" && *v.Device != "7" && *v.Device != "0" { + capacity, ok := sl.GrabOk(v, "DiskImage.Capacity") + + if ok { + out = append(out, 
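+ // Only data-disk capacities are collected; devices 1 and 7 (swap and
+ // metadata) are skipped above, as is device 0 when the guest was ordered
+ // from a billing preset.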
capacity.(int)) + } + + } + } else { + if *v.Device != "1" && *v.Device != "7" { + capacity, ok := sl.GrabOk(v, "DiskImage.Capacity") + + if ok { + out = append(out, capacity.(int)) + } + } + } + } + + return out +} + +func flattenDisksForWindows(result datatypes.Virtual_Guest) []int { + var out = make([]int, 0) + + for _, v := range result.BlockDevices { + // skip 1,7 which is reserved for the swap disk and metadata + _, ok := sl.GrabOk(result, "BillingItem.OrderItem.Preset") + if ok { + if *v.Device != "1" && *v.Device != "7" && *v.Device != "0" && *v.Device != "3" { + capacity, ok := sl.GrabOk(v, "DiskImage.Capacity") + + if ok { + out = append(out, capacity.(int)) + } + } + } else { + if *v.Device != "1" && *v.Device != "7" && *v.Device != "3" { + capacity, ok := sl.GrabOk(v, "DiskImage.Capacity") + + if ok { + out = append(out, capacity.(int)) + } + } + } + } + + return out +} + +func filterResourceKeyParameters(params map[string]interface{}) map[string]interface{} { + delete(params, "role_crn") + return params +} + +func idParts(id string) ([]string, error) { + if strings.Contains(id, "/") { + parts := strings.Split(id, "/") + return parts, nil + } + return []string{}, fmt.Errorf("The given id %s does not contain a '/'; please check the documentation on how to provide the id during the import command", id) +} + +func sepIdParts(id string, separator string) ([]string, error) { + if strings.Contains(id, separator) { + parts := strings.Split(id, separator) + return parts, nil + } + return []string{}, fmt.Errorf("The given id %s does not contain the separator %s; please check the documentation on how to provide the id during the import command", id, separator) +} + +func vmIdParts(id string) ([]string, error) { + parts := strings.Split(id, "/") + return parts, nil +} + +func cfIdParts(id string) ([]string, error) { + parts := strings.Split(id, ":") + return parts, nil +} + +// getCustomAttributes will return all attributes which are not system defined +func getCustomAttributes(r iampolicymanagementv1.PolicyResource) []iampolicymanagementv1.ResourceAttribute { + attributes := []iampolicymanagementv1.ResourceAttribute{} + for _, a := range r.Attributes { + switch *a.Name { + case "accessGroupId": + case "accountId": + case "organizationId": + case "spaceId": + case "region": + case "resource": + case "resourceType": + case "resourceGroupId": + case "serviceType": + case "serviceName": + case "serviceInstance": + default: + attributes = append(attributes, a) + } + } + return attributes +} + +func flattenPolicyResource(list []iampolicymanagementv1.PolicyResource) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + l := map[string]interface{}{ + "service": getResourceAttribute("serviceName", i), + "resource_instance_id": getResourceAttribute("serviceInstance", i), + "region": getResourceAttribute("region", i), + "resource_type": getResourceAttribute("resourceType", i), + "resource": getResourceAttribute("resource", i), + "resource_group_id": getResourceAttribute("resourceGroupId", i), + } + customAttributes := getCustomAttributes(i) + if len(customAttributes) > 0 { + out := make(map[string]string) + for _, a := range customAttributes { + out[*a.Name] = *a.Value + } + l["attributes"] = out + } + + result = append(result, l) + } + return result +} +func flattenPolicyResourceAttributes(list []iampolicymanagementv1.PolicyResource) []map[string]interface{} { + result := make([]map[string]interface{}, 0) + for _, i := range list { + for _, a := range i.Attributes { + if *a.Name != 
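+ // accountId is omitted here: it is implied by the policy itself rather
+ // than being a user-set resource attribute.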
"accountId" { + l := map[string]interface{}{ + "name": a.Name, + "value": a.Value, + "operator": a.Operator, + } + result = append(result, l) + } + } + } + return result +} + +// Cloud Internet Services +func flattenHealthMonitors(list []datatypes.Network_LBaaS_Listener) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + ports := make([]int, 0, 0) + for _, i := range list { + l := map[string]interface{}{ + "protocol": *i.DefaultPool.Protocol, + "port": *i.DefaultPool.ProtocolPort, + "interval": *i.DefaultPool.HealthMonitor.Interval, + "max_retries": *i.DefaultPool.HealthMonitor.MaxRetries, + "timeout": *i.DefaultPool.HealthMonitor.Timeout, + "monitor_id": *i.DefaultPool.HealthMonitor.Uuid, + } + + if i.DefaultPool.HealthMonitor.UrlPath != nil { + l["url_path"] = *i.DefaultPool.HealthMonitor.UrlPath + } + + if !contains(ports, *i.DefaultPool.ProtocolPort) { + result = append(result, l) + } + + ports = append(ports, *i.DefaultPool.ProtocolPort) + } + return result +} + +func contains(s []int, e int) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +func flattenMembersData(list []models.AccessGroupMemberV2, users []usermanagementv2.UserInfo, serviceids []models.ServiceID) ([]string, []string) { + var ibmid []string + var serviceid []string + for _, m := range list { + if m.Type == iamuumv2.AccessGroupMemberUser { + for _, user := range users { + if user.IamID == m.ID { + ibmid = append(ibmid, user.Email) + break + } + } + } else { + + for _, srid := range serviceids { + if srid.IAMID == m.ID { + serviceid = append(serviceid, srid.UUID) + break + } + } + + } + + } + return ibmid, serviceid +} + +func flattenAccessGroupMembers(list []models.AccessGroupMemberV2, users []usermanagementv2.UserInfo, serviceids []models.ServiceID) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, m := range list { + var value, vtype string + if m.Type == iamuumv2.AccessGroupMemberUser { + vtype = iamuumv2.AccessGroupMemberUser + for _, user := range users { + if user.IamID == m.ID { + value = user.Email + break + } + } + } else { + + vtype = iamuumv1.AccessGroupMemberService + for _, srid := range serviceids { + if srid.IAMID == m.ID { + value = srid.UUID + break + } + } + + } + l := map[string]interface{}{ + "iam_id": value, + "type": vtype, + } + result = append(result, l) + } + return result +} + +func flattenUserIds(accountID string, users []string, meta interface{}) ([]string, error) { + userids := make([]string, len(users)) + for i, name := range users { + iamID, err := getIBMUniqueId(accountID, name, meta) + if err != nil { + return nil, err + } + userids[i] = iamID + } + return userids, nil +} + +func flattenServiceIds(services []string, meta interface{}) ([]string, error) { + serviceids := make([]string, len(services)) + for i, id := range services { + serviceID, err := getServiceID(id, meta) + if err != nil { + return nil, err + } + serviceids[i] = serviceID.IAMID + } + return serviceids, nil +} + +func expandUsers(userList *schema.Set) (users []icdv4.User) { + for _, iface := range userList.List() { + userEl := iface.(map[string]interface{}) + user := icdv4.User{ + UserName: userEl["name"].(string), + Password: userEl["password"].(string), + } + users = append(users, user) + } + return +} + +// IBM Cloud Databases +func flattenConnectionStrings(cs []CsEntry) []map[string]interface{} { + entries := make([]map[string]interface{}, len(cs), len(cs)) + for i, csEntry := range cs { + l := 
map[string]interface{}{ + "name": csEntry.Name, + "password": csEntry.Password, + "composed": csEntry.Composed, + "certname": csEntry.CertName, + "certbase64": csEntry.CertBase64, + "queryoptions": csEntry.QueryOptions, + "scheme": csEntry.Scheme, + "path": csEntry.Path, + "database": csEntry.Database, + } + hosts := csEntry.Hosts + hostsList := make([]map[string]interface{}, len(hosts), len(hosts)) + for j, host := range hosts { + z := map[string]interface{}{ + "hostname": host.HostName, + "port": strconv.Itoa(host.Port), + } + hostsList[j] = z + } + l["hosts"] = hostsList + var queryOpts string + if len(csEntry.QueryOptions) != 0 { + queryOpts = "?" + count := 0 + for k, v := range csEntry.QueryOptions { + if count >= 1 { + queryOpts = queryOpts + "&" + } + queryOpts = queryOpts + fmt.Sprintf("%v", k) + "=" + fmt.Sprintf("%v", v) + count++ + } + } else { + queryOpts = "" + } + l["queryoptions"] = queryOpts + entries[i] = l + } + + return entries +} + +func flattenPhaseOneAttributes(vpn *datatypes.Network_Tunnel_Module_Context) []map[string]interface{} { + phaseoneAttributesMap := make([]map[string]interface{}, 0, 1) + phaseoneAttributes := make(map[string]interface{}) + phaseoneAttributes["authentication"] = *vpn.PhaseOneAuthentication + phaseoneAttributes["encryption"] = *vpn.PhaseOneEncryption + phaseoneAttributes["diffie_hellman_group"] = *vpn.PhaseOneDiffieHellmanGroup + phaseoneAttributes["keylife"] = *vpn.PhaseOneKeylife + phaseoneAttributesMap = append(phaseoneAttributesMap, phaseoneAttributes) + return phaseoneAttributesMap +} + +func flattenPhaseTwoAttributes(vpn *datatypes.Network_Tunnel_Module_Context) []map[string]interface{} { + phasetwoAttributesMap := make([]map[string]interface{}, 0, 1) + phasetwoAttributes := make(map[string]interface{}) + phasetwoAttributes["authentication"] = *vpn.PhaseTwoAuthentication + phasetwoAttributes["encryption"] = *vpn.PhaseTwoEncryption + phasetwoAttributes["diffie_hellman_group"] = *vpn.PhaseTwoDiffieHellmanGroup + phasetwoAttributes["keylife"] = *vpn.PhaseTwoKeylife + phasetwoAttributesMap = append(phasetwoAttributesMap, phasetwoAttributes) + return phasetwoAttributesMap +} + +func flattenaddressTranslation(vpn *datatypes.Network_Tunnel_Module_Context, fwID int) []map[string]interface{} { + addressTranslationMap := make([]map[string]interface{}, 0, 1) + addressTranslationAttributes := make(map[string]interface{}) + for _, networkAddressTranslation := range vpn.AddressTranslations { + if *networkAddressTranslation.NetworkTunnelContext.Id == fwID { + addressTranslationAttributes["remote_ip_adress"] = *networkAddressTranslation.CustomerIpAddress + addressTranslationAttributes["internal_ip_adress"] = *networkAddressTranslation.InternalIpAddress + addressTranslationAttributes["notes"] = *networkAddressTranslation.Notes + } + } + addressTranslationMap = append(addressTranslationMap, addressTranslationAttributes) + return addressTranslationMap +} + +func flattenremoteSubnet(vpn *datatypes.Network_Tunnel_Module_Context) []map[string]interface{} { + remoteSubnetMap := make([]map[string]interface{}, 0, 1) + remoteSubnetAttributes := make(map[string]interface{}) + for _, customerSubnet := range vpn.CustomerSubnets { + remoteSubnetAttributes["remote_ip_adress"] = customerSubnet.NetworkIdentifier + remoteSubnetAttributes["remote_ip_cidr"] = customerSubnet.Cidr + remoteSubnetAttributes["account_id"] = customerSubnet.AccountId + } + remoteSubnetMap = append(remoteSubnetMap, remoteSubnetAttributes) + return remoteSubnetMap +} + +// IBM Cloud Databases 
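+// expandWhitelist/flattenWhitelist convert between the schema.Set used by the
+// resource schema and the icdv4 SDK's WhitelistEntry values; each set element
+// is a map with "address" and "description" keys, e.g. (illustrative values):
+//
+//   {"address": "10.0.0.0/24", "description": "office"}
+//     <->  icdv4.WhitelistEntry{Address: "10.0.0.0/24", Description: "office"}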
+func expandWhitelist(whiteList *schema.Set) (whitelist []icdv4.WhitelistEntry) { + for _, iface := range whiteList.List() { + wlItem := iface.(map[string]interface{}) + wlEntry := icdv4.WhitelistEntry{ + Address: wlItem["address"].(string), + Description: wlItem["description"].(string), + } + whitelist = append(whitelist, wlEntry) + } + return +} + +// Cloud Internet Services +func flattenWhitelist(whitelist icdv4.Whitelist) []map[string]interface{} { + entries := make([]map[string]interface{}, len(whitelist.WhitelistEntrys), len(whitelist.WhitelistEntrys)) + for i, whitelistEntry := range whitelist.WhitelistEntrys { + l := map[string]interface{}{ + "address": whitelistEntry.Address, + "description": whitelistEntry.Description, + } + entries[i] = l + } + return entries +} + +func expandStringMap(inVal interface{}) map[string]string { + outVal := make(map[string]string) + if inVal == nil { + return outVal + } + for k, v := range inVal.(map[string]interface{}) { + strValue := fmt.Sprintf("%v", v) + outVal[k] = strValue + } + return outVal +} + +// Cloud Internet Services +func convertTfToCisThreeVar(glbTfId string) (glbId string, zoneId string, cisId string, err error) { + g := strings.SplitN(glbTfId, ":", 3) + glbId = g[0] + if len(g) > 2 { + zoneId = g[1] + cisId = g[2] + } else { + err = errors.New("cis_id or zone_id not passed") + return + } + return +} +func convertCisToTfFourVar(firewallType string, ID string, ID2 string, cisID string) (buildID string) { + if ID != "" { + buildID = firewallType + ":" + ID + ":" + ID2 + ":" + cisID + } else { + buildID = "" + } + return +} +func convertTfToCisFourVar(TfID string) (firewallType string, ID string, zoneID string, cisID string, err error) { + g := strings.SplitN(TfID, ":", 4) + firewallType = g[0] + if len(g) > 3 { + ID = g[1] + zoneID = g[2] + cisID = g[3] + } else { + err = errors.New("Id or cis_id or zone_id not passed") + return + } + return +} + +// Cloud Internet Services +func convertCisToTfThreeVar(Id string, Id2 string, cisId string) (buildId string) { + if Id != "" { + buildId = Id + ":" + Id2 + ":" + cisId + } else { + buildId = "" + } + return +} + +// Cloud Internet Services +func convertTfToCisTwoVarSlice(tfIds []string) (Ids []string, cisId string, err error) { + for _, item := range tfIds { + Id := strings.SplitN(item, ":", 2) + if len(Id) < 2 { + err = errors.New("cis_id not passed") + return + } + Ids = append(Ids, Id[0]) + cisId = Id[1] + } + return +} + +// Cloud Internet Services +func convertCisToTfTwoVarSlice(Ids []string, cisId string) (buildIds []string) { + for _, Id := range Ids { + buildIds = append(buildIds, Id+":"+cisId) + } + return +} + +// Cloud Internet Services +func convertCisToTfTwoVar(Id string, cisId string) (buildId string) { + if Id != "" { + buildId = Id + ":" + cisId + } else { + buildId = "" + } + return +} + +// Cloud Internet Services +func convertTftoCisTwoVar(tfId string) (Id string, cisId string, err error) { + g := strings.SplitN(tfId, ":", 2) + Id = g[0] + if len(g) > 1 { + cisId = g[1] + } else { + err = errors.New(" cis_id or zone_id not passed") + return + } + return +} + +// Cloud Internet Services +func transformToIBMCISDnsData(recordType string, id string, value interface{}) (newValue interface{}, err error) { + switch { + case id == "flags": + switch { + case strings.ToUpper(recordType) == "SRV", + strings.ToUpper(recordType) == "CAA", + strings.ToUpper(recordType) == "DNSKEY": + newValue, err = strconv.Atoi(value.(string)) + case strings.ToUpper(recordType) == "NAPTR": + newValue, 
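+ // NAPTR flags stay strings (e.g. "S", "A", "U", "P"), unlike the numeric
+ // SRV/CAA/DNSKEY flags parsed with strconv.Atoi above.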
err = value.(string), nil + } + case stringInSlice(id, dnsTypeIntFields): + newValue, err = strconv.Atoi(value.(string)) + case stringInSlice(id, dnsTypeFloatFields): + newValue, err = strconv.ParseFloat(value.(string), 32) + default: + newValue, err = value.(string), nil + } + + return +} + +func indexOf(element string, data []string) int { + for k, v := range data { + if element == v { + return k + } + } + return -1 //not found. +} + +func rcInstanceExists(resourceId string, resourceType string, meta interface{}) (bool, error) { + // Check to see if Resource Manager instance exists + rsConClient, err := meta.(ClientSession).ResourceControllerAPI() + if err != nil { + return true, nil + } + exists := true + instance, err := rsConClient.ResourceServiceInstance().GetInstance(resourceId) + if err != nil { + if strings.Contains(err.Error(), "Object not found") || + strings.Contains(err.Error(), "status code: 404") { + exists = false + } else { + return true, fmt.Errorf("Error checking resource instance exists: %s", err) + } + } else { + if strings.Contains(instance.State, "removed") { + exists = false + } + } + if exists { + return true, nil + } + // Implement when pointer to terraform.State available + // If rcInstance is now in removed state, set TF state to removed + // s := *terraform.State + // for _, r := range s.RootModule().Resources { + // if r.Type != resourceType { + // continue + // } + // if r.Primary.ID == resourceId { + // r.Primary.Set("status", "removed") + // } + // } + return false, nil +} + +// Implement when pointer to terraform.State available +// func resourceInstanceExistsTf(resourceId string, resourceType string) bool { +// // Check TF state to see if Cloud resource instance has already been removed +// s := *terraform.State +// for _, r := range s.RootModule().Resources { +// if r.Type != resourceType { +// continue +// } +// if r.Primary.ID == resourceId { +// if strings.Contains(r.Primary.Attributes["status"], "removed") { +// return false +// } +// } +// } +// return true +// } + +// convert CRN to be url safe +func EscapeUrlParm(urlParm string) string { + if strings.Contains(urlParm, "/") { + newUrlParm := url.PathEscape(urlParm) + return newUrlParm + } + return urlParm +} + +func GetTags(d *schema.ResourceData, meta interface{}) error { + resourceID := d.Id() + gtClient, err := meta.(ClientSession).GlobalTaggingAPI() + if err != nil { + return fmt.Errorf("Error getting global tagging client settings: %s", err) + } + taggingResult, err := gtClient.Tags().GetTags(resourceID) + if err != nil { + return err + } + var taglist []string + for _, item := range taggingResult.Items { + taglist = append(taglist, item.Name) + } + d.Set("tags", flattenStringList(taglist)) + return nil +} + +func UpdateTags(d *schema.ResourceData, meta interface{}) error { + resourceID := d.Id() + gtClient, err := meta.(ClientSession).GlobalTaggingAPI() + if err != nil { + return fmt.Errorf("Error getting global tagging client settings: %s", err) + } + oldList, newList := d.GetChange("tags") + if oldList == nil { + oldList = new(schema.Set) + } + if newList == nil { + newList = new(schema.Set) + } + olds := oldList.(*schema.Set) + news := newList.(*schema.Set) + removeInt := olds.Difference(news).List() + addInt := news.Difference(olds).List() + add := make([]string, len(addInt)) + for i, v := range addInt { + add[i] = fmt.Sprint(v) + } + remove := make([]string, len(removeInt)) + for i, v := range removeInt { + remove[i] = fmt.Sprint(v) + } + + if len(add) > 0 { + _, err := 
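+ // The attach/detach lists are the set differences computed above:
+ // news - olds is attached here first; olds - news is then detached, and
+ // each detached tag is also deleted so it does not linger in the account.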
gtClient.Tags().AttachTags(resourceID, add) + if err != nil { + return fmt.Errorf("Error updating database tags %v : %s", add, err) + } + } + if len(remove) > 0 { + _, err := gtClient.Tags().DetachTags(resourceID, remove) + if err != nil { + return fmt.Errorf("Error detaching database tags %v: %s", remove, err) + } + for _, v := range remove { + _, err := gtClient.Tags().DeleteTag(v) + if err != nil { + return fmt.Errorf("Error deleting database tag %v: %s", v, err) + } + } + } + return nil +} + +func GetGlobalTagsUsingCRN(meta interface{}, resourceID, resourceType, tagType string) (*schema.Set, error) { + + gtClient, err := meta.(ClientSession).GlobalTaggingAPIv1() + if err != nil { + return nil, fmt.Errorf("Error getting global tagging client settings: %s", err) + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return nil, err + } + accountID := userDetails.userAccount + + var providers []string + if strings.Contains(resourceType, "SoftLayer_") { + providers = []string{"ims"} + } + + ListTagsOptions := &globaltaggingv1.ListTagsOptions{} + ListTagsOptions.AttachedTo = &resourceID + ListTagsOptions.Providers = providers + if len(tagType) > 0 { + ListTagsOptions.TagType = ptrToString(tagType) + + if tagType == service { + ListTagsOptions.AccountID = ptrToString(accountID) + } + } + taggingResult, _, err := gtClient.ListTags(ListTagsOptions) + if err != nil { + return nil, err + } + var taglist []string + for _, item := range taggingResult.Items { + taglist = append(taglist, *item.Name) + } + log.Println("tagList: ", taglist) + return newStringSet(resourceIBMVPCHash, taglist), nil +} + +func UpdateGlobalTagsUsingCRN(oldList, newList interface{}, meta interface{}, resourceID, resourceType, tagType string) error { + gtClient, err := meta.(ClientSession).GlobalTaggingAPIv1() + if err != nil { + return fmt.Errorf("Error getting global tagging client settings: %s", err) + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return err + } + acctID := userDetails.userAccount + + resources := []globaltaggingv1.Resource{} + r := globaltaggingv1.Resource{ResourceID: ptrToString(resourceID), ResourceType: ptrToString(resourceType)} + resources = append(resources, r) + + if oldList == nil { + oldList = new(schema.Set) + } + if newList == nil { + newList = new(schema.Set) + } + olds := oldList.(*schema.Set) + news := newList.(*schema.Set) + removeInt := olds.Difference(news).List() + addInt := news.Difference(olds).List() + add := make([]string, len(addInt)) + for i, v := range addInt { + add[i] = fmt.Sprint(v) + } + remove := make([]string, len(removeInt)) + for i, v := range removeInt { + remove[i] = fmt.Sprint(v) + } + + schematicTags := os.Getenv("IC_ENV_TAGS") + var envTags []string + if schematicTags != "" { + envTags = strings.Split(schematicTags, ",") + add = append(add, envTags...) 
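+ // Tags from the IC_ENV_TAGS environment variable are always appended to
+ // the attach list, so these environment-level tags are reapplied on every
+ // update.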
+ } + + if len(remove) > 0 { + detachTagOptions := &globaltaggingv1.DetachTagOptions{ + Resources: resources, + TagNames: remove, + } + + _, resp, err := gtClient.DetachTag(detachTagOptions) + if err != nil { + return fmt.Errorf("Error detaching database tags %v: %s\n%s", remove, err, resp) + } + for _, v := range remove { + delTagOptions := &globaltaggingv1.DeleteTagOptions{ + TagName: ptrToString(v), + } + _, resp, err := gtClient.DeleteTag(delTagOptions) + if err != nil { + return fmt.Errorf("Error deleting database tag %v: %s\n%s", v, err, resp) + } + } + } + + if len(add) > 0 { + AttachTagOptions := &globaltaggingv1.AttachTagOptions{} + AttachTagOptions.Resources = resources + AttachTagOptions.TagNames = add + if len(tagType) > 0 { + AttachTagOptions.TagType = ptrToString(tagType) + if tagType == service { + AttachTagOptions.AccountID = ptrToString(acctID) + } + } + + _, resp, err := gtClient.AttachTag(AttachTagOptions) + if err != nil { + return fmt.Errorf("Error updating database tags %v : %s\n%s", add, err, resp) + } + } + + return nil +} + +func GetTagsUsingCRN(meta interface{}, resourceCRN string) (*schema.Set, error) { + + gtClient, err := meta.(ClientSession).GlobalTaggingAPI() + if err != nil { + return nil, fmt.Errorf("Error getting global tagging client settings: %s", err) + } + taggingResult, err := gtClient.Tags().GetTags(resourceCRN) + if err != nil { + return nil, err + } + var taglist []string + for _, item := range taggingResult.Items { + taglist = append(taglist, item.Name) + } + log.Println("tagList: ", taglist) + return newStringSet(resourceIBMVPCHash, taglist), nil +} + +func UpdateTagsUsingCRN(oldList, newList interface{}, meta interface{}, resourceCRN string) error { + gtClient, err := meta.(ClientSession).GlobalTaggingAPI() + if err != nil { + return fmt.Errorf("Error getting global tagging client settings: %s", err) + } + if oldList == nil { + oldList = new(schema.Set) + } + if newList == nil { + newList = new(schema.Set) + } + olds := oldList.(*schema.Set) + news := newList.(*schema.Set) + removeInt := olds.Difference(news).List() + addInt := news.Difference(olds).List() + add := make([]string, len(addInt)) + for i, v := range addInt { + add[i] = fmt.Sprint(v) + } + remove := make([]string, len(removeInt)) + for i, v := range removeInt { + remove[i] = fmt.Sprint(v) + } + + schematicTags := os.Getenv("IC_ENV_TAGS") + var envTags []string + if schematicTags != "" { + envTags = strings.Split(schematicTags, ",") + add = append(add, envTags...) 
+ } + + if len(remove) > 0 { + _, err := gtClient.Tags().DetachTags(resourceCRN, remove) + if err != nil { + return fmt.Errorf("Error detaching database tags %v: %s", remove, err) + } + for _, v := range remove { + _, err := gtClient.Tags().DeleteTag(v) + if err != nil { + return fmt.Errorf("Error deleting database tag %v: %s", v, err) + } + } + } + + if len(add) > 0 { + _, err := gtClient.Tags().AttachTags(resourceCRN, add) + if err != nil { + return fmt.Errorf("Error updating database tags %v: %s", add, err) + } + } + + return nil +} + +func getBaseController(meta interface{}) (string, error) { + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return "", err + } + if userDetails != nil && userDetails.cloudName == "staging" { + return stageBaseController, nil + } + return prodBaseController, nil +} + +func flattenSSLCiphers(ciphers []datatypes.Network_LBaaS_SSLCipher) *schema.Set { + c := make([]string, len(ciphers)) + for i, v := range ciphers { + c[i] = *v.Name + } + return newStringSet(schema.HashString, c) +} + +func resourceTagsCustomizeDiff(diff *schema.ResourceDiff) error { + + if diff.Id() != "" && diff.HasChange("tags") { + o, n := diff.GetChange("tags") + oldSet := o.(*schema.Set) + newSet := n.(*schema.Set) + removeInt := oldSet.Difference(newSet).List() + addInt := newSet.Difference(oldSet).List() + if v := os.Getenv("IC_ENV_TAGS"); v != "" { + s := strings.Split(v, ",") + if len(removeInt) == len(s) && len(addInt) == 0 { + fmt.Println("Suppressing the TAG diff") + return diff.Clear("tags") + } + } + } + return nil +} + +func resourceVolumeAttachmentValidate(diff *schema.ResourceDiff) error { + + if volsintf, ok := diff.GetOk("volume_attachments"); ok { + vols := volsintf.([]interface{}) + for volAttIdx := range vols { + volumeid := "volume_attachments." + strconv.Itoa(volAttIdx) + "." + isInstanceTemplateVolAttVol + volumePrototype := "volume_attachments." + strconv.Itoa(volAttIdx) + "." + isInstanceTemplateVolAttVolPrototype + var volIDInterpolated = false + var volumeIdFound = false + if _, volumeIdFound = diff.GetOk(volumeid); !volumeIdFound { + if !diff.NewValueKnown(volumeid) { + volIDInterpolated = true + } + } + _, volPrototypeFound := diff.GetOk(volumePrototype) + + if volPrototypeFound && (volumeIdFound || volIDInterpolated) { + return fmt.Errorf("InstanceTemplate - volume_attachments[%d]: Cannot provide both 'volume' and 'volume_prototype' together.", volAttIdx) + } + if !volPrototypeFound && !volumeIdFound && !volIDInterpolated { + return fmt.Errorf("InstanceTemplate - volume_attachments[%d]: Volume details missing. 
Provide either 'volume' or 'volume_prototype'.", volAttIdx) + } + } + } + + return nil +} + +func flattenRoleData(object []iampolicymanagementv1.Role, roleType string) []map[string]string { + var roles []map[string]string + + for _, item := range object { + role := make(map[string]string) + role["name"] = *item.DisplayName + role["type"] = roleType + role["description"] = *item.Description + roles = append(roles, role) + } + return roles +} + +func flattenCustomRoleData(object []iampolicymanagementv1.CustomRole, roleType string) []map[string]string { + var roles []map[string]string + + for _, item := range object { + role := make(map[string]string) + role["name"] = *item.DisplayName + role["type"] = roleType + role["description"] = *item.Description + roles = append(roles, role) + } + return roles +} + +func flattenActions(object []iampolicymanagementv1.Role) map[string]interface{} { + actions := map[string]interface{}{ + "reader": flattenActionbyDisplayName("Reader", object), + "manager": flattenActionbyDisplayName("Manager", object), + "reader_plus": flattenActionbyDisplayName("ReaderPlus", object), + "writer": flattenActionbyDisplayName("Writer", object), + } + return actions +} + +func flattenActionbyDisplayName(displayName string, object []iampolicymanagementv1.Role) []string { + var actionIDs []string + for _, role := range object { + if *role.DisplayName == displayName { + actionIDs = role.Actions + } + } + return actionIDs +} + +func flattenCatalogRef(object schematics.CatalogInfo) map[string]interface{} { + catalogRef := map[string]interface{}{ + "item_id": object.ItemID, + "item_name": object.ItemName, + "item_url": object.ItemURL, + "offering_version": object.OfferingVersion, + } + return catalogRef +} + +// GetNext ... +func GetNext(next interface{}) string { + if reflect.ValueOf(next).IsNil() { + return "" + } + + u, err := url.Parse(reflect.ValueOf(next).Elem().FieldByName("Href").Elem().String()) + if err != nil { + return "" + } + + q := u.Query() + return q.Get("start") +} + +/* Return the default resource group */ +func defaultResourceGroup(meta interface{}) (string, error) { + rsMangClient, err := meta.(ClientSession).ResourceManagementAPIv2() + if err != nil { + return "", err + } + resourceGroupQuery := managementv2.ResourceGroupQuery{ + Default: true, + } + grpList, err := rsMangClient.ResourceGroup().List(&resourceGroupQuery) + if err != nil { + return "", err + } + if len(grpList) <= 0 { + return "", fmt.Errorf("The default resource group could not be found. 
Make sure you have required permissions to access the resource group.") + } + return grpList[0].ID, nil +} + +func flattenKeyPolicies(policies []kp.Policy) []map[string]interface{} { + policyMap := make([]map[string]interface{}, 0, 1) + rotationMap := make([]map[string]interface{}, 0, 1) + dualAuthMap := make([]map[string]interface{}, 0, 1) + for _, policy := range policies { + policyCRNData := strings.Split(policy.CRN, ":") + policyInstance := map[string]interface{}{ + "id": policyCRNData[9], + "crn": policy.CRN, + "created_by": policy.CreatedBy, + "creation_date": (*(policy.CreatedAt)).String(), + "updated_by": policy.UpdatedBy, + "last_update_date": (*(policy.UpdatedAt)).String(), + } + + if policy.Rotation != nil { + policyInstance["interval_month"] = policy.Rotation.Interval + rotationMap = append(rotationMap, policyInstance) + } else if policy.DualAuth != nil { + policyInstance["enabled"] = *(policy.DualAuth.Enabled) + dualAuthMap = append(dualAuthMap, policyInstance) + } + } + tempMap := map[string]interface{}{ + "rotation": rotationMap, + "dual_auth_delete": dualAuthMap, + } + policyMap = append(policyMap, tempMap) + return policyMap +} + +// IgnoreSystemLabels returns non-IBM tag keys. +func IgnoreSystemLabels(labels map[string]string) map[string]string { + result := make(map[string]string) + + for k, v := range labels { + if strings.HasPrefix(k, SystemIBMLabelPrefix) || + strings.HasPrefix(k, KubernetesLabelPrefix) || + strings.HasPrefix(k, K8sLabelPrefix) { + continue + } + + result[k] = v + } + + return result +} + +// flattenHostLabels .. +func flattenHostLabels(hostLabels []interface{}) map[string]string { + labels := make(map[string]string) + for _, v := range hostLabels { + parts := strings.Split(v.(string), ":") + if parts != nil { + labels[parts[0]] = parts[1] + } + } + + return labels +} + +func flatterSatelliteZones(zones *schema.Set) []string { + zoneList := make([]string, zones.Len()) + for i, v := range zones.List() { + zoneList[i] = fmt.Sprint(v) + } + + return zoneList +} + +// error object +type ServiceErrorResponse struct { + Message string + StatusCode int + Result interface{} +} + +func beautifyError(err error, response *core.DetailedResponse) *ServiceErrorResponse { + var ( + statusCode int + result interface{} + ) + if response != nil { + statusCode = response.StatusCode + result = response.Result + } + return &ServiceErrorResponse{ + Message: err.Error(), + StatusCode: statusCode, + Result: result, + } +} + +func (response *ServiceErrorResponse) String() string { + output, err := json.MarshalIndent(response, "", " ") + if err == nil { + return fmt.Sprintf("%+v\n", string(output)) + } + return fmt.Sprintf("Error : %#v", response) +} + +// IAM Policy Management +func getResourceAttribute(name string, r iampolicymanagementv1.PolicyResource) *string { + for _, a := range r.Attributes { + if *a.Name == name { + return a.Value + } + } + return core.StringPtr("") +} + +func getSubjectAttribute(name string, s iampolicymanagementv1.PolicySubject) *string { + for _, a := range s.Attributes { + if *a.Name == name { + return a.Value + } + } + return core.StringPtr("") +} + +func setResourceAttribute(name *string, value *string, r []iampolicymanagementv1.ResourceAttribute) []iampolicymanagementv1.ResourceAttribute { + for _, a := range r { + if *a.Name == *name { + a.Value = value + return r + } + } + r = append(r, iampolicymanagementv1.ResourceAttribute{ + Name: name, + Value: value, + Operator: core.StringPtr("stringEquals"), + }) + return r +} + +func 
getRolesFromRoleNames(roleNames []string, roles []iampolicymanagementv1.PolicyRole) ([]iampolicymanagementv1.PolicyRole, error) { + + filteredRoles := []iampolicymanagementv1.PolicyRole{} + for _, roleName := range roleNames { + role, err := findRoleByName(roles, roleName) + if err != nil { + return []iampolicymanagementv1.PolicyRole{}, err + } + role.DisplayName = nil + filteredRoles = append(filteredRoles, role) + } + return filteredRoles, nil +} + +func mapRoleListToPolicyRoles(roleList iampolicymanagementv1.RoleList) []iampolicymanagementv1.PolicyRole { + var policyRoles []iampolicymanagementv1.PolicyRole + for _, customRole := range roleList.CustomRoles { + newPolicyRole := iampolicymanagementv1.PolicyRole{ + DisplayName: customRole.DisplayName, + RoleID: customRole.CRN, + } + policyRoles = append(policyRoles, newPolicyRole) + } + for _, serviceRole := range roleList.ServiceRoles { + newPolicyRole := iampolicymanagementv1.PolicyRole{ + DisplayName: serviceRole.DisplayName, + RoleID: serviceRole.CRN, + } + policyRoles = append(policyRoles, newPolicyRole) + } + for _, systemRole := range roleList.SystemRoles { + newPolicyRole := iampolicymanagementv1.PolicyRole{ + DisplayName: systemRole.DisplayName, + RoleID: systemRole.CRN, + } + policyRoles = append(policyRoles, newPolicyRole) + } + return policyRoles +} + +func generatePolicyOptions(d *schema.ResourceData, meta interface{}) (iampolicymanagementv1.CreatePolicyOptions, error) { + + var serviceName string + var resourceType string + resourceAttributes := []iampolicymanagementv1.ResourceAttribute{} + + if res, ok := d.GetOk("resources"); ok { + resources := res.([]interface{}) + for _, resource := range resources { + r, _ := resource.(map[string]interface{}) + + if r, ok := r["service"]; ok && r != nil { + serviceName = r.(string) + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceName"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["resource_instance_id"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceInstance"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["region"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("region"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["resource_type"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("resourceType"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["resource"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("resource"), + Value: core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["resource_group_id"]; ok { + if r.(string) != "" { + resourceAttr := iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("resourceGroupId"), + Value: 
core.StringPtr(r.(string)), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, resourceAttr) + } + } + + if r, ok := r["attributes"]; ok { + for k, v := range r.(map[string]interface{}) { + resourceAttributes = setResourceAttribute(core.StringPtr(k), core.StringPtr(v.(string)), resourceAttributes) + } + } + } + } + if r, ok := d.GetOk("resource_attributes"); ok { + for _, attribute := range r.(*schema.Set).List() { + a := attribute.(map[string]interface{}) + name := a["name"].(string) + value := a["value"].(string) + operator := a["operator"].(string) + at := iampolicymanagementv1.ResourceAttribute{ + Name: &name, + Value: &value, + Operator: &operator, + } + resourceAttributes = append(resourceAttributes, at) + } + } + + var serviceTypeResourceAttribute iampolicymanagementv1.ResourceAttribute + + if d.Get("account_management").(bool) { + serviceTypeResourceAttribute = iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceType"), + Value: core.StringPtr("platform_service"), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, serviceTypeResourceAttribute) + } + + if len(resourceAttributes) == 0 { + serviceTypeResourceAttribute = iampolicymanagementv1.ResourceAttribute{ + Name: core.StringPtr("serviceType"), + Value: core.StringPtr("service"), + Operator: core.StringPtr("stringEquals"), + } + resourceAttributes = append(resourceAttributes, serviceTypeResourceAttribute) + } + + policyResources := iampolicymanagementv1.PolicyResource{ + Attributes: resourceAttributes, + } + + userDetails, err := meta.(ClientSession).BluemixUserDetails() + if err != nil { + return iampolicymanagementv1.CreatePolicyOptions{}, err + } + + iamPolicyManagementClient, err := meta.(ClientSession).IAMPolicyManagementV1API() + + if err != nil { + return iampolicymanagementv1.CreatePolicyOptions{}, err + } + + serviceToQuery := serviceName + + if serviceName == "" && // no specific service specified + !d.Get("account_management").(bool) && // not all account management services + resourceType != "resource-group" { // not to a resource group + serviceToQuery = "alliamserviceroles" + } + + listRoleOptions := &iampolicymanagementv1.ListRolesOptions{ + AccountID: &userDetails.userAccount, + ServiceName: &serviceToQuery, + } + + roleList, _, err := iamPolicyManagementClient.ListRoles(listRoleOptions) + if err != nil { + return iampolicymanagementv1.CreatePolicyOptions{}, err + } + + roles := mapRoleListToPolicyRoles(*roleList) + policyRoles, err := getRolesFromRoleNames(expandStringList(d.Get("roles").([]interface{})), roles) + if err != nil { + return iampolicymanagementv1.CreatePolicyOptions{}, err + } + + return iampolicymanagementv1.CreatePolicyOptions{Roles: policyRoles, Resources: []iampolicymanagementv1.PolicyResource{policyResources}}, nil +} + +func getIBMUniqueId(accountID, userEmail string, meta interface{}) (string, error) { + userManagement, err := meta.(ClientSession).UserManagementAPI() + if err != nil { + return "", err + } + client := userManagement.UserInvite() + res, err := client.ListUsers(accountID) + if err != nil { + return "", err + } + for _, userInfo := range res { + //handling case-sensitivity in userEmail + if strings.ToLower(userInfo.Email) == strings.ToLower(userEmail) { + return userInfo.IamID, nil + } + } + return "", fmt.Errorf("User %s is not found under account %s", userEmail, accountID) +} + +func flattenWorkerPoolHostLabels(hostLabels map[string]string) *schema.Set { + mapped := 
make([]string, len(hostLabels)) + idx := 0 + for k, v := range hostLabels { + mapped[idx] = fmt.Sprintf("%s:%v", k, v) + idx++ + } + + return newStringSet(schema.HashString, mapped) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/utils.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/utils.go new file mode 100644 index 00000000000..68647cb54fb --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/utils.go @@ -0,0 +1,36 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" +) + +// Used for retry logic on resource timeout. +func isResourceTimeoutError(err error) bool { + timeoutErr, ok := err.(*resource.TimeoutError) + return ok && timeoutErr.LastError == nil +} +func GetPrivateServiceURLForRegion(region string) (string, error) { + var endpoints = map[string]string{ + "us-south": "https://private.us.icr.io", // us-south + "uk-south": "https://private.uk.icr.io", // uk-south + "eu-gb": "https://private.uk.icr.io", // eu-gb + "eu-central": "https://private.de.icr.io", // eu-central + "eu-de": "https://private.de.icr.io", // eu-de + "ap-north": "https://private.jp.icr.io", // ap-north + "jp-tok": "https://private.jp.icr.io", // jp-tok + "ap-south": "https://private.au.icr.io", // ap-south + "au-syd": "https://private.au.icr.io", // au-syd + "global": "https://private.icr.io", // global + "jp-osa": "https://private.jp2.icr.io", // jp-osa + } + + if url, ok := endpoints[region]; ok { + return url, nil + } + return "", fmt.Errorf("service URL for region '%s' not found", region) +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/validators.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/validators.go new file mode 100644 index 00000000000..96195524bcf --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/validators.go @@ -0,0 +1,1375 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
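GetPrivateServiceURLForRegion in the new utils.go is a plain map lookup keyed by region. A short usage sketch, assuming it is called from within the same ibm package; the "eu-de" value is illustrative:

    // Resolve the private Container Registry endpoint for a region.
    url, err := GetPrivateServiceURLForRegion("eu-de")
    if err != nil {
        // unknown regions return: service URL for region '<region>' not found
        return err
    }
    fmt.Println(url) // prints: https://private.de.icr.io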
+// Licensed under the Mozilla Public License v2.0 + +package ibm + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + homedir "github.com/mitchellh/go-homedir" + gouuid "github.com/satori/go.uuid" + + "github.com/IBM-Cloud/bluemix-go/helpers" +) + +var ( + validHRef *regexp.Regexp +) + +func init() { + validHRef = regexp.MustCompile(`^http(s)?:\/\/([^\/?#]*)([^?#]*)(\?([^#]*))?(#(.*))?$`) +} + +func validateSecondaryIPCount(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value != 4 && value != 8 { + errors = append(errors, fmt.Errorf( + "%q must be either 4 or 8", k)) + } + return +} + +func validateServiceTags(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 2048 { + errors = append(errors, fmt.Errorf( + "%q must contain tags whose maximum length is 2048 characters", k)) + } + return +} + +func validateAllowedStringValue(validValues []string) schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + input := v.(string) + existed := false + for _, s := range validValues { + if s == input { + existed = true + break + } + } + if !existed { + errors = append(errors, fmt.Errorf( + "%q must contain a value from %#v, got %q", + k, validValues, input)) + } + return + + } +} + +func validateRegexpLen(min, max int, regex string) schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + acceptedcharacters, _ := regexp.MatchString(regex, value) + + if acceptedcharacters { + if (len(value) < min) || (len(value) > max) && (min > 0 && max > 0) { + errors = append(errors, fmt.Errorf( + "%q (%q) must contain from %d to %d characters ", k, value, min, max)) + } + } else { + errors = append(errors, fmt.Errorf( + "%q (%q) should match regexp %s ", k, v, regex)) + } + + return + + } +} + +func validateAllowedIntValue(is []int) schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + existed := false + for _, i := range is { + if i == value { + existed = true + break + } + } + if !existed { + errors = append(errors, fmt.Errorf( + "%q must contain a valid int value should in array %#v, got %q", + k, is, value)) + } + return + + } +} + +func validateAllowedEnterpriseNameValue() schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) < 3 || len(value) > 60 { + errors = append(errors, fmt.Errorf( + "%q must contain a valid string value with length between 3 and 60", value)) + } + return + + } +} +func validateRoutePath(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + //Somehow API allows this + if value == "" { + return + } + + if (len(value) < 2) || (len(value) > 128) { + errors = append(errors, fmt.Errorf( + "%q (%q) must contain from 2 to 128 characters ", k, value)) + } + if !(strings.HasPrefix(value, "/")) { + errors = append(errors, fmt.Errorf( + "%q (%q) must start with a forward slash '/'", k, value)) + + } + if strings.Contains(value, "?") { + errors = append(errors, fmt.Errorf( + "%q (%q) must not contain a '?'", k, value)) + } + + return +} + +func validateRoutePort(v interface{}, k string) (ws []string, errors []error) { + return validatePortRange(1024, 65535)(v, 
k) +} + +func validateAppPort(v interface{}, k string) (ws []string, errors []error) { + return validatePortRange(1024, 65535)(v, k) +} +func validateLBListenerPolicyPriority(v interface{}, k string) (ws []string, errors []error) { + interval := v.(int) + if interval < 1 || interval > 10 { + errors = append(errors, fmt.Errorf( + "%q must be between 1 and 10", + k)) + } + return +} + +func validateStringLength(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if (len(value) < 1) || (len(value) > 128) { + errors = append(errors, fmt.Errorf( + "%q (%q) must contain from 1 to 128 characters ", k, value)) + } + return +} + +func validatePortRange(start, end int) func(v interface{}, k string) (ws []string, errors []error) { + f := func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if (value < start) || (value > end) { + errors = append(errors, fmt.Errorf( + "%q (%d) must be in the range of %d to %d", k, value, start, end)) + } + return + } + return f +} + +func validateDomainName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if !(strings.Contains(value, ".")) { + errors = append(errors, fmt.Errorf( + "%q (%q) must contain a '.',example.com,foo.example.com", k, value)) + } + + return +} + +func validateAppInstance(v interface{}, k string) (ws []string, errors []error) { + instances := v.(int) + if instances < 0 { + errors = append(errors, fmt.Errorf( + "%q (%q) must be greater than 0", k, instances)) + } + return + +} + +func validateWorkerNum(v interface{}, k string) (ws []string, errors []error) { + workerNum := v.(int) + if workerNum <= 0 { + errors = append(errors, fmt.Errorf( + "%q must be greater than 0", k)) + } + return + +} + +func validateAppZipPath(v interface{}, k string) (ws []string, errors []error) { + path := v.(string) + applicationZip, err := homedir.Expand(path) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q (%q) home directory in the given path couldn't be expanded", k, path)) + } + if !helpers.FileExists(applicationZip) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't exist", k, path)) + } + + return + +} + +func validateNotes(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 1000 { + errors = append(errors, fmt.Errorf( + "%q should not exceed 1000 characters", k)) + } + return +} + +func validatePublicBandwidth(v interface{}, k string) (ws []string, errors []error) { + bandwidth := v.(int) + if bandwidth < 0 { + errors = append(errors, fmt.Errorf( + "%q (%q) must be greater than 0", k, bandwidth)) + return + } + validBandwidths := []int{250, 1000, 5000, 10000, 20000} + for _, b := range validBandwidths { + if b == bandwidth { + return + } + } + errors = append(errors, fmt.Errorf( + "%q (%d) must be one of the value from %d", k, bandwidth, validBandwidths)) + return + +} + +func validateMaxConn(v interface{}, k string) (ws []string, errors []error) { + maxConn := v.(int) + if maxConn < 1 || maxConn > 64000 { + errors = append(errors, fmt.Errorf( + "%q must be between 1 and 64000", + k)) + return + } + return +} + +func validateKeyLifeTime(v interface{}, k string) (ws []string, errors []error) { + secs := v.(int) + if secs < 1800 || secs > 86400 { + errors = append(errors, fmt.Errorf( + "%q must be between 1800 and 86400", + k)) + return + } + return +} + +func validateWeight(v interface{}, k string) (ws []string, errors []error) { + weight := v.(int) + if weight < 0 || weight > 100 { + errors = 
append(errors, fmt.Errorf( + "%q must be between 1 and 100", + k)) + } + return +} + +func validateSizePerZone(v interface{}, k string) (ws []string, errors []error) { + sizePerZone := v.(int) + if sizePerZone <= 0 { + errors = append(errors, fmt.Errorf( + "%q must be greater than 0", + k)) + } + return +} + +func validateInterval(v interface{}, k string) (ws []string, errors []error) { + interval := v.(int) + if interval < 2 || interval > 60 { + errors = append(errors, fmt.Errorf( + "%q must be between 2 and 60", + k)) + } + return +} + +func validateMaxRetries(v interface{}, k string) (ws []string, errors []error) { + maxRetries := v.(int) + if maxRetries < 1 || maxRetries > 10 { + errors = append(errors, fmt.Errorf( + "%q must be between 1 and 10", + k)) + } + return +} + +func validateTimeout(v interface{}, k string) (ws []string, errors []error) { + timeout := v.(int) + if timeout < 1 || timeout > 59 { + errors = append(errors, fmt.Errorf( + "%q must be between 1 and 59", + k)) + } + return +} + +func validateURLPath(v interface{}, k string) (ws []string, errors []error) { + urlPath := v.(string) + if len(urlPath) > 250 || !strings.HasPrefix(urlPath, "/") { + errors = append(errors, fmt.Errorf( + "%q should start with ‘/‘ and has a max length of 250 characters.", + k)) + } + return +} + +func validateSecurityRuleDirection(v interface{}, k string) (ws []string, errors []error) { + validDirections := map[string]bool{ + "ingress": true, + "egress": true, + } + + value := v.(string) + _, found := validDirections[value] + if !found { + strarray := make([]string, 0, len(validDirections)) + for key := range validDirections { + strarray = append(strarray, key) + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid security group rule direction %q. Valid types are %q.", + k, value, strings.Join(strarray, ","))) + } + return +} + +func validateSecurityRuleEtherType(v interface{}, k string) (ws []string, errors []error) { + validEtherTypes := map[string]bool{ + "IPv4": true, + "IPv6": true, + } + + value := v.(string) + _, found := validEtherTypes[value] + if !found { + strarray := make([]string, 0, len(validEtherTypes)) + for key := range validEtherTypes { + strarray = append(strarray, key) + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid security group rule ethernet type %q. Valid types are %q.", + k, value, strings.Join(strarray, ","))) + } + return +} + +//validateIP... +func validateIP(v interface{}, k string) (ws []string, errors []error) { + address := v.(string) + if net.ParseIP(address) == nil { + errors = append(errors, fmt.Errorf( + "%q must be a valid ip address", + k)) + } + return +} + +//validateCIDR... +func validateCIDR(v interface{}, k string) (ws []string, errors []error) { + address := v.(string) + _, _, err := net.ParseCIDR(address) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q must be a valid cidr address", + k)) + } + return +} + +//validateCIDRAddress... +func validateCIDRAddress() schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + address := v.(string) + _, _, err := net.ParseCIDR(address) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q must be a valid cidr address", + k)) + } + return + } +} + +//validateOverlappingAddress... 
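validateIP and validateCIDR above (and the combined remote-IP checks that follow) reduce to the standard library's net.ParseIP and net.ParseCIDR. A self-contained sketch of the address-or-CIDR test that validateRemoteIP and validateIPorCIDR both apply; isIPorCIDR is an illustrative helper name:

    package main

    import (
        "fmt"
        "net"
    )

    // isIPorCIDR reports whether s parses as either a bare IP address
    // or a CIDR block, matching the two-validator check in the provider.
    func isIPorCIDR(s string) bool {
        if net.ParseIP(s) != nil {
            return true
        }
        _, _, err := net.ParseCIDR(s)
        return err == nil
    }

    func main() {
        for _, v := range []string{"10.240.0.1", "10.240.0.0/24", "not-an-ip"} {
            fmt.Printf("%s -> %v\n", v, isIPorCIDR(v)) // true, true, false
        }
    }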
+func validateOverlappingAddress() schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + nonOverlappingCIDR := map[string]bool{ + "127.0.0.0/8": true, + "161.26.0.0/16": true, + "166.8.0.0/14": true, + "169.254.0.0/16": true, + "224.0.0.0/4": true, + } + + address := v.(string) + _, found := nonOverlappingCIDR[address] + if found { + errors = append(errors, fmt.Errorf( + "%q the request is overlapping with reserved address ranges", + k)) + } + return + } +} + +//validateRemoteIP... +func validateRemoteIP(v interface{}, k string) (ws []string, errors []error) { + _, err1 := validateCIDR(v, k) + _, err2 := validateIP(v, k) + + if len(err1) != 0 && len(err2) != 0 { + errors = append(errors, fmt.Errorf( + "%q must be a valid remote ip address (cidr or ip)", + k)) + } + return +} + +//validateIPorCIDR... +func validateIPorCIDR() schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + _, err1 := validateCIDR(v, k) + _, err2 := validateIP(v, k) + + if len(err1) != 0 && len(err2) != 0 { + errors = append(errors, fmt.Errorf( + "%q must be a valid remote ip address (cidr or ip)", + k)) + } + return + } +} + +func validateSecurityRuleProtocol(v interface{}, k string) (ws []string, errors []error) { + validProtocols := map[string]bool{ + "icmp": true, + "tcp": true, + "udp": true, + } + + value := v.(string) + _, found := validProtocols[value] + if !found { + strarray := make([]string, 0, len(validProtocols)) + for key := range validProtocols { + strarray = append(strarray, key) + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid security group rule ethernet type %q. Valid types are %q.", + k, value, strings.Join(strarray, ","))) + } + return +} + +func validateNamespace(ns string) error { + os := strings.Split(ns, "_") + if len(os) < 2 || (len(os) == 2 && (len(os[0]) == 0 || len(os[1]) == 0)) { + return fmt.Errorf( + "Namespace is (%s), it must be of the form _, provider can't find the auth key if you use _ as well", ns) + } + return nil +} + +//func validateJSONString(v interface{}, k string) (ws []string, errors []error) { +// if _, err := normalizeJSONString(v); err != nil { +// errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) +// } +// if err := validateKeyValue(v); err != nil { +// errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) +// } +// return +//} + +func validateJSONString() schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + if _, err := normalizeJSONString(v); err != nil { + errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) + } + if err := validateKeyValue(v); err != nil { + errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) + } + return + } +} + +func validateRegexp(regex string) schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + acceptedcharacters, _ := regexp.MatchString(regex, value) + + if !acceptedcharacters { + errors = append(errors, fmt.Errorf( + "%q (%q) should match regexp %s ", k, v, regex)) + } + + return + + } +} + +// NoZeroValues is a SchemaValidateFunc which tests if the provided value is +// not a zero value. It's useful in situations where you want to catch +// explicit zero values on things like required fields during validation. 
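validateJSONString below layers two checks: the value must be well-formed JSON, and validateKeyValue further requires a JSON array of objects that each carry both a "key" and a "value" field. For illustration, an input that passes both checks (the tag names are made up):

    // Illustrative payload accepted by validateJSONString()/validateKeyValue:
    // an array of objects, each with both "key" and "value" present.
    const goodJSON = `[
      {"key": "ENV", "value": "production"},
      {"key": "REGION", "value": "us-south"}
    ]`
    // Dropping "key" from an element fails with "'key' is missing from json";
    // dropping "value" fails with "'value' is missing from json".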
+func validateNoZeroValues() schema.SchemaValidateFunc { + return func(i interface{}, k string) (ws []string, errors []error) { + + if reflect.ValueOf(i).Interface() == reflect.Zero(reflect.TypeOf(i)).Interface() { + switch reflect.TypeOf(i).Kind() { + case reflect.String: + errors = append(errors, fmt.Errorf("%s value must not be empty.", k)) + case reflect.Int, reflect.Float64: + errors = append(errors, fmt.Errorf("%s value must not be zero.", k)) + default: + // this validator should only ever be applied to TypeString, TypeInt and TypeFloat + errors = append(errors, fmt.Errorf("can't use NoZeroValues with %T attribute %s", k, i)) + } + } + return + } +} + +func validateBindedPackageName() schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if !(strings.HasPrefix(value, "/")) { + errors = append(errors, fmt.Errorf( + "%q (%q) must start with a forward slash '/'.The package name should be '/whisk.system/cloudant', '/test@in.ibm.com_new/utils' or '/_/utils'", k, value)) + + } + + index := strings.LastIndex(value, "/") + + if index < 2 || index == len(value)-1 { + errors = append(errors, fmt.Errorf( + "%q (%q) is not a valid bind package name.The package name should be '/whisk.system/cloudant','/test@in.ibm.com_new/utils' or '/_/utils'", k, value)) + + } + + return + } +} + +func validateKeyValue(jsonString interface{}) error { + var j [](map[string]interface{}) + if jsonString == nil || jsonString.(string) == "" { + return nil + } + s := jsonString.(string) + err := json.Unmarshal([]byte(s), &j) + if err != nil { + return err + } + for _, v := range j { + _, exists := v["key"] + if !exists { + return errors.New("'key' is missing from json") + } + _, exists = v["value"] + if !exists { + return errors.New("'value' is missing from json") + } + } + return nil +} + +func validateActionName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if strings.HasPrefix(value, "/") { + errors = append(errors, fmt.Errorf( + "%q (%q) must not start with a forward slash '/'.The action name should be like 'myaction' or utils/cloudant'", k, value)) + + } + + const alphaNumeric = "abcdefghijklmnopqrstuvwxyz0123456789/_@.-" + + for _, char := range value { + if !strings.Contains(alphaNumeric, strings.ToLower(string(char))) { + errors = append(errors, fmt.Errorf( + "%q (%q) The name of the package contains illegal characters", k, value)) + } + } + + return +} + +func validateActionKind(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + kindList := []string{"php:7.3", "nodejs:8", "swift:3", "nodejs", "blackbox", "java", "sequence", "nodejs:10", "python:3", "python", "python:2", "swift", "swift:4.2"} + if !stringInSlice(value, kindList) { + errors = append(errors, fmt.Errorf( + "%q (%q) Invalid kind is provided.Supported list of kinds of actions are (%q)", k, value, kindList)) + } + return +} + +func stringInSlice(str string, list []string) bool { + for _, v := range list { + if v == str { + return true + } + } + return false +} + +func validateFunctionName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + var validName = regexp.MustCompile(`\A([\w]|[\w][\w@ .-]*[\w@.-]+)\z`) + if !validName.MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) The name contains illegal characters", k, value)) + + } + return +} + +func validateStorageType(v interface{}, k string) (ws []string, errors []error) { + validEtherTypes := map[string]bool{ + 
"Endurance": true, + "Performance": true, + } + + value := v.(string) + _, found := validEtherTypes[value] + if !found { + strarray := make([]string, 0, len(validEtherTypes)) + for key := range validEtherTypes { + strarray = append(strarray, key) + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid storage type %q. Valid types are %q.", + k, value, strings.Join(strarray, ","))) + } + return +} + +func validateRole(v interface{}, k string) (ws []string, errors []error) { + validRolesTypes := map[string]bool{ + "Writer": true, + "Reader": true, + "Manager": true, + "Administrator": true, + "Operator": true, + "Viewer": true, + "Editor": true, + } + + value := v.(string) + _, found := validRolesTypes[value] + if !found { + strarray := make([]string, 0, len(validRolesTypes)) + for key := range validRolesTypes { + strarray = append(strarray, key) + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid role %q. Valid roles are %q.", + k, value, strings.Join(strarray, ","))) + } + return +} + +func validateDayOfWeek(v interface{}, k string) (ws []string, errors []error) { + validDayTypes := map[string]bool{ + "SUNDAY": true, + "MONDAY": true, + "TUESDAY": true, + "WEDNESDAY": true, + "THURSDAY": true, + "FRIDAY": true, + "SATURDAY": true, + } + + value := v.(string) + _, found := validDayTypes[value] + if !found { + strarray := make([]string, 0, len(validDayTypes)) + for key := range validDayTypes { + strarray = append(strarray, key) + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid day %q. Valid days are %q.", + k, value, strings.Join(strarray, ","))) + } + return +} + +func validateScheduleType(v interface{}, k string) (ws []string, errors []error) { + validSchdTypes := map[string]bool{ + "HOURLY": true, + "DAILY": true, + "WEEKLY": true, + } + + value := v.(string) + _, found := validSchdTypes[value] + if !found { + strarray := make([]string, 0, len(validSchdTypes)) + for key := range validSchdTypes { + strarray = append(strarray, key) + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid schedule type %q. Valid schedules are %q.", + k, value, strings.Join(strarray, ","))) + } + return +} + +func validateHour(start, end int) func(v interface{}, k string) (ws []string, errors []error) { + f := func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if (value < start) || (value > end) { + errors = append(errors, fmt.Errorf( + "%q (%d) must be in the range of %d to %d", k, value, start, end)) + } + return + } + return f +} + +func validateMinute(start, end int) func(v interface{}, k string) (ws []string, errors []error) { + f := func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if (value < start) || (value > end) { + errors = append(errors, fmt.Errorf( + "%q (%d) must be in the range of %d to %d", k, value, start, end)) + } + return + } + return f +} + +func validateDatacenterOption(v []interface{}, allowedValues []string) error { + for _, option := range v { + if option == nil { + return fmt.Errorf("Provide a valid `datacenter_choice`") + } + values := option.(map[string]interface{}) + for k := range values { + if !stringInSlice(k, allowedValues) { + return fmt.Errorf( + "%q Invalid values are provided in `datacenter_choice`. 
Supported list of keys are (%q)", k, allowedValues) + } + + } + } + return nil +} + +func validateLBTimeout(v interface{}, k string) (ws []string, errors []error) { + timeout := v.(int) + if timeout <= 0 || timeout > 3600 { + errors = append(errors, fmt.Errorf( + "%q must be between 1 and 3600", + k)) + } + return +} + +// validateRecordType ensures that the dns record type is valid +func validateRecordType(t string, proxied bool) error { + switch t { + case "A", "AAAA", "CNAME": + return nil + case "TXT", "SRV", "LOC", "MX", "NS", "SPF", "CAA", "CERT", "DNSKEY", "DS", "NAPTR", "SMIMEA", "SSHFP", "TLSA", "URI": + if !proxied { + return nil + } + default: + return fmt.Errorf( + `Invalid type %q. Valid types are "A", "AAAA", "CNAME", "TXT", "SRV", "LOC", "MX", "NS", "SPF", "CAA", "CERT", "DNSKEY", "DS", "NAPTR", "SMIMEA", "SSHFP", "TLSA" or "URI".`, t) + } + + return fmt.Errorf("Type %q cannot be proxied", t) +} + +// validateRecordName ensures that based on supplied record type, the name content matches +// Currently only validates A and AAAA types +func validateRecordName(t string, value string) error { + switch t { + case "A": + // Must be ipv4 addr + addr := net.ParseIP(value) + if addr == nil || !strings.Contains(value, ".") { + return fmt.Errorf("A record must be a valid IPv4 address, got: %q", value) + } + case "AAAA": + // Must be ipv6 addr + addr := net.ParseIP(value) + if addr == nil || !strings.Contains(value, ":") { + return fmt.Errorf("AAAA record must be a valid IPv6 address, got: %q", value) + } + case "TXT": + // Must be printable ASCII + for i := 0; i < len(value); i++ { + char := value[i] + if (char < 0x20) || (0x7F < char) { + return fmt.Errorf("TXT record must contain printable ASCII, found: %q", char) + } + } + } + + return nil +} + +func validateVLANName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 20 { + errors = append(errors, fmt.Errorf( + "Length provided for '%q' is too long. Maximum length is 20 characters", k)) + } + return +} + +func validateAuthProtocol(v interface{}, k string) (ws []string, errors []error) { + authProtocol := v.(string) + if authProtocol != "MD5" && authProtocol != "SHA1" && authProtocol != "SHA256" { + errors = append(errors, fmt.Errorf( + "%q auth protocol can be MD5 or SHA1 or SHA256", k)) + } + return +} + +//ValidateIPVersion +func validateIPVersion(v interface{}, k string) (ws []string, errors []error) { + validVersions := map[string]bool{ + "ipv4": true, + "ipv6": true, + } + + value := v.(string) + _, found := validVersions[value] + if !found { + strarray := make([]string, 0, len(validVersions)) + for key := range validVersions { + strarray = append(strarray, key) + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid ip version type %q. 
Valid types are %q.", + k, value, strings.Join(strarray, ","))) + } + return +} + +func validateVPCIdentity(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + // We do not currently accept CRN or HRef + validators := []func(string) bool{isSecurityGroupAddress, isSecurityGroupCIDR, + isVPCIdentityByID} + + for _, validator := range validators { + if validator(value) { + return + } + } + errors = append(errors, fmt.Errorf("%q (%s) invalid vpc identity", k, value)) + return +} + +func validateResourceGroupId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := gouuid.FromString(value) + if err != nil { + errors = append(errors, fmt.Errorf("%q contains an invalid resource group id, %q.", k, value)) + } + return +} + +func validateSecurityGroupId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := gouuid.FromString(value) + if err != nil { + errors = append(errors, fmt.Errorf("%q contains an invalid security group id, %q.", k, value)) + } + return +} + +func isSecurityGroupAddress(s string) bool { + return net.ParseIP(s) != nil +} + +func isSecurityGroupCIDR(s string) bool { + _, _, err := net.ParseCIDR(s) + return err == nil +} + +func isSecurityGroupIdentityByID(s string) bool { + _, err := gouuid.FromString(s) + return err == nil +} + +func isSecurityGroupIdentityByCRN(s string) bool { + segments := strings.Split(s, ":") + return len(segments) == 10 && segments[0] == "crn" +} + +func isSecurityGroupIdentityByHRef(s string) bool { + return validHRef.MatchString(s) +} + +func isVPCIdentityByID(s string) bool { + _, err := gouuid.FromString(s) + return err == nil +} + +func validateSecurityGroupRemote(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + validators := []func(string) bool{isSecurityGroupAddress, isSecurityGroupCIDR, + isSecurityGroupIdentityByID /*, isSecurityGroupIdentityByCRN, isSecurityGroupIdentityByHRef*/} + + for _, validator := range validators { + if validator(value) { + return + } + } + errors = append(errors, fmt.Errorf("%q (%s) invalid security group remote", k, value)) + return +} + +func validateGeneration(v interface{}, k string) (ws []string, errors []error) { + validVersions := map[string]bool{ + "gc": true, + "gt": true, + } + + value := v.(string) + _, found := validVersions[value] + if !found { + strarray := make([]string, 0, len(validVersions)) + for key := range validVersions { + strarray = append(strarray, key) + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid generation type %q. 
Valid types are %q.", + k, value, strings.Join(strarray, ","))) + } + return +} + +func validateEncyptionProtocol(v interface{}, k string) (ws []string, errors []error) { + encyptionProtocol := v.(string) + if encyptionProtocol != "DES" && encyptionProtocol != "3DES" && encyptionProtocol != "AES128" && encyptionProtocol != "AES192" && encyptionProtocol != "AES256" { + errors = append(errors, fmt.Errorf( + "%q encryption protocol can be DES or 3DES or AES128 or AES192 or AES256", k)) + } + return +} + +func validateDeadPeerDetectionInterval(v interface{}, k string) (ws []string, errors []error) { + secs := v.(int) + if secs < 15 || secs > 86399 { + errors = append(errors, fmt.Errorf( + "%q must be between 15 and 86399", + k)) + return + } + return +} + +func validateDiffieHellmanGroup(v interface{}, k string) (ws []string, errors []error) { + diffieHellmanGroup := v.(int) + if diffieHellmanGroup != 0 && diffieHellmanGroup != 1 && diffieHellmanGroup != 2 && diffieHellmanGroup != 5 { + errors = append(errors, fmt.Errorf( + "%q Diffie Hellman Group can be 0 or 1 or 2 or 5", k)) + } + return +} + +func validateAllowedRangeInt(start, end int) schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < start || value > end { + errors = append(errors, fmt.Errorf( + "%q must contain a valid int value should be in range(%d, %d), got %d", + k, start, end, value)) + } + return + } +} + +func validateDeadPeerDetectionTimeout(v interface{}, k string) (ws []string, errors []error) { + secs := v.(int) + if secs < 15 || secs > 86399 { + errors = append(errors, fmt.Errorf( + "%q must be between 15 and 86399", + k)) + return + } + return +} + +func validatekeylife(v interface{}, k string) (ws []string, errors []error) { + keylife := v.(int) + if keylife < 120 || keylife > 172800 { + errors = append(errors, fmt.Errorf( + "%q keylife value can be between 120 and 172800", k)) + } + return +} + +func validateLBListenerPort(v interface{}, k string) (ws []string, errors []error) { + return validatePortRange(1, 65535)(v, k) +} + +func validateLBListenerConnectionLimit(v interface{}, k string) (ws []string, errors []error) { + conns := v.(int) + if conns < 1 || conns > 15000 { + errors = append(errors, fmt.Errorf( + "%q must be between 1 and 15000", + k)) + return + } + return +} + +//ValidateISName +func validateISName(v interface{}, k string) (ws []string, errors []error) { + name := v.(string) + acceptedcharacters, _ := regexp.MatchString(`^[a-z][-a-z0-9]*$`, name) + endwithalphanumeric, _ := regexp.MatchString(`.*[a-z0-9]$`, name) + length := len(name) + if acceptedcharacters == true { + if length <= 40 { + if endwithalphanumeric == true { + if strings.Contains(name, "--") != true { + return + } else { + errors = append(errors, fmt.Errorf( + "%q (%q) should not contain consecutive dash(-)", k, v)) + } + } else { + errors = append(errors, fmt.Errorf( + "%q (%q) should not end with dash(-) ", k, v)) + } + } else { + errors = append(errors, fmt.Errorf( + "%q (%q) should not exceed 40 characters", k, v)) + } + + } else { + errors = append(errors, fmt.Errorf( + "%q (%q) should contain only lowercase alphanumeric,dash and should begin with lowercase character", k, v)) + } + return +} + +// ValidateFunc is honored only when the schema's Type is set to TypeInt, +// TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. +// enum to list all the validator functions supported by this tool. 
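validateISName above enforces its rules piecewise: only lowercase alphanumerics and dashes, beginning with a lowercase letter, ending with an alphanumeric, no consecutive dashes, and at most 40 characters. For illustration, the same rule set restated as a single regexp plus a length check; this is an approximation for reference, not provider code:

    // isValidISName is an approximate one-regexp restatement of
    // validateISName: starts with a lowercase letter, and every dash
    // must be followed by an alphanumeric, which rules out both "--"
    // and a trailing dash.
    var isNamePattern = regexp.MustCompile(`^[a-z](?:-?[a-z0-9])*$`)

    func isValidISName(name string) bool {
        return len(name) <= 40 && isNamePattern.MatchString(name)
    }

    // "my-vpc-1" -> true; "My-vpc", "my--vpc", "my-vpc-" -> false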
+type FunctionIdentifier int + +const ( + IntBetween FunctionIdentifier = iota + IntAtLeast + IntAtMost + ValidateAllowedStringValue + StringLenBetween + ValidateIPorCIDR + ValidateCIDRAddress + ValidateAllowedIntValue + ValidateRegexpLen + ValidateRegexp + ValidateNoZeroValues + ValidateJSONString + ValidateJSONParam + ValidateBindedPackageName + ValidateOverlappingAddress +) + +// ValueType -- Copied from Terraform for now. You can refer to Terraform ValueType directly. +// ValueType is an enum of the type that can be represented by a schema. +type ValueType int + +const ( + TypeInvalid ValueType = iota + TypeBool + TypeInt + TypeFloat + TypeString +) + +// Type of constraints required for validation +type ValueConstraintType int + +const ( + MinValue ValueConstraintType = iota + MaxValue + MinValueLength + MaxValueLength + AllowedValues + MatchesValue +) + +// Schema is used to describe the validation schema. +type ValidateSchema struct { + + //This is the parameter name. + //Ex: private_subnet in ibm_compute_bare_metal resource + Identifier string + + // this is similar to schema.ValueType + Type ValueType + + // The actual validation function that needs to be invoked. + // Ex: IntBetween, validateAllowedIntValue, validateAllowedStringValue + ValidateFunctionIdentifier FunctionIdentifier + + MinValue string + MaxValue string + AllowedValues string //Comma separated list of strings. + Matches string + Regexp string + MinValueLength int + MaxValueLength int + + // Is this nullable + Nullable bool + + Optional bool + Required bool + Default interface{} + ForceNew bool +} + +type ResourceValidator struct { + // This is the resource name - Found in provider.go of IBM Terraform provider. + // Ex: ibm_compute_monitor, ibm_compute_bare_metal, ibm_compute_dedicated_host, ibm_cis_global_load_balancer etc., + ResourceName string + + // Array of validator objects. Each object refers to one parameter in the resource provider. + Schema []ValidateSchema +} + +type ValidatorDict struct { + ResourceValidatorDictionary map[string]*ResourceValidator + DataSourceValidatorDictionary map[string]*ResourceValidator +} + +// Resource Validator Dictionary -- For all terraform IBM Resource Providers. +// This is of type - Array of ResourceValidators. +// Each object in this array is a type of map, where key == ResourceName and value == array of ValidateSchema objects. Each of these +// ValidateSchema corresponds to a parameter in the resourceProvider. + +var validatorDict = Validator() + +// This is the main validation function. This function will be used in all the provider code. +func InvokeValidator(resourceName, identifier string) schema.SchemaValidateFunc { + // Loop through dictionary and identify the resource and then the parameter configuration. + var schemaToInvoke ValidateSchema + found := false + resourceItem := validatorDict.ResourceValidatorDictionary[resourceName] + if resourceItem.ResourceName == resourceName { + parameterValidateSchema := resourceItem.Schema + for _, validateSchema := range parameterValidateSchema { + if validateSchema.Identifier == identifier { + schemaToInvoke = validateSchema + found = true + break + } + } + } + + if found { + return invokeValidatorInternal(schemaToInvoke) + } else { + // Add error code later. TODO + return nil + } +} + +func InvokeDataSourceValidator(resourceName, identifier string) schema.SchemaValidateFunc { + // Loop through dictionary and identify the resource and then the parameter configuration. 
+ var schemaToInvoke ValidateSchema + found := false + + dataSourceItem := validatorDict.DataSourceValidatorDictionary[resourceName] + if dataSourceItem.ResourceName == resourceName { + parameterValidateSchema := dataSourceItem.Schema + for _, validateSchema := range parameterValidateSchema { + if validateSchema.Identifier == identifier { + schemaToInvoke = validateSchema + found = true + break + } + } + } + + if found { + return invokeValidatorInternal(schemaToInvoke) + } else { + // Add error code later. TODO + return nil + } +} + +// the function is currently modified to invoke SchemaValidateFunc directly. +// But in terraform, we will just return SchemaValidateFunc as shown below.. So terraform will invoke this func +func invokeValidatorInternal(schema ValidateSchema) schema.SchemaValidateFunc { + + funcIdentifier := schema.ValidateFunctionIdentifier + switch funcIdentifier { + case IntBetween: + minValue := schema.GetValue(MinValue) + maxValue := schema.GetValue(MaxValue) + return validation.IntBetween(minValue.(int), maxValue.(int)) + case IntAtLeast: + minValue := schema.GetValue(MinValue) + return validation.IntAtLeast(minValue.(int)) + case IntAtMost: + maxValue := schema.GetValue(MaxValue) + return validation.IntAtMost(maxValue.(int)) + case ValidateAllowedStringValue: + allowedValues := schema.GetValue(AllowedValues) + return validateAllowedStringValue(allowedValues.([]string)) + case StringLenBetween: + return validation.StringLenBetween(schema.MinValueLength, schema.MaxValueLength) + case ValidateIPorCIDR: + return validateIPorCIDR() + case ValidateCIDRAddress: + return validateCIDRAddress() + case ValidateAllowedIntValue: + allowedValues := schema.GetValue(AllowedValues) + return validateAllowedIntValue(allowedValues.([]int)) + case ValidateRegexpLen: + return validateRegexpLen(schema.MinValueLength, schema.MaxValueLength, schema.Regexp) + case ValidateRegexp: + return validateRegexp(schema.Regexp) + case ValidateNoZeroValues: + return validateNoZeroValues() + case ValidateJSONString: + return validateJSONString() + case ValidateBindedPackageName: + return validateBindedPackageName() + case ValidateOverlappingAddress: + return validateOverlappingAddress() + + default: + return nil + } +} + +// utility functions - Move to different package +func (vs ValidateSchema) GetValue(valueConstraint ValueConstraintType) interface{} { + + var valueToConvert string + switch valueConstraint { + case MinValue: + valueToConvert = vs.MinValue + case MaxValue: + valueToConvert = vs.MaxValue + case AllowedValues: + valueToConvert = vs.AllowedValues + case MatchesValue: + valueToConvert = vs.Matches + } + + switch vs.Type { + case TypeInvalid: + return nil + case TypeBool: + b, err := strconv.ParseBool(valueToConvert) + if err != nil { + return vs.Zero() + } + return b + case TypeInt: + // Convert comma separated string to array + if strings.Contains(valueToConvert, ",") { + var arr2 []int + arr1 := strings.Split(valueToConvert, ",") + for _, ele := range arr1 { + e, err := strconv.Atoi(strings.TrimSpace(ele)) + if err != nil { + return vs.Zero() + } + arr2 = append(arr2, e) + } + return arr2 + } else { + num, err := strconv.Atoi(valueToConvert) + if err != nil { + return vs.Zero() + } + return num + } + + case TypeFloat: + f, err := strconv.ParseFloat(valueToConvert, 32) + if err != nil { + return vs.Zero() + } + return f + case TypeString: + //return valueToConvert + // Convert comma separated string to array + arr := strings.Split(valueToConvert, ",") + for i, ele := range arr { + arr[i] = 
strings.TrimSpace(ele) + } + return arr + default: + panic(fmt.Sprintf("unknown type %s", vs.Type)) + } +} + +// Use stringer tool to generate this later. +func (i FunctionIdentifier) String() string { + return [...]string{"IntBetween", "IntAtLeast", "IntAtMost"}[i] +} + +// Use Stringer tool to generate this later. +func (i ValueType) String() string { + return [...]string{"TypeInvalid", "TypeBool", "TypeInt", "TypeFloat", "TypeString"}[i] +} + +// Use Stringer tool to generate this later. +func (i ValueConstraintType) String() string { + return [...]string{"MinValue", "MaxValue", "MinValueLength", "MaxValueLength", "AllowedValues", "MatchesValue"}[i] +} + +// Zero returns the zero value for a type. +func (vs ValidateSchema) Zero() interface{} { + switch vs.Type { + case TypeInvalid: + return nil + case TypeBool: + return false + case TypeInt: + return make([]string, 0) + case TypeFloat: + return 0.0 + case TypeString: + return make([]int, 0) + default: + panic(fmt.Sprintf("unknown type %s", vs.Type)) + } +} diff --git a/vendor/github.com/IBM-Cloud/terraform-provider-ibm/version/version.go b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/version/version.go new file mode 100644 index 00000000000..0ee9f39901e --- /dev/null +++ b/vendor/github.com/IBM-Cloud/terraform-provider-ibm/version/version.go @@ -0,0 +1,21 @@ +package version + +import ( + "github.com/hashicorp/go-version" +) + +// Version is the current provider main version +const Version = "1.26.2" + +// GitCommit is the git commit that was compiled. This will be filled in by the compiler. +var GitCommit string + +//VersionPrerelease is the marker for version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +var VersionPrerelease = "" + +// SemVersion is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. +var SemVersion = version.Must(version.NewVersion(Version)) diff --git a/vendor/github.com/IBM/apigateway-go-sdk/LICENSE b/vendor/github.com/IBM/apigateway-go-sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/IBM/apigateway-go-sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/IBM/apigateway-go-sdk/README.md b/vendor/github.com/IBM/apigateway-go-sdk/README.md new file mode 100644 index 00000000000..8fe766389dc --- /dev/null +++ b/vendor/github.com/IBM/apigateway-go-sdk/README.md @@ -0,0 +1,2 @@ +# apigateway-go-sdk +The IBM Cloud API Gateway SDK will allow IBM Cloud customers to easily create, manage, and share APIs in an automated fashion using Go. diff --git a/vendor/github.com/IBM/apigateway-go-sdk/api_gateway_controller_api_v1.go b/vendor/github.com/IBM/apigateway-go-sdk/api_gateway_controller_api_v1.go new file mode 100644 index 00000000000..5b05cae28ed --- /dev/null +++ b/vendor/github.com/IBM/apigateway-go-sdk/api_gateway_controller_api_v1.go @@ -0,0 +1,2583 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package apigatewaycontrollerapiv1 : Operations and models for the ApiGatewayControllerApiV1 service +package apigatewaycontrollerapiv1 + +import ( + "fmt" + + common "github.com/IBM/apigateway-go-sdk/common" + "github.com/IBM/go-sdk-core/v3/core" +) + +// ApiGatewayControllerApiV1 : Primary REST API for creating and managing APIs within the IBM Cloud API Gateway service. +// +// Version: 1.0.0 +type ApiGatewayControllerApiV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.us-south.apigw.cloud.ibm.com/controller" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "api_gateway_controller_api" + +// ApiGatewayControllerApiV1Options : Service options +type ApiGatewayControllerApiV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewApiGatewayControllerApiV1UsingExternalConfig : constructs an instance of ApiGatewayControllerApiV1 with passed in options and external configuration. +func NewApiGatewayControllerApiV1UsingExternalConfig(options *ApiGatewayControllerApiV1Options) (apiGatewayControllerApi *ApiGatewayControllerApiV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + apiGatewayControllerApi, err = NewApiGatewayControllerApiV1(options) + if err != nil { + return + } + + err = apiGatewayControllerApi.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = apiGatewayControllerApi.Service.SetServiceURL(options.URL) + } + return +} + +// NewApiGatewayControllerApiV1 : constructs an instance of ApiGatewayControllerApiV1 with passed in options. 
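+//
+// A minimal construction sketch (illustrative only; the IAM API key below is a
+// placeholder, not a value from this repository):
+//
+//	service, err := NewApiGatewayControllerApiV1(&ApiGatewayControllerApiV1Options{
+//		Authenticator: &core.IamAuthenticator{ApiKey: "YOUR_IAM_API_KEY"},
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	// service.SetServiceURL(...) may then be used to override DefaultServiceURL.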
+func NewApiGatewayControllerApiV1(options *ApiGatewayControllerApiV1Options) (service *ApiGatewayControllerApiV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &ApiGatewayControllerApiV1{ + Service: baseService, + } + + return +} + +// SetServiceURL sets the service URL +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) SetServiceURL(url string) error { + return apiGatewayControllerApi.Service.SetServiceURL(url) +} + +// GetAllEndpoints : Get details for all Endpoints +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) GetAllEndpoints(getAllEndpointsOptions *GetAllEndpointsOptions) (result *[]V2Endpoint, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getAllEndpointsOptions, "getAllEndpointsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getAllEndpointsOptions, "getAllEndpointsOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/endpoints"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getAllEndpointsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "GetAllEndpoints") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + if getAllEndpointsOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*getAllEndpointsOptions.Authorization)) + } + + builder.AddQuery("service_instance_crn", fmt.Sprint(*getAllEndpointsOptions.ServiceInstanceCrn)) + if getAllEndpointsOptions.ProviderID != nil { + builder.AddQuery("provider_id", fmt.Sprint(*getAllEndpointsOptions.ProviderID)) + } + if getAllEndpointsOptions.Shared != nil { + builder.AddQuery("shared", fmt.Sprint(*getAllEndpointsOptions.Shared)) + } + if getAllEndpointsOptions.Managed != nil { + builder.AddQuery("managed", fmt.Sprint(*getAllEndpointsOptions.Managed)) + } + if getAllEndpointsOptions.Swagger != nil { + builder.AddQuery("swagger", fmt.Sprint(*getAllEndpointsOptions.Swagger)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make([]map[string]interface{}, 1)) + if err == nil { + s, ok := response.Result.([]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + slice, e := UnmarshalV2EndpointSlice(s) + result = &slice + err = e + response.Result = result + } + + return +} + +// CreateEndpoint : Create an Endpoint +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) CreateEndpoint(createEndpointOptions *CreateEndpointOptions) (result *V2Endpoint, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createEndpointOptions, "createEndpointOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createEndpointOptions, "createEndpointOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/endpoints"} + pathParameters := 
[]string{} + + builder := core.NewRequestBuilder(core.POST) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range createEndpointOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "CreateEndpoint") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createEndpointOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*createEndpointOptions.Authorization)) + } + + body := make(map[string]interface{}) + if createEndpointOptions.ArtifactID != nil { + body["artifact_id"] = createEndpointOptions.ArtifactID + } + if createEndpointOptions.ParentCrn != nil { + body["parent_crn"] = createEndpointOptions.ParentCrn + } + if createEndpointOptions.ServiceInstanceCrn != nil { + body["service_instance_crn"] = createEndpointOptions.ServiceInstanceCrn + } + if createEndpointOptions.Name != nil { + body["name"] = createEndpointOptions.Name + } + if createEndpointOptions.Routes != nil { + body["routes"] = createEndpointOptions.Routes + } + if createEndpointOptions.Managed != nil { + body["managed"] = createEndpointOptions.Managed + } + if createEndpointOptions.Metadata != nil { + body["metadata"] = createEndpointOptions.Metadata + } + if createEndpointOptions.OpenApiDoc != nil { + body["open_api_doc"] = createEndpointOptions.OpenApiDoc + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{})) + if err == nil { + m, ok := response.Result.(map[string]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + result, err = UnmarshalV2Endpoint(m) + response.Result = result + } + + return +} + +// GetEndpoint : Get details for a given Endpoint +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) GetEndpoint(getEndpointOptions *GetEndpointOptions) (result *V2Endpoint, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getEndpointOptions, "getEndpointOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getEndpointOptions, "getEndpointOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/endpoints"} + pathParameters := []string{*getEndpointOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getEndpointOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "GetEndpoint") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + if getEndpointOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*getEndpointOptions.Authorization)) + } + + builder.AddQuery("service_instance_crn", fmt.Sprint(*getEndpointOptions.ServiceInstanceCrn)) + + request, err := builder.Build() + if err != nil { + return + } + + response, 
err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{})) + if err == nil { + m, ok := response.Result.(map[string]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + result, err = UnmarshalV2Endpoint(m) + response.Result = result + } + + return +} + +// UpdateEndpoint : Update an endpoint +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) UpdateEndpoint(updateEndpointOptions *UpdateEndpointOptions) (result *V2Endpoint, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateEndpointOptions, "updateEndpointOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateEndpointOptions, "updateEndpointOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/endpoints"} + pathParameters := []string{*updateEndpointOptions.ID} + + builder := core.NewRequestBuilder(core.PUT) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range updateEndpointOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "UpdateEndpoint") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateEndpointOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*updateEndpointOptions.Authorization)) + } + + builder.AddQuery("service_instance_crn", fmt.Sprint(*updateEndpointOptions.ServiceInstanceCrn)) + + body := make(map[string]interface{}) + if updateEndpointOptions.NewArtifactID != nil { + body["artifact_id"] = updateEndpointOptions.NewArtifactID + } + if updateEndpointOptions.NewParentCrn != nil { + body["parent_crn"] = updateEndpointOptions.NewParentCrn + } + if updateEndpointOptions.NewServiceInstanceCrn != nil { + body["service_instance_crn"] = updateEndpointOptions.NewServiceInstanceCrn + } + if updateEndpointOptions.NewName != nil { + body["name"] = updateEndpointOptions.NewName + } + if updateEndpointOptions.NewRoutes != nil { + body["routes"] = updateEndpointOptions.NewRoutes + } + if updateEndpointOptions.NewManaged != nil { + body["managed"] = updateEndpointOptions.NewManaged + } + if updateEndpointOptions.NewMetadata != nil { + body["metadata"] = updateEndpointOptions.NewMetadata + } + if updateEndpointOptions.NewOpenApiDoc != nil { + body["open_api_doc"] = updateEndpointOptions.NewOpenApiDoc + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{})) + if err == nil { + m, ok := response.Result.(map[string]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + result, err = UnmarshalV2Endpoint(m) + response.Result = result + } + + return +} + +// DeleteEndpoint : Delete an Endpoint +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) DeleteEndpoint(deleteEndpointOptions *DeleteEndpointOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteEndpointOptions, "deleteEndpointOptions cannot be nil") + if err != nil { + return + } + err = 
core.ValidateStruct(deleteEndpointOptions, "deleteEndpointOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/endpoints"} + pathParameters := []string{*deleteEndpointOptions.ID} + + builder := core.NewRequestBuilder(core.DELETE) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range deleteEndpointOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "DeleteEndpoint") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + if deleteEndpointOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*deleteEndpointOptions.Authorization)) + } + + builder.AddQuery("service_instance_crn", fmt.Sprint(*deleteEndpointOptions.ServiceInstanceCrn)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, nil) + + return +} + +// GetEndpointSwagger : Get the OpenAPI doc for a given Endpoint +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) GetEndpointSwagger(getEndpointSwaggerOptions *GetEndpointSwaggerOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getEndpointSwaggerOptions, "getEndpointSwaggerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getEndpointSwaggerOptions, "getEndpointSwaggerOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/endpoints", "swagger"} + pathParameters := []string{*getEndpointSwaggerOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getEndpointSwaggerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "GetEndpointSwagger") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + if getEndpointSwaggerOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*getEndpointSwaggerOptions.Authorization)) + } + + builder.AddQuery("service_instance_crn", fmt.Sprint(*getEndpointSwaggerOptions.ServiceInstanceCrn)) + if getEndpointSwaggerOptions.Type != nil { + builder.AddQuery("type", fmt.Sprint(*getEndpointSwaggerOptions.Type)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{})) + + return +} + +// EndpointActions : Execute actions for a given Endpoint +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) EndpointActions(endpointActionsOptions *EndpointActionsOptions) (result *V2Endpoint, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(endpointActionsOptions, "endpointActionsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(endpointActionsOptions, "endpointActionsOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/endpoints", "actions"} + pathParameters := []string{*endpointActionsOptions.ID} + + builder := core.NewRequestBuilder(core.POST) + _, err = 
builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range endpointActionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "EndpointActions") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if endpointActionsOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*endpointActionsOptions.Authorization)) + } + + builder.AddQuery("service_instance_crn", fmt.Sprint(*endpointActionsOptions.ServiceInstanceCrn)) + builder.AddQuery("provider_id", fmt.Sprint(*endpointActionsOptions.ProviderID)) + + body := make(map[string]interface{}) + if endpointActionsOptions.Type != nil { + body["type"] = endpointActionsOptions.Type + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{})) + if err == nil { + m, ok := response.Result.(map[string]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + result, err = UnmarshalV2Endpoint(m) + response.Result = result + } + + return +} + +// EndpointSummary : Get provider sorted summary about all Endpoints +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) EndpointSummary(endpointSummaryOptions *EndpointSummaryOptions) (result *[]V2EndpointSummary, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(endpointSummaryOptions, "endpointSummaryOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(endpointSummaryOptions, "endpointSummaryOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/endpoints/summary"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range endpointSummaryOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "EndpointSummary") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + if endpointSummaryOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*endpointSummaryOptions.Authorization)) + } + + builder.AddQuery("account_id", fmt.Sprint(*endpointSummaryOptions.AccountID)) + if endpointSummaryOptions.ServiceInstanceCrn != nil { + builder.AddQuery("service_instance_crn", fmt.Sprint(*endpointSummaryOptions.ServiceInstanceCrn)) + } + if endpointSummaryOptions.Swagger != nil { + builder.AddQuery("swagger", fmt.Sprint(*endpointSummaryOptions.Swagger)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make([]map[string]interface{}, 1)) + if err == nil { + s, ok := response.Result.([]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + slice, e := UnmarshalV2EndpointSummarySlice(s) 
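+		// The summary operation returns a JSON array rather than an object, so the
+		// generic []interface{} result is unmarshalled into a typed slice, and the
+		// typed value replaces the raw one on the response before it is returned.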
+ result = &slice + err = e + response.Result = result + } + + return +} + +// GetAllSubscriptions : Get all subscriptions tied to a given artifact +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) GetAllSubscriptions(getAllSubscriptionsOptions *GetAllSubscriptionsOptions) (result *[]V2Subscription, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getAllSubscriptionsOptions, "getAllSubscriptionsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getAllSubscriptionsOptions, "getAllSubscriptionsOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/subscriptions"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getAllSubscriptionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "GetAllSubscriptions") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + if getAllSubscriptionsOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*getAllSubscriptionsOptions.Authorization)) + } + + builder.AddQuery("artifact_id", fmt.Sprint(*getAllSubscriptionsOptions.ArtifactID)) + if getAllSubscriptionsOptions.Type != nil { + builder.AddQuery("type", fmt.Sprint(*getAllSubscriptionsOptions.Type)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make([]map[string]interface{}, 1)) + if err == nil { + s, ok := response.Result.([]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + slice, e := UnmarshalV2SubscriptionSlice(s) + result = &slice + err = e + response.Result = result + } + + return +} + +// CreateSubscription : Create a subscription for an artifact +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) CreateSubscription(createSubscriptionOptions *CreateSubscriptionOptions) (result *V2Subscription, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createSubscriptionOptions, "createSubscriptionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createSubscriptionOptions, "createSubscriptionOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/subscriptions"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.POST) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range createSubscriptionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "CreateSubscription") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createSubscriptionOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*createSubscriptionOptions.Authorization)) + } + + body := make(map[string]interface{}) + if createSubscriptionOptions.ClientID != nil { + body["client_id"] = 
createSubscriptionOptions.ClientID + } + if createSubscriptionOptions.ArtifactID != nil { + body["artifact_id"] = createSubscriptionOptions.ArtifactID + } + if createSubscriptionOptions.ClientSecret != nil { + body["client_secret"] = createSubscriptionOptions.ClientSecret + } + if createSubscriptionOptions.GenerateSecret != nil { + body["generate_secret"] = createSubscriptionOptions.GenerateSecret + } + if createSubscriptionOptions.AccountID != nil { + body["account_id"] = createSubscriptionOptions.AccountID + } + if createSubscriptionOptions.Name != nil { + body["name"] = createSubscriptionOptions.Name + } + if createSubscriptionOptions.Type != nil { + body["type"] = createSubscriptionOptions.Type + } + if createSubscriptionOptions.Metadata != nil { + body["metadata"] = createSubscriptionOptions.Metadata + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{})) + if err == nil { + m, ok := response.Result.(map[string]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + result, err = UnmarshalV2Subscription(m) + response.Result = result + } + + return +} + +// GetSubscription : Get subscription for a given clientid +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) GetSubscription(getSubscriptionOptions *GetSubscriptionOptions) (result *V2Subscription, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSubscriptionOptions, "getSubscriptionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSubscriptionOptions, "getSubscriptionOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/subscriptions"} + pathParameters := []string{*getSubscriptionOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getSubscriptionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "GetSubscription") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + if getSubscriptionOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*getSubscriptionOptions.Authorization)) + } + + builder.AddQuery("artifact_id", fmt.Sprint(*getSubscriptionOptions.ArtifactID)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{})) + if err == nil { + m, ok := response.Result.(map[string]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + result, err = UnmarshalV2Subscription(m) + response.Result = result + } + + return +} + +// UpdateSubscription : Update a subscription +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) UpdateSubscription(updateSubscriptionOptions *UpdateSubscriptionOptions) (result *V2Subscription, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateSubscriptionOptions, "updateSubscriptionOptions cannot be nil") + if err != nil { + return + } + err = 
core.ValidateStruct(updateSubscriptionOptions, "updateSubscriptionOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/subscriptions"} + pathParameters := []string{*updateSubscriptionOptions.ID} + + builder := core.NewRequestBuilder(core.PUT) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range updateSubscriptionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "UpdateSubscription") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateSubscriptionOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*updateSubscriptionOptions.Authorization)) + } + + builder.AddQuery("artifact_id", fmt.Sprint(*updateSubscriptionOptions.ArtifactID)) + + body := make(map[string]interface{}) + if updateSubscriptionOptions.NewClientID != nil { + body["client_id"] = updateSubscriptionOptions.NewClientID + } + if updateSubscriptionOptions.NewClientSecret != nil { + body["client_secret"] = updateSubscriptionOptions.NewClientSecret + } + if updateSubscriptionOptions.NewArtifactID != nil { + body["artifact_id"] = updateSubscriptionOptions.NewArtifactID + } + if updateSubscriptionOptions.NewAccountID != nil { + body["account_id"] = updateSubscriptionOptions.NewAccountID + } + if updateSubscriptionOptions.NewName != nil { + body["name"] = updateSubscriptionOptions.NewName + } + if updateSubscriptionOptions.NewType != nil { + body["type"] = updateSubscriptionOptions.NewType + } + if updateSubscriptionOptions.NewMetadata != nil { + body["metadata"] = updateSubscriptionOptions.NewMetadata + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{})) + if err == nil { + m, ok := response.Result.(map[string]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + result, err = UnmarshalV2Subscription(m) + response.Result = result + } + + return +} + +// DeleteSubscription : Delete a subscription +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) DeleteSubscription(deleteSubscriptionOptions *DeleteSubscriptionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteSubscriptionOptions, "deleteSubscriptionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteSubscriptionOptions, "deleteSubscriptionOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/subscriptions"} + pathParameters := []string{*deleteSubscriptionOptions.ID} + + builder := core.NewRequestBuilder(core.DELETE) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range deleteSubscriptionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "DeleteSubscription") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + if 
deleteSubscriptionOptions.Authorization != nil { + builder.AddHeader("authorization", fmt.Sprint(*deleteSubscriptionOptions.Authorization)) + } + + builder.AddQuery("artifact_id", fmt.Sprint(*deleteSubscriptionOptions.ArtifactID)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, nil) + + return +} + +// GetSubscriptionArtifact : Get artifact associated to a subscription +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) GetSubscriptionArtifact(getSubscriptionArtifactOptions *GetSubscriptionArtifactOptions) (result *InlineResponse200, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSubscriptionArtifactOptions, "getSubscriptionArtifactOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSubscriptionArtifactOptions, "getSubscriptionArtifactOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/subscriptions/artifact"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getSubscriptionArtifactOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "GetSubscriptionArtifact") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("artifact_id", fmt.Sprint(*getSubscriptionArtifactOptions.ArtifactID)) + if getSubscriptionArtifactOptions.ClientID != nil { + builder.AddQuery("client_id", fmt.Sprint(*getSubscriptionArtifactOptions.ClientID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{})) + if err == nil { + m, ok := response.Result.(map[string]interface{}) + if !ok { + err = fmt.Errorf("an error occurred while processing the operation response") + return + } + result, err = UnmarshalInlineResponse200(m) + response.Result = result + } + + return +} + +// AddSubscriptionSecret : Add a Subscription Secret +func (apiGatewayControllerApi *ApiGatewayControllerApiV1) AddSubscriptionSecret(addSubscriptionSecretOptions *AddSubscriptionSecretOptions) (result *V2Subscription, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(addSubscriptionSecretOptions, "addSubscriptionSecretOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(addSubscriptionSecretOptions, "addSubscriptionSecretOptions") + if err != nil { + return + } + + pathSegments := []string{"v2/subscriptions", "secret"} + pathParameters := []string{*addSubscriptionSecretOptions.ID} + + builder := core.NewRequestBuilder(core.PUT) + _, err = builder.ConstructHTTPURL(apiGatewayControllerApi.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range addSubscriptionSecretOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("api_gateway_controller_api", "V1", "AddSubscriptionSecret") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + 
if addSubscriptionSecretOptions.Authorization != nil {
+		builder.AddHeader("authorization", fmt.Sprint(*addSubscriptionSecretOptions.Authorization))
+	}
+
+	// artifact_id is a required query parameter for this operation.
+	builder.AddQuery("artifact_id", fmt.Sprint(*addSubscriptionSecretOptions.ArtifactID))
+
+	body := make(map[string]interface{})
+	if addSubscriptionSecretOptions.ClientSecret != nil {
+		body["client_secret"] = addSubscriptionSecretOptions.ClientSecret
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = apiGatewayControllerApi.Service.Request(request, make(map[string]interface{}))
+	if err == nil {
+		m, ok := response.Result.(map[string]interface{})
+		if !ok {
+			err = fmt.Errorf("an error occurred while processing the operation response")
+			return
+		}
+		result, err = UnmarshalV2Subscription(m)
+		response.Result = result
+	}
+
+	return
+}
+
+// AddSubscriptionSecretOptions : The AddSubscriptionSecret options.
+type AddSubscriptionSecretOptions struct {
+	// Client Id.
+	ID *string `json:"id" validate:"required"`
+
+	// Artifact Id.
+	ArtifactID *string `json:"artifact_id" validate:"required"`
+
+	// User bearer token.
+	Authorization *string `json:"authorization" validate:"required"`
+
+	ClientSecret *string `json:"client_secret,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewAddSubscriptionSecretOptions : Instantiate AddSubscriptionSecretOptions
+func (*ApiGatewayControllerApiV1) NewAddSubscriptionSecretOptions(id string, artifactID string, authorization string) *AddSubscriptionSecretOptions {
+	return &AddSubscriptionSecretOptions{
+		ID:            core.StringPtr(id),
+		ArtifactID:    core.StringPtr(artifactID),
+		Authorization: core.StringPtr(authorization),
+	}
+}
+
+// SetID : Allow user to set ID
+func (options *AddSubscriptionSecretOptions) SetID(id string) *AddSubscriptionSecretOptions {
+	options.ID = core.StringPtr(id)
+	return options
+}
+
+// SetArtifactID : Allow user to set ArtifactID
+func (options *AddSubscriptionSecretOptions) SetArtifactID(artifactID string) *AddSubscriptionSecretOptions {
+	options.ArtifactID = core.StringPtr(artifactID)
+	return options
+}
+
+// SetAuthorization : Allow user to set Authorization
+func (options *AddSubscriptionSecretOptions) SetAuthorization(authorization string) *AddSubscriptionSecretOptions {
+	options.Authorization = core.StringPtr(authorization)
+	return options
+}
+
+// SetClientSecret : Allow user to set ClientSecret
+func (options *AddSubscriptionSecretOptions) SetClientSecret(clientSecret string) *AddSubscriptionSecretOptions {
+	options.ClientSecret = core.StringPtr(clientSecret)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *AddSubscriptionSecretOptions) SetHeaders(param map[string]string) *AddSubscriptionSecretOptions {
+	options.Headers = param
+	return options
+}
+
+// CreateEndpointOptions : The CreateEndpoint options.
+type CreateEndpointOptions struct {
+	// User IAM token.
+	Authorization *string `json:"authorization" validate:"required"`
+
+	// The endpoint ID.
+	ArtifactID *string `json:"artifact_id,omitempty"`
+
+	// The API Gateway service instance CRN.
+	ParentCrn *string `json:"parent_crn" validate:"required"`
+
+	// The API Gateway service instance CRN.
+	ServiceInstanceCrn *string `json:"service_instance_crn" validate:"required"`
+
+	// The endpoint's name.
+	Name *string `json:"name" validate:"required"`
+
+	// Invokable endpoint routes.
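+	// For example (illustrative values, not from this repository):
+	// []string{"/hello", "/goodbye"}.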
+	Routes []string `json:"routes,omitempty"`
+
+	// Is the endpoint managed?
+	Managed *bool `json:"managed,omitempty"`
+
+	Metadata interface{} `json:"metadata,omitempty"`
+
+	// The OpenAPI document.
+	OpenApiDoc interface{} `json:"open_api_doc" validate:"required"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewCreateEndpointOptions : Instantiate CreateEndpointOptions
+func (*ApiGatewayControllerApiV1) NewCreateEndpointOptions(authorization string, artifactID string, parentCrn string, serviceInstanceCrn string) *CreateEndpointOptions {
+	return &CreateEndpointOptions{
+		Authorization:      core.StringPtr(authorization),
+		ArtifactID:         core.StringPtr(artifactID),
+		ParentCrn:          core.StringPtr(parentCrn),
+		ServiceInstanceCrn: core.StringPtr(serviceInstanceCrn),
+	}
+}
+
+// SetAuthorization : Allow user to set Authorization
+func (options *CreateEndpointOptions) SetAuthorization(authorization string) *CreateEndpointOptions {
+	options.Authorization = core.StringPtr(authorization)
+	return options
+}
+
+// SetArtifactID : Allow user to set ArtifactID
+func (options *CreateEndpointOptions) SetArtifactID(artifactID string) *CreateEndpointOptions {
+	options.ArtifactID = core.StringPtr(artifactID)
+	return options
+}
+
+// SetParentCrn : Allow user to set ParentCrn
+func (options *CreateEndpointOptions) SetParentCrn(parentCrn string) *CreateEndpointOptions {
+	options.ParentCrn = core.StringPtr(parentCrn)
+	return options
+}
+
+// SetServiceInstanceCrn : Allow user to set ServiceInstanceCrn
+func (options *CreateEndpointOptions) SetServiceInstanceCrn(serviceInstanceCrn string) *CreateEndpointOptions {
+	options.ServiceInstanceCrn = core.StringPtr(serviceInstanceCrn)
+	return options
+}
+
+// SetName : Allow user to set Name
+func (options *CreateEndpointOptions) SetName(name string) *CreateEndpointOptions {
+	options.Name = core.StringPtr(name)
+	return options
+}
+
+// SetRoutes : Allow user to set Routes
+func (options *CreateEndpointOptions) SetRoutes(routes []string) *CreateEndpointOptions {
+	options.Routes = routes
+	return options
+}
+
+// SetManaged : Allow user to set Managed
+func (options *CreateEndpointOptions) SetManaged(managed bool) *CreateEndpointOptions {
+	options.Managed = core.BoolPtr(managed)
+	return options
+}
+
+// SetMetadata : Allow user to set Metadata
+func (options *CreateEndpointOptions) SetMetadata(metadata interface{}) *CreateEndpointOptions {
+	options.Metadata = metadata
+	return options
+}
+
+// SetOpenApiDoc : Allow user to set OpenApiDoc
+func (options *CreateEndpointOptions) SetOpenApiDoc(openApiDoc interface{}) *CreateEndpointOptions {
+	options.OpenApiDoc = openApiDoc
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CreateEndpointOptions) SetHeaders(param map[string]string) *CreateEndpointOptions {
+	options.Headers = param
+	return options
+}
+
+// CreateSubscriptionOptions : The CreateSubscription options.
+type CreateSubscriptionOptions struct {
+	// User bearer token.
+	Authorization *string `json:"authorization" validate:"required"`
+
+	ClientID *string `json:"client_id,omitempty"`
+
+	ArtifactID *string `json:"artifact_id" validate:"required"`
+
+	ClientSecret *string `json:"client_secret,omitempty"`
+
+	GenerateSecret *bool `json:"generate_secret,omitempty"`
+
+	AccountID *string `json:"account_id,omitempty"`
+
+	Name *string `json:"name,omitempty"`
+
+	Type *string `json:"type,omitempty"`
+
+	Metadata interface{} `json:"metadata,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewCreateSubscriptionOptions : Instantiate CreateSubscriptionOptions
+func (*ApiGatewayControllerApiV1) NewCreateSubscriptionOptions(authorization string, clientID string, artifactID string) *CreateSubscriptionOptions {
+	return &CreateSubscriptionOptions{
+		Authorization: core.StringPtr(authorization),
+		ClientID:      core.StringPtr(clientID),
+		ArtifactID:    core.StringPtr(artifactID),
+	}
+}
+
+// SetAuthorization : Allow user to set Authorization
+func (options *CreateSubscriptionOptions) SetAuthorization(authorization string) *CreateSubscriptionOptions {
+	options.Authorization = core.StringPtr(authorization)
+	return options
+}
+
+// SetClientID : Allow user to set ClientID
+func (options *CreateSubscriptionOptions) SetClientID(clientID string) *CreateSubscriptionOptions {
+	options.ClientID = core.StringPtr(clientID)
+	return options
+}
+
+// SetArtifactID : Allow user to set ArtifactID
+func (options *CreateSubscriptionOptions) SetArtifactID(artifactID string) *CreateSubscriptionOptions {
+	options.ArtifactID = core.StringPtr(artifactID)
+	return options
+}
+
+// SetClientSecret : Allow user to set ClientSecret
+func (options *CreateSubscriptionOptions) SetClientSecret(clientSecret string) *CreateSubscriptionOptions {
+	options.ClientSecret = core.StringPtr(clientSecret)
+	return options
+}
+
+// SetAccountID : Allow user to set AccountID
+func (options *CreateSubscriptionOptions) SetAccountID(accountID string) *CreateSubscriptionOptions {
+	options.AccountID = core.StringPtr(accountID)
+	return options
+}
+
+// SetName : Allow user to set Name
+func (options *CreateSubscriptionOptions) SetName(name string) *CreateSubscriptionOptions {
+	options.Name = core.StringPtr(name)
+	return options
+}
+
+// SetType : Allow user to set Type
+func (options *CreateSubscriptionOptions) SetType(typeVar string) *CreateSubscriptionOptions {
+	options.Type = core.StringPtr(typeVar)
+	return options
+}
+
+// SetMetadata : Allow user to set Metadata
+func (options *CreateSubscriptionOptions) SetMetadata(metadata interface{}) *CreateSubscriptionOptions {
+	options.Metadata = metadata
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CreateSubscriptionOptions) SetHeaders(param map[string]string) *CreateSubscriptionOptions {
+	options.Headers = param
+	return options
+}
+
+// DeleteEndpointOptions : The DeleteEndpoint options.
+type DeleteEndpointOptions struct {
+	// Endpoint id.
+	ID *string `json:"id" validate:"required"`
+
+	// Service Instance CRN.
+	ServiceInstanceCrn *string `json:"service_instance_crn" validate:"required"`
+
+	// User IAM token.
+ Authorization *string `json:"authorization" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteEndpointOptions : Instantiate DeleteEndpointOptions +func (*ApiGatewayControllerApiV1) NewDeleteEndpointOptions(id string, serviceInstanceCrn string, authorization string) *DeleteEndpointOptions { + return &DeleteEndpointOptions{ + ID: core.StringPtr(id), + ServiceInstanceCrn: core.StringPtr(serviceInstanceCrn), + Authorization: core.StringPtr(authorization), + } +} + +// SetID : Allow user to set ID +func (options *DeleteEndpointOptions) SetID(id string) *DeleteEndpointOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetServiceInstanceCrn : Allow user to set ServiceInstanceCrn +func (options *DeleteEndpointOptions) SetServiceInstanceCrn(serviceInstanceCrn string) *DeleteEndpointOptions { + options.ServiceInstanceCrn = core.StringPtr(serviceInstanceCrn) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *DeleteEndpointOptions) SetAuthorization(authorization string) *DeleteEndpointOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteEndpointOptions) SetHeaders(param map[string]string) *DeleteEndpointOptions { + options.Headers = param + return options +} + +// DeleteSubscriptionOptions : The DeleteSubscription options. +type DeleteSubscriptionOptions struct { + // Client Id. + ID *string `json:"id" validate:"required"` + + // Artifact Id. + ArtifactID *string `json:"artifact_id" validate:"required"` + + // User bearer token. + Authorization *string `json:"authorization" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSubscriptionOptions : Instantiate DeleteSubscriptionOptions +func (*ApiGatewayControllerApiV1) NewDeleteSubscriptionOptions(id string, artifactID string, authorization string) *DeleteSubscriptionOptions { + return &DeleteSubscriptionOptions{ + ID: core.StringPtr(id), + ArtifactID: core.StringPtr(artifactID), + Authorization: core.StringPtr(authorization), + } +} + +// SetID : Allow user to set ID +func (options *DeleteSubscriptionOptions) SetID(id string) *DeleteSubscriptionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetArtifactID : Allow user to set ArtifactID +func (options *DeleteSubscriptionOptions) SetArtifactID(artifactID string) *DeleteSubscriptionOptions { + options.ArtifactID = core.StringPtr(artifactID) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *DeleteSubscriptionOptions) SetAuthorization(authorization string) *DeleteSubscriptionOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSubscriptionOptions) SetHeaders(param map[string]string) *DeleteSubscriptionOptions { + options.Headers = param + return options +} + +// EndpointActionsOptions : The EndpointActions options. +type EndpointActionsOptions struct { + // Endpoint Id. + ID *string `json:"id" validate:"required"` + + // Service Instance CRN. + ServiceInstanceCrn *string `json:"service_instance_crn" validate:"required"` + + // Provider Id. + ProviderID *string `json:"provider_id" validate:"required"` + + // User IAM token. 
+ Authorization *string `json:"authorization" validate:"required"` + + Type *string `json:"type" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewEndpointActionsOptions : Instantiate EndpointActionsOptions +func (*ApiGatewayControllerApiV1) NewEndpointActionsOptions(id string, serviceInstanceCrn string, providerID string, authorization string, typeVar string) *EndpointActionsOptions { + return &EndpointActionsOptions{ + ID: core.StringPtr(id), + ServiceInstanceCrn: core.StringPtr(serviceInstanceCrn), + ProviderID: core.StringPtr(providerID), + Authorization: core.StringPtr(authorization), + Type: core.StringPtr(typeVar), + } +} + +// SetID : Allow user to set ID +func (options *EndpointActionsOptions) SetID(id string) *EndpointActionsOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetServiceInstanceCrn : Allow user to set ServiceInstanceCrn +func (options *EndpointActionsOptions) SetServiceInstanceCrn(serviceInstanceCrn string) *EndpointActionsOptions { + options.ServiceInstanceCrn = core.StringPtr(serviceInstanceCrn) + return options +} + +// SetProviderID : Allow user to set ProviderID +func (options *EndpointActionsOptions) SetProviderID(providerID string) *EndpointActionsOptions { + options.ProviderID = core.StringPtr(providerID) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *EndpointActionsOptions) SetAuthorization(authorization string) *EndpointActionsOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetType : Allow user to set Type +func (options *EndpointActionsOptions) SetType(typeVar string) *EndpointActionsOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *EndpointActionsOptions) SetHeaders(param map[string]string) *EndpointActionsOptions { + options.Headers = param + return options +} + +// EndpointSummaryOptions : The EndpointSummary options. +type EndpointSummaryOptions struct { + // User account ID. + AccountID *string `json:"account_id" validate:"required"` + + // User IAM token. + Authorization *string `json:"authorization" validate:"required"` + + // Service Instance CRN. + ServiceInstanceCrn *string `json:"service_instance_crn,omitempty"` + + // Return OpenAPI doc with list results. Possible values are ['provider', 'consumer']. Defaults to 'provider'. 
+ Swagger *string `json:"swagger,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewEndpointSummaryOptions : Instantiate EndpointSummaryOptions +func (*ApiGatewayControllerApiV1) NewEndpointSummaryOptions(accountID string, authorization string) *EndpointSummaryOptions { + return &EndpointSummaryOptions{ + AccountID: core.StringPtr(accountID), + Authorization: core.StringPtr(authorization), + } +} + +// SetAccountID : Allow user to set AccountID +func (options *EndpointSummaryOptions) SetAccountID(accountID string) *EndpointSummaryOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *EndpointSummaryOptions) SetAuthorization(authorization string) *EndpointSummaryOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetServiceInstanceCrn : Allow user to set ServiceInstanceCrn +func (options *EndpointSummaryOptions) SetServiceInstanceCrn(serviceInstanceCrn string) *EndpointSummaryOptions { + options.ServiceInstanceCrn = core.StringPtr(serviceInstanceCrn) + return options +} + +// SetSwagger : Allow user to set Swagger +func (options *EndpointSummaryOptions) SetSwagger(swagger string) *EndpointSummaryOptions { + options.Swagger = core.StringPtr(swagger) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *EndpointSummaryOptions) SetHeaders(param map[string]string) *EndpointSummaryOptions { + options.Headers = param + return options +} + +// GetAllEndpointsOptions : The GetAllEndpoints options. +type GetAllEndpointsOptions struct { + // The API Gateway service instance CRN. + ServiceInstanceCrn *string `json:"service_instance_crn" validate:"required"` + + // Your IBM Cloud Identity and Access Management (IAM) token. To retrieve your IAM token, run `ibmcloud iam + // oauth-tokens`. + Authorization *string `json:"authorization" validate:"required"` + + // Provider Id. + ProviderID *string `json:"provider_id,omitempty"` + + // Only return shared endpoints. + Shared *bool `json:"shared,omitempty"` + + // Only return managed endpoints. + Managed *bool `json:"managed,omitempty"` + + // Return OpenAPI doc with list results. Possible values are ['provider', 'consumer']. Defaults to 'provider'. 
+ Swagger *string `json:"swagger,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAllEndpointsOptions : Instantiate GetAllEndpointsOptions +func (*ApiGatewayControllerApiV1) NewGetAllEndpointsOptions(serviceInstanceCrn string, authorization string) *GetAllEndpointsOptions { + return &GetAllEndpointsOptions{ + ServiceInstanceCrn: core.StringPtr(serviceInstanceCrn), + Authorization: core.StringPtr(authorization), + } +} + +// SetServiceInstanceCrn : Allow user to set ServiceInstanceCrn +func (options *GetAllEndpointsOptions) SetServiceInstanceCrn(serviceInstanceCrn string) *GetAllEndpointsOptions { + options.ServiceInstanceCrn = core.StringPtr(serviceInstanceCrn) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *GetAllEndpointsOptions) SetAuthorization(authorization string) *GetAllEndpointsOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetProviderID : Allow user to set ProviderID +func (options *GetAllEndpointsOptions) SetProviderID(providerID string) *GetAllEndpointsOptions { + options.ProviderID = core.StringPtr(providerID) + return options +} + +// SetShared : Allow user to set Shared +func (options *GetAllEndpointsOptions) SetShared(shared bool) *GetAllEndpointsOptions { + options.Shared = core.BoolPtr(shared) + return options +} + +// SetManaged : Allow user to set Managed +func (options *GetAllEndpointsOptions) SetManaged(managed bool) *GetAllEndpointsOptions { + options.Managed = core.BoolPtr(managed) + return options +} + +// SetSwagger : Allow user to set Swagger +func (options *GetAllEndpointsOptions) SetSwagger(swagger string) *GetAllEndpointsOptions { + options.Swagger = core.StringPtr(swagger) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetAllEndpointsOptions) SetHeaders(param map[string]string) *GetAllEndpointsOptions { + options.Headers = param + return options +} + +// GetAllSubscriptionsOptions : The GetAllSubscriptions options. +type GetAllSubscriptionsOptions struct { + // Artifact Id. + ArtifactID *string `json:"artifact_id" validate:"required"` + + // User bearer token. + Authorization *string `json:"authorization" validate:"required"` + + // Subscription type. 
+ Type *string `json:"type,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAllSubscriptionsOptions : Instantiate GetAllSubscriptionsOptions +func (*ApiGatewayControllerApiV1) NewGetAllSubscriptionsOptions(artifactID string, authorization string) *GetAllSubscriptionsOptions { + return &GetAllSubscriptionsOptions{ + ArtifactID: core.StringPtr(artifactID), + Authorization: core.StringPtr(authorization), + } +} + +// SetArtifactID : Allow user to set ArtifactID +func (options *GetAllSubscriptionsOptions) SetArtifactID(artifactID string) *GetAllSubscriptionsOptions { + options.ArtifactID = core.StringPtr(artifactID) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *GetAllSubscriptionsOptions) SetAuthorization(authorization string) *GetAllSubscriptionsOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetType : Allow user to set Type +func (options *GetAllSubscriptionsOptions) SetType(typeVar string) *GetAllSubscriptionsOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetAllSubscriptionsOptions) SetHeaders(param map[string]string) *GetAllSubscriptionsOptions { + options.Headers = param + return options +} + +// GetEndpointOptions : The GetEndpoint options. +type GetEndpointOptions struct { + // Endpoint Id. + ID *string `json:"id" validate:"required"` + + // Service Instance CRN. + ServiceInstanceCrn *string `json:"service_instance_crn" validate:"required"` + + // User IAM token. + Authorization *string `json:"authorization" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetEndpointOptions : Instantiate GetEndpointOptions +func (*ApiGatewayControllerApiV1) NewGetEndpointOptions(id string, serviceInstanceCrn string, authorization string) *GetEndpointOptions { + return &GetEndpointOptions{ + ID: core.StringPtr(id), + ServiceInstanceCrn: core.StringPtr(serviceInstanceCrn), + Authorization: core.StringPtr(authorization), + } +} + +// SetID : Allow user to set ID +func (options *GetEndpointOptions) SetID(id string) *GetEndpointOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetServiceInstanceCrn : Allow user to set ServiceInstanceCrn +func (options *GetEndpointOptions) SetServiceInstanceCrn(serviceInstanceCrn string) *GetEndpointOptions { + options.ServiceInstanceCrn = core.StringPtr(serviceInstanceCrn) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *GetEndpointOptions) SetAuthorization(authorization string) *GetEndpointOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetEndpointOptions) SetHeaders(param map[string]string) *GetEndpointOptions { + options.Headers = param + return options +} + +// GetEndpointSwaggerOptions : The GetEndpointSwagger options. +type GetEndpointSwaggerOptions struct { + // Endpoint Id. + ID *string `json:"id" validate:"required"` + + // Service Instance CRN. + ServiceInstanceCrn *string `json:"service_instance_crn" validate:"required"` + + // User IAM token. + Authorization *string `json:"authorization" validate:"required"` + + // Type of swagger to retrieve. 
+ Type *string `json:"type,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetEndpointSwaggerOptions.Type property. +// Type of swagger to retrieve. +const ( + GetEndpointSwaggerOptions_Type_JSON = "json" + GetEndpointSwaggerOptions_Type_Yaml = "yaml" +) + +// NewGetEndpointSwaggerOptions : Instantiate GetEndpointSwaggerOptions +func (*ApiGatewayControllerApiV1) NewGetEndpointSwaggerOptions(id string, serviceInstanceCrn string, authorization string) *GetEndpointSwaggerOptions { + return &GetEndpointSwaggerOptions{ + ID: core.StringPtr(id), + ServiceInstanceCrn: core.StringPtr(serviceInstanceCrn), + Authorization: core.StringPtr(authorization), + } +} + +// SetID : Allow user to set ID +func (options *GetEndpointSwaggerOptions) SetID(id string) *GetEndpointSwaggerOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetServiceInstanceCrn : Allow user to set ServiceInstanceCrn +func (options *GetEndpointSwaggerOptions) SetServiceInstanceCrn(serviceInstanceCrn string) *GetEndpointSwaggerOptions { + options.ServiceInstanceCrn = core.StringPtr(serviceInstanceCrn) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *GetEndpointSwaggerOptions) SetAuthorization(authorization string) *GetEndpointSwaggerOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetType : Allow user to set Type +func (options *GetEndpointSwaggerOptions) SetType(typeVar string) *GetEndpointSwaggerOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetEndpointSwaggerOptions) SetHeaders(param map[string]string) *GetEndpointSwaggerOptions { + options.Headers = param + return options +} + +// GetSubscriptionArtifactOptions : The GetSubscriptionArtifact options. +type GetSubscriptionArtifactOptions struct { + // Artifact Id. + ArtifactID *string `json:"artifact_id" validate:"required"` + + // Client Id. + ClientID *string `json:"client_id,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSubscriptionArtifactOptions : Instantiate GetSubscriptionArtifactOptions +func (*ApiGatewayControllerApiV1) NewGetSubscriptionArtifactOptions(artifactID string) *GetSubscriptionArtifactOptions { + return &GetSubscriptionArtifactOptions{ + ArtifactID: core.StringPtr(artifactID), + } +} + +// SetArtifactID : Allow user to set ArtifactID +func (options *GetSubscriptionArtifactOptions) SetArtifactID(artifactID string) *GetSubscriptionArtifactOptions { + options.ArtifactID = core.StringPtr(artifactID) + return options +} + +// SetClientID : Allow user to set ClientID +func (options *GetSubscriptionArtifactOptions) SetClientID(clientID string) *GetSubscriptionArtifactOptions { + options.ClientID = core.StringPtr(clientID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSubscriptionArtifactOptions) SetHeaders(param map[string]string) *GetSubscriptionArtifactOptions { + options.Headers = param + return options +} + +// GetSubscriptionOptions : The GetSubscription options. +type GetSubscriptionOptions struct { + // Client Id. + ID *string `json:"id" validate:"required"` + + // Artifact Id. + ArtifactID *string `json:"artifact_id" validate:"required"` + + // User bearer token. 
+ Authorization *string `json:"authorization" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSubscriptionOptions : Instantiate GetSubscriptionOptions +func (*ApiGatewayControllerApiV1) NewGetSubscriptionOptions(id string, artifactID string, authorization string) *GetSubscriptionOptions { + return &GetSubscriptionOptions{ + ID: core.StringPtr(id), + ArtifactID: core.StringPtr(artifactID), + Authorization: core.StringPtr(authorization), + } +} + +// SetID : Allow user to set ID +func (options *GetSubscriptionOptions) SetID(id string) *GetSubscriptionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetArtifactID : Allow user to set ArtifactID +func (options *GetSubscriptionOptions) SetArtifactID(artifactID string) *GetSubscriptionOptions { + options.ArtifactID = core.StringPtr(artifactID) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *GetSubscriptionOptions) SetAuthorization(authorization string) *GetSubscriptionOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSubscriptionOptions) SetHeaders(param map[string]string) *GetSubscriptionOptions { + options.Headers = param + return options +} + +// UpdateEndpointOptions : The UpdateEndpoint options. +type UpdateEndpointOptions struct { + // Endpoint Id. + ID *string `json:"id" validate:"required"` + + // Service Instance CRN. + ServiceInstanceCrn *string `json:"service_instance_crn" validate:"required"` + + // User IAM token. + Authorization *string `json:"authorization" validate:"required"` + + // The endpoint ID. + NewArtifactID *string `json:"new_artifact_id" validate:"required"` + + // The API Gateway service instance CRN. + NewParentCrn *string `json:"new_parent_crn" validate:"required"` + + // The API Gateway service instance CRN. + NewServiceInstanceCrn *string `json:"new_service_instance_crn" validate:"required"` + + // The endpoint's name. + NewName *string `json:"new_name,omitempty"` + + // Invokable endpoint routes. + NewRoutes []string `json:"new_routes,omitempty"` + + // Is the endpoint managed?. + NewManaged *bool `json:"new_managed,omitempty"` + + NewMetadata interface{} `json:"new_metadata,omitempty"` + + // The OpenAPI document. 
+ NewOpenApiDoc interface{} `json:"new_open_api_doc,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateEndpointOptions : Instantiate UpdateEndpointOptions +func (*ApiGatewayControllerApiV1) NewUpdateEndpointOptions(id string, serviceInstanceCrn string, authorization string, newArtifactID string, newParentCrn string, newServiceInstanceCrn string) *UpdateEndpointOptions { + return &UpdateEndpointOptions{ + ID: core.StringPtr(id), + ServiceInstanceCrn: core.StringPtr(serviceInstanceCrn), + Authorization: core.StringPtr(authorization), + NewArtifactID: core.StringPtr(newArtifactID), + NewParentCrn: core.StringPtr(newParentCrn), + NewServiceInstanceCrn: core.StringPtr(newServiceInstanceCrn), + } +} + +// SetID : Allow user to set ID +func (options *UpdateEndpointOptions) SetID(id string) *UpdateEndpointOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetServiceInstanceCrn : Allow user to set ServiceInstanceCrn +func (options *UpdateEndpointOptions) SetServiceInstanceCrn(serviceInstanceCrn string) *UpdateEndpointOptions { + options.ServiceInstanceCrn = core.StringPtr(serviceInstanceCrn) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *UpdateEndpointOptions) SetAuthorization(authorization string) *UpdateEndpointOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetNewArtifactID : Allow user to set NewArtifactID +func (options *UpdateEndpointOptions) SetNewArtifactID(newArtifactID string) *UpdateEndpointOptions { + options.NewArtifactID = core.StringPtr(newArtifactID) + return options +} + +// SetNewParentCrn : Allow user to set NewParentCrn +func (options *UpdateEndpointOptions) SetNewParentCrn(newParentCrn string) *UpdateEndpointOptions { + options.NewParentCrn = core.StringPtr(newParentCrn) + return options +} + +// SetNewServiceInstanceCrn : Allow user to set NewServiceInstanceCrn +func (options *UpdateEndpointOptions) SetNewServiceInstanceCrn(newServiceInstanceCrn string) *UpdateEndpointOptions { + options.NewServiceInstanceCrn = core.StringPtr(newServiceInstanceCrn) + return options +} + +// SetNewName : Allow user to set NewName +func (options *UpdateEndpointOptions) SetNewName(newName string) *UpdateEndpointOptions { + options.NewName = core.StringPtr(newName) + return options +} + +// SetNewRoutes : Allow user to set NewRoutes +func (options *UpdateEndpointOptions) SetNewRoutes(newRoutes []string) *UpdateEndpointOptions { + options.NewRoutes = newRoutes + return options +} + +// SetNewManaged : Allow user to set NewManaged +func (options *UpdateEndpointOptions) SetNewManaged(newManaged bool) *UpdateEndpointOptions { + options.NewManaged = core.BoolPtr(newManaged) + return options +} + +// SetNewMetadata : Allow user to set NewMetadata +func (options *UpdateEndpointOptions) SetNewMetadata(newMetadata interface{}) *UpdateEndpointOptions { + options.NewMetadata = newMetadata + return options +} + +// SetNewOpenApiDoc : Allow user to set NewOpenApiDoc +func (options *UpdateEndpointOptions) SetNewOpenApiDoc(newOpenApiDoc interface{}) *UpdateEndpointOptions { + options.NewOpenApiDoc = newOpenApiDoc + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateEndpointOptions) SetHeaders(param map[string]string) *UpdateEndpointOptions { + options.Headers = param + return options +} + +// UpdateSubscriptionOptions : The UpdateSubscription options. +type UpdateSubscriptionOptions struct { + // Client Id. 
+ ID *string `json:"id" validate:"required"` + + // Artifact Id. + ArtifactID *string `json:"artifact_id" validate:"required"` + + // User bearer token. + Authorization *string `json:"authorization" validate:"required"` + + NewClientID *string `json:"new_client_id,omitempty"` + + NewClientSecret *string `json:"new_client_secret,omitempty"` + + NewArtifactID *string `json:"new_artifact_id,omitempty"` + + NewAccountID *string `json:"new_account_id,omitempty"` + + NewName *string `json:"new_name,omitempty"` + + NewType *string `json:"new_type,omitempty"` + + NewMetadata interface{} `json:"new_metadata,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateSubscriptionOptions : Instantiate UpdateSubscriptionOptions +func (*ApiGatewayControllerApiV1) NewUpdateSubscriptionOptions(id string, artifactID string, authorization string) *UpdateSubscriptionOptions { + return &UpdateSubscriptionOptions{ + ID: core.StringPtr(id), + ArtifactID: core.StringPtr(artifactID), + Authorization: core.StringPtr(authorization), + } +} + +// SetID : Allow user to set ID +func (options *UpdateSubscriptionOptions) SetID(id string) *UpdateSubscriptionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetArtifactID : Allow user to set ArtifactID +func (options *UpdateSubscriptionOptions) SetArtifactID(artifactID string) *UpdateSubscriptionOptions { + options.ArtifactID = core.StringPtr(artifactID) + return options +} + +// SetAuthorization : Allow user to set Authorization +func (options *UpdateSubscriptionOptions) SetAuthorization(authorization string) *UpdateSubscriptionOptions { + options.Authorization = core.StringPtr(authorization) + return options +} + +// SetNewClientID : Allow user to set NewClientID +func (options *UpdateSubscriptionOptions) SetNewClientID(newClientID string) *UpdateSubscriptionOptions { + options.NewClientID = core.StringPtr(newClientID) + return options +} + +// SetNewClientSecret : Allow user to set NewClientSecret +func (options *UpdateSubscriptionOptions) SetNewClientSecret(newClientSecret string) *UpdateSubscriptionOptions { + options.NewClientSecret = core.StringPtr(newClientSecret) + return options +} + +// SetNewArtifactID : Allow user to set NewArtifactID +func (options *UpdateSubscriptionOptions) SetNewArtifactID(newArtifactID string) *UpdateSubscriptionOptions { + options.NewArtifactID = core.StringPtr(newArtifactID) + return options +} + +// SetNewAccountID : Allow user to set NewAccountID +func (options *UpdateSubscriptionOptions) SetNewAccountID(newAccountID string) *UpdateSubscriptionOptions { + options.NewAccountID = core.StringPtr(newAccountID) + return options +} + +// SetNewName : Allow user to set NewName +func (options *UpdateSubscriptionOptions) SetNewName(newName string) *UpdateSubscriptionOptions { + options.NewName = core.StringPtr(newName) + return options +} + +// SetNewType : Allow user to set NewType +func (options *UpdateSubscriptionOptions) SetNewType(newType string) *UpdateSubscriptionOptions { + options.NewType = core.StringPtr(newType) + return options +} + +// SetNewMetadata : Allow user to set NewMetadata +func (options *UpdateSubscriptionOptions) SetNewMetadata(newMetadata interface{}) *UpdateSubscriptionOptions { + options.NewMetadata = newMetadata + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSubscriptionOptions) SetHeaders(param map[string]string) *UpdateSubscriptionOptions { + options.Headers = param + return options +} + +// 
InlineResponse200 : Subscription artifact requested. +type InlineResponse200 struct { + OpenApiDoc interface{} `json:"open_api_doc,omitempty"` + + ManagedURL *string `json:"managed_url,omitempty"` +} + +// UnmarshalInlineResponse200 constructs an instance of InlineResponse200 from the specified map. +func UnmarshalInlineResponse200(m map[string]interface{}) (result *InlineResponse200, err error) { + obj := new(InlineResponse200) + obj.OpenApiDoc, err = core.UnmarshalObject(m, "open_api_doc") + if err != nil { + return + } + obj.ManagedURL, err = core.UnmarshalString(m, "managed_url") + if err != nil { + return + } + result = obj + return +} + +// UnmarshalInlineResponse200Slice unmarshals a slice of InlineResponse200 instances from the specified list of maps. +func UnmarshalInlineResponse200Slice(s []interface{}) (slice []InlineResponse200, err error) { + for _, v := range s { + objMap, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf("slice element should be a map containing an instance of 'InlineResponse200'") + return + } + obj, e := UnmarshalInlineResponse200(objMap) + if e != nil { + err = e + return + } + slice = append(slice, *obj) + } + return +} + +// UnmarshalInlineResponse200AsProperty unmarshals an instance of InlineResponse200 that is stored as a property +// within the specified map. +func UnmarshalInlineResponse200AsProperty(m map[string]interface{}, propertyName string) (result *InlineResponse200, err error) { + v, foundIt := m[propertyName] + if foundIt { + objMap, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf("map property '%s' should be a map containing an instance of 'InlineResponse200'", propertyName) + return + } + result, err = UnmarshalInlineResponse200(objMap) + } + return +} + +// UnmarshalInlineResponse200SliceAsProperty unmarshals a slice of InlineResponse200 instances that are stored as a property +// within the specified map. +func UnmarshalInlineResponse200SliceAsProperty(m map[string]interface{}, propertyName string) (slice []InlineResponse200, err error) { + v, foundIt := m[propertyName] + if foundIt { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf("map property '%s' should be a slice of maps, each containing an instance of 'InlineResponse200'", propertyName) + return + } + slice, err = UnmarshalInlineResponse200Slice(vSlice) + } + return +} + +// V2DiscoveryConfig : V2DiscoveryConfig struct +type V2DiscoveryConfig struct { + Headers interface{} `json:"headers,omitempty"` + + BridgeURL *string `json:"bridge_url" validate:"required"` +} + +// UnmarshalV2DiscoveryConfig constructs an instance of V2DiscoveryConfig from the specified map. +func UnmarshalV2DiscoveryConfig(m map[string]interface{}) (result *V2DiscoveryConfig, err error) { + obj := new(V2DiscoveryConfig) + obj.Headers, err = core.UnmarshalObject(m, "headers") + if err != nil { + return + } + obj.BridgeURL, err = core.UnmarshalString(m, "bridge_url") + if err != nil { + return + } + result = obj + return +} + +// UnmarshalV2DiscoveryConfigSlice unmarshals a slice of V2DiscoveryConfig instances from the specified list of maps. 
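+// For example (an illustrative sketch, not part of the generated source; `items`
+// is assumed to be a decoded JSON array of maps):
+//
+//	cfgs, err := UnmarshalV2DiscoveryConfigSlice(items)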
+func UnmarshalV2DiscoveryConfigSlice(s []interface{}) (slice []V2DiscoveryConfig, err error) {
+	for _, v := range s {
+		objMap, ok := v.(map[string]interface{})
+		if !ok {
+			err = fmt.Errorf("slice element should be a map containing an instance of 'V2DiscoveryConfig'")
+			return
+		}
+		obj, e := UnmarshalV2DiscoveryConfig(objMap)
+		if e != nil {
+			err = e
+			return
+		}
+		slice = append(slice, *obj)
+	}
+	return
+}
+
+// UnmarshalV2DiscoveryConfigAsProperty unmarshals an instance of V2DiscoveryConfig that is stored as a property
+// within the specified map.
+func UnmarshalV2DiscoveryConfigAsProperty(m map[string]interface{}, propertyName string) (result *V2DiscoveryConfig, err error) {
+	v, foundIt := m[propertyName]
+	if foundIt {
+		objMap, ok := v.(map[string]interface{})
+		if !ok {
+			err = fmt.Errorf("map property '%s' should be a map containing an instance of 'V2DiscoveryConfig'", propertyName)
+			return
+		}
+		result, err = UnmarshalV2DiscoveryConfig(objMap)
+	}
+	return
+}
+
+// UnmarshalV2DiscoveryConfigSliceAsProperty unmarshals a slice of V2DiscoveryConfig instances that are stored as a property
+// within the specified map.
+func UnmarshalV2DiscoveryConfigSliceAsProperty(m map[string]interface{}, propertyName string) (slice []V2DiscoveryConfig, err error) {
+	v, foundIt := m[propertyName]
+	if foundIt {
+		vSlice, ok := v.([]interface{})
+		if !ok {
+			err = fmt.Errorf("map property '%s' should be a slice of maps, each containing an instance of 'V2DiscoveryConfig'", propertyName)
+			return
+		}
+		slice, err = UnmarshalV2DiscoveryConfigSlice(vSlice)
+	}
+	return
+}
+
+// V2Endpoint : V2Endpoint struct
+type V2Endpoint struct {
+	// The endpoint ID.
+	ArtifactID *string `json:"artifact_id" validate:"required"`
+
+	// The endpoint CRN.
+	Crn *string `json:"crn" validate:"required"`
+
+	// The API Gateway service instance CRN.
+	ParentCrn *string `json:"parent_crn" validate:"required"`
+
+	// The API Gateway service instance CRN.
+	ServiceInstanceCrn *string `json:"service_instance_crn,omitempty"`
+
+	// The account where the API Gateway service instance was provisioned.
+	AccountID *string `json:"account_id,omitempty"`
+
+	// Endpoint metadata.
+	Metadata interface{} `json:"metadata,omitempty"`
+
+	// The provider type of this endpoint.
+	ProviderID *string `json:"provider_id,omitempty"`
+
+	// The endpoint's name.
+	Name *string `json:"name,omitempty"`
+
+	// Invokable endpoint routes.
+	Routes []string `json:"routes,omitempty"`
+
+	// Invoke your endpoint with this URL.
+	ManagedURL *string `json:"managed_url,omitempty"`
+
+	// Invoke your endpoint with this alias URL.
+	AliasURL *string `json:"alias_url,omitempty"`
+
+	// Is your endpoint shared?
+	Shared *bool `json:"shared,omitempty"`
+
+	// Is your endpoint managed by the API Gateway service instance?
+	Managed *bool `json:"managed,omitempty"`
+
+	// Policies enforced on the endpoint.
+	Policies []map[string]interface{} `json:"policies,omitempty"`
+
+	// The OpenAPI doc representing the endpoint.
+	OpenApiDoc map[string]interface{} `json:"open_api_doc,omitempty"`
+
+	// The base path of the endpoint.
+	BasePath *string `json:"base_path,omitempty"`
+}
+
+// UnmarshalV2Endpoint constructs an instance of V2Endpoint from the specified map.
+func UnmarshalV2Endpoint(m map[string]interface{}) (result *V2Endpoint, err error) { + obj := new(V2Endpoint) + obj.ArtifactID, err = core.UnmarshalString(m, "artifact_id") + if err != nil { + return + } + obj.Crn, err = core.UnmarshalString(m, "crn") + if err != nil { + return + } + obj.ParentCrn, err = core.UnmarshalString(m, "parent_crn") + if err != nil { + return + } + obj.ServiceInstanceCrn, err = core.UnmarshalString(m, "service_instance_crn") + if err != nil { + return + } + obj.AccountID, err = core.UnmarshalString(m, "account_id") + if err != nil { + return + } + obj.Metadata, err = core.UnmarshalObject(m, "metadata") + if err != nil { + return + } + obj.ProviderID, err = core.UnmarshalString(m, "provider_id") + if err != nil { + return + } + obj.Name, err = core.UnmarshalString(m, "name") + if err != nil { + return + } + obj.Routes, err = core.UnmarshalStringSlice(m, "routes") + if err != nil { + return + } + obj.ManagedURL, err = core.UnmarshalString(m, "managed_url") + if err != nil { + return + } + obj.AliasURL, err = core.UnmarshalString(m, "alias_url") + if err != nil { + return + } + obj.Shared, err = core.UnmarshalBool(m, "shared") + if err != nil { + return + } + obj.Managed, err = core.UnmarshalBool(m, "managed") + if err != nil { + return + } + obj.Policies, err = core.UnmarshalObjectSlice(m, "policies") + if err != nil { + return + } + obj.OpenApiDoc, err = core.UnmarshalObject(m, "open_api_doc") + if err != nil { + return + } + obj.BasePath, err = core.UnmarshalString(m, "base_path") + if err != nil { + return + } + result = obj + return +} + +// UnmarshalV2EndpointSlice unmarshals a slice of V2Endpoint instances from the specified list of maps. +func UnmarshalV2EndpointSlice(s []interface{}) (slice []V2Endpoint, err error) { + for _, v := range s { + objMap, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf("slice element should be a map containing an instance of 'V2Endpoint'") + return + } + obj, e := UnmarshalV2Endpoint(objMap) + if e != nil { + err = e + return + } + slice = append(slice, *obj) + } + return +} + +// UnmarshalV2EndpointAsProperty unmarshals an instance of V2Endpoint that is stored as a property +// within the specified map. +func UnmarshalV2EndpointAsProperty(m map[string]interface{}, propertyName string) (result *V2Endpoint, err error) { + v, foundIt := m[propertyName] + if foundIt { + objMap, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf("map property '%s' should be a map containing an instance of 'V2Endpoint'", propertyName) + return + } + result, err = UnmarshalV2Endpoint(objMap) + } + return +} + +// UnmarshalV2EndpointSliceAsProperty unmarshals a slice of V2Endpoint instances that are stored as a property +// within the specified map. 
+func UnmarshalV2EndpointSliceAsProperty(m map[string]interface{}, propertyName string) (slice []V2Endpoint, err error) { + v, foundIt := m[propertyName] + if foundIt { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf("map property '%s' should be a slice of maps, each containing an instance of 'V2Endpoint'", propertyName) + return + } + slice, err = UnmarshalV2EndpointSlice(vSlice) + } + return +} + +// V2EndpointSummary : V2EndpointSummary struct +type V2EndpointSummary struct { + ID *string `json:"id,omitempty"` + + DisplayName *string `json:"display_name,omitempty"` + + Metadata interface{} `json:"metadata,omitempty"` + + Discoverable *bool `json:"discoverable,omitempty"` + + DiscoveryConfig *V2DiscoveryConfig `json:"discovery_config,omitempty"` + + Endpoints []V2Endpoint `json:"endpoints,omitempty"` +} + +// UnmarshalV2EndpointSummary constructs an instance of V2EndpointSummary from the specified map. +func UnmarshalV2EndpointSummary(m map[string]interface{}) (result *V2EndpointSummary, err error) { + obj := new(V2EndpointSummary) + obj.ID, err = core.UnmarshalString(m, "id") + if err != nil { + return + } + obj.DisplayName, err = core.UnmarshalString(m, "display_name") + if err != nil { + return + } + obj.Metadata, err = core.UnmarshalObject(m, "metadata") + if err != nil { + return + } + obj.Discoverable, err = core.UnmarshalBool(m, "discoverable") + if err != nil { + return + } + obj.DiscoveryConfig, err = UnmarshalV2DiscoveryConfigAsProperty(m, "discovery_config") + if err != nil { + return + } + obj.Endpoints, err = UnmarshalV2EndpointSliceAsProperty(m, "endpoints") + if err != nil { + return + } + result = obj + return +} + +// UnmarshalV2EndpointSummarySlice unmarshals a slice of V2EndpointSummary instances from the specified list of maps. +func UnmarshalV2EndpointSummarySlice(s []interface{}) (slice []V2EndpointSummary, err error) { + for _, v := range s { + objMap, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf("slice element should be a map containing an instance of 'V2EndpointSummary'") + return + } + obj, e := UnmarshalV2EndpointSummary(objMap) + if e != nil { + err = e + return + } + slice = append(slice, *obj) + } + return +} + +// UnmarshalV2EndpointSummaryAsProperty unmarshals an instance of V2EndpointSummary that is stored as a property +// within the specified map. +func UnmarshalV2EndpointSummaryAsProperty(m map[string]interface{}, propertyName string) (result *V2EndpointSummary, err error) { + v, foundIt := m[propertyName] + if foundIt { + objMap, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf("map property '%s' should be a map containing an instance of 'V2EndpointSummary'", propertyName) + return + } + result, err = UnmarshalV2EndpointSummary(objMap) + } + return +} + +// UnmarshalV2EndpointSummarySliceAsProperty unmarshals a slice of V2EndpointSummary instances that are stored as a property +// within the specified map. 
+func UnmarshalV2EndpointSummarySliceAsProperty(m map[string]interface{}, propertyName string) (slice []V2EndpointSummary, err error) {
+	v, foundIt := m[propertyName]
+	if foundIt {
+		vSlice, ok := v.([]interface{})
+		if !ok {
+			err = fmt.Errorf("map property '%s' should be a slice of maps, each containing an instance of 'V2EndpointSummary'", propertyName)
+			return
+		}
+		slice, err = UnmarshalV2EndpointSummarySlice(vSlice)
+	}
+	return
+}
+
+// V2Subscription : V2Subscription struct
+type V2Subscription struct {
+	ClientID *string `json:"client_id,omitempty"`
+
+	SecretProvided *bool `json:"secret_provided,omitempty"`
+
+	ArtifactID *string `json:"artifact_id" validate:"required"`
+
+	AccountID *string `json:"account_id,omitempty"`
+
+	Name *string `json:"name,omitempty"`
+
+	Type *string `json:"type,omitempty"`
+
+	Metadata interface{} `json:"metadata,omitempty"`
+}
+
+// UnmarshalV2Subscription constructs an instance of V2Subscription from the specified map.
+func UnmarshalV2Subscription(m map[string]interface{}) (result *V2Subscription, err error) {
+	obj := new(V2Subscription)
+	obj.ClientID, err = core.UnmarshalString(m, "client_id")
+	if err != nil {
+		return
+	}
+	obj.SecretProvided, err = core.UnmarshalBool(m, "secret_provided")
+	if err != nil {
+		return
+	}
+	obj.ArtifactID, err = core.UnmarshalString(m, "artifact_id")
+	if err != nil {
+		return
+	}
+	obj.AccountID, err = core.UnmarshalString(m, "account_id")
+	if err != nil {
+		return
+	}
+	obj.Name, err = core.UnmarshalString(m, "name")
+	if err != nil {
+		return
+	}
+	obj.Type, err = core.UnmarshalString(m, "type")
+	if err != nil {
+		return
+	}
+	obj.Metadata, err = core.UnmarshalObject(m, "metadata")
+	if err != nil {
+		return
+	}
+	result = obj
+	return
+}
+
+// UnmarshalV2SubscriptionSlice unmarshals a slice of V2Subscription instances from the specified list of maps.
+func UnmarshalV2SubscriptionSlice(s []interface{}) (slice []V2Subscription, err error) {
+	for _, v := range s {
+		objMap, ok := v.(map[string]interface{})
+		if !ok {
+			err = fmt.Errorf("slice element should be a map containing an instance of 'V2Subscription'")
+			return
+		}
+		obj, e := UnmarshalV2Subscription(objMap)
+		if e != nil {
+			err = e
+			return
+		}
+		slice = append(slice, *obj)
+	}
+	return
+}
+
+// UnmarshalV2SubscriptionAsProperty unmarshals an instance of V2Subscription that is stored as a property
+// within the specified map.
+func UnmarshalV2SubscriptionAsProperty(m map[string]interface{}, propertyName string) (result *V2Subscription, err error) {
+	v, foundIt := m[propertyName]
+	if foundIt {
+		objMap, ok := v.(map[string]interface{})
+		if !ok {
+			err = fmt.Errorf("map property '%s' should be a map containing an instance of 'V2Subscription'", propertyName)
+			return
+		}
+		result, err = UnmarshalV2Subscription(objMap)
+	}
+	return
+}
+
+// UnmarshalV2SubscriptionSliceAsProperty unmarshals a slice of V2Subscription instances that are stored as a property
+// within the specified map.
+func UnmarshalV2SubscriptionSliceAsProperty(m map[string]interface{}, propertyName string) (slice []V2Subscription, err error) { + v, foundIt := m[propertyName] + if foundIt { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf("map property '%s' should be a slice of maps, each containing an instance of 'V2Subscription'", propertyName) + return + } + slice, err = UnmarshalV2SubscriptionSlice(vSlice) + } + return +} diff --git a/vendor/github.com/IBM/apigateway-go-sdk/common/headers.go b/vendor/github.com/IBM/apigateway-go-sdk/common/headers.go new file mode 100644 index 00000000000..6099f4c1fb9 --- /dev/null +++ b/vendor/github.com/IBM/apigateway-go-sdk/common/headers.go @@ -0,0 +1,70 @@ +/** + * (C) Copyright IBM Corp. 2019. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "fmt" + "runtime" +) + +const ( + // SdkName - Name of this SDK + SdkName = "go-sdk-template" + + userAgentHeaderName = "User-Agent" +) + +// +// GetSdkHeaders - returns the set of SDK-specific headers to be included in an outgoing request. +// +// This function is invoked by generated service methods (i.e. methods which implement the REST API operations +// defined within the API definition). The purpose of this function is to give the SDK implementor the opportunity +// to provide SDK-specific HTTP headers that will be sent with an outgoing REST API request. +// This function is invoked for each invocation of a generated service method, +// so the set of HTTP headers could be request-specific. +// As an optimization, if your SDK will be returning the same set of HTTP headers for each invocation of this +// function, it is recommended that you initialize the returned map just once (perhaps by using +// lazy initialization) and simply return it each time the function is invoked, instead of building it each time +// as in the example below. +// +// Parameters: +// serviceName - the name of the service as defined in the API definition (e.g. "MyService1") +// serviceVersion - the version of the service as defined in the API definition (e.g. "V1") +// operationID - the operationId as defined in the API definition (e.g. 
getContext) +// +// Returns: +// a Map which contains the set of headers to be included in the REST API request +// +func GetSdkHeaders(serviceName string, serviceVersion string, operationID string) map[string]string { + sdkHeaders := make(map[string]string) + + sdkHeaders[userAgentHeaderName] = getUserAgentInfo() + + return sdkHeaders +} + +var userAgent string = fmt.Sprintf("%s-%s %s", SdkName, Version, getSystemInfo()) + +func getUserAgentInfo() string { + return userAgent +} + +var systemInfo = fmt.Sprintf("(arch=%s; os=%s; go.version=%s)", runtime.GOARCH, runtime.GOOS, runtime.Version()) + +func getSystemInfo() string { + return systemInfo +} diff --git a/vendor/github.com/IBM/apigateway-go-sdk/common/version.go b/vendor/github.com/IBM/apigateway-go-sdk/common/version.go new file mode 100644 index 00000000000..22cd2571232 --- /dev/null +++ b/vendor/github.com/IBM/apigateway-go-sdk/common/version.go @@ -0,0 +1,20 @@ +/** + * (C) Copyright IBM Corp. 2019. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +// Version of the SDK +const Version = "0.0.1" diff --git a/vendor/github.com/IBM/apigateway-go-sdk/go.mod b/vendor/github.com/IBM/apigateway-go-sdk/go.mod new file mode 100644 index 00000000000..42d14d3024f --- /dev/null +++ b/vendor/github.com/IBM/apigateway-go-sdk/go.mod @@ -0,0 +1,21 @@ +module github.com/IBM/apigateway-go-sdk + +go 1.13 + +require ( + github.com/IBM-Cloud/ibm-cloud-cli-sdk v0.6.7 + github.com/IBM/go-sdk-core/v3 v3.2.4 + github.com/davecgh/go-spew v1.1.1 + github.com/fatih/color v1.9.0 // indirect + github.com/fatih/structs v1.1.0 // indirect + github.com/ghodss/yaml v1.0.0 + github.com/go-openapi/strfmt v0.19.4 + github.com/mattn/go-runewidth v0.0.8 // indirect + github.com/nicksnyder/go-i18n v1.10.0 + github.com/onsi/ginkgo v1.12.0 + github.com/onsi/gomega v1.9.0 + github.com/pelletier/go-toml v1.6.0 // indirect + github.com/stretchr/testify v1.4.0 + github.com/urfave/cli v1.22.2 + golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d // indirect +) diff --git a/vendor/github.com/IBM/apigateway-go-sdk/go.sum b/vendor/github.com/IBM/apigateway-go-sdk/go.sum new file mode 100644 index 00000000000..f3c171bf832 --- /dev/null +++ b/vendor/github.com/IBM/apigateway-go-sdk/go.sum @@ -0,0 +1,124 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/IBM-Cloud/ibm-cloud-cli-sdk v0.6.7 h1:eHgfQl6IeSmzWUyiSi13CvoFYsovoyqWlpHX0pa9J54= +github.com/IBM-Cloud/ibm-cloud-cli-sdk v0.6.7/go.mod h1:RiUvKuHKTBmBApDMUQzBL14pQUGKcx/IioKQPIcRQjs= +github.com/IBM/apigateway-go-sdk v0.0.0-20200319165837-991ed0dde8e8 h1:RjmhdfIpRwxMQzyeVrGXEaQ8LKNK+olI5AO+rP2+MYE= +github.com/IBM/go-sdk-core v3.0.0+incompatible h1:1QYh6UklIvEBW3qwVf2xDiFZtKa+PiDzdr9LcaTU4pA= +github.com/IBM/go-sdk-core/v3 v3.2.0 h1:nvn5b2Jy/ivcJViiyPHUsQ4Z98ENMs0cQ28kTX3aCBE= +github.com/IBM/go-sdk-core/v3 v3.2.0/go.mod h1:tk/wYvgcWSqyX3NzVnb9fk2t56erbg/A1oX6vg//7o0= +github.com/IBM/go-sdk-core/v3 v3.2.2 
h1:8TKg0qfVhV9OOV9ws66nulTfVuAU4uv/aNtSNPmEI5w= +github.com/IBM/go-sdk-core/v3 v3.2.2/go.mod h1:lk9eOzNbNltPf3CBpcg1Ewkhw4qC3u2QCCKDRsUA2M0= +github.com/IBM/go-sdk-core/v3 v3.2.4 h1:WKYJYYKlZnw1y/gM+Qbf5EQVAL9xaoD54+ooJZz/iBQ= +github.com/IBM/go-sdk-core/v3 v3.2.4/go.mod h1:lk9eOzNbNltPf3CBpcg1Ewkhw4qC3u2QCCKDRsUA2M0= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/strfmt v0.19.4 h1:eRvaqAhpL0IL6Trh5fDsGnGhiXndzHFuA05w6sXH6/g= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= 
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/nicksnyder/go-i18n v1.10.0 h1:5AzlPKvXBH4qBzmZ09Ua9Gipyruv6uApMcrNZdo96+Q= +github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/nicksnyder/go-i18n v2.0.3+incompatible h1:XCCaWsCoy4KlWkhOr+63dkv6oJmitJ573uJqDBAiFiQ= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 
+go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.30.0 h1:Wk0Z37oBmKj9/n+tPyBHZmeL19LaCoK3Qq48VwYENss= +gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/go-playground/validator.v9 v9.31.0 h1:bmXmP2RSNtFES+bn4uYuHT7iJFJv7Vj+an+ZQdDaD1M= +gopkg.in/go-playground/validator.v9 v9.31.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/IBM/appconfiguration-go-admin-sdk/LICENSE b/vendor/github.com/IBM/appconfiguration-go-admin-sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/IBM/appconfiguration-go-admin-sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1/app_configuration_v1.go b/vendor/github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1/app_configuration_v1.go new file mode 100644 index 00000000000..f0079bf8ce1 --- /dev/null +++ b/vendor/github.com/IBM/appconfiguration-go-admin-sdk/appconfigurationv1/app_configuration_v1.go @@ -0,0 +1,5299 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.22.0-937b9a1c-20201211-223043 + */ + + +// Package appconfigurationv1 : Operations and models for the AppConfigurationV1 service +package appconfigurationv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/appconfiguration-go-admin-sdk/common" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/go-openapi/strfmt" + "net/http" + "reflect" + "strings" + "time" +) + +// AppConfigurationV1 : REST APIs for App Configuration +// +// Version: 1.0 +// See: https://{DomainName}/docs/app-configuration/ +type AppConfigurationV1 struct { + Service *core.BaseService +} + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "app_configuration" + +// AppConfigurationV1Options : Service options +type AppConfigurationV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewAppConfigurationV1UsingExternalConfig : constructs an instance of AppConfigurationV1 with passed in options and external configuration.
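+//
+// A minimal construction sketch (editor's illustration, not part of the generated
+// SDK): the service name is hypothetical, and credentials are assumed to be
+// supplied via external configuration such as environment variables or a
+// credentials file.
+//
+//   service, err := appconfigurationv1.NewAppConfigurationV1UsingExternalConfig(
+//       &appconfigurationv1.AppConfigurationV1Options{
+//           ServiceName: "app_configuration",
+//       })
+//   if err != nil {
+//       panic(err)
+//   }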
+func NewAppConfigurationV1UsingExternalConfig(options *AppConfigurationV1Options) (appConfiguration *AppConfigurationV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + appConfiguration, err = NewAppConfigurationV1(options) + if err != nil { + return + } + + err = appConfiguration.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = appConfiguration.Service.SetServiceURL(options.URL) + } + return +} + +// NewAppConfigurationV1 : constructs an instance of AppConfigurationV1 with passed in options. +func NewAppConfigurationV1(options *AppConfigurationV1Options) (service *AppConfigurationV1, err error) { + serviceOptions := &core.ServiceOptions{ + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &AppConfigurationV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "appConfiguration" suitable for processing requests. +func (appConfiguration *AppConfigurationV1) Clone() *AppConfigurationV1 { + if core.IsNil(appConfiguration) { + return nil + } + clone := *appConfiguration + clone.Service = appConfiguration.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (appConfiguration *AppConfigurationV1) SetServiceURL(url string) error { + return appConfiguration.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (appConfiguration *AppConfigurationV1) GetServiceURL() string { + return appConfiguration.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (appConfiguration *AppConfigurationV1) SetDefaultHeaders(headers http.Header) { + appConfiguration.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (appConfiguration *AppConfigurationV1) SetEnableGzipCompression(enableGzip bool) { + appConfiguration.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (appConfiguration *AppConfigurationV1) GetEnableGzipCompression() bool { + return appConfiguration.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (appConfiguration *AppConfigurationV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + appConfiguration.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (appConfiguration *AppConfigurationV1) DisableRetries() { + appConfiguration.Service.DisableRetries() +} + +// ListEnvironments : Get list of Environments +// List all the environments in the App Configuration service instance. 
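+//
+// A minimal call sketch (editor's illustration; the option values are hypothetical,
+// Expand and Limit are assumed to be pointer fields on the options struct, and
+// core.BoolPtr/core.Int64Ptr are the go-sdk-core pointer helpers):
+//
+//   envList, _, err := service.ListEnvironments(&appconfigurationv1.ListEnvironmentsOptions{
+//       Expand: core.BoolPtr(true),
+//       Limit:  core.Int64Ptr(10),
+//   })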
+func (appConfiguration *AppConfigurationV1) ListEnvironments(listEnvironmentsOptions *ListEnvironmentsOptions) (result *EnvironmentList, response *core.DetailedResponse, err error) { + return appConfiguration.ListEnvironmentsWithContext(context.Background(), listEnvironmentsOptions) +} + +// ListEnvironmentsWithContext is an alternate form of the ListEnvironments method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) ListEnvironmentsWithContext(ctx context.Context, listEnvironmentsOptions *ListEnvironmentsOptions) (result *EnvironmentList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listEnvironmentsOptions, "listEnvironmentsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listEnvironmentsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "ListEnvironments") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listEnvironmentsOptions.Expand != nil { + builder.AddQuery("expand", fmt.Sprint(*listEnvironmentsOptions.Expand)) + } + if listEnvironmentsOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listEnvironmentsOptions.Sort)) + } + if listEnvironmentsOptions.Tags != nil { + builder.AddQuery("tags", fmt.Sprint(*listEnvironmentsOptions.Tags)) + } + if listEnvironmentsOptions.Include != nil { + builder.AddQuery("include", strings.Join(listEnvironmentsOptions.Include, ",")) + } + if listEnvironmentsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listEnvironmentsOptions.Limit)) + } + if listEnvironmentsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listEnvironmentsOptions.Offset)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEnvironmentList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateEnvironment : Create Environment +// Create an environment. 
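+//
+// A minimal call sketch (editor's illustration; the name and ID are hypothetical,
+// and core.StringPtr is the go-sdk-core string-pointer helper):
+//
+//   env, _, err := service.CreateEnvironment(&appconfigurationv1.CreateEnvironmentOptions{
+//       Name:          core.StringPtr("Dev"),
+//       EnvironmentID: core.StringPtr("dev"),
+//   })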
+func (appConfiguration *AppConfigurationV1) CreateEnvironment(createEnvironmentOptions *CreateEnvironmentOptions) (result *Environment, response *core.DetailedResponse, err error) { + return appConfiguration.CreateEnvironmentWithContext(context.Background(), createEnvironmentOptions) +} + +// CreateEnvironmentWithContext is an alternate form of the CreateEnvironment method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) CreateEnvironmentWithContext(ctx context.Context, createEnvironmentOptions *CreateEnvironmentOptions) (result *Environment, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createEnvironmentOptions, "createEnvironmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createEnvironmentOptions, "createEnvironmentOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createEnvironmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "CreateEnvironment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createEnvironmentOptions.Name != nil { + body["name"] = createEnvironmentOptions.Name + } + if createEnvironmentOptions.EnvironmentID != nil { + body["environment_id"] = createEnvironmentOptions.EnvironmentID + } + if createEnvironmentOptions.Description != nil { + body["description"] = createEnvironmentOptions.Description + } + if createEnvironmentOptions.Tags != nil { + body["tags"] = createEnvironmentOptions.Tags + } + if createEnvironmentOptions.ColorCode != nil { + body["color_code"] = createEnvironmentOptions.ColorCode + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEnvironment) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateEnvironment : Update Environment +// Update an environment. 
+func (appConfiguration *AppConfigurationV1) UpdateEnvironment(updateEnvironmentOptions *UpdateEnvironmentOptions) (result *Environment, response *core.DetailedResponse, err error) { + return appConfiguration.UpdateEnvironmentWithContext(context.Background(), updateEnvironmentOptions) +} + +// UpdateEnvironmentWithContext is an alternate form of the UpdateEnvironment method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) UpdateEnvironmentWithContext(ctx context.Context, updateEnvironmentOptions *UpdateEnvironmentOptions) (result *Environment, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateEnvironmentOptions, "updateEnvironmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateEnvironmentOptions, "updateEnvironmentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *updateEnvironmentOptions.EnvironmentID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateEnvironmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "UpdateEnvironment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateEnvironmentOptions.Name != nil { + body["name"] = updateEnvironmentOptions.Name + } + if updateEnvironmentOptions.Description != nil { + body["description"] = updateEnvironmentOptions.Description + } + if updateEnvironmentOptions.Tags != nil { + body["tags"] = updateEnvironmentOptions.Tags + } + if updateEnvironmentOptions.ColorCode != nil { + body["color_code"] = updateEnvironmentOptions.ColorCode + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEnvironment) + if err != nil { + return + } + response.Result = result + + return +} + +// GetEnvironment : Get Environment +// Retrieve the details of the environment. 
+func (appConfiguration *AppConfigurationV1) GetEnvironment(getEnvironmentOptions *GetEnvironmentOptions) (result *Environment, response *core.DetailedResponse, err error) { + return appConfiguration.GetEnvironmentWithContext(context.Background(), getEnvironmentOptions) +} + +// GetEnvironmentWithContext is an alternate form of the GetEnvironment method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) GetEnvironmentWithContext(ctx context.Context, getEnvironmentOptions *GetEnvironmentOptions) (result *Environment, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getEnvironmentOptions, "getEnvironmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getEnvironmentOptions, "getEnvironmentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *getEnvironmentOptions.EnvironmentID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getEnvironmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "GetEnvironment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getEnvironmentOptions.Expand != nil { + builder.AddQuery("expand", fmt.Sprint(*getEnvironmentOptions.Expand)) + } + if getEnvironmentOptions.Include != nil { + builder.AddQuery("include", strings.Join(getEnvironmentOptions.Include, ",")) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEnvironment) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteEnvironment : Delete Environment +// Delete an Environment. 
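+//
+// A minimal call sketch (editor's illustration; the environment ID is
+// hypothetical). Delete returns no result model, only the detailed response
+// and an error:
+//
+//   _, err := service.DeleteEnvironment(&appconfigurationv1.DeleteEnvironmentOptions{
+//       EnvironmentID: core.StringPtr("dev"),
+//   })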
+func (appConfiguration *AppConfigurationV1) DeleteEnvironment(deleteEnvironmentOptions *DeleteEnvironmentOptions) (response *core.DetailedResponse, err error) { + return appConfiguration.DeleteEnvironmentWithContext(context.Background(), deleteEnvironmentOptions) +} + +// DeleteEnvironmentWithContext is an alternate form of the DeleteEnvironment method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) DeleteEnvironmentWithContext(ctx context.Context, deleteEnvironmentOptions *DeleteEnvironmentOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteEnvironmentOptions, "deleteEnvironmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteEnvironmentOptions, "deleteEnvironmentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *deleteEnvironmentOptions.EnvironmentID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteEnvironmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "DeleteEnvironment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = appConfiguration.Service.Request(request, nil) + + return +} + +// ListCollections : Get list of Collections +// List all the collections in the App Configuration service instance.
+func (appConfiguration *AppConfigurationV1) ListCollections(listCollectionsOptions *ListCollectionsOptions) (result *CollectionList, response *core.DetailedResponse, err error) { + return appConfiguration.ListCollectionsWithContext(context.Background(), listCollectionsOptions) +} + +// ListCollectionsWithContext is an alternate form of the ListCollections method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) ListCollectionsWithContext(ctx context.Context, listCollectionsOptions *ListCollectionsOptions) (result *CollectionList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listCollectionsOptions, "listCollectionsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/collections`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listCollectionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "ListCollections") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listCollectionsOptions.Expand != nil { + builder.AddQuery("expand", fmt.Sprint(*listCollectionsOptions.Expand)) + } + if listCollectionsOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listCollectionsOptions.Sort)) + } + if listCollectionsOptions.Tags != nil { + builder.AddQuery("tags", fmt.Sprint(*listCollectionsOptions.Tags)) + } + if listCollectionsOptions.Features != nil { + builder.AddQuery("features", strings.Join(listCollectionsOptions.Features, ",")) + } + if listCollectionsOptions.Properties != nil { + builder.AddQuery("properties", strings.Join(listCollectionsOptions.Properties, ",")) + } + if listCollectionsOptions.Include != nil { + builder.AddQuery("include", strings.Join(listCollectionsOptions.Include, ",")) + } + if listCollectionsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listCollectionsOptions.Limit)) + } + if listCollectionsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listCollectionsOptions.Offset)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCollectionList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateCollection : Create Collection +// Create a collection. 
+func (appConfiguration *AppConfigurationV1) CreateCollection(createCollectionOptions *CreateCollectionOptions) (result *CollectionLite, response *core.DetailedResponse, err error) { + return appConfiguration.CreateCollectionWithContext(context.Background(), createCollectionOptions) +} + +// CreateCollectionWithContext is an alternate form of the CreateCollection method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) CreateCollectionWithContext(ctx context.Context, createCollectionOptions *CreateCollectionOptions) (result *CollectionLite, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createCollectionOptions, "createCollectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createCollectionOptions, "createCollectionOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/collections`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createCollectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "CreateCollection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createCollectionOptions.Name != nil { + body["name"] = createCollectionOptions.Name + } + if createCollectionOptions.CollectionID != nil { + body["collection_id"] = createCollectionOptions.CollectionID + } + if createCollectionOptions.Description != nil { + body["description"] = createCollectionOptions.Description + } + if createCollectionOptions.Tags != nil { + body["tags"] = createCollectionOptions.Tags + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCollectionLite) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateCollection : Update Collection +// Update the collection name, tags and description. Collection Id cannot be updated. 
+func (appConfiguration *AppConfigurationV1) UpdateCollection(updateCollectionOptions *UpdateCollectionOptions) (result *CollectionLite, response *core.DetailedResponse, err error) { + return appConfiguration.UpdateCollectionWithContext(context.Background(), updateCollectionOptions) +} + +// UpdateCollectionWithContext is an alternate form of the UpdateCollection method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) UpdateCollectionWithContext(ctx context.Context, updateCollectionOptions *UpdateCollectionOptions) (result *CollectionLite, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateCollectionOptions, "updateCollectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateCollectionOptions, "updateCollectionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "collection_id": *updateCollectionOptions.CollectionID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/collections/{collection_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateCollectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "UpdateCollection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateCollectionOptions.Name != nil { + body["name"] = updateCollectionOptions.Name + } + if updateCollectionOptions.Description != nil { + body["description"] = updateCollectionOptions.Description + } + if updateCollectionOptions.Tags != nil { + body["tags"] = updateCollectionOptions.Tags + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCollectionLite) + if err != nil { + return + } + response.Result = result + + return +} + +// GetCollection : Get Collection +// Retrieve the details of the collection. 
+func (appConfiguration *AppConfigurationV1) GetCollection(getCollectionOptions *GetCollectionOptions) (result *Collection, response *core.DetailedResponse, err error) { + return appConfiguration.GetCollectionWithContext(context.Background(), getCollectionOptions) +} + +// GetCollectionWithContext is an alternate form of the GetCollection method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) GetCollectionWithContext(ctx context.Context, getCollectionOptions *GetCollectionOptions) (result *Collection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getCollectionOptions, "getCollectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getCollectionOptions, "getCollectionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "collection_id": *getCollectionOptions.CollectionID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/collections/{collection_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getCollectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "GetCollection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getCollectionOptions.Expand != nil { + builder.AddQuery("expand", fmt.Sprint(*getCollectionOptions.Expand)) + } + if getCollectionOptions.Include != nil { + builder.AddQuery("include", strings.Join(getCollectionOptions.Include, ",")) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteCollection : Delete Collection +// Delete the collection. 
+func (appConfiguration *AppConfigurationV1) DeleteCollection(deleteCollectionOptions *DeleteCollectionOptions) (response *core.DetailedResponse, err error) { + return appConfiguration.DeleteCollectionWithContext(context.Background(), deleteCollectionOptions) +} + +// DeleteCollectionWithContext is an alternate form of the DeleteCollection method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) DeleteCollectionWithContext(ctx context.Context, deleteCollectionOptions *DeleteCollectionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteCollectionOptions, "deleteCollectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteCollectionOptions, "deleteCollectionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "collection_id": *deleteCollectionOptions.CollectionID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/collections/{collection_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteCollectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "DeleteCollection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = appConfiguration.Service.Request(request, nil) + + return +} + +// ListFeatures : Get list of Features +// List all the feature flags in the specified environment. 
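+//
+// A minimal call sketch (editor's illustration; the IDs are hypothetical).
+// Features can be filtered by the collections and segments they are
+// associated with:
+//
+//   features, _, err := service.ListFeatures(&appconfigurationv1.ListFeaturesOptions{
+//       EnvironmentID: core.StringPtr("dev"),
+//       Collections:   []string{"web-app"},
+//       Expand:        core.BoolPtr(true),
+//   })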
+func (appConfiguration *AppConfigurationV1) ListFeatures(listFeaturesOptions *ListFeaturesOptions) (result *FeaturesList, response *core.DetailedResponse, err error) { + return appConfiguration.ListFeaturesWithContext(context.Background(), listFeaturesOptions) +} + +// ListFeaturesWithContext is an alternate form of the ListFeatures method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) ListFeaturesWithContext(ctx context.Context, listFeaturesOptions *ListFeaturesOptions) (result *FeaturesList, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listFeaturesOptions, "listFeaturesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listFeaturesOptions, "listFeaturesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *listFeaturesOptions.EnvironmentID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/features`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listFeaturesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "ListFeatures") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listFeaturesOptions.Expand != nil { + builder.AddQuery("expand", fmt.Sprint(*listFeaturesOptions.Expand)) + } + if listFeaturesOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listFeaturesOptions.Sort)) + } + if listFeaturesOptions.Tags != nil { + builder.AddQuery("tags", fmt.Sprint(*listFeaturesOptions.Tags)) + } + if listFeaturesOptions.Collections != nil { + builder.AddQuery("collections", strings.Join(listFeaturesOptions.Collections, ",")) + } + if listFeaturesOptions.Segments != nil { + builder.AddQuery("segments", strings.Join(listFeaturesOptions.Segments, ",")) + } + if listFeaturesOptions.Include != nil { + builder.AddQuery("include", strings.Join(listFeaturesOptions.Include, ",")) + } + if listFeaturesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listFeaturesOptions.Limit)) + } + if listFeaturesOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listFeaturesOptions.Offset)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFeaturesList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateFeature : Create Feature +// Create a feature flag. 
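+//
+// A minimal call sketch (editor's illustration; all values are hypothetical).
+// The enabled and disabled values are assumed to be untyped (interface{})
+// fields whose content must match the declared `type`:
+//
+//   feature, _, err := service.CreateFeature(&appconfigurationv1.CreateFeatureOptions{
+//       EnvironmentID: core.StringPtr("dev"),
+//       Name:          core.StringPtr("Dark mode"),
+//       FeatureID:     core.StringPtr("dark-mode"),
+//       Type:          core.StringPtr("BOOLEAN"),
+//       EnabledValue:  true,
+//       DisabledValue: false,
+//   })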
+func (appConfiguration *AppConfigurationV1) CreateFeature(createFeatureOptions *CreateFeatureOptions) (result *Feature, response *core.DetailedResponse, err error) { + return appConfiguration.CreateFeatureWithContext(context.Background(), createFeatureOptions) +} + +// CreateFeatureWithContext is an alternate form of the CreateFeature method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) CreateFeatureWithContext(ctx context.Context, createFeatureOptions *CreateFeatureOptions) (result *Feature, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createFeatureOptions, "createFeatureOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createFeatureOptions, "createFeatureOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *createFeatureOptions.EnvironmentID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/features`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createFeatureOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "CreateFeature") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createFeatureOptions.Name != nil { + body["name"] = createFeatureOptions.Name + } + if createFeatureOptions.FeatureID != nil { + body["feature_id"] = createFeatureOptions.FeatureID + } + if createFeatureOptions.Type != nil { + body["type"] = createFeatureOptions.Type + } + if createFeatureOptions.EnabledValue != nil { + body["enabled_value"] = createFeatureOptions.EnabledValue + } + if createFeatureOptions.DisabledValue != nil { + body["disabled_value"] = createFeatureOptions.DisabledValue + } + if createFeatureOptions.Description != nil { + body["description"] = createFeatureOptions.Description + } + if createFeatureOptions.Enabled != nil { + body["enabled"] = createFeatureOptions.Enabled + } + if createFeatureOptions.Tags != nil { + body["tags"] = createFeatureOptions.Tags + } + if createFeatureOptions.SegmentRules != nil { + body["segment_rules"] = createFeatureOptions.SegmentRules + } + if createFeatureOptions.Collections != nil { + body["collections"] = createFeatureOptions.Collections + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFeature) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateFeature : Update Feature +// Update a feature flag's details.
+func (appConfiguration *AppConfigurationV1) UpdateFeature(updateFeatureOptions *UpdateFeatureOptions) (result *Feature, response *core.DetailedResponse, err error) { + return appConfiguration.UpdateFeatureWithContext(context.Background(), updateFeatureOptions) +} + +// UpdateFeatureWithContext is an alternate form of the UpdateFeature method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) UpdateFeatureWithContext(ctx context.Context, updateFeatureOptions *UpdateFeatureOptions) (result *Feature, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateFeatureOptions, "updateFeatureOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateFeatureOptions, "updateFeatureOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *updateFeatureOptions.EnvironmentID, + "feature_id": *updateFeatureOptions.FeatureID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/features/{feature_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateFeatureOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "UpdateFeature") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateFeatureOptions.Name != nil { + body["name"] = updateFeatureOptions.Name + } + if updateFeatureOptions.Description != nil { + body["description"] = updateFeatureOptions.Description + } + if updateFeatureOptions.EnabledValue != nil { + body["enabled_value"] = updateFeatureOptions.EnabledValue + } + if updateFeatureOptions.DisabledValue != nil { + body["disabled_value"] = updateFeatureOptions.DisabledValue + } + if updateFeatureOptions.Enabled != nil { + body["enabled"] = updateFeatureOptions.Enabled + } + if updateFeatureOptions.Tags != nil { + body["tags"] = updateFeatureOptions.Tags + } + if updateFeatureOptions.SegmentRules != nil { + body["segment_rules"] = updateFeatureOptions.SegmentRules + } + if updateFeatureOptions.Collections != nil { + body["collections"] = updateFeatureOptions.Collections + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFeature) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateFeatureValues : Update Feature Values +// Update the feature values. This method can be executed only by the `writer` role. It allows updating the feature +// name, enabled_value, disabled_value, tags, description and segment rules; however, it does not allow toggling the +// feature flag or assigning the feature to a collection.
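+//
+// A minimal call sketch (editor's illustration; the IDs and values are
+// hypothetical). As a PATCH, only the supplied fields are updated:
+//
+//   feature, _, err := service.UpdateFeatureValues(&appconfigurationv1.UpdateFeatureValuesOptions{
+//       EnvironmentID: core.StringPtr("dev"),
+//       FeatureID:     core.StringPtr("dark-mode"),
+//       Description:   core.StringPtr("Dark mode for the web app"),
+//   })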
+func (appConfiguration *AppConfigurationV1) UpdateFeatureValues(updateFeatureValuesOptions *UpdateFeatureValuesOptions) (result *Feature, response *core.DetailedResponse, err error) { + return appConfiguration.UpdateFeatureValuesWithContext(context.Background(), updateFeatureValuesOptions) +} + +// UpdateFeatureValuesWithContext is an alternate form of the UpdateFeatureValues method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) UpdateFeatureValuesWithContext(ctx context.Context, updateFeatureValuesOptions *UpdateFeatureValuesOptions) (result *Feature, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateFeatureValuesOptions, "updateFeatureValuesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateFeatureValuesOptions, "updateFeatureValuesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *updateFeatureValuesOptions.EnvironmentID, + "feature_id": *updateFeatureValuesOptions.FeatureID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/features/{feature_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateFeatureValuesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "UpdateFeatureValues") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateFeatureValuesOptions.Name != nil { + body["name"] = updateFeatureValuesOptions.Name + } + if updateFeatureValuesOptions.Description != nil { + body["description"] = updateFeatureValuesOptions.Description + } + if updateFeatureValuesOptions.Tags != nil { + body["tags"] = updateFeatureValuesOptions.Tags + } + if updateFeatureValuesOptions.EnabledValue != nil { + body["enabled_value"] = updateFeatureValuesOptions.EnabledValue + } + if updateFeatureValuesOptions.DisabledValue != nil { + body["disabled_value"] = updateFeatureValuesOptions.DisabledValue + } + if updateFeatureValuesOptions.SegmentRules != nil { + body["segment_rules"] = updateFeatureValuesOptions.SegmentRules + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFeature) + if err != nil { + return + } + response.Result = result + + return +} + +// GetFeature : Get Feature +// Retrieve details of a feature. 
+func (appConfiguration *AppConfigurationV1) GetFeature(getFeatureOptions *GetFeatureOptions) (result *Feature, response *core.DetailedResponse, err error) { + return appConfiguration.GetFeatureWithContext(context.Background(), getFeatureOptions) +} + +// GetFeatureWithContext is an alternate form of the GetFeature method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) GetFeatureWithContext(ctx context.Context, getFeatureOptions *GetFeatureOptions) (result *Feature, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getFeatureOptions, "getFeatureOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getFeatureOptions, "getFeatureOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *getFeatureOptions.EnvironmentID, + "feature_id": *getFeatureOptions.FeatureID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/features/{feature_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getFeatureOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "GetFeature") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getFeatureOptions.Include != nil { + builder.AddQuery("include", fmt.Sprint(*getFeatureOptions.Include)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFeature) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteFeature : Delete Feature +// Delete a feature flag. 
+func (appConfiguration *AppConfigurationV1) DeleteFeature(deleteFeatureOptions *DeleteFeatureOptions) (response *core.DetailedResponse, err error) { + return appConfiguration.DeleteFeatureWithContext(context.Background(), deleteFeatureOptions) +} + +// DeleteFeatureWithContext is an alternate form of the DeleteFeature method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) DeleteFeatureWithContext(ctx context.Context, deleteFeatureOptions *DeleteFeatureOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteFeatureOptions, "deleteFeatureOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteFeatureOptions, "deleteFeatureOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *deleteFeatureOptions.EnvironmentID, + "feature_id": *deleteFeatureOptions.FeatureID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/features/{feature_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteFeatureOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "DeleteFeature") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = appConfiguration.Service.Request(request, nil) + + return +} + +// ToggleFeature : Toggle Feature +// Toggle a feature. +func (appConfiguration *AppConfigurationV1) ToggleFeature(toggleFeatureOptions *ToggleFeatureOptions) (result *Feature, response *core.DetailedResponse, err error) { + return appConfiguration.ToggleFeatureWithContext(context.Background(), toggleFeatureOptions) +} + +// ToggleFeatureWithContext is an alternate form of the ToggleFeature method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) ToggleFeatureWithContext(ctx context.Context, toggleFeatureOptions *ToggleFeatureOptions) (result *Feature, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(toggleFeatureOptions, "toggleFeatureOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(toggleFeatureOptions, "toggleFeatureOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *toggleFeatureOptions.EnvironmentID, + "feature_id": *toggleFeatureOptions.FeatureID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/features/{feature_id}/toggle`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range toggleFeatureOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "ToggleFeature") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if 
toggleFeatureOptions.Enabled != nil { + body["enabled"] = toggleFeatureOptions.Enabled + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFeature) + if err != nil { + return + } + response.Result = result + + return +} + +// ListProperties : Get list of Properties +// List all the properties in the specified environment. +func (appConfiguration *AppConfigurationV1) ListProperties(listPropertiesOptions *ListPropertiesOptions) (result *PropertiesList, response *core.DetailedResponse, err error) { + return appConfiguration.ListPropertiesWithContext(context.Background(), listPropertiesOptions) +} + +// ListPropertiesWithContext is an alternate form of the ListProperties method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) ListPropertiesWithContext(ctx context.Context, listPropertiesOptions *ListPropertiesOptions) (result *PropertiesList, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listPropertiesOptions, "listPropertiesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listPropertiesOptions, "listPropertiesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *listPropertiesOptions.EnvironmentID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/properties`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listPropertiesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "ListProperties") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listPropertiesOptions.Expand != nil { + builder.AddQuery("expand", fmt.Sprint(*listPropertiesOptions.Expand)) + } + if listPropertiesOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listPropertiesOptions.Sort)) + } + if listPropertiesOptions.Tags != nil { + builder.AddQuery("tags", fmt.Sprint(*listPropertiesOptions.Tags)) + } + if listPropertiesOptions.Collections != nil { + builder.AddQuery("collections", strings.Join(listPropertiesOptions.Collections, ",")) + } + if listPropertiesOptions.Segments != nil { + builder.AddQuery("segments", strings.Join(listPropertiesOptions.Segments, ",")) + } + if listPropertiesOptions.Include != nil { + builder.AddQuery("include", strings.Join(listPropertiesOptions.Include, ",")) + } + if listPropertiesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listPropertiesOptions.Limit)) + } + if listPropertiesOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listPropertiesOptions.Offset)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPropertiesList) + if err != nil { + 
return + } + response.Result = result + + return +} + +// CreateProperty : Create Property +// Create a Property. +func (appConfiguration *AppConfigurationV1) CreateProperty(createPropertyOptions *CreatePropertyOptions) (result *Property, response *core.DetailedResponse, err error) { + return appConfiguration.CreatePropertyWithContext(context.Background(), createPropertyOptions) +} + +// CreatePropertyWithContext is an alternate form of the CreateProperty method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) CreatePropertyWithContext(ctx context.Context, createPropertyOptions *CreatePropertyOptions) (result *Property, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createPropertyOptions, "createPropertyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createPropertyOptions, "createPropertyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *createPropertyOptions.EnvironmentID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/properties`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createPropertyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "CreateProperty") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createPropertyOptions.Name != nil { + body["name"] = createPropertyOptions.Name + } + if createPropertyOptions.PropertyID != nil { + body["property_id"] = createPropertyOptions.PropertyID + } + if createPropertyOptions.Type != nil { + body["type"] = createPropertyOptions.Type + } + if createPropertyOptions.Value != nil { + body["value"] = createPropertyOptions.Value + } + if createPropertyOptions.Description != nil { + body["description"] = createPropertyOptions.Description + } + if createPropertyOptions.Tags != nil { + body["tags"] = createPropertyOptions.Tags + } + if createPropertyOptions.SegmentRules != nil { + body["segment_rules"] = createPropertyOptions.SegmentRules + } + if createPropertyOptions.Collections != nil { + body["collections"] = createPropertyOptions.Collections + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProperty) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateProperty : Update Property +// Update a Property. 
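+//
+// A minimal call sketch (editor's illustration; the IDs and value are
+// hypothetical, and the value is assumed to be an untyped field matching the
+// property's declared `type`):
+//
+//   property, _, err := service.UpdateProperty(&appconfigurationv1.UpdatePropertyOptions{
+//       EnvironmentID: core.StringPtr("dev"),
+//       PropertyID:    core.StringPtr("max-retries"),
+//       Value:         5,
+//   })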
+func (appConfiguration *AppConfigurationV1) UpdateProperty(updatePropertyOptions *UpdatePropertyOptions) (result *Property, response *core.DetailedResponse, err error) {
+	return appConfiguration.UpdatePropertyWithContext(context.Background(), updatePropertyOptions)
+}
+
+// UpdatePropertyWithContext is an alternate form of the UpdateProperty method which supports a Context parameter
+func (appConfiguration *AppConfigurationV1) UpdatePropertyWithContext(ctx context.Context, updatePropertyOptions *UpdatePropertyOptions) (result *Property, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(updatePropertyOptions, "updatePropertyOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(updatePropertyOptions, "updatePropertyOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"environment_id": *updatePropertyOptions.EnvironmentID,
+		"property_id": *updatePropertyOptions.PropertyID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/properties/{property_id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range updatePropertyOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "UpdateProperty")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	body := make(map[string]interface{})
+	if updatePropertyOptions.Name != nil {
+		body["name"] = updatePropertyOptions.Name
+	}
+	if updatePropertyOptions.Description != nil {
+		body["description"] = updatePropertyOptions.Description
+	}
+	if updatePropertyOptions.Value != nil {
+		body["value"] = updatePropertyOptions.Value
+	}
+	if updatePropertyOptions.Tags != nil {
+		body["tags"] = updatePropertyOptions.Tags
+	}
+	if updatePropertyOptions.SegmentRules != nil {
+		body["segment_rules"] = updatePropertyOptions.SegmentRules
+	}
+	if updatePropertyOptions.Collections != nil {
+		body["collections"] = updatePropertyOptions.Collections
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = appConfiguration.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProperty)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// UpdatePropertyValues : Update Property values
+// Update the property values. This method can be executed by the `writer` role. Property value and targeting rules can
+// be updated; however, this method does not allow assigning the property to a collection.
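+//
+// A hypothetical usage sketch (IDs invented; client assumed to be
+// authenticated). Unlike UpdateProperty, only the value-related fields are
+// sent in the PATCH request:
+//
+//	patchOptions := &UpdatePropertyValuesOptions{
+//		EnvironmentID: core.StringPtr("dev"),
+//		PropertyID:    core.StringPtr("ci-pipeline"),
+//		Value:         false,
+//	}
+//	property, _, err := appConfiguration.UpdatePropertyValues(patchOptions)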
+func (appConfiguration *AppConfigurationV1) UpdatePropertyValues(updatePropertyValuesOptions *UpdatePropertyValuesOptions) (result *Property, response *core.DetailedResponse, err error) { + return appConfiguration.UpdatePropertyValuesWithContext(context.Background(), updatePropertyValuesOptions) +} + +// UpdatePropertyValuesWithContext is an alternate form of the UpdatePropertyValues method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) UpdatePropertyValuesWithContext(ctx context.Context, updatePropertyValuesOptions *UpdatePropertyValuesOptions) (result *Property, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updatePropertyValuesOptions, "updatePropertyValuesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updatePropertyValuesOptions, "updatePropertyValuesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *updatePropertyValuesOptions.EnvironmentID, + "property_id": *updatePropertyValuesOptions.PropertyID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/properties/{property_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updatePropertyValuesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "UpdatePropertyValues") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updatePropertyValuesOptions.Name != nil { + body["name"] = updatePropertyValuesOptions.Name + } + if updatePropertyValuesOptions.Description != nil { + body["description"] = updatePropertyValuesOptions.Description + } + if updatePropertyValuesOptions.Tags != nil { + body["tags"] = updatePropertyValuesOptions.Tags + } + if updatePropertyValuesOptions.Value != nil { + body["value"] = updatePropertyValuesOptions.Value + } + if updatePropertyValuesOptions.SegmentRules != nil { + body["segment_rules"] = updatePropertyValuesOptions.SegmentRules + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProperty) + if err != nil { + return + } + response.Result = result + + return +} + +// GetProperty : Get Property +// Retrieve details of a property. 
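+//
+// A hypothetical usage sketch (IDs invented); the "collections" include value
+// corresponds to the `include` query parameter handled below:
+//
+//	getPropertyOptions := &GetPropertyOptions{
+//		EnvironmentID: core.StringPtr("dev"),
+//		PropertyID:    core.StringPtr("ci-pipeline"),
+//		Include:       core.StringPtr("collections"),
+//	}
+//	property, _, err := appConfiguration.GetProperty(getPropertyOptions)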
+func (appConfiguration *AppConfigurationV1) GetProperty(getPropertyOptions *GetPropertyOptions) (result *Property, response *core.DetailedResponse, err error) { + return appConfiguration.GetPropertyWithContext(context.Background(), getPropertyOptions) +} + +// GetPropertyWithContext is an alternate form of the GetProperty method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) GetPropertyWithContext(ctx context.Context, getPropertyOptions *GetPropertyOptions) (result *Property, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getPropertyOptions, "getPropertyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getPropertyOptions, "getPropertyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *getPropertyOptions.EnvironmentID, + "property_id": *getPropertyOptions.PropertyID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/properties/{property_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getPropertyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "GetProperty") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getPropertyOptions.Include != nil { + builder.AddQuery("include", fmt.Sprint(*getPropertyOptions.Include)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProperty) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteProperty : Delete Property +// Delete a Property. 
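+//
+// A hypothetical usage sketch (IDs invented); the delete call returns only the
+// detailed response, with no result model:
+//
+//	deletePropertyOptions := appConfiguration.NewDeletePropertyOptions("dev", "ci-pipeline")
+//	response, err := appConfiguration.DeleteProperty(deletePropertyOptions)
+//	if err == nil {
+//		fmt.Println("deleted, HTTP status:", response.StatusCode)
+//	}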
+func (appConfiguration *AppConfigurationV1) DeleteProperty(deletePropertyOptions *DeletePropertyOptions) (response *core.DetailedResponse, err error) { + return appConfiguration.DeletePropertyWithContext(context.Background(), deletePropertyOptions) +} + +// DeletePropertyWithContext is an alternate form of the DeleteProperty method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) DeletePropertyWithContext(ctx context.Context, deletePropertyOptions *DeletePropertyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deletePropertyOptions, "deletePropertyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deletePropertyOptions, "deletePropertyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "environment_id": *deletePropertyOptions.EnvironmentID, + "property_id": *deletePropertyOptions.PropertyID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/environments/{environment_id}/properties/{property_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deletePropertyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "DeleteProperty") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = appConfiguration.Service.Request(request, nil) + + return +} + +// ListSegments : Get list of Segments +// List all the segments. 
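+//
+// A hypothetical usage sketch; all options are optional here, and the sketch
+// assumes SegmentsList exposes a Segments slice, mirroring the other list
+// models in this package:
+//
+//	listSegmentsOptions := &ListSegmentsOptions{
+//		Expand: core.BoolPtr(true),
+//	}
+//	segmentsList, _, err := appConfiguration.ListSegments(listSegmentsOptions)
+//	if err == nil {
+//		for _, segment := range segmentsList.Segments {
+//			fmt.Println(*segment.Name)
+//		}
+//	}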
+func (appConfiguration *AppConfigurationV1) ListSegments(listSegmentsOptions *ListSegmentsOptions) (result *SegmentsList, response *core.DetailedResponse, err error) { + return appConfiguration.ListSegmentsWithContext(context.Background(), listSegmentsOptions) +} + +// ListSegmentsWithContext is an alternate form of the ListSegments method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) ListSegmentsWithContext(ctx context.Context, listSegmentsOptions *ListSegmentsOptions) (result *SegmentsList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listSegmentsOptions, "listSegmentsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/segments`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listSegmentsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "ListSegments") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listSegmentsOptions.Expand != nil { + builder.AddQuery("expand", fmt.Sprint(*listSegmentsOptions.Expand)) + } + if listSegmentsOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listSegmentsOptions.Sort)) + } + if listSegmentsOptions.Tags != nil { + builder.AddQuery("tags", fmt.Sprint(*listSegmentsOptions.Tags)) + } + if listSegmentsOptions.Include != nil { + builder.AddQuery("include", fmt.Sprint(*listSegmentsOptions.Include)) + } + if listSegmentsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listSegmentsOptions.Limit)) + } + if listSegmentsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listSegmentsOptions.Offset)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSegmentsList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateSegment : Create Segment +// Create a segment. 
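+//
+// A hypothetical usage sketch (names invented); rules are omitted here because
+// constructing a Rule requires the rule attributes defined elsewhere in this
+// file:
+//
+//	createSegmentOptions := appConfiguration.NewCreateSegmentOptions()
+//	createSegmentOptions.SetName("beta-users")
+//	createSegmentOptions.SetDescription("Users enrolled in the beta program")
+//	segment, _, err := appConfiguration.CreateSegment(createSegmentOptions)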
+func (appConfiguration *AppConfigurationV1) CreateSegment(createSegmentOptions *CreateSegmentOptions) (result *Segment, response *core.DetailedResponse, err error) { + return appConfiguration.CreateSegmentWithContext(context.Background(), createSegmentOptions) +} + +// CreateSegmentWithContext is an alternate form of the CreateSegment method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) CreateSegmentWithContext(ctx context.Context, createSegmentOptions *CreateSegmentOptions) (result *Segment, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createSegmentOptions, "createSegmentOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/segments`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createSegmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "CreateSegment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createSegmentOptions.Name != nil { + body["name"] = createSegmentOptions.Name + } + if createSegmentOptions.SegmentID != nil { + body["segment_id"] = createSegmentOptions.SegmentID + } + if createSegmentOptions.Description != nil { + body["description"] = createSegmentOptions.Description + } + if createSegmentOptions.Tags != nil { + body["tags"] = createSegmentOptions.Tags + } + if createSegmentOptions.Rules != nil { + body["rules"] = createSegmentOptions.Rules + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSegment) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSegment : Update Segment +// Update the segment properties. 
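+//
+// A hypothetical usage sketch (ID and description invented):
+//
+//	updateSegmentOptions := &UpdateSegmentOptions{
+//		SegmentID:   core.StringPtr("beta-users"),
+//		Description: core.StringPtr("Users enrolled in the expanded beta program"),
+//	}
+//	segment, _, err := appConfiguration.UpdateSegment(updateSegmentOptions)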
+func (appConfiguration *AppConfigurationV1) UpdateSegment(updateSegmentOptions *UpdateSegmentOptions) (result *Segment, response *core.DetailedResponse, err error) { + return appConfiguration.UpdateSegmentWithContext(context.Background(), updateSegmentOptions) +} + +// UpdateSegmentWithContext is an alternate form of the UpdateSegment method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) UpdateSegmentWithContext(ctx context.Context, updateSegmentOptions *UpdateSegmentOptions) (result *Segment, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateSegmentOptions, "updateSegmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateSegmentOptions, "updateSegmentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "segment_id": *updateSegmentOptions.SegmentID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/segments/{segment_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateSegmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "UpdateSegment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateSegmentOptions.Name != nil { + body["name"] = updateSegmentOptions.Name + } + if updateSegmentOptions.Description != nil { + body["description"] = updateSegmentOptions.Description + } + if updateSegmentOptions.Tags != nil { + body["tags"] = updateSegmentOptions.Tags + } + if updateSegmentOptions.Rules != nil { + body["rules"] = updateSegmentOptions.Rules + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSegment) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSegment : Get Segment +// Retrieve details of a segment. 
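+//
+// A hypothetical usage sketch (ID and include values invented); Include is a
+// string slice that is joined into the `include` query parameter:
+//
+//	getSegmentOptions := &GetSegmentOptions{
+//		SegmentID: core.StringPtr("beta-users"),
+//		Include:   []string{"features", "properties"},
+//	}
+//	segment, _, err := appConfiguration.GetSegment(getSegmentOptions)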
+func (appConfiguration *AppConfigurationV1) GetSegment(getSegmentOptions *GetSegmentOptions) (result *Segment, response *core.DetailedResponse, err error) { + return appConfiguration.GetSegmentWithContext(context.Background(), getSegmentOptions) +} + +// GetSegmentWithContext is an alternate form of the GetSegment method which supports a Context parameter +func (appConfiguration *AppConfigurationV1) GetSegmentWithContext(ctx context.Context, getSegmentOptions *GetSegmentOptions) (result *Segment, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSegmentOptions, "getSegmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSegmentOptions, "getSegmentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "segment_id": *getSegmentOptions.SegmentID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/segments/{segment_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSegmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "GetSegment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getSegmentOptions.Include != nil { + builder.AddQuery("include", strings.Join(getSegmentOptions.Include, ",")) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = appConfiguration.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSegment) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteSegment : Delete Segment +// Delete a segment. 
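+//
+// A hypothetical usage sketch (ID invented):
+//
+//	deleteSegmentOptions := appConfiguration.NewDeleteSegmentOptions("beta-users")
+//	response, err := appConfiguration.DeleteSegment(deleteSegmentOptions)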
+func (appConfiguration *AppConfigurationV1) DeleteSegment(deleteSegmentOptions *DeleteSegmentOptions) (response *core.DetailedResponse, err error) {
+	return appConfiguration.DeleteSegmentWithContext(context.Background(), deleteSegmentOptions)
+}
+
+// DeleteSegmentWithContext is an alternate form of the DeleteSegment method which supports a Context parameter
+func (appConfiguration *AppConfigurationV1) DeleteSegmentWithContext(ctx context.Context, deleteSegmentOptions *DeleteSegmentOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteSegmentOptions, "deleteSegmentOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteSegmentOptions, "deleteSegmentOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"segment_id": *deleteSegmentOptions.SegmentID,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = appConfiguration.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(appConfiguration.Service.Options.URL, `/segments/{segment_id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteSegmentOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("app_configuration", "V1", "DeleteSegment")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = appConfiguration.Service.Request(request, nil)
+
+	return
+}
+
+// Collection : Details of the collection.
+type Collection struct {
+	// Collection name.
+	Name *string `json:"name" validate:"required"`
+
+	// Collection Id.
+	CollectionID *string `json:"collection_id" validate:"required"`
+
+	// Collection description.
+	Description *string `json:"description,omitempty"`
+
+	// Tags associated with the collection.
+	Tags *string `json:"tags,omitempty"`
+
+	// Creation time of the collection.
+	CreatedTime *strfmt.DateTime `json:"created_time,omitempty"`
+
+	// Last updated time of the collection data.
+	UpdatedTime *strfmt.DateTime `json:"updated_time,omitempty"`
+
+	// Collection URL.
+	Href *string `json:"href,omitempty"`
+
+	// List of Features associated with the collection.
+	Features []FeatureOutput `json:"features,omitempty"`
+
+	// List of properties associated with the collection.
+	Properties []PropertyOutput `json:"properties,omitempty"`
+
+	// Number of features associated with the collection.
+	FeaturesCount *int64 `json:"features_count,omitempty"`
+
+	// Number of properties associated with the collection.
+	PropertiesCount *int64 `json:"properties_count,omitempty"`
+}
+
+
+// NewCollection : Instantiate Collection (Generic Model Constructor)
+func (*AppConfigurationV1) NewCollection(name string, collectionID string) (model *Collection, err error) {
+	model = &Collection{
+		Name: core.StringPtr(name),
+		CollectionID: core.StringPtr(collectionID),
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
+
+// UnmarshalCollection unmarshals an instance of Collection from the specified map of raw messages.
+func UnmarshalCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Collection) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "collection_id", &obj.CollectionID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_time", &obj.CreatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_time", &obj.UpdatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalModel(m, "features", &obj.Features, UnmarshalFeatureOutput) + if err != nil { + return + } + err = core.UnmarshalModel(m, "properties", &obj.Properties, UnmarshalPropertyOutput) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "features_count", &obj.FeaturesCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "properties_count", &obj.PropertiesCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CollectionList : List of all Collections. +type CollectionList struct { + // Array of collections. + Collections []Collection `json:"collections" validate:"required"` + + // Number of records returned. + Limit *int64 `json:"limit" validate:"required"` + + // Skipped number of records. + Offset *int64 `json:"offset" validate:"required"` + + // Total number of records. + TotalCount *int64 `json:"total_count" validate:"required"` + + // Response having URL of the page. + First *PageHrefResponse `json:"first,omitempty"` + + // Response having URL of the page. + Previous *PageHrefResponse `json:"previous,omitempty"` + + // Response having URL of the page. + Next *PageHrefResponse `json:"next,omitempty"` + + // Response having URL of the page. + Last *PageHrefResponse `json:"last,omitempty"` +} + + +// UnmarshalCollectionList unmarshals an instance of CollectionList from the specified map of raw messages. +func UnmarshalCollectionList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CollectionList) + err = core.UnmarshalModel(m, "collections", &obj.Collections, UnmarshalCollection) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "previous", &obj.Previous, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "last", &obj.Last, UnmarshalPageHrefResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CollectionLite : Details of the collection. +type CollectionLite struct { + // Collection name. + Name *string `json:"name" validate:"required"` + + // Collection Id. + CollectionID *string `json:"collection_id" validate:"required"` + + // Collection description. 
+ Description *string `json:"description,omitempty"` + + // Tags associated with the collection. + Tags *string `json:"tags,omitempty"` + + // Creation time of the collection. + CreatedTime *strfmt.DateTime `json:"created_time,omitempty"` + + // Last updated time of the collection data. + UpdatedTime *strfmt.DateTime `json:"updated_time,omitempty"` + + // Collection URL. + Href *string `json:"href,omitempty"` +} + + +// UnmarshalCollectionLite unmarshals an instance of CollectionLite from the specified map of raw messages. +func UnmarshalCollectionLite(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CollectionLite) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "collection_id", &obj.CollectionID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_time", &obj.CreatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_time", &obj.UpdatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CollectionRef : CollectionRef struct +type CollectionRef struct { + // Collection id. + CollectionID *string `json:"collection_id" validate:"required"` + + // Name of the collection. + Name *string `json:"name,omitempty"` +} + + +// NewCollectionRef : Instantiate CollectionRef (Generic Model Constructor) +func (*AppConfigurationV1) NewCollectionRef(collectionID string) (model *CollectionRef, err error) { + model = &CollectionRef{ + CollectionID: core.StringPtr(collectionID), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalCollectionRef unmarshals an instance of CollectionRef from the specified map of raw messages. +func UnmarshalCollectionRef(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CollectionRef) + err = core.UnmarshalPrimitive(m, "collection_id", &obj.CollectionID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CreateCollectionOptions : The CreateCollection options. +type CreateCollectionOptions struct { + // Collection name. + Name *string `json:"name" validate:"required"` + + // Collection Id. + CollectionID *string `json:"collection_id" validate:"required"` + + // Collection description. + Description *string `json:"description,omitempty"` + + // Tags associated with the collection. 
+ Tags *string `json:"tags,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateCollectionOptions : Instantiate CreateCollectionOptions +func (*AppConfigurationV1) NewCreateCollectionOptions(name string, collectionID string) *CreateCollectionOptions { + return &CreateCollectionOptions{ + Name: core.StringPtr(name), + CollectionID: core.StringPtr(collectionID), + } +} + +// SetName : Allow user to set Name +func (options *CreateCollectionOptions) SetName(name string) *CreateCollectionOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetCollectionID : Allow user to set CollectionID +func (options *CreateCollectionOptions) SetCollectionID(collectionID string) *CreateCollectionOptions { + options.CollectionID = core.StringPtr(collectionID) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateCollectionOptions) SetDescription(description string) *CreateCollectionOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetTags : Allow user to set Tags +func (options *CreateCollectionOptions) SetTags(tags string) *CreateCollectionOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateCollectionOptions) SetHeaders(param map[string]string) *CreateCollectionOptions { + options.Headers = param + return options +} + +// CreateEnvironmentOptions : The CreateEnvironment options. +type CreateEnvironmentOptions struct { + // Environment name. + Name *string `json:"name" validate:"required"` + + // Environment id. + EnvironmentID *string `json:"environment_id" validate:"required"` + + // Environment description. + Description *string `json:"description,omitempty"` + + // Tags associated with the environment. + Tags *string `json:"tags,omitempty"` + + // Color code to distinguish the environment. The Hex code for the color. For example `#FF0000` for `red`. 
+	ColorCode *string `json:"color_code,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewCreateEnvironmentOptions : Instantiate CreateEnvironmentOptions
+func (*AppConfigurationV1) NewCreateEnvironmentOptions(name string, environmentID string) *CreateEnvironmentOptions {
+	return &CreateEnvironmentOptions{
+		Name: core.StringPtr(name),
+		EnvironmentID: core.StringPtr(environmentID),
+	}
+}
+
+// SetName : Allow user to set Name
+func (options *CreateEnvironmentOptions) SetName(name string) *CreateEnvironmentOptions {
+	options.Name = core.StringPtr(name)
+	return options
+}
+
+// SetEnvironmentID : Allow user to set EnvironmentID
+func (options *CreateEnvironmentOptions) SetEnvironmentID(environmentID string) *CreateEnvironmentOptions {
+	options.EnvironmentID = core.StringPtr(environmentID)
+	return options
+}
+
+// SetDescription : Allow user to set Description
+func (options *CreateEnvironmentOptions) SetDescription(description string) *CreateEnvironmentOptions {
+	options.Description = core.StringPtr(description)
+	return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *CreateEnvironmentOptions) SetTags(tags string) *CreateEnvironmentOptions {
+	options.Tags = core.StringPtr(tags)
+	return options
+}
+
+// SetColorCode : Allow user to set ColorCode
+func (options *CreateEnvironmentOptions) SetColorCode(colorCode string) *CreateEnvironmentOptions {
+	options.ColorCode = core.StringPtr(colorCode)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CreateEnvironmentOptions) SetHeaders(param map[string]string) *CreateEnvironmentOptions {
+	options.Headers = param
+	return options
+}
+
+// CreateFeatureOptions : The CreateFeature options.
+type CreateFeatureOptions struct {
+	// Environment Id.
+	EnvironmentID *string `json:"environment_id" validate:"required,ne="`
+
+	// Feature name.
+	Name *string `json:"name" validate:"required"`
+
+	// Feature id.
+	FeatureID *string `json:"feature_id" validate:"required"`
+
+	// Type of the feature (BOOLEAN, STRING, NUMERIC).
+	Type *string `json:"type" validate:"required"`
+
+	// Value of the feature when it is enabled. The value can be Boolean, String or a Numeric value as per the `type`
+	// attribute.
+	EnabledValue interface{} `json:"enabled_value" validate:"required"`
+
+	// Value of the feature when it is disabled. The value can be Boolean, String or a Numeric value as per the `type`
+	// attribute.
+	DisabledValue interface{} `json:"disabled_value" validate:"required"`
+
+	// Feature description.
+	Description *string `json:"description,omitempty"`
+
+	// The state of the feature flag.
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// Tags associated with the feature.
+	Tags *string `json:"tags,omitempty"`
+
+	// Specify the targeting rules that are used to set different feature flag values for different segments.
+	SegmentRules []SegmentRule `json:"segment_rules,omitempty"`
+
+	// List of collection IDs representing the collections that are associated with the specified feature flag.
+	Collections []CollectionRef `json:"collections,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the CreateFeatureOptions.Type property.
+// Type of the feature (BOOLEAN, STRING, NUMERIC).
+const ( + CreateFeatureOptions_Type_Boolean = "BOOLEAN" + CreateFeatureOptions_Type_Numeric = "NUMERIC" + CreateFeatureOptions_Type_String = "STRING" +) + +// NewCreateFeatureOptions : Instantiate CreateFeatureOptions +func (*AppConfigurationV1) NewCreateFeatureOptions(environmentID string, name string, featureID string, typeVar string, enabledValue interface{}, disabledValue interface{}) *CreateFeatureOptions { + return &CreateFeatureOptions{ + EnvironmentID: core.StringPtr(environmentID), + Name: core.StringPtr(name), + FeatureID: core.StringPtr(featureID), + Type: core.StringPtr(typeVar), + EnabledValue: enabledValue, + DisabledValue: disabledValue, + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *CreateFeatureOptions) SetEnvironmentID(environmentID string) *CreateFeatureOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetName : Allow user to set Name +func (options *CreateFeatureOptions) SetName(name string) *CreateFeatureOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetFeatureID : Allow user to set FeatureID +func (options *CreateFeatureOptions) SetFeatureID(featureID string) *CreateFeatureOptions { + options.FeatureID = core.StringPtr(featureID) + return options +} + +// SetType : Allow user to set Type +func (options *CreateFeatureOptions) SetType(typeVar string) *CreateFeatureOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetEnabledValue : Allow user to set EnabledValue +func (options *CreateFeatureOptions) SetEnabledValue(enabledValue interface{}) *CreateFeatureOptions { + options.EnabledValue = enabledValue + return options +} + +// SetDisabledValue : Allow user to set DisabledValue +func (options *CreateFeatureOptions) SetDisabledValue(disabledValue interface{}) *CreateFeatureOptions { + options.DisabledValue = disabledValue + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateFeatureOptions) SetDescription(description string) *CreateFeatureOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *CreateFeatureOptions) SetEnabled(enabled bool) *CreateFeatureOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetTags : Allow user to set Tags +func (options *CreateFeatureOptions) SetTags(tags string) *CreateFeatureOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetSegmentRules : Allow user to set SegmentRules +func (options *CreateFeatureOptions) SetSegmentRules(segmentRules []SegmentRule) *CreateFeatureOptions { + options.SegmentRules = segmentRules + return options +} + +// SetCollections : Allow user to set Collections +func (options *CreateFeatureOptions) SetCollections(collections []CollectionRef) *CreateFeatureOptions { + options.Collections = collections + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateFeatureOptions) SetHeaders(param map[string]string) *CreateFeatureOptions { + options.Headers = param + return options +} + +// CreatePropertyOptions : The CreateProperty options. +type CreatePropertyOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Property name. + Name *string `json:"name" validate:"required"` + + // Property id. + PropertyID *string `json:"property_id" validate:"required"` + + // Type of the Property (BOOLEAN, STRING, NUMERIC). 
+	Type *string `json:"type" validate:"required"`
+
+	// Value of the Property. The value can be Boolean, String or a Numeric value as per the `type` attribute.
+	Value interface{} `json:"value" validate:"required"`
+
+	// Property description.
+	Description *string `json:"description,omitempty"`
+
+	// Tags associated with the property.
+	Tags *string `json:"tags,omitempty"`
+
+	// Specify the targeting rules that are used to set different property values for different segments.
+	SegmentRules []SegmentRule `json:"segment_rules,omitempty"`
+
+	// List of collection IDs representing the collections that are associated with the specified property.
+	Collections []CollectionRef `json:"collections,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the CreatePropertyOptions.Type property.
+// Type of the Property (BOOLEAN, STRING, NUMERIC).
+const (
+	CreatePropertyOptions_Type_Boolean = "BOOLEAN"
+	CreatePropertyOptions_Type_Numeric = "NUMERIC"
+	CreatePropertyOptions_Type_String = "STRING"
+)
+
+// NewCreatePropertyOptions : Instantiate CreatePropertyOptions
+func (*AppConfigurationV1) NewCreatePropertyOptions(environmentID string, name string, propertyID string, typeVar string, value interface{}) *CreatePropertyOptions {
+	return &CreatePropertyOptions{
+		EnvironmentID: core.StringPtr(environmentID),
+		Name: core.StringPtr(name),
+		PropertyID: core.StringPtr(propertyID),
+		Type: core.StringPtr(typeVar),
+		Value: value,
+	}
+}
+
+// SetEnvironmentID : Allow user to set EnvironmentID
+func (options *CreatePropertyOptions) SetEnvironmentID(environmentID string) *CreatePropertyOptions {
+	options.EnvironmentID = core.StringPtr(environmentID)
+	return options
+}
+
+// SetName : Allow user to set Name
+func (options *CreatePropertyOptions) SetName(name string) *CreatePropertyOptions {
+	options.Name = core.StringPtr(name)
+	return options
+}
+
+// SetPropertyID : Allow user to set PropertyID
+func (options *CreatePropertyOptions) SetPropertyID(propertyID string) *CreatePropertyOptions {
+	options.PropertyID = core.StringPtr(propertyID)
+	return options
+}
+
+// SetType : Allow user to set Type
+func (options *CreatePropertyOptions) SetType(typeVar string) *CreatePropertyOptions {
+	options.Type = core.StringPtr(typeVar)
+	return options
+}
+
+// SetValue : Allow user to set Value
+func (options *CreatePropertyOptions) SetValue(value interface{}) *CreatePropertyOptions {
+	options.Value = value
+	return options
+}
+
+// SetDescription : Allow user to set Description
+func (options *CreatePropertyOptions) SetDescription(description string) *CreatePropertyOptions {
+	options.Description = core.StringPtr(description)
+	return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *CreatePropertyOptions) SetTags(tags string) *CreatePropertyOptions {
+	options.Tags = core.StringPtr(tags)
+	return options
+}
+
+// SetSegmentRules : Allow user to set SegmentRules
+func (options *CreatePropertyOptions) SetSegmentRules(segmentRules []SegmentRule) *CreatePropertyOptions {
+	options.SegmentRules = segmentRules
+	return options
+}
+
+// SetCollections : Allow user to set Collections
+func (options *CreatePropertyOptions) SetCollections(collections []CollectionRef) *CreatePropertyOptions {
+	options.Collections = collections
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CreatePropertyOptions) SetHeaders(param map[string]string) *CreatePropertyOptions {
+	options.Headers = param
+	return options
+} + +// CreateSegmentOptions : The CreateSegment options. +type CreateSegmentOptions struct { + // Segment name. + Name *string `json:"name,omitempty"` + + // Segment id. + SegmentID *string `json:"segment_id,omitempty"` + + // Segment description. + Description *string `json:"description,omitempty"` + + // Tags associated with the segments. + Tags *string `json:"tags,omitempty"` + + // List of rules that determine if the entity is part of the segment. + Rules []Rule `json:"rules,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateSegmentOptions : Instantiate CreateSegmentOptions +func (*AppConfigurationV1) NewCreateSegmentOptions() *CreateSegmentOptions { + return &CreateSegmentOptions{} +} + +// SetName : Allow user to set Name +func (options *CreateSegmentOptions) SetName(name string) *CreateSegmentOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetSegmentID : Allow user to set SegmentID +func (options *CreateSegmentOptions) SetSegmentID(segmentID string) *CreateSegmentOptions { + options.SegmentID = core.StringPtr(segmentID) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateSegmentOptions) SetDescription(description string) *CreateSegmentOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetTags : Allow user to set Tags +func (options *CreateSegmentOptions) SetTags(tags string) *CreateSegmentOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetRules : Allow user to set Rules +func (options *CreateSegmentOptions) SetRules(rules []Rule) *CreateSegmentOptions { + options.Rules = rules + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateSegmentOptions) SetHeaders(param map[string]string) *CreateSegmentOptions { + options.Headers = param + return options +} + +// DeleteCollectionOptions : The DeleteCollection options. +type DeleteCollectionOptions struct { + // Collection Id of the collection. + CollectionID *string `json:"collection_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteCollectionOptions : Instantiate DeleteCollectionOptions +func (*AppConfigurationV1) NewDeleteCollectionOptions(collectionID string) *DeleteCollectionOptions { + return &DeleteCollectionOptions{ + CollectionID: core.StringPtr(collectionID), + } +} + +// SetCollectionID : Allow user to set CollectionID +func (options *DeleteCollectionOptions) SetCollectionID(collectionID string) *DeleteCollectionOptions { + options.CollectionID = core.StringPtr(collectionID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteCollectionOptions) SetHeaders(param map[string]string) *DeleteCollectionOptions { + options.Headers = param + return options +} + +// DeleteEnvironmentOptions : The DeleteEnvironment options. +type DeleteEnvironmentOptions struct { + // Environment Id. 
+ EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteEnvironmentOptions : Instantiate DeleteEnvironmentOptions +func (*AppConfigurationV1) NewDeleteEnvironmentOptions(environmentID string) *DeleteEnvironmentOptions { + return &DeleteEnvironmentOptions{ + EnvironmentID: core.StringPtr(environmentID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *DeleteEnvironmentOptions) SetEnvironmentID(environmentID string) *DeleteEnvironmentOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteEnvironmentOptions) SetHeaders(param map[string]string) *DeleteEnvironmentOptions { + options.Headers = param + return options +} + +// DeleteFeatureOptions : The DeleteFeature options. +type DeleteFeatureOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Feature Id. + FeatureID *string `json:"feature_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteFeatureOptions : Instantiate DeleteFeatureOptions +func (*AppConfigurationV1) NewDeleteFeatureOptions(environmentID string, featureID string) *DeleteFeatureOptions { + return &DeleteFeatureOptions{ + EnvironmentID: core.StringPtr(environmentID), + FeatureID: core.StringPtr(featureID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *DeleteFeatureOptions) SetEnvironmentID(environmentID string) *DeleteFeatureOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetFeatureID : Allow user to set FeatureID +func (options *DeleteFeatureOptions) SetFeatureID(featureID string) *DeleteFeatureOptions { + options.FeatureID = core.StringPtr(featureID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteFeatureOptions) SetHeaders(param map[string]string) *DeleteFeatureOptions { + options.Headers = param + return options +} + +// DeletePropertyOptions : The DeleteProperty options. +type DeletePropertyOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Property Id. + PropertyID *string `json:"property_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeletePropertyOptions : Instantiate DeletePropertyOptions +func (*AppConfigurationV1) NewDeletePropertyOptions(environmentID string, propertyID string) *DeletePropertyOptions { + return &DeletePropertyOptions{ + EnvironmentID: core.StringPtr(environmentID), + PropertyID: core.StringPtr(propertyID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *DeletePropertyOptions) SetEnvironmentID(environmentID string) *DeletePropertyOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetPropertyID : Allow user to set PropertyID +func (options *DeletePropertyOptions) SetPropertyID(propertyID string) *DeletePropertyOptions { + options.PropertyID = core.StringPtr(propertyID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeletePropertyOptions) SetHeaders(param map[string]string) *DeletePropertyOptions { + options.Headers = param + return options +} + +// DeleteSegmentOptions : The DeleteSegment options. 
+type DeleteSegmentOptions struct { + // Segment Id. + SegmentID *string `json:"segment_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSegmentOptions : Instantiate DeleteSegmentOptions +func (*AppConfigurationV1) NewDeleteSegmentOptions(segmentID string) *DeleteSegmentOptions { + return &DeleteSegmentOptions{ + SegmentID: core.StringPtr(segmentID), + } +} + +// SetSegmentID : Allow user to set SegmentID +func (options *DeleteSegmentOptions) SetSegmentID(segmentID string) *DeleteSegmentOptions { + options.SegmentID = core.StringPtr(segmentID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSegmentOptions) SetHeaders(param map[string]string) *DeleteSegmentOptions { + options.Headers = param + return options +} + +// Environment : Details of the environment. +type Environment struct { + // Environment name. + Name *string `json:"name" validate:"required"` + + // Environment id. + EnvironmentID *string `json:"environment_id" validate:"required"` + + // Environment description. + Description *string `json:"description,omitempty"` + + // Tags associated with the environment. + Tags *string `json:"tags,omitempty"` + + // Color code to distinguish the environment. The Hex code for the color. For example `#FF0000` for `red`. + ColorCode *string `json:"color_code,omitempty"` + + // Creation time of the environment. + CreatedTime *strfmt.DateTime `json:"created_time,omitempty"` + + // Last modified time of the environment data. + UpdatedTime *strfmt.DateTime `json:"updated_time,omitempty"` + + // Environment URL. + Href *string `json:"href,omitempty"` + + // List of Features associated with the environment. + Features []FeatureOutput `json:"features,omitempty"` + + // List of properties associated with the environment. + Properties []PropertyOutput `json:"properties,omitempty"` +} + + +// NewEnvironment : Instantiate Environment (Generic Model Constructor) +func (*AppConfigurationV1) NewEnvironment(name string, environmentID string) (model *Environment, err error) { + model = &Environment{ + Name: core.StringPtr(name), + EnvironmentID: core.StringPtr(environmentID), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalEnvironment unmarshals an instance of Environment from the specified map of raw messages. 
+func UnmarshalEnvironment(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Environment) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "environment_id", &obj.EnvironmentID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "color_code", &obj.ColorCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_time", &obj.CreatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_time", &obj.UpdatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalModel(m, "features", &obj.Features, UnmarshalFeatureOutput) + if err != nil { + return + } + err = core.UnmarshalModel(m, "properties", &obj.Properties, UnmarshalPropertyOutput) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// EnvironmentList : List of all environments. +type EnvironmentList struct { + // Array of environments. + Environments []Environment `json:"environments" validate:"required"` + + // Number of records returned. + Limit *int64 `json:"limit" validate:"required"` + + // Skipped number of records. + Offset *int64 `json:"offset" validate:"required"` + + // Total number of records. + TotalCount *int64 `json:"total_count" validate:"required"` + + // Response having URL of the page. + First *PageHrefResponse `json:"first,omitempty"` + + // Response having URL of the page. + Previous *PageHrefResponse `json:"previous,omitempty"` + + // Response having URL of the page. + Next *PageHrefResponse `json:"next,omitempty"` + + // Response having URL of the page. + Last *PageHrefResponse `json:"last,omitempty"` +} + + +// UnmarshalEnvironmentList unmarshals an instance of EnvironmentList from the specified map of raw messages. +func UnmarshalEnvironmentList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(EnvironmentList) + err = core.UnmarshalModel(m, "environments", &obj.Environments, UnmarshalEnvironment) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "previous", &obj.Previous, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "last", &obj.Last, UnmarshalPageHrefResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Feature : Details of the feature. +type Feature struct { + // Feature name. + Name *string `json:"name" validate:"required"` + + // Feature id. + FeatureID *string `json:"feature_id" validate:"required"` + + // Feature description. + Description *string `json:"description,omitempty"` + + // Type of the feature (BOOLEAN, STRING, NUMERIC). 
+	Type *string `json:"type" validate:"required"`
+
+	// Value of the feature when it is enabled. The value can be Boolean, String or a Numeric value as per the `type`
+	// attribute.
+	EnabledValue interface{} `json:"enabled_value" validate:"required"`
+
+	// Value of the feature when it is disabled. The value can be Boolean, String or a Numeric value as per the `type`
+	// attribute.
+	DisabledValue interface{} `json:"disabled_value" validate:"required"`
+
+	// The state of the feature flag.
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// Tags associated with the feature.
+	Tags *string `json:"tags,omitempty"`
+
+	// Specify the targeting rules that are used to set different feature flag values for different segments.
+	SegmentRules []SegmentRule `json:"segment_rules,omitempty"`
+
+	// Denotes if the targeting rules are specified for the feature flag.
+	SegmentExists *bool `json:"segment_exists,omitempty"`
+
+	// List of collection IDs representing the collections that are associated with the specified feature flag.
+	Collections []CollectionRef `json:"collections,omitempty"`
+
+	// Creation time of the feature flag.
+	CreatedTime *strfmt.DateTime `json:"created_time,omitempty"`
+
+	// Last modified time of the feature flag data.
+	UpdatedTime *strfmt.DateTime `json:"updated_time,omitempty"`
+
+	// The time of the last evaluation of the feature flag value.
+	EvaluationTime *strfmt.DateTime `json:"evaluation_time,omitempty"`
+
+	// Feature flag URL.
+	Href *string `json:"href,omitempty"`
+}
+
+// Constants associated with the Feature.Type property.
+// Type of the feature (BOOLEAN, STRING, NUMERIC).
+const (
+	Feature_Type_Boolean = "BOOLEAN"
+	Feature_Type_Numeric = "NUMERIC"
+	Feature_Type_String = "STRING"
+)
+
+
+// NewFeature : Instantiate Feature (Generic Model Constructor)
+func (*AppConfigurationV1) NewFeature(name string, featureID string, typeVar string, enabledValue interface{}, disabledValue interface{}) (model *Feature, err error) {
+	model = &Feature{
+		Name: core.StringPtr(name),
+		FeatureID: core.StringPtr(featureID),
+		Type: core.StringPtr(typeVar),
+		EnabledValue: enabledValue,
+		DisabledValue: disabledValue,
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
+
+// UnmarshalFeature unmarshals an instance of Feature from the specified map of raw messages.
+func UnmarshalFeature(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Feature) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "feature_id", &obj.FeatureID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enabled_value", &obj.EnabledValue) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "disabled_value", &obj.DisabledValue) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalModel(m, "segment_rules", &obj.SegmentRules, UnmarshalSegmentRule) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "segment_exists", &obj.SegmentExists) + if err != nil { + return + } + err = core.UnmarshalModel(m, "collections", &obj.Collections, UnmarshalCollectionRef) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_time", &obj.CreatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_time", &obj.UpdatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "evaluation_time", &obj.EvaluationTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FeatureOutput : Feature object. +type FeatureOutput struct { + // Feature id. + FeatureID *string `json:"feature_id" validate:"required"` + + // Feature name. + Name *string `json:"name" validate:"required"` +} + + +// NewFeatureOutput : Instantiate FeatureOutput (Generic Model Constructor) +func (*AppConfigurationV1) NewFeatureOutput(featureID string, name string) (model *FeatureOutput, err error) { + model = &FeatureOutput{ + FeatureID: core.StringPtr(featureID), + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalFeatureOutput unmarshals an instance of FeatureOutput from the specified map of raw messages. +func UnmarshalFeatureOutput(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FeatureOutput) + err = core.UnmarshalPrimitive(m, "feature_id", &obj.FeatureID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FeaturesList : List of all features. +type FeaturesList struct { + // Array of Features. + Features []Feature `json:"features" validate:"required"` + + // Number of records returned. + Limit *int64 `json:"limit" validate:"required"` + + // Skipped number of records. + Offset *int64 `json:"offset" validate:"required"` + + // Total number of records. + TotalCount *int64 `json:"total_count" validate:"required"` + + // Response having URL of the page. + First *PageHrefResponse `json:"first,omitempty"` + + // Response having URL of the page. + Previous *PageHrefResponse `json:"previous,omitempty"` + + // Response having URL of the page. + Next *PageHrefResponse `json:"next,omitempty"` + + // Response having URL of the page. 
+ Last *PageHrefResponse `json:"last,omitempty"` +} + + +// UnmarshalFeaturesList unmarshals an instance of FeaturesList from the specified map of raw messages. +func UnmarshalFeaturesList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FeaturesList) + err = core.UnmarshalModel(m, "features", &obj.Features, UnmarshalFeature) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "previous", &obj.Previous, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "last", &obj.Last, UnmarshalPageHrefResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetCollectionOptions : The GetCollection options. +type GetCollectionOptions struct { + // Collection Id of the collection. + CollectionID *string `json:"collection_id" validate:"required,ne="` + + // If set to `true`, returns expanded view of the resource details. + Expand *bool `json:"expand,omitempty"` + + // Include feature and property details in the response. + Include []string `json:"include,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetCollectionOptions.Include property. +const ( + GetCollectionOptions_Include_Features = "features" + GetCollectionOptions_Include_Properties = "properties" +) + +// NewGetCollectionOptions : Instantiate GetCollectionOptions +func (*AppConfigurationV1) NewGetCollectionOptions(collectionID string) *GetCollectionOptions { + return &GetCollectionOptions{ + CollectionID: core.StringPtr(collectionID), + } +} + +// SetCollectionID : Allow user to set CollectionID +func (options *GetCollectionOptions) SetCollectionID(collectionID string) *GetCollectionOptions { + options.CollectionID = core.StringPtr(collectionID) + return options +} + +// SetExpand : Allow user to set Expand +func (options *GetCollectionOptions) SetExpand(expand bool) *GetCollectionOptions { + options.Expand = core.BoolPtr(expand) + return options +} + +// SetInclude : Allow user to set Include +func (options *GetCollectionOptions) SetInclude(include []string) *GetCollectionOptions { + options.Include = include + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetCollectionOptions) SetHeaders(param map[string]string) *GetCollectionOptions { + options.Headers = param + return options +} + +// GetEnvironmentOptions : The GetEnvironment options. +type GetEnvironmentOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // If set to `true`, returns expanded view of the resource details. + Expand *bool `json:"expand,omitempty"` + + // Include feature and property details in the response. + Include []string `json:"include,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetEnvironmentOptions.Include property. 
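+// For example (editor's illustration; `appConfigService` is an assumed,
+// already-initialized *AppConfigurationV1 client and "dev" a hypothetical
+// environment id):
+//
+//   options := appConfigService.NewGetEnvironmentOptions("dev").
+//       SetExpand(true).
+//       SetInclude([]string{GetEnvironmentOptions_Include_Features})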
+const ( + GetEnvironmentOptions_Include_Features = "features" + GetEnvironmentOptions_Include_Properties = "properties" +) + +// NewGetEnvironmentOptions : Instantiate GetEnvironmentOptions +func (*AppConfigurationV1) NewGetEnvironmentOptions(environmentID string) *GetEnvironmentOptions { + return &GetEnvironmentOptions{ + EnvironmentID: core.StringPtr(environmentID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *GetEnvironmentOptions) SetEnvironmentID(environmentID string) *GetEnvironmentOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetExpand : Allow user to set Expand +func (options *GetEnvironmentOptions) SetExpand(expand bool) *GetEnvironmentOptions { + options.Expand = core.BoolPtr(expand) + return options +} + +// SetInclude : Allow user to set Include +func (options *GetEnvironmentOptions) SetInclude(include []string) *GetEnvironmentOptions { + options.Include = include + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetEnvironmentOptions) SetHeaders(param map[string]string) *GetEnvironmentOptions { + options.Headers = param + return options +} + +// GetFeatureOptions : The GetFeature options. +type GetFeatureOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Feature Id. + FeatureID *string `json:"feature_id" validate:"required,ne="` + + // Include the associated collections in the response. + Include *string `json:"include,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetFeatureOptions.Include property. +// Include the associated collections in the response. +const ( + GetFeatureOptions_Include_Collections = "collections" +) + +// NewGetFeatureOptions : Instantiate GetFeatureOptions +func (*AppConfigurationV1) NewGetFeatureOptions(environmentID string, featureID string) *GetFeatureOptions { + return &GetFeatureOptions{ + EnvironmentID: core.StringPtr(environmentID), + FeatureID: core.StringPtr(featureID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *GetFeatureOptions) SetEnvironmentID(environmentID string) *GetFeatureOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetFeatureID : Allow user to set FeatureID +func (options *GetFeatureOptions) SetFeatureID(featureID string) *GetFeatureOptions { + options.FeatureID = core.StringPtr(featureID) + return options +} + +// SetInclude : Allow user to set Include +func (options *GetFeatureOptions) SetInclude(include string) *GetFeatureOptions { + options.Include = core.StringPtr(include) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetFeatureOptions) SetHeaders(param map[string]string) *GetFeatureOptions { + options.Headers = param + return options +} + +// GetPropertyOptions : The GetProperty options. +type GetPropertyOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Property Id. + PropertyID *string `json:"property_id" validate:"required,ne="` + + // Include the associated collections in the response. + Include *string `json:"include,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetPropertyOptions.Include property. +// Include the associated collections in the response. 
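+// Note that, unlike the collection and environment Get options above, Include
+// here is a single string rather than a slice. An illustrative call (editor's
+// sketch; the client variable and ids are assumptions):
+//
+//   options := appConfigService.NewGetPropertyOptions("dev", "request-timeout").
+//       SetInclude(GetPropertyOptions_Include_Collections)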
+const ( + GetPropertyOptions_Include_Collections = "collections" +) + +// NewGetPropertyOptions : Instantiate GetPropertyOptions +func (*AppConfigurationV1) NewGetPropertyOptions(environmentID string, propertyID string) *GetPropertyOptions { + return &GetPropertyOptions{ + EnvironmentID: core.StringPtr(environmentID), + PropertyID: core.StringPtr(propertyID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *GetPropertyOptions) SetEnvironmentID(environmentID string) *GetPropertyOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetPropertyID : Allow user to set PropertyID +func (options *GetPropertyOptions) SetPropertyID(propertyID string) *GetPropertyOptions { + options.PropertyID = core.StringPtr(propertyID) + return options +} + +// SetInclude : Allow user to set Include +func (options *GetPropertyOptions) SetInclude(include string) *GetPropertyOptions { + options.Include = core.StringPtr(include) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetPropertyOptions) SetHeaders(param map[string]string) *GetPropertyOptions { + options.Headers = param + return options +} + +// GetSegmentOptions : The GetSegment options. +type GetSegmentOptions struct { + // Segment Id. + SegmentID *string `json:"segment_id" validate:"required,ne="` + + // Include feature and property details in the response. + Include []string `json:"include,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetSegmentOptions.Include property. +const ( + GetSegmentOptions_Include_Features = "features" + GetSegmentOptions_Include_Properties = "properties" +) + +// NewGetSegmentOptions : Instantiate GetSegmentOptions +func (*AppConfigurationV1) NewGetSegmentOptions(segmentID string) *GetSegmentOptions { + return &GetSegmentOptions{ + SegmentID: core.StringPtr(segmentID), + } +} + +// SetSegmentID : Allow user to set SegmentID +func (options *GetSegmentOptions) SetSegmentID(segmentID string) *GetSegmentOptions { + options.SegmentID = core.StringPtr(segmentID) + return options +} + +// SetInclude : Allow user to set Include +func (options *GetSegmentOptions) SetInclude(include []string) *GetSegmentOptions { + options.Include = include + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSegmentOptions) SetHeaders(param map[string]string) *GetSegmentOptions { + options.Headers = param + return options +} + +// ListCollectionsOptions : The ListCollections options. +type ListCollectionsOptions struct { + // If set to `true`, returns expanded view of the resource details. + Expand *bool `json:"expand,omitempty"` + + // Sort the collection details based on the specified attribute. + Sort *string `json:"sort,omitempty"` + + // Filter the resources to be returned based on the associated tags. Specify the parameter as a list of comma separated + // tags. Returns resources associated with any of the specified tags. + Tags *string `json:"tags,omitempty"` + + // Filter collections by a list of comma separated features. + Features []string `json:"features,omitempty"` + + // Filter collections by a list of comma separated properties. + Properties []string `json:"properties,omitempty"` + + // Include feature and property details in the response. + Include []string `json:"include,omitempty"` + + // The number of records to retrieve. By default, the list operation return the first 10 records. 
To retrieve different + // set of records, use `limit` with `offset` to page through the available records. + Limit *int64 `json:"limit,omitempty"` + + // The number of records to skip. By specifying `offset`, you retrieve a subset of items that starts with the `offset` + // value. Use `offset` with `limit` to page through the available records. + Offset *int64 `json:"offset,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListCollectionsOptions.Sort property. +// Sort the collection details based on the specified attribute. +const ( + ListCollectionsOptions_Sort_CollectionID = "collection_id" + ListCollectionsOptions_Sort_CreatedTime = "created_time" + ListCollectionsOptions_Sort_UpdatedTime = "updated_time" +) + +// Constants associated with the ListCollectionsOptions.Include property. +const ( + ListCollectionsOptions_Include_Features = "features" + ListCollectionsOptions_Include_Properties = "properties" +) + +// NewListCollectionsOptions : Instantiate ListCollectionsOptions +func (*AppConfigurationV1) NewListCollectionsOptions() *ListCollectionsOptions { + return &ListCollectionsOptions{} +} + +// SetExpand : Allow user to set Expand +func (options *ListCollectionsOptions) SetExpand(expand bool) *ListCollectionsOptions { + options.Expand = core.BoolPtr(expand) + return options +} + +// SetSort : Allow user to set Sort +func (options *ListCollectionsOptions) SetSort(sort string) *ListCollectionsOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetTags : Allow user to set Tags +func (options *ListCollectionsOptions) SetTags(tags string) *ListCollectionsOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetFeatures : Allow user to set Features +func (options *ListCollectionsOptions) SetFeatures(features []string) *ListCollectionsOptions { + options.Features = features + return options +} + +// SetProperties : Allow user to set Properties +func (options *ListCollectionsOptions) SetProperties(properties []string) *ListCollectionsOptions { + options.Properties = properties + return options +} + +// SetInclude : Allow user to set Include +func (options *ListCollectionsOptions) SetInclude(include []string) *ListCollectionsOptions { + options.Include = include + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListCollectionsOptions) SetLimit(limit int64) *ListCollectionsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListCollectionsOptions) SetOffset(offset int64) *ListCollectionsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListCollectionsOptions) SetHeaders(param map[string]string) *ListCollectionsOptions { + options.Headers = param + return options +} + +// ListEnvironmentsOptions : The ListEnvironments options. +type ListEnvironmentsOptions struct { + // If set to `true`, returns expanded view of the resource details. + Expand *bool `json:"expand,omitempty"` + + // Sort the environment details based on the specified attribute. + Sort *string `json:"sort,omitempty"` + + // Filter the resources to be returned based on the associated tags. Specify the parameter as a list of comma separated + // tags. Returns resources associated with any of the specified tags. + Tags *string `json:"tags,omitempty"` + + // Include feature and property details in the response. 
+ Include []string `json:"include,omitempty"` + + // The number of records to retrieve. By default, the list operation return the first 10 records. To retrieve different + // set of records, use `limit` with `offset` to page through the available records. + Limit *int64 `json:"limit,omitempty"` + + // The number of records to skip. By specifying `offset`, you retrieve a subset of items that starts with the `offset` + // value. Use `offset` with `limit` to page through the available records. + Offset *int64 `json:"offset,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListEnvironmentsOptions.Sort property. +// Sort the environment details based on the specified attribute. +const ( + ListEnvironmentsOptions_Sort_CreatedTime = "created_time" + ListEnvironmentsOptions_Sort_EnvironmentID = "environment_id" + ListEnvironmentsOptions_Sort_UpdatedTime = "updated_time" +) + +// Constants associated with the ListEnvironmentsOptions.Include property. +const ( + ListEnvironmentsOptions_Include_Features = "features" + ListEnvironmentsOptions_Include_Properties = "properties" +) + +// NewListEnvironmentsOptions : Instantiate ListEnvironmentsOptions +func (*AppConfigurationV1) NewListEnvironmentsOptions() *ListEnvironmentsOptions { + return &ListEnvironmentsOptions{} +} + +// SetExpand : Allow user to set Expand +func (options *ListEnvironmentsOptions) SetExpand(expand bool) *ListEnvironmentsOptions { + options.Expand = core.BoolPtr(expand) + return options +} + +// SetSort : Allow user to set Sort +func (options *ListEnvironmentsOptions) SetSort(sort string) *ListEnvironmentsOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetTags : Allow user to set Tags +func (options *ListEnvironmentsOptions) SetTags(tags string) *ListEnvironmentsOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetInclude : Allow user to set Include +func (options *ListEnvironmentsOptions) SetInclude(include []string) *ListEnvironmentsOptions { + options.Include = include + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListEnvironmentsOptions) SetLimit(limit int64) *ListEnvironmentsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListEnvironmentsOptions) SetOffset(offset int64) *ListEnvironmentsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListEnvironmentsOptions) SetHeaders(param map[string]string) *ListEnvironmentsOptions { + options.Headers = param + return options +} + +// ListFeaturesOptions : The ListFeatures options. +type ListFeaturesOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // If set to `true`, returns expanded view of the resource details. + Expand *bool `json:"expand,omitempty"` + + // Sort the feature details based on the specified attribute. + Sort *string `json:"sort,omitempty"` + + // Filter the resources to be returned based on the associated tags. Specify the parameter as a list of comma separated + // tags. Returns resources associated with any of the specified tags. + Tags *string `json:"tags,omitempty"` + + // Filter features by a list of comma separated collections. + Collections []string `json:"collections,omitempty"` + + // Filter features by a list of comma separated segments. 
+ Segments []string `json:"segments,omitempty"`
+
+ // Include the associated collections or targeting rules details in the response.
+ Include []string `json:"include,omitempty"`
+
+ // The number of records to retrieve. By default, the list operation returns the first 10 records. To retrieve a
+ // different set of records, use `limit` with `offset` to page through the available records.
+ Limit *int64 `json:"limit,omitempty"`
+
+ // The number of records to skip. By specifying `offset`, you retrieve a subset of items that starts with the `offset`
+ // value. Use `offset` with `limit` to page through the available records.
+ Offset *int64 `json:"offset,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// Constants associated with the ListFeaturesOptions.Sort property.
+// Sort the feature details based on the specified attribute.
+const (
+ ListFeaturesOptions_Sort_CreatedTime = "created_time"
+ ListFeaturesOptions_Sort_FeatureID = "feature_id"
+ ListFeaturesOptions_Sort_UpdatedTime = "updated_time"
+)
+
+// Constants associated with the ListFeaturesOptions.Include property.
+const (
+ ListFeaturesOptions_Include_Collections = "collections"
+ ListFeaturesOptions_Include_Rules = "rules"
+)
+
+// NewListFeaturesOptions : Instantiate ListFeaturesOptions
+func (*AppConfigurationV1) NewListFeaturesOptions(environmentID string) *ListFeaturesOptions {
+ return &ListFeaturesOptions{
+  EnvironmentID: core.StringPtr(environmentID),
+ }
+}
+
+// SetEnvironmentID : Allow user to set EnvironmentID
+func (options *ListFeaturesOptions) SetEnvironmentID(environmentID string) *ListFeaturesOptions {
+ options.EnvironmentID = core.StringPtr(environmentID)
+ return options
+}
+
+// SetExpand : Allow user to set Expand
+func (options *ListFeaturesOptions) SetExpand(expand bool) *ListFeaturesOptions {
+ options.Expand = core.BoolPtr(expand)
+ return options
+}
+
+// SetSort : Allow user to set Sort
+func (options *ListFeaturesOptions) SetSort(sort string) *ListFeaturesOptions {
+ options.Sort = core.StringPtr(sort)
+ return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *ListFeaturesOptions) SetTags(tags string) *ListFeaturesOptions {
+ options.Tags = core.StringPtr(tags)
+ return options
+}
+
+// SetCollections : Allow user to set Collections
+func (options *ListFeaturesOptions) SetCollections(collections []string) *ListFeaturesOptions {
+ options.Collections = collections
+ return options
+}
+
+// SetSegments : Allow user to set Segments
+func (options *ListFeaturesOptions) SetSegments(segments []string) *ListFeaturesOptions {
+ options.Segments = segments
+ return options
+}
+
+// SetInclude : Allow user to set Include
+func (options *ListFeaturesOptions) SetInclude(include []string) *ListFeaturesOptions {
+ options.Include = include
+ return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListFeaturesOptions) SetLimit(limit int64) *ListFeaturesOptions {
+ options.Limit = core.Int64Ptr(limit)
+ return options
+}
+
+// SetOffset : Allow user to set Offset
+func (options *ListFeaturesOptions) SetOffset(offset int64) *ListFeaturesOptions {
+ options.Offset = core.Int64Ptr(offset)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListFeaturesOptions) SetHeaders(param map[string]string) *ListFeaturesOptions {
+ options.Headers = param
+ return options
+}
+
+// ListPropertiesOptions : The ListProperties options.
+type ListPropertiesOptions struct {
+ // Environment Id.
+ EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // If set to `true`, returns expanded view of the resource details. + Expand *bool `json:"expand,omitempty"` + + // Sort the property details based on the specified attribute. + Sort *string `json:"sort,omitempty"` + + // Filter the resources to be returned based on the associated tags. Specify the parameter as a list of comma separated + // tags. Returns resources associated with any of the specified tags. + Tags *string `json:"tags,omitempty"` + + // Filter properties by a list of comma separated collections. + Collections []string `json:"collections,omitempty"` + + // Filter properties by a list of comma separated segments. + Segments []string `json:"segments,omitempty"` + + // Include the associated collections or targeting rules details in the response. + Include []string `json:"include,omitempty"` + + // The number of records to retrieve. By default, the list operation return the first 10 records. To retrieve different + // set of records, use `limit` with `offset` to page through the available records. + Limit *int64 `json:"limit,omitempty"` + + // The number of records to skip. By specifying `offset`, you retrieve a subset of items that starts with the `offset` + // value. Use `offset` with `limit` to page through the available records. + Offset *int64 `json:"offset,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListPropertiesOptions.Sort property. +// Sort the property details based on the specified attribute. +const ( + ListPropertiesOptions_Sort_CreatedTime = "created_time" + ListPropertiesOptions_Sort_PropertyID = "property_id" + ListPropertiesOptions_Sort_UpdatedTime = "updated_time" +) + +// Constants associated with the ListPropertiesOptions.Include property. 
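+// A minimal offset/limit paging sketch (editor's addition; `appConfigService`
+// is an assumed client and ListProperties the corresponding service operation):
+//
+//   options := appConfigService.NewListPropertiesOptions("dev").
+//       SetInclude([]string{ListPropertiesOptions_Include_Rules}).SetLimit(10)
+//   for offset := int64(0); ; offset += 10 {
+//       page, _, err := appConfigService.ListProperties(options.SetOffset(offset))
+//       if err != nil || len(page.Properties) < 10 {
+//           break
+//       }
+//   }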
+const (
+ ListPropertiesOptions_Include_Collections = "collections"
+ ListPropertiesOptions_Include_Rules = "rules"
+)
+
+// NewListPropertiesOptions : Instantiate ListPropertiesOptions
+func (*AppConfigurationV1) NewListPropertiesOptions(environmentID string) *ListPropertiesOptions {
+ return &ListPropertiesOptions{
+  EnvironmentID: core.StringPtr(environmentID),
+ }
+}
+
+// SetEnvironmentID : Allow user to set EnvironmentID
+func (options *ListPropertiesOptions) SetEnvironmentID(environmentID string) *ListPropertiesOptions {
+ options.EnvironmentID = core.StringPtr(environmentID)
+ return options
+}
+
+// SetExpand : Allow user to set Expand
+func (options *ListPropertiesOptions) SetExpand(expand bool) *ListPropertiesOptions {
+ options.Expand = core.BoolPtr(expand)
+ return options
+}
+
+// SetSort : Allow user to set Sort
+func (options *ListPropertiesOptions) SetSort(sort string) *ListPropertiesOptions {
+ options.Sort = core.StringPtr(sort)
+ return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *ListPropertiesOptions) SetTags(tags string) *ListPropertiesOptions {
+ options.Tags = core.StringPtr(tags)
+ return options
+}
+
+// SetCollections : Allow user to set Collections
+func (options *ListPropertiesOptions) SetCollections(collections []string) *ListPropertiesOptions {
+ options.Collections = collections
+ return options
+}
+
+// SetSegments : Allow user to set Segments
+func (options *ListPropertiesOptions) SetSegments(segments []string) *ListPropertiesOptions {
+ options.Segments = segments
+ return options
+}
+
+// SetInclude : Allow user to set Include
+func (options *ListPropertiesOptions) SetInclude(include []string) *ListPropertiesOptions {
+ options.Include = include
+ return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListPropertiesOptions) SetLimit(limit int64) *ListPropertiesOptions {
+ options.Limit = core.Int64Ptr(limit)
+ return options
+}
+
+// SetOffset : Allow user to set Offset
+func (options *ListPropertiesOptions) SetOffset(offset int64) *ListPropertiesOptions {
+ options.Offset = core.Int64Ptr(offset)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListPropertiesOptions) SetHeaders(param map[string]string) *ListPropertiesOptions {
+ options.Headers = param
+ return options
+}
+
+// ListSegmentsOptions : The ListSegments options.
+type ListSegmentsOptions struct {
+ // If set to `true`, returns expanded view of the resource details.
+ Expand *bool `json:"expand,omitempty"`
+
+ // Sort the segment details based on the specified attribute.
+ Sort *string `json:"sort,omitempty"`
+
+ // Filter the resources to be returned based on the associated tags. Specify the parameter as a list of comma separated
+ // tags. Returns resources associated with any of the specified tags.
+ Tags *string `json:"tags,omitempty"`
+
+ // Segment details to include the associated rules in the response.
+ Include *string `json:"include,omitempty"`
+
+ // The number of records to retrieve. By default, the list operation returns the first 10 records. To retrieve a
+ // different set of records, use `limit` with `offset` to page through the available records.
+ Limit *int64 `json:"limit,omitempty"`
+
+ // The number of records to skip. By specifying `offset`, you retrieve a subset of items that starts with the `offset`
+ // value. Use `offset` with `limit` to page through the available records.
+ Offset *int64 `json:"offset,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListSegmentsOptions.Sort property. +// Sort the segment details based on the specified attribute. +const ( + ListSegmentsOptions_Sort_CreatedTime = "created_time" + ListSegmentsOptions_Sort_SegmentID = "segment_id" + ListSegmentsOptions_Sort_UpdatedTime = "updated_time" +) + +// Constants associated with the ListSegmentsOptions.Include property. +// Segment details to include the associated rules in the response. +const ( + ListSegmentsOptions_Include_Rules = "rules" +) + +// NewListSegmentsOptions : Instantiate ListSegmentsOptions +func (*AppConfigurationV1) NewListSegmentsOptions() *ListSegmentsOptions { + return &ListSegmentsOptions{} +} + +// SetExpand : Allow user to set Expand +func (options *ListSegmentsOptions) SetExpand(expand bool) *ListSegmentsOptions { + options.Expand = core.BoolPtr(expand) + return options +} + +// SetSort : Allow user to set Sort +func (options *ListSegmentsOptions) SetSort(sort string) *ListSegmentsOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetTags : Allow user to set Tags +func (options *ListSegmentsOptions) SetTags(tags string) *ListSegmentsOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetInclude : Allow user to set Include +func (options *ListSegmentsOptions) SetInclude(include string) *ListSegmentsOptions { + options.Include = core.StringPtr(include) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListSegmentsOptions) SetLimit(limit int64) *ListSegmentsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListSegmentsOptions) SetOffset(offset int64) *ListSegmentsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListSegmentsOptions) SetHeaders(param map[string]string) *ListSegmentsOptions { + options.Headers = param + return options +} + +// PageHrefResponse : Response having URL of the page. +type PageHrefResponse struct { + // URL of the response. + Href *string `json:"href" validate:"required"` +} + + +// UnmarshalPageHrefResponse unmarshals an instance of PageHrefResponse from the specified map of raw messages. +func UnmarshalPageHrefResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageHrefResponse) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PropertiesList : List of all properties. +type PropertiesList struct { + // Array of properties. + Properties []Property `json:"properties" validate:"required"` + + // Number of records returned. + Limit *int64 `json:"limit" validate:"required"` + + // Skipped number of records. + Offset *int64 `json:"offset" validate:"required"` + + // Total number of records. + TotalCount *int64 `json:"total_count" validate:"required"` + + // Response having URL of the page. + First *PageHrefResponse `json:"first,omitempty"` + + // Response having URL of the page. + Previous *PageHrefResponse `json:"previous,omitempty"` + + // Response having URL of the page. + Next *PageHrefResponse `json:"next,omitempty"` + + // Response having URL of the page. 
+ Last *PageHrefResponse `json:"last,omitempty"` +} + + +// UnmarshalPropertiesList unmarshals an instance of PropertiesList from the specified map of raw messages. +func UnmarshalPropertiesList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PropertiesList) + err = core.UnmarshalModel(m, "properties", &obj.Properties, UnmarshalProperty) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "previous", &obj.Previous, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "last", &obj.Last, UnmarshalPageHrefResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Property : Details of the property. +type Property struct { + // Property name. + Name *string `json:"name" validate:"required"` + + // Property id. + PropertyID *string `json:"property_id" validate:"required"` + + // Property description. + Description *string `json:"description,omitempty"` + + // Type of the Property (BOOLEAN, STRING, NUMERIC). + Type *string `json:"type" validate:"required"` + + // Value of the Property. The value can be Boolean, String or a Numeric value as per the `type` attribute. + Value interface{} `json:"value" validate:"required"` + + // Tags associated with the property. + Tags *string `json:"tags,omitempty"` + + // Specify the targeting rules that is used to set different property values for different segments. + SegmentRules []SegmentRule `json:"segment_rules,omitempty"` + + // Denotes if the targeting rules are specified for the property. + SegmentExists *bool `json:"segment_exists,omitempty"` + + // List of collection id representing the collections that are associated with the specified property. + Collections []CollectionRef `json:"collections,omitempty"` + + // Creation time of the property. + CreatedTime *strfmt.DateTime `json:"created_time,omitempty"` + + // Last modified time of the property data. + UpdatedTime *strfmt.DateTime `json:"updated_time,omitempty"` + + // The last occurrence of the property value evaluation. + EvaluationTime *strfmt.DateTime `json:"evaluation_time,omitempty"` + + // Property URL. + Href *string `json:"href,omitempty"` +} + +// Constants associated with the Property.Type property. +// Type of the Property (BOOLEAN, STRING, NUMERIC). +const ( + Property_Type_Boolean = "BOOLEAN" + Property_Type_Numeric = "NUMERIC" + Property_Type_String = "STRING" +) + + +// NewProperty : Instantiate Property (Generic Model Constructor) +func (*AppConfigurationV1) NewProperty(name string, propertyID string, typeVar string, value interface{}) (model *Property, err error) { + model = &Property{ + Name: core.StringPtr(name), + PropertyID: core.StringPtr(propertyID), + Type: core.StringPtr(typeVar), + Value: value, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalProperty unmarshals an instance of Property from the specified map of raw messages. 
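+// The generic constructor above can be exercised like this (editor's sketch;
+// the name, id and value are hypothetical):
+//
+//   property, err := appConfigService.NewProperty("Request Timeout", "request-timeout",
+//       Property_Type_Numeric, 30)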
+func UnmarshalProperty(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Property) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "property_id", &obj.PropertyID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalModel(m, "segment_rules", &obj.SegmentRules, UnmarshalSegmentRule) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "segment_exists", &obj.SegmentExists) + if err != nil { + return + } + err = core.UnmarshalModel(m, "collections", &obj.Collections, UnmarshalCollectionRef) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_time", &obj.CreatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_time", &obj.UpdatedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "evaluation_time", &obj.EvaluationTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PropertyOutput : Property object. +type PropertyOutput struct { + // Property id. + PropertyID *string `json:"property_id" validate:"required"` + + // Property name. + Name *string `json:"name" validate:"required"` +} + + +// NewPropertyOutput : Instantiate PropertyOutput (Generic Model Constructor) +func (*AppConfigurationV1) NewPropertyOutput(propertyID string, name string) (model *PropertyOutput, err error) { + model = &PropertyOutput{ + PropertyID: core.StringPtr(propertyID), + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalPropertyOutput unmarshals an instance of PropertyOutput from the specified map of raw messages. +func UnmarshalPropertyOutput(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PropertyOutput) + err = core.UnmarshalPrimitive(m, "property_id", &obj.PropertyID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Rule : Rule is used to determine if the entity belongs to the segment during feature / property evaluation. An entity is +// identified by a unique identifier and the attributes that it defines. Any feature flag and property value evaluation +// is performed in the context of an entity when it is targeted to segments. +type Rule struct { + // Attribute name. + AttributeName *string `json:"attribute_name" validate:"required"` + + // Operator to be used for the evaluation if the entity is part of the segment. + Operator *string `json:"operator" validate:"required"` + + // List of values. Entities matching any of the given values will be considered to be part of the segment. + Values []string `json:"values" validate:"required"` +} + +// Constants associated with the Rule.Operator property. +// Operator to be used for the evaluation if the entity is part of the segment. 
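+// For instance, a rule that places entities whose assumed `email` attribute
+// ends with "@example.com" into the segment (editor's illustration):
+//
+//   rule, err := appConfigService.NewRule("email", Rule_Operator_Endswith, []string{"@example.com"})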
+const ( + Rule_Operator_Contains = "contains" + Rule_Operator_Endswith = "endsWith" + Rule_Operator_Greaterthan = "greaterThan" + Rule_Operator_Greaterthanequals = "greaterThanEquals" + Rule_Operator_Is = "is" + Rule_Operator_Lesserthan = "lesserThan" + Rule_Operator_Lesserthanequals = "lesserThanEquals" + Rule_Operator_Startswith = "startsWith" +) + + +// NewRule : Instantiate Rule (Generic Model Constructor) +func (*AppConfigurationV1) NewRule(attributeName string, operator string, values []string) (model *Rule, err error) { + model = &Rule{ + AttributeName: core.StringPtr(attributeName), + Operator: core.StringPtr(operator), + Values: values, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalRule unmarshals an instance of Rule from the specified map of raw messages. +func UnmarshalRule(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Rule) + err = core.UnmarshalPrimitive(m, "attribute_name", &obj.AttributeName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "operator", &obj.Operator) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "values", &obj.Values) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Segment : Details of the segment. +type Segment struct { + // Segment name. + Name *string `json:"name" validate:"required"` + + // Segment id. + SegmentID *string `json:"segment_id" validate:"required"` + + // Segment description. + Description *string `json:"description,omitempty"` + + // Tags associated with the segments. + Tags *string `json:"tags,omitempty"` + + // List of rules that determine if the entity is part of the segment. + Rules []Rule `json:"rules" validate:"required"` + + // Creation time of the segment. + CreatedTime *strfmt.DateTime `json:"created_time,omitempty"` + + // Last modified time of the segment data. + UpdatedTime *strfmt.DateTime `json:"updated_time,omitempty"` + + // Segment URL. + Href *string `json:"href,omitempty"` + + // List of Features associated with the segment. + Features []FeatureOutput `json:"features,omitempty"` + + // List of properties associated with the segment. + Properties []PropertyOutput `json:"properties,omitempty"` +} + + +// NewSegment : Instantiate Segment (Generic Model Constructor) +func (*AppConfigurationV1) NewSegment(name string, segmentID string, rules []Rule) (model *Segment, err error) { + model = &Segment{ + Name: core.StringPtr(name), + SegmentID: core.StringPtr(segmentID), + Rules: rules, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSegment unmarshals an instance of Segment from the specified map of raw messages. 
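+// Editor's sketch combining NewRule and NewSegment from above (the attribute,
+// values and ids are hypothetical):
+//
+//   rule, _ := appConfigService.NewRule("country", Rule_Operator_Is, []string{"IN"})
+//   segment, err := appConfigService.NewSegment("India users", "india-users", []Rule{*rule})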
+func UnmarshalSegment(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Segment)
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalPrimitive(m, "segment_id", &obj.SegmentID)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalPrimitive(m, "tags", &obj.Tags)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalRule)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalPrimitive(m, "created_time", &obj.CreatedTime)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalPrimitive(m, "updated_time", &obj.UpdatedTime)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalModel(m, "features", &obj.Features, UnmarshalFeatureOutput)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalModel(m, "properties", &obj.Properties, UnmarshalPropertyOutput)
+ if err != nil {
+  return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SegmentRule : SegmentRule struct
+type SegmentRule struct {
+ // The list of targeted segments.
+ Rules []TargetSegments `json:"rules" validate:"required"`
+
+ // Value to be used for evaluation for this rule. The value can be Boolean, String or a Numeric value as per the `type`
+ // attribute.
+ Value interface{} `json:"value" validate:"required"`
+
+ // Order of the rule, used during evaluation. The evaluation is performed in the order defined and the value associated
+ // with the first matching rule is used for evaluation.
+ Order *int64 `json:"order" validate:"required"`
+}
+
+
+// NewSegmentRule : Instantiate SegmentRule (Generic Model Constructor)
+func (*AppConfigurationV1) NewSegmentRule(rules []TargetSegments, value interface{}, order int64) (model *SegmentRule, err error) {
+ model = &SegmentRule{
+  Rules: rules,
+  Value: value,
+  Order: core.Int64Ptr(order),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalSegmentRule unmarshals an instance of SegmentRule from the specified map of raw messages.
+func UnmarshalSegmentRule(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(SegmentRule)
+ err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalTargetSegments)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+  return
+ }
+ err = core.UnmarshalPrimitive(m, "order", &obj.Order)
+ if err != nil {
+  return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SegmentsList : List of all segments.
+type SegmentsList struct {
+ // Array of Segments.
+ Segments []Segment `json:"segments" validate:"required"`
+
+ // Number of records returned.
+ Limit *int64 `json:"limit" validate:"required"`
+
+ // Skipped number of records.
+ Offset *int64 `json:"offset" validate:"required"`
+
+ // Total number of records.
+ TotalCount *int64 `json:"total_count" validate:"required"`
+
+ // Response having URL of the page.
+ First *PageHrefResponse `json:"first,omitempty"`
+
+ // Response having URL of the page.
+ Previous *PageHrefResponse `json:"previous,omitempty"`
+
+ // Response having URL of the page.
+ Next *PageHrefResponse `json:"next,omitempty"`
+
+ // Response having URL of the page.
+ Last *PageHrefResponse `json:"last,omitempty"` +} + + +// UnmarshalSegmentsList unmarshals an instance of SegmentsList from the specified map of raw messages. +func UnmarshalSegmentsList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SegmentsList) + err = core.UnmarshalModel(m, "segments", &obj.Segments, UnmarshalSegment) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "previous", &obj.Previous, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalPageHrefResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "last", &obj.Last, UnmarshalPageHrefResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TargetSegments : TargetSegments struct +type TargetSegments struct { + // List of segment ids that are used for targeting using the rule. + Segments []string `json:"segments" validate:"required"` +} + + +// NewTargetSegments : Instantiate TargetSegments (Generic Model Constructor) +func (*AppConfigurationV1) NewTargetSegments(segments []string) (model *TargetSegments, err error) { + model = &TargetSegments{ + Segments: segments, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalTargetSegments unmarshals an instance of TargetSegments from the specified map of raw messages. +func UnmarshalTargetSegments(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TargetSegments) + err = core.UnmarshalPrimitive(m, "segments", &obj.Segments) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ToggleFeatureOptions : The ToggleFeature options. +type ToggleFeatureOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Feature Id. + FeatureID *string `json:"feature_id" validate:"required,ne="` + + // The state of the feature flag. 
+ Enabled *bool `json:"enabled,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewToggleFeatureOptions : Instantiate ToggleFeatureOptions +func (*AppConfigurationV1) NewToggleFeatureOptions(environmentID string, featureID string) *ToggleFeatureOptions { + return &ToggleFeatureOptions{ + EnvironmentID: core.StringPtr(environmentID), + FeatureID: core.StringPtr(featureID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *ToggleFeatureOptions) SetEnvironmentID(environmentID string) *ToggleFeatureOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetFeatureID : Allow user to set FeatureID +func (options *ToggleFeatureOptions) SetFeatureID(featureID string) *ToggleFeatureOptions { + options.FeatureID = core.StringPtr(featureID) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *ToggleFeatureOptions) SetEnabled(enabled bool) *ToggleFeatureOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ToggleFeatureOptions) SetHeaders(param map[string]string) *ToggleFeatureOptions { + options.Headers = param + return options +} + +// UpdateCollectionOptions : The UpdateCollection options. +type UpdateCollectionOptions struct { + // Collection Id of the collection. + CollectionID *string `json:"collection_id" validate:"required,ne="` + + // Collection name. + Name *string `json:"name,omitempty"` + + // Description of the collection. + Description *string `json:"description,omitempty"` + + // Tags associated with the collection. + Tags *string `json:"tags,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateCollectionOptions : Instantiate UpdateCollectionOptions +func (*AppConfigurationV1) NewUpdateCollectionOptions(collectionID string) *UpdateCollectionOptions { + return &UpdateCollectionOptions{ + CollectionID: core.StringPtr(collectionID), + } +} + +// SetCollectionID : Allow user to set CollectionID +func (options *UpdateCollectionOptions) SetCollectionID(collectionID string) *UpdateCollectionOptions { + options.CollectionID = core.StringPtr(collectionID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateCollectionOptions) SetName(name string) *UpdateCollectionOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateCollectionOptions) SetDescription(description string) *UpdateCollectionOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetTags : Allow user to set Tags +func (options *UpdateCollectionOptions) SetTags(tags string) *UpdateCollectionOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateCollectionOptions) SetHeaders(param map[string]string) *UpdateCollectionOptions { + options.Headers = param + return options +} + +// UpdateEnvironmentOptions : The UpdateEnvironment options. +type UpdateEnvironmentOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Environment name. + Name *string `json:"name,omitempty"` + + // Environment description. + Description *string `json:"description,omitempty"` + + // Tags associated with the environment. 
+ Tags *string `json:"tags,omitempty"`
+
+ // Color code to distinguish the environment, given as a hex code. For example `#FF0000` for `red`.
+ ColorCode *string `json:"color_code,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewUpdateEnvironmentOptions : Instantiate UpdateEnvironmentOptions
+func (*AppConfigurationV1) NewUpdateEnvironmentOptions(environmentID string) *UpdateEnvironmentOptions {
+ return &UpdateEnvironmentOptions{
+  EnvironmentID: core.StringPtr(environmentID),
+ }
+}
+
+// SetEnvironmentID : Allow user to set EnvironmentID
+func (options *UpdateEnvironmentOptions) SetEnvironmentID(environmentID string) *UpdateEnvironmentOptions {
+ options.EnvironmentID = core.StringPtr(environmentID)
+ return options
+}
+
+// SetName : Allow user to set Name
+func (options *UpdateEnvironmentOptions) SetName(name string) *UpdateEnvironmentOptions {
+ options.Name = core.StringPtr(name)
+ return options
+}
+
+// SetDescription : Allow user to set Description
+func (options *UpdateEnvironmentOptions) SetDescription(description string) *UpdateEnvironmentOptions {
+ options.Description = core.StringPtr(description)
+ return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *UpdateEnvironmentOptions) SetTags(tags string) *UpdateEnvironmentOptions {
+ options.Tags = core.StringPtr(tags)
+ return options
+}
+
+// SetColorCode : Allow user to set ColorCode
+func (options *UpdateEnvironmentOptions) SetColorCode(colorCode string) *UpdateEnvironmentOptions {
+ options.ColorCode = core.StringPtr(colorCode)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateEnvironmentOptions) SetHeaders(param map[string]string) *UpdateEnvironmentOptions {
+ options.Headers = param
+ return options
+}
+
+// UpdateFeatureOptions : The UpdateFeature options.
+type UpdateFeatureOptions struct {
+ // Environment Id.
+ EnvironmentID *string `json:"environment_id" validate:"required,ne="`
+
+ // Feature Id.
+ FeatureID *string `json:"feature_id" validate:"required,ne="`
+
+ // Feature name.
+ Name *string `json:"name,omitempty"`
+
+ // Feature description.
+ Description *string `json:"description,omitempty"`
+
+ // Value of the feature when it is enabled. The value can be Boolean, String or a Numeric value as per the `type`
+ // attribute.
+ EnabledValue interface{} `json:"enabled_value,omitempty"`
+
+ // Value of the feature when it is disabled. The value can be Boolean, String or a Numeric value as per the `type`
+ // attribute.
+ DisabledValue interface{} `json:"disabled_value,omitempty"`
+
+ // The state of the feature flag.
+ Enabled *bool `json:"enabled,omitempty"`
+
+ // Tags associated with the feature.
+ Tags *string `json:"tags,omitempty"`
+
+ // Specify the targeting rules that are used to set different feature flag values for different segments.
+ SegmentRules []SegmentRule `json:"segment_rules,omitempty"`
+
+ // List of collection ids representing the collections that are associated with the specified feature.
+ Collections []CollectionRef `json:"collections,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateFeatureOptions : Instantiate UpdateFeatureOptions +func (*AppConfigurationV1) NewUpdateFeatureOptions(environmentID string, featureID string) *UpdateFeatureOptions { + return &UpdateFeatureOptions{ + EnvironmentID: core.StringPtr(environmentID), + FeatureID: core.StringPtr(featureID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *UpdateFeatureOptions) SetEnvironmentID(environmentID string) *UpdateFeatureOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetFeatureID : Allow user to set FeatureID +func (options *UpdateFeatureOptions) SetFeatureID(featureID string) *UpdateFeatureOptions { + options.FeatureID = core.StringPtr(featureID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateFeatureOptions) SetName(name string) *UpdateFeatureOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateFeatureOptions) SetDescription(description string) *UpdateFeatureOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetEnabledValue : Allow user to set EnabledValue +func (options *UpdateFeatureOptions) SetEnabledValue(enabledValue interface{}) *UpdateFeatureOptions { + options.EnabledValue = enabledValue + return options +} + +// SetDisabledValue : Allow user to set DisabledValue +func (options *UpdateFeatureOptions) SetDisabledValue(disabledValue interface{}) *UpdateFeatureOptions { + options.DisabledValue = disabledValue + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *UpdateFeatureOptions) SetEnabled(enabled bool) *UpdateFeatureOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetTags : Allow user to set Tags +func (options *UpdateFeatureOptions) SetTags(tags string) *UpdateFeatureOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetSegmentRules : Allow user to set SegmentRules +func (options *UpdateFeatureOptions) SetSegmentRules(segmentRules []SegmentRule) *UpdateFeatureOptions { + options.SegmentRules = segmentRules + return options +} + +// SetCollections : Allow user to set Collections +func (options *UpdateFeatureOptions) SetCollections(collections []CollectionRef) *UpdateFeatureOptions { + options.Collections = collections + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateFeatureOptions) SetHeaders(param map[string]string) *UpdateFeatureOptions { + options.Headers = param + return options +} + +// UpdateFeatureValuesOptions : The UpdateFeatureValues options. +type UpdateFeatureValuesOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Feature Id. + FeatureID *string `json:"feature_id" validate:"required,ne="` + + // Feature name. + Name *string `json:"name,omitempty"` + + // Feature description. + Description *string `json:"description,omitempty"` + + // Tags associated with the feature. + Tags *string `json:"tags,omitempty"` + + // Value of the feature when it is enabled. The value can be Boolean, String or a Numeric value as per the `type` + // attribute. + EnabledValue interface{} `json:"enabled_value,omitempty"` + + // Value of the feature when it is disabled. 
The value can be Boolean, String or a Numeric value as per the `type`
+ // attribute.
+ DisabledValue interface{} `json:"disabled_value,omitempty"`
+
+ // Specify the targeting rules that are used to set different feature flag values for different segments.
+ SegmentRules []SegmentRule `json:"segment_rules,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewUpdateFeatureValuesOptions : Instantiate UpdateFeatureValuesOptions
+func (*AppConfigurationV1) NewUpdateFeatureValuesOptions(environmentID string, featureID string) *UpdateFeatureValuesOptions {
+ return &UpdateFeatureValuesOptions{
+  EnvironmentID: core.StringPtr(environmentID),
+  FeatureID: core.StringPtr(featureID),
+ }
+}
+
+// SetEnvironmentID : Allow user to set EnvironmentID
+func (options *UpdateFeatureValuesOptions) SetEnvironmentID(environmentID string) *UpdateFeatureValuesOptions {
+ options.EnvironmentID = core.StringPtr(environmentID)
+ return options
+}
+
+// SetFeatureID : Allow user to set FeatureID
+func (options *UpdateFeatureValuesOptions) SetFeatureID(featureID string) *UpdateFeatureValuesOptions {
+ options.FeatureID = core.StringPtr(featureID)
+ return options
+}
+
+// SetName : Allow user to set Name
+func (options *UpdateFeatureValuesOptions) SetName(name string) *UpdateFeatureValuesOptions {
+ options.Name = core.StringPtr(name)
+ return options
+}
+
+// SetDescription : Allow user to set Description
+func (options *UpdateFeatureValuesOptions) SetDescription(description string) *UpdateFeatureValuesOptions {
+ options.Description = core.StringPtr(description)
+ return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *UpdateFeatureValuesOptions) SetTags(tags string) *UpdateFeatureValuesOptions {
+ options.Tags = core.StringPtr(tags)
+ return options
+}
+
+// SetEnabledValue : Allow user to set EnabledValue
+func (options *UpdateFeatureValuesOptions) SetEnabledValue(enabledValue interface{}) *UpdateFeatureValuesOptions {
+ options.EnabledValue = enabledValue
+ return options
+}
+
+// SetDisabledValue : Allow user to set DisabledValue
+func (options *UpdateFeatureValuesOptions) SetDisabledValue(disabledValue interface{}) *UpdateFeatureValuesOptions {
+ options.DisabledValue = disabledValue
+ return options
+}
+
+// SetSegmentRules : Allow user to set SegmentRules
+func (options *UpdateFeatureValuesOptions) SetSegmentRules(segmentRules []SegmentRule) *UpdateFeatureValuesOptions {
+ options.SegmentRules = segmentRules
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateFeatureValuesOptions) SetHeaders(param map[string]string) *UpdateFeatureValuesOptions {
+ options.Headers = param
+ return options
+}
+
+// UpdatePropertyOptions : The UpdateProperty options.
+type UpdatePropertyOptions struct {
+ // Environment Id.
+ EnvironmentID *string `json:"environment_id" validate:"required,ne="`
+
+ // Property Id.
+ PropertyID *string `json:"property_id" validate:"required,ne="`
+
+ // Property name.
+ Name *string `json:"name,omitempty"`
+
+ // Property description.
+ Description *string `json:"description,omitempty"`
+
+ // Value of the property. The value can be Boolean, String or a Numeric value as per the `type` attribute.
+ Value interface{} `json:"value,omitempty"`
+
+ // Tags associated with the property.
+ Tags *string `json:"tags,omitempty"`
+
+ // Specify the targeting rules that are used to set different property values for different segments.
+ SegmentRules []SegmentRule `json:"segment_rules,omitempty"` + + // List of collection id representing the collections that are associated with the specified property. + Collections []CollectionRef `json:"collections,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdatePropertyOptions : Instantiate UpdatePropertyOptions +func (*AppConfigurationV1) NewUpdatePropertyOptions(environmentID string, propertyID string) *UpdatePropertyOptions { + return &UpdatePropertyOptions{ + EnvironmentID: core.StringPtr(environmentID), + PropertyID: core.StringPtr(propertyID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *UpdatePropertyOptions) SetEnvironmentID(environmentID string) *UpdatePropertyOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetPropertyID : Allow user to set PropertyID +func (options *UpdatePropertyOptions) SetPropertyID(propertyID string) *UpdatePropertyOptions { + options.PropertyID = core.StringPtr(propertyID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdatePropertyOptions) SetName(name string) *UpdatePropertyOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdatePropertyOptions) SetDescription(description string) *UpdatePropertyOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetValue : Allow user to set Value +func (options *UpdatePropertyOptions) SetValue(value interface{}) *UpdatePropertyOptions { + options.Value = value + return options +} + +// SetTags : Allow user to set Tags +func (options *UpdatePropertyOptions) SetTags(tags string) *UpdatePropertyOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetSegmentRules : Allow user to set SegmentRules +func (options *UpdatePropertyOptions) SetSegmentRules(segmentRules []SegmentRule) *UpdatePropertyOptions { + options.SegmentRules = segmentRules + return options +} + +// SetCollections : Allow user to set Collections +func (options *UpdatePropertyOptions) SetCollections(collections []CollectionRef) *UpdatePropertyOptions { + options.Collections = collections + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdatePropertyOptions) SetHeaders(param map[string]string) *UpdatePropertyOptions { + options.Headers = param + return options +} + +// UpdatePropertyValuesOptions : The UpdatePropertyValues options. +type UpdatePropertyValuesOptions struct { + // Environment Id. + EnvironmentID *string `json:"environment_id" validate:"required,ne="` + + // Property Id. + PropertyID *string `json:"property_id" validate:"required,ne="` + + // Property name. + Name *string `json:"name,omitempty"` + + // Property description. + Description *string `json:"description,omitempty"` + + // Tags associated with the property. + Tags *string `json:"tags,omitempty"` + + // Value of the property. The value can be Boolean, String or a Numeric value as per the `type` attribute. + Value interface{} `json:"value,omitempty"` + + // Specify the targeting rules that is used to set different property values for different segments. 
+ SegmentRules []SegmentRule `json:"segment_rules,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdatePropertyValuesOptions : Instantiate UpdatePropertyValuesOptions +func (*AppConfigurationV1) NewUpdatePropertyValuesOptions(environmentID string, propertyID string) *UpdatePropertyValuesOptions { + return &UpdatePropertyValuesOptions{ + EnvironmentID: core.StringPtr(environmentID), + PropertyID: core.StringPtr(propertyID), + } +} + +// SetEnvironmentID : Allow user to set EnvironmentID +func (options *UpdatePropertyValuesOptions) SetEnvironmentID(environmentID string) *UpdatePropertyValuesOptions { + options.EnvironmentID = core.StringPtr(environmentID) + return options +} + +// SetPropertyID : Allow user to set PropertyID +func (options *UpdatePropertyValuesOptions) SetPropertyID(propertyID string) *UpdatePropertyValuesOptions { + options.PropertyID = core.StringPtr(propertyID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdatePropertyValuesOptions) SetName(name string) *UpdatePropertyValuesOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdatePropertyValuesOptions) SetDescription(description string) *UpdatePropertyValuesOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetTags : Allow user to set Tags +func (options *UpdatePropertyValuesOptions) SetTags(tags string) *UpdatePropertyValuesOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetValue : Allow user to set Value +func (options *UpdatePropertyValuesOptions) SetValue(value interface{}) *UpdatePropertyValuesOptions { + options.Value = value + return options +} + +// SetSegmentRules : Allow user to set SegmentRules +func (options *UpdatePropertyValuesOptions) SetSegmentRules(segmentRules []SegmentRule) *UpdatePropertyValuesOptions { + options.SegmentRules = segmentRules + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdatePropertyValuesOptions) SetHeaders(param map[string]string) *UpdatePropertyValuesOptions { + options.Headers = param + return options +} + +// UpdateSegmentOptions : The UpdateSegment options. +type UpdateSegmentOptions struct { + // Segment Id. + SegmentID *string `json:"segment_id" validate:"required,ne="` + + // Segment name. + Name *string `json:"name,omitempty"` + + // Segment description. + Description *string `json:"description,omitempty"` + + // Tags associated with segments. + Tags *string `json:"tags,omitempty"` + + // List of rules that determine if the entity is part of the segment. 
+ Rules []Rule `json:"rules,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateSegmentOptions : Instantiate UpdateSegmentOptions +func (*AppConfigurationV1) NewUpdateSegmentOptions(segmentID string) *UpdateSegmentOptions { + return &UpdateSegmentOptions{ + SegmentID: core.StringPtr(segmentID), + } +} + +// SetSegmentID : Allow user to set SegmentID +func (options *UpdateSegmentOptions) SetSegmentID(segmentID string) *UpdateSegmentOptions { + options.SegmentID = core.StringPtr(segmentID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateSegmentOptions) SetName(name string) *UpdateSegmentOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateSegmentOptions) SetDescription(description string) *UpdateSegmentOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetTags : Allow user to set Tags +func (options *UpdateSegmentOptions) SetTags(tags string) *UpdateSegmentOptions { + options.Tags = core.StringPtr(tags) + return options +} + +// SetRules : Allow user to set Rules +func (options *UpdateSegmentOptions) SetRules(rules []Rule) *UpdateSegmentOptions { + options.Rules = rules + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSegmentOptions) SetHeaders(param map[string]string) *UpdateSegmentOptions { + options.Headers = param + return options +} diff --git a/vendor/github.com/IBM/appconfiguration-go-admin-sdk/common/headers.go b/vendor/github.com/IBM/appconfiguration-go-admin-sdk/common/headers.go new file mode 100644 index 00000000000..659eaf97d98 --- /dev/null +++ b/vendor/github.com/IBM/appconfiguration-go-admin-sdk/common/headers.go @@ -0,0 +1,87 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "fmt" + "runtime" +) + +const ( + sdkName = "appconfiguration-admin-sdk-go" + headerNameUserAgent = "User-Agent" +) + +// +// GetSdkHeaders - returns the set of SDK-specific headers to be included in an outgoing request. +// +// This function is invoked by generated service methods (i.e. methods which implement the REST API operations +// defined within the API definition). The purpose of this function is to give the SDK implementor the opportunity +// to provide SDK-specific HTTP headers that will be sent with an outgoing REST API request. +// This function is invoked for each invocation of a generated service method, +// so the set of HTTP headers could be request-specific. +// As an optimization, if your SDK will be returning the same set of HTTP headers for each invocation of this +// function, it is recommended that you initialize the returned map just once (perhaps by using +// lazy initialization) and simply return it each time the function is invoked, instead of building it each time +// as in the example below. 
+// +// If you plan to gather metrics for your SDK, the User-Agent header value must +// be a string similar to the following: +// appconfiguration-admin-sdk-go/1.0.0 (lang=go; arch=x86_64; os=Linux; go.version=1.12.9) +// +// In the example above, the analytics tool will parse the user-agent header and +// use the following properties: +// "appconfiguration-admin-sdk-go" - the name of your sdk +// "1.0.0"- the version of your sdk +// "lang=go" - the language of the current sdk +// "arch=x86_64; os=Linux; go.version=1.12.9" - system information +// +// Note: It is very important that the sdk name ends with the string `-sdk`, +// as the analytics data collector uses this to gather usage data. +// +// Parameters: +// serviceName - the name of the service as defined in the API definition (e.g. "MyService1") +// serviceVersion - the version of the service as defined in the API definition (e.g. "V1") +// operationId - the operationId as defined in the API definition (e.g. getContext) +// +// Returns: +// a Map which contains the set of headers to be included in the REST API request +// + +func GetHeaderNameUserAgent() string { + return headerNameUserAgent +} + +func GetSdkHeaders(serviceName string, serviceVersion string, operationId string) map[string]string { + sdkHeaders := make(map[string]string) + + sdkHeaders[headerNameUserAgent] = GetUserAgentInfo() + + return sdkHeaders +} + +var userAgent string = fmt.Sprintf("%s/%s %s", sdkName, Version, GetSystemInfo()) + +func GetUserAgentInfo() string { + return userAgent +} + +var systemInfo = fmt.Sprintf("(lang=go; arch=%s; os=%s; go.version=%s)", runtime.GOARCH, runtime.GOOS, runtime.Version()) + +func GetSystemInfo() string { + return systemInfo +} diff --git a/vendor/github.com/IBM/appconfiguration-go-admin-sdk/common/version.go b/vendor/github.com/IBM/appconfiguration-go-admin-sdk/common/version.go new file mode 100644 index 00000000000..5782dae08f3 --- /dev/null +++ b/vendor/github.com/IBM/appconfiguration-go-admin-sdk/common/version.go @@ -0,0 +1,20 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +// Version of the SDK +const Version = "0.1.0" diff --git a/vendor/github.com/IBM/container-registry-go-sdk/LICENSE b/vendor/github.com/IBM/container-registry-go-sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/IBM/container-registry-go-sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/IBM/container-registry-go-sdk/common/headers.go b/vendor/github.com/IBM/container-registry-go-sdk/common/headers.go new file mode 100644 index 00000000000..4e5017a32b6 --- /dev/null +++ b/vendor/github.com/IBM/container-registry-go-sdk/common/headers.go @@ -0,0 +1,82 @@ +/** + * (C) Copyright IBM Corp. 2019, 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "fmt" + "runtime" +) + +const ( + sdkName = "container-registry-go-sdk" + headerNameUserAgent = "User-Agent" +) + +// +// GetSdkHeaders - returns the set of SDK-specific headers to be included in an outgoing request. +// +// This function is invoked by generated service methods (i.e. methods which implement the REST API operations +// defined within the API definition). The purpose of this function is to give the SDK implementor the opportunity +// to provide SDK-specific HTTP headers that will be sent with an outgoing REST API request. +// This function is invoked for each invocation of a generated service method, +// so the set of HTTP headers could be request-specific. +// As an optimization, if your SDK will be returning the same set of HTTP headers for each invocation of this +// function, it is recommended that you initialize the returned map just once (perhaps by using +// lazy initialization) and simply return it each time the function is invoked, instead of building it each time +// as in the example below. +// +// If you plan to gather metrics for your SDK, the User-Agent header value must +// be a string similar to the following: +// container-registry-go-sdk/0.0.1 (lang=go; arch=x86_64; os=Linux; go.version=1.12.9) +// +// In the example above, the analytics tool will parse the user-agent header and +// use the following properties: +// "container-registry-go-sdk" - the name of your sdk +// "0.0.1"- the version of your sdk +// "lang=go" - the language of the current sdk +// "arch=x86_64; os=Linux; go.version=1.12.9" - system information +// +// Note: It is very important that the sdk name ends with the string `-sdk`, +// as the analytics data collector uses this to gather usage data. +// +// Parameters: +// serviceName - the name of the service as defined in the API definition (e.g. "MyService1") +// serviceVersion - the version of the service as defined in the API definition (e.g. "V1") +// operationId - the operationId as defined in the API definition (e.g. 
getContext) +// +// Returns: +// a Map which contains the set of headers to be included in the REST API request +// +func GetSdkHeaders(serviceName string, serviceVersion string, operationId string) map[string]string { + sdkHeaders := make(map[string]string) + + sdkHeaders[headerNameUserAgent] = GetUserAgentInfo() + + return sdkHeaders +} + +var userAgent string = fmt.Sprintf("%s/%s %s", sdkName, Version, GetSystemInfo()) + +func GetUserAgentInfo() string { + return userAgent +} + +var systemInfo = fmt.Sprintf("(lang=go; arch=%s; os=%s; go.version=%s)", runtime.GOARCH, runtime.GOOS, runtime.Version()) + +func GetSystemInfo() string { + return systemInfo +} diff --git a/vendor/github.com/IBM/container-registry-go-sdk/common/version.go b/vendor/github.com/IBM/container-registry-go-sdk/common/version.go new file mode 100644 index 00000000000..a3cc933d4a7 --- /dev/null +++ b/vendor/github.com/IBM/container-registry-go-sdk/common/version.go @@ -0,0 +1,20 @@ +/** + * (C) Copyright IBM Corp. 2019, 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +// Version of the SDK +const Version = "0.0.13" diff --git a/vendor/github.com/IBM/container-registry-go-sdk/containerregistryv1/container_registry_v1.go b/vendor/github.com/IBM/container-registry-go-sdk/containerregistryv1/container_registry_v1.go new file mode 100644 index 00000000000..2e2d5513a71 --- /dev/null +++ b/vendor/github.com/IBM/container-registry-go-sdk/containerregistryv1/container_registry_v1.go @@ -0,0 +1,3609 @@ +/** + * (C) Copyright IBM Corp. 2020, 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.29.1-b338fb38-20210313-010605 + */ + +// Package containerregistryv1 : Operations and models for the ContainerRegistryV1 service +package containerregistryv1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "time" + + common "github.com/IBM/container-registry-go-sdk/common" + "github.com/IBM/go-sdk-core/v5/core" +) + +// ContainerRegistryV1 : Management interface for IBM Cloud Container Registry +// +// Version: 1.1 +type ContainerRegistryV1 struct { + Service *core.BaseService + + // The unique ID for your IBM Cloud account. + Account *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://us.icr.io" + +// DefaultServiceName is the default key used to find external configuration information. 
+const DefaultServiceName = "container_registry" + +// ContainerRegistryV1Options : Service options +type ContainerRegistryV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // The unique ID for your IBM Cloud account. + Account *string `validate:"required"` +} + +// NewContainerRegistryV1UsingExternalConfig : constructs an instance of ContainerRegistryV1 with passed in options and external configuration. +func NewContainerRegistryV1UsingExternalConfig(options *ContainerRegistryV1Options) (containerRegistry *ContainerRegistryV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + containerRegistry, err = NewContainerRegistryV1(options) + if err != nil { + return + } + + err = containerRegistry.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = containerRegistry.Service.SetServiceURL(options.URL) + } + return +} + +// NewContainerRegistryV1 : constructs an instance of ContainerRegistryV1 with passed in options. +func NewContainerRegistryV1(options *ContainerRegistryV1Options) (service *ContainerRegistryV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &ContainerRegistryV1{ + Service: baseService, + Account: options.Account, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + var endpoints = map[string]string{ + "us-south": "https://us.icr.io", // us-south + "uk-south": "https://uk.icr.io", // uk-south + "eu-gb": "https://uk.icr.io", // eu-gb + "eu-central": "https://de.icr.io", // eu-central + "eu-de": "https://de.icr.io", // eu-de + "ap-north": "https://jp.icr.io", // ap-north + "jp-tok": "https://jp.icr.io", // jp-tok + "ap-south": "https://au.icr.io", // ap-south + "au-syd": "https://au.icr.io", // au-syd + "global": "https://icr.io", // global + "jp-osa": "https://jp2.icr.io", // jp-osa + "ca-tor": "https://ca.icr.io", // ca-tor + } + + if url, ok := endpoints[region]; ok { + return url, nil + } + return "", fmt.Errorf("service URL for region '%s' not found", region) +} + +// Clone makes a copy of "containerRegistry" suitable for processing requests. 
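+//
+// A hypothetical usage sketch (the API key and account ID are illustrative
+// placeholders, not values defined by this SDK); NewContainerRegistryV1 is
+// defined above:
+//
+//   svc, _ := NewContainerRegistryV1(&ContainerRegistryV1Options{
+//       Authenticator: &core.IamAuthenticator{ApiKey: "my-iam-api-key"},
+//       Account:       core.StringPtr("my-account-id"),
+//   })
+//   clone := svc.Clone()
+//   clone.EnableRetries(3, 30*time.Second)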
+func (containerRegistry *ContainerRegistryV1) Clone() *ContainerRegistryV1 { + if core.IsNil(containerRegistry) { + return nil + } + clone := *containerRegistry + clone.Service = containerRegistry.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (containerRegistry *ContainerRegistryV1) SetServiceURL(url string) error { + return containerRegistry.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (containerRegistry *ContainerRegistryV1) GetServiceURL() string { + return containerRegistry.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (containerRegistry *ContainerRegistryV1) SetDefaultHeaders(headers http.Header) { + containerRegistry.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (containerRegistry *ContainerRegistryV1) SetEnableGzipCompression(enableGzip bool) { + containerRegistry.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (containerRegistry *ContainerRegistryV1) GetEnableGzipCompression() bool { + return containerRegistry.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (containerRegistry *ContainerRegistryV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + containerRegistry.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (containerRegistry *ContainerRegistryV1) DisableRetries() { + containerRegistry.Service.DisableRetries() +} + +// GetAuth : Get authorization options +// Get authorization options for the targeted account. 
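+//
+// A hypothetical usage sketch ("svc" is a configured service instance, as in
+// the Clone example above):
+//
+//   authOpts, _, err := svc.GetAuth(&GetAuthOptions{})
+//   if err != nil {
+//       // handle error
+//   }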
+func (containerRegistry *ContainerRegistryV1) GetAuth(getAuthOptions *GetAuthOptions) (result *AuthOptions, response *core.DetailedResponse, err error) { + return containerRegistry.GetAuthWithContext(context.Background(), getAuthOptions) +} + +// GetAuthWithContext is an alternate form of the GetAuth method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) GetAuthWithContext(ctx context.Context, getAuthOptions *GetAuthOptions) (result *AuthOptions, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getAuthOptions, "getAuthOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/auth`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getAuthOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "GetAuth") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAuthOptions) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateAuth : Update authorization options +// Update authorization options for the targeted account. 
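+//
+// A hypothetical usage sketch; it assumes IamAuthz is a *bool field, which is
+// implied by the request body built below but not shown in this file:
+//
+//   _, err := svc.UpdateAuth(&UpdateAuthOptions{
+//       IamAuthz: core.BoolPtr(true),
+//   })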
+func (containerRegistry *ContainerRegistryV1) UpdateAuth(updateAuthOptions *UpdateAuthOptions) (response *core.DetailedResponse, err error) { + return containerRegistry.UpdateAuthWithContext(context.Background(), updateAuthOptions) +} + +// UpdateAuthWithContext is an alternate form of the UpdateAuth method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) UpdateAuthWithContext(ctx context.Context, updateAuthOptions *UpdateAuthOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateAuthOptions, "updateAuthOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateAuthOptions, "updateAuthOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/auth`, nil) + if err != nil { + return + } + + for headerName, headerValue := range updateAuthOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "UpdateAuth") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + body := make(map[string]interface{}) + if updateAuthOptions.IamAuthz != nil { + body["iam_authz"] = updateAuthOptions.IamAuthz + } + if updateAuthOptions.PrivateOnly != nil { + body["private_only"] = updateAuthOptions.PrivateOnly + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, nil) + + return +} + +// ListImages : List images +// List all images in namespaces in a targeted IBM Cloud account. 
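+//
+// A hypothetical usage sketch ("my-namespace" is an illustrative value; all
+// query options are optional):
+//
+//   images, _, err := svc.ListImages(&ListImagesOptions{
+//       Namespace: core.StringPtr("my-namespace"),
+//   })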
+func (containerRegistry *ContainerRegistryV1) ListImages(listImagesOptions *ListImagesOptions) (result []RemoteAPIImage, response *core.DetailedResponse, err error) { + return containerRegistry.ListImagesWithContext(context.Background(), listImagesOptions) +} + +// ListImagesWithContext is an alternate form of the ListImages method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) ListImagesWithContext(ctx context.Context, listImagesOptions *ListImagesOptions) (result []RemoteAPIImage, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listImagesOptions, "listImagesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/images`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listImagesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "ListImages") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + if listImagesOptions.Namespace != nil { + builder.AddQuery("namespace", fmt.Sprint(*listImagesOptions.Namespace)) + } + if listImagesOptions.IncludeIBM != nil { + builder.AddQuery("includeIBM", fmt.Sprint(*listImagesOptions.IncludeIBM)) + } + if listImagesOptions.IncludePrivate != nil { + builder.AddQuery("includePrivate", fmt.Sprint(*listImagesOptions.IncludePrivate)) + } + if listImagesOptions.IncludeManifestLists != nil { + builder.AddQuery("includeManifestLists", fmt.Sprint(*listImagesOptions.IncludeManifestLists)) + } + if listImagesOptions.Vulnerabilities != nil { + builder.AddQuery("vulnerabilities", fmt.Sprint(*listImagesOptions.Vulnerabilities)) + } + if listImagesOptions.Repository != nil { + builder.AddQuery("repository", fmt.Sprint(*listImagesOptions.Repository)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRemoteAPIImage) + if err != nil { + return + } + response.Result = result + + return +} + +// BulkDeleteImages : Bulk delete images +// Remove multiple container images from the registry. 
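+//
+// A hypothetical usage sketch; it assumes BulkDelete is a slice of fully
+// qualified image names (the field is sent as the JSON body below, but its
+// type is not shown in this file):
+//
+//   result, _, err := svc.BulkDeleteImages(&BulkDeleteImagesOptions{
+//       BulkDelete: []string{"us.icr.io/my-namespace/my-image:1.0"},
+//   })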
+func (containerRegistry *ContainerRegistryV1) BulkDeleteImages(bulkDeleteImagesOptions *BulkDeleteImagesOptions) (result *ImageBulkDeleteResult, response *core.DetailedResponse, err error) { + return containerRegistry.BulkDeleteImagesWithContext(context.Background(), bulkDeleteImagesOptions) +} + +// BulkDeleteImagesWithContext is an alternate form of the BulkDeleteImages method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) BulkDeleteImagesWithContext(ctx context.Context, bulkDeleteImagesOptions *BulkDeleteImagesOptions) (result *ImageBulkDeleteResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(bulkDeleteImagesOptions, "bulkDeleteImagesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(bulkDeleteImagesOptions, "bulkDeleteImagesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/images/bulkdelete`, nil) + if err != nil { + return + } + + for headerName, headerValue := range bulkDeleteImagesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "BulkDeleteImages") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + _, err = builder.SetBodyContentJSON(bulkDeleteImagesOptions.BulkDelete) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageBulkDeleteResult) + if err != nil { + return + } + response.Result = result + + return +} + +// ListImageDigests : List images by digest +// List all images by digest in namespaces in a targeted IBM Cloud account. 
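+//
+// A hypothetical usage sketch; the boolean fields are assumed to be *bool,
+// per the request body built below:
+//
+//   digests, _, err := svc.ListImageDigests(&ListImageDigestsOptions{
+//       ExcludeTagged: core.BoolPtr(false),
+//       IncludeIBM:    core.BoolPtr(false),
+//   })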
+func (containerRegistry *ContainerRegistryV1) ListImageDigests(listImageDigestsOptions *ListImageDigestsOptions) (result []ImageDigest, response *core.DetailedResponse, err error) { + return containerRegistry.ListImageDigestsWithContext(context.Background(), listImageDigestsOptions) +} + +// ListImageDigestsWithContext is an alternate form of the ListImageDigests method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) ListImageDigestsWithContext(ctx context.Context, listImageDigestsOptions *ListImageDigestsOptions) (result []ImageDigest, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listImageDigestsOptions, "listImageDigestsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listImageDigestsOptions, "listImageDigestsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/images/digests`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listImageDigestsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "ListImageDigests") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + body := make(map[string]interface{}) + if listImageDigestsOptions.ExcludeTagged != nil { + body["exclude_tagged"] = listImageDigestsOptions.ExcludeTagged + } + if listImageDigestsOptions.ExcludeVa != nil { + body["exclude_va"] = listImageDigestsOptions.ExcludeVa + } + if listImageDigestsOptions.IncludeIBM != nil { + body["include_ibm"] = listImageDigestsOptions.IncludeIBM + } + if listImageDigestsOptions.Repositories != nil { + body["repositories"] = listImageDigestsOptions.Repositories + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageDigest) + if err != nil { + return + } + response.Result = result + + return +} + +// TagImage : Create tag +// Create a new tag in a private registry that refers to an existing image in the same region. If the fromimage has Red +// Hat® signatures and the toimage is in a different repository, those signatures are copied to that repository. 
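+//
+// A hypothetical usage sketch (both image references are illustrative; both
+// query parameters are required):
+//
+//   _, err := svc.TagImage(&TagImageOptions{
+//       Fromimage: core.StringPtr("us.icr.io/my-namespace/my-image:1.0"),
+//       Toimage:   core.StringPtr("us.icr.io/my-namespace/my-image:stable"),
+//   })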
+func (containerRegistry *ContainerRegistryV1) TagImage(tagImageOptions *TagImageOptions) (response *core.DetailedResponse, err error) { + return containerRegistry.TagImageWithContext(context.Background(), tagImageOptions) +} + +// TagImageWithContext is an alternate form of the TagImage method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) TagImageWithContext(ctx context.Context, tagImageOptions *TagImageOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(tagImageOptions, "tagImageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(tagImageOptions, "tagImageOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/images/tags`, nil) + if err != nil { + return + } + + for headerName, headerValue := range tagImageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "TagImage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + builder.AddQuery("fromimage", fmt.Sprint(*tagImageOptions.Fromimage)) + builder.AddQuery("toimage", fmt.Sprint(*tagImageOptions.Toimage)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, nil) + + return +} + +// DeleteImage : Delete image +// Delete a container image from the registry. 
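+//
+// A hypothetical usage sketch (the image reference is illustrative):
+//
+//   result, _, err := svc.DeleteImage(&DeleteImageOptions{
+//       Image: core.StringPtr("us.icr.io/my-namespace/my-image:1.0"),
+//   })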
+func (containerRegistry *ContainerRegistryV1) DeleteImage(deleteImageOptions *DeleteImageOptions) (result *ImageDeleteResult, response *core.DetailedResponse, err error) { + return containerRegistry.DeleteImageWithContext(context.Background(), deleteImageOptions) +} + +// DeleteImageWithContext is an alternate form of the DeleteImage method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) DeleteImageWithContext(ctx context.Context, deleteImageOptions *DeleteImageOptions) (result *ImageDeleteResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteImageOptions, "deleteImageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteImageOptions, "deleteImageOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "image": *deleteImageOptions.Image, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/images/{image}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteImageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "DeleteImage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageDeleteResult) + if err != nil { + return + } + response.Result = result + + return +} + +// InspectImage : Inspect an image +// Inspect a container image in the private registry. 
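+//
+// A hypothetical usage sketch (the image reference is illustrative):
+//
+//   inspection, _, err := svc.InspectImage(&InspectImageOptions{
+//       Image: core.StringPtr("us.icr.io/my-namespace/my-image:1.0"),
+//   })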
+func (containerRegistry *ContainerRegistryV1) InspectImage(inspectImageOptions *InspectImageOptions) (result *ImageInspection, response *core.DetailedResponse, err error) { + return containerRegistry.InspectImageWithContext(context.Background(), inspectImageOptions) +} + +// InspectImageWithContext is an alternate form of the InspectImage method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) InspectImageWithContext(ctx context.Context, inspectImageOptions *InspectImageOptions) (result *ImageInspection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(inspectImageOptions, "inspectImageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(inspectImageOptions, "inspectImageOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "image": *inspectImageOptions.Image, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/images/{image}/json`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range inspectImageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "InspectImage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageInspection) + if err != nil { + return + } + response.Result = result + + return +} + +// GetImageManifest : Get image manifest +// Get the manifest for a container image in the private registry. 
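+//
+// A hypothetical usage sketch; the manifest is returned as a generic
+// map[string]interface{} rather than a typed model:
+//
+//   manifest, _, err := svc.GetImageManifest(&GetImageManifestOptions{
+//       Image: core.StringPtr("us.icr.io/my-namespace/my-image:1.0"),
+//   })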
+func (containerRegistry *ContainerRegistryV1) GetImageManifest(getImageManifestOptions *GetImageManifestOptions) (result map[string]interface{}, response *core.DetailedResponse, err error) { + return containerRegistry.GetImageManifestWithContext(context.Background(), getImageManifestOptions) +} + +// GetImageManifestWithContext is an alternate form of the GetImageManifest method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) GetImageManifestWithContext(ctx context.Context, getImageManifestOptions *GetImageManifestOptions) (result map[string]interface{}, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getImageManifestOptions, "getImageManifestOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getImageManifestOptions, "getImageManifestOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "image": *getImageManifestOptions.Image, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/images/{image}/manifest`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getImageManifestOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "GetImageManifest") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, &result) + + return +} + +// GetMessages : Get messages +// Return any published system messages. 
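+//
+// A hypothetical usage sketch; the message is returned as a *string:
+//
+//   message, _, err := svc.GetMessages(&GetMessagesOptions{})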
+func (containerRegistry *ContainerRegistryV1) GetMessages(getMessagesOptions *GetMessagesOptions) (result *string, response *core.DetailedResponse, err error) { + return containerRegistry.GetMessagesWithContext(context.Background(), getMessagesOptions) +} + +// GetMessagesWithContext is an alternate form of the GetMessages method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) GetMessagesWithContext(ctx context.Context, getMessagesOptions *GetMessagesOptions) (result *string, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getMessagesOptions, "getMessagesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/messages`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getMessagesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "GetMessages") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, &result) + + return +} + +// ListNamespaces : List namespaces +// List authorized namespaces in the targeted IBM Cloud account. +func (containerRegistry *ContainerRegistryV1) ListNamespaces(listNamespacesOptions *ListNamespacesOptions) (result []string, response *core.DetailedResponse, err error) { + return containerRegistry.ListNamespacesWithContext(context.Background(), listNamespacesOptions) +} + +// ListNamespacesWithContext is an alternate form of the ListNamespaces method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) ListNamespacesWithContext(ctx context.Context, listNamespacesOptions *ListNamespacesOptions) (result []string, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listNamespacesOptions, "listNamespacesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/namespaces`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listNamespacesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "ListNamespaces") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, &result) + + return +} + +// ListNamespaceDetails : Detailed namespace list +// Retrieves details, such as resource group, for all your namespaces in the targeted registry. 
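+//
+// A hypothetical usage sketch ("svc" is a configured service instance):
+//
+//   details, _, err := svc.ListNamespaceDetails(&ListNamespaceDetailsOptions{})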
+func (containerRegistry *ContainerRegistryV1) ListNamespaceDetails(listNamespaceDetailsOptions *ListNamespaceDetailsOptions) (result []NamespaceDetails, response *core.DetailedResponse, err error) { + return containerRegistry.ListNamespaceDetailsWithContext(context.Background(), listNamespaceDetailsOptions) +} + +// ListNamespaceDetailsWithContext is an alternate form of the ListNamespaceDetails method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) ListNamespaceDetailsWithContext(ctx context.Context, listNamespaceDetailsOptions *ListNamespaceDetailsOptions) (result []NamespaceDetails, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listNamespaceDetailsOptions, "listNamespaceDetailsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/namespaces/details`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listNamespaceDetailsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "ListNamespaceDetails") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNamespaceDetails) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateNamespace : Create namespace +// Add a namespace to the targeted IBM Cloud account. 
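+//
+// Sketch: create a namespace and optionally pin it to a resource group
+// via the X-Auth-Resource-Group header (names and IDs are placeholders):
+//
+//	opts := svc.NewCreateNamespaceOptions("my-namespace")
+//	opts.SetXAuthResourceGroup("resource-group-id")
+//	_, _, err := svc.CreateNamespace(opts)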
+func (containerRegistry *ContainerRegistryV1) CreateNamespace(createNamespaceOptions *CreateNamespaceOptions) (result *Namespace, response *core.DetailedResponse, err error) { + return containerRegistry.CreateNamespaceWithContext(context.Background(), createNamespaceOptions) +} + +// CreateNamespaceWithContext is an alternate form of the CreateNamespace method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) CreateNamespaceWithContext(ctx context.Context, createNamespaceOptions *CreateNamespaceOptions) (result *Namespace, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createNamespaceOptions, "createNamespaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createNamespaceOptions, "createNamespaceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "name": *createNamespaceOptions.Name, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/namespaces/{name}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createNamespaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "CreateNamespace") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + if createNamespaceOptions.XAuthResourceGroup != nil { + builder.AddHeader("X-Auth-Resource-Group", fmt.Sprint(*createNamespaceOptions.XAuthResourceGroup)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNamespace) + if err != nil { + return + } + response.Result = result + + return +} + +// AssignNamespace : Assign namespace +// Assign a namespace to the specified resource group in the targeted IBM Cloud account. 
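+//
+// Sketch: move an existing namespace to another resource group. Unlike
+// CreateNamespace, the resource group ID (a placeholder here) is required:
+//
+//	opts := svc.NewAssignNamespaceOptions("resource-group-id", "my-namespace")
+//	_, _, err := svc.AssignNamespace(opts)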
+func (containerRegistry *ContainerRegistryV1) AssignNamespace(assignNamespaceOptions *AssignNamespaceOptions) (result *Namespace, response *core.DetailedResponse, err error) {
+	return containerRegistry.AssignNamespaceWithContext(context.Background(), assignNamespaceOptions)
+}
+
+// AssignNamespaceWithContext is an alternate form of the AssignNamespace method which supports a Context parameter
+func (containerRegistry *ContainerRegistryV1) AssignNamespaceWithContext(ctx context.Context, assignNamespaceOptions *AssignNamespaceOptions) (result *Namespace, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(assignNamespaceOptions, "assignNamespaceOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(assignNamespaceOptions, "assignNamespaceOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"name": *assignNamespaceOptions.Name,
+	}
+
+	builder := core.NewRequestBuilder(core.PATCH)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/namespaces/{name}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range assignNamespaceOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "AssignNamespace")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	if containerRegistry.Account != nil {
+		builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account))
+	}
+	if assignNamespaceOptions.XAuthResourceGroup != nil {
+		builder.AddHeader("X-Auth-Resource-Group", fmt.Sprint(*assignNamespaceOptions.XAuthResourceGroup))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = containerRegistry.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNamespace)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// DeleteNamespace : Delete namespace
+// Delete the IBM Cloud Container Registry namespace from the targeted IBM Cloud account, and remove all images that
+// were in that namespace.
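+//
+// Sketch; the operation is destructive, so gate it carefully:
+//
+//	_, err := svc.DeleteNamespace(svc.NewDeleteNamespaceOptions("my-namespace"))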
+func (containerRegistry *ContainerRegistryV1) DeleteNamespace(deleteNamespaceOptions *DeleteNamespaceOptions) (response *core.DetailedResponse, err error) { + return containerRegistry.DeleteNamespaceWithContext(context.Background(), deleteNamespaceOptions) +} + +// DeleteNamespaceWithContext is an alternate form of the DeleteNamespace method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) DeleteNamespaceWithContext(ctx context.Context, deleteNamespaceOptions *DeleteNamespaceOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteNamespaceOptions, "deleteNamespaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteNamespaceOptions, "deleteNamespaceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "name": *deleteNamespaceOptions.Name, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/namespaces/{name}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteNamespaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "DeleteNamespace") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, nil) + + return +} + +// GetPlans : Get plans +// Get plans for the targeted account. 
+func (containerRegistry *ContainerRegistryV1) GetPlans(getPlansOptions *GetPlansOptions) (result *Plan, response *core.DetailedResponse, err error) { + return containerRegistry.GetPlansWithContext(context.Background(), getPlansOptions) +} + +// GetPlansWithContext is an alternate form of the GetPlans method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) GetPlansWithContext(ctx context.Context, getPlansOptions *GetPlansOptions) (result *Plan, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getPlansOptions, "getPlansOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/plans`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getPlansOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "GetPlans") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPlan) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdatePlans : Update plans +// Update plans for the targeted account. 
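+//
+// Sketch; per the request body above, Plan is the only settable field
+// ("Standard" is a placeholder plan name):
+//
+//	_, err := svc.UpdatePlans(&UpdatePlansOptions{
+//		Plan: core.StringPtr("Standard"),
+//	})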
+func (containerRegistry *ContainerRegistryV1) UpdatePlans(updatePlansOptions *UpdatePlansOptions) (response *core.DetailedResponse, err error) { + return containerRegistry.UpdatePlansWithContext(context.Background(), updatePlansOptions) +} + +// UpdatePlansWithContext is an alternate form of the UpdatePlans method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) UpdatePlansWithContext(ctx context.Context, updatePlansOptions *UpdatePlansOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updatePlansOptions, "updatePlansOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updatePlansOptions, "updatePlansOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/plans`, nil) + if err != nil { + return + } + + for headerName, headerValue := range updatePlansOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "UpdatePlans") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + body := make(map[string]interface{}) + if updatePlansOptions.Plan != nil { + body["plan"] = updatePlansOptions.Plan + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, nil) + + return +} + +// GetQuota : Get quotas +// Get quotas for the targeted account. 
+func (containerRegistry *ContainerRegistryV1) GetQuota(getQuotaOptions *GetQuotaOptions) (result *Quota, response *core.DetailedResponse, err error) { + return containerRegistry.GetQuotaWithContext(context.Background(), getQuotaOptions) +} + +// GetQuotaWithContext is an alternate form of the GetQuota method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) GetQuotaWithContext(ctx context.Context, getQuotaOptions *GetQuotaOptions) (result *Quota, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getQuotaOptions, "getQuotaOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/quotas`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getQuotaOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "GetQuota") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalQuota) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateQuota : Update quotas +// Update quotas for the targeted account. 
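+//
+// Sketch; both body fields are optional and expressed in megabytes
+// (the values are placeholders):
+//
+//	_, err := svc.UpdateQuota(&UpdateQuotaOptions{
+//		StorageMegabytes: core.Int64Ptr(5120),
+//		TrafficMegabytes: core.Int64Ptr(5120),
+//	})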
+func (containerRegistry *ContainerRegistryV1) UpdateQuota(updateQuotaOptions *UpdateQuotaOptions) (response *core.DetailedResponse, err error) { + return containerRegistry.UpdateQuotaWithContext(context.Background(), updateQuotaOptions) +} + +// UpdateQuotaWithContext is an alternate form of the UpdateQuota method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) UpdateQuotaWithContext(ctx context.Context, updateQuotaOptions *UpdateQuotaOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateQuotaOptions, "updateQuotaOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateQuotaOptions, "updateQuotaOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/quotas`, nil) + if err != nil { + return + } + + for headerName, headerValue := range updateQuotaOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "UpdateQuota") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + body := make(map[string]interface{}) + if updateQuotaOptions.StorageMegabytes != nil { + body["storage_megabytes"] = updateQuotaOptions.StorageMegabytes + } + if updateQuotaOptions.TrafficMegabytes != nil { + body["traffic_megabytes"] = updateQuotaOptions.TrafficMegabytes + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, nil) + + return +} + +// ListRetentionPolicies : List retention policies +// List retention policies for all namespaces in the targeted IBM Cloud account. 
+func (containerRegistry *ContainerRegistryV1) ListRetentionPolicies(listRetentionPoliciesOptions *ListRetentionPoliciesOptions) (result map[string]RetentionPolicy, response *core.DetailedResponse, err error) { + return containerRegistry.ListRetentionPoliciesWithContext(context.Background(), listRetentionPoliciesOptions) +} + +// ListRetentionPoliciesWithContext is an alternate form of the ListRetentionPolicies method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) ListRetentionPoliciesWithContext(ctx context.Context, listRetentionPoliciesOptions *ListRetentionPoliciesOptions) (result map[string]RetentionPolicy, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listRetentionPoliciesOptions, "listRetentionPoliciesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/retentions`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listRetentionPoliciesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "ListRetentionPolicies") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRetentionPolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// SetRetentionPolicy : Set retention policy +// Set the retention policy for the specified namespace. 
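+//
+// Sketch: keep only the two most recent images per repository in a
+// namespace (a placeholder) and let untagged images be deleted:
+//
+//	_, err := svc.SetRetentionPolicy(&SetRetentionPolicyOptions{
+//		Namespace:      core.StringPtr("my-namespace"),
+//		ImagesPerRepo:  core.Int64Ptr(2),
+//		RetainUntagged: core.BoolPtr(false),
+//	})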
+func (containerRegistry *ContainerRegistryV1) SetRetentionPolicy(setRetentionPolicyOptions *SetRetentionPolicyOptions) (response *core.DetailedResponse, err error) { + return containerRegistry.SetRetentionPolicyWithContext(context.Background(), setRetentionPolicyOptions) +} + +// SetRetentionPolicyWithContext is an alternate form of the SetRetentionPolicy method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) SetRetentionPolicyWithContext(ctx context.Context, setRetentionPolicyOptions *SetRetentionPolicyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(setRetentionPolicyOptions, "setRetentionPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(setRetentionPolicyOptions, "setRetentionPolicyOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/retentions`, nil) + if err != nil { + return + } + + for headerName, headerValue := range setRetentionPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "SetRetentionPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + body := make(map[string]interface{}) + if setRetentionPolicyOptions.Namespace != nil { + body["namespace"] = setRetentionPolicyOptions.Namespace + } + if setRetentionPolicyOptions.ImagesPerRepo != nil { + body["images_per_repo"] = setRetentionPolicyOptions.ImagesPerRepo + } + if setRetentionPolicyOptions.RetainUntagged != nil { + body["retain_untagged"] = setRetentionPolicyOptions.RetainUntagged + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, nil) + + return +} + +// AnalyzeRetentionPolicy : Analyze retention policy +// Analyze a retention policy, and get a list of what would be deleted by it. 
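+//
+// Sketch: dry-run a policy before applying it; the returned map
+// describes what the policy would delete:
+//
+//	opts := svc.NewAnalyzeRetentionPolicyOptions("my-namespace")
+//	opts.SetImagesPerRepo(2)
+//	wouldDelete, _, err := svc.AnalyzeRetentionPolicy(opts)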
+func (containerRegistry *ContainerRegistryV1) AnalyzeRetentionPolicy(analyzeRetentionPolicyOptions *AnalyzeRetentionPolicyOptions) (result map[string][]string, response *core.DetailedResponse, err error) { + return containerRegistry.AnalyzeRetentionPolicyWithContext(context.Background(), analyzeRetentionPolicyOptions) +} + +// AnalyzeRetentionPolicyWithContext is an alternate form of the AnalyzeRetentionPolicy method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) AnalyzeRetentionPolicyWithContext(ctx context.Context, analyzeRetentionPolicyOptions *AnalyzeRetentionPolicyOptions) (result map[string][]string, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(analyzeRetentionPolicyOptions, "analyzeRetentionPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(analyzeRetentionPolicyOptions, "analyzeRetentionPolicyOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/retentions/analyze`, nil) + if err != nil { + return + } + + for headerName, headerValue := range analyzeRetentionPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "AnalyzeRetentionPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + body := make(map[string]interface{}) + if analyzeRetentionPolicyOptions.Namespace != nil { + body["namespace"] = analyzeRetentionPolicyOptions.Namespace + } + if analyzeRetentionPolicyOptions.ImagesPerRepo != nil { + body["images_per_repo"] = analyzeRetentionPolicyOptions.ImagesPerRepo + } + if analyzeRetentionPolicyOptions.RetainUntagged != nil { + body["retain_untagged"] = analyzeRetentionPolicyOptions.RetainUntagged + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, &result) + + return +} + +// GetRetentionPolicy : Get retention policy +// Get the retention policy for the specified namespace. 
+func (containerRegistry *ContainerRegistryV1) GetRetentionPolicy(getRetentionPolicyOptions *GetRetentionPolicyOptions) (result *RetentionPolicy, response *core.DetailedResponse, err error) { + return containerRegistry.GetRetentionPolicyWithContext(context.Background(), getRetentionPolicyOptions) +} + +// GetRetentionPolicyWithContext is an alternate form of the GetRetentionPolicy method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) GetRetentionPolicyWithContext(ctx context.Context, getRetentionPolicyOptions *GetRetentionPolicyOptions) (result *RetentionPolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getRetentionPolicyOptions, "getRetentionPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getRetentionPolicyOptions, "getRetentionPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "namespace": *getRetentionPolicyOptions.Namespace, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/retentions/{namespace}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getRetentionPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "GetRetentionPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRetentionPolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSettings : Get account settings +// Get account settings for the targeted account. 
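+//
+// Sketch: read whether platform metrics publishing is enabled:
+//
+//	settings, _, err := svc.GetSettings(svc.NewGetSettingsOptions())
+//	if err == nil && settings.PlatformMetrics != nil {
+//		fmt.Println(*settings.PlatformMetrics)
+//	}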
+func (containerRegistry *ContainerRegistryV1) GetSettings(getSettingsOptions *GetSettingsOptions) (result *AccountSettings, response *core.DetailedResponse, err error) { + return containerRegistry.GetSettingsWithContext(context.Background(), getSettingsOptions) +} + +// GetSettingsWithContext is an alternate form of the GetSettings method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) GetSettingsWithContext(ctx context.Context, getSettingsOptions *GetSettingsOptions) (result *AccountSettings, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getSettingsOptions, "getSettingsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/settings`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getSettingsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "GetSettings") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAccountSettings) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSettings : Update account settings +// Update settings for the targeted account. 
+func (containerRegistry *ContainerRegistryV1) UpdateSettings(updateSettingsOptions *UpdateSettingsOptions) (response *core.DetailedResponse, err error) { + return containerRegistry.UpdateSettingsWithContext(context.Background(), updateSettingsOptions) +} + +// UpdateSettingsWithContext is an alternate form of the UpdateSettings method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) UpdateSettingsWithContext(ctx context.Context, updateSettingsOptions *UpdateSettingsOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateSettingsOptions, "updateSettingsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateSettingsOptions, "updateSettingsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/settings`, nil) + if err != nil { + return + } + + for headerName, headerValue := range updateSettingsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "UpdateSettings") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + body := make(map[string]interface{}) + if updateSettingsOptions.PlatformMetrics != nil { + body["platform_metrics"] = updateSettingsOptions.PlatformMetrics + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, nil) + + return +} + +// DeleteImageTag : Delete tag +// Untag a container image in the registry. 
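+//
+// Sketch; the image reference (a placeholder) uses the
+// <REPOSITORY>:<TAG> form described in DeleteImageTagOptions:
+//
+//	result, _, err := svc.DeleteImageTag(svc.NewDeleteImageTagOptions("us.icr.io/my-namespace/my-image:1.0"))
+//	if err == nil && result.Untagged != nil {
+//		fmt.Println(*result.Untagged)
+//	}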
+func (containerRegistry *ContainerRegistryV1) DeleteImageTag(deleteImageTagOptions *DeleteImageTagOptions) (result *ImageDeleteResult, response *core.DetailedResponse, err error) { + return containerRegistry.DeleteImageTagWithContext(context.Background(), deleteImageTagOptions) +} + +// DeleteImageTagWithContext is an alternate form of the DeleteImageTag method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) DeleteImageTagWithContext(ctx context.Context, deleteImageTagOptions *DeleteImageTagOptions) (result *ImageDeleteResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteImageTagOptions, "deleteImageTagOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteImageTagOptions, "deleteImageTagOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "image": *deleteImageTagOptions.Image, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/tags/{image}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteImageTagOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "DeleteImageTag") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageDeleteResult) + if err != nil { + return + } + response.Result = result + + return +} + +// ListDeletedImages : List deleted images +// List all images that are in the trash can. 
+func (containerRegistry *ContainerRegistryV1) ListDeletedImages(listDeletedImagesOptions *ListDeletedImagesOptions) (result map[string]Trash, response *core.DetailedResponse, err error) { + return containerRegistry.ListDeletedImagesWithContext(context.Background(), listDeletedImagesOptions) +} + +// ListDeletedImagesWithContext is an alternate form of the ListDeletedImages method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) ListDeletedImagesWithContext(ctx context.Context, listDeletedImagesOptions *ListDeletedImagesOptions) (result map[string]Trash, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listDeletedImagesOptions, "listDeletedImagesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/trash`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listDeletedImagesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "ListDeletedImages") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + if listDeletedImagesOptions.Namespace != nil { + builder.AddQuery("namespace", fmt.Sprint(*listDeletedImagesOptions.Namespace)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTrash) + if err != nil { + return + } + response.Result = result + + return +} + +// RestoreTags : Restore a digest and all associated tags +// In the targeted region, restore a digest, and all of its tags in the same repository, from the trash. 
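+//
+// Sketch; the digest reference is a placeholder and includes the
+// repository path:
+//
+//	_, _, err := svc.RestoreTags(&RestoreTagsOptions{
+//		Digest: core.StringPtr("us.icr.io/my-namespace/my-image@sha256:..."),
+//	})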
+func (containerRegistry *ContainerRegistryV1) RestoreTags(restoreTagsOptions *RestoreTagsOptions) (result *RestoreResult, response *core.DetailedResponse, err error) { + return containerRegistry.RestoreTagsWithContext(context.Background(), restoreTagsOptions) +} + +// RestoreTagsWithContext is an alternate form of the RestoreTags method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) RestoreTagsWithContext(ctx context.Context, restoreTagsOptions *RestoreTagsOptions) (result *RestoreResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(restoreTagsOptions, "restoreTagsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(restoreTagsOptions, "restoreTagsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "digest": *restoreTagsOptions.Digest, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/trash/{digest}/restoretags`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range restoreTagsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "RestoreTags") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = containerRegistry.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRestoreResult) + if err != nil { + return + } + response.Result = result + + return +} + +// RestoreImage : Restore deleted image +// Restore an image from the trash can. 
+func (containerRegistry *ContainerRegistryV1) RestoreImage(restoreImageOptions *RestoreImageOptions) (response *core.DetailedResponse, err error) { + return containerRegistry.RestoreImageWithContext(context.Background(), restoreImageOptions) +} + +// RestoreImageWithContext is an alternate form of the RestoreImage method which supports a Context parameter +func (containerRegistry *ContainerRegistryV1) RestoreImageWithContext(ctx context.Context, restoreImageOptions *RestoreImageOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(restoreImageOptions, "restoreImageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(restoreImageOptions, "restoreImageOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "image": *restoreImageOptions.Image, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = containerRegistry.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(containerRegistry.Service.Options.URL, `/api/v1/trash/{image}/restore`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range restoreImageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("container_registry", "V1", "RestoreImage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if containerRegistry.Account != nil { + builder.AddHeader("Account", fmt.Sprint(*containerRegistry.Account)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = containerRegistry.Service.Request(request, nil) + + return +} + +// AccountSettings : Account settings for the targeted IBM Cloud account. +type AccountSettings struct { + // Opt in to IBM Cloud Container Registry publishing platform metrics. + PlatformMetrics *bool `json:"platform_metrics,omitempty"` +} + +// UnmarshalAccountSettings unmarshals an instance of AccountSettings from the specified map of raw messages. +func UnmarshalAccountSettings(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AccountSettings) + err = core.UnmarshalPrimitive(m, "platform_metrics", &obj.PlatformMetrics) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AnalyzeRetentionPolicyOptions : The AnalyzeRetentionPolicy options. +type AnalyzeRetentionPolicyOptions struct { + // The namespace to which the retention policy is attached. + Namespace *string `validate:"required"` + + // Determines how many images will be retained for each repository when the retention policy is executed. The value -1 + // denotes 'Unlimited' (all images are retained). + ImagesPerRepo *int64 + + // Determines if untagged images are retained when executing the retention policy. This is false by default meaning + // untagged images will be deleted when the policy is executed. 
+ RetainUntagged *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewAnalyzeRetentionPolicyOptions : Instantiate AnalyzeRetentionPolicyOptions +func (*ContainerRegistryV1) NewAnalyzeRetentionPolicyOptions(namespace string) *AnalyzeRetentionPolicyOptions { + return &AnalyzeRetentionPolicyOptions{ + Namespace: core.StringPtr(namespace), + } +} + +// SetNamespace : Allow user to set Namespace +func (options *AnalyzeRetentionPolicyOptions) SetNamespace(namespace string) *AnalyzeRetentionPolicyOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetImagesPerRepo : Allow user to set ImagesPerRepo +func (options *AnalyzeRetentionPolicyOptions) SetImagesPerRepo(imagesPerRepo int64) *AnalyzeRetentionPolicyOptions { + options.ImagesPerRepo = core.Int64Ptr(imagesPerRepo) + return options +} + +// SetRetainUntagged : Allow user to set RetainUntagged +func (options *AnalyzeRetentionPolicyOptions) SetRetainUntagged(retainUntagged bool) *AnalyzeRetentionPolicyOptions { + options.RetainUntagged = core.BoolPtr(retainUntagged) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *AnalyzeRetentionPolicyOptions) SetHeaders(param map[string]string) *AnalyzeRetentionPolicyOptions { + options.Headers = param + return options +} + +// AssignNamespaceOptions : The AssignNamespace options. +type AssignNamespaceOptions struct { + // The ID of the resource group that the namespace will be created within. + XAuthResourceGroup *string `validate:"required"` + + // The name of the namespace to be updated. + Name *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewAssignNamespaceOptions : Instantiate AssignNamespaceOptions +func (*ContainerRegistryV1) NewAssignNamespaceOptions(xAuthResourceGroup string, name string) *AssignNamespaceOptions { + return &AssignNamespaceOptions{ + XAuthResourceGroup: core.StringPtr(xAuthResourceGroup), + Name: core.StringPtr(name), + } +} + +// SetXAuthResourceGroup : Allow user to set XAuthResourceGroup +func (options *AssignNamespaceOptions) SetXAuthResourceGroup(xAuthResourceGroup string) *AssignNamespaceOptions { + options.XAuthResourceGroup = core.StringPtr(xAuthResourceGroup) + return options +} + +// SetName : Allow user to set Name +func (options *AssignNamespaceOptions) SetName(name string) *AssignNamespaceOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *AssignNamespaceOptions) SetHeaders(param map[string]string) *AssignNamespaceOptions { + options.Headers = param + return options +} + +// AuthOptions : The authorization options for the targeted IBM Cloud account. +type AuthOptions struct { + // Enable role based authorization when authenticating with IBM Cloud IAM. + IamAuthz *bool `json:"iam_authz,omitempty"` + + // Restrict account to only be able to push and pull images over private connections. + PrivateOnly *bool `json:"private_only,omitempty"` +} + +// UnmarshalAuthOptions unmarshals an instance of AuthOptions from the specified map of raw messages. 
+func UnmarshalAuthOptions(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AuthOptions) + err = core.UnmarshalPrimitive(m, "iam_authz", &obj.IamAuthz) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "private_only", &obj.PrivateOnly) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// BulkDeleteImagesOptions : The BulkDeleteImages options. +type BulkDeleteImagesOptions struct { + // The full IBM Cloud registry path to the images that you want to delete, including its digest. All tags for the + // supplied digest are removed. + BulkDelete []string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewBulkDeleteImagesOptions : Instantiate BulkDeleteImagesOptions +func (*ContainerRegistryV1) NewBulkDeleteImagesOptions(bulkDelete []string) *BulkDeleteImagesOptions { + return &BulkDeleteImagesOptions{ + BulkDelete: bulkDelete, + } +} + +// SetBulkDelete : Allow user to set BulkDelete +func (options *BulkDeleteImagesOptions) SetBulkDelete(bulkDelete []string) *BulkDeleteImagesOptions { + options.BulkDelete = bulkDelete + return options +} + +// SetHeaders : Allow user to set Headers +func (options *BulkDeleteImagesOptions) SetHeaders(param map[string]string) *BulkDeleteImagesOptions { + options.Headers = param + return options +} + +// Config : The configuration data about a container. +type Config struct { + // True if command is already escaped (Windows specific). + ArgsEscaped *bool `json:"ArgsEscaped,omitempty"` + + // If true, standard error is attached. + AttachStderr *bool `json:"AttachStderr,omitempty"` + + // If true, standard input is attached, which makes possible user interaction. + AttachStdin *bool `json:"AttachStdin,omitempty"` + + // If true, standard output is attached. + AttachStdout *bool `json:"AttachStdout,omitempty"` + + // Command that is run when starting the container. + Cmd []string `json:"Cmd,omitempty"` + + // The FQDN for the container. + Domainname *string `json:"Domainname,omitempty"` + + // Entrypoint to run when starting the container. + Entrypoint []string `json:"Entrypoint,omitempty"` + + // List of environment variables to set in the container. + Env []string `json:"Env,omitempty"` + + // A list of exposed ports in a format [123:{},456:{}]. + ExposedPorts map[string]interface{} `json:"ExposedPorts,omitempty"` + + Healthcheck *HealthConfig `json:"Healthcheck,omitempty"` + + // The host name of the container. + Hostname *string `json:"Hostname,omitempty"` + + // Name of the image as it was passed by the operator (eg. could be symbolic). + Image *string `json:"Image,omitempty"` + + // List of labels set to this container. + Labels map[string]string `json:"Labels,omitempty"` + + // The MAC Address of the container. + MacAddress *string `json:"MacAddress,omitempty"` + + // If true, containers are not given network access. + NetworkDisabled *bool `json:"NetworkDisabled,omitempty"` + + // ONBUILD metadata that were defined on the image Dockerfile + // https://docs.docker.com/engine/reference/builder/#onbuild. + OnBuild []string `json:"OnBuild,omitempty"` + + // Open stdin. + OpenStdin *bool `json:"OpenStdin,omitempty"` + + // Shell for shell-form of RUN, CMD, ENTRYPOINT. + Shell []string `json:"Shell,omitempty"` + + // If true, close stdin after the 1 attached client disconnects. + StdinOnce *bool `json:"StdinOnce,omitempty"` + + // Signal to stop a container. 
+	StopSignal *string `json:"StopSignal,omitempty"`
+
+	// Timeout (in seconds) to stop a container.
+	StopTimeout *int64 `json:"StopTimeout,omitempty"`
+
+	// Attach standard streams to a tty, including stdin if it is not closed.
+	Tty *bool `json:"Tty,omitempty"`
+
+	// The user that will run the command(s) inside the container.
+	User *string `json:"User,omitempty"`
+
+	// List of volumes (mounts) used for the container.
+	Volumes map[string]interface{} `json:"Volumes,omitempty"`
+
+	// Current working directory (PWD) in which the command will be launched.
+	WorkingDir *string `json:"WorkingDir,omitempty"`
+}
+
+// UnmarshalConfig unmarshals an instance of Config from the specified map of raw messages.
+func UnmarshalConfig(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(Config)
+	err = core.UnmarshalPrimitive(m, "ArgsEscaped", &obj.ArgsEscaped)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "AttachStderr", &obj.AttachStderr)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "AttachStdin", &obj.AttachStdin)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "AttachStdout", &obj.AttachStdout)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Cmd", &obj.Cmd)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Domainname", &obj.Domainname)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Entrypoint", &obj.Entrypoint)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Env", &obj.Env)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "ExposedPorts", &obj.ExposedPorts)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "Healthcheck", &obj.Healthcheck, UnmarshalHealthConfig)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Hostname", &obj.Hostname)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Image", &obj.Image)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Labels", &obj.Labels)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "MacAddress", &obj.MacAddress)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "NetworkDisabled", &obj.NetworkDisabled)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "OnBuild", &obj.OnBuild)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "OpenStdin", &obj.OpenStdin)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Shell", &obj.Shell)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "StdinOnce", &obj.StdinOnce)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "StopSignal", &obj.StopSignal)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "StopTimeout", &obj.StopTimeout)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Tty", &obj.Tty)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "User", &obj.User)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "Volumes", &obj.Volumes)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "WorkingDir", &obj.WorkingDir)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// CreateNamespaceOptions : The CreateNamespace options.
+type CreateNamespaceOptions struct {
+	// The name of the namespace.
+	Name *string `validate:"required,ne="`
+
+	// The ID of the resource group that the namespace will be created within.
+ XAuthResourceGroup *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateNamespaceOptions : Instantiate CreateNamespaceOptions +func (*ContainerRegistryV1) NewCreateNamespaceOptions(name string) *CreateNamespaceOptions { + return &CreateNamespaceOptions{ + Name: core.StringPtr(name), + } +} + +// SetName : Allow user to set Name +func (options *CreateNamespaceOptions) SetName(name string) *CreateNamespaceOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetXAuthResourceGroup : Allow user to set XAuthResourceGroup +func (options *CreateNamespaceOptions) SetXAuthResourceGroup(xAuthResourceGroup string) *CreateNamespaceOptions { + options.XAuthResourceGroup = core.StringPtr(xAuthResourceGroup) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateNamespaceOptions) SetHeaders(param map[string]string) *CreateNamespaceOptions { + options.Headers = param + return options +} + +// DeleteImageOptions : The DeleteImage options. +type DeleteImageOptions struct { + // The full IBM Cloud registry path to the image that you want to delete, including its tag. If you do not provide a + // specific tag, the version with the `latest` tag is removed. + Image *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteImageOptions : Instantiate DeleteImageOptions +func (*ContainerRegistryV1) NewDeleteImageOptions(image string) *DeleteImageOptions { + return &DeleteImageOptions{ + Image: core.StringPtr(image), + } +} + +// SetImage : Allow user to set Image +func (options *DeleteImageOptions) SetImage(image string) *DeleteImageOptions { + options.Image = core.StringPtr(image) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteImageOptions) SetHeaders(param map[string]string) *DeleteImageOptions { + options.Headers = param + return options +} + +// DeleteImageTagOptions : The DeleteImageTag options. +type DeleteImageTagOptions struct { + // The name of the image that you want to delete, in the format <REPOSITORY>:<TAG>. + Image *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteImageTagOptions : Instantiate DeleteImageTagOptions +func (*ContainerRegistryV1) NewDeleteImageTagOptions(image string) *DeleteImageTagOptions { + return &DeleteImageTagOptions{ + Image: core.StringPtr(image), + } +} + +// SetImage : Allow user to set Image +func (options *DeleteImageTagOptions) SetImage(image string) *DeleteImageTagOptions { + options.Image = core.StringPtr(image) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteImageTagOptions) SetHeaders(param map[string]string) *DeleteImageTagOptions { + options.Headers = param + return options +} + +// DeleteNamespaceOptions : The DeleteNamespace options. +type DeleteNamespaceOptions struct { + // The name of the namespace that you want to delete. 
+ Name *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteNamespaceOptions : Instantiate DeleteNamespaceOptions +func (*ContainerRegistryV1) NewDeleteNamespaceOptions(name string) *DeleteNamespaceOptions { + return &DeleteNamespaceOptions{ + Name: core.StringPtr(name), + } +} + +// SetName : Allow user to set Name +func (options *DeleteNamespaceOptions) SetName(name string) *DeleteNamespaceOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteNamespaceOptions) SetHeaders(param map[string]string) *DeleteNamespaceOptions { + options.Headers = param + return options +} + +// GetAuthOptions : The GetAuth options. +type GetAuthOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAuthOptions : Instantiate GetAuthOptions +func (*ContainerRegistryV1) NewGetAuthOptions() *GetAuthOptions { + return &GetAuthOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetAuthOptions) SetHeaders(param map[string]string) *GetAuthOptions { + options.Headers = param + return options +} + +// GetImageManifestOptions : The GetImageManifest options. +type GetImageManifestOptions struct { + // The full IBM Cloud registry path to the image that you want to inspect. Run `ibmcloud cr images` or call the `GET + // /images/json` endpoint to review images that are in the registry. + Image *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetImageManifestOptions : Instantiate GetImageManifestOptions +func (*ContainerRegistryV1) NewGetImageManifestOptions(image string) *GetImageManifestOptions { + return &GetImageManifestOptions{ + Image: core.StringPtr(image), + } +} + +// SetImage : Allow user to set Image +func (options *GetImageManifestOptions) SetImage(image string) *GetImageManifestOptions { + options.Image = core.StringPtr(image) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetImageManifestOptions) SetHeaders(param map[string]string) *GetImageManifestOptions { + options.Headers = param + return options +} + +// GetMessagesOptions : The GetMessages options. +type GetMessagesOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetMessagesOptions : Instantiate GetMessagesOptions +func (*ContainerRegistryV1) NewGetMessagesOptions() *GetMessagesOptions { + return &GetMessagesOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetMessagesOptions) SetHeaders(param map[string]string) *GetMessagesOptions { + options.Headers = param + return options +} + +// GetPlansOptions : The GetPlans options. +type GetPlansOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetPlansOptions : Instantiate GetPlansOptions +func (*ContainerRegistryV1) NewGetPlansOptions() *GetPlansOptions { + return &GetPlansOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetPlansOptions) SetHeaders(param map[string]string) *GetPlansOptions { + options.Headers = param + return options +} + +// GetQuotaOptions : The GetQuota options. 
+type GetQuotaOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetQuotaOptions : Instantiate GetQuotaOptions +func (*ContainerRegistryV1) NewGetQuotaOptions() *GetQuotaOptions { + return &GetQuotaOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetQuotaOptions) SetHeaders(param map[string]string) *GetQuotaOptions { + options.Headers = param + return options +} + +// GetRetentionPolicyOptions : The GetRetentionPolicy options. +type GetRetentionPolicyOptions struct { + // Gets the retention policy for the specified namespace. + Namespace *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetRetentionPolicyOptions : Instantiate GetRetentionPolicyOptions +func (*ContainerRegistryV1) NewGetRetentionPolicyOptions(namespace string) *GetRetentionPolicyOptions { + return &GetRetentionPolicyOptions{ + Namespace: core.StringPtr(namespace), + } +} + +// SetNamespace : Allow user to set Namespace +func (options *GetRetentionPolicyOptions) SetNamespace(namespace string) *GetRetentionPolicyOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetRetentionPolicyOptions) SetHeaders(param map[string]string) *GetRetentionPolicyOptions { + options.Headers = param + return options +} + +// GetSettingsOptions : The GetSettings options. +type GetSettingsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSettingsOptions : Instantiate GetSettingsOptions +func (*ContainerRegistryV1) NewGetSettingsOptions() *GetSettingsOptions { + return &GetSettingsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetSettingsOptions) SetHeaders(param map[string]string) *GetSettingsOptions { + options.Headers = param + return options +} + +// HealthConfig : HealthConfig struct +type HealthConfig struct { + // A Duration represents the elapsed time between two instants as an int64 nanosecond count. + Interval *int64 `json:"Interval,omitempty"` + + // The number of consecutive failures needed to consider a container as unhealthy. Zero means inherit. + Retries *int64 `json:"Retries,omitempty"` + + // The test to perform to check that the container is healthy. An empty slice means to inherit the default. The options + // are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell. + Test []string `json:"Test,omitempty"` + + // A Duration represents the elapsed time between two instants as an int64 nanosecond count. + Timeout *int64 `json:"Timeout,omitempty"` +} + +// UnmarshalHealthConfig unmarshals an instance of HealthConfig from the specified map of raw messages. 
+func UnmarshalHealthConfig(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(HealthConfig) + err = core.UnmarshalPrimitive(m, "Interval", &obj.Interval) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Retries", &obj.Retries) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Test", &obj.Test) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Timeout", &obj.Timeout) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageBulkDeleteError : Information about a failure to delete an image as part of a bulk delete. +type ImageBulkDeleteError struct { + // An API error code. + Code *string `json:"code,omitempty"` + + // The English text message associated with the error code. + Message *string `json:"message,omitempty"` +} + +// UnmarshalImageBulkDeleteError unmarshals an instance of ImageBulkDeleteError from the specified map of raw messages. +func UnmarshalImageBulkDeleteError(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageBulkDeleteError) + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "message", &obj.Message) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageBulkDeleteResult : The results of a bulk image delete request. +type ImageBulkDeleteResult struct { + // A map of digests to the error object that explains the failure. + Error map[string]ImageBulkDeleteError `json:"error,omitempty"` + + // A list of digests which were deleted successfully. + Success []string `json:"success,omitempty"` +} + +// UnmarshalImageBulkDeleteResult unmarshals an instance of ImageBulkDeleteResult from the specified map of raw messages. +func UnmarshalImageBulkDeleteResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageBulkDeleteResult) + err = core.UnmarshalModel(m, "error", &obj.Error, UnmarshalImageBulkDeleteError) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageDeleteResult : ImageDeleteResult struct +type ImageDeleteResult struct { + Untagged *string `json:"Untagged,omitempty"` +} + +// UnmarshalImageDeleteResult unmarshals an instance of ImageDeleteResult from the specified map of raw messages. +func UnmarshalImageDeleteResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageDeleteResult) + err = core.UnmarshalPrimitive(m, "Untagged", &obj.Untagged) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageDigest : Important information about an image. +type ImageDigest struct { + // The build date of the image. + Created *int64 `json:"created,omitempty"` + + // The image digest. + ID *string `json:"id,omitempty"` + + // The type of the image, such as 'Docker Image Manifest V2, Schema 2' or 'OCI Image Manifest v1'. + ManifestType *string `json:"manifestType,omitempty"` + + // A map of image repositories to tags. + RepoTags map[string]interface{} `json:"repoTags,omitempty"` + + // The size of the image in bytes. + Size *int64 `json:"size,omitempty"` +} + +// UnmarshalImageDigest unmarshals an instance of ImageDigest from the specified map of raw messages. 
+func UnmarshalImageDigest(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageDigest) + err = core.UnmarshalPrimitive(m, "created", &obj.Created) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "manifestType", &obj.ManifestType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "repoTags", &obj.RepoTags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "size", &obj.Size) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageInspection : An image JSON output consistent with the Docker Remote API. +type ImageInspection struct { + // The processor architecture used to build this image, and required to run it. + Architecture *string `json:"Architecture,omitempty"` + + // The author of the image. + Author *string `json:"Author,omitempty"` + + // A plain text description of the image. + Comment *string `json:"Comment,omitempty"` + + // The configuration data about a container. + Config *Config `json:"Config,omitempty"` + + // The ID of the container which created this image. + Container *string `json:"Container,omitempty"` + + // The configuration data about a container. + ContainerConfig *Config `json:"ContainerConfig,omitempty"` + + // The unix timestamp for the date when the image was created. + Created *string `json:"Created,omitempty"` + + // The Docker version used to build this image. + DockerVersion *string `json:"DockerVersion,omitempty"` + + // The image ID. + ID *string `json:"Id,omitempty"` + + // Media type of the manifest for the image. + ManifestType *string `json:"ManifestType,omitempty"` + + // The operating system family used to build this image, and required to run it. + Os *string `json:"Os,omitempty"` + + // The version of the operating system used to build this image. + OsVersion *string `json:"OsVersion,omitempty"` + + // The ID of the base image for this image. + Parent *string `json:"Parent,omitempty"` + + // RootFS contains information about the root filesystem of a container image. + RootFs *RootFs `json:"RootFS,omitempty"` + + // The size of the image in bytes. + Size *int64 `json:"Size,omitempty"` + + // The sum of the size of each layer in the image in bytes. + VirtualSize *int64 `json:"VirtualSize,omitempty"` +} + +// UnmarshalImageInspection unmarshals an instance of ImageInspection from the specified map of raw messages. 
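+//
+// Illustrative usage (editor's sketch): an ImageInspection is what an
+// image-inspect call yields. Assuming an `InspectImage` method with the SDK's
+// usual (result, response, err) signature on a client named `registryService`:
+//
+//	options := registryService.NewInspectImageOptions("us.icr.io/mynamespace/myrepo:latest")
+//	inspection, _, err := registryService.InspectImage(options)
+//	if err == nil && inspection.Architecture != nil {
+//		fmt.Println(*inspection.Architecture)
+//	}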
+func UnmarshalImageInspection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageInspection) + err = core.UnmarshalPrimitive(m, "Architecture", &obj.Architecture) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Author", &obj.Author) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Comment", &obj.Comment) + if err != nil { + return + } + err = core.UnmarshalModel(m, "Config", &obj.Config, UnmarshalConfig) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Container", &obj.Container) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ContainerConfig", &obj.ContainerConfig, UnmarshalConfig) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Created", &obj.Created) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "DockerVersion", &obj.DockerVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ManifestType", &obj.ManifestType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Os", &obj.Os) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "OsVersion", &obj.OsVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Parent", &obj.Parent) + if err != nil { + return + } + err = core.UnmarshalModel(m, "RootFS", &obj.RootFs, UnmarshalRootFs) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Size", &obj.Size) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "VirtualSize", &obj.VirtualSize) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InspectImageOptions : The InspectImage options. +type InspectImageOptions struct { + // The full IBM Cloud registry path to the image that you want to inspect. Run `ibmcloud cr images` or call the `GET + // /images/json` endpoint to review images that are in the registry. + Image *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewInspectImageOptions : Instantiate InspectImageOptions +func (*ContainerRegistryV1) NewInspectImageOptions(image string) *InspectImageOptions { + return &InspectImageOptions{ + Image: core.StringPtr(image), + } +} + +// SetImage : Allow user to set Image +func (options *InspectImageOptions) SetImage(image string) *InspectImageOptions { + options.Image = core.StringPtr(image) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *InspectImageOptions) SetHeaders(param map[string]string) *InspectImageOptions { + options.Headers = param + return options +} + +// ListDeletedImagesOptions : The ListDeletedImages options. +type ListDeletedImagesOptions struct { + // Limit results to trash can images in the given namespace only. 
+ Namespace *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListDeletedImagesOptions : Instantiate ListDeletedImagesOptions +func (*ContainerRegistryV1) NewListDeletedImagesOptions() *ListDeletedImagesOptions { + return &ListDeletedImagesOptions{} +} + +// SetNamespace : Allow user to set Namespace +func (options *ListDeletedImagesOptions) SetNamespace(namespace string) *ListDeletedImagesOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListDeletedImagesOptions) SetHeaders(param map[string]string) *ListDeletedImagesOptions { + options.Headers = param + return options +} + +// ListImageDigestsOptions : The ListImageDigests options. +type ListImageDigestsOptions struct { + // ExcludeTagged returns only untagged digests. + ExcludeTagged *bool + + // ExcludeVA returns the digest list with no VA scan results. + ExcludeVa *bool + + // When true, API will return the IBM public images if they exist in the targeted region. + IncludeIBM *bool + + // Repositories in which to restrict the output. If left empty all images for the account will be returned. + Repositories []string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListImageDigestsOptions : Instantiate ListImageDigestsOptions +func (*ContainerRegistryV1) NewListImageDigestsOptions() *ListImageDigestsOptions { + return &ListImageDigestsOptions{} +} + +// SetExcludeTagged : Allow user to set ExcludeTagged +func (options *ListImageDigestsOptions) SetExcludeTagged(excludeTagged bool) *ListImageDigestsOptions { + options.ExcludeTagged = core.BoolPtr(excludeTagged) + return options +} + +// SetExcludeVa : Allow user to set ExcludeVa +func (options *ListImageDigestsOptions) SetExcludeVa(excludeVa bool) *ListImageDigestsOptions { + options.ExcludeVa = core.BoolPtr(excludeVa) + return options +} + +// SetIncludeIBM : Allow user to set IncludeIBM +func (options *ListImageDigestsOptions) SetIncludeIBM(includeIBM bool) *ListImageDigestsOptions { + options.IncludeIBM = core.BoolPtr(includeIBM) + return options +} + +// SetRepositories : Allow user to set Repositories +func (options *ListImageDigestsOptions) SetRepositories(repositories []string) *ListImageDigestsOptions { + options.Repositories = repositories + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListImageDigestsOptions) SetHeaders(param map[string]string) *ListImageDigestsOptions { + options.Headers = param + return options +} + +// ListImagesOptions : The ListImages options. +type ListImagesOptions struct { + // Lists images that are stored in the specified namespace only. Query multiple namespaces by specifying this option + // for each namespace. If this option is not specified, images from all namespaces in the specified IBM Cloud account + // are listed. + Namespace *string + + // Includes IBM-provided public images in the list of images. If this option is not specified, private images are + // listed only. If this option is specified more than once, the last parsed setting is the setting that is used. + IncludeIBM *bool + + // Includes private images in the list of images. If this option is not specified, private images are listed. If this + // option is specified more than once, the last parsed setting is the setting that is used. + IncludePrivate *bool + + // Includes tags that reference multi-architecture manifest lists in the image list. 
If this option is not specified, + // tagged manifest lists are not shown in the list. If this option is specified more than once, the last parsed setting + // is the setting that is used. + IncludeManifestLists *bool + + // Displays Vulnerability Advisor status for the listed images. If this option is specified more than once, the last + // parsed setting is the setting that is used. + Vulnerabilities *bool + + // Lists images that are stored in the specified repository, under your namespaces. Query multiple repositories by + // specifying this option for each repository. If this option is not specified, images from all repos are listed. + Repository *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListImagesOptions : Instantiate ListImagesOptions +func (*ContainerRegistryV1) NewListImagesOptions() *ListImagesOptions { + return &ListImagesOptions{} +} + +// SetNamespace : Allow user to set Namespace +func (options *ListImagesOptions) SetNamespace(namespace string) *ListImagesOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetIncludeIBM : Allow user to set IncludeIBM +func (options *ListImagesOptions) SetIncludeIBM(includeIBM bool) *ListImagesOptions { + options.IncludeIBM = core.BoolPtr(includeIBM) + return options +} + +// SetIncludePrivate : Allow user to set IncludePrivate +func (options *ListImagesOptions) SetIncludePrivate(includePrivate bool) *ListImagesOptions { + options.IncludePrivate = core.BoolPtr(includePrivate) + return options +} + +// SetIncludeManifestLists : Allow user to set IncludeManifestLists +func (options *ListImagesOptions) SetIncludeManifestLists(includeManifestLists bool) *ListImagesOptions { + options.IncludeManifestLists = core.BoolPtr(includeManifestLists) + return options +} + +// SetVulnerabilities : Allow user to set Vulnerabilities +func (options *ListImagesOptions) SetVulnerabilities(vulnerabilities bool) *ListImagesOptions { + options.Vulnerabilities = core.BoolPtr(vulnerabilities) + return options +} + +// SetRepository : Allow user to set Repository +func (options *ListImagesOptions) SetRepository(repository string) *ListImagesOptions { + options.Repository = core.StringPtr(repository) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListImagesOptions) SetHeaders(param map[string]string) *ListImagesOptions { + options.Headers = param + return options +} + +// ListNamespaceDetailsOptions : The ListNamespaceDetails options. +type ListNamespaceDetailsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListNamespaceDetailsOptions : Instantiate ListNamespaceDetailsOptions +func (*ContainerRegistryV1) NewListNamespaceDetailsOptions() *ListNamespaceDetailsOptions { + return &ListNamespaceDetailsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListNamespaceDetailsOptions) SetHeaders(param map[string]string) *ListNamespaceDetailsOptions { + options.Headers = param + return options +} + +// ListNamespacesOptions : The ListNamespaces options. 
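+//
+// Illustrative usage (editor's sketch; assumes a `ListNamespaces` method with
+// the usual (result, response, err) signature):
+//
+//	namespaces, _, err := registryService.ListNamespaces(registryService.NewListNamespacesOptions())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(namespaces)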
+type ListNamespacesOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListNamespacesOptions : Instantiate ListNamespacesOptions +func (*ContainerRegistryV1) NewListNamespacesOptions() *ListNamespacesOptions { + return &ListNamespacesOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListNamespacesOptions) SetHeaders(param map[string]string) *ListNamespacesOptions { + options.Headers = param + return options +} + +// ListRetentionPoliciesOptions : The ListRetentionPolicies options. +type ListRetentionPoliciesOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListRetentionPoliciesOptions : Instantiate ListRetentionPoliciesOptions +func (*ContainerRegistryV1) NewListRetentionPoliciesOptions() *ListRetentionPoliciesOptions { + return &ListRetentionPoliciesOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListRetentionPoliciesOptions) SetHeaders(param map[string]string) *ListRetentionPoliciesOptions { + options.Headers = param + return options +} + +// Namespace : Namespace struct +type Namespace struct { + Namespace *string `json:"namespace,omitempty"` +} + +// UnmarshalNamespace unmarshals an instance of Namespace from the specified map of raw messages. +func UnmarshalNamespace(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Namespace) + err = core.UnmarshalPrimitive(m, "namespace", &obj.Namespace) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NamespaceDetails : Details of a namespace. +type NamespaceDetails struct { + // The IBM Cloud account that owns the namespace. + Account *string `json:"account,omitempty"` + + // When the namespace was created. + CreatedDate *string `json:"created_date,omitempty"` + + // If the namespace has been assigned to a resource group, this is the IBM Cloud CRN representing the namespace. + CRN *string `json:"crn,omitempty"` + + Name *string `json:"name,omitempty"` + + // When the namespace was assigned to a resource group. + ResourceCreatedDate *string `json:"resource_created_date,omitempty"` + + // The resource group that the namespace is assigned to. + ResourceGroup *string `json:"resource_group,omitempty"` + + // When the namespace was last updated. + UpdatedDate *string `json:"updated_date,omitempty"` +} + +// UnmarshalNamespaceDetails unmarshals an instance of NamespaceDetails from the specified map of raw messages. +func UnmarshalNamespaceDetails(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NamespaceDetails) + err = core.UnmarshalPrimitive(m, "account", &obj.Account) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_date", &obj.CreatedDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_created_date", &obj.ResourceCreatedDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group", &obj.ResourceGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_date", &obj.UpdatedDate) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Plan : The plan for the targeted IBM Cloud account. 
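+//
+// Illustrative usage (editor's sketch; assumes a `GetPlans` method that
+// returns a *Plan):
+//
+//	plan, _, err := registryService.GetPlans(registryService.NewGetPlansOptions())
+//	if err == nil && plan.Plan != nil {
+//		fmt.Println(*plan.Plan)
+//	}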
+type Plan struct { + Plan *string `json:"plan,omitempty"` +} + +// UnmarshalPlan unmarshals an instance of Plan from the specified map of raw messages. +func UnmarshalPlan(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Plan) + err = core.UnmarshalPrimitive(m, "plan", &obj.Plan) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Quota : Current usage and limits for the targeted IBM Cloud account. +type Quota struct { + Limit *QuotaDetails `json:"limit,omitempty"` + + Usage *QuotaDetails `json:"usage,omitempty"` +} + +// UnmarshalQuota unmarshals an instance of Quota from the specified map of raw messages. +func UnmarshalQuota(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Quota) + err = core.UnmarshalModel(m, "limit", &obj.Limit, UnmarshalQuotaDetails) + if err != nil { + return + } + err = core.UnmarshalModel(m, "usage", &obj.Usage, UnmarshalQuotaDetails) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// QuotaDetails : QuotaDetails struct +type QuotaDetails struct { + // Storage quota or usage in bytes. The value -1 denotes "Unlimited". + StorageBytes *int64 `json:"storage_bytes,omitempty"` + + // Traffic quota or usage in bytes. The value -1 denotes "Unlimited". + TrafficBytes *int64 `json:"traffic_bytes,omitempty"` +} + +// UnmarshalQuotaDetails unmarshals an instance of QuotaDetails from the specified map of raw messages. +func UnmarshalQuotaDetails(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(QuotaDetails) + err = core.UnmarshalPrimitive(m, "storage_bytes", &obj.StorageBytes) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "traffic_bytes", &obj.TrafficBytes) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RemoteAPIImage : Information about an image, in a format consistent with the Docker Remote API format. +type RemoteAPIImage struct { + ConfigurationIssueCount *int64 `json:"ConfigurationIssueCount,omitempty"` + + Created *int64 `json:"Created,omitempty"` + + DigestTags map[string][]string `json:"DigestTags,omitempty"` + + ExemptIssueCount *int64 `json:"ExemptIssueCount,omitempty"` + + ID *string `json:"Id,omitempty"` + + IssueCount *int64 `json:"IssueCount,omitempty"` + + Labels map[string]string `json:"Labels,omitempty"` + + ManifestType *string `json:"ManifestType,omitempty"` + + ParentID *string `json:"ParentId,omitempty"` + + RepoDigests []string `json:"RepoDigests,omitempty"` + + RepoTags []string `json:"RepoTags,omitempty"` + + Size *int64 `json:"Size,omitempty"` + + VirtualSize *int64 `json:"VirtualSize,omitempty"` + + VulnerabilityCount *int64 `json:"VulnerabilityCount,omitempty"` + + Vulnerable *string `json:"Vulnerable,omitempty"` +} + +// UnmarshalRemoteAPIImage unmarshals an instance of RemoteAPIImage from the specified map of raw messages. 
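+//
+// Illustrative usage (editor's sketch): RemoteAPIImage values are what an
+// image-list call yields. Assuming a `ListImages` method that returns a slice
+// of RemoteAPIImage:
+//
+//	options := registryService.NewListImagesOptions().SetNamespace("mynamespace")
+//	images, _, err := registryService.ListImages(options)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, image := range images {
+//		fmt.Println(image.RepoTags)
+//	}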
+func UnmarshalRemoteAPIImage(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RemoteAPIImage) + err = core.UnmarshalPrimitive(m, "ConfigurationIssueCount", &obj.ConfigurationIssueCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Created", &obj.Created) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "DigestTags", &obj.DigestTags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ExemptIssueCount", &obj.ExemptIssueCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "IssueCount", &obj.IssueCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Labels", &obj.Labels) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ManifestType", &obj.ManifestType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ParentId", &obj.ParentID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "RepoDigests", &obj.RepoDigests) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "RepoTags", &obj.RepoTags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Size", &obj.Size) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "VirtualSize", &obj.VirtualSize) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "VulnerabilityCount", &obj.VulnerabilityCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Vulnerable", &obj.Vulnerable) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RestoreImageOptions : The RestoreImage options. +type RestoreImageOptions struct { + // The name of the image that you want to restore, in the format <REPOSITORY>:<TAG>. Run `ibmcloud cr + // trash-list` or call the `GET /trash/json` endpoint to review images that are in the trash. + Image *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewRestoreImageOptions : Instantiate RestoreImageOptions +func (*ContainerRegistryV1) NewRestoreImageOptions(image string) *RestoreImageOptions { + return &RestoreImageOptions{ + Image: core.StringPtr(image), + } +} + +// SetImage : Allow user to set Image +func (options *RestoreImageOptions) SetImage(image string) *RestoreImageOptions { + options.Image = core.StringPtr(image) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *RestoreImageOptions) SetHeaders(param map[string]string) *RestoreImageOptions { + options.Headers = param + return options +} + +// RestoreResult : The result of restoring tags for a digest. In a successful request the digest is always restored, and zero or more of +// its tags may be restored. +type RestoreResult struct { + // Successful is a list of tags that were restored. + Successful []string `json:"successful,omitempty"` + + // Unsuccessful is a list of tags that were not restored because of a conflict. + Unsuccessful []string `json:"unsuccessful,omitempty"` +} + +// UnmarshalRestoreResult unmarshals an instance of RestoreResult from the specified map of raw messages. 
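+//
+// Illustrative usage (editor's sketch): a RestoreResult is what a restore-tags
+// call yields. Assuming a `RestoreTags` method that returns a *RestoreResult
+// (the digest below is a hypothetical placeholder):
+//
+//	options := registryService.NewRestoreTagsOptions("us.icr.io/mynamespace/myrepo@sha256:0123abcd")
+//	result, _, err := registryService.RestoreTags(options)
+//	if err == nil {
+//		fmt.Println("restored:", result.Successful, "conflicted:", result.Unsuccessful)
+//	}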
+func UnmarshalRestoreResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RestoreResult) + err = core.UnmarshalPrimitive(m, "successful", &obj.Successful) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "unsuccessful", &obj.Unsuccessful) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RestoreTagsOptions : The RestoreTags options. +type RestoreTagsOptions struct { + // The full IBM Cloud registry digest reference for the digest that you want to restore such as + // `icr.io/namespace/repo@sha256:a9be...`. Call the `GET /trash/json` endpoint to review digests that are in the trash + // and their tags in the same repository. + Digest *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewRestoreTagsOptions : Instantiate RestoreTagsOptions +func (*ContainerRegistryV1) NewRestoreTagsOptions(digest string) *RestoreTagsOptions { + return &RestoreTagsOptions{ + Digest: core.StringPtr(digest), + } +} + +// SetDigest : Allow user to set Digest +func (options *RestoreTagsOptions) SetDigest(digest string) *RestoreTagsOptions { + options.Digest = core.StringPtr(digest) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *RestoreTagsOptions) SetHeaders(param map[string]string) *RestoreTagsOptions { + options.Headers = param + return options +} + +// RetentionPolicy : A document that contains the image retention settings for a namespace. +type RetentionPolicy struct { + // Determines how many images will be retained for each repository when the retention policy is executed. The value -1 + // denotes 'Unlimited' (all images are retained). + ImagesPerRepo *int64 `json:"images_per_repo,omitempty"` + + // The namespace to which the retention policy is attached. + Namespace *string `json:"namespace" validate:"required"` + + // Determines if untagged images are retained when executing the retention policy. This is false by default meaning + // untagged images will be deleted when the policy is executed. + RetainUntagged *bool `json:"retain_untagged,omitempty"` +} + +// NewRetentionPolicy : Instantiate RetentionPolicy (Generic Model Constructor) +func (*ContainerRegistryV1) NewRetentionPolicy(namespace string) (model *RetentionPolicy, err error) { + model = &RetentionPolicy{ + Namespace: core.StringPtr(namespace), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalRetentionPolicy unmarshals an instance of RetentionPolicy from the specified map of raw messages. +func UnmarshalRetentionPolicy(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RetentionPolicy) + err = core.UnmarshalPrimitive(m, "images_per_repo", &obj.ImagesPerRepo) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "namespace", &obj.Namespace) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "retain_untagged", &obj.RetainUntagged) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RootFs : RootFS contains information about the root filesystem of a container image. +type RootFs struct { + // Descriptor for the base layer in the image. + BaseLayer *string `json:"BaseLayer,omitempty"` + + // Descriptors for each layer in the image. + Layers []string `json:"Layers,omitempty"` + + // The type of filesystem. 
+ Type *string `json:"Type,omitempty"` +} + +// UnmarshalRootFs unmarshals an instance of RootFs from the specified map of raw messages. +func UnmarshalRootFs(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RootFs) + err = core.UnmarshalPrimitive(m, "BaseLayer", &obj.BaseLayer) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Layers", &obj.Layers) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "Type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SetRetentionPolicyOptions : The SetRetentionPolicy options. +type SetRetentionPolicyOptions struct { + // The namespace to which the retention policy is attached. + Namespace *string `validate:"required"` + + // Determines how many images will be retained for each repository when the retention policy is executed. The value -1 + // denotes 'Unlimited' (all images are retained). + ImagesPerRepo *int64 + + // Determines if untagged images are retained when executing the retention policy. This is false by default meaning + // untagged images will be deleted when the policy is executed. + RetainUntagged *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSetRetentionPolicyOptions : Instantiate SetRetentionPolicyOptions +func (*ContainerRegistryV1) NewSetRetentionPolicyOptions(namespace string) *SetRetentionPolicyOptions { + return &SetRetentionPolicyOptions{ + Namespace: core.StringPtr(namespace), + } +} + +// SetNamespace : Allow user to set Namespace +func (options *SetRetentionPolicyOptions) SetNamespace(namespace string) *SetRetentionPolicyOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetImagesPerRepo : Allow user to set ImagesPerRepo +func (options *SetRetentionPolicyOptions) SetImagesPerRepo(imagesPerRepo int64) *SetRetentionPolicyOptions { + options.ImagesPerRepo = core.Int64Ptr(imagesPerRepo) + return options +} + +// SetRetainUntagged : Allow user to set RetainUntagged +func (options *SetRetentionPolicyOptions) SetRetainUntagged(retainUntagged bool) *SetRetentionPolicyOptions { + options.RetainUntagged = core.BoolPtr(retainUntagged) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SetRetentionPolicyOptions) SetHeaders(param map[string]string) *SetRetentionPolicyOptions { + options.Headers = param + return options +} + +// TagImageOptions : The TagImage options. +type TagImageOptions struct { + // The name of the image that you want to create a new tag for, in the format <REPOSITORY>:<TAG>. Run + // `ibmcloud cr images` or call the `GET /images/json` endpoint to review images that are in the registry. + Fromimage *string `validate:"required"` + + // The new tag for the image, in the format <REPOSITORY>:<TAG>. 
+ Toimage *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewTagImageOptions : Instantiate TagImageOptions +func (*ContainerRegistryV1) NewTagImageOptions(fromimage string, toimage string) *TagImageOptions { + return &TagImageOptions{ + Fromimage: core.StringPtr(fromimage), + Toimage: core.StringPtr(toimage), + } +} + +// SetFromimage : Allow user to set Fromimage +func (options *TagImageOptions) SetFromimage(fromimage string) *TagImageOptions { + options.Fromimage = core.StringPtr(fromimage) + return options +} + +// SetToimage : Allow user to set Toimage +func (options *TagImageOptions) SetToimage(toimage string) *TagImageOptions { + options.Toimage = core.StringPtr(toimage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *TagImageOptions) SetHeaders(param map[string]string) *TagImageOptions { + options.Headers = param + return options +} + +// Trash : Details of the tags and days until expiry. +type Trash struct { + DaysUntilExpiry *int64 `json:"daysUntilExpiry,omitempty"` + + Tags []string `json:"tags,omitempty"` +} + +// UnmarshalTrash unmarshals an instance of Trash from the specified map of raw messages. +func UnmarshalTrash(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Trash) + err = core.UnmarshalPrimitive(m, "daysUntilExpiry", &obj.DaysUntilExpiry) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateAuthOptions : The UpdateAuth options. +type UpdateAuthOptions struct { + // Enable role based authorization when authenticating with IBM Cloud IAM. + IamAuthz *bool + + // Restrict account to only be able to push and pull images over private connections. + PrivateOnly *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateAuthOptions : Instantiate UpdateAuthOptions +func (*ContainerRegistryV1) NewUpdateAuthOptions() *UpdateAuthOptions { + return &UpdateAuthOptions{} +} + +// SetIamAuthz : Allow user to set IamAuthz +func (options *UpdateAuthOptions) SetIamAuthz(iamAuthz bool) *UpdateAuthOptions { + options.IamAuthz = core.BoolPtr(iamAuthz) + return options +} + +// SetPrivateOnly : Allow user to set PrivateOnly +func (options *UpdateAuthOptions) SetPrivateOnly(privateOnly bool) *UpdateAuthOptions { + options.PrivateOnly = core.BoolPtr(privateOnly) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateAuthOptions) SetHeaders(param map[string]string) *UpdateAuthOptions { + options.Headers = param + return options +} + +// UpdatePlansOptions : The UpdatePlans options. +type UpdatePlansOptions struct { + Plan *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdatePlansOptions : Instantiate UpdatePlansOptions +func (*ContainerRegistryV1) NewUpdatePlansOptions() *UpdatePlansOptions { + return &UpdatePlansOptions{} +} + +// SetPlan : Allow user to set Plan +func (options *UpdatePlansOptions) SetPlan(plan string) *UpdatePlansOptions { + options.Plan = core.StringPtr(plan) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdatePlansOptions) SetHeaders(param map[string]string) *UpdatePlansOptions { + options.Headers = param + return options +} + +// UpdateQuotaOptions : The UpdateQuota options. +type UpdateQuotaOptions struct { + // Storage quota in megabytes. 
The value -1 denotes "Unlimited". + StorageMegabytes *int64 + + // Traffic quota in megabytes. The value -1 denotes "Unlimited". + TrafficMegabytes *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateQuotaOptions : Instantiate UpdateQuotaOptions +func (*ContainerRegistryV1) NewUpdateQuotaOptions() *UpdateQuotaOptions { + return &UpdateQuotaOptions{} +} + +// SetStorageMegabytes : Allow user to set StorageMegabytes +func (options *UpdateQuotaOptions) SetStorageMegabytes(storageMegabytes int64) *UpdateQuotaOptions { + options.StorageMegabytes = core.Int64Ptr(storageMegabytes) + return options +} + +// SetTrafficMegabytes : Allow user to set TrafficMegabytes +func (options *UpdateQuotaOptions) SetTrafficMegabytes(trafficMegabytes int64) *UpdateQuotaOptions { + options.TrafficMegabytes = core.Int64Ptr(trafficMegabytes) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateQuotaOptions) SetHeaders(param map[string]string) *UpdateQuotaOptions { + options.Headers = param + return options +} + +// UpdateSettingsOptions : The UpdateSettings options. +type UpdateSettingsOptions struct { + // Opt in to IBM Cloud Container Registry publishing platform metrics. + PlatformMetrics *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateSettingsOptions : Instantiate UpdateSettingsOptions +func (*ContainerRegistryV1) NewUpdateSettingsOptions() *UpdateSettingsOptions { + return &UpdateSettingsOptions{} +} + +// SetPlatformMetrics : Allow user to set PlatformMetrics +func (options *UpdateSettingsOptions) SetPlatformMetrics(platformMetrics bool) *UpdateSettingsOptions { + options.PlatformMetrics = core.BoolPtr(platformMetrics) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSettingsOptions) SetHeaders(param map[string]string) *UpdateSettingsOptions { + options.Headers = param + return options +} diff --git a/vendor/github.com/IBM/go-sdk-core/LICENSE.md b/vendor/github.com/IBM/go-sdk-core/LICENSE.md new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/LICENSE.md @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/IBM/go-sdk-core/core/authenticator.go b/vendor/github.com/IBM/go-sdk-core/core/authenticator.go new file mode 100644 index 00000000000..58b46a08699 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/authenticator.go @@ -0,0 +1,26 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/http" +) + +// Authenticator: each authenticator implements this set of methods. 
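+//
+// Illustrative sketch (editor's addition): a minimal custom implementation of
+// this interface that injects a static header; all names here are hypothetical.
+//
+//	type headerAuthenticator struct{ key string }
+//
+//	func (a *headerAuthenticator) AuthenticationType() string { return "custom" }
+//
+//	func (a *headerAuthenticator) Validate() error {
+//		if a.key == "" {
+//			return errors.New("key must not be empty")
+//		}
+//		return nil
+//	}
+//
+//	func (a *headerAuthenticator) Authenticate(req *http.Request) error {
+//		req.Header.Set("X-Api-Key", a.key)
+//		return nil
+//	}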
+type Authenticator interface { + AuthenticationType() string + Authenticate(*http.Request) error + Validate() error +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/authenticator_factory.go b/vendor/github.com/IBM/go-sdk-core/core/authenticator_factory.go new file mode 100644 index 00000000000..fba1de6e815 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/authenticator_factory.go @@ -0,0 +1,52 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "strings" +) + +// GetAuthenticatorFromEnvironment instantiates an Authenticator using service properties +// retrieved from external config sources. +func GetAuthenticatorFromEnvironment(credentialKey string) (authenticator Authenticator, err error) { + properties, err := getServiceProperties(credentialKey) + if len(properties) == 0 { + return + } + + // Default the authentication type to IAM if not specified. + authType := properties[PROPNAME_AUTH_TYPE] + if authType == "" { + authType = AUTHTYPE_IAM + } + + // Create the authenticator appropriate for the auth type. + if strings.EqualFold(authType, AUTHTYPE_BASIC) { + authenticator, err = newBasicAuthenticatorFromMap(properties) + } else if strings.EqualFold(authType, AUTHTYPE_BEARER_TOKEN) { + authenticator, err = newBearerTokenAuthenticatorFromMap(properties) + } else if strings.EqualFold(authType, AUTHTYPE_IAM) { + authenticator, err = newIamAuthenticatorFromMap(properties) + } else if strings.EqualFold(authType, AUTHTYPE_CP4D) { + authenticator, err = newCloudPakForDataAuthenticatorFromMap(properties) + } else if strings.EqualFold(authType, AUTHTYPE_NOAUTH) { + authenticator, err = NewNoAuthAuthenticator() + } else { + err = fmt.Errorf(ERRORMSG_AUTHTYPE_UNKNOWN, authType) + } + + return +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/base_service.go b/vendor/github.com/IBM/go-sdk-core/core/base_service.go new file mode 100644 index 00000000000..077cb845cdd --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/base_service.go @@ -0,0 +1,365 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +const ( + header_name_USER_AGENT = "User-Agent" + sdk_name = "ibm-go-sdk-core" +) + +// ServiceOptions : This struct contains the options supported by the BaseService methods. 
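+//
+// Illustrative usage (editor's sketch): wiring these options into a service via
+// NewBaseService (defined below). The URL, token, and names are placeholders.
+//
+//	options := &ServiceOptions{
+//		URL:           "https://us.icr.io",
+//		Authenticator: &BearerTokenAuthenticator{BearerToken: "<token>"},
+//	}
+//	service, err := NewBaseService(options, "container_registry", "Container Registry")
+//	if err != nil {
+//		log.Fatal(err)
+//	}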
+type ServiceOptions struct { + // This is the base URL associated with the service instance. + // This value will be combined with the path for each operation to form the request URL. + URL string + + // This field holds the authenticator for the service instance. + // The authenticator will "authenticate" each outbound request by adding additional + // information to the request, typically in the form of the "Authorization" http header. + Authenticator Authenticator +} + +// BaseService : This struct defines a common "service" object that is used by each generated service +// to manage requests and responses, perform authentication, etc. +type BaseService struct { + + // The options related to the base service. + Options *ServiceOptions + + // A set of "default" http headers to be included with each outbound request. + // This can be set by the SDK user. + DefaultHeaders http.Header + + // The HTTP Client used to send requests and receive responses. + Client *http.Client + + // The value to be used for the "User-Agent" HTTP header that is added to each outbound request. + // If this value is not set, then a default value will be used for the header. + UserAgent string +} + +// NewBaseService : This function will construct a new instance of the BaseService struct, while +// performing validation on input parameters and service options. +func NewBaseService(options *ServiceOptions, serviceName, displayName string) (*BaseService, error) { + if HasBadFirstOrLastChar(options.URL) { + return nil, fmt.Errorf(ERRORMSG_PROP_INVALID, "URL") + } + + if options.Authenticator == nil { + return nil, fmt.Errorf(ERRORMSG_NO_AUTHENTICATOR) + } + + if err := options.Authenticator.Validate(); err != nil { + return nil, err + } + + service := BaseService{ + Options: options, + + Client: &http.Client{ + Timeout: time.Second * 30, + }, + } + + // Set a default value for the User-Agent http header. + service.SetUserAgent(service.buildUserAgent()) + + err := service.ConfigureService(serviceName) + if err != nil { + return nil, err + } + + return &service, nil +} + +func (service *BaseService) ConfigureService(serviceName string) error { + // Try to load service properties from external config. + serviceProps, err := getServiceProperties(serviceName) + if err != nil { + return err + } + + // If we were able to load any properties for this service, then check to see if the + // service-level properties were present and set them on the service if so. + if serviceProps != nil { + + // URL + if url, ok := serviceProps[PROPNAME_SVC_URL]; ok && url != "" { + err := service.SetURL(url) + if err != nil { + return err + } + } + + // DISABLE_SSL + if disableSSL, ok := serviceProps[PROPNAME_SVC_DISABLE_SSL]; ok && disableSSL != "" { + // Convert the config string to bool. + boolValue, err := strconv.ParseBool(disableSSL) + if err != nil { + boolValue = false + } + + // If requested, disable SSL. + if boolValue { + service.DisableSSLVerification() + } + } + } + return nil +} + +// SetURL sets the service URL +// +// Deprecated: use SetServiceURL instead. 
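+//
+// For example (editor's note), prefer:
+//
+//	if err := service.SetServiceURL("https://uk.icr.io"); err != nil {
+//		log.Fatal(err)
+//	}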
+func (service *BaseService) SetURL(url string) error {
+	return service.SetServiceURL(url)
+}
+
+// SetServiceURL sets the service URL
+func (service *BaseService) SetServiceURL(url string) error {
+	if HasBadFirstOrLastChar(url) {
+		return fmt.Errorf(ERRORMSG_PROP_INVALID, "URL")
+	}
+
+	service.Options.URL = url
+	return nil
+}
+
+// GetServiceURL returns the service URL
+func (service *BaseService) GetServiceURL() string {
+	return service.Options.URL
+}
+
+// SetDefaultHeaders sets HTTP headers to be sent in every request.
+func (service *BaseService) SetDefaultHeaders(headers http.Header) {
+	service.DefaultHeaders = headers
+}
+
+// SetHTTPClient updates the client handling the requests
+func (service *BaseService) SetHTTPClient(client *http.Client) {
+	service.Client = client
+}
+
+// DisableSSLVerification skips SSL verification
+func (service *BaseService) DisableSSLVerification() {
+	tr := &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+	}
+	service.Client.Transport = tr
+}
+
+// buildUserAgent : Builds the default user agent string
+func (service *BaseService) buildUserAgent() string {
+	return fmt.Sprintf("%s-%s %s", sdk_name, __VERSION__, SystemInfo())
+}
+
+// SetUserAgent : Sets the user agent value; an empty string selects the default user agent
+func (service *BaseService) SetUserAgent(userAgentString string) {
+	if userAgentString == "" {
+		userAgentString = service.buildUserAgent()
+	}
+	service.UserAgent = userAgentString
+}
+
+// Request performs the HTTP request
+func (service *BaseService) Request(req *http.Request, result interface{}) (detailedResponse *DetailedResponse, err error) {
+	// Add default headers.
+	if service.DefaultHeaders != nil {
+		for k, v := range service.DefaultHeaders {
+			req.Header.Add(k, strings.Join(v, ""))
+		}
+	}
+
+	// Add the default User-Agent header if not already present.
+	userAgent := req.Header.Get(header_name_USER_AGENT)
+	if userAgent == "" {
+		req.Header.Add(header_name_USER_AGENT, service.UserAgent)
+	}
+
+	// Add authentication to the outbound request.
+	if service.Options.Authenticator == nil {
+		err = fmt.Errorf(ERRORMSG_NO_AUTHENTICATOR)
+		return
+	}
+
+	err = service.Options.Authenticator.Authenticate(req)
+	if err != nil {
+		return
+	}
+
+	// Invoke the request.
+	httpResponse, err := service.Client.Do(req)
+	if err != nil {
+		if strings.Contains(err.Error(), SSL_CERTIFICATION_ERROR) {
+			err = fmt.Errorf(ERRORMSG_SSL_VERIFICATION_FAILED + "\n" + err.Error())
+		}
+		return
+	}
+
+	// Start to populate the DetailedResponse.
+	detailedResponse = &DetailedResponse{
+		StatusCode: httpResponse.StatusCode,
+		Headers:    httpResponse.Header,
+	}
+
+	contentType := httpResponse.Header.Get(CONTENT_TYPE)
+
+	// If the operation was unsuccessful, then set up the DetailedResponse
+	// and error objects appropriately.
+	if httpResponse.StatusCode < 200 || httpResponse.StatusCode >= 300 {
+
+		var responseBody []byte
+
+		// First, read the response body into a byte array.
+		if httpResponse.Body != nil {
+			var readErr error
+
+			defer httpResponse.Body.Close()
+			responseBody, readErr = ioutil.ReadAll(httpResponse.Body)
+			if readErr != nil {
+				err = fmt.Errorf("An error occurred while reading the response body: '%s'", readErr.Error())
+				return
+			}
+		}
+
+		// If the responseBody is empty, then just return a generic error based on the status code.
+		if len(responseBody) == 0 {
+			err = fmt.Errorf(http.StatusText(httpResponse.StatusCode))
+			return
+		}
+
+		// For a JSON-based error response body, decode it into a map (generic JSON object).
+ if IsJSONMimeType(contentType) {
+ // Return the error response body as a map, along with an
+ // error object containing our best guess at an error message.
+ responseMap, decodeErr := decodeAsMap(responseBody)
+ if decodeErr == nil {
+ detailedResponse.Result = responseMap
+ err = fmt.Errorf(getErrorMessage(responseMap, detailedResponse.StatusCode))
+ return
+ }
+ }
+
+ // For a non-JSON response or if we tripped while decoding the JSON response,
+ // just return the response body byte array in the RawResult field along with
+ // an error object that contains the generic error message for the status code.
+ detailedResponse.RawResult = responseBody
+ err = fmt.Errorf(http.StatusText(httpResponse.StatusCode))
+ return
+ }
+
+ // Operation was successful and we are expecting a response, so deserialize the response.
+ if result != nil {
+ // For a JSON response, decode it into the response object.
+ if IsJSONMimeType(contentType) {
+
+ // First, read the response body into a byte array.
+ defer httpResponse.Body.Close()
+ responseBody, readErr := ioutil.ReadAll(httpResponse.Body)
+ if readErr != nil {
+ err = fmt.Errorf("An error occurred while reading the response body: '%s'", readErr.Error())
+ return
+ }
+
+ // Decode the byte array as JSON.
+ decodeErr := json.NewDecoder(bytes.NewReader(responseBody)).Decode(&result)
+ if decodeErr != nil {
+ // Error decoding the response body.
+ // Return the response body in RawResult, along with an error.
+ err = fmt.Errorf("An error occurred while unmarshalling the response body: '%s'", decodeErr.Error())
+ detailedResponse.RawResult = responseBody
+ return
+ }
+
+ // Decode step was successful. Return the decoded response object in the Result field.
+ detailedResponse.Result = result
+ return
+ }
+
+ // For a non-JSON response body, just return it as an io.Reader in the Result field.
+ detailedResponse.Result = httpResponse.Body
+ }
+
+ return
+}
+
+// Errors : a struct for errors array
+type Errors struct {
+ Errors []Error `json:"errors,omitempty"`
+}
+
+// Error : specifies the error
+type Error struct {
+ Message string `json:"message,omitempty"`
+}
+
+// decodeAsMap: Decode the specified JSON byte-stream into a map (akin to a generic JSON object).
+// This function returns:
+// a) an instance of map[string]interface{} if the specified byte-stream was successfully
+// decoded as JSON, or
+// b) a nil map and a non-nil error if the byte-stream could not be successfully decoded as JSON.
+// The caller retains the original byte buffer, so the raw response body can still be
+// returned to the user if the decode step fails.
+func decodeAsMap(byteBuffer []byte) (result map[string]interface{}, err error) {
+ err = json.NewDecoder(bytes.NewReader(byteBuffer)).Decode(&result)
+ return
+}
+
+// getErrorMessage: try to retrieve an error message from the decoded response body (map).
+func getErrorMessage(responseMap map[string]interface{}, statusCode int) string {
+
+ // If the response contained the "errors" field, then try to deserialize responseMap
+ // into an array of Error structs, then return the first entry's "Message" field.
+ if _, ok := responseMap["errors"]; ok { + var errors Errors + responseBuffer, _ := json.Marshal(responseMap) + if err := json.Unmarshal(responseBuffer, &errors); err == nil { + return errors.Errors[0].Message + } + } + + // Return the "error" field if present. + if val, ok := responseMap["error"]; ok { + return val.(string) + } + + // Return the "message" field if present. + if val, ok := responseMap["message"]; ok { + return val.(string) + } + + // Finally, return the "errorMessage" field if present. + if val, ok := responseMap["errorMessage"]; ok { + return val.(string) + } + + return http.StatusText(statusCode) +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/basic_authenticator.go b/vendor/github.com/IBM/go-sdk-core/core/basic_authenticator.go new file mode 100644 index 00000000000..113a3333672 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/basic_authenticator.go @@ -0,0 +1,80 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" +) + +// The BasicAuthenticator will perform authentication on outbound requests by adding +// a "Basic" type Authorization header that contains the base64-encoded username and password. +type BasicAuthenticator struct { + // [Required] - the basic auth username and password. + Username string + Password string +} + +// NewBasicAuthenticator: Constructs a new BasicAuthenticator instance. +func NewBasicAuthenticator(username string, password string) (*BasicAuthenticator, error) { + obj := &BasicAuthenticator{ + Username: username, + Password: password, + } + if err := obj.Validate(); err != nil { + return nil, err + } + return obj, nil +} + +// newBasicAuthenticatorFromMap: Constructs a new BasicAuthenticator instance from a map. +func newBasicAuthenticatorFromMap(properties map[string]string) (*BasicAuthenticator, error) { + if properties == nil { + return nil, fmt.Errorf(ERRORMSG_PROPS_MAP_NIL) + } + + return NewBasicAuthenticator(properties[PROPNAME_USERNAME], properties[PROPNAME_PASSWORD]) +} + +func (BasicAuthenticator) AuthenticationType() string { + return AUTHTYPE_BASIC +} + +// Authenticate: authenticates the specified request by adding an Authorizatin header. 
+func (this BasicAuthenticator) Authenticate(request *http.Request) error { + request.SetBasicAuth(this.Username, this.Password) + return nil +} + +// Validate: validates the configuration +func (this BasicAuthenticator) Validate() error { + if this.Username == "" { + return fmt.Errorf(ERRORMSG_PROP_MISSING, "Username") + } + + if this.Password == "" { + return fmt.Errorf(ERRORMSG_PROP_MISSING, "Password") + } + + if HasBadFirstOrLastChar(this.Username) { + return fmt.Errorf(ERRORMSG_PROP_INVALID, "Username") + } + + if HasBadFirstOrLastChar(this.Password) { + return fmt.Errorf(ERRORMSG_PROP_INVALID, "Password") + } + + return nil +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/bearer_token_authenticator.go b/vendor/github.com/IBM/go-sdk-core/core/bearer_token_authenticator.go new file mode 100644 index 00000000000..95e9ad77508 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/bearer_token_authenticator.go @@ -0,0 +1,67 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" +) + +// The BearerTokenAuthenticator will authenticate requests by adding +// a "Bearer"-type Authorization header that contains the configured bearer token value. +type BearerTokenAuthenticator struct { + + // [Required] - the bearer token value to be used to authenticate request. + BearerToken string +} + +// NewBearerTokenAuthenticator: Constructs a new BearerTokenAuthenticator instance. +func NewBearerTokenAuthenticator(bearerToken string) (*BearerTokenAuthenticator, error) { + obj := &BearerTokenAuthenticator{ + BearerToken: bearerToken, + } + if err := obj.Validate(); err != nil { + return nil, err + } + return obj, nil +} + +// newBearerTokenAuthenticator : Constructs a new BearerTokenAuthenticator instance from a map. +func newBearerTokenAuthenticatorFromMap(properties map[string]string) (*BearerTokenAuthenticator, error) { + if properties == nil { + return nil, fmt.Errorf(ERRORMSG_PROPS_MAP_NIL) + } + + return NewBearerTokenAuthenticator(properties[PROPNAME_BEARER_TOKEN]) +} + +func (BearerTokenAuthenticator) AuthenticationType() string { + return AUTHTYPE_BEARER_TOKEN +} + +// Authenticate: authenticates the specified request by adding an Authorization header +// that contains the bearer token value. +func (this BearerTokenAuthenticator) Authenticate(request *http.Request) error { + request.Header.Set("Authorization", fmt.Sprintf(`Bearer %s`, this.BearerToken)) + return nil +} + +// Validate: validates the configuration +func (this BearerTokenAuthenticator) Validate() error { + if this.BearerToken == "" { + return fmt.Errorf(ERRORMSG_PROP_MISSING, "BearerToken") + } + return nil +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/config_utils.go b/vendor/github.com/IBM/go-sdk-core/core/config_utils.go new file mode 100644 index 00000000000..e063e20fcf2 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/config_utils.go @@ -0,0 +1,233 @@ +package core + +// (C) Copyright IBM Corp. 2019. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+ "strings"
+)
+
+const (
+ IBM_CREDENTIAL_FILE_ENVVAR = "IBM_CREDENTIALS_FILE"
+ DEFAULT_CREDENTIAL_FILE_NAME = "ibm-credentials.env"
+)
+
+// getServiceProperties: This function will retrieve configuration properties for the specified service
+// from external config sources in the following precedence order:
+// 1) credential file
+// 2) environment variables
+// 3) VCAP_SERVICES
+func getServiceProperties(serviceName string) (serviceProps map[string]string, err error) {
+
+ if serviceName == "" {
+ err = fmt.Errorf("serviceName was not specified")
+ return
+ }
+
+ // First try to retrieve service properties from a credential file.
+ serviceProps = getServicePropertiesFromCredentialFile(serviceName)
+
+ // Next, try to retrieve them from environment variables.
+ if serviceProps == nil {
+ serviceProps = getServicePropertiesFromEnvironment(serviceName)
+ }
+
+ // Finally, try to retrieve them from VCAP_SERVICES.
+ if serviceProps == nil {
+ serviceProps = getServicePropertiesFromVCAP(serviceName)
+ }
+
+ return
+}
+
+// getServicePropertiesFromCredentialFile: returns a map containing properties found within a credential file
+// that are associated with the specified credentialKey. Returns a nil map if no properties are found.
+// Credential file search order:
+// 1) ${IBM_CREDENTIALS_FILE}
+// 2) <current working directory>/ibm-credentials.env
+// 3) <user home directory>/ibm-credentials.env
+func getServicePropertiesFromCredentialFile(credentialKey string) map[string]string {
+
+ // Check the search order for the credential file that we'll attempt to load:
+ var credentialFilePath string
+
+ // 1) ${IBM_CREDENTIALS_FILE}
+ envPath := os.Getenv(IBM_CREDENTIAL_FILE_ENVVAR)
+ if _, err := os.Stat(envPath); err == nil {
+ credentialFilePath = envPath
+ }
+
+ // 2) <current working directory>/ibm-credentials.env
+ if credentialFilePath == "" {
+ dir, _ := os.Getwd()
+ var filePath = path.Join(dir, DEFAULT_CREDENTIAL_FILE_NAME)
+ if _, err := os.Stat(filePath); err == nil {
+ credentialFilePath = filePath
+ }
+ }
+
+ // 3) <user home directory>/ibm-credentials.env
+ if credentialFilePath == "" {
+ var filePath = path.Join(UserHomeDir(), DEFAULT_CREDENTIAL_FILE_NAME)
+ if _, err := os.Stat(filePath); err == nil {
+ credentialFilePath = filePath
+ }
+ }
+
+ // If we found a file to load, then load it.
+ if credentialFilePath != "" {
+ file, err := os.Open(credentialFilePath)
+ if err != nil {
+ return nil
+ }
+ defer file.Close()
+
+ // Collect the contents of the credential file in a string array.
+ lines := make([]string, 0)
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ lines = append(lines, scanner.Text())
+ }
+
+ // Parse the file contents into name/value pairs.
+ return parsePropertyStrings(credentialKey, lines)
+ }
+
+ return nil
+}
+
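To illustrate these conventions, a sketch using environment variables; the "example_service" name and property values are assumptions, and the same lines could live in an ibm-credentials.env file instead:

```go
package main

import (
	"fmt"
	"os"

	"github.com/IBM/go-sdk-core/core"
)

func main() {
	// Equivalent to a credential-file line: EXAMPLE_SERVICE_URL=https://example.cloud.ibm.com
	os.Setenv("EXAMPLE_SERVICE_URL", "https://example.cloud.ibm.com")

	authenticator, _ := core.NewBearerTokenAuthenticator("my-token")
	service, err := core.NewBaseService(&core.ServiceOptions{
		Authenticator: authenticator,
	}, "example_service", "Example Service")
	if err != nil {
		panic(err)
	}

	// ConfigureService (called from NewBaseService) picked the URL up from the environment.
	fmt.Println(service.GetServiceURL()) // https://example.cloud.ibm.com
}
```

+// getServicePropertiesFromEnvironment: returns a map containing properties found within the environment
+// that are associated with the specified credentialKey. Returns a nil map if no properties are found.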
+func getServicePropertiesFromEnvironment(credentialKey string) map[string]string {
+ return parsePropertyStrings(credentialKey, os.Environ())
+}
+
+// getServicePropertiesFromVCAP: returns a map containing properties found within the VCAP_SERVICES
+// environment variable for the specified credentialKey (service name). Returns a nil map if no properties are found.
+func getServicePropertiesFromVCAP(credentialKey string) map[string]string {
+ credentials := loadFromVCAPServices(credentialKey)
+ if credentials != nil {
+ props := make(map[string]string)
+ if credentials.URL != "" {
+ props[PROPNAME_SVC_URL] = credentials.URL
+ }
+
+ if credentials.Username != "" {
+ props[PROPNAME_USERNAME] = credentials.Username
+ }
+
+ if credentials.Password != "" {
+ props[PROPNAME_PASSWORD] = credentials.Password
+ }
+
+ if credentials.APIKey != "" {
+ props[PROPNAME_APIKEY] = credentials.APIKey
+ }
+
+ // If no values were actually found in this credential entry, then bail out now.
+ if len(props) == 0 {
+ return nil
+ }
+
+ // Make a (hopefully good) guess at the auth type.
+ authType := ""
+ if props[PROPNAME_APIKEY] != "" {
+ authType = AUTHTYPE_IAM
+ } else if props[PROPNAME_USERNAME] != "" || props[PROPNAME_PASSWORD] != "" {
+ authType = AUTHTYPE_BASIC
+ } else {
+ authType = AUTHTYPE_IAM
+ }
+ props[PROPNAME_AUTH_TYPE] = authType
+
+ return props
+ }
+
+ return nil
+}
+
+// parsePropertyStrings: accepts an array of strings of the form "<name>=<value>" and parses/filters them to
+// produce a map of properties associated with the specified credentialKey.
+func parsePropertyStrings(credentialKey string, propertyStrings []string) map[string]string {
+ if len(propertyStrings) == 0 {
+ return nil
+ }
+
+ props := make(map[string]string)
+ credentialKey = strings.ToUpper(credentialKey)
+ credentialKey = strings.Replace(credentialKey, "-", "_", -1)
+ credentialKey += "_"
+ for _, propertyString := range propertyStrings {
+
+ // Trim the property string and ignore any blank or comment lines.
+ propertyString = strings.TrimSpace(propertyString)
+ if propertyString == "" || strings.HasPrefix(propertyString, "#") {
+ continue
+ }
+
+ // Parse the property string into name and value tokens
+ var tokens = strings.Split(propertyString, "=")
+ if len(tokens) == 2 {
+ // Does the name start with the credential key?
+ // If so, then extract the property name by filtering out the credential key,
+ // then store the name/value pair in the map.
+ if strings.HasPrefix(tokens[0], credentialKey) && (len(tokens[0]) > len(credentialKey)) { + name := tokens[0][len(credentialKey):] + value := strings.TrimSpace(tokens[1]) + props[name] = value + } + } + } + + if len(props) == 0 { + return nil + } + return props +} + +// Service : The service +type service struct { + Credentials credential `json:"credentials,omitempty"` +} + +// Credential : The service credential +type credential struct { + URL string `json:"url,omitempty"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + APIKey string `json:"apikey,omitempty"` +} + +// LoadFromVCAPServices : returns the credential of the service +func loadFromVCAPServices(serviceName string) *credential { + vcapServices := os.Getenv("VCAP_SERVICES") + if vcapServices != "" { + var rawServices map[string][]service + if err := json.Unmarshal([]byte(vcapServices), &rawServices); err != nil { + return nil + } + for name, instances := range rawServices { + if name == serviceName { + creds := &instances[0].Credentials + return creds + } + } + } + return nil +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/constants.go b/vendor/github.com/IBM/go-sdk-core/core/constants.go new file mode 100644 index 00000000000..2e9deadc629 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/constants.go @@ -0,0 +1,56 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const ( + // Supported authentication types. + AUTHTYPE_BASIC = "basic" + AUTHTYPE_BEARER_TOKEN = "bearerToken" + AUTHTYPE_NOAUTH = "noAuth" + AUTHTYPE_IAM = "iam" + AUTHTYPE_CP4D = "cp4d" + + // Names of properties that can be defined as part of an external configuration (credential file, env vars, etc.). + // Example: export MYSERVICE_URL=https://myurl + + // Service client properties. + PROPNAME_SVC_URL = "URL" + PROPNAME_SVC_DISABLE_SSL = "DISABLE_SSL" + + // Authenticator properties. + PROPNAME_AUTH_TYPE = "AUTH_TYPE" + PROPNAME_USERNAME = "USERNAME" + PROPNAME_PASSWORD = "PASSWORD" + PROPNAME_BEARER_TOKEN = "BEARER_TOKEN" + PROPNAME_AUTH_URL = "AUTH_URL" + PROPNAME_AUTH_DISABLE_SSL = "AUTH_DISABLE_SSL" + PROPNAME_APIKEY = "APIKEY" + PROPNAME_CLIENT_ID = "CLIENT_ID" + PROPNAME_CLIENT_SECRET = "CLIENT_SECRET" + + // SSL error + SSL_CERTIFICATION_ERROR = "x509: certificate" + + // Common error messages. + ERRORMSG_PROP_MISSING = "The %s property is required but was not specified." + ERRORMSG_PROP_INVALID = "The %s property is invalid. Please remove any surrounding {, }, or \" characters." + ERRORMSG_NO_AUTHENTICATOR = "Authentication information was not properly configured." + ERRORMSG_AUTHTYPE_UNKNOWN = "Unrecognized authentication type: %s" + ERRORMSG_PROPS_MAP_NIL = "The 'properties' map cannot be nil." + ERRORMSG_SSL_VERIFICATION_FAILED = "The connection failed because the SSL certificate is not valid. 
To use a " + + "self-signed certificate, disable verification of the server's SSL certificate " + + "by invoking the DisableSSLVerification() function on your service instance " + + "and/or use the DisableSSLVerification option of the authenticator." +) diff --git a/vendor/github.com/IBM/go-sdk-core/core/cp4d_authenticator.go b/vendor/github.com/IBM/go-sdk-core/core/cp4d_authenticator.go new file mode 100644 index 00000000000..0d8b8145a45 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/cp4d_authenticator.go @@ -0,0 +1,258 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + jwt "github.com/dgrijalva/jwt-go" +) + +// Constants for CP4D +const ( + PRE_AUTH_PATH = "/v1/preauth/validateAuth" +) + +// CloudPakForDataAuthenticator : This authenticator will automatically fetch an access token for the +// user-specified username and password. Outbound REST requests invoked by the BaseService are then authenticated +// by adding a Bearer-type Authorization header containing the access token. +type CloudPakForDataAuthenticator struct { + // [Required] The URL representing the token server's endpoing. + URL string + + // [Required] The username and password used to compute the basic auth Authorization header + // to be sent with requests to the token server. + Username string + Password string + + // [Optional] A flag that indicates whether SSL hostname verification should be disabled or not. + // Default: false + DisableSSLVerification bool + + // [Optional] A set of key/value pairs that will be sent as HTTP headers in requests + // made to the token server. + Headers map[string]string + + // [Optional] The http.Client object used to invoke token server requests. + // If not specified by the user, a suitable default Client will be constructed. + Client *http.Client + + // The cached token and expiration time. + tokenData *cp4dTokenData +} + +// NewCloudPakForDataAuthenticator : Constructs a new CloudPakForDataAuthenticator instance. +func NewCloudPakForDataAuthenticator(url string, username string, password string, + disableSSLVerification bool, headers map[string]string) (*CloudPakForDataAuthenticator, error) { + + authenticator := &CloudPakForDataAuthenticator{ + Username: username, + Password: password, + URL: url, + DisableSSLVerification: disableSSLVerification, + Headers: headers, + } + + // Make sure the config is valid. + err := authenticator.Validate() + if err != nil { + return nil, err + } + + return authenticator, nil +} + +// newCloudPakForDataAuthenticatorFromMap : Constructs a new CloudPakForDataAuthenticator instance from a map. 
+func newCloudPakForDataAuthenticatorFromMap(properties map[string]string) (*CloudPakForDataAuthenticator, error) { + if properties == nil { + return nil, fmt.Errorf(ERRORMSG_PROPS_MAP_NIL) + } + + disableSSL, err := strconv.ParseBool(properties[PROPNAME_AUTH_DISABLE_SSL]) + if err != nil { + disableSSL = false + } + return NewCloudPakForDataAuthenticator(properties[PROPNAME_AUTH_URL], + properties[PROPNAME_USERNAME], properties[PROPNAME_PASSWORD], + disableSSL, nil) +} + +func (CloudPakForDataAuthenticator) AuthenticationType() string { + return AUTHTYPE_CP4D +} + +// Validate: validates the configuration. +func (authenticator CloudPakForDataAuthenticator) Validate() error { + + if authenticator.Username == "" { + return fmt.Errorf(ERRORMSG_PROP_MISSING, "Username") + } + + if authenticator.Password == "" { + return fmt.Errorf(ERRORMSG_PROP_MISSING, "Password") + } + + if authenticator.URL == "" { + return fmt.Errorf(ERRORMSG_PROP_MISSING, "URL") + } + + return nil +} + +// Authenticate: performs the authentication on the specified Request by adding a Bearer-type Authorization header +// containing the access token fetched from the token server. +func (authenticator CloudPakForDataAuthenticator) Authenticate(request *http.Request) error { + token, err := authenticator.getToken() + if err != nil { + return err + } + + request.Header.Set("Authorization", fmt.Sprintf(`Bearer %s`, token)) + return nil +} + +// getToken: returns an access token to be used in an Authorization header. +// Whenever a new token is needed (when a token doesn't yet exist, or the existing token has expired), +// a new access token is fetched from the token server. +func (authenticator *CloudPakForDataAuthenticator) getToken() (string, error) { + if authenticator.tokenData == nil || !authenticator.tokenData.isTokenValid() { + tokenResponse, err := authenticator.requestToken() + if err != nil { + return "", err + } + + authenticator.tokenData, err = newCp4dTokenData(tokenResponse) + if err != nil { + return "", err + } + } + + return authenticator.tokenData.AccessToken, nil +} + +// requestToken: fetches a new access token from the token server. +func (authenticator *CloudPakForDataAuthenticator) requestToken() (*cp4dTokenServerResponse, error) { + // If the user-specified URL does not end with the required path, + // then add it now. + url := authenticator.URL + if !strings.HasSuffix(url, PRE_AUTH_PATH) { + url = fmt.Sprintf("%s%s", url, PRE_AUTH_PATH) + } + + builder, err := NewRequestBuilder(GET).ConstructHTTPURL(url, nil, nil) + if err != nil { + return nil, err + } + + // Add user-defined headers to request. + for headerName, headerValue := range authenticator.Headers { + builder.AddHeader(headerName, headerValue) + } + + req, err := builder.Build() + if err != nil { + return nil, err + } + + req.SetBasicAuth(authenticator.Username, authenticator.Password) + + // If the authenticator does not have a Client, create one now. + if authenticator.Client == nil { + authenticator.Client = &http.Client{ + Timeout: time.Second * 30, + } + + // If the user told us to disable SSL verification, then do it now. 
+ if authenticator.DisableSSLVerification { + transport := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + authenticator.Client.Transport = transport + } + } + + resp, err := authenticator.Client.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if resp != nil { + buff := new(bytes.Buffer) + _, _ = buff.ReadFrom(resp.Body) + return nil, fmt.Errorf(buff.String()) + } + } + + tokenResponse := &cp4dTokenServerResponse{} + _ = json.NewDecoder(resp.Body).Decode(tokenResponse) + defer resp.Body.Close() + return tokenResponse, nil +} + +// cp4dTokenServerResponse : This struct models a response received from the token server. +type cp4dTokenServerResponse struct { + Username string `json:"username,omitempty"` + Role string `json:"role,omitempty"` + Permissions []string `json:"permissions,omitempty"` + Subject string `json:"sub,omitempty"` + Issuer string `json:"iss,omitempty"` + Audience string `json:"aud,omitempty"` + UID string `json:"uid,omitempty"` + AccessToken string `json:"accessToken,omitempty"` + MessageCode string `json:"_messageCode_,omitempty"` + Message string `json:"message,omitempty"` +} + +// cp4dTokenData : This struct represents the cached information related to a fetched access token. +type cp4dTokenData struct { + AccessToken string + RefreshTime int64 +} + +// newCp4dTokenData: constructs a new Cp4dTokenData instance from the specified Cp4dTokenServerResponse instance. +func newCp4dTokenData(tokenResponse *cp4dTokenServerResponse) (*cp4dTokenData, error) { + // Need to crack open the access token (a JWToken) to get the expiration and issued-at times. + claims := &jwt.StandardClaims{} + if token, _ := jwt.ParseWithClaims(tokenResponse.AccessToken, claims, nil); token == nil { + return nil, fmt.Errorf("Error while trying to parse access token!") + } + + // Compute the adjusted refresh time (expiration time - 20% of timeToLive) + timeToLive := claims.ExpiresAt - claims.IssuedAt + expireTime := claims.ExpiresAt + refreshTime := expireTime - int64(float64(timeToLive)*0.2) + + tokenData := &cp4dTokenData{ + AccessToken: tokenResponse.AccessToken, + RefreshTime: refreshTime, + } + return tokenData, nil +} + +// isTokenValid: returns true iff the Cp4dTokenData instance represents a valid (non-expired) access token. +func (this *cp4dTokenData) isTokenValid() bool { + if this.AccessToken != "" && GetCurrentTime() < this.RefreshTime { + return true + } + return false +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/detailed_response.go b/vendor/github.com/IBM/go-sdk-core/core/detailed_response.go new file mode 100644 index 00000000000..791ca2f6197 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/detailed_response.go @@ -0,0 +1,92 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// DetailedResponse : Each generated service method will return an instance of this struct. 
+type DetailedResponse struct { + + // The HTTP status code associated with the response. + StatusCode int + + // The HTTP headers contained in the response. + Headers http.Header + + // Result - this field will contain the result of the operation (obtained from the response body). + // + // If the operation was successful and the response body contains a JSON response, it is unmarshalled + // into an object of the appropriate type (defined by the particular operation), and the Result field will contain + // this response object. To retrieve this response object in its properly-typed form, use the + // generated service's "GetResult()" method. + // If there was an error while unmarshalling the JSON response body, then the RawResult field + // will be set to the byte array containing the response body. + // + // If the operation was successful and the response body contains a non-JSON response, + // the Result field will be an instance of io.ReadCloser that can be used by the application to read + // the response data. + // + // If the operation was unsuccessful and the response body contains a JSON response, + // this field will contain an instance of map[string]interface{} which is the result of unmarshalling the + // response body as a "generic" JSON object. + // If the JSON response for an unsuccessful operation could not be properly unmarshalled, then the + // RawResult field will contain the raw response body. + Result interface{} + + // This field will contain the raw response body as a byte array under these conditions: + // 1) there was a problem unmarshalling a JSON response body - + // either for a successful or unsuccessful operation. + // 2) the operation was unsuccessful, and the response body contains a non-JSON response. + RawResult []byte +} + +// GetHeaders returns the headers +func (response *DetailedResponse) GetHeaders() http.Header { + return response.Headers +} + +// GetStatusCode returns the HTTP status code +func (response *DetailedResponse) GetStatusCode() int { + return response.StatusCode +} + +// GetResult returns the result from the service +func (response *DetailedResponse) GetResult() interface{} { + return response.Result +} + +// GetResultAsMap returns the result as a map (generic JSON object), if the +// DetailedResponse.Result field contains an instance of a map. +func (response *DetailedResponse) GetResultAsMap() (map[string]interface{}, bool) { + m, ok := response.Result.(map[string]interface{}) + return m, ok +} + +// GetRawResult returns the raw response body as a byte array. +func (response *DetailedResponse) GetRawResult() []byte { + return response.RawResult +} + +func (response *DetailedResponse) String() string { + output, err := json.MarshalIndent(response, "", " ") + if err == nil { + return fmt.Sprintf("%+v\n", string(output)) + } + return fmt.Sprintf("Response") +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/iam_authenticator.go b/vendor/github.com/IBM/go-sdk-core/core/iam_authenticator.go new file mode 100644 index 00000000000..23b74b12340 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/iam_authenticator.go @@ -0,0 +1,276 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strconv"
+ "time"
+)
+
+// IamAuthenticator-related constants.
+const (
+ DEFAULT_IAM_URL = "https://iam.cloud.ibm.com/identity/token"
+ DEFAULT_CONTENT_TYPE = "application/x-www-form-urlencoded"
+ REQUEST_TOKEN_GRANT_TYPE = "urn:ibm:params:oauth:grant-type:apikey"
+ REQUEST_TOKEN_RESPONSE_TYPE = "cloud_iam"
+)
+
+// IamAuthenticator : This authenticator will automatically fetch an access token for the
+// configured apikey. Outbound REST requests invoked by the BaseService are then authenticated
+// by adding a Bearer-type Authorization header containing the access token.
+type IamAuthenticator struct {
+
+ // [Required] The apikey used to fetch the access token from the IAM token server.
+ ApiKey string
+
+ // [Optional] The URL representing the IAM token server's endpoint.
+ // If not specified, a suitable default value is used.
+ URL string
+
+ // [Optional] The ClientId and ClientSecret fields are used to form a "basic auth" Authorization header
+ // for interactions with the IAM token server.
+ // If neither field is specified, then no Authorization header will be sent with token server requests.
+ // These fields are optional, but must be specified together.
+ // Default: "", ""
+ ClientId string
+ ClientSecret string
+
+ // [Optional] A flag that indicates whether verification of the server's SSL certificate should be disabled or not.
+ // Default: false
+ DisableSSLVerification bool
+
+ // [Optional] A set of key/value pairs that will be sent as HTTP headers in requests
+ // made to the token server.
+ Headers map[string]string
+
+ // [Optional] The http.Client object used to invoke token server requests.
+ // If not specified by the user, a suitable default Client will be constructed.
+ Client *http.Client
+
+ // The cached token and expiration time.
+ tokenData *iamTokenData
+}
+
+// NewIamAuthenticator : Constructs a new IamAuthenticator instance.
+func NewIamAuthenticator(apikey string, url string, clientId string, clientSecret string,
+ disableSSLVerification bool, headers map[string]string) (*IamAuthenticator, error) {
+
+ authenticator := &IamAuthenticator{
+ ApiKey: apikey,
+ URL: url,
+ ClientId: clientId,
+ ClientSecret: clientSecret,
+ DisableSSLVerification: disableSSLVerification,
+ Headers: headers,
+ }
+
+ // Make sure the config is valid.
+ err := authenticator.Validate()
+ if err != nil {
+ return nil, err
+ }
+
+ return authenticator, nil
+}
+
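A sketch of the constructor and its validation rules, including the ClientId/ClientSecret pairing enforced below; all values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/IBM/go-sdk-core/core"
)

func main() {
	// Valid: an apikey alone is sufficient.
	authenticator, err := core.NewIamAuthenticator("my-apikey", "", "", "", false, nil)
	fmt.Println(authenticator != nil, err) // true <nil>

	// Invalid: a ClientId without a ClientSecret fails Validate().
	_, err = core.NewIamAuthenticator("my-apikey", "", "my-client-id", "", false, nil)
	fmt.Println(err) // The ClientSecret property is required but was not specified.
}
```

+// newIamAuthenticatorFromMap : Constructs a new IamAuthenticator instance from a map.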
+func newIamAuthenticatorFromMap(properties map[string]string) (*IamAuthenticator, error) {
+ if properties == nil {
+ return nil, fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+ }
+
+ disableSSL, err := strconv.ParseBool(properties[PROPNAME_AUTH_DISABLE_SSL])
+ if err != nil {
+ disableSSL = false
+ }
+ return NewIamAuthenticator(properties[PROPNAME_APIKEY], properties[PROPNAME_AUTH_URL],
+ properties[PROPNAME_CLIENT_ID], properties[PROPNAME_CLIENT_SECRET],
+ disableSSL, nil)
+}
+
+func (IamAuthenticator) AuthenticationType() string {
+ return AUTHTYPE_IAM
+}
+
+// Authenticate: Performs the authentication on the specified Request by adding a Bearer-type Authorization header
+// containing the access token fetched from the token server.
+func (authenticator IamAuthenticator) Authenticate(request *http.Request) error {
+ token, err := authenticator.getToken()
+ if err != nil {
+ return err
+ }
+
+ request.Header.Set("Authorization", fmt.Sprintf(`Bearer %s`, token))
+ return nil
+}
+
+// Validate: validates the configuration of the IamAuthenticator instance.
+func (this IamAuthenticator) Validate() error {
+ if this.ApiKey == "" {
+ return fmt.Errorf(ERRORMSG_PROP_MISSING, "ApiKey")
+ }
+
+ if HasBadFirstOrLastChar(this.ApiKey) {
+ return fmt.Errorf(ERRORMSG_PROP_INVALID, "ApiKey")
+ }
+
+ // Validate ClientId and ClientSecret. They must both be specified together or neither should be specified.
+ if this.ClientId == "" && this.ClientSecret == "" {
+ // Do nothing as this is the valid scenario
+ } else {
+ // Since it is NOT the case that both properties are empty, make sure BOTH are specified.
+ if this.ClientId == "" {
+ return fmt.Errorf(ERRORMSG_PROP_MISSING, "ClientId")
+ }
+
+ if this.ClientSecret == "" {
+ return fmt.Errorf(ERRORMSG_PROP_MISSING, "ClientSecret")
+ }
+ }
+
+ return nil
+}
+
+// getToken: returns an access token to be used in an Authorization header.
+// Whenever a new token is needed (when a token doesn't yet exist, or the existing token has expired),
+// a new access token is fetched from the token server.
+func (authenticator *IamAuthenticator) getToken() (string, error) {
+ if authenticator.tokenData == nil || !authenticator.tokenData.isTokenValid() {
+ tokenResponse, err := authenticator.requestToken()
+ if err != nil {
+ return "", err
+ }
+
+ authenticator.tokenData, err = newIamTokenData(tokenResponse)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return authenticator.tokenData.AccessToken, nil
+}
+
+// requestToken: fetches a new access token from the token server.
+func (authenticator *IamAuthenticator) requestToken() (*iamTokenServerResponse, error) {
+ // Use the default IAM URL if one was not specified by the user.
+ url := authenticator.URL
+ if url == "" {
+ url = DEFAULT_IAM_URL
+ }
+
+ builder := NewRequestBuilder(POST)
+ _, err := builder.ConstructHTTPURL(url, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ builder.AddHeader(CONTENT_TYPE, DEFAULT_CONTENT_TYPE).
+ AddHeader(Accept, APPLICATION_JSON).
+ AddFormData("grant_type", "", "", REQUEST_TOKEN_GRANT_TYPE).
+ AddFormData("apikey", "", "", authenticator.ApiKey).
+ AddFormData("response_type", "", "", REQUEST_TOKEN_RESPONSE_TYPE)
+
+ // Add user-defined headers to request.
+ for headerName, headerValue := range authenticator.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ req, err := builder.Build()
+ if err != nil {
+ return nil, err
+ }
+
+ // If client id and secret were configured by the user, then set them on the request
+ // as a basic auth header.
+ if authenticator.ClientId != "" && authenticator.ClientSecret != "" {
+ req.SetBasicAuth(authenticator.ClientId, authenticator.ClientSecret)
+ }
+
+ // If the authenticator does not have a Client, create one now.
+ if authenticator.Client == nil {
+ authenticator.Client = &http.Client{
+ Timeout: time.Second * 30,
+ }
+
+ // If the user told us to disable SSL verification, then do it now.
+ if authenticator.DisableSSLVerification {
+ transport := &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ authenticator.Client.Transport = transport
+ }
+ }
+
+ resp, err := authenticator.Client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ if resp != nil {
+ buff := new(bytes.Buffer)
+ _, _ = buff.ReadFrom(resp.Body)
+ return nil, fmt.Errorf(buff.String())
+ }
+ }
+
+ tokenResponse := &iamTokenServerResponse{}
+ _ = json.NewDecoder(resp.Body).Decode(tokenResponse)
+ defer resp.Body.Close()
+ return tokenResponse, nil
+}
+
+// iamTokenServerResponse : This struct models a response received from the token server.
+type iamTokenServerResponse struct {
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+ TokenType string `json:"token_type"`
+ ExpiresIn int64 `json:"expires_in"`
+ Expiration int64 `json:"expiration"`
+}
+
+// iamTokenData : This struct represents the cached information related to a fetched access token.
+type iamTokenData struct {
+ AccessToken string
+ RefreshTime int64
+}
+
+// newIamTokenData: constructs a new IamTokenData instance from the specified IamTokenServerResponse instance.
+func newIamTokenData(tokenResponse *iamTokenServerResponse) (*iamTokenData, error) {
+ // Compute the adjusted refresh time (expiration time - 20% of timeToLive)
+ timeToLive := tokenResponse.ExpiresIn
+ expireTime := tokenResponse.Expiration
+ refreshTime := expireTime - int64(float64(timeToLive)*0.2)
+
+ tokenData := &iamTokenData{
+ AccessToken: tokenResponse.AccessToken,
+ RefreshTime: refreshTime,
+ }
+
+ return tokenData, nil
+}
+
+// isTokenValid: returns true iff the IamTokenData instance represents a valid (non-expired) access token.
+func (this *iamTokenData) isTokenValid() bool {
+ if this.AccessToken != "" && GetCurrentTime() < this.RefreshTime {
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/IBM/go-sdk-core/core/noauth_authenticator.go b/vendor/github.com/IBM/go-sdk-core/core/noauth_authenticator.go
new file mode 100644
index 00000000000..dd7d347712e
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/core/noauth_authenticator.go
@@ -0,0 +1,42 @@
+package core
+
+// (C) Copyright IBM Corp. 2019.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "net/http"
+)
+
+// The NoAuthAuthenticator is simply a placeholder implementation of the Authenticator interface
+// that performs no authentication. This might be useful in testing/debugging situations.
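For instance, a hypothetical local test setup (the URL and service name are invented):

```go
package main

import (
	"fmt"

	"github.com/IBM/go-sdk-core/core"
)

func main() {
	// No credentials are required; Validate() always succeeds.
	authenticator, _ := core.NewNoAuthAuthenticator()

	service, err := core.NewBaseService(&core.ServiceOptions{
		URL:           "http://localhost:8080",
		Authenticator: authenticator,
	}, "example_service", "Example Service")
	if err != nil {
		panic(err)
	}
	fmt.Println(service.GetServiceURL())
}
```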
+ +type NoAuthAuthenticator struct { +} + +func NewNoAuthAuthenticator() (*NoAuthAuthenticator, error) { + return &NoAuthAuthenticator{}, nil +} + +func (NoAuthAuthenticator) AuthenticationType() string { + return AUTHTYPE_NOAUTH +} + +func (NoAuthAuthenticator) Validate() error { + return nil +} + +func (this NoAuthAuthenticator) Authenticate(request *http.Request) error { + // Nothing to do since we're not providing any authentication. + return nil +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/request_builder.go b/vendor/github.com/IBM/go-sdk-core/core/request_builder.go new file mode 100644 index 00000000000..bac47cdce9f --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/request_builder.go @@ -0,0 +1,292 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" +) + +// common HTTP methods +const ( + POST = http.MethodPost + GET = http.MethodGet + DELETE = http.MethodDelete + PUT = http.MethodPut + PATCH = http.MethodPatch +) + +// common headers +const ( + Accept = "Accept" + APPLICATION_JSON = "application/json" + CONTENT_DISPOSITION = "Content-Disposition" + CONTENT_TYPE = "Content-Type" + FORM_URL_ENCODED_HEADER = "application/x-www-form-urlencoded" + + ERRORMSG_SERVICE_URL_MISSING = "The service URL is required." + ERRORMSG_SERVICE_URL_INVALID = "There was an error parsing the service URL: %s" +) + +// A FormData stores information for form data +type FormData struct { + fileName string + contentType string + contents interface{} +} + +// A RequestBuilder is an HTTP request to be sent to the service +type RequestBuilder struct { + Method string + URL *url.URL + Header http.Header + Body io.Reader + Query map[string][]string + Form map[string][]FormData +} + +// NewRequestBuilder : Initiates a new request +func NewRequestBuilder(method string) *RequestBuilder { + return &RequestBuilder{ + Method: method, + Header: make(http.Header), + Query: make(map[string][]string), + Form: make(map[string][]FormData), + } +} + +// ConstructHTTPURL creates a properly-encoded URL with path parameters. +// This function returns an error if the serviceURL is "" or is an +// invalid URL string (e.g. ":"). 
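A usage sketch for the builder defined in this file; the URL, path segments, and parameters are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/IBM/go-sdk-core/core"
)

func main() {
	builder := core.NewRequestBuilder(core.GET)
	_, err := builder.ConstructHTTPURL("https://api.example.com",
		[]string{"v1/widgets"},   // path segments
		[]string{"my-widget-id"}) // path parameters, appended after each segment
	if err != nil {
		panic(err)
	}

	builder.AddQuery("limit", "10")
	builder.AddHeader("Accept", "application/json")

	req, err := builder.Build()
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.String())
	// GET https://api.example.com/v1/widgets/my-widget-id?limit=10
}
```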
+func (requestBuilder *RequestBuilder) ConstructHTTPURL(serviceURL string, pathSegments []string, pathParameters []string) (*RequestBuilder, error) { + if serviceURL == "" { + return requestBuilder, fmt.Errorf(ERRORMSG_SERVICE_URL_MISSING) + } + var URL *url.URL + + URL, err := url.Parse(serviceURL) + if err != nil { + return requestBuilder, fmt.Errorf(ERRORMSG_SERVICE_URL_INVALID, err.Error()) + } + + for i, pathSegment := range pathSegments { + if pathSegment != "" { + URL.Path += "/" + pathSegment + } + + if pathParameters != nil && i < len(pathParameters) { + URL.Path += "/" + pathParameters[i] + } + } + requestBuilder.URL = URL + return requestBuilder, nil +} + +// AddQuery adds Query name and value +func (requestBuilder *RequestBuilder) AddQuery(name string, value string) *RequestBuilder { + requestBuilder.Query[name] = append(requestBuilder.Query[name], value) + return requestBuilder +} + +// AddHeader adds header name and value +func (requestBuilder *RequestBuilder) AddHeader(name string, value string) *RequestBuilder { + requestBuilder.Header[name] = []string{value} + return requestBuilder +} + +// AddFormData makes an entry for Form data +func (requestBuilder *RequestBuilder) AddFormData(fieldName string, fileName string, contentType string, + contents interface{}) *RequestBuilder { + if fileName == "" { + if file, ok := contents.(*os.File); ok { + if !((os.File{}) == *file) { // if file is not empty + name := filepath.Base(file.Name()) + fileName = name + } + } + } + requestBuilder.Form[fieldName] = append(requestBuilder.Form[fieldName], FormData{ + fileName: fileName, + contentType: contentType, + contents: contents, + }) + return requestBuilder +} + +// SetBodyContentJSON - set the body content from a JSON structure +func (requestBuilder *RequestBuilder) SetBodyContentJSON(bodyContent interface{}) (*RequestBuilder, error) { + requestBuilder.Body = new(bytes.Buffer) + err := json.NewEncoder(requestBuilder.Body.(io.Writer)).Encode(bodyContent) + return requestBuilder, err +} + +// SetBodyContentString - set the body content from a string +func (requestBuilder *RequestBuilder) SetBodyContentString(bodyContent string) (*RequestBuilder, error) { + requestBuilder.Body = strings.NewReader(bodyContent) + return requestBuilder, nil +} + +// SetBodyContentStream - set the body content from an io.Reader instance +func (requestBuilder *RequestBuilder) SetBodyContentStream(bodyContent io.Reader) (*RequestBuilder, error) { + requestBuilder.Body = bodyContent + return requestBuilder, nil +} + +// CreateMultipartWriter initializes a new multipart writer +func (requestBuilder *RequestBuilder) createMultipartWriter() *multipart.Writer { + buff := new(bytes.Buffer) + requestBuilder.Body = buff + return multipart.NewWriter(buff) +} + +// CreateFormFile is a convenience wrapper around CreatePart. 
It creates +// a new form-data header with the provided field name and file name and contentType +func createFormFile(formWriter *multipart.Writer, fieldname string, filename string, contentType string) (io.Writer, error) { + h := make(textproto.MIMEHeader) + contentDisposition := fmt.Sprintf(`form-data; name="%s"`, fieldname) + if filename != "" { + contentDisposition += fmt.Sprintf(`; filename="%s"`, filename) + } + + h.Set(CONTENT_DISPOSITION, contentDisposition) + if contentType != "" { + h.Set(CONTENT_TYPE, contentType) + } + + return formWriter.CreatePart(h) +} + +// SetBodyContentForMultipart - sets the body content for a part in a multi-part form +func (requestBuilder *RequestBuilder) SetBodyContentForMultipart(contentType string, content interface{}, writer io.Writer) error { + var err error + if stream, ok := content.(io.Reader); ok { + _, err = io.Copy(writer, stream) + } else if stream, ok := content.(*io.ReadCloser); ok { + _, err = io.Copy(writer, *stream) + } else if IsJSONMimeType(contentType) || IsJSONPatchMimeType(contentType) { + err = json.NewEncoder(writer).Encode(content) + } else if str, ok := content.(string); ok { + _, err = writer.Write([]byte(str)) + } else if strPtr, ok := content.(*string); ok { + _, err = writer.Write([]byte(*strPtr)) + } else { + err = fmt.Errorf("Error: unable to determine the type of 'content' provided") + } + return err +} + +// Build the request +func (requestBuilder *RequestBuilder) Build() (*http.Request, error) { + // Create multipart form data + if len(requestBuilder.Form) > 0 { + // handle both application/x-www-form-urlencoded or multipart/form-data + contentType := requestBuilder.Header.Get(CONTENT_TYPE) + if contentType == FORM_URL_ENCODED_HEADER { + data := url.Values{} + for fieldName, l := range requestBuilder.Form { + for _, v := range l { + data.Add(fieldName, v.contents.(string)) + } + } + _, err := requestBuilder.SetBodyContentString(data.Encode()) + if err != nil { + return nil, err + } + } else { + formWriter := requestBuilder.createMultipartWriter() + for fieldName, l := range requestBuilder.Form { + for _, v := range l { + dataPartWriter, err := createFormFile(formWriter, fieldName, v.fileName, v.contentType) + if err != nil { + return nil, err + } + if err = requestBuilder.SetBodyContentForMultipart(v.contentType, + v.contents, dataPartWriter); err != nil { + return nil, err + } + } + } + + requestBuilder.AddHeader("Content-Type", formWriter.FormDataContentType()) + err := formWriter.Close() + if err != nil { + return nil, err + } + } + } + + // Create the request + req, err := http.NewRequest(requestBuilder.Method, requestBuilder.URL.String(), requestBuilder.Body) + if err != nil { + return nil, err + } + + // Headers + req.Header = requestBuilder.Header + + // Query + query := req.URL.Query() + for k, l := range requestBuilder.Query { + for _, v := range l { + query.Add(k, v) + } + } + // Encode query + req.URL.RawQuery = query.Encode() + + return req, nil +} + +// SetBodyContent - sets the body content from one of three different sources +func (requestBuilder *RequestBuilder) SetBodyContent(contentType string, jsonContent interface{}, jsonPatchContent interface{}, + nonJSONContent interface{}) (builder *RequestBuilder, err error) { + if jsonContent != nil { + builder, err = requestBuilder.SetBodyContentJSON(jsonContent) + if err != nil { + return + } + } else if jsonPatchContent != nil { + builder, err = requestBuilder.SetBodyContentJSON(jsonPatchContent) + if err != nil { + return + } + } else { + // Set the 
non-JSON body content based on the type of value passed in, + // which should be a "string", "*string" or an "io.Reader" + if str, ok := nonJSONContent.(string); ok { + builder, err = requestBuilder.SetBodyContentString(str) + } else if strPtr, ok := nonJSONContent.(*string); ok { + builder, err = requestBuilder.SetBodyContentString(*strPtr) + } else if stream, ok := nonJSONContent.(io.Reader); ok { + builder, err = requestBuilder.SetBodyContentStream(stream) + } else if stream, ok := nonJSONContent.(*io.ReadCloser); ok { + builder, err = requestBuilder.SetBodyContentStream(*stream) + } else { + builder = requestBuilder + err = fmt.Errorf("Invalid type for non-JSON body content: %s", reflect.TypeOf(nonJSONContent).String()) + } + } + return +} diff --git a/vendor/github.com/IBM/go-sdk-core/core/utils.go b/vendor/github.com/IBM/go-sdk-core/core/utils.go new file mode 100644 index 00000000000..ff688f581c7 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/core/utils.go @@ -0,0 +1,173 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "time" + + validator "gopkg.in/go-playground/validator.v9" +) + +// Validate single instance of Validate, it caches struct info +var Validate *validator.Validate + +func init() { + Validate = validator.New() +} + +const ( + jsonMimePattern = "(?i)^application\\/((json)|(merge\\-patch\\+json))(;.*)?$" + jsonPatchMimePattern = "(?i)^application\\/json\\-patch\\+json(;.*)?$" +) + +// isNil checks if the specified object is nil or not +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// ValidateNotNil - returns the specified error if 'object' is nil, nil otherwise +func ValidateNotNil(object interface{}, errorMsg string) error { + if isNil(object) { + return errors.New(errorMsg) + } + return nil +} + +// ValidateStruct - validates 'param' (assumed to be a struct) according to the annotations attached to its fields +func ValidateStruct(param interface{}, paramName string) error { + if param != nil { + if err := Validate.Struct(param); err != nil { + // If there were validation errors then return an error containing the field errors + if fieldErrors, ok := err.(validator.ValidationErrors); ok { + return fmt.Errorf("%s failed validation:\n%s", paramName, fieldErrors.Error()) + } + return fmt.Errorf("An unexpected system error occurred while validating %s\n%s", paramName, err.Error()) + } + } + return nil +} + +// StringPtr : return pointer to string literal +func StringPtr(literal string) *string { + return &literal +} + +// BoolPtr : return pointer to boolean literal +func BoolPtr(literal bool) *bool { + return &literal +} + +// Int64Ptr : return pointer to int64 literal +func Int64Ptr(literal int64) *int64 { 
+ return &literal
+}
+
+// Float32Ptr : return pointer to float32 literal
+func Float32Ptr(literal float32) *float32 {
+ return &literal
+}
+
+// Float64Ptr : return pointer to float64 literal
+func Float64Ptr(literal float64) *float64 {
+ return &literal
+}
+
+// IsJSONMimeType : Returns true iff the specified mimeType value represents a "JSON" mimetype.
+func IsJSONMimeType(mimeType string) bool {
+ if mimeType == "" {
+ return false
+ }
+ matched, err := regexp.MatchString(jsonMimePattern, mimeType)
+ if err != nil {
+ return false
+ }
+ return matched
+}
+
+// IsJSONPatchMimeType : Returns true iff the specified mimeType value represents a "JSON Patch" mimetype.
+func IsJSONPatchMimeType(mimeType string) bool {
+ if mimeType == "" {
+ return false
+ }
+ matched, err := regexp.MatchString(jsonPatchMimePattern, mimeType)
+ if err != nil {
+ return false
+ }
+ return matched
+}
+
+// StringNilMapper - de-references the parameter 's' and returns the result, or "" if 's' is nil
+func StringNilMapper(s *string) string {
+ if s == nil {
+ return ""
+ }
+ return *s
+}
+
+// HasBadFirstOrLastChar checks if the string starts with `{` or `"`
+// or ends with `}` or `"`
+func HasBadFirstOrLastChar(str string) bool {
+ return strings.HasPrefix(str, "{") || strings.HasPrefix(str, "\"") ||
+ strings.HasSuffix(str, "}") || strings.HasSuffix(str, "\"")
+}
+
+// UserHomeDir returns the user home directory
+func UserHomeDir() string {
+ if runtime.GOOS == "windows" {
+ home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+ if home == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ return home
+ }
+ return os.Getenv("HOME")
+}
+
+// SystemInfo : returns the system information
+func SystemInfo() string {
+ return fmt.Sprintf("(arch=%s; os=%s; go.version=%s)", runtime.GOARCH, runtime.GOOS, runtime.Version())
+}
+
+// PrettyPrint : prints the specified result value as indented JSON, along with its name
+func PrettyPrint(result interface{}, resultName string) {
+ output, err := json.MarshalIndent(result, "", " ")
+
+ if err == nil {
+ fmt.Printf("%v:\n%+v\n\n", resultName, string(output))
+ }
+}
+
+// GetCurrentTime : returns the current time as the number of seconds since the Unix epoch
+func GetCurrentTime() int64 {
+ return time.Now().Unix()
+}
diff --git a/vendor/github.com/IBM/go-sdk-core/core/version.go b/vendor/github.com/IBM/go-sdk-core/core/version.go
new file mode 100644
index 00000000000..d2d30198ecf
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/core/version.go
@@ -0,0 +1,18 @@
+package core
+
+// (C) Copyright IBM Corp. 2019.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Version of the SDK
+const __VERSION__ = "1.1.0"
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/LICENSE.md b/vendor/github.com/IBM/go-sdk-core/v3/LICENSE.md
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v3/LICENSE.md
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/authenticator.go b/vendor/github.com/IBM/go-sdk-core/v3/core/authenticator.go new file mode 100644 index 00000000000..8292490ee80 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v3/core/authenticator.go @@ -0,0 +1,26 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/http" +) + +// Authenticator describes the set of methods implemented by each authenticator. +type Authenticator interface { + AuthenticationType() string + Authenticate(*http.Request) error + Validate() error +} diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/authenticator_factory.go b/vendor/github.com/IBM/go-sdk-core/v3/core/authenticator_factory.go new file mode 100644 index 00000000000..fba1de6e815 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v3/core/authenticator_factory.go @@ -0,0 +1,52 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "strings" +) + +// GetAuthenticatorFromEnvironment instantiates an Authenticator using service properties +// retrieved from external config sources. +func GetAuthenticatorFromEnvironment(credentialKey string) (authenticator Authenticator, err error) { + properties, err := getServiceProperties(credentialKey) + if len(properties) == 0 { + return + } + + // Default the authentication type to IAM if not specified. + authType := properties[PROPNAME_AUTH_TYPE] + if authType == "" { + authType = AUTHTYPE_IAM + } + + // Create the authenticator appropriate for the auth type. 
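+	// (The comparisons below use strings.EqualFold, so values read from external
+	// config are matched case-insensitively; e.g. "iam", "IAM", and "Iam" all
+	// select the IAM authenticator.)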
+ if strings.EqualFold(authType, AUTHTYPE_BASIC) { + authenticator, err = newBasicAuthenticatorFromMap(properties) + } else if strings.EqualFold(authType, AUTHTYPE_BEARER_TOKEN) { + authenticator, err = newBearerTokenAuthenticatorFromMap(properties) + } else if strings.EqualFold(authType, AUTHTYPE_IAM) { + authenticator, err = newIamAuthenticatorFromMap(properties) + } else if strings.EqualFold(authType, AUTHTYPE_CP4D) { + authenticator, err = newCloudPakForDataAuthenticatorFromMap(properties) + } else if strings.EqualFold(authType, AUTHTYPE_NOAUTH) { + authenticator, err = NewNoAuthAuthenticator() + } else { + err = fmt.Errorf(ERRORMSG_AUTHTYPE_UNKNOWN, authType) + } + + return +} diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/base_service.go b/vendor/github.com/IBM/go-sdk-core/v3/core/base_service.go new file mode 100644 index 00000000000..ff2ae7ee376 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v3/core/base_service.go @@ -0,0 +1,377 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" +) + +const ( + header_name_USER_AGENT = "User-Agent" + sdk_name = "ibm-go-sdk-core" +) + +// ServiceOptions is a struct of configuration values for a service. +type ServiceOptions struct { + // This is the base URL associated with the service instance. This value will + // be combined with the paths for each operation to form the request URL + // [required]. + URL string + + // Authenticator holds the authenticator implementation to be used by the + // service instance to authenticate outbound requests, typically by adding the + // HTTP "Authorization" header. + Authenticator Authenticator +} + +// BaseService implements the common functionality shared by generated services +// to manage requests and responses, authenticate outbound requests, etc. +type BaseService struct { + + // Configuration values for a service. + Options *ServiceOptions + + // A set of "default" http headers to be included with each outbound request. + DefaultHeaders http.Header + + // The HTTP Client used to send requests and receive responses. + Client *http.Client + + // The value to be used for the "User-Agent" HTTP header that is added to each + // outbound request. If this value is not set, then a default value will be + // used for the header. + UserAgent string +} + +// NewBaseService constructs a new instance of BaseService. Validation on input +// parameters and service options will be performed before instance creation. 
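+//
+// A minimal usage sketch from a consumer's point of view (the URL below is a
+// placeholder, and "authenticator" is assumed to be a previously constructed
+// Authenticator such as an IamAuthenticator):
+//
+//	options := &core.ServiceOptions{
+//	    URL:           "https://api.example.cloud.ibm.com",
+//	    Authenticator: authenticator,
+//	}
+//	service, err := core.NewBaseService(options)
+//	if err != nil {
+//	    // handle the configuration error
+//	}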
+func NewBaseService(options *ServiceOptions) (*BaseService, error) { + if HasBadFirstOrLastChar(options.URL) { + return nil, fmt.Errorf(ERRORMSG_PROP_INVALID, "URL") + } + + if options.Authenticator == nil { + return nil, fmt.Errorf(ERRORMSG_NO_AUTHENTICATOR) + } + + if err := options.Authenticator.Validate(); err != nil { + return nil, err + } + + service := BaseService{ + Options: options, + + Client: &http.Client{ + Timeout: time.Second * 30, + }, + } + + // Set a default value for the User-Agent http header. + service.SetUserAgent(service.buildUserAgent()) + + return &service, nil +} + +// ConfigureService updates the service with external configuration values. +func (service *BaseService) ConfigureService(serviceName string) error { + // Try to load service properties from external config. + serviceProps, err := getServiceProperties(serviceName) + if err != nil { + return err + } + + // If we were able to load any properties for this service, then check to see if the + // service-level properties were present and set them on the service if so. + if serviceProps != nil { + + // URL + if url, ok := serviceProps[PROPNAME_SVC_URL]; ok && url != "" { + err := service.SetURL(url) + if err != nil { + return err + } + } + + // DISABLE_SSL + if disableSSL, ok := serviceProps[PROPNAME_SVC_DISABLE_SSL]; ok && disableSSL != "" { + // Convert the config string to bool. + boolValue, err := strconv.ParseBool(disableSSL) + if err != nil { + boolValue = false + } + + // If requested, disable SSL. + if boolValue { + service.DisableSSLVerification() + } + } + } + return nil +} + +// SetURL sets the service URL. +// +// Deprecated: use SetServiceURL instead. +func (service *BaseService) SetURL(url string) error { + return service.SetServiceURL(url) +} + +// SetServiceURL sets the service URL. +func (service *BaseService) SetServiceURL(url string) error { + if HasBadFirstOrLastChar(url) { + return fmt.Errorf(ERRORMSG_PROP_INVALID, "URL") + } + + service.Options.URL = url + return nil +} + +// GetServiceURL returns the service URL. +func (service *BaseService) GetServiceURL() string { + return service.Options.URL +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request. +func (service *BaseService) SetDefaultHeaders(headers http.Header) { + service.DefaultHeaders = headers +} + +// SetHTTPClient updates the client handling the requests. +func (service *BaseService) SetHTTPClient(client *http.Client) { + service.Client = client +} + +// DisableSSLVerification skips SSL verification. +func (service *BaseService) DisableSSLVerification() { + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + service.Client.Transport = tr +} + +// buildUserAgent builds the user agent string. +func (service *BaseService) buildUserAgent() string { + return fmt.Sprintf("%s-%s %s", sdk_name, __VERSION__, SystemInfo()) +} + +// SetUserAgent sets the user agent value. +func (service *BaseService) SetUserAgent(userAgentString string) { + if userAgentString == "" { + service.UserAgent = service.buildUserAgent() + } + service.UserAgent = userAgentString +} + +// Request invokes the specified HTTP request and returns the response. +func (service *BaseService) Request(req *http.Request, result interface{}) (detailedResponse *DetailedResponse, err error) { + // Add default headers. + if service.DefaultHeaders != nil { + for k, v := range service.DefaultHeaders { + req.Header.Add(k, strings.Join(v, "")) + } + } + + // Add the default User-Agent header if not already present. 
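+    // (http.Header.Get canonicalizes the header name, so an existing
+    // "User-Agent" value is detected regardless of its original casing.)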
+ userAgent := req.Header.Get(header_name_USER_AGENT) + if userAgent == "" { + req.Header.Add(header_name_USER_AGENT, service.UserAgent) + } + + // Add authentication to the outbound request. + if service.Options.Authenticator == nil { + err = fmt.Errorf(ERRORMSG_NO_AUTHENTICATOR) + return + } + + err = service.Options.Authenticator.Authenticate(req) + if err != nil { + return + } + + // Invoke the request. + httpResponse, err := service.Client.Do(req) + if err != nil { + if strings.Contains(err.Error(), SSL_CERTIFICATION_ERROR) { + err = fmt.Errorf(ERRORMSG_SSL_VERIFICATION_FAILED + "\n" + err.Error()) + } + return + } + + // Start to populate the DetailedResponse. + detailedResponse = &DetailedResponse{ + StatusCode: httpResponse.StatusCode, + Headers: httpResponse.Header, + } + + contentType := httpResponse.Header.Get(CONTENT_TYPE) + + // If the operation was unsuccessful, then set up the DetailedResponse + // and error objects appropriately. + if httpResponse.StatusCode < 200 || httpResponse.StatusCode >= 300 { + + var responseBody []byte + + // First, read the response body into a byte array. + if httpResponse.Body != nil { + var readErr error + + defer httpResponse.Body.Close() + responseBody, readErr = ioutil.ReadAll(httpResponse.Body) + if readErr != nil { + err = fmt.Errorf("An error occurred while reading the response body: '%s'", readErr.Error()) + return + } + } + + // If the responseBody is empty, then just return a generic error based on the status code. + if len(responseBody) == 0 { + err = fmt.Errorf(http.StatusText(httpResponse.StatusCode)) + return + } + + // For a JSON-based error response body, decode it into a map (generic JSON object). + if IsJSONMimeType(contentType) { + // Return the error response body as a map, along with an + // error object containing our best guess at an error message. + responseMap, decodeErr := decodeAsMap(responseBody) + if decodeErr == nil { + detailedResponse.Result = responseMap + err = fmt.Errorf(getErrorMessage(responseMap, detailedResponse.StatusCode)) + return + } + } + + // For a non-JSON response or if we tripped while decoding the JSON response, + // just return the response body byte array in the RawResult field along with + // an error object that contains the generic error message for the status code. + detailedResponse.RawResult = responseBody + err = fmt.Errorf(http.StatusText(httpResponse.StatusCode)) + return + } + + // Operation was successful and we are expecting a response, so process the response. + if result != nil { + // For a JSON response, decode it into the response object. + resultType := reflect.TypeOf(result).String() + if IsJSONMimeType(contentType) && resultType != "*io.ReadCloser" { + + // First, read the response body into a byte array. + defer httpResponse.Body.Close() + responseBody, readErr := ioutil.ReadAll(httpResponse.Body) + if readErr != nil { + err = fmt.Errorf("An error occurred while reading the response body: '%s'", readErr.Error()) + return + } + + // Decode the byte array as JSON. + decodeErr := json.NewDecoder(bytes.NewReader(responseBody)).Decode(&result) + if decodeErr != nil { + // Error decoding the response body. + // Return the response body in RawResult, along with an error. + err = fmt.Errorf("An error occurred while unmarshalling the response body: '%s'", decodeErr.Error()) + detailedResponse.RawResult = responseBody + return + } + + // Decode step was successful. Return the decoded response object in the Result field. 
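+            // (result is the caller-supplied pointer, so the decoded value is
+            // also visible through the caller's own reference.)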
+            detailedResponse.Result = result
+            return
+        }
+
+        // For a non-JSON response body, just return it as an io.ReadCloser in the Result field.
+        detailedResponse.Result = httpResponse.Body
+    }
+
+    return
+}
+
+// Errors is a struct used to hold an array of errors received in an operation
+// response.
+type Errors struct {
+    Errors []Error `json:"errors,omitempty"`
+}
+
+// Error is a struct used to represent a single error received in an operation
+// response.
+type Error struct {
+    Message string `json:"message,omitempty"`
+}
+
+// decodeAsMap: Decode the specified JSON byte array into a map (akin to a generic JSON object).
+// Notes:
+// 1) The byte array is left untouched, so the caller can still fall back to the raw
+//    response bytes if this decode step fails.
+// 2) The primary return value will be an instance of map[string]interface{} if the
+//    byte array was successfully decoded as JSON; otherwise a decode error is returned.
+func decodeAsMap(byteBuffer []byte) (result map[string]interface{}, err error) {
+    err = json.NewDecoder(bytes.NewReader(byteBuffer)).Decode(&result)
+    return
+}
+
+// getErrorMessage: try to retrieve an error message from the decoded response body (map).
+func getErrorMessage(responseMap map[string]interface{}, statusCode int) string {
+
+    // If the response contained the "errors" field, then try to deserialize responseMap
+    // into an array of Error structs, then return the first entry's "Message" field.
+    if _, ok := responseMap["errors"]; ok {
+        var errors Errors
+        responseBuffer, _ := json.Marshal(responseMap)
+        if err := json.Unmarshal(responseBuffer, &errors); err == nil && len(errors.Errors) > 0 {
+            return errors.Errors[0].Message
+        }
+    }
+
+    // Return the "error" field if present and is a string.
+    if val, ok := responseMap["error"]; ok {
+        errorMsg, ok := val.(string)
+        if ok {
+            return errorMsg
+        }
+    }
+
+    // Return the "message" field if present and is a string.
+    if val, ok := responseMap["message"]; ok {
+        errorMsg, ok := val.(string)
+        if ok {
+            return errorMsg
+        }
+    }
+
+    // Finally, return the "errorMessage" field if present and is a string.
+    if val, ok := responseMap["errorMessage"]; ok {
+        errorMsg, ok := val.(string)
+        if ok {
+            return errorMsg
+        }
+    }
+
+    // If we couldn't find an error message above, just return the generic text
+    // for the status code.
+    return http.StatusText(statusCode)
+}
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/basic_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v3/core/basic_authenticator.go
new file mode 100644
index 00000000000..460f3ef9111
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v3/core/basic_authenticator.go
@@ -0,0 +1,94 @@
+package core
+
+// (C) Copyright IBM Corp. 2019.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "fmt"
+    "net/http"
+)
+
+// BasicAuthenticator takes a user-supplied username and password, and adds
+// them to requests via an Authorization header of the form:
+//
+//	Authorization: Basic <encoded username and password>
+//
+type BasicAuthenticator struct {
+    // Username is the user-supplied basic auth username [required].
+    Username string
+    // Password is the user-supplied basic auth password [required].
+    Password string
+}
+
+// NewBasicAuthenticator constructs a new BasicAuthenticator instance.
+func NewBasicAuthenticator(username string, password string) (*BasicAuthenticator, error) {
+    obj := &BasicAuthenticator{
+        Username: username,
+        Password: password,
+    }
+    if err := obj.Validate(); err != nil {
+        return nil, err
+    }
+    return obj, nil
+}
+
+// newBasicAuthenticatorFromMap constructs a new BasicAuthenticator instance
+// from a map.
+func newBasicAuthenticatorFromMap(properties map[string]string) (*BasicAuthenticator, error) {
+    if properties == nil {
+        return nil, fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+    }
+
+    return NewBasicAuthenticator(properties[PROPNAME_USERNAME], properties[PROPNAME_PASSWORD])
+}
+
+// AuthenticationType returns the authentication type for this authenticator.
+func (BasicAuthenticator) AuthenticationType() string {
+    return AUTHTYPE_BASIC
+}
+
+// Authenticate adds basic authentication information to a request.
+//
+// Basic Authorization will be added to the request's headers in the form:
+//
+//	Authorization: Basic <encoded username and password>
+//
+func (this *BasicAuthenticator) Authenticate(request *http.Request) error {
+    request.SetBasicAuth(this.Username, this.Password)
+    return nil
+}
+
+// Validate the authenticator's configuration.
+//
+// Ensures the username and password are not empty. Additionally, ensures
+// they do not contain invalid characters.
+func (this BasicAuthenticator) Validate() error {
+    if this.Username == "" {
+        return fmt.Errorf(ERRORMSG_PROP_MISSING, "Username")
+    }
+
+    if this.Password == "" {
+        return fmt.Errorf(ERRORMSG_PROP_MISSING, "Password")
+    }
+
+    if HasBadFirstOrLastChar(this.Username) {
+        return fmt.Errorf(ERRORMSG_PROP_INVALID, "Username")
+    }
+
+    if HasBadFirstOrLastChar(this.Password) {
+        return fmt.Errorf(ERRORMSG_PROP_INVALID, "Password")
+    }
+
+    return nil
+}
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/bearer_token_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v3/core/bearer_token_authenticator.go
new file mode 100644
index 00000000000..8f2f35fec7a
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v3/core/bearer_token_authenticator.go
@@ -0,0 +1,77 @@
+package core
+
+// (C) Copyright IBM Corp. 2019.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "fmt"
+    "net/http"
+)
+
+// BearerTokenAuthenticator takes a user-supplied bearer token and adds it
+// to requests via an Authorization header of the form:
+//
+//	Authorization: Bearer <bearer-token>
+//
+type BearerTokenAuthenticator struct {
+
+    // The bearer token value to be used to authenticate requests [required].
+    BearerToken string
+}
+
+// NewBearerTokenAuthenticator constructs a new BearerTokenAuthenticator instance.
+func NewBearerTokenAuthenticator(bearerToken string) (*BearerTokenAuthenticator, error) {
+    obj := &BearerTokenAuthenticator{
+        BearerToken: bearerToken,
+    }
+    if err := obj.Validate(); err != nil {
+        return nil, err
+    }
+    return obj, nil
+}
+
+// newBearerTokenAuthenticatorFromMap constructs a new BearerTokenAuthenticator instance from a map.
+func newBearerTokenAuthenticatorFromMap(properties map[string]string) (*BearerTokenAuthenticator, error) {
+    if properties == nil {
+        return nil, fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+    }
+
+    return NewBearerTokenAuthenticator(properties[PROPNAME_BEARER_TOKEN])
+}
+
+// AuthenticationType returns the authentication type for this authenticator.
+func (BearerTokenAuthenticator) AuthenticationType() string {
+    return AUTHTYPE_BEARER_TOKEN
+}
+
+// Authenticate adds bearer authentication information to the request.
+//
+// The bearer token will be added to the request's headers in the form:
+//
+//	Authorization: Bearer <bearer-token>
+//
+func (this *BearerTokenAuthenticator) Authenticate(request *http.Request) error {
+    request.Header.Set("Authorization", fmt.Sprintf(`Bearer %s`, this.BearerToken))
+    return nil
+}
+
+// Validate the authenticator's configuration.
+//
+// Ensures the bearer token is not empty.
+func (this BearerTokenAuthenticator) Validate() error {
+    if this.BearerToken == "" {
+        return fmt.Errorf(ERRORMSG_PROP_MISSING, "BearerToken")
+    }
+    return nil
+}
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/config_utils.go b/vendor/github.com/IBM/go-sdk-core/v3/core/config_utils.go
new file mode 100644
index 00000000000..d4da980a1da
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v3/core/config_utils.go
@@ -0,0 +1,245 @@
+package core
+
+// (C) Copyright IBM Corp. 2019.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "bufio"
+    "encoding/json"
+    "fmt"
+    "os"
+    "path"
+    "strings"
+)
+
+const (
+    // IBM_CREDENTIAL_FILE_ENVVAR is the environment key used to find the path to
+    // a credentials file.
+    IBM_CREDENTIAL_FILE_ENVVAR = "IBM_CREDENTIALS_FILE"
+
+    // DEFAULT_CREDENTIAL_FILE_NAME is the default filename for a credentials file.
+    // It is used when "IBM_CREDENTIALS_FILE" is not specified. The file will be
+    // searched for within the program's working directory, and then within the
+    // current user's home directory.
+    DEFAULT_CREDENTIAL_FILE_NAME = "ibm-credentials.env"
+)
+
+// getServiceProperties: retrieves configuration properties for the specified service
+// from external config sources in the following precedence order:
+// 1) credential file
+// 2) environment variables
+// 3) VCAP_SERVICES
+func getServiceProperties(serviceName string) (serviceProps map[string]string, err error) {
+
+    if serviceName == "" {
+        err = fmt.Errorf("serviceName was not specified")
+        return
+    }
+
+    // First try to retrieve service properties from a credential file.
+    serviceProps = getServicePropertiesFromCredentialFile(serviceName)
+
+    // Next, try to retrieve them from environment variables.
+    if serviceProps == nil {
+        serviceProps = getServicePropertiesFromEnvironment(serviceName)
+    }
+
+    // Finally, try to retrieve them from VCAP_SERVICES.
+    if serviceProps == nil {
+        serviceProps = getServicePropertiesFromVCAP(serviceName)
+    }
+
+    return
+}
+
+// getServicePropertiesFromCredentialFile: returns a map containing properties found within a credential file
+// that are associated with the specified credentialKey. Returns a nil map if no properties are found.
+// Credential file search order:
+// 1) ${IBM_CREDENTIALS_FILE}
+// 2) <current-working-directory>/ibm-credentials.env
+// 3) <user-home-directory>/ibm-credentials.env
+func getServicePropertiesFromCredentialFile(credentialKey string) map[string]string {
+
+    // Check the search order for the credential file that we'll attempt to load:
+    var credentialFilePath string
+
+    // 1) ${IBM_CREDENTIALS_FILE}
+    envPath := os.Getenv(IBM_CREDENTIAL_FILE_ENVVAR)
+    if _, err := os.Stat(envPath); err == nil {
+        credentialFilePath = envPath
+    }
+
+    // 2) <current-working-directory>/ibm-credentials.env
+    if credentialFilePath == "" {
+        dir, _ := os.Getwd()
+        var filePath = path.Join(dir, DEFAULT_CREDENTIAL_FILE_NAME)
+        if _, err := os.Stat(filePath); err == nil {
+            credentialFilePath = filePath
+        }
+    }
+
+    // 3) <user-home-directory>/ibm-credentials.env
+    if credentialFilePath == "" {
+        var filePath = path.Join(UserHomeDir(), DEFAULT_CREDENTIAL_FILE_NAME)
+        if _, err := os.Stat(filePath); err == nil {
+            credentialFilePath = filePath
+        }
+    }
+
+    // If we found a file to load, then load it.
+    if credentialFilePath != "" {
+        file, err := os.Open(credentialFilePath)
+        if err != nil {
+            return nil
+        }
+        defer file.Close()
+
+        // Collect the contents of the credential file in a string array.
+        lines := make([]string, 0)
+        scanner := bufio.NewScanner(file)
+        for scanner.Scan() {
+            lines = append(lines, scanner.Text())
+        }
+
+        // Parse the file contents into name/value pairs.
+        return parsePropertyStrings(credentialKey, lines)
+    }
+
+    return nil
+}
+
+// getServicePropertiesFromEnvironment: returns a map containing properties found within the environment
+// that are associated with the specified credentialKey. Returns a nil map if no properties are found.
+func getServicePropertiesFromEnvironment(credentialKey string) map[string]string {
+    return parsePropertyStrings(credentialKey, os.Environ())
+}
+
+// getServicePropertiesFromVCAP: returns a map containing properties found within the VCAP_SERVICES
+// environment variable for the specified credentialKey (service name). Returns a nil map if no properties are found.
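+//
+// For example, with VCAP_SERVICES set to the following (illustrative values):
+//
+//	{"my-service":[{"name":"my-service","credentials":{"url":"https://example.com","apikey":"my-apikey"}}]}
+//
+// a credentialKey of "my-service" would yield the URL, APIKEY, and
+// AUTH_TYPE=iam properties.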
+func getServicePropertiesFromVCAP(credentialKey string) map[string]string {
+    credentials := loadFromVCAPServices(credentialKey)
+    if credentials != nil {
+        props := make(map[string]string)
+        if credentials.URL != "" {
+            props[PROPNAME_SVC_URL] = credentials.URL
+        }
+
+        if credentials.Username != "" {
+            props[PROPNAME_USERNAME] = credentials.Username
+        }
+
+        if credentials.Password != "" {
+            props[PROPNAME_PASSWORD] = credentials.Password
+        }
+
+        if credentials.APIKey != "" {
+            props[PROPNAME_APIKEY] = credentials.APIKey
+        }
+
+        // If no values were actually found in this credential entry, then bail out now.
+        if len(props) == 0 {
+            return nil
+        }
+
+        // Make a (hopefully good) guess at the auth type.
+        authType := ""
+        if props[PROPNAME_APIKEY] != "" {
+            authType = AUTHTYPE_IAM
+        } else if props[PROPNAME_USERNAME] != "" || props[PROPNAME_PASSWORD] != "" {
+            authType = AUTHTYPE_BASIC
+        } else {
+            authType = AUTHTYPE_IAM
+        }
+        props[PROPNAME_AUTH_TYPE] = authType
+
+        return props
+    }
+
+    return nil
+}
+
+// parsePropertyStrings: accepts an array of strings of the form "<name>=<value>" and parses/filters them to
+// produce a map of properties associated with the specified credentialKey.
+func parsePropertyStrings(credentialKey string, propertyStrings []string) map[string]string {
+    if len(propertyStrings) == 0 {
+        return nil
+    }
+
+    props := make(map[string]string)
+    credentialKey = strings.ToUpper(credentialKey)
+    credentialKey = strings.Replace(credentialKey, "-", "_", -1)
+    credentialKey += "_"
+    for _, propertyString := range propertyStrings {
+
+        // Trim the property string and ignore any blank or comment lines.
+        propertyString = strings.TrimSpace(propertyString)
+        if propertyString == "" || strings.HasPrefix(propertyString, "#") {
+            continue
+        }
+
+        // Parse the property string into name and value tokens.
+        var tokens = strings.Split(propertyString, "=")
+        if len(tokens) == 2 {
+            // Does the name start with the credential key?
+            // If so, then extract the property name by filtering out the credential key,
+            // then store the name/value pair in the map.
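+            // For example, after the key "my-service" is normalized to "MY_SERVICE_"
+            // above, the line "MY_SERVICE_APIKEY=my-apikey" is stored as APIKEY=my-apikey.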
+            if strings.HasPrefix(tokens[0], credentialKey) && (len(tokens[0]) > len(credentialKey)) {
+                name := tokens[0][len(credentialKey):]
+                value := strings.TrimSpace(tokens[1])
+                props[name] = value
+            }
+        }
+    }
+
+    if len(props) == 0 {
+        return nil
+    }
+    return props
+}
+
+// service : a single service entry within VCAP_SERVICES.
+type service struct {
+    Name        string      `json:"name,omitempty"`
+    Credentials *credential `json:"credentials,omitempty"`
+}
+
+// credential : the credentials associated with a service entry.
+type credential struct {
+    URL      string `json:"url,omitempty"`
+    Username string `json:"username,omitempty"`
+    Password string `json:"password,omitempty"`
+    APIKey   string `json:"apikey,omitempty"`
+}
+
+// loadFromVCAPServices : returns the credentials of the named service from VCAP_SERVICES.
+func loadFromVCAPServices(serviceName string) *credential {
+    vcapServices := os.Getenv("VCAP_SERVICES")
+    if vcapServices != "" {
+        var rawServices map[string][]service
+        if err := json.Unmarshal([]byte(vcapServices), &rawServices); err != nil {
+            return nil
+        }
+        for _, serviceEntries := range rawServices {
+            for _, service := range serviceEntries {
+                if service.Name == serviceName {
+                    return service.Credentials
+                }
+            }
+        }
+        if serviceList, exists := rawServices[serviceName]; exists && len(serviceList) > 0 {
+            return serviceList[0].Credentials
+        }
+    }
+    return nil
+}
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/constants.go b/vendor/github.com/IBM/go-sdk-core/v3/core/constants.go
new file mode 100644
index 00000000000..2e9deadc629
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v3/core/constants.go
@@ -0,0 +1,56 @@
+package core
+
+// (C) Copyright IBM Corp. 2019.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const (
+    // Supported authentication types.
+    AUTHTYPE_BASIC        = "basic"
+    AUTHTYPE_BEARER_TOKEN = "bearerToken"
+    AUTHTYPE_NOAUTH       = "noAuth"
+    AUTHTYPE_IAM          = "iam"
+    AUTHTYPE_CP4D         = "cp4d"
+
+    // Names of properties that can be defined as part of an external configuration (credential file, env vars, etc.).
+    // Example: export MYSERVICE_URL=https://myurl
+
+    // Service client properties.
+    PROPNAME_SVC_URL         = "URL"
+    PROPNAME_SVC_DISABLE_SSL = "DISABLE_SSL"
+
+    // Authenticator properties.
+    PROPNAME_AUTH_TYPE        = "AUTH_TYPE"
+    PROPNAME_USERNAME         = "USERNAME"
+    PROPNAME_PASSWORD         = "PASSWORD"
+    PROPNAME_BEARER_TOKEN     = "BEARER_TOKEN"
+    PROPNAME_AUTH_URL         = "AUTH_URL"
+    PROPNAME_AUTH_DISABLE_SSL = "AUTH_DISABLE_SSL"
+    PROPNAME_APIKEY           = "APIKEY"
+    PROPNAME_CLIENT_ID        = "CLIENT_ID"
+    PROPNAME_CLIENT_SECRET    = "CLIENT_SECRET"
+
+    // SSL error
+    SSL_CERTIFICATION_ERROR = "x509: certificate"
+
+    // Common error messages.
+    ERRORMSG_PROP_MISSING     = "The %s property is required but was not specified."
+    ERRORMSG_PROP_INVALID     = "The %s property is invalid. Please remove any surrounding {, }, or \" characters."
+    ERRORMSG_NO_AUTHENTICATOR = "Authentication information was not properly configured."
+    ERRORMSG_AUTHTYPE_UNKNOWN = "Unrecognized authentication type: %s"
+    ERRORMSG_PROPS_MAP_NIL    = "The 'properties' map cannot be nil."
+ ERRORMSG_SSL_VERIFICATION_FAILED = "The connection failed because the SSL certificate is not valid. To use a " + + "self-signed certificate, disable verification of the server's SSL certificate " + + "by invoking the DisableSSLVerification() function on your service instance " + + "and/or use the DisableSSLVerification option of the authenticator." +) diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/cp4d_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v3/core/cp4d_authenticator.go new file mode 100644 index 00000000000..c3c3e4cedde --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v3/core/cp4d_authenticator.go @@ -0,0 +1,338 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "sync" + "time" + + jwt "github.com/dgrijalva/jwt-go" +) + +// Constants for CP4D +const ( + PRE_AUTH_PATH = "/v1/preauth/validateAuth" +) + +// CloudPakForDataAuthenticator uses a username and password pair to obtain a +// suitable bearer token, and adds the bearer token to requests via an +// Authorization header of the form: +// +// Authorization: Bearer +// +type CloudPakForDataAuthenticator struct { + // The URL representing the Cloud Pak for Data token service endpoint [required]. + URL string + + // The username used to obtain a bearer token [required]. + Username string + + // The password used to obtain a bearer token [required]. + Password string + + // A flag that indicates whether verification of the server's SSL certificate + // should be disabled; defaults to false [optional]. + DisableSSLVerification bool + + // Default headers to be sent with every CP4D token request [optional]. + Headers map[string]string + + // The http.Client object used to invoke token server requests [optional]. If + // not specified, a suitable default Client will be constructed. + Client *http.Client + + // The cached token and expiration time. + tokenData *cp4dTokenData +} + +var cp4dRequestTokenMutex sync.Mutex +var cp4dNeedsRefreshMutex sync.Mutex + +// NewCloudPakForDataAuthenticator constructs a new CloudPakForDataAuthenticator +// instance. +func NewCloudPakForDataAuthenticator(url string, username string, password string, + disableSSLVerification bool, headers map[string]string) (*CloudPakForDataAuthenticator, error) { + + authenticator := &CloudPakForDataAuthenticator{ + Username: username, + Password: password, + URL: url, + DisableSSLVerification: disableSSLVerification, + Headers: headers, + } + + // Make sure the config is valid. + err := authenticator.Validate() + if err != nil { + return nil, err + } + + return authenticator, nil +} + +// newCloudPakForDataAuthenticatorFromMap : Constructs a new CloudPakForDataAuthenticator instance from a map. 
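+// The map is expected to supply the AUTH_URL, USERNAME, and PASSWORD properties,
+// plus (optionally) AUTH_DISABLE_SSL, as read from external configuration.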
+func newCloudPakForDataAuthenticatorFromMap(properties map[string]string) (*CloudPakForDataAuthenticator, error) { + if properties == nil { + return nil, fmt.Errorf(ERRORMSG_PROPS_MAP_NIL) + } + + disableSSL, err := strconv.ParseBool(properties[PROPNAME_AUTH_DISABLE_SSL]) + if err != nil { + disableSSL = false + } + return NewCloudPakForDataAuthenticator(properties[PROPNAME_AUTH_URL], + properties[PROPNAME_USERNAME], properties[PROPNAME_PASSWORD], + disableSSL, nil) +} + +// AuthenticationType returns the authentication type for this authenticator. +func (CloudPakForDataAuthenticator) AuthenticationType() string { + return AUTHTYPE_CP4D +} + +// Validate the authenticator's configuration. +// +// Ensures the username, password, and url are not Nil. Additionally, ensures +// they do not contain invalid characters. +func (authenticator CloudPakForDataAuthenticator) Validate() error { + + if authenticator.Username == "" { + return fmt.Errorf(ERRORMSG_PROP_MISSING, "Username") + } + + if authenticator.Password == "" { + return fmt.Errorf(ERRORMSG_PROP_MISSING, "Password") + } + + if authenticator.URL == "" { + return fmt.Errorf(ERRORMSG_PROP_MISSING, "URL") + } + + return nil +} + +// Authenticate adds the bearer token (obtained from the token server) to the +// specified request. +// +// The CP4D bearer token will be added to the request's headers in the form: +// +// Authorization: Bearer +// +func (authenticator *CloudPakForDataAuthenticator) Authenticate(request *http.Request) error { + token, err := authenticator.getToken() + if err != nil { + return err + } + + request.Header.Set("Authorization", fmt.Sprintf(`Bearer %s`, token)) + return nil +} + +// getToken: returns an access token to be used in an Authorization header. +// Whenever a new token is needed (when a token doesn't yet exist, needs to be refreshed, +// or the existing token has expired), a new access token is fetched from the token server. +func (authenticator *CloudPakForDataAuthenticator) getToken() (string, error) { + if authenticator.tokenData == nil || !authenticator.tokenData.isTokenValid() { + // synchronously request the token + err := authenticator.synchronizedRequestToken() + if err != nil { + return "", err + } + } else if authenticator.tokenData.needsRefresh() { + // If refresh needed, kick off a go routine in the background to get a new token + ch := make(chan error) + go func() { + ch <- authenticator.getTokenData() + }() + select { + case err := <-ch: + if err != nil { + return "", err + } + default: + } + } + + // return an error if the access token is not valid or was not fetched + if authenticator.tokenData == nil || authenticator.tokenData.AccessToken == "" { + return "", fmt.Errorf("Error while trying to get access token") + } + + return authenticator.tokenData.AccessToken, nil +} + +// synchronizedRequestToken: synchronously checks if the current token in cache +// is valid. If token is not valid or does not exist, it will fetch a new token +// and set the tokenRefreshTime +func (authenticator *CloudPakForDataAuthenticator) synchronizedRequestToken() error { + cp4dRequestTokenMutex.Lock() + defer cp4dRequestTokenMutex.Unlock() + // if cached token is still valid, then just continue to use it + if authenticator.tokenData != nil && authenticator.tokenData.isTokenValid() { + return nil + } + + return authenticator.getTokenData() +} + +// getTokenData: requests a new token from the access server and +// unmarshals the token information to the tokenData cache. 
Returns +// an error if the token was unable to be fetched, otherwise returns nil +func (authenticator *CloudPakForDataAuthenticator) getTokenData() error { + tokenResponse, err := authenticator.requestToken() + if err != nil { + return err + } + + authenticator.tokenData, err = newCp4dTokenData(tokenResponse) + if err != nil { + return err + } + + return nil +} + +// requestToken: fetches a new access token from the token server. +func (authenticator *CloudPakForDataAuthenticator) requestToken() (*cp4dTokenServerResponse, error) { + // If the user-specified URL does not end with the required path, + // then add it now. + url := authenticator.URL + if !strings.HasSuffix(url, PRE_AUTH_PATH) { + url = fmt.Sprintf("%s%s", url, PRE_AUTH_PATH) + } + + builder, err := NewRequestBuilder(GET).ConstructHTTPURL(url, nil, nil) + if err != nil { + return nil, err + } + + // Add user-defined headers to request. + for headerName, headerValue := range authenticator.Headers { + builder.AddHeader(headerName, headerValue) + } + + req, err := builder.Build() + if err != nil { + return nil, err + } + + req.SetBasicAuth(authenticator.Username, authenticator.Password) + + // If the authenticator does not have a Client, create one now. + if authenticator.Client == nil { + authenticator.Client = &http.Client{ + Timeout: time.Second * 30, + } + + // If the user told us to disable SSL verification, then do it now. + if authenticator.DisableSSLVerification { + transport := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + authenticator.Client.Transport = transport + } + } + + resp, err := authenticator.Client.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if resp != nil { + buff := new(bytes.Buffer) + _, _ = buff.ReadFrom(resp.Body) + return nil, fmt.Errorf(buff.String()) + } + } + + tokenResponse := &cp4dTokenServerResponse{} + _ = json.NewDecoder(resp.Body).Decode(tokenResponse) + defer resp.Body.Close() + return tokenResponse, nil +} + +// cp4dTokenServerResponse : This struct models a response received from the token server. +type cp4dTokenServerResponse struct { + Username string `json:"username,omitempty"` + Role string `json:"role,omitempty"` + Permissions []string `json:"permissions,omitempty"` + Subject string `json:"sub,omitempty"` + Issuer string `json:"iss,omitempty"` + Audience string `json:"aud,omitempty"` + UID string `json:"uid,omitempty"` + AccessToken string `json:"accessToken,omitempty"` + MessageCode string `json:"_messageCode_,omitempty"` + Message string `json:"message,omitempty"` +} + +// cp4dTokenData : This struct represents the cached information related to a fetched access token. +type cp4dTokenData struct { + AccessToken string + RefreshTime int64 + Expiration int64 +} + +// newCp4dTokenData: constructs a new Cp4dTokenData instance from the specified Cp4dTokenServerResponse instance. +func newCp4dTokenData(tokenResponse *cp4dTokenServerResponse) (*cp4dTokenData, error) { + // Need to crack open the access token (a JWToken) to get the expiration and issued-at times. 
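+    // (The token is parsed with a nil key function, so its signature is not
+    // verified here; only the claims needed to schedule the next refresh are read.)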
+ claims := &jwt.StandardClaims{} + if token, _ := jwt.ParseWithClaims(tokenResponse.AccessToken, claims, nil); token == nil { + return nil, fmt.Errorf("Error while trying to parse access token!") + } + // Compute the adjusted refresh time (expiration time - 20% of timeToLive) + timeToLive := claims.ExpiresAt - claims.IssuedAt + expireTime := claims.ExpiresAt + refreshTime := expireTime - int64(float64(timeToLive)*0.2) + + tokenData := &cp4dTokenData{ + AccessToken: tokenResponse.AccessToken, + Expiration: expireTime, + RefreshTime: refreshTime, + } + return tokenData, nil +} + +// isTokenValid: returns true iff the Cp4dTokenData instance represents a valid (non-expired) access token. +func (this *cp4dTokenData) isTokenValid() bool { + if this.AccessToken != "" && GetCurrentTime() < this.Expiration { + return true + } + return false +} + +// needsRefresh: synchronously returns true iff the currently stored access token should be refreshed. This method also +// updates the refresh time if it determines the token needs refreshed to prevent other threads from +// making multiple refresh calls. +func (this *cp4dTokenData) needsRefresh() bool { + cp4dNeedsRefreshMutex.Lock() + defer cp4dNeedsRefreshMutex.Unlock() + + // Advance refresh by one minute + if this.RefreshTime >= 0 && GetCurrentTime() > this.RefreshTime { + this.RefreshTime += 60 + return true + } + + return false + +} diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/detailed_response.go b/vendor/github.com/IBM/go-sdk-core/v3/core/detailed_response.go new file mode 100644 index 00000000000..1053fa9a21d --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v3/core/detailed_response.go @@ -0,0 +1,95 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// DetailedResponse holds the response information received from the server. +type DetailedResponse struct { + + // The HTTP status code associated with the response. + StatusCode int + + // The HTTP headers contained in the response. + Headers http.Header + + // Result - this field will contain the result of the operation (obtained from the response body). + // + // If the operation was successful and the response body contains a JSON response, it is un-marshalled + // into an object of the appropriate type (defined by the particular operation), and the Result field will contain + // this response object. If there was an error while un-marshalling the JSON response body, then the RawResult field + // will be set to the byte array containing the response body. + // + // Alternatively, if the generated SDK code passes in a result object which is an io.ReadCloser instance, + // the JSON un-marshalling step is bypassed and the response body is simply returned in the Result field. + // This scenario would occur in a situation where the SDK would like to provide a streaming model for large JSON + // objects. 
+ // + // If the operation was successful and the response body contains a non-JSON response, + // the Result field will be an instance of io.ReadCloser that can be used by generated SDK code + // (or the application) to read the response data. + // + // If the operation was unsuccessful and the response body contains a JSON error response, + // this field will contain an instance of map[string]interface{} which is the result of un-marshalling the + // response body as a "generic" JSON object. + // If the JSON response for an unsuccessful operation could not be properly un-marshalled, then the + // RawResult field will contain the raw response body. + Result interface{} + + // This field will contain the raw response body as a byte array under these conditions: + // 1) there was a problem un-marshalling a JSON response body - + // either for a successful or unsuccessful operation. + // 2) the operation was unsuccessful, and the response body contains a non-JSON response. + RawResult []byte +} + +// GetHeaders returns the headers +func (response *DetailedResponse) GetHeaders() http.Header { + return response.Headers +} + +// GetStatusCode returns the HTTP status code +func (response *DetailedResponse) GetStatusCode() int { + return response.StatusCode +} + +// GetResult returns the result from the service +func (response *DetailedResponse) GetResult() interface{} { + return response.Result +} + +// GetResultAsMap returns the result as a map (generic JSON object), if the +// DetailedResponse.Result field contains an instance of a map. +func (response *DetailedResponse) GetResultAsMap() (map[string]interface{}, bool) { + m, ok := response.Result.(map[string]interface{}) + return m, ok +} + +// GetRawResult returns the raw response body as a byte array. +func (response *DetailedResponse) GetRawResult() []byte { + return response.RawResult +} + +func (response *DetailedResponse) String() string { + output, err := json.MarshalIndent(response, "", " ") + if err == nil { + return fmt.Sprintf("%+v\n", string(output)) + } + return fmt.Sprintf("Response") +} diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/doc.go b/vendor/github.com/IBM/go-sdk-core/v3/core/doc.go new file mode 100644 index 00000000000..d4a65a4ed5c --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v3/core/doc.go @@ -0,0 +1,44 @@ +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package core contains functionality used by Go SDK's generated by the IBM +OpenAPI 3 SDK Generator (openapi-sdkgen). +Authenticators + +The go-sdk-core project supports the following types of authentication: + + Basic Authentication + Bearer Token + Identity and Access Management (IAM) + Cloud Pak for Data + No Authentication + +The authentication types that are appropriate for a particular service may +vary from service to service. Each authentication type is implemented as an +Authenticator for consumption by a service. 
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/doc.go b/vendor/github.com/IBM/go-sdk-core/v3/core/doc.go
new file mode 100644
index 00000000000..d4a65a4ed5c
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v3/core/doc.go
@@ -0,0 +1,44 @@
+// (C) Copyright IBM Corp. 2019.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package core contains functionality used by Go SDKs generated by the IBM
+OpenAPI 3 SDK Generator (openapi-sdkgen).
+
+Authenticators
+
+The go-sdk-core project supports the following types of authentication:
+
+    Basic Authentication
+    Bearer Token
+    Identity and Access Management (IAM)
+    Cloud Pak for Data
+    No Authentication
+
+The authentication types that are appropriate for a particular service may
+vary from service to service. Each authentication type is implemented as an
+Authenticator for consumption by a service. To read more about authenticators
+and how to use them see here:
+https://github.com/IBM/go-sdk-core/blob/master/Authentication.md
+
+Services
+
+Services are the API clients generated by the IBM OpenAPI 3 SDK
+Generator. These services make use of the code within the core package
+(BaseService instances) to perform service operations.
+*/
+package core
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/iam_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v3/core/iam_authenticator.go
new file mode 100644
index 00000000000..f0705b5e68b
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v3/core/iam_authenticator.go
@@ -0,0 +1,365 @@
+package core
+
+// (C) Copyright IBM Corp. 2019.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "bytes"
+    "crypto/tls"
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "strconv"
+    "sync"
+    "time"
+)
+
+// IamAuthenticator-related constants.
+const (
+    DEFAULT_IAM_URL             = "https://iam.cloud.ibm.com/identity/token"
+    DEFAULT_CONTENT_TYPE        = "application/x-www-form-urlencoded"
+    REQUEST_TOKEN_GRANT_TYPE    = "urn:ibm:params:oauth:grant-type:apikey"
+    REQUEST_TOKEN_RESPONSE_TYPE = "cloud_iam"
+)
+
+// IamAuthenticator uses an apikey to obtain a suitable bearer token value,
+// and adds the bearer token to requests via an Authorization header
+// of the form:
+//
+//    Authorization: Bearer <bearer-token>
+//
+type IamAuthenticator struct {
+
+    // The apikey used to fetch the bearer token from the IAM token server
+    // [required].
+    ApiKey string
+
+    // The URL representing the IAM token server's endpoint; if not specified,
+    // a suitable default value will be used [optional].
+    URL string
+
+    // The ClientId and ClientSecret fields are used to form a "basic auth"
+    // Authorization header for interactions with the IAM token server.
+    // If neither field is specified, then no Authorization header will be sent
+    // with token server requests [optional]. These fields are optional, but must
+    // be specified together.
+    ClientId string
+
+    // The ClientId and ClientSecret fields are used to form a "basic auth"
+    // Authorization header for interactions with the IAM token server.
+    // If neither field is specified, then no Authorization header will be sent
+    // with token server requests [optional]. These fields are optional, but must
+    // be specified together.
+    ClientSecret string
+
+    // A flag that indicates whether verification of the server's SSL certificate
+    // should be disabled; defaults to false [optional].
+    DisableSSLVerification bool
+
+    // [Optional] A set of key/value pairs that will be sent as HTTP headers in requests
+    // made to the token server.
+    Headers map[string]string
+
+    // [Optional] The http.Client object used to invoke token server requests.
+    // If not specified by the user, a suitable default Client will be constructed.
+    Client *http.Client
+
+    // The cached token and expiration time.
+    tokenData *iamTokenData
+}
+
+var iamRequestTokenMutex sync.Mutex
+var iamNeedsRefreshMutex sync.Mutex
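Putting the struct together with the constructor that follows: a minimal, hedged sketch of building an IamAuthenticator and applying it to an outbound request. The apikey and endpoint are placeholders, and the Authenticate call would contact the real IAM token server at runtime.

```go
package main

import (
    "fmt"
    "net/http"

    "github.com/IBM/go-sdk-core/v3/core"
)

func main() {
    // Empty URL/ClientId/ClientSecret fall back to the defaults described above.
    authenticator, err := core.NewIamAuthenticator("my-apikey", "", "", "", false, nil)
    if err != nil {
        panic(err)
    }

    req, _ := http.NewRequest(http.MethodGet, "https://api.example.cloud.ibm.com/v1/widgets", nil)

    // Fetches (and caches) a bearer token, then sets the Authorization header.
    if err := authenticator.Authenticate(req); err != nil {
        panic(err)
    }
    fmt.Println(req.Header.Get("Authorization")) // "Bearer <access-token>"
}
```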
+// NewIamAuthenticator constructs a new IamAuthenticator instance.
+func NewIamAuthenticator(apikey string, url string, clientId string, clientSecret string,
+    disableSSLVerification bool, headers map[string]string) (*IamAuthenticator, error) {
+    authenticator := &IamAuthenticator{
+        ApiKey:                 apikey,
+        URL:                    url,
+        ClientId:               clientId,
+        ClientSecret:           clientSecret,
+        DisableSSLVerification: disableSSLVerification,
+        Headers:                headers,
+    }
+
+    // Make sure the config is valid.
+    err := authenticator.Validate()
+    if err != nil {
+        return nil, err
+    }
+
+    return authenticator, nil
+}
+
+// newIamAuthenticatorFromMap constructs a new IamAuthenticator instance from a map.
+func newIamAuthenticatorFromMap(properties map[string]string) (*IamAuthenticator, error) {
+    if properties == nil {
+        return nil, fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+    }
+
+    disableSSL, err := strconv.ParseBool(properties[PROPNAME_AUTH_DISABLE_SSL])
+    if err != nil {
+        disableSSL = false
+    }
+    return NewIamAuthenticator(properties[PROPNAME_APIKEY], properties[PROPNAME_AUTH_URL],
+        properties[PROPNAME_CLIENT_ID], properties[PROPNAME_CLIENT_SECRET],
+        disableSSL, nil)
+}
+
+// AuthenticationType returns the authentication type for this authenticator.
+func (IamAuthenticator) AuthenticationType() string {
+    return AUTHTYPE_IAM
+}
+
+// Authenticate adds IAM authentication information to the request.
+//
+// The IAM bearer token will be added to the request's headers in the form:
+//
+//    Authorization: Bearer <bearer-token>
+//
+func (authenticator *IamAuthenticator) Authenticate(request *http.Request) error {
+    token, err := authenticator.getToken()
+    if err != nil {
+        return err
+    }
+
+    request.Header.Set("Authorization", fmt.Sprintf(`Bearer %s`, token))
+    return nil
+}
+
+// Validate validates the authenticator's configuration.
+//
+// Ensures the ApiKey is valid, and the ClientId and ClientSecret pair are
+// mutually inclusive.
+func (this IamAuthenticator) Validate() error {
+    if this.ApiKey == "" {
+        return fmt.Errorf(ERRORMSG_PROP_MISSING, "ApiKey")
+    }
+
+    if HasBadFirstOrLastChar(this.ApiKey) {
+        return fmt.Errorf(ERRORMSG_PROP_INVALID, "ApiKey")
+    }
+
+    // Validate ClientId and ClientSecret. They must both be specified together, or neither should be specified.
+    if this.ClientId == "" && this.ClientSecret == "" {
+        // Do nothing, as this is the valid scenario.
+    } else {
+        // Since it is NOT the case that both properties are empty, make sure BOTH are specified.
+        if this.ClientId == "" {
+            return fmt.Errorf(ERRORMSG_PROP_MISSING, "ClientId")
+        }
+
+        if this.ClientSecret == "" {
+            return fmt.Errorf(ERRORMSG_PROP_MISSING, "ClientSecret")
+        }
+    }
+
+    return nil
+}
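The mutually-inclusive ClientId/ClientSecret rule enforced by Validate, as a small hypothetical sketch (the credential values are placeholders):

```go
package main

import (
    "fmt"

    "github.com/IBM/go-sdk-core/v3/core"
)

func main() {
    // Only ClientId set: Validate fails with a non-nil error
    // (ERRORMSG_PROP_MISSING for ClientSecret).
    a := core.IamAuthenticator{ApiKey: "my-apikey", ClientId: "my-client-id"}
    fmt.Println(a.Validate() != nil) // true

    // Both set (or both empty): Validate passes.
    a.ClientSecret = "my-client-secret"
    fmt.Println(a.Validate()) // <nil>
}
```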
+// getToken: returns an access token to be used in an Authorization header.
+// Whenever a new token is needed (when a token doesn't yet exist, needs to be refreshed,
+// or the existing token has expired), a new access token is fetched from the token server.
+func (authenticator *IamAuthenticator) getToken() (string, error) {
+    if authenticator.tokenData == nil || !authenticator.tokenData.isTokenValid() {
+        // Synchronously request the token.
+        err := authenticator.synchronizedRequestToken()
+        if err != nil {
+            return "", err
+        }
+    } else if authenticator.tokenData.needsRefresh() {
+        // If a refresh is needed, kick off a goroutine in the background to get a new token.
+        // The channel is buffered so the goroutine cannot leak when the "default" branch is taken.
+        ch := make(chan error, 1)
+        go func() {
+            ch <- authenticator.getTokenData()
+        }()
+        select {
+        case err := <-ch:
+            if err != nil {
+                return "", err
+            }
+        default:
+        }
+    }
+
+    // Return an error if the access token is not valid or was not fetched.
+    if authenticator.tokenData == nil || authenticator.tokenData.AccessToken == "" {
+        return "", fmt.Errorf("error while trying to get the access token")
+    }
+
+    return authenticator.tokenData.AccessToken, nil
+}
+
+// synchronizedRequestToken: synchronously checks if the current token in the cache
+// is valid. If the token is not valid or does not exist, it will fetch a new token
+// and set the tokenRefreshTime.
+func (authenticator *IamAuthenticator) synchronizedRequestToken() error {
+    iamRequestTokenMutex.Lock()
+    defer iamRequestTokenMutex.Unlock()
+    // If the cached token is still valid, then just continue to use it.
+    if authenticator.tokenData != nil && authenticator.tokenData.isTokenValid() {
+        return nil
+    }
+
+    return authenticator.getTokenData()
+}
+
+// getTokenData: requests a new token from the access server and
+// unmarshals the token information to the tokenData cache. Returns
+// an error if the token was unable to be fetched, otherwise returns nil.
+func (authenticator *IamAuthenticator) getTokenData() error {
+    tokenResponse, err := authenticator.requestToken()
+    if err != nil {
+        return err
+    }
+
+    authenticator.tokenData, err = newIamTokenData(tokenResponse)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+// requestToken: fetches a new access token from the token server.
+func (authenticator *IamAuthenticator) requestToken() (*iamTokenServerResponse, error) {
+    // Use the default IAM URL if one was not specified by the user.
+    url := authenticator.URL
+    if url == "" {
+        url = DEFAULT_IAM_URL
+    }
+
+    builder := NewRequestBuilder(POST)
+    _, err := builder.ConstructHTTPURL(url, nil, nil)
+    if err != nil {
+        return nil, err
+    }
+
+    builder.AddHeader(CONTENT_TYPE, DEFAULT_CONTENT_TYPE).
+        AddHeader(Accept, APPLICATION_JSON).
+        AddFormData("grant_type", "", "", REQUEST_TOKEN_GRANT_TYPE).
+        AddFormData("apikey", "", "", authenticator.ApiKey).
+        AddFormData("response_type", "", "", REQUEST_TOKEN_RESPONSE_TYPE)
+
+    // Add user-defined headers to the request.
+    for headerName, headerValue := range authenticator.Headers {
+        builder.AddHeader(headerName, headerValue)
+    }
+
+    req, err := builder.Build()
+    if err != nil {
+        return nil, err
+    }
+
+    // If a client id and secret were configured by the user, then set them on the request
+    // as a basic auth header.
+    if authenticator.ClientId != "" && authenticator.ClientSecret != "" {
+        req.SetBasicAuth(authenticator.ClientId, authenticator.ClientSecret)
+    }
+
+    // If the authenticator does not have a Client, create one now.
+    if authenticator.Client == nil {
+        authenticator.Client = &http.Client{
+            Timeout: time.Second * 30,
+        }
+
+        // If the user told us to disable SSL verification, then do it now.
+        if authenticator.DisableSSLVerification {
+            transport := &http.Transport{
+                TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+            }
+            authenticator.Client.Transport = transport
+        }
+    }
+
+    resp, err := authenticator.Client.Do(req)
+    if err != nil {
+        return nil, err
+    }
+
+    if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+        // Read the error response body so it can be surfaced to the caller.
+        buff := new(bytes.Buffer)
+        _, _ = buff.ReadFrom(resp.Body)
+        _ = resp.Body.Close()
+        return nil, fmt.Errorf("%s", buff.String())
+    }
+
+    tokenResponse := &iamTokenServerResponse{}
+    _ = json.NewDecoder(resp.Body).Decode(tokenResponse)
+    defer resp.Body.Close()
+    return tokenResponse, nil
+}
+
+// iamTokenServerResponse : This struct models a response received from the token server.
+type iamTokenServerResponse struct {
+    AccessToken  string `json:"access_token"`
+    RefreshToken string `json:"refresh_token"`
+    TokenType    string `json:"token_type"`
+    ExpiresIn    int64  `json:"expires_in"`
+    Expiration   int64  `json:"expiration"`
+}
+
+// iamTokenData : This struct represents the cached information related to a fetched access token.
+type iamTokenData struct {
+    AccessToken string
+    RefreshTime int64
+    Expiration  int64
+}
+
+// newIamTokenData: constructs a new iamTokenData instance from the specified iamTokenServerResponse instance.
+func newIamTokenData(tokenResponse *iamTokenServerResponse) (*iamTokenData, error) {
+    if tokenResponse == nil {
+        return nil, fmt.Errorf("error while trying to parse the access token")
+    }
+    // Compute the adjusted refresh time (expiration time - 20% of timeToLive).
+    timeToLive := tokenResponse.ExpiresIn
+    expireTime := tokenResponse.Expiration
+    refreshTime := expireTime - int64(float64(timeToLive)*0.2)
+
+    tokenData := &iamTokenData{
+        AccessToken: tokenResponse.AccessToken,
+        Expiration:  expireTime,
+        RefreshTime: refreshTime,
+    }
+
+    return tokenData, nil
+}
+
+// isTokenValid: returns true iff the iamTokenData instance represents a valid (non-expired) access token.
+func (this *iamTokenData) isTokenValid() bool {
+    return this.AccessToken != "" && GetCurrentTime() < this.Expiration
+}
+
+// needsRefresh: synchronously returns true iff the currently stored access token should be refreshed.
+// This method also updates the refresh time if it determines the token needs to be refreshed,
+// to prevent other threads from making multiple refresh calls.
+func (this *iamTokenData) needsRefresh() bool {
+    iamNeedsRefreshMutex.Lock()
+    defer iamNeedsRefreshMutex.Unlock()
+
+    // Advance the refresh time by one minute.
+    if this.RefreshTime >= 0 && GetCurrentTime() > this.RefreshTime {
+        this.RefreshTime += 60
+        return true
+    }
+
+    return false
+}
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/noauth_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v3/core/noauth_authenticator.go
new file mode 100644
index 00000000000..f77565a6acd
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v3/core/noauth_authenticator.go
@@ -0,0 +1,41 @@
+package core
+
+// (C) Copyright IBM Corp. 2019.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/http" +) + +// NoAuthAuthenticator is simply a placeholder implementation of the Authenticator interface +// that performs no authentication. This might be useful in testing/debugging situations. +type NoAuthAuthenticator struct { +} + +func NewNoAuthAuthenticator() (*NoAuthAuthenticator, error) { + return &NoAuthAuthenticator{}, nil +} + +func (NoAuthAuthenticator) AuthenticationType() string { + return AUTHTYPE_NOAUTH +} + +func (NoAuthAuthenticator) Validate() error { + return nil +} + +func (this *NoAuthAuthenticator) Authenticate(request *http.Request) error { + // Nothing to do since we're not providing any authentication. + return nil +} diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/request_builder.go b/vendor/github.com/IBM/go-sdk-core/v3/core/request_builder.go new file mode 100644 index 00000000000..74704dbff40 --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v3/core/request_builder.go @@ -0,0 +1,294 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" +) + +// common HTTP methods +const ( + POST = http.MethodPost + GET = http.MethodGet + DELETE = http.MethodDelete + PUT = http.MethodPut + PATCH = http.MethodPatch + HEAD = http.MethodHead +) + +// common headers +const ( + Accept = "Accept" + APPLICATION_JSON = "application/json" + CONTENT_DISPOSITION = "Content-Disposition" + CONTENT_TYPE = "Content-Type" + FORM_URL_ENCODED_HEADER = "application/x-www-form-urlencoded" + + ERRORMSG_SERVICE_URL_MISSING = "The service URL is required." + ERRORMSG_SERVICE_URL_INVALID = "There was an error parsing the service URL: %s" +) + +// FormData stores information for form data. +type FormData struct { + fileName string + contentType string + contents interface{} +} + +// RequestBuilder is used to build an HTTP Request instance. +type RequestBuilder struct { + Method string + URL *url.URL + Header http.Header + Body io.Reader + Query map[string][]string + Form map[string][]FormData +} + +// NewRequestBuilder initiates a new request. +func NewRequestBuilder(method string) *RequestBuilder { + return &RequestBuilder{ + Method: method, + Header: make(http.Header), + Query: make(map[string][]string), + Form: make(map[string][]FormData), + } +} + +// ConstructHTTPURL creates a properly-encoded URL with path parameters. +// This function returns an error if the serviceURL is "" or is an +// invalid URL string (e.g. ":"). 
+func (requestBuilder *RequestBuilder) ConstructHTTPURL(serviceURL string, pathSegments []string, pathParameters []string) (*RequestBuilder, error) {
+    if serviceURL == "" {
+        return requestBuilder, fmt.Errorf(ERRORMSG_SERVICE_URL_MISSING)
+    }
+
+    URL, err := url.Parse(serviceURL)
+    if err != nil {
+        return requestBuilder, fmt.Errorf(ERRORMSG_SERVICE_URL_INVALID, err.Error())
+    }
+
+    for i, pathSegment := range pathSegments {
+        if pathSegment != "" {
+            URL.Path += "/" + pathSegment
+        }
+
+        if pathParameters != nil && i < len(pathParameters) {
+            URL.Path += "/" + pathParameters[i]
+        }
+    }
+    requestBuilder.URL = URL
+    return requestBuilder, nil
+}
+
+// AddQuery adds a query parameter name and value to the request.
+func (requestBuilder *RequestBuilder) AddQuery(name string, value string) *RequestBuilder {
+    requestBuilder.Query[name] = append(requestBuilder.Query[name], value)
+    return requestBuilder
+}
+
+// AddHeader adds a header name and value to the request.
+func (requestBuilder *RequestBuilder) AddHeader(name string, value string) *RequestBuilder {
+    requestBuilder.Header[name] = []string{value}
+    return requestBuilder
+}
+
+// AddFormData adds a new mime part (constructed from the input parameters)
+// to the request's multi-part form.
+func (requestBuilder *RequestBuilder) AddFormData(fieldName string, fileName string, contentType string,
+    contents interface{}) *RequestBuilder {
+    if fileName == "" {
+        if file, ok := contents.(*os.File); ok {
+            if !((os.File{}) == *file) { // if the file is not empty, default the part's file name to its base name
+                name := filepath.Base(file.Name())
+                fileName = name
+            }
+        }
+    }
+    requestBuilder.Form[fieldName] = append(requestBuilder.Form[fieldName], FormData{
+        fileName:    fileName,
+        contentType: contentType,
+        contents:    contents,
+    })
+    return requestBuilder
+}
+
+// SetBodyContentJSON sets the body content from a JSON structure.
+func (requestBuilder *RequestBuilder) SetBodyContentJSON(bodyContent interface{}) (*RequestBuilder, error) {
+    requestBuilder.Body = new(bytes.Buffer)
+    err := json.NewEncoder(requestBuilder.Body.(io.Writer)).Encode(bodyContent)
+    return requestBuilder, err
+}
+
+// SetBodyContentString sets the body content from a string.
+func (requestBuilder *RequestBuilder) SetBodyContentString(bodyContent string) (*RequestBuilder, error) {
+    requestBuilder.Body = strings.NewReader(bodyContent)
+    return requestBuilder, nil
+}
+
+// SetBodyContentStream sets the body content from an io.Reader instance.
+func (requestBuilder *RequestBuilder) SetBodyContentStream(bodyContent io.Reader) (*RequestBuilder, error) {
+    requestBuilder.Body = bodyContent
+    return requestBuilder, nil
+}
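Taken together, the builder methods above compose like this. A hedged sketch with a made-up endpoint and payload, showing how ConstructHTTPURL interleaves pathSegments and pathParameters:

```go
package main

import (
    "fmt"

    "github.com/IBM/go-sdk-core/v3/core"
)

func main() {
    builder := core.NewRequestBuilder(core.POST)

    // Segments and parameters interleave: the result here is /v1/widgets/abc-123/tags.
    _, err := builder.ConstructHTTPURL("https://api.example.com",
        []string{"v1/widgets", "tags"}, []string{"abc-123"})
    if err != nil {
        panic(err)
    }

    builder.AddQuery("limit", "10").
        AddHeader(core.CONTENT_TYPE, core.APPLICATION_JSON)

    if _, err := builder.SetBodyContentJSON(map[string]string{"name": "blue"}); err != nil {
        panic(err)
    }

    req, err := builder.Build() // *http.Request, ready to hand to an authenticator
    if err != nil {
        panic(err)
    }
    fmt.Println(req.Method, req.URL.String()) // POST https://api.example.com/v1/widgets/abc-123/tags?limit=10
}
```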
+// createMultipartWriter initializes a new multipart writer using the request body as its destination.
+func (requestBuilder *RequestBuilder) createMultipartWriter() *multipart.Writer {
+    buff := new(bytes.Buffer)
+    requestBuilder.Body = buff
+    return multipart.NewWriter(buff)
+}
+
+// createFormFile is a convenience wrapper around multipart.Writer.CreatePart. It creates
+// a new form-data header with the provided field name, file name, and content type.
+func createFormFile(formWriter *multipart.Writer, fieldname string, filename string, contentType string) (io.Writer, error) {
+    h := make(textproto.MIMEHeader)
+    contentDisposition := fmt.Sprintf(`form-data; name="%s"`, fieldname)
+    if filename != "" {
+        contentDisposition += fmt.Sprintf(`; filename="%s"`, filename)
+    }
+
+    h.Set(CONTENT_DISPOSITION, contentDisposition)
+    if contentType != "" {
+        h.Set(CONTENT_TYPE, contentType)
+    }
+
+    return formWriter.CreatePart(h)
+}
+
+// SetBodyContentForMultipart sets the body content for a part in a multi-part form.
+func (requestBuilder *RequestBuilder) SetBodyContentForMultipart(contentType string, content interface{}, writer io.Writer) error {
+    var err error
+    if stream, ok := content.(io.Reader); ok {
+        _, err = io.Copy(writer, stream)
+    } else if stream, ok := content.(*io.ReadCloser); ok {
+        _, err = io.Copy(writer, *stream)
+    } else if IsJSONMimeType(contentType) || IsJSONPatchMimeType(contentType) {
+        err = json.NewEncoder(writer).Encode(content)
+    } else if str, ok := content.(string); ok {
+        _, err = writer.Write([]byte(str))
+    } else if strPtr, ok := content.(*string); ok {
+        _, err = writer.Write([]byte(*strPtr))
+    } else {
+        err = fmt.Errorf("unable to determine the type of 'content' provided")
+    }
+    return err
+}
+
+// Build builds an HTTP Request object from this RequestBuilder instance.
+func (requestBuilder *RequestBuilder) Build() (*http.Request, error) {
+    // Create the form data, if any was specified.
+    if len(requestBuilder.Form) > 0 {
+        // Handle both application/x-www-form-urlencoded and multipart/form-data.
+        contentType := requestBuilder.Header.Get(CONTENT_TYPE)
+        if contentType == FORM_URL_ENCODED_HEADER {
+            data := url.Values{}
+            for fieldName, l := range requestBuilder.Form {
+                for _, v := range l {
+                    data.Add(fieldName, v.contents.(string))
+                }
+            }
+            _, err := requestBuilder.SetBodyContentString(data.Encode())
+            if err != nil {
+                return nil, err
+            }
+        } else {
+            formWriter := requestBuilder.createMultipartWriter()
+            for fieldName, l := range requestBuilder.Form {
+                for _, v := range l {
+                    dataPartWriter, err := createFormFile(formWriter, fieldName, v.fileName, v.contentType)
+                    if err != nil {
+                        return nil, err
+                    }
+                    if err = requestBuilder.SetBodyContentForMultipart(v.contentType,
+                        v.contents, dataPartWriter); err != nil {
+                        return nil, err
+                    }
+                }
+            }
+
+            requestBuilder.AddHeader(CONTENT_TYPE, formWriter.FormDataContentType())
+            err := formWriter.Close()
+            if err != nil {
+                return nil, err
+            }
+        }
+    }
+
+    // Create the request.
+    req, err := http.NewRequest(requestBuilder.Method, requestBuilder.URL.String(), requestBuilder.Body)
+    if err != nil {
+        return nil, err
+    }
+
+    // Headers.
+    req.Header = requestBuilder.Header
+
+    // Query parameters.
+    query := req.URL.Query()
+    for k, l := range requestBuilder.Query {
+        for _, v := range l {
+            query.Add(k, v)
+        }
+    }
+    // Encode the query.
+    req.URL.RawQuery = query.Encode()
+
+    return req, nil
+}
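Build (just above) branches on the request's Content-Type: url-encoded form fields are flattened into the body, while anything else becomes a multipart payload assembled via createMultipartWriter/createFormFile. A hedged sketch of both paths (the URLs and field values are illustrative):

```go
package main

import (
    "fmt"
    "strings"

    "github.com/IBM/go-sdk-core/v3/core"
)

func main() {
    // Path 1: url-encoded — string contents are flattened into the body.
    form := core.NewRequestBuilder(core.POST)
    _, _ = form.ConstructHTTPURL("https://iam.example.com/identity/token", nil, nil)
    form.AddHeader(core.CONTENT_TYPE, core.FORM_URL_ENCODED_HEADER).
        AddFormData("grant_type", "", "", "urn:ibm:params:oauth:grant-type:apikey")
    req1, _ := form.Build()
    fmt.Println(req1.Header.Get(core.CONTENT_TYPE)) // application/x-www-form-urlencoded

    // Path 2: anything else — each FormData entry becomes one MIME part.
    upload := core.NewRequestBuilder(core.POST)
    _, _ = upload.ConstructHTTPURL("https://api.example.com", []string{"v1/files"}, nil)
    upload.AddFormData("file", "report.txt", "text/plain", strings.NewReader("hello"))
    req2, _ := upload.Build()
    fmt.Println(req2.Header.Get(core.CONTENT_TYPE)) // multipart/form-data; boundary=...
}
```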
+// SetBodyContent sets the body content from one of three different sources.
+func (requestBuilder *RequestBuilder) SetBodyContent(contentType string, jsonContent interface{}, jsonPatchContent interface{},
+    nonJSONContent interface{}) (builder *RequestBuilder, err error) {
+    if jsonContent != nil {
+        builder, err = requestBuilder.SetBodyContentJSON(jsonContent)
+        if err != nil {
+            return
+        }
+    } else if jsonPatchContent != nil {
+        builder, err = requestBuilder.SetBodyContentJSON(jsonPatchContent)
+        if err != nil {
+            return
+        }
+    } else {
+        // Set the non-JSON body content based on the type of value passed in,
+        // which should be a "string", "*string" or an "io.Reader".
+        if str, ok := nonJSONContent.(string); ok {
+            builder, err = requestBuilder.SetBodyContentString(str)
+        } else if strPtr, ok := nonJSONContent.(*string); ok {
+            builder, err = requestBuilder.SetBodyContentString(*strPtr)
+        } else if stream, ok := nonJSONContent.(io.Reader); ok {
+            builder, err = requestBuilder.SetBodyContentStream(stream)
+        } else if stream, ok := nonJSONContent.(*io.ReadCloser); ok {
+            builder, err = requestBuilder.SetBodyContentStream(*stream)
+        } else {
+            builder = requestBuilder
+            err = fmt.Errorf("invalid type for non-JSON body content: %s", reflect.TypeOf(nonJSONContent).String())
+        }
+    }
+    return
+}
diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/unmarshal.go b/vendor/github.com/IBM/go-sdk-core/v3/core/unmarshal.go
new file mode 100644
index 00000000000..4e45433bbb0
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v3/core/unmarshal.go
@@ -0,0 +1,1137 @@
+/**
+ * (C) Copyright IBM Corp. 2020.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package core
+
+import (
+    "encoding/base64"
+    "fmt"
+    "github.com/go-openapi/strfmt"
+    "reflect"
+)
+
+//
+// This file contains a set of methods that are used by generated code to unmarshal common data types.
+// Specifically, these methods are designed to fit within the unmarshalling framework implemented by the Go generator.
+// For user-defined models, the Go generator will emit code that first unmarshals the operation response body into a
+// generic map (or a slice of maps for a response which is a JSON array), then it invokes the appropriate generated
+// "Unmarshal<type>()" method to transform the map into an instance of the model (struct). The generated
+// "Unmarshal<type>()" methods use the methods in this file to handle primitive and other common data types.
+//
+// In the methods below that deal with numeric values, they first cast the data value to a "float64" value, then
+// eventually transform the float64 value into the appropriate numeric type (int64, float32, etc.).
+// This is done because the json unmarshaller unmarshals all JSON numbers into float64 values within the generic
+// map.
+//
+// For each data type, there are two methods - Unmarshal<type>() and Unmarshal<type>Slice().
+//
+// Each Unmarshal<type>() method will retrieve the specified property from the generic map, and then return
+// an appropriate value that can then be assigned to a field within a generated struct of that type.
+// For example, UnmarshalString() will return a pointer to the string value from the generic map, UnmarshalInt64()
+// will return a pointer to the int64 value, etc.
+//
+// Each Unmarshal<type>Slice() method will retrieve the specified property from the generic map and interpret it as
+// a slice of <type>. Within these methods, we first cast the value retrieved from the map as a slice of interface{}
+// (i.e. a slice of anything), then we walk through the slice and cast each element to the appropriate type.
+//
+// Some of the Unmarshal<type>() and Unmarshal<type>Slice() methods perform an additional transformation on the
+// data values retrieved from the generic map. For example, the UnmarshalByteArray() method will interpret the
+// JSON value as a string, then perform a base64-decoding of the string to produce the resulting byte array ([]byte).
+//
+
+// Error message constants.
+const (
+    errorPropertyValue    = "property '%s' value error: %s"
+    errorPropValueType    = "the '%s' property value should be a %s but was %s"
+    errorValueType        = "value should be a %s but was %s"
+    errorNotAnArray       = "the '%s' property value should be an array but was %s"
+    errorNotAMap          = "value should be a map[string]%s but was %s"
+    errorElementValueType = "the '%s' property array element should be a %s but was %s"
+    errorDecodeBase64     = "error decoding base64-encoded string value '%s': %s"
+    errorDecodeDate       = "error decoding Date value '%s': %s"
+    errorDecodeDateTime   = "error decoding DateTime value '%s': %s"
+)
+
+// CopyMap returns a shallow copy of the specified map.
+// The returned map contains a copy of the mappings found in the original map,
+// but uses values from the original map.
+func CopyMap(m map[string]interface{}) map[string]interface{} {
+    newMap := make(map[string]interface{})
+    for k, v := range m {
+        newMap[k] = v
+    }
+    return newMap
+}
+
+// UnmarshalString retrieves the specified property from the map and returns it as a string,
+// or nil if the property wasn't found in the map.
+func UnmarshalString(m map[string]interface{}, propertyName string) (result *string, err error) {
+    var v interface{}
+    v, foundIt := m[propertyName]
+    if foundIt && v != nil {
+        // Interpret the map value as a string.
+        s, ok := v.(string)
+        if ok {
+            result = StringPtr(s)
+        } else {
+            err = fmt.Errorf(errorPropValueType, propertyName, "string", reflect.TypeOf(v).String())
+        }
+    }
+    return
+}
+
+// UnmarshalStringSlice retrieves the specified property from the map and returns it as a string slice,
+// or nil if the property wasn't found in the map.
+func UnmarshalStringSlice(m map[string]interface{}, propertyName string) (slice []string, err error) {
+    var v interface{}
+    v, foundIt := m[propertyName]
+    if foundIt && v != nil {
+        // Interpret the map value as a slice of anything.
+        vSlice, ok := v.([]interface{})
+        if !ok {
+            err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String())
+            return
+        }
+        for _, element := range vSlice {
+            // Interpret each slice element as a string and add it to the result slice.
+            s, ok := element.(string)
+            if !ok {
+                err = fmt.Errorf(errorElementValueType, propertyName, "string", reflect.TypeOf(element).String())
+                return
+            }
+            slice = append(slice, s)
+        }
+    }
+    return
+}
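To tie the framework comment above to these helpers: generated code first unmarshals into a generic map, then pulls typed fields out of it. A hedged sketch with a hypothetical Widget model (UnmarshalInt64 is defined later in this file; JSON numbers arrive in the map as float64 and are converted there):

```go
package main

import (
    "encoding/json"
    "fmt"

    "github.com/IBM/go-sdk-core/v3/core"
)

// Widget is a hypothetical generated model; UnmarshalWidget mirrors the shape
// of a generated Unmarshal<type>() function.
type Widget struct {
    Name  *string
    Count *int64
}

func UnmarshalWidget(m map[string]interface{}) (result *Widget, err error) {
    obj := &Widget{}
    if obj.Name, err = core.UnmarshalString(m, "name"); err != nil {
        return
    }
    if obj.Count, err = core.UnmarshalInt64(m, "count"); err != nil {
        return
    }
    result = obj
    return
}

func main() {
    var m map[string]interface{}
    _ = json.Unmarshal([]byte(`{"name":"gear","count":3}`), &m)
    w, _ := UnmarshalWidget(m)
    fmt.Println(*w.Name, *w.Count) // gear 3
}
```

+// assertMapOfString will return value "v" as a map[string]string.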
+func assertMapOfString(v interface{}) (result map[string]string, err error) { + m, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorNotAMap, "string", reflect.TypeOf(v).String()) + return + } + + r := make(map[string]string) + for key, value := range m { + newValue, ok := value.(string) + if ok { + r[key] = newValue + } else { + err = fmt.Errorf(errorValueType, "string", reflect.TypeOf(value).String()) + return + } + } + result = r + return +} + +// UnmarshalStringMap retrieves the specified property from the map "m" and returns it +// as a map[string]string, or nil if the property wasn't found in the map. +func UnmarshalStringMap(m map[string]interface{}, propertyName string) (result map[string]string, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result, err = assertMapOfString(v) + if err != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, err.Error()) + } + } + return +} + +// UnmarshalStringMapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string]string, or nil if the property wasn't found in the map. +func UnmarshalStringMapSlice(m map[string]interface{}, propertyName string) (slice []map[string]string, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + elementAsMap, localErr := assertMapOfString(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + slice = append(slice, elementAsMap) + } + } + return +} + +// decodeBase64String will convert "v" to a string via type assertion, then base64-decode it into a []byte. +func decodeBase64String(v interface{}) (result *[]byte, err error) { + // Interpret the value as a string. + s, ok := v.(string) + if ok { + // Decode the string into a byte array. + ba, localErr := base64.StdEncoding.DecodeString(s) + if localErr != nil { + err = fmt.Errorf(errorDecodeBase64, truncateString(s, 16), localErr.Error()) + return + } else { + result = &ba + } + } else { + err = fmt.Errorf(errorValueType, "base64-encoded string", reflect.TypeOf(v).String()) + } + return +} + +// UnmarshalByteArray retrieves the specified property from the map and returns it as a byte array +// or nil if the property wasn't found in the map. +func UnmarshalByteArray(m map[string]interface{}, propertyName string) (result *[]byte, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + ba, localErr := decodeBase64String(v) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + result = ba + } + return +} + +// UnmarshalByteArraySlice retrieves the specified property from the map and returns it as a byte array slice +// or nil if the property wasn't found in the map. +func UnmarshalByteArraySlice(m map[string]interface{}, propertyName string) (slice [][]byte, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. 
+ vSlice, ok := v.([]interface{}) + if ok { + for _, element := range vSlice { + ba, localErr := decodeBase64String(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } else { + slice = append(slice, *ba) + } + } + } else { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + } + } + return +} + +// assertMapOfByteArray will return value "v" as a map[string][]byte. +func assertMapOfByteArray(v interface{}) (result map[string][]byte, err error) { + m, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorNotAMap, "string", reflect.TypeOf(v).String()) + return + } + + r := make(map[string][]byte) + for key, value := range m { + ba, localErr := decodeBase64String(value) + if localErr != nil { + err = localErr + return + } else { + r[key] = *ba + } + } + result = r + return +} + +// UnmarshalByteArrayMap retrieves the specified property from the map "m" and returns it +// as a map[string][]byte, or nil if the property wasn't found in the map. +func UnmarshalByteArrayMap(m map[string]interface{}, propertyName string) (result map[string][]byte, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result, err = assertMapOfByteArray(v) + if err != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, err.Error()) + result = nil + } + } + return +} + +// UnmarshalByteArrayMapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string][]byte, or nil if the property wasn't found in the map. +func UnmarshalByteArrayMapSlice(m map[string]interface{}, propertyName string) (slice []map[string][]byte, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + elementAsMap, localErr := assertMapOfByteArray(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + slice = append(slice, elementAsMap) + } + } + return +} + +// UnmarshalBool retrieves the specified property from the map and returns it as a bool +// or nil if the property wasn't found in the map. +func UnmarshalBool(m map[string]interface{}, propertyName string) (result *bool, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a bool. + b, ok := v.(bool) + if ok { + result = BoolPtr(b) + } else { + err = fmt.Errorf(errorPropValueType, propertyName, "boolean", reflect.TypeOf(v).String()) + } + } + return +} + +// UnmarshalBoolSlice retrieves the specified property from the map and returns it as a bool slice +// or nil if the property wasn't found in the map. +func UnmarshalBoolSlice(m map[string]interface{}, propertyName string) (slice []bool, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + // Interpret each slice element as a bool and add it to the result slice. 
+ b, ok := element.(bool) + if !ok { + err = fmt.Errorf(errorElementValueType, propertyName, "boolean", reflect.TypeOf(element).String()) + return + } + slice = append(slice, b) + } + } + return +} + +// assertMapOfBool will return value "v" as a map[string]bool. +func assertMapOfBool(v interface{}) (result map[string]bool, err error) { + m, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorNotAMap, "bool", reflect.TypeOf(v).String()) + return + } + + r := make(map[string]bool) + for key, value := range m { + newValue, ok := value.(bool) + if ok { + r[key] = newValue + } else { + err = fmt.Errorf(errorValueType, "bool", reflect.TypeOf(value).String()) + return + } + } + result = r + return +} + +// UnmarshalBoolMap retrieves the specified property from the map "m" and returns it +// as a map[string]bool, or nil if the property wasn't found in the map. +func UnmarshalBoolMap(m map[string]interface{}, propertyName string) (result map[string]bool, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result, err = assertMapOfBool(v) + if err != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, err.Error()) + } + } + return +} + +// UnmarshalBoolMapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string]bool, or nil if the property wasn't found in the map. +func UnmarshalBoolMapSlice(m map[string]interface{}, propertyName string) (slice []map[string]bool, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + elementAsMap, localErr := assertMapOfBool(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + slice = append(slice, elementAsMap) + } + } + return +} + +// UnmarshalInt64 retrieves the specified property from the map and returns it as an int64 +// or nil if the property wasn't found in the map. +func UnmarshalInt64(m map[string]interface{}, propertyName string) (result *int64, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a float64 to match the behavior of the JSON unmarshaller. + f, ok := v.(float64) + if ok { + // Convert the value to an int64 and return a pointer to it. + result = Int64Ptr(int64(f)) + } else { + err = fmt.Errorf(errorPropValueType, propertyName, "integer", reflect.TypeOf(v).String()) + } + } + return +} + +// UnmarshalInt64Slice retrieves the specified property from the map and returns it as an int64 slice +// or nil if the property wasn't found in the map. +func UnmarshalInt64Slice(m map[string]interface{}, propertyName string) (slice []int64, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + // Interpret each slice element as a float64, then convert it to an int64 and add it to the result slice. 
+ f, ok := element.(float64) + if !ok { + err = fmt.Errorf(errorElementValueType, propertyName, "integer", reflect.TypeOf(element).String()) + return + } + slice = append(slice, int64(f)) + } + } + return +} + +// assertMapOfInt64 will return value "v" as a map[string]int64. +func assertMapOfInt64(v interface{}) (result map[string]int64, err error) { + m, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorNotAMap, "int64", reflect.TypeOf(v).String()) + return + } + + r := make(map[string]int64) + for key, value := range m { + newValue, ok := value.(float64) + if ok { + r[key] = int64(newValue) + } else { + err = fmt.Errorf(errorValueType, "int64", reflect.TypeOf(value).String()) + return + } + } + result = r + return +} + +// UnmarshalInt64Map retrieves the specified property from the map "m" and returns it +// as a map[string]int64, or nil if the property wasn't found in the map. +func UnmarshalInt64Map(m map[string]interface{}, propertyName string) (result map[string]int64, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result, err = assertMapOfInt64(v) + if err != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, err.Error()) + } + } + return +} + +// UnmarshalInt64MapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string]int64, or nil if the property wasn't found in the map. +func UnmarshalInt64MapSlice(m map[string]interface{}, propertyName string) (slice []map[string]int64, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + elementAsMap, localErr := assertMapOfInt64(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + slice = append(slice, elementAsMap) + } + } + return +} + +// UnmarshalFloat32 retrieves the specified property from the map and returns it as a float32 +// or nil if the property wasn't found in the map. +func UnmarshalFloat32(m map[string]interface{}, propertyName string) (result *float32, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a float64 to match the behavior of the JSON unmarshaller. + f, ok := v.(float64) + if ok { + // Convert the value to a float32 and return a pointer to it. + result = Float32Ptr(float32(f)) + } else { + err = fmt.Errorf(errorPropValueType, propertyName, "float32", reflect.TypeOf(v).String()) + } + } + return +} + +// UnmarshalFloat32Slice retrieves the specified property from the map and returns it as a float32 slice +// or nil if the property wasn't found in the map. +func UnmarshalFloat32Slice(m map[string]interface{}, propertyName string) (slice []float32, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + // Interpret each slice element as a float64, then convert it to a float32 and add it to the result slice. 
+ f, ok := element.(float64) + if !ok { + err = fmt.Errorf(errorElementValueType, propertyName, "float32", reflect.TypeOf(element).String()) + return + } + slice = append(slice, float32(f)) + } + } + return +} + +// assertMapOfFloat32 will return value "v" as a map[string]float32. +func assertMapOfFloat32(v interface{}) (result map[string]float32, err error) { + m, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorNotAMap, "float32", reflect.TypeOf(v).String()) + return + } + + r := make(map[string]float32) + for key, value := range m { + newValue, ok := value.(float64) + if ok { + r[key] = float32(newValue) + } else { + err = fmt.Errorf(errorValueType, "float32", reflect.TypeOf(value).String()) + return + } + } + result = r + return +} + +// UnmarshalFloat32Map retrieves the specified property from the map "m" and returns it +// as a map[string]float32, or nil if the property wasn't found in the map. +func UnmarshalFloat32Map(m map[string]interface{}, propertyName string) (result map[string]float32, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result, err = assertMapOfFloat32(v) + if err != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, err.Error()) + } + } + return +} + +// UnmarshalFloat32MapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string]float32, or nil if the property wasn't found in the map. +func UnmarshalFloat32MapSlice(m map[string]interface{}, propertyName string) (slice []map[string]float32, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + elementAsMap, localErr := assertMapOfFloat32(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + slice = append(slice, elementAsMap) + } + } + return +} + +// UnmarshalFloat64 retrieves the specified property from the map and returns it as a float64 +// or nil if the property wasn't found in the map. +func UnmarshalFloat64(m map[string]interface{}, propertyName string) (result *float64, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a float64. + f, ok := v.(float64) + if ok { + result = Float64Ptr(f) + } else { + err = fmt.Errorf(errorPropValueType, propertyName, "float64", reflect.TypeOf(v).String()) + } + } + return +} + +// UnmarshalFloat64Slice retrieves the specified property from the map and returns it as a float64 slice +// or nil if the property wasn't found in the map. +func UnmarshalFloat64Slice(m map[string]interface{}, propertyName string) (slice []float64, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + // Interpret each slice element as a float64 and add it to the result slice. + f, ok := element.(float64) + if !ok { + err = fmt.Errorf(errorElementValueType, propertyName, "float64", reflect.TypeOf(element).String()) + return + } + slice = append(slice, f) + } + } + return +} + +// assertMapOfFloat64 will return value "v" as a map[string]float64. 
+func assertMapOfFloat64(v interface{}) (result map[string]float64, err error) { + m, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorNotAMap, "float64", reflect.TypeOf(v).String()) + return + } + + r := make(map[string]float64) + for key, value := range m { + newValue, ok := value.(float64) + if ok { + r[key] = newValue + } else { + err = fmt.Errorf(errorValueType, "float64", reflect.TypeOf(value).String()) + return + } + } + result = r + return +} + +// UnmarshalFloat64Map retrieves the specified property from the map "m" and returns it +// as a map[string]float64, or nil if the property wasn't found in the map. +func UnmarshalFloat64Map(m map[string]interface{}, propertyName string) (result map[string]float64, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result, err = assertMapOfFloat64(v) + if err != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, err.Error()) + } + } + return +} + +// UnmarshalFloat64MapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string]float64, or nil if the property wasn't found in the map. +func UnmarshalFloat64MapSlice(m map[string]interface{}, propertyName string) (slice []map[string]float64, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + elementAsMap, localErr := assertMapOfFloat64(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + slice = append(slice, elementAsMap) + } + } + return +} + +// UnmarshalUUID retrieves the specified property from the map and returns it as a UUID +// or nil if the property wasn't found in the map. +func UnmarshalUUID(m map[string]interface{}, propertyName string) (result *strfmt.UUID, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a string. + s, ok := v.(string) + if ok { + // Convert the string to a UUID. + uuid := strfmt.UUID(s) + result = &uuid + } else { + err = fmt.Errorf(errorPropValueType, propertyName, "UUID", reflect.TypeOf(v).String()) + } + } + return +} + +// UnmarshalUUIDSlice retrieves the specified property from the map and returns it as a UUID slice +// or nil if the property wasn't found in the map. +func UnmarshalUUIDSlice(m map[string]interface{}, propertyName string) (slice []strfmt.UUID, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + // Interpret each slice element as a string, then convert it to a UUID value and add it to the result slice. + s, ok := element.(string) + if !ok { + err = fmt.Errorf(errorElementValueType, propertyName, "UUID", reflect.TypeOf(element).String()) + return + } + slice = append(slice, strfmt.UUID(s)) + } + } + return +} + +// assertMapOfUUID will return value "v" as a map[string]strfmt.UUID. 
+func assertMapOfUUID(v interface{}) (result map[string]strfmt.UUID, err error) { + m, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorNotAMap, "UUID", reflect.TypeOf(v).String()) + return + } + + r := make(map[string]strfmt.UUID) + for key, value := range m { + newValue, ok := value.(string) + if ok { + r[key] = strfmt.UUID(newValue) + } else { + err = fmt.Errorf(errorValueType, "UUID", reflect.TypeOf(value).String()) + return + } + } + result = r + return +} + +// UnmarshalUUIDMap retrieves the specified property from the map "m" and returns it +// as a map[string]strfmt.UUID, or nil if the property wasn't found in the map. +func UnmarshalUUIDMap(m map[string]interface{}, propertyName string) (result map[string]strfmt.UUID, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result, err = assertMapOfUUID(v) + if err != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, err.Error()) + } + } + return +} + +// UnmarshalUUIDMapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string]strfmt.UUID, or nil if the property wasn't found in the map. +func UnmarshalUUIDMapSlice(m map[string]interface{}, propertyName string) (slice []map[string]strfmt.UUID, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + elementAsMap, localErr := assertMapOfUUID(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + slice = append(slice, elementAsMap) + } + } + return +} + +// decodeDate will convert "v" to a string via type assertion, then decode that +// into a strfmt.Date value. +func decodeDate(v interface{}) (result *strfmt.Date, err error) { + // Interpret the value as a string. + s, ok := v.(string) + if ok { + // Convert the string to a Date value. + var date strfmt.Date + localErr := date.UnmarshalText([]byte(s)) + if localErr != nil { + err = fmt.Errorf(errorDecodeDate, truncateString(s, 16), localErr.Error()) + } else { + result = &date + } + } else { + err = fmt.Errorf(errorValueType, "Date", reflect.TypeOf(v).String()) + } + return +} + +// UnmarshalDate retrieves the specified property from the map and returns it as a Date +// or nil if the property wasn't found in the map. +func UnmarshalDate(m map[string]interface{}, propertyName string) (result *strfmt.Date, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + date, localErr := decodeDate(v) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + } else { + result = date + } + } + return +} + +// UnmarshalDateSlice retrieves the specified property from the map and returns it as a Date slice +// or nil if the property wasn't found in the map. +func UnmarshalDateSlice(m map[string]interface{}, propertyName string) (slice []strfmt.Date, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. 
+ vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + date, localErr := decodeDate(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + } else { + slice = append(slice, *date) + } + } + } + return +} + +// assertMapOfDate will return value "v" as a map[string]strfmt.Date. +func assertMapOfDate(v interface{}) (result map[string]strfmt.Date, err error) { + m, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorNotAMap, "Date", reflect.TypeOf(v).String()) + return + } + + r := make(map[string]strfmt.Date) + for key, value := range m { + date, localErr := decodeDate(value) + if localErr != nil { + err = localErr + return + } else { + r[key] = *date + } + } + result = r + return +} + +// UnmarshalDateMap retrieves the specified property from the map "m" and returns it +// as a map[string]strfmt.Date, or nil if the property wasn't found in the map. +func UnmarshalDateMap(m map[string]interface{}, propertyName string) (result map[string]strfmt.Date, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result, err = assertMapOfDate(v) + if err != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, err.Error()) + } + } + return +} + +// UnmarshalDateMapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string]strfmt.Date, or nil if the property wasn't found in the map. +func UnmarshalDateMapSlice(m map[string]interface{}, propertyName string) (slice []map[string]strfmt.Date, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + elementAsMap, localErr := assertMapOfDate(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + slice = append(slice, elementAsMap) + } + } + return +} + +// decodeDateTime will convert "v" to a string via type assertion, then decode that +// into a strfmt.DateTime value. +func decodeDateTime(v interface{}) (result *strfmt.DateTime, err error) { + // Interpret the value as a string. + s, ok := v.(string) + if ok { + // Convert the string to a DateTime value. + var date strfmt.DateTime + localErr := date.UnmarshalText([]byte(s)) + if localErr != nil { + err = fmt.Errorf(errorDecodeDateTime, truncateString(s, 16), localErr.Error()) + } else { + result = &date + } + } else { + err = fmt.Errorf(errorValueType, "DateTime", reflect.TypeOf(v).String()) + } + return +} + +// UnmarshalDateTime retrieves the specified property from the map and returns it as a DateTime +// or nil if the property wasn't found in the map. +func UnmarshalDateTime(m map[string]interface{}, propertyName string) (result *strfmt.DateTime, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + date, localErr := decodeDateTime(v) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + } else { + result = date + } + } + return +} + +// UnmarshalDateTimeSlice retrieves the specified property from the map and returns it as a DateTime slice +// or nil if the property wasn't found in the map. 
+func UnmarshalDateTimeSlice(m map[string]interface{}, propertyName string) (slice []strfmt.DateTime, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + datetime, localErr := decodeDateTime(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + } else { + slice = append(slice, *datetime) + } + } + } + return +} + +// assertMapOfDateTime will return value "v" as a map[string]strfmt.DateTime. +func assertMapOfDateTime(v interface{}) (result map[string]strfmt.DateTime, err error) { + m, ok := v.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorNotAMap, "DateTime", reflect.TypeOf(v).String()) + return + } + + r := make(map[string]strfmt.DateTime) + for key, value := range m { + datetime, localErr := decodeDateTime(value) + if localErr != nil { + err = localErr + return + } else { + r[key] = *datetime + } + } + result = r + return +} + +// UnmarshalDateTimeMap retrieves the specified property from the map "m" and returns it +// as a map[string]strfmt.DateTime, or nil if the property wasn't found in the map. +func UnmarshalDateTimeMap(m map[string]interface{}, propertyName string) (result map[string]strfmt.DateTime, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result, err = assertMapOfDateTime(v) + if err != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, err.Error()) + } + } + return +} + +// UnmarshalDateTimeMapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string]strfmt.DateTime, or nil if the property wasn't found in the map. +func UnmarshalDateTimeMapSlice(m map[string]interface{}, propertyName string) (slice []map[string]strfmt.DateTime, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + elementAsMap, localErr := assertMapOfDateTime(element) + if localErr != nil { + err = fmt.Errorf(errorPropertyValue, propertyName, localErr.Error()) + return + } + slice = append(slice, elementAsMap) + } + } + return +} + +// UnmarshalObject retrieves the specified property from the map and returns it as a generic +// object (i.e. map[string]interface{}), or nil if the property wasn't found in the map. +func UnmarshalObject(m map[string]interface{}, propertyName string) (result map[string]interface{}, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a generic map containing a JSON object. + jsonMap, ok := v.(map[string]interface{}) + if ok { + result = jsonMap + } else { + err = fmt.Errorf(errorPropValueType, propertyName, "JSON object", reflect.TypeOf(v).String()) + } + } + return +} + +// UnmarshalObjectSlice retrieves the specified property from the map and returns it as a slice of +// generic objects (i.e. []map[string]interface{}), or nil if the property wasn't found in the map. 
+func UnmarshalObjectSlice(m map[string]interface{}, propertyName string) (slice []map[string]interface{}, err error) { + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + for _, element := range vSlice { + // Interpret each slice element as a map and then add it to the result slice. + jsonMap, ok := element.(map[string]interface{}) + if !ok { + err = fmt.Errorf(errorElementValueType, propertyName, "JSON object", reflect.TypeOf(element).String()) + return + } + slice = append(slice, jsonMap) + } + } + return +} + +// UnmarshalAny retrieves the specified property from the map and returns it as a generic +// value (i.e. interface{}), or nil if the property wasn't found in the map. +func UnmarshalAny(m map[string]interface{}, propertyName string) (result interface{}, err error) { + var v interface{} + v, foundIt := m[propertyName] + if foundIt && v != nil { + result = v + } + return +} + +// UnmarshalAnySlice retrieves the specified property from the map and returns it as a slice of +// generic values (i.e. []interface{}), or nil if the property wasn't found in the map. +func UnmarshalAnySlice(m map[string]interface{}, propertyName string) (slice []interface{}, err error) { + v, foundIt := m[propertyName] + if foundIt && v != nil { + // Interpret the map value as a slice of anything. + vSlice, ok := v.([]interface{}) + if !ok { + err = fmt.Errorf(errorNotAnArray, propertyName, reflect.TypeOf(v).String()) + return + } + slice = vSlice + } + return +} + +// UnmarshalAnyMap retrieves the specified property from the map "m" and returns it +// as a map[string]interface{}, or nil if the property wasn't found in the map. +func UnmarshalAnyMap(m map[string]interface{}, propertyName string) (result map[string]interface{}, err error) { + result, err = UnmarshalObject(m, propertyName) + return +} + +// UnmarshalAnyMapSlice retrieves the specified property from the map "m" and returns it +// as a []map[string]interface{}, or nil if the property wasn't found in the map. +func UnmarshalAnyMapSlice(m map[string]interface{}, propertyName string) (slice []map[string]interface{}, err error) { + slice, err = UnmarshalObjectSlice(m, propertyName) + return +} + +// truncateString returns a string suitable for inclusion in an error message. +// If the input string is longer than the specified length, we'll just return the first +// bytes followed by "...". +func truncateString(s string, length int) string { + if len(s) > length { + runes := []rune(s) + return string(runes[0:length]) + "..." + } + return s +} diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/utils.go b/vendor/github.com/IBM/go-sdk-core/v3/core/utils.go new file mode 100644 index 00000000000..d99fb98a03a --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v3/core/utils.go @@ -0,0 +1,177 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "time" + + validator "gopkg.in/go-playground/validator.v9" +) + +// Validate is a shared validator instance used to perform validation of structs. +var Validate *validator.Validate + +func init() { + Validate = validator.New() +} + +const ( + jsonMimePattern = "(?i)^application\\/((json)|(merge\\-patch\\+json))(;.*)?$" + jsonPatchMimePattern = "(?i)^application\\/json\\-patch\\+json(;.*)?$" +) + +// isNil checks if the specified object is nil or not. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// ValidateNotNil returns the specified error if 'object' is nil, nil otherwise. +func ValidateNotNil(object interface{}, errorMsg string) error { + if isNil(object) { + return errors.New(errorMsg) + } + return nil +} + +// ValidateStruct validates 'param' (assumed to be a struct) according to the +// annotations attached to its fields. +func ValidateStruct(param interface{}, paramName string) error { + if param != nil { + if err := Validate.Struct(param); err != nil { + // If there were validation errors then return an error containing the field errors + if fieldErrors, ok := err.(validator.ValidationErrors); ok { + return fmt.Errorf("%s failed validation:\n%s", paramName, fieldErrors.Error()) + } + return fmt.Errorf("An unexpected system error occurred while validating %s\n%s", paramName, err.Error()) + } + } + return nil +} + +// StringPtr returns a pointer to string literal. +func StringPtr(literal string) *string { + return &literal +} + +// BoolPtr returns a pointer to boolean literal. +func BoolPtr(literal bool) *bool { + return &literal +} + +// Int64Ptr returns a pointer to int64 literal. +func Int64Ptr(literal int64) *int64 { + return &literal +} + +// Float32Ptr returns a pointer to float32 literal. +func Float32Ptr(literal float32) *float32 { + return &literal +} + +// Float64Ptr returns a pointer to float64 literal. +func Float64Ptr(literal float64) *float64 { + return &literal +} + +// IsJSONMimeType Returns true iff the specified mimeType value represents a +// "JSON" mimetype. +func IsJSONMimeType(mimeType string) bool { + if mimeType == "" { + return false + } + matched, err := regexp.MatchString(jsonMimePattern, mimeType) + if err != nil { + return false + } + return matched +} + +// IsJSONPatchMimeType returns true iff the specified mimeType value represents +// a "JSON Patch" mimetype. +func IsJSONPatchMimeType(mimeType string) bool { + if mimeType == "" { + return false + } + matched, err := regexp.MatchString(jsonPatchMimePattern, mimeType) + if err != nil { + return false + } + return matched +} + +// StringNilMapper de-references the parameter 's' and returns the result, or "" +// if 's' is nil. +func StringNilMapper(s *string) string { + if s == nil { + return "" + } + return *s +} + +// HasBadFirstOrLastChar checks if the string starts with `{` or `"` +// or ends with `}` or `"`. +func HasBadFirstOrLastChar(str string) bool { + return strings.HasPrefix(str, "{") || strings.HasPrefix(str, "\"") || + strings.HasSuffix(str, "}") || strings.HasSuffix(str, "\"") +} + +// UserHomeDir returns the user home directory. 
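+// On Windows it combines the HOMEDRIVE and HOMEPATH environment variables
+// (falling back to USERPROFILE); on other platforms it reads HOME.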
+func UserHomeDir() string { + if runtime.GOOS == "windows" { + home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + if home == "" { + home = os.Getenv("USERPROFILE") + } + return home + } + return os.Getenv("HOME") +} + +// SystemInfo returns the system information. +func SystemInfo() string { + return fmt.Sprintf("(arch=%s; os=%s; go.version=%s)", runtime.GOARCH, runtime.GOOS, runtime.Version()) +} + +// PrettyPrint print pretty. +func PrettyPrint(result interface{}, resultName string) { + output, err := json.MarshalIndent(result, "", " ") + + if err == nil { + fmt.Printf("%v:\n%+v\n\n", resultName, string(output)) + } +} + +// GetCurrentTime returns the current Unix time. +func GetCurrentTime() int64 { + return time.Now().Unix() +} diff --git a/vendor/github.com/IBM/go-sdk-core/v3/core/version.go b/vendor/github.com/IBM/go-sdk-core/v3/core/version.go new file mode 100644 index 00000000000..176b78cea0a --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v3/core/version.go @@ -0,0 +1,18 @@ +package core + +// (C) Copyright IBM Corp. 2019. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Version of the SDK +const __VERSION__ = "3.3.1" diff --git a/vendor/github.com/IBM/go-sdk-core/v4/core/constants.go b/vendor/github.com/IBM/go-sdk-core/v4/core/constants.go index 2876ad7a971..55611c78bea 100644 --- a/vendor/github.com/IBM/go-sdk-core/v4/core/constants.go +++ b/vendor/github.com/IBM/go-sdk-core/v4/core/constants.go @@ -1,6 +1,6 @@ package core -// (C) Copyright IBM Corp. 2019. +// (C) Copyright IBM Corp. 2019, 2021. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -51,6 +51,7 @@ const ( // Common error messages. ERRORMSG_PROP_MISSING = "The %s property is required but was not specified." ERRORMSG_PROP_INVALID = "The %s property is invalid. Please remove any surrounding {, }, or \" characters." + ERRORMSG_EXCLUSIVE_PROPS_ERROR = "Exactly one of %s or %s must be specified." ERRORMSG_NO_AUTHENTICATOR = "Authentication information was not properly configured." ERRORMSG_AUTHTYPE_UNKNOWN = "Unrecognized authentication type: %s" ERRORMSG_PROPS_MAP_NIL = "The 'properties' map cannot be nil." @@ -67,4 +68,5 @@ const ( ERRORMSG_CONVERT_SLICE = "An error occurred while converting 'slice' to string slice" ERRORMSG_CREATE_RETRYABLE_REQ = "An error occurred while creating a retryable http Request: %s" ERRORMSG_UNEXPECTED_STATUS_CODE = "Unexpected HTTP status code %d (%s)" + ERRORMSG_UNMARSHAL_AUTH_RESPONSE = "error unmarshalling authentication response: %s" ) diff --git a/vendor/github.com/IBM/go-sdk-core/v4/core/cp4d_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v4/core/cp4d_authenticator.go index d68722e4c04..5fa2cf46654 100644 --- a/vendor/github.com/IBM/go-sdk-core/v4/core/cp4d_authenticator.go +++ b/vendor/github.com/IBM/go-sdk-core/v4/core/cp4d_authenticator.go @@ -1,6 +1,6 @@ package core -// (C) Copyright IBM Corp. 2019. +// (C) Copyright IBM Corp. 2019, 2021. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,21 +21,14 @@ import ( "fmt" "net/http" "strconv" - "strings" "sync" "time" - - jwt "github.com/form3tech-oss/jwt-go" -) - -// Constants for CP4D -const ( - PRE_AUTH_PATH = "/v1/preauth/validateAuth" ) -// CloudPakForDataAuthenticator uses a username and password pair to obtain a -// suitable bearer token, and adds the bearer token to requests via an -// Authorization header of the form: +// +// CloudPakForDataAuthenticator uses either a username/password pair or a +// username/apikey pair to obtain a suitable bearer token from the CP4D authentication service, +// and adds the bearer token to requests via an Authorization header of the form: // // Authorization: Bearer // @@ -46,9 +39,14 @@ type CloudPakForDataAuthenticator struct { // The username used to obtain a bearer token [required]. Username string - // The password used to obtain a bearer token [required]. + // The password used to obtain a bearer token [required if APIKey not specified]. + // One of Password or APIKey must be specified. Password string + // The apikey used to obtain a bearer token [required if Password not specified]. + // One of Password or APIKey must be specified. + APIKey string + // A flag that indicates whether verification of the server's SSL certificate // should be disabled; defaults to false [optional]. DisableSSLVerification bool @@ -68,25 +66,47 @@ var cp4dRequestTokenMutex sync.Mutex var cp4dNeedsRefreshMutex sync.Mutex // NewCloudPakForDataAuthenticator constructs a new CloudPakForDataAuthenticator -// instance. +// instance from a username/password pair. +// This is the default way to create an authenticator and is a wrapper around +// the NewCloudPakForDataAuthenticatorUsingPassword() function func NewCloudPakForDataAuthenticator(url string, username string, password string, disableSSLVerification bool, headers map[string]string) (*CloudPakForDataAuthenticator, error) { + return NewCloudPakForDataAuthenticatorUsingPassword(url, username, password, disableSSLVerification, headers) +} + +// NewCloudPakForDataAuthenticatorUsingPassword constructs a new CloudPakForDataAuthenticator +// instance from a username/password pair. +func NewCloudPakForDataAuthenticatorUsingPassword(url string, username string, password string, + disableSSLVerification bool, headers map[string]string) (*CloudPakForDataAuthenticator, error) { + return newAuthenticator(url, username, password, "", disableSSLVerification, headers) +} - authenticator := &CloudPakForDataAuthenticator{ +// NewCloudPakForDataAuthenticatorUsingAPIKey constructs a new CloudPakForDataAuthenticator +// instance from a username/apikey pair. +func NewCloudPakForDataAuthenticatorUsingAPIKey(url string, username string, apikey string, + disableSSLVerification bool, headers map[string]string) (*CloudPakForDataAuthenticator, error) { + return newAuthenticator(url, username, "", apikey, disableSSLVerification, headers) +} + +func newAuthenticator(url string, username string, password string, apikey string, + disableSSLVerification bool, headers map[string]string) (authenticator *CloudPakForDataAuthenticator, err error) { + + authenticator = &CloudPakForDataAuthenticator{ Username: username, Password: password, + APIKey: apikey, URL: url, DisableSSLVerification: disableSSLVerification, Headers: headers, } // Make sure the config is valid. 
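+	// (Validate enforces that exactly one of Password or APIKey is set; see Validate() below.)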
- err := authenticator.Validate() + err = authenticator.Validate() if err != nil { return nil, err } - return authenticator, nil + return } // newCloudPakForDataAuthenticatorFromMap : Constructs a new CloudPakForDataAuthenticator instance from a map. @@ -99,9 +119,10 @@ func newCloudPakForDataAuthenticatorFromMap(properties map[string]string) (*Clou if err != nil { disableSSL = false } - return NewCloudPakForDataAuthenticator(properties[PROPNAME_AUTH_URL], + + return newAuthenticator(properties[PROPNAME_AUTH_URL], properties[PROPNAME_USERNAME], properties[PROPNAME_PASSWORD], - disableSSL, nil) + properties[PROPNAME_APIKEY], disableSSL, nil) } // AuthenticationType returns the authentication type for this authenticator. @@ -119,8 +140,10 @@ func (authenticator CloudPakForDataAuthenticator) Validate() error { return fmt.Errorf(ERRORMSG_PROP_MISSING, "Username") } - if authenticator.Password == "" { - return fmt.Errorf(ERRORMSG_PROP_MISSING, "Password") + // The user should specify exactly one of APIKey or Password. + if (authenticator.APIKey == "" && authenticator.Password == "") || + (authenticator.APIKey != "" && authenticator.Password != "") { + return fmt.Errorf(ERRORMSG_EXCLUSIVE_PROPS_ERROR, "APIKey", "Password") } if authenticator.URL == "" { @@ -194,35 +217,49 @@ func (authenticator *CloudPakForDataAuthenticator) synchronizedRequestToken() er return authenticator.getTokenData() } -// getTokenData: requests a new token from the access server and +// getTokenData: requests a new token from the token server and // unmarshals the token information to the tokenData cache. Returns // an error if the token was unable to be fetched, otherwise returns nil func (authenticator *CloudPakForDataAuthenticator) getTokenData() error { tokenResponse, err := authenticator.requestToken() if err != nil { + authenticator.tokenData = nil return err } authenticator.tokenData, err = newCp4dTokenData(tokenResponse) if err != nil { + authenticator.tokenData = nil return err } return nil } +// cp4dRequestBody is a struct used to model the request body for the "POST /v1/authorize" operation. +// Note: we list both Password and APIKey fields, although exactly one of those will be used for +// a specific invocation of the POST /v1/authorize operation. +type cp4dRequestBody struct { + Username string `json:"username"` + Password string `json:"password,omitempty"` + APIKey string `json:"api_key,omitempty"` +} + // requestToken: fetches a new access token from the token server. -func (authenticator *CloudPakForDataAuthenticator) requestToken() (*cp4dTokenServerResponse, error) { - // If the user-specified URL does not end with the required path, - // then add it now. - url := authenticator.URL - if !strings.HasSuffix(url, PRE_AUTH_PATH) { - url = fmt.Sprintf("%s%s", url, PRE_AUTH_PATH) +func (authenticator *CloudPakForDataAuthenticator) requestToken() (tokenResponse *cp4dTokenServerResponse, err error) { + + // Create the request body (only one of APIKey or Password should be set + // on the authenticator so only one of them should end up in the serialized JSON). + body := &cp4dRequestBody{ + Username: authenticator.Username, + Password: authenticator.Password, + APIKey: authenticator.APIKey, } - builder, err := NewRequestBuilder(GET).ConstructHTTPURL(url, nil, nil) + builder := NewRequestBuilder(POST) + _, err = builder.ResolveRequestURL(authenticator.URL, "/v1/authorize", nil) if err != nil { - return nil, err + return } // Add user-defined headers to request. 
@@ -230,12 +267,20 @@ func (authenticator *CloudPakForDataAuthenticator) requestToken() (*cp4dTokenSer builder.AddHeader(headerName, headerValue) } - req, err := builder.Build() + // Add the Content-Type header. + builder.AddHeader("Content-Type", "application/json") + + // Add the request body to request. + _, err = builder.SetBodyContentJSON(body) if err != nil { - return nil, err + return } - req.SetBasicAuth(authenticator.Username, authenticator.Password) + // Build the request object. + req, err := builder.Build() + if err != nil { + return + } // If the authenticator does not have a Client, create one now. if authenticator.Client == nil { @@ -255,7 +300,7 @@ func (authenticator *CloudPakForDataAuthenticator) requestToken() (*cp4dTokenSer resp, err := authenticator.Client.Do(req) if err != nil { - return nil, err + return } if resp.StatusCode < 200 || resp.StatusCode >= 300 { @@ -269,30 +314,30 @@ func (authenticator *CloudPakForDataAuthenticator) requestToken() (*cp4dTokenSer RawResult: buff.Bytes(), } - return nil, NewAuthenticationError(detailedResponse, fmt.Errorf(buff.String())) + err = NewAuthenticationError(detailedResponse, fmt.Errorf(buff.String())) + return } - tokenResponse := &cp4dTokenServerResponse{} - _ = json.NewDecoder(resp.Body).Decode(tokenResponse) + tokenResponse = &cp4dTokenServerResponse{} + err = json.NewDecoder(resp.Body).Decode(tokenResponse) defer resp.Body.Close() - return tokenResponse, nil + if err != nil { + err = fmt.Errorf(ERRORMSG_UNMARSHAL_AUTH_RESPONSE, err.Error()) + tokenResponse = nil + return + } + + return } -// cp4dTokenServerResponse : This struct models a response received from the token server. +// cp4dTokenServerResponse is a struct that models a response received from the token server. type cp4dTokenServerResponse struct { - Username string `json:"username,omitempty"` - Role string `json:"role,omitempty"` - Permissions []string `json:"permissions,omitempty"` - Subject string `json:"sub,omitempty"` - Issuer string `json:"iss,omitempty"` - Audience string `json:"aud,omitempty"` - UID string `json:"uid,omitempty"` - AccessToken string `json:"accessToken,omitempty"` - MessageCode string `json:"_messageCode_,omitempty"` - Message string `json:"message,omitempty"` + Token string `json:"token,omitempty"` + MessageCode string `json:"_messageCode_,omitempty"` + Message string `json:"message,omitempty"` } -// cp4dTokenData : This struct represents the cached information related to a fetched access token. +// cp4dTokenData is a struct that represents the cached information related to a fetched access token. type cp4dTokenData struct { AccessToken string RefreshTime int64 @@ -301,27 +346,29 @@ type cp4dTokenData struct { // newCp4dTokenData: constructs a new Cp4dTokenData instance from the specified Cp4dTokenServerResponse instance. func newCp4dTokenData(tokenResponse *cp4dTokenServerResponse) (*cp4dTokenData, error) { - // Need to crack open the access token (a JWToken) to get the expiration and issued-at times. - claims := &jwt.StandardClaims{} - if token, _ := jwt.ParseWithClaims(tokenResponse.AccessToken, claims, nil); token == nil { - return nil, fmt.Errorf("Error while trying to parse access token!") + // Need to crack open the access token (a JWT) to get the expiration and issued-at times. 
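+	// parseJWT (defined in jwt_utils.go) only base64-decodes the claims segment;
+	// it does not verify the token's signature.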
+ claims, err := parseJWT(tokenResponse.Token) + if err != nil { + return nil, err } + // Compute the adjusted refresh time (expiration time - 20% of timeToLive) timeToLive := claims.ExpiresAt - claims.IssuedAt expireTime := claims.ExpiresAt refreshTime := expireTime - int64(float64(timeToLive)*0.2) tokenData := &cp4dTokenData{ - AccessToken: tokenResponse.AccessToken, + AccessToken: tokenResponse.Token, Expiration: expireTime, RefreshTime: refreshTime, } + return tokenData, nil } // isTokenValid: returns true iff the Cp4dTokenData instance represents a valid (non-expired) access token. -func (this *cp4dTokenData) isTokenValid() bool { - if this.AccessToken != "" && GetCurrentTime() < this.Expiration { +func (tokenData *cp4dTokenData) isTokenValid() bool { + if tokenData.AccessToken != "" && GetCurrentTime() < tokenData.Expiration { return true } return false @@ -330,16 +377,14 @@ func (this *cp4dTokenData) isTokenValid() bool { // needsRefresh: synchronously returns true iff the currently stored access token should be refreshed. This method also // updates the refresh time if it determines the token needs refreshed to prevent other threads from // making multiple refresh calls. -func (this *cp4dTokenData) needsRefresh() bool { +func (tokenData *cp4dTokenData) needsRefresh() bool { cp4dNeedsRefreshMutex.Lock() defer cp4dNeedsRefreshMutex.Unlock() // Advance refresh by one minute - if this.RefreshTime >= 0 && GetCurrentTime() > this.RefreshTime { - this.RefreshTime = GetCurrentTime() + 60 + if tokenData.RefreshTime >= 0 && GetCurrentTime() > tokenData.RefreshTime { + tokenData.RefreshTime = GetCurrentTime() + 60 return true } - return false - } diff --git a/vendor/github.com/IBM/go-sdk-core/v4/core/jwt_utils.go b/vendor/github.com/IBM/go-sdk-core/v4/core/jwt_utils.go new file mode 100644 index 00000000000..e648a6dd6fe --- /dev/null +++ b/vendor/github.com/IBM/go-sdk-core/v4/core/jwt_utils.go @@ -0,0 +1,57 @@ +package core + +// (C) Copyright IBM Corp. 2021. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "strings" + + jwt "github.com/dgrijalva/jwt-go" +) + +// coreJWTClaims are the fields within a JWT's "claims" segment that we're interested in. +type coreJWTClaims struct { + ExpiresAt int64 `json:"exp,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` +} + +// parseJWT parses the specified JWT token string and returns an instance of the coreJWTClaims struct. +func parseJWT(tokenString string) (claims *coreJWTClaims, err error) { + // A JWT consists of three .-separated segments + segments := strings.Split(tokenString, ".") + if len(segments) != 3 { + err = fmt.Errorf("token contains an invalid number of segments") + return + } + + // Parse Claims segment. + var claimBytes []byte + claimBytes, err = jwt.DecodeSegment(segments[1]) + if err != nil { + err = fmt.Errorf("error decoding claims segment: %s", err.Error()) + return + } + + // Now deserialize the claims segment into our coreClaims struct. 
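+	// (Only the exp and iat fields defined on coreJWTClaims are retained; all
+	// other claims are ignored by json.Unmarshal.)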
+ claims = &coreJWTClaims{} + err = json.Unmarshal(claimBytes, claims) + if err != nil { + err = fmt.Errorf("error unmarshalling token: %s", err.Error()) + return + } + + return +} diff --git a/vendor/github.com/IBM/go-sdk-core/v4/core/version.go b/vendor/github.com/IBM/go-sdk-core/v4/core/version.go index 016a388b5f8..dad03021a97 100644 --- a/vendor/github.com/IBM/go-sdk-core/v4/core/version.go +++ b/vendor/github.com/IBM/go-sdk-core/v4/core/version.go @@ -15,4 +15,4 @@ package core // limitations under the License. // Version of the SDK -const __VERSION__ = "4.9.0" +const __VERSION__ = "4.10.0" diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go-config/LICENSE b/vendor/github.com/IBM/ibm-cos-sdk-go-config/LICENSE new file mode 100644 index 00000000000..73f60021208 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go-config/LICENSE @@ -0,0 +1,10 @@ +# Copyright 2020 IBM Corp. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go-config/common/headers.go b/vendor/github.com/IBM/ibm-cos-sdk-go-config/common/headers.go new file mode 100644 index 00000000000..1a2ae70815c --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go-config/common/headers.go @@ -0,0 +1,66 @@ +/** + * (C) Copyright IBM Corp. 2019. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "fmt" + "runtime" +) + +const ( + // HeaderSDKAnalytics Header + HeaderSDKAnalytics = "X-IBMCloud-SDK-Analytics" + // HeaderUserAgent Header + HeaderUserAgent = "User-Agent" + + // SDKName name of this SDK + SDKName = "ibm-cos-resource-config-sdk-go" +) + +// GetSdkHeaders - returns the set of SDK-specific headers to be included in an outgoing request. +// Parameters: +// serviceName - the name of the service as defined in the API definition (e.g. "MyService1") +// serviceVersion - the version of the service as defined in the API definition (e.g. "V1") +// operationId - the operationId as defined in the API definition (e.g. 
getContext) +// +// Returns: +// a Map which contains the set of headers to be included in the REST API request +// +func GetSdkHeaders(serviceName string, serviceVersion string, operationID string) map[string]string { + sdkHeaders := make(map[string]string) + + sdkHeaders[HeaderSDKAnalytics] = fmt.Sprintf("service_name=%s;service_version=%s;operation_id=%s", + serviceName, serviceVersion, operationID) + + sdkHeaders[HeaderUserAgent] = GetUserAgentInfo() + + return sdkHeaders +} + +var userAgent = fmt.Sprintf("%s-%s %s", SDKName, SDKVersion, GetSystemInfo()) + +// GetUserAgentInfo returns user agent +func GetUserAgentInfo() string { + return userAgent +} + +var systemInfo = fmt.Sprintf("(arch=%s; os=%s; go.version=%s)", runtime.GOARCH, runtime.GOOS, runtime.Version()) + +// GetSystemInfo returns system information +func GetSystemInfo() string { + return systemInfo +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go-config/common/version.go b/vendor/github.com/IBM/ibm-cos-sdk-go-config/common/version.go new file mode 100644 index 00000000000..c8949123302 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go-config/common/version.go @@ -0,0 +1,20 @@ +/** + * (C) Copyright IBM Corp. 2019. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +// SDKVersion : version of this SDK +const SDKVersion = "1.2.0" diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go-config/resourceconfigurationv1/resource_configuration_v1.go b/vendor/github.com/IBM/ibm-cos-sdk-go-config/resourceconfigurationv1/resource_configuration_v1.go new file mode 100644 index 00000000000..3b36168e262 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go-config/resourceconfigurationv1/resource_configuration_v1.go @@ -0,0 +1,428 @@ +/** + * (C) Copyright IBM Corp. 2019. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package resourceconfigurationv1 : Operations and models for the ResourceConfigurationV1 service +package resourceconfigurationv1 + +import ( + "fmt" + "github.com/IBM/go-sdk-core/v3/core" + "github.com/go-openapi/strfmt" + common "github.com/IBM/ibm-cos-sdk-go-config/common" +) + +// ResourceConfigurationV1 : REST API used to configure Cloud Object Storage buckets. This version of the API only +// supports reading bucket metadata and setting IP access controls. +// +// Version: 1.0.0 +type ResourceConfigurationV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. 
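+// Requests are sent to this endpoint unless a URL is supplied via
+// ResourceConfigurationV1Options or SetServiceURL.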
+const DefaultServiceURL = "https://config.cloud-object-storage.cloud.ibm.com/v1" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "resource_configuration" + +// ResourceConfigurationV1Options : Service options +type ResourceConfigurationV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewResourceConfigurationV1UsingExternalConfig : constructs an instance of ResourceConfigurationV1 with passed in options and external configuration. +func NewResourceConfigurationV1UsingExternalConfig(options *ResourceConfigurationV1Options) (resourceConfiguration *ResourceConfigurationV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + resourceConfiguration, err = NewResourceConfigurationV1(options) + if err != nil { + return + } + + err = resourceConfiguration.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + resourceConfiguration.Service.SetServiceURL(options.URL) + } + return +} + +// NewResourceConfigurationV1 : constructs an instance of ResourceConfigurationV1 with passed in options. +func NewResourceConfigurationV1(options *ResourceConfigurationV1Options) (service *ResourceConfigurationV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + baseService.SetServiceURL(options.URL) + } + + service = &ResourceConfigurationV1{ + Service: baseService, + } + + return +} + +// SetServiceURL sets the service URL +func (resourceConfiguration *ResourceConfigurationV1) SetServiceURL(url string) error { + return resourceConfiguration.Service.SetServiceURL(url) +} + + +// GetBucketConfig : Returns metadata for the specified bucket +// Returns metadata for the specified bucket. 
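+// A hypothetical call sketch (the bucket name is illustrative):
+//
+//	opts := service.NewGetBucketConfigOptions("my-bucket")
+//	bucket, _, err := service.GetBucketConfig(opts)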
+func (resourceConfiguration *ResourceConfigurationV1) GetBucketConfig(getBucketConfigOptions *GetBucketConfigOptions) (result *Bucket, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getBucketConfigOptions, "getBucketConfigOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getBucketConfigOptions, "getBucketConfigOptions") + if err != nil { + return + } + + pathSegments := []string{"b"} + pathParameters := []string{*getBucketConfigOptions.Bucket} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(resourceConfiguration.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getBucketConfigOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("resource_configuration", "V1", "GetBucketConfig") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + response, err = resourceConfiguration.Service.Request(request, new(Bucket)) + if err == nil { + var ok bool + result, ok = response.Result.(*Bucket) + if !ok { + err = fmt.Errorf("An error occurred while processing the operation response.") + } + } + + return +} + + +// UpdateBucketConfig : Make changes to a bucket's configuration +// Updates a bucket using [JSON Merge Patch](https://tools.ietf.org/html/rfc7396). This request is used to add +// functionality (like an IP access filter) or to update existing parameters. **Primitives are overwritten and replaced +// in their entirety. It is not possible to append a new (or to delete a specific) value to an array.** Arrays can be +// cleared by updating the parameter with an empty array `[]`. Only updates specified mutable fields. Please don't use +// `PATCH` trying to update the number of objects in a bucket, any timestamps, or other non-mutable fields. 
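+// A hypothetical call sketch (all values are illustrative):
+//
+//	opts := service.NewUpdateBucketConfigOptions("my-bucket")
+//	opts.SetFirewall(&Firewall{AllowedIp: []string{"10.0.0.0/8"}})
+//	_, err := service.UpdateBucketConfig(opts)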
+func (resourceConfiguration *ResourceConfigurationV1) UpdateBucketConfig(updateBucketConfigOptions *UpdateBucketConfigOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateBucketConfigOptions, "updateBucketConfigOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateBucketConfigOptions, "updateBucketConfigOptions") + if err != nil { + return + } + + pathSegments := []string{"b"} + pathParameters := []string{*updateBucketConfigOptions.Bucket} + + builder := core.NewRequestBuilder(core.PATCH) + _, err = builder.ConstructHTTPURL(resourceConfiguration.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range updateBucketConfigOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("resource_configuration", "V1", "UpdateBucketConfig") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddHeader("Content-Type", "application/json") + if updateBucketConfigOptions.IfMatch != nil { + builder.AddHeader("if-match", fmt.Sprint(*updateBucketConfigOptions.IfMatch)) + } + + body := make(map[string]interface{}) + if updateBucketConfigOptions.Firewall != nil { + body["firewall"] = updateBucketConfigOptions.Firewall + } + if updateBucketConfigOptions.ActivityTracking != nil { + body["activity_tracking"] = updateBucketConfigOptions.ActivityTracking + } + if updateBucketConfigOptions.MetricsMonitoring != nil { + body["metrics_monitoring"] = updateBucketConfigOptions.MetricsMonitoring + } + if updateBucketConfigOptions.HardQuota != nil { + body["hard_quota"] = updateBucketConfigOptions.HardQuota + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = resourceConfiguration.Service.Request(request, nil) + + return +} + + +// ActivityTracking : Enables sending log data to Activity Tracker and LogDNA to provide visibility into object read and write events. All +// object events are sent to the activity tracker instance defined in the `activity_tracker_crn` field. +type ActivityTracking struct { + + // If set to `true`, all object read events (i.e. downloads) will be sent to Activity Tracker. + ReadDataEvents *bool `json:"read_data_events,omitempty"` + + // If set to `true`, all object write events (i.e. uploads) will be sent to Activity Tracker. + WriteDataEvents *bool `json:"write_data_events,omitempty"` + + // Required the first time `activity_tracking` is configured. The instance of Activity Tracker that will receive object + // event data. The format is "crn:v1:bluemix:public:logdnaat:{bucket location}:a/{storage account}:{activity tracker + // service instance}::". + ActivityTrackerCrn *string `json:"activity_tracker_crn,omitempty"` +} + +// Bucket : A bucket. +type Bucket struct { + + // The name of the bucket. Non-mutable. + Name *string `json:"name,omitempty"` + + // The service instance that holds the bucket. Non-mutable. + Crn *string `json:"crn,omitempty"` + + // The service instance that holds the bucket. Non-mutable. + ServiceInstanceID *string `json:"service_instance_id,omitempty"` + + // The service instance that holds the bucket. Non-mutable. + ServiceInstanceCrn *string `json:"service_instance_crn,omitempty"` + + // The creation time of the bucket in RFC 3339 format. Non-mutable. 
+	TimeCreated *strfmt.DateTime `json:"time_created,omitempty"`
+
+	// The modification time of the bucket in RFC 3339 format. Non-mutable.
+	TimeUpdated *strfmt.DateTime `json:"time_updated,omitempty"`
+
+	// Total number of objects in the bucket. Non-mutable.
+	ObjectCount *int64 `json:"object_count,omitempty"`
+
+	// Total size of all objects in the bucket. Non-mutable.
+	BytesUsed *int64 `json:"bytes_used,omitempty"`
+
+	// Number of non-current object versions in the bucket. Non-mutable.
+	NoncurrentObjectCount *int64 `json:"noncurrent_object_count,omitempty"`
+
+	// Total size of all non-current object versions in the bucket. Non-mutable.
+	NoncurrentBytesUsed *int64 `json:"noncurrent_bytes_used,omitempty"`
+
+	// Total number of delete markers in the bucket. Non-mutable.
+	DeleteMarkerCount *int64 `json:"delete_marker_count,omitempty"`
+
+	// Maximum bytes for this bucket.
+	HardQuota *int64 `json:"hard_quota,omitempty"`
+
+	// An access control mechanism based on the network (IP address) where request originated. Requests not originating
+	// from IP addresses listed in the `allowed_ip` field will be denied regardless of any access policies (including
+	// public access) that might otherwise permit the request. Viewing or updating the `Firewall` element requires the
+	// requester to have the `manager` role.
+	Firewall *Firewall `json:"firewall,omitempty"`
+
+	// Enables sending log data to Activity Tracker and LogDNA to provide visibility into object read and write events. All
+	// object events are sent to the activity tracker instance defined in the `activity_tracker_crn` field.
+	ActivityTracking *ActivityTracking `json:"activity_tracking,omitempty"`
+
+	// Enables sending metrics to IBM Cloud Monitoring. All metrics are sent to the IBM Cloud Monitoring instance defined
+	// in the `monitoring_crn` field.
+	MetricsMonitoring *MetricsMonitoring `json:"metrics_monitoring,omitempty"`
+}
+
+// Firewall : An access control mechanism based on the network (IP address) where request originated. Requests not originating from
+// IP addresses listed in the `allowed_ip` field will be denied regardless of any access policies (including public
+// access) that might otherwise permit the request. Viewing or updating the `Firewall` element requires the requester
+// to have the `manager` role.
+type Firewall struct {
+
+	// List of IPv4 or IPv6 addresses in CIDR notation to be affected by the firewall. Passing an
+	// empty array will lift the IP address filter. The `allowed_ip` array can contain a maximum of 1000 items.
+	AllowedIp []string `json:"allowed_ip"`
+}
+
+// GetBucketConfigOptions : The GetBucketConfig options.
+type GetBucketConfigOptions struct {
+
+	// Name of a bucket.
+ Bucket *string `json:"bucket" validate:"required"` + + // Allows users to set headers to be GDPR compliant + Headers map[string]string +} + +// NewGetBucketConfigOptions : Instantiate GetBucketConfigOptions +func (_ *ResourceConfigurationV1) NewGetBucketConfigOptions(bucket string) *GetBucketConfigOptions { + return &GetBucketConfigOptions{ + Bucket: core.StringPtr(bucket), + } +} + +// SetBucket : Allow user to set Bucket +func (options *GetBucketConfigOptions) SetBucket(bucket string) *GetBucketConfigOptions { + options.Bucket = core.StringPtr(bucket) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetBucketConfigOptions) SetHeaders(param map[string]string) *GetBucketConfigOptions { + options.Headers = param + return options +} + +// MetricsMonitoring : Enables sending metrics to IBM Cloud Monitoring. All metrics are sent to the IBM Cloud Monitoring instance defined in +// the `monitoring_crn` field. +type MetricsMonitoring struct { + + // If set to `true`, all usage metrics (i.e. `bytes_used`) will be sent to the monitoring service. + UsageMetricsEnabled *bool `json:"usage_metrics_enabled,omitempty"` + + // If set to `true`, all request metrics (i.e. `rest.object.head`) will be sent to the monitoring service. + RequestMetricsEnabled *bool `json:"request_metrics_enabled,omitempty"` + + // Required the first time `metrics_monitoring` is configured. The instance of IBM Cloud Monitoring that will receive + // the bucket metrics. The format is "crn:v1:bluemix:public:logdnaat:{bucket location}:a/{storage account}:{monitoring + // service instance}::". + MetricsMonitoringCrn *string `json:"metrics_monitoring_crn,omitempty"` +} + +// UpdateBucketConfigOptions : The UpdateBucketConfig options. +type UpdateBucketConfigOptions struct { + + // Name of a bucket. + Bucket *string `json:"bucket" validate:"required"` + + // An access control mechanism based on the network (IP address) where request originated. Requests not originating + // from IP addresses listed in the `allowed_ip` field will be denied regardless of any access policies (including + // public access) that might otherwise permit the request. Viewing or updating the `Firewall` element requires the + // requester to have the `manager` role. + Firewall *Firewall `json:"firewall,omitempty"` + + // Enables sending log data to Activity Tracker and LogDNA to provide visibility into object read and write events. All + // object events are sent to the activity tracker instance defined in the `activity_tracker_crn` field. + ActivityTracking *ActivityTracking `json:"activity_tracking,omitempty"` + + // Enables sending metrics to IBM Cloud Monitoring. All metrics are sent to the IBM Cloud Monitoring instance defined + // in the `monitoring_crn` field. + MetricsMonitoring *MetricsMonitoring `json:"metrics_monitoring,omitempty"` + + // Maximum bytes for this bucket. + HardQuota *int64 `json:"hard_quota,omitempty"` + + // An Etag previously returned in a header when fetching or updating a bucket's metadata. If this value does not match + // the active Etag, the request will fail. 
+ IfMatch *string `json:"if-match,omitempty"` + + // Allows users to set headers to be GDPR compliant + Headers map[string]string +} + +// NewUpdateBucketConfigOptions : Instantiate UpdateBucketConfigOptions +func (_ *ResourceConfigurationV1) NewUpdateBucketConfigOptions(bucket string) *UpdateBucketConfigOptions { + return &UpdateBucketConfigOptions{ + Bucket: core.StringPtr(bucket), + } +} + +// SetBucket : Allow user to set Bucket +func (options *UpdateBucketConfigOptions) SetBucket(bucket string) *UpdateBucketConfigOptions { + options.Bucket = core.StringPtr(bucket) + return options +} + +// SetFirewall : Allow user to set Firewall +func (options *UpdateBucketConfigOptions) SetFirewall(firewall *Firewall) *UpdateBucketConfigOptions { + options.Firewall = firewall + return options +} + +// SetActivityTracking : Allow user to set ActivityTracking +func (options *UpdateBucketConfigOptions) SetActivityTracking(activityTracking *ActivityTracking) *UpdateBucketConfigOptions { + options.ActivityTracking = activityTracking + return options +} + +// SetMetricsMonitoring : Allow user to set MetricsMonitoring +func (options *UpdateBucketConfigOptions) SetMetricsMonitoring(metricsMonitoring *MetricsMonitoring) *UpdateBucketConfigOptions { + options.MetricsMonitoring = metricsMonitoring + return options +} + +// SetHardQuota : Allow user to set HardQuota +func (options *UpdateBucketConfigOptions) SetHardQuota(hardQuota int64) *UpdateBucketConfigOptions { + options.HardQuota = core.Int64Ptr(hardQuota) + return options +} + +// SetIfMatch : Allow user to set IfMatch +func (options *UpdateBucketConfigOptions) SetIfMatch(ifMatch string) *UpdateBucketConfigOptions { + options.IfMatch = core.StringPtr(ifMatch) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateBucketConfigOptions) SetHeaders(param map[string]string) *UpdateBucketConfigOptions { + options.Headers = param + return options +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/LICENSE.txt b/vendor/github.com/IBM/ibm-cos-sdk-go/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/NOTICE.txt b/vendor/github.com/IBM/ibm-cos-sdk-go/NOTICE.txt new file mode 100644 index 00000000000..899129ecc46 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/arn/arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/arn/arn.go new file mode 100644 index 00000000000..1c496742903 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. 
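+//
+// For example, parsing one of the ARNs shown in the Parse documentation below:
+//
+//	a, err := arn.Parse("arn:aws:s3:::my_corporate_bucket/exampleobject.png")
+//	// a.Service == "s3", a.Resource == "my_corporate_bucket/exampleobject.png"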
+package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allows paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. +// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an ARN by looking for +// whether the string starts with "arn:" and contains the correct number +// of sections delimited by colons(:). 
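+//
+// A minimal usage sketch, assuming Parse and the ARN type defined above:
+//
+//	if IsARN("arn:aws:s3:::my_corporate_bucket") {
+//		a, _ := Parse("arn:aws:s3:::my_corporate_bucket")
+//		_ = a.Service // "s3"
+//	}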
+func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/error.go new file mode 100644 index 00000000000..99849c0e19c --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/error.go @@ -0,0 +1,164 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. 
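+//
+// A minimal sketch, assuming the standard library errors package:
+//
+//	batch := NewBatchError("BatchedErrors", "two operations failed",
+//		[]error{errors.New("first"), errors.New("second")})
+//	_ = len(batch.OrigErrs()) // 2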
+func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/types.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/types.go new file mode 100644 index 00000000000..9cf7eaf4007 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/types.go @@ -0,0 +1,221 @@ +package awserr + +import ( + "encoding/hex" + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. 
Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string + bytes []byte +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. 
+func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += e[i].Error() + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/copy.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/copy.go new file mode 100644 index 00000000000..1a3d106d5c1 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/equal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/equal.go new file mode 100644 index 00000000000..142a7a01c52 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 00000000000..a4eb6a7f43a --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,221 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
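+//
+// A minimal sketch of the path syntax handled here (illustrative values):
+//
+//	type example struct{ List []struct{ Name string } }
+//	v := example{List: []struct{ Name string }{{Name: "a"}, {Name: "b"}}}
+//	_ = rValuesAtPath(&v, "List[*].Name", false, false, false) // values "a" and "b"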
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.EqualFold(name, c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
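+//
+// A minimal sketch; jmespath evaluates the path against exported fields:
+//
+//	type out struct{ Name string }
+//	vals, _ := ValuesAtPath(out{Name: "abc"}, "Name")
+//	_ = vals // []interface{}{"abc"}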
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 00000000000..710eb432f85 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
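+//
+// For a small struct, the exported Prettify above renders along these lines:
+//
+//	type item struct {
+//		Name string
+//		Tags []string
+//	}
+//	Prettify(item{Name: "a", Tags: []string{"x"}})
+//	// {
+//	//   Name: "a",
+//	//   Tags: ["x"]
+//	// }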
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 00000000000..645df2450fc
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/client.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/client.go
new file mode 100644
index 00000000000..049ad80f5f6
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/client.go
@@ -0,0 +1,93 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/client/metadata"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config        *aws.Config
+	Handlers      request.Handlers
+	PartitionID   string
+	Endpoint      string
+	SigningRegion string
+	SigningName   string
+
+	// States that the signing name did not come from a modeled source but
+	// was derived based on other data. Used by service client constructors
+	// to determine if the signin name can be overridden based on metadata the
+	// service has.
+	SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
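+//
+// A minimal construction sketch; cfg, info, and handlers are assumed to come
+// from the enclosing session and service package:
+//
+//	c := New(cfg, metadata.ClientInfo{ServiceName: "s3"}, handlers,
+//		func(c *Client) { c.Retryer = NoOpRetryer{} })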
+type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. +func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers.Copy(), + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = DefaultRetryerMaxNumRetries + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/default_retryer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/default_retryer.go new file mode 100644 index 00000000000..9d9ef8921e6 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,177 @@ +package client + +import ( + "math" + "strconv" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/internal/sdkrand" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, you can implement the +// request.Retryer interface. +// +type DefaultRetryer struct { + // Num max Retries is the number of max retries that will be performed. + // By default, this is zero. + NumMaxRetries int + + // MinRetryDelay is the minimum retry delay after which retry will be performed. + // If not set, the value is 0ns. + MinRetryDelay time.Duration + + // MinThrottleRetryDelay is the minimum retry delay when throttled. + // If not set, the value is 0ns. + MinThrottleDelay time.Duration + + // MaxRetryDelay is the maximum retry delay before which retry must be performed. + // If not set, the value is 0ns. + MaxRetryDelay time.Duration + + // MaxThrottleDelay is the maximum retry delay when throttled. + // If not set, the value is 0ns. 
+	MaxThrottleDelay time.Duration
+}
+
+const (
+	// DefaultRetryerMaxNumRetries sets maximum number of retries
+	DefaultRetryerMaxNumRetries = 3
+
+	// DefaultRetryerMinRetryDelay sets minimum retry delay
+	DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+	// DefaultRetryerMaxRetryDelay sets maximum retry delay
+	DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+	DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
+// MaxRetries returns the maximum number of retries the service will use to make
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
+	}
+
+	retryCount := r.RetryCount
+
+	// maxDelay the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
+	}
+
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
+	}
+	return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.
+ if d.NumMaxRetries == 0 { + return false + } + + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + return r.IsErrorRetryable() || r.IsErrorThrottle() +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/logger.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/logger.go new file mode 100644 index 00000000000..9539941b213 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/logger.go @@ -0,0 +1,202 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. 
+ if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
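+//
+// A minimal sketch of preferring this handler over the body-capable response
+// logger on an already-built client c (hypothetical value):
+//
+//	c.Handlers.Send.Remove(LogHTTPResponseHandler)
+//	c.Handlers.Send.PushBackNamed(LogHTTPResponseHeaderHandler)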
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 00000000000..0c48f72e08e --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,14 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + ServiceID string + APIVersion string + PartitionID string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 00000000000..8c8ac064791 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/config.go new file mode 100644 index 00000000000..cca61248ea8 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/config.go @@ -0,0 +1,591 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. 
+// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `nil` or the value to `""` to use the default generated endpoint. + // + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. 
+ DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // Note: This configuration option is specific to the Amazon S3 service. + // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disable 100-Continue if you experience issues + // with proxies or third party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + S3UseAccelerate *bool + + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + + // Set this to `true` to have the S3 service client to use the region specified + // in the ARN, when an ARN is provided as an argument to a bucket parameter. + S3UseARNRegion *bool + + // Set this to `true` to enable the SDK to unmarshal API response header maps to + // normalized lower case map keys. + // + // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case + // Metadata member's map keys. The value of the header in the map is unaffected. + LowerCaseHeaderMaps *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. 
+ // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDisableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Instructs the endpoint to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requests. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. 
+//     svc := s3.New(sess, aws.NewConfig().
+//         WithRegion("us-west-2"),
+//     )
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean,
+// returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+	c.CredentialsChainVerboseErrors = &verboseErrs
+	return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+	c.Credentials = creds
+	return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+	c.Endpoint = &endpoint
+	return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+	c.EndpointResolver = resolver
+	return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+	c.Region = &region
+	return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+	c.DisableSSL = &disable
+	return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+	c.HTTPClient = client
+	return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+	c.MaxRetries = &max
+	return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+	c.DisableParamValidation = &disable
+	return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+	c.DisableComputeChecksums = &disable
+	return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+	c.LogLevel = &level
+	return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+	c.Logger = logger
+	return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+	c.S3ForcePathStyle = &force
+	return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+	c.S3Disable100Continue = &disable
+	return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+	c.S3UseAccelerate = &enable
+	return c
+}
+
+// WithS3DisableContentMD5Validation sets a config
+// S3DisableContentMD5Validation value returning a Config pointer for chaining.
+func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
+	c.S3DisableContentMD5Validation = &enable
+	return c
+}
+
+// WithS3UseARNRegion sets a config S3UseARNRegion value, returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseARNRegion(enable bool) *Config {
+	c.S3UseARNRegion = &enable
+	return c
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+	c.UseDualStack = &enable
+	return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+	c.EC2MetadataDisableTimeoutOverride = &enable
+	return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+	c.SleepDelay = fn
+	return c
+}
+
+// WithEndpointDiscovery will set whether or not to use endpoint discovery.
+func (c *Config) WithEndpointDiscovery(t bool) *Config {
+	c.EnableEndpointDiscovery = &t
+	return c
+}
+
+// WithDisableEndpointHostPrefix will set whether or not to use the modeled
+// host prefix when making requests.
+func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
+	c.DisableEndpointHostPrefix = &t
+	return c
+}
+
+// WithS3UsEast1RegionalEndpoint will set whether or not to use the regional
+// endpoint flag when resolving the endpoint for a service.
+func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config {
+	c.S3UsEast1RegionalEndpoint = sre
+	return c
+}
+
+// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value
+// returning a Config pointer for chaining.
+func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
+	c.LowerCaseHeaderMaps = &t
+	return c
+}
+
+// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
+	c.DisableRestProtocolURICleaning = &t
+	return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
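+//
+// A minimal illustrative sketch (the regions here are arbitrary examples):
+// fields set by later configs take precedence over earlier ones, since each
+// non-nil field is copied in order.
+//
+//	base := aws.NewConfig().WithRegion("us-south")
+//	base.MergeIn(aws.NewConfig().WithMaxRetries(5), aws.NewConfig().WithRegion("eu-de"))
+//	// base now has Region "eu-de" and MaxRetries 5.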
+func (c *Config) MergeIn(cfgs ...*Config) {
+	for _, other := range cfgs {
+		mergeInConfig(c, other)
+	}
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+	if other == nil {
+		return
+	}
+
+	if other.CredentialsChainVerboseErrors != nil {
+		dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+	}
+
+	if other.Credentials != nil {
+		dst.Credentials = other.Credentials
+	}
+
+	if other.Endpoint != nil {
+		dst.Endpoint = other.Endpoint
+	}
+
+	if other.EndpointResolver != nil {
+		dst.EndpointResolver = other.EndpointResolver
+	}
+
+	if other.Region != nil {
+		dst.Region = other.Region
+	}
+
+	if other.DisableSSL != nil {
+		dst.DisableSSL = other.DisableSSL
+	}
+
+	if other.HTTPClient != nil {
+		dst.HTTPClient = other.HTTPClient
+	}
+
+	if other.LogLevel != nil {
+		dst.LogLevel = other.LogLevel
+	}
+
+	if other.Logger != nil {
+		dst.Logger = other.Logger
+	}
+
+	if other.MaxRetries != nil {
+		dst.MaxRetries = other.MaxRetries
+	}
+
+	if other.Retryer != nil {
+		dst.Retryer = other.Retryer
+	}
+
+	if other.DisableParamValidation != nil {
+		dst.DisableParamValidation = other.DisableParamValidation
+	}
+
+	if other.DisableComputeChecksums != nil {
+		dst.DisableComputeChecksums = other.DisableComputeChecksums
+	}
+
+	if other.S3ForcePathStyle != nil {
+		dst.S3ForcePathStyle = other.S3ForcePathStyle
+	}
+
+	if other.S3Disable100Continue != nil {
+		dst.S3Disable100Continue = other.S3Disable100Continue
+	}
+
+	if other.S3UseAccelerate != nil {
+		dst.S3UseAccelerate = other.S3UseAccelerate
+	}
+
+	if other.S3DisableContentMD5Validation != nil {
+		dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
+	}
+
+	if other.S3UseARNRegion != nil {
+		dst.S3UseARNRegion = other.S3UseARNRegion
+	}
+
+	if other.UseDualStack != nil {
+		dst.UseDualStack = other.UseDualStack
+	}
+
+	if other.EC2MetadataDisableTimeoutOverride != nil {
+		dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+	}
+
+	if other.SleepDelay != nil {
+		dst.SleepDelay = other.SleepDelay
+	}
+
+	if other.DisableRestProtocolURICleaning != nil {
+		dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+	}
+
+	if other.EnforceShouldRetryCheck != nil {
+		dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+	}
+
+	if other.EnableEndpointDiscovery != nil {
+		dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
+	}
+
+	if other.DisableEndpointHostPrefix != nil {
+		dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+	}
+
+	if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
+		dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
+	}
+
+	if other.LowerCaseHeaderMaps != nil {
+		dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
+	}
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+	dst := &Config{}
+	dst.MergeIn(c)
+
+	for _, cfg := range cfgs {
+		dst.MergeIn(cfg)
+	}
+
+	return dst
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_5.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_5.go
new file mode 100644
index 00000000000..2866f9a7fb9
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_5.go
@@ -0,0 +1,37 @@
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_9.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_9.go
new file mode 100644
index 00000000000..3718b26e101
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_9.go
@@ -0,0 +1,11 @@
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_background_1_7.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 00000000000..9c29f29af17
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_sleep.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_sleep.go
new file mode 100644
index 00000000000..304fd156120
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+	"time"
+)
+
+// SleepWithContext will wait for the timer duration to expire, or for the
+// context to be canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
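+//
+// A minimal illustrative sketch, assuming a caller-supplied deadline:
+//
+//	ctx, cancel := context.WithTimeout(aws.BackgroundContext(), time.Second)
+//	defer cancel()
+//	if err := aws.SleepWithContext(ctx, 5*time.Second); err != nil {
+//		// canceled after ~1s, before the full 5s sleep elapsed
+//	}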
+func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/convert_types.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/convert_types.go new file mode 100644 index 00000000000..4e076c1837a --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/convert_types.go @@ -0,0 +1,918 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
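+//
+// A minimal illustrative sketch of the pointer/value helper pattern used
+// throughout this file:
+//
+//	n := aws.Int(30)      // *int, for optional API fields
+//	_ = aws.IntValue(n)   // 30
+//	_ = aws.IntValue(nil) // 0; nil pointers yield the zero value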
+func IntValue(v *int) int {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+	dst := make([]*int, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+	dst := make([]int, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+	dst := make(map[string]*int)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+	dst := make(map[string]int)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+	return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+	return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8Slice converts a slice of int8 values into a slice of +// int8 pointers +func Int8Slice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8ValueSlice converts a slice of int8 pointers into a slice of +// int8 values +func Int8ValueSlice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8Map converts a string map of int8 values into a string +// map of int8 pointers +func Int8Map(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8ValueMap converts a string map of int8 pointers into a string +// map of int8 values +func Int8ValueMap(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16 returns a pointer to the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Value returns the value of the int16 pointer passed in or +// 0 if the pointer is nil. +func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. 
+func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. +func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. +func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float32Slice converts a slice of float32 values into a slice of
+// float32 pointers
+func Float32Slice(src []float32) []*float32 {
+	dst := make([]*float32, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float32ValueSlice converts a slice of float32 pointers into a slice of
+// float32 values
+func Float32ValueSlice(src []*float32) []float32 {
+	dst := make([]float32, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float32Map converts a string map of float32 values into a string
+// map of float32 pointers
+func Float32Map(src map[string]float32) map[string]*float32 {
+	dst := make(map[string]*float32)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float32ValueMap converts a string map of float32 pointers into a string
+// map of float32 values
+func Float32ValueMap(src map[string]*float32) map[string]float32 {
+	dst := make(map[string]float32)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+// Note: the value is divided by 1000 before being interpreted as seconds,
+// so the input is effectively expected in milliseconds.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64;
+// this includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs, such as CloudWatch Logs, which
+// require their Unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+	dst := make([]*time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+	dst := make([]time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+	dst := make(map[string]*time.Time)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+	dst := make(map[string]time.Time)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 00000000000..2d0e8f416f5
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,247 @@
+package corehandlers
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// lener is an interface for matching types which also have a Len method.
+type lener interface {
+	Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine the request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
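+//
+// Named handlers such as this one can be removed or swapped on a per-client
+// basis; a minimal illustrative sketch:
+//
+//	svc := s3.New(sess)
+//	svc.Handlers.Build.Remove(corehandlers.BuildContentLengthHandler)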
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+	var length int64
+
+	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+		length, _ = strconv.ParseInt(slength, 10, 64)
+	} else {
+		if r.Body != nil {
+			var err error
+			length, err = aws.SeekerLen(r.Body)
+			if err != nil {
+				r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
+				return
+			}
+		}
+	}
+
+	if length > 0 {
+		r.HTTPRequest.ContentLength = length
+		r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+	} else {
+		r.HTTPRequest.ContentLength = 0
+		r.HTTPRequest.Header.Del("Content-Length")
+	}
+}}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests, either of which would cause the
+// signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+	Name: "core.ValidateReqSigHandler",
+	Fn: func(r *request.Request) {
+		// Requests using anonymous credentials are never signed, so there
+		// is nothing to re-sign.
+		if r.Config.Credentials == credentials.AnonymousCredentials {
+			return
+		}
+
+		signedTime := r.Time
+		if !r.LastSignedAt.IsZero() {
+			signedTime = r.LastSignedAt
+		}
+
+		// 5 minutes to allow for some clock skew/delays in transmission.
+		// Would be improved with aws/aws-sdk-go#423
+		if signedTime.Add(5 * time.Minute).After(time.Now()) {
+			return
+		}
+
+		fmt.Println("request expired, resigning")
+		r.Sign()
+	},
+}
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{
+	Name: "core.SendHandler",
+	Fn: func(r *request.Request) {
+		sender := sendFollowRedirects
+		if r.DisableFollowRedirects {
+			sender = sendWithoutFollowRedirects
+		}
+
+		if request.NoBody == r.HTTPRequest.Body {
+			// Strip off the request body if the NoBody reader was used as a
+			// placeholder for a request body. This prevents the SDK from
+			// making requests with a request body when it would be invalid
+			// to do so.
+			//
+			// Use a shallow copy of the http.Request to ensure the race condition
+			// of transport on Body will not trigger
+			reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+			reqCopy.Body = nil
+			r.HTTPRequest = &reqCopy
+			defer func() {
+				r.HTTPRequest = reqOrig
+			}()
+		}
+
+		var err error
+		r.HTTPResponse, err = sender(r)
+		if err != nil {
+			handleSendError(r, err)
+		}
+	},
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+	return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+	transport := r.Config.HTTPClient.Transport
+	if transport == nil {
+		transport = http.DefaultTransport
+	}
+
+	return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+	// Prevent leaking if an HTTPResponse was returned. Clean up
+	// the body.
+	if r.HTTPResponse != nil {
+		r.HTTPResponse.Body.Close()
+	}
+	// Capture the case where url.Error is returned for errors processing the
+	// response, e.g. a 301 without a Location header comes back as a string
+	// error and r.HTTPResponse is nil. Other URL redirect errors will come
+	// back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all request errors, and let the default retrier determine + // if the error is retryable. + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } + }} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. 
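+//
+// A minimal illustrative sketch: with no region configured anywhere (config,
+// environment, or shared config), a client call fails fast before any HTTP I/O.
+//
+//	sess := session.Must(session.NewSession(&aws.Config{}))
+//	svc := s3.New(sess)
+//	_, err := svc.ListBuckets(&s3.ListBucketsInput{})
+//	// err reports a missing region (aws.ErrMissingRegion)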
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+
+	// IBM COS SDK Code -- START
+	checkIfRegionPresent := true
+
+	// Anonymous Creds support
+	if r.Config.Credentials != credentials.AnonymousCredentials {
+		value, err := r.Config.Credentials.Get()
+		if err != nil {
+			r.Error = err
+			return
+		}
+		checkIfRegionPresent = value.ProviderType == "" || value.ProviderType == "v4"
+	}
+
+	if checkIfRegionPresent && r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+		r.Error = aws.ErrMissingRegion
+	} else if r.ClientInfo.Endpoint == "" {
+		// No endpoint was provided by the user, and none was derived by the
+		// SDK's endpoint resolver.
+		r.Error = aws.ErrMissingEndpoint
+	}
+	// IBM COS SDK Code -- END
+}}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 00000000000..7aab8aa389c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/IBM/ibm-cos-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+	if !r.ParamsFilled() {
+		return
+	}
+
+	if v, ok := r.Params.(request.Validator); ok {
+		if err := v.Validate(); err != nil {
+			r.Error = err
+		}
+	}
+}}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/user_agent.go
new file mode 100644
index 00000000000..47cb960a224
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/user_agent.go
@@ -0,0 +1,37 @@
+package corehandlers
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
+// to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+	Name: "core.SDKVersionUserAgentHandler",
+	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+		runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec-env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent.
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
+var AddHostExecEnvUserAgentHander = request.NamedHandler{
+	Name: "core.AddHostExecEnvUserAgentHander",
+	Fn: func(r *request.Request) {
+		v := os.Getenv(execEnvVar)
+		if len(v) == 0 {
+			return
+		}
+
+		request.AddToUserAgent(r, execEnvUAKey+"/"+v)
+	},
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 00000000000..dd8cc715878
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+	//
+	// This has been deprecated. For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true.
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated.
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain.
+//
+//     creds := credentials.NewChainCredentials(
+//         []credentials.Provider{
+//             &credentials.EnvProvider{},
+//             &ec2rolecreds.EC2RoleProvider{
+//                 Client: ec2metadata.New(sess),
+//             },
+//         })
+//
+//     // Usage of ChainCredentials with aws.Config
+//     svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+//       Credentials: creds,
+//     })))
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or an error if no provider returned
+// without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
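+//
+// A minimal illustrative sketch: after a successful Retrieve the chain
+// reports expiry based on the cached provider.
+//
+//	chain := &credentials.ChainProvider{
+//		Providers: []credentials.Provider{&credentials.EnvProvider{}},
+//	}
+//	if _, err := chain.Retrieve(); err == nil {
+//		_ = chain.IsExpired() // delegates to the cached EnvProvider
+//	}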
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_background_go1.7.go
new file mode 100644
index 00000000000..388b2154182
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_background_go1.7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package credentials
+
+import "context"
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_go1.9.go
new file mode 100644
index 00000000000..4356edb3d5d
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_go1.9.go
@@ -0,0 +1,13 @@
+// +build go1.9
+
+package credentials
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// This type, aws.Context, and context.Context are equivalent.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/credentials.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 00000000000..881be489db8
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,398 @@
+// Package credentials provides credential retrieval and management.
+//
+// The Credentials is the primary method of getting access to and managing
+// credential Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new
+// credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//     creds := credentials.NewEnvCredentials()
+//
+//     // Retrieve the credentials value
+//     credValue, err := creds.Get()
+//     if err != nil {
+//         // handle error
+//     }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//     creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+//     creds.Expire()
+//     credsValue, err := creds.Get()
+//     // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//     type MyProvider struct{}
+//     func (m *MyProvider) Retrieve() (Value, error) {...}
+//     func (m *MyProvider) IsExpired() bool {...}
+//
+//     creds := credentials.NewCredentials(&MyProvider{})
+//     credValue, err := creds.Get()
+//
+package credentials

+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight"
+
+	// IBM COS SDK Code -- START
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
+	// IBM COS SDK Code -- END
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//     svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//       Credentials: credentials.AnonymousCredentials,
+//     })))
+//     // Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+
+	// Provider Type
+	ProviderType string
+
+	// IBM COS SDK Code -- START
+	// IBM IAM token value
+	token.Token
+
+	// Service Instance ID
+	ServiceInstanceID string
+	// IBM COS SDK Code -- END
+}
+
+// HasKeys returns if the credentials Value has both AccessKeyID and
+// SecretAccessKey value set.
+func (v Value) HasKeys() bool {
+	return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or is empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// ProviderWithContext is a Provider that can retrieve credentials with a Context
+type ProviderWithContext interface {
+	Provider
+
+	RetrieveWithContext(Context) (Value, error)
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+	// The time at which the credentials are no longer valid
+	ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when constructing a known provider is not possible
+// due to an error.
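+//
+// A minimal illustrative sketch (the provider name here is arbitrary):
+//
+//	creds := NewCredentials(ErrorProvider{
+//		Err:          err,
+//		ProviderName: "BrokenProvider",
+//	})
+//	_, getErr := creds.Get() // always returns Err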
+type ErrorProvider struct {
+	// The error to be returned from Retrieve
+	Err error
+
+	// The provider name to set on the retrieved Value
+	ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+	return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+	return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//     type EC2RoleProvider struct {
+//         Expiry
+//         ...
+//     }
+type Expiry struct {
+	// The date/time at which the credentials expire
+	expiration time.Time
+
+	// If set will be used by IsExpired to determine the current time.
+	// Defaults to time.Now if CurrentTime is not set. Available for testing
+	// to be able to mock out the current time.
+	CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+	// Passed in expirations should have the monotonic clock values stripped.
+	// This ensures time comparisons will be based on wall-time.
+	e.expiration = expiration.Round(0)
+	if window > 0 {
+		e.expiration = e.expiration.Add(-window)
+	}
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+	curTime := e.CurrentTime
+	if curTime == nil {
+		curTime = time.Now
+	}
+	return e.expiration.Before(curTime())
+}
+
+// ExpiresAt returns the expiration time of the credential
+func (e *Expiry) ExpiresAt() time.Time {
+	return e.expiration
+}
+
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+	sf singleflight.Group
+
+	m        sync.RWMutex
+	creds    Value
+	provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+	c := &Credentials{
+		provider: provider,
+	}
+	return c
+}
+
+// GetWithContext returns the credentials value, or error if the credentials
+// Value failed to be retrieved. Will return early if the passed in context is
+// canceled.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + // Check if credentials are cached, and not expired. + select { + case curCreds, ok := <-c.asyncIsExpired(): + // ok will only be true, of the credentials were not expired. ok will + // be false and have no value if the credentials are expired. + if ok { + return curCreds, nil + } + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. + resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } +} + +func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) { + c.m.Lock() + defer c.m.Unlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + return curCreds, nil + } + + var creds Value + var err error + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } + if err == nil { + c.creds = creds + } + + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.creds = Value{} +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.RLock() + defer c.m.RUnlock() + + return c.isExpiredLocked(c.creds) +} + +// asyncIsExpired returns a channel of credentials Value. If the channel is +// closed the credentials are expired and credentials value are not empty. +func (c *Credentials) asyncIsExpired() <-chan Value { + ch := make(chan Value, 1) + go func() { + c.m.RLock() + defer c.m.RUnlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + ch <- curCreds + } + + close(ch) + }() + + return ch +} + +// isExpiredLocked helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpiredLocked(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
+func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", + c.creds.ProviderName), + nil) + } + if c.creds == (Value{}) { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 00000000000..f657f3bc1c5 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,210 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/client" + "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. 
+ ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) + req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 00000000000..a0edc133bd7 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,74 @@ +package credentials + +import ( + "os" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. 
+// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/example.ini b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/example.ini new file mode 100644 index 00000000000..7fc91d9d204 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common.go new file mode 100644 index 00000000000..6d2fa6c828d --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common.go @@ -0,0 +1,141 @@ +package ibmiam + +import ( + "runtime" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager" +) + +const ( + // Constants + // Default IBM IAM Authentication Server Endpoint + defaultAuthEndPoint = `https://iam.cloud.ibm.com/identity/token` + + // Logger constants + // Debug Log constant + debugLog = "" + // IBM IAM Provider Log constant + ibmiamProviderLog = "IBM IAM PROVIDER" +) + +// Provider Struct +type Provider struct { + // Name of Provider + providerName string + + // Type of Provider - SharedCred, SharedConfig, etc. 
+	providerType string
+
+	// Token Manager the Provider uses
+	tokenManager tokenmanager.API
+
+	// Service Instance ID passed in to the provider
+	serviceInstanceID string
+
+	// Error
+	ErrorStatus error
+
+	// Logger attributes
+	logger   aws.Logger
+	logLevel *aws.LogLevelType
+}
+
+// NewProvider allows the creation of a custom IBM IAM Provider
+// Parameters:
+//	Provider Name
+//	AWS Config
+//	API Key
+//	IBM IAM Authentication Server Endpoint
+//	Service Instance ID
+//	Token Manager client
+// Returns:
+//	Provider
+func NewProvider(providerName string, config *aws.Config, apiKey, authEndPoint, serviceInstanceID string,
+	client tokenmanager.IBMClientDo) (provider *Provider) { // named return; the linter complains about (provider *Provider)
+	provider = new(Provider)
+
+	provider.providerName = providerName
+	provider.providerType = "oauth"
+
+	logLevel := aws.LogLevel(aws.LogOff)
+	if config != nil && config.LogLevel != nil && config.Logger != nil {
+		logLevel = config.LogLevel
+		provider.logger = config.Logger
+	}
+	provider.logLevel = logLevel
+
+	if apiKey == "" {
+		provider.ErrorStatus = awserr.New("IbmApiKeyIdNotFound", "IBM API Key Id not found", nil)
+		if provider.logLevel.Matches(aws.LogDebug) {
+			provider.logger.Log(debugLog, "", provider.ErrorStatus)
+		}
+		return
+	}
+
+	provider.serviceInstanceID = serviceInstanceID
+
+	if authEndPoint == "" {
+		authEndPoint = defaultAuthEndPoint
+		if provider.logLevel.Matches(aws.LogDebug) {
+			provider.logger.Log(debugLog, "", "using default auth endpoint", authEndPoint)
+		}
+	}
+
+	if client == nil {
+		client = tokenmanager.DefaultIBMClient(config)
+	}
+
+	provider.tokenManager = tokenmanager.NewTokenManagerFromAPIKey(config, apiKey, authEndPoint, nil, nil, nil, client)
+
+	runtime.SetFinalizer(provider, func(p *Provider) {
+		p.tokenManager.StopBackgroundRefresh()
+	})
+
+	return
+}
+
+// IsValid ...
+// Returns:
+//	Provider validation - boolean
+func (p *Provider) IsValid() bool {
+	return nil == p.ErrorStatus
+}
+
+// Retrieve ...
+// Returns:
+//	Credential values
+//	Error
+func (p *Provider) Retrieve() (credentials.Value, error) {
+	if p.ErrorStatus != nil {
+		if p.logLevel.Matches(aws.LogDebug) {
+			p.logger.Log(debugLog, ibmiamProviderLog, p.providerName, p.ErrorStatus)
+		}
+		return credentials.Value{ProviderName: p.providerName}, p.ErrorStatus
+	}
+	tokenValue, err := p.tokenManager.Get()
+	if err != nil {
+		var returnErr error
+		if p.logLevel.Matches(aws.LogDebug) {
+			p.logger.Log(debugLog, ibmiamProviderLog, p.providerName, "ERROR ON GET", err)
+			returnErr = awserr.New("TokenManagerRetrieveError", "error retrieving the token", err)
+		} else {
+			returnErr = awserr.New("TokenManagerRetrieveError", "error retrieving the token", nil)
+		}
+		return credentials.Value{}, returnErr
+	}
+	if p.logLevel.Matches(aws.LogDebug) {
+		p.logger.Log(debugLog, ibmiamProviderLog, p.providerName, "GET TOKEN", tokenValue)
+	}
+
+	return credentials.Value{Token: *tokenValue, ProviderName: p.providerName, ProviderType: p.providerType,
+		ServiceInstanceID: p.serviceInstanceID}, nil
+}
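For orientation, a sketch of wiring this constructor into a credentials object; the API key and service instance ID are placeholders, and NewStaticCredentials (the convenience wrapper defined later in this file set) is used to build the provider:

    package main

    import (
        "github.com/IBM/ibm-cos-sdk-go/aws"
        "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam"
    )

    func main() {
        // An empty authEndPoint falls back to the default IAM token endpoint.
        creds := ibmiam.NewStaticCredentials(aws.NewConfig(),
            "",                  // authEndPoint
            "myApiKey",          // apiKey (placeholder)
            "myServiceInstance") // serviceInstanceID (placeholder)

        // Since the provider always reports IsExpired() == true, every Get
        // consults the token manager, which does the actual caching.
        _, _ = creds.Get()
    }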
+// IsExpired ...
+// Returns:
+//	Always true; token caching and refresh are handled by the token manager
+func (p *Provider) IsExpired() bool {
+	return true
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common_ini_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common_ini_provider.go
new file mode 100644
index 00000000000..5d344803ff0
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common_ini_provider.go
@@ -0,0 +1,67 @@
+package ibmiam
+
+import (
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/internal/ini"
+)
+
+const (
+	// Default Profile
+	defaultProfile = "default"
+)
+
+// commonIniProvider constructor of the IBM IAM provider that loads IAM credentials
+// from an ini file
+// Parameters:
+//	AWS Config
+//	Profile filename
+//	Profile name
+// Returns:
+//	New provider with Provider name, config, API Key, IBM IAM Authentication Server end point,
+//	Service Instance ID
+func commonIniProvider(providerName string, config *aws.Config, filename, profilename string) *Provider {
+
+	// Opens an ini file with the filename passed in for shared credentials
+	// If it fails, returns an error
+	ini, err := ini.OpenFile(filename)
+	if err != nil {
+		e := awserr.New("SharedCredentialsOpenError", "Shared Credentials Open Error", err)
+		logFromConfigHelper(config, "", "", providerName, e)
+		return &Provider{
+			providerName: providerName,
+			ErrorStatus:  e,
+		}
+	}
+
+	// Gets the section of the shared credentials ini file
+	// If it fails, returns an error
+	iniProfile, ok := ini.GetSection(profilename)
+	if !ok {
+		e := awserr.New("SharedCredentialsProfileNotFound",
+			"Shared Credentials Section '"+profilename+"' not Found in file '"+filename+"'", nil)
+		logFromConfigHelper(config, "", "", providerName, e)
+		return &Provider{
+			providerName: providerName,
+			ErrorStatus:  e,
+		}
+	}
+
+	// Populate the IBM IAM Credential values
+	apiKey := iniProfile.String("ibm_api_key_id")
+	serviceInstanceID := iniProfile.String("ibm_service_instance_id")
+	authEndPoint := iniProfile.String("ibm_auth_endpoint")
+
+	return NewProvider(providerName, config, apiKey, authEndPoint, serviceInstanceID, nil)
+}
+
+// logFromConfigHelper logs through the config's Logger when debug logging is enabled
+func logFromConfigHelper(config *aws.Config, params ...interface{}) {
+	logLevel := aws.LogLevel(aws.LogOff)
+	if config != nil && config.LogLevel != nil && config.Logger != nil {
+		logLevel = config.LogLevel
+	}
+	if logLevel.Matches(aws.LogDebug) {
+		config.Logger.Log(params...)
+	}
+}
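For reference, a sketch of the shared-credentials profile these three keys map to, in the same style as the example.ini shipped with the package; the key and instance values are placeholders, and the endpoint shown is this package's default:

    [default]
    ibm_api_key_id = myApiKey
    ibm_service_instance_id = myServiceInstanceID
    ibm_auth_endpoint = https://iam.cloud.ibm.com/identity/token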
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/custom_init_func_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/custom_init_func_provider.go
new file mode 100644
index 00000000000..73735eea2d2
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/custom_init_func_provider.go
@@ -0,0 +1,64 @@
+package ibmiam
+
+import (
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager"
+)
+
+// CustomInitFuncProviderName the Name of the IBM IAM provider with a custom init function
+const CustomInitFuncProviderName = "CustomInitFuncProviderIBM"
+
+// NewCustomInitFuncProvider constructor of the IBM IAM Provider with a custom init function
+// Parameters:
+//	config: AWS Config to provide service configuration for service clients. By default,
+//		all clients will use the defaults.DefaultConfig structure.
+//	initFunc: custom function that retrieves the initial token
+//	authEndPoint: IAM Authentication Server end point
+//	serviceInstanceID: service instance ID of the IBM account
+//	client: Token Management's client
+// Returns:
+//	A complete Provider with Token Manager initialized
+func NewCustomInitFuncProvider(config *aws.Config, initFunc func() (*token.Token, error), authEndPoint,
+	serviceInstanceID string, client tokenmanager.IBMClientDo) *Provider {
+
+	// New provider with oauth request type
+	provider := new(Provider)
+	provider.providerName = CustomInitFuncProviderName
+	provider.providerType = "oauth"
+
+	// Initializes the logger and inserts it into the provider
+	logLevel := aws.LogLevel(aws.LogOff)
+	if config != nil && config.LogLevel != nil && config.Logger != nil {
+		logLevel = config.LogLevel
+		provider.logger = config.Logger
+	}
+	provider.logLevel = logLevel
+
+	provider.serviceInstanceID = serviceInstanceID
+
+	// Checks the IBM IAM Authentication Server Endpoint; if none, sets the default auth end point
+	if authEndPoint == "" {
+		authEndPoint = defaultAuthEndPoint
+		if provider.logLevel.Matches(aws.LogDebug) {
+			provider.logger.Log("", "", "using default auth endpoint", authEndPoint)
+		}
+	}
+
+	// Checks if the client has been passed in; otherwise, creates one with the token manager's default IBM client
+	if client == nil {
+		client = tokenmanager.DefaultIBMClient(config)
+	}
+
+	provider.tokenManager = tokenmanager.NewTokenManager(config, initFunc, authEndPoint, nil, nil, nil, client)
+	return provider
+}
+
+// NewCustomInitFuncCredentials constructor
+func NewCustomInitFuncCredentials(config *aws.Config, initFunc func() (*token.Token, error), authEndPoint,
+	serviceInstanceID string) *credentials.Credentials {
+	return credentials.NewCredentials(NewCustomInitFuncProvider(config, initFunc, authEndPoint,
+		serviceInstanceID, nil))
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/env_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/env_provider.go
new file mode 100644
index 00000000000..514fe705ba0
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/env_provider.go
@@ -0,0 +1,32 @@
+package ibmiam
+
+import (
+	"os"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+)
+
+// EnvProviderName name of the IBM IAM provider that loads IAM credentials from environment
+// variables
+const EnvProviderName = "EnvProviderIBM"
+
+// NewEnvProvider constructor of the IBM IAM provider that loads IAM credentials from environment
+// variables
+// Parameter:
+//	AWS Config
+// Returns:
+//	A new provider with AWS config, API Key, IBM IAM Authentication Server Endpoint and
+//	Service Instance ID
+func NewEnvProvider(config *aws.Config) *Provider {
+	apiKey := os.Getenv("IBM_API_KEY_ID")
+	serviceInstanceID := os.Getenv("IBM_SERVICE_INSTANCE_ID")
+	authEndPoint := os.Getenv("IBM_AUTH_ENDPOINT")
+
+	return NewProvider(EnvProviderName, config, apiKey, authEndPoint, serviceInstanceID, nil)
+}
+
+// NewEnvCredentials Constructor
+func NewEnvCredentials(config *aws.Config) *credentials.Credentials {
+	return credentials.NewCredentials(NewEnvProvider(config))
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_config_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_config_provider.go
new file mode 100644
index 00000000000..4133a6cf4e1
--- /dev/null
+++ 
b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_config_provider.go @@ -0,0 +1,70 @@ +package ibmiam + +import ( + "os" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults" +) + +const ( + // SharedConfProviderName of the IBM IAM provider that loads IAM credentials + // from shared config + SharedConfProviderName = "SharedConfigProviderIBM" + + // Profile prefix + profilePrefix = "profile " +) + +// NewSharedConfigProvider constructor of the IBM IAM provider that loads IAM Credentials +// from shared config +// Parameters: +// AWS Config +// Profile filename +// Profile name +// Returns: +// Common Ini Provider with values +func NewSharedConfigProvider(config *aws.Config, filename, profilename string) *Provider { + + // Sets the file name from possible locations + // - AWS_CONFIG_FILE environment variable + // Error if the filename is missing + if filename == "" { + filename = os.Getenv("AWS_CONFIG_FILE") + if filename == "" { + // BUG? + home := shareddefaults.UserHomeDir() + if home == "" { + e := awserr.New("SharedCredentialsHomeNotFound", "Shared Credentials Home folder not found", nil) + logFromConfigHelper(config, "", "", SharedConfProviderName, e) + return &Provider{ + providerName: SharedConfProviderName, + ErrorStatus: e, + } + } + filename = shareddefaults.SharedConfigFilename() + } + } + + // Sets the profile name + // Otherwise sets the prefix with profile name passed in + if profilename == "" { + profilename = os.Getenv("AWS_PROFILE") + if profilename == "" { + profilename = defaultProfile + } else { + profilename = profilePrefix + profilename + } + } else { + profilename = profilePrefix + profilename + } + + return commonIniProvider(SharedConfProviderName, config, filename, profilename) +} + +// NewConfigCredentials Constructor +func NewConfigCredentials(config *aws.Config, filename, profilename string) *credentials.Credentials { + return credentials.NewCredentials(NewSharedConfigProvider(config, filename, profilename)) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_credentials_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_credentials_provider.go new file mode 100644 index 00000000000..76ee0831bb3 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_credentials_provider.go @@ -0,0 +1,65 @@ +package ibmiam + +import ( + "os" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults" +) + +const ( + // SharedCredsProviderName name of the IBM IAM provider that loads IAM credentials + // from shared credentials file + SharedCredsProviderName = "SharedCredentialsProviderIBM" +) + +// NewSharedCredentialsProvider constructor of the IBM IAM provider that loads +// IAM credentials from shared credentials file +// Parameters: +// AWS Config +// Profile filename +// Profile prefix +// Returns: +// Common initial provider with config file/profile +func NewSharedCredentialsProvider(config *aws.Config, filename, profilename string) *Provider { + + // Sets the file name from possible locations + // - AWS_SHARED_CREDENTIALS_FILE environment variable + // Error if the filename is missing + if filename == "" { + filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + if filename == "" { + // BUG where 
will we use home?
+			home := shareddefaults.UserHomeDir()
+			if home == "" {
+				e := awserr.New("SharedCredentialsHomeNotFound", "Shared Credentials Home folder not found", nil)
+				logFromConfigHelper(config, "", "", SharedCredsProviderName, e)
+				return &Provider{
+					providerName: SharedCredsProviderName,
+					ErrorStatus:  e,
+				}
+			}
+			filename = shareddefaults.SharedCredentialsFilename()
+		}
+	}
+
+	// Sets the profile name from the AWS_PROFILE environment variable
+	// Otherwise sets the profile name to the defaultProfile
+	if profilename == "" {
+		profilename = os.Getenv("AWS_PROFILE")
+		if profilename == "" {
+			profilename = defaultProfile
+		}
+	}
+
+	return commonIniProvider(SharedCredsProviderName, config, filename, profilename)
+}
+
+// NewSharedCredentials constructor for the IBM IAM provider that loads IAM credentials
+// from the shared credentials file
+// Returns:
+//	credentials.NewCredentials(NewSharedCredentialsProvider()) (AWS type)
+func NewSharedCredentials(config *aws.Config, filename, profilename string) *credentials.Credentials {
+	return credentials.NewCredentials(NewSharedCredentialsProvider(config, filename, profilename))
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/static_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/static_provider.go
new file mode 100644
index 00000000000..561b7ba4da6
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/static_provider.go
@@ -0,0 +1,21 @@
+package ibmiam
+
+import (
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+)
+
+// StaticProviderName name of the IBM IAM provider that uses IAM details passed directly
+const StaticProviderName = "StaticProviderIBM"
+
+// NewStaticProvider constructor of the IBM IAM provider that uses IAM details passed directly
+// Returns: New Provider (AWS type)
+func NewStaticProvider(config *aws.Config, authEndPoint, apiKey, serviceInstanceID string) *Provider {
+	return NewProvider(StaticProviderName, config, apiKey, authEndPoint, serviceInstanceID, nil)
+}
+
+// NewStaticCredentials constructor for IBM IAM that uses IAM credentials passed in
+// Returns: credentials.NewCredentials(NewStaticProvider()) (AWS type)
+func NewStaticCredentials(config *aws.Config, authEndPoint, apiKey, serviceInstanceID string) *credentials.Credentials {
+	return credentials.NewCredentials(NewStaticProvider(config, authEndPoint, apiKey, serviceInstanceID))
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token/token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token/token.go
new file mode 100644
index 00000000000..c6b290a34bd
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token/token.go
@@ -0,0 +1,40 @@
+package token
+
+import "encoding/json"
+
+// Token holds the IBM IAM token details
+type Token struct {
+
+	// Sets the access token
+	AccessToken string `json:"access_token"`
+
+	// Sets the refresh token
+	RefreshToken string `json:"refresh_token"`
+
+	// Sets the token type
+	TokenType string `json:"token_type"`
+
+	// Scope string `json:"scope"`
+
+	// Number of seconds the token is valid for
+	ExpiresIn int64 `json:"expires_in"`
+
+	// Unix timestamp at which the token expires
+	Expiration int64 `json:"expiration"`
+}
+
+// Error type to help parse errors of IAM calls
+type Error struct {
+	Context      map[string]interface{} `json:"context"`
+	ErrorCode    string                 `json:"errorCode"`
+	ErrorMessage string                 `json:"errorMessage"`
+}
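To make the JSON field mapping concrete, a small sketch decoding an illustrative IAM response body into this Token type; all literal values are invented:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
    )

    func main() {
        // Shape of a successful IAM token response; values are placeholders.
        body := []byte(`{
            "access_token": "eyJraWQiOi...",
            "refresh_token": "OKDvw...",
            "token_type": "Bearer",
            "expires_in": 3600,
            "expiration": 1600000000
        }`)

        var t token.Token
        if err := json.Unmarshal(body, &t); err != nil {
            panic(err)
        }
        fmt.Println(t.TokenType, t.ExpiresIn) // Bearer 3600
    }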
+// Error serializes the IAM error details as a JSON string
+func (ie *Error) Error() string {
+	bytes, err := json.Marshal(ie)
+	if err != nil {
+		return err.Error()
+	}
+	return string(bytes)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/client.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/client.go
new file mode 100644
index 00000000000..3b425f36c3f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/client.go
@@ -0,0 +1,176 @@
+package tokenmanager
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+)
+
+var (
+	// NewIBMClient client constructor
+	NewIBMClient = newIBMClient
+
+	// DefaultIBMClient client constructor with default values
+	DefaultIBMClient = defaultIBMClient
+)
+
+// IBMClientDo wrapper type to the Do operation
+type IBMClientDo interface {
+
+	// HTTP Client Do op
+	Do(req *http.Request) (*http.Response, error)
+}
+
+// IBM Client Implementation type wrapper
+type defaultIBMCImplementation struct {
+
+	// Internal client
+	Client *http.Client
+
+	// Sets the maximum number of retries
+	MaxRetries int
+
+	// Initial backoff duration before the first retry
+	InitialBackOff time.Duration
+
+	// Computes the next backoff duration from the previous one
+	BackOffProgression func(duration time.Duration) time.Duration
+
+	// Logger for the client implementation
+	logger aws.Logger
+
+	// Log level for the client implementation
+	logLevel *aws.LogLevelType
+}
+
+// newIBMClient constructor
+// Parameters:
+//	AWS Config
+//	Initial backoff before the first retry
+//	Backoff progression function
+// Returns:
+//	Default IBM Client Implementation
+func newIBMClient(config *aws.Config, initialBackOff time.Duration,
+	backOffProgression func(time.Duration) time.Duration) *defaultIBMCImplementation {
+	var httpClient *http.Client
+	if config != nil && config.HTTPClient != nil {
+		httpClient = config.HTTPClient
+	} else {
+		httpClient = http.DefaultClient
+	}
+
+	// Initializes the maximum number of retries
+	maxRetries := 0
+	if config != nil && config.MaxRetries != nil && *config.MaxRetries > maxRetries {
+		maxRetries = *config.MaxRetries
+	}
+
+	// Sets the log level
+	logLevel := aws.LogLevel(aws.LogOff)
+	if config != nil && config.LogLevel != nil && config.Logger != nil {
+		logLevel = config.LogLevel
+	}
+
+	// If the initial backoff is less than zero, sets it to zero
+	if initialBackOff < time.Duration(0) {
+		initialBackOff = time.Duration(0)
+	}
+
+	// If the backoff progression is nil, sets it to a constant zero duration
+	if backOffProgression == nil {
+		backOffProgression = func(_ time.Duration) time.Duration { return time.Duration(0) }
+	}
+
+	// Guards against a nil config; the logger is only used behind logLevel
+	// checks, and logLevel stays at LogOff when no config was supplied
+	var logger aws.Logger
+	if config != nil {
+		logger = config.Logger
+	}
+
+	return &defaultIBMCImplementation{
+		Client:             httpClient,
+		MaxRetries:         maxRetries,
+		InitialBackOff:     initialBackOff,
+		BackOffProgression: backOffProgression,
+		logger:             logger,
+		logLevel:           logLevel,
+	}
+}
+
+// Default IBM Client
+// Parameter:
+//	AWS Config
+// Returns:
+//	An HTTP client with a 500ms initial backoff growing by a factor of 1.75
+func defaultIBMClient(config *aws.Config) *defaultIBMCImplementation {
+	f := func(duration time.Duration) time.Duration {
+		return time.Duration(float64(duration.Nanoseconds())*1.75) * time.Nanosecond
+	}
+	return newIBMClient(config, 500*time.Millisecond, f)
+}
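A sketch of plugging a custom retry schedule into the exported NewIBMClient hook; the doubling progression and the MaxRetries value are assumptions chosen for illustration:

    package main

    import (
        "time"

        "github.com/IBM/ibm-cos-sdk-go/aws"
        "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager"
    )

    func main() {
        // Retry up to 3 times, waiting 250ms, then 500ms, then 1s.
        cfg := aws.NewConfig().WithMaxRetries(3)
        double := func(d time.Duration) time.Duration { return 2 * d }

        client := tokenmanager.NewIBMClient(cfg, 250*time.Millisecond, double)
        _ = client // satisfies IBMClientDo; can be passed to ibmiam.NewProvider
    }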
+// Do executes the HTTP request, retrying failed attempts with backoff
+// Parameter:
+//	An HTTP Request Object
+// Returns:
+//	An HTTP Response Object
+//	Error
+func (c *defaultIBMCImplementation) Do(req *http.Request) (r *http.Response, e error) {
+
+	// Enables logging if the debug log level is turned on
+	if c.logLevel.Matches(aws.LogDebug) {
+		c.logger.Log(debugLog, defaultIBMCImpLog, req.Method, req.URL)
+	}
+	r, e = c.Client.Do(req)
+	if e == nil && isSuccess(r) {
+		return
+	}
+
+	// Captures the response status when a response was returned
+	var status string
+	if r != nil {
+		status = r.Status
+	}
+
+	// Logs the failed request
+	if c.logLevel.Matches(aws.LogDebugWithRequestErrors) {
+		c.logger.Log(debugLog, defaultIBMCImpLog, req.Method, req.URL, "Status:", status, "Error:", e)
+	}
+
+	// Retries with progressively longer backoff until MaxRetries attempts have been made
+	for i, sleep := 0, c.InitialBackOff; i < c.MaxRetries; i, sleep = i+1, c.BackOffProgression(sleep) {
+		if c.logLevel.Matches(aws.LogDebugWithRequestRetries) {
+			c.logger.Log(debugLog, defaultIBMCImpLog, req.Method, req.URL, "Retry:", i+1)
+		}
+		time.Sleep(sleep)
+		req = copyRequest(req)
+		r, e = c.Client.Do(req)
+		if e == nil && isSuccess(r) {
+			return
+		}
+
+		if r != nil {
+			status = r.Status
+		}
+		if c.logLevel.Matches(aws.LogDebugWithRequestErrors) {
+			c.logger.Log(debugLog, defaultIBMCImpLog, req.Method,
+				req.URL, "Retry:", i+1, "Status:", status, "Error:", e)
+		}
+	}
+	return
+}
+
+// copyRequest only copies method, URL, body, and headers.
+// Tightly coupled to the token manager and the way its requests are built.
+// Parameter:
+//	An HTTP Request object
+// Returns:
+//	A built HTTP Request object with headers
+func copyRequest(r *http.Request) *http.Request {
+	buf, _ := ioutil.ReadAll(r.Body)
+	newReader := ioutil.NopCloser(bytes.NewBuffer(buf))
+	req, _ := http.NewRequest(r.Method, r.URL.String(), newReader)
+	for k, lv := range r.Header {
+		for _, v := range lv {
+			req.Header.Add(k, v)
+		}
+	}
+	return req
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/helper.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/helper.go
new file mode 100644
index 00000000000..25b4db91f41
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/helper.go
@@ -0,0 +1,43 @@
+package tokenmanager
+
+import "net/http"
+
+// Constant values for the token manager package and testcases
+const (
+	// Not Match Constants
+	httpClientNotMatch         = "Http Client did not match"
+	maxRetriesNotMatch         = "Max Retries did not match"
+	logLevelNotMatch           = "Log Level did not match"
+	loggerNotMatch             = "logger did not match"
+	backoffProgressionNotMatch = "BackOff Progression did not match"
+	numberOfLogEntriesNotMatch = "Number of log entries do not match"
+	tokensNotMatch             = "Tokens do not Match"
+
+	// Backoff constants
+	initialBackoffUnset     = "Initial BackOff unset"
+	backoffProgressionUnset = "BackOff Progression unset"
+
+	// Error constants
+	errorBuildingRequest = "Error Building Request"
+	badNumberOfRetries   = "Bad Number of retries"
+	errorGettingToken    = "Error getting token"
+
+	// Global LOGGER constant
+	debugLog = ""
+
+	// LOGGER constant for IBM Client Implementation
+	defaultIBMCImpLog = "defaultIBMCImplementation"
+
+	// LOGGER constant for IBM Token Management Implementation
+	defaultTMImpLog      = "defaultTMImplementation"
+	getOpsLog            = "GET OPERATION"
+	backgroundRefreshLog = "BACKGROUND REFRESH"
+
+	// Global constants
+	endPoint = "EndPoint"
+)
+
+// isSuccess reports whether the response has a success status code (200 <= code < 300)
+func isSuccess(response *http.Response) bool {
+	return response.StatusCode >= 200 && response.StatusCode < 300
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager.go
new file mode 100644
index 00000000000..db2f3432ad5
---
/dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager.go @@ -0,0 +1,539 @@ +package tokenmanager + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token" +) + +// Constants used to retrieve the initial token and to refresh tokens +const ( + iamClientID = "bx" + iamClientSecret = "bx" + + grantAPIKey = "urn:ibm:params:oauth:grant-type:apikey" + grantRefreshToken = "refresh_token" +) + +var ( + // minimum time before expiration to refresh + minimumDelta = time.Duration(3) * time.Second + + // minimum time between refresh daemons calls, + // to avoid background thread flooding and + // starvation of the Get when the token does not renew + minimumWait = time.Duration(1) * time.Second + + // DefaultAdvisoryTimeoutFunc set the advisory timeout to 25% of remaining time - usually 15 minutes on 1 hour expiry + DefaultAdvisoryTimeoutFunc = func(ttl time.Duration) time.Duration { + return time.Duration(float64(ttl.Nanoseconds())*0.25) * time.Nanosecond + } + + // DefaultMandatoryTimeoutFunc set the mandatory timeout to 17% of remaining time - usually 10 minutes on 1 hour expiry + DefaultMandatoryTimeoutFunc = func(ttl time.Duration) time.Duration { + return time.Duration(float64(ttl.Nanoseconds())*0.17) * time.Nanosecond + } + + // ErrFetchingIAMTokenFn returns the error fetching token for Token Manager + ErrFetchingIAMTokenFn = func(err error) awserr.Error { + return awserr.New("ErrFetchingIAMToken", "error fetching token", err) + } +) + +type defaultTMImplementation struct { + // endpoint used to retrieve tokens + authEndPoint string + // client used to retrieve tokens, implements the Retry behaviour + client IBMClientDo + + // timeout used by background thread + advisoryRefreshTimeout func(ttl time.Duration) time.Duration + // timeout used by get token to decide if blocks and refresh token + // and by background refresh when advisory not set or smaller than mandatory + mandatoryRefreshTimeout func(ttl time.Duration) time.Duration + // time provider used to get current time + timeProvider func() time.Time + // original time to live of the token at the moment it was retrieved + tokenTTL time.Duration + + // token value kept for its TTL + Cache *token.Token + // timer used to refresh the Token + timer *time.Timer + // nullable boolean used to enable disable the background refresh + enableBackgroundRefresh *bool + // read write mutex to sync access + mutex sync.RWMutex + // function used to retrieve initial token + initFunc func() (*token.Token, error) + + // logger where the logging is sent + logger aws.Logger + // level of logging enabled + logLevel *aws.LogLevelType +} + +// function to create a new token manager using an APIKey to retrieve first token +func newTokenManagerFromAPIKey(config *aws.Config, apiKey, authEndPoint string, advisoryRefreshTimeout, + mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time, + client IBMClientDo) *defaultTMImplementation { + // when the client is nil creates a new one using the config passed as argument + if client == nil { + client = defaultIBMClient(config) + } + + // set the function to get the initial token the defaultInit that uses the APIKey passed as argument + initFunc := defaultInit(apiKey, authEndPoint, client) + return newTokenManager(config, initFunc, 
authEndPoint, advisoryRefreshTimeout, mandatoryRefreshTimeout, timeFunc, + client) +} + +// default init function, +// uses the APIKey passed as argument to obtain the first token +func defaultInit(apiKey string, authEndPoint string, client IBMClientDo) func() (*token.Token, error) { + return func() (*token.Token, error) { + data := url.Values{ + "apikey": {apiKey}, + } + // build the http request + req, err := buildRequest(authEndPoint, grantAPIKey, data) + // checks for errors + if err != nil { + return nil, ErrFetchingIAMTokenFn(err) + } + // calls the end point + response, err := client.Do(req) + // checks for errors + if err != nil { + return nil, ErrFetchingIAMTokenFn(err) + } + // parse the response + tokenValue, err := processResponse(response) + // checks for errors + if err != nil { + return nil, ErrFetchingIAMTokenFn(err) + } + // returns the token + return tokenValue, nil + } +} + +// creates a token manager, +// the initial token is obtained using a custom function +func newTokenManager(config *aws.Config, initFunc func() (*token.Token, error), authEndPoint string, + advisoryRefreshTimeout, mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time, + client IBMClientDo) *defaultTMImplementation { + // if no time function passed uses the time.Now + if timeFunc == nil { + timeFunc = time.Now + } + + // if no value passed use the one stored as global + if advisoryRefreshTimeout == nil { + advisoryRefreshTimeout = DefaultAdvisoryTimeoutFunc + } + + // if no value passed use the one stored as global + if mandatoryRefreshTimeout == nil { + mandatoryRefreshTimeout = DefaultMandatoryTimeoutFunc + } + + // checks the logLevel and logger, + // only sets the loveLevel when logLevel and logger are not ZERO values + // helps reducing the logic since logLevel needs to be checked + logLevel := aws.LogLevel(aws.LogOff) + if config != nil && config.LogLevel != nil && config.Logger != nil { + logLevel = config.LogLevel + } + + // builds a defaultTMImplementation using the provided parameters + tm := &defaultTMImplementation{ + authEndPoint: authEndPoint, + client: client, + advisoryRefreshTimeout: advisoryRefreshTimeout, + mandatoryRefreshTimeout: mandatoryRefreshTimeout, + timeProvider: timeFunc, + initFunc: initFunc, + + logLevel: logLevel, + logger: config.Logger, + } + return tm +} + +// function to obtain to initialize the token manager in a concurrent safe way +func (tm *defaultTMImplementation) init() (*token.Token, error) { + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, "INIT") + } + // fetches the initial vale using the init function + tokenValue, err := tm.initFunc() + if err != nil { + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, "INIT FAILED", err) + } + return nil, err + } + // sets current cache value the value fetched by the init call + tm.Cache = tokenValue + result := *tm.Cache + // sets token time to live + tm.tokenTTL = getTTL(tokenValue.Expiration, tm.timeProvider) + // checks and sets if background thread is enabled + if tm.enableBackgroundRefresh == nil { + tm.enableBackgroundRefresh = aws.Bool(true) + } + // resets the time + tm.resetTimer() + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, "INIT SUCCEEDED") + } + return &result, nil +} + +// function to call the init operation in a concurrent safe way, managing the RWLock +func retrieveInit(tm 
*defaultTMImplementation) (unlockOP func(), tk *token.Token, err error) { + // escalate the READ lock to a WRITE lock + now := time.Now() + tm.mutex.RUnlock() + tm.mutex.Lock() + // set unlock Operation to Write Unlock + unlockOP = tm.mutex.Unlock + + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, getOpsLog, + "TOKEN MANAGER NOT INITIALIZED - ACQUIRED FULL LOCK IN", time.Now().Sub(now)) + } + + // since another routine could be scheduled between the release of Read mutex and the acquire of Write mutex + // re-check the init is still required + if tm.Cache == nil { + tk, err = tm.init() + } else { + tk = retrieveCheckGet(tm) + } + return +} + +// function to call the refresh operation in a concurrent safe way, managing the RWLock +func retrieveFetch(tm *defaultTMImplementation) (unlockOP func(), tk *token.Token, err error) { + + // escalate the READ lock to a WRITE lock + now := time.Now() + tm.mutex.RUnlock() + tm.mutex.Lock() + // set unlock Operation to Write Unlock + unlockOP = tm.mutex.Unlock + + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, getOpsLog, + "TOKEN REFRESH - ACQUIRED FULL LOCK IN", time.Now().Sub(now)) + } + + // since another routine could be scheduled between the release of Read mutex and the acquire of Write mutex + // re-check the refresh is still required + tk = retrieveCheckGet(tm) + for tk == nil { + err := tm.refresh() + if err != nil { + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, getOpsLog, "REFRESH FAILED", err) + } + return unlockOP, nil, err + } + tk = retrieveCheckGet(tm) + } + + return +} + +// function to check the the token in cache and get it if valid +func retrieveCheckGet(tm *defaultTMImplementation) (tk *token.Token) { + // calculates the TTL of the token in the cache + tokenTTL := waitingTime(tm.tokenTTL, tm.Cache.Expiration, nil, tm.mandatoryRefreshTimeout, tm.timeProvider) + // check if token is valid + if tokenTTL == nil || *tokenTTL > minimumDelta { + // set result to be cache content + tk = tm.Cache + } + return +} + +// Get retrieves the value of the auth token, checks the cache if the token is valid returns it, +// if not valid does a refresh and then returns it +func (tm *defaultTMImplementation) Get() (tk *token.Token, err error) { + + // holder for the func to be called in the defer + var unlockOP func() + // defer the call of the unlock operation + defer func() { + unlockOP() + }() + + now := time.Now() + + // acquire Read lock + tm.mutex.RLock() + // set unlock operation to ReadUnlock + unlockOP = tm.mutex.RUnlock + + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, getOpsLog, "ACQUIRED RLOCK IN", time.Now().Sub(now)) + } + + //check if cache was initialized + if tm.Cache == nil { + // if cache not initialized, initialize it + unlockOP, tk, err = retrieveInit(tm) + return + } + + // check and retrieves content of cache + tk = retrieveCheckGet(tm) + // check if the content of cache is valid + if tk == nil { + // content of the cache invalid + // refresh cache content + unlockOP, tk, err = retrieveFetch(tm) + } + + return +} + +// function to do the refresh operation calls +func (tm *defaultTMImplementation) refresh() error { + // stop the timer + tm.stopTimer() + // defer timer reset + defer tm.resetTimer() + // set the refresh token parameter of the request + data := url.Values{ + 
"refresh_token": {tm.Cache.RefreshToken}, + } + // build the request + req, err := buildRequest(tm.authEndPoint, grantRefreshToken, data) + if err != nil { + return ErrFetchingIAMTokenFn(err) + } + // call the endpoint + response, err := tm.client.Do(req) + if err != nil { + return ErrFetchingIAMTokenFn(err) + } + // parse the response + tokenValue, err := processResponse(response) + if err != nil { + return ErrFetchingIAMTokenFn(err) + } + // sets current token to the value fetched + tm.Cache = tokenValue + // sets TTL + tm.tokenTTL = getTTL(tokenValue.Expiration, tm.timeProvider) + return nil +} + +// Refresh forces the refresh of the token in the cache in a concurrent safe way +func (tm *defaultTMImplementation) Refresh() error { + // acquire a Write lock + tm.mutex.Lock() + // defer the release of the write lock + defer tm.mutex.Unlock() + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, "MANUAL TRIGGER BACKGROUND REFRESH") + } + return tm.refresh() +} + +// callback function used by to timer to refresh tokens in background +func (tm *defaultTMImplementation) backgroundRefreshFunc() { + now := time.Now() + // acquire a Write lock + tm.mutex.Lock() + // defer the release of the write lock + defer tm.mutex.Unlock() + + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, backgroundRefreshLog, + "ACQUIRED FULL LOCK IN", time.Now().Sub(now)) + } + wait := waitingTime(tm.tokenTTL, tm.Cache.Expiration, tm.advisoryRefreshTimeout, + tm.mandatoryRefreshTimeout, tm.timeProvider) + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, backgroundRefreshLog, "TOKEN TTL", wait) + } + if wait != nil && *wait < minimumDelta { + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, backgroundRefreshLog, "TOKEN NEED UPDATE") + } + tm.refresh() + } else { + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, backgroundRefreshLog, "TOKEN UPDATE SKIPPED") + } + tm.resetTimer() + } +} + +// StopBackgroundRefresh force the stop of the refresh background token in a concurrent safe way +func (tm *defaultTMImplementation) StopBackgroundRefresh() { + // acquire a Write lock + tm.mutex.Lock() + // defer the release of the write lock + defer tm.mutex.Unlock() + tm.stopTimer() + tm.enableBackgroundRefresh = aws.Bool(false) + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, "STOP BACKGROUND REFRESH") + } +} + +// StartBackgroundRefresh starts the background refresh thread in a concurrent sage way +func (tm *defaultTMImplementation) StartBackgroundRefresh() { + // acquire a Write lock + tm.mutex.Lock() + // defer the release of the write lock + defer tm.mutex.Unlock() + tm.enableBackgroundRefresh = aws.Bool(true) + tm.resetTimer() + // checks logLevel and logs + if tm.logLevel.Matches(aws.LogDebug) { + tm.logger.Log(debugLog, defaultTMImpLog, "START BACKGROUND REFRESH") + } +} + +// helper function to stop the timer in the token manager used to trigger token background refresh +func (tm *defaultTMImplementation) stopTimer() { + if tm.timer != nil { + tm.timer.Stop() + } +} + +// helper function used to reset the timer +func (tm *defaultTMImplementation) resetTimer() { + // checks if background refresh is enabled + if tm.enableBackgroundRefresh != nil && 
*tm.enableBackgroundRefresh { + // calculates the how long tpo wait for next refresh + refreshIn := waitingTime(tm.tokenTTL, tm.Cache.Expiration, tm.advisoryRefreshTimeout, + tm.mandatoryRefreshTimeout, tm.timeProvider) + // checks if waiting time is not nil, + // no nedd to refresh + if refreshIn != nil { + // checks if timer exists + // rest time of the existing timer + if tm.timer != nil { + if minimumWait > *refreshIn { + *refreshIn = minimumWait + } + tm.timer.Reset(*refreshIn) + } else { + // if timer not exists + // create a new timer + tm.timer = time.AfterFunc(*refreshIn, tm.backgroundRefreshFunc) + } + } else { + tm.timer = nil + } + } +} + +// helper function used to build the http request used to retrieve initial and refresh tokens +func buildRequest(endPoint string, grantType string, customValues url.Values) (*http.Request, error) { + data := url.Values{ + "grant_type": {grantType}, + "response_type": {"cloud_iam"}, + } + for key, value := range customValues { + data[key] = value + } + req, err := http.NewRequest(http.MethodPost, endPoint, strings.NewReader(data.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", + iamClientID, iamClientSecret)))) + req.Header.Set("accept", "application/json") + req.Header.Set("Cache-control", "no-Cache") + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req, nil +} + +// helper function used to parse the http response into a Token struct +func processResponse(response *http.Response) (*token.Token, error) { + bodyContent, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, err + } + err = response.Body.Close() + if err != nil { + return nil, err + } + if isSuccess(response) { + tokenValue := token.Token{} + err = json.Unmarshal(bodyContent, &tokenValue) + if err != nil { + return nil, err + } + return &tokenValue, nil + } else if response.StatusCode == 400 || response.StatusCode == 401 || response.StatusCode == 403 { + apiErr := token.Error{} + err = json.Unmarshal(bodyContent, &apiErr) + if err != nil { + return nil, err + } + return nil, &apiErr + } else { + return nil, fmt.Errorf("Response: Bad Status Code: %s", response.Status) + } +} + +// helper function used to calculate the time before token expires, +// it takes in consideration the mandatory and advisory timeouts +func waitingTime(ttl time.Duration, unixTime int64, advisoryRefreshTimeout, + mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time) *time.Duration { + if unixTime == 0 { + return nil + } + timeoutAt := time.Unix(unixTime, 0) + result := timeoutAt.Sub(timeFunc()) + delta := minimumDelta + if advisoryRefreshTimeout != nil && advisoryRefreshTimeout(ttl) > minimumDelta { + delta = advisoryRefreshTimeout(ttl) + } + if mandatoryRefreshTimeout != nil && mandatoryRefreshTimeout(ttl) > delta { + delta = mandatoryRefreshTimeout(ttl) + } + result -= delta + return &result +} + +func getTTL(unixTime int64, timeFunc func() time.Time) time.Duration { + if unixTime > 0 { + timeoutAt := time.Unix(unixTime, 0) + return timeoutAt.Sub(timeFunc()) + } + return 0 +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager_interface.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager_interface.go new file mode 100644 index 00000000000..49dc7201f8e --- /dev/null +++ 
b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager_interface.go
@@ -0,0 +1,45 @@
+package tokenmanager
+
+import (
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
+)
+
+// API Token Manager interface
+type API interface {
+
+	// Token Management Get Function
+	Get() (*token.Token, error)
+
+	// Token Management Refresh Function
+	Refresh() error
+
+	// Token Management Stop Background Refresh
+	StopBackgroundRefresh()
+
+	// Token Management Start Background Refresh
+	StartBackgroundRefresh()
+}
+
+// Default implementations wrapped in the interface
+var (
+	// NewTokenManager token manager constructor using a custom initial function to retrieve the first token
+	NewTokenManager = func(config *aws.Config, initFunc func() (*token.Token, error), authEndPoint string,
+		advisoryRefreshTimeout, mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time,
+		client IBMClientDo) API {
+		return newTokenManager(config, initFunc, authEndPoint, advisoryRefreshTimeout, mandatoryRefreshTimeout,
+			timeFunc, client)
+	}
+
+	// NewTokenManagerFromAPIKey token manager constructor using an API key to retrieve the first token
+	NewTokenManagerFromAPIKey = func(config *aws.Config, apiKey, authEndPoint string, advisoryRefreshTimeout,
+		mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time,
+		client IBMClientDo) API {
+		return newTokenManagerFromAPIKey(config, apiKey, authEndPoint, advisoryRefreshTimeout,
+			mandatoryRefreshTimeout, timeFunc, client)
+	}
+)
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds/provider.go
new file mode 100644
index 00000000000..a81dda5f32d
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds/provider.go
@@ -0,0 +1,426 @@
+/*
+Package processcreds is a credential Provider to retrieve `credential_process`
+credentials.
+
+WARNING: The following describes a method of sourcing credentials from an external
+process. This can potentially be dangerous, so proceed with caution. Other
+credential providers should be preferred if at all possible. If using this
+option, you should make sure that the config file is as locked down as possible
+using security best practices for your operating system.
+
+You can use credentials from a `credential_process` in a variety of ways.
+
+One way is to set up your shared config file, located in the default
+location, with the `credential_process` key and the command you want to be
+called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+
+	[default]
+	credential_process = /command/to/call
+
+Creating a new session will use the credential process to retrieve credentials.
+NOTE: If there are credentials in the profile you are using, the credential
+process will not be used.
+
+	// Initialize a session to load credentials.
+	sess, _ := session.NewSession(&aws.Config{
+		Region: aws.String("us-east-1")},
+	)
+
+	// Create S3 service client to use the credentials.
+	svc := s3.New(sess)
+
+Another way to use the `credential_process` method is by using
+`credentials.NewCredentials()` and providing a command to be executed to
+retrieve credentials:
+
+	// Create credentials using the ProcessProvider.
+	creds := processcreds.NewCredentials("/path/to/command")
+
+	// Create service client value configured for credentials.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To
+set a one minute timeout:
+
+	// Create credentials using the ProcessProvider.
+	creds := processcreds.NewCredentialsTimeout(
+		"/path/to/command",
+		time.Duration(1) * time.Minute)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+	creds := processcreds.NewCredentials(
+		"/path/to/command",
+		func(opt *ProcessProvider) {
+			opt.Timeout = time.Duration(2) * time.Minute
+			opt.Duration = time.Duration(60) * time.Minute
+			opt.MaxBufSize = 2048
+		})
+
+You can also use your own `exec.Cmd`:
+
+	// Create an exec.Cmd
+	myCommand := exec.Command("/path/to/command")
+
+	// Create credentials using your exec.Cmd and custom timeout
+	creds := processcreds.NewCredentialsCommand(
+		myCommand,
+		func(opt *processcreds.ProcessProvider) {
+			opt.Timeout = time.Duration(1) * time.Second
+		})
+*/
+package processcreds
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+	"github.com/IBM/ibm-cos-sdk-go/internal/sdkio"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// ErrCodeProcessProviderParse error parsing process output
+	ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+	// ErrCodeProcessProviderVersion version error in output
+	ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+	// ErrCodeProcessProviderRequired required attribute missing in output
+	ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+	// ErrCodeProcessProviderExecution execution of command failed
+	ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+	// errMsgProcessProviderTimeout process took longer than allowed
+	errMsgProcessProviderTimeout = "credential process timed out"
+
+	// errMsgProcessProviderProcess process error
+	errMsgProcessProviderProcess = "error in credential_process"
+
+	// errMsgProcessProviderParse problem parsing output
+	errMsgProcessProviderParse = "parse failed of credential_process output"
+
+	// errMsgProcessProviderVersion version error in output
+	errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+	// errMsgProcessProviderMissKey missing access key id in output
+	errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+	// errMsgProcessProviderMissSecret missing secret access key in output
+	errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+	// errMsgProcessProviderPrepareCmd prepare of command failed
+	errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+	// errMsgProcessProviderEmptyCmd command must not be empty
+	errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+	// errMsgProcessProviderPipe failed to initialize pipe
+	errMsgProcessProviderPipe = "failed to initialize pipe"
+
+	// DefaultDuration is the default amount of time in minutes that the
+	// credentials will be valid for.
+	DefaultDuration = time.Duration(15) * time.Minute
+
+	// DefaultBufSize limits buffer size from growing to an enormous
+	// amount due to a faulty process.
+	DefaultBufSize = int(8 * sdkio.KibiByte)
+
+	// DefaultTimeout default limit on time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+	staticCreds bool
+	credentials.Expiry
+	originalCommand []string
+
+	// Expiry duration of the credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// The OS command that should return JSON with
+	// credential information.
+	command *exec.Cmd
+
+	// MaxBufSize limits memory usage from growing to an enormous
+	// amount due to a faulty process.
+	MaxBufSize int
+
+	// Timeout limits the time a process can run.
+	Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    exec.Command(command),
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+	p := NewCredentials(command, func(opt *ProcessProvider) {
+		opt.Timeout = timeout
+	})
+
+	return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    command,
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+type credentialProcessResponse struct {
+	Version         int
+	AccessKeyID     string `json:"AccessKeyId"`
+	SecretAccessKey string
+	SessionToken    string
+	Expiration      *time.Time
+}
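For reference, the Retrieve method below expects the external command to print a JSON document in the credentialProcessResponse shape. A sketch of a trivial Go program that could act as such a credential_process command (all key values are placeholders):

    package main

    import (
        "encoding/json"
        "os"
        "time"
    )

    func main() {
        // Print the Version 1 document that the provider's Retrieve method
        // parses. All values here are placeholders.
        out := map[string]interface{}{
            "Version":         1,
            "AccessKeyId":     "AKIDEXAMPLE",
            "SecretAccessKey": "secretExample",
            "SessionToken":    "tokenExample",                         // optional
            "Expiration":      time.Now().Add(15 * time.Minute).UTC(), // optional; omit for static credentials
        }
        json.NewEncoder(os.Stdout).Encode(out)
    }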
+
+// Retrieve executes the 'credential_process' and returns the credentials.
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
+	out, err := p.executeCredentialProcess()
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	// Deserialize and validate the response
+	resp := &credentialProcessResponse{}
+	if err = json.Unmarshal(out, resp); err != nil {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New(
+			ErrCodeProcessProviderParse,
+			fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
+			err)
+	}
+
+	if resp.Version != 1 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New(
+			ErrCodeProcessProviderVersion,
+			errMsgProcessProviderVersion,
+			nil)
+	}
+
+	if len(resp.AccessKeyID) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New(
+			ErrCodeProcessProviderRequired,
+			errMsgProcessProviderMissKey,
+			nil)
+	}
+
+	if len(resp.SecretAccessKey) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New(
+			ErrCodeProcessProviderRequired,
+			errMsgProcessProviderMissSecret,
+			nil)
+	}
+
+	// Handle expiration
+	p.staticCreds = resp.Expiration == nil
+	if resp.Expiration != nil {
+		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+	}
+
+	return credentials.Value{
+		ProviderName:    ProviderName,
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.SessionToken,
+	}, nil
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *ProcessProvider) IsExpired() bool {
+	if p.staticCreds {
+		return false
+	}
+	return p.Expiry.IsExpired()
+}
+
+// prepareCommand prepares the command to be executed.
+func (p *ProcessProvider) prepareCommand() error {
+
+	var cmdArgs []string
+	if runtime.GOOS == "windows" {
+		cmdArgs = []string{"cmd.exe", "/C"}
+	} else {
+		cmdArgs = []string{"sh", "-c"}
+	}
+
+	if len(p.originalCommand) == 0 {
+		p.originalCommand = make([]string, len(p.command.Args))
+		copy(p.originalCommand, p.command.Args)
+
+		// reject an empty command, since executing it would succeed
+		// without producing any credentials
+		if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
+			return awserr.New(
+				ErrCodeProcessProviderExecution,
+				fmt.Sprintf(
+					"%s: %s",
+					errMsgProcessProviderPrepareCmd,
+					errMsgProcessProviderEmptyCmd),
+				nil)
+		}
+	}
+
+	cmdArgs = append(cmdArgs, p.originalCommand...)
+	p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
+	p.command.Env = os.Environ()
+
+	return nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
+
+	if err := p.prepareCommand(); err != nil {
+		return nil, err
+	}
+
+	// Set up the pipes
+	outReadPipe, outWritePipe, err := os.Pipe()
+	if err != nil {
+		return nil, awserr.New(
+			ErrCodeProcessProviderExecution,
+			errMsgProcessProviderPipe,
+			err)
+	}
+
+	p.command.Stderr = os.Stderr    // display stderr on console for MFA
+	p.command.Stdout = outWritePipe // get creds json on process's stdout
+	p.command.Stdin = os.Stdin      // enable stdin for MFA
+
+	output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
+
+	stdoutCh := make(chan error, 1)
+	go readInput(
+		io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
+		output,
+		stdoutCh)
+
+	execCh := make(chan error, 1)
+	go executeCommand(*p.command, execCh)
+
+	finished := false
+	var errors []error
+	for !finished {
+		select {
+		case readError := <-stdoutCh:
+			errors = appendError(errors, readError)
+			finished = true
+		case execError := <-execCh:
+			err := outWritePipe.Close()
+			errors = appendError(errors, err)
+			errors = appendError(errors, execError)
+			if errors != nil {
+				return output.Bytes(), awserr.NewBatchError(
+					ErrCodeProcessProviderExecution,
+					errMsgProcessProviderProcess,
+					errors)
+			}
+		case <-time.After(p.Timeout):
+			finished = true
+			return output.Bytes(), awserr.NewBatchError(
+				ErrCodeProcessProviderExecution,
+				errMsgProcessProviderTimeout,
+				errors) // errors can be nil
+		}
+	}
+
+	out := output.Bytes()
+
+	if runtime.GOOS == "windows" {
+		// windows adds slashes to quotes
+		out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
+	}
+
+	return out, nil
+}
+
+// appendError conveniently checks for nil before appending to the slice
+func appendError(errors []error, err error) []error {
+	if err != nil {
+		return append(errors, err)
+	}
+	return errors
+}
+
+func executeCommand(cmd exec.Cmd, exec chan error) {
+	// Start the command
+	err := cmd.Start()
+	if err == nil {
+		err = cmd.Wait()
+	}
+
+	exec <- err
+}
+
+func readInput(r io.Reader, w io.Writer, read chan error) {
+	tee := io.TeeReader(r, w)
+
+	_, err := ioutil.ReadAll(tee)
+
+	if err == io.EOF {
+		err = nil
+	}
+
+	read <- err // will only arrive here when write end of pipe is closed
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 00000000000..5328e0a6c10
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,151 @@
+package credentials
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/internal/ini"
+	"github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredsProviderName provides a name of SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves access key pair (access key ID,
+// secret access key, and session token if present) credentials from the current
+// user's home directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+	// Path to the shared credentials file.
+	//
+	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+	// env value is empty will default to current user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows:   "%USERPROFILE%\.aws\credentials"
+	Filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "AWS_PROFILE" or "default" if
+	// environment variable is also not set.
+	Profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+	return NewCredentials(&SharedCredentialsProvider{
+		Filename: filename,
+		Profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+	p.retrieved = false
+
+	filename, err := p.filename()
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	creds, err := loadProfile(filename, p.profile())
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	p.retrieved = true
+	return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+	return !p.retrieved
+}
+
+// loadProfile loads from the file pointed to by shared credentials filename for profile.
+// The credentials retrieved from the profile will be returned or error. Error will be
+// returned if it fails to read from the file, or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+	config, err := ini.OpenFile(filename)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+	}
+
+	iniProfile, ok := config.GetSection(profile)
+	if !ok {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+	}
+
+	id := iniProfile.String("aws_access_key_id")
+	if len(id) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+			nil)
+	}
+
+	secret := iniProfile.String("aws_secret_access_key")
+	if len(secret) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+			nil)
+	}
+
+	// Default to empty string if not found
+	token := iniProfile.String("aws_session_token")
+
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+		ProviderName:    SharedCredsProviderName,
+	}, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being returned.
+		// This error is too verbose; a failure when opening the file would have been
+		// a better error to return.
+		return "", ErrSharedCredentialsHomeNotFound
+	}
+
+	p.Filename = shareddefaults.SharedCredentialsFilename()
+
+	return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+	}
+	if p.Profile == "" {
+		p.Profile = "default"
+	}
+
+	return p.Profile
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 00000000000..12e10f09639
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,57 @@
+package credentials
+
+import (
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider. Token is only required
+// for temporary security credentials retrieved via STS, otherwise an empty
+// string can be passed for this parameter.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provided. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+	return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	if len(s.Value.ProviderName) == 0 {
+		s.Value.ProviderName = StaticProviderName
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
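A brief usage sketch for the two providers above; the profile name and key values are placeholders, and the import path assumes the vendored SDK:

    package main

    import (
        "fmt"

        "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
    )

    func main() {
        // Shared file provider: reads ~/.aws/credentials (or the file named
        // by AWS_SHARED_CREDENTIALS_FILE), here with the "default" profile.
        shared := credentials.NewSharedCredentials("", "default")

        // Static provider: fixed, never-expiring credentials set in code.
        static := credentials.NewStaticCredentials("AKIDEXAMPLE", "secretExample", "")

        for _, c := range []*credentials.Credentials{shared, static} {
            if v, err := c.Get(); err == nil {
                fmt.Println("credentials supplied by:", v.ProviderName)
            }
        }
    }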
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/defaults.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 00000000000..f6cbb4481ca
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,181 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly, but session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/corehandlers"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials/endpointcreds"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam"
+	"github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers:     CredProviders(cfg, handlers),
+	})
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is useful for applications that need to use some other provider (for
+// example, to use different environment variables for legacy reasons) but
+// still fall back on the default chain of providers. This allows that default
+// chain to be automatically updated.
+// IBM COS SDK Code -- START
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		ibmiam.NewEnvProvider(cfg),
+		ibmiam.NewSharedCredentialsProvider(cfg, "", ""),
+		ibmiam.NewSharedConfigProvider(cfg, "", ""),
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+	}
+}
+
+// IBM COS SDK Code -- END
+
+const (
+	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+)
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+	ip := net.ParseIP(host)
+	if ip != nil {
+		return ip.IsLoopback(), nil
+	}
+
+	// Host is not an ip, perform lookup
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+	for _, addr := range addrs {
+		if !net.ParseIP(addr).IsLoopback() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	var errMsg string
+
+	parsed, err := url.Parse(u)
+	if err != nil {
+		errMsg = fmt.Sprintf("invalid URL, %v", err)
+	} else {
+		host := aws.URLHostname(parsed)
+		if len(host) == 0 {
+			errMsg = "unable to parse host from local HTTP cred provider URL"
+		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+			errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
+		} else if !isLoopback {
+			errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+		}
+	}
+
+	if len(errMsg) > 0 {
+		if cfg.Logger != nil {
+			cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+		}
+		return credentials.ErrorProvider{
+			Err:          awserr.New("CredentialsEndpointError", errMsg, err),
+			ProviderName: endpointcreds.ProviderName,
+		}
+	}
+
+	return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	return endpointcreds.NewProviderClient(cfg, handlers, u,
+		func(p *endpointcreds.Provider) {
+			p.ExpiryWindow = 5 * time.Minute
+			p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+		},
+	)
+}
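To observe the IBM-extended chain above in action, one can build the SDK defaults and ask the resulting chain for credentials; each provider is tried in order until one succeeds. A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/IBM/ibm-cos-sdk-go/aws/defaults"
    )

    func main() {
        // Get wires Config, Handlers, and the credential chain together,
        // with the IBM IAM providers ahead of the classic AWS env/shared
        // file providers.
        def := defaults.Get()
        creds, err := def.Config.Credentials.Get()
        if err != nil {
            fmt.Println("no provider in the chain could supply credentials:", err)
            return
        }
        fmt.Println("credentials resolved by:", creds.ProviderName)
    }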
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/shared_config.go
new file mode 100644
index 00000000000..618e39af1aa
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+	"github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared credentials file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/credentials
+//   - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+	return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/config
+//   - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/doc.go
new file mode 100644
index 00000000000..4fcb6161848
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer to a scalar, and dereferencing
+// a pointer, easier.
+//
+// Each conversion utility comes in two forms: value to pointer and pointer to value.
+// The pointer-to-value functions will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value-to-pointer functions are named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//	var strPtr *string
+//
+//	// Without the SDK's conversion functions
+//	str := "my string"
+//	strPtr = &str
+//
+//	// With the SDK's conversion functions
+//	strPtr = aws.String("my string")
+//
+//	// Convert *string to string value
+//	str = aws.StringValue(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities for
+// maps and slices of commonly used types in API parameters. The map and slice
+// conversion functions use a similar naming pattern as the scalar conversion
+// functions.
+//
+//	var strPtrs []*string
+//	var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//	// Convert []string to []*string
+//	strPtrs = aws.StringSlice(strs)
+//
+//	// Convert []*string to []string
+//	strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/decode.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 00000000000..0669e5dff06
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,216 @@
+package endpoints
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions are the options for how the endpoints model definition
+// is decoded.
+type DecodeModelOptions struct {
+	SkipCustomizations bool
+}
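The DecodeModel function below documents the EnumPartitions casting idiom; a small sketch that decodes a toy version-3 model from memory (a minimal illustrative model, not a real endpoints file) and enumerates its partitions:

    package main

    import (
        "fmt"
        "strings"

        "github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
    )

    func main() {
        // A minimal, illustrative version-3 model; real models carry region
        // and service tables under "partitions".
        model := `{"version": 3, "partitions": []}`

        resolver, err := endpoints.DecodeModel(strings.NewReader(model))
        if err != nil {
            fmt.Println("decode failed:", err)
            return
        }

        // The casting idiom from DecodeModel's doc comment below.
        for _, p := range resolver.(endpoints.EnumPartitions).Partitions() {
            fmt.Println("partition:", p.ID())
        }
    }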
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+	for _, fn := range optFns {
+		fn(d)
+	}
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model, an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+//	resolver, err := endpoints.DecodeModel(reader)
+//
+//	partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//	for _, p := range partitions {
+//		// ... inspect partitions
+//	}
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+	var opts DecodeModelOptions
+	opts.Set(optFns...)
+
+	// Get the version of the partition file to determine what
+	// unmarshaling model to use.
+	modelDef := modelDefinition{}
+	if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+		return nil, newDecodeModelError("failed to decode endpoints model", err)
+	}
+
+	var version string
+	if b, ok := modelDef["version"]; ok {
+		version = string(b)
+	} else {
+		return nil, newDecodeModelError("endpoints version not found in model", nil)
+	}
+
+	if version == "3" {
+		return decodeV3Endpoints(modelDef, opts)
+	}
+
+	return nil, newDecodeModelError(
+		fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+	b, ok := modelDef["partitions"]
+	if !ok {
+		return nil, newDecodeModelError("endpoints model missing partitions", nil)
+	}
+
+	ps := partitions{}
+	if err := json.Unmarshal(b, &ps); err != nil {
+		return nil, newDecodeModelError("failed to decode endpoints model", err)
+	}
+
+	if opts.SkipCustomizations {
+		return ps, nil
+	}
+
+	// Customization
+	for i := 0; i < len(ps); i++ {
+		p := &ps[i]
+		custAddEC2Metadata(p)
+		custAddS3DualStack(p)
+		custRegionalS3(p)
+		custRmIotDataService(p)
+		custFixAppAutoscalingChina(p)
+		custFixAppAutoscalingUsGov(p)
+	}
+
+	return ps, nil
+}
+
+func custAddS3DualStack(p *partition) {
+	if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") {
+		return
+	}
+
+	custAddDualstack(p, "s3")
+	custAddDualstack(p, "s3-control")
+}
+
+func custRegionalS3(p *partition) {
+	if p.ID != "aws" {
+		return
+	}
+
+	service, ok := p.Services["s3"]
+	if !ok {
+		return
+	}
+
+	// If global endpoint already exists no customization needed.
+ if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services[svcName] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 00000000000..cce4f16a350 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,10879 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. +) + +// AWS Standard partition's regions. +const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). 
+ ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). + EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). + EuWest3RegionID = "eu-west-3" // Europe (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). +) + +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, + awsisoPartition, + awsisobPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. 
+func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-northeast-3": region{ + Description: "Asia Pacific (Osaka)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "Europe (Frankfurt)", + }, + "eu-north-1": region{ + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", + }, + "eu-west-1": region{ + Description: "Europe (Ireland)", + }, + "eu-west-2": region{ + Description: "Europe (London)", + }, + "eu-west-3": region{ + Description: "Europe (Paris)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "access-analyzer": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + 
"eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "airflow": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "amplifybackend": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: 
"api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-northeast-3": endpoint{ + Hostname: "api.ecr.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-dkr-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-dkr-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-dkr-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-dkr-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: 
"api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.elastic-inference": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + "us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, + "api.fleethub.iot": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "app-integrations": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + 
"ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appflow": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appmesh": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + 
"eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "chime.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codeartifact": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": 
endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codeguru-reviewer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar-connections": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + 
"cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + 
"eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "contact-lens": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "data.mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + 
"fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "dms-fips": endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + 
"ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ebs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": 
endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-af-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-northeast-3": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": 
endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: 
"elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "emr-containers": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": 
endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-af-south-1": endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecast": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecastquery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + 
"eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-prod-ca-central-1": endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-prod-us-east-1": endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-prod-us-east-2": endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-prod-us-west-1": endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-prod-us-west-2": endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "groundstation": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "fips-us-east-2": endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + }, + }, + "healthlake": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "honeycode": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "iam-fips": endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "identitystore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + 
"us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotwireless": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": 
endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: 
"license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lookoutequipment": service{ + + Endpoints: endpoints{ + "ap-northeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "lookoutvision": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "macie": service{ + + Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "macie2": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + 
"eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "managedblockchain": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: 
"medialive-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": 
endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, 
+ }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "oidc": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "personalize": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": 
endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "profile": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + 
}, + }, + "ram": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "rds-fips.ca-central-1": endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rds-fips.us-east-1": endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rds-fips.us-east-2": endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rds-fips.us-west-1": endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rds-fips.us-west-2": endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ 
+ Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "rekognition-fips.ca-central-1": endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rekognition-fips.us-east-1": endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rekognition-fips.us-east-2": endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rekognition-fips.us-west-1": endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rekognition-fips.us-west-2": endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "accesspoint-af-south-1": endpoint{ + 
Hostname: "s3-accesspoint.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-east-1": endpoint{ + Hostname: "s3-accesspoint.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-1": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-2": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-3": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-south-1": endpoint{ + Hostname: "s3-accesspoint.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-southeast-1": endpoint{ + Hostname: "s3-accesspoint.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-southeast-2": endpoint{ + Hostname: "s3-accesspoint.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ca-central-1": endpoint{ + Hostname: "s3-accesspoint.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-central-1": endpoint{ + Hostname: "s3-accesspoint.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-north-1": endpoint{ + Hostname: "s3-accesspoint.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-south-1": endpoint{ + Hostname: "s3-accesspoint.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-1": endpoint{ + Hostname: "s3-accesspoint.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-2": endpoint{ + Hostname: "s3-accesspoint.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-3": endpoint{ + Hostname: "s3-accesspoint.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-me-south-1": endpoint{ + Hostname: "s3-accesspoint.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-sa-east-1": endpoint{ + Hostname: "s3-accesspoint.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-east-1": endpoint{ + Hostname: "s3-accesspoint.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-east-2": endpoint{ + Hostname: "s3-accesspoint.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-west-1": endpoint{ + Hostname: "s3-accesspoint.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-west-2": endpoint{ + Hostname: "s3-accesspoint.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-accesspoint-ca-central-1": endpoint{ + Hostname: "s3-accesspoint-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-east-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-east-2": endpoint{ + Hostname: "s3-accesspoint-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-west-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-west-2": endpoint{ + Hostname: "s3-accesspoint-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "me-south-1": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-northeast-3": endpoint{ + Hostname: "s3-control.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "ca-central-1-fips": endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: 
"ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "schemas": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + 
SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: 
[]string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "servicediscovery-fips": endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "session.qldb": service{ + + Endpoints: endpoints{ + 
"ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-northeast-3": endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: 
"snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + 
"fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + 
"ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": 
endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + 
"transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-fips": endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "ap-east-1": endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + 
Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-af-south-1": endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + 
Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ebs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: 
boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ioteventsdata": 
service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "personalize": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + 
Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "accesspoint-cn-north-1": endpoint{ + Hostname: "s3-accesspoint.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-cn-northwest-1": endpoint{ + Hostname: "s3-accesspoint.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + }, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": 
service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US-West)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "acm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "acm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "fips-dkr-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-dkr-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips-secondary": endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + 
Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: 
[]string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "connect": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ebs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + 
Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: 
"email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "fips-prod-us-gov-east-1": endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-prod-us-gov-west-1": endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "dataplane-us-gov-east-1": endpoint{ + Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "dataplane-us-gov-west-1": endpoint{ + Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-east-1": endpoint{ + Hostname: 
"greengrass-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "iam-govcloud-fips": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": 
service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + 
Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "pinpoint.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "rds.us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "rds.us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "rekognition-fips.us-gov-west-1": endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "accesspoint-us-gov-east-1": endpoint{ + Hostname: "s3-accesspoint.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-gov-west-1": endpoint{ + Hostname: "s3-accesspoint.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-gov-east-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-gov-west-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "serverlessrepo": 
service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": 
endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + 
"us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "waf-regional.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). 
+func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: 
[]string{"http", "https"}, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + 
"us-iso-east-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). +func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + 
IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "route53.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 00000000000..ca8fc828e15 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. 
+ ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. 
+ LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/doc.go new file mode 100644 index 00000000000..84316b92c05 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. 
This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK. e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+//	resolver := endpoints.DefaultResolver()
+//	partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+//	for _, p := range partitions {
+//		fmt.Println("Regions for", p.ID())
+//		for id, _ := range p.Regions() {
+//			fmt.Println("*", id)
+//		}
+//
+//		fmt.Println("Services for", p.ID())
+//		for id, _ := range p.Services() {
+//			fmt.Println("*", id)
+//		}
+//	}
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition, the ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//	myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+//		if service == endpoints.S3ServiceID {
+//			return endpoints.ResolvedEndpoint{
+//				URL:           "s3.custom.endpoint.com",
+//				SigningRegion: "custom-signing-region",
+//			}, nil
+//		}
+//
+//		return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+//	}
+//
+//	sess := session.Must(session.NewSession(&aws.Config{
+//		Region:           aws.String("us-west-2"),
+//		EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+//	}))
+package endpoints
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 00000000000..cf71e726534
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,508 @@
+package endpoints
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+	// DisableSSL forces the endpoint to be resolved as HTTP
+	// instead of HTTPS if the service supports it.
+	DisableSSL bool
+
+	// Sets the resolver to resolve the endpoint as a dualstack endpoint
+	// for the service. If dualstack support for a service is not known and
+	// StrictMatching is not enabled a dualstack endpoint for the service will
+	// be returned. This endpoint may not be valid. If StrictMatching is
+	// enabled only services that are known to support dualstack will return
+	// dualstack endpoints.
+	UseDualStack bool
+
+	// Enables strict matching of services and regions resolved endpoints.
+	// If the partition doesn't enumerate the exact service and region an
+	// error will be returned. This option will prevent returning endpoints
+	// that look valid, but may not resolve to any real endpoint.
+	StrictMatching bool
+
+	// Enables resolving a service endpoint based on the region provided if the
+	// service does not exist. The service endpoint ID will be used as the service
+	// domain name prefix. By default the endpoint resolver requires the service
+	// to be known when resolving endpoints.
+	//
+	// If resolving an endpoint on the partition list the provided region will
+	// be used to determine which partition's domain name pattern to use with
+	// the service endpoint ID. If both the service and region are unknown and
+	// resolving the endpoint on the partition list an UnknownEndpointError error
+	// will be returned.
+	//
+	// If resolving an endpoint on a partition-specific resolver that partition's
+	// domain name pattern will be used with the service endpoint ID. If both
+	// region and service do not exist when resolving an endpoint on a specific
+	// partition the partition's domain pattern will be used to combine the
+	// endpoint and region together.
+	//
+	// This option is ignored if StrictMatching is enabled.
+	ResolveUnknownService bool
+
+	// S3 Regional Endpoint flag helps with resolving the S3 endpoint.
+	S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
+}
+
+// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1
+// Regional Endpoint options.
+type S3UsEast1RegionalEndpoint int
+
+func (e S3UsEast1RegionalEndpoint) String() string {
+	switch e {
+	case LegacyS3UsEast1Endpoint:
+		return "legacy"
+	case RegionalS3UsEast1Endpoint:
+		return "regional"
+	case UnsetS3UsEast1Endpoint:
+		return ""
+	default:
+		return "unknown"
+	}
+}
+
+const (
+
+	// UnsetS3UsEast1Endpoint represents that the S3 Regional Endpoint flag is
+	// not specified.
+	UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota
+
+	// LegacyS3UsEast1Endpoint represents when the S3 Regional Endpoint flag is
+	// specified to use legacy endpoints.
+	LegacyS3UsEast1Endpoint
+
+	// RegionalS3UsEast1Endpoint represents when the S3 Regional Endpoint flag is
+	// specified to use regional endpoints.
+	RegionalS3UsEast1Endpoint
+)
+
+// GetS3UsEast1RegionalEndpoint returns the S3UsEast1RegionalEndpoint flag based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy`, `regional` are the only case-insensitive valid strings for
+// resolving the S3 regional Endpoint flag.
+func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
+	switch {
+	case strings.EqualFold(s, "legacy"):
+		return LegacyS3UsEast1Endpoint, nil
+	case strings.EqualFold(s, "regional"):
+		return RegionalS3UsEast1Endpoint, nil
+	default:
+		return UnsetS3UsEast1Endpoint,
+			fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
+	}
+}
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+	for _, fn := range optFns {
+		fn(o)
+	}
+}
+
+// DisableSSLOption sets the DisableSSL option. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+	o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+	o.UseDualStack = true
+}
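+
+// Example (illustrative sketch, not part of the vendored API surface):
+// functional options are plain funcs over *Options, applied in order by
+// Options.Set before a resolve call.
+//
+//	var opts endpoints.Options
+//	opts.Set(endpoints.DisableSSLOption, endpoints.UseDualStackOption)
+//	// opts.DisableSSL == true, opts.UseDualStack == true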
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a
+// functional option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+	o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+	o.ResolveUnknownService = true
+}
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+	EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP is used instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+	if !schemeRE.MatchString(endpoint) {
+		scheme := "https"
+		if disableSSL {
+			scheme = "http"
+		}
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+
+	return endpoint
+}
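+
+// Example (illustrative): AddScheme only prepends a scheme when the URL has
+// none, so already-schemed endpoints pass through unchanged.
+//
+//	endpoints.AddScheme("example.com", false)        // "https://example.com"
+//	endpoints.AddScheme("example.com", true)         // "http://example.com"
+//	endpoints.AddScheme("http://example.com", false) // "http://example.com"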
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+	Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist false will be returned
+// as the second parameter.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//	rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//	rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+	for _, p := range ps {
+		if p.ID() != partitionID {
+			continue
+		}
+		if _, ok := p.p.Services[serviceID]; !ok {
+			break
+		}
+
+		s := Service{
+			id: serviceID,
+			p:  p.p,
+		}
+		return s.Regions(), true
+	}
+
+	return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+	for _, p := range ps {
+		if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+			return p, true
+		}
+	}
+
+	return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+	id, dnsSuffix string
+	p             *partition
+}
+
+// DNSSuffix returns the base domain name of the partition.
+func (p Partition) DNSSuffix() string { return p.dnsSuffix }
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation will occur regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new region and service expansions.
+//
+// Errors that can be returned.
+//	* UnknownServiceError
+//	* UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+	rs := make(map[string]Region, len(p.p.Regions))
+	for id, r := range p.p.Regions {
+		rs[id] = Region{
+			id:   id,
+			desc: r.Description,
+			p:    p.p,
+		}
+	}
+
+	return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+	ss := make(map[string]Service, len(p.p.Services))
+	for id := range p.p.Services {
+		ss[id] = Service{
+			id: id,
+			p:  p.p,
+		}
+	}
+
+	return ss
+}
+
+// A Region provides information about a region, and ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+	id, desc string
+	p        *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text, it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return r.p.EndpointFor(service, r.id, opts...)
+}
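+
+// Example (illustrative; the region and service names assume the ISOB
+// partition modeled earlier in this package):
+//
+//	p := endpoints.AwsIsoBPartition()
+//	r := p.Regions()["us-isob-east-1"]
+//	ep, err := r.ResolveEndpoint("s3")
+//	// when err == nil, ep.URL is "https://s3.us-isob-east-1.sc2s.sgov.gov"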
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+	ss := map[string]Service{}
+	for id, s := range r.p.Services {
+		if _, ok := s.Endpoints[r.id]; ok {
+			ss[id] = Service{
+				id: id,
+				p:  r.p,
+			}
+		}
+	}
+
+	return ss
+}
+
+// A Service provides information about a service, and ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+	id string
+	p  *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+	rs := map[string]Region{}
+	for id := range s.p.Services[s.id].Endpoints {
+		if r, ok := s.p.Regions[id]; ok {
+			rs[id] = Region{
+				id:   id,
+				desc: r.Description,
+				p:    s.p,
+			}
+		}
+	}
+
+	return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+	es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
+	for id := range s.p.Services[s.id].Endpoints {
+		es[id] = Endpoint{
+			id:        id,
+			serviceID: s.id,
+			p:         s.p,
+		}
+	}
+
+	return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+	id        string
+	serviceID string
+	p         *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
+// service, and region.
+type ResolvedEndpoint struct {
+	// The endpoint URL
+	URL string
+
+	// The endpoint partition
+	PartitionID string
+
+	// The region that should be used for signing requests.
+	SigningRegion string
+
+	// The service name that should be used for signing requests.
+	SigningName string
+
+	// States that the signing name for this endpoint was derived from metadata
+	// passed in, but was not explicitly modeled.
+	SigningNameDerived bool
+
+	// The signing method that should be used for signing requests.
+	SigningMethod string
+}
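+
+// Example (illustrative): inspecting a ResolvedEndpoint after a successful
+// lookup; SigningName falls back to the service ID when no credential scope
+// is modeled, and SigningNameDerived records that fallback.
+//
+//	ep, err := endpoints.AwsIsoBPartition().EndpointFor("s3", "us-isob-east-1")
+//	if err == nil {
+//		fmt.Println(ep.URL, ep.SigningRegion, ep.SigningName, ep.SigningNameDerived)
+//	}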
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+	awsError
+	Partition string
+	Service   string
+	Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+	return UnknownServiceError{
+		awsError: awserr.New("UnknownServiceError",
+			"could not resolve endpoint for unknown service", nil),
+		Partition: p,
+		Service:   s,
+		Known:     known,
+	}
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+	extra := fmt.Sprintf("partition: %q, service: %q",
+		e.Partition, e.Service)
+	if len(e.Known) > 0 {
+		extra += fmt.Sprintf(", known: %v", e.Known)
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+	return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+	Known     []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+	return UnknownEndpointError{
+		awsError: awserr.New("UnknownEndpointError",
+			"could not resolve endpoint", nil),
+		Partition: p,
+		Service:   s,
+		Region:    r,
+		Known:     known,
+	}
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+	extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+		e.Partition, e.Service, e.Region)
+	if len(e.Known) > 0 {
+		extra += fmt.Sprintf(", known: %v", e.Known)
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
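+
+// Example (illustrative): distinguishing resolver errors under strict
+// matching with a type switch.
+//
+//	_, err := endpoints.AwsIsoBPartition().EndpointFor(
+//		"no-such-service", "us-isob-east-1", endpoints.StrictMatchingOption)
+//	switch err.(type) {
+//	case endpoints.UnknownServiceError:
+//		// the service is not modeled in the partition
+//	case endpoints.UnknownEndpointError:
+//		// the service is known, but the region is not modeled for it
+//	}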
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+	return e.Error()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/legacy_regions.go
new file mode 100644
index 00000000000..28a7556cca3
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/legacy_regions.go
@@ -0,0 +1,7 @@
+package endpoints
+
+var legacyGlobalRegions = map[string]map[string]struct{}{
+	"s3": {
+		"us-east-1": {},
+	},
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 00000000000..5490965d246
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,350 @@
+package endpoints
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	var opt Options
+	opt.Set(opts...)
+
+	for i := 0; i < len(ps); i++ {
+		if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+			continue
+		}
+
+		return ps[i].EndpointFor(service, region, opts...)
+	}
+
+	// If matching loosely, fall back to the first partition's format when
+	// resolving the endpoint.
+	if !opt.StrictMatching && len(ps) > 0 {
+		return ps[0].EndpointFor(service, region, opts...)
+	}
+
+	return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions, one for each partition in the SDK's endpoints model.
+func (ps partitions) Partitions() []Partition {
+	parts := make([]Partition, 0, len(ps))
+	for i := 0; i < len(ps); i++ {
+		parts = append(parts, ps[i].Partition())
+	}
+
+	return parts
+}
+
+type partition struct {
+	ID          string      `json:"partition"`
+	Name        string      `json:"partitionName"`
+	DNSSuffix   string      `json:"dnsSuffix"`
+	RegionRegex regionRegex `json:"regionRegex"`
+	Defaults    endpoint    `json:"defaults"`
+	Regions     regions     `json:"regions"`
+	Services    services    `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+	return Partition{
+		dnsSuffix: p.DNSSuffix,
+		id:        p.ID,
+		p:         &p,
+	}
+}
+
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+	s, hasService := p.Services[service]
+	_, hasEndpoint := s.Endpoints[region]
+
+	if hasEndpoint && hasService {
+		return true
+	}
+
+	if strictMatch {
+		return false
+	}
+
+	return p.RegionRegex.MatchString(region)
+}
+
+func allowLegacyEmptyRegion(service string) bool {
+	legacy := map[string]struct{}{
+		"budgets":       {},
+		"ce":            {},
+		"chime":         {},
+		"cloudfront":    {},
+		"ec2metadata":   {},
+		"iam":           {},
+		"importexport":  {},
+		"organizations": {},
+		"route53":       {},
+		"sts":           {},
+		"support":       {},
+		"waf":           {},
+	}
+
+	_, allowed := legacy[service]
+	return allowed
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+	var opt Options
+	opt.Set(opts...)
+
+	s, hasService := p.Services[service]
+	if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
+		// Only return an error if the resolver will not fall back to
+		// creating an endpoint based on the service endpoint ID passed in.
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + + e, hasEndpoint := s.endpointForRegion(region) + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. 
+ SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + region = signingRegion + } + + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + return ResolvedEndpoint{ + URL: u, + PartitionID: partitionID, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 00000000000..0fdfcc56e05 --- /dev/null +++ 
b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,351 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, 
+ "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . 
-}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/errors.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/errors.go new file mode 100644 index 00000000000..012fb487adb --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/jsonvalue.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/jsonvalue.go new file mode 100644 index 00000000000..91a6f277a7e --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/logger.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/logger.go new file mode 100644 index 00000000000..6ed15b2ecc2 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/logger.go @@ -0,0 +1,118 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. 
+func LogLevel(l LogLevelType) *LogLevelType {
+	return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+	if l != nil {
+		return *l
+	}
+	return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+	c := l.Value()
+	return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests will
+	// be retried. This should be used when you want to log when service
+	// requests are being retried. Will also enable LogDebug.
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+
+	// LogDebugWithEventStreamBody states the SDK should log EventStream
+	// request and response bodies. This should be used to log the EventStream
+	// wire unmarshaled message content of requests and responses made while
+	// using the SDK. Will also enable LogDebug.
+	LogDebugWithEventStreamBody
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+//	s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//		fmt.Fprintln(os.Stdout, args...)
+//	})})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided.
+func (f LoggerFunc) Log(args ...interface{}) {
+	f(args...)
+}
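+
+// Example (illustrative): sub-levels are bit flags that include LogDebug, so
+// Matches can test for either the sub-level or plain debug logging.
+//
+//	lvl := aws.LogDebugWithSigning
+//	lvl.Matches(aws.LogDebug)            // true: sub-levels imply LogDebug
+//	dbg := aws.LogDebug
+//	dbg.Matches(aws.LogDebugWithSigning) // false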
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and use the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 00000000000..2ba3c56c11f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
+package request
+
+import (
+	"strings"
+)
+
+// isErrConnectionReset reports whether err looks like a retryable connection
+// reset. "read: connection reset" is deliberately excluded: a reset surfaced
+// while reading the response means the request may already have been received
+// by the service, so it is not treated as retryable.
+func isErrConnectionReset(err error) bool {
+	if strings.Contains(err.Error(), "read: connection reset") {
+		return false
+	}
+
+	if strings.Contains(err.Error(), "use of closed network connection") ||
+		strings.Contains(err.Error(), "connection reset") ||
+		strings.Contains(err.Error(), "broken pipe") {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/handlers.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/handlers.go
new file mode 100644
index 00000000000..e819ab6c0e8
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/handlers.go
@@ -0,0 +1,343 @@
+package request
+
+import (
+	"fmt"
+	"strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+	Validate         HandlerList
+	Build            HandlerList
+	BuildStream      HandlerList
+	Sign             HandlerList
+	Send             HandlerList
+	ValidateResponse HandlerList
+	Unmarshal        HandlerList
+	UnmarshalStream  HandlerList
+	UnmarshalMeta    HandlerList
+	UnmarshalError   HandlerList
+	Retry            HandlerList
+	AfterRetry       HandlerList
+	CompleteAttempt  HandlerList
+	Complete         HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+	return Handlers{
+		Validate:         h.Validate.copy(),
+		Build:            h.Build.copy(),
+		BuildStream:      h.BuildStream.copy(),
+		Sign:             h.Sign.copy(),
+		Send:             h.Send.copy(),
+		ValidateResponse: h.ValidateResponse.copy(),
+		Unmarshal:        h.Unmarshal.copy(),
+		UnmarshalStream:  h.UnmarshalStream.copy(),
+		UnmarshalError:   h.UnmarshalError.copy(),
+		UnmarshalMeta:    h.UnmarshalMeta.copy(),
+		Retry:            h.Retry.copy(),
+		AfterRetry:       h.AfterRetry.copy(),
+		CompleteAttempt:  h.CompleteAttempt.copy(),
+		Complete:         h.Complete.copy(),
+	}
+}
+
+// Clear removes callback functions for all handlers.
+func (h *Handlers) Clear() {
+	h.Validate.Clear()
+	h.Build.Clear()
+	h.BuildStream.Clear()
+	h.Send.Clear()
+	h.Sign.Clear()
+	h.Unmarshal.Clear()
+	h.UnmarshalStream.Clear()
+	h.UnmarshalMeta.Clear()
+	h.UnmarshalError.Clear()
+	h.ValidateResponse.Clear()
+	h.Retry.Clear()
+	h.AfterRetry.Clear()
+	h.CompleteAttempt.Clear()
+	h.Complete.Clear()
+}
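+
+// Example (illustrative; the handler name is hypothetical): attaching and
+// later removing a named handler on one of the lists.
+//
+//	var h request.Handlers
+//	h.Build.PushBackNamed(request.NamedHandler{
+//		Name: "myapp.TraceBuild",
+//		Fn:   func(r *request.Request) { /* inspect r here */ },
+//	})
+//	h.Build.RemoveByName("myapp.TraceBuild")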
+
+// IsEmpty returns true if there are no handlers in any of the handler lists.
+func (h *Handlers) IsEmpty() bool {
+	if h.Validate.Len() != 0 {
+		return false
+	}
+	if h.Build.Len() != 0 {
+		return false
+	}
+	if h.BuildStream.Len() != 0 {
+		return false
+	}
+	if h.Send.Len() != 0 {
+		return false
+	}
+	if h.Sign.Len() != 0 {
+		return false
+	}
+	if h.Unmarshal.Len() != 0 {
+		return false
+	}
+	if h.UnmarshalStream.Len() != 0 {
+		return false
+	}
+	if h.UnmarshalMeta.Len() != 0 {
+		return false
+	}
+	if h.UnmarshalError.Len() != 0 {
+		return false
+	}
+	if h.ValidateResponse.Len() != 0 {
+		return false
+	}
+	if h.Retry.Len() != 0 {
+		return false
+	}
+	if h.AfterRetry.Len() != 0 {
+		return false
+	}
+	if h.CompleteAttempt.Len() != 0 {
+		return false
+	}
+	if h.Complete.Len() != 0 {
+		return false
+	}
+
+	return true
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+	Index   int
+	Handler NamedHandler
+	Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+	list []NamedHandler
+
+	// Called after each request handler in the list is called. If set
+	// and the func returns true the HandlerList will continue to iterate
+	// over the request handlers. If false is returned the HandlerList
+	// will stop iterating.
+	//
+	// Should be used if extra logic needs to be performed between each handler
+	// in the list. This can be used to terminate a list's iteration
+	// based on a condition, such as an error (see HandlerListStopOnError),
+	// or for logging (see HandlerListLogItem).
+	AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+	Name string
+	Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+	n := HandlerList{
+		AfterEachFn: l.AfterEachFn,
+	}
+	if len(l.list) == 0 {
+		return n
+	}
+
+	n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+	return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+	l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+	return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+	l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+	if cap(l.list) == 0 {
+		l.list = make([]NamedHandler, 0, 5)
+	}
+	l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+	l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+	if cap(l.list) == len(l.list) {
+		// The backing array is full; allocating a new list is required.
+		l.list = append([]NamedHandler{n}, l.list...)
+	} else {
+		// Enough room to prepend into the existing list.
+		l.list = append(l.list, NamedHandler{})
+		copy(l.list[1:], l.list)
+		l.list[0] = n
+	}
+}
+
+// Remove removes a NamedHandler n.
+func (l *HandlerList) Remove(n NamedHandler) {
+	l.RemoveByName(n.Name)
+}
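+
+// Example (illustrative): stopping iteration early once a handler records an
+// error, using the predicate defined further below.
+//
+//	var l request.HandlerList
+//	l.AfterEachFn = request.HandlerListStopOnError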
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+	for i := 0; i < len(l.list); i++ {
+		m := l.list[i]
+		if m.Name == name {
+			// Shift the remaining elements left, avoiding allocation of a
+			// new array.
+			copy(l.list[i:], l.list[i+1:])
+			l.list[len(l.list)-1] = NamedHandler{}
+			l.list = l.list[:len(l.list)-1]
+
+			// Decrement the index so the next length check is correct.
+			i--
+		}
+	}
+}
+
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed-in NamedHandler, returning true if handlers were swapped. False is
+// returned otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+	for i := 0; i < len(l.list); i++ {
+		if l.list[i].Name == n.Name {
+			l.list[i].Fn = n.Fn
+			swapped = true
+		}
+	}
+
+	return swapped
+}
+
+// Swap will swap out all handlers matching the name passed in, replacing them
+// with the passed-in handler. True is returned if any handlers were swapped.
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
+	var swapped bool
+
+	for i := 0; i < len(l.list); i++ {
+		if l.list[i].Name == name {
+			l.list[i] = replace
+			swapped = true
+		}
+	}
+
+	return swapped
+}
+
+// SetBackNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the end of the list.
+func (l *HandlerList) SetBackNamed(n NamedHandler) {
+	if !l.SwapNamed(n) {
+		l.PushBackNamed(n)
+	}
+}
+
+// SetFrontNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the beginning of
+// the list.
+func (l *HandlerList) SetFrontNamed(n NamedHandler) {
+	if !l.SwapNamed(n) {
+		l.PushFrontNamed(n)
+	}
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+	for i, h := range l.list {
+		h.Fn(r)
+		item := HandlerListRunItem{
+			Index: i, Handler: h, Request: r,
+		}
+		if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+			return
+		}
+	}
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+	if item.Request.Config.Logger == nil {
+		return true
+	}
+	item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+		item.Index, item.Handler.Name, item.Request.Error)
+
+	return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+	return item.Request.Error == nil
+}
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+	return func(r *Request) {
+		r.Handlers.Build.PushBack(func(r2 *Request) {
+			AddToUserAgent(r, s)
+		})
+	}
+}
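+
+// Example (illustrative): the handler constructed below produces a User-Agent
+// fragment of the form "name/version (extra0; extra1; ...)".
+//
+//	h := request.MakeAddToUserAgentHandler("myapp", "1.2.3", "linux", "amd64")
+//	// pushed onto a Build handler list, h appends "myapp/1.2.3 (linux; amd64)"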
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. +func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header[k] = []string{v} + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/http_request.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/http_request.go new file mode 100644 index 00000000000..79f79602b03 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/offset_reader.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/offset_reader.go new file mode 100644 index 00000000000..d6c80a4291b --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,65 @@ +package request + +import ( + "io" + "sync" + + "github.com/IBM/ibm-cos-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { + reader := &offsetReader{} + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } + + reader.buf = buf + return reader, nil +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. 
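`WithSetRequestHeaders`, defined above, is the simplest way for a caller to inject static headers into a single operation. A usage fragment, assuming `svc` is an initialized S3-compatible COS client from this SDK and the bucket, key, and header values are invented:

```go
_, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
	Bucket: aws.String("example-bucket"),
	Key:    aws.String("bootstrap.ign"),
	Body:   bytes.NewReader(payload),
}, request.WithSetRequestHeaders(map[string]string{
	// Replaces any existing value for this key, per the doc comment above.
	"Cache-Control": "no-store",
}))
```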
+func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request.go new file mode 100644 index 00000000000..537dc87c2e9 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request.go @@ -0,0 +1,698 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata" + "github.com/IBM/ibm-cos-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + streamingBody io.ReadCloser + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. 
+type Operation struct {
+	Name       string
+	HTTPMethod string
+	HTTPPath   string
+	*Paginator
+
+	BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API operation and
+// parameters.
+//
+// A Retryer should be provided to direct how the request is retried. If
+// Retryer is nil, a default no retry value will be used. You can use
+// NoOpRetryer in the Client package to disable retry behavior directly.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+	if retryer == nil {
+		retryer = noOpRetryer{}
+	}
+
+	method := operation.HTTPMethod
+	if method == "" {
+		method = "POST"
+	}
+
+	httpReq, _ := http.NewRequest(method, "", nil)
+
+	var err error
+	httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+	if err != nil {
+		httpReq.URL = &url.URL{}
+		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+	}
+
+	r := &Request{
+		Config:     cfg,
+		ClientInfo: clientInfo,
+		Handlers:   handlers.Copy(),
+
+		Retryer:     retryer,
+		Time:        time.Now(),
+		ExpireTime:  0,
+		Operation:   operation,
+		HTTPRequest: httpReq,
+		Body:        nil,
+		Params:      params,
+		Error:       err,
+		Data:        data,
+	}
+	r.SetBufferBody([]byte{})
+
+	return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+//    var id2, versionID string
+//    svc.PutObjectWithContext(ctx, params,
+//        request.WithGetResponseHeader("x-amz-id-2", &id2),
+//        request.WithGetResponseHeader("x-amz-version-id", &versionID),
+//    )
+func WithGetResponseHeader(key string, val *string) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*val = req.HTTPResponse.Header.Get(key)
+		})
+	}
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+//    var headers http.Header
+//    svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//     svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context.
If Request does not have a +// context aws.BackgroundContext will be returned. +func (r *Request) Context() aws.Context { + if r.context != nil { + return r.context + } + return aws.BackgroundContext() +} + +// SetContext adds a Context to the current request that can be used to cancel +// a in-flight request. The Context value must not be nil, or this method will +// panic. +// +// Unlike http.Request.WithContext, SetContext does not return a copy of the +// Request. It is not safe to use use a single Request value for multiple +// requests. A new Request should be created for each API operation request. +// +// Go 1.6 and below: +// The http.Request's Cancel field will be set to the Done() value of +// the context. This will overwrite the Cancel field's value. +// +// Go 1.7 and above: +// The http.Request.WithContext will be used to set the context on the underlying +// http.Request. This will create a shallow copy of the http.Request. The SDK +// may create sub contexts in the future for nested requests such as retries. +func (r *Request) SetContext(ctx aws.Context) { + if ctx == nil { + panic("context cannot be nil") + } + setRequestContext(r, ctx) +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } + r.ResetBody() +} + +// SetStreamingBody set the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. 
+// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this. Making the presigned URL + // useless. + r.NotHoist = false + + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err + } + + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", notRetrying, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. 
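Putting the presign pieces together, a sketch assuming the `service/s3` client in this SDK mirrors the upstream aws-sdk-go surface; the endpoint, region, bucket, and key are illustrative, and credentials are assumed to be resolved from the environment:

```go
sess := session.Must(session.NewSession(aws.NewConfig().
	WithRegion("us-south").
	WithEndpoint("https://s3.us-south.cloud-object-storage.appdomain.cloud")))
svc := s3.New(sess)

req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
	Bucket: aws.String("example-bucket"),
	Key:    aws.String("bootstrap.ign"),
})
// The request is signed but never sent; per getPresignedURL, expire must be > 0
// or Presign fails with ErrCodeInvalidPresignExpire.
urlStr, err := req.Presign(15 * time.Minute)
if err != nil {
	log.Fatal(err)
}
fmt.Println(urlStr)
```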
+func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + SanitizeHostForHeader(r.HTTPRequest) + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) + } + + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. 
+ r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err + return err + } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + if r.URL == nil { + return "" + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. 
+// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_1_8.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_1_8.go new file mode 100644 index 00000000000..1e0fc174334 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,36 @@ +// +build go1.8 + +package request + +import ( + "net/http" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" +) + +// NoBody is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_context.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_context.go new file mode 100644 index 00000000000..1351c687026 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/IBM/ibm-cos-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. 
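The default-port stripping implemented by `SanitizeHostForHeader` and its helpers is easy to observe directly; a small sketch (the hostname is illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/IBM/ibm-cos-sdk-go/aws/request"
)

func main() {
	r, _ := http.NewRequest("GET",
		"https://s3.us-south.cloud-object-storage.appdomain.cloud:443/bucket/key", nil)
	request.SanitizeHostForHeader(r)
	// Port 443 is the https default, so the Host header drops it.
	fmt.Println(r.Host) // s3.us-south.cloud-object-storage.appdomain.cloud
}
```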
+func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_pagination.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_pagination.go new file mode 100644 index 00000000000..cbe280a0cd1 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,266 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// for p.Next() { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// // ... +// // break out of loop to stop fetching additional pages +// } +// +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. + EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. 
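Wiring up the `Pagination` type by hand looks like the following fragment, reusing the `svc` client from the presign sketch above (bucket name invented); the non-obvious part is that `NewRequest` is called once per page, so per-page options belong inside it:

```go
p := request.Pagination{
	NewRequest: func() (*request.Request, error) {
		req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
			Bucket: aws.String("example-bucket"),
		})
		return req, nil
	},
}
for p.Next() {
	page := p.Page().(*s3.ListObjectsOutput)
	fmt.Println("objects on page:", len(page.Contents))
}
if err := p.Err(); err != nil {
	log.Fatal(err)
}
```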
+func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if !v { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. 
Use Pagination type for configurable pagination of API operations")
+
+	tokens := r.nextPageTokens()
+	if len(tokens) == 0 {
+		return nil
+	}
+
+	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+	nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+	for i, intok := range nr.Operation.InputTokens {
+		awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+	}
+	return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+//   func(page *T, lastPage bool) bool {
+//       return true // return false to stop iterating
+//   }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+//
+// Deprecated Use Pagination type for configurable pagination of API operations
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+		"Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+	for page := r; page != nil; page = page.NextPage() {
+		if err := page.Send(); err != nil {
+			return err
+		}
+		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+			return page.Error
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/retryer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/retryer.go
new file mode 100644
index 00000000000..c7523748556
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/retryer.go
@@ -0,0 +1,309 @@
+package request
+
+import (
+	"net"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+// Retryer provides the interface to drive the SDK's request retry behavior.
+// The Retryer implementation is responsible for implementing exponential
+// backoff, and for determining if a request API error should be retried.
+//
+// client.DefaultRetryer is the SDK's default implementation of the Retryer.
+// It uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods
+// to determine if the request is retried.
+type Retryer interface {
+	// RetryRules returns the retry delay that should be used by the SDK before
+	// making another request attempt for the failed request.
+	RetryRules(*Request) time.Duration
+
+	// ShouldRetry returns if the failed request is retryable.
+	//
+	// Implementations may consider request attempt count when determining if a
+	// request is retryable, but the SDK will use MaxRetries to limit the
+	// number of attempts made for a request.
+	ShouldRetry(*Request) bool
+
+	// MaxRetries is the number of times a request may be retried before
+	// failing.
+	MaxRetries() int
+}
+
+// WithRetryer sets a Retryer value to the given Config returning the Config
+// value for chaining. The value must not be nil.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+	if retryer == nil {
+		if cfg.Logger != nil {
+			cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.")
+		}
+		retryer = noOpRetryer{}
+	}
+	cfg.Retryer = retryer
+	return cfg
+
+}
+
+// noOpRetryer is an internal no-op retryer used when a request is created
+// without a retryer.
+//
+// Provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type noOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the SDK will make for an
+// individual API request; for noOpRetryer, MaxRetries always returns zero.
+func (d noOpRetryer) MaxRetries() int {
+	return 0
+}
+
+// ShouldRetry will always return false for noOpRetryer, as it should never retry.
+func (d noOpRetryer) ShouldRetry(_ *Request) bool {
+	return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since noOpRetryer does not retry, RetryRules always returns 0.
+func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
+	return 0
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+	ErrCodeRequestError:       {},
+	"RequestTimeout":          {},
+	ErrCodeResponseTimeout:    {},
+	"RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+	"ProvisionedThroughputExceededException": {},
+	"ThrottledException":                     {}, // SNS, XRay, ResourceGroupsTagging API
+	"Throttling":                             {},
+	"ThrottlingException":                    {},
+	"RequestLimitExceeded":                   {},
+	"RequestThrottled":                       {},
+	"RequestThrottledException":              {},
+	"TooManyRequestsException":               {}, // Lambda functions
+	"PriorRequestNotComplete":                {}, // Route53
+	"TransactionInProgressException":         {},
+	"EC2ThrottledException":                  {}, // EC2
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+	"ExpiredToken":          {},
+	"ExpiredTokenException": {},
+	"RequestExpired":        {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+	_, ok := throttleCodes[code]
+	return ok
+}
+
+func isCodeRetryable(code string) bool {
+	if _, ok := retryableCodes[code]; ok {
+		return true
+	}
+
+	return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+	_, ok := credsExpiredCodes[code]
+	return ok
+}
+
+var validParentCodes = map[string]struct{}{
+	ErrCodeSerialization: {},
+	ErrCodeRead:          {},
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+	if parentErr == nil {
+		return false
+	}
+
+	if _, ok := validParentCodes[parentErr.Code()]; !ok {
+		return false
+	}
+
+	err := parentErr.OrigErr()
+	if err == nil {
+		return false
+	}
+
+	if aerr, ok := err.(awserr.Error); ok {
+		return isCodeRetryable(aerr.Code())
+	}
+
+	if t, ok := err.(temporary); ok {
+		return t.Temporary() || isErrConnectionReset(err)
+	}
+
+	return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
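Since `Retryer` is a plain three-method interface, callers can plug in their own policy instead of `client.DefaultRetryer`. A minimal sketch of a flat-delay retryer; the type name and values are illustrative, not part of the SDK:

```go
// fixedRetryer retries any retryable or throttled request up to
// maxRetries times, always waiting the same flat delay between attempts.
type fixedRetryer struct {
	maxRetries int
	delay      time.Duration
}

func (f fixedRetryer) MaxRetries() int { return f.maxRetries }

func (f fixedRetryer) RetryRules(*request.Request) time.Duration { return f.delay }

func (f fixedRetryer) ShouldRetry(r *request.Request) bool {
	return r.IsErrorRetryable() || r.IsErrorThrottle()
}
```

It would be installed on a config with the helper defined above, e.g. `cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{3, 200 * time.Millisecond})`.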
+func IsErrorRetryable(err error) bool { + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. 
+// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + + return IsErrorThrottle(r.Error) +} + +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 00000000000..87a37232b9c --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. 
+// +// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. + r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/validation.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/validation.go new file mode 100644 index 00000000000..3ffdf5915b9 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/validation.go @@ -0,0 +1,286 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. 
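Generated input types in this SDK implement `Validator` by accumulating `ErrInvalidParam` values in an `ErrInvalidParams` collection. A sketch of the pattern; `PutThingInput` is a hypothetical input struct, not a real API type:

```go
type PutThingInput struct {
	Name *string
}

func (i *PutThingInput) Validate() error {
	params := request.ErrInvalidParams{Context: "PutThingInput"}
	if i.Name == nil {
		params.Add(request.NewErrParamRequired("Name"))
	} else if len(*i.Name) < 1 {
		params.Add(request.NewErrParamMinLen("Name", 1))
	}
	if params.Len() > 0 {
		// Error() renders "InvalidParameter: 1 validation error(s) found."
		// followed by one "- <msg>, PutThingInput.Name." line per entry.
		return params
	}
	return nil
}
```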
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. 
+func NewErrParamRequired(field string) *ErrParamRequired {
+	return &ErrParamRequired{
+		errInvalidParam{
+			code:  ParamRequiredErrCode,
+			field: field,
+			msg:   fmt.Sprintf("missing required field"),
+		},
+	}
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+	errInvalidParam
+	min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+	return &ErrParamMinValue{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinValueErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field value of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+	return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+	errInvalidParam
+	min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+	return &ErrParamMinLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field size of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+	return e.min
+}
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+	errInvalidParam
+	max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+	return &ErrParamMaxLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMaxLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("maximum size of %v, %v", max, value),
+		},
+		max: max,
+	}
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+	return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+	errInvalidParam
+	format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+	return &ErrParamFormat{
+		errInvalidParam: errInvalidParam{
+			code:  ParamFormatErrCode,
+			field: field,
+			msg:   fmt.Sprintf("format %v, %v", format, value),
+		},
+		format: format,
+	}
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+	return e.format
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/waiter.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/waiter.go
new file mode 100644
index 00000000000..e3d0f16b335
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/waiter.go
@@ -0,0 +1,295 @@
+package request
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a waiter option which sets the maximum number
+// of times the waiter will attempt to check the resource for the target state.
+func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. 
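In practice the waiter options above are passed to the generated `WaitUntil*` helpers rather than used with a hand-built `Waiter`. A fragment reusing the `svc` client from the presign sketch earlier, assuming this SDK's s3 package keeps the upstream aws-sdk-go waiter surface (bucket and key invented):

```go
err := svc.WaitUntilObjectExistsWithContext(ctx,
	&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("bootstrap.ign"),
	},
	request.WithWaiterMaxAttempts(10),
	request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
)
if err != nil {
	// After max attempts the error carries WaiterResourceNotReadyErrorCode.
	log.Fatal(err)
}
```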
+const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. 
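+//
+// As a minimal sketch of how the pieces fit together, a caller-configured
+// waiter could combine the acceptor sketched above with a request factory
+// (the newDescribeRequest func here is a hypothetical stand-in for a
+// service-generated one):
+//
+//	w := Waiter{
+//	    Name:        "WaitUntilReady",
+//	    MaxAttempts: 20,
+//	    Delay:       ConstantWaiterDelay(15 * time.Second),
+//	    Acceptors:   []WaiterAcceptor{acceptor},
+//	    NewRequest:  newDescribeRequest,
+//	}
+//	err := w.WaitWithContext(aws.BackgroundContext())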
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/credentials.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/credentials.go new file mode 100644 index 00000000000..70a3d2bc7c1 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/credentials.go @@ -0,0 +1,122 @@ +package session + +import ( + "fmt" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided an Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + default: + // Fallback to the "default" credential resolution chain. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. 
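+		// Recursively resolve the source profile's own credentials first;
+		// the recursion terminates at a profile with static credentials, a
+		// credential process, or a credential source.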
+ creds, err = resolveCredsFromProfile(cfg, envCfg, + *sharedCfg.SourceProfile, handlers, sessOpts, + ) + + case sharedCfg.Creds.HasKeys(): + // Static Credentials from Shared Config/Credentials file. + creds = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + + case len(sharedCfg.CredentialSource) != 0: + creds, err = resolveCredsFromSource(cfg, envCfg, + sharedCfg, handlers, sessOpts, + ) + + default: + // Fallback to default credentials provider, include mock errors for + // the credential chain so user can identify why credentials failed to + // be retrieved. + creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + //defaults.RemoteCredProvider(*cfg, handlers), IBM + }, + }) + } + if err != nil { + return nil, err + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEnvironment = "Environment" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport.go new file mode 100644 index 00000000000..593aedc4218 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport.go @@ -0,0 +1,27 @@ +// +build go1.13 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.12.go new file mode 100644 index 00000000000..1bf31cf8e56 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.12.go @@ -0,0 +1,26 @@ +// +build !go1.13,go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
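+// Identical to the go1.13 variant except that ForceAttemptHTTP2 is omitted,
+// since that http.Transport field was only introduced in Go 1.13.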
+func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.5.go new file mode 100644 index 00000000000..253d7bc9d55 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.6.go new file mode 100644 index 00000000000..db240605441 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/doc.go new file mode 100644 index 00000000000..2fda523f993 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/doc.go @@ -0,0 +1,261 @@ +/* +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. + +Sessions are safe to use concurrently as long as the Session is not being +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. + +Sessions options from Shared Config + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. + +Credential and config loading order + +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. 
The order
+configuration is loaded in is:
+
+ * Environment Variables
+ * Shared Credentials file
+ * Shared Configuration file (if SharedConfig is enabled)
+
+The Environment variables for credentials will have precedence over shared
+config even if SharedConfig is enabled. To override this behavior and use the
+shared config credentials instead, specify session.Options.Profile (e.g.
+when using credential_source=Environment to assume a role).
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+	    Profile: "myProfile",
+	})
+
+Creating Sessions
+
+Creating a Session without additional options will load the credentials,
+region, and profile from the environment and shared config automatically. See
+the "Environment Variables" section for information on environment variables
+used by Session.
+
+	// Create Session
+	sess, err := session.NewSession()
+
+When creating Sessions, optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case-based, configuration
+as needed.
+
+	// Create a Session with a custom region
+	sess, err := session.NewSession(&aws.Config{
+	    Region: aws.String("us-west-2"),
+	})
+
+Use NewSessionWithOptions to provide additional configuration driving how the
+Session's configuration will be loaded, such as specifying the shared config
+profile, or overriding the shared config state (AWS_SDK_LOAD_CONFIG).
+
+	// Equivalent to session.NewSession()
+	sess, err := session.NewSessionWithOptions(session.Options{
+	    // Options
+	})
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+	    // Specify profile to load for the session's config
+	    Profile: "profile_name",
+
+	    // Provide SDK Config options, such as Region.
+	    Config: aws.Config{
+	        Region: aws.String("us-west-2"),
+	    },
+
+	    // Force enable Shared Config support
+	    SharedConfigState: session.SharedConfigEnable,
+	})
+
+Adding Handlers
+
+You can add handlers to a session to decorate API operations (e.g. adding HTTP
+headers). All clients that use the Session receive a copy of the Session's
+handlers. For example, the following request handler added to the Session logs
+every request made.
+
+	// Create a session, and add additional handlers for all service
+	// clients created with the Session to inherit. Adds logging handler.
+	sess := session.Must(session.NewSession())
+
+	sess.Handlers.Send.PushFront(func(r *request.Request) {
+	    // Log every request made and its payload
+	    logger.Printf("Request: %s/%s, Params: %s",
+	        r.ClientInfo.ServiceName, r.Operation, r.Params)
+	})
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's
+(~/.aws/credentials) credentials values, and all other config is provided by
+the environment variables, SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the
+SharedConfigEnable option is used to create the Session, the full shared
+config values will be loaded. This includes credentials, region, and support
+for assume role. In addition the Session will load its configuration from both
+the shared config file (~/.aws/config) and shared credentials file
+(~/.aws/credentials). Both files have the same format.
+
+If both config files are present, the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file
+(~/.aws/config).
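+
+The ordered list of files the Session loads from can also be provided directly
+through the session Options, which overrides the default file locations as
+well as the AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE environment
+variables. A minimal sketch, using hypothetical file paths:
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+	    SharedConfigFiles: []string{"/etc/myapp/aws_config", "/etc/myapp/aws_credentials"},
+	})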
+
+Credentials are the values the SDK uses to authenticate requests with AWS
+Services. When specified in a file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. They will be ignored if both are not present.
+aws_session_token is an optional field that can be provided in addition to the
+other two fields.
+
+	aws_access_key_id = AKID
+	aws_secret_access_key = SECRET
+	aws_session_token = TOKEN
+
+	; region only supported if SharedConfigEnabled.
+	region = us-east-1
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. A Session Token can optionally also be provided, but is
+not required.
+
+	# Access Key ID
+	AWS_ACCESS_KEY_ID=AKID
+	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+	# Secret Access Key
+	AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+	# Session Token
+	AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment the region must be provided before a service
+client request is made.
+
+	AWS_REGION=us-east-1
+
+	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_REGION is not also set.
+	AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+	AWS_PROFILE=my_profile
+
+	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_PROFILE is not also set.
+	AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+	AWS_SDK_LOAD_CONFIG=1
+
+Custom Shared Config and Credential Files
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+	AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Custom CA Bundle
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+	AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned.
If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and custom HTTP client, the HTTP client needs to be provided +when creating the session. Not the service client. + +Custom Client TLS Certificate + +The SDK supports the environment and session option being configured with +Client TLS certificates that are sent as a part of the client's TLS handshake +for client authentication. If used, both Cert and Key values are required. If +one is missing, or either fail to load the contents of the file an error will +be returned. + +HTTP Client's Transport concrete implementation must be a http.Transport +or creating the session will fail. + + AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key + AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert + +This can also be configured via the session.Options ClientTLSCert and ClientTLSKey. + + sess, err := session.NewSessionWithOptions(session.Options{ + ClientTLSCert: myCertFile, + ClientTLSKey: myKeyFile, + }) + +Custom EC2 IMDS Endpoint + +The endpoint of the EC2 IMDS client can be configured via the environment +variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +Session. See Options.EC2IMDSEndpoint for more details. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 + +If using an URL with an IPv6 address literal, the IPv6 address +component must be enclosed in square brackets. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + +The custom EC2 IMDS endpoint can also be specified via the Session options. + + sess, err := session.NewSessionWithOptions(session.Options{ + EC2IMDSEndpoint: "http://[::1]", + }) +*/ +package session diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/env_config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/env_config.go new file mode 100644 index 00000000000..f78bf6424f3 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/env_config.go @@ -0,0 +1,294 @@ +package session + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/aws/defaults" + "github.com/IBM/ibm-cos-sdk-go/aws/endpoints" +) + +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. 
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Creds credentials.Value
+
+	// Region value will instruct the SDK where to make service API requests to. If it is
+	// not provided in the environment the region must be provided before a service
+	// client request is made.
+	//
+	// AWS_REGION=us-east-1
+	//
+	// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_REGION is not also set.
+	// AWS_DEFAULT_REGION=us-east-1
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided "default" will be used as the
+	// profile name.
+	//
+	// AWS_PROFILE=my_profile
+	//
+	// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_PROFILE is not also set.
+	// AWS_DEFAULT_PROFILE=my_profile
+	Profile string
+
+	// SDK load config instructs the SDK to load the shared config in addition to
+	// shared credentials. This also expands the configuration loaded from the shared
+	// credentials to have parity with the shared config file. This also enables
+	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+	// env values as well.
+	//
+	// AWS_SDK_LOAD_CONFIG=1
+	EnableSharedConfig bool
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	// AWS_CONFIG_FILE=$HOME/my_shared_config
	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option and custom HTTP client, the HTTP client needs to be provided
+	// when creating the session. Not the service client.
+	//
+	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// Sets the TLS client certificate that should be used by the SDK's HTTP transport
+	// when making requests. The certificate must be paired with a TLS client key file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert string
+
+	// Sets the TLS client key that should be used by the SDK's HTTP transport
+	// when making requests. The key must be paired with a TLS client certificate file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey string
+
+	// Enables endpoint discovery via environment variables.
+ // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool +} + +var ( + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } + useCABundleKey = []string{ + "AWS_CA_BUNDLE", + } + useClientTLSCert = []string{ + "AWS_SDK_GO_CLIENT_TLS_CERT", + } + useClientTLSKey = []string{ + "AWS_SDK_GO_CLIENT_TLS_KEY", + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() (envConfig, error) { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() (envConfig, error) { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = EnvProviderName + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + // endpoint discovery is in reference to it being enabled. 
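+	// Any value other than the literal string "false" is treated as enabled.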
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + setFromEnvVal(&cfg.CustomCABundle, useCABundleKey) + setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert) + setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey) + + var err error + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + return cfg, nil +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) != 0 { + *dst = v + break + } + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/session.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/session.go new file mode 100644 index 00000000000..4bb9821a713 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/session.go @@ -0,0 +1,718 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/client" + "github.com/IBM/ibm-cos-sdk-go/aws/corehandlers" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam" + "github.com/IBM/ibm-cos-sdk-go/aws/defaults" + "github.com/IBM/ibm-cos-sdk-go/aws/endpoints" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" + + // ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle. 
+ ErrCodeLoadCustomCABundle = "LoadCustomCABundleError" + + // ErrCodeLoadClientTLSCert error code for unable to load client TLS + // certificate or key + ErrCodeLoadClientTLSCert = "LoadClientTLSCertError" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers + + options Options +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +// IBM removed + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. 
Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created, such as specifying the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +// +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevant. + // + // If not set, configuration values from from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). + // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. 
When sharing a token provider across multiple
+	// sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+	// share the session where possible.
+	//
+	// stscreds.StdinTokenProvider is a basic implementation that will prompt
+	// from stdin for the MFA token code.
+	//
+	// This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+	AssumeRoleTokenProvider func() (string, error)
+
+	// When the SDK's shared config is configured to assume a role this option
+	// may be provided to set the expiry duration of the STS credentials.
+	// Defaults to 15 minutes if not set as documented in the
+	// stscreds.AssumeRoleProvider.
+	AssumeRoleDuration time.Duration
+
+	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
+	// the SDK will use instead of the default system's root CA bundle. Use this
+	// only if you want to replace the CA bundle the SDK uses for TLS requests.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// If the Transport's TLS config is set this option will cause the SDK
+	// to overwrite the Transport's TLS config's RootCAs value. If the CA
+	// bundle reader contains multiple certificates all of them will be loaded.
+	//
+	// Can also be specified via the environment variable:
+	//
+	//     AWS_CA_BUNDLE=$HOME/ca_bundle
+	//
+	// Can also be specified via the shared config field:
+	//
+	//     ca_bundle = $HOME/ca_bundle
+	CustomCABundle io.Reader
+
+	// Reader for the TLS client certificate that should be used by the SDK's
+	// HTTP transport when making requests. The certificate must be paired with
+	// a TLS client key file. Will be ignored if both are not provided.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	//     AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert io.Reader
+
+	// Reader for the TLS client key that should be used by the SDK's HTTP
+	// transport when making requests. The key must be paired with a TLS client
+	// certificate file. Will be ignored if both are not provided.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	//     AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey io.Reader
+
+	// The handlers that the session and all API clients will be created with.
+	// This must be a complete set of handlers. Use the defaults.Handlers()
+	// function to initialize this value before changing the handlers to be
+	// used by the SDK.
+	Handlers request.Handlers
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + var err error + if opts.SharedConfigState == SharedConfigEnable { + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } + } else { + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } + } + + if len(opts.Profile) != 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } + + // Get a merged version of the user provided config to determine if + // credentials were. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) + + // Ordered config files will be loaded in with later files overwriting + // previous config file values. + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) + if err != nil { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. 
Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + options: opts, + } + + initHandlers(s) + + return s, nil +} + +func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error { + // CA Bundle can be specified in both environment variable shared config file. + var caBundleFilename = envCfg.CustomCABundle + if len(caBundleFilename) == 0 { + caBundleFilename = sharedCfg.CustomCABundle + } + + // Only use environment value if session option is not provided. + customTLSOptions := map[string]struct { + filename string + field *io.Reader + errCode string + }{ + "custom CA bundle PEM": {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle}, + "custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert}, + "custom client TLS key": {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert}, + } + for name, v := range customTLSOptions { + if len(v.filename) != 0 && *v.field == nil { + f, err := os.Open(v.filename) + if err != nil { + return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err) + } + defer f.Close() + *v.field = f + } + } + + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil { + return err + } + } + + // Setup HTTP client TLS certificate and key for client TLS authentication. + if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil { + if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil { + return err + } + } else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil { + // Do nothing if neither values are available. + + } else { + return awserr.New(ErrCodeLoadClientTLSCert, + fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided", + opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil) + } + + return nil +} + +func getHTTPTransport(client *http.Client) (*http.Transport, error) { + var t *http.Transport + switch v := client.Transport.(type) { + case *http.Transport: + t = v + default: + if client.Transport != nil { + return nil, fmt.Errorf("unsupported transport, %T", client.Transport) + } + } + if t == nil { + // Nil transport implies `http.DefaultTransport` should be used. Since + // the SDK cannot modify, nor copy the `DefaultTransport` specifying + // the values the next closest behavior. 
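+		// Fall back to a fresh transport constructed with equivalent defaults.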
+ t = getCustomTransport() + } + + return t, nil +} + +func loadCustomCABundle(client *http.Client, bundle io.Reader) error { + t, err := getHTTPTransport(client) + if err != nil { + return awserr.New(ErrCodeLoadCustomCABundle, + "unable to load custom CA bundle, HTTPClient's transport unsupported type", err) + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + client.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeLoadCustomCABundle, + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New(ErrCodeLoadCustomCABundle, + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error { + t, err := getHTTPTransport(client) + if err != nil { + return awserr.New(ErrCodeLoadClientTLSCert, + "unable to get usable HTTP transport from client", err) + } + + cert, err := ioutil.ReadAll(certFile) + if err != nil { + return awserr.New(ErrCodeLoadClientTLSCert, + "unable to get read client TLS cert file", err) + } + + key, err := ioutil.ReadAll(keyFile) + if err != nil { + return awserr.New(ErrCodeLoadClientTLSCert, + "unable to get read client TLS key file", err) + } + + clientCert, err := tls.X509KeyPair(cert, key) + if err != nil { + return awserr.New(ErrCodeLoadClientTLSCert, + "unable to load x509 key pair from client cert", err) + } + + tlsCfg := t.TLSClientConfig + if tlsCfg == nil { + tlsCfg = &tls.Config{} + } + + tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert) + + t.TLSClientConfig = tlsCfg + client.Transport = t + + return nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { + // Merge in user provided configuration + cfg.MergeIn(userCfg) + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + if cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) + } + } + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + // Configure credentials if not already set by the user when creating the + // Session. 
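+	// The IBM IAM providers (environment, shared credentials file, and shared
+	// config file) are consulted first; the chain below is the fallback.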
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + // IBM COS SDK Code -- START + if iBmIamCreds := getIBMIAMCredentials(userCfg); iBmIamCreds != nil { + cfg.Credentials = iBmIamCreds + } else { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + // IBM COS SDK Code -- END + } + + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + + return nil +} + +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } +} + +// IBM COS SDK Code -- START +// getIBMIAMCredentials retrieve token manager creds or ibm based credentials +func getIBMIAMCredentials(config *aws.Config) *credentials.Credentials { + + if provider := ibmiam.NewEnvProvider(config); provider.IsValid() { + return credentials.NewCredentials(provider) + } + + if provider := ibmiam.NewSharedCredentialsProvider(config, "", ""); provider.IsValid() { + return credentials.NewCredentials(provider) + } + + if provider := ibmiam.NewSharedConfigProvider(config, "", ""); provider.IsValid() { + return credentials.NewCredentials(provider) + } + + return nil +} + +// IBM COS SDK Code -- END + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + options: s.options, + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + region := aws.StringValue(s.Config.Region) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. 
+ return + } + r.Error = err + }) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + PartitionID: resolved.PartitionID, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = aws.StringValue(s.Config.Region) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/shared_config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/shared_config.go new file mode 100644 index 00000000000..cab2a803c48 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/shared_config.go @@ -0,0 +1,443 @@ +package session + +import ( + "fmt" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/aws/endpoints" + "github.com/IBM/ibm-cos-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // Additional Config fields + regionKey = `region` + + // custom CA Bundle filename + customCABundleKey = `ca_bundle` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential 
Process + credentialProcessKey = `credential_process` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" +) + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + Profile string + + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + CredentialSource string + CredentialProcess string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. + // + // region + Region string + + // CustomCABundle is the file path to a PEM file the SDK will read and + // use to configure the HTTP transport with additional CA certs that are + // not present in the platforms default CA store. + // + // This value will be ignored if the file does not exist. + // + // ca_bundle + CustomCABundle string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion bool +} + +type sharedConfigFile struct { + Filename string + IniData ini.Sections +} + +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of +// A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. 
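+//
+// For illustration only (the profile name and values below are placeholders,
+// not part of this file), a shared config profile might look like:
+//
+//	[default]
+//	region = us-east-1
+//	s3_use_arn_region = true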
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { + return sharedConfig{}, err + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { + // Skip files which can't be opened and read for whatever reason + continue + } else if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: sections, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + cfg.Profile = profile + + // Trim files from the list that don't exist. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr + } + + profiles[profile] = struct{}{} + + if err := cfg.validateCredentialType(); err != nil { + return err + } + + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + cfg.clearCredentialOptions() + + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + return err + } + + cfg.SourceProfile = srcCfg + } + + return nil +} + +// setFromFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. 
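+//
+// An illustrative sketch of that rule (values are placeholders): a profile
+// containing only
+//
+//	[default]
+//	aws_access_key_id = AKID
+//
+// with no aws_secret_access_key will not set cfg.Creds.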
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
+	section, ok := file.IniData.GetSection(profile)
+	if !ok {
+		// Fall back to the alternate profile name: "profile <name>"
+		section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+		if !ok {
+			return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+		}
+	}
+
+	if exOpts {
+		// Assume Role Parameters
+		updateString(&cfg.RoleARN, section, roleArnKey)
+		updateString(&cfg.ExternalID, section, externalIDKey)
+		updateString(&cfg.MFASerial, section, mfaSerialKey)
+		updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
+		updateString(&cfg.SourceProfileName, section, sourceProfileKey)
+		updateString(&cfg.CredentialSource, section, credentialSourceKey)
+		updateString(&cfg.Region, section, regionKey)
+		updateString(&cfg.CustomCABundle, section, customCABundleKey)
+
+		if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 {
+			sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v)
+			if err != nil {
+				return fmt.Errorf("failed to load %s from shared config, %s, %v",
+					s3UsEast1RegionalSharedKey, file.Filename, err)
+			}
+			cfg.S3UsEast1RegionalEndpoint = sre
+		}
+	}
+
+	updateString(&cfg.CredentialProcess, section, credentialProcessKey)
+
+	// Shared Credentials
+	creds := credentials.Value{
+		AccessKeyID:     section.String(accessKeyIDKey),
+		SecretAccessKey: section.String(secretAccessKey),
+		SessionToken:    section.String(sessionTokenKey),
+		ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+	}
+	if creds.HasKeys() {
+		cfg.Creds = creds
+	}
+
+	// `credential_process`
+	if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
+		cfg.CredentialProcess = credProc
+	}
+
+	// Region
+	if v := section.String(regionKey); len(v) > 0 {
+		cfg.Region = v
+	}
+
+	// Endpoint discovery
+	updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+
+	updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey)
+
+	return nil
+}
+
+func (cfg *sharedConfig) validateCredentialType() error {
+	// Only one or no credential type can be defined.
+	if !oneOrNone(
+		len(cfg.SourceProfileName) != 0,
+		len(cfg.CredentialSource) != 0,
+		len(cfg.CredentialProcess) != 0,
+	) {
+		return ErrSharedConfigSourceCollision
+	}
+
+	return nil
+}
+
+func (cfg *sharedConfig) hasCredentials() bool {
+	switch {
+	case len(cfg.SourceProfileName) != 0:
+	case len(cfg.CredentialSource) != 0:
+	case len(cfg.CredentialProcess) != 0:
+	case cfg.Creds.HasKeys():
+	default:
+		return false
+	}
+
+	return true
+}
+
+func (cfg *sharedConfig) clearCredentialOptions() {
+	cfg.CredentialSource = ""
+	cfg.CredentialProcess = ""
+	cfg.Creds = credentials.Value{}
+}
+
+func oneOrNone(bs ...bool) bool {
+	var count int
+
+	for _, b := range bs {
+		if b {
+			count++
+			if count > 1 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// updateString will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.String(key)
+}
+
+// updateBool will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.Bool(key)
+}
+
+// updateBoolPtr will only update the dst with the value in the section key,
+// if the key is present in the section.
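+//
+// A hedged usage sketch (the key constant is defined above; the section
+// contents are hypothetical):
+//
+//	// given an ini section containing: endpoint_discovery_enabled = true
+//	var v *bool
+//	updateBoolPtr(&v, section, enableEndpointDiscoveryKey) // *v == true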
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = new(bool)
+	**dst = section.Bool(key)
+}
+
+// SharedConfigLoadError is an error for when the shared config file failed
+// to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+	return "SharedConfigLoadError"
+}
+
+// Message is the description of the error.
+func (e SharedConfigLoadError) Message() string {
+	return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+	Profile string
+	Err     error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+	return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error.
+func (e SharedConfigProfileNotExistsError) Message() string {
+	return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	RoleARN       string
+	SourceProfile string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+	return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error.
+func (e SharedConfigAssumeRoleError) Message() string {
+	return fmt.Sprintf(
+		"failed to load assume role for %s, source profile %s has no shared credentials",
+		e.RoleARN, e.SourceProfile,
+	)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/common.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/common.go
new file mode 100644
index 00000000000..37ab64d04e4
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/common.go
@@ -0,0 +1,17 @@
+package ibmiam
+
+// IBM COS SDK Code -- START
+const (
+	// LOGGER constants
+	debugLog              = ""
+	signRequestHandlerLog = "ibmiam.SignRequestHandler"
+
+	// Error constants
+	errorExpectedNotFound = "Error Expected Not Found"
+	errorNotMatch         = "Error not match"
+
+	// Global constant
+	operation = "Operation"
+)
+
+// IBM COS SDK Code -- END
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/ibmiam.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/ibmiam.go
new file mode 100644
index 00000000000..1b6e78b774c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/ibmiam.go
@@ -0,0 +1,95 @@
+package ibmiam
+
+// IBM COS SDK Code -- START
+import (
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests using the IBM IAM signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: signRequestHandlerLog, Fn: Sign,
+}
+
+var (
+	// Errors for Sign Request Handler
+	errTokenTypeNotSet         = awserr.New(signRequestHandlerLog, "Token Type Not Set", nil)
+	errAccessTokenNotSet       = awserr.New(signRequestHandlerLog, "Access Token Not Set", nil)
+	errServiceInstanceIDNotSet = awserr.New(signRequestHandlerLog, "Service Instance Id Not Set", nil)
+)
+
+// Sign signs a request with the IBM IAM token type and access token, and
+// propagates the service instance ID of the instance the request is made to.
+//
+// Unlike the AWS V4 signer this handler returns nothing; if signing fails,
+// the error is recorded on the request and the request's SignedHeaderVals
+// are cleared.
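+//
+// For illustration, with a token type of "Bearer" the resulting header has
+// the shape (token truncated, hypothetical):
+//
+//	Authorization: Bearer eyJraWQiOi...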
+func Sign(req *request.Request) {
+
+	// Sets the logger for the Request to be signed
+	logger := req.Config.Logger
+	if !req.Config.LogLevel.Matches(aws.LogDebug) {
+		logger = nil
+	}
+
+	// Obtains the IBM IAM Credentials Object
+	// The object includes:
+	//	IBM IAM Token
+	//	IBM IAM Service Instance ID
+	value, err := req.Config.Credentials.Get()
+	if err != nil {
+		if logger != nil {
+			logger.Log(debugLog, signRequestHandlerLog, "CREDENTIAL GET ERROR", err)
+		}
+		req.Error = err
+		req.SignedHeaderVals = nil
+		return
+	}
+
+	// Check the type of the Token.
+	// If it does not exist, return with an error in the request.
+	if value.TokenType == "" {
+		err = errTokenTypeNotSet
+		if logger != nil {
+			logger.Log(debugLog, err)
+		}
+		req.Error = err
+		req.SignedHeaderVals = nil
+		return
+	}
+
+	// Check the Access Token.
+	// If it does not exist, return with an error in the request.
+	if value.AccessToken == "" {
+		err = errAccessTokenNotSet
+		if logger != nil {
+			logger.Log(debugLog, err)
+		}
+		req.Error = err
+		req.SignedHeaderVals = nil
+		return
+	}
+
+	// Get the Service Instance ID from the IBM IAM Credentials object
+	serviceInstanceID := req.HTTPRequest.Header.Get("ibm-service-instance-id")
+	if serviceInstanceID == "" && value.ServiceInstanceID != "" {
+		// Log the Service Instance ID
+		if logger != nil {
+			logger.Log(debugLog, "Setting the 'ibm-service-instance-id' from the Credentials")
+		}
+		req.HTTPRequest.Header.Set("ibm-service-instance-id", value.ServiceInstanceID)
+	}
+
+	// Use the IBM IAM Token Bearer as the Authorization Header
+	authString := value.TokenType + " " + value.AccessToken
+	req.HTTPRequest.Header.Set("Authorization", authString)
+	if logger != nil {
+		logger.Log(debugLog, signRequestHandlerLog, "Set Header Authorization", authString)
+	}
+}
+
+// IBM COS SDK Code -- END
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/signer_router.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/signer_router.go
new file mode 100644
index 00000000000..d5fd68735ba
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/signer_router.go
@@ -0,0 +1,112 @@
+package signer
+
+import (
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+	"github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam"
+	"github.com/IBM/ibm-cos-sdk-go/aws/signer/v4"
+)
+
+const (
+	debugLog        = ""
+	signerRouterLog = "SignerRouter"
+)
+
+type requestSignerRouter struct {
+	signers map[string]request.NamedHandler
+}
+
+// SignRequestHandler is a named handler that routes the request to a signer
+// according to the credential type.
+var SignRequestHandler = defaultRequestSignerRouter()
+
+// DefaultSignerHandlerForProviderType is a map with the default handler per
+// credential provider type.
+var DefaultSignerHandlerForProviderType = map[string]request.NamedHandler{
+	"":      v4.SignRequestHandler,
+	"oauth": ibmiam.SignRequestHandler,
+	"v4":    v4.SignRequestHandler,
+}
+
+func defaultRequestSignerRouter() request.NamedHandler {
+	router := requestSignerRouter{signers: make(map[string]request.NamedHandler)}
+	for k, v := range DefaultSignerHandlerForProviderType {
+		router.signers[k] = v
+	}
+	return request.NamedHandler{
+		Name: "signer.requestsignerrouter", Fn: router.delegateRequestToSigner,
+	}
+}
+
+// To stay as close as possible to the AWS template, the v4 package's
+// SignSDKRequestWithCurrentTime method had to be made public.
+
+// CustomRequestSignerRouter routes the request to a signer according to the
+// current credentials type.
+func CustomRequestSignerRouter(opts ...func(*v4.Signer)) request.NamedHandler {
+
+	router := requestSignerRouter{signers: make(map[string]request.NamedHandler)}
+	for k, v := range DefaultSignerHandlerForProviderType {
+		router.signers[k] = v
+	}
+
+	customV4Handler := request.NamedHandler{
+		Name: v4.SignRequestHandler.Name,
+		Fn: func(req *request.Request) {
+			v4.SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+		},
+	}
+
+	router.signers[""] = customV4Handler
+	router.signers["v4"] = customV4Handler
+
+	return request.NamedHandler{
+		Name: SignRequestHandler.Name, Fn: router.delegateRequestToSigner,
+	}
+}
+
+// Uses the request config to access configuration and logging.
+func (r requestSignerRouter) delegateRequestToSigner(req *request.Request) {
+
+	logger := req.Config.Logger
+	if !req.Config.LogLevel.Matches(aws.LogDebug) {
+		logger = nil
+	}
+
+	if req.Config.Credentials == credentials.AnonymousCredentials {
+		if logger != nil {
+			logger.Log(debugLog, signerRouterLog, "AnonymousCredentials")
+		}
+		return
+	}
+
+	value, err := req.Config.Credentials.Get()
+	if err != nil {
+		if logger != nil {
+			logger.Log(debugLog, signerRouterLog, "CREDENTIAL GET ERROR", err)
+		}
+		req.Error = err
+		req.SignedHeaderVals = nil
+		return
+	}
+
+	if logger != nil {
+		logger.Log(debugLog, signerRouterLog, "Provider Type", value.ProviderType)
+	}
+
+	if handler, ok := r.signers[value.ProviderType]; ok {
+		if logger != nil {
+			logger.Log(debugLog, signerRouterLog, "Delegating to", handler.Name)
+		}
+		handler.Fn(req)
+	} else {
+		err = awserr.New("SignerRouterMissingHandler", "No Handler Found for Type "+value.ProviderType, nil)
+		if logger != nil {
+			logger.Log(debugLog, signerRouterLog, "No Handler Found", err)
+		}
+		req.Error = err
+		req.SignedHeaderVals = nil
+		return
+	}
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 00000000000..10f2a6f8c42
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,81 @@
+package v4
+
+import (
+	"github.com/IBM/ibm-cos-sdk-go/internal/strings"
+)
+
+// rules houses a set of rules needed for validation of a string value.
+type rules []rule
+
+// rule interface allows for more flexible rules and simply checks
+// whether or not a value adheres to that rule
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rules
+// apply to the value and supports nested rules
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule satisfies whether it exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+	rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+	rule
+}
+
+// IsValid for blacklist checks if the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
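+//
+// For example (illustrative): patterns{"X-Amz-Meta-"} accepts
+// "X-Amz-Meta-Owner", since strings.HasPrefixFold performs a
+// case-insensitive prefix match.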
+func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefixFold(value, pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/options.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/options.go new file mode 100644 index 00000000000..6aa2ed241bb --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to +// true of the signer. +func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 00000000000..9663d555388 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/IBM/ibm-cos-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 00000000000..e0f5d86c5ca --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/uri_path.go new file 
mode 100644
index 00000000000..bd082e9d1f7
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+func getURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 00000000000..8501d2098c9
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,849 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//	"//<hostname>/<path>"
+//
+//	// e.g.
+//	"//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fallback to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query that the signature was generated
+// for. https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK you explicitly escape the request prior to signing it; this will help
+// prevent signature validation errors. This can be done by setting the
+// URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using absolute URLs in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
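+//
+// A minimal standalone sketch (credential, host, and region values below are
+// placeholders, not part of this package):
+//
+//	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
+//	req, _ := http.NewRequest(http.MethodGet, "https://example.com/bucket/key", nil)
+//	_, err := signer.Sign(req, nil, "s3", "us-east-1", time.Now())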
+package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/credentials" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/internal/sdkio" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/rest" +) + +const ( + authorizationHeader = "Authorization" + authHeaderSignatureElem = "Signature=" + signatureQueryKey = "X-Amz-Signature" + + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + awsV4Request = "aws4_request" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + authorizationHeader: struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. 
See
+	// aws.LogLevelType for more information on available logging levels.
+	//
+	// By default nothing will be logged.
+	Debug aws.LogLevelType
+
+	// The logger logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+	Logger aws.Logger
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+	// escaping then use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+	// io.ReadSeeker passed in to the signer. This is useful if you're using a
+	// custom wrapper around the body for the io.ReadSeeker and want to preserve
+	// the Body value on the Request.Body.
+	//
+	// This does run the risk of signing a request with a body that will not be
+	// sent in the request. Need to ensure that the underlying data of the Body
+	// values are the same.
+	DisableRequestBodyOverwrite bool
+
+	// currentTimeFn returns the time value which represents the current time.
+	// This value should only be used for testing. If it is nil the default
+	// time.Now will be used.
+	currentTimeFn func() time.Time
+
+	// UnsignedPayload will prevent signing of the payload. This will only
+	// work for services that have support for this.
+	UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	for _, option := range options {
+		option(v4)
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+	ServiceName      string
+	Region           string
+	Request          *http.Request
+	Body             io.ReadSeeker
+	Query            url.Values
+	Time             time.Time
+	ExpireTime       time.Duration
+	SignedHeaderVals http.Header
+
+	DisableURIPathEscaping bool
+
+	credValues      credentials.Value
+	isPresign       bool
+	unsignedPayload bool
+
+	bodyDigest       string
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+	authorization    string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is done because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
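+//
+// A hedged sketch of presigned use (service, region, and expiry are
+// placeholders):
+//
+//	_, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
+//	// On success req.URL carries X-Amz-Signature and the other X-Amz-*
+//	// query values, and can be shared until the expiry elapses.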
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+	currentTimeFn := v4.currentTimeFn
+	if currentTimeFn == nil {
+		currentTimeFn = time.Now
+	}
+
+	ctx := &signingCtx{
+		Request:                r,
+		Body:                   body,
+		Query:                  r.URL.Query(),
+		Time:                   signTime,
+		ExpireTime:             exp,
+		isPresign:              isPresign,
+		ServiceName:            service,
+		Region:                 region,
+		DisableURIPathEscaping: v4.DisableURIPathEscaping,
+		unsignedPayload:        v4.UnsignedPayload,
+	}
+
+	for key := range ctx.Query {
+		sort.Strings(ctx.Query[key])
+	}
+
+	if ctx.isRequestSigned() {
+		ctx.Time = currentTimeFn()
+		ctx.handlePresignRemoval()
+	}
+
+	var err error
+	ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
+	if err != nil {
+		return http.Header{}, err
+	}
+
+	ctx.sanitizeHostForHeader()
+	ctx.assignAmzQueryValues()
+	if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+		return nil, err
+	}
+
+	// If the request is not presigned the body should be attached to it. This
+	// prevents the confusion of wanting to send a signed request without
+	// the body the request was signed for attached.
+	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+		var reader io.ReadCloser
+		if body != nil {
+			var ok bool
+			if reader, ok = body.(io.ReadCloser); !ok {
+				reader = ioutil.NopCloser(body)
+			}
+		}
+		r.Body = reader
+	}
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo(ctx)
+	}
+
+	return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+	request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+	if !ctx.isPresign {
+		return
+	}
+
+	// The credentials have expired for this request. The current signing
+	// is invalid, and the request needs to be re-signed or it will fail.
+	ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in the case retrieving the new credentials fails.
+	ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if ctx.credValues.SessionToken != "" {
+			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+		} else {
+			ctx.Query.Del("X-Amz-Security-Token")
+		}
+
+		return
+	}
+
+	if ctx.credValues.SessionToken != "" {
+		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+	}
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests using the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built in service client's
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+	SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+	return request.NamedHandler{
+		Name: name,
+		Fn: func(req *request.Request) {
+			SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+		},
+	}
+}
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// the request is signed with the value returned by the current time function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+	// Skip signing the request when the AnonymousCredentials object is
+	// used; such requests do not need to be signed.
+	if req.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	region := req.ClientInfo.SigningRegion
+	if region == "" {
+		region = aws.StringValue(req.Config.Region)
+	}
+
+	name := req.ClientInfo.SigningName
+	if name == "" {
+		name = req.ClientInfo.ServiceName
+	}
+
+	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+		v4.Debug = req.Config.LogLevel.Value()
+		v4.Logger = req.Config.Logger
+		v4.DisableHeaderHoisting = req.NotHoist
+		v4.currentTimeFn = curTimeFn
+		if name == "s3" {
+			// S3 service should not have any escaping applied
+			v4.DisableURIPathEscaping = true
+		}
+		// Prevents setting the HTTPRequest's Body. Since the Body could be
+		// wrapped in a custom io.Closer that we do not want to be stomped
+		// on by the signer.
+ v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + authHeaderSignatureElem + ctx.signature, + } + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) + } + + return nil +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. 
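+//
+// A hedged usage sketch (req is a previously signed *http.Request):
+//
+//	sig, err := GetSignedRequestSignature(req)
+//	// sig holds the hex-decoded signature bytes on success.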
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + if !r.IsValid(k) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
+ continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + formatTime(ctx.Time), + ctx.credentialString, + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && + (ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda") + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func hmacSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func hashSHA256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { + hash := sha256.New() + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } + + return hash.Sum(nil), nil +} + +const doubleSpace = " " + +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ + } + } + + vals[i] = string(buf[:m]) + } +} + +func buildSigningScope(region, service string, dt time.Time) string { + return strings.Join([]string{ + formatShortTime(dt), + region, + service, + awsV4Request, + }, "/") +} + +func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) + kRegion := hmacSHA256(kDate, []byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + signingKey := hmacSHA256(kService, []byte(awsV4Request)) + return signingKey +} + +func formatShortTime(dt time.Time) string { + return dt.UTC().Format(shortTimeFormat) +} + +func formatTime(dt time.Time) string { + return dt.UTC().Format(timeFormat) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/types.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/types.go new file mode 100644 index 00000000000..de696e73daa --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/types.go @@ -0,0 +1,264 @@ +package aws + +import ( + "io" + "strings" + "sync" + + "github.com/IBM/ibm-cos-sdk-go/internal/sdkio" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. +// +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. 
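+//
+// A hedged usage sketch (the payload is illustrative):
+//
+//	body := aws.ReadSeekCloser(strings.NewReader("payload"))
+//	// body now satisfies io.ReadSeeker for unsigned streaming operations,
+//	// with the retry and signing caveats described above.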
+// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. +func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. +func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. 
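+	// Unwrap it first so GetLen can consult Len() before falling back to
+	// seeking the underlying reader.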
+ switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. +func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. 
+ _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/url.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/url.go new file mode 100644 index 00000000000..6192b2455b6 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go new file mode 100644 index 00000000000..ee2c8f2c518 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go @@ -0,0 +1,12 @@ +// Package aws provides core functionality for making requests to IBM COS services. +package aws + +// IBM COS SDK Code -- START + +// SDKName is the name of this AWS SDK +const SDKName = "ibm-cos-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.7.0" + +// IBM COS SDK Code -- END diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ast.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ast.go new file mode 100644 index 00000000000..e83a99886bc --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. 
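+// When the AST was created with newASTWithRootToken, the node itself is
+// returned rather than a child.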
+func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. +var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comma_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comma_token.go new file mode 100644 index 00000000000..0895d53cbe6 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comment_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comment_token.go new file mode 100644 index 00000000000..0b76999ba1f --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/doc.go new file mode 100644 index 00000000000..25ce0fe134d --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. 
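+// Within the SDK it backs parsing of the shared config and credentials
+// files.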
+// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/empty_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/empty_token.go new file mode 100644 index 00000000000..04345a54c20 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/expression.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/expression.go new file mode 100644 index 00000000000..91ba2a59dd5 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. +// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/fuzz.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/fuzz.go new file mode 100644 index 00000000000..8d462f77e24 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini.go new file mode 100644 index 00000000000..d72432096a4 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. 
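+//
+// A minimal sketch of its use:
+//
+//	sections, err := ini.ParseBytes([]byte("[default]\nregion = us-south\n"))
+//	if err == nil {
+//		section, _ := sections.GetSection("default")
+//		fmt.Println(section.String("region"))
+//	}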
+func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 00000000000..b015c9dabea --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. +func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. 
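+// A token records its type, the raw runes consumed, and, for number
+// literals, the numeric base.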
+type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 00000000000..55fa73ebcf2 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,357 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. +var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + TokenNone: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + 
TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
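+					// unquoted right-hand sides keep accumulating raw runes
+					// and are treated as plain strings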
+ + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. +func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 00000000000..24df543d38c --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. 
A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. +func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/newline_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/newline_token.go new file mode 100644 index 00000000000..e52ac399f17 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/number_helper.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/number_helper.go new file mode 100644 index 00000000000..a45c0bc5662 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + 
hex + exponent +) + +type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 00000000000..8a84c7cbe08 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_error.go new file mode 100644 index 00000000000..45728701931 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 00000000000..7f01cf7c703 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. 
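+// Callers must ensure the stack is non-empty (see Len), as popping an
+// empty stack would index out of range.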
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 00000000000..f82095ba259 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/skipper.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/skipper.go new file mode 100644 index 00000000000..da7a4049cfa --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/statement.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/statement.go new file mode 100644 index 00000000000..18f3fe89317 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
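+// The returned AST has kind ASTKindStatement and an empty root node.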
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/value_util.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/value_util.go new file mode 100644 index 00000000000..305999d29be --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. 
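+// It returns 0 when the slice does not begin with '-'; otherwise the
+// returned count includes the leading '-'.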
+func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/visitor.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/visitor.go new file mode 100644 index 00000000000..94841c32443 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... +func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + if rhs.Root.Type() != TokenLit { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + t.values[key] = v + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements... 
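+// Completed section statements set the visitor's current scope, so that
+// expressions visited afterwards are stored under that section.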
+func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + v.Sections.container[name] = Section{} + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + Name string + values values +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/walker.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/walker.go new file mode 100644 index 00000000000..99915f7f777 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using the v, the Visitor. +func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ws_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ws_token.go new file mode 100644 index 00000000000..7ffb4ae06ff --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as a space or tab. 
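+// (In practice any Unicode space character other than '\n' or '\r' is
+// accepted; see unicode.IsSpace.)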
+func isWhitespace(c rune) bool {
+	return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if !isWhitespace(b[i]) {
+			break
+		}
+	}
+
+	return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/accesspoint_arn.go
new file mode 100644
index 00000000000..9cf7bb66c09
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/accesspoint_arn.go
@@ -0,0 +1,50 @@
+package arn
+
+import (
+	"strings"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/arn"
+)
+
+// AccessPointARN provides the representation of an S3 Access Point ARN.
+type AccessPointARN struct {
+	arn.ARN
+	AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+	return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+//
+// Supported Access point resource format:
+//	- Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
+//	- example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint
+//
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+	if len(a.Region) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"}
+	}
+	if len(a.AccountID) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
+	}
+	if len(resParts) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+	}
+
+	resID := resParts[0]
+	if len(strings.TrimSpace(resID)) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+
+	return AccessPointARN{
+		ARN:             a,
+		AccessPointName: resID,
+	}, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/arn.go
new file mode 100644
index 00000000000..e2aefc9a05f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/arn.go
@@ -0,0 +1,90 @@
+package arn
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/arn"
+)
+
+var supportedServiceARN = []string{
+	"s3",
+	"s3-outposts",
+	"s3-object-lambda",
+}
+
+func isSupportedServiceARN(service string) bool {
+	for _, name := range supportedServiceARN {
+		if name == service {
+			return true
+		}
+	}
+	return false
+}
+
+// Resource provides the interfaces abstracting ARNs of specific resource
+// types.
+type Resource interface {
+	GetARN() arn.ARN
+	String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
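+//
+// Illustrative call (the resParser wiring here is an assumption for the
+// example, not defined in this file):
+//
+//	res, err := ParseResource("arn:aws:s3:us-west-2:012345678901:accesspoint/myap", resParser)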
+func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "partition not set"} + } + + if !isSupportedServiceARN(a.Service) { + return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} + } + + if len(a.Resource) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. +type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +// Error returns a string denoting the occurred InvalidARNError +func (e InvalidARNError) Error() string { + return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/outpost_arn.go new file mode 100644 index 00000000000..45bb994af7c --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/outpost_arn.go @@ -0,0 +1,126 @@ +package arn + +import ( + "strings" + + "github.com/IBM/ibm-cos-sdk-go/aws/arn" +) + +// OutpostARN interface that should be satisfied by outpost ARNs +type OutpostARN interface { + Resource + GetOutpostID() string +} + +// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format +// and return a specific OutpostARN type +// +// Currently supported outpost ARN formats: +// * Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +// * Outpost Bucket ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket +// +// Other outpost ARN formats may be supported and added in the future. +// +func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { + if len(a.Region) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "region not set"} + } + + if len(a.AccountID) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + + // verify if outpost id is present and valid + if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + // verify possible resource type exists + if len(resParts) < 3 { + return nil, InvalidARNError{ + ARN: a, Reason: "incomplete outpost resource type. 
Expected bucket or access-point resource to be present", + } + } + + // Since we know this is a OutpostARN fetch outpostID + outpostID := strings.TrimSpace(resParts[0]) + + switch resParts[1] { + case "accesspoint": + accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return OutpostAccessPointARN{}, err + } + return OutpostAccessPointARN{ + AccessPointARN: accesspointARN, + OutpostID: outpostID, + }, nil + + case "bucket": + bucketName, err := parseBucketResource(a, resParts[2:]) + if err != nil { + return nil, err + } + return OutpostBucketARN{ + ARN: a, + BucketName: bucketName, + OutpostID: outpostID, + }, nil + + default: + return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} + } +} + +// OutpostAccessPointARN represents outpost access point ARN. +type OutpostAccessPointARN struct { + AccessPointARN + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost access point arn +func (o OutpostAccessPointARN) GetOutpostID() string { + return o.OutpostID +} + +// OutpostBucketARN represents the outpost bucket ARN. +type OutpostBucketARN struct { + arn.ARN + BucketName string + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost bucket arn +func (o OutpostBucketARN) GetOutpostID() string { + return o.OutpostID +} + +// GetARN retrives the base ARN from outpost bucket ARN resource +func (o OutpostBucketARN) GetARN() arn.ARN { + return o.ARN +} + +// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the +// bucket resource id. +// +// parseBucketResource only parses the bucket resource id. +// +func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) { + if len(resParts) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + if len(resParts) > 1 { + return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + bucketName = strings.TrimSpace(resParts[0]) + if len(bucketName) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + return bucketName, err +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go new file mode 100644 index 00000000000..513154cc0e3 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go @@ -0,0 +1,15 @@ +package arn + +// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service +type S3ObjectLambdaARN interface { + Resource + + isS3ObjectLambdasARN() +} + +// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type +type S3ObjectLambdaAccessPointARN struct { + AccessPointARN +} + +func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/endpoint_errors.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/endpoint_errors.go new file mode 100644 index 00000000000..be6102c90b3 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/endpoint_errors.go @@ -0,0 +1,189 @@ +package s3shared + +import ( + "fmt" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +// InvalidARNError denotes the error for Invalid ARN +type InvalidARNError 
struct { + message string + resource arn.Resource + origErr error +} + +// Error returns the InvalidARNError +func (e InvalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns the invalid ARN error code +func (e InvalidARNError) Code() string { + return invalidARNErrorErrCode +} + +// Message returns the message for Invalid ARN error +func (e InvalidARNError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Invalid ARN Error +func (e InvalidARNError) OrigErr() error { + return e.origErr +} + +// NewInvalidARNError denotes invalid arn error +func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints +func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported with custom client endpoints", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition +func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for the target ARN partition", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithFIPSError ARN not supported for FIPS region +func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for FIPS region", + resource: resource, + origErr: err, + } +} + +// ConfigurationError is used to denote a client configuration error +type ConfigurationError struct { + message string + resource arn.Resource + clientPartitionID string + clientRegion string + origErr error +} + +// Error returns the Configuration error string +func (e ConfigurationError) Error() string { + extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", + e.resource, e.clientPartitionID, e.clientRegion) + + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns configuration error's error-code +func (e ConfigurationError) Code() string { + return configurationErrorErrCode +} + +// Message returns the configuration error message +func (e ConfigurationError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Configuration Error +func (e ConfigurationError) OrigErr() error { + return e.origErr +} + +// NewClientPartitionMismatchError stub +func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client partition does not match provided ARN partition", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientRegionMismatchError denotes cross region access error +func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client region does not match provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } 
+} + +// NewFailedToResolveEndpointError denotes endpoint resolving error +func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "endpoint resolver failed to find an endpoint for the provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access +func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for fips but cross-region resource ARN provided", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate +func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Accelerate but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request +func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack +func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Dual-stack but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/resource_request.go new file mode 100644 index 00000000000..1431c697016 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/resource_request.go @@ -0,0 +1,56 @@ +package s3shared + +import ( + "strings" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn" +) + +// ResourceRequest represents the request and arn resource +type ResourceRequest struct { + Resource arn.Resource + Request *request.Request +} + +// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set +func (r ResourceRequest) AllowCrossRegion() bool { + return aws.BoolValue(r.Request.Config.S3UseARNRegion) +} + +// UseFIPS returns true if request config region is FIPS +func (r ResourceRequest) UseFIPS() bool { + return IsFIPS(aws.StringValue(r.Request.Config.Region)) +} + +// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS +func (r ResourceRequest) ResourceConfiguredForFIPS() bool { + return false // IBM 
does not support AWS ARN
+}
+
+// IsCrossPartition returns true if client is configured for another partition, than
+// the partition that resource ARN region resolves to.
+func (r ResourceRequest) IsCrossPartition() bool {
+	return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
+}
+
+// IsCrossRegion returns true if ARN region is different from the client configured region
+func (r ResourceRequest) IsCrossRegion() bool {
+	return IsCrossRegion(r.Request, r.Resource.GetARN().Region)
+}
+
+// HasCustomEndpoint returns true if custom client endpoint is provided
+func (r ResourceRequest) HasCustomEndpoint() bool {
+	return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
+}
+
+// IsFIPS returns true if region is a fips region
+func IsFIPS(clientRegion string) bool {
+	return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
+}
+
+// IsCrossRegion returns true if request signing region is not same as configured region
+func IsCrossRegion(req *request.Request, otherRegion string) bool {
+	return req.ClientInfo.SigningRegion != otherRegion
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/s3err/error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/s3err/error.go
new file mode 100644
index 00000000000..f555483f0c7
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+	"fmt"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+	return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+	return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+	return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 host ID (X-Amz-Id-2) from the response.
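+// Requests whose error is not an awserr.RequestFailure are left untouched.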
+func RequestFailureWrapperHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: "awssdk.s3.errorHandler",
+		Fn: func(req *request.Request) {
+			reqErr, ok := req.Error.(awserr.RequestFailure)
+			if !ok || reqErr == nil {
+				return
+			}
+
+			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+			if req.Error == nil {
+				return
+			}
+
+			req.Error = NewRequestFailure(reqErr, hostID)
+		},
+	}
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/byte.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/byte.go
new file mode 100644
index 00000000000..6c443988bbc
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 00000000000..e5f005613b7
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,12 @@
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+	SeekStart   = io.SeekStart   // seek relative to the origin of the file
+	SeekCurrent = io.SeekCurrent // seek relative to the current offset
+	SeekEnd     = io.SeekEnd     // seek relative to the end
+)
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor.go
new file mode 100644
index 00000000000..44898eed0fd
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor.go
@@ -0,0 +1,15 @@
+// +build go1.10
+
+package sdkmath
+
+import "math"
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	return math.Round(x)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor_go1.9.go
new file mode 100644
index 00000000000..810ec7f08b0
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor_go1.9.go
@@ -0,0 +1,56 @@
+// +build !go1.10
+
+package sdkmath
+
+import "math"
+
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go versions prior to Go 1.10.
+const (
+	uvone    = 0x3FF0000000000000
+	mask     = 0x7FF
+	shift    = 64 - 11 - 1
+	bias     = 1023
+	signMask = 1 << 63
+	fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	// Round is a faster implementation of:
+	//
+	// func Round(x float64) float64 {
+	//   t := Trunc(x)
+	//   if Abs(x-t) >= 0.5 {
+	//     return t + Copysign(1, x)
+	//   }
+	//   return t
+	// }
+	bits := math.Float64bits(x)
+	e := uint(bits>>shift) & mask
+	if e < bias {
+		// Round abs(x) < 1 including denormals.
+		bits &= signMask // +-0
+		if e == bias-1 {
+			bits |= uvone // +-1
+		}
+	} else if e < bias+shift {
+		// Round any abs(x) >= 1 containing a fractional component [0,1).
+		//
+		// Numbers with larger exponents are returned unchanged since they
+		// must be either an integer, infinity, or NaN.
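+		//
+		// Adding half of the fraction field's unit in the last place rounds
+		// half away from zero; clearing the remaining fraction bits then
+		// truncates the result to an integer.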
+ const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 00000000000..0c9802d8770 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/read.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/read.go new file mode 100644 index 00000000000..f4651da2da5 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,11 @@ +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 00000000000..7da8a49ce52 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000000..ebcbc2b40a3 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. 
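+//
+// On Windows this is %USERPROFILE%; on all other platforms it is $HOME.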
+func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/strings/strings.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/strings/strings.go new file mode 100644 index 00000000000..d008ae27cb3 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 00000000000..14ad0c58911 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. 
+ val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/checksum/content_md5.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/checksum/content_md5.go new file mode 100644 index 00000000000..47aaa4a5253 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. 
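+//
+// Illustrative registration on a request's build handlers (where the SDK
+// actually attaches this handler is an assumption, not shown in this file):
+//
+//	req.Handlers.Build.PushBack(checksum.AddBodyContentMD5Handler)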
+func AddBodyContentMD5Handler(r *request.Request) {
+	// if Content-MD5 header is already present, return
+	if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 {
+		return
+	}
+
+	// if S3DisableContentMD5Validation flag is set, return
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+
+	// if request is presigned, return
+	if r.IsPresigned() {
+		return
+	}
+
+	// if body is not seekable, return
+	if !aws.IsReaderSeekable(r.Body) {
+		if r.Config.Logger != nil {
+			r.Config.Logger.Log(fmt.Sprintf(
+				"Unable to compute Content-MD5 for unseekable body, S3.%s",
+				r.Operation.Name))
+		}
+		return
+	}
+
+	h := md5.New()
+
+	if _, err := aws.CopySeekableBody(h, r.Body); err != nil {
+		r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
+		return
+	}
+
+	// encode the md5 checksum in base64 and set the request header.
+	v := base64.StdEncoding.EncodeToString(h.Sum(nil))
+	r.HTTPRequest.Header.Set(contentMD5Header, v)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host.go
new file mode 100644
index 00000000000..82938e66dc1
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host.go
@@ -0,0 +1,104 @@
+package protocol
+
+import (
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+	"net"
+	"strconv"
+	"strings"
+)
+
+// ValidateEndpointHostHandler is a request handler that will validate the
+// request endpoint's host is a valid RFC 3986 host.
+var ValidateEndpointHostHandler = request.NamedHandler{
+	Name: "awssdk.protocol.ValidateEndpointHostHandler",
+	Fn: func(r *request.Request) {
+		err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
+		if err != nil {
+			r.Error = err
+		}
+	},
+}
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(opName, host string) error {
+	paramErrs := request.ErrInvalidParams{Context: opName}
+
+	var hostname string
+	var port string
+	var err error
+
+	if strings.Contains(host, ":") {
+		hostname, port, err = net.SplitHostPort(host)
+
+		if err != nil {
+			paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host))
+		}
+
+		if !ValidPortNumber(port) {
+			paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port))
+		}
+	} else {
+		hostname = host
+	}
+
+	labels := strings.Split(hostname, ".")
+	for i, label := range labels {
+		if i == len(labels)-1 && len(label) == 0 {
+			// Allow trailing dot for FQDN hosts.
+			continue
+		}
+
+		if !ValidHostLabel(label) {
+			paramErrs.Add(request.NewErrParamFormat(
+				"endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
+		}
+	}
+
+	if len(hostname) == 0 {
+		paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1))
+	}
+
+	if len(hostname) > 255 {
+		paramErrs.Add(request.NewErrParamMaxLen(
+			"endpoint host", 255, host,
+		))
+	}
+
+	if paramErrs.Len() > 0 {
+		return paramErrs
+	}
+	return nil
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
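+// For example, "bucket-1" is a valid label, while an empty label, a label
+// longer than 63 characters, or one containing "_" is not.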
+func ValidHostLabel(label string) bool {
+	if l := len(label); l == 0 || l > 63 {
+		return false
+	}
+	for _, r := range label {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'A' && r <= 'Z':
+		case r >= 'a' && r <= 'z':
+		case r == '-':
+		default:
+			return false
+		}
+	}
+
+	return true
+}
+
+// ValidPortNumber returns whether the port is a valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+	i, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+
+	if i < 0 || i > 65535 {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host_prefix.go
new file mode 100644
index 00000000000..446fb77c350
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host_prefix.go
@@ -0,0 +1,54 @@
+package protocol
+
+import (
+	"strings"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// HostPrefixHandlerName is the handler name for the host prefix request
+// handler.
+const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
+
+// NewHostPrefixHandler constructs a build handler
+func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
+	builder := HostPrefixBuilder{
+		Prefix:   prefix,
+		LabelsFn: labelsFn,
+	}
+
+	return request.NamedHandler{
+		Name: HostPrefixHandlerName,
+		Fn:   builder.Build,
+	}
+}
+
+// HostPrefixBuilder provides the request handler to expand and prepend
+// the host prefix into the operation's request endpoint host.
+type HostPrefixBuilder struct {
+	Prefix   string
+	LabelsFn func() map[string]string
+}
+
+// Build updates the passed in Request with the HostPrefix template expanded.
+func (h HostPrefixBuilder) Build(r *request.Request) {
+	if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+		return
+	}
+
+	var labels map[string]string
+	if h.LabelsFn != nil {
+		labels = h.LabelsFn()
+	}
+
+	prefix := h.Prefix
+	for name, value := range labels {
+		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+	}
+
+	r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+	if len(r.HTTPRequest.Host) > 0 {
+		r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+	}
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/idempotency.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 00000000000..53831dff984
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+	"crypto/rand"
+	"fmt"
+	"reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
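+	// A *string field qualifies when it is nil; a plain string field when
+	// it is empty.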
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the provided value with an idempotency token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+	}
+
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 00000000000..df2616e4738
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,296 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.
+package jsonutil
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+var timeType = reflect.ValueOf(time.Time{}).Type()
+var byteSliceType = reflect.ValueOf([]byte{}).Type()
+
+// BuildJSON builds a JSON string for a given object v.
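+//
+// Illustrative usage with a hypothetical API shape:
+//
+//	type Input struct {
+//		Name *string `locationName:"name" type:"string"`
+//	}
+//	b, _ := BuildJSON(&Input{Name: aws.String("demo")}) // {"name":"demo"}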
+func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + 
buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 00000000000..1a60f43d884 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,304 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math/big" + "reflect" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/private/protocol" +) + +var millisecondsFloat = new(big.Float).SetInt64(1e3) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. 
The value to unmarshal the json document into must be a pointer to the +// type. +func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") +} + +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. +func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. 
+			v, err := protocol.DecodeJSONValue(d, protocol.NoEscape)
+			if err != nil {
+				return err
+			}
+			value.Set(reflect.ValueOf(v))
+		default:
+			return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+		}
+	case json.Number:
+		switch value.Interface().(type) {
+		case *int64:
+			// Retain the old behavior where we would just truncate the float64.
+			// Calling d.Int64() here could cause an invalid syntax error due to
+			// the usage of strconv.ParseInt.
+			f, err := d.Float64()
+			if err != nil {
+				return err
+			}
+			di := int64(f)
+			value.Set(reflect.ValueOf(&di))
+		case *float64:
+			f, err := d.Float64()
+			if err != nil {
+				return err
+			}
+			value.Set(reflect.ValueOf(&f))
+		case *time.Time:
+			float, ok := new(big.Float).SetString(d.String())
+			if !ok {
+				return fmt.Errorf("unsupported float time representation: %v", d.String())
+			}
+			float = float.Mul(float, millisecondsFloat)
+			ms, _ := float.Int64()
+			t := time.Unix(0, ms*1e6).UTC()
+			value.Set(reflect.ValueOf(&t))
+		default:
+			return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+		}
+	case bool:
+		switch value.Interface().(type) {
+		case *bool:
+			value.Set(reflect.ValueOf(&d))
+		default:
+			return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+		}
+	default:
+		return fmt.Errorf("unsupported JSON value (%v)", data)
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 00000000000..513377fa921
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value.
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+	NoEscape EscapeMode = iota
+	Base64Escape
+	QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue,
+// optionally base64 decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
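+//
+// For example (illustrative):
+//
+//	v, err := DecodeJSONValue(`{"k":"v"}`, NoEscape) // v["k"] == "v"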
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/payload.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/payload.go
new file mode 100644
index 00000000000..b33ac985a10
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/client/metadata"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into a SDK shape.
+type PayloadUnmarshaler interface {
+	UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides the support for unmarshaling a payload reader to
+// a shape without needing a SDK request first.
+type HandlerPayloadUnmarshal struct {
+	Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
+// fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+	req := &request.Request{
+		HTTPRequest: &http.Request{},
+		HTTPResponse: &http.Response{
+			StatusCode: 200,
+			Header:     http.Header{},
+			Body:       ioutil.NopCloser(r),
+		},
+		Data: v,
+	}
+
+	h.Unmarshalers.Run(req)
+
+	return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling a SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+	MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling a SDK shape into an io.Writer without
+// needing a SDK request first.
+type HandlerPayloadMarshal struct {
+	Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling fails.
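+//
+// A minimal usage sketch from a consuming package (the handler choice and
+// shape value are illustrative, not prescribed by this file):
+//
+//	var hl request.HandlerList
+//	hl.PushBackNamed(restxml.BuildHandler)
+//	m := protocol.HandlerPayloadMarshal{Marshalers: hl}
+//	var buf bytes.Buffer
+//	err := m.MarshalPayload(&buf, shape) // buf now holds the serialized payload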
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "PUT"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/protocol.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/protocol.go new file mode 100644 index 00000000000..baffdfe2656 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/protocol.go @@ -0,0 +1,49 @@ +package protocol + +import ( + "fmt" + "strings" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +// RequireHTTPMinProtocol request handler is used to enforce that +// the target endpoint supports the given major and minor HTTP protocol version. +type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. +func (p RequireHTTPMinProtocol) Handler(r *request.Request) { + if r.Error != nil || r.HTTPResponse == nil { + return + } + + if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } + + if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } +} + +// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint +// did not match the required HTTP major and minor protocol version. +const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/build.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/build.go new file mode 100644 index 00000000000..05b702167c0 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
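+//
+// For illustration (operation and member names hypothetical), an operation
+// "DescribeThings" with APIVersion "2014-01-01" and one string member
+// Name="foo" produces the form-encoded body:
+//
+//	Action=DescribeThings&Name=foo&Version=2014-01-01
+//
+// url.Values.Encode sorts keys, so Action and Version interleave with the
+// operation's own parameters alphabetically.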
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 00000000000..4f869600b11 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 00000000000..0fb0eccbe85 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) {
+	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 00000000000..9d6f859d113
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,69 @@
+package query
+
+import (
+	"encoding/xml"
+	"fmt"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+	"github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+type xmlErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
+
+type xmlResponseError struct {
+	xmlErrorResponse
+}
+
+func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	const svcUnavailableTagName = "ServiceUnavailableException"
+	const errorResponseTagName = "ErrorResponse"
+
+	switch start.Name.Local {
+	case svcUnavailableTagName:
+		e.Code = svcUnavailableTagName
+		e.Message = "service is unavailable"
+		return d.Skip()
+
+	case errorResponseTagName:
+		return d.DecodeElement(&e.xmlErrorResponse, &start)
+
+	default:
+		return fmt.Errorf("unknown error response tag, %v", start)
+	}
+}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var respErr xmlResponseError
+	err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	reqID := respErr.RequestID
+	if len(reqID) == 0 {
+		reqID = r.RequestID
+	}
+
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(respErr.Code, respErr.Message, nil),
+		r.HTTPResponse.StatusCode,
+		reqID,
+	)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/build.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 00000000000..28908d42b30
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,310 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+	"github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+var byteSliceType = reflect.TypeOf([]byte{})
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.' ||
+			i == '_' ||
+			i == '~'
+	}
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, false)
+		buildBody(r, v)
+	}
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, true)
+		buildBody(r, v)
+	}
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+	query := r.HTTPRequest.URL.Query()
+
+	// Setup the raw path to match the base path pattern. This is needed
+	// so that when the path is mutated a custom escaped version can be
+	// stored in RawPath that will be used by the Go client.
+	r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+	for i := 0; i < v.NumField(); i++ {
+		m := v.Field(i)
+		if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			field := v.Type().Field(i)
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+			if kind := m.Kind(); kind == reflect.Ptr {
+				m = m.Elem()
+			} else if kind == reflect.Interface {
+				if !m.Elem().IsValid() {
+					continue
+				}
+			}
+			if !m.IsValid() {
+				continue
+			}
+			if field.Tag.Get("ignore") != "" {
+				continue
+			}
+
+			// Support the ability to customize values to be marshaled as a
+			// blob even though they were modeled as a string. Required for S3
+			// API operations like SSECustomerKey, which is modeled as a string
+			// but must be base64 encoded in the request.
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else 
if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 00000000000..4366de2e1e8 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
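+//
+// Illustrative shape (hypothetical, in the style of the SDK's generated
+// types):
+//
+//	type output struct {
+//		_    struct{}      `type:"structure" payload:"Body"`
+//		Body io.ReadCloser `type:"blob"`
+//	}
+//
+//	PayloadType(&output{}) // returns "blob"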
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 00000000000..7acc78ff7bd --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,257 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + awsStrings "github.com/IBM/ibm-cos-sdk-go/internal/strings" + "github.com/IBM/ibm-cos-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } + } +} + +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. 
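+//
+// Minimal usage sketch (the target struct and URL are hypothetical):
+//
+//	var out struct {
+//		ETag *string `location:"header" locationName:"ETag" type:"string"`
+//	}
+//	resp, err := http.Get("https://example.com/object")
+//	if err == nil {
+//		err = UnmarshalResponse(resp, &out, false)
+//	}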
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error {
+	v := reflect.Indirect(reflect.ValueOf(data))
+	return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps)
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) error {
+	if field, ok := v.Type().FieldByName("_"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			pfield, _ := v.Type().FieldByName(payloadName)
+			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+				payload := v.FieldByName(payloadName)
+				if payload.IsValid() {
+					switch payload.Interface().(type) {
+					case []byte:
+						defer r.HTTPResponse.Body.Close()
+						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+						if err != nil {
+							return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+						}
+
+						payload.Set(reflect.ValueOf(b))
+
+					case *string:
+						defer r.HTTPResponse.Body.Close()
+						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+						if err != nil {
+							return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+						}
+
+						str := string(b)
+						payload.Set(reflect.ValueOf(&str))
+
+					default:
+						switch payload.Type().String() {
+						case "io.ReadCloser":
+							payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+
+						case "io.ReadSeeker":
+							b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+							if err != nil {
+								return awserr.New(request.ErrCodeSerialization,
+									"failed to read response body", err)
+							}
+							payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+
+						default:
+							io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+							r.HTTPResponse.Body.Close()
+							return awserr.New(request.ErrCodeSerialization,
+								"failed to decode REST response",
+								fmt.Errorf("unknown payload type %s", payload.Type()))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error {
+	for i := 0; i < v.NumField(); i++ {
+		m, field := v.Field(i), v.Type().Field(i)
+		if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+
+			switch field.Tag.Get("location") {
+			case "statusCode":
+				unmarshalStatusCode(m, resp.StatusCode)
+
+			case "header":
+				err := unmarshalHeader(m, resp.Header.Get(name), field.Tag)
+				if err != nil {
+					return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+				}
+
+			case "headers":
+				prefix := field.Tag.Get("locationName")
+				err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps)
+				if err != nil {
+					return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+	if !v.IsValid() {
+		return
+	}
+
+	switch v.Interface().(type) {
+	case *int64:
+		s := int64(statusCode)
+		v.Set(reflect.ValueOf(&s))
+	}
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error {
+	if len(headers) == 0 {
+		return nil
+	}
+	switch r.Interface().(type) {
+	case map[string]*string: // we only support string map value types
+		out := map[string]*string{}
+		for k, v := range headers {
+			if awsStrings.HasPrefixFold(k, prefix) {
+				if normalize {
+					k = strings.ToLower(k)
+				} else {
+					k = http.CanonicalHeaderKey(k)
+				}
+				out[k[len(prefix):]] = &v[0]
+			}
+		}
+		if len(out) != 0 {
+			r.Set(reflect.ValueOf(out))
+		}
+	}
+	return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag
reflect.StructTag) error { + switch tag.Get("type") { + case "jsonvalue": + if len(header) == 0 { + return nil + } + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 00000000000..6e9ab32cdf3 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,79 @@ +// Package restxml provides RESTful XML serialization of AWS +// requests and responses. +package restxml + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/query" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/rest" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. 
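+//
+// Build composes the handlers vendored in this patch: rest.Build fills the
+// URI, query string, and headers from "location" tags, then, for structure
+// payloads, xmlutil.BuildXML renders the XML body. A hypothetical request
+// produced this way might look like:
+//
+//	PUT /my-bucket?lifecycle HTTP/1.1
+//
+//	<LifecycleConfiguration><Rule>...</Rule></LifecycleConfiguration>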
+func Build(r *request.Request) {
+	rest.Build(r)
+
+	if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+		var buf bytes.Buffer
+		err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization,
+					"failed to encode rest XML request", err),
+				0,
+				r.RequestID,
+			)
+			return
+		}
+		r.SetBufferBody(buf.Bytes())
+	}
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+		defer r.HTTPResponse.Body.Close()
+		decoder := xml.NewDecoder(r.HTTPResponse.Body)
+		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization,
+					"failed to decode REST XML response", err),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			)
+			return
+		}
+	} else {
+		rest.Unmarshal(r)
+	}
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+	rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+	query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/timestamp.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 00000000000..46e8648a140
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,101 @@
+package protocol
+
+import (
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/internal/sdkmath"
+)
+
+// Names of time formats supported by the SDK
+const (
+	RFC822TimeFormatName  = "rfc822"
+	ISO8601TimeFormatName = "iso8601"
+	UnixTimeFormatName    = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+// Output time is intended to not contain decimals
+const (
+	// RFC 7231#section-7.1.1.1 timestamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
+	RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+	// This format is used for output time without sub-second precision
+	RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+
+	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
+	ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
+
+	// This format is used for output time with fractional second precision up to milliseconds
+	ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
+)
+
+// IsKnownTimestampFormat returns if the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+	switch name {
+	case RFC822TimeFormatName:
+		fallthrough
+	case ISO8601TimeFormatName:
+		fallthrough
+	case UnixTimeFormatName:
+		return true
+	default:
+		return false
+	}
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+	t = t.UTC().Truncate(time.Millisecond)
+
+	switch name {
+	case RFC822TimeFormatName:
+		return t.Format(RFC822OutputTimeFormat)
+	case ISO8601TimeFormatName:
+		return t.Format(ISO8601OutputTimeFormat)
+	case UnixTimeFormatName:
+		ms := t.UnixNano() / int64(time.Millisecond)
+		return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)
+	default:
+		panic("unknown timestamp format name, " + name)
+	}
+}
+
+// ParseTime attempts to parse the time given the format. Returns
+// the time if it was able to be parsed, and fails otherwise.
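+//
+// For example (values illustrative):
+//
+//	t, err := ParseTime(ISO8601TimeFormatName, "2014-04-29T18:30:38Z")
+//	// err == nil; t is 2014-04-29 18:30:38 +0000 UTC
+//	_, err = ParseTime(RFC822TimeFormatName, "2014-04-29")
+//	// err != nil; the value does not match RFC822TimeFormat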
+func ParseTime(formatName, value string) (time.Time, error) {
+	switch formatName {
+	case RFC822TimeFormatName:
+		return time.Parse(RFC822TimeFormat, value)
+	case ISO8601TimeFormatName:
+		return time.Parse(ISO8601TimeFormat, value)
+	case UnixTimeFormatName:
+		v, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return time.Time{}, err
+		}
+		_, dec := math.Modf(v)
+		dec = sdkmath.Round(dec*1e3) / 1e3 // Rounds 0.1229999 to 0.123
+		return time.Unix(int64(v), int64(dec*(1e9))), nil
+	default:
+		panic("unknown timestamp format name, " + formatName)
+	}
+}
+
+// IBM COS SDK Code -- START
+
+// ParseIbmTime checks whether the first character of the date string is a
+// letter; if so, it tries to parse the value as an RFC822 formatted date.
+func ParseIbmTime(formatName, value string) (time.Time, error) {
+	if formatName == ISO8601TimeFormatName && len(value) != 0 {
+		ch := value[0]
+		if ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') {
+			formatName = RFC822TimeFormatName
+		}
+	}
+	return ParseTime(formatName, value)
+}
+
+// IBM COS SDK Code -- END
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 00000000000..183a4fabd79
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,27 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+		return
+	}
+
+	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+	r.HTTPResponse.Body.Close()
+}
+
+// ResponseMetadata provides the SDK response metadata attributes.
+type ResponseMetadata struct {
+	StatusCode int
+	RequestID  string
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal_error.go
new file mode 100644
index 00000000000..ac5ef29e0ff
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal_error.go
@@ -0,0 +1,65 @@
+package protocol
+
+import (
+	"net/http"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// UnmarshalErrorHandler provides unmarshaling of API response errors for
+// both typed and untyped errors.
+type UnmarshalErrorHandler struct {
+	unmarshaler ErrorUnmarshaler
+}
+
+// ErrorUnmarshaler is an abstract interface for concrete implementations to
+// unmarshal protocol specific response errors.
+type ErrorUnmarshaler interface {
+	UnmarshalError(*http.Response, ResponseMetadata) (error, error)
+}
+
+// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler
+// initialized for the set of exception names to the error unmarshalers
+func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler {
+	return &UnmarshalErrorHandler{
+		unmarshaler: unmarshaler,
+	}
+}
+
+// UnmarshalErrorHandlerName is the name of the named handler.
+const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler using the set of +// errors the unmarshaler was initialized for. +func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// errors exception name. +func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 00000000000..1e94131bc0b --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,315 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. 
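+//
+// For example (hypothetical members): a field tagged type:"list" is routed
+// to buildList, one tagged type:"timestamp" falls through to buildScalar,
+// and an untagged struct pointer is inferred as "structure" by reflection:
+//
+//	Rules   []*Rule    `locationName:"Rule" type:"list" flattened:"true"`
+//	Expires *time.Time `type:"timestamp"`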
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. +func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. 
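+//
+// Illustrative output (element names hypothetical): a list member tagged
+// locationName:"Items" locationNameList:"Item" renders wrapped,
+//
+//	<Items><Item>a</Item><Item>b</Item></Items>
+//
+// while the same member tagged flattened:"true" repeats the outer name:
+//
+//	<Items>a</Items><Items>b</Items>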
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted lists
+		return nil
+	}
+
+	// check for unflattened list member
+	flattened := tag.Get("flattened") != ""
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if flattened {
+		for i := 0; i < value.Len(); i++ {
+			child := NewXMLElement(xname)
+			current.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	} else {
+		list := NewXMLElement(xname)
+		current.AddChild(list)
+
+		for i := 0; i < value.Len(); i++ {
+			iname := tag.Get("locationNameList")
+			if iname == "" {
+				iname = "member"
+			}
+
+			child := NewXMLElement(xml.Name{Local: iname})
+			list.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes also.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted maps
+		return nil
+	}
+
+	maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+	current.AddChild(maproot)
+	current = maproot
+
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	// sorting is not required for compliance, but it makes testing easier
+	keys := make([]string, value.Len())
+	for i, k := range value.MapKeys() {
+		keys[i] = k.String()
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := value.MapIndex(reflect.ValueOf(k))
+
+		mapcur := current
+		if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+			child := NewXMLElement(xml.Name{Local: "entry"})
+			mapcur.AddChild(child)
+			mapcur = child
+		}
+
+		kchild := NewXMLElement(xml.Name{Local: kname})
+		kchild.Text = k
+		vchild := NewXMLElement(xml.Name{Local: vname})
+		mapcur.AddChild(kchild)
+		mapcur.AddChild(vchild)
+
+		if err := b.buildValue(v, vchild, ""); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
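+//
+// For example (hypothetical member), a bool field tagged
+// locationName:"Quiet" with value true becomes the child node
+//
+//	<Quiet>true</Quiet>
+//
+// whereas adding xmlAttribute:"true" would instead set Quiet="true" as an
+// attribute on the enclosing node.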
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 00000000000..c1a511851f6 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 00000000000..0d504037b85 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,301 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + +// UnmarshalXML deserializes an xml.Decoder into the container v. 
V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + if val, ok := node.findElem(name); ok { + elems = []*XMLNode{{Text: val}} + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			if err := parseMapEntry(r, entry, tag); err != nil {
+				return err
+			}
+		}
+	} else { // this element is itself an entry
+		return parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			// Propagate parse failures instead of silently dropping them.
+			if err := parse(valueR, value, ""); err != nil {
+				return err
+			}
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
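+//
+// A minimal sketch of the common case (illustrative only; the full set of
+// supported destinations is the type switch below):
+//
+//	var s *string
+//	_ = parseScalar(reflect.ValueOf(&s).Elem(), &XMLNode{Text: "hello"}, "")
+//	// *s == "hello"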
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + // IBM COS SDK Code -- START + t, err := protocol.ParseIbmTime(format, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + // IBM COS SDK Code -- END + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 00000000000..42f71648eee --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,159 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
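+//
+// A small usage sketch (assuming a strings.NewReader input; illustrative
+// only):
+//
+//	d := xml.NewDecoder(strings.NewReader("<Root><Name>abc</Name></Root>"))
+//	root, _ := XMLToStruct(d, nil)
+//	// root.Children["Root"][0].Children["Name"][0].Text == "abc"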
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+	out := &XMLNode{}
+	for {
+		tok, err := d.Token()
+		if err != nil {
+			if err == io.EOF {
+				break
+			} else {
+				return out, err
+			}
+		}
+
+		if tok == nil {
+			break
+		}
+
+		switch typed := tok.(type) {
+		case xml.CharData:
+			out.Text = string(typed.Copy())
+		case xml.StartElement:
+			el := typed.Copy()
+			out.Attr = el.Attr
+			if out.Children == nil {
+				out.Children = map[string][]*XMLNode{}
+			}
+
+			name := typed.Name.Local
+			slice := out.Children[name]
+			if slice == nil {
+				slice = []*XMLNode{}
+			}
+			node, e := XMLToStruct(d, &el)
+			out.findNamespaces()
+			if e != nil {
+				return out, e
+			}
+			node.Name = typed.Name
+			node.findNamespaces()
+			tempOut := *out
+			// Save into a temp variable, simply because out gets squashed during
+			// loop iterations
+			node.parent = &tempOut
+			slice = append(slice, node)
+			out.Children[name] = slice
+		case xml.EndElement:
+			if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+				return out, nil
+			}
+			out = &XMLNode{}
+		}
+	}
+	return out, nil
+}
+
+func (n *XMLNode) findNamespaces() {
+	ns := map[string]string{}
+	for _, a := range n.Attr {
+		if a.Name.Space == "xmlns" {
+			ns[a.Value] = a.Name.Local
+		}
+	}
+
+	n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+	for node := n; node != nil; node = node.parent {
+		for _, a := range node.Attr {
+			namespace := a.Name.Space
+			if v, ok := node.namespaces[namespace]; ok {
+				namespace = v
+			}
+			if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+				return a.Value, true
+			}
+		}
+	}
+	return "", false
+}
+
+// StructToXML writes an XMLNode to a xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+	// Sort Attributes
+	attrs := node.Attr
+	if sorted {
+		// Copy with zero length so append does not leave zero-valued
+		// attributes at the front of the sorted slice.
+		sortedAttrs := make([]xml.Attr, 0, len(attrs))
+		for _, k := range node.Attr {
+			sortedAttrs = append(sortedAttrs, k)
+		}
+		sort.Sort(xmlAttrSlice(sortedAttrs))
+		attrs = sortedAttrs
+	}
+
+	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs})
+
+	if node.Text != "" {
+		e.EncodeToken(xml.CharData([]byte(node.Text)))
+	} else if sorted {
+		sortedNames := []string{}
+		for k := range node.Children {
+			sortedNames = append(sortedNames, k)
+		}
+		sort.Strings(sortedNames)
+
+		for _, k := range sortedNames {
+			for _, v := range node.Children[k] {
+				StructToXML(e, v, sorted)
+			}
+		}
+	} else {
+		for _, c := range node.Children {
+			for _, v := range c {
+				StructToXML(e, v, sorted)
+			}
+		}
+	}
+
+	e.EncodeToken(xml.EndElement{Name: node.Name})
+	return e.Flush()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/api.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/api.go
new file mode 100644
index 00000000000..ac2fec24249
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/api.go
@@ -0,0 +1,19183 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awsutil"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+	"github.com/IBM/ibm-cos-sdk-go/private/checksum"
+	"github.com/IBM/ibm-cos-sdk-go/private/protocol"
+	"github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml"
+)
+
+const opAbortMultipartUpload = "AbortMultipartUpload"
+
+// AbortMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the AbortMultipartUpload operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// This action aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// action and ensure that the parts list is empty. +// +// For information about permissions required to use the multipart upload, see +// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to AbortMultipartUpload: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. 
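+//
+// For example, a caller might detect that code with a runtime type assertion
+// (a hedged sketch; svc and input are placeholders):
+//
+//	if _, err := svc.AbortMultipartUpload(input); err != nil {
+//	    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchUpload {
+//	        // The upload was already aborted or completed.
+//	    }
+//	}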
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAddLegalHold = "AddLegalHold" + +// AddLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the AddLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddLegalHold for more information on using the AddLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddLegalHoldRequest method. +// req, resp := client.AddLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AddLegalHold +func (c *S3) AddLegalHoldRequest(input *AddLegalHoldInput) (req *request.Request, output *AddLegalHoldOutput) { + op := &request.Operation{ + Name: opAddLegalHold, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?legalHold", + } + + if input == nil { + input = &AddLegalHoldInput{} + } + + output = &AddLegalHoldOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AddLegalHold API operation for Amazon Simple Storage Service. +// +// Add a legal hold on an object. The legal hold identifiers are stored in the +// object metadata along with the timestamp of when they are POSTed to the object. +// The presence of any legal hold identifiers prevents the modification or deletion +// of the object data, even if the retention period has expired. Legal Holds +// can only be added to objects in a bucket with a protection policy. Otherwise +// a 400 error will be returned. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AddLegalHold for usage and error information. 
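+//
+// A hedged sketch of a call site (the legal hold identifier field name is an
+// assumption, not confirmed by this file):
+//
+//	_, err := svc.AddLegalHold(&s3.AddLegalHoldInput{
+//	    Bucket:               aws.String("my-bucket"),
+//	    Key:                  aws.String("my-key"),
+//	    RetentionLegalHoldId: aws.String("hold-2021-001"), // assumed field name
+//	})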
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AddLegalHold +func (c *S3) AddLegalHold(input *AddLegalHoldInput) (*AddLegalHoldOutput, error) { + req, out := c.AddLegalHoldRequest(input) + return out, req.Send() +} + +// AddLegalHoldWithContext is the same as AddLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See AddLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AddLegalHoldWithContext(ctx aws.Context, input *AddLegalHoldInput, opts ...request.Option) (*AddLegalHoldOutput, error) { + req, out := c.AddLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation. After successfully uploading all relevant parts of an upload, +// you call this action to complete the upload. Upon receiving this request, +// Amazon S3 concatenates all the parts in ascending order by part number to +// create a new object. In the Complete Multipart Upload request, you must provide +// the parts list. You must ensure that the parts list is complete. This action +// concatenates the parts that you provide in the list. 
For each part in the +// list, you must provide the part number and the ETag value, returned after +// that part was uploaded. +// +// Processing of a Complete Multipart Upload request could take several minutes +// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. Because a request could fail after the initial +// 200 OK response has been sent, it is important that you check the response +// body to determine whether the request succeeded. +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// CompleteMultipartUpload has the following special errors: +// +// * Error code: EntityTooSmall Description: Your proposed upload is smaller +// than the minimum allowed object size. Each part must be at least 5 MB +// in size, except the last part. 400 Bad Request +// +// * Error code: InvalidPart Description: One or more of the specified parts +// could not be found. The part might not have been uploaded, or the specified +// entity tag might not have matched the part's entity tag. 400 Bad Request +// +// * Error code: InvalidPartOrder Description: The list of parts was not +// in ascending order. The parts list must be specified in order by part +// number. 400 Bad Request +// +// * Error code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. 404 Not Found +// +// The following operations are related to CompleteMultipartUpload: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. 
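+//
+// A minimal sketch of assembling the required parts list (bucket, key, upload
+// ID, and ETag values are placeholders from earlier UploadPart responses):
+//
+//	_, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//	    Bucket:   aws.String("my-bucket"),
+//	    Key:      aws.String("my-key"),
+//	    UploadId: aws.String("example-upload-id"),
+//	    MultipartUpload: &s3.CompletedMultipartUpload{
+//	        Parts: []*s3.CompletedPart{
+//	            {ETag: aws.String("\"etag-1\""), PartNumber: aws.Int64(1)},
+//	        },
+//	    },
+//	})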
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyObjectRequest method. +// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a +// copy of your object up to 5 GB in size in a single atomic action using this +// API. However, to copy an object greater than 5 GB, you must use the multipart +// upload Upload Part - Copy API. For more information, see Copy Object Using +// the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// +// All copy requests must be authenticated. Additionally, you must have read +// access to the source object and write access to the destination bucket. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. 
+// +// A copy request might return an error when Amazon S3 receives the copy request +// or while Amazon S3 is copying the files. If the error occurs before the copy +// action starts, you receive a standard Amazon S3 error. If the error occurs +// during the copy operation, the error response is embedded in the 200 OK response. +// This means that a 200 OK response can contain either a success or an error. +// Design your application to parse the contents of the response and handle +// it appropriately. +// +// If the copy is successful, you receive a response with information about +// the copied object. +// +// If the request is an HTTP 1.1 request, the response is chunk encoded. If +// it were not, it would not contain the content-length, and you would need +// to read the entire body. +// +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. For pricing information, see Amazon S3 +// pricing (http://aws.amazon.com/s3/pricing/). +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If +// you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// Metadata +// +// When copying an object, you can preserve all metadata (default) or specify +// new metadata. However, the ACL is not preserved and is set to private for +// the user making the request. To override the default ACL setting, specify +// a new ACL when generating a copy request. For more information, see Using +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// +// To specify whether you want the object metadata copied from the source object +// or replaced with metadata provided in the request, you can optionally add +// the x-amz-metadata-directive header. When you grant permissions, you can +// use the s3:x-amz-metadata-directive condition key to enforce certain metadata +// behavior when objects are uploaded. For more information, see Specifying +// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) +// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific +// condition keys, see Actions, Resources, and Condition Keys for Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). 
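+//
+// For example, replacing rather than copying the source metadata might look
+// like this sketch (names and values are placeholders):
+//
+//	_, err := svc.CopyObject(&s3.CopyObjectInput{
+//	    Bucket:            aws.String("dest-bucket"),
+//	    Key:               aws.String("dest-key"),
+//	    CopySource:        aws.String("src-bucket/src-key"),
+//	    MetadataDirective: aws.String("REPLACE"),
+//	    Metadata:          map[string]*string{"owner": aws.String("team-a")},
+//	})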
+// +// x-amz-copy-source-if Headers +// +// To only copy an object under certain conditions, such as whether the Etag +// matches or whether the object was modified before or after a specified date, +// use the following request parameters: +// +// * x-amz-copy-source-if-match +// +// * x-amz-copy-source-if-none-match +// +// * x-amz-copy-source-if-unmodified-since +// +// * x-amz-copy-source-if-modified-since +// +// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: +// +// * x-amz-copy-source-if-match condition evaluates to true +// +// * x-amz-copy-source-if-unmodified-since condition evaluates to false +// +// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: +// +// * x-amz-copy-source-if-none-match condition evaluates to false +// +// * x-amz-copy-source-if-modified-since condition evaluates to true +// +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. +// +// Server-side encryption +// +// When you perform a CopyObject operation, you can optionally use the appropriate +// encryption-related headers to encrypt the object using server-side encryption +// with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided +// encryption key. With server-side encryption, Amazon S3 encrypts your data +// as it writes it to disks in its data centers and decrypts the data when you +// access it. For more information about server-side encryption, see Using Server-Side +// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the +// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon S3 User Guide. +// +// Access Control List (ACL)-Specific Request Headers +// +// When copying an object, you can optionally use headers to grant ACL-based +// permissions. By default, all objects are private. Only the owner has full +// access control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the ACL on the object. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// Storage Class Options +// +// You can use the CopyObject action to change the storage class of an object +// that is already stored in Amazon S3 using the StorageClass parameter. For +// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// By default, x-amz-copy-source identifies the current version of an object +// to copy. If the current version is a delete marker, Amazon S3 behaves as +// if the object was deleted. To copy a different version, use the versionId +// subresource. +// +// If you enable versioning on the target bucket, Amazon S3 generates a unique +// version ID for the object being copied. 
This version ID is different from +// the version ID of the source object. Amazon S3 returns the version ID of +// the copied object in the x-amz-version-id response header in the response. +// +// If you do not enable versioning or suspend it on the target bucket, the version +// ID that Amazon S3 generates is always null. +// +// If the source object's storage class is GLACIER, you must restore a copy +// of this object before you can use it as a source object for the copy operation. +// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// +// The following operations are related to CopyObject: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// Creates a new S3 bucket. To create a bucket, you must register with Amazon +// S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous +// requests are never allowed to create buckets. By creating the bucket, you +// become the bucket owner. +// +// Not every string is an acceptable bucket name. For information about bucket +// naming restrictions, see Working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). +// +// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). +// +// By default, the bucket is created in the US East (N. Virginia) Region. You +// can optionally specify a Region in the request body. You might choose a Region +// to optimize latency, minimize costs, or address regulatory requirements. +// For example, if you reside in Europe, you will probably find it advantageous +// to create buckets in the Europe (Ireland) Region. For more information, see +// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// +// If you send your create bucket request to the s3.amazonaws.com endpoint, +// the request goes to the us-east-1 Region. Accordingly, the signature calculations +// in Signature Version 4 must use us-east-1 as the Region, even if the location +// constraint in the request specifies another Region where the bucket is to +// be created. If you create a bucket in a Region other than US East (N. Virginia), +// your application must be able to handle 307 redirect. For more information, +// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// +// When creating a bucket using this operation, you can optionally specify the +// accounts or groups that should be granted specific permissions on the bucket. +// There are two ways to grant the appropriate permissions using the request +// headers. +// +// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. For more information, see +// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. These headers map to the set of permissions Amazon S3 supports +// in an ACL. For more information, see Access control list (ACL) overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). 
You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// The following operations are related to CreateBucket: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Select a different name and try again. +// +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all AWS Regions except in the North Virginia Region. +// For legacy compatibility, if you re-create an existing bucket that you already +// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the +// bucket access control lists (ACLs). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMultipartUploadRequest method. +// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. +// You specify this upload ID in each of your subsequent upload part requests +// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// You also include this upload ID in the final request to either complete or +// abort the multipart upload request. +// +// For more information about multipart uploads, see Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the upload must complete within the number of days specified in the bucket +// lifecycle configuration. Otherwise, the incomplete multipart upload becomes +// eligible for an abort action and Amazon S3 aborts the multipart upload. For +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// +// For information about the permissions required to use the multipart upload +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// For request signing, multipart upload is just a series of regular requests. +// You initiate a multipart upload, send one or more requests to upload parts, +// and then complete the multipart upload process. You sign each request individually. +// There is nothing special about signing multipart upload requests. For more +// information about signing, see Authenticating Requests (AWS Signature Version +// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. 
Amazon S3 frees up the space used to store the +// parts and stop charging you for storing them only after you either complete +// or abort a multipart upload. +// +// You can optionally request server-side encryption. For server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. You can provide your own encryption key, +// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or +// Amazon S3-managed encryption keys. If you choose to provide your own encryption +// key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the request to initiate the upload +// by using CreateMultipartUpload. +// +// To perform a multipart upload with encryption using an AWS KMS CMK, the requester +// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, +// and kms:DescribeKey actions on the key. These permissions are required because +// Amazon S3 must decrypt and read data from the encrypted file parts before +// it completes the multipart upload. +// +// If your AWS Identity and Access Management (IAM) user or role is in the same +// AWS account as the AWS KMS CMK, then you must have these permissions on the +// key policy. If your IAM user or role belongs to a different account than +// the key, then you must have the permissions on both the key policy and your +// IAM user or role. +// +// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// Access Permissions +// +// When copying an object, you can optionally specify the accounts or groups +// that should be granted specific permissions on the new object. There are +// two ways to grant the permissions using the request headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters +// map to the set of permissions that Amazon S3 supports in an ACL. For more +// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use AWS managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) +// stored in AWS Key Management Service (AWS KMS) – If you want AWS to +// manage the keys used to encrypt data, specify the following headers in +// the request. 
x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and +// PUT requests for an object protected by AWS KMS fail if you don't make +// them with SSL or by using SigV4. For more information about server-side +// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data +// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key +// x-amz-server-side-encryption-customer-key-MD5 For more information about +// server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting +// Data Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the access control list (ACL) on the object. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. +// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. 
California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// The following operations are related to CreateMultipartUpload: +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketRequest method. 
+// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the S3 bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketCorsRequest method. 
+// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon S3 User Guide. +// +// Related Resources: +// +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. +// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// +// Related actions include: +// +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
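+//
+// For illustration only (this sketch is not part of the generated upstream
+// docs; the svc client and bucket name are hypothetical):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// _, err := svc.DeleteBucketLifecycleWithContext(ctx, &s3.DeleteBucketLifecycleInput{
+// Bucket: aws.String("my-bucket"),
+// })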
+func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This action removes the website configuration for a bucket. Amazon S3 returns +// a 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a +// bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// +// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
+func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteLegalHold = "DeleteLegalHold"
+
+// DeleteLegalHoldRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLegalHold operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteLegalHold for more information on using the DeleteLegalHold
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteLegalHoldRequest method.
+// req, resp := client.DeleteLegalHoldRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteLegalHold
+func (c *S3) DeleteLegalHoldRequest(input *DeleteLegalHoldInput) (req *request.Request, output *DeleteLegalHoldOutput) {
+ op := &request.Operation{
+ Name: opDeleteLegalHold,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?legalHold",
+ }
+
+ if input == nil {
+ input = &DeleteLegalHoldInput{}
+ }
+
+ output = &DeleteLegalHoldOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteLegalHold API operation for Amazon Simple Storage Service.
+//
+// Removes a legal hold from an object. The legal hold identifiers are stored in
+// the object metadata along with the timestamp of when they are POSTed to the
+// object. The presence of any legal hold identifiers prevents the modification
+// or deletion of the object data, even if the retention period has expired.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteLegalHold for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteLegalHold +func (c *S3) DeleteLegalHold(input *DeleteLegalHoldInput) (*DeleteLegalHoldOutput, error) { + req, out := c.DeleteLegalHoldRequest(input) + return out, req.Send() +} + +// DeleteLegalHoldWithContext is the same as DeleteLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteLegalHoldWithContext(ctx aws.Context, input *DeleteLegalHoldInput, opts ...request.Option) (*DeleteLegalHoldOutput, error) { + req, out := c.DeleteLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects but will still respond +// that the command was successful. +// +// To remove a specific version, you must be the bucket owner and you must use +// the version Id subresource. Using this subresource permanently deletes the +// version. If the object deleted is a delete marker, Amazon S3 sets the response +// header, x-amz-delete-marker, to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. +// +// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). 
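+//
+// For illustration only (this sketch is not part of the generated upstream
+// docs; the svc client and all names below are hypothetical), deleting one
+// specific version from an MFA Delete enabled bucket:
+//
+// _, err := svc.DeleteObject(&s3.DeleteObjectInput{
+// Bucket: aws.String("my-versioned-bucket"),
+// Key: aws.String("my-key"),
+// VersionId: aws.String("my-version-id"),
+// MFA: aws.String("arn:aws:iam::111122223333:mfa/user 123456"),
+// })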
+// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
+//
+// You can delete objects by explicitly calling DELETE Object or by configuring
+// the bucket's lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html))
+// to enable Amazon S3 to remove them for you. If you want to block users or
+// accounts from removing or deleting objects from your bucket, you must deny
+// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration
+// actions.
+//
+// The following action is related to DeleteObject:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectWithContext is the same as DeleteObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteObjectTagging = "DeleteObjectTagging"
+
+// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjectTagging for more information on using the DeleteObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteObjectTaggingRequest method.
+// req, resp := client.DeleteObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjectTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteObjectTaggingInput{}
+ }
+
+ output = &DeleteObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// To use this operation, you must have permission to perform the s3:DeleteObjectTagging
+// action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteObjectTagging:
+//
+// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteObjects = "DeleteObjects"
+
+// DeleteObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjects operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjects for more information on using the DeleteObjects
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteObjectsRequest method.
+// req, resp := client.DeleteObjectsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjects,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}?delete",
+ }
+
+ if input == nil {
+ input = &DeleteObjectsInput{}
+ }
+
+ output = &DeleteObjectsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// DeleteObjects API operation for Amazon Simple Storage Service.
+//
+// This action enables you to delete multiple objects from a bucket using a
+// single HTTP request. If you know the object keys that you want to delete,
+// then this action provides a suitable alternative to sending individual delete
+// requests, reducing per-request overhead.
+//
+// The request contains a list of up to 1000 keys that you want to delete. In
+// the XML, you provide the object key names, and optionally, version IDs if
+// you want to delete a specific version of the object from a versioning-enabled
+// bucket. For each key, Amazon S3 performs a delete action and returns the
+// result of that delete, success, or failure, in the response. Note that if
+// the object specified in the request is not found, Amazon S3 returns the result
+// as deleted.
+//
+// The action supports two modes for the response: verbose and quiet. By default,
+// the action uses verbose mode in which the response includes the result of
+// deletion of each key in your request. In quiet mode the response includes
+// only keys where the delete action encountered an error. For a successful
+// deletion, the action does not return any information about the delete in
+// the response body.
+//
+// When performing this action on an MFA Delete enabled bucket and attempting
+// to delete any versioned objects, you must include an MFA token. If you do
+// not provide one, the entire request will fail, even if there are non-versioned
+// objects you are trying to delete. If you provide an invalid token, whether
+// there are versioned keys in the request or not, the entire Multi-Object Delete
+// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
+//
+// Finally, the Content-MD5 header is required for all Multi-Object Delete requests.
+// Amazon S3 uses the header value to ensure that your request body has not
+// been altered in transit.
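+//
+// For illustration only (this sketch is not part of the generated upstream
+// docs; the svc client and names are hypothetical). Note that this SDK adds
+// the required Content-MD5 header for you via the contentMd5Handler shown above:
+//
+// _, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+// Bucket: aws.String("my-bucket"),
+// Delete: &s3.Delete{
+// Objects: []*s3.ObjectIdentifier{
+// {Key: aws.String("a.txt")},
+// {Key: aws.String("b.txt")},
+// },
+// Quiet: aws.Bool(true), // quiet mode: report only keys that failed
+// },
+// })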
+// +// The following operations are related to DeleteObjects: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. 
+// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opExtendObjectRetention = "ExtendObjectRetention" + +// ExtendObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the ExtendObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExtendObjectRetention for more information on using the ExtendObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExtendObjectRetentionRequest method. +// req, resp := client.ExtendObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ExtendObjectRetention +func (c *S3) ExtendObjectRetentionRequest(input *ExtendObjectRetentionInput) (req *request.Request, output *ExtendObjectRetentionOutput) { + op := &request.Operation{ + Name: opExtendObjectRetention, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?extendRetention", + } + + if input == nil { + input = &ExtendObjectRetentionInput{} + } + + output = &ExtendObjectRetentionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ExtendObjectRetention API operation for Amazon Simple Storage Service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ExtendObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ExtendObjectRetention +func (c *S3) ExtendObjectRetention(input *ExtendObjectRetentionInput) (*ExtendObjectRetentionOutput, error) { + req, out := c.ExtendObjectRetentionRequest(input) + return out, req.Send() +} + +// ExtendObjectRetentionWithContext is the same as ExtendObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See ExtendObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ExtendObjectRetentionWithContext(ctx aws.Context, input *ExtendObjectRetentionInput, opts ...request.Option) (*ExtendObjectRetentionOutput, error) { + req, out := c.ExtendObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// This implementation of the GET action uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// Related Resources +// +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// Returns the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// For more information about cors, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. +// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + output = &GetBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The response describes +// the new filter element that you can use to specify a filter to select a subset +// of objects to which the rule applies. If you are using a previous version +// of the lifecycle configuration, it still works. For the earlier action, see +// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission, by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// GetBucketLifecycleConfiguration has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycleConfiguration: +// +// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
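+//
+// For illustration only (this sketch is not part of the generated upstream
+// docs; the svc client and bucket name are hypothetical), handling the special
+// error described above:
+//
+// _, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+// Bucket: aws.String("my-bucket"),
+// })
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchLifecycleConfiguration" {
+// // The bucket has no lifecycle configuration set.
+// }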
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the Region the bucket resides in. You set the bucket's Region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). +// +// To use this implementation of the operation, you must be the bucket owner. 
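+//
+// For illustration only (this sketch is not part of the generated upstream
+// docs; the svc client and bucket name are hypothetical). On AWS, buckets in
+// the classic us-east-1 Region may return an empty LocationConstraint:
+//
+// out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+// Bucket: aws.String("my-bucket"),
+// })
+// if err == nil {
+// fmt.Println("bucket region:", aws.StringValue(out.LocationConstraint))
+// }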
+// +// The following operations are related to GetBucketLocation: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
+func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketProtectionConfiguration = "GetBucketProtectionConfiguration"
+
+// GetBucketProtectionConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketProtectionConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketProtectionConfiguration for more information on using the GetBucketProtectionConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBucketProtectionConfigurationRequest method.
+// req, resp := client.GetBucketProtectionConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketProtectionConfiguration
+func (c *S3) GetBucketProtectionConfigurationRequest(input *GetBucketProtectionConfigurationInput) (req *request.Request, output *GetBucketProtectionConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketProtectionConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?protection",
+ }
+
+ if input == nil {
+ input = &GetBucketProtectionConfigurationInput{}
+ }
+
+ output = &GetBucketProtectionConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketProtectionConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the protection configuration of a bucket. The EnablePermanentRetention
+// flag will only be returned if the flag is set to true for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketProtectionConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketProtectionConfiguration +func (c *S3) GetBucketProtectionConfiguration(input *GetBucketProtectionConfigurationInput) (*GetBucketProtectionConfigurationOutput, error) { + req, out := c.GetBucketProtectionConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketProtectionConfigurationWithContext is the same as GetBucketProtectionConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketProtectionConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketProtectionConfigurationWithContext(ctx aws.Context, input *GetBucketProtectionConfigurationInput, opts ...request.Option) (*GetBucketProtectionConfigurationOutput, error) { + req, out := c.GetBucketProtectionConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. 
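+//
+// As an illustrative sketch only ("examplebucket" is a placeholder and
+// "client" is assumed to be a configured *S3):
+//
+//    out, err := client.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err == nil && out.Status != nil {
+//        fmt.Println(*out.Status) // e.g. "Enabled" or "Suspended"
+//    }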
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
+	req, out := c.GetBucketVersioningRequest(input)
+	return out, req.Send()
+}
+
+// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
+	req, out := c.GetBucketVersioningRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketWebsite for more information on using the GetBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the GetBucketWebsiteRequest method.
+//    req, resp := client.GetBucketWebsiteRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
+	op := &request.Operation{
+		Name:       opGetBucketWebsite,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?website",
+	}
+
+	if input == nil {
+		input = &GetBucketWebsiteInput{}
+	}
+
+	output = &GetBucketWebsiteOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration.
+// For more information about hosting websites, see Hosting Websites on Amazon
+// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// This GET action requires the S3:GetBucketWebsite permission. By default,
+// only the bucket owner can read the bucket website configuration. However,
+// bucket owners can allow other users to read the website configuration by
+// writing a bucket policy granting them the S3:GetBucketWebsite permission.
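+//
+// As an illustrative sketch only (placeholder bucket name; assumes a
+// configured *S3 client):
+//
+//    out, err := client.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err == nil && out.IndexDocument != nil {
+//        fmt.Println(*out.IndexDocument.Suffix)
+//    }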
+//
+// The following operations are related to GetBucketWebsite:
+//
+//    * DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html)
+//
+//    * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
+	req, out := c.GetBucketWebsiteRequest(input)
+	return out, req.Send()
+}
+
+// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) {
+	req, out := c.GetBucketWebsiteRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetObject = "GetObject"
+
+// GetObjectRequest generates a "aws/request.Request" representing the
+// client's request for the GetObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObject for more information on using the GetObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the GetObjectRequest method.
+//    req, resp := client.GetObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) {
+	op := &request.Operation{
+		Name:       opGetObject,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}/{Key+}",
+	}
+
+	if input == nil {
+		input = &GetObjectInput{}
+	}
+
+	output = &GetObjectOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetObject API operation for Amazon Simple Storage Service.
+//
+// Retrieves objects from Amazon S3. To use GET, you must have READ access to
+// the object. If you grant READ access to the anonymous user, you can return
+// the object without using an authorization header.
+//
+// An Amazon S3 bucket has no directory hierarchy such as you would find in
+// a typical computer file system. You can, however, create a logical hierarchy
+// by using object key names that imply a folder structure. For example, instead
+// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.
+//
+// To get an object from such a logical hierarchy, specify the full key name
+// for the object in the GET operation. For a virtual hosted-style request example,
+// if you have the object photos/2006/February/sample.jpg, specify the resource
+// as /photos/2006/February/sample.jpg. For a path-style request example, if
+// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket,
+// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For
+// more information about request types, see HTTP Host Header Bucket Specification
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
+//
+// To distribute large files to many people, you can save bandwidth costs by
+// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html).
+// For more information about returning the ACL of an object, see GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html).
+//
+// If the object you are retrieving is stored in the S3 Glacier or S3 Glacier
+// Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering
+// Deep Archive tiers, before you can retrieve the object you must first restore
+// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+// Otherwise, this action returns an InvalidObjectStateError error. For information
+// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GET requests if your object uses server-side encryption with
+// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+// get an HTTP 400 BadRequest error.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you GET the object, you must use the following headers:
+//
+//    * x-amz-server-side-encryption-customer-algorithm
+//
+//    * x-amz-server-side-encryption-customer-key
+//
+//    * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging
+// action), the response also returns the x-amz-tagging-count header that provides
+// the number of tags associated with the object. You can use GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+// to retrieve the tag set associated with an object.
+//
+// Permissions
+//
+// You need the s3:GetObject permission for this operation. For more information,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends
+// on whether you also have the s3:ListBucket permission.
+//
+//    * If you have the s3:ListBucket permission on the bucket, Amazon S3 will
+//    return an HTTP status code 404 ("no such key") error.
+//
+//    * If you don’t have the s3:ListBucket permission, Amazon S3 will return
+//    an HTTP status code 403 ("access denied") error.
+//
+// Versioning
+//
+// By default, the GET action returns the current version of an object. To return
+// a different version, use the versionId subresource.
+//
+// If the current version of the object is a delete marker, Amazon S3 behaves
+// as if the object was deleted and includes x-amz-delete-marker: true in the
+// response.
+//
+// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html).
+//
+// Overriding Response Header Values
+//
+// There are times when you want to override certain response header values
+// in a GET response. For example, you might override the Content-Disposition
+// response header value in your GET request.
+//
+// You can override values for a set of response headers using the following
+// query parameters. These response header values are sent only on a successful
+// request, that is, when status code 200 OK is returned. The set of headers
+// you can override using these parameters is a subset of the headers that Amazon
+// S3 accepts when you create an object. The response headers that you can override
+// for the GET response are Content-Type, Content-Language, Expires, Cache-Control,
+// Content-Disposition, and Content-Encoding. To override these header values
+// in the GET response, you use the following request parameters.
+//
+// You must sign the request, either using an Authorization header or a presigned
+// URL, when using these parameters. They cannot be used with an unsigned (anonymous)
+// request.
+//
+//    * response-content-type
+//
+//    * response-content-language
+//
+//    * response-expires
+//
+//    * response-cache-control
+//
+//    * response-content-disposition
+//
+//    * response-content-encoding
+//
+// Additional Considerations about Request Headers
+//
+// If both of the If-Match and If-Unmodified-Since headers are present in the
+// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since
+// condition evaluates to false; then, S3 returns 200 OK and the data requested.
+//
+// If both of the If-None-Match and If-Modified-Since headers are present in
+// the request as follows: If-None-Match condition evaluates to false, and;
+// If-Modified-Since condition evaluates to true; then, S3 returns the 304 Not
+// Modified response code.
+//
+// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+//
+// The following operations are related to GetObject:
+//
+//    * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
+//
+//    * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObject for usage and error information.
+//
+// Returned Error Codes:
+//    * ErrCodeNoSuchKey "NoSuchKey"
+//    The specified key does not exist.
+//
+//    * ErrCodeInvalidObjectState "InvalidObjectState"
+//    Object is archived and inaccessible until restored.
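+//
+// As an illustrative sketch only (bucket and key are the placeholders used
+// in the examples above; "client" is assumed to be a configured *S3). The
+// Body stream should always be closed:
+//
+//    out, err := client.GetObject(&s3.GetObjectInput{
+//        Bucket: aws.String("examplebucket"),
+//        Key:    aws.String("photos/2006/February/sample.jpg"),
+//    })
+//    if err == nil {
+//        defer out.Body.Close()
+//        io.Copy(os.Stdout, out.Body)
+//    }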
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectAclRequest method. +// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + output = &GetObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAcl API operation for Amazon Simple Storage Service. +// +// Returns the access control list (ACL) of an object. To use this operation, +// you must have READ_ACP access to the object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Versioning +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId subresource. +// +// The following operations are related to GetObjectAcl: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. 
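+//
+// As an illustrative sketch only (placeholder bucket and key; assumes a
+// configured *S3 client):
+//
+//    out, err := client.GetObjectAcl(&s3.GetObjectAclInput{
+//        Bucket: aws.String("examplebucket"),
+//        Key:    aws.String("photos/2006/February/sample.jpg"),
+//    })
+//    if err == nil {
+//        for _, grant := range out.Grants {
+//            fmt.Println(grant)
+//        }
+//    }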
+//
+// Returned Error Codes:
+//    * ErrCodeNoSuchKey "NoSuchKey"
+//    The specified key does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
+	req, out := c.GetObjectAclRequest(input)
+	return out, req.Send()
+}
+
+// GetObjectAclWithContext is the same as GetObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) {
+	req, out := c.GetObjectAclRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetObjectTagging = "GetObjectTagging"
+
+// GetObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectTagging for more information on using the GetObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the GetObjectTaggingRequest method.
+//    req, resp := client.GetObjectTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
+	op := &request.Operation{
+		Name:       opGetObjectTagging,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}/{Key+}?tagging",
+	}
+
+	if input == nil {
+		input = &GetObjectTaggingInput{}
+	}
+
+	output = &GetObjectTaggingOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag-set of an object. You send the GET request against the tagging
+// subresource associated with the object.
+//
+// To use this operation, you must have permission to perform the s3:GetObjectTagging
+// action. By default, the GET action returns information about the current version
+// of an object. For a versioned bucket, you can have multiple versions of an
+// object in your bucket. To retrieve tags of any other version, use the versionId
+// query parameter. You also need permission for the s3:GetObjectVersionTagging
+// action.
+//
+// By default, the bucket owner has this permission and can grant this permission
+// to others.
+//
+// For information about the Amazon S3 object tagging feature, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
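+//
+// As an illustrative sketch only (placeholder names; set VersionId on the
+// input to read the tags of a specific version instead):
+//
+//    out, err := client.GetObjectTagging(&s3.GetObjectTaggingInput{
+//        Bucket: aws.String("examplebucket"),
+//        Key:    aws.String("photos/2006/February/sample.jpg"),
+//    })
+//    if err == nil {
+//        for _, tag := range out.TagSet {
+//            fmt.Println(*tag.Key, *tag.Value)
+//        }
+//    }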
+//
+// The following actions are related to GetObjectTagging:
+//
+//    * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+//    * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
+	req, out := c.GetObjectTaggingRequest(input)
+	return out, req.Send()
+}
+
+// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) {
+	req, out := c.GetObjectTaggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetPublicAccessBlock = "GetPublicAccessBlock"
+
+// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the
+// client's request for the GetPublicAccessBlock operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the GetPublicAccessBlockRequest method.
+//    req, resp := client.GetPublicAccessBlockRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock
+func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) {
+	op := &request.Operation{
+		Name:       opGetPublicAccessBlock,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?publicAccessBlock",
+	}
+
+	if input == nil {
+		input = &GetPublicAccessBlockInput{}
+	}
+
+	output = &GetPublicAccessBlockOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetPublicAccessBlock API operation for Amazon Simple Storage Service.
+//
+// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To
+// use this operation, you must have the s3:GetBucketPublicAccessBlock permission.
+// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the HeadBucketRequest method. +// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This action is useful to determine if a bucket exists and you have permission +// to access it. The action returns a 200 OK if the bucket exists and you have +// permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, +// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A +// message body is not included, so you cannot determine the exception beyond +// these error codes. +// +// To use this operation, you must have permissions to perform the s3:ListBucket +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. +// +// The HEAD action retrieves metadata from an object without returning the object +// itself. This action is useful if you're only interested in an object's metadata. +// To use HEAD, you must have READ access to the object. +// +// A HEAD request has the same options as a GET action on an object. The response +// is identical to the GET response except that there is no response body. Because +// of this, if the HEAD request generates an error, it returns a generic 404 +// Not Found or 403 Forbidden code. It is not possible to retrieve the exact +// exception beyond these error codes. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// * Encryption request headers, like x-amz-server-side-encryption, should +// not be sent for GET requests if your object uses server-side encryption +// with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon +// S3–managed encryption keys (SSE-S3). If your object does use these types +// of keys, you’ll get an HTTP 400 BadRequest error. +// +// * The last modified property in this case is the creation date of the +// object. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Consider the following when using request headers: +// +// * Consideration 1 – If both of the If-Match and If-Unmodified-Since +// headers are present in the request as follows: If-Match condition evaluates +// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon +// S3 returns 200 OK and the data requested. 
+// +// * Consideration 2 – If both of the If-None-Match and If-Modified-Since +// headers are present in the request as follows: If-None-Match condition +// evaluates to false, and; If-Modified-Since condition evaluates to true; +// Then Amazon S3 returns the 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 returns +// an HTTP status code 403 ("access denied") error. +// +// The following action is related to HeadObject: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketsRequest method. 
+// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketsExtended = "ListBucketsExtended" + +// ListBucketsExtendedRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketsExtended operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketsExtended for more information on using the ListBucketsExtended +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketsExtendedRequest method. 
+// req, resp := client.ListBucketsExtendedRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsExtended +func (c *S3) ListBucketsExtendedRequest(input *ListBucketsExtendedInput) (req *request.Request, output *ListBucketsExtendedOutput) { + op := &request.Operation{ + Name: opListBucketsExtended, + HTTPMethod: "GET", + HTTPPath: "/?extended", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Buckets[-1].Name"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListBucketsExtendedInput{} + } + + output = &ListBucketsExtendedOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketsExtended API operation for Amazon Simple Storage Service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketsExtended for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsExtended +func (c *S3) ListBucketsExtended(input *ListBucketsExtendedInput) (*ListBucketsExtendedOutput, error) { + req, out := c.ListBucketsExtendedRequest(input) + return out, req.Send() +} + +// ListBucketsExtendedWithContext is the same as ListBucketsExtended with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketsExtended for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsExtendedWithContext(ctx aws.Context, input *ListBucketsExtendedInput, opts ...request.Option) (*ListBucketsExtendedOutput, error) { + req, out := c.ListBucketsExtendedRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListBucketsExtendedPages iterates over the pages of a ListBucketsExtended operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBucketsExtended method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBucketsExtended operation. +// pageNum := 0 +// err := client.ListBucketsExtendedPages(params, +// func(page *s3.ListBucketsExtendedOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListBucketsExtendedPages(input *ListBucketsExtendedInput, fn func(*ListBucketsExtendedOutput, bool) bool) error { + return c.ListBucketsExtendedPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListBucketsExtendedPagesWithContext same as ListBucketsExtendedPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsExtendedPagesWithContext(ctx aws.Context, input *ListBucketsExtendedInput, fn func(*ListBucketsExtendedOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBucketsExtendedInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBucketsExtendedRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBucketsExtendedOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListLegalHolds = "ListLegalHolds" + +// ListLegalHoldsRequest generates a "aws/request.Request" representing the +// client's request for the ListLegalHolds operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListLegalHolds for more information on using the ListLegalHolds +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListLegalHoldsRequest method. +// req, resp := client.ListLegalHoldsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListLegalHolds +func (c *S3) ListLegalHoldsRequest(input *ListLegalHoldsInput) (req *request.Request, output *ListLegalHoldsOutput) { + op := &request.Operation{ + Name: opListLegalHolds, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?legalHold", + } + + if input == nil { + input = &ListLegalHoldsInput{} + } + + output = &ListLegalHoldsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListLegalHolds API operation for Amazon Simple Storage Service. +// +// Returns a list of legal holds on an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListLegalHolds for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListLegalHolds +func (c *S3) ListLegalHolds(input *ListLegalHoldsInput) (*ListLegalHoldsOutput, error) { + req, out := c.ListLegalHoldsRequest(input) + return out, req.Send() +} + +// ListLegalHoldsWithContext is the same as ListLegalHolds with the addition of +// the ability to pass a context and additional request options. +// +// See ListLegalHolds for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListLegalHoldsWithContext(ctx aws.Context, input *ListLegalHoldsInput, opts ...request.Option) (*ListLegalHoldsOutput, error) {
+	req, out := c.ListLegalHoldsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opListMultipartUploads = "ListMultipartUploads"
+
+// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
+// client's request for the ListMultipartUploads operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListMultipartUploads for more information on using the ListMultipartUploads
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the ListMultipartUploadsRequest method.
+//    req, resp := client.ListMultipartUploadsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+	op := &request.Operation{
+		Name:       opListMultipartUploads,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?uploads",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"KeyMarker", "UploadIdMarker"},
+			OutputTokens:    []string{"NextKeyMarker", "NextUploadIdMarker"},
+			LimitToken:      "MaxUploads",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &ListMultipartUploadsInput{}
+	}
+
+	output = &ListMultipartUploadsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListMultipartUploads API operation for Amazon Simple Storage Service.
+//
+// This action lists in-progress multipart uploads. An in-progress multipart
+// upload is a multipart upload that has been initiated using the Initiate Multipart
+// Upload request, but has not yet been completed or aborted.
+//
+// This action returns at most 1,000 multipart uploads in the response. 1,000
+// multipart uploads is the maximum number of uploads a response can include,
+// which is also the default value. You can further limit the number of uploads
+// in a response by specifying the max-uploads parameter in the request. If
+// additional multipart uploads satisfy the list criteria, the response will
+// contain an IsTruncated element with the value true. To list the additional
+// multipart uploads, use the key-marker and upload-id-marker request parameters.
+//
+// In the response, the uploads are sorted by key. If your application has initiated
+// more than one multipart upload using the same object key, then uploads in
+// the response are first sorted by key. Additionally, uploads are sorted in
+// ascending order within each key by the upload initiation time.
+//
+// For more information on multipart uploads, see Uploading Objects Using Multipart
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
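+//
+// As an illustrative sketch only (placeholder bucket name): instead of driving
+// key-marker and upload-id-marker by hand, the paginating helper defined below
+// can walk the truncated responses:
+//
+//    err := client.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
+//        Bucket: aws.String("examplebucket"),
+//    }, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
+//        for _, u := range page.Uploads {
+//            fmt.Println(*u.Key, *u.UploadId)
+//        }
+//        return true // continue to the next page
+//    })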
+// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListMultipartUploads: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListMultipartUploads for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. +// +// To use this operation, you must have permissions to perform the s3:ListBucketVersions +// action. Be aware of the name difference. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// This action is not supported by Amazon S3 on Outposts. +// +// The following operations are related to ListObjectVersions: +// +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This action has been revised. We recommend that you use the newer version, +// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. +// +// The following operations are related to ListObjects: +// +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *s3.ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsV2Request method. 
+// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure +// to design your application to parse the contents of the response and handle +// it appropriately. Objects are returned sorted in an ascending order of the +// respective key names in the list. +// +// To use this operation, you must have READ access to the bucket. +// +// To use this action in an AWS Identity and Access Management (IAM) policy, +// you must have permissions to perform the s3:ListBucket action. The bucket +// owner has this permission by default and can grant this permission to others. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// This section describes the latest revision of this action. We recommend that +// you use this revised API for application development. For backward compatibility, +// Amazon S3 continues to support the prior version of this API, ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). +// +// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). +// +// The following operations are related to ListObjectsV2: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. 
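+//
+// As an illustrative sketch (bucket and prefix are placeholders), listing
+// the keys under a prefix and letting the service sort them by key name:
+//
+// out, err := client.ListObjectsV2(&s3.ListObjectsV2Input{
+// Bucket: aws.String("myBucket"),
+// Prefix: aws.String("photos/"),
+// })
+// if err == nil {
+// for _, obj := range out.Contents {
+// fmt.Println(*obj.Key, *obj.Size)
+// }
+// }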
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. +// This operation must include the upload ID, which you obtain by sending the +// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). +// This request returns a maximum of 1,000 uploaded parts. The default number +// of parts returned is 1,000 parts. You can restrict the number of parts returned +// by specifying the max-parts request parameter. If your multipart upload consists +// of more than 1,000 parts, the response returns an IsTruncated field with +// the value of true, and a NextPartNumberMarker element. In subsequent ListParts +// requests you can include the part-number-marker query string parameter and +// set its value to the NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListParts: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. 
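+//
+// A hedged sketch (ctx is an existing aws.Context; the bucket, key, and
+// upload ID are placeholders) of listing the parts uploaded so far:
+//
+// out, err := client.ListPartsWithContext(ctx, &s3.ListPartsInput{
+// Bucket: aws.String("myBucket"),
+// Key: aws.String("largeObject"),
+// UploadId: aws.String("exampleUploadId"),
+// })
+// if err == nil {
+// for _, p := range out.Parts {
+// fmt.Println(*p.PartNumber, *p.ETag)
+// }
+// }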
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *s3.ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAclRequest method. 
+// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// * Specify the ACL in the request body +// +// * Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. If you +// use these ACL-specific headers, you cannot use the x-amz-acl header to +// set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control +// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: id – if the value specified is the canonical user ID +// of an AWS account uri – if you are granting permissions to a predefined +// group emailAddress – if the value specified is the email address of +// an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. 
California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo). For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-write
+// header grants create, overwrite, and delete objects permission to the LogDelivery
+// group predefined by Amazon S3 and two AWS accounts identified by their
+// email addresses. x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// id="111122223333", id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request
+//
+// * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo). For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Related Resources
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAcl for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// * The request's Origin header must match AllowedOrigin elements. +// +// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. 
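+//
+// As an illustrative sketch of such a rule (the origins, methods, and bucket
+// name are placeholders, assuming the AWS-style types vendored here):
+//
+// _, err := client.PutBucketCors(&s3.PutBucketCorsInput{
+// Bucket: aws.String("myBucket"),
+// CORSConfiguration: &s3.CORSConfiguration{
+// CORSRules: []*s3.CORSRule{{
+// AllowedOrigins: []*string{aws.String("http://www.example.com")},
+// AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
+// AllowedHeaders: []*string{aws.String("*")},
+// MaxAgeSeconds: aws.Int64(3000),
+// }},
+// },
+// })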
+// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// S3 User Guide. +// +// Related Resources +// +// * GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. 
+// req, resp := client.PutBucketLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLifecycleConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleConfigurationInput{}
+ }
+
+ output = &PutBucketLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. For information about lifecycle configuration, see
+// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The previous version
+// of the API supported filtering based only on an object key name prefix, which
+// is supported for backward compatibility. For the related API description,
+// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html).
+//
+// Rules
+//
+// You specify the lifecycle configuration in your request body. The lifecycle
+// configuration is specified as XML consisting of one or more rules. Each rule
+// consists of the following:
+//
+// * Filter identifying a subset of objects to which the rule applies. The
+// filter can be based on a key name prefix, object tags, or a combination
+// of both.
+//
+// * Status indicating whether the rule is in effect.
+//
+// * One or more lifecycle transition and expiration actions that you want
+// Amazon S3 to perform on the objects identified by the filter. If the state
+// of your bucket is versioning-enabled or versioning-suspended, you can
+// have many versions of the same object (one current version and zero or
+// more noncurrent versions). Amazon S3 provides predefined actions that
+// you can specify for current and noncurrent object versions.
+//
+// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html).
+//
+// Permissions
+//
+// By default, all Amazon S3 resources are private, including buckets, objects,
+// and related subresources (for example, lifecycle configuration and website
+// configuration). Only the resource owner (that is, the AWS account that created
+// it) can access the resource. The resource owner can optionally grant access
+// permissions to others by writing an access policy. For this operation, a
+// user must have the s3:PutLifecycleConfiguration permission.
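+//
+// As a hedged illustration of the rule structure described under Rules above
+// (the bucket, prefix, and expiration period are placeholders, assuming the
+// AWS-style types vendored here):
+//
+// _, err := client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+// Bucket: aws.String("myBucket"),
+// LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+// Rules: []*s3.LifecycleRule{{
+// ID: aws.String("expire-old-logs"),
+// Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+// Status: aws.String("Enabled"),
+// Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
+// }},
+// },
+// })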
+// +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLogging for more information on using the PutBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLoggingRequest method. 
+// req, resp := client.PutBucketLoggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLogging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &PutBucketLoggingInput{}
+ }
+
+ output = &PutBucketLoggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Set the logging parameters for a bucket and specify permissions for who
+// can view and modify the logging parameters.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketProtectionConfiguration = "PutBucketProtectionConfiguration"
+
+// PutBucketProtectionConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketProtectionConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketProtectionConfiguration for more information on using the PutBucketProtectionConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketProtectionConfigurationRequest method.
+// req, resp := client.PutBucketProtectionConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketProtectionConfiguration
+func (c *S3) PutBucketProtectionConfigurationRequest(input *PutBucketProtectionConfigurationInput) (req *request.Request, output *PutBucketProtectionConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketProtectionConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?protection",
+ }
+
+ if input == nil {
+ input = &PutBucketProtectionConfigurationInput{}
+ }
+
+ output = &PutBucketProtectionConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketProtectionConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets the protection configuration of an existing bucket. EnablePermanentRetention
+// is optional and, if not included, is considered to be false. Once set to true,
+// it must be included in any subsequent PUT Bucket?protection requests for that
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketProtectionConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketProtectionConfiguration
+func (c *S3) PutBucketProtectionConfiguration(input *PutBucketProtectionConfigurationInput) (*PutBucketProtectionConfigurationOutput, error) {
+ req, out := c.PutBucketProtectionConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketProtectionConfigurationWithContext is the same as PutBucketProtectionConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketProtectionConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketProtectionConfigurationWithContext(ctx aws.Context, input *PutBucketProtectionConfigurationInput, opts ...request.Option) (*PutBucketProtectionConfigurationOutput, error) {
+ req, out := c.PutBucketProtectionConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketVersioning = "PutBucketVersioning"
+
+// PutBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketVersioning operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketVersioning for more information on using the PutBucketVersioning
+// API call, and error handling.
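+//
+// For illustration only, a minimal sketch that enables versioning on a
+// bucket (the client and bucket name are placeholders; Status may be
+// "Enabled" or "Suspended"):
+//
+// _, err := client.PutBucketVersioning(&s3.PutBucketVersioningInput{
+// Bucket: aws.String("myBucket"),
+// VersioningConfiguration: &s3.VersioningConfiguration{
+// Status: aws.String("Enabled"),
+// },
+// })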
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketVersioningRequest method.
+// req, resp := client.PutBucketVersioningRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
+ op := &request.Operation{
+ Name: opPutBucketVersioning,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &PutBucketVersioningInput{}
+ }
+
+ output = &PutBucketVersioningOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketVersioning API operation for Amazon Simple Storage Service.
+//
+// Sets the versioning state of an existing bucket. To set the versioning
+// state, you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketVersioning for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
+func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketWebsite = "PutBucketWebsite"
+
+// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketWebsite for more information on using the PutBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketWebsiteRequest method.
+// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT action requires the S3:PutBucketWebsite permission. By default, +// only the bucket owner can configure the website attached to a bucket; however, +// bucket owners can allow other users to set the website configuration by writing +// a bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you +// add a website configuration with the following elements. Because all requests +// are sent to another website, you don't need to provide index document name +// for the bucket. +// +// * WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website configuration +// must provide an index document for the bucket, because some requests might +// not be redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode +// +// Amazon S3 has a limitation of 50 routing rules per website configuration. +// If you require more than 50 routing rules, you can use object redirect. For +// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon S3 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. 
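+//
+// Editorial usage sketch (not part of the generated documentation): configuring
+// an index and error document for a bucket website, assuming an existing client
+// svc (*s3.S3); bucket and document names are illustrative.
+//
+// _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+// Bucket: aws.String("example-bucket"),
+// WebsiteConfiguration: &s3.WebsiteConfiguration{
+// IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+// ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+// },
+// })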
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. You must have WRITE permissions on a bucket to +// add an object to it. +// +// Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. +// +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. Amazon S3 does not provide object locking; if you need this, make +// sure to build it into your application layer or use versioning instead. +// +// To ensure that data is not corrupted traversing the network, use the Content-MD5 +// header. When you use this header, Amazon S3 checks the object against the +// provided MD5 value and, if they do not match, returns an error. Additionally, +// you can calculate the MD5 while putting an object to Amazon S3 and compare +// the returned ETag to the calculated MD5 value. +// +// The Content-MD5 header is required for any request to upload an object with +// a retention period configured using Amazon S3 Object Lock. 
For more information
+// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+// in the Amazon S3 User Guide.
+//
+// Server-side Encryption
+//
+// You can optionally request server-side encryption. With server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts the data when you access it. You have the option to provide
+// your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS).
+// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
+//
+// If you request server-side encryption using AWS Key Management Service (SSE-KMS),
+// you can enable an S3 Bucket Key at the object-level. For more information,
+// see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
+// in the Amazon S3 User Guide.
+//
+// Access Control List (ACL)-Specific Request Headers
+//
+// You can use headers to grant ACL-based permissions. By default, all objects
+// are private. Only the owner has full access control. When adding a new object,
+// you can grant permissions to individual AWS accounts or to predefined groups
+// defined by Amazon S3. These permissions are then added to the ACL on the
+// object. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
+//
+// Storage Class Options
+//
+// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+// objects. The STANDARD storage class provides high durability and high availability.
+// Depending on performance needs, you can specify a different Storage Class.
+// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+// in the Amazon S3 Service Developer Guide.
+//
+// Versioning
+//
+// If you enable versioning for a bucket, Amazon S3 automatically generates
+// a unique version ID for the object being stored. Amazon S3 returns this ID
+// in the response. When you enable versioning for a bucket, if Amazon S3 receives
+// multiple write requests for the same object simultaneously, it stores all
+// of the objects.
+//
+// For more information about versioning, see Adding Objects to Versioning Enabled
+// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html).
+// For information about returning the versioning state of a bucket, see GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html).
+//
+// Related Resources
+//
+// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObject for usage and error information.
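+//
+// Editorial usage sketch (not part of the generated documentation): uploading
+// a small object from memory, assuming an existing client svc (*s3.S3) and the
+// standard library strings package; bucket, key, and content are illustrative.
+//
+// _, err := svc.PutObject(&s3.PutObjectInput{
+// Bucket: aws.String("example-bucket"),
+// Key: aws.String("example-key"),
+// Body: strings.NewReader("hello world"),
+// })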
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + output = &PutObjectAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectAcl API operation for Amazon Simple Storage Service. +// +// Uses the acl subresource to set the access control list (ACL) permissions +// for a new or existing object in an S3 bucket. You must have WRITE_ACP permission +// to set the ACL of an object. For more information, see What permissions can +// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon S3 User Guide. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Depending on your application needs, you can choose to set the ACL on an +// object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request +// body, you can continue to use that approach. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 Developer Guide. 
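+//
+// Editorial usage sketch (not part of the generated documentation): applying a
+// canned ACL to an existing object, assuming an existing client svc (*s3.S3);
+// bucket and key names are illustrative.
+//
+// _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+// Bucket: aws.String("example-bucket"),
+// Key: aws.String("example-key"),
+// ACL: aws.String(s3.ObjectCannedACLPublicRead),
+// })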
+//
+// Access Permissions
+//
+// You can set access permissions using one of the following methods:
+//
+// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. Specify the canned ACL name
+// as the value of x-amz-acl. If you use this header, you cannot use other
+// access control-specific headers in your request. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+// these headers, you specify explicit access permissions and grantees (AWS
+// accounts or Amazon S3 groups) who will receive the permission. If you
+// use these ACL-specific headers, you cannot use the x-amz-acl header to set
+// a canned ACL. These parameters map to the set of permissions that Amazon
+// S3 supports in an ACL. For more information, see Access Control List (ACL)
+// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an AWS account uri – if you are granting permissions to a predefined
+// group emailAddress – if the value specified is the email address of
+// an AWS account Using email addresses to specify a grantee is only supported
+// in the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants list objects permission to the two AWS accounts identified
+// by their email addresses. x-amz-grant-read: emailAddress="xyz@amazon.com",
+// emailAddress="abc@amazon.com"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
+//
+// * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Versioning
+//
+// The ACL of an object is set at the object version level. By default, PUT
+// sets the ACL of the current version of an object.
To set the ACL of a different +// version, use the versionId subresource. +// +// Related Resources +// +// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + return out, req.Send() +} + +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket. +// +// A tag is a key-value pair. 
You can associate tags with an object by sending +// a PUT request against the tagging subresource that is associated with the +// object. You can retrieve tags by sending a GET request. For more information, +// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// Special Errors +// +// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This +// error can occur if the tag did not pass input validation. For more information, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: MalformedXMLError Cause: The XML provided does not match the schema. +// +// * Code: OperationAbortedError Cause: A conflicting conditional action +// is currently in progress against this resource. Please try again. +// +// * Code: InternalError Cause: The service was unable to apply the provided +// tag to the object. +// +// Related Resources +// +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPublicAccessBlock = "PutPublicAccessBlock" + +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPublicAccessBlockRequest method. +// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// Related Resources +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. 
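+//
+// Editorial usage sketch (not part of the generated documentation): blocking
+// all public access on a bucket, assuming an existing client svc (*s3.S3); the
+// bucket name is illustrative.
+//
+// _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+// Bucket: aws.String("example-bucket"),
+// PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+// BlockPublicAcls: aws.Bool(true),
+// BlockPublicPolicy: aws.Bool(true),
+// IgnorePublicAcls: aws.Bool(true),
+// RestrictPublicBuckets: aws.Bool(true),
+// },
+// })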
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// This action is not supported by Amazon S3 on Outposts. +// +// This action performs the following types of requests: +// +// * select - Perform a select query on an archived object +// +// * restore an archive - Restore an archived object +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// action. The bucket owner has this permission by default and can grant this +// permission to others. 
For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// Querying Archives with Select Requests +// +// You use a select type of request to perform SQL queries on archived objects. +// The archived objects that are being queried by the select request must be +// formatted as uncompressed comma-separated values (CSV) files. You can run +// queries and custom analytics on your archived data without having to restore +// your data to a hotter Amazon S3 tier. For an overview about select requests, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon S3 User Guide. +// +// When making a select request, do the following: +// +// * Define an output location for the select query's output. This must be +// an Amazon S3 bucket in the same AWS Region as the bucket that contains +// the archive object that is being queried. The AWS account that initiates +// the job must have permissions to write to the S3 bucket. You can specify +// the storage class and encryption for the output objects stored in the +// bucket. For more information about output, see Querying Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon S3 User Guide. For more information about the S3 structure +// in the request body, see the following: PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide +// +// * Define the SQL expression for the SELECT type of restoration for your +// query in the request body's SelectParameters structure. You can use expressions +// like the following examples. The following expression returns all records +// from the specified object. SELECT * FROM Object Assuming that you are +// not using any headers for data stored in the object, you can specify columns +// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > +// 100 If you have headers and you set the fileHeaderInfo in the CSV structure +// in the request body to USE, you can specify headers in the query. (If +// you set the fileHeaderInfo field to IGNORE, the first row is skipped for +// the query.) You cannot mix ordinal positions with header column names. +// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s +// +// For more information about using SQL with S3 Glacier Select restore, see +// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide. +// +// When making a select request, you can also do the following: +// +// * To expedite your queries, specify the Expedited tier. For more information +// about tiers, see "Restoring Archives," later in this topic. 
+//
+// * Specify details about the data serialization format of both the input
+// object that is being queried and the serialization of the CSV-encoded
+// query results.
+//
+// The following are additional important facts about the select feature:
+//
+// * The output results are new Amazon S3 objects. Unlike archive retrievals,
+// they are stored until explicitly deleted, manually or through a lifecycle
+// policy.
+//
+// * You can issue more than one select request on the same Amazon S3 object.
+// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
+//
+// * Amazon S3 accepts a select request even if the object has already been
+// restored. A select request doesn't return error response 409.
+//
+// Restoring objects
+//
+// Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage
+// class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep
+// Archive tiers are not accessible in real time. For objects in Archive Access
+// or Deep Archive Access tiers, you must first initiate a restore request, and
+// then wait until the object is moved into the Frequent Access tier. For objects
+// in S3 Glacier or S3 Glacier Deep Archive storage classes, you must first initiate
+// a restore request, and then wait until a temporary copy of the object is
+// available. To access an archived object, you must restore the object for
+// the duration (number of days) that you specify.
+//
+// To restore a specific object version, you can provide a version ID. If you
+// don't provide a version ID, Amazon S3 restores the current version.
+//
+// When restoring an archived object (or using a select request), you can specify
+// one of the following data access tier options in the Tier element of the
+// request body:
+//
+// * Expedited - Expedited retrievals allow you to quickly access your data
+// stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive
+// tier when occasional urgent requests for a subset of archives are required.
+// For all but the largest archived objects (250 MB+), data accessed using
+// Expedited retrievals is typically made available within 1–5 minutes.
+// Provisioned capacity ensures that retrieval capacity for Expedited retrievals
+// is available when you need it. Expedited retrievals and provisioned capacity
+// are not available for objects stored in the S3 Glacier Deep Archive storage
+// class or S3 Intelligent-Tiering Deep Archive tier.
+//
+// * Standard - Standard retrievals allow you to access any of your archived
+// objects within several hours. This is the default option for retrieval
+// requests that do not specify the retrieval option. Standard retrievals
+// typically finish within 3–5 hours for objects stored in the S3 Glacier
+// storage class or S3 Intelligent-Tiering Archive tier. They typically finish
+// within 12 hours for objects stored in the S3 Glacier Deep Archive storage
+// class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals
+// are free for objects stored in S3 Intelligent-Tiering.
+//
+// * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier,
+// enabling you to retrieve large amounts, even petabytes, of data inexpensively.
+// Bulk retrievals typically finish within 5–12 hours for objects stored
+// in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier.
+// They typically finish within 48 hours for objects stored in the S3 Glacier
+// Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
+//
+// Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
+//
+// For more information about archive retrieval options and provisioned capacity
+// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+// in the Amazon S3 User Guide.
+//
+// You can use Amazon S3 restore speed upgrade to change the restore speed to
+// a faster speed while it is in progress. For more information, see Upgrading
+// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
+// in the Amazon S3 User Guide.
+//
+// To get the status of object restoration, you can send a HEAD request. Operations
+// return the x-amz-restore header, which provides information about the restoration
+// status, in the response. You can use Amazon S3 event notifications to notify
+// you when a restore is initiated or completed. For more information, see Configuring
+// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon S3 User Guide.
+//
+// After restoring an archived object, you can update the restoration period
+// by reissuing the request with a new period. Amazon S3 updates the restoration
+// period relative to the current time and charges only for the request; there
+// are no data transfer charges. You cannot update the restoration period when
+// Amazon S3 is actively processing your current restore request for the object.
+//
+// If your bucket has a lifecycle configuration with a rule that includes an
+// expiration action, the object expiration overrides the life span that you
+// specify in a restore request. For example, if you restore an object copy
+// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes
+// the object in 3 days. For more information about lifecycle configuration,
+// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+//
+// Responses
+//
+// A successful action returns either the 200 OK or 202 Accepted status code.
+//
+// * If the object is not previously restored, then Amazon S3 returns 202
+// Accepted in the response.
+//
+// * If the object is previously restored, Amazon S3 returns 200 OK in the
+// response.
+//
+// Special Errors
+//
+// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress.
+// (This error does not apply to SELECT type requests.) HTTP Status Code:
+// 409 Conflict SOAP Fault Code Prefix: Client
+//
+// * Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals
+// are currently not available. Try again later. (Returned if there is insufficient
+// capacity to process the Expedited request. This error applies only to
+// Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP +// Status Code: 503 SOAP Fault Code Prefix: N/A +// +// Related Resources +// +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This action is not allowed against this storage tier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPart for more information on using the UploadPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. 
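+//
+// Editorial usage sketch (not part of the generated documentation): uploading
+// the first part of a multipart upload, assuming an existing client svc (*s3.S3),
+// an uploadID returned by CreateMultipartUpload, and a partData byte slice; all
+// names are illustrative.
+//
+// _, err := svc.UploadPart(&s3.UploadPartInput{
+// Bucket: aws.String("example-bucket"),
+// Key: aws.String("example-key"),
+// UploadId: aws.String(uploadID),
+// PartNumber: aws.Int64(1),
+// Body: bytes.NewReader(partData),
+// })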
+//
+// In this operation, you provide part data in your request. However, you have
+// an option to specify your existing Amazon S3 object as a data source for
+// the part you are uploading. To upload a part from an existing object, you
+// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// operation.
+//
+// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html))
+// before you can upload any part. In response to your initiate request, Amazon
+// S3 returns an upload ID, a unique identifier, that you must include in your
+// upload part request.
+//
+// Part numbers can be any number from 1 to 10,000, inclusive. A part number
+// uniquely identifies a part and also defines its position within the object
+// being created. If you upload a new part using the same part number that was
+// used with a previous part, the previously uploaded part is overwritten. Each
+// part must be at least 5 MB in size, except the last part. There is no size
+// limit on the last part of your multipart upload.
+//
+// To ensure that data is not corrupted when traversing the network, specify
+// the Content-MD5 header in the upload part request. Amazon S3 checks the part
+// data against the provided MD5 value. If they do not match, Amazon S3 returns
+// an error.
+//
+// If the upload request is signed with Signature Version 4, then AWS S3 uses
+// the x-amz-content-sha256 header as a checksum instead of Content-MD5. For
+// more information, see Authenticating Requests: Using the Authorization Header
+// (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+//
+// Note: After you initiate a multipart upload and upload one or more parts, you
+// must either complete or abort the multipart upload in order to stop getting
+// charged for storage of the uploaded parts. Only after you either complete or
+// abort the multipart upload does Amazon S3 free up the parts storage and stop
+// charging you for the parts storage.
+//
+// For more information on multipart uploads, go to Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the
+// Amazon S3 User Guide.
+//
+// For information on the permissions required to use the multipart upload API,
+// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+//
+// You can optionally request server-side encryption where Amazon S3 encrypts
+// your data as it writes it to disks in its data centers and decrypts it for
+// you when you access it. You have the option of providing your own encryption
+// key, or you can use the AWS managed encryption keys. If you choose to provide
+// your own encryption key, the request headers you provide in the request must
+// match the headers you used in the request to initiate the upload by using
+// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// in the Amazon S3 User Guide.
+//
+// Server-side encryption is supported by the S3 Multipart Upload actions. Unless
+// you are using a customer-provided encryption key, you don't need to specify
+// the encryption parameters in each UploadPart request.
Instead, you only need +// to specify the server-side encryption parameters in the initial Initiate +// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// +// If you requested server-side encryption using a customer-provided encryption +// key in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following headers. +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code +// Prefix: Client +// +// Related Resources +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPartCopy for more information on using the UploadPartCopy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+//
+//
+// // Example sending a request using the UploadPartCopyRequest method.
+// req, resp := client.UploadPartCopyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+	op := &request.Operation{
+		Name:       opUploadPartCopy,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}",
+	}
+
+	if input == nil {
+		input = &UploadPartCopyInput{}
+	}
+
+	output = &UploadPartCopyOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
+// Uploads a part by copying data from an existing object as a data source. You
+// specify the data source by adding the request header x-amz-copy-source in
+// your request and a byte range by adding the request header x-amz-copy-source-range
+// in your request.
+//
+// The minimum allowable part size for a multipart upload is 5 MB. For more
+// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
+// in the Amazon S3 User Guide.
+//
+// Instead of using an existing object as part data, you might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action
+// and provide data in your request.
+//
+// You must initiate a multipart upload before you can upload any part. In response
+// to your initiate request, Amazon S3 returns a unique identifier, the upload
+// ID, that you must include in your upload part request.
+//
+// For more information about using the UploadPartCopy operation, see the following:
+//
+// * For conceptual information about multipart uploads, see Uploading Objects
+// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon S3 User Guide.
+//
+// * For information about permissions required to use the multipart upload
+// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+//
+// * For information about copying objects using a single atomic action vs.
+// the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
+// in the Amazon S3 User Guide.
+//
+// * For information about using server-side encryption with customer-provided
+// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+//
+// Note the following additional considerations about the request headers x-amz-copy-source-if-match,
+// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and
+// x-amz-copy-source-if-modified-since:
+//
+// * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+// headers are present in the request as follows: x-amz-copy-source-if-match
+// condition evaluates to true, and x-amz-copy-source-if-unmodified-since
+// condition evaluates to false; Amazon S3 returns 200 OK and copies the
+// data.
+// +// * Consideration 2 - If both of the x-amz-copy-source-if-none-match and +// x-amz-copy-source-if-modified-since headers are present in the request +// as follows: x-amz-copy-source-if-none-match condition evaluates to false, +// and; x-amz-copy-source-if-modified-since condition evaluates to true; +// Amazon S3 returns 412 Precondition Failed response code. +// +// Versioning +// +// If your bucket has versioning enabled, you could have multiple versions of +// the same object. By default, x-amz-copy-source identifies the current version +// of the object to copy. If the current version is a delete marker and you +// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 +// error, because the object does not exist. If you specify versionId in the +// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns +// an HTTP 400 error, because you are not allowed to specify a delete marker +// as a version for the x-amz-copy-source. +// +// You can optionally specify a specific version of the source object to copy +// by adding the versionId subresource as shown in the following example: +// +// x-amz-copy-source: /bucket/object?versionId=version id +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found +// +// * Code: InvalidRequest Cause: The specified copy source is not supported +// as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// Related Resources +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
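+//
+// Editorial usage sketch (not part of the generated documentation): copying the
+// first 5 MiB of an existing object into part 2 of a multipart upload, assuming
+// an existing client svc (*s3.S3) and an uploadID from CreateMultipartUpload;
+// all names are illustrative.
+//
+// _, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
+// Bucket: aws.String("example-bucket"),
+// Key: aws.String("example-key"),
+// UploadId: aws.String(uploadID),
+// PartNumber: aws.Int64(2),
+// CopySource: aws.String("source-bucket/source-key"),
+// CopySourceRange: aws.String("bytes=0-5242879"),
+// })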
+func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AbortMultipartUploadInput struct { + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + + // The bucket name to which the upload was taking place. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key of the object for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID that identifies the multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
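+//
+// For illustration (an editorial sketch, not generated documentation): the
+// SDK runs this validation before sending the request, so a missing required
+// field is reported locally as an ErrInvalidParams error. Field values here
+// are assumed placeholders.
+//
+//    input := &s3.AbortMultipartUploadInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-key"),
+//        // UploadId deliberately omitted.
+//    }
+//    if err := input.Validate(); err != nil {
+//        fmt.Println(err) // reports the missing UploadId parameter
+//    }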
+func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *AbortMultipartUploadInput) SetExpectedBucketOwner(v string) *AbortMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
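+//
+// A brief editorial sketch (not generated documentation) of assembling an
+// AccessControlPolicy and validating its nested grants; the IDs are assumed
+// placeholders:
+//
+//    policy := &s3.AccessControlPolicy{
+//        Grants: []*s3.Grant{{
+//            Grantee:    &s3.Grantee{Type: aws.String("CanonicalUser"), ID: aws.String("example-canonical-id")},
+//            Permission: aws.String("READ"),
+//        }},
+//        Owner: &s3.Owner{ID: aws.String("example-owner-id")},
+//    }
+//    if err := policy.Validate(); err != nil {
+//        log.Fatal(err) // e.g. a Grantee missing its required Type
+//    }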
+func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +type AddLegalHoldInput struct { + _ struct{} `locationName:"AddLegalHoldRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // RetentionLegalHoldId is a required field + RetentionLegalHoldId *string `location:"querystring" locationName:"add" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RetentionLegalHoldId == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionLegalHoldId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AddLegalHoldInput) SetBucket(v string) *AddLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *AddLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *AddLegalHoldInput) SetKey(v string) *AddLegalHoldInput { + s.Key = &v + return s +} + +// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value. +func (s *AddLegalHoldInput) SetRetentionLegalHoldId(v string) *AddLegalHoldInput { + s.RetentionLegalHoldId = &v + return s +} + +type AddLegalHoldOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddLegalHoldOutput) GoString() string { + return s.String() +} + +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +// is globally unique, and the namespace is shared by all AWS accounts. +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. This date can change when making changes to + // your bucket, such as editing its bucket policy. 
+ CreationDate *time.Time `type:"timestamp"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +type BucketExtended struct { + _ struct{} `type:"structure"` + + CreationDate *time.Time `type:"timestamp"` + + CreationTemplateId *string `type:"string"` + + // Specifies the region where the bucket was created. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s BucketExtended) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketExtended) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *BucketExtended) SetCreationDate(v time.Time) *BucketExtended { + s.CreationDate = &v + return s +} + +// SetCreationTemplateId sets the CreationTemplateId field's value. +func (s *BucketExtended) SetCreationTemplateId(v string) *BucketExtended { + s.CreationTemplateId = &v + return s +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *BucketExtended) SetLocationConstraint(v string) *BucketExtended { + s.LocationConstraint = &v + return s +} + +// SetName sets the Name field's value. +func (s *BucketExtended) SetName(v string) *BucketExtended { + s.Name = &v + return s +} + +// Container for logging status information. +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. 
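+//
+// An editorial sketch of the fluent setter style used throughout this file
+// (bucket names are assumed placeholders): configuring a BucketLoggingStatus
+// and validating the nested LoggingEnabled element.
+//
+//    status := new(s3.BucketLoggingStatus).SetLoggingEnabled(&s3.LoggingEnabled{
+//        TargetBucket: aws.String("example-log-bucket"),
+//        TargetPrefix: aws.String("access-logs/"),
+//    })
+//    if err := status.Validate(); err != nil {
+//        log.Fatal(err) // reports a missing TargetBucket or TargetPrefix
+//    }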
+func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +type BucketProtectionDefaultRetention struct { + _ struct{} `type:"structure"` + + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s BucketProtectionDefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketProtectionDefaultRetention) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketProtectionDefaultRetention) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketProtectionDefaultRetention"} + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *BucketProtectionDefaultRetention) SetDays(v int64) *BucketProtectionDefaultRetention { + s.Days = &v + return s +} + +type BucketProtectionMaximumRetention struct { + _ struct{} `type:"structure"` + + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s BucketProtectionMaximumRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketProtectionMaximumRetention) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketProtectionMaximumRetention) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketProtectionMaximumRetention"} + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *BucketProtectionMaximumRetention) SetDays(v int64) *BucketProtectionMaximumRetention { + s.Days = &v + return s +} + +type BucketProtectionMinimumRetention struct { + _ struct{} `type:"structure"` + + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s BucketProtectionMinimumRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketProtectionMinimumRetention) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketProtectionMinimumRetention) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketProtectionMinimumRetention"} + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *BucketProtectionMinimumRetention) SetDays(v int64) *BucketProtectionMinimumRetention { + s.Days = &v + return s +} + +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// S3 User Guide. +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). 
+ // You can add up to 100 rules to the configuration. + // + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + _ struct{} `type:"structure"` + + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. 
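+//
+// A short editorial sketch (values are assumed placeholders): building the
+// CORS configuration described above with a single rule, then validating it
+// client-side before sending.
+//
+//    corsConfig := &s3.CORSConfiguration{
+//        CORSRules: []*s3.CORSRule{{
+//            AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
+//            AllowedOrigins: []*string{aws.String("https://example.com")},
+//            AllowedHeaders: []*string{aws.String("*")},
+//            MaxAgeSeconds:  aws.Int64(3000),
+//        }},
+//    }
+//    if err := corsConfig.Validate(); err != nil {
+//        log.Fatal(err) // AllowedMethods and AllowedOrigins are required
+//    }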
+func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. +type CommonPrefix struct { + _ struct{} `type:"structure"` + + // Container for the specified common prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +type CompleteMultipartUploadInput struct { + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + + // Name of the bucket to which the multipart upload was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The container for the multipart upload request information. + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Date on which it will be legal to delete or modify the object. This field + // can only be specified if Retention-Directive is REPLACE. You can only specify + // this or the Retention-Period header. If both are specified a 400 error will + // be returned. If neither is specified the bucket's DefaultRetention period + // will be used. + RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"` + + // A single legal hold to apply to the object. 
This field can only be specified
+	// if Retention-Directive is REPLACE. A legal hold is a string with a maximum
+	// length of 64 characters. The object cannot be overwritten or deleted until
+	// all legal holds associated with the object are removed.
+	RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"`
+
+	// Retention period to store on the object in seconds. If both this field and
+	// Retention-Expiration-Date are specified, a 400 error is returned. If neither
+	// is specified, the bucket's DefaultRetention period will be used. 0 is a legal
+	// value assuming the bucket's minimum retention period is also 0.
+	RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"`
+
+	// ID for the initiated multipart upload.
+	//
+	// UploadId is a required field
+	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CompleteMultipartUploadInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.UploadId == nil {
+		invalidParams.Add(request.NewErrParamRequired("UploadId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *CompleteMultipartUploadInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput {
+	s.Key = &v
+	return s
+}
+
+// SetMultipartUpload sets the MultipartUpload field's value.
+func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput {
+	s.MultipartUpload = v
+	return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput {
+	s.RequestPayer = &v
+	return s
+}
+
+// SetRetentionExpirationDate sets the RetentionExpirationDate field's value.
+func (s *CompleteMultipartUploadInput) SetRetentionExpirationDate(v time.Time) *CompleteMultipartUploadInput {
+	s.RetentionExpirationDate = &v
+	return s
+}
+
+// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value.
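+//
+// A hedged usage sketch (editorial addition; names and values are assumed
+// placeholders, and client is an existing *s3.S3): completing an upload by
+// listing the ETag and part number of each previously uploaded part.
+//
+//    resp, err := client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("example-key"),
+//        UploadId: aws.String("example-upload-id"),
+//        MultipartUpload: &s3.CompletedMultipartUpload{
+//            Parts: []*s3.CompletedPart{
+//                {ETag: aws.String("etag-of-part-1"), PartNumber: aws.Int64(1)},
+//                {ETag: aws.String("etag-of-part-2"), PartNumber: aws.Int64(2)},
+//            },
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(resp.Location))
+//    }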
+func (s *CompleteMultipartUploadInput) SetRetentionLegalHoldId(v string) *CompleteMultipartUploadInput { + s.RetentionLegalHoldId = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *CompleteMultipartUploadInput) SetRetentionPeriod(v int64) *CompleteMultipartUploadInput { + s.RetentionPeriod = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket that contains the newly created object. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + Bucket *string `type:"string"` + + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The object key of the newly created object. + Key *string `min:"1" type:"string"` + + // The URI that identifies the newly created object. + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart + // upload request, the response includes this header. 
It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created object, in case the bucket has versioning + // turned on. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { + s.VersionId = &v + return s +} + +// The container for the completed multipart upload details. +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + // Array of CompletedPart data types. + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +// SetParts sets the Parts field's value. +func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { + s.Parts = v + return s +} + +// Details of the parts that were uploaded. +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. 
+ PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CompletedPart) SetETag(v string) *CompletedPart { + s.ETag = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { + s.PartNumber = &v + return s +} + +// A container for describing a condition that must be met for the specified +// redirect to apply. For example, 1. If request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +// redirect request to another host where you might process the error. +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect request for all pages with the prefix docs/, the key prefix will + // be /docs, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for + // the redirect to be applied. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + KeyPrefixEquals *string `type:"string"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value. +func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition { + s.HttpErrorCodeReturnedEquals = &v + return s +} + +// SetKeyPrefixEquals sets the KeyPrefixEquals field's value. +func (s *Condition) SetKeyPrefixEquals(v string) *Condition { + s.KeyPrefixEquals = &v + return s +} + +type CopyObjectInput struct { + _ struct{} `locationName:"CopyObjectRequest" type:"structure"` + + // The canned ACL to apply to the object. + // + // This action is not supported by Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The name of the destination bucket. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// When using this action with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action using S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// Specifies the source object for the copy operation. You specify the value
+	// in one of two formats, depending on whether you want to access the source
+	// object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
+	//
+	//    * For objects not accessed through an access point, specify the name of
+	//    the source bucket and the key of the source object, separated by a slash
+	//    (/). For example, to copy the object reports/january.pdf from the bucket
+	//    awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value
+	//    must be URL encoded.
+	//
+	//    * For objects accessed through access points, specify the Amazon Resource
+	//    Name (ARN) of the object as accessed through the access point, in the format
+	//    arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
+	//    For example, to copy the object reports/january.pdf through access point
+	//    my-access-point owned by account 123456789012 in Region us-west-2, use
+	//    the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+	//    The value must be URL encoded. Amazon S3 supports copy operations using
+	//    access points only when the source and destination buckets are in the
+	//    same AWS Region. Alternatively, for objects accessed through Amazon S3
+	//    on Outposts, specify the ARN of the object as accessed in the format
+	//    arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+	//    For example, to copy the object reports/january.pdf through outpost my-outpost
+	//    owned by account 123456789012 in Region us-west-2, use the URL encoding
+	//    of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+	//    The value must be URL encoded.
+	//
+	// To copy a specific version of an object, append ?versionId=<version-id> to
+	// the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+	// If you don't specify a version ID, Amazon S3 copies the latest version of
+	// the source object.
+	//
+	// CopySource is a required field
+	CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+	// Copies the object if its entity tag (ETag) matches the specified tag.
+	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+	// Copies the object if it has been modified since the specified time.
+	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+	// Copies the object if its entity tag (ETag) is different than the specified
+	// ETag.
+	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+	// Copies the object if it hasn't been modified since the specified time.
+	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
+
+	// Specifies the algorithm to use when decrypting the source object (for example,
+	// AES256).
+	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+	// the source object. The encryption key provided in this header must be one
+	// that was used when the source object was created.
+	CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
+	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	// The key of the destination object.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Specifies whether the metadata is copied from the source object or replaced
+	// with metadata provided in the request.
+	MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// This header controls how the Protection state of the source object is copied
+	// to the destination object. If copied, the retention period and all legal
+	// holds are copied onto the new object. The legal hold date is set to the date
+	// of the copy.
+	RetentionDirective *string `location:"header" locationName:"Retention-Directive" type:"string" enum:"RetentionDirective"`
+
+	// Date on which it will be legal to delete or modify the object. This field
+	// can only be specified if Retention-Directive is REPLACE. You can only specify
+	// this or the Retention-Period header. If both are specified, a 400 error will
+	// be returned. If neither is specified, the bucket's DefaultRetention period
+	// will be used.
+	RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"`
+
+	// A single legal hold to apply to the object. This field can only be specified
+	// if Retention-Directive is REPLACE. A legal hold is a string with a maximum
+	// length of 64 characters. The object cannot be overwritten or deleted until
+	// all legal holds associated with the object are removed.
+	RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"`
+
+	// Retention period to store on the object in seconds. The object can be neither
+	// overwritten nor deleted until the amount of time specified in the retention
+	// period has elapsed. If both this field and Retention-Expiration-Date are
+	// specified, a 400 error is returned. If neither is specified, the bucket's
+	// DefaultRetention period will be used. 0 is a legal value assuming the bucket's
+	// minimum retention period is also 0.
+	RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. For information about configuring this using any of the officially
+	// supported AWS SDKs and the AWS CLI, see Specifying the Signature Version in Request
+	// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+	// in the Amazon S3 Developer Guide.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+	// The server-side encryption algorithm used when storing this object in Amazon
+	// S3 (for example, AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+	// objects. The STANDARD storage class provides high durability and high availability.
+	// Depending on performance needs, you can specify a different Storage Class.
+	// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+	// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+	// in the Amazon S3 Service Developer Guide.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// The tag-set for the destination object; this value must be used in conjunction
+	// with the TaggingDirective. The tag-set must be encoded as URL Query parameters.
+	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+	// Specifies whether the object tag-set is copied from the source object or
+	// replaced with the tag-set provided in the request.
+	TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
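+//
+// A hedged example of the copy-source format described above (editorial
+// sketch; bucket and key names are assumed placeholders, and client is an
+// existing *s3.S3). Note that the copy source must be URL encoded:
+//
+//    resp, err := client.CopyObject(&s3.CopyObjectInput{
+//        Bucket:     aws.String("dest-bucket"),
+//        Key:        aws.String("reports/january-copy.pdf"),
+//        CopySource: aws.String(url.PathEscape("src-bucket/reports/january.pdf")),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(resp.CopyObjectResult.ETag))
+//    }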
+func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. 
+func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetRetentionDirective sets the RetentionDirective field's value. +func (s *CopyObjectInput) SetRetentionDirective(v string) *CopyObjectInput { + s.RetentionDirective = &v + return s +} + +// SetRetentionExpirationDate sets the RetentionExpirationDate field's value. +func (s *CopyObjectInput) SetRetentionExpirationDate(v time.Time) *CopyObjectInput { + s.RetentionExpirationDate = &v + return s +} + +// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value. +func (s *CopyObjectInput) SetRetentionLegalHoldId(v string) *CopyObjectInput { + s.RetentionLegalHoldId = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *CopyObjectInput) SetRetentionPeriod(v int64) *CopyObjectInput { + s.RetentionPeriod = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
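+//
+// For illustration only (an editorial sketch; the key ID is an assumed
+// placeholder): requesting SSE-KMS on the destination object of a copy by
+// chaining the setters defined above.
+//
+//    input := (&s3.CopyObjectInput{}).
+//        SetBucket("dest-bucket").
+//        SetKey("dest-key").
+//        SetCopySource("src-bucket/src-key").
+//        SetServerSideEncryption("aws:kms").
+//        SetSSEKMSKeyId("example-kms-key-id")
+//    _, err := client.CopyObject(input)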
+func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + // Container for all response elements. + CopyObjectResult *CopyObjectResult `type:"structure"` + + // Version of the copied object in the destination bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. 
+func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +// Container for all response elements. +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. The source and destination ETag + // is identical for a successfully copied non-multipart object. + ETag *string `type:"string"` + + // Creation date of the object. + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +// Container for all response elements. +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +// The configuration information for the bucket. 
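// Putting CopyObjectInput and CopyObjectOutput together, a minimal server-side
// copy looks like the sketch below. It assumes a configured *s3.S3 client from
// this package; CopySource is the URL-encoded "source-bucket/source-key" pair,
// and the returned CopyObjectResult carries the new object's ETag. Names here
// are placeholders.
func copyObject(svc *s3.S3) error {
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"),
		Key:        aws.String("destination-key"),
		CopySource: aws.String("source-bucket/source-key"),
	})
	if err != nil {
		return err
	}
	fmt.Println("copied, ETag:", aws.StringValue(out.CopyObjectResult.ETag))
	return nil
}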
+type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket will be created. If you don't specify + // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +type CreateBucketInput struct { + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // The name of the bucket to create. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The configuration information for the bucket. + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // The root key used by Key Protect to encrypt this bucket. This value must + // be the full CRN of the root key. + IBMSSEKPCustomerRootKeyCrn *string `location:"header" locationName:"ibm-sse-kp-customer-root-key-crn" type:"string"` + + // The algorithm and key size to use with the encryption key stored by using + // Key Protect. This value must be set to the string "AES256". + IBMSSEKPEncryptionAlgorithm *string `location:"header" locationName:"ibm-sse-kp-encryption-algorithm" type:"string"` + + // Sets the IBM Service Instance Id in the request. + // + // Only Valid for IBM IAM Authentication + IBMServiceInstanceId *string `location:"header" locationName:"ibm-service-instance-id" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// SetIBMSSEKPCustomerRootKeyCrn sets the IBMSSEKPCustomerRootKeyCrn field's value. +func (s *CreateBucketInput) SetIBMSSEKPCustomerRootKeyCrn(v string) *CreateBucketInput { + s.IBMSSEKPCustomerRootKeyCrn = &v + return s +} + +// SetIBMSSEKPEncryptionAlgorithm sets the IBMSSEKPEncryptionAlgorithm field's value. +func (s *CreateBucketInput) SetIBMSSEKPEncryptionAlgorithm(v string) *CreateBucketInput { + s.IBMSSEKPEncryptionAlgorithm = &v + return s +} + +// SetIBMServiceInstanceId sets the IBMServiceInstanceId field's value. +func (s *CreateBucketInput) SetIBMServiceInstanceId(v string) *CreateBucketInput { + s.IBMServiceInstanceId = &v + return s +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket will be created. If you are creating + // a bucket on the US East (N. Virginia) Region (us-east-1), you do not need + // to specify the location. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { + s.Location = &v + return s +} + +type CreateMultipartUploadInput struct { + _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` + + // The canned ACL to apply to the object. + // + // This action is not supported by Amazon S3 on Outposts. 
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The name of the bucket to which to initiate the upload + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // This action is not supported by Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // This action is not supported by Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the multipart upload is to be initiated. 
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
+ // object encryption. All GET and PUT requests for an object protected by AWS
+ // KMS will fail if not made via SSL or using SigV4. For information about configuring
+ // using any of the officially supported AWS SDKs and AWS CLI, see Specifying
+ // the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 Developer Guide.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 Service Developer Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. 
+func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, the response includes this header. The header indicates + // when the initiated multipart upload becomes eligible for an abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response also includes the x-amz-abort-rule-id header that provides the + // ID of the lifecycle configuration rule that defines this action. 
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // The name of the bucket to which the multipart upload was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. 
+ UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +// Container for the objects to delete. +type Delete struct { + _ struct{} `type:"structure"` + + // The objects to delete. + // + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. +func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. +func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +type DeleteBucketCorsInput struct { + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + + // Specifies the bucket whose cors configuration is being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketCorsInput) SetExpectedBucketOwner(v string) *DeleteBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + + // Specifies the bucket being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInput) SetExpectedBucketOwner(v string) *DeleteBucketInput { + s.ExpectedBucketOwner = &v + return s +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + + // The bucket name of the lifecycle to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *DeleteBucketLifecycleInput) SetExpectedBucketOwner(v string) *DeleteBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + + // The bucket name for which you want to remove the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *DeleteBucketWebsiteInput) SetExpectedBucketOwner(v string) *DeleteBucketWebsiteInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type DeleteBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteLegalHoldInput struct {
+ _ struct{} `locationName:"DeleteLegalHoldRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // RetentionLegalHoldId is a required field
+ RetentionLegalHoldId *string `location:"querystring" locationName:"remove" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteLegalHoldInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLegalHoldInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteLegalHoldInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteLegalHoldInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RetentionLegalHoldId == nil {
+ invalidParams.Add(request.NewErrParamRequired("RetentionLegalHoldId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteLegalHoldInput) SetBucket(v string) *DeleteLegalHoldInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteLegalHoldInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteLegalHoldInput) SetKey(v string) *DeleteLegalHoldInput {
+ s.Key = &v
+ return s
+}
+
+// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value.
+func (s *DeleteLegalHoldInput) SetRetentionLegalHoldId(v string) *DeleteLegalHoldInput {
+ s.RetentionLegalHoldId = &v
+ return s
+}
+
+type DeleteLegalHoldOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLegalHoldOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLegalHoldOutput) GoString() string {
+ return s.String()
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of an object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp"`
+
+ // The account that created the delete marker.
+ Owner *Owner `type:"structure"`
+
+ // Version ID of an object.
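// DeleteLegalHold is an IBM COS retention extension rather than a standard
// S3 call; per the struct tags above, the hold ID travels as the "remove"
// querystring parameter. A sketch that assumes the package's generated
// DeleteLegalHold operation alongside these input/output types; all names
// are placeholders.
func removeLegalHold(svc *s3.S3) error {
	_, err := svc.DeleteLegalHold(&s3.DeleteLegalHoldInput{
		Bucket:               aws.String("retention-bucket"),
		Key:                  aws.String("held-object"),
		RetentionLegalHoldId: aws.String("example-hold-id"),
	})
	return err
}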
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +// SetIsLatest sets the IsLatest field's value. +func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry { + s.Owner = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { + s.VersionId = &v + return s +} + +type DeleteObjectInput struct { + _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` + + // The bucket name of the bucket containing the object. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key name of the object to delete. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectInput) SetExpectedBucketOwner(v string) *DeleteObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. 
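// For a simple DELETE in a versioning-enabled bucket, S3 does not remove
// data; it layers a delete marker on top, and DeleteObjectOutput's
// DeleteMarker and VersionId fields report that. A minimal sketch, assuming
// a configured client and placeholder names:
func deleteLatest(svc *s3.S3) error {
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		return err
	}
	if aws.BoolValue(out.DeleteMarker) {
		fmt.Println("delete marker created:", aws.StringValue(out.VersionId))
	}
	return nil
}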
+func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingInput struct { + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + + // The bucket name containing the objects from which to remove the tags. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key that identifies the object in the bucket from which to remove all + // tags. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
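// DeleteObjectTagging strips the object's whole tag-set in one call; supply
// VersionId to untag a single version only. A sketch with placeholder names,
// reading back the versionId field from the output defined below:
func clearTags(svc *s3.S3) error {
	out, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("tagged-object"),
	})
	if err != nil {
		return err
	}
	fmt.Println("tags removed from version:", aws.StringValue(out.VersionId))
	return nil
}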
+func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectTaggingInput) SetExpectedBucketOwner(v string) *DeleteObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +type DeleteObjectsInput struct { + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + + // The bucket name containing the objects to delete. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for the request. + // + // Delete is a required field + Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. 
Required to
+	// permanently delete a versioned object if versioning is configured with MFA
+	// delete enabled.
+	MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Delete == nil {
+		invalidParams.Add(request.NewErrParamRequired("Delete"))
+	}
+	if s.Delete != nil {
+		if err := s.Delete.Validate(); err != nil {
+			invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *DeleteObjectsInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetDelete sets the Delete field's value.
+func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput {
+	s.Delete = v
+	return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteObjectsInput) SetExpectedBucketOwner(v string) *DeleteObjectsInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput {
+	s.MFA = &v
+	return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
+	s.RequestPayer = &v
+	return s
+}
+
+type DeleteObjectsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Container element for a successful delete. It identifies the object that
+	// was successfully deleted.
+	Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+	// Container for a failed delete action that describes the object that Amazon
+	// S3 attempted to delete and the error it encountered.
+	Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsOutput) GoString() string {
+	return s.String()
+}
+
+// SetDeleted sets the Deleted field's value.
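+//
+// A minimal sketch of a batch delete and of reading the result, assuming an
+// initialized service client svc; the bucket and key names are placeholders:
+//
+//	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//		Bucket: aws.String("example-bucket"),
+//		Delete: &s3.Delete{
+//			Objects: []*s3.ObjectIdentifier{
+//				{Key: aws.String("example-key-1")},
+//				{Key: aws.String("example-key-2")},
+//			},
+//		},
+//	})
+//	if err == nil {
+//		fmt.Printf("deleted %d, failed %d\n", len(out.Deleted), len(out.Errors))
+//	}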
+func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +type DeletePublicAccessBlockInput struct { + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeletePublicAccessBlockInput) SetExpectedBucketOwner(v string) *DeletePublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Information about the deleted object. +type DeletedObject struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. + DeleteMarker *bool `type:"boolean"` + + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header + // is the version ID of the object version deleted. + DeleteMarkerVersionId *string `type:"string"` + + // The name of the deleted object. + Key *string `min:"1" type:"string"` + + // The version ID of the deleted object. 
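+	//
+	// A sketch of telling plain deletions apart from delete markers in a
+	// DeleteObjects response (out is a *DeleteObjectsOutput from a previous,
+	// hypothetical call):
+	//
+	//	for _, d := range out.Deleted {
+	//		if aws.BoolValue(d.DeleteMarker) {
+	//			fmt.Println("marker:", aws.StringValue(d.DeleteMarkerVersionId))
+	//		}
+	//	}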
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { + s.DeleteMarker = &v + return s +} + +// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. +func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { + s.DeleteMarkerVersionId = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeletedObject) SetKey(v string) *DeletedObject { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeletedObject) SetVersionId(v string) *DeletedObject { + s.VersionId = &v + return s +} + +// Container for all error elements. +type Error struct { + _ struct{} `type:"structure"` + + // The error code is a string that uniquely identifies an error condition. It + // is meant to be read and understood by programs that detect and handle errors + // by type. + // + // Amazon S3 error codes + // + // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AccountProblem Description: There is a problem with your AWS account + // that prevents the action from completing successfully. Contact AWS Support + // for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault Code + // Prefix: Client + // + // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource + // has been disabled. Contact AWS Support for further assistance. HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AmbiguousGrantByEmailAddress Description: The email address you + // provided is associated with more than one account. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all AWS + // Regions except in the North Virginia Region. For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. 
HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided + // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The action is not valid for the + // current state of the object. 
HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact AWS Support for further assistance. HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact AWS Support for more information. + // HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact AWS Support for more information. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. 
HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. 
HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional action + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your AWS secret access + // key and signing method. 
For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP + // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + Code *string `type:"string"` + + // The error key. + Key *string `min:"1" type:"string"` + + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. + Message *string `type:"string"` + + // The version ID of the error. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v + return s +} + +// The error information. +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). 
+	//
+	// Key is a required field
+	Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDocument) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorDocument) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ErrorDocument) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
+	s.Key = &v
+	return s
+}
+
+type ExtendObjectRetentionInput struct {
+	_ struct{} `locationName:"ExtendObjectRetentionRequest" type:"structure"`
+
+	// Additional time, in seconds, to add to the existing retention period for
+	// the object. If this field and New-Retention-Period and/or New-Retention-Expiration-Date
+	// are specified, a 400 error will be returned. If none of these request headers
+	// is specified, a 400 error will be returned. The retention period of an object
+	// may be extended up to the bucket's maximum retention period from the time
+	// of the request.
+	AdditionalRetentionPeriod *int64 `location:"header" locationName:"Additional-Retention-Period" type:"integer"`
+
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Retention period, in seconds, for the object. Retention will be enforced
+	// from the current time until current time + the value in this header. This
+	// value must be within the ranges defined for the bucket.
+	ExtendRetentionFromCurrentTime *int64 `location:"header" locationName:"Extend-Retention-From-Current-Time" type:"integer"`
+
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	NewRetentionExpirationDate *time.Time `location:"header" locationName:"New-Retention-Expiration-Date" type:"timestamp" timestampFormat:"iso8601"`
+
+	NewRetentionPeriod *int64 `location:"header" locationName:"New-Retention-Period" type:"integer"`
+}
+
+// String returns the string representation
+func (s ExtendObjectRetentionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExtendObjectRetentionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ExtendObjectRetentionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ExtendObjectRetentionInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAdditionalRetentionPeriod sets the AdditionalRetentionPeriod field's value.
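+//
+// A minimal sketch of extending an object's retention period by one day,
+// assuming an initialized service client svc against an IBM COS endpoint that
+// supports this extension; the bucket and key names are placeholders:
+//
+//	_, err := svc.ExtendObjectRetention(&s3.ExtendObjectRetentionInput{
+//		Bucket:                    aws.String("example-bucket"),
+//		Key:                       aws.String("example-key"),
+//		AdditionalRetentionPeriod: aws.Int64(86400), // seconds
+//	})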
+func (s *ExtendObjectRetentionInput) SetAdditionalRetentionPeriod(v int64) *ExtendObjectRetentionInput { + s.AdditionalRetentionPeriod = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ExtendObjectRetentionInput) SetBucket(v string) *ExtendObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *ExtendObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExtendRetentionFromCurrentTime sets the ExtendRetentionFromCurrentTime field's value. +func (s *ExtendObjectRetentionInput) SetExtendRetentionFromCurrentTime(v int64) *ExtendObjectRetentionInput { + s.ExtendRetentionFromCurrentTime = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ExtendObjectRetentionInput) SetKey(v string) *ExtendObjectRetentionInput { + s.Key = &v + return s +} + +// SetNewRetentionExpirationDate sets the NewRetentionExpirationDate field's value. +func (s *ExtendObjectRetentionInput) SetNewRetentionExpirationDate(v time.Time) *ExtendObjectRetentionInput { + s.NewRetentionExpirationDate = &v + return s +} + +// SetNewRetentionPeriod sets the NewRetentionPeriod field's value. +func (s *ExtendObjectRetentionInput) SetNewRetentionPeriod(v int64) *ExtendObjectRetentionInput { + s.NewRetentionPeriod = &v + return s +} + +type ExtendObjectRetentionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ExtendObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExtendObjectRetentionOutput) GoString() string { + return s.String() +} + +type GetBucketAclInput struct { + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + + // Specifies the S3 bucket whose ACL is being requested. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAclInput) SetExpectedBucketOwner(v string) *GetBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. 
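+	//
+	// A sketch of reading the grants from a GetBucketAcl response, assuming an
+	// initialized service client svc; the bucket name is a placeholder:
+	//
+	//	out, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
+	//		Bucket: aws.String("example-bucket"),
+	//	})
+	//	if err == nil {
+	//		for _, g := range out.Grants {
+	//			fmt.Println(aws.StringValue(g.Permission))
+	//		}
+	//	}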
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +type GetBucketCorsInput struct { + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + + // The bucket name for which to get the cors configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketCorsInput) SetExpectedBucketOwner(v string) *GetBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + // COS allows only one Rule. + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketLocationInput struct { + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + + // The name of the bucket for which to get the location. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
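+//
+// Validation also runs automatically when a request is sent; a sketch of
+// invoking it directly on a hand-built input (the bucket name is a
+// placeholder):
+//
+//	input := &s3.GetBucketLocationInput{Bucket: aws.String("example-bucket")}
+//	if err := input.Validate(); err != nil {
+//		// a required field is missing or malformed
+//	}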
+func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLocationInput) SetExpectedBucketOwner(v string) *GetBucketLocationInput { + s.ExpectedBucketOwner = &v + return s +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + + // The bucket name for which to get the logging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
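+//
+// The setters return the receiver, so an input can be built fluently; a
+// sketch (the bucket name and account ID are placeholders):
+//
+//	input := new(s3.GetBucketLoggingInput).
+//		SetBucket("example-bucket").
+//		SetExpectedBucketOwner("123456789012")
+//	out, err := svc.GetBucketLogging(input)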
+func (s *GetBucketLoggingInput) SetExpectedBucketOwner(v string) *GetBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +type GetBucketProtectionConfigurationInput struct { + _ struct{} `locationName:"GetBucketProtectionConfigurationRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketProtectionConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketProtectionConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketProtectionConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketProtectionConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketProtectionConfigurationInput) SetBucket(v string) *GetBucketProtectionConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketProtectionConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketProtectionConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ProtectionConfiguration"` + + // Bucket protection configuration + ProtectionConfiguration *ProtectionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketProtectionConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketProtectionConfigurationOutput) GoString() string { + return s.String() +} + +// SetProtectionConfiguration sets the ProtectionConfiguration field's value. +func (s *GetBucketProtectionConfigurationOutput) SetProtectionConfiguration(v *ProtectionConfiguration) *GetBucketProtectionConfigurationOutput { + s.ProtectionConfiguration = v + return s +} + +type GetBucketVersioningInput struct { + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + + // The name of the bucket for which to get the versioning information. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketVersioningInput) SetExpectedBucketOwner(v string) *GetBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +type GetBucketWebsiteInput struct { + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + + // The bucket name for which to get the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketWebsiteInput) SetExpectedBucketOwner(v string) *GetBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + // The object key name of the website error document to use for 4XX class errors. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website (for example index.html). + IndexDocument *IndexDocument `type:"structure"` + + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +type GetObjectAclInput struct { + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + + // The bucket name that contains the object for which to get the ACL information. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+	// When using this action with an access point through the AWS SDKs, you provide
+	// the access point ARN in place of the bucket name. For more information about
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The key of the object for which to get the ACL information.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectAclInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectAclInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectAclInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *GetObjectAclInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetObjectAclInput) SetExpectedBucketOwner(v string) *GetObjectAclInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
+	s.Key = &v
+	return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput {
+	s.RequestPayer = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput {
+	s.VersionId = &v
+	return s
+}
+
+type GetObjectAclOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of grants.
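+	//
+	// A sketch of fetching the ACL of one specific object version, assuming an
+	// initialized service client svc; all names and IDs are placeholders:
+	//
+	//	out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
+	//		Bucket:    aws.String("example-bucket"),
+	//		Key:       aws.String("example-key"),
+	//		VersionId: aws.String("example-version-id"),
+	//	})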
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +type GetObjectInput struct { + _ struct{} `locationName:"GetObjectRequest" type:"structure"` + + // The bucket name containing the object. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). 
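+ //
+ // For illustration only (editor's sketch): a conditional GET that is answered
+ // with a 304 instead of the body when the cached ETag still matches; `svc`
+ // and `cachedETag` are assumed to exist, and the names are placeholders.
+ //
+ //    out, err := svc.GetObject(&s3.GetObjectInput{
+ //        Bucket:      aws.String("examplebucket"),
+ //        Key:         aws.String("exampleobject"),
+ //        IfNoneMatch: aws.String(cachedETag),
+ //    })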
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // Key of the object to get.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+ // Useful for downloading just a part of an object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+ // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35).
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
+
+ // Specifies the algorithm to use when decrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 used to encrypt
+ // the data. This value is used to decrypt the object when recovering it and
+ // must match the one used when storing the data. The key must be appropriate
+ // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. 
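+//
+// Editor's note (illustrative, not generated documentation): like the other
+// generated setters, this returns the receiver, so a request can be built by
+// chaining; the bucket and key values below are placeholders.
+//
+//    input := (&s3.GetObjectInput{}).
+//        SetBucket("examplebucket").
+//        SetKey("exampleobject").
+//        SetResponseContentEncoding("gzip")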
+func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. 
It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + IBMRestoredCopyStorageClass *string `location:"header" locationName:"x-ibm-restored-copy-storage-class" type:"string" enum:"StorageClass"` + + // This header is only included if an object has transition metadata. This header + // will indicate the transition storage class and time of transition. If this + // header and the x-amz-restore header are both included, this header will indicate + // the time at which the object was originally archived. + IBMTransition *string `location:"header" locationName:"x-ibm-transition" type:"string"` + + // Creation date of the object. + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + // + // By default unmarshaled keys are written as a map keys in following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + // Amazon S3 can return this if your request involves a bucket that is either + // a source or destination in a replication rule. + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration action and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // Date on which it will be legal to delete or modify the object. You can only + // specify this or the Retention-Period header. If both are specified a 400 + // error will be returned. If neither is specified the bucket's DefaultRetention + // period will be used. + RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"` + + RetentionLegalHoldCount *int64 `location:"header" locationName:"Retention-Legal-Hold-Count" type:"integer"` + + // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date + // are specified a 400 error is returned. If neither is specified the bucket's + // DefaultRetention period will be used. 0 is a legal value assuming the bucket's + // minimum retention period is also 0. 
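+ //
+ // For illustration only (editor's sketch): the retention headers come back
+ // as plain values on the output; `out` is an assumed *s3.GetObjectOutput.
+ //
+ //    if out.RetentionPeriod != nil {
+ //        fmt.Println("retention (seconds):", aws.Int64Value(out.RetentionPeriod))
+ //    }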
+ RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. 
+func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetIBMRestoredCopyStorageClass sets the IBMRestoredCopyStorageClass field's value. +func (s *GetObjectOutput) SetIBMRestoredCopyStorageClass(v string) *GetObjectOutput { + s.IBMRestoredCopyStorageClass = &v + return s +} + +// SetIBMTransition sets the IBMTransition field's value. +func (s *GetObjectOutput) SetIBMTransition(v string) *GetObjectOutput { + s.IBMTransition = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetRetentionExpirationDate sets the RetentionExpirationDate field's value. +func (s *GetObjectOutput) SetRetentionExpirationDate(v time.Time) *GetObjectOutput { + s.RetentionExpirationDate = &v + return s +} + +// SetRetentionLegalHoldCount sets the RetentionLegalHoldCount field's value. +func (s *GetObjectOutput) SetRetentionLegalHoldCount(v int64) *GetObjectOutput { + s.RetentionLegalHoldCount = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *GetObjectOutput) SetRetentionPeriod(v int64) *GetObjectOutput { + s.RetentionPeriod = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
+func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type GetObjectTaggingInput struct { + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + + // The bucket name containing the object for which to get the tagging information. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which to get the tagging information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. 
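+ //
+ // For illustration only (editor's sketch): opting in to requester-pays
+ // charges when fetching tags; `svc` is an assumed *s3.S3 client and the
+ // bucket/key names are placeholders.
+ //
+ //    svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+ //        Bucket:       aws.String("examplebucket"),
+ //        Key:          aws.String("exampleobject"),
+ //        RequestPayer: aws.String(s3.RequestPayerRequester),
+ //    })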
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The versionId of the object for which to get the tagging information. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTaggingInput) SetExpectedBucketOwner(v string) *GetObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + // The versionId of the object for which you got the tagging information. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +type GetPublicAccessBlockInput struct { + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetPublicAccessBlockInput) SetExpectedBucketOwner(v string) *GetPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} + +// Container for S3 Glacier job parameters. +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Retrieval tier at which the restore will be processed. + // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +// Container for grant information. +type Grant struct { + _ struct{} `type:"structure"` + + // The person being granted permissions. 
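+ //
+ // For illustration only (editor's sketch): a canonical-user grant built from
+ // these types; the ID value is a placeholder.
+ //
+ //    grant := &s3.Grant{
+ //        Grantee: &s3.Grantee{
+ //            ID:   aws.String("canonical-user-id"),
+ //            Type: aws.String(s3.TypeCanonicalUser),
+ //        },
+ //        Permission: aws.String(s3.PermissionRead),
+ //    }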
+ Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +// Container for the person being granted permissions. +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // AWS Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the AWS General Reference. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s +} + +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s +} + +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v + return s +} + +// SetURI sets the URI field's value. 
+func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v + return s +} + +type HeadBucketInput struct { + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + + // The bucket name. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { + s.Bucket = &v + return s +} + +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadBucketInput) SetExpectedBucketOwner(v string) *HeadBucketInput { + s.ExpectedBucketOwner = &v + return s +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` + + // The root key used by Key Protect to encrypt this bucket. This value must + // be the full CRN of the root key. + IBMSSEKPCrkId *string `location:"header" locationName:"ibm-sse-kp-customer-root-key-crn" type:"string"` + + // Specifies whether the Bucket has Key Protect enabled. 
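+ //
+ // For illustration only (editor's sketch): checking a bucket's Key Protect
+ // state via HeadBucket; `svc` is an assumed *s3.S3 client and the bucket
+ // name is a placeholder.
+ //
+ //    out, err := svc.HeadBucket(&s3.HeadBucketInput{
+ //        Bucket: aws.String("examplebucket"),
+ //    })
+ //    if err == nil && aws.BoolValue(out.IBMSSEKPEnabled) {
+ //        fmt.Println("root key CRN:", aws.StringValue(out.IBMSSEKPCrkId))
+ //    }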
+ IBMSSEKPEnabled *bool `location:"header" locationName:"ibm-sse-kp-enabled" type:"boolean"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +// SetIBMSSEKPCrkId sets the IBMSSEKPCrkId field's value. +func (s *HeadBucketOutput) SetIBMSSEKPCrkId(v string) *HeadBucketOutput { + s.IBMSSEKPCrkId = &v + return s +} + +// SetIBMSSEKPEnabled sets the IBMSSEKPEnabled field's value. +func (s *HeadBucketOutput) SetIBMSSEKPEnabled(v bool) *HeadBucketOutput { + s.IBMSSEKPEnabled = &v + return s +} + +type HeadObjectInput struct { + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + + // The name of the bucket containing the object. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. 
Effectively performs a 'ranged' HEAD request for the part specified.
+ // Useful for querying the size of the part and the number of parts in this
+ // object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35).
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per request.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { + s.Bucket = &v + return s +} + +func (s *HeadObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. 
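+ //
+ // For illustration only (editor's sketch): HeadObject returns these headers
+ // without transferring the body, so checking an object's size is cheap;
+ // `svc` is an assumed *s3.S3 client and the names are placeholders.
+ //
+ //    out, err := svc.HeadObject(&s3.HeadObjectInput{
+ //        Bucket: aws.String("examplebucket"),
+ //        Key:    aws.String("exampleobject"),
+ //    })
+ //    if err == nil {
+ //        fmt.Println("size in bytes:", aws.Int64Value(out.ContentLength))
+ //    }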
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + IBMRestoredCopyStorageClass *string `location:"header" locationName:"x-ibm-restored-copy-storage-class" type:"string" enum:"StorageClass"` + + // This header is only included if an object has transition metadata. This header + // will indicate the transition storage class and time of transition. If this + // header and the x-amz-restore header are both included, this header will indicate + // the time at which the object was originally archived. + IBMTransition *string `location:"header" locationName:"x-ibm-transition" type:"string"` + + // Creation date of the object. + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + // + // By default unmarshaled keys are written as a map keys in following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or a destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication + // and destination bucket or buckets where Amazon S3 stores object replicas. + // When you request an object (GetObject) or object metadata (HeadObject) from + // these buckets, Amazon S3 will return the x-amz-replication-status header + // in the response as follows: + // + // * If requesting an object from the source bucket — Amazon S3 will return + // the x-amz-replication-status header if the object in your request is eligible + // for replication. 
For example, suppose that in your replication configuration, + // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects + // with key prefix TaxDocs. Any objects you upload with this key name prefix, + // for example TaxDocs/document1.pdf, are eligible for replication. For any + // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. + // + // * If requesting an object from a destination bucket — Amazon S3 will + // return the x-amz-replication-status header with value REPLICA if the object + // in your request is a replica that Amazon S3 created and there is no replica + // modification replication in progress. + // + // * When replicating objects to multiple destination buckets the x-amz-replication-status + // header acts differently. The header of the source object will only return + // a value of COMPLETED when replication is successful to all destinations. + // The header will remain at value PENDING until replication has completed + // for all destinations. If one or more destinations fails replication the + // header will return FAILED. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value ongoing-request="true". + // + // For more information about archiving objects, see Transitioning Objects: + // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // Date on which it will be legal to delete or modify the object. You can only + // specify this or the Retention-Period header. If both are specified a 400 + // error will be returned. If neither is specified the bucket's DefaultRetention + // period will be used. + RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"` + + RetentionLegalHoldCount *int64 `location:"header" locationName:"Retention-Legal-Hold-Count" type:"integer"` + + // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date + // are specified a 400 error is returned. If neither is specified the bucket's + // DefaultRetention period will be used. 0 is a legal value assuming the bucket's + // minimum retention period is also 0. 
+ RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If the object is stored using server-side encryption either with an AWS KMS + // customer master key (CMK) or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. 
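+
+// Editor's illustrative sketch (not part of the generated file): reading the
+// more common HeadObjectOutput fields after a HeadObject call. "svc" is an
+// assumed *s3.S3 client built from a configured session, and the bucket/key
+// names are hypothetical; aws.StringValue and aws.Int64Value are the SDK's
+// nil-safe pointer helpers.
+//
+//	out, err := svc.HeadObject(&s3.HeadObjectInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("my-object"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Printf("etag=%s size=%d retention=%ds\n",
+//		aws.StringValue(out.ETag),
+//		aws.Int64Value(out.ContentLength),
+//		aws.Int64Value(out.RetentionPeriod))
+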
+func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetIBMRestoredCopyStorageClass sets the IBMRestoredCopyStorageClass field's value. +func (s *HeadObjectOutput) SetIBMRestoredCopyStorageClass(v string) *HeadObjectOutput { + s.IBMRestoredCopyStorageClass = &v + return s +} + +// SetIBMTransition sets the IBMTransition field's value. +func (s *HeadObjectOutput) SetIBMTransition(v string) *HeadObjectOutput { + s.IBMTransition = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} + +// SetRetentionExpirationDate sets the RetentionExpirationDate field's value. +func (s *HeadObjectOutput) SetRetentionExpirationDate(v time.Time) *HeadObjectOutput { + s.RetentionExpirationDate = &v + return s +} + +// SetRetentionLegalHoldCount sets the RetentionLegalHoldCount field's value. +func (s *HeadObjectOutput) SetRetentionLegalHoldCount(v int64) *HeadObjectOutput { + s.RetentionLegalHoldCount = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *HeadObjectOutput) SetRetentionPeriod(v int64) *HeadObjectOutput { + s.RetentionPeriod = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
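+
+// Editor's illustrative sketch: each generated Set* method returns its
+// receiver, so values can be populated by chaining instead of a struct
+// literal. On output types this is mostly useful for stubbing responses in
+// tests, since the service normally fills them in.
+//
+//	stub := new(s3.HeadObjectOutput).
+//		SetETag(`"d41d8cd98f00b204e9800998ecf8427e"`).
+//		SetContentLength(0).
+//		SetStorageClass("STANDARD")
+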
+func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Container for the Suffix element. +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (for example,if the suffix is index.html and you make a request + // to samplebucket/images/ the data that is returned will be for the object + // with the key name images/index.html) The suffix must not be empty and must + // not include a slash character. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Suffix is a required field + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSuffix sets the Suffix field's value. +func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} + +// Container element that identifies who initiated the multipart upload. +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. 
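+
+// Editor's illustrative sketch: Suffix is the only field of IndexDocument and
+// it is required, so Validate rejects a missing value before any request is
+// built.
+//
+//	doc := &s3.IndexDocument{Suffix: aws.String("index.html")}
+//	if err := doc.Validate(); err != nil {
+//		return err // reported when Suffix is nil
+//	}
+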
+func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +type LegalHold struct { + _ struct{} `type:"structure"` + + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s LegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LegalHold) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LegalHold) SetDate(v time.Time) *LegalHold { + s.Date = &v + return s +} + +// SetID sets the ID field's value. +func (s *LegalHold) SetID(v string) *LegalHold { + s.ID = &v + return s +} + +// Container for lifecycle rules. You can add as many as 1000 rules. +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*LifecycleRule) *LifecycleConfiguration { + s.Rules = v + return s +} + +// Container for the expiration for the lifecycle of the object. +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +// A lifecycle rule for individual objects in an Amazon S3 bucket. +type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the expiration for the lifecycle of the object in the form of date, + // days, and whether the object has a delete marker. + Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified.
Filter is + // required if the LifecycleRule does not contain a Prefix element. + // + // Filter is a required field + Filter *LifecycleRuleFilter `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an Amazon S3 object transitions to a specified storage class. + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Filter == nil { + invalidParams.Add(request.NewErrParamRequired("Filter")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} + +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +type ListBucketsExtendedInput struct { + _ struct{} `locationName:"ListBucketsExtendedRequest" type:"structure"` + + // Sets the IBM Service Instance Id in the request. + // + // Only Valid for IBM IAM Authentication + IBMServiceInstanceId *string `location:"header" locationName:"ibm-service-instance-id" type:"string"` + + // Specifies the bucket to start with when listing all buckets.
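+
+// Editor's illustrative sketch (refers to the lifecycle types above): a
+// minimal LifecycleConfiguration with one rule carrying the required Filter
+// and Status plus an Expiration. The rule ID, prefix, and day count are
+// hypothetical.
+//
+//	cfg := &s3.LifecycleConfiguration{
+//		Rules: []*s3.LifecycleRule{{
+//			ID:         aws.String("expire-logs"),
+//			Status:     aws.String("Enabled"),
+//			Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//			Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+//		}},
+//	}
+//	if err := cfg.Validate(); err != nil {
+//		return err
+//	}
+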
+ Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to buckets that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` +} + +// String returns the string representation +func (s ListBucketsExtendedInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsExtendedInput) GoString() string { + return s.String() +} + +// SetIBMServiceInstanceId sets the IBMServiceInstanceId field's value. +func (s *ListBucketsExtendedInput) SetIBMServiceInstanceId(v string) *ListBucketsExtendedInput { + s.IBMServiceInstanceId = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListBucketsExtendedInput) SetMarker(v string) *ListBucketsExtendedInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListBucketsExtendedInput) SetMaxKeys(v int64) *ListBucketsExtendedInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListBucketsExtendedInput) SetPrefix(v string) *ListBucketsExtendedInput { + s.Prefix = &v + return s +} + +type ListBucketsExtendedOutput struct { + _ struct{} `type:"structure"` + + Buckets []*BucketExtended `locationNameList:"Bucket" type:"list"` + + // Indicates whether the returned list of buckets is truncated. + IsTruncated *bool `type:"boolean"` + + // The bucket at or after which the listing began. + Marker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + // Container for the owner's display name and ID. + Owner *Owner `type:"structure"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only buckets starting with the specified prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketsExtendedOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsExtendedOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsExtendedOutput) SetBuckets(v []*BucketExtended) *ListBucketsExtendedOutput { + s.Buckets = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketsExtendedOutput) SetIsTruncated(v bool) *ListBucketsExtendedOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListBucketsExtendedOutput) SetMarker(v string) *ListBucketsExtendedOutput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListBucketsExtendedOutput) SetMaxKeys(v int64) *ListBucketsExtendedOutput { + s.MaxKeys = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListBucketsExtendedOutput) SetOwner(v *Owner) *ListBucketsExtendedOutput { + s.Owner = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListBucketsExtendedOutput) SetPrefix(v string) *ListBucketsExtendedOutput { + s.Prefix = &v + return s +} + +type ListBucketsInput struct { + _ struct{} `locationName:"ListBucketsRequest" type:"structure"` + + // Sets the IBM Service Instance Id in the request. 
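+
+// Editor's illustrative sketch: paging through ListBucketsExtended with
+// Marker/MaxKeys until IsTruncated reports false. "svc" is an assumed *s3.S3
+// client and the prefix is hypothetical.
+//
+//	in := &s3.ListBucketsExtendedInput{
+//		Prefix:  aws.String("prod-"),
+//		MaxKeys: aws.Int64(100),
+//	}
+//	for {
+//		out, err := svc.ListBucketsExtended(in)
+//		if err != nil {
+//			return err
+//		}
+//		for _, b := range out.Buckets {
+//			fmt.Println(aws.StringValue(b.Name))
+//		}
+//		if !aws.BoolValue(out.IsTruncated) || len(out.Buckets) == 0 {
+//			break
+//		}
+//		in.Marker = out.Buckets[len(out.Buckets)-1].Name
+//	}
+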
+ // + // Only Valid for IBM IAM Authentication + IBMServiceInstanceId *string `location:"header" locationName:"ibm-service-instance-id" type:"string"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +// SetIBMServiceInstanceId sets the IBMServiceInstanceId field's value. +func (s *ListBucketsInput) SetIBMServiceInstanceId(v string) *ListBucketsInput { + s.IBMServiceInstanceId = &v + return s +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requestor. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // The owner of the buckets listed. + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { + s.Buckets = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +type ListLegalHoldsInput struct { + _ struct{} `locationName:"ListLegalHoldsRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListLegalHoldsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLegalHoldsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListLegalHoldsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListLegalHoldsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListLegalHoldsInput) SetBucket(v string) *ListLegalHoldsInput { + s.Bucket = &v + return s +} + +func (s *ListLegalHoldsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *ListLegalHoldsInput) SetKey(v string) *ListLegalHoldsInput { + s.Key = &v + return s +} + +type ListLegalHoldsOutput struct { + _ struct{} `type:"structure"` + + CreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + LegalHolds []*LegalHold `type:"list"` + + // Retention period to store on the object in seconds. The object can be neither + // overwritten nor deleted until the amount of time specified in the retention + // period has elapsed. If this field and Retention-Expiration-Date are specified + // a 400 error is returned. 
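+
+// Editor's illustrative sketch: both Bucket and Key are required for
+// ListLegalHolds, which Validate enforces client-side before the request is
+// signed. "svc" is an assumed *s3.S3 client; the names are hypothetical.
+//
+//	holds, err := svc.ListLegalHolds(&s3.ListLegalHoldsInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("contract.pdf"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, h := range holds.LegalHolds {
+//		fmt.Println(aws.StringValue(h.ID), aws.TimeValue(h.Date))
+//	}
+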
If neither is specified the bucket's DefaultRetention + // period will be used. 0 is a legal value assuming the bucket's minimum retention + // period is also 0. + RetentionPeriod *int64 `type:"integer"` + + RetentionPeriodExpirationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ListLegalHoldsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLegalHoldsOutput) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *ListLegalHoldsOutput) SetCreateTime(v time.Time) *ListLegalHoldsOutput { + s.CreateTime = &v + return s +} + +// SetLegalHolds sets the LegalHolds field's value. +func (s *ListLegalHoldsOutput) SetLegalHolds(v []*LegalHold) *ListLegalHoldsOutput { + s.LegalHolds = v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *ListLegalHoldsOutput) SetRetentionPeriod(v int64) *ListLegalHoldsOutput { + s.RetentionPeriod = &v + return s +} + +// SetRetentionPeriodExpirationDate sets the RetentionPeriodExpirationDate field's value. +func (s *ListLegalHoldsOutput) SetRetentionPeriodExpirationDate(v time.Time) *ListLegalHoldsOutput { + s.RetentionPeriodExpirationDate = &v + return s +} + +type ListMultipartUploadsInput struct { + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. 
An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to + // the key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. 
+func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListMultipartUploadsInput) SetExpectedBucketOwner(v string) *ListMultipartUploadsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Contains the delimiter you specified in the request. If you don't specify + // a delimiter in your request, this element is absent from the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. 
+ UploadIdMarker *string `type:"string"` + + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +type ListObjectVersionsInput struct { + _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + + // The bucket name that contains the objects. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes. These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. 
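+
+// Editor's illustrative sketch (refers to the ListMultipartUploads types
+// above): resuming a truncated listing by feeding NextKeyMarker and
+// NextUploadIdMarker back in as the marker pair.
+//
+//	in := &s3.ListMultipartUploadsInput{Bucket: aws.String("my-bucket")}
+//	for {
+//		out, err := svc.ListMultipartUploads(in)
+//		if err != nil {
+//			return err
+//		}
+//		for _, u := range out.Uploads {
+//			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
+//		}
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		in.KeyMarker = out.NextKeyMarker
+//		in.UploadIdMarker = out.NextUploadIdMarker
+//	}
+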
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. By default the + // action returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. If additional keys satisfy the search criteria, + // but were not returned because max-keys was exceeded, the response contains + // IsTruncated set to true. To return the additional keys, see key-marker + // and version-id-marker. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Use this parameter to select only those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different groupings + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) You can use prefix with delimiter to roll + // up numerous objects into a single result under CommonPrefixes. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectVersionsInput) SetExpectedBucketOwner(v string) *ListObjectVersionsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Container for an object that is a delete marker. + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last key returned in a truncated response. + KeyMarker *string `type:"string"` + + // Specifies the maximum number of objects to return. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. 
+ NextVersionIdMarker *string `type:"string"` + + // Selects objects that start with the value supplied by this parameter. + Prefix *string `type:"string"` + + // Marks the last version of the key returned in a truncated response. + VersionIdMarker *string `type:"string"` + + // Container for version information. + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. +func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +type ListObjectsInput struct { + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + + // The name of the bucket containing the objects. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. 
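+
+// Editor's illustrative sketch (refers to the ListObjectVersions types
+// above): walking every version and delete marker in a bucket by following
+// NextKeyMarker/NextVersionIdMarker, as the field documentation describes.
+//
+//	in := &s3.ListObjectVersionsInput{Bucket: aws.String("my-bucket")}
+//	for {
+//		out, err := svc.ListObjectVersions(in)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Printf("%d versions, %d delete markers\n",
+//			len(out.Versions), len(out.DeleteMarkers))
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		in.KeyMarker = out.NextKeyMarker
+//		in.VersionIdMarker = out.NextVersionIdMarker
+//	}
+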
For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. By default the + // action returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsInput) SetExpectedBucketOwner(v string) *ListObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { + s.RequestPayer = &v + return s +} + +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // The root key used by Key Protect to encrypt this bucket. This value must + // be the full CRN of the root key. + IBMSSEKPCrkId *string `location:"header" locationName:"ibm-sse-kp-customer-root-key-crn" type:"string"` + + // Specifies whether the Bucket has Key Protect enabled. + IBMSSEKPEnabled *bool `location:"header" locationName:"ibm-sse-kp-enabled" type:"boolean"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. 
+ Marker *string `type:"string"` + + // The maximum number of keys returned in the response body. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When the response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as the marker in a subsequent + // request to get the next set of objects. Amazon S3 lists objects in alphabetical + // order. Note: this element is returned only if you have the delimiter request + // parameter specified. If the response does not include NextMarker and it is + // truncated, you can use the value of the last Key in the response as the marker + // in a subsequent request to get the next set of object keys. + NextMarker *string `type:"string"` + + // Keys that begin with the indicated prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { + s.Contents = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { + s.EncodingType = &v + return s +} + +// SetIBMSSEKPCrkId sets the IBMSSEKPCrkId field's value. +func (s *ListObjectsOutput) SetIBMSSEKPCrkId(v string) *ListObjectsOutput { + s.IBMSSEKPCrkId = &v + return s +} + +// SetIBMSSEKPEnabled sets the IBMSSEKPEnabled field's value. +func (s *ListObjectsOutput) SetIBMSSEKPEnabled(v bool) *ListObjectsOutput { + s.IBMSSEKPEnabled = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { + s.Name = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { + s.NextMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { + s.Prefix = &v + return s +} + +type ListObjectsV2Input struct { + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` + + // Bucket name to list. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name.
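+
+// Editor's illustrative sketch (refers to the ListObjects types above):
+// manual marker pagination. NextMarker is only returned when a delimiter is
+// set, so the fallback marker is the last key of the page, exactly as the
+// NextMarker note explains.
+//
+//	in := &s3.ListObjectsInput{Bucket: aws.String("my-bucket")}
+//	for {
+//		out, err := svc.ListObjects(in)
+//		if err != nil {
+//			return err
+//		}
+//		for _, o := range out.Contents {
+//			fmt.Println(aws.StringValue(o.Key))
+//		}
+//		if !aws.BoolValue(out.IsTruncated) || len(out.Contents) == 0 {
+//			break
+//		}
+//		if out.NextMarker != nil {
+//			in.Marker = out.NextMarker
+//		} else {
+//			in.Marker = out.Contents[len(out.Contents)-1].Key
+//		}
+//	}
+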
For more information about
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// When using this action with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// ContinuationToken indicates to Amazon S3 that the list is being continued
+	// on this bucket with a token. ContinuationToken is obfuscated and is not a
+	// real key.
+	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+	// A delimiter is a character you use to group keys.
+	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The owner field is not present in ListObjectsV2 results by default. If you
+	// want the owner field returned with each key in the result, set the fetch
+	// owner field to true.
+	FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+	// Sets the maximum number of keys returned in the response. By default the
+	// action returns up to 1,000 key names. The response might contain fewer keys
+	// but will never contain more.
+	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+	// Limits the response to keys that begin with the specified prefix.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the list
+	// objects request in V2 style. Bucket owners need not specify this parameter
+	// in their requests.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+	// listing after this specified key. StartAfter can be any key in the bucket.
+	StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Input) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Input) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
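+//
+// As a rough usage sketch (illustrative only, not generated API documentation),
+// a caller in an importing package might page through a bucket roughly like
+// this, assuming an initialized client svc (for example, svc := s3.New(sess))
+// and an assumed bucket name:
+//
+//	input := &s3.ListObjectsV2Input{
+//		Bucket:  aws.String("example-bucket"), // assumed name
+//		MaxKeys: aws.Int64(1000),
+//	}
+//	for {
+//		page, err := svc.ListObjectsV2(input)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		// ... consume page.Contents here ...
+//		if page.NextContinuationToken == nil {
+//			break // no more pages
+//		}
+//		input.ContinuationToken = page.NextContinuationToken
+//	}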
+func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { + s.Bucket = &v + return s +} + +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsV2Input) SetExpectedBucketOwner(v string) *ListObjectsV2Input { + s.ExpectedBucketOwner = &v + return s +} + +// SetFetchOwner sets the FetchOwner field's value. +func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { + s.FetchOwner = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { + s.RequestPayer = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { + s.StartAfter = &v + return s +} + +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // All of the keys (up to 1,000) rolled up into a common prefix count as a single + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by a delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // If ContinuationToken was sent with the request, it is included in the response. + ContinuationToken *string `type:"string"` + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. 
These rolled-up keys are not returned elsewhere
+	// in the response. Each rolled-up result counts as only one return against
+	// the MaxKeys value.
+	Delimiter *string `type:"string"`
+
+	// Encoding type used by Amazon S3 to encode object key names in the XML response.
+	//
+	// If you specify the encoding-type request parameter, Amazon S3 includes this
+	// element in the response, and returns encoded key name values in the following
+	// response elements:
+	//
+	// Delimiter, Prefix, Key, and StartAfter.
+	EncodingType *string `type:"string" enum:"EncodingType"`
+
+	// Set to false if all of the results were returned. Set to true if more keys
+	// are available to return. If the number of results exceeds that specified
+	// by MaxKeys, all of the results might not be returned.
+	IsTruncated *bool `type:"boolean"`
+
+	// KeyCount is the number of keys returned with this request. KeyCount will
+	// always be less than or equal to the MaxKeys field. For example, if you ask
+	// for 50 keys, your result will include 50 keys or fewer.
+	KeyCount *int64 `type:"integer"`
+
+	// Sets the maximum number of keys returned in the response. By default the
+	// action returns up to 1,000 key names. The response might contain fewer keys
+	// but will never contain more.
+	MaxKeys *int64 `type:"integer"`
+
+	// The bucket name.
+	//
+	// When using this action with an access point, you must direct requests to
+	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the AWS SDKs, you provide
+	// the access point ARN in place of the bucket name. For more information about
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// When using this action with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	Name *string `type:"string"`
+
+	// NextContinuationToken is sent when IsTruncated is true, which means there
+	// are more keys in the bucket that can be listed. The next list request to
+	// Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
+	// is obfuscated and is not a real key.
+	NextContinuationToken *string `type:"string"`
+
+	// Keys that begin with the indicated prefix.
+	Prefix *string `type:"string"`
+
+	// If StartAfter was sent with the request, it is included in the response.
+	StartAfter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Output) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Output) GoString() string {
+	return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
+	s.CommonPrefixes = v
+	return s
+}
+
+// SetContents sets the Contents field's value.
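+//
+// As an illustrative sketch (not generated API documentation), the objects and
+// common prefixes in a ListObjectsV2Output named page might be consumed like
+// this:
+//
+//	for _, obj := range page.Contents {
+//		fmt.Printf("%s (%d bytes)\n", aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
+//	}
+//	for _, cp := range page.CommonPrefixes {
+//		fmt.Println("prefix:", aws.StringValue(cp.Prefix))
+//	}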
+func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { + s.Contents = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s +} + +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { + s.Prefix = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v + return s +} + +type ListPartsInput struct { + _ struct{} `locationName:"ListPartsRequest" type:"structure"` + + // The name of the bucket to which the parts are being uploaded. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. 
+ // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { + s.Bucket = &v + return s +} + +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListPartsInput) SetExpectedBucketOwner(v string) *ListPartsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
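+//
+// As a rough sketch (illustrative only), the parts of an in-progress multipart
+// upload might be listed like this, assuming an initialized client svc and an
+// upload ID previously returned by CreateMultipartUpload:
+//
+//	out, err := svc.ListParts(&s3.ListPartsInput{
+//		Bucket:   aws.String("example-bucket"), // assumed names
+//		Key:      aws.String("example-key"),
+//		UploadId: aws.String(uploadID),
+//	})
+//	if err == nil && aws.BoolValue(out.IsTruncated) {
+//		// Pass out.NextPartNumberMarker as PartNumberMarker in the next request.
+//	}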
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, then the response includes this header indicating when + // the initiated multipart upload will become eligible for abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // The name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Container element that identifies who initiated the multipart upload. If + // the initiator is an AWS account, this element provides the same information + // as the Owner element. If the initiator is an IAM User, this element provides + // the user ARN and display name. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. A true value indicates + // that the list was truncated. A list can be truncated if the number of parts + // exceeds the limit returned in the MaxParts element. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. + Owner *Owner `type:"structure"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + PartNumberMarker *int64 `type:"integer"` + + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. 
+ UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s +} + +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon Simple Storage Service API Reference. +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case, you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + // Container for granting information. 
+ TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// Container for the MultipartUpload for the Amazon S3 object. +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Specifies the owner of the object that is part of the multipart upload. + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
+func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// An object consists of data and its descriptive metadata. +type Object struct { + _ struct{} `type:"structure"` + + // The entity tag is a hash of the object. The ETag reflects changes only to + // the contents of an object, not its metadata. The ETag may or may not be an + // MD5 digest of the object data. Whether or not it is depends on how the object + // was created and how it is encrypted as described below: + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-S3 or plaintext, + // have ETags that are an MD5 digest of their object data. + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-C or SSE-KMS, + // have ETags that are not an MD5 digest of their object data. + // + // * If an object is created by either the Multipart Upload or Part Copy + // operation, the ETag is not an MD5 digest, regardless of the method of + // encryption. + ETag *string `type:"string"` + + // The name that you assign to an object. You use the object key to retrieve + // the object. + Key *string `min:"1" type:"string"` + + // Creation date of the object. + LastModified *time.Time `type:"timestamp"` + + // The owner of the object + Owner *Owner `type:"structure"` + + // Size in bytes of the object + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v + return s +} + +// Object Identifier is unique value to identify objects. +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. 
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +// The version of an object. +type ObjectVersion struct { + _ struct{} `type:"structure"` + + // The entity tag is an MD5 hash of that version of the object. + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` + + // Specifies the owner of the object. + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Container for the owner's display name and ID. +type Owner struct { + _ struct{} `type:"structure"` + + // Container for the display name of the owner. + DisplayName *string `type:"string"` + + // Container for the ID of the owner. 
+	ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Owner) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Owner) GoString() string {
+	return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Owner) SetDisplayName(v string) *Owner {
+	s.DisplayName = &v
+	return s
+}
+
+// SetID sets the ID field's value.
+func (s *Owner) SetID(v string) *Owner {
+	s.ID = &v
+	return s
+}
+
+// Container for elements related to a part.
+type Part struct {
+	_ struct{} `type:"structure"`
+
+	// Entity tag returned when the part was uploaded.
+	ETag *string `type:"string"`
+
+	// Date and time at which the part was uploaded.
+	LastModified *time.Time `type:"timestamp"`
+
+	// Part number identifying the part. This is a positive integer between 1 and
+	// 10,000.
+	PartNumber *int64 `type:"integer"`
+
+	// Size in bytes of the uploaded part data.
+	Size *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s Part) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Part) GoString() string {
+	return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *Part) SetETag(v string) *Part {
+	s.ETag = &v
+	return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Part) SetLastModified(v time.Time) *Part {
+	s.LastModified = &v
+	return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *Part) SetPartNumber(v int64) *Part {
+	s.PartNumber = &v
+	return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Part) SetSize(v int64) *Part {
+	s.Size = &v
+	return s
+}
+
+type ProtectionConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Default retention period for an object. If a PUT of an object does not specify
+	// a retention period, this value will be converted to seconds and used.
+	//
+	// DefaultRetention is a required field
+	DefaultRetention *BucketProtectionDefaultRetention `type:"structure" required:"true"`
+
+	// Enable permanent retention for an object.
+	EnablePermanentRetention *bool `type:"boolean"`
+
+	// Maximum retention period for an object. If a PUT of an object specifies a
+	// longer retention period, the PUT Object request will fail.
+	//
+	// MaximumRetention is a required field
+	MaximumRetention *BucketProtectionMaximumRetention `type:"structure" required:"true"`
+
+	// Minimum retention period for an object. If a PUT of an object specifies a
+	// shorter retention period, the PUT Object request will fail.
+	//
+	// MinimumRetention is a required field
+	MinimumRetention *BucketProtectionMinimumRetention `type:"structure" required:"true"`
+
+	// Retention status of a bucket.
+	//
+	// Status is a required field
+	Status *string `type:"string" required:"true" enum:"BucketProtectionStatus"`
+}
+
+// String returns the string representation
+func (s ProtectionConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProtectionConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
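+//
+// As an illustrative sketch (not generated API documentation), a retention
+// policy might be assembled with the chained setters before being sent in a
+// PutBucketProtectionConfiguration request. The Days fields and the "Retention"
+// status value are assumptions based on the IBM COS retention model:
+//
+//	cfg := (&s3.ProtectionConfiguration{}).
+//		SetStatus("Retention").
+//		SetDefaultRetention(&s3.BucketProtectionDefaultRetention{Days: aws.Int64(30)}).
+//		SetMinimumRetention(&s3.BucketProtectionMinimumRetention{Days: aws.Int64(1)}).
+//		SetMaximumRetention(&s3.BucketProtectionMaximumRetention{Days: aws.Int64(365)})
+//	err := cfg.Validate() // nil when all required fields are present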
+func (s *ProtectionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProtectionConfiguration"} + if s.DefaultRetention == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultRetention")) + } + if s.MaximumRetention == nil { + invalidParams.Add(request.NewErrParamRequired("MaximumRetention")) + } + if s.MinimumRetention == nil { + invalidParams.Add(request.NewErrParamRequired("MinimumRetention")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.DefaultRetention != nil { + if err := s.DefaultRetention.Validate(); err != nil { + invalidParams.AddNested("DefaultRetention", err.(request.ErrInvalidParams)) + } + } + if s.MaximumRetention != nil { + if err := s.MaximumRetention.Validate(); err != nil { + invalidParams.AddNested("MaximumRetention", err.(request.ErrInvalidParams)) + } + } + if s.MinimumRetention != nil { + if err := s.MinimumRetention.Validate(); err != nil { + invalidParams.AddNested("MinimumRetention", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ProtectionConfiguration) SetDefaultRetention(v *BucketProtectionDefaultRetention) *ProtectionConfiguration { + s.DefaultRetention = v + return s +} + +// SetEnablePermanentRetention sets the EnablePermanentRetention field's value. +func (s *ProtectionConfiguration) SetEnablePermanentRetention(v bool) *ProtectionConfiguration { + s.EnablePermanentRetention = &v + return s +} + +// SetMaximumRetention sets the MaximumRetention field's value. +func (s *ProtectionConfiguration) SetMaximumRetention(v *BucketProtectionMaximumRetention) *ProtectionConfiguration { + s.MaximumRetention = v + return s +} + +// SetMinimumRetention sets the MinimumRetention field's value. +func (s *ProtectionConfiguration) SetMinimumRetention(v *BucketProtectionMinimumRetention) *ProtectionConfiguration { + s.MinimumRetention = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ProtectionConfiguration) SetStatus(v string) *ProtectionConfiguration { + s.Status = &v + return s +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon +// S3 bucket. You can enable the configuration options in any combination. For +// more information about when Amazon S3 considers a bucket or object public, +// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon S3 User Guide. +type PublicAccessBlockConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 should block public access control lists (ACLs) + // for this bucket and objects in this bucket. Setting this element to TRUE + // causes the following behavior: + // + // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is + // public. + // + // * PUT Object calls fail if the request includes a public ACL. + // + // * PUT Bucket calls fail if the request includes a public ACL. + // + // Enabling this setting doesn't affect existing policies or ACLs. + BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to + // ignore all public ACLs on this bucket and objects in this bucket. 
+ // + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. + IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` +} + +// String returns the string representation +func (s PublicAccessBlockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicAccessBlockConfiguration) GoString() string { + return s.String() +} + +// SetBlockPublicAcls sets the BlockPublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicAcls = &v + return s +} + +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { + s.IgnorePublicAcls = &v + return s +} + +type PutBucketAclInput struct { + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The bucket to which to apply the ACL. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
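+//
+// As a rough sketch (illustrative only), a canned ACL might be applied to a
+// bucket like this, assuming an initialized client svc and an assumed bucket
+// name:
+//
+//	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
+//		Bucket: aws.String("example-bucket"), // assumed name
+//		ACL:    aws.String("public-read"),
+//	})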
+func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + + // Specifies the bucket impacted by the corsconfiguration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // S3 User Guide. + // + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. +func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + + // The name of the bucket for which to set the configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for lifecycle rules. You can add as many as 1000 rules. + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
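+//
+// As an illustrative sketch (not generated API documentation), a lifecycle
+// configuration might be applied like this, assuming an initialized client svc
+// and a pre-built *s3.LifecycleConfiguration value named lc:
+//
+//	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//		Bucket:                 aws.String("example-bucket"), // assumed name
+//		LifecycleConfiguration: lc,
+//	})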
+func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + + // The name of the bucket for which to set the logging parameters. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for logging status information. + // + // BucketLoggingStatus is a required field + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
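+//
+// As a rough sketch (illustrative only), server access logging might be enabled
+// like this, assuming an initialized client svc and that both buckets already
+// exist:
+//
+//	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
+//		Bucket: aws.String("source-bucket"), // assumed names
+//		BucketLoggingStatus: &s3.BucketLoggingStatus{
+//			LoggingEnabled: &s3.LoggingEnabled{
+//				TargetBucket: aws.String("log-bucket"),
+//				TargetPrefix: aws.String("source-bucket/"),
+//			},
+//		},
+//	})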
+func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketProtectionConfigurationInput struct { + _ struct{} `locationName:"PutBucketProtectionConfigurationRequest" type:"structure" payload:"ProtectionConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ProtectionConfiguration is a required field + ProtectionConfiguration *ProtectionConfiguration `locationName:"ProtectionConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketProtectionConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketProtectionConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketProtectionConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketProtectionConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ProtectionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ProtectionConfiguration")) + } + if s.ProtectionConfiguration != nil { + if err := s.ProtectionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ProtectionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
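+//
+// As an illustrative sketch (not generated API documentation), a protection
+// configuration cfg built as shown for ProtectionConfiguration might be applied
+// like this, assuming an initialized client svc:
+//
+//	_, err := svc.PutBucketProtectionConfiguration(&s3.PutBucketProtectionConfigurationInput{
+//		Bucket:                  aws.String("example-bucket"), // assumed name
+//		ProtectionConfiguration: cfg,
+//	})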
+func (s *PutBucketProtectionConfigurationInput) SetBucket(v string) *PutBucketProtectionConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketProtectionConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetProtectionConfiguration sets the ProtectionConfiguration field's value. +func (s *PutBucketProtectionConfigurationInput) SetProtectionConfiguration(v *ProtectionConfiguration) *PutBucketProtectionConfigurationInput { + s.ProtectionConfiguration = v + return s +} + +type PutBucketProtectionConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketProtectionConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketProtectionConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Container for setting the versioning state. + // + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. 
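PutBucketProtectionConfiguration is not part of the upstream AWS S3 API (it is a COS retention operation), but its input enforces the same required-field contract, so a nil ProtectionConfiguration is caught locally. A small sketch using only the API shown above; the bucket name is hypothetical:

func protectionInputIsCheckedLocally() {
	in := (&s3.PutBucketProtectionConfigurationInput{}).SetBucket("my-bucket")
	// ProtectionConfiguration was never set, so Validate reports the
	// missing required parameter instead of a server-side 400.
	if err := in.Validate(); err != nil {
		fmt.Println(err) // lists ProtectionConfiguration as required
	}
}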
+func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for the request. + // + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. 
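Enabling versioning pairs the fluent setters with the MFA header described above (the device serial number, a space, then the current token). A sketch; the Status field on VersioningConfiguration is assumed from the upstream SDK, since that type is defined outside this hunk:

func enableVersioning(bucket, mfa string) (*s3.PutBucketVersioningInput, error) {
	in := (&s3.PutBucketVersioningInput{}).
		SetBucket(bucket).
		SetVersioningConfiguration(&s3.VersioningConfiguration{
			Status: aws.String("Enabled"), // field assumed, per upstream SDK
		})
	if mfa != "" {
		in.SetMFA(mfa) // e.g. "arn:aws:iam::123456789012:mfa/user 123456" (illustrative)
	}
	return in, in.Validate()
}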
+func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The bucket name that contains the object to which you want to attach the + // ACL. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + // + // This action is not supported by Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key for which the PUT action was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
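PutObjectAclInput accepts either a canned ACL header or a full AccessControlPolicy payload; Validate only enforces Bucket and Key and recurses into the policy when one is present. A minimal canned-ACL sketch:

func makeObjectPublicRead(bucket, key, versionID string) (*s3.PutObjectAclInput, error) {
	in := (&s3.PutObjectAclInput{}).
		SetBucket(bucket).
		SetKey(key).
		SetACL("public-read") // a canned ACL from the ObjectCannedACL enum
	if versionID != "" {
		in.SetVersionId(versionID) // target one version rather than the latest
	}
	return in, in.Validate()
}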
+func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +type PutObjectInput struct { + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // + // This action is not supported by Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The bucket name to which the PUT action was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. 
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the PUT action was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Date on which it will be legal to delete or modify the object. This field
+ // can only be specified if Retention-Directive is REPLACE. You can only specify
+ // this or the Retention-Period header. If both are specified a 400 error will
+ // be returned. If neither is specified the bucket's DefaultRetention period
+ // will be used.
+ RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"`
+
+ // A single legal hold to apply to the object. This field can only be specified
+ // if Retention-Directive is REPLACE. A legal hold is a string of at most 64
+ // characters. The object cannot be overwritten or deleted until all legal
+ // holds associated with the object are removed.
+ RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"`
+
+ // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date
+ // are specified a 400 error is returned. If neither is specified the bucket's
+ // DefaultRetention period will be used. 0 is a legal value assuming the bucket's
+ // minimum retention period is also 0.
+ RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If x-amz-server-side-encryption is present and has the value of aws:kms,
+ // this header specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for
+ // the object.
+ //
+ // If the value of x-amz-server-side-encryption is aws:kms, this header specifies
+ // the ID of the symmetric customer managed AWS KMS CMK that will be used for
+ // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not
+ // provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS
+ // managed CMK in AWS to protect the data.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 Service Developer Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ // (For example, "Key1=Value1")
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata. For information about object
+ // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
+ //
+ // In the following example, the request header sets the redirect to an object
+ // (anotherPage.html) in the same bucket:
+ //
+ // x-amz-website-redirect-location: /anotherPage.html
+ //
+ // In the following example, the request header sets the object redirect to
+ // another website:
+ //
+ // x-amz-website-redirect-location: http://www.example.com/
+ //
+ // For more information about website hosting in Amazon S3, see Hosting Websites
+ // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
+ // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} + +// SetBody sets the Body field's value. +func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s +} + +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. 
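Only Bucket and Key are validated client-side for PutObject; everything else, including the body, is optional at this layer. A sketch of a typical construction (*bytes.Reader satisfies the io.ReadSeeker that the Body field expects, which also lets the SDK rewind the payload on retry):

func newPutObjectInput(bucket, key string, payload []byte) (*s3.PutObjectInput, error) {
	in := (&s3.PutObjectInput{}).
		SetBucket(bucket).
		SetKey(key).
		SetBody(bytes.NewReader(payload)).
		SetContentType("application/json").
		SetMetadata(map[string]*string{
			"origin": aws.String("example"), // sent as the x-amz-meta-origin header
		})
	return in, in.Validate()
}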
+func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetRetentionExpirationDate sets the RetentionExpirationDate field's value. +func (s *PutObjectInput) SetRetentionExpirationDate(v time.Time) *PutObjectInput { + s.RetentionExpirationDate = &v + return s +} + +// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value. +func (s *PutObjectInput) SetRetentionLegalHoldId(v string) *PutObjectInput { + s.RetentionLegalHoldId = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *PutObjectInput) SetRetentionPeriod(v int64) *PutObjectInput { + s.RetentionPeriod = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value + // of the rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If you specified server-side encryption either with an AWS KMS customer master + // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s +} + +type PutObjectTaggingInput struct { + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name containing the object. 
+ // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Name of the object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for the TagSet and Tag elements + // + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The versionId of the object that the tag-set will be added to. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
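The tagging input nests the Tagging and Tag containers defined near the end of this file, and Validate (below) walks the whole TagSet, indexing each bad element in the aggregated error. A sketch:

func newTaggingInput(bucket, key string, tags map[string]string) (*s3.PutObjectTaggingInput, error) {
	set := make([]*s3.Tag, 0, len(tags))
	for k, v := range tags {
		set = append(set, (&s3.Tag{}).SetKey(k).SetValue(v))
	}
	in := (&s3.PutObjectTaggingInput{}).
		SetBucket(bucket).
		SetKey(key).
		SetTagging((&s3.Tagging{}).SetTagSet(set))
	// An empty tag key surfaces as e.g. "TagSet[0]" in the returned error.
	return in, in.Validate()
}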
+func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { + s.VersionId = &v + return s +} + +type PutObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was added to. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { + s.VersionId = &v + return s +} + +type PutPublicAccessBlockInput struct { + _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. 
For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon S3 User Guide. + // + // PublicAccessBlockConfiguration is a required field + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.PublicAccessBlockConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *PutPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { + s.PublicAccessBlockConfiguration = v + return s +} + +type PutPublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Specifies how requests are redirected. In the event of an error, you can +// specify a different error code to return. +type Redirect struct { + _ struct{} `type:"structure"` + + // The host name to use in the redirect request. + HostName *string `type:"string"` + + // The HTTP redirect code to use on the response. Not required if one of the + // siblings is present. + HttpRedirectCode *string `type:"string"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` + + // The object key prefix to use in the redirect request. For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/, you can set a condition block with KeyPrefixEquals set to docs/ + // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required + // if one of the siblings is present. 
Can be present only if ReplaceKeyWith + // is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyPrefixWith *string `type:"string"` + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the siblings is present. Can + // be present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Redirect) GoString() string { + return s.String() +} + +// SetHostName sets the HostName field's value. +func (s *Redirect) SetHostName(v string) *Redirect { + s.HostName = &v + return s +} + +// SetHttpRedirectCode sets the HttpRedirectCode field's value. +func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { + s.HttpRedirectCode = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *Redirect) SetProtocol(v string) *Redirect { + s.Protocol = &v + return s +} + +// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. +func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { + s.ReplaceKeyPrefixWith = &v + return s +} + +// SetReplaceKeyWith sets the ReplaceKeyWith field's value. +func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { + s.ReplaceKeyWith = &v + return s +} + +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests are redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v + return s +} + +// SetProtocol sets the Protocol field's value. 
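RedirectAllRequestsTo is the whole-site counterpart to per-rule Redirects: only HostName is required, and Protocol falls back to the protocol of the original request when omitted. A sketch using SetHostName and SetProtocol (below); in practice the result would be attached to a WebsiteConfiguration, defined elsewhere in this file, before calling PutBucketWebsite:

func redirectWholeSite(host string) (*s3.RedirectAllRequestsTo, error) {
	r := (&s3.RedirectAllRequestsTo{}).
		SetHostName(host).
		SetProtocol("https") // optional; a value from the Protocol enum
	return r, r.Validate()
}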
+func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v + return s +} + +type RestoreObjectInput struct { + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + + // The bucket name containing the object to restore. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the action was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for restore job parameters. + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { + s.Bucket = &v + return s +} + +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + // + // The Days element is required for regular restores, and must not be provided + // for select requests. + // + // Days is a required field + Days *int64 `type:"integer" required:"true"` + + // S3 Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
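For a regular restore, Days is the one required knob on RestoreRequest, and Validate on the outer input recurses into it (the nested Validate follows below). A minimal sketch:

func newRestoreInput(bucket, key string, days int64) (*s3.RestoreObjectInput, error) {
	in := (&s3.RestoreObjectInput{}).
		SetBucket(bucket).
		SetKey(key).
		SetRestoreRequest((&s3.RestoreRequest{}).SetDays(days)) // required for regular restores
	return in, in.Validate()
}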
+func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon S3 User Guide. +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. + // + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +// A container of a key value name pair. +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the object key. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
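The Condition/Redirect pairing documented above maps directly onto code. A short sketch of the documentation's own "/docs to /documents" example, assuming the standard generated Condition and Redirect types with KeyPrefixEquals and ReplaceKeyPrefixWith fields (neither is shown in this excerpt):

// Assumed import:
//   "github.com/IBM/ibm-cos-sdk-go/service/s3"
func docsRedirectRule() (*s3.RoutingRule, error) {
	rule := (&s3.RoutingRule{}).
		SetCondition((&s3.Condition{}).SetKeyPrefixEquals("docs/")).
		SetRedirect((&s3.Redirect{}).SetReplaceKeyPrefixWith("documents/"))

	// Redirect is the only required field, so this rule validates cleanly.
	if err := rule.Validate(); err != nil {
		return nil, err
	}
	return rule, nil
}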
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Container for TagSet elements. +type Tagging struct { + _ struct{} `type:"structure"` + + // A collection for a set of tags + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +// Container for granting information. +type TargetGrant struct { + _ struct{} `type:"structure"` + + // Container for the person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Logging permissions assigned to the grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon S3 User Guide. 
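Tagging.Validate above walks the TagSet and reports nested failures under an indexed path, which is worth seeing once. A sketch using only the types defined here; the tag values are placeholders:

// Assumed imports:
//   "fmt"
//   "github.com/IBM/ibm-cos-sdk-go/service/s3"
func exampleTagValidation() {
	tags := (&s3.Tagging{}).SetTagSet([]*s3.Tag{
		(&s3.Tag{}).SetKey("env").SetValue("dev"),
		(&s3.Tag{}).SetValue("orphan"), // Key deliberately omitted
	})

	// The nested error is reported under TagSet[1], mirroring the
	// fmt.Sprintf("%s[%v]", ...) call in Tagging.Validate.
	if err := tags.Validate(); err != nil {
		fmt.Println(err)
	}
}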
+type Transition struct { + _ struct{} `type:"structure"` + + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. + Days *int64 `type:"integer"` + + // The storage class to which you want the object to transition. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + +type UploadPartCopyInput struct { + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + + // The bucket name. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the source object for the copy operation. You specify the value + // in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + // + // * For objects not accessed through an access point, specify the name of + // the source bucket and key of the source object, separated by a slash (/). + // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket, + // use awsexamplebucket/reports/january.pdf. The value must be URL encoded. + // + // * For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the + // format arn:aws:s3:::accesspoint//object/. 
+ // For example, to copy the object reports/january.pdf through access point + // my-access-point owned by account 123456789012 in Region us-west-2, use + // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL encoded. Amazon S3 supports copy operations using + // access points only when the source and destination buckets are in the + // same AWS Region. Alternatively, for objects accessed through Amazon S3 + // on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. + // For example, to copy the object reports/january.pdf through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding + // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL encoded. + // + // To copy a specific version of an object, append ?versionId= to + // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of + // the source object. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first 10 bytes of the source. You can copy a range only if the source object + // is greater than 5 MB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. 
+	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// The account ID of the expected destination bucket owner. If the destination
+	// bucket is owned by a different account, the request will fail with an HTTP
+	// 403 (Access Denied) error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The account ID of the expected source bucket owner. If the source bucket
+	// is owned by a different account, the request will fail with an HTTP 403 (Access
+	// Denied) error.
+	ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"`
+
+	// Object key for which the multipart upload was initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Part number of part being copied. This is a positive integer between 1 and
+	// 10,000.
+	//
+	// PartNumber is a required field
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header. This must be the same encryption key specified in the initiate multipart
+	// upload request.
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Upload ID identifying the multipart upload whose part is being copied.
+	//
+	// UploadId is a required field
+	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
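Putting the required fields above together, a sketch of copying the first range of a source object as one part of an existing multipart upload. The bucket and key names are placeholders, and the UploadPartCopy operation itself is assumed to follow the usual generated shape:

// Assumed import:
//   "github.com/IBM/ibm-cos-sdk-go/service/s3"
func copyFirstPart(svc *s3.S3, uploadID string) (*s3.UploadPartCopyOutput, error) {
	// Copy the first 10 MiB of the source object as part 1; per the
	// CopySourceRange docs, ranged copies require a source larger than 5 MB.
	input := (&s3.UploadPartCopyInput{}).
		SetBucket("destination-bucket").
		SetKey("large/object.bin").
		SetUploadId(uploadID).
		SetPartNumber(1).
		SetCopySource("source-bucket/large/object.bin").
		SetCopySourceRange("bytes=0-10485759")

	if err := input.Validate(); err != nil {
		return nil, err
	}
	return svc.UploadPartCopy(input)
}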
+func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. 
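The three SSE-C headers travel together. A sketch of deriving the key digest by hand using the setters defined just below; whether the SDK's request handlers would also fill the MD5 in automatically when it is left unset is an assumption carried over from the upstream AWS SDK:

// Assumed imports:
//   "crypto/md5"
//   "encoding/base64"
//   "github.com/IBM/ibm-cos-sdk-go/service/s3"
func withSSEC(input *s3.UploadPartCopyInput, key []byte) *s3.UploadPartCopyInput {
	// The MD5 header lets the service confirm the key arrived intact
	// (an RFC 1321 digest of the key, base64-encoded on the wire).
	sum := md5.Sum(key)
	return input.
		SetSSECustomerAlgorithm("AES256").
		SetSSECustomerKey(string(key)).
		SetSSECustomerKeyMD5(base64.StdEncoding.EncodeToString(sum[:]))
}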
+func (s *UploadPartCopyInput) SetExpectedSourceBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + // Container for all response elements. + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetCopyPartResult sets the CopyPartResult field's value. +func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + +type UploadPartInput struct { + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The name of the bucket to which the multipart upload was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the part data. 
This parameter is
+	// auto-populated when using the command from the CLI. This parameter is required
+	// if object lock parameters are specified.
+	ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Object key for which the multipart upload was initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Part number of part being uploaded. This is a positive integer between 1
+	// and 10,000.
+	//
+	// PartNumber is a required field
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header. This must be the same encryption key specified in the initiate multipart
+	// upload request.
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Upload ID identifying the multipart upload whose part is being uploaded.
+	//
+	// UploadId is a required field
+	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
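Because Body is an io.ReadSeeker rather than a plain io.Reader, an in-memory part can be supplied with bytes.Reader. A usage sketch with placeholder names; the UploadPart operation is assumed to follow the usual generated shape:

// Assumed imports:
//   "bytes"
//   "github.com/IBM/ibm-cos-sdk-go/service/s3"
func uploadPart(svc *s3.S3, uploadID string, partNum int64, data []byte) (*s3.UploadPartOutput, error) {
	// bytes.Reader satisfies io.ReadSeeker, which lets the SDK rewind the
	// body on retries and hash the payload for Content-MD5.
	input := (&s3.UploadPartInput{}).
		SetBucket("example-bucket").
		SetKey("large/object.bin").
		SetUploadId(uploadID).
		SetPartNumber(partNum).
		SetBody(bytes.NewReader(data))

	if err := input.Validate(); err != nil {
		return nil, err
	}
	return svc.UploadPart(input)
}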
+func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartInput) SetExpectedBucketOwner(v string) *UploadPartInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. 
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round-trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+	// symmetric customer managed customer master key (CMK) that was used for the
+	// object.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+	// The server-side encryption algorithm used when storing this object in Amazon
+	// S3 (for example, AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartOutput) GoString() string {
+	return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput {
+	s.ETag = &v
+	return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput {
+	s.RequestCharged = &v
+	return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput {
+	s.SSECustomerAlgorithm = &v
+	return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput {
+	s.SSECustomerKeyMD5 = &v
+	return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput {
+	s.SSEKMSKeyId = &v
+	return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
+	s.ServerSideEncryption = &v
+	return s
+}
+
+// Describes the versioning state of an Amazon S3 bucket. For more information,
+// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
+// in the Amazon Simple Storage Service API Reference.
+type VersioningConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether MFA delete is enabled in the bucket versioning configuration.
+	// This element is only returned if the bucket has been configured with MFA
+	// delete. If the bucket has never been so configured, this element is not returned.
+	MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"`
+
+	// The versioning state of the bucket.
+	Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s VersioningConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VersioningConfiguration) GoString() string {
+	return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
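A consumer-side sketch of VersioningConfiguration using the enum constants defined later in this file. The bucket name is a placeholder, and the PutBucketVersioning operation and its input are not shown in this excerpt but are assumed to follow the usual generated shape:

// Assumed import:
//   "github.com/IBM/ibm-cos-sdk-go/service/s3"
func enableVersioning(svc *s3.S3) error {
	cfg := (&s3.VersioningConfiguration{}).
		SetStatus(s3.BucketVersioningStatusEnabled).
		SetMFADelete(s3.MFADeleteDisabled)

	_, err := svc.PutBucketVersioning((&s3.PutBucketVersioningInput{}).
		SetBucket("example-bucket").
		SetVersioningConfiguration(cfg))
	return err
}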
+func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + +// Specifies website configuration parameters for an Amazon S3 bucket. +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the error document for the website. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website. + IndexDocument *IndexDocument `type:"structure"` + + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. 
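Tying the website types together, a sketch of assembling and validating a WebsiteConfiguration before sending it. The bucket and document names are placeholders; IndexDocument, ErrorDocument, and the PutBucketWebsite operation are assumed to follow the usual generated shape:

// Assumed import:
//   "github.com/IBM/ibm-cos-sdk-go/service/s3"
func configureWebsite(svc *s3.S3) error {
	site := (&s3.WebsiteConfiguration{}).
		SetIndexDocument((&s3.IndexDocument{}).SetSuffix("index.html")).
		SetErrorDocument((&s3.ErrorDocument{}).SetKey("error.html"))

	// Validate recurses into each nested document and every routing rule
	// before anything is sent on the wire.
	if err := site.Validate(); err != nil {
		return err
	}
	_, err := svc.PutBucketWebsite((&s3.PutBucketWebsiteInput{}).
		SetBucket("example-bucket").
		SetWebsiteConfiguration(site))
	return err
}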
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { + s.RoutingRules = v + return s +} + +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +// BucketCannedACL_Values returns all elements of the BucketCannedACL enum +func BucketCannedACL_Values() []string { + return []string{ + BucketCannedACLPrivate, + BucketCannedACLPublicRead, + BucketCannedACLPublicReadWrite, + BucketCannedACLAuthenticatedRead, + } +} + +const ( + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" +) + +// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum +func BucketLocationConstraint_Values() []string { + return []string{ + BucketLocationConstraintEu, + BucketLocationConstraintEuWest1, + BucketLocationConstraintUsWest1, + BucketLocationConstraintUsWest2, + BucketLocationConstraintApSouth1, + BucketLocationConstraintApSoutheast1, + BucketLocationConstraintApSoutheast2, + BucketLocationConstraintApNortheast1, + BucketLocationConstraintSaEast1, + BucketLocationConstraintCnNorth1, + BucketLocationConstraintEuCentral1, + } +} + +const ( + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value + BucketLogsPermissionFullControl = "FULL_CONTROL" + + // BucketLogsPermissionRead is a BucketLogsPermission enum value + BucketLogsPermissionRead = "READ" + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value + BucketLogsPermissionWrite = "WRITE" +) + +// BucketLogsPermission_Values returns all elements of the BucketLogsPermission enum +func BucketLogsPermission_Values() []string { + return []string{ + BucketLogsPermissionFullControl, + BucketLogsPermissionRead, + BucketLogsPermissionWrite, + } +} + +const ( + // 
BucketProtectionStatusRetention is a BucketProtectionStatus enum value + BucketProtectionStatusRetention = "Retention" +) + +// BucketProtectionStatus_Values returns all elements of the BucketProtectionStatus enum +func BucketProtectionStatus_Values() []string { + return []string{ + BucketProtectionStatusRetention, + } +} + +const ( + // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value + BucketVersioningStatusEnabled = "Enabled" + + // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value + BucketVersioningStatusSuspended = "Suspended" +) + +// BucketVersioningStatus_Values returns all elements of the BucketVersioningStatus enum +func BucketVersioningStatus_Values() []string { + return []string{ + BucketVersioningStatusEnabled, + BucketVersioningStatusSuspended, + } +} + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key may contain any Unicode character; +// however, XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // EncodingTypeUrl is a EncodingType enum value + EncodingTypeUrl = "url" +) + +// EncodingType_Values returns all elements of the EncodingType enum +func EncodingType_Values() []string { + return []string{ + EncodingTypeUrl, + } +} + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +// ExpirationStatus_Values returns all elements of the ExpirationStatus enum +func ExpirationStatus_Values() []string { + return []string{ + ExpirationStatusEnabled, + ExpirationStatusDisabled, + } +} + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +// MFADelete_Values returns all elements of the MFADelete enum +func MFADelete_Values() []string { + return []string{ + MFADeleteEnabled, + MFADeleteDisabled, + } +} + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +// MFADeleteStatus_Values returns all elements of the MFADeleteStatus enum +func MFADeleteStatus_Values() []string { + return []string{ + MFADeleteStatusEnabled, + MFADeleteStatusDisabled, + } +} + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +// MetadataDirective_Values returns all elements of the MetadataDirective enum +func MetadataDirective_Values() []string { + return []string{ + MetadataDirectiveCopy, + MetadataDirectiveReplace, + } +} + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + 
+ // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +// ObjectCannedACL_Values returns all elements of the ObjectCannedACL enum +func ObjectCannedACL_Values() []string { + return []string{ + ObjectCannedACLPrivate, + ObjectCannedACLPublicRead, + ObjectCannedACLPublicReadWrite, + ObjectCannedACLAuthenticatedRead, + ObjectCannedACLAwsExecRead, + ObjectCannedACLBucketOwnerRead, + ObjectCannedACLBucketOwnerFullControl, + } +} + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassAccelerated is a ObjectStorageClass enum value + ObjectStorageClassAccelerated = "ACCELERATED" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" +) + +// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum +func ObjectStorageClass_Values() []string { + return []string{ + ObjectStorageClassStandard, + ObjectStorageClassReducedRedundancy, + ObjectStorageClassGlacier, + ObjectStorageClassAccelerated, + ObjectStorageClassStandardIa, + ObjectStorageClassOnezoneIa, + ObjectStorageClassIntelligentTiering, + ObjectStorageClassDeepArchive, + } +} + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + ObjectVersionStorageClassStandard = "STANDARD" +) + +// ObjectVersionStorageClass_Values returns all elements of the ObjectVersionStorageClass enum +func ObjectVersionStorageClass_Values() []string { + return []string{ + ObjectVersionStorageClassStandard, + } +} + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionFullControl, + PermissionWrite, + PermissionWriteAcp, + PermissionRead, + PermissionReadAcp, + } +} + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolHttp, + ProtocolHttps, + } +} + +const ( + // ReplicationStatusComplete is 
a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + // ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" +) + +// ReplicationStatus_Values returns all elements of the ReplicationStatus enum +func ReplicationStatus_Values() []string { + return []string{ + ReplicationStatusComplete, + ReplicationStatusPending, + ReplicationStatusFailed, + ReplicationStatusReplica, + } +} + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// RequestCharged_Values returns all elements of the RequestCharged enum +func RequestCharged_Values() []string { + return []string{ + RequestChargedRequester, + } +} + +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. For information +// about downloading objects from requester pays buckets, see Downloading Objects +// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 Developer Guide. +const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +// RequestPayer_Values returns all elements of the RequestPayer enum +func RequestPayer_Values() []string { + return []string{ + RequestPayerRequester, + } +} + +const ( + // RetentionDirectiveCopy is a RetentionDirective enum value + RetentionDirectiveCopy = "COPY" + + // RetentionDirectiveReplace is a RetentionDirective enum value + RetentionDirectiveReplace = "REPLACE" +) + +// RetentionDirective_Values returns all elements of the RetentionDirective enum +func RetentionDirective_Values() []string { + return []string{ + RetentionDirectiveCopy, + RetentionDirectiveReplace, + } +} + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" +) + +// ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum +func ServerSideEncryption_Values() []string { + return []string{ + ServerSideEncryptionAes256, + ServerSideEncryptionAwsKms, + } +} + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" + + // StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" + + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // StorageClassAccelerated is a StorageClass enum value + StorageClassAccelerated = "ACCELERATED" + + // StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" +) + +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() []string { + return []string{ 
+ StorageClassStandard, + StorageClassReducedRedundancy, + StorageClassStandardIa, + StorageClassOnezoneIa, + StorageClassIntelligentTiering, + StorageClassGlacier, + StorageClassAccelerated, + StorageClassDeepArchive, + } +} + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +// TaggingDirective_Values returns all elements of the TaggingDirective enum +func TaggingDirective_Values() []string { + return []string{ + TaggingDirectiveCopy, + TaggingDirectiveReplace, + } +} + +const ( + // TierAccelerated is a Tier enum value + TierAccelerated = "Accelerated" + + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +// Tier_Values returns all elements of the Tier enum +func Tier_Values() []string { + return []string{ + TierAccelerated, + TierStandard, + TierBulk, + TierExpedited, + } +} + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassAccelerated is a TransitionStorageClass enum value + TransitionStorageClassAccelerated = "ACCELERATED" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" +) + +// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum +func TransitionStorageClass_Values() []string { + return []string{ + TransitionStorageClassGlacier, + TransitionStorageClassAccelerated, + TransitionStorageClassStandardIa, + TransitionStorageClassOnezoneIa, + TransitionStorageClassIntelligentTiering, + TransitionStorageClassDeepArchive, + } +} + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeCanonicalUser, + TypeAmazonCustomerByEmail, + TypeGroup, + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/body_hash.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/body_hash.go new file mode 100644 index 00000000000..f6b387e8533 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/body_hash.go @@ -0,0 +1,202 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +const ( + contentMD5Header = "Content-Md5" + contentSha256Header = "X-Amz-Content-Sha256" + amzTeHeader = "X-Amz-Te" + amzTxEncodingHeader = "X-Amz-Transfer-Encoding" + + appendMD5TxEncoding = "append-md5" +) + +// computeBodyHashes will add Content MD5 and Content 
Sha256 hashes to the
+// request. If the body is not seekable or S3DisableContentMD5Validation is
+// set, this handler will be ignored.
+func computeBodyHashes(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
+		return
+	}
+
+	var md5Hash, sha256Hash hash.Hash
+	hashers := make([]io.Writer, 0, 2)
+
+	// Determine upfront which hashes can be set without overriding
+	// user-provided header data.
+	if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
+		md5Hash = md5.New()
+		hashers = append(hashers, md5Hash)
+	}
+
+	if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
+		sha256Hash = sha256.New()
+		hashers = append(hashers, sha256Hash)
+	}
+
+	// Create the destination writer based on the hashes that are not already
+	// provided by the user.
+	var dst io.Writer
+	switch len(hashers) {
+	case 0:
+		return
+	case 1:
+		dst = hashers[0]
+	default:
+		dst = io.MultiWriter(hashers...)
+	}
+
+	if _, err := aws.CopySeekableBody(dst, r.Body); err != nil {
+		r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
+		return
+	}
+
+	// For the hashes created, set the associated headers that the user did not
+	// already provide.
+	if md5Hash != nil {
+		sum := make([]byte, md5.Size)
+		encoded := make([]byte, md5Base64EncLen)
+
+		base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
+	}
+
+	if sha256Hash != nil {
+		encoded := make([]byte, sha256HexEncLen)
+		sum := make([]byte, sha256.Size)
+
+		hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
+	}
+}
+
+const (
+	md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
+	sha256HexEncLen = sha256.Size * 2        // hex.EncodedLen
+)
+
+// Adds the X-Amz-Te: append-md5 header to the request. This requests that the
+// service respond with a trailing MD5 checksum.
+//
+// Will not ask for append MD5 if it is disabled, the request is presigned,
+// or the API operation does not support content MD5 validation.
+func askForTxEncodingAppendMD5(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
+}
+
+func useMD5ValidationReader(r *request.Request) {
+	if r.Error != nil {
+		return
+	}
+
+	if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
+		return
+	}
+
+	var bodyReader *io.ReadCloser
+	var contentLen int64
+	switch tv := r.Data.(type) {
+	case *GetObjectOutput:
+		bodyReader = &tv.Body
+		contentLen = aws.Int64Value(tv.ContentLength)
+		// Update ContentLength to hide the trailing MD5 checksum.
+		tv.ContentLength = aws.Int64(contentLen - md5.Size)
+		tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
+	default:
+		r.Error = awserr.New("ChecksumValidationError",
+			fmt.Sprintf("%s: %s header received on unsupported API, %s",
+				amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
+			), nil)
+		return
+	}
+
+	if contentLen < md5.Size {
+		r.Error = awserr.New("ChecksumValidationError",
+			fmt.Sprintf("invalid Content-Length %d for %s %s",
+				contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
+			), nil)
+		return
+	}
+
+	// Wrap and swap the response body reader with the validation reader.
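+	// The append-md5 transfer encoding lays the body out as
+	// <payload bytes><16-byte raw MD5 of the payload>. The reader installed
+	// below tees the payload through an MD5 hash and, on EOF, compares the
+	// computed digest against the trailing checksum (see md5ValidationReader).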
+ *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/bucket_location.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/bucket_location.go new file mode 100644 index 00000000000..410fca10611 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,108 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. 
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+//	result, err := svc.GetBucketLocationWithContext(ctx,
+//	    &s3.GetBucketLocationInput{
+//	        Bucket: aws.String(bucket),
+//	    },
+//	    s3.WithNormalizeBucketLocation,
+//	)
+func WithNormalizeBucketLocation(r *request.Request) {
+	r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+}
+
+func buildGetBucketLocation(r *request.Request) {
+	if r.DataFilled() {
+		out := r.Data.(*GetBucketLocationOutput)
+		b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+		if err != nil {
+			r.Error = awserr.New(request.ErrCodeSerialization,
+				"failed reading response body", err)
+			return
+		}
+
+		match := reBucketLocation.FindSubmatch(b)
+		if len(match) > 1 {
+			loc := string(match[1])
+			out.LocationConstraint = aws.String(loc)
+		}
+	}
+}
+
+// IBM COS SDK Code -- START
+// func populateLocationConstraint(r *request.Request) {
+// 	if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
+// 		in := r.Params.(*CreateBucketInput)
+// 		if in.CreateBucketConfiguration == nil {
+// 			r.Params = awsutil.CopyOf(r.Params)
+// 			in = r.Params.(*CreateBucketInput)
+// 			in.CreateBucketConfiguration = &CreateBucketConfiguration{
+// 				LocationConstraint: r.Config.Region,
+// 			}
+// 		}
+// 	}
+// }
+// IBM COS SDK Code -- END
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/customizations.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/customizations.go
new file mode 100644
index 00000000000..71a3bdf8e8a
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/customizations.go
@@ -0,0 +1,77 @@
+package s3
+
+import (
+	"github.com/IBM/ibm-cos-sdk-go/aws/client"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+	"github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn"
+	"github.com/IBM/ibm-cos-sdk-go/internal/s3shared/s3err"
+)
+
+func init() {
+	initClient = defaultInitClientFn
+	initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+	// Support building custom endpoints based on config
+	c.Handlers.Build.PushFront(endpointHandler)
+
+	// Require SSL when using SSE keys
+	c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+	c.Handlers.Build.PushBack(computeSSEKeyMD5)
+	c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5)
+
+	// S3 uses custom error unmarshaling logic
+	c.Handlers.UnmarshalError.Clear()
+	c.Handlers.UnmarshalError.PushBack(unmarshalError)
+	c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
+}
+
+func defaultInitRequestFn(r *request.Request) {
+	// Add request handlers for specific platforms.
+	// e.g. 100-continue support for PUT requests using Go 1.6
+	platformRequestHandlers(r)
+
+	switch r.Operation.Name {
+	case opGetBucketLocation:
+		// GetBucketLocation has custom parsing logic
+		r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+		// IBM COS SDK Code -- START
+		// IBM does not populate the opCreateBucket LocationConstraint
+		// IBM COS SDK Code -- END
+	case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+		r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
+		r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
+	case opPutObject, opUploadPart:
+		r.Handlers.Build.PushBack(computeBodyHashes)
+		// Disabled until #1837 root issue is resolved.
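+		// If re-enabled, GetObject requests would send the X-Amz-Te:
+		// append-md5 header and the response body would be wrapped in the
+		// MD5 validation reader from body_hash.go: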
+ // case opGetObject: + // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) + // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) + } +} + +// bucketGetter is an accessor interface to grab the "Bucket" field from +// an S3 type. +type bucketGetter interface { + getBucket() string +} + +// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" +// field from an S3 type. +type sseCustomerKeyGetter interface { + getSSECustomerKey() string +} + +// copySourceSSECustomerKeyGetter is an accessor interface to grab the +// "CopySourceSSECustomerKey" field from an S3 type. +type copySourceSSECustomerKeyGetter interface { + getCopySourceSSECustomerKey() string +} + +// endpointARNGetter is an accessor interface to grab the +// the field corresponding to an endpoint ARN input. +type endpointARNGetter interface { + getEndpointARN() (arn.Resource, error) + hasEndpointARN() bool +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc.go new file mode 100644 index 00000000000..0def02255ac --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc.go @@ -0,0 +1,26 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package s3 provides the client and types for making API +// requests to Amazon Simple Storage Service. +// +// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service. +// +// See s3 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/ +// +// Using the Client +// +// To contact Amazon Simple Storage Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Simple Storage Service client S3 for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New +package s3 diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc_custom.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc_custom.go new file mode 100644 index 00000000000..7f7aca20859 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc_custom.go @@ -0,0 +1,110 @@ +// Upload Managers +// +// The s3manager package's Uploader provides concurrent upload of content to S3 +// by taking advantage of S3's Multipart APIs. The Uploader also supports both +// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker +// for optimizations if the Body satisfies that type. Once the Uploader instance +// is created you can call Upload concurrently from multiple goroutines safely. +// +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// f, err := os.Open(filename) +// if err != nil { +// return fmt.Errorf("failed to open file %q, %v", filename, err) +// } +// +// // Upload the file to S3. 
+// result, err := uploader.Upload(&s3manager.UploadInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// Body: f, +// }) +// if err != nil { +// return fmt.Errorf("failed to upload file, %v", err) +// } +// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location)) +// +// See the s3manager package's Uploader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader +// +// Download Manager +// +// The s3manager package's Downloader provides concurrently downloading of Objects +// from S3. The Downloader will write S3 Object content with an io.WriterAt. +// Once the Downloader instance is created you can call Download concurrently from +// multiple goroutines safely. +// +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a file to write the S3 Object contents to. +// f, err := os.Create(filename) +// if err != nil { +// return fmt.Errorf("failed to create file %q, %v", filename, err) +// } +// +// // Write the contents of S3 Object to the file +// n, err := downloader.Download(f, &s3.GetObjectInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// }) +// if err != nil { +// return fmt.Errorf("failed to download file, %v", err) +// } +// fmt.Printf("file downloaded, %d bytes\n", n) +// +// See the s3manager package's Downloader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader +// +// Automatic URI cleaning +// +// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) +// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct +// used by the service client. +// +// svc := s3.New(sess, &aws.Config{ +// DisableRestProtocolURICleaning: aws.Bool(true), +// }) +// out, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String("bucketname"), +// Key: aws.String("//foo//bar//moo"), +// }) +// +// Get Bucket Region +// +// GetBucketRegion will attempt to get the region for a bucket using a region +// hint to determine which AWS partition to perform the query on. Use this utility +// to determine the region a bucket is in. +// +// sess := session.Must(session.NewSession()) +// +// bucket := "my-bucket" +// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") +// if err != nil { +// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { +// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket) +// } +// return err +// } +// fmt.Printf("Bucket %s is in %s region\n", bucket, region) +// +// See the s3manager package's GetBucketRegion function documentation for more information +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion +// +// S3 Crypto Client +// +// The s3crypto package provides the tools to upload and download encrypted +// content from S3. The Encryption and Decryption clients can be used concurrently +// once the client is created. +// +// See the s3crypto package documentation for more information. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ +// +package s3 diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint.go new file mode 100644 index 00000000000..3f8854bab2a --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint.go @@ -0,0 +1,290 @@ +package s3 + +import ( + "fmt" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "net/url" + "strings" + + "github.com/IBM/ibm-cos-sdk-go/aws" + awsarn "github.com/IBM/ibm-cos-sdk-go/aws/arn" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/internal/s3shared" + "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn" +) + +const ( + s3Namespace = "s3" + s3AccessPointNamespace = "s3-accesspoint" + s3ObjectsLambdaNamespace = "s3-object-lambda" + s3OutpostsNamespace = "s3-outposts" +) + +// Used by shapes with members decorated as endpoint ARN. +func parseEndpointARN(v string) (arn.Resource, error) { + return arn.ParseResource(v, accessPointResourceParser) +} + +func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + switch resParts[0] { + case "accesspoint": + switch a.Service { + case s3Namespace: + return arn.ParseAccessPointResource(a, resParts[1:]) + case s3ObjectsLambdaNamespace: + return parseS3ObjectLambdaAccessPointResource(a, resParts) + default: + return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)} + } + case "outpost": + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + return parseOutpostAccessPointResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +// parseOutpostAccessPointResource attempts to parse the ARNs resource as an +// outpost access-point resource. 
+// +// Supported Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { + // outpost accesspoint arn is only valid if service is s3-outposts + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + + if len(resParts) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + if len(resParts) < 3 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ + ARN: a, Reason: "access-point resource not set in Outpost ARN", + } + } + + resID := strings.TrimSpace(resParts[0]) + if len(resID) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + var outpostAccessPointARN = arn.OutpostAccessPointARN{} + switch resParts[1] { + case "accesspoint": + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return arn.OutpostAccessPointARN{}, err + } + // set access-point arn + outpostAccessPointARN.AccessPointARN = accessPointARN + default: + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} + } + + // set outpost id + outpostAccessPointARN.OutpostID = resID + return outpostAccessPointARN, nil +} + +func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { + if a.Service != s3ObjectsLambdaNamespace { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) + if err != nil { + return arn.S3ObjectLambdaAccessPointARN{}, err + } + + if len(accessPointARN.Region) == 0 { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} + } + + return arn.S3ObjectLambdaAccessPointARN{ + AccessPointARN: accessPointARN, + }, nil +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = s3shared.NewInvalidARNError(nil, err) + return + } + + resReq := s3shared.ResourceRequest{ + Resource: resource, + Request: req, + } + + if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() { + req.Error = s3shared.NewClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = s3shared.NewClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.S3ObjectLambdaAccessPointARN: + err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.OutpostAccessPointARN: + // 
outposts does not support FIPS regions + if resReq.ResourceConfiguredForFIPS() { + req.Error = s3shared.NewInvalidARNWithFIPSError(resource, nil) + return + } + + err = updateRequestOutpostAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = s3shared.NewInvalidARNError(resource, nil) + } +} + +func updateBucketEndpointFromParams(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucket name was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + updateEndpointForS3Config(r, bucket) +} + +func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error { + // DualStack not supported + if aws.BoolValue(req.Config.UseDualStack) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Dualstack not supported + if aws.BoolValue(req.Config.UseDualStack) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + return nil +} + +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} + +func buildWriteGetObjectResponseEndpoint(req *request.Request) { + // DualStack not supported + if aws.BoolValue(req.Config.UseDualStack) { + req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil) + return + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + req.Error = awserr.New("ConfigurationError", "client configured for accelerate but not 
supported for operation", nil) + return + } + + signingName := s3ObjectsLambdaNamespace + signingRegion := req.ClientInfo.SigningRegion + + if !hasCustomEndpoint(req) { + endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), EndpointsID) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err) + return + } + signingRegion = endpoint.SigningRegion + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + req.Error = err + return + } + updateS3HostPrefixForS3ObjectLambda(req) + } + + redirectSigner(req, signingName, signingRegion) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint_builder.go new file mode 100644 index 00000000000..6d44946de4c --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint_builder.go @@ -0,0 +1,261 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/endpoints" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/internal/s3shared" + "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn" + "github.com/IBM/ibm-cos-sdk-go/private/protocol" +) + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." + + outpostPrefixLabel = "outpost" + outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}." +) + +// hasCustomEndpoint returns true if endpoint is a custom endpoint +func hasCustomEndpoint(r *request.Request) bool { + return len(aws.StringValue(r.Config.Endpoint)) > 0 +} + +// accessPointEndpointBuilder represents the endpoint builder for access point arn +type accessPointEndpointBuilder arn.AccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3" as signing name. +// +func (a accessPointEndpointBuilder) build(req *request.Request) error { + resolveService := arn.AccessPointARN(a).Service + resolveRegion := arn.AccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if s3shared.IsFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported, the SDK must fail + // because there is no well defined method for SDK to construct a + // correct FIPS endpoint. 
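+			// Hence the error below, rather than constructing a
+			// best-guess endpoint.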
+ return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, resolveService) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + // dual stack provided by endpoint resolver + updateS3HostForS3AccessPoint(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an s3 object lambda access point arn +type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name. +// +func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if s3shared.IsFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported, the SDK must fail + // because there is no well defined method for SDK to construct a + // correct FIPS endpoint. 
+ return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.S3ObjectLambdaAccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, EndpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + endpoint.SigningName = s3ObjectsLambdaNamespace + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + updateS3HostPrefixForS3ObjectLambda(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err) + } + + return nil +} + +func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccountID, + } +} + +// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn. +type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN + +// build builds an endpoint corresponding to the outpost access point arn. +// +// For building an endpoint from outpost access point arn, format used is: +// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com +// +// Outpost AccessPoint Endpoint request are signed using "s3-outposts" as signing name. 
+// +func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := o.Region + resolveService := o.Service + + endpointsID := resolveService + if resolveService == s3OutpostsNamespace { + endpointsID = "s3" + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, endpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(o, + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + updateHostPrefix(req, endpointsID, resolveService) + } + + protocol.HostPrefixBuilder{ + Prefix: outpostAccessPointPrefixTemplate, + LabelsFn: o.hostPrefixLabelValues, + }.Build(req) + + // set the signing region, name to resolved names from ARN + redirectSigner(req, resolveService, resolveRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(o, err) + } + + return nil +} + +func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: o.AccessPointName, + accountIDPrefixLabel: o.AccountID, + outpostPrefixLabel: o.OutpostID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region string, endpointsID string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} + +// redirectSigner sets signing name, signing region for a request +func redirectSigner(req *request.Request, signingName string, signingRegion string) { + req.ClientInfo.SigningName = signingName + req.ClientInfo.SigningRegion = signingRegion +} + +func updateS3HostForS3AccessPoint(req *request.Request) { + updateHostPrefix(req, "s3", s3AccessPointNamespace) +} + +func updateS3HostPrefixForS3ObjectLambda(req *request.Request) { + updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace) +} + +func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) { + host := req.HTTPRequest.URL.Host + if strings.HasPrefix(host, oldEndpointPrefix) { + // replace service hostlabel oldEndpointPrefix to newEndpointPrefix + req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):] + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/errors.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/errors.go new file mode 100644 index 00000000000..6d3e726cf51 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/errors.go @@ -0,0 +1,60 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +const ( + + // ErrCodeBucketAlreadyExists for service response error code + // "BucketAlreadyExists". + // + // The requested bucket name is not available. The bucket namespace is shared + // by all users of the system. Select a different name and try again. 
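+	//
+	// A minimal, illustrative sketch of checking for this code with awserr
+	// (svc and input are assumed placeholders, not part of this package):
+	//
+	//	if _, err := svc.CreateBucket(input); err != nil {
+	//	    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeBucketAlreadyExists {
+	//	        // Choose a different, globally unique bucket name and retry.
+	//	    }
+	//	}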
+	ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
+
+	// ErrCodeBucketAlreadyOwnedByYou for service response error code
+	// "BucketAlreadyOwnedByYou".
+	//
+	// The bucket you tried to create already exists, and you own it. Amazon S3
+	// returns this error in all AWS Regions except in the North Virginia Region.
+	// For legacy compatibility, if you re-create an existing bucket that you already
+	// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
+	// bucket access control lists (ACLs).
+	ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+	// ErrCodeInvalidObjectState for service response error code
+	// "InvalidObjectState".
+	//
+	// Object is archived and inaccessible until restored.
+	ErrCodeInvalidObjectState = "InvalidObjectState"
+
+	// ErrCodeNoSuchBucket for service response error code
+	// "NoSuchBucket".
+	//
+	// The specified bucket does not exist.
+	ErrCodeNoSuchBucket = "NoSuchBucket"
+
+	// ErrCodeNoSuchKey for service response error code
+	// "NoSuchKey".
+	//
+	// The specified key does not exist.
+	ErrCodeNoSuchKey = "NoSuchKey"
+
+	// ErrCodeNoSuchUpload for service response error code
+	// "NoSuchUpload".
+	//
+	// The specified multipart upload does not exist.
+	ErrCodeNoSuchUpload = "NoSuchUpload"
+
+	// ErrCodeObjectAlreadyInActiveTierError for service response error code
+	// "ObjectAlreadyInActiveTierError".
+	//
+	// This action is not allowed against this storage tier.
+	ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+	// ErrCodeObjectNotInActiveTierError for service response error code
+	// "ObjectNotInActiveTierError".
+	//
+	// The source object of the COPY action is not in the active tier and is only
+	// stored in Amazon S3 Glacier.
+	ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
+)
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 00000000000..68094cd741d
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,136 @@
+package s3
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws"
+	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// An operationBlacklist is a list of operation names with which a request
+// handler should not be executed.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+	for i := 0; i < len(b); i++ {
+		if b[i] == r.Operation.Name {
+			return false
+		}
+	}
+	return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+	opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Automatically add the bucket name to the endpoint domain
+// if possible. This style of bucket is valid for all bucket names which are
+// DNS compatible and do not contain "."
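+//
+// For example (illustrative values), bucket "my-bucket" in "us-east-1" moves
+// from the path-style form
+//
+//	https://s3.us-east-1.amazonaws.com/my-bucket/key
+//
+// to the virtual-hosted form
+//
+//	https://my-bucket.s3.us-east-1.amazonaws.com/key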
+func updateEndpointForS3Config(r *request.Request, bucketName string) { + forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) + accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + + if accelerate && accelerateOpBlacklist.Continue(r) { + if forceHostStyle { + if r.Config.Logger != nil { + r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") + } + } + updateEndpointForAccelerate(r, bucketName) + } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { + updateEndpointForHostStyle(r, bucketName) + } +} + +func updateEndpointForHostStyle(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { + // bucket name must be valid to put into the host + return + } + + moveBucketToHost(r.HTTPRequest.URL, bucketName) +} + +var ( + accelElem = []byte("s3-accelerate.dualstack.") +) + +func updateEndpointForAccelerate(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName), + nil) + return + } + + parts := strings.Split(r.HTTPRequest.URL.Host, ".") + if len(parts) < 3 { + r.Error = awserr.New("InvalidParameterExecption", + fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", + r.HTTPRequest.URL.Host), nil) + return + } + + if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { + parts[0] = "s3-accelerate" + } + for i := 1; i+1 < len(parts); i++ { + if parts[i] == aws.StringValue(r.Config.Region) { + parts = append(parts[:i], parts[i+1:]...) + break + } + } + + r.HTTPRequest.URL.Host = strings.Join(parts, ".") + + moveBucketToHost(r.HTTPRequest.URL, bucketName) +} + +// Attempts to retrieve the bucket name from the request input parameters. +// If no bucket is found, or the field is empty "", false will be returned. +func bucketNameFromReqParams(params interface{}) (string, bool) { + if iface, ok := params.(bucketGetter); ok { + b := iface.getBucket() + return b, len(b) > 0 + } + + return "", false +} + +// hostCompatibleBucketName returns true if the request should +// put the bucket in the host. This is false if S3ForcePathStyle is +// explicitly set or if the bucket is not DNS compatible. +func hostCompatibleBucketName(u *url.URL, bucket string) bool { + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if u.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// moveBucketToHost moves the bucket name from the URI path to URL host. +func moveBucketToHost(u *url.URL, bucket string) { + u.Host = bucket + "." 
+ u.Host + removeBucketFromPath(u) +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 00000000000..0f48d7a7617 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,8 @@ +// +build !go1.6 + +package s3 + +import "github.com/IBM/ibm-cos-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 00000000000..d74285c3773 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,28 @@ +// +build go1.6 + +package s3 + +import ( + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. + return + } + + r.HTTPRequest.Header.Set("Expect", "100-Continue") +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/service.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/service.go new file mode 100644 index 00000000000..d513dbdd98a --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/service.go @@ -0,0 +1,104 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/client" + "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/aws/signer" + "github.com/IBM/ibm-cos-sdk-go/aws/signer/v4" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml" +) + +// S3 provides the API operation methods for making requests to +// Amazon Simple Storage Service. See this package's package overview docs +// for details on the service. +// +// S3 methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type S3 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "s3" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "S3" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the S3 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a S3 client from just a session. +// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(EndpointsID, cfgs...) 
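+
+	// Force the signing name to "s3" when it was only derived from endpoint
+	// metadata or left unset.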
+ if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "s3" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(signer.CustomRequestSignerRouter(func(s *v4.Signer) { + s.DisableURIPathEscaping = true + })) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/sse.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/sse.go new file mode 100644 index 00000000000..f364a139562 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/sse.go @@ -0,0 +1,84 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "net/http" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme == "https" { + return + } + + if iface, ok := r.Params.(sseCustomerKeyGetter); ok { + if len(iface.getSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } + + if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + if len(iface.getCopySourceSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } +} + +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() + } + + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() + } + + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { + if len(key) == 0 { + // Backwards compatiablity where user just set the header value instead + // of using the API parameter, or setting the header value for an + // operation without the 
parameters modeled. + key = r.Header.Get(keyHeader) + if len(key) == 0 { + return + } + + // In backwards compatible, the header's value is not base64 encoded, + // and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update Key's MD5 if not already set. + if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/statusok_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/statusok_error.go new file mode 100644 index 00000000000..0eb64163380 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,42 @@ +package s3 + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/internal/sdkio" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = ioutil.NopCloser(body) + defer body.Seek(0, sdkio.SeekStart) + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == request.ErrCodeSerialization && + err.OrigErr() != io.EOF { + r.Error = nil + return + } + // if empty payload + if err.OrigErr() == io.EOF { + r.HTTPResponse.StatusCode = http.StatusInternalServerError + } else { + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } + } +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 00000000000..782bcc33f4b --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,114 @@ +package s3 + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/awserr" + "github.com/IBM/ibm-cos-sdk-go/aws/request" + "github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + // Bucket exists in a different region, and request needs + // to be made to the correct region. 
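+	// When present, the x-amz-bucket-region response header names the
+	// bucket's actual region and is echoed into the error message below.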
+ if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + msg := fmt.Sprintf( + "incorrect region, the bucket is not in '%s' region at endpoint '%s'", + aws.StringValue(r.Config.Region), + aws.StringValue(r.Config.Endpoint), + ) + if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 { + msg += fmt.Sprintf(", bucket is in '%s' region", v) + } + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", msg, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Attempt to parse error from body if it is known + var errResp xmlErrorResponse + var err error + if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 { + err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body) + } else { + err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) + } + + if err != nil { + var errorMsg string + if err == io.EOF { + errorMsg = "empty response payload" + } else { + errorMsg = "failed to unmarshal error message" + } + + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + errorMsg, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Fallback to status code converted to message if still no error code + if len(errResp.Code) == 0 { + statusText := http.StatusText(r.HTTPResponse.StatusCode) + errResp.Code = strings.Replace(statusText, " ", "", -1) + errResp.Message = statusText + } + + r.Error = awserr.NewRequestFailure( + awserr.New(errResp.Code, errResp.Message, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +// A RequestFailure provides access to the S3 Request ID and Host ID values +// returned from API operation errors. Getting the error as a string will +// return the formated error with the same information as awserr.RequestFailure, +// while also adding the HostID value from the response. +type RequestFailure interface { + awserr.RequestFailure + + // Host ID is the S3 Host ID needed for debug, and contacting support + HostID() string +} + +// s3unmarshalXMLError is s3 specific xml error unmarshaler +// for 200 OK errors and response payloads. +// This function differs from the xmlUtil.UnmarshalXMLError +// func. It does not ignore the EOF error and passes it up. +// Related to bug fix for `s3 200 OK response with empty payload` +func s3unmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return err +} diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/waiters.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/waiters.go new file mode 100644 index 00000000000..f6fa0cf6078 --- /dev/null +++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/waiters.go @@ -0,0 +1,214 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "time" + + "github.com/IBM/ibm-cos-sdk-go/aws" + "github.com/IBM/ibm-cos-sdk-go/aws/request" +) + +// WaitUntilBucketExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. 
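+//
+// A minimal usage sketch (svc and the bucket name are illustrative):
+//
+//	err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{
+//	    Bucket: aws.String("my-bucket"),
+//	})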
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { + return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 301, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 403, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilBucketNotExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. 
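+//
+// A minimal usage sketch (svc, bucket, and key are illustrative):
+//
+//	err := svc.WaitUntilObjectExists(&s3.HeadObjectInput{
+//	    Bucket: aws.String("my-bucket"),
+//	    Key:    aws.String("my-key"),
+//	})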
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectNotExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) 
+
+	return w.WaitWithContext(ctx)
+}
diff --git a/vendor/github.com/IBM/keyprotect-go-client/.gitignore b/vendor/github.com/IBM/keyprotect-go-client/.gitignore
new file mode 100644
index 00000000000..54873995963
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/.gitignore
@@ -0,0 +1,44 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+vendor
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+.DS_Store
+
+cover.*
+
+# Eclipse
+.project
+
+# Visual Studio Code
+.vscode
+/*.covtmp
+**/coverage
+**/coverage.htmlvendor
+**/coverage.txt
+**/coverage.html
+**/cover.html
+**/coverage.out
+**/coverage.tmp
+*.coverprofile
diff --git a/vendor/github.com/IBM/keyprotect-go-client/.travis.yml b/vendor/github.com/IBM/keyprotect-go-client/.travis.yml
new file mode 100644
index 00000000000..f269b7b4b50
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/.travis.yml
@@ -0,0 +1,25 @@
+language: go
+dist: xenial
+
+go:
+  - 1.14.x
+  - 1.15.x
+
+env:
+  - GO111MODULE=on
+
+before_script:
+  - GO111MODULE=off go get -u github.com/haya14busa/goverage
+
+install:
+  - go build ./...
+
+script:
+  - $GOPATH/bin/goverage -v -race -coverprofile=cover.out $(go list ./... | grep -v '/vendor|/scripts')
+  - go tool cover -func=cover.out
+  - go tool cover -html=cover.out -o=cover.html
+
+# FIXME: these scripts don't exist in this repo
+# after_success:
+# - ./scripts/calculateCoverage.sh
+# - ./scripts/publishCoverage.sh
diff --git a/vendor/github.com/IBM/keyprotect-go-client/CONTRIBUTING.md b/vendor/github.com/IBM/keyprotect-go-client/CONTRIBUTING.md
new file mode 100644
index 00000000000..3f9ac45dc28
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/CONTRIBUTING.md
@@ -0,0 +1,49 @@
+# Contributing to keyprotect-go-client
+
+`keyprotect-go-client` is open for code perusal and contributions. We welcome contributions in the form of feedback, bugs, or patches.
+
+## Bugs and Feature Requests
+
+If you find something that does not work as expected or would like to see a new feature added,
+please open a [Github Issue](https://github.com/IBM/keyprotect-go-client/issues).
+
+## Pull Requests
+
+For your pull request to be merged, it must meet the criteria of a "correct patch", and also
+be fully reviewed and approved by two Maintainer-level contributors.
+
+A correct patch is defined as the following:
+
+ - If the patch fixes a bug, it must be the simplest way to fix the issue
+ - Your patch must come with unit tests
+ - Unit tests (CI job) must pass
+ - New feature functions should have integration tests as well
+
+
+# Development
+
+## Compiling the package
+
+```sh
+go build ./...
+```
+
+The client relies on Go modules to pull in required dependencies at build time.
+
+https://github.com/golang/go/wiki/Modules#how-to-use-modules
+
+## Running the test cases
+
+Using `go test`:
+
+```sh
+go test -v -race ./...
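+# -v lists each test as it runs; -race enables the Go race detector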
+``` + +The test cases are also runnable through `make` + +```sh +make test +# or +make test-integration +``` diff --git a/vendor/github.com/IBM/keyprotect-go-client/LICENSE b/vendor/github.com/IBM/keyprotect-go-client/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/IBM/keyprotect-go-client/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/IBM/keyprotect-go-client/Makefile b/vendor/github.com/IBM/keyprotect-go-client/Makefile
new file mode 100644
index 00000000000..6e166501cc2
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/Makefile
@@ -0,0 +1,8 @@
+
+.PHONY: test test-integration
+
+test:
+	go test -v -race ./...
+
+test-integration:
+	go test -v -tags=integration ./...
diff --git a/vendor/github.com/IBM/keyprotect-go-client/README.md b/vendor/github.com/IBM/keyprotect-go-client/README.md
new file mode 100644
index 00000000000..2fe0bd79bbe
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/README.md
@@ -0,0 +1,173 @@
+# keyprotect-go-client
+
+[![Build Status](https://travis-ci.com/IBM/keyprotect-go-client.svg?branch=master)](https://travis-ci.com/IBM/keyprotect-go-client)
+[![GoDoc](https://godoc.org/github.com/IBM/keyprotect-go-client?status.svg)](https://godoc.org/github.com/IBM/keyprotect-go-client)
+
+keyprotect-go-client is a Go client library for interacting with the IBM KeyProtect service.
+
+* [Questions / Support](#questions--support)
+* [Usage](#usage)
+  * [Migrating](#migrating)
+  * [Authentication](#authentication)
+  * [Finding Instance UUIDs](#finding-a-keyprotect-service-instances-uuid)
+  * [Examples](#examples)
+* [Contributing](/CONTRIBUTING.md)
+
+## Questions / Support
+
+There are many channels for asking questions about KeyProtect and this client.
+
+- Ask a question on Stackoverflow and tag it with `key-protect` and `ibm-cloud`
+- Open a [Github Issue](https://github.com/IBM/keyprotect-go-client/issues)
+- If you work at IBM and have access to the internal Slack, you can join the `#key-protect` channel and ask there.
+
+## Usage
+
+This client expects that you have an existing IBM Cloud Key Protect Service Instance. To get started, visit the [IBM KeyProtect Catalog Page](https://cloud.ibm.com/catalog/services/key-protect).
+
+Build a client with `ClientConfig` and `New`, then use the client to do some operations.
+```go
+import "github.com/IBM/keyprotect-go-client"
+
+// Use your IAM API Key and your KeyProtect Service Instance GUID/UUID to create a ClientConfig
+cc := kp.ClientConfig{
+	BaseURL:    kp.DefaultBaseURL,
+	APIKey:     "......",
+	InstanceID: "1234abcd-906d-438a-8a68-deadbeef1a2b3",
+}
+
+// Build a new client from the config
+client := kp.New(cc, kp.DefaultTransport())
+
+// List keys in your KeyProtect instance
+keys, err := client.GetKeys(context.Background(), 0, 0)
+```
+
+### Migrating
+
+For users of the original `key-protect-client` that is now deprecated, this library is a drop-in replacement. Updating the package reference to `github.com/IBM/keyprotect-go-client` should be the only change needed.
If you are worried about new incompatible changes, version `v0.3.1` of `key-protect-client` is equivalent to version `v0.3.3` of `keyprotect-go-client`, so pinning `v0.3.3` of the new library should be sufficient to pull from the new repo with no new functional changes.
+
+## Authentication
+
+The KeyProtect client requires a valid [IAM API Key](https://cloud.ibm.com/docs/iam?topic=iam-userapikey#create_user_key) that is passed via the `APIKey` field in the `ClientConfig`. The client calls IAM to get an access token for that API key, caches the token, and reuses it on subsequent calls. If the access token has expired, the client calls IAM to get a new one.
+
+Alternatively, you may also inject your own tokens during runtime. When using your own tokens, it's the responsibility of the caller to ensure the access token is valid and is not expired. You can specify the access token in either the `ClientConfig` structure or on the context (see below).
+
+To specify the authorization token on the context:
+
+```go
+// Create a ClientConfig and Client like before, but without an APIKey
+cc := kp.ClientConfig{
+	BaseURL:    kp.DefaultBaseURL,
+	InstanceID: "1234abcd-906d-438a-8a68-deadbeef1a2b3",
+}
+client := kp.New(cc, kp.DefaultTransport())
+
+// Use NewContextWithAuth to add your token into the context
+ctx := context.Background()
+ctx = kp.NewContextWithAuth(ctx, "Bearer ABCDEF123456....")
+
+// List keys with our injected token via the context
+keys, err := client.GetKeys(ctx, 0, 0)
+```
+
+For information on IAM API Keys and tokens please refer to the [IAM docs](https://cloud.ibm.com/docs/iam?topic=iam-manapikey).
+
+## Finding a KeyProtect Service Instance's UUID
+
+The client requires a valid UUID that identifies your KeyProtect Service Instance to be able to interact with your key data in the instance. An instance is somewhat like a folder or directory of keys; you can have many of them per account, but the keys they contain are separate and cannot be shared between instances.
+
+The [IBM Cloud CLI](https://cloud.ibm.com/docs/cli?topic=cloud-cli-getting-started) can be used to find the UUID for your KeyProtect instance.
+
+```sh
+$ ic resource service-instances
+OK
+Name                 Location   State    Type
+Key Protect-private  us-south   active   service_instance
+Key Protect-abc123   us-east    active   service_instance
+```
+
+Find the name of your KeyProtect instance as you created it, and then use the CLI to get its details. The Instance ID is the GUID field, or if you do not see GUID, it will be the last part of the CRN. For example:
+
+```sh
+$ ic resource service-instance "Key Protect-private"
+OK
+
+Name: Key Protect-private
+ID: crn:v1:bluemix:public:kms:us-south:a/.......:1234abcd-906d-438a-8a68-deadbeef1a2b3::
+GUID: 1234abcd-906d-438a-8a68-deadbeef1a2b3
+```
+
+## Examples
+
+### Generating a root key (CRK)
+
+```go
+// Create a root key named MyRootKey with no expiration
+key, err := client.CreateRootKey(ctx, "MyRootKey", nil)
+if err != nil {
+	fmt.Println(err)
+}
+fmt.Println(key.ID, key.Name)
+
+crkID := key.ID
+```
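+
+The client can also page through keys. A sketch, assuming `GetKeys` returns a
+collection whose `Keys` field holds the individual entries (limit and offset
+arguments as in the Usage section above):
+
+```go
+// List up to 50 keys, skipping the first 100 (illustrative values)
+keys, err := client.GetKeys(ctx, 50, 100)
+if err != nil {
+	fmt.Println(err)
+}
+for _, k := range keys.Keys {
+	fmt.Println(k.ID, k.Name)
+}
+```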
+
+### Wrapping and Unwrapping a DEK using a specific Root Key
+
+```go
+myDEK := []byte("thisisadataencryptionkey")
+// Do some encryption with myDEK
+// Wrap the DEK so we can safely store it
+wrappedDEK, err := client.Wrap(ctx, crkID, myDEK, nil)
+
+
+// Unwrap the DEK
+dek, err := client.Unwrap(ctx, crkID, wrappedDEK, nil)
+// Do some encryption/decryption using the DEK
+// Discard the DEK
+dek = nil
+```
+
+Note that you can also pass additional authentication data (AAD) to wrap and unwrap calls
+to provide another level of protection for your DEK. The AAD is a string array with
+each element up to 255 chars. For example:
+
+```go
+myAAD := []string{"First aad string", "second aad string", "third aad string"}
+myDEK := []byte("thisisadataencryptionkey")
+// Do some encryption with myDEK
+// Wrap the DEK so we can safely store it
+wrappedDEK, err := client.Wrap(ctx, crkID, myDEK, &myAAD)
+
+
+// Unwrap the DEK
+dek, err := client.Unwrap(ctx, crkID, wrappedDEK, &myAAD)
+// Do some encryption/decryption using the DEK
+// Discard the DEK
+dek = nil
+```
+
+Have Key Protect create a DEK for you:
+
+```go
+dek, wrappedDek, err := client.WrapCreateDEK(ctx, crkID, nil)
+// Do some encrypt/decrypt with the dek
+// Discard the DEK
+dek = nil
+
+// Save the wrapped DEK for later. Use Unwrap to use it.
+```
+
+You can also specify AAD:
+
+```go
+myAAD := []string{"First aad string", "second aad string", "third aad string"}
+dek, wrappedDek, err := client.WrapCreateDEK(ctx, crkID, &myAAD)
+// Do some encrypt/decrypt with the dek
+// Discard the DEK
+dek = nil
+
+// Save the wrapped DEK for later. Call Unwrap to use it; make
+// sure to specify the same AAD.
+```
diff --git a/vendor/github.com/IBM/keyprotect-go-client/go.mod b/vendor/github.com/IBM/keyprotect-go-client/go.mod
new file mode 100644
index 00000000000..0914be4b56f
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/go.mod
@@ -0,0 +1,10 @@
+module github.com/IBM/keyprotect-go-client
+
+go 1.12
+
+require (
+	github.com/google/uuid v1.1.1
+	github.com/hashicorp/go-retryablehttp v0.6.2
+	github.com/stretchr/testify v1.7.0
+	gopkg.in/h2non/gock.v1 v1.0.15
+)
diff --git a/vendor/github.com/IBM/keyprotect-go-client/go.sum b/vendor/github.com/IBM/keyprotect-go-client/go.sum
new file mode 100644
index 00000000000..8a8a25ac7cf
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/go.sum
@@ -0,0 +1,29 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
+github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
+github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-retryablehttp v0.6.0 h1:uuJM3gyY2D+1naT0kkGVZRQLf3AaXcyuxYY1hYSkqDs= +github.com/hashicorp/go-retryablehttp v0.6.0/go.mod h1:EM1UNuajSSS84RdcM8GK0q0UIPtYqPHVLu9oE2Wm8FY= +github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +gopkg.in/h2non/gock.v1 v1.0.15 h1:SzLqcIlb/fDfg7UvukMpNcWsu7sI5tWwL+KCATZqks0= +gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= diff --git a/vendor/github.com/IBM/keyprotect-go-client/iam/iam.go b/vendor/github.com/IBM/keyprotect-go-client/iam/iam.go new file mode 100644 index 00000000000..25407b4db7e --- /dev/null +++ b/vendor/github.com/IBM/keyprotect-go-client/iam/iam.go @@ -0,0 +1,252 @@ +// Copyright 2019 IBM Corp. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iam + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "sync" + "time" + + rhttp "github.com/hashicorp/go-retryablehttp" +) + +// IAMTokenURL is the global endpoint URL for the IAM token service +const IAMTokenURL = "https://iam.cloud.ibm.com/oidc/token" + +var ( + // RetryWaitMax is the maximum time to wait between HTTP retries + RetryWaitMax = 30 * time.Second + + // RetryMax is the max number of attempts to retry for failed HTTP requests + RetryMax = 4 +) + +type TokenSource interface { + Token() (*Token, error) +} + +// CredentialFromAPIKey returns an IAMTokenSource that requests access tokens +// from the default token endpoint using an IAM API Key as the authentication mechanism +func CredentialFromAPIKey(apiKey string) *IAMTokenSource { + return &IAMTokenSource{ + TokenURL: IAMTokenURL, + APIKey: apiKey, + } +} + +// Token represents an IAM credential used to authorize requests to another service. 
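+//
+// Illustrative usage (editor's sketch, not upstream code): obtain a token
+// from an API key via CredentialFromAPIKey and check its validity.
+//
+//	ts := CredentialFromAPIKey("my-api-key") // hypothetical key
+//	tok, err := ts.Token()
+//	if err == nil && tok.Valid() {
+//		// send tok.AccessToken as the Authorization bearer credential
+//	}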
+type Token struct { + AccessToken string + RefreshToken string + TokenType string + Expiry time.Time +} + +func (t *Token) Valid() bool { + if t == nil || t.AccessToken == "" { + return false + } + + if t.Expiry.Before(time.Now()) { + return false + } + + return true +} + +// jsonToken is for deserializing the token from the response body +type jsonToken struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenType string `json:"token_type"` + ExpiresIn int32 `json:"expires_in"` +} + +// getExpireTime uses local time and the ExpiresIn offset to calculate an +// expiration time based off our local clock, which is more accurate for +// us to determine when it expires relative to our client. +// we also pad the time a bit, because long running requests can fail +// mid-request if we send a soon-to-expire token along +func (jt jsonToken) getExpireTime() time.Time { + // set the expiration time for 1 min less than the + // actual time to prevent timeout errors + return time.Now().Add(time.Duration(jt.ExpiresIn-60) * time.Second) +} + +// IAMTokenSource is used to retrieve access tokens from the IAM token service. +// Most will probably want to use CredentialFromAPIKey to build an IAMTokenSource type, +// but it can also be created directly if one wishes to override the default IAM +// endpoint by setting TokenURL +type IAMTokenSource struct { + TokenURL string + APIKey string + + mu sync.Mutex + t *Token +} + +// Token requests an access token from IAM using the IAMTokenSource config. +func (ts *IAMTokenSource) Token() (*Token, error) { + ts.mu.Lock() + defer ts.mu.Unlock() + + if ts.t.Valid() { + return ts.t, nil + } + + if ts.APIKey == "" { + return nil, errors.New("iam: APIKey is empty") + } + + v := url.Values{} + v.Set("grant_type", "urn:ibm:params:oauth:grant-type:apikey") + v.Set("apikey", ts.APIKey) + reqBody := []byte(v.Encode()) + + u, err := url.Parse(ts.TokenURL) + if err != nil { + return nil, err + } + + // NewRequest will calculate Content-Length if we pass it a bytes.Buffer + // instead of a io.Reader type + bodyBuf := bytes.NewBuffer(reqBody) + request, err := rhttp.NewRequest("POST", u.String(), bodyBuf) + if err != nil { + return nil, err + } + + request.Header.Set("Content-Type", "application/x-www-form-urlencoded") + request.Header.Set("Accept", "application/json") + + // use hashicorp retryable client with max wait time and attempts from module vars + client := rhttp.NewClient() + client.Logger = nil + client.RetryWaitMax = RetryWaitMax + client.RetryMax = RetryMax + client.ErrorHandler = rhttp.PassthroughErrorHandler + + // need to use the go http DefaultTransport for tests to override with stubs (gock HTTP stubbing) + client.HTTPClient = &http.Client{ + Timeout: time.Duration(60) * time.Second, + } + + // this is the DefaultRetryPolicy but with retry on 429s as well + client.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + if err != nil { + return true, err + } + + // retry on connection error (code == 0), all 500s except 501, and 429s + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) || resp.StatusCode == 429 { + return true, nil + } + + return false, nil + } + + resp, err := client.Do(request) + if err != nil { + return nil, err + } + + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(resp.Body); err != nil { + return nil, err + 
} + resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + var iamErr Error + if err = json.Unmarshal(buf.Bytes(), &iamErr); err != nil { + return nil, err + } + iamErr.HTTPResponse = resp + return nil, iamErr + } + + var jToken jsonToken + if err = json.Unmarshal(buf.Bytes(), &jToken); err != nil { + return nil, err + } + + token := &Token{ + AccessToken: jToken.AccessToken, + RefreshToken: jToken.RefreshToken, + TokenType: jToken.TokenType, + Expiry: jToken.getExpireTime(), + } + + ts.t = token + + return token, nil +} + +// Error is a type to hold error information that the IAM services sends back +// when a request cannot be completed. ErrorCode, ErrorMessage, and Context.RequestID +// are probably the most useful fields. IAM will most likely ask you for the RequestID +// if you ask for support. +// +// Also of note is that the http.Response object is included in HTTPResponse for +// error handling at the higher application levels. +type Error struct { + ErrorCode string `json:"errorCode"` + ErrorMessage string `json:"errorMessage"` + Context *iamRequestContext `json:"context"` + HTTPResponse *http.Response +} + +type iamRequestContext struct { + ClientIP string `json:"clientIp"` + ClusterName string `json:"clusterName"` + Host string `json:"host"` + InstanceID string `json:"instanceId"` + RequestID string `json:"requestId"` + RequestType string `json:"requestType"` + ElapsedTime string `json:"elapsedTime"` + StartTime string `json:"startTime"` + EndTime string `json:"endTime"` + ThreadID string `json:"threadId"` + URL string `json:"url"` + UserAgent string `json:"userAgent"` + Locale string `json:"locale"` +} + +func (ie Error) Error() string { + + reqId := "" + if ie.Context != nil { + reqId = ie.Context.RequestID + } + + statusCode := 0 + if ie.HTTPResponse != nil { + statusCode = ie.HTTPResponse.StatusCode + } + + return fmt.Sprintf("iam.Error: HTTP %d requestId='%s' message='%s %s'", + statusCode, reqId, ie.ErrorCode, ie.ErrorMessage) +} diff --git a/vendor/github.com/IBM/keyprotect-go-client/import_token.go b/vendor/github.com/IBM/keyprotect-go-client/import_token.go new file mode 100644 index 00000000000..fcdd0859946 --- /dev/null +++ b/vendor/github.com/IBM/keyprotect-go-client/import_token.go @@ -0,0 +1,219 @@ +// Copyright 2019 IBM Corp. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kp + +import ( + "bytes" + "context" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "hash" + "io" + "time" +) + +const importTokenEncAlgo = "RSAES_OAEP_SHA_256" // currently the only one supported + +// ImportTokenCreateRequest represents request parameters for creating a +// ImportToken. +type ImportTokenCreateRequest struct { + MaxAllowedRetrievals int `json:"maxAllowedRetrievals,omitempty"` + ExpiresInSeconds int `json:"expiration,omitempty"` +} + +// ImportTokenKeyResponse represents the response body for various ImportToken +// API calls. 
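+//
+// Illustrative flow (editor's sketch, assuming a *Client named client and
+// base64-encoded key material in keyMaterialB64):
+//
+//	meta, _ := client.CreateImportToken(ctx, 600, 1) // 10 min expiry, 1 retrieval
+//	tk, _ := client.GetImportTokenTransportKey(ctx)  // yields this response type
+//	wrapped, _ := EncryptKey(keyMaterialB64, tk.Payload)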
+type ImportTokenKeyResponse struct { + ID string `json:"id"` + CreationDate *time.Time `json:"creationDate"` + ExpirationDate *time.Time `json:"expirationDate"` + Payload string `json:"payload"` + Nonce string `json:"nonce"` +} + +// ImportTokenMetadata represents the metadata of a ImportToken. +type ImportTokenMetadata struct { + ID string `json:"id"` + CreationDate *time.Time `json:"creationDate"` + ExpirationDate *time.Time `json:"expirationDate"` + MaxAllowedRetrievals int `json:"maxAllowedRetrievals"` + RemainingRetrievals int `json:"remainingRetrievals"` +} + +// CreateImportToken creates a key ImportToken. +func (c *Client) CreateImportToken(ctx context.Context, expiration, maxAllowedRetrievals int) (*ImportTokenMetadata, error) { + reqBody := ImportTokenCreateRequest{ + MaxAllowedRetrievals: maxAllowedRetrievals, + ExpiresInSeconds: expiration, + } + + req, err := c.newRequest("POST", "import_token", &reqBody) + if err != nil { + return nil, err + } + + res := ImportTokenMetadata{} + if _, err := c.do(ctx, req, &res); err != nil { + return nil, err + } + + return &res, nil +} + +// GetImportTokenTransportKey retrieves the ImportToken transport key. +func (c *Client) GetImportTokenTransportKey(ctx context.Context) (*ImportTokenKeyResponse, error) { + res := ImportTokenKeyResponse{} + + req, err := c.newRequest("GET", "import_token", nil) + if err != nil { + return nil, err + } + + if _, err := c.do(ctx, req, &res); err != nil { + return nil, err + } + + return &res, nil +} + +// EncryptNonce will wrap the KP generated nonce with the users key-material +func EncryptNonce(key, value, iv string) (string, string, error) { + return encryptNonce(key, value, iv) +} + +// EncryptKey will encrypt the user key-material with the public key from key protect +func EncryptKey(key, pubkey string) (string, error) { + return encryptKey(key, pubkey) +} + +func encryptNonce(key, value, iv string) (string, string, error) { + var cipherText []byte + pubKey, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return "", "", fmt.Errorf("Failed to decode public key: %s", err) + } + nonce, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return "", "", fmt.Errorf("Failed to decode nonce: %s", err) + } + block, err := aes.NewCipher(pubKey) + if err != nil { + return "", "", err + } + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return "", "", err + } + if iv == "" { + newIv := make([]byte, 12) + if _, err := io.ReadFull(rand.Reader, newIv); err != nil { + panic(err.Error()) + } + cipherText = aesgcm.Seal(nil, newIv, nonce, nil) + return base64.StdEncoding.EncodeToString(cipherText), base64.StdEncoding.EncodeToString(newIv), nil + } + cipherText = aesgcm.Seal(nil, []byte(iv), nonce, nil) + return base64.StdEncoding.EncodeToString(cipherText), iv, nil +} + +// EncryptNonceWithCBCPAD encrypts the nonce using the user's key-material +// with CBC encrypter. It will also pad the nonce using pkcs7. This is needed +// for Hyper Protect Crypto Services, since it supports only CBC Encryption. 
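+//
+// Illustrative call shape (editor's sketch): key and value are base64-encoded
+// strings; pass iv == "" to have a random IV generated and returned alongside
+// the ciphertext.
+//
+//	encNonce, iv, err := EncryptNonceWithCBCPAD(keyMaterialB64, nonceB64, "")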
+func EncryptNonceWithCBCPAD(key, value, iv string) (string, string, error) { + keyMat, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return "", "", fmt.Errorf("Failed to decode Key: %s", err) + } + + nonce, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return "", "", fmt.Errorf("Failed to decode Nonce: %s", err) + } + + block, err := aes.NewCipher(keyMat) + if err != nil { + return "", "", err + } + + // PKCS7 Padding + paddingLength := aes.BlockSize - (len(nonce) % aes.BlockSize) + paddingBytes := []byte{byte(paddingLength)} + paddingText := bytes.Repeat(paddingBytes, paddingLength) + nonce = append(nonce, paddingText...) + + var newIv []byte + if iv != "" { + newIv = []byte(iv) + } else { + newIv = make([]byte, aes.BlockSize) + // Generate an IV to achieve semantic security + if _, err := io.ReadFull(rand.Reader, newIv); err != nil { + return "", "", fmt.Errorf("Failed to generate IV: %s", err) + } + } + + cipherText := make([]byte, len(nonce)) + + mode := cipher.NewCBCEncrypter(block, newIv) + mode.CryptBlocks(cipherText, nonce) + + return base64.StdEncoding.EncodeToString(cipherText), base64.StdEncoding.EncodeToString(newIv), nil +} + +// encryptKey uses sha256 to encrypt the key +func encryptKey(key, pubKey string) (string, error) { + return encryptKeyWithSHA(key, pubKey, sha256.New()) +} + +// EncryptKeyWithSHA1 uses sha1 to encrypt the key +func EncryptKeyWithSHA1(key, pubKey string) (string, error) { + return encryptKeyWithSHA(key, pubKey, sha1.New()) +} + +func encryptKeyWithSHA(key, pubKey string, sha hash.Hash) (string, error) { + decodedPubKey, err := base64.StdEncoding.DecodeString(pubKey) + if err != nil { + return "", fmt.Errorf("Failed to decode public key: %s", err) + } + keyMat, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return "", fmt.Errorf("Failed to decode key material: %s", err) + } + pubKeyBlock, _ := pem.Decode(decodedPubKey) + if pubKeyBlock == nil { + return "", fmt.Errorf("Failed to decode public key into pem format: %s", err) + } + parsedPubKey, err := x509.ParsePKIXPublicKey(pubKeyBlock.Bytes) + if err != nil { + return "", fmt.Errorf("Failed to parse public key: %s", err) + } + publicKey, isRSAPublicKey := parsedPubKey.(*rsa.PublicKey) + if !isRSAPublicKey { + return "", fmt.Errorf("invalid public key") + } + encryptedKey, err := rsa.EncryptOAEP(sha, rand.Reader, publicKey, keyMat, []byte("")) + if err != nil { + return "", fmt.Errorf("Failed to encrypt key: %s", err) + } + return base64.StdEncoding.EncodeToString(encryptedKey), nil +} diff --git a/vendor/github.com/IBM/keyprotect-go-client/instances.go b/vendor/github.com/IBM/keyprotect-go-client/instances.go new file mode 100644 index 00000000000..257950648e2 --- /dev/null +++ b/vendor/github.com/IBM/keyprotect-go-client/instances.go @@ -0,0 +1,543 @@ +// Copyright 2019 IBM Corp. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package kp
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"time"
+)
+
+const (
+	// DualAuthDelete defines the policy type as dual auth delete
+	DualAuthDelete = "dualAuthDelete"
+
+	// AllowedNetwork defines the policy type as allowed network
+	AllowedNetwork = "allowedNetwork"
+
+	// AllowedIP defines the policy type as allowed IP addresses that are whitelisted
+	AllowedIP = "allowedIP"
+
+	// Metrics defines the policy type as metrics
+	Metrics = "metrics"
+	// KeyCreateImportAccess defines the policy type as key create import access
+	KeyCreateImportAccess = "keyCreateImportAccess"
+
+	// KeyCreateImportAccess policy attributes
+	CreateRootKey     = "CreateRootKey"
+	CreateStandardKey = "CreateStandardKey"
+	ImportRootKey     = "ImportRootKey"
+	ImportStandardKey = "ImportStandardKey"
+	EnforceToken      = "EnforceToken"
+)
+
+// InstancePolicy represents an instance-level policy as returned by the KP API,
+// such as the policy that enables dual authorization for deleting keys.
+type InstancePolicy struct {
+	CreatedBy  string     `json:"createdBy,omitempty"`
+	CreatedAt  *time.Time `json:"creationDate,omitempty"`
+	UpdatedAt  *time.Time `json:"lastUpdated,omitempty"`
+	UpdatedBy  string     `json:"updatedBy,omitempty"`
+	PolicyType string     `json:"policy_type,omitempty"`
+	PolicyData PolicyData `json:"policy_data,omitempty" mapstructure:"policyData"`
+}
+
+// PolicyData contains the details of the policy type
+type PolicyData struct {
+	Enabled    *bool       `json:"enabled,omitempty"`
+	Attributes *Attributes `json:"attributes,omitempty"`
+}
+
+// Attributes contains the details of an instance policy type
+type Attributes struct {
+	AllowedNetwork    *string     `json:"allowed_network,omitempty"`
+	AllowedIP         IPAddresses `json:"allowed_ip,omitempty"`
+	CreateRootKey     *bool       `json:"create_root_key,omitempty"`
+	CreateStandardKey *bool       `json:"create_standard_key,omitempty"`
+	ImportRootKey     *bool       `json:"import_root_key,omitempty"`
+	ImportStandardKey *bool       `json:"import_standard_key,omitempty"`
+	EnforceToken      *bool       `json:"enforce_token,omitempty"`
+}
+
+// IPAddresses represents a list of allowed IP addresses or subnets.
+type IPAddresses []string
+
+// InstancePolicies represents a collection of Policies associated with Key Protect instances.
+type InstancePolicies struct {
+	Metadata PoliciesMetadata `json:"metadata"`
+	Policies []InstancePolicy `json:"resources"`
+}
+
+// GetDualAuthInstancePolicy retrieves the dual auth delete policy details associated with the instance.
+// For more information, refer to the Key Protect docs at the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-manage-dual-auth
+func (c *Client) GetDualAuthInstancePolicy(ctx context.Context) (*InstancePolicy, error) {
+	policyResponse := InstancePolicies{}
+
+	err := c.getInstancePolicy(ctx, DualAuthDelete, &policyResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(policyResponse.Policies) == 0 {
+		return nil, nil
+	}
+	return &policyResponse.Policies[0], nil
+}
+
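+// Illustrative usage (editor's sketch, not upstream code): fetch the dual
+// auth delete policy and check whether it is enabled. A nil policy means no
+// policy is set on the instance.
+//
+//	policy, err := client.GetDualAuthInstancePolicy(ctx)
+//	if err == nil && policy != nil &&
+//		policy.PolicyData.Enabled != nil && *policy.PolicyData.Enabled {
+//		// dual authorization is required to delete keys
+//	}
+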
+// GetAllowedNetworkInstancePolicy retrieves the allowed network policy details associated with the instance.
+// For more information, refer to the Key Protect docs at the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-managing-network-access-policies
+func (c *Client) GetAllowedNetworkInstancePolicy(ctx context.Context) (*InstancePolicy, error) {
+	policyResponse := InstancePolicies{}
+
+	err := c.getInstancePolicy(ctx, AllowedNetwork, &policyResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(policyResponse.Policies) == 0 {
+		return nil, nil
+	}
+
+	return &policyResponse.Policies[0], nil
+}
+
+// GetAllowedIPInstancePolicy retrieves the allowed IP instance policy details associated with the instance.
+// For more information, refer to the Key Protect docs at the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-manage-allowed-ip
+func (c *Client) GetAllowedIPInstancePolicy(ctx context.Context) (*InstancePolicy, error) {
+	policyResponse := InstancePolicies{}
+
+	err := c.getInstancePolicy(ctx, AllowedIP, &policyResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(policyResponse.Policies) == 0 {
+		return nil, nil
+	}
+
+	return &policyResponse.Policies[0], nil
+}
+
+// GetKeyCreateImportAccessInstancePolicy retrieves the key create import access policy details associated with the instance.
+// For more information, refer to the Key Protect docs at the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-manage-keyCreateImportAccess
+func (c *Client) GetKeyCreateImportAccessInstancePolicy(ctx context.Context) (*InstancePolicy, error) {
+	policyResponse := InstancePolicies{}
+
+	err := c.getInstancePolicy(ctx, KeyCreateImportAccess, &policyResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(policyResponse.Policies) == 0 {
+		return nil, nil
+	}
+
+	return &policyResponse.Policies[0], nil
+}
+
+func (c *Client) getInstancePolicy(ctx context.Context, policyType string, policyResponse *InstancePolicies) error {
+	req, err := c.newRequest("GET", "instance/policies", nil)
+	if err != nil {
+		return err
+	}
+
+	v := url.Values{}
+	v.Set("policy", policyType)
+	req.URL.RawQuery = v.Encode()
+
+	_, err = c.do(ctx, req, &policyResponse)
+
+	return err
+}
+
+// GetMetricsInstancePolicy retrieves the metrics policy details associated with the instance.
+// For more information, refer to the Key Protect docs at the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-manage-sysdig-metrics
+func (c *Client) GetMetricsInstancePolicy(ctx context.Context) (*InstancePolicy, error) {
+	policyResponse := InstancePolicies{}
+
+	err := c.getInstancePolicy(ctx, Metrics, &policyResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(policyResponse.Policies) == 0 {
+		return nil, nil
+	}
+	return &policyResponse.Policies[0], nil
+}
+
+// GetInstancePolicies retrieves all policies of an instance.
+func (c *Client) GetInstancePolicies(ctx context.Context) ([]InstancePolicy, error) {
+	policyresponse := InstancePolicies{}
+
+	req, err := c.newRequest("GET", "instance/policies", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = c.do(ctx, req, &policyresponse)
+	if err != nil {
+		return nil, err
+	}
+
+	return policyresponse.Policies, nil
+}
+
+func (c *Client) setInstancePolicy(ctx context.Context, policyType string, policyRequest InstancePolicies) error {
+	req, err := c.newRequest("PUT", "instance/policies", &policyRequest)
+	if err != nil {
+		return err
+	}
+
+	v := url.Values{}
+	v.Set("policy", policyType)
+	req.URL.RawQuery = v.Encode()
+
+	policiesResponse := InstancePolicies{}
+	_, err = c.do(ctx, req, &policiesResponse)
+
+	return err
+}
+
+// SetDualAuthInstancePolicy updates the dual auth delete policy details associated with an instance.
+// For more information, refer to the Key Protect docs at the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-manage-dual-auth
+func (c *Client) SetDualAuthInstancePolicy(ctx context.Context, enable bool) error {
+	policy := InstancePolicy{
+		PolicyType: DualAuthDelete,
+		PolicyData: PolicyData{
+			Enabled: &enable,
+		},
+	}
+
+	policyRequest := InstancePolicies{
+		Metadata: PoliciesMetadata{
+			CollectionType:   policyType,
+			NumberOfPolicies: 1,
+		},
+		Policies: []InstancePolicy{policy},
+	}
+
+	err := c.setInstancePolicy(ctx, DualAuthDelete, policyRequest)
+
+	return err
+}
+
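+// Illustrative usage (editor's sketch): enabling dual auth delete on the
+// instance with the method above.
+//
+//	err := client.SetDualAuthInstancePolicy(ctx, true)
+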
+// SetAllowedIPInstancePolicy updates the allowed IP instance policy details associated with an instance.
+// For more information, refer to the Key Protect docs at the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-manage-allowed-ip
+func (c *Client) SetAllowedIPInstancePolicy(ctx context.Context, enable bool, allowedIPs []string) error {
+
+	policy := InstancePolicy{
+		PolicyType: AllowedIP,
+		PolicyData: PolicyData{
+			Enabled: &enable,
+		},
+	}
+
+	// The IP address validation is performed by the Key Protect service.
+	if enable && len(allowedIPs) != 0 {
+		policy.PolicyData.Attributes = &Attributes{}
+		policy.PolicyData.Attributes.AllowedIP = allowedIPs
+	} else if enable && len(allowedIPs) == 0 {
+		return fmt.Errorf("Please provide at least 1 IP subnet specified with CIDR notation")
+	} else if !enable && len(allowedIPs) != 0 {
+		return fmt.Errorf("IP address list should only be provided if the policy is being enabled")
+	}
+
+	policyRequest := InstancePolicies{
+		Metadata: PoliciesMetadata{
+			CollectionType:   policyType,
+			NumberOfPolicies: 1,
+		},
+		Policies: []InstancePolicy{policy},
+	}
+	err := c.setInstancePolicy(ctx, AllowedIP, policyRequest)
+
+	return err
+}
+
+// SetAllowedNetworkInstancePolicy updates the allowed network policy details associated with an instance.
+// For more information, refer to the Key Protect docs at the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-managing-network-access-policies
+func (c *Client) SetAllowedNetworkInstancePolicy(ctx context.Context, enable bool, networkType string) error {
+	policy := InstancePolicy{
+		PolicyType: AllowedNetwork,
+		PolicyData: PolicyData{
+			Enabled:    &enable,
+			Attributes: &Attributes{},
+		},
+	}
+	if networkType != "" {
+		policy.PolicyData.Attributes.AllowedNetwork = &networkType
+	}
+
+	policyRequest := InstancePolicies{
+		Metadata: PoliciesMetadata{
+			CollectionType:   policyType,
+			NumberOfPolicies: 1,
+		},
+		Policies: []InstancePolicy{policy},
+	}
+
+	err := c.setInstancePolicy(ctx, AllowedNetwork, policyRequest)
+
+	return err
+}
+
+// SetMetricsInstancePolicy updates the metrics policy details associated with an instance.
+// For more information, refer to the Key Protect docs at the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-manage-sysdig-metrics
+func (c *Client) SetMetricsInstancePolicy(ctx context.Context, enable bool) error {
+	policy := InstancePolicy{
+		PolicyType: Metrics,
+		PolicyData: PolicyData{
+			Enabled: &enable,
+		},
+	}
+
+	policyRequest := InstancePolicies{
+		Metadata: PoliciesMetadata{
+			CollectionType:   policyType,
+			NumberOfPolicies: 1,
+		},
+		Policies: []InstancePolicy{policy},
+	}
+
+	err := c.setInstancePolicy(ctx, Metrics, policyRequest)
+	if err != nil {
+		return err
+	}
+
+	return err
+}
+
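+// Illustrative usage (editor's sketch) of the allowed network setter above.
+// "public-and-private" and "private-only" are the values the service docs
+// describe for networkType; treat them as assumptions here.
+//
+//	err := client.SetAllowedNetworkInstancePolicy(ctx, true, "private-only")
+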
+// SetKeyCreateImportAccessInstancePolicy updates the key create import access policy details associated with an instance.
+// For more information, please refer to the Key Protect docs in the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-manage-keyCreateImportAccess
+func (c *Client) SetKeyCreateImportAccessInstancePolicy(ctx context.Context, enable bool, attributes map[string]bool) error {
+	policy := InstancePolicy{
+		PolicyType: KeyCreateImportAccess,
+		PolicyData: PolicyData{
+			Enabled: &enable,
+		},
+	}
+
+	if enable {
+		policy.PolicyData.Attributes = &Attributes{}
+		a := policy.PolicyData.Attributes
+		if val, ok := attributes[CreateRootKey]; ok {
+			a.CreateRootKey = &val
+		}
+		if val, ok := attributes[CreateStandardKey]; ok {
+			a.CreateStandardKey = &val
+		}
+		if val, ok := attributes[ImportRootKey]; ok {
+			a.ImportRootKey = &val
+		}
+		if val, ok := attributes[ImportStandardKey]; ok {
+			a.ImportStandardKey = &val
+		}
+		if val, ok := attributes[EnforceToken]; ok {
+			a.EnforceToken = &val
+		}
+	}
+
+	policyRequest := InstancePolicies{
+		Metadata: PoliciesMetadata{
+			CollectionType:   policyType,
+			NumberOfPolicies: 1,
+		},
+		Policies: []InstancePolicy{policy},
+	}
+
+	err := c.setInstancePolicy(ctx, KeyCreateImportAccess, policyRequest)
+
+	return err
+}
+
+// BasicPolicyData defines the attribute input for a policy that supports only the enabled parameter
type BasicPolicyData struct {
+	Enabled bool
+}
+
+// AllowedNetworkPolicyData defines the attribute input for the Allowed Network instance policy
+type AllowedNetworkPolicyData struct {
+	Enabled bool
+	Network string
+}
+
+// AllowedIPPolicyData defines the attribute input for the Allowed IP instance policy
+type AllowedIPPolicyData struct {
+	Enabled     bool
+	IPAddresses IPAddresses
+}
+
+// KeyCreateImportAccessInstancePolicy defines the attribute input for the Key Create Import Access instance policy
+type KeyCreateImportAccessInstancePolicy struct {
+	Enabled           bool
+	CreateRootKey     bool
+	CreateStandardKey bool
+	ImportRootKey     bool
+	ImportStandardKey bool
+	EnforceToken      bool
+}
+
+// MultiplePolicies defines the input for the SetInstancePolicies method that can hold multiple policy details
+type MultiplePolicies struct {
+	DualAuthDelete        *BasicPolicyData
+	AllowedNetwork        *AllowedNetworkPolicyData
+	AllowedIP             *AllowedIPPolicyData
+	Metrics               *BasicPolicyData
+	KeyCreateImportAccess *KeyCreateImportAccessInstancePolicy
+}
+
+// SetInstancePolicies updates single or multiple policy details of an instance.
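+//
+// Illustrative usage (editor's sketch): enabling two policies in one call.
+//
+//	err := client.SetInstancePolicies(ctx, MultiplePolicies{
+//		DualAuthDelete: &BasicPolicyData{Enabled: true},
+//		Metrics:        &BasicPolicyData{Enabled: true},
+//	})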
+func (c *Client) SetInstancePolicies(ctx context.Context, policies MultiplePolicies) error { + var resPolicies []InstancePolicy + + if policies.DualAuthDelete != nil { + policy := InstancePolicy{ + PolicyType: DualAuthDelete, + PolicyData: PolicyData{ + Enabled: &(policies.DualAuthDelete.Enabled), + }, + } + resPolicies = append(resPolicies, policy) + } + + if policies.AllowedNetwork != nil { + policy := InstancePolicy{ + PolicyType: AllowedNetwork, + PolicyData: PolicyData{ + Enabled: &(policies.AllowedNetwork.Enabled), + Attributes: &Attributes{ + AllowedNetwork: &(policies.AllowedNetwork.Network), + }, + }, + } + resPolicies = append(resPolicies, policy) + } + + if policies.AllowedIP != nil { + policy := InstancePolicy{ + PolicyType: AllowedIP, + PolicyData: PolicyData{ + Enabled: &(policies.AllowedIP.Enabled), + Attributes: &Attributes{ + AllowedIP: policies.AllowedIP.IPAddresses, + }, + }, + } + resPolicies = append(resPolicies, policy) + } + + if policies.Metrics != nil { + policy := InstancePolicy{ + PolicyType: Metrics, + PolicyData: PolicyData{ + Enabled: &(policies.Metrics.Enabled), + }, + } + resPolicies = append(resPolicies, policy) + } + + if policies.KeyCreateImportAccess != nil { + policy := InstancePolicy{ + PolicyType: KeyCreateImportAccess, + PolicyData: PolicyData{ + Enabled: &(policies.KeyCreateImportAccess.Enabled), + Attributes: &Attributes{}, + }, + } + + if policies.KeyCreateImportAccess.CreateRootKey { + policy.PolicyData.Attributes.CreateRootKey = &policies.KeyCreateImportAccess.CreateRootKey + } + if policies.KeyCreateImportAccess.CreateStandardKey { + policy.PolicyData.Attributes.CreateStandardKey = &policies.KeyCreateImportAccess.CreateStandardKey + } + if policies.KeyCreateImportAccess.ImportRootKey { + policy.PolicyData.Attributes.ImportRootKey = &policies.KeyCreateImportAccess.ImportRootKey + } + if policies.KeyCreateImportAccess.ImportStandardKey { + policy.PolicyData.Attributes.ImportStandardKey = &policies.KeyCreateImportAccess.ImportStandardKey + } + if policies.KeyCreateImportAccess.EnforceToken { + policy.PolicyData.Attributes.EnforceToken = &policies.KeyCreateImportAccess.EnforceToken + } + + resPolicies = append(resPolicies, policy) + } + + policyRequest := InstancePolicies{ + Metadata: PoliciesMetadata{ + CollectionType: policyType, + NumberOfPolicies: len(resPolicies), + }, + Policies: resPolicies, + } + + policyresponse := Policies{} + + req, err := c.newRequest("PUT", "instance/policies", &policyRequest) + if err != nil { + return err + } + + _, err = c.do(ctx, req, &policyresponse) + + return err +} + +type portsMetadata struct { + CollectionType string `json:"collectionType"` + NumberOfPorts int `json:"collectionTotal"` +} + +type portResponse struct { + Metadata portsMetadata `json:"metadata"` + Ports []privatePort `json:"resources"` +} +type privatePort struct { + PrivatePort int `json:"private_endpoint_port,omitempty"` +} + +// GetAllowedIPPrivateNetworkPort retrieves the private endpoint port assigned to allowed ip policy. +func (c *Client) GetAllowedIPPrivateNetworkPort(ctx context.Context) (int, error) { + var portResponse portResponse + + req, err := c.newRequest("GET", "instance/allowed_ip_port", nil) + if err != nil { + return 0, err + } + + _, err = c.do(ctx, req, &portResponse) + if err != nil { + return 0, err + } + + if len(portResponse.Ports) == 0 { + return 0, fmt.Errorf("No port number available. 
Please check the instance has an enabled allowedIP policy")
+	}
+	return portResponse.Ports[0].PrivatePort, nil
+}
diff --git a/vendor/github.com/IBM/keyprotect-go-client/key_alias.go b/vendor/github.com/IBM/keyprotect-go-client/key_alias.go
new file mode 100644
index 00000000000..fdb63f9e901
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/key_alias.go
@@ -0,0 +1,68 @@
+package kp
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+var (
+	requestPath = "keys/%s/aliases/%s"
+)
+
+// KeyAlias represents the alias details of a key as returned by the KP API
+type KeyAlias struct {
+	KeyID        string     `json:"keyId,omitempty"`
+	Alias        string     `json:"alias,omitempty"`
+	CreatedBy    string     `json:"createdBy,omitempty"`
+	CreationDate *time.Time `json:"creationDate,omitempty"`
+}
+
+// AliasesMetadata represents the metadata of a collection of aliases
+type AliasesMetadata struct {
+	CollectionType  string `json:"collectionType"`
+	NumberOfAliases int    `json:"collectionTotal"`
+}
+
+type KeyAliases struct {
+	Metadata   AliasesMetadata `json:"metadata"`
+	KeyAliases []KeyAlias      `json:"resources"`
+}
+
+// CreateKeyAlias creates an alias name for a key.
+// An alias name acts as an identifier, just like a key ID.
+// For more information please refer to the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-create-key-alias#create-key-alias-api
+func (c *Client) CreateKeyAlias(ctx context.Context, aliasName, keyID string) (*KeyAlias, error) {
+
+	req, err := c.newRequest("POST", fmt.Sprintf(requestPath, keyID, aliasName), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	aliasesResponse := KeyAliases{}
+	_, err = c.do(ctx, req, &aliasesResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(aliasesResponse.KeyAliases) == 0 {
+		return nil, nil
+	}
+
+	return &aliasesResponse.KeyAliases[0], nil
+}
+
+// DeleteKeyAlias deletes an alias name associated with a key.
+// For more information please refer to the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-create-key-alias#delete-key-alias
+func (c *Client) DeleteKeyAlias(ctx context.Context, aliasName, keyID string) error {
+
+	req, err := c.newRequest("DELETE", fmt.Sprintf(requestPath, keyID, aliasName), nil)
+	if err != nil {
+		return err
+	}
+	_, err = c.do(ctx, req, nil)
+
+	return err
+}
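Not part of the vendored diff: an illustrative sketch of the alias API above, assuming a configured `*kp.Client`; the alias name and key ID are hypothetical.

```go
package example

import (
	"context"
	"fmt"

	kp "github.com/IBM/keyprotect-go-client"
)

// aliasRoundTrip attaches a human-readable alias to a key, then removes it.
func aliasRoundTrip(ctx context.Context, client *kp.Client, keyID string) error {
	alias, err := client.CreateKeyAlias(ctx, "prod-signing-key", keyID)
	if err != nil {
		return err
	}
	if alias != nil {
		fmt.Printf("alias %q now resolves to key %s\n", alias.Alias, alias.KeyID)
	}

	// Aliases can be dropped without affecting the underlying key.
	return client.DeleteKeyAlias(ctx, "prod-signing-key", keyID)
}
```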
diff --git a/vendor/github.com/IBM/keyprotect-go-client/key_rings.go b/vendor/github.com/IBM/keyprotect-go-client/key_rings.go
new file mode 100644
index 00000000000..eddb0710faa
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/key_rings.go
@@ -0,0 +1,75 @@
+package kp
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+const (
+	path = "key_rings"
+)
+
+type KeyRing struct {
+	ID           string     `json:"id,omitempty"`
+	CreationDate *time.Time `json:"creationDate,omitempty"`
+	CreatedBy    string     `json:"createdBy,omitempty"`
+}
+
+type KeyRings struct {
+	Metadata KeysMetadata `json:"metadata"`
+	KeyRings []KeyRing    `json:"resources"`
+}
+
+// CreateKeyRing method creates a key ring in the instance with the provided name
+// For information please refer to the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-managing-key-rings#create-key-ring-api
+func (c *Client) CreateKeyRing(ctx context.Context, id string) error {
+
+	req, err := c.newRequest("POST", fmt.Sprintf(path+"/%s", id), nil)
+	if err != nil {
+		return err
+	}
+
+	_, err = c.do(ctx, req, nil)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetKeyRings method retrieves all the key rings associated with the instance
+// For information please refer to the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-managing-key-rings#list-key-ring-api
+func (c *Client) GetKeyRings(ctx context.Context) (*KeyRings, error) {
+	rings := KeyRings{}
+	req, err := c.newRequest("GET", path, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = c.do(ctx, req, &rings)
+	if err != nil {
+		return nil, err
+	}
+
+	return &rings, nil
+}
+
+// DeleteKeyRing method deletes the key ring with the provided name in the instance
+// For information please refer to the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-managing-key-rings#delete-key-ring-api
+func (c *Client) DeleteKeyRing(ctx context.Context, id string) error {
+	req, err := c.newRequest("DELETE", fmt.Sprintf(path+"/%s", id), nil)
+	if err != nil {
+		return err
+	}
+
+	_, err = c.do(ctx, req, nil)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
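Not part of the vendored diff: a short sketch of the key-ring API above, assuming a configured `*kp.Client`; the ring ID is hypothetical.

```go
package example

import (
	"context"
	"fmt"

	kp "github.com/IBM/keyprotect-go-client"
)

// listRings creates a ring, then enumerates all rings in the instance.
func listRings(ctx context.Context, client *kp.Client) error {
	if err := client.CreateKeyRing(ctx, "application-a"); err != nil {
		return err
	}

	rings, err := client.GetKeyRings(ctx)
	if err != nil {
		return err
	}
	for _, ring := range rings.KeyRings {
		fmt.Println(ring.ID)
	}
	return nil
}
```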
diff --git a/vendor/github.com/IBM/keyprotect-go-client/keys.go b/vendor/github.com/IBM/keyprotect-go-client/keys.go
new file mode 100644
index 00000000000..d8782195102
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/keys.go
@@ -0,0 +1,463 @@
+// Copyright 2019 IBM Corp.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kp
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"log"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+const (
+	ReturnMinimal        PreferReturn = 0
+	ReturnRepresentation PreferReturn = 1
+
+	keyType = "application/vnd.ibm.kms.key+json"
+)
+
+var (
+	preferHeaders = []string{"return=minimal", "return=representation"}
+)
+
+// PreferReturn designates the value for the "Prefer" header.
+type PreferReturn int
+
+// Key represents a key as returned by the KP API.
+type Key struct {
+	ID                  string      `json:"id,omitempty"`
+	Name                string      `json:"name,omitempty"`
+	Description         string      `json:"description,omitempty"`
+	Type                string      `json:"type,omitempty"`
+	Tags                []string    `json:"Tags,omitempty"`
+	Aliases             []string    `json:"aliases,omitempty"`
+	AlgorithmType       string      `json:"algorithmType,omitempty"`
+	CreatedBy           string      `json:"createdBy,omitempty"`
+	CreationDate        *time.Time  `json:"creationDate,omitempty"`
+	LastUpdateDate      *time.Time  `json:"lastUpdateDate,omitempty"`
+	LastRotateDate      *time.Time  `json:"lastRotateDate,omitempty"`
+	KeyVersion          *KeyVersion `json:"keyVersion,omitempty" mapstructure:"keyVersion"`
+	KeyRingID           string      `json:"keyRingID,omitempty"`
+	Extractable         bool        `json:"extractable"`
+	Expiration          *time.Time  `json:"expirationDate,omitempty"`
+	Imported            bool        `json:"imported,omitempty"`
+	Payload             string      `json:"payload,omitempty"`
+	State               int         `json:"state,omitempty"`
+	EncryptionAlgorithm string      `json:"encryptionAlgorithm,omitempty"`
+	CRN                 string      `json:"crn,omitempty"`
+	EncryptedNonce      string      `json:"encryptedNonce,omitempty"`
+	IV                  string      `json:"iv,omitempty"`
+	Deleted             *bool       `json:"deleted,omitempty"`
+	DeletedBy           *string     `json:"deletedBy,omitempty"`
+	DeletionDate        *time.Time  `json:"deletionDate,omitempty"`
+	DualAuthDelete      *DualAuth   `json:"dualAuthDelete,omitempty"`
+}
+
+// KeysMetadata represents the metadata of a collection of keys.
+type KeysMetadata struct {
+	CollectionType string `json:"collectionType"`
+	NumberOfKeys   int    `json:"collectionTotal"`
+}
+
+// Keys represents a collection of Keys.
+type Keys struct {
+	Metadata KeysMetadata `json:"metadata"`
+	Keys     []Key        `json:"resources"`
+}
+
+// KeysActionRequest represents request parameters for a key action
+// API call.
+type KeysActionRequest struct {
+	PlainText  string   `json:"plaintext,omitempty"`
+	AAD        []string `json:"aad,omitempty"`
+	CipherText string   `json:"ciphertext,omitempty"`
+	Payload    string   `json:"payload,omitempty"`
+}
+
+type KeyVersion struct {
+	ID           string     `json:"id,omitempty"`
+	CreationDate *time.Time `json:"creationDate,omitempty"`
+}
+
+// CreateKey creates a new KP key.
+func (c *Client) CreateKey(ctx context.Context, name string, expiration *time.Time, extractable bool) (*Key, error) {
+	return c.CreateImportedKey(ctx, name, expiration, "", "", "", extractable)
+}
+
+// CreateImportedKey creates a new KP key from the given key material.
+func (c *Client) CreateImportedKey(ctx context.Context, name string, expiration *time.Time, payload, encryptedNonce, iv string, extractable bool) (*Key, error) {
+	key := Key{
+		Name:        name,
+		Type:        keyType,
+		Extractable: extractable,
+		Payload:     payload,
+	}
+
+	if payload != "" && encryptedNonce != "" && iv != "" {
+		key.EncryptedNonce = encryptedNonce
+		key.IV = iv
+		key.EncryptionAlgorithm = importTokenEncAlgo
+	}
+
+	if expiration != nil {
+		key.Expiration = expiration
+	}
+
+	return c.createKey(ctx, key)
+}
+
+// CreateRootKey creates a new, non-extractable key resource without
+// key material.
+func (c *Client) CreateRootKey(ctx context.Context, name string, expiration *time.Time) (*Key, error) {
+	return c.CreateKey(ctx, name, expiration, false)
+}
+
+// CreateStandardKey creates a new, extractable key resource without
+// key material.
+func (c *Client) CreateStandardKey(ctx context.Context, name string, expiration *time.Time) (*Key, error) {
+	return c.CreateKey(ctx, name, expiration, true)
+}
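Not part of the vendored diff: a sketch of the key-creation helpers above, assuming a configured `*kp.Client`; key names are hypothetical.

```go
package example

import (
	"context"
	"time"

	kp "github.com/IBM/keyprotect-go-client"
)

// createKeys provisions one root key and one short-lived standard key.
func createKeys(ctx context.Context, client *kp.Client) error {
	// Root keys are non-extractable and typically used to wrap DEKs.
	if _, err := client.CreateRootKey(ctx, "my-root-key", nil); err != nil {
		return err
	}

	// Standard keys are extractable; this one expires in 90 days.
	expires := time.Now().Add(90 * 24 * time.Hour)
	_, err := client.CreateStandardKey(ctx, "my-standard-key", &expires)
	return err
}
```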
+
+// CreateImportedRootKey creates a new, non-extractable key resource
+// with the given key material.
+func (c *Client) CreateImportedRootKey(ctx context.Context, name string, expiration *time.Time, payload, encryptedNonce, iv string) (*Key, error) {
+	return c.CreateImportedKey(ctx, name, expiration, payload, encryptedNonce, iv, false)
+}
+
+// CreateImportedStandardKey creates a new, extractable key resource with the
+// given key material.
+func (c *Client) CreateImportedStandardKey(ctx context.Context, name string, expiration *time.Time, payload string) (*Key, error) {
+	return c.CreateImportedKey(ctx, name, expiration, payload, "", "", true)
+}
+
+// CreateKeyWithAliases creates a new key with alias names. A key can have a maximum of 5 alias names.
+// For more information please refer to the links below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-create-root-keys#create-root-key-api
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-create-standard-keys#create-standard-key-api
+func (c *Client) CreateKeyWithAliases(ctx context.Context, name string, expiration *time.Time, extractable bool, aliases []string) (*Key, error) {
+	return c.CreateImportedKeyWithAliases(ctx, name, expiration, "", "", "", extractable, aliases)
+}
+
+// CreateImportedKeyWithAliases creates a new key with alias names and the provided key material. A key can have a maximum of 5 alias names.
+// When importing root keys with an import token, encryptedNonce and iv need to be passed along with the payload.
+// Standard keys cannot be imported with an import token, hence only the payload is required.
+// For more information please refer to the links below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-import-root-keys#import-root-key-api
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-import-standard-keys#import-standard-key-gui
+func (c *Client) CreateImportedKeyWithAliases(ctx context.Context, name string, expiration *time.Time, payload, encryptedNonce, iv string, extractable bool, aliases []string) (*Key, error) {
+	key := Key{
+		Name:        name,
+		Type:        keyType,
+		Extractable: extractable,
+		Payload:     payload,
+		Aliases:     aliases,
+	}
+
+	if !extractable && payload != "" && encryptedNonce != "" && iv != "" {
+		key.EncryptedNonce = encryptedNonce
+		key.IV = iv
+		key.EncryptionAlgorithm = importTokenEncAlgo
+	}
+
+	if expiration != nil {
+		key.Expiration = expiration
+	}
+
+	return c.createKey(ctx, key)
+}
+
+func (c *Client) createKey(ctx context.Context, key Key) (*Key, error) {
+	keysRequest := Keys{
+		Metadata: KeysMetadata{
+			CollectionType: keyType,
+			NumberOfKeys:   1,
+		},
+		Keys: []Key{key},
+	}
+
+	req, err := c.newRequest("POST", "keys", &keysRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	keysResponse := Keys{}
+	if _, err := c.do(ctx, req, &keysResponse); err != nil {
+		return nil, err
+	}
+
+	return &keysResponse.Keys[0], nil
+}
+
+// GetKeys retrieves a collection of keys that can be paged through.
+func (c *Client) GetKeys(ctx context.Context, limit int, offset int) (*Keys, error) {
+	if limit == 0 {
+		limit = 2000
+	}
+
+	req, err := c.newRequest("GET", "keys", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	v := url.Values{}
+	v.Set("limit", strconv.Itoa(limit))
+	v.Set("offset", strconv.Itoa(offset))
+	req.URL.RawQuery = v.Encode()
+
+	keys := Keys{}
+	_, err = c.do(ctx, req, &keys)
+	if err != nil {
+		return nil, err
+	}
+
+	return &keys, nil
+}
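Not part of the vendored diff: a sketch of paging through GetKeys, assuming a configured `*kp.Client`; the page size is arbitrary.

```go
package example

import (
	"context"
	"fmt"

	kp "github.com/IBM/keyprotect-go-client"
)

// listAllKeys pages through every key in the instance, 100 at a time.
func listAllKeys(ctx context.Context, client *kp.Client) error {
	const pageSize = 100
	for offset := 0; ; offset += pageSize {
		page, err := client.GetKeys(ctx, pageSize, offset)
		if err != nil {
			return err
		}
		for _, key := range page.Keys {
			fmt.Printf("%s\t%s\n", key.ID, key.Name)
		}
		// A short page means we have reached the end of the collection.
		if len(page.Keys) < pageSize {
			return nil
		}
	}
}
```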
+
+// GetKey retrieves a key by ID or alias name.
+// For more information on Key Alias please refer to the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-retrieve-key
+func (c *Client) GetKey(ctx context.Context, idOrAlias string) (*Key, error) {
+	return c.getKey(ctx, idOrAlias, "keys/%s")
+}
+
+// GetKeyMetadata retrieves the metadata of a Key by ID or alias name.
+// Note that the "/api/v2/keys/{id}/metadata" API does not return the payload,
+// therefore the payload attribute in the Key pointer will always be empty.
+// If you need the payload, you need to use the GetKey() function with the
+// correct service access role.
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-manage-access#service-access-roles
+func (c *Client) GetKeyMetadata(ctx context.Context, idOrAlias string) (*Key, error) {
+	return c.getKey(ctx, idOrAlias, "keys/%s/metadata")
+}
+
+func (c *Client) getKey(ctx context.Context, id string, path string) (*Key, error) {
+	keys := Keys{}
+
+	req, err := c.newRequest("GET", fmt.Sprintf(path, id), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = c.do(ctx, req, &keys)
+	if err != nil {
+		return nil, err
+	}
+
+	return &keys.Keys[0], nil
+}
+
+type CallOpt interface{}
+
+type ForceOpt struct {
+	Force bool
+}
+
+// DeleteKey deletes a key resource by specifying the ID of the key.
+func (c *Client) DeleteKey(ctx context.Context, id string, prefer PreferReturn, callOpts ...CallOpt) (*Key, error) {
+
+	req, err := c.newRequest("DELETE", fmt.Sprintf("keys/%s", id), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, opt := range callOpts {
+		switch v := opt.(type) {
+		case ForceOpt:
+			params := url.Values{}
+			params.Set("force", strconv.FormatBool(v.Force))
+			req.URL.RawQuery = params.Encode()
+		default:
+			log.Printf("WARNING: Ignoring invalid CallOpt passed to DeleteKey: %v\n", v)
+		}
+	}
+
+	req.Header.Set("Prefer", preferHeaders[prefer])
+
+	keys := Keys{}
+	_, err = c.do(ctx, req, &keys)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(keys.Keys) > 0 {
+		return &keys.Keys[0], nil
+	}
+
+	return nil, nil
+}
+
+// RestoreKey method reverts a deleted key back to the active state.
+// This method restores any key from the deleted state to the active state.
+// For more information please refer to the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-restore-keys
+func (c *Client) RestoreKey(ctx context.Context, id string) (*Key, error) {
+	req, err := c.newRequest("POST", fmt.Sprintf("keys/%s/restore", id), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	keysResponse := Keys{}
+
+	_, err = c.do(ctx, req, &keysResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	return &keysResponse.Keys[0], nil
+}
+
+// Wrap calls the wrap action with the given plain text.
+func (c *Client) Wrap(ctx context.Context, id string, plainText []byte, additionalAuthData *[]string) ([]byte, error) {
+	_, ct, err := c.wrap(ctx, id, plainText, additionalAuthData)
+	return ct, err
+}
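Not part of the vendored diff: a sketch of forced deletion and restore, assuming a configured `*kp.Client`; the key ID is hypothetical, and the restore window is a service-side constraint.

```go
package example

import (
	"context"

	kp "github.com/IBM/keyprotect-go-client"
)

// forceDeleteAndRestore force-deletes a key, then brings it back.
func forceDeleteAndRestore(ctx context.Context, client *kp.Client, keyID string) error {
	// ForceOpt adds force=true, bypassing the normal refusal to delete
	// keys that are still registered against other resources.
	if _, err := client.DeleteKey(ctx, keyID, kp.ReturnRepresentation, kp.ForceOpt{Force: true}); err != nil {
		return err
	}

	// A deleted key can be restored to the active state within the
	// service's restore window.
	_, err := client.RestoreKey(ctx, keyID)
	return err
}
```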
+
+// WrapCreateDEK calls the wrap action without plain text.
+func (c *Client) WrapCreateDEK(ctx context.Context, id string, additionalAuthData *[]string) ([]byte, []byte, error) {
+	return c.wrap(ctx, id, nil, additionalAuthData)
+}
+
+func (c *Client) wrap(ctx context.Context, id string, plainText []byte, additionalAuthData *[]string) ([]byte, []byte, error) {
+	keysActionReq := &KeysActionRequest{}
+
+	if plainText != nil {
+		_, err := base64.StdEncoding.DecodeString(string(plainText))
+		if err != nil {
+			return nil, nil, err
+		}
+		keysActionReq.PlainText = string(plainText)
+	}
+
+	if additionalAuthData != nil {
+		keysActionReq.AAD = *additionalAuthData
+	}
+
+	keysAction, err := c.doKeysAction(ctx, id, "wrap", keysActionReq)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pt := []byte(keysAction.PlainText)
+	ct := []byte(keysAction.CipherText)
+
+	return pt, ct, nil
+}
+
+// Unwrap is deprecated since it returns only plaintext and doesn't know how to handle rotation.
+func (c *Client) Unwrap(ctx context.Context, id string, cipherText []byte, additionalAuthData *[]string) ([]byte, error) {
+	plainText, _, err := c.UnwrapV2(ctx, id, cipherText, additionalAuthData)
+	if err != nil {
+		return nil, err
+	}
+	return plainText, nil
+}
+
+// UnwrapV2 is Unwrap with rotation support.
+func (c *Client) UnwrapV2(ctx context.Context, id string, cipherText []byte, additionalAuthData *[]string) ([]byte, []byte, error) {
+
+	keysAction := &KeysActionRequest{
+		CipherText: string(cipherText),
+	}
+
+	if additionalAuthData != nil {
+		keysAction.AAD = *additionalAuthData
+	}
+
+	respAction, err := c.doKeysAction(ctx, id, "unwrap", keysAction)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	plainText := []byte(respAction.PlainText)
+	rewrapped := []byte(respAction.CipherText)
+
+	return plainText, rewrapped, nil
+}
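Not part of the vendored diff: a sketch of envelope encryption with the wrap/unwrap actions above, assuming a configured `*kp.Client`; the root key ID and AAD value are hypothetical.

```go
package example

import (
	"context"

	kp "github.com/IBM/keyprotect-go-client"
)

// envelopeEncrypt generates a fresh DEK under a root key, then unwraps it.
func envelopeEncrypt(ctx context.Context, client *kp.Client, rootKeyID string) error {
	aad := []string{"application-a"}

	// WrapCreateDEK asks Key Protect to generate the DEK server-side;
	// only the wrapped form needs to be persisted.
	dek, wrappedDEK, err := client.WrapCreateDEK(ctx, rootKeyID, &aad)
	if err != nil {
		return err
	}
	_ = dek // use the plaintext DEK for local encryption, then discard it

	// UnwrapV2 returns the plaintext DEK plus a rewrapped ciphertext in
	// case the root key has been rotated since wrapping.
	plainDEK, rewrapped, err := client.UnwrapV2(ctx, rootKeyID, wrappedDEK, &aad)
	if err != nil {
		return err
	}
	_, _ = plainDEK, rewrapped
	return nil
}
```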
+
+// Rotate rotates a CRK.
+func (c *Client) Rotate(ctx context.Context, id, payload string) error {
+
+	actionReq := &KeysActionRequest{
+		Payload: payload,
+	}
+
+	_, err := c.doKeysAction(ctx, id, "rotate", actionReq)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DisableKey disables a key. The key will not be deleted, but it will not be active
+// and key operations cannot be performed on a disabled key.
+// For more information, refer to the Key Protect docs in the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-disable-keys
+func (c *Client) DisableKey(ctx context.Context, id string) error {
+	_, err := c.doKeysAction(ctx, id, "disable", nil)
+	return err
+}
+
+// EnableKey enables a key. Only disabled keys can be enabled. After enabling,
+// the key becomes active and key operations can be performed on it.
+// Note: This does not recover deleted keys.
+// For more information, refer to the Key Protect docs in the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-disable-keys#enable-api
+func (c *Client) EnableKey(ctx context.Context, id string) error {
+	_, err := c.doKeysAction(ctx, id, "enable", nil)
+	return err
+}
+
+// InitiateDualAuthDelete sets a key for deletion. The key must be configured with a DualAuthDelete policy.
+// After the key is set for deletion, it can be deleted by another user who has Manager access.
+// For more information refer to the Key Protect docs in the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-delete-dual-auth-keys#set-key-deletion-api
+func (c *Client) InitiateDualAuthDelete(ctx context.Context, id string) error {
+	_, err := c.doKeysAction(ctx, id, "setKeyForDeletion", nil)
+	return err
+}
+
+// CancelDualAuthDelete unsets the key for deletion. If a key is set for deletion, it can
+// be prevented from getting deleted by unsetting the key for deletion.
+// For more information refer to the Key Protect docs in the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-delete-dual-auth-keys#unset-key-deletion-api
+func (c *Client) CancelDualAuthDelete(ctx context.Context, id string) error {
+	_, err := c.doKeysAction(ctx, id, "unsetKeyForDeletion", nil)
+	return err
+}
+
+// doKeysAction calls the KP Client to perform an action on a key.
+func (c *Client) doKeysAction(ctx context.Context, id string, action string, keysActionReq *KeysActionRequest) (*KeysActionRequest, error) {
+	keyActionRsp := KeysActionRequest{}
+
+	v := url.Values{}
+	v.Set("action", action)
+
+	req, err := c.newRequest("POST", fmt.Sprintf("keys/%s", id), keysActionReq)
+	if err != nil {
+		return nil, err
+	}
+
+	req.URL.RawQuery = v.Encode()
+
+	_, err = c.do(ctx, req, &keyActionRsp)
+	if err != nil {
+		return nil, err
+	}
+	return &keyActionRsp, nil
+}
diff --git a/vendor/github.com/IBM/keyprotect-go-client/kp.go b/vendor/github.com/IBM/keyprotect-go-client/kp.go
new file mode 100644
index 00000000000..bcadb064b75
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/kp.go
@@ -0,0 +1,510 @@
+// Copyright 2019 IBM Corp.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package kp (keyprotect-go-client) is a Go client library for interacting with the IBM KeyProtect service.
+package kp
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/google/uuid"
+	rhttp "github.com/hashicorp/go-retryablehttp"
+
+	"github.com/IBM/keyprotect-go-client/iam"
+)
+
+const (
+	// DefaultBaseURL ...
+	DefaultBaseURL = "https://us-south.kms.cloud.ibm.com"
+	// DefaultTokenURL ...
+	DefaultTokenURL = iam.IAMTokenURL
+
+	// VerboseNone ...
+	VerboseNone = 0
+	// VerboseBodyOnly ...
+	VerboseBodyOnly = 1
+	// VerboseAll ...
+	VerboseAll = 2
+	// VerboseFailOnly ...
+	VerboseFailOnly = 3
+	// VerboseAllNoRedact ...
+	VerboseAllNoRedact = 4
+
+	authContextKey ContextKey = 0
+	defaultTimeout            = 30 // in seconds.
+)
+
+var (
+	// RetryWaitMax is the maximum time to wait between HTTP retries
+	RetryWaitMax = 30 * time.Second
+
+	// RetryMax is the max number of attempts to retry for failed HTTP requests
+	RetryMax = 4
+
+	cidCtxKey = ctxKey("X-Correlation-Id")
+)
+
+type ctxKey string
+
+// ClientConfig ...
+type ClientConfig struct {
+	BaseURL       string
+	Authorization string // The IBM Cloud (Bluemix) access token
+	APIKey        string // Service ID API key, can be used instead of an access token
+	TokenURL      string // The URL used to get an access token from the API key
+	InstanceID    string // The IBM Cloud (Bluemix) instance ID that identifies your Key Protect service instance.
+	KeyRing       string // The ID of the target Key Ring the key is associated with. It is optional but recommended for better performance.
+ Verbose int // See verbose values above + Timeout float64 // KP request timeout in seconds. +} + +// DefaultTransport ... +func DefaultTransport() http.RoundTripper { + transport := &http.Transport{ + DisableKeepAlives: true, + MaxIdleConnsPerHost: -1, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: false, + }, + } + return transport +} + +// API is deprecated. Use Client instead. +type API = Client + +// Client holds configuration and auth information to interact with KeyProtect. +// It is expected that one of these is created per KeyProtect service instance/credential pair. +type Client struct { + URL *url.URL + HttpClient http.Client + Dump Dump + Config ClientConfig + Logger Logger + + tokenSource iam.TokenSource +} + +// New creates and returns a Client without logging. +func New(config ClientConfig, transport http.RoundTripper) (*Client, error) { + return NewWithLogger(config, transport, nil) +} + +// NewWithLogger creates and returns a Client with logging. The +// error value will be non-nil if the config is invalid. +func NewWithLogger(config ClientConfig, transport http.RoundTripper, logger Logger) (*Client, error) { + + if transport == nil { + transport = DefaultTransport() + } + + if logger == nil { + logger = NewLogger(func(args ...interface{}) { + fmt.Println(args...) + }) + } + + if config.Verbose > len(dumpers)-1 || config.Verbose < 0 { + return nil, errors.New("verbose value is out of range") + } + + if config.Timeout == 0 { + config.Timeout = defaultTimeout + } + keysURL := fmt.Sprintf("%s/api/v2/", config.BaseURL) + + u, err := url.Parse(keysURL) + if err != nil { + return nil, err + } + + ts := iam.CredentialFromAPIKey(config.APIKey) + + if config.TokenURL != "" { + ts.TokenURL = config.TokenURL + } + + c := &Client{ + URL: u, + HttpClient: http.Client{ + Timeout: time.Duration(config.Timeout * float64(time.Second)), + Transport: transport, + }, + Dump: dumpers[config.Verbose], + Config: config, + Logger: logger, + tokenSource: ts, + } + return c, nil +} + +func (c *Client) newRequest(method, path string, body interface{}) (*http.Request, error) { + + u, err := c.URL.Parse(path) + if err != nil { + return nil, err + } + + var reqBody []byte + var buf io.Reader + + if body != nil { + reqBody, err = json.Marshal(body) + if err != nil { + return nil, err + } + buf = bytes.NewBuffer(reqBody) + } + + request, err := http.NewRequest(method, u.String(), buf) + if err != nil { + return nil, err + } + + request.Header.Set("accept", "application/json") + + return request, nil +} + +type reason struct { + Code string + Message string + Status int + MoreInfo string +} + +func (r reason) String() string { + if r.MoreInfo != "" { + return fmt.Sprintf("%s: %s - FOR_MORE_INFO_REFER: %s", r.Code, r.Message, r.MoreInfo) + } + return fmt.Sprintf("%s: %s", r.Code, r.Message) +} + +type Error struct { + URL string // URL of request that resulted in this error + StatusCode int // HTTP error code from KeyProtect service + Message string // error message from KeyProtect service + BodyContent []byte // raw body content if more inspection is needed + CorrelationID string // string value of a UUID that uniquely identifies the request to KeyProtect + Reasons []reason // collection of reason types containing detailed error messages +} + +// Error returns correlation id and error message string +func (e Error) Error() string { + var extraVars string + if e.Reasons != nil && len(e.Reasons) > 0 { + extraVars = fmt.Sprintf(", reasons='%s'", e.Reasons) + } + + return fmt.Sprintf("kp.Error: 
correlation_id='%v', msg='%s'%s", e.CorrelationID, e.Message, extraVars)
+}
+
+// URLError wraps an error from client.do() calls with a correlation ID from KeyProtect
+type URLError struct {
+	Err           error
+	CorrelationID string
+}
+
+func (e URLError) Error() string {
+	return fmt.Sprintf(
+		"error during request to KeyProtect correlation_id='%s': %s", e.CorrelationID, e.Err.Error())
+}
+
+func (c *Client) do(ctx context.Context, req *http.Request, res interface{}) (*http.Response, error) {
+
+	accessToken, err := c.getAccessToken(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Retrieve the correlation ID from the context; if not present, generate a UUID
+	// and feed it into the request. KeyProtect will use this value when it is set on
+	// a request header rather than generating its own inside the service.
+	// We generate our own here because a connection error might mean the request
+	// never makes it server side, so having a correlation ID locally helps us match
+	// the failure against server-side logs.
+	corrID := c.getCorrelationID(ctx)
+
+	req.Header.Set("bluemix-instance", c.Config.InstanceID)
+	req.Header.Set("authorization", accessToken)
+	req.Header.Set("correlation-id", corrID)
+
+	if c.Config.KeyRing != "" {
+		req.Header.Set("x-kms-key-ring", c.Config.KeyRing)
+	}
+
+	// set request up to be retryable on 500-level http codes and client errors
+	retryableClient := getRetryableClient(&c.HttpClient)
+	retryableRequest, err := rhttp.FromRequest(req)
+	if err != nil {
+		return nil, err
+	}
+
+	response, err := retryableClient.Do(retryableRequest.WithContext(ctx))
+	if err != nil {
+		return nil, &URLError{err, corrID}
+	}
+	defer response.Body.Close()
+
+	resBody, err := ioutil.ReadAll(response.Body)
+	redact := []string{c.Config.APIKey, req.Header.Get("authorization")}
+	c.Dump(req, response, []byte{}, resBody, c.Logger, redact)
+	if err != nil {
+		return nil, err
+	}
+
+	type KPErrorMsg struct {
+		Message string `json:"errorMsg,omitempty"`
+		Reasons []reason
+	}
+
+	type KPError struct {
+		Resources []KPErrorMsg `json:"resources,omitempty"`
+	}
+
+	switch response.StatusCode {
+	case http.StatusCreated:
+		if len(resBody) != 0 {
+			if err := json.Unmarshal(resBody, res); err != nil {
+				return nil, err
+			}
+		}
+	case http.StatusOK:
+		if err := json.Unmarshal(resBody, res); err != nil {
+			return nil, err
+		}
+	case http.StatusNoContent:
+	default:
+		errMessage := string(resBody)
+		var reasons []reason
+
+		if strings.Contains(string(resBody), "errorMsg") {
+			kperr := KPError{}
+			json.Unmarshal(resBody, &kperr)
+			if len(kperr.Resources) > 0 && len(kperr.Resources[0].Message) > 0 {
+				errMessage = kperr.Resources[0].Message
+				reasons = kperr.Resources[0].Reasons
+			}
+		}
+
+		return nil, &Error{
+			URL:           response.Request.URL.String(),
+			StatusCode:    response.StatusCode,
+			Message:       errMessage,
+			BodyContent:   resBody,
+			CorrelationID: corrID,
+			Reasons:       reasons,
+		}
+	}
+
+	return response, nil
+}
+
+// getRetryableClient returns a fully configured retryable HTTP client
+func getRetryableClient(client *http.Client) *rhttp.Client {
+	// build base client with the library defaults and override as needed
+	rc := rhttp.NewClient()
+	rc.Logger = nil
+	rc.HTTPClient = client
+	rc.RetryWaitMax = RetryWaitMax
+	rc.RetryMax = RetryMax
+	rc.CheckRetry = kpCheckRetry
+	rc.ErrorHandler = rhttp.PassthroughErrorHandler
+	return rc
+}
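Not part of the vendored diff: a sketch of tuning the package-level retry knobs, which apply to every client built afterwards; the values are illustrative.

```go
package example

import (
	"time"

	kp "github.com/IBM/keyprotect-go-client"
)

// tuneRetries adjusts the retry behaviour used by getRetryableClient.
func tuneRetries() {
	kp.RetryMax = 6                    // up to 6 retries per request
	kp.RetryWaitMax = 10 * time.Second // cap the backoff between attempts
}
```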
+
+// kpCheckRetry will retry on connection errors, server errors, and 429s (rate limit)
+func kpCheckRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	// do not retry on context.Canceled or context.DeadlineExceeded
+	if ctx.Err() != nil {
+		return false, ctx.Err()
+	}
+
+	if err != nil {
+		return true, err
+	}
+	// Retry on connection errors, 500+ errors (except 501 - not implemented), and 429 - too many requests
+	if resp.StatusCode == 0 || resp.StatusCode == 429 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// ContextKey provides a type to auth context keys.
+type ContextKey int
+
+// NewContextWithAuth ...
+func NewContextWithAuth(parent context.Context, auth string) context.Context {
+	return context.WithValue(parent, authContextKey, auth)
+}
+
+// getAccessToken returns the auth context from the given Context, or
+// calls to the IAMTokenSource to retrieve an auth token.
+func (c *Client) getAccessToken(ctx context.Context) (string, error) {
+	if ctx.Value(authContextKey) != nil {
+		return ctx.Value(authContextKey).(string), nil
+	}
+
+	if len(c.Config.Authorization) > 0 {
+		return c.Config.Authorization, nil
+	}
+
+	token, err := c.tokenSource.Token()
+	if err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("%s %s", token.TokenType, token.AccessToken), nil
+}
+
+// getCorrelationID returns the correlation ID value from the given Context, or
+// returns a new UUID if not present
+func (c *Client) getCorrelationID(ctx context.Context) string {
+	corrID := GetCorrelationID(ctx)
+	if corrID == nil {
+		return uuid.New().String()
+	}
+
+	return corrID.String()
+}
+
+// NewContextWithCorrelationID returns a context containing the UUID
+func NewContextWithCorrelationID(ctx context.Context, uuid *uuid.UUID) context.Context {
+	return context.WithValue(ctx, cidCtxKey, uuid)
+}
+
+// GetCorrelationID returns the correlation ID from the context
+func GetCorrelationID(ctx context.Context) *uuid.UUID {
+	if id := ctx.Value(cidCtxKey); id != nil {
+		return id.(*uuid.UUID)
+	}
+	return nil
+}
+
+func (c ctxKey) String() string {
+	return string(c)
+}
+
+// Logger writes when called.
+type Logger interface {
+	Info(...interface{})
+}
+
+type logger struct {
+	writer func(...interface{})
+}
+
+func (l *logger) Info(args ...interface{}) {
+	l.writer(args...)
+}
+
+func NewLogger(writer func(...interface{})) Logger {
+	return &logger{writer: writer}
+}
+
+var dumpers = []Dump{dumpNone, dumpBodyOnly, dumpAll, dumpFailOnly, dumpAllNoRedact}
+
+// Dump writes various parts of an HTTP request and an HTTP response.
+type Dump func(*http.Request, *http.Response, []byte, []byte, Logger, []string)
+
+// Redact replaces various pieces of output.
+type Redact func(string, []string) string
+
+// dumpFailOnly calls dumpAll when the HTTP response isn't 200 (ok),
+// 201 (created), or 204 (no content).
+func dumpFailOnly(req *http.Request, rsp *http.Response, reqBody, resBody []byte, log Logger, redactStrings []string) {
+	switch rsp.StatusCode {
+	case http.StatusOK, http.StatusCreated, http.StatusNoContent:
+		return
+	}
+	dumpAll(req, rsp, reqBody, resBody, log, redactStrings)
+}
+
+// dumpAll dumps the HTTP request and the HTTP response body.
+func dumpAll(req *http.Request, rsp *http.Response, reqBody, resBody []byte, log Logger, redactStrings []string) {
+	dumpRequest(req, rsp, log, redactStrings, redact)
+	dumpBody(reqBody, resBody, log, redactStrings, redact)
+}
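Not part of the vendored diff: a sketch of pinning a caller-chosen correlation ID so client and server logs can be matched; the key ID is hypothetical.

```go
package example

import (
	"context"

	kp "github.com/IBM/keyprotect-go-client"
	"github.com/google/uuid"
)

// getWithCorrelationID issues a GetKey call tagged with our own request ID.
func getWithCorrelationID(client *kp.Client, keyID string) (*kp.Key, error) {
	id := uuid.New()
	ctx := kp.NewContextWithCorrelationID(context.Background(), &id)
	return client.GetKey(ctx, keyID)
}
```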
+
+// dumpAllNoRedact dumps the HTTP request and HTTP response body without redaction.
+func dumpAllNoRedact(req *http.Request, rsp *http.Response, reqBody, resBody []byte, log Logger, redactStrings []string) {
+	dumpRequest(req, rsp, log, redactStrings, noredact)
+	dumpBody(reqBody, resBody, log, redactStrings, noredact)
+}
+
+// dumpBodyOnly dumps the HTTP response body.
+func dumpBodyOnly(req *http.Request, rsp *http.Response, reqBody, resBody []byte, log Logger, redactStrings []string) {
+	dumpBody(reqBody, resBody, log, redactStrings, redact)
+}
+
+// dumpNone does nothing.
+func dumpNone(req *http.Request, rsp *http.Response, reqBody, resBody []byte, log Logger, redactStrings []string) {
+}
+
+// dumpRequest dumps the HTTP request.
+func dumpRequest(req *http.Request, rsp *http.Response, log Logger, redactStrings []string, redact Redact) {
+	// log.Info(redact(fmt.Sprint(req), redactStrings))
+	// log.Info(redact(fmt.Sprint(rsp), redactStrings))
+}
+
+// dumpBody dumps the HTTP response body with redactions.
+func dumpBody(reqBody, resBody []byte, log Logger, redactStrings []string, redact Redact) {
+	// log.Info(string(redact(string(reqBody), redactStrings)))
+	// Redact the access token and refresh token if they show up in the response body.
+	// This will happen when using an API key.
+	var auth iam.Token
+	if strings.Contains(string(resBody), "access_token") {
+		err := json.Unmarshal(resBody, &auth)
+		if err != nil {
+			log.Info(err)
+		}
+		redactStrings = append(redactStrings, auth.AccessToken)
+		redactStrings = append(redactStrings, auth.RefreshToken)
+	}
+	// log.Info(string(redact(string(resBody), redactStrings)))
+}
+
+// redact replaces substrings within the given string.
+func redact(s string, redactStrings []string) string {
+	if len(redactStrings) < 1 {
+		return s
+	}
+	var a []string
+	for _, s1 := range redactStrings {
+		if s1 != "" {
+			a = append(a, s1)
+			a = append(a, "***Value redacted***")
+		}
+	}
+	r := strings.NewReplacer(a...)
+	return r.Replace(s)
+}
+
+// noredact does not perform redaction, and returns the given string.
+func noredact(s string, redactStrings []string) string {
+	return s
+}
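Not part of the vendored diff: a sketch of building a client that uses the dump levels above; the API key and instance ID are placeholders.

```go
package example

import (
	kp "github.com/IBM/keyprotect-go-client"
)

// newVerboseClient dumps request/response details only for failed calls,
// with secrets redacted by the redact helper.
func newVerboseClient() (*kp.Client, error) {
	return kp.New(kp.ClientConfig{
		BaseURL:    kp.DefaultBaseURL,
		APIKey:     "<service-id-api-key>",
		InstanceID: "<key-protect-instance-id>",
		Verbose:    kp.VerboseFailOnly,
	}, kp.DefaultTransport())
}
```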
diff --git a/vendor/github.com/IBM/keyprotect-go-client/policy.go b/vendor/github.com/IBM/keyprotect-go-client/policy.go
new file mode 100644
index 00000000000..53bb5ea697f
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/policy.go
@@ -0,0 +1,317 @@
+// Copyright 2020 IBM Corp.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kp
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"time"
+)
+
+const (
+	policyType = "application/vnd.ibm.kms.policy+json"
+
+	RotationPolicy = "rotation"
+)
+
+// Policy represents a policy as returned by the KP API.
+type Policy struct {
+	Type      string     `json:"type,omitempty"`
+	CreatedBy string     `json:"createdBy,omitempty"`
+	CreatedAt *time.Time `json:"creationDate,omitempty"`
+	CRN       string     `json:"crn,omitempty"`
+	UpdatedAt *time.Time `json:"lastUpdateDate,omitempty"`
+	UpdatedBy string     `json:"updatedBy,omitempty"`
+	Rotation  *Rotation  `json:"rotation,omitempty"`
+	DualAuth  *DualAuth  `json:"dualAuthDelete,omitempty"`
+}
+
+type Rotation struct {
+	Interval int `json:"interval_month,omitempty"`
+}
+
+type DualAuth struct {
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// PoliciesMetadata represents the metadata of a collection of policies.
+type PoliciesMetadata struct {
+	CollectionType   string `json:"collectionType"`
+	NumberOfPolicies int    `json:"collectionTotal"`
+}
+
+// Policies represents a collection of Policies.
+type Policies struct {
+	Metadata PoliciesMetadata `json:"metadata"`
+	Policies []Policy         `json:"resources"`
+}
+
+// GetPolicy retrieves a policy by Key ID. This function is
+// deprecated, as it only returns one policy and does not let you
+// select which policy set it will return. It is kept for backward
+// compatibility on keys with only one rotation policy. Please update
+// to use the newer GetPolicies or the policy-specific functions such
+// as GetRotationPolicy.
+func (c *Client) GetPolicy(ctx context.Context, id string) (*Policy, error) {
+	policyresponse := Policies{}
+
+	req, err := c.newRequest("GET", fmt.Sprintf("keys/%s/policies", id), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = c.do(ctx, req, &policyresponse)
+	if err != nil {
+		return nil, err
+	}
+
+	return &policyresponse.Policies[0], nil
+}
+
+// SetPolicy updates a policy resource by specifying the ID of the key and
+// the rotation interval needed. This function is deprecated as it will only
+// let you set key rotation policies. To set dual auth and other newer policies
+// on a key, please use the newer SetPolicies or the policy-specific functions
+// such as SetDualAuthDeletePolicy.
+func (c *Client) SetPolicy(ctx context.Context, id string, prefer PreferReturn, rotationInterval int) (*Policy, error) {
+
+	policy := Policy{
+		Type: policyType,
+		Rotation: &Rotation{
+			Interval: rotationInterval,
+		},
+	}
+
+	policyRequest := Policies{
+		Metadata: PoliciesMetadata{
+			CollectionType:   policyType,
+			NumberOfPolicies: 1,
+		},
+		Policies: []Policy{policy},
+	}
+
+	policyresponse := Policies{}
+
+	req, err := c.newRequest("PUT", fmt.Sprintf("keys/%s/policies", id), &policyRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Prefer", preferHeaders[prefer])
+
+	_, err = c.do(ctx, req, &policyresponse)
+	if err != nil {
+		return nil, err
+	}
+
+	return &policyresponse.Policies[0], nil
+}
+
+// GetPolicies retrieves all policy details associated with a Key ID.
+func (c *Client) GetPolicies(ctx context.Context, id string) ([]Policy, error) {
+	policyresponse := Policies{}
+
+	req, err := c.newRequest("GET", fmt.Sprintf("keys/%s/policies", id), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = c.do(ctx, req, &policyresponse)
+	if err != nil {
+		return nil, err
+	}
+
+	return policyresponse.Policies, nil
+}
+
+func (c *Client) getPolicy(ctx context.Context, id, policyType string, policyresponse *Policies) error {
+	req, err := c.newRequest("GET", fmt.Sprintf("keys/%s/policies", id), nil)
+	if err != nil {
+		return err
+	}
+
+	v := url.Values{}
+	v.Set("policy", policyType)
+	req.URL.RawQuery = v.Encode()
+
+	_, err = c.do(ctx, req, &policyresponse)
+	if err != nil {
+		return err
+	}
+	return err
+}
+
+// GetRotationPolicy method retrieves the rotation policy details of a key.
+// For more information, refer to the Key Protect docs in the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-set-rotation-policy#view-rotation-policy-api
+func (c *Client) GetRotationPolicy(ctx context.Context, id string) (*Policy, error) {
+	policyresponse := Policies{}
+
+	err := c.getPolicy(ctx, id, RotationPolicy, &policyresponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(policyresponse.Policies) == 0 {
+		return nil, nil
+	}
+
+	return &policyresponse.Policies[0], nil
+}
+
+// GetDualAuthDeletePolicy method retrieves the dual auth delete policy details of a key.
+// For more information, refer to the Key Protect docs in the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-set-dual-auth-key-policy#view-dual-auth-key-policy-api
+func (c *Client) GetDualAuthDeletePolicy(ctx context.Context, id string) (*Policy, error) {
+	policyresponse := Policies{}
+
+	err := c.getPolicy(ctx, id, DualAuthDelete, &policyresponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(policyresponse.Policies) == 0 {
+		return nil, nil
+	}
+
+	return &policyresponse.Policies[0], nil
+}
+
+func (c *Client) setPolicy(ctx context.Context, id, policyType string, policyRequest Policies) (*Policies, error) {
+	policyresponse := Policies{}
+
+	req, err := c.newRequest("PUT", fmt.Sprintf("keys/%s/policies", id), &policyRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	v := url.Values{}
+	v.Set("policy", policyType)
+	req.URL.RawQuery = v.Encode()
+
+	_, err = c.do(ctx, req, &policyresponse)
+	if err != nil {
+		return nil, err
+	}
+	return &policyresponse, nil
+}
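Not part of the vendored diff: a sketch of the key-policy helpers (including SetRotationPolicy and SetDualAuthDeletePolicy, defined just below), assuming a configured `*kp.Client`; the key ID and interval are hypothetical.

```go
package example

import (
	"context"
	"fmt"

	kp "github.com/IBM/keyprotect-go-client"
)

// configureKeyPolicies sets a 3-month rotation policy and dual-auth delete
// on a key, then reads the rotation policy back.
func configureKeyPolicies(ctx context.Context, client *kp.Client, keyID string) error {
	if _, err := client.SetRotationPolicy(ctx, keyID, 3); err != nil {
		return err
	}
	if _, err := client.SetDualAuthDeletePolicy(ctx, keyID, true); err != nil {
		return err
	}

	policy, err := client.GetRotationPolicy(ctx, keyID)
	if err != nil {
		return err
	}
	if policy != nil && policy.Rotation != nil {
		fmt.Printf("rotation interval: %d months\n", policy.Rotation.Interval)
	}
	return nil
}
```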
+
+// SetRotationPolicy updates the rotation policy associated with a key by specifying the key ID and rotation interval.
+// For more information, refer to the Key Protect docs in the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-set-rotation-policy#update-rotation-policy-api
+func (c *Client) SetRotationPolicy(ctx context.Context, id string, rotationInterval int) (*Policy, error) {
+	policy := Policy{
+		Type: policyType,
+		Rotation: &Rotation{
+			Interval: rotationInterval,
+		},
+	}
+
+	policyRequest := Policies{
+		Metadata: PoliciesMetadata{
+			CollectionType:   policyType,
+			NumberOfPolicies: 1,
+		},
+		Policies: []Policy{policy},
+	}
+
+	policyresponse, err := c.setPolicy(ctx, id, RotationPolicy, policyRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(policyresponse.Policies) == 0 {
+		return nil, nil
+	}
+
+	return &policyresponse.Policies[0], nil
+}
+
+// SetDualAuthDeletePolicy updates the dual auth delete policy by passing the key ID and the enable detail.
+// For more information, refer to the Key Protect docs in the link below:
+// https://cloud.ibm.com/docs/key-protect?topic=key-protect-set-dual-auth-key-policy#create-dual-auth-key-policy-api
+func (c *Client) SetDualAuthDeletePolicy(ctx context.Context, id string, enabled bool) (*Policy, error) {
+	policy := Policy{
+		Type: policyType,
+		DualAuth: &DualAuth{
+			Enabled: &enabled,
+		},
+	}
+
+	policyRequest := Policies{
+		Metadata: PoliciesMetadata{
+			CollectionType:   policyType,
+			NumberOfPolicies: 1,
+		},
+		Policies: []Policy{policy},
+	}
+
+	policyresponse, err := c.setPolicy(ctx, id, DualAuthDelete, policyRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(policyresponse.Policies) == 0 {
+		return nil, nil
+	}
+
+	return &policyresponse.Policies[0], nil
+}
+
+// SetPolicies updates all policies of a key, or a single policy, by passing the key ID.
+// To set the rotation policy, pass setRotationPolicy as true along with the rotationInterval detail.
+// To set the dual auth delete policy, pass setDualAuthDeletePolicy as true along with the dualAuthEnable detail.
+// Either policy, or both, can be set in one call.
+func (c *Client) SetPolicies(ctx context.Context, id string, setRotationPolicy bool, rotationInterval int, setDualAuthDeletePolicy, dualAuthEnable bool) ([]Policy, error) {
+	policies := []Policy{}
+	if setRotationPolicy {
+		rotationPolicy := Policy{
+			Type: policyType,
+			Rotation: &Rotation{
+				Interval: rotationInterval,
+			},
+		}
+		policies = append(policies, rotationPolicy)
+	}
+	if setDualAuthDeletePolicy {
+		dualAuthPolicy := Policy{
+			Type: policyType,
+			DualAuth: &DualAuth{
+				Enabled: &dualAuthEnable,
+			},
+		}
+		policies = append(policies, dualAuthPolicy)
+	}
+
+	policyRequest := Policies{
+		Metadata: PoliciesMetadata{
+			CollectionType:   policyType,
+			NumberOfPolicies: len(policies),
+		},
+		Policies: policies,
+	}
+
+	policyresponse := Policies{}
+
+	req, err := c.newRequest("PUT", fmt.Sprintf("keys/%s/policies", id), &policyRequest)
+	if err != nil {
+		return nil, err
+	}
+	_, err = c.do(ctx, req, &policyresponse)
+	if err != nil {
+		return nil, err
+	}
+
+	return policyresponse.Policies, nil
+}
diff --git a/vendor/github.com/IBM/keyprotect-go-client/registrations.go b/vendor/github.com/IBM/keyprotect-go-client/registrations.go
new file mode 100644
index 00000000000..04c0363ef8f
--- /dev/null
+++ b/vendor/github.com/IBM/keyprotect-go-client/registrations.go
@@ -0,0 +1,69 @@
+// Copyright 2020 IBM Corp.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kp + +import ( + "context" + "fmt" + "net/url" + "time" +) + +// Registration represents the registration as returned by KP API +type Registration struct { + KeyID string `json:"keyId,omitempty"` + ResourceCrn string `json:"resourceCrn,omitempty"` + CreatedBy string `json:"createdBy,omitempty"` + CreationDate *time.Time `json:"creationDate,omitempty"` + UpdatedBy string `json:"updatedBy,omitempty"` + LastUpdateDate *time.Time `json:"lastUpdated,omitempty"` + Description string `json:"description,omitempty"` + PreventKeyDeletion bool `json:"preventKeyDeletion,omitempty"` + KeyVersion KeyVersion `json:"keyVersion,omitempty"` +} + +type registrations struct { + Metadata KeysMetadata `json:"metadata"` + Registrations []Registration `json:"resources"` +} + +// ListRegistrations retrieves a collection of registrations +func (c *Client) ListRegistrations(ctx context.Context, keyId, crn string) (*registrations, error) { + registrationAPI := "" + if keyId != "" { + registrationAPI = fmt.Sprintf("keys/%s/registrations", keyId) + } else { + registrationAPI = "keys/registrations" + } + + req, err := c.newRequest("GET", registrationAPI, nil) + if err != nil { + return nil, err + } + + if crn != "" { + v := url.Values{} + v.Set("urlEncodedResourceCRNQuery", crn) + req.URL.RawQuery = v.Encode() + } + + regs := registrations{} + _, err = c.do(ctx, req, ®s) + if err != nil { + return nil, err + } + + return ®s, nil +} diff --git a/vendor/github.com/IBM/networking-go-sdk/cachingapiv1/caching_api_v1.go b/vendor/github.com/IBM/networking-go-sdk/cachingapiv1/caching_api_v1.go new file mode 100644 index 00000000000..e0bf3c5ae66 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/cachingapiv1/caching_api_v1.go @@ -0,0 +1,1826 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.26.0-4b317b0c-20210127-171701 + */ + + +// Package cachingapiv1 : Operations and models for the CachingApiV1 service +package cachingapiv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v5/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// CachingApiV1 : This document describes CIS caching API. +// +// Version: 1.0.0 +type CachingApiV1 struct { + Service *core.BaseService + + // cloud resource name. + Crn *string + + // zone id. + ZoneID *string +} + +// DefaultServiceURL is the default URL to make service requests to. 
+const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "caching_api" + +// CachingApiV1Options : Service options +type CachingApiV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // cloud resource name. + Crn *string `validate:"required"` + + // zone id. + ZoneID *string `validate:"required"` +} + +// NewCachingApiV1UsingExternalConfig : constructs an instance of CachingApiV1 with passed in options and external configuration. +func NewCachingApiV1UsingExternalConfig(options *CachingApiV1Options) (cachingApi *CachingApiV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + cachingApi, err = NewCachingApiV1(options) + if err != nil { + return + } + + err = cachingApi.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = cachingApi.Service.SetServiceURL(options.URL) + } + return +} + +// NewCachingApiV1 : constructs an instance of CachingApiV1 with passed in options. +func NewCachingApiV1(options *CachingApiV1Options) (service *CachingApiV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &CachingApiV1{ + Service: baseService, + Crn: options.Crn, + ZoneID: options.ZoneID, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "cachingApi" suitable for processing requests. +func (cachingApi *CachingApiV1) Clone() *CachingApiV1 { + if core.IsNil(cachingApi) { + return nil + } + clone := *cachingApi + clone.Service = cachingApi.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (cachingApi *CachingApiV1) SetServiceURL(url string) error { + return cachingApi.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (cachingApi *CachingApiV1) GetServiceURL() string { + return cachingApi.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (cachingApi *CachingApiV1) SetDefaultHeaders(headers http.Header) { + cachingApi.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (cachingApi *CachingApiV1) SetEnableGzipCompression(enableGzip bool) { + cachingApi.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (cachingApi *CachingApiV1) GetEnableGzipCompression() bool { + return cachingApi.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. 
+func (cachingApi *CachingApiV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
+	cachingApi.Service.EnableRetries(maxRetries, maxRetryInterval)
+}
+
+// DisableRetries disables automatic retries for requests invoked for this service instance.
+func (cachingApi *CachingApiV1) DisableRetries() {
+	cachingApi.Service.DisableRetries()
+}
+
+// PurgeAll : Purge all
+// All resources in CDN edge servers' cache should be removed. This may have dramatic effects on your origin server load
+// after performing this action.
+func (cachingApi *CachingApiV1) PurgeAll(purgeAllOptions *PurgeAllOptions) (result *PurgeAllResponse, response *core.DetailedResponse, err error) {
+	return cachingApi.PurgeAllWithContext(context.Background(), purgeAllOptions)
+}
+
+// PurgeAllWithContext is an alternate form of the PurgeAll method which supports a Context parameter
+func (cachingApi *CachingApiV1) PurgeAllWithContext(ctx context.Context, purgeAllOptions *PurgeAllOptions) (result *PurgeAllResponse, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(purgeAllOptions, "purgeAllOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn":     *cachingApi.Crn,
+		"zone_id": *cachingApi.ZoneID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/purge_cache/purge_all`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range purgeAllOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "PurgeAll")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = cachingApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPurgeAllResponse)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
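Not part of the vendored diff: a sketch of calling PurgeAll through the caching service, assuming an IAM authenticator; the API key, CRN, and zone ID are placeholders, and the empty PurgeAllOptions literal assumes the options struct needs no required fields.

```go
package example

import (
	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/IBM/networking-go-sdk/cachingapiv1"
)

// purgeZoneCache flushes the whole CDN cache for one zone.
func purgeZoneCache(crn, zoneID string) error {
	svc, err := cachingapiv1.NewCachingApiV1(&cachingapiv1.CachingApiV1Options{
		Authenticator: &core.IamAuthenticator{ApiKey: "<api-key>"},
		Crn:           core.StringPtr(crn),
		ZoneID:        core.StringPtr(zoneID),
	})
	if err != nil {
		return err
	}

	_, _, err = svc.PurgeAll(&cachingapiv1.PurgeAllOptions{})
	return err
}
```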
+
+// PurgeByUrls : Purge URLs
+// Granularly remove one or more files from CDN edge servers' cache by specifying URLs.
+func (cachingApi *CachingApiV1) PurgeByUrls(purgeByUrlsOptions *PurgeByUrlsOptions) (result *PurgeAllResponse, response *core.DetailedResponse, err error) {
+	return cachingApi.PurgeByUrlsWithContext(context.Background(), purgeByUrlsOptions)
+}
+
+// PurgeByUrlsWithContext is an alternate form of the PurgeByUrls method which supports a Context parameter
+func (cachingApi *CachingApiV1) PurgeByUrlsWithContext(ctx context.Context, purgeByUrlsOptions *PurgeByUrlsOptions) (result *PurgeAllResponse, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(purgeByUrlsOptions, "purgeByUrlsOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn":     *cachingApi.Crn,
+		"zone_id": *cachingApi.ZoneID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/purge_cache/purge_by_urls`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range purgeByUrlsOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "PurgeByUrls")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	body := make(map[string]interface{})
+	if purgeByUrlsOptions.Files != nil {
+		body["files"] = purgeByUrlsOptions.Files
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = cachingApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPurgeAllResponse)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// PurgeByCacheTags : Purge Cache-Tags
+// Granularly remove one or more files from CDN edge servers' cache by specifying the associated Cache-Tags.
+func (cachingApi *CachingApiV1) PurgeByCacheTags(purgeByCacheTagsOptions *PurgeByCacheTagsOptions) (result *PurgeAllResponse, response *core.DetailedResponse, err error) {
+	return cachingApi.PurgeByCacheTagsWithContext(context.Background(), purgeByCacheTagsOptions)
+}
+
+// PurgeByCacheTagsWithContext is an alternate form of the PurgeByCacheTags method which supports a Context parameter
+func (cachingApi *CachingApiV1) PurgeByCacheTagsWithContext(ctx context.Context, purgeByCacheTagsOptions *PurgeByCacheTagsOptions) (result *PurgeAllResponse, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(purgeByCacheTagsOptions, "purgeByCacheTagsOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn":     *cachingApi.Crn,
+		"zone_id": *cachingApi.ZoneID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/purge_cache/purge_by_cache_tags`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range purgeByCacheTagsOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "PurgeByCacheTags")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	body := make(map[string]interface{})
+	if purgeByCacheTagsOptions.Tags != nil {
+		body["tags"] = purgeByCacheTagsOptions.Tags
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = cachingApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPurgeAllResponse)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// PurgeByHosts : Purge host names
+// Granularly remove one or more files from CDN edge servers' cache by specifying the hostnames.
+func (cachingApi *CachingApiV1) PurgeByHosts(purgeByHostsOptions *PurgeByHostsOptions) (result *PurgeAllResponse, response *core.DetailedResponse, err error) {
+ return cachingApi.PurgeByHostsWithContext(context.Background(), purgeByHostsOptions)
+}
+
+// PurgeByHostsWithContext is an alternate form of the PurgeByHosts method which supports a Context parameter
+func (cachingApi *CachingApiV1) PurgeByHostsWithContext(ctx context.Context, purgeByHostsOptions *PurgeByHostsOptions) (result *PurgeAllResponse, response *core.DetailedResponse, err error) {
+ err = core.ValidateStruct(purgeByHostsOptions, "purgeByHostsOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *cachingApi.Crn,
+ "zone_id": *cachingApi.ZoneID,
+ }
+
+ builder := core.NewRequestBuilder(core.PUT)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/purge_cache/purge_by_hosts`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range purgeByHostsOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "PurgeByHosts")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ builder.AddHeader("Content-Type", "application/json")
+
+ body := make(map[string]interface{})
+ if purgeByHostsOptions.Hosts != nil {
+ body["hosts"] = purgeByHostsOptions.Hosts
+ }
+ _, err = builder.SetBodyContentJSON(body)
+ if err != nil {
+ return
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = cachingApi.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPurgeAllResponse)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// GetBrowserCacheTTL : Get browser cache TTL setting
+// Browser Cache TTL (in seconds) specifies how long CDN edge servers' cached resources will remain on your visitors'
+// computers.
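The three purge variants above differ only in the request-body field they populate (`files`, `tags`, or `hosts`) and all return the same PurgeAllResponse envelope. Before the settings operations that follow, here is a minimal same-package sketch of the option-builder pattern they share; it assumes a *CachingApiV1 client constructed earlier in this file, and the helper name and URLs are illustrative, not part of the vendored source:

// purgeStaleAssets is an illustrative helper (not part of the vendored file)
// showing the option-builder pattern shared by the purge methods.
func purgeStaleAssets(cachingApi *CachingApiV1) error {
	// Purge two specific URLs.
	urlOpts := cachingApi.NewPurgeByUrlsOptions().
		SetFiles([]string{"https://www.example.com/app.js", "https://www.example.com/app.css"})
	if _, _, err := cachingApi.PurgeByUrls(urlOpts); err != nil {
		return err
	}

	// Purge everything tagged "release-42".
	tagOpts := cachingApi.NewPurgeByCacheTagsOptions().SetTags([]string{"release-42"})
	if _, _, err := cachingApi.PurgeByCacheTags(tagOpts); err != nil {
		return err
	}

	// Purge an entire hostname.
	hostOpts := cachingApi.NewPurgeByHostsOptions().SetHosts([]string{"assets.example.com"})
	_, _, err := cachingApi.PurgeByHosts(hostOpts)
	return err
}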
+func (cachingApi *CachingApiV1) GetBrowserCacheTTL(getBrowserCacheTtlOptions *GetBrowserCacheTtlOptions) (result *BrowserTTLResponse, response *core.DetailedResponse, err error) {
+ return cachingApi.GetBrowserCacheTTLWithContext(context.Background(), getBrowserCacheTtlOptions)
+}
+
+// GetBrowserCacheTTLWithContext is an alternate form of the GetBrowserCacheTTL method which supports a Context parameter
+func (cachingApi *CachingApiV1) GetBrowserCacheTTLWithContext(ctx context.Context, getBrowserCacheTtlOptions *GetBrowserCacheTtlOptions) (result *BrowserTTLResponse, response *core.DetailedResponse, err error) {
+ err = core.ValidateStruct(getBrowserCacheTtlOptions, "getBrowserCacheTtlOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *cachingApi.Crn,
+ "zone_id": *cachingApi.ZoneID,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/browser_cache_ttl`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range getBrowserCacheTtlOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "GetBrowserCacheTTL")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = cachingApi.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalBrowserTTLResponse)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// UpdateBrowserCacheTTL : Change browser cache TTL setting
+// Browser Cache TTL (in seconds) specifies how long CDN edge servers' cached resources will remain on your visitors'
+// computers.
+func (cachingApi *CachingApiV1) UpdateBrowserCacheTTL(updateBrowserCacheTtlOptions *UpdateBrowserCacheTtlOptions) (result *BrowserTTLResponse, response *core.DetailedResponse, err error) { + return cachingApi.UpdateBrowserCacheTTLWithContext(context.Background(), updateBrowserCacheTtlOptions) +} + +// UpdateBrowserCacheTTLWithContext is an alternate form of the UpdateBrowserCacheTTL method which supports a Context parameter +func (cachingApi *CachingApiV1) UpdateBrowserCacheTTLWithContext(ctx context.Context, updateBrowserCacheTtlOptions *UpdateBrowserCacheTtlOptions) (result *BrowserTTLResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateBrowserCacheTtlOptions, "updateBrowserCacheTtlOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *cachingApi.Crn, + "zone_id": *cachingApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/browser_cache_ttl`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateBrowserCacheTtlOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "UpdateBrowserCacheTTL") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateBrowserCacheTtlOptions.Value != nil { + body["value"] = updateBrowserCacheTtlOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cachingApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalBrowserTTLResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetServeStaleContent : Get Serve Stale Content setting +// When enabled, Serve Stale Content will serve pages from CDN edge servers' cache if your server is offline. 
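GetBrowserCacheTTL and UpdateBrowserCacheTTL above form a read-modify-write pair over a single zone setting. Before the Serve Stale Content operations that follow, a sketch, not part of the vendored file, of raising the TTL only when it is below a threshold; the 14400-second (four-hour) value is an arbitrary example:

// adjustBrowserTTL is an illustrative helper (not part of the vendored file):
// it reads the current Browser Cache TTL and raises it to four hours if lower.
func adjustBrowserTTL(cachingApi *CachingApiV1) error {
	current, _, err := cachingApi.GetBrowserCacheTTL(cachingApi.NewGetBrowserCacheTtlOptions())
	if err != nil {
		return err
	}
	if current.Result != nil && current.Result.Value != nil && *current.Result.Value >= 14400 {
		return nil // already at or above the target
	}
	updateOpts := cachingApi.NewUpdateBrowserCacheTtlOptions().SetValue(14400)
	_, _, err = cachingApi.UpdateBrowserCacheTTL(updateOpts)
	return err
}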
+func (cachingApi *CachingApiV1) GetServeStaleContent(getServeStaleContentOptions *GetServeStaleContentOptions) (result *ServeStaleContentResponse, response *core.DetailedResponse, err error) { + return cachingApi.GetServeStaleContentWithContext(context.Background(), getServeStaleContentOptions) +} + +// GetServeStaleContentWithContext is an alternate form of the GetServeStaleContent method which supports a Context parameter +func (cachingApi *CachingApiV1) GetServeStaleContentWithContext(ctx context.Context, getServeStaleContentOptions *GetServeStaleContentOptions) (result *ServeStaleContentResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getServeStaleContentOptions, "getServeStaleContentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *cachingApi.Crn, + "zone_id": *cachingApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/always_online`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getServeStaleContentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "GetServeStaleContent") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cachingApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalServeStaleContentResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateServeStaleContent : Change Serve Stale Content setting +// When enabled, Serve Stale Content will serve pages from CDN edge servers' cache if your server is offline. 
+func (cachingApi *CachingApiV1) UpdateServeStaleContent(updateServeStaleContentOptions *UpdateServeStaleContentOptions) (result *ServeStaleContentResponse, response *core.DetailedResponse, err error) { + return cachingApi.UpdateServeStaleContentWithContext(context.Background(), updateServeStaleContentOptions) +} + +// UpdateServeStaleContentWithContext is an alternate form of the UpdateServeStaleContent method which supports a Context parameter +func (cachingApi *CachingApiV1) UpdateServeStaleContentWithContext(ctx context.Context, updateServeStaleContentOptions *UpdateServeStaleContentOptions) (result *ServeStaleContentResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateServeStaleContentOptions, "updateServeStaleContentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *cachingApi.Crn, + "zone_id": *cachingApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/always_online`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateServeStaleContentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "UpdateServeStaleContent") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateServeStaleContentOptions.Value != nil { + body["value"] = updateServeStaleContentOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cachingApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalServeStaleContentResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetDevelopmentMode : Get development mode setting +// Get development mode setting. 
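Each operation also returns the underlying *core.DetailedResponse, so a caller can check transport-level details alongside the typed result. A sketch, not part of the vendored file, using the Serve Stale Content update; it assumes `fmt` is imported, as in the sibling service files of this SDK:

// enableServeStale is an illustrative helper (not part of the vendored file)
// showing how the *core.DetailedResponse return value can be inspected.
func enableServeStale(cachingApi *CachingApiV1) error {
	opts := cachingApi.NewUpdateServeStaleContentOptions().
		SetValue(UpdateServeStaleContentOptions_Value_On)
	result, response, err := cachingApi.UpdateServeStaleContent(opts)
	if err != nil {
		return err
	}
	if response.StatusCode != 200 || result.Success == nil || !*result.Success {
		return fmt.Errorf("serve stale content update not confirmed (HTTP %d)", response.StatusCode)
	}
	return nil
}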
+func (cachingApi *CachingApiV1) GetDevelopmentMode(getDevelopmentModeOptions *GetDevelopmentModeOptions) (result *DeveopmentModeResponse, response *core.DetailedResponse, err error) { + return cachingApi.GetDevelopmentModeWithContext(context.Background(), getDevelopmentModeOptions) +} + +// GetDevelopmentModeWithContext is an alternate form of the GetDevelopmentMode method which supports a Context parameter +func (cachingApi *CachingApiV1) GetDevelopmentModeWithContext(ctx context.Context, getDevelopmentModeOptions *GetDevelopmentModeOptions) (result *DeveopmentModeResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getDevelopmentModeOptions, "getDevelopmentModeOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *cachingApi.Crn, + "zone_id": *cachingApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/development_mode`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getDevelopmentModeOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "GetDevelopmentMode") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cachingApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeveopmentModeResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateDevelopmentMode : Change development mode setting +// Change development mode setting. 
+func (cachingApi *CachingApiV1) UpdateDevelopmentMode(updateDevelopmentModeOptions *UpdateDevelopmentModeOptions) (result *DeveopmentModeResponse, response *core.DetailedResponse, err error) { + return cachingApi.UpdateDevelopmentModeWithContext(context.Background(), updateDevelopmentModeOptions) +} + +// UpdateDevelopmentModeWithContext is an alternate form of the UpdateDevelopmentMode method which supports a Context parameter +func (cachingApi *CachingApiV1) UpdateDevelopmentModeWithContext(ctx context.Context, updateDevelopmentModeOptions *UpdateDevelopmentModeOptions) (result *DeveopmentModeResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateDevelopmentModeOptions, "updateDevelopmentModeOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *cachingApi.Crn, + "zone_id": *cachingApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/development_mode`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateDevelopmentModeOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "UpdateDevelopmentMode") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateDevelopmentModeOptions.Value != nil { + body["value"] = updateDevelopmentModeOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cachingApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeveopmentModeResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetQueryStringSort : Get Enable Query String Sort setting +// Get Enable Query String Sort setting. 
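Every method in this service has a *WithContext twin; the plain form simply passes context.Background(). A sketch, not part of the vendored file, of bounding an update with a deadline via the context variant:

// enableDevelopmentMode is an illustrative helper (not part of the vendored
// file) showing the *WithContext variants, which accept a caller-supplied
// context for cancellation and deadlines.
func enableDevelopmentMode(cachingApi *CachingApiV1) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	opts := cachingApi.NewUpdateDevelopmentModeOptions().
		SetValue(UpdateDevelopmentModeOptions_Value_On)
	_, _, err := cachingApi.UpdateDevelopmentModeWithContext(ctx, opts)
	return err
}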
+func (cachingApi *CachingApiV1) GetQueryStringSort(getQueryStringSortOptions *GetQueryStringSortOptions) (result *EnableQueryStringSortResponse, response *core.DetailedResponse, err error) { + return cachingApi.GetQueryStringSortWithContext(context.Background(), getQueryStringSortOptions) +} + +// GetQueryStringSortWithContext is an alternate form of the GetQueryStringSort method which supports a Context parameter +func (cachingApi *CachingApiV1) GetQueryStringSortWithContext(ctx context.Context, getQueryStringSortOptions *GetQueryStringSortOptions) (result *EnableQueryStringSortResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getQueryStringSortOptions, "getQueryStringSortOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *cachingApi.Crn, + "zone_id": *cachingApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/sort_query_string_for_cache`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getQueryStringSortOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "GetQueryStringSort") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cachingApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEnableQueryStringSortResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateQueryStringSort : Change Enable Query String Sort setting +// Change Enable Query String Sort setting. 
+func (cachingApi *CachingApiV1) UpdateQueryStringSort(updateQueryStringSortOptions *UpdateQueryStringSortOptions) (result *EnableQueryStringSortResponse, response *core.DetailedResponse, err error) { + return cachingApi.UpdateQueryStringSortWithContext(context.Background(), updateQueryStringSortOptions) +} + +// UpdateQueryStringSortWithContext is an alternate form of the UpdateQueryStringSort method which supports a Context parameter +func (cachingApi *CachingApiV1) UpdateQueryStringSortWithContext(ctx context.Context, updateQueryStringSortOptions *UpdateQueryStringSortOptions) (result *EnableQueryStringSortResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateQueryStringSortOptions, "updateQueryStringSortOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *cachingApi.Crn, + "zone_id": *cachingApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/sort_query_string_for_cache`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateQueryStringSortOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "UpdateQueryStringSort") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateQueryStringSortOptions.Value != nil { + body["value"] = updateQueryStringSortOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cachingApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEnableQueryStringSortResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetCacheLevel : Get cache level setting +// Get cache level setting of a specific zone. 
+func (cachingApi *CachingApiV1) GetCacheLevel(getCacheLevelOptions *GetCacheLevelOptions) (result *CacheLevelResponse, response *core.DetailedResponse, err error) { + return cachingApi.GetCacheLevelWithContext(context.Background(), getCacheLevelOptions) +} + +// GetCacheLevelWithContext is an alternate form of the GetCacheLevel method which supports a Context parameter +func (cachingApi *CachingApiV1) GetCacheLevelWithContext(ctx context.Context, getCacheLevelOptions *GetCacheLevelOptions) (result *CacheLevelResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getCacheLevelOptions, "getCacheLevelOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *cachingApi.Crn, + "zone_id": *cachingApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/cache_level`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getCacheLevelOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "GetCacheLevel") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cachingApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCacheLevelResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateCacheLevel : Set cache level setting +// The `basic` setting will cache most static resources (i.e., css, images, and JavaScript). The `simplified` setting +// will ignore the query string when delivering a cached resource. The `aggressive` setting will cache all static +// resources, including ones with a query string. 
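The update counterpart defined next accepts exactly the three values described above. A sketch, not part of the vendored file, that uses the UpdateCacheLevelOptions_Value_* constants declared further down in this file:

// setAggressiveCaching is an illustrative helper (not part of the vendored
// file): it switches the zone to the `aggressive` cache level.
func setAggressiveCaching(cachingApi *CachingApiV1) error {
	opts := cachingApi.NewUpdateCacheLevelOptions().
		SetValue(UpdateCacheLevelOptions_Value_Aggressive)
	result, _, err := cachingApi.UpdateCacheLevel(opts)
	if err != nil {
		return err
	}
	if result.Result != nil && result.Result.Value != nil {
		fmt.Printf("cache level is now %s\n", *result.Result.Value)
	}
	return nil
}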
+func (cachingApi *CachingApiV1) UpdateCacheLevel(updateCacheLevelOptions *UpdateCacheLevelOptions) (result *CacheLevelResponse, response *core.DetailedResponse, err error) { + return cachingApi.UpdateCacheLevelWithContext(context.Background(), updateCacheLevelOptions) +} + +// UpdateCacheLevelWithContext is an alternate form of the UpdateCacheLevel method which supports a Context parameter +func (cachingApi *CachingApiV1) UpdateCacheLevelWithContext(ctx context.Context, updateCacheLevelOptions *UpdateCacheLevelOptions) (result *CacheLevelResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateCacheLevelOptions, "updateCacheLevelOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *cachingApi.Crn, + "zone_id": *cachingApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cachingApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cachingApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/settings/cache_level`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateCacheLevelOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("caching_api", "V1", "UpdateCacheLevel") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateCacheLevelOptions.Value != nil { + body["value"] = updateCacheLevelOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cachingApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCacheLevelResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// BrowserTTLResponseResult : result object. +type BrowserTTLResponseResult struct { + // ttl type. + ID *string `json:"id,omitempty"` + + // ttl value. + Value *int64 `json:"value,omitempty"` + + // editable. + Editable *bool `json:"editable,omitempty"` + + // modified date. + ModifiedOn *string `json:"modified_on,omitempty"` +} + + +// UnmarshalBrowserTTLResponseResult unmarshals an instance of BrowserTTLResponseResult from the specified map of raw messages. +func UnmarshalBrowserTTLResponseResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(BrowserTTLResponseResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CacheLevelResponseResult : result. +type CacheLevelResponseResult struct { + // cache level. + ID *string `json:"id,omitempty"` + + // cache level. + Value *string `json:"value,omitempty"` + + // editable value. + Editable *bool `json:"editable,omitempty"` + + // modified date. 
+ ModifiedOn *string `json:"modified_on,omitempty"`
+}
+
+
+// UnmarshalCacheLevelResponseResult unmarshals an instance of CacheLevelResponseResult from the specified map of raw messages.
+func UnmarshalCacheLevelResponseResult(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(CacheLevelResponseResult)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "editable", &obj.Editable)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// DeveopmentModeResponseResult : result object.
+type DeveopmentModeResponseResult struct {
+ // object id.
+ ID *string `json:"id,omitempty"`
+
+ // on/off value.
+ Value *string `json:"value,omitempty"`
+
+ // editable value.
+ Editable *bool `json:"editable,omitempty"`
+
+ // modified date.
+ ModifiedOn *string `json:"modified_on,omitempty"`
+}
+
+
+// UnmarshalDeveopmentModeResponseResult unmarshals an instance of DeveopmentModeResponseResult from the specified map of raw messages.
+func UnmarshalDeveopmentModeResponseResult(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(DeveopmentModeResponseResult)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "editable", &obj.Editable)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// EnableQueryStringSortResponseResult : result of sort query string.
+type EnableQueryStringSortResponseResult struct {
+ // cache id.
+ ID *string `json:"id,omitempty"`
+
+ // on/off value.
+ Value *string `json:"value,omitempty"`
+
+ // editable property.
+ Editable *bool `json:"editable,omitempty"`
+
+ // modified date.
+ ModifiedOn *string `json:"modified_on,omitempty"`
+}
+
+
+// UnmarshalEnableQueryStringSortResponseResult unmarshals an instance of EnableQueryStringSortResponseResult from the specified map of raw messages.
+func UnmarshalEnableQueryStringSortResponseResult(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(EnableQueryStringSortResponseResult)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "editable", &obj.Editable)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GetBrowserCacheTtlOptions : The GetBrowserCacheTTL options.
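These Unmarshal* helpers are normally driven by core.UnmarshalModel, but they can also be exercised directly. Note that the result argument must be a pointer to the pointer being populated, since the helper assigns through reflection. A sketch, not part of the vendored file, before the options types that follow:

// decodeBrowserTTLResult is an illustrative helper (not part of the vendored
// file) showing how an Unmarshal* function populates its model from raw JSON.
func decodeBrowserTTLResult(data []byte) (*BrowserTTLResponseResult, error) {
	var raw map[string]json.RawMessage
	if err := json.Unmarshal(data, &raw); err != nil {
		return nil, err
	}
	var result *BrowserTTLResponseResult
	// Pass a pointer to the target pointer; the helper sets it via reflect.
	if err := UnmarshalBrowserTTLResponseResult(raw, &result); err != nil {
		return nil, err
	}
	return result, nil
}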
+type GetBrowserCacheTtlOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetBrowserCacheTtlOptions : Instantiate GetBrowserCacheTtlOptions +func (*CachingApiV1) NewGetBrowserCacheTtlOptions() *GetBrowserCacheTtlOptions { + return &GetBrowserCacheTtlOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetBrowserCacheTtlOptions) SetHeaders(param map[string]string) *GetBrowserCacheTtlOptions { + options.Headers = param + return options +} + +// GetCacheLevelOptions : The GetCacheLevel options. +type GetCacheLevelOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetCacheLevelOptions : Instantiate GetCacheLevelOptions +func (*CachingApiV1) NewGetCacheLevelOptions() *GetCacheLevelOptions { + return &GetCacheLevelOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetCacheLevelOptions) SetHeaders(param map[string]string) *GetCacheLevelOptions { + options.Headers = param + return options +} + +// GetDevelopmentModeOptions : The GetDevelopmentMode options. +type GetDevelopmentModeOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetDevelopmentModeOptions : Instantiate GetDevelopmentModeOptions +func (*CachingApiV1) NewGetDevelopmentModeOptions() *GetDevelopmentModeOptions { + return &GetDevelopmentModeOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetDevelopmentModeOptions) SetHeaders(param map[string]string) *GetDevelopmentModeOptions { + options.Headers = param + return options +} + +// GetQueryStringSortOptions : The GetQueryStringSort options. +type GetQueryStringSortOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetQueryStringSortOptions : Instantiate GetQueryStringSortOptions +func (*CachingApiV1) NewGetQueryStringSortOptions() *GetQueryStringSortOptions { + return &GetQueryStringSortOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetQueryStringSortOptions) SetHeaders(param map[string]string) *GetQueryStringSortOptions { + options.Headers = param + return options +} + +// GetServeStaleContentOptions : The GetServeStaleContent options. +type GetServeStaleContentOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetServeStaleContentOptions : Instantiate GetServeStaleContentOptions +func (*CachingApiV1) NewGetServeStaleContentOptions() *GetServeStaleContentOptions { + return &GetServeStaleContentOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetServeStaleContentOptions) SetHeaders(param map[string]string) *GetServeStaleContentOptions { + options.Headers = param + return options +} + +// PurgeAllOptions : The PurgeAll options. +type PurgeAllOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPurgeAllOptions : Instantiate PurgeAllOptions +func (*CachingApiV1) NewPurgeAllOptions() *PurgeAllOptions { + return &PurgeAllOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *PurgeAllOptions) SetHeaders(param map[string]string) *PurgeAllOptions { + options.Headers = param + return options +} + +// PurgeAllResponseResult : purge object. +type PurgeAllResponseResult struct { + // purge id. 
+ ID *string `json:"id,omitempty"` +} + + +// UnmarshalPurgeAllResponseResult unmarshals an instance of PurgeAllResponseResult from the specified map of raw messages. +func UnmarshalPurgeAllResponseResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PurgeAllResponseResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PurgeByCacheTagsOptions : The PurgeByCacheTags options. +type PurgeByCacheTagsOptions struct { + // array of tags. + Tags []string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPurgeByCacheTagsOptions : Instantiate PurgeByCacheTagsOptions +func (*CachingApiV1) NewPurgeByCacheTagsOptions() *PurgeByCacheTagsOptions { + return &PurgeByCacheTagsOptions{} +} + +// SetTags : Allow user to set Tags +func (options *PurgeByCacheTagsOptions) SetTags(tags []string) *PurgeByCacheTagsOptions { + options.Tags = tags + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PurgeByCacheTagsOptions) SetHeaders(param map[string]string) *PurgeByCacheTagsOptions { + options.Headers = param + return options +} + +// PurgeByHostsOptions : The PurgeByHosts options. +type PurgeByHostsOptions struct { + // hosts name. + Hosts []string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPurgeByHostsOptions : Instantiate PurgeByHostsOptions +func (*CachingApiV1) NewPurgeByHostsOptions() *PurgeByHostsOptions { + return &PurgeByHostsOptions{} +} + +// SetHosts : Allow user to set Hosts +func (options *PurgeByHostsOptions) SetHosts(hosts []string) *PurgeByHostsOptions { + options.Hosts = hosts + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PurgeByHostsOptions) SetHeaders(param map[string]string) *PurgeByHostsOptions { + options.Headers = param + return options +} + +// PurgeByUrlsOptions : The PurgeByUrls options. +type PurgeByUrlsOptions struct { + // purge url array. + Files []string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPurgeByUrlsOptions : Instantiate PurgeByUrlsOptions +func (*CachingApiV1) NewPurgeByUrlsOptions() *PurgeByUrlsOptions { + return &PurgeByUrlsOptions{} +} + +// SetFiles : Allow user to set Files +func (options *PurgeByUrlsOptions) SetFiles(files []string) *PurgeByUrlsOptions { + options.Files = files + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PurgeByUrlsOptions) SetHeaders(param map[string]string) *PurgeByUrlsOptions { + options.Headers = param + return options +} + +// ServeStaleContentResponseResult : result object. +type ServeStaleContentResponseResult struct { + // serve stale content cache id. + ID *string `json:"id,omitempty"` + + // on/off value. + Value *string `json:"value,omitempty"` + + // editable value. + Editable *bool `json:"editable,omitempty"` + + // modified date. + ModifiedOn *string `json:"modified_on,omitempty"` +} + + +// UnmarshalServeStaleContentResponseResult unmarshals an instance of ServeStaleContentResponseResult from the specified map of raw messages. 
+func UnmarshalServeStaleContentResponseResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ServeStaleContentResponseResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateBrowserCacheTtlOptions : The UpdateBrowserCacheTTL options. +type UpdateBrowserCacheTtlOptions struct { + // ttl value. + Value *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateBrowserCacheTtlOptions : Instantiate UpdateBrowserCacheTtlOptions +func (*CachingApiV1) NewUpdateBrowserCacheTtlOptions() *UpdateBrowserCacheTtlOptions { + return &UpdateBrowserCacheTtlOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateBrowserCacheTtlOptions) SetValue(value int64) *UpdateBrowserCacheTtlOptions { + options.Value = core.Int64Ptr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateBrowserCacheTtlOptions) SetHeaders(param map[string]string) *UpdateBrowserCacheTtlOptions { + options.Headers = param + return options +} + +// UpdateCacheLevelOptions : The UpdateCacheLevel options. +type UpdateCacheLevelOptions struct { + // cache level. + Value *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateCacheLevelOptions.Value property. +// cache level. +const ( + UpdateCacheLevelOptions_Value_Aggressive = "aggressive" + UpdateCacheLevelOptions_Value_Basic = "basic" + UpdateCacheLevelOptions_Value_Simplified = "simplified" +) + +// NewUpdateCacheLevelOptions : Instantiate UpdateCacheLevelOptions +func (*CachingApiV1) NewUpdateCacheLevelOptions() *UpdateCacheLevelOptions { + return &UpdateCacheLevelOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateCacheLevelOptions) SetValue(value string) *UpdateCacheLevelOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateCacheLevelOptions) SetHeaders(param map[string]string) *UpdateCacheLevelOptions { + options.Headers = param + return options +} + +// UpdateDevelopmentModeOptions : The UpdateDevelopmentMode options. +type UpdateDevelopmentModeOptions struct { + // on/off value. + Value *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateDevelopmentModeOptions.Value property. +// on/off value. 
+const ( + UpdateDevelopmentModeOptions_Value_Off = "off" + UpdateDevelopmentModeOptions_Value_On = "on" +) + +// NewUpdateDevelopmentModeOptions : Instantiate UpdateDevelopmentModeOptions +func (*CachingApiV1) NewUpdateDevelopmentModeOptions() *UpdateDevelopmentModeOptions { + return &UpdateDevelopmentModeOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateDevelopmentModeOptions) SetValue(value string) *UpdateDevelopmentModeOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateDevelopmentModeOptions) SetHeaders(param map[string]string) *UpdateDevelopmentModeOptions { + options.Headers = param + return options +} + +// UpdateQueryStringSortOptions : The UpdateQueryStringSort options. +type UpdateQueryStringSortOptions struct { + // on/off property value. + Value *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateQueryStringSortOptions.Value property. +// on/off property value. +const ( + UpdateQueryStringSortOptions_Value_Off = "off" + UpdateQueryStringSortOptions_Value_On = "on" +) + +// NewUpdateQueryStringSortOptions : Instantiate UpdateQueryStringSortOptions +func (*CachingApiV1) NewUpdateQueryStringSortOptions() *UpdateQueryStringSortOptions { + return &UpdateQueryStringSortOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateQueryStringSortOptions) SetValue(value string) *UpdateQueryStringSortOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateQueryStringSortOptions) SetHeaders(param map[string]string) *UpdateQueryStringSortOptions { + options.Headers = param + return options +} + +// UpdateServeStaleContentOptions : The UpdateServeStaleContent options. +type UpdateServeStaleContentOptions struct { + // on/off value. + Value *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateServeStaleContentOptions.Value property. +// on/off value. +const ( + UpdateServeStaleContentOptions_Value_Off = "off" + UpdateServeStaleContentOptions_Value_On = "on" +) + +// NewUpdateServeStaleContentOptions : Instantiate UpdateServeStaleContentOptions +func (*CachingApiV1) NewUpdateServeStaleContentOptions() *UpdateServeStaleContentOptions { + return &UpdateServeStaleContentOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateServeStaleContentOptions) SetValue(value string) *UpdateServeStaleContentOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateServeStaleContentOptions) SetHeaders(param map[string]string) *UpdateServeStaleContentOptions { + options.Headers = param + return options +} + +// BrowserTTLResponse : browser ttl response. +type BrowserTTLResponse struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // result object. + Result *BrowserTTLResponseResult `json:"result" validate:"required"` +} + + +// UnmarshalBrowserTTLResponse unmarshals an instance of BrowserTTLResponse from the specified map of raw messages. 
+func UnmarshalBrowserTTLResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(BrowserTTLResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalBrowserTTLResponseResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CacheLevelResponse : cache level response. +type CacheLevelResponse struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // result. + Result *CacheLevelResponseResult `json:"result" validate:"required"` +} + + +// UnmarshalCacheLevelResponse unmarshals an instance of CacheLevelResponse from the specified map of raw messages. +func UnmarshalCacheLevelResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CacheLevelResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalCacheLevelResponseResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DeveopmentModeResponse : development mode response. +type DeveopmentModeResponse struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // result object. + Result *DeveopmentModeResponseResult `json:"result" validate:"required"` +} + + +// UnmarshalDeveopmentModeResponse unmarshals an instance of DeveopmentModeResponse from the specified map of raw messages. +func UnmarshalDeveopmentModeResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeveopmentModeResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDeveopmentModeResponseResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// EnableQueryStringSortResponse : sort query string response. +type EnableQueryStringSortResponse struct { + // success response true/false. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // result of sort query string. + Result *EnableQueryStringSortResponseResult `json:"result" validate:"required"` +} + + +// UnmarshalEnableQueryStringSortResponse unmarshals an instance of EnableQueryStringSortResponse from the specified map of raw messages. 
+func UnmarshalEnableQueryStringSortResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(EnableQueryStringSortResponse)
+ err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalEnableQueryStringSortResponseResult)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// PurgeAllResponse : purge all response.
+type PurgeAllResponse struct {
+ // success response.
+ Success *bool `json:"success" validate:"required"`
+
+ // errors.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // messages.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // purge object.
+ Result *PurgeAllResponseResult `json:"result" validate:"required"`
+}
+
+
+// UnmarshalPurgeAllResponse unmarshals an instance of PurgeAllResponse from the specified map of raw messages.
+func UnmarshalPurgeAllResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(PurgeAllResponse)
+ err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalPurgeAllResponseResult)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ServeStaleContentResponse : serve stale content response.
+type ServeStaleContentResponse struct {
+ // success response.
+ Success *bool `json:"success" validate:"required"`
+
+ // errors.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // messages.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // result object.
+ Result *ServeStaleContentResponseResult `json:"result" validate:"required"`
+}
+
+
+// UnmarshalServeStaleContentResponse unmarshals an instance of ServeStaleContentResponse from the specified map of raw messages.
+func UnmarshalServeStaleContentResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ServeStaleContentResponse)
+ err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalServeStaleContentResponseResult)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
diff --git a/vendor/github.com/IBM/networking-go-sdk/cisipapiv1/cis_ip_api_v1.go b/vendor/github.com/IBM/networking-go-sdk/cisipapiv1/cis_ip_api_v1.go
new file mode 100644
index 00000000000..3cf0e12cfcd
--- /dev/null
+++ b/vendor/github.com/IBM/networking-go-sdk/cisipapiv1/cis_ip_api_v1.go
@@ -0,0 +1,292 @@
+/**
+ * (C) Copyright IBM Corp. 2020.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package cisipapiv1 : Operations and models for the CisIpApiV1 service +package cisipapiv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// CisIpApiV1 : This document describes CIS IP API. +// +// Version: 1.0.0 +type CisIpApiV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "cis_ip_api" + +// CisIpApiV1Options : Service options +type CisIpApiV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewCisIpApiV1UsingExternalConfig : constructs an instance of CisIpApiV1 with passed in options and external configuration. +func NewCisIpApiV1UsingExternalConfig(options *CisIpApiV1Options) (cisIpApi *CisIpApiV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + cisIpApi, err = NewCisIpApiV1(options) + if err != nil { + return + } + + err = cisIpApi.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = cisIpApi.Service.SetServiceURL(options.URL) + } + return +} + +// NewCisIpApiV1 : constructs an instance of CisIpApiV1 with passed in options. +func NewCisIpApiV1(options *CisIpApiV1Options) (service *CisIpApiV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &CisIpApiV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "cisIpApi" suitable for processing requests. 
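Putting the constructor and this service's single operation together: a sketch, not part of the vendored file, that lists the CIDR ranges used by the CIS proxy. The IAM API key value is a placeholder:

// printEdgeCidrs is an illustrative sketch (not part of the vendored file):
// it constructs the service with an IAM API key and prints the IPv4 ranges.
func printEdgeCidrs() error {
	service, err := NewCisIpApiV1(&CisIpApiV1Options{
		Authenticator: &core.IamAuthenticator{ApiKey: "<api-key>"},
	})
	if err != nil {
		return err
	}
	result, _, err := service.ListIps(service.NewListIpsOptions())
	if err != nil {
		return err
	}
	if result.Result != nil {
		for _, cidr := range result.Result.Ipv4Cidrs {
			fmt.Println(cidr)
		}
	}
	return nil
}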
+func (cisIpApi *CisIpApiV1) Clone() *CisIpApiV1 { + if core.IsNil(cisIpApi) { + return nil + } + clone := *cisIpApi + clone.Service = cisIpApi.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (cisIpApi *CisIpApiV1) SetServiceURL(url string) error { + return cisIpApi.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (cisIpApi *CisIpApiV1) GetServiceURL() string { + return cisIpApi.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (cisIpApi *CisIpApiV1) SetDefaultHeaders(headers http.Header) { + cisIpApi.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (cisIpApi *CisIpApiV1) SetEnableGzipCompression(enableGzip bool) { + cisIpApi.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (cisIpApi *CisIpApiV1) GetEnableGzipCompression() bool { + return cisIpApi.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (cisIpApi *CisIpApiV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + cisIpApi.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (cisIpApi *CisIpApiV1) DisableRetries() { + cisIpApi.Service.DisableRetries() +} + +// ListIps : List of all IP addresses used by the CIS proxy +// List of all IP addresses used by the CIS proxy. +func (cisIpApi *CisIpApiV1) ListIps(listIpsOptions *ListIpsOptions) (result *IpResponse, response *core.DetailedResponse, err error) { + return cisIpApi.ListIpsWithContext(context.Background(), listIpsOptions) +} + +// ListIpsWithContext is an alternate form of the ListIps method which supports a Context parameter +func (cisIpApi *CisIpApiV1) ListIpsWithContext(ctx context.Context, listIpsOptions *ListIpsOptions) (result *IpResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listIpsOptions, "listIpsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = cisIpApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(cisIpApi.Service.Options.URL, `/v1/ips`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listIpsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("cis_ip_api", "V1", "ListIps") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = cisIpApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIpResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// IpResponseResult : Container for response information. +type IpResponseResult struct { + // List of IPv4 CIDR addresses. + Ipv4Cidrs []string `json:"ipv4_cidrs,omitempty"` + + // List of IPv6 CIDR addresses. 
+ Ipv6Cidrs []string `json:"ipv6_cidrs,omitempty"` +} + + +// UnmarshalIpResponseResult unmarshals an instance of IpResponseResult from the specified map of raw messages. +func UnmarshalIpResponseResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IpResponseResult) + err = core.UnmarshalPrimitive(m, "ipv4_cidrs", &obj.Ipv4Cidrs) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ipv6_cidrs", &obj.Ipv6Cidrs) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListIpsOptions : The ListIps options. +type ListIpsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListIpsOptions : Instantiate ListIpsOptions +func (*CisIpApiV1) NewListIpsOptions() *ListIpsOptions { + return &ListIpsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListIpsOptions) SetHeaders(param map[string]string) *ListIpsOptions { + options.Headers = param + return options +} + +// IpResponse : ip response. +type IpResponse struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // Container for response information. + Result *IpResponseResult `json:"result" validate:"required"` +} + + +// UnmarshalIpResponse unmarshals an instance of IpResponse from the specified map of raw messages. +func UnmarshalIpResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IpResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalIpResponseResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/custompagesv1/custom_pages_v1.go b/vendor/github.com/IBM/networking-go-sdk/custompagesv1/custom_pages_v1.go new file mode 100644 index 00000000000..ad2c5f21b36 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/custompagesv1/custom_pages_v1.go @@ -0,0 +1,1030 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package custompagesv1 : Operations and models for the CustomPagesV1 service +package custompagesv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "github.com/go-openapi/strfmt" + "net/http" + "reflect" + "time" +) + +// CustomPagesV1 : Custom Pages +// +// Version: 1.0.0 +type CustomPagesV1 struct { + Service *core.BaseService + + // Full crn of the service instance. + Crn *string + + // Zone identifier. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "custom_pages" + +// CustomPagesV1Options : Service options +type CustomPagesV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full crn of the service instance. + Crn *string `validate:"required"` + + // Zone identifier. + ZoneIdentifier *string `validate:"required"` +} + +// NewCustomPagesV1UsingExternalConfig : constructs an instance of CustomPagesV1 with passed in options and external configuration. +func NewCustomPagesV1UsingExternalConfig(options *CustomPagesV1Options) (customPages *CustomPagesV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + customPages, err = NewCustomPagesV1(options) + if err != nil { + return + } + + err = customPages.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = customPages.Service.SetServiceURL(options.URL) + } + return +} + +// NewCustomPagesV1 : constructs an instance of CustomPagesV1 with passed in options. +func NewCustomPagesV1(options *CustomPagesV1Options) (service *CustomPagesV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &CustomPagesV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "customPages" suitable for processing requests. 
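+// Editor's note (illustrative, not part of the generated file): Clone lets a
+// caller derive a per-request copy without mutating the shared instance, e.g.
+//
+//	reqService := service.Clone()
+//	_ = reqService.SetServiceURL("https://api.cis.cloud.ibm.com") // hypothetical per-request URL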
+func (customPages *CustomPagesV1) Clone() *CustomPagesV1 { + if core.IsNil(customPages) { + return nil + } + clone := *customPages + clone.Service = customPages.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (customPages *CustomPagesV1) SetServiceURL(url string) error { + return customPages.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (customPages *CustomPagesV1) GetServiceURL() string { + return customPages.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (customPages *CustomPagesV1) SetDefaultHeaders(headers http.Header) { + customPages.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (customPages *CustomPagesV1) SetEnableGzipCompression(enableGzip bool) { + customPages.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (customPages *CustomPagesV1) GetEnableGzipCompression() bool { + return customPages.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (customPages *CustomPagesV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + customPages.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (customPages *CustomPagesV1) DisableRetries() { + customPages.Service.DisableRetries() +} + +// ListInstanceCustomPages : List all custom pages for a given instance +// List all custom pages for a given instance. 
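+//
+// Editor's sketch (illustrative; the IAM authenticator comes from
+// go-sdk-core, and the API key, CRN and zone ID below are placeholders):
+//
+//	service, err := custompagesv1.NewCustomPagesV1(&custompagesv1.CustomPagesV1Options{
+//		Authenticator:  &core.IamAuthenticator{ApiKey: "<api-key>"},
+//		Crn:            core.StringPtr("<instance-crn>"),
+//		ZoneIdentifier: core.StringPtr("<zone-id>"),
+//	})
+//	// ...check err, then:
+//	list, response, err := service.ListInstanceCustomPages(service.NewListInstanceCustomPagesOptions())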
+func (customPages *CustomPagesV1) ListInstanceCustomPages(listInstanceCustomPagesOptions *ListInstanceCustomPagesOptions) (result *ListCustomPagesResp, response *core.DetailedResponse, err error) { + return customPages.ListInstanceCustomPagesWithContext(context.Background(), listInstanceCustomPagesOptions) +} + +// ListInstanceCustomPagesWithContext is an alternate form of the ListInstanceCustomPages method which supports a Context parameter +func (customPages *CustomPagesV1) ListInstanceCustomPagesWithContext(ctx context.Context, listInstanceCustomPagesOptions *ListInstanceCustomPagesOptions) (result *ListCustomPagesResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listInstanceCustomPagesOptions, "listInstanceCustomPagesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *customPages.Crn, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = customPages.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(customPages.Service.Options.URL, `/v1/{crn}/custom_pages`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listInstanceCustomPagesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("custom_pages", "V1", "ListInstanceCustomPages") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = customPages.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListCustomPagesResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetInstanceCustomPage : Get a custom page for a given instance +// Get a specific custom page for a given instance. 
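+//
+// Editor's sketch, assuming the `service` built above; the identifier is one
+// of the PageIdentifier constants defined later in this file:
+//
+//	opts := service.NewGetInstanceCustomPageOptions(custompagesv1.GetInstanceCustomPageOptions_PageIdentifier_BasicChallenge)
+//	page, response, err := service.GetInstanceCustomPage(opts)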
+func (customPages *CustomPagesV1) GetInstanceCustomPage(getInstanceCustomPageOptions *GetInstanceCustomPageOptions) (result *CustomPageSpecificResp, response *core.DetailedResponse, err error) { + return customPages.GetInstanceCustomPageWithContext(context.Background(), getInstanceCustomPageOptions) +} + +// GetInstanceCustomPageWithContext is an alternate form of the GetInstanceCustomPage method which supports a Context parameter +func (customPages *CustomPagesV1) GetInstanceCustomPageWithContext(ctx context.Context, getInstanceCustomPageOptions *GetInstanceCustomPageOptions) (result *CustomPageSpecificResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getInstanceCustomPageOptions, "getInstanceCustomPageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getInstanceCustomPageOptions, "getInstanceCustomPageOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *customPages.Crn, + "page_identifier": *getInstanceCustomPageOptions.PageIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = customPages.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(customPages.Service.Options.URL, `/v1/{crn}/custom_pages/{page_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getInstanceCustomPageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("custom_pages", "V1", "GetInstanceCustomPage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = customPages.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomPageSpecificResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateInstanceCustomPage : Update a custom page for a given instance +// Update a specific custom page for a given instance. 
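+//
+// Editor's sketch (placeholder URL); the setters chain, so an update reads:
+//
+//	opts := service.NewUpdateInstanceCustomPageOptions("basic_challenge").
+//		SetURL("https://example.com/basic_challenge.html").
+//		SetState(custompagesv1.UpdateInstanceCustomPageOptions_State_Customized)
+//	page, response, err := service.UpdateInstanceCustomPage(opts)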
+func (customPages *CustomPagesV1) UpdateInstanceCustomPage(updateInstanceCustomPageOptions *UpdateInstanceCustomPageOptions) (result *CustomPageSpecificResp, response *core.DetailedResponse, err error) { + return customPages.UpdateInstanceCustomPageWithContext(context.Background(), updateInstanceCustomPageOptions) +} + +// UpdateInstanceCustomPageWithContext is an alternate form of the UpdateInstanceCustomPage method which supports a Context parameter +func (customPages *CustomPagesV1) UpdateInstanceCustomPageWithContext(ctx context.Context, updateInstanceCustomPageOptions *UpdateInstanceCustomPageOptions) (result *CustomPageSpecificResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateInstanceCustomPageOptions, "updateInstanceCustomPageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateInstanceCustomPageOptions, "updateInstanceCustomPageOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *customPages.Crn, + "page_identifier": *updateInstanceCustomPageOptions.PageIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = customPages.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(customPages.Service.Options.URL, `/v1/{crn}/custom_pages/{page_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateInstanceCustomPageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("custom_pages", "V1", "UpdateInstanceCustomPage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateInstanceCustomPageOptions.URL != nil { + body["url"] = updateInstanceCustomPageOptions.URL + } + if updateInstanceCustomPageOptions.State != nil { + body["state"] = updateInstanceCustomPageOptions.State + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = customPages.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomPageSpecificResp) + if err != nil { + return + } + response.Result = result + + return +} + +// ListZoneCustomPages : List all custom pages for a given zone +// List all custom pages for a given zone. 
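+//
+// Editor's sketch: the zone is taken from the service's ZoneIdentifier field,
+// so the options object carries only optional headers:
+//
+//	list, response, err := service.ListZoneCustomPages(service.NewListZoneCustomPagesOptions())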
+func (customPages *CustomPagesV1) ListZoneCustomPages(listZoneCustomPagesOptions *ListZoneCustomPagesOptions) (result *ListCustomPagesResp, response *core.DetailedResponse, err error) { + return customPages.ListZoneCustomPagesWithContext(context.Background(), listZoneCustomPagesOptions) +} + +// ListZoneCustomPagesWithContext is an alternate form of the ListZoneCustomPages method which supports a Context parameter +func (customPages *CustomPagesV1) ListZoneCustomPagesWithContext(ctx context.Context, listZoneCustomPagesOptions *ListZoneCustomPagesOptions) (result *ListCustomPagesResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listZoneCustomPagesOptions, "listZoneCustomPagesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *customPages.Crn, + "zone_identifier": *customPages.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = customPages.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(customPages.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/custom_pages`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listZoneCustomPagesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("custom_pages", "V1", "ListZoneCustomPages") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = customPages.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListCustomPagesResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetZoneCustomPage : Get a custom page for a given zone +// Get a specific custom page for a given zone. 
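+//
+// Editor's sketch, reusing the zone-level PageIdentifier constants defined
+// later in this file:
+//
+//	opts := service.NewGetZoneCustomPageOptions(custompagesv1.GetZoneCustomPageOptions_PageIdentifier_WafBlock)
+//	page, response, err := service.GetZoneCustomPage(opts)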
+func (customPages *CustomPagesV1) GetZoneCustomPage(getZoneCustomPageOptions *GetZoneCustomPageOptions) (result *CustomPageSpecificResp, response *core.DetailedResponse, err error) { + return customPages.GetZoneCustomPageWithContext(context.Background(), getZoneCustomPageOptions) +} + +// GetZoneCustomPageWithContext is an alternate form of the GetZoneCustomPage method which supports a Context parameter +func (customPages *CustomPagesV1) GetZoneCustomPageWithContext(ctx context.Context, getZoneCustomPageOptions *GetZoneCustomPageOptions) (result *CustomPageSpecificResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getZoneCustomPageOptions, "getZoneCustomPageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getZoneCustomPageOptions, "getZoneCustomPageOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *customPages.Crn, + "zone_identifier": *customPages.ZoneIdentifier, + "page_identifier": *getZoneCustomPageOptions.PageIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = customPages.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(customPages.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/custom_pages/{page_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getZoneCustomPageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("custom_pages", "V1", "GetZoneCustomPage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = customPages.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomPageSpecificResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateZoneCustomPage : Update a custom page for a given zone +// Update a specific custom page for a given zone. 
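+//
+// Editor's sketch (placeholder URL); per the State constants defined later in
+// this file, a page can also be set back to "default":
+//
+//	opts := service.NewUpdateZoneCustomPageOptions("waf_block").
+//		SetURL("https://example.com/waf_block.html").
+//		SetState(custompagesv1.UpdateZoneCustomPageOptions_State_Customized)
+//	page, response, err := service.UpdateZoneCustomPage(opts)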
+func (customPages *CustomPagesV1) UpdateZoneCustomPage(updateZoneCustomPageOptions *UpdateZoneCustomPageOptions) (result *CustomPageSpecificResp, response *core.DetailedResponse, err error) { + return customPages.UpdateZoneCustomPageWithContext(context.Background(), updateZoneCustomPageOptions) +} + +// UpdateZoneCustomPageWithContext is an alternate form of the UpdateZoneCustomPage method which supports a Context parameter +func (customPages *CustomPagesV1) UpdateZoneCustomPageWithContext(ctx context.Context, updateZoneCustomPageOptions *UpdateZoneCustomPageOptions) (result *CustomPageSpecificResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateZoneCustomPageOptions, "updateZoneCustomPageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateZoneCustomPageOptions, "updateZoneCustomPageOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *customPages.Crn, + "zone_identifier": *customPages.ZoneIdentifier, + "page_identifier": *updateZoneCustomPageOptions.PageIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = customPages.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(customPages.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/custom_pages/{page_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateZoneCustomPageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("custom_pages", "V1", "UpdateZoneCustomPage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateZoneCustomPageOptions.URL != nil { + body["url"] = updateZoneCustomPageOptions.URL + } + if updateZoneCustomPageOptions.State != nil { + body["state"] = updateZoneCustomPageOptions.State + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = customPages.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomPageSpecificResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetInstanceCustomPageOptions : The GetInstanceCustomPage options. +type GetInstanceCustomPageOptions struct { + // Custom page identifier. + PageIdentifier *string `json:"page_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetInstanceCustomPageOptions.PageIdentifier property. +// Custom page identifier. 
+const ( + GetInstanceCustomPageOptions_PageIdentifier_AlwaysOnline = "always_online" + GetInstanceCustomPageOptions_PageIdentifier_BasicChallenge = "basic_challenge" + GetInstanceCustomPageOptions_PageIdentifier_CountryChallenge = "country_challenge" + GetInstanceCustomPageOptions_PageIdentifier_IpBlock = "ip_block" + GetInstanceCustomPageOptions_PageIdentifier_RatelimitBlock = "ratelimit_block" + GetInstanceCustomPageOptions_PageIdentifier_UnderAttack = "under_attack" + GetInstanceCustomPageOptions_PageIdentifier_WafBlock = "waf_block" + GetInstanceCustomPageOptions_PageIdentifier_WafChallenge = "waf_challenge" +) + +// NewGetInstanceCustomPageOptions : Instantiate GetInstanceCustomPageOptions +func (*CustomPagesV1) NewGetInstanceCustomPageOptions(pageIdentifier string) *GetInstanceCustomPageOptions { + return &GetInstanceCustomPageOptions{ + PageIdentifier: core.StringPtr(pageIdentifier), + } +} + +// SetPageIdentifier : Allow user to set PageIdentifier +func (options *GetInstanceCustomPageOptions) SetPageIdentifier(pageIdentifier string) *GetInstanceCustomPageOptions { + options.PageIdentifier = core.StringPtr(pageIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetInstanceCustomPageOptions) SetHeaders(param map[string]string) *GetInstanceCustomPageOptions { + options.Headers = param + return options +} + +// GetZoneCustomPageOptions : The GetZoneCustomPage options. +type GetZoneCustomPageOptions struct { + // Custom page identifier. + PageIdentifier *string `json:"page_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetZoneCustomPageOptions.PageIdentifier property. +// Custom page identifier. +const ( + GetZoneCustomPageOptions_PageIdentifier_AlwaysOnline = "always_online" + GetZoneCustomPageOptions_PageIdentifier_BasicChallenge = "basic_challenge" + GetZoneCustomPageOptions_PageIdentifier_CountryChallenge = "country_challenge" + GetZoneCustomPageOptions_PageIdentifier_IpBlock = "ip_block" + GetZoneCustomPageOptions_PageIdentifier_RatelimitBlock = "ratelimit_block" + GetZoneCustomPageOptions_PageIdentifier_UnderAttack = "under_attack" + GetZoneCustomPageOptions_PageIdentifier_WafBlock = "waf_block" + GetZoneCustomPageOptions_PageIdentifier_WafChallenge = "waf_challenge" +) + +// NewGetZoneCustomPageOptions : Instantiate GetZoneCustomPageOptions +func (*CustomPagesV1) NewGetZoneCustomPageOptions(pageIdentifier string) *GetZoneCustomPageOptions { + return &GetZoneCustomPageOptions{ + PageIdentifier: core.StringPtr(pageIdentifier), + } +} + +// SetPageIdentifier : Allow user to set PageIdentifier +func (options *GetZoneCustomPageOptions) SetPageIdentifier(pageIdentifier string) *GetZoneCustomPageOptions { + options.PageIdentifier = core.StringPtr(pageIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetZoneCustomPageOptions) SetHeaders(param map[string]string) *GetZoneCustomPageOptions { + options.Headers = param + return options +} + +// ListCustomPagesRespResultInfo : Statistics of results. +type ListCustomPagesRespResultInfo struct { + // Page number. + Page *int64 `json:"page" validate:"required"` + + // Number of results per page. + PerPage *int64 `json:"per_page" validate:"required"` + + // Number of total pages. + TotalPages *int64 `json:"total_pages" validate:"required"` + + // Number of results. + Count *int64 `json:"count" validate:"required"` + + // Total number of results. 
+ TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalListCustomPagesRespResultInfo unmarshals an instance of ListCustomPagesRespResultInfo from the specified map of raw messages. +func UnmarshalListCustomPagesRespResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListCustomPagesRespResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_pages", &obj.TotalPages) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListInstanceCustomPagesOptions : The ListInstanceCustomPages options. +type ListInstanceCustomPagesOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListInstanceCustomPagesOptions : Instantiate ListInstanceCustomPagesOptions +func (*CustomPagesV1) NewListInstanceCustomPagesOptions() *ListInstanceCustomPagesOptions { + return &ListInstanceCustomPagesOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListInstanceCustomPagesOptions) SetHeaders(param map[string]string) *ListInstanceCustomPagesOptions { + options.Headers = param + return options +} + +// ListZoneCustomPagesOptions : The ListZoneCustomPages options. +type ListZoneCustomPagesOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListZoneCustomPagesOptions : Instantiate ListZoneCustomPagesOptions +func (*CustomPagesV1) NewListZoneCustomPagesOptions() *ListZoneCustomPagesOptions { + return &ListZoneCustomPagesOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListZoneCustomPagesOptions) SetHeaders(param map[string]string) *ListZoneCustomPagesOptions { + options.Headers = param + return options +} + +// UpdateInstanceCustomPageOptions : The UpdateInstanceCustomPage options. +type UpdateInstanceCustomPageOptions struct { + // Custom page identifier. + PageIdentifier *string `json:"page_identifier" validate:"required,ne="` + + // A URL that is associated with the Custom Page. + URL *string `json:"url,omitempty"` + + // The Custom Page state. + State *string `json:"state,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateInstanceCustomPageOptions.PageIdentifier property. +// Custom page identifier. +const ( + UpdateInstanceCustomPageOptions_PageIdentifier_AlwaysOnline = "always_online" + UpdateInstanceCustomPageOptions_PageIdentifier_BasicChallenge = "basic_challenge" + UpdateInstanceCustomPageOptions_PageIdentifier_CountryChallenge = "country_challenge" + UpdateInstanceCustomPageOptions_PageIdentifier_IpBlock = "ip_block" + UpdateInstanceCustomPageOptions_PageIdentifier_RatelimitBlock = "ratelimit_block" + UpdateInstanceCustomPageOptions_PageIdentifier_UnderAttack = "under_attack" + UpdateInstanceCustomPageOptions_PageIdentifier_WafBlock = "waf_block" + UpdateInstanceCustomPageOptions_PageIdentifier_WafChallenge = "waf_challenge" +) + +// Constants associated with the UpdateInstanceCustomPageOptions.State property. +// The Custom Page state. 
+const ( + UpdateInstanceCustomPageOptions_State_Customized = "customized" + UpdateInstanceCustomPageOptions_State_Default = "default" +) + +// NewUpdateInstanceCustomPageOptions : Instantiate UpdateInstanceCustomPageOptions +func (*CustomPagesV1) NewUpdateInstanceCustomPageOptions(pageIdentifier string) *UpdateInstanceCustomPageOptions { + return &UpdateInstanceCustomPageOptions{ + PageIdentifier: core.StringPtr(pageIdentifier), + } +} + +// SetPageIdentifier : Allow user to set PageIdentifier +func (options *UpdateInstanceCustomPageOptions) SetPageIdentifier(pageIdentifier string) *UpdateInstanceCustomPageOptions { + options.PageIdentifier = core.StringPtr(pageIdentifier) + return options +} + +// SetURL : Allow user to set URL +func (options *UpdateInstanceCustomPageOptions) SetURL(url string) *UpdateInstanceCustomPageOptions { + options.URL = core.StringPtr(url) + return options +} + +// SetState : Allow user to set State +func (options *UpdateInstanceCustomPageOptions) SetState(state string) *UpdateInstanceCustomPageOptions { + options.State = core.StringPtr(state) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateInstanceCustomPageOptions) SetHeaders(param map[string]string) *UpdateInstanceCustomPageOptions { + options.Headers = param + return options +} + +// UpdateZoneCustomPageOptions : The UpdateZoneCustomPage options. +type UpdateZoneCustomPageOptions struct { + // Custom page identifier. + PageIdentifier *string `json:"page_identifier" validate:"required,ne="` + + // A URL that is associated with the Custom Page. + URL *string `json:"url,omitempty"` + + // The Custom Page state. + State *string `json:"state,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateZoneCustomPageOptions.PageIdentifier property. +// Custom page identifier. +const ( + UpdateZoneCustomPageOptions_PageIdentifier_AlwaysOnline = "always_online" + UpdateZoneCustomPageOptions_PageIdentifier_BasicChallenge = "basic_challenge" + UpdateZoneCustomPageOptions_PageIdentifier_CountryChallenge = "country_challenge" + UpdateZoneCustomPageOptions_PageIdentifier_IpBlock = "ip_block" + UpdateZoneCustomPageOptions_PageIdentifier_RatelimitBlock = "ratelimit_block" + UpdateZoneCustomPageOptions_PageIdentifier_UnderAttack = "under_attack" + UpdateZoneCustomPageOptions_PageIdentifier_WafBlock = "waf_block" + UpdateZoneCustomPageOptions_PageIdentifier_WafChallenge = "waf_challenge" +) + +// Constants associated with the UpdateZoneCustomPageOptions.State property. +// The Custom Page state. 
+const ( + UpdateZoneCustomPageOptions_State_Customized = "customized" + UpdateZoneCustomPageOptions_State_Default = "default" +) + +// NewUpdateZoneCustomPageOptions : Instantiate UpdateZoneCustomPageOptions +func (*CustomPagesV1) NewUpdateZoneCustomPageOptions(pageIdentifier string) *UpdateZoneCustomPageOptions { + return &UpdateZoneCustomPageOptions{ + PageIdentifier: core.StringPtr(pageIdentifier), + } +} + +// SetPageIdentifier : Allow user to set PageIdentifier +func (options *UpdateZoneCustomPageOptions) SetPageIdentifier(pageIdentifier string) *UpdateZoneCustomPageOptions { + options.PageIdentifier = core.StringPtr(pageIdentifier) + return options +} + +// SetURL : Allow user to set URL +func (options *UpdateZoneCustomPageOptions) SetURL(url string) *UpdateZoneCustomPageOptions { + options.URL = core.StringPtr(url) + return options +} + +// SetState : Allow user to set State +func (options *UpdateZoneCustomPageOptions) SetState(state string) *UpdateZoneCustomPageOptions { + options.State = core.StringPtr(state) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateZoneCustomPageOptions) SetHeaders(param map[string]string) *UpdateZoneCustomPageOptions { + options.Headers = param + return options +} + +// CustomPageObject : custom page object. +type CustomPageObject struct { + // Custom page identifier. + ID *string `json:"id" validate:"required"` + + // Description of custom page. + Description *string `json:"description" validate:"required"` + + // array of page tokens. + RequiredTokens []string `json:"required_tokens" validate:"required"` + + // Preview target. + PreviewTarget *string `json:"preview_target" validate:"required"` + + // Created date. + CreatedOn *strfmt.DateTime `json:"created_on" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` + + // A URL that is associated with the Custom Page. + URL *string `json:"url" validate:"required"` + + // The Custom Page state. + State *string `json:"state" validate:"required"` +} + +// Constants associated with the CustomPageObject.ID property. +// Custom page identifier. +const ( + CustomPageObject_ID_AlwaysOnline = "always_online" + CustomPageObject_ID_BasicChallenge = "basic_challenge" + CustomPageObject_ID_CountryChallenge = "country_challenge" + CustomPageObject_ID_IpBlock = "ip_block" + CustomPageObject_ID_RatelimitBlock = "ratelimit_block" + CustomPageObject_ID_UnderAttack = "under_attack" + CustomPageObject_ID_WafBlock = "waf_block" + CustomPageObject_ID_WafChallenge = "waf_challenge" +) + +// Constants associated with the CustomPageObject.RequiredTokens property. +const ( + CustomPageObject_RequiredTokens_AlwaysOnlineNoCopyBox = "::ALWAYS_ONLINE_NO_COPY_BOX::" + CustomPageObject_RequiredTokens_CaptchaBox = "::CAPTCHA_BOX::" + CustomPageObject_RequiredTokens_CloudflareError1000sBox = "::CLOUDFLARE_ERROR_1000S_BOX::" + CustomPageObject_RequiredTokens_CloudflareError500sBox = "::CLOUDFLARE_ERROR_500S_BOX::" + CustomPageObject_RequiredTokens_ImUnderAttackBox = "::IM_UNDER_ATTACK_BOX::" +) + +// Constants associated with the CustomPageObject.State property. +// The Custom Page state. +const ( + CustomPageObject_State_Customized = "customized" + CustomPageObject_State_Default = "default" +) + + +// UnmarshalCustomPageObject unmarshals an instance of CustomPageObject from the specified map of raw messages. 
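+// Editor's note: the Unmarshal* functions in this file are rarely called
+// directly; the service methods hand them to core.UnmarshalModel, which
+// decodes the raw JSON map and then sets the caller's result pointer via
+// reflection, as the final reflect.ValueOf(...).Set(...) line shows.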
+func UnmarshalCustomPageObject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CustomPageObject) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "required_tokens", &obj.RequiredTokens) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "preview_target", &obj.PreviewTarget) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CustomPageSpecificResp : custom page specific response. +type CustomPageSpecificResp struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // custom page object. + Result *CustomPageObject `json:"result" validate:"required"` +} + + +// UnmarshalCustomPageSpecificResp unmarshals an instance of CustomPageSpecificResp from the specified map of raw messages. +func UnmarshalCustomPageSpecificResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CustomPageSpecificResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalCustomPageObject) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListCustomPagesResp : list of custom pages response. +type ListCustomPagesResp struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // custom pages array. + Result []CustomPageObject `json:"result" validate:"required"` + + // Statistics of results. + ResultInfo *ListCustomPagesRespResultInfo `json:"result_info" validate:"required"` +} + + +// UnmarshalListCustomPagesResp unmarshals an instance of ListCustomPagesResp from the specified map of raw messages. 
+func UnmarshalListCustomPagesResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListCustomPagesResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalCustomPageObject) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalListCustomPagesRespResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/directlinkproviderv2/direct_link_provider_v2.go b/vendor/github.com/IBM/networking-go-sdk/directlinkproviderv2/direct_link_provider_v2.go new file mode 100644 index 00000000000..7b68f173eca --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/directlinkproviderv2/direct_link_provider_v2.go @@ -0,0 +1,1466 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package directlinkproviderv2 : Operations and models for the DirectLinkProviderV2 service +package directlinkproviderv2 + +import ( + "encoding/json" + "fmt" + "reflect" + + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "github.com/go-openapi/strfmt" +) + +// DirectLinkProviderV2 : No description provided (generated by Openapi Generator +// https://github.com/openapitools/openapi-generator) +// +// Version: __VERSION__ +type DirectLinkProviderV2 struct { + Service *core.BaseService + + // Requests the version of the API as a date in the format `YYYY-MM-DD`. Any date from 2020-04-28 up to the current + // date may be provided. Specify the current date to request the latest version. + Version *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://directlink.cloud.ibm.com/provider/v2" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "direct_link_provider" + +// DirectLinkProviderV2Options : Service options +type DirectLinkProviderV2Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Requests the version of the API as a date in the format `YYYY-MM-DD`. Any date from 2020-04-28 up to the current + // date may be provided. Specify the current date to request the latest version. + Version *string `validate:"required"` +} + +// NewDirectLinkProviderV2UsingExternalConfig : constructs an instance of DirectLinkProviderV2 with passed in options and external configuration. 
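+//
+// Editor's sketch: with external configuration, credentials are looked up
+// under the "direct_link_provider" service name (environment or credentials
+// file), so only the required Version date is passed explicitly; 2020-04-28
+// is the earliest date the service documents:
+//
+//	provider, err := directlinkproviderv2.NewDirectLinkProviderV2UsingExternalConfig(
+//		&directlinkproviderv2.DirectLinkProviderV2Options{
+//			Version: core.StringPtr("2020-04-28"),
+//		})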
+func NewDirectLinkProviderV2UsingExternalConfig(options *DirectLinkProviderV2Options) (directLinkProvider *DirectLinkProviderV2, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + directLinkProvider, err = NewDirectLinkProviderV2(options) + if err != nil { + return + } + + err = directLinkProvider.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = directLinkProvider.Service.SetServiceURL(options.URL) + } + return +} + +// NewDirectLinkProviderV2 : constructs an instance of DirectLinkProviderV2 with passed in options. +func NewDirectLinkProviderV2(options *DirectLinkProviderV2Options) (service *DirectLinkProviderV2, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &DirectLinkProviderV2{ + Service: baseService, + Version: options.Version, + } + + return +} + +// SetServiceURL sets the service URL +func (directLinkProvider *DirectLinkProviderV2) SetServiceURL(url string) error { + return directLinkProvider.Service.SetServiceURL(url) +} + +// ListProviderGateways : List gateways +// List all Direct Link Connect gateways created by this provider. +func (directLinkProvider *DirectLinkProviderV2) ListProviderGateways(listProviderGatewaysOptions *ListProviderGatewaysOptions) (result *ProviderGatewayCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listProviderGatewaysOptions, "listProviderGatewaysOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLinkProvider.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listProviderGatewaysOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link_provider", "V2", "ListProviderGateways") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLinkProvider.Version)) + if listProviderGatewaysOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listProviderGatewaysOptions.Start)) + } + if listProviderGatewaysOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listProviderGatewaysOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLinkProvider.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProviderGatewayCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateProviderGateway : Create gateway +// Create a Direct Link Connect gateway based on the supplied template in the specified customer account. 
+// +// The gateway will be 'provider_api_managed=true'. +func (directLinkProvider *DirectLinkProviderV2) CreateProviderGateway(createProviderGatewayOptions *CreateProviderGatewayOptions) (result *ProviderGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createProviderGatewayOptions, "createProviderGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createProviderGatewayOptions, "createProviderGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.POST) + _, err = builder.ConstructHTTPURL(directLinkProvider.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range createProviderGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link_provider", "V2", "CreateProviderGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLinkProvider.Version)) + if createProviderGatewayOptions.CheckOnly != nil { + builder.AddQuery("check_only", fmt.Sprint(*createProviderGatewayOptions.CheckOnly)) + } + + body := make(map[string]interface{}) + if createProviderGatewayOptions.BgpAsn != nil { + body["bgp_asn"] = createProviderGatewayOptions.BgpAsn + } + if createProviderGatewayOptions.CustomerAccountID != nil { + body["customer_account_id"] = createProviderGatewayOptions.CustomerAccountID + } + if createProviderGatewayOptions.Name != nil { + body["name"] = createProviderGatewayOptions.Name + } + if createProviderGatewayOptions.Port != nil { + body["port"] = createProviderGatewayOptions.Port + } + if createProviderGatewayOptions.SpeedMbps != nil { + body["speed_mbps"] = createProviderGatewayOptions.SpeedMbps + } + if createProviderGatewayOptions.BgpCerCidr != nil { + body["bgp_cer_cidr"] = createProviderGatewayOptions.BgpCerCidr + } + if createProviderGatewayOptions.BgpIbmCidr != nil { + body["bgp_ibm_cidr"] = createProviderGatewayOptions.BgpIbmCidr + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLinkProvider.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProviderGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteProviderGateway : Delete gateway +// Delete a Direct Link Connect provider managed gateway. 
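+//
+// Editor's sketch, assuming the `provider` built above and a placeholder
+// gateway ID:
+//
+//	gw, response, err := provider.DeleteProviderGateway(
+//		provider.NewDeleteProviderGatewayOptions("<gateway-id>"))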
+func (directLinkProvider *DirectLinkProviderV2) DeleteProviderGateway(deleteProviderGatewayOptions *DeleteProviderGatewayOptions) (result *ProviderGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteProviderGatewayOptions, "deleteProviderGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteProviderGatewayOptions, "deleteProviderGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{*deleteProviderGatewayOptions.ID} + + builder := core.NewRequestBuilder(core.DELETE) + _, err = builder.ConstructHTTPURL(directLinkProvider.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range deleteProviderGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link_provider", "V2", "DeleteProviderGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLinkProvider.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLinkProvider.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProviderGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// GetProviderGateway : Get gateway +// Get a Direct Link Connect gateway. +// Gateways with either `provider_api_managed=true` or `provider_api_managed=false` can be retrieved. +func (directLinkProvider *DirectLinkProviderV2) GetProviderGateway(getProviderGatewayOptions *GetProviderGatewayOptions) (result *ProviderGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getProviderGatewayOptions, "getProviderGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getProviderGatewayOptions, "getProviderGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{*getProviderGatewayOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLinkProvider.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getProviderGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link_provider", "V2", "GetProviderGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLinkProvider.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLinkProvider.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProviderGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateProviderGateway : Update gateway +// Update a Direct Link Connect provider managed gateway. +// +// Name changes are applied immediately, other changes result in a gateway change_request and require approval from the +// client. 
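+//
+// Editor's sketch (placeholder ID and name; the options fields shown match
+// those the method body reads):
+//
+//	opts := &UpdateProviderGatewayOptions{
+//		ID:   core.StringPtr("<gateway-id>"),
+//		Name: core.StringPtr("renamed-gateway"),
+//	}
+//	gw, response, err := provider.UpdateProviderGateway(opts)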
+func (directLinkProvider *DirectLinkProviderV2) UpdateProviderGateway(updateProviderGatewayOptions *UpdateProviderGatewayOptions) (result *ProviderGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateProviderGatewayOptions, "updateProviderGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateProviderGatewayOptions, "updateProviderGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{*updateProviderGatewayOptions.ID} + + builder := core.NewRequestBuilder(core.PATCH) + _, err = builder.ConstructHTTPURL(directLinkProvider.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range updateProviderGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link_provider", "V2", "UpdateProviderGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLinkProvider.Version)) + + body := make(map[string]interface{}) + if updateProviderGatewayOptions.Name != nil { + body["name"] = updateProviderGatewayOptions.Name + } + if updateProviderGatewayOptions.SpeedMbps != nil { + body["speed_mbps"] = updateProviderGatewayOptions.SpeedMbps + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLinkProvider.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProviderGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// ListProviderPorts : List ports +// List all provider ports (associated with the caller). 
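+//
+// Editor's sketch using the paging options (Start/Limit) defined later in
+// this file:
+//
+//	opts := provider.NewListProviderPortsOptions().SetLimit(50)
+//	ports, response, err := provider.ListProviderPorts(opts)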
+func (directLinkProvider *DirectLinkProviderV2) ListProviderPorts(listProviderPortsOptions *ListProviderPortsOptions) (result *ProviderPortCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listProviderPortsOptions, "listProviderPortsOptions") + if err != nil { + return + } + + pathSegments := []string{"ports"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLinkProvider.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listProviderPortsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link_provider", "V2", "ListProviderPorts") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLinkProvider.Version)) + if listProviderPortsOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listProviderPortsOptions.Start)) + } + if listProviderPortsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listProviderPortsOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLinkProvider.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProviderPortCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// GetProviderPort : Get port +// Get provider port information. +func (directLinkProvider *DirectLinkProviderV2) GetProviderPort(getProviderPortOptions *GetProviderPortOptions) (result *ProviderPort, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getProviderPortOptions, "getProviderPortOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getProviderPortOptions, "getProviderPortOptions") + if err != nil { + return + } + + pathSegments := []string{"ports"} + pathParameters := []string{*getProviderPortOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLinkProvider.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getProviderPortOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link_provider", "V2", "GetProviderPort") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLinkProvider.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLinkProvider.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalProviderPort) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateProviderGatewayOptions : The CreateProviderGateway options. +type CreateProviderGatewayOptions struct { + // BGP ASN. + // + // For a 2-byte range, enter a value between 1-64495 or 64999. For a 2-byte or 4-byte range, enter a value between + // 131072-4199999999. For a 4-byte range, enter a value between 4201000000-4201064511. 
+ BgpAsn *int64 `json:"bgp_asn" validate:"required"` + + // Customer IBM Cloud account ID for the new gateway. A gateway object containing the pending create request will + // become available in the specified account. + CustomerAccountID *string `json:"customer_account_id" validate:"required"` + + // The unique user-defined name for this gateway. + Name *string `json:"name" validate:"required"` + + // Select Port Label for the gateway. + Port *ProviderGatewayPortIdentity `json:"port" validate:"required"` + + // Gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps" validate:"required"` + + // BGP customer edge router CIDR. + // + // For auto IP assignment, omit bgp_cer_cidr and bgp_ibm_cidr. IBM will automatically select values for bgp_cer_cidr + // and bgp_ibm_cidr. + // + // For manual IP assignment set a valid bgp_cer_cidr and bgp_ibm_cidr CIDR, the value must reside in one of + // "10.254.0.0/16", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16" or an owned public CIDR. bgp_cer_cidr and + // bgp_ibm_cidr must have matching network and subnet mask values. + BgpCerCidr *string `json:"bgp_cer_cidr,omitempty"` + + // BGP IBM CIDR. + // + // For auto IP assignment, omit bgp_cer_cidr and bgp_ibm_cidr. IBM will automatically select values for bgp_cer_cidr + // and bgp_ibm_cidr. + // + // For manual IP assignment set a valid bgp_cer_cidr and bgp_ibm_cidr CIDR, the value must reside in one of + // "10.254.0.0/16", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16" or an owned public CIDR. bgp_cer_cidr and + // bgp_ibm_cidr must have matching network and subnet mask values. + BgpIbmCidr *string `json:"bgp_ibm_cidr,omitempty"` + + // When true, perform request validation only and do not create a gateway. + CheckOnly *string `json:"check_only,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateProviderGatewayOptions : Instantiate CreateProviderGatewayOptions +func (*DirectLinkProviderV2) NewCreateProviderGatewayOptions(bgpAsn int64, customerAccountID string, name string, port *ProviderGatewayPortIdentity, speedMbps int64) *CreateProviderGatewayOptions { + return &CreateProviderGatewayOptions{ + BgpAsn: core.Int64Ptr(bgpAsn), + CustomerAccountID: core.StringPtr(customerAccountID), + Name: core.StringPtr(name), + Port: port, + SpeedMbps: core.Int64Ptr(speedMbps), + } +} + +// SetBgpAsn : Allow user to set BgpAsn +func (options *CreateProviderGatewayOptions) SetBgpAsn(bgpAsn int64) *CreateProviderGatewayOptions { + options.BgpAsn = core.Int64Ptr(bgpAsn) + return options +} + +// SetCustomerAccountID : Allow user to set CustomerAccountID +func (options *CreateProviderGatewayOptions) SetCustomerAccountID(customerAccountID string) *CreateProviderGatewayOptions { + options.CustomerAccountID = core.StringPtr(customerAccountID) + return options +} + +// SetName : Allow user to set Name +func (options *CreateProviderGatewayOptions) SetName(name string) *CreateProviderGatewayOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetPort : Allow user to set Port +func (options *CreateProviderGatewayOptions) SetPort(port *ProviderGatewayPortIdentity) *CreateProviderGatewayOptions { + options.Port = port + return options +} + +// SetSpeedMbps : Allow user to set SpeedMbps +func (options *CreateProviderGatewayOptions) SetSpeedMbps(speedMbps int64) *CreateProviderGatewayOptions { + options.SpeedMbps = core.Int64Ptr(speedMbps) + return options +} + +// SetBgpCerCidr : Allow user to set BgpCerCidr +func (options *CreateProviderGatewayOptions) SetBgpCerCidr(bgpCerCidr string) *CreateProviderGatewayOptions {
+ options.BgpCerCidr = core.StringPtr(bgpCerCidr) + return options +} + +// SetBgpIbmCidr : Allow user to set BgpIbmCidr +func (options *CreateProviderGatewayOptions) SetBgpIbmCidr(bgpIbmCidr string) *CreateProviderGatewayOptions { + options.BgpIbmCidr = core.StringPtr(bgpIbmCidr) + return options +} + +// SetCheckOnly : Allow user to set CheckOnly +func (options *CreateProviderGatewayOptions) SetCheckOnly(checkOnly string) *CreateProviderGatewayOptions { + options.CheckOnly = core.StringPtr(checkOnly) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateProviderGatewayOptions) SetHeaders(param map[string]string) *CreateProviderGatewayOptions { + options.Headers = param + return options +} + +// DeleteProviderGatewayOptions : The DeleteProviderGateway options. +type DeleteProviderGatewayOptions struct { + // Direct Link Connect gateway identifier. + ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteProviderGatewayOptions : Instantiate DeleteProviderGatewayOptions +func (*DirectLinkProviderV2) NewDeleteProviderGatewayOptions(id string) *DeleteProviderGatewayOptions { + return &DeleteProviderGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteProviderGatewayOptions) SetID(id string) *DeleteProviderGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteProviderGatewayOptions) SetHeaders(param map[string]string) *DeleteProviderGatewayOptions { + options.Headers = param + return options +} + +// GetProviderGatewayOptions : The GetProviderGateway options. +type GetProviderGatewayOptions struct { + // Direct Link Connect gateway identifier. + ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetProviderGatewayOptions : Instantiate GetProviderGatewayOptions +func (*DirectLinkProviderV2) NewGetProviderGatewayOptions(id string) *GetProviderGatewayOptions { + return &GetProviderGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetProviderGatewayOptions) SetID(id string) *GetProviderGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetProviderGatewayOptions) SetHeaders(param map[string]string) *GetProviderGatewayOptions { + options.Headers = param + return options +} + +// GetProviderPortOptions : The GetProviderPort options. +type GetProviderPortOptions struct { + // port identifier. 
+ ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetProviderPortOptions : Instantiate GetProviderPortOptions +func (*DirectLinkProviderV2) NewGetProviderPortOptions(id string) *GetProviderPortOptions { + return &GetProviderPortOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetProviderPortOptions) SetID(id string) *GetProviderPortOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetProviderPortOptions) SetHeaders(param map[string]string) *GetProviderPortOptions { + options.Headers = param + return options +} + +// ListProviderGatewaysOptions : The ListProviderGateways options. +type ListProviderGatewaysOptions struct { + // A server-supplied token determining which resource to start the page on. + Start *string `json:"start,omitempty"` + + // The number of resources to return on a page. + Limit *int64 `json:"limit,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListProviderGatewaysOptions : Instantiate ListProviderGatewaysOptions +func (*DirectLinkProviderV2) NewListProviderGatewaysOptions() *ListProviderGatewaysOptions { + return &ListProviderGatewaysOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListProviderGatewaysOptions) SetStart(start string) *ListProviderGatewaysOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListProviderGatewaysOptions) SetLimit(limit int64) *ListProviderGatewaysOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListProviderGatewaysOptions) SetHeaders(param map[string]string) *ListProviderGatewaysOptions { + options.Headers = param + return options +} + +// ListProviderPortsOptions : The ListProviderPorts options. +type ListProviderPortsOptions struct { + // A server-supplied token determining which resource to start the page on. + Start *string `json:"start,omitempty"` + + // The number of resources to return on a page. + Limit *int64 `json:"limit,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListProviderPortsOptions : Instantiate ListProviderPortsOptions +func (*DirectLinkProviderV2) NewListProviderPortsOptions() *ListProviderPortsOptions { + return &ListProviderPortsOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListProviderPortsOptions) SetStart(start string) *ListProviderPortsOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListProviderPortsOptions) SetLimit(limit int64) *ListProviderPortsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListProviderPortsOptions) SetHeaders(param map[string]string) *ListProviderPortsOptions { + options.Headers = param + return options +} + +// ProviderGateway : gateway. +type ProviderGateway struct { + // BGP ASN. + // + // For a 2-byte ASN values between 1-64495 or 64999. For a 2-byte or 4-byte range ASN values between 131072-4199999999. + // For a 4-byte ASN values 4201000000-4201064511. + BgpAsn *int64 `json:"bgp_asn" validate:"required"` + + // BGP customer edge router CIDR. + BgpCerCidr *string `json:"bgp_cer_cidr,omitempty"` + + // IBM BGP ASN. 
+ BgpIbmAsn *int64 `json:"bgp_ibm_asn" validate:"required"` + + // BGP IBM CIDR. + BgpIbmCidr *string `json:"bgp_ibm_cidr,omitempty"` + + // Gateway BGP status. + // + // The list of enumerated values for this property may expand in the future. Code and processes using this field must + // tolerate unexpected values. + BgpStatus *string `json:"bgp_status,omitempty"` + + ChangeRequest ProviderGatewayChangeRequestIntf `json:"change_request,omitempty"` + + // The date and time resource was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN (Cloud Resource Name) of this gateway. + Crn *string `json:"crn,omitempty"` + + // Customer IBM Cloud account ID. + CustomerAccountID *string `json:"customer_account_id" validate:"required"` + + // The unique identifier of this gateway. + ID *string `json:"id" validate:"required"` + + // The unique user-defined name for this gateway. + Name *string `json:"name" validate:"required"` + + // Gateway operational status. + // + // The list of enumerated values for this property may expand in the future. Code and processes using this field must + // tolerate unexpected values. + OperationalStatus *string `json:"operational_status" validate:"required"` + + // Port identifier for the gateway. + Port *ProviderGatewayPortReference `json:"port" validate:"required"` + + // Set to `true` for gateways created through the Direct Link Provider APIs. + // + // Most Direct Link Provider APIs cannot interact with `provider_api_managed=false` gateways. + ProviderApiManaged *bool `json:"provider_api_managed" validate:"required"` + + // Gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps" validate:"required"` + + // Gateway type. + Type *string `json:"type" validate:"required"` + + // VLAN allocated for this gateway. + Vlan *int64 `json:"vlan,omitempty"` +} + +// Constants associated with the ProviderGateway.BgpStatus property. +// Gateway BGP status. +// +// The list of enumerated values for this property may expand in the future. Code and processes using this field must +// tolerate unexpected values. +const ( + ProviderGateway_BgpStatus_Active = "active" + ProviderGateway_BgpStatus_Connect = "connect" + ProviderGateway_BgpStatus_Established = "established" + ProviderGateway_BgpStatus_Idle = "idle" +) + +// Constants associated with the ProviderGateway.OperationalStatus property. +// Gateway operational status. +// +// The list of enumerated values for this property may expand in the future. Code and processes using this field must +// tolerate unexpected values. +const ( + ProviderGateway_OperationalStatus_CreatePending = "create_pending" + ProviderGateway_OperationalStatus_CreateRejected = "create_rejected" + ProviderGateway_OperationalStatus_DeletePending = "delete_pending" + ProviderGateway_OperationalStatus_Provisioned = "provisioned" +) + +// UnmarshalProviderGateway unmarshals an instance of ProviderGateway from the specified map of raw messages. 
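The `bgp_status` and `operational_status` enumerations above are explicitly open-ended, so callers should branch with a default case rather than assuming the constant list is complete. A minimal illustrative sketch:

func describeGatewayStatus(gateway *ProviderGateway) {
	switch *gateway.OperationalStatus {
	case ProviderGateway_OperationalStatus_Provisioned:
		fmt.Println("gateway is provisioned")
	case ProviderGateway_OperationalStatus_CreatePending, ProviderGateway_OperationalStatus_DeletePending:
		fmt.Println("gateway has a pending change")
	case ProviderGateway_OperationalStatus_CreateRejected:
		fmt.Println("gateway create request was rejected")
	default:
		// The enumeration may grow over time; treat unknown values as non-fatal.
		fmt.Println("unrecognized operational status:", *gateway.OperationalStatus)
	}
}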
+func UnmarshalProviderGateway(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderGateway) + err = core.UnmarshalPrimitive(m, "bgp_asn", &obj.BgpAsn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_cer_cidr", &obj.BgpCerCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_ibm_asn", &obj.BgpIbmAsn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_ibm_cidr", &obj.BgpIbmCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_status", &obj.BgpStatus) + if err != nil { + return + } + err = core.UnmarshalModel(m, "change_request", &obj.ChangeRequest, UnmarshalProviderGatewayChangeRequest) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.Crn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "customer_account_id", &obj.CustomerAccountID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "operational_status", &obj.OperationalStatus) + if err != nil { + return + } + err = core.UnmarshalModel(m, "port", &obj.Port, UnmarshalProviderGatewayPortReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "provider_api_managed", &obj.ProviderApiManaged) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "vlan", &obj.Vlan) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ProviderGatewayChangeRequest : ProviderGatewayChangeRequest struct +// Models which "extend" this model: +// - ProviderGatewayChangeRequestProviderGatewayCreate +// - ProviderGatewayChangeRequestProviderGatewayDelete +// - ProviderGatewayChangeRequestProviderGatewayUpdateAttributes +type ProviderGatewayChangeRequest struct { + // type of gateway change request. + Type *string `json:"type,omitempty"` + + // array of pending updates. + Updates []ProviderGatewayUpdateAttributesUpdatesItemIntf `json:"updates,omitempty"` +} + +// Constants associated with the ProviderGatewayChangeRequest.Type property. +// type of gateway change request. +const ( + ProviderGatewayChangeRequest_Type_CreateGateway = "create_gateway" +) + +func (*ProviderGatewayChangeRequest) isaProviderGatewayChangeRequest() bool { + return true +} + +type ProviderGatewayChangeRequestIntf interface { + isaProviderGatewayChangeRequest() bool +} + +// UnmarshalProviderGatewayChangeRequest unmarshals an instance of ProviderGatewayChangeRequest from the specified map of raw messages. +func UnmarshalProviderGatewayChangeRequest(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderGatewayChangeRequest) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalModel(m, "updates", &obj.Updates, UnmarshalProviderGatewayUpdateAttributesUpdatesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ProviderGatewayCollection : A paginated collection of resources. 
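The options types earlier in this file follow the SDK's builder pattern: the New...Options constructor takes the required fields, and the fluent Set... methods chain the optional ones. A hedged sketch of assembling a gateway create request (the service instance, port ID, account ID, and CIDR values are placeholders, not values from this change):

func buildCreateGatewayOptions(providerService *DirectLinkProviderV2) (*CreateProviderGatewayOptions, error) {
	// The generic model constructor validates that the required port ID is present.
	port, err := providerService.NewProviderGatewayPortIdentity("example-port-id")
	if err != nil {
		return nil, err
	}
	// Required fields go through the constructor; the optional BGP CIDRs are chained on.
	options := providerService.NewCreateProviderGatewayOptions(
		64999, "example-account-id", "example-gateway", port, 1000,
	)
	return options.SetBgpCerCidr("10.254.30.78/30").SetBgpIbmCidr("10.254.30.77/30"), nil
}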
+type ProviderGatewayCollection struct {
+	// A reference to the first page of resources.
+	First *ProviderGatewayCollectionFirst `json:"first" validate:"required"`
+
+	// The maximum number of resources that can be returned by the request.
+	Limit *int64 `json:"limit" validate:"required"`
+
+	// A reference to the next page of resources; this reference is included for all pages except the last page.
+	Next *ProviderGatewayCollectionNext `json:"next,omitempty"`
+
+	// The total number of resources across all pages.
+	TotalCount *int64 `json:"total_count" validate:"required"`
+
+	// Collection of Direct Link gateways.
+	Gateways []ProviderGateway `json:"gateways" validate:"required"`
+}
+
+// UnmarshalProviderGatewayCollection unmarshals an instance of ProviderGatewayCollection from the specified map of raw messages.
+func UnmarshalProviderGatewayCollection(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ProviderGatewayCollection)
+	err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalProviderGatewayCollectionFirst)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalProviderGatewayCollectionNext)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "gateways", &obj.Gateways, UnmarshalProviderGateway)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ProviderGatewayCollectionFirst : A reference to the first page of resources.
+type ProviderGatewayCollectionFirst struct {
+	// The URL for the first page of resources.
+	Href *string `json:"href" validate:"required"`
+}
+
+// UnmarshalProviderGatewayCollectionFirst unmarshals an instance of ProviderGatewayCollectionFirst from the specified map of raw messages.
+func UnmarshalProviderGatewayCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ProviderGatewayCollectionFirst)
+	err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ProviderGatewayCollectionNext : A reference to the next page of resources; this reference is included for all pages except the last page.
+type ProviderGatewayCollectionNext struct {
+	// The URL for the next page of resources.
+	Href *string `json:"href" validate:"required"`
+
+	// Start token for the next page of resources.
+	Start *string `json:"start" validate:"required"`
+}
+
+// UnmarshalProviderGatewayCollectionNext unmarshals an instance of ProviderGatewayCollectionNext from the specified map of raw messages.
+func UnmarshalProviderGatewayCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ProviderGatewayCollectionNext)
+	err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "start", &obj.Start)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ProviderGatewayPortIdentity : Select Port Label for the gateway.
+type ProviderGatewayPortIdentity struct {
+	// Port identifier.
+ ID *string `json:"id" validate:"required"` +} + +// NewProviderGatewayPortIdentity : Instantiate ProviderGatewayPortIdentity (Generic Model Constructor) +func (*DirectLinkProviderV2) NewProviderGatewayPortIdentity(id string) (model *ProviderGatewayPortIdentity, err error) { + model = &ProviderGatewayPortIdentity{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalProviderGatewayPortIdentity unmarshals an instance of ProviderGatewayPortIdentity from the specified map of raw messages. +func UnmarshalProviderGatewayPortIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderGatewayPortIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ProviderGatewayPortReference : Port identifier for the gateway. +type ProviderGatewayPortReference struct { + // Port identifier. + ID *string `json:"id" validate:"required"` +} + +// UnmarshalProviderGatewayPortReference unmarshals an instance of ProviderGatewayPortReference from the specified map of raw messages. +func UnmarshalProviderGatewayPortReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderGatewayPortReference) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ProviderGatewayUpdateAttributesUpdatesItem : ProviderGatewayUpdateAttributesUpdatesItem struct +// Models which "extend" this model: +// - ProviderGatewayUpdateAttributesUpdatesItemProviderGatewaySpeedUpdate +type ProviderGatewayUpdateAttributesUpdatesItem struct { + // New gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps,omitempty"` +} + +func (*ProviderGatewayUpdateAttributesUpdatesItem) isaProviderGatewayUpdateAttributesUpdatesItem() bool { + return true +} + +type ProviderGatewayUpdateAttributesUpdatesItemIntf interface { + isaProviderGatewayUpdateAttributesUpdatesItem() bool +} + +// UnmarshalProviderGatewayUpdateAttributesUpdatesItem unmarshals an instance of ProviderGatewayUpdateAttributesUpdatesItem from the specified map of raw messages. +func UnmarshalProviderGatewayUpdateAttributesUpdatesItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderGatewayUpdateAttributesUpdatesItem) + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ProviderPort : Provider port details. +type ProviderPort struct { + // Port identifier. + ID *string `json:"id" validate:"required"` + + // Port Label. + Label *string `json:"label" validate:"required"` + + // Port location long name. + LocationDisplayName *string `json:"location_display_name" validate:"required"` + + // Port location name identifier. + LocationName *string `json:"location_name" validate:"required"` + + // Port provider name. + ProviderName *string `json:"provider_name" validate:"required"` + + // Port's supported speeds in megabits per second. + SupportedLinkSpeeds []int64 `json:"supported_link_speeds" validate:"required"` +} + +// UnmarshalProviderPort unmarshals an instance of ProviderPort from the specified map of raw messages. 
+func UnmarshalProviderPort(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ProviderPort)
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "label", &obj.Label)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "location_display_name", &obj.LocationDisplayName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "location_name", &obj.LocationName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "provider_name", &obj.ProviderName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "supported_link_speeds", &obj.SupportedLinkSpeeds)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ProviderPortCollection : List of port label details.
+type ProviderPortCollection struct {
+	// A reference to the first page of resources.
+	First *ProviderPortCollectionFirst `json:"first" validate:"required"`
+
+	// The maximum number of resources that can be returned by the request.
+	Limit *int64 `json:"limit" validate:"required"`
+
+	// A reference to the next page of resources; this reference is included for all pages except the last page.
+	Next *ProviderPortCollectionNext `json:"next,omitempty"`
+
+	// The total number of resources across all pages.
+	TotalCount *int64 `json:"total_count" validate:"required"`
+
+	// Array of ports.
+	Ports []ProviderPort `json:"ports,omitempty"`
+}
+
+// UnmarshalProviderPortCollection unmarshals an instance of ProviderPortCollection from the specified map of raw messages.
+func UnmarshalProviderPortCollection(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ProviderPortCollection)
+	err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalProviderPortCollectionFirst)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalProviderPortCollectionNext)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "ports", &obj.Ports, UnmarshalProviderPort)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ProviderPortCollectionFirst : A reference to the first page of resources.
+type ProviderPortCollectionFirst struct {
+	// The URL for the first page of resources.
+	Href *string `json:"href" validate:"required"`
+}
+
+// UnmarshalProviderPortCollectionFirst unmarshals an instance of ProviderPortCollectionFirst from the specified map of raw messages.
+func UnmarshalProviderPortCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ProviderPortCollectionFirst)
+	err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ProviderPortCollectionNext : A reference to the next page of resources; this reference is included for all pages except the last page.
+type ProviderPortCollectionNext struct {
+	// The URL for the next page of resources.
+	Href *string `json:"href" validate:"required"`
+
+	// Start token for the next page of resources.
+	Start *string `json:"start" validate:"required"`
+}
+
+// UnmarshalProviderPortCollectionNext unmarshals an instance of ProviderPortCollectionNext from the specified map of raw messages.
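Port pages are linked through `next.start`, so a caller walks the collection by feeding each page's start token back into the list call. A hedged sketch (assumes `providerService` is an already-configured *DirectLinkProviderV2):

func listAllProviderPorts(providerService *DirectLinkProviderV2) ([]ProviderPort, error) {
	var ports []ProviderPort
	options := providerService.NewListProviderPortsOptions().SetLimit(50)
	for {
		collection, _, err := providerService.ListProviderPorts(options)
		if err != nil {
			return nil, err
		}
		ports = append(ports, collection.Ports...)
		// The last page omits `next`, which terminates the walk.
		if collection.Next == nil || collection.Next.Start == nil {
			return ports, nil
		}
		options.SetStart(*collection.Next.Start)
	}
}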
+func UnmarshalProviderPortCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderPortCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "start", &obj.Start) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateProviderGatewayOptions : The UpdateProviderGateway options. +type UpdateProviderGatewayOptions struct { + // Direct Link Connect gateway identifier. + ID *string `json:"id" validate:"required"` + + // The unique user-defined name for this gateway. + Name *string `json:"name,omitempty"` + + // Gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateProviderGatewayOptions : Instantiate UpdateProviderGatewayOptions +func (*DirectLinkProviderV2) NewUpdateProviderGatewayOptions(id string) *UpdateProviderGatewayOptions { + return &UpdateProviderGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *UpdateProviderGatewayOptions) SetID(id string) *UpdateProviderGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateProviderGatewayOptions) SetName(name string) *UpdateProviderGatewayOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetSpeedMbps : Allow user to set SpeedMbps +func (options *UpdateProviderGatewayOptions) SetSpeedMbps(speedMbps int64) *UpdateProviderGatewayOptions { + options.SpeedMbps = core.Int64Ptr(speedMbps) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateProviderGatewayOptions) SetHeaders(param map[string]string) *UpdateProviderGatewayOptions { + options.Headers = param + return options +} + +// ProviderGatewayChangeRequestProviderGatewayCreate : gateway create. +// This model "extends" ProviderGatewayChangeRequest +type ProviderGatewayChangeRequestProviderGatewayCreate struct { + // type of gateway change request. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the ProviderGatewayChangeRequestProviderGatewayCreate.Type property. +// type of gateway change request. +const ( + ProviderGatewayChangeRequestProviderGatewayCreate_Type_CreateGateway = "create_gateway" +) + +func (*ProviderGatewayChangeRequestProviderGatewayCreate) isaProviderGatewayChangeRequest() bool { + return true +} + +// UnmarshalProviderGatewayChangeRequestProviderGatewayCreate unmarshals an instance of ProviderGatewayChangeRequestProviderGatewayCreate from the specified map of raw messages. +func UnmarshalProviderGatewayChangeRequestProviderGatewayCreate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderGatewayChangeRequestProviderGatewayCreate) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ProviderGatewayChangeRequestProviderGatewayDelete : gateway delete. +// This model "extends" ProviderGatewayChangeRequest +type ProviderGatewayChangeRequestProviderGatewayDelete struct { + // type of gateway change request. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the ProviderGatewayChangeRequestProviderGatewayDelete.Type property. +// type of gateway change request. 
+const ( + ProviderGatewayChangeRequestProviderGatewayDelete_Type_DeleteGateway = "delete_gateway" +) + +func (*ProviderGatewayChangeRequestProviderGatewayDelete) isaProviderGatewayChangeRequest() bool { + return true +} + +// UnmarshalProviderGatewayChangeRequestProviderGatewayDelete unmarshals an instance of ProviderGatewayChangeRequestProviderGatewayDelete from the specified map of raw messages. +func UnmarshalProviderGatewayChangeRequestProviderGatewayDelete(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderGatewayChangeRequestProviderGatewayDelete) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ProviderGatewayChangeRequestProviderGatewayUpdateAttributes : gateway attributes update. +// This model "extends" ProviderGatewayChangeRequest +type ProviderGatewayChangeRequestProviderGatewayUpdateAttributes struct { + // type of gateway change request. + Type *string `json:"type" validate:"required"` + + // array of pending updates. + Updates []ProviderGatewayUpdateAttributesUpdatesItemIntf `json:"updates" validate:"required"` +} + +// Constants associated with the ProviderGatewayChangeRequestProviderGatewayUpdateAttributes.Type property. +// type of gateway change request. +const ( + ProviderGatewayChangeRequestProviderGatewayUpdateAttributes_Type_UpdateAttributes = "update_attributes" +) + +func (*ProviderGatewayChangeRequestProviderGatewayUpdateAttributes) isaProviderGatewayChangeRequest() bool { + return true +} + +// UnmarshalProviderGatewayChangeRequestProviderGatewayUpdateAttributes unmarshals an instance of ProviderGatewayChangeRequestProviderGatewayUpdateAttributes from the specified map of raw messages. +func UnmarshalProviderGatewayChangeRequestProviderGatewayUpdateAttributes(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderGatewayChangeRequestProviderGatewayUpdateAttributes) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalModel(m, "updates", &obj.Updates, UnmarshalProviderGatewayUpdateAttributesUpdatesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ProviderGatewayUpdateAttributesUpdatesItemProviderGatewaySpeedUpdate : gateway speed change. +// This model "extends" ProviderGatewayUpdateAttributesUpdatesItem +type ProviderGatewayUpdateAttributesUpdatesItemProviderGatewaySpeedUpdate struct { + // New gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps,omitempty"` +} + +func (*ProviderGatewayUpdateAttributesUpdatesItemProviderGatewaySpeedUpdate) isaProviderGatewayUpdateAttributesUpdatesItem() bool { + return true +} + +// UnmarshalProviderGatewayUpdateAttributesUpdatesItemProviderGatewaySpeedUpdate unmarshals an instance of ProviderGatewayUpdateAttributesUpdatesItemProviderGatewaySpeedUpdate from the specified map of raw messages. 
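Because `change_request` unmarshals to the ProviderGatewayChangeRequestIntf interface, callers inspect it with a type switch. A minimal sketch; note that the generated base unmarshaler produces a *ProviderGatewayChangeRequest, so the base case is the one typically hit:

func describeChangeRequest(gateway *ProviderGateway) {
	switch change := gateway.ChangeRequest.(type) {
	case *ProviderGatewayChangeRequestProviderGatewayUpdateAttributes:
		fmt.Println("pending attribute updates:", len(change.Updates))
	case *ProviderGatewayChangeRequest:
		if change.Type != nil {
			fmt.Println("pending change request of type:", *change.Type)
		}
	case nil:
		fmt.Println("no outstanding change request")
	}
}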
+func UnmarshalProviderGatewayUpdateAttributesUpdatesItemProviderGatewaySpeedUpdate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ProviderGatewayUpdateAttributesUpdatesItemProviderGatewaySpeedUpdate) + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/directlinkv1/completion_notice.pdf b/vendor/github.com/IBM/networking-go-sdk/directlinkv1/completion_notice.pdf new file mode 100644 index 00000000000..e32c82d7dea Binary files /dev/null and b/vendor/github.com/IBM/networking-go-sdk/directlinkv1/completion_notice.pdf differ diff --git a/vendor/github.com/IBM/networking-go-sdk/directlinkv1/direct_link_v1.go b/vendor/github.com/IBM/networking-go-sdk/directlinkv1/direct_link_v1.go new file mode 100644 index 00000000000..5a553680127 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/directlinkv1/direct_link_v1.go @@ -0,0 +1,4165 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package directlinkv1 : Operations and models for the DirectLinkV1 service +package directlinkv1 + +import ( + "encoding/json" + "fmt" + "io" + "reflect" + + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "github.com/go-openapi/strfmt" +) + +// DirectLinkV1 : No description provided (generated by Openapi Generator +// https://github.com/openapitools/openapi-generator) +// +// Version: __VERSION__ +type DirectLinkV1 struct { + Service *core.BaseService + + // Requests the version of the API as a date in the format `YYYY-MM-DD`. Any date from 2019-12-13 up to the current + // date may be provided. Specify the current date to request the latest version. + Version *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://directlink.cloud.ibm.com/v1" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "direct_link" + +// DirectLinkV1Options : Service options +type DirectLinkV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Requests the version of the API as a date in the format `YYYY-MM-DD`. Any date from 2019-12-13 up to the current + // date may be provided. Specify the current date to request the latest version. + Version *string `validate:"required"` +} + +// NewDirectLinkV1UsingExternalConfig : constructs an instance of DirectLinkV1 with passed in options and external configuration. 
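Constructing the client requires an authenticator and the date-based `version` parameter described above. A hedged sketch using an IAM API key (the key and the version date are placeholders):

func newDirectLinkClient() (*DirectLinkV1, error) {
	return NewDirectLinkV1(&DirectLinkV1Options{
		// Version pins request and response semantics to an API release date.
		Version:       core.StringPtr("2021-03-31"),
		Authenticator: &core.IamAuthenticator{ApiKey: "example-api-key"},
	})
}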
+func NewDirectLinkV1UsingExternalConfig(options *DirectLinkV1Options) (directLink *DirectLinkV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + directLink, err = NewDirectLinkV1(options) + if err != nil { + return + } + + err = directLink.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = directLink.Service.SetServiceURL(options.URL) + } + return +} + +// NewDirectLinkV1 : constructs an instance of DirectLinkV1 with passed in options. +func NewDirectLinkV1(options *DirectLinkV1Options) (service *DirectLinkV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &DirectLinkV1{ + Service: baseService, + Version: options.Version, + } + + return +} + +// SetServiceURL sets the service URL +func (directLink *DirectLinkV1) SetServiceURL(url string) error { + return directLink.Service.SetServiceURL(url) +} + +// ListGateways : List gateways +// List all Direct Link gateways in this account. Gateways in other accounts with connections to networks in this +// account are also returned. +func (directLink *DirectLinkV1) ListGateways(listGatewaysOptions *ListGatewaysOptions) (result *GatewayCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listGatewaysOptions, "listGatewaysOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listGatewaysOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "ListGateways") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLink.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGatewayCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateGateway : Create gateway +// Creates a Direct Link gateway based on the supplied template. 
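A hedged sketch of enumerating gateways with ListGateways above (assumes `directLink` was constructed as in the earlier sketch; the `Gateways`, `Name`, and `OperationalStatus` field names mirror the API's snake_case properties):

func printGateways(directLink *DirectLinkV1) {
	collection, _, err := directLink.ListGateways(&ListGatewaysOptions{})
	if err != nil {
		fmt.Println("ListGateways failed:", err)
		return
	}
	for _, gateway := range collection.Gateways {
		fmt.Println(*gateway.Name, *gateway.OperationalStatus)
	}
}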
+func (directLink *DirectLinkV1) CreateGateway(createGatewayOptions *CreateGatewayOptions) (result *Gateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createGatewayOptions, "createGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createGatewayOptions, "createGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.POST) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range createGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "CreateGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + _, err = builder.SetBodyContentJSON(createGatewayOptions.GatewayTemplate) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLink.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteGateway : Delete gateway +// Delete a Direct Link gateway. +func (directLink *DirectLinkV1) DeleteGateway(deleteGatewayOptions *DeleteGatewayOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteGatewayOptions, "deleteGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteGatewayOptions, "deleteGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{*deleteGatewayOptions.ID} + + builder := core.NewRequestBuilder(core.DELETE) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range deleteGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "DeleteGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = directLink.Service.Request(request, nil) + + return +} + +// GetGateway : Get gateway +// Retrieve a Direct Link gateway. 
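DeleteGateway above returns only a *core.DetailedResponse, so callers check the error and, where useful, the HTTP status code. A minimal sketch:

func deleteGateway(directLink *DirectLinkV1, id string) error {
	response, err := directLink.DeleteGateway(&DeleteGatewayOptions{
		ID: core.StringPtr(id),
	})
	if err != nil {
		return err
	}
	// A 2xx status indicates the delete request was accepted.
	fmt.Println("delete returned status:", response.StatusCode)
	return nil
}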
+func (directLink *DirectLinkV1) GetGateway(getGatewayOptions *GetGatewayOptions) (result *Gateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getGatewayOptions, "getGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getGatewayOptions, "getGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{*getGatewayOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "GetGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLink.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateGateway : Update gateway +// Update a Direct Link gateway. +func (directLink *DirectLinkV1) UpdateGateway(updateGatewayOptions *UpdateGatewayOptions) (result *Gateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateGatewayOptions, "updateGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateGatewayOptions, "updateGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways"} + pathParameters := []string{*updateGatewayOptions.ID} + + builder := core.NewRequestBuilder(core.PATCH) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range updateGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "UpdateGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + body := make(map[string]interface{}) + if updateGatewayOptions.Global != nil { + body["global"] = updateGatewayOptions.Global + } + if updateGatewayOptions.LoaRejectReason != nil { + body["loa_reject_reason"] = updateGatewayOptions.LoaRejectReason + } + if updateGatewayOptions.MacsecConfig != nil { + body["macsec_config"] = updateGatewayOptions.MacsecConfig + } + if updateGatewayOptions.Metered != nil { + body["metered"] = updateGatewayOptions.Metered + } + if updateGatewayOptions.Name != nil { + body["name"] = updateGatewayOptions.Name + } + if updateGatewayOptions.OperationalStatus != nil { + body["operational_status"] = updateGatewayOptions.OperationalStatus + } + if updateGatewayOptions.SpeedMbps != nil { + body["speed_mbps"] = updateGatewayOptions.SpeedMbps + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse 
map[string]json.RawMessage
+	response, err = directLink.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGateway)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// CreateGatewayAction : Approve or reject change requests
+// Approve or reject a gateway's current outstanding change request.
+//
+// This API is only used for provider-created Direct Link Connect gateways to approve or reject specific changes
+// initiated from a provider portal.
+func (directLink *DirectLinkV1) CreateGatewayAction(createGatewayActionOptions *CreateGatewayActionOptions) (result *Gateway, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(createGatewayActionOptions, "createGatewayActionOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(createGatewayActionOptions, "createGatewayActionOptions")
+	if err != nil {
+		return
+	}
+
+	pathSegments := []string{"gateways", "actions"}
+	pathParameters := []string{*createGatewayActionOptions.ID}
+
+	builder := core.NewRequestBuilder(core.POST)
+	_, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range createGatewayActionOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "CreateGatewayAction")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	builder.AddQuery("version", fmt.Sprint(*directLink.Version))
+
+	body := make(map[string]interface{})
+	if createGatewayActionOptions.Action != nil {
+		body["action"] = createGatewayActionOptions.Action
+	}
+	if createGatewayActionOptions.Global != nil {
+		body["global"] = createGatewayActionOptions.Global
+	}
+	if createGatewayActionOptions.Metered != nil {
+		body["metered"] = createGatewayActionOptions.Metered
+	}
+	if createGatewayActionOptions.ResourceGroup != nil {
+		body["resource_group"] = createGatewayActionOptions.ResourceGroup
+	}
+	if createGatewayActionOptions.Updates != nil {
+		body["updates"] = createGatewayActionOptions.Updates
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = directLink.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGateway)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// ListGatewayCompletionNotice : Get completion notice
+// Retrieve a Direct Link Dedicated gateway's completion notice.
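UpdateGateway serializes only the fields set on UpdateGatewayOptions into the PATCH body, so a speed-only change sends just `speed_mbps`. A hedged sketch:

func updateGatewaySpeed(directLink *DirectLinkV1, id string, speedMbps int64) error {
	options := &UpdateGatewayOptions{
		ID:        core.StringPtr(id),
		SpeedMbps: core.Int64Ptr(speedMbps),
	}
	gateway, _, err := directLink.UpdateGateway(options)
	if err != nil {
		return err
	}
	fmt.Println("gateway speed now:", *gateway.SpeedMbps)
	return nil
}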
+func (directLink *DirectLinkV1) ListGatewayCompletionNotice(listGatewayCompletionNoticeOptions *ListGatewayCompletionNoticeOptions) (result io.ReadCloser, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(listGatewayCompletionNoticeOptions, "listGatewayCompletionNoticeOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(listGatewayCompletionNoticeOptions, "listGatewayCompletionNoticeOptions")
+	if err != nil {
+		return
+	}
+
+	pathSegments := []string{"gateways", "completion_notice"}
+	pathParameters := []string{*listGatewayCompletionNoticeOptions.ID}
+
+	builder := core.NewRequestBuilder(core.GET)
+	_, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range listGatewayCompletionNoticeOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "ListGatewayCompletionNotice")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/pdf")
+
+	builder.AddQuery("version", fmt.Sprint(*directLink.Version))
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = directLink.Service.Request(request, &result)
+
+	return
+}
+
+// CreateGatewayCompletionNotice : Create completion notice
+// Upload a Direct Link Dedicated gateway completion notice.
+func (directLink *DirectLinkV1) CreateGatewayCompletionNotice(createGatewayCompletionNoticeOptions *CreateGatewayCompletionNoticeOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(createGatewayCompletionNoticeOptions, "createGatewayCompletionNoticeOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(createGatewayCompletionNoticeOptions, "createGatewayCompletionNoticeOptions")
+	if err != nil {
+		return
+	}
+	if createGatewayCompletionNoticeOptions.Upload == nil {
+		err = fmt.Errorf("upload must be supplied")
+		return
+	}
+
+	pathSegments := []string{"gateways", "completion_notice"}
+	pathParameters := []string{*createGatewayCompletionNoticeOptions.ID}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	_, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range createGatewayCompletionNoticeOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "CreateGatewayCompletionNotice")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	builder.AddQuery("version", fmt.Sprint(*directLink.Version))
+
+	if createGatewayCompletionNoticeOptions.Upload != nil {
+		builder.AddFormData("upload", "filename",
+			core.StringNilMapper(createGatewayCompletionNoticeOptions.UploadContentType), createGatewayCompletionNoticeOptions.Upload)
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = directLink.Service.Request(request, nil)
+
+	return
+}
+
+// ListGatewayLetterOfAuthorization : Get letter of authorization
+// Retrieve a Direct Link Dedicated gateway's Letter of Authorization.
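The completion notice and letter of authorization operations return the raw PDF as an io.ReadCloser that the caller must drain and close. A hedged sketch saving it to disk (assumes an `os` import alongside the file's existing `io` import; the output filename is a placeholder):

func saveCompletionNotice(directLink *DirectLinkV1, id string) error {
	pdf, _, err := directLink.ListGatewayCompletionNotice(&ListGatewayCompletionNoticeOptions{
		ID: core.StringPtr(id),
	})
	if err != nil {
		return err
	}
	defer pdf.Close()

	out, err := os.Create("completion_notice.pdf")
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, pdf)
	return err
}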
+func (directLink *DirectLinkV1) ListGatewayLetterOfAuthorization(listGatewayLetterOfAuthorizationOptions *ListGatewayLetterOfAuthorizationOptions) (result io.ReadCloser, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listGatewayLetterOfAuthorizationOptions, "listGatewayLetterOfAuthorizationOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listGatewayLetterOfAuthorizationOptions, "listGatewayLetterOfAuthorizationOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways", "letter_of_authorization"} + pathParameters := []string{*listGatewayLetterOfAuthorizationOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listGatewayLetterOfAuthorizationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "ListGatewayLetterOfAuthorization") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/pdf") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = directLink.Service.Request(request, &result) + + return +} + +// GetGatewayStatistics : Gateway statistics +// Retrieve gateway statistics. Specify statistic to retrieve using required `type` query parameter. Currently data +// retrieval is only supported for MACsec configurations. +func (directLink *DirectLinkV1) GetGatewayStatistics(getGatewayStatisticsOptions *GetGatewayStatisticsOptions) (result *GatewayStatisticCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getGatewayStatisticsOptions, "getGatewayStatisticsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getGatewayStatisticsOptions, "getGatewayStatisticsOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways", "statistics"} + pathParameters := []string{*getGatewayStatisticsOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getGatewayStatisticsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "GetGatewayStatistics") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("type", fmt.Sprint(*getGatewayStatisticsOptions.Type)) + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLink.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGatewayStatisticCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// ListOfferingTypeLocations : List available locations +// Retrieve the list of valid locations for the specified Direct Link offering. 
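GetGatewayStatistics above requires the `type` query parameter in addition to the gateway ID. A minimal sketch (the statistic type value is a placeholder and the `Statistics` field name is assumed from the API's response shape; consult the API reference for the valid MACsec statistic types):

func fetchGatewayStatistics(directLink *DirectLinkV1, id string) error {
	options := &GetGatewayStatisticsOptions{
		ID: core.StringPtr(id),
		// Placeholder; the API currently serves statistics only for MACsec configurations.
		Type: core.StringPtr("example-statistic-type"),
	}
	statistics, _, err := directLink.GetGatewayStatistics(options)
	if err != nil {
		return err
	}
	fmt.Println("statistics entries:", len(statistics.Statistics))
	return nil
}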
+func (directLink *DirectLinkV1) ListOfferingTypeLocations(listOfferingTypeLocationsOptions *ListOfferingTypeLocationsOptions) (result *LocationCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listOfferingTypeLocationsOptions, "listOfferingTypeLocationsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listOfferingTypeLocationsOptions, "listOfferingTypeLocationsOptions") + if err != nil { + return + } + + pathSegments := []string{"offering_types", "locations"} + pathParameters := []string{*listOfferingTypeLocationsOptions.OfferingType} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listOfferingTypeLocationsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "ListOfferingTypeLocations") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLink.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLocationCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// ListOfferingTypeLocationCrossConnectRouters : List routers +// Retrieve location specific cross connect router information. Only valid for offering_type=dedicated locations. +func (directLink *DirectLinkV1) ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions *ListOfferingTypeLocationCrossConnectRoutersOptions) (result *LocationCrossConnectRouterCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listOfferingTypeLocationCrossConnectRoutersOptions, "listOfferingTypeLocationCrossConnectRoutersOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listOfferingTypeLocationCrossConnectRoutersOptions, "listOfferingTypeLocationCrossConnectRoutersOptions") + if err != nil { + return + } + + pathSegments := []string{"offering_types", "locations", "cross_connect_routers"} + pathParameters := []string{*listOfferingTypeLocationCrossConnectRoutersOptions.OfferingType, *listOfferingTypeLocationCrossConnectRoutersOptions.LocationName} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listOfferingTypeLocationCrossConnectRoutersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "ListOfferingTypeLocationCrossConnectRouters") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLink.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", 
&result, UnmarshalLocationCrossConnectRouterCollection)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// ListOfferingTypeSpeeds : List speed options
+// List the available Direct Link speeds.
+func (directLink *DirectLinkV1) ListOfferingTypeSpeeds(listOfferingTypeSpeedsOptions *ListOfferingTypeSpeedsOptions) (result *OfferingSpeedCollection, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(listOfferingTypeSpeedsOptions, "listOfferingTypeSpeedsOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(listOfferingTypeSpeedsOptions, "listOfferingTypeSpeedsOptions")
+	if err != nil {
+		return
+	}
+
+	pathSegments := []string{"offering_types", "speeds"}
+	pathParameters := []string{*listOfferingTypeSpeedsOptions.OfferingType}
+
+	builder := core.NewRequestBuilder(core.GET)
+	_, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range listOfferingTypeSpeedsOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "ListOfferingTypeSpeeds")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	builder.AddQuery("version", fmt.Sprint(*directLink.Version))
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = directLink.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOfferingSpeedCollection)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// ListPorts : List ports
+// Retrieve the list of available Direct Link Connect ports. These ports can be used to create Direct Link Connect
+// gateways.
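The offering-type operations take the offering as a path parameter; "dedicated" appears in the router note above, and "connect" is the other documented offering. A hedged sketch listing speeds for dedicated gateways (the `Speeds` field name on OfferingSpeedCollection is an assumption):

func listDedicatedSpeeds(directLink *DirectLinkV1) error {
	options := &ListOfferingTypeSpeedsOptions{
		OfferingType: core.StringPtr("dedicated"),
	}
	speeds, _, err := directLink.ListOfferingTypeSpeeds(options)
	if err != nil {
		return err
	}
	fmt.Println("available speeds:", len(speeds.Speeds))
	return nil
}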
+func (directLink *DirectLinkV1) ListPorts(listPortsOptions *ListPortsOptions) (result *PortCollection, response *core.DetailedResponse, err error) {
+ err = core.ValidateStruct(listPortsOptions, "listPortsOptions")
+ if err != nil {
+ return
+ }
+
+ pathSegments := []string{"ports"}
+ pathParameters := []string{}
+
+ builder := core.NewRequestBuilder(core.GET)
+ _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range listPortsOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "ListPorts")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ builder.AddQuery("version", fmt.Sprint(*directLink.Version))
+ if listPortsOptions.Start != nil {
+ builder.AddQuery("start", fmt.Sprint(*listPortsOptions.Start))
+ }
+ if listPortsOptions.Limit != nil {
+ builder.AddQuery("limit", fmt.Sprint(*listPortsOptions.Limit))
+ }
+ if listPortsOptions.LocationName != nil {
+ builder.AddQuery("location_name", fmt.Sprint(*listPortsOptions.LocationName))
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = directLink.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPortCollection)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// GetPort : Get port
+// Retrieve a Direct Link Connect provider port.
+func (directLink *DirectLinkV1) GetPort(getPortOptions *GetPortOptions) (result *Port, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(getPortOptions, "getPortOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(getPortOptions, "getPortOptions")
+ if err != nil {
+ return
+ }
+
+ pathSegments := []string{"ports"}
+ pathParameters := []string{*getPortOptions.ID}
+
+ builder := core.NewRequestBuilder(core.GET)
+ _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range getPortOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "GetPort")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ builder.AddQuery("version", fmt.Sprint(*directLink.Version))
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = directLink.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPort)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// ListGatewayVirtualConnections : List virtual connections
+// List a gateway's virtual connections. For a gateway in another account whose virtual connections connect to
+// networks in this account, only the virtual connections that connect to this account are returned.
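+//
+// Usage sketch (editorial addition): it assumes a configured *DirectLinkV1
+// value named directLink, and the gateway ID is a placeholder:
+//
+//   listOptions := &ListGatewayVirtualConnectionsOptions{
+//       GatewayID: core.StringPtr("example-gateway-id"),
+//   }
+//   connections, _, err := directLink.ListGatewayVirtualConnections(listOptions)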
+func (directLink *DirectLinkV1) ListGatewayVirtualConnections(listGatewayVirtualConnectionsOptions *ListGatewayVirtualConnectionsOptions) (result *GatewayVirtualConnectionCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listGatewayVirtualConnectionsOptions, "listGatewayVirtualConnectionsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listGatewayVirtualConnectionsOptions, "listGatewayVirtualConnectionsOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways", "virtual_connections"} + pathParameters := []string{*listGatewayVirtualConnectionsOptions.GatewayID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listGatewayVirtualConnectionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "ListGatewayVirtualConnections") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLink.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGatewayVirtualConnectionCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateGatewayVirtualConnection : Create virtual connection +// Create a virtual connection to the specified network. 
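+//
+// Usage sketch (editorial addition) built from the generated constructor,
+// setter, and type constant defined later in this file; gatewayID and vpcCRN
+// are placeholder variables:
+//
+//   createOptions := directLink.NewCreateGatewayVirtualConnectionOptions(
+//       gatewayID, "example-vc", CreateGatewayVirtualConnectionOptions_Type_Vpc)
+//   createOptions.SetNetworkID(vpcCRN)
+//   vc, _, err := directLink.CreateGatewayVirtualConnection(createOptions)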
+func (directLink *DirectLinkV1) CreateGatewayVirtualConnection(createGatewayVirtualConnectionOptions *CreateGatewayVirtualConnectionOptions) (result *GatewayVirtualConnection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createGatewayVirtualConnectionOptions, "createGatewayVirtualConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createGatewayVirtualConnectionOptions, "createGatewayVirtualConnectionOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways", "virtual_connections"} + pathParameters := []string{*createGatewayVirtualConnectionOptions.GatewayID} + + builder := core.NewRequestBuilder(core.POST) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range createGatewayVirtualConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "CreateGatewayVirtualConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + body := make(map[string]interface{}) + if createGatewayVirtualConnectionOptions.Name != nil { + body["name"] = createGatewayVirtualConnectionOptions.Name + } + if createGatewayVirtualConnectionOptions.Type != nil { + body["type"] = createGatewayVirtualConnectionOptions.Type + } + if createGatewayVirtualConnectionOptions.NetworkID != nil { + body["network_id"] = createGatewayVirtualConnectionOptions.NetworkID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLink.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGatewayVirtualConnection) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteGatewayVirtualConnection : Delete virtual connection +// Delete the virtual connection. 
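+//
+// Usage sketch (editorial addition) using the generated constructor defined
+// later in this file; both identifiers are placeholders:
+//
+//   deleteOptions := directLink.NewDeleteGatewayVirtualConnectionOptions(
+//       "example-gateway-id", "example-vc-id")
+//   detailedResponse, err := directLink.DeleteGatewayVirtualConnection(deleteOptions)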
+func (directLink *DirectLinkV1) DeleteGatewayVirtualConnection(deleteGatewayVirtualConnectionOptions *DeleteGatewayVirtualConnectionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteGatewayVirtualConnectionOptions, "deleteGatewayVirtualConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteGatewayVirtualConnectionOptions, "deleteGatewayVirtualConnectionOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways", "virtual_connections"} + pathParameters := []string{*deleteGatewayVirtualConnectionOptions.GatewayID, *deleteGatewayVirtualConnectionOptions.ID} + + builder := core.NewRequestBuilder(core.DELETE) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range deleteGatewayVirtualConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "DeleteGatewayVirtualConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = directLink.Service.Request(request, nil) + + return +} + +// GetGatewayVirtualConnection : Get virtual connection +// Retrieve a virtual connection. +func (directLink *DirectLinkV1) GetGatewayVirtualConnection(getGatewayVirtualConnectionOptions *GetGatewayVirtualConnectionOptions) (result *GatewayVirtualConnection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getGatewayVirtualConnectionOptions, "getGatewayVirtualConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getGatewayVirtualConnectionOptions, "getGatewayVirtualConnectionOptions") + if err != nil { + return + } + + pathSegments := []string{"gateways", "virtual_connections"} + pathParameters := []string{*getGatewayVirtualConnectionOptions.GatewayID, *getGatewayVirtualConnectionOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getGatewayVirtualConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "GetGatewayVirtualConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*directLink.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = directLink.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGatewayVirtualConnection) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateGatewayVirtualConnection : Update virtual connection +// Update a virtual connection. 
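+//
+// Usage sketch (editorial addition): the options struct is defined elsewhere
+// in this file; only the fields set here are sent in the PATCH body, and the
+// identifiers and name are placeholders:
+//
+//   updateOptions := &UpdateGatewayVirtualConnectionOptions{
+//       GatewayID: core.StringPtr("example-gateway-id"),
+//       ID:        core.StringPtr("example-vc-id"),
+//       Name:      core.StringPtr("renamed-vc"),
+//   }
+//   vc, _, err := directLink.UpdateGatewayVirtualConnection(updateOptions)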
+func (directLink *DirectLinkV1) UpdateGatewayVirtualConnection(updateGatewayVirtualConnectionOptions *UpdateGatewayVirtualConnectionOptions) (result *GatewayVirtualConnection, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(updateGatewayVirtualConnectionOptions, "updateGatewayVirtualConnectionOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(updateGatewayVirtualConnectionOptions, "updateGatewayVirtualConnectionOptions")
+ if err != nil {
+ return
+ }
+
+ pathSegments := []string{"gateways", "virtual_connections"}
+ pathParameters := []string{*updateGatewayVirtualConnectionOptions.GatewayID, *updateGatewayVirtualConnectionOptions.ID}
+
+ builder := core.NewRequestBuilder(core.PATCH)
+ _, err = builder.ConstructHTTPURL(directLink.Service.Options.URL, pathSegments, pathParameters)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range updateGatewayVirtualConnectionOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("direct_link", "V1", "UpdateGatewayVirtualConnection")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ builder.AddHeader("Content-Type", "application/json")
+
+ builder.AddQuery("version", fmt.Sprint(*directLink.Version))
+
+ body := make(map[string]interface{})
+ if updateGatewayVirtualConnectionOptions.Name != nil {
+ body["name"] = updateGatewayVirtualConnectionOptions.Name
+ }
+ if updateGatewayVirtualConnectionOptions.Status != nil {
+ body["status"] = updateGatewayVirtualConnectionOptions.Status
+ }
+ _, err = builder.SetBodyContentJSON(body)
+ if err != nil {
+ return
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = directLink.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGatewayVirtualConnection)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// CreateGatewayActionOptions : The CreateGatewayAction options.
+type CreateGatewayActionOptions struct {
+ // Direct Link Connect gateway identifier.
+ ID *string `json:"id" validate:"required"`
+
+ // Action request.
+ Action *string `json:"action" validate:"required"`
+
+ // Required for create_gateway_approve requests to select the gateway's routing option. Gateways with global routing
+ // (`true`) can connect to networks outside of their associated region.
+ Global *bool `json:"global,omitempty"`
+
+ // Required for create_gateway_approve requests to select the gateway's metered billing option. When `true` gateway
+ // usage is billed per gigabyte. When `false` there is no per gigabyte usage charge, instead a flat rate is charged
+ // for the gateway.
+ Metered *bool `json:"metered,omitempty"`
+
+ // Set for create_gateway_approve requests to select the gateway's resource group. If unspecified on
+ // create_gateway_approve, the account's [default resource
+ // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used.
+ ResourceGroup *ResourceGroupIdentity `json:"resource_group,omitempty"`
+
+ // Specify the attribute updates being approved or rejected; update_attributes_approve and update_attributes_reject
+ // actions must provide an updates field that matches the gateway's current pending changes.
+ Updates []GatewayActionTemplateUpdatesItemIntf `json:"updates,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateGatewayActionOptions.Action property. +// Action request. +const ( + CreateGatewayActionOptions_Action_CreateGatewayApprove = "create_gateway_approve" + CreateGatewayActionOptions_Action_CreateGatewayReject = "create_gateway_reject" + CreateGatewayActionOptions_Action_DeleteGatewayApprove = "delete_gateway_approve" + CreateGatewayActionOptions_Action_DeleteGatewayReject = "delete_gateway_reject" + CreateGatewayActionOptions_Action_UpdateAttributesApprove = "update_attributes_approve" + CreateGatewayActionOptions_Action_UpdateAttributesReject = "update_attributes_reject" +) + +// NewCreateGatewayActionOptions : Instantiate CreateGatewayActionOptions +func (*DirectLinkV1) NewCreateGatewayActionOptions(id string, action string) *CreateGatewayActionOptions { + return &CreateGatewayActionOptions{ + ID: core.StringPtr(id), + Action: core.StringPtr(action), + } +} + +// SetID : Allow user to set ID +func (options *CreateGatewayActionOptions) SetID(id string) *CreateGatewayActionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetAction : Allow user to set Action +func (options *CreateGatewayActionOptions) SetAction(action string) *CreateGatewayActionOptions { + options.Action = core.StringPtr(action) + return options +} + +// SetGlobal : Allow user to set Global +func (options *CreateGatewayActionOptions) SetGlobal(global bool) *CreateGatewayActionOptions { + options.Global = core.BoolPtr(global) + return options +} + +// SetMetered : Allow user to set Metered +func (options *CreateGatewayActionOptions) SetMetered(metered bool) *CreateGatewayActionOptions { + options.Metered = core.BoolPtr(metered) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateGatewayActionOptions) SetResourceGroup(resourceGroup *ResourceGroupIdentity) *CreateGatewayActionOptions { + options.ResourceGroup = resourceGroup + return options +} + +// SetUpdates : Allow user to set Updates +func (options *CreateGatewayActionOptions) SetUpdates(updates []GatewayActionTemplateUpdatesItemIntf) *CreateGatewayActionOptions { + options.Updates = updates + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateGatewayActionOptions) SetHeaders(param map[string]string) *CreateGatewayActionOptions { + options.Headers = param + return options +} + +// CreateGatewayCompletionNoticeOptions : The CreateGatewayCompletionNotice options. +type CreateGatewayCompletionNoticeOptions struct { + // Direct Link Dedicated gateway identifier. + ID *string `json:"id" validate:"required"` + + // Completion notice PDF file. + Upload io.ReadCloser `json:"upload,omitempty"` + + // The content type of upload. 
+ UploadContentType *string `json:"upload_content_type,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateGatewayCompletionNoticeOptions : Instantiate CreateGatewayCompletionNoticeOptions +func (*DirectLinkV1) NewCreateGatewayCompletionNoticeOptions(id string) *CreateGatewayCompletionNoticeOptions { + return &CreateGatewayCompletionNoticeOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *CreateGatewayCompletionNoticeOptions) SetID(id string) *CreateGatewayCompletionNoticeOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetUpload : Allow user to set Upload +func (options *CreateGatewayCompletionNoticeOptions) SetUpload(upload io.ReadCloser) *CreateGatewayCompletionNoticeOptions { + options.Upload = upload + return options +} + +// SetUploadContentType : Allow user to set UploadContentType +func (options *CreateGatewayCompletionNoticeOptions) SetUploadContentType(uploadContentType string) *CreateGatewayCompletionNoticeOptions { + options.UploadContentType = core.StringPtr(uploadContentType) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateGatewayCompletionNoticeOptions) SetHeaders(param map[string]string) *CreateGatewayCompletionNoticeOptions { + options.Headers = param + return options +} + +// CreateGatewayOptions : The CreateGateway options. +type CreateGatewayOptions struct { + // The Direct Link Gateway template. + GatewayTemplate GatewayTemplateIntf `json:"GatewayTemplate" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateGatewayOptions : Instantiate CreateGatewayOptions +func (*DirectLinkV1) NewCreateGatewayOptions(gatewayTemplate GatewayTemplateIntf) *CreateGatewayOptions { + return &CreateGatewayOptions{ + GatewayTemplate: gatewayTemplate, + } +} + +// SetGatewayTemplate : Allow user to set GatewayTemplate +func (options *CreateGatewayOptions) SetGatewayTemplate(gatewayTemplate GatewayTemplateIntf) *CreateGatewayOptions { + options.GatewayTemplate = gatewayTemplate + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateGatewayOptions) SetHeaders(param map[string]string) *CreateGatewayOptions { + options.Headers = param + return options +} + +// CreateGatewayVirtualConnectionOptions : The CreateGatewayVirtualConnection options. +type CreateGatewayVirtualConnectionOptions struct { + // Direct Link gateway identifier. + GatewayID *string `json:"gateway_id" validate:"required"` + + // The user-defined name for this virtual connection. Virtual connection names are unique within a gateway. This is + // the name of the virtual connection itself, the network being connected may have its own name attribute. + Name *string `json:"name" validate:"required"` + + // The type of virtual connection. + Type *string `json:"type" validate:"required"` + + // Unique identifier of the target network. For type=vpc virtual connections this is the CRN of the target VPC. This + // field does not apply to type=classic connections. + NetworkID *string `json:"network_id,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateGatewayVirtualConnectionOptions.Type property. +// The type of virtual connection. 
+const ( + CreateGatewayVirtualConnectionOptions_Type_Classic = "classic" + CreateGatewayVirtualConnectionOptions_Type_Vpc = "vpc" +) + +// NewCreateGatewayVirtualConnectionOptions : Instantiate CreateGatewayVirtualConnectionOptions +func (*DirectLinkV1) NewCreateGatewayVirtualConnectionOptions(gatewayID string, name string, typeVar string) *CreateGatewayVirtualConnectionOptions { + return &CreateGatewayVirtualConnectionOptions{ + GatewayID: core.StringPtr(gatewayID), + Name: core.StringPtr(name), + Type: core.StringPtr(typeVar), + } +} + +// SetGatewayID : Allow user to set GatewayID +func (options *CreateGatewayVirtualConnectionOptions) SetGatewayID(gatewayID string) *CreateGatewayVirtualConnectionOptions { + options.GatewayID = core.StringPtr(gatewayID) + return options +} + +// SetName : Allow user to set Name +func (options *CreateGatewayVirtualConnectionOptions) SetName(name string) *CreateGatewayVirtualConnectionOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetType : Allow user to set Type +func (options *CreateGatewayVirtualConnectionOptions) SetType(typeVar string) *CreateGatewayVirtualConnectionOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetNetworkID : Allow user to set NetworkID +func (options *CreateGatewayVirtualConnectionOptions) SetNetworkID(networkID string) *CreateGatewayVirtualConnectionOptions { + options.NetworkID = core.StringPtr(networkID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateGatewayVirtualConnectionOptions) SetHeaders(param map[string]string) *CreateGatewayVirtualConnectionOptions { + options.Headers = param + return options +} + +// CrossConnectRouter : Cross Connect Router details. +type CrossConnectRouter struct { + // Array of capabilities for this router. + Capabilities []string `json:"capabilities,omitempty"` + + // The name of the Router. + RouterName *string `json:"router_name,omitempty"` + + // Count of existing Direct Link Dedicated gateways on this router for this account. + TotalConnections *int64 `json:"total_connections,omitempty"` +} + +// UnmarshalCrossConnectRouter unmarshals an instance of CrossConnectRouter from the specified map of raw messages. +func UnmarshalCrossConnectRouter(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CrossConnectRouter) + err = core.UnmarshalPrimitive(m, "capabilities", &obj.Capabilities) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "router_name", &obj.RouterName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_connections", &obj.TotalConnections) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DeleteGatewayOptions : The DeleteGateway options. +type DeleteGatewayOptions struct { + // Direct Link gateway identifier. 
+ ID *string `json:"id" validate:"required"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewDeleteGatewayOptions : Instantiate DeleteGatewayOptions
+func (*DirectLinkV1) NewDeleteGatewayOptions(id string) *DeleteGatewayOptions {
+ return &DeleteGatewayOptions{
+ ID: core.StringPtr(id),
+ }
+}
+
+// SetID : Allow user to set ID
+func (options *DeleteGatewayOptions) SetID(id string) *DeleteGatewayOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *DeleteGatewayOptions) SetHeaders(param map[string]string) *DeleteGatewayOptions {
+ options.Headers = param
+ return options
+}
+
+// DeleteGatewayVirtualConnectionOptions : The DeleteGatewayVirtualConnection options.
+type DeleteGatewayVirtualConnectionOptions struct {
+ // Direct Link gateway identifier.
+ GatewayID *string `json:"gateway_id" validate:"required"`
+
+ // The virtual connection identifier.
+ ID *string `json:"id" validate:"required"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewDeleteGatewayVirtualConnectionOptions : Instantiate DeleteGatewayVirtualConnectionOptions
+func (*DirectLinkV1) NewDeleteGatewayVirtualConnectionOptions(gatewayID string, id string) *DeleteGatewayVirtualConnectionOptions {
+ return &DeleteGatewayVirtualConnectionOptions{
+ GatewayID: core.StringPtr(gatewayID),
+ ID: core.StringPtr(id),
+ }
+}
+
+// SetGatewayID : Allow user to set GatewayID
+func (options *DeleteGatewayVirtualConnectionOptions) SetGatewayID(gatewayID string) *DeleteGatewayVirtualConnectionOptions {
+ options.GatewayID = core.StringPtr(gatewayID)
+ return options
+}
+
+// SetID : Allow user to set ID
+func (options *DeleteGatewayVirtualConnectionOptions) SetID(id string) *DeleteGatewayVirtualConnectionOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *DeleteGatewayVirtualConnectionOptions) SetHeaders(param map[string]string) *DeleteGatewayVirtualConnectionOptions {
+ options.Headers = param
+ return options
+}
+
+// Gateway : gateway.
+type Gateway struct {
+ // Customer BGP ASN.
+ BgpAsn *int64 `json:"bgp_asn" validate:"required"`
+
+ // (DEPRECATED) BGP base CIDR is deprecated and no longer recognized by the Direct Link APIs.
+ //
+ // See bgp_cer_cidr and bgp_ibm_cidr fields instead for IP related information.
+ //
+ // Deprecated field bgp_base_cidr will be removed from the API specification after 15-MAR-2021.
+ BgpBaseCidr *string `json:"bgp_base_cidr,omitempty"`
+
+ // BGP customer edge router CIDR.
+ BgpCerCidr *string `json:"bgp_cer_cidr,omitempty"`
+
+ // IBM BGP ASN.
+ BgpIbmAsn *int64 `json:"bgp_ibm_asn,omitempty"`
+
+ // BGP IBM CIDR.
+ BgpIbmCidr *string `json:"bgp_ibm_cidr,omitempty"`
+
+ // Gateway BGP status. The list of enumerated values for this property may expand in the future. Code and processes
+ // using this field must tolerate unexpected values.
+ BgpStatus *string `json:"bgp_status,omitempty"`
+
+ // Carrier name. Only set for type=dedicated gateways.
+ CarrierName *string `json:"carrier_name,omitempty"`
+
+ // Changes pending approval for provider managed Direct Link Connect gateways.
+ ChangeRequest GatewayChangeRequestIntf `json:"change_request,omitempty"`
+
+ // Reason for completion notice rejection. Only included on type=dedicated gateways with a rejected completion notice.
+ CompletionNoticeRejectReason *string `json:"completion_notice_reject_reason,omitempty"` + + // The date and time resource was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN (Cloud Resource Name) of this gateway. + Crn *string `json:"crn" validate:"required"` + + // Cross connect router. Only included on type=dedicated gateways. + CrossConnectRouter *string `json:"cross_connect_router,omitempty"` + + // Customer name. Only set for type=dedicated gateways. + CustomerName *string `json:"customer_name,omitempty"` + + // Gateways with global routing (`true`) can connect to networks outside their associated region. + Global *bool `json:"global" validate:"required"` + + // The unique identifier of this gateway. + ID *string `json:"id" validate:"required"` + + // Gateway link status. Only included on type=dedicated gateways. The list of enumerated values for this property may + // expand in the future. Code and processes using this field must tolerate unexpected values. + LinkStatus *string `json:"link_status,omitempty"` + + // Gateway location long name. + LocationDisplayName *string `json:"location_display_name" validate:"required"` + + // Gateway location. + LocationName *string `json:"location_name" validate:"required"` + + // MACsec configuration information. For Dedicated Gateways with MACsec configured, return configuration information. + // Contact IBM support for access to MACsec. + MacsecConfig *GatewayMacsecConfig `json:"macsec_config,omitempty"` + + // Metered billing option. When `true` gateway usage is billed per gigabyte. When `false` there is no per gigabyte + // usage charge, instead a flat rate is charged for the gateway. + Metered *bool `json:"metered" validate:"required"` + + // The unique user-defined name for this gateway. + Name *string `json:"name" validate:"required"` + + // Gateway operational status. The list of enumerated values for this property may expand in the future. Code and + // processes using this field must tolerate unexpected values. + OperationalStatus *string `json:"operational_status" validate:"required"` + + // gateway port for type=connect gateways. + Port *GatewayPort `json:"port,omitempty"` + + // Indicates whether gateway changes must be made via a provider portal. + ProviderApiManaged *bool `json:"provider_api_managed,omitempty"` + + // Resource group reference. + ResourceGroup *ResourceGroupReference `json:"resource_group,omitempty"` + + // Gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps" validate:"required"` + + // Gateway type. The list of enumerated values for this property may expand in the future. Code and processes using + // this field must tolerate unexpected values. + Type *string `json:"type" validate:"required"` + + // VLAN allocated for this gateway. Only set for type=connect gateways. + Vlan *int64 `json:"vlan,omitempty"` +} + +// Constants associated with the Gateway.BgpStatus property. +// Gateway BGP status. The list of enumerated values for this property may expand in the future. Code and processes +// using this field must tolerate unexpected values. +const ( + Gateway_BgpStatus_Active = "active" + Gateway_BgpStatus_Connect = "connect" + Gateway_BgpStatus_Established = "established" + Gateway_BgpStatus_Idle = "idle" +) + +// Constants associated with the Gateway.LinkStatus property. +// Gateway link status. Only included on type=dedicated gateways. The list of enumerated values for this property may +// expand in the future. 
Code and processes using this field must tolerate unexpected values. +const ( + Gateway_LinkStatus_Down = "down" + Gateway_LinkStatus_Up = "up" +) + +// Constants associated with the Gateway.OperationalStatus property. +// Gateway operational status. The list of enumerated values for this property may expand in the future. Code and +// processes using this field must tolerate unexpected values. +const ( + Gateway_OperationalStatus_AwaitingCompletionNotice = "awaiting_completion_notice" + Gateway_OperationalStatus_AwaitingLoa = "awaiting_loa" + Gateway_OperationalStatus_CompletionNoticeApproved = "completion_notice_approved" + Gateway_OperationalStatus_CompletionNoticeReceived = "completion_notice_received" + Gateway_OperationalStatus_CompletionNoticeRejected = "completion_notice_rejected" + Gateway_OperationalStatus_Configuring = "configuring" + Gateway_OperationalStatus_CreatePending = "create_pending" + Gateway_OperationalStatus_CreateRejected = "create_rejected" + Gateway_OperationalStatus_DeletePending = "delete_pending" + Gateway_OperationalStatus_LoaAccepted = "loa_accepted" + Gateway_OperationalStatus_LoaCreated = "loa_created" + Gateway_OperationalStatus_LoaRejected = "loa_rejected" + Gateway_OperationalStatus_Provisioned = "provisioned" +) + +// Constants associated with the Gateway.Type property. +// Gateway type. The list of enumerated values for this property may expand in the future. Code and processes using this +// field must tolerate unexpected values. +const ( + Gateway_Type_Connect = "connect" + Gateway_Type_Dedicated = "dedicated" +) + +// UnmarshalGateway unmarshals an instance of Gateway from the specified map of raw messages. +func UnmarshalGateway(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Gateway) + err = core.UnmarshalPrimitive(m, "bgp_asn", &obj.BgpAsn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_base_cidr", &obj.BgpBaseCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_cer_cidr", &obj.BgpCerCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_ibm_asn", &obj.BgpIbmAsn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_ibm_cidr", &obj.BgpIbmCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_status", &obj.BgpStatus) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "carrier_name", &obj.CarrierName) + if err != nil { + return + } + err = core.UnmarshalModel(m, "change_request", &obj.ChangeRequest, UnmarshalGatewayChangeRequest) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "completion_notice_reject_reason", &obj.CompletionNoticeRejectReason) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.Crn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cross_connect_router", &obj.CrossConnectRouter) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "customer_name", &obj.CustomerName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "global", &obj.Global) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "link_status", &obj.LinkStatus) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "location_display_name", &obj.LocationDisplayName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, 
"location_name", &obj.LocationName) + if err != nil { + return + } + err = core.UnmarshalModel(m, "macsec_config", &obj.MacsecConfig, UnmarshalGatewayMacsecConfig) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "metered", &obj.Metered) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "operational_status", &obj.OperationalStatus) + if err != nil { + return + } + err = core.UnmarshalModel(m, "port", &obj.Port, UnmarshalGatewayPort) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "provider_api_managed", &obj.ProviderApiManaged) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "vlan", &obj.Vlan) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayActionTemplateUpdatesItem : GatewayActionTemplateUpdatesItem struct +// Models which "extend" this model: +// - GatewayActionTemplateUpdatesItemGatewayClientSpeedUpdate +type GatewayActionTemplateUpdatesItem struct { + // New gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps,omitempty"` +} + +func (*GatewayActionTemplateUpdatesItem) isaGatewayActionTemplateUpdatesItem() bool { + return true +} + +type GatewayActionTemplateUpdatesItemIntf interface { + isaGatewayActionTemplateUpdatesItem() bool +} + +// UnmarshalGatewayActionTemplateUpdatesItem unmarshals an instance of GatewayActionTemplateUpdatesItem from the specified map of raw messages. +func UnmarshalGatewayActionTemplateUpdatesItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayActionTemplateUpdatesItem) + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayChangeRequest : GatewayChangeRequest struct +// Models which "extend" this model: +// - GatewayChangeRequestGatewayClientGatewayCreate +// - GatewayChangeRequestGatewayClientGatewayDelete +// - GatewayChangeRequestGatewayClientGatewayUpdateAttributes +type GatewayChangeRequest struct { + // type of gateway change request. + Type *string `json:"type,omitempty"` + + // array of pending updates. + Updates []GatewayChangeRequestUpdatesItemIntf `json:"updates,omitempty"` +} + +// Constants associated with the GatewayChangeRequest.Type property. +// type of gateway change request. +const ( + GatewayChangeRequest_Type_CreateGateway = "create_gateway" +) + +func (*GatewayChangeRequest) isaGatewayChangeRequest() bool { + return true +} + +type GatewayChangeRequestIntf interface { + isaGatewayChangeRequest() bool +} + +// UnmarshalGatewayChangeRequest unmarshals an instance of GatewayChangeRequest from the specified map of raw messages. 
+func UnmarshalGatewayChangeRequest(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayChangeRequest) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalModel(m, "updates", &obj.Updates, UnmarshalGatewayChangeRequestUpdatesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem : GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem struct +// Models which "extend" this model: +// - GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemGatewayClientSpeedUpdate +type GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem struct { + // New gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps,omitempty"` +} + +func (*GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem) isaGatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem() bool { + return true +} + +type GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemIntf interface { + isaGatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem() bool +} + +// UnmarshalGatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem unmarshals an instance of GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem from the specified map of raw messages. +func UnmarshalGatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem) + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayChangeRequestUpdatesItem : GatewayChangeRequestUpdatesItem struct +// Models which "extend" this model: +// - GatewayChangeRequestUpdatesItemGatewayClientSpeedUpdate +type GatewayChangeRequestUpdatesItem struct { + // New gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps,omitempty"` +} + +func (*GatewayChangeRequestUpdatesItem) isaGatewayChangeRequestUpdatesItem() bool { + return true +} + +type GatewayChangeRequestUpdatesItemIntf interface { + isaGatewayChangeRequestUpdatesItem() bool +} + +// UnmarshalGatewayChangeRequestUpdatesItem unmarshals an instance of GatewayChangeRequestUpdatesItem from the specified map of raw messages. +func UnmarshalGatewayChangeRequestUpdatesItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayChangeRequestUpdatesItem) + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayCollection : GatewayCollection struct +type GatewayCollection struct { + // Collection of Direct Link gateways. + Gateways []Gateway `json:"gateways" validate:"required"` +} + +// UnmarshalGatewayCollection unmarshals an instance of GatewayCollection from the specified map of raw messages. +func UnmarshalGatewayCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayCollection) + err = core.UnmarshalModel(m, "gateways", &obj.Gateways, UnmarshalGateway) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayMacsecConfig : MACsec configuration information. 
For Dedicated Gateways with MACsec configured, return configuration information. +// Contact IBM support for access to MACsec. +type GatewayMacsecConfig struct { + // Indicate whether MACsec should currently be active (true) or inactive (false) for a MACsec enabled gateway. To be + // MACsec enabled a `macsec_config` must be specified at gateway create time. + Active *bool `json:"active" validate:"required"` + + // Active connectivity association key. + // + // During normal operation `active_cak` will match the desired `primary_cak`. During CAK changes this field can be + // used to indicate which key is currently active on the gateway. + ActiveCak *GatewayMacsecConfigActiveCak `json:"active_cak,omitempty"` + + // SAK cipher suite. + CipherSuite *string `json:"cipher_suite,omitempty"` + + // confidentiality offset. + ConfidentialityOffset *int64 `json:"confidentiality_offset,omitempty"` + + // cryptographic algorithm. + CryptographicAlgorithm *string `json:"cryptographic_algorithm,omitempty"` + + // fallback connectivity association key. + FallbackCak *GatewayMacsecConfigFallbackCak `json:"fallback_cak,omitempty"` + + // key server priority. + KeyServerPriority *int64 `json:"key_server_priority,omitempty"` + + // desired primary connectivity association key. + PrimaryCak *GatewayMacsecConfigPrimaryCak `json:"primary_cak" validate:"required"` + + // Secure Association Key (SAK) expiry time in seconds. + SakExpiryTime *int64 `json:"sak_expiry_time,omitempty"` + + // Packets without MACsec headers are dropped when security_policy is `must_secure`. + SecurityPolicy *string `json:"security_policy,omitempty"` + + // Current status of MACsec on this gateway. + // + // Status 'offline' is returned during gateway creation and deletion. + Status *string `json:"status" validate:"required"` + + // replay protection window size. + WindowSize *int64 `json:"window_size,omitempty"` +} + +// Constants associated with the GatewayMacsecConfig.CipherSuite property. +// SAK cipher suite. +const ( + GatewayMacsecConfig_CipherSuite_GcmAesXpn256 = "gcm_aes_xpn_256" +) + +// Constants associated with the GatewayMacsecConfig.CryptographicAlgorithm property. +// cryptographic algorithm. +const ( + GatewayMacsecConfig_CryptographicAlgorithm_Aes256Cmac = "aes_256_cmac" +) + +// Constants associated with the GatewayMacsecConfig.SecurityPolicy property. +// Packets without MACsec headers are dropped when security_policy is `must_secure`. +const ( + GatewayMacsecConfig_SecurityPolicy_MustSecure = "must_secure" +) + +// Constants associated with the GatewayMacsecConfig.Status property. +// Current status of MACsec on this gateway. +// +// Status 'offline' is returned during gateway creation and deletion. +const ( + GatewayMacsecConfig_Status_Init = "init" + GatewayMacsecConfig_Status_Offline = "offline" + GatewayMacsecConfig_Status_Pending = "pending" + GatewayMacsecConfig_Status_Secured = "secured" +) + +// UnmarshalGatewayMacsecConfig unmarshals an instance of GatewayMacsecConfig from the specified map of raw messages. 
+func UnmarshalGatewayMacsecConfig(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayMacsecConfig) + err = core.UnmarshalPrimitive(m, "active", &obj.Active) + if err != nil { + return + } + err = core.UnmarshalModel(m, "active_cak", &obj.ActiveCak, UnmarshalGatewayMacsecConfigActiveCak) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cipher_suite", &obj.CipherSuite) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "confidentiality_offset", &obj.ConfidentialityOffset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cryptographic_algorithm", &obj.CryptographicAlgorithm) + if err != nil { + return + } + err = core.UnmarshalModel(m, "fallback_cak", &obj.FallbackCak, UnmarshalGatewayMacsecConfigFallbackCak) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "key_server_priority", &obj.KeyServerPriority) + if err != nil { + return + } + err = core.UnmarshalModel(m, "primary_cak", &obj.PrimaryCak, UnmarshalGatewayMacsecConfigPrimaryCak) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "sak_expiry_time", &obj.SakExpiryTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "security_policy", &obj.SecurityPolicy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "window_size", &obj.WindowSize) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayMacsecConfigActiveCak : Active connectivity association key. +// +// During normal operation `active_cak` will match the desired `primary_cak`. During CAK changes this field can be used +// to indicate which key is currently active on the gateway. +type GatewayMacsecConfigActiveCak struct { + // connectivity association key crn. + Crn *string `json:"crn" validate:"required"` + + // connectivity association key status. + Status *string `json:"status" validate:"required"` +} + +// UnmarshalGatewayMacsecConfigActiveCak unmarshals an instance of GatewayMacsecConfigActiveCak from the specified map of raw messages. +func UnmarshalGatewayMacsecConfigActiveCak(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayMacsecConfigActiveCak) + err = core.UnmarshalPrimitive(m, "crn", &obj.Crn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayMacsecConfigFallbackCak : fallback connectivity association key. +type GatewayMacsecConfigFallbackCak struct { + // connectivity association key crn. + Crn *string `json:"crn" validate:"required"` + + // connectivity association key status. + Status *string `json:"status" validate:"required"` +} + +// UnmarshalGatewayMacsecConfigFallbackCak unmarshals an instance of GatewayMacsecConfigFallbackCak from the specified map of raw messages. +func UnmarshalGatewayMacsecConfigFallbackCak(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayMacsecConfigFallbackCak) + err = core.UnmarshalPrimitive(m, "crn", &obj.Crn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayMacsecConfigPatchTemplate : MACsec configuration information. 
+// When patching any macsec_config fields, no other fields may be specified in the
+// patch request. Contact IBM support for access to MACsec.
+//
+// A MACsec config cannot be added to a gateway created without MACsec.
+type GatewayMacsecConfigPatchTemplate struct {
+ // Indicate whether MACsec protection should be active (true) or inactive (false) for this MACsec enabled gateway.
+ Active *bool `json:"active,omitempty"`
+
+ // Fallback connectivity association key.
+ //
+ // MACsec keys must be type=standard with key name lengths between 2 and 64 inclusive and contain only characters
+ // [a-fA-F0-9].
+ // The key material must be exactly 64 characters in length and contain only [a-fA-F0-9].
+ //
+ // To clear the optional `fallback_cak` field, patch its crn to `""`.
+ //
+ // A gateway's `fallback_cak` crn cannot match its `primary_cak` crn.
+ FallbackCak *GatewayMacsecConfigPatchTemplateFallbackCak `json:"fallback_cak,omitempty"`
+
+ // Desired primary connectivity association key.
+ //
+ // MACsec keys must be type=standard with key name lengths between 2 and 64 inclusive and contain only characters
+ // [a-fA-F0-9].
+ // The key material must be exactly 64 characters in length and contain only [a-fA-F0-9].
+ //
+ // A gateway's `primary_cak` crn cannot match its `fallback_cak` crn.
+ PrimaryCak *GatewayMacsecConfigPatchTemplatePrimaryCak `json:"primary_cak,omitempty"`
+
+ // replay protection window size.
+ WindowSize *int64 `json:"window_size,omitempty"`
+}
+
+// UnmarshalGatewayMacsecConfigPatchTemplate unmarshals an instance of GatewayMacsecConfigPatchTemplate from the specified map of raw messages.
+func UnmarshalGatewayMacsecConfigPatchTemplate(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(GatewayMacsecConfigPatchTemplate)
+ err = core.UnmarshalPrimitive(m, "active", &obj.Active)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "fallback_cak", &obj.FallbackCak, UnmarshalGatewayMacsecConfigPatchTemplateFallbackCak)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "primary_cak", &obj.PrimaryCak, UnmarshalGatewayMacsecConfigPatchTemplatePrimaryCak)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "window_size", &obj.WindowSize)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GatewayMacsecConfigPatchTemplateFallbackCak : Fallback connectivity association key.
+//
+// MACsec keys must be type=standard with key name lengths between 2 and 64 inclusive and contain only characters
+// [a-fA-F0-9]. The key material must be exactly 64 characters in length and contain only [a-fA-F0-9].
+//
+// To clear the optional `fallback_cak` field, patch its crn to `""`.
+//
+// A gateway's `fallback_cak` crn cannot match its `primary_cak` crn.
+type GatewayMacsecConfigPatchTemplateFallbackCak struct {
+ // connectivity association key crn.
+ Crn *string `json:"crn" validate:"required"`
+}
+
+// NewGatewayMacsecConfigPatchTemplateFallbackCak : Instantiate GatewayMacsecConfigPatchTemplateFallbackCak (Generic Model Constructor)
+func (*DirectLinkV1) NewGatewayMacsecConfigPatchTemplateFallbackCak(crn string) (model *GatewayMacsecConfigPatchTemplateFallbackCak, err error) {
+ model = &GatewayMacsecConfigPatchTemplateFallbackCak{
+ Crn: core.StringPtr(crn),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalGatewayMacsecConfigPatchTemplateFallbackCak unmarshals an instance of GatewayMacsecConfigPatchTemplateFallbackCak from the specified map of raw messages.
+func UnmarshalGatewayMacsecConfigPatchTemplateFallbackCak(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(GatewayMacsecConfigPatchTemplateFallbackCak)
+ err = core.UnmarshalPrimitive(m, "crn", &obj.Crn)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GatewayMacsecConfigPatchTemplatePrimaryCak : Desired primary connectivity association key.
+//
+// MACsec keys must be type=standard with key name lengths between 2 and 64 inclusive and contain only characters
+// [a-fA-F0-9]. The key material must be exactly 64 characters in length and contain only [a-fA-F0-9].
+//
+// A gateway's `primary_cak` crn cannot match its `fallback_cak` crn.
+type GatewayMacsecConfigPatchTemplatePrimaryCak struct {
+ // connectivity association key crn.
+ Crn *string `json:"crn" validate:"required"`
+}
+
+// NewGatewayMacsecConfigPatchTemplatePrimaryCak : Instantiate GatewayMacsecConfigPatchTemplatePrimaryCak (Generic Model Constructor)
+func (*DirectLinkV1) NewGatewayMacsecConfigPatchTemplatePrimaryCak(crn string) (model *GatewayMacsecConfigPatchTemplatePrimaryCak, err error) {
+ model = &GatewayMacsecConfigPatchTemplatePrimaryCak{
+ Crn: core.StringPtr(crn),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalGatewayMacsecConfigPatchTemplatePrimaryCak unmarshals an instance of GatewayMacsecConfigPatchTemplatePrimaryCak from the specified map of raw messages.
+func UnmarshalGatewayMacsecConfigPatchTemplatePrimaryCak(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(GatewayMacsecConfigPatchTemplatePrimaryCak)
+ err = core.UnmarshalPrimitive(m, "crn", &obj.Crn)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GatewayMacsecConfigPrimaryCak : desired primary connectivity association key.
+type GatewayMacsecConfigPrimaryCak struct {
+ // connectivity association key crn.
+ Crn *string `json:"crn" validate:"required"`
+
+ // connectivity association key status.
+ Status *string `json:"status" validate:"required"`
+}
+
+// UnmarshalGatewayMacsecConfigPrimaryCak unmarshals an instance of GatewayMacsecConfigPrimaryCak from the specified map of raw messages.
+func UnmarshalGatewayMacsecConfigPrimaryCak(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(GatewayMacsecConfigPrimaryCak)
+ err = core.UnmarshalPrimitive(m, "crn", &obj.Crn)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "status", &obj.Status)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GatewayMacsecConfigTemplate : MACsec configuration information. Contact IBM support for access to MACsec.
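+//
+// Construction sketch (editorial addition) using the generated constructors
+// defined below; keyCRN is a placeholder for a connectivity association key
+// crn:
+//
+//   primaryCak, _ := directLink.NewGatewayMacsecConfigTemplatePrimaryCak(keyCRN)
+//   macsecConfig, err := directLink.NewGatewayMacsecConfigTemplate(true, primaryCak)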
+type GatewayMacsecConfigTemplate struct {
+ // Indicate whether MACsec protection should be active (true) or inactive (false) for this MACsec enabled gateway.
+ Active *bool `json:"active" validate:"required"`
+
+ // Fallback connectivity association key.
+ //
+ // The `fallback_cak` crn cannot match the `primary_cak` crn.
+ // MACsec keys must be type=standard with key name lengths between 2 and 64 inclusive and contain only characters
+ // [a-fA-F0-9].
+ // The key material must be exactly 64 characters in length and contain only [a-fA-F0-9].
+ FallbackCak *GatewayMacsecConfigTemplateFallbackCak `json:"fallback_cak,omitempty"`
+
+ // Desired primary connectivity association key.
+ //
+ // MACsec keys must be type=standard with key name lengths between 2 and 64 inclusive and contain only characters
+ // [a-fA-F0-9].
+ // The key material must be exactly 64 characters in length and contain only [a-fA-F0-9].
+ PrimaryCak *GatewayMacsecConfigTemplatePrimaryCak `json:"primary_cak" validate:"required"`
+
+ // replay protection window size.
+ WindowSize *int64 `json:"window_size,omitempty"`
+}
+
+// NewGatewayMacsecConfigTemplate : Instantiate GatewayMacsecConfigTemplate (Generic Model Constructor)
+func (*DirectLinkV1) NewGatewayMacsecConfigTemplate(active bool, primaryCak *GatewayMacsecConfigTemplatePrimaryCak) (model *GatewayMacsecConfigTemplate, err error) {
+ model = &GatewayMacsecConfigTemplate{
+ Active: core.BoolPtr(active),
+ PrimaryCak: primaryCak,
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalGatewayMacsecConfigTemplate unmarshals an instance of GatewayMacsecConfigTemplate from the specified map of raw messages.
+func UnmarshalGatewayMacsecConfigTemplate(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(GatewayMacsecConfigTemplate)
+ err = core.UnmarshalPrimitive(m, "active", &obj.Active)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "fallback_cak", &obj.FallbackCak, UnmarshalGatewayMacsecConfigTemplateFallbackCak)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "primary_cak", &obj.PrimaryCak, UnmarshalGatewayMacsecConfigTemplatePrimaryCak)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "window_size", &obj.WindowSize)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GatewayMacsecConfigTemplateFallbackCak : Fallback connectivity association key.
+//
+// The `fallback_cak` crn cannot match the `primary_cak` crn. MACsec keys must be type=standard with key name lengths
+// between 2 and 64 inclusive and contain only characters [a-fA-F0-9]. The key material must be exactly 64 characters in
+// length and contain only [a-fA-F0-9].
+type GatewayMacsecConfigTemplateFallbackCak struct {
+ // connectivity association key crn.
+ Crn *string `json:"crn" validate:"required"`
+}
+
+// NewGatewayMacsecConfigTemplateFallbackCak : Instantiate GatewayMacsecConfigTemplateFallbackCak (Generic Model Constructor)
+func (*DirectLinkV1) NewGatewayMacsecConfigTemplateFallbackCak(crn string) (model *GatewayMacsecConfigTemplateFallbackCak, err error) {
+ model = &GatewayMacsecConfigTemplateFallbackCak{
+ Crn: core.StringPtr(crn),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalGatewayMacsecConfigTemplateFallbackCak unmarshals an instance of GatewayMacsecConfigTemplateFallbackCak from the specified map of raw messages.
+func UnmarshalGatewayMacsecConfigTemplateFallbackCak(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(GatewayMacsecConfigTemplateFallbackCak)
+ err = core.UnmarshalPrimitive(m, "crn", &obj.Crn)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GatewayMacsecConfigTemplatePrimaryCak : Desired primary connectivity association key.
+//
+// MACsec keys must be type=standard with key name lengths between 2 and 64 inclusive and contain only characters
+// [a-fA-F0-9]. The key material must be exactly 64 characters in length and contain only [a-fA-F0-9].
+type GatewayMacsecConfigTemplatePrimaryCak struct {
+ // connectivity association key crn.
+ Crn *string `json:"crn" validate:"required"`
+}
+
+// NewGatewayMacsecConfigTemplatePrimaryCak : Instantiate GatewayMacsecConfigTemplatePrimaryCak (Generic Model Constructor)
+func (*DirectLinkV1) NewGatewayMacsecConfigTemplatePrimaryCak(crn string) (model *GatewayMacsecConfigTemplatePrimaryCak, err error) {
+ model = &GatewayMacsecConfigTemplatePrimaryCak{
+ Crn: core.StringPtr(crn),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalGatewayMacsecConfigTemplatePrimaryCak unmarshals an instance of GatewayMacsecConfigTemplatePrimaryCak from the specified map of raw messages.
+func UnmarshalGatewayMacsecConfigTemplatePrimaryCak(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(GatewayMacsecConfigTemplatePrimaryCak)
+ err = core.UnmarshalPrimitive(m, "crn", &obj.Crn)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GatewayPort : gateway port for type=connect gateways.
+type GatewayPort struct {
+ // Port Identifier.
+ ID *string `json:"id" validate:"required"`
+}
+
+// UnmarshalGatewayPort unmarshals an instance of GatewayPort from the specified map of raw messages.
+func UnmarshalGatewayPort(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(GatewayPort)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GatewayPortIdentity : Select Port Label for new type=connect gateway.
+type GatewayPortIdentity struct {
+ // port id.
+ ID *string `json:"id" validate:"required"`
+}
+
+// NewGatewayPortIdentity : Instantiate GatewayPortIdentity (Generic Model Constructor)
+func (*DirectLinkV1) NewGatewayPortIdentity(id string) (model *GatewayPortIdentity, err error) {
+ model = &GatewayPortIdentity{
+ ID: core.StringPtr(id),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalGatewayPortIdentity unmarshals an instance of GatewayPortIdentity from the specified map of raw messages.
+func UnmarshalGatewayPortIdentity(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(GatewayPortIdentity)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// GatewayStatistic : Gateway statistics. Currently data retrieval is only supported for MACsec configurations.
+type GatewayStatistic struct {
+ // Date and time data was collected.
+ CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"`
+
+ // statistics output.
+ Data *string `json:"data" validate:"required"`
+
+ // statistic type.
+	Type *string `json:"type" validate:"required"`
+}
+
+// Constants associated with the GatewayStatistic.Type property.
+// statistic type.
+const (
+	GatewayStatistic_Type_MacsecMkaSession    = "macsec_mka_session"
+	GatewayStatistic_Type_MacsecMkaStatistics = "macsec_mka_statistics"
+	GatewayStatistic_Type_MacsecPolicy        = "macsec_policy"
+)
+
+// UnmarshalGatewayStatistic unmarshals an instance of GatewayStatistic from the specified map of raw messages.
+func UnmarshalGatewayStatistic(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(GatewayStatistic)
+	err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "data", &obj.Data)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// GatewayStatisticCollection : gateway statistics.
+type GatewayStatisticCollection struct {
+	// Collection of gateway statistics.
+	Statistics []GatewayStatistic `json:"statistics" validate:"required"`
+}
+
+// UnmarshalGatewayStatisticCollection unmarshals an instance of GatewayStatisticCollection from the specified map of raw messages.
+func UnmarshalGatewayStatisticCollection(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(GatewayStatisticCollection)
+	err = core.UnmarshalModel(m, "statistics", &obj.Statistics, UnmarshalGatewayStatistic)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// GatewayTemplate : Create gateway template.
+// Models which "extend" this model:
+// - GatewayTemplateGatewayTypeDedicatedTemplate
+// - GatewayTemplateGatewayTypeConnectTemplate
+type GatewayTemplate struct {
+	// BGP ASN.
+	BgpAsn *int64 `json:"bgp_asn" validate:"required"`
+
+	// (DEPRECATED) BGP base CIDR.
+	//
+	// Field is deprecated. See bgp_ibm_cidr and bgp_cer_cidr for details on how to create a gateway using either
+	// automatic or explicit IP assignment. Any bgp_base_cidr value set will be ignored.
+	//
+	// Deprecated field bgp_base_cidr will be removed from the API specification after 15-MAR-2021.
+	BgpBaseCidr *string `json:"bgp_base_cidr,omitempty"`
+
+	// BGP customer edge router CIDR.
+	//
+	// For auto IP assignment, omit bgp_cer_cidr and bgp_ibm_cidr. IBM will automatically select values for bgp_cer_cidr
+	// and bgp_ibm_cidr.
+	//
+	// For explicit IP assignment set a valid bgp_cer_cidr and bgp_ibm_cidr CIDR, the value must reside in one of
+	// "10.254.0.0/16", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16" or an owned public CIDR. bgp_cer_cidr and
+	// bgp_ibm_cidr must have matching network and subnet mask values.
+	BgpCerCidr *string `json:"bgp_cer_cidr,omitempty"`
+
+	// BGP IBM CIDR.
+	//
+	// For auto IP assignment, omit bgp_cer_cidr and bgp_ibm_cidr. IBM will automatically select values for bgp_cer_cidr
+	// and bgp_ibm_cidr.
+	//
+	// For explicit IP assignment set a valid bgp_cer_cidr and bgp_ibm_cidr CIDR, the value must reside in one of
+	// "10.254.0.0/16", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16" or an owned public CIDR. bgp_cer_cidr and
+	// bgp_ibm_cidr must have matching network and subnet mask values.
+	BgpIbmCidr *string `json:"bgp_ibm_cidr,omitempty"`
+
+	// Gateways with global routing (`true`) can connect to networks outside their associated region.
+	Global *bool `json:"global" validate:"required"`
+
+	// Metered billing option.
When `true` gateway usage is billed per gigabyte. When `false` there is no per gigabyte + // usage charge, instead a flat rate is charged for the gateway. + Metered *bool `json:"metered" validate:"required"` + + // The unique user-defined name for this gateway. + Name *string `json:"name" validate:"required"` + + // Resource group for this resource. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup *ResourceGroupIdentity `json:"resource_group,omitempty"` + + // Gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps" validate:"required"` + + // Gateway type. + Type *string `json:"type" validate:"required"` + + // Carrier name. + CarrierName *string `json:"carrier_name,omitempty"` + + // Cross connect router. + CrossConnectRouter *string `json:"cross_connect_router,omitempty"` + + // Customer name. + CustomerName *string `json:"customer_name,omitempty"` + + // Gateway location. + LocationName *string `json:"location_name,omitempty"` + + // MACsec configuration information. Contact IBM support for access to MACsec. + MacsecConfig *GatewayMacsecConfigTemplate `json:"macsec_config,omitempty"` + + // Select Port Label for new type=connect gateway. + Port *GatewayPortIdentity `json:"port,omitempty"` +} + +// Constants associated with the GatewayTemplate.Type property. +// Gateway type. +const ( + GatewayTemplate_Type_Connect = "connect" + GatewayTemplate_Type_Dedicated = "dedicated" +) + +func (*GatewayTemplate) isaGatewayTemplate() bool { + return true +} + +type GatewayTemplateIntf interface { + isaGatewayTemplate() bool +} + +// UnmarshalGatewayTemplate unmarshals an instance of GatewayTemplate from the specified map of raw messages. +func UnmarshalGatewayTemplate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayTemplate) + err = core.UnmarshalPrimitive(m, "bgp_asn", &obj.BgpAsn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_base_cidr", &obj.BgpBaseCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_cer_cidr", &obj.BgpCerCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_ibm_cidr", &obj.BgpIbmCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "global", &obj.Global) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "metered", &obj.Metered) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "carrier_name", &obj.CarrierName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cross_connect_router", &obj.CrossConnectRouter) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "customer_name", &obj.CustomerName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "location_name", &obj.LocationName) + if err != nil { + return + } + err = core.UnmarshalModel(m, "macsec_config", &obj.MacsecConfig, UnmarshalGatewayMacsecConfigTemplate) + if err != nil { + return + } + err = core.UnmarshalModel(m, "port", &obj.Port, UnmarshalGatewayPortIdentity) + if err != nil { + return + } + 
reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayVirtualConnection : Virtual connection. +type GatewayVirtualConnection struct { + // The date and time resource was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The unique identifier for this virtual connection. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this virtual connection. Virtual connection names are unique within a gateway. This is + // the name of the virtual connection itself, the network being connected may have its own name attribute. + Name *string `json:"name" validate:"required"` + + // For virtual connections across two different IBM Cloud Accounts network_account indicates the account that owns the + // target network. + NetworkAccount *string `json:"network_account,omitempty"` + + // Unique identifier of the target network. For type=vpc virtual connections this is the CRN of the target VPC. This + // field does not apply to type=classic connections. + NetworkID *string `json:"network_id,omitempty"` + + // Status of the virtual connection. + // + // The list of enumerated values for this property may expand in the future. Code and processes using this field must + // tolerate unexpected values. + Status *string `json:"status" validate:"required"` + + // Virtual connection type. + // + // The list of enumerated values for this property may expand in the future. Code and processes using this field must + // tolerate unexpected values. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the GatewayVirtualConnection.Status property. +// Status of the virtual connection. +// +// The list of enumerated values for this property may expand in the future. Code and processes using this field must +// tolerate unexpected values. +const ( + GatewayVirtualConnection_Status_ApprovalPending = "approval_pending" + GatewayVirtualConnection_Status_Attached = "attached" + GatewayVirtualConnection_Status_Deleting = "deleting" + GatewayVirtualConnection_Status_DetachedByNetwork = "detached_by_network" + GatewayVirtualConnection_Status_DetachedByNetworkPending = "detached_by_network_pending" + GatewayVirtualConnection_Status_Expired = "expired" + GatewayVirtualConnection_Status_Pending = "pending" + GatewayVirtualConnection_Status_Rejected = "rejected" +) + +// Constants associated with the GatewayVirtualConnection.Type property. +// Virtual connection type. +// +// The list of enumerated values for this property may expand in the future. Code and processes using this field must +// tolerate unexpected values. +const ( + GatewayVirtualConnection_Type_Classic = "classic" + GatewayVirtualConnection_Type_Vpc = "vpc" +) + +// UnmarshalGatewayVirtualConnection unmarshals an instance of GatewayVirtualConnection from the specified map of raw messages. 
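+//
+// Example (illustrative sketch, not part of the generated model code): reading
+// one virtual connection, assuming a configured `directLink` client whose
+// GetGatewayVirtualConnection method is defined earlier in this file, and
+// placeholder gateway/connection IDs.
+//
+//	getVcOptions := directLink.NewGetGatewayVirtualConnectionOptions(gatewayID, connectionID)
+//	vc, response, err := directLink.GetGatewayVirtualConnection(getVcOptions)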
+func UnmarshalGatewayVirtualConnection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayVirtualConnection) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "network_account", &obj.NetworkAccount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "network_id", &obj.NetworkID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayVirtualConnectionCollection : Virtual connection collection. +type GatewayVirtualConnectionCollection struct { + // virtual connection array. + VirtualConnections []GatewayVirtualConnection `json:"virtual_connections" validate:"required"` +} + +// UnmarshalGatewayVirtualConnectionCollection unmarshals an instance of GatewayVirtualConnectionCollection from the specified map of raw messages. +func UnmarshalGatewayVirtualConnectionCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayVirtualConnectionCollection) + err = core.UnmarshalModel(m, "virtual_connections", &obj.VirtualConnections, UnmarshalGatewayVirtualConnection) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetGatewayOptions : The GetGateway options. +type GetGatewayOptions struct { + // Direct Link gateway identifier. + ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetGatewayOptions : Instantiate GetGatewayOptions +func (*DirectLinkV1) NewGetGatewayOptions(id string) *GetGatewayOptions { + return &GetGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetGatewayOptions) SetID(id string) *GetGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetGatewayOptions) SetHeaders(param map[string]string) *GetGatewayOptions { + options.Headers = param + return options +} + +// GetGatewayStatisticsOptions : The GetGatewayStatistics options. +type GetGatewayStatisticsOptions struct { + // Direct Link Dedicated gateway identifier. + ID *string `json:"id" validate:"required"` + + // specify statistic to retrieve. + Type *string `json:"type" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetGatewayStatisticsOptions.Type property. +// specify statistic to retrieve. 
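+//
+// Example (illustrative sketch, not part of the generated model code):
+// retrieving MACsec MKA statistics for a dedicated gateway, assuming a
+// configured `directLink` client whose GetGatewayStatistics method is defined
+// earlier in this file, and a placeholder gateway ID.
+//
+//	statsOptions := directLink.NewGetGatewayStatisticsOptions(gatewayID, directlinkv1.GetGatewayStatisticsOptions_Type_MacsecMka)
+//	stats, response, err := directLink.GetGatewayStatistics(statsOptions)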
+const ( + GetGatewayStatisticsOptions_Type_MacsecMka = "macsec_mka" + GetGatewayStatisticsOptions_Type_MacsecSecurity = "macsec_security" +) + +// NewGetGatewayStatisticsOptions : Instantiate GetGatewayStatisticsOptions +func (*DirectLinkV1) NewGetGatewayStatisticsOptions(id string, typeVar string) *GetGatewayStatisticsOptions { + return &GetGatewayStatisticsOptions{ + ID: core.StringPtr(id), + Type: core.StringPtr(typeVar), + } +} + +// SetID : Allow user to set ID +func (options *GetGatewayStatisticsOptions) SetID(id string) *GetGatewayStatisticsOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetType : Allow user to set Type +func (options *GetGatewayStatisticsOptions) SetType(typeVar string) *GetGatewayStatisticsOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetGatewayStatisticsOptions) SetHeaders(param map[string]string) *GetGatewayStatisticsOptions { + options.Headers = param + return options +} + +// GetGatewayVirtualConnectionOptions : The GetGatewayVirtualConnection options. +type GetGatewayVirtualConnectionOptions struct { + // Direct Link gateway identifier. + GatewayID *string `json:"gateway_id" validate:"required"` + + // The virtual connection identifier. + ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetGatewayVirtualConnectionOptions : Instantiate GetGatewayVirtualConnectionOptions +func (*DirectLinkV1) NewGetGatewayVirtualConnectionOptions(gatewayID string, id string) *GetGatewayVirtualConnectionOptions { + return &GetGatewayVirtualConnectionOptions{ + GatewayID: core.StringPtr(gatewayID), + ID: core.StringPtr(id), + } +} + +// SetGatewayID : Allow user to set GatewayID +func (options *GetGatewayVirtualConnectionOptions) SetGatewayID(gatewayID string) *GetGatewayVirtualConnectionOptions { + options.GatewayID = core.StringPtr(gatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *GetGatewayVirtualConnectionOptions) SetID(id string) *GetGatewayVirtualConnectionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetGatewayVirtualConnectionOptions) SetHeaders(param map[string]string) *GetGatewayVirtualConnectionOptions { + options.Headers = param + return options +} + +// GetPortOptions : The GetPort options. +type GetPortOptions struct { + // The port identifier. + ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetPortOptions : Instantiate GetPortOptions +func (*DirectLinkV1) NewGetPortOptions(id string) *GetPortOptions { + return &GetPortOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetPortOptions) SetID(id string) *GetPortOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetPortOptions) SetHeaders(param map[string]string) *GetPortOptions { + options.Headers = param + return options +} + +// ListGatewayCompletionNoticeOptions : The ListGatewayCompletionNotice options. +type ListGatewayCompletionNoticeOptions struct { + // Direct Link Dedicated gateway identifier. 
+ ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListGatewayCompletionNoticeOptions : Instantiate ListGatewayCompletionNoticeOptions +func (*DirectLinkV1) NewListGatewayCompletionNoticeOptions(id string) *ListGatewayCompletionNoticeOptions { + return &ListGatewayCompletionNoticeOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *ListGatewayCompletionNoticeOptions) SetID(id string) *ListGatewayCompletionNoticeOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListGatewayCompletionNoticeOptions) SetHeaders(param map[string]string) *ListGatewayCompletionNoticeOptions { + options.Headers = param + return options +} + +// ListGatewayLetterOfAuthorizationOptions : The ListGatewayLetterOfAuthorization options. +type ListGatewayLetterOfAuthorizationOptions struct { + // Direct Link Dedicated gateway identifier. + ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListGatewayLetterOfAuthorizationOptions : Instantiate ListGatewayLetterOfAuthorizationOptions +func (*DirectLinkV1) NewListGatewayLetterOfAuthorizationOptions(id string) *ListGatewayLetterOfAuthorizationOptions { + return &ListGatewayLetterOfAuthorizationOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *ListGatewayLetterOfAuthorizationOptions) SetID(id string) *ListGatewayLetterOfAuthorizationOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListGatewayLetterOfAuthorizationOptions) SetHeaders(param map[string]string) *ListGatewayLetterOfAuthorizationOptions { + options.Headers = param + return options +} + +// ListGatewayVirtualConnectionsOptions : The ListGatewayVirtualConnections options. +type ListGatewayVirtualConnectionsOptions struct { + // Direct Link gateway identifier. + GatewayID *string `json:"gateway_id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListGatewayVirtualConnectionsOptions : Instantiate ListGatewayVirtualConnectionsOptions +func (*DirectLinkV1) NewListGatewayVirtualConnectionsOptions(gatewayID string) *ListGatewayVirtualConnectionsOptions { + return &ListGatewayVirtualConnectionsOptions{ + GatewayID: core.StringPtr(gatewayID), + } +} + +// SetGatewayID : Allow user to set GatewayID +func (options *ListGatewayVirtualConnectionsOptions) SetGatewayID(gatewayID string) *ListGatewayVirtualConnectionsOptions { + options.GatewayID = core.StringPtr(gatewayID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListGatewayVirtualConnectionsOptions) SetHeaders(param map[string]string) *ListGatewayVirtualConnectionsOptions { + options.Headers = param + return options +} + +// ListGatewaysOptions : The ListGateways options. 
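+//
+// Example (illustrative sketch, not part of the generated model code): listing
+// every gateway in the account, assuming a configured `directLink` client
+// whose ListGateways method is defined earlier in this file.
+//
+//	gateways, response, err := directLink.ListGateways(directLink.NewListGatewaysOptions())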
+type ListGatewaysOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListGatewaysOptions : Instantiate ListGatewaysOptions +func (*DirectLinkV1) NewListGatewaysOptions() *ListGatewaysOptions { + return &ListGatewaysOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListGatewaysOptions) SetHeaders(param map[string]string) *ListGatewaysOptions { + options.Headers = param + return options +} + +// ListOfferingTypeLocationCrossConnectRoutersOptions : The ListOfferingTypeLocationCrossConnectRouters options. +type ListOfferingTypeLocationCrossConnectRoutersOptions struct { + // The Direct Link offering type. Only value `"dedicated"` is supported for this API. + OfferingType *string `json:"offering_type" validate:"required"` + + // The name of the Direct Link location. + LocationName *string `json:"location_name" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListOfferingTypeLocationCrossConnectRoutersOptions.OfferingType property. +// The Direct Link offering type. Only value `"dedicated"` is supported for this API. +const ( + ListOfferingTypeLocationCrossConnectRoutersOptions_OfferingType_Dedicated = "dedicated" +) + +// NewListOfferingTypeLocationCrossConnectRoutersOptions : Instantiate ListOfferingTypeLocationCrossConnectRoutersOptions +func (*DirectLinkV1) NewListOfferingTypeLocationCrossConnectRoutersOptions(offeringType string, locationName string) *ListOfferingTypeLocationCrossConnectRoutersOptions { + return &ListOfferingTypeLocationCrossConnectRoutersOptions{ + OfferingType: core.StringPtr(offeringType), + LocationName: core.StringPtr(locationName), + } +} + +// SetOfferingType : Allow user to set OfferingType +func (options *ListOfferingTypeLocationCrossConnectRoutersOptions) SetOfferingType(offeringType string) *ListOfferingTypeLocationCrossConnectRoutersOptions { + options.OfferingType = core.StringPtr(offeringType) + return options +} + +// SetLocationName : Allow user to set LocationName +func (options *ListOfferingTypeLocationCrossConnectRoutersOptions) SetLocationName(locationName string) *ListOfferingTypeLocationCrossConnectRoutersOptions { + options.LocationName = core.StringPtr(locationName) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListOfferingTypeLocationCrossConnectRoutersOptions) SetHeaders(param map[string]string) *ListOfferingTypeLocationCrossConnectRoutersOptions { + options.Headers = param + return options +} + +// ListOfferingTypeLocationsOptions : The ListOfferingTypeLocations options. +type ListOfferingTypeLocationsOptions struct { + // The Direct Link offering type. Current supported values are `"dedicated"` and `"connect"`. + OfferingType *string `json:"offering_type" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListOfferingTypeLocationsOptions.OfferingType property. +// The Direct Link offering type. Current supported values are `"dedicated"` and `"connect"`. 
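+//
+// Example (illustrative sketch, not part of the generated model code): listing
+// the locations where connect gateways can be provisioned, assuming a
+// configured `directLink` client whose ListOfferingTypeLocations method is
+// defined earlier in this file.
+//
+//	locOptions := directLink.NewListOfferingTypeLocationsOptions(directlinkv1.ListOfferingTypeLocationsOptions_OfferingType_Connect)
+//	locations, response, err := directLink.ListOfferingTypeLocations(locOptions)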
+const ( + ListOfferingTypeLocationsOptions_OfferingType_Connect = "connect" + ListOfferingTypeLocationsOptions_OfferingType_Dedicated = "dedicated" +) + +// NewListOfferingTypeLocationsOptions : Instantiate ListOfferingTypeLocationsOptions +func (*DirectLinkV1) NewListOfferingTypeLocationsOptions(offeringType string) *ListOfferingTypeLocationsOptions { + return &ListOfferingTypeLocationsOptions{ + OfferingType: core.StringPtr(offeringType), + } +} + +// SetOfferingType : Allow user to set OfferingType +func (options *ListOfferingTypeLocationsOptions) SetOfferingType(offeringType string) *ListOfferingTypeLocationsOptions { + options.OfferingType = core.StringPtr(offeringType) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListOfferingTypeLocationsOptions) SetHeaders(param map[string]string) *ListOfferingTypeLocationsOptions { + options.Headers = param + return options +} + +// ListOfferingTypeSpeedsOptions : The ListOfferingTypeSpeeds options. +type ListOfferingTypeSpeedsOptions struct { + // The Direct Link offering type. Current supported values are `"dedicated"` and `"connect"`. + OfferingType *string `json:"offering_type" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListOfferingTypeSpeedsOptions.OfferingType property. +// The Direct Link offering type. Current supported values are `"dedicated"` and `"connect"`. +const ( + ListOfferingTypeSpeedsOptions_OfferingType_Connect = "connect" + ListOfferingTypeSpeedsOptions_OfferingType_Dedicated = "dedicated" +) + +// NewListOfferingTypeSpeedsOptions : Instantiate ListOfferingTypeSpeedsOptions +func (*DirectLinkV1) NewListOfferingTypeSpeedsOptions(offeringType string) *ListOfferingTypeSpeedsOptions { + return &ListOfferingTypeSpeedsOptions{ + OfferingType: core.StringPtr(offeringType), + } +} + +// SetOfferingType : Allow user to set OfferingType +func (options *ListOfferingTypeSpeedsOptions) SetOfferingType(offeringType string) *ListOfferingTypeSpeedsOptions { + options.OfferingType = core.StringPtr(offeringType) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListOfferingTypeSpeedsOptions) SetHeaders(param map[string]string) *ListOfferingTypeSpeedsOptions { + options.Headers = param + return options +} + +// ListPortsOptions : The ListPorts options. +type ListPortsOptions struct { + // A server-supplied token determining which resource to start the page on. + Start *string `json:"start,omitempty"` + + // The number of resources to return on a page. + Limit *int64 `json:"limit,omitempty"` + + // Direct Link location short name. 
+ LocationName *string `json:"location_name,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListPortsOptions : Instantiate ListPortsOptions +func (*DirectLinkV1) NewListPortsOptions() *ListPortsOptions { + return &ListPortsOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListPortsOptions) SetStart(start string) *ListPortsOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListPortsOptions) SetLimit(limit int64) *ListPortsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetLocationName : Allow user to set LocationName +func (options *ListPortsOptions) SetLocationName(locationName string) *ListPortsOptions { + options.LocationName = core.StringPtr(locationName) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListPortsOptions) SetHeaders(param map[string]string) *ListPortsOptions { + options.Headers = param + return options +} + +// LocationCollection : location collection. +type LocationCollection struct { + // Collection of Direct Link locations. + Locations []LocationOutput `json:"locations" validate:"required"` +} + +// UnmarshalLocationCollection unmarshals an instance of LocationCollection from the specified map of raw messages. +func UnmarshalLocationCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LocationCollection) + err = core.UnmarshalModel(m, "locations", &obj.Locations, UnmarshalLocationOutput) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LocationCrossConnectRouterCollection : List of cross connect router details. +type LocationCrossConnectRouterCollection struct { + // Array of Routers for this location. + CrossConnectRouters []CrossConnectRouter `json:"cross_connect_routers,omitempty"` +} + +// UnmarshalLocationCrossConnectRouterCollection unmarshals an instance of LocationCrossConnectRouterCollection from the specified map of raw messages. +func UnmarshalLocationCrossConnectRouterCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LocationCrossConnectRouterCollection) + err = core.UnmarshalModel(m, "cross_connect_routers", &obj.CrossConnectRouters, UnmarshalCrossConnectRouter) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LocationOutput : location. +type LocationOutput struct { + // Billing location. Only present for locations where provisioning is enabled. + BillingLocation *string `json:"billing_location,omitempty"` + + // Building colocation owner. Only present for offering_type=dedicated locations where provisioning is enabled. + BuildingColocationOwner *string `json:"building_colocation_owner,omitempty"` + + // Location long name. + DisplayName *string `json:"display_name" validate:"required"` + + // Location type. + LocationType *string `json:"location_type" validate:"required"` + + // Indicate whether location supports MACsec. Only returned for gateway type=dedicated locations. Contact IBM support + // for access to MACsec. + MacsecEnabled *bool `json:"macsec_enabled,omitempty"` + + // Location market. + Market *string `json:"market" validate:"required"` + + // Location geography. Only present for locations where provisioning is enabled. + MarketGeography *string `json:"market_geography,omitempty"` + + // Is location a multi-zone region (MZR). 
Only present for locations where provisioning is enabled. + Mzr *bool `json:"mzr,omitempty"` + + // Location short name. + Name *string `json:"name" validate:"required"` + + // Location offering type. + OfferingType *string `json:"offering_type" validate:"required"` + + // Indicates for the specific offering_type whether this location supports gateway provisioning. + ProvisionEnabled *bool `json:"provision_enabled" validate:"required"` + + // Location's VPC region. Only present for locations where provisioning is enabled. + VpcRegion *string `json:"vpc_region,omitempty"` +} + +// UnmarshalLocationOutput unmarshals an instance of LocationOutput from the specified map of raw messages. +func UnmarshalLocationOutput(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LocationOutput) + err = core.UnmarshalPrimitive(m, "billing_location", &obj.BillingLocation) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "building_colocation_owner", &obj.BuildingColocationOwner) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "display_name", &obj.DisplayName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "location_type", &obj.LocationType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "macsec_enabled", &obj.MacsecEnabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "market", &obj.Market) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "market_geography", &obj.MarketGeography) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mzr", &obj.Mzr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offering_type", &obj.OfferingType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "provision_enabled", &obj.ProvisionEnabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "vpc_region", &obj.VpcRegion) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OfferingSpeed : Speed. +type OfferingSpeed struct { + // Array of capabilities for billing option. + Capabilities []string `json:"capabilities" validate:"required"` + + // Link speed in megabits per second. + LinkSpeed *int64 `json:"link_speed" validate:"required"` + + // Indicate whether speed supports MACsec. Only returned for gateway type=dedicated speeds. Contact IBM support for + // access to MACsec. + MacsecEnabled *bool `json:"macsec_enabled,omitempty"` +} + +// UnmarshalOfferingSpeed unmarshals an instance of OfferingSpeed from the specified map of raw messages. +func UnmarshalOfferingSpeed(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OfferingSpeed) + err = core.UnmarshalPrimitive(m, "capabilities", &obj.Capabilities) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "link_speed", &obj.LinkSpeed) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "macsec_enabled", &obj.MacsecEnabled) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OfferingSpeedCollection : OfferingSpeedCollection struct +type OfferingSpeedCollection struct { + // speed list. + Speeds []OfferingSpeed `json:"speeds" validate:"required"` +} + +// UnmarshalOfferingSpeedCollection unmarshals an instance of OfferingSpeedCollection from the specified map of raw messages. 
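+//
+// Example (illustrative sketch, not part of the generated model code): listing
+// the speeds available for dedicated gateways, assuming a configured
+// `directLink` client whose ListOfferingTypeSpeeds method is defined earlier
+// in this file.
+//
+//	speedOptions := directLink.NewListOfferingTypeSpeedsOptions(directlinkv1.ListOfferingTypeSpeedsOptions_OfferingType_Dedicated)
+//	speeds, response, err := directLink.ListOfferingTypeSpeeds(speedOptions)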
+func UnmarshalOfferingSpeedCollection(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(OfferingSpeedCollection)
+	err = core.UnmarshalModel(m, "speeds", &obj.Speeds, UnmarshalOfferingSpeed)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// Port : Provider port details.
+type Port struct {
+	// Count of existing Direct Link gateways in this account on this port.
+	DirectLinkCount *int64 `json:"direct_link_count" validate:"required"`
+
+	// Port identifier.
+	ID *string `json:"id" validate:"required"`
+
+	// Port Label.
+	Label *string `json:"label" validate:"required"`
+
+	// Port location long name.
+	LocationDisplayName *string `json:"location_display_name" validate:"required"`
+
+	// Port location name identifier.
+	LocationName *string `json:"location_name" validate:"required"`
+
+	// Port's provider name.
+	ProviderName *string `json:"provider_name" validate:"required"`
+
+	// Port's supported speeds in megabits per second.
+	SupportedLinkSpeeds []int64 `json:"supported_link_speeds" validate:"required"`
+}
+
+// UnmarshalPort unmarshals an instance of Port from the specified map of raw messages.
+func UnmarshalPort(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(Port)
+	err = core.UnmarshalPrimitive(m, "direct_link_count", &obj.DirectLinkCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "label", &obj.Label)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "location_display_name", &obj.LocationDisplayName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "location_name", &obj.LocationName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "provider_name", &obj.ProviderName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "supported_link_speeds", &obj.SupportedLinkSpeeds)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// PortCollection : List of port label details.
+type PortCollection struct {
+	// A reference to the first page of resources.
+	First *PortsPaginatedCollectionFirst `json:"first" validate:"required"`
+
+	// The maximum number of resources that can be returned by the request.
+	Limit *int64 `json:"limit" validate:"required"`
+
+	// A reference to the next page of resources; this reference is included for all pages except the last page.
+	Next *PortsPaginatedCollectionNext `json:"next,omitempty"`
+
+	// The total number of resources across all pages.
+	TotalCount *int64 `json:"total_count" validate:"required"`
+
+	// Array of ports.
+	Ports []Port `json:"ports,omitempty"`
+}
+
+// UnmarshalPortCollection unmarshals an instance of PortCollection from the specified map of raw messages.
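+//
+// Example (illustrative sketch, not part of the generated model code): paging
+// through ports 50 at a time by feeding the `start` token from `next` back
+// into the request, assuming a configured `directLink` client whose ListPorts
+// method is defined earlier in this file.
+//
+//	portsOptions := directLink.NewListPortsOptions()
+//	portsOptions.SetLimit(50)
+//	for {
+//		page, _, err := directLink.ListPorts(portsOptions)
+//		if err != nil {
+//			break
+//		}
+//		// ...consume page.Ports...
+//		if page.Next == nil || page.Next.Start == nil {
+//			break
+//		}
+//		portsOptions.SetStart(*page.Next.Start)
+//	}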
+func UnmarshalPortCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PortCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalPortsPaginatedCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalPortsPaginatedCollectionNext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ports", &obj.Ports, UnmarshalPort) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PortsPaginatedCollectionFirst : A reference to the first page of resources. +type PortsPaginatedCollectionFirst struct { + // The URL for the first page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalPortsPaginatedCollectionFirst unmarshals an instance of PortsPaginatedCollectionFirst from the specified map of raw messages. +func UnmarshalPortsPaginatedCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PortsPaginatedCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PortsPaginatedCollectionNext : A reference to the next page of resources; this reference is included for all pages except the last page. +type PortsPaginatedCollectionNext struct { + // URL for the next page of resources. + Href *string `json:"href" validate:"required"` + + // start token for the next page of resources. + Start *string `json:"start,omitempty"` +} + +// UnmarshalPortsPaginatedCollectionNext unmarshals an instance of PortsPaginatedCollectionNext from the specified map of raw messages. +func UnmarshalPortsPaginatedCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PortsPaginatedCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "start", &obj.Start) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceGroupIdentity : Resource group for this resource. If unspecified, the account's [default resource +// group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. +type ResourceGroupIdentity struct { + // Resource group identifier. + ID *string `json:"id" validate:"required"` +} + +// NewResourceGroupIdentity : Instantiate ResourceGroupIdentity (Generic Model Constructor) +func (*DirectLinkV1) NewResourceGroupIdentity(id string) (model *ResourceGroupIdentity, err error) { + model = &ResourceGroupIdentity{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalResourceGroupIdentity unmarshals an instance of ResourceGroupIdentity from the specified map of raw messages. +func UnmarshalResourceGroupIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceGroupIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceGroupReference : Resource group reference. +type ResourceGroupReference struct { + // Resource group identifier. 
+ ID *string `json:"id" validate:"required"` +} + +// UnmarshalResourceGroupReference unmarshals an instance of ResourceGroupReference from the specified map of raw messages. +func UnmarshalResourceGroupReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceGroupReference) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateGatewayOptions : The UpdateGateway options. +type UpdateGatewayOptions struct { + // Direct Link gateway identifier. + ID *string `json:"id" validate:"required"` + + // Gateways with global routing (`true`) can connect to networks outside of their associated region. + Global *bool `json:"global,omitempty"` + + // Use this field during LOA rejection to provide the reason for the rejection. + // + // Only allowed for type=dedicated gateways. + LoaRejectReason *string `json:"loa_reject_reason,omitempty"` + + // MACsec configuration information. When patching any macsec_config fields, no other fields may be specified in the + // patch request. Contact IBM support for access to MACsec. + // + // A MACsec config cannot be added to a gateway created without MACsec. + MacsecConfig *GatewayMacsecConfigPatchTemplate `json:"macsec_config,omitempty"` + + // Metered billing option. When `true` gateway usage is billed per gigabyte. When `false` there is no per gigabyte + // usage charge, instead a flat rate is charged for the gateway. + Metered *bool `json:"metered,omitempty"` + + // The unique user-defined name for this gateway. + Name *string `json:"name,omitempty"` + + // Gateway operational status. + // + // For gateways pending LOA approval, patch operational_status to the appropriate value to approve or reject its LOA. + // When rejecting an LOA, provide reject reasoning in `loa_reject_reason`. + // + // Only allowed for type=dedicated gateways. + OperationalStatus *string `json:"operational_status,omitempty"` + + // Gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateGatewayOptions.OperationalStatus property. +// Gateway operational status. +// +// For gateways pending LOA approval, patch operational_status to the appropriate value to approve or reject its LOA. +// When rejecting an LOA, provide reject reasoning in `loa_reject_reason`. +// +// Only allowed for type=dedicated gateways. 
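+//
+// Example (illustrative sketch, not part of the generated model code):
+// approving a pending LOA on a dedicated gateway, assuming a configured
+// `directLink` client whose UpdateGateway method is defined earlier in this
+// file, and a placeholder gateway ID.
+//
+//	updateOptions := directLink.NewUpdateGatewayOptions(gatewayID)
+//	updateOptions.SetOperationalStatus(directlinkv1.UpdateGatewayOptions_OperationalStatus_LoaAccepted)
+//	gateway, response, err := directLink.UpdateGateway(updateOptions)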
+const ( + UpdateGatewayOptions_OperationalStatus_LoaAccepted = "loa_accepted" + UpdateGatewayOptions_OperationalStatus_LoaRejected = "loa_rejected" +) + +// NewUpdateGatewayOptions : Instantiate UpdateGatewayOptions +func (*DirectLinkV1) NewUpdateGatewayOptions(id string) *UpdateGatewayOptions { + return &UpdateGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *UpdateGatewayOptions) SetID(id string) *UpdateGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetGlobal : Allow user to set Global +func (options *UpdateGatewayOptions) SetGlobal(global bool) *UpdateGatewayOptions { + options.Global = core.BoolPtr(global) + return options +} + +// SetLoaRejectReason : Allow user to set LoaRejectReason +func (options *UpdateGatewayOptions) SetLoaRejectReason(loaRejectReason string) *UpdateGatewayOptions { + options.LoaRejectReason = core.StringPtr(loaRejectReason) + return options +} + +// SetMacsecConfig : Allow user to set MacsecConfig +func (options *UpdateGatewayOptions) SetMacsecConfig(macsecConfig *GatewayMacsecConfigPatchTemplate) *UpdateGatewayOptions { + options.MacsecConfig = macsecConfig + return options +} + +// SetMetered : Allow user to set Metered +func (options *UpdateGatewayOptions) SetMetered(metered bool) *UpdateGatewayOptions { + options.Metered = core.BoolPtr(metered) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateGatewayOptions) SetName(name string) *UpdateGatewayOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetOperationalStatus : Allow user to set OperationalStatus +func (options *UpdateGatewayOptions) SetOperationalStatus(operationalStatus string) *UpdateGatewayOptions { + options.OperationalStatus = core.StringPtr(operationalStatus) + return options +} + +// SetSpeedMbps : Allow user to set SpeedMbps +func (options *UpdateGatewayOptions) SetSpeedMbps(speedMbps int64) *UpdateGatewayOptions { + options.SpeedMbps = core.Int64Ptr(speedMbps) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateGatewayOptions) SetHeaders(param map[string]string) *UpdateGatewayOptions { + options.Headers = param + return options +} + +// UpdateGatewayVirtualConnectionOptions : The UpdateGatewayVirtualConnection options. +type UpdateGatewayVirtualConnectionOptions struct { + // Direct Link gateway identifier. + GatewayID *string `json:"gateway_id" validate:"required"` + + // The virtual connection identifier. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this virtual connection. Virtual connection names are unique within a gateway. This is + // the name of the virtual connection itself, the network being connected may have its own name attribute. + Name *string `json:"name,omitempty"` + + // Status of the virtual connection. Virtual connections that span IBM Cloud Accounts are created in approval_pending + // status. The owner of the target network can accept or reject connection requests by patching status to attached or + // rejected respectively. + Status *string `json:"status,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateGatewayVirtualConnectionOptions.Status property. +// Status of the virtual connection. Virtual connections that span IBM Cloud Accounts are created in approval_pending +// status. 
The owner of the target network can accept or reject connection requests by patching status to attached or +// rejected respectively. +const ( + UpdateGatewayVirtualConnectionOptions_Status_Attached = "attached" + UpdateGatewayVirtualConnectionOptions_Status_Rejected = "rejected" +) + +// NewUpdateGatewayVirtualConnectionOptions : Instantiate UpdateGatewayVirtualConnectionOptions +func (*DirectLinkV1) NewUpdateGatewayVirtualConnectionOptions(gatewayID string, id string) *UpdateGatewayVirtualConnectionOptions { + return &UpdateGatewayVirtualConnectionOptions{ + GatewayID: core.StringPtr(gatewayID), + ID: core.StringPtr(id), + } +} + +// SetGatewayID : Allow user to set GatewayID +func (options *UpdateGatewayVirtualConnectionOptions) SetGatewayID(gatewayID string) *UpdateGatewayVirtualConnectionOptions { + options.GatewayID = core.StringPtr(gatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateGatewayVirtualConnectionOptions) SetID(id string) *UpdateGatewayVirtualConnectionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateGatewayVirtualConnectionOptions) SetName(name string) *UpdateGatewayVirtualConnectionOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetStatus : Allow user to set Status +func (options *UpdateGatewayVirtualConnectionOptions) SetStatus(status string) *UpdateGatewayVirtualConnectionOptions { + options.Status = core.StringPtr(status) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateGatewayVirtualConnectionOptions) SetHeaders(param map[string]string) *UpdateGatewayVirtualConnectionOptions { + options.Headers = param + return options +} + +// GatewayActionTemplateUpdatesItemGatewayClientSpeedUpdate : gateway speed change. +// This model "extends" GatewayActionTemplateUpdatesItem +type GatewayActionTemplateUpdatesItemGatewayClientSpeedUpdate struct { + // New gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps,omitempty"` +} + +func (*GatewayActionTemplateUpdatesItemGatewayClientSpeedUpdate) isaGatewayActionTemplateUpdatesItem() bool { + return true +} + +// UnmarshalGatewayActionTemplateUpdatesItemGatewayClientSpeedUpdate unmarshals an instance of GatewayActionTemplateUpdatesItemGatewayClientSpeedUpdate from the specified map of raw messages. +func UnmarshalGatewayActionTemplateUpdatesItemGatewayClientSpeedUpdate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayActionTemplateUpdatesItemGatewayClientSpeedUpdate) + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemGatewayClientSpeedUpdate : gateway speed change. +// This model "extends" GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem +type GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemGatewayClientSpeedUpdate struct { + // New gateway speed in megabits per second. 
+ SpeedMbps *int64 `json:"speed_mbps,omitempty"` +} + +func (*GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemGatewayClientSpeedUpdate) isaGatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem() bool { + return true +} + +// UnmarshalGatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemGatewayClientSpeedUpdate unmarshals an instance of GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemGatewayClientSpeedUpdate from the specified map of raw messages. +func UnmarshalGatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemGatewayClientSpeedUpdate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemGatewayClientSpeedUpdate) + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayChangeRequestUpdatesItemGatewayClientSpeedUpdate : gateway speed change. +// This model "extends" GatewayChangeRequestUpdatesItem +type GatewayChangeRequestUpdatesItemGatewayClientSpeedUpdate struct { + // New gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps,omitempty"` +} + +func (*GatewayChangeRequestUpdatesItemGatewayClientSpeedUpdate) isaGatewayChangeRequestUpdatesItem() bool { + return true +} + +// UnmarshalGatewayChangeRequestUpdatesItemGatewayClientSpeedUpdate unmarshals an instance of GatewayChangeRequestUpdatesItemGatewayClientSpeedUpdate from the specified map of raw messages. +func UnmarshalGatewayChangeRequestUpdatesItemGatewayClientSpeedUpdate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayChangeRequestUpdatesItemGatewayClientSpeedUpdate) + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayChangeRequestGatewayClientGatewayCreate : gateway create. +// This model "extends" GatewayChangeRequest +type GatewayChangeRequestGatewayClientGatewayCreate struct { + // type of gateway change request. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the GatewayChangeRequestGatewayClientGatewayCreate.Type property. +// type of gateway change request. +const ( + GatewayChangeRequestGatewayClientGatewayCreate_Type_CreateGateway = "create_gateway" +) + +func (*GatewayChangeRequestGatewayClientGatewayCreate) isaGatewayChangeRequest() bool { + return true +} + +// UnmarshalGatewayChangeRequestGatewayClientGatewayCreate unmarshals an instance of GatewayChangeRequestGatewayClientGatewayCreate from the specified map of raw messages. +func UnmarshalGatewayChangeRequestGatewayClientGatewayCreate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayChangeRequestGatewayClientGatewayCreate) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GatewayChangeRequestGatewayClientGatewayDelete : gateway delete. +// This model "extends" GatewayChangeRequest +type GatewayChangeRequestGatewayClientGatewayDelete struct { + // type of gateway change request. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the GatewayChangeRequestGatewayClientGatewayDelete.Type property. +// type of gateway change request. 
+const (
+	GatewayChangeRequestGatewayClientGatewayDelete_Type_DeleteGateway = "delete_gateway"
+)
+
+func (*GatewayChangeRequestGatewayClientGatewayDelete) isaGatewayChangeRequest() bool {
+	return true
+}
+
+// UnmarshalGatewayChangeRequestGatewayClientGatewayDelete unmarshals an instance of GatewayChangeRequestGatewayClientGatewayDelete from the specified map of raw messages.
+func UnmarshalGatewayChangeRequestGatewayClientGatewayDelete(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(GatewayChangeRequestGatewayClientGatewayDelete)
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// GatewayChangeRequestGatewayClientGatewayUpdateAttributes : gateway attributes update.
+// This model "extends" GatewayChangeRequest
+type GatewayChangeRequestGatewayClientGatewayUpdateAttributes struct {
+	// type of gateway change request.
+	Type *string `json:"type" validate:"required"`
+
+	// array of pending updates.
+	Updates []GatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItemIntf `json:"updates" validate:"required"`
+}
+
+// Constants associated with the GatewayChangeRequestGatewayClientGatewayUpdateAttributes.Type property.
+// type of gateway change request.
+const (
+	GatewayChangeRequestGatewayClientGatewayUpdateAttributes_Type_UpdateAttributes = "update_attributes"
+)
+
+func (*GatewayChangeRequestGatewayClientGatewayUpdateAttributes) isaGatewayChangeRequest() bool {
+	return true
+}
+
+// UnmarshalGatewayChangeRequestGatewayClientGatewayUpdateAttributes unmarshals an instance of GatewayChangeRequestGatewayClientGatewayUpdateAttributes from the specified map of raw messages.
+func UnmarshalGatewayChangeRequestGatewayClientGatewayUpdateAttributes(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(GatewayChangeRequestGatewayClientGatewayUpdateAttributes)
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "updates", &obj.Updates, UnmarshalGatewayChangeRequestGatewayClientGatewayUpdateAttributesUpdatesItem)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// GatewayTemplateGatewayTypeConnectTemplate : Gateway fields specific to type=connect gateway create.
+// This model "extends" GatewayTemplate
+type GatewayTemplateGatewayTypeConnectTemplate struct {
+	// BGP ASN.
+	BgpAsn *int64 `json:"bgp_asn" validate:"required"`
+
+	// (DEPRECATED) BGP base CIDR.
+	//
+	// Field is deprecated. See bgp_ibm_cidr and bgp_cer_cidr for details on how to create a gateway using either
+	// automatic or explicit IP assignment. Any bgp_base_cidr value set will be ignored.
+	//
+	// Deprecated field bgp_base_cidr will be removed from the API specification after 15-MAR-2021.
+	BgpBaseCidr *string `json:"bgp_base_cidr,omitempty"`
+
+	// BGP customer edge router CIDR.
+	//
+	// For auto IP assignment, omit bgp_cer_cidr and bgp_ibm_cidr. IBM will automatically select values for bgp_cer_cidr
+	// and bgp_ibm_cidr.
+	//
+	// For explicit IP assignment set a valid bgp_cer_cidr and bgp_ibm_cidr CIDR, the value must reside in one of
+	// "10.254.0.0/16", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16" or an owned public CIDR. bgp_cer_cidr and
+	// bgp_ibm_cidr must have matching network and subnet mask values.
+	BgpCerCidr *string `json:"bgp_cer_cidr,omitempty"`
+
+	// BGP IBM CIDR.
+ // + // For auto IP assignment, omit bgp_cer_cidr and bgp_ibm_cidr. IBM will automatically select values for bgp_cer_cidr + // and bgp_ibm_cidr. + // + // For explicit IP assignment set a valid bgp_cer_cidr and bgp_ibm_cidr CIDR, the value must reside in one of + // "10.254.0.0/16", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16" or an owned public CIDR. bgp_cer_cidr and + // bgp_ibm_cidr must have matching network and subnet mask values. + BgpIbmCidr *string `json:"bgp_ibm_cidr,omitempty"` + + // Gateways with global routing (`true`) can connect to networks outside their associated region. + Global *bool `json:"global" validate:"required"` + + // Metered billing option. When `true` gateway usage is billed per gigabyte. When `false` there is no per gigabyte + // usage charge, instead a flat rate is charged for the gateway. + Metered *bool `json:"metered" validate:"required"` + + // The unique user-defined name for this gateway. + Name *string `json:"name" validate:"required"` + + // Resource group for this resource. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup *ResourceGroupIdentity `json:"resource_group,omitempty"` + + // Gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps" validate:"required"` + + // Gateway type. + Type *string `json:"type" validate:"required"` + + // Select Port Label for new type=connect gateway. + Port *GatewayPortIdentity `json:"port" validate:"required"` +} + +// Constants associated with the GatewayTemplateGatewayTypeConnectTemplate.Type property. +// Gateway type. +const ( + GatewayTemplateGatewayTypeConnectTemplate_Type_Connect = "connect" + GatewayTemplateGatewayTypeConnectTemplate_Type_Dedicated = "dedicated" +) + +// NewGatewayTemplateGatewayTypeConnectTemplate : Instantiate GatewayTemplateGatewayTypeConnectTemplate (Generic Model Constructor) +func (*DirectLinkV1) NewGatewayTemplateGatewayTypeConnectTemplate(bgpAsn int64, global bool, metered bool, name string, speedMbps int64, typeVar string, port *GatewayPortIdentity) (model *GatewayTemplateGatewayTypeConnectTemplate, err error) { + model = &GatewayTemplateGatewayTypeConnectTemplate{ + BgpAsn: core.Int64Ptr(bgpAsn), + Global: core.BoolPtr(global), + Metered: core.BoolPtr(metered), + Name: core.StringPtr(name), + SpeedMbps: core.Int64Ptr(speedMbps), + Type: core.StringPtr(typeVar), + Port: port, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*GatewayTemplateGatewayTypeConnectTemplate) isaGatewayTemplate() bool { + return true +} + +// UnmarshalGatewayTemplateGatewayTypeConnectTemplate unmarshals an instance of GatewayTemplateGatewayTypeConnectTemplate from the specified map of raw messages. 
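+//
+// Example (illustrative sketch, not part of the generated model code):
+// building a connect template over a provider port and creating the gateway,
+// assuming a configured `directLink` client whose CreateGateway method and
+// NewCreateGatewayOptions constructor are defined earlier in this file; the
+// port ID and name are placeholders.
+//
+//	port, _ := directLink.NewGatewayPortIdentity(portID)
+//	template, _ := directLink.NewGatewayTemplateGatewayTypeConnectTemplate(64999, false, true, "my-connect-gw", 1000, directlinkv1.GatewayTemplateGatewayTypeConnectTemplate_Type_Connect, port)
+//	gateway, response, err := directLink.CreateGateway(directLink.NewCreateGatewayOptions(template))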
+func UnmarshalGatewayTemplateGatewayTypeConnectTemplate(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(GatewayTemplateGatewayTypeConnectTemplate)
+	err = core.UnmarshalPrimitive(m, "bgp_asn", &obj.BgpAsn)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "bgp_base_cidr", &obj.BgpBaseCidr)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "bgp_cer_cidr", &obj.BgpCerCidr)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "bgp_ibm_cidr", &obj.BgpIbmCidr)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "global", &obj.Global)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "metered", &obj.Metered)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "port", &obj.Port, UnmarshalGatewayPortIdentity)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// GatewayTemplateGatewayTypeDedicatedTemplate : Gateway fields specific to type=dedicated gateway create.
+// This model "extends" GatewayTemplate
+type GatewayTemplateGatewayTypeDedicatedTemplate struct {
+	// BGP ASN.
+	BgpAsn *int64 `json:"bgp_asn" validate:"required"`
+
+	// (DEPRECATED) BGP base CIDR.
+	//
+	// Field is deprecated. See bgp_ibm_cidr and bgp_cer_cidr for details on how to create a gateway using either
+	// automatic or explicit IP assignment. Any bgp_base_cidr value set will be ignored.
+	//
+	// Deprecated field bgp_base_cidr will be removed from the API specification after 15-MAR-2021.
+	BgpBaseCidr *string `json:"bgp_base_cidr,omitempty"`
+
+	// BGP customer edge router CIDR.
+	//
+	// For auto IP assignment, omit bgp_cer_cidr and bgp_ibm_cidr. IBM will automatically select values for bgp_cer_cidr
+	// and bgp_ibm_cidr.
+	//
+	// For explicit IP assignment set a valid bgp_cer_cidr and bgp_ibm_cidr CIDR, the value must reside in one of
+	// "10.254.0.0/16", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16" or an owned public CIDR. bgp_cer_cidr and
+	// bgp_ibm_cidr must have matching network and subnet mask values.
+	BgpCerCidr *string `json:"bgp_cer_cidr,omitempty"`
+
+	// BGP IBM CIDR.
+	//
+	// For auto IP assignment, omit bgp_cer_cidr and bgp_ibm_cidr. IBM will automatically select values for bgp_cer_cidr
+	// and bgp_ibm_cidr.
+	//
+	// For explicit IP assignment set a valid bgp_cer_cidr and bgp_ibm_cidr CIDR, the value must reside in one of
+	// "10.254.0.0/16", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16" or an owned public CIDR. bgp_cer_cidr and
+	// bgp_ibm_cidr must have matching network and subnet mask values.
+	BgpIbmCidr *string `json:"bgp_ibm_cidr,omitempty"`
+
+	// Gateways with global routing (`true`) can connect to networks outside their associated region.
+	Global *bool `json:"global" validate:"required"`
+
+	// Metered billing option. When `true` gateway usage is billed per gigabyte. When `false` there is no per gigabyte
+	// usage charge, instead a flat rate is charged for the gateway.
+	Metered *bool `json:"metered" validate:"required"`
+
+	// The unique user-defined name for this gateway.
+ Name *string `json:"name" validate:"required"` + + // Resource group for this resource. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup *ResourceGroupIdentity `json:"resource_group,omitempty"` + + // Gateway speed in megabits per second. + SpeedMbps *int64 `json:"speed_mbps" validate:"required"` + + // Gateway type. + Type *string `json:"type" validate:"required"` + + // Carrier name. + CarrierName *string `json:"carrier_name" validate:"required"` + + // Cross connect router. + CrossConnectRouter *string `json:"cross_connect_router" validate:"required"` + + // Customer name. + CustomerName *string `json:"customer_name" validate:"required"` + + // Gateway location. + LocationName *string `json:"location_name" validate:"required"` + + // MACsec configuration information. Contact IBM support for access to MACsec. + MacsecConfig *GatewayMacsecConfigTemplate `json:"macsec_config,omitempty"` +} + +// Constants associated with the GatewayTemplateGatewayTypeDedicatedTemplate.Type property. +// Gateway type. +const ( + GatewayTemplateGatewayTypeDedicatedTemplate_Type_Connect = "connect" + GatewayTemplateGatewayTypeDedicatedTemplate_Type_Dedicated = "dedicated" +) + +// NewGatewayTemplateGatewayTypeDedicatedTemplate : Instantiate GatewayTemplateGatewayTypeDedicatedTemplate (Generic Model Constructor) +func (*DirectLinkV1) NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn int64, global bool, metered bool, name string, speedMbps int64, typeVar string, carrierName string, crossConnectRouter string, customerName string, locationName string) (model *GatewayTemplateGatewayTypeDedicatedTemplate, err error) { + model = &GatewayTemplateGatewayTypeDedicatedTemplate{ + BgpAsn: core.Int64Ptr(bgpAsn), + Global: core.BoolPtr(global), + Metered: core.BoolPtr(metered), + Name: core.StringPtr(name), + SpeedMbps: core.Int64Ptr(speedMbps), + Type: core.StringPtr(typeVar), + CarrierName: core.StringPtr(carrierName), + CrossConnectRouter: core.StringPtr(crossConnectRouter), + CustomerName: core.StringPtr(customerName), + LocationName: core.StringPtr(locationName), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*GatewayTemplateGatewayTypeDedicatedTemplate) isaGatewayTemplate() bool { + return true +} + +// UnmarshalGatewayTemplateGatewayTypeDedicatedTemplate unmarshals an instance of GatewayTemplateGatewayTypeDedicatedTemplate from the specified map of raw messages. 
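+//
+// Instances of this model are normally built with the generic model constructor rather
+// than unmarshalled by hand; a hypothetical construction sketch, where directLink is a
+// *DirectLinkV1 and all argument values are placeholders:
+//
+//	template, err := directLink.NewGatewayTemplateGatewayTypeDedicatedTemplate(
+//		64999, true, false, "my-gateway", 1000,
+//		GatewayTemplateGatewayTypeDedicatedTemplate_Type_Dedicated,
+//		"myCarrier", "xcr01.dal03", "myCustomer", "dal03")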
+func UnmarshalGatewayTemplateGatewayTypeDedicatedTemplate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GatewayTemplateGatewayTypeDedicatedTemplate) + err = core.UnmarshalPrimitive(m, "bgp_asn", &obj.BgpAsn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_base_cidr", &obj.BgpBaseCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_cer_cidr", &obj.BgpCerCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bgp_ibm_cidr", &obj.BgpIbmCidr) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "global", &obj.Global) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "metered", &obj.Metered) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "speed_mbps", &obj.SpeedMbps) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "carrier_name", &obj.CarrierName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cross_connect_router", &obj.CrossConnectRouter) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "customer_name", &obj.CustomerName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "location_name", &obj.LocationName) + if err != nil { + return + } + err = core.UnmarshalModel(m, "macsec_config", &obj.MacsecConfig, UnmarshalGatewayMacsecConfigTemplate) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/dnsrecordbulkv1/dns_record_bulk_v1.go b/vendor/github.com/IBM/networking-go-sdk/dnsrecordbulkv1/dns_record_bulk_v1.go new file mode 100644 index 00000000000..6502e228e3e --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/dnsrecordbulkv1/dns_record_bulk_v1.go @@ -0,0 +1,475 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package dnsrecordbulkv1 : Operations and models for the DnsRecordBulkV1 service +package dnsrecordbulkv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "io" + "net/http" + "reflect" + "time" +) + +// DnsRecordBulkV1 : Import/Export zone files +// +// Version: 1.0.0 +type DnsRecordBulkV1 struct { + Service *core.BaseService + + // Full url-encoded CRN of the service instance. + Crn *string + + // Identifier of zone. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. 
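+//
+// A hypothetical construction sketch (requests target this default URL unless
+// DnsRecordBulkV1Options.URL overrides it; the credentials and identifiers shown
+// are placeholders, not values from this change):
+//
+//	dnsRecordBulk, err := NewDnsRecordBulkV1(&DnsRecordBulkV1Options{
+//		Authenticator:  &core.IamAuthenticator{ApiKey: "<api-key>"},
+//		Crn:            core.StringPtr("<service-instance-crn>"),
+//		ZoneIdentifier: core.StringPtr("<zone-id>"),
+//	})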
+const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "dns_record_bulk" + +// DnsRecordBulkV1Options : Service options +type DnsRecordBulkV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full url-encoded CRN of the service instance. + Crn *string `validate:"required"` + + // Identifier of zone. + ZoneIdentifier *string `validate:"required"` +} + +// NewDnsRecordBulkV1UsingExternalConfig : constructs an instance of DnsRecordBulkV1 with passed in options and external configuration. +func NewDnsRecordBulkV1UsingExternalConfig(options *DnsRecordBulkV1Options) (dnsRecordBulk *DnsRecordBulkV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + dnsRecordBulk, err = NewDnsRecordBulkV1(options) + if err != nil { + return + } + + err = dnsRecordBulk.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = dnsRecordBulk.Service.SetServiceURL(options.URL) + } + return +} + +// NewDnsRecordBulkV1 : constructs an instance of DnsRecordBulkV1 with passed in options. +func NewDnsRecordBulkV1(options *DnsRecordBulkV1Options) (service *DnsRecordBulkV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &DnsRecordBulkV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "dnsRecordBulk" suitable for processing requests. 
+func (dnsRecordBulk *DnsRecordBulkV1) Clone() *DnsRecordBulkV1 { + if core.IsNil(dnsRecordBulk) { + return nil + } + clone := *dnsRecordBulk + clone.Service = dnsRecordBulk.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (dnsRecordBulk *DnsRecordBulkV1) SetServiceURL(url string) error { + return dnsRecordBulk.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (dnsRecordBulk *DnsRecordBulkV1) GetServiceURL() string { + return dnsRecordBulk.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (dnsRecordBulk *DnsRecordBulkV1) SetDefaultHeaders(headers http.Header) { + dnsRecordBulk.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (dnsRecordBulk *DnsRecordBulkV1) SetEnableGzipCompression(enableGzip bool) { + dnsRecordBulk.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (dnsRecordBulk *DnsRecordBulkV1) GetEnableGzipCompression() bool { + return dnsRecordBulk.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (dnsRecordBulk *DnsRecordBulkV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + dnsRecordBulk.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (dnsRecordBulk *DnsRecordBulkV1) DisableRetries() { + dnsRecordBulk.Service.DisableRetries() +} + +// GetDnsRecordsBulk : Export zone file +// Export zone file. 
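+//
+// A hypothetical export/import round-trip sketch (file names are placeholders; the
+// export result is an io.ReadCloser that the caller should drain and close):
+//
+//	zoneFile, _, err := dnsRecordBulk.GetDnsRecordsBulk(dnsRecordBulk.NewGetDnsRecordsBulkOptions())
+//	// ... copy zoneFile to disk with io.Copy, then zoneFile.Close() ...
+//
+//	f, _ := os.Open("records.txt")
+//	postOptions := dnsRecordBulk.NewPostDnsRecordsBulkOptions()
+//	postOptions.SetFile(f)
+//	postOptions.SetFileContentType("text/plain")
+//	imported, _, err := dnsRecordBulk.PostDnsRecordsBulk(postOptions)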
+func (dnsRecordBulk *DnsRecordBulkV1) GetDnsRecordsBulk(getDnsRecordsBulkOptions *GetDnsRecordsBulkOptions) (result io.ReadCloser, response *core.DetailedResponse, err error) {
+	return dnsRecordBulk.GetDnsRecordsBulkWithContext(context.Background(), getDnsRecordsBulkOptions)
+}
+
+// GetDnsRecordsBulkWithContext is an alternate form of the GetDnsRecordsBulk method which supports a Context parameter
+func (dnsRecordBulk *DnsRecordBulkV1) GetDnsRecordsBulkWithContext(ctx context.Context, getDnsRecordsBulkOptions *GetDnsRecordsBulkOptions) (result io.ReadCloser, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(getDnsRecordsBulkOptions, "getDnsRecordsBulkOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *dnsRecordBulk.Crn,
+		"zone_identifier": *dnsRecordBulk.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = dnsRecordBulk.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(dnsRecordBulk.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/dns_records_bulk`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getDnsRecordsBulkOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("dns_record_bulk", "V1", "GetDnsRecordsBulk")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "text/plain; charset=utf-8")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = dnsRecordBulk.Service.Request(request, &result)
+
+	return
+}
+
+// PostDnsRecordsBulk : Import zone file
+// Import zone file.
+func (dnsRecordBulk *DnsRecordBulkV1) PostDnsRecordsBulk(postDnsRecordsBulkOptions *PostDnsRecordsBulkOptions) (result *DnsRecordsObject, response *core.DetailedResponse, err error) {
+	return dnsRecordBulk.PostDnsRecordsBulkWithContext(context.Background(), postDnsRecordsBulkOptions)
+}
+
+// PostDnsRecordsBulkWithContext is an alternate form of the PostDnsRecordsBulk method which supports a Context parameter
+func (dnsRecordBulk *DnsRecordBulkV1) PostDnsRecordsBulkWithContext(ctx context.Context, postDnsRecordsBulkOptions *PostDnsRecordsBulkOptions) (result *DnsRecordsObject, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(postDnsRecordsBulkOptions, "postDnsRecordsBulkOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(postDnsRecordsBulkOptions, "postDnsRecordsBulkOptions")
+	if err != nil {
+		return
+	}
+	if postDnsRecordsBulkOptions.File == nil {
+		err = fmt.Errorf("file must be supplied")
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *dnsRecordBulk.Crn,
+		"zone_identifier": *dnsRecordBulk.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.POST)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = dnsRecordBulk.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(dnsRecordBulk.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/dns_records_bulk`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range postDnsRecordsBulkOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("dns_record_bulk", "V1", "PostDnsRecordsBulk")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	builder.AddHeader("Accept", "application/json")
+
+	if postDnsRecordsBulkOptions.File != nil {
+		builder.AddFormData("file", "filename",
+			core.StringNilMapper(postDnsRecordsBulkOptions.FileContentType), postDnsRecordsBulkOptions.File)
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = dnsRecordBulk.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDnsRecordsObject)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// DnsRecordsObjectMessagesItem : DnsRecordsObjectMessagesItem struct
+type DnsRecordsObjectMessagesItem struct {
+	// Message code.
+	Code *int64 `json:"code,omitempty"`
+
+	// Message corresponding to the code.
+	Message *string `json:"message,omitempty"`
+}
+
+// UnmarshalDnsRecordsObjectMessagesItem unmarshals an instance of DnsRecordsObjectMessagesItem from the specified map of raw messages.
+func UnmarshalDnsRecordsObjectMessagesItem(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DnsRecordsObjectMessagesItem)
+	err = core.UnmarshalPrimitive(m, "code", &obj.Code)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "message", &obj.Message)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// DnsRecordsObjectResult : DNS record.
+type DnsRecordsObjectResult struct {
+	// total records added.
+	RecsAdded *int64 `json:"recs_added" validate:"required"`
+
+	// total records parsed.
+	TotalRecordsParsed *int64 `json:"total_records_parsed" validate:"required"`
+}
+
+// UnmarshalDnsRecordsObjectResult unmarshals an instance of DnsRecordsObjectResult from the specified map of raw messages.
+func UnmarshalDnsRecordsObjectResult(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DnsRecordsObjectResult)
+	err = core.UnmarshalPrimitive(m, "recs_added", &obj.RecsAdded)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "total_records_parsed", &obj.TotalRecordsParsed)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// DnsRecordsObjectTiming : timing object.
+type DnsRecordsObjectTiming struct {
+	// start time.
+	StartTime *string `json:"start_time,omitempty"`
+
+	// end time.
+	EndTime *string `json:"end_time,omitempty"`
+
+	// process time.
+	ProcessTime *int64 `json:"process_time,omitempty"`
+}
+
+// UnmarshalDnsRecordsObjectTiming unmarshals an instance of DnsRecordsObjectTiming from the specified map of raw messages.
+func UnmarshalDnsRecordsObjectTiming(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DnsRecordsObjectTiming)
+	err = core.UnmarshalPrimitive(m, "start_time", &obj.StartTime)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "end_time", &obj.EndTime)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "process_time", &obj.ProcessTime)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// GetDnsRecordsBulkOptions : The GetDnsRecordsBulk options.
+type GetDnsRecordsBulkOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetDnsRecordsBulkOptions : Instantiate GetDnsRecordsBulkOptions +func (*DnsRecordBulkV1) NewGetDnsRecordsBulkOptions() *GetDnsRecordsBulkOptions { + return &GetDnsRecordsBulkOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetDnsRecordsBulkOptions) SetHeaders(param map[string]string) *GetDnsRecordsBulkOptions { + options.Headers = param + return options +} + +// PostDnsRecordsBulkOptions : The PostDnsRecordsBulk options. +type PostDnsRecordsBulkOptions struct { + // file to upload. + File io.ReadCloser `json:"file,omitempty"` + + // The content type of file. + FileContentType *string `json:"file_content_type,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPostDnsRecordsBulkOptions : Instantiate PostDnsRecordsBulkOptions +func (*DnsRecordBulkV1) NewPostDnsRecordsBulkOptions() *PostDnsRecordsBulkOptions { + return &PostDnsRecordsBulkOptions{} +} + +// SetFile : Allow user to set File +func (options *PostDnsRecordsBulkOptions) SetFile(file io.ReadCloser) *PostDnsRecordsBulkOptions { + options.File = file + return options +} + +// SetFileContentType : Allow user to set FileContentType +func (options *PostDnsRecordsBulkOptions) SetFileContentType(fileContentType string) *PostDnsRecordsBulkOptions { + options.FileContentType = core.StringPtr(fileContentType) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PostDnsRecordsBulkOptions) SetHeaders(param map[string]string) *PostDnsRecordsBulkOptions { + options.Headers = param + return options +} + +// DnsRecordsObject : dns records objects. +type DnsRecordsObject struct { + // Operation success flag. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages []DnsRecordsObjectMessagesItem `json:"messages" validate:"required"` + + // DNS record. + Result *DnsRecordsObjectResult `json:"result" validate:"required"` + + // timing object. + Timing *DnsRecordsObjectTiming `json:"timing,omitempty"` +} + + +// UnmarshalDnsRecordsObject unmarshals an instance of DnsRecordsObject from the specified map of raw messages. +func UnmarshalDnsRecordsObject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DnsRecordsObject) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalDnsRecordsObjectMessagesItem) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDnsRecordsObjectResult) + if err != nil { + return + } + err = core.UnmarshalModel(m, "timing", &obj.Timing, UnmarshalDnsRecordsObjectTiming) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/dnsrecordbulkv1/records.txt b/vendor/github.com/IBM/networking-go-sdk/dnsrecordbulkv1/records.txt new file mode 100644 index 00000000000..3743d9bae7d --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/dnsrecordbulkv1/records.txt @@ -0,0 +1,49 @@ +;; This file is for testing dns record import and export functionality. +;; +;; Domain: sdk.cistest-load.com. 
+;; Exported: 2020-07-23 06:46:45 +;; +;; This file is intended for use for informational and archival +;; purposes ONLY and MUST be edited before use on a production +;; DNS server. In particular, you must: +;; -- update the SOA record with the correct authoritative name server +;; -- update the SOA record with the contact e-mail address information +;; -- update the NS record(s) with the authoritative name servers for this domain. +;; +;; For further information, please consult the BIND documentation +;; located on the following website: +;; +;; http://www.isc.org/ +;; +;; And RFC 1035: +;; +;; http://www.ietf.org/rfc/rfc1035.txt +;; +;; Please note that we do NOT offer technical support for any use +;; of this zone data, the BIND name server, or any other third-party +;; DNS software. +;; +;; Use at your own risk. +;; A Records +host-9.test-example.com.sdk.cistest-load.com. 1 IN A 12.12.12.1 + +;; AAAA Records +host-1.test-example.com.sdk.cistest-load.com. 1 IN AAAA 2001:db8:85a3::8a2e:370:7334 + +;; CAA Records +host.test-example.com.sdk.cistest-load.com. 1 IN CAA 0 http "domain1.com" + +;; CNAME Records +host-2.test-example.com.sdk.cistest-load.com. 1 IN CNAME domain2.com. + +;; MX Records +host-3.test-example.com.sdk.cistest-load.com. 1 IN MX 5 example-domain.com. + +;; NS Records +host-4.test-example.com.sdk.cistest-load.com. 1 IN NS domain6.com. + +;; SPF Records +host-5.test-example.com.sdk.cistest-load.com. 1 IN SPF "domain7.com" + +;; TXT Records +host-10.test-example.com.sdk.cistest-load.com. 1 IN TXT "Test Text" diff --git a/vendor/github.com/IBM/networking-go-sdk/dnsrecordsv1/dns_records_v1.go b/vendor/github.com/IBM/networking-go-sdk/dnsrecordsv1/dns_records_v1.go new file mode 100644 index 00000000000..0346b85c8be --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/dnsrecordsv1/dns_records_v1.go @@ -0,0 +1,1206 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package dnsrecordsv1 : Operations and models for the DnsRecordsV1 service +package dnsrecordsv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// DnsRecordsV1 : DNS records +// +// Version: 1.0.1 +type DnsRecordsV1 struct { + Service *core.BaseService + + // Full crn of the service instance. + Crn *string + + // Zone identifier (zone id). + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "dns_records" + +// DnsRecordsV1Options : Service options +type DnsRecordsV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full crn of the service instance. 
+ Crn *string `validate:"required"` + + // Zone identifier (zone id). + ZoneIdentifier *string `validate:"required"` +} + +// NewDnsRecordsV1UsingExternalConfig : constructs an instance of DnsRecordsV1 with passed in options and external configuration. +func NewDnsRecordsV1UsingExternalConfig(options *DnsRecordsV1Options) (dnsRecords *DnsRecordsV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + dnsRecords, err = NewDnsRecordsV1(options) + if err != nil { + return + } + + err = dnsRecords.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = dnsRecords.Service.SetServiceURL(options.URL) + } + return +} + +// NewDnsRecordsV1 : constructs an instance of DnsRecordsV1 with passed in options. +func NewDnsRecordsV1(options *DnsRecordsV1Options) (service *DnsRecordsV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &DnsRecordsV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "dnsRecords" suitable for processing requests. +func (dnsRecords *DnsRecordsV1) Clone() *DnsRecordsV1 { + if core.IsNil(dnsRecords) { + return nil + } + clone := *dnsRecords + clone.Service = dnsRecords.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (dnsRecords *DnsRecordsV1) SetServiceURL(url string) error { + return dnsRecords.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (dnsRecords *DnsRecordsV1) GetServiceURL() string { + return dnsRecords.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (dnsRecords *DnsRecordsV1) SetDefaultHeaders(headers http.Header) { + dnsRecords.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (dnsRecords *DnsRecordsV1) SetEnableGzipCompression(enableGzip bool) { + dnsRecords.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (dnsRecords *DnsRecordsV1) GetEnableGzipCompression() bool { + return dnsRecords.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (dnsRecords *DnsRecordsV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + dnsRecords.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. 
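+//
+// A hypothetical sketch: enable up to 4 retries with a 30-second interval cap for a
+// burst of calls, then turn retries back off (the counts are placeholders):
+//
+//	dnsRecords.EnableRetries(4, 30*time.Second)
+//	// ... invoke operations ...
+//	dnsRecords.DisableRetries()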
+func (dnsRecords *DnsRecordsV1) DisableRetries() { + dnsRecords.Service.DisableRetries() +} + +// ListAllDnsRecords : List all DNS records +// List all DNS records for a given zone of a service instance. +func (dnsRecords *DnsRecordsV1) ListAllDnsRecords(listAllDnsRecordsOptions *ListAllDnsRecordsOptions) (result *ListDnsrecordsResp, response *core.DetailedResponse, err error) { + return dnsRecords.ListAllDnsRecordsWithContext(context.Background(), listAllDnsRecordsOptions) +} + +// ListAllDnsRecordsWithContext is an alternate form of the ListAllDnsRecords method which supports a Context parameter +func (dnsRecords *DnsRecordsV1) ListAllDnsRecordsWithContext(ctx context.Context, listAllDnsRecordsOptions *ListAllDnsRecordsOptions) (result *ListDnsrecordsResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAllDnsRecordsOptions, "listAllDnsRecordsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *dnsRecords.Crn, + "zone_identifier": *dnsRecords.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsRecords.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsRecords.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/dns_records`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listAllDnsRecordsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_records", "V1", "ListAllDnsRecords") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listAllDnsRecordsOptions.Type != nil { + builder.AddQuery("type", fmt.Sprint(*listAllDnsRecordsOptions.Type)) + } + if listAllDnsRecordsOptions.Name != nil { + builder.AddQuery("name", fmt.Sprint(*listAllDnsRecordsOptions.Name)) + } + if listAllDnsRecordsOptions.Content != nil { + builder.AddQuery("content", fmt.Sprint(*listAllDnsRecordsOptions.Content)) + } + if listAllDnsRecordsOptions.Page != nil { + builder.AddQuery("page", fmt.Sprint(*listAllDnsRecordsOptions.Page)) + } + if listAllDnsRecordsOptions.PerPage != nil { + builder.AddQuery("per_page", fmt.Sprint(*listAllDnsRecordsOptions.PerPage)) + } + if listAllDnsRecordsOptions.Order != nil { + builder.AddQuery("order", fmt.Sprint(*listAllDnsRecordsOptions.Order)) + } + if listAllDnsRecordsOptions.Direction != nil { + builder.AddQuery("direction", fmt.Sprint(*listAllDnsRecordsOptions.Direction)) + } + if listAllDnsRecordsOptions.Match != nil { + builder.AddQuery("match", fmt.Sprint(*listAllDnsRecordsOptions.Match)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsRecords.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListDnsrecordsResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateDnsRecord : Create DNS record +// Add a new DNS record for a given zone for a given service instance. 
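+//
+// A hypothetical sketch creating an A record (name, address and TTL are placeholders):
+//
+//	createOptions := dnsRecords.NewCreateDnsRecordOptions()
+//	createOptions.SetType(CreateDnsRecordOptions_Type_A)
+//	createOptions.SetName("www.example.com")
+//	createOptions.SetContent("203.0.113.10")
+//	createOptions.SetTTL(900)
+//	record, _, err := dnsRecords.CreateDnsRecord(createOptions)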
+func (dnsRecords *DnsRecordsV1) CreateDnsRecord(createDnsRecordOptions *CreateDnsRecordOptions) (result *DnsrecordResp, response *core.DetailedResponse, err error) { + return dnsRecords.CreateDnsRecordWithContext(context.Background(), createDnsRecordOptions) +} + +// CreateDnsRecordWithContext is an alternate form of the CreateDnsRecord method which supports a Context parameter +func (dnsRecords *DnsRecordsV1) CreateDnsRecordWithContext(ctx context.Context, createDnsRecordOptions *CreateDnsRecordOptions) (result *DnsrecordResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createDnsRecordOptions, "createDnsRecordOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *dnsRecords.Crn, + "zone_identifier": *dnsRecords.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsRecords.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsRecords.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/dns_records`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createDnsRecordOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_records", "V1", "CreateDnsRecord") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createDnsRecordOptions.Name != nil { + body["name"] = createDnsRecordOptions.Name + } + if createDnsRecordOptions.Type != nil { + body["type"] = createDnsRecordOptions.Type + } + if createDnsRecordOptions.TTL != nil { + body["ttl"] = createDnsRecordOptions.TTL + } + if createDnsRecordOptions.Content != nil { + body["content"] = createDnsRecordOptions.Content + } + if createDnsRecordOptions.Priority != nil { + body["priority"] = createDnsRecordOptions.Priority + } + if createDnsRecordOptions.Data != nil { + body["data"] = createDnsRecordOptions.Data + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsRecords.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDnsrecordResp) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteDnsRecord : Delete DNS record +// Delete a DNS record given its id. 
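+//
+// A hypothetical sketch (the record identifier is a placeholder):
+//
+//	deleteOptions := dnsRecords.NewDeleteDnsRecordOptions("<dnsrecord-id>")
+//	deleteResp, _, err := dnsRecords.DeleteDnsRecord(deleteOptions)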
+func (dnsRecords *DnsRecordsV1) DeleteDnsRecord(deleteDnsRecordOptions *DeleteDnsRecordOptions) (result *DeleteDnsrecordResp, response *core.DetailedResponse, err error) { + return dnsRecords.DeleteDnsRecordWithContext(context.Background(), deleteDnsRecordOptions) +} + +// DeleteDnsRecordWithContext is an alternate form of the DeleteDnsRecord method which supports a Context parameter +func (dnsRecords *DnsRecordsV1) DeleteDnsRecordWithContext(ctx context.Context, deleteDnsRecordOptions *DeleteDnsRecordOptions) (result *DeleteDnsrecordResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteDnsRecordOptions, "deleteDnsRecordOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteDnsRecordOptions, "deleteDnsRecordOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *dnsRecords.Crn, + "zone_identifier": *dnsRecords.ZoneIdentifier, + "dnsrecord_identifier": *deleteDnsRecordOptions.DnsrecordIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsRecords.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsRecords.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/dns_records/{dnsrecord_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteDnsRecordOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_records", "V1", "DeleteDnsRecord") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsRecords.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteDnsrecordResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetDnsRecord : Get DNS record +// Get the details of a DNS record for a given zone under a given service instance. 
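+//
+// A hypothetical sketch (the record identifier is a placeholder):
+//
+//	getOptions := dnsRecords.NewGetDnsRecordOptions("<dnsrecord-id>")
+//	recordResp, _, err := dnsRecords.GetDnsRecord(getOptions)
+//	// on success, recordResp.Result holds the DnsrecordDetails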
+func (dnsRecords *DnsRecordsV1) GetDnsRecord(getDnsRecordOptions *GetDnsRecordOptions) (result *DnsrecordResp, response *core.DetailedResponse, err error) { + return dnsRecords.GetDnsRecordWithContext(context.Background(), getDnsRecordOptions) +} + +// GetDnsRecordWithContext is an alternate form of the GetDnsRecord method which supports a Context parameter +func (dnsRecords *DnsRecordsV1) GetDnsRecordWithContext(ctx context.Context, getDnsRecordOptions *GetDnsRecordOptions) (result *DnsrecordResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getDnsRecordOptions, "getDnsRecordOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getDnsRecordOptions, "getDnsRecordOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *dnsRecords.Crn, + "zone_identifier": *dnsRecords.ZoneIdentifier, + "dnsrecord_identifier": *getDnsRecordOptions.DnsrecordIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsRecords.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsRecords.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/dns_records/{dnsrecord_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getDnsRecordOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_records", "V1", "GetDnsRecord") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsRecords.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDnsrecordResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateDnsRecord : Update DNS record +// Update an existing DNS record for a given zone under a given service instance. 
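+//
+// A hypothetical sketch pointing an existing record at a new address (values are placeholders):
+//
+//	updateOptions := dnsRecords.NewUpdateDnsRecordOptions("<dnsrecord-id>")
+//	updateOptions.SetContent("203.0.113.20")
+//	updateOptions.SetProxied(false)
+//	updated, _, err := dnsRecords.UpdateDnsRecord(updateOptions)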
+func (dnsRecords *DnsRecordsV1) UpdateDnsRecord(updateDnsRecordOptions *UpdateDnsRecordOptions) (result *DnsrecordResp, response *core.DetailedResponse, err error) { + return dnsRecords.UpdateDnsRecordWithContext(context.Background(), updateDnsRecordOptions) +} + +// UpdateDnsRecordWithContext is an alternate form of the UpdateDnsRecord method which supports a Context parameter +func (dnsRecords *DnsRecordsV1) UpdateDnsRecordWithContext(ctx context.Context, updateDnsRecordOptions *UpdateDnsRecordOptions) (result *DnsrecordResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateDnsRecordOptions, "updateDnsRecordOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateDnsRecordOptions, "updateDnsRecordOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *dnsRecords.Crn, + "zone_identifier": *dnsRecords.ZoneIdentifier, + "dnsrecord_identifier": *updateDnsRecordOptions.DnsrecordIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsRecords.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsRecords.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/dns_records/{dnsrecord_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateDnsRecordOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_records", "V1", "UpdateDnsRecord") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateDnsRecordOptions.Name != nil { + body["name"] = updateDnsRecordOptions.Name + } + if updateDnsRecordOptions.Type != nil { + body["type"] = updateDnsRecordOptions.Type + } + if updateDnsRecordOptions.TTL != nil { + body["ttl"] = updateDnsRecordOptions.TTL + } + if updateDnsRecordOptions.Content != nil { + body["content"] = updateDnsRecordOptions.Content + } + if updateDnsRecordOptions.Priority != nil { + body["priority"] = updateDnsRecordOptions.Priority + } + if updateDnsRecordOptions.Proxied != nil { + body["proxied"] = updateDnsRecordOptions.Proxied + } + if updateDnsRecordOptions.Data != nil { + body["data"] = updateDnsRecordOptions.Data + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsRecords.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDnsrecordResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateDnsRecordOptions : The CreateDnsRecord options. +type CreateDnsRecordOptions struct { + // Required for all record types except SRV. + Name *string `json:"name,omitempty"` + + // dns record type. + Type *string `json:"type,omitempty"` + + // dns record ttl value. + TTL *int64 `json:"ttl,omitempty"` + + // dns record content. + Content *string `json:"content,omitempty"` + + // For MX records only. + Priority *int64 `json:"priority,omitempty"` + + // For LOC, SRV and CAA records only. 
+ Data interface{} `json:"data,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateDnsRecordOptions.Type property. +// dns record type. +const ( + CreateDnsRecordOptions_Type_A = "A" + CreateDnsRecordOptions_Type_Aaaa = "AAAA" + CreateDnsRecordOptions_Type_Caa = "CAA" + CreateDnsRecordOptions_Type_Cname = "CNAME" + CreateDnsRecordOptions_Type_Loc = "LOC" + CreateDnsRecordOptions_Type_Mx = "MX" + CreateDnsRecordOptions_Type_Ns = "NS" + CreateDnsRecordOptions_Type_Spf = "SPF" + CreateDnsRecordOptions_Type_Srv = "SRV" + CreateDnsRecordOptions_Type_Txt = "TXT" +) + +// NewCreateDnsRecordOptions : Instantiate CreateDnsRecordOptions +func (*DnsRecordsV1) NewCreateDnsRecordOptions() *CreateDnsRecordOptions { + return &CreateDnsRecordOptions{} +} + +// SetName : Allow user to set Name +func (options *CreateDnsRecordOptions) SetName(name string) *CreateDnsRecordOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetType : Allow user to set Type +func (options *CreateDnsRecordOptions) SetType(typeVar string) *CreateDnsRecordOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetTTL : Allow user to set TTL +func (options *CreateDnsRecordOptions) SetTTL(ttl int64) *CreateDnsRecordOptions { + options.TTL = core.Int64Ptr(ttl) + return options +} + +// SetContent : Allow user to set Content +func (options *CreateDnsRecordOptions) SetContent(content string) *CreateDnsRecordOptions { + options.Content = core.StringPtr(content) + return options +} + +// SetPriority : Allow user to set Priority +func (options *CreateDnsRecordOptions) SetPriority(priority int64) *CreateDnsRecordOptions { + options.Priority = core.Int64Ptr(priority) + return options +} + +// SetData : Allow user to set Data +func (options *CreateDnsRecordOptions) SetData(data interface{}) *CreateDnsRecordOptions { + options.Data = data + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateDnsRecordOptions) SetHeaders(param map[string]string) *CreateDnsRecordOptions { + options.Headers = param + return options +} + +// DeleteDnsRecordOptions : The DeleteDnsRecord options. +type DeleteDnsRecordOptions struct { + // Identifier of DNS record. + DnsrecordIdentifier *string `json:"dnsrecord_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteDnsRecordOptions : Instantiate DeleteDnsRecordOptions +func (*DnsRecordsV1) NewDeleteDnsRecordOptions(dnsrecordIdentifier string) *DeleteDnsRecordOptions { + return &DeleteDnsRecordOptions{ + DnsrecordIdentifier: core.StringPtr(dnsrecordIdentifier), + } +} + +// SetDnsrecordIdentifier : Allow user to set DnsrecordIdentifier +func (options *DeleteDnsRecordOptions) SetDnsrecordIdentifier(dnsrecordIdentifier string) *DeleteDnsRecordOptions { + options.DnsrecordIdentifier = core.StringPtr(dnsrecordIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteDnsRecordOptions) SetHeaders(param map[string]string) *DeleteDnsRecordOptions { + options.Headers = param + return options +} + +// DeleteDnsrecordRespResult : result. +type DeleteDnsrecordRespResult struct { + // dns record id. + ID *string `json:"id" validate:"required"` +} + + +// UnmarshalDeleteDnsrecordRespResult unmarshals an instance of DeleteDnsrecordRespResult from the specified map of raw messages. 
+func UnmarshalDeleteDnsrecordRespResult(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DeleteDnsrecordRespResult)
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// GetDnsRecordOptions : The GetDnsRecord options.
+type GetDnsRecordOptions struct {
+	// Identifier of DNS record.
+	DnsrecordIdentifier *string `json:"dnsrecord_identifier" validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewGetDnsRecordOptions : Instantiate GetDnsRecordOptions
+func (*DnsRecordsV1) NewGetDnsRecordOptions(dnsrecordIdentifier string) *GetDnsRecordOptions {
+	return &GetDnsRecordOptions{
+		DnsrecordIdentifier: core.StringPtr(dnsrecordIdentifier),
+	}
+}
+
+// SetDnsrecordIdentifier : Allow user to set DnsrecordIdentifier
+func (options *GetDnsRecordOptions) SetDnsrecordIdentifier(dnsrecordIdentifier string) *GetDnsRecordOptions {
+	options.DnsrecordIdentifier = core.StringPtr(dnsrecordIdentifier)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetDnsRecordOptions) SetHeaders(param map[string]string) *GetDnsRecordOptions {
+	options.Headers = param
+	return options
+}
+
+// ListAllDnsRecordsOptions : The ListAllDnsRecords options.
+type ListAllDnsRecordsOptions struct {
+	// Type of DNS records to display.
+	Type *string `json:"type,omitempty"`
+
+	// Value of name field to filter by.
+	Name *string `json:"name,omitempty"`
+
+	// Value of content field to filter by.
+	Content *string `json:"content,omitempty"`
+
+	// Page number of paginated results.
+	Page *int64 `json:"page,omitempty"`
+
+	// Maximum number of DNS records per page.
+	PerPage *int64 `json:"per_page,omitempty"`
+
+	// Field by which to order list of DNS records.
+	Order *string `json:"order,omitempty"`
+
+	// Direction in which to order results [ascending/descending order].
+	Direction *string `json:"direction,omitempty"`
+
+	// Whether to match all (all) or at least one search parameter (any).
+	Match *string `json:"match,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the ListAllDnsRecordsOptions.Order property.
+// Field by which to order list of DNS records.
+const (
+	ListAllDnsRecordsOptions_Order_Content = "content"
+	ListAllDnsRecordsOptions_Order_Name = "name"
+	ListAllDnsRecordsOptions_Order_Proxied = "proxied"
+	ListAllDnsRecordsOptions_Order_TTL = "ttl"
+	ListAllDnsRecordsOptions_Order_Type = "type"
+)
+
+// Constants associated with the ListAllDnsRecordsOptions.Direction property.
+// Direction in which to order results [ascending/descending order].
+const (
+	ListAllDnsRecordsOptions_Direction_Asc = "asc"
+	ListAllDnsRecordsOptions_Direction_Desc = "desc"
+)
+
+// Constants associated with the ListAllDnsRecordsOptions.Match property.
+// Whether to match all (all) or at least one search parameter (any).
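+//
+// A hypothetical filtered-listing sketch combining these options (filter values are placeholders):
+//
+//	listOptions := dnsRecords.NewListAllDnsRecordsOptions()
+//	listOptions.SetType("A")
+//	listOptions.SetOrder(ListAllDnsRecordsOptions_Order_Name)
+//	listOptions.SetDirection(ListAllDnsRecordsOptions_Direction_Asc)
+//	listOptions.SetMatch(ListAllDnsRecordsOptions_Match_All)
+//	records, _, err := dnsRecords.ListAllDnsRecords(listOptions)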
+const ( + ListAllDnsRecordsOptions_Match_All = "all" + ListAllDnsRecordsOptions_Match_Any = "any" +) + +// NewListAllDnsRecordsOptions : Instantiate ListAllDnsRecordsOptions +func (*DnsRecordsV1) NewListAllDnsRecordsOptions() *ListAllDnsRecordsOptions { + return &ListAllDnsRecordsOptions{} +} + +// SetType : Allow user to set Type +func (options *ListAllDnsRecordsOptions) SetType(typeVar string) *ListAllDnsRecordsOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetName : Allow user to set Name +func (options *ListAllDnsRecordsOptions) SetName(name string) *ListAllDnsRecordsOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetContent : Allow user to set Content +func (options *ListAllDnsRecordsOptions) SetContent(content string) *ListAllDnsRecordsOptions { + options.Content = core.StringPtr(content) + return options +} + +// SetPage : Allow user to set Page +func (options *ListAllDnsRecordsOptions) SetPage(page int64) *ListAllDnsRecordsOptions { + options.Page = core.Int64Ptr(page) + return options +} + +// SetPerPage : Allow user to set PerPage +func (options *ListAllDnsRecordsOptions) SetPerPage(perPage int64) *ListAllDnsRecordsOptions { + options.PerPage = core.Int64Ptr(perPage) + return options +} + +// SetOrder : Allow user to set Order +func (options *ListAllDnsRecordsOptions) SetOrder(order string) *ListAllDnsRecordsOptions { + options.Order = core.StringPtr(order) + return options +} + +// SetDirection : Allow user to set Direction +func (options *ListAllDnsRecordsOptions) SetDirection(direction string) *ListAllDnsRecordsOptions { + options.Direction = core.StringPtr(direction) + return options +} + +// SetMatch : Allow user to set Match +func (options *ListAllDnsRecordsOptions) SetMatch(match string) *ListAllDnsRecordsOptions { + options.Match = core.StringPtr(match) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListAllDnsRecordsOptions) SetHeaders(param map[string]string) *ListAllDnsRecordsOptions { + options.Headers = param + return options +} + +// UpdateDnsRecordOptions : The UpdateDnsRecord options. +type UpdateDnsRecordOptions struct { + // Identifier of DNS record. + DnsrecordIdentifier *string `json:"dnsrecord_identifier" validate:"required,ne="` + + // Required for all record types except SRV. + Name *string `json:"name,omitempty"` + + // dns record type. + Type *string `json:"type,omitempty"` + + // dns record ttl value. + TTL *int64 `json:"ttl,omitempty"` + + // content of dns record. + Content *string `json:"content,omitempty"` + + // For MX records only. + Priority *int64 `json:"priority,omitempty"` + + // proxied. + Proxied *bool `json:"proxied,omitempty"` + + // For LOC, SRV and CAA records only. + Data interface{} `json:"data,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateDnsRecordOptions.Type property. +// dns record type. 
+const ( + UpdateDnsRecordOptions_Type_A = "A" + UpdateDnsRecordOptions_Type_Aaaa = "AAAA" + UpdateDnsRecordOptions_Type_Caa = "CAA" + UpdateDnsRecordOptions_Type_Cname = "CNAME" + UpdateDnsRecordOptions_Type_Loc = "LOC" + UpdateDnsRecordOptions_Type_Mx = "MX" + UpdateDnsRecordOptions_Type_Ns = "NS" + UpdateDnsRecordOptions_Type_Spf = "SPF" + UpdateDnsRecordOptions_Type_Srv = "SRV" + UpdateDnsRecordOptions_Type_Txt = "TXT" +) + +// NewUpdateDnsRecordOptions : Instantiate UpdateDnsRecordOptions +func (*DnsRecordsV1) NewUpdateDnsRecordOptions(dnsrecordIdentifier string) *UpdateDnsRecordOptions { + return &UpdateDnsRecordOptions{ + DnsrecordIdentifier: core.StringPtr(dnsrecordIdentifier), + } +} + +// SetDnsrecordIdentifier : Allow user to set DnsrecordIdentifier +func (options *UpdateDnsRecordOptions) SetDnsrecordIdentifier(dnsrecordIdentifier string) *UpdateDnsRecordOptions { + options.DnsrecordIdentifier = core.StringPtr(dnsrecordIdentifier) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateDnsRecordOptions) SetName(name string) *UpdateDnsRecordOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetType : Allow user to set Type +func (options *UpdateDnsRecordOptions) SetType(typeVar string) *UpdateDnsRecordOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetTTL : Allow user to set TTL +func (options *UpdateDnsRecordOptions) SetTTL(ttl int64) *UpdateDnsRecordOptions { + options.TTL = core.Int64Ptr(ttl) + return options +} + +// SetContent : Allow user to set Content +func (options *UpdateDnsRecordOptions) SetContent(content string) *UpdateDnsRecordOptions { + options.Content = core.StringPtr(content) + return options +} + +// SetPriority : Allow user to set Priority +func (options *UpdateDnsRecordOptions) SetPriority(priority int64) *UpdateDnsRecordOptions { + options.Priority = core.Int64Ptr(priority) + return options +} + +// SetProxied : Allow user to set Proxied +func (options *UpdateDnsRecordOptions) SetProxied(proxied bool) *UpdateDnsRecordOptions { + options.Proxied = core.BoolPtr(proxied) + return options +} + +// SetData : Allow user to set Data +func (options *UpdateDnsRecordOptions) SetData(data interface{}) *UpdateDnsRecordOptions { + options.Data = data + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateDnsRecordOptions) SetHeaders(param map[string]string) *UpdateDnsRecordOptions { + options.Headers = param + return options +} + +// DeleteDnsrecordResp : dns record delete response. +type DeleteDnsrecordResp struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // result. + Result *DeleteDnsrecordRespResult `json:"result" validate:"required"` +} + + +// UnmarshalDeleteDnsrecordResp unmarshals an instance of DeleteDnsrecordResp from the specified map of raw messages. 
+func UnmarshalDeleteDnsrecordResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteDnsrecordResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDeleteDnsrecordRespResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DnsrecordDetails : dns record details. +type DnsrecordDetails struct { + // dns record identifier. + ID *string `json:"id,omitempty"` + + // created on. + CreatedOn *string `json:"created_on,omitempty"` + + // modified date. + ModifiedOn *string `json:"modified_on,omitempty"` + + // dns record name. + Name *string `json:"name,omitempty"` + + // dns record type. + Type *string `json:"type,omitempty"` + + // dns record content. + Content *string `json:"content,omitempty"` + + // zone identifier. + ZoneID *string `json:"zone_id,omitempty"` + + // zone name. + ZoneName *string `json:"zone_name,omitempty"` + + // proxiable. + Proxiable *bool `json:"proxiable,omitempty"` + + // proxied. + Proxied *bool `json:"proxied,omitempty"` + + // dns record ttl value. + TTL *int64 `json:"ttl,omitempty"` + + // Relevant only to MX type records. + Priority *int64 `json:"priority,omitempty"` + + // Data details for the DNS record. Only for LOC, SRV, CAA records. + Data interface{} `json:"data,omitempty"` +} + +// Constants associated with the DnsrecordDetails.Type property. +// dns record type. +const ( + DnsrecordDetails_Type_A = "A" + DnsrecordDetails_Type_Aaaa = "AAAA" + DnsrecordDetails_Type_Caa = "CAA" + DnsrecordDetails_Type_Cname = "CNAME" + DnsrecordDetails_Type_Loc = "LOC" + DnsrecordDetails_Type_Mx = "MX" + DnsrecordDetails_Type_Ns = "NS" + DnsrecordDetails_Type_Spf = "SPF" + DnsrecordDetails_Type_Srv = "SRV" + DnsrecordDetails_Type_Txt = "TXT" +) + + +// UnmarshalDnsrecordDetails unmarshals an instance of DnsrecordDetails from the specified map of raw messages. 
+func UnmarshalDnsrecordDetails(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DnsrecordDetails) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "content", &obj.Content) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "zone_id", &obj.ZoneID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "zone_name", &obj.ZoneName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "proxiable", &obj.Proxiable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "proxied", &obj.Proxied) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ttl", &obj.TTL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "data", &obj.Data) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DnsrecordResp : dns record response. +type DnsrecordResp struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // dns record details. + Result *DnsrecordDetails `json:"result" validate:"required"` +} + + +// UnmarshalDnsrecordResp unmarshals an instance of DnsrecordResp from the specified map of raw messages. +func UnmarshalDnsrecordResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DnsrecordResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDnsrecordDetails) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListDnsrecordsResp : dns records list response. +type ListDnsrecordsResp struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // dns record list. + Result []DnsrecordDetails `json:"result" validate:"required"` + + // result information. + ResultInfo *ResultInfo `json:"result_info" validate:"required"` +} + + +// UnmarshalListDnsrecordsResp unmarshals an instance of ListDnsrecordsResp from the specified map of raw messages. 
+func UnmarshalListDnsrecordsResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListDnsrecordsResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDnsrecordDetails) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResultInfo : result information. +type ResultInfo struct { + // page. + Page *int64 `json:"page" validate:"required"` + + // per page. + PerPage *int64 `json:"per_page" validate:"required"` + + // count. + Count *int64 `json:"count" validate:"required"` + + // total count. + TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalResultInfo unmarshals an instance of ResultInfo from the specified map of raw messages. +func UnmarshalResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/dnssvcsv1/dns_svcs_v1.go b/vendor/github.com/IBM/networking-go-sdk/dnssvcsv1/dns_svcs_v1.go new file mode 100644 index 00000000000..dfe177706cf --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/dnssvcsv1/dns_svcs_v1.go @@ -0,0 +1,6335 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package dnssvcsv1 : Operations and models for the DnsSvcsV1 service +package dnssvcsv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// DnsSvcsV1 : DNS Services API +// +// Version: 1.0.0 +type DnsSvcsV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.dns-svcs.cloud.ibm.com/v1" + +// DefaultServiceName is the default key used to find external configuration information. 
+const DefaultServiceName = "dns_svcs" + +// DnsSvcsV1Options : Service options +type DnsSvcsV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewDnsSvcsV1UsingExternalConfig : constructs an instance of DnsSvcsV1 with passed in options and external configuration. +func NewDnsSvcsV1UsingExternalConfig(options *DnsSvcsV1Options) (dnsSvcs *DnsSvcsV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + dnsSvcs, err = NewDnsSvcsV1(options) + if err != nil { + return + } + + err = dnsSvcs.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = dnsSvcs.Service.SetServiceURL(options.URL) + } + return +} + +// NewDnsSvcsV1 : constructs an instance of DnsSvcsV1 with passed in options. +func NewDnsSvcsV1(options *DnsSvcsV1Options) (service *DnsSvcsV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &DnsSvcsV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "dnsSvcs" suitable for processing requests. +func (dnsSvcs *DnsSvcsV1) Clone() *DnsSvcsV1 { + if core.IsNil(dnsSvcs) { + return nil + } + clone := *dnsSvcs + clone.Service = dnsSvcs.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (dnsSvcs *DnsSvcsV1) SetServiceURL(url string) error { + return dnsSvcs.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (dnsSvcs *DnsSvcsV1) GetServiceURL() string { + return dnsSvcs.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (dnsSvcs *DnsSvcsV1) SetDefaultHeaders(headers http.Header) { + dnsSvcs.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (dnsSvcs *DnsSvcsV1) SetEnableGzipCompression(enableGzip bool) { + dnsSvcs.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (dnsSvcs *DnsSvcsV1) GetEnableGzipCompression() bool { + return dnsSvcs.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (dnsSvcs *DnsSvcsV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + dnsSvcs.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (dnsSvcs *DnsSvcsV1) DisableRetries() { + dnsSvcs.Service.DisableRetries() +} + +// ListDnszones : List DNS zones +// List the DNS zones for a given service instance. 
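+//
+// A minimal paging sketch (assumes a client built via
+// NewDnsSvcsV1UsingExternalConfig above, with IAM credentials available in
+// the environment; the instance ID is a placeholder and the option
+// constructor/setters are the generated ones defined later in this file):
+//
+//   opts := dnsSvcs.NewListDnszonesOptions("my-instance-id")
+//   opts.SetOffset(0)
+//   opts.SetLimit(100)
+//   zones, response, err := dnsSvcs.ListDnszones(opts)
+//   if err != nil {
+//     fmt.Println(response, err)
+//   }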
+func (dnsSvcs *DnsSvcsV1) ListDnszones(listDnszonesOptions *ListDnszonesOptions) (result *ListDnszones, response *core.DetailedResponse, err error) { + return dnsSvcs.ListDnszonesWithContext(context.Background(), listDnszonesOptions) +} + +// ListDnszonesWithContext is an alternate form of the ListDnszones method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) ListDnszonesWithContext(ctx context.Context, listDnszonesOptions *ListDnszonesOptions) (result *ListDnszones, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listDnszonesOptions, "listDnszonesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listDnszonesOptions, "listDnszonesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *listDnszonesOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listDnszonesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "ListDnszones") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if listDnszonesOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*listDnszonesOptions.XCorrelationID)) + } + + if listDnszonesOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listDnszonesOptions.Offset)) + } + if listDnszonesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listDnszonesOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListDnszones) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateDnszone : Create a DNS zone +// Create a DNS zone for a given service instance. 
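+//
+// A sketch of creating a zone; only the fields set on the options are sent
+// in the request body (identifiers are placeholders):
+//
+//   opts := dnsSvcs.NewCreateDnszoneOptions("my-instance-id")
+//   opts.SetName("example.com")
+//   opts.SetDescription("zone for example.com")
+//   zone, _, err := dnsSvcs.CreateDnszone(opts)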
+func (dnsSvcs *DnsSvcsV1) CreateDnszone(createDnszoneOptions *CreateDnszoneOptions) (result *Dnszone, response *core.DetailedResponse, err error) { + return dnsSvcs.CreateDnszoneWithContext(context.Background(), createDnszoneOptions) +} + +// CreateDnszoneWithContext is an alternate form of the CreateDnszone method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) CreateDnszoneWithContext(ctx context.Context, createDnszoneOptions *CreateDnszoneOptions) (result *Dnszone, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createDnszoneOptions, "createDnszoneOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createDnszoneOptions, "createDnszoneOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *createDnszoneOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createDnszoneOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "CreateDnszone") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createDnszoneOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*createDnszoneOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if createDnszoneOptions.Name != nil { + body["name"] = createDnszoneOptions.Name + } + if createDnszoneOptions.Description != nil { + body["description"] = createDnszoneOptions.Description + } + if createDnszoneOptions.Label != nil { + body["label"] = createDnszoneOptions.Label + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDnszone) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteDnszone : Delete a DNS zone +// Delete a DNS zone. 
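+//
+// Deletion returns only a detailed response, no result model; a sketch
+// (identifiers are placeholders):
+//
+//   opts := dnsSvcs.NewDeleteDnszoneOptions("my-instance-id", "my-zone-id")
+//   response, err := dnsSvcs.DeleteDnszone(opts)
+//   // on success, expect a 2xx status such as 204 No Content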
+func (dnsSvcs *DnsSvcsV1) DeleteDnszone(deleteDnszoneOptions *DeleteDnszoneOptions) (response *core.DetailedResponse, err error) { + return dnsSvcs.DeleteDnszoneWithContext(context.Background(), deleteDnszoneOptions) +} + +// DeleteDnszoneWithContext is an alternate form of the DeleteDnszone method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) DeleteDnszoneWithContext(ctx context.Context, deleteDnszoneOptions *DeleteDnszoneOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteDnszoneOptions, "deleteDnszoneOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteDnszoneOptions, "deleteDnszoneOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *deleteDnszoneOptions.InstanceID, + "dnszone_id": *deleteDnszoneOptions.DnszoneID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteDnszoneOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "DeleteDnszone") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteDnszoneOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*deleteDnszoneOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = dnsSvcs.Service.Request(request, nil) + + return +} + +// GetDnszone : Get a DNS zone +// Get details of a DNS zone. 
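+//
+// The *WithContext form accepts a caller-supplied context, for example to
+// bound the request with a timeout (a sketch; IDs are placeholders):
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//   defer cancel()
+//   opts := dnsSvcs.NewGetDnszoneOptions("my-instance-id", "my-zone-id")
+//   zone, _, err := dnsSvcs.GetDnszoneWithContext(ctx, opts)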
+func (dnsSvcs *DnsSvcsV1) GetDnszone(getDnszoneOptions *GetDnszoneOptions) (result *Dnszone, response *core.DetailedResponse, err error) { + return dnsSvcs.GetDnszoneWithContext(context.Background(), getDnszoneOptions) +} + +// GetDnszoneWithContext is an alternate form of the GetDnszone method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) GetDnszoneWithContext(ctx context.Context, getDnszoneOptions *GetDnszoneOptions) (result *Dnszone, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getDnszoneOptions, "getDnszoneOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getDnszoneOptions, "getDnszoneOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *getDnszoneOptions.InstanceID, + "dnszone_id": *getDnszoneOptions.DnszoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getDnszoneOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "GetDnszone") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getDnszoneOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*getDnszoneOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDnszone) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateDnszone : Update the properties of a DNS zone +// Update the properties of a DNS zone. 
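+//
+// This issues a PATCH, and only fields set on the options are included in the
+// body, so a sketch updating just the description looks like:
+//
+//   opts := dnsSvcs.NewUpdateDnszoneOptions("my-instance-id", "my-zone-id")
+//   opts.SetDescription("updated description")
+//   zone, _, err := dnsSvcs.UpdateDnszone(opts)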
+func (dnsSvcs *DnsSvcsV1) UpdateDnszone(updateDnszoneOptions *UpdateDnszoneOptions) (result *Dnszone, response *core.DetailedResponse, err error) { + return dnsSvcs.UpdateDnszoneWithContext(context.Background(), updateDnszoneOptions) +} + +// UpdateDnszoneWithContext is an alternate form of the UpdateDnszone method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) UpdateDnszoneWithContext(ctx context.Context, updateDnszoneOptions *UpdateDnszoneOptions) (result *Dnszone, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateDnszoneOptions, "updateDnszoneOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateDnszoneOptions, "updateDnszoneOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *updateDnszoneOptions.InstanceID, + "dnszone_id": *updateDnszoneOptions.DnszoneID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateDnszoneOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "UpdateDnszone") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateDnszoneOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*updateDnszoneOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if updateDnszoneOptions.Description != nil { + body["description"] = updateDnszoneOptions.Description + } + if updateDnszoneOptions.Label != nil { + body["label"] = updateDnszoneOptions.Label + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDnszone) + if err != nil { + return + } + response.Result = result + + return +} + +// ListResourceRecords : List Resource Records +// List the Resource Records for a given DNS zone. 
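+//
+// Offset/limit paging works the same way as for zones; a sketch (identifiers
+// are placeholders):
+//
+//   opts := dnsSvcs.NewListResourceRecordsOptions("my-instance-id", "my-zone-id")
+//   opts.SetLimit(200)
+//   records, _, err := dnsSvcs.ListResourceRecords(opts)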
+func (dnsSvcs *DnsSvcsV1) ListResourceRecords(listResourceRecordsOptions *ListResourceRecordsOptions) (result *ListResourceRecords, response *core.DetailedResponse, err error) { + return dnsSvcs.ListResourceRecordsWithContext(context.Background(), listResourceRecordsOptions) +} + +// ListResourceRecordsWithContext is an alternate form of the ListResourceRecords method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) ListResourceRecordsWithContext(ctx context.Context, listResourceRecordsOptions *ListResourceRecordsOptions) (result *ListResourceRecords, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listResourceRecordsOptions, "listResourceRecordsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listResourceRecordsOptions, "listResourceRecordsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *listResourceRecordsOptions.InstanceID, + "dnszone_id": *listResourceRecordsOptions.DnszoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/resource_records`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listResourceRecordsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "ListResourceRecords") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if listResourceRecordsOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*listResourceRecordsOptions.XCorrelationID)) + } + + if listResourceRecordsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listResourceRecordsOptions.Offset)) + } + if listResourceRecordsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listResourceRecordsOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListResourceRecords) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateResourceRecord : Create a resource record +// Create a resource record for a given DNS zone. 
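+//
+// The rdata payload is typed per record kind; a sketch for an A record,
+// assuming the generated rdata model constructors defined later in this file
+// (the address and names are placeholders):
+//
+//   rdata, _ := dnsSvcs.NewResourceRecordInputRdataRdataARecord("10.240.0.4")
+//   opts := dnsSvcs.NewCreateResourceRecordOptions("my-instance-id", "my-zone-id")
+//   opts.SetType("A")
+//   opts.SetName("www.example.com")
+//   opts.SetRdata(rdata)
+//   opts.SetTTL(300)
+//   record, _, err := dnsSvcs.CreateResourceRecord(opts)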
+func (dnsSvcs *DnsSvcsV1) CreateResourceRecord(createResourceRecordOptions *CreateResourceRecordOptions) (result *ResourceRecord, response *core.DetailedResponse, err error) { + return dnsSvcs.CreateResourceRecordWithContext(context.Background(), createResourceRecordOptions) +} + +// CreateResourceRecordWithContext is an alternate form of the CreateResourceRecord method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) CreateResourceRecordWithContext(ctx context.Context, createResourceRecordOptions *CreateResourceRecordOptions) (result *ResourceRecord, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createResourceRecordOptions, "createResourceRecordOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createResourceRecordOptions, "createResourceRecordOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *createResourceRecordOptions.InstanceID, + "dnszone_id": *createResourceRecordOptions.DnszoneID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/resource_records`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createResourceRecordOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "CreateResourceRecord") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createResourceRecordOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*createResourceRecordOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if createResourceRecordOptions.Name != nil { + body["name"] = createResourceRecordOptions.Name + } + if createResourceRecordOptions.Type != nil { + body["type"] = createResourceRecordOptions.Type + } + if createResourceRecordOptions.Rdata != nil { + body["rdata"] = createResourceRecordOptions.Rdata + } + if createResourceRecordOptions.TTL != nil { + body["ttl"] = createResourceRecordOptions.TTL + } + if createResourceRecordOptions.Service != nil { + body["service"] = createResourceRecordOptions.Service + } + if createResourceRecordOptions.Protocol != nil { + body["protocol"] = createResourceRecordOptions.Protocol + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalResourceRecord) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteResourceRecord : Delete a resource record +// Delete a resource record. 
+func (dnsSvcs *DnsSvcsV1) DeleteResourceRecord(deleteResourceRecordOptions *DeleteResourceRecordOptions) (response *core.DetailedResponse, err error) { + return dnsSvcs.DeleteResourceRecordWithContext(context.Background(), deleteResourceRecordOptions) +} + +// DeleteResourceRecordWithContext is an alternate form of the DeleteResourceRecord method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) DeleteResourceRecordWithContext(ctx context.Context, deleteResourceRecordOptions *DeleteResourceRecordOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteResourceRecordOptions, "deleteResourceRecordOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteResourceRecordOptions, "deleteResourceRecordOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *deleteResourceRecordOptions.InstanceID, + "dnszone_id": *deleteResourceRecordOptions.DnszoneID, + "record_id": *deleteResourceRecordOptions.RecordID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/resource_records/{record_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteResourceRecordOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "DeleteResourceRecord") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteResourceRecordOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*deleteResourceRecordOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = dnsSvcs.Service.Request(request, nil) + + return +} + +// GetResourceRecord : Get a resource record +// Get details of a resource record. 
+func (dnsSvcs *DnsSvcsV1) GetResourceRecord(getResourceRecordOptions *GetResourceRecordOptions) (result *ResourceRecord, response *core.DetailedResponse, err error) { + return dnsSvcs.GetResourceRecordWithContext(context.Background(), getResourceRecordOptions) +} + +// GetResourceRecordWithContext is an alternate form of the GetResourceRecord method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) GetResourceRecordWithContext(ctx context.Context, getResourceRecordOptions *GetResourceRecordOptions) (result *ResourceRecord, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getResourceRecordOptions, "getResourceRecordOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getResourceRecordOptions, "getResourceRecordOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *getResourceRecordOptions.InstanceID, + "dnszone_id": *getResourceRecordOptions.DnszoneID, + "record_id": *getResourceRecordOptions.RecordID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/resource_records/{record_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getResourceRecordOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "GetResourceRecord") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getResourceRecordOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*getResourceRecordOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalResourceRecord) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateResourceRecord : Update the properties of a resource record +// Update the properties of a resource record. 
+func (dnsSvcs *DnsSvcsV1) UpdateResourceRecord(updateResourceRecordOptions *UpdateResourceRecordOptions) (result *ResourceRecord, response *core.DetailedResponse, err error) { + return dnsSvcs.UpdateResourceRecordWithContext(context.Background(), updateResourceRecordOptions) +} + +// UpdateResourceRecordWithContext is an alternate form of the UpdateResourceRecord method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) UpdateResourceRecordWithContext(ctx context.Context, updateResourceRecordOptions *UpdateResourceRecordOptions) (result *ResourceRecord, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateResourceRecordOptions, "updateResourceRecordOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateResourceRecordOptions, "updateResourceRecordOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *updateResourceRecordOptions.InstanceID, + "dnszone_id": *updateResourceRecordOptions.DnszoneID, + "record_id": *updateResourceRecordOptions.RecordID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/resource_records/{record_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateResourceRecordOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "UpdateResourceRecord") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateResourceRecordOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*updateResourceRecordOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if updateResourceRecordOptions.Name != nil { + body["name"] = updateResourceRecordOptions.Name + } + if updateResourceRecordOptions.Rdata != nil { + body["rdata"] = updateResourceRecordOptions.Rdata + } + if updateResourceRecordOptions.TTL != nil { + body["ttl"] = updateResourceRecordOptions.TTL + } + if updateResourceRecordOptions.Service != nil { + body["service"] = updateResourceRecordOptions.Service + } + if updateResourceRecordOptions.Protocol != nil { + body["protocol"] = updateResourceRecordOptions.Protocol + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalResourceRecord) + if err != nil { + return + } + response.Result = result + + return +} + +// ListPermittedNetworks : List permitted networks +// List the permitted networks for a given DNS zone. 
+func (dnsSvcs *DnsSvcsV1) ListPermittedNetworks(listPermittedNetworksOptions *ListPermittedNetworksOptions) (result *ListPermittedNetworks, response *core.DetailedResponse, err error) { + return dnsSvcs.ListPermittedNetworksWithContext(context.Background(), listPermittedNetworksOptions) +} + +// ListPermittedNetworksWithContext is an alternate form of the ListPermittedNetworks method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) ListPermittedNetworksWithContext(ctx context.Context, listPermittedNetworksOptions *ListPermittedNetworksOptions) (result *ListPermittedNetworks, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listPermittedNetworksOptions, "listPermittedNetworksOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listPermittedNetworksOptions, "listPermittedNetworksOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *listPermittedNetworksOptions.InstanceID, + "dnszone_id": *listPermittedNetworksOptions.DnszoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/permitted_networks`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listPermittedNetworksOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "ListPermittedNetworks") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if listPermittedNetworksOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*listPermittedNetworksOptions.XCorrelationID)) + } + + if listPermittedNetworksOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listPermittedNetworksOptions.Offset)) + } + if listPermittedNetworksOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listPermittedNetworksOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListPermittedNetworks) + if err != nil { + return + } + response.Result = result + + return +} + +// CreatePermittedNetwork : Create a permitted network +// Create a permitted network for a given DNS zone. 
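+//
+// A sketch that permits a VPC, assuming the PermittedNetworkVpc model and its
+// constructor defined later in this file (the CRN is a placeholder):
+//
+//   vpc, _ := dnsSvcs.NewPermittedNetworkVpc("crn:v1:bluemix:public:is:us-south:a/account::vpc:my-vpc")
+//   opts := dnsSvcs.NewCreatePermittedNetworkOptions("my-instance-id", "my-zone-id")
+//   opts.SetType("vpc")
+//   opts.SetPermittedNetwork(vpc)
+//   network, _, err := dnsSvcs.CreatePermittedNetwork(opts)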
+func (dnsSvcs *DnsSvcsV1) CreatePermittedNetwork(createPermittedNetworkOptions *CreatePermittedNetworkOptions) (result *PermittedNetwork, response *core.DetailedResponse, err error) { + return dnsSvcs.CreatePermittedNetworkWithContext(context.Background(), createPermittedNetworkOptions) +} + +// CreatePermittedNetworkWithContext is an alternate form of the CreatePermittedNetwork method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) CreatePermittedNetworkWithContext(ctx context.Context, createPermittedNetworkOptions *CreatePermittedNetworkOptions) (result *PermittedNetwork, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createPermittedNetworkOptions, "createPermittedNetworkOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createPermittedNetworkOptions, "createPermittedNetworkOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *createPermittedNetworkOptions.InstanceID, + "dnszone_id": *createPermittedNetworkOptions.DnszoneID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/permitted_networks`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createPermittedNetworkOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "CreatePermittedNetwork") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createPermittedNetworkOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*createPermittedNetworkOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if createPermittedNetworkOptions.Type != nil { + body["type"] = createPermittedNetworkOptions.Type + } + if createPermittedNetworkOptions.PermittedNetwork != nil { + body["permitted_network"] = createPermittedNetworkOptions.PermittedNetwork + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPermittedNetwork) + if err != nil { + return + } + response.Result = result + + return +} + +// DeletePermittedNetwork : Remove a permitted network +// Remove a permitted network. 
+func (dnsSvcs *DnsSvcsV1) DeletePermittedNetwork(deletePermittedNetworkOptions *DeletePermittedNetworkOptions) (result *PermittedNetwork, response *core.DetailedResponse, err error) { + return dnsSvcs.DeletePermittedNetworkWithContext(context.Background(), deletePermittedNetworkOptions) +} + +// DeletePermittedNetworkWithContext is an alternate form of the DeletePermittedNetwork method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) DeletePermittedNetworkWithContext(ctx context.Context, deletePermittedNetworkOptions *DeletePermittedNetworkOptions) (result *PermittedNetwork, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deletePermittedNetworkOptions, "deletePermittedNetworkOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deletePermittedNetworkOptions, "deletePermittedNetworkOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *deletePermittedNetworkOptions.InstanceID, + "dnszone_id": *deletePermittedNetworkOptions.DnszoneID, + "permitted_network_id": *deletePermittedNetworkOptions.PermittedNetworkID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/permitted_networks/{permitted_network_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deletePermittedNetworkOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "DeletePermittedNetwork") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if deletePermittedNetworkOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*deletePermittedNetworkOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPermittedNetwork) + if err != nil { + return + } + response.Result = result + + return +} + +// GetPermittedNetwork : Get a permitted network +// Get details of a permitted network. 
+func (dnsSvcs *DnsSvcsV1) GetPermittedNetwork(getPermittedNetworkOptions *GetPermittedNetworkOptions) (result *PermittedNetwork, response *core.DetailedResponse, err error) { + return dnsSvcs.GetPermittedNetworkWithContext(context.Background(), getPermittedNetworkOptions) +} + +// GetPermittedNetworkWithContext is an alternate form of the GetPermittedNetwork method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) GetPermittedNetworkWithContext(ctx context.Context, getPermittedNetworkOptions *GetPermittedNetworkOptions) (result *PermittedNetwork, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getPermittedNetworkOptions, "getPermittedNetworkOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getPermittedNetworkOptions, "getPermittedNetworkOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *getPermittedNetworkOptions.InstanceID, + "dnszone_id": *getPermittedNetworkOptions.DnszoneID, + "permitted_network_id": *getPermittedNetworkOptions.PermittedNetworkID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/permitted_networks/{permitted_network_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getPermittedNetworkOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "GetPermittedNetwork") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getPermittedNetworkOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*getPermittedNetworkOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPermittedNetwork) + if err != nil { + return + } + response.Result = result + + return +} + +// ListLoadBalancers : List load balancers +// List the Global Load Balancers for a given DNS zone. 
+func (dnsSvcs *DnsSvcsV1) ListLoadBalancers(listLoadBalancersOptions *ListLoadBalancersOptions) (result *ListLoadBalancers, response *core.DetailedResponse, err error) { + return dnsSvcs.ListLoadBalancersWithContext(context.Background(), listLoadBalancersOptions) +} + +// ListLoadBalancersWithContext is an alternate form of the ListLoadBalancers method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) ListLoadBalancersWithContext(ctx context.Context, listLoadBalancersOptions *ListLoadBalancersOptions) (result *ListLoadBalancers, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listLoadBalancersOptions, "listLoadBalancersOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listLoadBalancersOptions, "listLoadBalancersOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *listLoadBalancersOptions.InstanceID, + "dnszone_id": *listLoadBalancersOptions.DnszoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/load_balancers`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listLoadBalancersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "ListLoadBalancers") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if listLoadBalancersOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*listLoadBalancersOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListLoadBalancers) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancer : Create a load balancer +// Create a load balancer for a given DNS zone. 
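+//
+// A sketch that fronts two pools; the pool IDs would come from earlier
+// CreatePool responses (all identifiers here are placeholders):
+//
+//   opts := dnsSvcs.NewCreateLoadBalancerOptions("my-instance-id", "my-zone-id")
+//   opts.SetName("glb.example.com")
+//   opts.SetEnabled(true)
+//   opts.SetTTL(120)
+//   opts.SetFallbackPool("fallback-pool-id")
+//   opts.SetDefaultPools([]string{"pool-id-1", "pool-id-2"})
+//   lb, _, err := dnsSvcs.CreateLoadBalancer(opts)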
+func (dnsSvcs *DnsSvcsV1) CreateLoadBalancer(createLoadBalancerOptions *CreateLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + return dnsSvcs.CreateLoadBalancerWithContext(context.Background(), createLoadBalancerOptions) +} + +// CreateLoadBalancerWithContext is an alternate form of the CreateLoadBalancer method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) CreateLoadBalancerWithContext(ctx context.Context, createLoadBalancerOptions *CreateLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createLoadBalancerOptions, "createLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createLoadBalancerOptions, "createLoadBalancerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *createLoadBalancerOptions.InstanceID, + "dnszone_id": *createLoadBalancerOptions.DnszoneID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/load_balancers`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "CreateLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createLoadBalancerOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*createLoadBalancerOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if createLoadBalancerOptions.Name != nil { + body["name"] = createLoadBalancerOptions.Name + } + if createLoadBalancerOptions.Description != nil { + body["description"] = createLoadBalancerOptions.Description + } + if createLoadBalancerOptions.Enabled != nil { + body["enabled"] = createLoadBalancerOptions.Enabled + } + if createLoadBalancerOptions.TTL != nil { + body["ttl"] = createLoadBalancerOptions.TTL + } + if createLoadBalancerOptions.FallbackPool != nil { + body["fallback_pool"] = createLoadBalancerOptions.FallbackPool + } + if createLoadBalancerOptions.DefaultPools != nil { + body["default_pools"] = createLoadBalancerOptions.DefaultPools + } + if createLoadBalancerOptions.AzPools != nil { + body["az_pools"] = createLoadBalancerOptions.AzPools + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteLoadBalancer : Delete a load balancer +// Delete a load balancer. 
+func (dnsSvcs *DnsSvcsV1) DeleteLoadBalancer(deleteLoadBalancerOptions *DeleteLoadBalancerOptions) (response *core.DetailedResponse, err error) { + return dnsSvcs.DeleteLoadBalancerWithContext(context.Background(), deleteLoadBalancerOptions) +} + +// DeleteLoadBalancerWithContext is an alternate form of the DeleteLoadBalancer method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) DeleteLoadBalancerWithContext(ctx context.Context, deleteLoadBalancerOptions *DeleteLoadBalancerOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerOptions, "deleteLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerOptions, "deleteLoadBalancerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *deleteLoadBalancerOptions.InstanceID, + "dnszone_id": *deleteLoadBalancerOptions.DnszoneID, + "lb_id": *deleteLoadBalancerOptions.LbID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/load_balancers/{lb_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "DeleteLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteLoadBalancerOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*deleteLoadBalancerOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = dnsSvcs.Service.Request(request, nil) + + return +} + +// GetLoadBalancer : Get a load balancer +// Get details of a load balancer. 
+func (dnsSvcs *DnsSvcsV1) GetLoadBalancer(getLoadBalancerOptions *GetLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + return dnsSvcs.GetLoadBalancerWithContext(context.Background(), getLoadBalancerOptions) +} + +// GetLoadBalancerWithContext is an alternate form of the GetLoadBalancer method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) GetLoadBalancerWithContext(ctx context.Context, getLoadBalancerOptions *GetLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerOptions, "getLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerOptions, "getLoadBalancerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *getLoadBalancerOptions.InstanceID, + "dnszone_id": *getLoadBalancerOptions.DnszoneID, + "lb_id": *getLoadBalancerOptions.LbID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/load_balancers/{lb_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "GetLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getLoadBalancerOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*getLoadBalancerOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateLoadBalancer : Update the properties of a load balancer +// Update the properties of a load balancer. 
+func (dnsSvcs *DnsSvcsV1) UpdateLoadBalancer(updateLoadBalancerOptions *UpdateLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + return dnsSvcs.UpdateLoadBalancerWithContext(context.Background(), updateLoadBalancerOptions) +} + +// UpdateLoadBalancerWithContext is an alternate form of the UpdateLoadBalancer method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) UpdateLoadBalancerWithContext(ctx context.Context, updateLoadBalancerOptions *UpdateLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateLoadBalancerOptions, "updateLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateLoadBalancerOptions, "updateLoadBalancerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *updateLoadBalancerOptions.InstanceID, + "dnszone_id": *updateLoadBalancerOptions.DnszoneID, + "lb_id": *updateLoadBalancerOptions.LbID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/dnszones/{dnszone_id}/load_balancers/{lb_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "UpdateLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateLoadBalancerOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*updateLoadBalancerOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if updateLoadBalancerOptions.Name != nil { + body["name"] = updateLoadBalancerOptions.Name + } + if updateLoadBalancerOptions.Description != nil { + body["description"] = updateLoadBalancerOptions.Description + } + if updateLoadBalancerOptions.Enabled != nil { + body["enabled"] = updateLoadBalancerOptions.Enabled + } + if updateLoadBalancerOptions.TTL != nil { + body["ttl"] = updateLoadBalancerOptions.TTL + } + if updateLoadBalancerOptions.FallbackPool != nil { + body["fallback_pool"] = updateLoadBalancerOptions.FallbackPool + } + if updateLoadBalancerOptions.DefaultPools != nil { + body["default_pools"] = updateLoadBalancerOptions.DefaultPools + } + if updateLoadBalancerOptions.AzPools != nil { + body["az_pools"] = updateLoadBalancerOptions.AzPools + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer) + if err != nil { + return + } + response.Result = result + + return +} + +// ListPools : List load balancer pools +// List the load balancer pools. 
+func (dnsSvcs *DnsSvcsV1) ListPools(listPoolsOptions *ListPoolsOptions) (result *ListPools, response *core.DetailedResponse, err error) { + return dnsSvcs.ListPoolsWithContext(context.Background(), listPoolsOptions) +} + +// ListPoolsWithContext is an alternate form of the ListPools method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) ListPoolsWithContext(ctx context.Context, listPoolsOptions *ListPoolsOptions) (result *ListPools, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listPoolsOptions, "listPoolsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listPoolsOptions, "listPoolsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *listPoolsOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/pools`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listPoolsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "ListPools") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if listPoolsOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*listPoolsOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListPools) + if err != nil { + return + } + response.Result = result + + return +} + +// CreatePool : Create a load balancer pool +// Create a load balancer pool. 
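+//
+// A minimal usage sketch; dnsSvcs is assumed to be an authenticated *DnsSvcsV1
+// client and the instance ID and pool name are hypothetical placeholders (the
+// NewCreatePoolOptions constructor and setters are defined later in this file):
+//
+//	opts := dnsSvcs.NewCreatePoolOptions("instance-id")
+//	opts.SetName("primary-pool")
+//	opts.SetHealthyOriginsThreshold(1)
+//	pool, _, err := dnsSvcs.CreatePool(opts)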
+func (dnsSvcs *DnsSvcsV1) CreatePool(createPoolOptions *CreatePoolOptions) (result *Pool, response *core.DetailedResponse, err error) { + return dnsSvcs.CreatePoolWithContext(context.Background(), createPoolOptions) +} + +// CreatePoolWithContext is an alternate form of the CreatePool method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) CreatePoolWithContext(ctx context.Context, createPoolOptions *CreatePoolOptions) (result *Pool, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createPoolOptions, "createPoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createPoolOptions, "createPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *createPoolOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/pools`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "CreatePool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createPoolOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*createPoolOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if createPoolOptions.Name != nil { + body["name"] = createPoolOptions.Name + } + if createPoolOptions.Description != nil { + body["description"] = createPoolOptions.Description + } + if createPoolOptions.Enabled != nil { + body["enabled"] = createPoolOptions.Enabled + } + if createPoolOptions.HealthyOriginsThreshold != nil { + body["healthy_origins_threshold"] = createPoolOptions.HealthyOriginsThreshold + } + if createPoolOptions.Origins != nil { + body["origins"] = createPoolOptions.Origins + } + if createPoolOptions.Monitor != nil { + body["monitor"] = createPoolOptions.Monitor + } + if createPoolOptions.NotificationChannel != nil { + body["notification_channel"] = createPoolOptions.NotificationChannel + } + if createPoolOptions.HealthcheckRegion != nil { + body["healthcheck_region"] = createPoolOptions.HealthcheckRegion + } + if createPoolOptions.HealthcheckSubnets != nil { + body["healthcheck_subnets"] = createPoolOptions.HealthcheckSubnets + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPool) + if err != nil { + return + } + response.Result = result + + return +} + +// DeletePool : Delete a load balancer pool +// Delete a load balancer pool. 
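+//
+// A minimal usage sketch; dnsSvcs is assumed to be an authenticated *DnsSvcsV1
+// client and the IDs are hypothetical placeholders. Note that DeletePool
+// returns only a response and an error, with no result model:
+//
+//	resp, err := dnsSvcs.DeletePool(dnsSvcs.NewDeletePoolOptions("instance-id", "pool-id"))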
+func (dnsSvcs *DnsSvcsV1) DeletePool(deletePoolOptions *DeletePoolOptions) (response *core.DetailedResponse, err error) { + return dnsSvcs.DeletePoolWithContext(context.Background(), deletePoolOptions) +} + +// DeletePoolWithContext is an alternate form of the DeletePool method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) DeletePoolWithContext(ctx context.Context, deletePoolOptions *DeletePoolOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deletePoolOptions, "deletePoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deletePoolOptions, "deletePoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *deletePoolOptions.InstanceID, + "pool_id": *deletePoolOptions.PoolID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/pools/{pool_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deletePoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "DeletePool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deletePoolOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*deletePoolOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = dnsSvcs.Service.Request(request, nil) + + return +} + +// GetPool : Get a load balancer pool +// Get details of a load balancer pool. +func (dnsSvcs *DnsSvcsV1) GetPool(getPoolOptions *GetPoolOptions) (result *Pool, response *core.DetailedResponse, err error) { + return dnsSvcs.GetPoolWithContext(context.Background(), getPoolOptions) +} + +// GetPoolWithContext is an alternate form of the GetPool method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) GetPoolWithContext(ctx context.Context, getPoolOptions *GetPoolOptions) (result *Pool, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getPoolOptions, "getPoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getPoolOptions, "getPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *getPoolOptions.InstanceID, + "pool_id": *getPoolOptions.PoolID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/pools/{pool_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "GetPool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getPoolOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*getPoolOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + 
} + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPool) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdatePool : Update the properties of a load balancer pool +// Update the properties of a load balancer pool. +func (dnsSvcs *DnsSvcsV1) UpdatePool(updatePoolOptions *UpdatePoolOptions) (result *Pool, response *core.DetailedResponse, err error) { + return dnsSvcs.UpdatePoolWithContext(context.Background(), updatePoolOptions) +} + +// UpdatePoolWithContext is an alternate form of the UpdatePool method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) UpdatePoolWithContext(ctx context.Context, updatePoolOptions *UpdatePoolOptions) (result *Pool, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updatePoolOptions, "updatePoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updatePoolOptions, "updatePoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *updatePoolOptions.InstanceID, + "pool_id": *updatePoolOptions.PoolID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/pools/{pool_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updatePoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "UpdatePool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updatePoolOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*updatePoolOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if updatePoolOptions.Name != nil { + body["name"] = updatePoolOptions.Name + } + if updatePoolOptions.Description != nil { + body["description"] = updatePoolOptions.Description + } + if updatePoolOptions.Enabled != nil { + body["enabled"] = updatePoolOptions.Enabled + } + if updatePoolOptions.HealthyOriginsThreshold != nil { + body["healthy_origins_threshold"] = updatePoolOptions.HealthyOriginsThreshold + } + if updatePoolOptions.Origins != nil { + body["origins"] = updatePoolOptions.Origins + } + if updatePoolOptions.Monitor != nil { + body["monitor"] = updatePoolOptions.Monitor + } + if updatePoolOptions.NotificationChannel != nil { + body["notification_channel"] = updatePoolOptions.NotificationChannel + } + if updatePoolOptions.HealthcheckRegion != nil { + body["healthcheck_region"] = updatePoolOptions.HealthcheckRegion + } + if updatePoolOptions.HealthcheckSubnets != nil { + body["healthcheck_subnets"] = updatePoolOptions.HealthcheckSubnets + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPool) + if err != nil { + return + } + response.Result = result + + return +} + +// ListMonitors : List load balancer monitors +// List the load balancer monitors. 
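+//
+// A minimal usage sketch; dnsSvcs is assumed to be an authenticated *DnsSvcsV1
+// client, the instance ID is a hypothetical placeholder, and the generated
+// NewListMonitorsOptions constructor is assumed to exist:
+//
+//	monitors, _, err := dnsSvcs.ListMonitors(dnsSvcs.NewListMonitorsOptions("instance-id"))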
+func (dnsSvcs *DnsSvcsV1) ListMonitors(listMonitorsOptions *ListMonitorsOptions) (result *ListMonitors, response *core.DetailedResponse, err error) { + return dnsSvcs.ListMonitorsWithContext(context.Background(), listMonitorsOptions) +} + +// ListMonitorsWithContext is an alternate form of the ListMonitors method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) ListMonitorsWithContext(ctx context.Context, listMonitorsOptions *ListMonitorsOptions) (result *ListMonitors, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listMonitorsOptions, "listMonitorsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listMonitorsOptions, "listMonitorsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *listMonitorsOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/monitors`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listMonitorsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "ListMonitors") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if listMonitorsOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*listMonitorsOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListMonitors) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateMonitor : Create a load balancer monitor +// Create a load balancer monitor. 
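+//
+// A minimal usage sketch; dnsSvcs is assumed to be an authenticated *DnsSvcsV1
+// client and the instance ID is a hypothetical placeholder. The type value
+// uses one of the CreateMonitorOptions constants defined later in this file:
+//
+//	opts := dnsSvcs.NewCreateMonitorOptions("instance-id")
+//	opts.SetType(CreateMonitorOptions_Type_Https)
+//	opts.SetExpectedCodes("200")
+//	monitor, _, err := dnsSvcs.CreateMonitor(opts)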
+func (dnsSvcs *DnsSvcsV1) CreateMonitor(createMonitorOptions *CreateMonitorOptions) (result *Monitor, response *core.DetailedResponse, err error) { + return dnsSvcs.CreateMonitorWithContext(context.Background(), createMonitorOptions) +} + +// CreateMonitorWithContext is an alternate form of the CreateMonitor method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) CreateMonitorWithContext(ctx context.Context, createMonitorOptions *CreateMonitorOptions) (result *Monitor, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createMonitorOptions, "createMonitorOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createMonitorOptions, "createMonitorOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *createMonitorOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/monitors`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createMonitorOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "CreateMonitor") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createMonitorOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*createMonitorOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if createMonitorOptions.Name != nil { + body["name"] = createMonitorOptions.Name + } + if createMonitorOptions.Description != nil { + body["description"] = createMonitorOptions.Description + } + if createMonitorOptions.Type != nil { + body["type"] = createMonitorOptions.Type + } + if createMonitorOptions.Port != nil { + body["port"] = createMonitorOptions.Port + } + if createMonitorOptions.Interval != nil { + body["interval"] = createMonitorOptions.Interval + } + if createMonitorOptions.Retries != nil { + body["retries"] = createMonitorOptions.Retries + } + if createMonitorOptions.Timeout != nil { + body["timeout"] = createMonitorOptions.Timeout + } + if createMonitorOptions.Method != nil { + body["method"] = createMonitorOptions.Method + } + if createMonitorOptions.Path != nil { + body["path"] = createMonitorOptions.Path + } + if createMonitorOptions.HeadersVar != nil { + body["headers"] = createMonitorOptions.HeadersVar + } + if createMonitorOptions.AllowInsecure != nil { + body["allow_insecure"] = createMonitorOptions.AllowInsecure + } + if createMonitorOptions.ExpectedCodes != nil { + body["expected_codes"] = createMonitorOptions.ExpectedCodes + } + if createMonitorOptions.ExpectedBody != nil { + body["expected_body"] = createMonitorOptions.ExpectedBody + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMonitor) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteMonitor : Delete a load balancer monitor +// Delete a load balancer monitor. 
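+//
+// A minimal usage sketch; dnsSvcs is assumed to be an authenticated *DnsSvcsV1
+// client and the IDs are hypothetical placeholders. Like DeletePool, this
+// method returns only a response and an error:
+//
+//	resp, err := dnsSvcs.DeleteMonitor(dnsSvcs.NewDeleteMonitorOptions("instance-id", "monitor-id"))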
+func (dnsSvcs *DnsSvcsV1) DeleteMonitor(deleteMonitorOptions *DeleteMonitorOptions) (response *core.DetailedResponse, err error) { + return dnsSvcs.DeleteMonitorWithContext(context.Background(), deleteMonitorOptions) +} + +// DeleteMonitorWithContext is an alternate form of the DeleteMonitor method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) DeleteMonitorWithContext(ctx context.Context, deleteMonitorOptions *DeleteMonitorOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteMonitorOptions, "deleteMonitorOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteMonitorOptions, "deleteMonitorOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *deleteMonitorOptions.InstanceID, + "monitor_id": *deleteMonitorOptions.MonitorID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/monitors/{monitor_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteMonitorOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "DeleteMonitor") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteMonitorOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*deleteMonitorOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = dnsSvcs.Service.Request(request, nil) + + return +} + +// GetMonitor : Get a load balancer monitor +// Get details of a load balancer monitor. 
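+//
+// A minimal usage sketch; dnsSvcs is assumed to be an authenticated *DnsSvcsV1
+// client, the IDs are hypothetical placeholders, and the generated
+// NewGetMonitorOptions constructor is assumed to exist:
+//
+//	monitor, _, err := dnsSvcs.GetMonitor(dnsSvcs.NewGetMonitorOptions("instance-id", "monitor-id"))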
+func (dnsSvcs *DnsSvcsV1) GetMonitor(getMonitorOptions *GetMonitorOptions) (result *Monitor, response *core.DetailedResponse, err error) { + return dnsSvcs.GetMonitorWithContext(context.Background(), getMonitorOptions) +} + +// GetMonitorWithContext is an alternate form of the GetMonitor method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) GetMonitorWithContext(ctx context.Context, getMonitorOptions *GetMonitorOptions) (result *Monitor, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getMonitorOptions, "getMonitorOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getMonitorOptions, "getMonitorOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *getMonitorOptions.InstanceID, + "monitor_id": *getMonitorOptions.MonitorID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/monitors/{monitor_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getMonitorOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "GetMonitor") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getMonitorOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*getMonitorOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMonitor) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateMonitor : Update the properties of a load balancer monitor +// Update the properties of a load balancer monitor. 
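+//
+// A minimal usage sketch; dnsSvcs is assumed to be an authenticated *DnsSvcsV1
+// client, the IDs are hypothetical placeholders, and the generated
+// NewUpdateMonitorOptions constructor and its setters are assumed to exist:
+//
+//	opts := dnsSvcs.NewUpdateMonitorOptions("instance-id", "monitor-id")
+//	opts.SetInterval(60)
+//	monitor, _, err := dnsSvcs.UpdateMonitor(opts)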
+func (dnsSvcs *DnsSvcsV1) UpdateMonitor(updateMonitorOptions *UpdateMonitorOptions) (result *Monitor, response *core.DetailedResponse, err error) { + return dnsSvcs.UpdateMonitorWithContext(context.Background(), updateMonitorOptions) +} + +// UpdateMonitorWithContext is an alternate form of the UpdateMonitor method which supports a Context parameter +func (dnsSvcs *DnsSvcsV1) UpdateMonitorWithContext(ctx context.Context, updateMonitorOptions *UpdateMonitorOptions) (result *Monitor, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateMonitorOptions, "updateMonitorOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateMonitorOptions, "updateMonitorOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *updateMonitorOptions.InstanceID, + "monitor_id": *updateMonitorOptions.MonitorID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = dnsSvcs.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(dnsSvcs.Service.Options.URL, `/instances/{instance_id}/monitors/{monitor_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateMonitorOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("dns_svcs", "V1", "UpdateMonitor") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateMonitorOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*updateMonitorOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if updateMonitorOptions.Name != nil { + body["name"] = updateMonitorOptions.Name + } + if updateMonitorOptions.Description != nil { + body["description"] = updateMonitorOptions.Description + } + if updateMonitorOptions.Type != nil { + body["type"] = updateMonitorOptions.Type + } + if updateMonitorOptions.Port != nil { + body["port"] = updateMonitorOptions.Port + } + if updateMonitorOptions.Interval != nil { + body["interval"] = updateMonitorOptions.Interval + } + if updateMonitorOptions.Retries != nil { + body["retries"] = updateMonitorOptions.Retries + } + if updateMonitorOptions.Timeout != nil { + body["timeout"] = updateMonitorOptions.Timeout + } + if updateMonitorOptions.Method != nil { + body["method"] = updateMonitorOptions.Method + } + if updateMonitorOptions.Path != nil { + body["path"] = updateMonitorOptions.Path + } + if updateMonitorOptions.HeadersVar != nil { + body["headers"] = updateMonitorOptions.HeadersVar + } + if updateMonitorOptions.AllowInsecure != nil { + body["allow_insecure"] = updateMonitorOptions.AllowInsecure + } + if updateMonitorOptions.ExpectedCodes != nil { + body["expected_codes"] = updateMonitorOptions.ExpectedCodes + } + if updateMonitorOptions.ExpectedBody != nil { + body["expected_body"] = updateMonitorOptions.ExpectedBody + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = dnsSvcs.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMonitor) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateDnszoneOptions : The 
CreateDnszone options. +type CreateDnszoneOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // Name of DNS zone. + Name *string `json:"name,omitempty"` + + // The text describing the purpose of a DNS zone. + Description *string `json:"description,omitempty"` + + // The label of a DNS zone. + Label *string `json:"label,omitempty"` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateDnszoneOptions : Instantiate CreateDnszoneOptions +func (*DnsSvcsV1) NewCreateDnszoneOptions(instanceID string) *CreateDnszoneOptions { + return &CreateDnszoneOptions{ + InstanceID: core.StringPtr(instanceID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *CreateDnszoneOptions) SetInstanceID(instanceID string) *CreateDnszoneOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetName : Allow user to set Name +func (options *CreateDnszoneOptions) SetName(name string) *CreateDnszoneOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateDnszoneOptions) SetDescription(description string) *CreateDnszoneOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetLabel : Allow user to set Label +func (options *CreateDnszoneOptions) SetLabel(label string) *CreateDnszoneOptions { + options.Label = core.StringPtr(label) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *CreateDnszoneOptions) SetXCorrelationID(xCorrelationID string) *CreateDnszoneOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateDnszoneOptions) SetHeaders(param map[string]string) *CreateDnszoneOptions { + options.Headers = param + return options +} + +// CreateLoadBalancerOptions : The CreateLoadBalancer options. +type CreateLoadBalancerOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // Name of the load balancer. + Name *string `json:"name,omitempty"` + + // Descriptive text of the load balancer. + Description *string `json:"description,omitempty"` + + // Whether the load balancer is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Time to live in second. + TTL *int64 `json:"ttl,omitempty"` + + // The pool ID to use when all other pools are detected as unhealthy. + FallbackPool *string `json:"fallback_pool,omitempty"` + + // A list of pool IDs ordered by their failover priority. Pools defined here are used by default, or when region_pools + // are not configured for a given region. + DefaultPools []string `json:"default_pools,omitempty"` + + // Map availability zones to pool IDs. + AzPools []LoadBalancerAzPoolsItem `json:"az_pools,omitempty"` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateLoadBalancerOptions : Instantiate CreateLoadBalancerOptions +func (*DnsSvcsV1) NewCreateLoadBalancerOptions(instanceID string, dnszoneID string) *CreateLoadBalancerOptions { + return &CreateLoadBalancerOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *CreateLoadBalancerOptions) SetInstanceID(instanceID string) *CreateLoadBalancerOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *CreateLoadBalancerOptions) SetDnszoneID(dnszoneID string) *CreateLoadBalancerOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetName : Allow user to set Name +func (options *CreateLoadBalancerOptions) SetName(name string) *CreateLoadBalancerOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateLoadBalancerOptions) SetDescription(description string) *CreateLoadBalancerOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *CreateLoadBalancerOptions) SetEnabled(enabled bool) *CreateLoadBalancerOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetTTL : Allow user to set TTL +func (options *CreateLoadBalancerOptions) SetTTL(ttl int64) *CreateLoadBalancerOptions { + options.TTL = core.Int64Ptr(ttl) + return options +} + +// SetFallbackPool : Allow user to set FallbackPool +func (options *CreateLoadBalancerOptions) SetFallbackPool(fallbackPool string) *CreateLoadBalancerOptions { + options.FallbackPool = core.StringPtr(fallbackPool) + return options +} + +// SetDefaultPools : Allow user to set DefaultPools +func (options *CreateLoadBalancerOptions) SetDefaultPools(defaultPools []string) *CreateLoadBalancerOptions { + options.DefaultPools = defaultPools + return options +} + +// SetAzPools : Allow user to set AzPools +func (options *CreateLoadBalancerOptions) SetAzPools(azPools []LoadBalancerAzPoolsItem) *CreateLoadBalancerOptions { + options.AzPools = azPools + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *CreateLoadBalancerOptions) SetXCorrelationID(xCorrelationID string) *CreateLoadBalancerOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateLoadBalancerOptions) SetHeaders(param map[string]string) *CreateLoadBalancerOptions { + options.Headers = param + return options +} + +// CreateMonitorOptions : The CreateMonitor options. +type CreateMonitorOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The name of the load balancer monitor. + Name *string `json:"name,omitempty"` + + // Descriptive text of the load balancer monitor. + Description *string `json:"description,omitempty"` + + // The protocol to use for the health check. Currently supported protocols are 'HTTP','HTTPS' and 'TCP'. + Type *string `json:"type,omitempty"` + + // Port number to connect to for the health check. Required for TCP checks. 
HTTP and HTTPS checks should only define
+	// the port when using a non-standard port (HTTP: default 80, HTTPS: default 443).
+	Port *int64 `json:"port,omitempty"`
+
+	// The interval between each health check. Shorter intervals may improve failover time, but will increase load on the
+	// origins as we check from multiple locations.
+	Interval *int64 `json:"interval,omitempty"`
+
+	// The number of retries to attempt in case of a timeout before marking the origin as unhealthy. Retries are attempted
+	// immediately.
+	Retries *int64 `json:"retries,omitempty"`
+
+	// The timeout (in seconds) before marking the health check as failed.
+	Timeout *int64 `json:"timeout,omitempty"`
+
+	// The method to use for the health check, applicable to HTTP/HTTPS based checks; the default value is 'GET'.
+	Method *string `json:"method,omitempty"`
+
+	// The endpoint path to health check against. This parameter is only valid for HTTP and HTTPS monitors.
+	Path *string `json:"path,omitempty"`
+
+	// The HTTP request headers to send in the health check. It is recommended you set a Host header by default. The
+	// User-Agent header cannot be overridden. This parameter is only valid for HTTP and HTTPS monitors.
+	HeadersVar []HealthcheckHeader `json:"headers,omitempty"`
+
+	// Do not validate the certificate when the monitor uses HTTPS. This parameter is currently only valid for HTTPS monitors.
+	AllowInsecure *bool `json:"allow_insecure,omitempty"`
+
+	// The expected HTTP response code or code range of the health check. This parameter is only valid for HTTP and HTTPS
+	// monitors.
+	ExpectedCodes *string `json:"expected_codes,omitempty"`
+
+	// A case-insensitive sub-string to look for in the response body. If this string is not found, the origin will be
+	// marked as unhealthy. This parameter is only valid for HTTP and HTTPS monitors.
+	ExpectedBody *string `json:"expected_body,omitempty"`
+
+	// Uniquely identifying a request.
+	XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the CreateMonitorOptions.Type property.
+// The protocol to use for the health check. Currently supported protocols are 'HTTP', 'HTTPS' and 'TCP'.
+const (
+	CreateMonitorOptions_Type_Http = "HTTP"
+	CreateMonitorOptions_Type_Https = "HTTPS"
+	CreateMonitorOptions_Type_Tcp = "TCP"
+)
+
+// Constants associated with the CreateMonitorOptions.Method property.
+// The method to use for the health check, applicable to HTTP/HTTPS based checks; the default value is 'GET'. 
+const ( + CreateMonitorOptions_Method_Get = "GET" + CreateMonitorOptions_Method_Head = "HEAD" +) + +// NewCreateMonitorOptions : Instantiate CreateMonitorOptions +func (*DnsSvcsV1) NewCreateMonitorOptions(instanceID string) *CreateMonitorOptions { + return &CreateMonitorOptions{ + InstanceID: core.StringPtr(instanceID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *CreateMonitorOptions) SetInstanceID(instanceID string) *CreateMonitorOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetName : Allow user to set Name +func (options *CreateMonitorOptions) SetName(name string) *CreateMonitorOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateMonitorOptions) SetDescription(description string) *CreateMonitorOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetType : Allow user to set Type +func (options *CreateMonitorOptions) SetType(typeVar string) *CreateMonitorOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetPort : Allow user to set Port +func (options *CreateMonitorOptions) SetPort(port int64) *CreateMonitorOptions { + options.Port = core.Int64Ptr(port) + return options +} + +// SetInterval : Allow user to set Interval +func (options *CreateMonitorOptions) SetInterval(interval int64) *CreateMonitorOptions { + options.Interval = core.Int64Ptr(interval) + return options +} + +// SetRetries : Allow user to set Retries +func (options *CreateMonitorOptions) SetRetries(retries int64) *CreateMonitorOptions { + options.Retries = core.Int64Ptr(retries) + return options +} + +// SetTimeout : Allow user to set Timeout +func (options *CreateMonitorOptions) SetTimeout(timeout int64) *CreateMonitorOptions { + options.Timeout = core.Int64Ptr(timeout) + return options +} + +// SetMethod : Allow user to set Method +func (options *CreateMonitorOptions) SetMethod(method string) *CreateMonitorOptions { + options.Method = core.StringPtr(method) + return options +} + +// SetPath : Allow user to set Path +func (options *CreateMonitorOptions) SetPath(path string) *CreateMonitorOptions { + options.Path = core.StringPtr(path) + return options +} + +// SetHeadersVar : Allow user to set HeadersVar +func (options *CreateMonitorOptions) SetHeadersVar(headersVar []HealthcheckHeader) *CreateMonitorOptions { + options.HeadersVar = headersVar + return options +} + +// SetAllowInsecure : Allow user to set AllowInsecure +func (options *CreateMonitorOptions) SetAllowInsecure(allowInsecure bool) *CreateMonitorOptions { + options.AllowInsecure = core.BoolPtr(allowInsecure) + return options +} + +// SetExpectedCodes : Allow user to set ExpectedCodes +func (options *CreateMonitorOptions) SetExpectedCodes(expectedCodes string) *CreateMonitorOptions { + options.ExpectedCodes = core.StringPtr(expectedCodes) + return options +} + +// SetExpectedBody : Allow user to set ExpectedBody +func (options *CreateMonitorOptions) SetExpectedBody(expectedBody string) *CreateMonitorOptions { + options.ExpectedBody = core.StringPtr(expectedBody) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *CreateMonitorOptions) SetXCorrelationID(xCorrelationID string) *CreateMonitorOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateMonitorOptions) SetHeaders(param map[string]string) 
*CreateMonitorOptions { + options.Headers = param + return options +} + +// CreatePermittedNetworkOptions : The CreatePermittedNetwork options. +type CreatePermittedNetworkOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // The type of a permitted network. + Type *string `json:"type,omitempty"` + + // Permitted network data for VPC. + PermittedNetwork *PermittedNetworkVpc `json:"permitted_network,omitempty"` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreatePermittedNetworkOptions.Type property. +// The type of a permitted network. +const ( + CreatePermittedNetworkOptions_Type_Vpc = "vpc" +) + +// NewCreatePermittedNetworkOptions : Instantiate CreatePermittedNetworkOptions +func (*DnsSvcsV1) NewCreatePermittedNetworkOptions(instanceID string, dnszoneID string) *CreatePermittedNetworkOptions { + return &CreatePermittedNetworkOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *CreatePermittedNetworkOptions) SetInstanceID(instanceID string) *CreatePermittedNetworkOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *CreatePermittedNetworkOptions) SetDnszoneID(dnszoneID string) *CreatePermittedNetworkOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetType : Allow user to set Type +func (options *CreatePermittedNetworkOptions) SetType(typeVar string) *CreatePermittedNetworkOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetPermittedNetwork : Allow user to set PermittedNetwork +func (options *CreatePermittedNetworkOptions) SetPermittedNetwork(permittedNetwork *PermittedNetworkVpc) *CreatePermittedNetworkOptions { + options.PermittedNetwork = permittedNetwork + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *CreatePermittedNetworkOptions) SetXCorrelationID(xCorrelationID string) *CreatePermittedNetworkOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreatePermittedNetworkOptions) SetHeaders(param map[string]string) *CreatePermittedNetworkOptions { + options.Headers = param + return options +} + +// CreatePoolOptions : The CreatePool options. +type CreatePoolOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // Name of the load balancer pool. + Name *string `json:"name,omitempty"` + + // Descriptive text of the load balancer pool. + Description *string `json:"description,omitempty"` + + // Whether the load balancer pool is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // The minimum number of origins that must be healthy for this pool to serve traffic. If the number of healthy origins + // falls below this number, the pool will be marked unhealthy and we will failover to the next available pool. + HealthyOriginsThreshold *int64 `json:"healthy_origins_threshold,omitempty"` + + // The list of origins within this pool. 
Traffic directed at this pool is balanced across all currently healthy + // origins, provided the pool itself is healthy. + Origins []OriginInput `json:"origins,omitempty"` + + // The ID of the load balancer monitor to be associated to this pool. + Monitor *string `json:"monitor,omitempty"` + + // The notification channel. + NotificationChannel *string `json:"notification_channel,omitempty"` + + // Health check region of VSIs. + HealthcheckRegion *string `json:"healthcheck_region,omitempty"` + + // Health check subnet CRN. + HealthcheckSubnets []string `json:"healthcheck_subnets,omitempty"` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreatePoolOptions.HealthcheckRegion property. +// Health check region of VSIs. +const ( + CreatePoolOptions_HealthcheckRegion_AuSyd = "au-syd" + CreatePoolOptions_HealthcheckRegion_EuDu = "eu-du" + CreatePoolOptions_HealthcheckRegion_EuGb = "eu-gb" + CreatePoolOptions_HealthcheckRegion_JpTok = "jp-tok" + CreatePoolOptions_HealthcheckRegion_UsEast = "us-east" + CreatePoolOptions_HealthcheckRegion_UsSouth = "us-south" +) + +// NewCreatePoolOptions : Instantiate CreatePoolOptions +func (*DnsSvcsV1) NewCreatePoolOptions(instanceID string) *CreatePoolOptions { + return &CreatePoolOptions{ + InstanceID: core.StringPtr(instanceID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *CreatePoolOptions) SetInstanceID(instanceID string) *CreatePoolOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetName : Allow user to set Name +func (options *CreatePoolOptions) SetName(name string) *CreatePoolOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreatePoolOptions) SetDescription(description string) *CreatePoolOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *CreatePoolOptions) SetEnabled(enabled bool) *CreatePoolOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetHealthyOriginsThreshold : Allow user to set HealthyOriginsThreshold +func (options *CreatePoolOptions) SetHealthyOriginsThreshold(healthyOriginsThreshold int64) *CreatePoolOptions { + options.HealthyOriginsThreshold = core.Int64Ptr(healthyOriginsThreshold) + return options +} + +// SetOrigins : Allow user to set Origins +func (options *CreatePoolOptions) SetOrigins(origins []OriginInput) *CreatePoolOptions { + options.Origins = origins + return options +} + +// SetMonitor : Allow user to set Monitor +func (options *CreatePoolOptions) SetMonitor(monitor string) *CreatePoolOptions { + options.Monitor = core.StringPtr(monitor) + return options +} + +// SetNotificationChannel : Allow user to set NotificationChannel +func (options *CreatePoolOptions) SetNotificationChannel(notificationChannel string) *CreatePoolOptions { + options.NotificationChannel = core.StringPtr(notificationChannel) + return options +} + +// SetHealthcheckRegion : Allow user to set HealthcheckRegion +func (options *CreatePoolOptions) SetHealthcheckRegion(healthcheckRegion string) *CreatePoolOptions { + options.HealthcheckRegion = core.StringPtr(healthcheckRegion) + return options +} + +// SetHealthcheckSubnets : Allow user to set HealthcheckSubnets +func (options *CreatePoolOptions) 
SetHealthcheckSubnets(healthcheckSubnets []string) *CreatePoolOptions { + options.HealthcheckSubnets = healthcheckSubnets + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *CreatePoolOptions) SetXCorrelationID(xCorrelationID string) *CreatePoolOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreatePoolOptions) SetHeaders(param map[string]string) *CreatePoolOptions { + options.Headers = param + return options +} + +// CreateResourceRecordOptions : The CreateResourceRecord options. +type CreateResourceRecordOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // Name of the resource record. + Name *string `json:"name,omitempty"` + + // Type of the resource record. + Type *string `json:"type,omitempty"` + + // Content of the resource record. + Rdata ResourceRecordInputRdataIntf `json:"rdata,omitempty"` + + // Time to live in second. + TTL *int64 `json:"ttl,omitempty"` + + // Only used for SRV record. + Service *string `json:"service,omitempty"` + + // Only used for SRV record. + Protocol *string `json:"protocol,omitempty"` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateResourceRecordOptions.Type property. +// Type of the resource record. +const ( + CreateResourceRecordOptions_Type_A = "A" + CreateResourceRecordOptions_Type_Aaaa = "AAAA" + CreateResourceRecordOptions_Type_Cname = "CNAME" + CreateResourceRecordOptions_Type_Mx = "MX" + CreateResourceRecordOptions_Type_Ptr = "PTR" + CreateResourceRecordOptions_Type_Srv = "SRV" + CreateResourceRecordOptions_Type_Txt = "TXT" +) + +// NewCreateResourceRecordOptions : Instantiate CreateResourceRecordOptions +func (*DnsSvcsV1) NewCreateResourceRecordOptions(instanceID string, dnszoneID string) *CreateResourceRecordOptions { + return &CreateResourceRecordOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *CreateResourceRecordOptions) SetInstanceID(instanceID string) *CreateResourceRecordOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *CreateResourceRecordOptions) SetDnszoneID(dnszoneID string) *CreateResourceRecordOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetName : Allow user to set Name +func (options *CreateResourceRecordOptions) SetName(name string) *CreateResourceRecordOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetType : Allow user to set Type +func (options *CreateResourceRecordOptions) SetType(typeVar string) *CreateResourceRecordOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetRdata : Allow user to set Rdata +func (options *CreateResourceRecordOptions) SetRdata(rdata ResourceRecordInputRdataIntf) *CreateResourceRecordOptions { + options.Rdata = rdata + return options +} + +// SetTTL : Allow user to set TTL +func (options *CreateResourceRecordOptions) SetTTL(ttl int64) *CreateResourceRecordOptions { + options.TTL = core.Int64Ptr(ttl) + 
return options +} + +// SetService : Allow user to set Service +func (options *CreateResourceRecordOptions) SetService(service string) *CreateResourceRecordOptions { + options.Service = core.StringPtr(service) + return options +} + +// SetProtocol : Allow user to set Protocol +func (options *CreateResourceRecordOptions) SetProtocol(protocol string) *CreateResourceRecordOptions { + options.Protocol = core.StringPtr(protocol) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *CreateResourceRecordOptions) SetXCorrelationID(xCorrelationID string) *CreateResourceRecordOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateResourceRecordOptions) SetHeaders(param map[string]string) *CreateResourceRecordOptions { + options.Headers = param + return options +} + +// DeleteDnszoneOptions : The DeleteDnszone options. +type DeleteDnszoneOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteDnszoneOptions : Instantiate DeleteDnszoneOptions +func (*DnsSvcsV1) NewDeleteDnszoneOptions(instanceID string, dnszoneID string) *DeleteDnszoneOptions { + return &DeleteDnszoneOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *DeleteDnszoneOptions) SetInstanceID(instanceID string) *DeleteDnszoneOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *DeleteDnszoneOptions) SetDnszoneID(dnszoneID string) *DeleteDnszoneOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *DeleteDnszoneOptions) SetXCorrelationID(xCorrelationID string) *DeleteDnszoneOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteDnszoneOptions) SetHeaders(param map[string]string) *DeleteDnszoneOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerOptions : The DeleteLoadBalancer options. +type DeleteLoadBalancerOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // The unique identifier of a load balancer. + LbID *string `json:"lb_id" validate:"required,ne="` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerOptions : Instantiate DeleteLoadBalancerOptions +func (*DnsSvcsV1) NewDeleteLoadBalancerOptions(instanceID string, dnszoneID string, lbID string) *DeleteLoadBalancerOptions { + return &DeleteLoadBalancerOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + LbID: core.StringPtr(lbID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *DeleteLoadBalancerOptions) SetInstanceID(instanceID string) *DeleteLoadBalancerOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *DeleteLoadBalancerOptions) SetDnszoneID(dnszoneID string) *DeleteLoadBalancerOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetLbID : Allow user to set LbID +func (options *DeleteLoadBalancerOptions) SetLbID(lbID string) *DeleteLoadBalancerOptions { + options.LbID = core.StringPtr(lbID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *DeleteLoadBalancerOptions) SetXCorrelationID(xCorrelationID string) *DeleteLoadBalancerOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerOptions { + options.Headers = param + return options +} + +// DeleteMonitorOptions : The DeleteMonitor options. +type DeleteMonitorOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a load balancer monitor. + MonitorID *string `json:"monitor_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteMonitorOptions : Instantiate DeleteMonitorOptions +func (*DnsSvcsV1) NewDeleteMonitorOptions(instanceID string, monitorID string) *DeleteMonitorOptions { + return &DeleteMonitorOptions{ + InstanceID: core.StringPtr(instanceID), + MonitorID: core.StringPtr(monitorID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *DeleteMonitorOptions) SetInstanceID(instanceID string) *DeleteMonitorOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetMonitorID : Allow user to set MonitorID +func (options *DeleteMonitorOptions) SetMonitorID(monitorID string) *DeleteMonitorOptions { + options.MonitorID = core.StringPtr(monitorID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *DeleteMonitorOptions) SetXCorrelationID(xCorrelationID string) *DeleteMonitorOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteMonitorOptions) SetHeaders(param map[string]string) *DeleteMonitorOptions { + options.Headers = param + return options +} + +// DeletePermittedNetworkOptions : The DeletePermittedNetwork options. +type DeletePermittedNetworkOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. 
+ DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // The unique identifier of a permitted network. + PermittedNetworkID *string `json:"permitted_network_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeletePermittedNetworkOptions : Instantiate DeletePermittedNetworkOptions +func (*DnsSvcsV1) NewDeletePermittedNetworkOptions(instanceID string, dnszoneID string, permittedNetworkID string) *DeletePermittedNetworkOptions { + return &DeletePermittedNetworkOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + PermittedNetworkID: core.StringPtr(permittedNetworkID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *DeletePermittedNetworkOptions) SetInstanceID(instanceID string) *DeletePermittedNetworkOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *DeletePermittedNetworkOptions) SetDnszoneID(dnszoneID string) *DeletePermittedNetworkOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetPermittedNetworkID : Allow user to set PermittedNetworkID +func (options *DeletePermittedNetworkOptions) SetPermittedNetworkID(permittedNetworkID string) *DeletePermittedNetworkOptions { + options.PermittedNetworkID = core.StringPtr(permittedNetworkID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *DeletePermittedNetworkOptions) SetXCorrelationID(xCorrelationID string) *DeletePermittedNetworkOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeletePermittedNetworkOptions) SetHeaders(param map[string]string) *DeletePermittedNetworkOptions { + options.Headers = param + return options +} + +// DeletePoolOptions : The DeletePool options. +type DeletePoolOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a load balancer pool. + PoolID *string `json:"pool_id" validate:"required,ne="` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeletePoolOptions : Instantiate DeletePoolOptions +func (*DnsSvcsV1) NewDeletePoolOptions(instanceID string, poolID string) *DeletePoolOptions { + return &DeletePoolOptions{ + InstanceID: core.StringPtr(instanceID), + PoolID: core.StringPtr(poolID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *DeletePoolOptions) SetInstanceID(instanceID string) *DeletePoolOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetPoolID : Allow user to set PoolID +func (options *DeletePoolOptions) SetPoolID(poolID string) *DeletePoolOptions { + options.PoolID = core.StringPtr(poolID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *DeletePoolOptions) SetXCorrelationID(xCorrelationID string) *DeletePoolOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeletePoolOptions) SetHeaders(param map[string]string) *DeletePoolOptions { + options.Headers = param + return options +} + +// DeleteResourceRecordOptions : The DeleteResourceRecord options. +type DeleteResourceRecordOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // The unique identifier of a resource record. + RecordID *string `json:"record_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteResourceRecordOptions : Instantiate DeleteResourceRecordOptions +func (*DnsSvcsV1) NewDeleteResourceRecordOptions(instanceID string, dnszoneID string, recordID string) *DeleteResourceRecordOptions { + return &DeleteResourceRecordOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + RecordID: core.StringPtr(recordID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *DeleteResourceRecordOptions) SetInstanceID(instanceID string) *DeleteResourceRecordOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *DeleteResourceRecordOptions) SetDnszoneID(dnszoneID string) *DeleteResourceRecordOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetRecordID : Allow user to set RecordID +func (options *DeleteResourceRecordOptions) SetRecordID(recordID string) *DeleteResourceRecordOptions { + options.RecordID = core.StringPtr(recordID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *DeleteResourceRecordOptions) SetXCorrelationID(xCorrelationID string) *DeleteResourceRecordOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteResourceRecordOptions) SetHeaders(param map[string]string) *DeleteResourceRecordOptions { + options.Headers = param + return options +} + +// GetDnszoneOptions : The GetDnszone options. +type GetDnszoneOptions struct { + // The unique identifier of a service instance. 
+ InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetDnszoneOptions : Instantiate GetDnszoneOptions +func (*DnsSvcsV1) NewGetDnszoneOptions(instanceID string, dnszoneID string) *GetDnszoneOptions { + return &GetDnszoneOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *GetDnszoneOptions) SetInstanceID(instanceID string) *GetDnszoneOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *GetDnszoneOptions) SetDnszoneID(dnszoneID string) *GetDnszoneOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *GetDnszoneOptions) SetXCorrelationID(xCorrelationID string) *GetDnszoneOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetDnszoneOptions) SetHeaders(param map[string]string) *GetDnszoneOptions { + options.Headers = param + return options +} + +// GetLoadBalancerOptions : The GetLoadBalancer options. +type GetLoadBalancerOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // The unique identifier of a load balancer. + LbID *string `json:"lb_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerOptions : Instantiate GetLoadBalancerOptions +func (*DnsSvcsV1) NewGetLoadBalancerOptions(instanceID string, dnszoneID string, lbID string) *GetLoadBalancerOptions { + return &GetLoadBalancerOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + LbID: core.StringPtr(lbID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *GetLoadBalancerOptions) SetInstanceID(instanceID string) *GetLoadBalancerOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *GetLoadBalancerOptions) SetDnszoneID(dnszoneID string) *GetLoadBalancerOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetLbID : Allow user to set LbID +func (options *GetLoadBalancerOptions) SetLbID(lbID string) *GetLoadBalancerOptions { + options.LbID = core.StringPtr(lbID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *GetLoadBalancerOptions) SetXCorrelationID(xCorrelationID string) *GetLoadBalancerOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerOptions) SetHeaders(param map[string]string) *GetLoadBalancerOptions { + options.Headers = param + return options +} + +// GetMonitorOptions : The GetMonitor options. 
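+//
+// A minimal usage sketch (editor's illustration, not part of the generated
+// code), assuming a configured service client named dnsSvcsService and the
+// DnsSvcsV1.GetMonitor method defined earlier in this file:
+//
+//    opts := dnsSvcsService.NewGetMonitorOptions("my-instance-id", "my-monitor-id")
+//    opts.SetXCorrelationID("abc-123")
+//    monitor, response, err := dnsSvcsService.GetMonitor(opts)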
+type GetMonitorOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a load balancer monitor. + MonitorID *string `json:"monitor_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetMonitorOptions : Instantiate GetMonitorOptions +func (*DnsSvcsV1) NewGetMonitorOptions(instanceID string, monitorID string) *GetMonitorOptions { + return &GetMonitorOptions{ + InstanceID: core.StringPtr(instanceID), + MonitorID: core.StringPtr(monitorID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *GetMonitorOptions) SetInstanceID(instanceID string) *GetMonitorOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetMonitorID : Allow user to set MonitorID +func (options *GetMonitorOptions) SetMonitorID(monitorID string) *GetMonitorOptions { + options.MonitorID = core.StringPtr(monitorID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *GetMonitorOptions) SetXCorrelationID(xCorrelationID string) *GetMonitorOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetMonitorOptions) SetHeaders(param map[string]string) *GetMonitorOptions { + options.Headers = param + return options +} + +// GetPermittedNetworkOptions : The GetPermittedNetwork options. +type GetPermittedNetworkOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // The unique identifier of a permitted network. + PermittedNetworkID *string `json:"permitted_network_id" validate:"required,ne="` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetPermittedNetworkOptions : Instantiate GetPermittedNetworkOptions +func (*DnsSvcsV1) NewGetPermittedNetworkOptions(instanceID string, dnszoneID string, permittedNetworkID string) *GetPermittedNetworkOptions { + return &GetPermittedNetworkOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + PermittedNetworkID: core.StringPtr(permittedNetworkID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *GetPermittedNetworkOptions) SetInstanceID(instanceID string) *GetPermittedNetworkOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *GetPermittedNetworkOptions) SetDnszoneID(dnszoneID string) *GetPermittedNetworkOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetPermittedNetworkID : Allow user to set PermittedNetworkID +func (options *GetPermittedNetworkOptions) SetPermittedNetworkID(permittedNetworkID string) *GetPermittedNetworkOptions { + options.PermittedNetworkID = core.StringPtr(permittedNetworkID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *GetPermittedNetworkOptions) SetXCorrelationID(xCorrelationID string) *GetPermittedNetworkOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetPermittedNetworkOptions) SetHeaders(param map[string]string) *GetPermittedNetworkOptions { + options.Headers = param + return options +} + +// GetPoolOptions : The GetPool options. +type GetPoolOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a load balancer pool. + PoolID *string `json:"pool_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetPoolOptions : Instantiate GetPoolOptions +func (*DnsSvcsV1) NewGetPoolOptions(instanceID string, poolID string) *GetPoolOptions { + return &GetPoolOptions{ + InstanceID: core.StringPtr(instanceID), + PoolID: core.StringPtr(poolID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *GetPoolOptions) SetInstanceID(instanceID string) *GetPoolOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetPoolID : Allow user to set PoolID +func (options *GetPoolOptions) SetPoolID(poolID string) *GetPoolOptions { + options.PoolID = core.StringPtr(poolID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *GetPoolOptions) SetXCorrelationID(xCorrelationID string) *GetPoolOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetPoolOptions) SetHeaders(param map[string]string) *GetPoolOptions { + options.Headers = param + return options +} + +// GetResourceRecordOptions : The GetResourceRecord options. +type GetResourceRecordOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. 
+ DnszoneID *string `json:"dnszone_id" validate:"required,ne="`
+
+ // The unique identifier of a resource record.
+ RecordID *string `json:"record_id" validate:"required,ne="`
+
+ // Uniquely identifying a request.
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewGetResourceRecordOptions : Instantiate GetResourceRecordOptions
+func (*DnsSvcsV1) NewGetResourceRecordOptions(instanceID string, dnszoneID string, recordID string) *GetResourceRecordOptions {
+ return &GetResourceRecordOptions{
+ InstanceID: core.StringPtr(instanceID),
+ DnszoneID: core.StringPtr(dnszoneID),
+ RecordID: core.StringPtr(recordID),
+ }
+}
+
+// SetInstanceID : Allow user to set InstanceID
+func (options *GetResourceRecordOptions) SetInstanceID(instanceID string) *GetResourceRecordOptions {
+ options.InstanceID = core.StringPtr(instanceID)
+ return options
+}
+
+// SetDnszoneID : Allow user to set DnszoneID
+func (options *GetResourceRecordOptions) SetDnszoneID(dnszoneID string) *GetResourceRecordOptions {
+ options.DnszoneID = core.StringPtr(dnszoneID)
+ return options
+}
+
+// SetRecordID : Allow user to set RecordID
+func (options *GetResourceRecordOptions) SetRecordID(recordID string) *GetResourceRecordOptions {
+ options.RecordID = core.StringPtr(recordID)
+ return options
+}
+
+// SetXCorrelationID : Allow user to set XCorrelationID
+func (options *GetResourceRecordOptions) SetXCorrelationID(xCorrelationID string) *GetResourceRecordOptions {
+ options.XCorrelationID = core.StringPtr(xCorrelationID)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetResourceRecordOptions) SetHeaders(param map[string]string) *GetResourceRecordOptions {
+ options.Headers = param
+ return options
+}
+
+// ListDnszonesOptions : The ListDnszones options.
+type ListDnszonesOptions struct {
+ // The unique identifier of a service instance.
+ InstanceID *string `json:"instance_id" validate:"required,ne="`
+
+ // Uniquely identifying a request.
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+ // Specify how many DNS zones to skip over, the default value is 0.
+ Offset *int64 `json:"offset,omitempty"`
+
+ // Specify how many DNS zones are returned, the default value is 200.
+ Limit *int64 `json:"limit,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListDnszonesOptions : Instantiate ListDnszonesOptions +func (*DnsSvcsV1) NewListDnszonesOptions(instanceID string) *ListDnszonesOptions { + return &ListDnszonesOptions{ + InstanceID: core.StringPtr(instanceID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *ListDnszonesOptions) SetInstanceID(instanceID string) *ListDnszonesOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *ListDnszonesOptions) SetXCorrelationID(xCorrelationID string) *ListDnszonesOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListDnszonesOptions) SetOffset(offset int64) *ListDnszonesOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListDnszonesOptions) SetLimit(limit int64) *ListDnszonesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListDnszonesOptions) SetHeaders(param map[string]string) *ListDnszonesOptions { + options.Headers = param + return options +} + +// ListLoadBalancersOptions : The ListLoadBalancers options. +type ListLoadBalancersOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListLoadBalancersOptions : Instantiate ListLoadBalancersOptions +func (*DnsSvcsV1) NewListLoadBalancersOptions(instanceID string, dnszoneID string) *ListLoadBalancersOptions { + return &ListLoadBalancersOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *ListLoadBalancersOptions) SetInstanceID(instanceID string) *ListLoadBalancersOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *ListLoadBalancersOptions) SetDnszoneID(dnszoneID string) *ListLoadBalancersOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *ListLoadBalancersOptions) SetXCorrelationID(xCorrelationID string) *ListLoadBalancersOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListLoadBalancersOptions) SetHeaders(param map[string]string) *ListLoadBalancersOptions { + options.Headers = param + return options +} + +// ListMonitorsOptions : The ListMonitors options. +type ListMonitorsOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewListMonitorsOptions : Instantiate ListMonitorsOptions
+func (*DnsSvcsV1) NewListMonitorsOptions(instanceID string) *ListMonitorsOptions {
+ return &ListMonitorsOptions{
+ InstanceID: core.StringPtr(instanceID),
+ }
+}
+
+// SetInstanceID : Allow user to set InstanceID
+func (options *ListMonitorsOptions) SetInstanceID(instanceID string) *ListMonitorsOptions {
+ options.InstanceID = core.StringPtr(instanceID)
+ return options
+}
+
+// SetXCorrelationID : Allow user to set XCorrelationID
+func (options *ListMonitorsOptions) SetXCorrelationID(xCorrelationID string) *ListMonitorsOptions {
+ options.XCorrelationID = core.StringPtr(xCorrelationID)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListMonitorsOptions) SetHeaders(param map[string]string) *ListMonitorsOptions {
+ options.Headers = param
+ return options
+}
+
+// ListPermittedNetworksOptions : The ListPermittedNetworks options.
+type ListPermittedNetworksOptions struct {
+ // The unique identifier of a service instance.
+ InstanceID *string `json:"instance_id" validate:"required,ne="`
+
+ // The unique identifier of a DNS zone.
+ DnszoneID *string `json:"dnszone_id" validate:"required,ne="`
+
+ // Uniquely identifying a request.
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+ // Specify how many permitted networks to skip over, the default value is 0.
+ Offset *int64 `json:"offset,omitempty"`
+
+ // Specify how many permitted networks are returned, the default value is 200.
+ Limit *int64 `json:"limit,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewListPermittedNetworksOptions : Instantiate ListPermittedNetworksOptions
+func (*DnsSvcsV1) NewListPermittedNetworksOptions(instanceID string, dnszoneID string) *ListPermittedNetworksOptions {
+ return &ListPermittedNetworksOptions{
+ InstanceID: core.StringPtr(instanceID),
+ DnszoneID: core.StringPtr(dnszoneID),
+ }
+}
+
+// SetInstanceID : Allow user to set InstanceID
+func (options *ListPermittedNetworksOptions) SetInstanceID(instanceID string) *ListPermittedNetworksOptions {
+ options.InstanceID = core.StringPtr(instanceID)
+ return options
+}
+
+// SetDnszoneID : Allow user to set DnszoneID
+func (options *ListPermittedNetworksOptions) SetDnszoneID(dnszoneID string) *ListPermittedNetworksOptions {
+ options.DnszoneID = core.StringPtr(dnszoneID)
+ return options
+}
+
+// SetXCorrelationID : Allow user to set XCorrelationID
+func (options *ListPermittedNetworksOptions) SetXCorrelationID(xCorrelationID string) *ListPermittedNetworksOptions {
+ options.XCorrelationID = core.StringPtr(xCorrelationID)
+ return options
+}
+
+// SetOffset : Allow user to set Offset
+func (options *ListPermittedNetworksOptions) SetOffset(offset int64) *ListPermittedNetworksOptions {
+ options.Offset = core.Int64Ptr(offset)
+ return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListPermittedNetworksOptions) SetLimit(limit int64) *ListPermittedNetworksOptions {
+ options.Limit = core.Int64Ptr(limit)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListPermittedNetworksOptions) SetHeaders(param map[string]string) *ListPermittedNetworksOptions {
+ options.Headers = param
+ return options
+}
+
+// ListPoolsOptions : The ListPools options.
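+//
+// A minimal usage sketch (editor's illustration, not part of the generated
+// code), assuming a configured client named dnsSvcsService and the
+// DnsSvcsV1.ListPools method defined earlier in this file:
+//
+//    opts := dnsSvcsService.NewListPoolsOptions("my-instance-id")
+//    pools, response, err := dnsSvcsService.ListPools(opts)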
+type ListPoolsOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListPoolsOptions : Instantiate ListPoolsOptions +func (*DnsSvcsV1) NewListPoolsOptions(instanceID string) *ListPoolsOptions { + return &ListPoolsOptions{ + InstanceID: core.StringPtr(instanceID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *ListPoolsOptions) SetInstanceID(instanceID string) *ListPoolsOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *ListPoolsOptions) SetXCorrelationID(xCorrelationID string) *ListPoolsOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListPoolsOptions) SetHeaders(param map[string]string) *ListPoolsOptions { + options.Headers = param + return options +} + +// ListResourceRecordsOptions : The ListResourceRecords options. +type ListResourceRecordsOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Specify how many resource records to skip over, the default value is 0. + Offset *int64 `json:"offset,omitempty"` + + // Specify how many resource records are returned, the default value is 200. 
+ Limit *int64 `json:"limit,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewListResourceRecordsOptions : Instantiate ListResourceRecordsOptions
+func (*DnsSvcsV1) NewListResourceRecordsOptions(instanceID string, dnszoneID string) *ListResourceRecordsOptions {
+ return &ListResourceRecordsOptions{
+ InstanceID: core.StringPtr(instanceID),
+ DnszoneID: core.StringPtr(dnszoneID),
+ }
+}
+
+// SetInstanceID : Allow user to set InstanceID
+func (options *ListResourceRecordsOptions) SetInstanceID(instanceID string) *ListResourceRecordsOptions {
+ options.InstanceID = core.StringPtr(instanceID)
+ return options
+}
+
+// SetDnszoneID : Allow user to set DnszoneID
+func (options *ListResourceRecordsOptions) SetDnszoneID(dnszoneID string) *ListResourceRecordsOptions {
+ options.DnszoneID = core.StringPtr(dnszoneID)
+ return options
+}
+
+// SetXCorrelationID : Allow user to set XCorrelationID
+func (options *ListResourceRecordsOptions) SetXCorrelationID(xCorrelationID string) *ListResourceRecordsOptions {
+ options.XCorrelationID = core.StringPtr(xCorrelationID)
+ return options
+}
+
+// SetOffset : Allow user to set Offset
+func (options *ListResourceRecordsOptions) SetOffset(offset int64) *ListResourceRecordsOptions {
+ options.Offset = core.Int64Ptr(offset)
+ return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListResourceRecordsOptions) SetLimit(limit int64) *ListResourceRecordsOptions {
+ options.Limit = core.Int64Ptr(limit)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListResourceRecordsOptions) SetHeaders(param map[string]string) *ListResourceRecordsOptions {
+ options.Headers = param
+ return options
+}
+
+// LoadBalancerAzPoolsItem : LoadBalancerAzPoolsItem struct
+type LoadBalancerAzPoolsItem struct {
+ // Availability zone.
+ AvailabilityZone *string `json:"availability_zone,omitempty"`
+
+ // List of load balancer pools.
+ Pools []string `json:"pools,omitempty"`
+}
+
+
+// UnmarshalLoadBalancerAzPoolsItem unmarshals an instance of LoadBalancerAzPoolsItem from the specified map of raw messages.
+func UnmarshalLoadBalancerAzPoolsItem(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(LoadBalancerAzPoolsItem)
+ err = core.UnmarshalPrimitive(m, "availability_zone", &obj.AvailabilityZone)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "pools", &obj.Pools)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// PoolHealthcheckVsisItem : PoolHealthcheckVsisItem struct
+type PoolHealthcheckVsisItem struct {
+ // Health check VSI subnet CRN.
+ Subnet *string `json:"subnet,omitempty"`
+
+ // Health check VSI IP address.
+ Ipv4Address *string `json:"ipv4_address,omitempty"`
+
+ // IPv4 CIDR block.
+ Ipv4CidrBlock *string `json:"ipv4_cidr_block,omitempty"`
+
+ // VPC CRN.
+ Vpc *string `json:"vpc,omitempty"`
+}
+
+
+// UnmarshalPoolHealthcheckVsisItem unmarshals an instance of PoolHealthcheckVsisItem from the specified map of raw messages.
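+//
+// These Unmarshal* helpers follow the ibm-go-sdk-core pattern: the generated
+// response handlers pass them to core.UnmarshalModel. A direct call (editor's
+// illustration; rawMap and the "healthcheck_vsis" property name are assumed,
+// not taken from this file) would look like:
+//
+//    var items []PoolHealthcheckVsisItem
+//    err := core.UnmarshalModel(rawMap, "healthcheck_vsis", &items, UnmarshalPoolHealthcheckVsisItem)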
+func UnmarshalPoolHealthcheckVsisItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PoolHealthcheckVsisItem) + err = core.UnmarshalPrimitive(m, "subnet", &obj.Subnet) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ipv4_address", &obj.Ipv4Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ipv4_cidr_block", &obj.Ipv4CidrBlock) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "vpc", &obj.Vpc) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordInputRdata : Content of the resource record. +// Models which "extend" this model: +// - ResourceRecordInputRdataRdataARecord +// - ResourceRecordInputRdataRdataAaaaRecord +// - ResourceRecordInputRdataRdataCnameRecord +// - ResourceRecordInputRdataRdataMxRecord +// - ResourceRecordInputRdataRdataSrvRecord +// - ResourceRecordInputRdataRdataTxtRecord +// - ResourceRecordInputRdataRdataPtrRecord +type ResourceRecordInputRdata struct { + // IPv4 address. + Ip *string `json:"ip,omitempty"` + + // Canonical name. + Cname *string `json:"cname,omitempty"` + + // Hostname of Exchange server. + Exchange *string `json:"exchange,omitempty"` + + // Preference of the MX record. + Preference *int64 `json:"preference,omitempty"` + + // Port number of the target server. + Port *int64 `json:"port,omitempty"` + + // Priority of the SRV record. + Priority *int64 `json:"priority,omitempty"` + + // Hostname of the target server. + Target *string `json:"target,omitempty"` + + // Weight of distributing queries among multiple target servers. + Weight *int64 `json:"weight,omitempty"` + + // Human readable text. + Text *string `json:"text,omitempty"` + + // Hostname of the relevant A or AAAA record. + Ptrdname *string `json:"ptrdname,omitempty"` +} + +func (*ResourceRecordInputRdata) isaResourceRecordInputRdata() bool { + return true +} + +type ResourceRecordInputRdataIntf interface { + isaResourceRecordInputRdata() bool +} + +// UnmarshalResourceRecordInputRdata unmarshals an instance of ResourceRecordInputRdata from the specified map of raw messages. +func UnmarshalResourceRecordInputRdata(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordInputRdata) + err = core.UnmarshalPrimitive(m, "ip", &obj.Ip) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cname", &obj.Cname) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "exchange", &obj.Exchange) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "preference", &obj.Preference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "target", &obj.Target) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "weight", &obj.Weight) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "text", &obj.Text) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ptrdname", &obj.Ptrdname) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordUpdateInputRdata : Content of the resource record. 
+// Models which "extend" this model: +// - ResourceRecordUpdateInputRdataRdataARecord +// - ResourceRecordUpdateInputRdataRdataAaaaRecord +// - ResourceRecordUpdateInputRdataRdataCnameRecord +// - ResourceRecordUpdateInputRdataRdataMxRecord +// - ResourceRecordUpdateInputRdataRdataSrvRecord +// - ResourceRecordUpdateInputRdataRdataTxtRecord +// - ResourceRecordUpdateInputRdataRdataPtrRecord +type ResourceRecordUpdateInputRdata struct { + // IPv4 address. + Ip *string `json:"ip,omitempty"` + + // Canonical name. + Cname *string `json:"cname,omitempty"` + + // Hostname of Exchange server. + Exchange *string `json:"exchange,omitempty"` + + // Preference of the MX record. + Preference *int64 `json:"preference,omitempty"` + + // Port number of the target server. + Port *int64 `json:"port,omitempty"` + + // Priority of the SRV record. + Priority *int64 `json:"priority,omitempty"` + + // Hostname of the target server. + Target *string `json:"target,omitempty"` + + // Weight of distributing queries among multiple target servers. + Weight *int64 `json:"weight,omitempty"` + + // Human readable text. + Text *string `json:"text,omitempty"` + + // Hostname of the relevant A or AAAA record. + Ptrdname *string `json:"ptrdname,omitempty"` +} + +func (*ResourceRecordUpdateInputRdata) isaResourceRecordUpdateInputRdata() bool { + return true +} + +type ResourceRecordUpdateInputRdataIntf interface { + isaResourceRecordUpdateInputRdata() bool +} + +// UnmarshalResourceRecordUpdateInputRdata unmarshals an instance of ResourceRecordUpdateInputRdata from the specified map of raw messages. +func UnmarshalResourceRecordUpdateInputRdata(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordUpdateInputRdata) + err = core.UnmarshalPrimitive(m, "ip", &obj.Ip) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cname", &obj.Cname) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "exchange", &obj.Exchange) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "preference", &obj.Preference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "target", &obj.Target) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "weight", &obj.Weight) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "text", &obj.Text) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ptrdname", &obj.Ptrdname) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateDnszoneOptions : The UpdateDnszone options. +type UpdateDnszoneOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // The text describing the purpose of a DNS zone. + Description *string `json:"description,omitempty"` + + // The label of a DNS zone. + Label *string `json:"label,omitempty"` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateDnszoneOptions : Instantiate UpdateDnszoneOptions +func (*DnsSvcsV1) NewUpdateDnszoneOptions(instanceID string, dnszoneID string) *UpdateDnszoneOptions { + return &UpdateDnszoneOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *UpdateDnszoneOptions) SetInstanceID(instanceID string) *UpdateDnszoneOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *UpdateDnszoneOptions) SetDnszoneID(dnszoneID string) *UpdateDnszoneOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateDnszoneOptions) SetDescription(description string) *UpdateDnszoneOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetLabel : Allow user to set Label +func (options *UpdateDnszoneOptions) SetLabel(label string) *UpdateDnszoneOptions { + options.Label = core.StringPtr(label) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *UpdateDnszoneOptions) SetXCorrelationID(xCorrelationID string) *UpdateDnszoneOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateDnszoneOptions) SetHeaders(param map[string]string) *UpdateDnszoneOptions { + options.Headers = param + return options +} + +// UpdateLoadBalancerOptions : The UpdateLoadBalancer options. +type UpdateLoadBalancerOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // The unique identifier of a load balancer. + LbID *string `json:"lb_id" validate:"required,ne="` + + // Name of the load balancer. + Name *string `json:"name,omitempty"` + + // Descriptive text of the load balancer. + Description *string `json:"description,omitempty"` + + // Whether the load balancer is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Time to live in second. + TTL *int64 `json:"ttl,omitempty"` + + // The pool ID to use when all other pools are detected as unhealthy. + FallbackPool *string `json:"fallback_pool,omitempty"` + + // A list of pool IDs ordered by their failover priority. Pools defined here are used by default, or when region_pools + // are not configured for a given region. + DefaultPools []string `json:"default_pools,omitempty"` + + // Map availability zones to pool IDs. + AzPools []LoadBalancerAzPoolsItem `json:"az_pools,omitempty"` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateLoadBalancerOptions : Instantiate UpdateLoadBalancerOptions +func (*DnsSvcsV1) NewUpdateLoadBalancerOptions(instanceID string, dnszoneID string, lbID string) *UpdateLoadBalancerOptions { + return &UpdateLoadBalancerOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + LbID: core.StringPtr(lbID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *UpdateLoadBalancerOptions) SetInstanceID(instanceID string) *UpdateLoadBalancerOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *UpdateLoadBalancerOptions) SetDnszoneID(dnszoneID string) *UpdateLoadBalancerOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetLbID : Allow user to set LbID +func (options *UpdateLoadBalancerOptions) SetLbID(lbID string) *UpdateLoadBalancerOptions { + options.LbID = core.StringPtr(lbID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateLoadBalancerOptions) SetName(name string) *UpdateLoadBalancerOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateLoadBalancerOptions) SetDescription(description string) *UpdateLoadBalancerOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *UpdateLoadBalancerOptions) SetEnabled(enabled bool) *UpdateLoadBalancerOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetTTL : Allow user to set TTL +func (options *UpdateLoadBalancerOptions) SetTTL(ttl int64) *UpdateLoadBalancerOptions { + options.TTL = core.Int64Ptr(ttl) + return options +} + +// SetFallbackPool : Allow user to set FallbackPool +func (options *UpdateLoadBalancerOptions) SetFallbackPool(fallbackPool string) *UpdateLoadBalancerOptions { + options.FallbackPool = core.StringPtr(fallbackPool) + return options +} + +// SetDefaultPools : Allow user to set DefaultPools +func (options *UpdateLoadBalancerOptions) SetDefaultPools(defaultPools []string) *UpdateLoadBalancerOptions { + options.DefaultPools = defaultPools + return options +} + +// SetAzPools : Allow user to set AzPools +func (options *UpdateLoadBalancerOptions) SetAzPools(azPools []LoadBalancerAzPoolsItem) *UpdateLoadBalancerOptions { + options.AzPools = azPools + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *UpdateLoadBalancerOptions) SetXCorrelationID(xCorrelationID string) *UpdateLoadBalancerOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateLoadBalancerOptions) SetHeaders(param map[string]string) *UpdateLoadBalancerOptions { + options.Headers = param + return options +} + +// UpdateMonitorOptions : The UpdateMonitor options. +type UpdateMonitorOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a load balancer monitor. + MonitorID *string `json:"monitor_id" validate:"required,ne="` + + // The name of the load balancer monitor. + Name *string `json:"name,omitempty"` + + // Descriptive text of the load balancer monitor. 
+ Description *string `json:"description,omitempty"`
+
+ // The protocol to use for the health check. Currently supported protocols are 'HTTP', 'HTTPS' and 'TCP'.
+ Type *string `json:"type,omitempty"`
+
+ // Port number to connect to for the health check. Required for TCP checks. HTTP and HTTPS checks should only define
+ // the port when using a non-standard port (HTTP: default 80, HTTPS: default 443).
+ Port *int64 `json:"port,omitempty"`
+
+ // The interval between each health check. Shorter intervals may improve failover time, but will increase load on the
+ // origins as we check from multiple locations.
+ Interval *int64 `json:"interval,omitempty"`
+
+ // The number of retries to attempt in case of a timeout before marking the origin as unhealthy. Retries are attempted
+ // immediately.
+ Retries *int64 `json:"retries,omitempty"`
+
+ // The timeout (in seconds) before marking the health check as failed.
+ Timeout *int64 `json:"timeout,omitempty"`
+
+ // The method to use for the health check applicable to HTTP/HTTPS based checks, the default value is 'GET'.
+ Method *string `json:"method,omitempty"`
+
+ // The endpoint path to health check against. This parameter is only valid for HTTP and HTTPS monitors.
+ Path *string `json:"path,omitempty"`
+
+ // The HTTP request headers to send in the health check. It is recommended you set a Host header by default. The
+ // User-Agent header cannot be overridden. This parameter is only valid for HTTP and HTTPS monitors.
+ HeadersVar []HealthcheckHeader `json:"headers,omitempty"`
+
+ // Do not validate the certificate when the monitor uses HTTPS. This parameter is currently only valid for HTTP and
+ // HTTPS monitors.
+ AllowInsecure *bool `json:"allow_insecure,omitempty"`
+
+ // The expected HTTP response code or code range of the health check. This parameter is only valid for HTTP and HTTPS
+ // monitors.
+ ExpectedCodes *string `json:"expected_codes,omitempty"`
+
+ // A case-insensitive sub-string to look for in the response body. If this string is not found, the origin will be
+ // marked as unhealthy. This parameter is only valid for HTTP and HTTPS monitors.
+ ExpectedBody *string `json:"expected_body,omitempty"`
+
+ // Uniquely identifying a request.
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// Constants associated with the UpdateMonitorOptions.Type property.
+// The protocol to use for the health check. Currently supported protocols are 'HTTP', 'HTTPS' and 'TCP'.
+const (
+ UpdateMonitorOptions_Type_Http = "HTTP"
+ UpdateMonitorOptions_Type_Https = "HTTPS"
+ UpdateMonitorOptions_Type_Tcp = "TCP"
+)
+
+// Constants associated with the UpdateMonitorOptions.Method property.
+// The method to use for the health check applicable to HTTP/HTTPS based checks, the default value is 'GET'.
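+//
+// For example (editor's illustration, not part of the generated code), a
+// HEAD-based HTTPS check could be requested on an UpdateMonitorOptions value
+// named opts with:
+//
+//    opts.SetType(UpdateMonitorOptions_Type_Https)
+//    opts.SetMethod(UpdateMonitorOptions_Method_Head)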
+const ( + UpdateMonitorOptions_Method_Get = "GET" + UpdateMonitorOptions_Method_Head = "HEAD" +) + +// NewUpdateMonitorOptions : Instantiate UpdateMonitorOptions +func (*DnsSvcsV1) NewUpdateMonitorOptions(instanceID string, monitorID string) *UpdateMonitorOptions { + return &UpdateMonitorOptions{ + InstanceID: core.StringPtr(instanceID), + MonitorID: core.StringPtr(monitorID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *UpdateMonitorOptions) SetInstanceID(instanceID string) *UpdateMonitorOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetMonitorID : Allow user to set MonitorID +func (options *UpdateMonitorOptions) SetMonitorID(monitorID string) *UpdateMonitorOptions { + options.MonitorID = core.StringPtr(monitorID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateMonitorOptions) SetName(name string) *UpdateMonitorOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateMonitorOptions) SetDescription(description string) *UpdateMonitorOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetType : Allow user to set Type +func (options *UpdateMonitorOptions) SetType(typeVar string) *UpdateMonitorOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetPort : Allow user to set Port +func (options *UpdateMonitorOptions) SetPort(port int64) *UpdateMonitorOptions { + options.Port = core.Int64Ptr(port) + return options +} + +// SetInterval : Allow user to set Interval +func (options *UpdateMonitorOptions) SetInterval(interval int64) *UpdateMonitorOptions { + options.Interval = core.Int64Ptr(interval) + return options +} + +// SetRetries : Allow user to set Retries +func (options *UpdateMonitorOptions) SetRetries(retries int64) *UpdateMonitorOptions { + options.Retries = core.Int64Ptr(retries) + return options +} + +// SetTimeout : Allow user to set Timeout +func (options *UpdateMonitorOptions) SetTimeout(timeout int64) *UpdateMonitorOptions { + options.Timeout = core.Int64Ptr(timeout) + return options +} + +// SetMethod : Allow user to set Method +func (options *UpdateMonitorOptions) SetMethod(method string) *UpdateMonitorOptions { + options.Method = core.StringPtr(method) + return options +} + +// SetPath : Allow user to set Path +func (options *UpdateMonitorOptions) SetPath(path string) *UpdateMonitorOptions { + options.Path = core.StringPtr(path) + return options +} + +// SetHeadersVar : Allow user to set HeadersVar +func (options *UpdateMonitorOptions) SetHeadersVar(headersVar []HealthcheckHeader) *UpdateMonitorOptions { + options.HeadersVar = headersVar + return options +} + +// SetAllowInsecure : Allow user to set AllowInsecure +func (options *UpdateMonitorOptions) SetAllowInsecure(allowInsecure bool) *UpdateMonitorOptions { + options.AllowInsecure = core.BoolPtr(allowInsecure) + return options +} + +// SetExpectedCodes : Allow user to set ExpectedCodes +func (options *UpdateMonitorOptions) SetExpectedCodes(expectedCodes string) *UpdateMonitorOptions { + options.ExpectedCodes = core.StringPtr(expectedCodes) + return options +} + +// SetExpectedBody : Allow user to set ExpectedBody +func (options *UpdateMonitorOptions) SetExpectedBody(expectedBody string) *UpdateMonitorOptions { + options.ExpectedBody = core.StringPtr(expectedBody) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *UpdateMonitorOptions) 
SetXCorrelationID(xCorrelationID string) *UpdateMonitorOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateMonitorOptions) SetHeaders(param map[string]string) *UpdateMonitorOptions { + options.Headers = param + return options +} + +// UpdatePoolOptions : The UpdatePool options. +type UpdatePoolOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a load balancer pool. + PoolID *string `json:"pool_id" validate:"required,ne="` + + // Name of the load balancer pool. + Name *string `json:"name,omitempty"` + + // Descriptive text of the load balancer pool. + Description *string `json:"description,omitempty"` + + // Whether the load balancer pool is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // The minimum number of origins that must be healthy for this pool to serve traffic. If the number of healthy origins + // falls below this number, the pool will be marked unhealthy and we will failover to the next available pool. + HealthyOriginsThreshold *int64 `json:"healthy_origins_threshold,omitempty"` + + // The list of origins within this pool. Traffic directed at this pool is balanced across all currently healthy + // origins, provided the pool itself is healthy. + Origins []OriginInput `json:"origins,omitempty"` + + // The ID of the load balancer monitor to be associated to this pool. + Monitor *string `json:"monitor,omitempty"` + + // The notification channel. + NotificationChannel *string `json:"notification_channel,omitempty"` + + // Health check region of VSIs. + HealthcheckRegion *string `json:"healthcheck_region,omitempty"` + + // Health check subnet CRNs. + HealthcheckSubnets []string `json:"healthcheck_subnets,omitempty"` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdatePoolOptions.HealthcheckRegion property. +// Health check region of VSIs. 
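+//
+// For example (editor's illustration, not part of the generated code), on an
+// UpdatePoolOptions value named opts:
+//
+//    opts.SetHealthcheckRegion(UpdatePoolOptions_HealthcheckRegion_UsSouth)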
+const ( + UpdatePoolOptions_HealthcheckRegion_AuSyd = "au-syd" + UpdatePoolOptions_HealthcheckRegion_EuDu = "eu-du" + UpdatePoolOptions_HealthcheckRegion_EuGb = "eu-gb" + UpdatePoolOptions_HealthcheckRegion_JpTok = "jp-tok" + UpdatePoolOptions_HealthcheckRegion_UsEast = "us-east" + UpdatePoolOptions_HealthcheckRegion_UsSouth = "us-south" +) + +// NewUpdatePoolOptions : Instantiate UpdatePoolOptions +func (*DnsSvcsV1) NewUpdatePoolOptions(instanceID string, poolID string) *UpdatePoolOptions { + return &UpdatePoolOptions{ + InstanceID: core.StringPtr(instanceID), + PoolID: core.StringPtr(poolID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *UpdatePoolOptions) SetInstanceID(instanceID string) *UpdatePoolOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetPoolID : Allow user to set PoolID +func (options *UpdatePoolOptions) SetPoolID(poolID string) *UpdatePoolOptions { + options.PoolID = core.StringPtr(poolID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdatePoolOptions) SetName(name string) *UpdatePoolOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdatePoolOptions) SetDescription(description string) *UpdatePoolOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *UpdatePoolOptions) SetEnabled(enabled bool) *UpdatePoolOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetHealthyOriginsThreshold : Allow user to set HealthyOriginsThreshold +func (options *UpdatePoolOptions) SetHealthyOriginsThreshold(healthyOriginsThreshold int64) *UpdatePoolOptions { + options.HealthyOriginsThreshold = core.Int64Ptr(healthyOriginsThreshold) + return options +} + +// SetOrigins : Allow user to set Origins +func (options *UpdatePoolOptions) SetOrigins(origins []OriginInput) *UpdatePoolOptions { + options.Origins = origins + return options +} + +// SetMonitor : Allow user to set Monitor +func (options *UpdatePoolOptions) SetMonitor(monitor string) *UpdatePoolOptions { + options.Monitor = core.StringPtr(monitor) + return options +} + +// SetNotificationChannel : Allow user to set NotificationChannel +func (options *UpdatePoolOptions) SetNotificationChannel(notificationChannel string) *UpdatePoolOptions { + options.NotificationChannel = core.StringPtr(notificationChannel) + return options +} + +// SetHealthcheckRegion : Allow user to set HealthcheckRegion +func (options *UpdatePoolOptions) SetHealthcheckRegion(healthcheckRegion string) *UpdatePoolOptions { + options.HealthcheckRegion = core.StringPtr(healthcheckRegion) + return options +} + +// SetHealthcheckSubnets : Allow user to set HealthcheckSubnets +func (options *UpdatePoolOptions) SetHealthcheckSubnets(healthcheckSubnets []string) *UpdatePoolOptions { + options.HealthcheckSubnets = healthcheckSubnets + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *UpdatePoolOptions) SetXCorrelationID(xCorrelationID string) *UpdatePoolOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdatePoolOptions) SetHeaders(param map[string]string) *UpdatePoolOptions { + options.Headers = param + return options +} + +// UpdateResourceRecordOptions : The UpdateResourceRecord options. 
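+//
+// A hedged sketch (editor's illustration, not part of the generated code):
+// updating an A record, assuming the DnsSvcsV1.UpdateResourceRecord method and
+// the NewResourceRecordUpdateInputRdataRdataARecord constructor defined
+// elsewhere in this file, and a client named dnsSvcsService:
+//
+//    rdata, _ := dnsSvcsService.NewResourceRecordUpdateInputRdataRdataARecord("10.0.0.8")
+//    opts := dnsSvcsService.NewUpdateResourceRecordOptions("my-instance-id", "my-zone-id", "my-record-id")
+//    opts.SetRdata(rdata)
+//    opts.SetTTL(300)
+//    record, response, err := dnsSvcsService.UpdateResourceRecord(opts)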
+type UpdateResourceRecordOptions struct { + // The unique identifier of a service instance. + InstanceID *string `json:"instance_id" validate:"required,ne="` + + // The unique identifier of a DNS zone. + DnszoneID *string `json:"dnszone_id" validate:"required,ne="` + + // The unique identifier of a resource record. + RecordID *string `json:"record_id" validate:"required,ne="` + + // Name of the resource record. + Name *string `json:"name,omitempty"` + + // Content of the resource record. + Rdata ResourceRecordUpdateInputRdataIntf `json:"rdata,omitempty"` + + // Time to live in second. + TTL *int64 `json:"ttl,omitempty"` + + // Only used for SRV record. + Service *string `json:"service,omitempty"` + + // Only used for SRV record. + Protocol *string `json:"protocol,omitempty"` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateResourceRecordOptions : Instantiate UpdateResourceRecordOptions +func (*DnsSvcsV1) NewUpdateResourceRecordOptions(instanceID string, dnszoneID string, recordID string) *UpdateResourceRecordOptions { + return &UpdateResourceRecordOptions{ + InstanceID: core.StringPtr(instanceID), + DnszoneID: core.StringPtr(dnszoneID), + RecordID: core.StringPtr(recordID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *UpdateResourceRecordOptions) SetInstanceID(instanceID string) *UpdateResourceRecordOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetDnszoneID : Allow user to set DnszoneID +func (options *UpdateResourceRecordOptions) SetDnszoneID(dnszoneID string) *UpdateResourceRecordOptions { + options.DnszoneID = core.StringPtr(dnszoneID) + return options +} + +// SetRecordID : Allow user to set RecordID +func (options *UpdateResourceRecordOptions) SetRecordID(recordID string) *UpdateResourceRecordOptions { + options.RecordID = core.StringPtr(recordID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateResourceRecordOptions) SetName(name string) *UpdateResourceRecordOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetRdata : Allow user to set Rdata +func (options *UpdateResourceRecordOptions) SetRdata(rdata ResourceRecordUpdateInputRdataIntf) *UpdateResourceRecordOptions { + options.Rdata = rdata + return options +} + +// SetTTL : Allow user to set TTL +func (options *UpdateResourceRecordOptions) SetTTL(ttl int64) *UpdateResourceRecordOptions { + options.TTL = core.Int64Ptr(ttl) + return options +} + +// SetService : Allow user to set Service +func (options *UpdateResourceRecordOptions) SetService(service string) *UpdateResourceRecordOptions { + options.Service = core.StringPtr(service) + return options +} + +// SetProtocol : Allow user to set Protocol +func (options *UpdateResourceRecordOptions) SetProtocol(protocol string) *UpdateResourceRecordOptions { + options.Protocol = core.StringPtr(protocol) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *UpdateResourceRecordOptions) SetXCorrelationID(xCorrelationID string) *UpdateResourceRecordOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateResourceRecordOptions) SetHeaders(param map[string]string) *UpdateResourceRecordOptions { + options.Headers = param + return options +} + +// Dnszone : DNS zone details. 
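+//
+// For example (editor's illustration, not part of the generated code), a
+// caller holding a *Dnszone named zone might gate work on the zone state using
+// the constants below:
+//
+//    if zone.State != nil && *zone.State == Dnszone_State_Active {
+//        // safe to add records
+//    }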
+type Dnszone struct {
+ // Unique identifier of a DNS zone.
+ ID *string `json:"id,omitempty"`
+
+ // The time when a DNS zone was created.
+ CreatedOn *string `json:"created_on,omitempty"`
+
+ // The most recent time when a DNS zone was modified.
+ ModifiedOn *string `json:"modified_on,omitempty"`
+
+ // Unique identifier of a service instance.
+ InstanceID *string `json:"instance_id,omitempty"`
+
+ // Name of DNS zone.
+ Name *string `json:"name,omitempty"`
+
+ // The text describing the purpose of a DNS zone.
+ Description *string `json:"description,omitempty"`
+
+ // State of DNS zone.
+ State *string `json:"state,omitempty"`
+
+ // The label of a DNS zone.
+ Label *string `json:"label,omitempty"`
+}
+
+// Constants associated with the Dnszone.State property.
+// State of DNS zone.
+const (
+ Dnszone_State_Active = "active"
+ Dnszone_State_Deleted = "deleted"
+ Dnszone_State_Disabled = "disabled"
+ Dnszone_State_PendingDelete = "pending_delete"
+ Dnszone_State_PendingNetworkAdd = "pending_network_add"
+)
+
+
+// UnmarshalDnszone unmarshals an instance of Dnszone from the specified map of raw messages.
+func UnmarshalDnszone(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Dnszone)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "instance_id", &obj.InstanceID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "state", &obj.State)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "label", &obj.Label)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// FirstHref : href.
+type FirstHref struct {
+ // href.
+ Href *string `json:"href,omitempty"`
+}
+
+
+// UnmarshalFirstHref unmarshals an instance of FirstHref from the specified map of raw messages.
+func UnmarshalFirstHref(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(FirstHref)
+ err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// HealthcheckHeader : The HTTP header of health check request.
+type HealthcheckHeader struct {
+ // The name of HTTP request header.
+ Name *string `json:"name" validate:"required"`
+
+ // The value of HTTP request header.
+ Value []string `json:"value" validate:"required"`
+}
+
+
+// NewHealthcheckHeader : Instantiate HealthcheckHeader (Generic Model Constructor)
+func (*DnsSvcsV1) NewHealthcheckHeader(name string, value []string) (model *HealthcheckHeader, err error) {
+ model = &HealthcheckHeader{
+ Name: core.StringPtr(name),
+ Value: value,
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalHealthcheckHeader unmarshals an instance of HealthcheckHeader from the specified map of raw messages.
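+//
+// A usage sketch for the NewHealthcheckHeader constructor above (editor's
+// illustration, not part of the generated code): building a Host header and
+// attaching it to an UpdateMonitorOptions value named monitorOpts:
+//
+//    hdr, err := dnsSvcsService.NewHealthcheckHeader("Host", []string{"origin.example.com"})
+//    if err == nil {
+//        monitorOpts.SetHeadersVar([]HealthcheckHeader{*hdr})
+//    }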
+func UnmarshalHealthcheckHeader(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(HealthcheckHeader) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListDnszones : List DNS zones response. +type ListDnszones struct { + // An array of DNS zones. + Dnszones []Dnszone `json:"dnszones" validate:"required"` + + // Specify how many DNS zones to skip over, the default value is 0. + Offset *int64 `json:"offset" validate:"required"` + + // Specify how many DNS zones are returned, the default value is 10. + Limit *int64 `json:"limit" validate:"required"` + + // Total number of DNS zones. + TotalCount *int64 `json:"total_count" validate:"required"` + + // href. + First *FirstHref `json:"first" validate:"required"` + + // href. + Next *NextHref `json:"next,omitempty"` +} + + +// UnmarshalListDnszones unmarshals an instance of ListDnszones from the specified map of raw messages. +func UnmarshalListDnszones(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListDnszones) + err = core.UnmarshalModel(m, "dnszones", &obj.Dnszones, UnmarshalDnszone) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalFirstHref) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalNextHref) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListLoadBalancers : List Global Load Balancers response. +type ListLoadBalancers struct { + // An array of Global Load Balancers. + LoadBalancers []LoadBalancer `json:"load_balancers" validate:"required"` + + // Page number. + Offset *int64 `json:"offset" validate:"required"` + + // Number of Global Load Balancers per page. + Limit *int64 `json:"limit" validate:"required"` + + // Number of Global Load Balancers. + Count *int64 `json:"count" validate:"required"` + + // Total number of Global Load Balancers. + TotalCount *int64 `json:"total_count" validate:"required"` + + // href. + First *FirstHref `json:"first" validate:"required"` + + // href. + Next *NextHref `json:"next" validate:"required"` +} + + +// UnmarshalListLoadBalancers unmarshals an instance of ListLoadBalancers from the specified map of raw messages. 
+func UnmarshalListLoadBalancers(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ListLoadBalancers)
+ err = core.UnmarshalModel(m, "load_balancers", &obj.LoadBalancers, UnmarshalLoadBalancer)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "offset", &obj.Offset)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "count", &obj.Count)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalFirstHref)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalNextHref)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ListMonitors : List load balancer monitors response.
+type ListMonitors struct {
+ // An array of load balancer monitors.
+ Monitors []Monitor `json:"monitors" validate:"required"`
+
+ // Page number.
+ Offset *int64 `json:"offset" validate:"required"`
+
+ // Number of load balancer monitors per page.
+ Limit *int64 `json:"limit" validate:"required"`
+
+ // Number of load balancer monitors.
+ Count *int64 `json:"count" validate:"required"`
+
+ // Total number of load balancer monitors.
+ TotalCount *int64 `json:"total_count" validate:"required"`
+
+ // href.
+ First *FirstHref `json:"first" validate:"required"`
+
+ // href.
+ Next *NextHref `json:"next" validate:"required"`
+}
+
+
+// UnmarshalListMonitors unmarshals an instance of ListMonitors from the specified map of raw messages.
+func UnmarshalListMonitors(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ListMonitors)
+ err = core.UnmarshalModel(m, "monitors", &obj.Monitors, UnmarshalMonitor)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "offset", &obj.Offset)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "count", &obj.Count)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalFirstHref)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalNextHref)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ListPermittedNetworks : List permitted networks response.
+type ListPermittedNetworks struct {
+ // An array of permitted networks.
+ PermittedNetworks []PermittedNetwork `json:"permitted_networks" validate:"required"`
+
+ // Specify how many permitted networks to skip over, the default value is 0.
+ Offset *int64 `json:"offset" validate:"required"`
+
+ // Specify how many permitted networks are returned, the default value is 10.
+ Limit *int64 `json:"limit" validate:"required"`
+
+ // Total number of permitted networks.
+ TotalCount *int64 `json:"total_count" validate:"required"`
+
+ // href.
+ First *FirstHref `json:"first" validate:"required"`
+
+ // href.
+ Next *NextHref `json:"next,omitempty"`
+}
+
+
+// UnmarshalListPermittedNetworks unmarshals an instance of ListPermittedNetworks from the specified map of raw messages.
+func UnmarshalListPermittedNetworks(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ListPermittedNetworks)
+ err = core.UnmarshalModel(m, "permitted_networks", &obj.PermittedNetworks, UnmarshalPermittedNetwork)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "offset", &obj.Offset)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalFirstHref)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalNextHref)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ListPools : List load balancer pools response.
+type ListPools struct {
+ // An array of load balancer pools.
+ Pools []Pool `json:"pools" validate:"required"`
+
+ // Page number.
+ Offset *int64 `json:"offset" validate:"required"`
+
+ // Number of load balancer pools per page.
+ Limit *int64 `json:"limit" validate:"required"`
+
+ // Number of load balancer pools.
+ Count *int64 `json:"count" validate:"required"`
+
+ // Total number of load balancer pools.
+ TotalCount *int64 `json:"total_count" validate:"required"`
+
+ // href.
+ First *FirstHref `json:"first" validate:"required"`
+
+ // href.
+ Next *NextHref `json:"next" validate:"required"`
+}
+
+
+// UnmarshalListPools unmarshals an instance of ListPools from the specified map of raw messages.
+func UnmarshalListPools(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ListPools)
+ err = core.UnmarshalModel(m, "pools", &obj.Pools, UnmarshalPool)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "offset", &obj.Offset)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "count", &obj.Count)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalFirstHref)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalNextHref)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ListResourceRecords : List Resource Records response.
+type ListResourceRecords struct {
+ // An array of resource records.
+ ResourceRecords []ResourceRecord `json:"resource_records" validate:"required"`
+
+ // Specify how many resource records to skip over, the default value is 0.
+ Offset *int64 `json:"offset" validate:"required"`
+
+ // Specify how many resource records are returned, the default value is 20.
+ Limit *int64 `json:"limit" validate:"required"`
+
+ // Total number of resource records.
+ TotalCount *int64 `json:"total_count" validate:"required"`
+
+ // href.
+ First *FirstHref `json:"first" validate:"required"`
+
+ // href.
+ Next *NextHref `json:"next,omitempty"`
+}
+
+
+// UnmarshalListResourceRecords unmarshals an instance of ListResourceRecords from the specified map of raw messages.
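+// The paged list responses above share the offset/limit/total_count triple plus
+// first/next hrefs. A typical paging loop keeps requesting until no next page is
+// reported (a sketch, assuming a ListResourceRecords service method and an
+// offset setter on its options type defined elsewhere in this package):
+//
+//    for {
+//        list, _, err := dnsSvcs.ListResourceRecords(opts)
+//        if err != nil {
+//            break
+//        }
+//        // ... consume list.ResourceRecords ...
+//        if list.Next == nil || list.Next.Href == nil {
+//            break // last page reached
+//        }
+//        // advance the request offset by len(list.ResourceRecords) before looping
+//    }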
+func UnmarshalListResourceRecords(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListResourceRecords) + err = core.UnmarshalModel(m, "resource_records", &obj.ResourceRecords, UnmarshalResourceRecord) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalFirstHref) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalNextHref) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancer : Load balancer details. +type LoadBalancer struct { + // Identifier of the load balancer. + ID *string `json:"id,omitempty"` + + // Name of the load balancer. + Name *string `json:"name,omitempty"` + + // Descriptive text of the load balancer. + Description *string `json:"description,omitempty"` + + // Whether the load balancer is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Time to live in second. + TTL *int64 `json:"ttl,omitempty"` + + // Healthy state of the load balancer. + Health *string `json:"health,omitempty"` + + // The pool ID to use when all other pools are detected as unhealthy. + FallbackPool *string `json:"fallback_pool,omitempty"` + + // A list of pool IDs ordered by their failover priority. Pools defined here are used by default, or when region_pools + // are not configured for a given region. + DefaultPools []string `json:"default_pools,omitempty"` + + // Map availability zones to pool IDs. + AzPools []LoadBalancerAzPoolsItem `json:"az_pools,omitempty"` + + // The time when a load balancer is created. + CreatedOn *string `json:"created_on,omitempty"` + + // The recent time when a load balancer is modified. + ModifiedOn *string `json:"modified_on,omitempty"` +} + +// Constants associated with the LoadBalancer.Health property. +// Healthy state of the load balancer. +const ( + LoadBalancer_Health_Critical = "CRITICAL" + LoadBalancer_Health_Degraded = "DEGRADED" + LoadBalancer_Health_Healthy = "HEALTHY" +) + + +// UnmarshalLoadBalancer unmarshals an instance of LoadBalancer from the specified map of raw messages. 
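+// Steering for a Global Load Balancer is pool-based: DefaultPools is the ordered
+// failover list, AzPools lets individual availability zones override it, and
+// FallbackPool is consulted when all other pools are unhealthy; all three carry
+// pool IDs, not names. Reading the health state with the constants above (sketch):
+//
+//    if lb.Health != nil && *lb.Health != LoadBalancer_Health_Healthy {
+//        // the balancer is reporting DEGRADED or CRITICAL health
+//    }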
+func UnmarshalLoadBalancer(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(LoadBalancer)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "ttl", &obj.TTL)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "health", &obj.Health)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "fallback_pool", &obj.FallbackPool)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "default_pools", &obj.DefaultPools)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "az_pools", &obj.AzPools, UnmarshalLoadBalancerAzPoolsItem)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// Monitor : Load balancer monitor details.
+type Monitor struct {
+ // Identifier of the load balancer monitor.
+ ID *string `json:"id,omitempty"`
+
+ // The name of the load balancer monitor.
+ Name *string `json:"name,omitempty"`
+
+ // Descriptive text of the load balancer monitor.
+ Description *string `json:"description,omitempty"`
+
+ // The protocol to use for the health check. Currently supported protocols are 'HTTP', 'HTTPS' and 'TCP'.
+ Type *string `json:"type,omitempty"`
+
+ // Port number to connect to for the health check. Required for TCP checks. HTTP and HTTPS checks should only define
+ // the port when using a non-standard port (HTTP: default 80, HTTPS: default 443).
+ Port *int64 `json:"port,omitempty"`
+
+ // The interval between each health check. Shorter intervals may improve failover time, but will increase load on the
+ // origins as we check from multiple locations.
+ Interval *int64 `json:"interval,omitempty"`
+
+ // The number of retries to attempt in case of a timeout before marking the origin as unhealthy. Retries are attempted
+ // immediately.
+ Retries *int64 `json:"retries,omitempty"`
+
+ // The timeout (in seconds) before marking the health check as failed.
+ Timeout *int64 `json:"timeout,omitempty"`
+
+ // The method to use for the health check applicable to HTTP/HTTPS based checks, the default value is 'GET'.
+ Method *string `json:"method,omitempty"`
+
+ // The endpoint path to health check against. This parameter is only valid for HTTP and HTTPS monitors.
+ Path *string `json:"path,omitempty"`
+
+ // The HTTP request headers to send in the health check. It is recommended you set a Host header by default. The
+ // User-Agent header cannot be overridden. This parameter is only valid for HTTP and HTTPS monitors.
+ HeadersVar []HealthcheckHeader `json:"headers,omitempty"`
+
+ // Do not validate the certificate when the monitor uses HTTPS. This parameter is currently only valid for HTTPS monitors.
+ AllowInsecure *bool `json:"allow_insecure,omitempty"`
+
+ // The expected HTTP response code or code range of the health check. This parameter is only valid for HTTP and HTTPS
+ // monitors.
+ ExpectedCodes *string `json:"expected_codes,omitempty"`
+
+ // A case-insensitive sub-string to look for in the response body. If this string is not found, the origin will be
+ // marked as unhealthy. This parameter is only valid for HTTP and HTTPS monitors.
+ ExpectedBody *string `json:"expected_body,omitempty"`
+
+ // the time when a load balancer monitor is created.
+ CreatedOn *string `json:"created_on,omitempty"`
+
+ // the recent time when a load balancer monitor is modified.
+ ModifiedOn *string `json:"modified_on,omitempty"`
+}
+
+// Constants associated with the Monitor.Method property.
+// The method to use for the health check applicable to HTTP/HTTPS based checks, the default value is 'GET'.
+const (
+ Monitor_Method_Get = "GET"
+ Monitor_Method_Head = "HEAD"
+)
+
+
+// UnmarshalMonitor unmarshals an instance of Monitor from the specified map of raw messages.
+func UnmarshalMonitor(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Monitor)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "port", &obj.Port)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "interval", &obj.Interval)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "retries", &obj.Retries)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "timeout", &obj.Timeout)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "method", &obj.Method)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "path", &obj.Path)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "headers", &obj.HeadersVar, UnmarshalHealthcheckHeader)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "allow_insecure", &obj.AllowInsecure)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "expected_codes", &obj.ExpectedCodes)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "expected_body", &obj.ExpectedBody)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// NextHref : href.
+type NextHref struct {
+ // href.
+ Href *string `json:"href,omitempty"`
+}
+
+
+// UnmarshalNextHref unmarshals an instance of NextHref from the specified map of raw messages.
+func UnmarshalNextHref(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(NextHref)
+ err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// Origin : Origin server.
+type Origin struct {
+ // The name of the origin server.
+ Name *string `json:"name,omitempty"`
+
+ // Description of the origin server.
+ Description *string `json:"description,omitempty"`
+
+ // The address of the origin server. It can be a hostname or an IP address.
+ Address *string `json:"address,omitempty"`
+
+ // Whether the origin server is enabled.
+ Enabled *bool `json:"enabled,omitempty"`
+
+ // The health state of the origin server.
+ Health *bool `json:"health,omitempty"`
+
+ // The failure reason of the origin server if it is unhealthy.
+ HealthFailureReason *string `json:"health_failure_reason,omitempty"` +} + + +// UnmarshalOrigin unmarshals an instance of Origin from the specified map of raw messages. +func UnmarshalOrigin(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Origin) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "health", &obj.Health) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "health_failure_reason", &obj.HealthFailureReason) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OriginInput : The request data of origin server. +type OriginInput struct { + // The name of the origin server. + Name *string `json:"name,omitempty"` + + // Description of the origin server. + Description *string `json:"description,omitempty"` + + // The address of the origin server. It can be a hostname or an IP address. + Address *string `json:"address,omitempty"` + + // Whether the origin server is enabled. + Enabled *bool `json:"enabled,omitempty"` +} + + +// UnmarshalOriginInput unmarshals an instance of OriginInput from the specified map of raw messages. +func UnmarshalOriginInput(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OriginInput) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PermittedNetwork : Permitted network details. +type PermittedNetwork struct { + // Unique identifier of a permitted network. + ID *string `json:"id,omitempty"` + + // The time when a permitted network is created. + CreatedOn *string `json:"created_on,omitempty"` + + // The recent time when a permitted network is modified. + ModifiedOn *string `json:"modified_on,omitempty"` + + // Permitted network data for VPC. + PermittedNetwork *PermittedNetworkVpc `json:"permitted_network,omitempty"` + + // The type of a permitted network. + Type *string `json:"type,omitempty"` + + // The state of a permitted network. + State *string `json:"state,omitempty"` +} + +// Constants associated with the PermittedNetwork.Type property. +// The type of a permitted network. +const ( + PermittedNetwork_Type_Vpc = "vpc" +) + +// Constants associated with the PermittedNetwork.State property. +// The state of a permitted network. +const ( + PermittedNetwork_State_Active = "ACTIVE" + PermittedNetwork_State_RemovalInProgress = "REMOVAL_IN_PROGRESS" +) + + +// UnmarshalPermittedNetwork unmarshals an instance of PermittedNetwork from the specified map of raw messages. 
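+// A permitted network is what attaches a private DNS zone to a VPC; "vpc" is the
+// only type listed in the constants above, and the VPC payload is built with the
+// NewPermittedNetworkVpc constructor below (a sketch, assuming a
+// CreateDnszonePermittedNetwork-style service method defined elsewhere in this
+// package, with vpcCrn as a placeholder CRN string):
+//
+//    vpc, err := dnsSvcs.NewPermittedNetworkVpc(vpcCrn)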
+func UnmarshalPermittedNetwork(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PermittedNetwork) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalModel(m, "permitted_network", &obj.PermittedNetwork, UnmarshalPermittedNetworkVpc) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PermittedNetworkVpc : Permitted network data for VPC. +type PermittedNetworkVpc struct { + // CRN string uniquely identifies a VPC. + VpcCrn *string `json:"vpc_crn" validate:"required"` +} + + +// NewPermittedNetworkVpc : Instantiate PermittedNetworkVpc (Generic Model Constructor) +func (*DnsSvcsV1) NewPermittedNetworkVpc(vpcCrn string) (model *PermittedNetworkVpc, err error) { + model = &PermittedNetworkVpc{ + VpcCrn: core.StringPtr(vpcCrn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalPermittedNetworkVpc unmarshals an instance of PermittedNetworkVpc from the specified map of raw messages. +func UnmarshalPermittedNetworkVpc(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PermittedNetworkVpc) + err = core.UnmarshalPrimitive(m, "vpc_crn", &obj.VpcCrn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Pool : Load balancer pool details. +type Pool struct { + // Identifier of the load balancer pool. + ID *string `json:"id,omitempty"` + + // Name of the load balancer pool. + Name *string `json:"name,omitempty"` + + // Descriptive text of the load balancer pool. + Description *string `json:"description,omitempty"` + + // Whether the load balancer pool is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // The minimum number of origins that must be healthy for this pool to serve traffic. If the number of healthy origins + // falls below this number, the pool will be marked unhealthy and we will failover to the next available pool. + HealthyOriginsThreshold *int64 `json:"healthy_origins_threshold,omitempty"` + + // The list of origins within this pool. Traffic directed at this pool is balanced across all currently healthy + // origins, provided the pool itself is healthy. + Origins []Origin `json:"origins,omitempty"` + + // The ID of the load balancer monitor to be associated to this pool. + Monitor *string `json:"monitor,omitempty"` + + // The notification channel. + NotificationChannel *string `json:"notification_channel,omitempty"` + + // Healthy state of the load balancer pool. + Health *string `json:"health,omitempty"` + + // Health check region of VSIs. + HealthcheckRegion *string `json:"healthcheck_region,omitempty"` + + // Health check subnet CRNs. + HealthcheckSubnets []string `json:"healthcheck_subnets,omitempty"` + + // Health check VSI information. + HealthcheckVsis []PoolHealthcheckVsisItem `json:"healthcheck_vsis,omitempty"` + + // the time when a load balancer pool is created. + CreatedOn *string `json:"created_on,omitempty"` + + // the recent time when a load balancer pool is modified. 
+ ModifiedOn *string `json:"modified_on,omitempty"` +} + +// Constants associated with the Pool.Health property. +// Healthy state of the load balancer pool. +const ( + Pool_Health_Critical = "CRITICAL" + Pool_Health_Degraded = "DEGRADED" + Pool_Health_Healthy = "HEALTHY" +) + +// Constants associated with the Pool.HealthcheckRegion property. +// Health check region of VSIs. +const ( + Pool_HealthcheckRegion_AuSyd = "au-syd" + Pool_HealthcheckRegion_EuDu = "eu-du" + Pool_HealthcheckRegion_EuGb = "eu-gb" + Pool_HealthcheckRegion_JpTok = "jp-tok" + Pool_HealthcheckRegion_UsEast = "us-east" + Pool_HealthcheckRegion_UsSouth = "us-south" +) + + +// UnmarshalPool unmarshals an instance of Pool from the specified map of raw messages. +func UnmarshalPool(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Pool) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "healthy_origins_threshold", &obj.HealthyOriginsThreshold) + if err != nil { + return + } + err = core.UnmarshalModel(m, "origins", &obj.Origins, UnmarshalOrigin) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "monitor", &obj.Monitor) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "notification_channel", &obj.NotificationChannel) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "health", &obj.Health) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "healthcheck_region", &obj.HealthcheckRegion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "healthcheck_subnets", &obj.HealthcheckSubnets) + if err != nil { + return + } + err = core.UnmarshalModel(m, "healthcheck_vsis", &obj.HealthcheckVsis, UnmarshalPoolHealthcheckVsisItem) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecord : Resource record details. +type ResourceRecord struct { + // Identifier of the resource record. + ID *string `json:"id,omitempty"` + + // the time when a resource record is created. + CreatedOn *string `json:"created_on,omitempty"` + + // the recent time when a resource record is modified. + ModifiedOn *string `json:"modified_on,omitempty"` + + // Name of the resource record. + Name *string `json:"name,omitempty"` + + // Type of the resource record. + Type *string `json:"type,omitempty"` + + // Time to live in second. + TTL *int64 `json:"ttl,omitempty"` + + // Content of the resource record. + Rdata interface{} `json:"rdata,omitempty"` + + // Only used for SRV record. + Service *string `json:"service,omitempty"` + + // Only used for SRV record. + Protocol *string `json:"protocol,omitempty"` +} + +// Constants associated with the ResourceRecord.Type property. +// Type of the resource record. 
+const ( + ResourceRecord_Type_A = "A" + ResourceRecord_Type_Aaaa = "AAAA" + ResourceRecord_Type_Cname = "CNAME" + ResourceRecord_Type_Mx = "MX" + ResourceRecord_Type_Ptr = "PTR" + ResourceRecord_Type_Srv = "SRV" + ResourceRecord_Type_Txt = "TXT" +) + + +// UnmarshalResourceRecord unmarshals an instance of ResourceRecord from the specified map of raw messages. +func UnmarshalResourceRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecord) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ttl", &obj.TTL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "rdata", &obj.Rdata) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "service", &obj.Service) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordInputRdataRdataARecord : The content of type-A resource record. +// This model "extends" ResourceRecordInputRdata +type ResourceRecordInputRdataRdataARecord struct { + // IPv4 address. + Ip *string `json:"ip" validate:"required"` +} + + +// NewResourceRecordInputRdataRdataARecord : Instantiate ResourceRecordInputRdataRdataARecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordInputRdataRdataARecord(ip string) (model *ResourceRecordInputRdataRdataARecord, err error) { + model = &ResourceRecordInputRdataRdataARecord{ + Ip: core.StringPtr(ip), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordInputRdataRdataARecord) isaResourceRecordInputRdata() bool { + return true +} + +// UnmarshalResourceRecordInputRdataRdataARecord unmarshals an instance of ResourceRecordInputRdataRdataARecord from the specified map of raw messages. +func UnmarshalResourceRecordInputRdataRdataARecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordInputRdataRdataARecord) + err = core.UnmarshalPrimitive(m, "ip", &obj.Ip) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordInputRdataRdataAaaaRecord : The content of type-AAAA resource record. +// This model "extends" ResourceRecordInputRdata +type ResourceRecordInputRdataRdataAaaaRecord struct { + // IPv6 address. + Ip *string `json:"ip" validate:"required"` +} + + +// NewResourceRecordInputRdataRdataAaaaRecord : Instantiate ResourceRecordInputRdataRdataAaaaRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordInputRdataRdataAaaaRecord(ip string) (model *ResourceRecordInputRdataRdataAaaaRecord, err error) { + model = &ResourceRecordInputRdataRdataAaaaRecord{ + Ip: core.StringPtr(ip), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordInputRdataRdataAaaaRecord) isaResourceRecordInputRdata() bool { + return true +} + +// UnmarshalResourceRecordInputRdataRdataAaaaRecord unmarshals an instance of ResourceRecordInputRdataRdataAaaaRecord from the specified map of raw messages. 
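+// Each concrete rdata model implements the isaResourceRecordInputRdata marker
+// method, so any of them satisfies the input-rdata interface expected when
+// creating a record (the same pattern as ResourceRecordUpdateInputRdataIntf
+// above). A minimal sketch for an A record; core.ValidateStruct enforces the
+// required tags inside the constructor:
+//
+//    rdata, err := dnsSvcs.NewResourceRecordInputRdataRdataARecord("192.0.2.10")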
+func UnmarshalResourceRecordInputRdataRdataAaaaRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordInputRdataRdataAaaaRecord) + err = core.UnmarshalPrimitive(m, "ip", &obj.Ip) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordInputRdataRdataCnameRecord : The content of type-CNAME resource record. +// This model "extends" ResourceRecordInputRdata +type ResourceRecordInputRdataRdataCnameRecord struct { + // Canonical name. + Cname *string `json:"cname" validate:"required"` +} + + +// NewResourceRecordInputRdataRdataCnameRecord : Instantiate ResourceRecordInputRdataRdataCnameRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordInputRdataRdataCnameRecord(cname string) (model *ResourceRecordInputRdataRdataCnameRecord, err error) { + model = &ResourceRecordInputRdataRdataCnameRecord{ + Cname: core.StringPtr(cname), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordInputRdataRdataCnameRecord) isaResourceRecordInputRdata() bool { + return true +} + +// UnmarshalResourceRecordInputRdataRdataCnameRecord unmarshals an instance of ResourceRecordInputRdataRdataCnameRecord from the specified map of raw messages. +func UnmarshalResourceRecordInputRdataRdataCnameRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordInputRdataRdataCnameRecord) + err = core.UnmarshalPrimitive(m, "cname", &obj.Cname) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordInputRdataRdataMxRecord : The content of type-MX resource record. +// This model "extends" ResourceRecordInputRdata +type ResourceRecordInputRdataRdataMxRecord struct { + // Hostname of Exchange server. + Exchange *string `json:"exchange" validate:"required"` + + // Preference of the MX record. + Preference *int64 `json:"preference" validate:"required"` +} + + +// NewResourceRecordInputRdataRdataMxRecord : Instantiate ResourceRecordInputRdataRdataMxRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordInputRdataRdataMxRecord(exchange string, preference int64) (model *ResourceRecordInputRdataRdataMxRecord, err error) { + model = &ResourceRecordInputRdataRdataMxRecord{ + Exchange: core.StringPtr(exchange), + Preference: core.Int64Ptr(preference), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordInputRdataRdataMxRecord) isaResourceRecordInputRdata() bool { + return true +} + +// UnmarshalResourceRecordInputRdataRdataMxRecord unmarshals an instance of ResourceRecordInputRdataRdataMxRecord from the specified map of raw messages. +func UnmarshalResourceRecordInputRdataRdataMxRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordInputRdataRdataMxRecord) + err = core.UnmarshalPrimitive(m, "exchange", &obj.Exchange) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "preference", &obj.Preference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordInputRdataRdataPtrRecord : The content of type-PTR resource record. +// This model "extends" ResourceRecordInputRdata +type ResourceRecordInputRdataRdataPtrRecord struct { + // Hostname of the relevant A or AAAA record. 
+ Ptrdname *string `json:"ptrdname" validate:"required"` +} + + +// NewResourceRecordInputRdataRdataPtrRecord : Instantiate ResourceRecordInputRdataRdataPtrRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordInputRdataRdataPtrRecord(ptrdname string) (model *ResourceRecordInputRdataRdataPtrRecord, err error) { + model = &ResourceRecordInputRdataRdataPtrRecord{ + Ptrdname: core.StringPtr(ptrdname), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordInputRdataRdataPtrRecord) isaResourceRecordInputRdata() bool { + return true +} + +// UnmarshalResourceRecordInputRdataRdataPtrRecord unmarshals an instance of ResourceRecordInputRdataRdataPtrRecord from the specified map of raw messages. +func UnmarshalResourceRecordInputRdataRdataPtrRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordInputRdataRdataPtrRecord) + err = core.UnmarshalPrimitive(m, "ptrdname", &obj.Ptrdname) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordInputRdataRdataSrvRecord : The content of type-SRV resource record. +// This model "extends" ResourceRecordInputRdata +type ResourceRecordInputRdataRdataSrvRecord struct { + // Port number of the target server. + Port *int64 `json:"port" validate:"required"` + + // Priority of the SRV record. + Priority *int64 `json:"priority" validate:"required"` + + // Hostname of the target server. + Target *string `json:"target" validate:"required"` + + // Weight of distributing queries among multiple target servers. + Weight *int64 `json:"weight" validate:"required"` +} + + +// NewResourceRecordInputRdataRdataSrvRecord : Instantiate ResourceRecordInputRdataRdataSrvRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordInputRdataRdataSrvRecord(port int64, priority int64, target string, weight int64) (model *ResourceRecordInputRdataRdataSrvRecord, err error) { + model = &ResourceRecordInputRdataRdataSrvRecord{ + Port: core.Int64Ptr(port), + Priority: core.Int64Ptr(priority), + Target: core.StringPtr(target), + Weight: core.Int64Ptr(weight), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordInputRdataRdataSrvRecord) isaResourceRecordInputRdata() bool { + return true +} + +// UnmarshalResourceRecordInputRdataRdataSrvRecord unmarshals an instance of ResourceRecordInputRdataRdataSrvRecord from the specified map of raw messages. +func UnmarshalResourceRecordInputRdataRdataSrvRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordInputRdataRdataSrvRecord) + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "target", &obj.Target) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "weight", &obj.Weight) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordInputRdataRdataTxtRecord : The content of type-TXT resource record. +// This model "extends" ResourceRecordInputRdata +type ResourceRecordInputRdataRdataTxtRecord struct { + // Human readable text. 
+ Text *string `json:"text" validate:"required"` +} + + +// NewResourceRecordInputRdataRdataTxtRecord : Instantiate ResourceRecordInputRdataRdataTxtRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordInputRdataRdataTxtRecord(text string) (model *ResourceRecordInputRdataRdataTxtRecord, err error) { + model = &ResourceRecordInputRdataRdataTxtRecord{ + Text: core.StringPtr(text), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordInputRdataRdataTxtRecord) isaResourceRecordInputRdata() bool { + return true +} + +// UnmarshalResourceRecordInputRdataRdataTxtRecord unmarshals an instance of ResourceRecordInputRdataRdataTxtRecord from the specified map of raw messages. +func UnmarshalResourceRecordInputRdataRdataTxtRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordInputRdataRdataTxtRecord) + err = core.UnmarshalPrimitive(m, "text", &obj.Text) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordUpdateInputRdataRdataARecord : The content of type-A resource record. +// This model "extends" ResourceRecordUpdateInputRdata +type ResourceRecordUpdateInputRdataRdataARecord struct { + // IPv4 address. + Ip *string `json:"ip" validate:"required"` +} + + +// NewResourceRecordUpdateInputRdataRdataARecord : Instantiate ResourceRecordUpdateInputRdataRdataARecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordUpdateInputRdataRdataARecord(ip string) (model *ResourceRecordUpdateInputRdataRdataARecord, err error) { + model = &ResourceRecordUpdateInputRdataRdataARecord{ + Ip: core.StringPtr(ip), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordUpdateInputRdataRdataARecord) isaResourceRecordUpdateInputRdata() bool { + return true +} + +// UnmarshalResourceRecordUpdateInputRdataRdataARecord unmarshals an instance of ResourceRecordUpdateInputRdataRdataARecord from the specified map of raw messages. +func UnmarshalResourceRecordUpdateInputRdataRdataARecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordUpdateInputRdataRdataARecord) + err = core.UnmarshalPrimitive(m, "ip", &obj.Ip) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordUpdateInputRdataRdataAaaaRecord : The content of type-AAAA resource record. +// This model "extends" ResourceRecordUpdateInputRdata +type ResourceRecordUpdateInputRdataRdataAaaaRecord struct { + // IPv6 address. + Ip *string `json:"ip" validate:"required"` +} + + +// NewResourceRecordUpdateInputRdataRdataAaaaRecord : Instantiate ResourceRecordUpdateInputRdataRdataAaaaRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordUpdateInputRdataRdataAaaaRecord(ip string) (model *ResourceRecordUpdateInputRdataRdataAaaaRecord, err error) { + model = &ResourceRecordUpdateInputRdataRdataAaaaRecord{ + Ip: core.StringPtr(ip), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordUpdateInputRdataRdataAaaaRecord) isaResourceRecordUpdateInputRdata() bool { + return true +} + +// UnmarshalResourceRecordUpdateInputRdataRdataAaaaRecord unmarshals an instance of ResourceRecordUpdateInputRdataRdataAaaaRecord from the specified map of raw messages. 
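+// The ResourceRecordUpdateInput* variants mirror the creation models and plug
+// into UpdateResourceRecordOptions.SetRdata. An end-to-end sketch, assuming an
+// UpdateResourceRecord service method defined elsewhere in this package and
+// placeholder identifier values:
+//
+//    rdata, _ := dnsSvcs.NewResourceRecordUpdateInputRdataRdataARecord("10.0.0.2")
+//    opts := dnsSvcs.NewUpdateResourceRecordOptions(instanceID, dnszoneID, recordID)
+//    opts.SetRdata(rdata).SetTTL(300)
+//    _, _, err := dnsSvcs.UpdateResourceRecord(opts)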
+func UnmarshalResourceRecordUpdateInputRdataRdataAaaaRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordUpdateInputRdataRdataAaaaRecord) + err = core.UnmarshalPrimitive(m, "ip", &obj.Ip) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordUpdateInputRdataRdataCnameRecord : The content of type-CNAME resource record. +// This model "extends" ResourceRecordUpdateInputRdata +type ResourceRecordUpdateInputRdataRdataCnameRecord struct { + // Canonical name. + Cname *string `json:"cname" validate:"required"` +} + + +// NewResourceRecordUpdateInputRdataRdataCnameRecord : Instantiate ResourceRecordUpdateInputRdataRdataCnameRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordUpdateInputRdataRdataCnameRecord(cname string) (model *ResourceRecordUpdateInputRdataRdataCnameRecord, err error) { + model = &ResourceRecordUpdateInputRdataRdataCnameRecord{ + Cname: core.StringPtr(cname), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordUpdateInputRdataRdataCnameRecord) isaResourceRecordUpdateInputRdata() bool { + return true +} + +// UnmarshalResourceRecordUpdateInputRdataRdataCnameRecord unmarshals an instance of ResourceRecordUpdateInputRdataRdataCnameRecord from the specified map of raw messages. +func UnmarshalResourceRecordUpdateInputRdataRdataCnameRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordUpdateInputRdataRdataCnameRecord) + err = core.UnmarshalPrimitive(m, "cname", &obj.Cname) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordUpdateInputRdataRdataMxRecord : The content of type-MX resource record. +// This model "extends" ResourceRecordUpdateInputRdata +type ResourceRecordUpdateInputRdataRdataMxRecord struct { + // Hostname of Exchange server. + Exchange *string `json:"exchange" validate:"required"` + + // Preference of the MX record. + Preference *int64 `json:"preference" validate:"required"` +} + + +// NewResourceRecordUpdateInputRdataRdataMxRecord : Instantiate ResourceRecordUpdateInputRdataRdataMxRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordUpdateInputRdataRdataMxRecord(exchange string, preference int64) (model *ResourceRecordUpdateInputRdataRdataMxRecord, err error) { + model = &ResourceRecordUpdateInputRdataRdataMxRecord{ + Exchange: core.StringPtr(exchange), + Preference: core.Int64Ptr(preference), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordUpdateInputRdataRdataMxRecord) isaResourceRecordUpdateInputRdata() bool { + return true +} + +// UnmarshalResourceRecordUpdateInputRdataRdataMxRecord unmarshals an instance of ResourceRecordUpdateInputRdataRdataMxRecord from the specified map of raw messages. +func UnmarshalResourceRecordUpdateInputRdataRdataMxRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordUpdateInputRdataRdataMxRecord) + err = core.UnmarshalPrimitive(m, "exchange", &obj.Exchange) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "preference", &obj.Preference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordUpdateInputRdataRdataPtrRecord : The content of type-PTR resource record. 
+// This model "extends" ResourceRecordUpdateInputRdata +type ResourceRecordUpdateInputRdataRdataPtrRecord struct { + // Hostname of the relevant A or AAAA record. + Ptrdname *string `json:"ptrdname" validate:"required"` +} + + +// NewResourceRecordUpdateInputRdataRdataPtrRecord : Instantiate ResourceRecordUpdateInputRdataRdataPtrRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordUpdateInputRdataRdataPtrRecord(ptrdname string) (model *ResourceRecordUpdateInputRdataRdataPtrRecord, err error) { + model = &ResourceRecordUpdateInputRdataRdataPtrRecord{ + Ptrdname: core.StringPtr(ptrdname), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordUpdateInputRdataRdataPtrRecord) isaResourceRecordUpdateInputRdata() bool { + return true +} + +// UnmarshalResourceRecordUpdateInputRdataRdataPtrRecord unmarshals an instance of ResourceRecordUpdateInputRdataRdataPtrRecord from the specified map of raw messages. +func UnmarshalResourceRecordUpdateInputRdataRdataPtrRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordUpdateInputRdataRdataPtrRecord) + err = core.UnmarshalPrimitive(m, "ptrdname", &obj.Ptrdname) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordUpdateInputRdataRdataSrvRecord : The content of type-SRV resource record. +// This model "extends" ResourceRecordUpdateInputRdata +type ResourceRecordUpdateInputRdataRdataSrvRecord struct { + // Port number of the target server. + Port *int64 `json:"port" validate:"required"` + + // Priority of the SRV record. + Priority *int64 `json:"priority" validate:"required"` + + // Hostname of the target server. + Target *string `json:"target" validate:"required"` + + // Weight of distributing queries among multiple target servers. + Weight *int64 `json:"weight" validate:"required"` +} + + +// NewResourceRecordUpdateInputRdataRdataSrvRecord : Instantiate ResourceRecordUpdateInputRdataRdataSrvRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordUpdateInputRdataRdataSrvRecord(port int64, priority int64, target string, weight int64) (model *ResourceRecordUpdateInputRdataRdataSrvRecord, err error) { + model = &ResourceRecordUpdateInputRdataRdataSrvRecord{ + Port: core.Int64Ptr(port), + Priority: core.Int64Ptr(priority), + Target: core.StringPtr(target), + Weight: core.Int64Ptr(weight), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordUpdateInputRdataRdataSrvRecord) isaResourceRecordUpdateInputRdata() bool { + return true +} + +// UnmarshalResourceRecordUpdateInputRdataRdataSrvRecord unmarshals an instance of ResourceRecordUpdateInputRdataRdataSrvRecord from the specified map of raw messages. +func UnmarshalResourceRecordUpdateInputRdataRdataSrvRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordUpdateInputRdataRdataSrvRecord) + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "target", &obj.Target) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "weight", &obj.Weight) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceRecordUpdateInputRdataRdataTxtRecord : The content of type-TXT resource record. 
+// This model "extends" ResourceRecordUpdateInputRdata +type ResourceRecordUpdateInputRdataRdataTxtRecord struct { + // Human readable text. + Text *string `json:"text" validate:"required"` +} + + +// NewResourceRecordUpdateInputRdataRdataTxtRecord : Instantiate ResourceRecordUpdateInputRdataRdataTxtRecord (Generic Model Constructor) +func (*DnsSvcsV1) NewResourceRecordUpdateInputRdataRdataTxtRecord(text string) (model *ResourceRecordUpdateInputRdataRdataTxtRecord, err error) { + model = &ResourceRecordUpdateInputRdataRdataTxtRecord{ + Text: core.StringPtr(text), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceRecordUpdateInputRdataRdataTxtRecord) isaResourceRecordUpdateInputRdata() bool { + return true +} + +// UnmarshalResourceRecordUpdateInputRdataRdataTxtRecord unmarshals an instance of ResourceRecordUpdateInputRdataRdataTxtRecord from the specified map of raw messages. +func UnmarshalResourceRecordUpdateInputRdataRdataTxtRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceRecordUpdateInputRdataRdataTxtRecord) + err = core.UnmarshalPrimitive(m, "text", &obj.Text) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/edgefunctionsapiv1/edge_functions_api_v1.go b/vendor/github.com/IBM/networking-go-sdk/edgefunctionsapiv1/edge_functions_api_v1.go new file mode 100644 index 00000000000..f1e6b675e2b --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/edgefunctionsapiv1/edge_functions_api_v1.go @@ -0,0 +1,1459 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package edgefunctionsapiv1 : Operations and models for the EdgeFunctionsApiV1 service +package edgefunctionsapiv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "github.com/go-openapi/strfmt" + "io" + "net/http" + "reflect" + "time" +) + +// EdgeFunctionsApiV1 : Edge Functions +// +// Version: 1.0.0 +type EdgeFunctionsApiV1 struct { + Service *core.BaseService + + // cloud resource name. + Crn *string + + // zone identifier. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "edge_functions_api" + +// EdgeFunctionsApiV1Options : Service options +type EdgeFunctionsApiV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // cloud resource name. + Crn *string `validate:"required"` + + // zone identifier. 
+ ZoneIdentifier *string `validate:"required"` +} + +// NewEdgeFunctionsApiV1UsingExternalConfig : constructs an instance of EdgeFunctionsApiV1 with passed in options and external configuration. +func NewEdgeFunctionsApiV1UsingExternalConfig(options *EdgeFunctionsApiV1Options) (edgeFunctionsApi *EdgeFunctionsApiV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + edgeFunctionsApi, err = NewEdgeFunctionsApiV1(options) + if err != nil { + return + } + + err = edgeFunctionsApi.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = edgeFunctionsApi.Service.SetServiceURL(options.URL) + } + return +} + +// NewEdgeFunctionsApiV1 : constructs an instance of EdgeFunctionsApiV1 with passed in options. +func NewEdgeFunctionsApiV1(options *EdgeFunctionsApiV1Options) (service *EdgeFunctionsApiV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &EdgeFunctionsApiV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "edgeFunctionsApi" suitable for processing requests. +func (edgeFunctionsApi *EdgeFunctionsApiV1) Clone() *EdgeFunctionsApiV1 { + if core.IsNil(edgeFunctionsApi) { + return nil + } + clone := *edgeFunctionsApi + clone.Service = edgeFunctionsApi.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (edgeFunctionsApi *EdgeFunctionsApiV1) SetServiceURL(url string) error { + return edgeFunctionsApi.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (edgeFunctionsApi *EdgeFunctionsApiV1) GetServiceURL() string { + return edgeFunctionsApi.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (edgeFunctionsApi *EdgeFunctionsApiV1) SetDefaultHeaders(headers http.Header) { + edgeFunctionsApi.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (edgeFunctionsApi *EdgeFunctionsApiV1) SetEnableGzipCompression(enableGzip bool) { + edgeFunctionsApi.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (edgeFunctionsApi *EdgeFunctionsApiV1) GetEnableGzipCompression() bool { + return edgeFunctionsApi.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. 
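+//
+// A minimal construction-plus-retries sketch (authenticator, crn and zoneID are
+// placeholder values):
+//
+//    service, err := NewEdgeFunctionsApiV1(&EdgeFunctionsApiV1Options{
+//        Authenticator:  authenticator,
+//        Crn:            core.StringPtr(crn),
+//        ZoneIdentifier: core.StringPtr(zoneID),
+//    })
+//    if err == nil {
+//        service.EnableRetries(3, 30*time.Second) // at most 3 retries, 30s max interval
+//    }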
+func (edgeFunctionsApi *EdgeFunctionsApiV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
+ edgeFunctionsApi.Service.EnableRetries(maxRetries, maxRetryInterval)
+}
+
+// DisableRetries disables automatic retries for requests invoked for this service instance.
+func (edgeFunctionsApi *EdgeFunctionsApiV1) DisableRetries() {
+ edgeFunctionsApi.Service.DisableRetries()
+}
+
+// ListEdgeFunctionsActions : Get all edge functions scripts for a given instance
+// Get all edge functions scripts for a given instance.
+func (edgeFunctionsApi *EdgeFunctionsApiV1) ListEdgeFunctionsActions(listEdgeFunctionsActionsOptions *ListEdgeFunctionsActionsOptions) (result *ListEdgeFunctionsActionsResp, response *core.DetailedResponse, err error) {
+ return edgeFunctionsApi.ListEdgeFunctionsActionsWithContext(context.Background(), listEdgeFunctionsActionsOptions)
+}
+
+// ListEdgeFunctionsActionsWithContext is an alternate form of the ListEdgeFunctionsActions method which supports a Context parameter
+func (edgeFunctionsApi *EdgeFunctionsApiV1) ListEdgeFunctionsActionsWithContext(ctx context.Context, listEdgeFunctionsActionsOptions *ListEdgeFunctionsActionsOptions) (result *ListEdgeFunctionsActionsResp, response *core.DetailedResponse, err error) {
+ err = core.ValidateStruct(listEdgeFunctionsActionsOptions, "listEdgeFunctionsActionsOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *edgeFunctionsApi.Crn,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = edgeFunctionsApi.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(edgeFunctionsApi.Service.Options.URL, `/v1/{crn}/workers/scripts`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range listEdgeFunctionsActionsOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("edge_functions_api", "V1", "ListEdgeFunctionsActions")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ if listEdgeFunctionsActionsOptions.XCorrelationID != nil {
+ builder.AddHeader("X-Correlation-ID", fmt.Sprint(*listEdgeFunctionsActionsOptions.XCorrelationID))
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = edgeFunctionsApi.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListEdgeFunctionsActionsResp)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// UpdateEdgeFunctionsAction : Upload or replace an edge functions action for a given instance
+// Upload or replace an existing edge functions action for a given instance.
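+//
+// A minimal upload sketch (assuming the generated NewUpdateEdgeFunctionsActionOptions
+// constructor and setters that follow the same pattern as the other options types,
+// with the script body supplied as an io.ReadCloser):
+//
+//    opts := service.NewUpdateEdgeFunctionsActionOptions("my-script")
+//    opts.SetEdgeFunctionsAction(scriptBody) // raw JavaScript, sent as application/javascript
+//    _, _, err := service.UpdateEdgeFunctionsAction(opts)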
+func (edgeFunctionsApi *EdgeFunctionsApiV1) UpdateEdgeFunctionsAction(updateEdgeFunctionsActionOptions *UpdateEdgeFunctionsActionOptions) (result *GetEdgeFunctionsActionResp, response *core.DetailedResponse, err error) {
+	return edgeFunctionsApi.UpdateEdgeFunctionsActionWithContext(context.Background(), updateEdgeFunctionsActionOptions)
+}
+
+// UpdateEdgeFunctionsActionWithContext is an alternate form of the UpdateEdgeFunctionsAction method which supports a Context parameter
+func (edgeFunctionsApi *EdgeFunctionsApiV1) UpdateEdgeFunctionsActionWithContext(ctx context.Context, updateEdgeFunctionsActionOptions *UpdateEdgeFunctionsActionOptions) (result *GetEdgeFunctionsActionResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(updateEdgeFunctionsActionOptions, "updateEdgeFunctionsActionOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(updateEdgeFunctionsActionOptions, "updateEdgeFunctionsActionOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *edgeFunctionsApi.Crn,
+		"script_name": *updateEdgeFunctionsActionOptions.ScriptName,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = edgeFunctionsApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(edgeFunctionsApi.Service.Options.URL, `/v1/{crn}/workers/scripts/{script_name}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range updateEdgeFunctionsActionOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("edge_functions_api", "V1", "UpdateEdgeFunctionsAction")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/javascript")
+	if updateEdgeFunctionsActionOptions.XCorrelationID != nil {
+		builder.AddHeader("X-Correlation-ID", fmt.Sprint(*updateEdgeFunctionsActionOptions.XCorrelationID))
+	}
+
+	_, err = builder.SetBodyContent("application/javascript", nil, nil, updateEdgeFunctionsActionOptions.EdgeFunctionsAction)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = edgeFunctionsApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetEdgeFunctionsActionResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// GetEdgeFunctionsAction : Download an edge functions action for a given instance
+// Fetch raw script content for your worker. Note this is the original script content, not JSON encoded.
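+//
+// Because the result is an io.ReadCloser, the caller must read and close it.
+// Sketch (the script name is a placeholder):
+//
+//	opts := service.NewGetEdgeFunctionsActionOptions("my-action")
+//	script, _, err := service.GetEdgeFunctionsAction(opts)
+//	if err == nil {
+//		defer script.Close()
+//		content, _ := ioutil.ReadAll(script)
+//		fmt.Println(string(content))
+//	}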
+func (edgeFunctionsApi *EdgeFunctionsApiV1) GetEdgeFunctionsAction(getEdgeFunctionsActionOptions *GetEdgeFunctionsActionOptions) (result io.ReadCloser, response *core.DetailedResponse, err error) {
+	return edgeFunctionsApi.GetEdgeFunctionsActionWithContext(context.Background(), getEdgeFunctionsActionOptions)
+}
+
+// GetEdgeFunctionsActionWithContext is an alternate form of the GetEdgeFunctionsAction method which supports a Context parameter
+func (edgeFunctionsApi *EdgeFunctionsApiV1) GetEdgeFunctionsActionWithContext(ctx context.Context, getEdgeFunctionsActionOptions *GetEdgeFunctionsActionOptions) (result io.ReadCloser, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(getEdgeFunctionsActionOptions, "getEdgeFunctionsActionOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(getEdgeFunctionsActionOptions, "getEdgeFunctionsActionOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *edgeFunctionsApi.Crn,
+		"script_name": *getEdgeFunctionsActionOptions.ScriptName,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = edgeFunctionsApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(edgeFunctionsApi.Service.Options.URL, `/v1/{crn}/workers/scripts/{script_name}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getEdgeFunctionsActionOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("edge_functions_api", "V1", "GetEdgeFunctionsAction")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/javascript")
+	if getEdgeFunctionsActionOptions.XCorrelationID != nil {
+		builder.AddHeader("X-Correlation-ID", fmt.Sprint(*getEdgeFunctionsActionOptions.XCorrelationID))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = edgeFunctionsApi.Service.Request(request, &result)
+
+	return
+}
+
+// DeleteEdgeFunctionsAction : Delete an edge functions action for a given instance
+// Delete an edge functions action for a given instance.
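+//
+// Sketch (the script name is a placeholder); the deleted action's ID is
+// reported in the result:
+//
+//	opts := service.NewDeleteEdgeFunctionsActionOptions("my-action")
+//	result, _, err := service.DeleteEdgeFunctionsAction(opts)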
+func (edgeFunctionsApi *EdgeFunctionsApiV1) DeleteEdgeFunctionsAction(deleteEdgeFunctionsActionOptions *DeleteEdgeFunctionsActionOptions) (result *DeleteEdgeFunctionsActionResp, response *core.DetailedResponse, err error) { + return edgeFunctionsApi.DeleteEdgeFunctionsActionWithContext(context.Background(), deleteEdgeFunctionsActionOptions) +} + +// DeleteEdgeFunctionsActionWithContext is an alternate form of the DeleteEdgeFunctionsAction method which supports a Context parameter +func (edgeFunctionsApi *EdgeFunctionsApiV1) DeleteEdgeFunctionsActionWithContext(ctx context.Context, deleteEdgeFunctionsActionOptions *DeleteEdgeFunctionsActionOptions) (result *DeleteEdgeFunctionsActionResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteEdgeFunctionsActionOptions, "deleteEdgeFunctionsActionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteEdgeFunctionsActionOptions, "deleteEdgeFunctionsActionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *edgeFunctionsApi.Crn, + "script_name": *deleteEdgeFunctionsActionOptions.ScriptName, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = edgeFunctionsApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(edgeFunctionsApi.Service.Options.URL, `/v1/{crn}/workers/scripts/{script_name}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteEdgeFunctionsActionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("edge_functions_api", "V1", "DeleteEdgeFunctionsAction") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if deleteEdgeFunctionsActionOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*deleteEdgeFunctionsActionOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = edgeFunctionsApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteEdgeFunctionsActionResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateEdgeFunctionsTrigger : Create an edge functions trigger on a given zone +// Create an edge functions trigger on a given zone. 
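+//
+// Sketch (pattern and script name are placeholders); the new trigger routes
+// matching requests on the configured zone to the named action:
+//
+//	opts := service.NewCreateEdgeFunctionsTriggerOptions().
+//		SetPattern("example.com/*").
+//		SetScript("my-action")
+//	result, _, err := service.CreateEdgeFunctionsTrigger(opts)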
+func (edgeFunctionsApi *EdgeFunctionsApiV1) CreateEdgeFunctionsTrigger(createEdgeFunctionsTriggerOptions *CreateEdgeFunctionsTriggerOptions) (result *CreateEdgeFunctionsTriggerResp, response *core.DetailedResponse, err error) { + return edgeFunctionsApi.CreateEdgeFunctionsTriggerWithContext(context.Background(), createEdgeFunctionsTriggerOptions) +} + +// CreateEdgeFunctionsTriggerWithContext is an alternate form of the CreateEdgeFunctionsTrigger method which supports a Context parameter +func (edgeFunctionsApi *EdgeFunctionsApiV1) CreateEdgeFunctionsTriggerWithContext(ctx context.Context, createEdgeFunctionsTriggerOptions *CreateEdgeFunctionsTriggerOptions) (result *CreateEdgeFunctionsTriggerResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createEdgeFunctionsTriggerOptions, "createEdgeFunctionsTriggerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *edgeFunctionsApi.Crn, + "zone_identifier": *edgeFunctionsApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = edgeFunctionsApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(edgeFunctionsApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/workers/routes`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createEdgeFunctionsTriggerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("edge_functions_api", "V1", "CreateEdgeFunctionsTrigger") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createEdgeFunctionsTriggerOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*createEdgeFunctionsTriggerOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if createEdgeFunctionsTriggerOptions.Pattern != nil { + body["pattern"] = createEdgeFunctionsTriggerOptions.Pattern + } + if createEdgeFunctionsTriggerOptions.Script != nil { + body["script"] = createEdgeFunctionsTriggerOptions.Script + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = edgeFunctionsApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateEdgeFunctionsTriggerResp) + if err != nil { + return + } + response.Result = result + + return +} + +// ListEdgeFunctionsTriggers : List all edge functions triggers on a given zone +// List all edge functions triggers on a given zone. 
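+//
+// Sketch: list and print the trigger IDs configured on the zone bound to this
+// client instance:
+//
+//	result, _, err := service.ListEdgeFunctionsTriggers(service.NewListEdgeFunctionsTriggersOptions())
+//	if err == nil {
+//		for _, trigger := range result.Result {
+//			fmt.Println(*trigger.ID)
+//		}
+//	}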
+func (edgeFunctionsApi *EdgeFunctionsApiV1) ListEdgeFunctionsTriggers(listEdgeFunctionsTriggersOptions *ListEdgeFunctionsTriggersOptions) (result *ListEdgeFunctionsTriggersResp, response *core.DetailedResponse, err error) { + return edgeFunctionsApi.ListEdgeFunctionsTriggersWithContext(context.Background(), listEdgeFunctionsTriggersOptions) +} + +// ListEdgeFunctionsTriggersWithContext is an alternate form of the ListEdgeFunctionsTriggers method which supports a Context parameter +func (edgeFunctionsApi *EdgeFunctionsApiV1) ListEdgeFunctionsTriggersWithContext(ctx context.Context, listEdgeFunctionsTriggersOptions *ListEdgeFunctionsTriggersOptions) (result *ListEdgeFunctionsTriggersResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listEdgeFunctionsTriggersOptions, "listEdgeFunctionsTriggersOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *edgeFunctionsApi.Crn, + "zone_identifier": *edgeFunctionsApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = edgeFunctionsApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(edgeFunctionsApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/workers/routes`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listEdgeFunctionsTriggersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("edge_functions_api", "V1", "ListEdgeFunctionsTriggers") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if listEdgeFunctionsTriggersOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*listEdgeFunctionsTriggersOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = edgeFunctionsApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListEdgeFunctionsTriggersResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetEdgeFunctionsTrigger : Get an edge functions trigger on a given zone +// Get an edge functions trigger on a given zone. 
+func (edgeFunctionsApi *EdgeFunctionsApiV1) GetEdgeFunctionsTrigger(getEdgeFunctionsTriggerOptions *GetEdgeFunctionsTriggerOptions) (result *GetEdgeFunctionsTriggerResp, response *core.DetailedResponse, err error) { + return edgeFunctionsApi.GetEdgeFunctionsTriggerWithContext(context.Background(), getEdgeFunctionsTriggerOptions) +} + +// GetEdgeFunctionsTriggerWithContext is an alternate form of the GetEdgeFunctionsTrigger method which supports a Context parameter +func (edgeFunctionsApi *EdgeFunctionsApiV1) GetEdgeFunctionsTriggerWithContext(ctx context.Context, getEdgeFunctionsTriggerOptions *GetEdgeFunctionsTriggerOptions) (result *GetEdgeFunctionsTriggerResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getEdgeFunctionsTriggerOptions, "getEdgeFunctionsTriggerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getEdgeFunctionsTriggerOptions, "getEdgeFunctionsTriggerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *edgeFunctionsApi.Crn, + "zone_identifier": *edgeFunctionsApi.ZoneIdentifier, + "route_id": *getEdgeFunctionsTriggerOptions.RouteID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = edgeFunctionsApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(edgeFunctionsApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/workers/routes/{route_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getEdgeFunctionsTriggerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("edge_functions_api", "V1", "GetEdgeFunctionsTrigger") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getEdgeFunctionsTriggerOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*getEdgeFunctionsTriggerOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = edgeFunctionsApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetEdgeFunctionsTriggerResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateEdgeFunctionsTrigger : Update an edge functions trigger on a given zone +// Update an edge functions trigger on a given zone. 
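+//
+// Sketch (route ID, pattern, and script name are placeholders):
+//
+//	opts := service.NewUpdateEdgeFunctionsTriggerOptions("<route-id>").
+//		SetPattern("example.com/api/*").
+//		SetScript("my-action")
+//	result, _, err := service.UpdateEdgeFunctionsTrigger(opts)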
+func (edgeFunctionsApi *EdgeFunctionsApiV1) UpdateEdgeFunctionsTrigger(updateEdgeFunctionsTriggerOptions *UpdateEdgeFunctionsTriggerOptions) (result *GetEdgeFunctionsTriggerResp, response *core.DetailedResponse, err error) { + return edgeFunctionsApi.UpdateEdgeFunctionsTriggerWithContext(context.Background(), updateEdgeFunctionsTriggerOptions) +} + +// UpdateEdgeFunctionsTriggerWithContext is an alternate form of the UpdateEdgeFunctionsTrigger method which supports a Context parameter +func (edgeFunctionsApi *EdgeFunctionsApiV1) UpdateEdgeFunctionsTriggerWithContext(ctx context.Context, updateEdgeFunctionsTriggerOptions *UpdateEdgeFunctionsTriggerOptions) (result *GetEdgeFunctionsTriggerResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateEdgeFunctionsTriggerOptions, "updateEdgeFunctionsTriggerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateEdgeFunctionsTriggerOptions, "updateEdgeFunctionsTriggerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *edgeFunctionsApi.Crn, + "zone_identifier": *edgeFunctionsApi.ZoneIdentifier, + "route_id": *updateEdgeFunctionsTriggerOptions.RouteID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = edgeFunctionsApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(edgeFunctionsApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/workers/routes/{route_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateEdgeFunctionsTriggerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("edge_functions_api", "V1", "UpdateEdgeFunctionsTrigger") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateEdgeFunctionsTriggerOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*updateEdgeFunctionsTriggerOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if updateEdgeFunctionsTriggerOptions.Pattern != nil { + body["pattern"] = updateEdgeFunctionsTriggerOptions.Pattern + } + if updateEdgeFunctionsTriggerOptions.Script != nil { + body["script"] = updateEdgeFunctionsTriggerOptions.Script + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = edgeFunctionsApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetEdgeFunctionsTriggerResp) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteEdgeFunctionsTrigger : Delete an edge functions trigger on a given zone +// Delete an edge functions trigger on a given zone. 
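+//
+// Sketch (the route ID is a placeholder); the deleted trigger's ID is echoed
+// back in the result:
+//
+//	opts := service.NewDeleteEdgeFunctionsTriggerOptions("<route-id>")
+//	result, _, err := service.DeleteEdgeFunctionsTrigger(opts)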
+func (edgeFunctionsApi *EdgeFunctionsApiV1) DeleteEdgeFunctionsTrigger(deleteEdgeFunctionsTriggerOptions *DeleteEdgeFunctionsTriggerOptions) (result *CreateEdgeFunctionsTriggerResp, response *core.DetailedResponse, err error) { + return edgeFunctionsApi.DeleteEdgeFunctionsTriggerWithContext(context.Background(), deleteEdgeFunctionsTriggerOptions) +} + +// DeleteEdgeFunctionsTriggerWithContext is an alternate form of the DeleteEdgeFunctionsTrigger method which supports a Context parameter +func (edgeFunctionsApi *EdgeFunctionsApiV1) DeleteEdgeFunctionsTriggerWithContext(ctx context.Context, deleteEdgeFunctionsTriggerOptions *DeleteEdgeFunctionsTriggerOptions) (result *CreateEdgeFunctionsTriggerResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteEdgeFunctionsTriggerOptions, "deleteEdgeFunctionsTriggerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteEdgeFunctionsTriggerOptions, "deleteEdgeFunctionsTriggerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *edgeFunctionsApi.Crn, + "zone_identifier": *edgeFunctionsApi.ZoneIdentifier, + "route_id": *deleteEdgeFunctionsTriggerOptions.RouteID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = edgeFunctionsApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(edgeFunctionsApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/workers/routes/{route_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteEdgeFunctionsTriggerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("edge_functions_api", "V1", "DeleteEdgeFunctionsTrigger") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if deleteEdgeFunctionsTriggerOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*deleteEdgeFunctionsTriggerOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = edgeFunctionsApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateEdgeFunctionsTriggerResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateEdgeFunctionsTriggerOptions : The CreateEdgeFunctionsTrigger options. +type CreateEdgeFunctionsTriggerOptions struct { + // a string pattern. + Pattern *string `json:"pattern,omitempty"` + + // Name of the script to apply when the route is matched. The route is skipped when this is blank/missing. + Script *string `json:"script,omitempty"` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateEdgeFunctionsTriggerOptions : Instantiate CreateEdgeFunctionsTriggerOptions +func (*EdgeFunctionsApiV1) NewCreateEdgeFunctionsTriggerOptions() *CreateEdgeFunctionsTriggerOptions { + return &CreateEdgeFunctionsTriggerOptions{} +} + +// SetPattern : Allow user to set Pattern +func (options *CreateEdgeFunctionsTriggerOptions) SetPattern(pattern string) *CreateEdgeFunctionsTriggerOptions { + options.Pattern = core.StringPtr(pattern) + return options +} + +// SetScript : Allow user to set Script +func (options *CreateEdgeFunctionsTriggerOptions) SetScript(script string) *CreateEdgeFunctionsTriggerOptions { + options.Script = core.StringPtr(script) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *CreateEdgeFunctionsTriggerOptions) SetXCorrelationID(xCorrelationID string) *CreateEdgeFunctionsTriggerOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateEdgeFunctionsTriggerOptions) SetHeaders(param map[string]string) *CreateEdgeFunctionsTriggerOptions { + options.Headers = param + return options +} + +// DeleteEdgeFunctionsActionOptions : The DeleteEdgeFunctionsAction options. +type DeleteEdgeFunctionsActionOptions struct { + // the edge function action name. + ScriptName *string `json:"script_name" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteEdgeFunctionsActionOptions : Instantiate DeleteEdgeFunctionsActionOptions +func (*EdgeFunctionsApiV1) NewDeleteEdgeFunctionsActionOptions(scriptName string) *DeleteEdgeFunctionsActionOptions { + return &DeleteEdgeFunctionsActionOptions{ + ScriptName: core.StringPtr(scriptName), + } +} + +// SetScriptName : Allow user to set ScriptName +func (options *DeleteEdgeFunctionsActionOptions) SetScriptName(scriptName string) *DeleteEdgeFunctionsActionOptions { + options.ScriptName = core.StringPtr(scriptName) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *DeleteEdgeFunctionsActionOptions) SetXCorrelationID(xCorrelationID string) *DeleteEdgeFunctionsActionOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteEdgeFunctionsActionOptions) SetHeaders(param map[string]string) *DeleteEdgeFunctionsActionOptions { + options.Headers = param + return options +} + +// DeleteEdgeFunctionsTriggerOptions : The DeleteEdgeFunctionsTrigger options. +type DeleteEdgeFunctionsTriggerOptions struct { + // trigger identifier. + RouteID *string `json:"route_id" validate:"required,ne="` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteEdgeFunctionsTriggerOptions : Instantiate DeleteEdgeFunctionsTriggerOptions +func (*EdgeFunctionsApiV1) NewDeleteEdgeFunctionsTriggerOptions(routeID string) *DeleteEdgeFunctionsTriggerOptions { + return &DeleteEdgeFunctionsTriggerOptions{ + RouteID: core.StringPtr(routeID), + } +} + +// SetRouteID : Allow user to set RouteID +func (options *DeleteEdgeFunctionsTriggerOptions) SetRouteID(routeID string) *DeleteEdgeFunctionsTriggerOptions { + options.RouteID = core.StringPtr(routeID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *DeleteEdgeFunctionsTriggerOptions) SetXCorrelationID(xCorrelationID string) *DeleteEdgeFunctionsTriggerOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteEdgeFunctionsTriggerOptions) SetHeaders(param map[string]string) *DeleteEdgeFunctionsTriggerOptions { + options.Headers = param + return options +} + +// GetEdgeFunctionsActionOptions : The GetEdgeFunctionsAction options. +type GetEdgeFunctionsActionOptions struct { + // the edge function action name. + ScriptName *string `json:"script_name" validate:"required,ne="` + + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetEdgeFunctionsActionOptions : Instantiate GetEdgeFunctionsActionOptions +func (*EdgeFunctionsApiV1) NewGetEdgeFunctionsActionOptions(scriptName string) *GetEdgeFunctionsActionOptions { + return &GetEdgeFunctionsActionOptions{ + ScriptName: core.StringPtr(scriptName), + } +} + +// SetScriptName : Allow user to set ScriptName +func (options *GetEdgeFunctionsActionOptions) SetScriptName(scriptName string) *GetEdgeFunctionsActionOptions { + options.ScriptName = core.StringPtr(scriptName) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *GetEdgeFunctionsActionOptions) SetXCorrelationID(xCorrelationID string) *GetEdgeFunctionsActionOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetEdgeFunctionsActionOptions) SetHeaders(param map[string]string) *GetEdgeFunctionsActionOptions { + options.Headers = param + return options +} + +// GetEdgeFunctionsTriggerOptions : The GetEdgeFunctionsTrigger options. +type GetEdgeFunctionsTriggerOptions struct { + // trigger identifier. + RouteID *string `json:"route_id" validate:"required,ne="` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetEdgeFunctionsTriggerOptions : Instantiate GetEdgeFunctionsTriggerOptions +func (*EdgeFunctionsApiV1) NewGetEdgeFunctionsTriggerOptions(routeID string) *GetEdgeFunctionsTriggerOptions { + return &GetEdgeFunctionsTriggerOptions{ + RouteID: core.StringPtr(routeID), + } +} + +// SetRouteID : Allow user to set RouteID +func (options *GetEdgeFunctionsTriggerOptions) SetRouteID(routeID string) *GetEdgeFunctionsTriggerOptions { + options.RouteID = core.StringPtr(routeID) + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *GetEdgeFunctionsTriggerOptions) SetXCorrelationID(xCorrelationID string) *GetEdgeFunctionsTriggerOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetEdgeFunctionsTriggerOptions) SetHeaders(param map[string]string) *GetEdgeFunctionsTriggerOptions { + options.Headers = param + return options +} + +// ListEdgeFunctionsActionsOptions : The ListEdgeFunctionsActions options. +type ListEdgeFunctionsActionsOptions struct { + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListEdgeFunctionsActionsOptions : Instantiate ListEdgeFunctionsActionsOptions +func (*EdgeFunctionsApiV1) NewListEdgeFunctionsActionsOptions() *ListEdgeFunctionsActionsOptions { + return &ListEdgeFunctionsActionsOptions{} +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *ListEdgeFunctionsActionsOptions) SetXCorrelationID(xCorrelationID string) *ListEdgeFunctionsActionsOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListEdgeFunctionsActionsOptions) SetHeaders(param map[string]string) *ListEdgeFunctionsActionsOptions { + options.Headers = param + return options +} + +// ListEdgeFunctionsTriggersOptions : The ListEdgeFunctionsTriggers options. +type ListEdgeFunctionsTriggersOptions struct { + // Uniquely identifying a request. + XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListEdgeFunctionsTriggersOptions : Instantiate ListEdgeFunctionsTriggersOptions +func (*EdgeFunctionsApiV1) NewListEdgeFunctionsTriggersOptions() *ListEdgeFunctionsTriggersOptions { + return &ListEdgeFunctionsTriggersOptions{} +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *ListEdgeFunctionsTriggersOptions) SetXCorrelationID(xCorrelationID string) *ListEdgeFunctionsTriggersOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListEdgeFunctionsTriggersOptions) SetHeaders(param map[string]string) *ListEdgeFunctionsTriggersOptions { + options.Headers = param + return options +} + +// UpdateEdgeFunctionsActionOptions : The UpdateEdgeFunctionsAction options. +type UpdateEdgeFunctionsActionOptions struct { + // the edge function action name. + ScriptName *string `json:"script_name" validate:"required,ne="` + + // upload or replace an edge functions action. + EdgeFunctionsAction io.ReadCloser `json:"edge_functions_action,omitempty"` + + // Uniquely identifying a request. 
+ XCorrelationID *string `json:"X-Correlation-ID,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateEdgeFunctionsActionOptions : Instantiate UpdateEdgeFunctionsActionOptions +func (*EdgeFunctionsApiV1) NewUpdateEdgeFunctionsActionOptions(scriptName string) *UpdateEdgeFunctionsActionOptions { + return &UpdateEdgeFunctionsActionOptions{ + ScriptName: core.StringPtr(scriptName), + } +} + +// SetScriptName : Allow user to set ScriptName +func (options *UpdateEdgeFunctionsActionOptions) SetScriptName(scriptName string) *UpdateEdgeFunctionsActionOptions { + options.ScriptName = core.StringPtr(scriptName) + return options +} + +// SetEdgeFunctionsAction : Allow user to set EdgeFunctionsAction +func (options *UpdateEdgeFunctionsActionOptions) SetEdgeFunctionsAction(edgeFunctionsAction io.ReadCloser) *UpdateEdgeFunctionsActionOptions { + options.EdgeFunctionsAction = edgeFunctionsAction + return options +} + +// SetXCorrelationID : Allow user to set XCorrelationID +func (options *UpdateEdgeFunctionsActionOptions) SetXCorrelationID(xCorrelationID string) *UpdateEdgeFunctionsActionOptions { + options.XCorrelationID = core.StringPtr(xCorrelationID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateEdgeFunctionsActionOptions) SetHeaders(param map[string]string) *UpdateEdgeFunctionsActionOptions { + options.Headers = param + return options +} + +// UpdateEdgeFunctionsTriggerOptions : The UpdateEdgeFunctionsTrigger options. +type UpdateEdgeFunctionsTriggerOptions struct { + // trigger identifier. + RouteID *string `json:"route_id" validate:"required,ne="` + + // a string pattern. + Pattern *string `json:"pattern,omitempty"` + + // Name of the script to apply when the route is matched. The route is skipped when this is blank/missing. + Script *string `json:"script,omitempty"` + + // Uniquely identifying a request. 
+	XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewUpdateEdgeFunctionsTriggerOptions : Instantiate UpdateEdgeFunctionsTriggerOptions
+func (*EdgeFunctionsApiV1) NewUpdateEdgeFunctionsTriggerOptions(routeID string) *UpdateEdgeFunctionsTriggerOptions {
+	return &UpdateEdgeFunctionsTriggerOptions{
+		RouteID: core.StringPtr(routeID),
+	}
+}
+
+// SetRouteID : Allow user to set RouteID
+func (options *UpdateEdgeFunctionsTriggerOptions) SetRouteID(routeID string) *UpdateEdgeFunctionsTriggerOptions {
+	options.RouteID = core.StringPtr(routeID)
+	return options
+}
+
+// SetPattern : Allow user to set Pattern
+func (options *UpdateEdgeFunctionsTriggerOptions) SetPattern(pattern string) *UpdateEdgeFunctionsTriggerOptions {
+	options.Pattern = core.StringPtr(pattern)
+	return options
+}
+
+// SetScript : Allow user to set Script
+func (options *UpdateEdgeFunctionsTriggerOptions) SetScript(script string) *UpdateEdgeFunctionsTriggerOptions {
+	options.Script = core.StringPtr(script)
+	return options
+}
+
+// SetXCorrelationID : Allow user to set XCorrelationID
+func (options *UpdateEdgeFunctionsTriggerOptions) SetXCorrelationID(xCorrelationID string) *UpdateEdgeFunctionsTriggerOptions {
+	options.XCorrelationID = core.StringPtr(xCorrelationID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateEdgeFunctionsTriggerOptions) SetHeaders(param map[string]string) *UpdateEdgeFunctionsTriggerOptions {
+	options.Headers = param
+	return options
+}
+
+// CreateEdgeFunctionsTriggerResp : create an edge functions trigger response.
+type CreateEdgeFunctionsTriggerResp struct {
+	// edge function trigger id.
+	Result *EdgeFunctionsTriggerID `json:"result,omitempty"`
+
+	// success.
+	Success *bool `json:"success,omitempty"`
+
+	// An array with errors.
+	Errors []string `json:"errors,omitempty"`
+
+	// An array with messages.
+	Messages []string `json:"messages,omitempty"`
+}
+
+
+// UnmarshalCreateEdgeFunctionsTriggerResp unmarshals an instance of CreateEdgeFunctionsTriggerResp from the specified map of raw messages.
+func UnmarshalCreateEdgeFunctionsTriggerResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(CreateEdgeFunctionsTriggerResp)
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalEdgeFunctionsTriggerID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// DeleteEdgeFunctionsActionResp : delete an edge functions action response.
+type DeleteEdgeFunctionsActionResp struct {
+	// edge function action id.
+	Result *EdgeFunctionsActionID `json:"result,omitempty"`
+
+	// success.
+	Success *bool `json:"success,omitempty"`
+
+	// An array with errors.
+	Errors []string `json:"errors,omitempty"`
+
+	// An array with messages.
+	Messages []string `json:"messages,omitempty"`
+}
+
+
+// UnmarshalDeleteEdgeFunctionsActionResp unmarshals an instance of DeleteEdgeFunctionsActionResp from the specified map of raw messages.
+func UnmarshalDeleteEdgeFunctionsActionResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DeleteEdgeFunctionsActionResp)
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalEdgeFunctionsActionID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// EdgeFunctionsActionID : edge function action id.
+type EdgeFunctionsActionID struct {
+	// edge functions action identifier tag.
+	ID *string `json:"id,omitempty"`
+}
+
+
+// UnmarshalEdgeFunctionsActionID unmarshals an instance of EdgeFunctionsActionID from the specified map of raw messages.
+func UnmarshalEdgeFunctionsActionID(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(EdgeFunctionsActionID)
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// EdgeFunctionsActionResp : edge function script.
+type EdgeFunctionsActionResp struct {
+	// Raw script content, as a string.
+	Script *string `json:"script,omitempty"`
+
+	// Hashed script content, can be used in an If-None-Match header when updating.
+	Etag *string `json:"etag,omitempty"`
+
+	// handlers.
+	Handlers []string `json:"handlers,omitempty"`
+
+	// The time when the script was last modified.
+	ModifiedOn *strfmt.DateTime `json:"modified_on,omitempty"`
+
+	// The time when the script was created.
+	CreatedOn *strfmt.DateTime `json:"created_on,omitempty"`
+
+	// An array with items in the list response.
+	Routes []EdgeFunctionsTriggerResp `json:"routes,omitempty"`
+}
+
+
+// UnmarshalEdgeFunctionsActionResp unmarshals an instance of EdgeFunctionsActionResp from the specified map of raw messages.
+func UnmarshalEdgeFunctionsActionResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(EdgeFunctionsActionResp)
+	err = core.UnmarshalPrimitive(m, "script", &obj.Script)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "etag", &obj.Etag)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "handlers", &obj.Handlers)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "routes", &obj.Routes, UnmarshalEdgeFunctionsTriggerResp)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// EdgeFunctionsTriggerID : edge function trigger id.
+type EdgeFunctionsTriggerID struct {
+	// edge functions trigger identifier tag.
+	ID *string `json:"id,omitempty"`
+}
+
+
+// UnmarshalEdgeFunctionsTriggerID unmarshals an instance of EdgeFunctionsTriggerID from the specified map of raw messages.
+func UnmarshalEdgeFunctionsTriggerID(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(EdgeFunctionsTriggerID)
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// EdgeFunctionsTriggerResp : edge functions trigger response.
+type EdgeFunctionsTriggerResp struct {
+	// edge functions trigger identifier tag.
+	ID *string `json:"id,omitempty"`
+
+	// a string pattern.
+	Pattern *string `json:"pattern,omitempty"`
+
+	// Name of the script to apply when the route is matched. The route is skipped when this is blank/missing.
+	Script *string `json:"script,omitempty"`
+
+	// request limit fail open or not.
+	RequestLimitFailOpen *bool `json:"request_limit_fail_open,omitempty"`
+}
+
+
+// UnmarshalEdgeFunctionsTriggerResp unmarshals an instance of EdgeFunctionsTriggerResp from the specified map of raw messages.
+func UnmarshalEdgeFunctionsTriggerResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(EdgeFunctionsTriggerResp)
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "pattern", &obj.Pattern)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "script", &obj.Script)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "request_limit_fail_open", &obj.RequestLimitFailOpen)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// GetEdgeFunctionsActionResp : edge functions action response.
+type GetEdgeFunctionsActionResp struct {
+	// edge function script.
+	Result *EdgeFunctionsActionResp `json:"result,omitempty"`
+
+	// success.
+	Success *bool `json:"success,omitempty"`
+
+	// An array with errors.
+	Errors []string `json:"errors,omitempty"`
+
+	// An array with messages.
+	Messages []string `json:"messages,omitempty"`
+}
+
+
+// UnmarshalGetEdgeFunctionsActionResp unmarshals an instance of GetEdgeFunctionsActionResp from the specified map of raw messages.
+func UnmarshalGetEdgeFunctionsActionResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(GetEdgeFunctionsActionResp)
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalEdgeFunctionsActionResp)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// GetEdgeFunctionsTriggerResp : edge functions trigger response.
+type GetEdgeFunctionsTriggerResp struct {
+	// edge function trigger id.
+	Result *EdgeFunctionsTriggerResp `json:"result,omitempty"`
+
+	// success.
+	Success *bool `json:"success,omitempty"`
+
+	// An array with errors.
+	Errors []string `json:"errors,omitempty"`
+
+	// An array with messages.
+	Messages []string `json:"messages,omitempty"`
+}
+
+
+// UnmarshalGetEdgeFunctionsTriggerResp unmarshals an instance of GetEdgeFunctionsTriggerResp from the specified map of raw messages.
+func UnmarshalGetEdgeFunctionsTriggerResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(GetEdgeFunctionsTriggerResp)
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalEdgeFunctionsTriggerResp)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ListEdgeFunctionsActionsResp : edge functions actions response.
+type ListEdgeFunctionsActionsResp struct {
+	// An array with items in the list response.
+	Result []EdgeFunctionsActionResp `json:"result,omitempty"`
+
+	// success.
+	Success *bool `json:"success,omitempty"`
+
+	// An array with errors.
+	Errors []string `json:"errors,omitempty"`
+
+	// An array with messages.
+	Messages []string `json:"messages,omitempty"`
+}
+
+
+// UnmarshalListEdgeFunctionsActionsResp unmarshals an instance of ListEdgeFunctionsActionsResp from the specified map of raw messages.
+func UnmarshalListEdgeFunctionsActionsResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ListEdgeFunctionsActionsResp)
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalEdgeFunctionsActionResp)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ListEdgeFunctionsTriggersResp : edge functions triggers response.
+type ListEdgeFunctionsTriggersResp struct {
+	// An array with items in the list response.
+	Result []EdgeFunctionsTriggerResp `json:"result,omitempty"`
+
+	// success.
+	Success *bool `json:"success,omitempty"`
+
+	// An array with errors.
+	Errors []string `json:"errors,omitempty"`
+
+	// An array with messages.
+	Messages []string `json:"messages,omitempty"`
+}
+
+
+// UnmarshalListEdgeFunctionsTriggersResp unmarshals an instance of ListEdgeFunctionsTriggersResp from the specified map of raw messages.
+func UnmarshalListEdgeFunctionsTriggersResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListEdgeFunctionsTriggersResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalEdgeFunctionsTriggerResp) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/edgefunctionsapiv1/script.js b/vendor/github.com/IBM/networking-go-sdk/edgefunctionsapiv1/script.js new file mode 100644 index 00000000000..eefde0c74f2 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/edgefunctionsapiv1/script.js @@ -0,0 +1,14 @@ +addEventListener('fetch', (event) => { + event.respondWith(handleRequest(event.request)) +}) + +/** + * Sample test function + * Log a given request object + * @param {Request} request + */ +async function handleRequest(request) { + console.log('Got request', request) + const response = await fetch(request) + return response; +} \ No newline at end of file diff --git a/vendor/github.com/IBM/networking-go-sdk/globalloadbalancermonitorv1/global_load_balancer_monitor_v1.go b/vendor/github.com/IBM/networking-go-sdk/globalloadbalancermonitorv1/global_load_balancer_monitor_v1.go new file mode 100644 index 00000000000..94f95fce5ff --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/globalloadbalancermonitorv1/global_load_balancer_monitor_v1.go @@ -0,0 +1,1206 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package globalloadbalancermonitorv1 : Operations and models for the GlobalLoadBalancerMonitorV1 service +package globalloadbalancermonitorv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// GlobalLoadBalancerMonitorV1 : Global Load Balancer Monitor +// +// Version: 1.0.1 +type GlobalLoadBalancerMonitorV1 struct { + Service *core.BaseService + + // Full CRN of the service instance. + Crn *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "global_load_balancer_monitor" + +// GlobalLoadBalancerMonitorV1Options : Service options +type GlobalLoadBalancerMonitorV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full CRN of the service instance. 
+ Crn *string `validate:"required"` +} + +// NewGlobalLoadBalancerMonitorV1UsingExternalConfig : constructs an instance of GlobalLoadBalancerMonitorV1 with passed in options and external configuration. +func NewGlobalLoadBalancerMonitorV1UsingExternalConfig(options *GlobalLoadBalancerMonitorV1Options) (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + globalLoadBalancerMonitor, err = NewGlobalLoadBalancerMonitorV1(options) + if err != nil { + return + } + + err = globalLoadBalancerMonitor.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = globalLoadBalancerMonitor.Service.SetServiceURL(options.URL) + } + return +} + +// NewGlobalLoadBalancerMonitorV1 : constructs an instance of GlobalLoadBalancerMonitorV1 with passed in options. +func NewGlobalLoadBalancerMonitorV1(options *GlobalLoadBalancerMonitorV1Options) (service *GlobalLoadBalancerMonitorV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &GlobalLoadBalancerMonitorV1{ + Service: baseService, + Crn: options.Crn, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "globalLoadBalancerMonitor" suitable for processing requests. +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) Clone() *GlobalLoadBalancerMonitorV1 { + if core.IsNil(globalLoadBalancerMonitor) { + return nil + } + clone := *globalLoadBalancerMonitor + clone.Service = globalLoadBalancerMonitor.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) SetServiceURL(url string) error { + return globalLoadBalancerMonitor.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) GetServiceURL() string { + return globalLoadBalancerMonitor.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) SetDefaultHeaders(headers http.Header) { + globalLoadBalancerMonitor.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) SetEnableGzipCompression(enableGzip bool) { + globalLoadBalancerMonitor.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) GetEnableGzipCompression() bool { + return globalLoadBalancerMonitor.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. 
+// If either parameter is specified as 0, then a default value is used instead. +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + globalLoadBalancerMonitor.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) DisableRetries() { + globalLoadBalancerMonitor.Service.DisableRetries() +} + +// ListAllLoadBalancerMonitors : List all load balancer monitors +// List configured load balancer monitors for a user. +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) ListAllLoadBalancerMonitors(listAllLoadBalancerMonitorsOptions *ListAllLoadBalancerMonitorsOptions) (result *ListMonitorResp, response *core.DetailedResponse, err error) { + return globalLoadBalancerMonitor.ListAllLoadBalancerMonitorsWithContext(context.Background(), listAllLoadBalancerMonitorsOptions) +} + +// ListAllLoadBalancerMonitorsWithContext is an alternate form of the ListAllLoadBalancerMonitors method which supports a Context parameter +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) ListAllLoadBalancerMonitorsWithContext(ctx context.Context, listAllLoadBalancerMonitorsOptions *ListAllLoadBalancerMonitorsOptions) (result *ListMonitorResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAllLoadBalancerMonitorsOptions, "listAllLoadBalancerMonitorsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancerMonitor.Crn, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancerMonitor.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancerMonitor.Service.Options.URL, `/v1/{crn}/load_balancers/monitors`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listAllLoadBalancerMonitorsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer_monitor", "V1", "ListAllLoadBalancerMonitors") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancerMonitor.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListMonitorResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerMonitor : Create load balancer monitor +// Create a load balancer monitor for a given service instance. 
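+//
+// Sketch of a minimal HTTP health check (all values are placeholders; the
+// options builder follows the same pattern as the other services in this SDK):
+//
+//	opts := globalLoadBalancerMonitor.NewCreateLoadBalancerMonitorOptions()
+//	opts.SetType("http")
+//	opts.SetPath("/health")
+//	opts.SetExpectedCodes("2xx")
+//	result, _, err := globalLoadBalancerMonitor.CreateLoadBalancerMonitor(opts)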
+func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) CreateLoadBalancerMonitor(createLoadBalancerMonitorOptions *CreateLoadBalancerMonitorOptions) (result *MonitorResp, response *core.DetailedResponse, err error) { + return globalLoadBalancerMonitor.CreateLoadBalancerMonitorWithContext(context.Background(), createLoadBalancerMonitorOptions) +} + +// CreateLoadBalancerMonitorWithContext is an alternate form of the CreateLoadBalancerMonitor method which supports a Context parameter +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) CreateLoadBalancerMonitorWithContext(ctx context.Context, createLoadBalancerMonitorOptions *CreateLoadBalancerMonitorOptions) (result *MonitorResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createLoadBalancerMonitorOptions, "createLoadBalancerMonitorOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancerMonitor.Crn, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancerMonitor.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancerMonitor.Service.Options.URL, `/v1/{crn}/load_balancers/monitors`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createLoadBalancerMonitorOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer_monitor", "V1", "CreateLoadBalancerMonitor") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createLoadBalancerMonitorOptions.Type != nil { + body["type"] = createLoadBalancerMonitorOptions.Type + } + if createLoadBalancerMonitorOptions.Description != nil { + body["description"] = createLoadBalancerMonitorOptions.Description + } + if createLoadBalancerMonitorOptions.Method != nil { + body["method"] = createLoadBalancerMonitorOptions.Method + } + if createLoadBalancerMonitorOptions.Port != nil { + body["port"] = createLoadBalancerMonitorOptions.Port + } + if createLoadBalancerMonitorOptions.Path != nil { + body["path"] = createLoadBalancerMonitorOptions.Path + } + if createLoadBalancerMonitorOptions.Timeout != nil { + body["timeout"] = createLoadBalancerMonitorOptions.Timeout + } + if createLoadBalancerMonitorOptions.Retries != nil { + body["retries"] = createLoadBalancerMonitorOptions.Retries + } + if createLoadBalancerMonitorOptions.Interval != nil { + body["interval"] = createLoadBalancerMonitorOptions.Interval + } + if createLoadBalancerMonitorOptions.ExpectedCodes != nil { + body["expected_codes"] = createLoadBalancerMonitorOptions.ExpectedCodes + } + if createLoadBalancerMonitorOptions.FollowRedirects != nil { + body["follow_redirects"] = createLoadBalancerMonitorOptions.FollowRedirects + } + if createLoadBalancerMonitorOptions.ExpectedBody != nil { + body["expected_body"] = createLoadBalancerMonitorOptions.ExpectedBody + } + if createLoadBalancerMonitorOptions.AllowInsecure != nil { + body["allow_insecure"] = createLoadBalancerMonitorOptions.AllowInsecure + } + if createLoadBalancerMonitorOptions.Header != nil { + body["header"] = createLoadBalancerMonitorOptions.Header + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + 
}
+
+ var rawResponse map[string]json.RawMessage
+ response, err = globalLoadBalancerMonitor.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMonitorResp)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// EditLoadBalancerMonitor : Edit load balancer monitor
+// Edit properties of an existing load balancer monitor.
+func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) EditLoadBalancerMonitor(editLoadBalancerMonitorOptions *EditLoadBalancerMonitorOptions) (result *MonitorResp, response *core.DetailedResponse, err error) {
+ return globalLoadBalancerMonitor.EditLoadBalancerMonitorWithContext(context.Background(), editLoadBalancerMonitorOptions)
+}
+
+// EditLoadBalancerMonitorWithContext is an alternate form of the EditLoadBalancerMonitor method which supports a Context parameter
+func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) EditLoadBalancerMonitorWithContext(ctx context.Context, editLoadBalancerMonitorOptions *EditLoadBalancerMonitorOptions) (result *MonitorResp, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(editLoadBalancerMonitorOptions, "editLoadBalancerMonitorOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(editLoadBalancerMonitorOptions, "editLoadBalancerMonitorOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *globalLoadBalancerMonitor.Crn,
+ "monitor_identifier": *editLoadBalancerMonitorOptions.MonitorIdentifier,
+ }
+
+ builder := core.NewRequestBuilder(core.PUT)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = globalLoadBalancerMonitor.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(globalLoadBalancerMonitor.Service.Options.URL, `/v1/{crn}/load_balancers/monitors/{monitor_identifier}`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range editLoadBalancerMonitorOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("global_load_balancer_monitor", "V1", "EditLoadBalancerMonitor")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ builder.AddHeader("Content-Type", "application/json")
+
+ body := make(map[string]interface{})
+ if editLoadBalancerMonitorOptions.Type != nil {
+ body["type"] = editLoadBalancerMonitorOptions.Type
+ }
+ if editLoadBalancerMonitorOptions.Description != nil {
+ body["description"] = editLoadBalancerMonitorOptions.Description
+ }
+ if editLoadBalancerMonitorOptions.Method != nil {
+ body["method"] = editLoadBalancerMonitorOptions.Method
+ }
+ if editLoadBalancerMonitorOptions.Port != nil {
+ body["port"] = editLoadBalancerMonitorOptions.Port
+ }
+ if editLoadBalancerMonitorOptions.Path != nil {
+ body["path"] = editLoadBalancerMonitorOptions.Path
+ }
+ if editLoadBalancerMonitorOptions.Timeout != nil {
+ body["timeout"] = editLoadBalancerMonitorOptions.Timeout
+ }
+ if editLoadBalancerMonitorOptions.Retries != nil {
+ body["retries"] = editLoadBalancerMonitorOptions.Retries
+ }
+ if editLoadBalancerMonitorOptions.Interval != nil {
+ body["interval"] = editLoadBalancerMonitorOptions.Interval
+ }
+ if editLoadBalancerMonitorOptions.ExpectedCodes != nil {
+ body["expected_codes"] = editLoadBalancerMonitorOptions.ExpectedCodes
+ }
+ if editLoadBalancerMonitorOptions.FollowRedirects != nil {
+
body["follow_redirects"] = editLoadBalancerMonitorOptions.FollowRedirects + } + if editLoadBalancerMonitorOptions.ExpectedBody != nil { + body["expected_body"] = editLoadBalancerMonitorOptions.ExpectedBody + } + if editLoadBalancerMonitorOptions.AllowInsecure != nil { + body["allow_insecure"] = editLoadBalancerMonitorOptions.AllowInsecure + } + if editLoadBalancerMonitorOptions.Header != nil { + body["header"] = editLoadBalancerMonitorOptions.Header + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancerMonitor.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMonitorResp) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteLoadBalancerMonitor : Delete load balancer monitor +// Delete a load balancer monitor. +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) DeleteLoadBalancerMonitor(deleteLoadBalancerMonitorOptions *DeleteLoadBalancerMonitorOptions) (result *DeleteMonitorResp, response *core.DetailedResponse, err error) { + return globalLoadBalancerMonitor.DeleteLoadBalancerMonitorWithContext(context.Background(), deleteLoadBalancerMonitorOptions) +} + +// DeleteLoadBalancerMonitorWithContext is an alternate form of the DeleteLoadBalancerMonitor method which supports a Context parameter +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) DeleteLoadBalancerMonitorWithContext(ctx context.Context, deleteLoadBalancerMonitorOptions *DeleteLoadBalancerMonitorOptions) (result *DeleteMonitorResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerMonitorOptions, "deleteLoadBalancerMonitorOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerMonitorOptions, "deleteLoadBalancerMonitorOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancerMonitor.Crn, + "monitor_identifier": *deleteLoadBalancerMonitorOptions.MonitorIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancerMonitor.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancerMonitor.Service.Options.URL, `/v1/{crn}/load_balancers/monitors/{monitor_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerMonitorOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer_monitor", "V1", "DeleteLoadBalancerMonitor") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancerMonitor.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteMonitorResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetLoadBalancerMonitor : Get load balancer monitor +// For a given service instance and load balancer monitor id, get the monitor details. 
+func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) GetLoadBalancerMonitor(getLoadBalancerMonitorOptions *GetLoadBalancerMonitorOptions) (result *MonitorResp, response *core.DetailedResponse, err error) { + return globalLoadBalancerMonitor.GetLoadBalancerMonitorWithContext(context.Background(), getLoadBalancerMonitorOptions) +} + +// GetLoadBalancerMonitorWithContext is an alternate form of the GetLoadBalancerMonitor method which supports a Context parameter +func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) GetLoadBalancerMonitorWithContext(ctx context.Context, getLoadBalancerMonitorOptions *GetLoadBalancerMonitorOptions) (result *MonitorResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerMonitorOptions, "getLoadBalancerMonitorOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerMonitorOptions, "getLoadBalancerMonitorOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancerMonitor.Crn, + "monitor_identifier": *getLoadBalancerMonitorOptions.MonitorIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancerMonitor.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancerMonitor.Service.Options.URL, `/v1/{crn}/load_balancers/monitors/{monitor_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerMonitorOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer_monitor", "V1", "GetLoadBalancerMonitor") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancerMonitor.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMonitorResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerMonitorOptions : The CreateLoadBalancerMonitor options. +type CreateLoadBalancerMonitorOptions struct { + // http type. + Type *string `json:"type,omitempty"` + + // login page monitor. + Description *string `json:"description,omitempty"` + + // method. + Method *string `json:"method,omitempty"` + + // port number. + Port *int64 `json:"port,omitempty"` + + // path. + Path *string `json:"path,omitempty"` + + // timeout count. + Timeout *int64 `json:"timeout,omitempty"` + + // retry count. + Retries *int64 `json:"retries,omitempty"` + + // interval. + Interval *int64 `json:"interval,omitempty"` + + // expected codes. + ExpectedCodes *string `json:"expected_codes,omitempty"` + + // follow redirects. + FollowRedirects *bool `json:"follow_redirects,omitempty"` + + // expected body. + ExpectedBody *string `json:"expected_body,omitempty"` + + // allow insecure. + AllowInsecure *bool `json:"allow_insecure,omitempty"` + + // header. 
+ Header map[string][]string `json:"header,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateLoadBalancerMonitorOptions : Instantiate CreateLoadBalancerMonitorOptions +func (*GlobalLoadBalancerMonitorV1) NewCreateLoadBalancerMonitorOptions() *CreateLoadBalancerMonitorOptions { + return &CreateLoadBalancerMonitorOptions{} +} + +// SetType : Allow user to set Type +func (options *CreateLoadBalancerMonitorOptions) SetType(typeVar string) *CreateLoadBalancerMonitorOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateLoadBalancerMonitorOptions) SetDescription(description string) *CreateLoadBalancerMonitorOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetMethod : Allow user to set Method +func (options *CreateLoadBalancerMonitorOptions) SetMethod(method string) *CreateLoadBalancerMonitorOptions { + options.Method = core.StringPtr(method) + return options +} + +// SetPort : Allow user to set Port +func (options *CreateLoadBalancerMonitorOptions) SetPort(port int64) *CreateLoadBalancerMonitorOptions { + options.Port = core.Int64Ptr(port) + return options +} + +// SetPath : Allow user to set Path +func (options *CreateLoadBalancerMonitorOptions) SetPath(path string) *CreateLoadBalancerMonitorOptions { + options.Path = core.StringPtr(path) + return options +} + +// SetTimeout : Allow user to set Timeout +func (options *CreateLoadBalancerMonitorOptions) SetTimeout(timeout int64) *CreateLoadBalancerMonitorOptions { + options.Timeout = core.Int64Ptr(timeout) + return options +} + +// SetRetries : Allow user to set Retries +func (options *CreateLoadBalancerMonitorOptions) SetRetries(retries int64) *CreateLoadBalancerMonitorOptions { + options.Retries = core.Int64Ptr(retries) + return options +} + +// SetInterval : Allow user to set Interval +func (options *CreateLoadBalancerMonitorOptions) SetInterval(interval int64) *CreateLoadBalancerMonitorOptions { + options.Interval = core.Int64Ptr(interval) + return options +} + +// SetExpectedCodes : Allow user to set ExpectedCodes +func (options *CreateLoadBalancerMonitorOptions) SetExpectedCodes(expectedCodes string) *CreateLoadBalancerMonitorOptions { + options.ExpectedCodes = core.StringPtr(expectedCodes) + return options +} + +// SetFollowRedirects : Allow user to set FollowRedirects +func (options *CreateLoadBalancerMonitorOptions) SetFollowRedirects(followRedirects bool) *CreateLoadBalancerMonitorOptions { + options.FollowRedirects = core.BoolPtr(followRedirects) + return options +} + +// SetExpectedBody : Allow user to set ExpectedBody +func (options *CreateLoadBalancerMonitorOptions) SetExpectedBody(expectedBody string) *CreateLoadBalancerMonitorOptions { + options.ExpectedBody = core.StringPtr(expectedBody) + return options +} + +// SetAllowInsecure : Allow user to set AllowInsecure +func (options *CreateLoadBalancerMonitorOptions) SetAllowInsecure(allowInsecure bool) *CreateLoadBalancerMonitorOptions { + options.AllowInsecure = core.BoolPtr(allowInsecure) + return options +} + +// SetHeader : Allow user to set Header +func (options *CreateLoadBalancerMonitorOptions) SetHeader(header map[string][]string) *CreateLoadBalancerMonitorOptions { + options.Header = header + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateLoadBalancerMonitorOptions) SetHeaders(param map[string]string) *CreateLoadBalancerMonitorOptions { + 
options.Headers = param + return options +} + +// DeleteLoadBalancerMonitorOptions : The DeleteLoadBalancerMonitor options. +type DeleteLoadBalancerMonitorOptions struct { + // monitor identifier. + MonitorIdentifier *string `json:"monitor_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerMonitorOptions : Instantiate DeleteLoadBalancerMonitorOptions +func (*GlobalLoadBalancerMonitorV1) NewDeleteLoadBalancerMonitorOptions(monitorIdentifier string) *DeleteLoadBalancerMonitorOptions { + return &DeleteLoadBalancerMonitorOptions{ + MonitorIdentifier: core.StringPtr(monitorIdentifier), + } +} + +// SetMonitorIdentifier : Allow user to set MonitorIdentifier +func (options *DeleteLoadBalancerMonitorOptions) SetMonitorIdentifier(monitorIdentifier string) *DeleteLoadBalancerMonitorOptions { + options.MonitorIdentifier = core.StringPtr(monitorIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerMonitorOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerMonitorOptions { + options.Headers = param + return options +} + +// DeleteMonitorRespResult : result. +type DeleteMonitorRespResult struct { + // identifier. + ID *string `json:"id" validate:"required"` +} + + +// UnmarshalDeleteMonitorRespResult unmarshals an instance of DeleteMonitorRespResult from the specified map of raw messages. +func UnmarshalDeleteMonitorRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteMonitorRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// EditLoadBalancerMonitorOptions : The EditLoadBalancerMonitor options. +type EditLoadBalancerMonitorOptions struct { + // monitor identifier. + MonitorIdentifier *string `json:"monitor_identifier" validate:"required,ne="` + + // http type. + Type *string `json:"type,omitempty"` + + // login page monitor. + Description *string `json:"description,omitempty"` + + // method. + Method *string `json:"method,omitempty"` + + // port number. + Port *int64 `json:"port,omitempty"` + + // path. + Path *string `json:"path,omitempty"` + + // timeout count. + Timeout *int64 `json:"timeout,omitempty"` + + // retry count. + Retries *int64 `json:"retries,omitempty"` + + // interval. + Interval *int64 `json:"interval,omitempty"` + + // expected codes. + ExpectedCodes *string `json:"expected_codes,omitempty"` + + // follow redirects. + FollowRedirects *bool `json:"follow_redirects,omitempty"` + + // expected body. + ExpectedBody *string `json:"expected_body,omitempty"` + + // allow insecure. + AllowInsecure *bool `json:"allow_insecure,omitempty"` + + // header. 
+ Header map[string][]string `json:"header,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewEditLoadBalancerMonitorOptions : Instantiate EditLoadBalancerMonitorOptions +func (*GlobalLoadBalancerMonitorV1) NewEditLoadBalancerMonitorOptions(monitorIdentifier string) *EditLoadBalancerMonitorOptions { + return &EditLoadBalancerMonitorOptions{ + MonitorIdentifier: core.StringPtr(monitorIdentifier), + } +} + +// SetMonitorIdentifier : Allow user to set MonitorIdentifier +func (options *EditLoadBalancerMonitorOptions) SetMonitorIdentifier(monitorIdentifier string) *EditLoadBalancerMonitorOptions { + options.MonitorIdentifier = core.StringPtr(monitorIdentifier) + return options +} + +// SetType : Allow user to set Type +func (options *EditLoadBalancerMonitorOptions) SetType(typeVar string) *EditLoadBalancerMonitorOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetDescription : Allow user to set Description +func (options *EditLoadBalancerMonitorOptions) SetDescription(description string) *EditLoadBalancerMonitorOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetMethod : Allow user to set Method +func (options *EditLoadBalancerMonitorOptions) SetMethod(method string) *EditLoadBalancerMonitorOptions { + options.Method = core.StringPtr(method) + return options +} + +// SetPort : Allow user to set Port +func (options *EditLoadBalancerMonitorOptions) SetPort(port int64) *EditLoadBalancerMonitorOptions { + options.Port = core.Int64Ptr(port) + return options +} + +// SetPath : Allow user to set Path +func (options *EditLoadBalancerMonitorOptions) SetPath(path string) *EditLoadBalancerMonitorOptions { + options.Path = core.StringPtr(path) + return options +} + +// SetTimeout : Allow user to set Timeout +func (options *EditLoadBalancerMonitorOptions) SetTimeout(timeout int64) *EditLoadBalancerMonitorOptions { + options.Timeout = core.Int64Ptr(timeout) + return options +} + +// SetRetries : Allow user to set Retries +func (options *EditLoadBalancerMonitorOptions) SetRetries(retries int64) *EditLoadBalancerMonitorOptions { + options.Retries = core.Int64Ptr(retries) + return options +} + +// SetInterval : Allow user to set Interval +func (options *EditLoadBalancerMonitorOptions) SetInterval(interval int64) *EditLoadBalancerMonitorOptions { + options.Interval = core.Int64Ptr(interval) + return options +} + +// SetExpectedCodes : Allow user to set ExpectedCodes +func (options *EditLoadBalancerMonitorOptions) SetExpectedCodes(expectedCodes string) *EditLoadBalancerMonitorOptions { + options.ExpectedCodes = core.StringPtr(expectedCodes) + return options +} + +// SetFollowRedirects : Allow user to set FollowRedirects +func (options *EditLoadBalancerMonitorOptions) SetFollowRedirects(followRedirects bool) *EditLoadBalancerMonitorOptions { + options.FollowRedirects = core.BoolPtr(followRedirects) + return options +} + +// SetExpectedBody : Allow user to set ExpectedBody +func (options *EditLoadBalancerMonitorOptions) SetExpectedBody(expectedBody string) *EditLoadBalancerMonitorOptions { + options.ExpectedBody = core.StringPtr(expectedBody) + return options +} + +// SetAllowInsecure : Allow user to set AllowInsecure +func (options *EditLoadBalancerMonitorOptions) SetAllowInsecure(allowInsecure bool) *EditLoadBalancerMonitorOptions { + options.AllowInsecure = core.BoolPtr(allowInsecure) + return options +} + +// SetHeader : Allow user to set Header +func (options *EditLoadBalancerMonitorOptions) 
SetHeader(header map[string][]string) *EditLoadBalancerMonitorOptions { + options.Header = header + return options +} + +// SetHeaders : Allow user to set Headers +func (options *EditLoadBalancerMonitorOptions) SetHeaders(param map[string]string) *EditLoadBalancerMonitorOptions { + options.Headers = param + return options +} + +// GetLoadBalancerMonitorOptions : The GetLoadBalancerMonitor options. +type GetLoadBalancerMonitorOptions struct { + // monitor identifier. + MonitorIdentifier *string `json:"monitor_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerMonitorOptions : Instantiate GetLoadBalancerMonitorOptions +func (*GlobalLoadBalancerMonitorV1) NewGetLoadBalancerMonitorOptions(monitorIdentifier string) *GetLoadBalancerMonitorOptions { + return &GetLoadBalancerMonitorOptions{ + MonitorIdentifier: core.StringPtr(monitorIdentifier), + } +} + +// SetMonitorIdentifier : Allow user to set MonitorIdentifier +func (options *GetLoadBalancerMonitorOptions) SetMonitorIdentifier(monitorIdentifier string) *GetLoadBalancerMonitorOptions { + options.MonitorIdentifier = core.StringPtr(monitorIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerMonitorOptions) SetHeaders(param map[string]string) *GetLoadBalancerMonitorOptions { + options.Headers = param + return options +} + +// ListAllLoadBalancerMonitorsOptions : The ListAllLoadBalancerMonitors options. +type ListAllLoadBalancerMonitorsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListAllLoadBalancerMonitorsOptions : Instantiate ListAllLoadBalancerMonitorsOptions +func (*GlobalLoadBalancerMonitorV1) NewListAllLoadBalancerMonitorsOptions() *ListAllLoadBalancerMonitorsOptions { + return &ListAllLoadBalancerMonitorsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListAllLoadBalancerMonitorsOptions) SetHeaders(param map[string]string) *ListAllLoadBalancerMonitorsOptions { + options.Headers = param + return options +} + +// DeleteMonitorResp : delete monitor response object. +type DeleteMonitorResp struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // result. + Result *DeleteMonitorRespResult `json:"result" validate:"required"` +} + + +// UnmarshalDeleteMonitorResp unmarshals an instance of DeleteMonitorResp from the specified map of raw messages. +func UnmarshalDeleteMonitorResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteMonitorResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDeleteMonitorRespResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListMonitorResp : monitor list response. +type ListMonitorResp struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // result. 
+ Result []MonitorPack `json:"result" validate:"required"` + + // result information. + ResultInfo *ResultInfo `json:"result_info" validate:"required"` +} + + +// UnmarshalListMonitorResp unmarshals an instance of ListMonitorResp from the specified map of raw messages. +func UnmarshalListMonitorResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListMonitorResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalMonitorPack) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MonitorPack : monitor package. +type MonitorPack struct { + // identifier. + ID *string `json:"id,omitempty"` + + // created date. + CreatedOn *string `json:"created_on,omitempty"` + + // modified date. + ModifiedOn *string `json:"modified_on,omitempty"` + + // type. + Type *string `json:"type,omitempty"` + + // login page. + Description *string `json:"description,omitempty"` + + // method name. + Method *string `json:"method,omitempty"` + + // port number. + Port *int64 `json:"port,omitempty"` + + // path. + Path *string `json:"path,omitempty"` + + // timeout count. + Timeout *int64 `json:"timeout,omitempty"` + + // retries count. + Retries *int64 `json:"retries,omitempty"` + + // interval. + Interval *int64 `json:"interval,omitempty"` + + // expected body. + ExpectedBody *string `json:"expected_body" validate:"required"` + + // expected codes. + ExpectedCodes *string `json:"expected_codes" validate:"required"` + + // follow redirects. + FollowRedirects *bool `json:"follow_redirects,omitempty"` + + // allow insecure. + AllowInsecure *bool `json:"allow_insecure,omitempty"` + + // header. + Header map[string][]string `json:"header,omitempty"` +} + + +// UnmarshalMonitorPack unmarshals an instance of MonitorPack from the specified map of raw messages. 
+func UnmarshalMonitorPack(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MonitorPack) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "method", &obj.Method) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "path", &obj.Path) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "timeout", &obj.Timeout) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "retries", &obj.Retries) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "interval", &obj.Interval) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "expected_body", &obj.ExpectedBody) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "expected_codes", &obj.ExpectedCodes) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "follow_redirects", &obj.FollowRedirects) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "allow_insecure", &obj.AllowInsecure) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "header", &obj.Header) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MonitorResp : monitor response. +type MonitorResp struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // monitor package. + Result *MonitorPack `json:"result" validate:"required"` +} + + +// UnmarshalMonitorResp unmarshals an instance of MonitorResp from the specified map of raw messages. +func UnmarshalMonitorResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MonitorResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalMonitorPack) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResultInfo : result information. +type ResultInfo struct { + // page number. + Page *int64 `json:"page" validate:"required"` + + // per page number. + PerPage *int64 `json:"per_page" validate:"required"` + + // count. + Count *int64 `json:"count" validate:"required"` + + // total count. + TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalResultInfo unmarshals an instance of ResultInfo from the specified map of raw messages. 
+func UnmarshalResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/globalloadbalancerpoolsv0/global_load_balancer_pools_v0.go b/vendor/github.com/IBM/networking-go-sdk/globalloadbalancerpoolsv0/global_load_balancer_pools_v0.go new file mode 100644 index 00000000000..eced8ec6bd2 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/globalloadbalancerpoolsv0/global_load_balancer_pools_v0.go @@ -0,0 +1,1164 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package globalloadbalancerpoolsv0 : Operations and models for the GlobalLoadBalancerPoolsV0 service +package globalloadbalancerpoolsv0 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// GlobalLoadBalancerPoolsV0 : GLB Pools +// +// Version: 0.0.1 +type GlobalLoadBalancerPoolsV0 struct { + Service *core.BaseService + + // Full CRN of the service instance. + Crn *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "global_load_balancer_pools" + +// GlobalLoadBalancerPoolsV0Options : Service options +type GlobalLoadBalancerPoolsV0Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full CRN of the service instance. + Crn *string `validate:"required"` +} + +// NewGlobalLoadBalancerPoolsV0UsingExternalConfig : constructs an instance of GlobalLoadBalancerPoolsV0 with passed in options and external configuration. 
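+//
+// A minimal usage sketch (illustrative only; the CRN is a placeholder, and the
+// authenticator is expected to be resolved from external configuration such as
+// environment variables keyed by the service name):
+//
+//   crn := "crn:v1:bluemix:public:internet-svcs:..."
+//   pools, err := NewGlobalLoadBalancerPoolsV0UsingExternalConfig(&GlobalLoadBalancerPoolsV0Options{
+//       Crn: &crn,
+//   })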
+func NewGlobalLoadBalancerPoolsV0UsingExternalConfig(options *GlobalLoadBalancerPoolsV0Options) (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + globalLoadBalancerPools, err = NewGlobalLoadBalancerPoolsV0(options) + if err != nil { + return + } + + err = globalLoadBalancerPools.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = globalLoadBalancerPools.Service.SetServiceURL(options.URL) + } + return +} + +// NewGlobalLoadBalancerPoolsV0 : constructs an instance of GlobalLoadBalancerPoolsV0 with passed in options. +func NewGlobalLoadBalancerPoolsV0(options *GlobalLoadBalancerPoolsV0Options) (service *GlobalLoadBalancerPoolsV0, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &GlobalLoadBalancerPoolsV0{ + Service: baseService, + Crn: options.Crn, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "globalLoadBalancerPools" suitable for processing requests. +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) Clone() *GlobalLoadBalancerPoolsV0 { + if core.IsNil(globalLoadBalancerPools) { + return nil + } + clone := *globalLoadBalancerPools + clone.Service = globalLoadBalancerPools.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) SetServiceURL(url string) error { + return globalLoadBalancerPools.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) GetServiceURL() string { + return globalLoadBalancerPools.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) SetDefaultHeaders(headers http.Header) { + globalLoadBalancerPools.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) SetEnableGzipCompression(enableGzip bool) { + globalLoadBalancerPools.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) GetEnableGzipCompression() bool { + return globalLoadBalancerPools.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. 
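+//
+// For example (illustrative only; "pools" is an already-constructed client):
+//
+//   pools.EnableRetries(3, 30*time.Second)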
+func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + globalLoadBalancerPools.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) DisableRetries() { + globalLoadBalancerPools.Service.DisableRetries() +} + +// ListAllLoadBalancerPools : List all pools +// List all configured load balancer pools. +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) ListAllLoadBalancerPools(listAllLoadBalancerPoolsOptions *ListAllLoadBalancerPoolsOptions) (result *ListLoadBalancerPoolsResp, response *core.DetailedResponse, err error) { + return globalLoadBalancerPools.ListAllLoadBalancerPoolsWithContext(context.Background(), listAllLoadBalancerPoolsOptions) +} + +// ListAllLoadBalancerPoolsWithContext is an alternate form of the ListAllLoadBalancerPools method which supports a Context parameter +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) ListAllLoadBalancerPoolsWithContext(ctx context.Context, listAllLoadBalancerPoolsOptions *ListAllLoadBalancerPoolsOptions) (result *ListLoadBalancerPoolsResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAllLoadBalancerPoolsOptions, "listAllLoadBalancerPoolsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancerPools.Crn, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancerPools.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancerPools.Service.Options.URL, `/v1/{crn}/load_balancers/pools`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listAllLoadBalancerPoolsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer_pools", "V0", "ListAllLoadBalancerPools") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancerPools.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListLoadBalancerPoolsResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerPool : Create pool +// Create a new load balancer pool. 
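+//
+// A minimal usage sketch (illustrative only; the pool name, origin name, and
+// origin address are placeholders, and error handling is elided):
+//
+//   origin := LoadBalancerPoolReqOriginsItem{
+//       Name:    core.StringPtr("origin-1"),
+//       Address: core.StringPtr("203.0.113.10"),
+//       Enabled: core.BoolPtr(true),
+//   }
+//   opts := pools.NewCreateLoadBalancerPoolOptions()
+//   opts.SetName("primary-pool").SetOrigins([]LoadBalancerPoolReqOriginsItem{origin})
+//   result, _, err := pools.CreateLoadBalancerPool(opts)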
+func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) CreateLoadBalancerPool(createLoadBalancerPoolOptions *CreateLoadBalancerPoolOptions) (result *LoadBalancerPoolResp, response *core.DetailedResponse, err error) { + return globalLoadBalancerPools.CreateLoadBalancerPoolWithContext(context.Background(), createLoadBalancerPoolOptions) +} + +// CreateLoadBalancerPoolWithContext is an alternate form of the CreateLoadBalancerPool method which supports a Context parameter +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) CreateLoadBalancerPoolWithContext(ctx context.Context, createLoadBalancerPoolOptions *CreateLoadBalancerPoolOptions) (result *LoadBalancerPoolResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createLoadBalancerPoolOptions, "createLoadBalancerPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancerPools.Crn, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancerPools.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancerPools.Service.Options.URL, `/v1/{crn}/load_balancers/pools`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createLoadBalancerPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer_pools", "V0", "CreateLoadBalancerPool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createLoadBalancerPoolOptions.Name != nil { + body["name"] = createLoadBalancerPoolOptions.Name + } + if createLoadBalancerPoolOptions.CheckRegions != nil { + body["check_regions"] = createLoadBalancerPoolOptions.CheckRegions + } + if createLoadBalancerPoolOptions.Origins != nil { + body["origins"] = createLoadBalancerPoolOptions.Origins + } + if createLoadBalancerPoolOptions.Description != nil { + body["description"] = createLoadBalancerPoolOptions.Description + } + if createLoadBalancerPoolOptions.MinimumOrigins != nil { + body["minimum_origins"] = createLoadBalancerPoolOptions.MinimumOrigins + } + if createLoadBalancerPoolOptions.Enabled != nil { + body["enabled"] = createLoadBalancerPoolOptions.Enabled + } + if createLoadBalancerPoolOptions.Monitor != nil { + body["monitor"] = createLoadBalancerPoolOptions.Monitor + } + if createLoadBalancerPoolOptions.NotificationEmail != nil { + body["notification_email"] = createLoadBalancerPoolOptions.NotificationEmail + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancerPools.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetLoadBalancerPool : Get pool +// Get a single configured load balancer pool. 
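+//
+// A minimal usage sketch (illustrative only; the pool identifier is a placeholder):
+//
+//   opts := pools.NewGetLoadBalancerPoolOptions("pool-id")
+//   result, response, err := pools.GetLoadBalancerPool(opts)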
+func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) GetLoadBalancerPool(getLoadBalancerPoolOptions *GetLoadBalancerPoolOptions) (result *LoadBalancerPoolResp, response *core.DetailedResponse, err error) { + return globalLoadBalancerPools.GetLoadBalancerPoolWithContext(context.Background(), getLoadBalancerPoolOptions) +} + +// GetLoadBalancerPoolWithContext is an alternate form of the GetLoadBalancerPool method which supports a Context parameter +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) GetLoadBalancerPoolWithContext(ctx context.Context, getLoadBalancerPoolOptions *GetLoadBalancerPoolOptions) (result *LoadBalancerPoolResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerPoolOptions, "getLoadBalancerPoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerPoolOptions, "getLoadBalancerPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancerPools.Crn, + "pool_identifier": *getLoadBalancerPoolOptions.PoolIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancerPools.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancerPools.Service.Options.URL, `/v1/{crn}/load_balancers/pools/{pool_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer_pools", "V0", "GetLoadBalancerPool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancerPools.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolResp) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteLoadBalancerPool : Delete pool +// Delete a specific configured load balancer pool. 
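+//
+// A minimal usage sketch (illustrative only; the pool identifier is a placeholder):
+//
+//   opts := pools.NewDeleteLoadBalancerPoolOptions("pool-id")
+//   _, _, err := pools.DeleteLoadBalancerPool(opts)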
+func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) DeleteLoadBalancerPool(deleteLoadBalancerPoolOptions *DeleteLoadBalancerPoolOptions) (result *DeleteLoadBalancerPoolResp, response *core.DetailedResponse, err error) { + return globalLoadBalancerPools.DeleteLoadBalancerPoolWithContext(context.Background(), deleteLoadBalancerPoolOptions) +} + +// DeleteLoadBalancerPoolWithContext is an alternate form of the DeleteLoadBalancerPool method which supports a Context parameter +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) DeleteLoadBalancerPoolWithContext(ctx context.Context, deleteLoadBalancerPoolOptions *DeleteLoadBalancerPoolOptions) (result *DeleteLoadBalancerPoolResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerPoolOptions, "deleteLoadBalancerPoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerPoolOptions, "deleteLoadBalancerPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancerPools.Crn, + "pool_identifier": *deleteLoadBalancerPoolOptions.PoolIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancerPools.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancerPools.Service.Options.URL, `/v1/{crn}/load_balancers/pools/{pool_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer_pools", "V0", "DeleteLoadBalancerPool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancerPools.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteLoadBalancerPoolResp) + if err != nil { + return + } + response.Result = result + + return +} + +// EditLoadBalancerPool : Edit pool +// Edit a specific configured load balancer pool. 
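+//
+// A minimal usage sketch (illustrative only; note that, as the method body shows,
+// only fields explicitly set on the options are included in the PUT request body):
+//
+//   opts := pools.NewEditLoadBalancerPoolOptions("pool-id")
+//   opts.SetEnabled(false).SetDescription("temporarily disabled")
+//   result, _, err := pools.EditLoadBalancerPool(opts)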
+func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) EditLoadBalancerPool(editLoadBalancerPoolOptions *EditLoadBalancerPoolOptions) (result *LoadBalancerPoolResp, response *core.DetailedResponse, err error) { + return globalLoadBalancerPools.EditLoadBalancerPoolWithContext(context.Background(), editLoadBalancerPoolOptions) +} + +// EditLoadBalancerPoolWithContext is an alternate form of the EditLoadBalancerPool method which supports a Context parameter +func (globalLoadBalancerPools *GlobalLoadBalancerPoolsV0) EditLoadBalancerPoolWithContext(ctx context.Context, editLoadBalancerPoolOptions *EditLoadBalancerPoolOptions) (result *LoadBalancerPoolResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(editLoadBalancerPoolOptions, "editLoadBalancerPoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(editLoadBalancerPoolOptions, "editLoadBalancerPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancerPools.Crn, + "pool_identifier": *editLoadBalancerPoolOptions.PoolIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancerPools.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancerPools.Service.Options.URL, `/v1/{crn}/load_balancers/pools/{pool_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range editLoadBalancerPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer_pools", "V0", "EditLoadBalancerPool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if editLoadBalancerPoolOptions.Name != nil { + body["name"] = editLoadBalancerPoolOptions.Name + } + if editLoadBalancerPoolOptions.CheckRegions != nil { + body["check_regions"] = editLoadBalancerPoolOptions.CheckRegions + } + if editLoadBalancerPoolOptions.Origins != nil { + body["origins"] = editLoadBalancerPoolOptions.Origins + } + if editLoadBalancerPoolOptions.Description != nil { + body["description"] = editLoadBalancerPoolOptions.Description + } + if editLoadBalancerPoolOptions.MinimumOrigins != nil { + body["minimum_origins"] = editLoadBalancerPoolOptions.MinimumOrigins + } + if editLoadBalancerPoolOptions.Enabled != nil { + body["enabled"] = editLoadBalancerPoolOptions.Enabled + } + if editLoadBalancerPoolOptions.Monitor != nil { + body["monitor"] = editLoadBalancerPoolOptions.Monitor + } + if editLoadBalancerPoolOptions.NotificationEmail != nil { + body["notification_email"] = editLoadBalancerPoolOptions.NotificationEmail + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancerPools.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerPoolOptions : The CreateLoadBalancerPool options. +type CreateLoadBalancerPoolOptions struct { + // name. + Name *string `json:"name,omitempty"` + + // regions check. 
+ CheckRegions []string `json:"check_regions,omitempty"` + + // origins. + Origins []LoadBalancerPoolReqOriginsItem `json:"origins,omitempty"` + + // desc. + Description *string `json:"description,omitempty"` + + // The minimum number of origins that must be healthy for this pool to serve traffic. + MinimumOrigins *int64 `json:"minimum_origins,omitempty"` + + // enabled/disabled. + Enabled *bool `json:"enabled,omitempty"` + + // monitor. + Monitor *string `json:"monitor,omitempty"` + + // notification email. + NotificationEmail *string `json:"notification_email,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateLoadBalancerPoolOptions : Instantiate CreateLoadBalancerPoolOptions +func (*GlobalLoadBalancerPoolsV0) NewCreateLoadBalancerPoolOptions() *CreateLoadBalancerPoolOptions { + return &CreateLoadBalancerPoolOptions{} +} + +// SetName : Allow user to set Name +func (options *CreateLoadBalancerPoolOptions) SetName(name string) *CreateLoadBalancerPoolOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetCheckRegions : Allow user to set CheckRegions +func (options *CreateLoadBalancerPoolOptions) SetCheckRegions(checkRegions []string) *CreateLoadBalancerPoolOptions { + options.CheckRegions = checkRegions + return options +} + +// SetOrigins : Allow user to set Origins +func (options *CreateLoadBalancerPoolOptions) SetOrigins(origins []LoadBalancerPoolReqOriginsItem) *CreateLoadBalancerPoolOptions { + options.Origins = origins + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateLoadBalancerPoolOptions) SetDescription(description string) *CreateLoadBalancerPoolOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetMinimumOrigins : Allow user to set MinimumOrigins +func (options *CreateLoadBalancerPoolOptions) SetMinimumOrigins(minimumOrigins int64) *CreateLoadBalancerPoolOptions { + options.MinimumOrigins = core.Int64Ptr(minimumOrigins) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *CreateLoadBalancerPoolOptions) SetEnabled(enabled bool) *CreateLoadBalancerPoolOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetMonitor : Allow user to set Monitor +func (options *CreateLoadBalancerPoolOptions) SetMonitor(monitor string) *CreateLoadBalancerPoolOptions { + options.Monitor = core.StringPtr(monitor) + return options +} + +// SetNotificationEmail : Allow user to set NotificationEmail +func (options *CreateLoadBalancerPoolOptions) SetNotificationEmail(notificationEmail string) *CreateLoadBalancerPoolOptions { + options.NotificationEmail = core.StringPtr(notificationEmail) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateLoadBalancerPoolOptions) SetHeaders(param map[string]string) *CreateLoadBalancerPoolOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerPoolOptions : The DeleteLoadBalancerPool options. +type DeleteLoadBalancerPoolOptions struct { + // pool identifier. 
+ PoolIdentifier *string `json:"pool_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerPoolOptions : Instantiate DeleteLoadBalancerPoolOptions +func (*GlobalLoadBalancerPoolsV0) NewDeleteLoadBalancerPoolOptions(poolIdentifier string) *DeleteLoadBalancerPoolOptions { + return &DeleteLoadBalancerPoolOptions{ + PoolIdentifier: core.StringPtr(poolIdentifier), + } +} + +// SetPoolIdentifier : Allow user to set PoolIdentifier +func (options *DeleteLoadBalancerPoolOptions) SetPoolIdentifier(poolIdentifier string) *DeleteLoadBalancerPoolOptions { + options.PoolIdentifier = core.StringPtr(poolIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerPoolOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerPoolOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerPoolRespResult : result. +type DeleteLoadBalancerPoolRespResult struct { + // identifier. + ID *string `json:"id" validate:"required"` +} + + +// UnmarshalDeleteLoadBalancerPoolRespResult unmarshals an instance of DeleteLoadBalancerPoolRespResult from the specified map of raw messages. +func UnmarshalDeleteLoadBalancerPoolRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteLoadBalancerPoolRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// EditLoadBalancerPoolOptions : The EditLoadBalancerPool options. +type EditLoadBalancerPoolOptions struct { + // pool identifier. + PoolIdentifier *string `json:"pool_identifier" validate:"required,ne="` + + // name. + Name *string `json:"name,omitempty"` + + // regions check. + CheckRegions []string `json:"check_regions,omitempty"` + + // origins. + Origins []LoadBalancerPoolReqOriginsItem `json:"origins,omitempty"` + + // desc. + Description *string `json:"description,omitempty"` + + // The minimum number of origins that must be healthy for this pool to serve traffic. + MinimumOrigins *int64 `json:"minimum_origins,omitempty"` + + // enabled/disabled. + Enabled *bool `json:"enabled,omitempty"` + + // monitor. + Monitor *string `json:"monitor,omitempty"` + + // notification email. 
+ NotificationEmail *string `json:"notification_email,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewEditLoadBalancerPoolOptions : Instantiate EditLoadBalancerPoolOptions +func (*GlobalLoadBalancerPoolsV0) NewEditLoadBalancerPoolOptions(poolIdentifier string) *EditLoadBalancerPoolOptions { + return &EditLoadBalancerPoolOptions{ + PoolIdentifier: core.StringPtr(poolIdentifier), + } +} + +// SetPoolIdentifier : Allow user to set PoolIdentifier +func (options *EditLoadBalancerPoolOptions) SetPoolIdentifier(poolIdentifier string) *EditLoadBalancerPoolOptions { + options.PoolIdentifier = core.StringPtr(poolIdentifier) + return options +} + +// SetName : Allow user to set Name +func (options *EditLoadBalancerPoolOptions) SetName(name string) *EditLoadBalancerPoolOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetCheckRegions : Allow user to set CheckRegions +func (options *EditLoadBalancerPoolOptions) SetCheckRegions(checkRegions []string) *EditLoadBalancerPoolOptions { + options.CheckRegions = checkRegions + return options +} + +// SetOrigins : Allow user to set Origins +func (options *EditLoadBalancerPoolOptions) SetOrigins(origins []LoadBalancerPoolReqOriginsItem) *EditLoadBalancerPoolOptions { + options.Origins = origins + return options +} + +// SetDescription : Allow user to set Description +func (options *EditLoadBalancerPoolOptions) SetDescription(description string) *EditLoadBalancerPoolOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetMinimumOrigins : Allow user to set MinimumOrigins +func (options *EditLoadBalancerPoolOptions) SetMinimumOrigins(minimumOrigins int64) *EditLoadBalancerPoolOptions { + options.MinimumOrigins = core.Int64Ptr(minimumOrigins) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *EditLoadBalancerPoolOptions) SetEnabled(enabled bool) *EditLoadBalancerPoolOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetMonitor : Allow user to set Monitor +func (options *EditLoadBalancerPoolOptions) SetMonitor(monitor string) *EditLoadBalancerPoolOptions { + options.Monitor = core.StringPtr(monitor) + return options +} + +// SetNotificationEmail : Allow user to set NotificationEmail +func (options *EditLoadBalancerPoolOptions) SetNotificationEmail(notificationEmail string) *EditLoadBalancerPoolOptions { + options.NotificationEmail = core.StringPtr(notificationEmail) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *EditLoadBalancerPoolOptions) SetHeaders(param map[string]string) *EditLoadBalancerPoolOptions { + options.Headers = param + return options +} + +// GetLoadBalancerPoolOptions : The GetLoadBalancerPool options. +type GetLoadBalancerPoolOptions struct { + // pool identifier. 
+ PoolIdentifier *string `json:"pool_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerPoolOptions : Instantiate GetLoadBalancerPoolOptions +func (*GlobalLoadBalancerPoolsV0) NewGetLoadBalancerPoolOptions(poolIdentifier string) *GetLoadBalancerPoolOptions { + return &GetLoadBalancerPoolOptions{ + PoolIdentifier: core.StringPtr(poolIdentifier), + } +} + +// SetPoolIdentifier : Allow user to set PoolIdentifier +func (options *GetLoadBalancerPoolOptions) SetPoolIdentifier(poolIdentifier string) *GetLoadBalancerPoolOptions { + options.PoolIdentifier = core.StringPtr(poolIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerPoolOptions) SetHeaders(param map[string]string) *GetLoadBalancerPoolOptions { + options.Headers = param + return options +} + +// ListAllLoadBalancerPoolsOptions : The ListAllLoadBalancerPools options. +type ListAllLoadBalancerPoolsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListAllLoadBalancerPoolsOptions : Instantiate ListAllLoadBalancerPoolsOptions +func (*GlobalLoadBalancerPoolsV0) NewListAllLoadBalancerPoolsOptions() *ListAllLoadBalancerPoolsOptions { + return &ListAllLoadBalancerPoolsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListAllLoadBalancerPoolsOptions) SetHeaders(param map[string]string) *ListAllLoadBalancerPoolsOptions { + options.Headers = param + return options +} + +// LoadBalancerPoolPackOriginsItem : LoadBalancerPoolPackOriginsItem struct +type LoadBalancerPoolPackOriginsItem struct { + // name. + Name *string `json:"name,omitempty"` + + // address. + Address *string `json:"address,omitempty"` + + // enabled/disabled. + Enabled *bool `json:"enabled,omitempty"` + + // healthy. + Healthy *bool `json:"healthy,omitempty"` + + // weight. + Weight *float64 `json:"weight,omitempty"` + + // Pool origin disabled date. + DisabledAt *string `json:"disabled_at,omitempty"` + + // Reason for failure. + FailureReason *string `json:"failure_reason,omitempty"` +} + + +// UnmarshalLoadBalancerPoolPackOriginsItem unmarshals an instance of LoadBalancerPoolPackOriginsItem from the specified map of raw messages. +func UnmarshalLoadBalancerPoolPackOriginsItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolPackOriginsItem) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "healthy", &obj.Healthy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "weight", &obj.Weight) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "disabled_at", &obj.DisabledAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "failure_reason", &obj.FailureReason) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolReqOriginsItem : items. +type LoadBalancerPoolReqOriginsItem struct { + // name. + Name *string `json:"name,omitempty"` + + // address. + Address *string `json:"address,omitempty"` + + // enabled/disabled. + Enabled *bool `json:"enabled,omitempty"` + + // weight. 
+ Weight *float64 `json:"weight,omitempty"`
+}
+
+
+// UnmarshalLoadBalancerPoolReqOriginsItem unmarshals an instance of LoadBalancerPoolReqOriginsItem from the specified map of raw messages.
+func UnmarshalLoadBalancerPoolReqOriginsItem(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(LoadBalancerPoolReqOriginsItem)
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "address", &obj.Address)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "weight", &obj.Weight)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// DeleteLoadBalancerPoolResp : load balancer pool delete response.
+type DeleteLoadBalancerPoolResp struct {
+ // success response.
+ Success *bool `json:"success" validate:"required"`
+
+ // errors.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // messages.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // result.
+ Result *DeleteLoadBalancerPoolRespResult `json:"result" validate:"required"`
+}
+
+
+// UnmarshalDeleteLoadBalancerPoolResp unmarshals an instance of DeleteLoadBalancerPoolResp from the specified map of raw messages.
+func UnmarshalDeleteLoadBalancerPoolResp(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(DeleteLoadBalancerPoolResp)
+ err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDeleteLoadBalancerPoolRespResult)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ListLoadBalancerPoolsResp : list load balancer pools response.
+type ListLoadBalancerPoolsResp struct {
+ // success response.
+ Success *bool `json:"success" validate:"required"`
+
+ // errors.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // messages.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // result.
+ Result []LoadBalancerPoolPack `json:"result" validate:"required"`
+
+ // result information.
+ ResultInfo *ResultInfo `json:"result_info" validate:"required"`
+}
+
+
+// UnmarshalListLoadBalancerPoolsResp unmarshals an instance of ListLoadBalancerPoolsResp from the specified map of raw messages.
+func UnmarshalListLoadBalancerPoolsResp(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ListLoadBalancerPoolsResp)
+ err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalLoadBalancerPoolPack)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalResultInfo)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// LoadBalancerPoolPack : load balancer pool pack.
+type LoadBalancerPoolPack struct {
+ // identifier.
+ ID *string `json:"id,omitempty"`
+
+ // created date.
+ CreatedOn *string `json:"created_on,omitempty"`
+
+ // modified date.
+ ModifiedOn *string `json:"modified_on,omitempty"`
+
+ // desc.
+ Description *string `json:"description,omitempty"`
+
+ // name.
+ Name *string `json:"name" validate:"required"`
+
+ // enabled/disabled.
+ Enabled *bool `json:"enabled,omitempty"`
+
+ // healthy.
+ Healthy *bool `json:"healthy,omitempty"`
+
+ // monitor.
+ Monitor *string `json:"monitor,omitempty"`
+
+ // Minimum origin count.
+ MinimumOrigins *int64 `json:"minimum_origins,omitempty"`
+
+ // regions check.
+ CheckRegions []string `json:"check_regions,omitempty"`
+
+ // origins.
+ Origins []LoadBalancerPoolPackOriginsItem `json:"origins" validate:"required"`
+
+ // notification email.
+ NotificationEmail *string `json:"notification_email,omitempty"`
+}
+
+
+// UnmarshalLoadBalancerPoolPack unmarshals an instance of LoadBalancerPoolPack from the specified map of raw messages.
+func UnmarshalLoadBalancerPoolPack(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(LoadBalancerPoolPack)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "healthy", &obj.Healthy)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "monitor", &obj.Monitor)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "minimum_origins", &obj.MinimumOrigins)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "check_regions", &obj.CheckRegions)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "origins", &obj.Origins, UnmarshalLoadBalancerPoolPackOriginsItem)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "notification_email", &obj.NotificationEmail)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// LoadBalancerPoolResp : get load balancer pool response.
+type LoadBalancerPoolResp struct {
+ // success response.
+ Success *bool `json:"success" validate:"required"`
+
+ // errors.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // messages.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // load balancer pool pack.
+ Result *LoadBalancerPoolPack `json:"result" validate:"required"`
+
+ // result information.
+ ResultInfo *ResultInfo `json:"result_info" validate:"required"`
+}
+
+
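These generated models pair with the options builders earlier in this file. A rough usage sketch, hedged on the assumption that the generated EditLoadBalancerPool method defined earlier returns (*LoadBalancerPoolResp, *core.DetailedResponse, error) like the other methods in this SDK; the pool ID and origin values are placeholders:

```go
package example

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/networking-go-sdk/globalloadbalancerpoolsv0"
)

// editPoolOrigins is a sketch only. It assumes an already-constructed
// GlobalLoadBalancerPoolsV0 client and the generated EditLoadBalancerPool
// method from earlier in this file; poolID and the origin are placeholders.
func editPoolOrigins(pools *globalloadbalancerpoolsv0.GlobalLoadBalancerPoolsV0, poolID string) error {
	origin := globalloadbalancerpoolsv0.LoadBalancerPoolReqOriginsItem{
		Name:    core.StringPtr("origin-1"),     // placeholder name
		Address: core.StringPtr("203.0.113.10"), // placeholder address
		Enabled: core.BoolPtr(true),
	}

	opts := pools.NewEditLoadBalancerPoolOptions(poolID).
		SetOrigins([]globalloadbalancerpoolsv0.LoadBalancerPoolReqOriginsItem{origin}).
		SetMinimumOrigins(1).
		SetEnabled(true)

	result, _, err := pools.EditLoadBalancerPool(opts)
	if err != nil {
		return err
	}
	// Name is a required field of LoadBalancerPoolPack, so the pointer is
	// expected to be non-nil on a successful response.
	fmt.Printf("updated pool %s\n", *result.Result.Name)
	return nil
}
```

+// UnmarshalLoadBalancerPoolResp unmarshals an instance of LoadBalancerPoolResp from the specified map of raw messages.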
+func UnmarshalLoadBalancerPoolResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalLoadBalancerPoolPack) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResultInfo : result information. +type ResultInfo struct { + // page number. + Page *int64 `json:"page" validate:"required"` + + // per page count. + PerPage *int64 `json:"per_page" validate:"required"` + + // count. + Count *int64 `json:"count" validate:"required"` + + // total count. + TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalResultInfo unmarshals an instance of ResultInfo from the specified map of raw messages. +func UnmarshalResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/globalloadbalancerv1/global_load_balancer_v1.go b/vendor/github.com/IBM/networking-go-sdk/globalloadbalancerv1/global_load_balancer_v1.go new file mode 100644 index 00000000000..eca9add437a --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/globalloadbalancerv1/global_load_balancer_v1.go @@ -0,0 +1,1207 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package globalloadbalancerv1 : Operations and models for the GlobalLoadBalancerV1 service +package globalloadbalancerv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// GlobalLoadBalancerV1 : Global Load Balancer +// +// Version: 1.0.1 +type GlobalLoadBalancerV1 struct { + Service *core.BaseService + + // Full CRN of the service instance. + Crn *string + + // zone identifier. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. 
+const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "global_load_balancer" + +// GlobalLoadBalancerV1Options : Service options +type GlobalLoadBalancerV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full CRN of the service instance. + Crn *string `validate:"required"` + + // zone identifier. + ZoneIdentifier *string `validate:"required"` +} + +// NewGlobalLoadBalancerV1UsingExternalConfig : constructs an instance of GlobalLoadBalancerV1 with passed in options and external configuration. +func NewGlobalLoadBalancerV1UsingExternalConfig(options *GlobalLoadBalancerV1Options) (globalLoadBalancer *GlobalLoadBalancerV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + globalLoadBalancer, err = NewGlobalLoadBalancerV1(options) + if err != nil { + return + } + + err = globalLoadBalancer.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = globalLoadBalancer.Service.SetServiceURL(options.URL) + } + return +} + +// NewGlobalLoadBalancerV1 : constructs an instance of GlobalLoadBalancerV1 with passed in options. +func NewGlobalLoadBalancerV1(options *GlobalLoadBalancerV1Options) (service *GlobalLoadBalancerV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &GlobalLoadBalancerV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "globalLoadBalancer" suitable for processing requests. 
+func (globalLoadBalancer *GlobalLoadBalancerV1) Clone() *GlobalLoadBalancerV1 { + if core.IsNil(globalLoadBalancer) { + return nil + } + clone := *globalLoadBalancer + clone.Service = globalLoadBalancer.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (globalLoadBalancer *GlobalLoadBalancerV1) SetServiceURL(url string) error { + return globalLoadBalancer.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (globalLoadBalancer *GlobalLoadBalancerV1) GetServiceURL() string { + return globalLoadBalancer.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (globalLoadBalancer *GlobalLoadBalancerV1) SetDefaultHeaders(headers http.Header) { + globalLoadBalancer.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (globalLoadBalancer *GlobalLoadBalancerV1) SetEnableGzipCompression(enableGzip bool) { + globalLoadBalancer.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (globalLoadBalancer *GlobalLoadBalancerV1) GetEnableGzipCompression() bool { + return globalLoadBalancer.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (globalLoadBalancer *GlobalLoadBalancerV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + globalLoadBalancer.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (globalLoadBalancer *GlobalLoadBalancerV1) DisableRetries() { + globalLoadBalancer.Service.DisableRetries() +} + +// ListAllLoadBalancers : List all load balancers +// List configured load balancers. 
+func (globalLoadBalancer *GlobalLoadBalancerV1) ListAllLoadBalancers(listAllLoadBalancersOptions *ListAllLoadBalancersOptions) (result *ListLoadBalancersResp, response *core.DetailedResponse, err error) {
+ return globalLoadBalancer.ListAllLoadBalancersWithContext(context.Background(), listAllLoadBalancersOptions)
+}
+
+// ListAllLoadBalancersWithContext is an alternate form of the ListAllLoadBalancers method which supports a Context parameter
+func (globalLoadBalancer *GlobalLoadBalancerV1) ListAllLoadBalancersWithContext(ctx context.Context, listAllLoadBalancersOptions *ListAllLoadBalancersOptions) (result *ListLoadBalancersResp, response *core.DetailedResponse, err error) {
+ err = core.ValidateStruct(listAllLoadBalancersOptions, "listAllLoadBalancersOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *globalLoadBalancer.Crn,
+ "zone_identifier": *globalLoadBalancer.ZoneIdentifier,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = globalLoadBalancer.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(globalLoadBalancer.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/load_balancers`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range listAllLoadBalancersOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("global_load_balancer", "V1", "ListAllLoadBalancers")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = globalLoadBalancer.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListLoadBalancersResp)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
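A minimal sketch of driving the method above: construct the client with an explicit IAM authenticator (the NewGlobalLoadBalancerV1UsingExternalConfig variant can instead pull credentials from the environment) and list the configured load balancers. The API key, CRN, and zone identifier are placeholders:

```go
package example

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/networking-go-sdk/globalloadbalancerv1"
)

// listLoadBalancers is a sketch; every credential and identifier below is a
// placeholder for values taken from the target CIS instance and DNS zone.
func listLoadBalancers() error {
	glb, err := globalloadbalancerv1.NewGlobalLoadBalancerV1(&globalloadbalancerv1.GlobalLoadBalancerV1Options{
		Authenticator:  &core.IamAuthenticator{ApiKey: "<api-key>"},
		Crn:            core.StringPtr("<service-instance-crn>"),
		ZoneIdentifier: core.StringPtr("<zone-identifier>"),
	})
	if err != nil {
		return err
	}

	result, _, err := glb.ListAllLoadBalancers(glb.NewListAllLoadBalancersOptions())
	if err != nil {
		return err
	}
	for _, lb := range result.Result {
		// ID and Name are required fields of LoadBalancerPack, so the
		// pointers are expected to be non-nil on a successful response.
		fmt.Printf("%s %s\n", *lb.ID, *lb.Name)
	}
	return nil
}
```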
+// CreateLoadBalancer : Create load balancer
+// Create a load balancer for a given zone. The zone should be active before placing an order for a load balancer.
+func (globalLoadBalancer *GlobalLoadBalancerV1) CreateLoadBalancer(createLoadBalancerOptions *CreateLoadBalancerOptions) (result *LoadBalancersResp, response *core.DetailedResponse, err error) {
+ return globalLoadBalancer.CreateLoadBalancerWithContext(context.Background(), createLoadBalancerOptions)
+}
+
+// CreateLoadBalancerWithContext is an alternate form of the CreateLoadBalancer method which supports a Context parameter
+func (globalLoadBalancer *GlobalLoadBalancerV1) CreateLoadBalancerWithContext(ctx context.Context, createLoadBalancerOptions *CreateLoadBalancerOptions) (result *LoadBalancersResp, response *core.DetailedResponse, err error) {
+ err = core.ValidateStruct(createLoadBalancerOptions, "createLoadBalancerOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *globalLoadBalancer.Crn,
+ "zone_identifier": *globalLoadBalancer.ZoneIdentifier,
+ }
+
+ builder := core.NewRequestBuilder(core.POST)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = globalLoadBalancer.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(globalLoadBalancer.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/load_balancers`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range createLoadBalancerOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("global_load_balancer", "V1", "CreateLoadBalancer")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ builder.AddHeader("Content-Type", "application/json")
+
+ body := make(map[string]interface{})
+ if createLoadBalancerOptions.Name != nil {
+ body["name"] = createLoadBalancerOptions.Name
+ }
+ if createLoadBalancerOptions.FallbackPool != nil {
+ body["fallback_pool"] = createLoadBalancerOptions.FallbackPool
+ }
+ if createLoadBalancerOptions.DefaultPools != nil {
+ body["default_pools"] = createLoadBalancerOptions.DefaultPools
+ }
+ if createLoadBalancerOptions.Description != nil {
+ body["description"] = createLoadBalancerOptions.Description
+ }
+ if createLoadBalancerOptions.TTL != nil {
+ body["ttl"] = createLoadBalancerOptions.TTL
+ }
+ if createLoadBalancerOptions.RegionPools != nil {
+ body["region_pools"] = createLoadBalancerOptions.RegionPools
+ }
+ if createLoadBalancerOptions.PopPools != nil {
+ body["pop_pools"] = createLoadBalancerOptions.PopPools
+ }
+ if createLoadBalancerOptions.Proxied != nil {
+ body["proxied"] = createLoadBalancerOptions.Proxied
+ }
+ if createLoadBalancerOptions.Enabled != nil {
+ body["enabled"] = createLoadBalancerOptions.Enabled
+ }
+ if createLoadBalancerOptions.SessionAffinity != nil {
+ body["session_affinity"] = createLoadBalancerOptions.SessionAffinity
+ }
+ if createLoadBalancerOptions.SteeringPolicy != nil {
+ body["steering_policy"] = createLoadBalancerOptions.SteeringPolicy
+ }
+ _, err = builder.SetBodyContentJSON(body)
+ if err != nil {
+ return
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = globalLoadBalancer.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancersResp)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
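Creation goes through the CreateLoadBalancerOptions builder defined later in this file; a sketch with placeholder hostname and pool identifiers:

```go
package example

import (
	"fmt"

	"github.com/IBM/networking-go-sdk/globalloadbalancerv1"
)

// createLoadBalancer is a sketch; the hostname and pool IDs are placeholders,
// and glb is assumed to be a client constructed as shown earlier.
func createLoadBalancer(glb *globalloadbalancerv1.GlobalLoadBalancerV1) error {
	opts := glb.NewCreateLoadBalancerOptions().
		SetName("lb.example.com").
		SetFallbackPool("<fallback-pool-id>").
		SetDefaultPools([]string{"<primary-pool-id>"}).
		SetProxied(true).
		SetSteeringPolicy(globalloadbalancerv1.CreateLoadBalancerOptions_SteeringPolicy_Off)

	result, _, err := glb.CreateLoadBalancer(opts)
	if err != nil {
		return err
	}
	fmt.Printf("created load balancer %s\n", *result.Result.ID)
	return nil
}
```

+// EditLoadBalancer : Edit load balancer
+// Edit properties of an existing load balancer.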
+func (globalLoadBalancer *GlobalLoadBalancerV1) EditLoadBalancer(editLoadBalancerOptions *EditLoadBalancerOptions) (result *LoadBalancersResp, response *core.DetailedResponse, err error) { + return globalLoadBalancer.EditLoadBalancerWithContext(context.Background(), editLoadBalancerOptions) +} + +// EditLoadBalancerWithContext is an alternate form of the EditLoadBalancer method which supports a Context parameter +func (globalLoadBalancer *GlobalLoadBalancerV1) EditLoadBalancerWithContext(ctx context.Context, editLoadBalancerOptions *EditLoadBalancerOptions) (result *LoadBalancersResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(editLoadBalancerOptions, "editLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(editLoadBalancerOptions, "editLoadBalancerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancer.Crn, + "zone_identifier": *globalLoadBalancer.ZoneIdentifier, + "load_balancer_identifier": *editLoadBalancerOptions.LoadBalancerIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancer.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancer.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/load_balancers/{load_balancer_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range editLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer", "V1", "EditLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if editLoadBalancerOptions.Name != nil { + body["name"] = editLoadBalancerOptions.Name + } + if editLoadBalancerOptions.FallbackPool != nil { + body["fallback_pool"] = editLoadBalancerOptions.FallbackPool + } + if editLoadBalancerOptions.DefaultPools != nil { + body["default_pools"] = editLoadBalancerOptions.DefaultPools + } + if editLoadBalancerOptions.Description != nil { + body["description"] = editLoadBalancerOptions.Description + } + if editLoadBalancerOptions.TTL != nil { + body["ttl"] = editLoadBalancerOptions.TTL + } + if editLoadBalancerOptions.RegionPools != nil { + body["region_pools"] = editLoadBalancerOptions.RegionPools + } + if editLoadBalancerOptions.PopPools != nil { + body["pop_pools"] = editLoadBalancerOptions.PopPools + } + if editLoadBalancerOptions.Proxied != nil { + body["proxied"] = editLoadBalancerOptions.Proxied + } + if editLoadBalancerOptions.Enabled != nil { + body["enabled"] = editLoadBalancerOptions.Enabled + } + if editLoadBalancerOptions.SessionAffinity != nil { + body["session_affinity"] = editLoadBalancerOptions.SessionAffinity + } + if editLoadBalancerOptions.SteeringPolicy != nil { + body["steering_policy"] = editLoadBalancerOptions.SteeringPolicy + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancer.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancersResp) + if err != nil { + return + } + response.Result = 
result + + return +} + +// DeleteLoadBalancer : Delete load balancer +// Delete a load balancer. +func (globalLoadBalancer *GlobalLoadBalancerV1) DeleteLoadBalancer(deleteLoadBalancerOptions *DeleteLoadBalancerOptions) (result *DeleteLoadBalancersResp, response *core.DetailedResponse, err error) { + return globalLoadBalancer.DeleteLoadBalancerWithContext(context.Background(), deleteLoadBalancerOptions) +} + +// DeleteLoadBalancerWithContext is an alternate form of the DeleteLoadBalancer method which supports a Context parameter +func (globalLoadBalancer *GlobalLoadBalancerV1) DeleteLoadBalancerWithContext(ctx context.Context, deleteLoadBalancerOptions *DeleteLoadBalancerOptions) (result *DeleteLoadBalancersResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerOptions, "deleteLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerOptions, "deleteLoadBalancerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancer.Crn, + "zone_identifier": *globalLoadBalancer.ZoneIdentifier, + "load_balancer_identifier": *deleteLoadBalancerOptions.LoadBalancerIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancer.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancer.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/load_balancers/{load_balancer_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer", "V1", "DeleteLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancer.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteLoadBalancersResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetLoadBalancerSettings : Get load balancer +// For a given zone identifier and load balancer id, get the load balancer settings. 
+func (globalLoadBalancer *GlobalLoadBalancerV1) GetLoadBalancerSettings(getLoadBalancerSettingsOptions *GetLoadBalancerSettingsOptions) (result *LoadBalancersResp, response *core.DetailedResponse, err error) { + return globalLoadBalancer.GetLoadBalancerSettingsWithContext(context.Background(), getLoadBalancerSettingsOptions) +} + +// GetLoadBalancerSettingsWithContext is an alternate form of the GetLoadBalancerSettings method which supports a Context parameter +func (globalLoadBalancer *GlobalLoadBalancerV1) GetLoadBalancerSettingsWithContext(ctx context.Context, getLoadBalancerSettingsOptions *GetLoadBalancerSettingsOptions) (result *LoadBalancersResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerSettingsOptions, "getLoadBalancerSettingsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerSettingsOptions, "getLoadBalancerSettingsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *globalLoadBalancer.Crn, + "zone_identifier": *globalLoadBalancer.ZoneIdentifier, + "load_balancer_identifier": *getLoadBalancerSettingsOptions.LoadBalancerIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalLoadBalancer.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalLoadBalancer.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/load_balancers/{load_balancer_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerSettingsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_load_balancer", "V1", "GetLoadBalancerSettings") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalLoadBalancer.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancersResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerOptions : The CreateLoadBalancer options. +type CreateLoadBalancerOptions struct { + // name. + Name *string `json:"name,omitempty"` + + // fallback pool. + FallbackPool *string `json:"fallback_pool,omitempty"` + + // default pools. + DefaultPools []string `json:"default_pools,omitempty"` + + // desc. + Description *string `json:"description,omitempty"` + + // ttl. + TTL *int64 `json:"ttl,omitempty"` + + // region pools. + RegionPools interface{} `json:"region_pools,omitempty"` + + // pop pools. + PopPools interface{} `json:"pop_pools,omitempty"` + + // proxied. + Proxied *bool `json:"proxied,omitempty"` + + // enabled/disabled. + Enabled *bool `json:"enabled,omitempty"` + + // session affinity. + SessionAffinity *string `json:"session_affinity,omitempty"` + + // steering policy. + SteeringPolicy *string `json:"steering_policy,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateLoadBalancerOptions.SessionAffinity property. +// session affinity. 
+const ( + CreateLoadBalancerOptions_SessionAffinity_Cookie = "cookie" + CreateLoadBalancerOptions_SessionAffinity_IpCookie = "ip_cookie" + CreateLoadBalancerOptions_SessionAffinity_None = "none" +) + +// Constants associated with the CreateLoadBalancerOptions.SteeringPolicy property. +// steering policy. +const ( + CreateLoadBalancerOptions_SteeringPolicy_DynamicLatency = "dynamic_latency" + CreateLoadBalancerOptions_SteeringPolicy_Geo = "geo" + CreateLoadBalancerOptions_SteeringPolicy_Off = "off" + CreateLoadBalancerOptions_SteeringPolicy_Random = "random" +) + +// NewCreateLoadBalancerOptions : Instantiate CreateLoadBalancerOptions +func (*GlobalLoadBalancerV1) NewCreateLoadBalancerOptions() *CreateLoadBalancerOptions { + return &CreateLoadBalancerOptions{} +} + +// SetName : Allow user to set Name +func (options *CreateLoadBalancerOptions) SetName(name string) *CreateLoadBalancerOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetFallbackPool : Allow user to set FallbackPool +func (options *CreateLoadBalancerOptions) SetFallbackPool(fallbackPool string) *CreateLoadBalancerOptions { + options.FallbackPool = core.StringPtr(fallbackPool) + return options +} + +// SetDefaultPools : Allow user to set DefaultPools +func (options *CreateLoadBalancerOptions) SetDefaultPools(defaultPools []string) *CreateLoadBalancerOptions { + options.DefaultPools = defaultPools + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateLoadBalancerOptions) SetDescription(description string) *CreateLoadBalancerOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetTTL : Allow user to set TTL +func (options *CreateLoadBalancerOptions) SetTTL(ttl int64) *CreateLoadBalancerOptions { + options.TTL = core.Int64Ptr(ttl) + return options +} + +// SetRegionPools : Allow user to set RegionPools +func (options *CreateLoadBalancerOptions) SetRegionPools(regionPools interface{}) *CreateLoadBalancerOptions { + options.RegionPools = regionPools + return options +} + +// SetPopPools : Allow user to set PopPools +func (options *CreateLoadBalancerOptions) SetPopPools(popPools interface{}) *CreateLoadBalancerOptions { + options.PopPools = popPools + return options +} + +// SetProxied : Allow user to set Proxied +func (options *CreateLoadBalancerOptions) SetProxied(proxied bool) *CreateLoadBalancerOptions { + options.Proxied = core.BoolPtr(proxied) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *CreateLoadBalancerOptions) SetEnabled(enabled bool) *CreateLoadBalancerOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetSessionAffinity : Allow user to set SessionAffinity +func (options *CreateLoadBalancerOptions) SetSessionAffinity(sessionAffinity string) *CreateLoadBalancerOptions { + options.SessionAffinity = core.StringPtr(sessionAffinity) + return options +} + +// SetSteeringPolicy : Allow user to set SteeringPolicy +func (options *CreateLoadBalancerOptions) SetSteeringPolicy(steeringPolicy string) *CreateLoadBalancerOptions { + options.SteeringPolicy = core.StringPtr(steeringPolicy) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateLoadBalancerOptions) SetHeaders(param map[string]string) *CreateLoadBalancerOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerOptions : The DeleteLoadBalancer options. +type DeleteLoadBalancerOptions struct { + // load balancer identifier. 
+ LoadBalancerIdentifier *string `json:"load_balancer_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerOptions : Instantiate DeleteLoadBalancerOptions +func (*GlobalLoadBalancerV1) NewDeleteLoadBalancerOptions(loadBalancerIdentifier string) *DeleteLoadBalancerOptions { + return &DeleteLoadBalancerOptions{ + LoadBalancerIdentifier: core.StringPtr(loadBalancerIdentifier), + } +} + +// SetLoadBalancerIdentifier : Allow user to set LoadBalancerIdentifier +func (options *DeleteLoadBalancerOptions) SetLoadBalancerIdentifier(loadBalancerIdentifier string) *DeleteLoadBalancerOptions { + options.LoadBalancerIdentifier = core.StringPtr(loadBalancerIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancersRespResult : result. +type DeleteLoadBalancersRespResult struct { + // identifier. + ID *string `json:"id" validate:"required"` +} + + +// UnmarshalDeleteLoadBalancersRespResult unmarshals an instance of DeleteLoadBalancersRespResult from the specified map of raw messages. +func UnmarshalDeleteLoadBalancersRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteLoadBalancersRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// EditLoadBalancerOptions : The EditLoadBalancer options. +type EditLoadBalancerOptions struct { + // load balancer identifier. + LoadBalancerIdentifier *string `json:"load_balancer_identifier" validate:"required,ne="` + + // name. + Name *string `json:"name,omitempty"` + + // fallback pool. + FallbackPool *string `json:"fallback_pool,omitempty"` + + // default pools. + DefaultPools []string `json:"default_pools,omitempty"` + + // desc. + Description *string `json:"description,omitempty"` + + // ttl. + TTL *int64 `json:"ttl,omitempty"` + + // region pools. + RegionPools interface{} `json:"region_pools,omitempty"` + + // pop pools. + PopPools interface{} `json:"pop_pools,omitempty"` + + // proxied. + Proxied *bool `json:"proxied,omitempty"` + + // enabled/disabled. + Enabled *bool `json:"enabled,omitempty"` + + // session affinity. + SessionAffinity *string `json:"session_affinity,omitempty"` + + // steering policy. + SteeringPolicy *string `json:"steering_policy,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the EditLoadBalancerOptions.SessionAffinity property. +// session affinity. +const ( + EditLoadBalancerOptions_SessionAffinity_Cookie = "cookie" + EditLoadBalancerOptions_SessionAffinity_IpCookie = "ip_cookie" + EditLoadBalancerOptions_SessionAffinity_None = "none" +) + +// Constants associated with the EditLoadBalancerOptions.SteeringPolicy property. +// steering policy. 
+const ( + EditLoadBalancerOptions_SteeringPolicy_DynamicLatency = "dynamic_latency" + EditLoadBalancerOptions_SteeringPolicy_Geo = "geo" + EditLoadBalancerOptions_SteeringPolicy_Off = "off" + EditLoadBalancerOptions_SteeringPolicy_Random = "random" +) + +// NewEditLoadBalancerOptions : Instantiate EditLoadBalancerOptions +func (*GlobalLoadBalancerV1) NewEditLoadBalancerOptions(loadBalancerIdentifier string) *EditLoadBalancerOptions { + return &EditLoadBalancerOptions{ + LoadBalancerIdentifier: core.StringPtr(loadBalancerIdentifier), + } +} + +// SetLoadBalancerIdentifier : Allow user to set LoadBalancerIdentifier +func (options *EditLoadBalancerOptions) SetLoadBalancerIdentifier(loadBalancerIdentifier string) *EditLoadBalancerOptions { + options.LoadBalancerIdentifier = core.StringPtr(loadBalancerIdentifier) + return options +} + +// SetName : Allow user to set Name +func (options *EditLoadBalancerOptions) SetName(name string) *EditLoadBalancerOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetFallbackPool : Allow user to set FallbackPool +func (options *EditLoadBalancerOptions) SetFallbackPool(fallbackPool string) *EditLoadBalancerOptions { + options.FallbackPool = core.StringPtr(fallbackPool) + return options +} + +// SetDefaultPools : Allow user to set DefaultPools +func (options *EditLoadBalancerOptions) SetDefaultPools(defaultPools []string) *EditLoadBalancerOptions { + options.DefaultPools = defaultPools + return options +} + +// SetDescription : Allow user to set Description +func (options *EditLoadBalancerOptions) SetDescription(description string) *EditLoadBalancerOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetTTL : Allow user to set TTL +func (options *EditLoadBalancerOptions) SetTTL(ttl int64) *EditLoadBalancerOptions { + options.TTL = core.Int64Ptr(ttl) + return options +} + +// SetRegionPools : Allow user to set RegionPools +func (options *EditLoadBalancerOptions) SetRegionPools(regionPools interface{}) *EditLoadBalancerOptions { + options.RegionPools = regionPools + return options +} + +// SetPopPools : Allow user to set PopPools +func (options *EditLoadBalancerOptions) SetPopPools(popPools interface{}) *EditLoadBalancerOptions { + options.PopPools = popPools + return options +} + +// SetProxied : Allow user to set Proxied +func (options *EditLoadBalancerOptions) SetProxied(proxied bool) *EditLoadBalancerOptions { + options.Proxied = core.BoolPtr(proxied) + return options +} + +// SetEnabled : Allow user to set Enabled +func (options *EditLoadBalancerOptions) SetEnabled(enabled bool) *EditLoadBalancerOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetSessionAffinity : Allow user to set SessionAffinity +func (options *EditLoadBalancerOptions) SetSessionAffinity(sessionAffinity string) *EditLoadBalancerOptions { + options.SessionAffinity = core.StringPtr(sessionAffinity) + return options +} + +// SetSteeringPolicy : Allow user to set SteeringPolicy +func (options *EditLoadBalancerOptions) SetSteeringPolicy(steeringPolicy string) *EditLoadBalancerOptions { + options.SteeringPolicy = core.StringPtr(steeringPolicy) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *EditLoadBalancerOptions) SetHeaders(param map[string]string) *EditLoadBalancerOptions { + options.Headers = param + return options +} + +// GetLoadBalancerSettingsOptions : The GetLoadBalancerSettings options. +type GetLoadBalancerSettingsOptions struct { + // load balancer identifier. 
+ LoadBalancerIdentifier *string `json:"load_balancer_identifier" validate:"required,ne="`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewGetLoadBalancerSettingsOptions : Instantiate GetLoadBalancerSettingsOptions
+func (*GlobalLoadBalancerV1) NewGetLoadBalancerSettingsOptions(loadBalancerIdentifier string) *GetLoadBalancerSettingsOptions {
+ return &GetLoadBalancerSettingsOptions{
+ LoadBalancerIdentifier: core.StringPtr(loadBalancerIdentifier),
+ }
+}
+
+// SetLoadBalancerIdentifier : Allow user to set LoadBalancerIdentifier
+func (options *GetLoadBalancerSettingsOptions) SetLoadBalancerIdentifier(loadBalancerIdentifier string) *GetLoadBalancerSettingsOptions {
+ options.LoadBalancerIdentifier = core.StringPtr(loadBalancerIdentifier)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetLoadBalancerSettingsOptions) SetHeaders(param map[string]string) *GetLoadBalancerSettingsOptions {
+ options.Headers = param
+ return options
+}
+
+// ListAllLoadBalancersOptions : The ListAllLoadBalancers options.
+type ListAllLoadBalancersOptions struct {
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewListAllLoadBalancersOptions : Instantiate ListAllLoadBalancersOptions
+func (*GlobalLoadBalancerV1) NewListAllLoadBalancersOptions() *ListAllLoadBalancersOptions {
+ return &ListAllLoadBalancersOptions{}
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListAllLoadBalancersOptions) SetHeaders(param map[string]string) *ListAllLoadBalancersOptions {
+ options.Headers = param
+ return options
+}
+
+// DeleteLoadBalancersResp : delete load balancers response.
+type DeleteLoadBalancersResp struct {
+ // success response.
+ Success *bool `json:"success" validate:"required"`
+
+ // errors.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // messages.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // result.
+ Result *DeleteLoadBalancersRespResult `json:"result" validate:"required"`
+}
+
+
+// UnmarshalDeleteLoadBalancersResp unmarshals an instance of DeleteLoadBalancersResp from the specified map of raw messages.
+func UnmarshalDeleteLoadBalancersResp(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(DeleteLoadBalancersResp)
+ err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDeleteLoadBalancersRespResult)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ListLoadBalancersResp : load balancer list response.
+type ListLoadBalancersResp struct {
+ // success response.
+ Success *bool `json:"success" validate:"required"`
+
+ // errors.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // messages.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // result.
+ Result []LoadBalancerPack `json:"result" validate:"required"`
+
+ // result information.
+ ResultInfo *ResultInfo `json:"result_info" validate:"required"`
+}
+
+
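Deletion needs only the load balancer identifier, passed through the options type above; a sketch:

```go
package example

import (
	"fmt"

	"github.com/IBM/networking-go-sdk/globalloadbalancerv1"
)

// deleteLoadBalancer is a sketch; id is a placeholder identifier and glb an
// already-constructed client.
func deleteLoadBalancer(glb *globalloadbalancerv1.GlobalLoadBalancerV1, id string) error {
	result, _, err := glb.DeleteLoadBalancer(glb.NewDeleteLoadBalancerOptions(id))
	if err != nil {
		return err
	}
	// ID is a required field of DeleteLoadBalancersRespResult, so the pointer
	// is expected to be non-nil on a successful response.
	fmt.Printf("deleted load balancer %s\n", *result.Result.ID)
	return nil
}
```

+// UnmarshalListLoadBalancersResp unmarshals an instance of ListLoadBalancersResp from the specified map of raw messages.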
+func UnmarshalListLoadBalancersResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListLoadBalancersResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalLoadBalancerPack) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPack : loadbalancer pack. +type LoadBalancerPack struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // created date. + CreatedOn *string `json:"created_on" validate:"required"` + + // modified date. + ModifiedOn *string `json:"modified_on" validate:"required"` + + // desc. + Description *string `json:"description" validate:"required"` + + // name. + Name *string `json:"name" validate:"required"` + + // ttl. + TTL *int64 `json:"ttl" validate:"required"` + + // fallback pool. + FallbackPool *string `json:"fallback_pool" validate:"required"` + + // default pools. + DefaultPools []string `json:"default_pools" validate:"required"` + + // region pools. + RegionPools interface{} `json:"region_pools" validate:"required"` + + // pop pools. + PopPools interface{} `json:"pop_pools" validate:"required"` + + // proxied. + Proxied *bool `json:"proxied" validate:"required"` + + // enabled/disabled. + Enabled *bool `json:"enabled" validate:"required"` + + // session affinity. + SessionAffinity *string `json:"session_affinity" validate:"required"` + + // steering policy. + SteeringPolicy *string `json:"steering_policy" validate:"required"` +} + +// Constants associated with the LoadBalancerPack.SessionAffinity property. +// session affinity. +const ( + LoadBalancerPack_SessionAffinity_Cookie = "cookie" + LoadBalancerPack_SessionAffinity_IpCookie = "ip_cookie" + LoadBalancerPack_SessionAffinity_None = "none" +) + +// Constants associated with the LoadBalancerPack.SteeringPolicy property. +// steering policy. +const ( + LoadBalancerPack_SteeringPolicy_DynamicLatency = "dynamic_latency" + LoadBalancerPack_SteeringPolicy_Geo = "geo" + LoadBalancerPack_SteeringPolicy_Off = "off" + LoadBalancerPack_SteeringPolicy_Random = "random" +) + + +// UnmarshalLoadBalancerPack unmarshals an instance of LoadBalancerPack from the specified map of raw messages. 
+func UnmarshalLoadBalancerPack(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPack) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ttl", &obj.TTL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "fallback_pool", &obj.FallbackPool) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "default_pools", &obj.DefaultPools) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "region_pools", &obj.RegionPools) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "pop_pools", &obj.PopPools) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "proxied", &obj.Proxied) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "session_affinity", &obj.SessionAffinity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "steering_policy", &obj.SteeringPolicy) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancersResp : load balancer response. +type LoadBalancersResp struct { + // success response. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // loadbalancer pack. + Result *LoadBalancerPack `json:"result" validate:"required"` +} + + +// UnmarshalLoadBalancersResp unmarshals an instance of LoadBalancersResp from the specified map of raw messages. +func UnmarshalLoadBalancersResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancersResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalLoadBalancerPack) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResultInfo : result information. +type ResultInfo struct { + // page number. + Page *int64 `json:"page" validate:"required"` + + // per page count. + PerPage *int64 `json:"per_page" validate:"required"` + + // count. + Count *int64 `json:"count" validate:"required"` + + // total count. + TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalResultInfo unmarshals an instance of ResultInfo from the specified map of raw messages. 
+func UnmarshalResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/pageruleapiv1/page_rule_api_v1.go b/vendor/github.com/IBM/networking-go-sdk/pageruleapiv1/page_rule_api_v1.go new file mode 100644 index 00000000000..4964b545aba --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/pageruleapiv1/page_rule_api_v1.go @@ -0,0 +1,1750 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package pageruleapiv1 : Operations and models for the PageRuleApiV1 service +package pageruleapiv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// PageRuleApiV1 : This document describes CIS Pagerule API. +// +// Version: 1.0.0 +type PageRuleApiV1 struct { + Service *core.BaseService + + // instance id. + Crn *string + + // zone id. + ZoneID *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "page_rule_api" + +// PageRuleApiV1Options : Service options +type PageRuleApiV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // instance id. + Crn *string `validate:"required"` + + // zone id. + ZoneID *string `validate:"required"` +} + +// NewPageRuleApiV1UsingExternalConfig : constructs an instance of PageRuleApiV1 with passed in options and external configuration. +func NewPageRuleApiV1UsingExternalConfig(options *PageRuleApiV1Options) (pageRuleApi *PageRuleApiV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + pageRuleApi, err = NewPageRuleApiV1(options) + if err != nil { + return + } + + err = pageRuleApi.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = pageRuleApi.Service.SetServiceURL(options.URL) + } + return +} + +// NewPageRuleApiV1 : constructs an instance of PageRuleApiV1 with passed in options. 
+func NewPageRuleApiV1(options *PageRuleApiV1Options) (service *PageRuleApiV1, err error) {
+ serviceOptions := &core.ServiceOptions{
+ URL: DefaultServiceURL,
+ Authenticator: options.Authenticator,
+ }
+
+ err = core.ValidateStruct(options, "options")
+ if err != nil {
+ return
+ }
+
+ baseService, err := core.NewBaseService(serviceOptions)
+ if err != nil {
+ return
+ }
+
+ if options.URL != "" {
+ err = baseService.SetServiceURL(options.URL)
+ if err != nil {
+ return
+ }
+ }
+
+ service = &PageRuleApiV1{
+ Service: baseService,
+ Crn: options.Crn,
+ ZoneID: options.ZoneID,
+ }
+
+ return
+}
+
+// GetServiceURLForRegion returns the service URL to be used for the specified region
+func GetServiceURLForRegion(region string) (string, error) {
+ return "", fmt.Errorf("service does not support regional URLs")
+}
+
+// Clone makes a copy of "pageRuleApi" suitable for processing requests.
+func (pageRuleApi *PageRuleApiV1) Clone() *PageRuleApiV1 {
+ if core.IsNil(pageRuleApi) {
+ return nil
+ }
+ clone := *pageRuleApi
+ clone.Service = pageRuleApi.Service.Clone()
+ return &clone
+}
+
+// SetServiceURL sets the service URL
+func (pageRuleApi *PageRuleApiV1) SetServiceURL(url string) error {
+ return pageRuleApi.Service.SetServiceURL(url)
+}
+
+// GetServiceURL returns the service URL
+func (pageRuleApi *PageRuleApiV1) GetServiceURL() string {
+ return pageRuleApi.Service.GetServiceURL()
+}
+
+// SetDefaultHeaders sets HTTP headers to be sent in every request
+func (pageRuleApi *PageRuleApiV1) SetDefaultHeaders(headers http.Header) {
+ pageRuleApi.Service.SetDefaultHeaders(headers)
+}
+
+// SetEnableGzipCompression sets the service's EnableGzipCompression field
+func (pageRuleApi *PageRuleApiV1) SetEnableGzipCompression(enableGzip bool) {
+ pageRuleApi.Service.SetEnableGzipCompression(enableGzip)
+}
+
+// GetEnableGzipCompression returns the service's EnableGzipCompression field
+func (pageRuleApi *PageRuleApiV1) GetEnableGzipCompression() bool {
+ return pageRuleApi.Service.GetEnableGzipCompression()
+}
+
+// EnableRetries enables automatic retries for requests invoked for this service instance.
+// If either parameter is specified as 0, then a default value is used instead.
+func (pageRuleApi *PageRuleApiV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
+ pageRuleApi.Service.EnableRetries(maxRetries, maxRetryInterval)
+}
+
+// DisableRetries disables automatic retries for requests invoked for this service instance.
+func (pageRuleApi *PageRuleApiV1) DisableRetries() {
+ pageRuleApi.Service.DisableRetries()
+}
+
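A sketch of exercising the page rule client; it assumes the generated NewGetPageRuleOptions helper (the options constructors for this service are defined with the other options types in this file) and inspects only the HTTP status code, since the response model appears further below. The rule ID is a placeholder:

```go
package example

import (
	"fmt"

	"github.com/IBM/networking-go-sdk/pageruleapiv1"
)

// getPageRule is a sketch. NewGetPageRuleOptions is assumed to follow the
// same generated pattern as the other options constructors in this file,
// and api is an already-constructed client.
func getPageRule(api *pageruleapiv1.PageRuleApiV1, ruleID string) error {
	_, response, err := api.GetPageRule(api.NewGetPageRuleOptions(ruleID))
	if err != nil {
		return err
	}
	fmt.Printf("GetPageRule returned HTTP %d\n", response.StatusCode)
	return nil
}
```

+// GetPageRule : Get page rule
+// Get details of a page rule.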
+func (pageRuleApi *PageRuleApiV1) GetPageRule(getPageRuleOptions *GetPageRuleOptions) (result *PageRulesResponseWithoutResultInfo, response *core.DetailedResponse, err error) { + return pageRuleApi.GetPageRuleWithContext(context.Background(), getPageRuleOptions) +} + +// GetPageRuleWithContext is an alternate form of the GetPageRule method which supports a Context parameter +func (pageRuleApi *PageRuleApiV1) GetPageRuleWithContext(ctx context.Context, getPageRuleOptions *GetPageRuleOptions) (result *PageRulesResponseWithoutResultInfo, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getPageRuleOptions, "getPageRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getPageRuleOptions, "getPageRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *pageRuleApi.Crn, + "zone_id": *pageRuleApi.ZoneID, + "rule_id": *getPageRuleOptions.RuleID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pageRuleApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pageRuleApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/pagerules/{rule_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getPageRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("page_rule_api", "V1", "GetPageRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pageRuleApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPageRulesResponseWithoutResultInfo) + if err != nil { + return + } + response.Result = result + + return +} + +// ChangePageRule : Change page rule +// Change a page rule. 
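+//
+// Usage sketch (IDs and values are placeholders). Only the fields that are
+// set are sent, so this patches the status alone:
+//
+//   changeOptions := service.NewChangePageRuleOptions("rule-id")
+//   changeOptions.SetStatus("active")
+//   result, response, err := service.ChangePageRule(changeOptions)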
+func (pageRuleApi *PageRuleApiV1) ChangePageRule(changePageRuleOptions *ChangePageRuleOptions) (result *PageRulesResponseWithoutResultInfo, response *core.DetailedResponse, err error) { + return pageRuleApi.ChangePageRuleWithContext(context.Background(), changePageRuleOptions) +} + +// ChangePageRuleWithContext is an alternate form of the ChangePageRule method which supports a Context parameter +func (pageRuleApi *PageRuleApiV1) ChangePageRuleWithContext(ctx context.Context, changePageRuleOptions *ChangePageRuleOptions) (result *PageRulesResponseWithoutResultInfo, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(changePageRuleOptions, "changePageRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(changePageRuleOptions, "changePageRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *pageRuleApi.Crn, + "zone_id": *pageRuleApi.ZoneID, + "rule_id": *changePageRuleOptions.RuleID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pageRuleApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pageRuleApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/pagerules/{rule_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range changePageRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("page_rule_api", "V1", "ChangePageRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if changePageRuleOptions.Targets != nil { + body["targets"] = changePageRuleOptions.Targets + } + if changePageRuleOptions.Actions != nil { + body["actions"] = changePageRuleOptions.Actions + } + if changePageRuleOptions.Priority != nil { + body["priority"] = changePageRuleOptions.Priority + } + if changePageRuleOptions.Status != nil { + body["status"] = changePageRuleOptions.Status + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pageRuleApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPageRulesResponseWithoutResultInfo) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdatePageRule : Update page rule +// Replace a page rule. The final rule will exactly match the data passed with this request. 
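+//
+// Usage sketch (placeholders throughout; the "matches" operator is an
+// assumption, not taken from this PR). Because PUT replaces the rule, the
+// targets and actions should be supplied in full:
+//
+//   constraint, _ := service.NewTargetsItemConstraint("matches", "example.com/*")
+//   target, _ := service.NewTargetsItem("url", constraint)
+//   action, _ := service.NewPageRulesBodyActionsItemActionsSecurityLevel("security_level")
+//   action.Value = core.StringPtr("high")
+//   updateOptions := service.NewUpdatePageRuleOptions("rule-id")
+//   updateOptions.SetTargets([]TargetsItem{*target})
+//   updateOptions.SetActions([]PageRulesBodyActionsItemIntf{action})
+//   result, response, err := service.UpdatePageRule(updateOptions)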
+func (pageRuleApi *PageRuleApiV1) UpdatePageRule(updatePageRuleOptions *UpdatePageRuleOptions) (result *PageRulesResponseWithoutResultInfo, response *core.DetailedResponse, err error) { + return pageRuleApi.UpdatePageRuleWithContext(context.Background(), updatePageRuleOptions) +} + +// UpdatePageRuleWithContext is an alternate form of the UpdatePageRule method which supports a Context parameter +func (pageRuleApi *PageRuleApiV1) UpdatePageRuleWithContext(ctx context.Context, updatePageRuleOptions *UpdatePageRuleOptions) (result *PageRulesResponseWithoutResultInfo, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updatePageRuleOptions, "updatePageRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updatePageRuleOptions, "updatePageRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *pageRuleApi.Crn, + "zone_id": *pageRuleApi.ZoneID, + "rule_id": *updatePageRuleOptions.RuleID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pageRuleApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pageRuleApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/pagerules/{rule_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updatePageRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("page_rule_api", "V1", "UpdatePageRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updatePageRuleOptions.Targets != nil { + body["targets"] = updatePageRuleOptions.Targets + } + if updatePageRuleOptions.Actions != nil { + body["actions"] = updatePageRuleOptions.Actions + } + if updatePageRuleOptions.Priority != nil { + body["priority"] = updatePageRuleOptions.Priority + } + if updatePageRuleOptions.Status != nil { + body["status"] = updatePageRuleOptions.Status + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pageRuleApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPageRulesResponseWithoutResultInfo) + if err != nil { + return + } + response.Result = result + + return +} + +// DeletePageRule : Delete page rule +// Delete a page rule. 
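+//
+// Usage sketch (the rule ID is a placeholder):
+//
+//   deleteOptions := service.NewDeletePageRuleOptions("rule-id")
+//   result, response, err := service.DeletePageRule(deleteOptions)
+//   if err == nil && result.Result != nil {
+//       fmt.Println("deleted:", *result.Result.ID)
+//   }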
+func (pageRuleApi *PageRuleApiV1) DeletePageRule(deletePageRuleOptions *DeletePageRuleOptions) (result *PageRulesDeleteResponse, response *core.DetailedResponse, err error) { + return pageRuleApi.DeletePageRuleWithContext(context.Background(), deletePageRuleOptions) +} + +// DeletePageRuleWithContext is an alternate form of the DeletePageRule method which supports a Context parameter +func (pageRuleApi *PageRuleApiV1) DeletePageRuleWithContext(ctx context.Context, deletePageRuleOptions *DeletePageRuleOptions) (result *PageRulesDeleteResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deletePageRuleOptions, "deletePageRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deletePageRuleOptions, "deletePageRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *pageRuleApi.Crn, + "zone_id": *pageRuleApi.ZoneID, + "rule_id": *deletePageRuleOptions.RuleID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pageRuleApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pageRuleApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/pagerules/{rule_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deletePageRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("page_rule_api", "V1", "DeletePageRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pageRuleApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPageRulesDeleteResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// ListPageRules : List page rules +// List page rules. 
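+//
+// Usage sketch; the filter values mirror the documented query parameters:
+//
+//   listOptions := service.NewListPageRulesOptions()
+//   listOptions.SetStatus("active")
+//   listOptions.SetOrder("priority")
+//   listOptions.SetDirection("desc")
+//   result, response, err := service.ListPageRules(listOptions)
+//   if err == nil {
+//       for _, rule := range result.Result {
+//           fmt.Println(*rule.ID, *rule.Priority)
+//       }
+//   }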
+func (pageRuleApi *PageRuleApiV1) ListPageRules(listPageRulesOptions *ListPageRulesOptions) (result *PageRulesResponseListAll, response *core.DetailedResponse, err error) { + return pageRuleApi.ListPageRulesWithContext(context.Background(), listPageRulesOptions) +} + +// ListPageRulesWithContext is an alternate form of the ListPageRules method which supports a Context parameter +func (pageRuleApi *PageRuleApiV1) ListPageRulesWithContext(ctx context.Context, listPageRulesOptions *ListPageRulesOptions) (result *PageRulesResponseListAll, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listPageRulesOptions, "listPageRulesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *pageRuleApi.Crn, + "zone_id": *pageRuleApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pageRuleApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pageRuleApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/pagerules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listPageRulesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("page_rule_api", "V1", "ListPageRules") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listPageRulesOptions.Status != nil { + builder.AddQuery("status", fmt.Sprint(*listPageRulesOptions.Status)) + } + if listPageRulesOptions.Order != nil { + builder.AddQuery("order", fmt.Sprint(*listPageRulesOptions.Order)) + } + if listPageRulesOptions.Direction != nil { + builder.AddQuery("direction", fmt.Sprint(*listPageRulesOptions.Direction)) + } + if listPageRulesOptions.Match != nil { + builder.AddQuery("match", fmt.Sprint(*listPageRulesOptions.Match)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pageRuleApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPageRulesResponseListAll) + if err != nil { + return + } + response.Result = result + + return +} + +// CreatePageRule : Create page rule +// Create a page rule. 
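+//
+// Usage sketch (URLs are placeholders). Per the conflict notes further down
+// in this file, "forwarding_url" is combined with no other action:
+//
+//   constraint, _ := service.NewTargetsItemConstraint("matches", "example.com/old/*")
+//   target, _ := service.NewTargetsItem("url", constraint)
+//   fwd, _ := service.NewPageRulesBodyActionsItemActionsForwardingURL("forwarding_url")
+//   fwd.Value = &ActionsForwardingUrlValue{
+//       URL:        core.StringPtr("https://example.com/new/$1"),
+//       StatusCode: core.Int64Ptr(301),
+//   }
+//   createOptions := service.NewCreatePageRuleOptions()
+//   createOptions.SetTargets([]TargetsItem{*target})
+//   createOptions.SetActions([]PageRulesBodyActionsItemIntf{fwd})
+//   result, response, err := service.CreatePageRule(createOptions)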
+func (pageRuleApi *PageRuleApiV1) CreatePageRule(createPageRuleOptions *CreatePageRuleOptions) (result *PageRulesResponseWithoutResultInfo, response *core.DetailedResponse, err error) { + return pageRuleApi.CreatePageRuleWithContext(context.Background(), createPageRuleOptions) +} + +// CreatePageRuleWithContext is an alternate form of the CreatePageRule method which supports a Context parameter +func (pageRuleApi *PageRuleApiV1) CreatePageRuleWithContext(ctx context.Context, createPageRuleOptions *CreatePageRuleOptions) (result *PageRulesResponseWithoutResultInfo, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createPageRuleOptions, "createPageRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *pageRuleApi.Crn, + "zone_id": *pageRuleApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pageRuleApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pageRuleApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/pagerules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createPageRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("page_rule_api", "V1", "CreatePageRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createPageRuleOptions.Targets != nil { + body["targets"] = createPageRuleOptions.Targets + } + if createPageRuleOptions.Actions != nil { + body["actions"] = createPageRuleOptions.Actions + } + if createPageRuleOptions.Priority != nil { + body["priority"] = createPageRuleOptions.Priority + } + if createPageRuleOptions.Status != nil { + body["status"] = createPageRuleOptions.Status + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pageRuleApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPageRulesResponseWithoutResultInfo) + if err != nil { + return + } + response.Result = result + + return +} + +// ActionsForwardingUrlValue : value. +type ActionsForwardingUrlValue struct { + // url. + URL *string `json:"url,omitempty"` + + // status code. + StatusCode *int64 `json:"status_code,omitempty"` +} + + +// UnmarshalActionsForwardingUrlValue unmarshals an instance of ActionsForwardingUrlValue from the specified map of raw messages. +func UnmarshalActionsForwardingUrlValue(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ActionsForwardingUrlValue) + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status_code", &obj.StatusCode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ChangePageRuleOptions : The ChangePageRule options. +type ChangePageRuleOptions struct { + // rule id. + RuleID *string `json:"rule_id" validate:"required,ne="` + + // targets. + Targets []TargetsItem `json:"targets,omitempty"` + + // actions. + Actions []PageRulesBodyActionsItemIntf `json:"actions,omitempty"` + + // priority. 
+ Priority *int64 `json:"priority,omitempty"` + + // status. + Status *string `json:"status,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewChangePageRuleOptions : Instantiate ChangePageRuleOptions +func (*PageRuleApiV1) NewChangePageRuleOptions(ruleID string) *ChangePageRuleOptions { + return &ChangePageRuleOptions{ + RuleID: core.StringPtr(ruleID), + } +} + +// SetRuleID : Allow user to set RuleID +func (options *ChangePageRuleOptions) SetRuleID(ruleID string) *ChangePageRuleOptions { + options.RuleID = core.StringPtr(ruleID) + return options +} + +// SetTargets : Allow user to set Targets +func (options *ChangePageRuleOptions) SetTargets(targets []TargetsItem) *ChangePageRuleOptions { + options.Targets = targets + return options +} + +// SetActions : Allow user to set Actions +func (options *ChangePageRuleOptions) SetActions(actions []PageRulesBodyActionsItemIntf) *ChangePageRuleOptions { + options.Actions = actions + return options +} + +// SetPriority : Allow user to set Priority +func (options *ChangePageRuleOptions) SetPriority(priority int64) *ChangePageRuleOptions { + options.Priority = core.Int64Ptr(priority) + return options +} + +// SetStatus : Allow user to set Status +func (options *ChangePageRuleOptions) SetStatus(status string) *ChangePageRuleOptions { + options.Status = core.StringPtr(status) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ChangePageRuleOptions) SetHeaders(param map[string]string) *ChangePageRuleOptions { + options.Headers = param + return options +} + +// CreatePageRuleOptions : The CreatePageRule options. +type CreatePageRuleOptions struct { + // targets. + Targets []TargetsItem `json:"targets,omitempty"` + + // actions. + Actions []PageRulesBodyActionsItemIntf `json:"actions,omitempty"` + + // priority. + Priority *int64 `json:"priority,omitempty"` + + // status. + Status *string `json:"status,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreatePageRuleOptions : Instantiate CreatePageRuleOptions +func (*PageRuleApiV1) NewCreatePageRuleOptions() *CreatePageRuleOptions { + return &CreatePageRuleOptions{} +} + +// SetTargets : Allow user to set Targets +func (options *CreatePageRuleOptions) SetTargets(targets []TargetsItem) *CreatePageRuleOptions { + options.Targets = targets + return options +} + +// SetActions : Allow user to set Actions +func (options *CreatePageRuleOptions) SetActions(actions []PageRulesBodyActionsItemIntf) *CreatePageRuleOptions { + options.Actions = actions + return options +} + +// SetPriority : Allow user to set Priority +func (options *CreatePageRuleOptions) SetPriority(priority int64) *CreatePageRuleOptions { + options.Priority = core.Int64Ptr(priority) + return options +} + +// SetStatus : Allow user to set Status +func (options *CreatePageRuleOptions) SetStatus(status string) *CreatePageRuleOptions { + options.Status = core.StringPtr(status) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreatePageRuleOptions) SetHeaders(param map[string]string) *CreatePageRuleOptions { + options.Headers = param + return options +} + +// DeletePageRuleOptions : The DeletePageRule options. +type DeletePageRuleOptions struct { + // rule id. 
+ RuleID *string `json:"rule_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeletePageRuleOptions : Instantiate DeletePageRuleOptions +func (*PageRuleApiV1) NewDeletePageRuleOptions(ruleID string) *DeletePageRuleOptions { + return &DeletePageRuleOptions{ + RuleID: core.StringPtr(ruleID), + } +} + +// SetRuleID : Allow user to set RuleID +func (options *DeletePageRuleOptions) SetRuleID(ruleID string) *DeletePageRuleOptions { + options.RuleID = core.StringPtr(ruleID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeletePageRuleOptions) SetHeaders(param map[string]string) *DeletePageRuleOptions { + options.Headers = param + return options +} + +// GetPageRuleOptions : The GetPageRule options. +type GetPageRuleOptions struct { + // rule id. + RuleID *string `json:"rule_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetPageRuleOptions : Instantiate GetPageRuleOptions +func (*PageRuleApiV1) NewGetPageRuleOptions(ruleID string) *GetPageRuleOptions { + return &GetPageRuleOptions{ + RuleID: core.StringPtr(ruleID), + } +} + +// SetRuleID : Allow user to set RuleID +func (options *GetPageRuleOptions) SetRuleID(ruleID string) *GetPageRuleOptions { + options.RuleID = core.StringPtr(ruleID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetPageRuleOptions) SetHeaders(param map[string]string) *GetPageRuleOptions { + options.Headers = param + return options +} + +// ListPageRulesOptions : The ListPageRules options. +type ListPageRulesOptions struct { + // default value: disabled. valid values: active, disabled. + Status *string `json:"status,omitempty"` + + // default value: priority. valid values: status, priority. + Order *string `json:"order,omitempty"` + + // default value: desc. valid values: asc, desc. + Direction *string `json:"direction,omitempty"` + + // default value: all. valid values: any, all. 
+	Match *string `json:"match,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListPageRulesOptions : Instantiate ListPageRulesOptions
+func (*PageRuleApiV1) NewListPageRulesOptions() *ListPageRulesOptions {
+	return &ListPageRulesOptions{}
+}
+
+// SetStatus : Allow user to set Status
+func (options *ListPageRulesOptions) SetStatus(status string) *ListPageRulesOptions {
+	options.Status = core.StringPtr(status)
+	return options
+}
+
+// SetOrder : Allow user to set Order
+func (options *ListPageRulesOptions) SetOrder(order string) *ListPageRulesOptions {
+	options.Order = core.StringPtr(order)
+	return options
+}
+
+// SetDirection : Allow user to set Direction
+func (options *ListPageRulesOptions) SetDirection(direction string) *ListPageRulesOptions {
+	options.Direction = core.StringPtr(direction)
+	return options
+}
+
+// SetMatch : Allow user to set Match
+func (options *ListPageRulesOptions) SetMatch(match string) *ListPageRulesOptions {
+	options.Match = core.StringPtr(match)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListPageRulesOptions) SetHeaders(param map[string]string) *ListPageRulesOptions {
+	options.Headers = param
+	return options
+}
+
+// PageRulesBodyActionsItem : PageRulesBodyActionsItem struct
+// Models which "extend" this model:
+// - PageRulesBodyActionsItemActionsSecurity
+// - PageRulesBodyActionsItemActionsSecurityOptions
+// - PageRulesBodyActionsItemActionsSsl
+// - PageRulesBodyActionsItemActionsTTL
+// - PageRulesBodyActionsItemActionsSecurityLevel
+// - PageRulesBodyActionsItemActionsCacheLevel
+// - PageRulesBodyActionsItemActionsEdgeCacheTTL
+// - PageRulesBodyActionsItemActionsForwardingURL
+// - PageRulesBodyActionsItemActionsBypassCacheOnCookie
+type PageRulesBodyActionsItem struct {
+	// " Page rule action field map from UI to API
+	// CF-UI map API,
+	// 'Disable Security' to 'disable_security',
+	// 'Browser Integrity Check' to 'browser_check',
+	// 'Server Side Excludes' to 'server_side_exclude',
+	// 'SSL' to 'ssl',
+	// 'Browser Cache TTL' to 'browser_cache_ttl',
+	// 'Always Online' to 'always_online',
+	// 'Security Level' to 'security_level',
+	// 'Cache Level' to 'cache_level',
+	// 'Edge Cache TTL' to 'edge_cache_ttl',
+	// 'IP Geolocation Header' to 'ip_geolocation',
+	// 'Email Obfuscation' to 'email_obfuscation',
+	// 'Automatic HTTPS Rewrites' to 'automatic_https_rewrites',
+	// 'Opportunistic Encryption' to 'opportunistic_encryption',
+	// 'Forwarding URL' to 'forwarding_url',
+	// 'Always Use HTTPS' to 'always_use_https',
+	// 'Origin Cache Control' to 'explicit_cache_control',
+	// 'Bypass Cache on Cookie' to 'bypass_cache_on_cookie',
+	// 'Cache Deception Armor' to 'cache_deception_armor',
+	// 'WAF' to 'waf'
+	//
+	// Page rule conflict list
+	// "forwarding_url" with all other settings for the rules
+	// "always_use_https" with all other settings for the rules
+	// "disable_security" with "email_obfuscation", "server_side_exclude", "waf"
+	// ".
+	ID *string `json:"id" validate:"required"`
+
+	// value.
+	Value interface{} `json:"value,omitempty"`
+}
+
+// Constants associated with the PageRulesBodyActionsItem.ID property.
+// " Page rule action field map from UI to API +// CF-UI map API, +// 'Disable Security' to 'disable_security', +// 'Browser Integrity Check' to 'browser_check', +// 'Server Side Excludes' to 'server_side_exclude', +// 'SSL' to 'ssl', +// 'Browser Cache TTL' to 'browser_cache_ttl', +// 'Always Online' to 'always_online', +// 'Security Level' to 'security_level', +// 'Cache Level' to 'cache_level', +// 'Edge Cache TTL' to 'edge_cache_ttl' +// 'IP Geolocation Header' to 'ip_geolocation, +// 'Email Obfuscation' to 'email_obfuscation', +// 'Automatic HTTPS Rewrites' to 'automatic_https_rewrites', +// 'Opportunistic Encryption' to 'opportunistic_encryption', +// 'Forwarding URL' to 'forwarding_url', +// 'Always Use HTTPS' to 'always_use_https', +// 'Origin Cache Control' to 'explicit_cache_control', +// 'Bypass Cache on Cookie' to 'bypass_cache_on_cookie', +// 'Cache Deception Armor' to 'cache_deception_armor', +// 'WAF' to 'waf' +// +// Page rule conflict list +// "forwarding_url" with all other settings for the rules +// "always_use_https" with all other settings for the rules +// "disable_security" with "email_obfuscation", "server_side_exclude", "waf" +// ". +const ( + PageRulesBodyActionsItem_ID_AlwaysOnline = "always_online" + PageRulesBodyActionsItem_ID_AlwaysUseHttps = "always_use_https" + PageRulesBodyActionsItem_ID_AutomaticHttpsRewrites = "automatic_https_rewrites" + PageRulesBodyActionsItem_ID_BrowserCacheTTL = "browser_cache_ttl" + PageRulesBodyActionsItem_ID_BrowserCheck = "browser_check" + PageRulesBodyActionsItem_ID_BypassCacheOnCookie = "bypass_cache_on_cookie" + PageRulesBodyActionsItem_ID_CacheDeceptionArmor = "cache_deception_armor" + PageRulesBodyActionsItem_ID_CacheLevel = "cache_level" + PageRulesBodyActionsItem_ID_DisableSecurity = "disable_security" + PageRulesBodyActionsItem_ID_EdgeCacheTTL = "edge_cache_ttl" + PageRulesBodyActionsItem_ID_EmailObfuscation = "email_obfuscation" + PageRulesBodyActionsItem_ID_ExplicitCacheControl = "explicit_cache_control" + PageRulesBodyActionsItem_ID_ForwardingURL = "forwarding_url" + PageRulesBodyActionsItem_ID_IpGeolocation = "ip_geolocation" + PageRulesBodyActionsItem_ID_OpportunisticEncryption = "opportunistic_encryption" + PageRulesBodyActionsItem_ID_SecurityLevel = "security_level" + PageRulesBodyActionsItem_ID_ServerSideExclude = "server_side_exclude" + PageRulesBodyActionsItem_ID_Ssl = "ssl" + PageRulesBodyActionsItem_ID_Waf = "waf" +) + +func (*PageRulesBodyActionsItem) isaPageRulesBodyActionsItem() bool { + return true +} + +type PageRulesBodyActionsItemIntf interface { + isaPageRulesBodyActionsItem() bool +} + +// UnmarshalPageRulesBodyActionsItem unmarshals an instance of PageRulesBodyActionsItem from the specified map of raw messages. +func UnmarshalPageRulesBodyActionsItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItem) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesDeleteResponseResult : result. +type PageRulesDeleteResponseResult struct { + // identifier. + ID *string `json:"id" validate:"required"` +} + + +// UnmarshalPageRulesDeleteResponseResult unmarshals an instance of PageRulesDeleteResponseResult from the specified map of raw messages. 
+func UnmarshalPageRulesDeleteResponseResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesDeleteResponseResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TargetsItem : items. +type TargetsItem struct { + // target. + Target *string `json:"target" validate:"required"` + + // constraint. + Constraint *TargetsItemConstraint `json:"constraint" validate:"required"` +} + + +// NewTargetsItem : Instantiate TargetsItem (Generic Model Constructor) +func (*PageRuleApiV1) NewTargetsItem(target string, constraint *TargetsItemConstraint) (model *TargetsItem, err error) { + model = &TargetsItem{ + Target: core.StringPtr(target), + Constraint: constraint, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalTargetsItem unmarshals an instance of TargetsItem from the specified map of raw messages. +func UnmarshalTargetsItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TargetsItem) + err = core.UnmarshalPrimitive(m, "target", &obj.Target) + if err != nil { + return + } + err = core.UnmarshalModel(m, "constraint", &obj.Constraint, UnmarshalTargetsItemConstraint) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TargetsItemConstraint : constraint. +type TargetsItemConstraint struct { + // operator. + Operator *string `json:"operator" validate:"required"` + + // value. + Value *string `json:"value" validate:"required"` +} + + +// NewTargetsItemConstraint : Instantiate TargetsItemConstraint (Generic Model Constructor) +func (*PageRuleApiV1) NewTargetsItemConstraint(operator string, value string) (model *TargetsItemConstraint, err error) { + model = &TargetsItemConstraint{ + Operator: core.StringPtr(operator), + Value: core.StringPtr(value), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalTargetsItemConstraint unmarshals an instance of TargetsItemConstraint from the specified map of raw messages. +func UnmarshalTargetsItemConstraint(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TargetsItemConstraint) + err = core.UnmarshalPrimitive(m, "operator", &obj.Operator) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdatePageRuleOptions : The UpdatePageRule options. +type UpdatePageRuleOptions struct { + // rule id. + RuleID *string `json:"rule_id" validate:"required,ne="` + + // targets. + Targets []TargetsItem `json:"targets,omitempty"` + + // actions. + Actions []PageRulesBodyActionsItemIntf `json:"actions,omitempty"` + + // priority. + Priority *int64 `json:"priority,omitempty"` + + // status. 
+ Status *string `json:"status,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdatePageRuleOptions : Instantiate UpdatePageRuleOptions +func (*PageRuleApiV1) NewUpdatePageRuleOptions(ruleID string) *UpdatePageRuleOptions { + return &UpdatePageRuleOptions{ + RuleID: core.StringPtr(ruleID), + } +} + +// SetRuleID : Allow user to set RuleID +func (options *UpdatePageRuleOptions) SetRuleID(ruleID string) *UpdatePageRuleOptions { + options.RuleID = core.StringPtr(ruleID) + return options +} + +// SetTargets : Allow user to set Targets +func (options *UpdatePageRuleOptions) SetTargets(targets []TargetsItem) *UpdatePageRuleOptions { + options.Targets = targets + return options +} + +// SetActions : Allow user to set Actions +func (options *UpdatePageRuleOptions) SetActions(actions []PageRulesBodyActionsItemIntf) *UpdatePageRuleOptions { + options.Actions = actions + return options +} + +// SetPriority : Allow user to set Priority +func (options *UpdatePageRuleOptions) SetPriority(priority int64) *UpdatePageRuleOptions { + options.Priority = core.Int64Ptr(priority) + return options +} + +// SetStatus : Allow user to set Status +func (options *UpdatePageRuleOptions) SetStatus(status string) *UpdatePageRuleOptions { + options.Status = core.StringPtr(status) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdatePageRuleOptions) SetHeaders(param map[string]string) *UpdatePageRuleOptions { + options.Headers = param + return options +} + +// PageRulesDeleteResponse : page rules delete response. +type PageRulesDeleteResponse struct { + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // result. + Result *PageRulesDeleteResponseResult `json:"result" validate:"required"` +} + + +// UnmarshalPageRulesDeleteResponse unmarshals an instance of PageRulesDeleteResponse from the specified map of raw messages. +func UnmarshalPageRulesDeleteResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesDeleteResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalPageRulesDeleteResponseResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesResponseListAll : page rule response list all. +type PageRulesResponseListAll struct { + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // result. + Result []PageRuleResult `json:"result" validate:"required"` +} + + +// UnmarshalPageRulesResponseListAll unmarshals an instance of PageRulesResponseListAll from the specified map of raw messages. 
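+//
+// The raw envelope it expects looks like (illustrative values only):
+//
+//   {"success": true, "errors": [], "messages": [], "result": [ ... ]}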
+func UnmarshalPageRulesResponseListAll(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesResponseListAll) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalPageRuleResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesResponseWithoutResultInfo : page rule response without result information. +type PageRulesResponseWithoutResultInfo struct { + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // page rule result. + Result *PageRuleResult `json:"result" validate:"required"` +} + + +// UnmarshalPageRulesResponseWithoutResultInfo unmarshals an instance of PageRulesResponseWithoutResultInfo from the specified map of raw messages. +func UnmarshalPageRulesResponseWithoutResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesResponseWithoutResultInfo) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalPageRuleResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRuleResult : page rule result. +type PageRuleResult struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // targets. + Targets []TargetsItem `json:"targets" validate:"required"` + + // actions. + Actions []PageRulesBodyActionsItemIntf `json:"actions" validate:"required"` + + // priority. + Priority *int64 `json:"priority" validate:"required"` + + // status. + Status *string `json:"status" validate:"required"` + + // modified date. + ModifiedOn *string `json:"modified_on" validate:"required"` + + // created date. + CreatedOn *string `json:"created_on" validate:"required"` +} + + +// UnmarshalPageRuleResult unmarshals an instance of PageRuleResult from the specified map of raw messages. +func UnmarshalPageRuleResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRuleResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "targets", &obj.Targets, UnmarshalTargetsItem) + if err != nil { + return + } + err = core.UnmarshalModel(m, "actions", &obj.Actions, UnmarshalPageRulesBodyActionsItem) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesBodyActionsItemActionsBypassCacheOnCookie : bypass cache on cookie actions. 
+// This model "extends" PageRulesBodyActionsItem +type PageRulesBodyActionsItemActionsBypassCacheOnCookie struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. + Value *string `json:"value,omitempty"` +} + +// Constants associated with the PageRulesBodyActionsItemActionsBypassCacheOnCookie.ID property. +// identifier. +const ( + PageRulesBodyActionsItemActionsBypassCacheOnCookie_ID_BypassCacheOnCookie = "bypass_cache_on_cookie" +) + + +// NewPageRulesBodyActionsItemActionsBypassCacheOnCookie : Instantiate PageRulesBodyActionsItemActionsBypassCacheOnCookie (Generic Model Constructor) +func (*PageRuleApiV1) NewPageRulesBodyActionsItemActionsBypassCacheOnCookie(id string) (model *PageRulesBodyActionsItemActionsBypassCacheOnCookie, err error) { + model = &PageRulesBodyActionsItemActionsBypassCacheOnCookie{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PageRulesBodyActionsItemActionsBypassCacheOnCookie) isaPageRulesBodyActionsItem() bool { + return true +} + +// UnmarshalPageRulesBodyActionsItemActionsBypassCacheOnCookie unmarshals an instance of PageRulesBodyActionsItemActionsBypassCacheOnCookie from the specified map of raw messages. +func UnmarshalPageRulesBodyActionsItemActionsBypassCacheOnCookie(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItemActionsBypassCacheOnCookie) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesBodyActionsItemActionsCacheLevel : cache level actions. +// This model "extends" PageRulesBodyActionsItem +type PageRulesBodyActionsItemActionsCacheLevel struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. + Value *string `json:"value,omitempty"` +} + +// Constants associated with the PageRulesBodyActionsItemActionsCacheLevel.ID property. +// identifier. +const ( + PageRulesBodyActionsItemActionsCacheLevel_ID_CacheLevel = "cache_level" +) + +// Constants associated with the PageRulesBodyActionsItemActionsCacheLevel.Value property. +// value. +const ( + PageRulesBodyActionsItemActionsCacheLevel_Value_Aggressive = "aggressive" + PageRulesBodyActionsItemActionsCacheLevel_Value_Basic = "basic" + PageRulesBodyActionsItemActionsCacheLevel_Value_Bypass = "bypass" + PageRulesBodyActionsItemActionsCacheLevel_Value_CacheEverything = "cache_everything" + PageRulesBodyActionsItemActionsCacheLevel_Value_Simplified = "simplified" +) + + +// NewPageRulesBodyActionsItemActionsCacheLevel : Instantiate PageRulesBodyActionsItemActionsCacheLevel (Generic Model Constructor) +func (*PageRuleApiV1) NewPageRulesBodyActionsItemActionsCacheLevel(id string) (model *PageRulesBodyActionsItemActionsCacheLevel, err error) { + model = &PageRulesBodyActionsItemActionsCacheLevel{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PageRulesBodyActionsItemActionsCacheLevel) isaPageRulesBodyActionsItem() bool { + return true +} + +// UnmarshalPageRulesBodyActionsItemActionsCacheLevel unmarshals an instance of PageRulesBodyActionsItemActionsCacheLevel from the specified map of raw messages. 
+func UnmarshalPageRulesBodyActionsItemActionsCacheLevel(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItemActionsCacheLevel) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesBodyActionsItemActionsEdgeCacheTTL : edge cache ttl actions. +// This model "extends" PageRulesBodyActionsItem +type PageRulesBodyActionsItemActionsEdgeCacheTTL struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // ttl value. + Value *int64 `json:"value,omitempty"` +} + +// Constants associated with the PageRulesBodyActionsItemActionsEdgeCacheTTL.ID property. +// identifier. +const ( + PageRulesBodyActionsItemActionsEdgeCacheTTL_ID_EdgeCacheTTL = "edge_cache_ttl" +) + + +// NewPageRulesBodyActionsItemActionsEdgeCacheTTL : Instantiate PageRulesBodyActionsItemActionsEdgeCacheTTL (Generic Model Constructor) +func (*PageRuleApiV1) NewPageRulesBodyActionsItemActionsEdgeCacheTTL(id string) (model *PageRulesBodyActionsItemActionsEdgeCacheTTL, err error) { + model = &PageRulesBodyActionsItemActionsEdgeCacheTTL{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PageRulesBodyActionsItemActionsEdgeCacheTTL) isaPageRulesBodyActionsItem() bool { + return true +} + +// UnmarshalPageRulesBodyActionsItemActionsEdgeCacheTTL unmarshals an instance of PageRulesBodyActionsItemActionsEdgeCacheTTL from the specified map of raw messages. +func UnmarshalPageRulesBodyActionsItemActionsEdgeCacheTTL(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItemActionsEdgeCacheTTL) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesBodyActionsItemActionsForwardingURL : forwarding url actions. +// This model "extends" PageRulesBodyActionsItem +type PageRulesBodyActionsItemActionsForwardingURL struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. + Value *ActionsForwardingUrlValue `json:"value,omitempty"` +} + +// Constants associated with the PageRulesBodyActionsItemActionsForwardingURL.ID property. +// identifier. +const ( + PageRulesBodyActionsItemActionsForwardingURL_ID_ForwardingURL = "forwarding_url" +) + + +// NewPageRulesBodyActionsItemActionsForwardingURL : Instantiate PageRulesBodyActionsItemActionsForwardingURL (Generic Model Constructor) +func (*PageRuleApiV1) NewPageRulesBodyActionsItemActionsForwardingURL(id string) (model *PageRulesBodyActionsItemActionsForwardingURL, err error) { + model = &PageRulesBodyActionsItemActionsForwardingURL{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PageRulesBodyActionsItemActionsForwardingURL) isaPageRulesBodyActionsItem() bool { + return true +} + +// UnmarshalPageRulesBodyActionsItemActionsForwardingURL unmarshals an instance of PageRulesBodyActionsItemActionsForwardingURL from the specified map of raw messages. 
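+//
+// For example, it accepts a raw action such as (URL and status code are
+// illustrative):
+//
+//   {"id": "forwarding_url", "value": {"url": "https://example.com/$1", "status_code": 301}}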
+func UnmarshalPageRulesBodyActionsItemActionsForwardingURL(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItemActionsForwardingURL) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "value", &obj.Value, UnmarshalActionsForwardingUrlValue) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesBodyActionsItemActionsSecurity : security actions. +// This model "extends" PageRulesBodyActionsItem +type PageRulesBodyActionsItemActionsSecurity struct { + // value. + Value interface{} `json:"value,omitempty"` + + // identifier. + ID *string `json:"id" validate:"required"` +} + +// Constants associated with the PageRulesBodyActionsItemActionsSecurity.ID property. +// identifier. +const ( + PageRulesBodyActionsItemActionsSecurity_ID_AlwaysUseHttps = "always_use_https" + PageRulesBodyActionsItemActionsSecurity_ID_DisableSecurity = "disable_security" +) + + +// NewPageRulesBodyActionsItemActionsSecurity : Instantiate PageRulesBodyActionsItemActionsSecurity (Generic Model Constructor) +func (*PageRuleApiV1) NewPageRulesBodyActionsItemActionsSecurity(id string) (model *PageRulesBodyActionsItemActionsSecurity, err error) { + model = &PageRulesBodyActionsItemActionsSecurity{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PageRulesBodyActionsItemActionsSecurity) isaPageRulesBodyActionsItem() bool { + return true +} + +// UnmarshalPageRulesBodyActionsItemActionsSecurity unmarshals an instance of PageRulesBodyActionsItemActionsSecurity from the specified map of raw messages. +func UnmarshalPageRulesBodyActionsItemActionsSecurity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItemActionsSecurity) + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesBodyActionsItemActionsSecurityLevel : security level actions. +// This model "extends" PageRulesBodyActionsItem +type PageRulesBodyActionsItemActionsSecurityLevel struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. + Value *string `json:"value,omitempty"` +} + +// Constants associated with the PageRulesBodyActionsItemActionsSecurityLevel.ID property. +// identifier. +const ( + PageRulesBodyActionsItemActionsSecurityLevel_ID_SecurityLevel = "security_level" +) + +// Constants associated with the PageRulesBodyActionsItemActionsSecurityLevel.Value property. +// value. 
+const ( + PageRulesBodyActionsItemActionsSecurityLevel_Value_EssentiallyOff = "essentially_off" + PageRulesBodyActionsItemActionsSecurityLevel_Value_High = "high" + PageRulesBodyActionsItemActionsSecurityLevel_Value_Low = "low" + PageRulesBodyActionsItemActionsSecurityLevel_Value_Medium = "medium" + PageRulesBodyActionsItemActionsSecurityLevel_Value_Off = "off" + PageRulesBodyActionsItemActionsSecurityLevel_Value_UnderAttack = "under_attack" +) + + +// NewPageRulesBodyActionsItemActionsSecurityLevel : Instantiate PageRulesBodyActionsItemActionsSecurityLevel (Generic Model Constructor) +func (*PageRuleApiV1) NewPageRulesBodyActionsItemActionsSecurityLevel(id string) (model *PageRulesBodyActionsItemActionsSecurityLevel, err error) { + model = &PageRulesBodyActionsItemActionsSecurityLevel{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PageRulesBodyActionsItemActionsSecurityLevel) isaPageRulesBodyActionsItem() bool { + return true +} + +// UnmarshalPageRulesBodyActionsItemActionsSecurityLevel unmarshals an instance of PageRulesBodyActionsItemActionsSecurityLevel from the specified map of raw messages. +func UnmarshalPageRulesBodyActionsItemActionsSecurityLevel(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItemActionsSecurityLevel) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesBodyActionsItemActionsSecurityOptions : security options. +// This model "extends" PageRulesBodyActionsItem +type PageRulesBodyActionsItemActionsSecurityOptions struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. + Value *string `json:"value,omitempty"` +} + +// Constants associated with the PageRulesBodyActionsItemActionsSecurityOptions.ID property. +// identifier. +const ( + PageRulesBodyActionsItemActionsSecurityOptions_ID_AlwaysOnline = "always_online" + PageRulesBodyActionsItemActionsSecurityOptions_ID_AutomaticHttpsRewrites = "automatic_https_rewrites" + PageRulesBodyActionsItemActionsSecurityOptions_ID_BrowserCheck = "browser_check" + PageRulesBodyActionsItemActionsSecurityOptions_ID_CacheDeceptionArmor = "cache_deception_armor" + PageRulesBodyActionsItemActionsSecurityOptions_ID_EmailObfuscation = "email_obfuscation" + PageRulesBodyActionsItemActionsSecurityOptions_ID_ExplicitCacheControl = "explicit_cache_control" + PageRulesBodyActionsItemActionsSecurityOptions_ID_IpGeolocation = "ip_geolocation" + PageRulesBodyActionsItemActionsSecurityOptions_ID_OpportunisticEncryption = "opportunistic_encryption" + PageRulesBodyActionsItemActionsSecurityOptions_ID_ServerSideExclude = "server_side_exclude" + PageRulesBodyActionsItemActionsSecurityOptions_ID_Waf = "waf" +) + +// Constants associated with the PageRulesBodyActionsItemActionsSecurityOptions.Value property. +// value. 
+const ( + PageRulesBodyActionsItemActionsSecurityOptions_Value_Off = "off" + PageRulesBodyActionsItemActionsSecurityOptions_Value_On = "on" +) + + +// NewPageRulesBodyActionsItemActionsSecurityOptions : Instantiate PageRulesBodyActionsItemActionsSecurityOptions (Generic Model Constructor) +func (*PageRuleApiV1) NewPageRulesBodyActionsItemActionsSecurityOptions(id string) (model *PageRulesBodyActionsItemActionsSecurityOptions, err error) { + model = &PageRulesBodyActionsItemActionsSecurityOptions{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PageRulesBodyActionsItemActionsSecurityOptions) isaPageRulesBodyActionsItem() bool { + return true +} + +// UnmarshalPageRulesBodyActionsItemActionsSecurityOptions unmarshals an instance of PageRulesBodyActionsItemActionsSecurityOptions from the specified map of raw messages. +func UnmarshalPageRulesBodyActionsItemActionsSecurityOptions(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItemActionsSecurityOptions) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesBodyActionsItemActionsSsl : ssl actions. +// This model "extends" PageRulesBodyActionsItem +type PageRulesBodyActionsItemActionsSsl struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. + Value *string `json:"value,omitempty"` +} + +// Constants associated with the PageRulesBodyActionsItemActionsSsl.ID property. +// identifier. +const ( + PageRulesBodyActionsItemActionsSsl_ID_Ssl = "ssl" +) + +// Constants associated with the PageRulesBodyActionsItemActionsSsl.Value property. +// value. +const ( + PageRulesBodyActionsItemActionsSsl_Value_Flexible = "flexible" + PageRulesBodyActionsItemActionsSsl_Value_Full = "full" + PageRulesBodyActionsItemActionsSsl_Value_Off = "off" + PageRulesBodyActionsItemActionsSsl_Value_OriginPull = "origin_pull" + PageRulesBodyActionsItemActionsSsl_Value_Strict = "strict" +) + + +// NewPageRulesBodyActionsItemActionsSsl : Instantiate PageRulesBodyActionsItemActionsSsl (Generic Model Constructor) +func (*PageRuleApiV1) NewPageRulesBodyActionsItemActionsSsl(id string) (model *PageRulesBodyActionsItemActionsSsl, err error) { + model = &PageRulesBodyActionsItemActionsSsl{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PageRulesBodyActionsItemActionsSsl) isaPageRulesBodyActionsItem() bool { + return true +} + +// UnmarshalPageRulesBodyActionsItemActionsSsl unmarshals an instance of PageRulesBodyActionsItemActionsSsl from the specified map of raw messages. +func UnmarshalPageRulesBodyActionsItemActionsSsl(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItemActionsSsl) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PageRulesBodyActionsItemActionsTTL : ttl actions. +// This model "extends" PageRulesBodyActionsItem +type PageRulesBodyActionsItemActionsTTL struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. 
+ Value *int64 `json:"value,omitempty"` +} + +// Constants associated with the PageRulesBodyActionsItemActionsTTL.ID property. +// identifier. +const ( + PageRulesBodyActionsItemActionsTTL_ID_BrowserCacheTTL = "browser_cache_ttl" +) + + +// NewPageRulesBodyActionsItemActionsTTL : Instantiate PageRulesBodyActionsItemActionsTTL (Generic Model Constructor) +func (*PageRuleApiV1) NewPageRulesBodyActionsItemActionsTTL(id string) (model *PageRulesBodyActionsItemActionsTTL, err error) { + model = &PageRulesBodyActionsItemActionsTTL{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PageRulesBodyActionsItemActionsTTL) isaPageRulesBodyActionsItem() bool { + return true +} + +// UnmarshalPageRulesBodyActionsItemActionsTTL unmarshals an instance of PageRulesBodyActionsItemActionsTTL from the specified map of raw messages. +func UnmarshalPageRulesBodyActionsItemActionsTTL(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PageRulesBodyActionsItemActionsTTL) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/rangeapplicationsv1/range_applications_v1.go b/vendor/github.com/IBM/networking-go-sdk/rangeapplicationsv1/range_applications_v1.go new file mode 100644 index 00000000000..aaf7c9da93c --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/rangeapplicationsv1/range_applications_v1.go @@ -0,0 +1,1338 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package rangeapplicationsv1 : Operations and models for the RangeApplicationsV1 service +package rangeapplicationsv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "github.com/go-openapi/strfmt" + "net/http" + "reflect" + "time" +) + +// RangeApplicationsV1 : Range Applications +// +// Version: 1.0.0 +type RangeApplicationsV1 struct { + Service *core.BaseService + + // Full url-encoded cloud resource name (CRN) of resource instance. + Crn *string + + // zone identifier. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "range_applications" + +// RangeApplicationsV1Options : Service options +type RangeApplicationsV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full url-encoded cloud resource name (CRN) of resource instance. + Crn *string `validate:"required"` + + // zone identifier. 
+ ZoneIdentifier *string `validate:"required"` +} + +// NewRangeApplicationsV1UsingExternalConfig : constructs an instance of RangeApplicationsV1 with passed in options and external configuration. +func NewRangeApplicationsV1UsingExternalConfig(options *RangeApplicationsV1Options) (rangeApplications *RangeApplicationsV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + rangeApplications, err = NewRangeApplicationsV1(options) + if err != nil { + return + } + + err = rangeApplications.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = rangeApplications.Service.SetServiceURL(options.URL) + } + return +} + +// NewRangeApplicationsV1 : constructs an instance of RangeApplicationsV1 with passed in options. +func NewRangeApplicationsV1(options *RangeApplicationsV1Options) (service *RangeApplicationsV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &RangeApplicationsV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "rangeApplications" suitable for processing requests. +func (rangeApplications *RangeApplicationsV1) Clone() *RangeApplicationsV1 { + if core.IsNil(rangeApplications) { + return nil + } + clone := *rangeApplications + clone.Service = rangeApplications.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (rangeApplications *RangeApplicationsV1) SetServiceURL(url string) error { + return rangeApplications.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (rangeApplications *RangeApplicationsV1) GetServiceURL() string { + return rangeApplications.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (rangeApplications *RangeApplicationsV1) SetDefaultHeaders(headers http.Header) { + rangeApplications.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (rangeApplications *RangeApplicationsV1) SetEnableGzipCompression(enableGzip bool) { + rangeApplications.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (rangeApplications *RangeApplicationsV1) GetEnableGzipCompression() bool { + return rangeApplications.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. 
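+//
+// For example (values are illustrative):
+//
+//   rangeApplications.EnableRetries(3, 30*time.Second)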
+func (rangeApplications *RangeApplicationsV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
+	rangeApplications.Service.EnableRetries(maxRetries, maxRetryInterval)
+}
+
+// DisableRetries disables automatic retries for requests invoked for this service instance.
+func (rangeApplications *RangeApplicationsV1) DisableRetries() {
+	rangeApplications.Service.DisableRetries()
+}
+
+// ListRangeApps : List range applications
+// Get a list of currently existing Range Applications inside a zone.
+func (rangeApplications *RangeApplicationsV1) ListRangeApps(listRangeAppsOptions *ListRangeAppsOptions) (result *RangeApplications, response *core.DetailedResponse, err error) {
+	return rangeApplications.ListRangeAppsWithContext(context.Background(), listRangeAppsOptions)
+}
+
+// ListRangeAppsWithContext is an alternate form of the ListRangeApps method which supports a Context parameter
+func (rangeApplications *RangeApplicationsV1) ListRangeAppsWithContext(ctx context.Context, listRangeAppsOptions *ListRangeAppsOptions) (result *RangeApplications, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(listRangeAppsOptions, "listRangeAppsOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *rangeApplications.Crn,
+		"zone_identifier": *rangeApplications.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = rangeApplications.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(rangeApplications.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/range/apps`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range listRangeAppsOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("range_applications", "V1", "ListRangeApps")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	if listRangeAppsOptions.Page != nil {
+		builder.AddQuery("page", fmt.Sprint(*listRangeAppsOptions.Page))
+	}
+	if listRangeAppsOptions.PerPage != nil {
+		builder.AddQuery("per_page", fmt.Sprint(*listRangeAppsOptions.PerPage))
+	}
+	if listRangeAppsOptions.Order != nil {
+		builder.AddQuery("order", fmt.Sprint(*listRangeAppsOptions.Order))
+	}
+	if listRangeAppsOptions.Direction != nil {
+		builder.AddQuery("direction", fmt.Sprint(*listRangeAppsOptions.Direction))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = rangeApplications.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRangeApplications)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// CreateRangeApp : Create Range Application
+// Create a Range Application inside a zone.
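+// A minimal illustrative sketch (not part of the generated SDK); the protocol,
+// DNS name, and origin address below are assumed placeholder values:
+//   dns := &RangeAppReqDns{Type: core.StringPtr("CNAME"), Name: core.StringPtr("ssh.example.com")}
+//   opts := service.NewCreateRangeAppOptions("tcp/22", dns)
+//   opts.SetOriginDirect([]string{"tcp://203.0.113.1:22"})
+//   result, response, err := service.CreateRangeApp(opts)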
+func (rangeApplications *RangeApplicationsV1) CreateRangeApp(createRangeAppOptions *CreateRangeAppOptions) (result *RangeApplicationResp, response *core.DetailedResponse, err error) {
+	return rangeApplications.CreateRangeAppWithContext(context.Background(), createRangeAppOptions)
+}
+
+// CreateRangeAppWithContext is an alternate form of the CreateRangeApp method which supports a Context parameter
+func (rangeApplications *RangeApplicationsV1) CreateRangeAppWithContext(ctx context.Context, createRangeAppOptions *CreateRangeAppOptions) (result *RangeApplicationResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(createRangeAppOptions, "createRangeAppOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(createRangeAppOptions, "createRangeAppOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *rangeApplications.Crn,
+		"zone_identifier": *rangeApplications.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.POST)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = rangeApplications.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(rangeApplications.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/range/apps`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range createRangeAppOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("range_applications", "V1", "CreateRangeApp")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	body := make(map[string]interface{})
+	if createRangeAppOptions.Protocol != nil {
+		body["protocol"] = createRangeAppOptions.Protocol
+	}
+	if createRangeAppOptions.Dns != nil {
+		body["dns"] = createRangeAppOptions.Dns
+	}
+	if createRangeAppOptions.OriginDirect != nil {
+		body["origin_direct"] = createRangeAppOptions.OriginDirect
+	}
+	if createRangeAppOptions.OriginDns != nil {
+		body["origin_dns"] = createRangeAppOptions.OriginDns
+	}
+	if createRangeAppOptions.OriginPort != nil {
+		body["origin_port"] = createRangeAppOptions.OriginPort
+	}
+	if createRangeAppOptions.IpFirewall != nil {
+		body["ip_firewall"] = createRangeAppOptions.IpFirewall
+	}
+	if createRangeAppOptions.ProxyProtocol != nil {
+		body["proxy_protocol"] = createRangeAppOptions.ProxyProtocol
+	}
+	if createRangeAppOptions.EdgeIps != nil {
+		body["edge_ips"] = createRangeAppOptions.EdgeIps
+	}
+	if createRangeAppOptions.TrafficType != nil {
+		body["traffic_type"] = createRangeAppOptions.TrafficType
+	}
+	if createRangeAppOptions.Tls != nil {
+		body["tls"] = createRangeAppOptions.Tls
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = rangeApplications.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRangeApplicationResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// GetRangeApp : Get a range application in a zone
+// Get the application configuration of a specific application inside a zone.
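+// For example (illustrative only; the application identifier is a placeholder):
+//   opts := service.NewGetRangeAppOptions("<app-identifier>")
+//   result, response, err := service.GetRangeApp(opts)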
+func (rangeApplications *RangeApplicationsV1) GetRangeApp(getRangeAppOptions *GetRangeAppOptions) (result *RangeApplicationResp, response *core.DetailedResponse, err error) { + return rangeApplications.GetRangeAppWithContext(context.Background(), getRangeAppOptions) +} + +// GetRangeAppWithContext is an alternate form of the GetRangeApp method which supports a Context parameter +func (rangeApplications *RangeApplicationsV1) GetRangeAppWithContext(ctx context.Context, getRangeAppOptions *GetRangeAppOptions) (result *RangeApplicationResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getRangeAppOptions, "getRangeAppOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getRangeAppOptions, "getRangeAppOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *rangeApplications.Crn, + "zone_identifier": *rangeApplications.ZoneIdentifier, + "app_identifier": *getRangeAppOptions.AppIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = rangeApplications.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(rangeApplications.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/range/apps/{app_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getRangeAppOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("range_applications", "V1", "GetRangeApp") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = rangeApplications.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRangeApplicationResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateRangeApp : Update range application +// Update a Range Application inside a zone. 
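+// For example (illustrative only; the identifier, protocol, and dns value are
+// placeholders):
+//   opts := service.NewUpdateRangeAppOptions("<app-identifier>", "tcp/22", dns)
+//   opts.SetTls(UpdateRangeAppOptions_Tls_Full)
+//   result, response, err := service.UpdateRangeApp(opts)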
+func (rangeApplications *RangeApplicationsV1) UpdateRangeApp(updateRangeAppOptions *UpdateRangeAppOptions) (result *RangeApplicationResp, response *core.DetailedResponse, err error) { + return rangeApplications.UpdateRangeAppWithContext(context.Background(), updateRangeAppOptions) +} + +// UpdateRangeAppWithContext is an alternate form of the UpdateRangeApp method which supports a Context parameter +func (rangeApplications *RangeApplicationsV1) UpdateRangeAppWithContext(ctx context.Context, updateRangeAppOptions *UpdateRangeAppOptions) (result *RangeApplicationResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateRangeAppOptions, "updateRangeAppOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateRangeAppOptions, "updateRangeAppOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *rangeApplications.Crn, + "zone_identifier": *rangeApplications.ZoneIdentifier, + "app_identifier": *updateRangeAppOptions.AppIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = rangeApplications.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(rangeApplications.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/range/apps/{app_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateRangeAppOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("range_applications", "V1", "UpdateRangeApp") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateRangeAppOptions.Protocol != nil { + body["protocol"] = updateRangeAppOptions.Protocol + } + if updateRangeAppOptions.Dns != nil { + body["dns"] = updateRangeAppOptions.Dns + } + if updateRangeAppOptions.OriginDirect != nil { + body["origin_direct"] = updateRangeAppOptions.OriginDirect + } + if updateRangeAppOptions.OriginDns != nil { + body["origin_dns"] = updateRangeAppOptions.OriginDns + } + if updateRangeAppOptions.OriginPort != nil { + body["origin_port"] = updateRangeAppOptions.OriginPort + } + if updateRangeAppOptions.IpFirewall != nil { + body["ip_firewall"] = updateRangeAppOptions.IpFirewall + } + if updateRangeAppOptions.ProxyProtocol != nil { + body["proxy_protocol"] = updateRangeAppOptions.ProxyProtocol + } + if updateRangeAppOptions.EdgeIps != nil { + body["edge_ips"] = updateRangeAppOptions.EdgeIps + } + if updateRangeAppOptions.TrafficType != nil { + body["traffic_type"] = updateRangeAppOptions.TrafficType + } + if updateRangeAppOptions.Tls != nil { + body["tls"] = updateRangeAppOptions.Tls + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = rangeApplications.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRangeApplicationResp) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteRangeApp : Delete range application +// Delete a specific application configuration. 
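+// For example (illustrative only; the application identifier is a placeholder):
+//   opts := service.NewDeleteRangeAppOptions("<app-identifier>")
+//   result, response, err := service.DeleteRangeApp(opts)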
+func (rangeApplications *RangeApplicationsV1) DeleteRangeApp(deleteRangeAppOptions *DeleteRangeAppOptions) (result *RangeApplicationResp, response *core.DetailedResponse, err error) { + return rangeApplications.DeleteRangeAppWithContext(context.Background(), deleteRangeAppOptions) +} + +// DeleteRangeAppWithContext is an alternate form of the DeleteRangeApp method which supports a Context parameter +func (rangeApplications *RangeApplicationsV1) DeleteRangeAppWithContext(ctx context.Context, deleteRangeAppOptions *DeleteRangeAppOptions) (result *RangeApplicationResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteRangeAppOptions, "deleteRangeAppOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteRangeAppOptions, "deleteRangeAppOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *rangeApplications.Crn, + "zone_identifier": *rangeApplications.ZoneIdentifier, + "app_identifier": *deleteRangeAppOptions.AppIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = rangeApplications.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(rangeApplications.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/range/apps/{app_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteRangeAppOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("range_applications", "V1", "DeleteRangeApp") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = rangeApplications.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRangeApplicationResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateRangeAppOptions : The CreateRangeApp options. +type CreateRangeAppOptions struct { + // Defines the protocol and port for this application. + Protocol *string `json:"protocol" validate:"required"` + + // Name and type of the DNS record for this application. + Dns *RangeAppReqDns `json:"dns" validate:"required"` + + // IP address and port of the origin for this Range application. If configuring a load balancer, use 'origin_dns' and + // 'origin_port'. This can not be combined with 'origin_dns' and 'origin_port'. + OriginDirect []string `json:"origin_direct,omitempty"` + + // DNS record pointing to the origin for this Range application. This is used for configuring a load balancer. When + // specifying an individual IP address, use 'origin_direct'. This requires 'origin_port' and can not be combined with + // 'origin_direct'. + OriginDns *RangeAppReqOriginDns `json:"origin_dns,omitempty"` + + // Port at the origin that listens to traffic from this Range application. Requires 'origin_dns' and can not be + // combined with 'origin_direct'. + OriginPort *int64 `json:"origin_port,omitempty"` + + // Enables the IP Firewall for this application. Only available for TCP applications. + IpFirewall *bool `json:"ip_firewall,omitempty"` + + // Allows for the true client IP to be passed to the service. 
+	ProxyProtocol *string `json:"proxy_protocol,omitempty"`
+
+	// Configures IP version for the hostname of this application. Default is {"type":"dynamic", "connectivity":"all"}.
+	EdgeIps *RangeAppReqEdgeIps `json:"edge_ips,omitempty"`
+
+	// Configure how traffic is handled at the edge. If set to "direct" traffic is passed through to the service. In the
+	// case of "http" or "https" HTTP/s features at the edge are applied to this traffic.
+	TrafficType *string `json:"traffic_type,omitempty"`
+
+	// Configure if and how TLS connections are terminated at the edge.
+	Tls *string `json:"tls,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the CreateRangeAppOptions.ProxyProtocol property.
+// Allows for the true client IP to be passed to the service.
+const (
+	CreateRangeAppOptions_ProxyProtocol_Off = "off"
+	CreateRangeAppOptions_ProxyProtocol_Simple = "simple"
+	CreateRangeAppOptions_ProxyProtocol_V1 = "v1"
+	CreateRangeAppOptions_ProxyProtocol_V2 = "v2"
+)
+
+// Constants associated with the CreateRangeAppOptions.TrafficType property.
+// Configure how traffic is handled at the edge. If set to "direct" traffic is passed through to the service. In the
+// case of "http" or "https" HTTP/s features at the edge are applied to this traffic.
+const (
+	CreateRangeAppOptions_TrafficType_Direct = "direct"
+	CreateRangeAppOptions_TrafficType_Http = "http"
+	CreateRangeAppOptions_TrafficType_Https = "https"
+)
+
+// Constants associated with the CreateRangeAppOptions.Tls property.
+// Configure if and how TLS connections are terminated at the edge.
+const (
+	CreateRangeAppOptions_Tls_Flexible = "flexible"
+	CreateRangeAppOptions_Tls_Full = "full"
+	CreateRangeAppOptions_Tls_Off = "off"
+	CreateRangeAppOptions_Tls_Strict = "strict"
+)
+
+// NewCreateRangeAppOptions : Instantiate CreateRangeAppOptions
+func (*RangeApplicationsV1) NewCreateRangeAppOptions(protocol string, dns *RangeAppReqDns) *CreateRangeAppOptions {
+	return &CreateRangeAppOptions{
+		Protocol: core.StringPtr(protocol),
+		Dns: dns,
+	}
+}
+
+// SetProtocol : Allow user to set Protocol
+func (options *CreateRangeAppOptions) SetProtocol(protocol string) *CreateRangeAppOptions {
+	options.Protocol = core.StringPtr(protocol)
+	return options
+}
+
+// SetDns : Allow user to set Dns
+func (options *CreateRangeAppOptions) SetDns(dns *RangeAppReqDns) *CreateRangeAppOptions {
+	options.Dns = dns
+	return options
+}
+
+// SetOriginDirect : Allow user to set OriginDirect
+func (options *CreateRangeAppOptions) SetOriginDirect(originDirect []string) *CreateRangeAppOptions {
+	options.OriginDirect = originDirect
+	return options
+}
+
+// SetOriginDns : Allow user to set OriginDns
+func (options *CreateRangeAppOptions) SetOriginDns(originDns *RangeAppReqOriginDns) *CreateRangeAppOptions {
+	options.OriginDns = originDns
+	return options
+}
+
+// SetOriginPort : Allow user to set OriginPort
+func (options *CreateRangeAppOptions) SetOriginPort(originPort int64) *CreateRangeAppOptions {
+	options.OriginPort = core.Int64Ptr(originPort)
+	return options
+}
+
+// SetIpFirewall : Allow user to set IpFirewall
+func (options *CreateRangeAppOptions) SetIpFirewall(ipFirewall bool) *CreateRangeAppOptions {
+	options.IpFirewall = core.BoolPtr(ipFirewall)
+	return options
+}
+
+// SetProxyProtocol : Allow user to set ProxyProtocol
+func (options *CreateRangeAppOptions) SetProxyProtocol(proxyProtocol string) *CreateRangeAppOptions {
+	options.ProxyProtocol = core.StringPtr(proxyProtocol)
+ return options +} + +// SetEdgeIps : Allow user to set EdgeIps +func (options *CreateRangeAppOptions) SetEdgeIps(edgeIps *RangeAppReqEdgeIps) *CreateRangeAppOptions { + options.EdgeIps = edgeIps + return options +} + +// SetTrafficType : Allow user to set TrafficType +func (options *CreateRangeAppOptions) SetTrafficType(trafficType string) *CreateRangeAppOptions { + options.TrafficType = core.StringPtr(trafficType) + return options +} + +// SetTls : Allow user to set Tls +func (options *CreateRangeAppOptions) SetTls(tls string) *CreateRangeAppOptions { + options.Tls = core.StringPtr(tls) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateRangeAppOptions) SetHeaders(param map[string]string) *CreateRangeAppOptions { + options.Headers = param + return options +} + +// DeleteRangeAppOptions : The DeleteRangeApp options. +type DeleteRangeAppOptions struct { + // application identifier. + AppIdentifier *string `json:"app_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteRangeAppOptions : Instantiate DeleteRangeAppOptions +func (*RangeApplicationsV1) NewDeleteRangeAppOptions(appIdentifier string) *DeleteRangeAppOptions { + return &DeleteRangeAppOptions{ + AppIdentifier: core.StringPtr(appIdentifier), + } +} + +// SetAppIdentifier : Allow user to set AppIdentifier +func (options *DeleteRangeAppOptions) SetAppIdentifier(appIdentifier string) *DeleteRangeAppOptions { + options.AppIdentifier = core.StringPtr(appIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteRangeAppOptions) SetHeaders(param map[string]string) *DeleteRangeAppOptions { + options.Headers = param + return options +} + +// GetRangeAppOptions : The GetRangeApp options. +type GetRangeAppOptions struct { + // application identifier. + AppIdentifier *string `json:"app_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetRangeAppOptions : Instantiate GetRangeAppOptions +func (*RangeApplicationsV1) NewGetRangeAppOptions(appIdentifier string) *GetRangeAppOptions { + return &GetRangeAppOptions{ + AppIdentifier: core.StringPtr(appIdentifier), + } +} + +// SetAppIdentifier : Allow user to set AppIdentifier +func (options *GetRangeAppOptions) SetAppIdentifier(appIdentifier string) *GetRangeAppOptions { + options.AppIdentifier = core.StringPtr(appIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetRangeAppOptions) SetHeaders(param map[string]string) *GetRangeAppOptions { + options.Headers = param + return options +} + +// ListRangeAppsOptions : The ListRangeApps options. +type ListRangeAppsOptions struct { + // Page number of paginated results. + Page *int64 `json:"page,omitempty"` + + // Maximum number of Range applications per page. + PerPage *int64 `json:"per_page,omitempty"` + + // Field by which to order the list of Range applications. + Order *string `json:"order,omitempty"` + + // Direction in which to order results [ascending/descending order]. + Direction *string `json:"direction,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListRangeAppsOptions.Order property. +// Field by which to order the list of Range applications. 
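+// For example, an illustrative sketch of paging and ordering a listing (the
+// values are assumptions, not recommendations):
+//   opts := service.NewListRangeAppsOptions().
+//       SetPage(1).SetPerPage(50).
+//       SetOrder("created_on").SetDirection("desc")
+//   result, response, err := service.ListRangeApps(opts)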
+const ( + ListRangeAppsOptions_Order_AppID = "app_id" + ListRangeAppsOptions_Order_CreatedOn = "created_on" + ListRangeAppsOptions_Order_Dns = "dns" + ListRangeAppsOptions_Order_ModifiedOn = "modified_on" + ListRangeAppsOptions_Order_Protocol = "protocol" +) + +// Constants associated with the ListRangeAppsOptions.Direction property. +// Direction in which to order results [ascending/descending order]. +const ( + ListRangeAppsOptions_Direction_Asc = "asc" + ListRangeAppsOptions_Direction_Desc = "desc" +) + +// NewListRangeAppsOptions : Instantiate ListRangeAppsOptions +func (*RangeApplicationsV1) NewListRangeAppsOptions() *ListRangeAppsOptions { + return &ListRangeAppsOptions{} +} + +// SetPage : Allow user to set Page +func (options *ListRangeAppsOptions) SetPage(page int64) *ListRangeAppsOptions { + options.Page = core.Int64Ptr(page) + return options +} + +// SetPerPage : Allow user to set PerPage +func (options *ListRangeAppsOptions) SetPerPage(perPage int64) *ListRangeAppsOptions { + options.PerPage = core.Int64Ptr(perPage) + return options +} + +// SetOrder : Allow user to set Order +func (options *ListRangeAppsOptions) SetOrder(order string) *ListRangeAppsOptions { + options.Order = core.StringPtr(order) + return options +} + +// SetDirection : Allow user to set Direction +func (options *ListRangeAppsOptions) SetDirection(direction string) *ListRangeAppsOptions { + options.Direction = core.StringPtr(direction) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListRangeAppsOptions) SetHeaders(param map[string]string) *ListRangeAppsOptions { + options.Headers = param + return options +} + +// RangeAppReqDns : Name and type of the DNS record for this application. +type RangeAppReqDns struct { + // DNS record type. + Type *string `json:"type,omitempty"` + + // DNS record name. + Name *string `json:"name,omitempty"` +} + + +// UnmarshalRangeAppReqDns unmarshals an instance of RangeAppReqDns from the specified map of raw messages. +func UnmarshalRangeAppReqDns(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RangeAppReqDns) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RangeAppReqEdgeIps : Configures IP version for the hostname of this application. Default is {"type":"dynamic", "connectivity":"all"}. +type RangeAppReqEdgeIps struct { + // The type of edge IP configuration. + Type *string `json:"type,omitempty"` + + // Specifies the IP version (or all). + Connectivity *string `json:"connectivity,omitempty"` +} + +// Constants associated with the RangeAppReqEdgeIps.Type property. +// The type of edge IP configuration. +const ( + RangeAppReqEdgeIps_Type_Dynamic = "dynamic" +) + +// Constants associated with the RangeAppReqEdgeIps.Connectivity property. +// Specifies the IP version (or all). +const ( + RangeAppReqEdgeIps_Connectivity_All = "all" + RangeAppReqEdgeIps_Connectivity_Ipv4 = "ipv4" + RangeAppReqEdgeIps_Connectivity_Ipv6 = "ipv6" +) + + +// UnmarshalRangeAppReqEdgeIps unmarshals an instance of RangeAppReqEdgeIps from the specified map of raw messages. 
+func UnmarshalRangeAppReqEdgeIps(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RangeAppReqEdgeIps) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "connectivity", &obj.Connectivity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RangeAppReqOriginDns : DNS record pointing to the origin for this Range application. This is used for configuring a load balancer. When +// specifying an individual IP address, use 'origin_direct'. This requires 'origin_port' and can not be combined with +// 'origin_direct'. +type RangeAppReqOriginDns struct { + // Name of the origin. + Name *string `json:"name" validate:"required"` +} + + +// NewRangeAppReqOriginDns : Instantiate RangeAppReqOriginDns (Generic Model Constructor) +func (*RangeApplicationsV1) NewRangeAppReqOriginDns(name string) (model *RangeAppReqOriginDns, err error) { + model = &RangeAppReqOriginDns{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalRangeAppReqOriginDns unmarshals an instance of RangeAppReqOriginDns from the specified map of raw messages. +func UnmarshalRangeAppReqOriginDns(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RangeAppReqOriginDns) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RangeApplicationObjectDns : The name and type of DNS record for the Range application. +type RangeApplicationObjectDns struct { + // The type of DNS record associated with the application. + Type *string `json:"type,omitempty"` + + // The name of the DNS record associated with the application. + Name *string `json:"name,omitempty"` +} + +// Constants associated with the RangeApplicationObjectDns.Type property. +// The type of DNS record associated with the application. +const ( + RangeApplicationObjectDns_Type_Cname = "CNAME" +) + + +// UnmarshalRangeApplicationObjectDns unmarshals an instance of RangeApplicationObjectDns from the specified map of raw messages. +func UnmarshalRangeApplicationObjectDns(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RangeApplicationObjectDns) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RangeApplicationObjectEdgeIps : Configures IP version for the hostname of this application. +type RangeApplicationObjectEdgeIps struct { + // The type of edge IP configuration. + Type *string `json:"type,omitempty"` + + // Specifies the IP version (or all). + Connectivity *string `json:"connectivity,omitempty"` +} + +// Constants associated with the RangeApplicationObjectEdgeIps.Type property. +// The type of edge IP configuration. +const ( + RangeApplicationObjectEdgeIps_Type_Dynamic = "dynamic" +) + +// Constants associated with the RangeApplicationObjectEdgeIps.Connectivity property. +// Specifies the IP version (or all). 
+const (
+	RangeApplicationObjectEdgeIps_Connectivity_All = "all"
+	RangeApplicationObjectEdgeIps_Connectivity_Ipv4 = "ipv4"
+	RangeApplicationObjectEdgeIps_Connectivity_Ipv6 = "ipv6"
+)
+
+
+// UnmarshalRangeApplicationObjectEdgeIps unmarshals an instance of RangeApplicationObjectEdgeIps from the specified map of raw messages.
+func UnmarshalRangeApplicationObjectEdgeIps(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(RangeApplicationObjectEdgeIps)
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "connectivity", &obj.Connectivity)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// UpdateRangeAppOptions : The UpdateRangeApp options.
+type UpdateRangeAppOptions struct {
+	// application identifier.
+	AppIdentifier *string `json:"app_identifier" validate:"required,ne="`
+
+	// Defines the protocol and port for this application.
+	Protocol *string `json:"protocol" validate:"required"`
+
+	// Name and type of the DNS record for this application.
+	Dns *RangeAppReqDns `json:"dns" validate:"required"`
+
+	// IP address and port of the origin for this Range application. If configuring a load balancer, use 'origin_dns' and
+	// 'origin_port'. This can not be combined with 'origin_dns' and 'origin_port'.
+	OriginDirect []string `json:"origin_direct,omitempty"`
+
+	// DNS record pointing to the origin for this Range application. This is used for configuring a load balancer. When
+	// specifying an individual IP address, use 'origin_direct'. This requires 'origin_port' and can not be combined with
+	// 'origin_direct'.
+	OriginDns *RangeAppReqOriginDns `json:"origin_dns,omitempty"`
+
+	// Port at the origin that listens to traffic from this Range application. Requires 'origin_dns' and can not be
+	// combined with 'origin_direct'.
+	OriginPort *int64 `json:"origin_port,omitempty"`
+
+	// Enables the IP Firewall for this application. Only available for TCP applications.
+	IpFirewall *bool `json:"ip_firewall,omitempty"`
+
+	// Allows for the true client IP to be passed to the service.
+	ProxyProtocol *string `json:"proxy_protocol,omitempty"`
+
+	// Configures IP version for the hostname of this application. Default is {"type":"dynamic", "connectivity":"all"}.
+	EdgeIps *RangeAppReqEdgeIps `json:"edge_ips,omitempty"`
+
+	// Configure how traffic is handled at the edge. If set to "direct" traffic is passed through to the service. In the
+	// case of "http" or "https" HTTP/s features at the edge are applied to this traffic.
+	TrafficType *string `json:"traffic_type,omitempty"`
+
+	// Configure if and how TLS connections are terminated at the edge.
+	Tls *string `json:"tls,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the UpdateRangeAppOptions.ProxyProtocol property.
+// Allows for the true client IP to be passed to the service.
+const (
+	UpdateRangeAppOptions_ProxyProtocol_Off = "off"
+	UpdateRangeAppOptions_ProxyProtocol_Simple = "simple"
+	UpdateRangeAppOptions_ProxyProtocol_V1 = "v1"
+	UpdateRangeAppOptions_ProxyProtocol_V2 = "v2"
+)
+
+// Constants associated with the UpdateRangeAppOptions.TrafficType property.
+// Configure how traffic is handled at the edge. If set to "direct" traffic is passed through to the service. In the
+// case of "http" or "https" HTTP/s features at the edge are applied to this traffic.
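+// For example (illustrative; opts stands for an UpdateRangeAppOptions value):
+//   opts.SetTrafficType(UpdateRangeAppOptions_TrafficType_Direct)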
+const ( + UpdateRangeAppOptions_TrafficType_Direct = "direct" + UpdateRangeAppOptions_TrafficType_Http = "http" + UpdateRangeAppOptions_TrafficType_Https = "https" +) + +// Constants associated with the UpdateRangeAppOptions.Tls property. +// Configure if and how TLS connections are terminated at the edge. +const ( + UpdateRangeAppOptions_Tls_Flexible = "flexible" + UpdateRangeAppOptions_Tls_Full = "full" + UpdateRangeAppOptions_Tls_Off = "off" + UpdateRangeAppOptions_Tls_Strict = "strict" +) + +// NewUpdateRangeAppOptions : Instantiate UpdateRangeAppOptions +func (*RangeApplicationsV1) NewUpdateRangeAppOptions(appIdentifier string, protocol string, dns *RangeAppReqDns) *UpdateRangeAppOptions { + return &UpdateRangeAppOptions{ + AppIdentifier: core.StringPtr(appIdentifier), + Protocol: core.StringPtr(protocol), + Dns: dns, + } +} + +// SetAppIdentifier : Allow user to set AppIdentifier +func (options *UpdateRangeAppOptions) SetAppIdentifier(appIdentifier string) *UpdateRangeAppOptions { + options.AppIdentifier = core.StringPtr(appIdentifier) + return options +} + +// SetProtocol : Allow user to set Protocol +func (options *UpdateRangeAppOptions) SetProtocol(protocol string) *UpdateRangeAppOptions { + options.Protocol = core.StringPtr(protocol) + return options +} + +// SetDns : Allow user to set Dns +func (options *UpdateRangeAppOptions) SetDns(dns *RangeAppReqDns) *UpdateRangeAppOptions { + options.Dns = dns + return options +} + +// SetOriginDirect : Allow user to set OriginDirect +func (options *UpdateRangeAppOptions) SetOriginDirect(originDirect []string) *UpdateRangeAppOptions { + options.OriginDirect = originDirect + return options +} + +// SetOriginDns : Allow user to set OriginDns +func (options *UpdateRangeAppOptions) SetOriginDns(originDns *RangeAppReqOriginDns) *UpdateRangeAppOptions { + options.OriginDns = originDns + return options +} + +// SetOriginPort : Allow user to set OriginPort +func (options *UpdateRangeAppOptions) SetOriginPort(originPort int64) *UpdateRangeAppOptions { + options.OriginPort = core.Int64Ptr(originPort) + return options +} + +// SetIpFirewall : Allow user to set IpFirewall +func (options *UpdateRangeAppOptions) SetIpFirewall(ipFirewall bool) *UpdateRangeAppOptions { + options.IpFirewall = core.BoolPtr(ipFirewall) + return options +} + +// SetProxyProtocol : Allow user to set ProxyProtocol +func (options *UpdateRangeAppOptions) SetProxyProtocol(proxyProtocol string) *UpdateRangeAppOptions { + options.ProxyProtocol = core.StringPtr(proxyProtocol) + return options +} + +// SetEdgeIps : Allow user to set EdgeIps +func (options *UpdateRangeAppOptions) SetEdgeIps(edgeIps *RangeAppReqEdgeIps) *UpdateRangeAppOptions { + options.EdgeIps = edgeIps + return options +} + +// SetTrafficType : Allow user to set TrafficType +func (options *UpdateRangeAppOptions) SetTrafficType(trafficType string) *UpdateRangeAppOptions { + options.TrafficType = core.StringPtr(trafficType) + return options +} + +// SetTls : Allow user to set Tls +func (options *UpdateRangeAppOptions) SetTls(tls string) *UpdateRangeAppOptions { + options.Tls = core.StringPtr(tls) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateRangeAppOptions) SetHeaders(param map[string]string) *UpdateRangeAppOptions { + options.Headers = param + return options +} + +// RangeApplicationObject : range application object. +type RangeApplicationObject struct { + // Application identifier. + ID *string `json:"id,omitempty"` + + // Port configuration. 
+	Protocol *string `json:"protocol,omitempty"`
+
+	// The name and type of DNS record for the Range application.
+	Dns *RangeApplicationObjectDns `json:"dns,omitempty"`
+
+	// A list of destination addresses to the origin.
+	OriginDirect []string `json:"origin_direct,omitempty"`
+
+	// Enables the IP Firewall for this application.
+	IpFirewall *bool `json:"ip_firewall,omitempty"`
+
+	// Allows for the true client IP to be passed to the service.
+	ProxyProtocol *string `json:"proxy_protocol,omitempty"`
+
+	// Configures IP version for the hostname of this application.
+	EdgeIps *RangeApplicationObjectEdgeIps `json:"edge_ips,omitempty"`
+
+	// Specifies the TLS termination at the edge.
+	Tls *string `json:"tls,omitempty"`
+
+	// Configure how traffic is handled at the edge. If set to "direct" traffic is passed through to the service. In the
+	// case of "http" or "https" HTTP/s features at the edge are applied to this traffic.
+	TrafficType *string `json:"traffic_type,omitempty"`
+
+	// When the Application was created.
+	CreatedOn *strfmt.DateTime `json:"created_on,omitempty"`
+
+	// When the Application was last modified.
+	ModifiedOn *strfmt.DateTime `json:"modified_on,omitempty"`
+}
+
+// Constants associated with the RangeApplicationObject.ProxyProtocol property.
+// Allows for the true client IP to be passed to the service.
+const (
+	RangeApplicationObject_ProxyProtocol_Off = "off"
+	RangeApplicationObject_ProxyProtocol_Simple = "simple"
+	RangeApplicationObject_ProxyProtocol_V1 = "v1"
+	RangeApplicationObject_ProxyProtocol_V2 = "v2"
+)
+
+// Constants associated with the RangeApplicationObject.Tls property.
+// Specifies the TLS termination at the edge.
+const (
+	RangeApplicationObject_Tls_Flexible = "flexible"
+	RangeApplicationObject_Tls_Full = "full"
+	RangeApplicationObject_Tls_Off = "off"
+	RangeApplicationObject_Tls_Strict = "strict"
+)
+
+// Constants associated with the RangeApplicationObject.TrafficType property.
+// Configure how traffic is handled at the edge. If set to "direct" traffic is passed through to the service. In the
+// case of "http" or "https" HTTP/s features at the edge are applied to this traffic.
+const (
+	RangeApplicationObject_TrafficType_Direct = "direct"
+	RangeApplicationObject_TrafficType_Http = "http"
+	RangeApplicationObject_TrafficType_Https = "https"
+)
+
+
+// UnmarshalRangeApplicationObject unmarshals an instance of RangeApplicationObject from the specified map of raw messages.
+func UnmarshalRangeApplicationObject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RangeApplicationObject) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalModel(m, "dns", &obj.Dns, UnmarshalRangeApplicationObjectDns) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "origin_direct", &obj.OriginDirect) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_firewall", &obj.IpFirewall) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "proxy_protocol", &obj.ProxyProtocol) + if err != nil { + return + } + err = core.UnmarshalModel(m, "edge_ips", &obj.EdgeIps, UnmarshalRangeApplicationObjectEdgeIps) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tls", &obj.Tls) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "traffic_type", &obj.TrafficType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RangeApplicationResp : range application response. +type RangeApplicationResp struct { + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // range application object. + Result *RangeApplicationObject `json:"result" validate:"required"` +} + + +// UnmarshalRangeApplicationResp unmarshals an instance of RangeApplicationResp from the specified map of raw messages. +func UnmarshalRangeApplicationResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RangeApplicationResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalRangeApplicationObject) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RangeApplications : range application. +type RangeApplications struct { + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages [][]string `json:"messages" validate:"required"` + + // Container for Range application objects. + Result []RangeApplicationObject `json:"result" validate:"required"` +} + + +// UnmarshalRangeApplications unmarshals an instance of RangeApplications from the specified map of raw messages. 
+func UnmarshalRangeApplications(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RangeApplications) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalRangeApplicationObject) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/routingv1/routing_v1.go b/vendor/github.com/IBM/networking-go-sdk/routingv1/routing_v1.go new file mode 100644 index 00000000000..c917a78ebda --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/routingv1/routing_v1.go @@ -0,0 +1,429 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package routingv1 : Operations and models for the RoutingV1 service +package routingv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "github.com/go-openapi/strfmt" + "net/http" + "reflect" + "time" +) + +// RoutingV1 : Routing +// +// Version: 1.0.1 +type RoutingV1 struct { + Service *core.BaseService + + // Full url-encoded cloud resource name (CRN) of resource instance. + Crn *string + + // Zone identifier. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "routing" + +// RoutingV1Options : Service options +type RoutingV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full url-encoded cloud resource name (CRN) of resource instance. + Crn *string `validate:"required"` + + // Zone identifier. + ZoneIdentifier *string `validate:"required"` +} + +// NewRoutingV1UsingExternalConfig : constructs an instance of RoutingV1 with passed in options and external configuration. +func NewRoutingV1UsingExternalConfig(options *RoutingV1Options) (routing *RoutingV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + routing, err = NewRoutingV1(options) + if err != nil { + return + } + + err = routing.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = routing.Service.SetServiceURL(options.URL) + } + return +} + +// NewRoutingV1 : constructs an instance of RoutingV1 with passed in options. 
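+// A minimal illustrative sketch of constructing the service (the API key, CRN,
+// and zone identifier are placeholder assumptions):
+//   routing, err := NewRoutingV1(&RoutingV1Options{
+//       Authenticator:  &core.IamAuthenticator{ApiKey: "<api-key>"},
+//       Crn:            core.StringPtr("<instance-crn>"),
+//       ZoneIdentifier: core.StringPtr("<zone-id>"),
+//   })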
+func NewRoutingV1(options *RoutingV1Options) (service *RoutingV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &RoutingV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "routing" suitable for processing requests. +func (routing *RoutingV1) Clone() *RoutingV1 { + if core.IsNil(routing) { + return nil + } + clone := *routing + clone.Service = routing.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (routing *RoutingV1) SetServiceURL(url string) error { + return routing.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (routing *RoutingV1) GetServiceURL() string { + return routing.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (routing *RoutingV1) SetDefaultHeaders(headers http.Header) { + routing.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (routing *RoutingV1) SetEnableGzipCompression(enableGzip bool) { + routing.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (routing *RoutingV1) GetEnableGzipCompression() bool { + return routing.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (routing *RoutingV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + routing.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (routing *RoutingV1) DisableRetries() { + routing.Service.DisableRetries() +} + +// GetSmartRouting : Get Routing feature smart routing setting +// Get Routing feature smart routing setting for a zone. 
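+// For example (illustrative only):
+//   result, response, err := routing.GetSmartRouting(routing.NewGetSmartRoutingOptions())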
+func (routing *RoutingV1) GetSmartRouting(getSmartRoutingOptions *GetSmartRoutingOptions) (result *SmartRoutingResp, response *core.DetailedResponse, err error) { + return routing.GetSmartRoutingWithContext(context.Background(), getSmartRoutingOptions) +} + +// GetSmartRoutingWithContext is an alternate form of the GetSmartRouting method which supports a Context parameter +func (routing *RoutingV1) GetSmartRoutingWithContext(ctx context.Context, getSmartRoutingOptions *GetSmartRoutingOptions) (result *SmartRoutingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getSmartRoutingOptions, "getSmartRoutingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *routing.Crn, + "zone_identifier": *routing.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = routing.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(routing.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/routing/smart_routing`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSmartRoutingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("routing", "V1", "GetSmartRouting") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = routing.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSmartRoutingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSmartRouting : Update Routing feature smart route setting +// Update Routing feature smart route setting for a zone. 
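+// For example (illustrative only):
+//   opts := routing.NewUpdateSmartRoutingOptions().SetValue(UpdateSmartRoutingOptions_Value_On)
+//   result, response, err := routing.UpdateSmartRouting(opts)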
+func (routing *RoutingV1) UpdateSmartRouting(updateSmartRoutingOptions *UpdateSmartRoutingOptions) (result *SmartRoutingResp, response *core.DetailedResponse, err error) { + return routing.UpdateSmartRoutingWithContext(context.Background(), updateSmartRoutingOptions) +} + +// UpdateSmartRoutingWithContext is an alternate form of the UpdateSmartRouting method which supports a Context parameter +func (routing *RoutingV1) UpdateSmartRoutingWithContext(ctx context.Context, updateSmartRoutingOptions *UpdateSmartRoutingOptions) (result *SmartRoutingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateSmartRoutingOptions, "updateSmartRoutingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *routing.Crn, + "zone_identifier": *routing.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = routing.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(routing.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/routing/smart_routing`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateSmartRoutingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("routing", "V1", "UpdateSmartRouting") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateSmartRoutingOptions.Value != nil { + body["value"] = updateSmartRoutingOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = routing.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSmartRoutingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSmartRoutingOptions : The GetSmartRouting options. +type GetSmartRoutingOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSmartRoutingOptions : Instantiate GetSmartRoutingOptions +func (*RoutingV1) NewGetSmartRoutingOptions() *GetSmartRoutingOptions { + return &GetSmartRoutingOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetSmartRoutingOptions) SetHeaders(param map[string]string) *GetSmartRoutingOptions { + options.Headers = param + return options +} + +// SmartRoutingRespResult : Container for response information. +type SmartRoutingRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalSmartRoutingRespResult unmarshals an instance of SmartRoutingRespResult from the specified map of raw messages. 
+func UnmarshalSmartRoutingRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SmartRoutingRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateSmartRoutingOptions : The UpdateSmartRouting options. +type UpdateSmartRoutingOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateSmartRoutingOptions.Value property. +// Value. +const ( + UpdateSmartRoutingOptions_Value_Off = "off" + UpdateSmartRoutingOptions_Value_On = "on" +) + +// NewUpdateSmartRoutingOptions : Instantiate UpdateSmartRoutingOptions +func (*RoutingV1) NewUpdateSmartRoutingOptions() *UpdateSmartRoutingOptions { + return &UpdateSmartRoutingOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateSmartRoutingOptions) SetValue(value string) *UpdateSmartRoutingOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSmartRoutingOptions) SetHeaders(param map[string]string) *UpdateSmartRoutingOptions { + options.Headers = param + return options +} + +// SmartRoutingResp : smart routing response. +type SmartRoutingResp struct { + // Container for response information. + Result *SmartRoutingRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalSmartRoutingResp unmarshals an instance of SmartRoutingResp from the specified map of raw messages. +func UnmarshalSmartRoutingResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SmartRoutingResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalSmartRoutingRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/sslcertificateapiv1/ssl_certificate_api_v1.go b/vendor/github.com/IBM/networking-go-sdk/sslcertificateapiv1/ssl_certificate_api_v1.go new file mode 100644 index 00000000000..2399e5bfa66 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/sslcertificateapiv1/ssl_certificate_api_v1.go @@ -0,0 +1,2533 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package sslcertificateapiv1 : Operations and models for the SslCertificateApiV1 service +package sslcertificateapiv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "github.com/go-openapi/strfmt" + "net/http" + "reflect" + "time" +) + +// SslCertificateApiV1 : SSL Certificate +// +// Version: 1.0.0 +type SslCertificateApiV1 struct { + Service *core.BaseService + + // cloud resource name. + Crn *string + + // zone identifier. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "ssl_certificate_api" + +// SslCertificateApiV1Options : Service options +type SslCertificateApiV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // cloud resource name. + Crn *string `validate:"required"` + + // zone identifier. + ZoneIdentifier *string `validate:"required"` +} + +// NewSslCertificateApiV1UsingExternalConfig : constructs an instance of SslCertificateApiV1 with passed in options and external configuration. +func NewSslCertificateApiV1UsingExternalConfig(options *SslCertificateApiV1Options) (sslCertificateApi *SslCertificateApiV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + sslCertificateApi, err = NewSslCertificateApiV1(options) + if err != nil { + return + } + + err = sslCertificateApi.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = sslCertificateApi.Service.SetServiceURL(options.URL) + } + return +} + +// NewSslCertificateApiV1 : constructs an instance of SslCertificateApiV1 with passed in options. +func NewSslCertificateApiV1(options *SslCertificateApiV1Options) (service *SslCertificateApiV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &SslCertificateApiV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "sslCertificateApi" suitable for processing requests. 
+func (sslCertificateApi *SslCertificateApiV1) Clone() *SslCertificateApiV1 {
+	if core.IsNil(sslCertificateApi) {
+		return nil
+	}
+	clone := *sslCertificateApi
+	clone.Service = sslCertificateApi.Service.Clone()
+	return &clone
+}
+
+// SetServiceURL sets the service URL
+func (sslCertificateApi *SslCertificateApiV1) SetServiceURL(url string) error {
+	return sslCertificateApi.Service.SetServiceURL(url)
+}
+
+// GetServiceURL returns the service URL
+func (sslCertificateApi *SslCertificateApiV1) GetServiceURL() string {
+	return sslCertificateApi.Service.GetServiceURL()
+}
+
+// SetDefaultHeaders sets HTTP headers to be sent in every request
+func (sslCertificateApi *SslCertificateApiV1) SetDefaultHeaders(headers http.Header) {
+	sslCertificateApi.Service.SetDefaultHeaders(headers)
+}
+
+// SetEnableGzipCompression sets the service's EnableGzipCompression field
+func (sslCertificateApi *SslCertificateApiV1) SetEnableGzipCompression(enableGzip bool) {
+	sslCertificateApi.Service.SetEnableGzipCompression(enableGzip)
+}
+
+// GetEnableGzipCompression returns the service's EnableGzipCompression field
+func (sslCertificateApi *SslCertificateApiV1) GetEnableGzipCompression() bool {
+	return sslCertificateApi.Service.GetEnableGzipCompression()
+}
+
+// EnableRetries enables automatic retries for requests invoked for this service instance.
+// If either parameter is specified as 0, then a default value is used instead.
+func (sslCertificateApi *SslCertificateApiV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
+	sslCertificateApi.Service.EnableRetries(maxRetries, maxRetryInterval)
+}
+
+// DisableRetries disables automatic retries for requests invoked for this service instance.
+func (sslCertificateApi *SslCertificateApiV1) DisableRetries() {
+	sslCertificateApi.Service.DisableRetries()
+}
+
+// ListCertificates : List all certificates
+// CIS automatically adds an active DNS zone to a universal SSL certificate, shared among multiple customers. Customers
+// may order dedicated certificates for the zones they own. This API lists all certificates for a given zone, including
+// shared and dedicated certificates.
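+//
+// Illustrative usage (editor's sketch, not part of the generated SDK): constructing the
+// client and listing certificates. The API key, CRN and zone ID are placeholders, and the
+// IamAuthenticator is assumed from github.com/IBM/go-sdk-core/v4/core.
+//
+//	sslCertificateApi, err := sslcertificateapiv1.NewSslCertificateApiV1(&sslcertificateapiv1.SslCertificateApiV1Options{
+//		Authenticator:  &core.IamAuthenticator{ApiKey: "my-apikey"},          // placeholder credentials
+//		Crn:            core.StringPtr("crn:v1:bluemix:public:internet-svcs:global:a/123:456::"), // placeholder CRN
+//		ZoneIdentifier: core.StringPtr("my-zone-id"),                         // placeholder zone
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	// List shared and dedicated certificates for the zone.
+//	result, _, err := sslCertificateApi.ListCertificates(sslCertificateApi.NewListCertificatesOptions())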
+func (sslCertificateApi *SslCertificateApiV1) ListCertificates(listCertificatesOptions *ListCertificatesOptions) (result *ListCertificateResp, response *core.DetailedResponse, err error) {
+	return sslCertificateApi.ListCertificatesWithContext(context.Background(), listCertificatesOptions)
+}
+
+// ListCertificatesWithContext is an alternate form of the ListCertificates method which supports a Context parameter
+func (sslCertificateApi *SslCertificateApiV1) ListCertificatesWithContext(ctx context.Context, listCertificatesOptions *ListCertificatesOptions) (result *ListCertificateResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(listCertificatesOptions, "listCertificatesOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *sslCertificateApi.Crn,
+		"zone_identifier": *sslCertificateApi.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/ssl/certificate_packs`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range listCertificatesOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "ListCertificates")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	if listCertificatesOptions.XCorrelationID != nil {
+		builder.AddHeader("X-Correlation-ID", fmt.Sprint(*listCertificatesOptions.XCorrelationID))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = sslCertificateApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListCertificateResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// OrderCertificate : Order dedicated certificate
+// Order a dedicated certificate for a given zone. The zone should be active before placing an order for a dedicated
+// certificate.
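+//
+// Illustrative usage (editor's sketch): ordering a dedicated certificate for two
+// hypothetical hostnames; "dedicated" is the only documented type value here.
+//
+//	orderOpts := sslCertificateApi.NewOrderCertificateOptions()
+//	orderOpts.SetType(sslcertificateapiv1.OrderCertificateOptions_Type_Dedicated)
+//	orderOpts.SetHosts([]string{"example.com", "www.example.com"}) // placeholder hosts
+//	result, _, err := sslCertificateApi.OrderCertificate(orderOpts)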
+func (sslCertificateApi *SslCertificateApiV1) OrderCertificate(orderCertificateOptions *OrderCertificateOptions) (result *DedicatedCertificateResp, response *core.DetailedResponse, err error) { + return sslCertificateApi.OrderCertificateWithContext(context.Background(), orderCertificateOptions) +} + +// OrderCertificateWithContext is an alternate form of the OrderCertificate method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) OrderCertificateWithContext(ctx context.Context, orderCertificateOptions *OrderCertificateOptions) (result *DedicatedCertificateResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(orderCertificateOptions, "orderCertificateOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/ssl/certificate_packs`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range orderCertificateOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "OrderCertificate") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if orderCertificateOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*orderCertificateOptions.XCorrelationID)) + } + + body := make(map[string]interface{}) + if orderCertificateOptions.Type != nil { + body["type"] = orderCertificateOptions.Type + } + if orderCertificateOptions.Hosts != nil { + body["hosts"] = orderCertificateOptions.Hosts + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = sslCertificateApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedCertificateResp) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteCertificate : Delete a certificate +// Delete a given certificate. 
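+//
+// Illustrative usage (editor's sketch): deleting a certificate by identifier, tagging
+// the request with a correlation ID. The certificate ID and UUID are placeholders.
+//
+//	deleteOpts := sslCertificateApi.NewDeleteCertificateOptions("cert-id")
+//	deleteOpts.SetXCorrelationID("8f454d2c-5a85-4df4-9c57-1e0b6f8e4f2a")
+//	response, err := sslCertificateApi.DeleteCertificate(deleteOpts)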
+func (sslCertificateApi *SslCertificateApiV1) DeleteCertificate(deleteCertificateOptions *DeleteCertificateOptions) (response *core.DetailedResponse, err error) { + return sslCertificateApi.DeleteCertificateWithContext(context.Background(), deleteCertificateOptions) +} + +// DeleteCertificateWithContext is an alternate form of the DeleteCertificate method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) DeleteCertificateWithContext(ctx context.Context, deleteCertificateOptions *DeleteCertificateOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteCertificateOptions, "deleteCertificateOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteCertificateOptions, "deleteCertificateOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + "cert_identifier": *deleteCertificateOptions.CertIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/ssl/certificate_packs/{cert_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteCertificateOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "DeleteCertificate") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteCertificateOptions.XCorrelationID != nil { + builder.AddHeader("X-Correlation-ID", fmt.Sprint(*deleteCertificateOptions.XCorrelationID)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = sslCertificateApi.Service.Request(request, nil) + + return +} + +// GetSslSetting : Get SSL setting +// For a given zone identifier, get SSL setting. 
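+//
+// Illustrative usage (editor's sketch): the *WithContext variant lets callers apply a
+// deadline to the request via a standard context.Context.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	result, _, err := sslCertificateApi.GetSslSettingWithContext(ctx, sslCertificateApi.NewGetSslSettingOptions())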
+func (sslCertificateApi *SslCertificateApiV1) GetSslSetting(getSslSettingOptions *GetSslSettingOptions) (result *SslSettingResp, response *core.DetailedResponse, err error) { + return sslCertificateApi.GetSslSettingWithContext(context.Background(), getSslSettingOptions) +} + +// GetSslSettingWithContext is an alternate form of the GetSslSetting method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) GetSslSettingWithContext(ctx context.Context, getSslSettingOptions *GetSslSettingOptions) (result *SslSettingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getSslSettingOptions, "getSslSettingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/ssl`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSslSettingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "GetSslSetting") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = sslCertificateApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSslSettingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// ChangeSslSetting : Change SSL setting +// For a given zone identifier, change SSL setting. 
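+//
+// Illustrative usage (editor's sketch): switching the zone's SSL mode to "strict" using
+// the constants defined on ChangeSslSettingOptions.
+//
+//	sslOpts := sslCertificateApi.NewChangeSslSettingOptions()
+//	sslOpts.SetValue(sslcertificateapiv1.ChangeSslSettingOptions_Value_Strict)
+//	result, _, err := sslCertificateApi.ChangeSslSetting(sslOpts)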
+func (sslCertificateApi *SslCertificateApiV1) ChangeSslSetting(changeSslSettingOptions *ChangeSslSettingOptions) (result *SslSettingResp, response *core.DetailedResponse, err error) { + return sslCertificateApi.ChangeSslSettingWithContext(context.Background(), changeSslSettingOptions) +} + +// ChangeSslSettingWithContext is an alternate form of the ChangeSslSetting method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) ChangeSslSettingWithContext(ctx context.Context, changeSslSettingOptions *ChangeSslSettingOptions) (result *SslSettingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(changeSslSettingOptions, "changeSslSettingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/ssl`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range changeSslSettingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "ChangeSslSetting") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if changeSslSettingOptions.Value != nil { + body["value"] = changeSslSettingOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = sslCertificateApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSslSettingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// ListCustomCertificates : List all custom certificates +// For a given zone identifier, list all custom certificates. 
+func (sslCertificateApi *SslCertificateApiV1) ListCustomCertificates(listCustomCertificatesOptions *ListCustomCertificatesOptions) (result *ListCustomCertsResp, response *core.DetailedResponse, err error) {
+	return sslCertificateApi.ListCustomCertificatesWithContext(context.Background(), listCustomCertificatesOptions)
+}
+
+// ListCustomCertificatesWithContext is an alternate form of the ListCustomCertificates method which supports a Context parameter
+func (sslCertificateApi *SslCertificateApiV1) ListCustomCertificatesWithContext(ctx context.Context, listCustomCertificatesOptions *ListCustomCertificatesOptions) (result *ListCustomCertsResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(listCustomCertificatesOptions, "listCustomCertificatesOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *sslCertificateApi.Crn,
+		"zone_identifier": *sslCertificateApi.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/custom_certificates`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range listCustomCertificatesOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "ListCustomCertificates")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = sslCertificateApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListCustomCertsResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// UploadCustomCertificate : Upload a custom certificate
+// For a given zone identifier, upload a custom certificate.
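+//
+// Illustrative usage (editor's sketch): uploading a PEM certificate and key. The PEM
+// contents are placeholders, and NewUploadCustomCertificateOptions plus the BundleMethod
+// constant are assumed to follow the same generated pattern as the other options types
+// in this file.
+//
+//	uploadOpts := sslCertificateApi.NewUploadCustomCertificateOptions()
+//	uploadOpts.SetCertificate("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----")
+//	uploadOpts.SetPrivateKey("-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----")
+//	uploadOpts.SetBundleMethod(sslcertificateapiv1.UploadCustomCertificateOptions_BundleMethod_Ubiquitous)
+//	result, _, err := sslCertificateApi.UploadCustomCertificate(uploadOpts)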
+func (sslCertificateApi *SslCertificateApiV1) UploadCustomCertificate(uploadCustomCertificateOptions *UploadCustomCertificateOptions) (result *CustomCertResp, response *core.DetailedResponse, err error) {
+	return sslCertificateApi.UploadCustomCertificateWithContext(context.Background(), uploadCustomCertificateOptions)
+}
+
+// UploadCustomCertificateWithContext is an alternate form of the UploadCustomCertificate method which supports a Context parameter
+func (sslCertificateApi *SslCertificateApiV1) UploadCustomCertificateWithContext(ctx context.Context, uploadCustomCertificateOptions *UploadCustomCertificateOptions) (result *CustomCertResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(uploadCustomCertificateOptions, "uploadCustomCertificateOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *sslCertificateApi.Crn,
+		"zone_identifier": *sslCertificateApi.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.POST)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/custom_certificates`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range uploadCustomCertificateOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "UploadCustomCertificate")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	body := make(map[string]interface{})
+	if uploadCustomCertificateOptions.Certificate != nil {
+		body["certificate"] = uploadCustomCertificateOptions.Certificate
+	}
+	if uploadCustomCertificateOptions.PrivateKey != nil {
+		body["private_key"] = uploadCustomCertificateOptions.PrivateKey
+	}
+	if uploadCustomCertificateOptions.BundleMethod != nil {
+		body["bundle_method"] = uploadCustomCertificateOptions.BundleMethod
+	}
+	if uploadCustomCertificateOptions.GeoRestrictions != nil {
+		body["geo_restrictions"] = uploadCustomCertificateOptions.GeoRestrictions
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = sslCertificateApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomCertResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// GetCustomCertificate : Get custom certificate
+// For a given zone identifier, get a custom certificate.
+func (sslCertificateApi *SslCertificateApiV1) GetCustomCertificate(getCustomCertificateOptions *GetCustomCertificateOptions) (result *CustomCertResp, response *core.DetailedResponse, err error) {
+	return sslCertificateApi.GetCustomCertificateWithContext(context.Background(), getCustomCertificateOptions)
+}
+
+// GetCustomCertificateWithContext is an alternate form of the GetCustomCertificate method which supports a Context parameter
+func (sslCertificateApi *SslCertificateApiV1) GetCustomCertificateWithContext(ctx context.Context, getCustomCertificateOptions *GetCustomCertificateOptions) (result *CustomCertResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(getCustomCertificateOptions, "getCustomCertificateOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(getCustomCertificateOptions, "getCustomCertificateOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *sslCertificateApi.Crn,
+		"zone_identifier": *sslCertificateApi.ZoneIdentifier,
+		"custom_cert_id": *getCustomCertificateOptions.CustomCertID,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/custom_certificates/{custom_cert_id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getCustomCertificateOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "GetCustomCertificate")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = sslCertificateApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomCertResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// UpdateCustomCertificate : Update custom certificate
+// For a given zone identifier, update a custom certificate.
+func (sslCertificateApi *SslCertificateApiV1) UpdateCustomCertificate(updateCustomCertificateOptions *UpdateCustomCertificateOptions) (result *CustomCertResp, response *core.DetailedResponse, err error) {
+	return sslCertificateApi.UpdateCustomCertificateWithContext(context.Background(), updateCustomCertificateOptions)
+}
+
+// UpdateCustomCertificateWithContext is an alternate form of the UpdateCustomCertificate method which supports a Context parameter
+func (sslCertificateApi *SslCertificateApiV1) UpdateCustomCertificateWithContext(ctx context.Context, updateCustomCertificateOptions *UpdateCustomCertificateOptions) (result *CustomCertResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(updateCustomCertificateOptions, "updateCustomCertificateOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(updateCustomCertificateOptions, "updateCustomCertificateOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *sslCertificateApi.Crn,
+		"zone_identifier": *sslCertificateApi.ZoneIdentifier,
+		"custom_cert_id": *updateCustomCertificateOptions.CustomCertID,
+	}
+
+	builder := core.NewRequestBuilder(core.PATCH)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/custom_certificates/{custom_cert_id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range updateCustomCertificateOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "UpdateCustomCertificate")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	body := make(map[string]interface{})
+	if updateCustomCertificateOptions.Certificate != nil {
+		body["certificate"] = updateCustomCertificateOptions.Certificate
+	}
+	if updateCustomCertificateOptions.PrivateKey != nil {
+		body["private_key"] = updateCustomCertificateOptions.PrivateKey
+	}
+	if updateCustomCertificateOptions.BundleMethod != nil {
+		body["bundle_method"] = updateCustomCertificateOptions.BundleMethod
+	}
+	if updateCustomCertificateOptions.GeoRestrictions != nil {
+		body["geo_restrictions"] = updateCustomCertificateOptions.GeoRestrictions
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = sslCertificateApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomCertResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// DeleteCustomCertificate : Delete custom certificate
+// For a given zone identifier, delete a custom certificate.
+func (sslCertificateApi *SslCertificateApiV1) DeleteCustomCertificate(deleteCustomCertificateOptions *DeleteCustomCertificateOptions) (response *core.DetailedResponse, err error) { + return sslCertificateApi.DeleteCustomCertificateWithContext(context.Background(), deleteCustomCertificateOptions) +} + +// DeleteCustomCertificateWithContext is an alternate form of the DeleteCustomCertificate method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) DeleteCustomCertificateWithContext(ctx context.Context, deleteCustomCertificateOptions *DeleteCustomCertificateOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteCustomCertificateOptions, "deleteCustomCertificateOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteCustomCertificateOptions, "deleteCustomCertificateOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + "custom_cert_id": *deleteCustomCertificateOptions.CustomCertID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/custom_certificates/{custom_cert_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteCustomCertificateOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "DeleteCustomCertificate") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = sslCertificateApi.Service.Request(request, nil) + + return +} + +// ChangeCertificatePriority : Set certificate priority +// For a given zone identifier, set priority of certificates. 
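+//
+// Illustrative usage (editor's sketch): prioritizing two custom certificates by ID,
+// using the generic model constructor defined below. The IDs are placeholders.
+//
+//	item1, _ := sslCertificateApi.NewCertPriorityReqCertificatesItem("cert-id-1", 1)
+//	item2, _ := sslCertificateApi.NewCertPriorityReqCertificatesItem("cert-id-2", 2)
+//	prioOpts := sslCertificateApi.NewChangeCertificatePriorityOptions()
+//	prioOpts.SetCertificates([]sslcertificateapiv1.CertPriorityReqCertificatesItem{*item1, *item2})
+//	response, err := sslCertificateApi.ChangeCertificatePriority(prioOpts)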
+func (sslCertificateApi *SslCertificateApiV1) ChangeCertificatePriority(changeCertificatePriorityOptions *ChangeCertificatePriorityOptions) (response *core.DetailedResponse, err error) { + return sslCertificateApi.ChangeCertificatePriorityWithContext(context.Background(), changeCertificatePriorityOptions) +} + +// ChangeCertificatePriorityWithContext is an alternate form of the ChangeCertificatePriority method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) ChangeCertificatePriorityWithContext(ctx context.Context, changeCertificatePriorityOptions *ChangeCertificatePriorityOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateStruct(changeCertificatePriorityOptions, "changeCertificatePriorityOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/custom_certificates/prioritize`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range changeCertificatePriorityOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "ChangeCertificatePriority") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if changeCertificatePriorityOptions.Certificates != nil { + body["certificates"] = changeCertificatePriorityOptions.Certificates + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = sslCertificateApi.Service.Request(request, nil) + + return +} + +// GetUniversalCertificateSetting : Get details of universal certificate +// For a given zone identifier, get universal certificate. 
+func (sslCertificateApi *SslCertificateApiV1) GetUniversalCertificateSetting(getUniversalCertificateSettingOptions *GetUniversalCertificateSettingOptions) (result *UniversalSettingResp, response *core.DetailedResponse, err error) {
+	return sslCertificateApi.GetUniversalCertificateSettingWithContext(context.Background(), getUniversalCertificateSettingOptions)
+}
+
+// GetUniversalCertificateSettingWithContext is an alternate form of the GetUniversalCertificateSetting method which supports a Context parameter
+func (sslCertificateApi *SslCertificateApiV1) GetUniversalCertificateSettingWithContext(ctx context.Context, getUniversalCertificateSettingOptions *GetUniversalCertificateSettingOptions) (result *UniversalSettingResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(getUniversalCertificateSettingOptions, "getUniversalCertificateSettingOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *sslCertificateApi.Crn,
+		"zone_identifier": *sslCertificateApi.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/ssl/universal/settings`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getUniversalCertificateSettingOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "GetUniversalCertificateSetting")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = sslCertificateApi.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalUniversalSettingResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// ChangeUniversalCertificateSetting : Enable or disable universal certificate
+// For a given zone identifier, change the universal certificate setting.
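+//
+// Illustrative usage (editor's sketch): disabling the universal certificate for the zone.
+//
+//	universalOpts := sslCertificateApi.NewChangeUniversalCertificateSettingOptions()
+//	universalOpts.SetEnabled(false)
+//	response, err := sslCertificateApi.ChangeUniversalCertificateSetting(universalOpts)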
+func (sslCertificateApi *SslCertificateApiV1) ChangeUniversalCertificateSetting(changeUniversalCertificateSettingOptions *ChangeUniversalCertificateSettingOptions) (response *core.DetailedResponse, err error) { + return sslCertificateApi.ChangeUniversalCertificateSettingWithContext(context.Background(), changeUniversalCertificateSettingOptions) +} + +// ChangeUniversalCertificateSettingWithContext is an alternate form of the ChangeUniversalCertificateSetting method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) ChangeUniversalCertificateSettingWithContext(ctx context.Context, changeUniversalCertificateSettingOptions *ChangeUniversalCertificateSettingOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateStruct(changeUniversalCertificateSettingOptions, "changeUniversalCertificateSettingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/ssl/universal/settings`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range changeUniversalCertificateSettingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "ChangeUniversalCertificateSetting") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if changeUniversalCertificateSettingOptions.Enabled != nil { + body["enabled"] = changeUniversalCertificateSettingOptions.Enabled + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = sslCertificateApi.Service.Request(request, nil) + + return +} + +// GetTls12Setting : Get TLS 1.2 only setting +// For a given zone identifier, get TLS 1.2 only setting. 
+func (sslCertificateApi *SslCertificateApiV1) GetTls12Setting(getTls12SettingOptions *GetTls12SettingOptions) (result *Tls12SettingResp, response *core.DetailedResponse, err error) { + return sslCertificateApi.GetTls12SettingWithContext(context.Background(), getTls12SettingOptions) +} + +// GetTls12SettingWithContext is an alternate form of the GetTls12Setting method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) GetTls12SettingWithContext(ctx context.Context, getTls12SettingOptions *GetTls12SettingOptions) (result *Tls12SettingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getTls12SettingOptions, "getTls12SettingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/tls_1_2_only`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getTls12SettingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "GetTls12Setting") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = sslCertificateApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTls12SettingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// ChangeTls12Setting : Set TLS 1.2 setting +// For a given zone identifier, set TLS 1.2 setting. 
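+//
+// Illustrative usage (editor's sketch): restricting the zone to TLS 1.2; the TLS 1.3
+// setting is driven the same way through ChangeTls13Setting and its options type.
+//
+//	tlsOpts := sslCertificateApi.NewChangeTls12SettingOptions()
+//	tlsOpts.SetValue(sslcertificateapiv1.ChangeTls12SettingOptions_Value_On)
+//	result, _, err := sslCertificateApi.ChangeTls12Setting(tlsOpts)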
+func (sslCertificateApi *SslCertificateApiV1) ChangeTls12Setting(changeTls12SettingOptions *ChangeTls12SettingOptions) (result *Tls12SettingResp, response *core.DetailedResponse, err error) { + return sslCertificateApi.ChangeTls12SettingWithContext(context.Background(), changeTls12SettingOptions) +} + +// ChangeTls12SettingWithContext is an alternate form of the ChangeTls12Setting method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) ChangeTls12SettingWithContext(ctx context.Context, changeTls12SettingOptions *ChangeTls12SettingOptions) (result *Tls12SettingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(changeTls12SettingOptions, "changeTls12SettingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/tls_1_2_only`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range changeTls12SettingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "ChangeTls12Setting") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if changeTls12SettingOptions.Value != nil { + body["value"] = changeTls12SettingOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = sslCertificateApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTls12SettingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetTls13Setting : Get TLS 1.3 setting +// For a given zone identifier, get TLS 1.3 setting. 
+func (sslCertificateApi *SslCertificateApiV1) GetTls13Setting(getTls13SettingOptions *GetTls13SettingOptions) (result *Tls13SettingResp, response *core.DetailedResponse, err error) { + return sslCertificateApi.GetTls13SettingWithContext(context.Background(), getTls13SettingOptions) +} + +// GetTls13SettingWithContext is an alternate form of the GetTls13Setting method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) GetTls13SettingWithContext(ctx context.Context, getTls13SettingOptions *GetTls13SettingOptions) (result *Tls13SettingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getTls13SettingOptions, "getTls13SettingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/tls_1_3`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getTls13SettingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "GetTls13Setting") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = sslCertificateApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTls13SettingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// ChangeTls13Setting : Set TLS 1.3 setting +// For a given zone identifier, set TLS 1.3 setting. 
+func (sslCertificateApi *SslCertificateApiV1) ChangeTls13Setting(changeTls13SettingOptions *ChangeTls13SettingOptions) (result *Tls13SettingResp, response *core.DetailedResponse, err error) { + return sslCertificateApi.ChangeTls13SettingWithContext(context.Background(), changeTls13SettingOptions) +} + +// ChangeTls13SettingWithContext is an alternate form of the ChangeTls13Setting method which supports a Context parameter +func (sslCertificateApi *SslCertificateApiV1) ChangeTls13SettingWithContext(ctx context.Context, changeTls13SettingOptions *ChangeTls13SettingOptions) (result *Tls13SettingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(changeTls13SettingOptions, "changeTls13SettingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *sslCertificateApi.Crn, + "zone_identifier": *sslCertificateApi.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = sslCertificateApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(sslCertificateApi.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/tls_1_3`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range changeTls13SettingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("ssl_certificate_api", "V1", "ChangeTls13Setting") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if changeTls13SettingOptions.Value != nil { + body["value"] = changeTls13SettingOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = sslCertificateApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTls13SettingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CertPriorityReqCertificatesItem : certificate items. +type CertPriorityReqCertificatesItem struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // certificate priority. + Priority *int64 `json:"priority" validate:"required"` +} + + +// NewCertPriorityReqCertificatesItem : Instantiate CertPriorityReqCertificatesItem (Generic Model Constructor) +func (*SslCertificateApiV1) NewCertPriorityReqCertificatesItem(id string, priority int64) (model *CertPriorityReqCertificatesItem, err error) { + model = &CertPriorityReqCertificatesItem{ + ID: core.StringPtr(id), + Priority: core.Int64Ptr(priority), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalCertPriorityReqCertificatesItem unmarshals an instance of CertPriorityReqCertificatesItem from the specified map of raw messages. 
+func UnmarshalCertPriorityReqCertificatesItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CertPriorityReqCertificatesItem) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ChangeCertificatePriorityOptions : The ChangeCertificatePriority options. +type ChangeCertificatePriorityOptions struct { + // certificates array. + Certificates []CertPriorityReqCertificatesItem `json:"certificates,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewChangeCertificatePriorityOptions : Instantiate ChangeCertificatePriorityOptions +func (*SslCertificateApiV1) NewChangeCertificatePriorityOptions() *ChangeCertificatePriorityOptions { + return &ChangeCertificatePriorityOptions{} +} + +// SetCertificates : Allow user to set Certificates +func (options *ChangeCertificatePriorityOptions) SetCertificates(certificates []CertPriorityReqCertificatesItem) *ChangeCertificatePriorityOptions { + options.Certificates = certificates + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ChangeCertificatePriorityOptions) SetHeaders(param map[string]string) *ChangeCertificatePriorityOptions { + options.Headers = param + return options +} + +// ChangeSslSettingOptions : The ChangeSslSetting options. +type ChangeSslSettingOptions struct { + // value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ChangeSslSettingOptions.Value property. +// value. +const ( + ChangeSslSettingOptions_Value_Flexible = "flexible" + ChangeSslSettingOptions_Value_Full = "full" + ChangeSslSettingOptions_Value_Off = "off" + ChangeSslSettingOptions_Value_Strict = "strict" +) + +// NewChangeSslSettingOptions : Instantiate ChangeSslSettingOptions +func (*SslCertificateApiV1) NewChangeSslSettingOptions() *ChangeSslSettingOptions { + return &ChangeSslSettingOptions{} +} + +// SetValue : Allow user to set Value +func (options *ChangeSslSettingOptions) SetValue(value string) *ChangeSslSettingOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ChangeSslSettingOptions) SetHeaders(param map[string]string) *ChangeSslSettingOptions { + options.Headers = param + return options +} + +// ChangeTls12SettingOptions : The ChangeTls12Setting options. +type ChangeTls12SettingOptions struct { + // value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ChangeTls12SettingOptions.Value property. +// value. 
+const ( + ChangeTls12SettingOptions_Value_Off = "off" + ChangeTls12SettingOptions_Value_On = "on" +) + +// NewChangeTls12SettingOptions : Instantiate ChangeTls12SettingOptions +func (*SslCertificateApiV1) NewChangeTls12SettingOptions() *ChangeTls12SettingOptions { + return &ChangeTls12SettingOptions{} +} + +// SetValue : Allow user to set Value +func (options *ChangeTls12SettingOptions) SetValue(value string) *ChangeTls12SettingOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ChangeTls12SettingOptions) SetHeaders(param map[string]string) *ChangeTls12SettingOptions { + options.Headers = param + return options +} + +// ChangeTls13SettingOptions : The ChangeTls13Setting options. +type ChangeTls13SettingOptions struct { + // value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ChangeTls13SettingOptions.Value property. +// value. +const ( + ChangeTls13SettingOptions_Value_Off = "off" + ChangeTls13SettingOptions_Value_On = "on" +) + +// NewChangeTls13SettingOptions : Instantiate ChangeTls13SettingOptions +func (*SslCertificateApiV1) NewChangeTls13SettingOptions() *ChangeTls13SettingOptions { + return &ChangeTls13SettingOptions{} +} + +// SetValue : Allow user to set Value +func (options *ChangeTls13SettingOptions) SetValue(value string) *ChangeTls13SettingOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ChangeTls13SettingOptions) SetHeaders(param map[string]string) *ChangeTls13SettingOptions { + options.Headers = param + return options +} + +// ChangeUniversalCertificateSettingOptions : The ChangeUniversalCertificateSetting options. +type ChangeUniversalCertificateSettingOptions struct { + // enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewChangeUniversalCertificateSettingOptions : Instantiate ChangeUniversalCertificateSettingOptions +func (*SslCertificateApiV1) NewChangeUniversalCertificateSettingOptions() *ChangeUniversalCertificateSettingOptions { + return &ChangeUniversalCertificateSettingOptions{} +} + +// SetEnabled : Allow user to set Enabled +func (options *ChangeUniversalCertificateSettingOptions) SetEnabled(enabled bool) *ChangeUniversalCertificateSettingOptions { + options.Enabled = core.BoolPtr(enabled) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ChangeUniversalCertificateSettingOptions) SetHeaders(param map[string]string) *ChangeUniversalCertificateSettingOptions { + options.Headers = param + return options +} + +// CustomCertReqGeoRestrictions : geo restrictions. +type CustomCertReqGeoRestrictions struct { + // properties. + Label *string `json:"label" validate:"required"` +} + +// Constants associated with the CustomCertReqGeoRestrictions.Label property. +// properties. 
+const (
+	CustomCertReqGeoRestrictions_Label_Eu = "eu"
+	CustomCertReqGeoRestrictions_Label_HighestSecurity = "highest_security"
+	CustomCertReqGeoRestrictions_Label_Us = "us"
+)
+
+
+// NewCustomCertReqGeoRestrictions : Instantiate CustomCertReqGeoRestrictions (Generic Model Constructor)
+func (*SslCertificateApiV1) NewCustomCertReqGeoRestrictions(label string) (model *CustomCertReqGeoRestrictions, err error) {
+	model = &CustomCertReqGeoRestrictions{
+		Label: core.StringPtr(label),
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
+
+// UnmarshalCustomCertReqGeoRestrictions unmarshals an instance of CustomCertReqGeoRestrictions from the specified map of raw messages.
+func UnmarshalCustomCertReqGeoRestrictions(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(CustomCertReqGeoRestrictions)
+	err = core.UnmarshalPrimitive(m, "label", &obj.Label)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// DeleteCertificateOptions : The DeleteCertificate options.
+type DeleteCertificateOptions struct {
+	// certificate identifier.
+	CertIdentifier *string `json:"cert_identifier" validate:"required,ne="`
+
+	// uuid, identify a session.
+	XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewDeleteCertificateOptions : Instantiate DeleteCertificateOptions
+func (*SslCertificateApiV1) NewDeleteCertificateOptions(certIdentifier string) *DeleteCertificateOptions {
+	return &DeleteCertificateOptions{
+		CertIdentifier: core.StringPtr(certIdentifier),
+	}
+}
+
+// SetCertIdentifier : Allow user to set CertIdentifier
+func (options *DeleteCertificateOptions) SetCertIdentifier(certIdentifier string) *DeleteCertificateOptions {
+	options.CertIdentifier = core.StringPtr(certIdentifier)
+	return options
+}
+
+// SetXCorrelationID : Allow user to set XCorrelationID
+func (options *DeleteCertificateOptions) SetXCorrelationID(xCorrelationID string) *DeleteCertificateOptions {
+	options.XCorrelationID = core.StringPtr(xCorrelationID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *DeleteCertificateOptions) SetHeaders(param map[string]string) *DeleteCertificateOptions {
+	options.Headers = param
+	return options
+}
+
+// DeleteCustomCertificateOptions : The DeleteCustomCertificate options.
+type DeleteCustomCertificateOptions struct {
+	// custom certificate id.
+	CustomCertID *string `json:"custom_cert_id" validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewDeleteCustomCertificateOptions : Instantiate DeleteCustomCertificateOptions
+func (*SslCertificateApiV1) NewDeleteCustomCertificateOptions(customCertID string) *DeleteCustomCertificateOptions {
+	return &DeleteCustomCertificateOptions{
+		CustomCertID: core.StringPtr(customCertID),
+	}
+}
+
+// SetCustomCertID : Allow user to set CustomCertID
+func (options *DeleteCustomCertificateOptions) SetCustomCertID(customCertID string) *DeleteCustomCertificateOptions {
+	options.CustomCertID = core.StringPtr(customCertID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *DeleteCustomCertificateOptions) SetHeaders(param map[string]string) *DeleteCustomCertificateOptions {
+	options.Headers = param
+	return options
+}
+
+// GetCustomCertificateOptions : The GetCustomCertificate options.
+type GetCustomCertificateOptions struct { + // custom certificate id. + CustomCertID *string `json:"custom_cert_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetCustomCertificateOptions : Instantiate GetCustomCertificateOptions +func (*SslCertificateApiV1) NewGetCustomCertificateOptions(customCertID string) *GetCustomCertificateOptions { + return &GetCustomCertificateOptions{ + CustomCertID: core.StringPtr(customCertID), + } +} + +// SetCustomCertID : Allow user to set CustomCertID +func (options *GetCustomCertificateOptions) SetCustomCertID(customCertID string) *GetCustomCertificateOptions { + options.CustomCertID = core.StringPtr(customCertID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetCustomCertificateOptions) SetHeaders(param map[string]string) *GetCustomCertificateOptions { + options.Headers = param + return options +} + +// GetSslSettingOptions : The GetSslSetting options. +type GetSslSettingOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSslSettingOptions : Instantiate GetSslSettingOptions +func (*SslCertificateApiV1) NewGetSslSettingOptions() *GetSslSettingOptions { + return &GetSslSettingOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetSslSettingOptions) SetHeaders(param map[string]string) *GetSslSettingOptions { + options.Headers = param + return options +} + +// GetTls12SettingOptions : The GetTls12Setting options. +type GetTls12SettingOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetTls12SettingOptions : Instantiate GetTls12SettingOptions +func (*SslCertificateApiV1) NewGetTls12SettingOptions() *GetTls12SettingOptions { + return &GetTls12SettingOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetTls12SettingOptions) SetHeaders(param map[string]string) *GetTls12SettingOptions { + options.Headers = param + return options +} + +// GetTls13SettingOptions : The GetTls13Setting options. +type GetTls13SettingOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetTls13SettingOptions : Instantiate GetTls13SettingOptions +func (*SslCertificateApiV1) NewGetTls13SettingOptions() *GetTls13SettingOptions { + return &GetTls13SettingOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetTls13SettingOptions) SetHeaders(param map[string]string) *GetTls13SettingOptions { + options.Headers = param + return options +} + +// GetUniversalCertificateSettingOptions : The GetUniversalCertificateSetting options. +type GetUniversalCertificateSettingOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetUniversalCertificateSettingOptions : Instantiate GetUniversalCertificateSettingOptions +func (*SslCertificateApiV1) NewGetUniversalCertificateSettingOptions() *GetUniversalCertificateSettingOptions { + return &GetUniversalCertificateSettingOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetUniversalCertificateSettingOptions) SetHeaders(param map[string]string) *GetUniversalCertificateSettingOptions { + options.Headers = param + return options +} + +// ListCertificatesOptions : The ListCertificates options. +type ListCertificatesOptions struct { + // uuid, identify a session. 
+	XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListCertificatesOptions : Instantiate ListCertificatesOptions
+func (*SslCertificateApiV1) NewListCertificatesOptions() *ListCertificatesOptions {
+	return &ListCertificatesOptions{}
+}
+
+// SetXCorrelationID : Allow user to set XCorrelationID
+func (options *ListCertificatesOptions) SetXCorrelationID(xCorrelationID string) *ListCertificatesOptions {
+	options.XCorrelationID = core.StringPtr(xCorrelationID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListCertificatesOptions) SetHeaders(param map[string]string) *ListCertificatesOptions {
+	options.Headers = param
+	return options
+}
+
+// ListCustomCertificatesOptions : The ListCustomCertificates options.
+type ListCustomCertificatesOptions struct {
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListCustomCertificatesOptions : Instantiate ListCustomCertificatesOptions
+func (*SslCertificateApiV1) NewListCustomCertificatesOptions() *ListCustomCertificatesOptions {
+	return &ListCustomCertificatesOptions{}
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListCustomCertificatesOptions) SetHeaders(param map[string]string) *ListCustomCertificatesOptions {
+	options.Headers = param
+	return options
+}
+
+// OrderCertificateOptions : The OrderCertificate options.
+type OrderCertificateOptions struct {
+	// certificate type.
+	Type *string `json:"type,omitempty"`
+
+	// host names.
+	Hosts []string `json:"hosts,omitempty"`
+
+	// uuid, identify a session.
+	XCorrelationID *string `json:"X-Correlation-ID,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the OrderCertificateOptions.Type property.
+// certificate type.
+const (
+	OrderCertificateOptions_Type_Dedicated = "dedicated"
+)
+
+// NewOrderCertificateOptions : Instantiate OrderCertificateOptions
+func (*SslCertificateApiV1) NewOrderCertificateOptions() *OrderCertificateOptions {
+	return &OrderCertificateOptions{}
+}
+
+// SetType : Allow user to set Type
+func (options *OrderCertificateOptions) SetType(typeVar string) *OrderCertificateOptions {
+	options.Type = core.StringPtr(typeVar)
+	return options
+}
+
+// SetHosts : Allow user to set Hosts
+func (options *OrderCertificateOptions) SetHosts(hosts []string) *OrderCertificateOptions {
+	options.Hosts = hosts
+	return options
+}
+
+// SetXCorrelationID : Allow user to set XCorrelationID
+func (options *OrderCertificateOptions) SetXCorrelationID(xCorrelationID string) *OrderCertificateOptions {
+	options.XCorrelationID = core.StringPtr(xCorrelationID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *OrderCertificateOptions) SetHeaders(param map[string]string) *OrderCertificateOptions {
+	options.Headers = param
+	return options
+}
+
+// Tls12SettingRespMessagesItem : Tls12SettingRespMessagesItem struct
+type Tls12SettingRespMessagesItem struct {
+	// status.
+	Status *string `json:"status,omitempty"`
+}
+
+
+// UnmarshalTls12SettingRespMessagesItem unmarshals an instance of Tls12SettingRespMessagesItem from the specified map of raw messages.
+func UnmarshalTls12SettingRespMessagesItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Tls12SettingRespMessagesItem) + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Tls12SettingRespResult : result. +type Tls12SettingRespResult struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. + Value *string `json:"value" validate:"required"` + + // editable. + Editable *bool `json:"editable" validate:"required"` + + // modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + +// Constants associated with the Tls12SettingRespResult.ID property. +// identifier. +const ( + Tls12SettingRespResult_ID_Tls12Only = "tls_1_2_only" +) + + +// UnmarshalTls12SettingRespResult unmarshals an instance of Tls12SettingRespResult from the specified map of raw messages. +func UnmarshalTls12SettingRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Tls12SettingRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Tls13SettingRespResult : result. +type Tls13SettingRespResult struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. + Value *string `json:"value" validate:"required"` + + // editable. + Editable *bool `json:"editable" validate:"required"` + + // modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + +// Constants associated with the Tls13SettingRespResult.ID property. +// identifier. +const ( + Tls13SettingRespResult_ID_Tls13 = "tls_1_3" +) + + +// UnmarshalTls13SettingRespResult unmarshals an instance of Tls13SettingRespResult from the specified map of raw messages. +func UnmarshalTls13SettingRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Tls13SettingRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UniversalSettingRespResult : result. +type UniversalSettingRespResult struct { + // enabled. + Enabled *bool `json:"enabled" validate:"required"` +} + + +// UnmarshalUniversalSettingRespResult unmarshals an instance of UniversalSettingRespResult from the specified map of raw messages. +func UnmarshalUniversalSettingRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(UniversalSettingRespResult) + err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateCustomCertificateOptions : The UpdateCustomCertificate options. +type UpdateCustomCertificateOptions struct { + // custom certificate id. 
+ CustomCertID *string `json:"custom_cert_id" validate:"required,ne="` + + // certificates. + Certificate *string `json:"certificate,omitempty"` + + // private key. + PrivateKey *string `json:"private_key,omitempty"` + + // Methods shown in UI mapping to API: Compatible(ubiquitous), Modern(optimal), User Defined(force). + BundleMethod *string `json:"bundle_method,omitempty"` + + // geo restrictions. + GeoRestrictions *CustomCertReqGeoRestrictions `json:"geo_restrictions,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateCustomCertificateOptions.BundleMethod property. +// Methods shown in UI mapping to API: Compatible(ubiquitous), Modern(optimal), User Defined(force). +const ( + UpdateCustomCertificateOptions_BundleMethod_Force = "force" + UpdateCustomCertificateOptions_BundleMethod_Optimal = "optimal" + UpdateCustomCertificateOptions_BundleMethod_Ubiquitous = "ubiquitous" +) + +// NewUpdateCustomCertificateOptions : Instantiate UpdateCustomCertificateOptions +func (*SslCertificateApiV1) NewUpdateCustomCertificateOptions(customCertID string) *UpdateCustomCertificateOptions { + return &UpdateCustomCertificateOptions{ + CustomCertID: core.StringPtr(customCertID), + } +} + +// SetCustomCertID : Allow user to set CustomCertID +func (options *UpdateCustomCertificateOptions) SetCustomCertID(customCertID string) *UpdateCustomCertificateOptions { + options.CustomCertID = core.StringPtr(customCertID) + return options +} + +// SetCertificate : Allow user to set Certificate +func (options *UpdateCustomCertificateOptions) SetCertificate(certificate string) *UpdateCustomCertificateOptions { + options.Certificate = core.StringPtr(certificate) + return options +} + +// SetPrivateKey : Allow user to set PrivateKey +func (options *UpdateCustomCertificateOptions) SetPrivateKey(privateKey string) *UpdateCustomCertificateOptions { + options.PrivateKey = core.StringPtr(privateKey) + return options +} + +// SetBundleMethod : Allow user to set BundleMethod +func (options *UpdateCustomCertificateOptions) SetBundleMethod(bundleMethod string) *UpdateCustomCertificateOptions { + options.BundleMethod = core.StringPtr(bundleMethod) + return options +} + +// SetGeoRestrictions : Allow user to set GeoRestrictions +func (options *UpdateCustomCertificateOptions) SetGeoRestrictions(geoRestrictions *CustomCertReqGeoRestrictions) *UpdateCustomCertificateOptions { + options.GeoRestrictions = geoRestrictions + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateCustomCertificateOptions) SetHeaders(param map[string]string) *UpdateCustomCertificateOptions { + options.Headers = param + return options +} + +// UploadCustomCertificateOptions : The UploadCustomCertificate options. +type UploadCustomCertificateOptions struct { + // certificates. + Certificate *string `json:"certificate,omitempty"` + + // private key. + PrivateKey *string `json:"private_key,omitempty"` + + // Methods shown in UI mapping to API: Compatible(ubiquitous), Modern(optimal), User Defined(force). + BundleMethod *string `json:"bundle_method,omitempty"` + + // geo restrictions. + GeoRestrictions *CustomCertReqGeoRestrictions `json:"geo_restrictions,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UploadCustomCertificateOptions.BundleMethod property. +// Methods shown in UI mapping to API: Compatible(ubiquitous), Modern(optimal), User Defined(force). 
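+// A minimal sketch of selecting one of these bundle methods on an upload,
+// assuming sslCertificateApi is an existing *SslCertificateApiV1 instance;
+// the PEM contents below are placeholders:
+//
+//    uploadOptions := sslCertificateApi.NewUploadCustomCertificateOptions()
+//    uploadOptions.SetCertificate("-----BEGIN CERTIFICATE-----\n...")
+//    uploadOptions.SetPrivateKey("-----BEGIN PRIVATE KEY-----\n...")
+//    uploadOptions.SetBundleMethod(UploadCustomCertificateOptions_BundleMethod_Ubiquitous)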
+const ( + UploadCustomCertificateOptions_BundleMethod_Force = "force" + UploadCustomCertificateOptions_BundleMethod_Optimal = "optimal" + UploadCustomCertificateOptions_BundleMethod_Ubiquitous = "ubiquitous" +) + +// NewUploadCustomCertificateOptions : Instantiate UploadCustomCertificateOptions +func (*SslCertificateApiV1) NewUploadCustomCertificateOptions() *UploadCustomCertificateOptions { + return &UploadCustomCertificateOptions{} +} + +// SetCertificate : Allow user to set Certificate +func (options *UploadCustomCertificateOptions) SetCertificate(certificate string) *UploadCustomCertificateOptions { + options.Certificate = core.StringPtr(certificate) + return options +} + +// SetPrivateKey : Allow user to set PrivateKey +func (options *UploadCustomCertificateOptions) SetPrivateKey(privateKey string) *UploadCustomCertificateOptions { + options.PrivateKey = core.StringPtr(privateKey) + return options +} + +// SetBundleMethod : Allow user to set BundleMethod +func (options *UploadCustomCertificateOptions) SetBundleMethod(bundleMethod string) *UploadCustomCertificateOptions { + options.BundleMethod = core.StringPtr(bundleMethod) + return options +} + +// SetGeoRestrictions : Allow user to set GeoRestrictions +func (options *UploadCustomCertificateOptions) SetGeoRestrictions(geoRestrictions *CustomCertReqGeoRestrictions) *UploadCustomCertificateOptions { + options.GeoRestrictions = geoRestrictions + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UploadCustomCertificateOptions) SetHeaders(param map[string]string) *UploadCustomCertificateOptions { + options.Headers = param + return options +} + +// Certificate : certificate. +type Certificate struct { + // identifier. + ID interface{} `json:"id" validate:"required"` + + // host name. + Hosts []string `json:"hosts" validate:"required"` + + // status. + Status *string `json:"status" validate:"required"` +} + + +// UnmarshalCertificate unmarshals an instance of Certificate from the specified map of raw messages. +func UnmarshalCertificate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Certificate) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "hosts", &obj.Hosts) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CustomCertPack : custom certificate pack. +type CustomCertPack struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // host name. + Hosts []string `json:"hosts" validate:"required"` + + // issuer. + Issuer *string `json:"issuer" validate:"required"` + + // signature. + Signature *string `json:"signature" validate:"required"` + + // status. + Status *string `json:"status" validate:"required"` + + // bundle method. + BundleMethod *string `json:"bundle_method" validate:"required"` + + // zone identifier. + ZoneID *string `json:"zone_id" validate:"required"` + + // uploaded date. + UploadedOn *string `json:"uploaded_on" validate:"required"` + + // modified date. + ModifiedOn *string `json:"modified_on" validate:"required"` + + // expire date. + ExpiresOn *string `json:"expires_on" validate:"required"` + + // priority. + Priority *float64 `json:"priority" validate:"required"` +} + + +// UnmarshalCustomCertPack unmarshals an instance of CustomCertPack from the specified map of raw messages. 
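+// These Unmarshal* helpers are normally invoked indirectly through
+// core.UnmarshalModel, as the service methods in this package do. A direct
+// sketch (the JSON payload is a placeholder; note the pointer-to-pointer
+// result argument required by the reflect-based contract):
+//
+//    var raw map[string]json.RawMessage
+//    _ = json.Unmarshal([]byte(`{"id":"cert-id","hosts":["example.com"]}`), &raw)
+//    var pack *CustomCertPack
+//    err := UnmarshalCustomCertPack(raw, &pack)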
+func UnmarshalCustomCertPack(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CustomCertPack) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "hosts", &obj.Hosts) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "issuer", &obj.Issuer) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "signature", &obj.Signature) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "bundle_method", &obj.BundleMethod) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "zone_id", &obj.ZoneID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "uploaded_on", &obj.UploadedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "expires_on", &obj.ExpiresOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CustomCertResp : custom certificate response. +type CustomCertResp struct { + // custom certificate pack. + Result *CustomCertPack `json:"result" validate:"required"` + + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages []Tls12SettingRespMessagesItem `json:"messages" validate:"required"` +} + + +// UnmarshalCustomCertResp unmarshals an instance of CustomCertResp from the specified map of raw messages. +func UnmarshalCustomCertResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CustomCertResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalCustomCertPack) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalTls12SettingRespMessagesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DedicatedCertificatePack : dedicated certificate packs. +type DedicatedCertificatePack struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // certificate type. + Type *string `json:"type" validate:"required"` + + // host name. + Hosts []string `json:"hosts" validate:"required"` + + // certificates. + Certificates []Certificate `json:"certificates" validate:"required"` + + // primary certificate. + PrimaryCertificate interface{} `json:"primary_certificate" validate:"required"` + + // status. + Status *string `json:"status" validate:"required"` +} + + +// UnmarshalDedicatedCertificatePack unmarshals an instance of DedicatedCertificatePack from the specified map of raw messages. 
+func UnmarshalDedicatedCertificatePack(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DedicatedCertificatePack) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "hosts", &obj.Hosts) + if err != nil { + return + } + err = core.UnmarshalModel(m, "certificates", &obj.Certificates, UnmarshalCertificate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_certificate", &obj.PrimaryCertificate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DedicatedCertificateResp : certificate response. +type DedicatedCertificateResp struct { + // dedicated certificate packs. + Result *DedicatedCertificatePack `json:"result" validate:"required"` + + // result information. + ResultInfo *ResultInfo `json:"result_info" validate:"required"` + + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages []Tls12SettingRespMessagesItem `json:"messages" validate:"required"` +} + + +// UnmarshalDedicatedCertificateResp unmarshals an instance of DedicatedCertificateResp from the specified map of raw messages. +func UnmarshalDedicatedCertificateResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DedicatedCertificateResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDedicatedCertificatePack) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalResultInfo) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalTls12SettingRespMessagesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListCertificateResp : certificate response. +type ListCertificateResp struct { + // certificate packs. + Result []DedicatedCertificatePack `json:"result" validate:"required"` + + // result information. + ResultInfo *ResultInfo `json:"result_info" validate:"required"` + + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages []Tls12SettingRespMessagesItem `json:"messages" validate:"required"` +} + + +// UnmarshalListCertificateResp unmarshals an instance of ListCertificateResp from the specified map of raw messages. 
+func UnmarshalListCertificateResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListCertificateResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDedicatedCertificatePack) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalResultInfo) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalTls12SettingRespMessagesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListCustomCertsResp : custom certificate response. +type ListCustomCertsResp struct { + // custom certificate packs. + Result []CustomCertPack `json:"result" validate:"required"` + + // result information. + ResultInfo *ResultInfo `json:"result_info" validate:"required"` + + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages []Tls12SettingRespMessagesItem `json:"messages" validate:"required"` +} + + +// UnmarshalListCustomCertsResp unmarshals an instance of ListCustomCertsResp from the specified map of raw messages. +func UnmarshalListCustomCertsResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListCustomCertsResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalCustomCertPack) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalResultInfo) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalTls12SettingRespMessagesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResultInfo : result information. +type ResultInfo struct { + // page number. + Page *int64 `json:"page" validate:"required"` + + // per page count. + PerPage *int64 `json:"per_page" validate:"required"` + + // count. + Count *int64 `json:"count" validate:"required"` + + // total count. + TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalResultInfo unmarshals an instance of ResultInfo from the specified map of raw messages. +func UnmarshalResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SslSetting : ssl setting. +type SslSetting struct { + // identifier. + ID *string `json:"id" validate:"required"` + + // value. + Value *string `json:"value" validate:"required"` + + // editable. + Editable *bool `json:"editable" validate:"required"` + + // modified date. 
+ ModifiedOn *string `json:"modified_on" validate:"required"` +} + + +// UnmarshalSslSetting unmarshals an instance of SslSetting from the specified map of raw messages. +func UnmarshalSslSetting(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SslSetting) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SslSettingResp : ssl setting response. +type SslSettingResp struct { + // success. + Success *bool `json:"success" validate:"required"` + + // ssl setting. + Result *SslSetting `json:"result" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages []Tls12SettingRespMessagesItem `json:"messages" validate:"required"` +} + + +// UnmarshalSslSettingResp unmarshals an instance of SslSettingResp from the specified map of raw messages. +func UnmarshalSslSettingResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SslSettingResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalSslSetting) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalTls12SettingRespMessagesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Tls12SettingResp : tls 1.2 setting response. +type Tls12SettingResp struct { + // result. + Result *Tls12SettingRespResult `json:"result" validate:"required"` + + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages []Tls12SettingRespMessagesItem `json:"messages" validate:"required"` +} + + +// UnmarshalTls12SettingResp unmarshals an instance of Tls12SettingResp from the specified map of raw messages. +func UnmarshalTls12SettingResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Tls12SettingResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalTls12SettingRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalTls12SettingRespMessagesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Tls13SettingResp : tls 1.3 setting response. +type Tls13SettingResp struct { + // result. + Result *Tls13SettingRespResult `json:"result" validate:"required"` + + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages []Tls12SettingRespMessagesItem `json:"messages" validate:"required"` +} + + +// UnmarshalTls13SettingResp unmarshals an instance of Tls13SettingResp from the specified map of raw messages. 
+func UnmarshalTls13SettingResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Tls13SettingResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalTls13SettingRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalTls12SettingRespMessagesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UniversalSettingResp : universal setting response. +type UniversalSettingResp struct { + // result. + Result *UniversalSettingRespResult `json:"result" validate:"required"` + + // success. + Success *bool `json:"success" validate:"required"` + + // errors. + Errors [][]string `json:"errors" validate:"required"` + + // messages. + Messages []Tls12SettingRespMessagesItem `json:"messages" validate:"required"` +} + + +// UnmarshalUniversalSettingResp unmarshals an instance of UniversalSettingResp from the specified map of raw messages. +func UnmarshalUniversalSettingResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(UniversalSettingResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalUniversalSettingRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalTls12SettingRespMessagesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/transitgatewayapisv1/transit_gateway_apis_v1.go b/vendor/github.com/IBM/networking-go-sdk/transitgatewayapisv1/transit_gateway_apis_v1.go new file mode 100644 index 00000000000..8c6d1902319 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/transitgatewayapisv1/transit_gateway_apis_v1.go @@ -0,0 +1,1749 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package transitgatewayapisv1 : Operations and models for the TransitGatewayApisV1 service +package transitgatewayapisv1 + +import ( + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + "github.com/go-openapi/strfmt" + common "github.com/IBM/networking-go-sdk/common" + "reflect" +) + +// TransitGatewayApisV1 : No description provided (generated by Openapi Generator +// https://github.com/openapitools/openapi-generator) +// +// Version: __VERSION__ +type TransitGatewayApisV1 struct { + Service *core.BaseService + + // Requests the version of the API as of a date in the format `YYYY-MM-DD`. Any date up to the current date may be + // provided. Specify the current date to request the latest version. 
+ Version *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://transit.cloud.ibm.com/v1" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "transit_gateway_apis" + +// TransitGatewayApisV1Options : Service options +type TransitGatewayApisV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Requests the version of the API as of a date in the format `YYYY-MM-DD`. Any date up to the current date may be + // provided. Specify the current date to request the latest version. + Version *string `validate:"required"` +} + +// NewTransitGatewayApisV1UsingExternalConfig : constructs an instance of TransitGatewayApisV1 with passed in options and external configuration. +func NewTransitGatewayApisV1UsingExternalConfig(options *TransitGatewayApisV1Options) (transitGatewayApis *TransitGatewayApisV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + transitGatewayApis, err = NewTransitGatewayApisV1(options) + if err != nil { + return + } + + err = transitGatewayApis.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = transitGatewayApis.Service.SetServiceURL(options.URL) + } + return +} + +// NewTransitGatewayApisV1 : constructs an instance of TransitGatewayApisV1 with passed in options. +func NewTransitGatewayApisV1(options *TransitGatewayApisV1Options) (service *TransitGatewayApisV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &TransitGatewayApisV1{ + Service: baseService, + Version: options.Version, + } + + return +} + +// SetServiceURL sets the service URL +func (transitGatewayApis *TransitGatewayApisV1) SetServiceURL(url string) error { + return transitGatewayApis.Service.SetServiceURL(url) +} + +// ListTransitGateways : Retrieves all Transit Gateways +// List all the Transit Gateways in the account. User will get a list of Transit Gateways they have access to 'view'. 
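+// A minimal usage sketch, assuming IAM authentication; the API key, version
+// date, and variable names below are placeholder values:
+//
+//    transitGatewayApis, err := NewTransitGatewayApisV1(&TransitGatewayApisV1Options{
+//        Authenticator: &core.IamAuthenticator{ApiKey: "my-iam-api-key"},
+//        Version:       core.StringPtr("2020-07-28"),
+//    })
+//    if err != nil {
+//        panic(err)
+//    }
+//    gateways, response, err := transitGatewayApis.ListTransitGateways(&ListTransitGatewaysOptions{})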
+func (transitGatewayApis *TransitGatewayApisV1) ListTransitGateways(listTransitGatewaysOptions *ListTransitGatewaysOptions) (result *TransitGatewayCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listTransitGatewaysOptions, "listTransitGatewaysOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listTransitGatewaysOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "ListTransitGateways") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTransitGatewayCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateTransitGateway : Creates a Transit Gateway +// Create a Transit Gateway based on the supplied input template. +func (transitGatewayApis *TransitGatewayApisV1) CreateTransitGateway(createTransitGatewayOptions *CreateTransitGatewayOptions) (result *TransitGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createTransitGatewayOptions, "createTransitGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createTransitGatewayOptions, "createTransitGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.POST) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range createTransitGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "CreateTransitGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + body := make(map[string]interface{}) + if createTransitGatewayOptions.Location != nil { + body["location"] = createTransitGatewayOptions.Location + } + if createTransitGatewayOptions.Name != nil { + body["name"] = createTransitGatewayOptions.Name + } + if createTransitGatewayOptions.Global != nil { + body["global"] = createTransitGatewayOptions.Global + } + if createTransitGatewayOptions.ResourceGroup != nil { + body["resource_group"] = createTransitGatewayOptions.ResourceGroup + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = 
core.UnmarshalModel(rawResponse, "", &result, UnmarshalTransitGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteTransitGateway : Deletes specified Transit Gateway +// This request deletes a Transit Gateway. This operation cannot be reversed. For this request to succeed, the Transit +// Gateway must not contain connections. +func (transitGatewayApis *TransitGatewayApisV1) DeleteTransitGateway(deleteTransitGatewayOptions *DeleteTransitGatewayOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteTransitGatewayOptions, "deleteTransitGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteTransitGatewayOptions, "deleteTransitGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways"} + pathParameters := []string{*deleteTransitGatewayOptions.ID} + + builder := core.NewRequestBuilder(core.DELETE) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range deleteTransitGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "DeleteTransitGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = transitGatewayApis.Service.Request(request, nil) + + return +} + +// GetTransitGateway : Retrieves specified Transit Gateway +// This request retrieves a single Transit Gateway specified by the identifier in the URL. +func (transitGatewayApis *TransitGatewayApisV1) GetTransitGateway(getTransitGatewayOptions *GetTransitGatewayOptions) (result *TransitGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getTransitGatewayOptions, "getTransitGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getTransitGatewayOptions, "getTransitGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways"} + pathParameters := []string{*getTransitGatewayOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getTransitGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "GetTransitGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTransitGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateTransitGateway : Updates specified Transit Gateway +// This request updates a Transit Gateway's name and/or global flag. 
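+// A brief sketch of renaming a gateway (the identifier and name are
+// placeholder values):
+//
+//    updated, _, err := transitGatewayApis.UpdateTransitGateway(&UpdateTransitGatewayOptions{
+//        ID:   core.StringPtr("my-gateway-id"),
+//        Name: core.StringPtr("my-renamed-gateway"),
+//    })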
+func (transitGatewayApis *TransitGatewayApisV1) UpdateTransitGateway(updateTransitGatewayOptions *UpdateTransitGatewayOptions) (result *TransitGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateTransitGatewayOptions, "updateTransitGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateTransitGatewayOptions, "updateTransitGatewayOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways"} + pathParameters := []string{*updateTransitGatewayOptions.ID} + + builder := core.NewRequestBuilder(core.PATCH) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range updateTransitGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "UpdateTransitGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + body := make(map[string]interface{}) + if updateTransitGatewayOptions.Global != nil { + body["global"] = updateTransitGatewayOptions.Global + } + if updateTransitGatewayOptions.Name != nil { + body["name"] = updateTransitGatewayOptions.Name + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTransitGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// ListTransitGatewayConnections : Retrieves all connections in a Transit Gateway +// This request retrieves all connections in a Transit Gateway. 
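+// A brief sketch (the gateway identifier is a placeholder):
+//
+//    connections, _, err := transitGatewayApis.ListTransitGatewayConnections(&ListTransitGatewayConnectionsOptions{
+//        TransitGatewayID: core.StringPtr("my-gateway-id"),
+//    })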
+func (transitGatewayApis *TransitGatewayApisV1) ListTransitGatewayConnections(listTransitGatewayConnectionsOptions *ListTransitGatewayConnectionsOptions) (result *TransitGatewayConnectionCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listTransitGatewayConnectionsOptions, "listTransitGatewayConnectionsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listTransitGatewayConnectionsOptions, "listTransitGatewayConnectionsOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways", "connections"} + pathParameters := []string{*listTransitGatewayConnectionsOptions.TransitGatewayID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listTransitGatewayConnectionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "ListTransitGatewayConnections") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTransitGatewayConnectionCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateTransitGatewayConnection : Add connection to a Transit Gateway +// Add a connection to Transit Gateway. 
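+// A brief sketch of attaching a VPC by CRN (the gateway ID, connection name,
+// and CRN are placeholder values):
+//
+//    createOptions := transitGatewayApis.NewCreateTransitGatewayConnectionOptions(
+//        "my-gateway-id", CreateTransitGatewayConnectionOptions_NetworkType_Vpc)
+//    createOptions.SetName("my-vpc-connection")
+//    createOptions.SetNetworkID("crn:v1:bluemix:public:is:us-south:a/0123456789::vpc:my-vpc-id")
+//    connection, _, err := transitGatewayApis.CreateTransitGatewayConnection(createOptions)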
+func (transitGatewayApis *TransitGatewayApisV1) CreateTransitGatewayConnection(createTransitGatewayConnectionOptions *CreateTransitGatewayConnectionOptions) (result *TransitGatewayConnectionCust, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createTransitGatewayConnectionOptions, "createTransitGatewayConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createTransitGatewayConnectionOptions, "createTransitGatewayConnectionOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways", "connections"} + pathParameters := []string{*createTransitGatewayConnectionOptions.TransitGatewayID} + + builder := core.NewRequestBuilder(core.POST) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range createTransitGatewayConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "CreateTransitGatewayConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + body := make(map[string]interface{}) + if createTransitGatewayConnectionOptions.NetworkType != nil { + body["network_type"] = createTransitGatewayConnectionOptions.NetworkType + } + if createTransitGatewayConnectionOptions.Name != nil { + body["name"] = createTransitGatewayConnectionOptions.Name + } + if createTransitGatewayConnectionOptions.NetworkID != nil { + body["network_id"] = createTransitGatewayConnectionOptions.NetworkID + } + if createTransitGatewayConnectionOptions.NetworkAccountID != nil { + body["network_account_id"] = createTransitGatewayConnectionOptions.NetworkAccountID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTransitGatewayConnectionCust) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteTransitGatewayConnection : Remove connection from Transit Gateway +// After the specified connection is detached, entities still within the Transit Gateway will no longer be able to +// communicate directly to it through the IBM Cloud private backbone. 
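+// A brief sketch (both identifiers are placeholders):
+//
+//    deleteOptions := transitGatewayApis.NewDeleteTransitGatewayConnectionOptions(
+//        "my-gateway-id", "my-connection-id")
+//    response, err := transitGatewayApis.DeleteTransitGatewayConnection(deleteOptions)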
+func (transitGatewayApis *TransitGatewayApisV1) DeleteTransitGatewayConnection(deleteTransitGatewayConnectionOptions *DeleteTransitGatewayConnectionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteTransitGatewayConnectionOptions, "deleteTransitGatewayConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteTransitGatewayConnectionOptions, "deleteTransitGatewayConnectionOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways", "connections"} + pathParameters := []string{*deleteTransitGatewayConnectionOptions.TransitGatewayID, *deleteTransitGatewayConnectionOptions.ID} + + builder := core.NewRequestBuilder(core.DELETE) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range deleteTransitGatewayConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "DeleteTransitGatewayConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = transitGatewayApis.Service.Request(request, nil) + + return +} + +// GetTransitGatewayConnection : Retrieves specified Transit Gateway connection +// This request retrieves a connection from the Transit Gateway. +func (transitGatewayApis *TransitGatewayApisV1) GetTransitGatewayConnection(getTransitGatewayConnectionOptions *GetTransitGatewayConnectionOptions) (result *TransitGatewayConnectionCust, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getTransitGatewayConnectionOptions, "getTransitGatewayConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getTransitGatewayConnectionOptions, "getTransitGatewayConnectionOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways", "connections"} + pathParameters := []string{*getTransitGatewayConnectionOptions.TransitGatewayID, *getTransitGatewayConnectionOptions.ID} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getTransitGatewayConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "GetTransitGatewayConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTransitGatewayConnectionCust) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateTransitGatewayConnection : Updates specified Transit Gateway connection +// Update the name of a connection to a Transit Gateway. 
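+// A brief sketch of renaming a connection (identifiers and name are
+// placeholder values):
+//
+//    connection, _, err := transitGatewayApis.UpdateTransitGatewayConnection(&UpdateTransitGatewayConnectionOptions{
+//        TransitGatewayID: core.StringPtr("my-gateway-id"),
+//        ID:               core.StringPtr("my-connection-id"),
+//        Name:             core.StringPtr("my-renamed-connection"),
+//    })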
+func (transitGatewayApis *TransitGatewayApisV1) UpdateTransitGatewayConnection(updateTransitGatewayConnectionOptions *UpdateTransitGatewayConnectionOptions) (result *TransitGatewayConnectionCust, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateTransitGatewayConnectionOptions, "updateTransitGatewayConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateTransitGatewayConnectionOptions, "updateTransitGatewayConnectionOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways", "connections"} + pathParameters := []string{*updateTransitGatewayConnectionOptions.TransitGatewayID, *updateTransitGatewayConnectionOptions.ID} + + builder := core.NewRequestBuilder(core.PATCH) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range updateTransitGatewayConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "UpdateTransitGatewayConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + body := make(map[string]interface{}) + if updateTransitGatewayConnectionOptions.Name != nil { + body["name"] = updateTransitGatewayConnectionOptions.Name + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTransitGatewayConnectionCust) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateTransitGatewayConnectionActions : Perform actions on a connection for a Transit Gateway +// Allow a network owner to approve or reject a cross-account connection request. 
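+// A brief sketch of approving a pending cross-account request (both
+// identifiers are placeholders):
+//
+//    actionOptions := transitGatewayApis.NewCreateTransitGatewayConnectionActionsOptions(
+//        "my-gateway-id", "my-connection-id",
+//        CreateTransitGatewayConnectionActionsOptions_Action_Approve)
+//    response, err := transitGatewayApis.CreateTransitGatewayConnectionActions(actionOptions)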
+func (transitGatewayApis *TransitGatewayApisV1) CreateTransitGatewayConnectionActions(createTransitGatewayConnectionActionsOptions *CreateTransitGatewayConnectionActionsOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createTransitGatewayConnectionActionsOptions, "createTransitGatewayConnectionActionsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createTransitGatewayConnectionActionsOptions, "createTransitGatewayConnectionActionsOptions") + if err != nil { + return + } + + pathSegments := []string{"transit_gateways", "connections", "actions"} + pathParameters := []string{*createTransitGatewayConnectionActionsOptions.TransitGatewayID, *createTransitGatewayConnectionActionsOptions.ID} + + builder := core.NewRequestBuilder(core.POST) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range createTransitGatewayConnectionActionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "CreateTransitGatewayConnectionActions") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + body := make(map[string]interface{}) + if createTransitGatewayConnectionActionsOptions.Action != nil { + body["action"] = createTransitGatewayConnectionActionsOptions.Action + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = transitGatewayApis.Service.Request(request, nil) + + return +} + +// ListGatewayLocations : List all locations that support Transit Gateways +// List all locations that support Transit Gateways. +func (transitGatewayApis *TransitGatewayApisV1) ListGatewayLocations(listGatewayLocationsOptions *ListGatewayLocationsOptions) (result *TSCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listGatewayLocationsOptions, "listGatewayLocationsOptions") + if err != nil { + return + } + + pathSegments := []string{"locations"} + pathParameters := []string{} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range listGatewayLocationsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "ListGatewayLocations") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTSCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// GetGatewayLocation : Show the details of a given Transit Gateway location +// Get the details of a Transit Gateway Location. 
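+// A brief sketch (the location name "us-south" is a placeholder):
+//
+//    location, _, err := transitGatewayApis.GetGatewayLocation(&GetGatewayLocationOptions{
+//        Name: core.StringPtr("us-south"),
+//    })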
+func (transitGatewayApis *TransitGatewayApisV1) GetGatewayLocation(getGatewayLocationOptions *GetGatewayLocationOptions) (result *TSLocation, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getGatewayLocationOptions, "getGatewayLocationOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getGatewayLocationOptions, "getGatewayLocationOptions") + if err != nil { + return + } + + pathSegments := []string{"locations"} + pathParameters := []string{*getGatewayLocationOptions.Name} + + builder := core.NewRequestBuilder(core.GET) + _, err = builder.ConstructHTTPURL(transitGatewayApis.Service.Options.URL, pathSegments, pathParameters) + if err != nil { + return + } + + for headerName, headerValue := range getGatewayLocationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("transit_gateway_apis", "V1", "GetGatewayLocation") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*transitGatewayApis.Version)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = transitGatewayApis.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTSLocation) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateTransitGatewayConnectionActionsOptions : The CreateTransitGatewayConnectionActions options. +type CreateTransitGatewayConnectionActionsOptions struct { + // The Transit Gateway identifier. + TransitGatewayID *string `json:"transit_gateway_id" validate:"required"` + + // The connection identifier. + ID *string `json:"id" validate:"required"` + + // The action that is to be performed against the connection request. + Action *string `json:"action" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateTransitGatewayConnectionActionsOptions.Action property. +// The action that is to be performed against the connection request. 
+const ( + CreateTransitGatewayConnectionActionsOptions_Action_Approve = "approve" + CreateTransitGatewayConnectionActionsOptions_Action_Reject = "reject" +) + +// NewCreateTransitGatewayConnectionActionsOptions : Instantiate CreateTransitGatewayConnectionActionsOptions +func (*TransitGatewayApisV1) NewCreateTransitGatewayConnectionActionsOptions(transitGatewayID string, id string, action string) *CreateTransitGatewayConnectionActionsOptions { + return &CreateTransitGatewayConnectionActionsOptions{ + TransitGatewayID: core.StringPtr(transitGatewayID), + ID: core.StringPtr(id), + Action: core.StringPtr(action), + } +} + +// SetTransitGatewayID : Allow user to set TransitGatewayID +func (options *CreateTransitGatewayConnectionActionsOptions) SetTransitGatewayID(transitGatewayID string) *CreateTransitGatewayConnectionActionsOptions { + options.TransitGatewayID = core.StringPtr(transitGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *CreateTransitGatewayConnectionActionsOptions) SetID(id string) *CreateTransitGatewayConnectionActionsOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetAction : Allow user to set Action +func (options *CreateTransitGatewayConnectionActionsOptions) SetAction(action string) *CreateTransitGatewayConnectionActionsOptions { + options.Action = core.StringPtr(action) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateTransitGatewayConnectionActionsOptions) SetHeaders(param map[string]string) *CreateTransitGatewayConnectionActionsOptions { + options.Headers = param + return options +} + +// CreateTransitGatewayConnectionOptions : The CreateTransitGatewayConnection options. +type CreateTransitGatewayConnectionOptions struct { + // The Transit Gateway identifier. + TransitGatewayID *string `json:"transit_gateway_id" validate:"required"` + + // Defines what type of network is connected via this connection. + NetworkType *string `json:"network_type" validate:"required"` + + // The user-defined name for this transit gateway. If unspecified, the name will be the network name (the name of the + // VPC in the case of network type 'vpc', and the word Classic, in the case of network type 'classic'). + Name *string `json:"name,omitempty"` + + // The ID of the network being connected via this connection. This field is required for some types, such as 'vpc'. For + // network type 'vpc' this is the CRN of the VPC to be connected. This field is required to be unspecified for network + // type 'classic'. + NetworkID *string `json:"network_id,omitempty"` + + // The ID of the account which owns the network that is being connected. Generally only used if the network is in a + // different account than the gateway. + NetworkAccountID *string `json:"network_account_id,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateTransitGatewayConnectionOptions.NetworkType property. +// Defines what type of network is connected via this connection. 
+const (
+ CreateTransitGatewayConnectionOptions_NetworkType_Classic = "classic"
+ CreateTransitGatewayConnectionOptions_NetworkType_Vpc = "vpc"
+)
+
+// NewCreateTransitGatewayConnectionOptions : Instantiate CreateTransitGatewayConnectionOptions
+func (*TransitGatewayApisV1) NewCreateTransitGatewayConnectionOptions(transitGatewayID string, networkType string) *CreateTransitGatewayConnectionOptions {
+ return &CreateTransitGatewayConnectionOptions{
+ TransitGatewayID: core.StringPtr(transitGatewayID),
+ NetworkType: core.StringPtr(networkType),
+ }
+}
+
+// SetTransitGatewayID : Allow user to set TransitGatewayID
+func (options *CreateTransitGatewayConnectionOptions) SetTransitGatewayID(transitGatewayID string) *CreateTransitGatewayConnectionOptions {
+ options.TransitGatewayID = core.StringPtr(transitGatewayID)
+ return options
+}
+
+// SetNetworkType : Allow user to set NetworkType
+func (options *CreateTransitGatewayConnectionOptions) SetNetworkType(networkType string) *CreateTransitGatewayConnectionOptions {
+ options.NetworkType = core.StringPtr(networkType)
+ return options
+}
+
+// SetName : Allow user to set Name
+func (options *CreateTransitGatewayConnectionOptions) SetName(name string) *CreateTransitGatewayConnectionOptions {
+ options.Name = core.StringPtr(name)
+ return options
+}
+
+// SetNetworkID : Allow user to set NetworkID
+func (options *CreateTransitGatewayConnectionOptions) SetNetworkID(networkID string) *CreateTransitGatewayConnectionOptions {
+ options.NetworkID = core.StringPtr(networkID)
+ return options
+}
+
+// SetNetworkAccountID : Allow user to set NetworkAccountID
+func (options *CreateTransitGatewayConnectionOptions) SetNetworkAccountID(networkAccountID string) *CreateTransitGatewayConnectionOptions {
+ options.NetworkAccountID = core.StringPtr(networkAccountID)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CreateTransitGatewayConnectionOptions) SetHeaders(param map[string]string) *CreateTransitGatewayConnectionOptions {
+ options.Headers = param
+ return options
+}
+
+// CreateTransitGatewayOptions : The CreateTransitGateway options.
+type CreateTransitGatewayOptions struct {
+ // The location of the Transit Gateway.
+ Location *string `json:"location" validate:"required"`
+
+ // The name of the Transit Gateway.
+ Name *string `json:"name" validate:"required"`
+
+ // Allow global routing for a Transit Gateway. If unspecified, the default value is false.
+ Global *bool `json:"global,omitempty"`
+
+ // The resource group to use. If unspecified, the account's [default resource
+ // group](https://console.bluemix.net/apidocs/resource-manager#introduction) is used.
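+ //
+ // For example, a sketch of attaching a non-default resource group (the IDs
+ // are hypothetical placeholders):
+ //
+ //   rg, _ := transitGatewayApis.NewResourceGroupIdentity("my-resource-group-id")
+ //   gwOptions := transitGatewayApis.NewCreateTransitGatewayOptions("us-south", "my-gateway").SetResourceGroup(rg)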
+ ResourceGroup *ResourceGroupIdentity `json:"resource_group,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateTransitGatewayOptions : Instantiate CreateTransitGatewayOptions +func (*TransitGatewayApisV1) NewCreateTransitGatewayOptions(location string, name string) *CreateTransitGatewayOptions { + return &CreateTransitGatewayOptions{ + Location: core.StringPtr(location), + Name: core.StringPtr(name), + } +} + +// SetLocation : Allow user to set Location +func (options *CreateTransitGatewayOptions) SetLocation(location string) *CreateTransitGatewayOptions { + options.Location = core.StringPtr(location) + return options +} + +// SetName : Allow user to set Name +func (options *CreateTransitGatewayOptions) SetName(name string) *CreateTransitGatewayOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetGlobal : Allow user to set Global +func (options *CreateTransitGatewayOptions) SetGlobal(global bool) *CreateTransitGatewayOptions { + options.Global = core.BoolPtr(global) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateTransitGatewayOptions) SetResourceGroup(resourceGroup *ResourceGroupIdentity) *CreateTransitGatewayOptions { + options.ResourceGroup = resourceGroup + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateTransitGatewayOptions) SetHeaders(param map[string]string) *CreateTransitGatewayOptions { + options.Headers = param + return options +} + +// DeleteTransitGatewayConnectionOptions : The DeleteTransitGatewayConnection options. +type DeleteTransitGatewayConnectionOptions struct { + // The Transit Gateway identifier. + TransitGatewayID *string `json:"transit_gateway_id" validate:"required"` + + // The connection identifier. + ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteTransitGatewayConnectionOptions : Instantiate DeleteTransitGatewayConnectionOptions +func (*TransitGatewayApisV1) NewDeleteTransitGatewayConnectionOptions(transitGatewayID string, id string) *DeleteTransitGatewayConnectionOptions { + return &DeleteTransitGatewayConnectionOptions{ + TransitGatewayID: core.StringPtr(transitGatewayID), + ID: core.StringPtr(id), + } +} + +// SetTransitGatewayID : Allow user to set TransitGatewayID +func (options *DeleteTransitGatewayConnectionOptions) SetTransitGatewayID(transitGatewayID string) *DeleteTransitGatewayConnectionOptions { + options.TransitGatewayID = core.StringPtr(transitGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteTransitGatewayConnectionOptions) SetID(id string) *DeleteTransitGatewayConnectionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteTransitGatewayConnectionOptions) SetHeaders(param map[string]string) *DeleteTransitGatewayConnectionOptions { + options.Headers = param + return options +} + +// DeleteTransitGatewayOptions : The DeleteTransitGateway options. +type DeleteTransitGatewayOptions struct { + // The Transit Gateway identifier. 
+ ID *string `json:"id" validate:"required"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewDeleteTransitGatewayOptions : Instantiate DeleteTransitGatewayOptions
+func (*TransitGatewayApisV1) NewDeleteTransitGatewayOptions(id string) *DeleteTransitGatewayOptions {
+ return &DeleteTransitGatewayOptions{
+ ID: core.StringPtr(id),
+ }
+}
+
+// SetID : Allow user to set ID
+func (options *DeleteTransitGatewayOptions) SetID(id string) *DeleteTransitGatewayOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *DeleteTransitGatewayOptions) SetHeaders(param map[string]string) *DeleteTransitGatewayOptions {
+ options.Headers = param
+ return options
+}
+
+// GetGatewayLocationOptions : The GetGatewayLocation options.
+type GetGatewayLocationOptions struct {
+ // The name of the Transit Gateway location.
+ Name *string `json:"name" validate:"required"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewGetGatewayLocationOptions : Instantiate GetGatewayLocationOptions
+func (*TransitGatewayApisV1) NewGetGatewayLocationOptions(name string) *GetGatewayLocationOptions {
+ return &GetGatewayLocationOptions{
+ Name: core.StringPtr(name),
+ }
+}
+
+// SetName : Allow user to set Name
+func (options *GetGatewayLocationOptions) SetName(name string) *GetGatewayLocationOptions {
+ options.Name = core.StringPtr(name)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetGatewayLocationOptions) SetHeaders(param map[string]string) *GetGatewayLocationOptions {
+ options.Headers = param
+ return options
+}
+
+// GetTransitGatewayConnectionOptions : The GetTransitGatewayConnection options.
+type GetTransitGatewayConnectionOptions struct {
+ // The Transit Gateway identifier.
+ TransitGatewayID *string `json:"transit_gateway_id" validate:"required"`
+
+ // The connection identifier.
+ ID *string `json:"id" validate:"required"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewGetTransitGatewayConnectionOptions : Instantiate GetTransitGatewayConnectionOptions
+func (*TransitGatewayApisV1) NewGetTransitGatewayConnectionOptions(transitGatewayID string, id string) *GetTransitGatewayConnectionOptions {
+ return &GetTransitGatewayConnectionOptions{
+ TransitGatewayID: core.StringPtr(transitGatewayID),
+ ID: core.StringPtr(id),
+ }
+}
+
+// SetTransitGatewayID : Allow user to set TransitGatewayID
+func (options *GetTransitGatewayConnectionOptions) SetTransitGatewayID(transitGatewayID string) *GetTransitGatewayConnectionOptions {
+ options.TransitGatewayID = core.StringPtr(transitGatewayID)
+ return options
+}
+
+// SetID : Allow user to set ID
+func (options *GetTransitGatewayConnectionOptions) SetID(id string) *GetTransitGatewayConnectionOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetTransitGatewayConnectionOptions) SetHeaders(param map[string]string) *GetTransitGatewayConnectionOptions {
+ options.Headers = param
+ return options
+}
+
+// GetTransitGatewayOptions : The GetTransitGateway options.
+type GetTransitGatewayOptions struct {
+ // The Transit Gateway identifier.
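+ //
+ // A retrieval sketch (the ID is a hypothetical placeholder; assumes the
+ // GetTransitGateway method defined earlier in this file):
+ //
+ //   getOptions := transitGatewayApis.NewGetTransitGatewayOptions("tg-id")
+ //   gateway, _, err := transitGatewayApis.GetTransitGateway(getOptions)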
+ ID *string `json:"id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetTransitGatewayOptions : Instantiate GetTransitGatewayOptions +func (*TransitGatewayApisV1) NewGetTransitGatewayOptions(id string) *GetTransitGatewayOptions { + return &GetTransitGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetTransitGatewayOptions) SetID(id string) *GetTransitGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetTransitGatewayOptions) SetHeaders(param map[string]string) *GetTransitGatewayOptions { + options.Headers = param + return options +} + +// ListGatewayLocationsOptions : The ListGatewayLocations options. +type ListGatewayLocationsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListGatewayLocationsOptions : Instantiate ListGatewayLocationsOptions +func (*TransitGatewayApisV1) NewListGatewayLocationsOptions() *ListGatewayLocationsOptions { + return &ListGatewayLocationsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListGatewayLocationsOptions) SetHeaders(param map[string]string) *ListGatewayLocationsOptions { + options.Headers = param + return options +} + +// ListTransitGatewayConnectionsOptions : The ListTransitGatewayConnections options. +type ListTransitGatewayConnectionsOptions struct { + // The Transit Gateway identifier. + TransitGatewayID *string `json:"transit_gateway_id" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListTransitGatewayConnectionsOptions : Instantiate ListTransitGatewayConnectionsOptions +func (*TransitGatewayApisV1) NewListTransitGatewayConnectionsOptions(transitGatewayID string) *ListTransitGatewayConnectionsOptions { + return &ListTransitGatewayConnectionsOptions{ + TransitGatewayID: core.StringPtr(transitGatewayID), + } +} + +// SetTransitGatewayID : Allow user to set TransitGatewayID +func (options *ListTransitGatewayConnectionsOptions) SetTransitGatewayID(transitGatewayID string) *ListTransitGatewayConnectionsOptions { + options.TransitGatewayID = core.StringPtr(transitGatewayID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListTransitGatewayConnectionsOptions) SetHeaders(param map[string]string) *ListTransitGatewayConnectionsOptions { + options.Headers = param + return options +} + +// ListTransitGatewaysOptions : The ListTransitGateways options. +type ListTransitGatewaysOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListTransitGatewaysOptions : Instantiate ListTransitGatewaysOptions +func (*TransitGatewayApisV1) NewListTransitGatewaysOptions() *ListTransitGatewaysOptions { + return &ListTransitGatewaysOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListTransitGatewaysOptions) SetHeaders(param map[string]string) *ListTransitGatewaysOptions { + options.Headers = param + return options +} + +// ResourceGroupIdentity : The resource group to use. If unspecified, the account's [default resource +// group](https://console.bluemix.net/apidocs/resource-manager#introduction) is used. +type ResourceGroupIdentity struct { + // The unique identifier for this resource group. 
+ ID *string `json:"id" validate:"required"`
+}
+
+
+// NewResourceGroupIdentity : Instantiate ResourceGroupIdentity (Generic Model Constructor)
+func (*TransitGatewayApisV1) NewResourceGroupIdentity(id string) (model *ResourceGroupIdentity, err error) {
+ model = &ResourceGroupIdentity{
+ ID: core.StringPtr(id),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalResourceGroupIdentity unmarshals an instance of ResourceGroupIdentity from the specified map of raw messages.
+func UnmarshalResourceGroupIdentity(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ResourceGroupIdentity)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ResourceGroupReference : The resource group to use. If unspecified, the account's [default resource
+// group](https://console.bluemix.net/apidocs/resource-manager#introduction) is used.
+type ResourceGroupReference struct {
+ // The unique identifier for this resource group.
+ ID *string `json:"id" validate:"required"`
+
+ // The URL for this resource group.
+ Href *string `json:"href" validate:"required"`
+}
+
+
+// UnmarshalResourceGroupReference unmarshals an instance of ResourceGroupReference from the specified map of raw messages.
+func UnmarshalResourceGroupReference(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ResourceGroupReference)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TSCollection : A list of Transit Gateway locations.
+type TSCollection struct {
+ // Collection of Transit Gateway locations.
+ Locations []TSLocationBasic `json:"locations" validate:"required"`
+}
+
+
+// UnmarshalTSCollection unmarshals an instance of TSCollection from the specified map of raw messages.
+func UnmarshalTSCollection(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TSCollection)
+ err = core.UnmarshalModel(m, "locations", &obj.Locations, UnmarshalTSLocationBasic)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TSLocalLocation : Details of a local connection location.
+type TSLocalLocation struct {
+ // A descriptive display name for the location.
+ DisplayName *string `json:"display_name" validate:"required"`
+
+ // The name of the location.
+ Name *string `json:"name" validate:"required"`
+
+ // The type of the location, determining whether this is a multi-zone region, a single data center, or a point of presence.
+ Type *string `json:"type" validate:"required"`
+}
+
+// Constants associated with the TSLocalLocation.Type property.
+// The type of the location, determining whether this is a multi-zone region, a single data center, or a point of presence.
+const (
+ TSLocalLocation_Type_Region = "region"
+)
+
+
+// UnmarshalTSLocalLocation unmarshals an instance of TSLocalLocation from the specified map of raw messages.
+func UnmarshalTSLocalLocation(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TSLocalLocation)
+ err = core.UnmarshalPrimitive(m, "display_name", &obj.DisplayName)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TSLocation : Details of a Transit Gateway location.
+type TSLocation struct {
+ // The geographical area of this location, used for billing purposes.
+ BillingLocation *string `json:"billing_location" validate:"required"`
+
+ // The name of the location.
+ Name *string `json:"name" validate:"required"`
+
+ // The type of the location, determining whether this is a multi-zone region, a single data center, or a point of presence.
+ Type *string `json:"type" validate:"required"`
+
+ // The set of network locations that are considered local for this Transit Gateway location.
+ LocalConnectionLocations []TSLocalLocation `json:"local_connection_locations" validate:"required"`
+}
+
+
+// UnmarshalTSLocation unmarshals an instance of TSLocation from the specified map of raw messages.
+func UnmarshalTSLocation(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TSLocation)
+ err = core.UnmarshalPrimitive(m, "billing_location", &obj.BillingLocation)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "local_connection_locations", &obj.LocalConnectionLocations, UnmarshalTSLocalLocation)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TSLocationBasic : Details of a Transit Gateway location.
+type TSLocationBasic struct {
+ // The geographical area of this location, used for billing purposes.
+ BillingLocation *string `json:"billing_location" validate:"required"`
+
+ // The name of the location.
+ Name *string `json:"name" validate:"required"`
+
+ // The type of the location, determining whether this is a multi-zone region, a single data center, or a point of presence.
+ Type *string `json:"type" validate:"required"`
+}
+
+
+// UnmarshalTSLocationBasic unmarshals an instance of TSLocationBasic from the specified map of raw messages.
+func UnmarshalTSLocationBasic(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TSLocationBasic)
+ err = core.UnmarshalPrimitive(m, "billing_location", &obj.BillingLocation)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TransitGateway : Details of a Transit Gateway.
+type TransitGateway struct {
+ // The unique identifier for this Transit Gateway.
+ ID *string `json:"id" validate:"required"`
+
+ // The CRN for this Transit Gateway.
+ Crn *string `json:"crn" validate:"required"`
+
+ // A human-readable name for the transit gateway.
+ Name *string `json:"name" validate:"required"`
+
+ // The location of the Transit Gateway.
+ Location *string `json:"location" validate:"required"`
+
+ // The date and time that this gateway was created.
+ CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"`
+
+ // Allow global routing for a Transit Gateway.
+ Global *bool `json:"global" validate:"required"`
+
+ // The resource group to use. If unspecified, the account's [default resource
+ // group](https://console.bluemix.net/apidocs/resource-manager#introduction) is used.
+ ResourceGroup *ResourceGroupReference `json:"resource_group,omitempty"`
+
+ // The status of the Transit Gateway.
+ Status *string `json:"status" validate:"required"`
+
+ // The date and time that this gateway was last updated.
+ UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"`
+}
+
+// Constants associated with the TransitGateway.Status property.
+// The status of the Transit Gateway.
+const (
+ TransitGateway_Status_Available = "available"
+ TransitGateway_Status_Deleting = "deleting"
+ TransitGateway_Status_Failed = "failed"
+ TransitGateway_Status_Pending = "pending"
+)
+
+
+// UnmarshalTransitGateway unmarshals an instance of TransitGateway from the specified map of raw messages.
+func UnmarshalTransitGateway(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TransitGateway)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "crn", &obj.Crn)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "location", &obj.Location)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "global", &obj.Global)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "status", &obj.Status)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TransitGatewayCollection : A list of Transit Gateways.
+type TransitGatewayCollection struct {
+ // Collection of Transit Gateways.
+ TransitGateways []TransitGateway `json:"transit_gateways" validate:"required"`
+}
+
+
+// UnmarshalTransitGatewayCollection unmarshals an instance of TransitGatewayCollection from the specified map of raw messages.
+func UnmarshalTransitGatewayCollection(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TransitGatewayCollection)
+ err = core.UnmarshalModel(m, "transit_gateways", &obj.TransitGateways, UnmarshalTransitGateway)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TransitGatewayConnectionCollection : A set of Transit Gateway network connections.
+type TransitGatewayConnectionCollection struct {
+ // Array of Transit Gateway network connections.
+ Connections []TransitGatewayConnectionCust `json:"connections" validate:"required"`
+}
+
+
+// UnmarshalTransitGatewayConnectionCollection unmarshals an instance of TransitGatewayConnectionCollection from the specified map of raw messages.
+func UnmarshalTransitGatewayConnectionCollection(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TransitGatewayConnectionCollection)
+ err = core.UnmarshalModel(m, "connections", &obj.Connections, UnmarshalTransitGatewayConnectionCust)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TransitGatewayConnectionCust : Connection included in transit gateway.
+type TransitGatewayConnectionCust struct {
+ // The user-defined name for this transit gateway connection. If unspecified, the name will be the network name (the
+ // name of the VPC in the case of network type 'vpc', and the word 'Classic' in the case of network type 'classic').
+ Name *string `json:"name,omitempty"`
+
+ // The ID of the network being connected via this connection. This field is required for some types, such as 'vpc'. For
+ // network type 'vpc' this is the CRN of the VPC to be connected. This field must be omitted for network type
+ // 'classic'.
+ NetworkID *string `json:"network_id,omitempty"`
+
+ // Defines what type of network is connected via this connection.
+ NetworkType *string `json:"network_type" validate:"required"`
+
+ // The ID of the account which owns the network that is being connected. Generally only used if the network is in a
+ // different account than the gateway.
+ NetworkAccountID *string `json:"network_account_id,omitempty"`
+
+ // The unique identifier for this Transit Gateway Connection to Network (vpc/classic).
+ ID *string `json:"id" validate:"required"`
+
+ // The date and time that this connection was created.
+ CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"`
+
+ // Only visible for cross-account connections, this field represents the status of the request to connect the given
+ // network between accounts.
+ RequestStatus *string `json:"request_status,omitempty"`
+
+ // The current configuration state of this connection.
+ Status *string `json:"status,omitempty"`
+
+ // The date and time that this connection was last updated.
+ UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"`
+}
+
+// Constants associated with the TransitGatewayConnectionCust.NetworkType property.
+// Defines what type of network is connected via this connection.
+const (
+ TransitGatewayConnectionCust_NetworkType_Classic = "classic"
+ TransitGatewayConnectionCust_NetworkType_Vpc = "vpc"
+)
+
+// Constants associated with the TransitGatewayConnectionCust.RequestStatus property.
+// Only visible for cross-account connections, this field represents the status of the request to connect the given
+// network between accounts.
+const (
+ TransitGatewayConnectionCust_RequestStatus_Approved = "approved"
+ TransitGatewayConnectionCust_RequestStatus_Detached = "detached"
+ TransitGatewayConnectionCust_RequestStatus_Expired = "expired"
+ TransitGatewayConnectionCust_RequestStatus_Pending = "pending"
+ TransitGatewayConnectionCust_RequestStatus_Rejected = "rejected"
+)
+
+// Constants associated with the TransitGatewayConnectionCust.Status property.
+// The current configuration state of this connection.
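+//
+// For example, a caller might check whether a connection has finished
+// attaching (a sketch; the IDs are hypothetical placeholders and the
+// GetTransitGatewayConnection method is assumed to be defined earlier in this
+// file):
+//
+//   conn, _, err := transitGatewayApis.GetTransitGatewayConnection(
+//     transitGatewayApis.NewGetTransitGatewayConnectionOptions("tg-id", "conn-id"))
+//   if err == nil && conn.Status != nil && *conn.Status == TransitGatewayConnectionCust_Status_Attached {
+//     // connection is ready to carry traffic
+//   }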
+const (
+ TransitGatewayConnectionCust_Status_Attached = "attached"
+ TransitGatewayConnectionCust_Status_Deleting = "deleting"
+ TransitGatewayConnectionCust_Status_Detached = "detached"
+ TransitGatewayConnectionCust_Status_Detaching = "detaching"
+ TransitGatewayConnectionCust_Status_Failed = "failed"
+ TransitGatewayConnectionCust_Status_Pending = "pending"
+)
+
+
+// UnmarshalTransitGatewayConnectionCust unmarshals an instance of TransitGatewayConnectionCust from the specified map of raw messages.
+func UnmarshalTransitGatewayConnectionCust(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TransitGatewayConnectionCust)
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "network_id", &obj.NetworkID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "network_type", &obj.NetworkType)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "network_account_id", &obj.NetworkAccountID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "request_status", &obj.RequestStatus)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "status", &obj.Status)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// UpdateTransitGatewayConnectionOptions : The UpdateTransitGatewayConnection options.
+type UpdateTransitGatewayConnectionOptions struct {
+ // The Transit Gateway identifier.
+ TransitGatewayID *string `json:"transit_gateway_id" validate:"required"`
+
+ // The connection identifier.
+ ID *string `json:"id" validate:"required"`
+
+ // The user-defined name for this transit gateway connection. If specified as an empty string or nil, the name will be
+ // the network name (the name of the VPC in the case of network type 'vpc', and the word 'Classic' in the case of
+ // network type 'classic').
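+ //
+ // A renaming sketch (the IDs are hypothetical placeholders; assumes the
+ // UpdateTransitGatewayConnection method defined earlier in this file):
+ //
+ //   updOptions := transitGatewayApis.NewUpdateTransitGatewayConnectionOptions("tg-id", "conn-id").SetName("renamed-connection")
+ //   connection, _, err := transitGatewayApis.UpdateTransitGatewayConnection(updOptions)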
+ Name *string `json:"name,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateTransitGatewayConnectionOptions : Instantiate UpdateTransitGatewayConnectionOptions +func (*TransitGatewayApisV1) NewUpdateTransitGatewayConnectionOptions(transitGatewayID string, id string) *UpdateTransitGatewayConnectionOptions { + return &UpdateTransitGatewayConnectionOptions{ + TransitGatewayID: core.StringPtr(transitGatewayID), + ID: core.StringPtr(id), + } +} + +// SetTransitGatewayID : Allow user to set TransitGatewayID +func (options *UpdateTransitGatewayConnectionOptions) SetTransitGatewayID(transitGatewayID string) *UpdateTransitGatewayConnectionOptions { + options.TransitGatewayID = core.StringPtr(transitGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateTransitGatewayConnectionOptions) SetID(id string) *UpdateTransitGatewayConnectionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateTransitGatewayConnectionOptions) SetName(name string) *UpdateTransitGatewayConnectionOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateTransitGatewayConnectionOptions) SetHeaders(param map[string]string) *UpdateTransitGatewayConnectionOptions { + options.Headers = param + return options +} + +// UpdateTransitGatewayOptions : The UpdateTransitGateway options. +type UpdateTransitGatewayOptions struct { + // The Transit Gateway identifier. + ID *string `json:"id" validate:"required"` + + // Allow global routing for a Transit Gateway. + Global *bool `json:"global,omitempty"` + + // The user-defined name for this transit gateway. + Name *string `json:"name,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateTransitGatewayOptions : Instantiate UpdateTransitGatewayOptions +func (*TransitGatewayApisV1) NewUpdateTransitGatewayOptions(id string) *UpdateTransitGatewayOptions { + return &UpdateTransitGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *UpdateTransitGatewayOptions) SetID(id string) *UpdateTransitGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetGlobal : Allow user to set Global +func (options *UpdateTransitGatewayOptions) SetGlobal(global bool) *UpdateTransitGatewayOptions { + options.Global = core.BoolPtr(global) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateTransitGatewayOptions) SetName(name string) *UpdateTransitGatewayOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateTransitGatewayOptions) SetHeaders(param map[string]string) *UpdateTransitGatewayOptions { + options.Headers = param + return options +} diff --git a/vendor/github.com/IBM/networking-go-sdk/useragentblockingrulesv1/user_agent_blocking_rules_v1.go b/vendor/github.com/IBM/networking-go-sdk/useragentblockingrulesv1/user_agent_blocking_rules_v1.go new file mode 100644 index 00000000000..ec4a83e9043 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/useragentblockingrulesv1/user_agent_blocking_rules_v1.go @@ -0,0 +1,1045 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package useragentblockingrulesv1 : Operations and models for the UserAgentBlockingRulesV1 service +package useragentblockingrulesv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// UserAgentBlockingRulesV1 : User-Agent Blocking Rules +// +// Version: 1.0.1 +type UserAgentBlockingRulesV1 struct { + Service *core.BaseService + + // Full crn of the service instance. + Crn *string + + // Zone identifier (zone id). + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "user_agent_blocking_rules" + +// UserAgentBlockingRulesV1Options : Service options +type UserAgentBlockingRulesV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full crn of the service instance. + Crn *string `validate:"required"` + + // Zone identifier (zone id). + ZoneIdentifier *string `validate:"required"` +} + +// NewUserAgentBlockingRulesV1UsingExternalConfig : constructs an instance of UserAgentBlockingRulesV1 with passed in options and external configuration. +func NewUserAgentBlockingRulesV1UsingExternalConfig(options *UserAgentBlockingRulesV1Options) (userAgentBlockingRules *UserAgentBlockingRulesV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + userAgentBlockingRules, err = NewUserAgentBlockingRulesV1(options) + if err != nil { + return + } + + err = userAgentBlockingRules.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = userAgentBlockingRules.Service.SetServiceURL(options.URL) + } + return +} + +// NewUserAgentBlockingRulesV1 : constructs an instance of UserAgentBlockingRulesV1 with passed in options. 
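+//
+// A construction sketch (the API key, CRN, and zone ID are hypothetical
+// placeholders; core.IamAuthenticator comes from the go-sdk-core package
+// imported above):
+//
+//   service, err := NewUserAgentBlockingRulesV1(&UserAgentBlockingRulesV1Options{
+//     Authenticator:  &core.IamAuthenticator{ApiKey: "my-api-key"},
+//     Crn:            core.StringPtr("crn:v1:bluemix:public:internet-svcs:global:a/0123456789:instance-id::"),
+//     ZoneIdentifier: core.StringPtr("my-zone-id"),
+//   })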
+func NewUserAgentBlockingRulesV1(options *UserAgentBlockingRulesV1Options) (service *UserAgentBlockingRulesV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &UserAgentBlockingRulesV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "userAgentBlockingRules" suitable for processing requests. +func (userAgentBlockingRules *UserAgentBlockingRulesV1) Clone() *UserAgentBlockingRulesV1 { + if core.IsNil(userAgentBlockingRules) { + return nil + } + clone := *userAgentBlockingRules + clone.Service = userAgentBlockingRules.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (userAgentBlockingRules *UserAgentBlockingRulesV1) SetServiceURL(url string) error { + return userAgentBlockingRules.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (userAgentBlockingRules *UserAgentBlockingRulesV1) GetServiceURL() string { + return userAgentBlockingRules.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (userAgentBlockingRules *UserAgentBlockingRulesV1) SetDefaultHeaders(headers http.Header) { + userAgentBlockingRules.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (userAgentBlockingRules *UserAgentBlockingRulesV1) SetEnableGzipCompression(enableGzip bool) { + userAgentBlockingRules.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (userAgentBlockingRules *UserAgentBlockingRulesV1) GetEnableGzipCompression() bool { + return userAgentBlockingRules.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (userAgentBlockingRules *UserAgentBlockingRulesV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + userAgentBlockingRules.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (userAgentBlockingRules *UserAgentBlockingRulesV1) DisableRetries() { + userAgentBlockingRules.Service.DisableRetries() +} + +// ListAllZoneUserAgentRules : List all user-agent blocking rules +// List all user agent blocking rules. 
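+//
+// A paging sketch (the page size shown is arbitrary; "service" is a
+// hypothetical *UserAgentBlockingRulesV1 instance):
+//
+//   listOptions := service.NewListAllZoneUserAgentRulesOptions().SetPage(1).SetPerPage(20)
+//   rules, _, err := service.ListAllZoneUserAgentRules(listOptions)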
+func (userAgentBlockingRules *UserAgentBlockingRulesV1) ListAllZoneUserAgentRules(listAllZoneUserAgentRulesOptions *ListAllZoneUserAgentRulesOptions) (result *ListUseragentRulesResp, response *core.DetailedResponse, err error) { + return userAgentBlockingRules.ListAllZoneUserAgentRulesWithContext(context.Background(), listAllZoneUserAgentRulesOptions) +} + +// ListAllZoneUserAgentRulesWithContext is an alternate form of the ListAllZoneUserAgentRules method which supports a Context parameter +func (userAgentBlockingRules *UserAgentBlockingRulesV1) ListAllZoneUserAgentRulesWithContext(ctx context.Context, listAllZoneUserAgentRulesOptions *ListAllZoneUserAgentRulesOptions) (result *ListUseragentRulesResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAllZoneUserAgentRulesOptions, "listAllZoneUserAgentRulesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *userAgentBlockingRules.Crn, + "zone_identifier": *userAgentBlockingRules.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = userAgentBlockingRules.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(userAgentBlockingRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/ua_rules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listAllZoneUserAgentRulesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("user_agent_blocking_rules", "V1", "ListAllZoneUserAgentRules") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listAllZoneUserAgentRulesOptions.Page != nil { + builder.AddQuery("page", fmt.Sprint(*listAllZoneUserAgentRulesOptions.Page)) + } + if listAllZoneUserAgentRulesOptions.PerPage != nil { + builder.AddQuery("per_page", fmt.Sprint(*listAllZoneUserAgentRulesOptions.PerPage)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = userAgentBlockingRules.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListUseragentRulesResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateZoneUserAgentRule : Create user-agent blocking rule +// Create a new user-agent blocking rule for a given zone under a service instance. 
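+//
+// A creation sketch ("service" is a hypothetical *UserAgentBlockingRulesV1
+// instance and the User-Agent string is an example value):
+//
+//   config, _ := service.NewUseragentRuleInputConfiguration("ua", "BadBot/1.0")
+//   createOptions := service.NewCreateZoneUserAgentRuleOptions().
+//     SetMode("block").
+//     SetConfiguration(config).
+//     SetDescription("Block a misbehaving crawler")
+//   rule, _, err := service.CreateZoneUserAgentRule(createOptions)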
+func (userAgentBlockingRules *UserAgentBlockingRulesV1) CreateZoneUserAgentRule(createZoneUserAgentRuleOptions *CreateZoneUserAgentRuleOptions) (result *UseragentRuleResp, response *core.DetailedResponse, err error) { + return userAgentBlockingRules.CreateZoneUserAgentRuleWithContext(context.Background(), createZoneUserAgentRuleOptions) +} + +// CreateZoneUserAgentRuleWithContext is an alternate form of the CreateZoneUserAgentRule method which supports a Context parameter +func (userAgentBlockingRules *UserAgentBlockingRulesV1) CreateZoneUserAgentRuleWithContext(ctx context.Context, createZoneUserAgentRuleOptions *CreateZoneUserAgentRuleOptions) (result *UseragentRuleResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createZoneUserAgentRuleOptions, "createZoneUserAgentRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *userAgentBlockingRules.Crn, + "zone_identifier": *userAgentBlockingRules.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = userAgentBlockingRules.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(userAgentBlockingRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/ua_rules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createZoneUserAgentRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("user_agent_blocking_rules", "V1", "CreateZoneUserAgentRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createZoneUserAgentRuleOptions.Paused != nil { + body["paused"] = createZoneUserAgentRuleOptions.Paused + } + if createZoneUserAgentRuleOptions.Description != nil { + body["description"] = createZoneUserAgentRuleOptions.Description + } + if createZoneUserAgentRuleOptions.Mode != nil { + body["mode"] = createZoneUserAgentRuleOptions.Mode + } + if createZoneUserAgentRuleOptions.Configuration != nil { + body["configuration"] = createZoneUserAgentRuleOptions.Configuration + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = userAgentBlockingRules.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalUseragentRuleResp) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteZoneUserAgentRule : Delete user-agent blocking rule +// Delete a user-agent blocking rule for a particular zone, given its id. 
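+//
+// A deletion sketch (the rule identifier is a hypothetical placeholder and
+// "service" is a hypothetical *UserAgentBlockingRulesV1 instance):
+//
+//   deleteOptions := service.NewDeleteZoneUserAgentRuleOptions("rule-id")
+//   result, _, err := service.DeleteZoneUserAgentRule(deleteOptions)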
+func (userAgentBlockingRules *UserAgentBlockingRulesV1) DeleteZoneUserAgentRule(deleteZoneUserAgentRuleOptions *DeleteZoneUserAgentRuleOptions) (result *DeleteUseragentRuleResp, response *core.DetailedResponse, err error) { + return userAgentBlockingRules.DeleteZoneUserAgentRuleWithContext(context.Background(), deleteZoneUserAgentRuleOptions) +} + +// DeleteZoneUserAgentRuleWithContext is an alternate form of the DeleteZoneUserAgentRule method which supports a Context parameter +func (userAgentBlockingRules *UserAgentBlockingRulesV1) DeleteZoneUserAgentRuleWithContext(ctx context.Context, deleteZoneUserAgentRuleOptions *DeleteZoneUserAgentRuleOptions) (result *DeleteUseragentRuleResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteZoneUserAgentRuleOptions, "deleteZoneUserAgentRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteZoneUserAgentRuleOptions, "deleteZoneUserAgentRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *userAgentBlockingRules.Crn, + "zone_identifier": *userAgentBlockingRules.ZoneIdentifier, + "useragent_rule_identifier": *deleteZoneUserAgentRuleOptions.UseragentRuleIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = userAgentBlockingRules.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(userAgentBlockingRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/ua_rules/{useragent_rule_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteZoneUserAgentRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("user_agent_blocking_rules", "V1", "DeleteZoneUserAgentRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = userAgentBlockingRules.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteUseragentRuleResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetUserAgentRule : Get user-agent blocking rule +// For a given service instance, zone id and user-agent rule id, get the user-agent blocking rule details. 
+func (userAgentBlockingRules *UserAgentBlockingRulesV1) GetUserAgentRule(getUserAgentRuleOptions *GetUserAgentRuleOptions) (result *UseragentRuleResp, response *core.DetailedResponse, err error) { + return userAgentBlockingRules.GetUserAgentRuleWithContext(context.Background(), getUserAgentRuleOptions) +} + +// GetUserAgentRuleWithContext is an alternate form of the GetUserAgentRule method which supports a Context parameter +func (userAgentBlockingRules *UserAgentBlockingRulesV1) GetUserAgentRuleWithContext(ctx context.Context, getUserAgentRuleOptions *GetUserAgentRuleOptions) (result *UseragentRuleResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getUserAgentRuleOptions, "getUserAgentRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getUserAgentRuleOptions, "getUserAgentRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *userAgentBlockingRules.Crn, + "zone_identifier": *userAgentBlockingRules.ZoneIdentifier, + "useragent_rule_identifier": *getUserAgentRuleOptions.UseragentRuleIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = userAgentBlockingRules.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(userAgentBlockingRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/ua_rules/{useragent_rule_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getUserAgentRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("user_agent_blocking_rules", "V1", "GetUserAgentRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = userAgentBlockingRules.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalUseragentRuleResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateUserAgentRule : Update user-agent blocking rule +// Update an existing user-agent blocking rule for a given zone under a given service instance. 
+func (userAgentBlockingRules *UserAgentBlockingRulesV1) UpdateUserAgentRule(updateUserAgentRuleOptions *UpdateUserAgentRuleOptions) (result *UseragentRuleResp, response *core.DetailedResponse, err error) {
+ return userAgentBlockingRules.UpdateUserAgentRuleWithContext(context.Background(), updateUserAgentRuleOptions)
+}
+
+// UpdateUserAgentRuleWithContext is an alternate form of the UpdateUserAgentRule method which supports a Context parameter
+func (userAgentBlockingRules *UserAgentBlockingRulesV1) UpdateUserAgentRuleWithContext(ctx context.Context, updateUserAgentRuleOptions *UpdateUserAgentRuleOptions) (result *UseragentRuleResp, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(updateUserAgentRuleOptions, "updateUserAgentRuleOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(updateUserAgentRuleOptions, "updateUserAgentRuleOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *userAgentBlockingRules.Crn,
+ "zone_identifier": *userAgentBlockingRules.ZoneIdentifier,
+ "useragent_rule_identifier": *updateUserAgentRuleOptions.UseragentRuleIdentifier,
+ }
+
+ builder := core.NewRequestBuilder(core.PUT)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = userAgentBlockingRules.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(userAgentBlockingRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/ua_rules/{useragent_rule_identifier}`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range updateUserAgentRuleOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("user_agent_blocking_rules", "V1", "UpdateUserAgentRule")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ builder.AddHeader("Content-Type", "application/json")
+
+ body := make(map[string]interface{})
+ if updateUserAgentRuleOptions.Paused != nil {
+ body["paused"] = updateUserAgentRuleOptions.Paused
+ }
+ if updateUserAgentRuleOptions.Description != nil {
+ body["description"] = updateUserAgentRuleOptions.Description
+ }
+ if updateUserAgentRuleOptions.Mode != nil {
+ body["mode"] = updateUserAgentRuleOptions.Mode
+ }
+ if updateUserAgentRuleOptions.Configuration != nil {
+ body["configuration"] = updateUserAgentRuleOptions.Configuration
+ }
+ _, err = builder.SetBodyContentJSON(body)
+ if err != nil {
+ return
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = userAgentBlockingRules.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalUseragentRuleResp)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// CreateZoneUserAgentRuleOptions : The CreateZoneUserAgentRule options.
+type CreateZoneUserAgentRuleOptions struct {
+ // Whether this user-agent rule is currently disabled.
+ Paused *bool `json:"paused,omitempty"`
+
+ // Some useful information about this rule to help identify its purpose.
+ Description *string `json:"description,omitempty"`
+
+ // The type of action to perform.
+ Mode *string `json:"mode,omitempty"`
+
+ // Target/Value pair to use for this rule. The value is the exact UserAgent to match.
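+ //
+ // A sketch of building the pair with the generic model constructor defined
+ // later in this file (the value is an example):
+ //
+ //   config, err := service.NewUseragentRuleInputConfiguration("ua", "Mozilla/5.0 (compatible; BadBot/1.0)")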
+ Configuration *UseragentRuleInputConfiguration `json:"configuration,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateZoneUserAgentRuleOptions.Mode property. +// The type of action to perform. +const ( + CreateZoneUserAgentRuleOptions_Mode_Block = "block" + CreateZoneUserAgentRuleOptions_Mode_Challenge = "challenge" + CreateZoneUserAgentRuleOptions_Mode_JsChallenge = "js_challenge" +) + +// NewCreateZoneUserAgentRuleOptions : Instantiate CreateZoneUserAgentRuleOptions +func (*UserAgentBlockingRulesV1) NewCreateZoneUserAgentRuleOptions() *CreateZoneUserAgentRuleOptions { + return &CreateZoneUserAgentRuleOptions{} +} + +// SetPaused : Allow user to set Paused +func (options *CreateZoneUserAgentRuleOptions) SetPaused(paused bool) *CreateZoneUserAgentRuleOptions { + options.Paused = core.BoolPtr(paused) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateZoneUserAgentRuleOptions) SetDescription(description string) *CreateZoneUserAgentRuleOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetMode : Allow user to set Mode +func (options *CreateZoneUserAgentRuleOptions) SetMode(mode string) *CreateZoneUserAgentRuleOptions { + options.Mode = core.StringPtr(mode) + return options +} + +// SetConfiguration : Allow user to set Configuration +func (options *CreateZoneUserAgentRuleOptions) SetConfiguration(configuration *UseragentRuleInputConfiguration) *CreateZoneUserAgentRuleOptions { + options.Configuration = configuration + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateZoneUserAgentRuleOptions) SetHeaders(param map[string]string) *CreateZoneUserAgentRuleOptions { + options.Headers = param + return options +} + +// DeleteUseragentRuleRespResult : Container for response information. +type DeleteUseragentRuleRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` +} + + +// UnmarshalDeleteUseragentRuleRespResult unmarshals an instance of DeleteUseragentRuleRespResult from the specified map of raw messages. +func UnmarshalDeleteUseragentRuleRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteUseragentRuleRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DeleteZoneUserAgentRuleOptions : The DeleteZoneUserAgentRule options. +type DeleteZoneUserAgentRuleOptions struct { + // Identifier of the user-agent rule to be deleted. 
+ UseragentRuleIdentifier *string `json:"useragent_rule_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteZoneUserAgentRuleOptions : Instantiate DeleteZoneUserAgentRuleOptions +func (*UserAgentBlockingRulesV1) NewDeleteZoneUserAgentRuleOptions(useragentRuleIdentifier string) *DeleteZoneUserAgentRuleOptions { + return &DeleteZoneUserAgentRuleOptions{ + UseragentRuleIdentifier: core.StringPtr(useragentRuleIdentifier), + } +} + +// SetUseragentRuleIdentifier : Allow user to set UseragentRuleIdentifier +func (options *DeleteZoneUserAgentRuleOptions) SetUseragentRuleIdentifier(useragentRuleIdentifier string) *DeleteZoneUserAgentRuleOptions { + options.UseragentRuleIdentifier = core.StringPtr(useragentRuleIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteZoneUserAgentRuleOptions) SetHeaders(param map[string]string) *DeleteZoneUserAgentRuleOptions { + options.Headers = param + return options +} + +// GetUserAgentRuleOptions : The GetUserAgentRule options. +type GetUserAgentRuleOptions struct { + // Identifier of user-agent blocking rule for the given zone. + UseragentRuleIdentifier *string `json:"useragent_rule_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetUserAgentRuleOptions : Instantiate GetUserAgentRuleOptions +func (*UserAgentBlockingRulesV1) NewGetUserAgentRuleOptions(useragentRuleIdentifier string) *GetUserAgentRuleOptions { + return &GetUserAgentRuleOptions{ + UseragentRuleIdentifier: core.StringPtr(useragentRuleIdentifier), + } +} + +// SetUseragentRuleIdentifier : Allow user to set UseragentRuleIdentifier +func (options *GetUserAgentRuleOptions) SetUseragentRuleIdentifier(useragentRuleIdentifier string) *GetUserAgentRuleOptions { + options.UseragentRuleIdentifier = core.StringPtr(useragentRuleIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetUserAgentRuleOptions) SetHeaders(param map[string]string) *GetUserAgentRuleOptions { + options.Headers = param + return options +} + +// ListAllZoneUserAgentRulesOptions : The ListAllZoneUserAgentRules options. +type ListAllZoneUserAgentRulesOptions struct { + // Page number of paginated results. + Page *int64 `json:"page,omitempty"` + + // Maximum number of user-agent rules per page. + PerPage *int64 `json:"per_page,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListAllZoneUserAgentRulesOptions : Instantiate ListAllZoneUserAgentRulesOptions +func (*UserAgentBlockingRulesV1) NewListAllZoneUserAgentRulesOptions() *ListAllZoneUserAgentRulesOptions { + return &ListAllZoneUserAgentRulesOptions{} +} + +// SetPage : Allow user to set Page +func (options *ListAllZoneUserAgentRulesOptions) SetPage(page int64) *ListAllZoneUserAgentRulesOptions { + options.Page = core.Int64Ptr(page) + return options +} + +// SetPerPage : Allow user to set PerPage +func (options *ListAllZoneUserAgentRulesOptions) SetPerPage(perPage int64) *ListAllZoneUserAgentRulesOptions { + options.PerPage = core.Int64Ptr(perPage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListAllZoneUserAgentRulesOptions) SetHeaders(param map[string]string) *ListAllZoneUserAgentRulesOptions { + options.Headers = param + return options +} + +// ListUseragentRulesRespResultInfo : Statistics of results. 
+type ListUseragentRulesRespResultInfo struct { + // Page number. + Page *int64 `json:"page" validate:"required"` + + // Number of results per page. + PerPage *int64 `json:"per_page" validate:"required"` + + // Number of results. + Count *int64 `json:"count" validate:"required"` + + // Total number of results. + TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalListUseragentRulesRespResultInfo unmarshals an instance of ListUseragentRulesRespResultInfo from the specified map of raw messages. +func UnmarshalListUseragentRulesRespResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListUseragentRulesRespResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateUserAgentRuleOptions : The UpdateUserAgentRule options. +type UpdateUserAgentRuleOptions struct { + // Identifier of user-agent rule. + UseragentRuleIdentifier *string `json:"useragent_rule_identifier" validate:"required,ne="` + + // Whether this user-agent rule is currently disabled. + Paused *bool `json:"paused,omitempty"` + + // Some useful information about this rule to help identify the purpose of it. + Description *string `json:"description,omitempty"` + + // The type of action to perform. + Mode *string `json:"mode,omitempty"` + + // Target/Value pair to use for this rule. The value is the exact UserAgent to match. + Configuration *UseragentRuleInputConfiguration `json:"configuration,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateUserAgentRuleOptions.Mode property. +// The type of action to perform. 
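+//
+// A minimal sketch of switching a rule to js_challenge (the identifier is a
+// placeholder and service is assumed to be a configured
+// UserAgentBlockingRulesV1 instance):
+//
+//   opts := service.NewUpdateUserAgentRuleOptions("<rule-id>").
+//       SetMode(UpdateUserAgentRuleOptions_Mode_JsChallenge).
+//       SetPaused(false)
+//   result, _, err := service.UpdateUserAgentRule(opts)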
+const (
+ UpdateUserAgentRuleOptions_Mode_Block = "block"
+ UpdateUserAgentRuleOptions_Mode_Challenge = "challenge"
+ UpdateUserAgentRuleOptions_Mode_JsChallenge = "js_challenge"
+)
+
+// NewUpdateUserAgentRuleOptions : Instantiate UpdateUserAgentRuleOptions
+func (*UserAgentBlockingRulesV1) NewUpdateUserAgentRuleOptions(useragentRuleIdentifier string) *UpdateUserAgentRuleOptions {
+ return &UpdateUserAgentRuleOptions{
+ UseragentRuleIdentifier: core.StringPtr(useragentRuleIdentifier),
+ }
+}
+
+// SetUseragentRuleIdentifier : Allow user to set UseragentRuleIdentifier
+func (options *UpdateUserAgentRuleOptions) SetUseragentRuleIdentifier(useragentRuleIdentifier string) *UpdateUserAgentRuleOptions {
+ options.UseragentRuleIdentifier = core.StringPtr(useragentRuleIdentifier)
+ return options
+}
+
+// SetPaused : Allow user to set Paused
+func (options *UpdateUserAgentRuleOptions) SetPaused(paused bool) *UpdateUserAgentRuleOptions {
+ options.Paused = core.BoolPtr(paused)
+ return options
+}
+
+// SetDescription : Allow user to set Description
+func (options *UpdateUserAgentRuleOptions) SetDescription(description string) *UpdateUserAgentRuleOptions {
+ options.Description = core.StringPtr(description)
+ return options
+}
+
+// SetMode : Allow user to set Mode
+func (options *UpdateUserAgentRuleOptions) SetMode(mode string) *UpdateUserAgentRuleOptions {
+ options.Mode = core.StringPtr(mode)
+ return options
+}
+
+// SetConfiguration : Allow user to set Configuration
+func (options *UpdateUserAgentRuleOptions) SetConfiguration(configuration *UseragentRuleInputConfiguration) *UpdateUserAgentRuleOptions {
+ options.Configuration = configuration
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateUserAgentRuleOptions) SetHeaders(param map[string]string) *UpdateUserAgentRuleOptions {
+ options.Headers = param
+ return options
+}
+
+// UseragentRuleInputConfiguration : Target/Value pair to use for this rule. The value is the exact UserAgent to match.
+type UseragentRuleInputConfiguration struct {
+ // The request property to match on; only the user agent ("ua") is supported.
+ Target *string `json:"target" validate:"required"`
+
+ // The exact UserAgent string to match with this rule.
+ Value *string `json:"value" validate:"required"`
+}
+
+// Constants associated with the UseragentRuleInputConfiguration.Target property.
+// The request property to match on; only the user agent ("ua") is supported.
+const (
+ UseragentRuleInputConfiguration_Target_Ua = "ua"
+)
+
+
+// NewUseragentRuleInputConfiguration : Instantiate UseragentRuleInputConfiguration (Generic Model Constructor)
+func (*UserAgentBlockingRulesV1) NewUseragentRuleInputConfiguration(target string, value string) (model *UseragentRuleInputConfiguration, err error) {
+ model = &UseragentRuleInputConfiguration{
+ Target: core.StringPtr(target),
+ Value: core.StringPtr(value),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalUseragentRuleInputConfiguration unmarshals an instance of UseragentRuleInputConfiguration from the specified map of raw messages.
+func UnmarshalUseragentRuleInputConfiguration(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(UseragentRuleInputConfiguration)
+ err = core.UnmarshalPrimitive(m, "target", &obj.Target)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// UseragentRuleObjectConfiguration : Target/Value pair to use for this rule. The value is the exact UserAgent to match.
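+//
+// For example, a rule that blocks a specific crawler would carry a
+// configuration of the shape {"target": "ua", "value": "BadBot/1.0"}
+// (the user agent shown here is an illustrative placeholder).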
+type UseragentRuleObjectConfiguration struct {
+ // The request property to match on; only the user agent ("ua") is supported.
+ Target *string `json:"target" validate:"required"`
+
+ // The exact UserAgent string to match with this rule.
+ Value *string `json:"value" validate:"required"`
+}
+
+// Constants associated with the UseragentRuleObjectConfiguration.Target property.
+// The request property to match on; only the user agent ("ua") is supported.
+const (
+ UseragentRuleObjectConfiguration_Target_Ua = "ua"
+)
+
+
+// UnmarshalUseragentRuleObjectConfiguration unmarshals an instance of UseragentRuleObjectConfiguration from the specified map of raw messages.
+func UnmarshalUseragentRuleObjectConfiguration(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(UseragentRuleObjectConfiguration)
+ err = core.UnmarshalPrimitive(m, "target", &obj.Target)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// DeleteUseragentRuleResp : user agent delete response.
+type DeleteUseragentRuleResp struct {
+ // Operation success flag.
+ Success *bool `json:"success" validate:"required"`
+
+ // Array of errors encountered.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // Array of messages returned.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // Container for response information.
+ Result *DeleteUseragentRuleRespResult `json:"result" validate:"required"`
+}
+
+
+// UnmarshalDeleteUseragentRuleResp unmarshals an instance of DeleteUseragentRuleResp from the specified map of raw messages.
+func UnmarshalDeleteUseragentRuleResp(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(DeleteUseragentRuleResp)
+ err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDeleteUseragentRuleRespResult)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ListUseragentRulesResp : user agent rules response.
+type ListUseragentRulesResp struct {
+ // Was operation successful.
+ Success *bool `json:"success" validate:"required"`
+
+ // Array of errors encountered.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // Array of messages returned.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // Container for response information.
+ Result []UseragentRuleObject `json:"result" validate:"required"`
+
+ // Statistics of results.
+ ResultInfo *ListUseragentRulesRespResultInfo `json:"result_info" validate:"required"`
+}
+
+
+// UnmarshalListUseragentRulesResp unmarshals an instance of ListUseragentRulesResp from the specified map of raw messages.
+func UnmarshalListUseragentRulesResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListUseragentRulesResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalUseragentRuleObject) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalListUseragentRulesRespResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UseragentRuleObject : user agent rule object. +type UseragentRuleObject struct { + // Identifier of the user-agent blocking rule. + ID *string `json:"id" validate:"required"` + + // Whether this user-agent rule is currently disabled. + Paused *bool `json:"paused" validate:"required"` + + // Some useful information about this rule to help identify the purpose of it. + Description *string `json:"description" validate:"required"` + + // The type of action to perform. + Mode *string `json:"mode" validate:"required"` + + // Target/Value pair to use for this rule. The value is the exact UserAgent to match. + Configuration *UseragentRuleObjectConfiguration `json:"configuration" validate:"required"` +} + +// Constants associated with the UseragentRuleObject.Mode property. +// The type of action to perform. +const ( + UseragentRuleObject_Mode_Block = "block" + UseragentRuleObject_Mode_Challenge = "challenge" + UseragentRuleObject_Mode_JsChallenge = "js_challenge" +) + + +// UnmarshalUseragentRuleObject unmarshals an instance of UseragentRuleObject from the specified map of raw messages. +func UnmarshalUseragentRuleObject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(UseragentRuleObject) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "paused", &obj.Paused) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + err = core.UnmarshalModel(m, "configuration", &obj.Configuration, UnmarshalUseragentRuleObjectConfiguration) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UseragentRuleResp : user agent rule response. +type UseragentRuleResp struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // user agent rule object. + Result *UseragentRuleObject `json:"result" validate:"required"` +} + + +// UnmarshalUseragentRuleResp unmarshals an instance of UseragentRuleResp from the specified map of raw messages. 
+func UnmarshalUseragentRuleResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(UseragentRuleResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalUseragentRuleObject) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/wafrulegroupsapiv1/waf_rule_groups_api_v1.go b/vendor/github.com/IBM/networking-go-sdk/wafrulegroupsapiv1/waf_rule_groups_api_v1.go new file mode 100644 index 00000000000..f2737f7fcf7 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/wafrulegroupsapiv1/waf_rule_groups_api_v1.go @@ -0,0 +1,846 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package wafrulegroupsapiv1 : Operations and models for the WafRuleGroupsApiV1 service +package wafrulegroupsapiv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// WafRuleGroupsApiV1 : This document describes CIS WAF Rule Groups API. +// +// Version: 1.0.0 +type WafRuleGroupsApiV1 struct { + Service *core.BaseService + + // cloud resource name. + Crn *string + + // Zone ID. + ZoneID *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "waf_rule_groups_api" + +// WafRuleGroupsApiV1Options : Service options +type WafRuleGroupsApiV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // cloud resource name. + Crn *string `validate:"required"` + + // Zone ID. + ZoneID *string `validate:"required"` +} + +// NewWafRuleGroupsApiV1UsingExternalConfig : constructs an instance of WafRuleGroupsApiV1 with passed in options and external configuration. 
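+//
+// A minimal construction sketch (the CRN and zone ID are placeholders; when
+// no Authenticator is supplied, one is resolved from external configuration
+// under the service name "waf_rule_groups_api"):
+//
+//   service, err := NewWafRuleGroupsApiV1UsingExternalConfig(&WafRuleGroupsApiV1Options{
+//       Crn:    core.StringPtr("<crn>"),
+//       ZoneID: core.StringPtr("<zone-id>"),
+//   })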
+func NewWafRuleGroupsApiV1UsingExternalConfig(options *WafRuleGroupsApiV1Options) (wafRuleGroupsApi *WafRuleGroupsApiV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + wafRuleGroupsApi, err = NewWafRuleGroupsApiV1(options) + if err != nil { + return + } + + err = wafRuleGroupsApi.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = wafRuleGroupsApi.Service.SetServiceURL(options.URL) + } + return +} + +// NewWafRuleGroupsApiV1 : constructs an instance of WafRuleGroupsApiV1 with passed in options. +func NewWafRuleGroupsApiV1(options *WafRuleGroupsApiV1Options) (service *WafRuleGroupsApiV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &WafRuleGroupsApiV1{ + Service: baseService, + Crn: options.Crn, + ZoneID: options.ZoneID, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "wafRuleGroupsApi" suitable for processing requests. +func (wafRuleGroupsApi *WafRuleGroupsApiV1) Clone() *WafRuleGroupsApiV1 { + if core.IsNil(wafRuleGroupsApi) { + return nil + } + clone := *wafRuleGroupsApi + clone.Service = wafRuleGroupsApi.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (wafRuleGroupsApi *WafRuleGroupsApiV1) SetServiceURL(url string) error { + return wafRuleGroupsApi.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (wafRuleGroupsApi *WafRuleGroupsApiV1) GetServiceURL() string { + return wafRuleGroupsApi.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (wafRuleGroupsApi *WafRuleGroupsApiV1) SetDefaultHeaders(headers http.Header) { + wafRuleGroupsApi.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (wafRuleGroupsApi *WafRuleGroupsApiV1) SetEnableGzipCompression(enableGzip bool) { + wafRuleGroupsApi.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (wafRuleGroupsApi *WafRuleGroupsApiV1) GetEnableGzipCompression() bool { + return wafRuleGroupsApi.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (wafRuleGroupsApi *WafRuleGroupsApiV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + wafRuleGroupsApi.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. 
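+//
+// A short sketch of toggling retries (service is assumed to be an
+// already-constructed instance):
+//
+//   service.EnableRetries(3, 30*time.Second) // at most 3 retries, 30s max interval
+//   service.DisableRetries()                 // turn automatic retries back off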
+func (wafRuleGroupsApi *WafRuleGroupsApiV1) DisableRetries() { + wafRuleGroupsApi.Service.DisableRetries() +} + +// ListWafRuleGroups : List all WAF rule groups +// List all WAF rule groups contained within a package. +func (wafRuleGroupsApi *WafRuleGroupsApiV1) ListWafRuleGroups(listWafRuleGroupsOptions *ListWafRuleGroupsOptions) (result *WafGroupsResponse, response *core.DetailedResponse, err error) { + return wafRuleGroupsApi.ListWafRuleGroupsWithContext(context.Background(), listWafRuleGroupsOptions) +} + +// ListWafRuleGroupsWithContext is an alternate form of the ListWafRuleGroups method which supports a Context parameter +func (wafRuleGroupsApi *WafRuleGroupsApiV1) ListWafRuleGroupsWithContext(ctx context.Context, listWafRuleGroupsOptions *ListWafRuleGroupsOptions) (result *WafGroupsResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listWafRuleGroupsOptions, "listWafRuleGroupsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listWafRuleGroupsOptions, "listWafRuleGroupsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *wafRuleGroupsApi.Crn, + "zone_id": *wafRuleGroupsApi.ZoneID, + "pkg_id": *listWafRuleGroupsOptions.PkgID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = wafRuleGroupsApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(wafRuleGroupsApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/firewall/waf/packages/{pkg_id}/groups`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listWafRuleGroupsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("waf_rule_groups_api", "V1", "ListWafRuleGroups") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listWafRuleGroupsOptions.Name != nil { + builder.AddQuery("name", fmt.Sprint(*listWafRuleGroupsOptions.Name)) + } + if listWafRuleGroupsOptions.Mode != nil { + builder.AddQuery("mode", fmt.Sprint(*listWafRuleGroupsOptions.Mode)) + } + if listWafRuleGroupsOptions.RulesCount != nil { + builder.AddQuery("rules_count", fmt.Sprint(*listWafRuleGroupsOptions.RulesCount)) + } + if listWafRuleGroupsOptions.Page != nil { + builder.AddQuery("page", fmt.Sprint(*listWafRuleGroupsOptions.Page)) + } + if listWafRuleGroupsOptions.PerPage != nil { + builder.AddQuery("per_page", fmt.Sprint(*listWafRuleGroupsOptions.PerPage)) + } + if listWafRuleGroupsOptions.Order != nil { + builder.AddQuery("order", fmt.Sprint(*listWafRuleGroupsOptions.Order)) + } + if listWafRuleGroupsOptions.Direction != nil { + builder.AddQuery("direction", fmt.Sprint(*listWafRuleGroupsOptions.Direction)) + } + if listWafRuleGroupsOptions.Match != nil { + builder.AddQuery("match", fmt.Sprint(*listWafRuleGroupsOptions.Match)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = wafRuleGroupsApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafGroupsResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWafRuleGroup : Get WAF rule group +// Get a single WAF rule group. 
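+//
+// A minimal usage sketch (the IDs are placeholders; service is assumed to be
+// a configured WafRuleGroupsApiV1 instance):
+//
+//   opts := service.NewGetWafRuleGroupOptions("<pkg-id>", "<group-id>")
+//   result, detailedResponse, err := service.GetWafRuleGroup(opts)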
+func (wafRuleGroupsApi *WafRuleGroupsApiV1) GetWafRuleGroup(getWafRuleGroupOptions *GetWafRuleGroupOptions) (result *WafGroupResponse, response *core.DetailedResponse, err error) { + return wafRuleGroupsApi.GetWafRuleGroupWithContext(context.Background(), getWafRuleGroupOptions) +} + +// GetWafRuleGroupWithContext is an alternate form of the GetWafRuleGroup method which supports a Context parameter +func (wafRuleGroupsApi *WafRuleGroupsApiV1) GetWafRuleGroupWithContext(ctx context.Context, getWafRuleGroupOptions *GetWafRuleGroupOptions) (result *WafGroupResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWafRuleGroupOptions, "getWafRuleGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWafRuleGroupOptions, "getWafRuleGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *wafRuleGroupsApi.Crn, + "zone_id": *wafRuleGroupsApi.ZoneID, + "pkg_id": *getWafRuleGroupOptions.PkgID, + "group_id": *getWafRuleGroupOptions.GroupID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = wafRuleGroupsApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(wafRuleGroupsApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/firewall/waf/packages/{pkg_id}/groups/{group_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWafRuleGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("waf_rule_groups_api", "V1", "GetWafRuleGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = wafRuleGroupsApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafGroupResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateWafRuleGroup : Update WAF rule group +// Update the state of a WAF rule group. 
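+//
+// A sketch of turning a group on (the IDs are placeholders):
+//
+//   opts := service.NewUpdateWafRuleGroupOptions("<pkg-id>", "<group-id>").
+//       SetMode(UpdateWafRuleGroupOptions_Mode_On)
+//   result, _, err := service.UpdateWafRuleGroup(opts)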
+func (wafRuleGroupsApi *WafRuleGroupsApiV1) UpdateWafRuleGroup(updateWafRuleGroupOptions *UpdateWafRuleGroupOptions) (result *WafGroupResponse, response *core.DetailedResponse, err error) { + return wafRuleGroupsApi.UpdateWafRuleGroupWithContext(context.Background(), updateWafRuleGroupOptions) +} + +// UpdateWafRuleGroupWithContext is an alternate form of the UpdateWafRuleGroup method which supports a Context parameter +func (wafRuleGroupsApi *WafRuleGroupsApiV1) UpdateWafRuleGroupWithContext(ctx context.Context, updateWafRuleGroupOptions *UpdateWafRuleGroupOptions) (result *WafGroupResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateWafRuleGroupOptions, "updateWafRuleGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateWafRuleGroupOptions, "updateWafRuleGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *wafRuleGroupsApi.Crn, + "zone_id": *wafRuleGroupsApi.ZoneID, + "pkg_id": *updateWafRuleGroupOptions.PkgID, + "group_id": *updateWafRuleGroupOptions.GroupID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = wafRuleGroupsApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(wafRuleGroupsApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/firewall/waf/packages/{pkg_id}/groups/{group_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateWafRuleGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("waf_rule_groups_api", "V1", "UpdateWafRuleGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateWafRuleGroupOptions.Mode != nil { + body["mode"] = updateWafRuleGroupOptions.Mode + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = wafRuleGroupsApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafGroupResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWafRuleGroupOptions : The GetWafRuleGroup options. +type GetWafRuleGroupOptions struct { + // Package ID. + PkgID *string `json:"pkg_id" validate:"required,ne="` + + // Group ID. 
+ GroupID *string `json:"group_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWafRuleGroupOptions : Instantiate GetWafRuleGroupOptions +func (*WafRuleGroupsApiV1) NewGetWafRuleGroupOptions(pkgID string, groupID string) *GetWafRuleGroupOptions { + return &GetWafRuleGroupOptions{ + PkgID: core.StringPtr(pkgID), + GroupID: core.StringPtr(groupID), + } +} + +// SetPkgID : Allow user to set PkgID +func (options *GetWafRuleGroupOptions) SetPkgID(pkgID string) *GetWafRuleGroupOptions { + options.PkgID = core.StringPtr(pkgID) + return options +} + +// SetGroupID : Allow user to set GroupID +func (options *GetWafRuleGroupOptions) SetGroupID(groupID string) *GetWafRuleGroupOptions { + options.GroupID = core.StringPtr(groupID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWafRuleGroupOptions) SetHeaders(param map[string]string) *GetWafRuleGroupOptions { + options.Headers = param + return options +} + +// ListWafRuleGroupsOptions : The ListWafRuleGroups options. +type ListWafRuleGroupsOptions struct { + // Package ID. + PkgID *string `json:"pkg_id" validate:"required,ne="` + + // Name of the firewall package. + Name *string `json:"name,omitempty"` + + // Whether or not the rules contained within this group are configurable/usable. + Mode *string `json:"mode,omitempty"` + + // How many rules are contained within this group. + RulesCount *string `json:"rules_count,omitempty"` + + // Page number of paginated results. + Page *int64 `json:"page,omitempty"` + + // Number of packages per page. + PerPage *int64 `json:"per_page,omitempty"` + + // Field to order packages by. + Order *string `json:"order,omitempty"` + + // Direction to order packages. + Direction *string `json:"direction,omitempty"` + + // Whether to match all search requirements or at least one (any). + Match *string `json:"match,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListWafRuleGroupsOptions.Mode property. +// Whether or not the rules contained within this group are configurable/usable. +const ( + ListWafRuleGroupsOptions_Mode_Off = "off" + ListWafRuleGroupsOptions_Mode_On = "on" +) + +// Constants associated with the ListWafRuleGroupsOptions.Direction property. +// Direction to order packages. +const ( + ListWafRuleGroupsOptions_Direction_Asc = "asc" + ListWafRuleGroupsOptions_Direction_Desc = "desc" +) + +// Constants associated with the ListWafRuleGroupsOptions.Match property. +// Whether to match all search requirements or at least one (any). 
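+//
+// These options are typically chained, e.g. (a sketch; the package ID is a
+// placeholder):
+//
+//   opts := service.NewListWafRuleGroupsOptions("<pkg-id>").
+//       SetMode(ListWafRuleGroupsOptions_Mode_On).
+//       SetMatch(ListWafRuleGroupsOptions_Match_All).
+//       SetPerPage(50)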
+const ( + ListWafRuleGroupsOptions_Match_All = "all" + ListWafRuleGroupsOptions_Match_Any = "any" +) + +// NewListWafRuleGroupsOptions : Instantiate ListWafRuleGroupsOptions +func (*WafRuleGroupsApiV1) NewListWafRuleGroupsOptions(pkgID string) *ListWafRuleGroupsOptions { + return &ListWafRuleGroupsOptions{ + PkgID: core.StringPtr(pkgID), + } +} + +// SetPkgID : Allow user to set PkgID +func (options *ListWafRuleGroupsOptions) SetPkgID(pkgID string) *ListWafRuleGroupsOptions { + options.PkgID = core.StringPtr(pkgID) + return options +} + +// SetName : Allow user to set Name +func (options *ListWafRuleGroupsOptions) SetName(name string) *ListWafRuleGroupsOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetMode : Allow user to set Mode +func (options *ListWafRuleGroupsOptions) SetMode(mode string) *ListWafRuleGroupsOptions { + options.Mode = core.StringPtr(mode) + return options +} + +// SetRulesCount : Allow user to set RulesCount +func (options *ListWafRuleGroupsOptions) SetRulesCount(rulesCount string) *ListWafRuleGroupsOptions { + options.RulesCount = core.StringPtr(rulesCount) + return options +} + +// SetPage : Allow user to set Page +func (options *ListWafRuleGroupsOptions) SetPage(page int64) *ListWafRuleGroupsOptions { + options.Page = core.Int64Ptr(page) + return options +} + +// SetPerPage : Allow user to set PerPage +func (options *ListWafRuleGroupsOptions) SetPerPage(perPage int64) *ListWafRuleGroupsOptions { + options.PerPage = core.Int64Ptr(perPage) + return options +} + +// SetOrder : Allow user to set Order +func (options *ListWafRuleGroupsOptions) SetOrder(order string) *ListWafRuleGroupsOptions { + options.Order = core.StringPtr(order) + return options +} + +// SetDirection : Allow user to set Direction +func (options *ListWafRuleGroupsOptions) SetDirection(direction string) *ListWafRuleGroupsOptions { + options.Direction = core.StringPtr(direction) + return options +} + +// SetMatch : Allow user to set Match +func (options *ListWafRuleGroupsOptions) SetMatch(match string) *ListWafRuleGroupsOptions { + options.Match = core.StringPtr(match) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListWafRuleGroupsOptions) SetHeaders(param map[string]string) *ListWafRuleGroupsOptions { + options.Headers = param + return options +} + +// UpdateWafRuleGroupOptions : The UpdateWafRuleGroup options. +type UpdateWafRuleGroupOptions struct { + // Package ID. + PkgID *string `json:"pkg_id" validate:"required,ne="` + + // Group ID. + GroupID *string `json:"group_id" validate:"required,ne="` + + // Whether or not the rules contained within this group are configurable/usable. + Mode *string `json:"mode,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateWafRuleGroupOptions.Mode property. +// Whether or not the rules contained within this group are configurable/usable. 
+const ( + UpdateWafRuleGroupOptions_Mode_Off = "off" + UpdateWafRuleGroupOptions_Mode_On = "on" +) + +// NewUpdateWafRuleGroupOptions : Instantiate UpdateWafRuleGroupOptions +func (*WafRuleGroupsApiV1) NewUpdateWafRuleGroupOptions(pkgID string, groupID string) *UpdateWafRuleGroupOptions { + return &UpdateWafRuleGroupOptions{ + PkgID: core.StringPtr(pkgID), + GroupID: core.StringPtr(groupID), + } +} + +// SetPkgID : Allow user to set PkgID +func (options *UpdateWafRuleGroupOptions) SetPkgID(pkgID string) *UpdateWafRuleGroupOptions { + options.PkgID = core.StringPtr(pkgID) + return options +} + +// SetGroupID : Allow user to set GroupID +func (options *UpdateWafRuleGroupOptions) SetGroupID(groupID string) *UpdateWafRuleGroupOptions { + options.GroupID = core.StringPtr(groupID) + return options +} + +// SetMode : Allow user to set Mode +func (options *UpdateWafRuleGroupOptions) SetMode(mode string) *UpdateWafRuleGroupOptions { + options.Mode = core.StringPtr(mode) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateWafRuleGroupOptions) SetHeaders(param map[string]string) *UpdateWafRuleGroupOptions { + options.Headers = param + return options +} + +// WafGroupResponseResultInfo : Statistics of results. +type WafGroupResponseResultInfo struct { + // Page number. + Page *int64 `json:"page" validate:"required"` + + // Number of results per page. + PerPage *int64 `json:"per_page" validate:"required"` + + // Number of results. + Count *int64 `json:"count" validate:"required"` + + // Total number of results. + TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalWafGroupResponseResultInfo unmarshals an instance of WafGroupResponseResultInfo from the specified map of raw messages. +func UnmarshalWafGroupResponseResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafGroupResponseResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafGroupsResponseResultInfo : Statistics of results. +type WafGroupsResponseResultInfo struct { + // Page number. + Page *int64 `json:"page" validate:"required"` + + // Number of results per page. + PerPage *int64 `json:"per_page" validate:"required"` + + // Number of results. + Count *int64 `json:"count" validate:"required"` + + // Total number of results. + TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalWafGroupsResponseResultInfo unmarshals an instance of WafGroupsResponseResultInfo from the specified map of raw messages. +func UnmarshalWafGroupsResponseResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafGroupsResponseResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafGroupResponse : waf group response. 
+type WafGroupResponse struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // waf rule properties. + Result *WafRuleProperties `json:"result" validate:"required"` + + // Statistics of results. + ResultInfo *WafGroupResponseResultInfo `json:"result_info" validate:"required"` +} + + +// UnmarshalWafGroupResponse unmarshals an instance of WafGroupResponse from the specified map of raw messages. +func UnmarshalWafGroupResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafGroupResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalWafRuleProperties) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalWafGroupResponseResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafGroupsResponse : waf groups response. +type WafGroupsResponse struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // Container for response information. + Result []WafRuleProperties `json:"result" validate:"required"` + + // Statistics of results. + ResultInfo *WafGroupsResponseResultInfo `json:"result_info" validate:"required"` +} + + +// UnmarshalWafGroupsResponse unmarshals an instance of WafGroupsResponse from the specified map of raw messages. +func UnmarshalWafGroupsResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafGroupsResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalWafRuleProperties) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalWafGroupsResponseResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafRuleProperties : waf rule properties. +type WafRuleProperties struct { + // ID. + ID *string `json:"id,omitempty"` + + // Name. + Name *string `json:"name,omitempty"` + + // Description. + Description *string `json:"description,omitempty"` + + // Number of rules. + RulesCount *int64 `json:"rules_count,omitempty"` + + // Number of modified rules. + ModifiedRulesCount *int64 `json:"modified_rules_count,omitempty"` + + // Package ID. + PackageID *string `json:"package_id,omitempty"` + + // Mode. + Mode *string `json:"mode,omitempty"` + + // Allowed Modes. + AllowedModes []string `json:"allowed_modes,omitempty"` +} + + +// UnmarshalWafRuleProperties unmarshals an instance of WafRuleProperties from the specified map of raw messages. 
+func UnmarshalWafRuleProperties(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRuleProperties) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "rules_count", &obj.RulesCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_rules_count", &obj.ModifiedRulesCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "package_id", &obj.PackageID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "allowed_modes", &obj.AllowedModes) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/wafrulepackagesapiv1/waf_rule_packages_api_v1.go b/vendor/github.com/IBM/networking-go-sdk/wafrulepackagesapiv1/waf_rule_packages_api_v1.go new file mode 100644 index 00000000000..eea8c87f324 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/wafrulepackagesapiv1/waf_rule_packages_api_v1.go @@ -0,0 +1,806 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package wafrulepackagesapiv1 : Operations and models for the WafRulePackagesApiV1 service +package wafrulepackagesapiv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// WafRulePackagesApiV1 : This document describes CIS WAF Rule Packages API. +// +// Version: 1.0.1 +type WafRulePackagesApiV1 struct { + Service *core.BaseService + + // Cloud resource name. + Crn *string + + // Zone ID. + ZoneID *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "waf_rule_packages_api" + +// WafRulePackagesApiV1Options : Service options +type WafRulePackagesApiV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Cloud resource name. + Crn *string `validate:"required"` + + // Zone ID. + ZoneID *string `validate:"required"` +} + +// NewWafRulePackagesApiV1UsingExternalConfig : constructs an instance of WafRulePackagesApiV1 with passed in options and external configuration. 
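+//
+// As with the rule groups service, a construction sketch, here with an
+// explicit IAM authenticator instead of external configuration (the API key,
+// CRN and zone ID are placeholders):
+//
+//   service, err := NewWafRulePackagesApiV1UsingExternalConfig(&WafRulePackagesApiV1Options{
+//       Authenticator: &core.IamAuthenticator{ApiKey: "<api-key>"},
+//       Crn:           core.StringPtr("<crn>"),
+//       ZoneID:        core.StringPtr("<zone-id>"),
+//   })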
+func NewWafRulePackagesApiV1UsingExternalConfig(options *WafRulePackagesApiV1Options) (wafRulePackagesApi *WafRulePackagesApiV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + wafRulePackagesApi, err = NewWafRulePackagesApiV1(options) + if err != nil { + return + } + + err = wafRulePackagesApi.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = wafRulePackagesApi.Service.SetServiceURL(options.URL) + } + return +} + +// NewWafRulePackagesApiV1 : constructs an instance of WafRulePackagesApiV1 with passed in options. +func NewWafRulePackagesApiV1(options *WafRulePackagesApiV1Options) (service *WafRulePackagesApiV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &WafRulePackagesApiV1{ + Service: baseService, + Crn: options.Crn, + ZoneID: options.ZoneID, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "wafRulePackagesApi" suitable for processing requests. +func (wafRulePackagesApi *WafRulePackagesApiV1) Clone() *WafRulePackagesApiV1 { + if core.IsNil(wafRulePackagesApi) { + return nil + } + clone := *wafRulePackagesApi + clone.Service = wafRulePackagesApi.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (wafRulePackagesApi *WafRulePackagesApiV1) SetServiceURL(url string) error { + return wafRulePackagesApi.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (wafRulePackagesApi *WafRulePackagesApiV1) GetServiceURL() string { + return wafRulePackagesApi.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (wafRulePackagesApi *WafRulePackagesApiV1) SetDefaultHeaders(headers http.Header) { + wafRulePackagesApi.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (wafRulePackagesApi *WafRulePackagesApiV1) SetEnableGzipCompression(enableGzip bool) { + wafRulePackagesApi.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (wafRulePackagesApi *WafRulePackagesApiV1) GetEnableGzipCompression() bool { + return wafRulePackagesApi.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (wafRulePackagesApi *WafRulePackagesApiV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + wafRulePackagesApi.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. 
+func (wafRulePackagesApi *WafRulePackagesApiV1) DisableRetries() { + wafRulePackagesApi.Service.DisableRetries() +} + +// ListWafPackages : List all WAF rule packages +// Get firewall packages for a zone. +func (wafRulePackagesApi *WafRulePackagesApiV1) ListWafPackages(listWafPackagesOptions *ListWafPackagesOptions) (result *WafPackagesResponse, response *core.DetailedResponse, err error) { + return wafRulePackagesApi.ListWafPackagesWithContext(context.Background(), listWafPackagesOptions) +} + +// ListWafPackagesWithContext is an alternate form of the ListWafPackages method which supports a Context parameter +func (wafRulePackagesApi *WafRulePackagesApiV1) ListWafPackagesWithContext(ctx context.Context, listWafPackagesOptions *ListWafPackagesOptions) (result *WafPackagesResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listWafPackagesOptions, "listWafPackagesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *wafRulePackagesApi.Crn, + "zone_id": *wafRulePackagesApi.ZoneID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = wafRulePackagesApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(wafRulePackagesApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/firewall/waf/packages`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listWafPackagesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("waf_rule_packages_api", "V1", "ListWafPackages") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listWafPackagesOptions.Name != nil { + builder.AddQuery("name", fmt.Sprint(*listWafPackagesOptions.Name)) + } + if listWafPackagesOptions.Page != nil { + builder.AddQuery("page", fmt.Sprint(*listWafPackagesOptions.Page)) + } + if listWafPackagesOptions.PerPage != nil { + builder.AddQuery("per_page", fmt.Sprint(*listWafPackagesOptions.PerPage)) + } + if listWafPackagesOptions.Order != nil { + builder.AddQuery("order", fmt.Sprint(*listWafPackagesOptions.Order)) + } + if listWafPackagesOptions.Direction != nil { + builder.AddQuery("direction", fmt.Sprint(*listWafPackagesOptions.Direction)) + } + if listWafPackagesOptions.Match != nil { + builder.AddQuery("match", fmt.Sprint(*listWafPackagesOptions.Match)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = wafRulePackagesApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafPackagesResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWafPackage : Get WAF rule package +// Get information about a single firewall package. 
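+//
+// A minimal usage sketch (the package ID is a placeholder; service is assumed
+// to be a configured WafRulePackagesApiV1 instance):
+//
+//   pkg, _, err := service.GetWafPackage(service.NewGetWafPackageOptions("<package-id>"))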
+func (wafRulePackagesApi *WafRulePackagesApiV1) GetWafPackage(getWafPackageOptions *GetWafPackageOptions) (result *WafPackageResponse, response *core.DetailedResponse, err error) {
+ return wafRulePackagesApi.GetWafPackageWithContext(context.Background(), getWafPackageOptions)
+}
+
+// GetWafPackageWithContext is an alternate form of the GetWafPackage method which supports a Context parameter
+func (wafRulePackagesApi *WafRulePackagesApiV1) GetWafPackageWithContext(ctx context.Context, getWafPackageOptions *GetWafPackageOptions) (result *WafPackageResponse, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(getWafPackageOptions, "getWafPackageOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(getWafPackageOptions, "getWafPackageOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *wafRulePackagesApi.Crn,
+ "zone_id": *wafRulePackagesApi.ZoneID,
+ "package_id": *getWafPackageOptions.PackageID,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = wafRulePackagesApi.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(wafRulePackagesApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/firewall/waf/packages/{package_id}`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range getWafPackageOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("waf_rule_packages_api", "V1", "GetWafPackage")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = wafRulePackagesApi.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafPackageResponse)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// UpdateWafPackage : Change WAF rule package
+// Change the sensitivity and action for an anomaly detection type WAF rule package.
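+//
+// A sketch of raising sensitivity while keeping rules in simulate mode
+// (the package ID is a placeholder):
+//
+//   opts := service.NewUpdateWafPackageOptions("<package-id>").
+//       SetSensitivity(UpdateWafPackageOptions_Sensitivity_High).
+//       SetActionMode(UpdateWafPackageOptions_ActionMode_Simulate)
+//   result, _, err := service.UpdateWafPackage(opts)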
+func (wafRulePackagesApi *WafRulePackagesApiV1) UpdateWafPackage(updateWafPackageOptions *UpdateWafPackageOptions) (result *WafPackageResponse, response *core.DetailedResponse, err error) { + return wafRulePackagesApi.UpdateWafPackageWithContext(context.Background(), updateWafPackageOptions) +} + +// UpdateWafPackageWithContext is an alternate form of the UpdateWafPackage method which supports a Context parameter +func (wafRulePackagesApi *WafRulePackagesApiV1) UpdateWafPackageWithContext(ctx context.Context, updateWafPackageOptions *UpdateWafPackageOptions) (result *WafPackageResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateWafPackageOptions, "updateWafPackageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateWafPackageOptions, "updateWafPackageOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *wafRulePackagesApi.Crn, + "zone_id": *wafRulePackagesApi.ZoneID, + "package_id": *updateWafPackageOptions.PackageID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = wafRulePackagesApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(wafRulePackagesApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/firewall/waf/packages/{package_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateWafPackageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("waf_rule_packages_api", "V1", "UpdateWafPackage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateWafPackageOptions.Sensitivity != nil { + body["sensitivity"] = updateWafPackageOptions.Sensitivity + } + if updateWafPackageOptions.ActionMode != nil { + body["action_mode"] = updateWafPackageOptions.ActionMode + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = wafRulePackagesApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafPackageResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWafPackageOptions : The GetWafPackage options. +type GetWafPackageOptions struct { + // Package ID. + PackageID *string `json:"package_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWafPackageOptions : Instantiate GetWafPackageOptions +func (*WafRulePackagesApiV1) NewGetWafPackageOptions(packageID string) *GetWafPackageOptions { + return &GetWafPackageOptions{ + PackageID: core.StringPtr(packageID), + } +} + +// SetPackageID : Allow user to set PackageID +func (options *GetWafPackageOptions) SetPackageID(packageID string) *GetWafPackageOptions { + options.PackageID = core.StringPtr(packageID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWafPackageOptions) SetHeaders(param map[string]string) *GetWafPackageOptions { + options.Headers = param + return options +} + +// ListWafPackagesOptions : The ListWafPackages options. 
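+//
+// These options are typically built fluently, e.g. (a sketch):
+//
+//   opts := service.NewListWafPackagesOptions().
+//       SetPage(1).
+//       SetPerPage(25).
+//       SetDirection(ListWafPackagesOptions_Direction_Asc)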
+type ListWafPackagesOptions struct { + // Name of the firewall package. + Name *string `json:"name,omitempty"` + + // Page number of paginated results. + Page *int64 `json:"page,omitempty"` + + // Number of packages per page. + PerPage *int64 `json:"per_page,omitempty"` + + // Field to order packages by. + Order *string `json:"order,omitempty"` + + // Direction to order packages. + Direction *string `json:"direction,omitempty"` + + // Whether to match all search requirements or at least one (any). + Match *string `json:"match,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListWafPackagesOptions.Direction property. +// Direction to order packages. +const ( + ListWafPackagesOptions_Direction_Asc = "asc" + ListWafPackagesOptions_Direction_Desc = "desc" +) + +// Constants associated with the ListWafPackagesOptions.Match property. +// Whether to match all search requirements or at least one (any). +const ( + ListWafPackagesOptions_Match_All = "all" + ListWafPackagesOptions_Match_Any = "any" +) + +// NewListWafPackagesOptions : Instantiate ListWafPackagesOptions +func (*WafRulePackagesApiV1) NewListWafPackagesOptions() *ListWafPackagesOptions { + return &ListWafPackagesOptions{} +} + +// SetName : Allow user to set Name +func (options *ListWafPackagesOptions) SetName(name string) *ListWafPackagesOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetPage : Allow user to set Page +func (options *ListWafPackagesOptions) SetPage(page int64) *ListWafPackagesOptions { + options.Page = core.Int64Ptr(page) + return options +} + +// SetPerPage : Allow user to set PerPage +func (options *ListWafPackagesOptions) SetPerPage(perPage int64) *ListWafPackagesOptions { + options.PerPage = core.Int64Ptr(perPage) + return options +} + +// SetOrder : Allow user to set Order +func (options *ListWafPackagesOptions) SetOrder(order string) *ListWafPackagesOptions { + options.Order = core.StringPtr(order) + return options +} + +// SetDirection : Allow user to set Direction +func (options *ListWafPackagesOptions) SetDirection(direction string) *ListWafPackagesOptions { + options.Direction = core.StringPtr(direction) + return options +} + +// SetMatch : Allow user to set Match +func (options *ListWafPackagesOptions) SetMatch(match string) *ListWafPackagesOptions { + options.Match = core.StringPtr(match) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListWafPackagesOptions) SetHeaders(param map[string]string) *ListWafPackagesOptions { + options.Headers = param + return options +} + +// UpdateWafPackageOptions : The UpdateWafPackage options. +type UpdateWafPackageOptions struct { + // Package ID. + PackageID *string `json:"package_id" validate:"required,ne="` + + // The sensitivity of the firewall package. + Sensitivity *string `json:"sensitivity,omitempty"` + + // The default action that will be taken for rules under the firewall package. + ActionMode *string `json:"action_mode,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateWafPackageOptions.Sensitivity property. +// The sensitivity of the firewall package. 
+const ( + UpdateWafPackageOptions_Sensitivity_High = "high" + UpdateWafPackageOptions_Sensitivity_Low = "low" + UpdateWafPackageOptions_Sensitivity_Medium = "medium" + UpdateWafPackageOptions_Sensitivity_Off = "off" +) + +// Constants associated with the UpdateWafPackageOptions.ActionMode property. +// The default action that will be taken for rules under the firewall package. +const ( + UpdateWafPackageOptions_ActionMode_Block = "block" + UpdateWafPackageOptions_ActionMode_Challenge = "challenge" + UpdateWafPackageOptions_ActionMode_Simulate = "simulate" +) + +// NewUpdateWafPackageOptions : Instantiate UpdateWafPackageOptions +func (*WafRulePackagesApiV1) NewUpdateWafPackageOptions(packageID string) *UpdateWafPackageOptions { + return &UpdateWafPackageOptions{ + PackageID: core.StringPtr(packageID), + } +} + +// SetPackageID : Allow user to set PackageID +func (options *UpdateWafPackageOptions) SetPackageID(packageID string) *UpdateWafPackageOptions { + options.PackageID = core.StringPtr(packageID) + return options +} + +// SetSensitivity : Allow user to set Sensitivity +func (options *UpdateWafPackageOptions) SetSensitivity(sensitivity string) *UpdateWafPackageOptions { + options.Sensitivity = core.StringPtr(sensitivity) + return options +} + +// SetActionMode : Allow user to set ActionMode +func (options *UpdateWafPackageOptions) SetActionMode(actionMode string) *UpdateWafPackageOptions { + options.ActionMode = core.StringPtr(actionMode) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateWafPackageOptions) SetHeaders(param map[string]string) *UpdateWafPackageOptions { + options.Headers = param + return options +} + +// WafPackageResponseResult : Container for response information. +type WafPackageResponseResult struct { + // ID. + ID *string `json:"id,omitempty"` + + // Name. + Name *string `json:"name,omitempty"` + + // Description. + Description *string `json:"description,omitempty"` + + // Detection mode. + DetectionMode *string `json:"detection_mode,omitempty"` + + // Value. + ZoneID *string `json:"zone_id,omitempty"` + + // Value. + Status *string `json:"status,omitempty"` + + // Value. + Sensitivity *string `json:"sensitivity,omitempty"` + + // Value. + ActionMode *string `json:"action_mode,omitempty"` +} + + +// UnmarshalWafPackageResponseResult unmarshals an instance of WafPackageResponseResult from the specified map of raw messages. +func UnmarshalWafPackageResponseResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafPackageResponseResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "detection_mode", &obj.DetectionMode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "zone_id", &obj.ZoneID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "sensitivity", &obj.Sensitivity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "action_mode", &obj.ActionMode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafPackagesResponseResultInfo : Statistics of results. +type WafPackagesResponseResultInfo struct { + // Page number. 
+ Page *int64 `json:"page" validate:"required"` + + // Number of results per page. + PerPage *int64 `json:"per_page" validate:"required"` + + // Number of results. + Count *int64 `json:"count" validate:"required"` + + // Total number of results. + TotalCount *int64 `json:"total_count" validate:"required"` +} + + +// UnmarshalWafPackagesResponseResultInfo unmarshals an instance of WafPackagesResponseResultInfo from the specified map of raw messages. +func UnmarshalWafPackagesResponseResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafPackagesResponseResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafPackagesResponseResultItem : WafPackagesResponseResultItem struct +type WafPackagesResponseResultItem struct { + // ID. + ID *string `json:"id,omitempty"` + + // Name. + Name *string `json:"name,omitempty"` + + // Description. + Description *string `json:"description,omitempty"` + + // Detection mode. + DetectionMode *string `json:"detection_mode,omitempty"` + + // Value. + ZoneID *string `json:"zone_id,omitempty"` + + // Value. + Status *string `json:"status,omitempty"` +} + + +// UnmarshalWafPackagesResponseResultItem unmarshals an instance of WafPackagesResponseResultItem from the specified map of raw messages. +func UnmarshalWafPackagesResponseResultItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafPackagesResponseResultItem) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "detection_mode", &obj.DetectionMode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "zone_id", &obj.ZoneID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafPackageResponse : waf package response. +type WafPackageResponse struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // Container for response information. + Result *WafPackageResponseResult `json:"result" validate:"required"` +} + + +// UnmarshalWafPackageResponse unmarshals an instance of WafPackageResponse from the specified map of raw messages. 
+func UnmarshalWafPackageResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafPackageResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalWafPackageResponseResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafPackagesResponse : waf packages response. +type WafPackagesResponse struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // Container for response information. + Result []WafPackagesResponseResultItem `json:"result" validate:"required"` + + // Statistics of results. + ResultInfo *WafPackagesResponseResultInfo `json:"result_info" validate:"required"` +} + + +// UnmarshalWafPackagesResponse unmarshals an instance of WafPackagesResponse from the specified map of raw messages. +func UnmarshalWafPackagesResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafPackagesResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalWafPackagesResponseResultItem) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalWafPackagesResponseResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/wafrulesapiv1/waf_rules_api_v1.go b/vendor/github.com/IBM/networking-go-sdk/wafrulesapiv1/waf_rules_api_v1.go new file mode 100644 index 00000000000..b422781f14d --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/wafrulesapiv1/waf_rules_api_v1.go @@ -0,0 +1,991 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package wafrulesapiv1 : Operations and models for the WafRulesApiV1 service +package wafrulesapiv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// WafRulesApiV1 : This document describes CIS WAF Rules API. +// +// Version: 1.0.0 +type WafRulesApiV1 struct { + Service *core.BaseService + + // cloud resource name. 
+ Crn *string + + // zone id. + ZoneID *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "waf_rules_api" + +// WafRulesApiV1Options : Service options +type WafRulesApiV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // cloud resource name. + Crn *string `validate:"required"` + + // zone id. + ZoneID *string `validate:"required"` +} + +// NewWafRulesApiV1UsingExternalConfig : constructs an instance of WafRulesApiV1 with passed in options and external configuration. +func NewWafRulesApiV1UsingExternalConfig(options *WafRulesApiV1Options) (wafRulesApi *WafRulesApiV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + wafRulesApi, err = NewWafRulesApiV1(options) + if err != nil { + return + } + + err = wafRulesApi.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = wafRulesApi.Service.SetServiceURL(options.URL) + } + return +} + +// NewWafRulesApiV1 : constructs an instance of WafRulesApiV1 with passed in options. +func NewWafRulesApiV1(options *WafRulesApiV1Options) (service *WafRulesApiV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &WafRulesApiV1{ + Service: baseService, + Crn: options.Crn, + ZoneID: options.ZoneID, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "wafRulesApi" suitable for processing requests. 
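+//
+// Sketch: a clone shares configuration with the original client but can be
+// retargeted independently (the URL below is a placeholder):
+//
+//	clone := wafRulesApi.Clone()
+//	_ = clone.SetServiceURL("https://example.api.cis.cloud.ibm.com")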
+func (wafRulesApi *WafRulesApiV1) Clone() *WafRulesApiV1 {
+ if core.IsNil(wafRulesApi) {
+ return nil
+ }
+ clone := *wafRulesApi
+ clone.Service = wafRulesApi.Service.Clone()
+ return &clone
+}
+
+// SetServiceURL sets the service URL
+func (wafRulesApi *WafRulesApiV1) SetServiceURL(url string) error {
+ return wafRulesApi.Service.SetServiceURL(url)
+}
+
+// GetServiceURL returns the service URL
+func (wafRulesApi *WafRulesApiV1) GetServiceURL() string {
+ return wafRulesApi.Service.GetServiceURL()
+}
+
+// SetDefaultHeaders sets HTTP headers to be sent in every request
+func (wafRulesApi *WafRulesApiV1) SetDefaultHeaders(headers http.Header) {
+ wafRulesApi.Service.SetDefaultHeaders(headers)
+}
+
+// SetEnableGzipCompression sets the service's EnableGzipCompression field
+func (wafRulesApi *WafRulesApiV1) SetEnableGzipCompression(enableGzip bool) {
+ wafRulesApi.Service.SetEnableGzipCompression(enableGzip)
+}
+
+// GetEnableGzipCompression returns the service's EnableGzipCompression field
+func (wafRulesApi *WafRulesApiV1) GetEnableGzipCompression() bool {
+ return wafRulesApi.Service.GetEnableGzipCompression()
+}
+
+// EnableRetries enables automatic retries for requests invoked for this service instance.
+// If either parameter is specified as 0, then a default value is used instead.
+func (wafRulesApi *WafRulesApiV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
+ wafRulesApi.Service.EnableRetries(maxRetries, maxRetryInterval)
+}
+
+// DisableRetries disables automatic retries for requests invoked for this service instance.
+func (wafRulesApi *WafRulesApiV1) DisableRetries() {
+ wafRulesApi.Service.DisableRetries()
+}
+
+// ListWafRules : List all WAF rules
+// List all Web Application Firewall (WAF) rules.
+func (wafRulesApi *WafRulesApiV1) ListWafRules(listWafRulesOptions *ListWafRulesOptions) (result *WafRulesResponse, response *core.DetailedResponse, err error) {
+ return wafRulesApi.ListWafRulesWithContext(context.Background(), listWafRulesOptions)
+}
+
+// ListWafRulesWithContext is an alternate form of the ListWafRules method which supports a Context parameter
+func (wafRulesApi *WafRulesApiV1) ListWafRulesWithContext(ctx context.Context, listWafRulesOptions *ListWafRulesOptions) (result *WafRulesResponse, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(listWafRulesOptions, "listWafRulesOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(listWafRulesOptions, "listWafRulesOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *wafRulesApi.Crn,
+ "zone_id": *wafRulesApi.ZoneID,
+ "package_id": *listWafRulesOptions.PackageID,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = wafRulesApi.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(wafRulesApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/firewall/waf/packages/{package_id}/rules`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range listWafRulesOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("waf_rules_api", "V1", "ListWafRules")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ if listWafRulesOptions.Mode != nil {
+ builder.AddQuery("mode", fmt.Sprint(*listWafRulesOptions.Mode))
+ }
+ if listWafRulesOptions.Priority != nil {
+ builder.AddQuery("priority", fmt.Sprint(*listWafRulesOptions.Priority))
+ }
+ if listWafRulesOptions.Match != nil {
+ builder.AddQuery("match", fmt.Sprint(*listWafRulesOptions.Match))
+ }
+ if listWafRulesOptions.Order != nil {
+ builder.AddQuery("order", fmt.Sprint(*listWafRulesOptions.Order))
+ }
+ if listWafRulesOptions.GroupID != nil {
+ builder.AddQuery("group_id", fmt.Sprint(*listWafRulesOptions.GroupID))
+ }
+ if listWafRulesOptions.Description != nil {
+ builder.AddQuery("description", fmt.Sprint(*listWafRulesOptions.Description))
+ }
+ if listWafRulesOptions.Direction != nil {
+ builder.AddQuery("direction", fmt.Sprint(*listWafRulesOptions.Direction))
+ }
+ if listWafRulesOptions.Page != nil {
+ builder.AddQuery("page", fmt.Sprint(*listWafRulesOptions.Page))
+ }
+ if listWafRulesOptions.PerPage != nil {
+ builder.AddQuery("per_page", fmt.Sprint(*listWafRulesOptions.PerPage))
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = wafRulesApi.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafRulesResponse)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// GetWafRule : Get WAF rule
+// Get individual information about a rule.
+func (wafRulesApi *WafRulesApiV1) GetWafRule(getWafRuleOptions *GetWafRuleOptions) (result *WafRuleResponse, response *core.DetailedResponse, err error) {
+ return wafRulesApi.GetWafRuleWithContext(context.Background(), getWafRuleOptions)
+}
+
+// GetWafRuleWithContext is an alternate form of the GetWafRule method which supports a Context parameter
+func (wafRulesApi *WafRulesApiV1) GetWafRuleWithContext(ctx context.Context, getWafRuleOptions *GetWafRuleOptions) (result *WafRuleResponse, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(getWafRuleOptions, "getWafRuleOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(getWafRuleOptions, "getWafRuleOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *wafRulesApi.Crn,
+ "zone_id": *wafRulesApi.ZoneID,
+ "package_id": *getWafRuleOptions.PackageID,
+ "identifier": *getWafRuleOptions.Identifier,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = wafRulesApi.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(wafRulesApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/firewall/waf/packages/{package_id}/rules/{identifier}`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range getWafRuleOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("waf_rules_api", "V1", "GetWafRule")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = wafRulesApi.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafRuleResponse)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// UpdateWafRule : Update WAF rule
+// Update the action the rule will perform if triggered on the zone.
+func (wafRulesApi *WafRulesApiV1) UpdateWafRule(updateWafRuleOptions *UpdateWafRuleOptions) (result *WafRuleResponse, response *core.DetailedResponse, err error) { + return wafRulesApi.UpdateWafRuleWithContext(context.Background(), updateWafRuleOptions) +} + +// UpdateWafRuleWithContext is an alternate form of the UpdateWafRule method which supports a Context parameter +func (wafRulesApi *WafRulesApiV1) UpdateWafRuleWithContext(ctx context.Context, updateWafRuleOptions *UpdateWafRuleOptions) (result *WafRuleResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateWafRuleOptions, "updateWafRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateWafRuleOptions, "updateWafRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *wafRulesApi.Crn, + "zone_id": *wafRulesApi.ZoneID, + "package_id": *updateWafRuleOptions.PackageID, + "identifier": *updateWafRuleOptions.Identifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = wafRulesApi.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(wafRulesApi.Service.Options.URL, `/v1/{crn}/zones/{zone_id}/firewall/waf/packages/{package_id}/rules/{identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateWafRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("waf_rules_api", "V1", "UpdateWafRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateWafRuleOptions.Cis != nil { + body["cis"] = updateWafRuleOptions.Cis + } + if updateWafRuleOptions.Owasp != nil { + body["owasp"] = updateWafRuleOptions.Owasp + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = wafRulesApi.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafRuleResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWafRuleOptions : The GetWafRule options. +type GetWafRuleOptions struct { + // package id. + PackageID *string `json:"package_id" validate:"required,ne="` + + // rule identifier. 
+ Identifier *string `json:"identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWafRuleOptions : Instantiate GetWafRuleOptions +func (*WafRulesApiV1) NewGetWafRuleOptions(packageID string, identifier string) *GetWafRuleOptions { + return &GetWafRuleOptions{ + PackageID: core.StringPtr(packageID), + Identifier: core.StringPtr(identifier), + } +} + +// SetPackageID : Allow user to set PackageID +func (options *GetWafRuleOptions) SetPackageID(packageID string) *GetWafRuleOptions { + options.PackageID = core.StringPtr(packageID) + return options +} + +// SetIdentifier : Allow user to set Identifier +func (options *GetWafRuleOptions) SetIdentifier(identifier string) *GetWafRuleOptions { + options.Identifier = core.StringPtr(identifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWafRuleOptions) SetHeaders(param map[string]string) *GetWafRuleOptions { + options.Headers = param + return options +} + +// ListWafRulesOptions : The ListWafRules options. +type ListWafRulesOptions struct { + // package id. + PackageID *string `json:"package_id" validate:"required,ne="` + + // The Rule Mode. + Mode *string `json:"mode,omitempty"` + + // The order in which the individual rule is executed within the related group. + Priority *string `json:"priority,omitempty"` + + // Whether to match all search requirements or at least one. default value: all. valid values: any, all. + Match *string `json:"match,omitempty"` + + // Field to order rules by. valid values: priority, group_id, description. + Order *string `json:"order,omitempty"` + + // WAF group identifier tag. max length: 32; Read-only. + GroupID *string `json:"group_id,omitempty"` + + // Public description of the rule. + Description *string `json:"description,omitempty"` + + // Direction to order rules. valid values: asc, desc. + Direction *string `json:"direction,omitempty"` + + // Page number of paginated results. default value: 1; min value:1. + Page *int64 `json:"page,omitempty"` + + // Number of rules per page. default value: 50; min value:5; max value:100. + PerPage *int64 `json:"per_page,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListWafRulesOptions.Mode property. +// The Rule Mode. 
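+//
+// For example (sketch; the package ID is a placeholder), list only the rules
+// that are currently turned on:
+//
+//	opts := wafRulesApi.NewListWafRulesOptions("<package-id>")
+//	opts.SetMode(ListWafRulesOptions_Mode_On)
+//	rules, _, err := wafRulesApi.ListWafRules(opts)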
+const ( + ListWafRulesOptions_Mode_Off = "off" + ListWafRulesOptions_Mode_On = "on" +) + +// NewListWafRulesOptions : Instantiate ListWafRulesOptions +func (*WafRulesApiV1) NewListWafRulesOptions(packageID string) *ListWafRulesOptions { + return &ListWafRulesOptions{ + PackageID: core.StringPtr(packageID), + } +} + +// SetPackageID : Allow user to set PackageID +func (options *ListWafRulesOptions) SetPackageID(packageID string) *ListWafRulesOptions { + options.PackageID = core.StringPtr(packageID) + return options +} + +// SetMode : Allow user to set Mode +func (options *ListWafRulesOptions) SetMode(mode string) *ListWafRulesOptions { + options.Mode = core.StringPtr(mode) + return options +} + +// SetPriority : Allow user to set Priority +func (options *ListWafRulesOptions) SetPriority(priority string) *ListWafRulesOptions { + options.Priority = core.StringPtr(priority) + return options +} + +// SetMatch : Allow user to set Match +func (options *ListWafRulesOptions) SetMatch(match string) *ListWafRulesOptions { + options.Match = core.StringPtr(match) + return options +} + +// SetOrder : Allow user to set Order +func (options *ListWafRulesOptions) SetOrder(order string) *ListWafRulesOptions { + options.Order = core.StringPtr(order) + return options +} + +// SetGroupID : Allow user to set GroupID +func (options *ListWafRulesOptions) SetGroupID(groupID string) *ListWafRulesOptions { + options.GroupID = core.StringPtr(groupID) + return options +} + +// SetDescription : Allow user to set Description +func (options *ListWafRulesOptions) SetDescription(description string) *ListWafRulesOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetDirection : Allow user to set Direction +func (options *ListWafRulesOptions) SetDirection(direction string) *ListWafRulesOptions { + options.Direction = core.StringPtr(direction) + return options +} + +// SetPage : Allow user to set Page +func (options *ListWafRulesOptions) SetPage(page int64) *ListWafRulesOptions { + options.Page = core.Int64Ptr(page) + return options +} + +// SetPerPage : Allow user to set PerPage +func (options *ListWafRulesOptions) SetPerPage(perPage int64) *ListWafRulesOptions { + options.PerPage = core.Int64Ptr(perPage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListWafRulesOptions) SetHeaders(param map[string]string) *ListWafRulesOptions { + options.Headers = param + return options +} + +// UpdateWafRuleOptions : The UpdateWafRule options. +type UpdateWafRuleOptions struct { + // package id. + PackageID *string `json:"package_id" validate:"required,ne="` + + // rule identifier. + Identifier *string `json:"identifier" validate:"required,ne="` + + // cis package. + Cis *WafRuleBodyCis `json:"cis,omitempty"` + + // owasp package. 
+ Owasp *WafRuleBodyOwasp `json:"owasp,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateWafRuleOptions : Instantiate UpdateWafRuleOptions +func (*WafRulesApiV1) NewUpdateWafRuleOptions(packageID string, identifier string) *UpdateWafRuleOptions { + return &UpdateWafRuleOptions{ + PackageID: core.StringPtr(packageID), + Identifier: core.StringPtr(identifier), + } +} + +// SetPackageID : Allow user to set PackageID +func (options *UpdateWafRuleOptions) SetPackageID(packageID string) *UpdateWafRuleOptions { + options.PackageID = core.StringPtr(packageID) + return options +} + +// SetIdentifier : Allow user to set Identifier +func (options *UpdateWafRuleOptions) SetIdentifier(identifier string) *UpdateWafRuleOptions { + options.Identifier = core.StringPtr(identifier) + return options +} + +// SetCis : Allow user to set Cis +func (options *UpdateWafRuleOptions) SetCis(cis *WafRuleBodyCis) *UpdateWafRuleOptions { + options.Cis = cis + return options +} + +// SetOwasp : Allow user to set Owasp +func (options *UpdateWafRuleOptions) SetOwasp(owasp *WafRuleBodyOwasp) *UpdateWafRuleOptions { + options.Owasp = owasp + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateWafRuleOptions) SetHeaders(param map[string]string) *UpdateWafRuleOptions { + options.Headers = param + return options +} + +// WafRuleBodyCis : cis package. +type WafRuleBodyCis struct { + // mode to choose from. + Mode *string `json:"mode" validate:"required"` +} + +// Constants associated with the WafRuleBodyCis.Mode property. +// mode to choose from. +const ( + WafRuleBodyCis_Mode_Block = "block" + WafRuleBodyCis_Mode_Challenge = "challenge" + WafRuleBodyCis_Mode_Default = "default" + WafRuleBodyCis_Mode_Disable = "disable" + WafRuleBodyCis_Mode_Simulate = "simulate" +) + + +// NewWafRuleBodyCis : Instantiate WafRuleBodyCis (Generic Model Constructor) +func (*WafRulesApiV1) NewWafRuleBodyCis(mode string) (model *WafRuleBodyCis, err error) { + model = &WafRuleBodyCis{ + Mode: core.StringPtr(mode), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalWafRuleBodyCis unmarshals an instance of WafRuleBodyCis from the specified map of raw messages. +func UnmarshalWafRuleBodyCis(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRuleBodyCis) + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafRuleBodyOwasp : owasp package. +type WafRuleBodyOwasp struct { + // mode to choose from. 'owasp' limited modes - on and off. + Mode *string `json:"mode" validate:"required"` +} + +// Constants associated with the WafRuleBodyOwasp.Mode property. +// mode to choose from. 'owasp' limited modes - on and off. +const ( + WafRuleBodyOwasp_Mode_Off = "off" + WafRuleBodyOwasp_Mode_On = "on" +) + + +// NewWafRuleBodyOwasp : Instantiate WafRuleBodyOwasp (Generic Model Constructor) +func (*WafRulesApiV1) NewWafRuleBodyOwasp(mode string) (model *WafRuleBodyOwasp, err error) { + model = &WafRuleBodyOwasp{ + Mode: core.StringPtr(mode), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalWafRuleBodyOwasp unmarshals an instance of WafRuleBodyOwasp from the specified map of raw messages. 
+func UnmarshalWafRuleBodyOwasp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRuleBodyOwasp) + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafRuleResponseResult : Information about a Rule. +type WafRuleResponseResult struct { + // ID. + ID *string `json:"id,omitempty"` + + // description. + Description *string `json:"description,omitempty"` + + // priority. + Priority *string `json:"priority,omitempty"` + + // group definition. + Group *WafRuleResponseResultGroup `json:"group,omitempty"` + + // package id. + PackageID *string `json:"package_id,omitempty"` + + // allowed modes. + AllowedModes []string `json:"allowed_modes,omitempty"` + + // mode. + Mode *string `json:"mode,omitempty"` +} + +// Constants associated with the WafRuleResponseResult.Mode property. +// mode. +const ( + WafRuleResponseResult_Mode_Off = "off" + WafRuleResponseResult_Mode_On = "on" +) + + +// UnmarshalWafRuleResponseResult unmarshals an instance of WafRuleResponseResult from the specified map of raw messages. +func UnmarshalWafRuleResponseResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRuleResponseResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalWafRuleResponseResultGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "package_id", &obj.PackageID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "allowed_modes", &obj.AllowedModes) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafRuleResponseResultGroup : group definition. +type WafRuleResponseResultGroup struct { + // group id. + ID *string `json:"id,omitempty"` + + // group name. + Name *string `json:"name,omitempty"` +} + + +// UnmarshalWafRuleResponseResultGroup unmarshals an instance of WafRuleResponseResultGroup from the specified map of raw messages. +func UnmarshalWafRuleResponseResultGroup(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRuleResponseResultGroup) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafRulesResponseResultInfo : result information. +type WafRulesResponseResultInfo struct { + // current page. + Page *int64 `json:"page,omitempty"` + + // number of data per page. + PerPage *int64 `json:"per_page,omitempty"` + + // count. + Count *int64 `json:"count,omitempty"` + + // total count of data. + TotalCount *int64 `json:"total_count,omitempty"` +} + + +// UnmarshalWafRulesResponseResultInfo unmarshals an instance of WafRulesResponseResultInfo from the specified map of raw messages. 
+func UnmarshalWafRulesResponseResultInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRulesResponseResultInfo) + err = core.UnmarshalPrimitive(m, "page", &obj.Page) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafRulesResponseResultItem : WafRulesResponseResultItem struct +type WafRulesResponseResultItem struct { + // ID. + ID *string `json:"id,omitempty"` + + // description. + Description *string `json:"description,omitempty"` + + // priority. + Priority *string `json:"priority,omitempty"` + + // group definition. + Group *WafRulesResponseResultItemGroup `json:"group,omitempty"` + + // package id. + PackageID *string `json:"package_id,omitempty"` + + // allowed modes. + AllowedModes []string `json:"allowed_modes,omitempty"` + + // mode. + Mode *string `json:"mode,omitempty"` +} + +// Constants associated with the WafRulesResponseResultItem.Mode property. +// mode. +const ( + WafRulesResponseResultItem_Mode_Off = "off" + WafRulesResponseResultItem_Mode_On = "on" +) + + +// UnmarshalWafRulesResponseResultItem unmarshals an instance of WafRulesResponseResultItem from the specified map of raw messages. +func UnmarshalWafRulesResponseResultItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRulesResponseResultItem) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalWafRulesResponseResultItemGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "package_id", &obj.PackageID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "allowed_modes", &obj.AllowedModes) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafRulesResponseResultItemGroup : group definition. +type WafRulesResponseResultItemGroup struct { + // group id. + ID *string `json:"id,omitempty"` + + // group name. + Name *string `json:"name,omitempty"` +} + + +// UnmarshalWafRulesResponseResultItemGroup unmarshals an instance of WafRulesResponseResultItemGroup from the specified map of raw messages. +func UnmarshalWafRulesResponseResultItemGroup(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRulesResponseResultItemGroup) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafRuleResponse : waf rule response. +type WafRuleResponse struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // Information about a Rule. 
+ Result *WafRuleResponseResult `json:"result" validate:"required"` +} + + +// UnmarshalWafRuleResponse unmarshals an instance of WafRuleResponse from the specified map of raw messages. +func UnmarshalWafRuleResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRuleResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalWafRuleResponseResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafRulesResponse : waf rule response. +type WafRulesResponse struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // Array of Rules. + Result []WafRulesResponseResultItem `json:"result" validate:"required"` + + // result information. + ResultInfo *WafRulesResponseResultInfo `json:"result_info,omitempty"` +} + + +// UnmarshalWafRulesResponse unmarshals an instance of WafRulesResponse from the specified map of raw messages. +func UnmarshalWafRulesResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRulesResponse) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalWafRulesResponseResultItem) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalWafRulesResponseResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/zonefirewallaccessrulesv1/zone_firewall_access_rules_v1.go b/vendor/github.com/IBM/networking-go-sdk/zonefirewallaccessrulesv1/zone_firewall_access_rules_v1.go new file mode 100644 index 00000000000..6ed9dfbcdda --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/zonefirewallaccessrulesv1/zone_firewall_access_rules_v1.go @@ -0,0 +1,1200 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package zonefirewallaccessrulesv1 : Operations and models for the ZoneFirewallAccessRulesV1 service +package zonefirewallaccessrulesv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// ZoneFirewallAccessRulesV1 : Zone Firewall Access Rules +// +// Version: 1.0.1 +type ZoneFirewallAccessRulesV1 struct { + Service *core.BaseService + + // Full crn of the service instance. + Crn *string + + // Identifier of zone whose access rule is to be deleted. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "zone_firewall_access_rules" + +// ZoneFirewallAccessRulesV1Options : Service options +type ZoneFirewallAccessRulesV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full crn of the service instance. + Crn *string `validate:"required"` + + // Identifier of zone whose access rule is to be deleted. + ZoneIdentifier *string `validate:"required"` +} + +// NewZoneFirewallAccessRulesV1UsingExternalConfig : constructs an instance of ZoneFirewallAccessRulesV1 with passed in options and external configuration. +func NewZoneFirewallAccessRulesV1UsingExternalConfig(options *ZoneFirewallAccessRulesV1Options) (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + zoneFirewallAccessRules, err = NewZoneFirewallAccessRulesV1(options) + if err != nil { + return + } + + err = zoneFirewallAccessRules.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = zoneFirewallAccessRules.Service.SetServiceURL(options.URL) + } + return +} + +// NewZoneFirewallAccessRulesV1 : constructs an instance of ZoneFirewallAccessRulesV1 with passed in options. +func NewZoneFirewallAccessRulesV1(options *ZoneFirewallAccessRulesV1Options) (service *ZoneFirewallAccessRulesV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &ZoneFirewallAccessRulesV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "zoneFirewallAccessRules" suitable for processing requests. 
+func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) Clone() *ZoneFirewallAccessRulesV1 { + if core.IsNil(zoneFirewallAccessRules) { + return nil + } + clone := *zoneFirewallAccessRules + clone.Service = zoneFirewallAccessRules.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) SetServiceURL(url string) error { + return zoneFirewallAccessRules.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) GetServiceURL() string { + return zoneFirewallAccessRules.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) SetDefaultHeaders(headers http.Header) { + zoneFirewallAccessRules.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) SetEnableGzipCompression(enableGzip bool) { + zoneFirewallAccessRules.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) GetEnableGzipCompression() bool { + return zoneFirewallAccessRules.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + zoneFirewallAccessRules.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) DisableRetries() { + zoneFirewallAccessRules.Service.DisableRetries() +} + +// ListAllZoneAccessRules : List all firewall access rules +// List all firewall access rules for a zone. 
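+//
+// Illustrative sketch: the options constructor and setter below are assumed
+// to follow the generated pattern used throughout this package.
+//
+//	opts := zoneFirewallAccessRules.NewListAllZoneAccessRulesOptions()
+//	opts.SetMode("block")
+//	rules, _, err := zoneFirewallAccessRules.ListAllZoneAccessRules(opts)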
+func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) ListAllZoneAccessRules(listAllZoneAccessRulesOptions *ListAllZoneAccessRulesOptions) (result *ListZoneAccessRulesResp, response *core.DetailedResponse, err error) {
+ return zoneFirewallAccessRules.ListAllZoneAccessRulesWithContext(context.Background(), listAllZoneAccessRulesOptions)
+}
+
+// ListAllZoneAccessRulesWithContext is an alternate form of the ListAllZoneAccessRules method which supports a Context parameter
+func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) ListAllZoneAccessRulesWithContext(ctx context.Context, listAllZoneAccessRulesOptions *ListAllZoneAccessRulesOptions) (result *ListZoneAccessRulesResp, response *core.DetailedResponse, err error) {
+ err = core.ValidateStruct(listAllZoneAccessRulesOptions, "listAllZoneAccessRulesOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *zoneFirewallAccessRules.Crn,
+ "zone_identifier": *zoneFirewallAccessRules.ZoneIdentifier,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = zoneFirewallAccessRules.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(zoneFirewallAccessRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/access_rules/rules`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range listAllZoneAccessRulesOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("zone_firewall_access_rules", "V1", "ListAllZoneAccessRules")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ if listAllZoneAccessRulesOptions.Notes != nil {
+ builder.AddQuery("notes", fmt.Sprint(*listAllZoneAccessRulesOptions.Notes))
+ }
+ if listAllZoneAccessRulesOptions.Mode != nil {
+ builder.AddQuery("mode", fmt.Sprint(*listAllZoneAccessRulesOptions.Mode))
+ }
+ if listAllZoneAccessRulesOptions.ConfigurationTarget != nil {
+ builder.AddQuery("configuration.target", fmt.Sprint(*listAllZoneAccessRulesOptions.ConfigurationTarget))
+ }
+ if listAllZoneAccessRulesOptions.ConfigurationValue != nil {
+ builder.AddQuery("configuration.value", fmt.Sprint(*listAllZoneAccessRulesOptions.ConfigurationValue))
+ }
+ if listAllZoneAccessRulesOptions.Page != nil {
+ builder.AddQuery("page", fmt.Sprint(*listAllZoneAccessRulesOptions.Page))
+ }
+ if listAllZoneAccessRulesOptions.PerPage != nil {
+ builder.AddQuery("per_page", fmt.Sprint(*listAllZoneAccessRulesOptions.PerPage))
+ }
+ if listAllZoneAccessRulesOptions.Order != nil {
+ builder.AddQuery("order", fmt.Sprint(*listAllZoneAccessRulesOptions.Order))
+ }
+ if listAllZoneAccessRulesOptions.Direction != nil {
+ builder.AddQuery("direction", fmt.Sprint(*listAllZoneAccessRulesOptions.Direction))
+ }
+ if listAllZoneAccessRulesOptions.Match != nil {
+ builder.AddQuery("match", fmt.Sprint(*listAllZoneAccessRulesOptions.Match))
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = zoneFirewallAccessRules.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListZoneAccessRulesResp)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// CreateZoneAccessRule : Create firewall access rule
+// Create a new firewall access rule for a given zone under a service instance.
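+//
+// Sketch: block a single IP. The configuration constructor is assumed to
+// follow the generic model-constructor pattern in this package, and the IP
+// is a placeholder.
+//
+//	config, _ := zoneFirewallAccessRules.NewZoneAccessRuleInputConfiguration("ip", "198.51.100.4")
+//	opts := zoneFirewallAccessRules.NewCreateZoneAccessRuleOptions()
+//	opts.SetMode(CreateZoneAccessRuleOptions_Mode_Block)
+//	opts.SetConfiguration(config)
+//	rule, _, err := zoneFirewallAccessRules.CreateZoneAccessRule(opts)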
+func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) CreateZoneAccessRule(createZoneAccessRuleOptions *CreateZoneAccessRuleOptions) (result *ZoneAccessRuleResp, response *core.DetailedResponse, err error) {
+ return zoneFirewallAccessRules.CreateZoneAccessRuleWithContext(context.Background(), createZoneAccessRuleOptions)
+}
+
+// CreateZoneAccessRuleWithContext is an alternate form of the CreateZoneAccessRule method which supports a Context parameter
+func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) CreateZoneAccessRuleWithContext(ctx context.Context, createZoneAccessRuleOptions *CreateZoneAccessRuleOptions) (result *ZoneAccessRuleResp, response *core.DetailedResponse, err error) {
+ err = core.ValidateStruct(createZoneAccessRuleOptions, "createZoneAccessRuleOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "crn": *zoneFirewallAccessRules.Crn,
+ "zone_identifier": *zoneFirewallAccessRules.ZoneIdentifier,
+ }
+
+ builder := core.NewRequestBuilder(core.POST)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = zoneFirewallAccessRules.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(zoneFirewallAccessRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/access_rules/rules`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range createZoneAccessRuleOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("zone_firewall_access_rules", "V1", "CreateZoneAccessRule")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ builder.AddHeader("Content-Type", "application/json")
+
+ body := make(map[string]interface{})
+ if createZoneAccessRuleOptions.Mode != nil {
+ body["mode"] = createZoneAccessRuleOptions.Mode
+ }
+ if createZoneAccessRuleOptions.Notes != nil {
+ body["notes"] = createZoneAccessRuleOptions.Notes
+ }
+ if createZoneAccessRuleOptions.Configuration != nil {
+ body["configuration"] = createZoneAccessRuleOptions.Configuration
+ }
+ _, err = builder.SetBodyContentJSON(body)
+ if err != nil {
+ return
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = zoneFirewallAccessRules.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZoneAccessRuleResp)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// DeleteZoneAccessRule : Delete firewall access rule
+// Delete an access rule given its id.
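+//
+// Sketch (the rule ID is a placeholder):
+//
+//	opts := zoneFirewallAccessRules.NewDeleteZoneAccessRuleOptions("<accessrule-id>")
+//	deleted, _, err := zoneFirewallAccessRules.DeleteZoneAccessRule(opts)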
+func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) DeleteZoneAccessRule(deleteZoneAccessRuleOptions *DeleteZoneAccessRuleOptions) (result *DeleteZoneAccessRuleResp, response *core.DetailedResponse, err error) { + return zoneFirewallAccessRules.DeleteZoneAccessRuleWithContext(context.Background(), deleteZoneAccessRuleOptions) +} + +// DeleteZoneAccessRuleWithContext is an alternate form of the DeleteZoneAccessRule method which supports a Context parameter +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) DeleteZoneAccessRuleWithContext(ctx context.Context, deleteZoneAccessRuleOptions *DeleteZoneAccessRuleOptions) (result *DeleteZoneAccessRuleResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteZoneAccessRuleOptions, "deleteZoneAccessRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteZoneAccessRuleOptions, "deleteZoneAccessRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneFirewallAccessRules.Crn, + "zone_identifier": *zoneFirewallAccessRules.ZoneIdentifier, + "accessrule_identifier": *deleteZoneAccessRuleOptions.AccessruleIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneFirewallAccessRules.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneFirewallAccessRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/access_rules/rules/{accessrule_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteZoneAccessRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_firewall_access_rules", "V1", "DeleteZoneAccessRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneFirewallAccessRules.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteZoneAccessRuleResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetZoneAccessRule : Get firewall access rule +// Get the details of a firewall access rule for a given zone under a given service instance. 
+func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) GetZoneAccessRule(getZoneAccessRuleOptions *GetZoneAccessRuleOptions) (result *ZoneAccessRuleResp, response *core.DetailedResponse, err error) { + return zoneFirewallAccessRules.GetZoneAccessRuleWithContext(context.Background(), getZoneAccessRuleOptions) +} + +// GetZoneAccessRuleWithContext is an alternate form of the GetZoneAccessRule method which supports a Context parameter +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) GetZoneAccessRuleWithContext(ctx context.Context, getZoneAccessRuleOptions *GetZoneAccessRuleOptions) (result *ZoneAccessRuleResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getZoneAccessRuleOptions, "getZoneAccessRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getZoneAccessRuleOptions, "getZoneAccessRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneFirewallAccessRules.Crn, + "zone_identifier": *zoneFirewallAccessRules.ZoneIdentifier, + "accessrule_identifier": *getZoneAccessRuleOptions.AccessruleIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneFirewallAccessRules.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneFirewallAccessRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/access_rules/rules/{accessrule_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getZoneAccessRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_firewall_access_rules", "V1", "GetZoneAccessRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneFirewallAccessRules.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZoneAccessRuleResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateZoneAccessRule : Update firewall access rule +// Update an existing firewall access rule for a given zone under a given service instance. 
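+//
+// Sketch (constructor assumed per the generated pattern; values are
+// placeholders): soften a block rule to a challenge and note why.
+//
+//	opts := zoneFirewallAccessRules.NewUpdateZoneAccessRuleOptions("<accessrule-id>")
+//	opts.SetMode("challenge")
+//	opts.SetNotes("reduced from block after traffic review")
+//	rule, _, err := zoneFirewallAccessRules.UpdateZoneAccessRule(opts)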
+func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) UpdateZoneAccessRule(updateZoneAccessRuleOptions *UpdateZoneAccessRuleOptions) (result *ZoneAccessRuleResp, response *core.DetailedResponse, err error) { + return zoneFirewallAccessRules.UpdateZoneAccessRuleWithContext(context.Background(), updateZoneAccessRuleOptions) +} + +// UpdateZoneAccessRuleWithContext is an alternate form of the UpdateZoneAccessRule method which supports a Context parameter +func (zoneFirewallAccessRules *ZoneFirewallAccessRulesV1) UpdateZoneAccessRuleWithContext(ctx context.Context, updateZoneAccessRuleOptions *UpdateZoneAccessRuleOptions) (result *ZoneAccessRuleResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateZoneAccessRuleOptions, "updateZoneAccessRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateZoneAccessRuleOptions, "updateZoneAccessRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneFirewallAccessRules.Crn, + "zone_identifier": *zoneFirewallAccessRules.ZoneIdentifier, + "accessrule_identifier": *updateZoneAccessRuleOptions.AccessruleIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneFirewallAccessRules.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneFirewallAccessRules.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/access_rules/rules/{accessrule_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateZoneAccessRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_firewall_access_rules", "V1", "UpdateZoneAccessRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateZoneAccessRuleOptions.Mode != nil { + body["mode"] = updateZoneAccessRuleOptions.Mode + } + if updateZoneAccessRuleOptions.Notes != nil { + body["notes"] = updateZoneAccessRuleOptions.Notes + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneFirewallAccessRules.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZoneAccessRuleResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateZoneAccessRuleOptions : The CreateZoneAccessRule options. +type CreateZoneAccessRuleOptions struct { + // The action to apply to a matched request. + Mode *string `json:"mode,omitempty"` + + // A personal note about the rule. Typically used as a reminder or explanation for the rule. + Notes *string `json:"notes,omitempty"` + + // Configuration object specifying access rule. + Configuration *ZoneAccessRuleInputConfiguration `json:"configuration,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateZoneAccessRuleOptions.Mode property. +// The action to apply to a matched request. 
+const ( + CreateZoneAccessRuleOptions_Mode_Block = "block" + CreateZoneAccessRuleOptions_Mode_Challenge = "challenge" + CreateZoneAccessRuleOptions_Mode_JsChallenge = "js_challenge" + CreateZoneAccessRuleOptions_Mode_Whitelist = "whitelist" +) + +// NewCreateZoneAccessRuleOptions : Instantiate CreateZoneAccessRuleOptions +func (*ZoneFirewallAccessRulesV1) NewCreateZoneAccessRuleOptions() *CreateZoneAccessRuleOptions { + return &CreateZoneAccessRuleOptions{} +} + +// SetMode : Allow user to set Mode +func (options *CreateZoneAccessRuleOptions) SetMode(mode string) *CreateZoneAccessRuleOptions { + options.Mode = core.StringPtr(mode) + return options +} + +// SetNotes : Allow user to set Notes +func (options *CreateZoneAccessRuleOptions) SetNotes(notes string) *CreateZoneAccessRuleOptions { + options.Notes = core.StringPtr(notes) + return options +} + +// SetConfiguration : Allow user to set Configuration +func (options *CreateZoneAccessRuleOptions) SetConfiguration(configuration *ZoneAccessRuleInputConfiguration) *CreateZoneAccessRuleOptions { + options.Configuration = configuration + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateZoneAccessRuleOptions) SetHeaders(param map[string]string) *CreateZoneAccessRuleOptions { + options.Headers = param + return options +} + +// DeleteZoneAccessRuleOptions : The DeleteZoneAccessRule options. +type DeleteZoneAccessRuleOptions struct { + // Identifier of the access rule to be deleted. + AccessruleIdentifier *string `json:"accessrule_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteZoneAccessRuleOptions : Instantiate DeleteZoneAccessRuleOptions +func (*ZoneFirewallAccessRulesV1) NewDeleteZoneAccessRuleOptions(accessruleIdentifier string) *DeleteZoneAccessRuleOptions { + return &DeleteZoneAccessRuleOptions{ + AccessruleIdentifier: core.StringPtr(accessruleIdentifier), + } +} + +// SetAccessruleIdentifier : Allow user to set AccessruleIdentifier +func (options *DeleteZoneAccessRuleOptions) SetAccessruleIdentifier(accessruleIdentifier string) *DeleteZoneAccessRuleOptions { + options.AccessruleIdentifier = core.StringPtr(accessruleIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteZoneAccessRuleOptions) SetHeaders(param map[string]string) *DeleteZoneAccessRuleOptions { + options.Headers = param + return options +} + +// DeleteZoneAccessRuleRespResult : Container for response information. +type DeleteZoneAccessRuleRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` +} + + +// UnmarshalDeleteZoneAccessRuleRespResult unmarshals an instance of DeleteZoneAccessRuleRespResult from the specified map of raw messages. +func UnmarshalDeleteZoneAccessRuleRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteZoneAccessRuleRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetZoneAccessRuleOptions : The GetZoneAccessRule options. +type GetZoneAccessRuleOptions struct { + // Identifier of firewall access rule for the given zone. 
+	AccessruleIdentifier *string `json:"accessrule_identifier" validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewGetZoneAccessRuleOptions : Instantiate GetZoneAccessRuleOptions
+func (*ZoneFirewallAccessRulesV1) NewGetZoneAccessRuleOptions(accessruleIdentifier string) *GetZoneAccessRuleOptions {
+	return &GetZoneAccessRuleOptions{
+		AccessruleIdentifier: core.StringPtr(accessruleIdentifier),
+	}
+}
+
+// SetAccessruleIdentifier : Allow user to set AccessruleIdentifier
+func (options *GetZoneAccessRuleOptions) SetAccessruleIdentifier(accessruleIdentifier string) *GetZoneAccessRuleOptions {
+	options.AccessruleIdentifier = core.StringPtr(accessruleIdentifier)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetZoneAccessRuleOptions) SetHeaders(param map[string]string) *GetZoneAccessRuleOptions {
+	options.Headers = param
+	return options
+}
+
+// ListAllZoneAccessRulesOptions : The ListAllZoneAccessRules options.
+type ListAllZoneAccessRulesOptions struct {
+	// Search access rules by note (not case sensitive).
+	Notes *string `json:"notes,omitempty"`
+
+	// Search access rules by mode.
+	Mode *string `json:"mode,omitempty"`
+
+	// Search access rules by configuration target.
+	ConfigurationTarget *string `json:"configuration.target,omitempty"`
+
+	// Search access rules by configuration value, which can be an IP, IP range, or country code.
+	ConfigurationValue *string `json:"configuration.value,omitempty"`
+
+	// Page number of paginated results.
+	Page *int64 `json:"page,omitempty"`
+
+	// Maximum number of access rules per page.
+	PerPage *int64 `json:"per_page,omitempty"`
+
+	// Field by which to order the list of access rules.
+	Order *string `json:"order,omitempty"`
+
+	// Direction in which to order results [ascending/descending order].
+	Direction *string `json:"direction,omitempty"`
+
+	// Whether to match all (all) or at least one search parameter (any).
+	Match *string `json:"match,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the ListAllZoneAccessRulesOptions.Mode property.
+// Search access rules by mode.
+const (
+	ListAllZoneAccessRulesOptions_Mode_Block = "block"
+	ListAllZoneAccessRulesOptions_Mode_Challenge = "challenge"
+	ListAllZoneAccessRulesOptions_Mode_JsChallenge = "js_challenge"
+	ListAllZoneAccessRulesOptions_Mode_Whitelist = "whitelist"
+)
+
+// Constants associated with the ListAllZoneAccessRulesOptions.ConfigurationTarget property.
+// Search access rules by configuration target.
+const (
+	ListAllZoneAccessRulesOptions_ConfigurationTarget_Asn = "asn"
+	ListAllZoneAccessRulesOptions_ConfigurationTarget_Country = "country"
+	ListAllZoneAccessRulesOptions_ConfigurationTarget_Ip = "ip"
+	ListAllZoneAccessRulesOptions_ConfigurationTarget_IpRange = "ip_range"
+)
+
+// Constants associated with the ListAllZoneAccessRulesOptions.Order property.
+// Field by which to order the list of access rules.
+const (
+	ListAllZoneAccessRulesOptions_Order_ConfigurationTarget = "configuration.target"
+	ListAllZoneAccessRulesOptions_Order_ConfigurationValue = "configuration.value"
+	ListAllZoneAccessRulesOptions_Order_Mode = "mode"
+)
+
+// Constants associated with the ListAllZoneAccessRulesOptions.Direction property.
+// Direction in which to order results [ascending/descending order].
+const (
+	ListAllZoneAccessRulesOptions_Direction_Asc = "asc"
+	ListAllZoneAccessRulesOptions_Direction_Desc = "desc"
+)
+
+// Constants associated with the ListAllZoneAccessRulesOptions.Match property.
+// Whether to match all (all) or at least one search parameter (any).
+const (
+	ListAllZoneAccessRulesOptions_Match_All = "all"
+	ListAllZoneAccessRulesOptions_Match_Any = "any"
+)
+
+// NewListAllZoneAccessRulesOptions : Instantiate ListAllZoneAccessRulesOptions
+func (*ZoneFirewallAccessRulesV1) NewListAllZoneAccessRulesOptions() *ListAllZoneAccessRulesOptions {
+	return &ListAllZoneAccessRulesOptions{}
+}
+
+// SetNotes : Allow user to set Notes
+func (options *ListAllZoneAccessRulesOptions) SetNotes(notes string) *ListAllZoneAccessRulesOptions {
+	options.Notes = core.StringPtr(notes)
+	return options
+}
+
+// SetMode : Allow user to set Mode
+func (options *ListAllZoneAccessRulesOptions) SetMode(mode string) *ListAllZoneAccessRulesOptions {
+	options.Mode = core.StringPtr(mode)
+	return options
+}
+
+// SetConfigurationTarget : Allow user to set ConfigurationTarget
+func (options *ListAllZoneAccessRulesOptions) SetConfigurationTarget(configurationTarget string) *ListAllZoneAccessRulesOptions {
+	options.ConfigurationTarget = core.StringPtr(configurationTarget)
+	return options
+}
+
+// SetConfigurationValue : Allow user to set ConfigurationValue
+func (options *ListAllZoneAccessRulesOptions) SetConfigurationValue(configurationValue string) *ListAllZoneAccessRulesOptions {
+	options.ConfigurationValue = core.StringPtr(configurationValue)
+	return options
+}
+
+// SetPage : Allow user to set Page
+func (options *ListAllZoneAccessRulesOptions) SetPage(page int64) *ListAllZoneAccessRulesOptions {
+	options.Page = core.Int64Ptr(page)
+	return options
+}
+
+// SetPerPage : Allow user to set PerPage
+func (options *ListAllZoneAccessRulesOptions) SetPerPage(perPage int64) *ListAllZoneAccessRulesOptions {
+	options.PerPage = core.Int64Ptr(perPage)
+	return options
+}
+
+// SetOrder : Allow user to set Order
+func (options *ListAllZoneAccessRulesOptions) SetOrder(order string) *ListAllZoneAccessRulesOptions {
+	options.Order = core.StringPtr(order)
+	return options
+}
+
+// SetDirection : Allow user to set Direction
+func (options *ListAllZoneAccessRulesOptions) SetDirection(direction string) *ListAllZoneAccessRulesOptions {
+	options.Direction = core.StringPtr(direction)
+	return options
+}
+
+// SetMatch : Allow user to set Match
+func (options *ListAllZoneAccessRulesOptions) SetMatch(match string) *ListAllZoneAccessRulesOptions {
+	options.Match = core.StringPtr(match)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListAllZoneAccessRulesOptions) SetHeaders(param map[string]string) *ListAllZoneAccessRulesOptions {
+	options.Headers = param
+	return options
+}
+
+// ListZoneAccessRulesRespResultInfo : Statistics of results.
+type ListZoneAccessRulesRespResultInfo struct {
+	// Page number.
+	Page *int64 `json:"page" validate:"required"`
+
+	// Number of results per page.
+	PerPage *int64 `json:"per_page" validate:"required"`
+
+	// Number of results.
+	Count *int64 `json:"count" validate:"required"`
+
+	// Total number of results.
+	TotalCount *int64 `json:"total_count" validate:"required"`
+}
+
+
+// UnmarshalListZoneAccessRulesRespResultInfo unmarshals an instance of ListZoneAccessRulesRespResultInfo from the specified map of raw messages.
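To show how the filter and paging options above combine, a hedged sketch that assumes the service's ListAllZoneAccessRules method declared earlier in this file and an initialized client named service:

    opts := service.NewListAllZoneAccessRulesOptions().
        SetMode(zonefirewallaccessrulesv1.ListAllZoneAccessRulesOptions_Mode_Block).
        SetPage(1).
        SetPerPage(50)
    list, _, err := service.ListAllZoneAccessRules(opts)
    if err != nil {
        log.Fatal(err)
    }
    for _, rule := range list.Result {
        fmt.Printf("%s -> %s %s\n", *rule.ID, *rule.Configuration.Target, *rule.Configuration.Value)
    }
    fmt.Println("total:", *list.ResultInfo.TotalCount)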
+func UnmarshalListZoneAccessRulesRespResultInfo(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ListZoneAccessRulesRespResultInfo)
+	err = core.UnmarshalPrimitive(m, "page", &obj.Page)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "count", &obj.Count)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// UpdateZoneAccessRuleOptions : The UpdateZoneAccessRule options.
+type UpdateZoneAccessRuleOptions struct {
+	// Identifier of firewall access rule.
+	AccessruleIdentifier *string `json:"accessrule_identifier" validate:"required,ne="`
+
+	// The action to apply to a matched request.
+	Mode *string `json:"mode,omitempty"`
+
+	// A personal note about the rule. Typically used as a reminder or explanation for the rule.
+	Notes *string `json:"notes,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the UpdateZoneAccessRuleOptions.Mode property.
+// The action to apply to a matched request.
+const (
+	UpdateZoneAccessRuleOptions_Mode_Block = "block"
+	UpdateZoneAccessRuleOptions_Mode_Challenge = "challenge"
+	UpdateZoneAccessRuleOptions_Mode_JsChallenge = "js_challenge"
+	UpdateZoneAccessRuleOptions_Mode_Whitelist = "whitelist"
+)
+
+// NewUpdateZoneAccessRuleOptions : Instantiate UpdateZoneAccessRuleOptions
+func (*ZoneFirewallAccessRulesV1) NewUpdateZoneAccessRuleOptions(accessruleIdentifier string) *UpdateZoneAccessRuleOptions {
+	return &UpdateZoneAccessRuleOptions{
+		AccessruleIdentifier: core.StringPtr(accessruleIdentifier),
+	}
+}
+
+// SetAccessruleIdentifier : Allow user to set AccessruleIdentifier
+func (options *UpdateZoneAccessRuleOptions) SetAccessruleIdentifier(accessruleIdentifier string) *UpdateZoneAccessRuleOptions {
+	options.AccessruleIdentifier = core.StringPtr(accessruleIdentifier)
+	return options
+}
+
+// SetMode : Allow user to set Mode
+func (options *UpdateZoneAccessRuleOptions) SetMode(mode string) *UpdateZoneAccessRuleOptions {
+	options.Mode = core.StringPtr(mode)
+	return options
+}
+
+// SetNotes : Allow user to set Notes
+func (options *UpdateZoneAccessRuleOptions) SetNotes(notes string) *UpdateZoneAccessRuleOptions {
+	options.Notes = core.StringPtr(notes)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateZoneAccessRuleOptions) SetHeaders(param map[string]string) *UpdateZoneAccessRuleOptions {
+	options.Headers = param
+	return options
+}
+
+// ZoneAccessRuleInputConfiguration : Configuration object specifying access rule.
+type ZoneAccessRuleInputConfiguration struct {
+	// The request property to target.
+	Target *string `json:"target" validate:"required"`
+
+	// The value for the selected target. For ip the value is a valid IP address. For ip_range the value specifies an IP
+	// range limited to /16 and /24. For asn the value is an AS number. For country the value is a country code for the country.
+	Value *string `json:"value" validate:"required"`
+}
+
+// Constants associated with the ZoneAccessRuleInputConfiguration.Target property.
+// The request property to target.
+const (
+	ZoneAccessRuleInputConfiguration_Target_Asn = "asn"
+	ZoneAccessRuleInputConfiguration_Target_Country = "country"
+	ZoneAccessRuleInputConfiguration_Target_Ip = "ip"
+	ZoneAccessRuleInputConfiguration_Target_IpRange = "ip_range"
+)
+
+
+// NewZoneAccessRuleInputConfiguration : Instantiate ZoneAccessRuleInputConfiguration (Generic Model Constructor)
+func (*ZoneFirewallAccessRulesV1) NewZoneAccessRuleInputConfiguration(target string, value string) (model *ZoneAccessRuleInputConfiguration, err error) {
+	model = &ZoneAccessRuleInputConfiguration{
+		Target: core.StringPtr(target),
+		Value: core.StringPtr(value),
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
+
+// UnmarshalZoneAccessRuleInputConfiguration unmarshals an instance of ZoneAccessRuleInputConfiguration from the specified map of raw messages.
+func UnmarshalZoneAccessRuleInputConfiguration(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ZoneAccessRuleInputConfiguration)
+	err = core.UnmarshalPrimitive(m, "target", &obj.Target)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ZoneAccessRuleObjectConfiguration : configuration.
+type ZoneAccessRuleObjectConfiguration struct {
+	// target.
+	Target *string `json:"target" validate:"required"`
+
+	// Value for the given target. For ip the value is a valid IP address. For ip_range the value specifies an IP range
+	// limited to /16 and /24. For asn the value is an AS number. For country the value is a country code for the country.
+	Value *string `json:"value" validate:"required"`
+}
+
+// Constants associated with the ZoneAccessRuleObjectConfiguration.Target property.
+// target.
+const (
+	ZoneAccessRuleObjectConfiguration_Target_Asn = "asn"
+	ZoneAccessRuleObjectConfiguration_Target_Country = "country"
+	ZoneAccessRuleObjectConfiguration_Target_Ip = "ip"
+	ZoneAccessRuleObjectConfiguration_Target_IpRange = "ip_range"
+)
+
+
+// UnmarshalZoneAccessRuleObjectConfiguration unmarshals an instance of ZoneAccessRuleObjectConfiguration from the specified map of raw messages.
+func UnmarshalZoneAccessRuleObjectConfiguration(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ZoneAccessRuleObjectConfiguration)
+	err = core.UnmarshalPrimitive(m, "target", &obj.Target)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ZoneAccessRuleObjectScope : The scope definition of the access rule.
+type ZoneAccessRuleObjectScope struct {
+	// The scope of the access rule, indicating if it is applicable at zone level ("zone") or inherited from instance
+	// level ("account").
+	Type *string `json:"type" validate:"required"`
+}
+
+// Constants associated with the ZoneAccessRuleObjectScope.Type property.
+// The scope of the access rule, indicating if it is applicable at zone level ("zone") or inherited from instance
+// level ("account").
+const (
+	ZoneAccessRuleObjectScope_Type_Account = "account"
+	ZoneAccessRuleObjectScope_Type_Zone = "zone"
+)
+
+
+// UnmarshalZoneAccessRuleObjectScope unmarshals an instance of ZoneAccessRuleObjectScope from the specified map of raw messages.
+func UnmarshalZoneAccessRuleObjectScope(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZoneAccessRuleObjectScope) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DeleteZoneAccessRuleResp : delete access rule response. +type DeleteZoneAccessRuleResp struct { + // Operation success flag. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // Container for response information. + Result *DeleteZoneAccessRuleRespResult `json:"result" validate:"required"` +} + + +// UnmarshalDeleteZoneAccessRuleResp unmarshals an instance of DeleteZoneAccessRuleResp from the specified map of raw messages. +func UnmarshalDeleteZoneAccessRuleResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteZoneAccessRuleResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDeleteZoneAccessRuleRespResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListZoneAccessRulesResp : list access rules response. +type ListZoneAccessRulesResp struct { + // Operation success flag. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // Container for response information. + Result []ZoneAccessRuleObject `json:"result" validate:"required"` + + // Statistics of results. + ResultInfo *ListZoneAccessRulesRespResultInfo `json:"result_info" validate:"required"` +} + + +// UnmarshalListZoneAccessRulesResp unmarshals an instance of ListZoneAccessRulesResp from the specified map of raw messages. +func UnmarshalListZoneAccessRulesResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListZoneAccessRulesResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalZoneAccessRuleObject) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalListZoneAccessRulesRespResultInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZoneAccessRuleObject : access rule object. +type ZoneAccessRuleObject struct { + // Identifier of the firewall access rule. + ID *string `json:"id" validate:"required"` + + // A personal note about the rule. Typically used as a reminder or explanation for the rule. + Notes *string `json:"notes" validate:"required"` + + // List of modes that are allowed. + AllowedModes []string `json:"allowed_modes" validate:"required"` + + // The action to be applied to a request matching the access rule. 
+ Mode *string `json:"mode" validate:"required"` + + // The scope definition of the access rule. + Scope *ZoneAccessRuleObjectScope `json:"scope,omitempty"` + + // The creation date-time of the firewall access rule. + CreatedOn *string `json:"created_on" validate:"required"` + + // The modification date-time of the firewall access rule. + ModifiedOn *string `json:"modified_on" validate:"required"` + + // configuration. + Configuration *ZoneAccessRuleObjectConfiguration `json:"configuration" validate:"required"` +} + +// Constants associated with the ZoneAccessRuleObject.AllowedModes property. +const ( + ZoneAccessRuleObject_AllowedModes_Block = "block" + ZoneAccessRuleObject_AllowedModes_Challenge = "challenge" + ZoneAccessRuleObject_AllowedModes_JsChallenge = "js_challenge" + ZoneAccessRuleObject_AllowedModes_Whitelist = "whitelist" +) + +// Constants associated with the ZoneAccessRuleObject.Mode property. +// The action to be applied to a request matching the access rule. +const ( + ZoneAccessRuleObject_Mode_Block = "block" + ZoneAccessRuleObject_Mode_Challenge = "challenge" + ZoneAccessRuleObject_Mode_JsChallenge = "js_challenge" + ZoneAccessRuleObject_Mode_Whitelist = "whitelist" +) + + +// UnmarshalZoneAccessRuleObject unmarshals an instance of ZoneAccessRuleObject from the specified map of raw messages. +func UnmarshalZoneAccessRuleObject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZoneAccessRuleObject) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "notes", &obj.Notes) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "allowed_modes", &obj.AllowedModes) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + err = core.UnmarshalModel(m, "scope", &obj.Scope, UnmarshalZoneAccessRuleObjectScope) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_on", &obj.CreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalModel(m, "configuration", &obj.Configuration, UnmarshalZoneAccessRuleObjectConfiguration) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZoneAccessRuleResp : access rule response. +type ZoneAccessRuleResp struct { + // Operation success flag. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // access rule object. + Result *ZoneAccessRuleObject `json:"result" validate:"required"` +} + + +// UnmarshalZoneAccessRuleResp unmarshals an instance of ZoneAccessRuleResp from the specified map of raw messages. 
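Since every response shares this success/errors/messages envelope, callers can check the flag before dereferencing Result; a sketch under the same client assumptions (hypothetical rule ID):

    resp, _, err := service.GetZoneAccessRule(service.NewGetZoneAccessRuleOptions("92f17202ed8bd63d69a66b86a49a8f6b"))
    if err != nil {
        log.Fatal(err)
    }
    if resp.Success == nil || !*resp.Success {
        log.Fatalf("API reported failure: %v", resp.Errors)
    }
    fmt.Println(*resp.Result.Mode, *resp.Result.CreatedOn)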
+func UnmarshalZoneAccessRuleResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZoneAccessRuleResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalZoneAccessRuleObject) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/zonelockdownv1/zone_lockdown_v1.go b/vendor/github.com/IBM/networking-go-sdk/zonelockdownv1/zone_lockdown_v1.go new file mode 100644 index 00000000000..0cbe7cffb01 --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/zonelockdownv1/zone_lockdown_v1.go @@ -0,0 +1,1084 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package zonelockdownv1 : Operations and models for the ZoneLockdownV1 service +package zonelockdownv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// ZoneLockdownV1 : Zone Lockdown +// +// Version: 1.0.1 +type ZoneLockdownV1 struct { + Service *core.BaseService + + // Full crn of the service instance. + Crn *string + + // Zone identifier (zone id). + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "zone_lockdown" + +// ZoneLockdownV1Options : Service options +type ZoneLockdownV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full crn of the service instance. + Crn *string `validate:"required"` + + // Zone identifier (zone id). + ZoneIdentifier *string `validate:"required"` +} + +// NewZoneLockdownV1UsingExternalConfig : constructs an instance of ZoneLockdownV1 with passed in options and external configuration. 
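A construction sketch for the factory defined just below: the authenticator is resolved from external configuration (for example, environment variables keyed by the service name), and the CRN and zone values here are hypothetical placeholders.

    service, err := zonelockdownv1.NewZoneLockdownV1UsingExternalConfig(&zonelockdownv1.ZoneLockdownV1Options{
        Crn:            core.StringPtr("crn:v1:bluemix:public:internet-svcs:global:a/abc123:instance::"), // hypothetical
        ZoneIdentifier: core.StringPtr("0123456789abcdef"),                                               // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }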
+func NewZoneLockdownV1UsingExternalConfig(options *ZoneLockdownV1Options) (zoneLockdown *ZoneLockdownV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + zoneLockdown, err = NewZoneLockdownV1(options) + if err != nil { + return + } + + err = zoneLockdown.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = zoneLockdown.Service.SetServiceURL(options.URL) + } + return +} + +// NewZoneLockdownV1 : constructs an instance of ZoneLockdownV1 with passed in options. +func NewZoneLockdownV1(options *ZoneLockdownV1Options) (service *ZoneLockdownV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &ZoneLockdownV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "zoneLockdown" suitable for processing requests. +func (zoneLockdown *ZoneLockdownV1) Clone() *ZoneLockdownV1 { + if core.IsNil(zoneLockdown) { + return nil + } + clone := *zoneLockdown + clone.Service = zoneLockdown.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (zoneLockdown *ZoneLockdownV1) SetServiceURL(url string) error { + return zoneLockdown.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (zoneLockdown *ZoneLockdownV1) GetServiceURL() string { + return zoneLockdown.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (zoneLockdown *ZoneLockdownV1) SetDefaultHeaders(headers http.Header) { + zoneLockdown.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (zoneLockdown *ZoneLockdownV1) SetEnableGzipCompression(enableGzip bool) { + zoneLockdown.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (zoneLockdown *ZoneLockdownV1) GetEnableGzipCompression() bool { + return zoneLockdown.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (zoneLockdown *ZoneLockdownV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + zoneLockdown.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (zoneLockdown *ZoneLockdownV1) DisableRetries() { + zoneLockdown.Service.DisableRetries() +} + +// ListAllZoneLockownRules : List all lockdown rules +// List all lockdown rules for a zone. 
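For the listing method below (note that the generated method name spells "Lockown" without the second "d"), a minimal paging sketch with an initialized client named service:

    opts := service.NewListAllZoneLockownRulesOptions().SetPage(1).SetPerPage(20)
    list, _, err := service.ListAllZoneLockownRules(opts)
    if err != nil {
        log.Fatal(err)
    }
    for _, rule := range list.Result {
        fmt.Printf("%s paused=%t urls=%v\n", *rule.ID, *rule.Paused, rule.Urls)
    }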
+func (zoneLockdown *ZoneLockdownV1) ListAllZoneLockownRules(listAllZoneLockownRulesOptions *ListAllZoneLockownRulesOptions) (result *ListLockdownResp, response *core.DetailedResponse, err error) { + return zoneLockdown.ListAllZoneLockownRulesWithContext(context.Background(), listAllZoneLockownRulesOptions) +} + +// ListAllZoneLockownRulesWithContext is an alternate form of the ListAllZoneLockownRules method which supports a Context parameter +func (zoneLockdown *ZoneLockdownV1) ListAllZoneLockownRulesWithContext(ctx context.Context, listAllZoneLockownRulesOptions *ListAllZoneLockownRulesOptions) (result *ListLockdownResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAllZoneLockownRulesOptions, "listAllZoneLockownRulesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneLockdown.Crn, + "zone_identifier": *zoneLockdown.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneLockdown.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneLockdown.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/lockdowns`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listAllZoneLockownRulesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_lockdown", "V1", "ListAllZoneLockownRules") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listAllZoneLockownRulesOptions.Page != nil { + builder.AddQuery("page", fmt.Sprint(*listAllZoneLockownRulesOptions.Page)) + } + if listAllZoneLockownRulesOptions.PerPage != nil { + builder.AddQuery("per_page", fmt.Sprint(*listAllZoneLockownRulesOptions.PerPage)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneLockdown.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListLockdownResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateZoneLockdownRule : Create lockdown rule +// Create a new lockdown rule for a given zone under a service instance. 
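A creation sketch for the method below, combining a configurations item with URL patterns; the IP address and URL pattern are hypothetical:

    cfg, err := service.NewLockdownInputConfigurationsItem(
        zonelockdownv1.LockdownInputConfigurationsItem_Target_Ip, "198.51.100.4") // hypothetical IP
    if err != nil {
        log.Fatal(err)
    }
    opts := service.NewCreateZoneLockdownRuleOptions().
        SetUrls([]string{"api.example.com/admin/*"}). // hypothetical URL pattern
        SetConfigurations([]zonelockdownv1.LockdownInputConfigurationsItem{*cfg}).
        SetDescription("restrict admin endpoints to one address")
    created, _, err := service.CreateZoneLockdownRule(opts)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created:", *created.Result.ID)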
+func (zoneLockdown *ZoneLockdownV1) CreateZoneLockdownRule(createZoneLockdownRuleOptions *CreateZoneLockdownRuleOptions) (result *LockdownResp, response *core.DetailedResponse, err error) { + return zoneLockdown.CreateZoneLockdownRuleWithContext(context.Background(), createZoneLockdownRuleOptions) +} + +// CreateZoneLockdownRuleWithContext is an alternate form of the CreateZoneLockdownRule method which supports a Context parameter +func (zoneLockdown *ZoneLockdownV1) CreateZoneLockdownRuleWithContext(ctx context.Context, createZoneLockdownRuleOptions *CreateZoneLockdownRuleOptions) (result *LockdownResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createZoneLockdownRuleOptions, "createZoneLockdownRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneLockdown.Crn, + "zone_identifier": *zoneLockdown.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneLockdown.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneLockdown.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/lockdowns`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createZoneLockdownRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_lockdown", "V1", "CreateZoneLockdownRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createZoneLockdownRuleOptions.ID != nil { + body["id"] = createZoneLockdownRuleOptions.ID + } + if createZoneLockdownRuleOptions.Paused != nil { + body["paused"] = createZoneLockdownRuleOptions.Paused + } + if createZoneLockdownRuleOptions.Description != nil { + body["description"] = createZoneLockdownRuleOptions.Description + } + if createZoneLockdownRuleOptions.Urls != nil { + body["urls"] = createZoneLockdownRuleOptions.Urls + } + if createZoneLockdownRuleOptions.Configurations != nil { + body["configurations"] = createZoneLockdownRuleOptions.Configurations + } + if createZoneLockdownRuleOptions.Priority != nil { + body["priority"] = createZoneLockdownRuleOptions.Priority + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneLockdown.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLockdownResp) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteZoneLockdownRule : Delete lockdown rule +// Delete a lockdown rule for a particular zone, given its id. 
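Deletion, sketched for the method below with a hypothetical rule ID; the response echoes the deleted rule's ID:

    deleted, _, err := service.DeleteZoneLockdownRule(service.NewDeleteZoneLockdownRuleOptions("372e67954025e0ba6aaa6d586b9e0b59"))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("deleted:", *deleted.Result.ID)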
+func (zoneLockdown *ZoneLockdownV1) DeleteZoneLockdownRule(deleteZoneLockdownRuleOptions *DeleteZoneLockdownRuleOptions) (result *DeleteLockdownResp, response *core.DetailedResponse, err error) { + return zoneLockdown.DeleteZoneLockdownRuleWithContext(context.Background(), deleteZoneLockdownRuleOptions) +} + +// DeleteZoneLockdownRuleWithContext is an alternate form of the DeleteZoneLockdownRule method which supports a Context parameter +func (zoneLockdown *ZoneLockdownV1) DeleteZoneLockdownRuleWithContext(ctx context.Context, deleteZoneLockdownRuleOptions *DeleteZoneLockdownRuleOptions) (result *DeleteLockdownResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteZoneLockdownRuleOptions, "deleteZoneLockdownRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteZoneLockdownRuleOptions, "deleteZoneLockdownRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneLockdown.Crn, + "zone_identifier": *zoneLockdown.ZoneIdentifier, + "lockdown_rule_identifier": *deleteZoneLockdownRuleOptions.LockdownRuleIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneLockdown.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneLockdown.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/lockdowns/{lockdown_rule_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteZoneLockdownRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_lockdown", "V1", "DeleteZoneLockdownRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneLockdown.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteLockdownResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetLockdown : Get lockdown rule +// For a given service instance, zone id and lockdown rule id, get the lockdown rule details. 
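A read sketch for the method below (hypothetical rule ID, client named service as before):

    rule, _, err := service.GetLockdown(service.NewGetLockdownOptions("372e67954025e0ba6aaa6d586b9e0b59"))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("description=%q paused=%t\n", *rule.Result.Description, *rule.Result.Paused)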
+func (zoneLockdown *ZoneLockdownV1) GetLockdown(getLockdownOptions *GetLockdownOptions) (result *LockdownResp, response *core.DetailedResponse, err error) { + return zoneLockdown.GetLockdownWithContext(context.Background(), getLockdownOptions) +} + +// GetLockdownWithContext is an alternate form of the GetLockdown method which supports a Context parameter +func (zoneLockdown *ZoneLockdownV1) GetLockdownWithContext(ctx context.Context, getLockdownOptions *GetLockdownOptions) (result *LockdownResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLockdownOptions, "getLockdownOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLockdownOptions, "getLockdownOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneLockdown.Crn, + "zone_identifier": *zoneLockdown.ZoneIdentifier, + "lockdown_rule_identifier": *getLockdownOptions.LockdownRuleIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneLockdown.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneLockdown.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/lockdowns/{lockdown_rule_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLockdownOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_lockdown", "V1", "GetLockdown") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneLockdown.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLockdownResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateLockdownRule : Update lockdown rule +// Update an existing lockdown rule for a given zone under a given service instance. 
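Because the update below is issued as a PUT, a cautious caller resends the full rule definition rather than only the changed field; a sketch with hypothetical values:

    opts := service.NewUpdateLockdownRuleOptions("372e67954025e0ba6aaa6d586b9e0b59"). // hypothetical ID
        SetPaused(true).
        SetUrls([]string{"api.example.com/admin/*"}) // hypothetical URL pattern
    updated, _, err := service.UpdateLockdownRule(opts)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("paused:", *updated.Result.Paused)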
+func (zoneLockdown *ZoneLockdownV1) UpdateLockdownRule(updateLockdownRuleOptions *UpdateLockdownRuleOptions) (result *LockdownResp, response *core.DetailedResponse, err error) { + return zoneLockdown.UpdateLockdownRuleWithContext(context.Background(), updateLockdownRuleOptions) +} + +// UpdateLockdownRuleWithContext is an alternate form of the UpdateLockdownRule method which supports a Context parameter +func (zoneLockdown *ZoneLockdownV1) UpdateLockdownRuleWithContext(ctx context.Context, updateLockdownRuleOptions *UpdateLockdownRuleOptions) (result *LockdownResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateLockdownRuleOptions, "updateLockdownRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateLockdownRuleOptions, "updateLockdownRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneLockdown.Crn, + "zone_identifier": *zoneLockdown.ZoneIdentifier, + "lockdown_rule_identifier": *updateLockdownRuleOptions.LockdownRuleIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneLockdown.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneLockdown.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/firewall/lockdowns/{lockdown_rule_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateLockdownRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_lockdown", "V1", "UpdateLockdownRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateLockdownRuleOptions.ID != nil { + body["id"] = updateLockdownRuleOptions.ID + } + if updateLockdownRuleOptions.Paused != nil { + body["paused"] = updateLockdownRuleOptions.Paused + } + if updateLockdownRuleOptions.Description != nil { + body["description"] = updateLockdownRuleOptions.Description + } + if updateLockdownRuleOptions.Urls != nil { + body["urls"] = updateLockdownRuleOptions.Urls + } + if updateLockdownRuleOptions.Configurations != nil { + body["configurations"] = updateLockdownRuleOptions.Configurations + } + if updateLockdownRuleOptions.Priority != nil { + body["priority"] = updateLockdownRuleOptions.Priority + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneLockdown.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLockdownResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateZoneLockdownRuleOptions : The CreateZoneLockdownRule options. +type CreateZoneLockdownRuleOptions struct { + // Lockdown rule identifier. + ID *string `json:"id,omitempty"` + + // Whether this zone lockdown is currently paused. + Paused *bool `json:"paused,omitempty"` + + // A note that you can use to describe the reason for a Lockdown rule. + Description *string `json:"description,omitempty"` + + // URLs to be included in this rule definition. Wildcards are permitted. The URL pattern entered here will be escaped + // before use. 
This limits the URL to just simple wildcard patterns. + Urls []string `json:"urls,omitempty"` + + // List of IP addresses or CIDR ranges to use for this rule. This can include any number of ip or ip_range + // configurations that can access the provided URLs. + Configurations []LockdownInputConfigurationsItem `json:"configurations,omitempty"` + + // firewall priority. + Priority *int64 `json:"priority,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateZoneLockdownRuleOptions : Instantiate CreateZoneLockdownRuleOptions +func (*ZoneLockdownV1) NewCreateZoneLockdownRuleOptions() *CreateZoneLockdownRuleOptions { + return &CreateZoneLockdownRuleOptions{} +} + +// SetID : Allow user to set ID +func (options *CreateZoneLockdownRuleOptions) SetID(id string) *CreateZoneLockdownRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetPaused : Allow user to set Paused +func (options *CreateZoneLockdownRuleOptions) SetPaused(paused bool) *CreateZoneLockdownRuleOptions { + options.Paused = core.BoolPtr(paused) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateZoneLockdownRuleOptions) SetDescription(description string) *CreateZoneLockdownRuleOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetUrls : Allow user to set Urls +func (options *CreateZoneLockdownRuleOptions) SetUrls(urls []string) *CreateZoneLockdownRuleOptions { + options.Urls = urls + return options +} + +// SetConfigurations : Allow user to set Configurations +func (options *CreateZoneLockdownRuleOptions) SetConfigurations(configurations []LockdownInputConfigurationsItem) *CreateZoneLockdownRuleOptions { + options.Configurations = configurations + return options +} + +// SetPriority : Allow user to set Priority +func (options *CreateZoneLockdownRuleOptions) SetPriority(priority int64) *CreateZoneLockdownRuleOptions { + options.Priority = core.Int64Ptr(priority) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateZoneLockdownRuleOptions) SetHeaders(param map[string]string) *CreateZoneLockdownRuleOptions { + options.Headers = param + return options +} + +// DeleteLockdownRespResult : Container for response information. +type DeleteLockdownRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` +} + + +// UnmarshalDeleteLockdownRespResult unmarshals an instance of DeleteLockdownRespResult from the specified map of raw messages. +func UnmarshalDeleteLockdownRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteLockdownRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DeleteZoneLockdownRuleOptions : The DeleteZoneLockdownRule options. +type DeleteZoneLockdownRuleOptions struct { + // Identifier of the lockdown rule to be deleted. 
+ LockdownRuleIdentifier *string `json:"lockdown_rule_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteZoneLockdownRuleOptions : Instantiate DeleteZoneLockdownRuleOptions +func (*ZoneLockdownV1) NewDeleteZoneLockdownRuleOptions(lockdownRuleIdentifier string) *DeleteZoneLockdownRuleOptions { + return &DeleteZoneLockdownRuleOptions{ + LockdownRuleIdentifier: core.StringPtr(lockdownRuleIdentifier), + } +} + +// SetLockdownRuleIdentifier : Allow user to set LockdownRuleIdentifier +func (options *DeleteZoneLockdownRuleOptions) SetLockdownRuleIdentifier(lockdownRuleIdentifier string) *DeleteZoneLockdownRuleOptions { + options.LockdownRuleIdentifier = core.StringPtr(lockdownRuleIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteZoneLockdownRuleOptions) SetHeaders(param map[string]string) *DeleteZoneLockdownRuleOptions { + options.Headers = param + return options +} + +// GetLockdownOptions : The GetLockdown options. +type GetLockdownOptions struct { + // Identifier of lockdown rule for the given zone. + LockdownRuleIdentifier *string `json:"lockdown_rule_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLockdownOptions : Instantiate GetLockdownOptions +func (*ZoneLockdownV1) NewGetLockdownOptions(lockdownRuleIdentifier string) *GetLockdownOptions { + return &GetLockdownOptions{ + LockdownRuleIdentifier: core.StringPtr(lockdownRuleIdentifier), + } +} + +// SetLockdownRuleIdentifier : Allow user to set LockdownRuleIdentifier +func (options *GetLockdownOptions) SetLockdownRuleIdentifier(lockdownRuleIdentifier string) *GetLockdownOptions { + options.LockdownRuleIdentifier = core.StringPtr(lockdownRuleIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLockdownOptions) SetHeaders(param map[string]string) *GetLockdownOptions { + options.Headers = param + return options +} + +// ListAllZoneLockownRulesOptions : The ListAllZoneLockownRules options. +type ListAllZoneLockownRulesOptions struct { + // Page number of paginated results. + Page *int64 `json:"page,omitempty"` + + // Maximum number of lockdown rules per page. + PerPage *int64 `json:"per_page,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListAllZoneLockownRulesOptions : Instantiate ListAllZoneLockownRulesOptions +func (*ZoneLockdownV1) NewListAllZoneLockownRulesOptions() *ListAllZoneLockownRulesOptions { + return &ListAllZoneLockownRulesOptions{} +} + +// SetPage : Allow user to set Page +func (options *ListAllZoneLockownRulesOptions) SetPage(page int64) *ListAllZoneLockownRulesOptions { + options.Page = core.Int64Ptr(page) + return options +} + +// SetPerPage : Allow user to set PerPage +func (options *ListAllZoneLockownRulesOptions) SetPerPage(perPage int64) *ListAllZoneLockownRulesOptions { + options.PerPage = core.Int64Ptr(perPage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListAllZoneLockownRulesOptions) SetHeaders(param map[string]string) *ListAllZoneLockownRulesOptions { + options.Headers = param + return options +} + +// ListLockdownRespResultInfo : Statistics of results. +type ListLockdownRespResultInfo struct { + // Page number. + Page *int64 `json:"page" validate:"required"` + + // Number of results per page. + PerPage *int64 `json:"per_page" validate:"required"` + + // Number of results. 
+	Count *int64 `json:"count" validate:"required"`
+
+	// Total number of results.
+	TotalCount *int64 `json:"total_count" validate:"required"`
+}
+
+
+// UnmarshalListLockdownRespResultInfo unmarshals an instance of ListLockdownRespResultInfo from the specified map of raw messages.
+func UnmarshalListLockdownRespResultInfo(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ListLockdownRespResultInfo)
+	err = core.UnmarshalPrimitive(m, "page", &obj.Page)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "count", &obj.Count)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LockdownInputConfigurationsItem : LockdownInputConfigurationsItem struct
+type LockdownInputConfigurationsItem struct {
+	// The request property to target.
+	Target *string `json:"target" validate:"required"`
+
+	// IP address or CIDR. If target is "ip", then the value should be an IP address; otherwise, a CIDR range.
+	Value *string `json:"value" validate:"required"`
+}
+
+// Constants associated with the LockdownInputConfigurationsItem.Target property.
+// The request property to target.
+const (
+	LockdownInputConfigurationsItem_Target_Ip = "ip"
+	LockdownInputConfigurationsItem_Target_IpRange = "ip_range"
+)
+
+
+// NewLockdownInputConfigurationsItem : Instantiate LockdownInputConfigurationsItem (Generic Model Constructor)
+func (*ZoneLockdownV1) NewLockdownInputConfigurationsItem(target string, value string) (model *LockdownInputConfigurationsItem, err error) {
+	model = &LockdownInputConfigurationsItem{
+		Target: core.StringPtr(target),
+		Value: core.StringPtr(value),
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
+
+// UnmarshalLockdownInputConfigurationsItem unmarshals an instance of LockdownInputConfigurationsItem from the specified map of raw messages.
+func UnmarshalLockdownInputConfigurationsItem(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LockdownInputConfigurationsItem)
+	err = core.UnmarshalPrimitive(m, "target", &obj.Target)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LockdownObjectConfigurationsItem : LockdownObjectConfigurationsItem struct
+type LockdownObjectConfigurationsItem struct {
+	// target.
+	Target *string `json:"target" validate:"required"`
+
+	// IP address or CIDR. If target is "ip", then the value should be an IP address; otherwise, a CIDR range.
+	Value *string `json:"value" validate:"required"`
+}
+
+// Constants associated with the LockdownObjectConfigurationsItem.Target property.
+// target.
+const (
+	LockdownObjectConfigurationsItem_Target_Ip = "ip"
+	LockdownObjectConfigurationsItem_Target_IpRange = "ip_range"
+)
+
+
+// UnmarshalLockdownObjectConfigurationsItem unmarshals an instance of LockdownObjectConfigurationsItem from the specified map of raw messages.
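Since the generic constructor above validates required fields, building mixed ip/ip_range configuration items looks like the following sketch (hypothetical addresses); the resulting slice is what SetConfigurations expects on a create or update:

    office, err := service.NewLockdownInputConfigurationsItem(
        zonelockdownv1.LockdownInputConfigurationsItem_Target_Ip, "198.51.100.4") // hypothetical
    if err != nil {
        log.Fatal(err)
    }
    branch, err := service.NewLockdownInputConfigurationsItem(
        zonelockdownv1.LockdownInputConfigurationsItem_Target_IpRange, "203.0.113.0/24") // hypothetical
    if err != nil {
        log.Fatal(err)
    }
    configs := []zonelockdownv1.LockdownInputConfigurationsItem{*office, *branch}
    _ = configs // pass via SetConfigurations(configs)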
+func UnmarshalLockdownObjectConfigurationsItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LockdownObjectConfigurationsItem) + err = core.UnmarshalPrimitive(m, "target", &obj.Target) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateLockdownRuleOptions : The UpdateLockdownRule options. +type UpdateLockdownRuleOptions struct { + // Identifier of lockdown rule. + LockdownRuleIdentifier *string `json:"lockdown_rule_identifier" validate:"required,ne="` + + // Lockdown rule identifier. + ID *string `json:"id,omitempty"` + + // Whether this zone lockdown is currently paused. + Paused *bool `json:"paused,omitempty"` + + // A note that you can use to describe the reason for a Lockdown rule. + Description *string `json:"description,omitempty"` + + // URLs to be included in this rule definition. Wildcards are permitted. The URL pattern entered here will be escaped + // before use. This limits the URL to just simple wildcard patterns. + Urls []string `json:"urls,omitempty"` + + // List of IP addresses or CIDR ranges to use for this rule. This can include any number of ip or ip_range + // configurations that can access the provided URLs. + Configurations []LockdownInputConfigurationsItem `json:"configurations,omitempty"` + + // firewall priority. + Priority *int64 `json:"priority,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateLockdownRuleOptions : Instantiate UpdateLockdownRuleOptions +func (*ZoneLockdownV1) NewUpdateLockdownRuleOptions(lockdownRuleIdentifier string) *UpdateLockdownRuleOptions { + return &UpdateLockdownRuleOptions{ + LockdownRuleIdentifier: core.StringPtr(lockdownRuleIdentifier), + } +} + +// SetLockdownRuleIdentifier : Allow user to set LockdownRuleIdentifier +func (options *UpdateLockdownRuleOptions) SetLockdownRuleIdentifier(lockdownRuleIdentifier string) *UpdateLockdownRuleOptions { + options.LockdownRuleIdentifier = core.StringPtr(lockdownRuleIdentifier) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateLockdownRuleOptions) SetID(id string) *UpdateLockdownRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetPaused : Allow user to set Paused +func (options *UpdateLockdownRuleOptions) SetPaused(paused bool) *UpdateLockdownRuleOptions { + options.Paused = core.BoolPtr(paused) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateLockdownRuleOptions) SetDescription(description string) *UpdateLockdownRuleOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetUrls : Allow user to set Urls +func (options *UpdateLockdownRuleOptions) SetUrls(urls []string) *UpdateLockdownRuleOptions { + options.Urls = urls + return options +} + +// SetConfigurations : Allow user to set Configurations +func (options *UpdateLockdownRuleOptions) SetConfigurations(configurations []LockdownInputConfigurationsItem) *UpdateLockdownRuleOptions { + options.Configurations = configurations + return options +} + +// SetPriority : Allow user to set Priority +func (options *UpdateLockdownRuleOptions) SetPriority(priority int64) *UpdateLockdownRuleOptions { + options.Priority = core.Int64Ptr(priority) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateLockdownRuleOptions) SetHeaders(param map[string]string) 
+ options.Headers = param
+ return options
+}
+
+// DeleteLockdownResp : delete lockdown response.
+type DeleteLockdownResp struct {
+ // Operation success flag.
+ Success *bool `json:"success" validate:"required"`
+
+ // Array of errors encountered.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // Array of messages returned.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // Container for response information.
+ Result *DeleteLockdownRespResult `json:"result" validate:"required"`
+}
+
+
+// UnmarshalDeleteLockdownResp unmarshals an instance of DeleteLockdownResp from the specified map of raw messages.
+func UnmarshalDeleteLockdownResp(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(DeleteLockdownResp)
+ err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDeleteLockdownRespResult)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ListLockdownResp : list lockdown response.
+type ListLockdownResp struct {
+ // Was operation successful.
+ Success *bool `json:"success" validate:"required"`
+
+ // Array of errors encountered.
+ Errors [][]string `json:"errors" validate:"required"`
+
+ // Array of messages returned.
+ Messages [][]string `json:"messages" validate:"required"`
+
+ // Container for response information.
+ Result []LockdownObject `json:"result" validate:"required"`
+
+ // Statistics of results.
+ ResultInfo *ListLockdownRespResultInfo `json:"result_info" validate:"required"`
+}
+
+
+// UnmarshalListLockdownResp unmarshals an instance of ListLockdownResp from the specified map of raw messages.
+func UnmarshalListLockdownResp(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ListLockdownResp)
+ err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalLockdownObject)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalListLockdownRespResultInfo)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// LockdownObject : lockdown object.
+type LockdownObject struct {
+ // Lockdown rule identifier.
+ ID *string `json:"id" validate:"required"`
+
+ // firewall priority.
+ Priority *int64 `json:"priority,omitempty"`
+
+ // Whether this zone lockdown is currently paused.
+ Paused *bool `json:"paused" validate:"required"`
+
+ // A note that you can use to describe the reason for a Lockdown rule.
+ Description *string `json:"description" validate:"required"`
+
+ // URLs to be included in this rule definition. Wildcards are permitted. The URL pattern entered here will be escaped
+ // before use. This limits the URL to just simple wildcard patterns.
+ Urls []string `json:"urls" validate:"required"`
+
+ // List of IP addresses or CIDR ranges to use for this rule. This can include any number of ip or ip_range
+ // configurations that can access the provided URLs.
+ Configurations []LockdownObjectConfigurationsItem `json:"configurations" validate:"required"` +} + + +// UnmarshalLockdownObject unmarshals an instance of LockdownObject from the specified map of raw messages. +func UnmarshalLockdownObject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LockdownObject) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "paused", &obj.Paused) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "urls", &obj.Urls) + if err != nil { + return + } + err = core.UnmarshalModel(m, "configurations", &obj.Configurations, UnmarshalLockdownObjectConfigurationsItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LockdownResp : lockdown response. +type LockdownResp struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // lockdown object. + Result *LockdownObject `json:"result" validate:"required"` +} + + +// UnmarshalLockdownResp unmarshals an instance of LockdownResp from the specified map of raw messages. +func UnmarshalLockdownResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LockdownResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalLockdownObject) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/zoneratelimitsv1/zone_rate_limits_v1.go b/vendor/github.com/IBM/networking-go-sdk/zoneratelimitsv1/zone_rate_limits_v1.go new file mode 100644 index 00000000000..f1c6250e99d --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/zoneratelimitsv1/zone_rate_limits_v1.go @@ -0,0 +1,1708 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package zoneratelimitsv1 : Operations and models for the ZoneRateLimitsV1 service +package zoneratelimitsv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "net/http" + "reflect" + "time" +) + +// ZoneRateLimitsV1 : Zone Rate Limits +// +// Version: 1.0.1 +type ZoneRateLimitsV1 struct { + Service *core.BaseService + + // Full url-encoded cloud resource name (CRN) of resource instance. + Crn *string + + // Zone identifier of the zone for which rate limit is to be created. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "zone_rate_limits" + +// ZoneRateLimitsV1Options : Service options +type ZoneRateLimitsV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full url-encoded cloud resource name (CRN) of resource instance. + Crn *string `validate:"required"` + + // Zone identifier of the zone for which rate limit is to be created. + ZoneIdentifier *string `validate:"required"` +} + +// NewZoneRateLimitsV1UsingExternalConfig : constructs an instance of ZoneRateLimitsV1 with passed in options and external configuration. +func NewZoneRateLimitsV1UsingExternalConfig(options *ZoneRateLimitsV1Options) (zoneRateLimits *ZoneRateLimitsV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + zoneRateLimits, err = NewZoneRateLimitsV1(options) + if err != nil { + return + } + + err = zoneRateLimits.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = zoneRateLimits.Service.SetServiceURL(options.URL) + } + return +} + +// NewZoneRateLimitsV1 : constructs an instance of ZoneRateLimitsV1 with passed in options. +func NewZoneRateLimitsV1(options *ZoneRateLimitsV1Options) (service *ZoneRateLimitsV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &ZoneRateLimitsV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "zoneRateLimits" suitable for processing requests. 
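For orientation, a minimal construction sketch (not part of the vendored file) using only the options, constructor, and default URL declared above; the IAM API key, CRN, and zone identifier are placeholders, and core.IamAuthenticator comes from the go-sdk-core package already imported by this file.

    package main

    import (
        "fmt"

        "github.com/IBM/go-sdk-core/v4/core"
        "github.com/IBM/networking-go-sdk/zoneratelimitsv1"
    )

    func main() {
        // Placeholder credentials and identifiers; substitute real values.
        service, err := zoneratelimitsv1.NewZoneRateLimitsV1(&zoneratelimitsv1.ZoneRateLimitsV1Options{
            Authenticator:  &core.IamAuthenticator{ApiKey: "<iam-api-key>"},
            Crn:            core.StringPtr("<url-encoded-instance-crn>"),
            ZoneIdentifier: core.StringPtr("<zone-id>"),
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(service.GetServiceURL()) // defaults to https://api.cis.cloud.ibm.com
    }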
+func (zoneRateLimits *ZoneRateLimitsV1) Clone() *ZoneRateLimitsV1 { + if core.IsNil(zoneRateLimits) { + return nil + } + clone := *zoneRateLimits + clone.Service = zoneRateLimits.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (zoneRateLimits *ZoneRateLimitsV1) SetServiceURL(url string) error { + return zoneRateLimits.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (zoneRateLimits *ZoneRateLimitsV1) GetServiceURL() string { + return zoneRateLimits.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (zoneRateLimits *ZoneRateLimitsV1) SetDefaultHeaders(headers http.Header) { + zoneRateLimits.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (zoneRateLimits *ZoneRateLimitsV1) SetEnableGzipCompression(enableGzip bool) { + zoneRateLimits.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (zoneRateLimits *ZoneRateLimitsV1) GetEnableGzipCompression() bool { + return zoneRateLimits.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (zoneRateLimits *ZoneRateLimitsV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + zoneRateLimits.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (zoneRateLimits *ZoneRateLimitsV1) DisableRetries() { + zoneRateLimits.Service.DisableRetries() +} + +// ListAllZoneRateLimits : List all rate limits +// The details of Rate Limit for a given zone under a given service instance. 
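Before the implementation, a hedged usage sketch: it drives the method below with the pagination options declared later in this file, reuses the imports from the construction sketch above, and reads only the required members of RatelimitObject.

    func listRateLimits(service *zoneratelimitsv1.ZoneRateLimitsV1) error {
        opts := service.NewListAllZoneRateLimitsOptions().SetPage(1).SetPerPage(25)
        result, _, err := service.ListAllZoneRateLimits(opts)
        if err != nil {
            return err
        }
        for _, rl := range result.Result {
            // ID, Threshold, and Period are required fields of RatelimitObject.
            fmt.Printf("%s: threshold %d per %d seconds\n", *rl.ID, *rl.Threshold, *rl.Period)
        }
        return nil
    }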
+func (zoneRateLimits *ZoneRateLimitsV1) ListAllZoneRateLimits(listAllZoneRateLimitsOptions *ListAllZoneRateLimitsOptions) (result *ListRatelimitResp, response *core.DetailedResponse, err error) { + return zoneRateLimits.ListAllZoneRateLimitsWithContext(context.Background(), listAllZoneRateLimitsOptions) +} + +// ListAllZoneRateLimitsWithContext is an alternate form of the ListAllZoneRateLimits method which supports a Context parameter +func (zoneRateLimits *ZoneRateLimitsV1) ListAllZoneRateLimitsWithContext(ctx context.Context, listAllZoneRateLimitsOptions *ListAllZoneRateLimitsOptions) (result *ListRatelimitResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAllZoneRateLimitsOptions, "listAllZoneRateLimitsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneRateLimits.Crn, + "zone_identifier": *zoneRateLimits.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneRateLimits.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneRateLimits.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/rate_limits`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listAllZoneRateLimitsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_rate_limits", "V1", "ListAllZoneRateLimits") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listAllZoneRateLimitsOptions.Page != nil { + builder.AddQuery("page", fmt.Sprint(*listAllZoneRateLimitsOptions.Page)) + } + if listAllZoneRateLimitsOptions.PerPage != nil { + builder.AddQuery("per_page", fmt.Sprint(*listAllZoneRateLimitsOptions.PerPage)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneRateLimits.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListRatelimitResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateZoneRateLimits : Create rate limit +// Create a new rate limit for a given zone under a service instance. 
+func (zoneRateLimits *ZoneRateLimitsV1) CreateZoneRateLimits(createZoneRateLimitsOptions *CreateZoneRateLimitsOptions) (result *RatelimitResp, response *core.DetailedResponse, err error) { + return zoneRateLimits.CreateZoneRateLimitsWithContext(context.Background(), createZoneRateLimitsOptions) +} + +// CreateZoneRateLimitsWithContext is an alternate form of the CreateZoneRateLimits method which supports a Context parameter +func (zoneRateLimits *ZoneRateLimitsV1) CreateZoneRateLimitsWithContext(ctx context.Context, createZoneRateLimitsOptions *CreateZoneRateLimitsOptions) (result *RatelimitResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createZoneRateLimitsOptions, "createZoneRateLimitsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneRateLimits.Crn, + "zone_identifier": *zoneRateLimits.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneRateLimits.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneRateLimits.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/rate_limits`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createZoneRateLimitsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_rate_limits", "V1", "CreateZoneRateLimits") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createZoneRateLimitsOptions.Disabled != nil { + body["disabled"] = createZoneRateLimitsOptions.Disabled + } + if createZoneRateLimitsOptions.Description != nil { + body["description"] = createZoneRateLimitsOptions.Description + } + if createZoneRateLimitsOptions.Bypass != nil { + body["bypass"] = createZoneRateLimitsOptions.Bypass + } + if createZoneRateLimitsOptions.Threshold != nil { + body["threshold"] = createZoneRateLimitsOptions.Threshold + } + if createZoneRateLimitsOptions.Period != nil { + body["period"] = createZoneRateLimitsOptions.Period + } + if createZoneRateLimitsOptions.Action != nil { + body["action"] = createZoneRateLimitsOptions.Action + } + if createZoneRateLimitsOptions.Correlate != nil { + body["correlate"] = createZoneRateLimitsOptions.Correlate + } + if createZoneRateLimitsOptions.Match != nil { + body["match"] = createZoneRateLimitsOptions.Match + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneRateLimits.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRatelimitResp) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteZoneRateLimit : Delete rate limit +// Delete a rate limit given its id. 
+func (zoneRateLimits *ZoneRateLimitsV1) DeleteZoneRateLimit(deleteZoneRateLimitOptions *DeleteZoneRateLimitOptions) (result *DeleteRateLimitResp, response *core.DetailedResponse, err error) { + return zoneRateLimits.DeleteZoneRateLimitWithContext(context.Background(), deleteZoneRateLimitOptions) +} + +// DeleteZoneRateLimitWithContext is an alternate form of the DeleteZoneRateLimit method which supports a Context parameter +func (zoneRateLimits *ZoneRateLimitsV1) DeleteZoneRateLimitWithContext(ctx context.Context, deleteZoneRateLimitOptions *DeleteZoneRateLimitOptions) (result *DeleteRateLimitResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteZoneRateLimitOptions, "deleteZoneRateLimitOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteZoneRateLimitOptions, "deleteZoneRateLimitOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneRateLimits.Crn, + "zone_identifier": *zoneRateLimits.ZoneIdentifier, + "rate_limit_identifier": *deleteZoneRateLimitOptions.RateLimitIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneRateLimits.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneRateLimits.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/rate_limits/{rate_limit_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteZoneRateLimitOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_rate_limits", "V1", "DeleteZoneRateLimit") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneRateLimits.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteRateLimitResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetRateLimit : Get a rate limit +// Get the details of a rate limit for a given zone under a given service instance. 
+func (zoneRateLimits *ZoneRateLimitsV1) GetRateLimit(getRateLimitOptions *GetRateLimitOptions) (result *RatelimitResp, response *core.DetailedResponse, err error) { + return zoneRateLimits.GetRateLimitWithContext(context.Background(), getRateLimitOptions) +} + +// GetRateLimitWithContext is an alternate form of the GetRateLimit method which supports a Context parameter +func (zoneRateLimits *ZoneRateLimitsV1) GetRateLimitWithContext(ctx context.Context, getRateLimitOptions *GetRateLimitOptions) (result *RatelimitResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getRateLimitOptions, "getRateLimitOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getRateLimitOptions, "getRateLimitOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneRateLimits.Crn, + "zone_identifier": *zoneRateLimits.ZoneIdentifier, + "rate_limit_identifier": *getRateLimitOptions.RateLimitIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneRateLimits.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneRateLimits.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/rate_limits/{rate_limit_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getRateLimitOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_rate_limits", "V1", "GetRateLimit") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneRateLimits.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRatelimitResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateRateLimit : Update rate limit +// Update an existing rate limit for a given zone under a service instance. 
+func (zoneRateLimits *ZoneRateLimitsV1) UpdateRateLimit(updateRateLimitOptions *UpdateRateLimitOptions) (result *RatelimitResp, response *core.DetailedResponse, err error) { + return zoneRateLimits.UpdateRateLimitWithContext(context.Background(), updateRateLimitOptions) +} + +// UpdateRateLimitWithContext is an alternate form of the UpdateRateLimit method which supports a Context parameter +func (zoneRateLimits *ZoneRateLimitsV1) UpdateRateLimitWithContext(ctx context.Context, updateRateLimitOptions *UpdateRateLimitOptions) (result *RatelimitResp, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateRateLimitOptions, "updateRateLimitOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateRateLimitOptions, "updateRateLimitOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zoneRateLimits.Crn, + "zone_identifier": *zoneRateLimits.ZoneIdentifier, + "rate_limit_identifier": *updateRateLimitOptions.RateLimitIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zoneRateLimits.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zoneRateLimits.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/rate_limits/{rate_limit_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateRateLimitOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zone_rate_limits", "V1", "UpdateRateLimit") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateRateLimitOptions.Disabled != nil { + body["disabled"] = updateRateLimitOptions.Disabled + } + if updateRateLimitOptions.Description != nil { + body["description"] = updateRateLimitOptions.Description + } + if updateRateLimitOptions.Bypass != nil { + body["bypass"] = updateRateLimitOptions.Bypass + } + if updateRateLimitOptions.Threshold != nil { + body["threshold"] = updateRateLimitOptions.Threshold + } + if updateRateLimitOptions.Period != nil { + body["period"] = updateRateLimitOptions.Period + } + if updateRateLimitOptions.Action != nil { + body["action"] = updateRateLimitOptions.Action + } + if updateRateLimitOptions.Correlate != nil { + body["correlate"] = updateRateLimitOptions.Correlate + } + if updateRateLimitOptions.Match != nil { + body["match"] = updateRateLimitOptions.Match + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zoneRateLimits.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRatelimitResp) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateZoneRateLimitsOptions : The CreateZoneRateLimits options. +type CreateZoneRateLimitsOptions struct { + // Whether this ratelimit is currently disabled. + Disabled *bool `json:"disabled,omitempty"` + + // A note that you can use to describe the reason for a rate limit. 
+ Description *string `json:"description,omitempty"` + + // Criteria that would allow the rate limit to be bypassed, for example to express that you shouldn't apply a rate + // limit to a given set of URLs. + Bypass []RatelimitInputBypassItem `json:"bypass,omitempty"` + + // The threshold that triggers the rate limit mitigations, combine with period. i.e. threshold per period. + Threshold *int64 `json:"threshold,omitempty"` + + // The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be + // performed. + Period *int64 `json:"period,omitempty"` + + // action. + Action *RatelimitInputAction `json:"action,omitempty"` + + // Enable NAT based rate limits. + Correlate *RatelimitInputCorrelate `json:"correlate,omitempty"` + + // Determines which traffic the rate limit counts towards the threshold. Needs to be one of "request" or "response" + // objects. + Match *RatelimitInputMatch `json:"match,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateZoneRateLimitsOptions : Instantiate CreateZoneRateLimitsOptions +func (*ZoneRateLimitsV1) NewCreateZoneRateLimitsOptions() *CreateZoneRateLimitsOptions { + return &CreateZoneRateLimitsOptions{} +} + +// SetDisabled : Allow user to set Disabled +func (options *CreateZoneRateLimitsOptions) SetDisabled(disabled bool) *CreateZoneRateLimitsOptions { + options.Disabled = core.BoolPtr(disabled) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateZoneRateLimitsOptions) SetDescription(description string) *CreateZoneRateLimitsOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetBypass : Allow user to set Bypass +func (options *CreateZoneRateLimitsOptions) SetBypass(bypass []RatelimitInputBypassItem) *CreateZoneRateLimitsOptions { + options.Bypass = bypass + return options +} + +// SetThreshold : Allow user to set Threshold +func (options *CreateZoneRateLimitsOptions) SetThreshold(threshold int64) *CreateZoneRateLimitsOptions { + options.Threshold = core.Int64Ptr(threshold) + return options +} + +// SetPeriod : Allow user to set Period +func (options *CreateZoneRateLimitsOptions) SetPeriod(period int64) *CreateZoneRateLimitsOptions { + options.Period = core.Int64Ptr(period) + return options +} + +// SetAction : Allow user to set Action +func (options *CreateZoneRateLimitsOptions) SetAction(action *RatelimitInputAction) *CreateZoneRateLimitsOptions { + options.Action = action + return options +} + +// SetCorrelate : Allow user to set Correlate +func (options *CreateZoneRateLimitsOptions) SetCorrelate(correlate *RatelimitInputCorrelate) *CreateZoneRateLimitsOptions { + options.Correlate = correlate + return options +} + +// SetMatch : Allow user to set Match +func (options *CreateZoneRateLimitsOptions) SetMatch(match *RatelimitInputMatch) *CreateZoneRateLimitsOptions { + options.Match = match + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateZoneRateLimitsOptions) SetHeaders(param map[string]string) *CreateZoneRateLimitsOptions { + options.Headers = param + return options +} + +// DeleteRateLimitRespResult : Container for response information. +type DeleteRateLimitRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` +} + + +// UnmarshalDeleteRateLimitRespResult unmarshals an instance of DeleteRateLimitRespResult from the specified map of raw messages. 
+func UnmarshalDeleteRateLimitRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteRateLimitRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DeleteZoneRateLimitOptions : The DeleteZoneRateLimit options. +type DeleteZoneRateLimitOptions struct { + // Identifier of the rate limit to be deleted. + RateLimitIdentifier *string `json:"rate_limit_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteZoneRateLimitOptions : Instantiate DeleteZoneRateLimitOptions +func (*ZoneRateLimitsV1) NewDeleteZoneRateLimitOptions(rateLimitIdentifier string) *DeleteZoneRateLimitOptions { + return &DeleteZoneRateLimitOptions{ + RateLimitIdentifier: core.StringPtr(rateLimitIdentifier), + } +} + +// SetRateLimitIdentifier : Allow user to set RateLimitIdentifier +func (options *DeleteZoneRateLimitOptions) SetRateLimitIdentifier(rateLimitIdentifier string) *DeleteZoneRateLimitOptions { + options.RateLimitIdentifier = core.StringPtr(rateLimitIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteZoneRateLimitOptions) SetHeaders(param map[string]string) *DeleteZoneRateLimitOptions { + options.Headers = param + return options +} + +// GetRateLimitOptions : The GetRateLimit options. +type GetRateLimitOptions struct { + // Identifier of rate limit for the given zone. + RateLimitIdentifier *string `json:"rate_limit_identifier" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetRateLimitOptions : Instantiate GetRateLimitOptions +func (*ZoneRateLimitsV1) NewGetRateLimitOptions(rateLimitIdentifier string) *GetRateLimitOptions { + return &GetRateLimitOptions{ + RateLimitIdentifier: core.StringPtr(rateLimitIdentifier), + } +} + +// SetRateLimitIdentifier : Allow user to set RateLimitIdentifier +func (options *GetRateLimitOptions) SetRateLimitIdentifier(rateLimitIdentifier string) *GetRateLimitOptions { + options.RateLimitIdentifier = core.StringPtr(rateLimitIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetRateLimitOptions) SetHeaders(param map[string]string) *GetRateLimitOptions { + options.Headers = param + return options +} + +// ListAllZoneRateLimitsOptions : The ListAllZoneRateLimits options. +type ListAllZoneRateLimitsOptions struct { + // Page number of paginated results. + Page *int64 `json:"page,omitempty"` + + // Maximum number of rate limits per page. 
+ PerPage *int64 `json:"per_page,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewListAllZoneRateLimitsOptions : Instantiate ListAllZoneRateLimitsOptions
+func (*ZoneRateLimitsV1) NewListAllZoneRateLimitsOptions() *ListAllZoneRateLimitsOptions {
+ return &ListAllZoneRateLimitsOptions{}
+}
+
+// SetPage : Allow user to set Page
+func (options *ListAllZoneRateLimitsOptions) SetPage(page int64) *ListAllZoneRateLimitsOptions {
+ options.Page = core.Int64Ptr(page)
+ return options
+}
+
+// SetPerPage : Allow user to set PerPage
+func (options *ListAllZoneRateLimitsOptions) SetPerPage(perPage int64) *ListAllZoneRateLimitsOptions {
+ options.PerPage = core.Int64Ptr(perPage)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListAllZoneRateLimitsOptions) SetHeaders(param map[string]string) *ListAllZoneRateLimitsOptions {
+ options.Headers = param
+ return options
+}
+
+// ListRatelimitRespResultInfo : Statistics of results.
+type ListRatelimitRespResultInfo struct {
+ // Page number.
+ Page *int64 `json:"page" validate:"required"`
+
+ // Number of results per page.
+ PerPage *int64 `json:"per_page" validate:"required"`
+
+ // Number of results.
+ Count *int64 `json:"count" validate:"required"`
+
+ // Total number of results.
+ TotalCount *int64 `json:"total_count" validate:"required"`
+}
+
+
+// UnmarshalListRatelimitRespResultInfo unmarshals an instance of ListRatelimitRespResultInfo from the specified map of raw messages.
+func UnmarshalListRatelimitRespResultInfo(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ListRatelimitRespResultInfo)
+ err = core.UnmarshalPrimitive(m, "page", &obj.Page)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "per_page", &obj.PerPage)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "count", &obj.Count)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitInputAction : action.
+type RatelimitInputAction struct {
+ // The type of action to perform.
+ Mode *string `json:"mode" validate:"required"`
+
+ // The time in seconds as an integer to perform the mitigation action. Must be the same or greater than the period.
+ // This field is valid only when mode is "simulate" or "ban".
+ Timeout *int64 `json:"timeout,omitempty"`
+
+ // Custom content-type and body to return, this overrides the custom error for the zone. This field is not required.
+ // Omission will result in the default HTML error page. This field is valid only when mode is "simulate" or "ban".
+ Response *RatelimitInputActionResponse `json:"response,omitempty"`
+}
+
+// Constants associated with the RatelimitInputAction.Mode property.
+// The type of action to perform.
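A hedged sketch of filling in the action model above: a "ban" action with a custom JSON body, following the timeout and response semantics from the field comments (the timeout and body are placeholders).

    func newBanAction(service *zoneratelimitsv1.ZoneRateLimitsV1) (*zoneratelimitsv1.RatelimitInputAction, error) {
        action, err := service.NewRatelimitInputAction(zoneratelimitsv1.RatelimitInputAction_Mode_Ban)
        if err != nil {
            return nil, err
        }
        action.Timeout = core.Int64Ptr(300) // seconds; must be >= the rule's period
        action.Response = &zoneratelimitsv1.RatelimitInputActionResponse{
            ContentType: core.StringPtr(zoneratelimitsv1.RatelimitInputActionResponse_ContentType_ApplicationJSON),
            Body:        core.StringPtr(`{"error":"rate limited"}`),
        }
        return action, nil
    }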
+const (
+ RatelimitInputAction_Mode_Ban = "ban"
+ RatelimitInputAction_Mode_Challenge = "challenge"
+ RatelimitInputAction_Mode_JsChallenge = "js_challenge"
+ RatelimitInputAction_Mode_Simulate = "simulate"
+)
+
+
+// NewRatelimitInputAction : Instantiate RatelimitInputAction (Generic Model Constructor)
+func (*ZoneRateLimitsV1) NewRatelimitInputAction(mode string) (model *RatelimitInputAction, err error) {
+ model = &RatelimitInputAction{
+ Mode: core.StringPtr(mode),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalRatelimitInputAction unmarshals an instance of RatelimitInputAction from the specified map of raw messages.
+func UnmarshalRatelimitInputAction(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitInputAction)
+ err = core.UnmarshalPrimitive(m, "mode", &obj.Mode)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "timeout", &obj.Timeout)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "response", &obj.Response, UnmarshalRatelimitInputActionResponse)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitInputActionResponse : Custom content-type and body to return, this overrides the custom error for the zone. This field is not required.
+// Omission will result in the default HTML error page. This field is valid only when mode is "simulate" or "ban".
+type RatelimitInputActionResponse struct {
+ // The content type of the body.
+ ContentType *string `json:"content_type,omitempty"`
+
+ // The body to return, the content here should conform to the content_type.
+ Body *string `json:"body,omitempty"`
+}
+
+// Constants associated with the RatelimitInputActionResponse.ContentType property.
+// The content type of the body.
+const (
+ RatelimitInputActionResponse_ContentType_ApplicationJSON = "application/json"
+ RatelimitInputActionResponse_ContentType_TextPlain = "text/plain"
+ RatelimitInputActionResponse_ContentType_TextXml = "text/xml"
+)
+
+
+// UnmarshalRatelimitInputActionResponse unmarshals an instance of RatelimitInputActionResponse from the specified map of raw messages.
+func UnmarshalRatelimitInputActionResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitInputActionResponse)
+ err = core.UnmarshalPrimitive(m, "content_type", &obj.ContentType)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "body", &obj.Body)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitInputBypassItem : RatelimitInputBypassItem struct
+type RatelimitInputBypassItem struct {
+ // Rate limit name.
+ Name *string `json:"name" validate:"required"`
+
+ // The url to bypass.
+ Value *string `json:"value" validate:"required"`
+}
+
+// Constants associated with the RatelimitInputBypassItem.Name property.
+// Rate limit name.
+const (
+ RatelimitInputBypassItem_Name_URL = "url"
+)
+
+
+// NewRatelimitInputBypassItem : Instantiate RatelimitInputBypassItem (Generic Model Constructor)
+func (*ZoneRateLimitsV1) NewRatelimitInputBypassItem(name string, value string) (model *RatelimitInputBypassItem, err error) {
+ model = &RatelimitInputBypassItem{
+ Name: core.StringPtr(name),
+ Value: core.StringPtr(value),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalRatelimitInputBypassItem unmarshals an instance of RatelimitInputBypassItem from the specified map of raw messages.
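A sketch of the bypass constructor above: it exempts a single URL from a rule being created; "url" is the only enumerated name, and the health-check path is a placeholder.

    func bypassHealthCheck(service *zoneratelimitsv1.ZoneRateLimitsV1) (*zoneratelimitsv1.CreateZoneRateLimitsOptions, error) {
        bypass, err := service.NewRatelimitInputBypassItem(
            zoneratelimitsv1.RatelimitInputBypassItem_Name_URL,
            "api.example.com/healthz",
        )
        if err != nil {
            return nil, err
        }
        return service.NewCreateZoneRateLimitsOptions().
            SetBypass([]zoneratelimitsv1.RatelimitInputBypassItem{*bypass}), nil
    }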
+func UnmarshalRatelimitInputBypassItem(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitInputBypassItem)
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitInputCorrelate : Enable NAT based rate limits.
+type RatelimitInputCorrelate struct {
+ // NAT rate limits by.
+ By *string `json:"by" validate:"required"`
+}
+
+// Constants associated with the RatelimitInputCorrelate.By property.
+// NAT rate limits by.
+const (
+ RatelimitInputCorrelate_By_Nat = "nat"
+)
+
+
+// NewRatelimitInputCorrelate : Instantiate RatelimitInputCorrelate (Generic Model Constructor)
+func (*ZoneRateLimitsV1) NewRatelimitInputCorrelate(by string) (model *RatelimitInputCorrelate, err error) {
+ model = &RatelimitInputCorrelate{
+ By: core.StringPtr(by),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalRatelimitInputCorrelate unmarshals an instance of RatelimitInputCorrelate from the specified map of raw messages.
+func UnmarshalRatelimitInputCorrelate(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitInputCorrelate)
+ err = core.UnmarshalPrimitive(m, "by", &obj.By)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitInputMatch : Determines which traffic the rate limit counts towards the threshold. Needs to be one of "request" or "response"
+// objects.
+type RatelimitInputMatch struct {
+ // request.
+ Request *RatelimitInputMatchRequest `json:"request,omitempty"`
+
+ // response.
+ Response *RatelimitInputMatchResponse `json:"response,omitempty"`
+}
+
+
+// UnmarshalRatelimitInputMatch unmarshals an instance of RatelimitInputMatch from the specified map of raw messages.
+func UnmarshalRatelimitInputMatch(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitInputMatch)
+ err = core.UnmarshalModel(m, "request", &obj.Request, UnmarshalRatelimitInputMatchRequest)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "response", &obj.Response, UnmarshalRatelimitInputMatchResponse)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitInputMatchRequest : request.
+type RatelimitInputMatchRequest struct {
+ // A subset of the list of HTTP methods, or ["_ALL_"] for selecting all methods.
+ Methods []string `json:"methods,omitempty"`
+
+ // HTTP schemes list, or ["_ALL_"] for selecting all schemes.
+ Schemes []string `json:"schemes,omitempty"`
+
+ // The URL pattern to match, comprised of the host and path, i.e. example.org/path. Wildcards are expanded to match
+ // applicable traffic, query strings are not matched. Use * for all traffic to your zone.
+ URL *string `json:"url" validate:"required"`
+}
+
+// Constants associated with the RatelimitInputMatchRequest.Methods property.
+const (
+ RatelimitInputMatchRequest_Methods_All = "_ALL_"
+ RatelimitInputMatchRequest_Methods_Delete = "DELETE"
+ RatelimitInputMatchRequest_Methods_Get = "GET"
+ RatelimitInputMatchRequest_Methods_Head = "HEAD"
+ RatelimitInputMatchRequest_Methods_Patch = "PATCH"
+ RatelimitInputMatchRequest_Methods_Post = "POST"
+ RatelimitInputMatchRequest_Methods_Put = "PUT"
+)
+
+// Constants associated with the RatelimitInputMatchRequest.Schemes property.
+const (
+ RatelimitInputMatchRequest_Schemes_All = "_ALL_"
+ RatelimitInputMatchRequest_Schemes_Http = "HTTP"
+ RatelimitInputMatchRequest_Schemes_Https = "HTTPS"
+)
+
+
+// NewRatelimitInputMatchRequest : Instantiate RatelimitInputMatchRequest (Generic Model Constructor)
+func (*ZoneRateLimitsV1) NewRatelimitInputMatchRequest(url string) (model *RatelimitInputMatchRequest, err error) {
+ model = &RatelimitInputMatchRequest{
+ URL: core.StringPtr(url),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalRatelimitInputMatchRequest unmarshals an instance of RatelimitInputMatchRequest from the specified map of raw messages.
+func UnmarshalRatelimitInputMatchRequest(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitInputMatchRequest)
+ err = core.UnmarshalPrimitive(m, "methods", &obj.Methods)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "schemes", &obj.Schemes)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "url", &obj.URL)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitInputMatchResponse : response.
+type RatelimitInputMatchResponse struct {
+ // HTTP Status codes, can be one [403], many [401,403] or indicate all by not providing this value. This field is not
+ // required.
+ Status []int64 `json:"status,omitempty"`
+
+ // Array of response headers to match. If a response does not meet the header criteria then the request will not be
+ // counted towards the rate limit.
+ HeadersVar []RatelimitInputMatchResponseHeadersItem `json:"headers,omitempty"`
+
+ // Deprecated, please use response headers instead and also provide "origin_traffic:false" to avoid legacy behaviour
+ // interacting with the response.headers property.
+ OriginTraffic *bool `json:"origin_traffic,omitempty"`
+}
+
+
+// UnmarshalRatelimitInputMatchResponse unmarshals an instance of RatelimitInputMatchResponse from the specified map of raw messages.
+func UnmarshalRatelimitInputMatchResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitInputMatchResponse)
+ err = core.UnmarshalPrimitive(m, "status", &obj.Status)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "headers", &obj.HeadersVar, UnmarshalRatelimitInputMatchResponseHeadersItem)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "origin_traffic", &obj.OriginTraffic)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitInputMatchResponseHeadersItem : RatelimitInputMatchResponseHeadersItem struct
+type RatelimitInputMatchResponseHeadersItem struct {
+ // The name of the response header to match.
+ Name *string `json:"name" validate:"required"`
+
+ // The operator used when matching: "eq" means equals, "ne" means not equals.
+ Op *string `json:"op" validate:"required"`
+
+ // The value of the header, which will be exactly matched.
+ Value *string `json:"value" validate:"required"`
+}
+
+// Constants associated with the RatelimitInputMatchResponseHeadersItem.Op property.
+// The operator used when matching: "eq" means equals, "ne" means not equals.
+const (
+ RatelimitInputMatchResponseHeadersItem_Op_Eq = "eq"
+ RatelimitInputMatchResponseHeadersItem_Op_Ne = "ne"
+)
+
+// Constants associated with the RatelimitInputMatchResponseHeadersItem.Value property.
+// The value of the header, which will be exactly matched.
+const (
+ RatelimitInputMatchResponseHeadersItem_Value_Hit = "HIT"
+)
+
+
+// NewRatelimitInputMatchResponseHeadersItem : Instantiate RatelimitInputMatchResponseHeadersItem (Generic Model Constructor)
+func (*ZoneRateLimitsV1) NewRatelimitInputMatchResponseHeadersItem(name string, op string, value string) (model *RatelimitInputMatchResponseHeadersItem, err error) {
+ model = &RatelimitInputMatchResponseHeadersItem{
+ Name: core.StringPtr(name),
+ Op: core.StringPtr(op),
+ Value: core.StringPtr(value),
+ }
+ err = core.ValidateStruct(model, "required parameters")
+ return
+}
+
+// UnmarshalRatelimitInputMatchResponseHeadersItem unmarshals an instance of RatelimitInputMatchResponseHeadersItem from the specified map of raw messages.
+func UnmarshalRatelimitInputMatchResponseHeadersItem(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitInputMatchResponseHeadersItem)
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "op", &obj.Op)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitObjectAction : action.
+type RatelimitObjectAction struct {
+ // The type of action to perform.
+ Mode *string `json:"mode" validate:"required"`
+
+ // The time in seconds as an integer to perform the mitigation action. Must be the same or greater than the period.
+ // This field is valid only when mode is "simulate" or "ban".
+ Timeout *int64 `json:"timeout,omitempty"`
+
+ // Custom content-type and body to return, this overrides the custom error for the zone. This field is not required.
+ // Omission will result in the default HTML error page. This field is valid only when mode is "simulate" or "ban".
+ Response *RatelimitObjectActionResponse `json:"response,omitempty"`
+}
+
+// Constants associated with the RatelimitObjectAction.Mode property.
+// The type of action to perform.
+const (
+ RatelimitObjectAction_Mode_Ban = "ban"
+ RatelimitObjectAction_Mode_Challenge = "challenge"
+ RatelimitObjectAction_Mode_JsChallenge = "js_challenge"
+ RatelimitObjectAction_Mode_Simulate = "simulate"
+)
+
+
+// UnmarshalRatelimitObjectAction unmarshals an instance of RatelimitObjectAction from the specified map of raw messages.
+func UnmarshalRatelimitObjectAction(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitObjectAction)
+ err = core.UnmarshalPrimitive(m, "mode", &obj.Mode)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "timeout", &obj.Timeout)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "response", &obj.Response, UnmarshalRatelimitObjectActionResponse)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitObjectActionResponse : Custom content-type and body to return, this overrides the custom error for the zone. This field is not required.
+// Omission will result in the default HTML error page. This field is valid only when mode is "simulate" or "ban".
+type RatelimitObjectActionResponse struct {
+ // The content type of the body.
+ ContentType *string `json:"content_type" validate:"required"`
+
+ // The body to return, the content here should conform to the content_type.
+ Body *string `json:"body" validate:"required"`
+}
+
+// Constants associated with the RatelimitObjectActionResponse.ContentType property.
+// The content type of the body.
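A hedged sketch combining the response-match models above: it counts only 401/403 responses whose cache-status header is not HIT. The header name "Cf-Cache-Status" is an illustrative assumption; "HIT" and the "ne" operator are the enumerated constants.

    func newUncachedErrorMatch(service *zoneratelimitsv1.ZoneRateLimitsV1) (*zoneratelimitsv1.RatelimitInputMatch, error) {
        hdr, err := service.NewRatelimitInputMatchResponseHeadersItem(
            "Cf-Cache-Status", // placeholder header name
            zoneratelimitsv1.RatelimitInputMatchResponseHeadersItem_Op_Ne,
            zoneratelimitsv1.RatelimitInputMatchResponseHeadersItem_Value_Hit,
        )
        if err != nil {
            return nil, err
        }
        return &zoneratelimitsv1.RatelimitInputMatch{
            Response: &zoneratelimitsv1.RatelimitInputMatchResponse{
                Status:     []int64{401, 403},
                HeadersVar: []zoneratelimitsv1.RatelimitInputMatchResponseHeadersItem{*hdr},
            },
        }, nil
    }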
+const ( + RatelimitObjectActionResponse_ContentType_ApplicationJSON = "application/json" + RatelimitObjectActionResponse_ContentType_TextPlain = "text/plain" + RatelimitObjectActionResponse_ContentType_TextXml = "text/xml" +) + + +// UnmarshalRatelimitObjectActionResponse unmarshals an instance of RatelimitObjectActionResponse from the specified map of raw messages. +func UnmarshalRatelimitObjectActionResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RatelimitObjectActionResponse) + err = core.UnmarshalPrimitive(m, "content_type", &obj.ContentType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "body", &obj.Body) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RatelimitObjectBypassItem : RatelimitObjectBypassItem struct +type RatelimitObjectBypassItem struct { + // rate limit name. + Name *string `json:"name" validate:"required"` + + // The url to bypass. + Value *string `json:"value" validate:"required"` +} + +// Constants associated with the RatelimitObjectBypassItem.Name property. +// rate limit name. +const ( + RatelimitObjectBypassItem_Name_URL = "url" +) + + +// UnmarshalRatelimitObjectBypassItem unmarshals an instance of RatelimitObjectBypassItem from the specified map of raw messages. +func UnmarshalRatelimitObjectBypassItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RatelimitObjectBypassItem) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RatelimitObjectCorrelate : Enable NAT based rate limits. +type RatelimitObjectCorrelate struct { + // rate limit enabled by. + By *string `json:"by" validate:"required"` +} + +// Constants associated with the RatelimitObjectCorrelate.By property. +// rate limit enabled by. +const ( + RatelimitObjectCorrelate_By_Nat = "nat" +) + + +// UnmarshalRatelimitObjectCorrelate unmarshals an instance of RatelimitObjectCorrelate from the specified map of raw messages. +func UnmarshalRatelimitObjectCorrelate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RatelimitObjectCorrelate) + err = core.UnmarshalPrimitive(m, "by", &obj.By) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RatelimitObjectMatch : Determines which traffic the rate limit counts towards the threshold. Needs to be one of "request" or "response" +// objects. +type RatelimitObjectMatch struct { + // request. + Request *RatelimitObjectMatchRequest `json:"request,omitempty"` + + // response. + Response *RatelimitObjectMatchResponse `json:"response,omitempty"` +} + + +// UnmarshalRatelimitObjectMatch unmarshals an instance of RatelimitObjectMatch from the specified map of raw messages. +func UnmarshalRatelimitObjectMatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RatelimitObjectMatch) + err = core.UnmarshalModel(m, "request", &obj.Request, UnmarshalRatelimitObjectMatchRequest) + if err != nil { + return + } + err = core.UnmarshalModel(m, "response", &obj.Response, UnmarshalRatelimitObjectMatchResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RatelimitObjectMatchRequest : request. 
+type RatelimitObjectMatchRequest struct {
+ // A subset of the list of HTTP methods, or ["_ALL_"] for selecting all methods.
+ Methods []string `json:"methods,omitempty"`
+
+ // HTTP schemes list, or ["_ALL_"] for selecting all schemes.
+ Schemes []string `json:"schemes,omitempty"`
+
+ // The URL pattern to match, comprised of the host and path, i.e. example.org/path. Wildcards are expanded to match
+ // applicable traffic, query strings are not matched. Use * for all traffic to your zone.
+ URL *string `json:"url" validate:"required"`
+}
+
+// Constants associated with the RatelimitObjectMatchRequest.Methods property.
+const (
+ RatelimitObjectMatchRequest_Methods_All = "_ALL_"
+ RatelimitObjectMatchRequest_Methods_Delete = "DELETE"
+ RatelimitObjectMatchRequest_Methods_Get = "GET"
+ RatelimitObjectMatchRequest_Methods_Head = "HEAD"
+ RatelimitObjectMatchRequest_Methods_Patch = "PATCH"
+ RatelimitObjectMatchRequest_Methods_Post = "POST"
+ RatelimitObjectMatchRequest_Methods_Put = "PUT"
+)
+
+// Constants associated with the RatelimitObjectMatchRequest.Schemes property.
+const (
+ RatelimitObjectMatchRequest_Schemes_All = "_ALL_"
+ RatelimitObjectMatchRequest_Schemes_Http = "HTTP"
+ RatelimitObjectMatchRequest_Schemes_Https = "HTTPS"
+)
+
+
+// UnmarshalRatelimitObjectMatchRequest unmarshals an instance of RatelimitObjectMatchRequest from the specified map of raw messages.
+func UnmarshalRatelimitObjectMatchRequest(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitObjectMatchRequest)
+ err = core.UnmarshalPrimitive(m, "methods", &obj.Methods)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "schemes", &obj.Schemes)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "url", &obj.URL)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitObjectMatchResponse : response.
+type RatelimitObjectMatchResponse struct {
+ // HTTP Status codes, can be one [403], many [401,403] or indicate all by not providing this value. This field is not
+ // required.
+ Status []int64 `json:"status,omitempty"`
+
+ // Array of response headers to match. If a response does not meet the header criteria then the request will not be
+ // counted towards the rate limit.
+ HeadersVar []RatelimitObjectMatchResponseHeadersItem `json:"headers,omitempty"`
+
+ // Deprecated, please use response headers instead and also provide "origin_traffic:false" to avoid legacy behaviour
+ // interacting with the response.headers property.
+ OriginTraffic *bool `json:"origin_traffic,omitempty"`
+}
+
+
+// UnmarshalRatelimitObjectMatchResponse unmarshals an instance of RatelimitObjectMatchResponse from the specified map of raw messages.
+func UnmarshalRatelimitObjectMatchResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitObjectMatchResponse)
+ err = core.UnmarshalPrimitive(m, "status", &obj.Status)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "headers", &obj.HeadersVar, UnmarshalRatelimitObjectMatchResponseHeadersItem)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "origin_traffic", &obj.OriginTraffic)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// RatelimitObjectMatchResponseHeadersItem : RatelimitObjectMatchResponseHeadersItem struct
+type RatelimitObjectMatchResponseHeadersItem struct {
+ // The name of the response header to match.
+ Name *string `json:"name" validate:"required"`
+
+ // The operator used when matching: "eq" means equals, "ne" means not equals.
+ Op *string `json:"op" validate:"required"`
+
+ // The value of the header, which will be exactly matched.
+ Value *string `json:"value" validate:"required"`
+}
+
+// Constants associated with the RatelimitObjectMatchResponseHeadersItem.Op property.
+// The operator used when matching: "eq" means equals, "ne" means not equals.
+const (
+ RatelimitObjectMatchResponseHeadersItem_Op_Eq = "eq"
+ RatelimitObjectMatchResponseHeadersItem_Op_Ne = "ne"
+)
+
+// Constants associated with the RatelimitObjectMatchResponseHeadersItem.Value property.
+// The value of the header, which will be exactly matched.
+const (
+ RatelimitObjectMatchResponseHeadersItem_Value_Hit = "HIT"
+)
+
+
+// UnmarshalRatelimitObjectMatchResponseHeadersItem unmarshals an instance of RatelimitObjectMatchResponseHeadersItem from the specified map of raw messages.
+func UnmarshalRatelimitObjectMatchResponseHeadersItem(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(RatelimitObjectMatchResponseHeadersItem)
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "op", &obj.Op)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// UpdateRateLimitOptions : The UpdateRateLimit options.
+type UpdateRateLimitOptions struct {
+ // Identifier of rate limit.
+ RateLimitIdentifier *string `json:"rate_limit_identifier" validate:"required,ne="`
+
+ // Whether this ratelimit is currently disabled.
+ Disabled *bool `json:"disabled,omitempty"`
+
+ // A note that you can use to describe the reason for a rate limit.
+ Description *string `json:"description,omitempty"`
+
+ // Criteria that would allow the rate limit to be bypassed, for example to express that you shouldn't apply a rate
+ // limit to a given set of URLs.
+ Bypass []RatelimitInputBypassItem `json:"bypass,omitempty"`
+
+ // The threshold that triggers the rate limit mitigations, combine with period. i.e. threshold per period.
+ Threshold *int64 `json:"threshold,omitempty"`
+
+ // The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be
+ // performed.
+ Period *int64 `json:"period,omitempty"`
+
+ // action.
+ Action *RatelimitInputAction `json:"action,omitempty"`
+
+ // Enable NAT based rate limits.
+ Correlate *RatelimitInputCorrelate `json:"correlate,omitempty"`
+
+ // Determines which traffic the rate limit counts towards the threshold. Needs to be one of "request" or "response"
+ // objects.
+ Match *RatelimitInputMatch `json:"match,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateRateLimitOptions : Instantiate UpdateRateLimitOptions +func (*ZoneRateLimitsV1) NewUpdateRateLimitOptions(rateLimitIdentifier string) *UpdateRateLimitOptions { + return &UpdateRateLimitOptions{ + RateLimitIdentifier: core.StringPtr(rateLimitIdentifier), + } +} + +// SetRateLimitIdentifier : Allow user to set RateLimitIdentifier +func (options *UpdateRateLimitOptions) SetRateLimitIdentifier(rateLimitIdentifier string) *UpdateRateLimitOptions { + options.RateLimitIdentifier = core.StringPtr(rateLimitIdentifier) + return options +} + +// SetDisabled : Allow user to set Disabled +func (options *UpdateRateLimitOptions) SetDisabled(disabled bool) *UpdateRateLimitOptions { + options.Disabled = core.BoolPtr(disabled) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateRateLimitOptions) SetDescription(description string) *UpdateRateLimitOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetBypass : Allow user to set Bypass +func (options *UpdateRateLimitOptions) SetBypass(bypass []RatelimitInputBypassItem) *UpdateRateLimitOptions { + options.Bypass = bypass + return options +} + +// SetThreshold : Allow user to set Threshold +func (options *UpdateRateLimitOptions) SetThreshold(threshold int64) *UpdateRateLimitOptions { + options.Threshold = core.Int64Ptr(threshold) + return options +} + +// SetPeriod : Allow user to set Period +func (options *UpdateRateLimitOptions) SetPeriod(period int64) *UpdateRateLimitOptions { + options.Period = core.Int64Ptr(period) + return options +} + +// SetAction : Allow user to set Action +func (options *UpdateRateLimitOptions) SetAction(action *RatelimitInputAction) *UpdateRateLimitOptions { + options.Action = action + return options +} + +// SetCorrelate : Allow user to set Correlate +func (options *UpdateRateLimitOptions) SetCorrelate(correlate *RatelimitInputCorrelate) *UpdateRateLimitOptions { + options.Correlate = correlate + return options +} + +// SetMatch : Allow user to set Match +func (options *UpdateRateLimitOptions) SetMatch(match *RatelimitInputMatch) *UpdateRateLimitOptions { + options.Match = match + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateRateLimitOptions) SetHeaders(param map[string]string) *UpdateRateLimitOptions { + options.Headers = param + return options +} + +// DeleteRateLimitResp : rate limit delete response. +type DeleteRateLimitResp struct { + // Operation success flag. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // Container for response information. + Result *DeleteRateLimitRespResult `json:"result" validate:"required"` +} + + +// UnmarshalDeleteRateLimitResp unmarshals an instance of DeleteRateLimitResp from the specified map of raw messages. 
+func UnmarshalDeleteRateLimitResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DeleteRateLimitResp)
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalDeleteRateLimitRespResult)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ListRatelimitResp : rate limit list response.
+type ListRatelimitResp struct {
+	// Was operation successful.
+	Success *bool `json:"success" validate:"required"`
+
+	// Array of errors encountered.
+	Errors [][]string `json:"errors" validate:"required"`
+
+	// Array of messages returned.
+	Messages [][]string `json:"messages" validate:"required"`
+
+	// Container for response information.
+	Result []RatelimitObject `json:"result" validate:"required"`
+
+	// Statistics of results.
+	ResultInfo *ListRatelimitRespResultInfo `json:"result_info" validate:"required"`
+}
+
+
+// UnmarshalListRatelimitResp unmarshals an instance of ListRatelimitResp from the specified map of raw messages.
+func UnmarshalListRatelimitResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ListRatelimitResp)
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalRatelimitObject)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "result_info", &obj.ResultInfo, UnmarshalListRatelimitRespResultInfo)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// RatelimitObject : rate limit object.
+type RatelimitObject struct {
+	// Identifier of the rate limit.
+	ID *string `json:"id" validate:"required"`
+
+	// Whether this ratelimit is currently disabled.
+	Disabled *bool `json:"disabled" validate:"required"`
+
+	// A note that you can use to describe the reason for a rate limit.
+	Description *string `json:"description" validate:"required"`
+
+	// Criteria that would allow the rate limit to be bypassed, for example to express that you shouldn't apply a rate
+	// limit to a given set of URLs.
+	Bypass []RatelimitObjectBypassItem `json:"bypass" validate:"required"`
+
+	// The threshold that triggers the rate limit mitigations, combined with period, i.e. threshold per period.
+	Threshold *int64 `json:"threshold" validate:"required"`
+
+	// The time in seconds to count matching traffic. If the count exceeds threshold within this period the action will be
+	// performed.
+	Period *int64 `json:"period" validate:"required"`
+
+	// Enable NAT based rate limits.
+	Correlate *RatelimitObjectCorrelate `json:"correlate,omitempty"`
+
+	// action.
+	Action *RatelimitObjectAction `json:"action" validate:"required"`
+
+	// Determines which traffic the rate limit counts towards the threshold. Needs to be one of "request" or "response"
+	// objects.
+	Match *RatelimitObjectMatch `json:"match" validate:"required"`
+}
+
+
+// UnmarshalRatelimitObject unmarshals an instance of RatelimitObject from the specified map of raw messages.
+func UnmarshalRatelimitObject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RatelimitObject) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "disabled", &obj.Disabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalModel(m, "bypass", &obj.Bypass, UnmarshalRatelimitObjectBypassItem) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "threshold", &obj.Threshold) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "period", &obj.Period) + if err != nil { + return + } + err = core.UnmarshalModel(m, "correlate", &obj.Correlate, UnmarshalRatelimitObjectCorrelate) + if err != nil { + return + } + err = core.UnmarshalModel(m, "action", &obj.Action, UnmarshalRatelimitObjectAction) + if err != nil { + return + } + err = core.UnmarshalModel(m, "match", &obj.Match, UnmarshalRatelimitObjectMatch) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RatelimitResp : rate limit response. +type RatelimitResp struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // rate limit object. + Result *RatelimitObject `json:"result" validate:"required"` +} + + +// UnmarshalRatelimitResp unmarshals an instance of RatelimitResp from the specified map of raw messages. +func UnmarshalRatelimitResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RatelimitResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalRatelimitObject) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/networking-go-sdk/zonessettingsv1/zones_settings_v1.go b/vendor/github.com/IBM/networking-go-sdk/zonessettingsv1/zones_settings_v1.go new file mode 100644 index 00000000000..c023338ef6a --- /dev/null +++ b/vendor/github.com/IBM/networking-go-sdk/zonessettingsv1/zones_settings_v1.go @@ -0,0 +1,7759 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.20.0-debb9f29-20201203-202043 + */ + + +// Package zonessettingsv1 : Operations and models for the ZonesSettingsV1 service +package zonessettingsv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/networking-go-sdk/common" + "github.com/go-openapi/strfmt" + "net/http" + "reflect" + "time" +) + +// ZonesSettingsV1 : CIS Zones Settings +// +// Version: 1.0.1 +type ZonesSettingsV1 struct { + Service *core.BaseService + + // Full url-encoded cloud resource name (CRN) of resource instance. + Crn *string + + // Zone identifier. + ZoneIdentifier *string +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://api.cis.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "zones_settings" + +// ZonesSettingsV1Options : Service options +type ZonesSettingsV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Full url-encoded cloud resource name (CRN) of resource instance. + Crn *string `validate:"required"` + + // Zone identifier. + ZoneIdentifier *string `validate:"required"` +} + +// NewZonesSettingsV1UsingExternalConfig : constructs an instance of ZonesSettingsV1 with passed in options and external configuration. +func NewZonesSettingsV1UsingExternalConfig(options *ZonesSettingsV1Options) (zonesSettings *ZonesSettingsV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + zonesSettings, err = NewZonesSettingsV1(options) + if err != nil { + return + } + + err = zonesSettings.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = zonesSettings.Service.SetServiceURL(options.URL) + } + return +} + +// NewZonesSettingsV1 : constructs an instance of ZonesSettingsV1 with passed in options. +func NewZonesSettingsV1(options *ZonesSettingsV1Options) (service *ZonesSettingsV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &ZonesSettingsV1{ + Service: baseService, + Crn: options.Crn, + ZoneIdentifier: options.ZoneIdentifier, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "zonesSettings" suitable for processing requests. 
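+//
+// Illustrative sketch (an assumption about typical use, not part of the
+// generated SDK): a clone can be pointed at a different zone without
+// mutating the original service instance. The zone ID below is a placeholder.
+//
+//	perZone := zonesSettings.Clone()
+//	perZone.ZoneIdentifier = core.StringPtr("SOME_OTHER_ZONE_ID")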
+func (zonesSettings *ZonesSettingsV1) Clone() *ZonesSettingsV1 { + if core.IsNil(zonesSettings) { + return nil + } + clone := *zonesSettings + clone.Service = zonesSettings.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (zonesSettings *ZonesSettingsV1) SetServiceURL(url string) error { + return zonesSettings.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (zonesSettings *ZonesSettingsV1) GetServiceURL() string { + return zonesSettings.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (zonesSettings *ZonesSettingsV1) SetDefaultHeaders(headers http.Header) { + zonesSettings.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (zonesSettings *ZonesSettingsV1) SetEnableGzipCompression(enableGzip bool) { + zonesSettings.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (zonesSettings *ZonesSettingsV1) GetEnableGzipCompression() bool { + return zonesSettings.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (zonesSettings *ZonesSettingsV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + zonesSettings.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (zonesSettings *ZonesSettingsV1) DisableRetries() { + zonesSettings.Service.DisableRetries() +} + +// GetZoneDnssec : Get zone DNSSEC +// Get DNSSEC setting for a given zone. 
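+//
+// Usage sketch (illustrative only, not part of the generated SDK; assumes a
+// configured service instance named zonesSettings):
+//
+//	result, response, err := zonesSettings.GetZoneDnssec(&GetZoneDnssecOptions{})
+//	if err == nil {
+//		fmt.Println(response.StatusCode)
+//	}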
+func (zonesSettings *ZonesSettingsV1) GetZoneDnssec(getZoneDnssecOptions *GetZoneDnssecOptions) (result *ZonesDnssecResp, response *core.DetailedResponse, err error) {
+	return zonesSettings.GetZoneDnssecWithContext(context.Background(), getZoneDnssecOptions)
+}
+
+// GetZoneDnssecWithContext is an alternate form of the GetZoneDnssec method which supports a Context parameter
+func (zonesSettings *ZonesSettingsV1) GetZoneDnssecWithContext(ctx context.Context, getZoneDnssecOptions *GetZoneDnssecOptions) (result *ZonesDnssecResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(getZoneDnssecOptions, "getZoneDnssecOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *zonesSettings.Crn,
+		"zone_identifier": *zonesSettings.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/dnssec`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getZoneDnssecOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetZoneDnssec")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = zonesSettings.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZonesDnssecResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// UpdateZoneDnssec : Update zone DNSSEC
+// Update DNSSEC setting for a given zone.
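+//
+// Usage sketch (illustrative; assumes the DNSSEC status enum is the string
+// "active"/"disabled" pair and that Status is a *string, which is not shown
+// in this file):
+//
+//	opts := &UpdateZoneDnssecOptions{
+//		Status: core.StringPtr("active"),
+//	}
+//	result, _, err := zonesSettings.UpdateZoneDnssec(opts)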
+func (zonesSettings *ZonesSettingsV1) UpdateZoneDnssec(updateZoneDnssecOptions *UpdateZoneDnssecOptions) (result *ZonesDnssecResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateZoneDnssecWithContext(context.Background(), updateZoneDnssecOptions) +} + +// UpdateZoneDnssecWithContext is an alternate form of the UpdateZoneDnssec method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateZoneDnssecWithContext(ctx context.Context, updateZoneDnssecOptions *UpdateZoneDnssecOptions) (result *ZonesDnssecResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateZoneDnssecOptions, "updateZoneDnssecOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/dnssec`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateZoneDnssecOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateZoneDnssec") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateZoneDnssecOptions.Status != nil { + body["status"] = updateZoneDnssecOptions.Status + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZonesDnssecResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetZoneCnameFlattening : Get zone CNAME flattening +// Get CNAME flattening setting for a given zone. 
+func (zonesSettings *ZonesSettingsV1) GetZoneCnameFlattening(getZoneCnameFlatteningOptions *GetZoneCnameFlatteningOptions) (result *ZonesCnameFlatteningResp, response *core.DetailedResponse, err error) {
+	return zonesSettings.GetZoneCnameFlatteningWithContext(context.Background(), getZoneCnameFlatteningOptions)
+}
+
+// GetZoneCnameFlatteningWithContext is an alternate form of the GetZoneCnameFlattening method which supports a Context parameter
+func (zonesSettings *ZonesSettingsV1) GetZoneCnameFlatteningWithContext(ctx context.Context, getZoneCnameFlatteningOptions *GetZoneCnameFlatteningOptions) (result *ZonesCnameFlatteningResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(getZoneCnameFlatteningOptions, "getZoneCnameFlatteningOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *zonesSettings.Crn,
+		"zone_identifier": *zonesSettings.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/cname_flattening`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getZoneCnameFlatteningOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetZoneCnameFlattening")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = zonesSettings.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZonesCnameFlatteningResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// UpdateZoneCnameFlattening : Update zone CNAME flattening
+// Update CNAME flattening setting for a given zone.
+func (zonesSettings *ZonesSettingsV1) UpdateZoneCnameFlattening(updateZoneCnameFlatteningOptions *UpdateZoneCnameFlatteningOptions) (result *ZonesCnameFlatteningResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateZoneCnameFlatteningWithContext(context.Background(), updateZoneCnameFlatteningOptions) +} + +// UpdateZoneCnameFlatteningWithContext is an alternate form of the UpdateZoneCnameFlattening method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateZoneCnameFlatteningWithContext(ctx context.Context, updateZoneCnameFlatteningOptions *UpdateZoneCnameFlatteningOptions) (result *ZonesCnameFlatteningResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateZoneCnameFlatteningOptions, "updateZoneCnameFlatteningOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/cname_flattening`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateZoneCnameFlatteningOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateZoneCnameFlattening") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateZoneCnameFlatteningOptions.Value != nil { + body["value"] = updateZoneCnameFlatteningOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZonesCnameFlatteningResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetOpportunisticEncryption : Get opportunistic encryption setting +// Get opportunistic encryption setting for a zone. 
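+//
+// Usage sketch of the WithContext variant with a deadline (illustrative, not
+// part of the generated SDK):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	result, _, err := zonesSettings.GetOpportunisticEncryptionWithContext(ctx, &GetOpportunisticEncryptionOptions{})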
+func (zonesSettings *ZonesSettingsV1) GetOpportunisticEncryption(getOpportunisticEncryptionOptions *GetOpportunisticEncryptionOptions) (result *OpportunisticEncryptionResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetOpportunisticEncryptionWithContext(context.Background(), getOpportunisticEncryptionOptions) +} + +// GetOpportunisticEncryptionWithContext is an alternate form of the GetOpportunisticEncryption method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetOpportunisticEncryptionWithContext(ctx context.Context, getOpportunisticEncryptionOptions *GetOpportunisticEncryptionOptions) (result *OpportunisticEncryptionResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getOpportunisticEncryptionOptions, "getOpportunisticEncryptionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/opportunistic_encryption`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOpportunisticEncryptionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetOpportunisticEncryption") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOpportunisticEncryptionResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateOpportunisticEncryption : Update opportunistic encryption setting +// Update opportunistic encryption setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateOpportunisticEncryption(updateOpportunisticEncryptionOptions *UpdateOpportunisticEncryptionOptions) (result *OpportunisticEncryptionResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateOpportunisticEncryptionWithContext(context.Background(), updateOpportunisticEncryptionOptions) +} + +// UpdateOpportunisticEncryptionWithContext is an alternate form of the UpdateOpportunisticEncryption method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateOpportunisticEncryptionWithContext(ctx context.Context, updateOpportunisticEncryptionOptions *UpdateOpportunisticEncryptionOptions) (result *OpportunisticEncryptionResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateOpportunisticEncryptionOptions, "updateOpportunisticEncryptionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/opportunistic_encryption`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateOpportunisticEncryptionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateOpportunisticEncryption") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateOpportunisticEncryptionOptions.Value != nil { + body["value"] = updateOpportunisticEncryptionOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOpportunisticEncryptionResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetChallengeTTL : Get challenge TTL setting +// Get challenge TTL setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetChallengeTTL(getChallengeTtlOptions *GetChallengeTtlOptions) (result *ChallengeTtlResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetChallengeTTLWithContext(context.Background(), getChallengeTtlOptions) +} + +// GetChallengeTTLWithContext is an alternate form of the GetChallengeTTL method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetChallengeTTLWithContext(ctx context.Context, getChallengeTtlOptions *GetChallengeTtlOptions) (result *ChallengeTtlResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getChallengeTtlOptions, "getChallengeTtlOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/challenge_ttl`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getChallengeTtlOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetChallengeTTL") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalChallengeTtlResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateChallengeTTL : Update challenge TTL setting +// Update challenge TTL setting for a zone. 
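+//
+// Usage sketch (illustrative; assumes Value is the challenge TTL in seconds
+// as an *int64, e.g. 1800, which is not shown in this file):
+//
+//	opts := &UpdateChallengeTtlOptions{
+//		Value: core.Int64Ptr(1800),
+//	}
+//	result, _, err := zonesSettings.UpdateChallengeTTL(opts)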
+func (zonesSettings *ZonesSettingsV1) UpdateChallengeTTL(updateChallengeTtlOptions *UpdateChallengeTtlOptions) (result *ChallengeTtlResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateChallengeTTLWithContext(context.Background(), updateChallengeTtlOptions) +} + +// UpdateChallengeTTLWithContext is an alternate form of the UpdateChallengeTTL method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateChallengeTTLWithContext(ctx context.Context, updateChallengeTtlOptions *UpdateChallengeTtlOptions) (result *ChallengeTtlResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateChallengeTtlOptions, "updateChallengeTtlOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/challenge_ttl`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateChallengeTtlOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateChallengeTTL") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateChallengeTtlOptions.Value != nil { + body["value"] = updateChallengeTtlOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalChallengeTtlResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetAutomaticHttpsRewrites : Get automatic https rewrites setting +// Get automatic https rewrites setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetAutomaticHttpsRewrites(getAutomaticHttpsRewritesOptions *GetAutomaticHttpsRewritesOptions) (result *AutomaticHttpsRewritesResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetAutomaticHttpsRewritesWithContext(context.Background(), getAutomaticHttpsRewritesOptions) +} + +// GetAutomaticHttpsRewritesWithContext is an alternate form of the GetAutomaticHttpsRewrites method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetAutomaticHttpsRewritesWithContext(ctx context.Context, getAutomaticHttpsRewritesOptions *GetAutomaticHttpsRewritesOptions) (result *AutomaticHttpsRewritesResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getAutomaticHttpsRewritesOptions, "getAutomaticHttpsRewritesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/automatic_https_rewrites`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getAutomaticHttpsRewritesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetAutomaticHttpsRewrites") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAutomaticHttpsRewritesResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateAutomaticHttpsRewrites : Update automatic https rewrites setting +// Update automatic https rewrites setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateAutomaticHttpsRewrites(updateAutomaticHttpsRewritesOptions *UpdateAutomaticHttpsRewritesOptions) (result *AutomaticHttpsRewritesResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateAutomaticHttpsRewritesWithContext(context.Background(), updateAutomaticHttpsRewritesOptions) +} + +// UpdateAutomaticHttpsRewritesWithContext is an alternate form of the UpdateAutomaticHttpsRewrites method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateAutomaticHttpsRewritesWithContext(ctx context.Context, updateAutomaticHttpsRewritesOptions *UpdateAutomaticHttpsRewritesOptions) (result *AutomaticHttpsRewritesResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateAutomaticHttpsRewritesOptions, "updateAutomaticHttpsRewritesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/automatic_https_rewrites`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateAutomaticHttpsRewritesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateAutomaticHttpsRewrites") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateAutomaticHttpsRewritesOptions.Value != nil { + body["value"] = updateAutomaticHttpsRewritesOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAutomaticHttpsRewritesResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetTrueClientIp : Get true client IP setting +// Get true client IP setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetTrueClientIp(getTrueClientIpOptions *GetTrueClientIpOptions) (result *TrueClientIpResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetTrueClientIpWithContext(context.Background(), getTrueClientIpOptions) +} + +// GetTrueClientIpWithContext is an alternate form of the GetTrueClientIp method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetTrueClientIpWithContext(ctx context.Context, getTrueClientIpOptions *GetTrueClientIpOptions) (result *TrueClientIpResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getTrueClientIpOptions, "getTrueClientIpOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/true_client_ip_header`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getTrueClientIpOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetTrueClientIp") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTrueClientIpResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateTrueClientIp : Update true client IP setting +// Update true client IP setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateTrueClientIp(updateTrueClientIpOptions *UpdateTrueClientIpOptions) (result *TrueClientIpResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateTrueClientIpWithContext(context.Background(), updateTrueClientIpOptions) +} + +// UpdateTrueClientIpWithContext is an alternate form of the UpdateTrueClientIp method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateTrueClientIpWithContext(ctx context.Context, updateTrueClientIpOptions *UpdateTrueClientIpOptions) (result *TrueClientIpResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateTrueClientIpOptions, "updateTrueClientIpOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/true_client_ip_header`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateTrueClientIpOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateTrueClientIp") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateTrueClientIpOptions.Value != nil { + body["value"] = updateTrueClientIpOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTrueClientIpResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetAlwaysUseHttps : Get always use https setting +// Get always use https setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetAlwaysUseHttps(getAlwaysUseHttpsOptions *GetAlwaysUseHttpsOptions) (result *AlwaysUseHttpsResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetAlwaysUseHttpsWithContext(context.Background(), getAlwaysUseHttpsOptions) +} + +// GetAlwaysUseHttpsWithContext is an alternate form of the GetAlwaysUseHttps method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetAlwaysUseHttpsWithContext(ctx context.Context, getAlwaysUseHttpsOptions *GetAlwaysUseHttpsOptions) (result *AlwaysUseHttpsResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getAlwaysUseHttpsOptions, "getAlwaysUseHttpsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/always_use_https`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getAlwaysUseHttpsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetAlwaysUseHttps") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAlwaysUseHttpsResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateAlwaysUseHttps : Update always use https setting +// Update always use https setting for a zone. 
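+//
+// Usage sketch (illustrative; assumes the setting toggles with the usual
+// "on"/"off" string values, which are not shown in this file). The same
+// pattern applies to the other settings update methods in this package:
+//
+//	opts := &UpdateAlwaysUseHttpsOptions{
+//		Value: core.StringPtr("on"),
+//	}
+//	result, _, err := zonesSettings.UpdateAlwaysUseHttps(opts)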
+func (zonesSettings *ZonesSettingsV1) UpdateAlwaysUseHttps(updateAlwaysUseHttpsOptions *UpdateAlwaysUseHttpsOptions) (result *AlwaysUseHttpsResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateAlwaysUseHttpsWithContext(context.Background(), updateAlwaysUseHttpsOptions) +} + +// UpdateAlwaysUseHttpsWithContext is an alternate form of the UpdateAlwaysUseHttps method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateAlwaysUseHttpsWithContext(ctx context.Context, updateAlwaysUseHttpsOptions *UpdateAlwaysUseHttpsOptions) (result *AlwaysUseHttpsResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateAlwaysUseHttpsOptions, "updateAlwaysUseHttpsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/always_use_https`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateAlwaysUseHttpsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateAlwaysUseHttps") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateAlwaysUseHttpsOptions.Value != nil { + body["value"] = updateAlwaysUseHttpsOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAlwaysUseHttpsResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetImageSizeOptimization : Get image size optimization setting +// Get image size optimization setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetImageSizeOptimization(getImageSizeOptimizationOptions *GetImageSizeOptimizationOptions) (result *ImageSizeOptimizationResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetImageSizeOptimizationWithContext(context.Background(), getImageSizeOptimizationOptions) +} + +// GetImageSizeOptimizationWithContext is an alternate form of the GetImageSizeOptimization method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetImageSizeOptimizationWithContext(ctx context.Context, getImageSizeOptimizationOptions *GetImageSizeOptimizationOptions) (result *ImageSizeOptimizationResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getImageSizeOptimizationOptions, "getImageSizeOptimizationOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/image_size_optimization`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getImageSizeOptimizationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetImageSizeOptimization") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageSizeOptimizationResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateImageSizeOptimization : Update image size optimization setting +// Update image size optimization setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateImageSizeOptimization(updateImageSizeOptimizationOptions *UpdateImageSizeOptimizationOptions) (result *ImageSizeOptimizationResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateImageSizeOptimizationWithContext(context.Background(), updateImageSizeOptimizationOptions) +} + +// UpdateImageSizeOptimizationWithContext is an alternate form of the UpdateImageSizeOptimization method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateImageSizeOptimizationWithContext(ctx context.Context, updateImageSizeOptimizationOptions *UpdateImageSizeOptimizationOptions) (result *ImageSizeOptimizationResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateImageSizeOptimizationOptions, "updateImageSizeOptimizationOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/image_size_optimization`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateImageSizeOptimizationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateImageSizeOptimization") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateImageSizeOptimizationOptions.Value != nil { + body["value"] = updateImageSizeOptimizationOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageSizeOptimizationResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetScriptLoadOptimization : Get script load optimization setting +// Get script load optimization setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetScriptLoadOptimization(getScriptLoadOptimizationOptions *GetScriptLoadOptimizationOptions) (result *ScriptLoadOptimizationResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetScriptLoadOptimizationWithContext(context.Background(), getScriptLoadOptimizationOptions) +} + +// GetScriptLoadOptimizationWithContext is an alternate form of the GetScriptLoadOptimization method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetScriptLoadOptimizationWithContext(ctx context.Context, getScriptLoadOptimizationOptions *GetScriptLoadOptimizationOptions) (result *ScriptLoadOptimizationResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getScriptLoadOptimizationOptions, "getScriptLoadOptimizationOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/script_load_optimization`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getScriptLoadOptimizationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetScriptLoadOptimization") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalScriptLoadOptimizationResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateScriptLoadOptimization : Update script load optimization setting +// Update script load optimization setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateScriptLoadOptimization(updateScriptLoadOptimizationOptions *UpdateScriptLoadOptimizationOptions) (result *ScriptLoadOptimizationResp, response *core.DetailedResponse, err error) {
+	return zonesSettings.UpdateScriptLoadOptimizationWithContext(context.Background(), updateScriptLoadOptimizationOptions)
+}
+
+// UpdateScriptLoadOptimizationWithContext is an alternate form of the UpdateScriptLoadOptimization method which supports a Context parameter
+func (zonesSettings *ZonesSettingsV1) UpdateScriptLoadOptimizationWithContext(ctx context.Context, updateScriptLoadOptimizationOptions *UpdateScriptLoadOptimizationOptions) (result *ScriptLoadOptimizationResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(updateScriptLoadOptimizationOptions, "updateScriptLoadOptimizationOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *zonesSettings.Crn,
+		"zone_identifier": *zonesSettings.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.PATCH)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/script_load_optimization`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range updateScriptLoadOptimizationOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateScriptLoadOptimization")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	body := make(map[string]interface{})
+	if updateScriptLoadOptimizationOptions.Value != nil {
+		body["value"] = updateScriptLoadOptimizationOptions.Value
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = zonesSettings.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalScriptLoadOptimizationResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// GetImageLoadOptimization : Get image load optimization setting
+// Get image load optimization setting for a zone.
+func (zonesSettings *ZonesSettingsV1) GetImageLoadOptimization(getImageLoadOptimizationOptions *GetImageLoadOptimizationOptions) (result *ImageLoadOptimizationResp, response *core.DetailedResponse, err error) {
+	return zonesSettings.GetImageLoadOptimizationWithContext(context.Background(), getImageLoadOptimizationOptions)
+}
+
+// GetImageLoadOptimizationWithContext is an alternate form of the GetImageLoadOptimization method which supports a Context parameter
+func (zonesSettings *ZonesSettingsV1) GetImageLoadOptimizationWithContext(ctx context.Context, getImageLoadOptimizationOptions *GetImageLoadOptimizationOptions) (result *ImageLoadOptimizationResp, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(getImageLoadOptimizationOptions, "getImageLoadOptimizationOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"crn": *zonesSettings.Crn,
+		"zone_identifier": *zonesSettings.ZoneIdentifier,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/image_load_optimization`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getImageLoadOptimizationOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetImageLoadOptimization")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = zonesSettings.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageLoadOptimizationResp)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// UpdateImageLoadOptimization : Update image load optimization setting
+// Update image load optimization setting for a zone.
+func (zonesSettings *ZonesSettingsV1) UpdateImageLoadOptimization(updateImageLoadOptimizationOptions *UpdateImageLoadOptimizationOptions) (result *ImageLoadOptimizationResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateImageLoadOptimizationWithContext(context.Background(), updateImageLoadOptimizationOptions) +} + +// UpdateImageLoadOptimizationWithContext is an alternate form of the UpdateImageLoadOptimization method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateImageLoadOptimizationWithContext(ctx context.Context, updateImageLoadOptimizationOptions *UpdateImageLoadOptimizationOptions) (result *ImageLoadOptimizationResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateImageLoadOptimizationOptions, "updateImageLoadOptimizationOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/image_load_optimization`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateImageLoadOptimizationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateImageLoadOptimization") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateImageLoadOptimizationOptions.Value != nil { + body["value"] = updateImageLoadOptimizationOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageLoadOptimizationResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetMinify : Get minify setting +// Get minify setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetMinify(getMinifyOptions *GetMinifyOptions) (result *MinifyResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetMinifyWithContext(context.Background(), getMinifyOptions) +} + +// GetMinifyWithContext is an alternate form of the GetMinify method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetMinifyWithContext(ctx context.Context, getMinifyOptions *GetMinifyOptions) (result *MinifyResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getMinifyOptions, "getMinifyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/minify`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getMinifyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetMinify") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMinifyResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateMinify : Update minify setting +// Update minify setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateMinify(updateMinifyOptions *UpdateMinifyOptions) (result *MinifyResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateMinifyWithContext(context.Background(), updateMinifyOptions) +} + +// UpdateMinifyWithContext is an alternate form of the UpdateMinify method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateMinifyWithContext(ctx context.Context, updateMinifyOptions *UpdateMinifyOptions) (result *MinifyResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateMinifyOptions, "updateMinifyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/minify`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateMinifyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateMinify") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateMinifyOptions.Value != nil { + body["value"] = updateMinifyOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMinifyResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetMinTlsVersion : Get minimum TLS version setting +// Get minimum TLS version setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetMinTlsVersion(getMinTlsVersionOptions *GetMinTlsVersionOptions) (result *MinTlsVersionResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetMinTlsVersionWithContext(context.Background(), getMinTlsVersionOptions) +} + +// GetMinTlsVersionWithContext is an alternate form of the GetMinTlsVersion method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetMinTlsVersionWithContext(ctx context.Context, getMinTlsVersionOptions *GetMinTlsVersionOptions) (result *MinTlsVersionResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getMinTlsVersionOptions, "getMinTlsVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/min_tls_version`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getMinTlsVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetMinTlsVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMinTlsVersionResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateMinTlsVersion : Update minimum TLS version setting +// Update minimum TLS version setting for a zone. 
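+//
+// An update sketch under the same illustrative client/package names used in
+// the GetImageLoadOptimization example above; the *string Value is inferred
+// from the request body handling below, and core.StringPtr comes from the
+// IBM Go SDK core:
+//
+//   opts := &zonessettingsv1.UpdateMinTlsVersionOptions{
+//       Value: core.StringPtr("1.2"),
+//   }
+//   result, _, err := zonesSettings.UpdateMinTlsVersion(opts)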
+func (zonesSettings *ZonesSettingsV1) UpdateMinTlsVersion(updateMinTlsVersionOptions *UpdateMinTlsVersionOptions) (result *MinTlsVersionResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateMinTlsVersionWithContext(context.Background(), updateMinTlsVersionOptions) +} + +// UpdateMinTlsVersionWithContext is an alternate form of the UpdateMinTlsVersion method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateMinTlsVersionWithContext(ctx context.Context, updateMinTlsVersionOptions *UpdateMinTlsVersionOptions) (result *MinTlsVersionResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateMinTlsVersionOptions, "updateMinTlsVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/min_tls_version`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateMinTlsVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateMinTlsVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateMinTlsVersionOptions.Value != nil { + body["value"] = updateMinTlsVersionOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMinTlsVersionResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetIpGeolocation : Get IP geolocation setting +// Get IP geolocation setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetIpGeolocation(getIpGeolocationOptions *GetIpGeolocationOptions) (result *IpGeolocationResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetIpGeolocationWithContext(context.Background(), getIpGeolocationOptions) +} + +// GetIpGeolocationWithContext is an alternate form of the GetIpGeolocation method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetIpGeolocationWithContext(ctx context.Context, getIpGeolocationOptions *GetIpGeolocationOptions) (result *IpGeolocationResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getIpGeolocationOptions, "getIpGeolocationOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/ip_geolocation`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getIpGeolocationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetIpGeolocation") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIpGeolocationResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateIpGeolocation : Update IP geolocation setting +// Update IP geolocation setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateIpGeolocation(updateIpGeolocationOptions *UpdateIpGeolocationOptions) (result *IpGeolocationResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateIpGeolocationWithContext(context.Background(), updateIpGeolocationOptions) +} + +// UpdateIpGeolocationWithContext is an alternate form of the UpdateIpGeolocation method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateIpGeolocationWithContext(ctx context.Context, updateIpGeolocationOptions *UpdateIpGeolocationOptions) (result *IpGeolocationResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateIpGeolocationOptions, "updateIpGeolocationOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/ip_geolocation`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateIpGeolocationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateIpGeolocation") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateIpGeolocationOptions.Value != nil { + body["value"] = updateIpGeolocationOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIpGeolocationResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetServerSideExclude : Get server side exclude setting +// Get server side exclude setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetServerSideExclude(getServerSideExcludeOptions *GetServerSideExcludeOptions) (result *ServerSideExcludeResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetServerSideExcludeWithContext(context.Background(), getServerSideExcludeOptions) +} + +// GetServerSideExcludeWithContext is an alternate form of the GetServerSideExclude method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetServerSideExcludeWithContext(ctx context.Context, getServerSideExcludeOptions *GetServerSideExcludeOptions) (result *ServerSideExcludeResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getServerSideExcludeOptions, "getServerSideExcludeOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/server_side_exclude`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getServerSideExcludeOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetServerSideExclude") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalServerSideExcludeResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateServerSideExclude : Update server side exclude setting +// Update server side exclude setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateServerSideExclude(updateServerSideExcludeOptions *UpdateServerSideExcludeOptions) (result *ServerSideExcludeResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateServerSideExcludeWithContext(context.Background(), updateServerSideExcludeOptions) +} + +// UpdateServerSideExcludeWithContext is an alternate form of the UpdateServerSideExclude method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateServerSideExcludeWithContext(ctx context.Context, updateServerSideExcludeOptions *UpdateServerSideExcludeOptions) (result *ServerSideExcludeResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateServerSideExcludeOptions, "updateServerSideExcludeOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/server_side_exclude`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateServerSideExcludeOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateServerSideExclude") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateServerSideExcludeOptions.Value != nil { + body["value"] = updateServerSideExcludeOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalServerSideExcludeResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSecurityHeader : Get HTTP strict transport security setting +// Get HTTP strict transport security setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetSecurityHeader(getSecurityHeaderOptions *GetSecurityHeaderOptions) (result *SecurityHeaderResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetSecurityHeaderWithContext(context.Background(), getSecurityHeaderOptions) +} + +// GetSecurityHeaderWithContext is an alternate form of the GetSecurityHeader method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetSecurityHeaderWithContext(ctx context.Context, getSecurityHeaderOptions *GetSecurityHeaderOptions) (result *SecurityHeaderResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getSecurityHeaderOptions, "getSecurityHeaderOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/security_header`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSecurityHeaderOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetSecurityHeader") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityHeaderResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSecurityHeader : Update HTTP strict transport security setting +// Update HTTP strict transport security setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateSecurityHeader(updateSecurityHeaderOptions *UpdateSecurityHeaderOptions) (result *SecurityHeaderResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateSecurityHeaderWithContext(context.Background(), updateSecurityHeaderOptions) +} + +// UpdateSecurityHeaderWithContext is an alternate form of the UpdateSecurityHeader method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateSecurityHeaderWithContext(ctx context.Context, updateSecurityHeaderOptions *UpdateSecurityHeaderOptions) (result *SecurityHeaderResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateSecurityHeaderOptions, "updateSecurityHeaderOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/security_header`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateSecurityHeaderOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateSecurityHeader") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateSecurityHeaderOptions.Value != nil { + body["value"] = updateSecurityHeaderOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityHeaderResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetMobileRedirect : Get mobile redirect setting +// Get mobile redirect setting for a zone. 
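+//
+// Per-request headers, sketched: every generated options struct carries a
+// Headers map that the builder copies onto the outgoing request (see the
+// range loop in the implementation below); the header name here is
+// illustrative, not taken from this diff:
+//
+//   opts := &zonessettingsv1.GetMobileRedirectOptions{
+//       Headers: map[string]string{"X-Correlation-Id": "example-id"},
+//   }
+//   result, _, err := zonesSettings.GetMobileRedirect(opts)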
+func (zonesSettings *ZonesSettingsV1) GetMobileRedirect(getMobileRedirectOptions *GetMobileRedirectOptions) (result *MobileRedirectResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetMobileRedirectWithContext(context.Background(), getMobileRedirectOptions) +} + +// GetMobileRedirectWithContext is an alternate form of the GetMobileRedirect method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetMobileRedirectWithContext(ctx context.Context, getMobileRedirectOptions *GetMobileRedirectOptions) (result *MobileRedirectResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getMobileRedirectOptions, "getMobileRedirectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/mobile_redirect`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getMobileRedirectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetMobileRedirect") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMobileRedirectResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateMobileRedirect : Update mobile redirect setting +// Update mobile redirect setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateMobileRedirect(updateMobileRedirectOptions *UpdateMobileRedirectOptions) (result *MobileRedirectResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateMobileRedirectWithContext(context.Background(), updateMobileRedirectOptions) +} + +// UpdateMobileRedirectWithContext is an alternate form of the UpdateMobileRedirect method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateMobileRedirectWithContext(ctx context.Context, updateMobileRedirectOptions *UpdateMobileRedirectOptions) (result *MobileRedirectResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateMobileRedirectOptions, "updateMobileRedirectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/mobile_redirect`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateMobileRedirectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateMobileRedirect") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateMobileRedirectOptions.Value != nil { + body["value"] = updateMobileRedirectOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMobileRedirectResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetPrefetchPreload : Get prefetch URLs from header setting +// Get prefetch URLs from header setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetPrefetchPreload(getPrefetchPreloadOptions *GetPrefetchPreloadOptions) (result *PrefetchPreloadResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetPrefetchPreloadWithContext(context.Background(), getPrefetchPreloadOptions) +} + +// GetPrefetchPreloadWithContext is an alternate form of the GetPrefetchPreload method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetPrefetchPreloadWithContext(ctx context.Context, getPrefetchPreloadOptions *GetPrefetchPreloadOptions) (result *PrefetchPreloadResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getPrefetchPreloadOptions, "getPrefetchPreloadOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/prefetch_preload`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getPrefetchPreloadOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetPrefetchPreload") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPrefetchPreloadResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdatePrefetchPreload : Update prefetch URLs from header setting +// Update prefetch URLs from header setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdatePrefetchPreload(updatePrefetchPreloadOptions *UpdatePrefetchPreloadOptions) (result *PrefetchPreloadResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdatePrefetchPreloadWithContext(context.Background(), updatePrefetchPreloadOptions) +} + +// UpdatePrefetchPreloadWithContext is an alternate form of the UpdatePrefetchPreload method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdatePrefetchPreloadWithContext(ctx context.Context, updatePrefetchPreloadOptions *UpdatePrefetchPreloadOptions) (result *PrefetchPreloadResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updatePrefetchPreloadOptions, "updatePrefetchPreloadOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/prefetch_preload`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updatePrefetchPreloadOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdatePrefetchPreload") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updatePrefetchPreloadOptions.Value != nil { + body["value"] = updatePrefetchPreloadOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPrefetchPreloadResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetHttp2 : Get http/2 setting +// Get http/2 setting for a zone. 
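+//
+// A cancellation sketch: each method also has a WithContext form, so a
+// deadline can be attached with the standard library context and time
+// packages (client variable and package alias remain illustrative):
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//   defer cancel()
+//   result, _, err := zonesSettings.GetHttp2WithContext(ctx, &zonessettingsv1.GetHttp2Options{})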
+func (zonesSettings *ZonesSettingsV1) GetHttp2(getHttp2Options *GetHttp2Options) (result *Http2Resp, response *core.DetailedResponse, err error) { + return zonesSettings.GetHttp2WithContext(context.Background(), getHttp2Options) +} + +// GetHttp2WithContext is an alternate form of the GetHttp2 method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetHttp2WithContext(ctx context.Context, getHttp2Options *GetHttp2Options) (result *Http2Resp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getHttp2Options, "getHttp2Options") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/http2`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getHttp2Options.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetHttp2") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalHttp2Resp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateHttp2 : Update http/2 setting +// Update http/2 setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateHttp2(updateHttp2Options *UpdateHttp2Options) (result *Http2Resp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateHttp2WithContext(context.Background(), updateHttp2Options) +} + +// UpdateHttp2WithContext is an alternate form of the UpdateHttp2 method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateHttp2WithContext(ctx context.Context, updateHttp2Options *UpdateHttp2Options) (result *Http2Resp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateHttp2Options, "updateHttp2Options") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/http2`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateHttp2Options.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateHttp2") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateHttp2Options.Value != nil { + body["value"] = updateHttp2Options.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalHttp2Resp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetIpv6 : Get IPv6 compatibility setting +// Get IPv6 compatibility setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetIpv6(getIpv6Options *GetIpv6Options) (result *Ipv6Resp, response *core.DetailedResponse, err error) { + return zonesSettings.GetIpv6WithContext(context.Background(), getIpv6Options) +} + +// GetIpv6WithContext is an alternate form of the GetIpv6 method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetIpv6WithContext(ctx context.Context, getIpv6Options *GetIpv6Options) (result *Ipv6Resp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getIpv6Options, "getIpv6Options") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/ipv6`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getIpv6Options.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetIpv6") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIpv6Resp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateIpv6 : Update IPv6 compatibility setting +// Update IPv6 compatibility setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateIpv6(updateIpv6Options *UpdateIpv6Options) (result *Ipv6Resp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateIpv6WithContext(context.Background(), updateIpv6Options) +} + +// UpdateIpv6WithContext is an alternate form of the UpdateIpv6 method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateIpv6WithContext(ctx context.Context, updateIpv6Options *UpdateIpv6Options) (result *Ipv6Resp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateIpv6Options, "updateIpv6Options") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/ipv6`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateIpv6Options.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateIpv6") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateIpv6Options.Value != nil { + body["value"] = updateIpv6Options.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIpv6Resp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWebSockets : Get web sockets setting +// Get web sockets setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetWebSockets(getWebSocketsOptions *GetWebSocketsOptions) (result *WebsocketsResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetWebSocketsWithContext(context.Background(), getWebSocketsOptions) +} + +// GetWebSocketsWithContext is an alternate form of the GetWebSockets method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetWebSocketsWithContext(ctx context.Context, getWebSocketsOptions *GetWebSocketsOptions) (result *WebsocketsResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getWebSocketsOptions, "getWebSocketsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/websockets`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWebSocketsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetWebSockets") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWebsocketsResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateWebSockets : Update web sockets setting +// Update web sockets setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateWebSockets(updateWebSocketsOptions *UpdateWebSocketsOptions) (result *WebsocketsResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateWebSocketsWithContext(context.Background(), updateWebSocketsOptions) +} + +// UpdateWebSocketsWithContext is an alternate form of the UpdateWebSockets method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateWebSocketsWithContext(ctx context.Context, updateWebSocketsOptions *UpdateWebSocketsOptions) (result *WebsocketsResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateWebSocketsOptions, "updateWebSocketsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/websockets`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateWebSocketsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateWebSockets") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateWebSocketsOptions.Value != nil { + body["value"] = updateWebSocketsOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWebsocketsResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetPseudoIpv4 : Get pseudo IPv4 setting +// Get pseudo IPv4 setting for a zone. 
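+//
+// A response-inspection sketch: the second return value is a
+// *core.DetailedResponse carrying the HTTP status code alongside the typed
+// result (client variable and package alias are illustrative):
+//
+//   result, response, err := zonesSettings.GetPseudoIpv4(&zonessettingsv1.GetPseudoIpv4Options{})
+//   if err == nil && response.StatusCode == 200 {
+//       fmt.Printf("%+v\n", *result)
+//   }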
+func (zonesSettings *ZonesSettingsV1) GetPseudoIpv4(getPseudoIpv4Options *GetPseudoIpv4Options) (result *PseudoIpv4Resp, response *core.DetailedResponse, err error) { + return zonesSettings.GetPseudoIpv4WithContext(context.Background(), getPseudoIpv4Options) +} + +// GetPseudoIpv4WithContext is an alternate form of the GetPseudoIpv4 method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetPseudoIpv4WithContext(ctx context.Context, getPseudoIpv4Options *GetPseudoIpv4Options) (result *PseudoIpv4Resp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getPseudoIpv4Options, "getPseudoIpv4Options") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/pseudo_ipv4`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getPseudoIpv4Options.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetPseudoIpv4") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPseudoIpv4Resp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdatePseudoIpv4 : Update pseudo IPv4 setting +// Update pseudo IPv4 setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdatePseudoIpv4(updatePseudoIpv4Options *UpdatePseudoIpv4Options) (result *PseudoIpv4Resp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdatePseudoIpv4WithContext(context.Background(), updatePseudoIpv4Options) +} + +// UpdatePseudoIpv4WithContext is an alternate form of the UpdatePseudoIpv4 method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdatePseudoIpv4WithContext(ctx context.Context, updatePseudoIpv4Options *UpdatePseudoIpv4Options) (result *PseudoIpv4Resp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updatePseudoIpv4Options, "updatePseudoIpv4Options") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/pseudo_ipv4`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updatePseudoIpv4Options.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdatePseudoIpv4") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updatePseudoIpv4Options.Value != nil { + body["value"] = updatePseudoIpv4Options.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPseudoIpv4Resp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetResponseBuffering : Get response buffering setting +// Get response buffering setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetResponseBuffering(getResponseBufferingOptions *GetResponseBufferingOptions) (result *ResponseBufferingResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetResponseBufferingWithContext(context.Background(), getResponseBufferingOptions) +} + +// GetResponseBufferingWithContext is an alternate form of the GetResponseBuffering method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetResponseBufferingWithContext(ctx context.Context, getResponseBufferingOptions *GetResponseBufferingOptions) (result *ResponseBufferingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getResponseBufferingOptions, "getResponseBufferingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/response_buffering`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getResponseBufferingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetResponseBuffering") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalResponseBufferingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateResponseBuffering : Update response buffering setting +// Update response buffering setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateResponseBuffering(updateResponseBufferingOptions *UpdateResponseBufferingOptions) (result *ResponseBufferingResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateResponseBufferingWithContext(context.Background(), updateResponseBufferingOptions) +} + +// UpdateResponseBufferingWithContext is an alternate form of the UpdateResponseBuffering method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateResponseBufferingWithContext(ctx context.Context, updateResponseBufferingOptions *UpdateResponseBufferingOptions) (result *ResponseBufferingResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateResponseBufferingOptions, "updateResponseBufferingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/response_buffering`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateResponseBufferingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateResponseBuffering") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateResponseBufferingOptions.Value != nil { + body["value"] = updateResponseBufferingOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalResponseBufferingResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetHotlinkProtection : Get hotlink protection setting +// Get hotlink protection setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetHotlinkProtection(getHotlinkProtectionOptions *GetHotlinkProtectionOptions) (result *HotlinkProtectionResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetHotlinkProtectionWithContext(context.Background(), getHotlinkProtectionOptions) +} + +// GetHotlinkProtectionWithContext is an alternate form of the GetHotlinkProtection method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetHotlinkProtectionWithContext(ctx context.Context, getHotlinkProtectionOptions *GetHotlinkProtectionOptions) (result *HotlinkProtectionResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getHotlinkProtectionOptions, "getHotlinkProtectionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/hotlink_protection`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getHotlinkProtectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetHotlinkProtection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalHotlinkProtectionResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateHotlinkProtection : Update hotlink protection setting +// Update hotlink protection setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateHotlinkProtection(updateHotlinkProtectionOptions *UpdateHotlinkProtectionOptions) (result *HotlinkProtectionResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateHotlinkProtectionWithContext(context.Background(), updateHotlinkProtectionOptions) +} + +// UpdateHotlinkProtectionWithContext is an alternate form of the UpdateHotlinkProtection method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateHotlinkProtectionWithContext(ctx context.Context, updateHotlinkProtectionOptions *UpdateHotlinkProtectionOptions) (result *HotlinkProtectionResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateHotlinkProtectionOptions, "updateHotlinkProtectionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/hotlink_protection`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateHotlinkProtectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateHotlinkProtection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateHotlinkProtectionOptions.Value != nil { + body["value"] = updateHotlinkProtectionOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalHotlinkProtectionResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetMaxUpload : Get maximum upload size setting +// Get maximum upload size setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetMaxUpload(getMaxUploadOptions *GetMaxUploadOptions) (result *MaxUploadResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetMaxUploadWithContext(context.Background(), getMaxUploadOptions) +} + +// GetMaxUploadWithContext is an alternate form of the GetMaxUpload method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetMaxUploadWithContext(ctx context.Context, getMaxUploadOptions *GetMaxUploadOptions) (result *MaxUploadResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getMaxUploadOptions, "getMaxUploadOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/max_upload`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getMaxUploadOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetMaxUpload") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMaxUploadResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateMaxUpload : Update maximum upload size setting +// Update maximum upload size setting for a zone. 
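+// Editor's note: hypothetical sketch; it assumes UpdateMaxUploadOptions carries
+// a *int64 Value, matching the int64 value in the MaxUploadRespResult model
+// later in this file, and that the service accepts a size in megabytes (125 is
+// an illustrative value).
+//
+//	opts := &zonessettingsv1.UpdateMaxUploadOptions{Value: core.Int64Ptr(125)}
+//	result, _, err := zonesSettings.UpdateMaxUpload(opts)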
+func (zonesSettings *ZonesSettingsV1) UpdateMaxUpload(updateMaxUploadOptions *UpdateMaxUploadOptions) (result *MaxUploadResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateMaxUploadWithContext(context.Background(), updateMaxUploadOptions) +} + +// UpdateMaxUploadWithContext is an alternate form of the UpdateMaxUpload method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateMaxUploadWithContext(ctx context.Context, updateMaxUploadOptions *UpdateMaxUploadOptions) (result *MaxUploadResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateMaxUploadOptions, "updateMaxUploadOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/max_upload`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateMaxUploadOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateMaxUpload") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateMaxUploadOptions.Value != nil { + body["value"] = updateMaxUploadOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMaxUploadResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetTlsClientAuth : Get TLS Client Auth setting +// Get TLS Client Auth setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetTlsClientAuth(getTlsClientAuthOptions *GetTlsClientAuthOptions) (result *TlsClientAuthResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetTlsClientAuthWithContext(context.Background(), getTlsClientAuthOptions) +} + +// GetTlsClientAuthWithContext is an alternate form of the GetTlsClientAuth method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetTlsClientAuthWithContext(ctx context.Context, getTlsClientAuthOptions *GetTlsClientAuthOptions) (result *TlsClientAuthResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getTlsClientAuthOptions, "getTlsClientAuthOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/tls_client_auth`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getTlsClientAuthOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetTlsClientAuth") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTlsClientAuthResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateTlsClientAuth : Update TLS Client Auth setting +// Update TLS Client Auth setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateTlsClientAuth(updateTlsClientAuthOptions *UpdateTlsClientAuthOptions) (result *TlsClientAuthResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateTlsClientAuthWithContext(context.Background(), updateTlsClientAuthOptions) +} + +// UpdateTlsClientAuthWithContext is an alternate form of the UpdateTlsClientAuth method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateTlsClientAuthWithContext(ctx context.Context, updateTlsClientAuthOptions *UpdateTlsClientAuthOptions) (result *TlsClientAuthResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateTlsClientAuthOptions, "updateTlsClientAuthOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/tls_client_auth`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateTlsClientAuthOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateTlsClientAuth") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateTlsClientAuthOptions.Value != nil { + body["value"] = updateTlsClientAuthOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTlsClientAuthResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetBrowserCheck : Get browser check setting +// Get browser check setting for a zone. 
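+// Editor's note: hypothetical sketch showing the Editable flag that every
+// *RespResult model in this file carries; it assumes BrowserCheckResp exposes
+// the BrowserCheckRespResult defined later in this file via a Result field.
+//
+//	result, _, err := zonesSettings.GetBrowserCheck(zonesSettings.NewGetBrowserCheckOptions())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if *result.Result.Editable {
+//		// The setting may be changed via UpdateBrowserCheck.
+//	}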
+func (zonesSettings *ZonesSettingsV1) GetBrowserCheck(getBrowserCheckOptions *GetBrowserCheckOptions) (result *BrowserCheckResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetBrowserCheckWithContext(context.Background(), getBrowserCheckOptions) +} + +// GetBrowserCheckWithContext is an alternate form of the GetBrowserCheck method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetBrowserCheckWithContext(ctx context.Context, getBrowserCheckOptions *GetBrowserCheckOptions) (result *BrowserCheckResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getBrowserCheckOptions, "getBrowserCheckOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/browser_check`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getBrowserCheckOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetBrowserCheck") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalBrowserCheckResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateBrowserCheck : Update browser check setting +// Update browser check setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateBrowserCheck(updateBrowserCheckOptions *UpdateBrowserCheckOptions) (result *BrowserCheckResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateBrowserCheckWithContext(context.Background(), updateBrowserCheckOptions) +} + +// UpdateBrowserCheckWithContext is an alternate form of the UpdateBrowserCheck method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateBrowserCheckWithContext(ctx context.Context, updateBrowserCheckOptions *UpdateBrowserCheckOptions) (result *BrowserCheckResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateBrowserCheckOptions, "updateBrowserCheckOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/browser_check`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateBrowserCheckOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateBrowserCheck") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateBrowserCheckOptions.Value != nil { + body["value"] = updateBrowserCheckOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalBrowserCheckResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetEnableErrorPagesOn : Get enable error pages on setting +// Get enable error pages on setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetEnableErrorPagesOn(getEnableErrorPagesOnOptions *GetEnableErrorPagesOnOptions) (result *OriginErrorPagePassThruResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetEnableErrorPagesOnWithContext(context.Background(), getEnableErrorPagesOnOptions) +} + +// GetEnableErrorPagesOnWithContext is an alternate form of the GetEnableErrorPagesOn method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetEnableErrorPagesOnWithContext(ctx context.Context, getEnableErrorPagesOnOptions *GetEnableErrorPagesOnOptions) (result *OriginErrorPagePassThruResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getEnableErrorPagesOnOptions, "getEnableErrorPagesOnOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/origin_error_page_pass_thru`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getEnableErrorPagesOnOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetEnableErrorPagesOn") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOriginErrorPagePassThruResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateEnableErrorPagesOn : Update enable error pages on setting +// Update enable error pages on setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) UpdateEnableErrorPagesOn(updateEnableErrorPagesOnOptions *UpdateEnableErrorPagesOnOptions) (result *OriginErrorPagePassThruResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateEnableErrorPagesOnWithContext(context.Background(), updateEnableErrorPagesOnOptions) +} + +// UpdateEnableErrorPagesOnWithContext is an alternate form of the UpdateEnableErrorPagesOn method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateEnableErrorPagesOnWithContext(ctx context.Context, updateEnableErrorPagesOnOptions *UpdateEnableErrorPagesOnOptions) (result *OriginErrorPagePassThruResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateEnableErrorPagesOnOptions, "updateEnableErrorPagesOnOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/origin_error_page_pass_thru`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateEnableErrorPagesOnOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateEnableErrorPagesOn") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateEnableErrorPagesOnOptions.Value != nil { + body["value"] = updateEnableErrorPagesOnOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOriginErrorPagePassThruResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWebApplicationFirewall : Get web application firewall setting +// Get web application firewall setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetWebApplicationFirewall(getWebApplicationFirewallOptions *GetWebApplicationFirewallOptions) (result *WafResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetWebApplicationFirewallWithContext(context.Background(), getWebApplicationFirewallOptions) +} + +// GetWebApplicationFirewallWithContext is an alternate form of the GetWebApplicationFirewall method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetWebApplicationFirewallWithContext(ctx context.Context, getWebApplicationFirewallOptions *GetWebApplicationFirewallOptions) (result *WafResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getWebApplicationFirewallOptions, "getWebApplicationFirewallOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/waf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWebApplicationFirewallOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetWebApplicationFirewall") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateWebApplicationFirewall : Update web application firewall setting +// A Web Application Firewall (WAF) blocks requests that contain malicious content. 
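+// Editor's note: hypothetical sketch; it assumes UpdateWebApplicationFirewallOptions
+// carries a *string Value accepting "on" or "off".
+//
+//	_, _, err := zonesSettings.UpdateWebApplicationFirewall(
+//		&zonessettingsv1.UpdateWebApplicationFirewallOptions{Value: core.StringPtr("on")})
+//	if err != nil {
+//		log.Fatal(err)
+//	}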
+func (zonesSettings *ZonesSettingsV1) UpdateWebApplicationFirewall(updateWebApplicationFirewallOptions *UpdateWebApplicationFirewallOptions) (result *WafResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateWebApplicationFirewallWithContext(context.Background(), updateWebApplicationFirewallOptions) +} + +// UpdateWebApplicationFirewallWithContext is an alternate form of the UpdateWebApplicationFirewall method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateWebApplicationFirewallWithContext(ctx context.Context, updateWebApplicationFirewallOptions *UpdateWebApplicationFirewallOptions) (result *WafResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateWebApplicationFirewallOptions, "updateWebApplicationFirewallOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/waf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateWebApplicationFirewallOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateWebApplicationFirewall") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateWebApplicationFirewallOptions.Value != nil { + body["value"] = updateWebApplicationFirewallOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWafResp) + if err != nil { + return + } + response.Result = result + + return +} + +// GetCiphers : Get ciphers setting +// Get ciphers setting for a zone. 
+func (zonesSettings *ZonesSettingsV1) GetCiphers(getCiphersOptions *GetCiphersOptions) (result *CiphersResp, response *core.DetailedResponse, err error) { + return zonesSettings.GetCiphersWithContext(context.Background(), getCiphersOptions) +} + +// GetCiphersWithContext is an alternate form of the GetCiphers method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) GetCiphersWithContext(ctx context.Context, getCiphersOptions *GetCiphersOptions) (result *CiphersResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getCiphersOptions, "getCiphersOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/ciphers`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getCiphersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "GetCiphers") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCiphersResp) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateCiphers : Update ciphers setting +// Update ciphers setting for a zone. 
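+// Editor's note: hypothetical sketch; CiphersRespResult.Value (defined later in
+// this file) is a []string, so the Value field on UpdateCiphersOptions is
+// assumed to be a cipher-name slice as well.
+//
+//	result, _, err := zonesSettings.UpdateCiphers(&zonessettingsv1.UpdateCiphersOptions{
+//		Value: []string{"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256"},
+//	})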
+func (zonesSettings *ZonesSettingsV1) UpdateCiphers(updateCiphersOptions *UpdateCiphersOptions) (result *CiphersResp, response *core.DetailedResponse, err error) { + return zonesSettings.UpdateCiphersWithContext(context.Background(), updateCiphersOptions) +} + +// UpdateCiphersWithContext is an alternate form of the UpdateCiphers method which supports a Context parameter +func (zonesSettings *ZonesSettingsV1) UpdateCiphersWithContext(ctx context.Context, updateCiphersOptions *UpdateCiphersOptions) (result *CiphersResp, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateCiphersOptions, "updateCiphersOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "crn": *zonesSettings.Crn, + "zone_identifier": *zonesSettings.ZoneIdentifier, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = zonesSettings.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(zonesSettings.Service.Options.URL, `/v1/{crn}/zones/{zone_identifier}/settings/ciphers`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateCiphersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("zones_settings", "V1", "UpdateCiphers") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateCiphersOptions.Value != nil { + body["value"] = updateCiphersOptions.Value + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = zonesSettings.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCiphersResp) + if err != nil { + return + } + response.Result = result + + return +} + +// AlwaysUseHttpsRespResult : Container for response information. +type AlwaysUseHttpsRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalAlwaysUseHttpsRespResult unmarshals an instance of AlwaysUseHttpsRespResult from the specified map of raw messages. +func UnmarshalAlwaysUseHttpsRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AlwaysUseHttpsRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AutomaticHttpsRewritesRespResult : Container for response information. +type AutomaticHttpsRewritesRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. 
+ ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalAutomaticHttpsRewritesRespResult unmarshals an instance of AutomaticHttpsRewritesRespResult from the specified map of raw messages. +func UnmarshalAutomaticHttpsRewritesRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AutomaticHttpsRewritesRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// BrowserCheckRespResult : Container for response information. +type BrowserCheckRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalBrowserCheckRespResult unmarshals an instance of BrowserCheckRespResult from the specified map of raw messages. +func UnmarshalBrowserCheckRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(BrowserCheckRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ChallengeTtlRespResult : Container for response information. +type ChallengeTtlRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *int64 `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalChallengeTtlRespResult unmarshals an instance of ChallengeTtlRespResult from the specified map of raw messages. +func UnmarshalChallengeTtlRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ChallengeTtlRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CiphersRespResult : Container for response information. +type CiphersRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value []string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalCiphersRespResult unmarshals an instance of CiphersRespResult from the specified map of raw messages. 
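+// Editor's note: every Unmarshal* helper in this file follows the same contract:
+// the caller passes a pointer to a nil typed pointer as result, and the helper
+// allocates the model and stores it through reflect.ValueOf(result).Elem().Set.
+// A minimal illustration of that calling convention, with hypothetical input:
+//
+//	var raw map[string]json.RawMessage
+//	_ = json.Unmarshal([]byte(`{"id":"ciphers","value":[],"editable":true,"modified_on":"2021-01-01T00:00:00Z"}`), &raw)
+//	var ciphers *CiphersRespResult
+//	if err := UnmarshalCiphersRespResult(raw, &ciphers); err == nil {
+//		fmt.Println(*ciphers.ID)
+//	}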
+func UnmarshalCiphersRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CiphersRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetAlwaysUseHttpsOptions : The GetAlwaysUseHttps options. +type GetAlwaysUseHttpsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAlwaysUseHttpsOptions : Instantiate GetAlwaysUseHttpsOptions +func (*ZonesSettingsV1) NewGetAlwaysUseHttpsOptions() *GetAlwaysUseHttpsOptions { + return &GetAlwaysUseHttpsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetAlwaysUseHttpsOptions) SetHeaders(param map[string]string) *GetAlwaysUseHttpsOptions { + options.Headers = param + return options +} + +// GetAutomaticHttpsRewritesOptions : The GetAutomaticHttpsRewrites options. +type GetAutomaticHttpsRewritesOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAutomaticHttpsRewritesOptions : Instantiate GetAutomaticHttpsRewritesOptions +func (*ZonesSettingsV1) NewGetAutomaticHttpsRewritesOptions() *GetAutomaticHttpsRewritesOptions { + return &GetAutomaticHttpsRewritesOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetAutomaticHttpsRewritesOptions) SetHeaders(param map[string]string) *GetAutomaticHttpsRewritesOptions { + options.Headers = param + return options +} + +// GetBrowserCheckOptions : The GetBrowserCheck options. +type GetBrowserCheckOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetBrowserCheckOptions : Instantiate GetBrowserCheckOptions +func (*ZonesSettingsV1) NewGetBrowserCheckOptions() *GetBrowserCheckOptions { + return &GetBrowserCheckOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetBrowserCheckOptions) SetHeaders(param map[string]string) *GetBrowserCheckOptions { + options.Headers = param + return options +} + +// GetChallengeTtlOptions : The GetChallengeTTL options. +type GetChallengeTtlOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetChallengeTtlOptions : Instantiate GetChallengeTtlOptions +func (*ZonesSettingsV1) NewGetChallengeTtlOptions() *GetChallengeTtlOptions { + return &GetChallengeTtlOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetChallengeTtlOptions) SetHeaders(param map[string]string) *GetChallengeTtlOptions { + options.Headers = param + return options +} + +// GetCiphersOptions : The GetCiphers options. +type GetCiphersOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetCiphersOptions : Instantiate GetCiphersOptions +func (*ZonesSettingsV1) NewGetCiphersOptions() *GetCiphersOptions { + return &GetCiphersOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetCiphersOptions) SetHeaders(param map[string]string) *GetCiphersOptions { + options.Headers = param + return options +} + +// GetEnableErrorPagesOnOptions : The GetEnableErrorPagesOn options. 
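+// Editor's note: every *Options type in this file exposes the same chainable
+// SetHeaders helper; a hypothetical sketch of attaching a per-request header
+// (the header name and value are illustrative):
+//
+//	opts := zonesSettings.NewGetEnableErrorPagesOnOptions().
+//		SetHeaders(map[string]string{"X-Correlation-Id": "debug-123"})
+//	result, _, err := zonesSettings.GetEnableErrorPagesOn(opts)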
+type GetEnableErrorPagesOnOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetEnableErrorPagesOnOptions : Instantiate GetEnableErrorPagesOnOptions +func (*ZonesSettingsV1) NewGetEnableErrorPagesOnOptions() *GetEnableErrorPagesOnOptions { + return &GetEnableErrorPagesOnOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetEnableErrorPagesOnOptions) SetHeaders(param map[string]string) *GetEnableErrorPagesOnOptions { + options.Headers = param + return options +} + +// GetHotlinkProtectionOptions : The GetHotlinkProtection options. +type GetHotlinkProtectionOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetHotlinkProtectionOptions : Instantiate GetHotlinkProtectionOptions +func (*ZonesSettingsV1) NewGetHotlinkProtectionOptions() *GetHotlinkProtectionOptions { + return &GetHotlinkProtectionOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetHotlinkProtectionOptions) SetHeaders(param map[string]string) *GetHotlinkProtectionOptions { + options.Headers = param + return options +} + +// GetHttp2Options : The GetHttp2 options. +type GetHttp2Options struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetHttp2Options : Instantiate GetHttp2Options +func (*ZonesSettingsV1) NewGetHttp2Options() *GetHttp2Options { + return &GetHttp2Options{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetHttp2Options) SetHeaders(param map[string]string) *GetHttp2Options { + options.Headers = param + return options +} + +// GetImageLoadOptimizationOptions : The GetImageLoadOptimization options. +type GetImageLoadOptimizationOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetImageLoadOptimizationOptions : Instantiate GetImageLoadOptimizationOptions +func (*ZonesSettingsV1) NewGetImageLoadOptimizationOptions() *GetImageLoadOptimizationOptions { + return &GetImageLoadOptimizationOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetImageLoadOptimizationOptions) SetHeaders(param map[string]string) *GetImageLoadOptimizationOptions { + options.Headers = param + return options +} + +// GetImageSizeOptimizationOptions : The GetImageSizeOptimization options. +type GetImageSizeOptimizationOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetImageSizeOptimizationOptions : Instantiate GetImageSizeOptimizationOptions +func (*ZonesSettingsV1) NewGetImageSizeOptimizationOptions() *GetImageSizeOptimizationOptions { + return &GetImageSizeOptimizationOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetImageSizeOptimizationOptions) SetHeaders(param map[string]string) *GetImageSizeOptimizationOptions { + options.Headers = param + return options +} + +// GetIpGeolocationOptions : The GetIpGeolocation options. 
+type GetIpGeolocationOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetIpGeolocationOptions : Instantiate GetIpGeolocationOptions +func (*ZonesSettingsV1) NewGetIpGeolocationOptions() *GetIpGeolocationOptions { + return &GetIpGeolocationOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetIpGeolocationOptions) SetHeaders(param map[string]string) *GetIpGeolocationOptions { + options.Headers = param + return options +} + +// GetIpv6Options : The GetIpv6 options. +type GetIpv6Options struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetIpv6Options : Instantiate GetIpv6Options +func (*ZonesSettingsV1) NewGetIpv6Options() *GetIpv6Options { + return &GetIpv6Options{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetIpv6Options) SetHeaders(param map[string]string) *GetIpv6Options { + options.Headers = param + return options +} + +// GetMaxUploadOptions : The GetMaxUpload options. +type GetMaxUploadOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetMaxUploadOptions : Instantiate GetMaxUploadOptions +func (*ZonesSettingsV1) NewGetMaxUploadOptions() *GetMaxUploadOptions { + return &GetMaxUploadOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetMaxUploadOptions) SetHeaders(param map[string]string) *GetMaxUploadOptions { + options.Headers = param + return options +} + +// GetMinTlsVersionOptions : The GetMinTlsVersion options. +type GetMinTlsVersionOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetMinTlsVersionOptions : Instantiate GetMinTlsVersionOptions +func (*ZonesSettingsV1) NewGetMinTlsVersionOptions() *GetMinTlsVersionOptions { + return &GetMinTlsVersionOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetMinTlsVersionOptions) SetHeaders(param map[string]string) *GetMinTlsVersionOptions { + options.Headers = param + return options +} + +// GetMinifyOptions : The GetMinify options. +type GetMinifyOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetMinifyOptions : Instantiate GetMinifyOptions +func (*ZonesSettingsV1) NewGetMinifyOptions() *GetMinifyOptions { + return &GetMinifyOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetMinifyOptions) SetHeaders(param map[string]string) *GetMinifyOptions { + options.Headers = param + return options +} + +// GetMobileRedirectOptions : The GetMobileRedirect options. +type GetMobileRedirectOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetMobileRedirectOptions : Instantiate GetMobileRedirectOptions +func (*ZonesSettingsV1) NewGetMobileRedirectOptions() *GetMobileRedirectOptions { + return &GetMobileRedirectOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetMobileRedirectOptions) SetHeaders(param map[string]string) *GetMobileRedirectOptions { + options.Headers = param + return options +} + +// GetOpportunisticEncryptionOptions : The GetOpportunisticEncryption options. 
+type GetOpportunisticEncryptionOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetOpportunisticEncryptionOptions : Instantiate GetOpportunisticEncryptionOptions +func (*ZonesSettingsV1) NewGetOpportunisticEncryptionOptions() *GetOpportunisticEncryptionOptions { + return &GetOpportunisticEncryptionOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetOpportunisticEncryptionOptions) SetHeaders(param map[string]string) *GetOpportunisticEncryptionOptions { + options.Headers = param + return options +} + +// GetPrefetchPreloadOptions : The GetPrefetchPreload options. +type GetPrefetchPreloadOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetPrefetchPreloadOptions : Instantiate GetPrefetchPreloadOptions +func (*ZonesSettingsV1) NewGetPrefetchPreloadOptions() *GetPrefetchPreloadOptions { + return &GetPrefetchPreloadOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetPrefetchPreloadOptions) SetHeaders(param map[string]string) *GetPrefetchPreloadOptions { + options.Headers = param + return options +} + +// GetPseudoIpv4Options : The GetPseudoIpv4 options. +type GetPseudoIpv4Options struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetPseudoIpv4Options : Instantiate GetPseudoIpv4Options +func (*ZonesSettingsV1) NewGetPseudoIpv4Options() *GetPseudoIpv4Options { + return &GetPseudoIpv4Options{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetPseudoIpv4Options) SetHeaders(param map[string]string) *GetPseudoIpv4Options { + options.Headers = param + return options +} + +// GetResponseBufferingOptions : The GetResponseBuffering options. +type GetResponseBufferingOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetResponseBufferingOptions : Instantiate GetResponseBufferingOptions +func (*ZonesSettingsV1) NewGetResponseBufferingOptions() *GetResponseBufferingOptions { + return &GetResponseBufferingOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetResponseBufferingOptions) SetHeaders(param map[string]string) *GetResponseBufferingOptions { + options.Headers = param + return options +} + +// GetScriptLoadOptimizationOptions : The GetScriptLoadOptimization options. +type GetScriptLoadOptimizationOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetScriptLoadOptimizationOptions : Instantiate GetScriptLoadOptimizationOptions +func (*ZonesSettingsV1) NewGetScriptLoadOptimizationOptions() *GetScriptLoadOptimizationOptions { + return &GetScriptLoadOptimizationOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetScriptLoadOptimizationOptions) SetHeaders(param map[string]string) *GetScriptLoadOptimizationOptions { + options.Headers = param + return options +} + +// GetSecurityHeaderOptions : The GetSecurityHeader options. 
+type GetSecurityHeaderOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSecurityHeaderOptions : Instantiate GetSecurityHeaderOptions +func (*ZonesSettingsV1) NewGetSecurityHeaderOptions() *GetSecurityHeaderOptions { + return &GetSecurityHeaderOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetSecurityHeaderOptions) SetHeaders(param map[string]string) *GetSecurityHeaderOptions { + options.Headers = param + return options +} + +// GetServerSideExcludeOptions : The GetServerSideExclude options. +type GetServerSideExcludeOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetServerSideExcludeOptions : Instantiate GetServerSideExcludeOptions +func (*ZonesSettingsV1) NewGetServerSideExcludeOptions() *GetServerSideExcludeOptions { + return &GetServerSideExcludeOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetServerSideExcludeOptions) SetHeaders(param map[string]string) *GetServerSideExcludeOptions { + options.Headers = param + return options +} + +// GetTlsClientAuthOptions : The GetTlsClientAuth options. +type GetTlsClientAuthOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetTlsClientAuthOptions : Instantiate GetTlsClientAuthOptions +func (*ZonesSettingsV1) NewGetTlsClientAuthOptions() *GetTlsClientAuthOptions { + return &GetTlsClientAuthOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetTlsClientAuthOptions) SetHeaders(param map[string]string) *GetTlsClientAuthOptions { + options.Headers = param + return options +} + +// GetTrueClientIpOptions : The GetTrueClientIp options. +type GetTrueClientIpOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetTrueClientIpOptions : Instantiate GetTrueClientIpOptions +func (*ZonesSettingsV1) NewGetTrueClientIpOptions() *GetTrueClientIpOptions { + return &GetTrueClientIpOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetTrueClientIpOptions) SetHeaders(param map[string]string) *GetTrueClientIpOptions { + options.Headers = param + return options +} + +// GetWebApplicationFirewallOptions : The GetWebApplicationFirewall options. +type GetWebApplicationFirewallOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWebApplicationFirewallOptions : Instantiate GetWebApplicationFirewallOptions +func (*ZonesSettingsV1) NewGetWebApplicationFirewallOptions() *GetWebApplicationFirewallOptions { + return &GetWebApplicationFirewallOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetWebApplicationFirewallOptions) SetHeaders(param map[string]string) *GetWebApplicationFirewallOptions { + options.Headers = param + return options +} + +// GetWebSocketsOptions : The GetWebSockets options. +type GetWebSocketsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWebSocketsOptions : Instantiate GetWebSocketsOptions +func (*ZonesSettingsV1) NewGetWebSocketsOptions() *GetWebSocketsOptions { + return &GetWebSocketsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetWebSocketsOptions) SetHeaders(param map[string]string) *GetWebSocketsOptions { + options.Headers = param + return options +} + +// GetZoneCnameFlatteningOptions : The GetZoneCnameFlattening options. 
+type GetZoneCnameFlatteningOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetZoneCnameFlatteningOptions : Instantiate GetZoneCnameFlatteningOptions +func (*ZonesSettingsV1) NewGetZoneCnameFlatteningOptions() *GetZoneCnameFlatteningOptions { + return &GetZoneCnameFlatteningOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetZoneCnameFlatteningOptions) SetHeaders(param map[string]string) *GetZoneCnameFlatteningOptions { + options.Headers = param + return options +} + +// GetZoneDnssecOptions : The GetZoneDnssec options. +type GetZoneDnssecOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetZoneDnssecOptions : Instantiate GetZoneDnssecOptions +func (*ZonesSettingsV1) NewGetZoneDnssecOptions() *GetZoneDnssecOptions { + return &GetZoneDnssecOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetZoneDnssecOptions) SetHeaders(param map[string]string) *GetZoneDnssecOptions { + options.Headers = param + return options +} + +// HotlinkProtectionRespResult : Container for response information. +type HotlinkProtectionRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalHotlinkProtectionRespResult unmarshals an instance of HotlinkProtectionRespResult from the specified map of raw messages. +func UnmarshalHotlinkProtectionRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(HotlinkProtectionRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Http2RespResult : Container for response information. +type Http2RespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalHttp2RespResult unmarshals an instance of Http2RespResult from the specified map of raw messages. +func UnmarshalHttp2RespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Http2RespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageLoadOptimizationRespResult : Container for response information. +type ImageLoadOptimizationRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. 
+ Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalImageLoadOptimizationRespResult unmarshals an instance of ImageLoadOptimizationRespResult from the specified map of raw messages. +func UnmarshalImageLoadOptimizationRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageLoadOptimizationRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageSizeOptimizationRespResult : Container for response information. +type ImageSizeOptimizationRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalImageSizeOptimizationRespResult unmarshals an instance of ImageSizeOptimizationRespResult from the specified map of raw messages. +func UnmarshalImageSizeOptimizationRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageSizeOptimizationRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IpGeolocationRespResult : Container for response information. +type IpGeolocationRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalIpGeolocationRespResult unmarshals an instance of IpGeolocationRespResult from the specified map of raw messages. +func UnmarshalIpGeolocationRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IpGeolocationRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Ipv6RespResult : Container for response information. +type Ipv6RespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. 
+ ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalIpv6RespResult unmarshals an instance of Ipv6RespResult from the specified map of raw messages. +func UnmarshalIpv6RespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Ipv6RespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MaxUploadRespResult : Container for response information. +type MaxUploadRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *int64 `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalMaxUploadRespResult unmarshals an instance of MaxUploadRespResult from the specified map of raw messages. +func UnmarshalMaxUploadRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MaxUploadRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MinTlsVersionRespResult : Container for response information. +type MinTlsVersionRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalMinTlsVersionRespResult unmarshals an instance of MinTlsVersionRespResult from the specified map of raw messages. +func UnmarshalMinTlsVersionRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MinTlsVersionRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MinifyRespResult : Container for response information. +type MinifyRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *MinifyRespResultValue `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalMinifyRespResult unmarshals an instance of MinifyRespResult from the specified map of raw messages. 
+func UnmarshalMinifyRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MinifyRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "value", &obj.Value, UnmarshalMinifyRespResultValue) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MinifyRespResultValue : Value. +type MinifyRespResultValue struct { + // css. + Css *string `json:"css" validate:"required"` + + // html. + HTML *string `json:"html" validate:"required"` + + // js. + Js *string `json:"js" validate:"required"` +} + + +// UnmarshalMinifyRespResultValue unmarshals an instance of MinifyRespResultValue from the specified map of raw messages. +func UnmarshalMinifyRespResultValue(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MinifyRespResultValue) + err = core.UnmarshalPrimitive(m, "css", &obj.Css) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "html", &obj.HTML) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "js", &obj.Js) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MinifySettingValue : Value. +type MinifySettingValue struct { + // Automatically minify all CSS for your website. + Css *string `json:"css" validate:"required"` + + // Automatically minify all HTML for your website. + HTML *string `json:"html" validate:"required"` + + // Automatically minify all JavaScript for your website. + Js *string `json:"js" validate:"required"` +} + +// Constants associated with the MinifySettingValue.Css property. +// Automatically minify all CSS for your website. +const ( + MinifySettingValue_Css_Off = "off" + MinifySettingValue_Css_On = "on" +) + +// Constants associated with the MinifySettingValue.HTML property. +// Automatically minify all HTML for your website. +const ( + MinifySettingValue_HTML_Off = "off" + MinifySettingValue_HTML_On = "on" +) + +// Constants associated with the MinifySettingValue.Js property. +// Automatically minify all JavaScript for your website. +const ( + MinifySettingValue_Js_Off = "off" + MinifySettingValue_Js_On = "on" +) + + +// NewMinifySettingValue : Instantiate MinifySettingValue (Generic Model Constructor) +func (*ZonesSettingsV1) NewMinifySettingValue(css string, html string, js string) (model *MinifySettingValue, err error) { + model = &MinifySettingValue{ + Css: core.StringPtr(css), + HTML: core.StringPtr(html), + Js: core.StringPtr(js), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalMinifySettingValue unmarshals an instance of MinifySettingValue from the specified map of raw messages. +func UnmarshalMinifySettingValue(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MinifySettingValue) + err = core.UnmarshalPrimitive(m, "css", &obj.Css) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "html", &obj.HTML) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "js", &obj.Js) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MobileRedirecSettingValue : Value. +type MobileRedirecSettingValue struct { + // Whether or not the mobile redirection is enabled. 
+ Status *string `json:"status" validate:"required"` + + // Which subdomain prefix you wish to redirect visitors on mobile devices to. + MobileSubdomain *string `json:"mobile_subdomain" validate:"required"` + + // Whether to drop the current page path and redirect to the mobile subdomain URL root or to keep the path and redirect + // to the same page on the mobile subdomain. + StripURI *bool `json:"strip_uri" validate:"required"` +} + +// Constants associated with the MobileRedirecSettingValue.Status property. +// Whether or not the mobile redirection is enabled. +const ( + MobileRedirecSettingValue_Status_Off = "off" + MobileRedirecSettingValue_Status_On = "on" +) + + +// NewMobileRedirecSettingValue : Instantiate MobileRedirecSettingValue (Generic Model Constructor) +func (*ZonesSettingsV1) NewMobileRedirecSettingValue(status string, mobileSubdomain string, stripURI bool) (model *MobileRedirecSettingValue, err error) { + model = &MobileRedirecSettingValue{ + Status: core.StringPtr(status), + MobileSubdomain: core.StringPtr(mobileSubdomain), + StripURI: core.BoolPtr(stripURI), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalMobileRedirecSettingValue unmarshals an instance of MobileRedirecSettingValue from the specified map of raw messages. +func UnmarshalMobileRedirecSettingValue(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MobileRedirecSettingValue) + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mobile_subdomain", &obj.MobileSubdomain) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "strip_uri", &obj.StripURI) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MobileRedirectRespResult : Container for response information. +type MobileRedirectRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *MobileRedirectRespResultValue `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalMobileRedirectRespResult unmarshals an instance of MobileRedirectRespResult from the specified map of raw messages. +func UnmarshalMobileRedirectRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MobileRedirectRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "value", &obj.Value, UnmarshalMobileRedirectRespResultValue) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MobileRedirectRespResultValue : Value. +type MobileRedirectRespResultValue struct { + // Whether or not the mobile redirection is enabled. + Status *string `json:"status" validate:"required"` + + // Which subdomain prefix you wish to redirect visitors on mobile devices to. + MobileSubdomain *string `json:"mobile_subdomain" validate:"required"` + + // Whether to drop the current page path and redirect to the mobile subdomain URL root or to keep the path and redirect + // to the same page on the mobile subdomain. 
+ StripURI *bool `json:"strip_uri" validate:"required"` +} + + +// UnmarshalMobileRedirectRespResultValue unmarshals an instance of MobileRedirectRespResultValue from the specified map of raw messages. +func UnmarshalMobileRedirectRespResultValue(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MobileRedirectRespResultValue) + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mobile_subdomain", &obj.MobileSubdomain) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "strip_uri", &obj.StripURI) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OpportunisticEncryptionRespResult : Container for response information. +type OpportunisticEncryptionRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalOpportunisticEncryptionRespResult unmarshals an instance of OpportunisticEncryptionRespResult from the specified map of raw messages. +func UnmarshalOpportunisticEncryptionRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OpportunisticEncryptionRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OriginErrorPagePassThruRespResult : Container for response information. +type OriginErrorPagePassThruRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalOriginErrorPagePassThruRespResult unmarshals an instance of OriginErrorPagePassThruRespResult from the specified map of raw messages. +func UnmarshalOriginErrorPagePassThruRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OriginErrorPagePassThruRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PrefetchPreloadRespResult : Container for response information. +type PrefetchPreloadRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. 
+ ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalPrefetchPreloadRespResult unmarshals an instance of PrefetchPreloadRespResult from the specified map of raw messages. +func UnmarshalPrefetchPreloadRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PrefetchPreloadRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PseudoIpv4RespResult : Container for response information. +type PseudoIpv4RespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalPseudoIpv4RespResult unmarshals an instance of PseudoIpv4RespResult from the specified map of raw messages. +func UnmarshalPseudoIpv4RespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PseudoIpv4RespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResponseBufferingRespResult : Container for response information. +type ResponseBufferingRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalResponseBufferingRespResult unmarshals an instance of ResponseBufferingRespResult from the specified map of raw messages. +func UnmarshalResponseBufferingRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResponseBufferingRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ScriptLoadOptimizationRespResult : Container for response information. +type ScriptLoadOptimizationRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalScriptLoadOptimizationRespResult unmarshals an instance of ScriptLoadOptimizationRespResult from the specified map of raw messages. 
+func UnmarshalScriptLoadOptimizationRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ScriptLoadOptimizationRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityHeaderRespResult : Container for response information. +type SecurityHeaderRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *SecurityHeaderRespResultValue `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalSecurityHeaderRespResult unmarshals an instance of SecurityHeaderRespResult from the specified map of raw messages. +func UnmarshalSecurityHeaderRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityHeaderRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "value", &obj.Value, UnmarshalSecurityHeaderRespResultValue) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityHeaderRespResultValue : Value. +type SecurityHeaderRespResultValue struct { + // Strict transport security. + StrictTransportSecurity *SecurityHeaderRespResultValueStrictTransportSecurity `json:"strict_transport_security" validate:"required"` +} + + +// UnmarshalSecurityHeaderRespResultValue unmarshals an instance of SecurityHeaderRespResultValue from the specified map of raw messages. +func UnmarshalSecurityHeaderRespResultValue(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityHeaderRespResultValue) + err = core.UnmarshalModel(m, "strict_transport_security", &obj.StrictTransportSecurity, UnmarshalSecurityHeaderRespResultValueStrictTransportSecurity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityHeaderRespResultValueStrictTransportSecurity : Strict transport security. +type SecurityHeaderRespResultValueStrictTransportSecurity struct { + // Whether or not security header is enabled. + Enabled *bool `json:"enabled" validate:"required"` + + // Max age in seconds. + MaxAge *int64 `json:"max_age" validate:"required"` + + // Include all subdomains. + IncludeSubdomains *bool `json:"include_subdomains" validate:"required"` + + // Whether or not to include 'X-Content-Type-Options:nosniff' header. + Nosniff *bool `json:"nosniff" validate:"required"` +} + + +// UnmarshalSecurityHeaderRespResultValueStrictTransportSecurity unmarshals an instance of SecurityHeaderRespResultValueStrictTransportSecurity from the specified map of raw messages. 
+func UnmarshalSecurityHeaderRespResultValueStrictTransportSecurity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityHeaderRespResultValueStrictTransportSecurity) + err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "max_age", &obj.MaxAge) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "include_subdomains", &obj.IncludeSubdomains) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "nosniff", &obj.Nosniff) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityHeaderSettingValue : Value. +type SecurityHeaderSettingValue struct { + // Strict transport security. + StrictTransportSecurity *SecurityHeaderSettingValueStrictTransportSecurity `json:"strict_transport_security" validate:"required"` +} + + +// NewSecurityHeaderSettingValue : Instantiate SecurityHeaderSettingValue (Generic Model Constructor) +func (*ZonesSettingsV1) NewSecurityHeaderSettingValue(strictTransportSecurity *SecurityHeaderSettingValueStrictTransportSecurity) (model *SecurityHeaderSettingValue, err error) { + model = &SecurityHeaderSettingValue{ + StrictTransportSecurity: strictTransportSecurity, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSecurityHeaderSettingValue unmarshals an instance of SecurityHeaderSettingValue from the specified map of raw messages. +func UnmarshalSecurityHeaderSettingValue(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityHeaderSettingValue) + err = core.UnmarshalModel(m, "strict_transport_security", &obj.StrictTransportSecurity, UnmarshalSecurityHeaderSettingValueStrictTransportSecurity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityHeaderSettingValueStrictTransportSecurity : Strict transport security. +type SecurityHeaderSettingValueStrictTransportSecurity struct { + // Whether or not security header is enabled. + Enabled *bool `json:"enabled" validate:"required"` + + // Max age in seconds. + MaxAge *int64 `json:"max_age" validate:"required"` + + // Include all subdomains. + IncludeSubdomains *bool `json:"include_subdomains" validate:"required"` + + // Whether or not to include 'X-Content-Type-Options:nosniff' header. + Nosniff *bool `json:"nosniff" validate:"required"` +} + + +// NewSecurityHeaderSettingValueStrictTransportSecurity : Instantiate SecurityHeaderSettingValueStrictTransportSecurity (Generic Model Constructor) +func (*ZonesSettingsV1) NewSecurityHeaderSettingValueStrictTransportSecurity(enabled bool, maxAge int64, includeSubdomains bool, nosniff bool) (model *SecurityHeaderSettingValueStrictTransportSecurity, err error) { + model = &SecurityHeaderSettingValueStrictTransportSecurity{ + Enabled: core.BoolPtr(enabled), + MaxAge: core.Int64Ptr(maxAge), + IncludeSubdomains: core.BoolPtr(includeSubdomains), + Nosniff: core.BoolPtr(nosniff), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSecurityHeaderSettingValueStrictTransportSecurity unmarshals an instance of SecurityHeaderSettingValueStrictTransportSecurity from the specified map of raw messages. 
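Reviewer note: a minimal sketch of how the two generic model constructors above compose, assuming it compiles inside this package; the helper name is hypothetical and the parameter values are illustrative:

```go
// exampleSecurityHeaderValue builds the nested strict-transport-security model
// first, then wraps it in the setting value. Both constructors validate their
// required parameters via core.ValidateStruct.
func exampleSecurityHeaderValue(zonesSettings *ZonesSettingsV1) (*SecurityHeaderSettingValue, error) {
	sts, err := zonesSettings.NewSecurityHeaderSettingValueStrictTransportSecurity(
		true,  // enabled
		86400, // max_age in seconds
		true,  // include_subdomains
		true,  // nosniff
	)
	if err != nil {
		return nil, err
	}
	return zonesSettings.NewSecurityHeaderSettingValue(sts)
}
```

The resulting value would then be passed to `UpdateSecurityHeaderOptions.SetValue`, defined later in this file.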
+func UnmarshalSecurityHeaderSettingValueStrictTransportSecurity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityHeaderSettingValueStrictTransportSecurity) + err = core.UnmarshalPrimitive(m, "enabled", &obj.Enabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "max_age", &obj.MaxAge) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "include_subdomains", &obj.IncludeSubdomains) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "nosniff", &obj.Nosniff) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ServerSideExcludeRespResult : Container for response information. +type ServerSideExcludeRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalServerSideExcludeRespResult unmarshals an instance of ServerSideExcludeRespResult from the specified map of raw messages. +func UnmarshalServerSideExcludeRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ServerSideExcludeRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TlsClientAuthRespResult : Container for response information. +type TlsClientAuthRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalTlsClientAuthRespResult unmarshals an instance of TlsClientAuthRespResult from the specified map of raw messages. +func UnmarshalTlsClientAuthRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TlsClientAuthRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TrueClientIpRespResult : Container for response information. +type TrueClientIpRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalTrueClientIpRespResult unmarshals an instance of TrueClientIpRespResult from the specified map of raw messages. 
+func UnmarshalTrueClientIpRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TrueClientIpRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateAlwaysUseHttpsOptions : The UpdateAlwaysUseHttps options. +type UpdateAlwaysUseHttpsOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateAlwaysUseHttpsOptions.Value property. +// Value. +const ( + UpdateAlwaysUseHttpsOptions_Value_Off = "off" + UpdateAlwaysUseHttpsOptions_Value_On = "on" +) + +// NewUpdateAlwaysUseHttpsOptions : Instantiate UpdateAlwaysUseHttpsOptions +func (*ZonesSettingsV1) NewUpdateAlwaysUseHttpsOptions() *UpdateAlwaysUseHttpsOptions { + return &UpdateAlwaysUseHttpsOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateAlwaysUseHttpsOptions) SetValue(value string) *UpdateAlwaysUseHttpsOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateAlwaysUseHttpsOptions) SetHeaders(param map[string]string) *UpdateAlwaysUseHttpsOptions { + options.Headers = param + return options +} + +// UpdateAutomaticHttpsRewritesOptions : The UpdateAutomaticHttpsRewrites options. +type UpdateAutomaticHttpsRewritesOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateAutomaticHttpsRewritesOptions.Value property. +// Value. +const ( + UpdateAutomaticHttpsRewritesOptions_Value_Off = "off" + UpdateAutomaticHttpsRewritesOptions_Value_On = "on" +) + +// NewUpdateAutomaticHttpsRewritesOptions : Instantiate UpdateAutomaticHttpsRewritesOptions +func (*ZonesSettingsV1) NewUpdateAutomaticHttpsRewritesOptions() *UpdateAutomaticHttpsRewritesOptions { + return &UpdateAutomaticHttpsRewritesOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateAutomaticHttpsRewritesOptions) SetValue(value string) *UpdateAutomaticHttpsRewritesOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateAutomaticHttpsRewritesOptions) SetHeaders(param map[string]string) *UpdateAutomaticHttpsRewritesOptions { + options.Headers = param + return options +} + +// UpdateBrowserCheckOptions : The UpdateBrowserCheck options. +type UpdateBrowserCheckOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateBrowserCheckOptions.Value property. +// Value. 
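Reviewer note: every simple on/off setting in this file gets the same options shape and fluent setters. A sketch using the always-use-HTTPS variant, assumed to compile inside this package (the helper name and the header key are invented; the corresponding `UpdateAlwaysUseHttps` service method is presumably defined earlier in this generated file and is not shown here):

```go
// exampleAlwaysUseHTTPSOn builds the request options for turning the
// always_use_https setting on, chaining the generated fluent setters.
func exampleAlwaysUseHTTPSOn(zonesSettings *ZonesSettingsV1) *UpdateAlwaysUseHttpsOptions {
	return zonesSettings.NewUpdateAlwaysUseHttpsOptions().
		SetValue(UpdateAlwaysUseHttpsOptions_Value_On).
		SetHeaders(map[string]string{"X-Example-Trace": "demo"}) // illustrative header
}
```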
+const ( + UpdateBrowserCheckOptions_Value_Off = "off" + UpdateBrowserCheckOptions_Value_On = "on" +) + +// NewUpdateBrowserCheckOptions : Instantiate UpdateBrowserCheckOptions +func (*ZonesSettingsV1) NewUpdateBrowserCheckOptions() *UpdateBrowserCheckOptions { + return &UpdateBrowserCheckOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateBrowserCheckOptions) SetValue(value string) *UpdateBrowserCheckOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateBrowserCheckOptions) SetHeaders(param map[string]string) *UpdateBrowserCheckOptions { + options.Headers = param + return options +} + +// UpdateChallengeTtlOptions : The UpdateChallengeTTL options. +type UpdateChallengeTtlOptions struct { + // Value. + Value *int64 `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateChallengeTtlOptions : Instantiate UpdateChallengeTtlOptions +func (*ZonesSettingsV1) NewUpdateChallengeTtlOptions() *UpdateChallengeTtlOptions { + return &UpdateChallengeTtlOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateChallengeTtlOptions) SetValue(value int64) *UpdateChallengeTtlOptions { + options.Value = core.Int64Ptr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateChallengeTtlOptions) SetHeaders(param map[string]string) *UpdateChallengeTtlOptions { + options.Headers = param + return options +} + +// UpdateCiphersOptions : The UpdateCiphers options. +type UpdateCiphersOptions struct { + // Value. + Value []string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateCiphersOptions.Value property. 
+const ( + UpdateCiphersOptions_Value_Aes128GcmSha256 = "AES128-GCM-SHA256" + UpdateCiphersOptions_Value_Aes128Sha = "AES128-SHA" + UpdateCiphersOptions_Value_Aes128Sha256 = "AES128-SHA256" + UpdateCiphersOptions_Value_Aes256GcmSha384 = "AES256-GCM-SHA384" + UpdateCiphersOptions_Value_Aes256Sha = "AES256-SHA" + UpdateCiphersOptions_Value_Aes256Sha256 = "AES256-SHA256" + UpdateCiphersOptions_Value_DesCbc3Sha = "DES-CBC3-SHA" + UpdateCiphersOptions_Value_EcdheEcdsaAes128GcmSha256 = "ECDHE-ECDSA-AES128-GCM-SHA256" + UpdateCiphersOptions_Value_EcdheEcdsaAes128Sha = "ECDHE-ECDSA-AES128-SHA" + UpdateCiphersOptions_Value_EcdheEcdsaAes128Sha256 = "ECDHE-ECDSA-AES128-SHA256" + UpdateCiphersOptions_Value_EcdheEcdsaAes256GcmSha384 = "ECDHE-ECDSA-AES256-GCM-SHA384" + UpdateCiphersOptions_Value_EcdheEcdsaAes256Sha384 = "ECDHE-ECDSA-AES256-SHA384" + UpdateCiphersOptions_Value_EcdheEcdsaChacha20Poly1305 = "ECDHE-ECDSA-CHACHA20-POLY1305" + UpdateCiphersOptions_Value_EcdheRsaAes128GcmSha256 = "ECDHE-RSA-AES128-GCM-SHA256" + UpdateCiphersOptions_Value_EcdheRsaAes128Sha = "ECDHE-RSA-AES128-SHA" + UpdateCiphersOptions_Value_EcdheRsaAes128Sha256 = "ECDHE-RSA-AES128-SHA256" + UpdateCiphersOptions_Value_EcdheRsaAes256GcmSha384 = "ECDHE-RSA-AES256-GCM-SHA384" + UpdateCiphersOptions_Value_EcdheRsaAes256Sha = "ECDHE-RSA-AES256-SHA" + UpdateCiphersOptions_Value_EcdheRsaAes256Sha384 = "ECDHE-RSA-AES256-SHA384" + UpdateCiphersOptions_Value_EcdheRsaChacha20Poly1305 = "ECDHE-RSA-CHACHA20-POLY1305" +) + +// NewUpdateCiphersOptions : Instantiate UpdateCiphersOptions +func (*ZonesSettingsV1) NewUpdateCiphersOptions() *UpdateCiphersOptions { + return &UpdateCiphersOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateCiphersOptions) SetValue(value []string) *UpdateCiphersOptions { + options.Value = value + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateCiphersOptions) SetHeaders(param map[string]string) *UpdateCiphersOptions { + options.Headers = param + return options +} + +// UpdateEnableErrorPagesOnOptions : The UpdateEnableErrorPagesOn options. +type UpdateEnableErrorPagesOnOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateEnableErrorPagesOnOptions.Value property. +// Value. +const ( + UpdateEnableErrorPagesOnOptions_Value_Off = "off" + UpdateEnableErrorPagesOnOptions_Value_On = "on" +) + +// NewUpdateEnableErrorPagesOnOptions : Instantiate UpdateEnableErrorPagesOnOptions +func (*ZonesSettingsV1) NewUpdateEnableErrorPagesOnOptions() *UpdateEnableErrorPagesOnOptions { + return &UpdateEnableErrorPagesOnOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateEnableErrorPagesOnOptions) SetValue(value string) *UpdateEnableErrorPagesOnOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateEnableErrorPagesOnOptions) SetHeaders(param map[string]string) *UpdateEnableErrorPagesOnOptions { + options.Headers = param + return options +} + +// UpdateHotlinkProtectionOptions : The UpdateHotlinkProtection options. +type UpdateHotlinkProtectionOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateHotlinkProtectionOptions.Value property. +// Value. 
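Reviewer note: `UpdateCiphersOptions` is the one list-valued builder in this group; its `SetValue` takes the whole slice at once. A sketch restricting a zone to two modern suites, using the constants declared above (helper name invented, assumed to compile inside this package):

```go
// exampleRestrictCiphers builds the request options that limit the zone to an
// ECDSA and an RSA ChaCha20-Poly1305 suite.
func exampleRestrictCiphers(zonesSettings *ZonesSettingsV1) *UpdateCiphersOptions {
	return zonesSettings.NewUpdateCiphersOptions().SetValue([]string{
		UpdateCiphersOptions_Value_EcdheEcdsaAes128GcmSha256,
		UpdateCiphersOptions_Value_EcdheRsaChacha20Poly1305,
	})
}
```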
+const (
+	UpdateHotlinkProtectionOptions_Value_Off = "off"
+	UpdateHotlinkProtectionOptions_Value_On = "on"
+)
+
+// NewUpdateHotlinkProtectionOptions : Instantiate UpdateHotlinkProtectionOptions
+func (*ZonesSettingsV1) NewUpdateHotlinkProtectionOptions() *UpdateHotlinkProtectionOptions {
+	return &UpdateHotlinkProtectionOptions{}
+}
+
+// SetValue : Allow user to set Value
+func (options *UpdateHotlinkProtectionOptions) SetValue(value string) *UpdateHotlinkProtectionOptions {
+	options.Value = core.StringPtr(value)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateHotlinkProtectionOptions) SetHeaders(param map[string]string) *UpdateHotlinkProtectionOptions {
+	options.Headers = param
+	return options
+}
+
+// UpdateHttp2Options : The UpdateHttp2 options.
+type UpdateHttp2Options struct {
+	// Value.
+	Value *string `json:"value,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the UpdateHttp2Options.Value property.
+// Value.
+const (
+	UpdateHttp2Options_Value_Off = "off"
+	UpdateHttp2Options_Value_On = "on"
+)
+
+// NewUpdateHttp2Options : Instantiate UpdateHttp2Options
+func (*ZonesSettingsV1) NewUpdateHttp2Options() *UpdateHttp2Options {
+	return &UpdateHttp2Options{}
+}
+
+// SetValue : Allow user to set Value
+func (options *UpdateHttp2Options) SetValue(value string) *UpdateHttp2Options {
+	options.Value = core.StringPtr(value)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateHttp2Options) SetHeaders(param map[string]string) *UpdateHttp2Options {
+	options.Headers = param
+	return options
+}
+
+// UpdateImageLoadOptimizationOptions : The UpdateImageLoadOptimization options.
+type UpdateImageLoadOptimizationOptions struct {
+	// Value.
+	Value *string `json:"value,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the UpdateImageLoadOptimizationOptions.Value property.
+// Value.
+const (
+	UpdateImageLoadOptimizationOptions_Value_Off = "off"
+	UpdateImageLoadOptimizationOptions_Value_On = "on"
+)
+
+// NewUpdateImageLoadOptimizationOptions : Instantiate UpdateImageLoadOptimizationOptions
+func (*ZonesSettingsV1) NewUpdateImageLoadOptimizationOptions() *UpdateImageLoadOptimizationOptions {
+	return &UpdateImageLoadOptimizationOptions{}
+}
+
+// SetValue : Allow user to set Value
+func (options *UpdateImageLoadOptimizationOptions) SetValue(value string) *UpdateImageLoadOptimizationOptions {
+	options.Value = core.StringPtr(value)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateImageLoadOptimizationOptions) SetHeaders(param map[string]string) *UpdateImageLoadOptimizationOptions {
+	options.Headers = param
+	return options
+}
+
+// UpdateImageSizeOptimizationOptions : The UpdateImageSizeOptimization options.
+type UpdateImageSizeOptimizationOptions struct {
+	// Valid values are "lossy", "off", "lossless". "lossy" - The file size of JPEG images is reduced using lossy
+	// compression, which may reduce visual quality. "off" - Disable Image Size Optimization. "lossless" - Reduce the size
+	// of image files without impacting visual quality.
+	Value *string `json:"value,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the UpdateImageSizeOptimizationOptions.Value property.
+// Valid values are "lossy", "off", "lossless". "lossy" - The file size of JPEG images is reduced using lossy
+// compression, which may reduce visual quality. "off" - Disable Image Size Optimization. "lossless" - Reduce the size
+// of image files without impacting visual quality.
+const (
+	UpdateImageSizeOptimizationOptions_Value_Lossless = "lossless"
+	UpdateImageSizeOptimizationOptions_Value_Lossy = "lossy"
+	UpdateImageSizeOptimizationOptions_Value_Off = "off"
+)
+
+// NewUpdateImageSizeOptimizationOptions : Instantiate UpdateImageSizeOptimizationOptions
+func (*ZonesSettingsV1) NewUpdateImageSizeOptimizationOptions() *UpdateImageSizeOptimizationOptions {
+	return &UpdateImageSizeOptimizationOptions{}
+}
+
+// SetValue : Allow user to set Value
+func (options *UpdateImageSizeOptimizationOptions) SetValue(value string) *UpdateImageSizeOptimizationOptions {
+	options.Value = core.StringPtr(value)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateImageSizeOptimizationOptions) SetHeaders(param map[string]string) *UpdateImageSizeOptimizationOptions {
+	options.Headers = param
+	return options
+}
+
+// UpdateIpGeolocationOptions : The UpdateIpGeolocation options.
+type UpdateIpGeolocationOptions struct {
+	// Value.
+	Value *string `json:"value,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the UpdateIpGeolocationOptions.Value property.
+// Value.
+const (
+	UpdateIpGeolocationOptions_Value_Off = "off"
+	UpdateIpGeolocationOptions_Value_On = "on"
+)
+
+// NewUpdateIpGeolocationOptions : Instantiate UpdateIpGeolocationOptions
+func (*ZonesSettingsV1) NewUpdateIpGeolocationOptions() *UpdateIpGeolocationOptions {
+	return &UpdateIpGeolocationOptions{}
+}
+
+// SetValue : Allow user to set Value
+func (options *UpdateIpGeolocationOptions) SetValue(value string) *UpdateIpGeolocationOptions {
+	options.Value = core.StringPtr(value)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateIpGeolocationOptions) SetHeaders(param map[string]string) *UpdateIpGeolocationOptions {
+	options.Headers = param
+	return options
+}
+
+// UpdateIpv6Options : The UpdateIpv6 options.
+type UpdateIpv6Options struct {
+	// Value.
+	Value *string `json:"value,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the UpdateIpv6Options.Value property.
+// Value.
+const (
+	UpdateIpv6Options_Value_Off = "off"
+	UpdateIpv6Options_Value_On = "on"
+)
+
+// NewUpdateIpv6Options : Instantiate UpdateIpv6Options
+func (*ZonesSettingsV1) NewUpdateIpv6Options() *UpdateIpv6Options {
+	return &UpdateIpv6Options{}
+}
+
+// SetValue : Allow user to set Value
+func (options *UpdateIpv6Options) SetValue(value string) *UpdateIpv6Options {
+	options.Value = core.StringPtr(value)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateIpv6Options) SetHeaders(param map[string]string) *UpdateIpv6Options {
+	options.Headers = param
+	return options
+}
+
+// UpdateMaxUploadOptions : The UpdateMaxUpload options.
+type UpdateMaxUploadOptions struct {
+	// Valid values(in MB) for "max_upload" are 100, 125, 150, 175, 200, 225, 250, 275, 300, 325, 350, 375, 400, 425, 450,
+	// 475, 500. Values 225, 250, 275, 300, 325, 350, 375, 400, 425, 450, 475, 500 are only for Enterprise Plan.
+ Value *int64 `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateMaxUploadOptions : Instantiate UpdateMaxUploadOptions +func (*ZonesSettingsV1) NewUpdateMaxUploadOptions() *UpdateMaxUploadOptions { + return &UpdateMaxUploadOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateMaxUploadOptions) SetValue(value int64) *UpdateMaxUploadOptions { + options.Value = core.Int64Ptr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateMaxUploadOptions) SetHeaders(param map[string]string) *UpdateMaxUploadOptions { + options.Headers = param + return options +} + +// UpdateMinTlsVersionOptions : The UpdateMinTlsVersion options. +type UpdateMinTlsVersionOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateMinTlsVersionOptions : Instantiate UpdateMinTlsVersionOptions +func (*ZonesSettingsV1) NewUpdateMinTlsVersionOptions() *UpdateMinTlsVersionOptions { + return &UpdateMinTlsVersionOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateMinTlsVersionOptions) SetValue(value string) *UpdateMinTlsVersionOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateMinTlsVersionOptions) SetHeaders(param map[string]string) *UpdateMinTlsVersionOptions { + options.Headers = param + return options +} + +// UpdateMinifyOptions : The UpdateMinify options. +type UpdateMinifyOptions struct { + // Value. + Value *MinifySettingValue `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateMinifyOptions : Instantiate UpdateMinifyOptions +func (*ZonesSettingsV1) NewUpdateMinifyOptions() *UpdateMinifyOptions { + return &UpdateMinifyOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateMinifyOptions) SetValue(value *MinifySettingValue) *UpdateMinifyOptions { + options.Value = value + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateMinifyOptions) SetHeaders(param map[string]string) *UpdateMinifyOptions { + options.Headers = param + return options +} + +// UpdateMobileRedirectOptions : The UpdateMobileRedirect options. +type UpdateMobileRedirectOptions struct { + // Value. + Value *MobileRedirecSettingValue `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateMobileRedirectOptions : Instantiate UpdateMobileRedirectOptions +func (*ZonesSettingsV1) NewUpdateMobileRedirectOptions() *UpdateMobileRedirectOptions { + return &UpdateMobileRedirectOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateMobileRedirectOptions) SetValue(value *MobileRedirecSettingValue) *UpdateMobileRedirectOptions { + options.Value = value + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateMobileRedirectOptions) SetHeaders(param map[string]string) *UpdateMobileRedirectOptions { + options.Headers = param + return options +} + +// UpdateOpportunisticEncryptionOptions : The UpdateOpportunisticEncryption options. +type UpdateOpportunisticEncryptionOptions struct { + // Value. 
+ Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateOpportunisticEncryptionOptions.Value property. +// Value. +const ( + UpdateOpportunisticEncryptionOptions_Value_Off = "off" + UpdateOpportunisticEncryptionOptions_Value_On = "on" +) + +// NewUpdateOpportunisticEncryptionOptions : Instantiate UpdateOpportunisticEncryptionOptions +func (*ZonesSettingsV1) NewUpdateOpportunisticEncryptionOptions() *UpdateOpportunisticEncryptionOptions { + return &UpdateOpportunisticEncryptionOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateOpportunisticEncryptionOptions) SetValue(value string) *UpdateOpportunisticEncryptionOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateOpportunisticEncryptionOptions) SetHeaders(param map[string]string) *UpdateOpportunisticEncryptionOptions { + options.Headers = param + return options +} + +// UpdatePrefetchPreloadOptions : The UpdatePrefetchPreload options. +type UpdatePrefetchPreloadOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdatePrefetchPreloadOptions.Value property. +// Value. +const ( + UpdatePrefetchPreloadOptions_Value_Off = "off" + UpdatePrefetchPreloadOptions_Value_On = "on" +) + +// NewUpdatePrefetchPreloadOptions : Instantiate UpdatePrefetchPreloadOptions +func (*ZonesSettingsV1) NewUpdatePrefetchPreloadOptions() *UpdatePrefetchPreloadOptions { + return &UpdatePrefetchPreloadOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdatePrefetchPreloadOptions) SetValue(value string) *UpdatePrefetchPreloadOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdatePrefetchPreloadOptions) SetHeaders(param map[string]string) *UpdatePrefetchPreloadOptions { + options.Headers = param + return options +} + +// UpdatePseudoIpv4Options : The UpdatePseudoIpv4 options. +type UpdatePseudoIpv4Options struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdatePseudoIpv4Options.Value property. +// Value. +const ( + UpdatePseudoIpv4Options_Value_AddHeader = "add_header" + UpdatePseudoIpv4Options_Value_Off = "off" + UpdatePseudoIpv4Options_Value_OverwriteHeader = "overwrite_header" +) + +// NewUpdatePseudoIpv4Options : Instantiate UpdatePseudoIpv4Options +func (*ZonesSettingsV1) NewUpdatePseudoIpv4Options() *UpdatePseudoIpv4Options { + return &UpdatePseudoIpv4Options{} +} + +// SetValue : Allow user to set Value +func (options *UpdatePseudoIpv4Options) SetValue(value string) *UpdatePseudoIpv4Options { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdatePseudoIpv4Options) SetHeaders(param map[string]string) *UpdatePseudoIpv4Options { + options.Headers = param + return options +} + +// UpdateResponseBufferingOptions : The UpdateResponseBuffering options. +type UpdateResponseBufferingOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateResponseBufferingOptions.Value property. +// Value. 
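Reviewer note: settings with structured values pair a generic model constructor with a model-valued options builder. A sketch for minify, combining `NewMinifySettingValue` (defined earlier in this section) with `NewUpdateMinifyOptions` (defined just above); the helper name is hypothetical and the snippet is assumed to compile inside this package:

```go
// exampleMinifyOptions enables CSS and HTML minification while leaving
// JavaScript untouched, then wraps the value in the update options.
func exampleMinifyOptions(zonesSettings *ZonesSettingsV1) (*UpdateMinifyOptions, error) {
	minify, err := zonesSettings.NewMinifySettingValue(
		MinifySettingValue_Css_On,  // css
		MinifySettingValue_HTML_On, // html
		MinifySettingValue_Js_Off,  // js
	)
	if err != nil {
		return nil, err
	}
	return zonesSettings.NewUpdateMinifyOptions().SetValue(minify), nil
}
```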
+const ( + UpdateResponseBufferingOptions_Value_Off = "off" + UpdateResponseBufferingOptions_Value_On = "on" +) + +// NewUpdateResponseBufferingOptions : Instantiate UpdateResponseBufferingOptions +func (*ZonesSettingsV1) NewUpdateResponseBufferingOptions() *UpdateResponseBufferingOptions { + return &UpdateResponseBufferingOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateResponseBufferingOptions) SetValue(value string) *UpdateResponseBufferingOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateResponseBufferingOptions) SetHeaders(param map[string]string) *UpdateResponseBufferingOptions { + options.Headers = param + return options +} + +// UpdateScriptLoadOptimizationOptions : The UpdateScriptLoadOptimization options. +type UpdateScriptLoadOptimizationOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateScriptLoadOptimizationOptions.Value property. +// Value. +const ( + UpdateScriptLoadOptimizationOptions_Value_Off = "off" + UpdateScriptLoadOptimizationOptions_Value_On = "on" +) + +// NewUpdateScriptLoadOptimizationOptions : Instantiate UpdateScriptLoadOptimizationOptions +func (*ZonesSettingsV1) NewUpdateScriptLoadOptimizationOptions() *UpdateScriptLoadOptimizationOptions { + return &UpdateScriptLoadOptimizationOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateScriptLoadOptimizationOptions) SetValue(value string) *UpdateScriptLoadOptimizationOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateScriptLoadOptimizationOptions) SetHeaders(param map[string]string) *UpdateScriptLoadOptimizationOptions { + options.Headers = param + return options +} + +// UpdateSecurityHeaderOptions : The UpdateSecurityHeader options. +type UpdateSecurityHeaderOptions struct { + // Value. + Value *SecurityHeaderSettingValue `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateSecurityHeaderOptions : Instantiate UpdateSecurityHeaderOptions +func (*ZonesSettingsV1) NewUpdateSecurityHeaderOptions() *UpdateSecurityHeaderOptions { + return &UpdateSecurityHeaderOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateSecurityHeaderOptions) SetValue(value *SecurityHeaderSettingValue) *UpdateSecurityHeaderOptions { + options.Value = value + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSecurityHeaderOptions) SetHeaders(param map[string]string) *UpdateSecurityHeaderOptions { + options.Headers = param + return options +} + +// UpdateServerSideExcludeOptions : The UpdateServerSideExclude options. +type UpdateServerSideExcludeOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateServerSideExcludeOptions.Value property. +// Value. 
+const ( + UpdateServerSideExcludeOptions_Value_Off = "off" + UpdateServerSideExcludeOptions_Value_On = "on" +) + +// NewUpdateServerSideExcludeOptions : Instantiate UpdateServerSideExcludeOptions +func (*ZonesSettingsV1) NewUpdateServerSideExcludeOptions() *UpdateServerSideExcludeOptions { + return &UpdateServerSideExcludeOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateServerSideExcludeOptions) SetValue(value string) *UpdateServerSideExcludeOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateServerSideExcludeOptions) SetHeaders(param map[string]string) *UpdateServerSideExcludeOptions { + options.Headers = param + return options +} + +// UpdateTlsClientAuthOptions : The UpdateTlsClientAuth options. +type UpdateTlsClientAuthOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateTlsClientAuthOptions.Value property. +// Value. +const ( + UpdateTlsClientAuthOptions_Value_Off = "off" + UpdateTlsClientAuthOptions_Value_On = "on" +) + +// NewUpdateTlsClientAuthOptions : Instantiate UpdateTlsClientAuthOptions +func (*ZonesSettingsV1) NewUpdateTlsClientAuthOptions() *UpdateTlsClientAuthOptions { + return &UpdateTlsClientAuthOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateTlsClientAuthOptions) SetValue(value string) *UpdateTlsClientAuthOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateTlsClientAuthOptions) SetHeaders(param map[string]string) *UpdateTlsClientAuthOptions { + options.Headers = param + return options +} + +// UpdateTrueClientIpOptions : The UpdateTrueClientIp options. +type UpdateTrueClientIpOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateTrueClientIpOptions.Value property. +// Value. +const ( + UpdateTrueClientIpOptions_Value_Off = "off" + UpdateTrueClientIpOptions_Value_On = "on" +) + +// NewUpdateTrueClientIpOptions : Instantiate UpdateTrueClientIpOptions +func (*ZonesSettingsV1) NewUpdateTrueClientIpOptions() *UpdateTrueClientIpOptions { + return &UpdateTrueClientIpOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateTrueClientIpOptions) SetValue(value string) *UpdateTrueClientIpOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateTrueClientIpOptions) SetHeaders(param map[string]string) *UpdateTrueClientIpOptions { + options.Headers = param + return options +} + +// UpdateWebApplicationFirewallOptions : The UpdateWebApplicationFirewall options. +type UpdateWebApplicationFirewallOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateWebApplicationFirewallOptions.Value property. +// Value. 
+const ( + UpdateWebApplicationFirewallOptions_Value_Off = "off" + UpdateWebApplicationFirewallOptions_Value_On = "on" +) + +// NewUpdateWebApplicationFirewallOptions : Instantiate UpdateWebApplicationFirewallOptions +func (*ZonesSettingsV1) NewUpdateWebApplicationFirewallOptions() *UpdateWebApplicationFirewallOptions { + return &UpdateWebApplicationFirewallOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateWebApplicationFirewallOptions) SetValue(value string) *UpdateWebApplicationFirewallOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateWebApplicationFirewallOptions) SetHeaders(param map[string]string) *UpdateWebApplicationFirewallOptions { + options.Headers = param + return options +} + +// UpdateWebSocketsOptions : The UpdateWebSockets options. +type UpdateWebSocketsOptions struct { + // Value. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateWebSocketsOptions.Value property. +// Value. +const ( + UpdateWebSocketsOptions_Value_Off = "off" + UpdateWebSocketsOptions_Value_On = "on" +) + +// NewUpdateWebSocketsOptions : Instantiate UpdateWebSocketsOptions +func (*ZonesSettingsV1) NewUpdateWebSocketsOptions() *UpdateWebSocketsOptions { + return &UpdateWebSocketsOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateWebSocketsOptions) SetValue(value string) *UpdateWebSocketsOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateWebSocketsOptions) SetHeaders(param map[string]string) *UpdateWebSocketsOptions { + options.Headers = param + return options +} + +// UpdateZoneCnameFlatteningOptions : The UpdateZoneCnameFlattening options. +type UpdateZoneCnameFlatteningOptions struct { + // Valid values are "flatten_at_root", "flatten_all". "flatten_at_root" - Flatten CNAME at root domain. This is the + // default value. "flatten_all" - Flatten all CNAME records under your domain. + Value *string `json:"value,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateZoneCnameFlatteningOptions.Value property. +// Valid values are "flatten_at_root", "flatten_all". "flatten_at_root" - Flatten CNAME at root domain. This is the +// default value. "flatten_all" - Flatten all CNAME records under your domain. +const ( + UpdateZoneCnameFlatteningOptions_Value_FlattenAll = "flatten_all" + UpdateZoneCnameFlatteningOptions_Value_FlattenAtRoot = "flatten_at_root" +) + +// NewUpdateZoneCnameFlatteningOptions : Instantiate UpdateZoneCnameFlatteningOptions +func (*ZonesSettingsV1) NewUpdateZoneCnameFlatteningOptions() *UpdateZoneCnameFlatteningOptions { + return &UpdateZoneCnameFlatteningOptions{} +} + +// SetValue : Allow user to set Value +func (options *UpdateZoneCnameFlatteningOptions) SetValue(value string) *UpdateZoneCnameFlatteningOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateZoneCnameFlatteningOptions) SetHeaders(param map[string]string) *UpdateZoneCnameFlatteningOptions { + options.Headers = param + return options +} + +// UpdateZoneDnssecOptions : The UpdateZoneDnssec options. +type UpdateZoneDnssecOptions struct { + // Status. 
+ Status *string `json:"status,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateZoneDnssecOptions.Status property. +// Status. +const ( + UpdateZoneDnssecOptions_Status_Active = "active" + UpdateZoneDnssecOptions_Status_Disabled = "disabled" +) + +// NewUpdateZoneDnssecOptions : Instantiate UpdateZoneDnssecOptions +func (*ZonesSettingsV1) NewUpdateZoneDnssecOptions() *UpdateZoneDnssecOptions { + return &UpdateZoneDnssecOptions{} +} + +// SetStatus : Allow user to set Status +func (options *UpdateZoneDnssecOptions) SetStatus(status string) *UpdateZoneDnssecOptions { + options.Status = core.StringPtr(status) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateZoneDnssecOptions) SetHeaders(param map[string]string) *UpdateZoneDnssecOptions { + options.Headers = param + return options +} + +// WafRespResult : Container for response information. +type WafRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalWafRespResult unmarshals an instance of WafRespResult from the specified map of raw messages. +func UnmarshalWafRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WebsocketsRespResult : Container for response information. +type WebsocketsRespResult struct { + // ID. + ID *string `json:"id" validate:"required"` + + // Value. + Value *string `json:"value" validate:"required"` + + // Editable. + Editable *bool `json:"editable" validate:"required"` + + // Modified date. + ModifiedOn *strfmt.DateTime `json:"modified_on" validate:"required"` +} + + +// UnmarshalWebsocketsRespResult unmarshals an instance of WebsocketsRespResult from the specified map of raw messages. +func UnmarshalWebsocketsRespResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WebsocketsRespResult) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZonesDnssecRespResult : Container for response information. +type ZonesDnssecRespResult struct { + // Status. + Status *string `json:"status,omitempty"` + + // Flags. + Flags *int64 `json:"flags,omitempty"` + + // Algorithm. + Algorithm *string `json:"algorithm,omitempty"` + + // Key type. + KeyType *string `json:"key_type,omitempty"` + + // Digest type. + DigestType *string `json:"digest_type,omitempty"` + + // Digest algorithm. + DigestAlgorithm *string `json:"digest_algorithm,omitempty"` + + // Digest. 
+	Digest *string `json:"digest,omitempty"`
+
+	// DS.
+	Ds *string `json:"ds,omitempty"`
+
+	// Key tag.
+	KeyTag *int64 `json:"key_tag,omitempty"`
+
+	// Public key.
+	PublicKey *string `json:"public_key,omitempty"`
+}
+
+// Constants associated with the ZonesDnssecRespResult.Status property.
+// Status.
+const (
+	ZonesDnssecRespResult_Status_Active = "active"
+	ZonesDnssecRespResult_Status_Disabled = "disabled"
+	ZonesDnssecRespResult_Status_Error = "error"
+	ZonesDnssecRespResult_Status_Pending = "pending"
+	ZonesDnssecRespResult_Status_PendingDisabled = "pending-disabled"
+)
+
+
+// UnmarshalZonesDnssecRespResult unmarshals an instance of ZonesDnssecRespResult from the specified map of raw messages.
+func UnmarshalZonesDnssecRespResult(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ZonesDnssecRespResult)
+	err = core.UnmarshalPrimitive(m, "status", &obj.Status)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "flags", &obj.Flags)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "algorithm", &obj.Algorithm)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "key_type", &obj.KeyType)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "digest_type", &obj.DigestType)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "digest_algorithm", &obj.DigestAlgorithm)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "digest", &obj.Digest)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "ds", &obj.Ds)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "key_tag", &obj.KeyTag)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "public_key", &obj.PublicKey)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// AlwaysUseHttpsResp : Always use HTTPS response.
+type AlwaysUseHttpsResp struct {
+	// Container for response information.
+	Result *AlwaysUseHttpsRespResult `json:"result" validate:"required"`
+
+	// Was the get successful.
+	Success *bool `json:"success" validate:"required"`
+
+	// Array of errors encountered.
+	Errors [][]string `json:"errors" validate:"required"`
+
+	// Array of messages returned.
+	Messages [][]string `json:"messages" validate:"required"`
+}
+
+
+// UnmarshalAlwaysUseHttpsResp unmarshals an instance of AlwaysUseHttpsResp from the specified map of raw messages.
+func UnmarshalAlwaysUseHttpsResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(AlwaysUseHttpsResp)
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalAlwaysUseHttpsRespResult)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// AutomaticHttpsRewritesResp : automatic https rewrite response.
+type AutomaticHttpsRewritesResp struct {
+	// Container for response information.
+	Result *AutomaticHttpsRewritesRespResult `json:"result" validate:"required"`
+
+	// Was the get successful.
+	Success *bool `json:"success" validate:"required"`
+
+	// Array of errors encountered.
+	Errors [][]string `json:"errors" validate:"required"`
+
+	// Array of messages returned.
+ Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalAutomaticHttpsRewritesResp unmarshals an instance of AutomaticHttpsRewritesResp from the specified map of raw messages. +func UnmarshalAutomaticHttpsRewritesResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AutomaticHttpsRewritesResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalAutomaticHttpsRewritesRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// BrowserCheckResp : Browser Check response. +type BrowserCheckResp struct { + // Container for response information. + Result *BrowserCheckRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalBrowserCheckResp unmarshals an instance of BrowserCheckResp from the specified map of raw messages. +func UnmarshalBrowserCheckResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(BrowserCheckResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalBrowserCheckRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ChallengeTtlResp : challenge TTL response. +type ChallengeTtlResp struct { + // Container for response information. + Result *ChallengeTtlRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalChallengeTtlResp unmarshals an instance of ChallengeTtlResp from the specified map of raw messages. +func UnmarshalChallengeTtlResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ChallengeTtlResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalChallengeTtlRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CiphersResp : Ciphers response. +type CiphersResp struct { + // Container for response information. + Result *CiphersRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. 
+ Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalCiphersResp unmarshals an instance of CiphersResp from the specified map of raw messages. +func UnmarshalCiphersResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CiphersResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalCiphersRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CnameFlatteningResponse : CNAME Flattening response. +type CnameFlatteningResponse struct { + // id. + ID *string `json:"id,omitempty"` + + // value. + Value *string `json:"value,omitempty"` + + // Date when it is modified. + ModifiedOn *strfmt.DateTime `json:"modified_on,omitempty"` + + // editable. + Editable *bool `json:"editable,omitempty"` +} + +// Constants associated with the CnameFlatteningResponse.Value property. +// value. +const ( + CnameFlatteningResponse_Value_FlattenAll = "flatten_all" + CnameFlatteningResponse_Value_FlattenAtRoot = "flatten_at_root" +) + + +// UnmarshalCnameFlatteningResponse unmarshals an instance of CnameFlatteningResponse from the specified map of raw messages. +func UnmarshalCnameFlatteningResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CnameFlatteningResponse) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_on", &obj.ModifiedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "editable", &obj.Editable) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// HotlinkProtectionResp : Hotlink Protection response. +type HotlinkProtectionResp struct { + // Container for response information. + Result *HotlinkProtectionRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalHotlinkProtectionResp unmarshals an instance of HotlinkProtectionResp from the specified map of raw messages. +func UnmarshalHotlinkProtectionResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(HotlinkProtectionResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalHotlinkProtectionRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Http2Resp : HTTP2 Response. +type Http2Resp struct { + // Container for response information. + Result *Http2RespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. 
+ Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalHttp2Resp unmarshals an instance of Http2Resp from the specified map of raw messages. +func UnmarshalHttp2Resp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Http2Resp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalHttp2RespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageLoadOptimizationResp : Image Load Optimization response. +type ImageLoadOptimizationResp struct { + // Container for response information. + Result *ImageLoadOptimizationRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalImageLoadOptimizationResp unmarshals an instance of ImageLoadOptimizationResp from the specified map of raw messages. +func UnmarshalImageLoadOptimizationResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageLoadOptimizationResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalImageLoadOptimizationRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageSizeOptimizationResp : Image size optimization response. +type ImageSizeOptimizationResp struct { + // Container for response information. + Result *ImageSizeOptimizationRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalImageSizeOptimizationResp unmarshals an instance of ImageSizeOptimizationResp from the specified map of raw messages. +func UnmarshalImageSizeOptimizationResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageSizeOptimizationResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalImageSizeOptimizationRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IpGeolocationResp : IP Geolocation response. +type IpGeolocationResp struct { + // Container for response information. 
+ Result *IpGeolocationRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalIpGeolocationResp unmarshals an instance of IpGeolocationResp from the specified map of raw messages. +func UnmarshalIpGeolocationResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IpGeolocationResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalIpGeolocationRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Ipv6Resp : IPv6 Response. +type Ipv6Resp struct { + // Container for response information. + Result *Ipv6RespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalIpv6Resp unmarshals an instance of Ipv6Resp from the specified map of raw messages. +func UnmarshalIpv6Resp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Ipv6Resp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalIpv6RespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MaxUploadResp : Maximum upload response. +type MaxUploadResp struct { + // Container for response information. + Result *MaxUploadRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalMaxUploadResp unmarshals an instance of MaxUploadResp from the specified map of raw messages. +func UnmarshalMaxUploadResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MaxUploadResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalMaxUploadRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MinTlsVersionResp : Minimum TLS Version response. +type MinTlsVersionResp struct { + // Container for response information. + Result *MinTlsVersionRespResult `json:"result" validate:"required"` + + // Was the get successful. 
+	Success *bool `json:"success" validate:"required"`
+
+	// Array of errors encountered.
+	Errors [][]string `json:"errors" validate:"required"`
+
+	// Array of messages returned.
+	Messages [][]string `json:"messages" validate:"required"`
+}
+
+
+// UnmarshalMinTlsVersionResp unmarshals an instance of MinTlsVersionResp from the specified map of raw messages.
+func UnmarshalMinTlsVersionResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(MinTlsVersionResp)
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalMinTlsVersionRespResult)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// MinifyResp : Minify response.
+type MinifyResp struct {
+	// Container for response information.
+	Result *MinifyRespResult `json:"result" validate:"required"`
+
+	// Was the get successful.
+	Success *bool `json:"success" validate:"required"`
+
+	// Array of errors encountered.
+	Errors [][]string `json:"errors" validate:"required"`
+
+	// Array of messages returned.
+	Messages [][]string `json:"messages" validate:"required"`
+}
+
+
+// UnmarshalMinifyResp unmarshals an instance of MinifyResp from the specified map of raw messages.
+func UnmarshalMinifyResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(MinifyResp)
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalMinifyRespResult)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// MobileRedirectResp : Mobile Redirect Response.
+type MobileRedirectResp struct {
+	// Container for response information.
+	Result *MobileRedirectRespResult `json:"result" validate:"required"`
+
+	// Was the get successful.
+	Success *bool `json:"success" validate:"required"`
+
+	// Array of errors encountered.
+	Errors [][]string `json:"errors" validate:"required"`
+
+	// Array of messages returned.
+	Messages [][]string `json:"messages" validate:"required"`
+}
+
+
+// UnmarshalMobileRedirectResp unmarshals an instance of MobileRedirectResp from the specified map of raw messages.
+func UnmarshalMobileRedirectResp(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(MobileRedirectResp)
+	err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalMobileRedirectRespResult)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messages", &obj.Messages)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// OpportunisticEncryptionResp : Opportunistic encryption response.
+type OpportunisticEncryptionResp struct {
+	// Container for response information.
+	Result *OpportunisticEncryptionRespResult `json:"result" validate:"required"`
+
+	// Was the get successful.
+ Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalOpportunisticEncryptionResp unmarshals an instance of OpportunisticEncryptionResp from the specified map of raw messages. +func UnmarshalOpportunisticEncryptionResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OpportunisticEncryptionResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalOpportunisticEncryptionRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OriginErrorPagePassThruResp : origin error page pass through response. +type OriginErrorPagePassThruResp struct { + // Container for response information. + Result *OriginErrorPagePassThruRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalOriginErrorPagePassThruResp unmarshals an instance of OriginErrorPagePassThruResp from the specified map of raw messages. +func UnmarshalOriginErrorPagePassThruResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OriginErrorPagePassThruResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalOriginErrorPagePassThruRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PrefetchPreloadResp : Prefetch & Preload Response. +type PrefetchPreloadResp struct { + // Container for response information. + Result *PrefetchPreloadRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalPrefetchPreloadResp unmarshals an instance of PrefetchPreloadResp from the specified map of raw messages. +func UnmarshalPrefetchPreloadResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PrefetchPreloadResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalPrefetchPreloadRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PseudoIpv4Resp : Pseudo ipv4 response. 
+type PseudoIpv4Resp struct { + // Container for response information. + Result *PseudoIpv4RespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalPseudoIpv4Resp unmarshals an instance of PseudoIpv4Resp from the specified map of raw messages. +func UnmarshalPseudoIpv4Resp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PseudoIpv4Resp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalPseudoIpv4RespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResponseBufferingResp : Buffering response. +type ResponseBufferingResp struct { + // Container for response information. + Result *ResponseBufferingRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalResponseBufferingResp unmarshals an instance of ResponseBufferingResp from the specified map of raw messages. +func UnmarshalResponseBufferingResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResponseBufferingResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalResponseBufferingRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ScriptLoadOptimizationResp : Script load optimization response. +type ScriptLoadOptimizationResp struct { + // Container for response information. + Result *ScriptLoadOptimizationRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalScriptLoadOptimizationResp unmarshals an instance of ScriptLoadOptimizationResp from the specified map of raw messages. 
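+//
+// These generated unmarshallers are rarely called by hand; the service methods
+// pass them as callbacks to core.UnmarshalModel. A minimal sketch of that
+// pattern (illustrative only; rawResponse is assumed to be the
+// map[string]json.RawMessage produced by the HTTP layer):
+//
+//   var result *ScriptLoadOptimizationResp
+//   err := core.UnmarshalModel(rawResponse, "", &result, UnmarshalScriptLoadOptimizationResp)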
+func UnmarshalScriptLoadOptimizationResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ScriptLoadOptimizationResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalScriptLoadOptimizationRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityHeaderResp : Response of Security Header. +type SecurityHeaderResp struct { + // Container for response information. + Result *SecurityHeaderRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalSecurityHeaderResp unmarshals an instance of SecurityHeaderResp from the specified map of raw messages. +func UnmarshalSecurityHeaderResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityHeaderResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalSecurityHeaderRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ServerSideExcludeResp : Response of server side exclude. +type ServerSideExcludeResp struct { + // Container for response information. + Result *ServerSideExcludeRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalServerSideExcludeResp unmarshals an instance of ServerSideExcludeResp from the specified map of raw messages. +func UnmarshalServerSideExcludeResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ServerSideExcludeResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalServerSideExcludeRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TlsClientAuthResp : TLS Client authentication response. +type TlsClientAuthResp struct { + // Container for response information. + Result *TlsClientAuthRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. 
+ Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalTlsClientAuthResp unmarshals an instance of TlsClientAuthResp from the specified map of raw messages. +func UnmarshalTlsClientAuthResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TlsClientAuthResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalTlsClientAuthRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TrueClientIpResp : true client IP response. +type TrueClientIpResp struct { + // Container for response information. + Result *TrueClientIpRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalTrueClientIpResp unmarshals an instance of TrueClientIpResp from the specified map of raw messages. +func UnmarshalTrueClientIpResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TrueClientIpResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalTrueClientIpRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WafResp : WAF Response. +type WafResp struct { + // Container for response information. + Result *WafRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalWafResp unmarshals an instance of WafResp from the specified map of raw messages. +func UnmarshalWafResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WafResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalWafRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WebsocketsResp : Websocket Response. +type WebsocketsResp struct { + // Container for response information. + Result *WebsocketsRespResult `json:"result" validate:"required"` + + // Was the get successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. 
+ Messages [][]string `json:"messages" validate:"required"` +} + + +// UnmarshalWebsocketsResp unmarshals an instance of WebsocketsResp from the specified map of raw messages. +func UnmarshalWebsocketsResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WebsocketsResp) + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalWebsocketsRespResult) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZonesCnameFlatteningResp : Zones CNAME flattening response. +type ZonesCnameFlatteningResp struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // CNAME Flattening response. + Result *CnameFlatteningResponse `json:"result" validate:"required"` +} + + +// UnmarshalZonesCnameFlatteningResp unmarshals an instance of ZonesCnameFlatteningResp from the specified map of raw messages. +func UnmarshalZonesCnameFlatteningResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZonesCnameFlatteningResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalCnameFlatteningResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZonesDnssecResp : Zones DNS Sec Response. +type ZonesDnssecResp struct { + // Was operation successful. + Success *bool `json:"success" validate:"required"` + + // Array of errors encountered. + Errors [][]string `json:"errors" validate:"required"` + + // Array of messages returned. + Messages [][]string `json:"messages" validate:"required"` + + // Container for response information. + Result *ZonesDnssecRespResult `json:"result" validate:"required"` +} + + +// UnmarshalZonesDnssecResp unmarshals an instance of ZonesDnssecResp from the specified map of raw messages. 
+func UnmarshalZonesDnssecResp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZonesDnssecResp) + err = core.UnmarshalPrimitive(m, "success", &obj.Success) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + err = core.UnmarshalModel(m, "result", &obj.Result, UnmarshalZonesDnssecRespResult) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/platform-services-go-sdk/catalogmanagementv1/catalog_management_v1.go b/vendor/github.com/IBM/platform-services-go-sdk/catalogmanagementv1/catalog_management_v1.go new file mode 100644 index 00000000000..1c810e33e6b --- /dev/null +++ b/vendor/github.com/IBM/platform-services-go-sdk/catalogmanagementv1/catalog_management_v1.go @@ -0,0 +1,12620 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.31.0-902c9336-20210504-161156 + */ + +// Package catalogmanagementv1 : Operations and models for the CatalogManagementV1 service +package catalogmanagementv1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "time" + + "github.com/IBM/go-sdk-core/v5/core" + common "github.com/IBM/platform-services-go-sdk/common" + "github.com/go-openapi/strfmt" +) + +// CatalogManagementV1 : This is the API to use for managing private catalogs for IBM Cloud. Private catalogs provide a +// way to centrally manage access to products in the IBM Cloud catalog and your own catalogs. +// +// Version: 1.0 +type CatalogManagementV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://cm.globalcatalog.cloud.ibm.com/api/v1-beta" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "catalog_management" + +// CatalogManagementV1Options : Service options +type CatalogManagementV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewCatalogManagementV1UsingExternalConfig : constructs an instance of CatalogManagementV1 with passed in options and external configuration. 
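+//
+// A minimal sketch of the external-config path; it assumes service credentials
+// (e.g. a CATALOG_MANAGEMENT_APIKEY entry) are already available to the SDK
+// via environment variables or a credentials file:
+//
+//   service, err := catalogmanagementv1.NewCatalogManagementV1UsingExternalConfig(
+//       &catalogmanagementv1.CatalogManagementV1Options{})
+//   if err != nil {
+//       panic(err)
+//   }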
+func NewCatalogManagementV1UsingExternalConfig(options *CatalogManagementV1Options) (catalogManagement *CatalogManagementV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + catalogManagement, err = NewCatalogManagementV1(options) + if err != nil { + return + } + + err = catalogManagement.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = catalogManagement.Service.SetServiceURL(options.URL) + } + return +} + +// NewCatalogManagementV1 : constructs an instance of CatalogManagementV1 with passed in options. +func NewCatalogManagementV1(options *CatalogManagementV1Options) (service *CatalogManagementV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &CatalogManagementV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "catalogManagement" suitable for processing requests. +func (catalogManagement *CatalogManagementV1) Clone() *CatalogManagementV1 { + if core.IsNil(catalogManagement) { + return nil + } + clone := *catalogManagement + clone.Service = catalogManagement.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (catalogManagement *CatalogManagementV1) SetServiceURL(url string) error { + return catalogManagement.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (catalogManagement *CatalogManagementV1) GetServiceURL() string { + return catalogManagement.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (catalogManagement *CatalogManagementV1) SetDefaultHeaders(headers http.Header) { + catalogManagement.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (catalogManagement *CatalogManagementV1) SetEnableGzipCompression(enableGzip bool) { + catalogManagement.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (catalogManagement *CatalogManagementV1) GetEnableGzipCompression() bool { + return catalogManagement.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (catalogManagement *CatalogManagementV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + catalogManagement.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (catalogManagement *CatalogManagementV1) DisableRetries() { + catalogManagement.Service.DisableRetries() +} + +// GetCatalogAccount : Get catalog account settings +// Get the account level settings for the account for private catalog. 
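+//
+// A minimal usage sketch; the IAM API key is a placeholder assumption:
+//
+//   service, _ := catalogmanagementv1.NewCatalogManagementV1(&catalogmanagementv1.CatalogManagementV1Options{
+//       Authenticator: &core.IamAuthenticator{ApiKey: "<api-key>"},
+//   })
+//   account, _, err := service.GetCatalogAccount(&catalogmanagementv1.GetCatalogAccountOptions{})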
+func (catalogManagement *CatalogManagementV1) GetCatalogAccount(getCatalogAccountOptions *GetCatalogAccountOptions) (result *Account, response *core.DetailedResponse, err error) { + return catalogManagement.GetCatalogAccountWithContext(context.Background(), getCatalogAccountOptions) +} + +// GetCatalogAccountWithContext is an alternate form of the GetCatalogAccount method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetCatalogAccountWithContext(ctx context.Context, getCatalogAccountOptions *GetCatalogAccountOptions) (result *Account, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getCatalogAccountOptions, "getCatalogAccountOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogaccount`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getCatalogAccountOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetCatalogAccount") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAccount) + if err != nil { + return + } + response.Result = result + } + + return +} + +// UpdateCatalogAccount : Update account settings +// Update the account level settings for the account for private catalog. 
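+//
+// A minimal sketch of hiding the public IBM Cloud catalog for the account,
+// assuming service is an initialized *CatalogManagementV1; the account ID is
+// a placeholder:
+//
+//   _, err := service.UpdateCatalogAccount(&catalogmanagementv1.UpdateCatalogAccountOptions{
+//       ID:                  core.StringPtr("<account-id>"),
+//       HideIBMCloudCatalog: core.BoolPtr(true),
+//   })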
+func (catalogManagement *CatalogManagementV1) UpdateCatalogAccount(updateCatalogAccountOptions *UpdateCatalogAccountOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.UpdateCatalogAccountWithContext(context.Background(), updateCatalogAccountOptions) +} + +// UpdateCatalogAccountWithContext is an alternate form of the UpdateCatalogAccount method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) UpdateCatalogAccountWithContext(ctx context.Context, updateCatalogAccountOptions *UpdateCatalogAccountOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateStruct(updateCatalogAccountOptions, "updateCatalogAccountOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogaccount`, nil) + if err != nil { + return + } + + for headerName, headerValue := range updateCatalogAccountOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "UpdateCatalogAccount") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateCatalogAccountOptions.ID != nil { + body["id"] = updateCatalogAccountOptions.ID + } + if updateCatalogAccountOptions.HideIBMCloudCatalog != nil { + body["hide_IBM_cloud_catalog"] = updateCatalogAccountOptions.HideIBMCloudCatalog + } + if updateCatalogAccountOptions.AccountFilters != nil { + body["account_filters"] = updateCatalogAccountOptions.AccountFilters + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetCatalogAccountAudit : Get catalog account audit log +// Get the audit log associated with a catalog account. 
+func (catalogManagement *CatalogManagementV1) GetCatalogAccountAudit(getCatalogAccountAuditOptions *GetCatalogAccountAuditOptions) (result *AuditLog, response *core.DetailedResponse, err error) { + return catalogManagement.GetCatalogAccountAuditWithContext(context.Background(), getCatalogAccountAuditOptions) +} + +// GetCatalogAccountAuditWithContext is an alternate form of the GetCatalogAccountAudit method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetCatalogAccountAuditWithContext(ctx context.Context, getCatalogAccountAuditOptions *GetCatalogAccountAuditOptions) (result *AuditLog, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getCatalogAccountAuditOptions, "getCatalogAccountAuditOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogaccount/audit`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getCatalogAccountAuditOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetCatalogAccountAudit") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAuditLog) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetCatalogAccountFilters : Get catalog account filters +// Get the accumulated filters of the account and of the catalogs you have access to. 
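+//
+// A minimal sketch, assuming service is an initialized *CatalogManagementV1;
+// the catalog ID used for the optional query parameter is a placeholder:
+//
+//   filters, _, err := service.GetCatalogAccountFilters(&catalogmanagementv1.GetCatalogAccountFiltersOptions{
+//       Catalog: core.StringPtr("<catalog-id>"),
+//   })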
+func (catalogManagement *CatalogManagementV1) GetCatalogAccountFilters(getCatalogAccountFiltersOptions *GetCatalogAccountFiltersOptions) (result *AccumulatedFilters, response *core.DetailedResponse, err error) { + return catalogManagement.GetCatalogAccountFiltersWithContext(context.Background(), getCatalogAccountFiltersOptions) +} + +// GetCatalogAccountFiltersWithContext is an alternate form of the GetCatalogAccountFilters method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetCatalogAccountFiltersWithContext(ctx context.Context, getCatalogAccountFiltersOptions *GetCatalogAccountFiltersOptions) (result *AccumulatedFilters, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getCatalogAccountFiltersOptions, "getCatalogAccountFiltersOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogaccount/filters`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getCatalogAccountFiltersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetCatalogAccountFilters") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getCatalogAccountFiltersOptions.Catalog != nil { + builder.AddQuery("catalog", fmt.Sprint(*getCatalogAccountFiltersOptions.Catalog)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAccumulatedFilters) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ListCatalogs : Get list of catalogs +// Retrieves the available catalogs for a given account. This can be used by an unauthenticated user to retrieve the +// public catalog. 
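+//
+// A minimal sketch; for the public catalog a client constructed with
+// &core.NoAuthAuthenticator{} is sufficient, while account catalogs require
+// IAM credentials:
+//
+//   catalogs, _, err := service.ListCatalogs(&catalogmanagementv1.ListCatalogsOptions{})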
+func (catalogManagement *CatalogManagementV1) ListCatalogs(listCatalogsOptions *ListCatalogsOptions) (result *CatalogSearchResult, response *core.DetailedResponse, err error) { + return catalogManagement.ListCatalogsWithContext(context.Background(), listCatalogsOptions) +} + +// ListCatalogsWithContext is an alternate form of the ListCatalogs method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ListCatalogsWithContext(ctx context.Context, listCatalogsOptions *ListCatalogsOptions) (result *CatalogSearchResult, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listCatalogsOptions, "listCatalogsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listCatalogsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ListCatalogs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCatalogSearchResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// CreateCatalog : Create a catalog +// Create a catalog for a given account. 
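+//
+// A minimal sketch, assuming service is an initialized *CatalogManagementV1;
+// the label and description values are placeholders, and only a subset of the
+// optional body fields is shown:
+//
+//   catalog, _, err := service.CreateCatalog(&catalogmanagementv1.CreateCatalogOptions{
+//       Label:            core.StringPtr("my-private-catalog"),
+//       ShortDescription: core.StringPtr("Curated offerings for my team"),
+//   })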
+func (catalogManagement *CatalogManagementV1) CreateCatalog(createCatalogOptions *CreateCatalogOptions) (result *Catalog, response *core.DetailedResponse, err error) { + return catalogManagement.CreateCatalogWithContext(context.Background(), createCatalogOptions) +} + +// CreateCatalogWithContext is an alternate form of the CreateCatalog method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) CreateCatalogWithContext(ctx context.Context, createCatalogOptions *CreateCatalogOptions) (result *Catalog, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createCatalogOptions, "createCatalogOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createCatalogOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "CreateCatalog") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createCatalogOptions.ID != nil { + body["id"] = createCatalogOptions.ID + } + if createCatalogOptions.Rev != nil { + body["_rev"] = createCatalogOptions.Rev + } + if createCatalogOptions.Label != nil { + body["label"] = createCatalogOptions.Label + } + if createCatalogOptions.ShortDescription != nil { + body["short_description"] = createCatalogOptions.ShortDescription + } + if createCatalogOptions.CatalogIconURL != nil { + body["catalog_icon_url"] = createCatalogOptions.CatalogIconURL + } + if createCatalogOptions.Tags != nil { + body["tags"] = createCatalogOptions.Tags + } + if createCatalogOptions.Features != nil { + body["features"] = createCatalogOptions.Features + } + if createCatalogOptions.Disabled != nil { + body["disabled"] = createCatalogOptions.Disabled + } + if createCatalogOptions.ResourceGroupID != nil { + body["resource_group_id"] = createCatalogOptions.ResourceGroupID + } + if createCatalogOptions.OwningAccount != nil { + body["owning_account"] = createCatalogOptions.OwningAccount + } + if createCatalogOptions.CatalogFilters != nil { + body["catalog_filters"] = createCatalogOptions.CatalogFilters + } + if createCatalogOptions.SyndicationSettings != nil { + body["syndication_settings"] = createCatalogOptions.SyndicationSettings + } + if createCatalogOptions.Kind != nil { + body["kind"] = createCatalogOptions.Kind + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCatalog) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetCatalog : Get catalog +// Get a catalog. This can also be used by an unauthenticated user to get the public catalog. 
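+//
+// A minimal sketch, assuming service is an initialized *CatalogManagementV1;
+// CatalogIdentifier is the required path parameter and the value shown is a
+// placeholder:
+//
+//   catalog, _, err := service.GetCatalog(&catalogmanagementv1.GetCatalogOptions{
+//       CatalogIdentifier: core.StringPtr("<catalog-id>"),
+//   })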
+func (catalogManagement *CatalogManagementV1) GetCatalog(getCatalogOptions *GetCatalogOptions) (result *Catalog, response *core.DetailedResponse, err error) { + return catalogManagement.GetCatalogWithContext(context.Background(), getCatalogOptions) +} + +// GetCatalogWithContext is an alternate form of the GetCatalog method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetCatalogWithContext(ctx context.Context, getCatalogOptions *GetCatalogOptions) (result *Catalog, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getCatalogOptions, "getCatalogOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getCatalogOptions, "getCatalogOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *getCatalogOptions.CatalogIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getCatalogOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetCatalog") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCatalog) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ReplaceCatalog : Update catalog +// Update a catalog. 
+func (catalogManagement *CatalogManagementV1) ReplaceCatalog(replaceCatalogOptions *ReplaceCatalogOptions) (result *Catalog, response *core.DetailedResponse, err error) { + return catalogManagement.ReplaceCatalogWithContext(context.Background(), replaceCatalogOptions) +} + +// ReplaceCatalogWithContext is an alternate form of the ReplaceCatalog method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ReplaceCatalogWithContext(ctx context.Context, replaceCatalogOptions *ReplaceCatalogOptions) (result *Catalog, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceCatalogOptions, "replaceCatalogOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceCatalogOptions, "replaceCatalogOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *replaceCatalogOptions.CatalogIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range replaceCatalogOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ReplaceCatalog") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if replaceCatalogOptions.ID != nil { + body["id"] = replaceCatalogOptions.ID + } + if replaceCatalogOptions.Rev != nil { + body["_rev"] = replaceCatalogOptions.Rev + } + if replaceCatalogOptions.Label != nil { + body["label"] = replaceCatalogOptions.Label + } + if replaceCatalogOptions.ShortDescription != nil { + body["short_description"] = replaceCatalogOptions.ShortDescription + } + if replaceCatalogOptions.CatalogIconURL != nil { + body["catalog_icon_url"] = replaceCatalogOptions.CatalogIconURL + } + if replaceCatalogOptions.Tags != nil { + body["tags"] = replaceCatalogOptions.Tags + } + if replaceCatalogOptions.Features != nil { + body["features"] = replaceCatalogOptions.Features + } + if replaceCatalogOptions.Disabled != nil { + body["disabled"] = replaceCatalogOptions.Disabled + } + if replaceCatalogOptions.ResourceGroupID != nil { + body["resource_group_id"] = replaceCatalogOptions.ResourceGroupID + } + if replaceCatalogOptions.OwningAccount != nil { + body["owning_account"] = replaceCatalogOptions.OwningAccount + } + if replaceCatalogOptions.CatalogFilters != nil { + body["catalog_filters"] = replaceCatalogOptions.CatalogFilters + } + if replaceCatalogOptions.SyndicationSettings != nil { + body["syndication_settings"] = replaceCatalogOptions.SyndicationSettings + } + if replaceCatalogOptions.Kind != nil { + body["kind"] = replaceCatalogOptions.Kind + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCatalog) + if err != nil { + return + } + response.Result = result + } + + 
return +} + +// DeleteCatalog : Delete catalog +// Delete a catalog. +func (catalogManagement *CatalogManagementV1) DeleteCatalog(deleteCatalogOptions *DeleteCatalogOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.DeleteCatalogWithContext(context.Background(), deleteCatalogOptions) +} + +// DeleteCatalogWithContext is an alternate form of the DeleteCatalog method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeleteCatalogWithContext(ctx context.Context, deleteCatalogOptions *DeleteCatalogOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteCatalogOptions, "deleteCatalogOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteCatalogOptions, "deleteCatalogOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *deleteCatalogOptions.CatalogIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteCatalogOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeleteCatalog") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetCatalogAudit : Get catalog audit log +// Get the audit log associated with a catalog. 
+func (catalogManagement *CatalogManagementV1) GetCatalogAudit(getCatalogAuditOptions *GetCatalogAuditOptions) (result *AuditLog, response *core.DetailedResponse, err error) {
+ return catalogManagement.GetCatalogAuditWithContext(context.Background(), getCatalogAuditOptions)
+}
+
+// GetCatalogAuditWithContext is an alternate form of the GetCatalogAudit method which supports a Context parameter
+func (catalogManagement *CatalogManagementV1) GetCatalogAuditWithContext(ctx context.Context, getCatalogAuditOptions *GetCatalogAuditOptions) (result *AuditLog, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(getCatalogAuditOptions, "getCatalogAuditOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(getCatalogAuditOptions, "getCatalogAuditOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "catalog_identifier": *getCatalogAuditOptions.CatalogIdentifier,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/audit`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range getCatalogAuditOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetCatalogAudit")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = catalogManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ if rawResponse != nil {
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAuditLog)
+ if err != nil {
+ return
+ }
+ response.Result = result
+ }
+
+ return
+}
+
+// GetConsumptionOfferings : Get consumption offerings
+// Retrieve the available offerings from both the public catalog and the account that is currently scoped for
+// consumption. These copies cannot be used for updating. They are not complete and only return what is visible to the
+// caller. This can be used by an unauthenticated user to retrieve publicly available offerings.
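+//
+// A paging sketch (assuming the option struct's Limit and Offset fields are
+// *int64, per the generated SDK convention):
+//
+//	result, _, err := service.GetConsumptionOfferings(&GetConsumptionOfferingsOptions{
+//		Digest: core.BoolPtr(true),
+//		Limit:  core.Int64Ptr(50),
+//		Offset: core.Int64Ptr(0),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// result is an *OfferingSearchResult; increase Offset to fetch later pages.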
+func (catalogManagement *CatalogManagementV1) GetConsumptionOfferings(getConsumptionOfferingsOptions *GetConsumptionOfferingsOptions) (result *OfferingSearchResult, response *core.DetailedResponse, err error) {
+ return catalogManagement.GetConsumptionOfferingsWithContext(context.Background(), getConsumptionOfferingsOptions)
+}
+
+// GetConsumptionOfferingsWithContext is an alternate form of the GetConsumptionOfferings method which supports a Context parameter
+func (catalogManagement *CatalogManagementV1) GetConsumptionOfferingsWithContext(ctx context.Context, getConsumptionOfferingsOptions *GetConsumptionOfferingsOptions) (result *OfferingSearchResult, response *core.DetailedResponse, err error) {
+ err = core.ValidateStruct(getConsumptionOfferingsOptions, "getConsumptionOfferingsOptions")
+ if err != nil {
+ return
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/offerings`, nil)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range getConsumptionOfferingsOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetConsumptionOfferings")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ if getConsumptionOfferingsOptions.Digest != nil {
+ builder.AddQuery("digest", fmt.Sprint(*getConsumptionOfferingsOptions.Digest))
+ }
+ if getConsumptionOfferingsOptions.Catalog != nil {
+ builder.AddQuery("catalog", fmt.Sprint(*getConsumptionOfferingsOptions.Catalog))
+ }
+ if getConsumptionOfferingsOptions.Select != nil {
+ builder.AddQuery("select", fmt.Sprint(*getConsumptionOfferingsOptions.Select))
+ }
+ if getConsumptionOfferingsOptions.IncludeHidden != nil {
+ builder.AddQuery("includeHidden", fmt.Sprint(*getConsumptionOfferingsOptions.IncludeHidden))
+ }
+ if getConsumptionOfferingsOptions.Limit != nil {
+ builder.AddQuery("limit", fmt.Sprint(*getConsumptionOfferingsOptions.Limit))
+ }
+ if getConsumptionOfferingsOptions.Offset != nil {
+ builder.AddQuery("offset", fmt.Sprint(*getConsumptionOfferingsOptions.Offset))
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = catalogManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ if rawResponse != nil {
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOfferingSearchResult)
+ if err != nil {
+ return
+ }
+ response.Result = result
+ }
+
+ return
+}
+
+// ListOfferings : Get list of offerings
+// Retrieve the available offerings in the specified catalog. This can also be used by an unauthenticated user to
+// retrieve publicly available offerings.
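+//
+// For example, to filter by name within a catalog (a sketch; the name value is
+// illustrative):
+//
+//	result, _, err := service.ListOfferings(&ListOfferingsOptions{
+//		CatalogIdentifier: core.StringPtr(catalogID),
+//		Name:              core.StringPtr("my-offering"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}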
+func (catalogManagement *CatalogManagementV1) ListOfferings(listOfferingsOptions *ListOfferingsOptions) (result *OfferingSearchResult, response *core.DetailedResponse, err error) { + return catalogManagement.ListOfferingsWithContext(context.Background(), listOfferingsOptions) +} + +// ListOfferingsWithContext is an alternate form of the ListOfferings method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ListOfferingsWithContext(ctx context.Context, listOfferingsOptions *ListOfferingsOptions) (result *OfferingSearchResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listOfferingsOptions, "listOfferingsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listOfferingsOptions, "listOfferingsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *listOfferingsOptions.CatalogIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listOfferingsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ListOfferings") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listOfferingsOptions.Digest != nil { + builder.AddQuery("digest", fmt.Sprint(*listOfferingsOptions.Digest)) + } + if listOfferingsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listOfferingsOptions.Limit)) + } + if listOfferingsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listOfferingsOptions.Offset)) + } + if listOfferingsOptions.Name != nil { + builder.AddQuery("name", fmt.Sprint(*listOfferingsOptions.Name)) + } + if listOfferingsOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listOfferingsOptions.Sort)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOfferingSearchResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// CreateOffering : Create offering +// Create an offering. 
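+//
+// A minimal sketch (only a label and programmatic name are set; every body
+// field handled below is optional):
+//
+//	offering, _, err := service.CreateOffering(&CreateOfferingOptions{
+//		CatalogIdentifier: core.StringPtr(catalogID),
+//		Label:             core.StringPtr("My Offering"),
+//		Name:              core.StringPtr("my-offering"),
+//	})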
+func (catalogManagement *CatalogManagementV1) CreateOffering(createOfferingOptions *CreateOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) { + return catalogManagement.CreateOfferingWithContext(context.Background(), createOfferingOptions) +} + +// CreateOfferingWithContext is an alternate form of the CreateOffering method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) CreateOfferingWithContext(ctx context.Context, createOfferingOptions *CreateOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createOfferingOptions, "createOfferingOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createOfferingOptions, "createOfferingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *createOfferingOptions.CatalogIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createOfferingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "CreateOffering") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createOfferingOptions.ID != nil { + body["id"] = createOfferingOptions.ID + } + if createOfferingOptions.Rev != nil { + body["_rev"] = createOfferingOptions.Rev + } + if createOfferingOptions.URL != nil { + body["url"] = createOfferingOptions.URL + } + if createOfferingOptions.CRN != nil { + body["crn"] = createOfferingOptions.CRN + } + if createOfferingOptions.Label != nil { + body["label"] = createOfferingOptions.Label + } + if createOfferingOptions.Name != nil { + body["name"] = createOfferingOptions.Name + } + if createOfferingOptions.OfferingIconURL != nil { + body["offering_icon_url"] = createOfferingOptions.OfferingIconURL + } + if createOfferingOptions.OfferingDocsURL != nil { + body["offering_docs_url"] = createOfferingOptions.OfferingDocsURL + } + if createOfferingOptions.OfferingSupportURL != nil { + body["offering_support_url"] = createOfferingOptions.OfferingSupportURL + } + if createOfferingOptions.Tags != nil { + body["tags"] = createOfferingOptions.Tags + } + if createOfferingOptions.Keywords != nil { + body["keywords"] = createOfferingOptions.Keywords + } + if createOfferingOptions.Rating != nil { + body["rating"] = createOfferingOptions.Rating + } + if createOfferingOptions.Created != nil { + body["created"] = createOfferingOptions.Created + } + if createOfferingOptions.Updated != nil { + body["updated"] = createOfferingOptions.Updated + } + if createOfferingOptions.ShortDescription != nil { + body["short_description"] = createOfferingOptions.ShortDescription + } + if createOfferingOptions.LongDescription != nil { + body["long_description"] = createOfferingOptions.LongDescription + } + if createOfferingOptions.Features != nil { + body["features"] = createOfferingOptions.Features + } + if createOfferingOptions.Kinds != nil { + body["kinds"] = createOfferingOptions.Kinds + } + if 
createOfferingOptions.PermitRequestIBMPublicPublish != nil {
+ body["permit_request_ibm_public_publish"] = createOfferingOptions.PermitRequestIBMPublicPublish
+ }
+ if createOfferingOptions.IBMPublishApproved != nil {
+ body["ibm_publish_approved"] = createOfferingOptions.IBMPublishApproved
+ }
+ if createOfferingOptions.PublicPublishApproved != nil {
+ body["public_publish_approved"] = createOfferingOptions.PublicPublishApproved
+ }
+ if createOfferingOptions.PublicOriginalCRN != nil {
+ body["public_original_crn"] = createOfferingOptions.PublicOriginalCRN
+ }
+ if createOfferingOptions.PublishPublicCRN != nil {
+ body["publish_public_crn"] = createOfferingOptions.PublishPublicCRN
+ }
+ if createOfferingOptions.PortalApprovalRecord != nil {
+ body["portal_approval_record"] = createOfferingOptions.PortalApprovalRecord
+ }
+ if createOfferingOptions.PortalUIURL != nil {
+ body["portal_ui_url"] = createOfferingOptions.PortalUIURL
+ }
+ if createOfferingOptions.CatalogID != nil {
+ body["catalog_id"] = createOfferingOptions.CatalogID
+ }
+ if createOfferingOptions.CatalogName != nil {
+ body["catalog_name"] = createOfferingOptions.CatalogName
+ }
+ if createOfferingOptions.Metadata != nil {
+ body["metadata"] = createOfferingOptions.Metadata
+ }
+ if createOfferingOptions.Disclaimer != nil {
+ body["disclaimer"] = createOfferingOptions.Disclaimer
+ }
+ if createOfferingOptions.Hidden != nil {
+ body["hidden"] = createOfferingOptions.Hidden
+ }
+ if createOfferingOptions.Provider != nil {
+ body["provider"] = createOfferingOptions.Provider
+ }
+ if createOfferingOptions.RepoInfo != nil {
+ body["repo_info"] = createOfferingOptions.RepoInfo
+ }
+ _, err = builder.SetBodyContentJSON(body)
+ if err != nil {
+ return
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = catalogManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ if rawResponse != nil {
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOffering)
+ if err != nil {
+ return
+ }
+ response.Result = result
+ }
+
+ return
+}
+
+// ImportOfferingVersion : Import offering version
+// Import a new version into an offering from a tgz.
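+//
+// For example, importing version 1.0.1 from an archive URL (a sketch; the URL
+// is illustrative):
+//
+//	offering, _, err := service.ImportOfferingVersion(&ImportOfferingVersionOptions{
+//		CatalogIdentifier: core.StringPtr(catalogID),
+//		OfferingID:        core.StringPtr(offeringID),
+//		Zipurl:            core.StringPtr("https://example.com/my-offering-1.0.1.tgz"),
+//		TargetVersion:     core.StringPtr("1.0.1"),
+//	})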
+func (catalogManagement *CatalogManagementV1) ImportOfferingVersion(importOfferingVersionOptions *ImportOfferingVersionOptions) (result *Offering, response *core.DetailedResponse, err error) { + return catalogManagement.ImportOfferingVersionWithContext(context.Background(), importOfferingVersionOptions) +} + +// ImportOfferingVersionWithContext is an alternate form of the ImportOfferingVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ImportOfferingVersionWithContext(ctx context.Context, importOfferingVersionOptions *ImportOfferingVersionOptions) (result *Offering, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(importOfferingVersionOptions, "importOfferingVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(importOfferingVersionOptions, "importOfferingVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *importOfferingVersionOptions.CatalogIdentifier, + "offering_id": *importOfferingVersionOptions.OfferingID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings/{offering_id}/version`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range importOfferingVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ImportOfferingVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + if importOfferingVersionOptions.Zipurl != nil { + builder.AddQuery("zipurl", fmt.Sprint(*importOfferingVersionOptions.Zipurl)) + } + if importOfferingVersionOptions.TargetVersion != nil { + builder.AddQuery("targetVersion", fmt.Sprint(*importOfferingVersionOptions.TargetVersion)) + } + if importOfferingVersionOptions.IncludeConfig != nil { + builder.AddQuery("includeConfig", fmt.Sprint(*importOfferingVersionOptions.IncludeConfig)) + } + if importOfferingVersionOptions.IsVsi != nil { + builder.AddQuery("isVSI", fmt.Sprint(*importOfferingVersionOptions.IsVsi)) + } + if importOfferingVersionOptions.RepoType != nil { + builder.AddQuery("repoType", fmt.Sprint(*importOfferingVersionOptions.RepoType)) + } + + body := make(map[string]interface{}) + if importOfferingVersionOptions.Tags != nil { + body["tags"] = importOfferingVersionOptions.Tags + } + if importOfferingVersionOptions.TargetKinds != nil { + body["target_kinds"] = importOfferingVersionOptions.TargetKinds + } + if importOfferingVersionOptions.Content != nil { + body["content"] = importOfferingVersionOptions.Content + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOffering) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ImportOffering : Import offering +// Import a new offering from a tgz. 
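+//
+// A sketch (the zipurl and target kind values are illustrative; TargetKinds is
+// assumed to be a []string, matching the `target_kinds` body field below):
+//
+//	offering, _, err := service.ImportOffering(&ImportOfferingOptions{
+//		CatalogIdentifier: core.StringPtr(catalogID),
+//		Zipurl:            core.StringPtr("https://example.com/my-offering-1.0.0.tgz"),
+//		TargetKinds:       []string{"terraform"},
+//	})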
+func (catalogManagement *CatalogManagementV1) ImportOffering(importOfferingOptions *ImportOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) {
+ return catalogManagement.ImportOfferingWithContext(context.Background(), importOfferingOptions)
+}
+
+// ImportOfferingWithContext is an alternate form of the ImportOffering method which supports a Context parameter
+func (catalogManagement *CatalogManagementV1) ImportOfferingWithContext(ctx context.Context, importOfferingOptions *ImportOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(importOfferingOptions, "importOfferingOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(importOfferingOptions, "importOfferingOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "catalog_identifier": *importOfferingOptions.CatalogIdentifier,
+ }
+
+ builder := core.NewRequestBuilder(core.POST)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/import/offerings`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range importOfferingOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ImportOffering")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ builder.AddHeader("Content-Type", "application/json")
+ if importOfferingOptions.XAuthToken != nil {
+ builder.AddHeader("X-Auth-Token", fmt.Sprint(*importOfferingOptions.XAuthToken))
+ }
+
+ if importOfferingOptions.Zipurl != nil {
+ builder.AddQuery("zipurl", fmt.Sprint(*importOfferingOptions.Zipurl))
+ }
+ if importOfferingOptions.OfferingID != nil {
+ builder.AddQuery("offeringID", fmt.Sprint(*importOfferingOptions.OfferingID))
+ }
+ if importOfferingOptions.TargetVersion != nil {
+ builder.AddQuery("targetVersion", fmt.Sprint(*importOfferingOptions.TargetVersion))
+ }
+ if importOfferingOptions.IncludeConfig != nil {
+ builder.AddQuery("includeConfig", fmt.Sprint(*importOfferingOptions.IncludeConfig))
+ }
+ if importOfferingOptions.IsVsi != nil {
+ builder.AddQuery("isVSI", fmt.Sprint(*importOfferingOptions.IsVsi))
+ }
+ if importOfferingOptions.RepoType != nil {
+ builder.AddQuery("repoType", fmt.Sprint(*importOfferingOptions.RepoType))
+ }
+
+ body := make(map[string]interface{})
+ if importOfferingOptions.Tags != nil {
+ body["tags"] = importOfferingOptions.Tags
+ }
+ if importOfferingOptions.TargetKinds != nil {
+ body["target_kinds"] = importOfferingOptions.TargetKinds
+ }
+ if importOfferingOptions.Content != nil {
+ body["content"] = importOfferingOptions.Content
+ }
+ _, err = builder.SetBodyContentJSON(body)
+ if err != nil {
+ return
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = catalogManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ if rawResponse != nil {
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOffering)
+ if err != nil {
+ return
+ }
+ response.Result = result
+ }
+
+ return
+}
+
+// ReloadOffering : Reload offering
+// Reload an existing version of an offering from a tgz.
+func (catalogManagement *CatalogManagementV1) ReloadOffering(reloadOfferingOptions *ReloadOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) { + return catalogManagement.ReloadOfferingWithContext(context.Background(), reloadOfferingOptions) +} + +// ReloadOfferingWithContext is an alternate form of the ReloadOffering method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ReloadOfferingWithContext(ctx context.Context, reloadOfferingOptions *ReloadOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(reloadOfferingOptions, "reloadOfferingOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(reloadOfferingOptions, "reloadOfferingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *reloadOfferingOptions.CatalogIdentifier, + "offering_id": *reloadOfferingOptions.OfferingID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings/{offering_id}/reload`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range reloadOfferingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ReloadOffering") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("targetVersion", fmt.Sprint(*reloadOfferingOptions.TargetVersion)) + if reloadOfferingOptions.Zipurl != nil { + builder.AddQuery("zipurl", fmt.Sprint(*reloadOfferingOptions.Zipurl)) + } + if reloadOfferingOptions.RepoType != nil { + builder.AddQuery("repoType", fmt.Sprint(*reloadOfferingOptions.RepoType)) + } + + body := make(map[string]interface{}) + if reloadOfferingOptions.Tags != nil { + body["tags"] = reloadOfferingOptions.Tags + } + if reloadOfferingOptions.TargetKinds != nil { + body["target_kinds"] = reloadOfferingOptions.TargetKinds + } + if reloadOfferingOptions.Content != nil { + body["content"] = reloadOfferingOptions.Content + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOffering) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetOffering : Get offering +// Get an offering. This can be used by an unauthenticated user for publicly available offerings. 
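+//
+// For example (a sketch):
+//
+//	offering, _, err := service.GetOffering(&GetOfferingOptions{
+//		CatalogIdentifier: core.StringPtr(catalogID),
+//		OfferingID:        core.StringPtr(offeringID),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}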
+func (catalogManagement *CatalogManagementV1) GetOffering(getOfferingOptions *GetOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) { + return catalogManagement.GetOfferingWithContext(context.Background(), getOfferingOptions) +} + +// GetOfferingWithContext is an alternate form of the GetOffering method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetOfferingWithContext(ctx context.Context, getOfferingOptions *GetOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getOfferingOptions, "getOfferingOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getOfferingOptions, "getOfferingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *getOfferingOptions.CatalogIdentifier, + "offering_id": *getOfferingOptions.OfferingID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings/{offering_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOfferingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetOffering") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOffering) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ReplaceOffering : Update offering +// Update an offering. 
+func (catalogManagement *CatalogManagementV1) ReplaceOffering(replaceOfferingOptions *ReplaceOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) { + return catalogManagement.ReplaceOfferingWithContext(context.Background(), replaceOfferingOptions) +} + +// ReplaceOfferingWithContext is an alternate form of the ReplaceOffering method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ReplaceOfferingWithContext(ctx context.Context, replaceOfferingOptions *ReplaceOfferingOptions) (result *Offering, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceOfferingOptions, "replaceOfferingOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceOfferingOptions, "replaceOfferingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *replaceOfferingOptions.CatalogIdentifier, + "offering_id": *replaceOfferingOptions.OfferingID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings/{offering_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range replaceOfferingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ReplaceOffering") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if replaceOfferingOptions.ID != nil { + body["id"] = replaceOfferingOptions.ID + } + if replaceOfferingOptions.Rev != nil { + body["_rev"] = replaceOfferingOptions.Rev + } + if replaceOfferingOptions.URL != nil { + body["url"] = replaceOfferingOptions.URL + } + if replaceOfferingOptions.CRN != nil { + body["crn"] = replaceOfferingOptions.CRN + } + if replaceOfferingOptions.Label != nil { + body["label"] = replaceOfferingOptions.Label + } + if replaceOfferingOptions.Name != nil { + body["name"] = replaceOfferingOptions.Name + } + if replaceOfferingOptions.OfferingIconURL != nil { + body["offering_icon_url"] = replaceOfferingOptions.OfferingIconURL + } + if replaceOfferingOptions.OfferingDocsURL != nil { + body["offering_docs_url"] = replaceOfferingOptions.OfferingDocsURL + } + if replaceOfferingOptions.OfferingSupportURL != nil { + body["offering_support_url"] = replaceOfferingOptions.OfferingSupportURL + } + if replaceOfferingOptions.Tags != nil { + body["tags"] = replaceOfferingOptions.Tags + } + if replaceOfferingOptions.Keywords != nil { + body["keywords"] = replaceOfferingOptions.Keywords + } + if replaceOfferingOptions.Rating != nil { + body["rating"] = replaceOfferingOptions.Rating + } + if replaceOfferingOptions.Created != nil { + body["created"] = replaceOfferingOptions.Created + } + if replaceOfferingOptions.Updated != nil { + body["updated"] = replaceOfferingOptions.Updated + } + if replaceOfferingOptions.ShortDescription != nil { + body["short_description"] = replaceOfferingOptions.ShortDescription + } + if replaceOfferingOptions.LongDescription != nil { + body["long_description"] = replaceOfferingOptions.LongDescription + } + if replaceOfferingOptions.Features != nil { + body["features"] = 
replaceOfferingOptions.Features + } + if replaceOfferingOptions.Kinds != nil { + body["kinds"] = replaceOfferingOptions.Kinds + } + if replaceOfferingOptions.PermitRequestIBMPublicPublish != nil { + body["permit_request_ibm_public_publish"] = replaceOfferingOptions.PermitRequestIBMPublicPublish + } + if replaceOfferingOptions.IBMPublishApproved != nil { + body["ibm_publish_approved"] = replaceOfferingOptions.IBMPublishApproved + } + if replaceOfferingOptions.PublicPublishApproved != nil { + body["public_publish_approved"] = replaceOfferingOptions.PublicPublishApproved + } + if replaceOfferingOptions.PublicOriginalCRN != nil { + body["public_original_crn"] = replaceOfferingOptions.PublicOriginalCRN + } + if replaceOfferingOptions.PublishPublicCRN != nil { + body["publish_public_crn"] = replaceOfferingOptions.PublishPublicCRN + } + if replaceOfferingOptions.PortalApprovalRecord != nil { + body["portal_approval_record"] = replaceOfferingOptions.PortalApprovalRecord + } + if replaceOfferingOptions.PortalUIURL != nil { + body["portal_ui_url"] = replaceOfferingOptions.PortalUIURL + } + if replaceOfferingOptions.CatalogID != nil { + body["catalog_id"] = replaceOfferingOptions.CatalogID + } + if replaceOfferingOptions.CatalogName != nil { + body["catalog_name"] = replaceOfferingOptions.CatalogName + } + if replaceOfferingOptions.Metadata != nil { + body["metadata"] = replaceOfferingOptions.Metadata + } + if replaceOfferingOptions.Disclaimer != nil { + body["disclaimer"] = replaceOfferingOptions.Disclaimer + } + if replaceOfferingOptions.Hidden != nil { + body["hidden"] = replaceOfferingOptions.Hidden + } + if replaceOfferingOptions.Provider != nil { + body["provider"] = replaceOfferingOptions.Provider + } + if replaceOfferingOptions.RepoInfo != nil { + body["repo_info"] = replaceOfferingOptions.RepoInfo + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOffering) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeleteOffering : Delete offering +// Delete an offering. 
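+//
+// Note that this method returns only a detailed response, with no result
+// model. A sketch:
+//
+//	response, err := service.DeleteOffering(&DeleteOfferingOptions{
+//		CatalogIdentifier: core.StringPtr(catalogID),
+//		OfferingID:        core.StringPtr(offeringID),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("delete status:", response.StatusCode)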
+func (catalogManagement *CatalogManagementV1) DeleteOffering(deleteOfferingOptions *DeleteOfferingOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.DeleteOfferingWithContext(context.Background(), deleteOfferingOptions) +} + +// DeleteOfferingWithContext is an alternate form of the DeleteOffering method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeleteOfferingWithContext(ctx context.Context, deleteOfferingOptions *DeleteOfferingOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteOfferingOptions, "deleteOfferingOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteOfferingOptions, "deleteOfferingOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *deleteOfferingOptions.CatalogIdentifier, + "offering_id": *deleteOfferingOptions.OfferingID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings/{offering_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteOfferingOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeleteOffering") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetOfferingAudit : Get offering audit log +// Get the audit log associated with an offering. 
+func (catalogManagement *CatalogManagementV1) GetOfferingAudit(getOfferingAuditOptions *GetOfferingAuditOptions) (result *AuditLog, response *core.DetailedResponse, err error) {
+ return catalogManagement.GetOfferingAuditWithContext(context.Background(), getOfferingAuditOptions)
+}
+
+// GetOfferingAuditWithContext is an alternate form of the GetOfferingAudit method which supports a Context parameter
+func (catalogManagement *CatalogManagementV1) GetOfferingAuditWithContext(ctx context.Context, getOfferingAuditOptions *GetOfferingAuditOptions) (result *AuditLog, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(getOfferingAuditOptions, "getOfferingAuditOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(getOfferingAuditOptions, "getOfferingAuditOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "catalog_identifier": *getOfferingAuditOptions.CatalogIdentifier,
+ "offering_id": *getOfferingAuditOptions.OfferingID,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings/{offering_id}/audit`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range getOfferingAuditOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetOfferingAudit")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = catalogManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ if rawResponse != nil {
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAuditLog)
+ if err != nil {
+ return
+ }
+ response.Result = result
+ }
+
+ return
+}
+
+// ReplaceOfferingIcon : Upload icon for offering
+// Upload an icon file to be stored in GC. The file is uploaded as a binary payload, not as a form.
+func (catalogManagement *CatalogManagementV1) ReplaceOfferingIcon(replaceOfferingIconOptions *ReplaceOfferingIconOptions) (result *Offering, response *core.DetailedResponse, err error) {
+ return catalogManagement.ReplaceOfferingIconWithContext(context.Background(), replaceOfferingIconOptions)
+}
+
+// ReplaceOfferingIconWithContext is an alternate form of the ReplaceOfferingIcon method which supports a Context parameter
+func (catalogManagement *CatalogManagementV1) ReplaceOfferingIconWithContext(ctx context.Context, replaceOfferingIconOptions *ReplaceOfferingIconOptions) (result *Offering, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(replaceOfferingIconOptions, "replaceOfferingIconOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(replaceOfferingIconOptions, "replaceOfferingIconOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "catalog_identifier": *replaceOfferingIconOptions.CatalogIdentifier,
+ "offering_id": *replaceOfferingIconOptions.OfferingID,
+ "file_name": *replaceOfferingIconOptions.FileName,
+ }
+
+ builder := core.NewRequestBuilder(core.PUT)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings/{offering_id}/icon/{file_name}`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range replaceOfferingIconOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ReplaceOfferingIcon")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = catalogManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ if rawResponse != nil {
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOffering)
+ if err != nil {
+ return
+ }
+ response.Result = result
+ }
+
+ return
+}
+
+// UpdateOfferingIBM : Allow offering to be published
+// Approve or disapprove the offering for publishing to the IBM Public Catalog. Options:
+// * `allow_request` - (Allow requesting to publish to IBM)
+// * `ibm` - (Allow publishing to be visible to IBM only)
+// * `public` - (Allow publishing to be visible to everyone, including IBM)
+//
+// If `public` is disapproved, then `ibm` approval will not be changed. If `ibm` is disapproved, then `public` will
+// automatically be disapproved. If `allow_request` is disapproved, then all rights to publish are removed. This is
+// because the process always goes first through `allow_request`, then `ibm`, and then `public`; `ibm` cannot be
+// skipped. Only users with approval IAM authority can use this. Approvers should use the catalog and offering ID from
+// the public catalog, since they would not have access to the private offering.
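+//
+// For example, requesting the `allow_request` approval (a sketch; the
+// `approved` path segment is assumed to take the strings "true" or "false"):
+//
+//	result, _, err := service.UpdateOfferingIBM(&UpdateOfferingIBMOptions{
+//		CatalogIdentifier: core.StringPtr(catalogID),
+//		OfferingID:        core.StringPtr(offeringID),
+//		ApprovalType:      core.StringPtr("allow_request"),
+//		Approved:          core.StringPtr("true"),
+//	})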
+func (catalogManagement *CatalogManagementV1) UpdateOfferingIBM(updateOfferingIBMOptions *UpdateOfferingIBMOptions) (result *ApprovalResult, response *core.DetailedResponse, err error) { + return catalogManagement.UpdateOfferingIBMWithContext(context.Background(), updateOfferingIBMOptions) +} + +// UpdateOfferingIBMWithContext is an alternate form of the UpdateOfferingIBM method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) UpdateOfferingIBMWithContext(ctx context.Context, updateOfferingIBMOptions *UpdateOfferingIBMOptions) (result *ApprovalResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateOfferingIBMOptions, "updateOfferingIBMOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateOfferingIBMOptions, "updateOfferingIBMOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *updateOfferingIBMOptions.CatalogIdentifier, + "offering_id": *updateOfferingIBMOptions.OfferingID, + "approval_type": *updateOfferingIBMOptions.ApprovalType, + "approved": *updateOfferingIBMOptions.Approved, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings/{offering_id}/publish/{approval_type}/{approved}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateOfferingIBMOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "UpdateOfferingIBM") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalApprovalResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetOfferingUpdates : Get version updates +// Get available updates for the specified version. 
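+//
+// Kind is required here (it is dereferenced unconditionally below); the other
+// query parameters are optional. A sketch, with an assumed kind value:
+//
+//	updates, _, err := service.GetOfferingUpdates(&GetOfferingUpdatesOptions{
+//		CatalogIdentifier: core.StringPtr(catalogID),
+//		OfferingID:        core.StringPtr(offeringID),
+//		Kind:              core.StringPtr("terraform"),
+//		Version:           core.StringPtr("1.0.0"),
+//	})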
+func (catalogManagement *CatalogManagementV1) GetOfferingUpdates(getOfferingUpdatesOptions *GetOfferingUpdatesOptions) (result []VersionUpdateDescriptor, response *core.DetailedResponse, err error) { + return catalogManagement.GetOfferingUpdatesWithContext(context.Background(), getOfferingUpdatesOptions) +} + +// GetOfferingUpdatesWithContext is an alternate form of the GetOfferingUpdates method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetOfferingUpdatesWithContext(ctx context.Context, getOfferingUpdatesOptions *GetOfferingUpdatesOptions) (result []VersionUpdateDescriptor, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getOfferingUpdatesOptions, "getOfferingUpdatesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getOfferingUpdatesOptions, "getOfferingUpdatesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *getOfferingUpdatesOptions.CatalogIdentifier, + "offering_id": *getOfferingUpdatesOptions.OfferingID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/offerings/{offering_id}/updates`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOfferingUpdatesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetOfferingUpdates") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("kind", fmt.Sprint(*getOfferingUpdatesOptions.Kind)) + if getOfferingUpdatesOptions.Version != nil { + builder.AddQuery("version", fmt.Sprint(*getOfferingUpdatesOptions.Version)) + } + if getOfferingUpdatesOptions.ClusterID != nil { + builder.AddQuery("cluster_id", fmt.Sprint(*getOfferingUpdatesOptions.ClusterID)) + } + if getOfferingUpdatesOptions.Region != nil { + builder.AddQuery("region", fmt.Sprint(*getOfferingUpdatesOptions.Region)) + } + if getOfferingUpdatesOptions.ResourceGroupID != nil { + builder.AddQuery("resource_group_id", fmt.Sprint(*getOfferingUpdatesOptions.ResourceGroupID)) + } + if getOfferingUpdatesOptions.Namespace != nil { + builder.AddQuery("namespace", fmt.Sprint(*getOfferingUpdatesOptions.Namespace)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVersionUpdateDescriptor) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetOfferingAbout : Get version about information +// Get the about information, in markdown, for the current version. 
+func (catalogManagement *CatalogManagementV1) GetOfferingAbout(getOfferingAboutOptions *GetOfferingAboutOptions) (result *string, response *core.DetailedResponse, err error) { + return catalogManagement.GetOfferingAboutWithContext(context.Background(), getOfferingAboutOptions) +} + +// GetOfferingAboutWithContext is an alternate form of the GetOfferingAbout method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetOfferingAboutWithContext(ctx context.Context, getOfferingAboutOptions *GetOfferingAboutOptions) (result *string, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getOfferingAboutOptions, "getOfferingAboutOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getOfferingAboutOptions, "getOfferingAboutOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *getOfferingAboutOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/about`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOfferingAboutOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetOfferingAbout") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "text/markdown") + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, &result) + + return +} + +// GetOfferingLicense : Get version license content +// Get the license content for the specified license ID in the specified version. 
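+//
+// A sketch; both path parameters are required, and the result is the plain-text
+// license body:
+//
+//	license, _, err := service.GetOfferingLicense(&GetOfferingLicenseOptions{
+//		VersionLocID: core.StringPtr(versionLocID),
+//		LicenseID:    core.StringPtr(licenseID),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(*license)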
+func (catalogManagement *CatalogManagementV1) GetOfferingLicense(getOfferingLicenseOptions *GetOfferingLicenseOptions) (result *string, response *core.DetailedResponse, err error) { + return catalogManagement.GetOfferingLicenseWithContext(context.Background(), getOfferingLicenseOptions) +} + +// GetOfferingLicenseWithContext is an alternate form of the GetOfferingLicense method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetOfferingLicenseWithContext(ctx context.Context, getOfferingLicenseOptions *GetOfferingLicenseOptions) (result *string, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getOfferingLicenseOptions, "getOfferingLicenseOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getOfferingLicenseOptions, "getOfferingLicenseOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *getOfferingLicenseOptions.VersionLocID, + "license_id": *getOfferingLicenseOptions.LicenseID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/licenses/{license_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOfferingLicenseOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetOfferingLicense") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "text/plain") + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, &result) + + return +} + +// GetOfferingContainerImages : Get version's container images +// Get the list of container images associated with the specified version. The "image_manifest_url" property of the +// version should be the URL for the image manifest, and the operation will return that content. 
+func (catalogManagement *CatalogManagementV1) GetOfferingContainerImages(getOfferingContainerImagesOptions *GetOfferingContainerImagesOptions) (result *ImageManifest, response *core.DetailedResponse, err error) { + return catalogManagement.GetOfferingContainerImagesWithContext(context.Background(), getOfferingContainerImagesOptions) +} + +// GetOfferingContainerImagesWithContext is an alternate form of the GetOfferingContainerImages method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetOfferingContainerImagesWithContext(ctx context.Context, getOfferingContainerImagesOptions *GetOfferingContainerImagesOptions) (result *ImageManifest, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getOfferingContainerImagesOptions, "getOfferingContainerImagesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getOfferingContainerImagesOptions, "getOfferingContainerImagesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *getOfferingContainerImagesOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/containerImages`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOfferingContainerImagesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetOfferingContainerImages") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageManifest) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeprecateVersion : Deprecate version +// Deprecate the specified version. 
+func (catalogManagement *CatalogManagementV1) DeprecateVersion(deprecateVersionOptions *DeprecateVersionOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.DeprecateVersionWithContext(context.Background(), deprecateVersionOptions) +} + +// DeprecateVersionWithContext is an alternate form of the DeprecateVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeprecateVersionWithContext(ctx context.Context, deprecateVersionOptions *DeprecateVersionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deprecateVersionOptions, "deprecateVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deprecateVersionOptions, "deprecateVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *deprecateVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/deprecate`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deprecateVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeprecateVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// AccountPublishVersion : Publish version to account members +// Publish the specified version so it is viewable by account members. 
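+//
+// AccountPublishVersion, IBMPublishVersion, and PublicPublishVersion below all
+// take the same shape of call and target progressively wider audiences
+// (account members, IBMers in the public catalog, all public-catalog users).
+// One illustrative sketch covers all three:
+//   _, err := catalogManagementService.AccountPublishVersion(&AccountPublishVersionOptions{
+//     VersionLocID: core.StringPtr("<version-locator-id>"),
+//   })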
+func (catalogManagement *CatalogManagementV1) AccountPublishVersion(accountPublishVersionOptions *AccountPublishVersionOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.AccountPublishVersionWithContext(context.Background(), accountPublishVersionOptions) +} + +// AccountPublishVersionWithContext is an alternate form of the AccountPublishVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) AccountPublishVersionWithContext(ctx context.Context, accountPublishVersionOptions *AccountPublishVersionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(accountPublishVersionOptions, "accountPublishVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(accountPublishVersionOptions, "accountPublishVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *accountPublishVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/account-publish`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range accountPublishVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "AccountPublishVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// IBMPublishVersion : Publish version to IBMers in public catalog +// Publish the specified version so that it is visible to IBMers in the public catalog. 
+func (catalogManagement *CatalogManagementV1) IBMPublishVersion(ibmPublishVersionOptions *IBMPublishVersionOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.IBMPublishVersionWithContext(context.Background(), ibmPublishVersionOptions) +} + +// IBMPublishVersionWithContext is an alternate form of the IBMPublishVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) IBMPublishVersionWithContext(ctx context.Context, ibmPublishVersionOptions *IBMPublishVersionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(ibmPublishVersionOptions, "ibmPublishVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(ibmPublishVersionOptions, "ibmPublishVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *ibmPublishVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/ibm-publish`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range ibmPublishVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "IBMPublishVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// PublicPublishVersion : Publish version to all users in public catalog +// Publish the specified version so it is visible to all users in the public catalog. 
+func (catalogManagement *CatalogManagementV1) PublicPublishVersion(publicPublishVersionOptions *PublicPublishVersionOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.PublicPublishVersionWithContext(context.Background(), publicPublishVersionOptions) +} + +// PublicPublishVersionWithContext is an alternate form of the PublicPublishVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) PublicPublishVersionWithContext(ctx context.Context, publicPublishVersionOptions *PublicPublishVersionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(publicPublishVersionOptions, "publicPublishVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(publicPublishVersionOptions, "publicPublishVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *publicPublishVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/public-publish`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range publicPublishVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "PublicPublishVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// CommitVersion : Commit version +// Commit a working copy of the specified version. 
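+//
+// Typically invoked after editing a working copy obtained via
+// GetOfferingWorkingCopy (below). Illustrative sketch with a placeholder
+// locator:
+//   _, err := catalogManagementService.CommitVersion(&CommitVersionOptions{
+//     VersionLocID: core.StringPtr("<working-copy-version-locator-id>"),
+//   })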
+func (catalogManagement *CatalogManagementV1) CommitVersion(commitVersionOptions *CommitVersionOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.CommitVersionWithContext(context.Background(), commitVersionOptions) +} + +// CommitVersionWithContext is an alternate form of the CommitVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) CommitVersionWithContext(ctx context.Context, commitVersionOptions *CommitVersionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(commitVersionOptions, "commitVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(commitVersionOptions, "commitVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *commitVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/commit`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range commitVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "CommitVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// CopyVersion : Copy version to new target kind +// Copy the specified version to a new target kind within the same offering. 
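+//
+// Illustrative sketch; the tag and target-kind values are examples only:
+//   _, err := catalogManagementService.CopyVersion(&CopyVersionOptions{
+//     VersionLocID: core.StringPtr("<version-locator-id>"),
+//     TargetKinds:  []string{"roks"},
+//     Tags:         []string{"copied"},
+//   })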
+func (catalogManagement *CatalogManagementV1) CopyVersion(copyVersionOptions *CopyVersionOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.CopyVersionWithContext(context.Background(), copyVersionOptions) +} + +// CopyVersionWithContext is an alternate form of the CopyVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) CopyVersionWithContext(ctx context.Context, copyVersionOptions *CopyVersionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(copyVersionOptions, "copyVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(copyVersionOptions, "copyVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *copyVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/copy`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range copyVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "CopyVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if copyVersionOptions.Tags != nil { + body["tags"] = copyVersionOptions.Tags + } + if copyVersionOptions.TargetKinds != nil { + body["target_kinds"] = copyVersionOptions.TargetKinds + } + if copyVersionOptions.Content != nil { + body["content"] = copyVersionOptions.Content + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetOfferingWorkingCopy : Create working copy of version +// Create a working copy of the specified version. 
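+//
+// Illustrative sketch (the returned Version carries the working copy's own
+// locator, which follow-up calls such as CommitVersion would use):
+//   workingCopy, _, err := catalogManagementService.GetOfferingWorkingCopy(&GetOfferingWorkingCopyOptions{
+//     VersionLocID: core.StringPtr("<published-version-locator-id>"),
+//   })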
+func (catalogManagement *CatalogManagementV1) GetOfferingWorkingCopy(getOfferingWorkingCopyOptions *GetOfferingWorkingCopyOptions) (result *Version, response *core.DetailedResponse, err error) { + return catalogManagement.GetOfferingWorkingCopyWithContext(context.Background(), getOfferingWorkingCopyOptions) +} + +// GetOfferingWorkingCopyWithContext is an alternate form of the GetOfferingWorkingCopy method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetOfferingWorkingCopyWithContext(ctx context.Context, getOfferingWorkingCopyOptions *GetOfferingWorkingCopyOptions) (result *Version, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getOfferingWorkingCopyOptions, "getOfferingWorkingCopyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getOfferingWorkingCopyOptions, "getOfferingWorkingCopyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *getOfferingWorkingCopyOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/workingcopy`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOfferingWorkingCopyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetOfferingWorkingCopy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVersion) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetVersion : Get offering/kind/version 'branch' +// Get the Offering/Kind/Version 'branch' for the specified locator ID. 
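+//
+// Illustrative sketch; note the result is the enclosing Offering, not a bare
+// Version:
+//   offering, _, err := catalogManagementService.GetVersion(&GetVersionOptions{
+//     VersionLocID: core.StringPtr("<version-locator-id>"),
+//   })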
+func (catalogManagement *CatalogManagementV1) GetVersion(getVersionOptions *GetVersionOptions) (result *Offering, response *core.DetailedResponse, err error) { + return catalogManagement.GetVersionWithContext(context.Background(), getVersionOptions) +} + +// GetVersionWithContext is an alternate form of the GetVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetVersionWithContext(ctx context.Context, getVersionOptions *GetVersionOptions) (result *Offering, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getVersionOptions, "getVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getVersionOptions, "getVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *getVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOffering) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeleteVersion : Delete version +// Delete the specified version. If the version is an active version with a working copy, the working copy will be +// deleted as well. 
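+//
+// Illustrative sketch with a placeholder locator:
+//   response, err := catalogManagementService.DeleteVersion(&DeleteVersionOptions{
+//     VersionLocID: core.StringPtr("<version-locator-id>"),
+//   })
+//   if err == nil {
+//     fmt.Println("delete status:", response.StatusCode)
+//   }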
+func (catalogManagement *CatalogManagementV1) DeleteVersion(deleteVersionOptions *DeleteVersionOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.DeleteVersionWithContext(context.Background(), deleteVersionOptions) +} + +// DeleteVersionWithContext is an alternate form of the DeleteVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeleteVersionWithContext(ctx context.Context, deleteVersionOptions *DeleteVersionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteVersionOptions, "deleteVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteVersionOptions, "deleteVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *deleteVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeleteVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetCluster : Get kubernetes cluster +// Get the contents of the specified kubernetes cluster. +func (catalogManagement *CatalogManagementV1) GetCluster(getClusterOptions *GetClusterOptions) (result *ClusterInfo, response *core.DetailedResponse, err error) { + return catalogManagement.GetClusterWithContext(context.Background(), getClusterOptions) +} + +// GetClusterWithContext is an alternate form of the GetCluster method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetClusterWithContext(ctx context.Context, getClusterOptions *GetClusterOptions) (result *ClusterInfo, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getClusterOptions, "getClusterOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getClusterOptions, "getClusterOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "cluster_id": *getClusterOptions.ClusterID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/deploy/kubernetes/clusters/{cluster_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getClusterOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetCluster") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getClusterOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*getClusterOptions.XAuthRefreshToken)) + } + + builder.AddQuery("region", fmt.Sprint(*getClusterOptions.Region)) + + request, err := builder.Build() + if err 
!= nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalClusterInfo) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetNamespaces : Get cluster namespaces +// Get the namespaces associated with the specified kubernetes cluster. +func (catalogManagement *CatalogManagementV1) GetNamespaces(getNamespacesOptions *GetNamespacesOptions) (result *NamespaceSearchResult, response *core.DetailedResponse, err error) { + return catalogManagement.GetNamespacesWithContext(context.Background(), getNamespacesOptions) +} + +// GetNamespacesWithContext is an alternate form of the GetNamespaces method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetNamespacesWithContext(ctx context.Context, getNamespacesOptions *GetNamespacesOptions) (result *NamespaceSearchResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getNamespacesOptions, "getNamespacesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getNamespacesOptions, "getNamespacesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "cluster_id": *getNamespacesOptions.ClusterID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/deploy/kubernetes/clusters/{cluster_id}/namespaces`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getNamespacesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetNamespaces") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getNamespacesOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*getNamespacesOptions.XAuthRefreshToken)) + } + + builder.AddQuery("region", fmt.Sprint(*getNamespacesOptions.Region)) + if getNamespacesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*getNamespacesOptions.Limit)) + } + if getNamespacesOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*getNamespacesOptions.Offset)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNamespaceSearchResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeployOperators : Deploy operators +// Deploy operators on a kubernetes cluster. 
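+//
+// Illustrative sketch mirroring the request body assembled below; the refresh
+// token, cluster, region, and namespace values are placeholders:
+//   results, _, err := catalogManagementService.DeployOperators(&DeployOperatorsOptions{
+//     XAuthRefreshToken: core.StringPtr("<iam-refresh-token>"),
+//     ClusterID:         core.StringPtr("<cluster-id>"),
+//     Region:            core.StringPtr("us-south"),
+//     Namespaces:        []string{"openshift-operators"},
+//     VersionLocatorID:  core.StringPtr("<version-locator-id>"),
+//   })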
+func (catalogManagement *CatalogManagementV1) DeployOperators(deployOperatorsOptions *DeployOperatorsOptions) (result []OperatorDeployResult, response *core.DetailedResponse, err error) { + return catalogManagement.DeployOperatorsWithContext(context.Background(), deployOperatorsOptions) +} + +// DeployOperatorsWithContext is an alternate form of the DeployOperators method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeployOperatorsWithContext(ctx context.Context, deployOperatorsOptions *DeployOperatorsOptions) (result []OperatorDeployResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deployOperatorsOptions, "deployOperatorsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deployOperatorsOptions, "deployOperatorsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/deploy/kubernetes/olm/operator`, nil) + if err != nil { + return + } + + for headerName, headerValue := range deployOperatorsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeployOperators") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if deployOperatorsOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*deployOperatorsOptions.XAuthRefreshToken)) + } + + body := make(map[string]interface{}) + if deployOperatorsOptions.ClusterID != nil { + body["cluster_id"] = deployOperatorsOptions.ClusterID + } + if deployOperatorsOptions.Region != nil { + body["region"] = deployOperatorsOptions.Region + } + if deployOperatorsOptions.Namespaces != nil { + body["namespaces"] = deployOperatorsOptions.Namespaces + } + if deployOperatorsOptions.AllNamespaces != nil { + body["all_namespaces"] = deployOperatorsOptions.AllNamespaces + } + if deployOperatorsOptions.VersionLocatorID != nil { + body["version_locator_id"] = deployOperatorsOptions.VersionLocatorID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOperatorDeployResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ListOperators : List operators +// List the operators from a kubernetes cluster. 
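+//
+// Illustrative sketch; ClusterID, Region, and VersionLocatorID are added as
+// query parameters unconditionally below, so all three should be set:
+//   operators, _, err := catalogManagementService.ListOperators(&ListOperatorsOptions{
+//     XAuthRefreshToken: core.StringPtr("<iam-refresh-token>"),
+//     ClusterID:         core.StringPtr("<cluster-id>"),
+//     Region:            core.StringPtr("us-south"),
+//     VersionLocatorID:  core.StringPtr("<version-locator-id>"),
+//   })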
+func (catalogManagement *CatalogManagementV1) ListOperators(listOperatorsOptions *ListOperatorsOptions) (result []OperatorDeployResult, response *core.DetailedResponse, err error) { + return catalogManagement.ListOperatorsWithContext(context.Background(), listOperatorsOptions) +} + +// ListOperatorsWithContext is an alternate form of the ListOperators method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ListOperatorsWithContext(ctx context.Context, listOperatorsOptions *ListOperatorsOptions) (result []OperatorDeployResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listOperatorsOptions, "listOperatorsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listOperatorsOptions, "listOperatorsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/deploy/kubernetes/olm/operator`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listOperatorsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ListOperators") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if listOperatorsOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*listOperatorsOptions.XAuthRefreshToken)) + } + + builder.AddQuery("cluster_id", fmt.Sprint(*listOperatorsOptions.ClusterID)) + builder.AddQuery("region", fmt.Sprint(*listOperatorsOptions.Region)) + builder.AddQuery("version_locator_id", fmt.Sprint(*listOperatorsOptions.VersionLocatorID)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOperatorDeployResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ReplaceOperators : Update operators +// Update the operators on a kubernetes cluster. 
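+//
+// Takes the same request body as DeployOperators above; an illustrative call
+// with placeholder values:
+//   _, _, err := catalogManagementService.ReplaceOperators(&ReplaceOperatorsOptions{
+//     XAuthRefreshToken: core.StringPtr("<iam-refresh-token>"),
+//     ClusterID:         core.StringPtr("<cluster-id>"),
+//     Region:            core.StringPtr("us-south"),
+//     VersionLocatorID:  core.StringPtr("<version-locator-id>"),
+//   })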
+func (catalogManagement *CatalogManagementV1) ReplaceOperators(replaceOperatorsOptions *ReplaceOperatorsOptions) (result []OperatorDeployResult, response *core.DetailedResponse, err error) { + return catalogManagement.ReplaceOperatorsWithContext(context.Background(), replaceOperatorsOptions) +} + +// ReplaceOperatorsWithContext is an alternate form of the ReplaceOperators method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ReplaceOperatorsWithContext(ctx context.Context, replaceOperatorsOptions *ReplaceOperatorsOptions) (result []OperatorDeployResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceOperatorsOptions, "replaceOperatorsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceOperatorsOptions, "replaceOperatorsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/deploy/kubernetes/olm/operator`, nil) + if err != nil { + return + } + + for headerName, headerValue := range replaceOperatorsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ReplaceOperators") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if replaceOperatorsOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*replaceOperatorsOptions.XAuthRefreshToken)) + } + + body := make(map[string]interface{}) + if replaceOperatorsOptions.ClusterID != nil { + body["cluster_id"] = replaceOperatorsOptions.ClusterID + } + if replaceOperatorsOptions.Region != nil { + body["region"] = replaceOperatorsOptions.Region + } + if replaceOperatorsOptions.Namespaces != nil { + body["namespaces"] = replaceOperatorsOptions.Namespaces + } + if replaceOperatorsOptions.AllNamespaces != nil { + body["all_namespaces"] = replaceOperatorsOptions.AllNamespaces + } + if replaceOperatorsOptions.VersionLocatorID != nil { + body["version_locator_id"] = replaceOperatorsOptions.VersionLocatorID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOperatorDeployResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeleteOperators : Delete operators +// Delete operators from a kubernetes cluster. 
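+//
+// Illustrative sketch; as with ListOperators, the cluster, region, and
+// version-locator query parameters are added unconditionally:
+//   _, err := catalogManagementService.DeleteOperators(&DeleteOperatorsOptions{
+//     XAuthRefreshToken: core.StringPtr("<iam-refresh-token>"),
+//     ClusterID:         core.StringPtr("<cluster-id>"),
+//     Region:            core.StringPtr("us-south"),
+//     VersionLocatorID:  core.StringPtr("<version-locator-id>"),
+//   })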
+func (catalogManagement *CatalogManagementV1) DeleteOperators(deleteOperatorsOptions *DeleteOperatorsOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.DeleteOperatorsWithContext(context.Background(), deleteOperatorsOptions) +} + +// DeleteOperatorsWithContext is an alternate form of the DeleteOperators method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeleteOperatorsWithContext(ctx context.Context, deleteOperatorsOptions *DeleteOperatorsOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteOperatorsOptions, "deleteOperatorsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteOperatorsOptions, "deleteOperatorsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/deploy/kubernetes/olm/operator`, nil) + if err != nil { + return + } + + for headerName, headerValue := range deleteOperatorsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeleteOperators") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteOperatorsOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*deleteOperatorsOptions.XAuthRefreshToken)) + } + + builder.AddQuery("cluster_id", fmt.Sprint(*deleteOperatorsOptions.ClusterID)) + builder.AddQuery("region", fmt.Sprint(*deleteOperatorsOptions.Region)) + builder.AddQuery("version_locator_id", fmt.Sprint(*deleteOperatorsOptions.VersionLocatorID)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// InstallVersion : Install version +// Create an install for the specified version. 
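+//
+// Illustrative sketch; only a subset of the optional body fields below is
+// shown, and the override values are arbitrary placeholders:
+//   _, err := catalogManagementService.InstallVersion(&InstallVersionOptions{
+//     VersionLocID:      core.StringPtr("<version-locator-id>"),
+//     XAuthRefreshToken: core.StringPtr("<iam-refresh-token>"),
+//     ClusterID:         core.StringPtr("<cluster-id>"),
+//     Region:            core.StringPtr("us-south"),
+//     Namespace:         core.StringPtr("default"),
+//     OverrideValues:    map[string]interface{}{"replicas": 2},
+//   })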
+func (catalogManagement *CatalogManagementV1) InstallVersion(installVersionOptions *InstallVersionOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.InstallVersionWithContext(context.Background(), installVersionOptions) +} + +// InstallVersionWithContext is an alternate form of the InstallVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) InstallVersionWithContext(ctx context.Context, installVersionOptions *InstallVersionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(installVersionOptions, "installVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(installVersionOptions, "installVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *installVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/install`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range installVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "InstallVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + if installVersionOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*installVersionOptions.XAuthRefreshToken)) + } + + body := make(map[string]interface{}) + if installVersionOptions.ClusterID != nil { + body["cluster_id"] = installVersionOptions.ClusterID + } + if installVersionOptions.Region != nil { + body["region"] = installVersionOptions.Region + } + if installVersionOptions.Namespace != nil { + body["namespace"] = installVersionOptions.Namespace + } + if installVersionOptions.OverrideValues != nil { + body["override_values"] = installVersionOptions.OverrideValues + } + if installVersionOptions.EntitlementApikey != nil { + body["entitlement_apikey"] = installVersionOptions.EntitlementApikey + } + if installVersionOptions.Schematics != nil { + body["schematics"] = installVersionOptions.Schematics + } + if installVersionOptions.Script != nil { + body["script"] = installVersionOptions.Script + } + if installVersionOptions.ScriptID != nil { + body["script_id"] = installVersionOptions.ScriptID + } + if installVersionOptions.VersionLocatorID != nil { + body["version_locator_id"] = installVersionOptions.VersionLocatorID + } + if installVersionOptions.VcenterID != nil { + body["vcenter_id"] = installVersionOptions.VcenterID + } + if installVersionOptions.VcenterUser != nil { + body["vcenter_user"] = installVersionOptions.VcenterUser + } + if installVersionOptions.VcenterPassword != nil { + body["vcenter_password"] = installVersionOptions.VcenterPassword + } + if installVersionOptions.VcenterLocation != nil { + body["vcenter_location"] = installVersionOptions.VcenterLocation + } + if installVersionOptions.VcenterDatastore != nil { + body["vcenter_datastore"] = installVersionOptions.VcenterDatastore + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, 
nil) + + return +} + +// PreinstallVersion : Pre-install version +// Create a pre-install for the specified version. +func (catalogManagement *CatalogManagementV1) PreinstallVersion(preinstallVersionOptions *PreinstallVersionOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.PreinstallVersionWithContext(context.Background(), preinstallVersionOptions) +} + +// PreinstallVersionWithContext is an alternate form of the PreinstallVersion method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) PreinstallVersionWithContext(ctx context.Context, preinstallVersionOptions *PreinstallVersionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(preinstallVersionOptions, "preinstallVersionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(preinstallVersionOptions, "preinstallVersionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *preinstallVersionOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/preinstall`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range preinstallVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "PreinstallVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + if preinstallVersionOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*preinstallVersionOptions.XAuthRefreshToken)) + } + + body := make(map[string]interface{}) + if preinstallVersionOptions.ClusterID != nil { + body["cluster_id"] = preinstallVersionOptions.ClusterID + } + if preinstallVersionOptions.Region != nil { + body["region"] = preinstallVersionOptions.Region + } + if preinstallVersionOptions.Namespace != nil { + body["namespace"] = preinstallVersionOptions.Namespace + } + if preinstallVersionOptions.OverrideValues != nil { + body["override_values"] = preinstallVersionOptions.OverrideValues + } + if preinstallVersionOptions.EntitlementApikey != nil { + body["entitlement_apikey"] = preinstallVersionOptions.EntitlementApikey + } + if preinstallVersionOptions.Schematics != nil { + body["schematics"] = preinstallVersionOptions.Schematics + } + if preinstallVersionOptions.Script != nil { + body["script"] = preinstallVersionOptions.Script + } + if preinstallVersionOptions.ScriptID != nil { + body["script_id"] = preinstallVersionOptions.ScriptID + } + if preinstallVersionOptions.VersionLocatorID != nil { + body["version_locator_id"] = preinstallVersionOptions.VersionLocatorID + } + if preinstallVersionOptions.VcenterID != nil { + body["vcenter_id"] = preinstallVersionOptions.VcenterID + } + if preinstallVersionOptions.VcenterUser != nil { + body["vcenter_user"] = preinstallVersionOptions.VcenterUser + } + if preinstallVersionOptions.VcenterPassword != nil { + body["vcenter_password"] = preinstallVersionOptions.VcenterPassword + } + if preinstallVersionOptions.VcenterLocation != nil { + body["vcenter_location"] = preinstallVersionOptions.VcenterLocation + } + if preinstallVersionOptions.VcenterDatastore != nil { + 
body["vcenter_datastore"] = preinstallVersionOptions.VcenterDatastore + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetPreinstall : Get version pre-install status +// Get the pre-install status for the specified version. +func (catalogManagement *CatalogManagementV1) GetPreinstall(getPreinstallOptions *GetPreinstallOptions) (result *InstallStatus, response *core.DetailedResponse, err error) { + return catalogManagement.GetPreinstallWithContext(context.Background(), getPreinstallOptions) +} + +// GetPreinstallWithContext is an alternate form of the GetPreinstall method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetPreinstallWithContext(ctx context.Context, getPreinstallOptions *GetPreinstallOptions) (result *InstallStatus, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getPreinstallOptions, "getPreinstallOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getPreinstallOptions, "getPreinstallOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *getPreinstallOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/preinstall`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getPreinstallOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetPreinstall") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getPreinstallOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*getPreinstallOptions.XAuthRefreshToken)) + } + + if getPreinstallOptions.ClusterID != nil { + builder.AddQuery("cluster_id", fmt.Sprint(*getPreinstallOptions.ClusterID)) + } + if getPreinstallOptions.Region != nil { + builder.AddQuery("region", fmt.Sprint(*getPreinstallOptions.Region)) + } + if getPreinstallOptions.Namespace != nil { + builder.AddQuery("namespace", fmt.Sprint(*getPreinstallOptions.Namespace)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstallStatus) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ValidateInstall : Validate offering +// Validate the offering associated with the specified version. 
+func (catalogManagement *CatalogManagementV1) ValidateInstall(validateInstallOptions *ValidateInstallOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.ValidateInstallWithContext(context.Background(), validateInstallOptions) +} + +// ValidateInstallWithContext is an alternate form of the ValidateInstall method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ValidateInstallWithContext(ctx context.Context, validateInstallOptions *ValidateInstallOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(validateInstallOptions, "validateInstallOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(validateInstallOptions, "validateInstallOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *validateInstallOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/validation/install`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range validateInstallOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ValidateInstall") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + if validateInstallOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*validateInstallOptions.XAuthRefreshToken)) + } + + body := make(map[string]interface{}) + if validateInstallOptions.ClusterID != nil { + body["cluster_id"] = validateInstallOptions.ClusterID + } + if validateInstallOptions.Region != nil { + body["region"] = validateInstallOptions.Region + } + if validateInstallOptions.Namespace != nil { + body["namespace"] = validateInstallOptions.Namespace + } + if validateInstallOptions.OverrideValues != nil { + body["override_values"] = validateInstallOptions.OverrideValues + } + if validateInstallOptions.EntitlementApikey != nil { + body["entitlement_apikey"] = validateInstallOptions.EntitlementApikey + } + if validateInstallOptions.Schematics != nil { + body["schematics"] = validateInstallOptions.Schematics + } + if validateInstallOptions.Script != nil { + body["script"] = validateInstallOptions.Script + } + if validateInstallOptions.ScriptID != nil { + body["script_id"] = validateInstallOptions.ScriptID + } + if validateInstallOptions.VersionLocatorID != nil { + body["version_locator_id"] = validateInstallOptions.VersionLocatorID + } + if validateInstallOptions.VcenterID != nil { + body["vcenter_id"] = validateInstallOptions.VcenterID + } + if validateInstallOptions.VcenterUser != nil { + body["vcenter_user"] = validateInstallOptions.VcenterUser + } + if validateInstallOptions.VcenterPassword != nil { + body["vcenter_password"] = validateInstallOptions.VcenterPassword + } + if validateInstallOptions.VcenterLocation != nil { + body["vcenter_location"] = validateInstallOptions.VcenterLocation + } + if validateInstallOptions.VcenterDatastore != nil { + body["vcenter_datastore"] = validateInstallOptions.VcenterDatastore + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + 
response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetValidationStatus : Get offering install status +// Returns the install status for the specified offering version. +func (catalogManagement *CatalogManagementV1) GetValidationStatus(getValidationStatusOptions *GetValidationStatusOptions) (result *Validation, response *core.DetailedResponse, err error) { + return catalogManagement.GetValidationStatusWithContext(context.Background(), getValidationStatusOptions) +} + +// GetValidationStatusWithContext is an alternate form of the GetValidationStatus method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetValidationStatusWithContext(ctx context.Context, getValidationStatusOptions *GetValidationStatusOptions) (result *Validation, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getValidationStatusOptions, "getValidationStatusOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getValidationStatusOptions, "getValidationStatusOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *getValidationStatusOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/validation/install`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getValidationStatusOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetValidationStatus") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getValidationStatusOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*getValidationStatusOptions.XAuthRefreshToken)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalValidation) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetOverrideValues : Get override values +// Returns the override values that were used to validate the specified offering version. 
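+//
+// Illustrative sketch; note the result is a raw map rather than a typed model:
+//   values, _, err := catalogManagementService.GetOverrideValues(&GetOverrideValuesOptions{
+//     VersionLocID: core.StringPtr("<version-locator-id>"),
+//   })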
+func (catalogManagement *CatalogManagementV1) GetOverrideValues(getOverrideValuesOptions *GetOverrideValuesOptions) (result map[string]interface{}, response *core.DetailedResponse, err error) { + return catalogManagement.GetOverrideValuesWithContext(context.Background(), getOverrideValuesOptions) +} + +// GetOverrideValuesWithContext is an alternate form of the GetOverrideValues method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetOverrideValuesWithContext(ctx context.Context, getOverrideValuesOptions *GetOverrideValuesOptions) (result map[string]interface{}, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getOverrideValuesOptions, "getOverrideValuesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getOverrideValuesOptions, "getOverrideValuesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "version_loc_id": *getOverrideValuesOptions.VersionLocID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/versions/{version_loc_id}/validation/overridevalues`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOverrideValuesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetOverrideValues") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, &result) + + return +} + +// SearchObjects : List objects across catalogs +// List the available objects from both public and private catalogs. These copies cannot be used for updating. They are +// not complete and only return what is visible to the caller. 
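+//
+// Illustrative sketch; the query string and paging values are placeholders:
+//   found, _, err := catalogManagementService.SearchObjects(&SearchObjectsOptions{
+//     Query:  core.StringPtr("name: <object-name>"),
+//     Limit:  core.Int64Ptr(100),
+//     Offset: core.Int64Ptr(0),
+//   })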
+func (catalogManagement *CatalogManagementV1) SearchObjects(searchObjectsOptions *SearchObjectsOptions) (result *ObjectSearchResult, response *core.DetailedResponse, err error) { + return catalogManagement.SearchObjectsWithContext(context.Background(), searchObjectsOptions) +} + +// SearchObjectsWithContext is an alternate form of the SearchObjects method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) SearchObjectsWithContext(ctx context.Context, searchObjectsOptions *SearchObjectsOptions) (result *ObjectSearchResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(searchObjectsOptions, "searchObjectsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(searchObjectsOptions, "searchObjectsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/objects`, nil) + if err != nil { + return + } + + for headerName, headerValue := range searchObjectsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "SearchObjects") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("query", fmt.Sprint(*searchObjectsOptions.Query)) + if searchObjectsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*searchObjectsOptions.Limit)) + } + if searchObjectsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*searchObjectsOptions.Offset)) + } + if searchObjectsOptions.Collapse != nil { + builder.AddQuery("collapse", fmt.Sprint(*searchObjectsOptions.Collapse)) + } + if searchObjectsOptions.Digest != nil { + builder.AddQuery("digest", fmt.Sprint(*searchObjectsOptions.Digest)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalObjectSearchResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ListObjects : List objects within a catalog +// List the available objects within the specified catalog. 
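+//
+// Illustrative sketch; the name and sort filters below are optional:
+//   objects, _, err := catalogManagementService.ListObjects(&ListObjectsOptions{
+//     CatalogIdentifier: core.StringPtr("<catalog-id>"),
+//     Limit:             core.Int64Ptr(100),
+//   })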
+func (catalogManagement *CatalogManagementV1) ListObjects(listObjectsOptions *ListObjectsOptions) (result *ObjectListResult, response *core.DetailedResponse, err error) { + return catalogManagement.ListObjectsWithContext(context.Background(), listObjectsOptions) +} + +// ListObjectsWithContext is an alternate form of the ListObjects method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ListObjectsWithContext(ctx context.Context, listObjectsOptions *ListObjectsOptions) (result *ObjectListResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listObjectsOptions, "listObjectsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listObjectsOptions, "listObjectsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *listObjectsOptions.CatalogIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listObjectsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ListObjects") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listObjectsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listObjectsOptions.Limit)) + } + if listObjectsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listObjectsOptions.Offset)) + } + if listObjectsOptions.Name != nil { + builder.AddQuery("name", fmt.Sprint(*listObjectsOptions.Name)) + } + if listObjectsOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listObjectsOptions.Sort)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalObjectListResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// CreateObject : Create catalog object +// Create an object with a specific catalog. 
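+//
+// Illustrative sketch; field values are placeholders and only a subset of the
+// writable fields handled below is shown:
+//   object, _, err := catalogManagementService.CreateObject(&CreateObjectOptions{
+//     CatalogIdentifier: core.StringPtr("<catalog-id>"),
+//     Name:              core.StringPtr("example-object"),
+//     Kind:              core.StringPtr("<object-kind>"),
+//     ParentID:          core.StringPtr("us-south"),
+//   })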
+func (catalogManagement *CatalogManagementV1) CreateObject(createObjectOptions *CreateObjectOptions) (result *CatalogObject, response *core.DetailedResponse, err error) { + return catalogManagement.CreateObjectWithContext(context.Background(), createObjectOptions) +} + +// CreateObjectWithContext is an alternate form of the CreateObject method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) CreateObjectWithContext(ctx context.Context, createObjectOptions *CreateObjectOptions) (result *CatalogObject, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createObjectOptions, "createObjectOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createObjectOptions, "createObjectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *createObjectOptions.CatalogIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createObjectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "CreateObject") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createObjectOptions.ID != nil { + body["id"] = createObjectOptions.ID + } + if createObjectOptions.Name != nil { + body["name"] = createObjectOptions.Name + } + if createObjectOptions.Rev != nil { + body["_rev"] = createObjectOptions.Rev + } + if createObjectOptions.CRN != nil { + body["crn"] = createObjectOptions.CRN + } + if createObjectOptions.URL != nil { + body["url"] = createObjectOptions.URL + } + if createObjectOptions.ParentID != nil { + body["parent_id"] = createObjectOptions.ParentID + } + if createObjectOptions.LabelI18n != nil { + body["label_i18n"] = createObjectOptions.LabelI18n + } + if createObjectOptions.Label != nil { + body["label"] = createObjectOptions.Label + } + if createObjectOptions.Tags != nil { + body["tags"] = createObjectOptions.Tags + } + if createObjectOptions.Created != nil { + body["created"] = createObjectOptions.Created + } + if createObjectOptions.Updated != nil { + body["updated"] = createObjectOptions.Updated + } + if createObjectOptions.ShortDescription != nil { + body["short_description"] = createObjectOptions.ShortDescription + } + if createObjectOptions.ShortDescriptionI18n != nil { + body["short_description_i18n"] = createObjectOptions.ShortDescriptionI18n + } + if createObjectOptions.Kind != nil { + body["kind"] = createObjectOptions.Kind + } + if createObjectOptions.Publish != nil { + body["publish"] = createObjectOptions.Publish + } + if createObjectOptions.State != nil { + body["state"] = createObjectOptions.State + } + if createObjectOptions.CatalogID != nil { + body["catalog_id"] = createObjectOptions.CatalogID + } + if createObjectOptions.CatalogName != nil { + body["catalog_name"] = createObjectOptions.CatalogName + } + if createObjectOptions.Data != nil { + body["data"] = createObjectOptions.Data + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + 
} + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCatalogObject) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetObject : Get catalog object +// Get the specified object from within the specified catalog. +func (catalogManagement *CatalogManagementV1) GetObject(getObjectOptions *GetObjectOptions) (result *CatalogObject, response *core.DetailedResponse, err error) { + return catalogManagement.GetObjectWithContext(context.Background(), getObjectOptions) +} + +// GetObjectWithContext is an alternate form of the GetObject method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetObjectWithContext(ctx context.Context, getObjectOptions *GetObjectOptions) (result *CatalogObject, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getObjectOptions, "getObjectOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getObjectOptions, "getObjectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *getObjectOptions.CatalogIdentifier, + "object_identifier": *getObjectOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getObjectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetObject") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCatalogObject) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ReplaceObject : Update catalog object +// Update an object within a specific catalog. 
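+//
+// A minimal usage sketch, assuming an authenticated `catalogManagement` client
+// and that the generated NewReplaceObjectOptions constructor and Set* helpers
+// follow the same pattern as the other *Options types in this file; all
+// identifiers are placeholders. Because the object carries a Cloudant-style
+// `_rev`, an update typically echoes back the ID and revision of the object
+// being replaced:
+//
+//   object, _, err := catalogManagement.GetObject(
+//       catalogManagement.NewGetObjectOptions("myCatalogID", "myObjectID"))
+//   if err != nil {
+//       panic(err)
+//   }
+//   replaceOptions := catalogManagement.NewReplaceObjectOptions("myCatalogID", "myObjectID")
+//   replaceOptions.SetID(*object.ID)
+//   replaceOptions.SetRev(*object.Rev)
+//   replaceOptions.SetLabel("Updated label")
+//   object, _, err = catalogManagement.ReplaceObject(replaceOptions)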
+func (catalogManagement *CatalogManagementV1) ReplaceObject(replaceObjectOptions *ReplaceObjectOptions) (result *CatalogObject, response *core.DetailedResponse, err error) { + return catalogManagement.ReplaceObjectWithContext(context.Background(), replaceObjectOptions) +} + +// ReplaceObjectWithContext is an alternate form of the ReplaceObject method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) ReplaceObjectWithContext(ctx context.Context, replaceObjectOptions *ReplaceObjectOptions) (result *CatalogObject, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceObjectOptions, "replaceObjectOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceObjectOptions, "replaceObjectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *replaceObjectOptions.CatalogIdentifier, + "object_identifier": *replaceObjectOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range replaceObjectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "ReplaceObject") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if replaceObjectOptions.ID != nil { + body["id"] = replaceObjectOptions.ID + } + if replaceObjectOptions.Name != nil { + body["name"] = replaceObjectOptions.Name + } + if replaceObjectOptions.Rev != nil { + body["_rev"] = replaceObjectOptions.Rev + } + if replaceObjectOptions.CRN != nil { + body["crn"] = replaceObjectOptions.CRN + } + if replaceObjectOptions.URL != nil { + body["url"] = replaceObjectOptions.URL + } + if replaceObjectOptions.ParentID != nil { + body["parent_id"] = replaceObjectOptions.ParentID + } + if replaceObjectOptions.LabelI18n != nil { + body["label_i18n"] = replaceObjectOptions.LabelI18n + } + if replaceObjectOptions.Label != nil { + body["label"] = replaceObjectOptions.Label + } + if replaceObjectOptions.Tags != nil { + body["tags"] = replaceObjectOptions.Tags + } + if replaceObjectOptions.Created != nil { + body["created"] = replaceObjectOptions.Created + } + if replaceObjectOptions.Updated != nil { + body["updated"] = replaceObjectOptions.Updated + } + if replaceObjectOptions.ShortDescription != nil { + body["short_description"] = replaceObjectOptions.ShortDescription + } + if replaceObjectOptions.ShortDescriptionI18n != nil { + body["short_description_i18n"] = replaceObjectOptions.ShortDescriptionI18n + } + if replaceObjectOptions.Kind != nil { + body["kind"] = replaceObjectOptions.Kind + } + if replaceObjectOptions.Publish != nil { + body["publish"] = replaceObjectOptions.Publish + } + if replaceObjectOptions.State != nil { + body["state"] = replaceObjectOptions.State + } + if replaceObjectOptions.CatalogID != nil { + body["catalog_id"] = replaceObjectOptions.CatalogID + } + if replaceObjectOptions.CatalogName != nil { + body["catalog_name"] = replaceObjectOptions.CatalogName + } + if 
replaceObjectOptions.Data != nil { + body["data"] = replaceObjectOptions.Data + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCatalogObject) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeleteObject : Delete catalog object +// Delete a specific object within a specific catalog. +func (catalogManagement *CatalogManagementV1) DeleteObject(deleteObjectOptions *DeleteObjectOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.DeleteObjectWithContext(context.Background(), deleteObjectOptions) +} + +// DeleteObjectWithContext is an alternate form of the DeleteObject method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeleteObjectWithContext(ctx context.Context, deleteObjectOptions *DeleteObjectOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteObjectOptions, "deleteObjectOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteObjectOptions, "deleteObjectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *deleteObjectOptions.CatalogIdentifier, + "object_identifier": *deleteObjectOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteObjectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeleteObject") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetObjectAudit : Get catalog object audit log +// Get the audit log associated with a specific catalog object. 
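+//
+// A minimal usage sketch, assuming a generated NewGetObjectAuditOptions
+// constructor matching the other *Options types in this file; identifiers are
+// placeholders and nil checks on the pointer fields are omitted for brevity:
+//
+//   auditOptions := catalogManagement.NewGetObjectAuditOptions("myCatalogID", "myObjectID")
+//   auditLog, _, err := catalogManagement.GetObjectAudit(auditOptions)
+//   if err != nil {
+//       panic(err)
+//   }
+//   for _, record := range auditLog.List {
+//       fmt.Printf("%s: %s\n", *record.ChangeType, *record.Message)
+//   }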
+func (catalogManagement *CatalogManagementV1) GetObjectAudit(getObjectAuditOptions *GetObjectAuditOptions) (result *AuditLog, response *core.DetailedResponse, err error) { + return catalogManagement.GetObjectAuditWithContext(context.Background(), getObjectAuditOptions) +} + +// GetObjectAuditWithContext is an alternate form of the GetObjectAudit method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetObjectAuditWithContext(ctx context.Context, getObjectAuditOptions *GetObjectAuditOptions) (result *AuditLog, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getObjectAuditOptions, "getObjectAuditOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getObjectAuditOptions, "getObjectAuditOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *getObjectAuditOptions.CatalogIdentifier, + "object_identifier": *getObjectAuditOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/audit`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getObjectAuditOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetObjectAudit") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAuditLog) + if err != nil { + return + } + response.Result = result + } + + return +} + +// AccountPublishObject : Publish object to account +// Publish a catalog object to account. 
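+//
+// A minimal usage sketch, using the NewAccountPublishObjectOptions constructor
+// defined later in this file; the authenticated `catalogManagement` client and
+// identifiers are placeholders. The Shared/IBM/Public publish methods that
+// follow have the same call shape:
+//
+//   publishOptions := catalogManagement.NewAccountPublishObjectOptions("myCatalogID", "myObjectID")
+//   response, err := catalogManagement.AccountPublishObject(publishOptions)
+//   if err != nil {
+//       panic(err)
+//   }
+//   fmt.Println(response.StatusCode)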
+func (catalogManagement *CatalogManagementV1) AccountPublishObject(accountPublishObjectOptions *AccountPublishObjectOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.AccountPublishObjectWithContext(context.Background(), accountPublishObjectOptions) +} + +// AccountPublishObjectWithContext is an alternate form of the AccountPublishObject method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) AccountPublishObjectWithContext(ctx context.Context, accountPublishObjectOptions *AccountPublishObjectOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(accountPublishObjectOptions, "accountPublishObjectOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(accountPublishObjectOptions, "accountPublishObjectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *accountPublishObjectOptions.CatalogIdentifier, + "object_identifier": *accountPublishObjectOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/account-publish`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range accountPublishObjectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "AccountPublishObject") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// SharedPublishObject : Publish object to share with allow list +// Publish the specified object so that it is visible to those in the allow list. 
+func (catalogManagement *CatalogManagementV1) SharedPublishObject(sharedPublishObjectOptions *SharedPublishObjectOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.SharedPublishObjectWithContext(context.Background(), sharedPublishObjectOptions) +} + +// SharedPublishObjectWithContext is an alternate form of the SharedPublishObject method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) SharedPublishObjectWithContext(ctx context.Context, sharedPublishObjectOptions *SharedPublishObjectOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(sharedPublishObjectOptions, "sharedPublishObjectOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(sharedPublishObjectOptions, "sharedPublishObjectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *sharedPublishObjectOptions.CatalogIdentifier, + "object_identifier": *sharedPublishObjectOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/shared-publish`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range sharedPublishObjectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "SharedPublishObject") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// IBMPublishObject : Publish object to share with IBMers +// Publish the specified object so that it is visible to IBMers in the public catalog. 
+func (catalogManagement *CatalogManagementV1) IBMPublishObject(ibmPublishObjectOptions *IBMPublishObjectOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.IBMPublishObjectWithContext(context.Background(), ibmPublishObjectOptions) +} + +// IBMPublishObjectWithContext is an alternate form of the IBMPublishObject method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) IBMPublishObjectWithContext(ctx context.Context, ibmPublishObjectOptions *IBMPublishObjectOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(ibmPublishObjectOptions, "ibmPublishObjectOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(ibmPublishObjectOptions, "ibmPublishObjectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *ibmPublishObjectOptions.CatalogIdentifier, + "object_identifier": *ibmPublishObjectOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/ibm-publish`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range ibmPublishObjectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "IBMPublishObject") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// PublicPublishObject : Publish object to share with all users +// Publish the specified object so it is visible to all users in the public catalog. 
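+//
+// A minimal sketch of the *WithContext form that every method in this file
+// exposes, here with a caller-supplied timeout; it assumes a generated
+// NewPublicPublishObjectOptions constructor and imports of context and time.
+// Identifiers are placeholders:
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//   defer cancel()
+//   publishOptions := catalogManagement.NewPublicPublishObjectOptions("myCatalogID", "myObjectID")
+//   response, err := catalogManagement.PublicPublishObjectWithContext(ctx, publishOptions)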
+func (catalogManagement *CatalogManagementV1) PublicPublishObject(publicPublishObjectOptions *PublicPublishObjectOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.PublicPublishObjectWithContext(context.Background(), publicPublishObjectOptions) +} + +// PublicPublishObjectWithContext is an alternate form of the PublicPublishObject method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) PublicPublishObjectWithContext(ctx context.Context, publicPublishObjectOptions *PublicPublishObjectOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(publicPublishObjectOptions, "publicPublishObjectOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(publicPublishObjectOptions, "publicPublishObjectOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *publicPublishObjectOptions.CatalogIdentifier, + "object_identifier": *publicPublishObjectOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/public-publish`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range publicPublishObjectOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "PublicPublishObject") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// CreateObjectAccess : Add account ID to object access list +// Add an account ID to an object's access list. 
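+//
+// A minimal usage sketch, assuming a generated NewCreateObjectAccessOptions
+// constructor that takes the three path parameters used above; identifiers are
+// placeholders. DeleteObjectAccess below mirrors this call shape:
+//
+//   accessOptions := catalogManagement.NewCreateObjectAccessOptions("myCatalogID", "myObjectID", "myAccountID")
+//   response, err := catalogManagement.CreateObjectAccess(accessOptions)
+//   if err != nil {
+//       panic(err)
+//   }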
+func (catalogManagement *CatalogManagementV1) CreateObjectAccess(createObjectAccessOptions *CreateObjectAccessOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.CreateObjectAccessWithContext(context.Background(), createObjectAccessOptions) +} + +// CreateObjectAccessWithContext is an alternate form of the CreateObjectAccess method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) CreateObjectAccessWithContext(ctx context.Context, createObjectAccessOptions *CreateObjectAccessOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createObjectAccessOptions, "createObjectAccessOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createObjectAccessOptions, "createObjectAccessOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *createObjectAccessOptions.CatalogIdentifier, + "object_identifier": *createObjectAccessOptions.ObjectIdentifier, + "account_identifier": *createObjectAccessOptions.AccountIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/access/{account_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createObjectAccessOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "CreateObjectAccess") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetObjectAccess : Check for account ID in object access list +// Determine if an account ID is in an object's access list. 
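+//
+// A minimal usage sketch, assuming a generated NewGetObjectAccessOptions
+// constructor; identifiers are placeholders. A successful response returns the
+// ObjectAccess record, while a missing entry typically surfaces as a non-2xx
+// error from the core request handler:
+//
+//   getAccessOptions := catalogManagement.NewGetObjectAccessOptions("myCatalogID", "myObjectID", "myAccountID")
+//   objectAccess, response, err := catalogManagement.GetObjectAccess(getAccessOptions)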
+func (catalogManagement *CatalogManagementV1) GetObjectAccess(getObjectAccessOptions *GetObjectAccessOptions) (result *ObjectAccess, response *core.DetailedResponse, err error) { + return catalogManagement.GetObjectAccessWithContext(context.Background(), getObjectAccessOptions) +} + +// GetObjectAccessWithContext is an alternate form of the GetObjectAccess method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetObjectAccessWithContext(ctx context.Context, getObjectAccessOptions *GetObjectAccessOptions) (result *ObjectAccess, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getObjectAccessOptions, "getObjectAccessOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getObjectAccessOptions, "getObjectAccessOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *getObjectAccessOptions.CatalogIdentifier, + "object_identifier": *getObjectAccessOptions.ObjectIdentifier, + "account_identifier": *getObjectAccessOptions.AccountIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/access/{account_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getObjectAccessOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetObjectAccess") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalObjectAccess) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeleteObjectAccess : Remove account ID from object access list +// Delete the specified account ID from the specified object's access list. 
+func (catalogManagement *CatalogManagementV1) DeleteObjectAccess(deleteObjectAccessOptions *DeleteObjectAccessOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.DeleteObjectAccessWithContext(context.Background(), deleteObjectAccessOptions) +} + +// DeleteObjectAccessWithContext is an alternate form of the DeleteObjectAccess method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeleteObjectAccessWithContext(ctx context.Context, deleteObjectAccessOptions *DeleteObjectAccessOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteObjectAccessOptions, "deleteObjectAccessOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteObjectAccessOptions, "deleteObjectAccessOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *deleteObjectAccessOptions.CatalogIdentifier, + "object_identifier": *deleteObjectAccessOptions.ObjectIdentifier, + "account_identifier": *deleteObjectAccessOptions.AccountIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/access/{account_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteObjectAccessOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeleteObjectAccess") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// GetObjectAccessList : Get object access list +// Get the access list associated with the specified object. 
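+//
+// A minimal usage sketch showing the limit/offset paging parameters handled
+// above; it assumes a generated NewGetObjectAccessListOptions constructor and
+// Set* helpers. Identifiers are placeholders:
+//
+//   listOptions := catalogManagement.NewGetObjectAccessListOptions("myCatalogID", "myObjectID")
+//   listOptions.SetLimit(50)
+//   listOptions.SetOffset(0)
+//   accessList, _, err := catalogManagement.GetObjectAccessList(listOptions)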
+func (catalogManagement *CatalogManagementV1) GetObjectAccessList(getObjectAccessListOptions *GetObjectAccessListOptions) (result *ObjectAccessListResult, response *core.DetailedResponse, err error) { + return catalogManagement.GetObjectAccessListWithContext(context.Background(), getObjectAccessListOptions) +} + +// GetObjectAccessListWithContext is an alternate form of the GetObjectAccessList method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetObjectAccessListWithContext(ctx context.Context, getObjectAccessListOptions *GetObjectAccessListOptions) (result *ObjectAccessListResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getObjectAccessListOptions, "getObjectAccessListOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getObjectAccessListOptions, "getObjectAccessListOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *getObjectAccessListOptions.CatalogIdentifier, + "object_identifier": *getObjectAccessListOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/access`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getObjectAccessListOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetObjectAccessList") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getObjectAccessListOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*getObjectAccessListOptions.Limit)) + } + if getObjectAccessListOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*getObjectAccessListOptions.Offset)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalObjectAccessListResult) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeleteObjectAccessList : Delete accounts from object access list +// Delete all or a set of accounts from an object's access list. 
+func (catalogManagement *CatalogManagementV1) DeleteObjectAccessList(deleteObjectAccessListOptions *DeleteObjectAccessListOptions) (result *AccessListBulkResponse, response *core.DetailedResponse, err error) { + return catalogManagement.DeleteObjectAccessListWithContext(context.Background(), deleteObjectAccessListOptions) +} + +// DeleteObjectAccessListWithContext is an alternate form of the DeleteObjectAccessList method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeleteObjectAccessListWithContext(ctx context.Context, deleteObjectAccessListOptions *DeleteObjectAccessListOptions) (result *AccessListBulkResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteObjectAccessListOptions, "deleteObjectAccessListOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteObjectAccessListOptions, "deleteObjectAccessListOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *deleteObjectAccessListOptions.CatalogIdentifier, + "object_identifier": *deleteObjectAccessListOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/access`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteObjectAccessListOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeleteObjectAccessList") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + _, err = builder.SetBodyContentJSON(deleteObjectAccessListOptions.Accounts) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAccessListBulkResponse) + if err != nil { + return + } + response.Result = result + } + + return +} + +// AddObjectAccessList : Add accounts to object access list +// Add one or more accounts to the specified object's access list. 
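+//
+// A minimal usage sketch, using the NewAddObjectAccessListOptions constructor
+// defined later in this file; identifiers are placeholders. The accounts slice
+// is sent as the JSON body, and per-account failures are reported in the
+// AccessListBulkResponse.Errors map rather than as a request error.
+// DeleteObjectAccessList above accepts the same accounts payload:
+//
+//   addOptions := catalogManagement.NewAddObjectAccessListOptions("myCatalogID", "myObjectID", []string{"accountA", "accountB"})
+//   bulkResponse, _, err := catalogManagement.AddObjectAccessList(addOptions)
+//   if err != nil {
+//       panic(err)
+//   }
+//   for account, problem := range bulkResponse.Errors {
+//       fmt.Printf("%s: %s\n", account, problem)
+//   }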
+func (catalogManagement *CatalogManagementV1) AddObjectAccessList(addObjectAccessListOptions *AddObjectAccessListOptions) (result *AccessListBulkResponse, response *core.DetailedResponse, err error) { + return catalogManagement.AddObjectAccessListWithContext(context.Background(), addObjectAccessListOptions) +} + +// AddObjectAccessListWithContext is an alternate form of the AddObjectAccessList method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) AddObjectAccessListWithContext(ctx context.Context, addObjectAccessListOptions *AddObjectAccessListOptions) (result *AccessListBulkResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(addObjectAccessListOptions, "addObjectAccessListOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(addObjectAccessListOptions, "addObjectAccessListOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "catalog_identifier": *addObjectAccessListOptions.CatalogIdentifier, + "object_identifier": *addObjectAccessListOptions.ObjectIdentifier, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/catalogs/{catalog_identifier}/objects/{object_identifier}/access`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range addObjectAccessListOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "AddObjectAccessList") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + _, err = builder.SetBodyContentJSON(addObjectAccessListOptions.Accounts) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAccessListBulkResponse) + if err != nil { + return + } + response.Result = result + } + + return +} + +// CreateOfferingInstance : Create an offering resource instance +// Provision a new offering in a given account, and return its resource instance. 
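+//
+// A minimal usage sketch; note the X-Auth-Refresh-Token header handled above,
+// which this call requires in addition to the service authenticator. The
+// constructor signature and Set* helpers are assumed to follow the generated
+// pattern, and every value below is a placeholder:
+//
+//   instanceOptions := catalogManagement.NewCreateOfferingInstanceOptions(refreshToken)
+//   instanceOptions.SetCatalogID("myCatalogID")
+//   instanceOptions.SetOfferingID("myOfferingID")
+//   instanceOptions.SetKindFormat("operator")
+//   instanceOptions.SetVersion("0.0.2")
+//   instanceOptions.SetClusterID("myClusterID")
+//   instanceOptions.SetClusterRegion("us-south")
+//   offeringInstance, _, err := catalogManagement.CreateOfferingInstance(instanceOptions)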
+func (catalogManagement *CatalogManagementV1) CreateOfferingInstance(createOfferingInstanceOptions *CreateOfferingInstanceOptions) (result *OfferingInstance, response *core.DetailedResponse, err error) { + return catalogManagement.CreateOfferingInstanceWithContext(context.Background(), createOfferingInstanceOptions) +} + +// CreateOfferingInstanceWithContext is an alternate form of the CreateOfferingInstance method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) CreateOfferingInstanceWithContext(ctx context.Context, createOfferingInstanceOptions *CreateOfferingInstanceOptions) (result *OfferingInstance, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createOfferingInstanceOptions, "createOfferingInstanceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createOfferingInstanceOptions, "createOfferingInstanceOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/instances/offerings`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createOfferingInstanceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "CreateOfferingInstance") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createOfferingInstanceOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*createOfferingInstanceOptions.XAuthRefreshToken)) + } + + body := make(map[string]interface{}) + if createOfferingInstanceOptions.ID != nil { + body["id"] = createOfferingInstanceOptions.ID + } + if createOfferingInstanceOptions.Rev != nil { + body["_rev"] = createOfferingInstanceOptions.Rev + } + if createOfferingInstanceOptions.URL != nil { + body["url"] = createOfferingInstanceOptions.URL + } + if createOfferingInstanceOptions.CRN != nil { + body["crn"] = createOfferingInstanceOptions.CRN + } + if createOfferingInstanceOptions.Label != nil { + body["label"] = createOfferingInstanceOptions.Label + } + if createOfferingInstanceOptions.CatalogID != nil { + body["catalog_id"] = createOfferingInstanceOptions.CatalogID + } + if createOfferingInstanceOptions.OfferingID != nil { + body["offering_id"] = createOfferingInstanceOptions.OfferingID + } + if createOfferingInstanceOptions.KindFormat != nil { + body["kind_format"] = createOfferingInstanceOptions.KindFormat + } + if createOfferingInstanceOptions.Version != nil { + body["version"] = createOfferingInstanceOptions.Version + } + if createOfferingInstanceOptions.ClusterID != nil { + body["cluster_id"] = createOfferingInstanceOptions.ClusterID + } + if createOfferingInstanceOptions.ClusterRegion != nil { + body["cluster_region"] = createOfferingInstanceOptions.ClusterRegion + } + if createOfferingInstanceOptions.ClusterNamespaces != nil { + body["cluster_namespaces"] = createOfferingInstanceOptions.ClusterNamespaces + } + if createOfferingInstanceOptions.ClusterAllNamespaces != nil { + body["cluster_all_namespaces"] = createOfferingInstanceOptions.ClusterAllNamespaces + } + if createOfferingInstanceOptions.SchematicsWorkspaceID != nil { + body["schematics_workspace_id"] = 
createOfferingInstanceOptions.SchematicsWorkspaceID + } + if createOfferingInstanceOptions.ResourceGroupID != nil { + body["resource_group_id"] = createOfferingInstanceOptions.ResourceGroupID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOfferingInstance) + if err != nil { + return + } + response.Result = result + } + + return +} + +// GetOfferingInstance : Get Offering Instance +// Get the resource associated with an installed offering instance. +func (catalogManagement *CatalogManagementV1) GetOfferingInstance(getOfferingInstanceOptions *GetOfferingInstanceOptions) (result *OfferingInstance, response *core.DetailedResponse, err error) { + return catalogManagement.GetOfferingInstanceWithContext(context.Background(), getOfferingInstanceOptions) +} + +// GetOfferingInstanceWithContext is an alternate form of the GetOfferingInstance method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) GetOfferingInstanceWithContext(ctx context.Context, getOfferingInstanceOptions *GetOfferingInstanceOptions) (result *OfferingInstance, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getOfferingInstanceOptions, "getOfferingInstanceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getOfferingInstanceOptions, "getOfferingInstanceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_identifier": *getOfferingInstanceOptions.InstanceIdentifier, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/instances/offerings/{instance_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOfferingInstanceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "GetOfferingInstance") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOfferingInstance) + if err != nil { + return + } + response.Result = result + } + + return +} + +// PutOfferingInstance : Update Offering Instance +// Update an installed offering instance. 
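+//
+// A minimal read-modify-write sketch: the Cloudant-style `_rev` of the current
+// instance is carried into the update, mirroring the `_rev` body field handled
+// below. The constructors and Set* helpers are assumed to follow the generated
+// pattern; all values are placeholders:
+//
+//   current, _, err := catalogManagement.GetOfferingInstance(
+//       catalogManagement.NewGetOfferingInstanceOptions("myInstanceID"))
+//   if err != nil {
+//       panic(err)
+//   }
+//   putOptions := catalogManagement.NewPutOfferingInstanceOptions("myInstanceID", refreshToken)
+//   putOptions.SetID(*current.ID)
+//   putOptions.SetRev(*current.Rev)
+//   putOptions.SetVersion("0.0.3")
+//   updated, _, err := catalogManagement.PutOfferingInstance(putOptions)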
+func (catalogManagement *CatalogManagementV1) PutOfferingInstance(putOfferingInstanceOptions *PutOfferingInstanceOptions) (result *OfferingInstance, response *core.DetailedResponse, err error) { + return catalogManagement.PutOfferingInstanceWithContext(context.Background(), putOfferingInstanceOptions) +} + +// PutOfferingInstanceWithContext is an alternate form of the PutOfferingInstance method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) PutOfferingInstanceWithContext(ctx context.Context, putOfferingInstanceOptions *PutOfferingInstanceOptions) (result *OfferingInstance, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(putOfferingInstanceOptions, "putOfferingInstanceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(putOfferingInstanceOptions, "putOfferingInstanceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_identifier": *putOfferingInstanceOptions.InstanceIdentifier, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/instances/offerings/{instance_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range putOfferingInstanceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "PutOfferingInstance") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if putOfferingInstanceOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*putOfferingInstanceOptions.XAuthRefreshToken)) + } + + body := make(map[string]interface{}) + if putOfferingInstanceOptions.ID != nil { + body["id"] = putOfferingInstanceOptions.ID + } + if putOfferingInstanceOptions.Rev != nil { + body["_rev"] = putOfferingInstanceOptions.Rev + } + if putOfferingInstanceOptions.URL != nil { + body["url"] = putOfferingInstanceOptions.URL + } + if putOfferingInstanceOptions.CRN != nil { + body["crn"] = putOfferingInstanceOptions.CRN + } + if putOfferingInstanceOptions.Label != nil { + body["label"] = putOfferingInstanceOptions.Label + } + if putOfferingInstanceOptions.CatalogID != nil { + body["catalog_id"] = putOfferingInstanceOptions.CatalogID + } + if putOfferingInstanceOptions.OfferingID != nil { + body["offering_id"] = putOfferingInstanceOptions.OfferingID + } + if putOfferingInstanceOptions.KindFormat != nil { + body["kind_format"] = putOfferingInstanceOptions.KindFormat + } + if putOfferingInstanceOptions.Version != nil { + body["version"] = putOfferingInstanceOptions.Version + } + if putOfferingInstanceOptions.ClusterID != nil { + body["cluster_id"] = putOfferingInstanceOptions.ClusterID + } + if putOfferingInstanceOptions.ClusterRegion != nil { + body["cluster_region"] = putOfferingInstanceOptions.ClusterRegion + } + if putOfferingInstanceOptions.ClusterNamespaces != nil { + body["cluster_namespaces"] = putOfferingInstanceOptions.ClusterNamespaces + } + if putOfferingInstanceOptions.ClusterAllNamespaces != nil { + body["cluster_all_namespaces"] = putOfferingInstanceOptions.ClusterAllNamespaces + } + if putOfferingInstanceOptions.SchematicsWorkspaceID != nil { + 
body["schematics_workspace_id"] = putOfferingInstanceOptions.SchematicsWorkspaceID + } + if putOfferingInstanceOptions.ResourceGroupID != nil { + body["resource_group_id"] = putOfferingInstanceOptions.ResourceGroupID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = catalogManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOfferingInstance) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeleteOfferingInstance : Delete a version instance +// Delete and instance deployed out of a product version. +func (catalogManagement *CatalogManagementV1) DeleteOfferingInstance(deleteOfferingInstanceOptions *DeleteOfferingInstanceOptions) (response *core.DetailedResponse, err error) { + return catalogManagement.DeleteOfferingInstanceWithContext(context.Background(), deleteOfferingInstanceOptions) +} + +// DeleteOfferingInstanceWithContext is an alternate form of the DeleteOfferingInstance method which supports a Context parameter +func (catalogManagement *CatalogManagementV1) DeleteOfferingInstanceWithContext(ctx context.Context, deleteOfferingInstanceOptions *DeleteOfferingInstanceOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteOfferingInstanceOptions, "deleteOfferingInstanceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteOfferingInstanceOptions, "deleteOfferingInstanceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_identifier": *deleteOfferingInstanceOptions.InstanceIdentifier, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = catalogManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(catalogManagement.Service.Options.URL, `/instances/offerings/{instance_identifier}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteOfferingInstanceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("catalog_management", "V1", "DeleteOfferingInstance") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteOfferingInstanceOptions.XAuthRefreshToken != nil { + builder.AddHeader("X-Auth-Refresh-Token", fmt.Sprint(*deleteOfferingInstanceOptions.XAuthRefreshToken)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = catalogManagement.Service.Request(request, nil) + + return +} + +// AccessListBulkResponse : Access List Add/Remove result. +type AccessListBulkResponse struct { + // in the case of error on an account add/remove - account: error. + Errors map[string]string `json:"errors,omitempty"` +} + +// UnmarshalAccessListBulkResponse unmarshals an instance of AccessListBulkResponse from the specified map of raw messages. +func UnmarshalAccessListBulkResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AccessListBulkResponse) + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Account : Account information. +type Account struct { + // Account identification. 
+ ID *string `json:"id,omitempty"` + + // Hide the public catalog in this account. + HideIBMCloudCatalog *bool `json:"hide_IBM_cloud_catalog,omitempty"` + + // Filters for account and catalog filters. + AccountFilters *Filters `json:"account_filters,omitempty"` +} + +// UnmarshalAccount unmarshals an instance of Account from the specified map of raw messages. +func UnmarshalAccount(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Account) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "hide_IBM_cloud_catalog", &obj.HideIBMCloudCatalog) + if err != nil { + return + } + err = core.UnmarshalModel(m, "account_filters", &obj.AccountFilters, UnmarshalFilters) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AccountPublishObjectOptions : The AccountPublishObject options. +type AccountPublishObjectOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewAccountPublishObjectOptions : Instantiate AccountPublishObjectOptions +func (*CatalogManagementV1) NewAccountPublishObjectOptions(catalogIdentifier string, objectIdentifier string) *AccountPublishObjectOptions { + return &AccountPublishObjectOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *AccountPublishObjectOptions) SetCatalogIdentifier(catalogIdentifier string) *AccountPublishObjectOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *AccountPublishObjectOptions) SetObjectIdentifier(objectIdentifier string) *AccountPublishObjectOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *AccountPublishObjectOptions) SetHeaders(param map[string]string) *AccountPublishObjectOptions { + options.Headers = param + return options +} + +// AccountPublishVersionOptions : The AccountPublishVersion options. +type AccountPublishVersionOptions struct { + // A dotted value of `catalogID`.`versionID`. + VersionLocID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewAccountPublishVersionOptions : Instantiate AccountPublishVersionOptions +func (*CatalogManagementV1) NewAccountPublishVersionOptions(versionLocID string) *AccountPublishVersionOptions { + return &AccountPublishVersionOptions{ + VersionLocID: core.StringPtr(versionLocID), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *AccountPublishVersionOptions) SetVersionLocID(versionLocID string) *AccountPublishVersionOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *AccountPublishVersionOptions) SetHeaders(param map[string]string) *AccountPublishVersionOptions { + options.Headers = param + return options +} + +// AccumulatedFilters : The accumulated filters for an account. This will return the account filters plus a filter for each catalog the user +// has access to. 
+type AccumulatedFilters struct {
+	// Filters for accounts (at this time this will always be a one-item array).
+	AccountFilters []Filters `json:"account_filters,omitempty"`
+
+	// The filters for all of the accessible catalogs.
+	CatalogFilters []AccumulatedFiltersCatalogFiltersItem `json:"catalog_filters,omitempty"`
+}
+
+// UnmarshalAccumulatedFilters unmarshals an instance of AccumulatedFilters from the specified map of raw messages.
+func UnmarshalAccumulatedFilters(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(AccumulatedFilters)
+	err = core.UnmarshalModel(m, "account_filters", &obj.AccountFilters, UnmarshalFilters)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "catalog_filters", &obj.CatalogFilters, UnmarshalAccumulatedFiltersCatalogFiltersItem)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// AccumulatedFiltersCatalogFiltersItem : AccumulatedFiltersCatalogFiltersItem struct
+type AccumulatedFiltersCatalogFiltersItem struct {
+	// Filters for catalog.
+	Catalog *AccumulatedFiltersCatalogFiltersItemCatalog `json:"catalog,omitempty"`
+
+	// Filters for account and catalog filters.
+	Filters *Filters `json:"filters,omitempty"`
+}
+
+// UnmarshalAccumulatedFiltersCatalogFiltersItem unmarshals an instance of AccumulatedFiltersCatalogFiltersItem from the specified map of raw messages.
+func UnmarshalAccumulatedFiltersCatalogFiltersItem(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(AccumulatedFiltersCatalogFiltersItem)
+	err = core.UnmarshalModel(m, "catalog", &obj.Catalog, UnmarshalAccumulatedFiltersCatalogFiltersItemCatalog)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "filters", &obj.Filters, UnmarshalFilters)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// AccumulatedFiltersCatalogFiltersItemCatalog : Filters for catalog.
+type AccumulatedFiltersCatalogFiltersItemCatalog struct {
+	// The ID of the catalog.
+	ID *string `json:"id,omitempty"`
+
+	// The name of the catalog.
+	Name *string `json:"name,omitempty"`
+}
+
+// UnmarshalAccumulatedFiltersCatalogFiltersItemCatalog unmarshals an instance of AccumulatedFiltersCatalogFiltersItemCatalog from the specified map of raw messages.
+func UnmarshalAccumulatedFiltersCatalogFiltersItemCatalog(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(AccumulatedFiltersCatalogFiltersItemCatalog)
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// AddObjectAccessListOptions : The AddObjectAccessList options.
+type AddObjectAccessListOptions struct {
+	// Catalog identifier.
+	CatalogIdentifier *string `validate:"required,ne="`
+
+	// Object identifier.
+	ObjectIdentifier *string `validate:"required,ne="`
+
+	// A list of accounts to add.
+ Accounts []string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewAddObjectAccessListOptions : Instantiate AddObjectAccessListOptions +func (*CatalogManagementV1) NewAddObjectAccessListOptions(catalogIdentifier string, objectIdentifier string, accounts []string) *AddObjectAccessListOptions { + return &AddObjectAccessListOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + Accounts: accounts, + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *AddObjectAccessListOptions) SetCatalogIdentifier(catalogIdentifier string) *AddObjectAccessListOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *AddObjectAccessListOptions) SetObjectIdentifier(objectIdentifier string) *AddObjectAccessListOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetAccounts : Allow user to set Accounts +func (options *AddObjectAccessListOptions) SetAccounts(accounts []string) *AddObjectAccessListOptions { + options.Accounts = accounts + return options +} + +// SetHeaders : Allow user to set Headers +func (options *AddObjectAccessListOptions) SetHeaders(param map[string]string) *AddObjectAccessListOptions { + options.Headers = param + return options +} + +// ApprovalResult : Result of approval. +type ApprovalResult struct { + // Allowed to request to publish. + AllowRequest *bool `json:"allow_request,omitempty"` + + // Visible to IBM. + IBM *bool `json:"ibm,omitempty"` + + // Visible to everyone. + Public *bool `json:"public,omitempty"` + + // Denotes whether approval has changed. + Changed *bool `json:"changed,omitempty"` +} + +// UnmarshalApprovalResult unmarshals an instance of ApprovalResult from the specified map of raw messages. +func UnmarshalApprovalResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ApprovalResult) + err = core.UnmarshalPrimitive(m, "allow_request", &obj.AllowRequest) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ibm", &obj.IBM) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "public", &obj.Public) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "changed", &obj.Changed) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AuditLog : A collection of audit records. +type AuditLog struct { + // A list of audit records. + List []AuditRecord `json:"list,omitempty"` +} + +// UnmarshalAuditLog unmarshals an instance of AuditLog from the specified map of raw messages. +func UnmarshalAuditLog(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AuditLog) + err = core.UnmarshalModel(m, "list", &obj.List, UnmarshalAuditRecord) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AuditRecord : An audit record which describes a change made to a catalog or associated resource. +type AuditRecord struct { + // The identifier of the audit record. + ID *string `json:"id,omitempty"` + + // The time at which the change was made. + Created *strfmt.DateTime `json:"created,omitempty"` + + // The type of change described by the audit record. + ChangeType *string `json:"change_type,omitempty"` + + // The resource type associated with the change. 
+ TargetType *string `json:"target_type,omitempty"` + + // The identifier of the resource that was changed. + TargetID *string `json:"target_id,omitempty"` + + // The email address of the user that made the change. + WhoDelegateEmail *string `json:"who_delegate_email,omitempty"` + + // A message which describes the change. + Message *string `json:"message,omitempty"` +} + +// UnmarshalAuditRecord unmarshals an instance of AuditRecord from the specified map of raw messages. +func UnmarshalAuditRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AuditRecord) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created", &obj.Created) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "change_type", &obj.ChangeType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "target_type", &obj.TargetType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "target_id", &obj.TargetID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "who_delegate_email", &obj.WhoDelegateEmail) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "message", &obj.Message) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Catalog : Catalog information. +type Catalog struct { + // Unique ID. + ID *string `json:"id,omitempty"` + + // Cloudant revision. + Rev *string `json:"_rev,omitempty"` + + // Display Name in the requested language. + Label *string `json:"label,omitempty"` + + // Description in the requested language. + ShortDescription *string `json:"short_description,omitempty"` + + // URL for an icon associated with this catalog. + CatalogIconURL *string `json:"catalog_icon_url,omitempty"` + + // List of tags associated with this catalog. + Tags []string `json:"tags,omitempty"` + + // The url for this specific catalog. + URL *string `json:"url,omitempty"` + + // CRN associated with the catalog. + CRN *string `json:"crn,omitempty"` + + // URL path to offerings. + OfferingsURL *string `json:"offerings_url,omitempty"` + + // List of features associated with this catalog. + Features []Feature `json:"features,omitempty"` + + // Denotes whether a catalog is disabled. + Disabled *bool `json:"disabled,omitempty"` + + // The date-time this catalog was created. + Created *strfmt.DateTime `json:"created,omitempty"` + + // The date-time this catalog was last updated. + Updated *strfmt.DateTime `json:"updated,omitempty"` + + // Resource group id the catalog is owned by. + ResourceGroupID *string `json:"resource_group_id,omitempty"` + + // Account that owns catalog. + OwningAccount *string `json:"owning_account,omitempty"` + + // Filters for account and catalog filters. + CatalogFilters *Filters `json:"catalog_filters,omitempty"` + + // Feature information. + SyndicationSettings *SyndicationResource `json:"syndication_settings,omitempty"` + + // Kind of catalog. Supported kinds are offering and vpe. + Kind *string `json:"kind,omitempty"` +} + +// UnmarshalCatalog unmarshals an instance of Catalog from the specified map of raw messages. 
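(Editorial aside, not part of the vendored diff: a sketch of how these generated Unmarshal* helpers are typically driven. It assumes code in the same package, inside an error-returning helper, and relies on the file's existing encoding/json and fmt imports; the JSON payload is invented for the example.)

    payload := []byte(`{"id":"catalog-1","label":"My Catalog","kind":"offering"}`)
    raw := map[string]json.RawMessage{}
    if err := json.Unmarshal(payload, &raw); err != nil {
        return err
    }
    // UnmarshalCatalog allocates a *Catalog and stores it through the passed
    // pointer via reflection, so the argument must be a **Catalog.
    var catalog *Catalog
    if err := UnmarshalCatalog(raw, &catalog); err != nil {
        return err
    }
    fmt.Printf("catalog %s (%s)\n", *catalog.ID, *catalog.Label)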
+func UnmarshalCatalog(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Catalog) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "_rev", &obj.Rev) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "label", &obj.Label) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "short_description", &obj.ShortDescription) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "catalog_icon_url", &obj.CatalogIconURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offerings_url", &obj.OfferingsURL) + if err != nil { + return + } + err = core.UnmarshalModel(m, "features", &obj.Features, UnmarshalFeature) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "disabled", &obj.Disabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created", &obj.Created) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated", &obj.Updated) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group_id", &obj.ResourceGroupID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "owning_account", &obj.OwningAccount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "catalog_filters", &obj.CatalogFilters, UnmarshalFilters) + if err != nil { + return + } + err = core.UnmarshalModel(m, "syndication_settings", &obj.SyndicationSettings, UnmarshalSyndicationResource) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kind", &obj.Kind) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CatalogObject : object information. +type CatalogObject struct { + // unique id. + ID *string `json:"id,omitempty"` + + // The programmatic name of this offering. + Name *string `json:"name,omitempty"` + + // Cloudant revision. + Rev *string `json:"_rev,omitempty"` + + // The crn for this specific object. + CRN *string `json:"crn,omitempty"` + + // The url for this specific object. + URL *string `json:"url,omitempty"` + + // The parent for this specific object. + ParentID *string `json:"parent_id,omitempty"` + + // Translated display name in the requested language. + LabelI18n *string `json:"label_i18n,omitempty"` + + // Display name in the requested language. + Label *string `json:"label,omitempty"` + + // List of tags associated with this catalog. + Tags []string `json:"tags,omitempty"` + + // The date and time this catalog was created. + Created *strfmt.DateTime `json:"created,omitempty"` + + // The date and time this catalog was last updated. + Updated *strfmt.DateTime `json:"updated,omitempty"` + + // Short description in the requested language. + ShortDescription *string `json:"short_description,omitempty"` + + // Short description translation. + ShortDescriptionI18n *string `json:"short_description_i18n,omitempty"` + + // Kind of object. + Kind *string `json:"kind,omitempty"` + + // Publish information. + Publish *PublishObject `json:"publish,omitempty"` + + // Offering state. + State *State `json:"state,omitempty"` + + // The id of the catalog containing this offering. + CatalogID *string `json:"catalog_id,omitempty"` + + // The name of the catalog. 
+ CatalogName *string `json:"catalog_name,omitempty"` + + // Map of data values for this object. + Data map[string]interface{} `json:"data,omitempty"` +} + +// UnmarshalCatalogObject unmarshals an instance of CatalogObject from the specified map of raw messages. +func UnmarshalCatalogObject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CatalogObject) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "_rev", &obj.Rev) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "parent_id", &obj.ParentID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "label_i18n", &obj.LabelI18n) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "label", &obj.Label) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created", &obj.Created) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated", &obj.Updated) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "short_description", &obj.ShortDescription) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "short_description_i18n", &obj.ShortDescriptionI18n) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kind", &obj.Kind) + if err != nil { + return + } + err = core.UnmarshalModel(m, "publish", &obj.Publish, UnmarshalPublishObject) + if err != nil { + return + } + err = core.UnmarshalModel(m, "state", &obj.State, UnmarshalState) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "catalog_id", &obj.CatalogID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "catalog_name", &obj.CatalogName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "data", &obj.Data) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CatalogSearchResult : Paginated catalog search result. +type CatalogSearchResult struct { + // The overall total number of resources in the search result set. + TotalCount *int64 `json:"total_count,omitempty"` + + // Resulting objects. + Resources []Catalog `json:"resources,omitempty"` +} + +// UnmarshalCatalogSearchResult unmarshals an instance of CatalogSearchResult from the specified map of raw messages. +func UnmarshalCatalogSearchResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CatalogSearchResult) + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalCatalog) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CategoryFilter : Filter on a category. The filter will match against the values of the given category with include or exclude. +type CategoryFilter struct { + // -> true - This is an include filter, false - this is an exclude filter. + Include *bool `json:"include,omitempty"` + + // Offering filter terms. + Filter *FilterTerms `json:"filter,omitempty"` +} + +// UnmarshalCategoryFilter unmarshals an instance of CategoryFilter from the specified map of raw messages. 
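(Editorial aside, not part of the vendored diff: CategoryFilter pairs an include/exclude flag with a FilterTerms list, so the same terms can either admit or reject matching offerings. A minimal same-package sketch; the terms are placeholders.)

    // With Include=true, offerings whose category value matches one of the
    // terms are kept; with Include=false the same terms would exclude them.
    dbFilter := CategoryFilter{
        Include: core.BoolPtr(true),
        Filter: &FilterTerms{
            FilterTerms: []string{"databases", "analytics"},
        },
    }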
+func UnmarshalCategoryFilter(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CategoryFilter) + err = core.UnmarshalPrimitive(m, "include", &obj.Include) + if err != nil { + return + } + err = core.UnmarshalModel(m, "filter", &obj.Filter, UnmarshalFilterTerms) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ClusterInfo : Cluster information. +type ClusterInfo struct { + // Resource Group ID. + ResourceGroupID *string `json:"resource_group_id,omitempty"` + + // Resource Group name. + ResourceGroupName *string `json:"resource_group_name,omitempty"` + + // Cluster ID. + ID *string `json:"id,omitempty"` + + // Cluster name. + Name *string `json:"name,omitempty"` + + // Cluster region. + Region *string `json:"region,omitempty"` +} + +// UnmarshalClusterInfo unmarshals an instance of ClusterInfo from the specified map of raw messages. +func UnmarshalClusterInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ClusterInfo) + err = core.UnmarshalPrimitive(m, "resource_group_id", &obj.ResourceGroupID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group_name", &obj.ResourceGroupName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "region", &obj.Region) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CommitVersionOptions : The CommitVersion options. +type CommitVersionOptions struct { + // A dotted value of `catalogID`.`versionID`. + VersionLocID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCommitVersionOptions : Instantiate CommitVersionOptions +func (*CatalogManagementV1) NewCommitVersionOptions(versionLocID string) *CommitVersionOptions { + return &CommitVersionOptions{ + VersionLocID: core.StringPtr(versionLocID), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *CommitVersionOptions) SetVersionLocID(versionLocID string) *CommitVersionOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CommitVersionOptions) SetHeaders(param map[string]string) *CommitVersionOptions { + options.Headers = param + return options +} + +// Configuration : Configuration description. +type Configuration struct { + // Configuration key. + Key *string `json:"key,omitempty"` + + // Value type (string, boolean, int). + Type *string `json:"type,omitempty"` + + // The default value. To use a secret when the type is password, specify a JSON encoded value of + // $ref:#/components/schemas/SecretInstance, prefixed with `cmsm_v1:`. + DefaultValue interface{} `json:"default_value,omitempty"` + + // Constraint associated with value, e.g., for string type - regx:[a-z]. + ValueConstraint *string `json:"value_constraint,omitempty"` + + // Key description. + Description *string `json:"description,omitempty"` + + // Is key required to install. + Required *bool `json:"required,omitempty"` + + // List of options of type. + Options []interface{} `json:"options,omitempty"` + + // Hide values. + Hidden *bool `json:"hidden,omitempty"` +} + +// UnmarshalConfiguration unmarshals an instance of Configuration from the specified map of raw messages. 
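(Editorial aside, not part of the vendored diff: per the DefaultValue comment above, a password-typed Configuration key can default to a secret reference. The SecretInstance JSON shape is only referenced, not defined, in this file, so the value below is a placeholder sketch.)

    adminPassword := Configuration{
        Key:      core.StringPtr("admin_password"),
        Type:     core.StringPtr("password"),
        Required: core.BoolPtr(true),
        // JSON-encoded secret reference prefixed with "cmsm_v1:"; the field
        // names inside the JSON are illustrative only.
        DefaultValue: `cmsm_v1:{"secret_id":"11111111-2222-3333-4444-555555555555"}`,
    }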
+func UnmarshalConfiguration(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Configuration)
+ err = core.UnmarshalPrimitive(m, "key", &obj.Key)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "default_value", &obj.DefaultValue)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value_constraint", &obj.ValueConstraint)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "required", &obj.Required)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "options", &obj.Options)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "hidden", &obj.Hidden)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// CopyVersionOptions : The CopyVersion options.
+type CopyVersionOptions struct {
+ // A dotted value of `catalogID`.`versionID`.
+ VersionLocID *string `validate:"required,ne="`
+
+ // Tags array.
+ Tags []string
+
+ // Target kinds. Current valid values are 'iks', 'roks', 'vcenter', and 'terraform'.
+ TargetKinds []string
+
+ // byte array representing the content to be imported. Only supported for OVA images at this time.
+ Content *[]byte
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewCopyVersionOptions : Instantiate CopyVersionOptions
+func (*CatalogManagementV1) NewCopyVersionOptions(versionLocID string) *CopyVersionOptions {
+ return &CopyVersionOptions{
+ VersionLocID: core.StringPtr(versionLocID),
+ }
+}
+
+// SetVersionLocID : Allow user to set VersionLocID
+func (options *CopyVersionOptions) SetVersionLocID(versionLocID string) *CopyVersionOptions {
+ options.VersionLocID = core.StringPtr(versionLocID)
+ return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *CopyVersionOptions) SetTags(tags []string) *CopyVersionOptions {
+ options.Tags = tags
+ return options
+}
+
+// SetTargetKinds : Allow user to set TargetKinds
+func (options *CopyVersionOptions) SetTargetKinds(targetKinds []string) *CopyVersionOptions {
+ options.TargetKinds = targetKinds
+ return options
+}
+
+// SetContent : Allow user to set Content
+func (options *CopyVersionOptions) SetContent(content []byte) *CopyVersionOptions {
+ options.Content = &content
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CopyVersionOptions) SetHeaders(param map[string]string) *CopyVersionOptions {
+ options.Headers = param
+ return options
+}
+
+// CreateCatalogOptions : The CreateCatalog options.
+type CreateCatalogOptions struct {
+ // Unique ID.
+ ID *string
+
+ // Cloudant revision.
+ Rev *string
+
+ // Display Name in the requested language.
+ Label *string
+
+ // Description in the requested language.
+ ShortDescription *string
+
+ // URL for an icon associated with this catalog.
+ CatalogIconURL *string
+
+ // List of tags associated with this catalog.
+ Tags []string
+
+ // List of features associated with this catalog.
+ Features []Feature
+
+ // Denotes whether a catalog is disabled.
+ Disabled *bool
+
+ // Resource group id the catalog is owned by.
+ ResourceGroupID *string
+
+ // Account that owns catalog.
+ OwningAccount *string
+
+ // Filters for account and catalog filters.
+ CatalogFilters *Filters
+
+ // Feature information.
+ SyndicationSettings *SyndicationResource
+
+ // Kind of catalog. Supported kinds are offering and vpe.
+ Kind *string
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewCreateCatalogOptions : Instantiate CreateCatalogOptions
+func (*CatalogManagementV1) NewCreateCatalogOptions() *CreateCatalogOptions {
+ return &CreateCatalogOptions{}
+}
+
+// SetID : Allow user to set ID
+func (options *CreateCatalogOptions) SetID(id string) *CreateCatalogOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetRev : Allow user to set Rev
+func (options *CreateCatalogOptions) SetRev(rev string) *CreateCatalogOptions {
+ options.Rev = core.StringPtr(rev)
+ return options
+}
+
+// SetLabel : Allow user to set Label
+func (options *CreateCatalogOptions) SetLabel(label string) *CreateCatalogOptions {
+ options.Label = core.StringPtr(label)
+ return options
+}
+
+// SetShortDescription : Allow user to set ShortDescription
+func (options *CreateCatalogOptions) SetShortDescription(shortDescription string) *CreateCatalogOptions {
+ options.ShortDescription = core.StringPtr(shortDescription)
+ return options
+}
+
+// SetCatalogIconURL : Allow user to set CatalogIconURL
+func (options *CreateCatalogOptions) SetCatalogIconURL(catalogIconURL string) *CreateCatalogOptions {
+ options.CatalogIconURL = core.StringPtr(catalogIconURL)
+ return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *CreateCatalogOptions) SetTags(tags []string) *CreateCatalogOptions {
+ options.Tags = tags
+ return options
+}
+
+// SetFeatures : Allow user to set Features
+func (options *CreateCatalogOptions) SetFeatures(features []Feature) *CreateCatalogOptions {
+ options.Features = features
+ return options
+}
+
+// SetDisabled : Allow user to set Disabled
+func (options *CreateCatalogOptions) SetDisabled(disabled bool) *CreateCatalogOptions {
+ options.Disabled = core.BoolPtr(disabled)
+ return options
+}
+
+// SetResourceGroupID : Allow user to set ResourceGroupID
+func (options *CreateCatalogOptions) SetResourceGroupID(resourceGroupID string) *CreateCatalogOptions {
+ options.ResourceGroupID = core.StringPtr(resourceGroupID)
+ return options
+}
+
+// SetOwningAccount : Allow user to set OwningAccount
+func (options *CreateCatalogOptions) SetOwningAccount(owningAccount string) *CreateCatalogOptions {
+ options.OwningAccount = core.StringPtr(owningAccount)
+ return options
+}
+
+// SetCatalogFilters : Allow user to set CatalogFilters
+func (options *CreateCatalogOptions) SetCatalogFilters(catalogFilters *Filters) *CreateCatalogOptions {
+ options.CatalogFilters = catalogFilters
+ return options
+}
+
+// SetSyndicationSettings : Allow user to set SyndicationSettings
+func (options *CreateCatalogOptions) SetSyndicationSettings(syndicationSettings *SyndicationResource) *CreateCatalogOptions {
+ options.SyndicationSettings = syndicationSettings
+ return options
+}
+
+// SetKind : Allow user to set Kind
+func (options *CreateCatalogOptions) SetKind(kind string) *CreateCatalogOptions {
+ options.Kind = core.StringPtr(kind)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CreateCatalogOptions) SetHeaders(param map[string]string) *CreateCatalogOptions {
+ options.Headers = param
+ return options
+}
+
+// CreateObjectAccessOptions : The CreateObjectAccess options.
+type CreateObjectAccessOptions struct {
+ // Catalog identifier.
+ CatalogIdentifier *string `validate:"required,ne="`
+
+ // Object identifier.
+ ObjectIdentifier *string `validate:"required,ne="`
+
+ // Account identifier.
+ AccountIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateObjectAccessOptions : Instantiate CreateObjectAccessOptions +func (*CatalogManagementV1) NewCreateObjectAccessOptions(catalogIdentifier string, objectIdentifier string, accountIdentifier string) *CreateObjectAccessOptions { + return &CreateObjectAccessOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + AccountIdentifier: core.StringPtr(accountIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *CreateObjectAccessOptions) SetCatalogIdentifier(catalogIdentifier string) *CreateObjectAccessOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *CreateObjectAccessOptions) SetObjectIdentifier(objectIdentifier string) *CreateObjectAccessOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetAccountIdentifier : Allow user to set AccountIdentifier +func (options *CreateObjectAccessOptions) SetAccountIdentifier(accountIdentifier string) *CreateObjectAccessOptions { + options.AccountIdentifier = core.StringPtr(accountIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateObjectAccessOptions) SetHeaders(param map[string]string) *CreateObjectAccessOptions { + options.Headers = param + return options +} + +// CreateObjectOptions : The CreateObject options. +type CreateObjectOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // unique id. + ID *string + + // The programmatic name of this offering. + Name *string + + // Cloudant revision. + Rev *string + + // The crn for this specific object. + CRN *string + + // The url for this specific object. + URL *string + + // The parent for this specific object. + ParentID *string + + // Translated display name in the requested language. + LabelI18n *string + + // Display name in the requested language. + Label *string + + // List of tags associated with this catalog. + Tags []string + + // The date and time this catalog was created. + Created *strfmt.DateTime + + // The date and time this catalog was last updated. + Updated *strfmt.DateTime + + // Short description in the requested language. + ShortDescription *string + + // Short description translation. + ShortDescriptionI18n *string + + // Kind of object. + Kind *string + + // Publish information. + Publish *PublishObject + + // Offering state. + State *State + + // The id of the catalog containing this offering. + CatalogID *string + + // The name of the catalog. + CatalogName *string + + // Map of data values for this object. 
+ Data map[string]interface{} + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateObjectOptions : Instantiate CreateObjectOptions +func (*CatalogManagementV1) NewCreateObjectOptions(catalogIdentifier string) *CreateObjectOptions { + return &CreateObjectOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *CreateObjectOptions) SetCatalogIdentifier(catalogIdentifier string) *CreateObjectOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetID : Allow user to set ID +func (options *CreateObjectOptions) SetID(id string) *CreateObjectOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetName : Allow user to set Name +func (options *CreateObjectOptions) SetName(name string) *CreateObjectOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetRev : Allow user to set Rev +func (options *CreateObjectOptions) SetRev(rev string) *CreateObjectOptions { + options.Rev = core.StringPtr(rev) + return options +} + +// SetCRN : Allow user to set CRN +func (options *CreateObjectOptions) SetCRN(crn string) *CreateObjectOptions { + options.CRN = core.StringPtr(crn) + return options +} + +// SetURL : Allow user to set URL +func (options *CreateObjectOptions) SetURL(url string) *CreateObjectOptions { + options.URL = core.StringPtr(url) + return options +} + +// SetParentID : Allow user to set ParentID +func (options *CreateObjectOptions) SetParentID(parentID string) *CreateObjectOptions { + options.ParentID = core.StringPtr(parentID) + return options +} + +// SetLabelI18n : Allow user to set LabelI18n +func (options *CreateObjectOptions) SetLabelI18n(labelI18n string) *CreateObjectOptions { + options.LabelI18n = core.StringPtr(labelI18n) + return options +} + +// SetLabel : Allow user to set Label +func (options *CreateObjectOptions) SetLabel(label string) *CreateObjectOptions { + options.Label = core.StringPtr(label) + return options +} + +// SetTags : Allow user to set Tags +func (options *CreateObjectOptions) SetTags(tags []string) *CreateObjectOptions { + options.Tags = tags + return options +} + +// SetCreated : Allow user to set Created +func (options *CreateObjectOptions) SetCreated(created *strfmt.DateTime) *CreateObjectOptions { + options.Created = created + return options +} + +// SetUpdated : Allow user to set Updated +func (options *CreateObjectOptions) SetUpdated(updated *strfmt.DateTime) *CreateObjectOptions { + options.Updated = updated + return options +} + +// SetShortDescription : Allow user to set ShortDescription +func (options *CreateObjectOptions) SetShortDescription(shortDescription string) *CreateObjectOptions { + options.ShortDescription = core.StringPtr(shortDescription) + return options +} + +// SetShortDescriptionI18n : Allow user to set ShortDescriptionI18n +func (options *CreateObjectOptions) SetShortDescriptionI18n(shortDescriptionI18n string) *CreateObjectOptions { + options.ShortDescriptionI18n = core.StringPtr(shortDescriptionI18n) + return options +} + +// SetKind : Allow user to set Kind +func (options *CreateObjectOptions) SetKind(kind string) *CreateObjectOptions { + options.Kind = core.StringPtr(kind) + return options +} + +// SetPublish : Allow user to set Publish +func (options *CreateObjectOptions) SetPublish(publish *PublishObject) *CreateObjectOptions { + options.Publish = publish + return options +} + +// SetState : Allow user to set State 
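(Editorial aside, not part of the vendored diff: the options types follow a fluent-builder pattern, so setters can be chained off the constructor. Assumes a configured *CatalogManagementV1 client named catalogManagement; identifiers and values are placeholders.)

    createObjectOptions := catalogManagement.NewCreateObjectOptions("<catalog-id>").
        SetName("example-object").
        SetLabel("Example object").
        SetKind("vpe").
        SetTags([]string{"example"})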
+func (options *CreateObjectOptions) SetState(state *State) *CreateObjectOptions { + options.State = state + return options +} + +// SetCatalogID : Allow user to set CatalogID +func (options *CreateObjectOptions) SetCatalogID(catalogID string) *CreateObjectOptions { + options.CatalogID = core.StringPtr(catalogID) + return options +} + +// SetCatalogName : Allow user to set CatalogName +func (options *CreateObjectOptions) SetCatalogName(catalogName string) *CreateObjectOptions { + options.CatalogName = core.StringPtr(catalogName) + return options +} + +// SetData : Allow user to set Data +func (options *CreateObjectOptions) SetData(data map[string]interface{}) *CreateObjectOptions { + options.Data = data + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateObjectOptions) SetHeaders(param map[string]string) *CreateObjectOptions { + options.Headers = param + return options +} + +// CreateOfferingInstanceOptions : The CreateOfferingInstance options. +type CreateOfferingInstanceOptions struct { + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // provisioned instance ID (part of the CRN). + ID *string + + // Cloudant revision. + Rev *string + + // url reference to this object. + URL *string + + // platform CRN for this instance. + CRN *string + + // the label for this instance. + Label *string + + // Catalog ID this instance was created from. + CatalogID *string + + // Offering ID this instance was created from. + OfferingID *string + + // the format this instance has (helm, operator, ova...). + KindFormat *string + + // The version this instance was installed from (not version id). + Version *string + + // Cluster ID. + ClusterID *string + + // Cluster region (e.g., us-south). + ClusterRegion *string + + // List of target namespaces to install into. + ClusterNamespaces []string + + // designate to install into all namespaces. + ClusterAllNamespaces *bool + + // Id of the schematics workspace, for offering instances provisioned through schematics. + SchematicsWorkspaceID *string + + // Id of the resource group to provision the offering instance into. 
+ ResourceGroupID *string
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewCreateOfferingInstanceOptions : Instantiate CreateOfferingInstanceOptions
+func (*CatalogManagementV1) NewCreateOfferingInstanceOptions(xAuthRefreshToken string) *CreateOfferingInstanceOptions {
+ return &CreateOfferingInstanceOptions{
+ XAuthRefreshToken: core.StringPtr(xAuthRefreshToken),
+ }
+}
+
+// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken
+func (options *CreateOfferingInstanceOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *CreateOfferingInstanceOptions {
+ options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken)
+ return options
+}
+
+// SetID : Allow user to set ID
+func (options *CreateOfferingInstanceOptions) SetID(id string) *CreateOfferingInstanceOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetRev : Allow user to set Rev
+func (options *CreateOfferingInstanceOptions) SetRev(rev string) *CreateOfferingInstanceOptions {
+ options.Rev = core.StringPtr(rev)
+ return options
+}
+
+// SetURL : Allow user to set URL
+func (options *CreateOfferingInstanceOptions) SetURL(url string) *CreateOfferingInstanceOptions {
+ options.URL = core.StringPtr(url)
+ return options
+}
+
+// SetCRN : Allow user to set CRN
+func (options *CreateOfferingInstanceOptions) SetCRN(crn string) *CreateOfferingInstanceOptions {
+ options.CRN = core.StringPtr(crn)
+ return options
+}
+
+// SetLabel : Allow user to set Label
+func (options *CreateOfferingInstanceOptions) SetLabel(label string) *CreateOfferingInstanceOptions {
+ options.Label = core.StringPtr(label)
+ return options
+}
+
+// SetCatalogID : Allow user to set CatalogID
+func (options *CreateOfferingInstanceOptions) SetCatalogID(catalogID string) *CreateOfferingInstanceOptions {
+ options.CatalogID = core.StringPtr(catalogID)
+ return options
+}
+
+// SetOfferingID : Allow user to set OfferingID
+func (options *CreateOfferingInstanceOptions) SetOfferingID(offeringID string) *CreateOfferingInstanceOptions {
+ options.OfferingID = core.StringPtr(offeringID)
+ return options
+}
+
+// SetKindFormat : Allow user to set KindFormat
+func (options *CreateOfferingInstanceOptions) SetKindFormat(kindFormat string) *CreateOfferingInstanceOptions {
+ options.KindFormat = core.StringPtr(kindFormat)
+ return options
+}
+
+// SetVersion : Allow user to set Version
+func (options *CreateOfferingInstanceOptions) SetVersion(version string) *CreateOfferingInstanceOptions {
+ options.Version = core.StringPtr(version)
+ return options
+}
+
+// SetClusterID : Allow user to set ClusterID
+func (options *CreateOfferingInstanceOptions) SetClusterID(clusterID string) *CreateOfferingInstanceOptions {
+ options.ClusterID = core.StringPtr(clusterID)
+ return options
+}
+
+// SetClusterRegion : Allow user to set ClusterRegion
+func (options *CreateOfferingInstanceOptions) SetClusterRegion(clusterRegion string) *CreateOfferingInstanceOptions {
+ options.ClusterRegion = core.StringPtr(clusterRegion)
+ return options
+}
+
+// SetClusterNamespaces : Allow user to set ClusterNamespaces
+func (options *CreateOfferingInstanceOptions) SetClusterNamespaces(clusterNamespaces []string) *CreateOfferingInstanceOptions {
+ options.ClusterNamespaces = clusterNamespaces
+ return options
+}
+
+// SetClusterAllNamespaces : Allow user to set ClusterAllNamespaces
+func (options *CreateOfferingInstanceOptions) SetClusterAllNamespaces(clusterAllNamespaces bool) *CreateOfferingInstanceOptions {
+ options.ClusterAllNamespaces = core.BoolPtr(clusterAllNamespaces)
+ return options
+}
+
+// SetSchematicsWorkspaceID : Allow user to set SchematicsWorkspaceID
+func (options *CreateOfferingInstanceOptions) SetSchematicsWorkspaceID(schematicsWorkspaceID string) *CreateOfferingInstanceOptions {
+ options.SchematicsWorkspaceID = core.StringPtr(schematicsWorkspaceID)
+ return options
+}
+
+// SetResourceGroupID : Allow user to set ResourceGroupID
+func (options *CreateOfferingInstanceOptions) SetResourceGroupID(resourceGroupID string) *CreateOfferingInstanceOptions {
+ options.ResourceGroupID = core.StringPtr(resourceGroupID)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CreateOfferingInstanceOptions) SetHeaders(param map[string]string) *CreateOfferingInstanceOptions {
+ options.Headers = param
+ return options
+}
+
+// CreateOfferingOptions : The CreateOffering options.
+type CreateOfferingOptions struct {
+ // Catalog identifier.
+ CatalogIdentifier *string `validate:"required,ne="`
+
+ // unique id.
+ ID *string
+
+ // Cloudant revision.
+ Rev *string
+
+ // The url for this specific offering.
+ URL *string
+
+ // The crn for this specific offering.
+ CRN *string
+
+ // Display Name in the requested language.
+ Label *string
+
+ // The programmatic name of this offering.
+ Name *string
+
+ // URL for an icon associated with this offering.
+ OfferingIconURL *string
+
+ // URL for an additional docs with this offering.
+ OfferingDocsURL *string
+
+ // URL to be displayed in the Consumption UI for getting support on this offering.
+ OfferingSupportURL *string
+
+ // List of tags associated with this catalog.
+ Tags []string
+
+ // List of keywords associated with offering, typically used to search for it.
+ Keywords []string
+
+ // Repository info for offerings.
+ Rating *Rating
+
+ // The date and time this catalog was created.
+ Created *strfmt.DateTime
+
+ // The date and time this catalog was last updated.
+ Updated *strfmt.DateTime
+
+ // Short description in the requested language.
+ ShortDescription *string
+
+ // Long description in the requested language.
+ LongDescription *string
+
+ // list of features associated with this offering.
+ Features []Feature
+
+ // Array of kind.
+ Kinds []Kind
+
+ // Is it permitted to request publishing to IBM or Public.
+ PermitRequestIBMPublicPublish *bool
+
+ // Indicates if this offering has been approved for use by all IBMers.
+ IBMPublishApproved *bool
+
+ // Indicates if this offering has been approved for use by all IBM Cloud users.
+ PublicPublishApproved *bool
+
+ // The original offering CRN that this publish entry came from.
+ PublicOriginalCRN *string
+
+ // The crn of the public catalog entry of this offering.
+ PublishPublicCRN *string
+
+ // The portal's approval record ID.
+ PortalApprovalRecord *string
+
+ // The portal UI URL.
+ PortalUIURL *string
+
+ // The id of the catalog containing this offering.
+ CatalogID *string
+
+ // The name of the catalog.
+ CatalogName *string
+
+ // Map of metadata values for this offering.
+ Metadata map[string]interface{}
+
+ // A disclaimer for this offering.
+ Disclaimer *string
+
+ // Determine if this offering should be displayed in the Consumption UI.
+ Hidden *bool
+
+ // Provider of this offering.
+ Provider *string
+
+ // Repository info for offerings.
+ RepoInfo *RepoInfo
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewCreateOfferingOptions : Instantiate CreateOfferingOptions
+func (*CatalogManagementV1) NewCreateOfferingOptions(catalogIdentifier string) *CreateOfferingOptions {
+ return &CreateOfferingOptions{
+ CatalogIdentifier: core.StringPtr(catalogIdentifier),
+ }
+}
+
+// SetCatalogIdentifier : Allow user to set CatalogIdentifier
+func (options *CreateOfferingOptions) SetCatalogIdentifier(catalogIdentifier string) *CreateOfferingOptions {
+ options.CatalogIdentifier = core.StringPtr(catalogIdentifier)
+ return options
+}
+
+// SetID : Allow user to set ID
+func (options *CreateOfferingOptions) SetID(id string) *CreateOfferingOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetRev : Allow user to set Rev
+func (options *CreateOfferingOptions) SetRev(rev string) *CreateOfferingOptions {
+ options.Rev = core.StringPtr(rev)
+ return options
+}
+
+// SetURL : Allow user to set URL
+func (options *CreateOfferingOptions) SetURL(url string) *CreateOfferingOptions {
+ options.URL = core.StringPtr(url)
+ return options
+}
+
+// SetCRN : Allow user to set CRN
+func (options *CreateOfferingOptions) SetCRN(crn string) *CreateOfferingOptions {
+ options.CRN = core.StringPtr(crn)
+ return options
+}
+
+// SetLabel : Allow user to set Label
+func (options *CreateOfferingOptions) SetLabel(label string) *CreateOfferingOptions {
+ options.Label = core.StringPtr(label)
+ return options
+}
+
+// SetName : Allow user to set Name
+func (options *CreateOfferingOptions) SetName(name string) *CreateOfferingOptions {
+ options.Name = core.StringPtr(name)
+ return options
+}
+
+// SetOfferingIconURL : Allow user to set OfferingIconURL
+func (options *CreateOfferingOptions) SetOfferingIconURL(offeringIconURL string) *CreateOfferingOptions {
+ options.OfferingIconURL = core.StringPtr(offeringIconURL)
+ return options
+}
+
+// SetOfferingDocsURL : Allow user to set OfferingDocsURL
+func (options *CreateOfferingOptions) SetOfferingDocsURL(offeringDocsURL string) *CreateOfferingOptions {
+ options.OfferingDocsURL = core.StringPtr(offeringDocsURL)
+ return options
+}
+
+// SetOfferingSupportURL : Allow user to set OfferingSupportURL
+func (options *CreateOfferingOptions) SetOfferingSupportURL(offeringSupportURL string) *CreateOfferingOptions {
+ options.OfferingSupportURL = core.StringPtr(offeringSupportURL)
+ return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *CreateOfferingOptions) SetTags(tags []string) *CreateOfferingOptions {
+ options.Tags = tags
+ return options
+}
+
+// SetKeywords : Allow user to set Keywords
+func (options *CreateOfferingOptions) SetKeywords(keywords []string) *CreateOfferingOptions {
+ options.Keywords = keywords
+ return options
+}
+
+// SetRating : Allow user to set Rating
+func (options *CreateOfferingOptions) SetRating(rating *Rating) *CreateOfferingOptions {
+ options.Rating = rating
+ return options
+}
+
+// SetCreated : Allow user to set Created
+func (options *CreateOfferingOptions) SetCreated(created *strfmt.DateTime) *CreateOfferingOptions {
+ options.Created = created
+ return options
+}
+
+// SetUpdated : Allow user to set Updated
+func (options *CreateOfferingOptions) SetUpdated(updated *strfmt.DateTime) *CreateOfferingOptions {
+ options.Updated = updated
+ return options
+}
+
+// SetShortDescription : Allow user to set ShortDescription
+func (options *CreateOfferingOptions) SetShortDescription(shortDescription string) *CreateOfferingOptions {
+ options.ShortDescription = core.StringPtr(shortDescription)
+ return options
+}
+
+// SetLongDescription : Allow user to set LongDescription
+func (options *CreateOfferingOptions) SetLongDescription(longDescription string) *CreateOfferingOptions {
+ options.LongDescription = core.StringPtr(longDescription)
+ return options
+}
+
+// SetFeatures : Allow user to set Features
+func (options *CreateOfferingOptions) SetFeatures(features []Feature) *CreateOfferingOptions {
+ options.Features = features
+ return options
+}
+
+// SetKinds : Allow user to set Kinds
+func (options *CreateOfferingOptions) SetKinds(kinds []Kind) *CreateOfferingOptions {
+ options.Kinds = kinds
+ return options
+}
+
+// SetPermitRequestIBMPublicPublish : Allow user to set PermitRequestIBMPublicPublish
+func (options *CreateOfferingOptions) SetPermitRequestIBMPublicPublish(permitRequestIBMPublicPublish bool) *CreateOfferingOptions {
+ options.PermitRequestIBMPublicPublish = core.BoolPtr(permitRequestIBMPublicPublish)
+ return options
+}
+
+// SetIBMPublishApproved : Allow user to set IBMPublishApproved
+func (options *CreateOfferingOptions) SetIBMPublishApproved(ibmPublishApproved bool) *CreateOfferingOptions {
+ options.IBMPublishApproved = core.BoolPtr(ibmPublishApproved)
+ return options
+}
+
+// SetPublicPublishApproved : Allow user to set PublicPublishApproved
+func (options *CreateOfferingOptions) SetPublicPublishApproved(publicPublishApproved bool) *CreateOfferingOptions {
+ options.PublicPublishApproved = core.BoolPtr(publicPublishApproved)
+ return options
+}
+
+// SetPublicOriginalCRN : Allow user to set PublicOriginalCRN
+func (options *CreateOfferingOptions) SetPublicOriginalCRN(publicOriginalCRN string) *CreateOfferingOptions {
+ options.PublicOriginalCRN = core.StringPtr(publicOriginalCRN)
+ return options
+}
+
+// SetPublishPublicCRN : Allow user to set PublishPublicCRN
+func (options *CreateOfferingOptions) SetPublishPublicCRN(publishPublicCRN string) *CreateOfferingOptions {
+ options.PublishPublicCRN = core.StringPtr(publishPublicCRN)
+ return options
+}
+
+// SetPortalApprovalRecord : Allow user to set PortalApprovalRecord
+func (options *CreateOfferingOptions) SetPortalApprovalRecord(portalApprovalRecord string) *CreateOfferingOptions {
+ options.PortalApprovalRecord = core.StringPtr(portalApprovalRecord)
+ return options
+}
+
+// SetPortalUIURL : Allow user to set PortalUIURL
+func (options *CreateOfferingOptions) SetPortalUIURL(portalUIURL string) *CreateOfferingOptions {
+ options.PortalUIURL = core.StringPtr(portalUIURL)
+ return options
+}
+
+// SetCatalogID : Allow user to set CatalogID
+func (options *CreateOfferingOptions) SetCatalogID(catalogID string) *CreateOfferingOptions {
+ options.CatalogID = core.StringPtr(catalogID)
+ return options
+}
+
+// SetCatalogName : Allow user to set CatalogName
+func (options *CreateOfferingOptions) SetCatalogName(catalogName string) *CreateOfferingOptions {
+ options.CatalogName = core.StringPtr(catalogName)
+ return options
+}
+
+// SetMetadata : Allow user to set Metadata
+func (options *CreateOfferingOptions) SetMetadata(metadata map[string]interface{}) *CreateOfferingOptions {
+ options.Metadata = metadata
+ return options
+}
+
+// SetDisclaimer : Allow user to set Disclaimer
+func (options *CreateOfferingOptions) SetDisclaimer(disclaimer string) *CreateOfferingOptions {
+ options.Disclaimer = core.StringPtr(disclaimer)
+ return options
+}
+
+// SetHidden : Allow user to set Hidden
+func (options *CreateOfferingOptions) SetHidden(hidden bool) *CreateOfferingOptions {
+ options.Hidden = core.BoolPtr(hidden)
+ return options
+}
+
+// SetProvider : Allow user to set Provider
+func (options *CreateOfferingOptions) SetProvider(provider string) *CreateOfferingOptions {
+ options.Provider = core.StringPtr(provider)
+ return options
+}
+
+// SetRepoInfo : Allow user to set RepoInfo
+func (options *CreateOfferingOptions) SetRepoInfo(repoInfo *RepoInfo) *CreateOfferingOptions {
+ options.RepoInfo = repoInfo
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CreateOfferingOptions) SetHeaders(param map[string]string) *CreateOfferingOptions {
+ options.Headers = param
+ return options
+}
+
+// DeleteCatalogOptions : The DeleteCatalog options.
+type DeleteCatalogOptions struct {
+ // Catalog identifier.
+ CatalogIdentifier *string `validate:"required,ne="`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewDeleteCatalogOptions : Instantiate DeleteCatalogOptions
+func (*CatalogManagementV1) NewDeleteCatalogOptions(catalogIdentifier string) *DeleteCatalogOptions {
+ return &DeleteCatalogOptions{
+ CatalogIdentifier: core.StringPtr(catalogIdentifier),
+ }
+}
+
+// SetCatalogIdentifier : Allow user to set CatalogIdentifier
+func (options *DeleteCatalogOptions) SetCatalogIdentifier(catalogIdentifier string) *DeleteCatalogOptions {
+ options.CatalogIdentifier = core.StringPtr(catalogIdentifier)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *DeleteCatalogOptions) SetHeaders(param map[string]string) *DeleteCatalogOptions {
+ options.Headers = param
+ return options
+}
+
+// DeleteObjectAccessListOptions : The DeleteObjectAccessList options.
+type DeleteObjectAccessListOptions struct {
+ // Catalog identifier.
+ CatalogIdentifier *string `validate:"required,ne="`
+
+ // Object identifier.
+ ObjectIdentifier *string `validate:"required,ne="`
+
+ // A list of accounts to delete. An entry with star["*"] will remove all accounts.
+ Accounts []string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteObjectAccessListOptions : Instantiate DeleteObjectAccessListOptions +func (*CatalogManagementV1) NewDeleteObjectAccessListOptions(catalogIdentifier string, objectIdentifier string, accounts []string) *DeleteObjectAccessListOptions { + return &DeleteObjectAccessListOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + Accounts: accounts, + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *DeleteObjectAccessListOptions) SetCatalogIdentifier(catalogIdentifier string) *DeleteObjectAccessListOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *DeleteObjectAccessListOptions) SetObjectIdentifier(objectIdentifier string) *DeleteObjectAccessListOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetAccounts : Allow user to set Accounts +func (options *DeleteObjectAccessListOptions) SetAccounts(accounts []string) *DeleteObjectAccessListOptions { + options.Accounts = accounts + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteObjectAccessListOptions) SetHeaders(param map[string]string) *DeleteObjectAccessListOptions { + options.Headers = param + return options +} + +// DeleteObjectAccessOptions : The DeleteObjectAccess options. +type DeleteObjectAccessOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // Account identifier. + AccountIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteObjectAccessOptions : Instantiate DeleteObjectAccessOptions +func (*CatalogManagementV1) NewDeleteObjectAccessOptions(catalogIdentifier string, objectIdentifier string, accountIdentifier string) *DeleteObjectAccessOptions { + return &DeleteObjectAccessOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + AccountIdentifier: core.StringPtr(accountIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *DeleteObjectAccessOptions) SetCatalogIdentifier(catalogIdentifier string) *DeleteObjectAccessOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *DeleteObjectAccessOptions) SetObjectIdentifier(objectIdentifier string) *DeleteObjectAccessOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetAccountIdentifier : Allow user to set AccountIdentifier +func (options *DeleteObjectAccessOptions) SetAccountIdentifier(accountIdentifier string) *DeleteObjectAccessOptions { + options.AccountIdentifier = core.StringPtr(accountIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteObjectAccessOptions) SetHeaders(param map[string]string) *DeleteObjectAccessOptions { + options.Headers = param + return options +} + +// DeleteObjectOptions : The DeleteObject options. +type DeleteObjectOptions struct { + // Catalog identifier. 
+ CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteObjectOptions : Instantiate DeleteObjectOptions +func (*CatalogManagementV1) NewDeleteObjectOptions(catalogIdentifier string, objectIdentifier string) *DeleteObjectOptions { + return &DeleteObjectOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *DeleteObjectOptions) SetCatalogIdentifier(catalogIdentifier string) *DeleteObjectOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *DeleteObjectOptions) SetObjectIdentifier(objectIdentifier string) *DeleteObjectOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteObjectOptions) SetHeaders(param map[string]string) *DeleteObjectOptions { + options.Headers = param + return options +} + +// DeleteOfferingInstanceOptions : The DeleteOfferingInstance options. +type DeleteOfferingInstanceOptions struct { + // Version Instance identifier. + InstanceIdentifier *string `validate:"required,ne="` + + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteOfferingInstanceOptions : Instantiate DeleteOfferingInstanceOptions +func (*CatalogManagementV1) NewDeleteOfferingInstanceOptions(instanceIdentifier string, xAuthRefreshToken string) *DeleteOfferingInstanceOptions { + return &DeleteOfferingInstanceOptions{ + InstanceIdentifier: core.StringPtr(instanceIdentifier), + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetInstanceIdentifier : Allow user to set InstanceIdentifier +func (options *DeleteOfferingInstanceOptions) SetInstanceIdentifier(instanceIdentifier string) *DeleteOfferingInstanceOptions { + options.InstanceIdentifier = core.StringPtr(instanceIdentifier) + return options +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *DeleteOfferingInstanceOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *DeleteOfferingInstanceOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteOfferingInstanceOptions) SetHeaders(param map[string]string) *DeleteOfferingInstanceOptions { + options.Headers = param + return options +} + +// DeleteOfferingOptions : The DeleteOffering options. +type DeleteOfferingOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Offering identification. 
+ OfferingID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteOfferingOptions : Instantiate DeleteOfferingOptions +func (*CatalogManagementV1) NewDeleteOfferingOptions(catalogIdentifier string, offeringID string) *DeleteOfferingOptions { + return &DeleteOfferingOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + OfferingID: core.StringPtr(offeringID), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *DeleteOfferingOptions) SetCatalogIdentifier(catalogIdentifier string) *DeleteOfferingOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetOfferingID : Allow user to set OfferingID +func (options *DeleteOfferingOptions) SetOfferingID(offeringID string) *DeleteOfferingOptions { + options.OfferingID = core.StringPtr(offeringID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteOfferingOptions) SetHeaders(param map[string]string) *DeleteOfferingOptions { + options.Headers = param + return options +} + +// DeleteOperatorsOptions : The DeleteOperators options. +type DeleteOperatorsOptions struct { + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Cluster identification. + ClusterID *string `validate:"required"` + + // Cluster region. + Region *string `validate:"required"` + + // A dotted value of `catalogID`.`versionID`. + VersionLocatorID *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteOperatorsOptions : Instantiate DeleteOperatorsOptions +func (*CatalogManagementV1) NewDeleteOperatorsOptions(xAuthRefreshToken string, clusterID string, region string, versionLocatorID string) *DeleteOperatorsOptions { + return &DeleteOperatorsOptions{ + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + ClusterID: core.StringPtr(clusterID), + Region: core.StringPtr(region), + VersionLocatorID: core.StringPtr(versionLocatorID), + } +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *DeleteOperatorsOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *DeleteOperatorsOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *DeleteOperatorsOptions) SetClusterID(clusterID string) *DeleteOperatorsOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *DeleteOperatorsOptions) SetRegion(region string) *DeleteOperatorsOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetVersionLocatorID : Allow user to set VersionLocatorID +func (options *DeleteOperatorsOptions) SetVersionLocatorID(versionLocatorID string) *DeleteOperatorsOptions { + options.VersionLocatorID = core.StringPtr(versionLocatorID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteOperatorsOptions) SetHeaders(param map[string]string) *DeleteOperatorsOptions { + options.Headers = param + return options +} + +// DeleteVersionOptions : The DeleteVersion options. +type DeleteVersionOptions struct { + // A dotted value of `catalogID`.`versionID`. 
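(Editorial aside, not part of the vendored diff: the VersionLocatorID values used throughout these options are the dotted `catalogID`.`versionID` form described above. A sketch, assuming a configured *CatalogManagementV1 client named catalogManagement and placeholder identifiers.)

    versionLocator := catalogID + "." + versionID // e.g. "<catalog-id>.<version-id>"
    deleteOperatorsOptions := catalogManagement.NewDeleteOperatorsOptions(
        refreshToken, clusterID, "us-south", versionLocator)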
+ VersionLocID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteVersionOptions : Instantiate DeleteVersionOptions +func (*CatalogManagementV1) NewDeleteVersionOptions(versionLocID string) *DeleteVersionOptions { + return &DeleteVersionOptions{ + VersionLocID: core.StringPtr(versionLocID), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *DeleteVersionOptions) SetVersionLocID(versionLocID string) *DeleteVersionOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteVersionOptions) SetHeaders(param map[string]string) *DeleteVersionOptions { + options.Headers = param + return options +} + +// DeployOperatorsOptions : The DeployOperators options. +type DeployOperatorsOptions struct { + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Cluster ID. + ClusterID *string + + // Cluster region. + Region *string + + // Kube namespaces to deploy Operator(s) to. + Namespaces []string + + // Denotes whether to install Operator(s) globally. + AllNamespaces *bool + + // A dotted value of `catalogID`.`versionID`. + VersionLocatorID *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeployOperatorsOptions : Instantiate DeployOperatorsOptions +func (*CatalogManagementV1) NewDeployOperatorsOptions(xAuthRefreshToken string) *DeployOperatorsOptions { + return &DeployOperatorsOptions{ + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *DeployOperatorsOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *DeployOperatorsOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *DeployOperatorsOptions) SetClusterID(clusterID string) *DeployOperatorsOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *DeployOperatorsOptions) SetRegion(region string) *DeployOperatorsOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetNamespaces : Allow user to set Namespaces +func (options *DeployOperatorsOptions) SetNamespaces(namespaces []string) *DeployOperatorsOptions { + options.Namespaces = namespaces + return options +} + +// SetAllNamespaces : Allow user to set AllNamespaces +func (options *DeployOperatorsOptions) SetAllNamespaces(allNamespaces bool) *DeployOperatorsOptions { + options.AllNamespaces = core.BoolPtr(allNamespaces) + return options +} + +// SetVersionLocatorID : Allow user to set VersionLocatorID +func (options *DeployOperatorsOptions) SetVersionLocatorID(versionLocatorID string) *DeployOperatorsOptions { + options.VersionLocatorID = core.StringPtr(versionLocatorID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeployOperatorsOptions) SetHeaders(param map[string]string) *DeployOperatorsOptions { + options.Headers = param + return options +} + +// DeployRequestBodySchematics : Schematics workspace configuration. +type DeployRequestBodySchematics struct { + // Schematics workspace name. + Name *string `json:"name,omitempty"` + + // Schematics workspace description. + Description *string `json:"description,omitempty"` + + // Schematics workspace tags. 
+	Tags []string `json:"tags,omitempty"`
+
+	// Resource group to use when creating the schematics workspace.
+	ResourceGroupID *string `json:"resource_group_id,omitempty"`
+}
+
+// UnmarshalDeployRequestBodySchematics unmarshals an instance of DeployRequestBodySchematics from the specified map of raw messages.
+func UnmarshalDeployRequestBodySchematics(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DeployRequestBodySchematics)
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "tags", &obj.Tags)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "resource_group_id", &obj.ResourceGroupID)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// Deployment : Deployment for offering.
+type Deployment struct {
+	// Unique ID.
+	ID *string `json:"id,omitempty"`
+
+	// Display Name in the requested language.
+	Label *string `json:"label,omitempty"`
+
+	// The programmatic name of this offering.
+	Name *string `json:"name,omitempty"`
+
+	// Short description in the requested language.
+	ShortDescription *string `json:"short_description,omitempty"`
+
+	// Long description in the requested language.
+	LongDescription *string `json:"long_description,omitempty"`
+
+	// Open-ended metadata information.
+	Metadata map[string]interface{} `json:"metadata,omitempty"`
+
+	// List of tags associated with this catalog.
+	Tags []string `json:"tags,omitempty"`
+
+	// The date-time this catalog was created.
+	Created *strfmt.DateTime `json:"created,omitempty"`
+
+	// The date-time this catalog was last updated.
+	Updated *strfmt.DateTime `json:"updated,omitempty"`
+}
+
+// UnmarshalDeployment unmarshals an instance of Deployment from the specified map of raw messages.
+func UnmarshalDeployment(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(Deployment)
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "label", &obj.Label)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "short_description", &obj.ShortDescription)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "long_description", &obj.LongDescription)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "metadata", &obj.Metadata)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "tags", &obj.Tags)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "created", &obj.Created)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "updated", &obj.Updated)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// DeprecateVersionOptions : The DeprecateVersion options.
+type DeprecateVersionOptions struct {
+	// A dotted value of `catalogID`.`versionID`.
+	VersionLocID *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewDeprecateVersionOptions : Instantiate DeprecateVersionOptions
+func (*CatalogManagementV1) NewDeprecateVersionOptions(versionLocID string) *DeprecateVersionOptions {
+	return &DeprecateVersionOptions{
+		VersionLocID: core.StringPtr(versionLocID),
+	}
+}
+
+// SetVersionLocID : Allow user to set VersionLocID
+func (options *DeprecateVersionOptions) SetVersionLocID(versionLocID string) *DeprecateVersionOptions {
+	options.VersionLocID = core.StringPtr(versionLocID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *DeprecateVersionOptions) SetHeaders(param map[string]string) *DeprecateVersionOptions {
+	options.Headers = param
+	return options
+}
+
+// Feature : Feature information.
+type Feature struct {
+	// Heading.
+	Title *string `json:"title,omitempty"`
+
+	// Feature description.
+	Description *string `json:"description,omitempty"`
+}
+
+// UnmarshalFeature unmarshals an instance of Feature from the specified map of raw messages.
+func UnmarshalFeature(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(Feature)
+	err = core.UnmarshalPrimitive(m, "title", &obj.Title)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// FilterTerms : Offering filter terms.
+type FilterTerms struct {
+	// List of values to match against. If include is true, then if the offering has one of the values then the offering is
+	// included. If include is false, then if the offering has one of the values then the offering is excluded.
+	FilterTerms []string `json:"filter_terms,omitempty"`
+}
+
+// UnmarshalFilterTerms unmarshals an instance of FilterTerms from the specified map of raw messages.
+func UnmarshalFilterTerms(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(FilterTerms)
+	err = core.UnmarshalPrimitive(m, "filter_terms", &obj.FilterTerms)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// Filters : Filters for account and catalog filters.
+type Filters struct {
+	// -> true - Include all of the public catalog when filtering. Further settings will specifically exclude some
+	// offerings. false - Exclude all of the public catalog when filtering. Further settings will specifically include some
+	// offerings.
+	IncludeAll *bool `json:"include_all,omitempty"`
+
+	// Filter against offering properties.
+	CategoryFilters map[string]CategoryFilter `json:"category_filters,omitempty"`
+
+	// Filter on offering IDs. There is an include filter and an exclude filter. Both can be set.
+	IDFilters *IDFilter `json:"id_filters,omitempty"`
+}
+
+// UnmarshalFilters unmarshals an instance of Filters from the specified map of raw messages.
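+//
+// A minimal usage sketch (the raw-message map would normally come from the
+// SDK core's response handling; the JSON literal and variable names here are
+// only assumptions):
+//
+//	var raw map[string]json.RawMessage
+//	_ = json.Unmarshal([]byte(`{"include_all": true}`), &raw)
+//	var filters *Filters
+//	err := UnmarshalFilters(raw, &filters)
+//
+// Note that result must be a pointer to a pointer (here **Filters), because
+// the function assigns the decoded value through reflection.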
+func UnmarshalFilters(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Filters) + err = core.UnmarshalPrimitive(m, "include_all", &obj.IncludeAll) + if err != nil { + return + } + err = core.UnmarshalModel(m, "category_filters", &obj.CategoryFilters, UnmarshalCategoryFilter) + if err != nil { + return + } + err = core.UnmarshalModel(m, "id_filters", &obj.IDFilters, UnmarshalIDFilter) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetCatalogAccountAuditOptions : The GetCatalogAccountAudit options. +type GetCatalogAccountAuditOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetCatalogAccountAuditOptions : Instantiate GetCatalogAccountAuditOptions +func (*CatalogManagementV1) NewGetCatalogAccountAuditOptions() *GetCatalogAccountAuditOptions { + return &GetCatalogAccountAuditOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetCatalogAccountAuditOptions) SetHeaders(param map[string]string) *GetCatalogAccountAuditOptions { + options.Headers = param + return options +} + +// GetCatalogAccountFiltersOptions : The GetCatalogAccountFilters options. +type GetCatalogAccountFiltersOptions struct { + // catalog id. Narrow down filters to the account and just the one catalog. + Catalog *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetCatalogAccountFiltersOptions : Instantiate GetCatalogAccountFiltersOptions +func (*CatalogManagementV1) NewGetCatalogAccountFiltersOptions() *GetCatalogAccountFiltersOptions { + return &GetCatalogAccountFiltersOptions{} +} + +// SetCatalog : Allow user to set Catalog +func (options *GetCatalogAccountFiltersOptions) SetCatalog(catalog string) *GetCatalogAccountFiltersOptions { + options.Catalog = core.StringPtr(catalog) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetCatalogAccountFiltersOptions) SetHeaders(param map[string]string) *GetCatalogAccountFiltersOptions { + options.Headers = param + return options +} + +// GetCatalogAccountOptions : The GetCatalogAccount options. +type GetCatalogAccountOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetCatalogAccountOptions : Instantiate GetCatalogAccountOptions +func (*CatalogManagementV1) NewGetCatalogAccountOptions() *GetCatalogAccountOptions { + return &GetCatalogAccountOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *GetCatalogAccountOptions) SetHeaders(param map[string]string) *GetCatalogAccountOptions { + options.Headers = param + return options +} + +// GetCatalogAuditOptions : The GetCatalogAudit options. +type GetCatalogAuditOptions struct { + // Catalog identifier. 
+ CatalogIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetCatalogAuditOptions : Instantiate GetCatalogAuditOptions +func (*CatalogManagementV1) NewGetCatalogAuditOptions(catalogIdentifier string) *GetCatalogAuditOptions { + return &GetCatalogAuditOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *GetCatalogAuditOptions) SetCatalogIdentifier(catalogIdentifier string) *GetCatalogAuditOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetCatalogAuditOptions) SetHeaders(param map[string]string) *GetCatalogAuditOptions { + options.Headers = param + return options +} + +// GetCatalogOptions : The GetCatalog options. +type GetCatalogOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetCatalogOptions : Instantiate GetCatalogOptions +func (*CatalogManagementV1) NewGetCatalogOptions(catalogIdentifier string) *GetCatalogOptions { + return &GetCatalogOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *GetCatalogOptions) SetCatalogIdentifier(catalogIdentifier string) *GetCatalogOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetCatalogOptions) SetHeaders(param map[string]string) *GetCatalogOptions { + options.Headers = param + return options +} + +// GetClusterOptions : The GetCluster options. +type GetClusterOptions struct { + // ID of the cluster. + ClusterID *string `validate:"required,ne="` + + // Region of the cluster. + Region *string `validate:"required"` + + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetClusterOptions : Instantiate GetClusterOptions +func (*CatalogManagementV1) NewGetClusterOptions(clusterID string, region string, xAuthRefreshToken string) *GetClusterOptions { + return &GetClusterOptions{ + ClusterID: core.StringPtr(clusterID), + Region: core.StringPtr(region), + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetClusterID : Allow user to set ClusterID +func (options *GetClusterOptions) SetClusterID(clusterID string) *GetClusterOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *GetClusterOptions) SetRegion(region string) *GetClusterOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *GetClusterOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *GetClusterOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetClusterOptions) SetHeaders(param map[string]string) *GetClusterOptions { + options.Headers = param + return options +} + +// GetConsumptionOfferingsOptions : The GetConsumptionOfferings options. +type GetConsumptionOfferingsOptions struct { + // true - Strip down the content of what is returned. 
For example, don't return the readme. Makes the result much
+	// smaller. Defaults to false.
+	Digest *bool
+
+	// Catalog ID. Narrow search down to just a particular catalog. It will apply the catalog's public filters to the
+	// public catalog offerings on the result.
+	Catalog *string
+
+	// What should be selected. Default is 'all' which will return both public and private offerings. 'public' returns only
+	// the public offerings and 'private' returns only the private offerings.
+	Select *string
+
+	// true - include offerings which have been marked as hidden. The default is false and hidden offerings are not
+	// returned.
+	IncludeHidden *bool
+
+	// The number of results to return.
+	Limit *int64
+
+	// The number of results to skip before returning values.
+	Offset *int64
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the GetConsumptionOfferingsOptions.Select property.
+// What should be selected. Default is 'all' which will return both public and private offerings. 'public' returns only
+// the public offerings and 'private' returns only the private offerings.
+const (
+	GetConsumptionOfferingsOptionsSelectAllConst = "all"
+	GetConsumptionOfferingsOptionsSelectPrivateConst = "private"
+	GetConsumptionOfferingsOptionsSelectPublicConst = "public"
+)
+
+// NewGetConsumptionOfferingsOptions : Instantiate GetConsumptionOfferingsOptions
+func (*CatalogManagementV1) NewGetConsumptionOfferingsOptions() *GetConsumptionOfferingsOptions {
+	return &GetConsumptionOfferingsOptions{}
+}
+
+// SetDigest : Allow user to set Digest
+func (options *GetConsumptionOfferingsOptions) SetDigest(digest bool) *GetConsumptionOfferingsOptions {
+	options.Digest = core.BoolPtr(digest)
+	return options
+}
+
+// SetCatalog : Allow user to set Catalog
+func (options *GetConsumptionOfferingsOptions) SetCatalog(catalog string) *GetConsumptionOfferingsOptions {
+	options.Catalog = core.StringPtr(catalog)
+	return options
+}
+
+// SetSelect : Allow user to set Select
+func (options *GetConsumptionOfferingsOptions) SetSelect(selectVar string) *GetConsumptionOfferingsOptions {
+	options.Select = core.StringPtr(selectVar)
+	return options
+}
+
+// SetIncludeHidden : Allow user to set IncludeHidden
+func (options *GetConsumptionOfferingsOptions) SetIncludeHidden(includeHidden bool) *GetConsumptionOfferingsOptions {
+	options.IncludeHidden = core.BoolPtr(includeHidden)
+	return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *GetConsumptionOfferingsOptions) SetLimit(limit int64) *GetConsumptionOfferingsOptions {
+	options.Limit = core.Int64Ptr(limit)
+	return options
+}
+
+// SetOffset : Allow user to set Offset
+func (options *GetConsumptionOfferingsOptions) SetOffset(offset int64) *GetConsumptionOfferingsOptions {
+	options.Offset = core.Int64Ptr(offset)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetConsumptionOfferingsOptions) SetHeaders(param map[string]string) *GetConsumptionOfferingsOptions {
+	options.Headers = param
+	return options
+}
+
+// GetNamespacesOptions : The GetNamespaces options.
+type GetNamespacesOptions struct {
+	// ID of the cluster.
+	ClusterID *string `validate:"required,ne="`
+
+	// Cluster region.
+	Region *string `validate:"required"`
+
+	// IAM Refresh token.
+	XAuthRefreshToken *string `validate:"required"`
+
+	// The maximum number of results to return.
+	Limit *int64
+
+	// The number of results to skip before returning values.
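+	// To fetch the second page of 50 results, for example (a sketch; `opts` is
+	// assumed to be a *GetNamespacesOptions built by NewGetNamespacesOptions):
+	//	opts.SetLimit(50)
+	//	opts.SetOffset(50)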
+ Offset *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetNamespacesOptions : Instantiate GetNamespacesOptions +func (*CatalogManagementV1) NewGetNamespacesOptions(clusterID string, region string, xAuthRefreshToken string) *GetNamespacesOptions { + return &GetNamespacesOptions{ + ClusterID: core.StringPtr(clusterID), + Region: core.StringPtr(region), + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetClusterID : Allow user to set ClusterID +func (options *GetNamespacesOptions) SetClusterID(clusterID string) *GetNamespacesOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *GetNamespacesOptions) SetRegion(region string) *GetNamespacesOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *GetNamespacesOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *GetNamespacesOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetLimit : Allow user to set Limit +func (options *GetNamespacesOptions) SetLimit(limit int64) *GetNamespacesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *GetNamespacesOptions) SetOffset(offset int64) *GetNamespacesOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetNamespacesOptions) SetHeaders(param map[string]string) *GetNamespacesOptions { + options.Headers = param + return options +} + +// GetObjectAccessListOptions : The GetObjectAccessList options. +type GetObjectAccessListOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // The maximum number of results to return. + Limit *int64 + + // The number of results to skip before returning values. 
+ Offset *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetObjectAccessListOptions : Instantiate GetObjectAccessListOptions +func (*CatalogManagementV1) NewGetObjectAccessListOptions(catalogIdentifier string, objectIdentifier string) *GetObjectAccessListOptions { + return &GetObjectAccessListOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *GetObjectAccessListOptions) SetCatalogIdentifier(catalogIdentifier string) *GetObjectAccessListOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *GetObjectAccessListOptions) SetObjectIdentifier(objectIdentifier string) *GetObjectAccessListOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetLimit : Allow user to set Limit +func (options *GetObjectAccessListOptions) SetLimit(limit int64) *GetObjectAccessListOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *GetObjectAccessListOptions) SetOffset(offset int64) *GetObjectAccessListOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetObjectAccessListOptions) SetHeaders(param map[string]string) *GetObjectAccessListOptions { + options.Headers = param + return options +} + +// GetObjectAccessOptions : The GetObjectAccess options. +type GetObjectAccessOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // Account identifier. + AccountIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetObjectAccessOptions : Instantiate GetObjectAccessOptions +func (*CatalogManagementV1) NewGetObjectAccessOptions(catalogIdentifier string, objectIdentifier string, accountIdentifier string) *GetObjectAccessOptions { + return &GetObjectAccessOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + AccountIdentifier: core.StringPtr(accountIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *GetObjectAccessOptions) SetCatalogIdentifier(catalogIdentifier string) *GetObjectAccessOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *GetObjectAccessOptions) SetObjectIdentifier(objectIdentifier string) *GetObjectAccessOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetAccountIdentifier : Allow user to set AccountIdentifier +func (options *GetObjectAccessOptions) SetAccountIdentifier(accountIdentifier string) *GetObjectAccessOptions { + options.AccountIdentifier = core.StringPtr(accountIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetObjectAccessOptions) SetHeaders(param map[string]string) *GetObjectAccessOptions { + options.Headers = param + return options +} + +// GetObjectAuditOptions : The GetObjectAudit options. +type GetObjectAuditOptions struct { + // Catalog identifier. 
+ CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetObjectAuditOptions : Instantiate GetObjectAuditOptions +func (*CatalogManagementV1) NewGetObjectAuditOptions(catalogIdentifier string, objectIdentifier string) *GetObjectAuditOptions { + return &GetObjectAuditOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *GetObjectAuditOptions) SetCatalogIdentifier(catalogIdentifier string) *GetObjectAuditOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *GetObjectAuditOptions) SetObjectIdentifier(objectIdentifier string) *GetObjectAuditOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetObjectAuditOptions) SetHeaders(param map[string]string) *GetObjectAuditOptions { + options.Headers = param + return options +} + +// GetObjectOptions : The GetObject options. +type GetObjectOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetObjectOptions : Instantiate GetObjectOptions +func (*CatalogManagementV1) NewGetObjectOptions(catalogIdentifier string, objectIdentifier string) *GetObjectOptions { + return &GetObjectOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *GetObjectOptions) SetCatalogIdentifier(catalogIdentifier string) *GetObjectOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *GetObjectOptions) SetObjectIdentifier(objectIdentifier string) *GetObjectOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetObjectOptions) SetHeaders(param map[string]string) *GetObjectOptions { + options.Headers = param + return options +} + +// GetOfferingAboutOptions : The GetOfferingAbout options. +type GetOfferingAboutOptions struct { + // A dotted value of `catalogID`.`versionID`. 
+ VersionLocID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetOfferingAboutOptions : Instantiate GetOfferingAboutOptions +func (*CatalogManagementV1) NewGetOfferingAboutOptions(versionLocID string) *GetOfferingAboutOptions { + return &GetOfferingAboutOptions{ + VersionLocID: core.StringPtr(versionLocID), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *GetOfferingAboutOptions) SetVersionLocID(versionLocID string) *GetOfferingAboutOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetOfferingAboutOptions) SetHeaders(param map[string]string) *GetOfferingAboutOptions { + options.Headers = param + return options +} + +// GetOfferingAuditOptions : The GetOfferingAudit options. +type GetOfferingAuditOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Offering identifier. + OfferingID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetOfferingAuditOptions : Instantiate GetOfferingAuditOptions +func (*CatalogManagementV1) NewGetOfferingAuditOptions(catalogIdentifier string, offeringID string) *GetOfferingAuditOptions { + return &GetOfferingAuditOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + OfferingID: core.StringPtr(offeringID), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *GetOfferingAuditOptions) SetCatalogIdentifier(catalogIdentifier string) *GetOfferingAuditOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetOfferingID : Allow user to set OfferingID +func (options *GetOfferingAuditOptions) SetOfferingID(offeringID string) *GetOfferingAuditOptions { + options.OfferingID = core.StringPtr(offeringID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetOfferingAuditOptions) SetHeaders(param map[string]string) *GetOfferingAuditOptions { + options.Headers = param + return options +} + +// GetOfferingContainerImagesOptions : The GetOfferingContainerImages options. +type GetOfferingContainerImagesOptions struct { + // A dotted value of `catalogID`.`versionID`. + VersionLocID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetOfferingContainerImagesOptions : Instantiate GetOfferingContainerImagesOptions +func (*CatalogManagementV1) NewGetOfferingContainerImagesOptions(versionLocID string) *GetOfferingContainerImagesOptions { + return &GetOfferingContainerImagesOptions{ + VersionLocID: core.StringPtr(versionLocID), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *GetOfferingContainerImagesOptions) SetVersionLocID(versionLocID string) *GetOfferingContainerImagesOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetOfferingContainerImagesOptions) SetHeaders(param map[string]string) *GetOfferingContainerImagesOptions { + options.Headers = param + return options +} + +// GetOfferingInstanceOptions : The GetOfferingInstance options. +type GetOfferingInstanceOptions struct { + // Version Instance identifier. 
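+	// A construction sketch, assuming `service` is an initialized
+	// *CatalogManagementV1 client and the GUID is a placeholder:
+	//	opts := service.NewGetOfferingInstanceOptions("instance-guid")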
+	InstanceIdentifier *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewGetOfferingInstanceOptions : Instantiate GetOfferingInstanceOptions
+func (*CatalogManagementV1) NewGetOfferingInstanceOptions(instanceIdentifier string) *GetOfferingInstanceOptions {
+	return &GetOfferingInstanceOptions{
+		InstanceIdentifier: core.StringPtr(instanceIdentifier),
+	}
+}
+
+// SetInstanceIdentifier : Allow user to set InstanceIdentifier
+func (options *GetOfferingInstanceOptions) SetInstanceIdentifier(instanceIdentifier string) *GetOfferingInstanceOptions {
+	options.InstanceIdentifier = core.StringPtr(instanceIdentifier)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetOfferingInstanceOptions) SetHeaders(param map[string]string) *GetOfferingInstanceOptions {
+	options.Headers = param
+	return options
+}
+
+// GetOfferingLicenseOptions : The GetOfferingLicense options.
+type GetOfferingLicenseOptions struct {
+	// A dotted value of `catalogID`.`versionID`.
+	VersionLocID *string `validate:"required,ne="`
+
+	// The ID of the license, which maps to the file name in the 'licenses' directory of this version's tgz file.
+	LicenseID *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewGetOfferingLicenseOptions : Instantiate GetOfferingLicenseOptions
+func (*CatalogManagementV1) NewGetOfferingLicenseOptions(versionLocID string, licenseID string) *GetOfferingLicenseOptions {
+	return &GetOfferingLicenseOptions{
+		VersionLocID: core.StringPtr(versionLocID),
+		LicenseID: core.StringPtr(licenseID),
+	}
+}
+
+// SetVersionLocID : Allow user to set VersionLocID
+func (options *GetOfferingLicenseOptions) SetVersionLocID(versionLocID string) *GetOfferingLicenseOptions {
+	options.VersionLocID = core.StringPtr(versionLocID)
+	return options
+}
+
+// SetLicenseID : Allow user to set LicenseID
+func (options *GetOfferingLicenseOptions) SetLicenseID(licenseID string) *GetOfferingLicenseOptions {
+	options.LicenseID = core.StringPtr(licenseID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetOfferingLicenseOptions) SetHeaders(param map[string]string) *GetOfferingLicenseOptions {
+	options.Headers = param
+	return options
+}
+
+// GetOfferingOptions : The GetOffering options.
+type GetOfferingOptions struct {
+	// Catalog identifier.
+	CatalogIdentifier *string `validate:"required,ne="`
+
+	// Offering identification.
+	OfferingID *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewGetOfferingOptions : Instantiate GetOfferingOptions
+func (*CatalogManagementV1) NewGetOfferingOptions(catalogIdentifier string, offeringID string) *GetOfferingOptions {
+	return &GetOfferingOptions{
+		CatalogIdentifier: core.StringPtr(catalogIdentifier),
+		OfferingID: core.StringPtr(offeringID),
+	}
+}
+
+// SetCatalogIdentifier : Allow user to set CatalogIdentifier
+func (options *GetOfferingOptions) SetCatalogIdentifier(catalogIdentifier string) *GetOfferingOptions {
+	options.CatalogIdentifier = core.StringPtr(catalogIdentifier)
+	return options
+}
+
+// SetOfferingID : Allow user to set OfferingID
+func (options *GetOfferingOptions) SetOfferingID(offeringID string) *GetOfferingOptions {
+	options.OfferingID = core.StringPtr(offeringID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetOfferingOptions) SetHeaders(param map[string]string) *GetOfferingOptions {
+	options.Headers = param
+	return options
+}
+
+// GetOfferingUpdatesOptions : The GetOfferingUpdates options.
+type GetOfferingUpdatesOptions struct {
+	// Catalog identifier.
+	CatalogIdentifier *string `validate:"required,ne="`
+
+	// Offering identification.
+	OfferingID *string `validate:"required,ne="`
+
+	// The kind of offering (e.g., helm, ova, terraform, ...).
+	Kind *string `validate:"required"`
+
+	// Optionally provide an existing version to check updates for. If one is not given, all versions will be returned.
+	Version *string
+
+	// The ID of the cluster where this version was installed.
+	ClusterID *string
+
+	// The region of the cluster where this version was installed.
+	Region *string
+
+	// The resource group ID of the cluster where this version was installed.
+	ResourceGroupID *string
+
+	// The namespace of the cluster where this version was installed.
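+	// A construction sketch for these options, assuming `service` is an
+	// initialized *CatalogManagementV1 client and all identifiers are
+	// placeholders:
+	//	opts := service.NewGetOfferingUpdatesOptions("catalog-id", "offering-id", "helm")
+	//	opts.SetVersion("1.0.0")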
+ Namespace *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetOfferingUpdatesOptions : Instantiate GetOfferingUpdatesOptions +func (*CatalogManagementV1) NewGetOfferingUpdatesOptions(catalogIdentifier string, offeringID string, kind string) *GetOfferingUpdatesOptions { + return &GetOfferingUpdatesOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + OfferingID: core.StringPtr(offeringID), + Kind: core.StringPtr(kind), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *GetOfferingUpdatesOptions) SetCatalogIdentifier(catalogIdentifier string) *GetOfferingUpdatesOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetOfferingID : Allow user to set OfferingID +func (options *GetOfferingUpdatesOptions) SetOfferingID(offeringID string) *GetOfferingUpdatesOptions { + options.OfferingID = core.StringPtr(offeringID) + return options +} + +// SetKind : Allow user to set Kind +func (options *GetOfferingUpdatesOptions) SetKind(kind string) *GetOfferingUpdatesOptions { + options.Kind = core.StringPtr(kind) + return options +} + +// SetVersion : Allow user to set Version +func (options *GetOfferingUpdatesOptions) SetVersion(version string) *GetOfferingUpdatesOptions { + options.Version = core.StringPtr(version) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *GetOfferingUpdatesOptions) SetClusterID(clusterID string) *GetOfferingUpdatesOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *GetOfferingUpdatesOptions) SetRegion(region string) *GetOfferingUpdatesOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetResourceGroupID : Allow user to set ResourceGroupID +func (options *GetOfferingUpdatesOptions) SetResourceGroupID(resourceGroupID string) *GetOfferingUpdatesOptions { + options.ResourceGroupID = core.StringPtr(resourceGroupID) + return options +} + +// SetNamespace : Allow user to set Namespace +func (options *GetOfferingUpdatesOptions) SetNamespace(namespace string) *GetOfferingUpdatesOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetOfferingUpdatesOptions) SetHeaders(param map[string]string) *GetOfferingUpdatesOptions { + options.Headers = param + return options +} + +// GetOfferingWorkingCopyOptions : The GetOfferingWorkingCopy options. +type GetOfferingWorkingCopyOptions struct { + // A dotted value of `catalogID`.`versionID`. 
+ VersionLocID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetOfferingWorkingCopyOptions : Instantiate GetOfferingWorkingCopyOptions +func (*CatalogManagementV1) NewGetOfferingWorkingCopyOptions(versionLocID string) *GetOfferingWorkingCopyOptions { + return &GetOfferingWorkingCopyOptions{ + VersionLocID: core.StringPtr(versionLocID), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *GetOfferingWorkingCopyOptions) SetVersionLocID(versionLocID string) *GetOfferingWorkingCopyOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetOfferingWorkingCopyOptions) SetHeaders(param map[string]string) *GetOfferingWorkingCopyOptions { + options.Headers = param + return options +} + +// GetOverrideValuesOptions : The GetOverrideValues options. +type GetOverrideValuesOptions struct { + // A dotted value of `catalogID`.`versionID`. + VersionLocID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetOverrideValuesOptions : Instantiate GetOverrideValuesOptions +func (*CatalogManagementV1) NewGetOverrideValuesOptions(versionLocID string) *GetOverrideValuesOptions { + return &GetOverrideValuesOptions{ + VersionLocID: core.StringPtr(versionLocID), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *GetOverrideValuesOptions) SetVersionLocID(versionLocID string) *GetOverrideValuesOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetOverrideValuesOptions) SetHeaders(param map[string]string) *GetOverrideValuesOptions { + options.Headers = param + return options +} + +// GetPreinstallOptions : The GetPreinstall options. +type GetPreinstallOptions struct { + // A dotted value of `catalogID`.`versionID`. + VersionLocID *string `validate:"required,ne="` + + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // ID of the cluster. + ClusterID *string + + // Cluster region. + Region *string + + // Required if the version's pre-install scope is `namespace`. 
+ Namespace *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetPreinstallOptions : Instantiate GetPreinstallOptions +func (*CatalogManagementV1) NewGetPreinstallOptions(versionLocID string, xAuthRefreshToken string) *GetPreinstallOptions { + return &GetPreinstallOptions{ + VersionLocID: core.StringPtr(versionLocID), + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *GetPreinstallOptions) SetVersionLocID(versionLocID string) *GetPreinstallOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *GetPreinstallOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *GetPreinstallOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *GetPreinstallOptions) SetClusterID(clusterID string) *GetPreinstallOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *GetPreinstallOptions) SetRegion(region string) *GetPreinstallOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetNamespace : Allow user to set Namespace +func (options *GetPreinstallOptions) SetNamespace(namespace string) *GetPreinstallOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetPreinstallOptions) SetHeaders(param map[string]string) *GetPreinstallOptions { + options.Headers = param + return options +} + +// GetValidationStatusOptions : The GetValidationStatus options. +type GetValidationStatusOptions struct { + // A dotted value of `catalogID`.`versionID`. + VersionLocID *string `validate:"required,ne="` + + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetValidationStatusOptions : Instantiate GetValidationStatusOptions +func (*CatalogManagementV1) NewGetValidationStatusOptions(versionLocID string, xAuthRefreshToken string) *GetValidationStatusOptions { + return &GetValidationStatusOptions{ + VersionLocID: core.StringPtr(versionLocID), + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *GetValidationStatusOptions) SetVersionLocID(versionLocID string) *GetValidationStatusOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *GetValidationStatusOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *GetValidationStatusOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetValidationStatusOptions) SetHeaders(param map[string]string) *GetValidationStatusOptions { + options.Headers = param + return options +} + +// GetVersionOptions : The GetVersion options. +type GetVersionOptions struct { + // A dotted value of `catalogID`.`versionID`. 
+	VersionLocID *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewGetVersionOptions : Instantiate GetVersionOptions
+func (*CatalogManagementV1) NewGetVersionOptions(versionLocID string) *GetVersionOptions {
+	return &GetVersionOptions{
+		VersionLocID: core.StringPtr(versionLocID),
+	}
+}
+
+// SetVersionLocID : Allow user to set VersionLocID
+func (options *GetVersionOptions) SetVersionLocID(versionLocID string) *GetVersionOptions {
+	options.VersionLocID = core.StringPtr(versionLocID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetVersionOptions) SetHeaders(param map[string]string) *GetVersionOptions {
+	options.Headers = param
+	return options
+}
+
+// IDFilter : Filter on offering IDs. There is an include filter and an exclude filter. Both can be set.
+type IDFilter struct {
+	// Offering filter terms.
+	Include *FilterTerms `json:"include,omitempty"`
+
+	// Offering filter terms.
+	Exclude *FilterTerms `json:"exclude,omitempty"`
+}
+
+// UnmarshalIDFilter unmarshals an instance of IDFilter from the specified map of raw messages.
+func UnmarshalIDFilter(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(IDFilter)
+	err = core.UnmarshalModel(m, "include", &obj.Include, UnmarshalFilterTerms)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "exclude", &obj.Exclude, UnmarshalFilterTerms)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// IBMPublishObjectOptions : The IBMPublishObject options.
+type IBMPublishObjectOptions struct {
+	// Catalog identifier.
+	CatalogIdentifier *string `validate:"required,ne="`
+
+	// Object identifier.
+	ObjectIdentifier *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewIBMPublishObjectOptions : Instantiate IBMPublishObjectOptions
+func (*CatalogManagementV1) NewIBMPublishObjectOptions(catalogIdentifier string, objectIdentifier string) *IBMPublishObjectOptions {
+	return &IBMPublishObjectOptions{
+		CatalogIdentifier: core.StringPtr(catalogIdentifier),
+		ObjectIdentifier: core.StringPtr(objectIdentifier),
+	}
+}
+
+// SetCatalogIdentifier : Allow user to set CatalogIdentifier
+func (options *IBMPublishObjectOptions) SetCatalogIdentifier(catalogIdentifier string) *IBMPublishObjectOptions {
+	options.CatalogIdentifier = core.StringPtr(catalogIdentifier)
+	return options
+}
+
+// SetObjectIdentifier : Allow user to set ObjectIdentifier
+func (options *IBMPublishObjectOptions) SetObjectIdentifier(objectIdentifier string) *IBMPublishObjectOptions {
+	options.ObjectIdentifier = core.StringPtr(objectIdentifier)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *IBMPublishObjectOptions) SetHeaders(param map[string]string) *IBMPublishObjectOptions {
+	options.Headers = param
+	return options
+}
+
+// IBMPublishVersionOptions : The IBMPublishVersion options.
+type IBMPublishVersionOptions struct {
+	// A dotted value of `catalogID`.`versionID`.
+ VersionLocID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewIBMPublishVersionOptions : Instantiate IBMPublishVersionOptions +func (*CatalogManagementV1) NewIBMPublishVersionOptions(versionLocID string) *IBMPublishVersionOptions { + return &IBMPublishVersionOptions{ + VersionLocID: core.StringPtr(versionLocID), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *IBMPublishVersionOptions) SetVersionLocID(versionLocID string) *IBMPublishVersionOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *IBMPublishVersionOptions) SetHeaders(param map[string]string) *IBMPublishVersionOptions { + options.Headers = param + return options +} + +// Image : Image. +type Image struct { + // Image. + Image *string `json:"image,omitempty"` +} + +// UnmarshalImage unmarshals an instance of Image from the specified map of raw messages. +func UnmarshalImage(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Image) + err = core.UnmarshalPrimitive(m, "image", &obj.Image) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageManifest : Image Manifest. +type ImageManifest struct { + // Image manifest description. + Description *string `json:"description,omitempty"` + + // List of images. + Images []Image `json:"images,omitempty"` +} + +// UnmarshalImageManifest unmarshals an instance of ImageManifest from the specified map of raw messages. +func UnmarshalImageManifest(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageManifest) + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalModel(m, "images", &obj.Images, UnmarshalImage) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImportOfferingOptions : The ImportOffering options. +type ImportOfferingOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Tags array. + Tags []string + + // Target kinds. Current valid values are 'iks', 'roks', 'vcenter', and 'terraform'. + TargetKinds []string + + // byte array representing the content to be imported. Only supported for OVA images at this time. + Content *[]byte + + // URL path to zip location. If not specified, must provide content in this post body. + Zipurl *string + + // Re-use the specified offeringID during import. + OfferingID *string + + // The semver value for this new version. + TargetVersion *string + + // Add all possible configuration items when creating this version. + IncludeConfig *bool + + // Indicates that the current terraform template is used to install a VSI Image. + IsVsi *bool + + // The type of repository containing this version. Valid values are 'public_git' or 'enterprise_git'. + RepoType *string + + // Authentication token used to access the specified zip file. 
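+	// A construction sketch, assuming `service` is an initialized
+	// *CatalogManagementV1 client; the catalog ID, URL, and kinds below are
+	// placeholders:
+	//	opts := service.NewImportOfferingOptions("catalog-id")
+	//	opts.SetZipurl("https://example.com/releases/offering-1.0.0.tgz")
+	//	opts.SetTargetKinds([]string{"roks"})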
+ XAuthToken *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewImportOfferingOptions : Instantiate ImportOfferingOptions +func (*CatalogManagementV1) NewImportOfferingOptions(catalogIdentifier string) *ImportOfferingOptions { + return &ImportOfferingOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *ImportOfferingOptions) SetCatalogIdentifier(catalogIdentifier string) *ImportOfferingOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetTags : Allow user to set Tags +func (options *ImportOfferingOptions) SetTags(tags []string) *ImportOfferingOptions { + options.Tags = tags + return options +} + +// SetTargetKinds : Allow user to set TargetKinds +func (options *ImportOfferingOptions) SetTargetKinds(targetKinds []string) *ImportOfferingOptions { + options.TargetKinds = targetKinds + return options +} + +// SetContent : Allow user to set Content +func (options *ImportOfferingOptions) SetContent(content []byte) *ImportOfferingOptions { + options.Content = &content + return options +} + +// SetZipurl : Allow user to set Zipurl +func (options *ImportOfferingOptions) SetZipurl(zipurl string) *ImportOfferingOptions { + options.Zipurl = core.StringPtr(zipurl) + return options +} + +// SetOfferingID : Allow user to set OfferingID +func (options *ImportOfferingOptions) SetOfferingID(offeringID string) *ImportOfferingOptions { + options.OfferingID = core.StringPtr(offeringID) + return options +} + +// SetTargetVersion : Allow user to set TargetVersion +func (options *ImportOfferingOptions) SetTargetVersion(targetVersion string) *ImportOfferingOptions { + options.TargetVersion = core.StringPtr(targetVersion) + return options +} + +// SetIncludeConfig : Allow user to set IncludeConfig +func (options *ImportOfferingOptions) SetIncludeConfig(includeConfig bool) *ImportOfferingOptions { + options.IncludeConfig = core.BoolPtr(includeConfig) + return options +} + +// SetIsVsi : Allow user to set IsVsi +func (options *ImportOfferingOptions) SetIsVsi(isVsi bool) *ImportOfferingOptions { + options.IsVsi = core.BoolPtr(isVsi) + return options +} + +// SetRepoType : Allow user to set RepoType +func (options *ImportOfferingOptions) SetRepoType(repoType string) *ImportOfferingOptions { + options.RepoType = core.StringPtr(repoType) + return options +} + +// SetXAuthToken : Allow user to set XAuthToken +func (options *ImportOfferingOptions) SetXAuthToken(xAuthToken string) *ImportOfferingOptions { + options.XAuthToken = core.StringPtr(xAuthToken) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ImportOfferingOptions) SetHeaders(param map[string]string) *ImportOfferingOptions { + options.Headers = param + return options +} + +// ImportOfferingVersionOptions : The ImportOfferingVersion options. +type ImportOfferingVersionOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Offering identification. + OfferingID *string `validate:"required,ne="` + + // Tags array. + Tags []string + + // Target kinds. Current valid values are 'iks', 'roks', 'vcenter', and 'terraform'. + TargetKinds []string + + // byte array representing the content to be imported. Only supported for OVA images at this time. + Content *[]byte + + // URL path to zip location. If not specified, must provide content in the body of this call. 
+ Zipurl *string + + // The semver value for this new version, if not found in the zip url package content. + TargetVersion *string + + // Add all possible configuration values to this version when importing. + IncludeConfig *bool + + // Indicates that the current terraform template is used to install a VSI Image. + IsVsi *bool + + // The type of repository containing this version. Valid values are 'public_git' or 'enterprise_git'. + RepoType *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewImportOfferingVersionOptions : Instantiate ImportOfferingVersionOptions +func (*CatalogManagementV1) NewImportOfferingVersionOptions(catalogIdentifier string, offeringID string) *ImportOfferingVersionOptions { + return &ImportOfferingVersionOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + OfferingID: core.StringPtr(offeringID), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *ImportOfferingVersionOptions) SetCatalogIdentifier(catalogIdentifier string) *ImportOfferingVersionOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetOfferingID : Allow user to set OfferingID +func (options *ImportOfferingVersionOptions) SetOfferingID(offeringID string) *ImportOfferingVersionOptions { + options.OfferingID = core.StringPtr(offeringID) + return options +} + +// SetTags : Allow user to set Tags +func (options *ImportOfferingVersionOptions) SetTags(tags []string) *ImportOfferingVersionOptions { + options.Tags = tags + return options +} + +// SetTargetKinds : Allow user to set TargetKinds +func (options *ImportOfferingVersionOptions) SetTargetKinds(targetKinds []string) *ImportOfferingVersionOptions { + options.TargetKinds = targetKinds + return options +} + +// SetContent : Allow user to set Content +func (options *ImportOfferingVersionOptions) SetContent(content []byte) *ImportOfferingVersionOptions { + options.Content = &content + return options +} + +// SetZipurl : Allow user to set Zipurl +func (options *ImportOfferingVersionOptions) SetZipurl(zipurl string) *ImportOfferingVersionOptions { + options.Zipurl = core.StringPtr(zipurl) + return options +} + +// SetTargetVersion : Allow user to set TargetVersion +func (options *ImportOfferingVersionOptions) SetTargetVersion(targetVersion string) *ImportOfferingVersionOptions { + options.TargetVersion = core.StringPtr(targetVersion) + return options +} + +// SetIncludeConfig : Allow user to set IncludeConfig +func (options *ImportOfferingVersionOptions) SetIncludeConfig(includeConfig bool) *ImportOfferingVersionOptions { + options.IncludeConfig = core.BoolPtr(includeConfig) + return options +} + +// SetIsVsi : Allow user to set IsVsi +func (options *ImportOfferingVersionOptions) SetIsVsi(isVsi bool) *ImportOfferingVersionOptions { + options.IsVsi = core.BoolPtr(isVsi) + return options +} + +// SetRepoType : Allow user to set RepoType +func (options *ImportOfferingVersionOptions) SetRepoType(repoType string) *ImportOfferingVersionOptions { + options.RepoType = core.StringPtr(repoType) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ImportOfferingVersionOptions) SetHeaders(param map[string]string) *ImportOfferingVersionOptions { + options.Headers = param + return options +} + +// InstallStatus : Installation status. +type InstallStatus struct { + // Installation status metadata. + Metadata *InstallStatusMetadata `json:"metadata,omitempty"` + + // Release information. 
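+	// For example, a populated status might be inspected as (a sketch;
+	// `status` is assumed to be a *InstallStatus):
+	//	if status.Release != nil {
+	//		fmt.Println(len(status.Release.Deployments))
+	//	}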
+ Release *InstallStatusRelease `json:"release,omitempty"` + + // Content management information. + ContentMgmt *InstallStatusContentMgmt `json:"content_mgmt,omitempty"` +} + +// UnmarshalInstallStatus unmarshals an instance of InstallStatus from the specified map of raw messages. +func UnmarshalInstallStatus(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstallStatus) + err = core.UnmarshalModel(m, "metadata", &obj.Metadata, UnmarshalInstallStatusMetadata) + if err != nil { + return + } + err = core.UnmarshalModel(m, "release", &obj.Release, UnmarshalInstallStatusRelease) + if err != nil { + return + } + err = core.UnmarshalModel(m, "content_mgmt", &obj.ContentMgmt, UnmarshalInstallStatusContentMgmt) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstallStatusContentMgmt : Content management information. +type InstallStatusContentMgmt struct { + // Pods. + Pods []map[string]string `json:"pods,omitempty"` + + // Errors. + Errors []map[string]string `json:"errors,omitempty"` +} + +// UnmarshalInstallStatusContentMgmt unmarshals an instance of InstallStatusContentMgmt from the specified map of raw messages. +func UnmarshalInstallStatusContentMgmt(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstallStatusContentMgmt) + err = core.UnmarshalPrimitive(m, "pods", &obj.Pods) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstallStatusMetadata : Installation status metadata. +type InstallStatusMetadata struct { + // Cluster ID. + ClusterID *string `json:"cluster_id,omitempty"` + + // Cluster region. + Region *string `json:"region,omitempty"` + + // Cluster namespace. + Namespace *string `json:"namespace,omitempty"` + + // Workspace ID. + WorkspaceID *string `json:"workspace_id,omitempty"` + + // Workspace name. + WorkspaceName *string `json:"workspace_name,omitempty"` +} + +// UnmarshalInstallStatusMetadata unmarshals an instance of InstallStatusMetadata from the specified map of raw messages. +func UnmarshalInstallStatusMetadata(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstallStatusMetadata) + err = core.UnmarshalPrimitive(m, "cluster_id", &obj.ClusterID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "region", &obj.Region) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "namespace", &obj.Namespace) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "workspace_id", &obj.WorkspaceID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "workspace_name", &obj.WorkspaceName) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstallStatusRelease : Release information. +type InstallStatusRelease struct { + // Kube deployments. + Deployments []map[string]interface{} `json:"deployments,omitempty"` + + // Kube replica sets. + Replicasets []map[string]interface{} `json:"replicasets,omitempty"` + + // Kube stateful sets. + Statefulsets []map[string]interface{} `json:"statefulsets,omitempty"` + + // Kube pods. + Pods []map[string]interface{} `json:"pods,omitempty"` + + // Kube errors. + Errors []map[string]string `json:"errors,omitempty"` +} + +// UnmarshalInstallStatusRelease unmarshals an instance of InstallStatusRelease from the specified map of raw messages. 
+func UnmarshalInstallStatusRelease(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstallStatusRelease) + err = core.UnmarshalPrimitive(m, "deployments", &obj.Deployments) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "replicasets", &obj.Replicasets) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "statefulsets", &obj.Statefulsets) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "pods", &obj.Pods) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "errors", &obj.Errors) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstallVersionOptions : The InstallVersion options. +type InstallVersionOptions struct { + // A dotted value of `catalogID`.`versionID`. + VersionLocID *string `validate:"required,ne="` + + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Cluster ID. + ClusterID *string + + // Cluster region. + Region *string + + // Kube namespace. + Namespace *string + + // Object containing Helm chart override values. To use a secret for items of type password, specify a JSON encoded + // value of $ref:#/components/schemas/SecretInstance, prefixed with `cmsm_v1:`. + OverrideValues map[string]interface{} + + // Entitlement API Key for this offering. + EntitlementApikey *string + + // Schematics workspace configuration. + Schematics *DeployRequestBodySchematics + + // Script. + Script *string + + // Script ID. + ScriptID *string + + // A dotted value of `catalogID`.`versionID`. + VersionLocatorID *string + + // VCenter ID. + VcenterID *string + + // VCenter User. + VcenterUser *string + + // VCenter Password. + VcenterPassword *string + + // VCenter Location. + VcenterLocation *string + + // VCenter Datastore. 
+ VcenterDatastore *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewInstallVersionOptions : Instantiate InstallVersionOptions +func (*CatalogManagementV1) NewInstallVersionOptions(versionLocID string, xAuthRefreshToken string) *InstallVersionOptions { + return &InstallVersionOptions{ + VersionLocID: core.StringPtr(versionLocID), + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *InstallVersionOptions) SetVersionLocID(versionLocID string) *InstallVersionOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *InstallVersionOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *InstallVersionOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *InstallVersionOptions) SetClusterID(clusterID string) *InstallVersionOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *InstallVersionOptions) SetRegion(region string) *InstallVersionOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetNamespace : Allow user to set Namespace +func (options *InstallVersionOptions) SetNamespace(namespace string) *InstallVersionOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetOverrideValues : Allow user to set OverrideValues +func (options *InstallVersionOptions) SetOverrideValues(overrideValues map[string]interface{}) *InstallVersionOptions { + options.OverrideValues = overrideValues + return options +} + +// SetEntitlementApikey : Allow user to set EntitlementApikey +func (options *InstallVersionOptions) SetEntitlementApikey(entitlementApikey string) *InstallVersionOptions { + options.EntitlementApikey = core.StringPtr(entitlementApikey) + return options +} + +// SetSchematics : Allow user to set Schematics +func (options *InstallVersionOptions) SetSchematics(schematics *DeployRequestBodySchematics) *InstallVersionOptions { + options.Schematics = schematics + return options +} + +// SetScript : Allow user to set Script +func (options *InstallVersionOptions) SetScript(script string) *InstallVersionOptions { + options.Script = core.StringPtr(script) + return options +} + +// SetScriptID : Allow user to set ScriptID +func (options *InstallVersionOptions) SetScriptID(scriptID string) *InstallVersionOptions { + options.ScriptID = core.StringPtr(scriptID) + return options +} + +// SetVersionLocatorID : Allow user to set VersionLocatorID +func (options *InstallVersionOptions) SetVersionLocatorID(versionLocatorID string) *InstallVersionOptions { + options.VersionLocatorID = core.StringPtr(versionLocatorID) + return options +} + +// SetVcenterID : Allow user to set VcenterID +func (options *InstallVersionOptions) SetVcenterID(vcenterID string) *InstallVersionOptions { + options.VcenterID = core.StringPtr(vcenterID) + return options +} + +// SetVcenterUser : Allow user to set VcenterUser +func (options *InstallVersionOptions) SetVcenterUser(vcenterUser string) *InstallVersionOptions { + options.VcenterUser = core.StringPtr(vcenterUser) + return options +} + +// SetVcenterPassword : Allow user to set VcenterPassword +func (options *InstallVersionOptions) SetVcenterPassword(vcenterPassword string) *InstallVersionOptions { + 
options.VcenterPassword = core.StringPtr(vcenterPassword) + return options +} + +// SetVcenterLocation : Allow user to set VcenterLocation +func (options *InstallVersionOptions) SetVcenterLocation(vcenterLocation string) *InstallVersionOptions { + options.VcenterLocation = core.StringPtr(vcenterLocation) + return options +} + +// SetVcenterDatastore : Allow user to set VcenterDatastore +func (options *InstallVersionOptions) SetVcenterDatastore(vcenterDatastore string) *InstallVersionOptions { + options.VcenterDatastore = core.StringPtr(vcenterDatastore) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *InstallVersionOptions) SetHeaders(param map[string]string) *InstallVersionOptions { + options.Headers = param + return options +} + +// Kind : Offering kind. +type Kind struct { + // Unique ID. + ID *string `json:"id,omitempty"` + + // content kind, e.g., helm, vm image. + FormatKind *string `json:"format_kind,omitempty"` + + // target cloud to install, e.g., iks, open_shift_iks. + TargetKind *string `json:"target_kind,omitempty"` + + // Open ended metadata information. + Metadata map[string]interface{} `json:"metadata,omitempty"` + + // Installation instruction. + InstallDescription *string `json:"install_description,omitempty"` + + // List of tags associated with this catalog. + Tags []string `json:"tags,omitempty"` + + // List of features associated with this offering. + AdditionalFeatures []Feature `json:"additional_features,omitempty"` + + // The date and time this catalog was created. + Created *strfmt.DateTime `json:"created,omitempty"` + + // The date and time this catalog was last updated. + Updated *strfmt.DateTime `json:"updated,omitempty"` + + // list of versions. + Versions []Version `json:"versions,omitempty"` + + // list of plans. + Plans []Plan `json:"plans,omitempty"` +} + +// UnmarshalKind unmarshals an instance of Kind from the specified map of raw messages. +func UnmarshalKind(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Kind) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "format_kind", &obj.FormatKind) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "target_kind", &obj.TargetKind) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "metadata", &obj.Metadata) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "install_description", &obj.InstallDescription) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalModel(m, "additional_features", &obj.AdditionalFeatures, UnmarshalFeature) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created", &obj.Created) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated", &obj.Updated) + if err != nil { + return + } + err = core.UnmarshalModel(m, "versions", &obj.Versions, UnmarshalVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "plans", &obj.Plans, UnmarshalPlan) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// License : BSS license. +type License struct { + // License ID. + ID *string `json:"id,omitempty"` + + // license name. + Name *string `json:"name,omitempty"` + + // type of license e.g., Apache xxx. + Type *string `json:"type,omitempty"` + + // URL for the license text. + URL *string `json:"url,omitempty"` + + // License description. 
+ Description *string `json:"description,omitempty"` +} + +// UnmarshalLicense unmarshals an instance of License from the specified map of raw messages. +func UnmarshalLicense(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(License) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListCatalogsOptions : The ListCatalogs options. +type ListCatalogsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListCatalogsOptions : Instantiate ListCatalogsOptions +func (*CatalogManagementV1) NewListCatalogsOptions() *ListCatalogsOptions { + return &ListCatalogsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListCatalogsOptions) SetHeaders(param map[string]string) *ListCatalogsOptions { + options.Headers = param + return options +} + +// ListObjectsOptions : The ListObjects options. +type ListObjectsOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // The number of results to return. + Limit *int64 + + // The number of results to skip before returning values. + Offset *int64 + + // Only return results that contain the specified string. + Name *string + + // The field on which the output is sorted. Sorts by default by **label** property. Available fields are **name**, + // **label**, **created**, and **updated**. By adding **-** (i.e. **-label**) in front of the query string, you can + // specify descending order. Default is ascending order. + Sort *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListObjectsOptions : Instantiate ListObjectsOptions +func (*CatalogManagementV1) NewListObjectsOptions(catalogIdentifier string) *ListObjectsOptions { + return &ListObjectsOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *ListObjectsOptions) SetCatalogIdentifier(catalogIdentifier string) *ListObjectsOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListObjectsOptions) SetLimit(limit int64) *ListObjectsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListObjectsOptions) SetOffset(offset int64) *ListObjectsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetName : Allow user to set Name +func (options *ListObjectsOptions) SetName(name string) *ListObjectsOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetSort : Allow user to set Sort +func (options *ListObjectsOptions) SetSort(sort string) *ListObjectsOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListObjectsOptions) SetHeaders(param map[string]string) *ListObjectsOptions { + options.Headers = param + return options +} + +// ListOfferingsOptions : The ListOfferings options. 
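+//
+// The sketch below is an editor-added illustration, not generated code. It
+// assumes an authenticated *CatalogManagementV1 client named `service` and the
+// companion ListOfferings operation defined with the other operations in this
+// file; the catalog ID is a placeholder.
+//
+//	options := service.NewListOfferingsOptions("<catalog-id>")
+//	options.SetLimit(50).SetSort("-updated")
+//	result, _, err := service.ListOfferings(options)
+//	if err == nil {
+//		for _, offering := range result.Resources {
+//			fmt.Println(*offering.Name) // assumes Name is set; check for nil in real code
+//		}
+//	}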
+type ListOfferingsOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // true - Strip down the content of what is returned. For example don't return the readme. Makes the result much + // smaller. Defaults to false. + Digest *bool + + // The maximum number of results to return. + Limit *int64 + + // The number of results to skip before returning values. + Offset *int64 + + // Only return results that contain the specified string. + Name *string + + // The field on which the output is sorted. Sorts by default by **label** property. Available fields are **name**, + // **label**, **created**, and **updated**. By adding **-** (i.e. **-label**) in front of the query string, you can + // specify descending order. Default is ascending order. + Sort *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListOfferingsOptions : Instantiate ListOfferingsOptions +func (*CatalogManagementV1) NewListOfferingsOptions(catalogIdentifier string) *ListOfferingsOptions { + return &ListOfferingsOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *ListOfferingsOptions) SetCatalogIdentifier(catalogIdentifier string) *ListOfferingsOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetDigest : Allow user to set Digest +func (options *ListOfferingsOptions) SetDigest(digest bool) *ListOfferingsOptions { + options.Digest = core.BoolPtr(digest) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListOfferingsOptions) SetLimit(limit int64) *ListOfferingsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListOfferingsOptions) SetOffset(offset int64) *ListOfferingsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetName : Allow user to set Name +func (options *ListOfferingsOptions) SetName(name string) *ListOfferingsOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetSort : Allow user to set Sort +func (options *ListOfferingsOptions) SetSort(sort string) *ListOfferingsOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListOfferingsOptions) SetHeaders(param map[string]string) *ListOfferingsOptions { + options.Headers = param + return options +} + +// ListOperatorsOptions : The ListOperators options. +type ListOperatorsOptions struct { + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Cluster identification. + ClusterID *string `validate:"required"` + + // Cluster region. + Region *string `validate:"required"` + + // A dotted value of `catalogID`.`versionID`. 
+ VersionLocatorID *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListOperatorsOptions : Instantiate ListOperatorsOptions +func (*CatalogManagementV1) NewListOperatorsOptions(xAuthRefreshToken string, clusterID string, region string, versionLocatorID string) *ListOperatorsOptions { + return &ListOperatorsOptions{ + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + ClusterID: core.StringPtr(clusterID), + Region: core.StringPtr(region), + VersionLocatorID: core.StringPtr(versionLocatorID), + } +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *ListOperatorsOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *ListOperatorsOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *ListOperatorsOptions) SetClusterID(clusterID string) *ListOperatorsOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *ListOperatorsOptions) SetRegion(region string) *ListOperatorsOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetVersionLocatorID : Allow user to set VersionLocatorID +func (options *ListOperatorsOptions) SetVersionLocatorID(versionLocatorID string) *ListOperatorsOptions { + options.VersionLocatorID = core.StringPtr(versionLocatorID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListOperatorsOptions) SetHeaders(param map[string]string) *ListOperatorsOptions { + options.Headers = param + return options +} + +// NamespaceSearchResult : Paginated list of namespace search results. +type NamespaceSearchResult struct { + // The offset (origin 0) of the first resource in this page of search results. + Offset *int64 `json:"offset" validate:"required"` + + // The maximum number of resources returned in each page of search results. + Limit *int64 `json:"limit" validate:"required"` + + // The overall total number of resources in the search result set. + TotalCount *int64 `json:"total_count,omitempty"` + + // The number of resources returned in this page of search results. + ResourceCount *int64 `json:"resource_count,omitempty"` + + // A URL for retrieving the first page of search results. + First *string `json:"first,omitempty"` + + // A URL for retrieving the last page of search results. + Last *string `json:"last,omitempty"` + + // A URL for retrieving the previous page of search results. + Prev *string `json:"prev,omitempty"` + + // A URL for retrieving the next page of search results. + Next *string `json:"next,omitempty"` + + // Resulting objects. + Resources []string `json:"resources,omitempty"` +} + +// UnmarshalNamespaceSearchResult unmarshals an instance of NamespaceSearchResult from the specified map of raw messages. 
+func UnmarshalNamespaceSearchResult(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(NamespaceSearchResult)
+ err = core.UnmarshalPrimitive(m, "offset", &obj.Offset)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "resource_count", &obj.ResourceCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "first", &obj.First)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "last", &obj.Last)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "prev", &obj.Prev)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "next", &obj.Next)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "resources", &obj.Resources)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ObjectAccess : Object access.
+type ObjectAccess struct {
+ // unique id.
+ ID *string `json:"id,omitempty"`
+
+ // account id.
+ Account *string `json:"account,omitempty"`
+
+ // unique id.
+ CatalogID *string `json:"catalog_id,omitempty"`
+
+ // object id.
+ TargetID *string `json:"target_id,omitempty"`
+
+ // Date and time of creation.
+ Create *strfmt.DateTime `json:"create,omitempty"`
+}
+
+// UnmarshalObjectAccess unmarshals an instance of ObjectAccess from the specified map of raw messages.
+func UnmarshalObjectAccess(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ObjectAccess)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "account", &obj.Account)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "catalog_id", &obj.CatalogID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "target_id", &obj.TargetID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "create", &obj.Create)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ObjectAccessListResult : Paginated object search result.
+type ObjectAccessListResult struct {
+ // The offset (origin 0) of the first resource in this page of search results.
+ Offset *int64 `json:"offset" validate:"required"`
+
+ // The maximum number of resources returned in each page of search results.
+ Limit *int64 `json:"limit" validate:"required"`
+
+ // The overall total number of resources in the search result set.
+ TotalCount *int64 `json:"total_count,omitempty"`
+
+ // The number of resources returned in this page of search results.
+ ResourceCount *int64 `json:"resource_count,omitempty"`
+
+ // A URL for retrieving the first page of search results.
+ First *string `json:"first,omitempty"`
+
+ // A URL for retrieving the last page of search results.
+ Last *string `json:"last,omitempty"`
+
+ // A URL for retrieving the previous page of search results.
+ Prev *string `json:"prev,omitempty"`
+
+ // A URL for retrieving the next page of search results.
+ Next *string `json:"next,omitempty"`
+
+ // Resulting objects.
+ Resources []ObjectAccess `json:"resources,omitempty"`
+}
+
+// UnmarshalObjectAccessListResult unmarshals an instance of ObjectAccessListResult from the specified map of raw messages.
+func UnmarshalObjectAccessListResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ObjectAccessListResult) + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_count", &obj.ResourceCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "first", &obj.First) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last", &obj.Last) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "prev", &obj.Prev) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next", &obj.Next) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalObjectAccess) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ObjectListResult : Paginated object search result. +type ObjectListResult struct { + // The offset (origin 0) of the first resource in this page of search results. + Offset *int64 `json:"offset" validate:"required"` + + // The maximum number of resources returned in each page of search results. + Limit *int64 `json:"limit" validate:"required"` + + // The overall total number of resources in the search result set. + TotalCount *int64 `json:"total_count,omitempty"` + + // The number of resources returned in this page of search results. + ResourceCount *int64 `json:"resource_count,omitempty"` + + // A URL for retrieving the first page of search results. + First *string `json:"first,omitempty"` + + // A URL for retrieving the last page of search results. + Last *string `json:"last,omitempty"` + + // A URL for retrieving the previous page of search results. + Prev *string `json:"prev,omitempty"` + + // A URL for retrieving the next page of search results. + Next *string `json:"next,omitempty"` + + // Resulting objects. + Resources []CatalogObject `json:"resources,omitempty"` +} + +// UnmarshalObjectListResult unmarshals an instance of ObjectListResult from the specified map of raw messages. +func UnmarshalObjectListResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ObjectListResult) + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_count", &obj.ResourceCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "first", &obj.First) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last", &obj.Last) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "prev", &obj.Prev) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next", &obj.Next) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalCatalogObject) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ObjectSearchResult : Paginated object search result. +type ObjectSearchResult struct { + // The offset (origin 0) of the first resource in this page of search results. 
+ Offset *int64 `json:"offset" validate:"required"`
+
+ // The maximum number of resources returned in each page of search results.
+ Limit *int64 `json:"limit" validate:"required"`
+
+ // The overall total number of resources in the search result set.
+ TotalCount *int64 `json:"total_count,omitempty"`
+
+ // The number of resources returned in this page of search results.
+ ResourceCount *int64 `json:"resource_count,omitempty"`
+
+ // A URL for retrieving the first page of search results.
+ First *string `json:"first,omitempty"`
+
+ // A URL for retrieving the last page of search results.
+ Last *string `json:"last,omitempty"`
+
+ // A URL for retrieving the previous page of search results.
+ Prev *string `json:"prev,omitempty"`
+
+ // A URL for retrieving the next page of search results.
+ Next *string `json:"next,omitempty"`
+
+ // Resulting objects.
+ Resources []CatalogObject `json:"resources,omitempty"`
+}
+
+// UnmarshalObjectSearchResult unmarshals an instance of ObjectSearchResult from the specified map of raw messages.
+func UnmarshalObjectSearchResult(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ObjectSearchResult)
+ err = core.UnmarshalPrimitive(m, "offset", &obj.Offset)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "resource_count", &obj.ResourceCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "first", &obj.First)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "last", &obj.Last)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "prev", &obj.Prev)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "next", &obj.Next)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalCatalogObject)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// Offering : Offering information.
+type Offering struct {
+ // unique id.
+ ID *string `json:"id,omitempty"`
+
+ // Cloudant revision.
+ Rev *string `json:"_rev,omitempty"`
+
+ // The url for this specific offering.
+ URL *string `json:"url,omitempty"`
+
+ // The crn for this specific offering.
+ CRN *string `json:"crn,omitempty"`
+
+ // Display Name in the requested language.
+ Label *string `json:"label,omitempty"`
+
+ // The programmatic name of this offering.
+ Name *string `json:"name,omitempty"`
+
+ // URL for an icon associated with this offering.
+ OfferingIconURL *string `json:"offering_icon_url,omitempty"`
+
+ // URL for additional docs associated with this offering.
+ OfferingDocsURL *string `json:"offering_docs_url,omitempty"`
+
+ // URL to be displayed in the Consumption UI for getting support on this offering.
+ OfferingSupportURL *string `json:"offering_support_url,omitempty"`
+
+ // List of tags associated with this catalog.
+ Tags []string `json:"tags,omitempty"`
+
+ // List of keywords associated with offering, typically used to search for it.
+ Keywords []string `json:"keywords,omitempty"`
+
+ // Offering rating information.
+ Rating *Rating `json:"rating,omitempty"`
+
+ // The date and time this catalog was created.
+ Created *strfmt.DateTime `json:"created,omitempty"`
+
+ // The date and time this catalog was last updated.
+ Updated *strfmt.DateTime `json:"updated,omitempty"`
+
+ // Short description in the requested language.
+ ShortDescription *string `json:"short_description,omitempty"` + + // Long description in the requested language. + LongDescription *string `json:"long_description,omitempty"` + + // list of features associated with this offering. + Features []Feature `json:"features,omitempty"` + + // Array of kind. + Kinds []Kind `json:"kinds,omitempty"` + + // Is it permitted to request publishing to IBM or Public. + PermitRequestIBMPublicPublish *bool `json:"permit_request_ibm_public_publish,omitempty"` + + // Indicates if this offering has been approved for use by all IBMers. + IBMPublishApproved *bool `json:"ibm_publish_approved,omitempty"` + + // Indicates if this offering has been approved for use by all IBM Cloud users. + PublicPublishApproved *bool `json:"public_publish_approved,omitempty"` + + // The original offering CRN that this publish entry came from. + PublicOriginalCRN *string `json:"public_original_crn,omitempty"` + + // The crn of the public catalog entry of this offering. + PublishPublicCRN *string `json:"publish_public_crn,omitempty"` + + // The portal's approval record ID. + PortalApprovalRecord *string `json:"portal_approval_record,omitempty"` + + // The portal UI URL. + PortalUIURL *string `json:"portal_ui_url,omitempty"` + + // The id of the catalog containing this offering. + CatalogID *string `json:"catalog_id,omitempty"` + + // The name of the catalog. + CatalogName *string `json:"catalog_name,omitempty"` + + // Map of metadata values for this offering. + Metadata map[string]interface{} `json:"metadata,omitempty"` + + // A disclaimer for this offering. + Disclaimer *string `json:"disclaimer,omitempty"` + + // Determine if this offering should be displayed in the Consumption UI. + Hidden *bool `json:"hidden,omitempty"` + + // Provider of this offering. + Provider *string `json:"provider,omitempty"` + + // Repository info for offerings. + RepoInfo *RepoInfo `json:"repo_info,omitempty"` +} + +// UnmarshalOffering unmarshals an instance of Offering from the specified map of raw messages. 
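+// Like the other generated Unmarshal* helpers in this file, it is meant to be
+// passed to core.UnmarshalModel, either for a nested property (as done
+// throughout this file, e.g. core.UnmarshalModel(m, "rating", &obj.Rating,
+// UnmarshalRating)) or, with an empty property name, for a whole map of raw
+// JSON. A hedged, editor-added sketch, where `rawMap` stands for a
+// map[string]json.RawMessage already decoded from a response body:
+//
+//	var offering *Offering
+//	err := core.UnmarshalModel(rawMap, "", &offering, UnmarshalOffering)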
+func UnmarshalOffering(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Offering)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "_rev", &obj.Rev)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "url", &obj.URL)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "crn", &obj.CRN)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "label", &obj.Label)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "offering_icon_url", &obj.OfferingIconURL)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "offering_docs_url", &obj.OfferingDocsURL)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "offering_support_url", &obj.OfferingSupportURL)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "tags", &obj.Tags)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "keywords", &obj.Keywords)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "rating", &obj.Rating, UnmarshalRating)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "created", &obj.Created)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "updated", &obj.Updated)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "short_description", &obj.ShortDescription)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "long_description", &obj.LongDescription)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "features", &obj.Features, UnmarshalFeature)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "kinds", &obj.Kinds, UnmarshalKind)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "permit_request_ibm_public_publish", &obj.PermitRequestIBMPublicPublish)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "ibm_publish_approved", &obj.IBMPublishApproved)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "public_publish_approved", &obj.PublicPublishApproved)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "public_original_crn", &obj.PublicOriginalCRN)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "publish_public_crn", &obj.PublishPublicCRN)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "portal_approval_record", &obj.PortalApprovalRecord)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "portal_ui_url", &obj.PortalUIURL)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "catalog_id", &obj.CatalogID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "catalog_name", &obj.CatalogName)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "metadata", &obj.Metadata)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "disclaimer", &obj.Disclaimer)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "hidden", &obj.Hidden)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "provider", &obj.Provider)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "repo_info", &obj.RepoInfo, UnmarshalRepoInfo)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// OfferingInstance : An offering instance resource (a provisioned instance of a catalog offering).
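+//
+// A short, editor-added field-access sketch; `instance` stands for a value of
+// this type returned by the service, and any pointer field may be nil:
+//
+//	if instance.ClusterID != nil && instance.ClusterRegion != nil {
+//		fmt.Printf("provisioned on cluster %s (%s)\n", *instance.ClusterID, *instance.ClusterRegion)
+//	}
+//	for _, ns := range instance.ClusterNamespaces {
+//		fmt.Println("target namespace:", ns)
+//	}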
+type OfferingInstance struct { + // provisioned instance ID (part of the CRN). + ID *string `json:"id,omitempty"` + + // Cloudant revision. + Rev *string `json:"_rev,omitempty"` + + // url reference to this object. + URL *string `json:"url,omitempty"` + + // platform CRN for this instance. + CRN *string `json:"crn,omitempty"` + + // the label for this instance. + Label *string `json:"label,omitempty"` + + // Catalog ID this instance was created from. + CatalogID *string `json:"catalog_id,omitempty"` + + // Offering ID this instance was created from. + OfferingID *string `json:"offering_id,omitempty"` + + // the format this instance has (helm, operator, ova...). + KindFormat *string `json:"kind_format,omitempty"` + + // The version this instance was installed from (not version id). + Version *string `json:"version,omitempty"` + + // Cluster ID. + ClusterID *string `json:"cluster_id,omitempty"` + + // Cluster region (e.g., us-south). + ClusterRegion *string `json:"cluster_region,omitempty"` + + // List of target namespaces to install into. + ClusterNamespaces []string `json:"cluster_namespaces,omitempty"` + + // designate to install into all namespaces. + ClusterAllNamespaces *bool `json:"cluster_all_namespaces,omitempty"` + + // Id of the schematics workspace, for offering instances provisioned through schematics. + SchematicsWorkspaceID *string `json:"schematics_workspace_id,omitempty"` + + // Id of the resource group to provision the offering instance into. + ResourceGroupID *string `json:"resource_group_id,omitempty"` +} + +// UnmarshalOfferingInstance unmarshals an instance of OfferingInstance from the specified map of raw messages. +func UnmarshalOfferingInstance(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OfferingInstance) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "_rev", &obj.Rev) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "label", &obj.Label) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "catalog_id", &obj.CatalogID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offering_id", &obj.OfferingID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kind_format", &obj.KindFormat) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "version", &obj.Version) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cluster_id", &obj.ClusterID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cluster_region", &obj.ClusterRegion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cluster_namespaces", &obj.ClusterNamespaces) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cluster_all_namespaces", &obj.ClusterAllNamespaces) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "schematics_workspace_id", &obj.SchematicsWorkspaceID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group_id", &obj.ResourceGroupID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OfferingSearchResult : Paginated offering search result. +type OfferingSearchResult struct { + // The offset (origin 0) of the first resource in this page of search results. 
+ Offset *int64 `json:"offset" validate:"required"` + + // The maximum number of resources returned in each page of search results. + Limit *int64 `json:"limit" validate:"required"` + + // The overall total number of resources in the search result set. + TotalCount *int64 `json:"total_count,omitempty"` + + // The number of resources returned in this page of search results. + ResourceCount *int64 `json:"resource_count,omitempty"` + + // A URL for retrieving the first page of search results. + First *string `json:"first,omitempty"` + + // A URL for retrieving the last page of search results. + Last *string `json:"last,omitempty"` + + // A URL for retrieving the previous page of search results. + Prev *string `json:"prev,omitempty"` + + // A URL for retrieving the next page of search results. + Next *string `json:"next,omitempty"` + + // Resulting objects. + Resources []Offering `json:"resources,omitempty"` +} + +// UnmarshalOfferingSearchResult unmarshals an instance of OfferingSearchResult from the specified map of raw messages. +func UnmarshalOfferingSearchResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OfferingSearchResult) + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_count", &obj.ResourceCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "first", &obj.First) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last", &obj.Last) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "prev", &obj.Prev) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next", &obj.Next) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalOffering) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OperatorDeployResult : Operator deploy result. +type OperatorDeployResult struct { + // Status phase. + Phase *string `json:"phase,omitempty"` + + // Status message. + Message *string `json:"message,omitempty"` + + // Operator API path. + Link *string `json:"link,omitempty"` + + // Name of Operator. + Name *string `json:"name,omitempty"` + + // Operator version. + Version *string `json:"version,omitempty"` + + // Kube namespace. + Namespace *string `json:"namespace,omitempty"` + + // Package Operator exists in. + PackageName *string `json:"package_name,omitempty"` + + // Catalog identification. + CatalogID *string `json:"catalog_id,omitempty"` +} + +// UnmarshalOperatorDeployResult unmarshals an instance of OperatorDeployResult from the specified map of raw messages. 
+func UnmarshalOperatorDeployResult(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(OperatorDeployResult)
+ err = core.UnmarshalPrimitive(m, "phase", &obj.Phase)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "message", &obj.Message)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "link", &obj.Link)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "version", &obj.Version)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "namespace", &obj.Namespace)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "package_name", &obj.PackageName)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "catalog_id", &obj.CatalogID)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// Plan : Offering plan.
+type Plan struct {
+ // unique id.
+ ID *string `json:"id,omitempty"`
+
+ // Display Name in the requested language.
+ Label *string `json:"label,omitempty"`
+
+ // The programmatic name of this offering.
+ Name *string `json:"name,omitempty"`
+
+ // Short description in the requested language.
+ ShortDescription *string `json:"short_description,omitempty"`
+
+ // Long description in the requested language.
+ LongDescription *string `json:"long_description,omitempty"`
+
+ // open ended metadata information.
+ Metadata map[string]interface{} `json:"metadata,omitempty"`
+
+ // list of tags associated with this catalog.
+ Tags []string `json:"tags,omitempty"`
+
+ // list of features associated with this offering.
+ AdditionalFeatures []Feature `json:"additional_features,omitempty"`
+
+ // The date and time this catalog was created.
+ Created *strfmt.DateTime `json:"created,omitempty"`
+
+ // The date and time this catalog was last updated.
+ Updated *strfmt.DateTime `json:"updated,omitempty"`
+
+ // list of deployments.
+ Deployments []Deployment `json:"deployments,omitempty"`
+}
+
+// UnmarshalPlan unmarshals an instance of Plan from the specified map of raw messages.
+func UnmarshalPlan(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Plan)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "label", &obj.Label)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "short_description", &obj.ShortDescription)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "long_description", &obj.LongDescription)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "metadata", &obj.Metadata)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "tags", &obj.Tags)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "additional_features", &obj.AdditionalFeatures, UnmarshalFeature)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "created", &obj.Created)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "updated", &obj.Updated)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "deployments", &obj.Deployments, UnmarshalDeployment)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// PreinstallVersionOptions : The PreinstallVersion options.
+type PreinstallVersionOptions struct {
+ // A dotted value of `catalogID`.`versionID`.
+ VersionLocID *string `validate:"required,ne="` + + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Cluster ID. + ClusterID *string + + // Cluster region. + Region *string + + // Kube namespace. + Namespace *string + + // Object containing Helm chart override values. To use a secret for items of type password, specify a JSON encoded + // value of $ref:#/components/schemas/SecretInstance, prefixed with `cmsm_v1:`. + OverrideValues map[string]interface{} + + // Entitlement API Key for this offering. + EntitlementApikey *string + + // Schematics workspace configuration. + Schematics *DeployRequestBodySchematics + + // Script. + Script *string + + // Script ID. + ScriptID *string + + // A dotted value of `catalogID`.`versionID`. + VersionLocatorID *string + + // VCenter ID. + VcenterID *string + + // VCenter User. + VcenterUser *string + + // VCenter Password. + VcenterPassword *string + + // VCenter Location. + VcenterLocation *string + + // VCenter Datastore. + VcenterDatastore *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPreinstallVersionOptions : Instantiate PreinstallVersionOptions +func (*CatalogManagementV1) NewPreinstallVersionOptions(versionLocID string, xAuthRefreshToken string) *PreinstallVersionOptions { + return &PreinstallVersionOptions{ + VersionLocID: core.StringPtr(versionLocID), + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *PreinstallVersionOptions) SetVersionLocID(versionLocID string) *PreinstallVersionOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *PreinstallVersionOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *PreinstallVersionOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *PreinstallVersionOptions) SetClusterID(clusterID string) *PreinstallVersionOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *PreinstallVersionOptions) SetRegion(region string) *PreinstallVersionOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetNamespace : Allow user to set Namespace +func (options *PreinstallVersionOptions) SetNamespace(namespace string) *PreinstallVersionOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetOverrideValues : Allow user to set OverrideValues +func (options *PreinstallVersionOptions) SetOverrideValues(overrideValues map[string]interface{}) *PreinstallVersionOptions { + options.OverrideValues = overrideValues + return options +} + +// SetEntitlementApikey : Allow user to set EntitlementApikey +func (options *PreinstallVersionOptions) SetEntitlementApikey(entitlementApikey string) *PreinstallVersionOptions { + options.EntitlementApikey = core.StringPtr(entitlementApikey) + return options +} + +// SetSchematics : Allow user to set Schematics +func (options *PreinstallVersionOptions) SetSchematics(schematics *DeployRequestBodySchematics) *PreinstallVersionOptions { + options.Schematics = schematics + return options +} + +// SetScript : Allow user to set Script +func (options *PreinstallVersionOptions) SetScript(script string) *PreinstallVersionOptions { + options.Script = core.StringPtr(script) + return options +} + +// 
SetScriptID : Allow user to set ScriptID +func (options *PreinstallVersionOptions) SetScriptID(scriptID string) *PreinstallVersionOptions { + options.ScriptID = core.StringPtr(scriptID) + return options +} + +// SetVersionLocatorID : Allow user to set VersionLocatorID +func (options *PreinstallVersionOptions) SetVersionLocatorID(versionLocatorID string) *PreinstallVersionOptions { + options.VersionLocatorID = core.StringPtr(versionLocatorID) + return options +} + +// SetVcenterID : Allow user to set VcenterID +func (options *PreinstallVersionOptions) SetVcenterID(vcenterID string) *PreinstallVersionOptions { + options.VcenterID = core.StringPtr(vcenterID) + return options +} + +// SetVcenterUser : Allow user to set VcenterUser +func (options *PreinstallVersionOptions) SetVcenterUser(vcenterUser string) *PreinstallVersionOptions { + options.VcenterUser = core.StringPtr(vcenterUser) + return options +} + +// SetVcenterPassword : Allow user to set VcenterPassword +func (options *PreinstallVersionOptions) SetVcenterPassword(vcenterPassword string) *PreinstallVersionOptions { + options.VcenterPassword = core.StringPtr(vcenterPassword) + return options +} + +// SetVcenterLocation : Allow user to set VcenterLocation +func (options *PreinstallVersionOptions) SetVcenterLocation(vcenterLocation string) *PreinstallVersionOptions { + options.VcenterLocation = core.StringPtr(vcenterLocation) + return options +} + +// SetVcenterDatastore : Allow user to set VcenterDatastore +func (options *PreinstallVersionOptions) SetVcenterDatastore(vcenterDatastore string) *PreinstallVersionOptions { + options.VcenterDatastore = core.StringPtr(vcenterDatastore) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PreinstallVersionOptions) SetHeaders(param map[string]string) *PreinstallVersionOptions { + options.Headers = param + return options +} + +// PublicPublishObjectOptions : The PublicPublishObject options. +type PublicPublishObjectOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPublicPublishObjectOptions : Instantiate PublicPublishObjectOptions +func (*CatalogManagementV1) NewPublicPublishObjectOptions(catalogIdentifier string, objectIdentifier string) *PublicPublishObjectOptions { + return &PublicPublishObjectOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *PublicPublishObjectOptions) SetCatalogIdentifier(catalogIdentifier string) *PublicPublishObjectOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *PublicPublishObjectOptions) SetObjectIdentifier(objectIdentifier string) *PublicPublishObjectOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PublicPublishObjectOptions) SetHeaders(param map[string]string) *PublicPublishObjectOptions { + options.Headers = param + return options +} + +// PublicPublishVersionOptions : The PublicPublishVersion options. +type PublicPublishVersionOptions struct { + // A dotted value of `catalogID`.`versionID`. 
+ VersionLocID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPublicPublishVersionOptions : Instantiate PublicPublishVersionOptions +func (*CatalogManagementV1) NewPublicPublishVersionOptions(versionLocID string) *PublicPublishVersionOptions { + return &PublicPublishVersionOptions{ + VersionLocID: core.StringPtr(versionLocID), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *PublicPublishVersionOptions) SetVersionLocID(versionLocID string) *PublicPublishVersionOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PublicPublishVersionOptions) SetHeaders(param map[string]string) *PublicPublishVersionOptions { + options.Headers = param + return options +} + +// PublishObject : Publish information. +type PublishObject struct { + // Is it permitted to request publishing to IBM or Public. + PermitIBMPublicPublish *bool `json:"permit_ibm_public_publish,omitempty"` + + // Indicates if this offering has been approved for use by all IBMers. + IBMApproved *bool `json:"ibm_approved,omitempty"` + + // Indicates if this offering has been approved for use by all IBM Cloud users. + PublicApproved *bool `json:"public_approved,omitempty"` + + // The portal's approval record ID. + PortalApprovalRecord *string `json:"portal_approval_record,omitempty"` + + // The portal UI URL. + PortalURL *string `json:"portal_url,omitempty"` +} + +// UnmarshalPublishObject unmarshals an instance of PublishObject from the specified map of raw messages. +func UnmarshalPublishObject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublishObject) + err = core.UnmarshalPrimitive(m, "permit_ibm_public_publish", &obj.PermitIBMPublicPublish) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ibm_approved", &obj.IBMApproved) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "public_approved", &obj.PublicApproved) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "portal_approval_record", &obj.PortalApprovalRecord) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "portal_url", &obj.PortalURL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PutOfferingInstanceOptions : The PutOfferingInstance options. +type PutOfferingInstanceOptions struct { + // Version Instance identifier. + InstanceIdentifier *string `validate:"required,ne="` + + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // provisioned instance ID (part of the CRN). + ID *string + + // Cloudant revision. + Rev *string + + // url reference to this object. + URL *string + + // platform CRN for this instance. + CRN *string + + // the label for this instance. + Label *string + + // Catalog ID this instance was created from. + CatalogID *string + + // Offering ID this instance was created from. + OfferingID *string + + // the format this instance has (helm, operator, ova...). + KindFormat *string + + // The version this instance was installed from (not version id). + Version *string + + // Cluster ID. + ClusterID *string + + // Cluster region (e.g., us-south). + ClusterRegion *string + + // List of target namespaces to install into. + ClusterNamespaces []string + + // designate to install into all namespaces. 
+ ClusterAllNamespaces *bool + + // Id of the schematics workspace, for offering instances provisioned through schematics. + SchematicsWorkspaceID *string + + // Id of the resource group to provision the offering instance into. + ResourceGroupID *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPutOfferingInstanceOptions : Instantiate PutOfferingInstanceOptions +func (*CatalogManagementV1) NewPutOfferingInstanceOptions(instanceIdentifier string, xAuthRefreshToken string) *PutOfferingInstanceOptions { + return &PutOfferingInstanceOptions{ + InstanceIdentifier: core.StringPtr(instanceIdentifier), + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetInstanceIdentifier : Allow user to set InstanceIdentifier +func (options *PutOfferingInstanceOptions) SetInstanceIdentifier(instanceIdentifier string) *PutOfferingInstanceOptions { + options.InstanceIdentifier = core.StringPtr(instanceIdentifier) + return options +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *PutOfferingInstanceOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *PutOfferingInstanceOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetID : Allow user to set ID +func (options *PutOfferingInstanceOptions) SetID(id string) *PutOfferingInstanceOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetRev : Allow user to set Rev +func (options *PutOfferingInstanceOptions) SetRev(rev string) *PutOfferingInstanceOptions { + options.Rev = core.StringPtr(rev) + return options +} + +// SetURL : Allow user to set URL +func (options *PutOfferingInstanceOptions) SetURL(url string) *PutOfferingInstanceOptions { + options.URL = core.StringPtr(url) + return options +} + +// SetCRN : Allow user to set CRN +func (options *PutOfferingInstanceOptions) SetCRN(crn string) *PutOfferingInstanceOptions { + options.CRN = core.StringPtr(crn) + return options +} + +// SetLabel : Allow user to set Label +func (options *PutOfferingInstanceOptions) SetLabel(label string) *PutOfferingInstanceOptions { + options.Label = core.StringPtr(label) + return options +} + +// SetCatalogID : Allow user to set CatalogID +func (options *PutOfferingInstanceOptions) SetCatalogID(catalogID string) *PutOfferingInstanceOptions { + options.CatalogID = core.StringPtr(catalogID) + return options +} + +// SetOfferingID : Allow user to set OfferingID +func (options *PutOfferingInstanceOptions) SetOfferingID(offeringID string) *PutOfferingInstanceOptions { + options.OfferingID = core.StringPtr(offeringID) + return options +} + +// SetKindFormat : Allow user to set KindFormat +func (options *PutOfferingInstanceOptions) SetKindFormat(kindFormat string) *PutOfferingInstanceOptions { + options.KindFormat = core.StringPtr(kindFormat) + return options +} + +// SetVersion : Allow user to set Version +func (options *PutOfferingInstanceOptions) SetVersion(version string) *PutOfferingInstanceOptions { + options.Version = core.StringPtr(version) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *PutOfferingInstanceOptions) SetClusterID(clusterID string) *PutOfferingInstanceOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetClusterRegion : Allow user to set ClusterRegion +func (options *PutOfferingInstanceOptions) SetClusterRegion(clusterRegion string) *PutOfferingInstanceOptions { + options.ClusterRegion = core.StringPtr(clusterRegion) + return options +} + 
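+// The chain below is an editor-added usage sketch for this options type, not
+// generated code. It assumes an authenticated client `service`, placeholder
+// identifiers, and the companion PutOfferingInstance operation defined with the
+// other operations in this file.
+//
+//	options := service.NewPutOfferingInstanceOptions("<instance-id>", "<refresh-token>").
+//		SetClusterID("<cluster-id>").
+//		SetClusterRegion("us-south").
+//		SetClusterNamespaces([]string{"default"})
+//	instance, _, err := service.PutOfferingInstance(options)
+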
+// SetClusterNamespaces : Allow user to set ClusterNamespaces
+func (options *PutOfferingInstanceOptions) SetClusterNamespaces(clusterNamespaces []string) *PutOfferingInstanceOptions {
+ options.ClusterNamespaces = clusterNamespaces
+ return options
+}
+
+// SetClusterAllNamespaces : Allow user to set ClusterAllNamespaces
+func (options *PutOfferingInstanceOptions) SetClusterAllNamespaces(clusterAllNamespaces bool) *PutOfferingInstanceOptions {
+ options.ClusterAllNamespaces = core.BoolPtr(clusterAllNamespaces)
+ return options
+}
+
+// SetSchematicsWorkspaceID : Allow user to set SchematicsWorkspaceID
+func (options *PutOfferingInstanceOptions) SetSchematicsWorkspaceID(schematicsWorkspaceID string) *PutOfferingInstanceOptions {
+ options.SchematicsWorkspaceID = core.StringPtr(schematicsWorkspaceID)
+ return options
+}
+
+// SetResourceGroupID : Allow user to set ResourceGroupID
+func (options *PutOfferingInstanceOptions) SetResourceGroupID(resourceGroupID string) *PutOfferingInstanceOptions {
+ options.ResourceGroupID = core.StringPtr(resourceGroupID)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *PutOfferingInstanceOptions) SetHeaders(param map[string]string) *PutOfferingInstanceOptions {
+ options.Headers = param
+ return options
+}
+
+// Rating : Offering rating information.
+type Rating struct {
+ // One star rating count.
+ OneStarCount *int64 `json:"one_star_count,omitempty"`
+
+ // Two star rating count.
+ TwoStarCount *int64 `json:"two_star_count,omitempty"`
+
+ // Three star rating count.
+ ThreeStarCount *int64 `json:"three_star_count,omitempty"`
+
+ // Four star rating count.
+ FourStarCount *int64 `json:"four_star_count,omitempty"`
+}
+
+// UnmarshalRating unmarshals an instance of Rating from the specified map of raw messages.
+func UnmarshalRating(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Rating)
+ err = core.UnmarshalPrimitive(m, "one_star_count", &obj.OneStarCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "two_star_count", &obj.TwoStarCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "three_star_count", &obj.ThreeStarCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "four_star_count", &obj.FourStarCount)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// ReloadOfferingOptions : The ReloadOffering options.
+type ReloadOfferingOptions struct {
+ // Catalog identifier.
+ CatalogIdentifier *string `validate:"required,ne="`
+
+ // Offering identification.
+ OfferingID *string `validate:"required,ne="`
+
+ // The semver value for this new version.
+ TargetVersion *string `validate:"required"`
+
+ // Tags array.
+ Tags []string
+
+ // Target kinds. Current valid values are 'iks', 'roks', 'vcenter', and 'terraform'.
+ TargetKinds []string
+
+ // Byte array representing the content to be imported. Only supported for OVA images at this time.
+ Content *[]byte
+
+ // URL path to zip location. If not specified, must provide content in this post body.
+ Zipurl *string
+
+ // The type of repository containing this version. Valid values are 'public_git' or 'enterprise_git'.
+ RepoType *string
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewReloadOfferingOptions : Instantiate ReloadOfferingOptions
+func (*CatalogManagementV1) NewReloadOfferingOptions(catalogIdentifier string, offeringID string, targetVersion string) *ReloadOfferingOptions {
+ return &ReloadOfferingOptions{
+ CatalogIdentifier: core.StringPtr(catalogIdentifier),
+ OfferingID: core.StringPtr(offeringID),
+ TargetVersion: core.StringPtr(targetVersion),
+ }
+}
+
+// SetCatalogIdentifier : Allow user to set CatalogIdentifier
+func (options *ReloadOfferingOptions) SetCatalogIdentifier(catalogIdentifier string) *ReloadOfferingOptions {
+ options.CatalogIdentifier = core.StringPtr(catalogIdentifier)
+ return options
+}
+
+// SetOfferingID : Allow user to set OfferingID
+func (options *ReloadOfferingOptions) SetOfferingID(offeringID string) *ReloadOfferingOptions {
+ options.OfferingID = core.StringPtr(offeringID)
+ return options
+}
+
+// SetTargetVersion : Allow user to set TargetVersion
+func (options *ReloadOfferingOptions) SetTargetVersion(targetVersion string) *ReloadOfferingOptions {
+ options.TargetVersion = core.StringPtr(targetVersion)
+ return options
+}
+
+// SetTags : Allow user to set Tags
+func (options *ReloadOfferingOptions) SetTags(tags []string) *ReloadOfferingOptions {
+ options.Tags = tags
+ return options
+}
+
+// SetTargetKinds : Allow user to set TargetKinds
+func (options *ReloadOfferingOptions) SetTargetKinds(targetKinds []string) *ReloadOfferingOptions {
+ options.TargetKinds = targetKinds
+ return options
+}
+
+// SetContent : Allow user to set Content
+func (options *ReloadOfferingOptions) SetContent(content []byte) *ReloadOfferingOptions {
+ options.Content = &content
+ return options
+}
+
+// SetZipurl : Allow user to set Zipurl
+func (options *ReloadOfferingOptions) SetZipurl(zipurl string) *ReloadOfferingOptions {
+ options.Zipurl = core.StringPtr(zipurl)
+ return options
+}
+
+// SetRepoType : Allow user to set RepoType
+func (options *ReloadOfferingOptions) SetRepoType(repoType string) *ReloadOfferingOptions {
+ options.RepoType = core.StringPtr(repoType)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ReloadOfferingOptions) SetHeaders(param map[string]string) *ReloadOfferingOptions {
+ options.Headers = param
+ return options
+}
+
+// ReplaceCatalogOptions : The ReplaceCatalog options.
+type ReplaceCatalogOptions struct {
+ // Catalog identifier.
+ CatalogIdentifier *string `validate:"required,ne="`
+
+ // Unique ID.
+ ID *string
+
+ // Cloudant revision.
+ Rev *string
+
+ // Display name in the requested language.
+ Label *string
+
+ // Description in the requested language.
+ ShortDescription *string
+
+ // URL for an icon associated with this catalog.
+ CatalogIconURL *string
+
+ // List of tags associated with this catalog.
+ Tags []string
+
+ // List of features associated with this catalog.
+ Features []Feature
+
+ // Denotes whether a catalog is disabled.
+ Disabled *bool
+
+ // Resource group ID the catalog is owned by.
+ ResourceGroupID *string
+
+ // Account that owns catalog.
+ OwningAccount *string
+
+ // Account and catalog filters.
+ CatalogFilters *Filters
+
+ // Syndication settings.
+ SyndicationSettings *SyndicationResource
+
+ // Kind of catalog. Supported kinds are offering and vpe.
+ Kind *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewReplaceCatalogOptions : Instantiate ReplaceCatalogOptions +func (*CatalogManagementV1) NewReplaceCatalogOptions(catalogIdentifier string) *ReplaceCatalogOptions { + return &ReplaceCatalogOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *ReplaceCatalogOptions) SetCatalogIdentifier(catalogIdentifier string) *ReplaceCatalogOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetID : Allow user to set ID +func (options *ReplaceCatalogOptions) SetID(id string) *ReplaceCatalogOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetRev : Allow user to set Rev +func (options *ReplaceCatalogOptions) SetRev(rev string) *ReplaceCatalogOptions { + options.Rev = core.StringPtr(rev) + return options +} + +// SetLabel : Allow user to set Label +func (options *ReplaceCatalogOptions) SetLabel(label string) *ReplaceCatalogOptions { + options.Label = core.StringPtr(label) + return options +} + +// SetShortDescription : Allow user to set ShortDescription +func (options *ReplaceCatalogOptions) SetShortDescription(shortDescription string) *ReplaceCatalogOptions { + options.ShortDescription = core.StringPtr(shortDescription) + return options +} + +// SetCatalogIconURL : Allow user to set CatalogIconURL +func (options *ReplaceCatalogOptions) SetCatalogIconURL(catalogIconURL string) *ReplaceCatalogOptions { + options.CatalogIconURL = core.StringPtr(catalogIconURL) + return options +} + +// SetTags : Allow user to set Tags +func (options *ReplaceCatalogOptions) SetTags(tags []string) *ReplaceCatalogOptions { + options.Tags = tags + return options +} + +// SetFeatures : Allow user to set Features +func (options *ReplaceCatalogOptions) SetFeatures(features []Feature) *ReplaceCatalogOptions { + options.Features = features + return options +} + +// SetDisabled : Allow user to set Disabled +func (options *ReplaceCatalogOptions) SetDisabled(disabled bool) *ReplaceCatalogOptions { + options.Disabled = core.BoolPtr(disabled) + return options +} + +// SetResourceGroupID : Allow user to set ResourceGroupID +func (options *ReplaceCatalogOptions) SetResourceGroupID(resourceGroupID string) *ReplaceCatalogOptions { + options.ResourceGroupID = core.StringPtr(resourceGroupID) + return options +} + +// SetOwningAccount : Allow user to set OwningAccount +func (options *ReplaceCatalogOptions) SetOwningAccount(owningAccount string) *ReplaceCatalogOptions { + options.OwningAccount = core.StringPtr(owningAccount) + return options +} + +// SetCatalogFilters : Allow user to set CatalogFilters +func (options *ReplaceCatalogOptions) SetCatalogFilters(catalogFilters *Filters) *ReplaceCatalogOptions { + options.CatalogFilters = catalogFilters + return options +} + +// SetSyndicationSettings : Allow user to set SyndicationSettings +func (options *ReplaceCatalogOptions) SetSyndicationSettings(syndicationSettings *SyndicationResource) *ReplaceCatalogOptions { + options.SyndicationSettings = syndicationSettings + return options +} + +// SetKind : Allow user to set Kind +func (options *ReplaceCatalogOptions) SetKind(kind string) *ReplaceCatalogOptions { + options.Kind = core.StringPtr(kind) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceCatalogOptions) SetHeaders(param map[string]string) *ReplaceCatalogOptions { + options.Headers = param + 
return options +} + +// ReplaceObjectOptions : The ReplaceObject options. +type ReplaceObjectOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // unique id. + ID *string + + // The programmatic name of this offering. + Name *string + + // Cloudant revision. + Rev *string + + // The crn for this specific object. + CRN *string + + // The url for this specific object. + URL *string + + // The parent for this specific object. + ParentID *string + + // Translated display name in the requested language. + LabelI18n *string + + // Display name in the requested language. + Label *string + + // List of tags associated with this catalog. + Tags []string + + // The date and time this catalog was created. + Created *strfmt.DateTime + + // The date and time this catalog was last updated. + Updated *strfmt.DateTime + + // Short description in the requested language. + ShortDescription *string + + // Short description translation. + ShortDescriptionI18n *string + + // Kind of object. + Kind *string + + // Publish information. + Publish *PublishObject + + // Offering state. + State *State + + // The id of the catalog containing this offering. + CatalogID *string + + // The name of the catalog. + CatalogName *string + + // Map of data values for this object. + Data map[string]interface{} + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewReplaceObjectOptions : Instantiate ReplaceObjectOptions +func (*CatalogManagementV1) NewReplaceObjectOptions(catalogIdentifier string, objectIdentifier string) *ReplaceObjectOptions { + return &ReplaceObjectOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *ReplaceObjectOptions) SetCatalogIdentifier(catalogIdentifier string) *ReplaceObjectOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *ReplaceObjectOptions) SetObjectIdentifier(objectIdentifier string) *ReplaceObjectOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetID : Allow user to set ID +func (options *ReplaceObjectOptions) SetID(id string) *ReplaceObjectOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetName : Allow user to set Name +func (options *ReplaceObjectOptions) SetName(name string) *ReplaceObjectOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetRev : Allow user to set Rev +func (options *ReplaceObjectOptions) SetRev(rev string) *ReplaceObjectOptions { + options.Rev = core.StringPtr(rev) + return options +} + +// SetCRN : Allow user to set CRN +func (options *ReplaceObjectOptions) SetCRN(crn string) *ReplaceObjectOptions { + options.CRN = core.StringPtr(crn) + return options +} + +// SetURL : Allow user to set URL +func (options *ReplaceObjectOptions) SetURL(url string) *ReplaceObjectOptions { + options.URL = core.StringPtr(url) + return options +} + +// SetParentID : Allow user to set ParentID +func (options *ReplaceObjectOptions) SetParentID(parentID string) *ReplaceObjectOptions { + options.ParentID = core.StringPtr(parentID) + return options +} + +// SetLabelI18n : Allow user to set LabelI18n +func (options *ReplaceObjectOptions) SetLabelI18n(labelI18n string) 
*ReplaceObjectOptions { + options.LabelI18n = core.StringPtr(labelI18n) + return options +} + +// SetLabel : Allow user to set Label +func (options *ReplaceObjectOptions) SetLabel(label string) *ReplaceObjectOptions { + options.Label = core.StringPtr(label) + return options +} + +// SetTags : Allow user to set Tags +func (options *ReplaceObjectOptions) SetTags(tags []string) *ReplaceObjectOptions { + options.Tags = tags + return options +} + +// SetCreated : Allow user to set Created +func (options *ReplaceObjectOptions) SetCreated(created *strfmt.DateTime) *ReplaceObjectOptions { + options.Created = created + return options +} + +// SetUpdated : Allow user to set Updated +func (options *ReplaceObjectOptions) SetUpdated(updated *strfmt.DateTime) *ReplaceObjectOptions { + options.Updated = updated + return options +} + +// SetShortDescription : Allow user to set ShortDescription +func (options *ReplaceObjectOptions) SetShortDescription(shortDescription string) *ReplaceObjectOptions { + options.ShortDescription = core.StringPtr(shortDescription) + return options +} + +// SetShortDescriptionI18n : Allow user to set ShortDescriptionI18n +func (options *ReplaceObjectOptions) SetShortDescriptionI18n(shortDescriptionI18n string) *ReplaceObjectOptions { + options.ShortDescriptionI18n = core.StringPtr(shortDescriptionI18n) + return options +} + +// SetKind : Allow user to set Kind +func (options *ReplaceObjectOptions) SetKind(kind string) *ReplaceObjectOptions { + options.Kind = core.StringPtr(kind) + return options +} + +// SetPublish : Allow user to set Publish +func (options *ReplaceObjectOptions) SetPublish(publish *PublishObject) *ReplaceObjectOptions { + options.Publish = publish + return options +} + +// SetState : Allow user to set State +func (options *ReplaceObjectOptions) SetState(state *State) *ReplaceObjectOptions { + options.State = state + return options +} + +// SetCatalogID : Allow user to set CatalogID +func (options *ReplaceObjectOptions) SetCatalogID(catalogID string) *ReplaceObjectOptions { + options.CatalogID = core.StringPtr(catalogID) + return options +} + +// SetCatalogName : Allow user to set CatalogName +func (options *ReplaceObjectOptions) SetCatalogName(catalogName string) *ReplaceObjectOptions { + options.CatalogName = core.StringPtr(catalogName) + return options +} + +// SetData : Allow user to set Data +func (options *ReplaceObjectOptions) SetData(data map[string]interface{}) *ReplaceObjectOptions { + options.Data = data + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceObjectOptions) SetHeaders(param map[string]string) *ReplaceObjectOptions { + options.Headers = param + return options +} + +// ReplaceOfferingIconOptions : The ReplaceOfferingIcon options. +type ReplaceOfferingIconOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Offering identification. + OfferingID *string `validate:"required,ne="` + + // Name of the file name that is being uploaded. 
+ FileName *string `validate:"required,ne="`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewReplaceOfferingIconOptions : Instantiate ReplaceOfferingIconOptions
+func (*CatalogManagementV1) NewReplaceOfferingIconOptions(catalogIdentifier string, offeringID string, fileName string) *ReplaceOfferingIconOptions {
+ return &ReplaceOfferingIconOptions{
+ CatalogIdentifier: core.StringPtr(catalogIdentifier),
+ OfferingID: core.StringPtr(offeringID),
+ FileName: core.StringPtr(fileName),
+ }
+}
+
+// SetCatalogIdentifier : Allow user to set CatalogIdentifier
+func (options *ReplaceOfferingIconOptions) SetCatalogIdentifier(catalogIdentifier string) *ReplaceOfferingIconOptions {
+ options.CatalogIdentifier = core.StringPtr(catalogIdentifier)
+ return options
+}
+
+// SetOfferingID : Allow user to set OfferingID
+func (options *ReplaceOfferingIconOptions) SetOfferingID(offeringID string) *ReplaceOfferingIconOptions {
+ options.OfferingID = core.StringPtr(offeringID)
+ return options
+}
+
+// SetFileName : Allow user to set FileName
+func (options *ReplaceOfferingIconOptions) SetFileName(fileName string) *ReplaceOfferingIconOptions {
+ options.FileName = core.StringPtr(fileName)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ReplaceOfferingIconOptions) SetHeaders(param map[string]string) *ReplaceOfferingIconOptions {
+ options.Headers = param
+ return options
+}
+
+// ReplaceOfferingOptions : The ReplaceOffering options.
+type ReplaceOfferingOptions struct {
+ // Catalog identifier.
+ CatalogIdentifier *string `validate:"required,ne="`
+
+ // Offering identification.
+ OfferingID *string `validate:"required,ne="`
+
+ // Unique ID.
+ ID *string
+
+ // Cloudant revision.
+ Rev *string
+
+ // The url for this specific offering.
+ URL *string
+
+ // The crn for this specific offering.
+ CRN *string
+
+ // Display name in the requested language.
+ Label *string
+
+ // The programmatic name of this offering.
+ Name *string
+
+ // URL for an icon associated with this offering.
+ OfferingIconURL *string
+
+ // URL for additional docs with this offering.
+ OfferingDocsURL *string
+
+ // URL to be displayed in the Consumption UI for getting support on this offering.
+ OfferingSupportURL *string
+
+ // List of tags associated with this offering.
+ Tags []string
+
+ // List of keywords associated with offering, typically used to search for it.
+ Keywords []string
+
+ // Offering rating information.
+ Rating *Rating
+
+ // The date and time this offering was created.
+ Created *strfmt.DateTime
+
+ // The date and time this offering was last updated.
+ Updated *strfmt.DateTime
+
+ // Short description in the requested language.
+ ShortDescription *string
+
+ // Long description in the requested language.
+ LongDescription *string
+
+ // List of features associated with this offering.
+ Features []Feature
+
+ // Array of kind.
+ Kinds []Kind
+
+ // Whether it is permitted to request publishing to IBM or Public.
+ PermitRequestIBMPublicPublish *bool
+
+ // Indicates if this offering has been approved for use by all IBMers.
+ IBMPublishApproved *bool
+
+ // Indicates if this offering has been approved for use by all IBM Cloud users.
+ PublicPublishApproved *bool
+
+ // The original offering CRN that this publish entry came from.
+ PublicOriginalCRN *string
+
+ // The crn of the public catalog entry of this offering.
+ PublishPublicCRN *string
+
+ // The portal's approval record ID.
+ PortalApprovalRecord *string
+
+ // The portal UI URL.
+ PortalUIURL *string + + // The id of the catalog containing this offering. + CatalogID *string + + // The name of the catalog. + CatalogName *string + + // Map of metadata values for this offering. + Metadata map[string]interface{} + + // A disclaimer for this offering. + Disclaimer *string + + // Determine if this offering should be displayed in the Consumption UI. + Hidden *bool + + // Provider of this offering. + Provider *string + + // Repository info for offerings. + RepoInfo *RepoInfo + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewReplaceOfferingOptions : Instantiate ReplaceOfferingOptions +func (*CatalogManagementV1) NewReplaceOfferingOptions(catalogIdentifier string, offeringID string) *ReplaceOfferingOptions { + return &ReplaceOfferingOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + OfferingID: core.StringPtr(offeringID), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *ReplaceOfferingOptions) SetCatalogIdentifier(catalogIdentifier string) *ReplaceOfferingOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetOfferingID : Allow user to set OfferingID +func (options *ReplaceOfferingOptions) SetOfferingID(offeringID string) *ReplaceOfferingOptions { + options.OfferingID = core.StringPtr(offeringID) + return options +} + +// SetID : Allow user to set ID +func (options *ReplaceOfferingOptions) SetID(id string) *ReplaceOfferingOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetRev : Allow user to set Rev +func (options *ReplaceOfferingOptions) SetRev(rev string) *ReplaceOfferingOptions { + options.Rev = core.StringPtr(rev) + return options +} + +// SetURL : Allow user to set URL +func (options *ReplaceOfferingOptions) SetURL(url string) *ReplaceOfferingOptions { + options.URL = core.StringPtr(url) + return options +} + +// SetCRN : Allow user to set CRN +func (options *ReplaceOfferingOptions) SetCRN(crn string) *ReplaceOfferingOptions { + options.CRN = core.StringPtr(crn) + return options +} + +// SetLabel : Allow user to set Label +func (options *ReplaceOfferingOptions) SetLabel(label string) *ReplaceOfferingOptions { + options.Label = core.StringPtr(label) + return options +} + +// SetName : Allow user to set Name +func (options *ReplaceOfferingOptions) SetName(name string) *ReplaceOfferingOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetOfferingIconURL : Allow user to set OfferingIconURL +func (options *ReplaceOfferingOptions) SetOfferingIconURL(offeringIconURL string) *ReplaceOfferingOptions { + options.OfferingIconURL = core.StringPtr(offeringIconURL) + return options +} + +// SetOfferingDocsURL : Allow user to set OfferingDocsURL +func (options *ReplaceOfferingOptions) SetOfferingDocsURL(offeringDocsURL string) *ReplaceOfferingOptions { + options.OfferingDocsURL = core.StringPtr(offeringDocsURL) + return options +} + +// SetOfferingSupportURL : Allow user to set OfferingSupportURL +func (options *ReplaceOfferingOptions) SetOfferingSupportURL(offeringSupportURL string) *ReplaceOfferingOptions { + options.OfferingSupportURL = core.StringPtr(offeringSupportURL) + return options +} + +// SetTags : Allow user to set Tags +func (options *ReplaceOfferingOptions) SetTags(tags []string) *ReplaceOfferingOptions { + options.Tags = tags + return options +} + +// SetKeywords : Allow user to set Keywords +func (options *ReplaceOfferingOptions) SetKeywords(keywords []string) 
*ReplaceOfferingOptions { + options.Keywords = keywords + return options +} + +// SetRating : Allow user to set Rating +func (options *ReplaceOfferingOptions) SetRating(rating *Rating) *ReplaceOfferingOptions { + options.Rating = rating + return options +} + +// SetCreated : Allow user to set Created +func (options *ReplaceOfferingOptions) SetCreated(created *strfmt.DateTime) *ReplaceOfferingOptions { + options.Created = created + return options +} + +// SetUpdated : Allow user to set Updated +func (options *ReplaceOfferingOptions) SetUpdated(updated *strfmt.DateTime) *ReplaceOfferingOptions { + options.Updated = updated + return options +} + +// SetShortDescription : Allow user to set ShortDescription +func (options *ReplaceOfferingOptions) SetShortDescription(shortDescription string) *ReplaceOfferingOptions { + options.ShortDescription = core.StringPtr(shortDescription) + return options +} + +// SetLongDescription : Allow user to set LongDescription +func (options *ReplaceOfferingOptions) SetLongDescription(longDescription string) *ReplaceOfferingOptions { + options.LongDescription = core.StringPtr(longDescription) + return options +} + +// SetFeatures : Allow user to set Features +func (options *ReplaceOfferingOptions) SetFeatures(features []Feature) *ReplaceOfferingOptions { + options.Features = features + return options +} + +// SetKinds : Allow user to set Kinds +func (options *ReplaceOfferingOptions) SetKinds(kinds []Kind) *ReplaceOfferingOptions { + options.Kinds = kinds + return options +} + +// SetPermitRequestIBMPublicPublish : Allow user to set PermitRequestIBMPublicPublish +func (options *ReplaceOfferingOptions) SetPermitRequestIBMPublicPublish(permitRequestIBMPublicPublish bool) *ReplaceOfferingOptions { + options.PermitRequestIBMPublicPublish = core.BoolPtr(permitRequestIBMPublicPublish) + return options +} + +// SetIBMPublishApproved : Allow user to set IBMPublishApproved +func (options *ReplaceOfferingOptions) SetIBMPublishApproved(ibmPublishApproved bool) *ReplaceOfferingOptions { + options.IBMPublishApproved = core.BoolPtr(ibmPublishApproved) + return options +} + +// SetPublicPublishApproved : Allow user to set PublicPublishApproved +func (options *ReplaceOfferingOptions) SetPublicPublishApproved(publicPublishApproved bool) *ReplaceOfferingOptions { + options.PublicPublishApproved = core.BoolPtr(publicPublishApproved) + return options +} + +// SetPublicOriginalCRN : Allow user to set PublicOriginalCRN +func (options *ReplaceOfferingOptions) SetPublicOriginalCRN(publicOriginalCRN string) *ReplaceOfferingOptions { + options.PublicOriginalCRN = core.StringPtr(publicOriginalCRN) + return options +} + +// SetPublishPublicCRN : Allow user to set PublishPublicCRN +func (options *ReplaceOfferingOptions) SetPublishPublicCRN(publishPublicCRN string) *ReplaceOfferingOptions { + options.PublishPublicCRN = core.StringPtr(publishPublicCRN) + return options +} + +// SetPortalApprovalRecord : Allow user to set PortalApprovalRecord +func (options *ReplaceOfferingOptions) SetPortalApprovalRecord(portalApprovalRecord string) *ReplaceOfferingOptions { + options.PortalApprovalRecord = core.StringPtr(portalApprovalRecord) + return options +} + +// SetPortalUIURL : Allow user to set PortalUIURL +func (options *ReplaceOfferingOptions) SetPortalUIURL(portalUIURL string) *ReplaceOfferingOptions { + options.PortalUIURL = core.StringPtr(portalUIURL) + return options +} + +// SetCatalogID : Allow user to set CatalogID +func (options *ReplaceOfferingOptions) SetCatalogID(catalogID string) 
*ReplaceOfferingOptions { + options.CatalogID = core.StringPtr(catalogID) + return options +} + +// SetCatalogName : Allow user to set CatalogName +func (options *ReplaceOfferingOptions) SetCatalogName(catalogName string) *ReplaceOfferingOptions { + options.CatalogName = core.StringPtr(catalogName) + return options +} + +// SetMetadata : Allow user to set Metadata +func (options *ReplaceOfferingOptions) SetMetadata(metadata map[string]interface{}) *ReplaceOfferingOptions { + options.Metadata = metadata + return options +} + +// SetDisclaimer : Allow user to set Disclaimer +func (options *ReplaceOfferingOptions) SetDisclaimer(disclaimer string) *ReplaceOfferingOptions { + options.Disclaimer = core.StringPtr(disclaimer) + return options +} + +// SetHidden : Allow user to set Hidden +func (options *ReplaceOfferingOptions) SetHidden(hidden bool) *ReplaceOfferingOptions { + options.Hidden = core.BoolPtr(hidden) + return options +} + +// SetProvider : Allow user to set Provider +func (options *ReplaceOfferingOptions) SetProvider(provider string) *ReplaceOfferingOptions { + options.Provider = core.StringPtr(provider) + return options +} + +// SetRepoInfo : Allow user to set RepoInfo +func (options *ReplaceOfferingOptions) SetRepoInfo(repoInfo *RepoInfo) *ReplaceOfferingOptions { + options.RepoInfo = repoInfo + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceOfferingOptions) SetHeaders(param map[string]string) *ReplaceOfferingOptions { + options.Headers = param + return options +} + +// ReplaceOperatorsOptions : The ReplaceOperators options. +type ReplaceOperatorsOptions struct { + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Cluster ID. + ClusterID *string + + // Cluster region. + Region *string + + // Kube namespaces to deploy Operator(s) to. + Namespaces []string + + // Denotes whether to install Operator(s) globally. + AllNamespaces *bool + + // A dotted value of `catalogID`.`versionID`. 
+ VersionLocatorID *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewReplaceOperatorsOptions : Instantiate ReplaceOperatorsOptions +func (*CatalogManagementV1) NewReplaceOperatorsOptions(xAuthRefreshToken string) *ReplaceOperatorsOptions { + return &ReplaceOperatorsOptions{ + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *ReplaceOperatorsOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *ReplaceOperatorsOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *ReplaceOperatorsOptions) SetClusterID(clusterID string) *ReplaceOperatorsOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *ReplaceOperatorsOptions) SetRegion(region string) *ReplaceOperatorsOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetNamespaces : Allow user to set Namespaces +func (options *ReplaceOperatorsOptions) SetNamespaces(namespaces []string) *ReplaceOperatorsOptions { + options.Namespaces = namespaces + return options +} + +// SetAllNamespaces : Allow user to set AllNamespaces +func (options *ReplaceOperatorsOptions) SetAllNamespaces(allNamespaces bool) *ReplaceOperatorsOptions { + options.AllNamespaces = core.BoolPtr(allNamespaces) + return options +} + +// SetVersionLocatorID : Allow user to set VersionLocatorID +func (options *ReplaceOperatorsOptions) SetVersionLocatorID(versionLocatorID string) *ReplaceOperatorsOptions { + options.VersionLocatorID = core.StringPtr(versionLocatorID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceOperatorsOptions) SetHeaders(param map[string]string) *ReplaceOperatorsOptions { + options.Headers = param + return options +} + +// RepoInfo : Repository info for offerings. +type RepoInfo struct { + // Token for private repos. + Token *string `json:"token,omitempty"` + + // Public or enterprise GitHub. + Type *string `json:"type,omitempty"` +} + +// UnmarshalRepoInfo unmarshals an instance of RepoInfo from the specified map of raw messages. +func UnmarshalRepoInfo(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RepoInfo) + err = core.UnmarshalPrimitive(m, "token", &obj.Token) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Resource : Resource requirements. +type Resource struct { + // Type of requirement. + Type *string `json:"type,omitempty"` + + // mem, disk, cores, and nodes can be parsed as an int. targetVersion will be a semver range value. + Value interface{} `json:"value,omitempty"` +} + +// Constants associated with the Resource.Type property. +// Type of requirement. +const ( + ResourceTypeCoresConst = "cores" + ResourceTypeDiskConst = "disk" + ResourceTypeMemConst = "mem" + ResourceTypeNodesConst = "nodes" + ResourceTypeTargetversionConst = "targetVersion" +) + +// UnmarshalResource unmarshals an instance of Resource from the specified map of raw messages. 
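+// The result argument must be a pointer to a *Resource, matching the
+// reflect-based assignment used below; an illustrative sketch (raw is assumed
+// to be a map[string]json.RawMessage decoded from a response body):
+//
+//   var res *Resource
+//   if err := UnmarshalResource(raw, &res); err != nil {
+//       // handle the unmarshalling error
+//   }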
+func UnmarshalResource(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Resource)
+ err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// Script : Script information.
+type Script struct {
+ // Instructions on the steps that need to take place, and by whom (role), to prepare the target for installing this
+ // version.
+ Instructions *string `json:"instructions,omitempty"`
+
+ // Optional script that needs to be run post any pre-condition script.
+ Script *string `json:"script,omitempty"`
+
+ // Optional IAM permissions that are required on the target cluster to run this script.
+ ScriptPermission *string `json:"script_permission,omitempty"`
+
+ // Optional script that, if run, will remove the installed version.
+ DeleteScript *string `json:"delete_script,omitempty"`
+
+ // Optional value indicating if this script is scoped to a namespace or the entire cluster.
+ Scope *string `json:"scope,omitempty"`
+}
+
+// UnmarshalScript unmarshals an instance of Script from the specified map of raw messages.
+func UnmarshalScript(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Script)
+ err = core.UnmarshalPrimitive(m, "instructions", &obj.Instructions)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "script", &obj.Script)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "script_permission", &obj.ScriptPermission)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "delete_script", &obj.DeleteScript)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "scope", &obj.Scope)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SearchObjectsOptions : The SearchObjects options.
+type SearchObjectsOptions struct {
+ // Lucene query string.
+ Query *string `validate:"required"`
+
+ // The maximum number of results to return.
+ Limit *int64
+
+ // The number of results to skip before returning values.
+ Offset *int64
+
+ // When true, hide private objects that correspond to public or IBM published objects.
+ Collapse *bool
+
+ // Display a digest of search results; defaults to true.
+ Digest *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSearchObjectsOptions : Instantiate SearchObjectsOptions +func (*CatalogManagementV1) NewSearchObjectsOptions(query string) *SearchObjectsOptions { + return &SearchObjectsOptions{ + Query: core.StringPtr(query), + } +} + +// SetQuery : Allow user to set Query +func (options *SearchObjectsOptions) SetQuery(query string) *SearchObjectsOptions { + options.Query = core.StringPtr(query) + return options +} + +// SetLimit : Allow user to set Limit +func (options *SearchObjectsOptions) SetLimit(limit int64) *SearchObjectsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *SearchObjectsOptions) SetOffset(offset int64) *SearchObjectsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetCollapse : Allow user to set Collapse +func (options *SearchObjectsOptions) SetCollapse(collapse bool) *SearchObjectsOptions { + options.Collapse = core.BoolPtr(collapse) + return options +} + +// SetDigest : Allow user to set Digest +func (options *SearchObjectsOptions) SetDigest(digest bool) *SearchObjectsOptions { + options.Digest = core.BoolPtr(digest) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SearchObjectsOptions) SetHeaders(param map[string]string) *SearchObjectsOptions { + options.Headers = param + return options +} + +// SharedPublishObjectOptions : The SharedPublishObject options. +type SharedPublishObjectOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Object identifier. + ObjectIdentifier *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSharedPublishObjectOptions : Instantiate SharedPublishObjectOptions +func (*CatalogManagementV1) NewSharedPublishObjectOptions(catalogIdentifier string, objectIdentifier string) *SharedPublishObjectOptions { + return &SharedPublishObjectOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + ObjectIdentifier: core.StringPtr(objectIdentifier), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *SharedPublishObjectOptions) SetCatalogIdentifier(catalogIdentifier string) *SharedPublishObjectOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetObjectIdentifier : Allow user to set ObjectIdentifier +func (options *SharedPublishObjectOptions) SetObjectIdentifier(objectIdentifier string) *SharedPublishObjectOptions { + options.ObjectIdentifier = core.StringPtr(objectIdentifier) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SharedPublishObjectOptions) SetHeaders(param map[string]string) *SharedPublishObjectOptions { + options.Headers = param + return options +} + +// State : Offering state. +type State struct { + // one of: new, validated, account-published, ibm-published, public-published. + Current *string `json:"current,omitempty"` + + // Date and time of current request. + CurrentEntered *strfmt.DateTime `json:"current_entered,omitempty"` + + // one of: new, validated, account-published, ibm-published, public-published. + Pending *string `json:"pending,omitempty"` + + // Date and time of pending request. + PendingRequested *strfmt.DateTime `json:"pending_requested,omitempty"` + + // one of: new, validated, account-published, ibm-published, public-published. 
+ Previous *string `json:"previous,omitempty"`
+}
+
+// UnmarshalState unmarshals an instance of State from the specified map of raw messages.
+func UnmarshalState(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(State)
+ err = core.UnmarshalPrimitive(m, "current", &obj.Current)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "current_entered", &obj.CurrentEntered)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "pending", &obj.Pending)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "pending_requested", &obj.PendingRequested)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "previous", &obj.Previous)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SyndicationAuthorization : Syndication authorization information.
+type SyndicationAuthorization struct {
+ // Authorization token.
+ Token *string `json:"token,omitempty"`
+
+ // Date and time of the last run.
+ LastRun *strfmt.DateTime `json:"last_run,omitempty"`
+}
+
+// UnmarshalSyndicationAuthorization unmarshals an instance of SyndicationAuthorization from the specified map of raw messages.
+func UnmarshalSyndicationAuthorization(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(SyndicationAuthorization)
+ err = core.UnmarshalPrimitive(m, "token", &obj.Token)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "last_run", &obj.LastRun)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SyndicationCluster : Syndication cluster information.
+type SyndicationCluster struct {
+ // Cluster region.
+ Region *string `json:"region,omitempty"`
+
+ // Cluster ID.
+ ID *string `json:"id,omitempty"`
+
+ // Cluster name.
+ Name *string `json:"name,omitempty"`
+
+ // Resource group name.
+ ResourceGroupName *string `json:"resource_group_name,omitempty"`
+
+ // Syndication type.
+ Type *string `json:"type,omitempty"`
+
+ // Syndicated namespaces.
+ Namespaces []string `json:"namespaces,omitempty"`
+
+ // Syndicated to all namespaces on cluster.
+ AllNamespaces *bool `json:"all_namespaces,omitempty"`
+}
+
+// UnmarshalSyndicationCluster unmarshals an instance of SyndicationCluster from the specified map of raw messages.
+func UnmarshalSyndicationCluster(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(SyndicationCluster)
+ err = core.UnmarshalPrimitive(m, "region", &obj.Region)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "resource_group_name", &obj.ResourceGroupName)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "namespaces", &obj.Namespaces)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "all_namespaces", &obj.AllNamespaces)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SyndicationHistory : Syndication history information.
+type SyndicationHistory struct {
+ // Array of syndicated namespaces.
+ Namespaces []string `json:"namespaces,omitempty"`
+
+ // Array of syndicated clusters.
+ Clusters []SyndicationCluster `json:"clusters,omitempty"`
+
+ // Date and time last syndicated.
+ LastRun *strfmt.DateTime `json:"last_run,omitempty"`
+}
+
+// UnmarshalSyndicationHistory unmarshals an instance of SyndicationHistory from the specified map of raw messages.
+func UnmarshalSyndicationHistory(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(SyndicationHistory)
+ err = core.UnmarshalPrimitive(m, "namespaces", &obj.Namespaces)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "clusters", &obj.Clusters, UnmarshalSyndicationCluster)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "last_run", &obj.LastRun)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SyndicationResource : Syndication resource information.
+type SyndicationResource struct {
+ // Remove related components.
+ RemoveRelatedComponents *bool `json:"remove_related_components,omitempty"`
+
+ // Syndication clusters.
+ Clusters []SyndicationCluster `json:"clusters,omitempty"`
+
+ // Syndication history.
+ History *SyndicationHistory `json:"history,omitempty"`
+
+ // Syndication authorization.
+ Authorization *SyndicationAuthorization `json:"authorization,omitempty"`
+}
+
+// UnmarshalSyndicationResource unmarshals an instance of SyndicationResource from the specified map of raw messages.
+func UnmarshalSyndicationResource(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(SyndicationResource)
+ err = core.UnmarshalPrimitive(m, "remove_related_components", &obj.RemoveRelatedComponents)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "clusters", &obj.Clusters, UnmarshalSyndicationCluster)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "history", &obj.History, UnmarshalSyndicationHistory)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "authorization", &obj.Authorization, UnmarshalSyndicationAuthorization)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// UpdateCatalogAccountOptions : The UpdateCatalogAccount options.
+type UpdateCatalogAccountOptions struct {
+ // Account identification.
+ ID *string
+
+ // Hide the public catalog in this account.
+ HideIBMCloudCatalog *bool
+
+ // Account and catalog filters.
+ AccountFilters *Filters
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewUpdateCatalogAccountOptions : Instantiate UpdateCatalogAccountOptions
+func (*CatalogManagementV1) NewUpdateCatalogAccountOptions() *UpdateCatalogAccountOptions {
+ return &UpdateCatalogAccountOptions{}
+}
+
+// SetID : Allow user to set ID
+func (options *UpdateCatalogAccountOptions) SetID(id string) *UpdateCatalogAccountOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetHideIBMCloudCatalog : Allow user to set HideIBMCloudCatalog
+func (options *UpdateCatalogAccountOptions) SetHideIBMCloudCatalog(hideIBMCloudCatalog bool) *UpdateCatalogAccountOptions {
+ options.HideIBMCloudCatalog = core.BoolPtr(hideIBMCloudCatalog)
+ return options
+}
+
+// SetAccountFilters : Allow user to set AccountFilters
+func (options *UpdateCatalogAccountOptions) SetAccountFilters(accountFilters *Filters) *UpdateCatalogAccountOptions {
+ options.AccountFilters = accountFilters
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateCatalogAccountOptions) SetHeaders(param map[string]string) *UpdateCatalogAccountOptions {
+ options.Headers = param
+ return options
+}
+
+// UpdateOfferingIBMOptions : The UpdateOfferingIBM options.
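+// An illustrative sketch of approving an offering for public use, using the
+// constants declared below (it assumes the UpdateOfferingIBM method defined
+// earlier in this file; identifiers are placeholders):
+//
+//   opts := service.NewUpdateOfferingIBMOptions("catalog-id", "offering-id",
+//       UpdateOfferingIBMOptionsApprovalTypePublicConst,
+//       UpdateOfferingIBMOptionsApprovedTrueConst)
+//   approval, response, err := service.UpdateOfferingIBM(opts)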
+type UpdateOfferingIBMOptions struct { + // Catalog identifier. + CatalogIdentifier *string `validate:"required,ne="` + + // Offering identification. + OfferingID *string `validate:"required,ne="` + + // Type of approval, ibm or public. + ApprovalType *string `validate:"required,ne="` + + // Approve (true) or disapprove (false). + Approved *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateOfferingIBMOptions.ApprovalType property. +// Type of approval, ibm or public. +const ( + UpdateOfferingIBMOptionsApprovalTypeAllowRequestConst = "allow_request" + UpdateOfferingIBMOptionsApprovalTypeIBMConst = "ibm" + UpdateOfferingIBMOptionsApprovalTypePublicConst = "public" +) + +// Constants associated with the UpdateOfferingIBMOptions.Approved property. +// Approve (true) or disapprove (false). +const ( + UpdateOfferingIBMOptionsApprovedFalseConst = "false" + UpdateOfferingIBMOptionsApprovedTrueConst = "true" +) + +// NewUpdateOfferingIBMOptions : Instantiate UpdateOfferingIBMOptions +func (*CatalogManagementV1) NewUpdateOfferingIBMOptions(catalogIdentifier string, offeringID string, approvalType string, approved string) *UpdateOfferingIBMOptions { + return &UpdateOfferingIBMOptions{ + CatalogIdentifier: core.StringPtr(catalogIdentifier), + OfferingID: core.StringPtr(offeringID), + ApprovalType: core.StringPtr(approvalType), + Approved: core.StringPtr(approved), + } +} + +// SetCatalogIdentifier : Allow user to set CatalogIdentifier +func (options *UpdateOfferingIBMOptions) SetCatalogIdentifier(catalogIdentifier string) *UpdateOfferingIBMOptions { + options.CatalogIdentifier = core.StringPtr(catalogIdentifier) + return options +} + +// SetOfferingID : Allow user to set OfferingID +func (options *UpdateOfferingIBMOptions) SetOfferingID(offeringID string) *UpdateOfferingIBMOptions { + options.OfferingID = core.StringPtr(offeringID) + return options +} + +// SetApprovalType : Allow user to set ApprovalType +func (options *UpdateOfferingIBMOptions) SetApprovalType(approvalType string) *UpdateOfferingIBMOptions { + options.ApprovalType = core.StringPtr(approvalType) + return options +} + +// SetApproved : Allow user to set Approved +func (options *UpdateOfferingIBMOptions) SetApproved(approved string) *UpdateOfferingIBMOptions { + options.Approved = core.StringPtr(approved) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateOfferingIBMOptions) SetHeaders(param map[string]string) *UpdateOfferingIBMOptions { + options.Headers = param + return options +} + +// ValidateInstallOptions : The ValidateInstall options. +type ValidateInstallOptions struct { + // A dotted value of `catalogID`.`versionID`. + VersionLocID *string `validate:"required,ne="` + + // IAM Refresh token. + XAuthRefreshToken *string `validate:"required"` + + // Cluster ID. + ClusterID *string + + // Cluster region. + Region *string + + // Kube namespace. + Namespace *string + + // Object containing Helm chart override values. To use a secret for items of type password, specify a JSON encoded + // value of $ref:#/components/schemas/SecretInstance, prefixed with `cmsm_v1:`. + OverrideValues map[string]interface{} + + // Entitlement API Key for this offering. + EntitlementApikey *string + + // Schematics workspace configuration. + Schematics *DeployRequestBodySchematics + + // Script. + Script *string + + // Script ID. + ScriptID *string + + // A dotted value of `catalogID`.`versionID`. 
+ VersionLocatorID *string + + // VCenter ID. + VcenterID *string + + // VCenter User. + VcenterUser *string + + // VCenter Password. + VcenterPassword *string + + // VCenter Location. + VcenterLocation *string + + // VCenter Datastore. + VcenterDatastore *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewValidateInstallOptions : Instantiate ValidateInstallOptions +func (*CatalogManagementV1) NewValidateInstallOptions(versionLocID string, xAuthRefreshToken string) *ValidateInstallOptions { + return &ValidateInstallOptions{ + VersionLocID: core.StringPtr(versionLocID), + XAuthRefreshToken: core.StringPtr(xAuthRefreshToken), + } +} + +// SetVersionLocID : Allow user to set VersionLocID +func (options *ValidateInstallOptions) SetVersionLocID(versionLocID string) *ValidateInstallOptions { + options.VersionLocID = core.StringPtr(versionLocID) + return options +} + +// SetXAuthRefreshToken : Allow user to set XAuthRefreshToken +func (options *ValidateInstallOptions) SetXAuthRefreshToken(xAuthRefreshToken string) *ValidateInstallOptions { + options.XAuthRefreshToken = core.StringPtr(xAuthRefreshToken) + return options +} + +// SetClusterID : Allow user to set ClusterID +func (options *ValidateInstallOptions) SetClusterID(clusterID string) *ValidateInstallOptions { + options.ClusterID = core.StringPtr(clusterID) + return options +} + +// SetRegion : Allow user to set Region +func (options *ValidateInstallOptions) SetRegion(region string) *ValidateInstallOptions { + options.Region = core.StringPtr(region) + return options +} + +// SetNamespace : Allow user to set Namespace +func (options *ValidateInstallOptions) SetNamespace(namespace string) *ValidateInstallOptions { + options.Namespace = core.StringPtr(namespace) + return options +} + +// SetOverrideValues : Allow user to set OverrideValues +func (options *ValidateInstallOptions) SetOverrideValues(overrideValues map[string]interface{}) *ValidateInstallOptions { + options.OverrideValues = overrideValues + return options +} + +// SetEntitlementApikey : Allow user to set EntitlementApikey +func (options *ValidateInstallOptions) SetEntitlementApikey(entitlementApikey string) *ValidateInstallOptions { + options.EntitlementApikey = core.StringPtr(entitlementApikey) + return options +} + +// SetSchematics : Allow user to set Schematics +func (options *ValidateInstallOptions) SetSchematics(schematics *DeployRequestBodySchematics) *ValidateInstallOptions { + options.Schematics = schematics + return options +} + +// SetScript : Allow user to set Script +func (options *ValidateInstallOptions) SetScript(script string) *ValidateInstallOptions { + options.Script = core.StringPtr(script) + return options +} + +// SetScriptID : Allow user to set ScriptID +func (options *ValidateInstallOptions) SetScriptID(scriptID string) *ValidateInstallOptions { + options.ScriptID = core.StringPtr(scriptID) + return options +} + +// SetVersionLocatorID : Allow user to set VersionLocatorID +func (options *ValidateInstallOptions) SetVersionLocatorID(versionLocatorID string) *ValidateInstallOptions { + options.VersionLocatorID = core.StringPtr(versionLocatorID) + return options +} + +// SetVcenterID : Allow user to set VcenterID +func (options *ValidateInstallOptions) SetVcenterID(vcenterID string) *ValidateInstallOptions { + options.VcenterID = core.StringPtr(vcenterID) + return options +} + +// SetVcenterUser : Allow user to set VcenterUser +func (options *ValidateInstallOptions) SetVcenterUser(vcenterUser string) 
*ValidateInstallOptions {
+ options.VcenterUser = core.StringPtr(vcenterUser)
+ return options
+}
+
+// SetVcenterPassword : Allow user to set VcenterPassword
+func (options *ValidateInstallOptions) SetVcenterPassword(vcenterPassword string) *ValidateInstallOptions {
+ options.VcenterPassword = core.StringPtr(vcenterPassword)
+ return options
+}
+
+// SetVcenterLocation : Allow user to set VcenterLocation
+func (options *ValidateInstallOptions) SetVcenterLocation(vcenterLocation string) *ValidateInstallOptions {
+ options.VcenterLocation = core.StringPtr(vcenterLocation)
+ return options
+}
+
+// SetVcenterDatastore : Allow user to set VcenterDatastore
+func (options *ValidateInstallOptions) SetVcenterDatastore(vcenterDatastore string) *ValidateInstallOptions {
+ options.VcenterDatastore = core.StringPtr(vcenterDatastore)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ValidateInstallOptions) SetHeaders(param map[string]string) *ValidateInstallOptions {
+ options.Headers = param
+ return options
+}
+
+// Validation : Validation response.
+type Validation struct {
+ // Date and time of last successful validation.
+ Validated *strfmt.DateTime `json:"validated,omitempty"`
+
+ // Date and time validation was last requested.
+ Requested *strfmt.DateTime `json:"requested,omitempty"`
+
+ // Current validation state: one of empty, in_progress, valid, invalid, or expired.
+ State *string `json:"state,omitempty"`
+
+ // Last operation (e.g. submit_deployment, generate_installer, install_offering).
+ LastOperation *string `json:"last_operation,omitempty"`
+
+ // Validation target information (e.g. cluster_id, region, namespace, etc.). Values will vary by Content type.
+ Target map[string]interface{} `json:"target,omitempty"`
+}
+
+// UnmarshalValidation unmarshals an instance of Validation from the specified map of raw messages.
+func UnmarshalValidation(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Validation)
+ err = core.UnmarshalPrimitive(m, "validated", &obj.Validated)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "requested", &obj.Requested)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "state", &obj.State)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "last_operation", &obj.LastOperation)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "target", &obj.Target)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// Version : Offering version information.
+type Version struct {
+ // Unique ID.
+ ID *string `json:"id,omitempty"`
+
+ // Cloudant revision.
+ Rev *string `json:"_rev,omitempty"`
+
+ // Version's CRN.
+ CRN *string `json:"crn,omitempty"`
+
+ // Version of content type.
+ Version *string `json:"version,omitempty"`
+
+ // Hash of the content.
+ Sha *string `json:"sha,omitempty"`
+
+ // The date and time this version was created.
+ Created *strfmt.DateTime `json:"created,omitempty"`
+
+ // The date and time this version was last updated.
+ Updated *strfmt.DateTime `json:"updated,omitempty"`
+
+ // Offering ID.
+ OfferingID *string `json:"offering_id,omitempty"`
+
+ // Catalog ID.
+ CatalogID *string `json:"catalog_id,omitempty"`
+
+ // Kind ID.
+ KindID *string `json:"kind_id,omitempty"`
+
+ // List of tags associated with this version.
+ Tags []string `json:"tags,omitempty"`
+
+ // Content's repo URL.
+ RepoURL *string `json:"repo_url,omitempty"`
+
+ // Content's source URL (e.g. git repo).
+ SourceURL *string `json:"source_url,omitempty"`
+
+ // File used to on-board this version.
+ TgzURL *string `json:"tgz_url,omitempty"`
+
+ // List of user-solicited overrides.
+ Configuration []Configuration `json:"configuration,omitempty"`
+
+ // Open-ended metadata information.
+ Metadata map[string]interface{} `json:"metadata,omitempty"`
+
+ // Validation response.
+ Validation *Validation `json:"validation,omitempty"`
+
+ // Resource requirements for installation.
+ RequiredResources []Resource `json:"required_resources,omitempty"`
+
+ // Denotes if a single instance can be deployed to a given cluster.
+ SingleInstance *bool `json:"single_instance,omitempty"`
+
+ // Script information.
+ Install *Script `json:"install,omitempty"`
+
+ // Optional pre-install instructions.
+ PreInstall []Script `json:"pre_install,omitempty"`
+
+ // Entitlement license info.
+ Entitlement *VersionEntitlement `json:"entitlement,omitempty"`
+
+ // List of licenses the product was built with.
+ Licenses []License `json:"licenses,omitempty"`
+
+ // If set, denotes a URL to a YAML file with the list of container images used by this version.
+ ImageManifestURL *string `json:"image_manifest_url,omitempty"`
+
+ // Read-only field indicating if this version is deprecated.
+ Deprecated *bool `json:"deprecated,omitempty"`
+
+ // Version of the package used to create this version.
+ PackageVersion *string `json:"package_version,omitempty"`
+
+ // Offering state.
+ State *State `json:"state,omitempty"`
+
+ // A dotted value of `catalogID`.`versionID`.
+ VersionLocator *string `json:"version_locator,omitempty"`
+
+ // Console URL.
+ ConsoleURL *string `json:"console_url,omitempty"`
+
+ // Long description for version.
+ LongDescription *string `json:"long_description,omitempty"`
+
+ // Whitelisted accounts for version.
+ WhitelistedAccounts []string `json:"whitelisted_accounts,omitempty"`
+}
+
+// UnmarshalVersion unmarshals an instance of Version from the specified map of raw messages.
+func UnmarshalVersion(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Version) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "_rev", &obj.Rev) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "version", &obj.Version) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "sha", &obj.Sha) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created", &obj.Created) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated", &obj.Updated) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offering_id", &obj.OfferingID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "catalog_id", &obj.CatalogID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kind_id", &obj.KindID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "repo_url", &obj.RepoURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_url", &obj.SourceURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tgz_url", &obj.TgzURL) + if err != nil { + return + } + err = core.UnmarshalModel(m, "configuration", &obj.Configuration, UnmarshalConfiguration) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "metadata", &obj.Metadata) + if err != nil { + return + } + err = core.UnmarshalModel(m, "validation", &obj.Validation, UnmarshalValidation) + if err != nil { + return + } + err = core.UnmarshalModel(m, "required_resources", &obj.RequiredResources, UnmarshalResource) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "single_instance", &obj.SingleInstance) + if err != nil { + return + } + err = core.UnmarshalModel(m, "install", &obj.Install, UnmarshalScript) + if err != nil { + return + } + err = core.UnmarshalModel(m, "pre_install", &obj.PreInstall, UnmarshalScript) + if err != nil { + return + } + err = core.UnmarshalModel(m, "entitlement", &obj.Entitlement, UnmarshalVersionEntitlement) + if err != nil { + return + } + err = core.UnmarshalModel(m, "licenses", &obj.Licenses, UnmarshalLicense) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "image_manifest_url", &obj.ImageManifestURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "deprecated", &obj.Deprecated) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "package_version", &obj.PackageVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "state", &obj.State, UnmarshalState) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "version_locator", &obj.VersionLocator) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "console_url", &obj.ConsoleURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "long_description", &obj.LongDescription) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "whitelisted_accounts", &obj.WhitelistedAccounts) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VersionEntitlement : Entitlement license info. +type VersionEntitlement struct { + // Provider name. + ProviderName *string `json:"provider_name,omitempty"` + + // Provider ID. + ProviderID *string `json:"provider_id,omitempty"` + + // Product ID. 
+ ProductID *string `json:"product_id,omitempty"` + + // list of license entitlement part numbers, eg. D1YGZLL,D1ZXILL. + PartNumbers []string `json:"part_numbers,omitempty"` + + // Image repository name. + ImageRepoName *string `json:"image_repo_name,omitempty"` +} + +// UnmarshalVersionEntitlement unmarshals an instance of VersionEntitlement from the specified map of raw messages. +func UnmarshalVersionEntitlement(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VersionEntitlement) + err = core.UnmarshalPrimitive(m, "provider_name", &obj.ProviderName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "provider_id", &obj.ProviderID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "product_id", &obj.ProductID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "part_numbers", &obj.PartNumbers) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "image_repo_name", &obj.ImageRepoName) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VersionUpdateDescriptor : Indicates if the current version can be upgraded to the version identified by the descriptor. +type VersionUpdateDescriptor struct { + // A dotted value of `catalogID`.`versionID`. + VersionLocator *string `json:"version_locator,omitempty"` + + // the version number of this version. + Version *string `json:"version,omitempty"` + + // Offering state. + State *State `json:"state,omitempty"` + + // Resource requirments for installation. + RequiredResources []Resource `json:"required_resources,omitempty"` + + // Version of package. + PackageVersion *string `json:"package_version,omitempty"` + + // true if the current version can be upgraded to this version, false otherwise. + CanUpdate *bool `json:"can_update,omitempty"` + + // If can_update is false, this map will contain messages for each failed check, otherwise it will be omitted. + // Possible keys include nodes, cores, mem, disk, targetVersion, and install-permission-check. + Messages map[string]string `json:"messages,omitempty"` +} + +// UnmarshalVersionUpdateDescriptor unmarshals an instance of VersionUpdateDescriptor from the specified map of raw messages. 
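//
// A minimal sketch of driving one of these unmarshallers directly from raw
// JSON, mirroring how the service methods in this package call
// core.UnmarshalModel (the sample JSON is illustrative only):
//
//    var raw map[string]json.RawMessage
//    _ = json.Unmarshal([]byte(`{"version_locator":"cat.ver","can_update":true}`), &raw)
//    var descriptor *VersionUpdateDescriptor
//    err := core.UnmarshalModel(raw, "", &descriptor, UnmarshalVersionUpdateDescriptor)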
+func UnmarshalVersionUpdateDescriptor(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VersionUpdateDescriptor) + err = core.UnmarshalPrimitive(m, "version_locator", &obj.VersionLocator) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "version", &obj.Version) + if err != nil { + return + } + err = core.UnmarshalModel(m, "state", &obj.State, UnmarshalState) + if err != nil { + return + } + err = core.UnmarshalModel(m, "required_resources", &obj.RequiredResources, UnmarshalResource) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "package_version", &obj.PackageVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "can_update", &obj.CanUpdate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messages", &obj.Messages) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/platform-services-go-sdk/enterprisemanagementv1/enterprise_management_v1.go b/vendor/github.com/IBM/platform-services-go-sdk/enterprisemanagementv1/enterprise_management_v1.go new file mode 100644 index 00000000000..8833f025389 --- /dev/null +++ b/vendor/github.com/IBM/platform-services-go-sdk/enterprisemanagementv1/enterprise_management_v1.go @@ -0,0 +1,2159 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.30.0-bd714324-20210406-200538 + */ + +// Package enterprisemanagementv1 : Operations and models for the EnterpriseManagementV1 service +package enterprisemanagementv1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "time" + + "github.com/IBM/go-sdk-core/v5/core" + common "github.com/IBM/platform-services-go-sdk/common" + "github.com/go-openapi/strfmt" +) + +// EnterpriseManagementV1 : The Enterprise Management API enables you to create and manage an enterprise, account +// groups, and accounts within the enterprise. +// +// Version: 1.0 +type EnterpriseManagementV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://enterprise.cloud.ibm.com/v1" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "enterprise_management" + +// EnterpriseManagementV1Options : Service options +type EnterpriseManagementV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewEnterpriseManagementV1UsingExternalConfig : constructs an instance of EnterpriseManagementV1 with passed in options and external configuration. 
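//
// A construction sketch, assuming credentials are supplied externally (for
// example ENTERPRISE_MANAGEMENT_APIKEY and ENTERPRISE_MANAGEMENT_URL set in
// the environment or in a credentials file):
//
//    service, err := NewEnterpriseManagementV1UsingExternalConfig(
//        &EnterpriseManagementV1Options{})
//    if err != nil {
//        panic(err)
//    }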
+func NewEnterpriseManagementV1UsingExternalConfig(options *EnterpriseManagementV1Options) (enterpriseManagement *EnterpriseManagementV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + enterpriseManagement, err = NewEnterpriseManagementV1(options) + if err != nil { + return + } + + err = enterpriseManagement.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = enterpriseManagement.Service.SetServiceURL(options.URL) + } + return +} + +// NewEnterpriseManagementV1 : constructs an instance of EnterpriseManagementV1 with passed in options. +func NewEnterpriseManagementV1(options *EnterpriseManagementV1Options) (service *EnterpriseManagementV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &EnterpriseManagementV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "enterpriseManagement" suitable for processing requests. +func (enterpriseManagement *EnterpriseManagementV1) Clone() *EnterpriseManagementV1 { + if core.IsNil(enterpriseManagement) { + return nil + } + clone := *enterpriseManagement + clone.Service = enterpriseManagement.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (enterpriseManagement *EnterpriseManagementV1) SetServiceURL(url string) error { + return enterpriseManagement.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (enterpriseManagement *EnterpriseManagementV1) GetServiceURL() string { + return enterpriseManagement.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (enterpriseManagement *EnterpriseManagementV1) SetDefaultHeaders(headers http.Header) { + enterpriseManagement.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (enterpriseManagement *EnterpriseManagementV1) SetEnableGzipCompression(enableGzip bool) { + enterpriseManagement.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (enterpriseManagement *EnterpriseManagementV1) GetEnableGzipCompression() bool { + return enterpriseManagement.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (enterpriseManagement *EnterpriseManagementV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + enterpriseManagement.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. 
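//
// A small sketch of the retry controls above; the values are arbitrary:
//
//    service.EnableRetries(4, 30*time.Second) // at most 4 retries, 30s max interval
//    defer service.DisableRetries()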
+func (enterpriseManagement *EnterpriseManagementV1) DisableRetries() { + enterpriseManagement.Service.DisableRetries() +} + +// CreateEnterprise : Create an enterprise +// Create a new enterprise, which you can use to centrally manage multiple accounts. To create an enterprise, you must +// have an active Subscription account.

The API creates an enterprise entity, which is the root of the +// enterprise hierarchy. It also creates a new enterprise account that is used to manage the enterprise. All +// subscriptions, support entitlements, credits, and discounts from the source subscription account are migrated to the +// enterprise account, and the source account becomes a child account in the hierarchy. The user that you assign as the +// enterprise primary contact is also assigned as the owner of the enterprise account. +func (enterpriseManagement *EnterpriseManagementV1) CreateEnterprise(createEnterpriseOptions *CreateEnterpriseOptions) (result *CreateEnterpriseResponse, response *core.DetailedResponse, err error) { + return enterpriseManagement.CreateEnterpriseWithContext(context.Background(), createEnterpriseOptions) +} + +// CreateEnterpriseWithContext is an alternate form of the CreateEnterprise method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) CreateEnterpriseWithContext(ctx context.Context, createEnterpriseOptions *CreateEnterpriseOptions) (result *CreateEnterpriseResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createEnterpriseOptions, "createEnterpriseOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createEnterpriseOptions, "createEnterpriseOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/enterprises`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createEnterpriseOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "CreateEnterprise") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createEnterpriseOptions.SourceAccountID != nil { + body["source_account_id"] = createEnterpriseOptions.SourceAccountID + } + if createEnterpriseOptions.Name != nil { + body["name"] = createEnterpriseOptions.Name + } + if createEnterpriseOptions.PrimaryContactIamID != nil { + body["primary_contact_iam_id"] = createEnterpriseOptions.PrimaryContactIamID + } + if createEnterpriseOptions.Domain != nil { + body["domain"] = createEnterpriseOptions.Domain + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = enterpriseManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateEnterpriseResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// ListEnterprises : List enterprises +// Retrieve all enterprises for a given ID by passing the IDs on query parameters. If no ID is passed, the enterprises +// for which the calling identity is the primary contact are returned. You can use pagination parameters to filter the +// results.

This method ensures that only the enterprises that the user has access to are returned. Access can +// be controlled either through a policy on a specific enterprise, or account-level platform services access roles, such +// as Administrator, Editor, Operator, or Viewer. When you call the method with the `enterprise_account_id` or +// `account_id` query parameter, the account ID in the token is compared with that in the query parameter. If these +// account IDs match, authentication isn't performed and the enterprise information is returned. If the account IDs +// don't match, authentication is performed and only then is the enterprise information returned in the response. +func (enterpriseManagement *EnterpriseManagementV1) ListEnterprises(listEnterprisesOptions *ListEnterprisesOptions) (result *ListEnterprisesResponse, response *core.DetailedResponse, err error) { + return enterpriseManagement.ListEnterprisesWithContext(context.Background(), listEnterprisesOptions) +} + +// ListEnterprisesWithContext is an alternate form of the ListEnterprises method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) ListEnterprisesWithContext(ctx context.Context, listEnterprisesOptions *ListEnterprisesOptions) (result *ListEnterprisesResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listEnterprisesOptions, "listEnterprisesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/enterprises`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listEnterprisesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "ListEnterprises") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listEnterprisesOptions.EnterpriseAccountID != nil { + builder.AddQuery("enterprise_account_id", fmt.Sprint(*listEnterprisesOptions.EnterpriseAccountID)) + } + if listEnterprisesOptions.AccountGroupID != nil { + builder.AddQuery("account_group_id", fmt.Sprint(*listEnterprisesOptions.AccountGroupID)) + } + if listEnterprisesOptions.AccountID != nil { + builder.AddQuery("account_id", fmt.Sprint(*listEnterprisesOptions.AccountID)) + } + if listEnterprisesOptions.NextDocid != nil { + builder.AddQuery("next_docid", fmt.Sprint(*listEnterprisesOptions.NextDocid)) + } + if listEnterprisesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listEnterprisesOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = enterpriseManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListEnterprisesResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetEnterprise : Get enterprise by ID +// Retrieve an enterprise by the `enterprise_id` parameter. All data related to the enterprise is returned only if the +// caller has access to retrieve the enterprise. 
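//
// For example (a sketch; NewGetEnterpriseOptions is generated later in this
// file, and the ID is a placeholder):
//
//    options := service.NewGetEnterpriseOptions("enterprise-id")
//    enterprise, _, err := service.GetEnterprise(options)
//    if err == nil {
//        fmt.Println(*enterprise.Name)
//    }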
+func (enterpriseManagement *EnterpriseManagementV1) GetEnterprise(getEnterpriseOptions *GetEnterpriseOptions) (result *Enterprise, response *core.DetailedResponse, err error) { + return enterpriseManagement.GetEnterpriseWithContext(context.Background(), getEnterpriseOptions) +} + +// GetEnterpriseWithContext is an alternate form of the GetEnterprise method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) GetEnterpriseWithContext(ctx context.Context, getEnterpriseOptions *GetEnterpriseOptions) (result *Enterprise, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getEnterpriseOptions, "getEnterpriseOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getEnterpriseOptions, "getEnterpriseOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "enterprise_id": *getEnterpriseOptions.EnterpriseID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/enterprises/{enterprise_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getEnterpriseOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "GetEnterprise") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = enterpriseManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEnterprise) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateEnterprise : Update an enterprise +// Update the name, domain, or IAM ID of the primary contact for an existing enterprise. The new primary contact must +// already be a user in the enterprise account. 
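//
// For example (a sketch; NewUpdateEnterpriseOptions and the fluent setters
// are generated later in this file):
//
//    options := service.NewUpdateEnterpriseOptions("enterprise-id").
//        SetName("Updated Example Corp").
//        SetPrimaryContactIamID("IBMid-0123ABC")
//    response, err := service.UpdateEnterprise(options)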
+func (enterpriseManagement *EnterpriseManagementV1) UpdateEnterprise(updateEnterpriseOptions *UpdateEnterpriseOptions) (response *core.DetailedResponse, err error) { + return enterpriseManagement.UpdateEnterpriseWithContext(context.Background(), updateEnterpriseOptions) +} + +// UpdateEnterpriseWithContext is an alternate form of the UpdateEnterprise method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) UpdateEnterpriseWithContext(ctx context.Context, updateEnterpriseOptions *UpdateEnterpriseOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateEnterpriseOptions, "updateEnterpriseOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateEnterpriseOptions, "updateEnterpriseOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "enterprise_id": *updateEnterpriseOptions.EnterpriseID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/enterprises/{enterprise_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateEnterpriseOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "UpdateEnterprise") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateEnterpriseOptions.Name != nil { + body["name"] = updateEnterpriseOptions.Name + } + if updateEnterpriseOptions.Domain != nil { + body["domain"] = updateEnterpriseOptions.Domain + } + if updateEnterpriseOptions.PrimaryContactIamID != nil { + body["primary_contact_iam_id"] = updateEnterpriseOptions.PrimaryContactIamID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = enterpriseManagement.Service.Request(request, nil) + + return +} + +// ImportAccountToEnterprise : Import an account into an enterprise +// Import an existing stand-alone account into an enterprise. The existing account can be any type: trial (`TRIAL`), +// Lite (`STANDARD`), Pay-As-You-Go (`PAYG`), or Subscription (`SUBSCRIPTION`). In the case of a `SUBSCRIPTION` account, +// the credits, promotional offers, and discounts are migrated to the billing unit of the enterprise. For a billable +// account (`PAYG` or `SUBSCRIPTION`), the country and currency code of the existing account and the billing unit of the +// enterprise must match. The API returns a `202` response and performs asynchronous operations to import the account +// into the enterprise.

For more information about impacts to the account, see [Adding accounts to an +// enterprise](https://{DomainName}/docs/account?topic=account-enterprise-add). +func (enterpriseManagement *EnterpriseManagementV1) ImportAccountToEnterprise(importAccountToEnterpriseOptions *ImportAccountToEnterpriseOptions) (response *core.DetailedResponse, err error) { + return enterpriseManagement.ImportAccountToEnterpriseWithContext(context.Background(), importAccountToEnterpriseOptions) +} + +// ImportAccountToEnterpriseWithContext is an alternate form of the ImportAccountToEnterprise method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) ImportAccountToEnterpriseWithContext(ctx context.Context, importAccountToEnterpriseOptions *ImportAccountToEnterpriseOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(importAccountToEnterpriseOptions, "importAccountToEnterpriseOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(importAccountToEnterpriseOptions, "importAccountToEnterpriseOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "enterprise_id": *importAccountToEnterpriseOptions.EnterpriseID, + "account_id": *importAccountToEnterpriseOptions.AccountID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/enterprises/{enterprise_id}/import/accounts/{account_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range importAccountToEnterpriseOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "ImportAccountToEnterprise") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if importAccountToEnterpriseOptions.Parent != nil { + body["parent"] = importAccountToEnterpriseOptions.Parent + } + if importAccountToEnterpriseOptions.BillingUnitID != nil { + body["billing_unit_id"] = importAccountToEnterpriseOptions.BillingUnitID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = enterpriseManagement.Service.Request(request, nil) + + return +} + +// CreateAccount : Create a new account in an enterprise +// Create a new account as a part of an existing enterprise. The API creates an account entity under the parent that is +// specified in the payload of the request. The request also takes in the name and the owner of this new account. The +// owner must have a valid IBMid that's registered with IBM Cloud, but they don't need to be a user in the enterprise +// account. 
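//
// For example, using the NewCreateAccountOptions constructor defined below
// (the parent CRN and IAM ID are placeholders):
//
//    options := service.NewCreateAccountOptions(
//        "crn:v1:bluemix:public:enterprise::a/abc123::account-group:def456",
//        "Example Account",
//        "IBMid-0123ABC")
//    result, _, err := service.CreateAccount(options)
//    if err == nil {
//        fmt.Println(*result.AccountID)
//    }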
+func (enterpriseManagement *EnterpriseManagementV1) CreateAccount(createAccountOptions *CreateAccountOptions) (result *CreateAccountResponse, response *core.DetailedResponse, err error) { + return enterpriseManagement.CreateAccountWithContext(context.Background(), createAccountOptions) +} + +// CreateAccountWithContext is an alternate form of the CreateAccount method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) CreateAccountWithContext(ctx context.Context, createAccountOptions *CreateAccountOptions) (result *CreateAccountResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createAccountOptions, "createAccountOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createAccountOptions, "createAccountOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/accounts`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createAccountOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "CreateAccount") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createAccountOptions.Parent != nil { + body["parent"] = createAccountOptions.Parent + } + if createAccountOptions.Name != nil { + body["name"] = createAccountOptions.Name + } + if createAccountOptions.OwnerIamID != nil { + body["owner_iam_id"] = createAccountOptions.OwnerIamID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = enterpriseManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateAccountResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// ListAccounts : List accounts +// Retrieve all accounts based on the values that are passed in the query parameters. If no query parameter is passed, +// all of the accounts in the enterprise for which the calling identity has access are returned.

You can use +// pagination parameters to filter the results. The `limit` field can be used to limit the number of results that are +// displayed for this method.

This method ensures that only the accounts that the user has access to are +// returned. Access can be controlled either through a policy on a specific account, or account-level platform services +// access roles, such as Administrator, Editor, Operator, or Viewer. When you call the method with the `enterprise_id`, +// `account_group_id` or `parent` query parameter, all of the accounts that are immediate children of this entity are +// returned. Authentication is performed on all the accounts before they are returned to the user to ensure that only +// those accounts are returned to which the calling identity has access to. +func (enterpriseManagement *EnterpriseManagementV1) ListAccounts(listAccountsOptions *ListAccountsOptions) (result *ListAccountsResponse, response *core.DetailedResponse, err error) { + return enterpriseManagement.ListAccountsWithContext(context.Background(), listAccountsOptions) +} + +// ListAccountsWithContext is an alternate form of the ListAccounts method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) ListAccountsWithContext(ctx context.Context, listAccountsOptions *ListAccountsOptions) (result *ListAccountsResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAccountsOptions, "listAccountsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/accounts`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listAccountsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "ListAccounts") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listAccountsOptions.EnterpriseID != nil { + builder.AddQuery("enterprise_id", fmt.Sprint(*listAccountsOptions.EnterpriseID)) + } + if listAccountsOptions.AccountGroupID != nil { + builder.AddQuery("account_group_id", fmt.Sprint(*listAccountsOptions.AccountGroupID)) + } + if listAccountsOptions.NextDocid != nil { + builder.AddQuery("next_docid", fmt.Sprint(*listAccountsOptions.NextDocid)) + } + if listAccountsOptions.Parent != nil { + builder.AddQuery("parent", fmt.Sprint(*listAccountsOptions.Parent)) + } + if listAccountsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listAccountsOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = enterpriseManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListAccountsResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetAccount : Get account by ID +// Retrieve an account by the `account_id` parameter. All data related to the account is returned only if the caller has +// access to retrieve the account. 
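//
// For example (a sketch; NewGetAccountOptions is generated later in this
// file):
//
//    options := service.NewGetAccountOptions("account-id")
//    account, _, err := service.GetAccount(options)
//    if err == nil {
//        fmt.Println(*account.Name, *account.State)
//    }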
+func (enterpriseManagement *EnterpriseManagementV1) GetAccount(getAccountOptions *GetAccountOptions) (result *Account, response *core.DetailedResponse, err error) { + return enterpriseManagement.GetAccountWithContext(context.Background(), getAccountOptions) +} + +// GetAccountWithContext is an alternate form of the GetAccount method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) GetAccountWithContext(ctx context.Context, getAccountOptions *GetAccountOptions) (result *Account, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getAccountOptions, "getAccountOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getAccountOptions, "getAccountOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "account_id": *getAccountOptions.AccountID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/accounts/{account_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getAccountOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "GetAccount") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = enterpriseManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAccount) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateAccount : Move an account within the enterprise +// Move an account to a different parent within the same enterprise. 
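//
// For example (a sketch; the generated NewUpdateAccountOptions constructor,
// defined later in this file, is assumed to take the account ID and the new
// parent CRN):
//
//    options := service.NewUpdateAccountOptions("account-id",
//        "crn:v1:bluemix:public:enterprise::a/abc123::account-group:new-parent")
//    response, err := service.UpdateAccount(options)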
+func (enterpriseManagement *EnterpriseManagementV1) UpdateAccount(updateAccountOptions *UpdateAccountOptions) (response *core.DetailedResponse, err error) { + return enterpriseManagement.UpdateAccountWithContext(context.Background(), updateAccountOptions) +} + +// UpdateAccountWithContext is an alternate form of the UpdateAccount method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) UpdateAccountWithContext(ctx context.Context, updateAccountOptions *UpdateAccountOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateAccountOptions, "updateAccountOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateAccountOptions, "updateAccountOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "account_id": *updateAccountOptions.AccountID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/accounts/{account_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateAccountOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "UpdateAccount") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateAccountOptions.Parent != nil { + body["parent"] = updateAccountOptions.Parent + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = enterpriseManagement.Service.Request(request, nil) + + return +} + +// CreateAccountGroup : Create an account group +// Create a new account group, which can be used to group together multiple accounts. To create an account group, you +// must have an existing enterprise. The API creates an account group entity under the parent that is specified in the +// payload of the request. The request also takes in the name and the primary contact of this new account group. 
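//
// For example, using the NewCreateAccountGroupOptions constructor defined
// below (the parent CRN and IAM ID are placeholders):
//
//    options := service.NewCreateAccountGroupOptions(
//        "crn:v1:bluemix:public:enterprise::a/abc123::enterprise:def456",
//        "Example Account Group",
//        "IBMid-0123ABC")
//    result, _, err := service.CreateAccountGroup(options)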
+func (enterpriseManagement *EnterpriseManagementV1) CreateAccountGroup(createAccountGroupOptions *CreateAccountGroupOptions) (result *CreateAccountGroupResponse, response *core.DetailedResponse, err error) { + return enterpriseManagement.CreateAccountGroupWithContext(context.Background(), createAccountGroupOptions) +} + +// CreateAccountGroupWithContext is an alternate form of the CreateAccountGroup method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) CreateAccountGroupWithContext(ctx context.Context, createAccountGroupOptions *CreateAccountGroupOptions) (result *CreateAccountGroupResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createAccountGroupOptions, "createAccountGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createAccountGroupOptions, "createAccountGroupOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/account-groups`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createAccountGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "CreateAccountGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createAccountGroupOptions.Parent != nil { + body["parent"] = createAccountGroupOptions.Parent + } + if createAccountGroupOptions.Name != nil { + body["name"] = createAccountGroupOptions.Name + } + if createAccountGroupOptions.PrimaryContactIamID != nil { + body["primary_contact_iam_id"] = createAccountGroupOptions.PrimaryContactIamID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = enterpriseManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateAccountGroupResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// ListAccountGroups : List account groups +// Retrieve all account groups based on the values that are passed in the query parameters. If no query parameter is +// passed, all of the account groups in the enterprise for which the calling identity has access are returned. +//

You can use pagination parameters to filter the results. The `limit` field can be used to limit the number +// of results that are displayed for this method.

This method ensures that only the account groups that the +// user has access to are returned. Access can be controlled either through a policy on a specific account group, or +// account-level platform services access roles, such as Administrator, Editor, Operator, or Viewer. When you call the +// method with the `enterprise_id`, `parent_account_group_id` or `parent` query parameter, all of the account groups +// that are immediate children of this entity are returned. Authentication is performed on all account groups before +// they are returned to the user to ensure that only those account groups are returned to which the calling identity has +// access. +func (enterpriseManagement *EnterpriseManagementV1) ListAccountGroups(listAccountGroupsOptions *ListAccountGroupsOptions) (result *ListAccountGroupsResponse, response *core.DetailedResponse, err error) { + return enterpriseManagement.ListAccountGroupsWithContext(context.Background(), listAccountGroupsOptions) +} + +// ListAccountGroupsWithContext is an alternate form of the ListAccountGroups method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) ListAccountGroupsWithContext(ctx context.Context, listAccountGroupsOptions *ListAccountGroupsOptions) (result *ListAccountGroupsResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAccountGroupsOptions, "listAccountGroupsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/account-groups`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listAccountGroupsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "ListAccountGroups") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listAccountGroupsOptions.EnterpriseID != nil { + builder.AddQuery("enterprise_id", fmt.Sprint(*listAccountGroupsOptions.EnterpriseID)) + } + if listAccountGroupsOptions.ParentAccountGroupID != nil { + builder.AddQuery("parent_account_group_id", fmt.Sprint(*listAccountGroupsOptions.ParentAccountGroupID)) + } + if listAccountGroupsOptions.NextDocid != nil { + builder.AddQuery("next_docid", fmt.Sprint(*listAccountGroupsOptions.NextDocid)) + } + if listAccountGroupsOptions.Parent != nil { + builder.AddQuery("parent", fmt.Sprint(*listAccountGroupsOptions.Parent)) + } + if listAccountGroupsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listAccountGroupsOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = enterpriseManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListAccountGroupsResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetAccountGroup : Get account group by ID +// Retrieve an account by the `account_group_id` parameter. All data related to the account group is returned only if +// the caller has access to retrieve the account group. 
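//
// For example (a sketch; NewGetAccountGroupOptions is generated later in
// this file):
//
//    options := service.NewGetAccountGroupOptions("account-group-id")
//    accountGroup, _, err := service.GetAccountGroup(options)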
+func (enterpriseManagement *EnterpriseManagementV1) GetAccountGroup(getAccountGroupOptions *GetAccountGroupOptions) (result *AccountGroup, response *core.DetailedResponse, err error) { + return enterpriseManagement.GetAccountGroupWithContext(context.Background(), getAccountGroupOptions) +} + +// GetAccountGroupWithContext is an alternate form of the GetAccountGroup method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) GetAccountGroupWithContext(ctx context.Context, getAccountGroupOptions *GetAccountGroupOptions) (result *AccountGroup, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getAccountGroupOptions, "getAccountGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getAccountGroupOptions, "getAccountGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "account_group_id": *getAccountGroupOptions.AccountGroupID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/account-groups/{account_group_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getAccountGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "GetAccountGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = enterpriseManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAccountGroup) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateAccountGroup : Update an account group +// Update the name or IAM ID of the primary contact for an existing account group. The new primary contact must already +// be a user in the enterprise account. 
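//
// For example (a sketch; NewUpdateAccountGroupOptions and its setters are
// generated later in this file):
//
//    options := service.NewUpdateAccountGroupOptions("account-group-id")
//    options.SetName("Renamed Account Group")
//    options.SetPrimaryContactIamID("IBMid-0123ABC")
//    response, err := service.UpdateAccountGroup(options)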
+func (enterpriseManagement *EnterpriseManagementV1) UpdateAccountGroup(updateAccountGroupOptions *UpdateAccountGroupOptions) (response *core.DetailedResponse, err error) { + return enterpriseManagement.UpdateAccountGroupWithContext(context.Background(), updateAccountGroupOptions) +} + +// UpdateAccountGroupWithContext is an alternate form of the UpdateAccountGroup method which supports a Context parameter +func (enterpriseManagement *EnterpriseManagementV1) UpdateAccountGroupWithContext(ctx context.Context, updateAccountGroupOptions *UpdateAccountGroupOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateAccountGroupOptions, "updateAccountGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateAccountGroupOptions, "updateAccountGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "account_group_id": *updateAccountGroupOptions.AccountGroupID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/account-groups/{account_group_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateAccountGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("enterprise_management", "V1", "UpdateAccountGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateAccountGroupOptions.Name != nil { + body["name"] = updateAccountGroupOptions.Name + } + if updateAccountGroupOptions.PrimaryContactIamID != nil { + body["primary_contact_iam_id"] = updateAccountGroupOptions.PrimaryContactIamID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = enterpriseManagement.Service.Request(request, nil) + + return +} + +// Account : An account resource. +type Account struct { + // The URL of the account. + URL *string `json:"url,omitempty"` + + // The account ID. + ID *string `json:"id,omitempty"` + + // The Cloud Resource Name (CRN) of the account. + CRN *string `json:"crn,omitempty"` + + // The CRN of the parent of the account. + Parent *string `json:"parent,omitempty"` + + // The enterprise account ID. + EnterpriseAccountID *string `json:"enterprise_account_id,omitempty"` + + // The enterprise ID that the account is a part of. + EnterpriseID *string `json:"enterprise_id,omitempty"` + + // The path from the enterprise to this particular account. + EnterprisePath *string `json:"enterprise_path,omitempty"` + + // The name of the account. + Name *string `json:"name,omitempty"` + + // The state of the account. + State *string `json:"state,omitempty"` + + // The IAM ID of the owner of the account. + OwnerIamID *string `json:"owner_iam_id,omitempty"` + + // The type of account - whether it is free or paid. + Paid *bool `json:"paid,omitempty"` + + // The email address of the owner of the account. + OwnerEmail *string `json:"owner_email,omitempty"` + + // The flag to indicate whether the account is an enterprise account or not. + IsEnterpriseAccount *bool `json:"is_enterprise_account,omitempty"` + + // The time stamp at which the account was created. 
+ CreatedAt *strfmt.DateTime `json:"created_at,omitempty"` + + // The IAM ID of the user or service that created the account. + CreatedBy *string `json:"created_by,omitempty"` + + // The time stamp at which the account was last updated. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` + + // The IAM ID of the user or service that updated the account. + UpdatedBy *string `json:"updated_by,omitempty"` +} + +// UnmarshalAccount unmarshals an instance of Account from the specified map of raw messages. +func UnmarshalAccount(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Account) + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "parent", &obj.Parent) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enterprise_account_id", &obj.EnterpriseAccountID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enterprise_id", &obj.EnterpriseID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enterprise_path", &obj.EnterprisePath) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "owner_iam_id", &obj.OwnerIamID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "paid", &obj.Paid) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "owner_email", &obj.OwnerEmail) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "is_enterprise_account", &obj.IsEnterpriseAccount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AccountGroup : An account group resource. +type AccountGroup struct { + // The URL of the account group. + URL *string `json:"url,omitempty"` + + // The account group ID. + ID *string `json:"id,omitempty"` + + // The Cloud Resource Name (CRN) of the account group. + CRN *string `json:"crn,omitempty"` + + // The CRN of the parent of the account group. + Parent *string `json:"parent,omitempty"` + + // The enterprise account ID. + EnterpriseAccountID *string `json:"enterprise_account_id,omitempty"` + + // The enterprise ID that the account group is a part of. + EnterpriseID *string `json:"enterprise_id,omitempty"` + + // The path from the enterprise to this particular account group. + EnterprisePath *string `json:"enterprise_path,omitempty"` + + // The name of the account group. + Name *string `json:"name,omitempty"` + + // The state of the account group. + State *string `json:"state,omitempty"` + + // The IAM ID of the primary contact of the account group. + PrimaryContactIamID *string `json:"primary_contact_iam_id,omitempty"` + + // The email address of the primary contact of the account group. 
+ PrimaryContactEmail *string `json:"primary_contact_email,omitempty"` + + // The time stamp at which the account group was created. + CreatedAt *strfmt.DateTime `json:"created_at,omitempty"` + + // The IAM ID of the user or service that created the account group. + CreatedBy *string `json:"created_by,omitempty"` + + // The time stamp at which the account group was last updated. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` + + // The IAM ID of the user or service that updated the account group. + UpdatedBy *string `json:"updated_by,omitempty"` +} + +// UnmarshalAccountGroup unmarshals an instance of AccountGroup from the specified map of raw messages. +func UnmarshalAccountGroup(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AccountGroup) + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "parent", &obj.Parent) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enterprise_account_id", &obj.EnterpriseAccountID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enterprise_id", &obj.EnterpriseID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enterprise_path", &obj.EnterprisePath) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_contact_iam_id", &obj.PrimaryContactIamID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_contact_email", &obj.PrimaryContactEmail) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CreateAccountGroupOptions : The CreateAccountGroup options. +type CreateAccountGroupOptions struct { + // The CRN of the parent under which the account group will be created. The parent can be an existing account group or + // the enterprise itself. + Parent *string `validate:"required"` + + // The name of the account group. This field must have 3 - 60 characters. + Name *string `validate:"required"` + + // The IAM ID of the primary contact for this account group, such as `IBMid-0123ABC`. The IAM ID must already exist. 
+ PrimaryContactIamID *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateAccountGroupOptions : Instantiate CreateAccountGroupOptions +func (*EnterpriseManagementV1) NewCreateAccountGroupOptions(parent string, name string, primaryContactIamID string) *CreateAccountGroupOptions { + return &CreateAccountGroupOptions{ + Parent: core.StringPtr(parent), + Name: core.StringPtr(name), + PrimaryContactIamID: core.StringPtr(primaryContactIamID), + } +} + +// SetParent : Allow user to set Parent +func (options *CreateAccountGroupOptions) SetParent(parent string) *CreateAccountGroupOptions { + options.Parent = core.StringPtr(parent) + return options +} + +// SetName : Allow user to set Name +func (options *CreateAccountGroupOptions) SetName(name string) *CreateAccountGroupOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetPrimaryContactIamID : Allow user to set PrimaryContactIamID +func (options *CreateAccountGroupOptions) SetPrimaryContactIamID(primaryContactIamID string) *CreateAccountGroupOptions { + options.PrimaryContactIamID = core.StringPtr(primaryContactIamID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateAccountGroupOptions) SetHeaders(param map[string]string) *CreateAccountGroupOptions { + options.Headers = param + return options +} + +// CreateAccountGroupResponse : A newly-created account group. +type CreateAccountGroupResponse struct { + // The ID of the account group entity that was created. + AccountGroupID *string `json:"account_group_id,omitempty"` +} + +// UnmarshalCreateAccountGroupResponse unmarshals an instance of CreateAccountGroupResponse from the specified map of raw messages. +func UnmarshalCreateAccountGroupResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CreateAccountGroupResponse) + err = core.UnmarshalPrimitive(m, "account_group_id", &obj.AccountGroupID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CreateAccountOptions : The CreateAccount options. +type CreateAccountOptions struct { + // The CRN of the parent under which the account will be created. The parent can be an existing account group or the + // enterprise itself. + Parent *string `validate:"required"` + + // The name of the account. This field must have 3 - 60 characters. + Name *string `validate:"required"` + + // The IAM ID of the account owner, such as `IBMid-0123ABC`. The IAM ID must already exist. 
+ OwnerIamID *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateAccountOptions : Instantiate CreateAccountOptions +func (*EnterpriseManagementV1) NewCreateAccountOptions(parent string, name string, ownerIamID string) *CreateAccountOptions { + return &CreateAccountOptions{ + Parent: core.StringPtr(parent), + Name: core.StringPtr(name), + OwnerIamID: core.StringPtr(ownerIamID), + } +} + +// SetParent : Allow user to set Parent +func (options *CreateAccountOptions) SetParent(parent string) *CreateAccountOptions { + options.Parent = core.StringPtr(parent) + return options +} + +// SetName : Allow user to set Name +func (options *CreateAccountOptions) SetName(name string) *CreateAccountOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetOwnerIamID : Allow user to set OwnerIamID +func (options *CreateAccountOptions) SetOwnerIamID(ownerIamID string) *CreateAccountOptions { + options.OwnerIamID = core.StringPtr(ownerIamID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateAccountOptions) SetHeaders(param map[string]string) *CreateAccountOptions { + options.Headers = param + return options +} + +// CreateAccountResponse : A newly-created account. +type CreateAccountResponse struct { + // The ID of the account entity that was created. + AccountID *string `json:"account_id,omitempty"` +} + +// UnmarshalCreateAccountResponse unmarshals an instance of CreateAccountResponse from the specified map of raw messages. +func UnmarshalCreateAccountResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CreateAccountResponse) + err = core.UnmarshalPrimitive(m, "account_id", &obj.AccountID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CreateEnterpriseOptions : The CreateEnterprise options. +type CreateEnterpriseOptions struct { + // The ID of the account that is used to create the enterprise. + SourceAccountID *string `validate:"required"` + + // The name of the enterprise. This field must have 3 - 60 characters. + Name *string `validate:"required"` + + // The IAM ID of the enterprise primary contact, such as `IBMid-0123ABC`. The IAM ID must already exist. + PrimaryContactIamID *string `validate:"required"` + + // A domain or subdomain for the enterprise, such as `example.com` or `my.example.com`. 
+ Domain *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateEnterpriseOptions : Instantiate CreateEnterpriseOptions +func (*EnterpriseManagementV1) NewCreateEnterpriseOptions(sourceAccountID string, name string, primaryContactIamID string) *CreateEnterpriseOptions { + return &CreateEnterpriseOptions{ + SourceAccountID: core.StringPtr(sourceAccountID), + Name: core.StringPtr(name), + PrimaryContactIamID: core.StringPtr(primaryContactIamID), + } +} + +// SetSourceAccountID : Allow user to set SourceAccountID +func (options *CreateEnterpriseOptions) SetSourceAccountID(sourceAccountID string) *CreateEnterpriseOptions { + options.SourceAccountID = core.StringPtr(sourceAccountID) + return options +} + +// SetName : Allow user to set Name +func (options *CreateEnterpriseOptions) SetName(name string) *CreateEnterpriseOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetPrimaryContactIamID : Allow user to set PrimaryContactIamID +func (options *CreateEnterpriseOptions) SetPrimaryContactIamID(primaryContactIamID string) *CreateEnterpriseOptions { + options.PrimaryContactIamID = core.StringPtr(primaryContactIamID) + return options +} + +// SetDomain : Allow user to set Domain +func (options *CreateEnterpriseOptions) SetDomain(domain string) *CreateEnterpriseOptions { + options.Domain = core.StringPtr(domain) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateEnterpriseOptions) SetHeaders(param map[string]string) *CreateEnterpriseOptions { + options.Headers = param + return options +} + +// CreateEnterpriseResponse : The response from calling create enterprise. +type CreateEnterpriseResponse struct { + // The ID of the enterprise entity that was created. This entity is the root of the hierarchy. + EnterpriseID *string `json:"enterprise_id,omitempty"` + + // The ID of the enterprise account that was created. The enterprise account is used to manage billing and access to + // the enterprise management. + EnterpriseAccountID *string `json:"enterprise_account_id,omitempty"` +} + +// UnmarshalCreateEnterpriseResponse unmarshals an instance of CreateEnterpriseResponse from the specified map of raw messages. +func UnmarshalCreateEnterpriseResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CreateEnterpriseResponse) + err = core.UnmarshalPrimitive(m, "enterprise_id", &obj.EnterpriseID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enterprise_account_id", &obj.EnterpriseAccountID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Enterprise : An enterprise resource. +type Enterprise struct { + // The URL of the enterprise. + URL *string `json:"url,omitempty"` + + // The enterprise ID. + ID *string `json:"id,omitempty"` + + // The enterprise account ID. + EnterpriseAccountID *string `json:"enterprise_account_id,omitempty"` + + // The Cloud Resource Name (CRN) of the enterprise. + CRN *string `json:"crn,omitempty"` + + // The name of the enterprise. + Name *string `json:"name,omitempty"` + + // The domain of the enterprise. + Domain *string `json:"domain,omitempty"` + + // The state of the enterprise. + State *string `json:"state,omitempty"` + + // The IAM ID of the primary contact of the enterprise, such as `IBMid-0123ABC`. + PrimaryContactIamID *string `json:"primary_contact_iam_id,omitempty"` + + // The email of the primary contact of the enterprise. 
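// Usage sketch for CreateEnterpriseOptions, following the same pattern as the
// account-group example above: required arguments go through the constructor,
// optional ones through fluent setters. The CreateEnterprise method is
// defined earlier in this file; sourceAccountID is a placeholder.
//
//	options := service.NewCreateEnterpriseOptions(sourceAccountID, "Example Corp", "IBMid-0123ABC").
//		SetDomain("example.com") // optional
//	result, _, err := service.CreateEnterprise(options)
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(*result.EnterpriseID, *result.EnterpriseAccountID)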
+ PrimaryContactEmail *string `json:"primary_contact_email,omitempty"` + + // The time stamp at which the enterprise was created. + CreatedAt *strfmt.DateTime `json:"created_at,omitempty"` + + // The IAM ID of the user or service that created the enterprise. + CreatedBy *string `json:"created_by,omitempty"` + + // The time stamp at which the enterprise was last updated. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` + + // The IAM ID of the user or service that updated the enterprise. + UpdatedBy *string `json:"updated_by,omitempty"` +} + +// UnmarshalEnterprise unmarshals an instance of Enterprise from the specified map of raw messages. +func UnmarshalEnterprise(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Enterprise) + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "enterprise_account_id", &obj.EnterpriseAccountID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "domain", &obj.Domain) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_contact_iam_id", &obj.PrimaryContactIamID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_contact_email", &obj.PrimaryContactEmail) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetAccountGroupOptions : The GetAccountGroup options. +type GetAccountGroupOptions struct { + // The ID of the account group to retrieve. + AccountGroupID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAccountGroupOptions : Instantiate GetAccountGroupOptions +func (*EnterpriseManagementV1) NewGetAccountGroupOptions(accountGroupID string) *GetAccountGroupOptions { + return &GetAccountGroupOptions{ + AccountGroupID: core.StringPtr(accountGroupID), + } +} + +// SetAccountGroupID : Allow user to set AccountGroupID +func (options *GetAccountGroupOptions) SetAccountGroupID(accountGroupID string) *GetAccountGroupOptions { + options.AccountGroupID = core.StringPtr(accountGroupID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetAccountGroupOptions) SetHeaders(param map[string]string) *GetAccountGroupOptions { + options.Headers = param + return options +} + +// GetAccountOptions : The GetAccount options. +type GetAccountOptions struct { + // The ID of the account to retrieve. 
+ AccountID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAccountOptions : Instantiate GetAccountOptions +func (*EnterpriseManagementV1) NewGetAccountOptions(accountID string) *GetAccountOptions { + return &GetAccountOptions{ + AccountID: core.StringPtr(accountID), + } +} + +// SetAccountID : Allow user to set AccountID +func (options *GetAccountOptions) SetAccountID(accountID string) *GetAccountOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetAccountOptions) SetHeaders(param map[string]string) *GetAccountOptions { + options.Headers = param + return options +} + +// GetEnterpriseOptions : The GetEnterprise options. +type GetEnterpriseOptions struct { + // The ID of the enterprise to retrieve. + EnterpriseID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetEnterpriseOptions : Instantiate GetEnterpriseOptions +func (*EnterpriseManagementV1) NewGetEnterpriseOptions(enterpriseID string) *GetEnterpriseOptions { + return &GetEnterpriseOptions{ + EnterpriseID: core.StringPtr(enterpriseID), + } +} + +// SetEnterpriseID : Allow user to set EnterpriseID +func (options *GetEnterpriseOptions) SetEnterpriseID(enterpriseID string) *GetEnterpriseOptions { + options.EnterpriseID = core.StringPtr(enterpriseID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetEnterpriseOptions) SetHeaders(param map[string]string) *GetEnterpriseOptions { + options.Headers = param + return options +} + +// ImportAccountToEnterpriseOptions : The ImportAccountToEnterprise options. +type ImportAccountToEnterpriseOptions struct { + // The ID of the enterprise to import the stand-alone account into. + EnterpriseID *string `validate:"required,ne="` + + // The ID of the existing stand-alone account to be imported. + AccountID *string `validate:"required,ne="` + + // The CRN of the expected parent of the imported account. The parent is the enterprise or account group that the + // account is added to. + Parent *string + + // The ID of the [billing unit](/apidocs/enterprise-apis/billing-unit) to use for billing this account in the + // enterprise. 
+ BillingUnitID *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewImportAccountToEnterpriseOptions : Instantiate ImportAccountToEnterpriseOptions +func (*EnterpriseManagementV1) NewImportAccountToEnterpriseOptions(enterpriseID string, accountID string) *ImportAccountToEnterpriseOptions { + return &ImportAccountToEnterpriseOptions{ + EnterpriseID: core.StringPtr(enterpriseID), + AccountID: core.StringPtr(accountID), + } +} + +// SetEnterpriseID : Allow user to set EnterpriseID +func (options *ImportAccountToEnterpriseOptions) SetEnterpriseID(enterpriseID string) *ImportAccountToEnterpriseOptions { + options.EnterpriseID = core.StringPtr(enterpriseID) + return options +} + +// SetAccountID : Allow user to set AccountID +func (options *ImportAccountToEnterpriseOptions) SetAccountID(accountID string) *ImportAccountToEnterpriseOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetParent : Allow user to set Parent +func (options *ImportAccountToEnterpriseOptions) SetParent(parent string) *ImportAccountToEnterpriseOptions { + options.Parent = core.StringPtr(parent) + return options +} + +// SetBillingUnitID : Allow user to set BillingUnitID +func (options *ImportAccountToEnterpriseOptions) SetBillingUnitID(billingUnitID string) *ImportAccountToEnterpriseOptions { + options.BillingUnitID = core.StringPtr(billingUnitID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ImportAccountToEnterpriseOptions) SetHeaders(param map[string]string) *ImportAccountToEnterpriseOptions { + options.Headers = param + return options +} + +// ListAccountGroupsOptions : The ListAccountGroups options. +type ListAccountGroupsOptions struct { + // Get account groups that are either immediate children or are a part of the hierarchy for a given enterprise ID. + EnterpriseID *string + + // Get account groups that are either immediate children or are a part of the hierarchy for a given account group ID. + ParentAccountGroupID *string + + // The first item to be returned in the page of results. This value can be obtained from the next_url property from the + // previous call of the operation. If not specified, then the first page of results is returned. + NextDocid *string + + // Get account groups that are either immediate children or are a part of the hierarchy for a given parent CRN. + Parent *string + + // Return results up to this limit. Valid values are between `0` and `100`. 
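// Usage sketch for ImportAccountToEnterpriseOptions. The enterprise and
// account IDs are path parameters, so both are required; Parent and
// BillingUnitID are optional. The ImportAccountToEnterprise method is defined
// earlier in this file and, by the SDK convention for operations without a
// response body, is assumed to return only the detailed response and an error.
//
//	options := service.NewImportAccountToEnterpriseOptions(enterpriseID, standaloneAccountID).
//		SetParent(parentCRN) // optional: the account group (or enterprise) to land in
//	response, err := service.ImportAccountToEnterprise(options)
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(response.StatusCode)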
+	Limit *int64
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListAccountGroupsOptions : Instantiate ListAccountGroupsOptions
+func (*EnterpriseManagementV1) NewListAccountGroupsOptions() *ListAccountGroupsOptions {
+	return &ListAccountGroupsOptions{}
+}
+
+// SetEnterpriseID : Allow user to set EnterpriseID
+func (options *ListAccountGroupsOptions) SetEnterpriseID(enterpriseID string) *ListAccountGroupsOptions {
+	options.EnterpriseID = core.StringPtr(enterpriseID)
+	return options
+}
+
+// SetParentAccountGroupID : Allow user to set ParentAccountGroupID
+func (options *ListAccountGroupsOptions) SetParentAccountGroupID(parentAccountGroupID string) *ListAccountGroupsOptions {
+	options.ParentAccountGroupID = core.StringPtr(parentAccountGroupID)
+	return options
+}
+
+// SetNextDocid : Allow user to set NextDocid
+func (options *ListAccountGroupsOptions) SetNextDocid(nextDocid string) *ListAccountGroupsOptions {
+	options.NextDocid = core.StringPtr(nextDocid)
+	return options
+}
+
+// SetParent : Allow user to set Parent
+func (options *ListAccountGroupsOptions) SetParent(parent string) *ListAccountGroupsOptions {
+	options.Parent = core.StringPtr(parent)
+	return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListAccountGroupsOptions) SetLimit(limit int64) *ListAccountGroupsOptions {
+	options.Limit = core.Int64Ptr(limit)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListAccountGroupsOptions) SetHeaders(param map[string]string) *ListAccountGroupsOptions {
+	options.Headers = param
+	return options
+}
+
+// ListAccountGroupsResponse : The list_account_groups operation response.
+type ListAccountGroupsResponse struct {
+	// The number of account groups returned from calling list account groups.
+	RowsCount *int64 `json:"rows_count,omitempty"`
+
+	// A string that represents the link to the next page of results.
+	NextURL *string `json:"next_url,omitempty"`
+
+	// A list of account groups.
+	Resources []AccountGroup `json:"resources,omitempty"`
+}
+
+// UnmarshalListAccountGroupsResponse unmarshals an instance of ListAccountGroupsResponse from the specified map of raw messages.
+func UnmarshalListAccountGroupsResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ListAccountGroupsResponse)
+	err = core.UnmarshalPrimitive(m, "rows_count", &obj.RowsCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "next_url", &obj.NextURL)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalAccountGroup)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ListAccountsOptions : The ListAccounts options.
+type ListAccountsOptions struct {
+	// Get accounts that are either immediate children or are a part of the hierarchy for a given enterprise ID.
+	EnterpriseID *string
+
+	// Get accounts that are either immediate children or are a part of the hierarchy for a given account group ID.
+	AccountGroupID *string
+
+	// The first item to be returned in the page of results. This value can be obtained from the next_url property from the
+	// previous call of the operation. If not specified, then the first page of results is returned.
+	NextDocid *string
+
+	// Get accounts that are either immediate children or are a part of the hierarchy for a given parent CRN.
+	Parent *string
+
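// Pagination sketch for ListAccountGroups (response model above). Per the
// NextDocid doc comments in these options types, the cursor for the next page
// is carried in the next_url property of the previous response; the
// next_docid query-parameter name used below is an assumption based on that
// field's JSON name. The ListAccountGroups method is defined earlier in this
// file, and the loop needs net/url.
//
//	options := service.NewListAccountGroupsOptions().
//		SetEnterpriseID(enterpriseID).
//		SetLimit(50)
//	for {
//		page, _, err := service.ListAccountGroups(options)
//		if err != nil {
//			panic(err)
//		}
//		for _, group := range page.Resources {
//			fmt.Println(*group.ID, *group.Name)
//		}
//		if page.NextURL == nil {
//			break
//		}
//		next, err := url.Parse(*page.NextURL)
//		if err != nil {
//			panic(err)
//		}
//		options.SetNextDocid(next.Query().Get("next_docid"))
//	}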
+	// Return results up to this limit. Valid values are between `0` and `100`.
+	Limit *int64
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListAccountsOptions : Instantiate ListAccountsOptions
+func (*EnterpriseManagementV1) NewListAccountsOptions() *ListAccountsOptions {
+	return &ListAccountsOptions{}
+}
+
+// SetEnterpriseID : Allow user to set EnterpriseID
+func (options *ListAccountsOptions) SetEnterpriseID(enterpriseID string) *ListAccountsOptions {
+	options.EnterpriseID = core.StringPtr(enterpriseID)
+	return options
+}
+
+// SetAccountGroupID : Allow user to set AccountGroupID
+func (options *ListAccountsOptions) SetAccountGroupID(accountGroupID string) *ListAccountsOptions {
+	options.AccountGroupID = core.StringPtr(accountGroupID)
+	return options
+}
+
+// SetNextDocid : Allow user to set NextDocid
+func (options *ListAccountsOptions) SetNextDocid(nextDocid string) *ListAccountsOptions {
+	options.NextDocid = core.StringPtr(nextDocid)
+	return options
+}
+
+// SetParent : Allow user to set Parent
+func (options *ListAccountsOptions) SetParent(parent string) *ListAccountsOptions {
+	options.Parent = core.StringPtr(parent)
+	return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListAccountsOptions) SetLimit(limit int64) *ListAccountsOptions {
+	options.Limit = core.Int64Ptr(limit)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListAccountsOptions) SetHeaders(param map[string]string) *ListAccountsOptions {
+	options.Headers = param
+	return options
+}
+
+// ListAccountsResponse : The list_accounts operation response.
+type ListAccountsResponse struct {
+	// The number of accounts returned from calling list accounts.
+	RowsCount *int64 `json:"rows_count,omitempty"`
+
+	// A string that represents the link to the next page of results.
+	NextURL *string `json:"next_url,omitempty"`
+
+	// A list of accounts.
+	Resources []Account `json:"resources,omitempty"`
+}
+
+// UnmarshalListAccountsResponse unmarshals an instance of ListAccountsResponse from the specified map of raw messages.
+func UnmarshalListAccountsResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ListAccountsResponse)
+	err = core.UnmarshalPrimitive(m, "rows_count", &obj.RowsCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "next_url", &obj.NextURL)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalAccount)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// ListEnterprisesOptions : The ListEnterprises options.
+type ListEnterprisesOptions struct {
+	// Get enterprises for a given enterprise account ID.
+	EnterpriseAccountID *string
+
+	// Get enterprises for a given account group ID.
+	AccountGroupID *string
+
+	// Get enterprises for a given account ID.
+	AccountID *string
+
+	// The first item to be returned in the page of results. This value can be obtained from the next_url property from the
+	// previous call of the operation. If not specified, then the first page of results is returned.
+	NextDocid *string
+
+	// Return results up to this limit. Valid values are between `0` and `100`.
+	Limit *int64
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListEnterprisesOptions : Instantiate ListEnterprisesOptions
+func (*EnterpriseManagementV1) NewListEnterprisesOptions() *ListEnterprisesOptions {
+	return &ListEnterprisesOptions{}
+}
+
+// SetEnterpriseAccountID : Allow user to set EnterpriseAccountID
+func (options *ListEnterprisesOptions) SetEnterpriseAccountID(enterpriseAccountID string) *ListEnterprisesOptions {
+	options.EnterpriseAccountID = core.StringPtr(enterpriseAccountID)
+	return options
+}
+
+// SetAccountGroupID : Allow user to set AccountGroupID
+func (options *ListEnterprisesOptions) SetAccountGroupID(accountGroupID string) *ListEnterprisesOptions {
+	options.AccountGroupID = core.StringPtr(accountGroupID)
+	return options
+}
+
+// SetAccountID : Allow user to set AccountID
+func (options *ListEnterprisesOptions) SetAccountID(accountID string) *ListEnterprisesOptions {
+	options.AccountID = core.StringPtr(accountID)
+	return options
+}
+
+// SetNextDocid : Allow user to set NextDocid
+func (options *ListEnterprisesOptions) SetNextDocid(nextDocid string) *ListEnterprisesOptions {
+	options.NextDocid = core.StringPtr(nextDocid)
+	return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListEnterprisesOptions) SetLimit(limit int64) *ListEnterprisesOptions {
+	options.Limit = core.Int64Ptr(limit)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListEnterprisesOptions) SetHeaders(param map[string]string) *ListEnterprisesOptions {
+	options.Headers = param
+	return options
+}
+
+// ListEnterprisesResponse : The response from calling list enterprises.
+type ListEnterprisesResponse struct {
+	// The number of enterprises returned from calling list enterprises.
+	RowsCount *int64 `json:"rows_count,omitempty"`
+
+	// A string that represents the link to the next page of results.
+	NextURL *string `json:"next_url,omitempty"`
+
+	// A list of enterprise objects.
+	Resources []Enterprise `json:"resources,omitempty"`
+}
+
+// UnmarshalListEnterprisesResponse unmarshals an instance of ListEnterprisesResponse from the specified map of raw messages.
+func UnmarshalListEnterprisesResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(ListEnterprisesResponse)
+	err = core.UnmarshalPrimitive(m, "rows_count", &obj.RowsCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "next_url", &obj.NextURL)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalEnterprise)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// UpdateAccountGroupOptions : The UpdateAccountGroup options.
+type UpdateAccountGroupOptions struct {
+	// The ID of the account group to update.
+	AccountGroupID *string `validate:"required,ne="`
+
+	// The new name of the account group. This field must have 3 - 60 characters.
+	Name *string
+
+	// The IAM ID of the user to be the new primary contact for the account group.
+	PrimaryContactIamID *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewUpdateAccountGroupOptions : Instantiate UpdateAccountGroupOptions
+func (*EnterpriseManagementV1) NewUpdateAccountGroupOptions(accountGroupID string) *UpdateAccountGroupOptions {
+	return &UpdateAccountGroupOptions{
+		AccountGroupID: core.StringPtr(accountGroupID),
+	}
+}
+
+// SetAccountGroupID : Allow user to set AccountGroupID
+func (options *UpdateAccountGroupOptions) SetAccountGroupID(accountGroupID string) *UpdateAccountGroupOptions {
+	options.AccountGroupID = core.StringPtr(accountGroupID)
+	return options
+}
+
+// SetName : Allow user to set Name
+func (options *UpdateAccountGroupOptions) SetName(name string) *UpdateAccountGroupOptions {
+	options.Name = core.StringPtr(name)
+	return options
+}
+
+// SetPrimaryContactIamID : Allow user to set PrimaryContactIamID
+func (options *UpdateAccountGroupOptions) SetPrimaryContactIamID(primaryContactIamID string) *UpdateAccountGroupOptions {
+	options.PrimaryContactIamID = core.StringPtr(primaryContactIamID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateAccountGroupOptions) SetHeaders(param map[string]string) *UpdateAccountGroupOptions {
+	options.Headers = param
+	return options
+}
+
+// UpdateAccountOptions : The UpdateAccount options.
+type UpdateAccountOptions struct {
+	// The ID of the account to update.
+	AccountID *string `validate:"required,ne="`
+
+	// The CRN of the new parent within the enterprise.
+	Parent *string `validate:"required"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewUpdateAccountOptions : Instantiate UpdateAccountOptions
+func (*EnterpriseManagementV1) NewUpdateAccountOptions(accountID string, parent string) *UpdateAccountOptions {
+	return &UpdateAccountOptions{
+		AccountID: core.StringPtr(accountID),
+		Parent: core.StringPtr(parent),
+	}
+}
+
+// SetAccountID : Allow user to set AccountID
+func (options *UpdateAccountOptions) SetAccountID(accountID string) *UpdateAccountOptions {
+	options.AccountID = core.StringPtr(accountID)
+	return options
+}
+
+// SetParent : Allow user to set Parent
+func (options *UpdateAccountOptions) SetParent(parent string) *UpdateAccountOptions {
+	options.Parent = core.StringPtr(parent)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateAccountOptions) SetHeaders(param map[string]string) *UpdateAccountOptions {
+	options.Headers = param
+	return options
+}
+
+// UpdateEnterpriseOptions : The UpdateEnterprise options.
+type UpdateEnterpriseOptions struct {
+	// The ID of the enterprise to update.
+	EnterpriseID *string `validate:"required,ne="`
+
+	// The new name of the enterprise. This field must have 3 - 60 characters.
+	Name *string
+
+	// The new domain of the enterprise. This field has a limit of 60 characters.
+	Domain *string
+
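// Usage sketch for UpdateAccountOptions (defined above): moving an account
// under a different parent in the hierarchy. The UpdateAccount method is
// defined earlier in this file and is assumed to return only the detailed
// response and an error, as the operation has no response body; newParentCRN
// is a placeholder.
//
//	options := service.NewUpdateAccountOptions(accountID, newParentCRN)
//	response, err := service.UpdateAccount(options)
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(response.StatusCode)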
+	// The IAM ID of the user to be the new primary contact for the enterprise.
+	PrimaryContactIamID *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewUpdateEnterpriseOptions : Instantiate UpdateEnterpriseOptions
+func (*EnterpriseManagementV1) NewUpdateEnterpriseOptions(enterpriseID string) *UpdateEnterpriseOptions {
+	return &UpdateEnterpriseOptions{
+		EnterpriseID: core.StringPtr(enterpriseID),
+	}
+}
+
+// SetEnterpriseID : Allow user to set EnterpriseID
+func (options *UpdateEnterpriseOptions) SetEnterpriseID(enterpriseID string) *UpdateEnterpriseOptions {
+	options.EnterpriseID = core.StringPtr(enterpriseID)
+	return options
+}
+
+// SetName : Allow user to set Name
+func (options *UpdateEnterpriseOptions) SetName(name string) *UpdateEnterpriseOptions {
+	options.Name = core.StringPtr(name)
+	return options
+}
+
+// SetDomain : Allow user to set Domain
+func (options *UpdateEnterpriseOptions) SetDomain(domain string) *UpdateEnterpriseOptions {
+	options.Domain = core.StringPtr(domain)
+	return options
+}
+
+// SetPrimaryContactIamID : Allow user to set PrimaryContactIamID
+func (options *UpdateEnterpriseOptions) SetPrimaryContactIamID(primaryContactIamID string) *UpdateEnterpriseOptions {
+	options.PrimaryContactIamID = core.StringPtr(primaryContactIamID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateEnterpriseOptions) SetHeaders(param map[string]string) *UpdateEnterpriseOptions {
+	options.Headers = param
+	return options
+}
diff --git a/vendor/github.com/IBM/platform-services-go-sdk/globaltaggingv1/global_tagging_v1.go b/vendor/github.com/IBM/platform-services-go-sdk/globaltaggingv1/global_tagging_v1.go
new file mode 100644
index 00000000000..d04ba9f3c7b
--- /dev/null
+++ b/vendor/github.com/IBM/platform-services-go-sdk/globaltaggingv1/global_tagging_v1.go
@@ -0,0 +1,1481 @@
+/**
+ * (C) Copyright IBM Corp. 2021.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-a8493a65-20210115-083246
+ */
+
+// Package globaltaggingv1 : Operations and models for the GlobalTaggingV1 service
+package globaltaggingv1
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/IBM/go-sdk-core/v5/core"
+	common "github.com/IBM/platform-services-go-sdk/common"
+)
+
+// GlobalTaggingV1 : Manage your tags with the Tagging API in IBM Cloud. You can attach, detach, or delete a tag, or
+// list all tags in your billing account with the Tagging API. The tag name must be unique within a billing account.
+// You can create tags in two formats: `key:value` or `label`. The tagging API supports three types of tag: `user`,
+// `service`, and `access` tags. `service` tags cannot be attached to IMS resources. `service` tags must be in the form
+// `service_prefix:tag_label` where `service_prefix` identifies the service owning the tag. `access` tags cannot be
+// attached to IMS and Cloud Foundry resources. They must be in the form `key:value`.
+// +// Version: 1.2.0 +type GlobalTaggingV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://tags.global-search-tagging.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "global_tagging" + +// GlobalTaggingV1Options : Service options +type GlobalTaggingV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewGlobalTaggingV1UsingExternalConfig : constructs an instance of GlobalTaggingV1 with passed in options and external configuration. +func NewGlobalTaggingV1UsingExternalConfig(options *GlobalTaggingV1Options) (globalTagging *GlobalTaggingV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + globalTagging, err = NewGlobalTaggingV1(options) + if err != nil { + return + } + + err = globalTagging.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = globalTagging.Service.SetServiceURL(options.URL) + } + return +} + +// NewGlobalTaggingV1 : constructs an instance of GlobalTaggingV1 with passed in options. +func NewGlobalTaggingV1(options *GlobalTaggingV1Options) (service *GlobalTaggingV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &GlobalTaggingV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "globalTagging" suitable for processing requests. +func (globalTagging *GlobalTaggingV1) Clone() *GlobalTaggingV1 { + if core.IsNil(globalTagging) { + return nil + } + clone := *globalTagging + clone.Service = globalTagging.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (globalTagging *GlobalTaggingV1) SetServiceURL(url string) error { + return globalTagging.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (globalTagging *GlobalTaggingV1) GetServiceURL() string { + return globalTagging.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (globalTagging *GlobalTaggingV1) SetDefaultHeaders(headers http.Header) { + globalTagging.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (globalTagging *GlobalTaggingV1) SetEnableGzipCompression(enableGzip bool) { + globalTagging.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (globalTagging *GlobalTaggingV1) GetEnableGzipCompression() bool { + return globalTagging.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. 
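// Construction and retry sketch for this service. External configuration is
// resolved under the "global_tagging" service name; the usual go-sdk-core
// convention would read GLOBAL_TAGGING_AUTH_TYPE and GLOBAL_TAGGING_APIKEY
// from the environment or a credentials file, which is an assumption here, as
// is the resourceCRN placeholder and the TagList model's TotalCount field
// (the model is defined later in this file, outside this hunk).
//
//	service, err := NewGlobalTaggingV1UsingExternalConfig(&GlobalTaggingV1Options{})
//	if err != nil {
//		panic(err)
//	}
//	// Retry failed requests up to 3 times, waiting at most 30s between tries.
//	service.EnableRetries(3, 30*time.Second)
//	tags, _, err := service.ListTags(service.NewListTagsOptions().
//		SetAttachedTo(resourceCRN).
//		SetTagType("user"))
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(*tags.TotalCount)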
+func (globalTagging *GlobalTaggingV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + globalTagging.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (globalTagging *GlobalTaggingV1) DisableRetries() { + globalTagging.Service.DisableRetries() +} + +// ListTags : Get all tags +// Lists all tags in a billing account. Use the `attached_to` parameter to return the list of tags attached to the +// specified resource. +func (globalTagging *GlobalTaggingV1) ListTags(listTagsOptions *ListTagsOptions) (result *TagList, response *core.DetailedResponse, err error) { + return globalTagging.ListTagsWithContext(context.Background(), listTagsOptions) +} + +// ListTagsWithContext is an alternate form of the ListTags method which supports a Context parameter +func (globalTagging *GlobalTaggingV1) ListTagsWithContext(ctx context.Context, listTagsOptions *ListTagsOptions) (result *TagList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listTagsOptions, "listTagsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalTagging.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalTagging.Service.Options.URL, `/v3/tags`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listTagsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_tagging", "V1", "ListTags") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listTagsOptions.ImpersonateUser != nil { + builder.AddQuery("impersonate_user", fmt.Sprint(*listTagsOptions.ImpersonateUser)) + } + if listTagsOptions.AccountID != nil { + builder.AddQuery("account_id", fmt.Sprint(*listTagsOptions.AccountID)) + } + if listTagsOptions.TagType != nil { + builder.AddQuery("tag_type", fmt.Sprint(*listTagsOptions.TagType)) + } + if listTagsOptions.FullData != nil { + builder.AddQuery("full_data", fmt.Sprint(*listTagsOptions.FullData)) + } + if listTagsOptions.Providers != nil { + builder.AddQuery("providers", strings.Join(listTagsOptions.Providers, ",")) + } + if listTagsOptions.AttachedTo != nil { + builder.AddQuery("attached_to", fmt.Sprint(*listTagsOptions.AttachedTo)) + } + if listTagsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listTagsOptions.Offset)) + } + if listTagsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listTagsOptions.Limit)) + } + if listTagsOptions.Timeout != nil { + builder.AddQuery("timeout", fmt.Sprint(*listTagsOptions.Timeout)) + } + if listTagsOptions.OrderByName != nil { + builder.AddQuery("order_by_name", fmt.Sprint(*listTagsOptions.OrderByName)) + } + if listTagsOptions.AttachedOnly != nil { + builder.AddQuery("attached_only", fmt.Sprint(*listTagsOptions.AttachedOnly)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalTagging.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTagList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateTag : Create an access tag +// Create an access tag. 
+// To create an `access` tag, you must have the access listed in the [Granting users access to tag
+// resources](https://cloud.ibm.com/docs/account?topic=account-access) documentation. `service` and `user` tags cannot
+// be created upfront. They are created when they are attached for the first time to a resource.
+func (globalTagging *GlobalTaggingV1) CreateTag(createTagOptions *CreateTagOptions) (result *CreateTagResults, response *core.DetailedResponse, err error) {
+	return globalTagging.CreateTagWithContext(context.Background(), createTagOptions)
+}
+
+// CreateTagWithContext is an alternate form of the CreateTag method which supports a Context parameter
+func (globalTagging *GlobalTaggingV1) CreateTagWithContext(ctx context.Context, createTagOptions *CreateTagOptions) (result *CreateTagResults, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(createTagOptions, "createTagOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(createTagOptions, "createTagOptions")
+	if err != nil {
+		return
+	}
+
+	builder := core.NewRequestBuilder(core.POST)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = globalTagging.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(globalTagging.Service.Options.URL, `/v3/tags`, nil)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range createTagOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("global_tagging", "V1", "CreateTag")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	if createTagOptions.ImpersonateUser != nil {
+		builder.AddQuery("impersonate_user", fmt.Sprint(*createTagOptions.ImpersonateUser))
+	}
+	if createTagOptions.AccountID != nil {
+		builder.AddQuery("account_id", fmt.Sprint(*createTagOptions.AccountID))
+	}
+	if createTagOptions.TagType != nil {
+		builder.AddQuery("tag_type", fmt.Sprint(*createTagOptions.TagType))
+	}
+
+	body := make(map[string]interface{})
+	if createTagOptions.TagNames != nil {
+		body["tag_names"] = createTagOptions.TagNames
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = globalTagging.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateTagResults)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// DeleteTagAll : Delete all unused tags
+// Delete the tags that are not attached to any resource.
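// Usage sketch for DeleteTagAll, the housekeeping call documented just above:
// it removes every unattached tag of the given type. The DeleteTagsResult
// model is defined later in this file, outside this hunk; its TotalCount
// field is assumed here.
//
//	result, _, err := service.DeleteTagAll(service.NewDeleteTagAllOptions().
//		SetTagType("user"))
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(*result.TotalCount, "unused tags deleted")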
+func (globalTagging *GlobalTaggingV1) DeleteTagAll(deleteTagAllOptions *DeleteTagAllOptions) (result *DeleteTagsResult, response *core.DetailedResponse, err error) { + return globalTagging.DeleteTagAllWithContext(context.Background(), deleteTagAllOptions) +} + +// DeleteTagAllWithContext is an alternate form of the DeleteTagAll method which supports a Context parameter +func (globalTagging *GlobalTaggingV1) DeleteTagAllWithContext(ctx context.Context, deleteTagAllOptions *DeleteTagAllOptions) (result *DeleteTagsResult, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(deleteTagAllOptions, "deleteTagAllOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalTagging.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalTagging.Service.Options.URL, `/v3/tags`, nil) + if err != nil { + return + } + + for headerName, headerValue := range deleteTagAllOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_tagging", "V1", "DeleteTagAll") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if deleteTagAllOptions.Providers != nil { + builder.AddQuery("providers", fmt.Sprint(*deleteTagAllOptions.Providers)) + } + if deleteTagAllOptions.ImpersonateUser != nil { + builder.AddQuery("impersonate_user", fmt.Sprint(*deleteTagAllOptions.ImpersonateUser)) + } + if deleteTagAllOptions.AccountID != nil { + builder.AddQuery("account_id", fmt.Sprint(*deleteTagAllOptions.AccountID)) + } + if deleteTagAllOptions.TagType != nil { + builder.AddQuery("tag_type", fmt.Sprint(*deleteTagAllOptions.TagType)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalTagging.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteTagsResult) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteTag : Delete an unused tag +// Delete an existing tag. A tag can be deleted only if it is not attached to any resource. 
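// Usage sketch for DeleteTag, per the doc comment above: the tag must already
// be detached everywhere before it can be deleted. Deleting from both
// providers at once uses the `ghost,ims` form described in DeleteTagOptions
// further below; "project:obsolete" is a placeholder tag name.
//
//	options := service.NewDeleteTagOptions("project:obsolete").
//		SetProviders([]string{"ghost", "ims"})
//	result, _, err := service.DeleteTag(options)
//	if err != nil {
//		panic(err)
//	}
//	for _, item := range result.Results {
//		fmt.Println(*item.Provider, *item.IsError)
//	}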
+func (globalTagging *GlobalTaggingV1) DeleteTag(deleteTagOptions *DeleteTagOptions) (result *DeleteTagResults, response *core.DetailedResponse, err error) {
+	return globalTagging.DeleteTagWithContext(context.Background(), deleteTagOptions)
+}
+
+// DeleteTagWithContext is an alternate form of the DeleteTag method which supports a Context parameter
+func (globalTagging *GlobalTaggingV1) DeleteTagWithContext(ctx context.Context, deleteTagOptions *DeleteTagOptions) (result *DeleteTagResults, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteTagOptions, "deleteTagOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteTagOptions, "deleteTagOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"tag_name": *deleteTagOptions.TagName,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = globalTagging.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(globalTagging.Service.Options.URL, `/v3/tags/{tag_name}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteTagOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("global_tagging", "V1", "DeleteTag")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	if deleteTagOptions.Providers != nil {
+		builder.AddQuery("providers", strings.Join(deleteTagOptions.Providers, ","))
+	}
+	if deleteTagOptions.ImpersonateUser != nil {
+		builder.AddQuery("impersonate_user", fmt.Sprint(*deleteTagOptions.ImpersonateUser))
+	}
+	if deleteTagOptions.AccountID != nil {
+		builder.AddQuery("account_id", fmt.Sprint(*deleteTagOptions.AccountID))
+	}
+	if deleteTagOptions.TagType != nil {
+		builder.AddQuery("tag_type", fmt.Sprint(*deleteTagOptions.TagType))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = globalTagging.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteTagResults)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// AttachTag : Attach tags
+// Attaches one or more tags to one or more resources. To attach a `user` tag on a resource, you must have the access
+// listed in the [Granting users access to tag resources](https://cloud.ibm.com/docs/account?topic=account-access)
+// documentation. To attach a `service` tag, you must be an authorized service. If that is the case, then you can attach
+// a `service` tag with your registered `prefix` to any resource in any account. The account ID must be set through the
+// `account_id` query parameter. To attach an `access` tag, you must be the resource administrator within the account.
+// You can attach only existing `access` tags.
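// Usage sketch for AttachTag. The Resource model and its NewResource
// constructor are defined later in this file, outside this hunk; passing the
// resource CRN as the resource ID, and the result items carrying resource_id
// and is_error (mirroring the delete results models in this file), are
// assumptions here.
//
//	resource, err := service.NewResource(resourceCRN)
//	if err != nil {
//		panic(err)
//	}
//	options := service.NewAttachTagOptions([]Resource{*resource}).
//		SetTagNames([]string{"env:prod", "team:storage"})
//	result, _, err := service.AttachTag(options)
//	if err != nil {
//		panic(err)
//	}
//	for _, item := range result.Results {
//		// is_error is true when tagging that particular resource failed
//		fmt.Println(*item.ResourceID, *item.IsError)
//	}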
+func (globalTagging *GlobalTaggingV1) AttachTag(attachTagOptions *AttachTagOptions) (result *TagResults, response *core.DetailedResponse, err error) { + return globalTagging.AttachTagWithContext(context.Background(), attachTagOptions) +} + +// AttachTagWithContext is an alternate form of the AttachTag method which supports a Context parameter +func (globalTagging *GlobalTaggingV1) AttachTagWithContext(ctx context.Context, attachTagOptions *AttachTagOptions) (result *TagResults, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(attachTagOptions, "attachTagOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(attachTagOptions, "attachTagOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = globalTagging.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(globalTagging.Service.Options.URL, `/v3/tags/attach`, nil) + if err != nil { + return + } + + for headerName, headerValue := range attachTagOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("global_tagging", "V1", "AttachTag") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + if attachTagOptions.ImpersonateUser != nil { + builder.AddQuery("impersonate_user", fmt.Sprint(*attachTagOptions.ImpersonateUser)) + } + if attachTagOptions.AccountID != nil { + builder.AddQuery("account_id", fmt.Sprint(*attachTagOptions.AccountID)) + } + if attachTagOptions.TagType != nil { + builder.AddQuery("tag_type", fmt.Sprint(*attachTagOptions.TagType)) + } + + body := make(map[string]interface{}) + if attachTagOptions.Resources != nil { + body["resources"] = attachTagOptions.Resources + } + if attachTagOptions.TagName != nil { + body["tag_name"] = attachTagOptions.TagName + } + if attachTagOptions.TagNames != nil { + body["tag_names"] = attachTagOptions.TagNames + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = globalTagging.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTagResults) + if err != nil { + return + } + response.Result = result + + return +} + +// DetachTag : Detach tags +// Detaches one or more tags from one or more resources. To detach a `user` tag on a resource you must have the +// permissions listed in the [Granting users access to tag +// resources](https://cloud.ibm.com/docs/account?topic=account-access) documentation. To detach a `service` tag you must +// be an authorized Service. If that is the case, then you can detach a `service` tag with your registered `prefix` from +// any resource in any account. The account ID must be set through the `account_id` query parameter. To detach an +// `access` tag, you must be the resource administrator within the account. 
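// Usage sketch for DetachTag, mirroring the attach example above. The
// NewDetachTagOptions constructor is assumed to exist later in this file with
// the same shape as NewAttachTagOptions.
//
//	options := service.NewDetachTagOptions([]Resource{*resource}).
//		SetTagName("env:prod")
//	_, _, err = service.DetachTag(options)
//	if err != nil {
//		panic(err)
//	}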
+func (globalTagging *GlobalTaggingV1) DetachTag(detachTagOptions *DetachTagOptions) (result *TagResults, response *core.DetailedResponse, err error) {
+	return globalTagging.DetachTagWithContext(context.Background(), detachTagOptions)
+}
+
+// DetachTagWithContext is an alternate form of the DetachTag method which supports a Context parameter
+func (globalTagging *GlobalTaggingV1) DetachTagWithContext(ctx context.Context, detachTagOptions *DetachTagOptions) (result *TagResults, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(detachTagOptions, "detachTagOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(detachTagOptions, "detachTagOptions")
+	if err != nil {
+		return
+	}
+
+	builder := core.NewRequestBuilder(core.POST)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = globalTagging.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(globalTagging.Service.Options.URL, `/v3/tags/detach`, nil)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range detachTagOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("global_tagging", "V1", "DetachTag")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	if detachTagOptions.ImpersonateUser != nil {
+		builder.AddQuery("impersonate_user", fmt.Sprint(*detachTagOptions.ImpersonateUser))
+	}
+	if detachTagOptions.AccountID != nil {
+		builder.AddQuery("account_id", fmt.Sprint(*detachTagOptions.AccountID))
+	}
+	if detachTagOptions.TagType != nil {
+		builder.AddQuery("tag_type", fmt.Sprint(*detachTagOptions.TagType))
+	}
+
+	body := make(map[string]interface{})
+	if detachTagOptions.Resources != nil {
+		body["resources"] = detachTagOptions.Resources
+	}
+	if detachTagOptions.TagName != nil {
+		body["tag_name"] = detachTagOptions.TagName
+	}
+	if detachTagOptions.TagNames != nil {
+		body["tag_names"] = detachTagOptions.TagNames
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = globalTagging.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTagResults)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// AttachTagOptions : The AttachTag options.
+type AttachTagOptions struct {
+	// List of resources on which the tag or tags should be attached.
+	Resources []Resource `validate:"required"`
+
+	// The name of the tag to attach.
+	TagName *string
+
+	// An array of tag names to attach.
+	TagNames []string
+
+	// The user on whose behalf the attach operation must be performed (_for administrators only_).
+	ImpersonateUser *string
+
+	// The ID of the billing account where the resources to be tagged live. It is a required parameter if `tag_type` is
+	// set to `service`. Otherwise, it is inferred from the authorization IAM token.
+	AccountID *string
+
+	// The type of the tag. Supported values are `user`, `service` and `access`. `service` and `access` are not supported
+	// for IMS resources.
+	TagType *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the AttachTagOptions.TagType property.
+// The type of the tag. Supported values are `user`, `service` and `access`. `service` and `access` are not supported
+// for IMS resources.
+const (
+	AttachTagOptionsTagTypeAccessConst = "access"
+	AttachTagOptionsTagTypeServiceConst = "service"
+	AttachTagOptionsTagTypeUserConst = "user"
+)
+
+// NewAttachTagOptions : Instantiate AttachTagOptions
+func (*GlobalTaggingV1) NewAttachTagOptions(resources []Resource) *AttachTagOptions {
+	return &AttachTagOptions{
+		Resources: resources,
+	}
+}
+
+// SetResources : Allow user to set Resources
+func (options *AttachTagOptions) SetResources(resources []Resource) *AttachTagOptions {
+	options.Resources = resources
+	return options
+}
+
+// SetTagName : Allow user to set TagName
+func (options *AttachTagOptions) SetTagName(tagName string) *AttachTagOptions {
+	options.TagName = core.StringPtr(tagName)
+	return options
+}
+
+// SetTagNames : Allow user to set TagNames
+func (options *AttachTagOptions) SetTagNames(tagNames []string) *AttachTagOptions {
+	options.TagNames = tagNames
+	return options
+}
+
+// SetImpersonateUser : Allow user to set ImpersonateUser
+func (options *AttachTagOptions) SetImpersonateUser(impersonateUser string) *AttachTagOptions {
+	options.ImpersonateUser = core.StringPtr(impersonateUser)
+	return options
+}
+
+// SetAccountID : Allow user to set AccountID
+func (options *AttachTagOptions) SetAccountID(accountID string) *AttachTagOptions {
+	options.AccountID = core.StringPtr(accountID)
+	return options
+}
+
+// SetTagType : Allow user to set TagType
+func (options *AttachTagOptions) SetTagType(tagType string) *AttachTagOptions {
+	options.TagType = core.StringPtr(tagType)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *AttachTagOptions) SetHeaders(param map[string]string) *AttachTagOptions {
+	options.Headers = param
+	return options
+}
+
+// CreateTagOptions : The CreateTag options.
+type CreateTagOptions struct {
+	// An array of tag names to create.
+	TagNames []string `validate:"required"`
+
+	// The user on whose behalf the create operation must be performed (_for administrators only_).
+	ImpersonateUser *string
+
+	// The ID of the billing account where the tag must be created. It is a required parameter if `impersonate_user` is
+	// set.
+	AccountID *string
+
+	// The type of the tags you want to create. The only allowed value is `access`.
+	TagType *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the CreateTagOptions.TagType property.
+// The type of the tags you want to create. The only allowed value is `access`.
+const (
+	CreateTagOptionsTagTypeAccessConst = "access"
+)
+
+// NewCreateTagOptions : Instantiate CreateTagOptions
+func (*GlobalTaggingV1) NewCreateTagOptions(tagNames []string) *CreateTagOptions {
+	return &CreateTagOptions{
+		TagNames: tagNames,
+	}
+}
+
+// SetTagNames : Allow user to set TagNames
+func (options *CreateTagOptions) SetTagNames(tagNames []string) *CreateTagOptions {
+	options.TagNames = tagNames
+	return options
+}
+
+// SetImpersonateUser : Allow user to set ImpersonateUser
+func (options *CreateTagOptions) SetImpersonateUser(impersonateUser string) *CreateTagOptions {
+	options.ImpersonateUser = core.StringPtr(impersonateUser)
+	return options
+}
+
+// SetAccountID : Allow user to set AccountID
+func (options *CreateTagOptions) SetAccountID(accountID string) *CreateTagOptions {
+	options.AccountID = core.StringPtr(accountID)
+	return options
+}
+
+// SetTagType : Allow user to set TagType
+func (options *CreateTagOptions) SetTagType(tagType string) *CreateTagOptions {
+	options.TagType = core.StringPtr(tagType)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *CreateTagOptions) SetHeaders(param map[string]string) *CreateTagOptions {
+	options.Headers = param
+	return options
+}
+
+// CreateTagResults : Results of a create tag(s) request.
+type CreateTagResults struct {
+	// Array of results of a set_tags request.
+	Results []CreateTagResultsResultsItem `json:"results,omitempty"`
+}
+
+// UnmarshalCreateTagResults unmarshals an instance of CreateTagResults from the specified map of raw messages.
+func UnmarshalCreateTagResults(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(CreateTagResults)
+	err = core.UnmarshalModel(m, "results", &obj.Results, UnmarshalCreateTagResultsResultsItem)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// CreateTagResultsResultsItem : CreateTagResultsResultsItem struct
+type CreateTagResultsResultsItem struct {
+	// The name of the tag created.
+	TagName *string `json:"tag_name,omitempty"`
+
+	// It is `true` if the tag was not created.
+	IsError *bool `json:"is_error,omitempty"`
+}
+
+// UnmarshalCreateTagResultsResultsItem unmarshals an instance of CreateTagResultsResultsItem from the specified map of raw messages.
+func UnmarshalCreateTagResultsResultsItem(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(CreateTagResultsResultsItem)
+	err = core.UnmarshalPrimitive(m, "tag_name", &obj.TagName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "is_error", &obj.IsError)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// DeleteTagAllOptions : The DeleteTagAll options.
+type DeleteTagAllOptions struct {
+	// Select a provider. Supported values are `ghost` and `ims`.
+	Providers *string
+
+	// The user on whose behalf the delete all operation must be performed (_for administrators only_).
+	ImpersonateUser *string
+
+	// The ID of the billing account to delete the tags for. If it is not set, then it is taken from the authorization
+	// token. It is a required parameter if `tag_type` is set to `service`.
+	AccountID *string
+
+	// The type of the tag. Supported values are `user`, `service` and `access`. `service` and `access` are not supported
+	// for IMS resources (`providers` parameter set to `ims`).
+ TagType *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the DeleteTagAllOptions.Providers property. +// Select a provider. Supported values are `ghost` and `ims`. +const ( + DeleteTagAllOptionsProvidersGhostConst = "ghost" + DeleteTagAllOptionsProvidersImsConst = "ims" +) + +// Constants associated with the DeleteTagAllOptions.TagType property. +// The type of the tag. Supported values are `user`, `service` and `access`. `service` and `access` are not supported +// for IMS resources (`providers` parameter set to `ims`). +const ( + DeleteTagAllOptionsTagTypeAccessConst = "access" + DeleteTagAllOptionsTagTypeServiceConst = "service" + DeleteTagAllOptionsTagTypeUserConst = "user" +) + +// NewDeleteTagAllOptions : Instantiate DeleteTagAllOptions +func (*GlobalTaggingV1) NewDeleteTagAllOptions() *DeleteTagAllOptions { + return &DeleteTagAllOptions{} +} + +// SetProviders : Allow user to set Providers +func (options *DeleteTagAllOptions) SetProviders(providers string) *DeleteTagAllOptions { + options.Providers = core.StringPtr(providers) + return options +} + +// SetImpersonateUser : Allow user to set ImpersonateUser +func (options *DeleteTagAllOptions) SetImpersonateUser(impersonateUser string) *DeleteTagAllOptions { + options.ImpersonateUser = core.StringPtr(impersonateUser) + return options +} + +// SetAccountID : Allow user to set AccountID +func (options *DeleteTagAllOptions) SetAccountID(accountID string) *DeleteTagAllOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetTagType : Allow user to set TagType +func (options *DeleteTagAllOptions) SetTagType(tagType string) *DeleteTagAllOptions { + options.TagType = core.StringPtr(tagType) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteTagAllOptions) SetHeaders(param map[string]string) *DeleteTagAllOptions { + options.Headers = param + return options +} + +// DeleteTagOptions : The DeleteTag options. +type DeleteTagOptions struct { + // The name of tag to be deleted. + TagName *string `validate:"required,ne="` + + // Select a provider. Supported values are `ghost` and `ims`. To delete tag both in GhoST in IMS, use `ghost,ims`. + Providers []string + + // The user on whose behalf the delete operation must be performed (_for administrators only_). + ImpersonateUser *string + + // The ID of the billing account to delete the tag for. It is a required parameter if `tag_type` is set to `service`, + // otherwise it is inferred from the authorization IAM token. + AccountID *string + + // The type of the tag. Supported values are `user`, `service` and `access`. `service` and `access` are not supported + // for IMS resources (`providers` parameter set to `ims`). + TagType *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the DeleteTagOptions.Providers property. +const ( + DeleteTagOptionsProvidersGhostConst = "ghost" + DeleteTagOptionsProvidersImsConst = "ims" +) + +// Constants associated with the DeleteTagOptions.TagType property. +// The type of the tag. Supported values are `user`, `service` and `access`. `service` and `access` are not supported +// for IMS resources (`providers` parameter set to `ims`). 
+const ( + DeleteTagOptionsTagTypeAccessConst = "access" + DeleteTagOptionsTagTypeServiceConst = "service" + DeleteTagOptionsTagTypeUserConst = "user" +) + +// NewDeleteTagOptions : Instantiate DeleteTagOptions +func (*GlobalTaggingV1) NewDeleteTagOptions(tagName string) *DeleteTagOptions { + return &DeleteTagOptions{ + TagName: core.StringPtr(tagName), + } +} + +// SetTagName : Allow user to set TagName +func (options *DeleteTagOptions) SetTagName(tagName string) *DeleteTagOptions { + options.TagName = core.StringPtr(tagName) + return options +} + +// SetProviders : Allow user to set Providers +func (options *DeleteTagOptions) SetProviders(providers []string) *DeleteTagOptions { + options.Providers = providers + return options +} + +// SetImpersonateUser : Allow user to set ImpersonateUser +func (options *DeleteTagOptions) SetImpersonateUser(impersonateUser string) *DeleteTagOptions { + options.ImpersonateUser = core.StringPtr(impersonateUser) + return options +} + +// SetAccountID : Allow user to set AccountID +func (options *DeleteTagOptions) SetAccountID(accountID string) *DeleteTagOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetTagType : Allow user to set TagType +func (options *DeleteTagOptions) SetTagType(tagType string) *DeleteTagOptions { + options.TagType = core.StringPtr(tagType) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteTagOptions) SetHeaders(param map[string]string) *DeleteTagOptions { + options.Headers = param + return options +} + +// DeleteTagResults : Results of a delete_tag request. +type DeleteTagResults struct { + // Array of results of a delete_tag request. + Results []DeleteTagResultsItem `json:"results,omitempty"` +} + +// UnmarshalDeleteTagResults unmarshals an instance of DeleteTagResults from the specified map of raw messages. +func UnmarshalDeleteTagResults(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DeleteTagResults) + err = core.UnmarshalModel(m, "results", &obj.Results, UnmarshalDeleteTagResultsItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DeleteTagResultsItem : Result of a delete_tag request. +type DeleteTagResultsItem struct { + // The provider of the tag. + Provider *string `json:"provider,omitempty"` + + // It is `true` if the operation exits with an error. + IsError *bool `json:"is_error,omitempty"` + + // Allows users to set arbitrary properties + additionalProperties map[string]interface{} +} + +// Constants associated with the DeleteTagResultsItem.Provider property. +// The provider of the tag. 
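+//
+// Example (illustrative sketch, not part of the generated SDK): deleting a
+// user tag from both providers with the DeleteTagOptions builder above. It
+// assumes a configured *GlobalTaggingV1 client named globalTagging and the
+// DeleteTag method defined earlier in this file.
+//
+//	deleteOptions := globalTagging.NewDeleteTagOptions("env:dev").
+//		SetProviders([]string{DeleteTagOptionsProvidersGhostConst, DeleteTagOptionsProvidersImsConst}).
+//		SetTagType(DeleteTagOptionsTagTypeUserConst)
+//	result, response, err := globalTagging.DeleteTag(deleteOptions)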
+const (
+	DeleteTagResultsItemProviderGhostConst = "ghost"
+	DeleteTagResultsItemProviderImsConst   = "ims"
+)
+
+// SetProperty allows the user to set an arbitrary property on an instance of DeleteTagResultsItem
+func (o *DeleteTagResultsItem) SetProperty(key string, value interface{}) {
+	if o.additionalProperties == nil {
+		o.additionalProperties = make(map[string]interface{})
+	}
+	o.additionalProperties[key] = value
+}
+
+// GetProperty allows the user to retrieve an arbitrary property from an instance of DeleteTagResultsItem
+func (o *DeleteTagResultsItem) GetProperty(key string) interface{} {
+	return o.additionalProperties[key]
+}
+
+// GetProperties allows the user to retrieve the map of arbitrary properties from an instance of DeleteTagResultsItem
+func (o *DeleteTagResultsItem) GetProperties() map[string]interface{} {
+	return o.additionalProperties
+}
+
+// MarshalJSON performs custom serialization for instances of DeleteTagResultsItem
+func (o *DeleteTagResultsItem) MarshalJSON() (buffer []byte, err error) {
+	m := make(map[string]interface{})
+	if len(o.additionalProperties) > 0 {
+		for k, v := range o.additionalProperties {
+			m[k] = v
+		}
+	}
+	if o.Provider != nil {
+		m["provider"] = o.Provider
+	}
+	if o.IsError != nil {
+		m["is_error"] = o.IsError
+	}
+	buffer, err = json.Marshal(m)
+	return
+}
+
+// UnmarshalDeleteTagResultsItem unmarshals an instance of DeleteTagResultsItem from the specified map of raw messages.
+func UnmarshalDeleteTagResultsItem(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DeleteTagResultsItem)
+	err = core.UnmarshalPrimitive(m, "provider", &obj.Provider)
+	if err != nil {
+		return
+	}
+	delete(m, "provider")
+	err = core.UnmarshalPrimitive(m, "is_error", &obj.IsError)
+	if err != nil {
+		return
+	}
+	delete(m, "is_error")
+	for k := range m {
+		var v interface{}
+		e := core.UnmarshalPrimitive(m, k, &v)
+		if e != nil {
+			err = e
+			return
+		}
+		obj.SetProperty(k, v)
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// DeleteTagsResult : Results of deleting unattached tags.
+type DeleteTagsResult struct {
+	// The number of tags that have been deleted.
+	TotalCount *int64 `json:"total_count,omitempty"`
+
+	// It is set to true if there is at least one tag operation in error.
+	Errors *bool `json:"errors,omitempty"`
+
+	// The list of tag operation results.
+	Items []DeleteTagsResultItem `json:"items,omitempty"`
+}
+
+// UnmarshalDeleteTagsResult unmarshals an instance of DeleteTagsResult from the specified map of raw messages.
+func UnmarshalDeleteTagsResult(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DeleteTagsResult)
+	err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "errors", &obj.Errors)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "items", &obj.Items, UnmarshalDeleteTagsResultItem)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// DeleteTagsResultItem : Result of a delete_tags request.
+type DeleteTagsResultItem struct {
+	// The name of the deleted tag.
+	TagName *string `json:"tag_name,omitempty"`
+
+	// true if the tag was not deleted.
+	IsError *bool `json:"is_error,omitempty"`
+}
+
+// UnmarshalDeleteTagsResultItem unmarshals an instance of DeleteTagsResultItem from the specified map of raw messages.
+func UnmarshalDeleteTagsResultItem(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(DeleteTagsResultItem)
+	err = core.UnmarshalPrimitive(m, "tag_name", &obj.TagName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "is_error", &obj.IsError)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// DetachTagOptions : The DetachTag options.
+type DetachTagOptions struct {
+	// List of resources on which the tag or tags should be detached.
+	Resources []Resource `validate:"required"`
+
+	// The name of the tag to detach.
+	TagName *string
+
+	// An array of tag names to detach.
+	TagNames []string
+
+	// The user on whose behalf the detach operation must be performed (_for administrators only_).
+	ImpersonateUser *string
+
+	// The ID of the billing account where the resources to be un-tagged live. It is a required parameter if `tag_type` is
+	// set to `service`, otherwise it is inferred from the authorization IAM token.
+	AccountID *string
+
+	// The type of the tag. Supported values are `user`, `service` and `access`. `service` and `access` are not supported
+	// for IMS resources.
+	TagType *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the DetachTagOptions.TagType property.
+// The type of the tag. Supported values are `user`, `service` and `access`. `service` and `access` are not supported
+// for IMS resources.
+const (
+	DetachTagOptionsTagTypeAccessConst  = "access"
+	DetachTagOptionsTagTypeServiceConst = "service"
+	DetachTagOptionsTagTypeUserConst    = "user"
+)
+
+// NewDetachTagOptions : Instantiate DetachTagOptions
+func (*GlobalTaggingV1) NewDetachTagOptions(resources []Resource) *DetachTagOptions {
+	return &DetachTagOptions{
+		Resources: resources,
+	}
+}
+
+// SetResources : Allow user to set Resources
+func (options *DetachTagOptions) SetResources(resources []Resource) *DetachTagOptions {
+	options.Resources = resources
+	return options
+}
+
+// SetTagName : Allow user to set TagName
+func (options *DetachTagOptions) SetTagName(tagName string) *DetachTagOptions {
+	options.TagName = core.StringPtr(tagName)
+	return options
+}
+
+// SetTagNames : Allow user to set TagNames
+func (options *DetachTagOptions) SetTagNames(tagNames []string) *DetachTagOptions {
+	options.TagNames = tagNames
+	return options
+}
+
+// SetImpersonateUser : Allow user to set ImpersonateUser
+func (options *DetachTagOptions) SetImpersonateUser(impersonateUser string) *DetachTagOptions {
+	options.ImpersonateUser = core.StringPtr(impersonateUser)
+	return options
+}
+
+// SetAccountID : Allow user to set AccountID
+func (options *DetachTagOptions) SetAccountID(accountID string) *DetachTagOptions {
+	options.AccountID = core.StringPtr(accountID)
+	return options
+}
+
+// SetTagType : Allow user to set TagType
+func (options *DetachTagOptions) SetTagType(tagType string) *DetachTagOptions {
+	options.TagType = core.StringPtr(tagType)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *DetachTagOptions) SetHeaders(param map[string]string) *DetachTagOptions {
+	options.Headers = param
+	return options
+}
+
+// ListTagsOptions : The ListTags options.
+type ListTagsOptions struct {
+	// The user on whose behalf the get operation must be performed (_for administrators only_).
+	ImpersonateUser *string
+
+	// The ID of the billing account to list the tags for. If it is not set, then it is taken from the authorization
+	// token. This parameter is required if `tag_type` is set to `service`.
+	AccountID *string
+
+	// The type of the tag you want to list. Supported values are `user`, `service` and `access`.
+	TagType *string
+
+	// If set to `true`, this query returns the provider, `ghost`, `ims` or `ghost,ims`, where the tag exists and the
+	// number of attached resources.
+	FullData *bool
+
+	// Select a provider. Supported values are `ghost` and `ims`. To list GhoST tags and infrastructure tags use
+	// `ghost,ims`. `service` and `access` tags can only be attached to GhoST onboarded resources, so you should not set
+	// this parameter when listing them.
+	Providers []string
+
+	// If you want to return only the list of tags attached to a specified resource, pass the ID of the resource on this
+	// parameter. For GhoST onboarded resources, the resource ID is the CRN; for IMS resources, it is the IMS ID. When
+	// using this parameter, you must specify the appropriate provider (`ims` or `ghost`).
+	AttachedTo *string
+
+	// The offset is the index of the item from which you want to start returning data.
+	Offset *int64
+
+	// The number of tags to return.
+	Limit *int64
+
+	// The search timeout bounds the search request to be executed within the specified time value. It returns the hits
+	// accumulated until time runs out.
+	Timeout *int64
+
+	// Order the output by tag name.
+	OrderByName *string
+
+	// Filter on attached tags. If `true`, it returns only tags that are attached to one or more resources. If `false`, it
+	// returns all tags.
+	AttachedOnly *bool
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the ListTagsOptions.TagType property.
+// The type of the tag you want to list. Supported values are `user`, `service` and `access`.
+const (
+	ListTagsOptionsTagTypeAccessConst  = "access"
+	ListTagsOptionsTagTypeServiceConst = "service"
+	ListTagsOptionsTagTypeUserConst    = "user"
+)
+
+// Constants associated with the ListTagsOptions.Providers property.
+const (
+	ListTagsOptionsProvidersGhostConst = "ghost"
+	ListTagsOptionsProvidersImsConst   = "ims"
+)
+
+// Constants associated with the ListTagsOptions.OrderByName property.
+// Order the output by tag name.
+const ( + ListTagsOptionsOrderByNameAscConst = "asc" + ListTagsOptionsOrderByNameDescConst = "desc" +) + +// NewListTagsOptions : Instantiate ListTagsOptions +func (*GlobalTaggingV1) NewListTagsOptions() *ListTagsOptions { + return &ListTagsOptions{} +} + +// SetImpersonateUser : Allow user to set ImpersonateUser +func (options *ListTagsOptions) SetImpersonateUser(impersonateUser string) *ListTagsOptions { + options.ImpersonateUser = core.StringPtr(impersonateUser) + return options +} + +// SetAccountID : Allow user to set AccountID +func (options *ListTagsOptions) SetAccountID(accountID string) *ListTagsOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetTagType : Allow user to set TagType +func (options *ListTagsOptions) SetTagType(tagType string) *ListTagsOptions { + options.TagType = core.StringPtr(tagType) + return options +} + +// SetFullData : Allow user to set FullData +func (options *ListTagsOptions) SetFullData(fullData bool) *ListTagsOptions { + options.FullData = core.BoolPtr(fullData) + return options +} + +// SetProviders : Allow user to set Providers +func (options *ListTagsOptions) SetProviders(providers []string) *ListTagsOptions { + options.Providers = providers + return options +} + +// SetAttachedTo : Allow user to set AttachedTo +func (options *ListTagsOptions) SetAttachedTo(attachedTo string) *ListTagsOptions { + options.AttachedTo = core.StringPtr(attachedTo) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListTagsOptions) SetOffset(offset int64) *ListTagsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListTagsOptions) SetLimit(limit int64) *ListTagsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetTimeout : Allow user to set Timeout +func (options *ListTagsOptions) SetTimeout(timeout int64) *ListTagsOptions { + options.Timeout = core.Int64Ptr(timeout) + return options +} + +// SetOrderByName : Allow user to set OrderByName +func (options *ListTagsOptions) SetOrderByName(orderByName string) *ListTagsOptions { + options.OrderByName = core.StringPtr(orderByName) + return options +} + +// SetAttachedOnly : Allow user to set AttachedOnly +func (options *ListTagsOptions) SetAttachedOnly(attachedOnly bool) *ListTagsOptions { + options.AttachedOnly = core.BoolPtr(attachedOnly) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListTagsOptions) SetHeaders(param map[string]string) *ListTagsOptions { + options.Headers = param + return options +} + +// Resource : A resource that may have attached tags. +type Resource struct { + // The CRN or IMS ID of the resource. + ResourceID *string `json:"resource_id" validate:"required"` + + // The IMS resource type of the resource. + ResourceType *string `json:"resource_type,omitempty"` +} + +// NewResource : Instantiate Resource (Generic Model Constructor) +func (*GlobalTaggingV1) NewResource(resourceID string) (model *Resource, err error) { + model = &Resource{ + ResourceID: core.StringPtr(resourceID), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalResource unmarshals an instance of Resource from the specified map of raw messages. 
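+//
+// Example (illustrative sketch, not part of the generated SDK): listing
+// attached user tags a page at a time with the ListTagsOptions builder above.
+// It assumes a configured *GlobalTaggingV1 client named globalTagging and the
+// ListTags method defined earlier in this file.
+//
+//	listOptions := globalTagging.NewListTagsOptions().
+//		SetTagType(ListTagsOptionsTagTypeUserConst).
+//		SetAttachedOnly(true).
+//		SetOffset(0).
+//		SetLimit(100)
+//	tagList, response, err := globalTagging.ListTags(listOptions)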
+func UnmarshalResource(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(Resource)
+	err = core.UnmarshalPrimitive(m, "resource_id", &obj.ResourceID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// Tag : A tag.
+type Tag struct {
+	// This is the name of the tag.
+	Name *string `json:"name" validate:"required"`
+}
+
+// UnmarshalTag unmarshals an instance of Tag from the specified map of raw messages.
+func UnmarshalTag(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(Tag)
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// TagList : A list of tags.
+type TagList struct {
+	// The total number of tags associated with this account.
+	TotalCount *int64 `json:"total_count,omitempty"`
+
+	// The offset at which tags are returned.
+	Offset *int64 `json:"offset,omitempty"`
+
+	// The number of tags requested to be returned.
+	Limit *int64 `json:"limit,omitempty"`
+
+	// Array of output results.
+	Items []Tag `json:"items,omitempty"`
+}
+
+// UnmarshalTagList unmarshals an instance of TagList from the specified map of raw messages.
+func UnmarshalTagList(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(TagList)
+	err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "offset", &obj.Offset)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "items", &obj.Items, UnmarshalTag)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// TagResults : Results of an attach_tag or detach_tag request.
+type TagResults struct {
+	// Array of results of an attach_tag or detach_tag request.
+	Results []TagResultsItem `json:"results,omitempty"`
+}
+
+// UnmarshalTagResults unmarshals an instance of TagResults from the specified map of raw messages.
+func UnmarshalTagResults(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(TagResults)
+	err = core.UnmarshalModel(m, "results", &obj.Results, UnmarshalTagResultsItem)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// TagResultsItem : Result of an attach_tag or detach_tag request for a tagged resource.
+type TagResultsItem struct {
+	// The CRN or IMS ID of the resource.
+	ResourceID *string `json:"resource_id" validate:"required"`
+
+	// It is `true` if the operation exits with an error.
+	IsError *bool `json:"is_error,omitempty"`
+}
+
+// UnmarshalTagResultsItem unmarshals an instance of TagResultsItem from the specified map of raw messages.
+func UnmarshalTagResultsItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TagResultsItem) + err = core.UnmarshalPrimitive(m, "resource_id", &obj.ResourceID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "is_error", &obj.IsError) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/platform-services-go-sdk/iamidentityv1/iam_identity_v1.go b/vendor/github.com/IBM/platform-services-go-sdk/iamidentityv1/iam_identity_v1.go new file mode 100644 index 00000000000..87d8cda4a4a --- /dev/null +++ b/vendor/github.com/IBM/platform-services-go-sdk/iamidentityv1/iam_identity_v1.go @@ -0,0 +1,2941 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-46891d34-20210426-162952 + */ + +// Package iamidentityv1 : Operations and models for the IamIdentityV1 service +package iamidentityv1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "time" + + "github.com/IBM/go-sdk-core/v5/core" + common "github.com/IBM/platform-services-go-sdk/common" + "github.com/go-openapi/strfmt" +) + +// IamIdentityV1 : The IAM Identity Service API allows for the management of Account Settings and Identities (Service +// IDs, ApiKeys). +// +// Version: 1.0.0 +type IamIdentityV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://iam.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "iam_identity" + +// IamIdentityV1Options : Service options +type IamIdentityV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewIamIdentityV1UsingExternalConfig : constructs an instance of IamIdentityV1 with passed in options and external configuration. +func NewIamIdentityV1UsingExternalConfig(options *IamIdentityV1Options) (iamIdentity *IamIdentityV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + iamIdentity, err = NewIamIdentityV1(options) + if err != nil { + return + } + + err = iamIdentity.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = iamIdentity.Service.SetServiceURL(options.URL) + } + return +} + +// NewIamIdentityV1 : constructs an instance of IamIdentityV1 with passed in options. 
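+//
+// Example (illustrative sketch, not part of the generated SDK): constructing
+// the client with an IAM authenticator from the go-sdk-core package imported
+// above; the API key value is a placeholder.
+//
+//	authenticator := &core.IamAuthenticator{ApiKey: "<ibmcloud-api-key>"}
+//	iamIdentityService, err := NewIamIdentityV1(&IamIdentityV1Options{
+//		Authenticator: authenticator,
+//	})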
+func NewIamIdentityV1(options *IamIdentityV1Options) (service *IamIdentityV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &IamIdentityV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "iamIdentity" suitable for processing requests. +func (iamIdentity *IamIdentityV1) Clone() *IamIdentityV1 { + if core.IsNil(iamIdentity) { + return nil + } + clone := *iamIdentity + clone.Service = iamIdentity.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (iamIdentity *IamIdentityV1) SetServiceURL(url string) error { + return iamIdentity.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (iamIdentity *IamIdentityV1) GetServiceURL() string { + return iamIdentity.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (iamIdentity *IamIdentityV1) SetDefaultHeaders(headers http.Header) { + iamIdentity.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (iamIdentity *IamIdentityV1) SetEnableGzipCompression(enableGzip bool) { + iamIdentity.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (iamIdentity *IamIdentityV1) GetEnableGzipCompression() bool { + return iamIdentity.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (iamIdentity *IamIdentityV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + iamIdentity.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (iamIdentity *IamIdentityV1) DisableRetries() { + iamIdentity.Service.DisableRetries() +} + +// ListAPIKeys : Get API keys for a given service or user IAM ID and account ID +// Returns the list of API key details for a given service or user IAM ID and account ID. Users can manage user API keys +// for themself, or service ID API keys for service IDs that are bound to an entity they have access to. In case of +// service IDs and their API keys, a user must be either an account owner, a IBM Cloud org manager or IBM Cloud space +// developer in order to manage service IDs of the entity. 
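+//
+// Example (illustrative sketch): listing the API keys in an account, assuming
+// the ListAPIKeysOptions builder defined later in this file and a placeholder
+// account ID.
+//
+//	listKeysOptions := iamIdentityService.NewListAPIKeysOptions().
+//		SetAccountID("<account-id>").
+//		SetPagesize(20)
+//	apiKeyList, response, err := iamIdentityService.ListAPIKeys(listKeysOptions)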
+func (iamIdentity *IamIdentityV1) ListAPIKeys(listAPIKeysOptions *ListAPIKeysOptions) (result *APIKeyList, response *core.DetailedResponse, err error) { + return iamIdentity.ListAPIKeysWithContext(context.Background(), listAPIKeysOptions) +} + +// ListAPIKeysWithContext is an alternate form of the ListAPIKeys method which supports a Context parameter +func (iamIdentity *IamIdentityV1) ListAPIKeysWithContext(ctx context.Context, listAPIKeysOptions *ListAPIKeysOptions) (result *APIKeyList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAPIKeysOptions, "listAPIKeysOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/apikeys`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listAPIKeysOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "ListAPIKeys") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listAPIKeysOptions.AccountID != nil { + builder.AddQuery("account_id", fmt.Sprint(*listAPIKeysOptions.AccountID)) + } + if listAPIKeysOptions.IamID != nil { + builder.AddQuery("iam_id", fmt.Sprint(*listAPIKeysOptions.IamID)) + } + if listAPIKeysOptions.Pagesize != nil { + builder.AddQuery("pagesize", fmt.Sprint(*listAPIKeysOptions.Pagesize)) + } + if listAPIKeysOptions.Pagetoken != nil { + builder.AddQuery("pagetoken", fmt.Sprint(*listAPIKeysOptions.Pagetoken)) + } + if listAPIKeysOptions.Scope != nil { + builder.AddQuery("scope", fmt.Sprint(*listAPIKeysOptions.Scope)) + } + if listAPIKeysOptions.Type != nil { + builder.AddQuery("type", fmt.Sprint(*listAPIKeysOptions.Type)) + } + if listAPIKeysOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listAPIKeysOptions.Sort)) + } + if listAPIKeysOptions.Order != nil { + builder.AddQuery("order", fmt.Sprint(*listAPIKeysOptions.Order)) + } + if listAPIKeysOptions.IncludeHistory != nil { + builder.AddQuery("include_history", fmt.Sprint(*listAPIKeysOptions.IncludeHistory)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamIdentity.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAPIKeyList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateAPIKey : Create an API key +// Creates an API key for a UserID or service ID. Users can manage user API keys for themself, or service ID API keys +// for service IDs that are bound to an entity they have access to. 
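+//
+// Example (illustrative sketch): creating an API key, assuming the
+// CreateAPIKeyOptions builder defined later in this file takes the required
+// name and IAM ID; both values are placeholders.
+//
+//	createKeyOptions := iamIdentityService.NewCreateAPIKeyOptions("my-apikey", "<iam-id>")
+//	createKeyOptions.SetDescription("Example key")
+//	apiKey, response, err := iamIdentityService.CreateAPIKey(createKeyOptions)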
+func (iamIdentity *IamIdentityV1) CreateAPIKey(createAPIKeyOptions *CreateAPIKeyOptions) (result *APIKey, response *core.DetailedResponse, err error) { + return iamIdentity.CreateAPIKeyWithContext(context.Background(), createAPIKeyOptions) +} + +// CreateAPIKeyWithContext is an alternate form of the CreateAPIKey method which supports a Context parameter +func (iamIdentity *IamIdentityV1) CreateAPIKeyWithContext(ctx context.Context, createAPIKeyOptions *CreateAPIKeyOptions) (result *APIKey, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createAPIKeyOptions, "createAPIKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createAPIKeyOptions, "createAPIKeyOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/apikeys`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createAPIKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "CreateAPIKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createAPIKeyOptions.EntityLock != nil { + builder.AddHeader("Entity-Lock", fmt.Sprint(*createAPIKeyOptions.EntityLock)) + } + + body := make(map[string]interface{}) + if createAPIKeyOptions.Name != nil { + body["name"] = createAPIKeyOptions.Name + } + if createAPIKeyOptions.IamID != nil { + body["iam_id"] = createAPIKeyOptions.IamID + } + if createAPIKeyOptions.Description != nil { + body["description"] = createAPIKeyOptions.Description + } + if createAPIKeyOptions.AccountID != nil { + body["account_id"] = createAPIKeyOptions.AccountID + } + if createAPIKeyOptions.Apikey != nil { + body["apikey"] = createAPIKeyOptions.Apikey + } + if createAPIKeyOptions.StoreValue != nil { + body["store_value"] = createAPIKeyOptions.StoreValue + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamIdentity.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAPIKey) + if err != nil { + return + } + response.Result = result + + return +} + +// GetAPIKeysDetails : Get details of an API key by its value +// Returns the details of an API key by its value. Users can manage user API keys for themself, or service ID API keys +// for service IDs that are bound to an entity they have access to. 
+func (iamIdentity *IamIdentityV1) GetAPIKeysDetails(getAPIKeysDetailsOptions *GetAPIKeysDetailsOptions) (result *APIKey, response *core.DetailedResponse, err error) { + return iamIdentity.GetAPIKeysDetailsWithContext(context.Background(), getAPIKeysDetailsOptions) +} + +// GetAPIKeysDetailsWithContext is an alternate form of the GetAPIKeysDetails method which supports a Context parameter +func (iamIdentity *IamIdentityV1) GetAPIKeysDetailsWithContext(ctx context.Context, getAPIKeysDetailsOptions *GetAPIKeysDetailsOptions) (result *APIKey, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getAPIKeysDetailsOptions, "getAPIKeysDetailsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/apikeys/details`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getAPIKeysDetailsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "GetAPIKeysDetails") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getAPIKeysDetailsOptions.IamAPIKey != nil { + builder.AddHeader("IAM-ApiKey", fmt.Sprint(*getAPIKeysDetailsOptions.IamAPIKey)) + } + + if getAPIKeysDetailsOptions.IncludeHistory != nil { + builder.AddQuery("include_history", fmt.Sprint(*getAPIKeysDetailsOptions.IncludeHistory)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamIdentity.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAPIKey) + if err != nil { + return + } + response.Result = result + + return +} + +// GetAPIKey : Get details of an API key +// Returns the details of an API key. Users can manage user API keys for themself, or service ID API keys for service +// IDs that are bound to an entity they have access to. In case of service IDs and their API keys, a user must be +// either an account owner, a IBM Cloud org manager or IBM Cloud space developer in order to manage service IDs of the +// entity. 
+func (iamIdentity *IamIdentityV1) GetAPIKey(getAPIKeyOptions *GetAPIKeyOptions) (result *APIKey, response *core.DetailedResponse, err error) {
+	return iamIdentity.GetAPIKeyWithContext(context.Background(), getAPIKeyOptions)
+}
+
+// GetAPIKeyWithContext is an alternate form of the GetAPIKey method which supports a Context parameter
+func (iamIdentity *IamIdentityV1) GetAPIKeyWithContext(ctx context.Context, getAPIKeyOptions *GetAPIKeyOptions) (result *APIKey, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(getAPIKeyOptions, "getAPIKeyOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(getAPIKeyOptions, "getAPIKeyOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *getAPIKeyOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/apikeys/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getAPIKeyOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "GetAPIKey")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	if getAPIKeyOptions.IncludeHistory != nil {
+		builder.AddQuery("include_history", fmt.Sprint(*getAPIKeyOptions.IncludeHistory))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = iamIdentity.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAPIKey)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// UpdateAPIKey : Updates an API key
+// Updates properties of an API key. This does NOT affect existing access tokens. Their token content will stay
+// unchanged until the access token is refreshed. To update an API key, pass the property to be modified. To delete one
+// property's value, pass the property with an empty value "". Users can manage user API keys for themselves, or
+// service ID API keys for service IDs that are bound to an entity they have access to.
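+//
+// Example (illustrative sketch): renaming an API key with optimistic locking,
+// assuming the GetAPIKeyOptions and UpdateAPIKeyOptions builders defined later
+// in this file; the If-Match value comes from the ETag header of a prior read,
+// and the key ID is a placeholder.
+//
+//	_, getResponse, _ := iamIdentityService.GetAPIKey(
+//		iamIdentityService.NewGetAPIKeyOptions("<apikey-id>"))
+//	updateKeyOptions := iamIdentityService.NewUpdateAPIKeyOptions(
+//		"<apikey-id>", getResponse.GetHeaders().Get("ETag"))
+//	updateKeyOptions.SetName("my-apikey-renamed")
+//	apiKey, response, err := iamIdentityService.UpdateAPIKey(updateKeyOptions)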
+func (iamIdentity *IamIdentityV1) UpdateAPIKey(updateAPIKeyOptions *UpdateAPIKeyOptions) (result *APIKey, response *core.DetailedResponse, err error) { + return iamIdentity.UpdateAPIKeyWithContext(context.Background(), updateAPIKeyOptions) +} + +// UpdateAPIKeyWithContext is an alternate form of the UpdateAPIKey method which supports a Context parameter +func (iamIdentity *IamIdentityV1) UpdateAPIKeyWithContext(ctx context.Context, updateAPIKeyOptions *UpdateAPIKeyOptions) (result *APIKey, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateAPIKeyOptions, "updateAPIKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateAPIKeyOptions, "updateAPIKeyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateAPIKeyOptions.ID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/apikeys/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateAPIKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "UpdateAPIKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateAPIKeyOptions.IfMatch != nil { + builder.AddHeader("If-Match", fmt.Sprint(*updateAPIKeyOptions.IfMatch)) + } + + body := make(map[string]interface{}) + if updateAPIKeyOptions.Name != nil { + body["name"] = updateAPIKeyOptions.Name + } + if updateAPIKeyOptions.Description != nil { + body["description"] = updateAPIKeyOptions.Description + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamIdentity.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAPIKey) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteAPIKey : Deletes an API key +// Deletes an API key. Existing tokens will remain valid until expired. Users can manage user API keys for themself, or +// service ID API keys for service IDs that are bound to an entity they have access to. 
+func (iamIdentity *IamIdentityV1) DeleteAPIKey(deleteAPIKeyOptions *DeleteAPIKeyOptions) (response *core.DetailedResponse, err error) { + return iamIdentity.DeleteAPIKeyWithContext(context.Background(), deleteAPIKeyOptions) +} + +// DeleteAPIKeyWithContext is an alternate form of the DeleteAPIKey method which supports a Context parameter +func (iamIdentity *IamIdentityV1) DeleteAPIKeyWithContext(ctx context.Context, deleteAPIKeyOptions *DeleteAPIKeyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteAPIKeyOptions, "deleteAPIKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteAPIKeyOptions, "deleteAPIKeyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteAPIKeyOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/apikeys/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteAPIKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "DeleteAPIKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = iamIdentity.Service.Request(request, nil) + + return +} + +// LockAPIKey : Lock the API key +// Locks an API key by ID. Users can manage user API keys for themself, or service ID API keys for service IDs that are +// bound to an entity they have access to. In case of service IDs and their API keys, a user must be either an account +// owner, a IBM Cloud org manager or IBM Cloud space developer in order to manage service IDs of the entity. +func (iamIdentity *IamIdentityV1) LockAPIKey(lockAPIKeyOptions *LockAPIKeyOptions) (response *core.DetailedResponse, err error) { + return iamIdentity.LockAPIKeyWithContext(context.Background(), lockAPIKeyOptions) +} + +// LockAPIKeyWithContext is an alternate form of the LockAPIKey method which supports a Context parameter +func (iamIdentity *IamIdentityV1) LockAPIKeyWithContext(ctx context.Context, lockAPIKeyOptions *LockAPIKeyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(lockAPIKeyOptions, "lockAPIKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(lockAPIKeyOptions, "lockAPIKeyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *lockAPIKeyOptions.ID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/apikeys/{id}/lock`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range lockAPIKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "LockAPIKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = iamIdentity.Service.Request(request, nil) + + return +} + +// UnlockAPIKey : Unlock the API key +// Unlocks an API key by ID. 
Users can manage user API keys for themself, or service ID API keys for service IDs that +// are bound to an entity they have access to. In case of service IDs and their API keys, a user must be either an +// account owner, a IBM Cloud org manager or IBM Cloud space developer in order to manage service IDs of the entity. +func (iamIdentity *IamIdentityV1) UnlockAPIKey(unlockAPIKeyOptions *UnlockAPIKeyOptions) (response *core.DetailedResponse, err error) { + return iamIdentity.UnlockAPIKeyWithContext(context.Background(), unlockAPIKeyOptions) +} + +// UnlockAPIKeyWithContext is an alternate form of the UnlockAPIKey method which supports a Context parameter +func (iamIdentity *IamIdentityV1) UnlockAPIKeyWithContext(ctx context.Context, unlockAPIKeyOptions *UnlockAPIKeyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(unlockAPIKeyOptions, "unlockAPIKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(unlockAPIKeyOptions, "unlockAPIKeyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *unlockAPIKeyOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/apikeys/{id}/lock`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range unlockAPIKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "UnlockAPIKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = iamIdentity.Service.Request(request, nil) + + return +} + +// ListServiceIds : List service IDs +// Returns a list of service IDs. Users can manage user API keys for themself, or service ID API keys for service IDs +// that are bound to an entity they have access to. 
+func (iamIdentity *IamIdentityV1) ListServiceIds(listServiceIdsOptions *ListServiceIdsOptions) (result *ServiceIDList, response *core.DetailedResponse, err error) { + return iamIdentity.ListServiceIdsWithContext(context.Background(), listServiceIdsOptions) +} + +// ListServiceIdsWithContext is an alternate form of the ListServiceIds method which supports a Context parameter +func (iamIdentity *IamIdentityV1) ListServiceIdsWithContext(ctx context.Context, listServiceIdsOptions *ListServiceIdsOptions) (result *ServiceIDList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listServiceIdsOptions, "listServiceIdsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/serviceids/`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listServiceIdsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "ListServiceIds") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listServiceIdsOptions.AccountID != nil { + builder.AddQuery("account_id", fmt.Sprint(*listServiceIdsOptions.AccountID)) + } + if listServiceIdsOptions.Name != nil { + builder.AddQuery("name", fmt.Sprint(*listServiceIdsOptions.Name)) + } + if listServiceIdsOptions.Pagesize != nil { + builder.AddQuery("pagesize", fmt.Sprint(*listServiceIdsOptions.Pagesize)) + } + if listServiceIdsOptions.Pagetoken != nil { + builder.AddQuery("pagetoken", fmt.Sprint(*listServiceIdsOptions.Pagetoken)) + } + if listServiceIdsOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listServiceIdsOptions.Sort)) + } + if listServiceIdsOptions.Order != nil { + builder.AddQuery("order", fmt.Sprint(*listServiceIdsOptions.Order)) + } + if listServiceIdsOptions.IncludeHistory != nil { + builder.AddQuery("include_history", fmt.Sprint(*listServiceIdsOptions.IncludeHistory)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamIdentity.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalServiceIDList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateServiceID : Create a service ID +// Creates a service ID for an IBM Cloud account. Users can manage user API keys for themself, or service ID API keys +// for service IDs that are bound to an entity they have access to. 
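+//
+// Example (illustrative sketch): creating a service ID, assuming the
+// CreateServiceIDOptions builder defined later in this file takes the required
+// account ID and name; the account ID is a placeholder.
+//
+//	createServiceIDOptions := iamIdentityService.NewCreateServiceIDOptions(
+//		"<account-id>", "my-service-id")
+//	serviceID, response, err := iamIdentityService.CreateServiceID(createServiceIDOptions)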
+func (iamIdentity *IamIdentityV1) CreateServiceID(createServiceIDOptions *CreateServiceIDOptions) (result *ServiceID, response *core.DetailedResponse, err error) { + return iamIdentity.CreateServiceIDWithContext(context.Background(), createServiceIDOptions) +} + +// CreateServiceIDWithContext is an alternate form of the CreateServiceID method which supports a Context parameter +func (iamIdentity *IamIdentityV1) CreateServiceIDWithContext(ctx context.Context, createServiceIDOptions *CreateServiceIDOptions) (result *ServiceID, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createServiceIDOptions, "createServiceIDOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createServiceIDOptions, "createServiceIDOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/serviceids/`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createServiceIDOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "CreateServiceID") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createServiceIDOptions.EntityLock != nil { + builder.AddHeader("Entity-Lock", fmt.Sprint(*createServiceIDOptions.EntityLock)) + } + + body := make(map[string]interface{}) + if createServiceIDOptions.AccountID != nil { + body["account_id"] = createServiceIDOptions.AccountID + } + if createServiceIDOptions.Name != nil { + body["name"] = createServiceIDOptions.Name + } + if createServiceIDOptions.Description != nil { + body["description"] = createServiceIDOptions.Description + } + if createServiceIDOptions.UniqueInstanceCrns != nil { + body["unique_instance_crns"] = createServiceIDOptions.UniqueInstanceCrns + } + if createServiceIDOptions.Apikey != nil { + body["apikey"] = createServiceIDOptions.Apikey + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamIdentity.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalServiceID) + if err != nil { + return + } + response.Result = result + + return +} + +// GetServiceID : Get details of a service ID +// Returns the details of a service ID. Users can manage user API keys for themself, or service ID API keys for service +// IDs that are bound to an entity they have access to. 
+func (iamIdentity *IamIdentityV1) GetServiceID(getServiceIDOptions *GetServiceIDOptions) (result *ServiceID, response *core.DetailedResponse, err error) {
+	return iamIdentity.GetServiceIDWithContext(context.Background(), getServiceIDOptions)
+}
+
+// GetServiceIDWithContext is an alternate form of the GetServiceID method which supports a Context parameter
+func (iamIdentity *IamIdentityV1) GetServiceIDWithContext(ctx context.Context, getServiceIDOptions *GetServiceIDOptions) (result *ServiceID, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(getServiceIDOptions, "getServiceIDOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(getServiceIDOptions, "getServiceIDOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *getServiceIDOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/serviceids/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getServiceIDOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "GetServiceID")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	if getServiceIDOptions.IncludeHistory != nil {
+		builder.AddQuery("include_history", fmt.Sprint(*getServiceIDOptions.IncludeHistory))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = iamIdentity.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalServiceID)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// UpdateServiceID : Update service ID
+// Updates properties of a service ID. This does NOT affect existing access tokens. Their token content will stay
+// unchanged until the access token is refreshed. To update a service ID, pass the property to be modified. To delete
+// one property's value, pass the property with an empty value "". Users can manage user API keys for themselves, or
+// service ID API keys for service IDs that are bound to an entity they have access to.
+func (iamIdentity *IamIdentityV1) UpdateServiceID(updateServiceIDOptions *UpdateServiceIDOptions) (result *ServiceID, response *core.DetailedResponse, err error) {
+	return iamIdentity.UpdateServiceIDWithContext(context.Background(), updateServiceIDOptions)
+}
+
+// UpdateServiceIDWithContext is an alternate form of the UpdateServiceID method which supports a Context parameter
+func (iamIdentity *IamIdentityV1) UpdateServiceIDWithContext(ctx context.Context, updateServiceIDOptions *UpdateServiceIDOptions) (result *ServiceID, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(updateServiceIDOptions, "updateServiceIDOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(updateServiceIDOptions, "updateServiceIDOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *updateServiceIDOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/serviceids/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range updateServiceIDOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "UpdateServiceID")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+	if updateServiceIDOptions.IfMatch != nil {
+		builder.AddHeader("If-Match", fmt.Sprint(*updateServiceIDOptions.IfMatch))
+	}
+
+	body := make(map[string]interface{})
+	if updateServiceIDOptions.Name != nil {
+		body["name"] = updateServiceIDOptions.Name
+	}
+	if updateServiceIDOptions.Description != nil {
+		body["description"] = updateServiceIDOptions.Description
+	}
+	if updateServiceIDOptions.UniqueInstanceCrns != nil {
+		body["unique_instance_crns"] = updateServiceIDOptions.UniqueInstanceCrns
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = iamIdentity.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalServiceID)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// DeleteServiceID : Deletes a service ID and associated API keys
+// Deletes a service ID and all API keys associated with it. Before deleting the service ID, all associated API keys
+// are deleted. In case of a Delete Conflict (status code 409), a retry of the request may help, as the service ID is
+// only deleted if the associated API keys were successfully deleted beforehand. Users can manage user API keys for
+// themselves, or service ID API keys for service IDs that are bound to an entity they have access to.
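+//
+// Example (illustrative sketch): deleting a service ID with a single retry on
+// a 409 Delete Conflict, as suggested above; the DeleteServiceIDOptions
+// builder is assumed to be defined later in this file and the ID is a
+// placeholder.
+//
+//	deleteOptions := iamIdentityService.NewDeleteServiceIDOptions("<serviceid-id>")
+//	response, err := iamIdentityService.DeleteServiceID(deleteOptions)
+//	if response != nil && response.StatusCode == http.StatusConflict {
+//		response, err = iamIdentityService.DeleteServiceID(deleteOptions)
+//	}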
+func (iamIdentity *IamIdentityV1) DeleteServiceID(deleteServiceIDOptions *DeleteServiceIDOptions) (response *core.DetailedResponse, err error) {
+	return iamIdentity.DeleteServiceIDWithContext(context.Background(), deleteServiceIDOptions)
+}
+
+// DeleteServiceIDWithContext is an alternate form of the DeleteServiceID method which supports a Context parameter
+func (iamIdentity *IamIdentityV1) DeleteServiceIDWithContext(ctx context.Context, deleteServiceIDOptions *DeleteServiceIDOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteServiceIDOptions, "deleteServiceIDOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteServiceIDOptions, "deleteServiceIDOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *deleteServiceIDOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/serviceids/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteServiceIDOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "DeleteServiceID")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = iamIdentity.Service.Request(request, nil)
+
+	return
+}
+
+// LockServiceID : Lock the service ID
+// Locks a service ID by ID. Users can manage user API keys for themselves, or service ID API keys for service IDs
+// that are bound to an entity they have access to. In the case of service IDs and their API keys, a user must be
+// either an account owner, an IBM Cloud org manager, or an IBM Cloud space developer in order to manage service IDs
+// of the entity.
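+//
+// Usage sketch (placeholder names):
+//
+//	response, err := iamIdentityService.LockServiceID(
+//		iamIdentityService.NewLockServiceIDOptions(serviceIDValue))
+//	// On success, the service ID is locked against further write operations.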
+func (iamIdentity *IamIdentityV1) LockServiceID(lockServiceIDOptions *LockServiceIDOptions) (response *core.DetailedResponse, err error) {
+	return iamIdentity.LockServiceIDWithContext(context.Background(), lockServiceIDOptions)
+}
+
+// LockServiceIDWithContext is an alternate form of the LockServiceID method which supports a Context parameter
+func (iamIdentity *IamIdentityV1) LockServiceIDWithContext(ctx context.Context, lockServiceIDOptions *LockServiceIDOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(lockServiceIDOptions, "lockServiceIDOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(lockServiceIDOptions, "lockServiceIDOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *lockServiceIDOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.POST)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/serviceids/{id}/lock`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range lockServiceIDOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "LockServiceID")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = iamIdentity.Service.Request(request, nil)
+
+	return
+}
+
+// UnlockServiceID : Unlock the service ID
+// Unlocks a service ID by ID. Users can manage user API keys for themselves, or service ID API keys for service IDs
+// that are bound to an entity they have access to. In the case of service IDs and their API keys, a user must be
+// either an account owner, an IBM Cloud org manager, or an IBM Cloud space developer in order to manage service IDs
+// of the entity.
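+//
+// Usage sketch (placeholder names; mirrors the LockServiceID example above):
+//
+//	response, err := iamIdentityService.UnlockServiceID(
+//		iamIdentityService.NewUnlockServiceIDOptions(serviceIDValue))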
+func (iamIdentity *IamIdentityV1) UnlockServiceID(unlockServiceIDOptions *UnlockServiceIDOptions) (response *core.DetailedResponse, err error) { + return iamIdentity.UnlockServiceIDWithContext(context.Background(), unlockServiceIDOptions) +} + +// UnlockServiceIDWithContext is an alternate form of the UnlockServiceID method which supports a Context parameter +func (iamIdentity *IamIdentityV1) UnlockServiceIDWithContext(ctx context.Context, unlockServiceIDOptions *UnlockServiceIDOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(unlockServiceIDOptions, "unlockServiceIDOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(unlockServiceIDOptions, "unlockServiceIDOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *unlockServiceIDOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/serviceids/{id}/lock`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range unlockServiceIDOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "UnlockServiceID") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = iamIdentity.Service.Request(request, nil) + + return +} + +// GetAccountSettings : Get account configurations +// Returns the details of an account's configuration. +func (iamIdentity *IamIdentityV1) GetAccountSettings(getAccountSettingsOptions *GetAccountSettingsOptions) (result *AccountSettingsResponse, response *core.DetailedResponse, err error) { + return iamIdentity.GetAccountSettingsWithContext(context.Background(), getAccountSettingsOptions) +} + +// GetAccountSettingsWithContext is an alternate form of the GetAccountSettings method which supports a Context parameter +func (iamIdentity *IamIdentityV1) GetAccountSettingsWithContext(ctx context.Context, getAccountSettingsOptions *GetAccountSettingsOptions) (result *AccountSettingsResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getAccountSettingsOptions, "getAccountSettingsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getAccountSettingsOptions, "getAccountSettingsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "account_id": *getAccountSettingsOptions.AccountID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/accounts/{account_id}/settings/identity`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getAccountSettingsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "GetAccountSettings") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getAccountSettingsOptions.IncludeHistory != nil { + builder.AddQuery("include_history", fmt.Sprint(*getAccountSettingsOptions.IncludeHistory)) + } + + request, err := 
builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamIdentity.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAccountSettingsResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateAccountSettings : Update account configurations +// Allows a user to configure settings on their account with regards to MFA, session lifetimes, access control for +// creating new identities, and enforcing IP restrictions on token creation. +func (iamIdentity *IamIdentityV1) UpdateAccountSettings(updateAccountSettingsOptions *UpdateAccountSettingsOptions) (result *AccountSettingsResponse, response *core.DetailedResponse, err error) { + return iamIdentity.UpdateAccountSettingsWithContext(context.Background(), updateAccountSettingsOptions) +} + +// UpdateAccountSettingsWithContext is an alternate form of the UpdateAccountSettings method which supports a Context parameter +func (iamIdentity *IamIdentityV1) UpdateAccountSettingsWithContext(ctx context.Context, updateAccountSettingsOptions *UpdateAccountSettingsOptions) (result *AccountSettingsResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateAccountSettingsOptions, "updateAccountSettingsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateAccountSettingsOptions, "updateAccountSettingsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "account_id": *updateAccountSettingsOptions.AccountID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamIdentity.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamIdentity.Service.Options.URL, `/v1/accounts/{account_id}/settings/identity`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateAccountSettingsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_identity", "V1", "UpdateAccountSettings") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateAccountSettingsOptions.IfMatch != nil { + builder.AddHeader("If-Match", fmt.Sprint(*updateAccountSettingsOptions.IfMatch)) + } + + body := make(map[string]interface{}) + if updateAccountSettingsOptions.RestrictCreateServiceID != nil { + body["restrict_create_service_id"] = updateAccountSettingsOptions.RestrictCreateServiceID + } + if updateAccountSettingsOptions.RestrictCreatePlatformApikey != nil { + body["restrict_create_platform_apikey"] = updateAccountSettingsOptions.RestrictCreatePlatformApikey + } + if updateAccountSettingsOptions.AllowedIPAddresses != nil { + body["allowed_ip_addresses"] = updateAccountSettingsOptions.AllowedIPAddresses + } + if updateAccountSettingsOptions.Mfa != nil { + body["mfa"] = updateAccountSettingsOptions.Mfa + } + if updateAccountSettingsOptions.SessionExpirationInSeconds != nil { + body["session_expiration_in_seconds"] = updateAccountSettingsOptions.SessionExpirationInSeconds + } + if updateAccountSettingsOptions.SessionInvalidationInSeconds != nil { + body["session_invalidation_in_seconds"] = updateAccountSettingsOptions.SessionInvalidationInSeconds + } + if updateAccountSettingsOptions.MaxSessionsPerIdentity != nil 
{ + body["max_sessions_per_identity"] = updateAccountSettingsOptions.MaxSessionsPerIdentity + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamIdentity.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAccountSettingsResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// AccountSettingsResponse : Response body format for Account Settings REST requests. +type AccountSettingsResponse struct { + // Context with key properties for problem determination. + Context *ResponseContext `json:"context,omitempty"` + + // Unique ID of the account. + AccountID *string `json:"account_id" validate:"required"` + + // Defines whether or not creating a Service Id is access controlled. Valid values: + // * RESTRICTED - to apply access control + // * NOT_RESTRICTED - to remove access control + // * NOT_SET - to 'unset' a previous set value. + RestrictCreateServiceID *string `json:"restrict_create_service_id" validate:"required"` + + // Defines whether or not creating platform API keys is access controlled. Valid values: + // * RESTRICTED - to apply access control + // * NOT_RESTRICTED - to remove access control + // * NOT_SET - to 'unset' a previous set value. + RestrictCreatePlatformApikey *string `json:"restrict_create_platform_apikey" validate:"required"` + + // Defines the IP addresses and subnets from which IAM tokens can be created for the account. + AllowedIPAddresses *string `json:"allowed_ip_addresses" validate:"required"` + + // Version of the account settings. + EntityTag *string `json:"entity_tag" validate:"required"` + + // Defines the MFA trait for the account. Valid values: + // * NONE - No MFA trait set + // * TOTP - For all non-federated IBMId users + // * TOTP4ALL - For all users + // * LEVEL1 - Email-based MFA for all users + // * LEVEL2 - TOTP-based MFA for all users + // * LEVEL3 - U2F MFA for all users. + Mfa *string `json:"mfa" validate:"required"` + + // History of the Account Settings. + History []EnityHistoryRecord `json:"history,omitempty"` + + // Defines the session expiration in seconds for the account. Valid values: + // * Any whole number between between '900' and '86400' + // * NOT_SET - To unset account setting and use service default. + SessionExpirationInSeconds *string `json:"session_expiration_in_seconds" validate:"required"` + + // Defines the period of time in seconds in which a session will be invalidated due to inactivity. Valid values: + // * Any whole number between '900' and '7200' + // * NOT_SET - To unset account setting and use service default. + SessionInvalidationInSeconds *string `json:"session_invalidation_in_seconds" validate:"required"` + + // Defines the max allowed sessions per identity required by the account. Valid values: + // * Any whole number greater than 0 + // * NOT_SET - To unset account setting and use service default. + MaxSessionsPerIdentity *string `json:"max_sessions_per_identity" validate:"required"` +} + +// Constants associated with the AccountSettingsResponse.RestrictCreateServiceID property. +// Defines whether or not creating a Service Id is access controlled. Valid values: +// * RESTRICTED - to apply access control +// * NOT_RESTRICTED - to remove access control +// * NOT_SET - to 'unset' a previous set value. 
+const ( + AccountSettingsResponseRestrictCreateServiceIDNotRestrictedConst = "NOT_RESTRICTED" + AccountSettingsResponseRestrictCreateServiceIDNotSetConst = "NOT_SET" + AccountSettingsResponseRestrictCreateServiceIDRestrictedConst = "RESTRICTED" +) + +// Constants associated with the AccountSettingsResponse.RestrictCreatePlatformApikey property. +// Defines whether or not creating platform API keys is access controlled. Valid values: +// * RESTRICTED - to apply access control +// * NOT_RESTRICTED - to remove access control +// * NOT_SET - to 'unset' a previous set value. +const ( + AccountSettingsResponseRestrictCreatePlatformApikeyNotRestrictedConst = "NOT_RESTRICTED" + AccountSettingsResponseRestrictCreatePlatformApikeyNotSetConst = "NOT_SET" + AccountSettingsResponseRestrictCreatePlatformApikeyRestrictedConst = "RESTRICTED" +) + +// Constants associated with the AccountSettingsResponse.Mfa property. +// Defines the MFA trait for the account. Valid values: +// * NONE - No MFA trait set +// * TOTP - For all non-federated IBMId users +// * TOTP4ALL - For all users +// * LEVEL1 - Email-based MFA for all users +// * LEVEL2 - TOTP-based MFA for all users +// * LEVEL3 - U2F MFA for all users. +const ( + AccountSettingsResponseMfaLevel1Const = "LEVEL1" + AccountSettingsResponseMfaLevel2Const = "LEVEL2" + AccountSettingsResponseMfaLevel3Const = "LEVEL3" + AccountSettingsResponseMfaNoneConst = "NONE" + AccountSettingsResponseMfaTotpConst = "TOTP" + AccountSettingsResponseMfaTotp4allConst = "TOTP4ALL" +) + +// UnmarshalAccountSettingsResponse unmarshals an instance of AccountSettingsResponse from the specified map of raw messages. +func UnmarshalAccountSettingsResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AccountSettingsResponse) + err = core.UnmarshalModel(m, "context", &obj.Context, UnmarshalResponseContext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "account_id", &obj.AccountID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "restrict_create_service_id", &obj.RestrictCreateServiceID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "restrict_create_platform_apikey", &obj.RestrictCreatePlatformApikey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "allowed_ip_addresses", &obj.AllowedIPAddresses) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "entity_tag", &obj.EntityTag) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mfa", &obj.Mfa) + if err != nil { + return + } + err = core.UnmarshalModel(m, "history", &obj.History, UnmarshalEnityHistoryRecord) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "session_expiration_in_seconds", &obj.SessionExpirationInSeconds) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "session_invalidation_in_seconds", &obj.SessionInvalidationInSeconds) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "max_sessions_per_identity", &obj.MaxSessionsPerIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// APIKey : Response body format for API key V1 REST requests. +type APIKey struct { + // Context with key properties for problem determination. + Context *ResponseContext `json:"context,omitempty"` + + // Unique identifier of this API Key. + ID *string `json:"id" validate:"required"` + + // Version of the API Key details object. 
You need to specify this value when updating the API key to avoid stale + // updates. + EntityTag *string `json:"entity_tag,omitempty"` + + // Cloud Resource Name of the item. Example Cloud Resource Name: + // 'crn:v1:bluemix:public:iam-identity:us-south:a/myaccount::apikey:1234-9012-5678'. + CRN *string `json:"crn" validate:"required"` + + // The API key cannot be changed if set to true. + Locked *bool `json:"locked" validate:"required"` + + // If set contains a date time string of the creation date in ISO format. + CreatedAt *strfmt.DateTime `json:"created_at,omitempty"` + + // IAM ID of the user or service which created the API key. + CreatedBy *string `json:"created_by" validate:"required"` + + // If set contains a date time string of the last modification date in ISO format. + ModifiedAt *strfmt.DateTime `json:"modified_at,omitempty"` + + // Name of the API key. The name is not checked for uniqueness. Therefore multiple names with the same value can exist. + // Access is done via the UUID of the API key. + Name *string `json:"name" validate:"required"` + + // The optional description of the API key. The 'description' property is only available if a description was provided + // during a create of an API key. + Description *string `json:"description,omitempty"` + + // The iam_id that this API key authenticates. + IamID *string `json:"iam_id" validate:"required"` + + // ID of the account that this API key authenticates for. + AccountID *string `json:"account_id" validate:"required"` + + // The API key value. This property only contains the API key value for the following cases: create an API key, update + // a service ID API key that stores the API key value as retrievable, or get a service ID API key that stores the API + // key value as retrievable. All other operations don't return the API key value, for example all user API key related + // operations, except for create, don't contain the API key value. + Apikey *string `json:"apikey" validate:"required"` + + // History of the API key. + History []EnityHistoryRecord `json:"history,omitempty"` +} + +// UnmarshalAPIKey unmarshals an instance of APIKey from the specified map of raw messages. 
+func UnmarshalAPIKey(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(APIKey) + err = core.UnmarshalModel(m, "context", &obj.Context, UnmarshalResponseContext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "entity_tag", &obj.EntityTag) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked", &obj.Locked) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_at", &obj.ModifiedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iam_id", &obj.IamID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "account_id", &obj.AccountID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "apikey", &obj.Apikey) + if err != nil { + return + } + err = core.UnmarshalModel(m, "history", &obj.History, UnmarshalEnityHistoryRecord) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// APIKeyInsideCreateServiceIDRequest : Parameters for the API key in the Create service Id V1 REST request. +type APIKeyInsideCreateServiceIDRequest struct { + // Name of the API key. The name is not checked for uniqueness. Therefore multiple names with the same value can exist. + // Access is done via the UUID of the API key. + Name *string `json:"name" validate:"required"` + + // The optional description of the API key. The 'description' property is only available if a description was provided + // during a create of an API key. + Description *string `json:"description,omitempty"` + + // You can optionally passthrough the API key value for this API key. If passed, NO validation of that apiKey value is + // done, i.e. the value can be non-URL safe. If omitted, the API key management will create an URL safe opaque API key + // value. The value of the API key is checked for uniqueness. Please ensure enough variations when passing in this + // value. + Apikey *string `json:"apikey,omitempty"` + + // Send true or false to set whether the API key value is retrievable in the future by using the Get details of an API + // key request. If you create an API key for a user, you must specify `false` or omit the value. We don't allow storing + // of API keys for users. + StoreValue *bool `json:"store_value,omitempty"` +} + +// NewAPIKeyInsideCreateServiceIDRequest : Instantiate APIKeyInsideCreateServiceIDRequest (Generic Model Constructor) +func (*IamIdentityV1) NewAPIKeyInsideCreateServiceIDRequest(name string) (model *APIKeyInsideCreateServiceIDRequest, err error) { + model = &APIKeyInsideCreateServiceIDRequest{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalAPIKeyInsideCreateServiceIDRequest unmarshals an instance of APIKeyInsideCreateServiceIDRequest from the specified map of raw messages. 
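+//
+// On the request side, this model is typically built with the constructor above and
+// attached to a CreateServiceIDOptions, the companion options type for the service's
+// CreateServiceID call (sketch with placeholder names):
+//
+//	apikeyModel, _ := iamIdentityService.NewAPIKeyInsideCreateServiceIDRequest("my-serviceid-apikey")
+//	opts := iamIdentityService.NewCreateServiceIDOptions(accountIDValue, "my-serviceid")
+//	opts.SetApikey(apikeyModel)
+//	serviceID, response, err := iamIdentityService.CreateServiceID(opts)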
+func UnmarshalAPIKeyInsideCreateServiceIDRequest(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(APIKeyInsideCreateServiceIDRequest) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "apikey", &obj.Apikey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "store_value", &obj.StoreValue) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// APIKeyList : Response body format for the List API keys V1 REST request. +type APIKeyList struct { + // Context with key properties for problem determination. + Context *ResponseContext `json:"context,omitempty"` + + // The offset of the current page. + Offset *int64 `json:"offset,omitempty"` + + // Optional size of a single page. Default is 20 items per page. Valid range is 1 to 100. + Limit *int64 `json:"limit,omitempty"` + + // Link to the first page. + First *string `json:"first,omitempty"` + + // Link to the previous available page. If 'previous' property is not part of the response no previous page is + // available. + Previous *string `json:"previous,omitempty"` + + // Link to the next available page. If 'next' property is not part of the response no next page is available. + Next *string `json:"next,omitempty"` + + // List of API keys based on the query paramters and the page size. The apikeys array is always part of the response + // but might be empty depending on the query parameters values provided. + Apikeys []APIKey `json:"apikeys" validate:"required"` +} + +// UnmarshalAPIKeyList unmarshals an instance of APIKeyList from the specified map of raw messages. +func UnmarshalAPIKeyList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(APIKeyList) + err = core.UnmarshalModel(m, "context", &obj.Context, UnmarshalResponseContext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "first", &obj.First) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "previous", &obj.Previous) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next", &obj.Next) + if err != nil { + return + } + err = core.UnmarshalModel(m, "apikeys", &obj.Apikeys, UnmarshalAPIKey) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CreateAPIKeyOptions : The CreateAPIKey options. +type CreateAPIKeyOptions struct { + // Name of the API key. The name is not checked for uniqueness. Therefore multiple names with the same value can exist. + // Access is done via the UUID of the API key. + Name *string `validate:"required"` + + // The iam_id that this API key authenticates. + IamID *string `validate:"required"` + + // The optional description of the API key. The 'description' property is only available if a description was provided + // during a create of an API key. + Description *string + + // The account ID of the API key. + AccountID *string + + // You can optionally passthrough the API key value for this API key. If passed, NO validation of that apiKey value is + // done, i.e. the value can be non-URL safe. If omitted, the API key management will create an URL safe opaque API key + // value. 
The value of the API key is checked for uniqueness. Please ensure enough variations when passing in this + // value. + Apikey *string + + // Send true or false to set whether the API key value is retrievable in the future by using the Get details of an API + // key request. If you create an API key for a user, you must specify `false` or omit the value. We don't allow storing + // of API keys for users. + StoreValue *bool + + // Indicates if the API key is locked for further write operations. False by default. + EntityLock *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateAPIKeyOptions : Instantiate CreateAPIKeyOptions +func (*IamIdentityV1) NewCreateAPIKeyOptions(name string, iamID string) *CreateAPIKeyOptions { + return &CreateAPIKeyOptions{ + Name: core.StringPtr(name), + IamID: core.StringPtr(iamID), + } +} + +// SetName : Allow user to set Name +func (options *CreateAPIKeyOptions) SetName(name string) *CreateAPIKeyOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetIamID : Allow user to set IamID +func (options *CreateAPIKeyOptions) SetIamID(iamID string) *CreateAPIKeyOptions { + options.IamID = core.StringPtr(iamID) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateAPIKeyOptions) SetDescription(description string) *CreateAPIKeyOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetAccountID : Allow user to set AccountID +func (options *CreateAPIKeyOptions) SetAccountID(accountID string) *CreateAPIKeyOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetApikey : Allow user to set Apikey +func (options *CreateAPIKeyOptions) SetApikey(apikey string) *CreateAPIKeyOptions { + options.Apikey = core.StringPtr(apikey) + return options +} + +// SetStoreValue : Allow user to set StoreValue +func (options *CreateAPIKeyOptions) SetStoreValue(storeValue bool) *CreateAPIKeyOptions { + options.StoreValue = core.BoolPtr(storeValue) + return options +} + +// SetEntityLock : Allow user to set EntityLock +func (options *CreateAPIKeyOptions) SetEntityLock(entityLock string) *CreateAPIKeyOptions { + options.EntityLock = core.StringPtr(entityLock) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateAPIKeyOptions) SetHeaders(param map[string]string) *CreateAPIKeyOptions { + options.Headers = param + return options +} + +// CreateServiceIDOptions : The CreateServiceID options. +type CreateServiceIDOptions struct { + // ID of the account the service ID belongs to. + AccountID *string `validate:"required"` + + // Name of the Service Id. The name is not checked for uniqueness. Therefore multiple names with the same value can + // exist. Access is done via the UUID of the Service Id. + Name *string `validate:"required"` + + // The optional description of the Service Id. The 'description' property is only available if a description was + // provided during a create of a Service Id. + Description *string + + // Optional list of CRNs (string array) which point to the services connected to the service ID. + UniqueInstanceCrns []string + + // Parameters for the API key in the Create service Id V1 REST request. + Apikey *APIKeyInsideCreateServiceIDRequest + + // Indicates if the service ID is locked for further write operations. False by default. 
+ EntityLock *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateServiceIDOptions : Instantiate CreateServiceIDOptions +func (*IamIdentityV1) NewCreateServiceIDOptions(accountID string, name string) *CreateServiceIDOptions { + return &CreateServiceIDOptions{ + AccountID: core.StringPtr(accountID), + Name: core.StringPtr(name), + } +} + +// SetAccountID : Allow user to set AccountID +func (options *CreateServiceIDOptions) SetAccountID(accountID string) *CreateServiceIDOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetName : Allow user to set Name +func (options *CreateServiceIDOptions) SetName(name string) *CreateServiceIDOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateServiceIDOptions) SetDescription(description string) *CreateServiceIDOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetUniqueInstanceCrns : Allow user to set UniqueInstanceCrns +func (options *CreateServiceIDOptions) SetUniqueInstanceCrns(uniqueInstanceCrns []string) *CreateServiceIDOptions { + options.UniqueInstanceCrns = uniqueInstanceCrns + return options +} + +// SetApikey : Allow user to set Apikey +func (options *CreateServiceIDOptions) SetApikey(apikey *APIKeyInsideCreateServiceIDRequest) *CreateServiceIDOptions { + options.Apikey = apikey + return options +} + +// SetEntityLock : Allow user to set EntityLock +func (options *CreateServiceIDOptions) SetEntityLock(entityLock string) *CreateServiceIDOptions { + options.EntityLock = core.StringPtr(entityLock) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateServiceIDOptions) SetHeaders(param map[string]string) *CreateServiceIDOptions { + options.Headers = param + return options +} + +// DeleteAPIKeyOptions : The DeleteAPIKey options. +type DeleteAPIKeyOptions struct { + // Unique ID of the API key. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteAPIKeyOptions : Instantiate DeleteAPIKeyOptions +func (*IamIdentityV1) NewDeleteAPIKeyOptions(id string) *DeleteAPIKeyOptions { + return &DeleteAPIKeyOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteAPIKeyOptions) SetID(id string) *DeleteAPIKeyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteAPIKeyOptions) SetHeaders(param map[string]string) *DeleteAPIKeyOptions { + options.Headers = param + return options +} + +// DeleteServiceIDOptions : The DeleteServiceID options. +type DeleteServiceIDOptions struct { + // Unique ID of the service ID. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteServiceIDOptions : Instantiate DeleteServiceIDOptions +func (*IamIdentityV1) NewDeleteServiceIDOptions(id string) *DeleteServiceIDOptions { + return &DeleteServiceIDOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteServiceIDOptions) SetID(id string) *DeleteServiceIDOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteServiceIDOptions) SetHeaders(param map[string]string) *DeleteServiceIDOptions { + options.Headers = param + return options +} + +// EnityHistoryRecord : Response body format for an entity history record. +type EnityHistoryRecord struct { + // Timestamp when the action was triggered. + Timestamp *string `json:"timestamp" validate:"required"` + + // IAM ID of the identity which triggered the action. + IamID *string `json:"iam_id" validate:"required"` + + // Account of the identity which triggered the action. + IamIDAccount *string `json:"iam_id_account" validate:"required"` + + // Action of the history entry. + Action *string `json:"action" validate:"required"` + + // Params of the history entry. + Params []string `json:"params" validate:"required"` + + // Message which summarizes the executed action. + Message *string `json:"message" validate:"required"` +} + +// UnmarshalEnityHistoryRecord unmarshals an instance of EnityHistoryRecord from the specified map of raw messages. +func UnmarshalEnityHistoryRecord(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(EnityHistoryRecord) + err = core.UnmarshalPrimitive(m, "timestamp", &obj.Timestamp) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iam_id", &obj.IamID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iam_id_account", &obj.IamIDAccount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "params", &obj.Params) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "message", &obj.Message) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetAccountSettingsOptions : The GetAccountSettings options. +type GetAccountSettingsOptions struct { + // Unique ID of the account. + AccountID *string `validate:"required,ne="` + + // Defines if the entity history is included in the response. 
+ IncludeHistory *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAccountSettingsOptions : Instantiate GetAccountSettingsOptions +func (*IamIdentityV1) NewGetAccountSettingsOptions(accountID string) *GetAccountSettingsOptions { + return &GetAccountSettingsOptions{ + AccountID: core.StringPtr(accountID), + } +} + +// SetAccountID : Allow user to set AccountID +func (options *GetAccountSettingsOptions) SetAccountID(accountID string) *GetAccountSettingsOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetIncludeHistory : Allow user to set IncludeHistory +func (options *GetAccountSettingsOptions) SetIncludeHistory(includeHistory bool) *GetAccountSettingsOptions { + options.IncludeHistory = core.BoolPtr(includeHistory) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetAccountSettingsOptions) SetHeaders(param map[string]string) *GetAccountSettingsOptions { + options.Headers = param + return options +} + +// GetAPIKeyOptions : The GetAPIKey options. +type GetAPIKeyOptions struct { + // Unique ID of the API key. + ID *string `validate:"required,ne="` + + // Defines if the entity history is included in the response. + IncludeHistory *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAPIKeyOptions : Instantiate GetAPIKeyOptions +func (*IamIdentityV1) NewGetAPIKeyOptions(id string) *GetAPIKeyOptions { + return &GetAPIKeyOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetAPIKeyOptions) SetID(id string) *GetAPIKeyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetIncludeHistory : Allow user to set IncludeHistory +func (options *GetAPIKeyOptions) SetIncludeHistory(includeHistory bool) *GetAPIKeyOptions { + options.IncludeHistory = core.BoolPtr(includeHistory) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetAPIKeyOptions) SetHeaders(param map[string]string) *GetAPIKeyOptions { + options.Headers = param + return options +} + +// GetAPIKeysDetailsOptions : The GetAPIKeysDetails options. +type GetAPIKeysDetailsOptions struct { + // API key value. + IamAPIKey *string + + // Defines if the entity history is included in the response. + IncludeHistory *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetAPIKeysDetailsOptions : Instantiate GetAPIKeysDetailsOptions +func (*IamIdentityV1) NewGetAPIKeysDetailsOptions() *GetAPIKeysDetailsOptions { + return &GetAPIKeysDetailsOptions{} +} + +// SetIamAPIKey : Allow user to set IamAPIKey +func (options *GetAPIKeysDetailsOptions) SetIamAPIKey(iamAPIKey string) *GetAPIKeysDetailsOptions { + options.IamAPIKey = core.StringPtr(iamAPIKey) + return options +} + +// SetIncludeHistory : Allow user to set IncludeHistory +func (options *GetAPIKeysDetailsOptions) SetIncludeHistory(includeHistory bool) *GetAPIKeysDetailsOptions { + options.IncludeHistory = core.BoolPtr(includeHistory) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetAPIKeysDetailsOptions) SetHeaders(param map[string]string) *GetAPIKeysDetailsOptions { + options.Headers = param + return options +} + +// GetServiceIDOptions : The GetServiceID options. +type GetServiceIDOptions struct { + // Unique ID of the service ID. + ID *string `validate:"required,ne="` + + // Defines if the entity history is included in the response. 
+ IncludeHistory *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetServiceIDOptions : Instantiate GetServiceIDOptions +func (*IamIdentityV1) NewGetServiceIDOptions(id string) *GetServiceIDOptions { + return &GetServiceIDOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetServiceIDOptions) SetID(id string) *GetServiceIDOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetIncludeHistory : Allow user to set IncludeHistory +func (options *GetServiceIDOptions) SetIncludeHistory(includeHistory bool) *GetServiceIDOptions { + options.IncludeHistory = core.BoolPtr(includeHistory) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetServiceIDOptions) SetHeaders(param map[string]string) *GetServiceIDOptions { + options.Headers = param + return options +} + +// ListAPIKeysOptions : The ListAPIKeys options. +type ListAPIKeysOptions struct { + // Account ID of the API keys(s) to query. If a service IAM ID is specified in iam_id then account_id must match the + // account of the IAM ID. If a user IAM ID is specified in iam_id then then account_id must match the account of the + // Authorization token. + AccountID *string + + // IAM ID of the API key(s) to be queried. The IAM ID may be that of a user or a service. For a user IAM ID iam_id must + // match the Authorization token. + IamID *string + + // Optional size of a single page. Default is 20 items per page. Valid range is 1 to 100. + Pagesize *int64 + + // Optional Prev or Next page token returned from a previous query execution. Default is start with first page. + Pagetoken *string + + // Optional parameter to define the scope of the queried API Keys. Can be 'entity' (default) or 'account'. + Scope *string + + // Optional parameter to filter the type of the queried API Keys. Can be 'user' or 'serviceid'. + Type *string + + // Optional sort property, valid values are name, description, created_at and created_by. If specified, the items are + // sorted by the value of this property. + Sort *string + + // Optional sort order, valid values are asc and desc. Default: asc. + Order *string + + // Defines if the entity history is included in the response. + IncludeHistory *bool + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListAPIKeysOptions.Scope property. +// Optional parameter to define the scope of the queried API Keys. Can be 'entity' (default) or 'account'. +const ( + ListAPIKeysOptionsScopeAccountConst = "account" + ListAPIKeysOptionsScopeEntityConst = "entity" +) + +// Constants associated with the ListAPIKeysOptions.Type property. +// Optional parameter to filter the type of the queried API Keys. Can be 'user' or 'serviceid'. +const ( + ListAPIKeysOptionsTypeServiceidConst = "serviceid" + ListAPIKeysOptionsTypeUserConst = "user" +) + +// Constants associated with the ListAPIKeysOptions.Order property. +// Optional sort order, valid values are asc and desc. Default: asc. 
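+//
+// Example query (sketch; iamIdentityService and accountIDValue are placeholders,
+// and ListAPIKeys is the companion service method for these options):
+//
+//	opts := iamIdentityService.NewListAPIKeysOptions().
+//		SetAccountID(accountIDValue).
+//		SetScope(ListAPIKeysOptionsScopeAccountConst).
+//		SetOrder(ListAPIKeysOptionsOrderDescConst)
+//	apiKeyList, response, err := iamIdentityService.ListAPIKeys(opts)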
+const (
+	ListAPIKeysOptionsOrderAscConst  = "asc"
+	ListAPIKeysOptionsOrderDescConst = "desc"
+)
+
+// NewListAPIKeysOptions : Instantiate ListAPIKeysOptions
+func (*IamIdentityV1) NewListAPIKeysOptions() *ListAPIKeysOptions {
+	return &ListAPIKeysOptions{}
+}
+
+// SetAccountID : Allow user to set AccountID
+func (options *ListAPIKeysOptions) SetAccountID(accountID string) *ListAPIKeysOptions {
+	options.AccountID = core.StringPtr(accountID)
+	return options
+}
+
+// SetIamID : Allow user to set IamID
+func (options *ListAPIKeysOptions) SetIamID(iamID string) *ListAPIKeysOptions {
+	options.IamID = core.StringPtr(iamID)
+	return options
+}
+
+// SetPagesize : Allow user to set Pagesize
+func (options *ListAPIKeysOptions) SetPagesize(pagesize int64) *ListAPIKeysOptions {
+	options.Pagesize = core.Int64Ptr(pagesize)
+	return options
+}
+
+// SetPagetoken : Allow user to set Pagetoken
+func (options *ListAPIKeysOptions) SetPagetoken(pagetoken string) *ListAPIKeysOptions {
+	options.Pagetoken = core.StringPtr(pagetoken)
+	return options
+}
+
+// SetScope : Allow user to set Scope
+func (options *ListAPIKeysOptions) SetScope(scope string) *ListAPIKeysOptions {
+	options.Scope = core.StringPtr(scope)
+	return options
+}
+
+// SetType : Allow user to set Type
+func (options *ListAPIKeysOptions) SetType(typeVar string) *ListAPIKeysOptions {
+	options.Type = core.StringPtr(typeVar)
+	return options
+}
+
+// SetSort : Allow user to set Sort
+func (options *ListAPIKeysOptions) SetSort(sort string) *ListAPIKeysOptions {
+	options.Sort = core.StringPtr(sort)
+	return options
+}
+
+// SetOrder : Allow user to set Order
+func (options *ListAPIKeysOptions) SetOrder(order string) *ListAPIKeysOptions {
+	options.Order = core.StringPtr(order)
+	return options
+}
+
+// SetIncludeHistory : Allow user to set IncludeHistory
+func (options *ListAPIKeysOptions) SetIncludeHistory(includeHistory bool) *ListAPIKeysOptions {
+	options.IncludeHistory = core.BoolPtr(includeHistory)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListAPIKeysOptions) SetHeaders(param map[string]string) *ListAPIKeysOptions {
+	options.Headers = param
+	return options
+}
+
+// ListServiceIdsOptions : The ListServiceIds options.
+type ListServiceIdsOptions struct {
+	// Account ID of the service ID(s) to query. This parameter is required (unless using a pagetoken).
+	AccountID *string
+
+	// Name of the service ID(s) to query. Optional.
+	Name *string
+
+	// Optional size of a single page. Default is 20 items per page. Valid range is 1 to 100.
+	Pagesize *int64
+
+	// Optional Prev or Next page token returned from a previous query execution. Default is start with first page.
+	Pagetoken *string
+
+	// Optional sort property, valid values are name, description, created_at and modified_at. If specified, the items are
+	// sorted by the value of this property.
+	Sort *string
+
+	// Optional sort order, valid values are asc and desc. Default: asc.
+	Order *string
+
+	// Defines if the entity history is included in the response.
+	IncludeHistory *bool
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// Constants associated with the ListServiceIdsOptions.Order property.
+// Optional sort order, valid values are asc and desc. Default: asc.
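+//
+// Pagination sketch (placeholder names; ListServiceIds is the companion service
+// method, and extracting the pagetoken from the 'next' link is application logic,
+// not an SDK helper shown here):
+//
+//	opts := iamIdentityService.NewListServiceIdsOptions().
+//		SetAccountID(accountIDValue).
+//		SetPagesize(50).
+//		SetOrder(ListServiceIdsOptionsOrderAscConst)
+//	serviceIDList, response, err := iamIdentityService.ListServiceIds(opts)
+//	// When serviceIDList.Next is set, pass its pagetoken via SetPagetoken to fetch
+//	// the following page.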
+const ( + ListServiceIdsOptionsOrderAscConst = "asc" + ListServiceIdsOptionsOrderDescConst = "desc" +) + +// NewListServiceIdsOptions : Instantiate ListServiceIdsOptions +func (*IamIdentityV1) NewListServiceIdsOptions() *ListServiceIdsOptions { + return &ListServiceIdsOptions{} +} + +// SetAccountID : Allow user to set AccountID +func (options *ListServiceIdsOptions) SetAccountID(accountID string) *ListServiceIdsOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetName : Allow user to set Name +func (options *ListServiceIdsOptions) SetName(name string) *ListServiceIdsOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetPagesize : Allow user to set Pagesize +func (options *ListServiceIdsOptions) SetPagesize(pagesize int64) *ListServiceIdsOptions { + options.Pagesize = core.Int64Ptr(pagesize) + return options +} + +// SetPagetoken : Allow user to set Pagetoken +func (options *ListServiceIdsOptions) SetPagetoken(pagetoken string) *ListServiceIdsOptions { + options.Pagetoken = core.StringPtr(pagetoken) + return options +} + +// SetSort : Allow user to set Sort +func (options *ListServiceIdsOptions) SetSort(sort string) *ListServiceIdsOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetOrder : Allow user to set Order +func (options *ListServiceIdsOptions) SetOrder(order string) *ListServiceIdsOptions { + options.Order = core.StringPtr(order) + return options +} + +// SetIncludeHistory : Allow user to set IncludeHistory +func (options *ListServiceIdsOptions) SetIncludeHistory(includeHistory bool) *ListServiceIdsOptions { + options.IncludeHistory = core.BoolPtr(includeHistory) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListServiceIdsOptions) SetHeaders(param map[string]string) *ListServiceIdsOptions { + options.Headers = param + return options +} + +// LockAPIKeyOptions : The LockAPIKey options. +type LockAPIKeyOptions struct { + // Unique ID of the API key. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewLockAPIKeyOptions : Instantiate LockAPIKeyOptions +func (*IamIdentityV1) NewLockAPIKeyOptions(id string) *LockAPIKeyOptions { + return &LockAPIKeyOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *LockAPIKeyOptions) SetID(id string) *LockAPIKeyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *LockAPIKeyOptions) SetHeaders(param map[string]string) *LockAPIKeyOptions { + options.Headers = param + return options +} + +// LockServiceIDOptions : The LockServiceID options. +type LockServiceIDOptions struct { + // Unique ID of the service ID. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewLockServiceIDOptions : Instantiate LockServiceIDOptions +func (*IamIdentityV1) NewLockServiceIDOptions(id string) *LockServiceIDOptions { + return &LockServiceIDOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *LockServiceIDOptions) SetID(id string) *LockServiceIDOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *LockServiceIDOptions) SetHeaders(param map[string]string) *LockServiceIDOptions { + options.Headers = param + return options +} + +// ResponseContext : Context with key properties for problem determination. +type ResponseContext struct { + // The transaction ID of the inbound REST request. + TransactionID *string `json:"transaction_id,omitempty"` + + // The operation of the inbound REST request. + Operation *string `json:"operation,omitempty"` + + // The user agent of the inbound REST request. + UserAgent *string `json:"user_agent,omitempty"` + + // The URL of that cluster. + URL *string `json:"url,omitempty"` + + // The instance ID of the server instance processing the request. + InstanceID *string `json:"instance_id,omitempty"` + + // The thread ID of the server instance processing the request. + ThreadID *string `json:"thread_id,omitempty"` + + // The host of the server instance processing the request. + Host *string `json:"host,omitempty"` + + // The start time of the request. + StartTime *string `json:"start_time,omitempty"` + + // The finish time of the request. + EndTime *string `json:"end_time,omitempty"` + + // The elapsed time in msec. + ElapsedTime *string `json:"elapsed_time,omitempty"` + + // The cluster name. + ClusterName *string `json:"cluster_name,omitempty"` +} + +// UnmarshalResponseContext unmarshals an instance of ResponseContext from the specified map of raw messages. +func UnmarshalResponseContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResponseContext) + err = core.UnmarshalPrimitive(m, "transaction_id", &obj.TransactionID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "operation", &obj.Operation) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "user_agent", &obj.UserAgent) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "instance_id", &obj.InstanceID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "thread_id", &obj.ThreadID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "host", &obj.Host) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "start_time", &obj.StartTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "end_time", &obj.EndTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "elapsed_time", &obj.ElapsedTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cluster_name", &obj.ClusterName) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ServiceID : Response body format for service ID V1 REST requests. +type ServiceID struct { + // Context with key properties for problem determination. + Context *ResponseContext `json:"context,omitempty"` + + // Unique identifier of this Service Id. 
+ ID *string `json:"id" validate:"required"` + + // Cloud wide identifier for identities of this service ID. + IamID *string `json:"iam_id" validate:"required"` + + // Version of the service ID details object. You need to specify this value when updating the service ID to avoid stale + // updates. + EntityTag *string `json:"entity_tag,omitempty"` + + // Cloud Resource Name of the item. Example Cloud Resource Name: + // 'crn:v1:bluemix:public:iam-identity:us-south:a/myaccount::serviceid:1234-5678-9012'. + CRN *string `json:"crn" validate:"required"` + + // The service ID cannot be changed if set to true. + Locked *bool `json:"locked" validate:"required"` + + // If set contains a date time string of the creation date in ISO format. + CreatedAt *strfmt.DateTime `json:"created_at,omitempty"` + + // If set contains a date time string of the last modification date in ISO format. + ModifiedAt *strfmt.DateTime `json:"modified_at,omitempty"` + + // ID of the account the service ID belongs to. + AccountID *string `json:"account_id" validate:"required"` + + // Name of the Service Id. The name is not checked for uniqueness. Therefore multiple names with the same value can + // exist. Access is done via the UUID of the Service Id. + Name *string `json:"name" validate:"required"` + + // The optional description of the Service Id. The 'description' property is only available if a description was + // provided during a create of a Service Id. + Description *string `json:"description,omitempty"` + + // Optional list of CRNs (string array) which point to the services connected to the service ID. + UniqueInstanceCrns []string `json:"unique_instance_crns,omitempty"` + + // History of the Service ID. + History []EnityHistoryRecord `json:"history,omitempty"` + + // Response body format for API key V1 REST requests. + Apikey *APIKey `json:"apikey" validate:"required"` +} + +// UnmarshalServiceID unmarshals an instance of ServiceID from the specified map of raw messages. 
+func UnmarshalServiceID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ServiceID) + err = core.UnmarshalModel(m, "context", &obj.Context, UnmarshalResponseContext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iam_id", &obj.IamID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "entity_tag", &obj.EntityTag) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked", &obj.Locked) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "modified_at", &obj.ModifiedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "account_id", &obj.AccountID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "unique_instance_crns", &obj.UniqueInstanceCrns) + if err != nil { + return + } + err = core.UnmarshalModel(m, "history", &obj.History, UnmarshalEnityHistoryRecord) + if err != nil { + return + } + err = core.UnmarshalModel(m, "apikey", &obj.Apikey, UnmarshalAPIKey) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ServiceIDList : Response body format for the list service ID V1 REST request. +type ServiceIDList struct { + // Context with key properties for problem determination. + Context *ResponseContext `json:"context,omitempty"` + + // The offset of the current page. + Offset *int64 `json:"offset,omitempty"` + + // Optional size of a single page. Default is 20 items per page. Valid range is 1 to 100. + Limit *int64 `json:"limit,omitempty"` + + // Link to the first page. + First *string `json:"first,omitempty"` + + // Link to the previous available page. If 'previous' property is not part of the response no previous page is + // available. + Previous *string `json:"previous,omitempty"` + + // Link to the next available page. If 'next' property is not part of the response no next page is available. + Next *string `json:"next,omitempty"` + + // List of service IDs based on the query paramters and the page size. The service IDs array is always part of the + // response but might be empty depending on the query parameter values provided. + Serviceids []ServiceID `json:"serviceids" validate:"required"` +} + +// UnmarshalServiceIDList unmarshals an instance of ServiceIDList from the specified map of raw messages. 
+func UnmarshalServiceIDList(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(ServiceIDList)
+ err = core.UnmarshalModel(m, "context", &obj.Context, UnmarshalResponseContext)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "offset", &obj.Offset)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "first", &obj.First)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "previous", &obj.Previous)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "next", &obj.Next)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "serviceids", &obj.Serviceids, UnmarshalServiceID)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// UnlockAPIKeyOptions : The UnlockAPIKey options.
+type UnlockAPIKeyOptions struct {
+ // Unique ID of the API key.
+ ID *string `validate:"required,ne="`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewUnlockAPIKeyOptions : Instantiate UnlockAPIKeyOptions
+func (*IamIdentityV1) NewUnlockAPIKeyOptions(id string) *UnlockAPIKeyOptions {
+ return &UnlockAPIKeyOptions{
+ ID: core.StringPtr(id),
+ }
+}
+
+// SetID : Allow user to set ID
+func (options *UnlockAPIKeyOptions) SetID(id string) *UnlockAPIKeyOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UnlockAPIKeyOptions) SetHeaders(param map[string]string) *UnlockAPIKeyOptions {
+ options.Headers = param
+ return options
+}
+
+// UnlockServiceIDOptions : The UnlockServiceID options.
+type UnlockServiceIDOptions struct {
+ // Unique ID of the service ID.
+ ID *string `validate:"required,ne="`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewUnlockServiceIDOptions : Instantiate UnlockServiceIDOptions
+func (*IamIdentityV1) NewUnlockServiceIDOptions(id string) *UnlockServiceIDOptions {
+ return &UnlockServiceIDOptions{
+ ID: core.StringPtr(id),
+ }
+}
+
+// SetID : Allow user to set ID
+func (options *UnlockServiceIDOptions) SetID(id string) *UnlockServiceIDOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UnlockServiceIDOptions) SetHeaders(param map[string]string) *UnlockServiceIDOptions {
+ options.Headers = param
+ return options
+}
+
+// UpdateAccountSettingsOptions : The UpdateAccountSettings options.
+type UpdateAccountSettingsOptions struct {
+ // Version of the account settings to be updated. Specify the version that you retrieved as entity_tag (ETag header)
+ // when reading the account. This value helps identify parallel usage of this API. Pass * to update any
+ // version available. This might result in stale updates.
+ IfMatch *string `validate:"required"`
+
+ // The ID of the account to update the settings for.
+ AccountID *string `validate:"required,ne="`
+
+ // Defines whether or not creating a Service Id is access controlled. Valid values:
+ // * RESTRICTED - to apply access control
+ // * NOT_RESTRICTED - to remove access control
+ // * NOT_SET - to unset a previously set value.
+ RestrictCreateServiceID *string
+
+ // Defines whether or not creating platform API keys is access controlled. Valid values:
+ // * RESTRICTED - to apply access control
+ // * NOT_RESTRICTED - to remove access control
+ // * NOT_SET - to unset a previously set value.
+ RestrictCreatePlatformApikey *string
+
+ // Defines the IP addresses and subnets from which IAM tokens can be created for the account.
+ AllowedIPAddresses *string
+
+ // Defines the MFA trait for the account. Valid values:
+ // * NONE - No MFA trait set
+ // * TOTP - For all non-federated IBMId users
+ // * TOTP4ALL - For all users
+ // * LEVEL1 - Email-based MFA for all users
+ // * LEVEL2 - TOTP-based MFA for all users
+ // * LEVEL3 - U2F MFA for all users.
+ Mfa *string
+
+ // Defines the session expiration in seconds for the account. Valid values:
+ // * Any whole number between '900' and '86400'
+ // * NOT_SET - To unset account setting and use service default.
+ SessionExpirationInSeconds *string
+
+ // Defines the period of time in seconds in which a session will be invalidated due to inactivity. Valid values:
+ // * Any whole number between '900' and '7200'
+ // * NOT_SET - To unset account setting and use service default.
+ SessionInvalidationInSeconds *string
+
+ // Defines the max allowed sessions per identity required by the account. Valid values:
+ // * Any whole number greater than 0
+ // * NOT_SET - To unset account setting and use service default.
+ MaxSessionsPerIdentity *string
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// Constants associated with the UpdateAccountSettingsOptions.RestrictCreateServiceID property.
+// Defines whether or not creating a Service Id is access controlled. Valid values:
+// * RESTRICTED - to apply access control
+// * NOT_RESTRICTED - to remove access control
+// * NOT_SET - to unset a previously set value.
+const (
+ UpdateAccountSettingsOptionsRestrictCreateServiceIDNotRestrictedConst = "NOT_RESTRICTED"
+ UpdateAccountSettingsOptionsRestrictCreateServiceIDNotSetConst = "NOT_SET"
+ UpdateAccountSettingsOptionsRestrictCreateServiceIDRestrictedConst = "RESTRICTED"
+)
+
+// Constants associated with the UpdateAccountSettingsOptions.RestrictCreatePlatformApikey property.
+// Defines whether or not creating platform API keys is access controlled. Valid values:
+// * RESTRICTED - to apply access control
+// * NOT_RESTRICTED - to remove access control
+// * NOT_SET - to unset a previously set value.
+const (
+ UpdateAccountSettingsOptionsRestrictCreatePlatformApikeyNotRestrictedConst = "NOT_RESTRICTED"
+ UpdateAccountSettingsOptionsRestrictCreatePlatformApikeyNotSetConst = "NOT_SET"
+ UpdateAccountSettingsOptionsRestrictCreatePlatformApikeyRestrictedConst = "RESTRICTED"
+)
+
+// Constants associated with the UpdateAccountSettingsOptions.Mfa property.
+// Defines the MFA trait for the account. Valid values:
+// * NONE - No MFA trait set
+// * TOTP - For all non-federated IBMId users
+// * TOTP4ALL - For all users
+// * LEVEL1 - Email-based MFA for all users
+// * LEVEL2 - TOTP-based MFA for all users
+// * LEVEL3 - U2F MFA for all users.
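+//
+// A hedged sketch of applying one of these constants; the `service` instance,
+// ETag, and account ID below are placeholders:
+//
+//   opts := service.NewUpdateAccountSettingsOptions(etag, accountID).
+//       SetMfa(UpdateAccountSettingsOptionsMfaTotpConst)
+//   settings, response, err := service.UpdateAccountSettings(opts)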
+const ( + UpdateAccountSettingsOptionsMfaLevel1Const = "LEVEL1" + UpdateAccountSettingsOptionsMfaLevel2Const = "LEVEL2" + UpdateAccountSettingsOptionsMfaLevel3Const = "LEVEL3" + UpdateAccountSettingsOptionsMfaNoneConst = "NONE" + UpdateAccountSettingsOptionsMfaTotpConst = "TOTP" + UpdateAccountSettingsOptionsMfaTotp4allConst = "TOTP4ALL" +) + +// NewUpdateAccountSettingsOptions : Instantiate UpdateAccountSettingsOptions +func (*IamIdentityV1) NewUpdateAccountSettingsOptions(ifMatch string, accountID string) *UpdateAccountSettingsOptions { + return &UpdateAccountSettingsOptions{ + IfMatch: core.StringPtr(ifMatch), + AccountID: core.StringPtr(accountID), + } +} + +// SetIfMatch : Allow user to set IfMatch +func (options *UpdateAccountSettingsOptions) SetIfMatch(ifMatch string) *UpdateAccountSettingsOptions { + options.IfMatch = core.StringPtr(ifMatch) + return options +} + +// SetAccountID : Allow user to set AccountID +func (options *UpdateAccountSettingsOptions) SetAccountID(accountID string) *UpdateAccountSettingsOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetRestrictCreateServiceID : Allow user to set RestrictCreateServiceID +func (options *UpdateAccountSettingsOptions) SetRestrictCreateServiceID(restrictCreateServiceID string) *UpdateAccountSettingsOptions { + options.RestrictCreateServiceID = core.StringPtr(restrictCreateServiceID) + return options +} + +// SetRestrictCreatePlatformApikey : Allow user to set RestrictCreatePlatformApikey +func (options *UpdateAccountSettingsOptions) SetRestrictCreatePlatformApikey(restrictCreatePlatformApikey string) *UpdateAccountSettingsOptions { + options.RestrictCreatePlatformApikey = core.StringPtr(restrictCreatePlatformApikey) + return options +} + +// SetAllowedIPAddresses : Allow user to set AllowedIPAddresses +func (options *UpdateAccountSettingsOptions) SetAllowedIPAddresses(allowedIPAddresses string) *UpdateAccountSettingsOptions { + options.AllowedIPAddresses = core.StringPtr(allowedIPAddresses) + return options +} + +// SetMfa : Allow user to set Mfa +func (options *UpdateAccountSettingsOptions) SetMfa(mfa string) *UpdateAccountSettingsOptions { + options.Mfa = core.StringPtr(mfa) + return options +} + +// SetSessionExpirationInSeconds : Allow user to set SessionExpirationInSeconds +func (options *UpdateAccountSettingsOptions) SetSessionExpirationInSeconds(sessionExpirationInSeconds string) *UpdateAccountSettingsOptions { + options.SessionExpirationInSeconds = core.StringPtr(sessionExpirationInSeconds) + return options +} + +// SetSessionInvalidationInSeconds : Allow user to set SessionInvalidationInSeconds +func (options *UpdateAccountSettingsOptions) SetSessionInvalidationInSeconds(sessionInvalidationInSeconds string) *UpdateAccountSettingsOptions { + options.SessionInvalidationInSeconds = core.StringPtr(sessionInvalidationInSeconds) + return options +} + +// SetMaxSessionsPerIdentity : Allow user to set MaxSessionsPerIdentity +func (options *UpdateAccountSettingsOptions) SetMaxSessionsPerIdentity(maxSessionsPerIdentity string) *UpdateAccountSettingsOptions { + options.MaxSessionsPerIdentity = core.StringPtr(maxSessionsPerIdentity) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateAccountSettingsOptions) SetHeaders(param map[string]string) *UpdateAccountSettingsOptions { + options.Headers = param + return options +} + +// UpdateAPIKeyOptions : The UpdateAPIKey options. +type UpdateAPIKeyOptions struct { + // Unique ID of the API key to be updated. 
+ ID *string `validate:"required,ne="`
+
+ // Version of the API key to be updated. Specify the version that you retrieved when reading the API key. This value
+ // helps identify parallel usage of this API. Pass * to update any version available. This might result
+ // in stale updates.
+ IfMatch *string `validate:"required"`
+
+ // The name of the API key to update. If specified in the request, the parameter must not be empty; passing an empty
+ // name will result in an error. The name is not checked for uniqueness.
+ Name *string
+
+ // The description of the API key to update. If specified, an empty description will clear the description of the API
+ // key. If a non-empty value is provided, the API key will be updated.
+ Description *string
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewUpdateAPIKeyOptions : Instantiate UpdateAPIKeyOptions
+func (*IamIdentityV1) NewUpdateAPIKeyOptions(id string, ifMatch string) *UpdateAPIKeyOptions {
+ return &UpdateAPIKeyOptions{
+ ID: core.StringPtr(id),
+ IfMatch: core.StringPtr(ifMatch),
+ }
+}
+
+// SetID : Allow user to set ID
+func (options *UpdateAPIKeyOptions) SetID(id string) *UpdateAPIKeyOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetIfMatch : Allow user to set IfMatch
+func (options *UpdateAPIKeyOptions) SetIfMatch(ifMatch string) *UpdateAPIKeyOptions {
+ options.IfMatch = core.StringPtr(ifMatch)
+ return options
+}
+
+// SetName : Allow user to set Name
+func (options *UpdateAPIKeyOptions) SetName(name string) *UpdateAPIKeyOptions {
+ options.Name = core.StringPtr(name)
+ return options
+}
+
+// SetDescription : Allow user to set Description
+func (options *UpdateAPIKeyOptions) SetDescription(description string) *UpdateAPIKeyOptions {
+ options.Description = core.StringPtr(description)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateAPIKeyOptions) SetHeaders(param map[string]string) *UpdateAPIKeyOptions {
+ options.Headers = param
+ return options
+}
+
+// UpdateServiceIDOptions : The UpdateServiceID options.
+type UpdateServiceIDOptions struct {
+ // Unique ID of the service ID to be updated.
+ ID *string `validate:"required,ne="`
+
+ // Version of the service ID to be updated. Specify the version that you retrieved as entity_tag (ETag header) when
+ // reading the service ID. This value helps identify parallel usage of this API. Pass * to update any
+ // version available. This might result in stale updates.
+ IfMatch *string `validate:"required"`
+
+ // The name of the service ID to update. If specified in the request, the parameter must not be empty; passing an
+ // empty name will result in an error. The name is not checked for uniqueness.
+ Name *string
+
+ // The description of the service ID to update. If specified, an empty description will clear the description of the
+ // service ID. If a non-empty value is provided, the service ID will be updated.
+ Description *string
+
+ // List of CRNs which point to the services connected to this service ID. If specified, an empty list will clear all
+ // existing unique instance crns of the service ID.
+ UniqueInstanceCrns []string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateServiceIDOptions : Instantiate UpdateServiceIDOptions +func (*IamIdentityV1) NewUpdateServiceIDOptions(id string, ifMatch string) *UpdateServiceIDOptions { + return &UpdateServiceIDOptions{ + ID: core.StringPtr(id), + IfMatch: core.StringPtr(ifMatch), + } +} + +// SetID : Allow user to set ID +func (options *UpdateServiceIDOptions) SetID(id string) *UpdateServiceIDOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetIfMatch : Allow user to set IfMatch +func (options *UpdateServiceIDOptions) SetIfMatch(ifMatch string) *UpdateServiceIDOptions { + options.IfMatch = core.StringPtr(ifMatch) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateServiceIDOptions) SetName(name string) *UpdateServiceIDOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateServiceIDOptions) SetDescription(description string) *UpdateServiceIDOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetUniqueInstanceCrns : Allow user to set UniqueInstanceCrns +func (options *UpdateServiceIDOptions) SetUniqueInstanceCrns(uniqueInstanceCrns []string) *UpdateServiceIDOptions { + options.UniqueInstanceCrns = uniqueInstanceCrns + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateServiceIDOptions) SetHeaders(param map[string]string) *UpdateServiceIDOptions { + options.Headers = param + return options +} diff --git a/vendor/github.com/IBM/platform-services-go-sdk/iampolicymanagementv1/iam_policy_management_v1.go b/vendor/github.com/IBM/platform-services-go-sdk/iampolicymanagementv1/iam_policy_management_v1.go new file mode 100644 index 00000000000..9a61804f737 --- /dev/null +++ b/vendor/github.com/IBM/platform-services-go-sdk/iampolicymanagementv1/iam_policy_management_v1.go @@ -0,0 +1,2086 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.29.1-b338fb38-20210313-010605 + */ + +// Package iampolicymanagementv1 : Operations and models for the IamPolicyManagementV1 service +package iampolicymanagementv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v5/core" + common "github.com/IBM/platform-services-go-sdk/common" + "github.com/go-openapi/strfmt" + "net/http" + "reflect" + "time" +) + +// IamPolicyManagementV1 : IAM Policy Management API +// +// Version: 1.0.1 +type IamPolicyManagementV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://iam.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. 
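+//
+// With external configuration, service properties are looked up under this key
+// following the usual go-sdk-core convention, e.g. environment variables such
+// as IAM_POLICY_MANAGEMENT_URL, IAM_POLICY_MANAGEMENT_AUTH_TYPE, and
+// IAM_POLICY_MANAGEMENT_APIKEY (a hedged sketch; see
+// NewIamPolicyManagementV1UsingExternalConfig below):
+//
+//   service, err := NewIamPolicyManagementV1UsingExternalConfig(
+//       &IamPolicyManagementV1Options{})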
+const DefaultServiceName = "iam_policy_management" + +// IamPolicyManagementV1Options : Service options +type IamPolicyManagementV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewIamPolicyManagementV1UsingExternalConfig : constructs an instance of IamPolicyManagementV1 with passed in options and external configuration. +func NewIamPolicyManagementV1UsingExternalConfig(options *IamPolicyManagementV1Options) (iamPolicyManagement *IamPolicyManagementV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + iamPolicyManagement, err = NewIamPolicyManagementV1(options) + if err != nil { + return + } + + err = iamPolicyManagement.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = iamPolicyManagement.Service.SetServiceURL(options.URL) + } + return +} + +// NewIamPolicyManagementV1 : constructs an instance of IamPolicyManagementV1 with passed in options. +func NewIamPolicyManagementV1(options *IamPolicyManagementV1Options) (service *IamPolicyManagementV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &IamPolicyManagementV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "iamPolicyManagement" suitable for processing requests. +func (iamPolicyManagement *IamPolicyManagementV1) Clone() *IamPolicyManagementV1 { + if core.IsNil(iamPolicyManagement) { + return nil + } + clone := *iamPolicyManagement + clone.Service = iamPolicyManagement.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (iamPolicyManagement *IamPolicyManagementV1) SetServiceURL(url string) error { + return iamPolicyManagement.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (iamPolicyManagement *IamPolicyManagementV1) GetServiceURL() string { + return iamPolicyManagement.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (iamPolicyManagement *IamPolicyManagementV1) SetDefaultHeaders(headers http.Header) { + iamPolicyManagement.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (iamPolicyManagement *IamPolicyManagementV1) SetEnableGzipCompression(enableGzip bool) { + iamPolicyManagement.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (iamPolicyManagement *IamPolicyManagementV1) GetEnableGzipCompression() bool { + return iamPolicyManagement.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. 
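+//
+// A minimal sketch (the bounds below are illustrative):
+//
+//   service.EnableRetries(4, 30*time.Second) // at most 4 retries, interval capped at 30s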
+func (iamPolicyManagement *IamPolicyManagementV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
+ iamPolicyManagement.Service.EnableRetries(maxRetries, maxRetryInterval)
+}
+
+// DisableRetries disables automatic retries for requests invoked for this service instance.
+func (iamPolicyManagement *IamPolicyManagementV1) DisableRetries() {
+ iamPolicyManagement.Service.DisableRetries()
+}
+
+// ListPolicies : Get policies by attributes
+// Get policies and filter by attributes. While managing policies, you may want to retrieve policies in the account and
+// filter by attribute values. This can be done through query parameters. Currently, only the following attributes are
+// supported: account_id, iam_id, access_group_id, type, service_type, sort, format and state. account_id is a required
+// query parameter. Only policies that have the specified attributes and that the caller has read access to are
+// returned. If the caller does not have read access to any policies, an empty array is returned.
+func (iamPolicyManagement *IamPolicyManagementV1) ListPolicies(listPoliciesOptions *ListPoliciesOptions) (result *PolicyList, response *core.DetailedResponse, err error) {
+ return iamPolicyManagement.ListPoliciesWithContext(context.Background(), listPoliciesOptions)
+}
+
+// ListPoliciesWithContext is an alternate form of the ListPolicies method which supports a Context parameter
+func (iamPolicyManagement *IamPolicyManagementV1) ListPoliciesWithContext(ctx context.Context, listPoliciesOptions *ListPoliciesOptions) (result *PolicyList, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(listPoliciesOptions, "listPoliciesOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(listPoliciesOptions, "listPoliciesOptions")
+ if err != nil {
+ return
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v1/policies`, nil)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range listPoliciesOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "ListPolicies")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ if listPoliciesOptions.AcceptLanguage != nil {
+ builder.AddHeader("Accept-Language", fmt.Sprint(*listPoliciesOptions.AcceptLanguage))
+ }
+
+ builder.AddQuery("account_id", fmt.Sprint(*listPoliciesOptions.AccountID))
+ if listPoliciesOptions.IamID != nil {
+ builder.AddQuery("iam_id", fmt.Sprint(*listPoliciesOptions.IamID))
+ }
+ if listPoliciesOptions.AccessGroupID != nil {
+ builder.AddQuery("access_group_id", fmt.Sprint(*listPoliciesOptions.AccessGroupID))
+ }
+ if listPoliciesOptions.Type != nil {
+ builder.AddQuery("type", fmt.Sprint(*listPoliciesOptions.Type))
+ }
+ if listPoliciesOptions.ServiceType != nil {
+ builder.AddQuery("service_type", fmt.Sprint(*listPoliciesOptions.ServiceType))
+ }
+ if listPoliciesOptions.TagName != nil {
+ builder.AddQuery("tag_name", fmt.Sprint(*listPoliciesOptions.TagName))
+ }
+ if listPoliciesOptions.TagValue != nil {
+ builder.AddQuery("tag_value", fmt.Sprint(*listPoliciesOptions.TagValue))
+ }
+ if listPoliciesOptions.Sort != nil {
+ builder.AddQuery("sort", fmt.Sprint(*listPoliciesOptions.Sort))
+ }
+ if listPoliciesOptions.Format != nil {
+ builder.AddQuery("format", fmt.Sprint(*listPoliciesOptions.Format))
+ }
+ if listPoliciesOptions.State != nil {
+ builder.AddQuery("state", fmt.Sprint(*listPoliciesOptions.State))
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = iamPolicyManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPolicyList)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// CreatePolicy : Create a policy
+// Creates a policy to grant access between a subject and a resource. There are two types of policies: **access** and
+// **authorization**. A policy administrator might want to create an access policy which grants access to a user,
+// service-id, or an access group. They might also want to create an authorization policy and set up access between
+// services.
+//
+// ### Access
+//
+// To create an access policy, use **`"type": "access"`** in the body. The possible subject attributes are **`iam_id`**
+// and **`access_group_id`**. Use the **`iam_id`** subject attribute for assigning access for a user or service-id. Use
+// the **`access_group_id`** subject attribute for assigning access for an access group. The roles must be a subset of a
+// service's or the platform's supported roles. The resource attributes must be a subset of a service's or the
+// platform's supported attributes. The policy resource must include either the **`serviceType`**, **`serviceName`**,
+// or **`resourceGroupId`** attribute and the **`accountId`** attribute. If the subject is a locked service-id, the
+// request will fail.
+//
+// ### Authorization
+//
+// Authorization policies are supported by services on a case-by-case basis. Refer to service documentation to verify
+// their support of authorization policies. To create an authorization policy, use **`"type": "authorization"`** in the
+// body. The subject attributes must match the supported authorization subjects of the resource. Multiple subject
+// attributes might be provided. The following attributes are supported:
+// serviceName, serviceInstance, region, resourceType, resource, accountId. The policy roles must be a subset of the
+// authorization roles supported by the target service. The user must also have the same level of access or
+// greater to the target resource in order to grant the role. The resource attributes must be a subset of a service's or
+// the platform's supported attributes. Both the policy subject and the policy resource must include the
+// **`serviceName`** and **`accountId`** attributes.
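+//
+// A hedged sketch of creating an access policy; the subject/role/resource
+// model structs are defined later in this file, and the `service` instance,
+// role CRN, IAM ID, and account ID below are placeholders:
+//
+//   opts := service.NewCreatePolicyOptions(
+//       "access",
+//       []PolicySubject{{Attributes: []SubjectAttribute{{
+//           Name: core.StringPtr("iam_id"), Value: core.StringPtr(iamID)}}}},
+//       []PolicyRole{{RoleID: core.StringPtr("crn:v1:bluemix:public:iam::::role:Viewer")}},
+//       []PolicyResource{{Attributes: []ResourceAttribute{{
+//           Name: core.StringPtr("accountId"), Value: core.StringPtr(accountID)}}}})
+//   policy, response, err := service.CreatePolicy(opts)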
+func (iamPolicyManagement *IamPolicyManagementV1) CreatePolicy(createPolicyOptions *CreatePolicyOptions) (result *Policy, response *core.DetailedResponse, err error) { + return iamPolicyManagement.CreatePolicyWithContext(context.Background(), createPolicyOptions) +} + +// CreatePolicyWithContext is an alternate form of the CreatePolicy method which supports a Context parameter +func (iamPolicyManagement *IamPolicyManagementV1) CreatePolicyWithContext(ctx context.Context, createPolicyOptions *CreatePolicyOptions) (result *Policy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createPolicyOptions, "createPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createPolicyOptions, "createPolicyOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v1/policies`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "CreatePolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createPolicyOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*createPolicyOptions.AcceptLanguage)) + } + + body := make(map[string]interface{}) + if createPolicyOptions.Type != nil { + body["type"] = createPolicyOptions.Type + } + if createPolicyOptions.Subjects != nil { + body["subjects"] = createPolicyOptions.Subjects + } + if createPolicyOptions.Roles != nil { + body["roles"] = createPolicyOptions.Roles + } + if createPolicyOptions.Resources != nil { + body["resources"] = createPolicyOptions.Resources + } + if createPolicyOptions.Description != nil { + body["description"] = createPolicyOptions.Description + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamPolicyManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdatePolicy : Update a policy +// Update a policy to grant access between a subject and a resource. A policy administrator might want to update an +// existing policy. The policy type cannot be changed (You cannot change an access policy to an authorization policy). +// +// ### Access +// +// To update an access policy, use **`"type": "access"`** in the body. The possible subject attributes are **`iam_id`** +// and **`access_group_id`**. Use the **`iam_id`** subject attribute for assigning access for a user or service-id. Use +// the **`access_group_id`** subject attribute for assigning access for an access group. The roles must be a subset of a +// service's or the platform's supported roles. The resource attributes must be a subset of a service's or the +// platform's supported attributes. 
The policy resource must include either the **`serviceType`**, **`serviceName`**,
+// or **`resourceGroupId`** attribute and the **`accountId`** attribute. If the subject is a locked service-id, the
+// request will fail.
+//
+// ### Authorization
+//
+// To update an authorization policy, use **`"type": "authorization"`** in the body. The subject attributes must match
+// the supported authorization subjects of the resource. Multiple subject attributes might be provided. The following
+// attributes are supported:
+// serviceName, serviceInstance, region, resourceType, resource, accountId. The policy roles must be a subset of the
+// authorization roles supported by the target service. The user must also have the same level of access or
+// greater to the target resource in order to grant the role. The resource attributes must be a subset of a service's or
+// the platform's supported attributes. Both the policy subject and the policy resource must include the
+// **`serviceName`** and **`accountId`** attributes.
+func (iamPolicyManagement *IamPolicyManagementV1) UpdatePolicy(updatePolicyOptions *UpdatePolicyOptions) (result *Policy, response *core.DetailedResponse, err error) {
+ return iamPolicyManagement.UpdatePolicyWithContext(context.Background(), updatePolicyOptions)
+}
+
+// UpdatePolicyWithContext is an alternate form of the UpdatePolicy method which supports a Context parameter
+func (iamPolicyManagement *IamPolicyManagementV1) UpdatePolicyWithContext(ctx context.Context, updatePolicyOptions *UpdatePolicyOptions) (result *Policy, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(updatePolicyOptions, "updatePolicyOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(updatePolicyOptions, "updatePolicyOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "policy_id": *updatePolicyOptions.PolicyID,
+ }
+
+ builder := core.NewRequestBuilder(core.PUT)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v1/policies/{policy_id}`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range updatePolicyOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "UpdatePolicy")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ builder.AddHeader("Content-Type", "application/json")
+ if updatePolicyOptions.IfMatch != nil {
+ builder.AddHeader("If-Match", fmt.Sprint(*updatePolicyOptions.IfMatch))
+ }
+
+ body := make(map[string]interface{})
+ if updatePolicyOptions.Type != nil {
+ body["type"] = updatePolicyOptions.Type
+ }
+ if updatePolicyOptions.Subjects != nil {
+ body["subjects"] = updatePolicyOptions.Subjects
+ }
+ if updatePolicyOptions.Roles != nil {
+ body["roles"] = updatePolicyOptions.Roles
+ }
+ if updatePolicyOptions.Resources != nil {
+ body["resources"] = updatePolicyOptions.Resources
+ }
+ if updatePolicyOptions.Description != nil {
+ body["description"] = updatePolicyOptions.Description
+ }
+ _, err = builder.SetBodyContentJSON(body)
+ if err != nil {
+ return
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = iamPolicyManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPolicy)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// GetPolicy : Retrieve a policy by ID
+// Retrieve a policy by providing a policy ID.
+func (iamPolicyManagement *IamPolicyManagementV1) GetPolicy(getPolicyOptions *GetPolicyOptions) (result *Policy, response *core.DetailedResponse, err error) {
+ return iamPolicyManagement.GetPolicyWithContext(context.Background(), getPolicyOptions)
+}
+
+// GetPolicyWithContext is an alternate form of the GetPolicy method which supports a Context parameter
+func (iamPolicyManagement *IamPolicyManagementV1) GetPolicyWithContext(ctx context.Context, getPolicyOptions *GetPolicyOptions) (result *Policy, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(getPolicyOptions, "getPolicyOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(getPolicyOptions, "getPolicyOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "policy_id": *getPolicyOptions.PolicyID,
+ }
+
+ builder := core.NewRequestBuilder(core.GET)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v1/policies/{policy_id}`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range getPolicyOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "GetPolicy")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = iamPolicyManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPolicy)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// DeletePolicy : Delete a policy by ID
+// Delete a policy by providing a policy ID. If the subject of the policy is a locked service-id, the request will
+// fail.
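+//
+// Sketch (the `service` instance and policyID are placeholders):
+//
+//   response, err := service.DeletePolicy(service.NewDeletePolicyOptions(policyID))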
+func (iamPolicyManagement *IamPolicyManagementV1) DeletePolicy(deletePolicyOptions *DeletePolicyOptions) (response *core.DetailedResponse, err error) { + return iamPolicyManagement.DeletePolicyWithContext(context.Background(), deletePolicyOptions) +} + +// DeletePolicyWithContext is an alternate form of the DeletePolicy method which supports a Context parameter +func (iamPolicyManagement *IamPolicyManagementV1) DeletePolicyWithContext(ctx context.Context, deletePolicyOptions *DeletePolicyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deletePolicyOptions, "deletePolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deletePolicyOptions, "deletePolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "policy_id": *deletePolicyOptions.PolicyID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v1/policies/{policy_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deletePolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "DeletePolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = iamPolicyManagement.Service.Request(request, nil) + + return +} + +// PatchPolicy : Restore a deleted policy by ID +// Restore a policy that has recently been deleted. A policy administrator might want to restore a deleted policy. To +// restore a policy, use **`"state": "active"`** in the body. 
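+//
+// A hedged sketch of restoring a policy; the If-Match value is the ETag
+// returned by a prior GetPolicy, and the identifiers below are placeholders:
+//
+//   _, getResp, err := service.GetPolicy(service.NewGetPolicyOptions(policyID))
+//   opts := &PatchPolicyOptions{
+//       PolicyID: core.StringPtr(policyID),
+//       IfMatch:  core.StringPtr(getResp.GetHeaders().Get("ETag")),
+//       State:    core.StringPtr("active"),
+//   }
+//   policy, response, err := service.PatchPolicy(opts)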
+func (iamPolicyManagement *IamPolicyManagementV1) PatchPolicy(patchPolicyOptions *PatchPolicyOptions) (result *Policy, response *core.DetailedResponse, err error) {
+ return iamPolicyManagement.PatchPolicyWithContext(context.Background(), patchPolicyOptions)
+}
+
+// PatchPolicyWithContext is an alternate form of the PatchPolicy method which supports a Context parameter
+func (iamPolicyManagement *IamPolicyManagementV1) PatchPolicyWithContext(ctx context.Context, patchPolicyOptions *PatchPolicyOptions) (result *Policy, response *core.DetailedResponse, err error) {
+ err = core.ValidateNotNil(patchPolicyOptions, "patchPolicyOptions cannot be nil")
+ if err != nil {
+ return
+ }
+ err = core.ValidateStruct(patchPolicyOptions, "patchPolicyOptions")
+ if err != nil {
+ return
+ }
+
+ pathParamsMap := map[string]string{
+ "policy_id": *patchPolicyOptions.PolicyID,
+ }
+
+ builder := core.NewRequestBuilder(core.PATCH)
+ builder = builder.WithContext(ctx)
+ builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression()
+ _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v1/policies/{policy_id}`, pathParamsMap)
+ if err != nil {
+ return
+ }
+
+ for headerName, headerValue := range patchPolicyOptions.Headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "PatchPolicy")
+ for headerName, headerValue := range sdkHeaders {
+ builder.AddHeader(headerName, headerValue)
+ }
+ builder.AddHeader("Accept", "application/json")
+ builder.AddHeader("Content-Type", "application/json")
+ if patchPolicyOptions.IfMatch != nil {
+ builder.AddHeader("If-Match", fmt.Sprint(*patchPolicyOptions.IfMatch))
+ }
+
+ body := make(map[string]interface{})
+ if patchPolicyOptions.State != nil {
+ body["state"] = patchPolicyOptions.State
+ }
+ _, err = builder.SetBodyContentJSON(body)
+ if err != nil {
+ return
+ }
+
+ request, err := builder.Build()
+ if err != nil {
+ return
+ }
+
+ var rawResponse map[string]json.RawMessage
+ response, err = iamPolicyManagement.Service.Request(request, &rawResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPolicy)
+ if err != nil {
+ return
+ }
+ response.Result = result
+
+ return
+}
+
+// ListRoles : Get roles by filters
+// Get roles based on the filters. While managing roles, you may want to retrieve roles and filter by usages. This can
+// be done through query parameters. Currently, we only support the following attributes: account_id and service_name.
+// Only roles that match the filter and that the caller has read access to are returned. If the caller does not have
+// read access to any roles, an empty array is returned.
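+//
+// Sketch of listing roles scoped to an account (the `service` instance and
+// accountID are placeholders):
+//
+//   roles, response, err := service.ListRoles(
+//       &ListRolesOptions{AccountID: core.StringPtr(accountID)})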
+func (iamPolicyManagement *IamPolicyManagementV1) ListRoles(listRolesOptions *ListRolesOptions) (result *RoleList, response *core.DetailedResponse, err error) { + return iamPolicyManagement.ListRolesWithContext(context.Background(), listRolesOptions) +} + +// ListRolesWithContext is an alternate form of the ListRoles method which supports a Context parameter +func (iamPolicyManagement *IamPolicyManagementV1) ListRolesWithContext(ctx context.Context, listRolesOptions *ListRolesOptions) (result *RoleList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listRolesOptions, "listRolesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v2/roles`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listRolesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "ListRoles") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if listRolesOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*listRolesOptions.AcceptLanguage)) + } + + if listRolesOptions.AccountID != nil { + builder.AddQuery("account_id", fmt.Sprint(*listRolesOptions.AccountID)) + } + if listRolesOptions.ServiceName != nil { + builder.AddQuery("service_name", fmt.Sprint(*listRolesOptions.ServiceName)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamPolicyManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoleList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateRole : Create a role +// Creates a custom role for a specific service within the account. An account owner or a user assigned the +// Administrator role on the Role management service can create a custom role. Any number of actions for a single +// service can be mapped to the new role, but there must be at least one service-defined action to successfully create +// the new role. 
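+//
+// A hedged sketch using the constructor defined later in this file; the
+// action ID and names below are illustrative and must match actions published
+// by the target service:
+//
+//   opts := service.NewCreateRoleOptions(
+//       "Example Reader",                           // display name
+//       []string{"example-service.dashboard.view"}, // service-defined actions
+//       "ExampleReader", accountID, "example-service")
+//   role, response, err := service.CreateRole(opts)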
+func (iamPolicyManagement *IamPolicyManagementV1) CreateRole(createRoleOptions *CreateRoleOptions) (result *CustomRole, response *core.DetailedResponse, err error) { + return iamPolicyManagement.CreateRoleWithContext(context.Background(), createRoleOptions) +} + +// CreateRoleWithContext is an alternate form of the CreateRole method which supports a Context parameter +func (iamPolicyManagement *IamPolicyManagementV1) CreateRoleWithContext(ctx context.Context, createRoleOptions *CreateRoleOptions) (result *CustomRole, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createRoleOptions, "createRoleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createRoleOptions, "createRoleOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v2/roles`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createRoleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "CreateRole") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createRoleOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*createRoleOptions.AcceptLanguage)) + } + + body := make(map[string]interface{}) + if createRoleOptions.DisplayName != nil { + body["display_name"] = createRoleOptions.DisplayName + } + if createRoleOptions.Actions != nil { + body["actions"] = createRoleOptions.Actions + } + if createRoleOptions.Name != nil { + body["name"] = createRoleOptions.Name + } + if createRoleOptions.AccountID != nil { + body["account_id"] = createRoleOptions.AccountID + } + if createRoleOptions.ServiceName != nil { + body["service_name"] = createRoleOptions.ServiceName + } + if createRoleOptions.Description != nil { + body["description"] = createRoleOptions.Description + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamPolicyManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomRole) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateRole : Update a role +// Update a custom role. A role administrator might want to update an existing role by updating the display name, +// description, or the actions that are mapped to the role. The name, account_id, and service_name can't be changed. 
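+//
+// A hedged sketch; the If-Match value is the ETag returned by a prior GetRole,
+// the options struct is defined later in this file, and the identifiers below
+// are placeholders:
+//
+//   role, getResp, err := service.GetRole(service.NewGetRoleOptions(roleID))
+//   opts := &UpdateRoleOptions{
+//       RoleID:      core.StringPtr(roleID),
+//       IfMatch:     core.StringPtr(getResp.GetHeaders().Get("ETag")),
+//       Description: core.StringPtr("Updated description"),
+//   }
+//   role, updateResp, err := service.UpdateRole(opts)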
+func (iamPolicyManagement *IamPolicyManagementV1) UpdateRole(updateRoleOptions *UpdateRoleOptions) (result *CustomRole, response *core.DetailedResponse, err error) { + return iamPolicyManagement.UpdateRoleWithContext(context.Background(), updateRoleOptions) +} + +// UpdateRoleWithContext is an alternate form of the UpdateRole method which supports a Context parameter +func (iamPolicyManagement *IamPolicyManagementV1) UpdateRoleWithContext(ctx context.Context, updateRoleOptions *UpdateRoleOptions) (result *CustomRole, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateRoleOptions, "updateRoleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateRoleOptions, "updateRoleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "role_id": *updateRoleOptions.RoleID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v2/roles/{role_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateRoleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "UpdateRole") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateRoleOptions.IfMatch != nil { + builder.AddHeader("If-Match", fmt.Sprint(*updateRoleOptions.IfMatch)) + } + + body := make(map[string]interface{}) + if updateRoleOptions.DisplayName != nil { + body["display_name"] = updateRoleOptions.DisplayName + } + if updateRoleOptions.Description != nil { + body["description"] = updateRoleOptions.Description + } + if updateRoleOptions.Actions != nil { + body["actions"] = updateRoleOptions.Actions + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamPolicyManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomRole) + if err != nil { + return + } + response.Result = result + + return +} + +// GetRole : Retrieve a role by ID +// Retrieve a role by providing a role ID. 
+func (iamPolicyManagement *IamPolicyManagementV1) GetRole(getRoleOptions *GetRoleOptions) (result *CustomRole, response *core.DetailedResponse, err error) { + return iamPolicyManagement.GetRoleWithContext(context.Background(), getRoleOptions) +} + +// GetRoleWithContext is an alternate form of the GetRole method which supports a Context parameter +func (iamPolicyManagement *IamPolicyManagementV1) GetRoleWithContext(ctx context.Context, getRoleOptions *GetRoleOptions) (result *CustomRole, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getRoleOptions, "getRoleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getRoleOptions, "getRoleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "role_id": *getRoleOptions.RoleID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v2/roles/{role_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getRoleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "GetRole") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = iamPolicyManagement.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCustomRole) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteRole : Delete a role by ID +// Delete a role by providing a role ID. 
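+//
+// Sketch (the `service` instance and roleID are placeholders):
+//
+//   response, err := service.DeleteRole(service.NewDeleteRoleOptions(roleID))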
+func (iamPolicyManagement *IamPolicyManagementV1) DeleteRole(deleteRoleOptions *DeleteRoleOptions) (response *core.DetailedResponse, err error) { + return iamPolicyManagement.DeleteRoleWithContext(context.Background(), deleteRoleOptions) +} + +// DeleteRoleWithContext is an alternate form of the DeleteRole method which supports a Context parameter +func (iamPolicyManagement *IamPolicyManagementV1) DeleteRoleWithContext(ctx context.Context, deleteRoleOptions *DeleteRoleOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteRoleOptions, "deleteRoleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteRoleOptions, "deleteRoleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "role_id": *deleteRoleOptions.RoleID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = iamPolicyManagement.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(iamPolicyManagement.Service.Options.URL, `/v2/roles/{role_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteRoleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("iam_policy_management", "V1", "DeleteRole") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = iamPolicyManagement.Service.Request(request, nil) + + return +} + +// CreatePolicyOptions : The CreatePolicy options. +type CreatePolicyOptions struct { + // The policy type; either 'access' or 'authorization'. + Type *string `validate:"required"` + + // The subjects associated with a policy. + Subjects []PolicySubject `validate:"required"` + + // A set of role cloud resource names (CRNs) granted by the policy. + Roles []PolicyRole `validate:"required"` + + // The resources associated with a policy. + Resources []PolicyResource `validate:"required"` + + // Customer-defined description. + Description *string + + // Translation language code. 
+ AcceptLanguage *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreatePolicyOptions : Instantiate CreatePolicyOptions +func (*IamPolicyManagementV1) NewCreatePolicyOptions(typeVar string, subjects []PolicySubject, roles []PolicyRole, resources []PolicyResource) *CreatePolicyOptions { + return &CreatePolicyOptions{ + Type: core.StringPtr(typeVar), + Subjects: subjects, + Roles: roles, + Resources: resources, + } +} + +// SetType : Allow user to set Type +func (options *CreatePolicyOptions) SetType(typeVar string) *CreatePolicyOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetSubjects : Allow user to set Subjects +func (options *CreatePolicyOptions) SetSubjects(subjects []PolicySubject) *CreatePolicyOptions { + options.Subjects = subjects + return options +} + +// SetRoles : Allow user to set Roles +func (options *CreatePolicyOptions) SetRoles(roles []PolicyRole) *CreatePolicyOptions { + options.Roles = roles + return options +} + +// SetResources : Allow user to set Resources +func (options *CreatePolicyOptions) SetResources(resources []PolicyResource) *CreatePolicyOptions { + options.Resources = resources + return options +} + +// SetDescription : Allow user to set Description +func (options *CreatePolicyOptions) SetDescription(description string) *CreatePolicyOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *CreatePolicyOptions) SetAcceptLanguage(acceptLanguage string) *CreatePolicyOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreatePolicyOptions) SetHeaders(param map[string]string) *CreatePolicyOptions { + options.Headers = param + return options +} + +// CreateRoleOptions : The CreateRole options. +type CreateRoleOptions struct { + // The display name of the role that is shown in the console. + DisplayName *string `validate:"required"` + + // The actions of the role. + Actions []string `validate:"required"` + + // The name of the role that is used in the CRN. Can only be alphanumeric and has to be capitalized. + Name *string `validate:"required"` + + // The account GUID. + AccountID *string `validate:"required"` + + // The service name. + ServiceName *string `validate:"required"` + + // The description of the role. + Description *string + + // Translation language code. 
+ AcceptLanguage *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateRoleOptions : Instantiate CreateRoleOptions +func (*IamPolicyManagementV1) NewCreateRoleOptions(displayName string, actions []string, name string, accountID string, serviceName string) *CreateRoleOptions { + return &CreateRoleOptions{ + DisplayName: core.StringPtr(displayName), + Actions: actions, + Name: core.StringPtr(name), + AccountID: core.StringPtr(accountID), + ServiceName: core.StringPtr(serviceName), + } +} + +// SetDisplayName : Allow user to set DisplayName +func (options *CreateRoleOptions) SetDisplayName(displayName string) *CreateRoleOptions { + options.DisplayName = core.StringPtr(displayName) + return options +} + +// SetActions : Allow user to set Actions +func (options *CreateRoleOptions) SetActions(actions []string) *CreateRoleOptions { + options.Actions = actions + return options +} + +// SetName : Allow user to set Name +func (options *CreateRoleOptions) SetName(name string) *CreateRoleOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetAccountID : Allow user to set AccountID +func (options *CreateRoleOptions) SetAccountID(accountID string) *CreateRoleOptions { + options.AccountID = core.StringPtr(accountID) + return options +} + +// SetServiceName : Allow user to set ServiceName +func (options *CreateRoleOptions) SetServiceName(serviceName string) *CreateRoleOptions { + options.ServiceName = core.StringPtr(serviceName) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateRoleOptions) SetDescription(description string) *CreateRoleOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *CreateRoleOptions) SetAcceptLanguage(acceptLanguage string) *CreateRoleOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateRoleOptions) SetHeaders(param map[string]string) *CreateRoleOptions { + options.Headers = param + return options +} + +// DeletePolicyOptions : The DeletePolicy options. +type DeletePolicyOptions struct { + // The policy ID. + PolicyID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeletePolicyOptions : Instantiate DeletePolicyOptions +func (*IamPolicyManagementV1) NewDeletePolicyOptions(policyID string) *DeletePolicyOptions { + return &DeletePolicyOptions{ + PolicyID: core.StringPtr(policyID), + } +} + +// SetPolicyID : Allow user to set PolicyID +func (options *DeletePolicyOptions) SetPolicyID(policyID string) *DeletePolicyOptions { + options.PolicyID = core.StringPtr(policyID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeletePolicyOptions) SetHeaders(param map[string]string) *DeletePolicyOptions { + options.Headers = param + return options +} + +// DeleteRoleOptions : The DeleteRole options. +type DeleteRoleOptions struct { + // The role ID. 
+	RoleID *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewDeleteRoleOptions : Instantiate DeleteRoleOptions
+func (*IamPolicyManagementV1) NewDeleteRoleOptions(roleID string) *DeleteRoleOptions {
+	return &DeleteRoleOptions{
+		RoleID: core.StringPtr(roleID),
+	}
+}
+
+// SetRoleID : Allow user to set RoleID
+func (options *DeleteRoleOptions) SetRoleID(roleID string) *DeleteRoleOptions {
+	options.RoleID = core.StringPtr(roleID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *DeleteRoleOptions) SetHeaders(param map[string]string) *DeleteRoleOptions {
+	options.Headers = param
+	return options
+}
+
+// GetPolicyOptions : The GetPolicy options.
+type GetPolicyOptions struct {
+	// The policy ID.
+	PolicyID *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewGetPolicyOptions : Instantiate GetPolicyOptions
+func (*IamPolicyManagementV1) NewGetPolicyOptions(policyID string) *GetPolicyOptions {
+	return &GetPolicyOptions{
+		PolicyID: core.StringPtr(policyID),
+	}
+}
+
+// SetPolicyID : Allow user to set PolicyID
+func (options *GetPolicyOptions) SetPolicyID(policyID string) *GetPolicyOptions {
+	options.PolicyID = core.StringPtr(policyID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetPolicyOptions) SetHeaders(param map[string]string) *GetPolicyOptions {
+	options.Headers = param
+	return options
+}
+
+// GetRoleOptions : The GetRole options.
+type GetRoleOptions struct {
+	// The role ID.
+	RoleID *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewGetRoleOptions : Instantiate GetRoleOptions
+func (*IamPolicyManagementV1) NewGetRoleOptions(roleID string) *GetRoleOptions {
+	return &GetRoleOptions{
+		RoleID: core.StringPtr(roleID),
+	}
+}
+
+// SetRoleID : Allow user to set RoleID
+func (options *GetRoleOptions) SetRoleID(roleID string) *GetRoleOptions {
+	options.RoleID = core.StringPtr(roleID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetRoleOptions) SetHeaders(param map[string]string) *GetRoleOptions {
+	options.Headers = param
+	return options
+}
+
+// ListPoliciesOptions : The ListPolicies options.
+type ListPoliciesOptions struct {
+	// The account GUID that the policies belong to.
+	AccountID *string `validate:"required"`
+
+	// Translation language code.
+	AcceptLanguage *string
+
+	// The IAM ID used to identify the subject.
+	IamID *string
+
+	// The access group ID.
+	AccessGroupID *string
+
+	// The type of policy (access or authorization).
+	Type *string
+
+	// The type of service.
+	ServiceType *string
+
+	// The name of the access management tag in the policy.
+	TagName *string
+
+	// The value of the access management tag in the policy.
+	TagValue *string
+
+	// Sort the results by any of the top level policy fields (id, created_at, created_by_id, last_modified_at, etc).
+	Sort *string
+
+	// Include additional data per policy returned [include_last_permit, display].
+	Format *string
+
+	// The state of the policy, 'active' or 'deleted'.
+	State *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListPoliciesOptions : Instantiate ListPoliciesOptions
+func (*IamPolicyManagementV1) NewListPoliciesOptions(accountID string) *ListPoliciesOptions {
+	return &ListPoliciesOptions{
+		AccountID: core.StringPtr(accountID),
+	}
+}
+
+// SetAccountID : Allow user to set AccountID
+func (options *ListPoliciesOptions) SetAccountID(accountID string) *ListPoliciesOptions {
+	options.AccountID = core.StringPtr(accountID)
+	return options
+}
+
+// SetAcceptLanguage : Allow user to set AcceptLanguage
+func (options *ListPoliciesOptions) SetAcceptLanguage(acceptLanguage string) *ListPoliciesOptions {
+	options.AcceptLanguage = core.StringPtr(acceptLanguage)
+	return options
+}
+
+// SetIamID : Allow user to set IamID
+func (options *ListPoliciesOptions) SetIamID(iamID string) *ListPoliciesOptions {
+	options.IamID = core.StringPtr(iamID)
+	return options
+}
+
+// SetAccessGroupID : Allow user to set AccessGroupID
+func (options *ListPoliciesOptions) SetAccessGroupID(accessGroupID string) *ListPoliciesOptions {
+	options.AccessGroupID = core.StringPtr(accessGroupID)
+	return options
+}
+
+// SetType : Allow user to set Type
+func (options *ListPoliciesOptions) SetType(typeVar string) *ListPoliciesOptions {
+	options.Type = core.StringPtr(typeVar)
+	return options
+}
+
+// SetServiceType : Allow user to set ServiceType
+func (options *ListPoliciesOptions) SetServiceType(serviceType string) *ListPoliciesOptions {
+	options.ServiceType = core.StringPtr(serviceType)
+	return options
+}
+
+// SetTagName : Allow user to set TagName
+func (options *ListPoliciesOptions) SetTagName(tagName string) *ListPoliciesOptions {
+	options.TagName = core.StringPtr(tagName)
+	return options
+}
+
+// SetTagValue : Allow user to set TagValue
+func (options *ListPoliciesOptions) SetTagValue(tagValue string) *ListPoliciesOptions {
+	options.TagValue = core.StringPtr(tagValue)
+	return options
+}
+
+// SetSort : Allow user to set Sort
+func (options *ListPoliciesOptions) SetSort(sort string) *ListPoliciesOptions {
+	options.Sort = core.StringPtr(sort)
+	return options
+}
+
+// SetFormat : Allow user to set Format
+func (options *ListPoliciesOptions) SetFormat(format string) *ListPoliciesOptions {
+	options.Format = core.StringPtr(format)
+	return options
+}
+
+// SetState : Allow user to set State
+func (options *ListPoliciesOptions) SetState(state string) *ListPoliciesOptions {
+	options.State = core.StringPtr(state)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListPoliciesOptions) SetHeaders(param map[string]string) *ListPoliciesOptions {
+	options.Headers = param
+	return options
+}
+
+// ListRolesOptions : The ListRoles options.
+type ListRolesOptions struct {
+	// Translation language code.
+	AcceptLanguage *string
+
+	// The account GUID that the roles belong to.
+	AccountID *string
+
+	// The name of the service.
+	ServiceName *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListRolesOptions : Instantiate ListRolesOptions
+func (*IamPolicyManagementV1) NewListRolesOptions() *ListRolesOptions {
+	return &ListRolesOptions{}
+}
+
+// SetAcceptLanguage : Allow user to set AcceptLanguage
+func (options *ListRolesOptions) SetAcceptLanguage(acceptLanguage string) *ListRolesOptions {
+	options.AcceptLanguage = core.StringPtr(acceptLanguage)
+	return options
+}
+
+// SetAccountID : Allow user to set AccountID
+func (options *ListRolesOptions) SetAccountID(accountID string) *ListRolesOptions {
+	options.AccountID = core.StringPtr(accountID)
+	return options
+}
+
+// SetServiceName : Allow user to set ServiceName
+func (options *ListRolesOptions) SetServiceName(serviceName string) *ListRolesOptions {
+	options.ServiceName = core.StringPtr(serviceName)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListRolesOptions) SetHeaders(param map[string]string) *ListRolesOptions {
+	options.Headers = param
+	return options
+}
+
+// PatchPolicyOptions : The PatchPolicy options.
+type PatchPolicyOptions struct {
+	// The policy ID.
+	PolicyID *string `validate:"required,ne="`
+
+	// The revision number for updating a policy; it must match the ETag value of the existing policy. The ETag can be
+	// retrieved using the GET /v1/policies/{policy_id} API and looking at the ETag response header.
+	IfMatch *string `validate:"required"`
+
+	// The policy state; either 'active' or 'deleted'.
+	State *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewPatchPolicyOptions : Instantiate PatchPolicyOptions
+func (*IamPolicyManagementV1) NewPatchPolicyOptions(policyID string, ifMatch string) *PatchPolicyOptions {
+	return &PatchPolicyOptions{
+		PolicyID: core.StringPtr(policyID),
+		IfMatch: core.StringPtr(ifMatch),
+	}
+}
+
+// SetPolicyID : Allow user to set PolicyID
+func (options *PatchPolicyOptions) SetPolicyID(policyID string) *PatchPolicyOptions {
+	options.PolicyID = core.StringPtr(policyID)
+	return options
+}
+
+// SetIfMatch : Allow user to set IfMatch
+func (options *PatchPolicyOptions) SetIfMatch(ifMatch string) *PatchPolicyOptions {
+	options.IfMatch = core.StringPtr(ifMatch)
+	return options
+}
+
+// SetState : Allow user to set State
+func (options *PatchPolicyOptions) SetState(state string) *PatchPolicyOptions {
+	options.State = core.StringPtr(state)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *PatchPolicyOptions) SetHeaders(param map[string]string) *PatchPolicyOptions {
+	options.Headers = param
+	return options
+}
+
+// UpdatePolicyOptions : The UpdatePolicy options.
+type UpdatePolicyOptions struct {
+	// The policy ID.
+	PolicyID *string `validate:"required,ne="`
+
+	// The revision number for updating a policy; it must match the ETag value of the existing policy. The ETag can be
+	// retrieved using the GET /v1/policies/{policy_id} API and looking at the ETag response header.
+	IfMatch *string `validate:"required"`
+
+	// The policy type; either 'access' or 'authorization'.
+	Type *string `validate:"required"`
+
+	// The subjects associated with a policy.
+	Subjects []PolicySubject `validate:"required"`
+
+	// A set of role cloud resource names (CRNs) granted by the policy.
+	Roles []PolicyRole `validate:"required"`
+
+	// The resources associated with a policy.
+	Resources []PolicyResource `validate:"required"`
+
+	// Customer-defined description.
+	Description *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewUpdatePolicyOptions : Instantiate UpdatePolicyOptions
+func (*IamPolicyManagementV1) NewUpdatePolicyOptions(policyID string, ifMatch string, typeVar string, subjects []PolicySubject, roles []PolicyRole, resources []PolicyResource) *UpdatePolicyOptions {
+	return &UpdatePolicyOptions{
+		PolicyID: core.StringPtr(policyID),
+		IfMatch: core.StringPtr(ifMatch),
+		Type: core.StringPtr(typeVar),
+		Subjects: subjects,
+		Roles: roles,
+		Resources: resources,
+	}
+}
+
+// SetPolicyID : Allow user to set PolicyID
+func (options *UpdatePolicyOptions) SetPolicyID(policyID string) *UpdatePolicyOptions {
+	options.PolicyID = core.StringPtr(policyID)
+	return options
+}
+
+// SetIfMatch : Allow user to set IfMatch
+func (options *UpdatePolicyOptions) SetIfMatch(ifMatch string) *UpdatePolicyOptions {
+	options.IfMatch = core.StringPtr(ifMatch)
+	return options
+}
+
+// SetType : Allow user to set Type
+func (options *UpdatePolicyOptions) SetType(typeVar string) *UpdatePolicyOptions {
+	options.Type = core.StringPtr(typeVar)
+	return options
+}
+
+// SetSubjects : Allow user to set Subjects
+func (options *UpdatePolicyOptions) SetSubjects(subjects []PolicySubject) *UpdatePolicyOptions {
+	options.Subjects = subjects
+	return options
+}
+
+// SetRoles : Allow user to set Roles
+func (options *UpdatePolicyOptions) SetRoles(roles []PolicyRole) *UpdatePolicyOptions {
+	options.Roles = roles
+	return options
+}
+
+// SetResources : Allow user to set Resources
+func (options *UpdatePolicyOptions) SetResources(resources []PolicyResource) *UpdatePolicyOptions {
+	options.Resources = resources
+	return options
+}
+
+// SetDescription : Allow user to set Description
+func (options *UpdatePolicyOptions) SetDescription(description string) *UpdatePolicyOptions {
+	options.Description = core.StringPtr(description)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdatePolicyOptions) SetHeaders(param map[string]string) *UpdatePolicyOptions {
+	options.Headers = param
+	return options
+}
+
+// UpdateRoleOptions : The UpdateRole options.
+type UpdateRoleOptions struct {
+	// The role ID.
+	RoleID *string `validate:"required,ne="`
+
+	// The revision number for updating a role; it must match the ETag value of the existing role. The ETag can be
+	// retrieved using the GET /v2/roles/{role_id} API and looking at the ETag response header.
+	IfMatch *string `validate:"required"`
+
+	// The display name of the role that is shown in the console.
+	DisplayName *string
+
+	// The description of the role.
+	Description *string
+
+	// The actions of the role.
+	Actions []string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewUpdateRoleOptions : Instantiate UpdateRoleOptions
+func (*IamPolicyManagementV1) NewUpdateRoleOptions(roleID string, ifMatch string) *UpdateRoleOptions {
+	return &UpdateRoleOptions{
+		RoleID: core.StringPtr(roleID),
+		IfMatch: core.StringPtr(ifMatch),
+	}
+}
+
+// SetRoleID : Allow user to set RoleID
+func (options *UpdateRoleOptions) SetRoleID(roleID string) *UpdateRoleOptions {
+	options.RoleID = core.StringPtr(roleID)
+	return options
+}
+
+// SetIfMatch : Allow user to set IfMatch
+func (options *UpdateRoleOptions) SetIfMatch(ifMatch string) *UpdateRoleOptions {
+	options.IfMatch = core.StringPtr(ifMatch)
+	return options
+}
+
+// SetDisplayName : Allow user to set DisplayName
+func (options *UpdateRoleOptions) SetDisplayName(displayName string) *UpdateRoleOptions {
+	options.DisplayName = core.StringPtr(displayName)
+	return options
+}
+
+// SetDescription : Allow user to set Description
+func (options *UpdateRoleOptions) SetDescription(description string) *UpdateRoleOptions {
+	options.Description = core.StringPtr(description)
+	return options
+}
+
+// SetActions : Allow user to set Actions
+func (options *UpdateRoleOptions) SetActions(actions []string) *UpdateRoleOptions {
+	options.Actions = actions
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UpdateRoleOptions) SetHeaders(param map[string]string) *UpdateRoleOptions {
+	options.Headers = param
+	return options
+}
+
+// CustomRole : An additional set of properties associated with a role.
+type CustomRole struct {
+	// The role ID.
+	ID *string `json:"id,omitempty"`
+
+	// The display name of the role that is shown in the console.
+	DisplayName *string `json:"display_name,omitempty"`
+
+	// The description of the role.
+	Description *string `json:"description,omitempty"`
+
+	// The actions of the role.
+	Actions []string `json:"actions,omitempty"`
+
+	// The role CRN.
+	CRN *string `json:"crn,omitempty"`
+
+	// The name of the role that is used in the CRN. Can only be alphanumeric and has to be capitalized.
+	Name *string `json:"name,omitempty"`
+
+	// The account GUID.
+	AccountID *string `json:"account_id,omitempty"`
+
+	// The service name.
+	ServiceName *string `json:"service_name,omitempty"`
+
+	// The UTC timestamp when the role was created.
+	CreatedAt *strfmt.DateTime `json:"created_at,omitempty"`
+
+	// The iam ID of the entity that created the role.
+	CreatedByID *string `json:"created_by_id,omitempty"`
+
+	// The UTC timestamp when the role was last modified.
+	LastModifiedAt *strfmt.DateTime `json:"last_modified_at,omitempty"`
+
+	// The iam ID of the entity that last modified the role.
+	LastModifiedByID *string `json:"last_modified_by_id,omitempty"`
+
+	// The href link back to the role.
+	Href *string `json:"href,omitempty"`
+}
+
+// UnmarshalCustomRole unmarshals an instance of CustomRole from the specified map of raw messages.
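+//
+// As an editor's illustration (not generated code; the argument values are
+// placeholders and "service" is assumed to be an *IamPolicyManagementV1), a
+// CustomRole is normally created through the fluent options builder defined
+// earlier in this file rather than unmarshalled by hand:
+//
+//	options := service.NewCreateRoleOptions(
+//		"Example Role",
+//		[]string{"example-service.resource.read"},
+//		"ExampleRole",
+//		"example-account-id",
+//		"example-service",
+//	).SetDescription("A custom role for the example service")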
+func UnmarshalCustomRole(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CustomRole) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "display_name", &obj.DisplayName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "actions", &obj.Actions) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "account_id", &obj.AccountID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "service_name", &obj.ServiceName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by_id", &obj.CreatedByID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_modified_at", &obj.LastModifiedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_modified_by_id", &obj.LastModifiedByID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Policy : The core set of properties associated with a policy. +type Policy struct { + // The policy ID. + ID *string `json:"id,omitempty"` + + // The policy type; either 'access' or 'authorization'. + Type *string `json:"type,omitempty"` + + // Customer-defined description. + Description *string `json:"description,omitempty"` + + // The subjects associated with a policy. + Subjects []PolicySubject `json:"subjects,omitempty"` + + // A set of role cloud resource names (CRNs) granted by the policy. + Roles []PolicyRole `json:"roles,omitempty"` + + // The resources associated with a policy. + Resources []PolicyResource `json:"resources,omitempty"` + + // The href link back to the policy. + Href *string `json:"href,omitempty"` + + // The UTC timestamp when the policy was created. + CreatedAt *strfmt.DateTime `json:"created_at,omitempty"` + + // The iam ID of the entity that created the policy. + CreatedByID *string `json:"created_by_id,omitempty"` + + // The UTC timestamp when the policy was last modified. + LastModifiedAt *strfmt.DateTime `json:"last_modified_at,omitempty"` + + // The iam ID of the entity that last modified the policy. + LastModifiedByID *string `json:"last_modified_by_id,omitempty"` + + // The policy state; either 'active' or 'deleted'. + State *string `json:"state,omitempty"` +} + +// UnmarshalPolicy unmarshals an instance of Policy from the specified map of raw messages. 
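+//
+// A minimal sketch (editor's note, not generated code): unmarshallers such as
+// this one are usually passed to core.UnmarshalModel rather than called
+// directly; rawResponse here stands in for a decoded JSON service response:
+//
+//	var rawResponse map[string]json.RawMessage
+//	// ... populate rawResponse from a REST call ...
+//	var policy *Policy
+//	err := core.UnmarshalModel(rawResponse, "", &policy, UnmarshalPolicy)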
+func UnmarshalPolicy(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Policy) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subjects", &obj.Subjects, UnmarshalPolicySubject) + if err != nil { + return + } + err = core.UnmarshalModel(m, "roles", &obj.Roles, UnmarshalPolicyRole) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalPolicyResource) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by_id", &obj.CreatedByID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_modified_at", &obj.LastModifiedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_modified_by_id", &obj.LastModifiedByID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PolicyList : A collection of policies. +type PolicyList struct { + // List of policies. + Policies []Policy `json:"policies,omitempty"` +} + +// UnmarshalPolicyList unmarshals an instance of PolicyList from the specified map of raw messages. +func UnmarshalPolicyList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PolicyList) + err = core.UnmarshalModel(m, "policies", &obj.Policies, UnmarshalPolicy) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PolicyResource : The attributes of the resource. Note that only one resource is allowed in a policy. +type PolicyResource struct { + // List of resource attributes. + Attributes []ResourceAttribute `json:"attributes,omitempty"` + + // List of access management tags. + Tags []ResourceTag `json:"tags,omitempty"` +} + +// UnmarshalPolicyResource unmarshals an instance of PolicyResource from the specified map of raw messages. +func UnmarshalPolicyResource(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PolicyResource) + err = core.UnmarshalModel(m, "attributes", &obj.Attributes, UnmarshalResourceAttribute) + if err != nil { + return + } + err = core.UnmarshalModel(m, "tags", &obj.Tags, UnmarshalResourceTag) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PolicyRole : A role associated with a policy. +type PolicyRole struct { + // The role cloud resource name granted by the policy. + RoleID *string `json:"role_id" validate:"required"` + + // The display name of the role. + DisplayName *string `json:"display_name,omitempty"` + + // The description of the role. + Description *string `json:"description,omitempty"` +} + +// NewPolicyRole : Instantiate PolicyRole (Generic Model Constructor) +func (*IamPolicyManagementV1) NewPolicyRole(roleID string) (model *PolicyRole, err error) { + model = &PolicyRole{ + RoleID: core.StringPtr(roleID), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalPolicyRole unmarshals an instance of PolicyRole from the specified map of raw messages. 
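+//
+// For illustration only (an editor's sketch: the role CRN and attribute values
+// are placeholders, and "service" is assumed to be an *IamPolicyManagementV1),
+// a PolicyRole is typically combined with subject and resource attributes to
+// build a CreatePolicyOptions through the fluent setters defined above:
+//
+//	role, _ := service.NewPolicyRole("crn:v1:bluemix:public:iam::::role:Viewer")
+//	subject, _ := service.NewSubjectAttribute("iam_id", "IBMid-123EXAMPLE")
+//	resource, _ := service.NewResourceAttribute("accountId", "example-account-id")
+//	options := service.NewCreatePolicyOptions(
+//		"access",
+//		[]PolicySubject{{Attributes: []SubjectAttribute{*subject}}},
+//		[]PolicyRole{*role},
+//		[]PolicyResource{{Attributes: []ResourceAttribute{*resource}}},
+//	).SetDescription("Example access policy")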
+func UnmarshalPolicyRole(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PolicyRole) + err = core.UnmarshalPrimitive(m, "role_id", &obj.RoleID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "display_name", &obj.DisplayName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PolicySubject : The subject attribute values that must match in order for this policy to apply in a permission decision. +type PolicySubject struct { + // List of subject attributes. + Attributes []SubjectAttribute `json:"attributes,omitempty"` +} + +// UnmarshalPolicySubject unmarshals an instance of PolicySubject from the specified map of raw messages. +func UnmarshalPolicySubject(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PolicySubject) + err = core.UnmarshalModel(m, "attributes", &obj.Attributes, UnmarshalSubjectAttribute) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceAttribute : An attribute associated with a resource. +type ResourceAttribute struct { + // The name of an attribute. + Name *string `json:"name" validate:"required"` + + // The value of an attribute. + Value *string `json:"value" validate:"required"` + + // The operator of an attribute. + Operator *string `json:"operator,omitempty"` +} + +// NewResourceAttribute : Instantiate ResourceAttribute (Generic Model Constructor) +func (*IamPolicyManagementV1) NewResourceAttribute(name string, value string) (model *ResourceAttribute, err error) { + model = &ResourceAttribute{ + Name: core.StringPtr(name), + Value: core.StringPtr(value), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalResourceAttribute unmarshals an instance of ResourceAttribute from the specified map of raw messages. +func UnmarshalResourceAttribute(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceAttribute) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "operator", &obj.Operator) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceTag : A tag associated with a resource. +type ResourceTag struct { + // The name of an access management tag. + Name *string `json:"name" validate:"required"` + + // The value of an access management tag. + Value *string `json:"value" validate:"required"` + + // The operator of an access management tag. + Operator *string `json:"operator,omitempty"` +} + +// NewResourceTag : Instantiate ResourceTag (Generic Model Constructor) +func (*IamPolicyManagementV1) NewResourceTag(name string, value string) (model *ResourceTag, err error) { + model = &ResourceTag{ + Name: core.StringPtr(name), + Value: core.StringPtr(value), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalResourceTag unmarshals an instance of ResourceTag from the specified map of raw messages. 
+func UnmarshalResourceTag(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceTag) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "operator", &obj.Operator) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Role : A role resource. +type Role struct { + // The display name of the role that is shown in the console. + DisplayName *string `json:"display_name,omitempty"` + + // The description of the role. + Description *string `json:"description,omitempty"` + + // The actions of the role. + Actions []string `json:"actions,omitempty"` + + // The role CRN. + CRN *string `json:"crn,omitempty"` +} + +// UnmarshalRole unmarshals an instance of Role from the specified map of raw messages. +func UnmarshalRole(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Role) + err = core.UnmarshalPrimitive(m, "display_name", &obj.DisplayName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "actions", &obj.Actions) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RoleList : A collection of roles returned by the 'list roles' operation. +type RoleList struct { + // List of custom roles. + CustomRoles []CustomRole `json:"custom_roles,omitempty"` + + // List of service roles. + ServiceRoles []Role `json:"service_roles,omitempty"` + + // List of system roles. + SystemRoles []Role `json:"system_roles,omitempty"` +} + +// UnmarshalRoleList unmarshals an instance of RoleList from the specified map of raw messages. +func UnmarshalRoleList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RoleList) + err = core.UnmarshalModel(m, "custom_roles", &obj.CustomRoles, UnmarshalCustomRole) + if err != nil { + return + } + err = core.UnmarshalModel(m, "service_roles", &obj.ServiceRoles, UnmarshalRole) + if err != nil { + return + } + err = core.UnmarshalModel(m, "system_roles", &obj.SystemRoles, UnmarshalRole) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubjectAttribute : An attribute associated with a subject. +type SubjectAttribute struct { + // The name of an attribute. + Name *string `json:"name" validate:"required"` + + // The value of an attribute. + Value *string `json:"value" validate:"required"` +} + +// NewSubjectAttribute : Instantiate SubjectAttribute (Generic Model Constructor) +func (*IamPolicyManagementV1) NewSubjectAttribute(name string, value string) (model *SubjectAttribute, err error) { + model = &SubjectAttribute{ + Name: core.StringPtr(name), + Value: core.StringPtr(value), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSubjectAttribute unmarshals an instance of SubjectAttribute from the specified map of raw messages. 
+func UnmarshalSubjectAttribute(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubjectAttribute) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/push-notifications-go-sdk/LICENSE b/vendor/github.com/IBM/push-notifications-go-sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/IBM/push-notifications-go-sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/IBM/push-notifications-go-sdk/common/headers.go b/vendor/github.com/IBM/push-notifications-go-sdk/common/headers.go new file mode 100644 index 00000000000..ac93027cd13 --- /dev/null +++ b/vendor/github.com/IBM/push-notifications-go-sdk/common/headers.go @@ -0,0 +1,82 @@ +/** + * (C) Copyright IBM Corp. 2019. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "fmt" + "runtime" +) + +const ( + sdkName = "push-service-go-sdk" + headerNameUserAgent = "User-Agent" +) + +// +// GetSdkHeaders - returns the set of SDK-specific headers to be included in an outgoing request. +// +// This function is invoked by generated service methods (i.e. methods which implement the REST API operations +// defined within the API definition). The purpose of this function is to give the SDK implementor the opportunity +// to provide SDK-specific HTTP headers that will be sent with an outgoing REST API request. +// This function is invoked for each invocation of a generated service method, +// so the set of HTTP headers could be request-specific. +// As an optimization, if your SDK will be returning the same set of HTTP headers for each invocation of this +// function, it is recommended that you initialize the returned map just once (perhaps by using +// lazy initialization) and simply return it each time the function is invoked, instead of building it each time +// as in the example below. 
+//
+// If you plan to gather metrics for your SDK, the User-Agent header value must
+// be a string similar to the following:
+// my-go-sdk/0.0.1 (lang=go; arch=x86_64; os=Linux; go.version=1.12.9)
+//
+// In the example above, the analytics tool will parse the user-agent header and
+// use the following properties:
+// "my-go-sdk" - the name of your sdk
+// "0.0.1" - the version of your sdk
+// "lang=go" - the language of the current sdk
+// "arch=x86_64; os=Linux; go.version=1.12.9" - system information
+//
+// Note: It is very important that the sdk name ends with the string `-sdk`,
+// as the analytics data collector uses this to gather usage data.
+//
+// Parameters:
+// serviceName - the name of the service as defined in the API definition (e.g. "MyService1")
+// serviceVersion - the version of the service as defined in the API definition (e.g. "V1")
+// operationId - the operationId as defined in the API definition (e.g. getContext)
+//
+// Returns:
+// a Map which contains the set of headers to be included in the REST API request
+//
+func GetSdkHeaders(serviceName string, serviceVersion string, operationId string) map[string]string {
+	sdkHeaders := make(map[string]string)
+
+	sdkHeaders[headerNameUserAgent] = GetUserAgentInfo()
+
+	return sdkHeaders
+}
+
+var userAgent string = fmt.Sprintf("%s/%s %s", sdkName, Version, GetSystemInfo())
+
+func GetUserAgentInfo() string {
+	return userAgent
+}
+
+var systemInfo = fmt.Sprintf("(lang=go; arch=%s; os=%s; go.version=%s)", runtime.GOARCH, runtime.GOOS, runtime.Version())
+
+func GetSystemInfo() string {
+	return systemInfo
+}
diff --git a/vendor/github.com/IBM/push-notifications-go-sdk/common/version.go b/vendor/github.com/IBM/push-notifications-go-sdk/common/version.go
new file mode 100644
index 00000000000..22cd2571232
--- /dev/null
+++ b/vendor/github.com/IBM/push-notifications-go-sdk/common/version.go
@@ -0,0 +1,20 @@
+/**
+ * (C) Copyright IBM Corp. 2019.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+// Version of the SDK
+const Version = "0.0.1"
diff --git a/vendor/github.com/IBM/push-notifications-go-sdk/pushservicev1/push_service_v1.go b/vendor/github.com/IBM/push-notifications-go-sdk/pushservicev1/push_service_v1.go
new file mode 100644
index 00000000000..ad2d9859537
--- /dev/null
+++ b/vendor/github.com/IBM/push-notifications-go-sdk/pushservicev1/push_service_v1.go
@@ -0,0 +1,4232 @@
+/**
+ * (C) Copyright IBM Corp. 2021.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.27.0-c07e12f4-20210209-225127 + */ + +// Package pushservicev1 : Operations and models for the PushServiceV1 service +package pushservicev1 + +import ( + "context" + "encoding/json" + "fmt" + "io" + "math/rand" + "net/http" + "reflect" + "time" + + "github.com/IBM/go-sdk-core/v5/core" + common "github.com/IBM/push-notifications-go-sdk/common" +) + +// PushServiceV1 : No description provided (generated by Openapi Generator +// https://github.com/openapitools/openapi-generator) +// +// Version: 1.0 +type PushServiceV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://push-service.cloud.ibm.com/imfpush/v1" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "push_service" + +// PushServiceV1Options : Service options +type PushServiceV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +const charset = "abcdefghijklmnopqrstuvwxyz_-" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +var seededRand *rand.Rand = rand.New( + rand.NewSource(time.Now().UnixNano())) + +func stringWithCharset(length int) string { + b := make([]byte, length) + for i := range b { + b[i] = charset[seededRand.Intn(len(charset))] + } + return string(b) +} + +// NewPushServiceV1UsingExternalConfig : constructs an instance of PushServiceV1 with passed in options and external configuration. +func NewPushServiceV1UsingExternalConfig(options *PushServiceV1Options) (pushService *PushServiceV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + pushService, err = NewPushServiceV1(options) + if err != nil { + return + } + + err = pushService.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = pushService.Service.SetServiceURL(options.URL) + } + return +} + +// NewPushServiceV1 : constructs an instance of PushServiceV1 with passed in options. +func NewPushServiceV1(options *PushServiceV1Options) (service *PushServiceV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &PushServiceV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "pushService" suitable for processing requests. 
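+//
+// A short usage sketch (editor's illustration): cloning lets request-specific
+// state, such as default headers, be set without mutating the shared instance;
+// customHeaders is assumed to be an http.Header prepared by the caller:
+//
+//	perRequestService := pushService.Clone()
+//	perRequestService.SetDefaultHeaders(customHeaders)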
+func (pushService *PushServiceV1) Clone() *PushServiceV1 {
+	if core.IsNil(pushService) {
+		return nil
+	}
+	clone := *pushService
+	clone.Service = pushService.Service.Clone()
+	return &clone
+}
+
+// SetServiceURL sets the service URL
+func (pushService *PushServiceV1) SetServiceURL(url string) error {
+	return pushService.Service.SetServiceURL(url)
+}
+
+// GetServiceURL returns the service URL
+func (pushService *PushServiceV1) GetServiceURL() string {
+	return pushService.Service.GetServiceURL()
+}
+
+// SetDefaultHeaders sets HTTP headers to be sent in every request
+func (pushService *PushServiceV1) SetDefaultHeaders(headers http.Header) {
+	pushService.Service.SetDefaultHeaders(headers)
+}
+
+// SetEnableGzipCompression sets the service's EnableGzipCompression field
+func (pushService *PushServiceV1) SetEnableGzipCompression(enableGzip bool) {
+	pushService.Service.SetEnableGzipCompression(enableGzip)
+}
+
+// GetEnableGzipCompression returns the service's EnableGzipCompression field
+func (pushService *PushServiceV1) GetEnableGzipCompression() bool {
+	return pushService.Service.GetEnableGzipCompression()
+}
+
+// EnableRetries enables automatic retries for requests invoked for this service instance.
+// If either parameter is specified as 0, then a default value is used instead.
+func (pushService *PushServiceV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
+	pushService.Service.EnableRetries(maxRetries, maxRetryInterval)
+}
+
+// DisableRetries disables automatic retries for requests invoked for this service instance.
+func (pushService *PushServiceV1) DisableRetries() {
+	pushService.Service.DisableRetries()
+}
+
+// GetSettings : Retrieve application settings
+// Retrieves the settings of the application referenced by the applicationId parameter.
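+//
+// A minimal call sketch (editor's illustration; the application ID and secret
+// are placeholders, and the options struct is built literally here to avoid
+// assuming constructor names that appear later in this file):
+//
+//	getSettingsOptions := &GetSettingsOptions{
+//		ApplicationID: core.StringPtr("my-app-id"),
+//		AppSecret:     core.StringPtr("my-app-secret"),
+//	}
+//	settings, detailedResponse, err := pushService.GetSettings(getSettingsOptions)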
+func (pushService *PushServiceV1) GetSettings(getSettingsOptions *GetSettingsOptions) (result *AppSettingsObjResponse, response *core.DetailedResponse, err error) { + return pushService.GetSettingsWithContext(context.Background(), getSettingsOptions) +} + +// GetSettingsWithContext is an alternate form of the GetSettings method which supports a Context parameter +func (pushService *PushServiceV1) GetSettingsWithContext(ctx context.Context, getSettingsOptions *GetSettingsOptions) (result *AppSettingsObjResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSettingsOptions, "getSettingsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSettingsOptions, "getSettingsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *getSettingsOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSettingsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetSettings") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getSettingsOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*getSettingsOptions.AppSecret)) + } + if getSettingsOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*getSettingsOptions.AcceptLanguage)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAppSettingsObjResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetApnsConf : Get the APNS settings +// Retrieves APNS credentials for the application. 
+func (pushService *PushServiceV1) GetApnsConf(getApnsConfOptions *GetApnsConfOptions) (result *ApnsCertUploadResponse, response *core.DetailedResponse, err error) { + return pushService.GetApnsConfWithContext(context.Background(), getApnsConfOptions) +} + +// GetApnsConfWithContext is an alternate form of the GetApnsConf method which supports a Context parameter +func (pushService *PushServiceV1) GetApnsConfWithContext(ctx context.Context, getApnsConfOptions *GetApnsConfOptions) (result *ApnsCertUploadResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getApnsConfOptions, "getApnsConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getApnsConfOptions, "getApnsConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *getApnsConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/apnsConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getApnsConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetApnsConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getApnsConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*getApnsConfOptions.AcceptLanguage)) + } + if getApnsConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*getApnsConfOptions.AppSecret)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalApnsCertUploadResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// SaveApnsConf : Updates APNS settings +// Uploads an APNS certificate to the application referenced by the applicationId. 
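+//
+// Sketch of a certificate upload (editor's illustration; the field names are
+// taken from the request builder calls below, the file path is a placeholder,
+// IsSandBox is assumed to be a *bool, and Certificate is assumed to accept an
+// io.ReadCloser such as *os.File):
+//
+//	certFile, err := os.Open("/path/to/certificate.p12")
+//	if err != nil {
+//		// handle the error
+//	}
+//	saveApnsConfOptions := &SaveApnsConfOptions{
+//		ApplicationID: core.StringPtr("my-app-id"),
+//		Password:      core.StringPtr("certificate-password"),
+//		IsSandBox:     core.BoolPtr(true),
+//		Certificate:   certFile,
+//	}
+//	result, detailedResponse, err := pushService.SaveApnsConf(saveApnsConfOptions)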
+func (pushService *PushServiceV1) SaveApnsConf(saveApnsConfOptions *SaveApnsConfOptions) (result *ApnsCertUploadResponse, response *core.DetailedResponse, err error) {
+	return pushService.SaveApnsConfWithContext(context.Background(), saveApnsConfOptions)
+}
+
+// SaveApnsConfWithContext is an alternate form of the SaveApnsConf method which supports a Context parameter
+func (pushService *PushServiceV1) SaveApnsConfWithContext(ctx context.Context, saveApnsConfOptions *SaveApnsConfOptions) (result *ApnsCertUploadResponse, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(saveApnsConfOptions, "saveApnsConfOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(saveApnsConfOptions, "saveApnsConfOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"applicationId": *saveApnsConfOptions.ApplicationID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = pushService.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/apnsConf`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range saveApnsConfOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("push_service", "V1", "SaveApnsConf")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	if saveApnsConfOptions.AcceptLanguage != nil {
+		builder.AddHeader("Accept-Language", fmt.Sprint(*saveApnsConfOptions.AcceptLanguage))
+	}
+	if saveApnsConfOptions.AppSecret != nil {
+		builder.AddHeader("appSecret", fmt.Sprint(*saveApnsConfOptions.AppSecret))
+	}
+
+	builder.AddFormData("password", "", "", fmt.Sprint(*saveApnsConfOptions.Password))
+	builder.AddFormData("isSandBox", "", "", fmt.Sprint(*saveApnsConfOptions.IsSandBox))
+	builder.AddFormData("certificate", fmt.Sprintf("%s.p12", stringWithCharset(24)),
+		core.StringNilMapper(saveApnsConfOptions.CertificateContentType), saveApnsConfOptions.Certificate)
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = pushService.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalApnsCertUploadResponse)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// DeleteApnsConf : Delete APNS settings
+// Deletes the APNS settings of the application referenced by the applicationId.
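+//
+// A deletion sketch (editor's illustration; only a DetailedResponse and error
+// are returned, so success is typically checked through the HTTP status code,
+// and the 204 value used here is an assumption):
+//
+//	deleteApnsConfOptions := &DeleteApnsConfOptions{
+//		ApplicationID: core.StringPtr("my-app-id"),
+//	}
+//	detailedResponse, err := pushService.DeleteApnsConf(deleteApnsConfOptions)
+//	if err == nil && detailedResponse.StatusCode == 204 {
+//		// the APNS settings were removed
+//	}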
+func (pushService *PushServiceV1) DeleteApnsConf(deleteApnsConfOptions *DeleteApnsConfOptions) (response *core.DetailedResponse, err error) { + return pushService.DeleteApnsConfWithContext(context.Background(), deleteApnsConfOptions) +} + +// DeleteApnsConfWithContext is an alternate form of the DeleteApnsConf method which supports a Context parameter +func (pushService *PushServiceV1) DeleteApnsConfWithContext(ctx context.Context, deleteApnsConfOptions *DeleteApnsConfOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteApnsConfOptions, "deleteApnsConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteApnsConfOptions, "deleteApnsConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *deleteApnsConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/apnsConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteApnsConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "DeleteApnsConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteApnsConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*deleteApnsConfOptions.AcceptLanguage)) + } + if deleteApnsConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*deleteApnsConfOptions.AppSecret)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = pushService.Service.Request(request, nil) + + return +} + +// GetGCMConf : Get the GCM settings +// Retrieves GCM credentials for the application. 
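+//
+// Example (an illustrative sketch with placeholder values), assuming a
+// configured *PushServiceV1 named pushService:
+//
+//	result, response, err := pushService.GetGCMConf(&GetGCMConfOptions{
+//		ApplicationID: core.StringPtr("your-application-id"),
+//		AppSecret:     core.StringPtr("your-app-secret"),
+//	})
+//	if err == nil {
+//		fmt.Println(response.StatusCode) // 200 when credentials exist
+//		_ = result                      // inspect the stored credentials
+//	}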
+func (pushService *PushServiceV1) GetGCMConf(getGCMConfOptions *GetGCMConfOptions) (result *GCMCredendialsModel, response *core.DetailedResponse, err error) { + return pushService.GetGCMConfWithContext(context.Background(), getGCMConfOptions) +} + +// GetGCMConfWithContext is an alternate form of the GetGCMConf method which supports a Context parameter +func (pushService *PushServiceV1) GetGCMConfWithContext(ctx context.Context, getGCMConfOptions *GetGCMConfOptions) (result *GCMCredendialsModel, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getGCMConfOptions, "getGCMConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getGCMConfOptions, "getGCMConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *getGCMConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/gcmConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getGCMConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetGCMConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getGCMConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*getGCMConfOptions.AcceptLanguage)) + } + if getGCMConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*getGCMConfOptions.AppSecret)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGCMCredendialsModel) + if err != nil { + return + } + response.Result = result + + return +} + +// SaveGCMConf : Updates GCM settings +// Updates the GCM credentials of the application referenced by the applicationId. 
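+//
+// Example (an illustrative sketch with placeholder values), assuming a
+// configured *PushServiceV1 named pushService; apiKey and senderId come from
+// your Firebase/GCM project:
+//
+//	_, _, err := pushService.SaveGCMConf(&SaveGCMConfOptions{
+//		ApplicationID: core.StringPtr("your-application-id"),
+//		AppSecret:     core.StringPtr("your-app-secret"),
+//		ApiKey:        core.StringPtr("your-gcm-api-key"),
+//		SenderID:      core.StringPtr("your-gcm-sender-id"),
+//	})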
+func (pushService *PushServiceV1) SaveGCMConf(saveGCMConfOptions *SaveGCMConfOptions) (result *GCMCredendialsModel, response *core.DetailedResponse, err error) { + return pushService.SaveGCMConfWithContext(context.Background(), saveGCMConfOptions) +} + +// SaveGCMConfWithContext is an alternate form of the SaveGCMConf method which supports a Context parameter +func (pushService *PushServiceV1) SaveGCMConfWithContext(ctx context.Context, saveGCMConfOptions *SaveGCMConfOptions) (result *GCMCredendialsModel, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(saveGCMConfOptions, "saveGCMConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(saveGCMConfOptions, "saveGCMConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *saveGCMConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/gcmConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range saveGCMConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "SaveGCMConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if saveGCMConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*saveGCMConfOptions.AcceptLanguage)) + } + if saveGCMConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*saveGCMConfOptions.AppSecret)) + } + + body := make(map[string]interface{}) + if saveGCMConfOptions.ApiKey != nil { + body["apiKey"] = saveGCMConfOptions.ApiKey + } + if saveGCMConfOptions.SenderID != nil { + body["senderId"] = saveGCMConfOptions.SenderID + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGCMCredendialsModel) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteGCMConf : Delete GCM settings +// Deletes the GCM credentials of the application, which is referenced by the applicationId parameter. 
+func (pushService *PushServiceV1) DeleteGCMConf(deleteGCMConfOptions *DeleteGCMConfOptions) (response *core.DetailedResponse, err error) { + return pushService.DeleteGCMConfWithContext(context.Background(), deleteGCMConfOptions) +} + +// DeleteGCMConfWithContext is an alternate form of the DeleteGCMConf method which supports a Context parameter +func (pushService *PushServiceV1) DeleteGCMConfWithContext(ctx context.Context, deleteGCMConfOptions *DeleteGCMConfOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteGCMConfOptions, "deleteGCMConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteGCMConfOptions, "deleteGCMConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *deleteGCMConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/gcmConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteGCMConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "DeleteGCMConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteGCMConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*deleteGCMConfOptions.AcceptLanguage)) + } + if deleteGCMConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*deleteGCMConfOptions.AppSecret)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = pushService.Service.Request(request, nil) + + return +} + +// GetWebpushServerKey : Get the Web Push Server Key +// Retrieves Web Push Server Key (VAPID). 
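+//
+// Example (an illustrative sketch with placeholder values): note that this
+// endpoint authenticates with the clientSecret header rather than appSecret.
+// Assumes a configured *PushServiceV1 named pushService:
+//
+//	result, _, err := pushService.GetWebpushServerKey(&GetWebpushServerKeyOptions{
+//		ApplicationID: core.StringPtr("your-application-id"),
+//		ClientSecret:  core.StringPtr("your-client-secret"),
+//	})
+//	if err == nil {
+//		fmt.Println(*result.WebpushServerKey)
+//	}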
+func (pushService *PushServiceV1) GetWebpushServerKey(getWebpushServerKeyOptions *GetWebpushServerKeyOptions) (result *ApplicationServerKeyModel, response *core.DetailedResponse, err error) { + return pushService.GetWebpushServerKeyWithContext(context.Background(), getWebpushServerKeyOptions) +} + +// GetWebpushServerKeyWithContext is an alternate form of the GetWebpushServerKey method which supports a Context parameter +func (pushService *PushServiceV1) GetWebpushServerKeyWithContext(ctx context.Context, getWebpushServerKeyOptions *GetWebpushServerKeyOptions) (result *ApplicationServerKeyModel, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWebpushServerKeyOptions, "getWebpushServerKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWebpushServerKeyOptions, "getWebpushServerKeyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *getWebpushServerKeyOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/webpushServerKey`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWebpushServerKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetWebpushServerKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getWebpushServerKeyOptions.ClientSecret != nil { + builder.AddHeader("clientSecret", fmt.Sprint(*getWebpushServerKeyOptions.ClientSecret)) + } + if getWebpushServerKeyOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*getWebpushServerKeyOptions.AcceptLanguage)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalApplicationServerKeyModel) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSafariWebConf : Get the Safari Push Notifications settings +// Retrieves Safari Push Notifications settings for the application. 
+func (pushService *PushServiceV1) GetSafariWebConf(getSafariWebConfOptions *GetSafariWebConfOptions) (result *SafariCertUploadResponse, response *core.DetailedResponse, err error) { + return pushService.GetSafariWebConfWithContext(context.Background(), getSafariWebConfOptions) +} + +// GetSafariWebConfWithContext is an alternate form of the GetSafariWebConf method which supports a Context parameter +func (pushService *PushServiceV1) GetSafariWebConfWithContext(ctx context.Context, getSafariWebConfOptions *GetSafariWebConfOptions) (result *SafariCertUploadResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSafariWebConfOptions, "getSafariWebConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSafariWebConfOptions, "getSafariWebConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *getSafariWebConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/safariWebConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSafariWebConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetSafariWebConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getSafariWebConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*getSafariWebConfOptions.AcceptLanguage)) + } + if getSafariWebConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*getSafariWebConfOptions.AppSecret)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSafariCertUploadResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// SaveSafariWebConf : Updates Safari Push Notifications settings +// Uploads Safari Push Notifications settings to the application referenced by the applicationId. The settings include +// providing a web push certificate and other credentials. If none of the icons are provided then default icons will be +// used. These images populate the icons displayed to the user in the permission prompt, Notification Center and the +// notification itself. 
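+//
+// Example (an illustrative sketch with placeholder values): password,
+// certificate, websiteName, urlFormatString, websitePushID and webSiteUrl are
+// always sent, so they are effectively required here; the icons are optional.
+// Assumes a configured *PushServiceV1 named pushService and that Certificate
+// accepts any io.ReadCloser:
+//
+//	certFile, _ := os.Open("/path/to/safari-certificate.p12")
+//	defer certFile.Close()
+//	result, _, err := pushService.SaveSafariWebConf(&SaveSafariWebConfOptions{
+//		ApplicationID:   core.StringPtr("your-application-id"),
+//		AppSecret:       core.StringPtr("your-app-secret"),
+//		Password:        core.StringPtr("certificate-password"),
+//		Certificate:     certFile,
+//		WebsiteName:     core.StringPtr("Example Site"),
+//		UrlFormatString: core.StringPtr("https://example.com/%@"),
+//		WebsitePushID:   core.StringPtr("web.com.example"),
+//		WebSiteURL:      core.StringPtr("https://example.com"),
+//	})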
+func (pushService *PushServiceV1) SaveSafariWebConf(saveSafariWebConfOptions *SaveSafariWebConfOptions) (result *SafariCertUploadResponse, response *core.DetailedResponse, err error) { + return pushService.SaveSafariWebConfWithContext(context.Background(), saveSafariWebConfOptions) +} + +// SaveSafariWebConfWithContext is an alternate form of the SaveSafariWebConf method which supports a Context parameter +func (pushService *PushServiceV1) SaveSafariWebConfWithContext(ctx context.Context, saveSafariWebConfOptions *SaveSafariWebConfOptions) (result *SafariCertUploadResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(saveSafariWebConfOptions, "saveSafariWebConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(saveSafariWebConfOptions, "saveSafariWebConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *saveSafariWebConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/safariWebConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range saveSafariWebConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "SaveSafariWebConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if saveSafariWebConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*saveSafariWebConfOptions.AcceptLanguage)) + } + if saveSafariWebConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*saveSafariWebConfOptions.AppSecret)) + } + + builder.AddFormData("password", "", "", fmt.Sprint(*saveSafariWebConfOptions.Password)) + builder.AddFormData("certificate", fmt.Sprintf("%s.p12", stringWithCharset(24)), + core.StringNilMapper(saveSafariWebConfOptions.CertificateContentType), saveSafariWebConfOptions.Certificate) + builder.AddFormData("websiteName", "", "", fmt.Sprint(*saveSafariWebConfOptions.WebsiteName)) + builder.AddFormData("urlFormatString", "", "", fmt.Sprint(*saveSafariWebConfOptions.UrlFormatString)) + builder.AddFormData("websitePushID", "", "", fmt.Sprint(*saveSafariWebConfOptions.WebsitePushID)) + builder.AddFormData("webSiteUrl", "", "", fmt.Sprint(*saveSafariWebConfOptions.WebSiteURL)) + if saveSafariWebConfOptions.Icon16x16 != nil { + builder.AddFormData("icon_16x16", fmt.Sprintf("%s.p12", stringWithCharset(12)), + core.StringNilMapper(saveSafariWebConfOptions.Icon16x16ContentType), saveSafariWebConfOptions.Icon16x16) + } + if saveSafariWebConfOptions.Icon16x162x != nil { + builder.AddFormData("icon_16x16@2x", fmt.Sprintf("%s.p12", stringWithCharset(12)), + core.StringNilMapper(saveSafariWebConfOptions.Icon16x162xContentType), saveSafariWebConfOptions.Icon16x162x) + } + if saveSafariWebConfOptions.Icon32x32 != nil { + builder.AddFormData("icon_32x32", fmt.Sprintf("%s.p12", stringWithCharset(12)), + core.StringNilMapper(saveSafariWebConfOptions.Icon32x32ContentType), saveSafariWebConfOptions.Icon32x32) + } + if saveSafariWebConfOptions.Icon32x322x != nil { + builder.AddFormData("icon_32x32@2x", fmt.Sprintf("%s.p12", stringWithCharset(12)), + 
core.StringNilMapper(saveSafariWebConfOptions.Icon32x322xContentType), saveSafariWebConfOptions.Icon32x322x)
+	}
+	if saveSafariWebConfOptions.Icon128x128 != nil {
+		builder.AddFormData("icon_128x128", fmt.Sprintf("%s.p12", stringWithCharset(12)),
+			core.StringNilMapper(saveSafariWebConfOptions.Icon128x128ContentType), saveSafariWebConfOptions.Icon128x128)
+	}
+	if saveSafariWebConfOptions.Icon128x1282x != nil {
+		builder.AddFormData("icon_128x128@2x", fmt.Sprintf("%s.p12", stringWithCharset(12)),
+			core.StringNilMapper(saveSafariWebConfOptions.Icon128x1282xContentType), saveSafariWebConfOptions.Icon128x1282x)
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = pushService.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSafariCertUploadResponse)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// DeleteSafariWebConf : Delete Safari Push Notifications settings
+// Deletes the Safari Push Notifications settings of the application, which is referenced by the applicationId
+// parameter.
+func (pushService *PushServiceV1) DeleteSafariWebConf(deleteSafariWebConfOptions *DeleteSafariWebConfOptions) (response *core.DetailedResponse, err error) {
+	return pushService.DeleteSafariWebConfWithContext(context.Background(), deleteSafariWebConfOptions)
+}
+
+// DeleteSafariWebConfWithContext is an alternate form of the DeleteSafariWebConf method which supports a Context parameter
+func (pushService *PushServiceV1) DeleteSafariWebConfWithContext(ctx context.Context, deleteSafariWebConfOptions *DeleteSafariWebConfOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteSafariWebConfOptions, "deleteSafariWebConfOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteSafariWebConfOptions, "deleteSafariWebConfOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"applicationId": *deleteSafariWebConfOptions.ApplicationID,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = pushService.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/safariWebConf`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteSafariWebConfOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("push_service", "V1", "DeleteSafariWebConf")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	if deleteSafariWebConfOptions.AcceptLanguage != nil {
+		builder.AddHeader("Accept-Language", fmt.Sprint(*deleteSafariWebConfOptions.AcceptLanguage))
+	}
+	if deleteSafariWebConfOptions.AppSecret != nil {
+		builder.AddHeader("appSecret", fmt.Sprint(*deleteSafariWebConfOptions.AppSecret))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = pushService.Service.Request(request, nil)
+
+	return
+}
+
+// GetGcmConfPublic : Get the GCM senderId
+// Retrieves only the GCM senderId for the application.
+func (pushService *PushServiceV1) GetGcmConfPublic(getGcmConfPublicOptions *GetGcmConfPublicOptions) (result *GCMCredendialsPublicModel, response *core.DetailedResponse, err error) { + return pushService.GetGcmConfPublicWithContext(context.Background(), getGcmConfPublicOptions) +} + +// GetGcmConfPublicWithContext is an alternate form of the GetGcmConfPublic method which supports a Context parameter +func (pushService *PushServiceV1) GetGcmConfPublicWithContext(ctx context.Context, getGcmConfPublicOptions *GetGcmConfPublicOptions) (result *GCMCredendialsPublicModel, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getGcmConfPublicOptions, "getGcmConfPublicOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getGcmConfPublicOptions, "getGcmConfPublicOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *getGcmConfPublicOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/gcmConfPublic`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getGcmConfPublicOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetGcmConfPublic") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getGcmConfPublicOptions.ClientSecret != nil { + builder.AddHeader("clientSecret", fmt.Sprint(*getGcmConfPublicOptions.ClientSecret)) + } + if getGcmConfPublicOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*getGcmConfPublicOptions.AcceptLanguage)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGCMCredendialsPublicModel) + if err != nil { + return + } + response.Result = result + + return +} + +// GetChromeWebConf : Get the Chrome WebPush settings +// Retrieves Chrome WebPush credentials for the application. 
+func (pushService *PushServiceV1) GetChromeWebConf(getChromeWebConfOptions *GetChromeWebConfOptions) (result *ChromeWebPushCredendialsModel, response *core.DetailedResponse, err error) { + return pushService.GetChromeWebConfWithContext(context.Background(), getChromeWebConfOptions) +} + +// GetChromeWebConfWithContext is an alternate form of the GetChromeWebConf method which supports a Context parameter +func (pushService *PushServiceV1) GetChromeWebConfWithContext(ctx context.Context, getChromeWebConfOptions *GetChromeWebConfOptions) (result *ChromeWebPushCredendialsModel, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getChromeWebConfOptions, "getChromeWebConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getChromeWebConfOptions, "getChromeWebConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *getChromeWebConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/chromeWebConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getChromeWebConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetChromeWebConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getChromeWebConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*getChromeWebConfOptions.AcceptLanguage)) + } + if getChromeWebConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*getChromeWebConfOptions.AppSecret)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalChromeWebPushCredendialsModel) + if err != nil { + return + } + response.Result = result + + return +} + +// SaveChromeWebConf : Updates Chrome WebPush settings +// Updates the Chrome WebPush credentials of the application referenced by the applicationId. 
+func (pushService *PushServiceV1) SaveChromeWebConf(saveChromeWebConfOptions *SaveChromeWebConfOptions) (result *ChromeWebPushCredendialsModel, response *core.DetailedResponse, err error) { + return pushService.SaveChromeWebConfWithContext(context.Background(), saveChromeWebConfOptions) +} + +// SaveChromeWebConfWithContext is an alternate form of the SaveChromeWebConf method which supports a Context parameter +func (pushService *PushServiceV1) SaveChromeWebConfWithContext(ctx context.Context, saveChromeWebConfOptions *SaveChromeWebConfOptions) (result *ChromeWebPushCredendialsModel, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(saveChromeWebConfOptions, "saveChromeWebConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(saveChromeWebConfOptions, "saveChromeWebConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *saveChromeWebConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/chromeWebConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range saveChromeWebConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "SaveChromeWebConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if saveChromeWebConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*saveChromeWebConfOptions.AcceptLanguage)) + } + if saveChromeWebConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*saveChromeWebConfOptions.AppSecret)) + } + + body := make(map[string]interface{}) + if saveChromeWebConfOptions.ApiKey != nil { + body["apiKey"] = saveChromeWebConfOptions.ApiKey + } + if saveChromeWebConfOptions.WebSiteURL != nil { + body["webSiteUrl"] = saveChromeWebConfOptions.WebSiteURL + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalChromeWebPushCredendialsModel) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteChromeWebConf : Delete Chrome WebPush Settings +// Deletes the Chrome WebPush credentials of the application, which is referenced by the applicationId parameter. 
+func (pushService *PushServiceV1) DeleteChromeWebConf(deleteChromeWebConfOptions *DeleteChromeWebConfOptions) (response *core.DetailedResponse, err error) { + return pushService.DeleteChromeWebConfWithContext(context.Background(), deleteChromeWebConfOptions) +} + +// DeleteChromeWebConfWithContext is an alternate form of the DeleteChromeWebConf method which supports a Context parameter +func (pushService *PushServiceV1) DeleteChromeWebConfWithContext(ctx context.Context, deleteChromeWebConfOptions *DeleteChromeWebConfOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteChromeWebConfOptions, "deleteChromeWebConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteChromeWebConfOptions, "deleteChromeWebConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *deleteChromeWebConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/chromeWebConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteChromeWebConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "DeleteChromeWebConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteChromeWebConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*deleteChromeWebConfOptions.AcceptLanguage)) + } + if deleteChromeWebConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*deleteChromeWebConfOptions.AppSecret)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = pushService.Service.Request(request, nil) + + return +} + +// GetFirefoxWebConf : Get the Firefox WebPush settings +// Retrieves Firefox WebPush credentials for the application. 
+func (pushService *PushServiceV1) GetFirefoxWebConf(getFirefoxWebConfOptions *GetFirefoxWebConfOptions) (result *FirefoxWebPushCredendialsModel, response *core.DetailedResponse, err error) { + return pushService.GetFirefoxWebConfWithContext(context.Background(), getFirefoxWebConfOptions) +} + +// GetFirefoxWebConfWithContext is an alternate form of the GetFirefoxWebConf method which supports a Context parameter +func (pushService *PushServiceV1) GetFirefoxWebConfWithContext(ctx context.Context, getFirefoxWebConfOptions *GetFirefoxWebConfOptions) (result *FirefoxWebPushCredendialsModel, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getFirefoxWebConfOptions, "getFirefoxWebConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getFirefoxWebConfOptions, "getFirefoxWebConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *getFirefoxWebConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/firefoxWebConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getFirefoxWebConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetFirefoxWebConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if getFirefoxWebConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*getFirefoxWebConfOptions.AcceptLanguage)) + } + if getFirefoxWebConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*getFirefoxWebConfOptions.AppSecret)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFirefoxWebPushCredendialsModel) + if err != nil { + return + } + response.Result = result + + return +} + +// SaveFirefoxWebConf : Updates Firefox WebPush settings +// Updates the Firefox WebPush credentials of the application referenced by the applicationId. 
+func (pushService *PushServiceV1) SaveFirefoxWebConf(saveFirefoxWebConfOptions *SaveFirefoxWebConfOptions) (result *FirefoxWebPushCredendialsModel, response *core.DetailedResponse, err error) { + return pushService.SaveFirefoxWebConfWithContext(context.Background(), saveFirefoxWebConfOptions) +} + +// SaveFirefoxWebConfWithContext is an alternate form of the SaveFirefoxWebConf method which supports a Context parameter +func (pushService *PushServiceV1) SaveFirefoxWebConfWithContext(ctx context.Context, saveFirefoxWebConfOptions *SaveFirefoxWebConfOptions) (result *FirefoxWebPushCredendialsModel, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(saveFirefoxWebConfOptions, "saveFirefoxWebConfOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(saveFirefoxWebConfOptions, "saveFirefoxWebConfOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *saveFirefoxWebConfOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/firefoxWebConf`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range saveFirefoxWebConfOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "SaveFirefoxWebConf") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if saveFirefoxWebConfOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*saveFirefoxWebConfOptions.AcceptLanguage)) + } + if saveFirefoxWebConfOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*saveFirefoxWebConfOptions.AppSecret)) + } + + body := make(map[string]interface{}) + if saveFirefoxWebConfOptions.WebSiteURL != nil { + body["webSiteUrl"] = saveFirefoxWebConfOptions.WebSiteURL + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFirefoxWebPushCredendialsModel) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteFirefoxWebConf : Delete Firefox WebPush Settings +// Deletes the Firefox WebPush credentials of the application, which is referenced by the applicationId parameter. 
+func (pushService *PushServiceV1) DeleteFirefoxWebConf(deleteFirefoxWebConfOptions *DeleteFirefoxWebConfOptions) (response *core.DetailedResponse, err error) {
+	return pushService.DeleteFirefoxWebConfWithContext(context.Background(), deleteFirefoxWebConfOptions)
+}
+
+// DeleteFirefoxWebConfWithContext is an alternate form of the DeleteFirefoxWebConf method which supports a Context parameter
+func (pushService *PushServiceV1) DeleteFirefoxWebConfWithContext(ctx context.Context, deleteFirefoxWebConfOptions *DeleteFirefoxWebConfOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteFirefoxWebConfOptions, "deleteFirefoxWebConfOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteFirefoxWebConfOptions, "deleteFirefoxWebConfOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"applicationId": *deleteFirefoxWebConfOptions.ApplicationID,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = pushService.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/firefoxWebConf`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteFirefoxWebConfOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("push_service", "V1", "DeleteFirefoxWebConf")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	if deleteFirefoxWebConfOptions.AcceptLanguage != nil {
+		builder.AddHeader("Accept-Language", fmt.Sprint(*deleteFirefoxWebConfOptions.AcceptLanguage))
+	}
+	if deleteFirefoxWebConfOptions.AppSecret != nil {
+		builder.AddHeader("appSecret", fmt.Sprint(*deleteFirefoxWebConfOptions.AppSecret))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = pushService.Service.Request(request, nil)
+
+	return
+}
+
+// GetChromeAppExtConf : Get the Chrome Apps-Extensions Push credentials settings
+// Retrieves Chrome Apps-Extensions Push credentials settings for the application.
+func (pushService *PushServiceV1) GetChromeAppExtConf(getChromeAppExtConfOptions *GetChromeAppExtConfOptions) (result *GCMCredendialsModel, response *core.DetailedResponse, err error) {
+	return pushService.GetChromeAppExtConfWithContext(context.Background(), getChromeAppExtConfOptions)
+}
+
+// GetChromeAppExtConfWithContext is an alternate form of the GetChromeAppExtConf method which supports a Context parameter
+func (pushService *PushServiceV1) GetChromeAppExtConfWithContext(ctx context.Context, getChromeAppExtConfOptions *GetChromeAppExtConfOptions) (result *GCMCredendialsModel, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(getChromeAppExtConfOptions, "getChromeAppExtConfOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(getChromeAppExtConfOptions, "getChromeAppExtConfOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"applicationId": *getChromeAppExtConfOptions.ApplicationID,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = pushService.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/chromeAppExtConf`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getChromeAppExtConfOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetChromeAppExtConf")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	if getChromeAppExtConfOptions.AcceptLanguage != nil {
+		builder.AddHeader("Accept-Language", fmt.Sprint(*getChromeAppExtConfOptions.AcceptLanguage))
+	}
+	if getChromeAppExtConfOptions.AppSecret != nil {
+		builder.AddHeader("appSecret", fmt.Sprint(*getChromeAppExtConfOptions.AppSecret))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = pushService.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGCMCredendialsModel)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// SaveChromeAppExtConf : Updates Chrome Apps-Extensions Push credentials settings
+// Updates the Chrome Apps-Extensions Push credentials settings of the application referenced by the applicationId.
+func (pushService *PushServiceV1) SaveChromeAppExtConf(saveChromeAppExtConfOptions *SaveChromeAppExtConfOptions) (result *GCMCredendialsModel, response *core.DetailedResponse, err error) {
+	return pushService.SaveChromeAppExtConfWithContext(context.Background(), saveChromeAppExtConfOptions)
+}
+
+// SaveChromeAppExtConfWithContext is an alternate form of the SaveChromeAppExtConf method which supports a Context parameter
+func (pushService *PushServiceV1) SaveChromeAppExtConfWithContext(ctx context.Context, saveChromeAppExtConfOptions *SaveChromeAppExtConfOptions) (result *GCMCredendialsModel, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(saveChromeAppExtConfOptions, "saveChromeAppExtConfOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(saveChromeAppExtConfOptions, "saveChromeAppExtConfOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"applicationId": *saveChromeAppExtConfOptions.ApplicationID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = pushService.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/chromeAppExtConf`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range saveChromeAppExtConfOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("push_service", "V1", "SaveChromeAppExtConf")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+	if saveChromeAppExtConfOptions.AcceptLanguage != nil {
+		builder.AddHeader("Accept-Language", fmt.Sprint(*saveChromeAppExtConfOptions.AcceptLanguage))
+	}
+	if saveChromeAppExtConfOptions.AppSecret != nil {
+		builder.AddHeader("appSecret", fmt.Sprint(*saveChromeAppExtConfOptions.AppSecret))
+	}
+
+	body := make(map[string]interface{})
+	if saveChromeAppExtConfOptions.ApiKey != nil {
+		body["apiKey"] = saveChromeAppExtConfOptions.ApiKey
+	}
+	if saveChromeAppExtConfOptions.SenderID != nil {
+		body["senderId"] = saveChromeAppExtConfOptions.SenderID
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = pushService.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGCMCredendialsModel)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// DeleteChromeAppExtConf : Delete Chrome Apps-Extensions Push credentials settings
+// Deletes the push credentials settings for Chrome Apps-Extensions of the application, which is referenced by the
+// applicationId parameter.
+func (pushService *PushServiceV1) DeleteChromeAppExtConf(deleteChromeAppExtConfOptions *DeleteChromeAppExtConfOptions) (response *core.DetailedResponse, err error) {
+	return pushService.DeleteChromeAppExtConfWithContext(context.Background(), deleteChromeAppExtConfOptions)
+}
+
+// DeleteChromeAppExtConfWithContext is an alternate form of the DeleteChromeAppExtConf method which supports a Context parameter
+func (pushService *PushServiceV1) DeleteChromeAppExtConfWithContext(ctx context.Context, deleteChromeAppExtConfOptions *DeleteChromeAppExtConfOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteChromeAppExtConfOptions, "deleteChromeAppExtConfOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteChromeAppExtConfOptions, "deleteChromeAppExtConfOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"applicationId": *deleteChromeAppExtConfOptions.ApplicationID,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = pushService.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/chromeAppExtConf`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteChromeAppExtConfOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("push_service", "V1", "DeleteChromeAppExtConf")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	if deleteChromeAppExtConfOptions.AcceptLanguage != nil {
+		builder.AddHeader("Accept-Language", fmt.Sprint(*deleteChromeAppExtConfOptions.AcceptLanguage))
+	}
+	if deleteChromeAppExtConfOptions.AppSecret != nil {
+		builder.AddHeader("appSecret", fmt.Sprint(*deleteChromeAppExtConfOptions.AppSecret))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = pushService.Service.Request(request, nil)
+
+	return
+}
+
+// GetChromeAppExtConfPublic : Get the GCM senderId for Chrome Apps-Extensions Push credentials
+// Retrieves only the GCM senderId for the Chrome Apps-Extensions Push credentials of the application.
+func (pushService *PushServiceV1) GetChromeAppExtConfPublic(getChromeAppExtConfPublicOptions *GetChromeAppExtConfPublicOptions) (result *GCMCredendialsPublicModel, response *core.DetailedResponse, err error) {
+	return pushService.GetChromeAppExtConfPublicWithContext(context.Background(), getChromeAppExtConfPublicOptions)
+}
+
+// GetChromeAppExtConfPublicWithContext is an alternate form of the GetChromeAppExtConfPublic method which supports a Context parameter
+func (pushService *PushServiceV1) GetChromeAppExtConfPublicWithContext(ctx context.Context, getChromeAppExtConfPublicOptions *GetChromeAppExtConfPublicOptions) (result *GCMCredendialsPublicModel, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(getChromeAppExtConfPublicOptions, "getChromeAppExtConfPublicOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(getChromeAppExtConfPublicOptions, "getChromeAppExtConfPublicOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"applicationId": *getChromeAppExtConfPublicOptions.ApplicationID,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = pushService.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/settings/chromeAppExtConfPublic`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getChromeAppExtConfPublicOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("push_service", "V1", "GetChromeAppExtConfPublic")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	if getChromeAppExtConfPublicOptions.ClientSecret != nil {
+		builder.AddHeader("clientSecret", fmt.Sprint(*getChromeAppExtConfPublicOptions.ClientSecret))
+	}
+	if getChromeAppExtConfPublicOptions.AcceptLanguage != nil {
+		builder.AddHeader("Accept-Language", fmt.Sprint(*getChromeAppExtConfPublicOptions.AcceptLanguage))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = pushService.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGCMCredendialsPublicModel)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// SendMessage : Send a message with different options
+// When the request to send the message is accepted, push notifications are sent to the specified targets and HTTP
+// status code 202 is returned. Sent messages are stored and automatically expire after 10 days; after this time
+// limit, information about a message can no longer be retrieved using GET. Available in server-side frameworks.
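+//
+// Example (an illustrative sketch with placeholder values, assuming the SDK's
+// message model exposes an Alert field as sketched here): broadcasting a
+// simple alert through a configured *PushServiceV1 named pushService:
+//
+//	result, response, err := pushService.SendMessage(&SendMessageOptions{
+//		ApplicationID: core.StringPtr("your-application-id"),
+//		Message: &Message{
+//			Alert: core.StringPtr("Hello from Push Notifications"),
+//		},
+//	})
+//	// A 202 response means the message was accepted for delivery.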
+func (pushService *PushServiceV1) SendMessage(sendMessageOptions *SendMessageOptions) (result *MessageResponseModel, response *core.DetailedResponse, err error) {
+	return pushService.SendMessageWithContext(context.Background(), sendMessageOptions)
+}
+
+// SendMessageWithContext is an alternate form of the SendMessage method which supports a Context parameter
+func (pushService *PushServiceV1) SendMessageWithContext(ctx context.Context, sendMessageOptions *SendMessageOptions) (result *MessageResponseModel, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(sendMessageOptions, "sendMessageOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(sendMessageOptions, "sendMessageOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"applicationId": *sendMessageOptions.ApplicationID,
+	}
+
+	builder := core.NewRequestBuilder(core.POST)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = pushService.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/messages`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range sendMessageOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("push_service", "V1", "SendMessage")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+	if sendMessageOptions.AcceptLanguage != nil {
+		builder.AddHeader("Accept-Language", fmt.Sprint(*sendMessageOptions.AcceptLanguage))
+	}
+
+	body := make(map[string]interface{})
+	if sendMessageOptions.Message != nil {
+		body["message"] = sendMessageOptions.Message
+	}
+	if sendMessageOptions.Settings != nil {
+		body["settings"] = sendMessageOptions.Settings
+	}
+	if sendMessageOptions.Validate != nil {
+		body["validate"] = sendMessageOptions.Validate
+	}
+	if sendMessageOptions.Target != nil {
+		body["target"] = sendMessageOptions.Target
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = pushService.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMessageResponseModel)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// SendMessagesInBulk : Send bulk messages
+// Sends multiple messages, each with its own options. Sent messages are stored and automatically expire after 10
+// days; after this time limit, information about a message can no longer be retrieved using GET. Available in
+// server-side frameworks.
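+//
+// Example (an illustrative sketch): the request body is passed through
+// SetBodyContentJSON unchanged, so Body is assumed here to be a slice of
+// per-message payloads whose field names follow the single-message API:
+//
+//	_, response, err := pushService.SendMessagesInBulk(&SendMessagesInBulkOptions{
+//		ApplicationID: core.StringPtr("your-application-id"),
+//		AppSecret:     core.StringPtr("your-app-secret"),
+//		Body: []map[string]interface{}{
+//			{"message": map[string]interface{}{"alert": "First message"}},
+//			{"message": map[string]interface{}{"alert": "Second message"}},
+//		},
+//	})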
+func (pushService *PushServiceV1) SendMessagesInBulk(sendMessagesInBulkOptions *SendMessagesInBulkOptions) (result *MessagesArrayModel, response *core.DetailedResponse, err error) { + return pushService.SendMessagesInBulkWithContext(context.Background(), sendMessagesInBulkOptions) +} + +// SendMessagesInBulkWithContext is an alternate form of the SendMessagesInBulk method which supports a Context parameter +func (pushService *PushServiceV1) SendMessagesInBulkWithContext(ctx context.Context, sendMessagesInBulkOptions *SendMessagesInBulkOptions) (result *MessagesArrayModel, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(sendMessagesInBulkOptions, "sendMessagesInBulkOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(sendMessagesInBulkOptions, "sendMessagesInBulkOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "applicationId": *sendMessagesInBulkOptions.ApplicationID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = pushService.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(pushService.Service.Options.URL, `/apps/{applicationId}/messages/bulk`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range sendMessagesInBulkOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("push_service", "V1", "SendMessagesInBulk") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if sendMessagesInBulkOptions.AcceptLanguage != nil { + builder.AddHeader("Accept-Language", fmt.Sprint(*sendMessagesInBulkOptions.AcceptLanguage)) + } + if sendMessagesInBulkOptions.AppSecret != nil { + builder.AddHeader("appSecret", fmt.Sprint(*sendMessagesInBulkOptions.AppSecret)) + } + + _, err = builder.SetBodyContentJSON(sendMessagesInBulkOptions.Body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = pushService.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalMessagesArrayModel) + if err != nil { + return + } + response.Result = result + + return +} + +// Apns : Settings specific to iOS platform. +type Apns struct { + // The number to display as the badge of the application icon. + Badge *int64 `json:"badge,omitempty"` + + // The category identifier to be used for the interactive push notifications. + InteractiveCategory *string `json:"interactiveCategory,omitempty"` + + // __Deprecated__. Use __'interactiveCategory'__ for interactive push notification. + Category *string `json:"category,omitempty"` + + // The title for the Action key. + IosActionKey *string `json:"iosActionKey,omitempty"` + + // Custom JSON payload that will be sent as part of the notification message. + Payload interface{} `json:"payload,omitempty"` + + // The name of the sound file in the application bundle. The sound of this file is played as an alert. + Sound *string `json:"sound,omitempty"` + + // The key to a title string in the Localizable.strings file for the current localization. The key string can be + // formatted with %@ and %n$@ specifiers to take the variables specified in the titleLocArgs array. 
+	TitleLocKey *string `json:"titleLocKey,omitempty"`
+
+	// A key to an alert-message string in a Localizable.strings file for the current localization (which is set by the
+	// user’s language preference).
+	// The key string can be formatted with %@ and %n$@ specifiers to take the variables specified in the locArgs array.
+	LocKey *string `json:"locKey,omitempty"`
+
+	// The filename of an image file in the app bundle, with or without the filename extension. The image is used as the
+	// launch image when users tap the action button or move the action slider.
+	LaunchImage *string `json:"launchImage,omitempty"`
+
+	// Variable string values to appear in place of the format specifiers in title-loc-key.
+	TitleLocArgs []string `json:"titleLocArgs,omitempty"`
+
+	// Variable string values to appear in place of the format specifiers in locKey.
+	LocArgs []string `json:"locArgs,omitempty"`
+
+	// The title of Rich Push notifications (Supported only on iOS 10 and above).
+	Title *string `json:"title,omitempty"`
+
+	// The subtitle of the Rich Notifications. (Supported only on iOS 10 and above).
+	Subtitle *string `json:"subtitle,omitempty"`
+
+	// The link to the iOS notifications media (video, audio, GIF, images - Supported only on iOS 10 and above).
+	AttachmentURL *string `json:"attachmentUrl,omitempty"`
+
+	Type *string `json:"type,omitempty"`
+
+	// Multiple notifications with the same collapse identifier are displayed to the user as a single notification.
+	ApnsCollapseID *string `json:"apnsCollapseId,omitempty"`
+
+	// An app-specific identifier for grouping related notifications. This value corresponds to the threadIdentifier
+	// property in the UNNotificationContent object.
+	ApnsThreadID *string `json:"apnsThreadId,omitempty"`
+
+	// The string the notification adds to the category’s summary format string.
+	ApnsGroupSummaryArg *string `json:"apnsGroupSummaryArg,omitempty"`
+
+	// The number of items the notification adds to the category’s summary format string.
+	ApnsGroupSummaryArgCount *int64 `json:"apnsGroupSummaryArgCount,omitempty"`
+}
+
+// Constants associated with the Apns.Type property.
+const (
+	Apns_Type_Default = "DEFAULT"
+	Apns_Type_Mixed = "MIXED"
+	Apns_Type_Silent = "SILENT"
+)
+
+// UnmarshalApns unmarshals an instance of Apns from the specified map of raw messages.
+func UnmarshalApns(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Apns) + err = core.UnmarshalPrimitive(m, "badge", &obj.Badge) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "interactiveCategory", &obj.InteractiveCategory) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "category", &obj.Category) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iosActionKey", &obj.IosActionKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "payload", &obj.Payload) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "sound", &obj.Sound) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "titleLocKey", &obj.TitleLocKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locKey", &obj.LocKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "launchImage", &obj.LaunchImage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "titleLocArgs", &obj.TitleLocArgs) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locArgs", &obj.LocArgs) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "title", &obj.Title) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "subtitle", &obj.Subtitle) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "attachmentUrl", &obj.AttachmentURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "apnsCollapseId", &obj.ApnsCollapseID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "apnsThreadId", &obj.ApnsThreadID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "apnsGroupSummaryArg", &obj.ApnsGroupSummaryArg) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "apnsGroupSummaryArgCount", &obj.ApnsGroupSummaryArgCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ApplicationServerKeyModel : ApplicationServerKeyModel struct +type ApplicationServerKeyModel struct { + // Application Server key for Web Push Identification. + WebpushServerKey *string `json:"webpushServerKey" validate:"required"` +} + +// UnmarshalApplicationServerKeyModel unmarshals an instance of ApplicationServerKeyModel from the specified map of raw messages. +func UnmarshalApplicationServerKeyModel(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ApplicationServerKeyModel) + err = core.UnmarshalPrimitive(m, "webpushServerKey", &obj.WebpushServerKey) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ChromeAppExt : Web Push Notifications settings specific to Chrome browser. +type ChromeAppExt struct { + // Dozed devices to display only the latest notification and discard old low priority notifications. + CollapseKey *string `json:"collapseKey,omitempty"` + + // When this parameter is set to true, it indicates that the message should not be sent until the device becomes + // active. + DelayWhileIdle *bool `json:"delayWhileIdle,omitempty"` + + // Specifies the title to be set for the WebPush Notification. + Title *string `json:"title,omitempty"` + + // The URL of the icon to be set for the WebPush Notification. + IconURL *string `json:"iconUrl,omitempty"` + + // This parameter specifies how long (in seconds) the message should be kept in GCM storage if the device is offline. 
+ TimeToLive *int64 `json:"timeToLive,omitempty"` + + // Custom JSON payload that will be sent as part of the notification message. + Payload *string `json:"payload,omitempty"` +} + +// UnmarshalChromeAppExt unmarshals an instance of ChromeAppExt from the specified map of raw messages. +func UnmarshalChromeAppExt(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ChromeAppExt) + err = core.UnmarshalPrimitive(m, "collapseKey", &obj.CollapseKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "delayWhileIdle", &obj.DelayWhileIdle) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "title", &obj.Title) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iconUrl", &obj.IconURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "timeToLive", &obj.TimeToLive) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "payload", &obj.Payload) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ChromeWeb : Web Push Notifications settings specific to Chrome browser. +type ChromeWeb struct { + // Specifies the title to be set for the WebPush Notification. + Title *string `json:"title,omitempty"` + + // The URL of the icon to be set for the WebPush Notification. + IconURL *string `json:"iconUrl,omitempty"` + + // This parameter specifies how long (in seconds) the message should be kept in GCM storage if the device is offline. + TimeToLive *int64 `json:"timeToLive,omitempty"` + + // Custom JSON payload that will be sent as part of the + // notification message. + Payload *string `json:"payload,omitempty"` +} + +// UnmarshalChromeWeb unmarshals an instance of ChromeWeb from the specified map of raw messages. +func UnmarshalChromeWeb(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ChromeWeb) + err = core.UnmarshalPrimitive(m, "title", &obj.Title) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iconUrl", &obj.IconURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "timeToLive", &obj.TimeToLive) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "payload", &obj.Payload) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ChromeWebPushCredendialsModel : ChromeWebPushCredendialsModel struct +type ChromeWebPushCredendialsModel struct { + // An API key that gives the push service an authorized access to Google services that is used for Chrome Web Push. + ApiKey *string `json:"apiKey" validate:"required"` + + // The URL of the WebSite / WebApp that should be permitted to subscribe to WebPush. + WebSiteURL *string `json:"webSiteUrl" validate:"required"` +} + +// NewChromeWebPushCredendialsModel : Instantiate ChromeWebPushCredendialsModel (Generic Model Constructor) +func (*PushServiceV1) NewChromeWebPushCredendialsModel(apiKey string, webSiteURL string) (model *ChromeWebPushCredendialsModel, err error) { + model = &ChromeWebPushCredendialsModel{ + ApiKey: core.StringPtr(apiKey), + WebSiteURL: core.StringPtr(webSiteURL), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalChromeWebPushCredendialsModel unmarshals an instance of ChromeWebPushCredendialsModel from the specified map of raw messages. 
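+// The model itself is normally built with the generated constructor; a hedged,
+// editorial sketch (not generated code; both argument values are placeholders):
+//
+//	creds, err := pushService.NewChromeWebPushCredendialsModel(
+//		"example-google-api-key", "https://www.example.com")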
+func UnmarshalChromeWebPushCredendialsModel(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ChromeWebPushCredendialsModel) + err = core.UnmarshalPrimitive(m, "apiKey", &obj.ApiKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "webSiteUrl", &obj.WebSiteURL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DeleteApnsConfOptions : The DeleteApnsConf options. +type DeleteApnsConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. + AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteApnsConfOptions : Instantiate DeleteApnsConfOptions +func (*PushServiceV1) NewDeleteApnsConfOptions(applicationID string) *DeleteApnsConfOptions { + return &DeleteApnsConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *DeleteApnsConfOptions) SetApplicationID(applicationID string) *DeleteApnsConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *DeleteApnsConfOptions) SetAcceptLanguage(acceptLanguage string) *DeleteApnsConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *DeleteApnsConfOptions) SetAppSecret(appSecret string) *DeleteApnsConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteApnsConfOptions) SetHeaders(param map[string]string) *DeleteApnsConfOptions { + options.Headers = param + return options +} + +// DeleteChromeAppExtConfOptions : The DeleteChromeAppExtConf options. +type DeleteChromeAppExtConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. 
+ AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteChromeAppExtConfOptions : Instantiate DeleteChromeAppExtConfOptions +func (*PushServiceV1) NewDeleteChromeAppExtConfOptions(applicationID string) *DeleteChromeAppExtConfOptions { + return &DeleteChromeAppExtConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *DeleteChromeAppExtConfOptions) SetApplicationID(applicationID string) *DeleteChromeAppExtConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *DeleteChromeAppExtConfOptions) SetAcceptLanguage(acceptLanguage string) *DeleteChromeAppExtConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *DeleteChromeAppExtConfOptions) SetAppSecret(appSecret string) *DeleteChromeAppExtConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteChromeAppExtConfOptions) SetHeaders(param map[string]string) *DeleteChromeAppExtConfOptions { + options.Headers = param + return options +} + +// DeleteChromeWebConfOptions : The DeleteChromeWebConf options. +type DeleteChromeWebConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. + AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteChromeWebConfOptions : Instantiate DeleteChromeWebConfOptions +func (*PushServiceV1) NewDeleteChromeWebConfOptions(applicationID string) *DeleteChromeWebConfOptions { + return &DeleteChromeWebConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *DeleteChromeWebConfOptions) SetApplicationID(applicationID string) *DeleteChromeWebConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *DeleteChromeWebConfOptions) SetAcceptLanguage(acceptLanguage string) *DeleteChromeWebConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *DeleteChromeWebConfOptions) SetAppSecret(appSecret string) *DeleteChromeWebConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteChromeWebConfOptions) SetHeaders(param map[string]string) *DeleteChromeWebConfOptions { + options.Headers = param + return options +} + +// DeleteFirefoxWebConfOptions : The DeleteFirefoxWebConf options. +type DeleteFirefoxWebConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. 
+ AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteFirefoxWebConfOptions : Instantiate DeleteFirefoxWebConfOptions +func (*PushServiceV1) NewDeleteFirefoxWebConfOptions(applicationID string) *DeleteFirefoxWebConfOptions { + return &DeleteFirefoxWebConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *DeleteFirefoxWebConfOptions) SetApplicationID(applicationID string) *DeleteFirefoxWebConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *DeleteFirefoxWebConfOptions) SetAcceptLanguage(acceptLanguage string) *DeleteFirefoxWebConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *DeleteFirefoxWebConfOptions) SetAppSecret(appSecret string) *DeleteFirefoxWebConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteFirefoxWebConfOptions) SetHeaders(param map[string]string) *DeleteFirefoxWebConfOptions { + options.Headers = param + return options +} + +// DeleteGCMConfOptions : The DeleteGCMConf options. +type DeleteGCMConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. + AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteGCMConfOptions : Instantiate DeleteGCMConfOptions +func (*PushServiceV1) NewDeleteGCMConfOptions(applicationID string) *DeleteGCMConfOptions { + return &DeleteGCMConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *DeleteGCMConfOptions) SetApplicationID(applicationID string) *DeleteGCMConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *DeleteGCMConfOptions) SetAcceptLanguage(acceptLanguage string) *DeleteGCMConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *DeleteGCMConfOptions) SetAppSecret(appSecret string) *DeleteGCMConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteGCMConfOptions) SetHeaders(param map[string]string) *DeleteGCMConfOptions { + options.Headers = param + return options +} + +// DeleteSafariWebConfOptions : The DeleteSafariWebConf options. +type DeleteSafariWebConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. 
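+
+	// Editorial note (hedged sketch, not generated code): every
+	// Delete*ConfOptions type in this file follows the same fluent pattern,
+	// e.g.
+	//
+	//	response, err := pushService.DeleteGCMConf(
+	//		pushService.NewDeleteGCMConfOptions("example-app-id"))
+	//
+	// assuming the corresponding DeleteGCMConf service method defined
+	// elsewhere in this file; "example-app-id" is a placeholder.
+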
+ AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSafariWebConfOptions : Instantiate DeleteSafariWebConfOptions +func (*PushServiceV1) NewDeleteSafariWebConfOptions(applicationID string) *DeleteSafariWebConfOptions { + return &DeleteSafariWebConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *DeleteSafariWebConfOptions) SetApplicationID(applicationID string) *DeleteSafariWebConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *DeleteSafariWebConfOptions) SetAcceptLanguage(acceptLanguage string) *DeleteSafariWebConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *DeleteSafariWebConfOptions) SetAppSecret(appSecret string) *DeleteSafariWebConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSafariWebConfOptions) SetHeaders(param map[string]string) *DeleteSafariWebConfOptions { + options.Headers = param + return options +} + +// FirefoxWeb : Web Push Notifications settings specific to Mozilla Firefox browser platforms. +type FirefoxWeb struct { + // Specifies the title to be set for the WebPush Notification. + Title *string `json:"title,omitempty"` + + // The URL of the icon to be set for the WebPush Notification. + IconURL *string `json:"iconUrl,omitempty"` + + // This parameter specifies how long (in seconds) the message should be kept in GCM storage if the device is offline. + TimeToLive *int64 `json:"timeToLive,omitempty"` + + // Custom JSON payload that will be sent as part of the notification message. + Payload *string `json:"payload,omitempty"` +} + +// UnmarshalFirefoxWeb unmarshals an instance of FirefoxWeb from the specified map of raw messages. +func UnmarshalFirefoxWeb(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FirefoxWeb) + err = core.UnmarshalPrimitive(m, "title", &obj.Title) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iconUrl", &obj.IconURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "timeToLive", &obj.TimeToLive) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "payload", &obj.Payload) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FirefoxWebPushCredendialsModel : FirefoxWebPushCredendialsModel struct +type FirefoxWebPushCredendialsModel struct { + // The URL of the WebSite / WebApp that should be permitted to subscribe to WebPush. + WebSiteURL *string `json:"webSiteUrl" validate:"required"` +} + +// NewFirefoxWebPushCredendialsModel : Instantiate FirefoxWebPushCredendialsModel (Generic Model Constructor) +func (*PushServiceV1) NewFirefoxWebPushCredendialsModel(webSiteURL string) (model *FirefoxWebPushCredendialsModel, err error) { + model = &FirefoxWebPushCredendialsModel{ + WebSiteURL: core.StringPtr(webSiteURL), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalFirefoxWebPushCredendialsModel unmarshals an instance of FirefoxWebPushCredendialsModel from the specified map of raw messages. 
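+// Firefox web push only needs the permitted site origin; a hedged, editorial
+// sketch (not generated code; the URL is a placeholder):
+//
+//	creds, err := pushService.NewFirefoxWebPushCredendialsModel("https://www.example.com")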
+func UnmarshalFirefoxWebPushCredendialsModel(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FirefoxWebPushCredendialsModel) + err = core.UnmarshalPrimitive(m, "webSiteUrl", &obj.WebSiteURL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GCMCredendialsModel : GCMCredendialsModel struct +type GCMCredendialsModel struct { + // An API key that gives the push service an authorized access to Google services. + ApiKey *string `json:"apiKey" validate:"required"` + + // Project Number in the Google Developers Console. + SenderID *string `json:"senderId" validate:"required"` +} + +// NewGCMCredendialsModel : Instantiate GCMCredendialsModel (Generic Model Constructor) +func (*PushServiceV1) NewGCMCredendialsModel(apiKey string, senderID string) (model *GCMCredendialsModel, err error) { + model = &GCMCredendialsModel{ + ApiKey: core.StringPtr(apiKey), + SenderID: core.StringPtr(senderID), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalGCMCredendialsModel unmarshals an instance of GCMCredendialsModel from the specified map of raw messages. +func UnmarshalGCMCredendialsModel(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GCMCredendialsModel) + err = core.UnmarshalPrimitive(m, "apiKey", &obj.ApiKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "senderId", &obj.SenderID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GCMCredendialsPublicModel : GCMCredendialsPublicModel struct +type GCMCredendialsPublicModel struct { + // Project Number in the Google Developers Console. + SenderID *string `json:"senderId" validate:"required"` +} + +// UnmarshalGCMCredendialsPublicModel unmarshals an instance of GCMCredendialsPublicModel from the specified map of raw messages. +func UnmarshalGCMCredendialsPublicModel(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GCMCredendialsPublicModel) + err = core.UnmarshalPrimitive(m, "senderId", &obj.SenderID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Gcm : Settings specific to Android platform. +type Gcm struct { + // Dozed devices to display only the latest notification and discard old low priority notifications. + CollapseKey *string `json:"collapseKey,omitempty"` + + // The category identifier to be used for the interactive push notifications. + InteractiveCategory *string `json:"interactiveCategory,omitempty"` + + // Specify the name of the icon to be displayed for the notification. Make sure the icon is already packaged with the + // client application. + Icon *string `json:"icon,omitempty"` + + // When this parameter is set to true, it indicates that the + // message should not be sent until the device becomes active. + DelayWhileIdle *bool `json:"delayWhileIdle,omitempty"` + + // Device group messaging makes it possible for every app instance in a group to reflect the latest messaging state. + Sync *bool `json:"sync,omitempty"` + + // private/public - Visibility of this notification, which affects how and when the notifications are revealed on a + // secure locked screen. + Visibility *string `json:"visibility,omitempty"` + + // Content specified will show up on a secure locked screen on the device when visibility is set to Private. + Redact *string `json:"redact,omitempty"` + + // unique Id of the channel to add channel properties. 
+	ChannelID *string `json:"channelId,omitempty"`
+
+	// Custom JSON payload that will be sent as part of the notification message.
+	Payload interface{} `json:"payload,omitempty"`
+
+	// A string value that indicates the priority of this notification. Allowed values are 'max', 'high', 'default', 'low'
+	// and 'min'. High/Max priority notifications along with the 'sound' field may be used for heads-up notifications on
+	// Android 5.0 or higher.
+	Priority *string `json:"priority,omitempty"`
+
+	// The sound file (on device) that will be attempted to play when the notification arrives on the device.
+	Sound *string `json:"sound,omitempty"`
+
+	// This parameter specifies how long (in seconds) the message
+	// should be kept in GCM storage if the device is offline.
+	TimeToLive *int64 `json:"timeToLive,omitempty"`
+
+	// Allows setting the notification LED color on receiving push notifications.
+	Lights *Lights `json:"lights,omitempty"`
+
+	// The title of Rich Push notifications.
+	AndroidTitle *string `json:"androidTitle,omitempty"`
+
+	// Set this notification to be part of a group of notifications sharing the same key. Grouped notifications may display
+	// in a cluster or stack on devices which support such rendering.
+	GroupID *string `json:"groupId,omitempty"`
+
+	// Options to specify for Android expandable notifications. The types of expandable notifications are
+	// picture_notification, bigtext_notification, inbox_notification.
+	Style *Style `json:"style,omitempty"`
+
+	Type *string `json:"type,omitempty"`
+}
+
+// Constants associated with the Gcm.Type property.
+const (
+	Gcm_Type_Default = "DEFAULT"
+	Gcm_Type_Silent  = "SILENT"
+)
+
+// UnmarshalGcm unmarshals an instance of Gcm from the specified map of raw messages.
+func UnmarshalGcm(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(Gcm)
+	err = core.UnmarshalPrimitive(m, "collapseKey", &obj.CollapseKey)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "interactiveCategory", &obj.InteractiveCategory)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "icon", &obj.Icon)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "delayWhileIdle", &obj.DelayWhileIdle)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "sync", &obj.Sync)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "visibility", &obj.Visibility)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "redact", &obj.Redact)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "channelId", &obj.ChannelID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "payload", &obj.Payload)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "priority", &obj.Priority)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "sound", &obj.Sound)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "timeToLive", &obj.TimeToLive)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "lights", &obj.Lights, UnmarshalLights)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "androidTitle", &obj.AndroidTitle)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "groupId", &obj.GroupID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "style", &obj.Style, UnmarshalStyle)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+
return +} + +// GetApnsConfOptions : The GetApnsConf options. +type GetApnsConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. + AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetApnsConfOptions : Instantiate GetApnsConfOptions +func (*PushServiceV1) NewGetApnsConfOptions(applicationID string) *GetApnsConfOptions { + return &GetApnsConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetApnsConfOptions) SetApplicationID(applicationID string) *GetApnsConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetApnsConfOptions) SetAcceptLanguage(acceptLanguage string) *GetApnsConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *GetApnsConfOptions) SetAppSecret(appSecret string) *GetApnsConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetApnsConfOptions) SetHeaders(param map[string]string) *GetApnsConfOptions { + options.Headers = param + return options +} + +// GetChromeAppExtConfOptions : The GetChromeAppExtConf options. +type GetChromeAppExtConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. + AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetChromeAppExtConfOptions : Instantiate GetChromeAppExtConfOptions +func (*PushServiceV1) NewGetChromeAppExtConfOptions(applicationID string) *GetChromeAppExtConfOptions { + return &GetChromeAppExtConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetChromeAppExtConfOptions) SetApplicationID(applicationID string) *GetChromeAppExtConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetChromeAppExtConfOptions) SetAcceptLanguage(acceptLanguage string) *GetChromeAppExtConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *GetChromeAppExtConfOptions) SetAppSecret(appSecret string) *GetChromeAppExtConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetChromeAppExtConfOptions) SetHeaders(param map[string]string) *GetChromeAppExtConfOptions { + options.Headers = param + return options +} + +// GetChromeAppExtConfPublicOptions : The GetChromeAppExtConfPublic options. +type GetChromeAppExtConfPublicOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The clientSecret associated with this application. + ClientSecret *string + + // The preferred language to use for error messages. 
+ AcceptLanguage *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetChromeAppExtConfPublicOptions : Instantiate GetChromeAppExtConfPublicOptions +func (*PushServiceV1) NewGetChromeAppExtConfPublicOptions(applicationID string) *GetChromeAppExtConfPublicOptions { + return &GetChromeAppExtConfPublicOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetChromeAppExtConfPublicOptions) SetApplicationID(applicationID string) *GetChromeAppExtConfPublicOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetClientSecret : Allow user to set ClientSecret +func (options *GetChromeAppExtConfPublicOptions) SetClientSecret(clientSecret string) *GetChromeAppExtConfPublicOptions { + options.ClientSecret = core.StringPtr(clientSecret) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetChromeAppExtConfPublicOptions) SetAcceptLanguage(acceptLanguage string) *GetChromeAppExtConfPublicOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetChromeAppExtConfPublicOptions) SetHeaders(param map[string]string) *GetChromeAppExtConfPublicOptions { + options.Headers = param + return options +} + +// GetChromeWebConfOptions : The GetChromeWebConf options. +type GetChromeWebConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. + AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetChromeWebConfOptions : Instantiate GetChromeWebConfOptions +func (*PushServiceV1) NewGetChromeWebConfOptions(applicationID string) *GetChromeWebConfOptions { + return &GetChromeWebConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetChromeWebConfOptions) SetApplicationID(applicationID string) *GetChromeWebConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetChromeWebConfOptions) SetAcceptLanguage(acceptLanguage string) *GetChromeWebConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *GetChromeWebConfOptions) SetAppSecret(appSecret string) *GetChromeWebConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetChromeWebConfOptions) SetHeaders(param map[string]string) *GetChromeWebConfOptions { + options.Headers = param + return options +} + +// GetFirefoxWebConfOptions : The GetFirefoxWebConf options. +type GetFirefoxWebConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. 
+ AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetFirefoxWebConfOptions : Instantiate GetFirefoxWebConfOptions +func (*PushServiceV1) NewGetFirefoxWebConfOptions(applicationID string) *GetFirefoxWebConfOptions { + return &GetFirefoxWebConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetFirefoxWebConfOptions) SetApplicationID(applicationID string) *GetFirefoxWebConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetFirefoxWebConfOptions) SetAcceptLanguage(acceptLanguage string) *GetFirefoxWebConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *GetFirefoxWebConfOptions) SetAppSecret(appSecret string) *GetFirefoxWebConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetFirefoxWebConfOptions) SetHeaders(param map[string]string) *GetFirefoxWebConfOptions { + options.Headers = param + return options +} + +// GetGCMConfOptions : The GetGCMConf options. +type GetGCMConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. + AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetGCMConfOptions : Instantiate GetGCMConfOptions +func (*PushServiceV1) NewGetGCMConfOptions(applicationID string) *GetGCMConfOptions { + return &GetGCMConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetGCMConfOptions) SetApplicationID(applicationID string) *GetGCMConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetGCMConfOptions) SetAcceptLanguage(acceptLanguage string) *GetGCMConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *GetGCMConfOptions) SetAppSecret(appSecret string) *GetGCMConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetGCMConfOptions) SetHeaders(param map[string]string) *GetGCMConfOptions { + options.Headers = param + return options +} + +// GetGcmConfPublicOptions : The GetGcmConfPublic options. +type GetGcmConfPublicOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The clientSecret associated with this application. + ClientSecret *string + + // The preferred language to use for error messages. 
+ AcceptLanguage *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetGcmConfPublicOptions : Instantiate GetGcmConfPublicOptions +func (*PushServiceV1) NewGetGcmConfPublicOptions(applicationID string) *GetGcmConfPublicOptions { + return &GetGcmConfPublicOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetGcmConfPublicOptions) SetApplicationID(applicationID string) *GetGcmConfPublicOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetClientSecret : Allow user to set ClientSecret +func (options *GetGcmConfPublicOptions) SetClientSecret(clientSecret string) *GetGcmConfPublicOptions { + options.ClientSecret = core.StringPtr(clientSecret) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetGcmConfPublicOptions) SetAcceptLanguage(acceptLanguage string) *GetGcmConfPublicOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetGcmConfPublicOptions) SetHeaders(param map[string]string) *GetGcmConfPublicOptions { + options.Headers = param + return options +} + +// GetSafariWebConfOptions : The GetSafariWebConf options. +type GetSafariWebConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. + AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSafariWebConfOptions : Instantiate GetSafariWebConfOptions +func (*PushServiceV1) NewGetSafariWebConfOptions(applicationID string) *GetSafariWebConfOptions { + return &GetSafariWebConfOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetSafariWebConfOptions) SetApplicationID(applicationID string) *GetSafariWebConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetSafariWebConfOptions) SetAcceptLanguage(acceptLanguage string) *GetSafariWebConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *GetSafariWebConfOptions) SetAppSecret(appSecret string) *GetSafariWebConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSafariWebConfOptions) SetHeaders(param map[string]string) *GetSafariWebConfOptions { + options.Headers = param + return options +} + +// GetSettingsOptions : The GetSettings options. +type GetSettingsOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // Deprecated, use Authorization instead. + AppSecret *string + + // The preferred language to use for error messages. 
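+
+	// Editorial note (hedged sketch, not generated code): the *ConfPublic
+	// option types above are intended for client-side reads and carry the
+	// clientSecret rather than the appSecret, e.g.
+	//
+	//	opts := pushService.NewGetGcmConfPublicOptions("example-app-id").
+	//		SetClientSecret("example-client-secret")
+	//
+	// assuming the corresponding GetGcmConfPublic service method defined
+	// elsewhere in this file; both values are placeholders.
+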
+ AcceptLanguage *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSettingsOptions : Instantiate GetSettingsOptions +func (*PushServiceV1) NewGetSettingsOptions(applicationID string) *GetSettingsOptions { + return &GetSettingsOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetSettingsOptions) SetApplicationID(applicationID string) *GetSettingsOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *GetSettingsOptions) SetAppSecret(appSecret string) *GetSettingsOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetSettingsOptions) SetAcceptLanguage(acceptLanguage string) *GetSettingsOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSettingsOptions) SetHeaders(param map[string]string) *GetSettingsOptions { + options.Headers = param + return options +} + +// GetWebpushServerKeyOptions : The GetWebpushServerKey options. +type GetWebpushServerKeyOptions struct { + // Unique ID of the application server for the IBM Cloud Push Notification Service identification for web push + // communication. + ApplicationID *string `validate:"required,ne="` + + // Deprecated, use Authorization instead. + ClientSecret *string + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWebpushServerKeyOptions : Instantiate GetWebpushServerKeyOptions +func (*PushServiceV1) NewGetWebpushServerKeyOptions(applicationID string) *GetWebpushServerKeyOptions { + return &GetWebpushServerKeyOptions{ + ApplicationID: core.StringPtr(applicationID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *GetWebpushServerKeyOptions) SetApplicationID(applicationID string) *GetWebpushServerKeyOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetClientSecret : Allow user to set ClientSecret +func (options *GetWebpushServerKeyOptions) SetClientSecret(clientSecret string) *GetWebpushServerKeyOptions { + options.ClientSecret = core.StringPtr(clientSecret) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *GetWebpushServerKeyOptions) SetAcceptLanguage(acceptLanguage string) *GetWebpushServerKeyOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWebpushServerKeyOptions) SetHeaders(param map[string]string) *GetWebpushServerKeyOptions { + options.Headers = param + return options +} + +// Lights : Allows setting the notification LED color on receiving push notification . +type Lights struct { + // The color of the led. The hardware will do its best approximation. + LedArgb *string `json:"ledArgb,omitempty"` + + // The number of milliseconds for the LED to be on while it's flashing. The hardware will do its best approximation. + LedOnMs *int64 `json:"ledOnMs,omitempty"` + + // The number of milliseconds for the LED to be off while it's flashing. The hardware will do its best approximation. 
+ LedOffMs *string `json:"ledOffMs,omitempty"` +} + +// UnmarshalLights unmarshals an instance of Lights from the specified map of raw messages. +func UnmarshalLights(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Lights) + err = core.UnmarshalPrimitive(m, "ledArgb", &obj.LedArgb) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ledOnMs", &obj.LedOnMs) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ledOffMs", &obj.LedOffMs) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Message : Details of the content of the notification message. +type Message struct { + // The notification message to be shown to the user. + Alert *string `json:"alert,omitempty"` + + // An optional URL that can be sent along with the alert. + URL *string `json:"url,omitempty"` +} + +// UnmarshalMessage unmarshals an instance of Message from the specified map of raw messages. +func UnmarshalMessage(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Message) + err = core.UnmarshalPrimitive(m, "alert", &obj.Alert) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MessageResponseModel : MessageResponseModel struct +type MessageResponseModel struct { + Message *SendMessageBody `json:"message,omitempty"` + + // Unique Id for the message. + MessageID *string `json:"messageId,omitempty"` +} + +// UnmarshalMessageResponseModel unmarshals an instance of MessageResponseModel from the specified map of raw messages. +func UnmarshalMessageResponseModel(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MessageResponseModel) + err = core.UnmarshalModel(m, "message", &obj.Message, UnmarshalSendMessageBody) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "messageId", &obj.MessageID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MessagesArrayModel : MessagesArrayModel struct +type MessagesArrayModel struct { + // An array of messages. + Messages []MessagesList `json:"messages,omitempty"` +} + +// UnmarshalMessagesArrayModel unmarshals an instance of MessagesArrayModel from the specified map of raw messages. +func UnmarshalMessagesArrayModel(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(MessagesArrayModel) + err = core.UnmarshalModel(m, "messages", &obj.Messages, UnmarshalMessagesList) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// MessagesList : MessagesList struct +type MessagesList struct { + // Created time of the message. + CreatedTime *string `json:"createdTime,omitempty"` + + // Unique identifier of the message. + MessageID *string `json:"messageId,omitempty"` + + // Message text. + Alert *string `json:"alert,omitempty"` + + // The URL to the message resource. + Href *string `json:"href,omitempty"` +} + +// UnmarshalMessagesList unmarshals an instance of MessagesList from the specified map of raw messages. 
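+// MessagesArrayModel and the MessagesList entries it carries describe what
+// SendMessagesInBulk returns. A hedged end-to-end sketch (editorial example,
+// not generated code), assuming sendMessagesInBulkOptions was built elsewhere
+// with the application ID and message bodies:
+//
+//	result, response, err := pushService.SendMessagesInBulk(sendMessagesInBulkOptions)
+//	if err == nil {
+//		for _, m := range result.Messages {
+//			if m.MessageID != nil {
+//				fmt.Println(*m.MessageID, response.StatusCode)
+//			}
+//		}
+//	}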
+func UnmarshalMessagesList(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(MessagesList)
+	err = core.UnmarshalPrimitive(m, "createdTime", &obj.CreatedTime)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "messageId", &obj.MessageID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "alert", &obj.Alert)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// SafariWeb : Web Push Notifications settings specific to Safari browser.
+type SafariWeb struct {
+	// Specifies the title to be set for the Safari Push Notifications.
+	Title *string `json:"title,omitempty"`
+
+	// The URL arguments that need to be used with this notification. These have to be provided in the form of a JSON
+	// array.
+	UrlArgs []string `json:"urlArgs,omitempty"`
+
+	// The label of the action button.
+	Action *string `json:"action,omitempty"`
+}
+
+// UnmarshalSafariWeb unmarshals an instance of SafariWeb from the specified map of raw messages.
+func UnmarshalSafariWeb(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(SafariWeb)
+	err = core.UnmarshalPrimitive(m, "title", &obj.Title)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "urlArgs", &obj.UrlArgs)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "action", &obj.Action)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// SaveApnsConfOptions : The SaveApnsConf options.
+type SaveApnsConfOptions struct {
+	// Unique ID of the application using the push service.
+	ApplicationID *string `validate:"required,ne="`
+
+	// Password for the APNS certificate.
+	Password *string `validate:"required"`
+
+	// Whether the APNS certificate is for the sandbox (development) environment.
+	IsSandBox *bool `validate:"required"`
+
+	// The APNS certificate.
+	Certificate io.ReadCloser `validate:"required"`
+
+	// The content type of the certificate.
+	CertificateContentType *string
+
+	// The preferred language to use for error messages.
+	AcceptLanguage *string
+
+	// Deprecated, use Authorization instead.
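+
+	// Editorial note (hedged sketch, not generated code): the certificate is
+	// streamed as an io.ReadCloser, so an *os.File can be passed directly,
+	// e.g.
+	//
+	//	cert, err := os.Open("apns-cert.p12") // placeholder path
+	//	if err == nil {
+	//		opts := pushService.NewSaveApnsConfOptions(
+	//			"example-app-id", "example-password", true, cert)
+	//		opts.SetCertificateContentType("application/x-pkcs12") // plausible p12 content type, an assumption
+	//	}
+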
+ AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSaveApnsConfOptions : Instantiate SaveApnsConfOptions +func (*PushServiceV1) NewSaveApnsConfOptions(applicationID string, password string, isSandBox bool, certificate io.ReadCloser) *SaveApnsConfOptions { + return &SaveApnsConfOptions{ + ApplicationID: core.StringPtr(applicationID), + Password: core.StringPtr(password), + IsSandBox: core.BoolPtr(isSandBox), + Certificate: certificate, + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *SaveApnsConfOptions) SetApplicationID(applicationID string) *SaveApnsConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetPassword : Allow user to set Password +func (options *SaveApnsConfOptions) SetPassword(password string) *SaveApnsConfOptions { + options.Password = core.StringPtr(password) + return options +} + +// SetIsSandBox : Allow user to set IsSandBox +func (options *SaveApnsConfOptions) SetIsSandBox(isSandBox bool) *SaveApnsConfOptions { + options.IsSandBox = core.BoolPtr(isSandBox) + return options +} + +// SetCertificate : Allow user to set Certificate +func (options *SaveApnsConfOptions) SetCertificate(certificate io.ReadCloser) *SaveApnsConfOptions { + options.Certificate = certificate + return options +} + +// SetCertificateContentType : Allow user to set CertificateContentType +func (options *SaveApnsConfOptions) SetCertificateContentType(certificateContentType string) *SaveApnsConfOptions { + options.CertificateContentType = core.StringPtr(certificateContentType) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *SaveApnsConfOptions) SetAcceptLanguage(acceptLanguage string) *SaveApnsConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *SaveApnsConfOptions) SetAppSecret(appSecret string) *SaveApnsConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SaveApnsConfOptions) SetHeaders(param map[string]string) *SaveApnsConfOptions { + options.Headers = param + return options +} + +// SaveChromeAppExtConfOptions : The SaveChromeAppExtConf options. +type SaveChromeAppExtConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // An API key that gives the push service an authorized access to Google services. + ApiKey *string `validate:"required"` + + // Project Number in the Google Developers Console. + SenderID *string `validate:"required"` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. 
+ AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSaveChromeAppExtConfOptions : Instantiate SaveChromeAppExtConfOptions +func (*PushServiceV1) NewSaveChromeAppExtConfOptions(applicationID string, apiKey string, senderID string) *SaveChromeAppExtConfOptions { + return &SaveChromeAppExtConfOptions{ + ApplicationID: core.StringPtr(applicationID), + ApiKey: core.StringPtr(apiKey), + SenderID: core.StringPtr(senderID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *SaveChromeAppExtConfOptions) SetApplicationID(applicationID string) *SaveChromeAppExtConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetApiKey : Allow user to set ApiKey +func (options *SaveChromeAppExtConfOptions) SetApiKey(apiKey string) *SaveChromeAppExtConfOptions { + options.ApiKey = core.StringPtr(apiKey) + return options +} + +// SetSenderID : Allow user to set SenderID +func (options *SaveChromeAppExtConfOptions) SetSenderID(senderID string) *SaveChromeAppExtConfOptions { + options.SenderID = core.StringPtr(senderID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *SaveChromeAppExtConfOptions) SetAcceptLanguage(acceptLanguage string) *SaveChromeAppExtConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *SaveChromeAppExtConfOptions) SetAppSecret(appSecret string) *SaveChromeAppExtConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SaveChromeAppExtConfOptions) SetHeaders(param map[string]string) *SaveChromeAppExtConfOptions { + options.Headers = param + return options +} + +// SaveChromeWebConfOptions : The SaveChromeWebConf options. +type SaveChromeWebConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // An API key that gives the push service an authorized access to Google services that is used for Chrome Web Push. + ApiKey *string `validate:"required"` + + // The URL of the WebSite / WebApp that should be permitted to subscribe to WebPush. + WebSiteURL *string `validate:"required"` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. 
+ AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSaveChromeWebConfOptions : Instantiate SaveChromeWebConfOptions +func (*PushServiceV1) NewSaveChromeWebConfOptions(applicationID string, apiKey string, webSiteURL string) *SaveChromeWebConfOptions { + return &SaveChromeWebConfOptions{ + ApplicationID: core.StringPtr(applicationID), + ApiKey: core.StringPtr(apiKey), + WebSiteURL: core.StringPtr(webSiteURL), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *SaveChromeWebConfOptions) SetApplicationID(applicationID string) *SaveChromeWebConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetApiKey : Allow user to set ApiKey +func (options *SaveChromeWebConfOptions) SetApiKey(apiKey string) *SaveChromeWebConfOptions { + options.ApiKey = core.StringPtr(apiKey) + return options +} + +// SetWebSiteURL : Allow user to set WebSiteURL +func (options *SaveChromeWebConfOptions) SetWebSiteURL(webSiteURL string) *SaveChromeWebConfOptions { + options.WebSiteURL = core.StringPtr(webSiteURL) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *SaveChromeWebConfOptions) SetAcceptLanguage(acceptLanguage string) *SaveChromeWebConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *SaveChromeWebConfOptions) SetAppSecret(appSecret string) *SaveChromeWebConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SaveChromeWebConfOptions) SetHeaders(param map[string]string) *SaveChromeWebConfOptions { + options.Headers = param + return options +} + +// SaveFirefoxWebConfOptions : The SaveFirefoxWebConf options. +type SaveFirefoxWebConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // The URL of the WebSite / WebApp that should be permitted to subscribe to WebPush. + WebSiteURL *string `validate:"required"` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. 
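+
+	// Editorial note (hedged sketch, not generated code): Chrome web push
+	// pairs a Google API key with the site origin allowed to subscribe, e.g.
+	//
+	//	opts := pushService.NewSaveChromeWebConfOptions(
+	//		"example-app-id", "example-google-api-key", "https://www.example.com")
+	//
+	// all three values are placeholders.
+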
+ AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSaveFirefoxWebConfOptions : Instantiate SaveFirefoxWebConfOptions +func (*PushServiceV1) NewSaveFirefoxWebConfOptions(applicationID string, webSiteURL string) *SaveFirefoxWebConfOptions { + return &SaveFirefoxWebConfOptions{ + ApplicationID: core.StringPtr(applicationID), + WebSiteURL: core.StringPtr(webSiteURL), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *SaveFirefoxWebConfOptions) SetApplicationID(applicationID string) *SaveFirefoxWebConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetWebSiteURL : Allow user to set WebSiteURL +func (options *SaveFirefoxWebConfOptions) SetWebSiteURL(webSiteURL string) *SaveFirefoxWebConfOptions { + options.WebSiteURL = core.StringPtr(webSiteURL) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *SaveFirefoxWebConfOptions) SetAcceptLanguage(acceptLanguage string) *SaveFirefoxWebConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *SaveFirefoxWebConfOptions) SetAppSecret(appSecret string) *SaveFirefoxWebConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SaveFirefoxWebConfOptions) SetHeaders(param map[string]string) *SaveFirefoxWebConfOptions { + options.Headers = param + return options +} + +// SaveGCMConfOptions : The SaveGCMConf options. +type SaveGCMConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // An API key that gives the push service an authorized access to Google services. + ApiKey *string `validate:"required"` + + // Project Number in the Google Developers Console. + SenderID *string `validate:"required"` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. 
+ AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSaveGCMConfOptions : Instantiate SaveGCMConfOptions +func (*PushServiceV1) NewSaveGCMConfOptions(applicationID string, apiKey string, senderID string) *SaveGCMConfOptions { + return &SaveGCMConfOptions{ + ApplicationID: core.StringPtr(applicationID), + ApiKey: core.StringPtr(apiKey), + SenderID: core.StringPtr(senderID), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *SaveGCMConfOptions) SetApplicationID(applicationID string) *SaveGCMConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetApiKey : Allow user to set ApiKey +func (options *SaveGCMConfOptions) SetApiKey(apiKey string) *SaveGCMConfOptions { + options.ApiKey = core.StringPtr(apiKey) + return options +} + +// SetSenderID : Allow user to set SenderID +func (options *SaveGCMConfOptions) SetSenderID(senderID string) *SaveGCMConfOptions { + options.SenderID = core.StringPtr(senderID) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *SaveGCMConfOptions) SetAcceptLanguage(acceptLanguage string) *SaveGCMConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *SaveGCMConfOptions) SetAppSecret(appSecret string) *SaveGCMConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SaveGCMConfOptions) SetHeaders(param map[string]string) *SaveGCMConfOptions { + options.Headers = param + return options +} + +// SaveSafariWebConfOptions : The SaveSafariWebConf options. +type SaveSafariWebConfOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // Password for the web push certificate. + Password *string `validate:"required"` + + // The Safari web push certificate (p12 format). + Certificate io.ReadCloser `validate:"required"` + + // The website name. This is the heading used in Notification Center. + WebsiteName *string `validate:"required"` + + // The URL to go to when the notification is clicked. Use %@ as a placeholder for arguments you fill in when delivering + // your notification. This URL must use the http or https scheme; otherwise, it is invalid. + UrlFormatString *string `validate:"required"` + + // Unique reverse-domain string for your Website Push ID such as web.com.example.domain (the string must start with + // web). + WebsitePushID *string `validate:"required"` + + // The URL of the website that should be permitted to subscribe to Safari Push Notifications. + WebSiteURL *string `validate:"required"` + + // The content type of certificate. + CertificateContentType *string + + // PNG icon file of 16x16 size. + Icon16x16 io.ReadCloser + + // The content type of icon16x16. + Icon16x16ContentType *string + + // PNG icon file of 16x16@2x size. + Icon16x162x io.ReadCloser + + // The content type of icon16x162x. + Icon16x162xContentType *string + + // PNG icon file of 32x32 size. + Icon32x32 io.ReadCloser + + // The content type of icon32x32. + Icon32x32ContentType *string + + // PNG icon file of 32x32@2x size. + Icon32x322x io.ReadCloser + + // The content type of icon32x322x. + Icon32x322xContentType *string + + // PNG icon file of 128x128 size. + Icon128x128 io.ReadCloser + + // The content type of icon128x128. 
+ Icon128x128ContentType *string + + // PNG icon file of 128x128@2x size. + Icon128x1282x io.ReadCloser + + // The content type of icon128x1282x. + Icon128x1282xContentType *string + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. + AppSecret *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSaveSafariWebConfOptions : Instantiate SaveSafariWebConfOptions +func (*PushServiceV1) NewSaveSafariWebConfOptions(applicationID string, password string, certificate io.ReadCloser, websiteName string, urlFormatString string, websitePushID string, webSiteURL string) *SaveSafariWebConfOptions { + return &SaveSafariWebConfOptions{ + ApplicationID: core.StringPtr(applicationID), + Password: core.StringPtr(password), + Certificate: certificate, + WebsiteName: core.StringPtr(websiteName), + UrlFormatString: core.StringPtr(urlFormatString), + WebsitePushID: core.StringPtr(websitePushID), + WebSiteURL: core.StringPtr(webSiteURL), + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *SaveSafariWebConfOptions) SetApplicationID(applicationID string) *SaveSafariWebConfOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetPassword : Allow user to set Password +func (options *SaveSafariWebConfOptions) SetPassword(password string) *SaveSafariWebConfOptions { + options.Password = core.StringPtr(password) + return options +} + +// SetCertificate : Allow user to set Certificate +func (options *SaveSafariWebConfOptions) SetCertificate(certificate io.ReadCloser) *SaveSafariWebConfOptions { + options.Certificate = certificate + return options +} + +// SetWebsiteName : Allow user to set WebsiteName +func (options *SaveSafariWebConfOptions) SetWebsiteName(websiteName string) *SaveSafariWebConfOptions { + options.WebsiteName = core.StringPtr(websiteName) + return options +} + +// SetUrlFormatString : Allow user to set UrlFormatString +func (options *SaveSafariWebConfOptions) SetUrlFormatString(urlFormatString string) *SaveSafariWebConfOptions { + options.UrlFormatString = core.StringPtr(urlFormatString) + return options +} + +// SetWebsitePushID : Allow user to set WebsitePushID +func (options *SaveSafariWebConfOptions) SetWebsitePushID(websitePushID string) *SaveSafariWebConfOptions { + options.WebsitePushID = core.StringPtr(websitePushID) + return options +} + +// SetWebSiteURL : Allow user to set WebSiteURL +func (options *SaveSafariWebConfOptions) SetWebSiteURL(webSiteURL string) *SaveSafariWebConfOptions { + options.WebSiteURL = core.StringPtr(webSiteURL) + return options +} + +// SetCertificateContentType : Allow user to set CertificateContentType +func (options *SaveSafariWebConfOptions) SetCertificateContentType(certificateContentType string) *SaveSafariWebConfOptions { + options.CertificateContentType = core.StringPtr(certificateContentType) + return options +} + +// SetIcon16x16 : Allow user to set Icon16x16 +func (options *SaveSafariWebConfOptions) SetIcon16x16(icon16x16 io.ReadCloser) *SaveSafariWebConfOptions { + options.Icon16x16 = icon16x16 + return options +} + +// SetIcon16x16ContentType : Allow user to set Icon16x16ContentType +func (options *SaveSafariWebConfOptions) SetIcon16x16ContentType(icon16x16ContentType string) *SaveSafariWebConfOptions { + options.Icon16x16ContentType = core.StringPtr(icon16x16ContentType) + return options +} + +// SetIcon16x162x : Allow user to set Icon16x162x +func (options 
*SaveSafariWebConfOptions) SetIcon16x162x(icon16x162x io.ReadCloser) *SaveSafariWebConfOptions { + options.Icon16x162x = icon16x162x + return options +} + +// SetIcon16x162xContentType : Allow user to set Icon16x162xContentType +func (options *SaveSafariWebConfOptions) SetIcon16x162xContentType(icon16x162xContentType string) *SaveSafariWebConfOptions { + options.Icon16x162xContentType = core.StringPtr(icon16x162xContentType) + return options +} + +// SetIcon32x32 : Allow user to set Icon32x32 +func (options *SaveSafariWebConfOptions) SetIcon32x32(icon32x32 io.ReadCloser) *SaveSafariWebConfOptions { + options.Icon32x32 = icon32x32 + return options +} + +// SetIcon32x32ContentType : Allow user to set Icon32x32ContentType +func (options *SaveSafariWebConfOptions) SetIcon32x32ContentType(icon32x32ContentType string) *SaveSafariWebConfOptions { + options.Icon32x32ContentType = core.StringPtr(icon32x32ContentType) + return options +} + +// SetIcon32x322x : Allow user to set Icon32x322x +func (options *SaveSafariWebConfOptions) SetIcon32x322x(icon32x322x io.ReadCloser) *SaveSafariWebConfOptions { + options.Icon32x322x = icon32x322x + return options +} + +// SetIcon32x322xContentType : Allow user to set Icon32x322xContentType +func (options *SaveSafariWebConfOptions) SetIcon32x322xContentType(icon32x322xContentType string) *SaveSafariWebConfOptions { + options.Icon32x322xContentType = core.StringPtr(icon32x322xContentType) + return options +} + +// SetIcon128x128 : Allow user to set Icon128x128 +func (options *SaveSafariWebConfOptions) SetIcon128x128(icon128x128 io.ReadCloser) *SaveSafariWebConfOptions { + options.Icon128x128 = icon128x128 + return options +} + +// SetIcon128x128ContentType : Allow user to set Icon128x128ContentType +func (options *SaveSafariWebConfOptions) SetIcon128x128ContentType(icon128x128ContentType string) *SaveSafariWebConfOptions { + options.Icon128x128ContentType = core.StringPtr(icon128x128ContentType) + return options +} + +// SetIcon128x1282x : Allow user to set Icon128x1282x +func (options *SaveSafariWebConfOptions) SetIcon128x1282x(icon128x1282x io.ReadCloser) *SaveSafariWebConfOptions { + options.Icon128x1282x = icon128x1282x + return options +} + +// SetIcon128x1282xContentType : Allow user to set Icon128x1282xContentType +func (options *SaveSafariWebConfOptions) SetIcon128x1282xContentType(icon128x1282xContentType string) *SaveSafariWebConfOptions { + options.Icon128x1282xContentType = core.StringPtr(icon128x1282xContentType) + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *SaveSafariWebConfOptions) SetAcceptLanguage(acceptLanguage string) *SaveSafariWebConfOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetAppSecret : Allow user to set AppSecret +func (options *SaveSafariWebConfOptions) SetAppSecret(appSecret string) *SaveSafariWebConfOptions { + options.AppSecret = core.StringPtr(appSecret) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SaveSafariWebConfOptions) SetHeaders(param map[string]string) *SaveSafariWebConfOptions { + options.Headers = param + return options +} + +// SendMessageOptions : The SendMessage options. +type SendMessageOptions struct { + // Unique ID of the application using the push service. + ApplicationID *string `validate:"required,ne="` + + // Details of the content of the notification message. + Message *Message `validate:"required"` + + // Additional properties that can be configured for the notification. 
+ Settings *Settings + + // Validate the devices. + Validate *bool + + // An optional target for the message. Specify one of the target parameters to choose the recipients of the + // notification. If no target is specified, a broadcast notification will be sent to all the registered devices. + Target *Target + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewSendMessageOptions : Instantiate SendMessageOptions +func (*PushServiceV1) NewSendMessageOptions(applicationID string, message *Message) *SendMessageOptions { + return &SendMessageOptions{ + ApplicationID: core.StringPtr(applicationID), + Message: message, + } +} + +// SetApplicationID : Allow user to set ApplicationID +func (options *SendMessageOptions) SetApplicationID(applicationID string) *SendMessageOptions { + options.ApplicationID = core.StringPtr(applicationID) + return options +} + +// SetMessage : Allow user to set Message +func (options *SendMessageOptions) SetMessage(message *Message) *SendMessageOptions { + options.Message = message + return options +} + +// SetSettings : Allow user to set Settings +func (options *SendMessageOptions) SetSettings(settings *Settings) *SendMessageOptions { + options.Settings = settings + return options +} + +// SetValidate : Allow user to set Validate +func (options *SendMessageOptions) SetValidate(validate bool) *SendMessageOptions { + options.Validate = core.BoolPtr(validate) + return options +} + +// SetTarget : Allow user to set Target +func (options *SendMessageOptions) SetTarget(target *Target) *SendMessageOptions { + options.Target = target + return options +} + +// SetAcceptLanguage : Allow user to set AcceptLanguage +func (options *SendMessageOptions) SetAcceptLanguage(acceptLanguage string) *SendMessageOptions { + options.AcceptLanguage = core.StringPtr(acceptLanguage) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *SendMessageOptions) SetHeaders(param map[string]string) *SendMessageOptions { + options.Headers = param + return options +} + +// SendMessagesInBulkOptions : The SendMessagesInBulk options. +type SendMessagesInBulkOptions struct { + // Application ID. + ApplicationID *string `validate:"required,ne="` + + Body []SendMessageBody `validate:"required"` + + // The preferred language to use for error messages. + AcceptLanguage *string + + // Deprecated, use Authorization instead. 
+  AppSecret *string
+
+  // Allows users to set headers on API requests
+  Headers map[string]string
+}
+
+// NewSendMessagesInBulkOptions : Instantiate SendMessagesInBulkOptions
+func (*PushServiceV1) NewSendMessagesInBulkOptions(applicationID string, body []SendMessageBody) *SendMessagesInBulkOptions {
+  return &SendMessagesInBulkOptions{
+    ApplicationID: core.StringPtr(applicationID),
+    Body: body,
+  }
+}
+
+// SetApplicationID : Allow user to set ApplicationID
+func (options *SendMessagesInBulkOptions) SetApplicationID(applicationID string) *SendMessagesInBulkOptions {
+  options.ApplicationID = core.StringPtr(applicationID)
+  return options
+}
+
+// SetBody : Allow user to set Body
+func (options *SendMessagesInBulkOptions) SetBody(body []SendMessageBody) *SendMessagesInBulkOptions {
+  options.Body = body
+  return options
+}
+
+// SetAcceptLanguage : Allow user to set AcceptLanguage
+func (options *SendMessagesInBulkOptions) SetAcceptLanguage(acceptLanguage string) *SendMessagesInBulkOptions {
+  options.AcceptLanguage = core.StringPtr(acceptLanguage)
+  return options
+}
+
+// SetAppSecret : Allow user to set AppSecret
+func (options *SendMessagesInBulkOptions) SetAppSecret(appSecret string) *SendMessagesInBulkOptions {
+  options.AppSecret = core.StringPtr(appSecret)
+  return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *SendMessagesInBulkOptions) SetHeaders(param map[string]string) *SendMessagesInBulkOptions {
+  options.Headers = param
+  return options
+}
+
+// Settings : Additional properties that can be configured for the notification.
+type Settings struct {
+  // Settings specific to the iOS platform.
+  Apns *Apns `json:"apns,omitempty"`
+
+  // Settings specific to the Android platform.
+  Gcm *Gcm `json:"gcm,omitempty"`
+
+  // Web Push Notifications settings specific to Mozilla Firefox browser platforms.
+  FirefoxWeb *FirefoxWeb `json:"firefoxWeb,omitempty"`
+
+  // Web Push Notifications settings specific to Chrome browser.
+  ChromeWeb *ChromeWeb `json:"chromeWeb,omitempty"`
+
+  // Web Push Notifications settings specific to Safari browser.
+  SafariWeb *SafariWeb `json:"safariWeb,omitempty"`
+
+  // Notification settings specific to the Chrome App Extension.
+  ChromeAppExt *ChromeAppExt `json:"chromeAppExt,omitempty"`
+}
+
+// UnmarshalSettings unmarshals an instance of Settings from the specified map of raw messages.
+func UnmarshalSettings(m map[string]json.RawMessage, result interface{}) (err error) {
+  obj := new(Settings)
+  err = core.UnmarshalModel(m, "apns", &obj.Apns, UnmarshalApns)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalModel(m, "gcm", &obj.Gcm, UnmarshalGcm)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalModel(m, "firefoxWeb", &obj.FirefoxWeb, UnmarshalFirefoxWeb)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalModel(m, "chromeWeb", &obj.ChromeWeb, UnmarshalChromeWeb)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalModel(m, "safariWeb", &obj.SafariWeb, UnmarshalSafariWeb)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalModel(m, "chromeAppExt", &obj.ChromeAppExt, UnmarshalChromeAppExt)
+  if err != nil {
+    return
+  }
+  reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+  return
+}
+
+// Style : Options to specify for Android expandable notifications. The types of expandable notifications are
+// picture_notification, bigtext_notification, inbox_notification.
+type Style struct {
+  // Specifies the type of expandable notification. The possible values are bigtext_notification, picture_notification,
+  // inbox_notification.
+  Type *string `json:"type,omitempty"`
+
+  // Specifies the title of the notification. The title is displayed when the notification is expanded. Title must be
+  // specified for all three expandable notifications.
+  Title *string `json:"title,omitempty"`
+
+  // A URL from which the picture has to be obtained for the notification. Must be specified for picture_notification.
+  URL *string `json:"url,omitempty"`
+
+  // The big text that needs to be displayed on expanding a bigtext_notification. Must be specified for
+  // bigtext_notification.
+  Text *string `json:"text,omitempty"`
+
+  // An array of strings that is to be displayed in inbox style for inbox_notification. Must be specified for
+  // inbox_notification.
+  Lines []string `json:"lines,omitempty"`
+}
+
+// UnmarshalStyle unmarshals an instance of Style from the specified map of raw messages.
+func UnmarshalStyle(m map[string]json.RawMessage, result interface{}) (err error) {
+  obj := new(Style)
+  err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "title", &obj.Title)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "url", &obj.URL)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "text", &obj.Text)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "lines", &obj.Lines)
+  if err != nil {
+    return
+  }
+  reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+  return
+}
+
+// Target : An optional target for the message. Specify one of the target parameters to choose the recipients of the
+// notification. If no target is specified, a broadcast notification will be sent to all the registered devices.
+type Target struct {
+  // Send notification to the list of specified devices.
+  DeviceIds []string `json:"deviceIds,omitempty"`
+
+  // Send notification to the specified userIds.
+  UserIds []string `json:"userIds,omitempty"`
+
+  // Send notification to the devices of the specified platforms. 'A' for Apple (iOS) devices, 'G' for Google (Android)
+  // devices, 'WEB_CHROME' for Chrome Web Browsers, 'WEB_FIREFOX' for Firefox Web Browsers, 'WEB_SAFARI' for Safari Push
+  // Notifications and 'APPEXT_CHROME' for Chrome App Extension.
+  Platforms []string `json:"platforms,omitempty"`
+
+  // Send notification to the devices that have subscribed to any of
+  // these tags.
+  TagNames []string `json:"tagNames,omitempty"`
+}
+
+// UnmarshalTarget unmarshals an instance of Target from the specified map of raw messages.
+func UnmarshalTarget(m map[string]json.RawMessage, result interface{}) (err error) {
+  obj := new(Target)
+  err = core.UnmarshalPrimitive(m, "deviceIds", &obj.DeviceIds)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "userIds", &obj.UserIds)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "platforms", &obj.Platforms)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "tagNames", &obj.TagNames)
+  if err != nil {
+    return
+  }
+  reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+  return
+}
+
+// ApnsCertUploadResponse : ApnsCertUploadResponse struct
+type ApnsCertUploadResponse struct {
+  // The APNS certificate file name.
+  Certificate *string `json:"certificate,omitempty"`
+
+  // Whether the certificate is a sandbox (development) certificate.
+  IsSandBox *bool `json:"isSandBox,omitempty"`
+
+  // The date until which the certificate is valid.
+  ValidUntil *string `json:"validUntil,omitempty"`
+}
+
+// UnmarshalApnsCertUploadResponse unmarshals an instance of ApnsCertUploadResponse from the specified map of raw messages.
+func UnmarshalApnsCertUploadResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+  obj := new(ApnsCertUploadResponse)
+  err = core.UnmarshalPrimitive(m, "certificate", &obj.Certificate)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "isSandBox", &obj.IsSandBox)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "validUntil", &obj.ValidUntil)
+  if err != nil {
+    return
+  }
+  reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+  return
+}
+
+// AppSettingsObjResponse : AppSettingsObjResponse struct
+type AppSettingsObjResponse struct {
+  // The link to the APNS configuration.
+  ApnsConf *string `json:"apnsConf,omitempty"`
+
+  // The link to the GCM configuration.
+  GcmConf *string `json:"gcmConf,omitempty"`
+
+  // The link to the Chrome webpush configuration.
+  ChromeWebConf *string `json:"chromeWebConf,omitempty"`
+
+  // The link to the Safari Push configuration.
+  SafariWebConf *string `json:"safariWebConf,omitempty"`
+
+  // The link to the Firefox webpush configuration.
+  FirefoxWebConf *string `json:"firefoxWebConf,omitempty"`
+}
+
+// UnmarshalAppSettingsObjResponse unmarshals an instance of AppSettingsObjResponse from the specified map of raw messages.
+func UnmarshalAppSettingsObjResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+  obj := new(AppSettingsObjResponse)
+  err = core.UnmarshalPrimitive(m, "apnsConf", &obj.ApnsConf)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "gcmConf", &obj.GcmConf)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "chromeWebConf", &obj.ChromeWebConf)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "safariWebConf", &obj.SafariWebConf)
+  if err != nil {
+    return
+  }
+  err = core.UnmarshalPrimitive(m, "firefoxWebConf", &obj.FirefoxWebConf)
+  if err != nil {
+    return
+  }
+  reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+  return
+}
+
+// SafariCertUploadResponse : SafariCertUploadResponse struct
+type SafariCertUploadResponse struct {
+  // The Safari web push certificate file name.
+  Certificate *string `json:"certificate,omitempty"`
+
+  // The website name. This is the heading used in Notification Center.
+  WebsiteName *string `json:"websiteName,omitempty"`
+
+  // The URL to go to when the notification is clicked.
+  UrlFormatString *string `json:"urlFormatString,omitempty"`
+
+  // Unique reverse-domain string for your Website Push ID.
+  WebsitePushID interface{} `json:"websitePushID,omitempty"`
+
+  // The URL of the website that should be permitted to subscribe to Safari Push Notifications.
+  WebSiteURL interface{} `json:"webSiteUrl,omitempty"`
+}
+
+// UnmarshalSafariCertUploadResponse unmarshals an instance of SafariCertUploadResponse from the specified map of raw messages.
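+// Like the other generated unmarshallers in this file, it is intended to be
+// passed to core.UnmarshalModel rather than called directly; a sketch of the
+// calling pattern (rawResponse and result are assumed locals):
+//   err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSafariCertUploadResponse)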
+func UnmarshalSafariCertUploadResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SafariCertUploadResponse) + err = core.UnmarshalPrimitive(m, "certificate", &obj.Certificate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "websiteName", &obj.WebsiteName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "urlFormatString", &obj.UrlFormatString) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "websitePushID", &obj.WebsitePushID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "webSiteUrl", &obj.WebSiteURL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SendMessageBody : SendMessageBody struct +type SendMessageBody struct { + // Details of the content of the notification message. + Message *Message `json:"message" validate:"required"` + + // Additional properties that can be configured for the notification. + Settings *Settings `json:"settings,omitempty"` + + // Validate the devices. + Validate *bool `json:"validate,omitempty"` + + // An optional target for the message. Specify one of the target parameters to choose the recipients of the + // notification. If no target is specified, a broadcast notification will be sent to all the registered devices. + Target *Target `json:"target,omitempty"` +} + +// NewSendMessageBody : Instantiate SendMessageBody (Generic Model Constructor) +func (*PushServiceV1) NewSendMessageBody(message *Message) (model *SendMessageBody, err error) { + model = &SendMessageBody{ + Message: message, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSendMessageBody unmarshals an instance of SendMessageBody from the specified map of raw messages. +func UnmarshalSendMessageBody(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SendMessageBody) + err = core.UnmarshalModel(m, "message", &obj.Message, UnmarshalMessage) + if err != nil { + return + } + err = core.UnmarshalModel(m, "settings", &obj.Settings, UnmarshalSettings) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "validate", &obj.Validate) + if err != nil { + return + } + err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalTarget) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/schematics-go-sdk/LICENSE b/vendor/github.com/IBM/schematics-go-sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/IBM/schematics-go-sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/IBM/schematics-go-sdk/common/headers.go b/vendor/github.com/IBM/schematics-go-sdk/common/headers.go new file mode 100644 index 00000000000..4dc378c451e --- /dev/null +++ b/vendor/github.com/IBM/schematics-go-sdk/common/headers.go @@ -0,0 +1,82 @@ +/** + * (C) Copyright IBM Corp. 2019, 2020. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "fmt" + "runtime" +) + +const ( + sdkName = "schematics-go-sdk" + headerNameUserAgent = "User-Agent" +) + +// +// GetSdkHeaders - returns the set of SDK-specific headers to be included in an outgoing request. +// +// This function is invoked by generated service methods (i.e. methods which implement the REST API operations +// defined within the API definition). The purpose of this function is to give the SDK implementor the opportunity +// to provide SDK-specific HTTP headers that will be sent with an outgoing REST API request. +// This function is invoked for each invocation of a generated service method, +// so the set of HTTP headers could be request-specific. +// As an optimization, if your SDK will be returning the same set of HTTP headers for each invocation of this +// function, it is recommended that you initialize the returned map just once (perhaps by using +// lazy initialization) and simply return it each time the function is invoked, instead of building it each time +// as in the example below. +// +// If you plan to gather metrics for your SDK, the User-Agent header value must +// be a string similar to the following: +// schematics-go-sdk/0.0.1 (lang=go; arch=x86_64; os=Linux; go.version=1.12.9) +// +// In the example above, the analytics tool will parse the user-agent header and +// use the following properties: +// "schematics-go-sdk" - the name of your sdk +// "0.0.1"- the version of your sdk +// "lang=go" - the language of the current sdk +// "arch=x86_64; os=Linux; go.version=1.12.9" - system information +// +// Note: It is very important that the sdk name ends with the string `-sdk`, +// as the analytics data collector uses this to gather usage data. +// +// Parameters: +// serviceName - the name of the service as defined in the API definition (e.g. "MyService1") +// serviceVersion - the version of the service as defined in the API definition (e.g. "V1") +// operationId - the operationId as defined in the API definition (e.g. 
getContext) +// +// Returns: +// a Map which contains the set of headers to be included in the REST API request +// +func GetSdkHeaders(serviceName string, serviceVersion string, operationId string) map[string]string { + sdkHeaders := make(map[string]string) + + sdkHeaders[headerNameUserAgent] = GetUserAgentInfo() + + return sdkHeaders +} + +var userAgent string = fmt.Sprintf("%s/%s %s", sdkName, Version, GetSystemInfo()) + +func GetUserAgentInfo() string { + return userAgent +} + +var systemInfo = fmt.Sprintf("(lang=go; arch=%s; os=%s; go.version=%s)", runtime.GOARCH, runtime.GOOS, runtime.Version()) + +func GetSystemInfo() string { + return systemInfo +} diff --git a/vendor/github.com/IBM/schematics-go-sdk/common/version.go b/vendor/github.com/IBM/schematics-go-sdk/common/version.go new file mode 100644 index 00000000000..878f3262f17 --- /dev/null +++ b/vendor/github.com/IBM/schematics-go-sdk/common/version.go @@ -0,0 +1,20 @@ +/** + * (C) Copyright IBM Corp. 2019, 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +// Version of the SDK +const Version = "0.0.1" diff --git a/vendor/github.com/IBM/schematics-go-sdk/schematicsv1/schematics_v1.go b/vendor/github.com/IBM/schematics-go-sdk/schematicsv1/schematics_v1.go new file mode 100644 index 00000000000..1657e063d9b --- /dev/null +++ b/vendor/github.com/IBM/schematics-go-sdk/schematicsv1/schematics_v1.go @@ -0,0 +1,11820 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.17.0-8d569e8f-20201030-142059 + */ + +// Package schematicsv1 : Operations and models for the SchematicsV1 service +package schematicsv1 + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "time" + + "github.com/IBM/go-sdk-core/v4/core" + common "github.com/IBM/schematics-go-sdk/common" + "github.com/go-openapi/strfmt" +) + +// SchematicsV1 : Schematics Service is to provide the capability to manage resources of (cloud) provider +// infrastructure using file based configurations. With the Schematics service the customer is able to specify the +// required set of resources and their configuration in ''config files'', and then pass these config files to the +// service to fulfill it by calling the necessary actions on the infrastructure. This principle is also known as +// Infrastructure as Code. 
For more information refer to +// https://cloud.ibm.com/docs/schematics?topic=schematics-getting-started' +// +// Version: 1.0 +type SchematicsV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://schematics-dev.containers.appdomain.cloud" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "schematics" + +// SchematicsV1Options : Service options +type SchematicsV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewSchematicsV1UsingExternalConfig : constructs an instance of SchematicsV1 with passed in options and external configuration. +func NewSchematicsV1UsingExternalConfig(options *SchematicsV1Options) (schematics *SchematicsV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + schematics, err = NewSchematicsV1(options) + if err != nil { + return + } + + err = schematics.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = schematics.Service.SetServiceURL(options.URL) + } + return +} + +// NewSchematicsV1 : constructs an instance of SchematicsV1 with passed in options. +func NewSchematicsV1(options *SchematicsV1Options) (service *SchematicsV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &SchematicsV1{ + Service: baseService, + } + + return +} + +// SetServiceURL sets the service URL +func (schematics *SchematicsV1) SetServiceURL(url string) error { + return schematics.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (schematics *SchematicsV1) GetServiceURL() string { + return schematics.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (schematics *SchematicsV1) SetDefaultHeaders(headers http.Header) { + schematics.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (schematics *SchematicsV1) SetEnableGzipCompression(enableGzip bool) { + schematics.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (schematics *SchematicsV1) GetEnableGzipCompression() bool { + return schematics.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (schematics *SchematicsV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + schematics.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (schematics *SchematicsV1) DisableRetries() { + schematics.Service.DisableRetries() +} + +// ListSchematicsLocation : List supported schematics locations +// List supported schematics locations. 
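+//
+// Illustrative sketch, not generated code: constructing the client and listing
+// locations. The IamAuthenticator and its API key value are placeholders.
+//   schematics, err := NewSchematicsV1(&SchematicsV1Options{
+//     Authenticator: &core.IamAuthenticator{ApiKey: "<ibmcloud-api-key>"},
+//   })
+//   if err != nil {
+//     panic(err)
+//   }
+//   locations, _, err := schematics.ListSchematicsLocation(&ListSchematicsLocationOptions{})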
+func (schematics *SchematicsV1) ListSchematicsLocation(listSchematicsLocationOptions *ListSchematicsLocationOptions) (result []SchematicsLocations, response *core.DetailedResponse, err error) { + return schematics.ListSchematicsLocationWithContext(context.Background(), listSchematicsLocationOptions) +} + +// ListSchematicsLocationWithContext is an alternate form of the ListSchematicsLocation method which supports a Context parameter +func (schematics *SchematicsV1) ListSchematicsLocationWithContext(ctx context.Context, listSchematicsLocationOptions *ListSchematicsLocationOptions) (result []SchematicsLocations, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listSchematicsLocationOptions, "listSchematicsLocationOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/locations`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listSchematicsLocationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ListSchematicsLocation") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSchematicsLocations) + if err != nil { + return + } + response.Result = result + + return +} + +// ListResourceGroup : List of resource groups in the Account +// List of resource groups in the Account. 
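+//
+// Sketch (assumes a configured *SchematicsV1 named schematics):
+//   groups, _, err := schematics.ListResourceGroup(&ListResourceGroupOptions{})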
+func (schematics *SchematicsV1) ListResourceGroup(listResourceGroupOptions *ListResourceGroupOptions) (result []ResourceGroupResponse, response *core.DetailedResponse, err error) { + return schematics.ListResourceGroupWithContext(context.Background(), listResourceGroupOptions) +} + +// ListResourceGroupWithContext is an alternate form of the ListResourceGroup method which supports a Context parameter +func (schematics *SchematicsV1) ListResourceGroupWithContext(ctx context.Context, listResourceGroupOptions *ListResourceGroupOptions) (result []ResourceGroupResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listResourceGroupOptions, "listResourceGroupOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/resource_groups`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listResourceGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ListResourceGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalResourceGroupResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSchematicsVersion : Get schematics version +// Get schematics version. 
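+//
+// Sketch (assumes a configured *SchematicsV1 named schematics):
+//   version, _, err := schematics.GetSchematicsVersion(&GetSchematicsVersionOptions{})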
+func (schematics *SchematicsV1) GetSchematicsVersion(getSchematicsVersionOptions *GetSchematicsVersionOptions) (result *VersionResponse, response *core.DetailedResponse, err error) { + return schematics.GetSchematicsVersionWithContext(context.Background(), getSchematicsVersionOptions) +} + +// GetSchematicsVersionWithContext is an alternate form of the GetSchematicsVersion method which supports a Context parameter +func (schematics *SchematicsV1) GetSchematicsVersionWithContext(ctx context.Context, getSchematicsVersionOptions *GetSchematicsVersionOptions) (result *VersionResponse, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(getSchematicsVersionOptions, "getSchematicsVersionOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/version`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getSchematicsVersionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetSchematicsVersion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVersionResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// ListWorkspaces : List all workspace definitions +// List all workspace definitions. 
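+//
+// Sketch (assumes a configured *SchematicsV1 named schematics; Offset and
+// Limit are assumed to be *int64 fields, hence core.Int64Ptr):
+//   workspaces, _, err := schematics.ListWorkspaces(&ListWorkspacesOptions{
+//     Offset: core.Int64Ptr(0),
+//     Limit:  core.Int64Ptr(100),
+//   })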
+func (schematics *SchematicsV1) ListWorkspaces(listWorkspacesOptions *ListWorkspacesOptions) (result *WorkspaceResponseList, response *core.DetailedResponse, err error) { + return schematics.ListWorkspacesWithContext(context.Background(), listWorkspacesOptions) +} + +// ListWorkspacesWithContext is an alternate form of the ListWorkspaces method which supports a Context parameter +func (schematics *SchematicsV1) ListWorkspacesWithContext(ctx context.Context, listWorkspacesOptions *ListWorkspacesOptions) (result *WorkspaceResponseList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listWorkspacesOptions, "listWorkspacesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listWorkspacesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ListWorkspaces") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listWorkspacesOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listWorkspacesOptions.Offset)) + } + if listWorkspacesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listWorkspacesOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceResponseList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateWorkspace : Create workspace definition +// Create workspace definition. 
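+//
+// Sketch (assumes a configured *SchematicsV1 named schematics; the field
+// values are placeholders, and the options struct accepts many more fields
+// than shown):
+//   workspace, _, err := schematics.CreateWorkspace(&CreateWorkspaceOptions{
+//     Name:        core.StringPtr("my-workspace"),
+//     Description: core.StringPtr("created via the Go SDK"),
+//   })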
+func (schematics *SchematicsV1) CreateWorkspace(createWorkspaceOptions *CreateWorkspaceOptions) (result *WorkspaceResponse, response *core.DetailedResponse, err error) { + return schematics.CreateWorkspaceWithContext(context.Background(), createWorkspaceOptions) +} + +// CreateWorkspaceWithContext is an alternate form of the CreateWorkspace method which supports a Context parameter +func (schematics *SchematicsV1) CreateWorkspaceWithContext(ctx context.Context, createWorkspaceOptions *CreateWorkspaceOptions) (result *WorkspaceResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createWorkspaceOptions, "createWorkspaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createWorkspaceOptions, "createWorkspaceOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createWorkspaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "CreateWorkspace") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createWorkspaceOptions.XGithubToken != nil { + builder.AddHeader("X-Github-token", fmt.Sprint(*createWorkspaceOptions.XGithubToken)) + } + + body := make(map[string]interface{}) + if createWorkspaceOptions.AppliedShareddataIds != nil { + body["applied_shareddata_ids"] = createWorkspaceOptions.AppliedShareddataIds + } + if createWorkspaceOptions.CatalogRef != nil { + body["catalog_ref"] = createWorkspaceOptions.CatalogRef + } + if createWorkspaceOptions.Description != nil { + body["description"] = createWorkspaceOptions.Description + } + if createWorkspaceOptions.Location != nil { + body["location"] = createWorkspaceOptions.Location + } + if createWorkspaceOptions.Name != nil { + body["name"] = createWorkspaceOptions.Name + } + if createWorkspaceOptions.ResourceGroup != nil { + body["resource_group"] = createWorkspaceOptions.ResourceGroup + } + if createWorkspaceOptions.SharedData != nil { + body["shared_data"] = createWorkspaceOptions.SharedData + } + if createWorkspaceOptions.Tags != nil { + body["tags"] = createWorkspaceOptions.Tags + } + if createWorkspaceOptions.TemplateData != nil { + body["template_data"] = createWorkspaceOptions.TemplateData + } + if createWorkspaceOptions.TemplateRef != nil { + body["template_ref"] = createWorkspaceOptions.TemplateRef + } + if createWorkspaceOptions.TemplateRepo != nil { + body["template_repo"] = createWorkspaceOptions.TemplateRepo + } + if createWorkspaceOptions.Type != nil { + body["type"] = createWorkspaceOptions.Type + } + if createWorkspaceOptions.WorkspaceStatus != nil { + body["workspace_status"] = createWorkspaceOptions.WorkspaceStatus + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceResponse) + if err != nil { + return + } + response.Result = 
result + + return +} + +// GetWorkspace : Get workspace definition +// Get workspace definition. +func (schematics *SchematicsV1) GetWorkspace(getWorkspaceOptions *GetWorkspaceOptions) (result *WorkspaceResponse, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceWithContext(context.Background(), getWorkspaceOptions) +} + +// GetWorkspaceWithContext is an alternate form of the GetWorkspace method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceWithContext(ctx context.Context, getWorkspaceOptions *GetWorkspaceOptions) (result *WorkspaceResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceOptions, "getWorkspaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceOptions, "getWorkspaceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceOptions.WID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspace") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// ReplaceWorkspace : Replace the workspace definition +// Replace the workspace definition. 
+func (schematics *SchematicsV1) ReplaceWorkspace(replaceWorkspaceOptions *ReplaceWorkspaceOptions) (result *WorkspaceResponse, response *core.DetailedResponse, err error) {
+	return schematics.ReplaceWorkspaceWithContext(context.Background(), replaceWorkspaceOptions)
+}
+
+// ReplaceWorkspaceWithContext is an alternate form of the ReplaceWorkspace method which supports a Context parameter
+func (schematics *SchematicsV1) ReplaceWorkspaceWithContext(ctx context.Context, replaceWorkspaceOptions *ReplaceWorkspaceOptions) (result *WorkspaceResponse, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(replaceWorkspaceOptions, "replaceWorkspaceOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(replaceWorkspaceOptions, "replaceWorkspaceOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"w_id": *replaceWorkspaceOptions.WID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = schematics.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range replaceWorkspaceOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ReplaceWorkspace")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	body := make(map[string]interface{})
+	if replaceWorkspaceOptions.CatalogRef != nil {
+		body["catalog_ref"] = replaceWorkspaceOptions.CatalogRef
+	}
+	if replaceWorkspaceOptions.Description != nil {
+		body["description"] = replaceWorkspaceOptions.Description
+	}
+	if replaceWorkspaceOptions.Name != nil {
+		body["name"] = replaceWorkspaceOptions.Name
+	}
+	if replaceWorkspaceOptions.SharedData != nil {
+		body["shared_data"] = replaceWorkspaceOptions.SharedData
+	}
+	if replaceWorkspaceOptions.Tags != nil {
+		body["tags"] = replaceWorkspaceOptions.Tags
+	}
+	if replaceWorkspaceOptions.TemplateData != nil {
+		body["template_data"] = replaceWorkspaceOptions.TemplateData
+	}
+	if replaceWorkspaceOptions.TemplateRepo != nil {
+		body["template_repo"] = replaceWorkspaceOptions.TemplateRepo
+	}
+	if replaceWorkspaceOptions.Type != nil {
+		body["type"] = replaceWorkspaceOptions.Type
+	}
+	if replaceWorkspaceOptions.WorkspaceStatus != nil {
+		body["workspace_status"] = replaceWorkspaceOptions.WorkspaceStatus
+	}
+	if replaceWorkspaceOptions.WorkspaceStatusMsg != nil {
+		body["workspace_status_msg"] = replaceWorkspaceOptions.WorkspaceStatusMsg
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = schematics.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceResponse)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// DeleteWorkspace : Delete a workspace definition
+// Delete a workspace definition. Use destroy_resources='true' to destroy the related cloud resources.
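+//
+// Illustrative usage (editor's sketch): delete a workspace and destroy its
+// cloud resources. The workspace ID and IAM refresh token are placeholders,
+// and the DestroyResources field is assumed to take the string "true",
+// mirroring the destroy_resources query parameter handled below.
+//
+//	deleteOptions := &DeleteWorkspaceOptions{
+//		WID:              core.StringPtr(workspaceID),
+//		RefreshToken:     core.StringPtr(refreshToken),
+//		DestroyResources: core.StringPtr("true"),
+//	}
+//	status, _, err := schematics.DeleteWorkspace(deleteOptions)
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(*status) // the service returns a status string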
+func (schematics *SchematicsV1) DeleteWorkspace(deleteWorkspaceOptions *DeleteWorkspaceOptions) (result *string, response *core.DetailedResponse, err error) { + return schematics.DeleteWorkspaceWithContext(context.Background(), deleteWorkspaceOptions) +} + +// DeleteWorkspaceWithContext is an alternate form of the DeleteWorkspace method which supports a Context parameter +func (schematics *SchematicsV1) DeleteWorkspaceWithContext(ctx context.Context, deleteWorkspaceOptions *DeleteWorkspaceOptions) (result *string, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteWorkspaceOptions, "deleteWorkspaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteWorkspaceOptions, "deleteWorkspaceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *deleteWorkspaceOptions.WID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteWorkspaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "DeleteWorkspace") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if deleteWorkspaceOptions.RefreshToken != nil { + builder.AddHeader("refresh_token", fmt.Sprint(*deleteWorkspaceOptions.RefreshToken)) + } + + if deleteWorkspaceOptions.DestroyResources != nil { + builder.AddQuery("destroy_resources", fmt.Sprint(*deleteWorkspaceOptions.DestroyResources)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = schematics.Service.Request(request, &result) + + return +} + +// UpdateWorkspace : Update the workspace definition +// Update the workspace definition. 
+func (schematics *SchematicsV1) UpdateWorkspace(updateWorkspaceOptions *UpdateWorkspaceOptions) (result *WorkspaceResponse, response *core.DetailedResponse, err error) { + return schematics.UpdateWorkspaceWithContext(context.Background(), updateWorkspaceOptions) +} + +// UpdateWorkspaceWithContext is an alternate form of the UpdateWorkspace method which supports a Context parameter +func (schematics *SchematicsV1) UpdateWorkspaceWithContext(ctx context.Context, updateWorkspaceOptions *UpdateWorkspaceOptions) (result *WorkspaceResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateWorkspaceOptions, "updateWorkspaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateWorkspaceOptions, "updateWorkspaceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *updateWorkspaceOptions.WID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateWorkspaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "UpdateWorkspace") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateWorkspaceOptions.CatalogRef != nil { + body["catalog_ref"] = updateWorkspaceOptions.CatalogRef + } + if updateWorkspaceOptions.Description != nil { + body["description"] = updateWorkspaceOptions.Description + } + if updateWorkspaceOptions.Name != nil { + body["name"] = updateWorkspaceOptions.Name + } + if updateWorkspaceOptions.SharedData != nil { + body["shared_data"] = updateWorkspaceOptions.SharedData + } + if updateWorkspaceOptions.Tags != nil { + body["tags"] = updateWorkspaceOptions.Tags + } + if updateWorkspaceOptions.TemplateData != nil { + body["template_data"] = updateWorkspaceOptions.TemplateData + } + if updateWorkspaceOptions.TemplateRepo != nil { + body["template_repo"] = updateWorkspaceOptions.TemplateRepo + } + if updateWorkspaceOptions.Type != nil { + body["type"] = updateWorkspaceOptions.Type + } + if updateWorkspaceOptions.WorkspaceStatus != nil { + body["workspace_status"] = updateWorkspaceOptions.WorkspaceStatus + } + if updateWorkspaceOptions.WorkspaceStatusMsg != nil { + body["workspace_status_msg"] = updateWorkspaceOptions.WorkspaceStatusMsg + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// UploadTemplateTar : Upload template tar file for the workspace +// Upload template tar file for the workspace. 
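+//
+// Illustrative usage (editor's sketch): upload a local template tar archive.
+// The workspace ID, template ID, and file path are placeholders; the File
+// field is assumed to accept an io.ReadCloser, which *os.File satisfies.
+//
+//	archive, err := os.Open("template.tar")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer archive.Close()
+//	uploadOptions := &UploadTemplateTarOptions{
+//		WID:             core.StringPtr(workspaceID),
+//		TID:             core.StringPtr(templateID),
+//		File:            archive,
+//		FileContentType: core.StringPtr("application/octet-stream"),
+//	}
+//	_, _, err = schematics.UploadTemplateTar(uploadOptions)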
+func (schematics *SchematicsV1) UploadTemplateTar(uploadTemplateTarOptions *UploadTemplateTarOptions) (result *TemplateRepoTarUploadResponse, response *core.DetailedResponse, err error) {
+	return schematics.UploadTemplateTarWithContext(context.Background(), uploadTemplateTarOptions)
+}
+
+// UploadTemplateTarWithContext is an alternate form of the UploadTemplateTar method which supports a Context parameter
+func (schematics *SchematicsV1) UploadTemplateTarWithContext(ctx context.Context, uploadTemplateTarOptions *UploadTemplateTarOptions) (result *TemplateRepoTarUploadResponse, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(uploadTemplateTarOptions, "uploadTemplateTarOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(uploadTemplateTarOptions, "uploadTemplateTarOptions")
+	if err != nil {
+		return
+	}
+	if uploadTemplateTarOptions.File == nil {
+		err = fmt.Errorf("the file parameter must be supplied")
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"w_id": *uploadTemplateTarOptions.WID,
+		"t_id": *uploadTemplateTarOptions.TID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = schematics.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/template_data/{t_id}/template_repo_upload`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range uploadTemplateTarOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("schematics", "V1", "UploadTemplateTar")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	if uploadTemplateTarOptions.File != nil {
+		builder.AddFormData("file", "filename",
+			core.StringNilMapper(uploadTemplateTarOptions.FileContentType), uploadTemplateTarOptions.File)
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = schematics.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTemplateRepoTarUploadResponse)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// GetWorkspaceReadme : Get the workspace readme
+// Get the workspace readme.
+func (schematics *SchematicsV1) GetWorkspaceReadme(getWorkspaceReadmeOptions *GetWorkspaceReadmeOptions) (result *TemplateReadme, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceReadmeWithContext(context.Background(), getWorkspaceReadmeOptions) +} + +// GetWorkspaceReadmeWithContext is an alternate form of the GetWorkspaceReadme method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceReadmeWithContext(ctx context.Context, getWorkspaceReadmeOptions *GetWorkspaceReadmeOptions) (result *TemplateReadme, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceReadmeOptions, "getWorkspaceReadmeOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceReadmeOptions, "getWorkspaceReadmeOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceReadmeOptions.WID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/templates/readme`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceReadmeOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceReadme") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getWorkspaceReadmeOptions.Ref != nil { + builder.AddQuery("ref", fmt.Sprint(*getWorkspaceReadmeOptions.Ref)) + } + if getWorkspaceReadmeOptions.Formatted != nil { + builder.AddQuery("formatted", fmt.Sprint(*getWorkspaceReadmeOptions.Formatted)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTemplateReadme) + if err != nil { + return + } + response.Result = result + + return +} + +// ListWorkspaceActivities : List all workspace activities +// List all workspace activities. 
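+//
+// Illustrative usage (editor's sketch): page through a workspace's activities
+// using the offset and limit query parameters handled below; the workspace ID
+// is a placeholder.
+//
+//	activitiesOptions := &ListWorkspaceActivitiesOptions{
+//		WID:    core.StringPtr(workspaceID),
+//		Offset: core.Int64Ptr(0),
+//		Limit:  core.Int64Ptr(25),
+//	}
+//	activities, _, err := schematics.ListWorkspaceActivities(activitiesOptions)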
+func (schematics *SchematicsV1) ListWorkspaceActivities(listWorkspaceActivitiesOptions *ListWorkspaceActivitiesOptions) (result *WorkspaceActivities, response *core.DetailedResponse, err error) { + return schematics.ListWorkspaceActivitiesWithContext(context.Background(), listWorkspaceActivitiesOptions) +} + +// ListWorkspaceActivitiesWithContext is an alternate form of the ListWorkspaceActivities method which supports a Context parameter +func (schematics *SchematicsV1) ListWorkspaceActivitiesWithContext(ctx context.Context, listWorkspaceActivitiesOptions *ListWorkspaceActivitiesOptions) (result *WorkspaceActivities, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listWorkspaceActivitiesOptions, "listWorkspaceActivitiesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listWorkspaceActivitiesOptions, "listWorkspaceActivitiesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *listWorkspaceActivitiesOptions.WID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/actions`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listWorkspaceActivitiesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ListWorkspaceActivities") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listWorkspaceActivitiesOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listWorkspaceActivitiesOptions.Offset)) + } + if listWorkspaceActivitiesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listWorkspaceActivitiesOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceActivities) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWorkspaceActivity : Get workspace activity details +// Get workspace activity details. 
+func (schematics *SchematicsV1) GetWorkspaceActivity(getWorkspaceActivityOptions *GetWorkspaceActivityOptions) (result *WorkspaceActivity, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceActivityWithContext(context.Background(), getWorkspaceActivityOptions) +} + +// GetWorkspaceActivityWithContext is an alternate form of the GetWorkspaceActivity method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceActivityWithContext(ctx context.Context, getWorkspaceActivityOptions *GetWorkspaceActivityOptions) (result *WorkspaceActivity, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceActivityOptions, "getWorkspaceActivityOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceActivityOptions, "getWorkspaceActivityOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceActivityOptions.WID, + "activity_id": *getWorkspaceActivityOptions.ActivityID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/actions/{activity_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceActivityOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceActivity") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceActivity) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteWorkspaceActivity : Stop the workspace activity +// Stop the workspace activity. 
+func (schematics *SchematicsV1) DeleteWorkspaceActivity(deleteWorkspaceActivityOptions *DeleteWorkspaceActivityOptions) (result *WorkspaceActivityApplyResult, response *core.DetailedResponse, err error) {
+	return schematics.DeleteWorkspaceActivityWithContext(context.Background(), deleteWorkspaceActivityOptions)
+}
+
+// DeleteWorkspaceActivityWithContext is an alternate form of the DeleteWorkspaceActivity method which supports a Context parameter
+func (schematics *SchematicsV1) DeleteWorkspaceActivityWithContext(ctx context.Context, deleteWorkspaceActivityOptions *DeleteWorkspaceActivityOptions) (result *WorkspaceActivityApplyResult, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteWorkspaceActivityOptions, "deleteWorkspaceActivityOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteWorkspaceActivityOptions, "deleteWorkspaceActivityOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"w_id":        *deleteWorkspaceActivityOptions.WID,
+		"activity_id": *deleteWorkspaceActivityOptions.ActivityID,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = schematics.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/actions/{activity_id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteWorkspaceActivityOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("schematics", "V1", "DeleteWorkspaceActivity")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = schematics.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceActivityApplyResult)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// RunWorkspaceCommands : Run Terraform commands
+// Run Terraform commands on workspaces.
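+//
+// Illustrative usage (editor's sketch): run an ad hoc Terraform command
+// against a workspace. The TerraformCommand model and its field names are
+// assumed from elsewhere in this SDK; IDs, the refresh token, and the command
+// strings are placeholders.
+//
+//	commandsOptions := &RunWorkspaceCommandsOptions{
+//		WID:           core.StringPtr(workspaceID),
+//		RefreshToken:  core.StringPtr(refreshToken),
+//		OperationName: core.StringPtr("workspace_command"),
+//		Description:   core.StringPtr("inspect current state"),
+//		Commands: []TerraformCommand{{
+//			Command:     core.StringPtr("state list"),
+//			CommandName: core.StringPtr("state_list"),
+//		}},
+//	}
+//	commandResult, _, err := schematics.RunWorkspaceCommands(commandsOptions)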
+func (schematics *SchematicsV1) RunWorkspaceCommands(runWorkspaceCommandsOptions *RunWorkspaceCommandsOptions) (result *WorkspaceActivityCommandResult, response *core.DetailedResponse, err error) { + return schematics.RunWorkspaceCommandsWithContext(context.Background(), runWorkspaceCommandsOptions) +} + +// RunWorkspaceCommandsWithContext is an alternate form of the RunWorkspaceCommands method which supports a Context parameter +func (schematics *SchematicsV1) RunWorkspaceCommandsWithContext(ctx context.Context, runWorkspaceCommandsOptions *RunWorkspaceCommandsOptions) (result *WorkspaceActivityCommandResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(runWorkspaceCommandsOptions, "runWorkspaceCommandsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(runWorkspaceCommandsOptions, "runWorkspaceCommandsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *runWorkspaceCommandsOptions.WID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/commands`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range runWorkspaceCommandsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "RunWorkspaceCommands") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if runWorkspaceCommandsOptions.RefreshToken != nil { + builder.AddHeader("refresh_token", fmt.Sprint(*runWorkspaceCommandsOptions.RefreshToken)) + } + + body := make(map[string]interface{}) + if runWorkspaceCommandsOptions.Commands != nil { + body["commands"] = runWorkspaceCommandsOptions.Commands + } + if runWorkspaceCommandsOptions.OperationName != nil { + body["operation_name"] = runWorkspaceCommandsOptions.OperationName + } + if runWorkspaceCommandsOptions.Description != nil { + body["description"] = runWorkspaceCommandsOptions.Description + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceActivityCommandResult) + if err != nil { + return + } + response.Result = result + + return +} + +// ApplyWorkspaceCommand : Run schematics workspace 'apply' activity +// Run schematics workspace 'apply' activity. 
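+//
+// Illustrative usage (editor's sketch): queue an apply activity for a
+// workspace. The refresh_token header set below comes from the RefreshToken
+// field; the IDs and token are placeholders.
+//
+//	applyOptions := &ApplyWorkspaceCommandOptions{
+//		WID:          core.StringPtr(workspaceID),
+//		RefreshToken: core.StringPtr(refreshToken),
+//	}
+//	applyResult, detailedResponse, err := schematics.ApplyWorkspaceCommand(applyOptions)
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(detailedResponse.StatusCode)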
+func (schematics *SchematicsV1) ApplyWorkspaceCommand(applyWorkspaceCommandOptions *ApplyWorkspaceCommandOptions) (result *WorkspaceActivityApplyResult, response *core.DetailedResponse, err error) { + return schematics.ApplyWorkspaceCommandWithContext(context.Background(), applyWorkspaceCommandOptions) +} + +// ApplyWorkspaceCommandWithContext is an alternate form of the ApplyWorkspaceCommand method which supports a Context parameter +func (schematics *SchematicsV1) ApplyWorkspaceCommandWithContext(ctx context.Context, applyWorkspaceCommandOptions *ApplyWorkspaceCommandOptions) (result *WorkspaceActivityApplyResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(applyWorkspaceCommandOptions, "applyWorkspaceCommandOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(applyWorkspaceCommandOptions, "applyWorkspaceCommandOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *applyWorkspaceCommandOptions.WID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/apply`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range applyWorkspaceCommandOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ApplyWorkspaceCommand") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if applyWorkspaceCommandOptions.RefreshToken != nil { + builder.AddHeader("refresh_token", fmt.Sprint(*applyWorkspaceCommandOptions.RefreshToken)) + } + + body := make(map[string]interface{}) + if applyWorkspaceCommandOptions.ActionOptions != nil { + body["action_options"] = applyWorkspaceCommandOptions.ActionOptions + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceActivityApplyResult) + if err != nil { + return + } + response.Result = result + + return +} + +// DestroyWorkspaceCommand : Run workspace 'destroy' activity +// Run workspace 'destroy' activity, to destroy all the resources associated with the workspace. WARNING: This action +// cannot be reversed. 
+func (schematics *SchematicsV1) DestroyWorkspaceCommand(destroyWorkspaceCommandOptions *DestroyWorkspaceCommandOptions) (result *WorkspaceActivityDestroyResult, response *core.DetailedResponse, err error) {
+	return schematics.DestroyWorkspaceCommandWithContext(context.Background(), destroyWorkspaceCommandOptions)
+}
+
+// DestroyWorkspaceCommandWithContext is an alternate form of the DestroyWorkspaceCommand method which supports a Context parameter
+func (schematics *SchematicsV1) DestroyWorkspaceCommandWithContext(ctx context.Context, destroyWorkspaceCommandOptions *DestroyWorkspaceCommandOptions) (result *WorkspaceActivityDestroyResult, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(destroyWorkspaceCommandOptions, "destroyWorkspaceCommandOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(destroyWorkspaceCommandOptions, "destroyWorkspaceCommandOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"w_id": *destroyWorkspaceCommandOptions.WID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = schematics.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/destroy`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range destroyWorkspaceCommandOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("schematics", "V1", "DestroyWorkspaceCommand")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+	if destroyWorkspaceCommandOptions.RefreshToken != nil {
+		builder.AddHeader("refresh_token", fmt.Sprint(*destroyWorkspaceCommandOptions.RefreshToken))
+	}
+
+	body := make(map[string]interface{})
+	if destroyWorkspaceCommandOptions.ActionOptions != nil {
+		body["action_options"] = destroyWorkspaceCommandOptions.ActionOptions
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = schematics.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceActivityDestroyResult)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// PlanWorkspaceCommand : Run workspace 'plan' activity
+// Run schematics workspace 'plan' activity to preview the changes before running an 'apply' activity.
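+//
+// Illustrative usage (editor's sketch): preview changes with a plan activity
+// before applying; unlike apply and destroy, no request body is sent. The ID
+// and refresh token are placeholders.
+//
+//	planOptions := &PlanWorkspaceCommandOptions{
+//		WID:          core.StringPtr(workspaceID),
+//		RefreshToken: core.StringPtr(refreshToken),
+//	}
+//	planResult, _, err := schematics.PlanWorkspaceCommand(planOptions)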
+func (schematics *SchematicsV1) PlanWorkspaceCommand(planWorkspaceCommandOptions *PlanWorkspaceCommandOptions) (result *WorkspaceActivityPlanResult, response *core.DetailedResponse, err error) { + return schematics.PlanWorkspaceCommandWithContext(context.Background(), planWorkspaceCommandOptions) +} + +// PlanWorkspaceCommandWithContext is an alternate form of the PlanWorkspaceCommand method which supports a Context parameter +func (schematics *SchematicsV1) PlanWorkspaceCommandWithContext(ctx context.Context, planWorkspaceCommandOptions *PlanWorkspaceCommandOptions) (result *WorkspaceActivityPlanResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(planWorkspaceCommandOptions, "planWorkspaceCommandOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(planWorkspaceCommandOptions, "planWorkspaceCommandOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *planWorkspaceCommandOptions.WID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/plan`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range planWorkspaceCommandOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "PlanWorkspaceCommand") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if planWorkspaceCommandOptions.RefreshToken != nil { + builder.AddHeader("refresh_token", fmt.Sprint(*planWorkspaceCommandOptions.RefreshToken)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceActivityPlanResult) + if err != nil { + return + } + response.Result = result + + return +} + +// RefreshWorkspaceCommand : Run workspace 'refresh' activity +// Run workspace 'refresh' activity. 
+func (schematics *SchematicsV1) RefreshWorkspaceCommand(refreshWorkspaceCommandOptions *RefreshWorkspaceCommandOptions) (result *WorkspaceActivityRefreshResult, response *core.DetailedResponse, err error) { + return schematics.RefreshWorkspaceCommandWithContext(context.Background(), refreshWorkspaceCommandOptions) +} + +// RefreshWorkspaceCommandWithContext is an alternate form of the RefreshWorkspaceCommand method which supports a Context parameter +func (schematics *SchematicsV1) RefreshWorkspaceCommandWithContext(ctx context.Context, refreshWorkspaceCommandOptions *RefreshWorkspaceCommandOptions) (result *WorkspaceActivityRefreshResult, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(refreshWorkspaceCommandOptions, "refreshWorkspaceCommandOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(refreshWorkspaceCommandOptions, "refreshWorkspaceCommandOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *refreshWorkspaceCommandOptions.WID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/refresh`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range refreshWorkspaceCommandOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "RefreshWorkspaceCommand") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + if refreshWorkspaceCommandOptions.RefreshToken != nil { + builder.AddHeader("refresh_token", fmt.Sprint(*refreshWorkspaceCommandOptions.RefreshToken)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceActivityRefreshResult) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWorkspaceInputs : Get the input values of the workspace +// Get the input values of the workspace. 
+func (schematics *SchematicsV1) GetWorkspaceInputs(getWorkspaceInputsOptions *GetWorkspaceInputsOptions) (result *TemplateValues, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceInputsWithContext(context.Background(), getWorkspaceInputsOptions) +} + +// GetWorkspaceInputsWithContext is an alternate form of the GetWorkspaceInputs method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceInputsWithContext(ctx context.Context, getWorkspaceInputsOptions *GetWorkspaceInputsOptions) (result *TemplateValues, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceInputsOptions, "getWorkspaceInputsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceInputsOptions, "getWorkspaceInputsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceInputsOptions.WID, + "t_id": *getWorkspaceInputsOptions.TID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/template_data/{t_id}/values`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceInputsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceInputs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTemplateValues) + if err != nil { + return + } + response.Result = result + + return +} + +// ReplaceWorkspaceInputs : Replace the input values for the workspace +// Replace the input values for the workspace. 
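+//
+// Illustrative usage (editor's sketch): replace a template's input variables.
+// The WorkspaceVariableRequest model and its field names are assumed from
+// elsewhere in this SDK; the IDs and values are placeholders.
+//
+//	inputsOptions := &ReplaceWorkspaceInputsOptions{
+//		WID: core.StringPtr(workspaceID),
+//		TID: core.StringPtr(templateID),
+//		Variablestore: []WorkspaceVariableRequest{{
+//			Name:  core.StringPtr("region"),
+//			Value: core.StringPtr("us-south"),
+//		}},
+//	}
+//	userValues, _, err := schematics.ReplaceWorkspaceInputs(inputsOptions)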
+func (schematics *SchematicsV1) ReplaceWorkspaceInputs(replaceWorkspaceInputsOptions *ReplaceWorkspaceInputsOptions) (result *UserValues, response *core.DetailedResponse, err error) { + return schematics.ReplaceWorkspaceInputsWithContext(context.Background(), replaceWorkspaceInputsOptions) +} + +// ReplaceWorkspaceInputsWithContext is an alternate form of the ReplaceWorkspaceInputs method which supports a Context parameter +func (schematics *SchematicsV1) ReplaceWorkspaceInputsWithContext(ctx context.Context, replaceWorkspaceInputsOptions *ReplaceWorkspaceInputsOptions) (result *UserValues, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceWorkspaceInputsOptions, "replaceWorkspaceInputsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceWorkspaceInputsOptions, "replaceWorkspaceInputsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *replaceWorkspaceInputsOptions.WID, + "t_id": *replaceWorkspaceInputsOptions.TID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/template_data/{t_id}/values`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range replaceWorkspaceInputsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ReplaceWorkspaceInputs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if replaceWorkspaceInputsOptions.EnvValues != nil { + body["env_values"] = replaceWorkspaceInputsOptions.EnvValues + } + if replaceWorkspaceInputsOptions.Values != nil { + body["values"] = replaceWorkspaceInputsOptions.Values + } + if replaceWorkspaceInputsOptions.Variablestore != nil { + body["variablestore"] = replaceWorkspaceInputsOptions.Variablestore + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalUserValues) + if err != nil { + return + } + response.Result = result + + return +} + +// GetAllWorkspaceInputs : Get all the input values of the workspace +// Get all the input values of the workspace. 
+func (schematics *SchematicsV1) GetAllWorkspaceInputs(getAllWorkspaceInputsOptions *GetAllWorkspaceInputsOptions) (result *WorkspaceTemplateValuesResponse, response *core.DetailedResponse, err error) { + return schematics.GetAllWorkspaceInputsWithContext(context.Background(), getAllWorkspaceInputsOptions) +} + +// GetAllWorkspaceInputsWithContext is an alternate form of the GetAllWorkspaceInputs method which supports a Context parameter +func (schematics *SchematicsV1) GetAllWorkspaceInputsWithContext(ctx context.Context, getAllWorkspaceInputsOptions *GetAllWorkspaceInputsOptions) (result *WorkspaceTemplateValuesResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getAllWorkspaceInputsOptions, "getAllWorkspaceInputsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getAllWorkspaceInputsOptions, "getAllWorkspaceInputsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getAllWorkspaceInputsOptions.WID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/templates/values`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getAllWorkspaceInputsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetAllWorkspaceInputs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceTemplateValuesResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWorkspaceInputMetadata : Get the input metadata of the workspace +// Get the input metadata of the workspace. 
+func (schematics *SchematicsV1) GetWorkspaceInputMetadata(getWorkspaceInputMetadataOptions *GetWorkspaceInputMetadataOptions) (result []interface{}, response *core.DetailedResponse, err error) {
+	return schematics.GetWorkspaceInputMetadataWithContext(context.Background(), getWorkspaceInputMetadataOptions)
+}
+
+// GetWorkspaceInputMetadataWithContext is an alternate form of the GetWorkspaceInputMetadata method which supports a Context parameter
+func (schematics *SchematicsV1) GetWorkspaceInputMetadataWithContext(ctx context.Context, getWorkspaceInputMetadataOptions *GetWorkspaceInputMetadataOptions) (result []interface{}, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(getWorkspaceInputMetadataOptions, "getWorkspaceInputMetadataOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(getWorkspaceInputMetadataOptions, "getWorkspaceInputMetadataOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"w_id": *getWorkspaceInputMetadataOptions.WID,
+		"t_id": *getWorkspaceInputMetadataOptions.TID,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = schematics.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/template_data/{t_id}/values_metadata`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getWorkspaceInputMetadataOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceInputMetadata")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = schematics.Service.Request(request, &result)
+
+	return
+}
+
+// GetWorkspaceOutputs : Get all the output values of the workspace
+// Get all the output values from your workspace (for example, the result of the 'terraform output' command).
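+//
+// Illustrative usage (editor's sketch): fetch the output values recorded for
+// each template in a workspace; the workspace ID is a placeholder.
+//
+//	outputs, _, err := schematics.GetWorkspaceOutputs(&GetWorkspaceOutputsOptions{
+//		WID: core.StringPtr(workspaceID),
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%d templates reported outputs\n", len(outputs))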
+func (schematics *SchematicsV1) GetWorkspaceOutputs(getWorkspaceOutputsOptions *GetWorkspaceOutputsOptions) (result []OutputValuesItem, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceOutputsWithContext(context.Background(), getWorkspaceOutputsOptions) +} + +// GetWorkspaceOutputsWithContext is an alternate form of the GetWorkspaceOutputs method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceOutputsWithContext(ctx context.Context, getWorkspaceOutputsOptions *GetWorkspaceOutputsOptions) (result []OutputValuesItem, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceOutputsOptions, "getWorkspaceOutputsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceOutputsOptions, "getWorkspaceOutputsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceOutputsOptions.WID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/output_values`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceOutputsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceOutputs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOutputValuesItem) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWorkspaceResources : Get all the resources created by the workspace +// Get all the resources created by the workspace. 
+func (schematics *SchematicsV1) GetWorkspaceResources(getWorkspaceResourcesOptions *GetWorkspaceResourcesOptions) (result []TemplateResources, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceResourcesWithContext(context.Background(), getWorkspaceResourcesOptions) +} + +// GetWorkspaceResourcesWithContext is an alternate form of the GetWorkspaceResources method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceResourcesWithContext(ctx context.Context, getWorkspaceResourcesOptions *GetWorkspaceResourcesOptions) (result []TemplateResources, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceResourcesOptions, "getWorkspaceResourcesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceResourcesOptions, "getWorkspaceResourcesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceResourcesOptions.WID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/resources`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceResourcesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceResources") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse []json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTemplateResources) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWorkspaceState : Get the workspace state +// Get the workspace state. 
+func (schematics *SchematicsV1) GetWorkspaceState(getWorkspaceStateOptions *GetWorkspaceStateOptions) (result *StateStoreResponseList, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceStateWithContext(context.Background(), getWorkspaceStateOptions) +} + +// GetWorkspaceStateWithContext is an alternate form of the GetWorkspaceState method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceStateWithContext(ctx context.Context, getWorkspaceStateOptions *GetWorkspaceStateOptions) (result *StateStoreResponseList, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceStateOptions, "getWorkspaceStateOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceStateOptions, "getWorkspaceStateOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceStateOptions.WID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/state_stores`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceStateOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceState") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalStateStoreResponseList) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWorkspaceTemplateState : Get the template state +// Get the template state. 
+func (schematics *SchematicsV1) GetWorkspaceTemplateState(getWorkspaceTemplateStateOptions *GetWorkspaceTemplateStateOptions) (result *TemplateStateStore, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceTemplateStateWithContext(context.Background(), getWorkspaceTemplateStateOptions) +} + +// GetWorkspaceTemplateStateWithContext is an alternate form of the GetWorkspaceTemplateState method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceTemplateStateWithContext(ctx context.Context, getWorkspaceTemplateStateOptions *GetWorkspaceTemplateStateOptions) (result *TemplateStateStore, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceTemplateStateOptions, "getWorkspaceTemplateStateOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceTemplateStateOptions, "getWorkspaceTemplateStateOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceTemplateStateOptions.WID, + "t_id": *getWorkspaceTemplateStateOptions.TID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/runtime_data/{t_id}/state_store`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceTemplateStateOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceTemplateState") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + // The response is the terraform statefile and the structure + // can change between versions. So unmarshalling using a fixed + // schema is impossible. Hence the result is sent as json.RawMessage + var rawResponse json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + /*err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalTemplateStateStore) + if err != nil { + return + }*/ + response.Result = rawResponse + + return +} + +// GetWorkspaceActivityLogs : Get the workspace activity log urls +// View an activity log for Terraform actions that ran against your workspace. You can view logs for plan, apply, and +// destroy actions. operationId: get_activity_log_urls. 
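+//
+// Illustrative usage (editor's sketch): fetch the log summary for a single
+// activity, for example one returned by ListWorkspaceActivities; both IDs are
+// placeholders.
+//
+//	logsOptions := &GetWorkspaceActivityLogsOptions{
+//		WID:        core.StringPtr(workspaceID),
+//		ActivityID: core.StringPtr(activityID),
+//	}
+//	activityLogs, _, err := schematics.GetWorkspaceActivityLogs(logsOptions)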
+func (schematics *SchematicsV1) GetWorkspaceActivityLogs(getWorkspaceActivityLogsOptions *GetWorkspaceActivityLogsOptions) (result *WorkspaceActivityLogs, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceActivityLogsWithContext(context.Background(), getWorkspaceActivityLogsOptions) +} + +// GetWorkspaceActivityLogsWithContext is an alternate form of the GetWorkspaceActivityLogs method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceActivityLogsWithContext(ctx context.Context, getWorkspaceActivityLogsOptions *GetWorkspaceActivityLogsOptions) (result *WorkspaceActivityLogs, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceActivityLogsOptions, "getWorkspaceActivityLogsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceActivityLogsOptions, "getWorkspaceActivityLogsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceActivityLogsOptions.WID, + "activity_id": *getWorkspaceActivityLogsOptions.ActivityID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/actions/{activity_id}/logs`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceActivityLogsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceActivityLogs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceActivityLogs) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWorkspaceLogUrls : Get all workspace log urls +// Get all workspace log urls. 
+func (schematics *SchematicsV1) GetWorkspaceLogUrls(getWorkspaceLogUrlsOptions *GetWorkspaceLogUrlsOptions) (result *LogStoreResponseList, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceLogUrlsWithContext(context.Background(), getWorkspaceLogUrlsOptions) +} + +// GetWorkspaceLogUrlsWithContext is an alternate form of the GetWorkspaceLogUrls method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceLogUrlsWithContext(ctx context.Context, getWorkspaceLogUrlsOptions *GetWorkspaceLogUrlsOptions) (result *LogStoreResponseList, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceLogUrlsOptions, "getWorkspaceLogUrlsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceLogUrlsOptions, "getWorkspaceLogUrlsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getWorkspaceLogUrlsOptions.WID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/log_stores`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceLogUrlsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceLogUrls") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLogStoreResponseList) + if err != nil { + return + } + response.Result = result + + return +} + +// GetTemplateLogs : Get all template logs +// Get all template logs. 
+func (schematics *SchematicsV1) GetTemplateLogs(getTemplateLogsOptions *GetTemplateLogsOptions) (result *string, response *core.DetailedResponse, err error) { + return schematics.GetTemplateLogsWithContext(context.Background(), getTemplateLogsOptions) +} + +// GetTemplateLogsWithContext is an alternate form of the GetTemplateLogs method which supports a Context parameter +func (schematics *SchematicsV1) GetTemplateLogsWithContext(ctx context.Context, getTemplateLogsOptions *GetTemplateLogsOptions) (result *string, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getTemplateLogsOptions, "getTemplateLogsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getTemplateLogsOptions, "getTemplateLogsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getTemplateLogsOptions.WID, + "t_id": *getTemplateLogsOptions.TID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/runtime_data/{t_id}/log_store`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getTemplateLogsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetTemplateLogs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getTemplateLogsOptions.LogTfCmd != nil { + builder.AddQuery("log_tf_cmd", fmt.Sprint(*getTemplateLogsOptions.LogTfCmd)) + } + if getTemplateLogsOptions.LogTfPrefix != nil { + builder.AddQuery("log_tf_prefix", fmt.Sprint(*getTemplateLogsOptions.LogTfPrefix)) + } + if getTemplateLogsOptions.LogTfNullResource != nil { + builder.AddQuery("log_tf_null_resource", fmt.Sprint(*getTemplateLogsOptions.LogTfNullResource)) + } + if getTemplateLogsOptions.LogTfAnsible != nil { + builder.AddQuery("log_tf_ansible", fmt.Sprint(*getTemplateLogsOptions.LogTfAnsible)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = schematics.Service.Request(request, &result) + + return +} + +// GetTemplateActivityLog : Get the template activity logs +// View an activity log for Terraform actions that ran for a template against your workspace. You can view logs for +// plan, apply, and destroy actions. 
+func (schematics *SchematicsV1) GetTemplateActivityLog(getTemplateActivityLogOptions *GetTemplateActivityLogOptions) (result *string, response *core.DetailedResponse, err error) { + return schematics.GetTemplateActivityLogWithContext(context.Background(), getTemplateActivityLogOptions) +} + +// GetTemplateActivityLogWithContext is an alternate form of the GetTemplateActivityLog method which supports a Context parameter +func (schematics *SchematicsV1) GetTemplateActivityLogWithContext(ctx context.Context, getTemplateActivityLogOptions *GetTemplateActivityLogOptions) (result *string, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getTemplateActivityLogOptions, "getTemplateActivityLogOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getTemplateActivityLogOptions, "getTemplateActivityLogOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "w_id": *getTemplateActivityLogOptions.WID, + "t_id": *getTemplateActivityLogOptions.TID, + "activity_id": *getTemplateActivityLogOptions.ActivityID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}/runtime_data/{t_id}/log_store/actions/{activity_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getTemplateActivityLogOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetTemplateActivityLog") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getTemplateActivityLogOptions.LogTfCmd != nil { + builder.AddQuery("log_tf_cmd", fmt.Sprint(*getTemplateActivityLogOptions.LogTfCmd)) + } + if getTemplateActivityLogOptions.LogTfPrefix != nil { + builder.AddQuery("log_tf_prefix", fmt.Sprint(*getTemplateActivityLogOptions.LogTfPrefix)) + } + if getTemplateActivityLogOptions.LogTfNullResource != nil { + builder.AddQuery("log_tf_null_resource", fmt.Sprint(*getTemplateActivityLogOptions.LogTfNullResource)) + } + if getTemplateActivityLogOptions.LogTfAnsible != nil { + builder.AddQuery("log_tf_ansible", fmt.Sprint(*getTemplateActivityLogOptions.LogTfAnsible)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = schematics.Service.Request(request, &result) + + return +} + +// CreateWorkspaceDeletionJob : Delete multiple workspaces +// Delete multiple workspaces. Use ?destroy_resources="true" to destroy the related cloud resources, otherwise the +// resources must be managed outside of Schematics.
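The CreateWorkspaceDeletionJob method below needs an IAM refresh token (sent as the refresh_token header) alongside the list of workspaces in the body; the destroy_resources query flag can additionally be set via the DestroyResources option field. A sketch, with the New* field types inferred from the JSON body they populate (an assumption) and hypothetical workspace IDs:

```go
package example

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

func bulkDeleteWorkspaces(schematics *schematicsv1.SchematicsV1, refreshToken string) error {
	opts := &schematicsv1.CreateWorkspaceDeletionJobOptions{
		RefreshToken:        core.StringPtr(refreshToken), // sent as the refresh_token header
		NewDeleteWorkspaces: core.BoolPtr(true),           // field type assumed from the JSON body
		NewWorkspaces: []string{ // hypothetical workspace IDs
			"us-south.workspace.example.11111111",
			"us-south.workspace.example.22222222",
		},
	}
	result, _, err := schematics.CreateWorkspaceDeletionJob(opts)
	if err != nil {
		return err
	}
	fmt.Printf("deletion job: %+v\n", result)
	return nil
}
```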
+func (schematics *SchematicsV1) CreateWorkspaceDeletionJob(createWorkspaceDeletionJobOptions *CreateWorkspaceDeletionJobOptions) (result *WorkspaceBulkDeleteResponse, response *core.DetailedResponse, err error) { + return schematics.CreateWorkspaceDeletionJobWithContext(context.Background(), createWorkspaceDeletionJobOptions) +} + +// CreateWorkspaceDeletionJobWithContext is an alternate form of the CreateWorkspaceDeletionJob method which supports a Context parameter +func (schematics *SchematicsV1) CreateWorkspaceDeletionJobWithContext(ctx context.Context, createWorkspaceDeletionJobOptions *CreateWorkspaceDeletionJobOptions) (result *WorkspaceBulkDeleteResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createWorkspaceDeletionJobOptions, "createWorkspaceDeletionJobOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createWorkspaceDeletionJobOptions, "createWorkspaceDeletionJobOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspace_jobs`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createWorkspaceDeletionJobOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "CreateWorkspaceDeletionJob") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createWorkspaceDeletionJobOptions.RefreshToken != nil { + builder.AddHeader("refresh_token", fmt.Sprint(*createWorkspaceDeletionJobOptions.RefreshToken)) + } + + if createWorkspaceDeletionJobOptions.DestroyResources != nil { + builder.AddQuery("destroy_resources", fmt.Sprint(*createWorkspaceDeletionJobOptions.DestroyResources)) + } + + body := make(map[string]interface{}) + if createWorkspaceDeletionJobOptions.NewDeleteWorkspaces != nil { + body["delete_workspaces"] = createWorkspaceDeletionJobOptions.NewDeleteWorkspaces + } + if createWorkspaceDeletionJobOptions.NewDestroyResources != nil { + body["destroy_resources"] = createWorkspaceDeletionJobOptions.NewDestroyResources + } + if createWorkspaceDeletionJobOptions.NewJob != nil { + body["job"] = createWorkspaceDeletionJobOptions.NewJob + } + if createWorkspaceDeletionJobOptions.NewVersion != nil { + body["version"] = createWorkspaceDeletionJobOptions.NewVersion + } + if createWorkspaceDeletionJobOptions.NewWorkspaces != nil { + body["workspaces"] = createWorkspaceDeletionJobOptions.NewWorkspaces + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceBulkDeleteResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetWorkspaceDeletionJobStatus : Get the workspace deletion job status +// Get the workspace deletion job status. 
+func (schematics *SchematicsV1) GetWorkspaceDeletionJobStatus(getWorkspaceDeletionJobStatusOptions *GetWorkspaceDeletionJobStatusOptions) (result *WorkspaceJobResponse, response *core.DetailedResponse, err error) { + return schematics.GetWorkspaceDeletionJobStatusWithContext(context.Background(), getWorkspaceDeletionJobStatusOptions) +} + +// GetWorkspaceDeletionJobStatusWithContext is an alternate form of the GetWorkspaceDeletionJobStatus method which supports a Context parameter +func (schematics *SchematicsV1) GetWorkspaceDeletionJobStatusWithContext(ctx context.Context, getWorkspaceDeletionJobStatusOptions *GetWorkspaceDeletionJobStatusOptions) (result *WorkspaceJobResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getWorkspaceDeletionJobStatusOptions, "getWorkspaceDeletionJobStatusOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getWorkspaceDeletionJobStatusOptions, "getWorkspaceDeletionJobStatusOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "wj_id": *getWorkspaceDeletionJobStatusOptions.WjID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspace_jobs/{wj_id}/status`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getWorkspaceDeletionJobStatusOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetWorkspaceDeletionJobStatus") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalWorkspaceJobResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateAction : Create an Action definition +// Create a new Action definition. 
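The CreateAction method implemented below optionally forwards an X-Github-token header so that Schematics can read templates from private repositories. A hedged sketch of creating a minimal Action (all field values are placeholders, and the Tags field is assumed to be []string, matching Action.Tags):

```go
package example

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

func createAction(schematics *schematicsv1.SchematicsV1, githubToken string) error {
	opts := &schematicsv1.CreateActionOptions{
		Name:          core.StringPtr("example-action"), // all values are placeholders
		Description:   core.StringPtr("created from the SDK"),
		Location:      core.StringPtr("us-south"),
		ResourceGroup: core.StringPtr("default"),
		Tags:          []string{"sdk", "example"},
	}
	if githubToken != "" {
		// Forwarded as the X-Github-token header for private template repos.
		opts.XGithubToken = core.StringPtr(githubToken)
	}
	action, _, err := schematics.CreateAction(opts)
	if err != nil {
		return err
	}
	fmt.Printf("created action: %+v\n", action)
	return nil
}
```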
+func (schematics *SchematicsV1) CreateAction(createActionOptions *CreateActionOptions) (result *Action, response *core.DetailedResponse, err error) { + return schematics.CreateActionWithContext(context.Background(), createActionOptions) +} + +// CreateActionWithContext is an alternate form of the CreateAction method which supports a Context parameter +func (schematics *SchematicsV1) CreateActionWithContext(ctx context.Context, createActionOptions *CreateActionOptions) (result *Action, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createActionOptions, "createActionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createActionOptions, "createActionOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/actions`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createActionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "CreateAction") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createActionOptions.XGithubToken != nil { + builder.AddHeader("X-Github-token", fmt.Sprint(*createActionOptions.XGithubToken)) + } + + body := make(map[string]interface{}) + if createActionOptions.Name != nil { + body["name"] = createActionOptions.Name + } + if createActionOptions.Description != nil { + body["description"] = createActionOptions.Description + } + if createActionOptions.Location != nil { + body["location"] = createActionOptions.Location + } + if createActionOptions.ResourceGroup != nil { + body["resource_group"] = createActionOptions.ResourceGroup + } + if createActionOptions.Tags != nil { + body["tags"] = createActionOptions.Tags + } + if createActionOptions.UserState != nil { + body["user_state"] = createActionOptions.UserState + } + if createActionOptions.SourceReadmeURL != nil { + body["source_readme_url"] = createActionOptions.SourceReadmeURL + } + if createActionOptions.Source != nil { + body["source"] = createActionOptions.Source + } + if createActionOptions.SourceType != nil { + body["source_type"] = createActionOptions.SourceType + } + if createActionOptions.CommandParameter != nil { + body["command_parameter"] = createActionOptions.CommandParameter + } + if createActionOptions.Bastion != nil { + body["bastion"] = createActionOptions.Bastion + } + if createActionOptions.TargetsIni != nil { + body["targets_ini"] = createActionOptions.TargetsIni + } + if createActionOptions.Credentials != nil { + body["credentials"] = createActionOptions.Credentials + } + if createActionOptions.Inputs != nil { + body["inputs"] = createActionOptions.Inputs + } + if createActionOptions.Outputs != nil { + body["outputs"] = createActionOptions.Outputs + } + if createActionOptions.Settings != nil { + body["settings"] = createActionOptions.Settings + } + if createActionOptions.TriggerRecordID != nil { + body["trigger_record_id"] = createActionOptions.TriggerRecordID + } + if createActionOptions.State != nil { + body["state"] = createActionOptions.State + } + if createActionOptions.SysLock != nil { + body["sys_lock"] = createActionOptions.SysLock + } + _, err = 
builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAction) + if err != nil { + return + } + response.Result = result + + return +} + +// ListActions : Get all the Action definitions +// Get all the Action definitions. +func (schematics *SchematicsV1) ListActions(listActionsOptions *ListActionsOptions) (result *ActionList, response *core.DetailedResponse, err error) { + return schematics.ListActionsWithContext(context.Background(), listActionsOptions) +} + +// ListActionsWithContext is an alternate form of the ListActions method which supports a Context parameter +func (schematics *SchematicsV1) ListActionsWithContext(ctx context.Context, listActionsOptions *ListActionsOptions) (result *ActionList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listActionsOptions, "listActionsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/actions`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listActionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ListActions") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listActionsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listActionsOptions.Offset)) + } + if listActionsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listActionsOptions.Limit)) + } + if listActionsOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listActionsOptions.Sort)) + } + if listActionsOptions.Profile != nil { + builder.AddQuery("profile", fmt.Sprint(*listActionsOptions.Profile)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalActionList) + if err != nil { + return + } + response.Result = result + + return +} + +// GetAction : Get the Action definition +// Get the Action definition. 
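Before the GetAction implementation that follows, here is a sketch of requesting one page from the ListActions method above. Offset and Limit are assumed to be *int64, per the usual conventions of this generator, and the sort and profile values are hypothetical:

```go
package example

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

func listActionsPage(schematics *schematicsv1.SchematicsV1, offset int64) error {
	opts := &schematicsv1.ListActionsOptions{
		Offset:  core.Int64Ptr(offset),     // zero-based item offset; *int64 assumed
		Limit:   core.Int64Ptr(20),         // page size; *int64 assumed
		Sort:    core.StringPtr("name"),    // hypothetical sort key
		Profile: core.StringPtr("summary"), // hypothetical profile value
	}
	page, _, err := schematics.ListActions(opts)
	if err != nil {
		return err
	}
	fmt.Printf("actions page: %+v\n", page)
	return nil
}
```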
+func (schematics *SchematicsV1) GetAction(getActionOptions *GetActionOptions) (result *Action, response *core.DetailedResponse, err error) { + return schematics.GetActionWithContext(context.Background(), getActionOptions) +} + +// GetActionWithContext is an alternate form of the GetAction method which supports a Context parameter +func (schematics *SchematicsV1) GetActionWithContext(ctx context.Context, getActionOptions *GetActionOptions) (result *Action, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getActionOptions, "getActionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getActionOptions, "getActionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "action_id": *getActionOptions.ActionID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/actions/{action_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getActionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetAction") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getActionOptions.Profile != nil { + builder.AddQuery("profile", fmt.Sprint(*getActionOptions.Profile)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAction) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteAction : Delete the Action +// Delete the Action definition. 
+func (schematics *SchematicsV1) DeleteAction(deleteActionOptions *DeleteActionOptions) (response *core.DetailedResponse, err error) { + return schematics.DeleteActionWithContext(context.Background(), deleteActionOptions) +} + +// DeleteActionWithContext is an alternate form of the DeleteAction method which supports a Context parameter +func (schematics *SchematicsV1) DeleteActionWithContext(ctx context.Context, deleteActionOptions *DeleteActionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteActionOptions, "deleteActionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteActionOptions, "deleteActionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "action_id": *deleteActionOptions.ActionID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/actions/{action_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteActionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "DeleteAction") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteActionOptions.Force != nil { + builder.AddHeader("force", fmt.Sprint(*deleteActionOptions.Force)) + } + if deleteActionOptions.Propagate != nil { + builder.AddHeader("propagate", fmt.Sprint(*deleteActionOptions.Propagate)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = schematics.Service.Request(request, nil) + + return +} + +// UpdateAction : Update the Action definition +// Update the Action definition. 
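Before the UpdateAction implementation below, note that the DeleteAction method above is one of the few operations that sends its options (force and propagate) as request headers rather than query parameters, and that it returns no result model. A sketch, assuming both fields are *bool as the fmt.Sprint dereferences suggest:

```go
package example

import (
	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

func forceDeleteAction(schematics *schematicsv1.SchematicsV1, actionID string) error {
	opts := &schematicsv1.DeleteActionOptions{
		ActionID:  core.StringPtr(actionID),
		Force:     core.BoolPtr(true), // sent as the `force` header
		Propagate: core.BoolPtr(true), // sent as the `propagate` header
	}
	// DeleteAction returns only (response, err); there is no result model.
	_, err := schematics.DeleteAction(opts)
	return err
}
```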
+func (schematics *SchematicsV1) UpdateAction(updateActionOptions *UpdateActionOptions) (result *Action, response *core.DetailedResponse, err error) { + return schematics.UpdateActionWithContext(context.Background(), updateActionOptions) +} + +// UpdateActionWithContext is an alternate form of the UpdateAction method which supports a Context parameter +func (schematics *SchematicsV1) UpdateActionWithContext(ctx context.Context, updateActionOptions *UpdateActionOptions) (result *Action, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateActionOptions, "updateActionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateActionOptions, "updateActionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "action_id": *updateActionOptions.ActionID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/actions/{action_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateActionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "UpdateAction") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if updateActionOptions.XGithubToken != nil { + builder.AddHeader("X-Github-token", fmt.Sprint(*updateActionOptions.XGithubToken)) + } + + body := make(map[string]interface{}) + if updateActionOptions.Name != nil { + body["name"] = updateActionOptions.Name + } + if updateActionOptions.Description != nil { + body["description"] = updateActionOptions.Description + } + if updateActionOptions.Location != nil { + body["location"] = updateActionOptions.Location + } + if updateActionOptions.ResourceGroup != nil { + body["resource_group"] = updateActionOptions.ResourceGroup + } + if updateActionOptions.Tags != nil { + body["tags"] = updateActionOptions.Tags + } + if updateActionOptions.UserState != nil { + body["user_state"] = updateActionOptions.UserState + } + if updateActionOptions.SourceReadmeURL != nil { + body["source_readme_url"] = updateActionOptions.SourceReadmeURL + } + if updateActionOptions.Source != nil { + body["source"] = updateActionOptions.Source + } + if updateActionOptions.SourceType != nil { + body["source_type"] = updateActionOptions.SourceType + } + if updateActionOptions.CommandParameter != nil { + body["command_parameter"] = updateActionOptions.CommandParameter + } + if updateActionOptions.Bastion != nil { + body["bastion"] = updateActionOptions.Bastion + } + if updateActionOptions.TargetsIni != nil { + body["targets_ini"] = updateActionOptions.TargetsIni + } + if updateActionOptions.Credentials != nil { + body["credentials"] = updateActionOptions.Credentials + } + if updateActionOptions.Inputs != nil { + body["inputs"] = updateActionOptions.Inputs + } + if updateActionOptions.Outputs != nil { + body["outputs"] = updateActionOptions.Outputs + } + if updateActionOptions.Settings != nil { + body["settings"] = updateActionOptions.Settings + } + if updateActionOptions.TriggerRecordID != nil { + body["trigger_record_id"] = updateActionOptions.TriggerRecordID + } + if updateActionOptions.State != nil { + body["state"] = updateActionOptions.State + } + if 
updateActionOptions.SysLock != nil { + body["sys_lock"] = updateActionOptions.SysLock + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAction) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateJob : Create a Job record and launch the Job +// Create a Job record and launch the Job. +func (schematics *SchematicsV1) CreateJob(createJobOptions *CreateJobOptions) (result *Job, response *core.DetailedResponse, err error) { + return schematics.CreateJobWithContext(context.Background(), createJobOptions) +} + +// CreateJobWithContext is an alternate form of the CreateJob method which supports a Context parameter +func (schematics *SchematicsV1) CreateJobWithContext(ctx context.Context, createJobOptions *CreateJobOptions) (result *Job, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createJobOptions, "createJobOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createJobOptions, "createJobOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/jobs`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createJobOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "CreateJob") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if createJobOptions.RefreshToken != nil { + builder.AddHeader("refresh_token", fmt.Sprint(*createJobOptions.RefreshToken)) + } + + body := make(map[string]interface{}) + if createJobOptions.CommandObject != nil { + body["command_object"] = createJobOptions.CommandObject + } + if createJobOptions.CommandObjectID != nil { + body["command_object_id"] = createJobOptions.CommandObjectID + } + if createJobOptions.CommandName != nil { + body["command_name"] = createJobOptions.CommandName + } + if createJobOptions.CommandParameter != nil { + body["command_parameter"] = createJobOptions.CommandParameter + } + if createJobOptions.CommandOptions != nil { + body["command_options"] = createJobOptions.CommandOptions + } + if createJobOptions.Inputs != nil { + body["inputs"] = createJobOptions.Inputs + } + if createJobOptions.Settings != nil { + body["settings"] = createJobOptions.Settings + } + if createJobOptions.Tags != nil { + body["tags"] = createJobOptions.Tags + } + if createJobOptions.Location != nil { + body["location"] = createJobOptions.Location + } + if createJobOptions.Status != nil { + body["status"] = createJobOptions.Status + } + if createJobOptions.Data != nil { + body["data"] = createJobOptions.Data + } + if createJobOptions.Bastion != nil { + body["bastion"] = createJobOptions.Bastion + } + if createJobOptions.LogSummary != nil { + body["log_summary"] = createJobOptions.LogSummary + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return
+ } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalJob) + if err != nil { + return + } + response.Result = result + + return +} + +// ListJobs : Get all the Job records +// Get all the Job records. +func (schematics *SchematicsV1) ListJobs(listJobsOptions *ListJobsOptions) (result *JobList, response *core.DetailedResponse, err error) { + return schematics.ListJobsWithContext(context.Background(), listJobsOptions) +} + +// ListJobsWithContext is an alternate form of the ListJobs method which supports a Context parameter +func (schematics *SchematicsV1) ListJobsWithContext(ctx context.Context, listJobsOptions *ListJobsOptions) (result *JobList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listJobsOptions, "listJobsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/jobs`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listJobsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ListJobs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listJobsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listJobsOptions.Offset)) + } + if listJobsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listJobsOptions.Limit)) + } + if listJobsOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*listJobsOptions.Sort)) + } + if listJobsOptions.Profile != nil { + builder.AddQuery("profile", fmt.Sprint(*listJobsOptions.Profile)) + } + if listJobsOptions.Resource != nil { + builder.AddQuery("resource", fmt.Sprint(*listJobsOptions.Resource)) + } + if listJobsOptions.ActionID != nil { + builder.AddQuery("action_id", fmt.Sprint(*listJobsOptions.ActionID)) + } + if listJobsOptions.List != nil { + builder.AddQuery("list", fmt.Sprint(*listJobsOptions.List)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalJobList) + if err != nil { + return + } + response.Result = result + + return +} + +// ReplaceJob : Clone the Job-record, and relaunch the Job +// Clone the Job-record, and relaunch the Job. 
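Like the bulk workspace deletion job, the CreateJob method above requires an IAM refresh token header. A sketch of launching a job (the command object and command name values are hypothetical, not taken from this diff); the ReplaceJob implementation follows:

```go
package example

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

func launchJob(schematics *schematicsv1.SchematicsV1, refreshToken, actionID string) error {
	opts := &schematicsv1.CreateJobOptions{
		RefreshToken:    core.StringPtr(refreshToken), // sent as the refresh_token header
		CommandObject:   core.StringPtr("action"),     // hypothetical command target
		CommandObjectID: core.StringPtr(actionID),
		CommandName:     core.StringPtr("ansible_playbook_run"), // hypothetical command name
	}
	job, _, err := schematics.CreateJob(opts)
	if err != nil {
		return err
	}
	fmt.Printf("launched job: %+v\n", job)
	return nil
}
```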
+func (schematics *SchematicsV1) ReplaceJob(replaceJobOptions *ReplaceJobOptions) (result *Job, response *core.DetailedResponse, err error) { + return schematics.ReplaceJobWithContext(context.Background(), replaceJobOptions) +} + +// ReplaceJobWithContext is an alternate form of the ReplaceJob method which supports a Context parameter +func (schematics *SchematicsV1) ReplaceJobWithContext(ctx context.Context, replaceJobOptions *ReplaceJobOptions) (result *Job, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceJobOptions, "replaceJobOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceJobOptions, "replaceJobOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "job_id": *replaceJobOptions.JobID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/jobs/{job_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range replaceJobOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ReplaceJob") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + if replaceJobOptions.RefreshToken != nil { + builder.AddHeader("refresh_token", fmt.Sprint(*replaceJobOptions.RefreshToken)) + } + + body := make(map[string]interface{}) + if replaceJobOptions.CommandObject != nil { + body["command_object"] = replaceJobOptions.CommandObject + } + if replaceJobOptions.CommandObjectID != nil { + body["command_object_id"] = replaceJobOptions.CommandObjectID + } + if replaceJobOptions.CommandName != nil { + body["command_name"] = replaceJobOptions.CommandName + } + if replaceJobOptions.CommandParameter != nil { + body["command_parameter"] = replaceJobOptions.CommandParameter + } + if replaceJobOptions.CommandOptions != nil { + body["command_options"] = replaceJobOptions.CommandOptions + } + if replaceJobOptions.Inputs != nil { + body["inputs"] = replaceJobOptions.Inputs + } + if replaceJobOptions.Settings != nil { + body["settings"] = replaceJobOptions.Settings + } + if replaceJobOptions.Tags != nil { + body["tags"] = replaceJobOptions.Tags + } + if replaceJobOptions.Location != nil { + body["location"] = replaceJobOptions.Location + } + if replaceJobOptions.Status != nil { + body["status"] = replaceJobOptions.Status + } + if replaceJobOptions.Data != nil { + body["data"] = replaceJobOptions.Data + } + if replaceJobOptions.Bastion != nil { + body["bastion"] = replaceJobOptions.Bastion + } + if replaceJobOptions.LogSummary != nil { + body["log_summary"] = replaceJobOptions.LogSummary + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalJob) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteJob : Stop the running Job, and delete the Job-record +// Stop the running Job, and delete the Job-record. 
+func (schematics *SchematicsV1) DeleteJob(deleteJobOptions *DeleteJobOptions) (response *core.DetailedResponse, err error) { + return schematics.DeleteJobWithContext(context.Background(), deleteJobOptions) +} + +// DeleteJobWithContext is an alternate form of the DeleteJob method which supports a Context parameter +func (schematics *SchematicsV1) DeleteJobWithContext(ctx context.Context, deleteJobOptions *DeleteJobOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteJobOptions, "deleteJobOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteJobOptions, "deleteJobOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "job_id": *deleteJobOptions.JobID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/jobs/{job_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteJobOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "DeleteJob") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + if deleteJobOptions.RefreshToken != nil { + builder.AddHeader("refresh_token", fmt.Sprint(*deleteJobOptions.RefreshToken)) + } + if deleteJobOptions.Force != nil { + builder.AddHeader("force", fmt.Sprint(*deleteJobOptions.Force)) + } + if deleteJobOptions.Propagate != nil { + builder.AddHeader("propagate", fmt.Sprint(*deleteJobOptions.Propagate)) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = schematics.Service.Request(request, nil) + + return +} + +// GetJob : Get the Job record +// Get the Job record. 
+func (schematics *SchematicsV1) GetJob(getJobOptions *GetJobOptions) (result *Job, response *core.DetailedResponse, err error) { + return schematics.GetJobWithContext(context.Background(), getJobOptions) +} + +// GetJobWithContext is an alternate form of the GetJob method which supports a Context parameter +func (schematics *SchematicsV1) GetJobWithContext(ctx context.Context, getJobOptions *GetJobOptions) (result *Job, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getJobOptions, "getJobOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getJobOptions, "getJobOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "job_id": *getJobOptions.JobID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/jobs/{job_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getJobOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetJob") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getJobOptions.Profile != nil { + builder.AddQuery("profile", fmt.Sprint(*getJobOptions.Profile)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalJob) + if err != nil { + return + } + response.Result = result + + return +} + +// ListJobLogs : Get log-file from the Job record +// Get log-file from the Job record. 
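A sketch that pairs the GetJob method above with the ListJobLogs implementation below, first fetching the job record and then its log file (the job ID is a placeholder):

```go
package example

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

func showJobLogs(schematics *schematicsv1.SchematicsV1, jobID string) error {
	// Fetch the job record first.
	job, _, err := schematics.GetJob(&schematicsv1.GetJobOptions{
		JobID: core.StringPtr(jobID),
	})
	if err != nil {
		return err
	}
	fmt.Printf("job: %+v\n", job)

	// Then fetch its log file.
	logs, _, err := schematics.ListJobLogs(&schematicsv1.ListJobLogsOptions{
		JobID: core.StringPtr(jobID),
	})
	if err != nil {
		return err
	}
	fmt.Printf("logs: %+v\n", logs)
	return nil
}
```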
+func (schematics *SchematicsV1) ListJobLogs(listJobLogsOptions *ListJobLogsOptions) (result *JobLog, response *core.DetailedResponse, err error) { + return schematics.ListJobLogsWithContext(context.Background(), listJobLogsOptions) +} + +// ListJobLogsWithContext is an alternate form of the ListJobLogs method which supports a Context parameter +func (schematics *SchematicsV1) ListJobLogsWithContext(ctx context.Context, listJobLogsOptions *ListJobLogsOptions) (result *JobLog, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listJobLogsOptions, "listJobLogsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listJobLogsOptions, "listJobLogsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "job_id": *listJobLogsOptions.JobID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/jobs/{job_id}/logs`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listJobLogsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ListJobLogs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalJobLog) + if err != nil { + return + } + response.Result = result + + return +} + +// ListJobStates : Get state-data from the Job record +// Get state-data from the Job record. 
+func (schematics *SchematicsV1) ListJobStates(listJobStatesOptions *ListJobStatesOptions) (result *JobStateData, response *core.DetailedResponse, err error) { + return schematics.ListJobStatesWithContext(context.Background(), listJobStatesOptions) +} + +// ListJobStatesWithContext is an alternate form of the ListJobStates method which supports a Context parameter +func (schematics *SchematicsV1) ListJobStatesWithContext(ctx context.Context, listJobStatesOptions *ListJobStatesOptions) (result *JobStateData, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listJobStatesOptions, "listJobStatesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listJobStatesOptions, "listJobStatesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "job_id": *listJobStatesOptions.JobID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/jobs/{job_id}/states`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listJobStatesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ListJobStates") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalJobStateData) + if err != nil { + return + } + response.Result = result + + return +} + +// ListSharedDatasets : List all shared datasets +// List all shared datasets. 
+func (schematics *SchematicsV1) ListSharedDatasets(listSharedDatasetsOptions *ListSharedDatasetsOptions) (result *SharedDatasetResponseList, response *core.DetailedResponse, err error) { + return schematics.ListSharedDatasetsWithContext(context.Background(), listSharedDatasetsOptions) +} + +// ListSharedDatasetsWithContext is an alternate form of the ListSharedDatasets method which supports a Context parameter +func (schematics *SchematicsV1) ListSharedDatasetsWithContext(ctx context.Context, listSharedDatasetsOptions *ListSharedDatasetsOptions) (result *SharedDatasetResponseList, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listSharedDatasetsOptions, "listSharedDatasetsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/shared_datasets`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listSharedDatasetsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ListSharedDatasets") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSharedDatasetResponseList) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateSharedDataset : Create a shared dataset definition +// Create a shared dataset definition. 
+func (schematics *SchematicsV1) CreateSharedDataset(createSharedDatasetOptions *CreateSharedDatasetOptions) (result *SharedDatasetResponse, response *core.DetailedResponse, err error) { + return schematics.CreateSharedDatasetWithContext(context.Background(), createSharedDatasetOptions) +} + +// CreateSharedDatasetWithContext is an alternate form of the CreateSharedDataset method which supports a Context parameter +func (schematics *SchematicsV1) CreateSharedDatasetWithContext(ctx context.Context, createSharedDatasetOptions *CreateSharedDatasetOptions) (result *SharedDatasetResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createSharedDatasetOptions, "createSharedDatasetOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createSharedDatasetOptions, "createSharedDatasetOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/shared_datasets`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createSharedDatasetOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "CreateSharedDataset") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createSharedDatasetOptions.AutoPropagateChange != nil { + body["auto_propagate_change"] = createSharedDatasetOptions.AutoPropagateChange + } + if createSharedDatasetOptions.Description != nil { + body["description"] = createSharedDatasetOptions.Description + } + if createSharedDatasetOptions.EffectedWorkspaceIds != nil { + body["effected_workspace_ids"] = createSharedDatasetOptions.EffectedWorkspaceIds + } + if createSharedDatasetOptions.ResourceGroup != nil { + body["resource_group"] = createSharedDatasetOptions.ResourceGroup + } + if createSharedDatasetOptions.SharedDatasetData != nil { + body["shared_dataset_data"] = createSharedDatasetOptions.SharedDatasetData + } + if createSharedDatasetOptions.SharedDatasetName != nil { + body["shared_dataset_name"] = createSharedDatasetOptions.SharedDatasetName + } + if createSharedDatasetOptions.SharedDatasetSourceName != nil { + body["shared_dataset_source_name"] = createSharedDatasetOptions.SharedDatasetSourceName + } + if createSharedDatasetOptions.SharedDatasetType != nil { + body["shared_dataset_type"] = createSharedDatasetOptions.SharedDatasetType + } + if createSharedDatasetOptions.Tags != nil { + body["tags"] = createSharedDatasetOptions.Tags + } + if createSharedDatasetOptions.Version != nil { + body["version"] = createSharedDatasetOptions.Version + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSharedDatasetResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSharedDataset : Get the shared dataset +// Get the shared dataset. 
+func (schematics *SchematicsV1) GetSharedDataset(getSharedDatasetOptions *GetSharedDatasetOptions) (result *SharedDatasetResponse, response *core.DetailedResponse, err error) { + return schematics.GetSharedDatasetWithContext(context.Background(), getSharedDatasetOptions) +} + +// GetSharedDatasetWithContext is an alternate form of the GetSharedDataset method which supports a Context parameter +func (schematics *SchematicsV1) GetSharedDatasetWithContext(ctx context.Context, getSharedDatasetOptions *GetSharedDatasetOptions) (result *SharedDatasetResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSharedDatasetOptions, "getSharedDatasetOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSharedDatasetOptions, "getSharedDatasetOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "sd_id": *getSharedDatasetOptions.SdID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/shared_datasets/{sd_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSharedDatasetOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetSharedDataset") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSharedDatasetResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// ReplaceSharedDataset : Replace the shared dataset +// Replace the shared dataset. 
+func (schematics *SchematicsV1) ReplaceSharedDataset(replaceSharedDatasetOptions *ReplaceSharedDatasetOptions) (result *SharedDatasetResponse, response *core.DetailedResponse, err error) { + return schematics.ReplaceSharedDatasetWithContext(context.Background(), replaceSharedDatasetOptions) +} + +// ReplaceSharedDatasetWithContext is an alternate form of the ReplaceSharedDataset method which supports a Context parameter +func (schematics *SchematicsV1) ReplaceSharedDatasetWithContext(ctx context.Context, replaceSharedDatasetOptions *ReplaceSharedDatasetOptions) (result *SharedDatasetResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceSharedDatasetOptions, "replaceSharedDatasetOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceSharedDatasetOptions, "replaceSharedDatasetOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "sd_id": *replaceSharedDatasetOptions.SdID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/shared_datasets/{sd_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range replaceSharedDatasetOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ReplaceSharedDataset") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if replaceSharedDatasetOptions.AutoPropagateChange != nil { + body["auto_propagate_change"] = replaceSharedDatasetOptions.AutoPropagateChange + } + if replaceSharedDatasetOptions.Description != nil { + body["description"] = replaceSharedDatasetOptions.Description + } + if replaceSharedDatasetOptions.EffectedWorkspaceIds != nil { + body["effected_workspace_ids"] = replaceSharedDatasetOptions.EffectedWorkspaceIds + } + if replaceSharedDatasetOptions.ResourceGroup != nil { + body["resource_group"] = replaceSharedDatasetOptions.ResourceGroup + } + if replaceSharedDatasetOptions.SharedDatasetData != nil { + body["shared_dataset_data"] = replaceSharedDatasetOptions.SharedDatasetData + } + if replaceSharedDatasetOptions.SharedDatasetName != nil { + body["shared_dataset_name"] = replaceSharedDatasetOptions.SharedDatasetName + } + if replaceSharedDatasetOptions.SharedDatasetSourceName != nil { + body["shared_dataset_source_name"] = replaceSharedDatasetOptions.SharedDatasetSourceName + } + if replaceSharedDatasetOptions.SharedDatasetType != nil { + body["shared_dataset_type"] = replaceSharedDatasetOptions.SharedDatasetType + } + if replaceSharedDatasetOptions.Tags != nil { + body["tags"] = replaceSharedDatasetOptions.Tags + } + if replaceSharedDatasetOptions.Version != nil { + body["version"] = replaceSharedDatasetOptions.Version + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSharedDatasetResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// 
DeleteSharedDataset : Delete the shared dataset +// Delete the shared dataset. +func (schematics *SchematicsV1) DeleteSharedDataset(deleteSharedDatasetOptions *DeleteSharedDatasetOptions) (result *SharedDatasetResponse, response *core.DetailedResponse, err error) { + return schematics.DeleteSharedDatasetWithContext(context.Background(), deleteSharedDatasetOptions) +} + +// DeleteSharedDatasetWithContext is an alternate form of the DeleteSharedDataset method which supports a Context parameter +func (schematics *SchematicsV1) DeleteSharedDatasetWithContext(ctx context.Context, deleteSharedDatasetOptions *DeleteSharedDatasetOptions) (result *SharedDatasetResponse, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteSharedDatasetOptions, "deleteSharedDatasetOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteSharedDatasetOptions, "deleteSharedDatasetOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "sd_id": *deleteSharedDatasetOptions.SdID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/shared_datasets/{sd_id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteSharedDatasetOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "DeleteSharedDataset") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSharedDatasetResponse) + if err != nil { + return + } + response.Result = result + + return +} + +// GetKmsSettings : Get the KMS settings for customer account +// Get the KMS settings for customer account.
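Note that the GetKmsSettings method below dereferences Location unconditionally when building the query, so the field is effectively required and leaving it nil would panic. A sketch with a hypothetical location:

```go
package example

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

func showKmsSettings(schematics *schematicsv1.SchematicsV1) error {
	opts := &schematicsv1.GetKmsSettingsOptions{
		// Location is dereferenced unconditionally when the query is built,
		// so it must be non-nil.
		Location: core.StringPtr("us-south"), // hypothetical location
	}
	settings, _, err := schematics.GetKmsSettings(opts)
	if err != nil {
		return err
	}
	fmt.Printf("KMS settings: %+v\n", settings)
	return nil
}
```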
+func (schematics *SchematicsV1) GetKmsSettings(getKmsSettingsOptions *GetKmsSettingsOptions) (result *KMSSettings, response *core.DetailedResponse, err error) { + return schematics.GetKmsSettingsWithContext(context.Background(), getKmsSettingsOptions) +} + +// GetKmsSettingsWithContext is an alternate form of the GetKmsSettings method which supports a Context parameter +func (schematics *SchematicsV1) GetKmsSettingsWithContext(ctx context.Context, getKmsSettingsOptions *GetKmsSettingsOptions) (result *KMSSettings, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getKmsSettingsOptions, "getKmsSettingsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getKmsSettingsOptions, "getKmsSettingsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/settings/kms`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getKmsSettingsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetKmsSettings") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("location", fmt.Sprint(*getKmsSettingsOptions.Location)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKMSSettings) + if err != nil { + return + } + response.Result = result + + return +} + +// ReplaceKmsSettings : Set the KMS settings for customer account +// Set the KMS settings for customer account. 
+func (schematics *SchematicsV1) ReplaceKmsSettings(replaceKmsSettingsOptions *ReplaceKmsSettingsOptions) (result *KMSSettings, response *core.DetailedResponse, err error) { + return schematics.ReplaceKmsSettingsWithContext(context.Background(), replaceKmsSettingsOptions) +} + +// ReplaceKmsSettingsWithContext is an alternate form of the ReplaceKmsSettings method which supports a Context parameter +func (schematics *SchematicsV1) ReplaceKmsSettingsWithContext(ctx context.Context, replaceKmsSettingsOptions *ReplaceKmsSettingsOptions) (result *KMSSettings, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceKmsSettingsOptions, "replaceKmsSettingsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceKmsSettingsOptions, "replaceKmsSettingsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/settings/kms`, nil) + if err != nil { + return + } + + for headerName, headerValue := range replaceKmsSettingsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "ReplaceKmsSettings") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if replaceKmsSettingsOptions.Location != nil { + body["location"] = replaceKmsSettingsOptions.Location + } + if replaceKmsSettingsOptions.EncryptionScheme != nil { + body["encryption_scheme"] = replaceKmsSettingsOptions.EncryptionScheme + } + if replaceKmsSettingsOptions.ResourceGroup != nil { + body["resource_group"] = replaceKmsSettingsOptions.ResourceGroup + } + if replaceKmsSettingsOptions.PrimaryCrk != nil { + body["primary_crk"] = replaceKmsSettingsOptions.PrimaryCrk + } + if replaceKmsSettingsOptions.SecondaryCrk != nil { + body["secondary_crk"] = replaceKmsSettingsOptions.SecondaryCrk + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKMSSettings) + if err != nil { + return + } + response.Result = result + + return +} + +// GetDiscoveredKmsInstances : Discover the KMS instances in the account +// Discover the KMS instances in the account. 
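The tail of every method above is the generator's standard decode step: the transport fills a `map[string]json.RawMessage`, a model-specific unmarshaller converts it into the typed result, and the typed value replaces the raw one on `response.Result`. The same unmarshaller can be driven directly from raw JSON, which is handy in unit tests; a sketch with a fabricated payload whose keys mirror the request body built above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

func main() {
	// Fabricated response body, for illustration only.
	payload := []byte(`{"location": "us-south", "encryption_scheme": "byok"}`)

	var raw map[string]json.RawMessage
	if err := json.Unmarshal(payload, &raw); err != nil {
		panic(err)
	}

	// The same call the generated methods make after every request.
	var settings *schematicsv1.KMSSettings
	if err := core.UnmarshalModel(raw, "", &settings, schematicsv1.UnmarshalKMSSettings); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", settings)
}
```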
+func (schematics *SchematicsV1) GetDiscoveredKmsInstances(getDiscoveredKmsInstancesOptions *GetDiscoveredKmsInstancesOptions) (result *KMSDiscovery, response *core.DetailedResponse, err error) { + return schematics.GetDiscoveredKmsInstancesWithContext(context.Background(), getDiscoveredKmsInstancesOptions) +} + +// GetDiscoveredKmsInstancesWithContext is an alternate form of the GetDiscoveredKmsInstances method which supports a Context parameter +func (schematics *SchematicsV1) GetDiscoveredKmsInstancesWithContext(ctx context.Context, getDiscoveredKmsInstancesOptions *GetDiscoveredKmsInstancesOptions) (result *KMSDiscovery, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getDiscoveredKmsInstancesOptions, "getDiscoveredKmsInstancesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getDiscoveredKmsInstancesOptions, "getDiscoveredKmsInstancesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = schematics.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v2/settings/kms_instances`, nil) + if err != nil { + return + } + + for headerName, headerValue := range getDiscoveredKmsInstancesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("schematics", "V1", "GetDiscoveredKmsInstances") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("encryption_scheme", fmt.Sprint(*getDiscoveredKmsInstancesOptions.EncryptionScheme)) + builder.AddQuery("location", fmt.Sprint(*getDiscoveredKmsInstancesOptions.Location)) + if getDiscoveredKmsInstancesOptions.ResourceGroup != nil { + builder.AddQuery("resource_group", fmt.Sprint(*getDiscoveredKmsInstancesOptions.ResourceGroup)) + } + if getDiscoveredKmsInstancesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*getDiscoveredKmsInstancesOptions.Limit)) + } + if getDiscoveredKmsInstancesOptions.Sort != nil { + builder.AddQuery("sort", fmt.Sprint(*getDiscoveredKmsInstancesOptions.Sort)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = schematics.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKMSDiscovery) + if err != nil { + return + } + response.Result = result + + return +} + +// Action : Complete Action details with user inputs and system generated data. +type Action struct { + // Action name (unique for an account). + Name *string `json:"name,omitempty"` + + // Action description. + Description *string `json:"description,omitempty"` + + // List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of + // the resources provisioned using Schematics. + Location *string `json:"location,omitempty"` + + // Resource-group name for the Action. By default, Action will be created in Default Resource Group. + ResourceGroup *string `json:"resource_group,omitempty"` + + // Action tags. + Tags []string `json:"tags,omitempty"` + + // User defined status of the Schematics object. + UserState *UserState `json:"user_state,omitempty"` + + // URL of the README file, for the source. 
+ SourceReadmeURL *string `json:"source_readme_url,omitempty"` + + // Source of templates, playbooks, or controls. + Source *ExternalSource `json:"source,omitempty"` + + // Type of source for the Template. + SourceType *string `json:"source_type,omitempty"` + + // Schematics job command parameter (playbook-name, capsule-name or flow-name). + CommandParameter *string `json:"command_parameter,omitempty"` + + // Complete Target details with user inputs and system generated data. + Bastion *TargetResourceset `json:"bastion,omitempty"` + + // Inventory of host and host group for the playbook, in .ini file format. + TargetsIni *string `json:"targets_ini,omitempty"` + + // Credentials of the Action. + Credentials []VariableData `json:"credentials,omitempty"` + + // Input variables for the Action. + Inputs []VariableData `json:"inputs,omitempty"` + + // Output variables for the Action. + Outputs []VariableData `json:"outputs,omitempty"` + + // Environment variables for the Action. + Settings []VariableData `json:"settings,omitempty"` + + // Id of the Trigger. + TriggerRecordID *string `json:"trigger_record_id,omitempty"` + + // Action Id. + ID *string `json:"id,omitempty"` + + // Action Cloud Resource Name. + Crn *string `json:"crn,omitempty"` + + // Action account id. + Account *string `json:"account,omitempty"` + + // Action Playbook Source creation time. + SourceCreatedAt *strfmt.DateTime `json:"source_created_at,omitempty"` + + // Email address of user who created the Action Playbook Source. + SourceCreatedBy *string `json:"source_created_by,omitempty"` + + // Action Playbook Source update time. + SourceUpdatedAt *strfmt.DateTime `json:"source_updated_at,omitempty"` + + // Email address of user who updated the Action Playbook Source. + SourceUpdatedBy *string `json:"source_updated_by,omitempty"` + + // Action creation time. + CreatedAt *strfmt.DateTime `json:"created_at,omitempty"` + + // Email address of user who created the action. + CreatedBy *string `json:"created_by,omitempty"` + + // Action update time. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` + + // Email address of user who updated the action. + UpdatedBy *string `json:"updated_by,omitempty"` + + // Name of the namespace. + Namespace *string `json:"namespace,omitempty"` + + // Computed state of the Action. + State *ActionState `json:"state,omitempty"` + + // Playbook names retrieved from repo. + PlaybookNames []string `json:"playbook_names,omitempty"` + + // System lock status. + SysLock *SystemLock `json:"sys_lock,omitempty"` +} + +// Constants associated with the Action.Location property. +// List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of the +// resources provisioned using Schematics. +const ( + Action_Location_EuDe = "eu_de" + Action_Location_EuGb = "eu_gb" + Action_Location_UsEast = "us_east" + Action_Location_UsSouth = "us_south" +) + +// Constants associated with the Action.SourceType property. +// Type of source for the Template. +const ( + Action_SourceType_ExternalScm = "external_scm" + Action_SourceType_GitHub = "git_hub" + Action_SourceType_GitHubEnterprise = "git_hub_enterprise" + Action_SourceType_GitLab = "git_lab" + Action_SourceType_IbmCloudCatalog = "ibm_cloud_catalog" + Action_SourceType_IbmGitLab = "ibm_git_lab" + Action_SourceType_Local = "local" +) +
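Every optional model field is a pointer so that `omitempty` can distinguish unset from zero values, and the enum constants above are plain strings. Hand-building a model therefore leans on the core pointer helpers; a short sketch using only fields and constants declared above (the values are placeholders):

```go
package main

import (
	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

// exampleAction assembles an Action value the way a caller would
// before sending it to the service.
func exampleAction() *schematicsv1.Action {
	return &schematicsv1.Action{
		Name:        core.StringPtr("deploy-playbook"),
		Description: core.StringPtr("Example action built by hand"),
		Location:    core.StringPtr(schematicsv1.Action_Location_UsSouth),
		SourceType:  core.StringPtr(schematicsv1.Action_SourceType_GitHub),
		Tags:        []string{"env:dev"},
	}
}
```

+// UnmarshalAction unmarshals an instance of Action from the specified map of raw messages.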
+func UnmarshalAction(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Action) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "location", &obj.Location) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group", &obj.ResourceGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalModel(m, "user_state", &obj.UserState, UnmarshalUserState) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_readme_url", &obj.SourceReadmeURL) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source", &obj.Source, UnmarshalExternalSource) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_type", &obj.SourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_parameter", &obj.CommandParameter) + if err != nil { + return + } + err = core.UnmarshalModel(m, "bastion", &obj.Bastion, UnmarshalTargetResourceset) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "targets_ini", &obj.TargetsIni) + if err != nil { + return + } + err = core.UnmarshalModel(m, "credentials", &obj.Credentials, UnmarshalVariableData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "inputs", &obj.Inputs, UnmarshalVariableData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "outputs", &obj.Outputs, UnmarshalVariableData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "settings", &obj.Settings, UnmarshalVariableData) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "trigger_record_id", &obj.TriggerRecordID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.Crn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "account", &obj.Account) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_created_at", &obj.SourceCreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_created_by", &obj.SourceCreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_updated_at", &obj.SourceUpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_updated_by", &obj.SourceUpdatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "namespace", &obj.Namespace) + if err != nil { + return + } + err = core.UnmarshalModel(m, "state", &obj.State, UnmarshalActionState) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "playbook_names", &obj.PlaybookNames) + if err != nil { + return + } + err = core.UnmarshalModel(m, "sys_lock", &obj.SysLock, UnmarshalSystemLock) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ActionList : List of Action definition response. +type ActionList struct { + // Total number of records. 
+ TotalCount *int64 `json:"total_count,omitempty"` + + // Number of records returned. + Limit *int64 `json:"limit" validate:"required"` + + // Number of records skipped. + Offset *int64 `json:"offset" validate:"required"` + + // List of action records. + Actions []ActionLite `json:"actions,omitempty"` +} + +// UnmarshalActionList unmarshals an instance of ActionList from the specified map of raw messages. +func UnmarshalActionList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ActionList) + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalModel(m, "actions", &obj.Actions, UnmarshalActionLite) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ActionLite : Action summary profile with user inputs and system generated data. +type ActionLite struct { + // Action name (unique for an account). + Name *string `json:"name,omitempty"` + + // Action description. + Description *string `json:"description,omitempty"` + + // Action Id. + ID *string `json:"id,omitempty"` + + // Action Cloud Resource Name. + Crn *string `json:"crn,omitempty"` + + // List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of + // the resources provisioned using Schematics. + Location *string `json:"location,omitempty"` + + // Resource-group name for the Action. By default, Action will be created in Default Resource Group. + ResourceGroup *string `json:"resource_group,omitempty"` + + // Name of the namespace. + Namespace *string `json:"namespace,omitempty"` + + // Action tags. + Tags []string `json:"tags,omitempty"` + + // Name of the selected playbook. + PlaybookName *string `json:"playbook_name,omitempty"` + + // User defined status of the Schematics object. + UserState *UserState `json:"user_state,omitempty"` + + // Computed state of the Action. + State *ActionLiteState `json:"state,omitempty"` + + // System lock status. + SysLock *SystemLock `json:"sys_lock,omitempty"` + + // Action creation time. + CreatedAt *strfmt.DateTime `json:"created_at,omitempty"` + + // Email address of user who created the action. + CreatedBy *string `json:"created_by,omitempty"` + + // Action update time. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` + + // Email address of user who updated the action. + UpdatedBy *string `json:"updated_by,omitempty"` +} + +// Constants associated with the ActionLite.Location property. +// List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of the +// resources provisioned using Schematics. +const ( + ActionLite_Location_EuDe = "eu_de" + ActionLite_Location_EuGb = "eu_gb" + ActionLite_Location_UsEast = "us_east" + ActionLite_Location_UsSouth = "us_south" +) +
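ActionList is a standard offset/limit page: `Offset` reports how many records were skipped, `Limit` the page size, and `TotalCount` the size of the full result set. A paging sketch; it assumes this file's `ListActions` method and `ListActionsOptions` type, which sit outside this hunk:

```go
package main

import (
	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

// listAllActions walks the offset/limit pages until the reported total
// is reached.
func listAllActions(service *schematicsv1.SchematicsV1) ([]schematicsv1.ActionLite, error) {
	var all []schematicsv1.ActionLite
	var offset int64
	for {
		page, _, err := service.ListActions(&schematicsv1.ListActionsOptions{
			Offset: core.Int64Ptr(offset),
			Limit:  core.Int64Ptr(100),
		})
		if err != nil {
			return nil, err
		}
		all = append(all, page.Actions...)
		offset += int64(len(page.Actions))
		// Stop on an empty page or once TotalCount is covered.
		if len(page.Actions) == 0 || page.TotalCount == nil || offset >= *page.TotalCount {
			return all, nil
		}
	}
}
```

+// UnmarshalActionLite unmarshals an instance of ActionLite from the specified map of raw messages.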
+func UnmarshalActionLite(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ActionLite) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.Crn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "location", &obj.Location) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group", &obj.ResourceGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "namespace", &obj.Namespace) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "playbook_name", &obj.PlaybookName) + if err != nil { + return + } + err = core.UnmarshalModel(m, "user_state", &obj.UserState, UnmarshalUserState) + if err != nil { + return + } + err = core.UnmarshalModel(m, "state", &obj.State, UnmarshalActionLiteState) + if err != nil { + return + } + err = core.UnmarshalModel(m, "sys_lock", &obj.SysLock, UnmarshalSystemLock) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ActionLiteState : Computed state of the Action. +type ActionLiteState struct { + // Status of automation (workspace or action). + StatusCode *string `json:"status_code,omitempty"` + + // Automation status message - to be displayed along with the status_code. + StatusMessage *string `json:"status_message,omitempty"` +} + +// Constants associated with the ActionLiteState.StatusCode property. +// Status of automation (workspace or action). +const ( + ActionLiteState_StatusCode_Critical = "critical" + ActionLiteState_StatusCode_Disabled = "disabled" + ActionLiteState_StatusCode_Normal = "normal" + ActionLiteState_StatusCode_Pending = "pending" +) + +// UnmarshalActionLiteState unmarshals an instance of ActionLiteState from the specified map of raw messages. +func UnmarshalActionLiteState(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ActionLiteState) + err = core.UnmarshalPrimitive(m, "status_code", &obj.StatusCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status_message", &obj.StatusMessage) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ActionState : Computed state of the Action. +type ActionState struct { + // Status of automation (workspace or action). + StatusCode *string `json:"status_code,omitempty"` + + // Job id reference for this status. + StatusJobID *string `json:"status_job_id,omitempty"` + + // Automation status message - to be displayed along with the status_code. + StatusMessage *string `json:"status_message,omitempty"` +} + +// Constants associated with the ActionState.StatusCode property. +// Status of automation (workspace or action). 
+const ( + ActionState_StatusCode_Critical = "critical" + ActionState_StatusCode_Disabled = "disabled" + ActionState_StatusCode_Normal = "normal" + ActionState_StatusCode_Pending = "pending" +) + +// UnmarshalActionState unmarshals an instance of ActionState from the specified map of raw messages. +func UnmarshalActionState(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ActionState) + err = core.UnmarshalPrimitive(m, "status_code", &obj.StatusCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status_job_id", &obj.StatusJobID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status_message", &obj.StatusMessage) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ApplyWorkspaceCommandOptions : The ApplyWorkspaceCommand options. +type ApplyWorkspaceCommandOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The IAM refresh token associated with the IBM Cloud account. + RefreshToken *string `json:"refresh_token" validate:"required"` + + // Action Options Template ... + ActionOptions *WorkspaceActivityOptionsTemplate `json:"action_options,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewApplyWorkspaceCommandOptions : Instantiate ApplyWorkspaceCommandOptions +func (*SchematicsV1) NewApplyWorkspaceCommandOptions(wID string, refreshToken string) *ApplyWorkspaceCommandOptions { + return &ApplyWorkspaceCommandOptions{ + WID: core.StringPtr(wID), + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetWID : Allow user to set WID +func (options *ApplyWorkspaceCommandOptions) SetWID(wID string) *ApplyWorkspaceCommandOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *ApplyWorkspaceCommandOptions) SetRefreshToken(refreshToken string) *ApplyWorkspaceCommandOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetActionOptions : Allow user to set ActionOptions +func (options *ApplyWorkspaceCommandOptions) SetActionOptions(actionOptions *WorkspaceActivityOptionsTemplate) *ApplyWorkspaceCommandOptions { + options.ActionOptions = actionOptions + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ApplyWorkspaceCommandOptions) SetHeaders(param map[string]string) *ApplyWorkspaceCommandOptions { + options.Headers = param + return options +} + +// CatalogRef : CatalogRef -. +type CatalogRef struct { + // Dry run. + DryRun *bool `json:"dry_run,omitempty"` + + // Catalog item icon url. + ItemIconURL *string `json:"item_icon_url,omitempty"` + + // Catalog item id. + ItemID *string `json:"item_id,omitempty"` + + // Catalog item name. + ItemName *string `json:"item_name,omitempty"` + + // Catalog item readme url. + ItemReadmeURL *string `json:"item_readme_url,omitempty"` + + // Catalog item url. + ItemURL *string `json:"item_url,omitempty"` + + // Catalog item launch url. + LaunchURL *string `json:"launch_url,omitempty"` + + // Catalog item offering version. + OfferingVersion *string `json:"offering_version,omitempty"` +} + +// UnmarshalCatalogRef unmarshals an instance of CatalogRef from the specified map of raw messages. 
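Options types pair a constructor for the required arguments with chainable setters for everything else, so call sites can be a single expression. For example, queueing an apply with the ApplyWorkspaceCommandOptions shown above; the `ApplyWorkspaceCommand` method itself is defined elsewhere in this file, and the IDs, token, and header are placeholders:

```go
package main

import (
	"fmt"

	"github.com/IBM/schematics-go-sdk/schematicsv1"
)

// applyWorkspace queues an apply job against a workspace.
func applyWorkspace(service *schematicsv1.SchematicsV1, wID, refreshToken string) error {
	opts := service.NewApplyWorkspaceCommandOptions(wID, refreshToken).
		SetHeaders(map[string]string{"X-Example-Trace": "docs"})

	activity, response, err := service.ApplyWorkspaceCommand(opts)
	if err != nil {
		return err
	}
	fmt.Println(response.StatusCode, activity)
	return nil
}
```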
+func UnmarshalCatalogRef(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CatalogRef) + err = core.UnmarshalPrimitive(m, "dry_run", &obj.DryRun) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "item_icon_url", &obj.ItemIconURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "item_id", &obj.ItemID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "item_name", &obj.ItemName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "item_readme_url", &obj.ItemReadmeURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "item_url", &obj.ItemURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "launch_url", &obj.LaunchURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offering_version", &obj.OfferingVersion) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CreateActionOptions : The CreateAction options. +type CreateActionOptions struct { + // Action name (unique for an account). + Name *string `json:"name,omitempty"` + + // Action description. + Description *string `json:"description,omitempty"` + + // List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of + // the resources provisioned using Schematics. + Location *string `json:"location,omitempty"` + + // Resource-group name for the Action. By default, Action will be created in Default Resource Group. + ResourceGroup *string `json:"resource_group,omitempty"` + + // Action tags. + Tags []string `json:"tags,omitempty"` + + // User defined status of the Schematics object. + UserState *UserState `json:"user_state,omitempty"` + + // URL of the README file, for the source. + SourceReadmeURL *string `json:"source_readme_url,omitempty"` + + // Source of templates, playbooks, or controls. + Source *ExternalSource `json:"source,omitempty"` + + // Type of source for the Template. + SourceType *string `json:"source_type,omitempty"` + + // Schematics job command parameter (playbook-name, capsule-name or flow-name). + CommandParameter *string `json:"command_parameter,omitempty"` + + // Complete Target details with user inputs and system generated data. + Bastion *TargetResourceset `json:"bastion,omitempty"` + + // Inventory of host and host group for the playbook, in .ini file format. + TargetsIni *string `json:"targets_ini,omitempty"` + + // credentials of the Action. + Credentials []VariableData `json:"credentials,omitempty"` + + // Input variables for the Action. + Inputs []VariableData `json:"inputs,omitempty"` + + // Output variables for the Action. + Outputs []VariableData `json:"outputs,omitempty"` + + // Environment variables for the Action. + Settings []VariableData `json:"settings,omitempty"` + + // Id to the Trigger. + TriggerRecordID *string `json:"trigger_record_id,omitempty"` + + // Computed state of the Action. + State *ActionState `json:"state,omitempty"` + + // System lock status. + SysLock *SystemLock `json:"sys_lock,omitempty"` + + // The github token associated with the GIT. Required for cloning of repo. + XGithubToken *string `json:"X-Github-token,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateActionOptions.Location property. +// List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of the +// resources provisioned using Schematics. 
+const ( + CreateActionOptions_Location_EuDe = "eu_de" + CreateActionOptions_Location_EuGb = "eu_gb" + CreateActionOptions_Location_UsEast = "us_east" + CreateActionOptions_Location_UsSouth = "us_south" +) + +// Constants associated with the CreateActionOptions.SourceType property. +// Type of source for the Template. +const ( + CreateActionOptions_SourceType_ExternalScm = "external_scm" + CreateActionOptions_SourceType_GitHub = "git_hub" + CreateActionOptions_SourceType_GitHubEnterprise = "git_hub_enterprise" + CreateActionOptions_SourceType_GitLab = "git_lab" + CreateActionOptions_SourceType_IbmCloudCatalog = "ibm_cloud_catalog" + CreateActionOptions_SourceType_IbmGitLab = "ibm_git_lab" + CreateActionOptions_SourceType_Local = "local" +) + +// NewCreateActionOptions : Instantiate CreateActionOptions +func (*SchematicsV1) NewCreateActionOptions() *CreateActionOptions { + return &CreateActionOptions{} +} + +// SetName : Allow user to set Name +func (options *CreateActionOptions) SetName(name string) *CreateActionOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateActionOptions) SetDescription(description string) *CreateActionOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetLocation : Allow user to set Location +func (options *CreateActionOptions) SetLocation(location string) *CreateActionOptions { + options.Location = core.StringPtr(location) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateActionOptions) SetResourceGroup(resourceGroup string) *CreateActionOptions { + options.ResourceGroup = core.StringPtr(resourceGroup) + return options +} + +// SetTags : Allow user to set Tags +func (options *CreateActionOptions) SetTags(tags []string) *CreateActionOptions { + options.Tags = tags + return options +} + +// SetUserState : Allow user to set UserState +func (options *CreateActionOptions) SetUserState(userState *UserState) *CreateActionOptions { + options.UserState = userState + return options +} + +// SetSourceReadmeURL : Allow user to set SourceReadmeURL +func (options *CreateActionOptions) SetSourceReadmeURL(sourceReadmeURL string) *CreateActionOptions { + options.SourceReadmeURL = core.StringPtr(sourceReadmeURL) + return options +} + +// SetSource : Allow user to set Source +func (options *CreateActionOptions) SetSource(source *ExternalSource) *CreateActionOptions { + options.Source = source + return options +} + +// SetSourceType : Allow user to set SourceType +func (options *CreateActionOptions) SetSourceType(sourceType string) *CreateActionOptions { + options.SourceType = core.StringPtr(sourceType) + return options +} + +// SetCommandParameter : Allow user to set CommandParameter +func (options *CreateActionOptions) SetCommandParameter(commandParameter string) *CreateActionOptions { + options.CommandParameter = core.StringPtr(commandParameter) + return options +} + +// SetBastion : Allow user to set Bastion +func (options *CreateActionOptions) SetBastion(bastion *TargetResourceset) *CreateActionOptions { + options.Bastion = bastion + return options +} + +// SetTargetsIni : Allow user to set TargetsIni +func (options *CreateActionOptions) SetTargetsIni(targetsIni string) *CreateActionOptions { + options.TargetsIni = core.StringPtr(targetsIni) + return options +} + +// SetCredentials : Allow user to set Credentials +func (options *CreateActionOptions) SetCredentials(credentials []VariableData) 
*CreateActionOptions { + options.Credentials = credentials + return options +} + +// SetInputs : Allow user to set Inputs +func (options *CreateActionOptions) SetInputs(inputs []VariableData) *CreateActionOptions { + options.Inputs = inputs + return options +} + +// SetOutputs : Allow user to set Outputs +func (options *CreateActionOptions) SetOutputs(outputs []VariableData) *CreateActionOptions { + options.Outputs = outputs + return options +} + +// SetSettings : Allow user to set Settings +func (options *CreateActionOptions) SetSettings(settings []VariableData) *CreateActionOptions { + options.Settings = settings + return options +} + +// SetTriggerRecordID : Allow user to set TriggerRecordID +func (options *CreateActionOptions) SetTriggerRecordID(triggerRecordID string) *CreateActionOptions { + options.TriggerRecordID = core.StringPtr(triggerRecordID) + return options +} + +// SetState : Allow user to set State +func (options *CreateActionOptions) SetState(state *ActionState) *CreateActionOptions { + options.State = state + return options +} + +// SetSysLock : Allow user to set SysLock +func (options *CreateActionOptions) SetSysLock(sysLock *SystemLock) *CreateActionOptions { + options.SysLock = sysLock + return options +} + +// SetXGithubToken : Allow user to set XGithubToken +func (options *CreateActionOptions) SetXGithubToken(xGithubToken string) *CreateActionOptions { + options.XGithubToken = core.StringPtr(xGithubToken) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateActionOptions) SetHeaders(param map[string]string) *CreateActionOptions { + options.Headers = param + return options +} + +// CreateJobOptions : The CreateJob options. +type CreateJobOptions struct { + // The IAM refresh token associated with the IBM Cloud account. + RefreshToken *string `json:"refresh_token" validate:"required"` + + // Name of the Schematics automation resource. + CommandObject *string `json:"command_object,omitempty"` + + // Job command object id (workspace-id, action-id or control-id). + CommandObjectID *string `json:"command_object_id,omitempty"` + + // Schematics job command name. + CommandName *string `json:"command_name,omitempty"` + + // Schematics job command parameter (playbook-name, capsule-name or flow-name). + CommandParameter *string `json:"command_parameter,omitempty"` + + // Command line options for the command. + CommandOptions []string `json:"command_options,omitempty"` + + // Job inputs used by Action. + Inputs []VariableData `json:"inputs,omitempty"` + + // Environment variables used by the Job while performing Action. + Settings []VariableData `json:"settings,omitempty"` + + // User defined tags, while running the job. + Tags []string `json:"tags,omitempty"` + + // List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of + // the resources provisioned using Schematics. + Location *string `json:"location,omitempty"` + + // Job Status. + Status *JobStatus `json:"status,omitempty"` + + // Job data. + Data *JobData `json:"data,omitempty"` + + // Complete Target details with user inputs and system generated data. + Bastion *TargetResourceset `json:"bastion,omitempty"` + + // Job log summary record. + LogSummary *JobLogSummary `json:"log_summary,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateJobOptions.CommandObject property. +// Name of the Schematics automation resource. 
+const ( + CreateJobOptions_CommandObject_Action = "action" + CreateJobOptions_CommandObject_Workspace = "workspace" +) + +// Constants associated with the CreateJobOptions.CommandName property. +// Schematics job command name. +const ( + CreateJobOptions_CommandName_AnsiblePlaybookCheck = "ansible_playbook_check" + CreateJobOptions_CommandName_AnsiblePlaybookRun = "ansible_playbook_run" + CreateJobOptions_CommandName_HelmInstall = "helm_install" + CreateJobOptions_CommandName_HelmList = "helm_list" + CreateJobOptions_CommandName_HelmShow = "helm_show" + CreateJobOptions_CommandName_OpaEvaluate = "opa_evaluate" + CreateJobOptions_CommandName_TerraformInit = "terraform_init" + CreateJobOptions_CommandName_TerrformApply = "terrform_apply" + CreateJobOptions_CommandName_TerrformDestroy = "terrform_destroy" + CreateJobOptions_CommandName_TerrformPlan = "terrform_plan" + CreateJobOptions_CommandName_TerrformRefresh = "terrform_refresh" + CreateJobOptions_CommandName_TerrformShow = "terrform_show" + CreateJobOptions_CommandName_TerrformTaint = "terrform_taint" + CreateJobOptions_CommandName_WorkspaceApplyFlow = "workspace_apply_flow" + CreateJobOptions_CommandName_WorkspaceCustomFlow = "workspace_custom_flow" + CreateJobOptions_CommandName_WorkspaceDestroyFlow = "workspace_destroy_flow" + CreateJobOptions_CommandName_WorkspaceInitFlow = "workspace_init_flow" + CreateJobOptions_CommandName_WorkspacePlanFlow = "workspace_plan_flow" + CreateJobOptions_CommandName_WorkspaceRefreshFlow = "workspace_refresh_flow" + CreateJobOptions_CommandName_WorkspaceShowFlow = "workspace_show_flow" +) + +// Constants associated with the CreateJobOptions.Location property. +// List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of the +// resources provisioned using Schematics. 
+const ( + CreateJobOptions_Location_EuDe = "eu_de" + CreateJobOptions_Location_EuGb = "eu_gb" + CreateJobOptions_Location_UsEast = "us_east" + CreateJobOptions_Location_UsSouth = "us_south" +) + +// NewCreateJobOptions : Instantiate CreateJobOptions +func (*SchematicsV1) NewCreateJobOptions(refreshToken string) *CreateJobOptions { + return &CreateJobOptions{ + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *CreateJobOptions) SetRefreshToken(refreshToken string) *CreateJobOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetCommandObject : Allow user to set CommandObject +func (options *CreateJobOptions) SetCommandObject(commandObject string) *CreateJobOptions { + options.CommandObject = core.StringPtr(commandObject) + return options +} + +// SetCommandObjectID : Allow user to set CommandObjectID +func (options *CreateJobOptions) SetCommandObjectID(commandObjectID string) *CreateJobOptions { + options.CommandObjectID = core.StringPtr(commandObjectID) + return options +} + +// SetCommandName : Allow user to set CommandName +func (options *CreateJobOptions) SetCommandName(commandName string) *CreateJobOptions { + options.CommandName = core.StringPtr(commandName) + return options +} + +// SetCommandParameter : Allow user to set CommandParameter +func (options *CreateJobOptions) SetCommandParameter(commandParameter string) *CreateJobOptions { + options.CommandParameter = core.StringPtr(commandParameter) + return options +} + +// SetCommandOptions : Allow user to set CommandOptions +func (options *CreateJobOptions) SetCommandOptions(commandOptions []string) *CreateJobOptions { + options.CommandOptions = commandOptions + return options +} + +// SetInputs : Allow user to set Inputs +func (options *CreateJobOptions) SetInputs(inputs []VariableData) *CreateJobOptions { + options.Inputs = inputs + return options +} + +// SetSettings : Allow user to set Settings +func (options *CreateJobOptions) SetSettings(settings []VariableData) *CreateJobOptions { + options.Settings = settings + return options +} + +// SetTags : Allow user to set Tags +func (options *CreateJobOptions) SetTags(tags []string) *CreateJobOptions { + options.Tags = tags + return options +} + +// SetLocation : Allow user to set Location +func (options *CreateJobOptions) SetLocation(location string) *CreateJobOptions { + options.Location = core.StringPtr(location) + return options +} + +// SetStatus : Allow user to set Status +func (options *CreateJobOptions) SetStatus(status *JobStatus) *CreateJobOptions { + options.Status = status + return options +} + +// SetData : Allow user to set Data +func (options *CreateJobOptions) SetData(data *JobData) *CreateJobOptions { + options.Data = data + return options +} + +// SetBastion : Allow user to set Bastion +func (options *CreateJobOptions) SetBastion(bastion *TargetResourceset) *CreateJobOptions { + options.Bastion = bastion + return options +} + +// SetLogSummary : Allow user to set LogSummary +func (options *CreateJobOptions) SetLogSummary(logSummary *JobLogSummary) *CreateJobOptions { + options.LogSummary = logSummary + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateJobOptions) SetHeaders(param map[string]string) *CreateJobOptions { + options.Headers = param + return options +} + +// CreateSharedDatasetOptions : The CreateSharedDataset options. +type CreateSharedDatasetOptions struct { + // Automatically propagate changes to consumers. 
+ AutoPropagateChange *bool `json:"auto_propagate_change,omitempty"` + + // Dataset description. + Description *string `json:"description,omitempty"` + + // Affected workspaces. + EffectedWorkspaceIds []string `json:"effected_workspace_ids,omitempty"` + + // Resource group name. + ResourceGroup *string `json:"resource_group,omitempty"` + + // Shared dataset data. + SharedDatasetData []SharedDatasetData `json:"shared_dataset_data,omitempty"` + + // Shared dataset name. + SharedDatasetName *string `json:"shared_dataset_name,omitempty"` + + // Shared dataset source name. + SharedDatasetSourceName *string `json:"shared_dataset_source_name,omitempty"` + + // Shared dataset type. + SharedDatasetType []string `json:"shared_dataset_type,omitempty"` + + // Shared dataset tags. + Tags []string `json:"tags,omitempty"` + + // Shared dataset version. + Version *string `json:"version,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateSharedDatasetOptions : Instantiate CreateSharedDatasetOptions +func (*SchematicsV1) NewCreateSharedDatasetOptions() *CreateSharedDatasetOptions { + return &CreateSharedDatasetOptions{} +} + +// SetAutoPropagateChange : Allow user to set AutoPropagateChange +func (options *CreateSharedDatasetOptions) SetAutoPropagateChange(autoPropagateChange bool) *CreateSharedDatasetOptions { + options.AutoPropagateChange = core.BoolPtr(autoPropagateChange) + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateSharedDatasetOptions) SetDescription(description string) *CreateSharedDatasetOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetEffectedWorkspaceIds : Allow user to set EffectedWorkspaceIds +func (options *CreateSharedDatasetOptions) SetEffectedWorkspaceIds(effectedWorkspaceIds []string) *CreateSharedDatasetOptions { + options.EffectedWorkspaceIds = effectedWorkspaceIds + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateSharedDatasetOptions) SetResourceGroup(resourceGroup string) *CreateSharedDatasetOptions { + options.ResourceGroup = core.StringPtr(resourceGroup) + return options +} + +// SetSharedDatasetData : Allow user to set SharedDatasetData +func (options *CreateSharedDatasetOptions) SetSharedDatasetData(sharedDatasetData []SharedDatasetData) *CreateSharedDatasetOptions { + options.SharedDatasetData = sharedDatasetData + return options +} + +// SetSharedDatasetName : Allow user to set SharedDatasetName +func (options *CreateSharedDatasetOptions) SetSharedDatasetName(sharedDatasetName string) *CreateSharedDatasetOptions { + options.SharedDatasetName = core.StringPtr(sharedDatasetName) + return options +} + +// SetSharedDatasetSourceName : Allow user to set SharedDatasetSourceName +func (options *CreateSharedDatasetOptions) SetSharedDatasetSourceName(sharedDatasetSourceName string) *CreateSharedDatasetOptions { + options.SharedDatasetSourceName = core.StringPtr(sharedDatasetSourceName) + return options +} + +// SetSharedDatasetType : Allow user to set SharedDatasetType +func (options *CreateSharedDatasetOptions) SetSharedDatasetType(sharedDatasetType []string) *CreateSharedDatasetOptions { + options.SharedDatasetType = sharedDatasetType + return options +} + +// SetTags : Allow user to set Tags +func (options *CreateSharedDatasetOptions) SetTags(tags []string) *CreateSharedDatasetOptions { + options.Tags = tags + return options +} + +// SetVersion : Allow user to set Version +func (options 
*CreateSharedDatasetOptions) SetVersion(version string) *CreateSharedDatasetOptions { + options.Version = core.StringPtr(version) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateSharedDatasetOptions) SetHeaders(param map[string]string) *CreateSharedDatasetOptions { + options.Headers = param + return options +} + +// CreateWorkspaceDeletionJobOptions : The CreateWorkspaceDeletionJob options. +type CreateWorkspaceDeletionJobOptions struct { + // The IAM refresh token associated with the IBM Cloud account. + RefreshToken *string `json:"refresh_token" validate:"required"` + + // True to delete the workspace. + NewDeleteWorkspaces *bool `json:"new_delete_workspaces,omitempty"` + + // True to destroy the resources managed by this workspace. + NewDestroyResources *bool `json:"new_destroy_resources,omitempty"` + + // Workspace deletion job name. + NewJob *string `json:"new_job,omitempty"` + + // Version. + NewVersion *string `json:"new_version,omitempty"` + + // List of workspaces to be deleted. + NewWorkspaces []string `json:"new_workspaces,omitempty"` + + // true or 1 - to destroy resources before deleting the workspace. If this is true, refresh_token is mandatory. + DestroyResources *string `json:"destroy_resources,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateWorkspaceDeletionJobOptions : Instantiate CreateWorkspaceDeletionJobOptions +func (*SchematicsV1) NewCreateWorkspaceDeletionJobOptions(refreshToken string) *CreateWorkspaceDeletionJobOptions { + return &CreateWorkspaceDeletionJobOptions{ + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *CreateWorkspaceDeletionJobOptions) SetRefreshToken(refreshToken string) *CreateWorkspaceDeletionJobOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetNewDeleteWorkspaces : Allow user to set NewDeleteWorkspaces +func (options *CreateWorkspaceDeletionJobOptions) SetNewDeleteWorkspaces(newDeleteWorkspaces bool) *CreateWorkspaceDeletionJobOptions { + options.NewDeleteWorkspaces = core.BoolPtr(newDeleteWorkspaces) + return options +} + +// SetNewDestroyResources : Allow user to set NewDestroyResources +func (options *CreateWorkspaceDeletionJobOptions) SetNewDestroyResources(newDestroyResources bool) *CreateWorkspaceDeletionJobOptions { + options.NewDestroyResources = core.BoolPtr(newDestroyResources) + return options +} + +// SetNewJob : Allow user to set NewJob +func (options *CreateWorkspaceDeletionJobOptions) SetNewJob(newJob string) *CreateWorkspaceDeletionJobOptions { + options.NewJob = core.StringPtr(newJob) + return options +} + +// SetNewVersion : Allow user to set NewVersion +func (options *CreateWorkspaceDeletionJobOptions) SetNewVersion(newVersion string) *CreateWorkspaceDeletionJobOptions { + options.NewVersion = core.StringPtr(newVersion) + return options +} + +// SetNewWorkspaces : Allow user to set NewWorkspaces +func (options *CreateWorkspaceDeletionJobOptions) SetNewWorkspaces(newWorkspaces []string) *CreateWorkspaceDeletionJobOptions { + options.NewWorkspaces = newWorkspaces + return options +} + +// SetDestroyResources : Allow user to set DestroyResources +func (options *CreateWorkspaceDeletionJobOptions) SetDestroyResources(destroyResources string) *CreateWorkspaceDeletionJobOptions { + options.DestroyResources = core.StringPtr(destroyResources) + return options +} + +// SetHeaders : Allow user to set Headers +func (options
*CreateWorkspaceDeletionJobOptions) SetHeaders(param map[string]string) *CreateWorkspaceDeletionJobOptions { + options.Headers = param + return options +} + +// CreateWorkspaceOptions : The CreateWorkspace options. +type CreateWorkspaceOptions struct { + // List of applied shared dataset id. + AppliedShareddataIds []string `json:"applied_shareddata_ids,omitempty"` + + // CatalogRef -. + CatalogRef *CatalogRef `json:"catalog_ref,omitempty"` + + // Workspace description. + Description *string `json:"description,omitempty"` + + // Workspace location. + Location *string `json:"location,omitempty"` + + // Workspace name. + Name *string `json:"name,omitempty"` + + // Workspace resource group. + ResourceGroup *string `json:"resource_group,omitempty"` + + // SharedTargetData -. + SharedData *SharedTargetData `json:"shared_data,omitempty"` + + // Workspace tags. + Tags []string `json:"tags,omitempty"` + + // TemplateData -. + TemplateData []TemplateSourceDataRequest `json:"template_data,omitempty"` + + // Workspace template ref. + TemplateRef *string `json:"template_ref,omitempty"` + + // TemplateRepoRequest -. + TemplateRepo *TemplateRepoRequest `json:"template_repo,omitempty"` + + // List of Workspace type. + Type []string `json:"type,omitempty"` + + // WorkspaceStatusRequest -. + WorkspaceStatus *WorkspaceStatusRequest `json:"workspace_status,omitempty"` + + // The github token associated with the GIT. Required for cloning of repo. + XGithubToken *string `json:"X-Github-token,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateWorkspaceOptions : Instantiate CreateWorkspaceOptions +func (*SchematicsV1) NewCreateWorkspaceOptions() *CreateWorkspaceOptions { + return &CreateWorkspaceOptions{} +} + +// SetAppliedShareddataIds : Allow user to set AppliedShareddataIds +func (options *CreateWorkspaceOptions) SetAppliedShareddataIds(appliedShareddataIds []string) *CreateWorkspaceOptions { + options.AppliedShareddataIds = appliedShareddataIds + return options +} + +// SetCatalogRef : Allow user to set CatalogRef +func (options *CreateWorkspaceOptions) SetCatalogRef(catalogRef *CatalogRef) *CreateWorkspaceOptions { + options.CatalogRef = catalogRef + return options +} + +// SetDescription : Allow user to set Description +func (options *CreateWorkspaceOptions) SetDescription(description string) *CreateWorkspaceOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetLocation : Allow user to set Location +func (options *CreateWorkspaceOptions) SetLocation(location string) *CreateWorkspaceOptions { + options.Location = core.StringPtr(location) + return options +} + +// SetName : Allow user to set Name +func (options *CreateWorkspaceOptions) SetName(name string) *CreateWorkspaceOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateWorkspaceOptions) SetResourceGroup(resourceGroup string) *CreateWorkspaceOptions { + options.ResourceGroup = core.StringPtr(resourceGroup) + return options +} + +// SetSharedData : Allow user to set SharedData +func (options *CreateWorkspaceOptions) SetSharedData(sharedData *SharedTargetData) *CreateWorkspaceOptions { + options.SharedData = sharedData + return options +} + +// SetTags : Allow user to set Tags +func (options *CreateWorkspaceOptions) SetTags(tags []string) *CreateWorkspaceOptions { + options.Tags = tags + return options +} + +// SetTemplateData : Allow user to set TemplateData +func 
(options *CreateWorkspaceOptions) SetTemplateData(templateData []TemplateSourceDataRequest) *CreateWorkspaceOptions { + options.TemplateData = templateData + return options +} + +// SetTemplateRef : Allow user to set TemplateRef +func (options *CreateWorkspaceOptions) SetTemplateRef(templateRef string) *CreateWorkspaceOptions { + options.TemplateRef = core.StringPtr(templateRef) + return options +} + +// SetTemplateRepo : Allow user to set TemplateRepo +func (options *CreateWorkspaceOptions) SetTemplateRepo(templateRepo *TemplateRepoRequest) *CreateWorkspaceOptions { + options.TemplateRepo = templateRepo + return options +} + +// SetType : Allow user to set Type +func (options *CreateWorkspaceOptions) SetType(typeVar []string) *CreateWorkspaceOptions { + options.Type = typeVar + return options +} + +// SetWorkspaceStatus : Allow user to set WorkspaceStatus +func (options *CreateWorkspaceOptions) SetWorkspaceStatus(workspaceStatus *WorkspaceStatusRequest) *CreateWorkspaceOptions { + options.WorkspaceStatus = workspaceStatus + return options +} + +// SetXGithubToken : Allow user to set XGithubToken +func (options *CreateWorkspaceOptions) SetXGithubToken(xGithubToken string) *CreateWorkspaceOptions { + options.XGithubToken = core.StringPtr(xGithubToken) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateWorkspaceOptions) SetHeaders(param map[string]string) *CreateWorkspaceOptions { + options.Headers = param + return options +} + +// DeleteActionOptions : The DeleteAction options. +type DeleteActionOptions struct { + // Action Id. Use GET /actions API to look up the Action Ids in your IBM Cloud account. + ActionID *string `json:"action_id" validate:"required,ne="` + + // Equivalent to the -force option in the command line. + Force *bool `json:"force,omitempty"` + + // Auto propagate the change or deletion to the dependent resources. + Propagate *bool `json:"propagate,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteActionOptions : Instantiate DeleteActionOptions +func (*SchematicsV1) NewDeleteActionOptions(actionID string) *DeleteActionOptions { + return &DeleteActionOptions{ + ActionID: core.StringPtr(actionID), + } +} + +// SetActionID : Allow user to set ActionID +func (options *DeleteActionOptions) SetActionID(actionID string) *DeleteActionOptions { + options.ActionID = core.StringPtr(actionID) + return options +} + +// SetForce : Allow user to set Force +func (options *DeleteActionOptions) SetForce(force bool) *DeleteActionOptions { + options.Force = core.BoolPtr(force) + return options +} + +// SetPropagate : Allow user to set Propagate +func (options *DeleteActionOptions) SetPropagate(propagate bool) *DeleteActionOptions { + options.Propagate = core.BoolPtr(propagate) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteActionOptions) SetHeaders(param map[string]string) *DeleteActionOptions { + options.Headers = param + return options +} + +// DeleteJobOptions : The DeleteJob options. +type DeleteJobOptions struct { + // Job Id. Use GET /jobs API to look up the Job Ids in your IBM Cloud account. + JobID *string `json:"job_id" validate:"required,ne="` + + // The IAM refresh token associated with the IBM Cloud account. + RefreshToken *string `json:"refresh_token" validate:"required"` + + // Equivalent to the -force option in the command line. + Force *bool `json:"force,omitempty"` + + // Auto propagate the change or deletion to the dependent resources.
+ Propagate *bool `json:"propagate,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteJobOptions : Instantiate DeleteJobOptions +func (*SchematicsV1) NewDeleteJobOptions(jobID string, refreshToken string) *DeleteJobOptions { + return &DeleteJobOptions{ + JobID: core.StringPtr(jobID), + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetJobID : Allow user to set JobID +func (options *DeleteJobOptions) SetJobID(jobID string) *DeleteJobOptions { + options.JobID = core.StringPtr(jobID) + return options +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *DeleteJobOptions) SetRefreshToken(refreshToken string) *DeleteJobOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetForce : Allow user to set Force +func (options *DeleteJobOptions) SetForce(force bool) *DeleteJobOptions { + options.Force = core.BoolPtr(force) + return options +} + +// SetPropagate : Allow user to set Propagate +func (options *DeleteJobOptions) SetPropagate(propagate bool) *DeleteJobOptions { + options.Propagate = core.BoolPtr(propagate) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteJobOptions) SetHeaders(param map[string]string) *DeleteJobOptions { + options.Headers = param + return options +} + +// DeleteSharedDatasetOptions : The DeleteSharedDataset options. +type DeleteSharedDatasetOptions struct { + // The shared dataset ID. Use the GET /shared_datasets API to look up the shared dataset IDs in your IBM Cloud account. + SdID *string `json:"sd_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSharedDatasetOptions : Instantiate DeleteSharedDatasetOptions +func (*SchematicsV1) NewDeleteSharedDatasetOptions(sdID string) *DeleteSharedDatasetOptions { + return &DeleteSharedDatasetOptions{ + SdID: core.StringPtr(sdID), + } +} + +// SetSdID : Allow user to set SdID +func (options *DeleteSharedDatasetOptions) SetSdID(sdID string) *DeleteSharedDatasetOptions { + options.SdID = core.StringPtr(sdID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSharedDatasetOptions) SetHeaders(param map[string]string) *DeleteSharedDatasetOptions { + options.Headers = param + return options +} + +// DeleteWorkspaceActivityOptions : The DeleteWorkspaceActivity options. +type DeleteWorkspaceActivityOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The activity ID for which you want to see additional details.
+ ActivityID *string `json:"activity_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteWorkspaceActivityOptions : Instantiate DeleteWorkspaceActivityOptions +func (*SchematicsV1) NewDeleteWorkspaceActivityOptions(wID string, activityID string) *DeleteWorkspaceActivityOptions { + return &DeleteWorkspaceActivityOptions{ + WID: core.StringPtr(wID), + ActivityID: core.StringPtr(activityID), + } +} + +// SetWID : Allow user to set WID +func (options *DeleteWorkspaceActivityOptions) SetWID(wID string) *DeleteWorkspaceActivityOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetActivityID : Allow user to set ActivityID +func (options *DeleteWorkspaceActivityOptions) SetActivityID(activityID string) *DeleteWorkspaceActivityOptions { + options.ActivityID = core.StringPtr(activityID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteWorkspaceActivityOptions) SetHeaders(param map[string]string) *DeleteWorkspaceActivityOptions { + options.Headers = param + return options +} + +// DeleteWorkspaceOptions : The DeleteWorkspace options. +type DeleteWorkspaceOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The IAM refresh token associated with the IBM Cloud account. + RefreshToken *string `json:"refresh_token" validate:"required"` + + // true or 1 - to destroy resources before deleting the workspace. If this is true, refresh_token is mandatory. + DestroyResources *string `json:"destroy_resources,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteWorkspaceOptions : Instantiate DeleteWorkspaceOptions +func (*SchematicsV1) NewDeleteWorkspaceOptions(wID string, refreshToken string) *DeleteWorkspaceOptions { + return &DeleteWorkspaceOptions{ + WID: core.StringPtr(wID), + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetWID : Allow user to set WID +func (options *DeleteWorkspaceOptions) SetWID(wID string) *DeleteWorkspaceOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *DeleteWorkspaceOptions) SetRefreshToken(refreshToken string) *DeleteWorkspaceOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetDestroyResources : Allow user to set DestroyResources +func (options *DeleteWorkspaceOptions) SetDestroyResources(destroyResources string) *DeleteWorkspaceOptions { + options.DestroyResources = core.StringPtr(destroyResources) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteWorkspaceOptions) SetHeaders(param map[string]string) *DeleteWorkspaceOptions { + options.Headers = param + return options +} + +// DestroyWorkspaceCommandOptions : The DestroyWorkspaceCommand options. +type DestroyWorkspaceCommandOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The IAM refresh token associated with the IBM Cloud account. + RefreshToken *string `json:"refresh_token" validate:"required"` + + // Action Options Template ...
+ ActionOptions *WorkspaceActivityOptionsTemplate `json:"action_options,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDestroyWorkspaceCommandOptions : Instantiate DestroyWorkspaceCommandOptions +func (*SchematicsV1) NewDestroyWorkspaceCommandOptions(wID string, refreshToken string) *DestroyWorkspaceCommandOptions { + return &DestroyWorkspaceCommandOptions{ + WID: core.StringPtr(wID), + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetWID : Allow user to set WID +func (options *DestroyWorkspaceCommandOptions) SetWID(wID string) *DestroyWorkspaceCommandOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *DestroyWorkspaceCommandOptions) SetRefreshToken(refreshToken string) *DestroyWorkspaceCommandOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetActionOptions : Allow user to set ActionOptions +func (options *DestroyWorkspaceCommandOptions) SetActionOptions(actionOptions *WorkspaceActivityOptionsTemplate) *DestroyWorkspaceCommandOptions { + options.ActionOptions = actionOptions + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DestroyWorkspaceCommandOptions) SetHeaders(param map[string]string) *DestroyWorkspaceCommandOptions { + options.Headers = param + return options +} + +// EnvVariableResponse : EnvVariableResponse -. +type EnvVariableResponse struct { + // Env variable is hidden. + Hidden *bool `json:"hidden,omitempty"` + + // Env variable name. + Name *string `json:"name,omitempty"` + + // Env variable is secure. + Secure *bool `json:"secure,omitempty"` + + // Value for env variable. + Value *string `json:"value,omitempty"` +} + +// UnmarshalEnvVariableResponse unmarshals an instance of EnvVariableResponse from the specified map of raw messages. +func UnmarshalEnvVariableResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(EnvVariableResponse) + err = core.UnmarshalPrimitive(m, "hidden", &obj.Hidden) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secure", &obj.Secure) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ExternalSource : Source of templates, playbooks, or controls. +type ExternalSource struct { + // Type of source for the Template. + SourceType *string `json:"source_type" validate:"required"` + + // Connection details to Git source. + Git *ExternalSourceGit `json:"git,omitempty"` +} + +// Constants associated with the ExternalSource.SourceType property. +// Type of source for the Template. 
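+// As an illustrative sketch (hypothetical repository URL and client variable,
+// assuming a configured *SchematicsV1 client named "service"), a GitHub-backed
+// source could be built as:
+//
+//   source, err := service.NewExternalSource(ExternalSource_SourceType_GitHub)
+//   if err == nil {
+//     source.Git = &ExternalSourceGit{
+//       GitRepoURL: core.StringPtr("https://github.com/example/repo"),
+//       GitBranch:  core.StringPtr("main"),
+//     }
+//   }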
+const ( + ExternalSource_SourceType_ExternalScm = "external_scm" + ExternalSource_SourceType_GitHub = "git_hub" + ExternalSource_SourceType_GitHubEnterprise = "git_hub_enterprise" + ExternalSource_SourceType_GitLab = "git_lab" + ExternalSource_SourceType_IbmCloudCatalog = "ibm_cloud_catalog" + ExternalSource_SourceType_IbmGitLab = "ibm_git_lab" + ExternalSource_SourceType_Local = "local" +) + +// NewExternalSource : Instantiate ExternalSource (Generic Model Constructor) +func (*SchematicsV1) NewExternalSource(sourceType string) (model *ExternalSource, err error) { + model = &ExternalSource{ + SourceType: core.StringPtr(sourceType), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalExternalSource unmarshals an instance of ExternalSource from the specified map of raw messages. +func UnmarshalExternalSource(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ExternalSource) + err = core.UnmarshalPrimitive(m, "source_type", &obj.SourceType) + if err != nil { + return + } + err = core.UnmarshalModel(m, "git", &obj.Git, UnmarshalExternalSourceGit) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ExternalSourceGit : Connection details to Git source. +type ExternalSourceGit struct { + // URL to the GIT Repo that can be used to clone the template. + GitRepoURL *string `json:"git_repo_url,omitempty"` + + // Personal Access Token to connect to Git URLs. + GitToken *string `json:"git_token,omitempty"` + + // Name of the folder in the Git Repo, that contains the template. + GitRepoFolder *string `json:"git_repo_folder,omitempty"` + + // Name of the release tag, used to fetch the Git Repo. + GitRelease *string `json:"git_release,omitempty"` + + // Name of the branch, used to fetch the Git Repo. + GitBranch *string `json:"git_branch,omitempty"` +} + +// UnmarshalExternalSourceGit unmarshals an instance of ExternalSourceGit from the specified map of raw messages. +func UnmarshalExternalSourceGit(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ExternalSourceGit) + err = core.UnmarshalPrimitive(m, "git_repo_url", &obj.GitRepoURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "git_token", &obj.GitToken) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "git_repo_folder", &obj.GitRepoFolder) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "git_release", &obj.GitRelease) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "git_branch", &obj.GitBranch) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetActionOptions : The GetAction options. +type GetActionOptions struct { + // Action Id. Use GET /actions API to look up the Action Ids in your IBM Cloud account. + ActionID *string `json:"action_id" validate:"required,ne="` + + // Level of details returned by the get method. + Profile *string `json:"profile,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetActionOptions.Profile property. +// Level of details returned by the get method. 
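+// For example (a hypothetical sketch with an illustrative action ID, assuming
+// a configured *SchematicsV1 client named "service"), the summary profile can
+// be requested with the chainable setters:
+//
+//   opts := service.NewGetActionOptions("myActionID").
+//     SetProfile(GetActionOptions_Profile_Summary)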
+const (
+ GetActionOptions_Profile_Detailed = "detailed"
+ GetActionOptions_Profile_Summary = "summary"
+)
+
+// NewGetActionOptions : Instantiate GetActionOptions
+func (*SchematicsV1) NewGetActionOptions(actionID string) *GetActionOptions {
+ return &GetActionOptions{
+ ActionID: core.StringPtr(actionID),
+ }
+}
+
+// SetActionID : Allow user to set ActionID
+func (options *GetActionOptions) SetActionID(actionID string) *GetActionOptions {
+ options.ActionID = core.StringPtr(actionID)
+ return options
+}
+
+// SetProfile : Allow user to set Profile
+func (options *GetActionOptions) SetProfile(profile string) *GetActionOptions {
+ options.Profile = core.StringPtr(profile)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetActionOptions) SetHeaders(param map[string]string) *GetActionOptions {
+ options.Headers = param
+ return options
+}
+
+// GetAllWorkspaceInputsOptions : The GetAllWorkspaceInputs options.
+type GetAllWorkspaceInputsOptions struct {
+ // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look
+ // up the workspace IDs in your IBM Cloud account.
+ WID *string `json:"w_id" validate:"required,ne="`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewGetAllWorkspaceInputsOptions : Instantiate GetAllWorkspaceInputsOptions
+func (*SchematicsV1) NewGetAllWorkspaceInputsOptions(wID string) *GetAllWorkspaceInputsOptions {
+ return &GetAllWorkspaceInputsOptions{
+ WID: core.StringPtr(wID),
+ }
+}
+
+// SetWID : Allow user to set WID
+func (options *GetAllWorkspaceInputsOptions) SetWID(wID string) *GetAllWorkspaceInputsOptions {
+ options.WID = core.StringPtr(wID)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetAllWorkspaceInputsOptions) SetHeaders(param map[string]string) *GetAllWorkspaceInputsOptions {
+ options.Headers = param
+ return options
+}
+
+// GetDiscoveredKmsInstancesOptions : The GetDiscoveredKmsInstances options.
+type GetDiscoveredKmsInstancesOptions struct {
+ // The encryption scheme to be used.
+ EncryptionScheme *string `json:"encryption_scheme" validate:"required"`
+
+ // The location of the Resource.
+ Location *string `json:"location" validate:"required"`
+
+ // The resource group (by default, fetch from all resource groups).
+ ResourceGroup *string `json:"resource_group,omitempty"`
+
+ // The number of items to return.
+ Limit *int64 `json:"limit,omitempty"`
+
+ // Name of the field to sort by. Use the '.' character to delineate sub-resources and sub-fields (e.g.
+ // owner.last_name). Prepend the field with '+' or '-' to indicate ascending or descending order (the default is
+ // ascending). Unrecognized or unsupported sort fields are ignored.
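+ // For example, "-owner.last_name" would sort in descending order by the
+ // owner's last name (an illustrative field name taken from the description
+ // above).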
+ Sort *string `json:"sort,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetDiscoveredKmsInstancesOptions : Instantiate GetDiscoveredKmsInstancesOptions +func (*SchematicsV1) NewGetDiscoveredKmsInstancesOptions(encryptionScheme string, location string) *GetDiscoveredKmsInstancesOptions { + return &GetDiscoveredKmsInstancesOptions{ + EncryptionScheme: core.StringPtr(encryptionScheme), + Location: core.StringPtr(location), + } +} + +// SetEncryptionScheme : Allow user to set EncryptionScheme +func (options *GetDiscoveredKmsInstancesOptions) SetEncryptionScheme(encryptionScheme string) *GetDiscoveredKmsInstancesOptions { + options.EncryptionScheme = core.StringPtr(encryptionScheme) + return options +} + +// SetLocation : Allow user to set Location +func (options *GetDiscoveredKmsInstancesOptions) SetLocation(location string) *GetDiscoveredKmsInstancesOptions { + options.Location = core.StringPtr(location) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *GetDiscoveredKmsInstancesOptions) SetResourceGroup(resourceGroup string) *GetDiscoveredKmsInstancesOptions { + options.ResourceGroup = core.StringPtr(resourceGroup) + return options +} + +// SetLimit : Allow user to set Limit +func (options *GetDiscoveredKmsInstancesOptions) SetLimit(limit int64) *GetDiscoveredKmsInstancesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetSort : Allow user to set Sort +func (options *GetDiscoveredKmsInstancesOptions) SetSort(sort string) *GetDiscoveredKmsInstancesOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetDiscoveredKmsInstancesOptions) SetHeaders(param map[string]string) *GetDiscoveredKmsInstancesOptions { + options.Headers = param + return options +} + +// GetJobOptions : The GetJob options. +type GetJobOptions struct { + // Job Id. Use GET /jobs API to look up the Job Ids in your IBM Cloud account. + JobID *string `json:"job_id" validate:"required,ne="` + + // Level of details returned by the get method. + Profile *string `json:"profile,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetJobOptions.Profile property. +// Level of details returned by the get method. +const ( + GetJobOptions_Profile_Detailed = "detailed" + GetJobOptions_Profile_Summary = "summary" +) + +// NewGetJobOptions : Instantiate GetJobOptions +func (*SchematicsV1) NewGetJobOptions(jobID string) *GetJobOptions { + return &GetJobOptions{ + JobID: core.StringPtr(jobID), + } +} + +// SetJobID : Allow user to set JobID +func (options *GetJobOptions) SetJobID(jobID string) *GetJobOptions { + options.JobID = core.StringPtr(jobID) + return options +} + +// SetProfile : Allow user to set Profile +func (options *GetJobOptions) SetProfile(profile string) *GetJobOptions { + options.Profile = core.StringPtr(profile) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetJobOptions) SetHeaders(param map[string]string) *GetJobOptions { + options.Headers = param + return options +} + +// GetKmsSettingsOptions : The GetKmsSettings options. +type GetKmsSettingsOptions struct { + // The location of the Resource. 
+ Location *string `json:"location" validate:"required"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewGetKmsSettingsOptions : Instantiate GetKmsSettingsOptions
+func (*SchematicsV1) NewGetKmsSettingsOptions(location string) *GetKmsSettingsOptions {
+ return &GetKmsSettingsOptions{
+ Location: core.StringPtr(location),
+ }
+}
+
+// SetLocation : Allow user to set Location
+func (options *GetKmsSettingsOptions) SetLocation(location string) *GetKmsSettingsOptions {
+ options.Location = core.StringPtr(location)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetKmsSettingsOptions) SetHeaders(param map[string]string) *GetKmsSettingsOptions {
+ options.Headers = param
+ return options
+}
+
+// GetSchematicsVersionOptions : The GetSchematicsVersion options.
+type GetSchematicsVersionOptions struct {
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewGetSchematicsVersionOptions : Instantiate GetSchematicsVersionOptions
+func (*SchematicsV1) NewGetSchematicsVersionOptions() *GetSchematicsVersionOptions {
+ return &GetSchematicsVersionOptions{}
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetSchematicsVersionOptions) SetHeaders(param map[string]string) *GetSchematicsVersionOptions {
+ options.Headers = param
+ return options
+}
+
+// GetSharedDatasetOptions : The GetSharedDataset options.
+type GetSharedDatasetOptions struct {
+ // The shared dataset ID. Use the GET /shared_datasets call to look up the shared dataset IDs in your IBM Cloud account.
+ SdID *string `json:"sd_id" validate:"required,ne="`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewGetSharedDatasetOptions : Instantiate GetSharedDatasetOptions
+func (*SchematicsV1) NewGetSharedDatasetOptions(sdID string) *GetSharedDatasetOptions {
+ return &GetSharedDatasetOptions{
+ SdID: core.StringPtr(sdID),
+ }
+}
+
+// SetSdID : Allow user to set SdID
+func (options *GetSharedDatasetOptions) SetSdID(sdID string) *GetSharedDatasetOptions {
+ options.SdID = core.StringPtr(sdID)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *GetSharedDatasetOptions) SetHeaders(param map[string]string) *GetSharedDatasetOptions {
+ options.Headers = param
+ return options
+}
+
+// GetTemplateActivityLogOptions : The GetTemplateActivityLog options.
+type GetTemplateActivityLogOptions struct {
+ // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look
+ // up the workspace IDs in your IBM Cloud account.
+ WID *string `json:"w_id" validate:"required,ne="`
+
+ // The Template ID for which you want to get the values. Use the GET /workspaces to look up the workspace IDs or
+ // template IDs in your IBM Cloud account.
+ TID *string `json:"t_id" validate:"required,ne="`
+
+ // The activity ID for which you want to see additional details.
+ ActivityID *string `json:"activity_id" validate:"required,ne="`
+
+ // `false` will hide the terraform command header in the logs.
+ LogTfCmd *bool `json:"log_tf_cmd,omitempty"`
+
+ // `false` will hide all the terraform command prefixes in the log statements.
+ LogTfPrefix *bool `json:"log_tf_prefix,omitempty"`
+
+ // `false` will hide all the null resource prefixes in the log statements.
+ LogTfNullResource *bool `json:"log_tf_null_resource,omitempty"`
+
+ // `true` will format all logs to withhold the original format of ansible output in the log statements.
+ LogTfAnsible *bool `json:"log_tf_ansible,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetTemplateActivityLogOptions : Instantiate GetTemplateActivityLogOptions +func (*SchematicsV1) NewGetTemplateActivityLogOptions(wID string, tID string, activityID string) *GetTemplateActivityLogOptions { + return &GetTemplateActivityLogOptions{ + WID: core.StringPtr(wID), + TID: core.StringPtr(tID), + ActivityID: core.StringPtr(activityID), + } +} + +// SetWID : Allow user to set WID +func (options *GetTemplateActivityLogOptions) SetWID(wID string) *GetTemplateActivityLogOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetTID : Allow user to set TID +func (options *GetTemplateActivityLogOptions) SetTID(tID string) *GetTemplateActivityLogOptions { + options.TID = core.StringPtr(tID) + return options +} + +// SetActivityID : Allow user to set ActivityID +func (options *GetTemplateActivityLogOptions) SetActivityID(activityID string) *GetTemplateActivityLogOptions { + options.ActivityID = core.StringPtr(activityID) + return options +} + +// SetLogTfCmd : Allow user to set LogTfCmd +func (options *GetTemplateActivityLogOptions) SetLogTfCmd(logTfCmd bool) *GetTemplateActivityLogOptions { + options.LogTfCmd = core.BoolPtr(logTfCmd) + return options +} + +// SetLogTfPrefix : Allow user to set LogTfPrefix +func (options *GetTemplateActivityLogOptions) SetLogTfPrefix(logTfPrefix bool) *GetTemplateActivityLogOptions { + options.LogTfPrefix = core.BoolPtr(logTfPrefix) + return options +} + +// SetLogTfNullResource : Allow user to set LogTfNullResource +func (options *GetTemplateActivityLogOptions) SetLogTfNullResource(logTfNullResource bool) *GetTemplateActivityLogOptions { + options.LogTfNullResource = core.BoolPtr(logTfNullResource) + return options +} + +// SetLogTfAnsible : Allow user to set LogTfAnsible +func (options *GetTemplateActivityLogOptions) SetLogTfAnsible(logTfAnsible bool) *GetTemplateActivityLogOptions { + options.LogTfAnsible = core.BoolPtr(logTfAnsible) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetTemplateActivityLogOptions) SetHeaders(param map[string]string) *GetTemplateActivityLogOptions { + options.Headers = param + return options +} + +// GetTemplateLogsOptions : The GetTemplateLogs options. +type GetTemplateLogsOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The Template ID for which you want to get the values. Use the GET /workspaces to look up the workspace IDs or + // template IDs in your IBM Cloud account. + TID *string `json:"t_id" validate:"required,ne="` + + // `false` will hide the terraform command header in the logs. + LogTfCmd *bool `json:"log_tf_cmd,omitempty"` + + // `false` will hide all the terraform command prefix in the log statements. + LogTfPrefix *bool `json:"log_tf_prefix,omitempty"` + + // `false` will hide all the null resource prefix in the log statements. + LogTfNullResource *bool `json:"log_tf_null_resource,omitempty"` + + // `true` will format all logs to withhold the original format of ansible output in the log statements. 
+ LogTfAnsible *bool `json:"log_tf_ansible,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetTemplateLogsOptions : Instantiate GetTemplateLogsOptions +func (*SchematicsV1) NewGetTemplateLogsOptions(wID string, tID string) *GetTemplateLogsOptions { + return &GetTemplateLogsOptions{ + WID: core.StringPtr(wID), + TID: core.StringPtr(tID), + } +} + +// SetWID : Allow user to set WID +func (options *GetTemplateLogsOptions) SetWID(wID string) *GetTemplateLogsOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetTID : Allow user to set TID +func (options *GetTemplateLogsOptions) SetTID(tID string) *GetTemplateLogsOptions { + options.TID = core.StringPtr(tID) + return options +} + +// SetLogTfCmd : Allow user to set LogTfCmd +func (options *GetTemplateLogsOptions) SetLogTfCmd(logTfCmd bool) *GetTemplateLogsOptions { + options.LogTfCmd = core.BoolPtr(logTfCmd) + return options +} + +// SetLogTfPrefix : Allow user to set LogTfPrefix +func (options *GetTemplateLogsOptions) SetLogTfPrefix(logTfPrefix bool) *GetTemplateLogsOptions { + options.LogTfPrefix = core.BoolPtr(logTfPrefix) + return options +} + +// SetLogTfNullResource : Allow user to set LogTfNullResource +func (options *GetTemplateLogsOptions) SetLogTfNullResource(logTfNullResource bool) *GetTemplateLogsOptions { + options.LogTfNullResource = core.BoolPtr(logTfNullResource) + return options +} + +// SetLogTfAnsible : Allow user to set LogTfAnsible +func (options *GetTemplateLogsOptions) SetLogTfAnsible(logTfAnsible bool) *GetTemplateLogsOptions { + options.LogTfAnsible = core.BoolPtr(logTfAnsible) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetTemplateLogsOptions) SetHeaders(param map[string]string) *GetTemplateLogsOptions { + options.Headers = param + return options +} + +// GetWorkspaceActivityLogsOptions : The GetWorkspaceActivityLogs options. +type GetWorkspaceActivityLogsOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The activity ID that you want to see additional details. + ActivityID *string `json:"activity_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceActivityLogsOptions : Instantiate GetWorkspaceActivityLogsOptions +func (*SchematicsV1) NewGetWorkspaceActivityLogsOptions(wID string, activityID string) *GetWorkspaceActivityLogsOptions { + return &GetWorkspaceActivityLogsOptions{ + WID: core.StringPtr(wID), + ActivityID: core.StringPtr(activityID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceActivityLogsOptions) SetWID(wID string) *GetWorkspaceActivityLogsOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetActivityID : Allow user to set ActivityID +func (options *GetWorkspaceActivityLogsOptions) SetActivityID(activityID string) *GetWorkspaceActivityLogsOptions { + options.ActivityID = core.StringPtr(activityID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceActivityLogsOptions) SetHeaders(param map[string]string) *GetWorkspaceActivityLogsOptions { + options.Headers = param + return options +} + +// GetWorkspaceActivityOptions : The GetWorkspaceActivity options. 
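+// For example (a hypothetical sketch with illustrative IDs, assuming a
+// configured *SchematicsV1 client named "service"):
+//
+//   opts := service.NewGetWorkspaceActivityOptions("myWorkspaceID", "myActivityID")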
+type GetWorkspaceActivityOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The activity ID that you want to see additional details. + ActivityID *string `json:"activity_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceActivityOptions : Instantiate GetWorkspaceActivityOptions +func (*SchematicsV1) NewGetWorkspaceActivityOptions(wID string, activityID string) *GetWorkspaceActivityOptions { + return &GetWorkspaceActivityOptions{ + WID: core.StringPtr(wID), + ActivityID: core.StringPtr(activityID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceActivityOptions) SetWID(wID string) *GetWorkspaceActivityOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetActivityID : Allow user to set ActivityID +func (options *GetWorkspaceActivityOptions) SetActivityID(activityID string) *GetWorkspaceActivityOptions { + options.ActivityID = core.StringPtr(activityID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceActivityOptions) SetHeaders(param map[string]string) *GetWorkspaceActivityOptions { + options.Headers = param + return options +} + +// GetWorkspaceDeletionJobStatusOptions : The GetWorkspaceDeletionJobStatus options. +type GetWorkspaceDeletionJobStatusOptions struct { + // The workspace job deletion ID. + WjID *string `json:"wj_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceDeletionJobStatusOptions : Instantiate GetWorkspaceDeletionJobStatusOptions +func (*SchematicsV1) NewGetWorkspaceDeletionJobStatusOptions(wjID string) *GetWorkspaceDeletionJobStatusOptions { + return &GetWorkspaceDeletionJobStatusOptions{ + WjID: core.StringPtr(wjID), + } +} + +// SetWjID : Allow user to set WjID +func (options *GetWorkspaceDeletionJobStatusOptions) SetWjID(wjID string) *GetWorkspaceDeletionJobStatusOptions { + options.WjID = core.StringPtr(wjID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceDeletionJobStatusOptions) SetHeaders(param map[string]string) *GetWorkspaceDeletionJobStatusOptions { + options.Headers = param + return options +} + +// GetWorkspaceInputMetadataOptions : The GetWorkspaceInputMetadata options. +type GetWorkspaceInputMetadataOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The Template ID for which you want to get the values. Use the GET /workspaces to look up the workspace IDs or + // template IDs in your IBM Cloud account. 
+ TID *string `json:"t_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceInputMetadataOptions : Instantiate GetWorkspaceInputMetadataOptions +func (*SchematicsV1) NewGetWorkspaceInputMetadataOptions(wID string, tID string) *GetWorkspaceInputMetadataOptions { + return &GetWorkspaceInputMetadataOptions{ + WID: core.StringPtr(wID), + TID: core.StringPtr(tID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceInputMetadataOptions) SetWID(wID string) *GetWorkspaceInputMetadataOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetTID : Allow user to set TID +func (options *GetWorkspaceInputMetadataOptions) SetTID(tID string) *GetWorkspaceInputMetadataOptions { + options.TID = core.StringPtr(tID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceInputMetadataOptions) SetHeaders(param map[string]string) *GetWorkspaceInputMetadataOptions { + options.Headers = param + return options +} + +// GetWorkspaceInputsOptions : The GetWorkspaceInputs options. +type GetWorkspaceInputsOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The Template ID for which you want to get the values. Use the GET /workspaces to look up the workspace IDs or + // template IDs in your IBM Cloud account. + TID *string `json:"t_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceInputsOptions : Instantiate GetWorkspaceInputsOptions +func (*SchematicsV1) NewGetWorkspaceInputsOptions(wID string, tID string) *GetWorkspaceInputsOptions { + return &GetWorkspaceInputsOptions{ + WID: core.StringPtr(wID), + TID: core.StringPtr(tID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceInputsOptions) SetWID(wID string) *GetWorkspaceInputsOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetTID : Allow user to set TID +func (options *GetWorkspaceInputsOptions) SetTID(tID string) *GetWorkspaceInputsOptions { + options.TID = core.StringPtr(tID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceInputsOptions) SetHeaders(param map[string]string) *GetWorkspaceInputsOptions { + options.Headers = param + return options +} + +// GetWorkspaceLogUrlsOptions : The GetWorkspaceLogUrls options. +type GetWorkspaceLogUrlsOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. 
+ WID *string `json:"w_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceLogUrlsOptions : Instantiate GetWorkspaceLogUrlsOptions +func (*SchematicsV1) NewGetWorkspaceLogUrlsOptions(wID string) *GetWorkspaceLogUrlsOptions { + return &GetWorkspaceLogUrlsOptions{ + WID: core.StringPtr(wID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceLogUrlsOptions) SetWID(wID string) *GetWorkspaceLogUrlsOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceLogUrlsOptions) SetHeaders(param map[string]string) *GetWorkspaceLogUrlsOptions { + options.Headers = param + return options +} + +// GetWorkspaceOptions : The GetWorkspace options. +type GetWorkspaceOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceOptions : Instantiate GetWorkspaceOptions +func (*SchematicsV1) NewGetWorkspaceOptions(wID string) *GetWorkspaceOptions { + return &GetWorkspaceOptions{ + WID: core.StringPtr(wID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceOptions) SetWID(wID string) *GetWorkspaceOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceOptions) SetHeaders(param map[string]string) *GetWorkspaceOptions { + options.Headers = param + return options +} + +// GetWorkspaceOutputsOptions : The GetWorkspaceOutputs options. +type GetWorkspaceOutputsOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceOutputsOptions : Instantiate GetWorkspaceOutputsOptions +func (*SchematicsV1) NewGetWorkspaceOutputsOptions(wID string) *GetWorkspaceOutputsOptions { + return &GetWorkspaceOutputsOptions{ + WID: core.StringPtr(wID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceOutputsOptions) SetWID(wID string) *GetWorkspaceOutputsOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceOutputsOptions) SetHeaders(param map[string]string) *GetWorkspaceOutputsOptions { + options.Headers = param + return options +} + +// GetWorkspaceReadmeOptions : The GetWorkspaceReadme options. +type GetWorkspaceReadmeOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The name of the commit/branch/tag. Default, the repository’s default branch (usually master). + Ref *string `json:"ref,omitempty"` + + // The format of the readme file. Value ''markdown'' will give markdown, otherwise html. 
+ Formatted *string `json:"formatted,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetWorkspaceReadmeOptions.Formatted property. +// The format of the readme file. Value ''markdown'' will give markdown, otherwise html. +const ( + GetWorkspaceReadmeOptions_Formatted_HTML = "html" + GetWorkspaceReadmeOptions_Formatted_Markdown = "markdown" +) + +// NewGetWorkspaceReadmeOptions : Instantiate GetWorkspaceReadmeOptions +func (*SchematicsV1) NewGetWorkspaceReadmeOptions(wID string) *GetWorkspaceReadmeOptions { + return &GetWorkspaceReadmeOptions{ + WID: core.StringPtr(wID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceReadmeOptions) SetWID(wID string) *GetWorkspaceReadmeOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetRef : Allow user to set Ref +func (options *GetWorkspaceReadmeOptions) SetRef(ref string) *GetWorkspaceReadmeOptions { + options.Ref = core.StringPtr(ref) + return options +} + +// SetFormatted : Allow user to set Formatted +func (options *GetWorkspaceReadmeOptions) SetFormatted(formatted string) *GetWorkspaceReadmeOptions { + options.Formatted = core.StringPtr(formatted) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceReadmeOptions) SetHeaders(param map[string]string) *GetWorkspaceReadmeOptions { + options.Headers = param + return options +} + +// GetWorkspaceResourcesOptions : The GetWorkspaceResources options. +type GetWorkspaceResourcesOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceResourcesOptions : Instantiate GetWorkspaceResourcesOptions +func (*SchematicsV1) NewGetWorkspaceResourcesOptions(wID string) *GetWorkspaceResourcesOptions { + return &GetWorkspaceResourcesOptions{ + WID: core.StringPtr(wID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceResourcesOptions) SetWID(wID string) *GetWorkspaceResourcesOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceResourcesOptions) SetHeaders(param map[string]string) *GetWorkspaceResourcesOptions { + options.Headers = param + return options +} + +// GetWorkspaceStateOptions : The GetWorkspaceState options. +type GetWorkspaceStateOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. 
+ WID *string `json:"w_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceStateOptions : Instantiate GetWorkspaceStateOptions +func (*SchematicsV1) NewGetWorkspaceStateOptions(wID string) *GetWorkspaceStateOptions { + return &GetWorkspaceStateOptions{ + WID: core.StringPtr(wID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceStateOptions) SetWID(wID string) *GetWorkspaceStateOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceStateOptions) SetHeaders(param map[string]string) *GetWorkspaceStateOptions { + options.Headers = param + return options +} + +// GetWorkspaceTemplateStateOptions : The GetWorkspaceTemplateState options. +type GetWorkspaceTemplateStateOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The Template ID for which you want to get the values. Use the GET /workspaces to look up the workspace IDs or + // template IDs in your IBM Cloud account. + TID *string `json:"t_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetWorkspaceTemplateStateOptions : Instantiate GetWorkspaceTemplateStateOptions +func (*SchematicsV1) NewGetWorkspaceTemplateStateOptions(wID string, tID string) *GetWorkspaceTemplateStateOptions { + return &GetWorkspaceTemplateStateOptions{ + WID: core.StringPtr(wID), + TID: core.StringPtr(tID), + } +} + +// SetWID : Allow user to set WID +func (options *GetWorkspaceTemplateStateOptions) SetWID(wID string) *GetWorkspaceTemplateStateOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetTID : Allow user to set TID +func (options *GetWorkspaceTemplateStateOptions) SetTID(tID string) *GetWorkspaceTemplateStateOptions { + options.TID = core.StringPtr(tID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetWorkspaceTemplateStateOptions) SetHeaders(param map[string]string) *GetWorkspaceTemplateStateOptions { + options.Headers = param + return options +} + +// Job : Complete Job with user inputs and system generated data. +type Job struct { + // Name of the Schematics automation resource. + CommandObject *string `json:"command_object,omitempty"` + + // Job command object id (workspace-id, action-id or control-id). + CommandObjectID *string `json:"command_object_id,omitempty"` + + // Schematics job command name. + CommandName *string `json:"command_name,omitempty"` + + // Schematics job command parameter (playbook-name, capsule-name or flow-name). + CommandParameter *string `json:"command_parameter,omitempty"` + + // Command line options for the command. + CommandOptions []string `json:"command_options,omitempty"` + + // Job inputs used by Action. + Inputs []VariableData `json:"inputs,omitempty"` + + // Environment variables used by the Job while performing Action. + Settings []VariableData `json:"settings,omitempty"` + + // User defined tags, while running the job. + Tags []string `json:"tags,omitempty"` + + // Job ID. + ID *string `json:"id,omitempty"` + + // Job name, uniquely derived from the related Action. + Name *string `json:"name,omitempty"` + + // Job description derived from the related Action. 
+ Description *string `json:"description,omitempty"` + + // List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of + // the resources provisioned using Schematics. + Location *string `json:"location,omitempty"` + + // Resource-group name derived from the related Action. + ResourceGroup *string `json:"resource_group,omitempty"` + + // Job submission time. + SubmittedAt *strfmt.DateTime `json:"submitted_at,omitempty"` + + // Email address of user who submitted the job. + SubmittedBy *string `json:"submitted_by,omitempty"` + + // Job start time. + StartAt *strfmt.DateTime `json:"start_at,omitempty"` + + // Job end time. + EndAt *strfmt.DateTime `json:"end_at,omitempty"` + + // Duration of job execution; example 40 sec. + Duration *string `json:"duration,omitempty"` + + // Job Status. + Status *JobStatus `json:"status,omitempty"` + + // Job data. + Data *JobData `json:"data,omitempty"` + + // Inventory of host and host group for the playbook, in .ini file format. + TargetsIni *string `json:"targets_ini,omitempty"` + + // Complete Target details with user inputs and system generated data. + Bastion *TargetResourceset `json:"bastion,omitempty"` + + // Job log summary record. + LogSummary *JobLogSummary `json:"log_summary,omitempty"` + + // Job log store URL. + LogStoreURL *string `json:"log_store_url,omitempty"` + + // Job state store URL. + StateStoreURL *string `json:"state_store_url,omitempty"` + + // Job results store URL. + ResultsURL *string `json:"results_url,omitempty"` + + // Job status updation timestamp. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` +} + +// Constants associated with the Job.CommandObject property. +// Name of the Schematics automation resource. +const ( + Job_CommandObject_Action = "action" + Job_CommandObject_Workspace = "workspace" +) + +// Constants associated with the Job.CommandName property. +// Schematics job command name. +const ( + Job_CommandName_AnsiblePlaybookCheck = "ansible_playbook_check" + Job_CommandName_AnsiblePlaybookRun = "ansible_playbook_run" + Job_CommandName_HelmInstall = "helm_install" + Job_CommandName_HelmList = "helm_list" + Job_CommandName_HelmShow = "helm_show" + Job_CommandName_OpaEvaluate = "opa_evaluate" + Job_CommandName_TerraformInit = "terraform_init" + Job_CommandName_TerrformApply = "terrform_apply" + Job_CommandName_TerrformDestroy = "terrform_destroy" + Job_CommandName_TerrformPlan = "terrform_plan" + Job_CommandName_TerrformRefresh = "terrform_refresh" + Job_CommandName_TerrformShow = "terrform_show" + Job_CommandName_TerrformTaint = "terrform_taint" + Job_CommandName_WorkspaceApplyFlow = "workspace_apply_flow" + Job_CommandName_WorkspaceCustomFlow = "workspace_custom_flow" + Job_CommandName_WorkspaceDestroyFlow = "workspace_destroy_flow" + Job_CommandName_WorkspaceInitFlow = "workspace_init_flow" + Job_CommandName_WorkspacePlanFlow = "workspace_plan_flow" + Job_CommandName_WorkspaceRefreshFlow = "workspace_refresh_flow" + Job_CommandName_WorkspaceShowFlow = "workspace_show_flow" +) + +// Constants associated with the Job.Location property. +// List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of the +// resources provisioned using Schematics. +const ( + Job_Location_EuDe = "eu_de" + Job_Location_EuGb = "eu_gb" + Job_Location_UsEast = "us_east" + Job_Location_UsSouth = "us_south" +) + +// UnmarshalJob unmarshals an instance of Job from the specified map of raw messages. 
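+// Callers normally do not invoke this directly: the SDK core decodes API
+// responses by passing unmarshallers of this shape to core.UnmarshalModel,
+// exactly as the nested fields below are handled (for example, the "status"
+// property is decoded through UnmarshalJobStatus).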
+func UnmarshalJob(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Job) + err = core.UnmarshalPrimitive(m, "command_object", &obj.CommandObject) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_object_id", &obj.CommandObjectID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_name", &obj.CommandName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_parameter", &obj.CommandParameter) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_options", &obj.CommandOptions) + if err != nil { + return + } + err = core.UnmarshalModel(m, "inputs", &obj.Inputs, UnmarshalVariableData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "settings", &obj.Settings, UnmarshalVariableData) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "location", &obj.Location) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group", &obj.ResourceGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "submitted_at", &obj.SubmittedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "submitted_by", &obj.SubmittedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "start_at", &obj.StartAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "end_at", &obj.EndAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "duration", &obj.Duration) + if err != nil { + return + } + err = core.UnmarshalModel(m, "status", &obj.Status, UnmarshalJobStatus) + if err != nil { + return + } + err = core.UnmarshalModel(m, "data", &obj.Data, UnmarshalJobData) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "targets_ini", &obj.TargetsIni) + if err != nil { + return + } + err = core.UnmarshalModel(m, "bastion", &obj.Bastion, UnmarshalTargetResourceset) + if err != nil { + return + } + err = core.UnmarshalModel(m, "log_summary", &obj.LogSummary, UnmarshalJobLogSummary) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "log_store_url", &obj.LogStoreURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state_store_url", &obj.StateStoreURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "results_url", &obj.ResultsURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// JobData : Job data. +type JobData struct { + // Type of Job. + JobType *string `json:"job_type" validate:"required"` + + // Action Job data. + ActionJobData *JobDataAction `json:"action_job_data,omitempty"` +} + +// Constants associated with the JobData.JobType property. +// Type of Job. 
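+// For example (a hypothetical sketch, assuming a configured *SchematicsV1
+// client named "service"), an action job payload could be started as:
+//
+//   data, err := service.NewJobData(JobData_JobType_ActionJob)
+//   if err == nil {
+//     data.ActionJobData = &JobDataAction{ActionName: core.StringPtr("myAction")}
+//   }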
+const ( + JobData_JobType_ActionJob = "action_job" + JobData_JobType_RepoDownloadJob = "repo_download_job" +) + +// NewJobData : Instantiate JobData (Generic Model Constructor) +func (*SchematicsV1) NewJobData(jobType string) (model *JobData, err error) { + model = &JobData{ + JobType: core.StringPtr(jobType), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalJobData unmarshals an instance of JobData from the specified map of raw messages. +func UnmarshalJobData(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(JobData) + err = core.UnmarshalPrimitive(m, "job_type", &obj.JobType) + if err != nil { + return + } + err = core.UnmarshalModel(m, "action_job_data", &obj.ActionJobData, UnmarshalJobDataAction) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// JobDataAction : Action Job data. +type JobDataAction struct { + // Flow name. + ActionName *string `json:"action_name,omitempty"` + + // Input variables data used by the Action Job. + Inputs []VariableData `json:"inputs,omitempty"` + + // Output variables data from the Action Job. + Outputs []VariableData `json:"outputs,omitempty"` + + // Environment variables used by all the templates in the Action. + Settings []VariableData `json:"settings,omitempty"` + + // Job status updation timestamp. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` +} + +// UnmarshalJobDataAction unmarshals an instance of JobDataAction from the specified map of raw messages. +func UnmarshalJobDataAction(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(JobDataAction) + err = core.UnmarshalPrimitive(m, "action_name", &obj.ActionName) + if err != nil { + return + } + err = core.UnmarshalModel(m, "inputs", &obj.Inputs, UnmarshalVariableData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "outputs", &obj.Outputs, UnmarshalVariableData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "settings", &obj.Settings, UnmarshalVariableData) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// JobList : List of Job details. +type JobList struct { + // Total number of records. + TotalCount *int64 `json:"total_count,omitempty"` + + // Number of records returned. + Limit *int64 `json:"limit" validate:"required"` + + // Skipped number of records. + Offset *int64 `json:"offset" validate:"required"` + + // List of job records. + Jobs []JobLite `json:"jobs,omitempty"` +} + +// UnmarshalJobList unmarshals an instance of JobList from the specified map of raw messages. +func UnmarshalJobList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(JobList) + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalModel(m, "jobs", &obj.Jobs, UnmarshalJobLite) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// JobLite : Job summary profile with system generated data. +type JobLite struct { + // Job ID. + ID *string `json:"id,omitempty"` + + // Job name, uniquely derived from the related Action. 
+ Name *string `json:"name,omitempty"`
+
+ // Job description derived from the related Action.
+ Description *string `json:"description,omitempty"`
+
+ // Name of the Schematics automation resource.
+ CommandObject *string `json:"command_object,omitempty"`
+
+ // Job command object id (action-id).
+ CommandObjectID *string `json:"command_object_id,omitempty"`
+
+ // Schematics job command name.
+ CommandName *string `json:"command_name,omitempty"`
+
+ // User defined tags, while running the job.
+ Tags []string `json:"tags,omitempty"`
+
+ // List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of
+ // the resources provisioned using Schematics.
+ Location *string `json:"location,omitempty"`
+
+ // Resource-group name derived from the related Action.
+ ResourceGroup *string `json:"resource_group,omitempty"`
+
+ // Inventory of host and host group for the playbook, in .ini file format.
+ TargetsIni *string `json:"targets_ini,omitempty"`
+
+ // Job submission time.
+ SubmittedAt *strfmt.DateTime `json:"submitted_at,omitempty"`
+
+ // Email address of user who submitted the job.
+ SubmittedBy *string `json:"submitted_by,omitempty"`
+
+ // Duration of job execution; example 40 sec.
+ Duration *string `json:"duration,omitempty"`
+
+ // Job start time.
+ StartAt *strfmt.DateTime `json:"start_at,omitempty"`
+
+ // Job end time.
+ EndAt *strfmt.DateTime `json:"end_at,omitempty"`
+
+ // Job Status.
+ Status *JobStatus `json:"status,omitempty"`
+
+ // Job log summary record.
+ LogSummary *JobLogSummary `json:"log_summary,omitempty"`
+
+ // Job status updation timestamp.
+ UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"`
+}
+
+// Constants associated with the JobLite.CommandObject property.
+// Name of the Schematics automation resource.
+const (
+ JobLite_CommandObject_Action = "action"
+ JobLite_CommandObject_Workspace = "workspace"
+)
+
+// Constants associated with the JobLite.CommandName property.
+// Schematics job command name.
+const (
+ JobLite_CommandName_AnsiblePlaybookCheck = "ansible_playbook_check"
+ JobLite_CommandName_AnsiblePlaybookRun = "ansible_playbook_run"
+ JobLite_CommandName_HelmInstall = "helm_install"
+ JobLite_CommandName_HelmList = "helm_list"
+ JobLite_CommandName_HelmShow = "helm_show"
+ JobLite_CommandName_OpaEvaluate = "opa_evaluate"
+ JobLite_CommandName_TerraformInit = "terraform_init"
+ JobLite_CommandName_TerrformApply = "terrform_apply"
+ JobLite_CommandName_TerrformDestroy = "terrform_destroy"
+ JobLite_CommandName_TerrformPlan = "terrform_plan"
+ JobLite_CommandName_TerrformRefresh = "terrform_refresh"
+ JobLite_CommandName_TerrformShow = "terrform_show"
+ JobLite_CommandName_TerrformTaint = "terrform_taint"
+ JobLite_CommandName_WorkspaceApplyFlow = "workspace_apply_flow"
+ JobLite_CommandName_WorkspaceCustomFlow = "workspace_custom_flow"
+ JobLite_CommandName_WorkspaceDestroyFlow = "workspace_destroy_flow"
+ JobLite_CommandName_WorkspaceInitFlow = "workspace_init_flow"
+ JobLite_CommandName_WorkspacePlanFlow = "workspace_plan_flow"
+ JobLite_CommandName_WorkspaceRefreshFlow = "workspace_refresh_flow"
+ JobLite_CommandName_WorkspaceShowFlow = "workspace_show_flow"
+)
+
+// Constants associated with the JobLite.Location property.
+// List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of the
+// resources provisioned using Schematics.
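+// For example (a hypothetical sketch), a caller paging through a JobList
+// could compare each entry against one of these values:
+//
+//   for _, job := range jobList.Jobs {
+//     if job.Location != nil && *job.Location == JobLite_Location_UsSouth {
+//       // the job runs in the us-south Schematics location
+//     }
+//   }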
+const ( + JobLite_Location_EuDe = "eu_de" + JobLite_Location_EuGb = "eu_gb" + JobLite_Location_UsEast = "us_east" + JobLite_Location_UsSouth = "us_south" +) + +// UnmarshalJobLite unmarshals an instance of JobLite from the specified map of raw messages. +func UnmarshalJobLite(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(JobLite) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_object", &obj.CommandObject) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_object_id", &obj.CommandObjectID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_name", &obj.CommandName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "location", &obj.Location) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group", &obj.ResourceGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "targets_ini", &obj.TargetsIni) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "submitted_at", &obj.SubmittedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "submitted_by", &obj.SubmittedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "duration", &obj.Duration) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "start_at", &obj.StartAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "end_at", &obj.EndAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "status", &obj.Status, UnmarshalJobStatus) + if err != nil { + return + } + err = core.UnmarshalModel(m, "log_summary", &obj.LogSummary, UnmarshalJobLogSummary) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// JobLog : Job Log details. +type JobLog struct { + // Job Id. + JobID *string `json:"job_id,omitempty"` + + // Job name, uniquely derived from the related Action. + JobName *string `json:"job_name,omitempty"` + + // Job log summary record. + LogSummary *JobLogSummary `json:"log_summary,omitempty"` + + // Format of the Log text. + Format *string `json:"format,omitempty"` + + // Log text, generated by the Job. + Details *[]byte `json:"details,omitempty"` + + // Job status updation timestamp. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` +} + +// Constants associated with the JobLog.Format property. +// Format of the Log text. +const ( + JobLog_Format_HTML = "html" + JobLog_Format_JSON = "json" + JobLog_Format_Markdown = "markdown" + JobLog_Format_Rtf = "rtf" +) + +// UnmarshalJobLog unmarshals an instance of JobLog from the specified map of raw messages. 
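+// Note that the Details field is declared as *[]byte, so the raw log text is
+// expected to travel base64-encoded in the JSON payload (standard
+// encoding/json semantics for byte slices).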
+func UnmarshalJobLog(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(JobLog) + err = core.UnmarshalPrimitive(m, "job_id", &obj.JobID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "job_name", &obj.JobName) + if err != nil { + return + } + err = core.UnmarshalModel(m, "log_summary", &obj.LogSummary, UnmarshalJobLogSummary) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "format", &obj.Format) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "details", &obj.Details) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// JobLogSummary : Job log summary record. +type JobLogSummary struct { + // Workspace Id. + JobID *string `json:"job_id,omitempty"` + + // Type of Job. + JobType *string `json:"job_type,omitempty"` + + // Job log start timestamp. + LogStartAt *strfmt.DateTime `json:"log_start_at,omitempty"` + + // Job log update timestamp. + LogAnalyzedTill *strfmt.DateTime `json:"log_analyzed_till,omitempty"` + + // Job log elapsed time (log_analyzed_till - log_start_at). + ElapsedTime *float64 `json:"elapsed_time,omitempty"` + + // Job log errors. + LogErrors []JobLogSummaryLogErrorsItem `json:"log_errors,omitempty"` + + // Repo download Job log summary. + RepoDownloadJob *JobLogSummaryRepoDownloadJob `json:"repo_download_job,omitempty"` + + // Flow Job log summary. + ActionJob *JobLogSummaryActionJob `json:"action_job,omitempty"` +} + +// Constants associated with the JobLogSummary.JobType property. +// Type of Job. +const ( + JobLogSummary_JobType_ActionJob = "action_job" + JobLogSummary_JobType_CapsuleJob = "capsule_job" + JobLogSummary_JobType_ControlsJob = "controls_job" + JobLogSummary_JobType_RepoDownloadJob = "repo_download_job" + JobLogSummary_JobType_WorkspaceJob = "workspace_job" +) + +// UnmarshalJobLogSummary unmarshals an instance of JobLogSummary from the specified map of raw messages. +func UnmarshalJobLogSummary(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(JobLogSummary) + err = core.UnmarshalPrimitive(m, "job_id", &obj.JobID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "job_type", &obj.JobType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "log_start_at", &obj.LogStartAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "log_analyzed_till", &obj.LogAnalyzedTill) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "elapsed_time", &obj.ElapsedTime) + if err != nil { + return + } + err = core.UnmarshalModel(m, "log_errors", &obj.LogErrors, UnmarshalJobLogSummaryLogErrorsItem) + if err != nil { + return + } + err = core.UnmarshalModel(m, "repo_download_job", &obj.RepoDownloadJob, UnmarshalJobLogSummaryRepoDownloadJob) + if err != nil { + return + } + err = core.UnmarshalModel(m, "action_job", &obj.ActionJob, UnmarshalJobLogSummaryActionJob) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// JobLogSummaryActionJob : Flow Job log summary. +type JobLogSummaryActionJob struct { + // number of targets or hosts. + TargetCount *float64 `json:"target_count,omitempty"` + + // number of tasks in playbook. + TaskCount *float64 `json:"task_count,omitempty"` + + // number of plays in playbook. + PlayCount *float64 `json:"play_count,omitempty"` + + // Recap records. 
+	Recap *JobLogSummaryActionJobRecap `json:"recap,omitempty"`
+}
+
+// UnmarshalJobLogSummaryActionJob unmarshals an instance of JobLogSummaryActionJob from the specified map of raw messages.
+func UnmarshalJobLogSummaryActionJob(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(JobLogSummaryActionJob)
+	err = core.UnmarshalPrimitive(m, "target_count", &obj.TargetCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "task_count", &obj.TaskCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "play_count", &obj.PlayCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "recap", &obj.Recap, UnmarshalJobLogSummaryActionJobRecap)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// JobLogSummaryActionJobRecap : Recap records.
+type JobLogSummaryActionJobRecap struct {
+	// List of target or host names.
+	Target []string `json:"target,omitempty"`
+
+	// Number of OK.
+	Ok *float64 `json:"ok,omitempty"`
+
+	// Number of changed.
+	Changed *float64 `json:"changed,omitempty"`
+
+	// Number of failed.
+	Failed *float64 `json:"failed,omitempty"`
+
+	// Number of skipped.
+	Skipped *float64 `json:"skipped,omitempty"`
+
+	// Number of unreachable.
+	Unreachable *float64 `json:"unreachable,omitempty"`
+}
+
+// UnmarshalJobLogSummaryActionJobRecap unmarshals an instance of JobLogSummaryActionJobRecap from the specified map of raw messages.
+func UnmarshalJobLogSummaryActionJobRecap(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(JobLogSummaryActionJobRecap)
+	err = core.UnmarshalPrimitive(m, "target", &obj.Target)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "ok", &obj.Ok)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "changed", &obj.Changed)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "failed", &obj.Failed)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "skipped", &obj.Skipped)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "unreachable", &obj.Unreachable)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// JobLogSummaryLogErrorsItem : JobLogSummaryLogErrorsItem struct
+type JobLogSummaryLogErrorsItem struct {
+	// Error code in the Log.
+	ErrorCode *string `json:"error_code,omitempty"`
+
+	// Summary error message in the log.
+	ErrorMsg *string `json:"error_msg,omitempty"`
+
+	// Number of occurrences.
+	ErrorCount *float64 `json:"error_count,omitempty"`
+}
+
+// UnmarshalJobLogSummaryLogErrorsItem unmarshals an instance of JobLogSummaryLogErrorsItem from the specified map of raw messages.
+func UnmarshalJobLogSummaryLogErrorsItem(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(JobLogSummaryLogErrorsItem)
+	err = core.UnmarshalPrimitive(m, "error_code", &obj.ErrorCode)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "error_msg", &obj.ErrorMsg)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "error_count", &obj.ErrorCount)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// JobLogSummaryRepoDownloadJob : Repo download Job log summary.
+type JobLogSummaryRepoDownloadJob struct {
+	// Number of files scanned.
+	ScannedFileCount *float64 `json:"scanned_file_count,omitempty"`
+
+	// Number of files quarantined.
+ QuarantinedFileCount *float64 `json:"quarantined_file_count,omitempty"` + + // Detected template or data file type. + DetectedFiletype *string `json:"detected_filetype,omitempty"` + + // Number of inputs detected. + InputsCount *string `json:"inputs_count,omitempty"` + + // Number of outputs detected. + OutputsCount *string `json:"outputs_count,omitempty"` +} + +// UnmarshalJobLogSummaryRepoDownloadJob unmarshals an instance of JobLogSummaryRepoDownloadJob from the specified map of raw messages. +func UnmarshalJobLogSummaryRepoDownloadJob(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(JobLogSummaryRepoDownloadJob) + err = core.UnmarshalPrimitive(m, "scanned_file_count", &obj.ScannedFileCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "quarantined_file_count", &obj.QuarantinedFileCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "detected_filetype", &obj.DetectedFiletype) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "inputs_count", &obj.InputsCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "outputs_count", &obj.OutputsCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// JobStateData : Workspace Job state-file. +type JobStateData struct { + // Job Id. + JobID *string `json:"job_id,omitempty"` + + // Job name, uniquely derived from the related Action. + JobName *string `json:"job_name,omitempty"` + + // Job state summary. + Summary []JobStateDataSummaryItem `json:"summary,omitempty"` + + // Format of the State data (eg. tfstate). + Format *string `json:"format,omitempty"` + + // State data file. + Details *[]byte `json:"details,omitempty"` + + // Job status updation timestamp. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` +} + +// UnmarshalJobStateData unmarshals an instance of JobStateData from the specified map of raw messages. +func UnmarshalJobStateData(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(JobStateData) + err = core.UnmarshalPrimitive(m, "job_id", &obj.JobID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "job_name", &obj.JobName) + if err != nil { + return + } + err = core.UnmarshalModel(m, "summary", &obj.Summary, UnmarshalJobStateDataSummaryItem) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "format", &obj.Format) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "details", &obj.Details) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// JobStateDataSummaryItem : JobStateDataSummaryItem struct +type JobStateDataSummaryItem struct { + // State summary feature name. + Name *string `json:"name,omitempty"` + + // State summary feature type. + Type *string `json:"type,omitempty"` + + // State summary feature value. + Value *string `json:"value,omitempty"` +} + +// Constants associated with the JobStateDataSummaryItem.Type property. +// State summary feature type. +const ( + JobStateDataSummaryItem_Type_Number = "number" + JobStateDataSummaryItem_Type_String = "string" +) + +// UnmarshalJobStateDataSummaryItem unmarshals an instance of JobStateDataSummaryItem from the specified map of raw messages. 
+func UnmarshalJobStateDataSummaryItem(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(JobStateDataSummaryItem)
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// JobStatus : Job Status.
+type JobStatus struct {
+	// Action Job Status.
+	ActionJobStatus *JobStatusAction `json:"action_job_status,omitempty"`
+}
+
+// UnmarshalJobStatus unmarshals an instance of JobStatus from the specified map of raw messages.
+func UnmarshalJobStatus(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(JobStatus)
+	err = core.UnmarshalModel(m, "action_job_status", &obj.ActionJobStatus, UnmarshalJobStatusAction)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// JobStatusAction : Action Job Status.
+type JobStatusAction struct {
+	// Action name.
+	ActionName *string `json:"action_name,omitempty"`
+
+	// Status of Jobs.
+	StatusCode *string `json:"status_code,omitempty"`
+
+	// Action Job status message - to be displayed along with the action_status_code.
+	StatusMessage *string `json:"status_message,omitempty"`
+
+	// Status of Resources.
+	BastionStatusCode *string `json:"bastion_status_code,omitempty"`
+
+	// Bastion status message - to be displayed along with the bastion_status_code.
+	BastionStatusMessage *string `json:"bastion_status_message,omitempty"`
+
+	// Status of Resources.
+	TargetsStatusCode *string `json:"targets_status_code,omitempty"`
+
+	// Aggregated status message for all target resources, to be displayed along with the targets_status_code.
+	TargetsStatusMessage *string `json:"targets_status_message,omitempty"`
+
+	// Job status updation timestamp.
+	UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"`
+}
+
+// Constants associated with the JobStatusAction.StatusCode property.
+// Status of Jobs.
+const (
+	JobStatusAction_StatusCode_JobCancelled = "job_cancelled"
+	JobStatusAction_StatusCode_JobFailed = "job_failed"
+	JobStatusAction_StatusCode_JobFinished = "job_finished"
+	JobStatusAction_StatusCode_JobInProgress = "job_in_progress"
+	JobStatusAction_StatusCode_JobPending = "job_pending"
+)
+
+// Constants associated with the JobStatusAction.BastionStatusCode property.
+// Status of Resources.
+const (
+	JobStatusAction_BastionStatusCode_Error = "error"
+	JobStatusAction_BastionStatusCode_None = "none"
+	JobStatusAction_BastionStatusCode_Processing = "processing"
+	JobStatusAction_BastionStatusCode_Ready = "ready"
+)
+
+// Constants associated with the JobStatusAction.TargetsStatusCode property.
+// Status of Resources.
+const (
+	JobStatusAction_TargetsStatusCode_Error = "error"
+	JobStatusAction_TargetsStatusCode_None = "none"
+	JobStatusAction_TargetsStatusCode_Processing = "processing"
+	JobStatusAction_TargetsStatusCode_Ready = "ready"
+)
+
+// UnmarshalJobStatusAction unmarshals an instance of JobStatusAction from the specified map of raw messages.
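+//
+// Illustrative sketch (not part of the generated client): the StatusCode
+// constants above are meant for comparisons when polling, e.g. given a
+// hypothetical `job` value whose Status field has been populated:
+//
+//	status := job.Status.ActionJobStatus
+//	if status != nil && status.StatusCode != nil &&
+//		*status.StatusCode == JobStatusAction_StatusCode_JobInProgress {
+//		// still running; poll again later
+//	}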
+func UnmarshalJobStatusAction(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(JobStatusAction)
+	err = core.UnmarshalPrimitive(m, "action_name", &obj.ActionName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "status_code", &obj.StatusCode)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "status_message", &obj.StatusMessage)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "bastion_status_code", &obj.BastionStatusCode)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "bastion_status_message", &obj.BastionStatusMessage)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "targets_status_code", &obj.TargetsStatusCode)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "targets_status_message", &obj.TargetsStatusMessage)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// JobStatusType : JobStatusType -.
+type JobStatusType struct {
+	// List of failed workspace jobs.
+	Failed []string `json:"failed,omitempty"`
+
+	// List of in_progress workspace jobs.
+	InProgress []string `json:"in_progress,omitempty"`
+
+	// List of successful workspace jobs.
+	Success []string `json:"success,omitempty"`
+
+	// Workspace job status updated at.
+	LastUpdatedOn *strfmt.DateTime `json:"last_updated_on,omitempty"`
+}
+
+// UnmarshalJobStatusType unmarshals an instance of JobStatusType from the specified map of raw messages.
+func UnmarshalJobStatusType(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(JobStatusType)
+	err = core.UnmarshalPrimitive(m, "failed", &obj.Failed)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "in_progress", &obj.InProgress)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "success", &obj.Success)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "last_updated_on", &obj.LastUpdatedOn)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// KMSDiscovery : Discovered KMS instances.
+type KMSDiscovery struct {
+	// Total number of records.
+	TotalCount *int64 `json:"total_count,omitempty"`
+
+	// Number of records returned.
+	Limit *int64 `json:"limit" validate:"required"`
+
+	// Skipped number of records.
+	Offset *int64 `json:"offset" validate:"required"`
+
+	// List of KMS instances.
+	KmsInstances []KMSInstances `json:"kms_instances,omitempty"`
+}
+
+// UnmarshalKMSDiscovery unmarshals an instance of KMSDiscovery from the specified map of raw messages.
+func UnmarshalKMSDiscovery(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(KMSDiscovery)
+	err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "limit", &obj.Limit)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "offset", &obj.Offset)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "kms_instances", &obj.KmsInstances, UnmarshalKMSInstances)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// KMSInstances : KMS Instances.
+type KMSInstances struct {
+	// Location.
+	Location *string `json:"location,omitempty"`
+
+	// Encryption scheme.
+	EncryptionScheme *string `json:"encryption_scheme,omitempty"`
+
+	// Resource group.
+ ResourceGroup *string `json:"resource_group,omitempty"` + + // KMS CRN. + KmsCrn *string `json:"kms_crn,omitempty"` + + // KMS Name. + KmsName *string `json:"kms_name,omitempty"` + + // KMS private endpoint. + KmsPrivateEndpoint *string `json:"kms_private_endpoint,omitempty"` + + // KMS public endpoint. + KmsPublicEndpoint *string `json:"kms_public_endpoint,omitempty"` + + // List of keys. + Keys []KMSInstancesKeysItem `json:"keys,omitempty"` +} + +// UnmarshalKMSInstances unmarshals an instance of KMSInstances from the specified map of raw messages. +func UnmarshalKMSInstances(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KMSInstances) + err = core.UnmarshalPrimitive(m, "location", &obj.Location) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "encryption_scheme", &obj.EncryptionScheme) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group", &obj.ResourceGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kms_crn", &obj.KmsCrn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kms_name", &obj.KmsName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kms_private_endpoint", &obj.KmsPrivateEndpoint) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kms_public_endpoint", &obj.KmsPublicEndpoint) + if err != nil { + return + } + err = core.UnmarshalModel(m, "keys", &obj.Keys, UnmarshalKMSInstancesKeysItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KMSInstancesKeysItem : KMSInstancesKeysItem struct +type KMSInstancesKeysItem struct { + // Key name. + Name *string `json:"name,omitempty"` + + // CRN of the Key. + Crn *string `json:"crn,omitempty"` + + // Error message. + Error *string `json:"error,omitempty"` +} + +// UnmarshalKMSInstancesKeysItem unmarshals an instance of KMSInstancesKeysItem from the specified map of raw messages. +func UnmarshalKMSInstancesKeysItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KMSInstancesKeysItem) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.Crn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "error", &obj.Error) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KMSSettings : User defined KMS Settings details. +type KMSSettings struct { + // Location. + Location *string `json:"location,omitempty"` + + // Encryption scheme. + EncryptionScheme *string `json:"encryption_scheme,omitempty"` + + // Resource group. + ResourceGroup *string `json:"resource_group,omitempty"` + + // Primary CRK details. + PrimaryCrk *KMSSettingsPrimaryCrk `json:"primary_crk,omitempty"` + + // Secondary CRK details. + SecondaryCrk *KMSSettingsSecondaryCrk `json:"secondary_crk,omitempty"` +} + +// UnmarshalKMSSettings unmarshals an instance of KMSSettings from the specified map of raw messages. 
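+//
+// Illustrative sketch (not part of the generated client): given a discovery
+// result already decoded into a hypothetical `discovery *KMSDiscovery`, the
+// nested instances and keys can be walked directly:
+//
+//	for _, inst := range discovery.KmsInstances {
+//		for _, key := range inst.Keys {
+//			if key.Error != nil {
+//				// the key could not be read; inspect *key.Error
+//			}
+//		}
+//	}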
+func UnmarshalKMSSettings(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KMSSettings) + err = core.UnmarshalPrimitive(m, "location", &obj.Location) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "encryption_scheme", &obj.EncryptionScheme) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group", &obj.ResourceGroup) + if err != nil { + return + } + err = core.UnmarshalModel(m, "primary_crk", &obj.PrimaryCrk, UnmarshalKMSSettingsPrimaryCrk) + if err != nil { + return + } + err = core.UnmarshalModel(m, "secondary_crk", &obj.SecondaryCrk, UnmarshalKMSSettingsSecondaryCrk) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KMSSettingsPrimaryCrk : Primary CRK details. +type KMSSettingsPrimaryCrk struct { + // Primary KMS name. + KmsName *string `json:"kms_name,omitempty"` + + // Primary KMS endpoint. + KmsPrivateEndpoint *string `json:"kms_private_endpoint,omitempty"` + + // CRN of the Primary Key. + KeyCrn *string `json:"key_crn,omitempty"` +} + +// UnmarshalKMSSettingsPrimaryCrk unmarshals an instance of KMSSettingsPrimaryCrk from the specified map of raw messages. +func UnmarshalKMSSettingsPrimaryCrk(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KMSSettingsPrimaryCrk) + err = core.UnmarshalPrimitive(m, "kms_name", &obj.KmsName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kms_private_endpoint", &obj.KmsPrivateEndpoint) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "key_crn", &obj.KeyCrn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KMSSettingsSecondaryCrk : Secondary CRK details. +type KMSSettingsSecondaryCrk struct { + // Secondary KMS name. + KmsName *string `json:"kms_name,omitempty"` + + // Secondary KMS endpoint. + KmsPrivateEndpoint *string `json:"kms_private_endpoint,omitempty"` + + // CRN of the Secondary Key. + KeyCrn *string `json:"key_crn,omitempty"` +} + +// UnmarshalKMSSettingsSecondaryCrk unmarshals an instance of KMSSettingsSecondaryCrk from the specified map of raw messages. +func UnmarshalKMSSettingsSecondaryCrk(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KMSSettingsSecondaryCrk) + err = core.UnmarshalPrimitive(m, "kms_name", &obj.KmsName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "kms_private_endpoint", &obj.KmsPrivateEndpoint) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "key_crn", &obj.KeyCrn) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListActionsOptions : The ListActions options. +type ListActionsOptions struct { + // The number of items to skip before starting to collect the result set. + Offset *int64 `json:"offset,omitempty"` + + // The numbers of items to return. + Limit *int64 `json:"limit,omitempty"` + + // Name of the field to sort-by; Use the '.' character to delineate sub-resources and sub-fields (eg. + // owner.last_name). Prepend the field with '+' or '-', indicating 'ascending' or 'descending' (default is ascending) + // Ignore unrecognized or unsupported sort field. + Sort *string `json:"sort,omitempty"` + + // Level of details returned by the get method. + Profile *string `json:"profile,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListActionsOptions.Profile property. 
+// Level of details returned by the get method. +const ( + ListActionsOptions_Profile_Ids = "ids" + ListActionsOptions_Profile_Summary = "summary" +) + +// NewListActionsOptions : Instantiate ListActionsOptions +func (*SchematicsV1) NewListActionsOptions() *ListActionsOptions { + return &ListActionsOptions{} +} + +// SetOffset : Allow user to set Offset +func (options *ListActionsOptions) SetOffset(offset int64) *ListActionsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListActionsOptions) SetLimit(limit int64) *ListActionsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetSort : Allow user to set Sort +func (options *ListActionsOptions) SetSort(sort string) *ListActionsOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetProfile : Allow user to set Profile +func (options *ListActionsOptions) SetProfile(profile string) *ListActionsOptions { + options.Profile = core.StringPtr(profile) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListActionsOptions) SetHeaders(param map[string]string) *ListActionsOptions { + options.Headers = param + return options +} + +// ListJobLogsOptions : The ListJobLogs options. +type ListJobLogsOptions struct { + // Job Id. Use GET /jobs API to look up the Job Ids in your IBM Cloud account. + JobID *string `json:"job_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListJobLogsOptions : Instantiate ListJobLogsOptions +func (*SchematicsV1) NewListJobLogsOptions(jobID string) *ListJobLogsOptions { + return &ListJobLogsOptions{ + JobID: core.StringPtr(jobID), + } +} + +// SetJobID : Allow user to set JobID +func (options *ListJobLogsOptions) SetJobID(jobID string) *ListJobLogsOptions { + options.JobID = core.StringPtr(jobID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListJobLogsOptions) SetHeaders(param map[string]string) *ListJobLogsOptions { + options.Headers = param + return options +} + +// ListJobStatesOptions : The ListJobStates options. +type ListJobStatesOptions struct { + // Job Id. Use GET /jobs API to look up the Job Ids in your IBM Cloud account. + JobID *string `json:"job_id" validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListJobStatesOptions : Instantiate ListJobStatesOptions +func (*SchematicsV1) NewListJobStatesOptions(jobID string) *ListJobStatesOptions { + return &ListJobStatesOptions{ + JobID: core.StringPtr(jobID), + } +} + +// SetJobID : Allow user to set JobID +func (options *ListJobStatesOptions) SetJobID(jobID string) *ListJobStatesOptions { + options.JobID = core.StringPtr(jobID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListJobStatesOptions) SetHeaders(param map[string]string) *ListJobStatesOptions { + options.Headers = param + return options +} + +// ListJobsOptions : The ListJobs options. +type ListJobsOptions struct { + // The number of items to skip before starting to collect the result set. + Offset *int64 `json:"offset,omitempty"` + + // The numbers of items to return. + Limit *int64 `json:"limit,omitempty"` + + // Name of the field to sort-by; Use the '.' character to delineate sub-resources and sub-fields (eg. + // owner.last_name). 
Prepend the field with '+' or '-', indicating 'ascending' or 'descending' (default is ascending) + // Ignore unrecognized or unsupported sort field. + Sort *string `json:"sort,omitempty"` + + // Level of details returned by the get method. + Profile *string `json:"profile,omitempty"` + + // Name of the resource (workspace, actions or controls). + Resource *string `json:"resource,omitempty"` + + // Action Id. + ActionID *string `json:"action_id,omitempty"` + + // list jobs. + List *string `json:"list,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListJobsOptions.Profile property. +// Level of details returned by the get method. +const ( + ListJobsOptions_Profile_Ids = "ids" + ListJobsOptions_Profile_Summary = "summary" +) + +// Constants associated with the ListJobsOptions.Resource property. +// Name of the resource (workspace, actions or controls). +const ( + ListJobsOptions_Resource_Actions = "actions" + ListJobsOptions_Resource_Controls = "controls" + ListJobsOptions_Resource_Workspaces = "workspaces" +) + +// Constants associated with the ListJobsOptions.List property. +// list jobs. +const ( + ListJobsOptions_List_All = "all" +) + +// NewListJobsOptions : Instantiate ListJobsOptions +func (*SchematicsV1) NewListJobsOptions() *ListJobsOptions { + return &ListJobsOptions{} +} + +// SetOffset : Allow user to set Offset +func (options *ListJobsOptions) SetOffset(offset int64) *ListJobsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListJobsOptions) SetLimit(limit int64) *ListJobsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetSort : Allow user to set Sort +func (options *ListJobsOptions) SetSort(sort string) *ListJobsOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetProfile : Allow user to set Profile +func (options *ListJobsOptions) SetProfile(profile string) *ListJobsOptions { + options.Profile = core.StringPtr(profile) + return options +} + +// SetResource : Allow user to set Resource +func (options *ListJobsOptions) SetResource(resource string) *ListJobsOptions { + options.Resource = core.StringPtr(resource) + return options +} + +// SetActionID : Allow user to set ActionID +func (options *ListJobsOptions) SetActionID(actionID string) *ListJobsOptions { + options.ActionID = core.StringPtr(actionID) + return options +} + +// SetList : Allow user to set List +func (options *ListJobsOptions) SetList(list string) *ListJobsOptions { + options.List = core.StringPtr(list) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListJobsOptions) SetHeaders(param map[string]string) *ListJobsOptions { + options.Headers = param + return options +} + +// ListResourceGroupOptions : The ListResourceGroup options. +type ListResourceGroupOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListResourceGroupOptions : Instantiate ListResourceGroupOptions +func (*SchematicsV1) NewListResourceGroupOptions() *ListResourceGroupOptions { + return &ListResourceGroupOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListResourceGroupOptions) SetHeaders(param map[string]string) *ListResourceGroupOptions { + options.Headers = param + return options +} + +// ListSchematicsLocationOptions : The ListSchematicsLocation options. 
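+//
+// Illustrative sketch (not part of the generated client): options types with
+// no required fields, such as this one, are typically constructed empty and
+// passed straight to the matching service method. Assuming a configured
+// `schematicsService` client, that would look roughly like:
+//
+//	result, response, err := schematicsService.ListSchematicsLocation(
+//		schematicsService.NewListSchematicsLocationOptions())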
+type ListSchematicsLocationOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListSchematicsLocationOptions : Instantiate ListSchematicsLocationOptions +func (*SchematicsV1) NewListSchematicsLocationOptions() *ListSchematicsLocationOptions { + return &ListSchematicsLocationOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListSchematicsLocationOptions) SetHeaders(param map[string]string) *ListSchematicsLocationOptions { + options.Headers = param + return options +} + +// ListSharedDatasetsOptions : The ListSharedDatasets options. +type ListSharedDatasetsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListSharedDatasetsOptions : Instantiate ListSharedDatasetsOptions +func (*SchematicsV1) NewListSharedDatasetsOptions() *ListSharedDatasetsOptions { + return &ListSharedDatasetsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListSharedDatasetsOptions) SetHeaders(param map[string]string) *ListSharedDatasetsOptions { + options.Headers = param + return options +} + +// ListWorkspaceActivitiesOptions : The ListWorkspaceActivities options. +type ListWorkspaceActivitiesOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The number of items to skip before starting to collect the result set. + Offset *int64 `json:"offset,omitempty"` + + // The numbers of items to return. + Limit *int64 `json:"limit,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListWorkspaceActivitiesOptions : Instantiate ListWorkspaceActivitiesOptions +func (*SchematicsV1) NewListWorkspaceActivitiesOptions(wID string) *ListWorkspaceActivitiesOptions { + return &ListWorkspaceActivitiesOptions{ + WID: core.StringPtr(wID), + } +} + +// SetWID : Allow user to set WID +func (options *ListWorkspaceActivitiesOptions) SetWID(wID string) *ListWorkspaceActivitiesOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListWorkspaceActivitiesOptions) SetOffset(offset int64) *ListWorkspaceActivitiesOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListWorkspaceActivitiesOptions) SetLimit(limit int64) *ListWorkspaceActivitiesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListWorkspaceActivitiesOptions) SetHeaders(param map[string]string) *ListWorkspaceActivitiesOptions { + options.Headers = param + return options +} + +// ListWorkspacesOptions : The ListWorkspaces options. +type ListWorkspacesOptions struct { + // The number of items to skip before starting to collect the result set. + Offset *int64 `json:"offset,omitempty"` + + // The numbers of items to return. 
+	Limit *int64 `json:"limit,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListWorkspacesOptions : Instantiate ListWorkspacesOptions
+func (*SchematicsV1) NewListWorkspacesOptions() *ListWorkspacesOptions {
+	return &ListWorkspacesOptions{}
+}
+
+// SetOffset : Allow user to set Offset
+func (options *ListWorkspacesOptions) SetOffset(offset int64) *ListWorkspacesOptions {
+	options.Offset = core.Int64Ptr(offset)
+	return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListWorkspacesOptions) SetLimit(limit int64) *ListWorkspacesOptions {
+	options.Limit = core.Int64Ptr(limit)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListWorkspacesOptions) SetHeaders(param map[string]string) *ListWorkspacesOptions {
+	options.Headers = param
+	return options
+}
+
+// LogStoreResponse : LogStoreResponse -.
+type LogStoreResponse struct {
+	// Engine name.
+	EngineName *string `json:"engine_name,omitempty"`
+
+	// Engine version.
+	EngineVersion *string `json:"engine_version,omitempty"`
+
+	// Engine id.
+	ID *string `json:"id,omitempty"`
+
+	// Log store url.
+	LogStoreURL *string `json:"log_store_url,omitempty"`
+}
+
+// UnmarshalLogStoreResponse unmarshals an instance of LogStoreResponse from the specified map of raw messages.
+func UnmarshalLogStoreResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LogStoreResponse)
+	err = core.UnmarshalPrimitive(m, "engine_name", &obj.EngineName)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "engine_version", &obj.EngineVersion)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "log_store_url", &obj.LogStoreURL)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LogStoreResponseList : LogStoreResponseList -.
+type LogStoreResponseList struct {
+	// Runtime data.
+	RuntimeData []LogStoreResponse `json:"runtime_data,omitempty"`
+}
+
+// UnmarshalLogStoreResponseList unmarshals an instance of LogStoreResponseList from the specified map of raw messages.
+func UnmarshalLogStoreResponseList(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LogStoreResponseList)
+	err = core.UnmarshalModel(m, "runtime_data", &obj.RuntimeData, UnmarshalLogStoreResponse)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LogSummary : LogSummary ...
+type LogSummary struct {
+	// WorkspaceActivityStatus activity status type.
+	ActivityStatus *string `json:"activity_status,omitempty"`
+
+	// Template detected type.
+	DetectedTemplateType *string `json:"detected_template_type,omitempty"`
+
+	// Number of discarded files.
+	DiscardedFiles *int64 `json:"discarded_files,omitempty"`
+
+	// Number of errors in log.
+	Error *string `json:"error,omitempty"`
+
+	// Number of resources added.
+	ResourcesAdded *int64 `json:"resources_added,omitempty"`
+
+	// Number of resources destroyed.
+	ResourcesDestroyed *int64 `json:"resources_destroyed,omitempty"`
+
+	// Number of resources modified.
+	ResourcesModified *int64 `json:"resources_modified,omitempty"`
+
+	// Number of files scanned.
+	ScannedFiles *int64 `json:"scanned_files,omitempty"`
+
+	// Number of template variables.
+	TemplateVariableCount *int64 `json:"template_variable_count,omitempty"`
+
+	// Time taken to perform the activity.
+	TimeTaken *float64 `json:"time_taken,omitempty"`
+}
+
+// UnmarshalLogSummary unmarshals an instance of LogSummary from the specified map of raw messages.
+func UnmarshalLogSummary(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LogSummary)
+	err = core.UnmarshalPrimitive(m, "activity_status", &obj.ActivityStatus)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "detected_template_type", &obj.DetectedTemplateType)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "discarded_files", &obj.DiscardedFiles)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "error", &obj.Error)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "resources_added", &obj.ResourcesAdded)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "resources_destroyed", &obj.ResourcesDestroyed)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "resources_modified", &obj.ResourcesModified)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "scanned_files", &obj.ScannedFiles)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "template_variable_count", &obj.TemplateVariableCount)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "time_taken", &obj.TimeTaken)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// OutputValuesItem : OutputValuesItem struct
+type OutputValuesItem struct {
+	// Output variable folder.
+	Folder *string `json:"folder,omitempty"`
+
+	// Output variable id.
+	ID *string `json:"id,omitempty"`
+
+	// List of Output values.
+	OutputValues []interface{} `json:"output_values,omitempty"`
+
+	// Output variable type.
+	ValueType *string `json:"value_type,omitempty"`
+}
+
+// UnmarshalOutputValuesItem unmarshals an instance of OutputValuesItem from the specified map of raw messages.
+func UnmarshalOutputValuesItem(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(OutputValuesItem)
+	err = core.UnmarshalPrimitive(m, "folder", &obj.Folder)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "output_values", &obj.OutputValues)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "value_type", &obj.ValueType)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// PlanWorkspaceCommandOptions : The PlanWorkspaceCommand options.
+type PlanWorkspaceCommandOptions struct {
+	// The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look
+	// up the workspace IDs in your IBM Cloud account.
+	WID *string `json:"w_id" validate:"required,ne="`
+
+	// The IAM refresh token associated with the IBM Cloud account.
+ RefreshToken *string `json:"refresh_token" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewPlanWorkspaceCommandOptions : Instantiate PlanWorkspaceCommandOptions +func (*SchematicsV1) NewPlanWorkspaceCommandOptions(wID string, refreshToken string) *PlanWorkspaceCommandOptions { + return &PlanWorkspaceCommandOptions{ + WID: core.StringPtr(wID), + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetWID : Allow user to set WID +func (options *PlanWorkspaceCommandOptions) SetWID(wID string) *PlanWorkspaceCommandOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *PlanWorkspaceCommandOptions) SetRefreshToken(refreshToken string) *PlanWorkspaceCommandOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PlanWorkspaceCommandOptions) SetHeaders(param map[string]string) *PlanWorkspaceCommandOptions { + options.Headers = param + return options +} + +// RefreshWorkspaceCommandOptions : The RefreshWorkspaceCommand options. +type RefreshWorkspaceCommandOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The IAM refresh token associated with the IBM Cloud account. + RefreshToken *string `json:"refresh_token" validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewRefreshWorkspaceCommandOptions : Instantiate RefreshWorkspaceCommandOptions +func (*SchematicsV1) NewRefreshWorkspaceCommandOptions(wID string, refreshToken string) *RefreshWorkspaceCommandOptions { + return &RefreshWorkspaceCommandOptions{ + WID: core.StringPtr(wID), + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetWID : Allow user to set WID +func (options *RefreshWorkspaceCommandOptions) SetWID(wID string) *RefreshWorkspaceCommandOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *RefreshWorkspaceCommandOptions) SetRefreshToken(refreshToken string) *RefreshWorkspaceCommandOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *RefreshWorkspaceCommandOptions) SetHeaders(param map[string]string) *RefreshWorkspaceCommandOptions { + options.Headers = param + return options +} + +// ReplaceJobOptions : The ReplaceJob options. +type ReplaceJobOptions struct { + // Job Id. Use GET /jobs API to look up the Job Ids in your IBM Cloud account. + JobID *string `json:"job_id" validate:"required,ne="` + + // The IAM refresh token associated with the IBM Cloud account. + RefreshToken *string `json:"refresh_token" validate:"required"` + + // Name of the Schematics automation resource. + CommandObject *string `json:"command_object,omitempty"` + + // Job command object id (workspace-id, action-id or control-id). + CommandObjectID *string `json:"command_object_id,omitempty"` + + // Schematics job command name. + CommandName *string `json:"command_name,omitempty"` + + // Schematics job command parameter (playbook-name, capsule-name or flow-name). + CommandParameter *string `json:"command_parameter,omitempty"` + + // Command line options for the command. 
+ CommandOptions []string `json:"command_options,omitempty"` + + // Job inputs used by Action. + Inputs []VariableData `json:"inputs,omitempty"` + + // Environment variables used by the Job while performing Action. + Settings []VariableData `json:"settings,omitempty"` + + // User defined tags, while running the job. + Tags []string `json:"tags,omitempty"` + + // List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of + // the resources provisioned using Schematics. + Location *string `json:"location,omitempty"` + + // Job Status. + Status *JobStatus `json:"status,omitempty"` + + // Job data. + Data *JobData `json:"data,omitempty"` + + // Complete Target details with user inputs and system generated data. + Bastion *TargetResourceset `json:"bastion,omitempty"` + + // Job log summary record. + LogSummary *JobLogSummary `json:"log_summary,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ReplaceJobOptions.CommandObject property. +// Name of the Schematics automation resource. +const ( + ReplaceJobOptions_CommandObject_Action = "action" + ReplaceJobOptions_CommandObject_Workspace = "workspace" +) + +// Constants associated with the ReplaceJobOptions.CommandName property. +// Schematics job command name. +const ( + ReplaceJobOptions_CommandName_AnsiblePlaybookCheck = "ansible_playbook_check" + ReplaceJobOptions_CommandName_AnsiblePlaybookRun = "ansible_playbook_run" + ReplaceJobOptions_CommandName_HelmInstall = "helm_install" + ReplaceJobOptions_CommandName_HelmList = "helm_list" + ReplaceJobOptions_CommandName_HelmShow = "helm_show" + ReplaceJobOptions_CommandName_OpaEvaluate = "opa_evaluate" + ReplaceJobOptions_CommandName_TerraformInit = "terraform_init" + ReplaceJobOptions_CommandName_TerrformApply = "terrform_apply" + ReplaceJobOptions_CommandName_TerrformDestroy = "terrform_destroy" + ReplaceJobOptions_CommandName_TerrformPlan = "terrform_plan" + ReplaceJobOptions_CommandName_TerrformRefresh = "terrform_refresh" + ReplaceJobOptions_CommandName_TerrformShow = "terrform_show" + ReplaceJobOptions_CommandName_TerrformTaint = "terrform_taint" + ReplaceJobOptions_CommandName_WorkspaceApplyFlow = "workspace_apply_flow" + ReplaceJobOptions_CommandName_WorkspaceCustomFlow = "workspace_custom_flow" + ReplaceJobOptions_CommandName_WorkspaceDestroyFlow = "workspace_destroy_flow" + ReplaceJobOptions_CommandName_WorkspaceInitFlow = "workspace_init_flow" + ReplaceJobOptions_CommandName_WorkspacePlanFlow = "workspace_plan_flow" + ReplaceJobOptions_CommandName_WorkspaceRefreshFlow = "workspace_refresh_flow" + ReplaceJobOptions_CommandName_WorkspaceShowFlow = "workspace_show_flow" +) + +// Constants associated with the ReplaceJobOptions.Location property. +// List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of the +// resources provisioned using Schematics. 
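+//
+// Illustrative sketch (not part of the generated client): these values are
+// intended to be passed through the fluent setter, e.g.
+//
+//	replaceJobOptions.SetLocation(ReplaceJobOptions_Location_UsSouth)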
+const ( + ReplaceJobOptions_Location_EuDe = "eu_de" + ReplaceJobOptions_Location_EuGb = "eu_gb" + ReplaceJobOptions_Location_UsEast = "us_east" + ReplaceJobOptions_Location_UsSouth = "us_south" +) + +// NewReplaceJobOptions : Instantiate ReplaceJobOptions +func (*SchematicsV1) NewReplaceJobOptions(jobID string, refreshToken string) *ReplaceJobOptions { + return &ReplaceJobOptions{ + JobID: core.StringPtr(jobID), + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetJobID : Allow user to set JobID +func (options *ReplaceJobOptions) SetJobID(jobID string) *ReplaceJobOptions { + options.JobID = core.StringPtr(jobID) + return options +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *ReplaceJobOptions) SetRefreshToken(refreshToken string) *ReplaceJobOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetCommandObject : Allow user to set CommandObject +func (options *ReplaceJobOptions) SetCommandObject(commandObject string) *ReplaceJobOptions { + options.CommandObject = core.StringPtr(commandObject) + return options +} + +// SetCommandObjectID : Allow user to set CommandObjectID +func (options *ReplaceJobOptions) SetCommandObjectID(commandObjectID string) *ReplaceJobOptions { + options.CommandObjectID = core.StringPtr(commandObjectID) + return options +} + +// SetCommandName : Allow user to set CommandName +func (options *ReplaceJobOptions) SetCommandName(commandName string) *ReplaceJobOptions { + options.CommandName = core.StringPtr(commandName) + return options +} + +// SetCommandParameter : Allow user to set CommandParameter +func (options *ReplaceJobOptions) SetCommandParameter(commandParameter string) *ReplaceJobOptions { + options.CommandParameter = core.StringPtr(commandParameter) + return options +} + +// SetCommandOptions : Allow user to set CommandOptions +func (options *ReplaceJobOptions) SetCommandOptions(commandOptions []string) *ReplaceJobOptions { + options.CommandOptions = commandOptions + return options +} + +// SetInputs : Allow user to set Inputs +func (options *ReplaceJobOptions) SetInputs(inputs []VariableData) *ReplaceJobOptions { + options.Inputs = inputs + return options +} + +// SetSettings : Allow user to set Settings +func (options *ReplaceJobOptions) SetSettings(settings []VariableData) *ReplaceJobOptions { + options.Settings = settings + return options +} + +// SetTags : Allow user to set Tags +func (options *ReplaceJobOptions) SetTags(tags []string) *ReplaceJobOptions { + options.Tags = tags + return options +} + +// SetLocation : Allow user to set Location +func (options *ReplaceJobOptions) SetLocation(location string) *ReplaceJobOptions { + options.Location = core.StringPtr(location) + return options +} + +// SetStatus : Allow user to set Status +func (options *ReplaceJobOptions) SetStatus(status *JobStatus) *ReplaceJobOptions { + options.Status = status + return options +} + +// SetData : Allow user to set Data +func (options *ReplaceJobOptions) SetData(data *JobData) *ReplaceJobOptions { + options.Data = data + return options +} + +// SetBastion : Allow user to set Bastion +func (options *ReplaceJobOptions) SetBastion(bastion *TargetResourceset) *ReplaceJobOptions { + options.Bastion = bastion + return options +} + +// SetLogSummary : Allow user to set LogSummary +func (options *ReplaceJobOptions) SetLogSummary(logSummary *JobLogSummary) *ReplaceJobOptions { + options.LogSummary = logSummary + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceJobOptions) 
SetHeaders(param map[string]string) *ReplaceJobOptions {
+	options.Headers = param
+	return options
+}
+
+// ReplaceKmsSettingsOptions : The ReplaceKmsSettings options.
+type ReplaceKmsSettingsOptions struct {
+	// Location.
+	Location *string `json:"location,omitempty"`
+
+	// Encryption scheme.
+	EncryptionScheme *string `json:"encryption_scheme,omitempty"`
+
+	// Resource group.
+	ResourceGroup *string `json:"resource_group,omitempty"`
+
+	// Primary CRK details.
+	PrimaryCrk *KMSSettingsPrimaryCrk `json:"primary_crk,omitempty"`
+
+	// Secondary CRK details.
+	SecondaryCrk *KMSSettingsSecondaryCrk `json:"secondary_crk,omitempty"`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewReplaceKmsSettingsOptions : Instantiate ReplaceKmsSettingsOptions
+func (*SchematicsV1) NewReplaceKmsSettingsOptions() *ReplaceKmsSettingsOptions {
+	return &ReplaceKmsSettingsOptions{}
+}
+
+// SetLocation : Allow user to set Location
+func (options *ReplaceKmsSettingsOptions) SetLocation(location string) *ReplaceKmsSettingsOptions {
+	options.Location = core.StringPtr(location)
+	return options
+}
+
+// SetEncryptionScheme : Allow user to set EncryptionScheme
+func (options *ReplaceKmsSettingsOptions) SetEncryptionScheme(encryptionScheme string) *ReplaceKmsSettingsOptions {
+	options.EncryptionScheme = core.StringPtr(encryptionScheme)
+	return options
+}
+
+// SetResourceGroup : Allow user to set ResourceGroup
+func (options *ReplaceKmsSettingsOptions) SetResourceGroup(resourceGroup string) *ReplaceKmsSettingsOptions {
+	options.ResourceGroup = core.StringPtr(resourceGroup)
+	return options
+}
+
+// SetPrimaryCrk : Allow user to set PrimaryCrk
+func (options *ReplaceKmsSettingsOptions) SetPrimaryCrk(primaryCrk *KMSSettingsPrimaryCrk) *ReplaceKmsSettingsOptions {
+	options.PrimaryCrk = primaryCrk
+	return options
+}
+
+// SetSecondaryCrk : Allow user to set SecondaryCrk
+func (options *ReplaceKmsSettingsOptions) SetSecondaryCrk(secondaryCrk *KMSSettingsSecondaryCrk) *ReplaceKmsSettingsOptions {
+	options.SecondaryCrk = secondaryCrk
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ReplaceKmsSettingsOptions) SetHeaders(param map[string]string) *ReplaceKmsSettingsOptions {
+	options.Headers = param
+	return options
+}
+
+// ReplaceSharedDatasetOptions : The ReplaceSharedDataset options.
+type ReplaceSharedDatasetOptions struct {
+	// The shared dataset ID. Use the GET /shared_datasets to look up the shared dataset IDs in your IBM Cloud account.
+	SdID *string `json:"sd_id" validate:"required,ne="`
+
+	// Automatically propagate changes to consumers.
+	AutoPropagateChange *bool `json:"auto_propagate_change,omitempty"`
+
+	// Dataset description.
+	Description *string `json:"description,omitempty"`
+
+	// Affected workspaces.
+	EffectedWorkspaceIds []string `json:"effected_workspace_ids,omitempty"`
+
+	// Resource group name.
+	ResourceGroup *string `json:"resource_group,omitempty"`
+
+	// Shared dataset data.
+	SharedDatasetData []SharedDatasetData `json:"shared_dataset_data,omitempty"`
+
+	// Shared dataset name.
+	SharedDatasetName *string `json:"shared_dataset_name,omitempty"`
+
+	// Shared dataset source name.
+	SharedDatasetSourceName *string `json:"shared_dataset_source_name,omitempty"`
+
+	// Shared dataset type.
+	SharedDatasetType []string `json:"shared_dataset_type,omitempty"`
+
+	// Shared dataset tags.
+	Tags []string `json:"tags,omitempty"`
+
+	// Shared dataset version.
+ Version *string `json:"version,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewReplaceSharedDatasetOptions : Instantiate ReplaceSharedDatasetOptions +func (*SchematicsV1) NewReplaceSharedDatasetOptions(sdID string) *ReplaceSharedDatasetOptions { + return &ReplaceSharedDatasetOptions{ + SdID: core.StringPtr(sdID), + } +} + +// SetSdID : Allow user to set SdID +func (options *ReplaceSharedDatasetOptions) SetSdID(sdID string) *ReplaceSharedDatasetOptions { + options.SdID = core.StringPtr(sdID) + return options +} + +// SetAutoPropagateChange : Allow user to set AutoPropagateChange +func (options *ReplaceSharedDatasetOptions) SetAutoPropagateChange(autoPropagateChange bool) *ReplaceSharedDatasetOptions { + options.AutoPropagateChange = core.BoolPtr(autoPropagateChange) + return options +} + +// SetDescription : Allow user to set Description +func (options *ReplaceSharedDatasetOptions) SetDescription(description string) *ReplaceSharedDatasetOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetEffectedWorkspaceIds : Allow user to set EffectedWorkspaceIds +func (options *ReplaceSharedDatasetOptions) SetEffectedWorkspaceIds(effectedWorkspaceIds []string) *ReplaceSharedDatasetOptions { + options.EffectedWorkspaceIds = effectedWorkspaceIds + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *ReplaceSharedDatasetOptions) SetResourceGroup(resourceGroup string) *ReplaceSharedDatasetOptions { + options.ResourceGroup = core.StringPtr(resourceGroup) + return options +} + +// SetSharedDatasetData : Allow user to set SharedDatasetData +func (options *ReplaceSharedDatasetOptions) SetSharedDatasetData(sharedDatasetData []SharedDatasetData) *ReplaceSharedDatasetOptions { + options.SharedDatasetData = sharedDatasetData + return options +} + +// SetSharedDatasetName : Allow user to set SharedDatasetName +func (options *ReplaceSharedDatasetOptions) SetSharedDatasetName(sharedDatasetName string) *ReplaceSharedDatasetOptions { + options.SharedDatasetName = core.StringPtr(sharedDatasetName) + return options +} + +// SetSharedDatasetSourceName : Allow user to set SharedDatasetSourceName +func (options *ReplaceSharedDatasetOptions) SetSharedDatasetSourceName(sharedDatasetSourceName string) *ReplaceSharedDatasetOptions { + options.SharedDatasetSourceName = core.StringPtr(sharedDatasetSourceName) + return options +} + +// SetSharedDatasetType : Allow user to set SharedDatasetType +func (options *ReplaceSharedDatasetOptions) SetSharedDatasetType(sharedDatasetType []string) *ReplaceSharedDatasetOptions { + options.SharedDatasetType = sharedDatasetType + return options +} + +// SetTags : Allow user to set Tags +func (options *ReplaceSharedDatasetOptions) SetTags(tags []string) *ReplaceSharedDatasetOptions { + options.Tags = tags + return options +} + +// SetVersion : Allow user to set Version +func (options *ReplaceSharedDatasetOptions) SetVersion(version string) *ReplaceSharedDatasetOptions { + options.Version = core.StringPtr(version) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceSharedDatasetOptions) SetHeaders(param map[string]string) *ReplaceSharedDatasetOptions { + options.Headers = param + return options +} + +// ReplaceWorkspaceInputsOptions : The ReplaceWorkspaceInputs options. +type ReplaceWorkspaceInputsOptions struct { + // The workspace ID for the workspace that you want to query. 
You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The Template ID for which you want to get the values. Use the GET /workspaces to look up the workspace IDs or + // template IDs in your IBM Cloud account. + TID *string `json:"t_id" validate:"required,ne="` + + // EnvVariableRequest .. + EnvValues []interface{} `json:"env_values,omitempty"` + + // User values. + Values *string `json:"values,omitempty"` + + // VariablesRequest -. + Variablestore []WorkspaceVariableRequest `json:"variablestore,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewReplaceWorkspaceInputsOptions : Instantiate ReplaceWorkspaceInputsOptions +func (*SchematicsV1) NewReplaceWorkspaceInputsOptions(wID string, tID string) *ReplaceWorkspaceInputsOptions { + return &ReplaceWorkspaceInputsOptions{ + WID: core.StringPtr(wID), + TID: core.StringPtr(tID), + } +} + +// SetWID : Allow user to set WID +func (options *ReplaceWorkspaceInputsOptions) SetWID(wID string) *ReplaceWorkspaceInputsOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetTID : Allow user to set TID +func (options *ReplaceWorkspaceInputsOptions) SetTID(tID string) *ReplaceWorkspaceInputsOptions { + options.TID = core.StringPtr(tID) + return options +} + +// SetEnvValues : Allow user to set EnvValues +func (options *ReplaceWorkspaceInputsOptions) SetEnvValues(envValues []interface{}) *ReplaceWorkspaceInputsOptions { + options.EnvValues = envValues + return options +} + +// SetValues : Allow user to set Values +func (options *ReplaceWorkspaceInputsOptions) SetValues(values string) *ReplaceWorkspaceInputsOptions { + options.Values = core.StringPtr(values) + return options +} + +// SetVariablestore : Allow user to set Variablestore +func (options *ReplaceWorkspaceInputsOptions) SetVariablestore(variablestore []WorkspaceVariableRequest) *ReplaceWorkspaceInputsOptions { + options.Variablestore = variablestore + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceWorkspaceInputsOptions) SetHeaders(param map[string]string) *ReplaceWorkspaceInputsOptions { + options.Headers = param + return options +} + +// ReplaceWorkspaceOptions : The ReplaceWorkspace options. +type ReplaceWorkspaceOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // CatalogRef -. + CatalogRef *CatalogRef `json:"catalog_ref,omitempty"` + + // Workspace description. + Description *string `json:"description,omitempty"` + + // Workspace name. + Name *string `json:"name,omitempty"` + + // SharedTargetData -. + SharedData *SharedTargetData `json:"shared_data,omitempty"` + + // Tags -. + Tags []string `json:"tags,omitempty"` + + // TemplateData -. + TemplateData []TemplateSourceDataRequest `json:"template_data,omitempty"` + + // TemplateRepoUpdateRequest -. + TemplateRepo *TemplateRepoUpdateRequest `json:"template_repo,omitempty"` + + // List of Workspace type. + Type []string `json:"type,omitempty"` + + // WorkspaceStatusUpdateRequest -. + WorkspaceStatus *WorkspaceStatusUpdateRequest `json:"workspace_status,omitempty"` + + // WorkspaceStatusMessage -. 
+ WorkspaceStatusMsg *WorkspaceStatusMessage `json:"workspace_status_msg,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewReplaceWorkspaceOptions : Instantiate ReplaceWorkspaceOptions +func (*SchematicsV1) NewReplaceWorkspaceOptions(wID string) *ReplaceWorkspaceOptions { + return &ReplaceWorkspaceOptions{ + WID: core.StringPtr(wID), + } +} + +// SetWID : Allow user to set WID +func (options *ReplaceWorkspaceOptions) SetWID(wID string) *ReplaceWorkspaceOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetCatalogRef : Allow user to set CatalogRef +func (options *ReplaceWorkspaceOptions) SetCatalogRef(catalogRef *CatalogRef) *ReplaceWorkspaceOptions { + options.CatalogRef = catalogRef + return options +} + +// SetDescription : Allow user to set Description +func (options *ReplaceWorkspaceOptions) SetDescription(description string) *ReplaceWorkspaceOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetName : Allow user to set Name +func (options *ReplaceWorkspaceOptions) SetName(name string) *ReplaceWorkspaceOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetSharedData : Allow user to set SharedData +func (options *ReplaceWorkspaceOptions) SetSharedData(sharedData *SharedTargetData) *ReplaceWorkspaceOptions { + options.SharedData = sharedData + return options +} + +// SetTags : Allow user to set Tags +func (options *ReplaceWorkspaceOptions) SetTags(tags []string) *ReplaceWorkspaceOptions { + options.Tags = tags + return options +} + +// SetTemplateData : Allow user to set TemplateData +func (options *ReplaceWorkspaceOptions) SetTemplateData(templateData []TemplateSourceDataRequest) *ReplaceWorkspaceOptions { + options.TemplateData = templateData + return options +} + +// SetTemplateRepo : Allow user to set TemplateRepo +func (options *ReplaceWorkspaceOptions) SetTemplateRepo(templateRepo *TemplateRepoUpdateRequest) *ReplaceWorkspaceOptions { + options.TemplateRepo = templateRepo + return options +} + +// SetType : Allow user to set Type +func (options *ReplaceWorkspaceOptions) SetType(typeVar []string) *ReplaceWorkspaceOptions { + options.Type = typeVar + return options +} + +// SetWorkspaceStatus : Allow user to set WorkspaceStatus +func (options *ReplaceWorkspaceOptions) SetWorkspaceStatus(workspaceStatus *WorkspaceStatusUpdateRequest) *ReplaceWorkspaceOptions { + options.WorkspaceStatus = workspaceStatus + return options +} + +// SetWorkspaceStatusMsg : Allow user to set WorkspaceStatusMsg +func (options *ReplaceWorkspaceOptions) SetWorkspaceStatusMsg(workspaceStatusMsg *WorkspaceStatusMessage) *ReplaceWorkspaceOptions { + options.WorkspaceStatusMsg = workspaceStatusMsg + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceWorkspaceOptions) SetHeaders(param map[string]string) *ReplaceWorkspaceOptions { + options.Headers = param + return options +} + +// ResourceGroupResponse : ResourceGroupResponse -. +type ResourceGroupResponse struct { + // Account id. + AccountID *string `json:"account_id,omitempty"` + + // CRN. + Crn *string `json:"crn,omitempty"` + + // default. + Default *bool `json:"default,omitempty"` + + // Resource group name. + Name *string `json:"name,omitempty"` + + // Resource group id. + ResourceGroupID *string `json:"resource_group_id,omitempty"` + + // Resource group state. 
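+ // For example "ACTIVE" (an illustrative value, not an exhaustive list of states).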
+ State *string `json:"state,omitempty"` +} + +// UnmarshalResourceGroupResponse unmarshals an instance of ResourceGroupResponse from the specified map of raw messages. +func UnmarshalResourceGroupResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceGroupResponse) + err = core.UnmarshalPrimitive(m, "account_id", &obj.AccountID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.Crn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "default", &obj.Default) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group_id", &obj.ResourceGroupID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RunWorkspaceCommandsOptions : The RunWorkspaceCommands options. +type RunWorkspaceCommandsOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // The IAM refresh token associated with the IBM Cloud account. + RefreshToken *string `json:"refresh_token" validate:"required"` + + // List of commands. + Commands []TerraformCommand `json:"commands,omitempty"` + + // Command name. + OperationName *string `json:"operation_name,omitempty"` + + // Command description. + Description *string `json:"description,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewRunWorkspaceCommandsOptions : Instantiate RunWorkspaceCommandsOptions +func (*SchematicsV1) NewRunWorkspaceCommandsOptions(wID string, refreshToken string) *RunWorkspaceCommandsOptions { + return &RunWorkspaceCommandsOptions{ + WID: core.StringPtr(wID), + RefreshToken: core.StringPtr(refreshToken), + } +} + +// SetWID : Allow user to set WID +func (options *RunWorkspaceCommandsOptions) SetWID(wID string) *RunWorkspaceCommandsOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetRefreshToken : Allow user to set RefreshToken +func (options *RunWorkspaceCommandsOptions) SetRefreshToken(refreshToken string) *RunWorkspaceCommandsOptions { + options.RefreshToken = core.StringPtr(refreshToken) + return options +} + +// SetCommands : Allow user to set Commands +func (options *RunWorkspaceCommandsOptions) SetCommands(commands []TerraformCommand) *RunWorkspaceCommandsOptions { + options.Commands = commands + return options +} + +// SetOperationName : Allow user to set OperationName +func (options *RunWorkspaceCommandsOptions) SetOperationName(operationName string) *RunWorkspaceCommandsOptions { + options.OperationName = core.StringPtr(operationName) + return options +} + +// SetDescription : Allow user to set Description +func (options *RunWorkspaceCommandsOptions) SetDescription(description string) *RunWorkspaceCommandsOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *RunWorkspaceCommandsOptions) SetHeaders(param map[string]string) *RunWorkspaceCommandsOptions { + options.Headers = param + return options +} + +// SchematicsLocations : Schematics locations. +type SchematicsLocations struct { + // Country. + Country *string `json:"country,omitempty"` + + // Geography. 
+ Geography *string `json:"geography,omitempty"`
+
+ // Location id.
+ ID *string `json:"id,omitempty"`
+
+ // Kind.
+ Kind *string `json:"kind,omitempty"`
+
+ // Metro.
+ Metro *string `json:"metro,omitempty"`
+
+ // Multizone metro.
+ MultizoneMetro *string `json:"multizone_metro,omitempty"`
+
+ // Location name.
+ Name *string `json:"name,omitempty"`
+}
+
+// UnmarshalSchematicsLocations unmarshals an instance of SchematicsLocations from the specified map of raw messages.
+func UnmarshalSchematicsLocations(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(SchematicsLocations)
+ err = core.UnmarshalPrimitive(m, "country", &obj.Country)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "geography", &obj.Geography)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "kind", &obj.Kind)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "metro", &obj.Metro)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "multizone_metro", &obj.MultizoneMetro)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SharedDatasetData : SharedDatasetData ...
+type SharedDatasetData struct {
+ // Default values.
+ DefaultValue *string `json:"default_value,omitempty"`
+
+ // Data description.
+ Description *string `json:"description,omitempty"`
+
+ // Data is hidden.
+ Hidden *bool `json:"hidden,omitempty"`
+
+ // Data is readonly.
+ Immutable *bool `json:"immutable,omitempty"`
+
+ // Data matches a regular expression.
+ Matches *string `json:"matches,omitempty"`
+
+ // Max value of the data.
+ MaxValue *string `json:"max_value,omitempty"`
+
+ // Max string length of the data.
+ MaxValueLen *string `json:"max_value_len,omitempty"`
+
+ // Min value of the data.
+ MinValue *string `json:"min_value,omitempty"`
+
+ // Min string length of the data.
+ MinValueLen *string `json:"min_value_len,omitempty"`
+
+ // Possible options for the Data.
+ Options []string `json:"options,omitempty"`
+
+ // Override value for the Data.
+ OverrideValue *string `json:"override_value,omitempty"`
+
+ // Data is secure.
+ Secure *bool `json:"secure,omitempty"`
+
+ // Alias strings for the variable names.
+ VarAliases []string `json:"var_aliases,omitempty"`
+
+ // Variable name.
+ VarName *string `json:"var_name,omitempty"`
+
+ // Variable reference.
+ VarRef *string `json:"var_ref,omitempty"`
+
+ // Variable type.
+ VarType *string `json:"var_type,omitempty"`
+}
+
+// UnmarshalSharedDatasetData unmarshals an instance of SharedDatasetData from the specified map of raw messages.
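+// A sketch of the calling convention (an assumption, mirroring the reflect
+// based pattern every unmarshaller in this file follows): decode the JSON
+// object into a map of raw messages, then pass a pointer to a destination
+// pointer, which the function sets via reflection.
+//
+//   var rawMap map[string]json.RawMessage
+//   _ = json.Unmarshal(payload, &rawMap) // payload: hypothetical JSON body
+//   var data *SharedDatasetData
+//   err := UnmarshalSharedDatasetData(rawMap, &data)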
+func UnmarshalSharedDatasetData(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(SharedDatasetData)
+ err = core.UnmarshalPrimitive(m, "default_value", &obj.DefaultValue)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "hidden", &obj.Hidden)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "immutable", &obj.Immutable)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "matches", &obj.Matches)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "max_value", &obj.MaxValue)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "max_value_len", &obj.MaxValueLen)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "min_value", &obj.MinValue)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "min_value_len", &obj.MinValueLen)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "options", &obj.Options)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "override_value", &obj.OverrideValue)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "secure", &obj.Secure)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "var_aliases", &obj.VarAliases)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "var_name", &obj.VarName)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "var_ref", &obj.VarRef)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "var_type", &obj.VarType)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SharedDatasetResponse : SharedDatasetResponse - request returned by create.
+type SharedDatasetResponse struct {
+ // Account id.
+ Account *string `json:"account,omitempty"`
+
+ // Dataset created at.
+ CreatedAt *strfmt.DateTime `json:"created_at,omitempty"`
+
+ // Dataset created by.
+ CreatedBy *string `json:"created_by,omitempty"`
+
+ // Dataset description.
+ Description *string `json:"description,omitempty"`
+
+ // Affected workspace ids.
+ EffectedWorkspaceIds []string `json:"effected_workspace_ids,omitempty"`
+
+ // Resource group name.
+ ResourceGroup *string `json:"resource_group,omitempty"`
+
+ // Shared dataset data.
+ SharedDatasetData []SharedDatasetData `json:"shared_dataset_data,omitempty"`
+
+ // Shared dataset id.
+ SharedDatasetID *string `json:"shared_dataset_id,omitempty"`
+
+ // Shared dataset name.
+ SharedDatasetName *string `json:"shared_dataset_name,omitempty"`
+
+ // Shared dataset type.
+ SharedDatasetType []string `json:"shared_dataset_type,omitempty"`
+
+ // Shared dataset variable status.
+ State *string `json:"state,omitempty"`
+
+ // Shared dataset tags.
+ Tags []string `json:"tags,omitempty"`
+
+ // Shared dataset updated at.
+ UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"`
+
+ // Shared dataset updated by.
+ UpdatedBy *string `json:"updated_by,omitempty"`
+
+ // Shared dataset version.
+ Version *string `json:"version,omitempty"`
+}
+
+// UnmarshalSharedDatasetResponse unmarshals an instance of SharedDatasetResponse from the specified map of raw messages.
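+// This unmarshaller is usually consumed by core.UnmarshalModel rather than
+// called directly; UnmarshalSharedDatasetResponseList below wires it up as:
+//
+//   err = core.UnmarshalModel(m, "shared_datasets", &obj.SharedDatasets, UnmarshalSharedDatasetResponse)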
+func UnmarshalSharedDatasetResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SharedDatasetResponse) + err = core.UnmarshalPrimitive(m, "account", &obj.Account) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "effected_workspace_ids", &obj.EffectedWorkspaceIds) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group", &obj.ResourceGroup) + if err != nil { + return + } + err = core.UnmarshalModel(m, "shared_dataset_data", &obj.SharedDatasetData, UnmarshalSharedDatasetData) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "shared_dataset_id", &obj.SharedDatasetID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "shared_dataset_name", &obj.SharedDatasetName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "shared_dataset_type", &obj.SharedDatasetType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "version", &obj.Version) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SharedDatasetResponseList : SharedDatasetResponseList -. +type SharedDatasetResponseList struct { + // Shared dataset count. + Count *int64 `json:"count,omitempty"` + + // List of datasets. + SharedDatasets []SharedDatasetResponse `json:"shared_datasets,omitempty"` +} + +// UnmarshalSharedDatasetResponseList unmarshals an instance of SharedDatasetResponseList from the specified map of raw messages. +func UnmarshalSharedDatasetResponseList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SharedDatasetResponseList) + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalModel(m, "shared_datasets", &obj.SharedDatasets, UnmarshalSharedDatasetResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SharedTargetData : SharedTargetData -. +type SharedTargetData struct { + // Cluster created on. + ClusterCreatedOn *string `json:"cluster_created_on,omitempty"` + + // Cluster id. + ClusterID *string `json:"cluster_id,omitempty"` + + // Cluster name. + ClusterName *string `json:"cluster_name,omitempty"` + + // Cluster type. + ClusterType *string `json:"cluster_type,omitempty"` + + // Entitlement keys. + EntitlementKeys []interface{} `json:"entitlement_keys,omitempty"` + + // Target namespace. + Namespace *string `json:"namespace,omitempty"` + + // Target region. + Region *string `json:"region,omitempty"` + + // Target resource group id. + ResourceGroupID *string `json:"resource_group_id,omitempty"` + + // Cluster worker count. + WorkerCount *int64 `json:"worker_count,omitempty"` + + // Cluster worker type. 
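+ // For example "b3c.4x16" (an illustrative machine type).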
+ WorkerMachineType *string `json:"worker_machine_type,omitempty"` +} + +// UnmarshalSharedTargetData unmarshals an instance of SharedTargetData from the specified map of raw messages. +func UnmarshalSharedTargetData(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SharedTargetData) + err = core.UnmarshalPrimitive(m, "cluster_created_on", &obj.ClusterCreatedOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cluster_id", &obj.ClusterID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cluster_name", &obj.ClusterName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cluster_type", &obj.ClusterType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "entitlement_keys", &obj.EntitlementKeys) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "namespace", &obj.Namespace) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "region", &obj.Region) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group_id", &obj.ResourceGroupID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "worker_count", &obj.WorkerCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "worker_machine_type", &obj.WorkerMachineType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SharedTargetDataResponse : SharedTargetDataResponse -. +type SharedTargetDataResponse struct { + // Target cluster id. + ClusterID *string `json:"cluster_id,omitempty"` + + // Target cluster name. + ClusterName *string `json:"cluster_name,omitempty"` + + // Entitlement keys. + EntitlementKeys []interface{} `json:"entitlement_keys,omitempty"` + + // Target namespace. + Namespace *string `json:"namespace,omitempty"` + + // Target region. + Region *string `json:"region,omitempty"` + + // Target resource group id. + ResourceGroupID *string `json:"resource_group_id,omitempty"` +} + +// UnmarshalSharedTargetDataResponse unmarshals an instance of SharedTargetDataResponse from the specified map of raw messages. +func UnmarshalSharedTargetDataResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SharedTargetDataResponse) + err = core.UnmarshalPrimitive(m, "cluster_id", &obj.ClusterID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cluster_name", &obj.ClusterName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "entitlement_keys", &obj.EntitlementKeys) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "namespace", &obj.Namespace) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "region", &obj.Region) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group_id", &obj.ResourceGroupID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// StateStoreResponse : StateStoreResponse -. +type StateStoreResponse struct { + // Engine name. + EngineName *string `json:"engine_name,omitempty"` + + // Engine version. + EngineVersion *string `json:"engine_version,omitempty"` + + // State store id. + ID *string `json:"id,omitempty"` + + // State store url. + StateStoreURL *string `json:"state_store_url,omitempty"` +} + +// UnmarshalStateStoreResponse unmarshals an instance of StateStoreResponse from the specified map of raw messages. 
+func UnmarshalStateStoreResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(StateStoreResponse)
+ err = core.UnmarshalPrimitive(m, "engine_name", &obj.EngineName)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "engine_version", &obj.EngineVersion)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "state_store_url", &obj.StateStoreURL)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// StateStoreResponseList : StateStoreResponseList -.
+type StateStoreResponseList struct {
+ // List of state stores.
+ RuntimeData []StateStoreResponse `json:"runtime_data,omitempty"`
+}
+
+// UnmarshalStateStoreResponseList unmarshals an instance of StateStoreResponseList from the specified map of raw messages.
+func UnmarshalStateStoreResponseList(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(StateStoreResponseList)
+ err = core.UnmarshalModel(m, "runtime_data", &obj.RuntimeData, UnmarshalStateStoreResponse)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SystemLock : System lock status.
+type SystemLock struct {
+ // Is the Workspace locked by a Schematics action?
+ SysLocked *bool `json:"sys_locked,omitempty"`
+
+ // Name of the User who performed the action that led to the locking of the Workspace.
+ SysLockedBy *string `json:"sys_locked_by,omitempty"`
+
+ // When the User performed the action that led to the locking of the Workspace.
+ SysLockedAt *strfmt.DateTime `json:"sys_locked_at,omitempty"`
+}
+
+// UnmarshalSystemLock unmarshals an instance of SystemLock from the specified map of raw messages.
+func UnmarshalSystemLock(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(SystemLock)
+ err = core.UnmarshalPrimitive(m, "sys_locked", &obj.SysLocked)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "sys_locked_by", &obj.SysLockedBy)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "sys_locked_at", &obj.SysLockedAt)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TargetResourceset : Complete Target details with user inputs and system generated data.
+type TargetResourceset struct {
+ // Target name.
+ Name *string `json:"name,omitempty"`
+
+ // Target type (cluster, vsi, icd, vpc).
+ Type *string `json:"type,omitempty"`
+
+ // Target description.
+ Description *string `json:"description,omitempty"`
+
+ // Resource selection query string.
+ ResourceQuery *string `json:"resource_query,omitempty"`
+
+ // Override credential for each resource. Reference to credentials values, used by all resources.
+ CredentialRef *string `json:"credential_ref,omitempty"`
+
+ // Target id.
+ ID *string `json:"id,omitempty"`
+
+ // Targets creation time.
+ CreatedAt *strfmt.DateTime `json:"created_at,omitempty"`
+
+ // Email address of user who created the Targets.
+ CreatedBy *string `json:"created_by,omitempty"`
+
+ // Targets update time.
+ UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"`
+
+ // Email address of user who updated the Targets.
+ UpdatedBy *string `json:"updated_by,omitempty"`
+
+ // System lock status.
+ SysLock *SystemLock `json:"sys_lock,omitempty"`
+
+ // Array of resource ids.
+ ResourceIds []string `json:"resource_ids,omitempty"` +} + +// UnmarshalTargetResourceset unmarshals an instance of TargetResourceset from the specified map of raw messages. +func UnmarshalTargetResourceset(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TargetResourceset) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_query", &obj.ResourceQuery) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "credential_ref", &obj.CredentialRef) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + err = core.UnmarshalModel(m, "sys_lock", &obj.SysLock, UnmarshalSystemLock) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_ids", &obj.ResourceIds) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TemplateReadme : TemplateReadme -. +type TemplateReadme struct { + // Readme string. + Readme *string `json:"readme,omitempty"` +} + +// UnmarshalTemplateReadme unmarshals an instance of TemplateReadme from the specified map of raw messages. +func UnmarshalTemplateReadme(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TemplateReadme) + err = core.UnmarshalPrimitive(m, "readme", &obj.Readme) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TemplateRepoRequest : TemplateRepoRequest -. +type TemplateRepoRequest struct { + // Repo branch. + Branch *string `json:"branch,omitempty"` + + // Repo release. + Release *string `json:"release,omitempty"` + + // Repo SHA value. + RepoShaValue *string `json:"repo_sha_value,omitempty"` + + // Repo URL. + RepoURL *string `json:"repo_url,omitempty"` + + // Source URL. + URL *string `json:"url,omitempty"` +} + +// UnmarshalTemplateRepoRequest unmarshals an instance of TemplateRepoRequest from the specified map of raw messages. +func UnmarshalTemplateRepoRequest(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TemplateRepoRequest) + err = core.UnmarshalPrimitive(m, "branch", &obj.Branch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "release", &obj.Release) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "repo_sha_value", &obj.RepoShaValue) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "repo_url", &obj.RepoURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TemplateRepoResponse : TemplateRepoResponse -. +type TemplateRepoResponse struct { + // Repo branch. + Branch *string `json:"branch,omitempty"` + + // Full repo URL. + FullURL *string `json:"full_url,omitempty"` + + // Has uploaded git repo tar. 
+ HasUploadedgitrepotar *bool `json:"has_uploadedgitrepotar,omitempty"` + + // Repo release. + Release *string `json:"release,omitempty"` + + // Repo SHA value. + RepoShaValue *string `json:"repo_sha_value,omitempty"` + + // Repo URL. + RepoURL *string `json:"repo_url,omitempty"` + + // Source URL. + URL *string `json:"url,omitempty"` +} + +// UnmarshalTemplateRepoResponse unmarshals an instance of TemplateRepoResponse from the specified map of raw messages. +func UnmarshalTemplateRepoResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TemplateRepoResponse) + err = core.UnmarshalPrimitive(m, "branch", &obj.Branch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "full_url", &obj.FullURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "has_uploadedgitrepotar", &obj.HasUploadedgitrepotar) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "release", &obj.Release) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "repo_sha_value", &obj.RepoShaValue) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "repo_url", &obj.RepoURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TemplateRepoTarUploadResponse : TemplateRepoTarUploadResponse -. +type TemplateRepoTarUploadResponse struct { + // Tar file value. + FileValue *string `json:"file_value,omitempty"` + + // Has received tar file. + HasReceivedFile *bool `json:"has_received_file,omitempty"` + + // Template id. + ID *string `json:"id,omitempty"` +} + +// UnmarshalTemplateRepoTarUploadResponse unmarshals an instance of TemplateRepoTarUploadResponse from the specified map of raw messages. +func UnmarshalTemplateRepoTarUploadResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TemplateRepoTarUploadResponse) + err = core.UnmarshalPrimitive(m, "file_value", &obj.FileValue) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "has_received_file", &obj.HasReceivedFile) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// TemplateRepoUpdateRequest : TemplateRepoUpdateRequest -. +type TemplateRepoUpdateRequest struct { + // Repo branch. + Branch *string `json:"branch,omitempty"` + + // Repo release. + Release *string `json:"release,omitempty"` + + // Repo SHA value. + RepoShaValue *string `json:"repo_sha_value,omitempty"` + + // Repo URL. + RepoURL *string `json:"repo_url,omitempty"` + + // Source URL. + URL *string `json:"url,omitempty"` +} + +// UnmarshalTemplateRepoUpdateRequest unmarshals an instance of TemplateRepoUpdateRequest from the specified map of raw messages. 
+func UnmarshalTemplateRepoUpdateRequest(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TemplateRepoUpdateRequest)
+ err = core.UnmarshalPrimitive(m, "branch", &obj.Branch)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "release", &obj.Release)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "repo_sha_value", &obj.RepoShaValue)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "repo_url", &obj.RepoURL)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "url", &obj.URL)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TemplateResources : TemplateResources -.
+type TemplateResources struct {
+ // Template folder name.
+ Folder *string `json:"folder,omitempty"`
+
+ // Template id.
+ ID *string `json:"id,omitempty"`
+
+ // List of null resources.
+ NullResources []interface{} `json:"null_resources,omitempty"`
+
+ // List of related resources.
+ RelatedResources []interface{} `json:"related_resources,omitempty"`
+
+ // List of resources.
+ Resources []interface{} `json:"resources,omitempty"`
+
+ // Number of resources.
+ ResourcesCount *int64 `json:"resources_count,omitempty"`
+
+ // Type of templates.
+ TemplateType *string `json:"template_type,omitempty"`
+}
+
+// UnmarshalTemplateResources unmarshals an instance of TemplateResources from the specified map of raw messages.
+func UnmarshalTemplateResources(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TemplateResources)
+ err = core.UnmarshalPrimitive(m, "folder", &obj.Folder)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "null_resources", &obj.NullResources)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "related_resources", &obj.RelatedResources)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "resources", &obj.Resources)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "resources_count", &obj.ResourcesCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "template_type", &obj.TemplateType)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TemplateRunTimeDataResponse : TemplateRunTimeDataResponse -.
+type TemplateRunTimeDataResponse struct {
+ // Engine command.
+ EngineCmd *string `json:"engine_cmd,omitempty"`
+
+ // Engine name.
+ EngineName *string `json:"engine_name,omitempty"`
+
+ // Engine version.
+ EngineVersion *string `json:"engine_version,omitempty"`
+
+ // Template id.
+ ID *string `json:"id,omitempty"`
+
+ // Log store url.
+ LogStoreURL *string `json:"log_store_url,omitempty"`
+
+ // List of Output values.
+ OutputValues []interface{} `json:"output_values,omitempty"`
+
+ // List of resources.
+ Resources [][]interface{} `json:"resources,omitempty"`
+
+ // State store URL.
+ StateStoreURL *string `json:"state_store_url,omitempty"`
+}
+
+// UnmarshalTemplateRunTimeDataResponse unmarshals an instance of TemplateRunTimeDataResponse from the specified map of raw messages.
+func UnmarshalTemplateRunTimeDataResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TemplateRunTimeDataResponse)
+ err = core.UnmarshalPrimitive(m, "engine_cmd", &obj.EngineCmd)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "engine_name", &obj.EngineName)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "engine_version", &obj.EngineVersion)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "log_store_url", &obj.LogStoreURL)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "output_values", &obj.OutputValues)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "resources", &obj.Resources)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "state_store_url", &obj.StateStoreURL)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TemplateSourceDataRequest : TemplateSourceDataRequest -.
+type TemplateSourceDataRequest struct {
+ // EnvVariableRequest ..
+ EnvValues []interface{} `json:"env_values,omitempty"`
+
+ // Folder name.
+ Folder *string `json:"folder,omitempty"`
+
+ // Init state file.
+ InitStateFile *string `json:"init_state_file,omitempty"`
+
+ // Template type.
+ Type *string `json:"type,omitempty"`
+
+ // Uninstall script name.
+ UninstallScriptName *string `json:"uninstall_script_name,omitempty"`
+
+ // Value.
+ Values *string `json:"values,omitempty"`
+
+ // List of values metadata.
+ ValuesMetadata []interface{} `json:"values_metadata,omitempty"`
+
+ // VariablesRequest -.
+ Variablestore []WorkspaceVariableRequest `json:"variablestore,omitempty"`
+}
+
+// UnmarshalTemplateSourceDataRequest unmarshals an instance of TemplateSourceDataRequest from the specified map of raw messages.
+func UnmarshalTemplateSourceDataRequest(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TemplateSourceDataRequest)
+ err = core.UnmarshalPrimitive(m, "env_values", &obj.EnvValues)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "folder", &obj.Folder)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "init_state_file", &obj.InitStateFile)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "uninstall_script_name", &obj.UninstallScriptName)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "values", &obj.Values)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "values_metadata", &obj.ValuesMetadata)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "variablestore", &obj.Variablestore, UnmarshalWorkspaceVariableRequest)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
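+// A hedged usage sketch for TemplateSourceDataRequest (the "schematicsv1"
+// package alias is assumed, and WorkspaceVariableRequest's Name/Value fields
+// are an assumption, not shown in this file; all values are illustrative):
+//
+//   templateData := []schematicsv1.TemplateSourceDataRequest{{
+//     Folder: core.StringPtr("."),
+//     Type:   core.StringPtr("terraform_v0.12"),
+//     Variablestore: []schematicsv1.WorkspaceVariableRequest{
+//       {Name: core.StringPtr("region"), Value: core.StringPtr("us-south")},
+//     },
+//   }}
+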
+// TemplateSourceDataResponse : TemplateSourceDataResponse -.
+type TemplateSourceDataResponse struct {
+ // List of environment values.
+ EnvValues []EnvVariableResponse `json:"env_values,omitempty"`
+
+ // Folder name.
+ Folder *string `json:"folder,omitempty"`
+
+ // Has github token.
+ HasGithubtoken *bool `json:"has_githubtoken,omitempty"`
+
+ // Template id.
+ ID *string `json:"id,omitempty"`
+
+ // Template type.
+ Type *string `json:"type,omitempty"`
+
+ // Uninstall script name.
+ UninstallScriptName *string `json:"uninstall_script_name,omitempty"`
+
+ // Values.
+ Values *string `json:"values,omitempty"`
+
+ // List of values metadata.
+ ValuesMetadata []interface{} `json:"values_metadata,omitempty"`
+
+ // Values URL.
+ ValuesURL *string `json:"values_url,omitempty"`
+
+ // VariablesResponse -.
+ Variablestore []WorkspaceVariableResponse `json:"variablestore,omitempty"`
+}
+
+// UnmarshalTemplateSourceDataResponse unmarshals an instance of TemplateSourceDataResponse from the specified map of raw messages.
+func UnmarshalTemplateSourceDataResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TemplateSourceDataResponse)
+ err = core.UnmarshalModel(m, "env_values", &obj.EnvValues, UnmarshalEnvVariableResponse)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "folder", &obj.Folder)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "has_githubtoken", &obj.HasGithubtoken)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "uninstall_script_name", &obj.UninstallScriptName)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "values", &obj.Values)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "values_metadata", &obj.ValuesMetadata)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "values_url", &obj.ValuesURL)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "variablestore", &obj.Variablestore, UnmarshalWorkspaceVariableResponse)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TemplateStateStore : TemplateStateStore -.
+type TemplateStateStore struct {
+ // State file format version.
+ Version *float64 `json:"version,omitempty"`
+
+ // Terraform version that produced the state.
+ TerraformVersion *string `json:"terraform_version,omitempty"`
+
+ // State serial number, incremented on each change.
+ Serial *float64 `json:"serial,omitempty"`
+
+ // Lineage identifier of the state.
+ Lineage *string `json:"lineage,omitempty"`
+
+ // List of modules recorded in the state.
+ Modules []interface{} `json:"modules,omitempty"`
+}
+
+// UnmarshalTemplateStateStore unmarshals an instance of TemplateStateStore from the specified map of raw messages.
+func UnmarshalTemplateStateStore(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TemplateStateStore)
+ err = core.UnmarshalPrimitive(m, "version", &obj.Version)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "terraform_version", &obj.TerraformVersion)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "serial", &obj.Serial)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "lineage", &obj.Lineage)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "modules", &obj.Modules)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TemplateValues : TemplateValues -.
+type TemplateValues struct {
+ // List of values metadata.
+ ValuesMetadata []interface{} `json:"values_metadata,omitempty"`
+}
+
+// UnmarshalTemplateValues unmarshals an instance of TemplateValues from the specified map of raw messages.
+func UnmarshalTemplateValues(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(TemplateValues)
+ err = core.UnmarshalPrimitive(m, "values_metadata", &obj.ValuesMetadata)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// TerraformCommand : TerraformCommand -.
+type TerraformCommand struct {
+ // Command to execute.
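+ // For example "apply", "destroy", or "state show" (illustrative values, not
+ // an exhaustive list of the commands the service accepts).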
+ Command *string `json:"command,omitempty"` + + // Command Parameters. + CommandParams *string `json:"command_params,omitempty"` + + // Command name. + CommandName *string `json:"command_name,omitempty"` + + // Command description. + CommandDesc *string `json:"command_desc,omitempty"` + + // Instruction to continue or break in case of error. + CommandOnError *string `json:"command_onError,omitempty"` + + // Dependency on previous commands. + CommandDependsOn *string `json:"command_dependsOn,omitempty"` + + // Command status. + CommandStatus *string `json:"command_status,omitempty"` +} + +// UnmarshalTerraformCommand unmarshals an instance of TerraformCommand from the specified map of raw messages. +func UnmarshalTerraformCommand(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(TerraformCommand) + err = core.UnmarshalPrimitive(m, "command", &obj.Command) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_params", &obj.CommandParams) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_name", &obj.CommandName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_desc", &obj.CommandDesc) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_onError", &obj.CommandOnError) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_dependsOn", &obj.CommandDependsOn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "command_status", &obj.CommandStatus) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateActionOptions : The UpdateAction options. +type UpdateActionOptions struct { + // Action Id. Use GET /actions API to look up the Action Ids in your IBM Cloud account. + ActionID *string `json:"action_id" validate:"required,ne="` + + // Action name (unique for an account). + Name *string `json:"name,omitempty"` + + // Action description. + Description *string `json:"description,omitempty"` + + // List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of + // the resources provisioned using Schematics. + Location *string `json:"location,omitempty"` + + // Resource-group name for the Action. By default, Action will be created in Default Resource Group. + ResourceGroup *string `json:"resource_group,omitempty"` + + // Action tags. + Tags []string `json:"tags,omitempty"` + + // User defined status of the Schematics object. + UserState *UserState `json:"user_state,omitempty"` + + // URL of the README file, for the source. + SourceReadmeURL *string `json:"source_readme_url,omitempty"` + + // Source of templates, playbooks, or controls. + Source *ExternalSource `json:"source,omitempty"` + + // Type of source for the Template. + SourceType *string `json:"source_type,omitempty"` + + // Schematics job command parameter (playbook-name, capsule-name or flow-name). + CommandParameter *string `json:"command_parameter,omitempty"` + + // Complete Target details with user inputs and system generated data. + Bastion *TargetResourceset `json:"bastion,omitempty"` + + // Inventory of host and host group for the playbook, in .ini file format. + TargetsIni *string `json:"targets_ini,omitempty"` + + // credentials of the Action. + Credentials []VariableData `json:"credentials,omitempty"` + + // Input variables for the Action. + Inputs []VariableData `json:"inputs,omitempty"` + + // Output variables for the Action. 
+ Outputs []VariableData `json:"outputs,omitempty"` + + // Environment variables for the Action. + Settings []VariableData `json:"settings,omitempty"` + + // Id to the Trigger. + TriggerRecordID *string `json:"trigger_record_id,omitempty"` + + // Computed state of the Action. + State *ActionState `json:"state,omitempty"` + + // System lock status. + SysLock *SystemLock `json:"sys_lock,omitempty"` + + // The github token associated with the GIT. Required for cloning of repo. + XGithubToken *string `json:"X-Github-token,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateActionOptions.Location property. +// List of workspace locations supported by IBM Cloud Schematics service. Note, this does not limit the location of the +// resources provisioned using Schematics. +const ( + UpdateActionOptions_Location_EuDe = "eu_de" + UpdateActionOptions_Location_EuGb = "eu_gb" + UpdateActionOptions_Location_UsEast = "us_east" + UpdateActionOptions_Location_UsSouth = "us_south" +) + +// Constants associated with the UpdateActionOptions.SourceType property. +// Type of source for the Template. +const ( + UpdateActionOptions_SourceType_ExternalScm = "external_scm" + UpdateActionOptions_SourceType_GitHub = "git_hub" + UpdateActionOptions_SourceType_GitHubEnterprise = "git_hub_enterprise" + UpdateActionOptions_SourceType_GitLab = "git_lab" + UpdateActionOptions_SourceType_IbmCloudCatalog = "ibm_cloud_catalog" + UpdateActionOptions_SourceType_IbmGitLab = "ibm_git_lab" + UpdateActionOptions_SourceType_Local = "local" +) + +// NewUpdateActionOptions : Instantiate UpdateActionOptions +func (*SchematicsV1) NewUpdateActionOptions(actionID string) *UpdateActionOptions { + return &UpdateActionOptions{ + ActionID: core.StringPtr(actionID), + } +} + +// SetActionID : Allow user to set ActionID +func (options *UpdateActionOptions) SetActionID(actionID string) *UpdateActionOptions { + options.ActionID = core.StringPtr(actionID) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateActionOptions) SetName(name string) *UpdateActionOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateActionOptions) SetDescription(description string) *UpdateActionOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetLocation : Allow user to set Location +func (options *UpdateActionOptions) SetLocation(location string) *UpdateActionOptions { + options.Location = core.StringPtr(location) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *UpdateActionOptions) SetResourceGroup(resourceGroup string) *UpdateActionOptions { + options.ResourceGroup = core.StringPtr(resourceGroup) + return options +} + +// SetTags : Allow user to set Tags +func (options *UpdateActionOptions) SetTags(tags []string) *UpdateActionOptions { + options.Tags = tags + return options +} + +// SetUserState : Allow user to set UserState +func (options *UpdateActionOptions) SetUserState(userState *UserState) *UpdateActionOptions { + options.UserState = userState + return options +} + +// SetSourceReadmeURL : Allow user to set SourceReadmeURL +func (options *UpdateActionOptions) SetSourceReadmeURL(sourceReadmeURL string) *UpdateActionOptions { + options.SourceReadmeURL = core.StringPtr(sourceReadmeURL) + return options +} + +// SetSource : Allow user to set Source +func (options *UpdateActionOptions) 
SetSource(source *ExternalSource) *UpdateActionOptions { + options.Source = source + return options +} + +// SetSourceType : Allow user to set SourceType +func (options *UpdateActionOptions) SetSourceType(sourceType string) *UpdateActionOptions { + options.SourceType = core.StringPtr(sourceType) + return options +} + +// SetCommandParameter : Allow user to set CommandParameter +func (options *UpdateActionOptions) SetCommandParameter(commandParameter string) *UpdateActionOptions { + options.CommandParameter = core.StringPtr(commandParameter) + return options +} + +// SetBastion : Allow user to set Bastion +func (options *UpdateActionOptions) SetBastion(bastion *TargetResourceset) *UpdateActionOptions { + options.Bastion = bastion + return options +} + +// SetTargetsIni : Allow user to set TargetsIni +func (options *UpdateActionOptions) SetTargetsIni(targetsIni string) *UpdateActionOptions { + options.TargetsIni = core.StringPtr(targetsIni) + return options +} + +// SetCredentials : Allow user to set Credentials +func (options *UpdateActionOptions) SetCredentials(credentials []VariableData) *UpdateActionOptions { + options.Credentials = credentials + return options +} + +// SetInputs : Allow user to set Inputs +func (options *UpdateActionOptions) SetInputs(inputs []VariableData) *UpdateActionOptions { + options.Inputs = inputs + return options +} + +// SetOutputs : Allow user to set Outputs +func (options *UpdateActionOptions) SetOutputs(outputs []VariableData) *UpdateActionOptions { + options.Outputs = outputs + return options +} + +// SetSettings : Allow user to set Settings +func (options *UpdateActionOptions) SetSettings(settings []VariableData) *UpdateActionOptions { + options.Settings = settings + return options +} + +// SetTriggerRecordID : Allow user to set TriggerRecordID +func (options *UpdateActionOptions) SetTriggerRecordID(triggerRecordID string) *UpdateActionOptions { + options.TriggerRecordID = core.StringPtr(triggerRecordID) + return options +} + +// SetState : Allow user to set State +func (options *UpdateActionOptions) SetState(state *ActionState) *UpdateActionOptions { + options.State = state + return options +} + +// SetSysLock : Allow user to set SysLock +func (options *UpdateActionOptions) SetSysLock(sysLock *SystemLock) *UpdateActionOptions { + options.SysLock = sysLock + return options +} + +// SetXGithubToken : Allow user to set XGithubToken +func (options *UpdateActionOptions) SetXGithubToken(xGithubToken string) *UpdateActionOptions { + options.XGithubToken = core.StringPtr(xGithubToken) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateActionOptions) SetHeaders(param map[string]string) *UpdateActionOptions { + options.Headers = param + return options +} + +// UpdateWorkspaceOptions : The UpdateWorkspace options. +type UpdateWorkspaceOptions struct { + // The workspace ID for the workspace that you want to query. You can run the GET /workspaces call if you need to look + // up the workspace IDs in your IBM Cloud account. + WID *string `json:"w_id" validate:"required,ne="` + + // CatalogRef -. + CatalogRef *CatalogRef `json:"catalog_ref,omitempty"` + + // Workspace description. + Description *string `json:"description,omitempty"` + + // Workspace name. + Name *string `json:"name,omitempty"` + + // SharedTargetData -. + SharedData *SharedTargetData `json:"shared_data,omitempty"` + + // Tags -. + Tags []string `json:"tags,omitempty"` + + // TemplateData -. 
+ TemplateData []TemplateSourceDataRequest `json:"template_data,omitempty"` + + // TemplateRepoUpdateRequest -. + TemplateRepo *TemplateRepoUpdateRequest `json:"template_repo,omitempty"` + + // List of Workspace type. + Type []string `json:"type,omitempty"` + + // WorkspaceStatusUpdateRequest -. + WorkspaceStatus *WorkspaceStatusUpdateRequest `json:"workspace_status,omitempty"` + + // WorkspaceStatusMessage -. + WorkspaceStatusMsg *WorkspaceStatusMessage `json:"workspace_status_msg,omitempty"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateWorkspaceOptions : Instantiate UpdateWorkspaceOptions +func (*SchematicsV1) NewUpdateWorkspaceOptions(wID string) *UpdateWorkspaceOptions { + return &UpdateWorkspaceOptions{ + WID: core.StringPtr(wID), + } +} + +// SetWID : Allow user to set WID +func (options *UpdateWorkspaceOptions) SetWID(wID string) *UpdateWorkspaceOptions { + options.WID = core.StringPtr(wID) + return options +} + +// SetCatalogRef : Allow user to set CatalogRef +func (options *UpdateWorkspaceOptions) SetCatalogRef(catalogRef *CatalogRef) *UpdateWorkspaceOptions { + options.CatalogRef = catalogRef + return options +} + +// SetDescription : Allow user to set Description +func (options *UpdateWorkspaceOptions) SetDescription(description string) *UpdateWorkspaceOptions { + options.Description = core.StringPtr(description) + return options +} + +// SetName : Allow user to set Name +func (options *UpdateWorkspaceOptions) SetName(name string) *UpdateWorkspaceOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetSharedData : Allow user to set SharedData +func (options *UpdateWorkspaceOptions) SetSharedData(sharedData *SharedTargetData) *UpdateWorkspaceOptions { + options.SharedData = sharedData + return options +} + +// SetTags : Allow user to set Tags +func (options *UpdateWorkspaceOptions) SetTags(tags []string) *UpdateWorkspaceOptions { + options.Tags = tags + return options +} + +// SetTemplateData : Allow user to set TemplateData +func (options *UpdateWorkspaceOptions) SetTemplateData(templateData []TemplateSourceDataRequest) *UpdateWorkspaceOptions { + options.TemplateData = templateData + return options +} + +// SetTemplateRepo : Allow user to set TemplateRepo +func (options *UpdateWorkspaceOptions) SetTemplateRepo(templateRepo *TemplateRepoUpdateRequest) *UpdateWorkspaceOptions { + options.TemplateRepo = templateRepo + return options +} + +// SetType : Allow user to set Type +func (options *UpdateWorkspaceOptions) SetType(typeVar []string) *UpdateWorkspaceOptions { + options.Type = typeVar + return options +} + +// SetWorkspaceStatus : Allow user to set WorkspaceStatus +func (options *UpdateWorkspaceOptions) SetWorkspaceStatus(workspaceStatus *WorkspaceStatusUpdateRequest) *UpdateWorkspaceOptions { + options.WorkspaceStatus = workspaceStatus + return options +} + +// SetWorkspaceStatusMsg : Allow user to set WorkspaceStatusMsg +func (options *UpdateWorkspaceOptions) SetWorkspaceStatusMsg(workspaceStatusMsg *WorkspaceStatusMessage) *UpdateWorkspaceOptions { + options.WorkspaceStatusMsg = workspaceStatusMsg + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateWorkspaceOptions) SetHeaders(param map[string]string) *UpdateWorkspaceOptions { + options.Headers = param + return options +} + +// UploadTemplateTarOptions : The UploadTemplateTar options. +type UploadTemplateTarOptions struct { + // The workspace ID for the workspace that you want to query. 
You can run the GET /workspaces call if you need to look
+ // up the workspace IDs in your IBM Cloud account.
+ WID *string `json:"w_id" validate:"required,ne="`
+
+ // The Template ID for which you want to get the values. Use the GET /workspaces to look up the workspace IDs or
+ // template IDs in your IBM Cloud account.
+ TID *string `json:"t_id" validate:"required,ne="`
+
+ // Template tar file.
+ File io.ReadCloser `json:"file,omitempty"`
+
+ // The content type of file.
+ FileContentType *string `json:"file_content_type,omitempty"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewUploadTemplateTarOptions : Instantiate UploadTemplateTarOptions
+func (*SchematicsV1) NewUploadTemplateTarOptions(wID string, tID string) *UploadTemplateTarOptions {
+ return &UploadTemplateTarOptions{
+ WID: core.StringPtr(wID),
+ TID: core.StringPtr(tID),
+ }
+}
+
+// SetWID : Allow user to set WID
+func (options *UploadTemplateTarOptions) SetWID(wID string) *UploadTemplateTarOptions {
+ options.WID = core.StringPtr(wID)
+ return options
+}
+
+// SetTID : Allow user to set TID
+func (options *UploadTemplateTarOptions) SetTID(tID string) *UploadTemplateTarOptions {
+ options.TID = core.StringPtr(tID)
+ return options
+}
+
+// SetFile : Allow user to set File
+func (options *UploadTemplateTarOptions) SetFile(file io.ReadCloser) *UploadTemplateTarOptions {
+ options.File = file
+ return options
+}
+
+// SetFileContentType : Allow user to set FileContentType
+func (options *UploadTemplateTarOptions) SetFileContentType(fileContentType string) *UploadTemplateTarOptions {
+ options.FileContentType = core.StringPtr(fileContentType)
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *UploadTemplateTarOptions) SetHeaders(param map[string]string) *UploadTemplateTarOptions {
+ options.Headers = param
+ return options
+}
+
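+// A hedged usage sketch ("schematicsService" is a hypothetical *SchematicsV1
+// client, and UploadTemplateTar is assumed to be the service method paired
+// with these options; the tar path is illustrative):
+//
+//   file, err := os.Open("template.tar")
+//   if err != nil { /* handle error */ }
+//   defer file.Close()
+//   uploadOptions := schematicsService.NewUploadTemplateTarOptions(workspaceID, templateID)
+//   uploadOptions.SetFile(file).SetFileContentType("application/octet-stream")
+//   result, response, err := schematicsService.UploadTemplateTar(uploadOptions)
+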
+
+// UnmarshalUserState unmarshals an instance of UserState from the specified map of raw messages.
+func UnmarshalUserState(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(UserState)
+	err = core.UnmarshalPrimitive(m, "state", &obj.State)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "set_by", &obj.SetBy)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "set_at", &obj.SetAt)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// UserValues : UserValues -.
+type UserValues struct {
+	// EnvVariableRequest ..
+	EnvValues []interface{} `json:"env_values,omitempty"`
+
+	// User values.
+	Values *string `json:"values,omitempty"`
+
+	// VariablesResponse -.
+	Variablestore []WorkspaceVariableResponse `json:"variablestore,omitempty"`
+}
+
+// UnmarshalUserValues unmarshals an instance of UserValues from the specified map of raw messages.
+func UnmarshalUserValues(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(UserValues)
+	err = core.UnmarshalPrimitive(m, "env_values", &obj.EnvValues)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "values", &obj.Values)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "variablestore", &obj.Variablestore, UnmarshalWorkspaceVariableResponse)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// VariableData : User editable variable data & system generated reference to value.
+type VariableData struct {
+	// Name of the variable.
+	Name *string `json:"name,omitempty"`
+
+	// Value for the variable or reference to the value.
+	Value *string `json:"value,omitempty"`
+
+	// User editable metadata for the variables.
+	Metadata *VariableMetadata `json:"metadata,omitempty"`
+
+	// Reference link to the variable value. By default the expression points to self.value.
+	Link *string `json:"link,omitempty"`
+}
+
+// UnmarshalVariableData unmarshals an instance of VariableData from the specified map of raw messages.
+func UnmarshalVariableData(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(VariableData)
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "metadata", &obj.Metadata, UnmarshalVariableMetadata)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "link", &obj.Link)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// VariableMetadata : User editable metadata for the variables.
+type VariableMetadata struct {
+	// Type of the variable.
+	Type *string `json:"type,omitempty"`
+
+	// List of aliases for the variable name.
+	Aliases []string `json:"aliases,omitempty"`
+
+	// Description of the metadata.
+	Description *string `json:"description,omitempty"`
+
+	// Default value for the variable, if the override value is not specified.
+	DefaultValue *string `json:"default_value,omitempty"`
+
+	// Is the variable secure or sensitive?
+	Secure *bool `json:"secure,omitempty"`
+
+	// Is the variable read-only?
+	Immutable *bool `json:"immutable,omitempty"`
+
+	// If true, the variable will not be displayed in the UI or CLI.
+	Hidden *bool `json:"hidden,omitempty"`
+
+	// List of possible values for this variable. If the type is integer or date, the array of strings is converted
+	// to an array of integers or dates at runtime.
+	Options []string `json:"options,omitempty"`
+
+	// Minimum value of the variable. Applicable for integer type.
+	MinValue *int64 `json:"min_value,omitempty"`
+
+	// Maximum value of the variable. Applicable for integer type.
+	MaxValue *int64 `json:"max_value,omitempty"`
+
+	// Minimum length of the variable value. Applicable for string type.
+	MinLength *int64 `json:"min_length,omitempty"`
+
+	// Maximum length of the variable value. Applicable for string type.
+	MaxLength *int64 `json:"max_length,omitempty"`
+
+	// Regex for the variable value.
+	Matches *string `json:"matches,omitempty"`
+
+	// Relative position of this variable in a list.
+	Position *int64 `json:"position,omitempty"`
+
+	// Display name of the group this variable belongs to.
+	GroupBy *string `json:"group_by,omitempty"`
+
+	// Source of this meta-data.
+	Source *string `json:"source,omitempty"`
+}
+
+// Constants associated with the VariableMetadata.Type property.
+// Type of the variable.
+const (
+	VariableMetadata_Type_Array = "array"
+	VariableMetadata_Type_Boolean = "boolean"
+	VariableMetadata_Type_Complex = "complex"
+	VariableMetadata_Type_Date = "date"
+	VariableMetadata_Type_Integer = "integer"
+	VariableMetadata_Type_List = "list"
+	VariableMetadata_Type_Map = "map"
+	VariableMetadata_Type_String = "string"
+)
+
+// UnmarshalVariableMetadata unmarshals an instance of VariableMetadata from the specified map of raw messages.
+func UnmarshalVariableMetadata(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(VariableMetadata)
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "aliases", &obj.Aliases)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "default_value", &obj.DefaultValue)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "secure", &obj.Secure)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "immutable", &obj.Immutable)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "hidden", &obj.Hidden)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "options", &obj.Options)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "min_value", &obj.MinValue)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "max_value", &obj.MaxValue)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "min_length", &obj.MinLength)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "max_length", &obj.MaxLength)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "matches", &obj.Matches)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "position", &obj.Position)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "group_by", &obj.GroupBy)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "source", &obj.Source)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// VersionResponse : VersionResponse -.
+type VersionResponse struct {
+	// Build date.
+	Builddate *string `json:"builddate,omitempty"`
+
+	// Build number.
+	Buildno *string `json:"buildno,omitempty"`
+
+	// Commit SHA.
+	Commitsha *string `json:"commitsha,omitempty"`
+
+	// Version number of 'Helm provider for Terraform'.
+	HelmProviderVersion *string `json:"helm_provider_version,omitempty"`
+
+	// Helm Version.
+ HelmVersion *string `json:"helm_version,omitempty"` + + // Supported template types. + SupportedTemplateTypes interface{} `json:"supported_template_types,omitempty"` + + // Terraform provider versions. + TerraformProviderVersion *string `json:"terraform_provider_version,omitempty"` + + // Terraform versions. + TerraformVersion *string `json:"terraform_version,omitempty"` +} + +// UnmarshalVersionResponse unmarshals an instance of VersionResponse from the specified map of raw messages. +func UnmarshalVersionResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VersionResponse) + err = core.UnmarshalPrimitive(m, "builddate", &obj.Builddate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "buildno", &obj.Buildno) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "commitsha", &obj.Commitsha) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "helm_provider_version", &obj.HelmProviderVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "helm_version", &obj.HelmVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "supported_template_types", &obj.SupportedTemplateTypes) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "terraform_provider_version", &obj.TerraformProviderVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "terraform_version", &obj.TerraformVersion) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivities : WorkspaceActivities -. +type WorkspaceActivities struct { + // List of workspace activities. + Actions []WorkspaceActivity `json:"actions,omitempty"` + + // Workspace id. + WorkspaceID *string `json:"workspace_id,omitempty"` + + // Workspace name. + WorkspaceName *string `json:"workspace_name,omitempty"` +} + +// UnmarshalWorkspaceActivities unmarshals an instance of WorkspaceActivities from the specified map of raw messages. +func UnmarshalWorkspaceActivities(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivities) + err = core.UnmarshalModel(m, "actions", &obj.Actions, UnmarshalWorkspaceActivity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "workspace_id", &obj.WorkspaceID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "workspace_name", &obj.WorkspaceName) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivity : WorkspaceActivity -. +type WorkspaceActivity struct { + // Activity id. + ActionID *string `json:"action_id,omitempty"` + + // StatusMessages -. + Message []string `json:"message,omitempty"` + + // WorkspaceActivityAction activity action type. + Name *string `json:"name,omitempty"` + + // Activity performed at. + PerformedAt *strfmt.DateTime `json:"performed_at,omitempty"` + + // Activity performed by. + PerformedBy *string `json:"performed_by,omitempty"` + + // WorkspaceActivityStatus activity status type. + Status *string `json:"status,omitempty"` + + // List of template activities. + Templates []WorkspaceActivityTemplate `json:"templates,omitempty"` +} + +// UnmarshalWorkspaceActivity unmarshals an instance of WorkspaceActivity from the specified map of raw messages. 
+func UnmarshalWorkspaceActivity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivity) + err = core.UnmarshalPrimitive(m, "action_id", &obj.ActionID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "message", &obj.Message) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "performed_at", &obj.PerformedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "performed_by", &obj.PerformedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalModel(m, "templates", &obj.Templates, UnmarshalWorkspaceActivityTemplate) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivityApplyResult : WorkspaceActivityApplyResult -. +type WorkspaceActivityApplyResult struct { + // Activity id. + Activityid *string `json:"activityid,omitempty"` +} + +// UnmarshalWorkspaceActivityApplyResult unmarshals an instance of WorkspaceActivityApplyResult from the specified map of raw messages. +func UnmarshalWorkspaceActivityApplyResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivityApplyResult) + err = core.UnmarshalPrimitive(m, "activityid", &obj.Activityid) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivityCommandResult : WorkspaceActivityCommandResult -. +type WorkspaceActivityCommandResult struct { + // Activity id. + Activityid *string `json:"activityid,omitempty"` +} + +// UnmarshalWorkspaceActivityCommandResult unmarshals an instance of WorkspaceActivityCommandResult from the specified map of raw messages. +func UnmarshalWorkspaceActivityCommandResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivityCommandResult) + err = core.UnmarshalPrimitive(m, "activityid", &obj.Activityid) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivityDestroyResult : WorkspaceActivityDestroyResult -. +type WorkspaceActivityDestroyResult struct { + // Activity id. + Activityid *string `json:"activityid,omitempty"` +} + +// UnmarshalWorkspaceActivityDestroyResult unmarshals an instance of WorkspaceActivityDestroyResult from the specified map of raw messages. +func UnmarshalWorkspaceActivityDestroyResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivityDestroyResult) + err = core.UnmarshalPrimitive(m, "activityid", &obj.Activityid) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivityLogs : WorkspaceActivityLogs -. +type WorkspaceActivityLogs struct { + // Activity id. + ActionID *string `json:"action_id,omitempty"` + + // WorkspaceActivityAction activity action type. + Name *string `json:"name,omitempty"` + + // List of activity logs. + Templates []WorkspaceActivityTemplateLogs `json:"templates,omitempty"` +} + +// UnmarshalWorkspaceActivityLogs unmarshals an instance of WorkspaceActivityLogs from the specified map of raw messages. 
+func UnmarshalWorkspaceActivityLogs(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivityLogs) + err = core.UnmarshalPrimitive(m, "action_id", &obj.ActionID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "templates", &obj.Templates, UnmarshalWorkspaceActivityTemplateLogs) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivityOptionsTemplate : Action Options Template ... +type WorkspaceActivityOptionsTemplate struct { + // Action targets. + Target []string `json:"target,omitempty"` + + // Action tfvars. + TfVars []string `json:"tf_vars,omitempty"` +} + +// UnmarshalWorkspaceActivityOptionsTemplate unmarshals an instance of WorkspaceActivityOptionsTemplate from the specified map of raw messages. +func UnmarshalWorkspaceActivityOptionsTemplate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivityOptionsTemplate) + err = core.UnmarshalPrimitive(m, "target", &obj.Target) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tf_vars", &obj.TfVars) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivityPlanResult : WorkspaceActivityPlanResult -. +type WorkspaceActivityPlanResult struct { + // Activity id. + Activityid *string `json:"activityid,omitempty"` +} + +// UnmarshalWorkspaceActivityPlanResult unmarshals an instance of WorkspaceActivityPlanResult from the specified map of raw messages. +func UnmarshalWorkspaceActivityPlanResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivityPlanResult) + err = core.UnmarshalPrimitive(m, "activityid", &obj.Activityid) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivityRefreshResult : WorkspaceActivityRefreshResult -. +type WorkspaceActivityRefreshResult struct { + // Activity id. + Activityid *string `json:"activityid,omitempty"` +} + +// UnmarshalWorkspaceActivityRefreshResult unmarshals an instance of WorkspaceActivityRefreshResult from the specified map of raw messages. +func UnmarshalWorkspaceActivityRefreshResult(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivityRefreshResult) + err = core.UnmarshalPrimitive(m, "activityid", &obj.Activityid) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivityTemplate : WorkspaceActivityTemplate -. +type WorkspaceActivityTemplate struct { + // End time for the activity. + EndTime *strfmt.DateTime `json:"end_time,omitempty"` + + // LogSummary ... + LogSummary *LogSummary `json:"log_summary,omitempty"` + + // Log URL. + LogURL *string `json:"log_url,omitempty"` + + // Message. + Message *string `json:"message,omitempty"` + + // Activity start time. + StartTime *strfmt.DateTime `json:"start_time,omitempty"` + + // WorkspaceActivityStatus activity status type. + Status *string `json:"status,omitempty"` + + // Template id. + TemplateID *string `json:"template_id,omitempty"` + + // Template type. + TemplateType *string `json:"template_type,omitempty"` +} + +// UnmarshalWorkspaceActivityTemplate unmarshals an instance of WorkspaceActivityTemplate from the specified map of raw messages. 
+func UnmarshalWorkspaceActivityTemplate(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivityTemplate) + err = core.UnmarshalPrimitive(m, "end_time", &obj.EndTime) + if err != nil { + return + } + err = core.UnmarshalModel(m, "log_summary", &obj.LogSummary, UnmarshalLogSummary) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "log_url", &obj.LogURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "message", &obj.Message) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "start_time", &obj.StartTime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "template_id", &obj.TemplateID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "template_type", &obj.TemplateType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceActivityTemplateLogs : WorkspaceActivityTemplateLogs -. +type WorkspaceActivityTemplateLogs struct { + // Log URL. + LogURL *string `json:"log_url,omitempty"` + + // Template id. + TemplateID *string `json:"template_id,omitempty"` + + // Template type. + TemplateType *string `json:"template_type,omitempty"` +} + +// UnmarshalWorkspaceActivityTemplateLogs unmarshals an instance of WorkspaceActivityTemplateLogs from the specified map of raw messages. +func UnmarshalWorkspaceActivityTemplateLogs(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceActivityTemplateLogs) + err = core.UnmarshalPrimitive(m, "log_url", &obj.LogURL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "template_id", &obj.TemplateID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "template_type", &obj.TemplateType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceBulkDeleteResponse : WorkspaceBulkDeleteResponse -. +type WorkspaceBulkDeleteResponse struct { + // Workspace deletion job name. + Job *string `json:"job,omitempty"` + + // Workspace deletion job id. + JobID *string `json:"job_id,omitempty"` +} + +// UnmarshalWorkspaceBulkDeleteResponse unmarshals an instance of WorkspaceBulkDeleteResponse from the specified map of raw messages. +func UnmarshalWorkspaceBulkDeleteResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceBulkDeleteResponse) + err = core.UnmarshalPrimitive(m, "job", &obj.Job) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "job_id", &obj.JobID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceJobResponse : WorkspaceJobResponse -. +type WorkspaceJobResponse struct { + // JobStatusType -. + JobStatus *JobStatusType `json:"job_status,omitempty"` +} + +// UnmarshalWorkspaceJobResponse unmarshals an instance of WorkspaceJobResponse from the specified map of raw messages. +func UnmarshalWorkspaceJobResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceJobResponse) + err = core.UnmarshalModel(m, "job_status", &obj.JobStatus, UnmarshalJobStatusType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceResponse : WorkspaceResponse - request returned by create. +type WorkspaceResponse struct { + // List of applied shared dataset id. 
+ AppliedShareddataIds []string `json:"applied_shareddata_ids,omitempty"` + + // CatalogRef -. + CatalogRef *CatalogRef `json:"catalog_ref,omitempty"` + + // Workspace created at. + CreatedAt *strfmt.DateTime `json:"created_at,omitempty"` + + // Workspace created by. + CreatedBy *string `json:"created_by,omitempty"` + + // Workspace CRN. + Crn *string `json:"crn,omitempty"` + + // Workspace description. + Description *string `json:"description,omitempty"` + + // Workspace id. + ID *string `json:"id,omitempty"` + + // Last health checked at. + LastHealthCheckAt *strfmt.DateTime `json:"last_health_check_at,omitempty"` + + // Workspace location. + Location *string `json:"location,omitempty"` + + // Workspace name. + Name *string `json:"name,omitempty"` + + // Workspace resource group. + ResourceGroup *string `json:"resource_group,omitempty"` + + // Workspace runtime data. + RuntimeData []TemplateRunTimeDataResponse `json:"runtime_data,omitempty"` + + // SharedTargetDataResponse -. + SharedData *SharedTargetDataResponse `json:"shared_data,omitempty"` + + // Workspace status type. + Status *string `json:"status,omitempty"` + + // Workspace tags. + Tags []string `json:"tags,omitempty"` + + // Workspace template data. + TemplateData []TemplateSourceDataResponse `json:"template_data,omitempty"` + + // Workspace template ref. + TemplateRef *string `json:"template_ref,omitempty"` + + // TemplateRepoResponse -. + TemplateRepo *TemplateRepoResponse `json:"template_repo,omitempty"` + + // List of Workspace type. + Type []string `json:"type,omitempty"` + + // Workspace updated at. + UpdatedAt *strfmt.DateTime `json:"updated_at,omitempty"` + + // Workspace updated by. + UpdatedBy *string `json:"updated_by,omitempty"` + + // WorkspaceStatusResponse -. + WorkspaceStatus *WorkspaceStatusResponse `json:"workspace_status,omitempty"` + + // WorkspaceStatusMessage -. + WorkspaceStatusMsg *WorkspaceStatusMessage `json:"workspace_status_msg,omitempty"` +} + +// UnmarshalWorkspaceResponse unmarshals an instance of WorkspaceResponse from the specified map of raw messages. 
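+// Example (editor's illustrative sketch, not generated SDK code): the fields
+// of WorkspaceResponse are pointers and may be nil when absent from the API
+// response, so guard every dereference. `workspace` is an assumed
+// *WorkspaceResponse value returned by a workspace read operation:
+//
+//	if workspace.Name != nil && workspace.Status != nil {
+//		fmt.Printf("workspace %s is %s\n", *workspace.Name, *workspace.Status)
+//	}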
+func UnmarshalWorkspaceResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceResponse) + err = core.UnmarshalPrimitive(m, "applied_shareddata_ids", &obj.AppliedShareddataIds) + if err != nil { + return + } + err = core.UnmarshalModel(m, "catalog_ref", &obj.CatalogRef, UnmarshalCatalogRef) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.Crn) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_health_check_at", &obj.LastHealthCheckAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "location", &obj.Location) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_group", &obj.ResourceGroup) + if err != nil { + return + } + err = core.UnmarshalModel(m, "runtime_data", &obj.RuntimeData, UnmarshalTemplateRunTimeDataResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "shared_data", &obj.SharedData, UnmarshalSharedTargetDataResponse) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "tags", &obj.Tags) + if err != nil { + return + } + err = core.UnmarshalModel(m, "template_data", &obj.TemplateData, UnmarshalTemplateSourceDataResponse) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "template_ref", &obj.TemplateRef) + if err != nil { + return + } + err = core.UnmarshalModel(m, "template_repo", &obj.TemplateRepo, UnmarshalTemplateRepoResponse) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + err = core.UnmarshalModel(m, "workspace_status", &obj.WorkspaceStatus, UnmarshalWorkspaceStatusResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "workspace_status_msg", &obj.WorkspaceStatusMsg, UnmarshalWorkspaceStatusMessage) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceResponseList : WorkspaceResponseList -. +type WorkspaceResponseList struct { + // Total number of workspaces. + Count *int64 `json:"count,omitempty"` + + // Limit for the list. + Limit *int64 `json:"limit" validate:"required"` + + // Offset for the list. + Offset *int64 `json:"offset" validate:"required"` + + // List of Workspaces. + Workspaces []WorkspaceResponse `json:"workspaces,omitempty"` +} + +// UnmarshalWorkspaceResponseList unmarshals an instance of WorkspaceResponseList from the specified map of raw messages. 
+func UnmarshalWorkspaceResponseList(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceResponseList) + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "offset", &obj.Offset) + if err != nil { + return + } + err = core.UnmarshalModel(m, "workspaces", &obj.Workspaces, UnmarshalWorkspaceResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceStatusMessage : WorkspaceStatusMessage -. +type WorkspaceStatusMessage struct { + // Status code. + StatusCode *string `json:"status_code,omitempty"` + + // Status message. + StatusMsg *string `json:"status_msg,omitempty"` +} + +// UnmarshalWorkspaceStatusMessage unmarshals an instance of WorkspaceStatusMessage from the specified map of raw messages. +func UnmarshalWorkspaceStatusMessage(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceStatusMessage) + err = core.UnmarshalPrimitive(m, "status_code", &obj.StatusCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status_msg", &obj.StatusMsg) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceStatusRequest : WorkspaceStatusRequest -. +type WorkspaceStatusRequest struct { + // Frozen status. + Frozen *bool `json:"frozen,omitempty"` + + // Frozen at. + FrozenAt *strfmt.DateTime `json:"frozen_at,omitempty"` + + // Frozen by. + FrozenBy *string `json:"frozen_by,omitempty"` + + // Locked status. + Locked *bool `json:"locked,omitempty"` + + // Locked by. + LockedBy *string `json:"locked_by,omitempty"` + + // Locked at. + LockedTime *strfmt.DateTime `json:"locked_time,omitempty"` +} + +// UnmarshalWorkspaceStatusRequest unmarshals an instance of WorkspaceStatusRequest from the specified map of raw messages. +func UnmarshalWorkspaceStatusRequest(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceStatusRequest) + err = core.UnmarshalPrimitive(m, "frozen", &obj.Frozen) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "frozen_at", &obj.FrozenAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "frozen_by", &obj.FrozenBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked", &obj.Locked) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked_by", &obj.LockedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked_time", &obj.LockedTime) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceStatusResponse : WorkspaceStatusResponse -. +type WorkspaceStatusResponse struct { + // Frozen status. + Frozen *bool `json:"frozen,omitempty"` + + // Frozen at. + FrozenAt *strfmt.DateTime `json:"frozen_at,omitempty"` + + // Frozen by. + FrozenBy *string `json:"frozen_by,omitempty"` + + // Locked status. + Locked *bool `json:"locked,omitempty"` + + // Locked by. + LockedBy *string `json:"locked_by,omitempty"` + + // Locked at. + LockedTime *strfmt.DateTime `json:"locked_time,omitempty"` +} + +// UnmarshalWorkspaceStatusResponse unmarshals an instance of WorkspaceStatusResponse from the specified map of raw messages. 
+func UnmarshalWorkspaceStatusResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceStatusResponse) + err = core.UnmarshalPrimitive(m, "frozen", &obj.Frozen) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "frozen_at", &obj.FrozenAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "frozen_by", &obj.FrozenBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked", &obj.Locked) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked_by", &obj.LockedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked_time", &obj.LockedTime) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceStatusUpdateRequest : WorkspaceStatusUpdateRequest -. +type WorkspaceStatusUpdateRequest struct { + // Frozen status. + Frozen *bool `json:"frozen,omitempty"` + + // Frozen at. + FrozenAt *strfmt.DateTime `json:"frozen_at,omitempty"` + + // Frozen by. + FrozenBy *string `json:"frozen_by,omitempty"` + + // Locked status. + Locked *bool `json:"locked,omitempty"` + + // Locked by. + LockedBy *string `json:"locked_by,omitempty"` + + // Locked at. + LockedTime *strfmt.DateTime `json:"locked_time,omitempty"` +} + +// UnmarshalWorkspaceStatusUpdateRequest unmarshals an instance of WorkspaceStatusUpdateRequest from the specified map of raw messages. +func UnmarshalWorkspaceStatusUpdateRequest(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceStatusUpdateRequest) + err = core.UnmarshalPrimitive(m, "frozen", &obj.Frozen) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "frozen_at", &obj.FrozenAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "frozen_by", &obj.FrozenBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked", &obj.Locked) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked_by", &obj.LockedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "locked_time", &obj.LockedTime) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceTemplateValuesResponse : WorkspaceTemplateValuesResponse -. +type WorkspaceTemplateValuesResponse struct { + // List of runtime data. + RuntimeData []TemplateRunTimeDataResponse `json:"runtime_data,omitempty"` + + // SharedTargetData -. + SharedData *SharedTargetData `json:"shared_data,omitempty"` + + // List of source data. + TemplateData []TemplateSourceDataResponse `json:"template_data,omitempty"` +} + +// UnmarshalWorkspaceTemplateValuesResponse unmarshals an instance of WorkspaceTemplateValuesResponse from the specified map of raw messages. +func UnmarshalWorkspaceTemplateValuesResponse(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(WorkspaceTemplateValuesResponse) + err = core.UnmarshalModel(m, "runtime_data", &obj.RuntimeData, UnmarshalTemplateRunTimeDataResponse) + if err != nil { + return + } + err = core.UnmarshalModel(m, "shared_data", &obj.SharedData, UnmarshalSharedTargetData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "template_data", &obj.TemplateData, UnmarshalTemplateSourceDataResponse) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// WorkspaceVariableRequest : WorkspaceVariableRequest -. +type WorkspaceVariableRequest struct { + // Variable description. 
+	Description *string `json:"description,omitempty"`
+
+	// Variable name.
+	Name *string `json:"name,omitempty"`
+
+	// Variable is secure.
+	Secure *bool `json:"secure,omitempty"`
+
+	// Variable type.
+	Type *string `json:"type,omitempty"`
+
+	// Variable uses the default value and is not overridden.
+	UseDefault *bool `json:"use_default,omitempty"`
+
+	// Value of the Variable.
+	Value *string `json:"value,omitempty"`
+}
+
+// UnmarshalWorkspaceVariableRequest unmarshals an instance of WorkspaceVariableRequest from the specified map of raw messages.
+func UnmarshalWorkspaceVariableRequest(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(WorkspaceVariableRequest)
+	err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "secure", &obj.Secure)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "use_default", &obj.UseDefault)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// WorkspaceVariableResponse : WorkspaceVariableResponse -.
+type WorkspaceVariableResponse struct {
+	// Variable description.
+	Description *string `json:"description,omitempty"`
+
+	// Variable name.
+	Name *string `json:"name,omitempty"`
+
+	// Variable is secure.
+	Secure *bool `json:"secure,omitempty"`
+
+	// Variable type.
+	Type *string `json:"type,omitempty"`
+
+	// Value of the Variable.
+	Value *string `json:"value,omitempty"`
+}
+
+// UnmarshalWorkspaceVariableResponse unmarshals an instance of WorkspaceVariableResponse from the specified map of raw messages.
+func UnmarshalWorkspaceVariableResponse(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(WorkspaceVariableResponse)
+	err = core.UnmarshalPrimitive(m, "description", &obj.Description)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "secure", &obj.Secure)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "value", &obj.Value)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
diff --git a/vendor/github.com/IBM/secrets-manager-go-sdk/LICENSE b/vendor/github.com/IBM/secrets-manager-go-sdk/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/github.com/IBM/secrets-manager-go-sdk/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/IBM/secrets-manager-go-sdk/common/headers.go b/vendor/github.com/IBM/secrets-manager-go-sdk/common/headers.go new file mode 100644 index 00000000000..055396e208e --- /dev/null +++ b/vendor/github.com/IBM/secrets-manager-go-sdk/common/headers.go @@ -0,0 +1,82 @@ +/** + * (C) Copyright IBM Corp. 2019, 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "fmt" + "runtime" +) + +const ( + sdkName = "secrets-manager-go-sdk" + headerNameUserAgent = "User-Agent" +) + +// +// GetSdkHeaders - returns the set of SDK-specific headers to be included in an outgoing request. +// +// This function is invoked by generated service methods (i.e. methods which implement the REST API operations +// defined within the API definition). The purpose of this function is to give the SDK implementor the opportunity +// to provide SDK-specific HTTP headers that will be sent with an outgoing REST API request. +// This function is invoked for each invocation of a generated service method, +// so the set of HTTP headers could be request-specific. +// As an optimization, if your SDK will be returning the same set of HTTP headers for each invocation of this +// function, it is recommended that you initialize the returned map just once (perhaps by using +// lazy initialization) and simply return it each time the function is invoked, instead of building it each time +// as in the example below. +// +// If you plan to gather metrics for your SDK, the User-Agent header value must +// be a string similar to the following: +// secrets-manager-go-sdk/0.0.1 (lang=go; arch=x86_64; os=Linux; go.version=1.12.9) +// +// In the example above, the analytics tool will parse the user-agent header and +// use the following properties: +// "secrets-manager-go-sdk" - the name of your sdk +// "0.0.1"- the version of your sdk +// "lang=go" - the language of the current sdk +// "arch=x86_64; os=Linux; go.version=1.12.9" - system information +// +// Note: It is very important that the sdk name ends with the string `-sdk`, +// as the analytics data collector uses this to gather usage data. +// +// Parameters: +// serviceName - the name of the service as defined in the API definition (e.g. "MyService1") +// serviceVersion - the version of the service as defined in the API definition (e.g. "V1") +// operationId - the operationId as defined in the API definition (e.g. 
getContext) +// +// Returns: +// a Map which contains the set of headers to be included in the REST API request +// +func GetSdkHeaders(serviceName string, serviceVersion string, operationId string) map[string]string { + sdkHeaders := make(map[string]string) + + sdkHeaders[headerNameUserAgent] = GetUserAgentInfo() + + return sdkHeaders +} + +var userAgent string = fmt.Sprintf("%s/%s %s", sdkName, Version, GetSystemInfo()) + +func GetUserAgentInfo() string { + return userAgent +} + +var systemInfo = fmt.Sprintf("(lang=go; arch=%s; os=%s; go.version=%s)", runtime.GOARCH, runtime.GOOS, runtime.Version()) + +func GetSystemInfo() string { + return systemInfo +} diff --git a/vendor/github.com/IBM/secrets-manager-go-sdk/common/version.go b/vendor/github.com/IBM/secrets-manager-go-sdk/common/version.go new file mode 100644 index 00000000000..7c584d25058 --- /dev/null +++ b/vendor/github.com/IBM/secrets-manager-go-sdk/common/version.go @@ -0,0 +1,20 @@ +/** + * (C) Copyright IBM Corp. 2019, 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +// Version of the SDK +const Version = "0.1.19" diff --git a/vendor/github.com/IBM/secrets-manager-go-sdk/secretsmanagerv1/secrets_manager_v1.go b/vendor/github.com/IBM/secrets-manager-go-sdk/secretsmanagerv1/secrets_manager_v1.go new file mode 100644 index 00000000000..c69c3cabf04 --- /dev/null +++ b/vendor/github.com/IBM/secrets-manager-go-sdk/secretsmanagerv1/secrets_manager_v1.go @@ -0,0 +1,3918 @@ +/** + * (C) Copyright IBM Corp. 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.29.0-cd9ba74f-20210305-183535 + */ + +// Package secretsmanagerv1 : Operations and models for the SecretsManagerV1 service +package secretsmanagerv1 + +import ( + "context" + "encoding/json" + "fmt" + "github.com/IBM/go-sdk-core/v5/core" + common "github.com/IBM/secrets-manager-go-sdk/common" + "github.com/go-openapi/strfmt" + "net/http" + "reflect" + "time" +) + +// SecretsManagerV1 : With IBM Cloud® Secrets Manager, you can create, lease, and centrally manage secrets that are used +// in IBM Cloud services or your custom-built applications. Secrets are stored in a dedicated instance of Secrets +// Manager, built on open source HashiCorp Vault. +// +// Version: 1.0.0 +// See: https://cloud.ibm.com/docs/secrets-manager +type SecretsManagerV1 struct { + Service *core.BaseService +} + +// DefaultServiceURL is the default URL to make service requests to. 
+const DefaultServiceURL = "https://secrets-manager.cloud.ibm.com" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "secrets_manager" + +// SecretsManagerV1Options : Service options +type SecretsManagerV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator +} + +// NewSecretsManagerV1UsingExternalConfig : constructs an instance of SecretsManagerV1 with passed in options and external configuration. +func NewSecretsManagerV1UsingExternalConfig(options *SecretsManagerV1Options) (secretsManager *SecretsManagerV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + secretsManager, err = NewSecretsManagerV1(options) + if err != nil { + return + } + + err = secretsManager.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = secretsManager.Service.SetServiceURL(options.URL) + } + return +} + +// NewSecretsManagerV1 : constructs an instance of SecretsManagerV1 with passed in options. +func NewSecretsManagerV1(options *SecretsManagerV1Options) (service *SecretsManagerV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + service = &SecretsManagerV1{ + Service: baseService, + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "secretsManager" suitable for processing requests. +func (secretsManager *SecretsManagerV1) Clone() *SecretsManagerV1 { + if core.IsNil(secretsManager) { + return nil + } + clone := *secretsManager + clone.Service = secretsManager.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (secretsManager *SecretsManagerV1) SetServiceURL(url string) error { + return secretsManager.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (secretsManager *SecretsManagerV1) GetServiceURL() string { + return secretsManager.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (secretsManager *SecretsManagerV1) SetDefaultHeaders(headers http.Header) { + secretsManager.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (secretsManager *SecretsManagerV1) SetEnableGzipCompression(enableGzip bool) { + secretsManager.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (secretsManager *SecretsManagerV1) GetEnableGzipCompression() bool { + return secretsManager.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. 
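+// Example (editor's illustrative sketch, not generated SDK code):
+// constructing a client and enabling retries with the functions defined in
+// this file. The endpoint URL and API key are placeholders; IamAuthenticator
+// comes from the go-sdk-core package imported above:
+//
+//	secretsManager, err := NewSecretsManagerV1(&SecretsManagerV1Options{
+//		URL:           "https://example.secrets-manager.endpoint", // placeholder
+//		Authenticator: &core.IamAuthenticator{ApiKey: "my-iam-api-key"},
+//	})
+//	if err == nil {
+//		secretsManager.EnableRetries(3, 30*time.Second)
+//	}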
+func (secretsManager *SecretsManagerV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + secretsManager.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (secretsManager *SecretsManagerV1) DisableRetries() { + secretsManager.Service.DisableRetries() +} + +// PutConfig : Configure secrets of a given type +// Updates the configuration for the given secret type. +func (secretsManager *SecretsManagerV1) PutConfig(putConfigOptions *PutConfigOptions) (response *core.DetailedResponse, err error) { + return secretsManager.PutConfigWithContext(context.Background(), putConfigOptions) +} + +// PutConfigWithContext is an alternate form of the PutConfig method which supports a Context parameter +func (secretsManager *SecretsManagerV1) PutConfigWithContext(ctx context.Context, putConfigOptions *PutConfigOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(putConfigOptions, "putConfigOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(putConfigOptions, "putConfigOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *putConfigOptions.SecretType, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/config/{secret_type}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range putConfigOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "PutConfig") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Content-Type", "application/json") + + _, err = builder.SetBodyContentJSON(putConfigOptions.EngineConfigOneOf) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = secretsManager.Service.Request(request, nil) + + return +} + +// GetConfig : Get the configuration for a secret type +// Retrieves the configuration that is associated with the given secret type. 
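+// Example (editor's illustrative sketch, not generated SDK code): calling the
+// GetConfig operation documented above. Only the SecretType field of
+// GetConfigOptions is shown, since it is the field the operation uses for the
+// request path; the "iam_credentials" value is an assumed secret type:
+//
+//	getConfigOptions := &GetConfigOptions{
+//		SecretType: core.StringPtr("iam_credentials"),
+//	}
+//	result, response, err := secretsManager.GetConfig(getConfigOptions)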
+func (secretsManager *SecretsManagerV1) GetConfig(getConfigOptions *GetConfigOptions) (result EngineConfigOneOfIntf, response *core.DetailedResponse, err error) { + return secretsManager.GetConfigWithContext(context.Background(), getConfigOptions) +} + +// GetConfigWithContext is an alternate form of the GetConfig method which supports a Context parameter +func (secretsManager *SecretsManagerV1) GetConfigWithContext(ctx context.Context, getConfigOptions *GetConfigOptions) (result EngineConfigOneOfIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getConfigOptions, "getConfigOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getConfigOptions, "getConfigOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *getConfigOptions.SecretType, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/config/{secret_type}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getConfigOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "GetConfig") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEngineConfigOneOf) + if err != nil { + return + } + response.Result = result + + return +} + +// PutPolicy : Set secret policies +// Creates or updates one or more policies, such as an [automatic rotation +// policy](http://cloud.ibm.com/docs/secrets-manager?topic=secrets-manager-rotate-secrets#auto-rotate-secret), for the +// specified secret. 
+func (secretsManager *SecretsManagerV1) PutPolicy(putPolicyOptions *PutPolicyOptions) (result GetSecretPoliciesOneOfIntf, response *core.DetailedResponse, err error) { + return secretsManager.PutPolicyWithContext(context.Background(), putPolicyOptions) +} + +// PutPolicyWithContext is an alternate form of the PutPolicy method which supports a Context parameter +func (secretsManager *SecretsManagerV1) PutPolicyWithContext(ctx context.Context, putPolicyOptions *PutPolicyOptions) (result GetSecretPoliciesOneOfIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(putPolicyOptions, "putPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(putPolicyOptions, "putPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *putPolicyOptions.SecretType, + "id": *putPolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets/{secret_type}/{id}/policies`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range putPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "PutPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + if putPolicyOptions.Policy != nil { + builder.AddQuery("policy", fmt.Sprint(*putPolicyOptions.Policy)) + } + + body := make(map[string]interface{}) + if putPolicyOptions.Metadata != nil { + body["metadata"] = putPolicyOptions.Metadata + } + if putPolicyOptions.Resources != nil { + body["resources"] = putPolicyOptions.Resources + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetSecretPoliciesOneOf) + if err != nil { + return + } + response.Result = result + + return +} + +// GetPolicy : List secret policies +// Retrieves a list of policies that are associated with a specified secret. 
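+//
+// Example (illustrative sketch; the ID argument is a placeholder for a real
+// v4 UUID):
+//
+//   getPolicyOptions := secretsManager.NewGetPolicyOptions(GetPolicyOptionsSecretTypeUsernamePasswordConst, "<secret-id>")
+//   getPolicyOptions.SetPolicy(GetPolicyOptionsPolicyRotationConst)
+//   result, response, err := secretsManager.GetPolicy(getPolicyOptions)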
+func (secretsManager *SecretsManagerV1) GetPolicy(getPolicyOptions *GetPolicyOptions) (result GetSecretPoliciesOneOfIntf, response *core.DetailedResponse, err error) { + return secretsManager.GetPolicyWithContext(context.Background(), getPolicyOptions) +} + +// GetPolicyWithContext is an alternate form of the GetPolicy method which supports a Context parameter +func (secretsManager *SecretsManagerV1) GetPolicyWithContext(ctx context.Context, getPolicyOptions *GetPolicyOptions) (result GetSecretPoliciesOneOfIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getPolicyOptions, "getPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getPolicyOptions, "getPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *getPolicyOptions.SecretType, + "id": *getPolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets/{secret_type}/{id}/policies`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "GetPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if getPolicyOptions.Policy != nil { + builder.AddQuery("policy", fmt.Sprint(*getPolicyOptions.Policy)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetSecretPoliciesOneOf) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateSecretGroup : Create a secret group +// Creates a secret group that you can use to organize secrets and control who on your team has access to them. +// +// A successful request returns the ID value of the secret group, along with other metadata. To learn more about secret +// groups, check out the [docs](https://cloud.ibm.com/docs/secrets-manager?topic=secrets-manager-secret-groups). 
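+//
+// Example (illustrative sketch; the Name field on SecretGroupResource is an
+// assumption about the model definition, which is not shown in this hunk):
+//
+//   metadata, _ := secretsManager.NewCollectionMetadata(CollectionMetadataCollectionTypeApplicationVndIBMSecretsManagerSecretGroupJSONConst, 1)
+//   resources := []SecretGroupResource{{Name: core.StringPtr("my-secret-group")}}
+//   createOptions := secretsManager.NewCreateSecretGroupOptions(metadata, resources)
+//   result, response, err := secretsManager.CreateSecretGroup(createOptions)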
+func (secretsManager *SecretsManagerV1) CreateSecretGroup(createSecretGroupOptions *CreateSecretGroupOptions) (result *SecretGroupDef, response *core.DetailedResponse, err error) { + return secretsManager.CreateSecretGroupWithContext(context.Background(), createSecretGroupOptions) +} + +// CreateSecretGroupWithContext is an alternate form of the CreateSecretGroup method which supports a Context parameter +func (secretsManager *SecretsManagerV1) CreateSecretGroupWithContext(ctx context.Context, createSecretGroupOptions *CreateSecretGroupOptions) (result *SecretGroupDef, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createSecretGroupOptions, "createSecretGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createSecretGroupOptions, "createSecretGroupOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secret_groups`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createSecretGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "CreateSecretGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createSecretGroupOptions.Metadata != nil { + body["metadata"] = createSecretGroupOptions.Metadata + } + if createSecretGroupOptions.Resources != nil { + body["resources"] = createSecretGroupOptions.Resources + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecretGroupDef) + if err != nil { + return + } + response.Result = result + + return +} + +// ListSecretGroups : List secret groups +// Retrieves the list of secret groups that are available in your Secrets Manager instance. 
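+//
+// Example (illustrative sketch; judging from the validation below, the options
+// struct has no required fields, so an empty value suffices):
+//
+//   result, response, err := secretsManager.ListSecretGroups(&ListSecretGroupsOptions{})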
+func (secretsManager *SecretsManagerV1) ListSecretGroups(listSecretGroupsOptions *ListSecretGroupsOptions) (result *SecretGroupDef, response *core.DetailedResponse, err error) { + return secretsManager.ListSecretGroupsWithContext(context.Background(), listSecretGroupsOptions) +} + +// ListSecretGroupsWithContext is an alternate form of the ListSecretGroups method which supports a Context parameter +func (secretsManager *SecretsManagerV1) ListSecretGroupsWithContext(ctx context.Context, listSecretGroupsOptions *ListSecretGroupsOptions) (result *SecretGroupDef, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listSecretGroupsOptions, "listSecretGroupsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secret_groups`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listSecretGroupsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "ListSecretGroups") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecretGroupDef) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSecretGroup : Get a secret group +// Retrieves the metadata of an existing secret group by specifying the ID of the group. 
+func (secretsManager *SecretsManagerV1) GetSecretGroup(getSecretGroupOptions *GetSecretGroupOptions) (result *SecretGroupDef, response *core.DetailedResponse, err error) { + return secretsManager.GetSecretGroupWithContext(context.Background(), getSecretGroupOptions) +} + +// GetSecretGroupWithContext is an alternate form of the GetSecretGroup method which supports a Context parameter +func (secretsManager *SecretsManagerV1) GetSecretGroupWithContext(ctx context.Context, getSecretGroupOptions *GetSecretGroupOptions) (result *SecretGroupDef, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSecretGroupOptions, "getSecretGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSecretGroupOptions, "getSecretGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getSecretGroupOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secret_groups/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSecretGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "GetSecretGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecretGroupDef) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSecretGroupMetadata : Update a secret group +// Updates the metadata of an existing secret group, such as its name or description. 
+func (secretsManager *SecretsManagerV1) UpdateSecretGroupMetadata(updateSecretGroupMetadataOptions *UpdateSecretGroupMetadataOptions) (result *SecretGroupDef, response *core.DetailedResponse, err error) { + return secretsManager.UpdateSecretGroupMetadataWithContext(context.Background(), updateSecretGroupMetadataOptions) +} + +// UpdateSecretGroupMetadataWithContext is an alternate form of the UpdateSecretGroupMetadata method which supports a Context parameter +func (secretsManager *SecretsManagerV1) UpdateSecretGroupMetadataWithContext(ctx context.Context, updateSecretGroupMetadataOptions *UpdateSecretGroupMetadataOptions) (result *SecretGroupDef, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateSecretGroupMetadataOptions, "updateSecretGroupMetadataOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateSecretGroupMetadataOptions, "updateSecretGroupMetadataOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateSecretGroupMetadataOptions.ID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secret_groups/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateSecretGroupMetadataOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "UpdateSecretGroupMetadata") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateSecretGroupMetadataOptions.Metadata != nil { + body["metadata"] = updateSecretGroupMetadataOptions.Metadata + } + if updateSecretGroupMetadataOptions.Resources != nil { + body["resources"] = updateSecretGroupMetadataOptions.Resources + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecretGroupDef) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteSecretGroup : Delete a secret group +// Deletes a secret group by specifying the ID of the secret group. +// +// **Note:** To delete a secret group, it must be empty. If you need to remove a secret group that contains secrets, you +// must first [delete the secrets](#delete-secret) that are associated with the group. 
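+//
+// Example (illustrative sketch; per the note above, the group must already be
+// empty, and the ID is a placeholder for a real v4 UUID):
+//
+//   deleteGroupOptions := secretsManager.NewDeleteSecretGroupOptions("<group-id>")
+//   response, err := secretsManager.DeleteSecretGroup(deleteGroupOptions)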
+func (secretsManager *SecretsManagerV1) DeleteSecretGroup(deleteSecretGroupOptions *DeleteSecretGroupOptions) (response *core.DetailedResponse, err error) { + return secretsManager.DeleteSecretGroupWithContext(context.Background(), deleteSecretGroupOptions) +} + +// DeleteSecretGroupWithContext is an alternate form of the DeleteSecretGroup method which supports a Context parameter +func (secretsManager *SecretsManagerV1) DeleteSecretGroupWithContext(ctx context.Context, deleteSecretGroupOptions *DeleteSecretGroupOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteSecretGroupOptions, "deleteSecretGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteSecretGroupOptions, "deleteSecretGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteSecretGroupOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secret_groups/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteSecretGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "DeleteSecretGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = secretsManager.Service.Request(request, nil) + + return +} + +// CreateSecret : Create a secret +// Creates a secret that you can use to access or authenticate to a protected resource. +// +// A successful request stores the secret in your dedicated instance based on the secret type and data that you specify. +// The response returns the ID value of the secret, along with other metadata. +// +// To learn more about the types of secrets that you can create with Secrets Manager, check out the +// [docs](https://cloud.ibm.com/docs/secrets-manager?topic=secrets-manager-secret-basics). 
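+//
+// Example (illustrative sketch; the concrete SecretResource implementation and
+// its fields are defined in model code that is not shown in this hunk, so the
+// resources slice is left as a placeholder):
+//
+//   metadata, _ := secretsManager.NewCollectionMetadata(CollectionMetadataCollectionTypeApplicationVndIBMSecretsManagerSecretJSONConst, 1)
+//   resources := []SecretResourceIntf{ /* one concrete SecretResource value, e.g. an arbitrary secret with a name and payload */ }
+//   createOptions := secretsManager.NewCreateSecretOptions(CreateSecretOptionsSecretTypeArbitraryConst, metadata, resources)
+//   result, response, err := secretsManager.CreateSecret(createOptions)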
+func (secretsManager *SecretsManagerV1) CreateSecret(createSecretOptions *CreateSecretOptions) (result *CreateSecret, response *core.DetailedResponse, err error) { + return secretsManager.CreateSecretWithContext(context.Background(), createSecretOptions) +} + +// CreateSecretWithContext is an alternate form of the CreateSecret method which supports a Context parameter +func (secretsManager *SecretsManagerV1) CreateSecretWithContext(ctx context.Context, createSecretOptions *CreateSecretOptions) (result *CreateSecret, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createSecretOptions, "createSecretOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createSecretOptions, "createSecretOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *createSecretOptions.SecretType, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets/{secret_type}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createSecretOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "CreateSecret") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if createSecretOptions.Metadata != nil { + body["metadata"] = createSecretOptions.Metadata + } + if createSecretOptions.Resources != nil { + body["resources"] = createSecretOptions.Resources + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateSecret) + if err != nil { + return + } + response.Result = result + + return +} + +// ListSecrets : List secrets by type +// Retrieves a list of secrets based on the type that you specify. 
+func (secretsManager *SecretsManagerV1) ListSecrets(listSecretsOptions *ListSecretsOptions) (result *ListSecrets, response *core.DetailedResponse, err error) { + return secretsManager.ListSecretsWithContext(context.Background(), listSecretsOptions) +} + +// ListSecretsWithContext is an alternate form of the ListSecrets method which supports a Context parameter +func (secretsManager *SecretsManagerV1) ListSecretsWithContext(ctx context.Context, listSecretsOptions *ListSecretsOptions) (result *ListSecrets, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listSecretsOptions, "listSecretsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listSecretsOptions, "listSecretsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *listSecretsOptions.SecretType, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets/{secret_type}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listSecretsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "ListSecrets") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listSecretsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listSecretsOptions.Limit)) + } + if listSecretsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listSecretsOptions.Offset)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListSecrets) + if err != nil { + return + } + response.Result = result + + return +} + +// ListAllSecrets : List all secrets +// Retrieves a list of all secrets in your Secrets Manager instance. 
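+//
+// Example (illustrative sketch; the options are built as a struct literal
+// since all of the fields are optional, see ListAllSecretsOptions below):
+//
+//   listOptions := &ListAllSecretsOptions{
+//     Limit:  core.Int64Ptr(5),
+//     Search: core.StringPtr("prod"),
+//     SortBy: core.StringPtr("creation_date"),
+//   }
+//   result, response, err := secretsManager.ListAllSecrets(listOptions)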
+func (secretsManager *SecretsManagerV1) ListAllSecrets(listAllSecretsOptions *ListAllSecretsOptions) (result *ListSecrets, response *core.DetailedResponse, err error) { + return secretsManager.ListAllSecretsWithContext(context.Background(), listAllSecretsOptions) +} + +// ListAllSecretsWithContext is an alternate form of the ListAllSecrets method which supports a Context parameter +func (secretsManager *SecretsManagerV1) ListAllSecretsWithContext(ctx context.Context, listAllSecretsOptions *ListAllSecretsOptions) (result *ListSecrets, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listAllSecretsOptions, "listAllSecretsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listAllSecretsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "ListAllSecrets") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + if listAllSecretsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listAllSecretsOptions.Limit)) + } + if listAllSecretsOptions.Offset != nil { + builder.AddQuery("offset", fmt.Sprint(*listAllSecretsOptions.Offset)) + } + if listAllSecretsOptions.Search != nil { + builder.AddQuery("search", fmt.Sprint(*listAllSecretsOptions.Search)) + } + if listAllSecretsOptions.SortBy != nil { + builder.AddQuery("sort_by", fmt.Sprint(*listAllSecretsOptions.SortBy)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListSecrets) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSecret : Get a secret +// Retrieves a secret and its details by specifying the ID of the secret. +// +// A successful request returns the secret data that is associated with your secret, along with other metadata. To view +// only the details of a specified secret without retrieving its value, use the [Get secret +// metadata](#get-secret-metadata) method. 
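+//
+// Example (illustrative sketch; the ID is a placeholder for a real v4 UUID):
+//
+//   getOptions := secretsManager.NewGetSecretOptions(GetSecretOptionsSecretTypeArbitraryConst, "<secret-id>")
+//   result, response, err := secretsManager.GetSecret(getOptions)
+//   // The secret data lives in result.Resources; the concrete fields depend
+//   // on the secret type.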
+func (secretsManager *SecretsManagerV1) GetSecret(getSecretOptions *GetSecretOptions) (result *GetSecret, response *core.DetailedResponse, err error) { + return secretsManager.GetSecretWithContext(context.Background(), getSecretOptions) +} + +// GetSecretWithContext is an alternate form of the GetSecret method which supports a Context parameter +func (secretsManager *SecretsManagerV1) GetSecretWithContext(ctx context.Context, getSecretOptions *GetSecretOptions) (result *GetSecret, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSecretOptions, "getSecretOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSecretOptions, "getSecretOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *getSecretOptions.SecretType, + "id": *getSecretOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets/{secret_type}/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSecretOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "GetSecret") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetSecret) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSecret : Invoke an action on a secret +// Invokes an action on a specified secret. This method supports the following actions: +// +// - `rotate`: Replace the value of an `arbitrary` or `username_password` secret. +// - `delete_credentials`: Delete the API key that is associated with an `iam_credentials` secret. 
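+//
+// Example (illustrative sketch built as a struct literal from the fields that
+// the method below reads; "actionBody" stands in for a value of the
+// SecretActionOneOf model, which is defined elsewhere in this package):
+//
+//   updateOptions := &UpdateSecretOptions{
+//     SecretType:        core.StringPtr("arbitrary"),
+//     ID:                core.StringPtr("<secret-id>"), // placeholder v4 UUID
+//     Action:            core.StringPtr("rotate"),
+//     SecretActionOneOf: actionBody, // carries the replacement payload
+//   }
+//   result, response, err := secretsManager.UpdateSecret(updateOptions)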
+func (secretsManager *SecretsManagerV1) UpdateSecret(updateSecretOptions *UpdateSecretOptions) (result *GetSecret, response *core.DetailedResponse, err error) { + return secretsManager.UpdateSecretWithContext(context.Background(), updateSecretOptions) +} + +// UpdateSecretWithContext is an alternate form of the UpdateSecret method which supports a Context parameter +func (secretsManager *SecretsManagerV1) UpdateSecretWithContext(ctx context.Context, updateSecretOptions *UpdateSecretOptions) (result *GetSecret, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateSecretOptions, "updateSecretOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateSecretOptions, "updateSecretOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *updateSecretOptions.SecretType, + "id": *updateSecretOptions.ID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets/{secret_type}/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateSecretOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "UpdateSecret") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("action", fmt.Sprint(*updateSecretOptions.Action)) + + _, err = builder.SetBodyContentJSON(updateSecretOptions.SecretActionOneOf) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetSecret) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteSecret : Delete a secret +// Deletes a secret by specifying the ID of the secret. 
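+//
+// Example (illustrative sketch; the ID is a placeholder for a real v4 UUID,
+// and deletion is permanent):
+//
+//   deleteOptions := secretsManager.NewDeleteSecretOptions(DeleteSecretOptionsSecretTypeArbitraryConst, "<secret-id>")
+//   response, err := secretsManager.DeleteSecret(deleteOptions)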
+func (secretsManager *SecretsManagerV1) DeleteSecret(deleteSecretOptions *DeleteSecretOptions) (response *core.DetailedResponse, err error) { + return secretsManager.DeleteSecretWithContext(context.Background(), deleteSecretOptions) +} + +// DeleteSecretWithContext is an alternate form of the DeleteSecret method which supports a Context parameter +func (secretsManager *SecretsManagerV1) DeleteSecretWithContext(ctx context.Context, deleteSecretOptions *DeleteSecretOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteSecretOptions, "deleteSecretOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteSecretOptions, "deleteSecretOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *deleteSecretOptions.SecretType, + "id": *deleteSecretOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets/{secret_type}/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteSecretOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "DeleteSecret") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + request, err := builder.Build() + if err != nil { + return + } + + response, err = secretsManager.Service.Request(request, nil) + + return +} + +// GetSecretMetadata : Get secret metadata +// Retrieves the details of a secret by specifying the ID. +// +// A successful request returns only metadata about the secret, such as its name and creation date. To retrieve the +// value of a secret, use the [Get a secret](#get-secret) method. 
+func (secretsManager *SecretsManagerV1) GetSecretMetadata(getSecretMetadataOptions *GetSecretMetadataOptions) (result *SecretMetadataRequest, response *core.DetailedResponse, err error) { + return secretsManager.GetSecretMetadataWithContext(context.Background(), getSecretMetadataOptions) +} + +// GetSecretMetadataWithContext is an alternate form of the GetSecretMetadata method which supports a Context parameter +func (secretsManager *SecretsManagerV1) GetSecretMetadataWithContext(ctx context.Context, getSecretMetadataOptions *GetSecretMetadataOptions) (result *SecretMetadataRequest, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSecretMetadataOptions, "getSecretMetadataOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSecretMetadataOptions, "getSecretMetadataOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *getSecretMetadataOptions.SecretType, + "id": *getSecretMetadataOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets/{secret_type}/{id}/metadata`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSecretMetadataOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "GetSecretMetadata") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecretMetadataRequest) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSecretMetadata : Update secret metadata +// Updates the metadata of a secret, such as its name or description. +// +// To update the actual contents of a secret, rotate the secret by using the [Invoke an action on a +// secret](#update-secret) method. 
+func (secretsManager *SecretsManagerV1) UpdateSecretMetadata(updateSecretMetadataOptions *UpdateSecretMetadataOptions) (result *SecretMetadataRequest, response *core.DetailedResponse, err error) { + return secretsManager.UpdateSecretMetadataWithContext(context.Background(), updateSecretMetadataOptions) +} + +// UpdateSecretMetadataWithContext is an alternate form of the UpdateSecretMetadata method which supports a Context parameter +func (secretsManager *SecretsManagerV1) UpdateSecretMetadataWithContext(ctx context.Context, updateSecretMetadataOptions *UpdateSecretMetadataOptions) (result *SecretMetadataRequest, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateSecretMetadataOptions, "updateSecretMetadataOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateSecretMetadataOptions, "updateSecretMetadataOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "secret_type": *updateSecretMetadataOptions.SecretType, + "id": *updateSecretMetadataOptions.ID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = secretsManager.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(secretsManager.Service.Options.URL, `/api/v1/secrets/{secret_type}/{id}/metadata`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateSecretMetadataOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("secrets_manager", "V1", "UpdateSecretMetadata") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + body := make(map[string]interface{}) + if updateSecretMetadataOptions.Metadata != nil { + body["metadata"] = updateSecretMetadataOptions.Metadata + } + if updateSecretMetadataOptions.Resources != nil { + body["resources"] = updateSecretMetadataOptions.Resources + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = secretsManager.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecretMetadataRequest) + if err != nil { + return + } + response.Result = result + + return +} + +// CollectionMetadata : The metadata that describes the resource array. +type CollectionMetadata struct { + // The type of resources in the resource array. + CollectionType *string `json:"collection_type" validate:"required"` + + // The number of elements in the resource array. + CollectionTotal *int64 `json:"collection_total" validate:"required"` +} + +// Constants associated with the CollectionMetadata.CollectionType property. +// The type of resources in the resource array. 
+const ( + CollectionMetadataCollectionTypeApplicationVndIBMSecretsManagerErrorJSONConst = "application/vnd.ibm.secrets-manager.error+json" + CollectionMetadataCollectionTypeApplicationVndIBMSecretsManagerSecretGroupJSONConst = "application/vnd.ibm.secrets-manager.secret.group+json" + CollectionMetadataCollectionTypeApplicationVndIBMSecretsManagerSecretJSONConst = "application/vnd.ibm.secrets-manager.secret+json" + CollectionMetadataCollectionTypeApplicationVndIBMSecretsManagerSecretPolicyJSONConst = "application/vnd.ibm.secrets-manager.secret.policy+json" + CollectionMetadataCollectionTypeApplicationVndIBMSecretsManagerSecretVersionJSONConst = "application/vnd.ibm.secrets-manager.secret.version+json" +) + +// NewCollectionMetadata : Instantiate CollectionMetadata (Generic Model Constructor) +func (*SecretsManagerV1) NewCollectionMetadata(collectionType string, collectionTotal int64) (model *CollectionMetadata, err error) { + model = &CollectionMetadata{ + CollectionType: core.StringPtr(collectionType), + CollectionTotal: core.Int64Ptr(collectionTotal), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalCollectionMetadata unmarshals an instance of CollectionMetadata from the specified map of raw messages. +func UnmarshalCollectionMetadata(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CollectionMetadata) + err = core.UnmarshalPrimitive(m, "collection_type", &obj.CollectionType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "collection_total", &obj.CollectionTotal) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CreateSecret : The base schema for creating secrets. +type CreateSecret struct { + // The metadata that describes the resource array. + Metadata *CollectionMetadata `json:"metadata" validate:"required"` + + // A collection of resources. + Resources []SecretResourceIntf `json:"resources" validate:"required"` +} + +// NewCreateSecret : Instantiate CreateSecret (Generic Model Constructor) +func (*SecretsManagerV1) NewCreateSecret(metadata *CollectionMetadata, resources []SecretResourceIntf) (model *CreateSecret, err error) { + model = &CreateSecret{ + Metadata: metadata, + Resources: resources, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalCreateSecret unmarshals an instance of CreateSecret from the specified map of raw messages. +func UnmarshalCreateSecret(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CreateSecret) + err = core.UnmarshalModel(m, "metadata", &obj.Metadata, UnmarshalCollectionMetadata) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalSecretResource) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CreateSecretGroupOptions : The CreateSecretGroup options. +type CreateSecretGroupOptions struct { + // The metadata that describes the resource array. + Metadata *CollectionMetadata `validate:"required"` + + // A collection of resources. 
+ Resources []SecretGroupResource `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateSecretGroupOptions : Instantiate CreateSecretGroupOptions +func (*SecretsManagerV1) NewCreateSecretGroupOptions(metadata *CollectionMetadata, resources []SecretGroupResource) *CreateSecretGroupOptions { + return &CreateSecretGroupOptions{ + Metadata: metadata, + Resources: resources, + } +} + +// SetMetadata : Allow user to set Metadata +func (options *CreateSecretGroupOptions) SetMetadata(metadata *CollectionMetadata) *CreateSecretGroupOptions { + options.Metadata = metadata + return options +} + +// SetResources : Allow user to set Resources +func (options *CreateSecretGroupOptions) SetResources(resources []SecretGroupResource) *CreateSecretGroupOptions { + options.Resources = resources + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateSecretGroupOptions) SetHeaders(param map[string]string) *CreateSecretGroupOptions { + options.Headers = param + return options +} + +// CreateSecretOptions : The CreateSecret options. +type CreateSecretOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The metadata that describes the resource array. + Metadata *CollectionMetadata `validate:"required"` + + // A collection of resources. + Resources []SecretResourceIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateSecretOptions.SecretType property. +// The secret type. +const ( + CreateSecretOptionsSecretTypeArbitraryConst = "arbitrary" + CreateSecretOptionsSecretTypeIamCredentialsConst = "iam_credentials" + CreateSecretOptionsSecretTypeUsernamePasswordConst = "username_password" +) + +// NewCreateSecretOptions : Instantiate CreateSecretOptions +func (*SecretsManagerV1) NewCreateSecretOptions(secretType string, metadata *CollectionMetadata, resources []SecretResourceIntf) *CreateSecretOptions { + return &CreateSecretOptions{ + SecretType: core.StringPtr(secretType), + Metadata: metadata, + Resources: resources, + } +} + +// SetSecretType : Allow user to set SecretType +func (options *CreateSecretOptions) SetSecretType(secretType string) *CreateSecretOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetMetadata : Allow user to set Metadata +func (options *CreateSecretOptions) SetMetadata(metadata *CollectionMetadata) *CreateSecretOptions { + options.Metadata = metadata + return options +} + +// SetResources : Allow user to set Resources +func (options *CreateSecretOptions) SetResources(resources []SecretResourceIntf) *CreateSecretOptions { + options.Resources = resources + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateSecretOptions) SetHeaders(param map[string]string) *CreateSecretOptions { + options.Headers = param + return options +} + +// DeleteSecretGroupOptions : The DeleteSecretGroup options. +type DeleteSecretGroupOptions struct { + // The v4 UUID that uniquely identifies the secret group. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSecretGroupOptions : Instantiate DeleteSecretGroupOptions +func (*SecretsManagerV1) NewDeleteSecretGroupOptions(id string) *DeleteSecretGroupOptions { + return &DeleteSecretGroupOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteSecretGroupOptions) SetID(id string) *DeleteSecretGroupOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSecretGroupOptions) SetHeaders(param map[string]string) *DeleteSecretGroupOptions { + options.Headers = param + return options +} + +// DeleteSecretOptions : The DeleteSecret options. +type DeleteSecretOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The v4 UUID that uniquely identifies the secret. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the DeleteSecretOptions.SecretType property. +// The secret type. +const ( + DeleteSecretOptionsSecretTypeArbitraryConst = "arbitrary" + DeleteSecretOptionsSecretTypeIamCredentialsConst = "iam_credentials" + DeleteSecretOptionsSecretTypeUsernamePasswordConst = "username_password" +) + +// NewDeleteSecretOptions : Instantiate DeleteSecretOptions +func (*SecretsManagerV1) NewDeleteSecretOptions(secretType string, id string) *DeleteSecretOptions { + return &DeleteSecretOptions{ + SecretType: core.StringPtr(secretType), + ID: core.StringPtr(id), + } +} + +// SetSecretType : Allow user to set SecretType +func (options *DeleteSecretOptions) SetSecretType(secretType string) *DeleteSecretOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteSecretOptions) SetID(id string) *DeleteSecretOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSecretOptions) SetHeaders(param map[string]string) *DeleteSecretOptions { + options.Headers = param + return options +} + +// EngineConfigOneOf : EngineConfigOneOf struct +// Models which "extend" this model: +// - EngineConfigOneOfIamSecretEngineRootConfig +type EngineConfigOneOf struct { + // An IBM Cloud API key that has the capability to create and manage service IDs. + // + // The API key must be assigned the Editor platform role on the Access Groups Service and the Operator platform role on + // the IAM Identity Service. For more information, see [Enabling the IAM secrets + // engine](https://cloud.ibm.com/docs/secrets-manager?topic=secrets-manager-secret-engines#configure-iam-engine). + APIKey *string `json:"api_key,omitempty"` + + // The hash value of the IBM Cloud API key that is used to create and manage service IDs. + APIKeyHash *string `json:"api_key_hash,omitempty"` +} + +func (*EngineConfigOneOf) isaEngineConfigOneOf() bool { + return true +} + +type EngineConfigOneOfIntf interface { + isaEngineConfigOneOf() bool +} + +// UnmarshalEngineConfigOneOf unmarshals an instance of EngineConfigOneOf from the specified map of raw messages. 
+func UnmarshalEngineConfigOneOf(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(EngineConfigOneOf) + err = core.UnmarshalPrimitive(m, "api_key", &obj.APIKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "api_key_hash", &obj.APIKeyHash) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetConfigOptions : The GetConfig options. +type GetConfigOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetConfigOptions.SecretType property. +// The secret type. +const ( + GetConfigOptionsSecretTypeIamCredentialsConst = "iam_credentials" +) + +// NewGetConfigOptions : Instantiate GetConfigOptions +func (*SecretsManagerV1) NewGetConfigOptions(secretType string) *GetConfigOptions { + return &GetConfigOptions{ + SecretType: core.StringPtr(secretType), + } +} + +// SetSecretType : Allow user to set SecretType +func (options *GetConfigOptions) SetSecretType(secretType string) *GetConfigOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetConfigOptions) SetHeaders(param map[string]string) *GetConfigOptions { + options.Headers = param + return options +} + +// GetPolicyOptions : The GetPolicy options. +type GetPolicyOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The v4 UUID that uniquely identifies the secret. + ID *string `validate:"required,ne="` + + // The type of policy that is associated with the specified secret. + Policy *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetPolicyOptions.SecretType property. +// The secret type. +const ( + GetPolicyOptionsSecretTypeUsernamePasswordConst = "username_password" +) + +// Constants associated with the GetPolicyOptions.Policy property. +// The type of policy that is associated with the specified secret. +const ( + GetPolicyOptionsPolicyRotationConst = "rotation" +) + +// NewGetPolicyOptions : Instantiate GetPolicyOptions +func (*SecretsManagerV1) NewGetPolicyOptions(secretType string, id string) *GetPolicyOptions { + return &GetPolicyOptions{ + SecretType: core.StringPtr(secretType), + ID: core.StringPtr(id), + } +} + +// SetSecretType : Allow user to set SecretType +func (options *GetPolicyOptions) SetSecretType(secretType string) *GetPolicyOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetID : Allow user to set ID +func (options *GetPolicyOptions) SetID(id string) *GetPolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetPolicy : Allow user to set Policy +func (options *GetPolicyOptions) SetPolicy(policy string) *GetPolicyOptions { + options.Policy = core.StringPtr(policy) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetPolicyOptions) SetHeaders(param map[string]string) *GetPolicyOptions { + options.Headers = param + return options +} + +// GetSecret : The base schema for retrieving a secret. +type GetSecret struct { + // The metadata that describes the resource array. + Metadata *CollectionMetadata `json:"metadata" validate:"required"` + + // A collection of resources. 
+ Resources []SecretResourceIntf `json:"resources" validate:"required"` +} + +// UnmarshalGetSecret unmarshals an instance of GetSecret from the specified map of raw messages. +func UnmarshalGetSecret(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GetSecret) + err = core.UnmarshalModel(m, "metadata", &obj.Metadata, UnmarshalCollectionMetadata) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalSecretResource) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetSecretGroupOptions : The GetSecretGroup options. +type GetSecretGroupOptions struct { + // The v4 UUID that uniquely identifies the secret group. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSecretGroupOptions : Instantiate GetSecretGroupOptions +func (*SecretsManagerV1) NewGetSecretGroupOptions(id string) *GetSecretGroupOptions { + return &GetSecretGroupOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetSecretGroupOptions) SetID(id string) *GetSecretGroupOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSecretGroupOptions) SetHeaders(param map[string]string) *GetSecretGroupOptions { + options.Headers = param + return options +} + +// GetSecretMetadataOptions : The GetSecretMetadata options. +type GetSecretMetadataOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The v4 UUID that uniquely identifies the secret. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetSecretMetadataOptions.SecretType property. +// The secret type. +const ( + GetSecretMetadataOptionsSecretTypeArbitraryConst = "arbitrary" + GetSecretMetadataOptionsSecretTypeIamCredentialsConst = "iam_credentials" + GetSecretMetadataOptionsSecretTypeUsernamePasswordConst = "username_password" +) + +// NewGetSecretMetadataOptions : Instantiate GetSecretMetadataOptions +func (*SecretsManagerV1) NewGetSecretMetadataOptions(secretType string, id string) *GetSecretMetadataOptions { + return &GetSecretMetadataOptions{ + SecretType: core.StringPtr(secretType), + ID: core.StringPtr(id), + } +} + +// SetSecretType : Allow user to set SecretType +func (options *GetSecretMetadataOptions) SetSecretType(secretType string) *GetSecretMetadataOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetID : Allow user to set ID +func (options *GetSecretMetadataOptions) SetID(id string) *GetSecretMetadataOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSecretMetadataOptions) SetHeaders(param map[string]string) *GetSecretMetadataOptions { + options.Headers = param + return options +} + +// GetSecretOptions : The GetSecret options. +type GetSecretOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The v4 UUID that uniquely identifies the secret. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the GetSecretOptions.SecretType property. +// The secret type. 
+const ( + GetSecretOptionsSecretTypeArbitraryConst = "arbitrary" + GetSecretOptionsSecretTypeIamCredentialsConst = "iam_credentials" + GetSecretOptionsSecretTypeUsernamePasswordConst = "username_password" +) + +// NewGetSecretOptions : Instantiate GetSecretOptions +func (*SecretsManagerV1) NewGetSecretOptions(secretType string, id string) *GetSecretOptions { + return &GetSecretOptions{ + SecretType: core.StringPtr(secretType), + ID: core.StringPtr(id), + } +} + +// SetSecretType : Allow user to set SecretType +func (options *GetSecretOptions) SetSecretType(secretType string) *GetSecretOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetID : Allow user to set ID +func (options *GetSecretOptions) SetID(id string) *GetSecretOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSecretOptions) SetHeaders(param map[string]string) *GetSecretOptions { + options.Headers = param + return options +} + +// GetSecretPoliciesOneOf : GetSecretPoliciesOneOf struct +// Models which "extend" this model: +// - GetSecretPoliciesOneOfGetSecretPolicyRotation +type GetSecretPoliciesOneOf struct { + // The metadata that describes the resource array. + Metadata *CollectionMetadata `json:"metadata,omitempty"` + + // A collection of resources. + Resources []GetSecretPoliciesOneOfResourcesItem `json:"resources,omitempty"` +} + +func (*GetSecretPoliciesOneOf) isaGetSecretPoliciesOneOf() bool { + return true +} + +type GetSecretPoliciesOneOfIntf interface { + isaGetSecretPoliciesOneOf() bool +} + +// UnmarshalGetSecretPoliciesOneOf unmarshals an instance of GetSecretPoliciesOneOf from the specified map of raw messages. +func UnmarshalGetSecretPoliciesOneOf(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GetSecretPoliciesOneOf) + err = core.UnmarshalModel(m, "metadata", &obj.Metadata, UnmarshalCollectionMetadata) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalGetSecretPoliciesOneOfResourcesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItem : Properties that are associated with a rotation policy. +type GetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItem struct { + // The v4 UUID that uniquely identifies the policy. + ID *string `json:"id" validate:"required"` + + // The Cloud Resource Name (CRN) that uniquely identifies your cloud resources. + CRN *string `json:"crn,omitempty"` + + // The date the policy was created. The date format follows RFC 3339. + CreationDate *strfmt.DateTime `json:"creation_date,omitempty"` + + // The unique identifier for the entity that created the policy. + CreatedBy *string `json:"created_by,omitempty"` + + // Updates when the policy is replaced or modified. The date format follows RFC 3339. + LastUpdateDate *strfmt.DateTime `json:"last_update_date,omitempty"` + + // The unique identifier for the entity that updated the policy. + UpdatedBy *string `json:"updated_by,omitempty"` + + // The MIME type that represents the policy. Currently, only the default is supported. + Type *string `json:"type" validate:"required"` + + // The secret rotation time interval. + Rotation *SecretPolicyRotationRotation `json:"rotation" validate:"required"` +} + +// Constants associated with the GetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItem.Type property. 
+// The MIME type that represents the policy. Currently, only the default is supported. +const ( + GetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItemTypeApplicationVndIBMSecretsManagerSecretPolicyJSONConst = "application/vnd.ibm.secrets-manager.secret.policy+json" +) + +// UnmarshalGetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItem unmarshals an instance of GetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItem from the specified map of raw messages. +func UnmarshalGetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItem) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "creation_date", &obj.CreationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_update_date", &obj.LastUpdateDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rotation", &obj.Rotation, UnmarshalSecretPolicyRotationRotation) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetSecretPoliciesOneOfResourcesItem : Properties that are associated with a rotation policy. +type GetSecretPoliciesOneOfResourcesItem struct { + // The v4 UUID that uniquely identifies the policy. + ID *string `json:"id" validate:"required"` + + // The Cloud Resource Name (CRN) that uniquely identifies your cloud resources. + CRN *string `json:"crn,omitempty"` + + // The date the policy was created. The date format follows RFC 3339. + CreationDate *strfmt.DateTime `json:"creation_date,omitempty"` + + // The unique identifier for the entity that created the policy. + CreatedBy *string `json:"created_by,omitempty"` + + // Updates when the policy is replaced or modified. The date format follows RFC 3339. + LastUpdateDate *strfmt.DateTime `json:"last_update_date,omitempty"` + + // The unique identifier for the entity that updated the policy. + UpdatedBy *string `json:"updated_by,omitempty"` + + // The MIME type that represents the policy. Currently, only the default is supported. + Type *string `json:"type" validate:"required"` + + // The secret rotation time interval. + Rotation *SecretPolicyRotationRotation `json:"rotation" validate:"required"` +} + +// Constants associated with the GetSecretPoliciesOneOfResourcesItem.Type property. +// The MIME type that represents the policy. Currently, only the default is supported. +const ( + GetSecretPoliciesOneOfResourcesItemTypeApplicationVndIBMSecretsManagerSecretPolicyJSONConst = "application/vnd.ibm.secrets-manager.secret.policy+json" +) + +// UnmarshalGetSecretPoliciesOneOfResourcesItem unmarshals an instance of GetSecretPoliciesOneOfResourcesItem from the specified map of raw messages. 
+func UnmarshalGetSecretPoliciesOneOfResourcesItem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GetSecretPoliciesOneOfResourcesItem) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "creation_date", &obj.CreationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_update_date", &obj.LastUpdateDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_by", &obj.UpdatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rotation", &obj.Rotation, UnmarshalSecretPolicyRotationRotation) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListAllSecretsOptions : The ListAllSecrets options. +type ListAllSecretsOptions struct { + // The number of secrets to retrieve. By default, list operations return the first 200 items. To retrieve a different + // set of items, use `limit` with `offset` to page through your available resources. + // + // **Usage:** If you have 20 secrets in your instance, and you want to retrieve only the first 5 secrets, use + // `../secrets/{secret-type}?limit=5`. + Limit *int64 + + // The number of secrets to skip. By specifying `offset`, you retrieve a subset of items that starts with the `offset` + // value. Use `offset` with `limit` to page through your available resources. + // + // **Usage:** If you have 100 secrets in your instance, and you want to retrieve secrets 26 through 50, use + // `../secrets/{secret-type}?offset=25&limit=25`. + Offset *int64 + + // Filter secrets that contain the specified string. The fields that are searched include: id, name, description, + // labels, secret_type. + // + // **Usage:** If you want to list only the secrets that contain the string "text", use + // `../secrets/{secret-type}?search=text`. + Search *string + + // Sort a list of secrets by the specified field. + // + // **Usage:** To sort a list of secrets by their creation date, use + // `../secrets/{secret-type}?sort_by=creation_date`. + SortBy *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListAllSecretsOptions.SortBy property. +// Sort a list of secrets by the specified field. +// +// **Usage:** To sort a list of secrets by their creation date, use +// `../secrets/{secret-type}?sort_by=creation_date`. 
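A short sketch of consuming the rotation-policy models above (illustrative; assumes the caller already holds the base GetSecretPoliciesOneOf, for example from the GetPolicy operation earlier in this file, and the imports from the first sketch).

    func describeRotationPolicies(p *sm.GetSecretPoliciesOneOf) {
        for _, item := range p.Resources {
            // Interval and Unit come from the required SecretPolicyRotationRotation field.
            fmt.Printf("policy %s: rotate every %d %s(s)\n",
                *item.ID, *item.Rotation.Interval, *item.Rotation.Unit)
        }
    }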
+const ( + ListAllSecretsOptionsSortByCreationDateConst = "creation_date" + ListAllSecretsOptionsSortByExpirationDateConst = "expiration_date" + ListAllSecretsOptionsSortByIDConst = "id" + ListAllSecretsOptionsSortByNameConst = "name" + ListAllSecretsOptionsSortBySecretTypeConst = "secret_type" +) + +// NewListAllSecretsOptions : Instantiate ListAllSecretsOptions +func (*SecretsManagerV1) NewListAllSecretsOptions() *ListAllSecretsOptions { + return &ListAllSecretsOptions{} +} + +// SetLimit : Allow user to set Limit +func (options *ListAllSecretsOptions) SetLimit(limit int64) *ListAllSecretsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListAllSecretsOptions) SetOffset(offset int64) *ListAllSecretsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetSearch : Allow user to set Search +func (options *ListAllSecretsOptions) SetSearch(search string) *ListAllSecretsOptions { + options.Search = core.StringPtr(search) + return options +} + +// SetSortBy : Allow user to set SortBy +func (options *ListAllSecretsOptions) SetSortBy(sortBy string) *ListAllSecretsOptions { + options.SortBy = core.StringPtr(sortBy) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListAllSecretsOptions) SetHeaders(param map[string]string) *ListAllSecretsOptions { + options.Headers = param + return options +} + +// ListSecretGroupsOptions : The ListSecretGroups options. +type ListSecretGroupsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListSecretGroupsOptions : Instantiate ListSecretGroupsOptions +func (*SecretsManagerV1) NewListSecretGroupsOptions() *ListSecretGroupsOptions { + return &ListSecretGroupsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListSecretGroupsOptions) SetHeaders(param map[string]string) *ListSecretGroupsOptions { + options.Headers = param + return options +} + +// ListSecrets : The base schema for listing secrets. +type ListSecrets struct { + // The metadata that describes the resource array. + Metadata *CollectionMetadata `json:"metadata" validate:"required"` + + // A collection of resources. + Resources []SecretResourceIntf `json:"resources,omitempty"` +} + +// UnmarshalListSecrets unmarshals an instance of ListSecrets from the specified map of raw messages. +func UnmarshalListSecrets(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ListSecrets) + err = core.UnmarshalModel(m, "metadata", &obj.Metadata, UnmarshalCollectionMetadata) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalSecretResource) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListSecretsOptions : The ListSecrets options. +type ListSecretsOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The number of secrets to retrieve. By default, list operations return the first 200 items. To retrieve a different + // set of items, use `limit` with `offset` to page through your available resources. + // + // **Usage:** If you have 20 secrets in your instance, and you want to retrieve only the first 5 secrets, use + // `../secrets/{secret-type}?limit=5`. + Limit *int64 + + // The number of secrets to skip. By specifying `offset`, you retrieve a subset of items that starts with the `offset` + // value. 
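A pagination sketch using the limit/offset semantics documented for ListAllSecretsOptions above (ListSecretsOptions below follows the same pattern). Illustrative only; it assumes the ListAllSecrets operation these options are named for and the `svc` client from the first sketch.

    func listAllSecrets(svc *sm.SecretsManagerV1) ([]sm.SecretResourceIntf, error) {
        var all []sm.SecretResourceIntf
        offset := int64(0)
        for {
            opts := svc.NewListAllSecretsOptions().SetLimit(25).SetOffset(offset)
            page, _, err := svc.ListAllSecrets(opts)
            if err != nil {
                return nil, err
            }
            if len(page.Resources) == 0 {
                return all, nil // past the last page
            }
            all = append(all, page.Resources...)
            offset += int64(len(page.Resources))
        }
    }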
Use `offset` with `limit` to page through your available resources. + // + // **Usage:** If you have 100 secrets in your instance, and you want to retrieve secrets 26 through 50, use + // `../secrets/{secret-type}?offset=25&limit=25`. + Offset *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListSecretsOptions.SecretType property. +// The secret type. +const ( + ListSecretsOptionsSecretTypeArbitraryConst = "arbitrary" + ListSecretsOptionsSecretTypeIamCredentialsConst = "iam_credentials" + ListSecretsOptionsSecretTypeUsernamePasswordConst = "username_password" +) + +// NewListSecretsOptions : Instantiate ListSecretsOptions +func (*SecretsManagerV1) NewListSecretsOptions(secretType string) *ListSecretsOptions { + return &ListSecretsOptions{ + SecretType: core.StringPtr(secretType), + } +} + +// SetSecretType : Allow user to set SecretType +func (options *ListSecretsOptions) SetSecretType(secretType string) *ListSecretsOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListSecretsOptions) SetLimit(limit int64) *ListSecretsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetOffset : Allow user to set Offset +func (options *ListSecretsOptions) SetOffset(offset int64) *ListSecretsOptions { + options.Offset = core.Int64Ptr(offset) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListSecretsOptions) SetHeaders(param map[string]string) *ListSecretsOptions { + options.Headers = param + return options +} + +// PutConfigOptions : The PutConfig options. +type PutConfigOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The base request for setting secret engine configuration. + EngineConfigOneOf EngineConfigOneOfIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the PutConfigOptions.SecretType property. +// The secret type. +const ( + PutConfigOptionsSecretTypeIamCredentialsConst = "iam_credentials" +) + +// NewPutConfigOptions : Instantiate PutConfigOptions +func (*SecretsManagerV1) NewPutConfigOptions(secretType string, engineConfigOneOf EngineConfigOneOfIntf) *PutConfigOptions { + return &PutConfigOptions{ + SecretType: core.StringPtr(secretType), + EngineConfigOneOf: engineConfigOneOf, + } +} + +// SetSecretType : Allow user to set SecretType +func (options *PutConfigOptions) SetSecretType(secretType string) *PutConfigOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetEngineConfigOneOf : Allow user to set EngineConfigOneOf +func (options *PutConfigOptions) SetEngineConfigOneOf(engineConfigOneOf EngineConfigOneOfIntf) *PutConfigOptions { + options.EngineConfigOneOf = engineConfigOneOf + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PutConfigOptions) SetHeaders(param map[string]string) *PutConfigOptions { + options.Headers = param + return options +} + +// PutPolicyOptions : The PutPolicy options. +type PutPolicyOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The v4 UUID that uniquely identifies the secret. + ID *string `validate:"required,ne="` + + // The metadata that describes the resource array. + Metadata *CollectionMetadata `validate:"required"` + + // A collection of resources. 
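A sketch for PutConfigOptions above (illustrative). NewEngineConfigOneOfIamSecretEngineRootConfig appears later in this file; PutConfig itself, and its returning only a detailed response (the operation has no result body), are assumptions.

    func configureIamEngine(svc *sm.SecretsManagerV1, rootAPIKey string) error {
        cfg, err := svc.NewEngineConfigOneOfIamSecretEngineRootConfig(rootAPIKey)
        if err != nil {
            return err
        }
        opts := svc.NewPutConfigOptions(sm.PutConfigOptionsSecretTypeIamCredentialsConst, cfg)
        _, err = svc.PutConfig(opts) // assumed signature: (response, error)
        return err
    }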
+ Resources []SecretPolicyRotation `validate:"required"` + + // The type of policy that is associated with the specified secret. + Policy *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the PutPolicyOptions.SecretType property. +// The secret type. +const ( + PutPolicyOptionsSecretTypeUsernamePasswordConst = "username_password" +) + +// Constants associated with the PutPolicyOptions.Policy property. +// The type of policy that is associated with the specified secret. +const ( + PutPolicyOptionsPolicyRotationConst = "rotation" +) + +// NewPutPolicyOptions : Instantiate PutPolicyOptions +func (*SecretsManagerV1) NewPutPolicyOptions(secretType string, id string, metadata *CollectionMetadata, resources []SecretPolicyRotation) *PutPolicyOptions { + return &PutPolicyOptions{ + SecretType: core.StringPtr(secretType), + ID: core.StringPtr(id), + Metadata: metadata, + Resources: resources, + } +} + +// SetSecretType : Allow user to set SecretType +func (options *PutPolicyOptions) SetSecretType(secretType string) *PutPolicyOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetID : Allow user to set ID +func (options *PutPolicyOptions) SetID(id string) *PutPolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetMetadata : Allow user to set Metadata +func (options *PutPolicyOptions) SetMetadata(metadata *CollectionMetadata) *PutPolicyOptions { + options.Metadata = metadata + return options +} + +// SetResources : Allow user to set Resources +func (options *PutPolicyOptions) SetResources(resources []SecretPolicyRotation) *PutPolicyOptions { + options.Resources = resources + return options +} + +// SetPolicy : Allow user to set Policy +func (options *PutPolicyOptions) SetPolicy(policy string) *PutPolicyOptions { + options.Policy = core.StringPtr(policy) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *PutPolicyOptions) SetHeaders(param map[string]string) *PutPolicyOptions { + options.Headers = param + return options +} + +// SecretActionOneOf : SecretActionOneOf struct +// Models which "extend" this model: +// - SecretActionOneOfRotateArbitrarySecretBody +// - SecretActionOneOfRotateUsernamePasswordSecretBody +// - SecretActionOneOfDeleteCredentialsForIamSecret +type SecretActionOneOf struct { + // The new secret data to assign to an `arbitrary` secret. + Payload *string `json:"payload,omitempty"` + + // The new password to assign to a `username_password` secret. + Password *string `json:"password,omitempty"` + + // The service ID that you want to delete. It is deleted together with its API key. + ServiceID *string `json:"service_id,omitempty"` +} + +func (*SecretActionOneOf) isaSecretActionOneOf() bool { + return true +} + +type SecretActionOneOfIntf interface { + isaSecretActionOneOf() bool +} + +// UnmarshalSecretActionOneOf unmarshals an instance of SecretActionOneOf from the specified map of raw messages. 
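A sketch of submitting a rotation policy with PutPolicyOptions above (illustrative). NewSecretPolicyRotation and NewSecretPolicyRotationRotation appear later in this file; NewCollectionMetadata is assumed from the CollectionMetadata section earlier, and the MIME type is the policy constant documented here.

    func setMonthlyRotation(svc *sm.SecretsManagerV1, secretID string) error {
        rotation, err := svc.NewSecretPolicyRotationRotation(1, sm.SecretPolicyRotationRotationUnitMonthConst)
        if err != nil {
            return err
        }
        policy, err := svc.NewSecretPolicyRotation("application/vnd.ibm.secrets-manager.secret.policy+json", rotation)
        if err != nil {
            return err
        }
        // NewCollectionMetadata(collectionType, collectionTotal) is assumed from earlier in this file.
        meta, err := svc.NewCollectionMetadata("application/vnd.ibm.secrets-manager.secret.policy+json", 1)
        if err != nil {
            return err
        }
        opts := svc.NewPutPolicyOptions(sm.PutPolicyOptionsSecretTypeUsernamePasswordConst,
            secretID, meta, []sm.SecretPolicyRotation{*policy})
        _, _, err = svc.PutPolicy(opts)
        return err
    }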
+func UnmarshalSecretActionOneOf(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretActionOneOf) + err = core.UnmarshalPrimitive(m, "payload", &obj.Payload) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "password", &obj.Password) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "service_id", &obj.ServiceID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretGroupDef : The base schema definition for a secret group. +type SecretGroupDef struct { + // The metadata that describes the resource array. + Metadata *CollectionMetadata `json:"metadata" validate:"required"` + + // A collection of resources. + Resources []SecretGroupResource `json:"resources" validate:"required"` +} + +// NewSecretGroupDef : Instantiate SecretGroupDef (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretGroupDef(metadata *CollectionMetadata, resources []SecretGroupResource) (model *SecretGroupDef, err error) { + model = &SecretGroupDef{ + Metadata: metadata, + Resources: resources, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSecretGroupDef unmarshals an instance of SecretGroupDef from the specified map of raw messages. +func UnmarshalSecretGroupDef(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretGroupDef) + err = core.UnmarshalModel(m, "metadata", &obj.Metadata, UnmarshalCollectionMetadata) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalSecretGroupResource) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretGroupMetadataUpdatable : Metadata properties that describe a secret group. +type SecretGroupMetadataUpdatable struct { + // A human-readable name to assign to your secret group. + // + // To protect your privacy, do not use personal data, such as your name or location, as a name for your secret group. + Name *string `json:"name,omitempty"` + + // An extended description of your secret group. + // + // To protect your privacy, do not use personal data, such as your name or location, as a description for your secret + // group. + Description *string `json:"description,omitempty"` +} + +// UnmarshalSecretGroupMetadataUpdatable unmarshals an instance of SecretGroupMetadataUpdatable from the specified map of raw messages. +func UnmarshalSecretGroupMetadataUpdatable(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretGroupMetadataUpdatable) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretGroupResource : Properties that describe a secret group. +type SecretGroupResource struct { + // The v4 UUID that uniquely identifies the secret group. + ID *string `json:"id,omitempty"` + + // A human-readable name to assign to your secret group. + // + // To protect your privacy, do not use personal data, such as your name or location, as a name for your secret group. + Name *string `json:"name,omitempty"` + + // An extended description of your secret group. + // + // To protect your privacy, do not use personal data, such as your name or location, as a description for your secret + // group. 
+ Description *string `json:"description,omitempty"` + + // The date the secret group was created. The date format follows RFC 3339. + CreationDate *strfmt.DateTime `json:"creation_date,omitempty"` + + // Updates when the metadata of the secret group is modified. The date format follows RFC 3339. + LastUpdateDate *strfmt.DateTime `json:"last_update_date,omitempty"` + + // The MIME type that represents the secret group. + Type *string `json:"type,omitempty"` + + // Allows users to set arbitrary properties + additionalProperties map[string]interface{} +} + +// SetProperty allows the user to set an arbitrary property on an instance of SecretGroupResource +func (o *SecretGroupResource) SetProperty(key string, value interface{}) { + if o.additionalProperties == nil { + o.additionalProperties = make(map[string]interface{}) + } + o.additionalProperties[key] = value +} + +// GetProperty allows the user to retrieve an arbitrary property from an instance of SecretGroupResource +func (o *SecretGroupResource) GetProperty(key string) interface{} { + return o.additionalProperties[key] +} + +// GetProperties allows the user to retrieve the map of arbitrary properties from an instance of SecretGroupResource +func (o *SecretGroupResource) GetProperties() map[string]interface{} { + return o.additionalProperties +} + +// MarshalJSON performs custom serialization for instances of SecretGroupResource +func (o *SecretGroupResource) MarshalJSON() (buffer []byte, err error) { + m := make(map[string]interface{}) + if len(o.additionalProperties) > 0 { + for k, v := range o.additionalProperties { + m[k] = v + } + } + if o.ID != nil { + m["id"] = o.ID + } + if o.Name != nil { + m["name"] = o.Name + } + if o.Description != nil { + m["description"] = o.Description + } + if o.CreationDate != nil { + m["creation_date"] = o.CreationDate + } + if o.LastUpdateDate != nil { + m["last_update_date"] = o.LastUpdateDate + } + if o.Type != nil { + m["type"] = o.Type + } + buffer, err = json.Marshal(m) + return +} + +// UnmarshalSecretGroupResource unmarshals an instance of SecretGroupResource from the specified map of raw messages. +func UnmarshalSecretGroupResource(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretGroupResource) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + delete(m, "id") + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + delete(m, "name") + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + delete(m, "description") + err = core.UnmarshalPrimitive(m, "creation_date", &obj.CreationDate) + if err != nil { + return + } + delete(m, "creation_date") + err = core.UnmarshalPrimitive(m, "last_update_date", &obj.LastUpdateDate) + if err != nil { + return + } + delete(m, "last_update_date") + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + delete(m, "type") + for k := range m { + var v interface{} + e := core.UnmarshalPrimitive(m, k, &v) + if e != nil { + err = e + return + } + obj.SetProperty(k, v) + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretMetadata : Metadata properties that describe a secret. +type SecretMetadata struct { + // The unique ID of the secret. + ID *string `json:"id,omitempty"` + + // Labels that you can use to filter for secrets in your instance. + // + // Up to 30 labels can be created. Labels can be between 2-30 characters, including spaces. 
Special characters not + // permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character (|). + // + // To protect your privacy, do not use personal data, such as your name or location, as a label for your secret. + Labels []string `json:"labels,omitempty"` + + // A human-readable alias to assign to your secret. + // + // To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret. + Name *string `json:"name" validate:"required"` + + // An extended description of your secret. + // + // To protect your privacy, do not use personal data, such as your name or location, as a description for your secret. + Description *string `json:"description,omitempty"` + + // The v4 UUID that uniquely identifies the secret group to assign to this secret. + // + // If you omit this parameter, your secret is assigned to the `default` secret group. + SecretGroupID *string `json:"secret_group_id,omitempty"` + + // The secret state based on NIST SP 800-57. States are integers and correspond to the Pre-activation = 0, Active = 1, + // Suspended = 2, Deactivated = 3, and Destroyed = 5 values. + State *int64 `json:"state,omitempty"` + + // A text representation of the secret state. + StateDescription *string `json:"state_description,omitempty"` + + // The secret type. + SecretType *string `json:"secret_type,omitempty"` + + // The date the secret material expires. The date format follows RFC 3339. + // + // You can set an expiration date on supported secret types at their creation. If you create a secret without + // specifying an expiration date, the secret does not expire. The `expiration_date` field is supported for the + // following secret types: + // + // - `arbitrary` + // - `username_password`. + ExpirationDate *strfmt.DateTime `json:"expiration_date,omitempty"` + + // The time-to-live (TTL) or lease duration to assign to generated credentials. + // + // For `iam_credentials` secrets, the TTL defines for how long each generated API key remains valid. The value can be + // either an integer that specifies the number of seconds, or the string representation of a duration, such as `120m` + // or `24h`. + TTL interface{} `json:"ttl,omitempty"` + + // The Cloud Resource Name (CRN) that uniquely identifies the resource. + CRN *string `json:"crn,omitempty"` + + // The date the secret was created. The date format follows RFC 3339. + CreationDate *strfmt.DateTime `json:"creation_date,omitempty"` + + // The unique identifier for the entity that created the secret. + CreatedBy *string `json:"created_by,omitempty"` + + // Updates when any part of the secret metadata is modified. The date format follows RFC 3339. + LastUpdateDate *strfmt.DateTime `json:"last_update_date,omitempty"` +} + +// Constants associated with the SecretMetadata.SecretType property. +// The secret type. +const ( + SecretMetadataSecretTypeArbitraryConst = "arbitrary" + SecretMetadataSecretTypeIamCredentialsConst = "iam_credentials" + SecretMetadataSecretTypeUsernamePasswordConst = "username_password" +) + +// NewSecretMetadata : Instantiate SecretMetadata (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretMetadata(name string) (model *SecretMetadata, err error) { + model = &SecretMetadata{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSecretMetadata unmarshals an instance of SecretMetadata from the specified map of raw messages. 
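A sketch of the additional-properties mechanism on SecretGroupResource above (illustrative; needs "encoding/json" alongside the imports from the first sketch): keys set with SetProperty ride alongside the typed fields through the custom MarshalJSON.

    func tagGroup() ([]byte, error) {
        group := sm.SecretGroupResource{Name: core.StringPtr("example-group")}
        group.SetProperty("cost_center", "1234") // arbitrary key, preserved by MarshalJSON
        return json.Marshal(&group)              // {"cost_center":"1234","name":"example-group"}
    }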
+func UnmarshalSecretMetadata(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretMetadata) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "labels", &obj.Labels) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_group_id", &obj.SecretGroupID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state_description", &obj.StateDescription) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_type", &obj.SecretType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "expiration_date", &obj.ExpirationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ttl", &obj.TTL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "creation_date", &obj.CreationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_update_date", &obj.LastUpdateDate) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretMetadataRequest : The metadata of a secret. +type SecretMetadataRequest struct { + // The metadata that describes the resource array. + Metadata *CollectionMetadata `json:"metadata" validate:"required"` + + // A collection of resources. + Resources []SecretMetadata `json:"resources" validate:"required"` +} + +// NewSecretMetadataRequest : Instantiate SecretMetadataRequest (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretMetadataRequest(metadata *CollectionMetadata, resources []SecretMetadata) (model *SecretMetadataRequest, err error) { + model = &SecretMetadataRequest{ + Metadata: metadata, + Resources: resources, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSecretMetadataRequest unmarshals an instance of SecretMetadataRequest from the specified map of raw messages. +func UnmarshalSecretMetadataRequest(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretMetadataRequest) + err = core.UnmarshalModel(m, "metadata", &obj.Metadata, UnmarshalCollectionMetadata) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalSecretMetadata) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretPolicyRotation : Properties that are associated with a rotation policy. +type SecretPolicyRotation struct { + // The MIME type that represents the policy. Currently, only the default is supported. + Type *string `json:"type" validate:"required"` + + // The secret rotation time interval. + Rotation *SecretPolicyRotationRotation `json:"rotation" validate:"required"` +} + +// Constants associated with the SecretPolicyRotation.Type property. +// The MIME type that represents the policy. Currently, only the default is supported. 
+const ( + SecretPolicyRotationTypeApplicationVndIBMSecretsManagerSecretPolicyJSONConst = "application/vnd.ibm.secrets-manager.secret.policy+json" +) + +// NewSecretPolicyRotation : Instantiate SecretPolicyRotation (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretPolicyRotation(typeVar string, rotation *SecretPolicyRotationRotation) (model *SecretPolicyRotation, err error) { + model = &SecretPolicyRotation{ + Type: core.StringPtr(typeVar), + Rotation: rotation, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSecretPolicyRotation unmarshals an instance of SecretPolicyRotation from the specified map of raw messages. +func UnmarshalSecretPolicyRotation(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretPolicyRotation) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rotation", &obj.Rotation, UnmarshalSecretPolicyRotationRotation) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretPolicyRotationRotation : The secret rotation time interval. +type SecretPolicyRotationRotation struct { + // Specifies the length of the secret rotation time interval. + Interval *int64 `json:"interval" validate:"required"` + + // Specifies the units for the secret rotation time interval. + Unit *string `json:"unit" validate:"required"` +} + +// Constants associated with the SecretPolicyRotationRotation.Unit property. +// Specifies the units for the secret rotation time interval. +const ( + SecretPolicyRotationRotationUnitDayConst = "day" + SecretPolicyRotationRotationUnitMonthConst = "month" +) + +// NewSecretPolicyRotationRotation : Instantiate SecretPolicyRotationRotation (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretPolicyRotationRotation(interval int64, unit string) (model *SecretPolicyRotationRotation, err error) { + model = &SecretPolicyRotationRotation{ + Interval: core.Int64Ptr(interval), + Unit: core.StringPtr(unit), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalSecretPolicyRotationRotation unmarshals an instance of SecretPolicyRotationRotation from the specified map of raw messages. +func UnmarshalSecretPolicyRotationRotation(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretPolicyRotationRotation) + err = core.UnmarshalPrimitive(m, "interval", &obj.Interval) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "unit", &obj.Unit) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretResource : SecretResource struct +// Models which "extend" this model: +// - SecretResourceArbitrarySecretResource +// - SecretResourceUsernamePasswordSecretResource +// - SecretResourceIamSecretResource +type SecretResource struct { + // The MIME type that represents the secret. + Type *string `json:"type,omitempty"` + + // The v4 UUID that uniquely identifies the secret. + ID *string `json:"id,omitempty"` + + // A human-readable alias to assign to your secret. + // + // To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret. + Name *string `json:"name,omitempty"` + + // An extended description of your secret. + // + // To protect your privacy, do not use personal data, such as your name or location, as a description for your secret. 
+ Description *string `json:"description,omitempty"` + + // The v4 UUID that uniquely identifies the secret group to assign to this secret. + // + // If you omit this parameter, your secret is assigned to the `default` secret group. + SecretGroupID *string `json:"secret_group_id,omitempty"` + + // Labels that you can use to filter for secrets in your instance. + // + // Up to 30 labels can be created. Labels can be between 2-30 characters, including spaces. Special characters not + // permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character (|). + // + // To protect your privacy, do not use personal data, such as your name or location, as a label for your secret. + Labels []string `json:"labels,omitempty"` + + // The secret state based on NIST SP 800-57. States are integers and correspond to the Pre-activation = 0, Active = 1, + // Suspended = 2, Deactivated = 3, and Destroyed = 5 values. + State *int64 `json:"state,omitempty"` + + // A text representation of the secret state. + StateDescription *string `json:"state_description,omitempty"` + + // The secret type. + SecretType *string `json:"secret_type,omitempty"` + + // The Cloud Resource Name (CRN) that uniquely identifies your Secrets Manager resource. + CRN *string `json:"crn,omitempty"` + + // The date the secret was created. The date format follows RFC 3339. + CreationDate *strfmt.DateTime `json:"creation_date,omitempty"` + + // The unique identifier for the entity that created the secret. + CreatedBy *string `json:"created_by,omitempty"` + + // Updates when the actual secret is modified. The date format follows RFC 3339. + LastUpdateDate *strfmt.DateTime `json:"last_update_date,omitempty"` + + // An array that contains metadata for each secret version. + Versions []SecretVersion `json:"versions,omitempty"` + + // The date the secret material expires. The date format follows RFC 3339. + // + // You can set an expiration date on supported secret types at their creation. If you create a secret without + // specifying an expiration date, the secret does not expire. The `expiration_date` field is supported for the + // following secret types: + // + // - `arbitrary` + // - `username_password`. + ExpirationDate *strfmt.DateTime `json:"expiration_date,omitempty"` + + // The new secret data to assign to an `arbitrary` secret. + Payload *string `json:"payload,omitempty"` + + SecretData interface{} `json:"secret_data,omitempty"` + + // The username to assign to this secret. + Username *string `json:"username,omitempty"` + + // The password to assign to this secret. + Password *string `json:"password,omitempty"` + + // The date that the secret is scheduled for automatic rotation. + // + // The service automatically creates a new version of the secret on its next rotation date. This field exists only for + // secrets that can be auto-rotated and have an existing rotation policy. + NextRotationDate *strfmt.DateTime `json:"next_rotation_date,omitempty"` + + // The time-to-live (TTL) or lease duration to assign to generated credentials. + // + // For `iam_credentials` secrets, the TTL defines for how long each generated API key remains valid. The value can be + // either an integer that specifies the number of seconds, or the string representation of a duration, such as `120m` + // or `24h`. + TTL interface{} `json:"ttl,omitempty"` + + // The access groups that define the capabilities of the service ID and API key that are generated for an + // `iam_credentials` secret. 
+ // + // **Tip:** To find the ID of an access group, go to **Manage > Access (IAM) > Access groups** in the IBM Cloud + // console. Select the access group to inspect, and click **Details** to view its ID. + AccessGroups []string `json:"access_groups,omitempty"` + + // The API key that is generated for this secret. + // + // After the secret reaches the end of its lease (see the `ttl` field), the API key is deleted automatically. If you + // want to continue to use the same API key for future read operations, see the `reuse_api_key` field. + APIKey *string `json:"api_key,omitempty"` + + // The service ID under which the API key (see the `api_key` field) is created. This service ID is added to the access + // groups that you assign for this secret. + ServiceID *string `json:"service_id,omitempty"` + + // Set to `true` to reuse the service ID and API key for this secret. + // + // Use this field to control whether to use the same service ID and API key for future read operations on this secret. + // If set to `true`, the service reuses the current credentials. If set to `false`, a new service ID and API key are + // generated each time that the secret is read or accessed. + ReuseAPIKey *bool `json:"reuse_api_key,omitempty"` +} + +// Constants associated with the SecretResource.SecretType property. +// The secret type. +const ( + SecretResourceSecretTypeArbitraryConst = "arbitrary" + SecretResourceSecretTypeIamCredentialsConst = "iam_credentials" + SecretResourceSecretTypeUsernamePasswordConst = "username_password" +) + +func (*SecretResource) isaSecretResource() bool { + return true +} + +type SecretResourceIntf interface { + isaSecretResource() bool +} + +// UnmarshalSecretResource unmarshals an instance of SecretResource from the specified map of raw messages.
+func UnmarshalSecretResource(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretResource) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_group_id", &obj.SecretGroupID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "labels", &obj.Labels) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state_description", &obj.StateDescription) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_type", &obj.SecretType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "creation_date", &obj.CreationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_update_date", &obj.LastUpdateDate) + if err != nil { + return + } + err = core.UnmarshalModel(m, "versions", &obj.Versions, UnmarshalSecretVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "expiration_date", &obj.ExpirationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "payload", &obj.Payload) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_data", &obj.SecretData) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "username", &obj.Username) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "password", &obj.Password) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next_rotation_date", &obj.NextRotationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ttl", &obj.TTL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "access_groups", &obj.AccessGroups) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "api_key", &obj.APIKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "service_id", &obj.ServiceID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "reuse_api_key", &obj.ReuseAPIKey) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretVersion : Properties that are associated with a specific secret version. +type SecretVersion struct { + // The ID of the secret version. + ID *string `json:"id,omitempty"` + + // The date that the version of the secret was created. + CreationDate *strfmt.DateTime `json:"creation_date,omitempty"` + + // The unique identifier for the entity that created the secret. + CreatedBy *string `json:"created_by,omitempty"` + + // Indicates whether the version of the secret was created by automatic rotation. + AutoRotated *bool `json:"auto_rotated,omitempty"` +} + +// UnmarshalSecretVersion unmarshals an instance of SecretVersion from the specified map of raw messages. 
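A sketch of consuming the SecretResource interface above (illustrative): because UnmarshalSecretResource produces the base type, a response's SecretResourceIntf values can be asserted back to *SecretResource to reach the full field set.

    func printSecret(r sm.SecretResourceIntf) {
        if res, ok := r.(*sm.SecretResource); ok {
            // Name and SecretType are assumed populated in the response.
            fmt.Printf("%s (%s), %d version(s)\n", *res.Name, *res.SecretType, len(res.Versions))
        }
    }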
+func UnmarshalSecretVersion(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretVersion) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "creation_date", &obj.CreationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "auto_rotated", &obj.AutoRotated) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UpdateSecretGroupMetadataOptions : The UpdateSecretGroupMetadata options. +type UpdateSecretGroupMetadataOptions struct { + // The v4 UUID that uniquely identifies the secret group. + ID *string `validate:"required,ne="` + + // The metadata that describes the resource array. + Metadata *CollectionMetadata `validate:"required"` + + // A collection of resources. + Resources []SecretGroupMetadataUpdatable `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateSecretGroupMetadataOptions : Instantiate UpdateSecretGroupMetadataOptions +func (*SecretsManagerV1) NewUpdateSecretGroupMetadataOptions(id string, metadata *CollectionMetadata, resources []SecretGroupMetadataUpdatable) *UpdateSecretGroupMetadataOptions { + return &UpdateSecretGroupMetadataOptions{ + ID: core.StringPtr(id), + Metadata: metadata, + Resources: resources, + } +} + +// SetID : Allow user to set ID +func (options *UpdateSecretGroupMetadataOptions) SetID(id string) *UpdateSecretGroupMetadataOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetMetadata : Allow user to set Metadata +func (options *UpdateSecretGroupMetadataOptions) SetMetadata(metadata *CollectionMetadata) *UpdateSecretGroupMetadataOptions { + options.Metadata = metadata + return options +} + +// SetResources : Allow user to set Resources +func (options *UpdateSecretGroupMetadataOptions) SetResources(resources []SecretGroupMetadataUpdatable) *UpdateSecretGroupMetadataOptions { + options.Resources = resources + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSecretGroupMetadataOptions) SetHeaders(param map[string]string) *UpdateSecretGroupMetadataOptions { + options.Headers = param + return options +} + +// UpdateSecretMetadataOptions : The UpdateSecretMetadata options. +type UpdateSecretMetadataOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The v4 UUID that uniquely identifies the secret. + ID *string `validate:"required,ne="` + + // The metadata that describes the resource array. + Metadata *CollectionMetadata `validate:"required"` + + // A collection of resources. + Resources []SecretMetadata `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateSecretMetadataOptions.SecretType property. +// The secret type. 
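A sketch of renaming a secret group with UpdateSecretGroupMetadataOptions above (illustrative; the secret-group collection MIME type and NewCollectionMetadata are assumptions based on the rest of this file).

    func renameGroup(svc *sm.SecretsManagerV1, groupID, newName string) error {
        meta, err := svc.NewCollectionMetadata("application/vnd.ibm.secrets-manager.secret.group+json", 1) // assumed MIME type
        if err != nil {
            return err
        }
        updates := []sm.SecretGroupMetadataUpdatable{{Name: core.StringPtr(newName)}}
        opts := svc.NewUpdateSecretGroupMetadataOptions(groupID, meta, updates)
        _, _, err = svc.UpdateSecretGroupMetadata(opts)
        return err
    }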
+const ( + UpdateSecretMetadataOptionsSecretTypeArbitraryConst = "arbitrary" + UpdateSecretMetadataOptionsSecretTypeIamCredentialsConst = "iam_credentials" + UpdateSecretMetadataOptionsSecretTypeUsernamePasswordConst = "username_password" +) + +// NewUpdateSecretMetadataOptions : Instantiate UpdateSecretMetadataOptions +func (*SecretsManagerV1) NewUpdateSecretMetadataOptions(secretType string, id string, metadata *CollectionMetadata, resources []SecretMetadata) *UpdateSecretMetadataOptions { + return &UpdateSecretMetadataOptions{ + SecretType: core.StringPtr(secretType), + ID: core.StringPtr(id), + Metadata: metadata, + Resources: resources, + } +} + +// SetSecretType : Allow user to set SecretType +func (options *UpdateSecretMetadataOptions) SetSecretType(secretType string) *UpdateSecretMetadataOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateSecretMetadataOptions) SetID(id string) *UpdateSecretMetadataOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetMetadata : Allow user to set Metadata +func (options *UpdateSecretMetadataOptions) SetMetadata(metadata *CollectionMetadata) *UpdateSecretMetadataOptions { + options.Metadata = metadata + return options +} + +// SetResources : Allow user to set Resources +func (options *UpdateSecretMetadataOptions) SetResources(resources []SecretMetadata) *UpdateSecretMetadataOptions { + options.Resources = resources + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSecretMetadataOptions) SetHeaders(param map[string]string) *UpdateSecretMetadataOptions { + options.Headers = param + return options +} + +// UpdateSecretOptions : The UpdateSecret options. +type UpdateSecretOptions struct { + // The secret type. + SecretType *string `validate:"required,ne="` + + // The v4 UUID that uniquely identifies the secret. + ID *string `validate:"required,ne="` + + // The action to perform on the specified secret. + Action *string `validate:"required"` + + // The base request for invoking an action on a secret. + SecretActionOneOf SecretActionOneOfIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the UpdateSecretOptions.SecretType property. +// The secret type. +const ( + UpdateSecretOptionsSecretTypeArbitraryConst = "arbitrary" + UpdateSecretOptionsSecretTypeIamCredentialsConst = "iam_credentials" + UpdateSecretOptionsSecretTypeUsernamePasswordConst = "username_password" +) + +// Constants associated with the UpdateSecretOptions.Action property. +// The action to perform on the specified secret. 
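A sketch pairing SecretMetadata (above) with UpdateSecretMetadataOptions (illustrative; the secret collection MIME type and NewCollectionMetadata are assumptions).

    func renameSecret(svc *sm.SecretsManagerV1, secretID, newName string) error {
        collection, err := svc.NewCollectionMetadata("application/vnd.ibm.secrets-manager.secret+json", 1) // assumed MIME type
        if err != nil {
            return err
        }
        meta, err := svc.NewSecretMetadata(newName) // only Name is required
        if err != nil {
            return err
        }
        opts := svc.NewUpdateSecretMetadataOptions(sm.UpdateSecretMetadataOptionsSecretTypeArbitraryConst,
            secretID, collection, []sm.SecretMetadata{*meta})
        _, _, err = svc.UpdateSecretMetadata(opts)
        return err
    }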
+const ( + UpdateSecretOptionsActionDeleteCredentialsConst = "delete_credentials" + UpdateSecretOptionsActionRotateConst = "rotate" +) + +// NewUpdateSecretOptions : Instantiate UpdateSecretOptions +func (*SecretsManagerV1) NewUpdateSecretOptions(secretType string, id string, action string, secretActionOneOf SecretActionOneOfIntf) *UpdateSecretOptions { + return &UpdateSecretOptions{ + SecretType: core.StringPtr(secretType), + ID: core.StringPtr(id), + Action: core.StringPtr(action), + SecretActionOneOf: secretActionOneOf, + } +} + +// SetSecretType : Allow user to set SecretType +func (options *UpdateSecretOptions) SetSecretType(secretType string) *UpdateSecretOptions { + options.SecretType = core.StringPtr(secretType) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateSecretOptions) SetID(id string) *UpdateSecretOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetAction : Allow user to set Action +func (options *UpdateSecretOptions) SetAction(action string) *UpdateSecretOptions { + options.Action = core.StringPtr(action) + return options +} + +// SetSecretActionOneOf : Allow user to set SecretActionOneOf +func (options *UpdateSecretOptions) SetSecretActionOneOf(secretActionOneOf SecretActionOneOfIntf) *UpdateSecretOptions { + options.SecretActionOneOf = secretActionOneOf + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSecretOptions) SetHeaders(param map[string]string) *UpdateSecretOptions { + options.Headers = param + return options +} + +// EngineConfigOneOfIamSecretEngineRootConfig : Configuration that is used to generate IAM credentials. +// This model "extends" EngineConfigOneOf +type EngineConfigOneOfIamSecretEngineRootConfig struct { + // An IBM Cloud API key that has the capability to create and manage service IDs. + // + // The API key must be assigned the Editor platform role on the Access Groups Service and the Operator platform role on + // the IAM Identity Service. For more information, see [Enabling the IAM secrets + // engine](https://cloud.ibm.com/docs/secrets-manager?topic=secrets-manager-secret-engines#configure-iam-engine). + APIKey *string `json:"api_key" validate:"required"` + + // The hash value of the IBM Cloud API key that is used to create and manage service IDs. + APIKeyHash *string `json:"api_key_hash,omitempty"` +} + +// NewEngineConfigOneOfIamSecretEngineRootConfig : Instantiate EngineConfigOneOfIamSecretEngineRootConfig (Generic Model Constructor) +func (*SecretsManagerV1) NewEngineConfigOneOfIamSecretEngineRootConfig(apiKey string) (model *EngineConfigOneOfIamSecretEngineRootConfig, err error) { + model = &EngineConfigOneOfIamSecretEngineRootConfig{ + APIKey: core.StringPtr(apiKey), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*EngineConfigOneOfIamSecretEngineRootConfig) isaEngineConfigOneOf() bool { + return true +} + +// UnmarshalEngineConfigOneOfIamSecretEngineRootConfig unmarshals an instance of EngineConfigOneOfIamSecretEngineRootConfig from the specified map of raw messages. 
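A sketch of the `rotate` action with UpdateSecretOptions above (illustrative; the rotate request body constructor appears later in this file, and the UpdateSecret operation is assumed from the options naming).

    func rotatePassword(svc *sm.SecretsManagerV1, secretID, newPassword string) error {
        body, err := svc.NewSecretActionOneOfRotateUsernamePasswordSecretBody(newPassword)
        if err != nil {
            return err
        }
        opts := svc.NewUpdateSecretOptions(sm.UpdateSecretOptionsSecretTypeUsernamePasswordConst,
            secretID, sm.UpdateSecretOptionsActionRotateConst, body)
        _, _, err = svc.UpdateSecret(opts)
        return err
    }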
+func UnmarshalEngineConfigOneOfIamSecretEngineRootConfig(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(EngineConfigOneOfIamSecretEngineRootConfig) + err = core.UnmarshalPrimitive(m, "api_key", &obj.APIKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "api_key_hash", &obj.APIKeyHash) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetSecretPoliciesOneOfGetSecretPolicyRotation : The base schema for retrieving a policy that is associated with a secret. +// This model "extends" GetSecretPoliciesOneOf +type GetSecretPoliciesOneOfGetSecretPolicyRotation struct { + // The metadata that describes the resource array. + Metadata *CollectionMetadata `json:"metadata" validate:"required"` + + // A collection of resources. + Resources []GetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItem `json:"resources" validate:"required"` +} + +func (*GetSecretPoliciesOneOfGetSecretPolicyRotation) isaGetSecretPoliciesOneOf() bool { + return true +} + +// UnmarshalGetSecretPoliciesOneOfGetSecretPolicyRotation unmarshals an instance of GetSecretPoliciesOneOfGetSecretPolicyRotation from the specified map of raw messages. +func UnmarshalGetSecretPoliciesOneOfGetSecretPolicyRotation(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(GetSecretPoliciesOneOfGetSecretPolicyRotation) + err = core.UnmarshalModel(m, "metadata", &obj.Metadata, UnmarshalCollectionMetadata) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resources", &obj.Resources, UnmarshalGetSecretPoliciesOneOfGetSecretPolicyRotationResourcesItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretActionOneOfDeleteCredentialsForIamSecret : Delete the credentials that are associated with an `iam_credentials` secret. +// This model "extends" SecretActionOneOf +type SecretActionOneOfDeleteCredentialsForIamSecret struct { + // The service ID that you want to delete. It is deleted together with its API key. + ServiceID *string `json:"service_id" validate:"required"` +} + +// NewSecretActionOneOfDeleteCredentialsForIamSecret : Instantiate SecretActionOneOfDeleteCredentialsForIamSecret (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretActionOneOfDeleteCredentialsForIamSecret(serviceID string) (model *SecretActionOneOfDeleteCredentialsForIamSecret, err error) { + model = &SecretActionOneOfDeleteCredentialsForIamSecret{ + ServiceID: core.StringPtr(serviceID), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecretActionOneOfDeleteCredentialsForIamSecret) isaSecretActionOneOf() bool { + return true +} + +// UnmarshalSecretActionOneOfDeleteCredentialsForIamSecret unmarshals an instance of SecretActionOneOfDeleteCredentialsForIamSecret from the specified map of raw messages. +func UnmarshalSecretActionOneOfDeleteCredentialsForIamSecret(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretActionOneOfDeleteCredentialsForIamSecret) + err = core.UnmarshalPrimitive(m, "service_id", &obj.ServiceID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretActionOneOfRotateArbitrarySecretBody : The request body of a `rotate` action. +// This model "extends" SecretActionOneOf +type SecretActionOneOfRotateArbitrarySecretBody struct { + // The new secret data to assign to an `arbitrary` secret. 
+ Payload *string `json:"payload" validate:"required"` +} + +// NewSecretActionOneOfRotateArbitrarySecretBody : Instantiate SecretActionOneOfRotateArbitrarySecretBody (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretActionOneOfRotateArbitrarySecretBody(payload string) (model *SecretActionOneOfRotateArbitrarySecretBody, err error) { + model = &SecretActionOneOfRotateArbitrarySecretBody{ + Payload: core.StringPtr(payload), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecretActionOneOfRotateArbitrarySecretBody) isaSecretActionOneOf() bool { + return true +} + +// UnmarshalSecretActionOneOfRotateArbitrarySecretBody unmarshals an instance of SecretActionOneOfRotateArbitrarySecretBody from the specified map of raw messages. +func UnmarshalSecretActionOneOfRotateArbitrarySecretBody(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretActionOneOfRotateArbitrarySecretBody) + err = core.UnmarshalPrimitive(m, "payload", &obj.Payload) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretActionOneOfRotateUsernamePasswordSecretBody : The request body of a `rotate` action. +// This model "extends" SecretActionOneOf +type SecretActionOneOfRotateUsernamePasswordSecretBody struct { + // The new password to assign to a `username_password` secret. + Password *string `json:"password" validate:"required"` +} + +// NewSecretActionOneOfRotateUsernamePasswordSecretBody : Instantiate SecretActionOneOfRotateUsernamePasswordSecretBody (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretActionOneOfRotateUsernamePasswordSecretBody(password string) (model *SecretActionOneOfRotateUsernamePasswordSecretBody, err error) { + model = &SecretActionOneOfRotateUsernamePasswordSecretBody{ + Password: core.StringPtr(password), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecretActionOneOfRotateUsernamePasswordSecretBody) isaSecretActionOneOf() bool { + return true +} + +// UnmarshalSecretActionOneOfRotateUsernamePasswordSecretBody unmarshals an instance of SecretActionOneOfRotateUsernamePasswordSecretBody from the specified map of raw messages. +func UnmarshalSecretActionOneOfRotateUsernamePasswordSecretBody(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretActionOneOfRotateUsernamePasswordSecretBody) + err = core.UnmarshalPrimitive(m, "password", &obj.Password) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretResourceArbitrarySecretResource : The base schema for secrets. +// This model "extends" SecretResource +type SecretResourceArbitrarySecretResource struct { + // The MIME type that represents the secret. + Type *string `json:"type,omitempty"` + + // The v4 UUID that uniquely identifies the secret. + ID *string `json:"id,omitempty"` + + // A human-readable alias to assign to your secret. + // + // To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret. + Name *string `json:"name" validate:"required"` + + // An extended description of your secret. + // + // To protect your privacy, do not use personal data, such as your name or location, as a description for your secret. + Description *string `json:"description,omitempty"` + + // The v4 UUID that uniquely identifies the secret group to assign to this secret. 
+ // + // If you omit this parameter, your secret is assigned to the `default` secret group. + SecretGroupID *string `json:"secret_group_id,omitempty"` + + // Labels that you can use to filter for secrets in your instance. + // + // Up to 30 labels can be created. Labels can be between 2-30 characters, including spaces. Special characters not + // permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character (|). + // + // To protect your privacy, do not use personal data, such as your name or location, as a label for your secret. + Labels []string `json:"labels,omitempty"` + + // The secret state based on NIST SP 800-57. States are integers and correspond to the Pre-activation = 0, Active = 1, + // Suspended = 2, Deactivated = 3, and Destroyed = 5 values. + State *int64 `json:"state,omitempty"` + + // A text representation of the secret state. + StateDescription *string `json:"state_description,omitempty"` + + // The secret type. + SecretType *string `json:"secret_type,omitempty"` + + // The Cloud Resource Name (CRN) that uniquely identifies your Secrets Manager resource. + CRN *string `json:"crn,omitempty"` + + // The date the secret was created. The date format follows RFC 3339. + CreationDate *strfmt.DateTime `json:"creation_date,omitempty"` + + // The unique identifier for the entity that created the secret. + CreatedBy *string `json:"created_by,omitempty"` + + // Updates when the actual secret is modified. The date format follows RFC 3339. + LastUpdateDate *strfmt.DateTime `json:"last_update_date,omitempty"` + + // An array that contains metadata for each secret version. + Versions []SecretVersion `json:"versions,omitempty"` + + // The date the secret material expires. The date format follows RFC 3339. + // + // You can set an expiration date on supported secret types at their creation. If you create a secret without + // specifying an expiration date, the secret does not expire. The `expiration_date` field is supported for the + // following secret types: + // + // - `arbitrary` + // - `username_password`. + ExpirationDate *strfmt.DateTime `json:"expiration_date,omitempty"` + + // The new secret data to assign to an `arbitrary` secret. + Payload *string `json:"payload,omitempty"` + + SecretData interface{} `json:"secret_data,omitempty"` +} + +// Constants associated with the SecretResourceArbitrarySecretResource.SecretType property. +// The secret type. +const ( + SecretResourceArbitrarySecretResourceSecretTypeArbitraryConst = "arbitrary" + SecretResourceArbitrarySecretResourceSecretTypeIamCredentialsConst = "iam_credentials" + SecretResourceArbitrarySecretResourceSecretTypeUsernamePasswordConst = "username_password" +) + +// NewSecretResourceArbitrarySecretResource : Instantiate SecretResourceArbitrarySecretResource (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretResourceArbitrarySecretResource(name string) (model *SecretResourceArbitrarySecretResource, err error) { + model = &SecretResourceArbitrarySecretResource{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecretResourceArbitrarySecretResource) isaSecretResource() bool { + return true +} + +// UnmarshalSecretResourceArbitrarySecretResource unmarshals an instance of SecretResourceArbitrarySecretResource from the specified map of raw messages. 
+func UnmarshalSecretResourceArbitrarySecretResource(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretResourceArbitrarySecretResource) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_group_id", &obj.SecretGroupID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "labels", &obj.Labels) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state_description", &obj.StateDescription) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_type", &obj.SecretType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "creation_date", &obj.CreationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_update_date", &obj.LastUpdateDate) + if err != nil { + return + } + err = core.UnmarshalModel(m, "versions", &obj.Versions, UnmarshalSecretVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "expiration_date", &obj.ExpirationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "payload", &obj.Payload) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_data", &obj.SecretData) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretResourceIamSecretResource : The base schema for secrets. +// This model "extends" SecretResource +type SecretResourceIamSecretResource struct { + // The MIME type that represents the secret. + Type *string `json:"type,omitempty"` + + // The v4 UUID that uniquely identifies the secret. + ID *string `json:"id,omitempty"` + + // A human-readable alias to assign to your secret. + // + // To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret. + Name *string `json:"name" validate:"required"` + + // An extended description of your secret. + // + // To protect your privacy, do not use personal data, such as your name or location, as a description for your secret. + Description *string `json:"description,omitempty"` + + // The v4 UUID that uniquely identifies the secret group to assign to this secret. + // + // If you omit this parameter, your secret is assigned to the `default` secret group. + SecretGroupID *string `json:"secret_group_id,omitempty"` + + // Labels that you can use to filter for secrets in your instance. + // + // Up to 30 labels can be created. Labels can be between 2-30 characters, including spaces. Special characters not + // permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character (|). + // + // To protect your privacy, do not use personal data, such as your name or location, as a label for your secret. + Labels []string `json:"labels,omitempty"` + + // The secret state based on NIST SP 800-57. States are integers and correspond to the Pre-activation = 0, Active = 1, + // Suspended = 2, Deactivated = 3, and Destroyed = 5 values. 
+	State *int64 `json:"state,omitempty"`
+
+	// A text representation of the secret state.
+	StateDescription *string `json:"state_description,omitempty"`
+
+	// The secret type.
+	SecretType *string `json:"secret_type,omitempty"`
+
+	// The Cloud Resource Name (CRN) that uniquely identifies your Secrets Manager resource.
+	CRN *string `json:"crn,omitempty"`
+
+	// The date the secret was created. The date format follows RFC 3339.
+	CreationDate *strfmt.DateTime `json:"creation_date,omitempty"`
+
+	// The unique identifier for the entity that created the secret.
+	CreatedBy *string `json:"created_by,omitempty"`
+
+	// Updates when the actual secret is modified. The date format follows RFC 3339.
+	LastUpdateDate *strfmt.DateTime `json:"last_update_date,omitempty"`
+
+	// An array that contains metadata for each secret version.
+	Versions []SecretVersion `json:"versions,omitempty"`
+
+	// The time-to-live (TTL) or lease duration to assign to generated credentials.
+	//
+	// For `iam_credentials` secrets, the TTL defines how long each generated API key remains valid. The value can be
+	// either an integer that specifies the number of seconds, or the string representation of a duration, such as `120m`
+	// or `24h`.
+	TTL interface{} `json:"ttl,omitempty"`
+
+	// The access groups that define the capabilities of the service ID and API key that are generated for an
+	// `iam_credentials` secret.
+	//
+	// **Tip:** To find the ID of an access group, go to **Manage > Access (IAM) > Access groups** in the IBM Cloud
+	// console. Select the access group to inspect, and click **Details** to view its ID.
+	AccessGroups []string `json:"access_groups,omitempty"`
+
+	// The API key that is generated for this secret.
+	//
+	// After the secret reaches the end of its lease (see the `ttl` field), the API key is deleted automatically. If you
+	// want to continue to use the same API key for future read operations, see the `reuse_api_key` field.
+	APIKey *string `json:"api_key,omitempty"`
+
+	// The service ID under which the API key (see the `api_key` field) is created. This service ID is added to the access
+	// groups that you assign for this secret.
+	ServiceID *string `json:"service_id,omitempty"`
+
+	// Set to `true` to reuse the service ID and API key for this secret.
+	//
+	// Use this field to control whether to use the same service ID and API key for future read operations on this secret.
+	// If set to `true`, the service reuses the current credentials. If set to `false`, a new service ID and API key are
+	// generated each time that the secret is read or accessed.
+	ReuseAPIKey *bool `json:"reuse_api_key,omitempty"`
+}
+
+// Constants associated with the SecretResourceIamSecretResource.SecretType property.
+// The secret type.
+const ( + SecretResourceIamSecretResourceSecretTypeArbitraryConst = "arbitrary" + SecretResourceIamSecretResourceSecretTypeIamCredentialsConst = "iam_credentials" + SecretResourceIamSecretResourceSecretTypeUsernamePasswordConst = "username_password" +) + +// NewSecretResourceIamSecretResource : Instantiate SecretResourceIamSecretResource (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretResourceIamSecretResource(name string) (model *SecretResourceIamSecretResource, err error) { + model = &SecretResourceIamSecretResource{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecretResourceIamSecretResource) isaSecretResource() bool { + return true +} + +// UnmarshalSecretResourceIamSecretResource unmarshals an instance of SecretResourceIamSecretResource from the specified map of raw messages. +func UnmarshalSecretResourceIamSecretResource(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretResourceIamSecretResource) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_group_id", &obj.SecretGroupID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "labels", &obj.Labels) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state_description", &obj.StateDescription) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_type", &obj.SecretType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "creation_date", &obj.CreationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_update_date", &obj.LastUpdateDate) + if err != nil { + return + } + err = core.UnmarshalModel(m, "versions", &obj.Versions, UnmarshalSecretVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ttl", &obj.TTL) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "access_groups", &obj.AccessGroups) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "api_key", &obj.APIKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "service_id", &obj.ServiceID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "reuse_api_key", &obj.ReuseAPIKey) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecretResourceUsernamePasswordSecretResource : The base schema for secrets. +// This model "extends" SecretResource +type SecretResourceUsernamePasswordSecretResource struct { + // The MIME type that represents the secret. + Type *string `json:"type,omitempty"` + + // The v4 UUID that uniquely identifies the secret. + ID *string `json:"id,omitempty"` + + // A human-readable alias to assign to your secret. + // + // To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret. + Name *string `json:"name" validate:"required"` + + // An extended description of your secret. 
+ // + // To protect your privacy, do not use personal data, such as your name or location, as a description for your secret. + Description *string `json:"description,omitempty"` + + // The v4 UUID that uniquely identifies the secret group to assign to this secret. + // + // If you omit this parameter, your secret is assigned to the `default` secret group. + SecretGroupID *string `json:"secret_group_id,omitempty"` + + // Labels that you can use to filter for secrets in your instance. + // + // Up to 30 labels can be created. Labels can be between 2-30 characters, including spaces. Special characters not + // permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character (|). + // + // To protect your privacy, do not use personal data, such as your name or location, as a label for your secret. + Labels []string `json:"labels,omitempty"` + + // The secret state based on NIST SP 800-57. States are integers and correspond to the Pre-activation = 0, Active = 1, + // Suspended = 2, Deactivated = 3, and Destroyed = 5 values. + State *int64 `json:"state,omitempty"` + + // A text representation of the secret state. + StateDescription *string `json:"state_description,omitempty"` + + // The secret type. + SecretType *string `json:"secret_type,omitempty"` + + // The Cloud Resource Name (CRN) that uniquely identifies your Secrets Manager resource. + CRN *string `json:"crn,omitempty"` + + // The date the secret was created. The date format follows RFC 3339. + CreationDate *strfmt.DateTime `json:"creation_date,omitempty"` + + // The unique identifier for the entity that created the secret. + CreatedBy *string `json:"created_by,omitempty"` + + // Updates when the actual secret is modified. The date format follows RFC 3339. + LastUpdateDate *strfmt.DateTime `json:"last_update_date,omitempty"` + + // An array that contains metadata for each secret version. + Versions []SecretVersion `json:"versions,omitempty"` + + // The username to assign to this secret. + Username *string `json:"username,omitempty"` + + // The password to assign to this secret. + Password *string `json:"password,omitempty"` + + SecretData interface{} `json:"secret_data,omitempty"` + + // The date the secret material expires. The date format follows RFC 3339. + // + // You can set an expiration date on supported secret types at their creation. If you create a secret without + // specifying an expiration date, the secret does not expire. The `expiration_date` field is supported for the + // following secret types: + // + // - `arbitrary` + // - `username_password`. + ExpirationDate *strfmt.DateTime `json:"expiration_date,omitempty"` + + // The date that the secret is scheduled for automatic rotation. + // + // The service automatically creates a new version of the secret on its next rotation date. This field exists only for + // secrets that can be auto-rotated and have an existing rotation policy. + NextRotationDate *strfmt.DateTime `json:"next_rotation_date,omitempty"` +} + +// Constants associated with the SecretResourceUsernamePasswordSecretResource.SecretType property. +// The secret type. 
+const ( + SecretResourceUsernamePasswordSecretResourceSecretTypeArbitraryConst = "arbitrary" + SecretResourceUsernamePasswordSecretResourceSecretTypeIamCredentialsConst = "iam_credentials" + SecretResourceUsernamePasswordSecretResourceSecretTypeUsernamePasswordConst = "username_password" +) + +// NewSecretResourceUsernamePasswordSecretResource : Instantiate SecretResourceUsernamePasswordSecretResource (Generic Model Constructor) +func (*SecretsManagerV1) NewSecretResourceUsernamePasswordSecretResource(name string) (model *SecretResourceUsernamePasswordSecretResource, err error) { + model = &SecretResourceUsernamePasswordSecretResource{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecretResourceUsernamePasswordSecretResource) isaSecretResource() bool { + return true +} + +// UnmarshalSecretResourceUsernamePasswordSecretResource unmarshals an instance of SecretResourceUsernamePasswordSecretResource from the specified map of raw messages. +func UnmarshalSecretResourceUsernamePasswordSecretResource(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecretResourceUsernamePasswordSecretResource) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "description", &obj.Description) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_group_id", &obj.SecretGroupID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "labels", &obj.Labels) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state", &obj.State) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "state_description", &obj.StateDescription) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_type", &obj.SecretType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "creation_date", &obj.CreationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_by", &obj.CreatedBy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_update_date", &obj.LastUpdateDate) + if err != nil { + return + } + err = core.UnmarshalModel(m, "versions", &obj.Versions, UnmarshalSecretVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "username", &obj.Username) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "password", &obj.Password) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "secret_data", &obj.SecretData) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "expiration_date", &obj.ExpirationDate) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next_rotation_date", &obj.NextRotationDate) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/vpc-go-sdk/common/version.go b/vendor/github.com/IBM/vpc-go-sdk/common/version.go index 157beb25bcd..623548bdac3 100644 --- a/vendor/github.com/IBM/vpc-go-sdk/common/version.go +++ b/vendor/github.com/IBM/vpc-go-sdk/common/version.go @@ -1,4 +1,4 @@ package common // Version of the SDK -const Version = "1.0.1" +const Version = "0.6.0" diff --git 
a/vendor/github.com/IBM/vpc-go-sdk/vpcclassicv1/vpc_classic_v1.go b/vendor/github.com/IBM/vpc-go-sdk/vpcclassicv1/vpc_classic_v1.go new file mode 100644 index 00000000000..a768a90c85c --- /dev/null +++ b/vendor/github.com/IBM/vpc-go-sdk/vpcclassicv1/vpc_classic_v1.go @@ -0,0 +1,35201 @@ +/** + * (C) Copyright IBM Corp. 2020, 2021. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * IBM OpenAPI SDK Code Generator Version: 3.26.0-4b317b0c-20210127-171701 + */ + +// Package vpcclassicv1 : Operations and models for the VpcClassicV1 service +package vpcclassicv1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "time" + + "github.com/IBM/go-sdk-core/v5/core" + common "github.com/IBM/vpc-go-sdk/common" + "github.com/go-openapi/strfmt" +) + +// VpcClassicV1 : The IBM Cloud Virtual Private Cloud (VPC) API can be used to programmatically provision and manage +// infrastructure resources, including virtual server instances, subnets, volumes, and load balancers. +// +// Version: 2021-02-09 +type VpcClassicV1 struct { + Service *core.BaseService + + // Requests the version of the API as of a date in the format `YYYY-MM-DD`. Any date up to the current date may be + // provided. Specify the current date to request the latest version. + Version *string + + // The infrastructure generation for the request. For the API behavior documented here, use + // `1`. + generation *int64 +} + +// DefaultServiceURL is the default URL to make service requests to. +const DefaultServiceURL = "https://us-south.iaas.cloud.ibm.com/v1" + +// DefaultServiceName is the default key used to find external configuration information. +const DefaultServiceName = "vpc_classic" + +// VpcClassicV1Options : Service options +type VpcClassicV1Options struct { + ServiceName string + URL string + Authenticator core.Authenticator + + // Requests the version of the API as of a date in the format `YYYY-MM-DD`. Any date up to the current date may be + // provided. Specify the current date to request the latest version. + Version *string +} + +// NewVpcClassicV1UsingExternalConfig : constructs an instance of VpcClassicV1 with passed in options and external configuration. +func NewVpcClassicV1UsingExternalConfig(options *VpcClassicV1Options) (vpcClassic *VpcClassicV1, err error) { + if options.ServiceName == "" { + options.ServiceName = DefaultServiceName + } + + if options.Authenticator == nil { + options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName) + if err != nil { + return + } + } + + vpcClassic, err = NewVpcClassicV1(options) + if err != nil { + return + } + + err = vpcClassic.Service.ConfigureService(options.ServiceName) + if err != nil { + return + } + + if options.URL != "" { + err = vpcClassic.Service.SetServiceURL(options.URL) + } + return +} + +// NewVpcClassicV1 : constructs an instance of VpcClassicV1 with passed in options. 
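+//
+// A minimal construction sketch (illustrative, not part of the generated file;
+// the API key value is a hypothetical placeholder):
+//
+//   authenticator := &core.IamAuthenticator{ApiKey: "my-iam-api-key"}
+//   vpcClassic, err := vpcclassicv1.NewVpcClassicV1(&vpcclassicv1.VpcClassicV1Options{
+//     Authenticator: authenticator,
+//   })
+//   if err != nil {
+//     panic(err)
+//   }
+//
+// Omitting Version selects the default ("2021-02-09"); an Authenticator is required.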
+func NewVpcClassicV1(options *VpcClassicV1Options) (service *VpcClassicV1, err error) { + serviceOptions := &core.ServiceOptions{ + URL: DefaultServiceURL, + Authenticator: options.Authenticator, + } + + err = core.ValidateStruct(options, "options") + if err != nil { + return + } + + baseService, err := core.NewBaseService(serviceOptions) + if err != nil { + return + } + + if options.URL != "" { + err = baseService.SetServiceURL(options.URL) + if err != nil { + return + } + } + + if options.Version == nil { + options.Version = core.StringPtr("2021-02-09") + } + + service = &VpcClassicV1{ + Service: baseService, + Version: options.Version, + generation: core.Int64Ptr(1), + } + + return +} + +// GetServiceURLForRegion returns the service URL to be used for the specified region +func GetServiceURLForRegion(region string) (string, error) { + return "", fmt.Errorf("service does not support regional URLs") +} + +// Clone makes a copy of "vpcClassic" suitable for processing requests. +func (vpcClassic *VpcClassicV1) Clone() *VpcClassicV1 { + if core.IsNil(vpcClassic) { + return nil + } + clone := *vpcClassic + clone.Service = vpcClassic.Service.Clone() + return &clone +} + +// SetServiceURL sets the service URL +func (vpcClassic *VpcClassicV1) SetServiceURL(url string) error { + return vpcClassic.Service.SetServiceURL(url) +} + +// GetServiceURL returns the service URL +func (vpcClassic *VpcClassicV1) GetServiceURL() string { + return vpcClassic.Service.GetServiceURL() +} + +// SetDefaultHeaders sets HTTP headers to be sent in every request +func (vpcClassic *VpcClassicV1) SetDefaultHeaders(headers http.Header) { + vpcClassic.Service.SetDefaultHeaders(headers) +} + +// SetEnableGzipCompression sets the service's EnableGzipCompression field +func (vpcClassic *VpcClassicV1) SetEnableGzipCompression(enableGzip bool) { + vpcClassic.Service.SetEnableGzipCompression(enableGzip) +} + +// GetEnableGzipCompression returns the service's EnableGzipCompression field +func (vpcClassic *VpcClassicV1) GetEnableGzipCompression() bool { + return vpcClassic.Service.GetEnableGzipCompression() +} + +// EnableRetries enables automatic retries for requests invoked for this service instance. +// If either parameter is specified as 0, then a default value is used instead. +func (vpcClassic *VpcClassicV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) { + vpcClassic.Service.EnableRetries(maxRetries, maxRetryInterval) +} + +// DisableRetries disables automatic retries for requests invoked for this service instance. +func (vpcClassic *VpcClassicV1) DisableRetries() { + vpcClassic.Service.DisableRetries() +} + +// ListVpcs : List all VPCs +// This request lists all VPCs in the region. A VPC is a virtual network that belongs to an account and provides logical +// isolation from other networks. A VPC is made up of resources in one or more zones. VPCs are regional, and each VPC +// can contain resources in multiple zones in a region. 
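+//
+// An illustrative usage sketch (assumes a constructed `vpcClassic` client):
+//
+//   vpcs, response, err := vpcClassic.ListVpcs(&vpcclassicv1.ListVpcsOptions{
+//     Limit: core.Int64Ptr(50),
+//   })
+//   if err == nil {
+//     fmt.Printf("status %d, fetched a page of VPCs: %+v\n", response.StatusCode, vpcs)
+//   }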
+func (vpcClassic *VpcClassicV1) ListVpcs(listVpcsOptions *ListVpcsOptions) (result *VPCCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListVpcsWithContext(context.Background(), listVpcsOptions) +} + +// ListVpcsWithContext is an alternate form of the ListVpcs method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListVpcsWithContext(ctx context.Context, listVpcsOptions *ListVpcsOptions) (result *VPCCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listVpcsOptions, "listVpcsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listVpcsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListVpcs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listVpcsOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listVpcsOptions.Start)) + } + if listVpcsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listVpcsOptions.Limit)) + } + if listVpcsOptions.ClassicAccess != nil { + builder.AddQuery("classic_access", fmt.Sprint(*listVpcsOptions.ClassicAccess)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPCCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateVPC : Create a VPC +// This request creates a new VPC from a VPC prototype object. The prototype object is structured in the same way as a +// retrieved VPC, and contains the information necessary to create the new VPC. 
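+//
+// An illustrative sketch (names and values are hypothetical):
+//
+//   vpc, _, err := vpcClassic.CreateVPC(&vpcclassicv1.CreateVPCOptions{
+//     Name:                    core.StringPtr("my-vpc"),
+//     AddressPrefixManagement: core.StringPtr("auto"),
+//     ClassicAccess:           core.BoolPtr(false),
+//   })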
+func (vpcClassic *VpcClassicV1) CreateVPC(createVPCOptions *CreateVPCOptions) (result *VPC, response *core.DetailedResponse, err error) { + return vpcClassic.CreateVPCWithContext(context.Background(), createVPCOptions) +} + +// CreateVPCWithContext is an alternate form of the CreateVPC method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateVPCWithContext(ctx context.Context, createVPCOptions *CreateVPCOptions) (result *VPC, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createVPCOptions, "createVPCOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createVPCOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateVPC") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createVPCOptions.AddressPrefixManagement != nil { + body["address_prefix_management"] = createVPCOptions.AddressPrefixManagement + } + if createVPCOptions.ClassicAccess != nil { + body["classic_access"] = createVPCOptions.ClassicAccess + } + if createVPCOptions.Name != nil { + body["name"] = createVPCOptions.Name + } + if createVPCOptions.ResourceGroup != nil { + body["resource_group"] = createVPCOptions.ResourceGroup + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPC) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteVPC : Delete a VPC +// This request deletes a VPC. This operation cannot be reversed. For this request to succeed, the VPC must not contain +// any instances, subnets, or public gateways. All security groups associated with the VPC are automatically deleted. If +// the default network ACL was automatically created, it is automatically deleted. 
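+//
+// An illustrative sketch (the ID is a hypothetical placeholder); a successful
+// deletion returns no result body, so only the response is checked:
+//
+//   response, err := vpcClassic.DeleteVPC(&vpcclassicv1.DeleteVPCOptions{
+//     ID: core.StringPtr("r006-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//   })
+//   if err == nil && response.StatusCode == 204 {
+//     // the VPC was deleted
+//   }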
+func (vpcClassic *VpcClassicV1) DeleteVPC(deleteVPCOptions *DeleteVPCOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteVPCWithContext(context.Background(), deleteVPCOptions) +} + +// DeleteVPCWithContext is an alternate form of the DeleteVPC method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteVPCWithContext(ctx context.Context, deleteVPCOptions *DeleteVPCOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteVPCOptions, "deleteVPCOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteVPCOptions, "deleteVPCOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteVPCOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteVPCOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteVPC") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetVPC : Retrieve a VPC +// This request retrieves a single VPC specified by the identifier in the URL. +func (vpcClassic *VpcClassicV1) GetVPC(getVPCOptions *GetVPCOptions) (result *VPC, response *core.DetailedResponse, err error) { + return vpcClassic.GetVPCWithContext(context.Background(), getVPCOptions) +} + +// GetVPCWithContext is an alternate form of the GetVPC method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetVPCWithContext(ctx context.Context, getVPCOptions *GetVPCOptions) (result *VPC, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getVPCOptions, "getVPCOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getVPCOptions, "getVPCOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getVPCOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getVPCOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetVPC") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPC) + if err != nil { + return + } + response.Result = result 
+ + return +} + +// UpdateVPC : Update a VPC +// This request updates a VPC's name. +func (vpcClassic *VpcClassicV1) UpdateVPC(updateVPCOptions *UpdateVPCOptions) (result *VPC, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateVPCWithContext(context.Background(), updateVPCOptions) +} + +// UpdateVPCWithContext is an alternate form of the UpdateVPC method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateVPCWithContext(ctx context.Context, updateVPCOptions *UpdateVPCOptions) (result *VPC, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateVPCOptions, "updateVPCOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateVPCOptions, "updateVPCOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateVPCOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateVPCOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateVPC") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateVPCOptions.VPCPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPC) + if err != nil { + return + } + response.Result = result + + return +} + +// GetVPCDefaultSecurityGroup : Retrieve a VPC's default security group +// This request retrieves the default security group for the VPC specified by the identifier in the URL. The default +// security group is applied to any new network interfaces in the VPC that do not specify a security group. 
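+//
+// An illustrative sketch (the VPC ID is a hypothetical placeholder):
+//
+//   sg, _, err := vpcClassic.GetVPCDefaultSecurityGroup(&vpcclassicv1.GetVPCDefaultSecurityGroupOptions{
+//     ID: core.StringPtr("r006-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//   })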
+func (vpcClassic *VpcClassicV1) GetVPCDefaultSecurityGroup(getVPCDefaultSecurityGroupOptions *GetVPCDefaultSecurityGroupOptions) (result *DefaultSecurityGroup, response *core.DetailedResponse, err error) { + return vpcClassic.GetVPCDefaultSecurityGroupWithContext(context.Background(), getVPCDefaultSecurityGroupOptions) +} + +// GetVPCDefaultSecurityGroupWithContext is an alternate form of the GetVPCDefaultSecurityGroup method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetVPCDefaultSecurityGroupWithContext(ctx context.Context, getVPCDefaultSecurityGroupOptions *GetVPCDefaultSecurityGroupOptions) (result *DefaultSecurityGroup, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getVPCDefaultSecurityGroupOptions, "getVPCDefaultSecurityGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getVPCDefaultSecurityGroupOptions, "getVPCDefaultSecurityGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getVPCDefaultSecurityGroupOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{id}/default_security_group`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getVPCDefaultSecurityGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetVPCDefaultSecurityGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDefaultSecurityGroup) + if err != nil { + return + } + response.Result = result + + return +} + +// ListVPCAddressPrefixes : List all address prefixes for a VPC +// This request lists all address pool prefixes for a VPC. 
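+//
+// An illustrative sketch requesting one page of prefixes (the VPC ID is a
+// hypothetical placeholder; pass the `start` token from a previous response to
+// fetch subsequent pages):
+//
+//   prefixes, _, err := vpcClassic.ListVPCAddressPrefixes(&vpcclassicv1.ListVPCAddressPrefixesOptions{
+//     VPCID: core.StringPtr("r006-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//     Limit: core.Int64Ptr(10),
+//   })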
+func (vpcClassic *VpcClassicV1) ListVPCAddressPrefixes(listVPCAddressPrefixesOptions *ListVPCAddressPrefixesOptions) (result *AddressPrefixCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListVPCAddressPrefixesWithContext(context.Background(), listVPCAddressPrefixesOptions) +} + +// ListVPCAddressPrefixesWithContext is an alternate form of the ListVPCAddressPrefixes method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListVPCAddressPrefixesWithContext(ctx context.Context, listVPCAddressPrefixesOptions *ListVPCAddressPrefixesOptions) (result *AddressPrefixCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listVPCAddressPrefixesOptions, "listVPCAddressPrefixesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listVPCAddressPrefixesOptions, "listVPCAddressPrefixesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *listVPCAddressPrefixesOptions.VPCID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/address_prefixes`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listVPCAddressPrefixesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListVPCAddressPrefixes") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listVPCAddressPrefixesOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listVPCAddressPrefixesOptions.Start)) + } + if listVPCAddressPrefixesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listVPCAddressPrefixesOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefixCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateVPCAddressPrefix : Create an address prefix for a VPC +// This request creates a new prefix from a prefix prototype object. The prototype object is structured in the same way +// as a retrieved prefix, and contains the information necessary to create the new prefix. 
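+//
+// An illustrative sketch (IDs, the zone name, and the CIDR are hypothetical;
+// ZoneIdentityByName is assumed to be the generated zone identity model):
+//
+//   prefix, _, err := vpcClassic.CreateVPCAddressPrefix(&vpcclassicv1.CreateVPCAddressPrefixOptions{
+//     VPCID: core.StringPtr("r006-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//     CIDR:  core.StringPtr("10.0.1.0/24"),
+//     Zone:  &vpcclassicv1.ZoneIdentityByName{Name: core.StringPtr("us-south-1")},
+//     Name:  core.StringPtr("my-prefix"),
+//   })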
+func (vpcClassic *VpcClassicV1) CreateVPCAddressPrefix(createVPCAddressPrefixOptions *CreateVPCAddressPrefixOptions) (result *AddressPrefix, response *core.DetailedResponse, err error) { + return vpcClassic.CreateVPCAddressPrefixWithContext(context.Background(), createVPCAddressPrefixOptions) +} + +// CreateVPCAddressPrefixWithContext is an alternate form of the CreateVPCAddressPrefix method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateVPCAddressPrefixWithContext(ctx context.Context, createVPCAddressPrefixOptions *CreateVPCAddressPrefixOptions) (result *AddressPrefix, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createVPCAddressPrefixOptions, "createVPCAddressPrefixOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createVPCAddressPrefixOptions, "createVPCAddressPrefixOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *createVPCAddressPrefixOptions.VPCID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/address_prefixes`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createVPCAddressPrefixOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateVPCAddressPrefix") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createVPCAddressPrefixOptions.CIDR != nil { + body["cidr"] = createVPCAddressPrefixOptions.CIDR + } + if createVPCAddressPrefixOptions.Zone != nil { + body["zone"] = createVPCAddressPrefixOptions.Zone + } + if createVPCAddressPrefixOptions.IsDefault != nil { + body["is_default"] = createVPCAddressPrefixOptions.IsDefault + } + if createVPCAddressPrefixOptions.Name != nil { + body["name"] = createVPCAddressPrefixOptions.Name + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteVPCAddressPrefix : Delete an address prefix +// This request deletes a prefix. This operation cannot be reversed. The request will fail if any subnets use addresses +// from this prefix. 
+func (vpcClassic *VpcClassicV1) DeleteVPCAddressPrefix(deleteVPCAddressPrefixOptions *DeleteVPCAddressPrefixOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteVPCAddressPrefixWithContext(context.Background(), deleteVPCAddressPrefixOptions) +} + +// DeleteVPCAddressPrefixWithContext is an alternate form of the DeleteVPCAddressPrefix method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteVPCAddressPrefixWithContext(ctx context.Context, deleteVPCAddressPrefixOptions *DeleteVPCAddressPrefixOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteVPCAddressPrefixOptions, "deleteVPCAddressPrefixOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteVPCAddressPrefixOptions, "deleteVPCAddressPrefixOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *deleteVPCAddressPrefixOptions.VPCID, + "id": *deleteVPCAddressPrefixOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/address_prefixes/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteVPCAddressPrefixOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteVPCAddressPrefix") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetVPCAddressPrefix : Retrieve an address prefix +// This request retrieves a single prefix specified by the identifier in the URL. 
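+//
+// An illustrative sketch using the context-aware variant to bound the request
+// (IDs are hypothetical placeholders):
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//   defer cancel()
+//   prefix, _, err := vpcClassic.GetVPCAddressPrefixWithContext(ctx, &vpcclassicv1.GetVPCAddressPrefixOptions{
+//     VPCID: core.StringPtr("r006-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//     ID:    core.StringPtr("r006-ffffffff-1111-2222-3333-444444444444"),
+//   })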
+func (vpcClassic *VpcClassicV1) GetVPCAddressPrefix(getVPCAddressPrefixOptions *GetVPCAddressPrefixOptions) (result *AddressPrefix, response *core.DetailedResponse, err error) { + return vpcClassic.GetVPCAddressPrefixWithContext(context.Background(), getVPCAddressPrefixOptions) +} + +// GetVPCAddressPrefixWithContext is an alternate form of the GetVPCAddressPrefix method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetVPCAddressPrefixWithContext(ctx context.Context, getVPCAddressPrefixOptions *GetVPCAddressPrefixOptions) (result *AddressPrefix, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getVPCAddressPrefixOptions, "getVPCAddressPrefixOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getVPCAddressPrefixOptions, "getVPCAddressPrefixOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *getVPCAddressPrefixOptions.VPCID, + "id": *getVPCAddressPrefixOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/address_prefixes/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getVPCAddressPrefixOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetVPCAddressPrefix") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateVPCAddressPrefix : Update an address prefix +// This request updates a prefix with the information in a provided prefix patch. The prefix patch object is structured +// in the same way as a retrieved prefix and contains only the information to be updated. 
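+//
+// An illustrative sketch (IDs and the new name are hypothetical; the patch is
+// assumed to be supplied as a JSON merge-patch map, matching the
+// `application/merge-patch+json` content type this request sends):
+//
+//   prefix, _, err := vpcClassic.UpdateVPCAddressPrefix(&vpcclassicv1.UpdateVPCAddressPrefixOptions{
+//     VPCID:              core.StringPtr("r006-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//     ID:                 core.StringPtr("r006-ffffffff-1111-2222-3333-444444444444"),
+//     AddressPrefixPatch: map[string]interface{}{"name": "my-renamed-prefix"},
+//   })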
+func (vpcClassic *VpcClassicV1) UpdateVPCAddressPrefix(updateVPCAddressPrefixOptions *UpdateVPCAddressPrefixOptions) (result *AddressPrefix, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateVPCAddressPrefixWithContext(context.Background(), updateVPCAddressPrefixOptions) +} + +// UpdateVPCAddressPrefixWithContext is an alternate form of the UpdateVPCAddressPrefix method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateVPCAddressPrefixWithContext(ctx context.Context, updateVPCAddressPrefixOptions *UpdateVPCAddressPrefixOptions) (result *AddressPrefix, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateVPCAddressPrefixOptions, "updateVPCAddressPrefixOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateVPCAddressPrefixOptions, "updateVPCAddressPrefixOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *updateVPCAddressPrefixOptions.VPCID, + "id": *updateVPCAddressPrefixOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/address_prefixes/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateVPCAddressPrefixOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateVPCAddressPrefix") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateVPCAddressPrefixOptions.AddressPrefixPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix) + if err != nil { + return + } + response.Result = result + + return +} + +// ListVPCRoutes : List all routes in a VPC's default routing table +// This request lists all routes in the VPC's default routing table. Each route is zone-specific and directs any packets +// matching its destination CIDR block to a `next_hop` IP address. The most specific route matching a packet's +// destination will be used. If multiple equally-specific routes exist, traffic will be distributed across them. 
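+//
+// An illustrative sketch filtering routes to a single zone (the VPC ID and zone
+// name are hypothetical):
+//
+//   routes, _, err := vpcClassic.ListVPCRoutes(&vpcclassicv1.ListVPCRoutesOptions{
+//     VPCID:    core.StringPtr("r006-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//     ZoneName: core.StringPtr("us-south-1"),
+//   })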
+func (vpcClassic *VpcClassicV1) ListVPCRoutes(listVPCRoutesOptions *ListVPCRoutesOptions) (result *RouteCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListVPCRoutesWithContext(context.Background(), listVPCRoutesOptions) +} + +// ListVPCRoutesWithContext is an alternate form of the ListVPCRoutes method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListVPCRoutesWithContext(ctx context.Context, listVPCRoutesOptions *ListVPCRoutesOptions) (result *RouteCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listVPCRoutesOptions, "listVPCRoutesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listVPCRoutesOptions, "listVPCRoutesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *listVPCRoutesOptions.VPCID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/routes`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listVPCRoutesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListVPCRoutes") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listVPCRoutesOptions.ZoneName != nil { + builder.AddQuery("zone.name", fmt.Sprint(*listVPCRoutesOptions.ZoneName)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRouteCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateVPCRoute : Create a route in a VPC's default routing table +// This request creates a new route in the VPC's default routing table. The route prototype object is structured in the +// same way as a retrieved route, and contains the information necessary to create the new route. The request will fail +// if the new route will cause a loop. 
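+//
+// An illustrative sketch (all values are hypothetical; RouteNextHopPrototypeRouteNextHopIP
+// and ZoneIdentityByName are assumed to be the generated prototype models):
+//
+//   route, _, err := vpcClassic.CreateVPCRoute(&vpcclassicv1.CreateVPCRouteOptions{
+//     VPCID:       core.StringPtr("r006-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//     Destination: core.StringPtr("192.168.3.0/24"),
+//     NextHop:     &vpcclassicv1.RouteNextHopPrototypeRouteNextHopIP{Address: core.StringPtr("10.0.1.5")},
+//     Zone:        &vpcclassicv1.ZoneIdentityByName{Name: core.StringPtr("us-south-1")},
+//   })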
+func (vpcClassic *VpcClassicV1) CreateVPCRoute(createVPCRouteOptions *CreateVPCRouteOptions) (result *Route, response *core.DetailedResponse, err error) { + return vpcClassic.CreateVPCRouteWithContext(context.Background(), createVPCRouteOptions) +} + +// CreateVPCRouteWithContext is an alternate form of the CreateVPCRoute method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateVPCRouteWithContext(ctx context.Context, createVPCRouteOptions *CreateVPCRouteOptions) (result *Route, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createVPCRouteOptions, "createVPCRouteOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createVPCRouteOptions, "createVPCRouteOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *createVPCRouteOptions.VPCID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/routes`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createVPCRouteOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateVPCRoute") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createVPCRouteOptions.Destination != nil { + body["destination"] = createVPCRouteOptions.Destination + } + if createVPCRouteOptions.NextHop != nil { + body["next_hop"] = createVPCRouteOptions.NextHop + } + if createVPCRouteOptions.Zone != nil { + body["zone"] = createVPCRouteOptions.Zone + } + if createVPCRouteOptions.Name != nil { + body["name"] = createVPCRouteOptions.Name + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteVPCRoute : Delete a VPC route +// This request deletes a route. This operation cannot be reversed. 
+func (vpcClassic *VpcClassicV1) DeleteVPCRoute(deleteVPCRouteOptions *DeleteVPCRouteOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteVPCRouteWithContext(context.Background(), deleteVPCRouteOptions) +} + +// DeleteVPCRouteWithContext is an alternate form of the DeleteVPCRoute method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteVPCRouteWithContext(ctx context.Context, deleteVPCRouteOptions *DeleteVPCRouteOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteVPCRouteOptions, "deleteVPCRouteOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteVPCRouteOptions, "deleteVPCRouteOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *deleteVPCRouteOptions.VPCID, + "id": *deleteVPCRouteOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/routes/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteVPCRouteOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteVPCRoute") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetVPCRoute : Retrieve a VPC route +// This request retrieves a single route specified by the identifier in the URL. 
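+// Example (illustrative sketch, placeholder IDs):
+//
+//	options := &GetVPCRouteOptions{
+//		VPCID: core.StringPtr("example-vpc-id"),
+//		ID:    core.StringPtr("example-route-id"),
+//	}
+//	route, response, err := vpcClassic.GetVPCRoute(options)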
+func (vpcClassic *VpcClassicV1) GetVPCRoute(getVPCRouteOptions *GetVPCRouteOptions) (result *Route, response *core.DetailedResponse, err error) { + return vpcClassic.GetVPCRouteWithContext(context.Background(), getVPCRouteOptions) +} + +// GetVPCRouteWithContext is an alternate form of the GetVPCRoute method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetVPCRouteWithContext(ctx context.Context, getVPCRouteOptions *GetVPCRouteOptions) (result *Route, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getVPCRouteOptions, "getVPCRouteOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getVPCRouteOptions, "getVPCRouteOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *getVPCRouteOptions.VPCID, + "id": *getVPCRouteOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/routes/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getVPCRouteOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetVPCRoute") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateVPCRoute : Update a VPC route +// This request updates a route with the information in a provided route patch. The route patch object is structured in +// the same way as a retrieved route and contains only the information to be updated. 
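+// Example (illustrative sketch, placeholder IDs): this assumes the RoutePatch
+// field carries a JSON merge-patch map, matching the
+// application/merge-patch+json content type set by this method; here it
+// renames the route.
+//
+//	options := &UpdateVPCRouteOptions{
+//		VPCID:      core.StringPtr("example-vpc-id"),
+//		ID:         core.StringPtr("example-route-id"),
+//		RoutePatch: map[string]interface{}{"name": "my-renamed-route"},
+//	}
+//	route, response, err := vpcClassic.UpdateVPCRoute(options)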
+func (vpcClassic *VpcClassicV1) UpdateVPCRoute(updateVPCRouteOptions *UpdateVPCRouteOptions) (result *Route, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateVPCRouteWithContext(context.Background(), updateVPCRouteOptions) +} + +// UpdateVPCRouteWithContext is an alternate form of the UpdateVPCRoute method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateVPCRouteWithContext(ctx context.Context, updateVPCRouteOptions *UpdateVPCRouteOptions) (result *Route, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateVPCRouteOptions, "updateVPCRouteOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateVPCRouteOptions, "updateVPCRouteOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpc_id": *updateVPCRouteOptions.VPCID, + "id": *updateVPCRouteOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpcs/{vpc_id}/routes/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateVPCRouteOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateVPCRoute") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateVPCRouteOptions.RoutePatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) + if err != nil { + return + } + response.Result = result + + return +} + +// ListSubnets : List all subnets +// This request lists all subnets in the region. Subnets are contiguous ranges of IP addresses specified in CIDR block +// notation. Each subnet is within a particular zone and cannot span multiple zones or regions. 
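+// Example (illustrative sketch) showing the Start/Limit pagination
+// parameters; the `next` token from one page of results would be passed as
+// Start on the following call.
+//
+//	options := &ListSubnetsOptions{Limit: core.Int64Ptr(50)}
+//	subnets, response, err := vpcClassic.ListSubnets(options)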
+func (vpcClassic *VpcClassicV1) ListSubnets(listSubnetsOptions *ListSubnetsOptions) (result *SubnetCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListSubnetsWithContext(context.Background(), listSubnetsOptions) +} + +// ListSubnetsWithContext is an alternate form of the ListSubnets method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListSubnetsWithContext(ctx context.Context, listSubnetsOptions *ListSubnetsOptions) (result *SubnetCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listSubnetsOptions, "listSubnetsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listSubnetsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListSubnets") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listSubnetsOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listSubnetsOptions.Start)) + } + if listSubnetsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listSubnetsOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnetCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateSubnet : Create a subnet +// This request creates a new subnet from a subnet prototype object. The prototype object is structured in the same way +// as a retrieved subnet, and contains the information necessary to create the new subnet. For this request to succeed, +// the prototype's CIDR block must not overlap with an existing subnet in the VPC. 
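+// Example (illustrative sketch): the concrete shape of the subnet prototype
+// is defined by the SubnetPrototype model types later in this file;
+// "subnetPrototype" here stands in for a fully populated prototype.
+//
+//	options := &CreateSubnetOptions{SubnetPrototype: subnetPrototype}
+//	subnet, response, err := vpcClassic.CreateSubnet(options)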
+func (vpcClassic *VpcClassicV1) CreateSubnet(createSubnetOptions *CreateSubnetOptions) (result *Subnet, response *core.DetailedResponse, err error) { + return vpcClassic.CreateSubnetWithContext(context.Background(), createSubnetOptions) +} + +// CreateSubnetWithContext is an alternate form of the CreateSubnet method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateSubnetWithContext(ctx context.Context, createSubnetOptions *CreateSubnetOptions) (result *Subnet, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createSubnetOptions, "createSubnetOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createSubnetOptions, "createSubnetOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createSubnetOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateSubnet") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(createSubnetOptions.SubnetPrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnet) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteSubnet : Delete a subnet +// This request deletes a subnet. This operation cannot be reversed. For this request to succeed, the subnet must not be +// referenced by any network interfaces, VPN gateways, or load balancers. A delete operation automatically detaches the +// subnet from any network ACLs or public gateways. 
+func (vpcClassic *VpcClassicV1) DeleteSubnet(deleteSubnetOptions *DeleteSubnetOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteSubnetWithContext(context.Background(), deleteSubnetOptions) +} + +// DeleteSubnetWithContext is an alternate form of the DeleteSubnet method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteSubnetWithContext(ctx context.Context, deleteSubnetOptions *DeleteSubnetOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteSubnetOptions, "deleteSubnetOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteSubnetOptions, "deleteSubnetOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteSubnetOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteSubnetOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteSubnet") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetSubnet : Retrieve a subnet +// This request retrieves a single subnet specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetSubnet(getSubnetOptions *GetSubnetOptions) (result *Subnet, response *core.DetailedResponse, err error) { + return vpcClassic.GetSubnetWithContext(context.Background(), getSubnetOptions) +} + +// GetSubnetWithContext is an alternate form of the GetSubnet method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetSubnetWithContext(ctx context.Context, getSubnetOptions *GetSubnetOptions) (result *Subnet, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSubnetOptions, "getSubnetOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSubnetOptions, "getSubnetOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getSubnetOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSubnetOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetSubnet") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnet) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSubnet : Update a subnet +// This request updates a subnet with the information in a provided subnet patch. The subnet patch object is structured +// in the same way as a retrieved subnet and contains only the information to be updated. 
+func (vpcClassic *VpcClassicV1) UpdateSubnet(updateSubnetOptions *UpdateSubnetOptions) (result *Subnet, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateSubnetWithContext(context.Background(), updateSubnetOptions) +} + +// UpdateSubnetWithContext is an alternate form of the UpdateSubnet method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateSubnetWithContext(ctx context.Context, updateSubnetOptions *UpdateSubnetOptions) (result *Subnet, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateSubnetOptions, "updateSubnetOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateSubnetOptions, "updateSubnetOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateSubnetOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateSubnetOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateSubnet") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateSubnetOptions.SubnetPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnet) + if err != nil { + return + } + response.Result = result + + return +} + +// GetSubnetNetworkACL : Retrieve a subnet's attached network ACL +// This request retrieves the network ACL attached to the subnet specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetSubnetNetworkACL(getSubnetNetworkACLOptions *GetSubnetNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + return vpcClassic.GetSubnetNetworkACLWithContext(context.Background(), getSubnetNetworkACLOptions) +} + +// GetSubnetNetworkACLWithContext is an alternate form of the GetSubnetNetworkACL method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetSubnetNetworkACLWithContext(ctx context.Context, getSubnetNetworkACLOptions *GetSubnetNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSubnetNetworkACLOptions, "getSubnetNetworkACLOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSubnetNetworkACLOptions, "getSubnetNetworkACLOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getSubnetNetworkACLOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets/{id}/network_acl`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSubnetNetworkACLOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetSubnetNetworkACL") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL) + if err != nil { + return + } + response.Result = result + + return +} + +// ReplaceSubnetNetworkACL : Attach a network ACL to a subnet +// This request attaches the network ACL, specified in the request body, to the subnet specified by the subnet +// identifier in the URL. This replaces the existing network ACL on the subnet. 
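+// Example (illustrative sketch, placeholder ID): NetworkACLIdentity names the
+// replacement ACL using one of the identity models defined later in this
+// file; "networkACLIdentity" stands in for such a value.
+//
+//	options := &ReplaceSubnetNetworkACLOptions{
+//		ID:                 core.StringPtr("example-subnet-id"),
+//		NetworkACLIdentity: networkACLIdentity,
+//	}
+//	networkACL, response, err := vpcClassic.ReplaceSubnetNetworkACL(options)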
+func (vpcClassic *VpcClassicV1) ReplaceSubnetNetworkACL(replaceSubnetNetworkACLOptions *ReplaceSubnetNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + return vpcClassic.ReplaceSubnetNetworkACLWithContext(context.Background(), replaceSubnetNetworkACLOptions) +} + +// ReplaceSubnetNetworkACLWithContext is an alternate form of the ReplaceSubnetNetworkACL method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ReplaceSubnetNetworkACLWithContext(ctx context.Context, replaceSubnetNetworkACLOptions *ReplaceSubnetNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceSubnetNetworkACLOptions, "replaceSubnetNetworkACLOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceSubnetNetworkACLOptions, "replaceSubnetNetworkACLOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *replaceSubnetNetworkACLOptions.ID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets/{id}/network_acl`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range replaceSubnetNetworkACLOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ReplaceSubnetNetworkACL") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(replaceSubnetNetworkACLOptions.NetworkACLIdentity) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL) + if err != nil { + return + } + response.Result = result + + return +} + +// UnsetSubnetPublicGateway : Detach a public gateway from a subnet +// This request detaches the public gateway from the subnet specified by the subnet identifier in the URL. 
+func (vpcClassic *VpcClassicV1) UnsetSubnetPublicGateway(unsetSubnetPublicGatewayOptions *UnsetSubnetPublicGatewayOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.UnsetSubnetPublicGatewayWithContext(context.Background(), unsetSubnetPublicGatewayOptions) +} + +// UnsetSubnetPublicGatewayWithContext is an alternate form of the UnsetSubnetPublicGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UnsetSubnetPublicGatewayWithContext(ctx context.Context, unsetSubnetPublicGatewayOptions *UnsetSubnetPublicGatewayOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(unsetSubnetPublicGatewayOptions, "unsetSubnetPublicGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(unsetSubnetPublicGatewayOptions, "unsetSubnetPublicGatewayOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *unsetSubnetPublicGatewayOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets/{id}/public_gateway`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range unsetSubnetPublicGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UnsetSubnetPublicGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetSubnetPublicGateway : Retrieve a subnet's attached public gateway +// This request retrieves the public gateway attached to the subnet specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetSubnetPublicGateway(getSubnetPublicGatewayOptions *GetSubnetPublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + return vpcClassic.GetSubnetPublicGatewayWithContext(context.Background(), getSubnetPublicGatewayOptions) +} + +// GetSubnetPublicGatewayWithContext is an alternate form of the GetSubnetPublicGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetSubnetPublicGatewayWithContext(ctx context.Context, getSubnetPublicGatewayOptions *GetSubnetPublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSubnetPublicGatewayOptions, "getSubnetPublicGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSubnetPublicGatewayOptions, "getSubnetPublicGatewayOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getSubnetPublicGatewayOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets/{id}/public_gateway`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSubnetPublicGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetSubnetPublicGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// SetSubnetPublicGateway : Attach a public gateway to a subnet +// This request attaches the public gateway, specified in the request body, to the subnet specified by the subnet +// identifier in the URL. The public gateway must have the same VPC and zone as the subnet. 
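+// Example (illustrative sketch, placeholder ID): per the note above, the
+// referenced gateway must be in the same VPC and zone as the subnet;
+// "publicGatewayIdentity" stands in for an identity model value.
+//
+//	options := &SetSubnetPublicGatewayOptions{
+//		ID:                    core.StringPtr("example-subnet-id"),
+//		PublicGatewayIdentity: publicGatewayIdentity,
+//	}
+//	publicGateway, response, err := vpcClassic.SetSubnetPublicGateway(options)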
+func (vpcClassic *VpcClassicV1) SetSubnetPublicGateway(setSubnetPublicGatewayOptions *SetSubnetPublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + return vpcClassic.SetSubnetPublicGatewayWithContext(context.Background(), setSubnetPublicGatewayOptions) +} + +// SetSubnetPublicGatewayWithContext is an alternate form of the SetSubnetPublicGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) SetSubnetPublicGatewayWithContext(ctx context.Context, setSubnetPublicGatewayOptions *SetSubnetPublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(setSubnetPublicGatewayOptions, "setSubnetPublicGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(setSubnetPublicGatewayOptions, "setSubnetPublicGatewayOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *setSubnetPublicGatewayOptions.ID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/subnets/{id}/public_gateway`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range setSubnetPublicGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "SetSubnetPublicGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(setSubnetPublicGatewayOptions.PublicGatewayIdentity) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// ListImages : List all images +// This request lists all provisionable images available in the region. An image provides source data for a volume. +// Images are either system-provided, or created from another source, such as importing from object storage. +// +// The images will be sorted by their `created_at` property values, with the newest first. Images with identical +// `created_at` values will be secondarily sorted by ascending `id` property values. 
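+// Example (illustrative sketch) combining two of the supported query filters;
+// the name is a placeholder.
+//
+//	options := &ListImagesOptions{
+//		Visibility: core.StringPtr("public"),
+//		Name:       core.StringPtr("example-image-name"),
+//	}
+//	images, response, err := vpcClassic.ListImages(options)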
+func (vpcClassic *VpcClassicV1) ListImages(listImagesOptions *ListImagesOptions) (result *ImageCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListImagesWithContext(context.Background(), listImagesOptions) +} + +// ListImagesWithContext is an alternate form of the ListImages method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListImagesWithContext(ctx context.Context, listImagesOptions *ListImagesOptions) (result *ImageCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listImagesOptions, "listImagesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/images`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listImagesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListImages") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listImagesOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listImagesOptions.Start)) + } + if listImagesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listImagesOptions.Limit)) + } + if listImagesOptions.ResourceGroupID != nil { + builder.AddQuery("resource_group.id", fmt.Sprint(*listImagesOptions.ResourceGroupID)) + } + if listImagesOptions.Name != nil { + builder.AddQuery("name", fmt.Sprint(*listImagesOptions.Name)) + } + if listImagesOptions.Visibility != nil { + builder.AddQuery("visibility", fmt.Sprint(*listImagesOptions.Visibility)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateImage : Create an image +// This request creates a new image from an image prototype object. The prototype object is structured in the same way +// as a retrieved image, and contains the information necessary to create the new image. A URL to the image file on +// object storage must be provided. 
+func (vpcClassic *VpcClassicV1) CreateImage(createImageOptions *CreateImageOptions) (result *Image, response *core.DetailedResponse, err error) { + return vpcClassic.CreateImageWithContext(context.Background(), createImageOptions) +} + +// CreateImageWithContext is an alternate form of the CreateImage method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateImageWithContext(ctx context.Context, createImageOptions *CreateImageOptions) (result *Image, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createImageOptions, "createImageOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createImageOptions, "createImageOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/images`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createImageOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateImage") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(createImageOptions.ImagePrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImage) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteImage : Delete an image +// This request deletes an image. This operation cannot be reversed. System-provided images are not allowed to be +// deleted. An image with a `status` of `pending`, `tentative`, or `deleting` cannot be deleted. 
+func (vpcClassic *VpcClassicV1) DeleteImage(deleteImageOptions *DeleteImageOptions) (response *core.DetailedResponse, err error) {
+	return vpcClassic.DeleteImageWithContext(context.Background(), deleteImageOptions)
+}
+
+// DeleteImageWithContext is an alternate form of the DeleteImage method which supports a Context parameter
+func (vpcClassic *VpcClassicV1) DeleteImageWithContext(ctx context.Context, deleteImageOptions *DeleteImageOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteImageOptions, "deleteImageOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteImageOptions, "deleteImageOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *deleteImageOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/images/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteImageOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteImage")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation))
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = vpcClassic.Service.Request(request, nil)
+
+	return
+}
+
+// GetImage : Retrieve an image
+// This request retrieves a single image specified by the identifier in the URL.
+func (vpcClassic *VpcClassicV1) GetImage(getImageOptions *GetImageOptions) (result *Image, response *core.DetailedResponse, err error) {
+	return vpcClassic.GetImageWithContext(context.Background(), getImageOptions)
+}
+
+// GetImageWithContext is an alternate form of the GetImage method which supports a Context parameter
+func (vpcClassic *VpcClassicV1) GetImageWithContext(ctx context.Context, getImageOptions *GetImageOptions) (result *Image, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(getImageOptions, "getImageOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(getImageOptions, "getImageOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *getImageOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/images/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getImageOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetImage")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation))
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = vpcClassic.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImage)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// UpdateImage : Update an image
+// This request updates an image with the information in a provided image patch. The image patch object is structured in
+// the same way as a retrieved image and contains only the information to be updated. System-provided images are not
+// allowed to be updated. An image with a `status` of `deleting` cannot be updated.
+func (vpcClassic *VpcClassicV1) UpdateImage(updateImageOptions *UpdateImageOptions) (result *Image, response *core.DetailedResponse, err error) {
+	return vpcClassic.UpdateImageWithContext(context.Background(), updateImageOptions)
+}
+
+// UpdateImageWithContext is an alternate form of the UpdateImage method which supports a Context parameter
+func (vpcClassic *VpcClassicV1) UpdateImageWithContext(ctx context.Context, updateImageOptions *UpdateImageOptions) (result *Image, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(updateImageOptions, "updateImageOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(updateImageOptions, "updateImageOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *updateImageOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.PATCH)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/images/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range updateImageOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateImage")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/merge-patch+json")
+
+	builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation))
+
+	_, err = builder.SetBodyContentJSON(updateImageOptions.ImagePatch)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = vpcClassic.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImage)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// ListOperatingSystems : List all operating systems
+// This request lists all operating systems in the region.
+func (vpcClassic *VpcClassicV1) ListOperatingSystems(listOperatingSystemsOptions *ListOperatingSystemsOptions) (result *OperatingSystemCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListOperatingSystemsWithContext(context.Background(), listOperatingSystemsOptions) +} + +// ListOperatingSystemsWithContext is an alternate form of the ListOperatingSystems method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListOperatingSystemsWithContext(ctx context.Context, listOperatingSystemsOptions *ListOperatingSystemsOptions) (result *OperatingSystemCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listOperatingSystemsOptions, "listOperatingSystemsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/operating_systems`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listOperatingSystemsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListOperatingSystems") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listOperatingSystemsOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listOperatingSystemsOptions.Start)) + } + if listOperatingSystemsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listOperatingSystemsOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOperatingSystemCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// GetOperatingSystem : Retrieve an operating system +// This request retrieves a single operating system specified by the name in the URL. 
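+// Example (illustrative sketch): operating systems are addressed by name
+// rather than by ID; the name below is a placeholder.
+//
+//	options := &GetOperatingSystemOptions{
+//		Name: core.StringPtr("example-operating-system-name"),
+//	}
+//	os, response, err := vpcClassic.GetOperatingSystem(options)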
+func (vpcClassic *VpcClassicV1) GetOperatingSystem(getOperatingSystemOptions *GetOperatingSystemOptions) (result *OperatingSystem, response *core.DetailedResponse, err error) { + return vpcClassic.GetOperatingSystemWithContext(context.Background(), getOperatingSystemOptions) +} + +// GetOperatingSystemWithContext is an alternate form of the GetOperatingSystem method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetOperatingSystemWithContext(ctx context.Context, getOperatingSystemOptions *GetOperatingSystemOptions) (result *OperatingSystem, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getOperatingSystemOptions, "getOperatingSystemOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getOperatingSystemOptions, "getOperatingSystemOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "name": *getOperatingSystemOptions.Name, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/operating_systems/{name}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getOperatingSystemOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetOperatingSystem") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOperatingSystem) + if err != nil { + return + } + response.Result = result + + return +} + +// ListKeys : List all keys +// This request lists all keys in the region. A key contains a public SSH key which may be installed on instances when +// they are created. Private keys are not stored. 
+func (vpcClassic *VpcClassicV1) ListKeys(listKeysOptions *ListKeysOptions) (result *KeyCollection, response *core.DetailedResponse, err error) {
+	return vpcClassic.ListKeysWithContext(context.Background(), listKeysOptions)
+}
+
+// ListKeysWithContext is an alternate form of the ListKeys method which supports a Context parameter
+func (vpcClassic *VpcClassicV1) ListKeysWithContext(ctx context.Context, listKeysOptions *ListKeysOptions) (result *KeyCollection, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(listKeysOptions, "listKeysOptions")
+	if err != nil {
+		return
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/keys`, nil)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range listKeysOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListKeys")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation))
+	if listKeysOptions.Start != nil {
+		builder.AddQuery("start", fmt.Sprint(*listKeysOptions.Start))
+	}
+	if listKeysOptions.Limit != nil {
+		builder.AddQuery("limit", fmt.Sprint(*listKeysOptions.Limit))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = vpcClassic.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKeyCollection)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// CreateKey : Create a key
+// This request creates a new SSH key from a key prototype object. The prototype object is structured in the same way
+// as a retrieved key, and contains the information necessary to create the new key. The public key value must be
+// provided.
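+// Example (illustrative sketch): the public key material below is truncated
+// and purely illustrative.
+//
+//	options := &CreateKeyOptions{
+//		Name:      core.StringPtr("my-key"),
+//		PublicKey: core.StringPtr("ssh-rsa AAAAB3NzaC1yc2EAAA... user@example.com"),
+//	}
+//	key, response, err := vpcClassic.CreateKey(options)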
+func (vpcClassic *VpcClassicV1) CreateKey(createKeyOptions *CreateKeyOptions) (result *Key, response *core.DetailedResponse, err error) { + return vpcClassic.CreateKeyWithContext(context.Background(), createKeyOptions) +} + +// CreateKeyWithContext is an alternate form of the CreateKey method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateKeyWithContext(ctx context.Context, createKeyOptions *CreateKeyOptions) (result *Key, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createKeyOptions, "createKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createKeyOptions, "createKeyOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/keys`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createKeyOptions.PublicKey != nil { + body["public_key"] = createKeyOptions.PublicKey + } + if createKeyOptions.Name != nil { + body["name"] = createKeyOptions.Name + } + if createKeyOptions.ResourceGroup != nil { + body["resource_group"] = createKeyOptions.ResourceGroup + } + if createKeyOptions.Type != nil { + body["type"] = createKeyOptions.Type + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKey) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteKey : Delete a key +// This request deletes a key. This operation cannot be reversed. 
+func (vpcClassic *VpcClassicV1) DeleteKey(deleteKeyOptions *DeleteKeyOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteKeyWithContext(context.Background(), deleteKeyOptions) +} + +// DeleteKeyWithContext is an alternate form of the DeleteKey method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteKeyWithContext(ctx context.Context, deleteKeyOptions *DeleteKeyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteKeyOptions, "deleteKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteKeyOptions, "deleteKeyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteKeyOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/keys/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetKey : Retrieve a key +// This request retrieves a single key specified by the identifier in the URL. +func (vpcClassic *VpcClassicV1) GetKey(getKeyOptions *GetKeyOptions) (result *Key, response *core.DetailedResponse, err error) { + return vpcClassic.GetKeyWithContext(context.Background(), getKeyOptions) +} + +// GetKeyWithContext is an alternate form of the GetKey method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetKeyWithContext(ctx context.Context, getKeyOptions *GetKeyOptions) (result *Key, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getKeyOptions, "getKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getKeyOptions, "getKeyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getKeyOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/keys/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKey) + if err != nil { + return + } + response.Result = result 
+ + return +} + +// UpdateKey : Update a key +// This request updates a key's name. +func (vpcClassic *VpcClassicV1) UpdateKey(updateKeyOptions *UpdateKeyOptions) (result *Key, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateKeyWithContext(context.Background(), updateKeyOptions) +} + +// UpdateKeyWithContext is an alternate form of the UpdateKey method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateKeyWithContext(ctx context.Context, updateKeyOptions *UpdateKeyOptions) (result *Key, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateKeyOptions, "updateKeyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateKeyOptions, "updateKeyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateKeyOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/keys/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateKeyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateKey") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateKeyOptions.KeyPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKey) + if err != nil { + return + } + response.Result = result + + return +} + +// ListInstanceProfiles : List all instance profiles +// This request lists provisionable instance profiles in the region. An instance profile specifies the performance +// characteristics and pricing model for an instance. 
+func (vpcClassic *VpcClassicV1) ListInstanceProfiles(listInstanceProfilesOptions *ListInstanceProfilesOptions) (result *InstanceProfileCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListInstanceProfilesWithContext(context.Background(), listInstanceProfilesOptions) +} + +// ListInstanceProfilesWithContext is an alternate form of the ListInstanceProfiles method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListInstanceProfilesWithContext(ctx context.Context, listInstanceProfilesOptions *ListInstanceProfilesOptions) (result *InstanceProfileCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listInstanceProfilesOptions, "listInstanceProfilesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instance/profiles`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listInstanceProfilesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListInstanceProfiles") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listInstanceProfilesOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listInstanceProfilesOptions.Start)) + } + if listInstanceProfilesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listInstanceProfilesOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceProfileCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// GetInstanceProfile : Retrieve an instance profile +// This request retrieves a single instance profile specified by the name in the URL. 
+func (vpcClassic *VpcClassicV1) GetInstanceProfile(getInstanceProfileOptions *GetInstanceProfileOptions) (result *InstanceProfile, response *core.DetailedResponse, err error) { + return vpcClassic.GetInstanceProfileWithContext(context.Background(), getInstanceProfileOptions) +} + +// GetInstanceProfileWithContext is an alternate form of the GetInstanceProfile method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetInstanceProfileWithContext(ctx context.Context, getInstanceProfileOptions *GetInstanceProfileOptions) (result *InstanceProfile, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getInstanceProfileOptions, "getInstanceProfileOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getInstanceProfileOptions, "getInstanceProfileOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "name": *getInstanceProfileOptions.Name, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instance/profiles/{name}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getInstanceProfileOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetInstanceProfile") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceProfile) + if err != nil { + return + } + response.Result = result + + return +} + +// ListInstances : List all instances +// This request lists all instances in the region. 
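+//
+// Usage sketch (illustrative; `Start` and `Limit` drive pagination, and the
+// pointer helpers come from the go-sdk-core package):
+//
+//   instances, _, err := vpcClassic.ListInstances(
+//     &vpcclassicv1.ListInstancesOptions{Limit: core.Int64Ptr(50)})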
+func (vpcClassic *VpcClassicV1) ListInstances(listInstancesOptions *ListInstancesOptions) (result *InstanceCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListInstancesWithContext(context.Background(), listInstancesOptions) +} + +// ListInstancesWithContext is an alternate form of the ListInstances method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListInstancesWithContext(ctx context.Context, listInstancesOptions *ListInstancesOptions) (result *InstanceCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listInstancesOptions, "listInstancesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listInstancesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListInstances") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listInstancesOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listInstancesOptions.Start)) + } + if listInstancesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listInstancesOptions.Limit)) + } + if listInstancesOptions.NetworkInterfacesSubnetID != nil { + builder.AddQuery("network_interfaces.subnet.id", fmt.Sprint(*listInstancesOptions.NetworkInterfacesSubnetID)) + } + if listInstancesOptions.NetworkInterfacesSubnetCRN != nil { + builder.AddQuery("network_interfaces.subnet.crn", fmt.Sprint(*listInstancesOptions.NetworkInterfacesSubnetCRN)) + } + if listInstancesOptions.NetworkInterfacesSubnetName != nil { + builder.AddQuery("network_interfaces.subnet.name", fmt.Sprint(*listInstancesOptions.NetworkInterfacesSubnetName)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateInstance : Create an instance +// This request provisions a new instance from an instance prototype object. The prototype object is structured in the +// same way as a retrieved instance, and contains the information necessary to provision the new instance. The instance +// is automatically started. 
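+//
+// Usage sketch (illustrative; `prototype` stands for any concrete
+// InstancePrototype model, its fields elided here):
+//
+//   instance, _, err := vpcClassic.CreateInstance(
+//     &vpcclassicv1.CreateInstanceOptions{InstancePrototype: prototype})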
+func (vpcClassic *VpcClassicV1) CreateInstance(createInstanceOptions *CreateInstanceOptions) (result *Instance, response *core.DetailedResponse, err error) { + return vpcClassic.CreateInstanceWithContext(context.Background(), createInstanceOptions) +} + +// CreateInstanceWithContext is an alternate form of the CreateInstance method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateInstanceWithContext(ctx context.Context, createInstanceOptions *CreateInstanceOptions) (result *Instance, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createInstanceOptions, "createInstanceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createInstanceOptions, "createInstanceOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createInstanceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateInstance") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(createInstanceOptions.InstancePrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstance) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteInstance : Delete an instance +// This request deletes an instance. This operation cannot be reversed. Any floating IPs associated with the instance's +// network interfaces are implicitly disassociated. All flow log collectors with `auto_delete` set to `true` targeting +// the instance and/or the instance's network interfaces are automatically deleted. 
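+//
+// Usage sketch (illustrative; the ID is a placeholder, and only a response is
+// returned):
+//
+//   response, err := vpcClassic.DeleteInstance(
+//     &vpcclassicv1.DeleteInstanceOptions{ID: core.StringPtr("instance-id")})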
+func (vpcClassic *VpcClassicV1) DeleteInstance(deleteInstanceOptions *DeleteInstanceOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteInstanceWithContext(context.Background(), deleteInstanceOptions) +} + +// DeleteInstanceWithContext is an alternate form of the DeleteInstance method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteInstanceWithContext(ctx context.Context, deleteInstanceOptions *DeleteInstanceOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteInstanceOptions, "deleteInstanceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteInstanceOptions, "deleteInstanceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteInstanceOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteInstanceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteInstance") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetInstance : Retrieve an instance +// This request retrieves a single instance specified by the identifier in the URL. 
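+//
+// Usage sketch (illustrative; the ID is a placeholder):
+//
+//   instance, _, err := vpcClassic.GetInstance(
+//     &vpcclassicv1.GetInstanceOptions{ID: core.StringPtr("instance-id")})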
+func (vpcClassic *VpcClassicV1) GetInstance(getInstanceOptions *GetInstanceOptions) (result *Instance, response *core.DetailedResponse, err error) { + return vpcClassic.GetInstanceWithContext(context.Background(), getInstanceOptions) +} + +// GetInstanceWithContext is an alternate form of the GetInstance method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetInstanceWithContext(ctx context.Context, getInstanceOptions *GetInstanceOptions) (result *Instance, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getInstanceOptions, "getInstanceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getInstanceOptions, "getInstanceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getInstanceOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getInstanceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetInstance") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstance) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateInstance : Update an instance +// This request updates an instance with the information in a provided instance patch. The instance patch object is +// structured in the same way as a retrieved instance and contains only the information to be updated. 
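+//
+// Usage sketch (illustrative; assumes the generated InstancePatch model and its
+// AsPatch helper, with a placeholder ID):
+//
+//   patchModel := &vpcclassicv1.InstancePatch{Name: core.StringPtr("my-instance")}
+//   patchBody, _ := patchModel.AsPatch()
+//   instance, _, err := vpcClassic.UpdateInstance(&vpcclassicv1.UpdateInstanceOptions{
+//     ID:            core.StringPtr("instance-id"),
+//     InstancePatch: patchBody,
+//   })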
+func (vpcClassic *VpcClassicV1) UpdateInstance(updateInstanceOptions *UpdateInstanceOptions) (result *Instance, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateInstanceWithContext(context.Background(), updateInstanceOptions) +} + +// UpdateInstanceWithContext is an alternate form of the UpdateInstance method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateInstanceWithContext(ctx context.Context, updateInstanceOptions *UpdateInstanceOptions) (result *Instance, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateInstanceOptions, "updateInstanceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateInstanceOptions, "updateInstanceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateInstanceOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateInstanceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateInstance") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateInstanceOptions.InstancePatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstance) + if err != nil { + return + } + response.Result = result + + return +} + +// GetInstanceInitialization : Retrieve initialization configuration for an instance +// This request retrieves configuration variables used to initialize the instance, such as SSH keys and the Windows +// administrator password. 
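+//
+// Usage sketch (illustrative; the ID is a placeholder):
+//
+//   init, _, err := vpcClassic.GetInstanceInitialization(
+//     &vpcclassicv1.GetInstanceInitializationOptions{ID: core.StringPtr("instance-id")})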
+func (vpcClassic *VpcClassicV1) GetInstanceInitialization(getInstanceInitializationOptions *GetInstanceInitializationOptions) (result *InstanceInitialization, response *core.DetailedResponse, err error) { + return vpcClassic.GetInstanceInitializationWithContext(context.Background(), getInstanceInitializationOptions) +} + +// GetInstanceInitializationWithContext is an alternate form of the GetInstanceInitialization method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetInstanceInitializationWithContext(ctx context.Context, getInstanceInitializationOptions *GetInstanceInitializationOptions) (result *InstanceInitialization, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getInstanceInitializationOptions, "getInstanceInitializationOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getInstanceInitializationOptions, "getInstanceInitializationOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getInstanceInitializationOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{id}/initialization`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getInstanceInitializationOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetInstanceInitialization") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceInitialization) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateInstanceAction : Create an instance action +// This request creates a new action which will be queued up to run as soon as any pending or running actions have +// completed. 
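+//
+// Usage sketch (illustrative; "reboot" is one example action type, and the
+// instance ID is a placeholder):
+//
+//   action, _, err := vpcClassic.CreateInstanceAction(
+//     &vpcclassicv1.CreateInstanceActionOptions{
+//       InstanceID: core.StringPtr("instance-id"),
+//       Type:       core.StringPtr("reboot"),
+//     })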
+func (vpcClassic *VpcClassicV1) CreateInstanceAction(createInstanceActionOptions *CreateInstanceActionOptions) (result *InstanceAction, response *core.DetailedResponse, err error) { + return vpcClassic.CreateInstanceActionWithContext(context.Background(), createInstanceActionOptions) +} + +// CreateInstanceActionWithContext is an alternate form of the CreateInstanceAction method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateInstanceActionWithContext(ctx context.Context, createInstanceActionOptions *CreateInstanceActionOptions) (result *InstanceAction, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createInstanceActionOptions, "createInstanceActionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createInstanceActionOptions, "createInstanceActionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *createInstanceActionOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/actions`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createInstanceActionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateInstanceAction") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createInstanceActionOptions.Type != nil { + body["type"] = createInstanceActionOptions.Type + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceAction) + if err != nil { + return + } + response.Result = result + + return +} + +// ListInstanceNetworkInterfaces : List all network interfaces on an instance +// This request lists all network interfaces on an instance. A network interface is an abstract representation of a +// network interface card and connects an instance to a subnet. While each network interface can attach to only one +// subnet, multiple network interfaces can be created to attach to multiple subnets. Multiple interfaces may also attach +// to the same subnet. 
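+//
+// Usage sketch (illustrative; the instance ID is a placeholder):
+//
+//   nics, _, err := vpcClassic.ListInstanceNetworkInterfaces(
+//     &vpcclassicv1.ListInstanceNetworkInterfacesOptions{
+//       InstanceID: core.StringPtr("instance-id"),
+//     })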
+func (vpcClassic *VpcClassicV1) ListInstanceNetworkInterfaces(listInstanceNetworkInterfacesOptions *ListInstanceNetworkInterfacesOptions) (result *NetworkInterfaceUnpaginatedCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListInstanceNetworkInterfacesWithContext(context.Background(), listInstanceNetworkInterfacesOptions) +} + +// ListInstanceNetworkInterfacesWithContext is an alternate form of the ListInstanceNetworkInterfaces method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListInstanceNetworkInterfacesWithContext(ctx context.Context, listInstanceNetworkInterfacesOptions *ListInstanceNetworkInterfacesOptions) (result *NetworkInterfaceUnpaginatedCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listInstanceNetworkInterfacesOptions, "listInstanceNetworkInterfacesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listInstanceNetworkInterfacesOptions, "listInstanceNetworkInterfacesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *listInstanceNetworkInterfacesOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/network_interfaces`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listInstanceNetworkInterfacesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListInstanceNetworkInterfaces") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterfaceUnpaginatedCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// GetInstanceNetworkInterface : Retrieve a network interface +// This request retrieves a single network interface specified by the identifier in the URL. 
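+//
+// Usage sketch (illustrative; both IDs are placeholders):
+//
+//   nic, _, err := vpcClassic.GetInstanceNetworkInterface(
+//     &vpcclassicv1.GetInstanceNetworkInterfaceOptions{
+//       InstanceID: core.StringPtr("instance-id"),
+//       ID:         core.StringPtr("nic-id"),
+//     })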
+func (vpcClassic *VpcClassicV1) GetInstanceNetworkInterface(getInstanceNetworkInterfaceOptions *GetInstanceNetworkInterfaceOptions) (result *NetworkInterface, response *core.DetailedResponse, err error) { + return vpcClassic.GetInstanceNetworkInterfaceWithContext(context.Background(), getInstanceNetworkInterfaceOptions) +} + +// GetInstanceNetworkInterfaceWithContext is an alternate form of the GetInstanceNetworkInterface method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetInstanceNetworkInterfaceWithContext(ctx context.Context, getInstanceNetworkInterfaceOptions *GetInstanceNetworkInterfaceOptions) (result *NetworkInterface, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getInstanceNetworkInterfaceOptions, "getInstanceNetworkInterfaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getInstanceNetworkInterfaceOptions, "getInstanceNetworkInterfaceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *getInstanceNetworkInterfaceOptions.InstanceID, + "id": *getInstanceNetworkInterfaceOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/network_interfaces/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getInstanceNetworkInterfaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetInstanceNetworkInterface") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface) + if err != nil { + return + } + response.Result = result + + return +} + +// ListInstanceNetworkInterfaceFloatingIps : List all floating IPs associated with a network interface +// This request lists all floating IPs associated with a network interface. 
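+//
+// Usage sketch (illustrative; both IDs are placeholders):
+//
+//   fips, _, err := vpcClassic.ListInstanceNetworkInterfaceFloatingIps(
+//     &vpcclassicv1.ListInstanceNetworkInterfaceFloatingIpsOptions{
+//       InstanceID:         core.StringPtr("instance-id"),
+//       NetworkInterfaceID: core.StringPtr("nic-id"),
+//     })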
+func (vpcClassic *VpcClassicV1) ListInstanceNetworkInterfaceFloatingIps(listInstanceNetworkInterfaceFloatingIpsOptions *ListInstanceNetworkInterfaceFloatingIpsOptions) (result *FloatingIPUnpaginatedCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListInstanceNetworkInterfaceFloatingIpsWithContext(context.Background(), listInstanceNetworkInterfaceFloatingIpsOptions) +} + +// ListInstanceNetworkInterfaceFloatingIpsWithContext is an alternate form of the ListInstanceNetworkInterfaceFloatingIps method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListInstanceNetworkInterfaceFloatingIpsWithContext(ctx context.Context, listInstanceNetworkInterfaceFloatingIpsOptions *ListInstanceNetworkInterfaceFloatingIpsOptions) (result *FloatingIPUnpaginatedCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listInstanceNetworkInterfaceFloatingIpsOptions, "listInstanceNetworkInterfaceFloatingIpsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listInstanceNetworkInterfaceFloatingIpsOptions, "listInstanceNetworkInterfaceFloatingIpsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *listInstanceNetworkInterfaceFloatingIpsOptions.InstanceID, + "network_interface_id": *listInstanceNetworkInterfaceFloatingIpsOptions.NetworkInterfaceID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/network_interfaces/{network_interface_id}/floating_ips`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listInstanceNetworkInterfaceFloatingIpsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListInstanceNetworkInterfaceFloatingIps") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIPUnpaginatedCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// RemoveInstanceNetworkInterfaceFloatingIP : Disassociate a floating IP from a network interface +// This request disassociates the specified floating IP from the specified network interface. 
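+//
+// Usage sketch (illustrative; all IDs are placeholders, and only a response is
+// returned):
+//
+//   response, err := vpcClassic.RemoveInstanceNetworkInterfaceFloatingIP(
+//     &vpcclassicv1.RemoveInstanceNetworkInterfaceFloatingIPOptions{
+//       InstanceID:         core.StringPtr("instance-id"),
+//       NetworkInterfaceID: core.StringPtr("nic-id"),
+//       ID:                 core.StringPtr("floating-ip-id"),
+//     })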
+func (vpcClassic *VpcClassicV1) RemoveInstanceNetworkInterfaceFloatingIP(removeInstanceNetworkInterfaceFloatingIPOptions *RemoveInstanceNetworkInterfaceFloatingIPOptions) (response *core.DetailedResponse, err error) {
+	return vpcClassic.RemoveInstanceNetworkInterfaceFloatingIPWithContext(context.Background(), removeInstanceNetworkInterfaceFloatingIPOptions)
+}
+
+// RemoveInstanceNetworkInterfaceFloatingIPWithContext is an alternate form of the RemoveInstanceNetworkInterfaceFloatingIP method which supports a Context parameter
+func (vpcClassic *VpcClassicV1) RemoveInstanceNetworkInterfaceFloatingIPWithContext(ctx context.Context, removeInstanceNetworkInterfaceFloatingIPOptions *RemoveInstanceNetworkInterfaceFloatingIPOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(removeInstanceNetworkInterfaceFloatingIPOptions, "removeInstanceNetworkInterfaceFloatingIPOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(removeInstanceNetworkInterfaceFloatingIPOptions, "removeInstanceNetworkInterfaceFloatingIPOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"instance_id":          *removeInstanceNetworkInterfaceFloatingIPOptions.InstanceID,
+		"network_interface_id": *removeInstanceNetworkInterfaceFloatingIPOptions.NetworkInterfaceID,
+		"id":                   *removeInstanceNetworkInterfaceFloatingIPOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/network_interfaces/{network_interface_id}/floating_ips/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range removeInstanceNetworkInterfaceFloatingIPOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "RemoveInstanceNetworkInterfaceFloatingIP")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation))
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = vpcClassic.Service.Request(request, nil)
+
+	return
+}
+
+// GetInstanceNetworkInterfaceFloatingIP : Retrieve associated floating IP
+// This request retrieves a specified floating IP address if it is associated with the network interface and instance
+// specified in the URL.
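+//
+// Usage sketch (illustrative; all IDs are placeholders):
+//
+//   fip, _, err := vpcClassic.GetInstanceNetworkInterfaceFloatingIP(
+//     &vpcclassicv1.GetInstanceNetworkInterfaceFloatingIPOptions{
+//       InstanceID:         core.StringPtr("instance-id"),
+//       NetworkInterfaceID: core.StringPtr("nic-id"),
+//       ID:                 core.StringPtr("floating-ip-id"),
+//     })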
+func (vpcClassic *VpcClassicV1) GetInstanceNetworkInterfaceFloatingIP(getInstanceNetworkInterfaceFloatingIPOptions *GetInstanceNetworkInterfaceFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) { + return vpcClassic.GetInstanceNetworkInterfaceFloatingIPWithContext(context.Background(), getInstanceNetworkInterfaceFloatingIPOptions) +} + +// GetInstanceNetworkInterfaceFloatingIPWithContext is an alternate form of the GetInstanceNetworkInterfaceFloatingIP method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetInstanceNetworkInterfaceFloatingIPWithContext(ctx context.Context, getInstanceNetworkInterfaceFloatingIPOptions *GetInstanceNetworkInterfaceFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getInstanceNetworkInterfaceFloatingIPOptions, "getInstanceNetworkInterfaceFloatingIPOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getInstanceNetworkInterfaceFloatingIPOptions, "getInstanceNetworkInterfaceFloatingIPOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *getInstanceNetworkInterfaceFloatingIPOptions.InstanceID, + "network_interface_id": *getInstanceNetworkInterfaceFloatingIPOptions.NetworkInterfaceID, + "id": *getInstanceNetworkInterfaceFloatingIPOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/network_interfaces/{network_interface_id}/floating_ips/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getInstanceNetworkInterfaceFloatingIPOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetInstanceNetworkInterfaceFloatingIP") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP) + if err != nil { + return + } + response.Result = result + + return +} + +// AddInstanceNetworkInterfaceFloatingIP : Associate a floating IP with a network interface +// This request associates the specified floating IP with the specified network interface, replacing any existing +// association. For this request to succeed, the existing floating IP must not be required by another resource, such as +// a public gateway. A request body is not required, and if supplied, is ignored. 
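+//
+// Usage sketch (illustrative; all IDs are placeholders, and no request body is
+// needed):
+//
+//   fip, _, err := vpcClassic.AddInstanceNetworkInterfaceFloatingIP(
+//     &vpcclassicv1.AddInstanceNetworkInterfaceFloatingIPOptions{
+//       InstanceID:         core.StringPtr("instance-id"),
+//       NetworkInterfaceID: core.StringPtr("nic-id"),
+//       ID:                 core.StringPtr("floating-ip-id"),
+//     })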
+func (vpcClassic *VpcClassicV1) AddInstanceNetworkInterfaceFloatingIP(addInstanceNetworkInterfaceFloatingIPOptions *AddInstanceNetworkInterfaceFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) {
+	return vpcClassic.AddInstanceNetworkInterfaceFloatingIPWithContext(context.Background(), addInstanceNetworkInterfaceFloatingIPOptions)
+}
+
+// AddInstanceNetworkInterfaceFloatingIPWithContext is an alternate form of the AddInstanceNetworkInterfaceFloatingIP method which supports a Context parameter
+func (vpcClassic *VpcClassicV1) AddInstanceNetworkInterfaceFloatingIPWithContext(ctx context.Context, addInstanceNetworkInterfaceFloatingIPOptions *AddInstanceNetworkInterfaceFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(addInstanceNetworkInterfaceFloatingIPOptions, "addInstanceNetworkInterfaceFloatingIPOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(addInstanceNetworkInterfaceFloatingIPOptions, "addInstanceNetworkInterfaceFloatingIPOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"instance_id":          *addInstanceNetworkInterfaceFloatingIPOptions.InstanceID,
+		"network_interface_id": *addInstanceNetworkInterfaceFloatingIPOptions.NetworkInterfaceID,
+		"id":                   *addInstanceNetworkInterfaceFloatingIPOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.PUT)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/network_interfaces/{network_interface_id}/floating_ips/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range addInstanceNetworkInterfaceFloatingIPOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "AddInstanceNetworkInterfaceFloatingIP")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation))
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = vpcClassic.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP)
+	if err != nil {
+		return
+	}
+	response.Result = result
+
+	return
+}
+
+// ListInstanceVolumeAttachments : List all volume attachments on an instance
+// This request lists all volume attachments on an instance. A volume attachment connects a volume to an instance. Each
+// instance may have many volume attachments but each volume attachment connects exactly one instance to exactly one
+// volume.
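+//
+// Usage sketch (illustrative; the instance ID is a placeholder):
+//
+//   attachments, _, err := vpcClassic.ListInstanceVolumeAttachments(
+//     &vpcclassicv1.ListInstanceVolumeAttachmentsOptions{
+//       InstanceID: core.StringPtr("instance-id"),
+//     })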
+func (vpcClassic *VpcClassicV1) ListInstanceVolumeAttachments(listInstanceVolumeAttachmentsOptions *ListInstanceVolumeAttachmentsOptions) (result *VolumeAttachmentCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListInstanceVolumeAttachmentsWithContext(context.Background(), listInstanceVolumeAttachmentsOptions) +} + +// ListInstanceVolumeAttachmentsWithContext is an alternate form of the ListInstanceVolumeAttachments method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListInstanceVolumeAttachmentsWithContext(ctx context.Context, listInstanceVolumeAttachmentsOptions *ListInstanceVolumeAttachmentsOptions) (result *VolumeAttachmentCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listInstanceVolumeAttachmentsOptions, "listInstanceVolumeAttachmentsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listInstanceVolumeAttachmentsOptions, "listInstanceVolumeAttachmentsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *listInstanceVolumeAttachmentsOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/volume_attachments`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listInstanceVolumeAttachmentsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListInstanceVolumeAttachments") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachmentCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateInstanceVolumeAttachment : Create a volume attachment on an instance +// This request creates a new volume attachment from a volume attachment prototype object. The prototype object is +// structured in the same way as a retrieved volume attachment, and contains the information necessary to create the new +// volume attachment. The creation of a new volume attachment connects a volume to an instance. 
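+//
+// Usage sketch (illustrative; `volumeIdentity` stands for any concrete volume
+// identity model accepted by the Volume field, its fields elided here):
+//
+//   attachment, _, err := vpcClassic.CreateInstanceVolumeAttachment(
+//     &vpcclassicv1.CreateInstanceVolumeAttachmentOptions{
+//       InstanceID: core.StringPtr("instance-id"),
+//       Volume:     volumeIdentity,
+//     })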
+func (vpcClassic *VpcClassicV1) CreateInstanceVolumeAttachment(createInstanceVolumeAttachmentOptions *CreateInstanceVolumeAttachmentOptions) (result *VolumeAttachment, response *core.DetailedResponse, err error) { + return vpcClassic.CreateInstanceVolumeAttachmentWithContext(context.Background(), createInstanceVolumeAttachmentOptions) +} + +// CreateInstanceVolumeAttachmentWithContext is an alternate form of the CreateInstanceVolumeAttachment method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateInstanceVolumeAttachmentWithContext(ctx context.Context, createInstanceVolumeAttachmentOptions *CreateInstanceVolumeAttachmentOptions) (result *VolumeAttachment, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createInstanceVolumeAttachmentOptions, "createInstanceVolumeAttachmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createInstanceVolumeAttachmentOptions, "createInstanceVolumeAttachmentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *createInstanceVolumeAttachmentOptions.InstanceID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/volume_attachments`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createInstanceVolumeAttachmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateInstanceVolumeAttachment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createInstanceVolumeAttachmentOptions.Volume != nil { + body["volume"] = createInstanceVolumeAttachmentOptions.Volume + } + if createInstanceVolumeAttachmentOptions.DeleteVolumeOnInstanceDelete != nil { + body["delete_volume_on_instance_delete"] = createInstanceVolumeAttachmentOptions.DeleteVolumeOnInstanceDelete + } + if createInstanceVolumeAttachmentOptions.Name != nil { + body["name"] = createInstanceVolumeAttachmentOptions.Name + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachment) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteInstanceVolumeAttachment : Delete a volume attachment +// This request deletes a volume attachment. The deletion of a volume attachment detaches a volume from an instance. 
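+//
+// Usage sketch (illustrative; both IDs are placeholders, and only a response is
+// returned):
+//
+//   response, err := vpcClassic.DeleteInstanceVolumeAttachment(
+//     &vpcclassicv1.DeleteInstanceVolumeAttachmentOptions{
+//       InstanceID: core.StringPtr("instance-id"),
+//       ID:         core.StringPtr("attachment-id"),
+//     })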
+func (vpcClassic *VpcClassicV1) DeleteInstanceVolumeAttachment(deleteInstanceVolumeAttachmentOptions *DeleteInstanceVolumeAttachmentOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteInstanceVolumeAttachmentWithContext(context.Background(), deleteInstanceVolumeAttachmentOptions) +} + +// DeleteInstanceVolumeAttachmentWithContext is an alternate form of the DeleteInstanceVolumeAttachment method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteInstanceVolumeAttachmentWithContext(ctx context.Context, deleteInstanceVolumeAttachmentOptions *DeleteInstanceVolumeAttachmentOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteInstanceVolumeAttachmentOptions, "deleteInstanceVolumeAttachmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteInstanceVolumeAttachmentOptions, "deleteInstanceVolumeAttachmentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *deleteInstanceVolumeAttachmentOptions.InstanceID, + "id": *deleteInstanceVolumeAttachmentOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/volume_attachments/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteInstanceVolumeAttachmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteInstanceVolumeAttachment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetInstanceVolumeAttachment : Retrieve a volume attachment +// This request retrieves a single volume attachment specified by the identifier in the URL. 
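+//
+// Usage sketch (illustrative; both IDs are placeholders):
+//
+//   attachment, _, err := vpcClassic.GetInstanceVolumeAttachment(
+//     &vpcclassicv1.GetInstanceVolumeAttachmentOptions{
+//       InstanceID: core.StringPtr("instance-id"),
+//       ID:         core.StringPtr("attachment-id"),
+//     })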
+func (vpcClassic *VpcClassicV1) GetInstanceVolumeAttachment(getInstanceVolumeAttachmentOptions *GetInstanceVolumeAttachmentOptions) (result *VolumeAttachment, response *core.DetailedResponse, err error) { + return vpcClassic.GetInstanceVolumeAttachmentWithContext(context.Background(), getInstanceVolumeAttachmentOptions) +} + +// GetInstanceVolumeAttachmentWithContext is an alternate form of the GetInstanceVolumeAttachment method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetInstanceVolumeAttachmentWithContext(ctx context.Context, getInstanceVolumeAttachmentOptions *GetInstanceVolumeAttachmentOptions) (result *VolumeAttachment, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getInstanceVolumeAttachmentOptions, "getInstanceVolumeAttachmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getInstanceVolumeAttachmentOptions, "getInstanceVolumeAttachmentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *getInstanceVolumeAttachmentOptions.InstanceID, + "id": *getInstanceVolumeAttachmentOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/volume_attachments/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getInstanceVolumeAttachmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetInstanceVolumeAttachment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachment) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateInstanceVolumeAttachment : Update a volume attachment +// This request updates a volume attachment with the information in a provided volume attachment patch. The volume +// attachment patch object is structured in the same way as a retrieved volume attachment and can contain an updated +// name. 
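+//
+// Usage sketch (illustrative; assumes the generated VolumeAttachmentPatch model
+// and its AsPatch helper, with placeholder IDs):
+//
+//   patchModel := &vpcclassicv1.VolumeAttachmentPatch{Name: core.StringPtr("my-attachment")}
+//   patchBody, _ := patchModel.AsPatch()
+//   attachment, _, err := vpcClassic.UpdateInstanceVolumeAttachment(
+//     &vpcclassicv1.UpdateInstanceVolumeAttachmentOptions{
+//       InstanceID:            core.StringPtr("instance-id"),
+//       ID:                    core.StringPtr("attachment-id"),
+//       VolumeAttachmentPatch: patchBody,
+//     })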
+func (vpcClassic *VpcClassicV1) UpdateInstanceVolumeAttachment(updateInstanceVolumeAttachmentOptions *UpdateInstanceVolumeAttachmentOptions) (result *VolumeAttachment, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateInstanceVolumeAttachmentWithContext(context.Background(), updateInstanceVolumeAttachmentOptions) +} + +// UpdateInstanceVolumeAttachmentWithContext is an alternate form of the UpdateInstanceVolumeAttachment method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateInstanceVolumeAttachmentWithContext(ctx context.Context, updateInstanceVolumeAttachmentOptions *UpdateInstanceVolumeAttachmentOptions) (result *VolumeAttachment, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateInstanceVolumeAttachmentOptions, "updateInstanceVolumeAttachmentOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateInstanceVolumeAttachmentOptions, "updateInstanceVolumeAttachmentOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_id": *updateInstanceVolumeAttachmentOptions.InstanceID, + "id": *updateInstanceVolumeAttachmentOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/instances/{instance_id}/volume_attachments/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateInstanceVolumeAttachmentOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateInstanceVolumeAttachment") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateInstanceVolumeAttachmentOptions.VolumeAttachmentPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachment) + if err != nil { + return + } + response.Result = result + + return +} + +// ListVolumeProfiles : List all volume profiles +// This request lists all volume profiles available in the region. A volume profile specifies the performance +// characteristics and pricing model for a volume. 
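+//
+// Usage sketch (illustrative only, not generated code):
+//
+//   profiles, _, err := vpcClassic.ListVolumeProfiles(
+//     &vpcclassicv1.ListVolumeProfilesOptions{})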
+func (vpcClassic *VpcClassicV1) ListVolumeProfiles(listVolumeProfilesOptions *ListVolumeProfilesOptions) (result *VolumeProfileCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListVolumeProfilesWithContext(context.Background(), listVolumeProfilesOptions) +} + +// ListVolumeProfilesWithContext is an alternate form of the ListVolumeProfiles method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListVolumeProfilesWithContext(ctx context.Context, listVolumeProfilesOptions *ListVolumeProfilesOptions) (result *VolumeProfileCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listVolumeProfilesOptions, "listVolumeProfilesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/volume/profiles`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listVolumeProfilesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListVolumeProfiles") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listVolumeProfilesOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listVolumeProfilesOptions.Start)) + } + if listVolumeProfilesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listVolumeProfilesOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeProfileCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// GetVolumeProfile : Retrieve a volume profile +// This request retrieves a single volume profile specified by the name in the URL. 
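+//
+// Usage sketch (illustrative; the profile name is a placeholder):
+//
+//   profile, _, err := vpcClassic.GetVolumeProfile(
+//     &vpcclassicv1.GetVolumeProfileOptions{Name: core.StringPtr("general-purpose")})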
+func (vpcClassic *VpcClassicV1) GetVolumeProfile(getVolumeProfileOptions *GetVolumeProfileOptions) (result *VolumeProfile, response *core.DetailedResponse, err error) { + return vpcClassic.GetVolumeProfileWithContext(context.Background(), getVolumeProfileOptions) +} + +// GetVolumeProfileWithContext is an alternate form of the GetVolumeProfile method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetVolumeProfileWithContext(ctx context.Context, getVolumeProfileOptions *GetVolumeProfileOptions) (result *VolumeProfile, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getVolumeProfileOptions, "getVolumeProfileOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getVolumeProfileOptions, "getVolumeProfileOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "name": *getVolumeProfileOptions.Name, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/volume/profiles/{name}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getVolumeProfileOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetVolumeProfile") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeProfile) + if err != nil { + return + } + response.Result = result + + return +} + +// ListVolumes : List all volumes +// This request lists all volumes in the region. Volumes are network-connected block storage devices that may be +// attached to one or more instances in the same region. 
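+//
+// Usage sketch (illustrative; the zone name is a placeholder filter):
+//
+//   volumes, _, err := vpcClassic.ListVolumes(
+//     &vpcclassicv1.ListVolumesOptions{ZoneName: core.StringPtr("us-south-1")})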
+func (vpcClassic *VpcClassicV1) ListVolumes(listVolumesOptions *ListVolumesOptions) (result *VolumeCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListVolumesWithContext(context.Background(), listVolumesOptions) +} + +// ListVolumesWithContext is an alternate form of the ListVolumes method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListVolumesWithContext(ctx context.Context, listVolumesOptions *ListVolumesOptions) (result *VolumeCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listVolumesOptions, "listVolumesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/volumes`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listVolumesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListVolumes") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listVolumesOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listVolumesOptions.Start)) + } + if listVolumesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listVolumesOptions.Limit)) + } + if listVolumesOptions.Name != nil { + builder.AddQuery("name", fmt.Sprint(*listVolumesOptions.Name)) + } + if listVolumesOptions.ZoneName != nil { + builder.AddQuery("zone.name", fmt.Sprint(*listVolumesOptions.ZoneName)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateVolume : Create a volume +// This request creates a new volume from a volume prototype object. The prototype object is structured in the same way +// as a retrieved volume, and contains the information necessary to create the new volume. 
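+//
+// Usage sketch (illustrative; `prototype` stands for any concrete VolumePrototype
+// model, its fields elided here):
+//
+//   volume, _, err := vpcClassic.CreateVolume(
+//     &vpcclassicv1.CreateVolumeOptions{VolumePrototype: prototype})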
+func (vpcClassic *VpcClassicV1) CreateVolume(createVolumeOptions *CreateVolumeOptions) (result *Volume, response *core.DetailedResponse, err error) { + return vpcClassic.CreateVolumeWithContext(context.Background(), createVolumeOptions) +} + +// CreateVolumeWithContext is an alternate form of the CreateVolume method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateVolumeWithContext(ctx context.Context, createVolumeOptions *CreateVolumeOptions) (result *Volume, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createVolumeOptions, "createVolumeOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createVolumeOptions, "createVolumeOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/volumes`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createVolumeOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateVolume") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(createVolumeOptions.VolumePrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolume) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteVolume : Delete a volume +// This request deletes a volume. This operation cannot be reversed. For this request to succeed, the volume must not be +// attached to any instances. 
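+//
+// Usage sketch (illustrative; the ID is a placeholder, and only a response is
+// returned):
+//
+//   response, err := vpcClassic.DeleteVolume(
+//     &vpcclassicv1.DeleteVolumeOptions{ID: core.StringPtr("volume-id")})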
+func (vpcClassic *VpcClassicV1) DeleteVolume(deleteVolumeOptions *DeleteVolumeOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteVolumeWithContext(context.Background(), deleteVolumeOptions) +} + +// DeleteVolumeWithContext is an alternate form of the DeleteVolume method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteVolumeWithContext(ctx context.Context, deleteVolumeOptions *DeleteVolumeOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteVolumeOptions, "deleteVolumeOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteVolumeOptions, "deleteVolumeOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteVolumeOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/volumes/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteVolumeOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteVolume") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetVolume : Retrieve a volume +// This request retrieves a single volume specified by the identifier in the URL. 
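DeleteVolume returns no result model: the generated method passes a nil result to Service.Request, so callers only get the DetailedResponse. A short sketch (volumeID is a hypothetical identifier; the 204 success code is an assumption about the service):

func deleteExampleVolume(service *vpcclassicv1.VpcClassicV1, volumeID string) error {
	response, err := service.DeleteVolume(&vpcclassicv1.DeleteVolumeOptions{
		ID: core.StringPtr(volumeID),
	})
	if err != nil {
		return err
	}
	// No body on success; the status code is the only signal.
	fmt.Println("delete status:", response.GetStatusCode())
	return nil
}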
+func (vpcClassic *VpcClassicV1) GetVolume(getVolumeOptions *GetVolumeOptions) (result *Volume, response *core.DetailedResponse, err error) { + return vpcClassic.GetVolumeWithContext(context.Background(), getVolumeOptions) +} + +// GetVolumeWithContext is an alternate form of the GetVolume method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetVolumeWithContext(ctx context.Context, getVolumeOptions *GetVolumeOptions) (result *Volume, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getVolumeOptions, "getVolumeOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getVolumeOptions, "getVolumeOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getVolumeOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/volumes/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getVolumeOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetVolume") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolume) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateVolume : Update a volume +// This request updates a volume with the information in a provided volume patch. The volume patch object is structured +// in the same way as a retrieved volume and contains only the information to be updated. 
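Every options struct carries a Headers map that the methods above copy verbatim onto the outgoing request, which is useful for per-request tracing. A sketch against GetVolume (the header name is purely illustrative):

func getVolumeWithTracing(service *vpcclassicv1.VpcClassicV1, volumeID string) (*vpcclassicv1.Volume, error) {
	// Custom headers ride along with the generated SDK headers.
	volume, _, err := service.GetVolume(&vpcclassicv1.GetVolumeOptions{
		ID:      core.StringPtr(volumeID),
		Headers: map[string]string{"X-Transaction-Id": "debug-1234"},
	})
	return volume, err
}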
+func (vpcClassic *VpcClassicV1) UpdateVolume(updateVolumeOptions *UpdateVolumeOptions) (result *Volume, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateVolumeWithContext(context.Background(), updateVolumeOptions) +} + +// UpdateVolumeWithContext is an alternate form of the UpdateVolume method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateVolumeWithContext(ctx context.Context, updateVolumeOptions *UpdateVolumeOptions) (result *Volume, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateVolumeOptions, "updateVolumeOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateVolumeOptions, "updateVolumeOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateVolumeOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/volumes/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateVolumeOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateVolume") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateVolumeOptions.VolumePatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolume) + if err != nil { + return + } + response.Result = result + + return +} + +// ListRegions : List all regions +// This request lists all regions. Each region is a separate geographic area that contains multiple isolated zones. +// Resources can be provisioned into one or more zones in a region. Each zone is isolated, but connected to other zones +// in the same region with low-latency and high-bandwidth links. Regions represent the top-level of fault isolation +// available. Resources deployed within a single region also benefit from the low latency afforded by geographic +// proximity. 
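UpdateVolume sends application/merge-patch+json, so only the fields present in the patch are changed server-side. A sketch that renames a volume, assuming VolumePatch is the plain map[string]interface{} used by generated SDKs of this vintage (later releases build it from a typed struct via an AsPatch helper):

func renameVolume(service *vpcclassicv1.VpcClassicV1, volumeID, newName string) error {
	// JSON merge patch: untouched fields keep their current values.
	volume, _, err := service.UpdateVolume(&vpcclassicv1.UpdateVolumeOptions{
		ID:          core.StringPtr(volumeID),
		VolumePatch: map[string]interface{}{"name": newName},
	})
	if err != nil {
		return err
	}
	fmt.Println("volume is now named", *volume.Name)
	return nil
}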
+func (vpcClassic *VpcClassicV1) ListRegions(listRegionsOptions *ListRegionsOptions) (result *RegionCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListRegionsWithContext(context.Background(), listRegionsOptions) +} + +// ListRegionsWithContext is an alternate form of the ListRegions method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListRegionsWithContext(ctx context.Context, listRegionsOptions *ListRegionsOptions) (result *RegionCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listRegionsOptions, "listRegionsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/regions`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listRegionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListRegions") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRegionCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// GetRegion : Retrieve a region +// This request retrieves a single region specified by the name in the URL. 
+func (vpcClassic *VpcClassicV1) GetRegion(getRegionOptions *GetRegionOptions) (result *Region, response *core.DetailedResponse, err error) { + return vpcClassic.GetRegionWithContext(context.Background(), getRegionOptions) +} + +// GetRegionWithContext is an alternate form of the GetRegion method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetRegionWithContext(ctx context.Context, getRegionOptions *GetRegionOptions) (result *Region, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getRegionOptions, "getRegionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getRegionOptions, "getRegionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "name": *getRegionOptions.Name, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/regions/{name}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getRegionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetRegion") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRegion) + if err != nil { + return + } + response.Result = result + + return +} + +// ListRegionZones : List all zones in a region +// This request lists all zones in a region. Zones represent logically-isolated data centers with high-bandwidth and +// low-latency interconnects to other zones in the same region. Faults in a zone do not affect other zones. 
+func (vpcClassic *VpcClassicV1) ListRegionZones(listRegionZonesOptions *ListRegionZonesOptions) (result *ZoneCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListRegionZonesWithContext(context.Background(), listRegionZonesOptions) +} + +// ListRegionZonesWithContext is an alternate form of the ListRegionZones method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListRegionZonesWithContext(ctx context.Context, listRegionZonesOptions *ListRegionZonesOptions) (result *ZoneCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listRegionZonesOptions, "listRegionZonesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listRegionZonesOptions, "listRegionZonesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "region_name": *listRegionZonesOptions.RegionName, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/regions/{region_name}/zones`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listRegionZonesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListRegionZones") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZoneCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// GetRegionZone : Retrieve a zone +// This request retrieves a single zone specified by the region and zone names in the URL. 
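The region and zone operations compose naturally for discovery. A sketch that walks every zone in every region; RegionCollection.Regions and ZoneCollection.Zones are assumed field names following the collection conventions elsewhere in this file:

func printZones(service *vpcclassicv1.VpcClassicV1) error {
	regions, _, err := service.ListRegions(&vpcclassicv1.ListRegionsOptions{})
	if err != nil {
		return err
	}
	for _, region := range regions.Regions {
		// RegionName is the path parameter substituted into
		// /regions/{region_name}/zones above.
		zones, _, err := service.ListRegionZones(&vpcclassicv1.ListRegionZonesOptions{
			RegionName: region.Name,
		})
		if err != nil {
			return err
		}
		for _, zone := range zones.Zones {
			fmt.Printf("%s/%s\n", *region.Name, *zone.Name)
		}
	}
	return nil
}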
+func (vpcClassic *VpcClassicV1) GetRegionZone(getRegionZoneOptions *GetRegionZoneOptions) (result *Zone, response *core.DetailedResponse, err error) { + return vpcClassic.GetRegionZoneWithContext(context.Background(), getRegionZoneOptions) +} + +// GetRegionZoneWithContext is an alternate form of the GetRegionZone method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetRegionZoneWithContext(ctx context.Context, getRegionZoneOptions *GetRegionZoneOptions) (result *Zone, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getRegionZoneOptions, "getRegionZoneOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getRegionZoneOptions, "getRegionZoneOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "region_name": *getRegionZoneOptions.RegionName, + "name": *getRegionZoneOptions.Name, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/regions/{region_name}/zones/{name}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getRegionZoneOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetRegionZone") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZone) + if err != nil { + return + } + response.Result = result + + return +} + +// ListPublicGateways : List all public gateways +// This request lists all public gateways in the region. A public gateway is a virtual network device associated with a +// VPC, which allows access to the Internet. A public gateway resides in a zone and can be connected to subnets in the +// same zone only. 
+func (vpcClassic *VpcClassicV1) ListPublicGateways(listPublicGatewaysOptions *ListPublicGatewaysOptions) (result *PublicGatewayCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListPublicGatewaysWithContext(context.Background(), listPublicGatewaysOptions) +} + +// ListPublicGatewaysWithContext is an alternate form of the ListPublicGateways method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListPublicGatewaysWithContext(ctx context.Context, listPublicGatewaysOptions *ListPublicGatewaysOptions) (result *PublicGatewayCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listPublicGatewaysOptions, "listPublicGatewaysOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/public_gateways`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listPublicGatewaysOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListPublicGateways") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listPublicGatewaysOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listPublicGatewaysOptions.Start)) + } + if listPublicGatewaysOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listPublicGatewaysOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGatewayCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreatePublicGateway : Create a public gateway +// This request creates a new public gateway from a public gateway prototype object. For this to succeed, the VPC must +// not already have a public gateway in the specified zone. +// +// If a floating IP is provided, it must be unbound. If a floating IP is not provided, one will be created and bound to +// the public gateway. Once a public gateway has been created, its floating IP cannot be unbound. A public gateway must +// be explicitly attached to each subnet it will provide connectivity for. 
+func (vpcClassic *VpcClassicV1) CreatePublicGateway(createPublicGatewayOptions *CreatePublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + return vpcClassic.CreatePublicGatewayWithContext(context.Background(), createPublicGatewayOptions) +} + +// CreatePublicGatewayWithContext is an alternate form of the CreatePublicGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreatePublicGatewayWithContext(ctx context.Context, createPublicGatewayOptions *CreatePublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createPublicGatewayOptions, "createPublicGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createPublicGatewayOptions, "createPublicGatewayOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/public_gateways`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createPublicGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreatePublicGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createPublicGatewayOptions.VPC != nil { + body["vpc"] = createPublicGatewayOptions.VPC + } + if createPublicGatewayOptions.Zone != nil { + body["zone"] = createPublicGatewayOptions.Zone + } + if createPublicGatewayOptions.FloatingIP != nil { + body["floating_ip"] = createPublicGatewayOptions.FloatingIP + } + if createPublicGatewayOptions.Name != nil { + body["name"] = createPublicGatewayOptions.Name + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// DeletePublicGateway : Delete a public gateway +// This request deletes a public gateway. This operation cannot be reversed. For this request to succeed, the public +// gateway must not be attached to any subnets. The public gateway's floating IP will be automatically unbound. If the +// floating IP was created when the public gateway was created, it will be deleted. 
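A sketch of CreatePublicGateway. The vpc/zone/name fields match the body map populated above; FloatingIP is omitted so the service reserves and binds one automatically, per the method comment. The *IdentityBy* model names are assumptions:

func createGateway(service *vpcclassicv1.VpcClassicV1, vpcID, zoneName string) error {
	gateway, _, err := service.CreatePublicGateway(&vpcclassicv1.CreatePublicGatewayOptions{
		VPC:  &vpcclassicv1.VPCIdentityByID{ID: core.StringPtr(vpcID)},
		Zone: &vpcclassicv1.ZoneIdentityByName{Name: core.StringPtr(zoneName)},
		Name: core.StringPtr("example-gateway"),
	})
	if err != nil {
		return err
	}
	fmt.Println("gateway created:", *gateway.ID)
	return nil
}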
+func (vpcClassic *VpcClassicV1) DeletePublicGateway(deletePublicGatewayOptions *DeletePublicGatewayOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeletePublicGatewayWithContext(context.Background(), deletePublicGatewayOptions) +} + +// DeletePublicGatewayWithContext is an alternate form of the DeletePublicGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeletePublicGatewayWithContext(ctx context.Context, deletePublicGatewayOptions *DeletePublicGatewayOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deletePublicGatewayOptions, "deletePublicGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deletePublicGatewayOptions, "deletePublicGatewayOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deletePublicGatewayOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/public_gateways/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deletePublicGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeletePublicGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetPublicGateway : Retrieve a public gateway +// This request retrieves a single public gateway specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetPublicGateway(getPublicGatewayOptions *GetPublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + return vpcClassic.GetPublicGatewayWithContext(context.Background(), getPublicGatewayOptions) +} + +// GetPublicGatewayWithContext is an alternate form of the GetPublicGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetPublicGatewayWithContext(ctx context.Context, getPublicGatewayOptions *GetPublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getPublicGatewayOptions, "getPublicGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getPublicGatewayOptions, "getPublicGatewayOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getPublicGatewayOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/public_gateways/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getPublicGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetPublicGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdatePublicGateway : Update a public gateway +// This request updates a public gateway's name. 
+func (vpcClassic *VpcClassicV1) UpdatePublicGateway(updatePublicGatewayOptions *UpdatePublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + return vpcClassic.UpdatePublicGatewayWithContext(context.Background(), updatePublicGatewayOptions) +} + +// UpdatePublicGatewayWithContext is an alternate form of the UpdatePublicGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdatePublicGatewayWithContext(ctx context.Context, updatePublicGatewayOptions *UpdatePublicGatewayOptions) (result *PublicGateway, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updatePublicGatewayOptions, "updatePublicGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updatePublicGatewayOptions, "updatePublicGatewayOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updatePublicGatewayOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/public_gateways/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updatePublicGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdatePublicGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updatePublicGatewayOptions.PublicGatewayPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// ListFloatingIps : List all floating IPs +// This request lists all floating IPs in the region. Floating IPs allow inbound and outbound traffic from the Internet +// to an instance. 
+func (vpcClassic *VpcClassicV1) ListFloatingIps(listFloatingIpsOptions *ListFloatingIpsOptions) (result *FloatingIPCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListFloatingIpsWithContext(context.Background(), listFloatingIpsOptions) +} + +// ListFloatingIpsWithContext is an alternate form of the ListFloatingIps method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListFloatingIpsWithContext(ctx context.Context, listFloatingIpsOptions *ListFloatingIpsOptions) (result *FloatingIPCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listFloatingIpsOptions, "listFloatingIpsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/floating_ips`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listFloatingIpsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListFloatingIps") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listFloatingIpsOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listFloatingIpsOptions.Start)) + } + if listFloatingIpsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listFloatingIpsOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIPCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateFloatingIP : Reserve a floating IP +// This request reserves a new floating IP. 
+func (vpcClassic *VpcClassicV1) CreateFloatingIP(createFloatingIPOptions *CreateFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) { + return vpcClassic.CreateFloatingIPWithContext(context.Background(), createFloatingIPOptions) +} + +// CreateFloatingIPWithContext is an alternate form of the CreateFloatingIP method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateFloatingIPWithContext(ctx context.Context, createFloatingIPOptions *CreateFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createFloatingIPOptions, "createFloatingIPOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createFloatingIPOptions, "createFloatingIPOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/floating_ips`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createFloatingIPOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateFloatingIP") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(createFloatingIPOptions.FloatingIPPrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteFloatingIP : Release a floating IP +// This request disassociates (if associated) and releases a floating IP. This operation cannot be reversed. For this +// request to succeed, the floating IP must not be required by another resource, such as a public gateway. 
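A sketch that reserves a floating IP by zone. The concrete prototype name (FloatingIPPrototypeFloatingIPByZone) is an assumption from the SDK's model naming; a by-target variant for binding at reserve time should exist as well:

func reserveFloatingIP(service *vpcclassicv1.VpcClassicV1, zoneName string) error {
	fip, _, err := service.CreateFloatingIP(&vpcclassicv1.CreateFloatingIPOptions{
		FloatingIPPrototype: &vpcclassicv1.FloatingIPPrototypeFloatingIPByZone{
			Name: core.StringPtr("example-fip"),
			Zone: &vpcclassicv1.ZoneIdentityByName{Name: core.StringPtr(zoneName)},
		},
	})
	if err != nil {
		return err
	}
	fmt.Println("reserved address:", *fip.Address)
	return nil
}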
+func (vpcClassic *VpcClassicV1) DeleteFloatingIP(deleteFloatingIPOptions *DeleteFloatingIPOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteFloatingIPWithContext(context.Background(), deleteFloatingIPOptions) +} + +// DeleteFloatingIPWithContext is an alternate form of the DeleteFloatingIP method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteFloatingIPWithContext(ctx context.Context, deleteFloatingIPOptions *DeleteFloatingIPOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteFloatingIPOptions, "deleteFloatingIPOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteFloatingIPOptions, "deleteFloatingIPOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteFloatingIPOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/floating_ips/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteFloatingIPOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteFloatingIP") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetFloatingIP : Retrieve a floating IP +// This request retrieves a single floating IP specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetFloatingIP(getFloatingIPOptions *GetFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) { + return vpcClassic.GetFloatingIPWithContext(context.Background(), getFloatingIPOptions) +} + +// GetFloatingIPWithContext is an alternate form of the GetFloatingIP method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetFloatingIPWithContext(ctx context.Context, getFloatingIPOptions *GetFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getFloatingIPOptions, "getFloatingIPOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getFloatingIPOptions, "getFloatingIPOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getFloatingIPOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/floating_ips/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getFloatingIPOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetFloatingIP") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateFloatingIP : Update a floating IP +// This request updates a floating IP's name and/or target. 
+func (vpcClassic *VpcClassicV1) UpdateFloatingIP(updateFloatingIPOptions *UpdateFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateFloatingIPWithContext(context.Background(), updateFloatingIPOptions) +} + +// UpdateFloatingIPWithContext is an alternate form of the UpdateFloatingIP method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateFloatingIPWithContext(ctx context.Context, updateFloatingIPOptions *UpdateFloatingIPOptions) (result *FloatingIP, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateFloatingIPOptions, "updateFloatingIPOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateFloatingIPOptions, "updateFloatingIPOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateFloatingIPOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/floating_ips/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateFloatingIPOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateFloatingIP") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateFloatingIPOptions.FloatingIPPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP) + if err != nil { + return + } + response.Result = result + + return +} + +// ListNetworkAcls : List all network ACLs +// This request lists all network ACLs in the region. A network ACL defines a set of packet filtering (5-tuple) rules +// for all traffic in and out of a subnet. Both allow and deny rules can be defined, and rules are stateless such that +// reverse traffic in response to allowed traffic is not automatically permitted. 
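Binding a floating IP to an instance network interface goes through the same merge-patch path as the other PATCH methods. A sketch, where nicID is a hypothetical network interface ID and the plain-map patch type is again an assumption:

func bindFloatingIP(service *vpcclassicv1.VpcClassicV1, fipID, nicID string) error {
	fip, _, err := service.UpdateFloatingIP(&vpcclassicv1.UpdateFloatingIPOptions{
		ID: core.StringPtr(fipID),
		FloatingIPPatch: map[string]interface{}{
			"target": map[string]interface{}{"id": nicID},
		},
	})
	if err != nil {
		return err
	}
	fmt.Println("floating IP", *fip.Name, "retargeted")
	return nil
}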
+func (vpcClassic *VpcClassicV1) ListNetworkAcls(listNetworkAclsOptions *ListNetworkAclsOptions) (result *NetworkACLCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListNetworkAclsWithContext(context.Background(), listNetworkAclsOptions) +} + +// ListNetworkAclsWithContext is an alternate form of the ListNetworkAcls method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListNetworkAclsWithContext(ctx context.Context, listNetworkAclsOptions *ListNetworkAclsOptions) (result *NetworkACLCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listNetworkAclsOptions, "listNetworkAclsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listNetworkAclsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListNetworkAcls") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listNetworkAclsOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listNetworkAclsOptions.Start)) + } + if listNetworkAclsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listNetworkAclsOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateNetworkACL : Create a network ACL +// This request creates a new network ACL from a network ACL prototype object. The prototype object is structured in the +// same way as a retrieved network ACL, and contains the information necessary to create the new network ACL. 
+func (vpcClassic *VpcClassicV1) CreateNetworkACL(createNetworkACLOptions *CreateNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + return vpcClassic.CreateNetworkACLWithContext(context.Background(), createNetworkACLOptions) +} + +// CreateNetworkACLWithContext is an alternate form of the CreateNetworkACL method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateNetworkACLWithContext(ctx context.Context, createNetworkACLOptions *CreateNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(createNetworkACLOptions, "createNetworkACLOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createNetworkACLOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateNetworkACL") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + if createNetworkACLOptions.NetworkACLPrototype != nil { + _, err = builder.SetBodyContentJSON(createNetworkACLOptions.NetworkACLPrototype) + if err != nil { + return + } + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteNetworkACL : Delete a network ACL +// This request deletes a network ACL. This operation cannot be reversed. For this request to succeed, the network ACL +// must not be the default network ACL for any VPCs, and the network ACL must not be attached to any subnets. 
+func (vpcClassic *VpcClassicV1) DeleteNetworkACL(deleteNetworkACLOptions *DeleteNetworkACLOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteNetworkACLWithContext(context.Background(), deleteNetworkACLOptions) +} + +// DeleteNetworkACLWithContext is an alternate form of the DeleteNetworkACL method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteNetworkACLWithContext(ctx context.Context, deleteNetworkACLOptions *DeleteNetworkACLOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteNetworkACLOptions, "deleteNetworkACLOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteNetworkACLOptions, "deleteNetworkACLOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteNetworkACLOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteNetworkACLOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteNetworkACL") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetNetworkACL : Retrieve a network ACL +// This request retrieves a single network ACL specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetNetworkACL(getNetworkACLOptions *GetNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + return vpcClassic.GetNetworkACLWithContext(context.Background(), getNetworkACLOptions) +} + +// GetNetworkACLWithContext is an alternate form of the GetNetworkACL method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetNetworkACLWithContext(ctx context.Context, getNetworkACLOptions *GetNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getNetworkACLOptions, "getNetworkACLOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getNetworkACLOptions, "getNetworkACLOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getNetworkACLOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getNetworkACLOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetNetworkACL") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateNetworkACL : Update a network ACL +// This request updates a network ACL's name. 
+func (vpcClassic *VpcClassicV1) UpdateNetworkACL(updateNetworkACLOptions *UpdateNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateNetworkACLWithContext(context.Background(), updateNetworkACLOptions) +} + +// UpdateNetworkACLWithContext is an alternate form of the UpdateNetworkACL method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateNetworkACLWithContext(ctx context.Context, updateNetworkACLOptions *UpdateNetworkACLOptions) (result *NetworkACL, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateNetworkACLOptions, "updateNetworkACLOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateNetworkACLOptions, "updateNetworkACLOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateNetworkACLOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateNetworkACLOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateNetworkACL") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateNetworkACLOptions.NetworkACLPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL) + if err != nil { + return + } + response.Result = result + + return +} + +// ListNetworkACLRules : List all rules for a network ACL +// This request lists all rules for a network ACL. These rules can allow or deny traffic between a source CIDR block and +// a destination CIDR block over a particular protocol and port range. 
+func (vpcClassic *VpcClassicV1) ListNetworkACLRules(listNetworkACLRulesOptions *ListNetworkACLRulesOptions) (result *NetworkACLRuleCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListNetworkACLRulesWithContext(context.Background(), listNetworkACLRulesOptions) +} + +// ListNetworkACLRulesWithContext is an alternate form of the ListNetworkACLRules method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListNetworkACLRulesWithContext(ctx context.Context, listNetworkACLRulesOptions *ListNetworkACLRulesOptions) (result *NetworkACLRuleCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listNetworkACLRulesOptions, "listNetworkACLRulesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listNetworkACLRulesOptions, "listNetworkACLRulesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "network_acl_id": *listNetworkACLRulesOptions.NetworkACLID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls/{network_acl_id}/rules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listNetworkACLRulesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListNetworkACLRules") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listNetworkACLRulesOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listNetworkACLRulesOptions.Start)) + } + if listNetworkACLRulesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listNetworkACLRulesOptions.Limit)) + } + if listNetworkACLRulesOptions.Direction != nil { + builder.AddQuery("direction", fmt.Sprint(*listNetworkACLRulesOptions.Direction)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRuleCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateNetworkACLRule : Create a rule for a network ACL +// This request creates a new rule from a network ACL rule prototype object. The prototype object is structured in the +// same way as a retrieved rule, and contains the information necessary to create the new rule. 
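Besides start/limit, "direction" is the one extra query filter this list method supports. A sketch counting only inbound rules; the Rules field name on the collection is assumed:

func countInboundRules(service *vpcclassicv1.VpcClassicV1, aclID string) error {
	rules, _, err := service.ListNetworkACLRules(&vpcclassicv1.ListNetworkACLRulesOptions{
		NetworkACLID: core.StringPtr(aclID),
		Direction:    core.StringPtr("inbound"),
	})
	if err != nil {
		return err
	}
	fmt.Printf("%d inbound rules\n", len(rules.Rules))
	return nil
}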
+func (vpcClassic *VpcClassicV1) CreateNetworkACLRule(createNetworkACLRuleOptions *CreateNetworkACLRuleOptions) (result NetworkACLRuleIntf, response *core.DetailedResponse, err error) { + return vpcClassic.CreateNetworkACLRuleWithContext(context.Background(), createNetworkACLRuleOptions) +} + +// CreateNetworkACLRuleWithContext is an alternate form of the CreateNetworkACLRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateNetworkACLRuleWithContext(ctx context.Context, createNetworkACLRuleOptions *CreateNetworkACLRuleOptions) (result NetworkACLRuleIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createNetworkACLRuleOptions, "createNetworkACLRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createNetworkACLRuleOptions, "createNetworkACLRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "network_acl_id": *createNetworkACLRuleOptions.NetworkACLID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls/{network_acl_id}/rules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createNetworkACLRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateNetworkACLRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(createNetworkACLRuleOptions.NetworkACLRulePrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRule) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteNetworkACLRule : Delete a network ACL rule +// This request deletes a rule. This operation cannot be reversed. 
+func (vpcClassic *VpcClassicV1) DeleteNetworkACLRule(deleteNetworkACLRuleOptions *DeleteNetworkACLRuleOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteNetworkACLRuleWithContext(context.Background(), deleteNetworkACLRuleOptions) +} + +// DeleteNetworkACLRuleWithContext is an alternate form of the DeleteNetworkACLRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteNetworkACLRuleWithContext(ctx context.Context, deleteNetworkACLRuleOptions *DeleteNetworkACLRuleOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteNetworkACLRuleOptions, "deleteNetworkACLRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteNetworkACLRuleOptions, "deleteNetworkACLRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "network_acl_id": *deleteNetworkACLRuleOptions.NetworkACLID, + "id": *deleteNetworkACLRuleOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls/{network_acl_id}/rules/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteNetworkACLRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteNetworkACLRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetNetworkACLRule : Retrieve a network ACL rule +// This request retrieves a single rule specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetNetworkACLRule(getNetworkACLRuleOptions *GetNetworkACLRuleOptions) (result NetworkACLRuleIntf, response *core.DetailedResponse, err error) { + return vpcClassic.GetNetworkACLRuleWithContext(context.Background(), getNetworkACLRuleOptions) +} + +// GetNetworkACLRuleWithContext is an alternate form of the GetNetworkACLRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetNetworkACLRuleWithContext(ctx context.Context, getNetworkACLRuleOptions *GetNetworkACLRuleOptions) (result NetworkACLRuleIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getNetworkACLRuleOptions, "getNetworkACLRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getNetworkACLRuleOptions, "getNetworkACLRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "network_acl_id": *getNetworkACLRuleOptions.NetworkACLID, + "id": *getNetworkACLRuleOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls/{network_acl_id}/rules/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getNetworkACLRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetNetworkACLRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRule) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateNetworkACLRule : Update a network ACL rule +// This request updates a rule with the information in a provided rule patch. The rule patch object is structured in the +// same way as a retrieved rule and contains only the information to be updated. 
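+//
+// A hedged usage sketch (not generated code; the constructor signature is
+// assumed from the SDK's generated pattern and the IDs are placeholders).
+// Because the body is a JSON merge-patch, only the fields being changed are
+// supplied:
+//
+//	rulePatch := map[string]interface{}{"name": "my-renamed-rule"}
+//	options := vpcClassicService.NewUpdateNetworkACLRuleOptions("acl-id", "rule-id", rulePatch)
+//	rule, _, err := vpcClassicService.UpdateNetworkACLRule(options)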
+func (vpcClassic *VpcClassicV1) UpdateNetworkACLRule(updateNetworkACLRuleOptions *UpdateNetworkACLRuleOptions) (result NetworkACLRuleIntf, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateNetworkACLRuleWithContext(context.Background(), updateNetworkACLRuleOptions) +} + +// UpdateNetworkACLRuleWithContext is an alternate form of the UpdateNetworkACLRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateNetworkACLRuleWithContext(ctx context.Context, updateNetworkACLRuleOptions *UpdateNetworkACLRuleOptions) (result NetworkACLRuleIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateNetworkACLRuleOptions, "updateNetworkACLRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateNetworkACLRuleOptions, "updateNetworkACLRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "network_acl_id": *updateNetworkACLRuleOptions.NetworkACLID, + "id": *updateNetworkACLRuleOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/network_acls/{network_acl_id}/rules/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateNetworkACLRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateNetworkACLRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateNetworkACLRuleOptions.NetworkACLRulePatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRule) + if err != nil { + return + } + response.Result = result + + return +} + +// ListSecurityGroups : List all security groups +// This request lists all security groups in the region. Security groups provide a way to apply IP filtering rules to +// instances in the associated VPC. With security groups, all traffic is denied by default, and rules added to security +// groups define which traffic the security group permits. Security group rules are stateful such that reverse traffic +// in response to allowed traffic is automatically permitted. 
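+//
+// A minimal usage sketch (assumptions: "vpcClassicService" is a configured
+// client and core.StringPtr comes from the go-sdk-core package imported
+// above). The optional VPC fields become the vpc.id / vpc.crn / vpc.name
+// query parameters:
+//
+//	options := &ListSecurityGroupsOptions{VPCID: core.StringPtr("vpc-id")}
+//	groups, _, err := vpcClassicService.ListSecurityGroups(options)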
+func (vpcClassic *VpcClassicV1) ListSecurityGroups(listSecurityGroupsOptions *ListSecurityGroupsOptions) (result *SecurityGroupCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListSecurityGroupsWithContext(context.Background(), listSecurityGroupsOptions) +} + +// ListSecurityGroupsWithContext is an alternate form of the ListSecurityGroups method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListSecurityGroupsWithContext(ctx context.Context, listSecurityGroupsOptions *ListSecurityGroupsOptions) (result *SecurityGroupCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listSecurityGroupsOptions, "listSecurityGroupsOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listSecurityGroupsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListSecurityGroups") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listSecurityGroupsOptions.VPCID != nil { + builder.AddQuery("vpc.id", fmt.Sprint(*listSecurityGroupsOptions.VPCID)) + } + if listSecurityGroupsOptions.VPCCRN != nil { + builder.AddQuery("vpc.crn", fmt.Sprint(*listSecurityGroupsOptions.VPCCRN)) + } + if listSecurityGroupsOptions.VPCName != nil { + builder.AddQuery("vpc.name", fmt.Sprint(*listSecurityGroupsOptions.VPCName)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateSecurityGroup : Create a security group +// This request creates a new security group from a security group prototype object. The prototype object is structured +// in the same way as a retrieved security group, and contains the information necessary to create the new security +// group. If security group rules are included in the prototype object, those rules will be added to the security group. +// Each security group is scoped to one VPC. Only network interfaces on instances in that VPC can be added to the +// security group. 
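+//
+// A hedged usage sketch (not generated code; VPCIdentityByID and the options
+// constructor are assumed from the SDK's generated models, and the IDs are
+// placeholders):
+//
+//	vpcIdentity := &VPCIdentityByID{ID: core.StringPtr("vpc-id")}
+//	options := vpcClassicService.NewCreateSecurityGroupOptions(vpcIdentity)
+//	options.Name = core.StringPtr("my-security-group")
+//	group, _, err := vpcClassicService.CreateSecurityGroup(options)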
+func (vpcClassic *VpcClassicV1) CreateSecurityGroup(createSecurityGroupOptions *CreateSecurityGroupOptions) (result *SecurityGroup, response *core.DetailedResponse, err error) { + return vpcClassic.CreateSecurityGroupWithContext(context.Background(), createSecurityGroupOptions) +} + +// CreateSecurityGroupWithContext is an alternate form of the CreateSecurityGroup method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateSecurityGroupWithContext(ctx context.Context, createSecurityGroupOptions *CreateSecurityGroupOptions) (result *SecurityGroup, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createSecurityGroupOptions, "createSecurityGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createSecurityGroupOptions, "createSecurityGroupOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createSecurityGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateSecurityGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createSecurityGroupOptions.VPC != nil { + body["vpc"] = createSecurityGroupOptions.VPC + } + if createSecurityGroupOptions.Name != nil { + body["name"] = createSecurityGroupOptions.Name + } + if createSecurityGroupOptions.ResourceGroup != nil { + body["resource_group"] = createSecurityGroupOptions.ResourceGroup + } + if createSecurityGroupOptions.Rules != nil { + body["rules"] = createSecurityGroupOptions.Rules + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroup) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteSecurityGroup : Delete a security group +// This request deletes a security group. A security group cannot be deleted if it is referenced by any network +// interfaces or other security group rules. Additionally, a VPC's default security group cannot be deleted. This +// operation cannot be reversed. 
+func (vpcClassic *VpcClassicV1) DeleteSecurityGroup(deleteSecurityGroupOptions *DeleteSecurityGroupOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteSecurityGroupWithContext(context.Background(), deleteSecurityGroupOptions) +} + +// DeleteSecurityGroupWithContext is an alternate form of the DeleteSecurityGroup method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteSecurityGroupWithContext(ctx context.Context, deleteSecurityGroupOptions *DeleteSecurityGroupOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteSecurityGroupOptions, "deleteSecurityGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteSecurityGroupOptions, "deleteSecurityGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteSecurityGroupOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteSecurityGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteSecurityGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetSecurityGroup : Retrieve a security group +// This request retrieves a single security group specified by the identifier in the URL path. 
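+//
+// The ...WithContext form accepts a caller-supplied context, e.g. to bound
+// the request with a timeout (sketch only; the ID is a placeholder and the
+// options constructor is assumed from the generated helpers):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	options := vpcClassicService.NewGetSecurityGroupOptions("sg-id")
+//	group, _, err := vpcClassicService.GetSecurityGroupWithContext(ctx, options)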
+func (vpcClassic *VpcClassicV1) GetSecurityGroup(getSecurityGroupOptions *GetSecurityGroupOptions) (result *SecurityGroup, response *core.DetailedResponse, err error) { + return vpcClassic.GetSecurityGroupWithContext(context.Background(), getSecurityGroupOptions) +} + +// GetSecurityGroupWithContext is an alternate form of the GetSecurityGroup method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetSecurityGroupWithContext(ctx context.Context, getSecurityGroupOptions *GetSecurityGroupOptions) (result *SecurityGroup, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSecurityGroupOptions, "getSecurityGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSecurityGroupOptions, "getSecurityGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getSecurityGroupOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSecurityGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetSecurityGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroup) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSecurityGroup : Update a security group +// This request updates a security group with the information provided in a security group patch object. The security +// group patch object is structured in the same way as a retrieved security group and contains only the information to +// be updated. 
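+//
+// A hedged sketch using the typed patch model rather than a raw map
+// (SecurityGroupPatch and its AsPatch helper are assumed from the SDK's
+// generated models; the ID is a placeholder):
+//
+//	patchModel := &SecurityGroupPatch{Name: core.StringPtr("my-renamed-group")}
+//	patch, err := patchModel.AsPatch()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	options := vpcClassicService.NewUpdateSecurityGroupOptions("sg-id", patch)
+//	group, _, err := vpcClassicService.UpdateSecurityGroup(options)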
+func (vpcClassic *VpcClassicV1) UpdateSecurityGroup(updateSecurityGroupOptions *UpdateSecurityGroupOptions) (result *SecurityGroup, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateSecurityGroupWithContext(context.Background(), updateSecurityGroupOptions) +} + +// UpdateSecurityGroupWithContext is an alternate form of the UpdateSecurityGroup method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateSecurityGroupWithContext(ctx context.Context, updateSecurityGroupOptions *UpdateSecurityGroupOptions) (result *SecurityGroup, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateSecurityGroupOptions, "updateSecurityGroupOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateSecurityGroupOptions, "updateSecurityGroupOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateSecurityGroupOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateSecurityGroupOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateSecurityGroup") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateSecurityGroupOptions.SecurityGroupPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroup) + if err != nil { + return + } + response.Result = result + + return +} + +// ListSecurityGroupNetworkInterfaces : List all network interfaces associated with a security group +// This request lists all network interfaces associated with a security group, to which the rules in the security group +// are applied. 
+func (vpcClassic *VpcClassicV1) ListSecurityGroupNetworkInterfaces(listSecurityGroupNetworkInterfacesOptions *ListSecurityGroupNetworkInterfacesOptions) (result *NetworkInterfaceCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListSecurityGroupNetworkInterfacesWithContext(context.Background(), listSecurityGroupNetworkInterfacesOptions) +} + +// ListSecurityGroupNetworkInterfacesWithContext is an alternate form of the ListSecurityGroupNetworkInterfaces method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListSecurityGroupNetworkInterfacesWithContext(ctx context.Context, listSecurityGroupNetworkInterfacesOptions *ListSecurityGroupNetworkInterfacesOptions) (result *NetworkInterfaceCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listSecurityGroupNetworkInterfacesOptions, "listSecurityGroupNetworkInterfacesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listSecurityGroupNetworkInterfacesOptions, "listSecurityGroupNetworkInterfacesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "security_group_id": *listSecurityGroupNetworkInterfacesOptions.SecurityGroupID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{security_group_id}/network_interfaces`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listSecurityGroupNetworkInterfacesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListSecurityGroupNetworkInterfaces") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterfaceCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// RemoveSecurityGroupNetworkInterface : Remove a network interface from a security group +// This request removes a network interface from a security group. Security groups are stateful, so any changes to a +// network interface's security groups are applied to new connections. Existing connections are not affected. If the +// network interface being removed has no other security groups, it will be attached to the VPC's default security +// group. 
+func (vpcClassic *VpcClassicV1) RemoveSecurityGroupNetworkInterface(removeSecurityGroupNetworkInterfaceOptions *RemoveSecurityGroupNetworkInterfaceOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.RemoveSecurityGroupNetworkInterfaceWithContext(context.Background(), removeSecurityGroupNetworkInterfaceOptions) +} + +// RemoveSecurityGroupNetworkInterfaceWithContext is an alternate form of the RemoveSecurityGroupNetworkInterface method which supports a Context parameter +func (vpcClassic *VpcClassicV1) RemoveSecurityGroupNetworkInterfaceWithContext(ctx context.Context, removeSecurityGroupNetworkInterfaceOptions *RemoveSecurityGroupNetworkInterfaceOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(removeSecurityGroupNetworkInterfaceOptions, "removeSecurityGroupNetworkInterfaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(removeSecurityGroupNetworkInterfaceOptions, "removeSecurityGroupNetworkInterfaceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "security_group_id": *removeSecurityGroupNetworkInterfaceOptions.SecurityGroupID, + "id": *removeSecurityGroupNetworkInterfaceOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{security_group_id}/network_interfaces/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range removeSecurityGroupNetworkInterfaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "RemoveSecurityGroupNetworkInterface") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetSecurityGroupNetworkInterface : Retrieve a network interface in a security group +// This request retrieves a single network interface specified by the identifier in the URL path. The network interface +// must be an existing member of the security group. 
+func (vpcClassic *VpcClassicV1) GetSecurityGroupNetworkInterface(getSecurityGroupNetworkInterfaceOptions *GetSecurityGroupNetworkInterfaceOptions) (result *NetworkInterface, response *core.DetailedResponse, err error) { + return vpcClassic.GetSecurityGroupNetworkInterfaceWithContext(context.Background(), getSecurityGroupNetworkInterfaceOptions) +} + +// GetSecurityGroupNetworkInterfaceWithContext is an alternate form of the GetSecurityGroupNetworkInterface method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetSecurityGroupNetworkInterfaceWithContext(ctx context.Context, getSecurityGroupNetworkInterfaceOptions *GetSecurityGroupNetworkInterfaceOptions) (result *NetworkInterface, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSecurityGroupNetworkInterfaceOptions, "getSecurityGroupNetworkInterfaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSecurityGroupNetworkInterfaceOptions, "getSecurityGroupNetworkInterfaceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "security_group_id": *getSecurityGroupNetworkInterfaceOptions.SecurityGroupID, + "id": *getSecurityGroupNetworkInterfaceOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{security_group_id}/network_interfaces/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSecurityGroupNetworkInterfaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetSecurityGroupNetworkInterface") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface) + if err != nil { + return + } + response.Result = result + + return +} + +// AddSecurityGroupNetworkInterface : Add a network interface to a security group +// This request adds an existing network interface to an existing security group. When a network interface is added to a +// security group, the security group rules are applied to the network interface. A request body is not required, and if +// supplied, is ignored. 
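+//
+// A minimal sketch (placeholder IDs; as noted above this is a PUT with no
+// request body, so only the two identifiers are needed):
+//
+//	options := vpcClassicService.NewAddSecurityGroupNetworkInterfaceOptions("sg-id", "nic-id")
+//	nic, _, err := vpcClassicService.AddSecurityGroupNetworkInterface(options)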
+func (vpcClassic *VpcClassicV1) AddSecurityGroupNetworkInterface(addSecurityGroupNetworkInterfaceOptions *AddSecurityGroupNetworkInterfaceOptions) (result *NetworkInterface, response *core.DetailedResponse, err error) { + return vpcClassic.AddSecurityGroupNetworkInterfaceWithContext(context.Background(), addSecurityGroupNetworkInterfaceOptions) +} + +// AddSecurityGroupNetworkInterfaceWithContext is an alternate form of the AddSecurityGroupNetworkInterface method which supports a Context parameter +func (vpcClassic *VpcClassicV1) AddSecurityGroupNetworkInterfaceWithContext(ctx context.Context, addSecurityGroupNetworkInterfaceOptions *AddSecurityGroupNetworkInterfaceOptions) (result *NetworkInterface, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(addSecurityGroupNetworkInterfaceOptions, "addSecurityGroupNetworkInterfaceOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(addSecurityGroupNetworkInterfaceOptions, "addSecurityGroupNetworkInterfaceOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "security_group_id": *addSecurityGroupNetworkInterfaceOptions.SecurityGroupID, + "id": *addSecurityGroupNetworkInterfaceOptions.ID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{security_group_id}/network_interfaces/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range addSecurityGroupNetworkInterfaceOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "AddSecurityGroupNetworkInterface") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface) + if err != nil { + return + } + response.Result = result + + return +} + +// ListSecurityGroupRules : List all rules in a security group +// This request lists all rules in a security group. These rules define what traffic the security group permits. +// Security group rules are stateful, such that reverse traffic in response to allowed traffic is automatically +// permitted. 
+func (vpcClassic *VpcClassicV1) ListSecurityGroupRules(listSecurityGroupRulesOptions *ListSecurityGroupRulesOptions) (result *SecurityGroupRuleCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListSecurityGroupRulesWithContext(context.Background(), listSecurityGroupRulesOptions) +} + +// ListSecurityGroupRulesWithContext is an alternate form of the ListSecurityGroupRules method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListSecurityGroupRulesWithContext(ctx context.Context, listSecurityGroupRulesOptions *ListSecurityGroupRulesOptions) (result *SecurityGroupRuleCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listSecurityGroupRulesOptions, "listSecurityGroupRulesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listSecurityGroupRulesOptions, "listSecurityGroupRulesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "security_group_id": *listSecurityGroupRulesOptions.SecurityGroupID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{security_group_id}/rules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listSecurityGroupRulesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListSecurityGroupRules") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRuleCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateSecurityGroupRule : Create a rule for a security group +// This request creates a new security group rule from a security group rule prototype object. The prototype object is +// structured in the same way as a retrieved security group rule and contains the information necessary to create the +// rule. As part of creating a new rule in a security group, the rule is applied to all the networking interfaces in the +// security group. Rules specify which IP traffic a security group should allow. Security group rules are stateful, such +// that reverse traffic in response to allowed traffic is automatically permitted. A rule allowing inbound TCP traffic +// on port 80 also allows outbound TCP traffic on port 80 without the need for an additional rule. 
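+//
+// A hedged sketch (SecurityGroupRulePrototype and its fields are assumed
+// from the SDK's generated models; values are illustrative). This would
+// allow inbound TCP on port 22:
+//
+//	prototype := &SecurityGroupRulePrototype{
+//		Direction: core.StringPtr("inbound"),
+//		Protocol:  core.StringPtr("tcp"),
+//		PortMin:   core.Int64Ptr(22),
+//		PortMax:   core.Int64Ptr(22),
+//	}
+//	options := vpcClassicService.NewCreateSecurityGroupRuleOptions("sg-id", prototype)
+//	rule, _, err := vpcClassicService.CreateSecurityGroupRule(options)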
+func (vpcClassic *VpcClassicV1) CreateSecurityGroupRule(createSecurityGroupRuleOptions *CreateSecurityGroupRuleOptions) (result SecurityGroupRuleIntf, response *core.DetailedResponse, err error) { + return vpcClassic.CreateSecurityGroupRuleWithContext(context.Background(), createSecurityGroupRuleOptions) +} + +// CreateSecurityGroupRuleWithContext is an alternate form of the CreateSecurityGroupRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateSecurityGroupRuleWithContext(ctx context.Context, createSecurityGroupRuleOptions *CreateSecurityGroupRuleOptions) (result SecurityGroupRuleIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createSecurityGroupRuleOptions, "createSecurityGroupRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createSecurityGroupRuleOptions, "createSecurityGroupRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "security_group_id": *createSecurityGroupRuleOptions.SecurityGroupID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{security_group_id}/rules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createSecurityGroupRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateSecurityGroupRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(createSecurityGroupRuleOptions.SecurityGroupRulePrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRule) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteSecurityGroupRule : Delete a security group rule +// This request deletes a security group rule. This operation cannot be reversed. Removing a security group rule will +// not end existing connections allowed by that rule. 
+func (vpcClassic *VpcClassicV1) DeleteSecurityGroupRule(deleteSecurityGroupRuleOptions *DeleteSecurityGroupRuleOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteSecurityGroupRuleWithContext(context.Background(), deleteSecurityGroupRuleOptions) +} + +// DeleteSecurityGroupRuleWithContext is an alternate form of the DeleteSecurityGroupRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteSecurityGroupRuleWithContext(ctx context.Context, deleteSecurityGroupRuleOptions *DeleteSecurityGroupRuleOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteSecurityGroupRuleOptions, "deleteSecurityGroupRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteSecurityGroupRuleOptions, "deleteSecurityGroupRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "security_group_id": *deleteSecurityGroupRuleOptions.SecurityGroupID, + "id": *deleteSecurityGroupRuleOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{security_group_id}/rules/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteSecurityGroupRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteSecurityGroupRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetSecurityGroupRule : Retrieve a security group rule +// This request retrieves a single security group rule specified by the identifier in the URL path. 
+func (vpcClassic *VpcClassicV1) GetSecurityGroupRule(getSecurityGroupRuleOptions *GetSecurityGroupRuleOptions) (result SecurityGroupRuleIntf, response *core.DetailedResponse, err error) { + return vpcClassic.GetSecurityGroupRuleWithContext(context.Background(), getSecurityGroupRuleOptions) +} + +// GetSecurityGroupRuleWithContext is an alternate form of the GetSecurityGroupRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetSecurityGroupRuleWithContext(ctx context.Context, getSecurityGroupRuleOptions *GetSecurityGroupRuleOptions) (result SecurityGroupRuleIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getSecurityGroupRuleOptions, "getSecurityGroupRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getSecurityGroupRuleOptions, "getSecurityGroupRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "security_group_id": *getSecurityGroupRuleOptions.SecurityGroupID, + "id": *getSecurityGroupRuleOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{security_group_id}/rules/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getSecurityGroupRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetSecurityGroupRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRule) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateSecurityGroupRule : Update a security group rule +// This request updates a security group rule with the information provided in a rule patch object. The patch object is +// structured in the same way as a retrieved security group rule and needs to contain only the information to be +// updated. 
+func (vpcClassic *VpcClassicV1) UpdateSecurityGroupRule(updateSecurityGroupRuleOptions *UpdateSecurityGroupRuleOptions) (result SecurityGroupRuleIntf, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateSecurityGroupRuleWithContext(context.Background(), updateSecurityGroupRuleOptions) +} + +// UpdateSecurityGroupRuleWithContext is an alternate form of the UpdateSecurityGroupRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateSecurityGroupRuleWithContext(ctx context.Context, updateSecurityGroupRuleOptions *UpdateSecurityGroupRuleOptions) (result SecurityGroupRuleIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateSecurityGroupRuleOptions, "updateSecurityGroupRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateSecurityGroupRuleOptions, "updateSecurityGroupRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "security_group_id": *updateSecurityGroupRuleOptions.SecurityGroupID, + "id": *updateSecurityGroupRuleOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/security_groups/{security_group_id}/rules/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateSecurityGroupRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateSecurityGroupRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateSecurityGroupRuleOptions.SecurityGroupRulePatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRule) + if err != nil { + return + } + response.Result = result + + return +} + +// ListIkePolicies : List all IKE policies +// This request lists all IKE policies in the region. 
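+//
+// A minimal pagination sketch (Start and Limit map to the start and limit
+// query parameters; values are placeholders):
+//
+//	options := &ListIkePoliciesOptions{Limit: core.Int64Ptr(50)}
+//	policies, _, err := vpcClassicService.ListIkePolicies(options)
+//	// The collection's next link, when present, carries the start token
+//	// for requesting the following page.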
+func (vpcClassic *VpcClassicV1) ListIkePolicies(listIkePoliciesOptions *ListIkePoliciesOptions) (result *IkePolicyCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListIkePoliciesWithContext(context.Background(), listIkePoliciesOptions) +} + +// ListIkePoliciesWithContext is an alternate form of the ListIkePolicies method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListIkePoliciesWithContext(ctx context.Context, listIkePoliciesOptions *ListIkePoliciesOptions) (result *IkePolicyCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listIkePoliciesOptions, "listIkePoliciesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ike_policies`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listIkePoliciesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListIkePolicies") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listIkePoliciesOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listIkePoliciesOptions.Start)) + } + if listIkePoliciesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listIkePoliciesOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicyCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateIkePolicy : Create an IKE policy +// This request creates a new IKE policy. 
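+//
+// A hedged sketch (the constructor signature is assumed from the generated
+// pattern of required fields; the algorithm values are illustrative only):
+//
+//	options := vpcClassicService.NewCreateIkePolicyOptions(
+//		"sha256",  // authentication_algorithm
+//		int64(14), // dh_group
+//		"aes256",  // encryption_algorithm
+//		int64(2),  // ike_version
+//	)
+//	options.Name = core.StringPtr("my-ike-policy")
+//	policy, _, err := vpcClassicService.CreateIkePolicy(options)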
+func (vpcClassic *VpcClassicV1) CreateIkePolicy(createIkePolicyOptions *CreateIkePolicyOptions) (result *IkePolicy, response *core.DetailedResponse, err error) { + return vpcClassic.CreateIkePolicyWithContext(context.Background(), createIkePolicyOptions) +} + +// CreateIkePolicyWithContext is an alternate form of the CreateIkePolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateIkePolicyWithContext(ctx context.Context, createIkePolicyOptions *CreateIkePolicyOptions) (result *IkePolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createIkePolicyOptions, "createIkePolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createIkePolicyOptions, "createIkePolicyOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ike_policies`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createIkePolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateIkePolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createIkePolicyOptions.AuthenticationAlgorithm != nil { + body["authentication_algorithm"] = createIkePolicyOptions.AuthenticationAlgorithm + } + if createIkePolicyOptions.DhGroup != nil { + body["dh_group"] = createIkePolicyOptions.DhGroup + } + if createIkePolicyOptions.EncryptionAlgorithm != nil { + body["encryption_algorithm"] = createIkePolicyOptions.EncryptionAlgorithm + } + if createIkePolicyOptions.IkeVersion != nil { + body["ike_version"] = createIkePolicyOptions.IkeVersion + } + if createIkePolicyOptions.KeyLifetime != nil { + body["key_lifetime"] = createIkePolicyOptions.KeyLifetime + } + if createIkePolicyOptions.Name != nil { + body["name"] = createIkePolicyOptions.Name + } + if createIkePolicyOptions.ResourceGroup != nil { + body["resource_group"] = createIkePolicyOptions.ResourceGroup + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteIkePolicy : Delete an IKE policy +// This request deletes an IKE policy. This operation cannot be reversed. 
+func (vpcClassic *VpcClassicV1) DeleteIkePolicy(deleteIkePolicyOptions *DeleteIkePolicyOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteIkePolicyWithContext(context.Background(), deleteIkePolicyOptions) +} + +// DeleteIkePolicyWithContext is an alternate form of the DeleteIkePolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteIkePolicyWithContext(ctx context.Context, deleteIkePolicyOptions *DeleteIkePolicyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteIkePolicyOptions, "deleteIkePolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteIkePolicyOptions, "deleteIkePolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteIkePolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ike_policies/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteIkePolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteIkePolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetIkePolicy : Retrieve an IKE policy +// This request retrieves a single IKE policy specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetIkePolicy(getIkePolicyOptions *GetIkePolicyOptions) (result *IkePolicy, response *core.DetailedResponse, err error) { + return vpcClassic.GetIkePolicyWithContext(context.Background(), getIkePolicyOptions) +} + +// GetIkePolicyWithContext is an alternate form of the GetIkePolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetIkePolicyWithContext(ctx context.Context, getIkePolicyOptions *GetIkePolicyOptions) (result *IkePolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getIkePolicyOptions, "getIkePolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getIkePolicyOptions, "getIkePolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getIkePolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ike_policies/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getIkePolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetIkePolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateIkePolicy : Update an IKE policy +// This request updates the properties of an existing IKE policy. 
+func (vpcClassic *VpcClassicV1) UpdateIkePolicy(updateIkePolicyOptions *UpdateIkePolicyOptions) (result *IkePolicy, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateIkePolicyWithContext(context.Background(), updateIkePolicyOptions) +} + +// UpdateIkePolicyWithContext is an alternate form of the UpdateIkePolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateIkePolicyWithContext(ctx context.Context, updateIkePolicyOptions *UpdateIkePolicyOptions) (result *IkePolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateIkePolicyOptions, "updateIkePolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateIkePolicyOptions, "updateIkePolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateIkePolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ike_policies/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateIkePolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateIkePolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateIkePolicyOptions.IkePolicyPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// ListIkePolicyConnections : List all VPN gateway connections that use a specified IKE policy +// This request lists all VPN gateway connections that use a policy. 
+func (vpcClassic *VpcClassicV1) ListIkePolicyConnections(listIkePolicyConnectionsOptions *ListIkePolicyConnectionsOptions) (result *VPNGatewayConnectionCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListIkePolicyConnectionsWithContext(context.Background(), listIkePolicyConnectionsOptions) +} + +// ListIkePolicyConnectionsWithContext is an alternate form of the ListIkePolicyConnections method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListIkePolicyConnectionsWithContext(ctx context.Context, listIkePolicyConnectionsOptions *ListIkePolicyConnectionsOptions) (result *VPNGatewayConnectionCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listIkePolicyConnectionsOptions, "listIkePolicyConnectionsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listIkePolicyConnectionsOptions, "listIkePolicyConnectionsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *listIkePolicyConnectionsOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ike_policies/{id}/connections`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listIkePolicyConnectionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListIkePolicyConnections") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// ListIpsecPolicies : List all IPsec policies +// This request lists all IPsec policies in the region. 
+func (vpcClassic *VpcClassicV1) ListIpsecPolicies(listIpsecPoliciesOptions *ListIpsecPoliciesOptions) (result *IPsecPolicyCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListIpsecPoliciesWithContext(context.Background(), listIpsecPoliciesOptions) +} + +// ListIpsecPoliciesWithContext is an alternate form of the ListIpsecPolicies method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListIpsecPoliciesWithContext(ctx context.Context, listIpsecPoliciesOptions *ListIpsecPoliciesOptions) (result *IPsecPolicyCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listIpsecPoliciesOptions, "listIpsecPoliciesOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ipsec_policies`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listIpsecPoliciesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListIpsecPolicies") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listIpsecPoliciesOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listIpsecPoliciesOptions.Start)) + } + if listIpsecPoliciesOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listIpsecPoliciesOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicyCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateIpsecPolicy : Create an IPsec policy +// This request creates a new IPsec policy. 
+func (vpcClassic *VpcClassicV1) CreateIpsecPolicy(createIpsecPolicyOptions *CreateIpsecPolicyOptions) (result *IPsecPolicy, response *core.DetailedResponse, err error) { + return vpcClassic.CreateIpsecPolicyWithContext(context.Background(), createIpsecPolicyOptions) +} + +// CreateIpsecPolicyWithContext is an alternate form of the CreateIpsecPolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateIpsecPolicyWithContext(ctx context.Context, createIpsecPolicyOptions *CreateIpsecPolicyOptions) (result *IPsecPolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createIpsecPolicyOptions, "createIpsecPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createIpsecPolicyOptions, "createIpsecPolicyOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ipsec_policies`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createIpsecPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateIpsecPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createIpsecPolicyOptions.AuthenticationAlgorithm != nil { + body["authentication_algorithm"] = createIpsecPolicyOptions.AuthenticationAlgorithm + } + if createIpsecPolicyOptions.EncryptionAlgorithm != nil { + body["encryption_algorithm"] = createIpsecPolicyOptions.EncryptionAlgorithm + } + if createIpsecPolicyOptions.Pfs != nil { + body["pfs"] = createIpsecPolicyOptions.Pfs + } + if createIpsecPolicyOptions.KeyLifetime != nil { + body["key_lifetime"] = createIpsecPolicyOptions.KeyLifetime + } + if createIpsecPolicyOptions.Name != nil { + body["name"] = createIpsecPolicyOptions.Name + } + if createIpsecPolicyOptions.ResourceGroup != nil { + body["resource_group"] = createIpsecPolicyOptions.ResourceGroup + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteIpsecPolicy : Delete an IPsec policy +// This request deletes an IPsec policy. This operation cannot be reversed. 
+func (vpcClassic *VpcClassicV1) DeleteIpsecPolicy(deleteIpsecPolicyOptions *DeleteIpsecPolicyOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteIpsecPolicyWithContext(context.Background(), deleteIpsecPolicyOptions) +} + +// DeleteIpsecPolicyWithContext is an alternate form of the DeleteIpsecPolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteIpsecPolicyWithContext(ctx context.Context, deleteIpsecPolicyOptions *DeleteIpsecPolicyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteIpsecPolicyOptions, "deleteIpsecPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteIpsecPolicyOptions, "deleteIpsecPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteIpsecPolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ipsec_policies/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteIpsecPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteIpsecPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetIpsecPolicy : Retrieve an IPsec policy +// This request retrieves a single IPsec policy specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetIpsecPolicy(getIpsecPolicyOptions *GetIpsecPolicyOptions) (result *IPsecPolicy, response *core.DetailedResponse, err error) { + return vpcClassic.GetIpsecPolicyWithContext(context.Background(), getIpsecPolicyOptions) +} + +// GetIpsecPolicyWithContext is an alternate form of the GetIpsecPolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetIpsecPolicyWithContext(ctx context.Context, getIpsecPolicyOptions *GetIpsecPolicyOptions) (result *IPsecPolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getIpsecPolicyOptions, "getIpsecPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getIpsecPolicyOptions, "getIpsecPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getIpsecPolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ipsec_policies/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getIpsecPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetIpsecPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateIpsecPolicy : Update an IPsec policy +// This request updates the properties of an existing IPsec policy. 
+func (vpcClassic *VpcClassicV1) UpdateIpsecPolicy(updateIpsecPolicyOptions *UpdateIpsecPolicyOptions) (result *IPsecPolicy, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateIpsecPolicyWithContext(context.Background(), updateIpsecPolicyOptions) +} + +// UpdateIpsecPolicyWithContext is an alternate form of the UpdateIpsecPolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateIpsecPolicyWithContext(ctx context.Context, updateIpsecPolicyOptions *UpdateIpsecPolicyOptions) (result *IPsecPolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateIpsecPolicyOptions, "updateIpsecPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateIpsecPolicyOptions, "updateIpsecPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateIpsecPolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ipsec_policies/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateIpsecPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateIpsecPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateIpsecPolicyOptions.IPsecPolicyPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// ListIpsecPolicyConnections : List all VPN gateway connections that use a specified IPsec policy +// This request lists all VPN gateway connections that use a policy. 
+func (vpcClassic *VpcClassicV1) ListIpsecPolicyConnections(listIpsecPolicyConnectionsOptions *ListIpsecPolicyConnectionsOptions) (result *VPNGatewayConnectionCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListIpsecPolicyConnectionsWithContext(context.Background(), listIpsecPolicyConnectionsOptions) +} + +// ListIpsecPolicyConnectionsWithContext is an alternate form of the ListIpsecPolicyConnections method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListIpsecPolicyConnectionsWithContext(ctx context.Context, listIpsecPolicyConnectionsOptions *ListIpsecPolicyConnectionsOptions) (result *VPNGatewayConnectionCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listIpsecPolicyConnectionsOptions, "listIpsecPolicyConnectionsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listIpsecPolicyConnectionsOptions, "listIpsecPolicyConnectionsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *listIpsecPolicyConnectionsOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/ipsec_policies/{id}/connections`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listIpsecPolicyConnectionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListIpsecPolicyConnections") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// ListVPNGateways : List all VPN gateways +// This request lists all VPN gateways in the region. 
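+//
+// Sketch (usage illustration, not generated code): the optional filters
+// mirror the query parameters handled above. The mode value "policy" is an
+// assumed enum example, and resourceGroupID is a placeholder.
+//
+//	gateways, _, err := vpcClassic.ListVPNGateways(&ListVPNGatewaysOptions{
+//		ResourceGroupID: core.StringPtr(resourceGroupID),
+//		Mode:            core.StringPtr("policy"),
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("filtered gateways: %T\n", gateways)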
+func (vpcClassic *VpcClassicV1) ListVPNGateways(listVPNGatewaysOptions *ListVPNGatewaysOptions) (result *VPNGatewayCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListVPNGatewaysWithContext(context.Background(), listVPNGatewaysOptions) +} + +// ListVPNGatewaysWithContext is an alternate form of the ListVPNGateways method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListVPNGatewaysWithContext(ctx context.Context, listVPNGatewaysOptions *ListVPNGatewaysOptions) (result *VPNGatewayCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listVPNGatewaysOptions, "listVPNGatewaysOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listVPNGatewaysOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListVPNGateways") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listVPNGatewaysOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listVPNGatewaysOptions.Start)) + } + if listVPNGatewaysOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listVPNGatewaysOptions.Limit)) + } + if listVPNGatewaysOptions.ResourceGroupID != nil { + builder.AddQuery("resource_group.id", fmt.Sprint(*listVPNGatewaysOptions.ResourceGroupID)) + } + if listVPNGatewaysOptions.Mode != nil { + builder.AddQuery("mode", fmt.Sprint(*listVPNGatewaysOptions.Mode)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateVPNGateway : Create a VPN gateway +// This request creates a new VPN gateway. 
+func (vpcClassic *VpcClassicV1) CreateVPNGateway(createVPNGatewayOptions *CreateVPNGatewayOptions) (result VPNGatewayIntf, response *core.DetailedResponse, err error) { + return vpcClassic.CreateVPNGatewayWithContext(context.Background(), createVPNGatewayOptions) +} + +// CreateVPNGatewayWithContext is an alternate form of the CreateVPNGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateVPNGatewayWithContext(ctx context.Context, createVPNGatewayOptions *CreateVPNGatewayOptions) (result VPNGatewayIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createVPNGatewayOptions, "createVPNGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createVPNGatewayOptions, "createVPNGatewayOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createVPNGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateVPNGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(createVPNGatewayOptions.VPNGatewayPrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteVPNGateway : Delete a VPN gateway +// This request deletes a VPN gateway. A VPN gateway with a `status` of `pending` cannot be deleted. This operation +// deletes all VPN gateway connections associated with this VPN gateway. This operation cannot be reversed. 
+func (vpcClassic *VpcClassicV1) DeleteVPNGateway(deleteVPNGatewayOptions *DeleteVPNGatewayOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteVPNGatewayWithContext(context.Background(), deleteVPNGatewayOptions) +} + +// DeleteVPNGatewayWithContext is an alternate form of the DeleteVPNGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteVPNGatewayWithContext(ctx context.Context, deleteVPNGatewayOptions *DeleteVPNGatewayOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteVPNGatewayOptions, "deleteVPNGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteVPNGatewayOptions, "deleteVPNGatewayOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteVPNGatewayOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteVPNGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteVPNGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetVPNGateway : Retrieve a VPN gateway +// This request retrieves a single VPN gateway specified by the identifier in the URL. 
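+//
+// Sketch (usage illustration, not generated code): the result is the
+// VPNGatewayIntf interface, so callers typically assert the concrete
+// *VPNGateway model (the type UnmarshalVPNGateway produces) before reading
+// fields; the Name field is assumed from the usual generated layout.
+//
+//	result, _, err := vpcClassic.GetVPNGateway(&GetVPNGatewayOptions{
+//		ID: core.StringPtr(gatewayID),
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	if gw, ok := result.(*VPNGateway); ok {
+//		fmt.Println(*gw.Name)
+//	}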
+func (vpcClassic *VpcClassicV1) GetVPNGateway(getVPNGatewayOptions *GetVPNGatewayOptions) (result VPNGatewayIntf, response *core.DetailedResponse, err error) { + return vpcClassic.GetVPNGatewayWithContext(context.Background(), getVPNGatewayOptions) +} + +// GetVPNGatewayWithContext is an alternate form of the GetVPNGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetVPNGatewayWithContext(ctx context.Context, getVPNGatewayOptions *GetVPNGatewayOptions) (result VPNGatewayIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getVPNGatewayOptions, "getVPNGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getVPNGatewayOptions, "getVPNGatewayOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getVPNGatewayOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getVPNGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetVPNGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateVPNGateway : Update a VPN gateway +// This request updates the properties of an existing VPN gateway. 
+func (vpcClassic *VpcClassicV1) UpdateVPNGateway(updateVPNGatewayOptions *UpdateVPNGatewayOptions) (result VPNGatewayIntf, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateVPNGatewayWithContext(context.Background(), updateVPNGatewayOptions) +} + +// UpdateVPNGatewayWithContext is an alternate form of the UpdateVPNGateway method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateVPNGatewayWithContext(ctx context.Context, updateVPNGatewayOptions *UpdateVPNGatewayOptions) (result VPNGatewayIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateVPNGatewayOptions, "updateVPNGatewayOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateVPNGatewayOptions, "updateVPNGatewayOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateVPNGatewayOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateVPNGatewayOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateVPNGateway") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateVPNGatewayOptions.VPNGatewayPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGateway) + if err != nil { + return + } + response.Result = result + + return +} + +// ListVPNGatewayConnections : List all connections of a VPN gateway +// This request lists all connections of a VPN gateway. 
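+//
+// Sketch (usage illustration, not generated code): connections can be
+// narrowed with the status query parameter handled above; "up" is an assumed
+// example value, and gatewayID is a placeholder.
+//
+//	connections, _, err := vpcClassic.ListVPNGatewayConnections(
+//		&ListVPNGatewayConnectionsOptions{
+//			VPNGatewayID: core.StringPtr(gatewayID),
+//			Status:       core.StringPtr("up"),
+//		})
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("connections: %T\n", connections)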
+func (vpcClassic *VpcClassicV1) ListVPNGatewayConnections(listVPNGatewayConnectionsOptions *ListVPNGatewayConnectionsOptions) (result *VPNGatewayConnectionCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListVPNGatewayConnectionsWithContext(context.Background(), listVPNGatewayConnectionsOptions) +} + +// ListVPNGatewayConnectionsWithContext is an alternate form of the ListVPNGatewayConnections method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListVPNGatewayConnectionsWithContext(ctx context.Context, listVPNGatewayConnectionsOptions *ListVPNGatewayConnectionsOptions) (result *VPNGatewayConnectionCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listVPNGatewayConnectionsOptions, "listVPNGatewayConnectionsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listVPNGatewayConnectionsOptions, "listVPNGatewayConnectionsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *listVPNGatewayConnectionsOptions.VPNGatewayID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listVPNGatewayConnectionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListVPNGatewayConnections") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + if listVPNGatewayConnectionsOptions.Status != nil { + builder.AddQuery("status", fmt.Sprint(*listVPNGatewayConnectionsOptions.Status)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateVPNGatewayConnection : Create a connection for a VPN gateway +// This request creates a new VPN gateway connection. 
+func (vpcClassic *VpcClassicV1) CreateVPNGatewayConnection(createVPNGatewayConnectionOptions *CreateVPNGatewayConnectionOptions) (result VPNGatewayConnectionIntf, response *core.DetailedResponse, err error) { + return vpcClassic.CreateVPNGatewayConnectionWithContext(context.Background(), createVPNGatewayConnectionOptions) +} + +// CreateVPNGatewayConnectionWithContext is an alternate form of the CreateVPNGatewayConnection method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateVPNGatewayConnectionWithContext(ctx context.Context, createVPNGatewayConnectionOptions *CreateVPNGatewayConnectionOptions) (result VPNGatewayConnectionIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createVPNGatewayConnectionOptions, "createVPNGatewayConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createVPNGatewayConnectionOptions, "createVPNGatewayConnectionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *createVPNGatewayConnectionOptions.VPNGatewayID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createVPNGatewayConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateVPNGatewayConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(createVPNGatewayConnectionOptions.VPNGatewayConnectionPrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnection) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteVPNGatewayConnection : Delete a VPN gateway connection +// This request deletes a VPN gateway connection. This operation cannot be reversed. 
+func (vpcClassic *VpcClassicV1) DeleteVPNGatewayConnection(deleteVPNGatewayConnectionOptions *DeleteVPNGatewayConnectionOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteVPNGatewayConnectionWithContext(context.Background(), deleteVPNGatewayConnectionOptions) +} + +// DeleteVPNGatewayConnectionWithContext is an alternate form of the DeleteVPNGatewayConnection method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteVPNGatewayConnectionWithContext(ctx context.Context, deleteVPNGatewayConnectionOptions *DeleteVPNGatewayConnectionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteVPNGatewayConnectionOptions, "deleteVPNGatewayConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteVPNGatewayConnectionOptions, "deleteVPNGatewayConnectionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *deleteVPNGatewayConnectionOptions.VPNGatewayID, + "id": *deleteVPNGatewayConnectionOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteVPNGatewayConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteVPNGatewayConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetVPNGatewayConnection : Retrieve a VPN gateway connection +// This request retrieves a single VPN gateway connection specified by the identifier in the URL. 
+func (vpcClassic *VpcClassicV1) GetVPNGatewayConnection(getVPNGatewayConnectionOptions *GetVPNGatewayConnectionOptions) (result VPNGatewayConnectionIntf, response *core.DetailedResponse, err error) { + return vpcClassic.GetVPNGatewayConnectionWithContext(context.Background(), getVPNGatewayConnectionOptions) +} + +// GetVPNGatewayConnectionWithContext is an alternate form of the GetVPNGatewayConnection method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetVPNGatewayConnectionWithContext(ctx context.Context, getVPNGatewayConnectionOptions *GetVPNGatewayConnectionOptions) (result VPNGatewayConnectionIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getVPNGatewayConnectionOptions, "getVPNGatewayConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getVPNGatewayConnectionOptions, "getVPNGatewayConnectionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *getVPNGatewayConnectionOptions.VPNGatewayID, + "id": *getVPNGatewayConnectionOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getVPNGatewayConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetVPNGatewayConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnection) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateVPNGatewayConnection : Update a VPN gateway connection +// This request updates the properties of an existing VPN gateway connection. 
+func (vpcClassic *VpcClassicV1) UpdateVPNGatewayConnection(updateVPNGatewayConnectionOptions *UpdateVPNGatewayConnectionOptions) (result VPNGatewayConnectionIntf, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateVPNGatewayConnectionWithContext(context.Background(), updateVPNGatewayConnectionOptions) +} + +// UpdateVPNGatewayConnectionWithContext is an alternate form of the UpdateVPNGatewayConnection method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateVPNGatewayConnectionWithContext(ctx context.Context, updateVPNGatewayConnectionOptions *UpdateVPNGatewayConnectionOptions) (result VPNGatewayConnectionIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateVPNGatewayConnectionOptions, "updateVPNGatewayConnectionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateVPNGatewayConnectionOptions, "updateVPNGatewayConnectionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *updateVPNGatewayConnectionOptions.VPNGatewayID, + "id": *updateVPNGatewayConnectionOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateVPNGatewayConnectionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateVPNGatewayConnection") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateVPNGatewayConnectionOptions.VPNGatewayConnectionPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnection) + if err != nil { + return + } + response.Result = result + + return +} + +// ListVPNGatewayConnectionLocalCIDRs : List all local CIDRs for a VPN gateway connection +// This request lists all local CIDRs for a VPN gateway connection. 
+func (vpcClassic *VpcClassicV1) ListVPNGatewayConnectionLocalCIDRs(listVPNGatewayConnectionLocalCIDRsOptions *ListVPNGatewayConnectionLocalCIDRsOptions) (result *VPNGatewayConnectionLocalCIDRs, response *core.DetailedResponse, err error) { + return vpcClassic.ListVPNGatewayConnectionLocalCIDRsWithContext(context.Background(), listVPNGatewayConnectionLocalCIDRsOptions) +} + +// ListVPNGatewayConnectionLocalCIDRsWithContext is an alternate form of the ListVPNGatewayConnectionLocalCIDRs method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListVPNGatewayConnectionLocalCIDRsWithContext(ctx context.Context, listVPNGatewayConnectionLocalCIDRsOptions *ListVPNGatewayConnectionLocalCIDRsOptions) (result *VPNGatewayConnectionLocalCIDRs, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listVPNGatewayConnectionLocalCIDRsOptions, "listVPNGatewayConnectionLocalCIDRsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listVPNGatewayConnectionLocalCIDRsOptions, "listVPNGatewayConnectionLocalCIDRsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *listVPNGatewayConnectionLocalCIDRsOptions.VPNGatewayID, + "id": *listVPNGatewayConnectionLocalCIDRsOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}/local_cidrs`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listVPNGatewayConnectionLocalCIDRsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListVPNGatewayConnectionLocalCIDRs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionLocalCIDRs) + if err != nil { + return + } + response.Result = result + + return +} + +// RemoveVPNGatewayConnectionLocalCIDR : Remove a local CIDR from a VPN gateway connection +// This request removes a CIDR from a VPN gateway connection. 
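+//
+// Sketch (usage illustration, not generated code): the CIDR is addressed by
+// two path segments, so 10.45.0.0/24 is passed as the prefix "10.45.0.0"
+// plus the length "24" rather than as a single string; gatewayID and
+// connectionID are placeholders.
+//
+//	response, err := vpcClassic.RemoveVPNGatewayConnectionLocalCIDR(
+//		&RemoveVPNGatewayConnectionLocalCIDROptions{
+//			VPNGatewayID: core.StringPtr(gatewayID),
+//			ID:           core.StringPtr(connectionID),
+//			CIDRPrefix:   core.StringPtr("10.45.0.0"),
+//			PrefixLength: core.StringPtr("24"),
+//		})
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("remove status %d\n", response.StatusCode)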
+func (vpcClassic *VpcClassicV1) RemoveVPNGatewayConnectionLocalCIDR(removeVPNGatewayConnectionLocalCIDROptions *RemoveVPNGatewayConnectionLocalCIDROptions) (response *core.DetailedResponse, err error) { + return vpcClassic.RemoveVPNGatewayConnectionLocalCIDRWithContext(context.Background(), removeVPNGatewayConnectionLocalCIDROptions) +} + +// RemoveVPNGatewayConnectionLocalCIDRWithContext is an alternate form of the RemoveVPNGatewayConnectionLocalCIDR method which supports a Context parameter +func (vpcClassic *VpcClassicV1) RemoveVPNGatewayConnectionLocalCIDRWithContext(ctx context.Context, removeVPNGatewayConnectionLocalCIDROptions *RemoveVPNGatewayConnectionLocalCIDROptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(removeVPNGatewayConnectionLocalCIDROptions, "removeVPNGatewayConnectionLocalCIDROptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(removeVPNGatewayConnectionLocalCIDROptions, "removeVPNGatewayConnectionLocalCIDROptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *removeVPNGatewayConnectionLocalCIDROptions.VPNGatewayID, + "id": *removeVPNGatewayConnectionLocalCIDROptions.ID, + "cidr_prefix": *removeVPNGatewayConnectionLocalCIDROptions.CIDRPrefix, + "prefix_length": *removeVPNGatewayConnectionLocalCIDROptions.PrefixLength, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}/local_cidrs/{cidr_prefix}/{prefix_length}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range removeVPNGatewayConnectionLocalCIDROptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "RemoveVPNGatewayConnectionLocalCIDR") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// CheckVPNGatewayConnectionLocalCIDR : Check if the specified local CIDR exists on a VPN gateway connection +// This request succeeds if a CIDR exists on the specified VPN gateway connection and fails otherwise. 
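+//
+// Sketch (usage illustration, not generated code): "fails otherwise"
+// surfaces as a non-nil error, and with the go-sdk-core transport the
+// DetailedResponse returned alongside the error still carries the status
+// code, so a 404 can be read as "CIDR not attached" rather than a hard
+// failure. Placeholder IDs throughout:
+//
+//	response, err := vpcClassic.CheckVPNGatewayConnectionLocalCIDR(
+//		&CheckVPNGatewayConnectionLocalCIDROptions{
+//			VPNGatewayID: core.StringPtr(gatewayID),
+//			ID:           core.StringPtr(connectionID),
+//			CIDRPrefix:   core.StringPtr("10.45.0.0"),
+//			PrefixLength: core.StringPtr("24"),
+//		})
+//	exists := err == nil
+//	if !exists && (response == nil || response.StatusCode != 404) {
+//		panic(err) // a transport or service failure, not just "absent"
+//	}
+//	fmt.Println("cidr attached:", exists)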
+func (vpcClassic *VpcClassicV1) CheckVPNGatewayConnectionLocalCIDR(checkVPNGatewayConnectionLocalCIDROptions *CheckVPNGatewayConnectionLocalCIDROptions) (response *core.DetailedResponse, err error) { + return vpcClassic.CheckVPNGatewayConnectionLocalCIDRWithContext(context.Background(), checkVPNGatewayConnectionLocalCIDROptions) +} + +// CheckVPNGatewayConnectionLocalCIDRWithContext is an alternate form of the CheckVPNGatewayConnectionLocalCIDR method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CheckVPNGatewayConnectionLocalCIDRWithContext(ctx context.Context, checkVPNGatewayConnectionLocalCIDROptions *CheckVPNGatewayConnectionLocalCIDROptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(checkVPNGatewayConnectionLocalCIDROptions, "checkVPNGatewayConnectionLocalCIDROptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(checkVPNGatewayConnectionLocalCIDROptions, "checkVPNGatewayConnectionLocalCIDROptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *checkVPNGatewayConnectionLocalCIDROptions.VPNGatewayID, + "id": *checkVPNGatewayConnectionLocalCIDROptions.ID, + "cidr_prefix": *checkVPNGatewayConnectionLocalCIDROptions.CIDRPrefix, + "prefix_length": *checkVPNGatewayConnectionLocalCIDROptions.PrefixLength, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}/local_cidrs/{cidr_prefix}/{prefix_length}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range checkVPNGatewayConnectionLocalCIDROptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CheckVPNGatewayConnectionLocalCIDR") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// AddVPNGatewayConnectionLocalCIDR : Set a local CIDR on a VPN gateway connection +// This request adds the specified CIDR to the specified VPN gateway connection. A request body is not required, and if +// supplied, is ignored. This request succeeds if the CIDR already exists on the specified VPN gateway connection. 
+func (vpcClassic *VpcClassicV1) AddVPNGatewayConnectionLocalCIDR(addVPNGatewayConnectionLocalCIDROptions *AddVPNGatewayConnectionLocalCIDROptions) (response *core.DetailedResponse, err error) { + return vpcClassic.AddVPNGatewayConnectionLocalCIDRWithContext(context.Background(), addVPNGatewayConnectionLocalCIDROptions) +} + +// AddVPNGatewayConnectionLocalCIDRWithContext is an alternate form of the AddVPNGatewayConnectionLocalCIDR method which supports a Context parameter +func (vpcClassic *VpcClassicV1) AddVPNGatewayConnectionLocalCIDRWithContext(ctx context.Context, addVPNGatewayConnectionLocalCIDROptions *AddVPNGatewayConnectionLocalCIDROptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(addVPNGatewayConnectionLocalCIDROptions, "addVPNGatewayConnectionLocalCIDROptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(addVPNGatewayConnectionLocalCIDROptions, "addVPNGatewayConnectionLocalCIDROptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *addVPNGatewayConnectionLocalCIDROptions.VPNGatewayID, + "id": *addVPNGatewayConnectionLocalCIDROptions.ID, + "cidr_prefix": *addVPNGatewayConnectionLocalCIDROptions.CIDRPrefix, + "prefix_length": *addVPNGatewayConnectionLocalCIDROptions.PrefixLength, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}/local_cidrs/{cidr_prefix}/{prefix_length}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range addVPNGatewayConnectionLocalCIDROptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "AddVPNGatewayConnectionLocalCIDR") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// ListVPNGatewayConnectionPeerCIDRs : List all peer CIDRs for a VPN gateway connection +// This request lists all peer CIDRs for a VPN gateway connection. 
+func (vpcClassic *VpcClassicV1) ListVPNGatewayConnectionPeerCIDRs(listVPNGatewayConnectionPeerCIDRsOptions *ListVPNGatewayConnectionPeerCIDRsOptions) (result *VPNGatewayConnectionPeerCIDRs, response *core.DetailedResponse, err error) { + return vpcClassic.ListVPNGatewayConnectionPeerCIDRsWithContext(context.Background(), listVPNGatewayConnectionPeerCIDRsOptions) +} + +// ListVPNGatewayConnectionPeerCIDRsWithContext is an alternate form of the ListVPNGatewayConnectionPeerCIDRs method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListVPNGatewayConnectionPeerCIDRsWithContext(ctx context.Context, listVPNGatewayConnectionPeerCIDRsOptions *ListVPNGatewayConnectionPeerCIDRsOptions) (result *VPNGatewayConnectionPeerCIDRs, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listVPNGatewayConnectionPeerCIDRsOptions, "listVPNGatewayConnectionPeerCIDRsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listVPNGatewayConnectionPeerCIDRsOptions, "listVPNGatewayConnectionPeerCIDRsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *listVPNGatewayConnectionPeerCIDRsOptions.VPNGatewayID, + "id": *listVPNGatewayConnectionPeerCIDRsOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}/peer_cidrs`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listVPNGatewayConnectionPeerCIDRsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListVPNGatewayConnectionPeerCIDRs") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionPeerCIDRs) + if err != nil { + return + } + response.Result = result + + return +} + +// RemoveVPNGatewayConnectionPeerCIDR : Remove a peer CIDR from a VPN gateway connection +// This request removes a CIDR from a VPN gateway connection. 
+func (vpcClassic *VpcClassicV1) RemoveVPNGatewayConnectionPeerCIDR(removeVPNGatewayConnectionPeerCIDROptions *RemoveVPNGatewayConnectionPeerCIDROptions) (response *core.DetailedResponse, err error) { + return vpcClassic.RemoveVPNGatewayConnectionPeerCIDRWithContext(context.Background(), removeVPNGatewayConnectionPeerCIDROptions) +} + +// RemoveVPNGatewayConnectionPeerCIDRWithContext is an alternate form of the RemoveVPNGatewayConnectionPeerCIDR method which supports a Context parameter +func (vpcClassic *VpcClassicV1) RemoveVPNGatewayConnectionPeerCIDRWithContext(ctx context.Context, removeVPNGatewayConnectionPeerCIDROptions *RemoveVPNGatewayConnectionPeerCIDROptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(removeVPNGatewayConnectionPeerCIDROptions, "removeVPNGatewayConnectionPeerCIDROptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(removeVPNGatewayConnectionPeerCIDROptions, "removeVPNGatewayConnectionPeerCIDROptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *removeVPNGatewayConnectionPeerCIDROptions.VPNGatewayID, + "id": *removeVPNGatewayConnectionPeerCIDROptions.ID, + "cidr_prefix": *removeVPNGatewayConnectionPeerCIDROptions.CIDRPrefix, + "prefix_length": *removeVPNGatewayConnectionPeerCIDROptions.PrefixLength, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}/peer_cidrs/{cidr_prefix}/{prefix_length}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range removeVPNGatewayConnectionPeerCIDROptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "RemoveVPNGatewayConnectionPeerCIDR") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// CheckVPNGatewayConnectionPeerCIDR : Check if the specified peer CIDR exists on a VPN gateway connection +// This request succeeds if a CIDR exists on the specified VPN gateway connection and fails otherwise. 
+func (vpcClassic *VpcClassicV1) CheckVPNGatewayConnectionPeerCIDR(checkVPNGatewayConnectionPeerCIDROptions *CheckVPNGatewayConnectionPeerCIDROptions) (response *core.DetailedResponse, err error) { + return vpcClassic.CheckVPNGatewayConnectionPeerCIDRWithContext(context.Background(), checkVPNGatewayConnectionPeerCIDROptions) +} + +// CheckVPNGatewayConnectionPeerCIDRWithContext is an alternate form of the CheckVPNGatewayConnectionPeerCIDR method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CheckVPNGatewayConnectionPeerCIDRWithContext(ctx context.Context, checkVPNGatewayConnectionPeerCIDROptions *CheckVPNGatewayConnectionPeerCIDROptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(checkVPNGatewayConnectionPeerCIDROptions, "checkVPNGatewayConnectionPeerCIDROptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(checkVPNGatewayConnectionPeerCIDROptions, "checkVPNGatewayConnectionPeerCIDROptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *checkVPNGatewayConnectionPeerCIDROptions.VPNGatewayID, + "id": *checkVPNGatewayConnectionPeerCIDROptions.ID, + "cidr_prefix": *checkVPNGatewayConnectionPeerCIDROptions.CIDRPrefix, + "prefix_length": *checkVPNGatewayConnectionPeerCIDROptions.PrefixLength, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}/peer_cidrs/{cidr_prefix}/{prefix_length}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range checkVPNGatewayConnectionPeerCIDROptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CheckVPNGatewayConnectionPeerCIDR") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// AddVPNGatewayConnectionPeerCIDR : Set a peer CIDR on a VPN gateway connection +// This request adds the specified CIDR to the specified VPN gateway connection. A request body is not required, and if +// supplied, is ignored. This request succeeds if the CIDR already exists on the specified VPN gateway connection. 
+func (vpcClassic *VpcClassicV1) AddVPNGatewayConnectionPeerCIDR(addVPNGatewayConnectionPeerCIDROptions *AddVPNGatewayConnectionPeerCIDROptions) (response *core.DetailedResponse, err error) { + return vpcClassic.AddVPNGatewayConnectionPeerCIDRWithContext(context.Background(), addVPNGatewayConnectionPeerCIDROptions) +} + +// AddVPNGatewayConnectionPeerCIDRWithContext is an alternate form of the AddVPNGatewayConnectionPeerCIDR method which supports a Context parameter +func (vpcClassic *VpcClassicV1) AddVPNGatewayConnectionPeerCIDRWithContext(ctx context.Context, addVPNGatewayConnectionPeerCIDROptions *AddVPNGatewayConnectionPeerCIDROptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(addVPNGatewayConnectionPeerCIDROptions, "addVPNGatewayConnectionPeerCIDROptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(addVPNGatewayConnectionPeerCIDROptions, "addVPNGatewayConnectionPeerCIDROptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "vpn_gateway_id": *addVPNGatewayConnectionPeerCIDROptions.VPNGatewayID, + "id": *addVPNGatewayConnectionPeerCIDROptions.ID, + "cidr_prefix": *addVPNGatewayConnectionPeerCIDROptions.CIDRPrefix, + "prefix_length": *addVPNGatewayConnectionPeerCIDROptions.PrefixLength, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/vpn_gateways/{vpn_gateway_id}/connections/{id}/peer_cidrs/{cidr_prefix}/{prefix_length}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range addVPNGatewayConnectionPeerCIDROptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "AddVPNGatewayConnectionPeerCIDR") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// ListLoadBalancers : List all load balancers +// This request lists all load balancers in the region. 
+func (vpcClassic *VpcClassicV1) ListLoadBalancers(listLoadBalancersOptions *ListLoadBalancersOptions) (result *LoadBalancerCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListLoadBalancersWithContext(context.Background(), listLoadBalancersOptions) +} + +// ListLoadBalancersWithContext is an alternate form of the ListLoadBalancers method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListLoadBalancersWithContext(ctx context.Context, listLoadBalancersOptions *ListLoadBalancersOptions) (result *LoadBalancerCollection, response *core.DetailedResponse, err error) { + err = core.ValidateStruct(listLoadBalancersOptions, "listLoadBalancersOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers`, nil) + if err != nil { + return + } + + for headerName, headerValue := range listLoadBalancersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListLoadBalancers") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancer : Create a load balancer +// This request creates and provisions a new load balancer. 
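+//
+// Sketch (usage illustration, not generated code): subnets use the
+// identity-by-ID pattern; SubnetIdentityByID and the SubnetIdentityIntf
+// interface are the usual generated names and are assumed to exist in this
+// vendored revision. subnetID is a placeholder.
+//
+//	lb, _, err := vpcClassic.CreateLoadBalancer(&CreateLoadBalancerOptions{
+//		IsPublic: core.BoolPtr(true),
+//		Subnets: []SubnetIdentityIntf{
+//			&SubnetIdentityByID{ID: core.StringPtr(subnetID)},
+//		},
+//		Name: core.StringPtr("my-load-balancer"),
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("created %T\n", lb)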
+func (vpcClassic *VpcClassicV1) CreateLoadBalancer(createLoadBalancerOptions *CreateLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + return vpcClassic.CreateLoadBalancerWithContext(context.Background(), createLoadBalancerOptions) +} + +// CreateLoadBalancerWithContext is an alternate form of the CreateLoadBalancer method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateLoadBalancerWithContext(ctx context.Context, createLoadBalancerOptions *CreateLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createLoadBalancerOptions, "createLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createLoadBalancerOptions, "createLoadBalancerOptions") + if err != nil { + return + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers`, nil) + if err != nil { + return + } + + for headerName, headerValue := range createLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createLoadBalancerOptions.IsPublic != nil { + body["is_public"] = createLoadBalancerOptions.IsPublic + } + if createLoadBalancerOptions.Subnets != nil { + body["subnets"] = createLoadBalancerOptions.Subnets + } + if createLoadBalancerOptions.Listeners != nil { + body["listeners"] = createLoadBalancerOptions.Listeners + } + if createLoadBalancerOptions.Logging != nil { + body["logging"] = createLoadBalancerOptions.Logging + } + if createLoadBalancerOptions.Name != nil { + body["name"] = createLoadBalancerOptions.Name + } + if createLoadBalancerOptions.Pools != nil { + body["pools"] = createLoadBalancerOptions.Pools + } + if createLoadBalancerOptions.ResourceGroup != nil { + body["resource_group"] = createLoadBalancerOptions.ResourceGroup + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteLoadBalancer : Delete a load balancer +// This request deletes a load balancer. This operation cannot be reversed. 
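+//
+// Example (illustrative sketch, not generated SDK code; the identifier is hypothetical):
+//
+//     response, err := vpcService.DeleteLoadBalancer(&DeleteLoadBalancerOptions{
+//         ID: core.StringPtr("lb-id"),
+//     })
+//     // Deletion returns no response body; inspect response.StatusCode for the outcome.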
+func (vpcClassic *VpcClassicV1) DeleteLoadBalancer(deleteLoadBalancerOptions *DeleteLoadBalancerOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteLoadBalancerWithContext(context.Background(), deleteLoadBalancerOptions) +} + +// DeleteLoadBalancerWithContext is an alternate form of the DeleteLoadBalancer method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteLoadBalancerWithContext(ctx context.Context, deleteLoadBalancerOptions *DeleteLoadBalancerOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerOptions, "deleteLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerOptions, "deleteLoadBalancerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *deleteLoadBalancerOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetLoadBalancer : Retrieve a load balancer +// This request retrieves a single load balancer specified by the identifier in the URL path. 
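+//
+// Example (illustrative sketch, not generated SDK code; the identifier is hypothetical):
+//
+//     lb, _, err := vpcService.GetLoadBalancer(&GetLoadBalancerOptions{
+//         ID: core.StringPtr("lb-id"),
+//     })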
+func (vpcClassic *VpcClassicV1) GetLoadBalancer(getLoadBalancerOptions *GetLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + return vpcClassic.GetLoadBalancerWithContext(context.Background(), getLoadBalancerOptions) +} + +// GetLoadBalancerWithContext is an alternate form of the GetLoadBalancer method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetLoadBalancerWithContext(ctx context.Context, getLoadBalancerOptions *GetLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerOptions, "getLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerOptions, "getLoadBalancerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getLoadBalancerOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateLoadBalancer : Update a load balancer +// This request updates a load balancer. 
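+//
+// Example (illustrative sketch, not generated SDK code): this assumes the
+// LoadBalancerPatch field accepts a JSON merge-patch map, consistent with the
+// application/merge-patch+json Content-Type set above:
+//
+//     lb, _, err := vpcService.UpdateLoadBalancer(&UpdateLoadBalancerOptions{
+//         ID:                core.StringPtr("lb-id"),
+//         LoadBalancerPatch: map[string]interface{}{"name": "renamed-lb"},
+//     })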
+func (vpcClassic *VpcClassicV1) UpdateLoadBalancer(updateLoadBalancerOptions *UpdateLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateLoadBalancerWithContext(context.Background(), updateLoadBalancerOptions) +} + +// UpdateLoadBalancerWithContext is an alternate form of the UpdateLoadBalancer method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateLoadBalancerWithContext(ctx context.Context, updateLoadBalancerOptions *UpdateLoadBalancerOptions) (result *LoadBalancer, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateLoadBalancerOptions, "updateLoadBalancerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateLoadBalancerOptions, "updateLoadBalancerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *updateLoadBalancerOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateLoadBalancerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateLoadBalancer") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateLoadBalancerOptions.LoadBalancerPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer) + if err != nil { + return + } + response.Result = result + + return +} + +// GetLoadBalancerStatistics : List all statistics of a load balancer +// This request lists statistics of a load balancer. 
+func (vpcClassic *VpcClassicV1) GetLoadBalancerStatistics(getLoadBalancerStatisticsOptions *GetLoadBalancerStatisticsOptions) (result *LoadBalancerStatistics, response *core.DetailedResponse, err error) { + return vpcClassic.GetLoadBalancerStatisticsWithContext(context.Background(), getLoadBalancerStatisticsOptions) +} + +// GetLoadBalancerStatisticsWithContext is an alternate form of the GetLoadBalancerStatistics method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetLoadBalancerStatisticsWithContext(ctx context.Context, getLoadBalancerStatisticsOptions *GetLoadBalancerStatisticsOptions) (result *LoadBalancerStatistics, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerStatisticsOptions, "getLoadBalancerStatisticsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerStatisticsOptions, "getLoadBalancerStatisticsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "id": *getLoadBalancerStatisticsOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{id}/statistics`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerStatisticsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetLoadBalancerStatistics") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerStatistics) + if err != nil { + return + } + response.Result = result + + return +} + +// ListLoadBalancerListeners : List all listeners for a load balancer +// This request lists all listeners for a load balancer. 
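+//
+// Example (illustrative sketch, not generated SDK code; the identifier is hypothetical):
+//
+//     listeners, _, err := vpcService.ListLoadBalancerListeners(&ListLoadBalancerListenersOptions{
+//         LoadBalancerID: core.StringPtr("lb-id"),
+//     })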
+func (vpcClassic *VpcClassicV1) ListLoadBalancerListeners(listLoadBalancerListenersOptions *ListLoadBalancerListenersOptions) (result *LoadBalancerListenerCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListLoadBalancerListenersWithContext(context.Background(), listLoadBalancerListenersOptions) +} + +// ListLoadBalancerListenersWithContext is an alternate form of the ListLoadBalancerListeners method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListLoadBalancerListenersWithContext(ctx context.Context, listLoadBalancerListenersOptions *ListLoadBalancerListenersOptions) (result *LoadBalancerListenerCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listLoadBalancerListenersOptions, "listLoadBalancerListenersOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listLoadBalancerListenersOptions, "listLoadBalancerListenersOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *listLoadBalancerListenersOptions.LoadBalancerID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listLoadBalancerListenersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListLoadBalancerListeners") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerListener : Create a listener for a load balancer +// This request creates a new listener for a load balancer. 
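+//
+// Example (illustrative sketch, not generated SDK code): sets only the port and
+// protocol body fields; Port is assumed to be an *int64, per the generator's
+// usual integer handling:
+//
+//     listener, _, err := vpcService.CreateLoadBalancerListener(&CreateLoadBalancerListenerOptions{
+//         LoadBalancerID: core.StringPtr("lb-id"),
+//         Port:           core.Int64Ptr(443),
+//         Protocol:       core.StringPtr("https"),
+//     })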
+func (vpcClassic *VpcClassicV1) CreateLoadBalancerListener(createLoadBalancerListenerOptions *CreateLoadBalancerListenerOptions) (result *LoadBalancerListener, response *core.DetailedResponse, err error) { + return vpcClassic.CreateLoadBalancerListenerWithContext(context.Background(), createLoadBalancerListenerOptions) +} + +// CreateLoadBalancerListenerWithContext is an alternate form of the CreateLoadBalancerListener method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateLoadBalancerListenerWithContext(ctx context.Context, createLoadBalancerListenerOptions *CreateLoadBalancerListenerOptions) (result *LoadBalancerListener, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createLoadBalancerListenerOptions, "createLoadBalancerListenerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createLoadBalancerListenerOptions, "createLoadBalancerListenerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *createLoadBalancerListenerOptions.LoadBalancerID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createLoadBalancerListenerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateLoadBalancerListener") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createLoadBalancerListenerOptions.Port != nil { + body["port"] = createLoadBalancerListenerOptions.Port + } + if createLoadBalancerListenerOptions.Protocol != nil { + body["protocol"] = createLoadBalancerListenerOptions.Protocol + } + if createLoadBalancerListenerOptions.AcceptProxyProtocol != nil { + body["accept_proxy_protocol"] = createLoadBalancerListenerOptions.AcceptProxyProtocol + } + if createLoadBalancerListenerOptions.CertificateInstance != nil { + body["certificate_instance"] = createLoadBalancerListenerOptions.CertificateInstance + } + if createLoadBalancerListenerOptions.ConnectionLimit != nil { + body["connection_limit"] = createLoadBalancerListenerOptions.ConnectionLimit + } + if createLoadBalancerListenerOptions.DefaultPool != nil { + body["default_pool"] = createLoadBalancerListenerOptions.DefaultPool + } + if createLoadBalancerListenerOptions.Policies != nil { + body["policies"] = createLoadBalancerListenerOptions.Policies + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListener) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteLoadBalancerListener : Delete a load balancer listener +// This request deletes a load balancer listener. 
This operation cannot be reversed. +func (vpcClassic *VpcClassicV1) DeleteLoadBalancerListener(deleteLoadBalancerListenerOptions *DeleteLoadBalancerListenerOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteLoadBalancerListenerWithContext(context.Background(), deleteLoadBalancerListenerOptions) +} + +// DeleteLoadBalancerListenerWithContext is an alternate form of the DeleteLoadBalancerListener method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteLoadBalancerListenerWithContext(ctx context.Context, deleteLoadBalancerListenerOptions *DeleteLoadBalancerListenerOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerListenerOptions, "deleteLoadBalancerListenerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerListenerOptions, "deleteLoadBalancerListenerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *deleteLoadBalancerListenerOptions.LoadBalancerID, + "id": *deleteLoadBalancerListenerOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerListenerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteLoadBalancerListener") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetLoadBalancerListener : Retrieve a load balancer listener +// This request retrieves a single listener specified by the identifier in the URL path. 
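+//
+// Example (illustrative sketch, not generated SDK code): the WithContext form
+// lets the caller impose a deadline on the request, e.g.:
+//
+//     ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//     defer cancel()
+//     listener, _, err := vpcService.GetLoadBalancerListenerWithContext(ctx, &GetLoadBalancerListenerOptions{
+//         LoadBalancerID: core.StringPtr("lb-id"),
+//         ID:             core.StringPtr("listener-id"),
+//     })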
+func (vpcClassic *VpcClassicV1) GetLoadBalancerListener(getLoadBalancerListenerOptions *GetLoadBalancerListenerOptions) (result *LoadBalancerListener, response *core.DetailedResponse, err error) { + return vpcClassic.GetLoadBalancerListenerWithContext(context.Background(), getLoadBalancerListenerOptions) +} + +// GetLoadBalancerListenerWithContext is an alternate form of the GetLoadBalancerListener method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetLoadBalancerListenerWithContext(ctx context.Context, getLoadBalancerListenerOptions *GetLoadBalancerListenerOptions) (result *LoadBalancerListener, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerListenerOptions, "getLoadBalancerListenerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerListenerOptions, "getLoadBalancerListenerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *getLoadBalancerListenerOptions.LoadBalancerID, + "id": *getLoadBalancerListenerOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerListenerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetLoadBalancerListener") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListener) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateLoadBalancerListener : Update a load balancer listener +// This request updates a load balancer listener from a listener patch. 
+func (vpcClassic *VpcClassicV1) UpdateLoadBalancerListener(updateLoadBalancerListenerOptions *UpdateLoadBalancerListenerOptions) (result *LoadBalancerListener, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateLoadBalancerListenerWithContext(context.Background(), updateLoadBalancerListenerOptions) +} + +// UpdateLoadBalancerListenerWithContext is an alternate form of the UpdateLoadBalancerListener method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateLoadBalancerListenerWithContext(ctx context.Context, updateLoadBalancerListenerOptions *UpdateLoadBalancerListenerOptions) (result *LoadBalancerListener, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateLoadBalancerListenerOptions, "updateLoadBalancerListenerOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateLoadBalancerListenerOptions, "updateLoadBalancerListenerOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *updateLoadBalancerListenerOptions.LoadBalancerID, + "id": *updateLoadBalancerListenerOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateLoadBalancerListenerOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateLoadBalancerListener") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateLoadBalancerListenerOptions.LoadBalancerListenerPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListener) + if err != nil { + return + } + response.Result = result + + return +} + +// ListLoadBalancerListenerPolicies : List all policies for a load balancer listener +// This request lists all policies for a load balancer listener. 
+func (vpcClassic *VpcClassicV1) ListLoadBalancerListenerPolicies(listLoadBalancerListenerPoliciesOptions *ListLoadBalancerListenerPoliciesOptions) (result *LoadBalancerListenerPolicyCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListLoadBalancerListenerPoliciesWithContext(context.Background(), listLoadBalancerListenerPoliciesOptions) +} + +// ListLoadBalancerListenerPoliciesWithContext is an alternate form of the ListLoadBalancerListenerPolicies method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListLoadBalancerListenerPoliciesWithContext(ctx context.Context, listLoadBalancerListenerPoliciesOptions *ListLoadBalancerListenerPoliciesOptions) (result *LoadBalancerListenerPolicyCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listLoadBalancerListenerPoliciesOptions, "listLoadBalancerListenerPoliciesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listLoadBalancerListenerPoliciesOptions, "listLoadBalancerListenerPoliciesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *listLoadBalancerListenerPoliciesOptions.LoadBalancerID, + "listener_id": *listLoadBalancerListenerPoliciesOptions.ListenerID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listLoadBalancerListenerPoliciesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListLoadBalancerListenerPolicies") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerListenerPolicy : Create a policy for a load balancer listener +// Creates a new policy for a load balancer listener. 
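+//
+// Example (illustrative sketch, not generated SDK code): the action and
+// priority values shown are hypothetical:
+//
+//     policy, _, err := vpcService.CreateLoadBalancerListenerPolicy(&CreateLoadBalancerListenerPolicyOptions{
+//         LoadBalancerID: core.StringPtr("lb-id"),
+//         ListenerID:     core.StringPtr("listener-id"),
+//         Action:         core.StringPtr("forward"),
+//         Priority:       core.Int64Ptr(5),
+//     })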
+func (vpcClassic *VpcClassicV1) CreateLoadBalancerListenerPolicy(createLoadBalancerListenerPolicyOptions *CreateLoadBalancerListenerPolicyOptions) (result *LoadBalancerListenerPolicy, response *core.DetailedResponse, err error) { + return vpcClassic.CreateLoadBalancerListenerPolicyWithContext(context.Background(), createLoadBalancerListenerPolicyOptions) +} + +// CreateLoadBalancerListenerPolicyWithContext is an alternate form of the CreateLoadBalancerListenerPolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateLoadBalancerListenerPolicyWithContext(ctx context.Context, createLoadBalancerListenerPolicyOptions *CreateLoadBalancerListenerPolicyOptions) (result *LoadBalancerListenerPolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createLoadBalancerListenerPolicyOptions, "createLoadBalancerListenerPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createLoadBalancerListenerPolicyOptions, "createLoadBalancerListenerPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *createLoadBalancerListenerPolicyOptions.LoadBalancerID, + "listener_id": *createLoadBalancerListenerPolicyOptions.ListenerID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createLoadBalancerListenerPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateLoadBalancerListenerPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createLoadBalancerListenerPolicyOptions.Action != nil { + body["action"] = createLoadBalancerListenerPolicyOptions.Action + } + if createLoadBalancerListenerPolicyOptions.Priority != nil { + body["priority"] = createLoadBalancerListenerPolicyOptions.Priority + } + if createLoadBalancerListenerPolicyOptions.Name != nil { + body["name"] = createLoadBalancerListenerPolicyOptions.Name + } + if createLoadBalancerListenerPolicyOptions.Rules != nil { + body["rules"] = createLoadBalancerListenerPolicyOptions.Rules + } + if createLoadBalancerListenerPolicyOptions.Target != nil { + body["target"] = createLoadBalancerListenerPolicyOptions.Target + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteLoadBalancerListenerPolicy : Delete a load balancer listener policy +// Deletes a policy of the load balancer listener. This operation cannot be reversed. 
+func (vpcClassic *VpcClassicV1) DeleteLoadBalancerListenerPolicy(deleteLoadBalancerListenerPolicyOptions *DeleteLoadBalancerListenerPolicyOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteLoadBalancerListenerPolicyWithContext(context.Background(), deleteLoadBalancerListenerPolicyOptions) +} + +// DeleteLoadBalancerListenerPolicyWithContext is an alternate form of the DeleteLoadBalancerListenerPolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteLoadBalancerListenerPolicyWithContext(ctx context.Context, deleteLoadBalancerListenerPolicyOptions *DeleteLoadBalancerListenerPolicyOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerListenerPolicyOptions, "deleteLoadBalancerListenerPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerListenerPolicyOptions, "deleteLoadBalancerListenerPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *deleteLoadBalancerListenerPolicyOptions.LoadBalancerID, + "listener_id": *deleteLoadBalancerListenerPolicyOptions.ListenerID, + "id": *deleteLoadBalancerListenerPolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerListenerPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteLoadBalancerListenerPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetLoadBalancerListenerPolicy : Retrieve a load balancer listener policy +// Retrieve a single policy specified by the identifier in the URL path. 
+func (vpcClassic *VpcClassicV1) GetLoadBalancerListenerPolicy(getLoadBalancerListenerPolicyOptions *GetLoadBalancerListenerPolicyOptions) (result *LoadBalancerListenerPolicy, response *core.DetailedResponse, err error) { + return vpcClassic.GetLoadBalancerListenerPolicyWithContext(context.Background(), getLoadBalancerListenerPolicyOptions) +} + +// GetLoadBalancerListenerPolicyWithContext is an alternate form of the GetLoadBalancerListenerPolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetLoadBalancerListenerPolicyWithContext(ctx context.Context, getLoadBalancerListenerPolicyOptions *GetLoadBalancerListenerPolicyOptions) (result *LoadBalancerListenerPolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerListenerPolicyOptions, "getLoadBalancerListenerPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerListenerPolicyOptions, "getLoadBalancerListenerPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *getLoadBalancerListenerPolicyOptions.LoadBalancerID, + "listener_id": *getLoadBalancerListenerPolicyOptions.ListenerID, + "id": *getLoadBalancerListenerPolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerListenerPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetLoadBalancerListenerPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateLoadBalancerListenerPolicy : Update a load balancer listener policy +// Updates a policy from a policy patch. 
+func (vpcClassic *VpcClassicV1) UpdateLoadBalancerListenerPolicy(updateLoadBalancerListenerPolicyOptions *UpdateLoadBalancerListenerPolicyOptions) (result *LoadBalancerListenerPolicy, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateLoadBalancerListenerPolicyWithContext(context.Background(), updateLoadBalancerListenerPolicyOptions) +} + +// UpdateLoadBalancerListenerPolicyWithContext is an alternate form of the UpdateLoadBalancerListenerPolicy method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateLoadBalancerListenerPolicyWithContext(ctx context.Context, updateLoadBalancerListenerPolicyOptions *UpdateLoadBalancerListenerPolicyOptions) (result *LoadBalancerListenerPolicy, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateLoadBalancerListenerPolicyOptions, "updateLoadBalancerListenerPolicyOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateLoadBalancerListenerPolicyOptions, "updateLoadBalancerListenerPolicyOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *updateLoadBalancerListenerPolicyOptions.LoadBalancerID, + "listener_id": *updateLoadBalancerListenerPolicyOptions.ListenerID, + "id": *updateLoadBalancerListenerPolicyOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateLoadBalancerListenerPolicyOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateLoadBalancerListenerPolicy") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateLoadBalancerListenerPolicyOptions.LoadBalancerListenerPolicyPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicy) + if err != nil { + return + } + response.Result = result + + return +} + +// ListLoadBalancerListenerPolicyRules : List all rules of a load balancer listener policy +// This request lists all rules of a load balancer listener policy. 
+func (vpcClassic *VpcClassicV1) ListLoadBalancerListenerPolicyRules(listLoadBalancerListenerPolicyRulesOptions *ListLoadBalancerListenerPolicyRulesOptions) (result *LoadBalancerListenerPolicyRuleCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListLoadBalancerListenerPolicyRulesWithContext(context.Background(), listLoadBalancerListenerPolicyRulesOptions) +} + +// ListLoadBalancerListenerPolicyRulesWithContext is an alternate form of the ListLoadBalancerListenerPolicyRules method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListLoadBalancerListenerPolicyRulesWithContext(ctx context.Context, listLoadBalancerListenerPolicyRulesOptions *ListLoadBalancerListenerPolicyRulesOptions) (result *LoadBalancerListenerPolicyRuleCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listLoadBalancerListenerPolicyRulesOptions, "listLoadBalancerListenerPolicyRulesOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listLoadBalancerListenerPolicyRulesOptions, "listLoadBalancerListenerPolicyRulesOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *listLoadBalancerListenerPolicyRulesOptions.LoadBalancerID, + "listener_id": *listLoadBalancerListenerPolicyRulesOptions.ListenerID, + "policy_id": *listLoadBalancerListenerPolicyRulesOptions.PolicyID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies/{policy_id}/rules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listLoadBalancerListenerPolicyRulesOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListLoadBalancerListenerPolicyRules") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRuleCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerListenerPolicyRule : Create a rule for a load balancer listener policy +// Creates a new rule for the load balancer listener policy. 
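+//
+// Example (illustrative sketch, not generated SDK code): the condition, type,
+// field, and value shown are hypothetical:
+//
+//     rule, _, err := vpcService.CreateLoadBalancerListenerPolicyRule(&CreateLoadBalancerListenerPolicyRuleOptions{
+//         LoadBalancerID: core.StringPtr("lb-id"),
+//         ListenerID:     core.StringPtr("listener-id"),
+//         PolicyID:       core.StringPtr("policy-id"),
+//         Condition:      core.StringPtr("equals"),
+//         Type:           core.StringPtr("header"),
+//         Value:          core.StringPtr("application/json"),
+//         Field:          core.StringPtr("Content-Type"),
+//     })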
+func (vpcClassic *VpcClassicV1) CreateLoadBalancerListenerPolicyRule(createLoadBalancerListenerPolicyRuleOptions *CreateLoadBalancerListenerPolicyRuleOptions) (result *LoadBalancerListenerPolicyRule, response *core.DetailedResponse, err error) { + return vpcClassic.CreateLoadBalancerListenerPolicyRuleWithContext(context.Background(), createLoadBalancerListenerPolicyRuleOptions) +} + +// CreateLoadBalancerListenerPolicyRuleWithContext is an alternate form of the CreateLoadBalancerListenerPolicyRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateLoadBalancerListenerPolicyRuleWithContext(ctx context.Context, createLoadBalancerListenerPolicyRuleOptions *CreateLoadBalancerListenerPolicyRuleOptions) (result *LoadBalancerListenerPolicyRule, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createLoadBalancerListenerPolicyRuleOptions, "createLoadBalancerListenerPolicyRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createLoadBalancerListenerPolicyRuleOptions, "createLoadBalancerListenerPolicyRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *createLoadBalancerListenerPolicyRuleOptions.LoadBalancerID, + "listener_id": *createLoadBalancerListenerPolicyRuleOptions.ListenerID, + "policy_id": *createLoadBalancerListenerPolicyRuleOptions.PolicyID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies/{policy_id}/rules`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createLoadBalancerListenerPolicyRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateLoadBalancerListenerPolicyRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createLoadBalancerListenerPolicyRuleOptions.Condition != nil { + body["condition"] = createLoadBalancerListenerPolicyRuleOptions.Condition + } + if createLoadBalancerListenerPolicyRuleOptions.Type != nil { + body["type"] = createLoadBalancerListenerPolicyRuleOptions.Type + } + if createLoadBalancerListenerPolicyRuleOptions.Value != nil { + body["value"] = createLoadBalancerListenerPolicyRuleOptions.Value + } + if createLoadBalancerListenerPolicyRuleOptions.Field != nil { + body["field"] = createLoadBalancerListenerPolicyRuleOptions.Field + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRule) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteLoadBalancerListenerPolicyRule : Delete a load balancer listener policy rule +// Deletes a rule from the load balancer listener 
policy. This operation cannot be reversed. +func (vpcClassic *VpcClassicV1) DeleteLoadBalancerListenerPolicyRule(deleteLoadBalancerListenerPolicyRuleOptions *DeleteLoadBalancerListenerPolicyRuleOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteLoadBalancerListenerPolicyRuleWithContext(context.Background(), deleteLoadBalancerListenerPolicyRuleOptions) +} + +// DeleteLoadBalancerListenerPolicyRuleWithContext is an alternate form of the DeleteLoadBalancerListenerPolicyRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteLoadBalancerListenerPolicyRuleWithContext(ctx context.Context, deleteLoadBalancerListenerPolicyRuleOptions *DeleteLoadBalancerListenerPolicyRuleOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerListenerPolicyRuleOptions, "deleteLoadBalancerListenerPolicyRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerListenerPolicyRuleOptions, "deleteLoadBalancerListenerPolicyRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *deleteLoadBalancerListenerPolicyRuleOptions.LoadBalancerID, + "listener_id": *deleteLoadBalancerListenerPolicyRuleOptions.ListenerID, + "policy_id": *deleteLoadBalancerListenerPolicyRuleOptions.PolicyID, + "id": *deleteLoadBalancerListenerPolicyRuleOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies/{policy_id}/rules/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerListenerPolicyRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteLoadBalancerListenerPolicyRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetLoadBalancerListenerPolicyRule : Retrieve a load balancer listener policy rule +// Retrieves a single rule specified by the identifier in the URL path. 
+func (vpcClassic *VpcClassicV1) GetLoadBalancerListenerPolicyRule(getLoadBalancerListenerPolicyRuleOptions *GetLoadBalancerListenerPolicyRuleOptions) (result *LoadBalancerListenerPolicyRule, response *core.DetailedResponse, err error) { + return vpcClassic.GetLoadBalancerListenerPolicyRuleWithContext(context.Background(), getLoadBalancerListenerPolicyRuleOptions) +} + +// GetLoadBalancerListenerPolicyRuleWithContext is an alternate form of the GetLoadBalancerListenerPolicyRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetLoadBalancerListenerPolicyRuleWithContext(ctx context.Context, getLoadBalancerListenerPolicyRuleOptions *GetLoadBalancerListenerPolicyRuleOptions) (result *LoadBalancerListenerPolicyRule, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerListenerPolicyRuleOptions, "getLoadBalancerListenerPolicyRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerListenerPolicyRuleOptions, "getLoadBalancerListenerPolicyRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *getLoadBalancerListenerPolicyRuleOptions.LoadBalancerID, + "listener_id": *getLoadBalancerListenerPolicyRuleOptions.ListenerID, + "policy_id": *getLoadBalancerListenerPolicyRuleOptions.PolicyID, + "id": *getLoadBalancerListenerPolicyRuleOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies/{policy_id}/rules/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerListenerPolicyRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetLoadBalancerListenerPolicyRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRule) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateLoadBalancerListenerPolicyRule : Update a load balancer listener policy rule +// Updates a rule of the load balancer listener policy. 
+func (vpcClassic *VpcClassicV1) UpdateLoadBalancerListenerPolicyRule(updateLoadBalancerListenerPolicyRuleOptions *UpdateLoadBalancerListenerPolicyRuleOptions) (result *LoadBalancerListenerPolicyRule, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateLoadBalancerListenerPolicyRuleWithContext(context.Background(), updateLoadBalancerListenerPolicyRuleOptions) +} + +// UpdateLoadBalancerListenerPolicyRuleWithContext is an alternate form of the UpdateLoadBalancerListenerPolicyRule method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateLoadBalancerListenerPolicyRuleWithContext(ctx context.Context, updateLoadBalancerListenerPolicyRuleOptions *UpdateLoadBalancerListenerPolicyRuleOptions) (result *LoadBalancerListenerPolicyRule, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateLoadBalancerListenerPolicyRuleOptions, "updateLoadBalancerListenerPolicyRuleOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateLoadBalancerListenerPolicyRuleOptions, "updateLoadBalancerListenerPolicyRuleOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *updateLoadBalancerListenerPolicyRuleOptions.LoadBalancerID, + "listener_id": *updateLoadBalancerListenerPolicyRuleOptions.ListenerID, + "policy_id": *updateLoadBalancerListenerPolicyRuleOptions.PolicyID, + "id": *updateLoadBalancerListenerPolicyRuleOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/listeners/{listener_id}/policies/{policy_id}/rules/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateLoadBalancerListenerPolicyRuleOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateLoadBalancerListenerPolicyRule") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateLoadBalancerListenerPolicyRuleOptions.LoadBalancerListenerPolicyRulePatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRule) + if err != nil { + return + } + response.Result = result + + return +} + +// ListLoadBalancerPools : List all pools of a load balancer +// This request lists all pools of a load balancer. 
+func (vpcClassic *VpcClassicV1) ListLoadBalancerPools(listLoadBalancerPoolsOptions *ListLoadBalancerPoolsOptions) (result *LoadBalancerPoolCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListLoadBalancerPoolsWithContext(context.Background(), listLoadBalancerPoolsOptions) +} + +// ListLoadBalancerPoolsWithContext is an alternate form of the ListLoadBalancerPools method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListLoadBalancerPoolsWithContext(ctx context.Context, listLoadBalancerPoolsOptions *ListLoadBalancerPoolsOptions) (result *LoadBalancerPoolCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listLoadBalancerPoolsOptions, "listLoadBalancerPoolsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listLoadBalancerPoolsOptions, "listLoadBalancerPoolsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *listLoadBalancerPoolsOptions.LoadBalancerID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listLoadBalancerPoolsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListLoadBalancerPools") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerPool : Create a load balancer pool +// This request creates a new pool from a pool prototype object. 
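+//
+// Example (illustrative sketch, not generated SDK code): the health monitor
+// prototype type name and its fields are assumed from the generator's usual
+// naming; all values are hypothetical:
+//
+//     pool, _, err := vpcService.CreateLoadBalancerPool(&CreateLoadBalancerPoolOptions{
+//         LoadBalancerID: core.StringPtr("lb-id"),
+//         Algorithm:      core.StringPtr("round_robin"),
+//         Protocol:       core.StringPtr("http"),
+//         HealthMonitor: &LoadBalancerPoolHealthMonitorPrototype{
+//             Delay:      core.Int64Ptr(5),
+//             MaxRetries: core.Int64Ptr(2),
+//             Timeout:    core.Int64Ptr(2),
+//             Type:       core.StringPtr("http"),
+//         },
+//     })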
+func (vpcClassic *VpcClassicV1) CreateLoadBalancerPool(createLoadBalancerPoolOptions *CreateLoadBalancerPoolOptions) (result *LoadBalancerPool, response *core.DetailedResponse, err error) { + return vpcClassic.CreateLoadBalancerPoolWithContext(context.Background(), createLoadBalancerPoolOptions) +} + +// CreateLoadBalancerPoolWithContext is an alternate form of the CreateLoadBalancerPool method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateLoadBalancerPoolWithContext(ctx context.Context, createLoadBalancerPoolOptions *CreateLoadBalancerPoolOptions) (result *LoadBalancerPool, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createLoadBalancerPoolOptions, "createLoadBalancerPoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createLoadBalancerPoolOptions, "createLoadBalancerPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *createLoadBalancerPoolOptions.LoadBalancerID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createLoadBalancerPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateLoadBalancerPool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createLoadBalancerPoolOptions.Algorithm != nil { + body["algorithm"] = createLoadBalancerPoolOptions.Algorithm + } + if createLoadBalancerPoolOptions.HealthMonitor != nil { + body["health_monitor"] = createLoadBalancerPoolOptions.HealthMonitor + } + if createLoadBalancerPoolOptions.Protocol != nil { + body["protocol"] = createLoadBalancerPoolOptions.Protocol + } + if createLoadBalancerPoolOptions.Members != nil { + body["members"] = createLoadBalancerPoolOptions.Members + } + if createLoadBalancerPoolOptions.Name != nil { + body["name"] = createLoadBalancerPoolOptions.Name + } + if createLoadBalancerPoolOptions.ProxyProtocol != nil { + body["proxy_protocol"] = createLoadBalancerPoolOptions.ProxyProtocol + } + if createLoadBalancerPoolOptions.SessionPersistence != nil { + body["session_persistence"] = createLoadBalancerPoolOptions.SessionPersistence + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPool) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteLoadBalancerPool : Delete a load balancer pool +// This request deletes a load balancer pool. This operation cannot be reversed. The pool must not currently be the +// default pool for any listener in the load balancer. 
+func (vpcClassic *VpcClassicV1) DeleteLoadBalancerPool(deleteLoadBalancerPoolOptions *DeleteLoadBalancerPoolOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteLoadBalancerPoolWithContext(context.Background(), deleteLoadBalancerPoolOptions) +} + +// DeleteLoadBalancerPoolWithContext is an alternate form of the DeleteLoadBalancerPool method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteLoadBalancerPoolWithContext(ctx context.Context, deleteLoadBalancerPoolOptions *DeleteLoadBalancerPoolOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerPoolOptions, "deleteLoadBalancerPoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerPoolOptions, "deleteLoadBalancerPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *deleteLoadBalancerPoolOptions.LoadBalancerID, + "id": *deleteLoadBalancerPoolOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteLoadBalancerPool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetLoadBalancerPool : Retrieve a load balancer pool +// This request retrieves a single pool specified by the identifier in the URL path. 
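+//
+// A minimal usage sketch (assumes vpcClassicService, a configured
+// *VpcClassicV1; IDs are placeholders):
+//
+//	getPoolOptions := &GetLoadBalancerPoolOptions{
+//		LoadBalancerID: core.StringPtr("load-balancer-id"),
+//		ID:             core.StringPtr("pool-id"),
+//	}
+//	pool, _, err := vpcClassicService.GetLoadBalancerPool(getPoolOptions)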
+func (vpcClassic *VpcClassicV1) GetLoadBalancerPool(getLoadBalancerPoolOptions *GetLoadBalancerPoolOptions) (result *LoadBalancerPool, response *core.DetailedResponse, err error) { + return vpcClassic.GetLoadBalancerPoolWithContext(context.Background(), getLoadBalancerPoolOptions) +} + +// GetLoadBalancerPoolWithContext is an alternate form of the GetLoadBalancerPool method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetLoadBalancerPoolWithContext(ctx context.Context, getLoadBalancerPoolOptions *GetLoadBalancerPoolOptions) (result *LoadBalancerPool, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerPoolOptions, "getLoadBalancerPoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerPoolOptions, "getLoadBalancerPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *getLoadBalancerPoolOptions.LoadBalancerID, + "id": *getLoadBalancerPoolOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetLoadBalancerPool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPool) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateLoadBalancerPool : Update a load balancer pool +// This request updates a load balancer pool from a pool patch. 
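+//
+// A minimal usage sketch. It assumes vpcClassicService (a configured
+// *VpcClassicV1) and a LoadBalancerPoolPatch model with an AsPatch helper,
+// following the pattern shown for AddressPrefixPatch later in this file; IDs
+// are placeholders.
+//
+//	poolPatch := &LoadBalancerPoolPatch{Name: core.StringPtr("renamed-pool")}
+//	patchMap, err := poolPatch.AsPatch()
+//	if err != nil {
+//		// handle error
+//	}
+//	updatePoolOptions := &UpdateLoadBalancerPoolOptions{
+//		LoadBalancerID:        core.StringPtr("load-balancer-id"),
+//		ID:                    core.StringPtr("pool-id"),
+//		LoadBalancerPoolPatch: patchMap,
+//	}
+//	pool, _, err := vpcClassicService.UpdateLoadBalancerPool(updatePoolOptions)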
+func (vpcClassic *VpcClassicV1) UpdateLoadBalancerPool(updateLoadBalancerPoolOptions *UpdateLoadBalancerPoolOptions) (result *LoadBalancerPool, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateLoadBalancerPoolWithContext(context.Background(), updateLoadBalancerPoolOptions) +} + +// UpdateLoadBalancerPoolWithContext is an alternate form of the UpdateLoadBalancerPool method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateLoadBalancerPoolWithContext(ctx context.Context, updateLoadBalancerPoolOptions *UpdateLoadBalancerPoolOptions) (result *LoadBalancerPool, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateLoadBalancerPoolOptions, "updateLoadBalancerPoolOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateLoadBalancerPoolOptions, "updateLoadBalancerPoolOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *updateLoadBalancerPoolOptions.LoadBalancerID, + "id": *updateLoadBalancerPoolOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateLoadBalancerPoolOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateLoadBalancerPool") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateLoadBalancerPoolOptions.LoadBalancerPoolPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPool) + if err != nil { + return + } + response.Result = result + + return +} + +// ListLoadBalancerPoolMembers : List all members of a load balancer pool +// This request lists all members of a load balancer pool. 
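+//
+// A minimal usage sketch (assumes vpcClassicService; IDs are placeholders):
+//
+//	listMembersOptions := &ListLoadBalancerPoolMembersOptions{
+//		LoadBalancerID: core.StringPtr("load-balancer-id"),
+//		PoolID:         core.StringPtr("pool-id"),
+//	}
+//	members, _, err := vpcClassicService.ListLoadBalancerPoolMembers(listMembersOptions)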
+func (vpcClassic *VpcClassicV1) ListLoadBalancerPoolMembers(listLoadBalancerPoolMembersOptions *ListLoadBalancerPoolMembersOptions) (result *LoadBalancerPoolMemberCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ListLoadBalancerPoolMembersWithContext(context.Background(), listLoadBalancerPoolMembersOptions) +} + +// ListLoadBalancerPoolMembersWithContext is an alternate form of the ListLoadBalancerPoolMembers method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ListLoadBalancerPoolMembersWithContext(ctx context.Context, listLoadBalancerPoolMembersOptions *ListLoadBalancerPoolMembersOptions) (result *LoadBalancerPoolMemberCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listLoadBalancerPoolMembersOptions, "listLoadBalancerPoolMembersOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listLoadBalancerPoolMembersOptions, "listLoadBalancerPoolMembersOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *listLoadBalancerPoolMembersOptions.LoadBalancerID, + "pool_id": *listLoadBalancerPoolMembersOptions.PoolID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools/{pool_id}/members`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listLoadBalancerPoolMembersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ListLoadBalancerPoolMembers") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMemberCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// CreateLoadBalancerPoolMember : Create a member in a load balancer pool +// This request creates a new member and adds the member to the pool. 
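+//
+// A minimal usage sketch (assumes vpcClassicService; IDs are placeholders,
+// and the member target is a prototype value built from model types defined
+// elsewhere in this file):
+//
+//	createMemberOptions := &CreateLoadBalancerPoolMemberOptions{
+//		LoadBalancerID: core.StringPtr("load-balancer-id"),
+//		PoolID:         core.StringPtr("pool-id"),
+//		Port:           core.Int64Ptr(80),
+//		Target:         memberTarget, // built elsewhere
+//	}
+//	member, _, err := vpcClassicService.CreateLoadBalancerPoolMember(createMemberOptions)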
+func (vpcClassic *VpcClassicV1) CreateLoadBalancerPoolMember(createLoadBalancerPoolMemberOptions *CreateLoadBalancerPoolMemberOptions) (result *LoadBalancerPoolMember, response *core.DetailedResponse, err error) { + return vpcClassic.CreateLoadBalancerPoolMemberWithContext(context.Background(), createLoadBalancerPoolMemberOptions) +} + +// CreateLoadBalancerPoolMemberWithContext is an alternate form of the CreateLoadBalancerPoolMember method which supports a Context parameter +func (vpcClassic *VpcClassicV1) CreateLoadBalancerPoolMemberWithContext(ctx context.Context, createLoadBalancerPoolMemberOptions *CreateLoadBalancerPoolMemberOptions) (result *LoadBalancerPoolMember, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createLoadBalancerPoolMemberOptions, "createLoadBalancerPoolMemberOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createLoadBalancerPoolMemberOptions, "createLoadBalancerPoolMemberOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *createLoadBalancerPoolMemberOptions.LoadBalancerID, + "pool_id": *createLoadBalancerPoolMemberOptions.PoolID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools/{pool_id}/members`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createLoadBalancerPoolMemberOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "CreateLoadBalancerPoolMember") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if createLoadBalancerPoolMemberOptions.Port != nil { + body["port"] = createLoadBalancerPoolMemberOptions.Port + } + if createLoadBalancerPoolMemberOptions.Target != nil { + body["target"] = createLoadBalancerPoolMemberOptions.Target + } + if createLoadBalancerPoolMemberOptions.Weight != nil { + body["weight"] = createLoadBalancerPoolMemberOptions.Weight + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMember) + if err != nil { + return + } + response.Result = result + + return +} + +// ReplaceLoadBalancerPoolMembers : Replace load balancer pool members +// This request replaces the existing members of the load balancer pool with new members created from the collection of +// member prototype objects. 
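+//
+// A minimal usage sketch (assumes vpcClassicService; IDs are placeholders,
+// and memberPrototypes is a slice of member prototype values built elsewhere):
+//
+//	replaceMembersOptions := &ReplaceLoadBalancerPoolMembersOptions{
+//		LoadBalancerID: core.StringPtr("load-balancer-id"),
+//		PoolID:         core.StringPtr("pool-id"),
+//		Members:        memberPrototypes,
+//	}
+//	members, _, err := vpcClassicService.ReplaceLoadBalancerPoolMembers(replaceMembersOptions)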
+func (vpcClassic *VpcClassicV1) ReplaceLoadBalancerPoolMembers(replaceLoadBalancerPoolMembersOptions *ReplaceLoadBalancerPoolMembersOptions) (result *LoadBalancerPoolMemberCollection, response *core.DetailedResponse, err error) { + return vpcClassic.ReplaceLoadBalancerPoolMembersWithContext(context.Background(), replaceLoadBalancerPoolMembersOptions) +} + +// ReplaceLoadBalancerPoolMembersWithContext is an alternate form of the ReplaceLoadBalancerPoolMembers method which supports a Context parameter +func (vpcClassic *VpcClassicV1) ReplaceLoadBalancerPoolMembersWithContext(ctx context.Context, replaceLoadBalancerPoolMembersOptions *ReplaceLoadBalancerPoolMembersOptions) (result *LoadBalancerPoolMemberCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(replaceLoadBalancerPoolMembersOptions, "replaceLoadBalancerPoolMembersOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(replaceLoadBalancerPoolMembersOptions, "replaceLoadBalancerPoolMembersOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *replaceLoadBalancerPoolMembersOptions.LoadBalancerID, + "pool_id": *replaceLoadBalancerPoolMembersOptions.PoolID, + } + + builder := core.NewRequestBuilder(core.PUT) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools/{pool_id}/members`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range replaceLoadBalancerPoolMembersOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "ReplaceLoadBalancerPoolMembers") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + body := make(map[string]interface{}) + if replaceLoadBalancerPoolMembersOptions.Members != nil { + body["members"] = replaceLoadBalancerPoolMembersOptions.Members + } + _, err = builder.SetBodyContentJSON(body) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMemberCollection) + if err != nil { + return + } + response.Result = result + + return +} + +// DeleteLoadBalancerPoolMember : Delete a load balancer pool member +// This request deletes a member from the pool. This operation cannot be reversed. 
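+//
+// A minimal usage sketch (assumes vpcClassicService; IDs are placeholders):
+//
+//	deleteMemberOptions := &DeleteLoadBalancerPoolMemberOptions{
+//		LoadBalancerID: core.StringPtr("load-balancer-id"),
+//		PoolID:         core.StringPtr("pool-id"),
+//		ID:             core.StringPtr("member-id"),
+//	}
+//	response, err := vpcClassicService.DeleteLoadBalancerPoolMember(deleteMemberOptions)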
+func (vpcClassic *VpcClassicV1) DeleteLoadBalancerPoolMember(deleteLoadBalancerPoolMemberOptions *DeleteLoadBalancerPoolMemberOptions) (response *core.DetailedResponse, err error) { + return vpcClassic.DeleteLoadBalancerPoolMemberWithContext(context.Background(), deleteLoadBalancerPoolMemberOptions) +} + +// DeleteLoadBalancerPoolMemberWithContext is an alternate form of the DeleteLoadBalancerPoolMember method which supports a Context parameter +func (vpcClassic *VpcClassicV1) DeleteLoadBalancerPoolMemberWithContext(ctx context.Context, deleteLoadBalancerPoolMemberOptions *DeleteLoadBalancerPoolMemberOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteLoadBalancerPoolMemberOptions, "deleteLoadBalancerPoolMemberOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteLoadBalancerPoolMemberOptions, "deleteLoadBalancerPoolMemberOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *deleteLoadBalancerPoolMemberOptions.LoadBalancerID, + "pool_id": *deleteLoadBalancerPoolMemberOptions.PoolID, + "id": *deleteLoadBalancerPoolMemberOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools/{pool_id}/members/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteLoadBalancerPoolMemberOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "DeleteLoadBalancerPoolMember") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpcClassic.Service.Request(request, nil) + + return +} + +// GetLoadBalancerPoolMember : Retrieve a load balancer pool member +// This request retrieves a single member specified by the identifier in the URL path. 
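+//
+// A minimal usage sketch of the WithContext variant, which applies a deadline
+// to the request (assumes vpcClassicService and the standard library time
+// package; IDs are placeholders):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	getMemberOptions := &GetLoadBalancerPoolMemberOptions{
+//		LoadBalancerID: core.StringPtr("load-balancer-id"),
+//		PoolID:         core.StringPtr("pool-id"),
+//		ID:             core.StringPtr("member-id"),
+//	}
+//	member, _, err := vpcClassicService.GetLoadBalancerPoolMemberWithContext(ctx, getMemberOptions)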
+func (vpcClassic *VpcClassicV1) GetLoadBalancerPoolMember(getLoadBalancerPoolMemberOptions *GetLoadBalancerPoolMemberOptions) (result *LoadBalancerPoolMember, response *core.DetailedResponse, err error) { + return vpcClassic.GetLoadBalancerPoolMemberWithContext(context.Background(), getLoadBalancerPoolMemberOptions) +} + +// GetLoadBalancerPoolMemberWithContext is an alternate form of the GetLoadBalancerPoolMember method which supports a Context parameter +func (vpcClassic *VpcClassicV1) GetLoadBalancerPoolMemberWithContext(ctx context.Context, getLoadBalancerPoolMemberOptions *GetLoadBalancerPoolMemberOptions) (result *LoadBalancerPoolMember, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getLoadBalancerPoolMemberOptions, "getLoadBalancerPoolMemberOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getLoadBalancerPoolMemberOptions, "getLoadBalancerPoolMemberOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *getLoadBalancerPoolMemberOptions.LoadBalancerID, + "pool_id": *getLoadBalancerPoolMemberOptions.PoolID, + "id": *getLoadBalancerPoolMemberOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools/{pool_id}/members/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getLoadBalancerPoolMemberOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "GetLoadBalancerPoolMember") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMember) + if err != nil { + return + } + response.Result = result + + return +} + +// UpdateLoadBalancerPoolMember : Update a load balancer pool member +// This request updates an existing member from a member patch. 
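+//
+// A minimal usage sketch, assuming a LoadBalancerPoolMemberPatch model with a
+// Weight field and an AsPatch helper analogous to AddressPrefixPatch below
+// (IDs are placeholders):
+//
+//	memberPatch := &LoadBalancerPoolMemberPatch{Weight: core.Int64Ptr(50)}
+//	patchMap, _ := memberPatch.AsPatch()
+//	updateMemberOptions := &UpdateLoadBalancerPoolMemberOptions{
+//		LoadBalancerID:              core.StringPtr("load-balancer-id"),
+//		PoolID:                      core.StringPtr("pool-id"),
+//		ID:                          core.StringPtr("member-id"),
+//		LoadBalancerPoolMemberPatch: patchMap,
+//	}
+//	member, _, err := vpcClassicService.UpdateLoadBalancerPoolMember(updateMemberOptions)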
+func (vpcClassic *VpcClassicV1) UpdateLoadBalancerPoolMember(updateLoadBalancerPoolMemberOptions *UpdateLoadBalancerPoolMemberOptions) (result *LoadBalancerPoolMember, response *core.DetailedResponse, err error) { + return vpcClassic.UpdateLoadBalancerPoolMemberWithContext(context.Background(), updateLoadBalancerPoolMemberOptions) +} + +// UpdateLoadBalancerPoolMemberWithContext is an alternate form of the UpdateLoadBalancerPoolMember method which supports a Context parameter +func (vpcClassic *VpcClassicV1) UpdateLoadBalancerPoolMemberWithContext(ctx context.Context, updateLoadBalancerPoolMemberOptions *UpdateLoadBalancerPoolMemberOptions) (result *LoadBalancerPoolMember, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(updateLoadBalancerPoolMemberOptions, "updateLoadBalancerPoolMemberOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(updateLoadBalancerPoolMemberOptions, "updateLoadBalancerPoolMemberOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "load_balancer_id": *updateLoadBalancerPoolMemberOptions.LoadBalancerID, + "pool_id": *updateLoadBalancerPoolMemberOptions.PoolID, + "id": *updateLoadBalancerPoolMemberOptions.ID, + } + + builder := core.NewRequestBuilder(core.PATCH) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpcClassic.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpcClassic.Service.Options.URL, `/load_balancers/{load_balancer_id}/pools/{pool_id}/members/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range updateLoadBalancerPoolMemberOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc_classic", "V1", "UpdateLoadBalancerPoolMember") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/merge-patch+json") + + builder.AddQuery("version", fmt.Sprint(*vpcClassic.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpcClassic.generation)) + + _, err = builder.SetBodyContentJSON(updateLoadBalancerPoolMemberOptions.LoadBalancerPoolMemberPatch) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpcClassic.Service.Request(request, &rawResponse) + if err != nil { + return + } + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMember) + if err != nil { + return + } + response.Result = result + + return +} + +// AddInstanceNetworkInterfaceFloatingIPOptions : The AddInstanceNetworkInterfaceFloatingIP options. +type AddInstanceNetworkInterfaceFloatingIPOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The network interface identifier. + NetworkInterfaceID *string `validate:"required,ne="` + + // The floating IP identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewAddInstanceNetworkInterfaceFloatingIPOptions : Instantiate AddInstanceNetworkInterfaceFloatingIPOptions +func (*VpcClassicV1) NewAddInstanceNetworkInterfaceFloatingIPOptions(instanceID string, networkInterfaceID string, id string) *AddInstanceNetworkInterfaceFloatingIPOptions { + return &AddInstanceNetworkInterfaceFloatingIPOptions{ + InstanceID: core.StringPtr(instanceID), + NetworkInterfaceID: core.StringPtr(networkInterfaceID), + ID: core.StringPtr(id), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *AddInstanceNetworkInterfaceFloatingIPOptions) SetInstanceID(instanceID string) *AddInstanceNetworkInterfaceFloatingIPOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetNetworkInterfaceID : Allow user to set NetworkInterfaceID +func (options *AddInstanceNetworkInterfaceFloatingIPOptions) SetNetworkInterfaceID(networkInterfaceID string) *AddInstanceNetworkInterfaceFloatingIPOptions { + options.NetworkInterfaceID = core.StringPtr(networkInterfaceID) + return options +} + +// SetID : Allow user to set ID +func (options *AddInstanceNetworkInterfaceFloatingIPOptions) SetID(id string) *AddInstanceNetworkInterfaceFloatingIPOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *AddInstanceNetworkInterfaceFloatingIPOptions) SetHeaders(param map[string]string) *AddInstanceNetworkInterfaceFloatingIPOptions { + options.Headers = param + return options +} + +// AddSecurityGroupNetworkInterfaceOptions : The AddSecurityGroupNetworkInterface options. +type AddSecurityGroupNetworkInterfaceOptions struct { + // The security group identifier. + SecurityGroupID *string `validate:"required,ne="` + + // The network interface identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewAddSecurityGroupNetworkInterfaceOptions : Instantiate AddSecurityGroupNetworkInterfaceOptions +func (*VpcClassicV1) NewAddSecurityGroupNetworkInterfaceOptions(securityGroupID string, id string) *AddSecurityGroupNetworkInterfaceOptions { + return &AddSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: core.StringPtr(securityGroupID), + ID: core.StringPtr(id), + } +} + +// SetSecurityGroupID : Allow user to set SecurityGroupID +func (options *AddSecurityGroupNetworkInterfaceOptions) SetSecurityGroupID(securityGroupID string) *AddSecurityGroupNetworkInterfaceOptions { + options.SecurityGroupID = core.StringPtr(securityGroupID) + return options +} + +// SetID : Allow user to set ID +func (options *AddSecurityGroupNetworkInterfaceOptions) SetID(id string) *AddSecurityGroupNetworkInterfaceOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *AddSecurityGroupNetworkInterfaceOptions) SetHeaders(param map[string]string) *AddSecurityGroupNetworkInterfaceOptions { + options.Headers = param + return options +} + +// AddVPNGatewayConnectionLocalCIDROptions : The AddVPNGatewayConnectionLocalCIDR options. +type AddVPNGatewayConnectionLocalCIDROptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // The address prefix part of the CIDR. 
+ CIDRPrefix *string `validate:"required,ne="` + + // The prefix length part of the CIDR. + PrefixLength *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewAddVPNGatewayConnectionLocalCIDROptions : Instantiate AddVPNGatewayConnectionLocalCIDROptions +func (*VpcClassicV1) NewAddVPNGatewayConnectionLocalCIDROptions(vpnGatewayID string, id string, cidrPrefix string, prefixLength string) *AddVPNGatewayConnectionLocalCIDROptions { + return &AddVPNGatewayConnectionLocalCIDROptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + CIDRPrefix: core.StringPtr(cidrPrefix), + PrefixLength: core.StringPtr(prefixLength), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *AddVPNGatewayConnectionLocalCIDROptions) SetVPNGatewayID(vpnGatewayID string) *AddVPNGatewayConnectionLocalCIDROptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *AddVPNGatewayConnectionLocalCIDROptions) SetID(id string) *AddVPNGatewayConnectionLocalCIDROptions { + options.ID = core.StringPtr(id) + return options +} + +// SetCIDRPrefix : Allow user to set CIDRPrefix +func (options *AddVPNGatewayConnectionLocalCIDROptions) SetCIDRPrefix(cidrPrefix string) *AddVPNGatewayConnectionLocalCIDROptions { + options.CIDRPrefix = core.StringPtr(cidrPrefix) + return options +} + +// SetPrefixLength : Allow user to set PrefixLength +func (options *AddVPNGatewayConnectionLocalCIDROptions) SetPrefixLength(prefixLength string) *AddVPNGatewayConnectionLocalCIDROptions { + options.PrefixLength = core.StringPtr(prefixLength) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *AddVPNGatewayConnectionLocalCIDROptions) SetHeaders(param map[string]string) *AddVPNGatewayConnectionLocalCIDROptions { + options.Headers = param + return options +} + +// AddVPNGatewayConnectionPeerCIDROptions : The AddVPNGatewayConnectionPeerCIDR options. +type AddVPNGatewayConnectionPeerCIDROptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // The address prefix part of the CIDR. + CIDRPrefix *string `validate:"required,ne="` + + // The prefix length part of the CIDR. 
+ PrefixLength *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewAddVPNGatewayConnectionPeerCIDROptions : Instantiate AddVPNGatewayConnectionPeerCIDROptions +func (*VpcClassicV1) NewAddVPNGatewayConnectionPeerCIDROptions(vpnGatewayID string, id string, cidrPrefix string, prefixLength string) *AddVPNGatewayConnectionPeerCIDROptions { + return &AddVPNGatewayConnectionPeerCIDROptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + CIDRPrefix: core.StringPtr(cidrPrefix), + PrefixLength: core.StringPtr(prefixLength), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *AddVPNGatewayConnectionPeerCIDROptions) SetVPNGatewayID(vpnGatewayID string) *AddVPNGatewayConnectionPeerCIDROptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *AddVPNGatewayConnectionPeerCIDROptions) SetID(id string) *AddVPNGatewayConnectionPeerCIDROptions { + options.ID = core.StringPtr(id) + return options +} + +// SetCIDRPrefix : Allow user to set CIDRPrefix +func (options *AddVPNGatewayConnectionPeerCIDROptions) SetCIDRPrefix(cidrPrefix string) *AddVPNGatewayConnectionPeerCIDROptions { + options.CIDRPrefix = core.StringPtr(cidrPrefix) + return options +} + +// SetPrefixLength : Allow user to set PrefixLength +func (options *AddVPNGatewayConnectionPeerCIDROptions) SetPrefixLength(prefixLength string) *AddVPNGatewayConnectionPeerCIDROptions { + options.PrefixLength = core.StringPtr(prefixLength) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *AddVPNGatewayConnectionPeerCIDROptions) SetHeaders(param map[string]string) *AddVPNGatewayConnectionPeerCIDROptions { + options.Headers = param + return options +} + +// AddressPrefix : AddressPrefix struct +type AddressPrefix struct { + // The CIDR block for this prefix. + CIDR *string `json:"cidr" validate:"required"` + + // The date and time that the prefix was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // Indicates whether subnets exist with addresses from this prefix. + HasSubnets *bool `json:"has_subnets" validate:"required"` + + // The URL for this address prefix. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this address prefix. + ID *string `json:"id" validate:"required"` + + // Indicates whether this is the default prefix for this zone in this VPC. If a default prefix was automatically + // created when the VPC was created, the prefix is automatically named using a hyphenated list of randomly-selected + // words, but may be updated with a user-specified name. + IsDefault *bool `json:"is_default" validate:"required"` + + // The user-defined name for this address prefix. Names must be unique within the VPC the address prefix resides in. + Name *string `json:"name" validate:"required"` + + // The zone this address prefix resides in. + Zone *ZoneReference `json:"zone" validate:"required"` +} + +// UnmarshalAddressPrefix unmarshals an instance of AddressPrefix from the specified map of raw messages. 
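+//
+// These UnmarshalXxx helpers are not called directly by SDK users; the method
+// bodies above hand them to core.UnmarshalModel, e.g.:
+//
+//	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix)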
+func UnmarshalAddressPrefix(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AddressPrefix) + err = core.UnmarshalPrimitive(m, "cidr", &obj.CIDR) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "has_subnets", &obj.HasSubnets) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "is_default", &obj.IsDefault) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AddressPrefixCollection : AddressPrefixCollection struct +type AddressPrefixCollection struct { + // Collection of address prefixes. + AddressPrefixes []AddressPrefix `json:"address_prefixes" validate:"required"` + + // A link to the first page of resources. + First *AddressPrefixCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *AddressPrefixCollectionNext `json:"next,omitempty"` +} + +// UnmarshalAddressPrefixCollection unmarshals an instance of AddressPrefixCollection from the specified map of raw messages. +func UnmarshalAddressPrefixCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AddressPrefixCollection) + err = core.UnmarshalModel(m, "address_prefixes", &obj.AddressPrefixes, UnmarshalAddressPrefix) + if err != nil { + return + } + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalAddressPrefixCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalAddressPrefixCollectionNext) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AddressPrefixCollectionFirst : A link to the first page of resources. +type AddressPrefixCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalAddressPrefixCollectionFirst unmarshals an instance of AddressPrefixCollectionFirst from the specified map of raw messages. +func UnmarshalAddressPrefixCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AddressPrefixCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AddressPrefixCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type AddressPrefixCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalAddressPrefixCollectionNext unmarshals an instance of AddressPrefixCollectionNext from the specified map of raw messages. 
+func UnmarshalAddressPrefixCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AddressPrefixCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AddressPrefixPatch : AddressPrefixPatch struct +type AddressPrefixPatch struct { + // Indicates whether this is the default prefix for this zone in this VPC. Updating to true makes this prefix the + // default prefix for this zone in this VPC, provided the VPC currently has no default address prefix for this zone. + // Updating to false removes the default prefix for this zone in this VPC. + IsDefault *bool `json:"is_default,omitempty"` + + // The user-defined name for this address prefix. Names must be unique within the VPC the address prefix resides in. + Name *string `json:"name,omitempty"` +} + +// UnmarshalAddressPrefixPatch unmarshals an instance of AddressPrefixPatch from the specified map of raw messages. +func UnmarshalAddressPrefixPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(AddressPrefixPatch) + err = core.UnmarshalPrimitive(m, "is_default", &obj.IsDefault) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the AddressPrefixPatch +func (addressPrefixPatch *AddressPrefixPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(addressPrefixPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// CertificateInstanceIdentity : Identifies a certificate instance by a unique property. +// Models which "extend" this model: +// - CertificateInstanceIdentityByCRN +type CertificateInstanceIdentity struct { + // The CRN for this certificate instance. + CRN *string `json:"crn,omitempty"` +} + +func (*CertificateInstanceIdentity) isaCertificateInstanceIdentity() bool { + return true +} + +type CertificateInstanceIdentityIntf interface { + isaCertificateInstanceIdentity() bool +} + +// UnmarshalCertificateInstanceIdentity unmarshals an instance of CertificateInstanceIdentity from the specified map of raw messages. +func UnmarshalCertificateInstanceIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CertificateInstanceIdentity) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CertificateInstanceReference : CertificateInstanceReference struct +type CertificateInstanceReference struct { + // The CRN for this certificate instance. + CRN *string `json:"crn" validate:"required"` +} + +// UnmarshalCertificateInstanceReference unmarshals an instance of CertificateInstanceReference from the specified map of raw messages. +func UnmarshalCertificateInstanceReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CertificateInstanceReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CheckVPNGatewayConnectionLocalCIDROptions : The CheckVPNGatewayConnectionLocalCIDR options. +type CheckVPNGatewayConnectionLocalCIDROptions struct { + // The VPN gateway identifier. 
+ VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // The address prefix part of the CIDR. + CIDRPrefix *string `validate:"required,ne="` + + // The prefix length part of the CIDR. + PrefixLength *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCheckVPNGatewayConnectionLocalCIDROptions : Instantiate CheckVPNGatewayConnectionLocalCIDROptions +func (*VpcClassicV1) NewCheckVPNGatewayConnectionLocalCIDROptions(vpnGatewayID string, id string, cidrPrefix string, prefixLength string) *CheckVPNGatewayConnectionLocalCIDROptions { + return &CheckVPNGatewayConnectionLocalCIDROptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + CIDRPrefix: core.StringPtr(cidrPrefix), + PrefixLength: core.StringPtr(prefixLength), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *CheckVPNGatewayConnectionLocalCIDROptions) SetVPNGatewayID(vpnGatewayID string) *CheckVPNGatewayConnectionLocalCIDROptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *CheckVPNGatewayConnectionLocalCIDROptions) SetID(id string) *CheckVPNGatewayConnectionLocalCIDROptions { + options.ID = core.StringPtr(id) + return options +} + +// SetCIDRPrefix : Allow user to set CIDRPrefix +func (options *CheckVPNGatewayConnectionLocalCIDROptions) SetCIDRPrefix(cidrPrefix string) *CheckVPNGatewayConnectionLocalCIDROptions { + options.CIDRPrefix = core.StringPtr(cidrPrefix) + return options +} + +// SetPrefixLength : Allow user to set PrefixLength +func (options *CheckVPNGatewayConnectionLocalCIDROptions) SetPrefixLength(prefixLength string) *CheckVPNGatewayConnectionLocalCIDROptions { + options.PrefixLength = core.StringPtr(prefixLength) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CheckVPNGatewayConnectionLocalCIDROptions) SetHeaders(param map[string]string) *CheckVPNGatewayConnectionLocalCIDROptions { + options.Headers = param + return options +} + +// CheckVPNGatewayConnectionPeerCIDROptions : The CheckVPNGatewayConnectionPeerCIDR options. +type CheckVPNGatewayConnectionPeerCIDROptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // The address prefix part of the CIDR. + CIDRPrefix *string `validate:"required,ne="` + + // The prefix length part of the CIDR. 
+ PrefixLength *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCheckVPNGatewayConnectionPeerCIDROptions : Instantiate CheckVPNGatewayConnectionPeerCIDROptions +func (*VpcClassicV1) NewCheckVPNGatewayConnectionPeerCIDROptions(vpnGatewayID string, id string, cidrPrefix string, prefixLength string) *CheckVPNGatewayConnectionPeerCIDROptions { + return &CheckVPNGatewayConnectionPeerCIDROptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + CIDRPrefix: core.StringPtr(cidrPrefix), + PrefixLength: core.StringPtr(prefixLength), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *CheckVPNGatewayConnectionPeerCIDROptions) SetVPNGatewayID(vpnGatewayID string) *CheckVPNGatewayConnectionPeerCIDROptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *CheckVPNGatewayConnectionPeerCIDROptions) SetID(id string) *CheckVPNGatewayConnectionPeerCIDROptions { + options.ID = core.StringPtr(id) + return options +} + +// SetCIDRPrefix : Allow user to set CIDRPrefix +func (options *CheckVPNGatewayConnectionPeerCIDROptions) SetCIDRPrefix(cidrPrefix string) *CheckVPNGatewayConnectionPeerCIDROptions { + options.CIDRPrefix = core.StringPtr(cidrPrefix) + return options +} + +// SetPrefixLength : Allow user to set PrefixLength +func (options *CheckVPNGatewayConnectionPeerCIDROptions) SetPrefixLength(prefixLength string) *CheckVPNGatewayConnectionPeerCIDROptions { + options.PrefixLength = core.StringPtr(prefixLength) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CheckVPNGatewayConnectionPeerCIDROptions) SetHeaders(param map[string]string) *CheckVPNGatewayConnectionPeerCIDROptions { + options.Headers = param + return options +} + +// CreateFloatingIPOptions : The CreateFloatingIP options. +type CreateFloatingIPOptions struct { + // The floating IP prototype object. + FloatingIPPrototype FloatingIPPrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateFloatingIPOptions : Instantiate CreateFloatingIPOptions +func (*VpcClassicV1) NewCreateFloatingIPOptions(floatingIPPrototype FloatingIPPrototypeIntf) *CreateFloatingIPOptions { + return &CreateFloatingIPOptions{ + FloatingIPPrototype: floatingIPPrototype, + } +} + +// SetFloatingIPPrototype : Allow user to set FloatingIPPrototype +func (options *CreateFloatingIPOptions) SetFloatingIPPrototype(floatingIPPrototype FloatingIPPrototypeIntf) *CreateFloatingIPOptions { + options.FloatingIPPrototype = floatingIPPrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateFloatingIPOptions) SetHeaders(param map[string]string) *CreateFloatingIPOptions { + options.Headers = param + return options +} + +// CreateIkePolicyOptions : The CreateIkePolicy options. +type CreateIkePolicyOptions struct { + // The authentication algorithm. + AuthenticationAlgorithm *string `validate:"required"` + + // The Diffie-Hellman group. + DhGroup *int64 `validate:"required"` + + // The encryption algorithm. + EncryptionAlgorithm *string `validate:"required"` + + // The IKE protocol version. + IkeVersion *int64 `validate:"required"` + + // The key lifetime in seconds. + KeyLifetime *int64 + + // The user-defined name for this IKE policy. + Name *string + + // The resource group to use. 
If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup ResourceGroupIdentityIntf + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateIkePolicyOptions.AuthenticationAlgorithm property. +// The authentication algorithm. +const ( + CreateIkePolicyOptionsAuthenticationAlgorithmMd5Const = "md5" + CreateIkePolicyOptionsAuthenticationAlgorithmSha1Const = "sha1" + CreateIkePolicyOptionsAuthenticationAlgorithmSha256Const = "sha256" +) + +// Constants associated with the CreateIkePolicyOptions.EncryptionAlgorithm property. +// The encryption algorithm. +const ( + CreateIkePolicyOptionsEncryptionAlgorithmAes128Const = "aes128" + CreateIkePolicyOptionsEncryptionAlgorithmAes256Const = "aes256" + CreateIkePolicyOptionsEncryptionAlgorithmTripleDesConst = "triple_des" +) + +// NewCreateIkePolicyOptions : Instantiate CreateIkePolicyOptions +func (*VpcClassicV1) NewCreateIkePolicyOptions(authenticationAlgorithm string, dhGroup int64, encryptionAlgorithm string, ikeVersion int64) *CreateIkePolicyOptions { + return &CreateIkePolicyOptions{ + AuthenticationAlgorithm: core.StringPtr(authenticationAlgorithm), + DhGroup: core.Int64Ptr(dhGroup), + EncryptionAlgorithm: core.StringPtr(encryptionAlgorithm), + IkeVersion: core.Int64Ptr(ikeVersion), + } +} + +// SetAuthenticationAlgorithm : Allow user to set AuthenticationAlgorithm +func (options *CreateIkePolicyOptions) SetAuthenticationAlgorithm(authenticationAlgorithm string) *CreateIkePolicyOptions { + options.AuthenticationAlgorithm = core.StringPtr(authenticationAlgorithm) + return options +} + +// SetDhGroup : Allow user to set DhGroup +func (options *CreateIkePolicyOptions) SetDhGroup(dhGroup int64) *CreateIkePolicyOptions { + options.DhGroup = core.Int64Ptr(dhGroup) + return options +} + +// SetEncryptionAlgorithm : Allow user to set EncryptionAlgorithm +func (options *CreateIkePolicyOptions) SetEncryptionAlgorithm(encryptionAlgorithm string) *CreateIkePolicyOptions { + options.EncryptionAlgorithm = core.StringPtr(encryptionAlgorithm) + return options +} + +// SetIkeVersion : Allow user to set IkeVersion +func (options *CreateIkePolicyOptions) SetIkeVersion(ikeVersion int64) *CreateIkePolicyOptions { + options.IkeVersion = core.Int64Ptr(ikeVersion) + return options +} + +// SetKeyLifetime : Allow user to set KeyLifetime +func (options *CreateIkePolicyOptions) SetKeyLifetime(keyLifetime int64) *CreateIkePolicyOptions { + options.KeyLifetime = core.Int64Ptr(keyLifetime) + return options +} + +// SetName : Allow user to set Name +func (options *CreateIkePolicyOptions) SetName(name string) *CreateIkePolicyOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateIkePolicyOptions) SetResourceGroup(resourceGroup ResourceGroupIdentityIntf) *CreateIkePolicyOptions { + options.ResourceGroup = resourceGroup + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateIkePolicyOptions) SetHeaders(param map[string]string) *CreateIkePolicyOptions { + options.Headers = param + return options +} + +// CreateImageOptions : The CreateImage options. +type CreateImageOptions struct { + // The image prototype object. 
+ ImagePrototype ImagePrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateImageOptions : Instantiate CreateImageOptions +func (*VpcClassicV1) NewCreateImageOptions(imagePrototype ImagePrototypeIntf) *CreateImageOptions { + return &CreateImageOptions{ + ImagePrototype: imagePrototype, + } +} + +// SetImagePrototype : Allow user to set ImagePrototype +func (options *CreateImageOptions) SetImagePrototype(imagePrototype ImagePrototypeIntf) *CreateImageOptions { + options.ImagePrototype = imagePrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateImageOptions) SetHeaders(param map[string]string) *CreateImageOptions { + options.Headers = param + return options +} + +// CreateInstanceActionOptions : The CreateInstanceAction options. +type CreateInstanceActionOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The type of action. + Type *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateInstanceActionOptions.Type property. +// The type of action. +const ( + CreateInstanceActionOptionsTypeRebootConst = "reboot" + CreateInstanceActionOptionsTypeResetConst = "reset" + CreateInstanceActionOptionsTypeStartConst = "start" + CreateInstanceActionOptionsTypeStopConst = "stop" +) + +// NewCreateInstanceActionOptions : Instantiate CreateInstanceActionOptions +func (*VpcClassicV1) NewCreateInstanceActionOptions(instanceID string, typeVar string) *CreateInstanceActionOptions { + return &CreateInstanceActionOptions{ + InstanceID: core.StringPtr(instanceID), + Type: core.StringPtr(typeVar), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *CreateInstanceActionOptions) SetInstanceID(instanceID string) *CreateInstanceActionOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetType : Allow user to set Type +func (options *CreateInstanceActionOptions) SetType(typeVar string) *CreateInstanceActionOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateInstanceActionOptions) SetHeaders(param map[string]string) *CreateInstanceActionOptions { + options.Headers = param + return options +} + +// CreateInstanceOptions : The CreateInstance options. +type CreateInstanceOptions struct { + // The instance prototype object. + InstancePrototype InstancePrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateInstanceOptions : Instantiate CreateInstanceOptions +func (*VpcClassicV1) NewCreateInstanceOptions(instancePrototype InstancePrototypeIntf) *CreateInstanceOptions { + return &CreateInstanceOptions{ + InstancePrototype: instancePrototype, + } +} + +// SetInstancePrototype : Allow user to set InstancePrototype +func (options *CreateInstanceOptions) SetInstancePrototype(instancePrototype InstancePrototypeIntf) *CreateInstanceOptions { + options.InstancePrototype = instancePrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateInstanceOptions) SetHeaders(param map[string]string) *CreateInstanceOptions { + options.Headers = param + return options +} + +// CreateInstanceVolumeAttachmentOptions : The CreateInstanceVolumeAttachment options. 
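+//
+// A minimal usage sketch using the constructor and chained setters defined
+// just below (assumes vpcClassicService and a VolumeIdentityIntf value named
+// volumeIdentity built elsewhere):
+//
+//	attachOptions := vpcClassicService.NewCreateInstanceVolumeAttachmentOptions(
+//		"instance-id",
+//		volumeIdentity,
+//	)
+//	attachOptions.SetDeleteVolumeOnInstanceDelete(true).SetName("data-attachment")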
+type CreateInstanceVolumeAttachmentOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The identity of the volume to attach to the instance. + Volume VolumeIdentityIntf `validate:"required"` + + // If set to true, when deleting the instance the volume will also be deleted. + DeleteVolumeOnInstanceDelete *bool + + // The user-defined name for this volume attachment. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateInstanceVolumeAttachmentOptions : Instantiate CreateInstanceVolumeAttachmentOptions +func (*VpcClassicV1) NewCreateInstanceVolumeAttachmentOptions(instanceID string, volume VolumeIdentityIntf) *CreateInstanceVolumeAttachmentOptions { + return &CreateInstanceVolumeAttachmentOptions{ + InstanceID: core.StringPtr(instanceID), + Volume: volume, + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *CreateInstanceVolumeAttachmentOptions) SetInstanceID(instanceID string) *CreateInstanceVolumeAttachmentOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetVolume : Allow user to set Volume +func (options *CreateInstanceVolumeAttachmentOptions) SetVolume(volume VolumeIdentityIntf) *CreateInstanceVolumeAttachmentOptions { + options.Volume = volume + return options +} + +// SetDeleteVolumeOnInstanceDelete : Allow user to set DeleteVolumeOnInstanceDelete +func (options *CreateInstanceVolumeAttachmentOptions) SetDeleteVolumeOnInstanceDelete(deleteVolumeOnInstanceDelete bool) *CreateInstanceVolumeAttachmentOptions { + options.DeleteVolumeOnInstanceDelete = core.BoolPtr(deleteVolumeOnInstanceDelete) + return options +} + +// SetName : Allow user to set Name +func (options *CreateInstanceVolumeAttachmentOptions) SetName(name string) *CreateInstanceVolumeAttachmentOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateInstanceVolumeAttachmentOptions) SetHeaders(param map[string]string) *CreateInstanceVolumeAttachmentOptions { + options.Headers = param + return options +} + +// CreateIpsecPolicyOptions : The CreateIpsecPolicy options. +type CreateIpsecPolicyOptions struct { + // The authentication algorithm. + AuthenticationAlgorithm *string `validate:"required"` + + // The encryption algorithm. + EncryptionAlgorithm *string `validate:"required"` + + // Perfect Forward Secrecy. + Pfs *string `validate:"required"` + + // The key lifetime in seconds. + KeyLifetime *int64 + + // The user-defined name for this IPsec policy. + Name *string + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup ResourceGroupIdentityIntf + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateIpsecPolicyOptions.AuthenticationAlgorithm property. +// The authentication algorithm. +const ( + CreateIpsecPolicyOptionsAuthenticationAlgorithmMd5Const = "md5" + CreateIpsecPolicyOptionsAuthenticationAlgorithmSha1Const = "sha1" + CreateIpsecPolicyOptionsAuthenticationAlgorithmSha256Const = "sha256" +) + +// Constants associated with the CreateIpsecPolicyOptions.EncryptionAlgorithm property. +// The encryption algorithm. 
+const ( + CreateIpsecPolicyOptionsEncryptionAlgorithmAes128Const = "aes128" + CreateIpsecPolicyOptionsEncryptionAlgorithmAes256Const = "aes256" + CreateIpsecPolicyOptionsEncryptionAlgorithmTripleDesConst = "triple_des" +) + +// Constants associated with the CreateIpsecPolicyOptions.Pfs property. +// Perfect Forward Secrecy. +const ( + CreateIpsecPolicyOptionsPfsDisabledConst = "disabled" + CreateIpsecPolicyOptionsPfsGroup14Const = "group_14" + CreateIpsecPolicyOptionsPfsGroup2Const = "group_2" + CreateIpsecPolicyOptionsPfsGroup5Const = "group_5" +) + +// NewCreateIpsecPolicyOptions : Instantiate CreateIpsecPolicyOptions +func (*VpcClassicV1) NewCreateIpsecPolicyOptions(authenticationAlgorithm string, encryptionAlgorithm string, pfs string) *CreateIpsecPolicyOptions { + return &CreateIpsecPolicyOptions{ + AuthenticationAlgorithm: core.StringPtr(authenticationAlgorithm), + EncryptionAlgorithm: core.StringPtr(encryptionAlgorithm), + Pfs: core.StringPtr(pfs), + } +} + +// SetAuthenticationAlgorithm : Allow user to set AuthenticationAlgorithm +func (options *CreateIpsecPolicyOptions) SetAuthenticationAlgorithm(authenticationAlgorithm string) *CreateIpsecPolicyOptions { + options.AuthenticationAlgorithm = core.StringPtr(authenticationAlgorithm) + return options +} + +// SetEncryptionAlgorithm : Allow user to set EncryptionAlgorithm +func (options *CreateIpsecPolicyOptions) SetEncryptionAlgorithm(encryptionAlgorithm string) *CreateIpsecPolicyOptions { + options.EncryptionAlgorithm = core.StringPtr(encryptionAlgorithm) + return options +} + +// SetPfs : Allow user to set Pfs +func (options *CreateIpsecPolicyOptions) SetPfs(pfs string) *CreateIpsecPolicyOptions { + options.Pfs = core.StringPtr(pfs) + return options +} + +// SetKeyLifetime : Allow user to set KeyLifetime +func (options *CreateIpsecPolicyOptions) SetKeyLifetime(keyLifetime int64) *CreateIpsecPolicyOptions { + options.KeyLifetime = core.Int64Ptr(keyLifetime) + return options +} + +// SetName : Allow user to set Name +func (options *CreateIpsecPolicyOptions) SetName(name string) *CreateIpsecPolicyOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateIpsecPolicyOptions) SetResourceGroup(resourceGroup ResourceGroupIdentityIntf) *CreateIpsecPolicyOptions { + options.ResourceGroup = resourceGroup + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateIpsecPolicyOptions) SetHeaders(param map[string]string) *CreateIpsecPolicyOptions { + options.Headers = param + return options +} + +// CreateKeyOptions : The CreateKey options. +type CreateKeyOptions struct { + // A unique public SSH key to import, encoded in PEM format. The key (prior to encoding) must be either 2048 or 4096 + // bits long. + PublicKey *string `validate:"required"` + + // The user-defined name for this key. + Name *string + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup ResourceGroupIdentityIntf + + // The crypto-system used by this key. + Type *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateKeyOptions.Type property. +// The crypto-system used by this key. 
+const ( + CreateKeyOptionsTypeRsaConst = "rsa" +) + +// NewCreateKeyOptions : Instantiate CreateKeyOptions +func (*VpcClassicV1) NewCreateKeyOptions(publicKey string) *CreateKeyOptions { + return &CreateKeyOptions{ + PublicKey: core.StringPtr(publicKey), + } +} + +// SetPublicKey : Allow user to set PublicKey +func (options *CreateKeyOptions) SetPublicKey(publicKey string) *CreateKeyOptions { + options.PublicKey = core.StringPtr(publicKey) + return options +} + +// SetName : Allow user to set Name +func (options *CreateKeyOptions) SetName(name string) *CreateKeyOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateKeyOptions) SetResourceGroup(resourceGroup ResourceGroupIdentityIntf) *CreateKeyOptions { + options.ResourceGroup = resourceGroup + return options +} + +// SetType : Allow user to set Type +func (options *CreateKeyOptions) SetType(typeVar string) *CreateKeyOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateKeyOptions) SetHeaders(param map[string]string) *CreateKeyOptions { + options.Headers = param + return options +} + +// CreateLoadBalancerListenerOptions : The CreateLoadBalancerListener options. +type CreateLoadBalancerListenerOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener port number. Each listener in the load balancer must have a unique + // `port` and `protocol` combination. + Port *int64 `validate:"required"` + + // The listener protocol. + Protocol *string `validate:"required"` + + // If set to `true`, this listener will accept and forward PROXY protocol information. Supported by load balancers in + // the `application` family (otherwise always `false`). + AcceptProxyProtocol *bool + + // The certificate instance used for SSL termination. It is applicable only to `https` + // protocol. + CertificateInstance CertificateInstanceIdentityIntf + + // The connection limit of the listener. + ConnectionLimit *int64 + + // The default pool associated with the listener. The specified pool must: + // + // - Belong to this load balancer + // - Have the same `protocol` as this listener + // - Not already be the default pool for another listener. + DefaultPool LoadBalancerPoolIdentityIntf + + // An array of policies for this listener. + Policies []LoadBalancerListenerPolicyPrototype + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateLoadBalancerListenerOptions.Protocol property. +// The listener protocol. 
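+
+// Illustrative usage sketch (not part of the generated SDK): importing an
+// SSH public key. The client `vpcService` and the `publicKey` string are
+// assumed; only `rsa` keys are accepted per the Type constant above.
+//
+//	options := vpcService.NewCreateKeyOptions(publicKey)
+//	options.SetName("example-ssh-key")
+//	key, response, err := vpcService.CreateKey(options)
+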
+const ( + CreateLoadBalancerListenerOptionsProtocolHTTPConst = "http" + CreateLoadBalancerListenerOptionsProtocolHTTPSConst = "https" + CreateLoadBalancerListenerOptionsProtocolTCPConst = "tcp" +) + +// NewCreateLoadBalancerListenerOptions : Instantiate CreateLoadBalancerListenerOptions +func (*VpcClassicV1) NewCreateLoadBalancerListenerOptions(loadBalancerID string, port int64, protocol string) *CreateLoadBalancerListenerOptions { + return &CreateLoadBalancerListenerOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + Port: core.Int64Ptr(port), + Protocol: core.StringPtr(protocol), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *CreateLoadBalancerListenerOptions) SetLoadBalancerID(loadBalancerID string) *CreateLoadBalancerListenerOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetPort : Allow user to set Port +func (options *CreateLoadBalancerListenerOptions) SetPort(port int64) *CreateLoadBalancerListenerOptions { + options.Port = core.Int64Ptr(port) + return options +} + +// SetProtocol : Allow user to set Protocol +func (options *CreateLoadBalancerListenerOptions) SetProtocol(protocol string) *CreateLoadBalancerListenerOptions { + options.Protocol = core.StringPtr(protocol) + return options +} + +// SetAcceptProxyProtocol : Allow user to set AcceptProxyProtocol +func (options *CreateLoadBalancerListenerOptions) SetAcceptProxyProtocol(acceptProxyProtocol bool) *CreateLoadBalancerListenerOptions { + options.AcceptProxyProtocol = core.BoolPtr(acceptProxyProtocol) + return options +} + +// SetCertificateInstance : Allow user to set CertificateInstance +func (options *CreateLoadBalancerListenerOptions) SetCertificateInstance(certificateInstance CertificateInstanceIdentityIntf) *CreateLoadBalancerListenerOptions { + options.CertificateInstance = certificateInstance + return options +} + +// SetConnectionLimit : Allow user to set ConnectionLimit +func (options *CreateLoadBalancerListenerOptions) SetConnectionLimit(connectionLimit int64) *CreateLoadBalancerListenerOptions { + options.ConnectionLimit = core.Int64Ptr(connectionLimit) + return options +} + +// SetDefaultPool : Allow user to set DefaultPool +func (options *CreateLoadBalancerListenerOptions) SetDefaultPool(defaultPool LoadBalancerPoolIdentityIntf) *CreateLoadBalancerListenerOptions { + options.DefaultPool = defaultPool + return options +} + +// SetPolicies : Allow user to set Policies +func (options *CreateLoadBalancerListenerOptions) SetPolicies(policies []LoadBalancerListenerPolicyPrototype) *CreateLoadBalancerListenerOptions { + options.Policies = policies + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateLoadBalancerListenerOptions) SetHeaders(param map[string]string) *CreateLoadBalancerListenerOptions { + options.Headers = param + return options +} + +// CreateLoadBalancerListenerPolicyOptions : The CreateLoadBalancerListenerPolicy options. +type CreateLoadBalancerListenerPolicyOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // The policy action. + Action *string `validate:"required"` + + // Priority of the policy. Lower value indicates higher priority. + Priority *int64 `validate:"required"` + + // The user-defined name for this policy. Names must be unique within the load balancer listener the policy resides in. 
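+
+// Illustrative usage sketch (not part of the generated SDK): an `http`
+// listener on port 80. For `https`, a certificate instance would also be
+// required via SetCertificateInstance. `vpcService` and `lbID` are assumed.
+//
+//	options := vpcService.NewCreateLoadBalancerListenerOptions(lbID, 80, "http")
+//	options.SetConnectionLimit(2000)
+//	listener, response, err := vpcService.CreateLoadBalancerListener(options)
+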
+ Name *string + + // An array of rules for this policy. + Rules []LoadBalancerListenerPolicyRulePrototype + + // When `action` is `forward`, `LoadBalancerPoolIdentity` is required to specify which + // pool the load balancer forwards the traffic to. When `action` is `redirect`, + // `LoadBalancerListenerPolicyRedirectURLPrototype` is required to specify the url and + // http status code used in the redirect response. + Target LoadBalancerListenerPolicyTargetPrototypeIntf + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateLoadBalancerListenerPolicyOptions.Action property. +// The policy action. +const ( + CreateLoadBalancerListenerPolicyOptionsActionForwardConst = "forward" + CreateLoadBalancerListenerPolicyOptionsActionRedirectConst = "redirect" + CreateLoadBalancerListenerPolicyOptionsActionRejectConst = "reject" +) + +// NewCreateLoadBalancerListenerPolicyOptions : Instantiate CreateLoadBalancerListenerPolicyOptions +func (*VpcClassicV1) NewCreateLoadBalancerListenerPolicyOptions(loadBalancerID string, listenerID string, action string, priority int64) *CreateLoadBalancerListenerPolicyOptions { + return &CreateLoadBalancerListenerPolicyOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + Action: core.StringPtr(action), + Priority: core.Int64Ptr(priority), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *CreateLoadBalancerListenerPolicyOptions) SetLoadBalancerID(loadBalancerID string) *CreateLoadBalancerListenerPolicyOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *CreateLoadBalancerListenerPolicyOptions) SetListenerID(listenerID string) *CreateLoadBalancerListenerPolicyOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetAction : Allow user to set Action +func (options *CreateLoadBalancerListenerPolicyOptions) SetAction(action string) *CreateLoadBalancerListenerPolicyOptions { + options.Action = core.StringPtr(action) + return options +} + +// SetPriority : Allow user to set Priority +func (options *CreateLoadBalancerListenerPolicyOptions) SetPriority(priority int64) *CreateLoadBalancerListenerPolicyOptions { + options.Priority = core.Int64Ptr(priority) + return options +} + +// SetName : Allow user to set Name +func (options *CreateLoadBalancerListenerPolicyOptions) SetName(name string) *CreateLoadBalancerListenerPolicyOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetRules : Allow user to set Rules +func (options *CreateLoadBalancerListenerPolicyOptions) SetRules(rules []LoadBalancerListenerPolicyRulePrototype) *CreateLoadBalancerListenerPolicyOptions { + options.Rules = rules + return options +} + +// SetTarget : Allow user to set Target +func (options *CreateLoadBalancerListenerPolicyOptions) SetTarget(target LoadBalancerListenerPolicyTargetPrototypeIntf) *CreateLoadBalancerListenerPolicyOptions { + options.Target = target + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateLoadBalancerListenerPolicyOptions) SetHeaders(param map[string]string) *CreateLoadBalancerListenerPolicyOptions { + options.Headers = param + return options +} + +// CreateLoadBalancerListenerPolicyRuleOptions : The CreateLoadBalancerListenerPolicyRule options. +type CreateLoadBalancerListenerPolicyRuleOptions struct { + // The load balancer identifier. 
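+
+// Illustrative usage sketch (not part of the generated SDK): a `reject`
+// policy, which needs no target; `forward` and `redirect` policies would
+// additionally call SetTarget, as documented above. `vpcService`, `lbID`
+// and `listenerID` are assumed.
+//
+//	options := vpcService.NewCreateLoadBalancerListenerPolicyOptions(lbID, listenerID, "reject", 5)
+//	policy, response, err := vpcService.CreateLoadBalancerListenerPolicy(options)
+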
+ LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // The policy identifier. + PolicyID *string `validate:"required,ne="` + + // The condition of the rule. + Condition *string `validate:"required"` + + // The type of the rule. + Type *string `validate:"required"` + + // Value to be matched for rule condition. + Value *string `validate:"required"` + + // HTTP header field. This is only applicable to "header" rule type. + Field *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateLoadBalancerListenerPolicyRuleOptions.Condition property. +// The condition of the rule. +const ( + CreateLoadBalancerListenerPolicyRuleOptionsConditionContainsConst = "contains" + CreateLoadBalancerListenerPolicyRuleOptionsConditionEqualsConst = "equals" + CreateLoadBalancerListenerPolicyRuleOptionsConditionMatchesRegexConst = "matches_regex" +) + +// Constants associated with the CreateLoadBalancerListenerPolicyRuleOptions.Type property. +// The type of the rule. +const ( + CreateLoadBalancerListenerPolicyRuleOptionsTypeHeaderConst = "header" + CreateLoadBalancerListenerPolicyRuleOptionsTypeHostnameConst = "hostname" + CreateLoadBalancerListenerPolicyRuleOptionsTypePathConst = "path" +) + +// NewCreateLoadBalancerListenerPolicyRuleOptions : Instantiate CreateLoadBalancerListenerPolicyRuleOptions +func (*VpcClassicV1) NewCreateLoadBalancerListenerPolicyRuleOptions(loadBalancerID string, listenerID string, policyID string, condition string, typeVar string, value string) *CreateLoadBalancerListenerPolicyRuleOptions { + return &CreateLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + PolicyID: core.StringPtr(policyID), + Condition: core.StringPtr(condition), + Type: core.StringPtr(typeVar), + Value: core.StringPtr(value), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *CreateLoadBalancerListenerPolicyRuleOptions) SetLoadBalancerID(loadBalancerID string) *CreateLoadBalancerListenerPolicyRuleOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *CreateLoadBalancerListenerPolicyRuleOptions) SetListenerID(listenerID string) *CreateLoadBalancerListenerPolicyRuleOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetPolicyID : Allow user to set PolicyID +func (options *CreateLoadBalancerListenerPolicyRuleOptions) SetPolicyID(policyID string) *CreateLoadBalancerListenerPolicyRuleOptions { + options.PolicyID = core.StringPtr(policyID) + return options +} + +// SetCondition : Allow user to set Condition +func (options *CreateLoadBalancerListenerPolicyRuleOptions) SetCondition(condition string) *CreateLoadBalancerListenerPolicyRuleOptions { + options.Condition = core.StringPtr(condition) + return options +} + +// SetType : Allow user to set Type +func (options *CreateLoadBalancerListenerPolicyRuleOptions) SetType(typeVar string) *CreateLoadBalancerListenerPolicyRuleOptions { + options.Type = core.StringPtr(typeVar) + return options +} + +// SetValue : Allow user to set Value +func (options *CreateLoadBalancerListenerPolicyRuleOptions) SetValue(value string) *CreateLoadBalancerListenerPolicyRuleOptions { + options.Value = core.StringPtr(value) + return options +} + +// SetField : Allow user to set Field +func (options 
*CreateLoadBalancerListenerPolicyRuleOptions) SetField(field string) *CreateLoadBalancerListenerPolicyRuleOptions { + options.Field = core.StringPtr(field) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateLoadBalancerListenerPolicyRuleOptions) SetHeaders(param map[string]string) *CreateLoadBalancerListenerPolicyRuleOptions { + options.Headers = param + return options +} + +// CreateLoadBalancerOptions : The CreateLoadBalancer options. +type CreateLoadBalancerOptions struct { + // Indicates whether this load balancer is public or private. + IsPublic *bool `validate:"required"` + + // The subnets to provision this load balancer. + Subnets []SubnetIdentityIntf `validate:"required"` + + // The listeners of this load balancer. + Listeners []LoadBalancerListenerPrototypeLoadBalancerContext + + // The logging configuration to use for this load balancer. See [VPC Datapath + // Logging](https://cloud.ibm.com/docs/vpc?topic=vpc-datapath-logging) + // on the logging format, fields and permitted values. + // + // To activate logging, the load balancer profile must support the specified logging + // type. + Logging *LoadBalancerLogging + + // The user-defined name for this load balancer. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string + + // The pools of this load balancer. + Pools []LoadBalancerPoolPrototype + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup ResourceGroupIdentityIntf + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateLoadBalancerOptions : Instantiate CreateLoadBalancerOptions +func (*VpcClassicV1) NewCreateLoadBalancerOptions(isPublic bool, subnets []SubnetIdentityIntf) *CreateLoadBalancerOptions { + return &CreateLoadBalancerOptions{ + IsPublic: core.BoolPtr(isPublic), + Subnets: subnets, + } +} + +// SetIsPublic : Allow user to set IsPublic +func (options *CreateLoadBalancerOptions) SetIsPublic(isPublic bool) *CreateLoadBalancerOptions { + options.IsPublic = core.BoolPtr(isPublic) + return options +} + +// SetSubnets : Allow user to set Subnets +func (options *CreateLoadBalancerOptions) SetSubnets(subnets []SubnetIdentityIntf) *CreateLoadBalancerOptions { + options.Subnets = subnets + return options +} + +// SetListeners : Allow user to set Listeners +func (options *CreateLoadBalancerOptions) SetListeners(listeners []LoadBalancerListenerPrototypeLoadBalancerContext) *CreateLoadBalancerOptions { + options.Listeners = listeners + return options +} + +// SetLogging : Allow user to set Logging +func (options *CreateLoadBalancerOptions) SetLogging(logging *LoadBalancerLogging) *CreateLoadBalancerOptions { + options.Logging = logging + return options +} + +// SetName : Allow user to set Name +func (options *CreateLoadBalancerOptions) SetName(name string) *CreateLoadBalancerOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetPools : Allow user to set Pools +func (options *CreateLoadBalancerOptions) SetPools(pools []LoadBalancerPoolPrototype) *CreateLoadBalancerOptions { + options.Pools = pools + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateLoadBalancerOptions) SetResourceGroup(resourceGroup ResourceGroupIdentityIntf) *CreateLoadBalancerOptions { + options.ResourceGroup = resourceGroup + return options +} + +// SetHeaders : Allow user to set Headers +func 
(options *CreateLoadBalancerOptions) SetHeaders(param map[string]string) *CreateLoadBalancerOptions { + options.Headers = param + return options +} + +// CreateLoadBalancerPoolMemberOptions : The CreateLoadBalancerPoolMember options. +type CreateLoadBalancerPoolMemberOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The pool identifier. + PoolID *string `validate:"required,ne="` + + // The port number of the application running in the server member. + Port *int64 `validate:"required"` + + // The pool member target. + Target LoadBalancerPoolMemberTargetPrototypeIntf `validate:"required"` + + // Weight of the server member. Applicable only if the pool algorithm is + // `weighted_round_robin`. + Weight *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateLoadBalancerPoolMemberOptions : Instantiate CreateLoadBalancerPoolMemberOptions +func (*VpcClassicV1) NewCreateLoadBalancerPoolMemberOptions(loadBalancerID string, poolID string, port int64, target LoadBalancerPoolMemberTargetPrototypeIntf) *CreateLoadBalancerPoolMemberOptions { + return &CreateLoadBalancerPoolMemberOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + PoolID: core.StringPtr(poolID), + Port: core.Int64Ptr(port), + Target: target, + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *CreateLoadBalancerPoolMemberOptions) SetLoadBalancerID(loadBalancerID string) *CreateLoadBalancerPoolMemberOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetPoolID : Allow user to set PoolID +func (options *CreateLoadBalancerPoolMemberOptions) SetPoolID(poolID string) *CreateLoadBalancerPoolMemberOptions { + options.PoolID = core.StringPtr(poolID) + return options +} + +// SetPort : Allow user to set Port +func (options *CreateLoadBalancerPoolMemberOptions) SetPort(port int64) *CreateLoadBalancerPoolMemberOptions { + options.Port = core.Int64Ptr(port) + return options +} + +// SetTarget : Allow user to set Target +func (options *CreateLoadBalancerPoolMemberOptions) SetTarget(target LoadBalancerPoolMemberTargetPrototypeIntf) *CreateLoadBalancerPoolMemberOptions { + options.Target = target + return options +} + +// SetWeight : Allow user to set Weight +func (options *CreateLoadBalancerPoolMemberOptions) SetWeight(weight int64) *CreateLoadBalancerPoolMemberOptions { + options.Weight = core.Int64Ptr(weight) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateLoadBalancerPoolMemberOptions) SetHeaders(param map[string]string) *CreateLoadBalancerPoolMemberOptions { + options.Headers = param + return options +} + +// CreateLoadBalancerPoolOptions : The CreateLoadBalancerPool options. +type CreateLoadBalancerPoolOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The load balancing algorithm. + Algorithm *string `validate:"required"` + + // The health monitor of this pool. + HealthMonitor *LoadBalancerPoolHealthMonitorPrototype `validate:"required"` + + // The protocol used for this load balancer pool. Load balancers in the `network` family support `tcp`. Load balancers + // in the `application` family support `tcp`, `http`, and + // `https`. + Protocol *string `validate:"required"` + + // The members for this load balancer pool. 
For load balancers in the `network` family, the same `port` and `target` + // tuple cannot be shared by a pool member of any other load balancer in the same VPC. + Members []LoadBalancerPoolMemberPrototype + + // The user-defined name for this load balancer pool. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string + + // The PROXY protocol setting for this pool: + // - `v1`: Enabled with version 1 (human-readable header format) + // - `v2`: Enabled with version 2 (binary header format) + // - `disabled`: Disabled + // + // Supported by load balancers in the `application` family (otherwise always `disabled`). + ProxyProtocol *string + + // The session persistence of this pool. + SessionPersistence *LoadBalancerPoolSessionPersistencePrototype + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateLoadBalancerPoolOptions.Algorithm property. +// The load balancing algorithm. +const ( + CreateLoadBalancerPoolOptionsAlgorithmLeastConnectionsConst = "least_connections" + CreateLoadBalancerPoolOptionsAlgorithmRoundRobinConst = "round_robin" + CreateLoadBalancerPoolOptionsAlgorithmWeightedRoundRobinConst = "weighted_round_robin" +) + +// Constants associated with the CreateLoadBalancerPoolOptions.Protocol property. +// The protocol used for this load balancer pool. Load balancers in the `network` family support `tcp`. Load balancers +// in the `application` family support `tcp`, `http`, and +// `https`. +const ( + CreateLoadBalancerPoolOptionsProtocolHTTPConst = "http" + CreateLoadBalancerPoolOptionsProtocolHTTPSConst = "https" + CreateLoadBalancerPoolOptionsProtocolTCPConst = "tcp" +) + +// Constants associated with the CreateLoadBalancerPoolOptions.ProxyProtocol property. +// The PROXY protocol setting for this pool: +// - `v1`: Enabled with version 1 (human-readable header format) +// - `v2`: Enabled with version 2 (binary header format) +// - `disabled`: Disabled +// +// Supported by load balancers in the `application` family (otherwise always `disabled`). 
+const ( + CreateLoadBalancerPoolOptionsProxyProtocolDisabledConst = "disabled" + CreateLoadBalancerPoolOptionsProxyProtocolV1Const = "v1" + CreateLoadBalancerPoolOptionsProxyProtocolV2Const = "v2" +) + +// NewCreateLoadBalancerPoolOptions : Instantiate CreateLoadBalancerPoolOptions +func (*VpcClassicV1) NewCreateLoadBalancerPoolOptions(loadBalancerID string, algorithm string, healthMonitor *LoadBalancerPoolHealthMonitorPrototype, protocol string) *CreateLoadBalancerPoolOptions { + return &CreateLoadBalancerPoolOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + Algorithm: core.StringPtr(algorithm), + HealthMonitor: healthMonitor, + Protocol: core.StringPtr(protocol), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *CreateLoadBalancerPoolOptions) SetLoadBalancerID(loadBalancerID string) *CreateLoadBalancerPoolOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetAlgorithm : Allow user to set Algorithm +func (options *CreateLoadBalancerPoolOptions) SetAlgorithm(algorithm string) *CreateLoadBalancerPoolOptions { + options.Algorithm = core.StringPtr(algorithm) + return options +} + +// SetHealthMonitor : Allow user to set HealthMonitor +func (options *CreateLoadBalancerPoolOptions) SetHealthMonitor(healthMonitor *LoadBalancerPoolHealthMonitorPrototype) *CreateLoadBalancerPoolOptions { + options.HealthMonitor = healthMonitor + return options +} + +// SetProtocol : Allow user to set Protocol +func (options *CreateLoadBalancerPoolOptions) SetProtocol(protocol string) *CreateLoadBalancerPoolOptions { + options.Protocol = core.StringPtr(protocol) + return options +} + +// SetMembers : Allow user to set Members +func (options *CreateLoadBalancerPoolOptions) SetMembers(members []LoadBalancerPoolMemberPrototype) *CreateLoadBalancerPoolOptions { + options.Members = members + return options +} + +// SetName : Allow user to set Name +func (options *CreateLoadBalancerPoolOptions) SetName(name string) *CreateLoadBalancerPoolOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetProxyProtocol : Allow user to set ProxyProtocol +func (options *CreateLoadBalancerPoolOptions) SetProxyProtocol(proxyProtocol string) *CreateLoadBalancerPoolOptions { + options.ProxyProtocol = core.StringPtr(proxyProtocol) + return options +} + +// SetSessionPersistence : Allow user to set SessionPersistence +func (options *CreateLoadBalancerPoolOptions) SetSessionPersistence(sessionPersistence *LoadBalancerPoolSessionPersistencePrototype) *CreateLoadBalancerPoolOptions { + options.SessionPersistence = sessionPersistence + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateLoadBalancerPoolOptions) SetHeaders(param map[string]string) *CreateLoadBalancerPoolOptions { + options.Headers = param + return options +} + +// CreateNetworkACLOptions : The CreateNetworkACL options. +type CreateNetworkACLOptions struct { + // The network ACL prototype object. 
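+
+// Illustrative usage sketch (not part of the generated SDK): creating a
+// round-robin `http` pool. The health monitor field names (Delay,
+// MaxRetries, Timeout, Type) are assumed from the prototype model;
+// `vpcService` and `lbID` are assumed as well. Pool members follow the
+// same pattern via NewCreateLoadBalancerPoolMemberOptions.
+//
+//	monitor := &LoadBalancerPoolHealthMonitorPrototype{
+//		Delay:      core.Int64Ptr(5),
+//		MaxRetries: core.Int64Ptr(2),
+//		Timeout:    core.Int64Ptr(2),
+//		Type:       core.StringPtr("http"),
+//	}
+//	options := vpcService.NewCreateLoadBalancerPoolOptions(lbID, "round_robin", monitor, "http")
+//	pool, response, err := vpcService.CreateLoadBalancerPool(options)
+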
+ NetworkACLPrototype NetworkACLPrototypeIntf + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateNetworkACLOptions : Instantiate CreateNetworkACLOptions +func (*VpcClassicV1) NewCreateNetworkACLOptions() *CreateNetworkACLOptions { + return &CreateNetworkACLOptions{} +} + +// SetNetworkACLPrototype : Allow user to set NetworkACLPrototype +func (options *CreateNetworkACLOptions) SetNetworkACLPrototype(networkACLPrototype NetworkACLPrototypeIntf) *CreateNetworkACLOptions { + options.NetworkACLPrototype = networkACLPrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateNetworkACLOptions) SetHeaders(param map[string]string) *CreateNetworkACLOptions { + options.Headers = param + return options +} + +// CreateNetworkACLRuleOptions : The CreateNetworkACLRule options. +type CreateNetworkACLRuleOptions struct { + // The network ACL identifier. + NetworkACLID *string `validate:"required,ne="` + + // The network ACL rule prototype object. + NetworkACLRulePrototype NetworkACLRulePrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateNetworkACLRuleOptions : Instantiate CreateNetworkACLRuleOptions +func (*VpcClassicV1) NewCreateNetworkACLRuleOptions(networkACLID string, networkACLRulePrototype NetworkACLRulePrototypeIntf) *CreateNetworkACLRuleOptions { + return &CreateNetworkACLRuleOptions{ + NetworkACLID: core.StringPtr(networkACLID), + NetworkACLRulePrototype: networkACLRulePrototype, + } +} + +// SetNetworkACLID : Allow user to set NetworkACLID +func (options *CreateNetworkACLRuleOptions) SetNetworkACLID(networkACLID string) *CreateNetworkACLRuleOptions { + options.NetworkACLID = core.StringPtr(networkACLID) + return options +} + +// SetNetworkACLRulePrototype : Allow user to set NetworkACLRulePrototype +func (options *CreateNetworkACLRuleOptions) SetNetworkACLRulePrototype(networkACLRulePrototype NetworkACLRulePrototypeIntf) *CreateNetworkACLRuleOptions { + options.NetworkACLRulePrototype = networkACLRulePrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateNetworkACLRuleOptions) SetHeaders(param map[string]string) *CreateNetworkACLRuleOptions { + options.Headers = param + return options +} + +// CreatePublicGatewayOptions : The CreatePublicGateway options. +type CreatePublicGatewayOptions struct { + // The VPC this public gateway will serve. + VPC VPCIdentityIntf `validate:"required"` + + // The zone where this public gateway will be created. + Zone ZoneIdentityIntf `validate:"required"` + + FloatingIP PublicGatewayFloatingIPPrototypeIntf + + // The user-defined name for this public gateway. Names must be unique within the VPC the public gateway resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. 
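+
+// Illustrative usage sketch (not part of the generated SDK): since the
+// prototype is optional here, a network ACL can be created with defaults
+// and rules added afterwards with NewCreateNetworkACLRuleOptions.
+// `vpcService` is assumed.
+//
+//	acl, response, err := vpcService.CreateNetworkACL(vpcService.NewCreateNetworkACLOptions())
+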
+ Name *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreatePublicGatewayOptions : Instantiate CreatePublicGatewayOptions +func (*VpcClassicV1) NewCreatePublicGatewayOptions(vpc VPCIdentityIntf, zone ZoneIdentityIntf) *CreatePublicGatewayOptions { + return &CreatePublicGatewayOptions{ + VPC: vpc, + Zone: zone, + } +} + +// SetVPC : Allow user to set VPC +func (options *CreatePublicGatewayOptions) SetVPC(vpc VPCIdentityIntf) *CreatePublicGatewayOptions { + options.VPC = vpc + return options +} + +// SetZone : Allow user to set Zone +func (options *CreatePublicGatewayOptions) SetZone(zone ZoneIdentityIntf) *CreatePublicGatewayOptions { + options.Zone = zone + return options +} + +// SetFloatingIP : Allow user to set FloatingIP +func (options *CreatePublicGatewayOptions) SetFloatingIP(floatingIP PublicGatewayFloatingIPPrototypeIntf) *CreatePublicGatewayOptions { + options.FloatingIP = floatingIP + return options +} + +// SetName : Allow user to set Name +func (options *CreatePublicGatewayOptions) SetName(name string) *CreatePublicGatewayOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreatePublicGatewayOptions) SetHeaders(param map[string]string) *CreatePublicGatewayOptions { + options.Headers = param + return options +} + +// CreateSecurityGroupOptions : The CreateSecurityGroup options. +type CreateSecurityGroupOptions struct { + // The VPC this security group is to be a part of. + VPC VPCIdentityIntf `validate:"required"` + + // The user-defined name for this security group. If unspecified, the name will be a hyphenated list of + // randomly-selected words. Security group names must be unique, within the scope of an account. + Name *string + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup ResourceGroupIdentityIntf + + // Array of rule prototype objects for rules to be created for this security group. If unspecified, no rules will be + // created, resulting in all traffic being denied. 
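+
+// Illustrative usage sketch (not part of the generated SDK): the identity
+// helper types VPCIdentityByID and ZoneIdentityByName are assumed, as are
+// `vpcService` and `vpcID`.
+//
+//	options := vpcService.NewCreatePublicGatewayOptions(
+//		&VPCIdentityByID{ID: core.StringPtr(vpcID)},
+//		&ZoneIdentityByName{Name: core.StringPtr("us-south-1")},
+//	)
+//	gateway, response, err := vpcService.CreatePublicGateway(options)
+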
+ Rules []SecurityGroupRulePrototypeIntf + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateSecurityGroupOptions : Instantiate CreateSecurityGroupOptions +func (*VpcClassicV1) NewCreateSecurityGroupOptions(vpc VPCIdentityIntf) *CreateSecurityGroupOptions { + return &CreateSecurityGroupOptions{ + VPC: vpc, + } +} + +// SetVPC : Allow user to set VPC +func (options *CreateSecurityGroupOptions) SetVPC(vpc VPCIdentityIntf) *CreateSecurityGroupOptions { + options.VPC = vpc + return options +} + +// SetName : Allow user to set Name +func (options *CreateSecurityGroupOptions) SetName(name string) *CreateSecurityGroupOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateSecurityGroupOptions) SetResourceGroup(resourceGroup ResourceGroupIdentityIntf) *CreateSecurityGroupOptions { + options.ResourceGroup = resourceGroup + return options +} + +// SetRules : Allow user to set Rules +func (options *CreateSecurityGroupOptions) SetRules(rules []SecurityGroupRulePrototypeIntf) *CreateSecurityGroupOptions { + options.Rules = rules + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateSecurityGroupOptions) SetHeaders(param map[string]string) *CreateSecurityGroupOptions { + options.Headers = param + return options +} + +// CreateSecurityGroupRuleOptions : The CreateSecurityGroupRule options. +type CreateSecurityGroupRuleOptions struct { + // The security group identifier. + SecurityGroupID *string `validate:"required,ne="` + + // The properties of the security group rule to be created. + SecurityGroupRulePrototype SecurityGroupRulePrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateSecurityGroupRuleOptions : Instantiate CreateSecurityGroupRuleOptions +func (*VpcClassicV1) NewCreateSecurityGroupRuleOptions(securityGroupID string, securityGroupRulePrototype SecurityGroupRulePrototypeIntf) *CreateSecurityGroupRuleOptions { + return &CreateSecurityGroupRuleOptions{ + SecurityGroupID: core.StringPtr(securityGroupID), + SecurityGroupRulePrototype: securityGroupRulePrototype, + } +} + +// SetSecurityGroupID : Allow user to set SecurityGroupID +func (options *CreateSecurityGroupRuleOptions) SetSecurityGroupID(securityGroupID string) *CreateSecurityGroupRuleOptions { + options.SecurityGroupID = core.StringPtr(securityGroupID) + return options +} + +// SetSecurityGroupRulePrototype : Allow user to set SecurityGroupRulePrototype +func (options *CreateSecurityGroupRuleOptions) SetSecurityGroupRulePrototype(securityGroupRulePrototype SecurityGroupRulePrototypeIntf) *CreateSecurityGroupRuleOptions { + options.SecurityGroupRulePrototype = securityGroupRulePrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateSecurityGroupRuleOptions) SetHeaders(param map[string]string) *CreateSecurityGroupRuleOptions { + options.Headers = param + return options +} + +// CreateSubnetOptions : The CreateSubnet options. +type CreateSubnetOptions struct { + // The subnet prototype object. 
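+
+// Illustrative usage sketch (not part of the generated SDK): creating a
+// security group with no rules, so all traffic is denied until rules are
+// added with NewCreateSecurityGroupRuleOptions. VPCIdentityByID,
+// `vpcService` and `vpcID` are assumed.
+//
+//	options := vpcService.NewCreateSecurityGroupOptions(&VPCIdentityByID{ID: core.StringPtr(vpcID)})
+//	options.SetName("example-security-group")
+//	securityGroup, response, err := vpcService.CreateSecurityGroup(options)
+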
+ SubnetPrototype SubnetPrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateSubnetOptions : Instantiate CreateSubnetOptions +func (*VpcClassicV1) NewCreateSubnetOptions(subnetPrototype SubnetPrototypeIntf) *CreateSubnetOptions { + return &CreateSubnetOptions{ + SubnetPrototype: subnetPrototype, + } +} + +// SetSubnetPrototype : Allow user to set SubnetPrototype +func (options *CreateSubnetOptions) SetSubnetPrototype(subnetPrototype SubnetPrototypeIntf) *CreateSubnetOptions { + options.SubnetPrototype = subnetPrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateSubnetOptions) SetHeaders(param map[string]string) *CreateSubnetOptions { + options.Headers = param + return options +} + +// CreateVolumeOptions : The CreateVolume options. +type CreateVolumeOptions struct { + // The volume prototype object. + VolumePrototype VolumePrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateVolumeOptions : Instantiate CreateVolumeOptions +func (*VpcClassicV1) NewCreateVolumeOptions(volumePrototype VolumePrototypeIntf) *CreateVolumeOptions { + return &CreateVolumeOptions{ + VolumePrototype: volumePrototype, + } +} + +// SetVolumePrototype : Allow user to set VolumePrototype +func (options *CreateVolumeOptions) SetVolumePrototype(volumePrototype VolumePrototypeIntf) *CreateVolumeOptions { + options.VolumePrototype = volumePrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateVolumeOptions) SetHeaders(param map[string]string) *CreateVolumeOptions { + options.Headers = param + return options +} + +// CreateVPCAddressPrefixOptions : The CreateVPCAddressPrefix options. +type CreateVPCAddressPrefixOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // The IPv4 range of the address prefix, expressed in CIDR format. The request must not overlap with any existing + // address prefixes in the VPC or any of the following reserved address ranges: + // - `127.0.0.0/8` (IPv4 loopback addresses) + // - `161.26.0.0/16` (IBM services) + // - `166.8.0.0/14` (Cloud Service Endpoints) + // - `169.254.0.0/16` (IPv4 link-local addresses) + // - `224.0.0.0/4` (IPv4 multicast addresses) + // + // The prefix length of the address prefix's CIDR must be between `/9` (8,388,608 addresses) and `/29` (8 addresses). + CIDR *string `validate:"required"` + + // The zone this address prefix is to belong to. + Zone ZoneIdentityIntf `validate:"required"` + + // Indicates whether this is the default prefix for this zone in this VPC. If true, this prefix will become the default + // prefix for this zone in this VPC. This fails if the VPC currently has a default address prefix for this zone. + IsDefault *bool + + // The user-defined name for this address prefix. Names must be unique within the VPC the address prefix resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. 
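+
+// Illustrative usage sketch (not part of the generated SDK): the concrete
+// prototype variant SubnetPrototypeSubnetByCIDR and its field names are
+// assumed; CreateVolumeOptions is driven the same way through a
+// VolumePrototype variant. `vpcService` and `vpcID` are assumed.
+//
+//	prototype := &SubnetPrototypeSubnetByCIDR{
+//		Ipv4CIDRBlock: core.StringPtr("10.0.1.0/24"),
+//		VPC:           &VPCIdentityByID{ID: core.StringPtr(vpcID)},
+//	}
+//	subnet, response, err := vpcService.CreateSubnet(vpcService.NewCreateSubnetOptions(prototype))
+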
+ Name *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateVPCAddressPrefixOptions : Instantiate CreateVPCAddressPrefixOptions +func (*VpcClassicV1) NewCreateVPCAddressPrefixOptions(vpcID string, cidr string, zone ZoneIdentityIntf) *CreateVPCAddressPrefixOptions { + return &CreateVPCAddressPrefixOptions{ + VPCID: core.StringPtr(vpcID), + CIDR: core.StringPtr(cidr), + Zone: zone, + } +} + +// SetVPCID : Allow user to set VPCID +func (options *CreateVPCAddressPrefixOptions) SetVPCID(vpcID string) *CreateVPCAddressPrefixOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetCIDR : Allow user to set CIDR +func (options *CreateVPCAddressPrefixOptions) SetCIDR(cidr string) *CreateVPCAddressPrefixOptions { + options.CIDR = core.StringPtr(cidr) + return options +} + +// SetZone : Allow user to set Zone +func (options *CreateVPCAddressPrefixOptions) SetZone(zone ZoneIdentityIntf) *CreateVPCAddressPrefixOptions { + options.Zone = zone + return options +} + +// SetIsDefault : Allow user to set IsDefault +func (options *CreateVPCAddressPrefixOptions) SetIsDefault(isDefault bool) *CreateVPCAddressPrefixOptions { + options.IsDefault = core.BoolPtr(isDefault) + return options +} + +// SetName : Allow user to set Name +func (options *CreateVPCAddressPrefixOptions) SetName(name string) *CreateVPCAddressPrefixOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateVPCAddressPrefixOptions) SetHeaders(param map[string]string) *CreateVPCAddressPrefixOptions { + options.Headers = param + return options +} + +// CreateVPCOptions : The CreateVPC options. +type CreateVPCOptions struct { + // Indicates whether a default address prefix should be automatically created for each zone in this VPC. If `manual`, + // this VPC will be created with no default address prefixes. + AddressPrefixManagement *string + + // Indicates whether this VPC should be connected to Classic Infrastructure. If true, this VPC's resources will have + // private network connectivity to the account's Classic Infrastructure resources. Only one VPC, per region, may be + // connected in this way. This value is set at creation and subsequently immutable. + ClassicAccess *bool + + // The unique user-defined name for this VPC. If unspecified, the name will be a hyphenated list of randomly-selected + // words. + Name *string + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup ResourceGroupIdentityIntf + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the CreateVPCOptions.AddressPrefixManagement property. +// Indicates whether a default address prefix should be automatically created for each zone in this VPC. If `manual`, +// this VPC will be created with no default address prefixes. 
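+
+// Illustrative usage sketch (not part of the generated SDK): a /24 prefix,
+// within the permitted /9 to /29 range documented above. ZoneIdentityByName,
+// `vpcService` and `vpcID` are assumed.
+//
+//	zone := &ZoneIdentityByName{Name: core.StringPtr("us-south-1")}
+//	options := vpcService.NewCreateVPCAddressPrefixOptions(vpcID, "10.0.0.0/24", zone)
+//	options.SetIsDefault(true)
+//	prefix, response, err := vpcService.CreateVPCAddressPrefix(options)
+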
+const ( + CreateVPCOptionsAddressPrefixManagementAutoConst = "auto" + CreateVPCOptionsAddressPrefixManagementManualConst = "manual" +) + +// NewCreateVPCOptions : Instantiate CreateVPCOptions +func (*VpcClassicV1) NewCreateVPCOptions() *CreateVPCOptions { + return &CreateVPCOptions{} +} + +// SetAddressPrefixManagement : Allow user to set AddressPrefixManagement +func (options *CreateVPCOptions) SetAddressPrefixManagement(addressPrefixManagement string) *CreateVPCOptions { + options.AddressPrefixManagement = core.StringPtr(addressPrefixManagement) + return options +} + +// SetClassicAccess : Allow user to set ClassicAccess +func (options *CreateVPCOptions) SetClassicAccess(classicAccess bool) *CreateVPCOptions { + options.ClassicAccess = core.BoolPtr(classicAccess) + return options +} + +// SetName : Allow user to set Name +func (options *CreateVPCOptions) SetName(name string) *CreateVPCOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateVPCOptions) SetResourceGroup(resourceGroup ResourceGroupIdentityIntf) *CreateVPCOptions { + options.ResourceGroup = resourceGroup + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateVPCOptions) SetHeaders(param map[string]string) *CreateVPCOptions { + options.Headers = param + return options +} + +// CreateVPCRouteOptions : The CreateVPCRoute options. +type CreateVPCRouteOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // The destination of the route. Must not overlap with destinations for existing user-defined routes within the VPC. + Destination *string `validate:"required"` + + // The next hop that packets will be delivered to. + NextHop RouteNextHopPrototypeIntf `validate:"required"` + + // The zone to apply the route to. (Traffic from subnets in this zone will be + // subject to this route.). + Zone ZoneIdentityIntf `validate:"required"` + + // The user-defined name for this route. If unspecified, the name will be a hyphenated list of randomly-selected words. + // Names must be unique within the VPC routing table the route resides in. 
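+
+// Illustrative usage sketch (not part of the generated SDK): every field is
+// optional, so a VPC can be created from a bare options value; `manual`
+// here suppresses the default address prefixes. `vpcService` is assumed.
+//
+//	options := vpcService.NewCreateVPCOptions()
+//	options.SetName("example-vpc").SetAddressPrefixManagement("manual")
+//	vpc, response, err := vpcService.CreateVPC(options)
+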
+ Name *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateVPCRouteOptions : Instantiate CreateVPCRouteOptions +func (*VpcClassicV1) NewCreateVPCRouteOptions(vpcID string, destination string, nextHop RouteNextHopPrototypeIntf, zone ZoneIdentityIntf) *CreateVPCRouteOptions { + return &CreateVPCRouteOptions{ + VPCID: core.StringPtr(vpcID), + Destination: core.StringPtr(destination), + NextHop: nextHop, + Zone: zone, + } +} + +// SetVPCID : Allow user to set VPCID +func (options *CreateVPCRouteOptions) SetVPCID(vpcID string) *CreateVPCRouteOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetDestination : Allow user to set Destination +func (options *CreateVPCRouteOptions) SetDestination(destination string) *CreateVPCRouteOptions { + options.Destination = core.StringPtr(destination) + return options +} + +// SetNextHop : Allow user to set NextHop +func (options *CreateVPCRouteOptions) SetNextHop(nextHop RouteNextHopPrototypeIntf) *CreateVPCRouteOptions { + options.NextHop = nextHop + return options +} + +// SetZone : Allow user to set Zone +func (options *CreateVPCRouteOptions) SetZone(zone ZoneIdentityIntf) *CreateVPCRouteOptions { + options.Zone = zone + return options +} + +// SetName : Allow user to set Name +func (options *CreateVPCRouteOptions) SetName(name string) *CreateVPCRouteOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateVPCRouteOptions) SetHeaders(param map[string]string) *CreateVPCRouteOptions { + options.Headers = param + return options +} + +// CreateVPNGatewayConnectionOptions : The CreateVPNGatewayConnection options. +type CreateVPNGatewayConnectionOptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection prototype object. + VPNGatewayConnectionPrototype VPNGatewayConnectionPrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateVPNGatewayConnectionOptions : Instantiate CreateVPNGatewayConnectionOptions +func (*VpcClassicV1) NewCreateVPNGatewayConnectionOptions(vpnGatewayID string, vpnGatewayConnectionPrototype VPNGatewayConnectionPrototypeIntf) *CreateVPNGatewayConnectionOptions { + return &CreateVPNGatewayConnectionOptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + VPNGatewayConnectionPrototype: vpnGatewayConnectionPrototype, + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *CreateVPNGatewayConnectionOptions) SetVPNGatewayID(vpnGatewayID string) *CreateVPNGatewayConnectionOptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetVPNGatewayConnectionPrototype : Allow user to set VPNGatewayConnectionPrototype +func (options *CreateVPNGatewayConnectionOptions) SetVPNGatewayConnectionPrototype(vpnGatewayConnectionPrototype VPNGatewayConnectionPrototypeIntf) *CreateVPNGatewayConnectionOptions { + options.VPNGatewayConnectionPrototype = vpnGatewayConnectionPrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateVPNGatewayConnectionOptions) SetHeaders(param map[string]string) *CreateVPNGatewayConnectionOptions { + options.Headers = param + return options +} + +// CreateVPNGatewayOptions : The CreateVPNGateway options. +type CreateVPNGatewayOptions struct { + // The VPN gateway prototype object. 
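+
+// Illustrative usage sketch (not part of the generated SDK): the next-hop
+// variant RouteNextHopPrototypeRouteNextHopIP is assumed; any
+// RouteNextHopPrototypeIntf implementation works. CreateVPNGatewayConnection
+// above follows the same prototype-driven pattern. `vpcService` and `vpcID`
+// are assumed.
+//
+//	nextHop := &RouteNextHopPrototypeRouteNextHopIP{Address: core.StringPtr("10.0.0.4")}
+//	zone := &ZoneIdentityByName{Name: core.StringPtr("us-south-1")}
+//	options := vpcService.NewCreateVPCRouteOptions(vpcID, "192.168.3.0/24", nextHop, zone)
+//	route, response, err := vpcService.CreateVPCRoute(options)
+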
+ VPNGatewayPrototype VPNGatewayPrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateVPNGatewayOptions : Instantiate CreateVPNGatewayOptions +func (*VpcClassicV1) NewCreateVPNGatewayOptions(vpnGatewayPrototype VPNGatewayPrototypeIntf) *CreateVPNGatewayOptions { + return &CreateVPNGatewayOptions{ + VPNGatewayPrototype: vpnGatewayPrototype, + } +} + +// SetVPNGatewayPrototype : Allow user to set VPNGatewayPrototype +func (options *CreateVPNGatewayOptions) SetVPNGatewayPrototype(vpnGatewayPrototype VPNGatewayPrototypeIntf) *CreateVPNGatewayOptions { + options.VPNGatewayPrototype = vpnGatewayPrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateVPNGatewayOptions) SetHeaders(param map[string]string) *CreateVPNGatewayOptions { + options.Headers = param + return options +} + +// DefaultSecurityGroup : DefaultSecurityGroup struct +type DefaultSecurityGroup struct { + // The date and time that this security group was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The security group's CRN. + CRN *string `json:"crn" validate:"required"` + + // The security group's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this security group. + ID *string `json:"id" validate:"required"` + + // The name of the default security group created for a VPC. The name will be a hyphenated list of randomly-selected + // words at creation, but may be user-specified with a subsequent request. + Name *string `json:"name" validate:"required"` + + // Array of rules for the default security group for a VPC. Defaults to allowing all outbound traffic, and allowing all + // inbound traffic from other interfaces in the VPC's default security group. Rules in the default security group may + // be changed, added or removed. + Rules []SecurityGroupRuleIntf `json:"rules" validate:"required"` + + // The VPC this security group is a part of. + VPC *VPCReference `json:"vpc" validate:"required"` +} + +// UnmarshalDefaultSecurityGroup unmarshals an instance of DefaultSecurityGroup from the specified map of raw messages. +func UnmarshalDefaultSecurityGroup(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(DefaultSecurityGroup) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalSecurityGroupRule) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// DeleteFloatingIPOptions : The DeleteFloatingIP options. +type DeleteFloatingIPOptions struct { + // The floating IP identifier. 
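+
+// Note (editorial summary of the generated code above): every Unmarshal*
+// function follows the same shape: primitive fields are decoded with
+// core.UnmarshalPrimitive, and nested models with core.UnmarshalModel,
+// which dispatches to the nested type's own unmarshaller, for example:
+//
+//	err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalSecurityGroupRule)
+//
+// The decoded value is then copied into the caller's result via reflection.
+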
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteFloatingIPOptions : Instantiate DeleteFloatingIPOptions +func (*VpcClassicV1) NewDeleteFloatingIPOptions(id string) *DeleteFloatingIPOptions { + return &DeleteFloatingIPOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteFloatingIPOptions) SetID(id string) *DeleteFloatingIPOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteFloatingIPOptions) SetHeaders(param map[string]string) *DeleteFloatingIPOptions { + options.Headers = param + return options +} + +// DeleteIkePolicyOptions : The DeleteIkePolicy options. +type DeleteIkePolicyOptions struct { + // The IKE policy identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteIkePolicyOptions : Instantiate DeleteIkePolicyOptions +func (*VpcClassicV1) NewDeleteIkePolicyOptions(id string) *DeleteIkePolicyOptions { + return &DeleteIkePolicyOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteIkePolicyOptions) SetID(id string) *DeleteIkePolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteIkePolicyOptions) SetHeaders(param map[string]string) *DeleteIkePolicyOptions { + options.Headers = param + return options +} + +// DeleteImageOptions : The DeleteImage options. +type DeleteImageOptions struct { + // The image identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteImageOptions : Instantiate DeleteImageOptions +func (*VpcClassicV1) NewDeleteImageOptions(id string) *DeleteImageOptions { + return &DeleteImageOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteImageOptions) SetID(id string) *DeleteImageOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteImageOptions) SetHeaders(param map[string]string) *DeleteImageOptions { + options.Headers = param + return options +} + +// DeleteInstanceOptions : The DeleteInstance options. +type DeleteInstanceOptions struct { + // The instance identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteInstanceOptions : Instantiate DeleteInstanceOptions +func (*VpcClassicV1) NewDeleteInstanceOptions(id string) *DeleteInstanceOptions { + return &DeleteInstanceOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteInstanceOptions) SetID(id string) *DeleteInstanceOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteInstanceOptions) SetHeaders(param map[string]string) *DeleteInstanceOptions { + options.Headers = param + return options +} + +// DeleteInstanceVolumeAttachmentOptions : The DeleteInstanceVolumeAttachment options. +type DeleteInstanceVolumeAttachmentOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The volume attachment identifier. 
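+
+// Illustrative usage sketch (not part of the generated SDK): the
+// Delete*Options types all reduce to a required identifier plus optional
+// headers, so they are used identically. Deleting a floating IP, assuming
+// `vpcService` and `floatingIPID`:
+//
+//	options := vpcService.NewDeleteFloatingIPOptions(floatingIPID)
+//	response, err := vpcService.DeleteFloatingIP(options)
+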
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteInstanceVolumeAttachmentOptions : Instantiate DeleteInstanceVolumeAttachmentOptions +func (*VpcClassicV1) NewDeleteInstanceVolumeAttachmentOptions(instanceID string, id string) *DeleteInstanceVolumeAttachmentOptions { + return &DeleteInstanceVolumeAttachmentOptions{ + InstanceID: core.StringPtr(instanceID), + ID: core.StringPtr(id), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *DeleteInstanceVolumeAttachmentOptions) SetInstanceID(instanceID string) *DeleteInstanceVolumeAttachmentOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteInstanceVolumeAttachmentOptions) SetID(id string) *DeleteInstanceVolumeAttachmentOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteInstanceVolumeAttachmentOptions) SetHeaders(param map[string]string) *DeleteInstanceVolumeAttachmentOptions { + options.Headers = param + return options +} + +// DeleteIpsecPolicyOptions : The DeleteIpsecPolicy options. +type DeleteIpsecPolicyOptions struct { + // The IPsec policy identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteIpsecPolicyOptions : Instantiate DeleteIpsecPolicyOptions +func (*VpcClassicV1) NewDeleteIpsecPolicyOptions(id string) *DeleteIpsecPolicyOptions { + return &DeleteIpsecPolicyOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteIpsecPolicyOptions) SetID(id string) *DeleteIpsecPolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteIpsecPolicyOptions) SetHeaders(param map[string]string) *DeleteIpsecPolicyOptions { + options.Headers = param + return options +} + +// DeleteKeyOptions : The DeleteKey options. +type DeleteKeyOptions struct { + // The key identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteKeyOptions : Instantiate DeleteKeyOptions +func (*VpcClassicV1) NewDeleteKeyOptions(id string) *DeleteKeyOptions { + return &DeleteKeyOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteKeyOptions) SetID(id string) *DeleteKeyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteKeyOptions) SetHeaders(param map[string]string) *DeleteKeyOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerListenerOptions : The DeleteLoadBalancerListener options. +type DeleteLoadBalancerListenerOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerListenerOptions : Instantiate DeleteLoadBalancerListenerOptions +func (*VpcClassicV1) NewDeleteLoadBalancerListenerOptions(loadBalancerID string, id string) *DeleteLoadBalancerListenerOptions { + return &DeleteLoadBalancerListenerOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *DeleteLoadBalancerListenerOptions) SetLoadBalancerID(loadBalancerID string) *DeleteLoadBalancerListenerOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteLoadBalancerListenerOptions) SetID(id string) *DeleteLoadBalancerListenerOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerListenerOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerListenerOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerListenerPolicyOptions : The DeleteLoadBalancerListenerPolicy options. +type DeleteLoadBalancerListenerPolicyOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // The policy identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerListenerPolicyOptions : Instantiate DeleteLoadBalancerListenerPolicyOptions +func (*VpcClassicV1) NewDeleteLoadBalancerListenerPolicyOptions(loadBalancerID string, listenerID string, id string) *DeleteLoadBalancerListenerPolicyOptions { + return &DeleteLoadBalancerListenerPolicyOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *DeleteLoadBalancerListenerPolicyOptions) SetLoadBalancerID(loadBalancerID string) *DeleteLoadBalancerListenerPolicyOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *DeleteLoadBalancerListenerPolicyOptions) SetListenerID(listenerID string) *DeleteLoadBalancerListenerPolicyOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteLoadBalancerListenerPolicyOptions) SetID(id string) *DeleteLoadBalancerListenerPolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerListenerPolicyOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerListenerPolicyOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerListenerPolicyRuleOptions : The DeleteLoadBalancerListenerPolicyRule options. +type DeleteLoadBalancerListenerPolicyRuleOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // The policy identifier. + PolicyID *string `validate:"required,ne="` + + // The rule identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerListenerPolicyRuleOptions : Instantiate DeleteLoadBalancerListenerPolicyRuleOptions +func (*VpcClassicV1) NewDeleteLoadBalancerListenerPolicyRuleOptions(loadBalancerID string, listenerID string, policyID string, id string) *DeleteLoadBalancerListenerPolicyRuleOptions { + return &DeleteLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + PolicyID: core.StringPtr(policyID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *DeleteLoadBalancerListenerPolicyRuleOptions) SetLoadBalancerID(loadBalancerID string) *DeleteLoadBalancerListenerPolicyRuleOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *DeleteLoadBalancerListenerPolicyRuleOptions) SetListenerID(listenerID string) *DeleteLoadBalancerListenerPolicyRuleOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetPolicyID : Allow user to set PolicyID +func (options *DeleteLoadBalancerListenerPolicyRuleOptions) SetPolicyID(policyID string) *DeleteLoadBalancerListenerPolicyRuleOptions { + options.PolicyID = core.StringPtr(policyID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteLoadBalancerListenerPolicyRuleOptions) SetID(id string) *DeleteLoadBalancerListenerPolicyRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerListenerPolicyRuleOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerListenerPolicyRuleOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerOptions : The DeleteLoadBalancer options. +type DeleteLoadBalancerOptions struct { + // The load balancer identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerOptions : Instantiate DeleteLoadBalancerOptions +func (*VpcClassicV1) NewDeleteLoadBalancerOptions(id string) *DeleteLoadBalancerOptions { + return &DeleteLoadBalancerOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteLoadBalancerOptions) SetID(id string) *DeleteLoadBalancerOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerPoolMemberOptions : The DeleteLoadBalancerPoolMember options. +type DeleteLoadBalancerPoolMemberOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The pool identifier. + PoolID *string `validate:"required,ne="` + + // The member identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerPoolMemberOptions : Instantiate DeleteLoadBalancerPoolMemberOptions +func (*VpcClassicV1) NewDeleteLoadBalancerPoolMemberOptions(loadBalancerID string, poolID string, id string) *DeleteLoadBalancerPoolMemberOptions { + return &DeleteLoadBalancerPoolMemberOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + PoolID: core.StringPtr(poolID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *DeleteLoadBalancerPoolMemberOptions) SetLoadBalancerID(loadBalancerID string) *DeleteLoadBalancerPoolMemberOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetPoolID : Allow user to set PoolID +func (options *DeleteLoadBalancerPoolMemberOptions) SetPoolID(poolID string) *DeleteLoadBalancerPoolMemberOptions { + options.PoolID = core.StringPtr(poolID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteLoadBalancerPoolMemberOptions) SetID(id string) *DeleteLoadBalancerPoolMemberOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerPoolMemberOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerPoolMemberOptions { + options.Headers = param + return options +} + +// DeleteLoadBalancerPoolOptions : The DeleteLoadBalancerPool options. +type DeleteLoadBalancerPoolOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The pool identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteLoadBalancerPoolOptions : Instantiate DeleteLoadBalancerPoolOptions +func (*VpcClassicV1) NewDeleteLoadBalancerPoolOptions(loadBalancerID string, id string) *DeleteLoadBalancerPoolOptions { + return &DeleteLoadBalancerPoolOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *DeleteLoadBalancerPoolOptions) SetLoadBalancerID(loadBalancerID string) *DeleteLoadBalancerPoolOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteLoadBalancerPoolOptions) SetID(id string) *DeleteLoadBalancerPoolOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteLoadBalancerPoolOptions) SetHeaders(param map[string]string) *DeleteLoadBalancerPoolOptions { + options.Headers = param + return options +} + +// DeleteNetworkACLOptions : The DeleteNetworkACL options. +type DeleteNetworkACLOptions struct { + // The network ACL identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteNetworkACLOptions : Instantiate DeleteNetworkACLOptions +func (*VpcClassicV1) NewDeleteNetworkACLOptions(id string) *DeleteNetworkACLOptions { + return &DeleteNetworkACLOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteNetworkACLOptions) SetID(id string) *DeleteNetworkACLOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteNetworkACLOptions) SetHeaders(param map[string]string) *DeleteNetworkACLOptions { + options.Headers = param + return options +} + +// DeleteNetworkACLRuleOptions : The DeleteNetworkACLRule options. +type DeleteNetworkACLRuleOptions struct { + // The network ACL identifier. + NetworkACLID *string `validate:"required,ne="` + + // The rule identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteNetworkACLRuleOptions : Instantiate DeleteNetworkACLRuleOptions +func (*VpcClassicV1) NewDeleteNetworkACLRuleOptions(networkACLID string, id string) *DeleteNetworkACLRuleOptions { + return &DeleteNetworkACLRuleOptions{ + NetworkACLID: core.StringPtr(networkACLID), + ID: core.StringPtr(id), + } +} + +// SetNetworkACLID : Allow user to set NetworkACLID +func (options *DeleteNetworkACLRuleOptions) SetNetworkACLID(networkACLID string) *DeleteNetworkACLRuleOptions { + options.NetworkACLID = core.StringPtr(networkACLID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteNetworkACLRuleOptions) SetID(id string) *DeleteNetworkACLRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteNetworkACLRuleOptions) SetHeaders(param map[string]string) *DeleteNetworkACLRuleOptions { + options.Headers = param + return options +} + +// DeletePublicGatewayOptions : The DeletePublicGateway options. +type DeletePublicGatewayOptions struct { + // The public gateway identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeletePublicGatewayOptions : Instantiate DeletePublicGatewayOptions +func (*VpcClassicV1) NewDeletePublicGatewayOptions(id string) *DeletePublicGatewayOptions { + return &DeletePublicGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeletePublicGatewayOptions) SetID(id string) *DeletePublicGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeletePublicGatewayOptions) SetHeaders(param map[string]string) *DeletePublicGatewayOptions { + options.Headers = param + return options +} + +// DeleteSecurityGroupOptions : The DeleteSecurityGroup options. +type DeleteSecurityGroupOptions struct { + // The security group identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSecurityGroupOptions : Instantiate DeleteSecurityGroupOptions +func (*VpcClassicV1) NewDeleteSecurityGroupOptions(id string) *DeleteSecurityGroupOptions { + return &DeleteSecurityGroupOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteSecurityGroupOptions) SetID(id string) *DeleteSecurityGroupOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSecurityGroupOptions) SetHeaders(param map[string]string) *DeleteSecurityGroupOptions { + options.Headers = param + return options +} + +// DeleteSecurityGroupRuleOptions : The DeleteSecurityGroupRule options. +type DeleteSecurityGroupRuleOptions struct { + // The security group identifier. + SecurityGroupID *string `validate:"required,ne="` + + // The rule identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSecurityGroupRuleOptions : Instantiate DeleteSecurityGroupRuleOptions +func (*VpcClassicV1) NewDeleteSecurityGroupRuleOptions(securityGroupID string, id string) *DeleteSecurityGroupRuleOptions { + return &DeleteSecurityGroupRuleOptions{ + SecurityGroupID: core.StringPtr(securityGroupID), + ID: core.StringPtr(id), + } +} + +// SetSecurityGroupID : Allow user to set SecurityGroupID +func (options *DeleteSecurityGroupRuleOptions) SetSecurityGroupID(securityGroupID string) *DeleteSecurityGroupRuleOptions { + options.SecurityGroupID = core.StringPtr(securityGroupID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteSecurityGroupRuleOptions) SetID(id string) *DeleteSecurityGroupRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSecurityGroupRuleOptions) SetHeaders(param map[string]string) *DeleteSecurityGroupRuleOptions { + options.Headers = param + return options +} + +// DeleteSubnetOptions : The DeleteSubnet options. +type DeleteSubnetOptions struct { + // The subnet identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSubnetOptions : Instantiate DeleteSubnetOptions +func (*VpcClassicV1) NewDeleteSubnetOptions(id string) *DeleteSubnetOptions { + return &DeleteSubnetOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteSubnetOptions) SetID(id string) *DeleteSubnetOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSubnetOptions) SetHeaders(param map[string]string) *DeleteSubnetOptions { + options.Headers = param + return options +} + +// DeleteVolumeOptions : The DeleteVolume options. +type DeleteVolumeOptions struct { + // The volume identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteVolumeOptions : Instantiate DeleteVolumeOptions +func (*VpcClassicV1) NewDeleteVolumeOptions(id string) *DeleteVolumeOptions { + return &DeleteVolumeOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteVolumeOptions) SetID(id string) *DeleteVolumeOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteVolumeOptions) SetHeaders(param map[string]string) *DeleteVolumeOptions { + options.Headers = param + return options +} + +// DeleteVPCAddressPrefixOptions : The DeleteVPCAddressPrefix options. +type DeleteVPCAddressPrefixOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // The prefix identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteVPCAddressPrefixOptions : Instantiate DeleteVPCAddressPrefixOptions +func (*VpcClassicV1) NewDeleteVPCAddressPrefixOptions(vpcID string, id string) *DeleteVPCAddressPrefixOptions { + return &DeleteVPCAddressPrefixOptions{ + VPCID: core.StringPtr(vpcID), + ID: core.StringPtr(id), + } +} + +// SetVPCID : Allow user to set VPCID +func (options *DeleteVPCAddressPrefixOptions) SetVPCID(vpcID string) *DeleteVPCAddressPrefixOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteVPCAddressPrefixOptions) SetID(id string) *DeleteVPCAddressPrefixOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteVPCAddressPrefixOptions) SetHeaders(param map[string]string) *DeleteVPCAddressPrefixOptions { + options.Headers = param + return options +} + +// DeleteVPCOptions : The DeleteVPC options. +type DeleteVPCOptions struct { + // The VPC identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteVPCOptions : Instantiate DeleteVPCOptions +func (*VpcClassicV1) NewDeleteVPCOptions(id string) *DeleteVPCOptions { + return &DeleteVPCOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteVPCOptions) SetID(id string) *DeleteVPCOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteVPCOptions) SetHeaders(param map[string]string) *DeleteVPCOptions { + options.Headers = param + return options +} + +// DeleteVPCRouteOptions : The DeleteVPCRoute options. +type DeleteVPCRouteOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // The route identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteVPCRouteOptions : Instantiate DeleteVPCRouteOptions +func (*VpcClassicV1) NewDeleteVPCRouteOptions(vpcID string, id string) *DeleteVPCRouteOptions { + return &DeleteVPCRouteOptions{ + VPCID: core.StringPtr(vpcID), + ID: core.StringPtr(id), + } +} + +// SetVPCID : Allow user to set VPCID +func (options *DeleteVPCRouteOptions) SetVPCID(vpcID string) *DeleteVPCRouteOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteVPCRouteOptions) SetID(id string) *DeleteVPCRouteOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteVPCRouteOptions) SetHeaders(param map[string]string) *DeleteVPCRouteOptions { + options.Headers = param + return options +} + +// DeleteVPNGatewayConnectionOptions : The DeleteVPNGatewayConnection options. +type DeleteVPNGatewayConnectionOptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteVPNGatewayConnectionOptions : Instantiate DeleteVPNGatewayConnectionOptions +func (*VpcClassicV1) NewDeleteVPNGatewayConnectionOptions(vpnGatewayID string, id string) *DeleteVPNGatewayConnectionOptions { + return &DeleteVPNGatewayConnectionOptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *DeleteVPNGatewayConnectionOptions) SetVPNGatewayID(vpnGatewayID string) *DeleteVPNGatewayConnectionOptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteVPNGatewayConnectionOptions) SetID(id string) *DeleteVPNGatewayConnectionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteVPNGatewayConnectionOptions) SetHeaders(param map[string]string) *DeleteVPNGatewayConnectionOptions { + options.Headers = param + return options +} + +// DeleteVPNGatewayOptions : The DeleteVPNGateway options. +type DeleteVPNGatewayOptions struct { + // The VPN gateway identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteVPNGatewayOptions : Instantiate DeleteVPNGatewayOptions +func (*VpcClassicV1) NewDeleteVPNGatewayOptions(id string) *DeleteVPNGatewayOptions { + return &DeleteVPNGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteVPNGatewayOptions) SetID(id string) *DeleteVPNGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteVPNGatewayOptions) SetHeaders(param map[string]string) *DeleteVPNGatewayOptions { + options.Headers = param + return options +} + +// EncryptionKeyIdentity : Identifies an encryption key by a unique property. 
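+// A minimal sketch (illustrative comment only, not part of the generated SDK):
+// callers would normally supply the concrete EncryptionKeyIdentityByCRN variant
+// listed below, e.g. with a placeholder CRN:
+//
+//   encKey := &EncryptionKeyIdentityByCRN{
+//     CRN: core.StringPtr("crn:v1:bluemix:public:kms:us-south:a/account:instance:key:id"), // placeholder CRN
+//   }
+//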
+// Models which "extend" this model: +// - EncryptionKeyIdentityByCRN +type EncryptionKeyIdentity struct { + // The CRN of the [Key Protect Root + // Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto + // Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource. + CRN *string `json:"crn,omitempty"` +} + +func (*EncryptionKeyIdentity) isaEncryptionKeyIdentity() bool { + return true +} + +type EncryptionKeyIdentityIntf interface { + isaEncryptionKeyIdentity() bool +} + +// UnmarshalEncryptionKeyIdentity unmarshals an instance of EncryptionKeyIdentity from the specified map of raw messages. +func UnmarshalEncryptionKeyIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(EncryptionKeyIdentity) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// EncryptionKeyReference : EncryptionKeyReference struct +type EncryptionKeyReference struct { + // The CRN of the [Key Protect Root + // Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto + // Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource. + CRN *string `json:"crn" validate:"required"` +} + +// UnmarshalEncryptionKeyReference unmarshals an instance of EncryptionKeyReference from the specified map of raw messages. +func UnmarshalEncryptionKeyReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(EncryptionKeyReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIP : FloatingIP struct +type FloatingIP struct { + // The globally unique IP address. + Address *string `json:"address" validate:"required"` + + // The date and time that the floating IP was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN for this floating IP. + CRN *string `json:"crn" validate:"required"` + + // The URL for this floating IP. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this floating IP. + ID *string `json:"id" validate:"required"` + + // The unique user-defined name for this floating IP. + Name *string `json:"name" validate:"required"` + + // The status of the floating IP. + Status *string `json:"status" validate:"required"` + + // The target of this floating IP. + Target FloatingIPTargetIntf `json:"target,omitempty"` + + // The zone the floating IP resides in. + Zone *ZoneReference `json:"zone" validate:"required"` +} + +// Constants associated with the FloatingIP.Status property. +// The status of the floating IP. +const ( + FloatingIPStatusAvailableConst = "available" + FloatingIPStatusDeletingConst = "deleting" + FloatingIPStatusFailedConst = "failed" + FloatingIPStatusPendingConst = "pending" +) + +// UnmarshalFloatingIP unmarshals an instance of FloatingIP from the specified map of raw messages. 
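+// These Unmarshal* helpers are normally invoked for the caller by the SDK core
+// (via core.UnmarshalModel) while decoding a response body; direct use is rare.
+// A hedged sketch of a direct call, assuming m holds the raw JSON messages of a
+// single floating IP:
+//
+//   var fip *FloatingIP
+//   err := UnmarshalFloatingIP(m, &fip)
+//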
+func UnmarshalFloatingIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalFloatingIPTarget) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPByTargetNetworkInterfaceIdentity : The network interface this floating IP is to be bound to. +// Models which "extend" this model: +// - FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID +// - FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref +type FloatingIPByTargetNetworkInterfaceIdentity struct { + // The unique identifier for this network interface. + ID *string `json:"id,omitempty"` + + // The URL for this network interface. + Href *string `json:"href,omitempty"` +} + +func (*FloatingIPByTargetNetworkInterfaceIdentity) isaFloatingIPByTargetNetworkInterfaceIdentity() bool { + return true +} + +type FloatingIPByTargetNetworkInterfaceIdentityIntf interface { + isaFloatingIPByTargetNetworkInterfaceIdentity() bool +} + +// UnmarshalFloatingIPByTargetNetworkInterfaceIdentity unmarshals an instance of FloatingIPByTargetNetworkInterfaceIdentity from the specified map of raw messages. +func UnmarshalFloatingIPByTargetNetworkInterfaceIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPByTargetNetworkInterfaceIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPCollection : FloatingIPCollection struct +type FloatingIPCollection struct { + // A link to the first page of resources. + First *FloatingIPCollectionFirst `json:"first" validate:"required"` + + // Collection of floating IPs. + FloatingIps []FloatingIP `json:"floating_ips" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *FloatingIPCollectionNext `json:"next,omitempty"` +} + +// UnmarshalFloatingIPCollection unmarshals an instance of FloatingIPCollection from the specified map of raw messages. 
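+// FloatingIPCollection (decoded by this helper) is a paginated response: Next,
+// when present, links to the following page. An illustrative fetch, assuming
+// the ListFloatingIps operation defined elsewhere in this file and an
+// authenticated service value named vpcService:
+//
+//   collection, response, err := vpcService.ListFloatingIps(vpcService.NewListFloatingIpsOptions())
+//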
+func UnmarshalFloatingIPCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalFloatingIPCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalModel(m, "floating_ips", &obj.FloatingIps, UnmarshalFloatingIP) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalFloatingIPCollectionNext) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPCollectionFirst : A link to the first page of resources. +type FloatingIPCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalFloatingIPCollectionFirst unmarshals an instance of FloatingIPCollectionFirst from the specified map of raw messages. +func UnmarshalFloatingIPCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type FloatingIPCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalFloatingIPCollectionNext unmarshals an instance of FloatingIPCollectionNext from the specified map of raw messages. +func UnmarshalFloatingIPCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPPatch : FloatingIPPatch struct +type FloatingIPPatch struct { + // The unique user-defined name for this floating IP. + Name *string `json:"name,omitempty"` + + // A new network interface to bind this floating IP to, replacing any existing binding. + // For this request to succeed, the existing floating IP must not be required by another + // resource, such as a public gateway. + Target FloatingIPPatchTargetNetworkInterfaceIdentityIntf `json:"target,omitempty"` +} + +// UnmarshalFloatingIPPatch unmarshals an instance of FloatingIPPatch from the specified map of raw messages. +func UnmarshalFloatingIPPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalFloatingIPPatchTargetNetworkInterfaceIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the FloatingIPPatch +func (floatingIPPatch *FloatingIPPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(floatingIPPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// FloatingIPPatchTargetNetworkInterfaceIdentity : A new network interface to bind this floating IP to, replacing any existing binding. 
+// For this request to succeed, the
+// existing floating IP must not be required by another resource, such as a public gateway.
+// Models which "extend" this model:
+// - FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID
+// - FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref
+type FloatingIPPatchTargetNetworkInterfaceIdentity struct {
+ // The unique identifier for this network interface.
+ ID *string `json:"id,omitempty"`
+
+ // The URL for this network interface.
+ Href *string `json:"href,omitempty"`
+}
+
+func (*FloatingIPPatchTargetNetworkInterfaceIdentity) isaFloatingIPPatchTargetNetworkInterfaceIdentity() bool {
+ return true
+}
+
+type FloatingIPPatchTargetNetworkInterfaceIdentityIntf interface {
+ isaFloatingIPPatchTargetNetworkInterfaceIdentity() bool
+}
+
+// UnmarshalFloatingIPPatchTargetNetworkInterfaceIdentity unmarshals an instance of FloatingIPPatchTargetNetworkInterfaceIdentity from the specified map of raw messages.
+func UnmarshalFloatingIPPatchTargetNetworkInterfaceIdentity(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(FloatingIPPatchTargetNetworkInterfaceIdentity)
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// FloatingIPPrototype : FloatingIPPrototype struct
+// Models which "extend" this model:
+// - FloatingIPPrototypeFloatingIPByZone
+// - FloatingIPPrototypeFloatingIPByTarget
+type FloatingIPPrototype struct {
+ // The unique user-defined name for this floating IP. If unspecified, the name will be a hyphenated list of
+ // randomly-selected words.
+ Name *string `json:"name,omitempty"`
+
+ // The identity of the zone to provision a floating IP in.
+ Zone ZoneIdentityIntf `json:"zone,omitempty"`
+
+ // The network interface this floating IP is to be bound to.
+ Target FloatingIPByTargetNetworkInterfaceIdentityIntf `json:"target,omitempty"`
+}
+
+func (*FloatingIPPrototype) isaFloatingIPPrototype() bool {
+ return true
+}
+
+type FloatingIPPrototypeIntf interface {
+ isaFloatingIPPrototype() bool
+}
+
+// UnmarshalFloatingIPPrototype unmarshals an instance of FloatingIPPrototype from the specified map of raw messages.
+func UnmarshalFloatingIPPrototype(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(FloatingIPPrototype)
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalFloatingIPByTargetNetworkInterfaceIdentity)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// FloatingIPReference : FloatingIPReference struct
+type FloatingIPReference struct {
+ // The globally unique IP address.
+ Address *string `json:"address" validate:"required"`
+
+ // The CRN for this floating IP.
+ CRN *string `json:"crn" validate:"required"`
+
+ // The URL for this floating IP.
+ Href *string `json:"href" validate:"required"`
+
+ // The unique identifier for this floating IP.
+ ID *string `json:"id" validate:"required"`
+
+ // The unique user-defined name for this floating IP.
+ Name *string `json:"name" validate:"required"` +} + +// UnmarshalFloatingIPReference unmarshals an instance of FloatingIPReference from the specified map of raw messages. +func UnmarshalFloatingIPReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPReference) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPTarget : The target of this floating IP. +// Models which "extend" this model: +// - FloatingIPTargetNetworkInterfaceReference +// - FloatingIPTargetPublicGatewayReference +type FloatingIPTarget struct { + // The URL for this network interface. + Href *string `json:"href,omitempty"` + + // The unique identifier for this network interface. + ID *string `json:"id,omitempty"` + + // The user-defined name for this network interface. + Name *string `json:"name,omitempty"` + + // The primary IPv4 address. + PrimaryIpv4Address *string `json:"primary_ipv4_address,omitempty"` + + // The resource type. + ResourceType *string `json:"resource_type,omitempty"` + + // The CRN for this public gateway. + CRN *string `json:"crn,omitempty"` +} + +// Constants associated with the FloatingIPTarget.ResourceType property. +// The resource type. +const ( + FloatingIPTargetResourceTypeNetworkInterfaceConst = "network_interface" +) + +func (*FloatingIPTarget) isaFloatingIPTarget() bool { + return true +} + +type FloatingIPTargetIntf interface { + isaFloatingIPTarget() bool +} + +// UnmarshalFloatingIPTarget unmarshals an instance of FloatingIPTarget from the specified map of raw messages. +func UnmarshalFloatingIPTarget(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPTarget) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_ipv4_address", &obj.PrimaryIpv4Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPUnpaginatedCollection : FloatingIPUnpaginatedCollection struct +type FloatingIPUnpaginatedCollection struct { + // Collection of floating IPs. + FloatingIps []FloatingIP `json:"floating_ips" validate:"required"` +} + +// UnmarshalFloatingIPUnpaginatedCollection unmarshals an instance of FloatingIPUnpaginatedCollection from the specified map of raw messages. +func UnmarshalFloatingIPUnpaginatedCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPUnpaginatedCollection) + err = core.UnmarshalModel(m, "floating_ips", &obj.FloatingIps, UnmarshalFloatingIP) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// GetFloatingIPOptions : The GetFloatingIP options. 
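+// The Get*Options types below all follow the same builder pattern. A minimal
+// usage sketch (illustrative; assumes an authenticated VpcClassicV1 value named
+// vpcService and a placeholder identifier):
+//
+//   getOptions := vpcService.NewGetFloatingIPOptions("floating-ip-id")
+//   floatingIP, response, err := vpcService.GetFloatingIP(getOptions)
+//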
+type GetFloatingIPOptions struct { + // The floating IP identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetFloatingIPOptions : Instantiate GetFloatingIPOptions +func (*VpcClassicV1) NewGetFloatingIPOptions(id string) *GetFloatingIPOptions { + return &GetFloatingIPOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetFloatingIPOptions) SetID(id string) *GetFloatingIPOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetFloatingIPOptions) SetHeaders(param map[string]string) *GetFloatingIPOptions { + options.Headers = param + return options +} + +// GetIkePolicyOptions : The GetIkePolicy options. +type GetIkePolicyOptions struct { + // The IKE policy identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetIkePolicyOptions : Instantiate GetIkePolicyOptions +func (*VpcClassicV1) NewGetIkePolicyOptions(id string) *GetIkePolicyOptions { + return &GetIkePolicyOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetIkePolicyOptions) SetID(id string) *GetIkePolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetIkePolicyOptions) SetHeaders(param map[string]string) *GetIkePolicyOptions { + options.Headers = param + return options +} + +// GetImageOptions : The GetImage options. +type GetImageOptions struct { + // The image identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetImageOptions : Instantiate GetImageOptions +func (*VpcClassicV1) NewGetImageOptions(id string) *GetImageOptions { + return &GetImageOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetImageOptions) SetID(id string) *GetImageOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetImageOptions) SetHeaders(param map[string]string) *GetImageOptions { + options.Headers = param + return options +} + +// GetInstanceInitializationOptions : The GetInstanceInitialization options. +type GetInstanceInitializationOptions struct { + // The instance identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetInstanceInitializationOptions : Instantiate GetInstanceInitializationOptions +func (*VpcClassicV1) NewGetInstanceInitializationOptions(id string) *GetInstanceInitializationOptions { + return &GetInstanceInitializationOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetInstanceInitializationOptions) SetID(id string) *GetInstanceInitializationOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetInstanceInitializationOptions) SetHeaders(param map[string]string) *GetInstanceInitializationOptions { + options.Headers = param + return options +} + +// GetInstanceNetworkInterfaceFloatingIPOptions : The GetInstanceNetworkInterfaceFloatingIP options. +type GetInstanceNetworkInterfaceFloatingIPOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The network interface identifier. 
+ NetworkInterfaceID *string `validate:"required,ne="` + + // The floating IP identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetInstanceNetworkInterfaceFloatingIPOptions : Instantiate GetInstanceNetworkInterfaceFloatingIPOptions +func (*VpcClassicV1) NewGetInstanceNetworkInterfaceFloatingIPOptions(instanceID string, networkInterfaceID string, id string) *GetInstanceNetworkInterfaceFloatingIPOptions { + return &GetInstanceNetworkInterfaceFloatingIPOptions{ + InstanceID: core.StringPtr(instanceID), + NetworkInterfaceID: core.StringPtr(networkInterfaceID), + ID: core.StringPtr(id), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *GetInstanceNetworkInterfaceFloatingIPOptions) SetInstanceID(instanceID string) *GetInstanceNetworkInterfaceFloatingIPOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetNetworkInterfaceID : Allow user to set NetworkInterfaceID +func (options *GetInstanceNetworkInterfaceFloatingIPOptions) SetNetworkInterfaceID(networkInterfaceID string) *GetInstanceNetworkInterfaceFloatingIPOptions { + options.NetworkInterfaceID = core.StringPtr(networkInterfaceID) + return options +} + +// SetID : Allow user to set ID +func (options *GetInstanceNetworkInterfaceFloatingIPOptions) SetID(id string) *GetInstanceNetworkInterfaceFloatingIPOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetInstanceNetworkInterfaceFloatingIPOptions) SetHeaders(param map[string]string) *GetInstanceNetworkInterfaceFloatingIPOptions { + options.Headers = param + return options +} + +// GetInstanceNetworkInterfaceOptions : The GetInstanceNetworkInterface options. +type GetInstanceNetworkInterfaceOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The network interface identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetInstanceNetworkInterfaceOptions : Instantiate GetInstanceNetworkInterfaceOptions +func (*VpcClassicV1) NewGetInstanceNetworkInterfaceOptions(instanceID string, id string) *GetInstanceNetworkInterfaceOptions { + return &GetInstanceNetworkInterfaceOptions{ + InstanceID: core.StringPtr(instanceID), + ID: core.StringPtr(id), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *GetInstanceNetworkInterfaceOptions) SetInstanceID(instanceID string) *GetInstanceNetworkInterfaceOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetID : Allow user to set ID +func (options *GetInstanceNetworkInterfaceOptions) SetID(id string) *GetInstanceNetworkInterfaceOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetInstanceNetworkInterfaceOptions) SetHeaders(param map[string]string) *GetInstanceNetworkInterfaceOptions { + options.Headers = param + return options +} + +// GetInstanceOptions : The GetInstance options. +type GetInstanceOptions struct { + // The instance identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetInstanceOptions : Instantiate GetInstanceOptions +func (*VpcClassicV1) NewGetInstanceOptions(id string) *GetInstanceOptions { + return &GetInstanceOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetInstanceOptions) SetID(id string) *GetInstanceOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetInstanceOptions) SetHeaders(param map[string]string) *GetInstanceOptions { + options.Headers = param + return options +} + +// GetInstanceProfileOptions : The GetInstanceProfile options. +type GetInstanceProfileOptions struct { + // The instance profile name. + Name *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetInstanceProfileOptions : Instantiate GetInstanceProfileOptions +func (*VpcClassicV1) NewGetInstanceProfileOptions(name string) *GetInstanceProfileOptions { + return &GetInstanceProfileOptions{ + Name: core.StringPtr(name), + } +} + +// SetName : Allow user to set Name +func (options *GetInstanceProfileOptions) SetName(name string) *GetInstanceProfileOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetInstanceProfileOptions) SetHeaders(param map[string]string) *GetInstanceProfileOptions { + options.Headers = param + return options +} + +// GetInstanceVolumeAttachmentOptions : The GetInstanceVolumeAttachment options. +type GetInstanceVolumeAttachmentOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The volume attachment identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetInstanceVolumeAttachmentOptions : Instantiate GetInstanceVolumeAttachmentOptions +func (*VpcClassicV1) NewGetInstanceVolumeAttachmentOptions(instanceID string, id string) *GetInstanceVolumeAttachmentOptions { + return &GetInstanceVolumeAttachmentOptions{ + InstanceID: core.StringPtr(instanceID), + ID: core.StringPtr(id), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *GetInstanceVolumeAttachmentOptions) SetInstanceID(instanceID string) *GetInstanceVolumeAttachmentOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetID : Allow user to set ID +func (options *GetInstanceVolumeAttachmentOptions) SetID(id string) *GetInstanceVolumeAttachmentOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetInstanceVolumeAttachmentOptions) SetHeaders(param map[string]string) *GetInstanceVolumeAttachmentOptions { + options.Headers = param + return options +} + +// GetIpsecPolicyOptions : The GetIpsecPolicy options. +type GetIpsecPolicyOptions struct { + // The IPsec policy identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetIpsecPolicyOptions : Instantiate GetIpsecPolicyOptions +func (*VpcClassicV1) NewGetIpsecPolicyOptions(id string) *GetIpsecPolicyOptions { + return &GetIpsecPolicyOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetIpsecPolicyOptions) SetID(id string) *GetIpsecPolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetIpsecPolicyOptions) SetHeaders(param map[string]string) *GetIpsecPolicyOptions { + options.Headers = param + return options +} + +// GetKeyOptions : The GetKey options. +type GetKeyOptions struct { + // The key identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetKeyOptions : Instantiate GetKeyOptions +func (*VpcClassicV1) NewGetKeyOptions(id string) *GetKeyOptions { + return &GetKeyOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetKeyOptions) SetID(id string) *GetKeyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetKeyOptions) SetHeaders(param map[string]string) *GetKeyOptions { + options.Headers = param + return options +} + +// GetLoadBalancerListenerOptions : The GetLoadBalancerListener options. +type GetLoadBalancerListenerOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerListenerOptions : Instantiate GetLoadBalancerListenerOptions +func (*VpcClassicV1) NewGetLoadBalancerListenerOptions(loadBalancerID string, id string) *GetLoadBalancerListenerOptions { + return &GetLoadBalancerListenerOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *GetLoadBalancerListenerOptions) SetLoadBalancerID(loadBalancerID string) *GetLoadBalancerListenerOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetID : Allow user to set ID +func (options *GetLoadBalancerListenerOptions) SetID(id string) *GetLoadBalancerListenerOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerListenerOptions) SetHeaders(param map[string]string) *GetLoadBalancerListenerOptions { + options.Headers = param + return options +} + +// GetLoadBalancerListenerPolicyOptions : The GetLoadBalancerListenerPolicy options. +type GetLoadBalancerListenerPolicyOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // The policy identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerListenerPolicyOptions : Instantiate GetLoadBalancerListenerPolicyOptions +func (*VpcClassicV1) NewGetLoadBalancerListenerPolicyOptions(loadBalancerID string, listenerID string, id string) *GetLoadBalancerListenerPolicyOptions { + return &GetLoadBalancerListenerPolicyOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *GetLoadBalancerListenerPolicyOptions) SetLoadBalancerID(loadBalancerID string) *GetLoadBalancerListenerPolicyOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *GetLoadBalancerListenerPolicyOptions) SetListenerID(listenerID string) *GetLoadBalancerListenerPolicyOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetID : Allow user to set ID +func (options *GetLoadBalancerListenerPolicyOptions) SetID(id string) *GetLoadBalancerListenerPolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerListenerPolicyOptions) SetHeaders(param map[string]string) *GetLoadBalancerListenerPolicyOptions { + options.Headers = param + return options +} + +// GetLoadBalancerListenerPolicyRuleOptions : The GetLoadBalancerListenerPolicyRule options. +type GetLoadBalancerListenerPolicyRuleOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // The policy identifier. + PolicyID *string `validate:"required,ne="` + + // The rule identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerListenerPolicyRuleOptions : Instantiate GetLoadBalancerListenerPolicyRuleOptions +func (*VpcClassicV1) NewGetLoadBalancerListenerPolicyRuleOptions(loadBalancerID string, listenerID string, policyID string, id string) *GetLoadBalancerListenerPolicyRuleOptions { + return &GetLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + PolicyID: core.StringPtr(policyID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *GetLoadBalancerListenerPolicyRuleOptions) SetLoadBalancerID(loadBalancerID string) *GetLoadBalancerListenerPolicyRuleOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *GetLoadBalancerListenerPolicyRuleOptions) SetListenerID(listenerID string) *GetLoadBalancerListenerPolicyRuleOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetPolicyID : Allow user to set PolicyID +func (options *GetLoadBalancerListenerPolicyRuleOptions) SetPolicyID(policyID string) *GetLoadBalancerListenerPolicyRuleOptions { + options.PolicyID = core.StringPtr(policyID) + return options +} + +// SetID : Allow user to set ID +func (options *GetLoadBalancerListenerPolicyRuleOptions) SetID(id string) *GetLoadBalancerListenerPolicyRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerListenerPolicyRuleOptions) SetHeaders(param map[string]string) *GetLoadBalancerListenerPolicyRuleOptions { + options.Headers = param + return options +} + +// GetLoadBalancerOptions : The GetLoadBalancer options. +type GetLoadBalancerOptions struct { + // The load balancer identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerOptions : Instantiate GetLoadBalancerOptions +func (*VpcClassicV1) NewGetLoadBalancerOptions(id string) *GetLoadBalancerOptions { + return &GetLoadBalancerOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetLoadBalancerOptions) SetID(id string) *GetLoadBalancerOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerOptions) SetHeaders(param map[string]string) *GetLoadBalancerOptions { + options.Headers = param + return options +} + +// GetLoadBalancerPoolMemberOptions : The GetLoadBalancerPoolMember options. +type GetLoadBalancerPoolMemberOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The pool identifier. + PoolID *string `validate:"required,ne="` + + // The member identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerPoolMemberOptions : Instantiate GetLoadBalancerPoolMemberOptions +func (*VpcClassicV1) NewGetLoadBalancerPoolMemberOptions(loadBalancerID string, poolID string, id string) *GetLoadBalancerPoolMemberOptions { + return &GetLoadBalancerPoolMemberOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + PoolID: core.StringPtr(poolID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *GetLoadBalancerPoolMemberOptions) SetLoadBalancerID(loadBalancerID string) *GetLoadBalancerPoolMemberOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetPoolID : Allow user to set PoolID +func (options *GetLoadBalancerPoolMemberOptions) SetPoolID(poolID string) *GetLoadBalancerPoolMemberOptions { + options.PoolID = core.StringPtr(poolID) + return options +} + +// SetID : Allow user to set ID +func (options *GetLoadBalancerPoolMemberOptions) SetID(id string) *GetLoadBalancerPoolMemberOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerPoolMemberOptions) SetHeaders(param map[string]string) *GetLoadBalancerPoolMemberOptions { + options.Headers = param + return options +} + +// GetLoadBalancerPoolOptions : The GetLoadBalancerPool options. +type GetLoadBalancerPoolOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The pool identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerPoolOptions : Instantiate GetLoadBalancerPoolOptions +func (*VpcClassicV1) NewGetLoadBalancerPoolOptions(loadBalancerID string, id string) *GetLoadBalancerPoolOptions { + return &GetLoadBalancerPoolOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ID: core.StringPtr(id), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *GetLoadBalancerPoolOptions) SetLoadBalancerID(loadBalancerID string) *GetLoadBalancerPoolOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetID : Allow user to set ID +func (options *GetLoadBalancerPoolOptions) SetID(id string) *GetLoadBalancerPoolOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerPoolOptions) SetHeaders(param map[string]string) *GetLoadBalancerPoolOptions { + options.Headers = param + return options +} + +// GetLoadBalancerStatisticsOptions : The GetLoadBalancerStatistics options. +type GetLoadBalancerStatisticsOptions struct { + // The load balancer identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetLoadBalancerStatisticsOptions : Instantiate GetLoadBalancerStatisticsOptions +func (*VpcClassicV1) NewGetLoadBalancerStatisticsOptions(id string) *GetLoadBalancerStatisticsOptions { + return &GetLoadBalancerStatisticsOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetLoadBalancerStatisticsOptions) SetID(id string) *GetLoadBalancerStatisticsOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetLoadBalancerStatisticsOptions) SetHeaders(param map[string]string) *GetLoadBalancerStatisticsOptions { + options.Headers = param + return options +} + +// GetNetworkACLOptions : The GetNetworkACL options. +type GetNetworkACLOptions struct { + // The network ACL identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetNetworkACLOptions : Instantiate GetNetworkACLOptions +func (*VpcClassicV1) NewGetNetworkACLOptions(id string) *GetNetworkACLOptions { + return &GetNetworkACLOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetNetworkACLOptions) SetID(id string) *GetNetworkACLOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetNetworkACLOptions) SetHeaders(param map[string]string) *GetNetworkACLOptions { + options.Headers = param + return options +} + +// GetNetworkACLRuleOptions : The GetNetworkACLRule options. +type GetNetworkACLRuleOptions struct { + // The network ACL identifier. + NetworkACLID *string `validate:"required,ne="` + + // The rule identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetNetworkACLRuleOptions : Instantiate GetNetworkACLRuleOptions +func (*VpcClassicV1) NewGetNetworkACLRuleOptions(networkACLID string, id string) *GetNetworkACLRuleOptions { + return &GetNetworkACLRuleOptions{ + NetworkACLID: core.StringPtr(networkACLID), + ID: core.StringPtr(id), + } +} + +// SetNetworkACLID : Allow user to set NetworkACLID +func (options *GetNetworkACLRuleOptions) SetNetworkACLID(networkACLID string) *GetNetworkACLRuleOptions { + options.NetworkACLID = core.StringPtr(networkACLID) + return options +} + +// SetID : Allow user to set ID +func (options *GetNetworkACLRuleOptions) SetID(id string) *GetNetworkACLRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetNetworkACLRuleOptions) SetHeaders(param map[string]string) *GetNetworkACLRuleOptions { + options.Headers = param + return options +} + +// GetOperatingSystemOptions : The GetOperatingSystem options. +type GetOperatingSystemOptions struct { + // The operating system name. 
+ Name *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetOperatingSystemOptions : Instantiate GetOperatingSystemOptions +func (*VpcClassicV1) NewGetOperatingSystemOptions(name string) *GetOperatingSystemOptions { + return &GetOperatingSystemOptions{ + Name: core.StringPtr(name), + } +} + +// SetName : Allow user to set Name +func (options *GetOperatingSystemOptions) SetName(name string) *GetOperatingSystemOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetOperatingSystemOptions) SetHeaders(param map[string]string) *GetOperatingSystemOptions { + options.Headers = param + return options +} + +// GetPublicGatewayOptions : The GetPublicGateway options. +type GetPublicGatewayOptions struct { + // The public gateway identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetPublicGatewayOptions : Instantiate GetPublicGatewayOptions +func (*VpcClassicV1) NewGetPublicGatewayOptions(id string) *GetPublicGatewayOptions { + return &GetPublicGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetPublicGatewayOptions) SetID(id string) *GetPublicGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetPublicGatewayOptions) SetHeaders(param map[string]string) *GetPublicGatewayOptions { + options.Headers = param + return options +} + +// GetRegionOptions : The GetRegion options. +type GetRegionOptions struct { + // The region name. + Name *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetRegionOptions : Instantiate GetRegionOptions +func (*VpcClassicV1) NewGetRegionOptions(name string) *GetRegionOptions { + return &GetRegionOptions{ + Name: core.StringPtr(name), + } +} + +// SetName : Allow user to set Name +func (options *GetRegionOptions) SetName(name string) *GetRegionOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetRegionOptions) SetHeaders(param map[string]string) *GetRegionOptions { + options.Headers = param + return options +} + +// GetRegionZoneOptions : The GetRegionZone options. +type GetRegionZoneOptions struct { + // The region name. + RegionName *string `validate:"required,ne="` + + // The zone name. 
+ Name *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetRegionZoneOptions : Instantiate GetRegionZoneOptions +func (*VpcClassicV1) NewGetRegionZoneOptions(regionName string, name string) *GetRegionZoneOptions { + return &GetRegionZoneOptions{ + RegionName: core.StringPtr(regionName), + Name: core.StringPtr(name), + } +} + +// SetRegionName : Allow user to set RegionName +func (options *GetRegionZoneOptions) SetRegionName(regionName string) *GetRegionZoneOptions { + options.RegionName = core.StringPtr(regionName) + return options +} + +// SetName : Allow user to set Name +func (options *GetRegionZoneOptions) SetName(name string) *GetRegionZoneOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetRegionZoneOptions) SetHeaders(param map[string]string) *GetRegionZoneOptions { + options.Headers = param + return options +} + +// GetSecurityGroupNetworkInterfaceOptions : The GetSecurityGroupNetworkInterface options. +type GetSecurityGroupNetworkInterfaceOptions struct { + // The security group identifier. + SecurityGroupID *string `validate:"required,ne="` + + // The network interface identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSecurityGroupNetworkInterfaceOptions : Instantiate GetSecurityGroupNetworkInterfaceOptions +func (*VpcClassicV1) NewGetSecurityGroupNetworkInterfaceOptions(securityGroupID string, id string) *GetSecurityGroupNetworkInterfaceOptions { + return &GetSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: core.StringPtr(securityGroupID), + ID: core.StringPtr(id), + } +} + +// SetSecurityGroupID : Allow user to set SecurityGroupID +func (options *GetSecurityGroupNetworkInterfaceOptions) SetSecurityGroupID(securityGroupID string) *GetSecurityGroupNetworkInterfaceOptions { + options.SecurityGroupID = core.StringPtr(securityGroupID) + return options +} + +// SetID : Allow user to set ID +func (options *GetSecurityGroupNetworkInterfaceOptions) SetID(id string) *GetSecurityGroupNetworkInterfaceOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSecurityGroupNetworkInterfaceOptions) SetHeaders(param map[string]string) *GetSecurityGroupNetworkInterfaceOptions { + options.Headers = param + return options +} + +// GetSecurityGroupOptions : The GetSecurityGroup options. +type GetSecurityGroupOptions struct { + // The security group identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSecurityGroupOptions : Instantiate GetSecurityGroupOptions +func (*VpcClassicV1) NewGetSecurityGroupOptions(id string) *GetSecurityGroupOptions { + return &GetSecurityGroupOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetSecurityGroupOptions) SetID(id string) *GetSecurityGroupOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSecurityGroupOptions) SetHeaders(param map[string]string) *GetSecurityGroupOptions { + options.Headers = param + return options +} + +// GetSecurityGroupRuleOptions : The GetSecurityGroupRule options. +type GetSecurityGroupRuleOptions struct { + // The security group identifier. 
+ SecurityGroupID *string `validate:"required,ne="` + + // The rule identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSecurityGroupRuleOptions : Instantiate GetSecurityGroupRuleOptions +func (*VpcClassicV1) NewGetSecurityGroupRuleOptions(securityGroupID string, id string) *GetSecurityGroupRuleOptions { + return &GetSecurityGroupRuleOptions{ + SecurityGroupID: core.StringPtr(securityGroupID), + ID: core.StringPtr(id), + } +} + +// SetSecurityGroupID : Allow user to set SecurityGroupID +func (options *GetSecurityGroupRuleOptions) SetSecurityGroupID(securityGroupID string) *GetSecurityGroupRuleOptions { + options.SecurityGroupID = core.StringPtr(securityGroupID) + return options +} + +// SetID : Allow user to set ID +func (options *GetSecurityGroupRuleOptions) SetID(id string) *GetSecurityGroupRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSecurityGroupRuleOptions) SetHeaders(param map[string]string) *GetSecurityGroupRuleOptions { + options.Headers = param + return options +} + +// GetSubnetNetworkACLOptions : The GetSubnetNetworkACL options. +type GetSubnetNetworkACLOptions struct { + // The subnet identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSubnetNetworkACLOptions : Instantiate GetSubnetNetworkACLOptions +func (*VpcClassicV1) NewGetSubnetNetworkACLOptions(id string) *GetSubnetNetworkACLOptions { + return &GetSubnetNetworkACLOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetSubnetNetworkACLOptions) SetID(id string) *GetSubnetNetworkACLOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSubnetNetworkACLOptions) SetHeaders(param map[string]string) *GetSubnetNetworkACLOptions { + options.Headers = param + return options +} + +// GetSubnetOptions : The GetSubnet options. +type GetSubnetOptions struct { + // The subnet identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSubnetOptions : Instantiate GetSubnetOptions +func (*VpcClassicV1) NewGetSubnetOptions(id string) *GetSubnetOptions { + return &GetSubnetOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetSubnetOptions) SetID(id string) *GetSubnetOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSubnetOptions) SetHeaders(param map[string]string) *GetSubnetOptions { + options.Headers = param + return options +} + +// GetSubnetPublicGatewayOptions : The GetSubnetPublicGateway options. +type GetSubnetPublicGatewayOptions struct { + // The subnet identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSubnetPublicGatewayOptions : Instantiate GetSubnetPublicGatewayOptions +func (*VpcClassicV1) NewGetSubnetPublicGatewayOptions(id string) *GetSubnetPublicGatewayOptions { + return &GetSubnetPublicGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetSubnetPublicGatewayOptions) SetID(id string) *GetSubnetPublicGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSubnetPublicGatewayOptions) SetHeaders(param map[string]string) *GetSubnetPublicGatewayOptions { + options.Headers = param + return options +} + +// GetVolumeOptions : The GetVolume options. +type GetVolumeOptions struct { + // The volume identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetVolumeOptions : Instantiate GetVolumeOptions +func (*VpcClassicV1) NewGetVolumeOptions(id string) *GetVolumeOptions { + return &GetVolumeOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetVolumeOptions) SetID(id string) *GetVolumeOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetVolumeOptions) SetHeaders(param map[string]string) *GetVolumeOptions { + options.Headers = param + return options +} + +// GetVolumeProfileOptions : The GetVolumeProfile options. +type GetVolumeProfileOptions struct { + // The volume profile name. + Name *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetVolumeProfileOptions : Instantiate GetVolumeProfileOptions +func (*VpcClassicV1) NewGetVolumeProfileOptions(name string) *GetVolumeProfileOptions { + return &GetVolumeProfileOptions{ + Name: core.StringPtr(name), + } +} + +// SetName : Allow user to set Name +func (options *GetVolumeProfileOptions) SetName(name string) *GetVolumeProfileOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetVolumeProfileOptions) SetHeaders(param map[string]string) *GetVolumeProfileOptions { + options.Headers = param + return options +} + +// GetVPCAddressPrefixOptions : The GetVPCAddressPrefix options. +type GetVPCAddressPrefixOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // The prefix identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetVPCAddressPrefixOptions : Instantiate GetVPCAddressPrefixOptions +func (*VpcClassicV1) NewGetVPCAddressPrefixOptions(vpcID string, id string) *GetVPCAddressPrefixOptions { + return &GetVPCAddressPrefixOptions{ + VPCID: core.StringPtr(vpcID), + ID: core.StringPtr(id), + } +} + +// SetVPCID : Allow user to set VPCID +func (options *GetVPCAddressPrefixOptions) SetVPCID(vpcID string) *GetVPCAddressPrefixOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetID : Allow user to set ID +func (options *GetVPCAddressPrefixOptions) SetID(id string) *GetVPCAddressPrefixOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetVPCAddressPrefixOptions) SetHeaders(param map[string]string) *GetVPCAddressPrefixOptions { + options.Headers = param + return options +} + +// GetVPCDefaultSecurityGroupOptions : The GetVPCDefaultSecurityGroup options. +type GetVPCDefaultSecurityGroupOptions struct { + // The VPC identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetVPCDefaultSecurityGroupOptions : Instantiate GetVPCDefaultSecurityGroupOptions +func (*VpcClassicV1) NewGetVPCDefaultSecurityGroupOptions(id string) *GetVPCDefaultSecurityGroupOptions { + return &GetVPCDefaultSecurityGroupOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetVPCDefaultSecurityGroupOptions) SetID(id string) *GetVPCDefaultSecurityGroupOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetVPCDefaultSecurityGroupOptions) SetHeaders(param map[string]string) *GetVPCDefaultSecurityGroupOptions { + options.Headers = param + return options +} + +// GetVPCOptions : The GetVPC options. +type GetVPCOptions struct { + // The VPC identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetVPCOptions : Instantiate GetVPCOptions +func (*VpcClassicV1) NewGetVPCOptions(id string) *GetVPCOptions { + return &GetVPCOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetVPCOptions) SetID(id string) *GetVPCOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetVPCOptions) SetHeaders(param map[string]string) *GetVPCOptions { + options.Headers = param + return options +} + +// GetVPCRouteOptions : The GetVPCRoute options. +type GetVPCRouteOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // The route identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetVPCRouteOptions : Instantiate GetVPCRouteOptions +func (*VpcClassicV1) NewGetVPCRouteOptions(vpcID string, id string) *GetVPCRouteOptions { + return &GetVPCRouteOptions{ + VPCID: core.StringPtr(vpcID), + ID: core.StringPtr(id), + } +} + +// SetVPCID : Allow user to set VPCID +func (options *GetVPCRouteOptions) SetVPCID(vpcID string) *GetVPCRouteOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetID : Allow user to set ID +func (options *GetVPCRouteOptions) SetID(id string) *GetVPCRouteOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetVPCRouteOptions) SetHeaders(param map[string]string) *GetVPCRouteOptions { + options.Headers = param + return options +} + +// GetVPNGatewayConnectionOptions : The GetVPNGatewayConnection options. +type GetVPNGatewayConnectionOptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetVPNGatewayConnectionOptions : Instantiate GetVPNGatewayConnectionOptions +func (*VpcClassicV1) NewGetVPNGatewayConnectionOptions(vpnGatewayID string, id string) *GetVPNGatewayConnectionOptions { + return &GetVPNGatewayConnectionOptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *GetVPNGatewayConnectionOptions) SetVPNGatewayID(vpnGatewayID string) *GetVPNGatewayConnectionOptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *GetVPNGatewayConnectionOptions) SetID(id string) *GetVPNGatewayConnectionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetVPNGatewayConnectionOptions) SetHeaders(param map[string]string) *GetVPNGatewayConnectionOptions { + options.Headers = param + return options +} + +// GetVPNGatewayOptions : The GetVPNGateway options. +type GetVPNGatewayOptions struct { + // The VPN gateway identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetVPNGatewayOptions : Instantiate GetVPNGatewayOptions +func (*VpcClassicV1) NewGetVPNGatewayOptions(id string) *GetVPNGatewayOptions { + return &GetVPNGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetVPNGatewayOptions) SetID(id string) *GetVPNGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetVPNGatewayOptions) SetHeaders(param map[string]string) *GetVPNGatewayOptions { + options.Headers = param + return options +} + +// IkePolicy : IkePolicy struct +type IkePolicy struct { + // The authentication algorithm. + AuthenticationAlgorithm *string `json:"authentication_algorithm" validate:"required"` + + // Collection of references to VPN gateway connections that use this IKE policy. + Connections []VPNGatewayConnectionReference `json:"connections" validate:"required"` + + // The date and time that this IKE policy was created. 
+ CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The Diffie-Hellman group. + DhGroup *int64 `json:"dh_group" validate:"required"` + + // The encryption algorithm. + EncryptionAlgorithm *string `json:"encryption_algorithm" validate:"required"` + + // The IKE policy's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this IKE policy. + ID *string `json:"id" validate:"required"` + + // The IKE protocol version. + IkeVersion *int64 `json:"ike_version" validate:"required"` + + // The key lifetime in seconds. + KeyLifetime *int64 `json:"key_lifetime" validate:"required"` + + // The user-defined name for this IKE policy. + Name *string `json:"name" validate:"required"` + + // The IKE negotiation mode. Only `main` is supported. + NegotiationMode *string `json:"negotiation_mode" validate:"required"` + + // The resource group for this IKE policy. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the IkePolicy.AuthenticationAlgorithm property. +// The authentication algorithm. +const ( + IkePolicyAuthenticationAlgorithmMd5Const = "md5" + IkePolicyAuthenticationAlgorithmSha1Const = "sha1" + IkePolicyAuthenticationAlgorithmSha256Const = "sha256" +) + +// Constants associated with the IkePolicy.EncryptionAlgorithm property. +// The encryption algorithm. +const ( + IkePolicyEncryptionAlgorithmAes128Const = "aes128" + IkePolicyEncryptionAlgorithmAes256Const = "aes256" + IkePolicyEncryptionAlgorithmTripleDesConst = "triple_des" +) + +// Constants associated with the IkePolicy.NegotiationMode property. +// The IKE negotiation mode. Only `main` is supported. +const ( + IkePolicyNegotiationModeMainConst = "main" +) + +// Constants associated with the IkePolicy.ResourceType property. +// The resource type. +const ( + IkePolicyResourceTypeIkePolicyConst = "ike_policy" +) + +// UnmarshalIkePolicy unmarshals an instance of IkePolicy from the specified map of raw messages. 
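+// Generated unmarshal helpers such as this one are normally invoked indirectly
+// by the SDK's response handling (via core.UnmarshalModel) rather than called
+// directly. A minimal direct call would look like this editor's sketch, where
+// `raw` stands for an already-decoded response body:
+//
+//   var raw map[string]json.RawMessage
+//   var policy *IkePolicy
+//   err := UnmarshalIkePolicy(raw, &policy)
+//
+// The `result` argument must be a pointer to a pointer; the final reflect call
+// copies the populated model into it.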
+func UnmarshalIkePolicy(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IkePolicy) + err = core.UnmarshalPrimitive(m, "authentication_algorithm", &obj.AuthenticationAlgorithm) + if err != nil { + return + } + err = core.UnmarshalModel(m, "connections", &obj.Connections, UnmarshalVPNGatewayConnectionReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "dh_group", &obj.DhGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "encryption_algorithm", &obj.EncryptionAlgorithm) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ike_version", &obj.IkeVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "key_lifetime", &obj.KeyLifetime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "negotiation_mode", &obj.NegotiationMode) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IkePolicyCollection : IkePolicyCollection struct +type IkePolicyCollection struct { + // A link to the first page of resources. + First *IkePolicyCollectionFirst `json:"first" validate:"required"` + + // Collection of IKE policies. + IkePolicies []IkePolicy `json:"ike_policies" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *IkePolicyCollectionNext `json:"next,omitempty"` + + // The total number of resources across all pages. + TotalCount *int64 `json:"total_count" validate:"required"` +} + +// UnmarshalIkePolicyCollection unmarshals an instance of IkePolicyCollection from the specified map of raw messages. +func UnmarshalIkePolicyCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IkePolicyCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalIkePolicyCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ike_policies", &obj.IkePolicies, UnmarshalIkePolicy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalIkePolicyCollectionNext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IkePolicyIdentity : Identifies an IKE policy by a unique property. +// Models which "extend" this model: +// - IkePolicyIdentityByID +// - IkePolicyIdentityByHref +type IkePolicyIdentity struct { + // The unique identifier for this IKE policy. + ID *string `json:"id,omitempty"` + + // The IKE policy's canonical URL. 
+ Href *string `json:"href,omitempty"` +} + +func (*IkePolicyIdentity) isaIkePolicyIdentity() bool { + return true +} + +type IkePolicyIdentityIntf interface { + isaIkePolicyIdentity() bool +} + +// UnmarshalIkePolicyIdentity unmarshals an instance of IkePolicyIdentity from the specified map of raw messages. +func UnmarshalIkePolicyIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IkePolicyIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IkePolicyPatch : IkePolicyPatch struct +type IkePolicyPatch struct { + // The authentication algorithm. + AuthenticationAlgorithm *string `json:"authentication_algorithm,omitempty"` + + // The Diffie-Hellman group. + DhGroup *int64 `json:"dh_group,omitempty"` + + // The encryption algorithm. + EncryptionAlgorithm *string `json:"encryption_algorithm,omitempty"` + + // The IKE protocol version. + IkeVersion *int64 `json:"ike_version,omitempty"` + + // The key lifetime in seconds. + KeyLifetime *int64 `json:"key_lifetime,omitempty"` + + // The user-defined name for this IKE policy. + Name *string `json:"name,omitempty"` +} + +// Constants associated with the IkePolicyPatch.AuthenticationAlgorithm property. +// The authentication algorithm. +const ( + IkePolicyPatchAuthenticationAlgorithmMd5Const = "md5" + IkePolicyPatchAuthenticationAlgorithmSha1Const = "sha1" + IkePolicyPatchAuthenticationAlgorithmSha256Const = "sha256" +) + +// Constants associated with the IkePolicyPatch.EncryptionAlgorithm property. +// The encryption algorithm. +const ( + IkePolicyPatchEncryptionAlgorithmAes128Const = "aes128" + IkePolicyPatchEncryptionAlgorithmAes256Const = "aes256" + IkePolicyPatchEncryptionAlgorithmTripleDesConst = "triple_des" +) + +// UnmarshalIkePolicyPatch unmarshals an instance of IkePolicyPatch from the specified map of raw messages. +func UnmarshalIkePolicyPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IkePolicyPatch) + err = core.UnmarshalPrimitive(m, "authentication_algorithm", &obj.AuthenticationAlgorithm) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "dh_group", &obj.DhGroup) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "encryption_algorithm", &obj.EncryptionAlgorithm) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ike_version", &obj.IkeVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "key_lifetime", &obj.KeyLifetime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the IkePolicyPatch +func (ikePolicyPatch *IkePolicyPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(ikePolicyPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// IkePolicyReference : IkePolicyReference struct +type IkePolicyReference struct { + // The IKE policy's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this IKE policy. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this IKE policy. + Name *string `json:"name" validate:"required"` + + // The resource type. 
+ ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the IkePolicyReference.ResourceType property. +// The resource type. +const ( + IkePolicyReferenceResourceTypeIkePolicyConst = "ike_policy" +) + +// UnmarshalIkePolicyReference unmarshals an instance of IkePolicyReference from the specified map of raw messages. +func UnmarshalIkePolicyReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IkePolicyReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IP : IP struct +type IP struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address" validate:"required"` +} + +// UnmarshalIP unmarshals an instance of IP from the specified map of raw messages. +func UnmarshalIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IPsecPolicy : IPsecPolicy struct +type IPsecPolicy struct { + // The authentication algorithm. + AuthenticationAlgorithm *string `json:"authentication_algorithm" validate:"required"` + + // Collection of references to VPN gateway connections that use this IPsec policy. + Connections []VPNGatewayConnectionReference `json:"connections" validate:"required"` + + // The date and time that this IPsec policy was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The encapsulation mode used. Only `tunnel` is supported. + EncapsulationMode *string `json:"encapsulation_mode" validate:"required"` + + // The encryption algorithm. + EncryptionAlgorithm *string `json:"encryption_algorithm" validate:"required"` + + // The IPsec policy's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this IPsec policy. + ID *string `json:"id" validate:"required"` + + // The key lifetime in seconds. + KeyLifetime *int64 `json:"key_lifetime" validate:"required"` + + // The user-defined name for this IPsec policy. + Name *string `json:"name" validate:"required"` + + // Perfect Forward Secrecy. + Pfs *string `json:"pfs" validate:"required"` + + // The resource group for this IPsec policy. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The transform protocol used. Only `esp` is supported. + TransformProtocol *string `json:"transform_protocol" validate:"required"` +} + +// Constants associated with the IPsecPolicy.AuthenticationAlgorithm property. +// The authentication algorithm. 
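+//
+// Since model fields are pointers, callers compare against the dereferenced
+// value (editor's illustration):
+//
+//   if policy.AuthenticationAlgorithm != nil &&
+//   	*policy.AuthenticationAlgorithm == IPsecPolicyAuthenticationAlgorithmSha256Const {
+//   	// the policy authenticates with SHA-256
+//   }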
+const ( + IPsecPolicyAuthenticationAlgorithmMd5Const = "md5" + IPsecPolicyAuthenticationAlgorithmSha1Const = "sha1" + IPsecPolicyAuthenticationAlgorithmSha256Const = "sha256" +) + +// Constants associated with the IPsecPolicy.EncapsulationMode property. +// The encapsulation mode used. Only `tunnel` is supported. +const ( + IPsecPolicyEncapsulationModeTunnelConst = "tunnel" +) + +// Constants associated with the IPsecPolicy.EncryptionAlgorithm property. +// The encryption algorithm. +const ( + IPsecPolicyEncryptionAlgorithmAes128Const = "aes128" + IPsecPolicyEncryptionAlgorithmAes256Const = "aes256" + IPsecPolicyEncryptionAlgorithmTripleDesConst = "triple_des" +) + +// Constants associated with the IPsecPolicy.Pfs property. +// Perfect Forward Secrecy. +const ( + IPsecPolicyPfsDisabledConst = "disabled" + IPsecPolicyPfsGroup14Const = "group_14" + IPsecPolicyPfsGroup2Const = "group_2" + IPsecPolicyPfsGroup5Const = "group_5" +) + +// Constants associated with the IPsecPolicy.ResourceType property. +// The resource type. +const ( + IPsecPolicyResourceTypeIpsecPolicyConst = "ipsec_policy" +) + +// Constants associated with the IPsecPolicy.TransformProtocol property. +// The transform protocol used. Only `esp` is supported. +const ( + IPsecPolicyTransformProtocolEspConst = "esp" +) + +// UnmarshalIPsecPolicy unmarshals an instance of IPsecPolicy from the specified map of raw messages. +func UnmarshalIPsecPolicy(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IPsecPolicy) + err = core.UnmarshalPrimitive(m, "authentication_algorithm", &obj.AuthenticationAlgorithm) + if err != nil { + return + } + err = core.UnmarshalModel(m, "connections", &obj.Connections, UnmarshalVPNGatewayConnectionReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "encapsulation_mode", &obj.EncapsulationMode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "encryption_algorithm", &obj.EncryptionAlgorithm) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "key_lifetime", &obj.KeyLifetime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "pfs", &obj.Pfs) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "transform_protocol", &obj.TransformProtocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IPsecPolicyCollection : IPsecPolicyCollection struct +type IPsecPolicyCollection struct { + // A link to the first page of resources. + First *IPsecPolicyCollectionFirst `json:"first" validate:"required"` + + // Collection of IPsec policies. + IpsecPolicies []IPsecPolicy `json:"ipsec_policies" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. 
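+	// Editor's note (illustrative): callers typically page through the collection
+	// by re-issuing the list request with the `start` token carried in this URL
+	// until no `next` link is returned.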
+ Next *IPsecPolicyCollectionNext `json:"next,omitempty"` + + // The total number of resources across all pages. + TotalCount *int64 `json:"total_count" validate:"required"` +} + +// UnmarshalIPsecPolicyCollection unmarshals an instance of IPsecPolicyCollection from the specified map of raw messages. +func UnmarshalIPsecPolicyCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IPsecPolicyCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalIPsecPolicyCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ipsec_policies", &obj.IpsecPolicies, UnmarshalIPsecPolicy) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalIPsecPolicyCollectionNext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IPsecPolicyCollectionFirst : A link to the first page of resources. +type IPsecPolicyCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalIPsecPolicyCollectionFirst unmarshals an instance of IPsecPolicyCollectionFirst from the specified map of raw messages. +func UnmarshalIPsecPolicyCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IPsecPolicyCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IPsecPolicyCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type IPsecPolicyCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalIPsecPolicyCollectionNext unmarshals an instance of IPsecPolicyCollectionNext from the specified map of raw messages. +func UnmarshalIPsecPolicyCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IPsecPolicyCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IPsecPolicyIdentity : Identifies an IPsec policy by a unique property. +// Models which "extend" this model: +// - IPsecPolicyIdentityByID +// - IPsecPolicyIdentityByHref +type IPsecPolicyIdentity struct { + // The unique identifier for this IPsec policy. + ID *string `json:"id,omitempty"` + + // The IPsec policy's canonical URL. + Href *string `json:"href,omitempty"` +} + +func (*IPsecPolicyIdentity) isaIPsecPolicyIdentity() bool { + return true +} + +type IPsecPolicyIdentityIntf interface { + isaIPsecPolicyIdentity() bool +} + +// UnmarshalIPsecPolicyIdentity unmarshals an instance of IPsecPolicyIdentity from the specified map of raw messages. +func UnmarshalIPsecPolicyIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IPsecPolicyIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IPsecPolicyPatch : IPsecPolicyPatch struct +type IPsecPolicyPatch struct { + // The authentication algorithm. 
+ AuthenticationAlgorithm *string `json:"authentication_algorithm,omitempty"` + + // The encryption algorithm. + EncryptionAlgorithm *string `json:"encryption_algorithm,omitempty"` + + // The key lifetime in seconds. + KeyLifetime *int64 `json:"key_lifetime,omitempty"` + + // The user-defined name for this IPsec policy. + Name *string `json:"name,omitempty"` + + // Perfect Forward Secrecy. + Pfs *string `json:"pfs,omitempty"` +} + +// Constants associated with the IPsecPolicyPatch.AuthenticationAlgorithm property. +// The authentication algorithm. +const ( + IPsecPolicyPatchAuthenticationAlgorithmMd5Const = "md5" + IPsecPolicyPatchAuthenticationAlgorithmSha1Const = "sha1" + IPsecPolicyPatchAuthenticationAlgorithmSha256Const = "sha256" +) + +// Constants associated with the IPsecPolicyPatch.EncryptionAlgorithm property. +// The encryption algorithm. +const ( + IPsecPolicyPatchEncryptionAlgorithmAes128Const = "aes128" + IPsecPolicyPatchEncryptionAlgorithmAes256Const = "aes256" + IPsecPolicyPatchEncryptionAlgorithmTripleDesConst = "triple_des" +) + +// Constants associated with the IPsecPolicyPatch.Pfs property. +// Perfect Forward Secrecy. +const ( + IPsecPolicyPatchPfsDisabledConst = "disabled" + IPsecPolicyPatchPfsGroup14Const = "group_14" + IPsecPolicyPatchPfsGroup2Const = "group_2" + IPsecPolicyPatchPfsGroup5Const = "group_5" +) + +// UnmarshalIPsecPolicyPatch unmarshals an instance of IPsecPolicyPatch from the specified map of raw messages. +func UnmarshalIPsecPolicyPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IPsecPolicyPatch) + err = core.UnmarshalPrimitive(m, "authentication_algorithm", &obj.AuthenticationAlgorithm) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "encryption_algorithm", &obj.EncryptionAlgorithm) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "key_lifetime", &obj.KeyLifetime) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "pfs", &obj.Pfs) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the IPsecPolicyPatch +func (iPsecPolicyPatch *IPsecPolicyPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(iPsecPolicyPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// IPsecPolicyReference : IPsecPolicyReference struct +type IPsecPolicyReference struct { + // The IPsec policy's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this IPsec policy. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this IPsec policy. + Name *string `json:"name" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the IPsecPolicyReference.ResourceType property. +// The resource type. +const ( + IPsecPolicyReferenceResourceTypeIpsecPolicyConst = "ipsec_policy" +) + +// UnmarshalIPsecPolicyReference unmarshals an instance of IPsecPolicyReference from the specified map of raw messages. 
+func UnmarshalIPsecPolicyReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IPsecPolicyReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IkePolicyCollectionFirst : A link to the first page of resources. +type IkePolicyCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalIkePolicyCollectionFirst unmarshals an instance of IkePolicyCollectionFirst from the specified map of raw messages. +func UnmarshalIkePolicyCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IkePolicyCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IkePolicyCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type IkePolicyCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalIkePolicyCollectionNext unmarshals an instance of IkePolicyCollectionNext from the specified map of raw messages. +func UnmarshalIkePolicyCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IkePolicyCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Image : Image struct +type Image struct { + // The date and time that the image was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN for this image. + CRN *string `json:"crn" validate:"required"` + + // Details for the stored image file. + File *ImageFile `json:"file" validate:"required"` + + // The URL for this image. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this image. + ID *string `json:"id" validate:"required"` + + // The minimum size (in gigabytes) of a volume onto which this image may be provisioned. + // + // This property may be absent if the image has a `status` of `pending`, `tentative`, or + // `failed`. + MinimumProvisionedSize *int64 `json:"minimum_provisioned_size,omitempty"` + + // The user-defined or system-provided name for this image. + Name *string `json:"name" validate:"required"` + + // The operating system included in this image. + OperatingSystem *OperatingSystem `json:"operating_system,omitempty"` + + // The resource group for this image. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The status of this image. + Status *string `json:"status" validate:"required"` + + // Whether the image is publicly visible or private to the account. + Visibility *string `json:"visibility" validate:"required"` +} + +// Constants associated with the Image.Status property. +// The status of this image. 
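+//
+// As the Image model notes above, properties such as `minimum_provisioned_size`
+// may be absent while an image is `pending`, `tentative`, or `failed`, so a
+// caller might gate on availability first (editor's illustration):
+//
+//   if image.Status != nil && *image.Status == ImageStatusAvailableConst {
+//   	// the image file is stored and the size properties should be populated
+//   }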
+const ( + ImageStatusAvailableConst = "available" + ImageStatusDeletingConst = "deleting" + ImageStatusDeprecatedConst = "deprecated" + ImageStatusFailedConst = "failed" + ImageStatusPendingConst = "pending" + ImageStatusTentativeConst = "tentative" + ImageStatusUnusableConst = "unusable" +) + +// Constants associated with the Image.Visibility property. +// Whether the image is publicly visible or private to the account. +const ( + ImageVisibilityPrivateConst = "private" + ImageVisibilityPublicConst = "public" +) + +// UnmarshalImage unmarshals an instance of Image from the specified map of raw messages. +func UnmarshalImage(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Image) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalModel(m, "file", &obj.File, UnmarshalImageFile) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "minimum_provisioned_size", &obj.MinimumProvisionedSize) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "operating_system", &obj.OperatingSystem, UnmarshalOperatingSystem) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "visibility", &obj.Visibility) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageCollection : ImageCollection struct +type ImageCollection struct { + // A link to the first page of resources. + First *ImageCollectionFirst `json:"first" validate:"required"` + + // Collection of images. + Images []Image `json:"images" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *ImageCollectionNext `json:"next,omitempty"` +} + +// UnmarshalImageCollection unmarshals an instance of ImageCollection from the specified map of raw messages. +func UnmarshalImageCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalImageCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalModel(m, "images", &obj.Images, UnmarshalImage) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalImageCollectionNext) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageCollectionFirst : A link to the first page of resources. +type ImageCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalImageCollectionFirst unmarshals an instance of ImageCollectionFirst from the specified map of raw messages. 
+func UnmarshalImageCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type ImageCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalImageCollectionNext unmarshals an instance of ImageCollectionNext from the specified map of raw messages. +func UnmarshalImageCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageFile : ImageFile struct +type ImageFile struct { + // The size of the stored image file rounded up to the next gigabyte. + // + // This property may be absent if the associated image has a `status` of `pending` or + // `failed`. + Size *int64 `json:"size,omitempty"` +} + +// UnmarshalImageFile unmarshals an instance of ImageFile from the specified map of raw messages. +func UnmarshalImageFile(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageFile) + err = core.UnmarshalPrimitive(m, "size", &obj.Size) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageFilePrototype : ImageFilePrototype struct +type ImageFilePrototype struct { + // The Cloud Object Store (COS) location of the image file. + Href *string `json:"href" validate:"required"` +} + +// NewImageFilePrototype : Instantiate ImageFilePrototype (Generic Model Constructor) +func (*VpcClassicV1) NewImageFilePrototype(href string) (model *ImageFilePrototype, err error) { + model = &ImageFilePrototype{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalImageFilePrototype unmarshals an instance of ImageFilePrototype from the specified map of raw messages. +func UnmarshalImageFilePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageFilePrototype) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageIdentity : Identifies an image by a unique property. +// Models which "extend" this model: +// - ImageIdentityByID +// - ImageIdentityByCRN +// - ImageIdentityByHref +type ImageIdentity struct { + // The unique identifier for this image. + ID *string `json:"id,omitempty"` + + // The CRN for this image. + CRN *string `json:"crn,omitempty"` + + // The URL for this image. + Href *string `json:"href,omitempty"` +} + +func (*ImageIdentity) isaImageIdentity() bool { + return true +} + +type ImageIdentityIntf interface { + isaImageIdentity() bool +} + +// UnmarshalImageIdentity unmarshals an instance of ImageIdentity from the specified map of raw messages. 
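+// ImageIdentity is a one-of model: a request provides exactly one of the
+// concrete variants listed above. For example (editor's illustration, with a
+// hypothetical identifier):
+//
+//   identity := &ImageIdentityByID{ID: core.StringPtr("my-image-id")}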
+func UnmarshalImageIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImagePatch : ImagePatch struct +type ImagePatch struct { + // The unique user-defined name for this image. Names starting with "ibm-" are not allowed. + Name *string `json:"name,omitempty"` +} + +// UnmarshalImagePatch unmarshals an instance of ImagePatch from the specified map of raw messages. +func UnmarshalImagePatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImagePatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the ImagePatch +func (imagePatch *ImagePatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(imagePatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// ImagePrototype : ImagePrototype struct +// Models which "extend" this model: +// - ImagePrototypeImageByFile +type ImagePrototype struct { + // The unique user-defined name for this image. Names starting with "ibm-" are not allowed. If unspecified, the name + // will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + // The file from which to create the image. + File *ImageFilePrototype `json:"file,omitempty"` + + // The identity of the [supported operating + // system](https://cloud.ibm.com/apidocs/vpc#list-operating-systems) included in + // this image. + OperatingSystem OperatingSystemIdentityIntf `json:"operating_system,omitempty"` +} + +func (*ImagePrototype) isaImagePrototype() bool { + return true +} + +type ImagePrototypeIntf interface { + isaImagePrototype() bool +} + +// UnmarshalImagePrototype unmarshals an instance of ImagePrototype from the specified map of raw messages. +func UnmarshalImagePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImagePrototype) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "file", &obj.File, UnmarshalImageFilePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "operating_system", &obj.OperatingSystem, UnmarshalOperatingSystemIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageReference : ImageReference struct +type ImageReference struct { + // The CRN for this image. + CRN *string `json:"crn" validate:"required"` + + // The URL for this image. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this image. + ID *string `json:"id" validate:"required"` + + // The user-defined or system-provided name for this image. 
+ Name *string `json:"name" validate:"required"` +} + +// UnmarshalImageReference unmarshals an instance of ImageReference from the specified map of raw messages. +func UnmarshalImageReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Instance : Instance struct +type Instance struct { + // The total bandwidth (in megabits per second) shared across the virtual server instance's network interfaces. + Bandwidth *int64 `json:"bandwidth" validate:"required"` + + // Boot volume attachment. + BootVolumeAttachment *VolumeAttachmentReferenceInstanceContext `json:"boot_volume_attachment" validate:"required"` + + // The date and time that the virtual server instance was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN for this virtual server instance. + CRN *string `json:"crn" validate:"required"` + + // The URL for this virtual server instance. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this virtual server instance. + ID *string `json:"id" validate:"required"` + + // The image the virtual server instance was provisioned from. + Image *ImageReference `json:"image,omitempty"` + + // The amount of memory in gigabytes. + Memory *int64 `json:"memory" validate:"required"` + + // The user-defined name for this virtual server instance (and default system hostname). + Name *string `json:"name" validate:"required"` + + // Collection of the virtual server instance's network interfaces, including the primary network interface. + NetworkInterfaces []NetworkInterfaceInstanceContextReference `json:"network_interfaces" validate:"required"` + + // Primary network interface. + PrimaryNetworkInterface *NetworkInterfaceInstanceContextReference `json:"primary_network_interface" validate:"required"` + + // The profile for this virtual server instance. + Profile *InstanceProfileReference `json:"profile" validate:"required"` + + // The resource group for this instance. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The status of the virtual server instance. + Status *string `json:"status" validate:"required"` + + // The virtual server instance VCPU configuration. + Vcpu *InstanceVcpu `json:"vcpu" validate:"required"` + + // Collection of the virtual server instance's volume attachments, including the boot volume attachment. + VolumeAttachments []VolumeAttachmentReferenceInstanceContext `json:"volume_attachments" validate:"required"` + + // The VPC the virtual server instance resides in. + VPC *VPCReference `json:"vpc" validate:"required"` + + // The zone the virtual server instance resides in. + Zone *ZoneReference `json:"zone" validate:"required"` +} + +// Constants associated with the Instance.Status property. +// The status of the virtual server instance. 
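+//
+// A common caller-side pattern (editor's illustration, assuming `time` is
+// imported and `instance` is re-fetched with the service's GetInstance call on
+// each pass) is to poll until the instance leaves its transitional states:
+//
+//   for instance.Status != nil && *instance.Status != InstanceStatusRunningConst {
+//   	time.Sleep(10 * time.Second)
+//   	// re-fetch `instance` here before checking again
+//   }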
+const ( + InstanceStatusDeletingConst = "deleting" + InstanceStatusFailedConst = "failed" + InstanceStatusPausedConst = "paused" + InstanceStatusPausingConst = "pausing" + InstanceStatusPendingConst = "pending" + InstanceStatusRestartingConst = "restarting" + InstanceStatusResumingConst = "resuming" + InstanceStatusRunningConst = "running" + InstanceStatusStartingConst = "starting" + InstanceStatusStoppedConst = "stopped" + InstanceStatusStoppingConst = "stopping" +) + +// UnmarshalInstance unmarshals an instance of Instance from the specified map of raw messages. +func UnmarshalInstance(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Instance) + err = core.UnmarshalPrimitive(m, "bandwidth", &obj.Bandwidth) + if err != nil { + return + } + err = core.UnmarshalModel(m, "boot_volume_attachment", &obj.BootVolumeAttachment, UnmarshalVolumeAttachmentReferenceInstanceContext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "image", &obj.Image, UnmarshalImageReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "memory", &obj.Memory) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_interfaces", &obj.NetworkInterfaces, UnmarshalNetworkInterfaceInstanceContextReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "primary_network_interface", &obj.PrimaryNetworkInterface, UnmarshalNetworkInterfaceInstanceContextReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vcpu", &obj.Vcpu, UnmarshalInstanceVcpu) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume_attachments", &obj.VolumeAttachments, UnmarshalVolumeAttachmentReferenceInstanceContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceAction : InstanceAction struct +type InstanceAction struct { + // The date and time that the action was completed. + CompletedAt *strfmt.DateTime `json:"completed_at,omitempty"` + + // The date and time that the action was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The URL for this instance action. + Href *string `json:"href" validate:"required"` + + // The identifier for this instance action. + ID *string `json:"id" validate:"required"` + + // The date and time that the action was started. + StartedAt *strfmt.DateTime `json:"started_at,omitempty"` + + // The current status of this action. + Status *string `json:"status" validate:"required"` + + // The type of action. 
+ Type *string `json:"type" validate:"required"` +} + +// Constants associated with the InstanceAction.Status property. +// The current status of this action. +const ( + InstanceActionStatusCompletedConst = "completed" + InstanceActionStatusFailedConst = "failed" + InstanceActionStatusPendingConst = "pending" + InstanceActionStatusRunningConst = "running" +) + +// Constants associated with the InstanceAction.Type property. +// The type of action. +const ( + InstanceActionTypeRebootConst = "reboot" + InstanceActionTypeResetConst = "reset" + InstanceActionTypeStartConst = "start" + InstanceActionTypeStopConst = "stop" +) + +// UnmarshalInstanceAction unmarshals an instance of InstanceAction from the specified map of raw messages. +func UnmarshalInstanceAction(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceAction) + err = core.UnmarshalPrimitive(m, "completed_at", &obj.CompletedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "started_at", &obj.StartedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceCollection : InstanceCollection struct +type InstanceCollection struct { + // A link to the first page of resources. + First *InstanceCollectionFirst `json:"first" validate:"required"` + + // Collection of virtual server instances. + Instances []Instance `json:"instances" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *InstanceCollectionNext `json:"next,omitempty"` + + // The total number of resources across all pages. + TotalCount *int64 `json:"total_count" validate:"required"` +} + +// UnmarshalInstanceCollection unmarshals an instance of InstanceCollection from the specified map of raw messages. +func UnmarshalInstanceCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalInstanceCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalModel(m, "instances", &obj.Instances, UnmarshalInstance) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalInstanceCollectionNext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceCollectionFirst : A link to the first page of resources. +type InstanceCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalInstanceCollectionFirst unmarshals an instance of InstanceCollectionFirst from the specified map of raw messages. 
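+//
+// The instance collection above pages through first/next links. A walk
+// sketch (assuming a configured client named vpcService, the ListInstances
+// method defined elsewhere in this package, and the net/url import; the
+// start token is carried in the next link's query string):
+//
+//	opts := vpcService.NewListInstancesOptions()
+//	for {
+//		page, _, err := vpcService.ListInstances(opts)
+//		if err != nil {
+//			break
+//		}
+//		// consume page.Instances here
+//		if page.Next == nil {
+//			break
+//		}
+//		u, _ := url.Parse(*page.Next.Href)
+//		opts.SetStart(u.Query().Get("start"))
+//	}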
+func UnmarshalInstanceCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type InstanceCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalInstanceCollectionNext unmarshals an instance of InstanceCollectionNext from the specified map of raw messages. +func UnmarshalInstanceCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceInitialization : InstanceInitialization struct +type InstanceInitialization struct { + // Collection of references to public SSH keys used at instance initialization. + Keys []KeyReferenceInstanceInitializationContextIntf `json:"keys" validate:"required"` + + Password *InstanceInitializationPassword `json:"password,omitempty"` +} + +// UnmarshalInstanceInitialization unmarshals an instance of InstanceInitialization from the specified map of raw messages. +func UnmarshalInstanceInitialization(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceInitialization) + err = core.UnmarshalModel(m, "keys", &obj.Keys, UnmarshalKeyReferenceInstanceInitializationContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "password", &obj.Password, UnmarshalInstanceInitializationPassword) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceInitializationPassword : InstanceInitializationPassword struct +type InstanceInitializationPassword struct { + // The administrator password at initialization, encrypted using `encryption_key`, and returned base64-encoded. + EncryptedPassword *[]byte `json:"encrypted_password" validate:"required"` + + // The reference to the public SSH key used to encrypt the administrator password. + EncryptionKey KeyReferenceInstanceInitializationContextIntf `json:"encryption_key" validate:"required"` +} + +// UnmarshalInstanceInitializationPassword unmarshals an instance of InstanceInitializationPassword from the specified map of raw messages. +func UnmarshalInstanceInitializationPassword(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceInitializationPassword) + err = core.UnmarshalPrimitive(m, "encrypted_password", &obj.EncryptedPassword) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalKeyReferenceInstanceInitializationContext) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePatch : InstancePatch struct +type InstancePatch struct { + // The user-defined name for this virtual server instance (and default system hostname). + Name *string `json:"name,omitempty"` +} + +// UnmarshalInstancePatch unmarshals an instance of InstancePatch from the specified map of raw messages. 
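+//
+// A rename sketch using InstancePatch and its AsPatch helper below
+// (assuming a configured client named vpcService, an instance ID in id,
+// and the UpdateInstance method and its options type defined elsewhere in
+// this package):
+//
+//	patchModel := &InstancePatch{Name: core.StringPtr("my-renamed-instance")}
+//	patchMap, err := patchModel.AsPatch()
+//	if err == nil {
+//		_, _, err = vpcService.UpdateInstance(vpcService.NewUpdateInstanceOptions(id, patchMap))
+//	}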
+func UnmarshalInstancePatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the InstancePatch +func (instancePatch *InstancePatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(instancePatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// InstanceProfile : InstanceProfile struct +type InstanceProfile struct { + Bandwidth InstanceProfileBandwidthIntf `json:"bandwidth" validate:"required"` + + // The CRN for this virtual server instance profile. + CRN *string `json:"crn" validate:"required"` + + // The product family this virtual server instance profile belongs to. + Family *string `json:"family,omitempty"` + + // The URL for this virtual server instance profile. + Href *string `json:"href" validate:"required"` + + // The globally unique name for this virtual server instance profile. + Name *string `json:"name" validate:"required"` + + PortSpeed InstanceProfilePortSpeedIntf `json:"port_speed" validate:"required"` +} + +// UnmarshalInstanceProfile unmarshals an instance of InstanceProfile from the specified map of raw messages. +func UnmarshalInstanceProfile(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfile) + err = core.UnmarshalModel(m, "bandwidth", &obj.Bandwidth, UnmarshalInstanceProfileBandwidth) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "family", &obj.Family) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "port_speed", &obj.PortSpeed, UnmarshalInstanceProfilePortSpeed) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileBandwidth : InstanceProfileBandwidth struct +// Models which "extend" this model: +// - InstanceProfileBandwidthFixed +// - InstanceProfileBandwidthRange +// - InstanceProfileBandwidthEnum +// - InstanceProfileBandwidthDependent +type InstanceProfileBandwidth struct { + // The type for this profile field. + Type *string `json:"type,omitempty"` + + // The value for this profile field. + Value *int64 `json:"value,omitempty"` + + // The default value for this profile field. + Default *int64 `json:"default,omitempty"` + + // The maximum value for this profile field. + Max *int64 `json:"max,omitempty"` + + // The minimum value for this profile field. + Min *int64 `json:"min,omitempty"` + + // The increment step value for this profile field. + Step *int64 `json:"step,omitempty"` + + // The permitted values for this profile field. + Values []int64 `json:"values,omitempty"` +} + +// Constants associated with the InstanceProfileBandwidth.Type property. +// The type for this profile field. 
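+//
+// A reading sketch for this discriminated union (given an InstanceProfile
+// value named profile): the unmarshaller below returns the base struct, so
+// a type assertion recovers it from the interface, and the populated
+// optional fields indicate the variant (for example, Value for a fixed
+// type; Min/Max/Step for a range type):
+//
+//	if bw, ok := profile.Bandwidth.(*InstanceProfileBandwidth); ok {
+//		if bw.Type != nil && *bw.Type == InstanceProfileBandwidthTypeFixedConst {
+//			fmt.Printf("fixed bandwidth: %d Mbps\n", *bw.Value)
+//		}
+//	}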
+const ( + InstanceProfileBandwidthTypeFixedConst = "fixed" +) + +func (*InstanceProfileBandwidth) isaInstanceProfileBandwidth() bool { + return true +} + +type InstanceProfileBandwidthIntf interface { + isaInstanceProfileBandwidth() bool +} + +// UnmarshalInstanceProfileBandwidth unmarshals an instance of InstanceProfileBandwidth from the specified map of raw messages. +func UnmarshalInstanceProfileBandwidth(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileBandwidth) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "default", &obj.Default) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "max", &obj.Max) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "min", &obj.Min) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "step", &obj.Step) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "values", &obj.Values) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileCollection : InstanceProfileCollection struct +type InstanceProfileCollection struct { + // A link to the first page of resources. + First *InstanceProfileCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *InstanceProfileCollectionNext `json:"next,omitempty"` + + // Collection of virtual server instance profiles. + Profiles []InstanceProfile `json:"profiles" validate:"required"` + + // The total number of resources across all pages. + TotalCount *int64 `json:"total_count" validate:"required"` +} + +// UnmarshalInstanceProfileCollection unmarshals an instance of InstanceProfileCollection from the specified map of raw messages. +func UnmarshalInstanceProfileCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalInstanceProfileCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalInstanceProfileCollectionNext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profiles", &obj.Profiles, UnmarshalInstanceProfile) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileCollectionFirst : A link to the first page of resources. +type InstanceProfileCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalInstanceProfileCollectionFirst unmarshals an instance of InstanceProfileCollectionFirst from the specified map of raw messages. 
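+//
+// A listing sketch (assuming a configured client named vpcService; the
+// ListInstanceProfiles method is defined elsewhere in this package and its
+// options type later in this file):
+//
+//	profiles, _, err := vpcService.ListInstanceProfiles(vpcService.NewListInstanceProfilesOptions())
+//	if err == nil {
+//		for _, p := range profiles.Profiles {
+//			fmt.Println(*p.Name)
+//		}
+//	}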
+func UnmarshalInstanceProfileCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type InstanceProfileCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalInstanceProfileCollectionNext unmarshals an instance of InstanceProfileCollectionNext from the specified map of raw messages. +func UnmarshalInstanceProfileCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileIdentity : Identifies an instance profile by a unique property. +// Models which "extend" this model: +// - InstanceProfileIdentityByName +// - InstanceProfileIdentityByCRN +// - InstanceProfileIdentityByHref +type InstanceProfileIdentity struct { + // The globally unique name for this virtual server instance profile. + Name *string `json:"name,omitempty"` + + // The CRN for this virtual server instance profile. + CRN *string `json:"crn,omitempty"` + + // The URL for this virtual server instance profile. + Href *string `json:"href,omitempty"` +} + +func (*InstanceProfileIdentity) isaInstanceProfileIdentity() bool { + return true +} + +type InstanceProfileIdentityIntf interface { + isaInstanceProfileIdentity() bool +} + +// UnmarshalInstanceProfileIdentity unmarshals an instance of InstanceProfileIdentity from the specified map of raw messages. +func UnmarshalInstanceProfileIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileIdentity) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfilePortSpeed : InstanceProfilePortSpeed struct +// Models which "extend" this model: +// - InstanceProfilePortSpeedFixed +// - InstanceProfilePortSpeedDependent +type InstanceProfilePortSpeed struct { + // The type for this profile field. + Type *string `json:"type,omitempty"` + + // The value for this profile field. + Value *int64 `json:"value,omitempty"` +} + +// Constants associated with the InstanceProfilePortSpeed.Type property. +// The type for this profile field. +const ( + InstanceProfilePortSpeedTypeFixedConst = "fixed" +) + +func (*InstanceProfilePortSpeed) isaInstanceProfilePortSpeed() bool { + return true +} + +type InstanceProfilePortSpeedIntf interface { + isaInstanceProfilePortSpeed() bool +} + +// UnmarshalInstanceProfilePortSpeed unmarshals an instance of InstanceProfilePortSpeed from the specified map of raw messages. 
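+//
+// A construction sketch for the InstanceProfileIdentity model above: any
+// one subtype identifies a profile, and by-name is the most common choice
+// (the InstanceProfileIdentityByName subtype is declared elsewhere in this
+// file; the profile name is a placeholder):
+//
+//	profileIdentity := &InstanceProfileIdentityByName{
+//		Name: core.StringPtr("cc1-2x4"),
+//	}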
+func UnmarshalInstanceProfilePortSpeed(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfilePortSpeed) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileReference : InstanceProfileReference struct +type InstanceProfileReference struct { + // The CRN for this virtual server instance profile. + CRN *string `json:"crn" validate:"required"` + + // The URL for this virtual server instance profile. + Href *string `json:"href" validate:"required"` + + // The globally unique name for this virtual server instance profile. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalInstanceProfileReference unmarshals an instance of InstanceProfileReference from the specified map of raw messages. +func UnmarshalInstanceProfileReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePrototype : InstancePrototype struct +// Models which "extend" this model: +// - InstancePrototypeInstanceByImage +type InstancePrototype struct { + // The public SSH keys for the administrative user of the virtual server instance. Up to 10 keys may be provided; if no + // keys are provided the instance will be inaccessible unless the image used provides another means of access. For + // Windows instances, one of the keys will be used to encrypt the administrator password. + // + // Keys will be made available to the virtual server instance as cloud-init vendor data. For cloud-init enabled images, + // these keys will also be added as SSH authorized keys for the administrative user. + Keys []KeyIdentityIntf `json:"keys,omitempty"` + + // The user-defined name for this virtual server instance (and default system hostname). If unspecified, the name will + // be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // Collection of additional network interfaces to create for the virtual server instance. + NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + + // The profile to use for this virtual server instance. + Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + // User data to be made available when setting up the virtual server instance. + UserData *string `json:"user_data,omitempty"` + + // Collection of volume attachments. + VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` + + // The VPC the virtual server instance is to be a part of. If provided, must match the + // VPC tied to the subnets of the instance's network interfaces. + VPC VPCIdentityIntf `json:"vpc,omitempty"` + + // The boot volume attachment for the virtual server instance. 
+ BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` + + // The identity of the image to use when provisioning the virtual server instance. + Image ImageIdentityIntf `json:"image,omitempty"` + + // Primary network interface. + PrimaryNetworkInterface *NetworkInterfacePrototype `json:"primary_network_interface,omitempty"` + + // The identity of the zone to provision the virtual server instance in. + Zone ZoneIdentityIntf `json:"zone,omitempty"` +} + +func (*InstancePrototype) isaInstancePrototype() bool { + return true +} + +type InstancePrototypeIntf interface { + isaInstancePrototype() bool +} + +// UnmarshalInstancePrototype unmarshals an instance of InstancePrototype from the specified map of raw messages. +func UnmarshalInstancePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePrototype) + err = core.UnmarshalModel(m, "keys", &obj.Keys, UnmarshalKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_interfaces", &obj.NetworkInterfaces, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "user_data", &obj.UserData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume_attachments", &obj.VolumeAttachments, UnmarshalVolumeAttachmentPrototypeInstanceContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "boot_volume_attachment", &obj.BootVolumeAttachment, UnmarshalVolumeAttachmentPrototypeInstanceByImageContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "image", &obj.Image, UnmarshalImageIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "primary_network_interface", &obj.PrimaryNetworkInterface, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceReference : InstanceReference struct +type InstanceReference struct { + // The CRN for this virtual server instance. + CRN *string `json:"crn" validate:"required"` + + // The URL for this virtual server instance. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this virtual server instance. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this virtual server instance (and default system hostname). + Name *string `json:"name" validate:"required"` +} + +// UnmarshalInstanceReference unmarshals an instance of InstanceReference from the specified map of raw messages. 
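+//
+// A provisioning sketch for the InstancePrototype model above, using the
+// by-image variant it names (assuming a configured client named
+// vpcService; the identity subtypes and the CreateInstance method are
+// declared elsewhere in this file and package, and all IDs and names are
+// placeholders):
+//
+//	prototype := &InstancePrototypeInstanceByImage{
+//		Name:    core.StringPtr("my-instance"),
+//		Image:   &ImageIdentityByID{ID: core.StringPtr(imageID)},
+//		Profile: &InstanceProfileIdentityByName{Name: core.StringPtr("cc1-2x4")},
+//		Zone:    &ZoneIdentityByName{Name: core.StringPtr("us-south-1")},
+//		PrimaryNetworkInterface: &NetworkInterfacePrototype{
+//			Subnet: &SubnetIdentityByID{ID: core.StringPtr(subnetID)},
+//		},
+//	}
+//	instance, _, err := vpcService.CreateInstance(vpcService.NewCreateInstanceOptions(prototype))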
+func UnmarshalInstanceReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceVcpu : The virtual server instance VCPU configuration. +type InstanceVcpu struct { + // The VCPU architecture. + Architecture *string `json:"architecture" validate:"required"` + + // The number of VCPUs assigned. + Count *int64 `json:"count" validate:"required"` +} + +// UnmarshalInstanceVcpu unmarshals an instance of InstanceVcpu from the specified map of raw messages. +func UnmarshalInstanceVcpu(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceVcpu) + err = core.UnmarshalPrimitive(m, "architecture", &obj.Architecture) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "count", &obj.Count) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Key : Key struct +type Key struct { + // The date and time that the key was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN for this key. + CRN *string `json:"crn" validate:"required"` + + // The fingerprint for this key. The value is returned base64-encoded and prefixed with the hash algorithm (always + // `SHA256`). + Fingerprint *string `json:"fingerprint" validate:"required"` + + // The URL for this key. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this key. + ID *string `json:"id" validate:"required"` + + // The length of this key (in bits). + Length *int64 `json:"length" validate:"required"` + + // The user-defined name for this key. + Name *string `json:"name" validate:"required"` + + // The public SSH key. + PublicKey *string `json:"public_key" validate:"required"` + + // The resource group for this key. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The crypto-system used by this key. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the Key.Type property. +// The crypto-system used by this key. +const ( + KeyTypeRsaConst = "rsa" +) + +// UnmarshalKey unmarshals an instance of Key from the specified map of raw messages. 
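+//
+// A creation sketch (assuming a configured client named vpcService and the
+// CreateKey method and its options type defined elsewhere in this package;
+// the key material is a placeholder and must be RSA, per the constant
+// above):
+//
+//	key, _, err := vpcService.CreateKey(vpcService.NewCreateKeyOptions("ssh-rsa AAAA... user@example.com"))
+//	if err == nil {
+//		fmt.Println(*key.ID, *key.Fingerprint)
+//	}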
+func UnmarshalKey(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Key) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "fingerprint", &obj.Fingerprint) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "length", &obj.Length) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "public_key", &obj.PublicKey) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyCollection : KeyCollection struct +type KeyCollection struct { + // A link to the first page of resources. + First *KeyCollectionFirst `json:"first" validate:"required"` + + // Collection of keys. + Keys []Key `json:"keys" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *KeyCollectionNext `json:"next,omitempty"` + + // The total number of resources across all pages. + TotalCount *int64 `json:"total_count" validate:"required"` +} + +// UnmarshalKeyCollection unmarshals an instance of KeyCollection from the specified map of raw messages. +func UnmarshalKeyCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalKeyCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalModel(m, "keys", &obj.Keys, UnmarshalKey) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalKeyCollectionNext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyCollectionFirst : A link to the first page of resources. +type KeyCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalKeyCollectionFirst unmarshals an instance of KeyCollectionFirst from the specified map of raw messages. +func UnmarshalKeyCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type KeyCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalKeyCollectionNext unmarshals an instance of KeyCollectionNext from the specified map of raw messages. 
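+//
+// The key collection pages the same way as the other collections in this
+// file: follow Next.Href until it is absent. A compact sketch (assuming a
+// configured client named vpcService; the ListKeys method is defined
+// elsewhere in this package and its options type later in this file):
+//
+//	keys, _, err := vpcService.ListKeys(vpcService.NewListKeysOptions())
+//	if err == nil {
+//		fmt.Println(*keys.TotalCount) // total across all pages, not just this one
+//	}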
+func UnmarshalKeyCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyIdentity : Identifies a key by a unique property. +// Models which "extend" this model: +// - KeyIdentityByID +// - KeyIdentityByCRN +// - KeyIdentityByHref +// - KeyIdentityKeyIdentityByFingerprint +type KeyIdentity struct { + // The unique identifier for this key. + ID *string `json:"id,omitempty"` + + // The CRN for this key. + CRN *string `json:"crn,omitempty"` + + // The URL for this key. + Href *string `json:"href,omitempty"` + + // The fingerprint for this key. The value is returned base64-encoded and prefixed with the hash algorithm (always + // `SHA256`). + Fingerprint *string `json:"fingerprint,omitempty"` +} + +func (*KeyIdentity) isaKeyIdentity() bool { + return true +} + +type KeyIdentityIntf interface { + isaKeyIdentity() bool +} + +// UnmarshalKeyIdentity unmarshals an instance of KeyIdentity from the specified map of raw messages. +func UnmarshalKeyIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "fingerprint", &obj.Fingerprint) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyPatch : KeyPatch struct +type KeyPatch struct { + // The user-defined name for this key. + Name *string `json:"name,omitempty"` +} + +// UnmarshalKeyPatch unmarshals an instance of KeyPatch from the specified map of raw messages. +func UnmarshalKeyPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the KeyPatch +func (keyPatch *KeyPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(keyPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// KeyReferenceInstanceInitializationContext : KeyReferenceInstanceInitializationContext struct +// Models which "extend" this model: +// - KeyReferenceInstanceInitializationContextKeyReference +// - KeyReferenceInstanceInitializationContextKeyIdentityByFingerprint +type KeyReferenceInstanceInitializationContext struct { + // The CRN for this key. + CRN *string `json:"crn,omitempty"` + + // The fingerprint for this key. The value is returned base64-encoded and prefixed with the hash algorithm (always + // `SHA256`). + Fingerprint *string `json:"fingerprint,omitempty"` + + // The URL for this key. + Href *string `json:"href,omitempty"` + + // The unique identifier for this key. + ID *string `json:"id,omitempty"` + + // The user-defined name for this key. 
+ Name *string `json:"name,omitempty"` +} + +func (*KeyReferenceInstanceInitializationContext) isaKeyReferenceInstanceInitializationContext() bool { + return true +} + +type KeyReferenceInstanceInitializationContextIntf interface { + isaKeyReferenceInstanceInitializationContext() bool +} + +// UnmarshalKeyReferenceInstanceInitializationContext unmarshals an instance of KeyReferenceInstanceInitializationContext from the specified map of raw messages. +func UnmarshalKeyReferenceInstanceInitializationContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyReferenceInstanceInitializationContext) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "fingerprint", &obj.Fingerprint) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ListFloatingIpsOptions : The ListFloatingIps options. +type ListFloatingIpsOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListFloatingIpsOptions : Instantiate ListFloatingIpsOptions +func (*VpcClassicV1) NewListFloatingIpsOptions() *ListFloatingIpsOptions { + return &ListFloatingIpsOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListFloatingIpsOptions) SetStart(start string) *ListFloatingIpsOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListFloatingIpsOptions) SetLimit(limit int64) *ListFloatingIpsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListFloatingIpsOptions) SetHeaders(param map[string]string) *ListFloatingIpsOptions { + options.Headers = param + return options +} + +// ListIkePoliciesOptions : The ListIkePolicies options. +type ListIkePoliciesOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListIkePoliciesOptions : Instantiate ListIkePoliciesOptions +func (*VpcClassicV1) NewListIkePoliciesOptions() *ListIkePoliciesOptions { + return &ListIkePoliciesOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListIkePoliciesOptions) SetStart(start string) *ListIkePoliciesOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListIkePoliciesOptions) SetLimit(limit int64) *ListIkePoliciesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListIkePoliciesOptions) SetHeaders(param map[string]string) *ListIkePoliciesOptions { + options.Headers = param + return options +} + +// ListIkePolicyConnectionsOptions : The ListIkePolicyConnections options. +type ListIkePolicyConnectionsOptions struct { + // The IKE policy identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListIkePolicyConnectionsOptions : Instantiate ListIkePolicyConnectionsOptions +func (*VpcClassicV1) NewListIkePolicyConnectionsOptions(id string) *ListIkePolicyConnectionsOptions { + return &ListIkePolicyConnectionsOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *ListIkePolicyConnectionsOptions) SetID(id string) *ListIkePolicyConnectionsOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListIkePolicyConnectionsOptions) SetHeaders(param map[string]string) *ListIkePolicyConnectionsOptions { + options.Headers = param + return options +} + +// ListImagesOptions : The ListImages options. +type ListImagesOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Filters the collection to resources within one of the resource groups identified in a comma-separated list of + // resource group identifiers. + ResourceGroupID *string + + // Filters the collection to resources with the exact specified name. + Name *string + + // Filters the collection to images with the specified `visibility`. + Visibility *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListImagesOptions.Visibility property. +// Filters the collection to images with the specified `visibility`. +const ( + ListImagesOptionsVisibilityPrivateConst = "private" + ListImagesOptionsVisibilityPublicConst = "public" +) + +// NewListImagesOptions : Instantiate ListImagesOptions +func (*VpcClassicV1) NewListImagesOptions() *ListImagesOptions { + return &ListImagesOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListImagesOptions) SetStart(start string) *ListImagesOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListImagesOptions) SetLimit(limit int64) *ListImagesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetResourceGroupID : Allow user to set ResourceGroupID +func (options *ListImagesOptions) SetResourceGroupID(resourceGroupID string) *ListImagesOptions { + options.ResourceGroupID = core.StringPtr(resourceGroupID) + return options +} + +// SetName : Allow user to set Name +func (options *ListImagesOptions) SetName(name string) *ListImagesOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetVisibility : Allow user to set Visibility +func (options *ListImagesOptions) SetVisibility(visibility string) *ListImagesOptions { + options.Visibility = core.StringPtr(visibility) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListImagesOptions) SetHeaders(param map[string]string) *ListImagesOptions { + options.Headers = param + return options +} + +// ListInstanceNetworkInterfaceFloatingIpsOptions : The ListInstanceNetworkInterfaceFloatingIps options. +type ListInstanceNetworkInterfaceFloatingIpsOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The network interface identifier. 
+ NetworkInterfaceID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListInstanceNetworkInterfaceFloatingIpsOptions : Instantiate ListInstanceNetworkInterfaceFloatingIpsOptions +func (*VpcClassicV1) NewListInstanceNetworkInterfaceFloatingIpsOptions(instanceID string, networkInterfaceID string) *ListInstanceNetworkInterfaceFloatingIpsOptions { + return &ListInstanceNetworkInterfaceFloatingIpsOptions{ + InstanceID: core.StringPtr(instanceID), + NetworkInterfaceID: core.StringPtr(networkInterfaceID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *ListInstanceNetworkInterfaceFloatingIpsOptions) SetInstanceID(instanceID string) *ListInstanceNetworkInterfaceFloatingIpsOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetNetworkInterfaceID : Allow user to set NetworkInterfaceID +func (options *ListInstanceNetworkInterfaceFloatingIpsOptions) SetNetworkInterfaceID(networkInterfaceID string) *ListInstanceNetworkInterfaceFloatingIpsOptions { + options.NetworkInterfaceID = core.StringPtr(networkInterfaceID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListInstanceNetworkInterfaceFloatingIpsOptions) SetHeaders(param map[string]string) *ListInstanceNetworkInterfaceFloatingIpsOptions { + options.Headers = param + return options +} + +// ListInstanceNetworkInterfacesOptions : The ListInstanceNetworkInterfaces options. +type ListInstanceNetworkInterfacesOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListInstanceNetworkInterfacesOptions : Instantiate ListInstanceNetworkInterfacesOptions +func (*VpcClassicV1) NewListInstanceNetworkInterfacesOptions(instanceID string) *ListInstanceNetworkInterfacesOptions { + return &ListInstanceNetworkInterfacesOptions{ + InstanceID: core.StringPtr(instanceID), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *ListInstanceNetworkInterfacesOptions) SetInstanceID(instanceID string) *ListInstanceNetworkInterfacesOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListInstanceNetworkInterfacesOptions) SetHeaders(param map[string]string) *ListInstanceNetworkInterfacesOptions { + options.Headers = param + return options +} + +// ListInstanceProfilesOptions : The ListInstanceProfiles options. +type ListInstanceProfilesOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. 
+	Limit *int64
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListInstanceProfilesOptions : Instantiate ListInstanceProfilesOptions
+func (*VpcClassicV1) NewListInstanceProfilesOptions() *ListInstanceProfilesOptions {
+	return &ListInstanceProfilesOptions{}
+}
+
+// SetStart : Allow user to set Start
+func (options *ListInstanceProfilesOptions) SetStart(start string) *ListInstanceProfilesOptions {
+	options.Start = core.StringPtr(start)
+	return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListInstanceProfilesOptions) SetLimit(limit int64) *ListInstanceProfilesOptions {
+	options.Limit = core.Int64Ptr(limit)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListInstanceProfilesOptions) SetHeaders(param map[string]string) *ListInstanceProfilesOptions {
+	options.Headers = param
+	return options
+}
+
+// ListInstanceVolumeAttachmentsOptions : The ListInstanceVolumeAttachments options.
+type ListInstanceVolumeAttachmentsOptions struct {
+	// The instance identifier.
+	InstanceID *string `validate:"required,ne="`
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListInstanceVolumeAttachmentsOptions : Instantiate ListInstanceVolumeAttachmentsOptions
+func (*VpcClassicV1) NewListInstanceVolumeAttachmentsOptions(instanceID string) *ListInstanceVolumeAttachmentsOptions {
+	return &ListInstanceVolumeAttachmentsOptions{
+		InstanceID: core.StringPtr(instanceID),
+	}
+}
+
+// SetInstanceID : Allow user to set InstanceID
+func (options *ListInstanceVolumeAttachmentsOptions) SetInstanceID(instanceID string) *ListInstanceVolumeAttachmentsOptions {
+	options.InstanceID = core.StringPtr(instanceID)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListInstanceVolumeAttachmentsOptions) SetHeaders(param map[string]string) *ListInstanceVolumeAttachmentsOptions {
+	options.Headers = param
+	return options
+}
+
+// ListInstancesOptions : The ListInstances options.
+type ListInstancesOptions struct {
+	// A server-supplied token determining what resource to start the page on.
+	Start *string
+
+	// The number of resources to return on a page.
+	Limit *int64
+
+	// Filters the collection to instances with a network interface on the subnet with the specified identifier.
+	NetworkInterfacesSubnetID *string
+
+	// Filters the collection to instances with a network interface on the subnet with the specified CRN.
+	NetworkInterfacesSubnetCRN *string
+
+	// Filters the collection to instances with a network interface on the subnet with the specified name.
+ NetworkInterfacesSubnetName *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListInstancesOptions : Instantiate ListInstancesOptions +func (*VpcClassicV1) NewListInstancesOptions() *ListInstancesOptions { + return &ListInstancesOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListInstancesOptions) SetStart(start string) *ListInstancesOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListInstancesOptions) SetLimit(limit int64) *ListInstancesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetNetworkInterfacesSubnetID : Allow user to set NetworkInterfacesSubnetID +func (options *ListInstancesOptions) SetNetworkInterfacesSubnetID(networkInterfacesSubnetID string) *ListInstancesOptions { + options.NetworkInterfacesSubnetID = core.StringPtr(networkInterfacesSubnetID) + return options +} + +// SetNetworkInterfacesSubnetCRN : Allow user to set NetworkInterfacesSubnetCRN +func (options *ListInstancesOptions) SetNetworkInterfacesSubnetCRN(networkInterfacesSubnetCRN string) *ListInstancesOptions { + options.NetworkInterfacesSubnetCRN = core.StringPtr(networkInterfacesSubnetCRN) + return options +} + +// SetNetworkInterfacesSubnetName : Allow user to set NetworkInterfacesSubnetName +func (options *ListInstancesOptions) SetNetworkInterfacesSubnetName(networkInterfacesSubnetName string) *ListInstancesOptions { + options.NetworkInterfacesSubnetName = core.StringPtr(networkInterfacesSubnetName) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListInstancesOptions) SetHeaders(param map[string]string) *ListInstancesOptions { + options.Headers = param + return options +} + +// ListIpsecPoliciesOptions : The ListIpsecPolicies options. +type ListIpsecPoliciesOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListIpsecPoliciesOptions : Instantiate ListIpsecPoliciesOptions +func (*VpcClassicV1) NewListIpsecPoliciesOptions() *ListIpsecPoliciesOptions { + return &ListIpsecPoliciesOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListIpsecPoliciesOptions) SetStart(start string) *ListIpsecPoliciesOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListIpsecPoliciesOptions) SetLimit(limit int64) *ListIpsecPoliciesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListIpsecPoliciesOptions) SetHeaders(param map[string]string) *ListIpsecPoliciesOptions { + options.Headers = param + return options +} + +// ListIpsecPolicyConnectionsOptions : The ListIpsecPolicyConnections options. +type ListIpsecPolicyConnectionsOptions struct { + // The IPsec policy identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListIpsecPolicyConnectionsOptions : Instantiate ListIpsecPolicyConnectionsOptions +func (*VpcClassicV1) NewListIpsecPolicyConnectionsOptions(id string) *ListIpsecPolicyConnectionsOptions { + return &ListIpsecPolicyConnectionsOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *ListIpsecPolicyConnectionsOptions) SetID(id string) *ListIpsecPolicyConnectionsOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListIpsecPolicyConnectionsOptions) SetHeaders(param map[string]string) *ListIpsecPolicyConnectionsOptions { + options.Headers = param + return options +} + +// ListKeysOptions : The ListKeys options. +type ListKeysOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListKeysOptions : Instantiate ListKeysOptions +func (*VpcClassicV1) NewListKeysOptions() *ListKeysOptions { + return &ListKeysOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListKeysOptions) SetStart(start string) *ListKeysOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListKeysOptions) SetLimit(limit int64) *ListKeysOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListKeysOptions) SetHeaders(param map[string]string) *ListKeysOptions { + options.Headers = param + return options +} + +// ListLoadBalancerListenerPoliciesOptions : The ListLoadBalancerListenerPolicies options. +type ListLoadBalancerListenerPoliciesOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListLoadBalancerListenerPoliciesOptions : Instantiate ListLoadBalancerListenerPoliciesOptions +func (*VpcClassicV1) NewListLoadBalancerListenerPoliciesOptions(loadBalancerID string, listenerID string) *ListLoadBalancerListenerPoliciesOptions { + return &ListLoadBalancerListenerPoliciesOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *ListLoadBalancerListenerPoliciesOptions) SetLoadBalancerID(loadBalancerID string) *ListLoadBalancerListenerPoliciesOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *ListLoadBalancerListenerPoliciesOptions) SetListenerID(listenerID string) *ListLoadBalancerListenerPoliciesOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListLoadBalancerListenerPoliciesOptions) SetHeaders(param map[string]string) *ListLoadBalancerListenerPoliciesOptions { + options.Headers = param + return options +} + +// ListLoadBalancerListenerPolicyRulesOptions : The ListLoadBalancerListenerPolicyRules options. +type ListLoadBalancerListenerPolicyRulesOptions struct { + // The load balancer identifier. 
+ LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // The policy identifier. + PolicyID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListLoadBalancerListenerPolicyRulesOptions : Instantiate ListLoadBalancerListenerPolicyRulesOptions +func (*VpcClassicV1) NewListLoadBalancerListenerPolicyRulesOptions(loadBalancerID string, listenerID string, policyID string) *ListLoadBalancerListenerPolicyRulesOptions { + return &ListLoadBalancerListenerPolicyRulesOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + PolicyID: core.StringPtr(policyID), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *ListLoadBalancerListenerPolicyRulesOptions) SetLoadBalancerID(loadBalancerID string) *ListLoadBalancerListenerPolicyRulesOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *ListLoadBalancerListenerPolicyRulesOptions) SetListenerID(listenerID string) *ListLoadBalancerListenerPolicyRulesOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetPolicyID : Allow user to set PolicyID +func (options *ListLoadBalancerListenerPolicyRulesOptions) SetPolicyID(policyID string) *ListLoadBalancerListenerPolicyRulesOptions { + options.PolicyID = core.StringPtr(policyID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListLoadBalancerListenerPolicyRulesOptions) SetHeaders(param map[string]string) *ListLoadBalancerListenerPolicyRulesOptions { + options.Headers = param + return options +} + +// ListLoadBalancerListenersOptions : The ListLoadBalancerListeners options. +type ListLoadBalancerListenersOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListLoadBalancerListenersOptions : Instantiate ListLoadBalancerListenersOptions +func (*VpcClassicV1) NewListLoadBalancerListenersOptions(loadBalancerID string) *ListLoadBalancerListenersOptions { + return &ListLoadBalancerListenersOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *ListLoadBalancerListenersOptions) SetLoadBalancerID(loadBalancerID string) *ListLoadBalancerListenersOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListLoadBalancerListenersOptions) SetHeaders(param map[string]string) *ListLoadBalancerListenersOptions { + options.Headers = param + return options +} + +// ListLoadBalancerPoolMembersOptions : The ListLoadBalancerPoolMembers options. +type ListLoadBalancerPoolMembersOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The pool identifier. 
+ PoolID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListLoadBalancerPoolMembersOptions : Instantiate ListLoadBalancerPoolMembersOptions +func (*VpcClassicV1) NewListLoadBalancerPoolMembersOptions(loadBalancerID string, poolID string) *ListLoadBalancerPoolMembersOptions { + return &ListLoadBalancerPoolMembersOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + PoolID: core.StringPtr(poolID), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *ListLoadBalancerPoolMembersOptions) SetLoadBalancerID(loadBalancerID string) *ListLoadBalancerPoolMembersOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetPoolID : Allow user to set PoolID +func (options *ListLoadBalancerPoolMembersOptions) SetPoolID(poolID string) *ListLoadBalancerPoolMembersOptions { + options.PoolID = core.StringPtr(poolID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListLoadBalancerPoolMembersOptions) SetHeaders(param map[string]string) *ListLoadBalancerPoolMembersOptions { + options.Headers = param + return options +} + +// ListLoadBalancerPoolsOptions : The ListLoadBalancerPools options. +type ListLoadBalancerPoolsOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListLoadBalancerPoolsOptions : Instantiate ListLoadBalancerPoolsOptions +func (*VpcClassicV1) NewListLoadBalancerPoolsOptions(loadBalancerID string) *ListLoadBalancerPoolsOptions { + return &ListLoadBalancerPoolsOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *ListLoadBalancerPoolsOptions) SetLoadBalancerID(loadBalancerID string) *ListLoadBalancerPoolsOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListLoadBalancerPoolsOptions) SetHeaders(param map[string]string) *ListLoadBalancerPoolsOptions { + options.Headers = param + return options +} + +// ListLoadBalancersOptions : The ListLoadBalancers options. +type ListLoadBalancersOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListLoadBalancersOptions : Instantiate ListLoadBalancersOptions +func (*VpcClassicV1) NewListLoadBalancersOptions() *ListLoadBalancersOptions { + return &ListLoadBalancersOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListLoadBalancersOptions) SetHeaders(param map[string]string) *ListLoadBalancersOptions { + options.Headers = param + return options +} + +// ListNetworkACLRulesOptions : The ListNetworkACLRules options. +type ListNetworkACLRulesOptions struct { + // The network ACL identifier. + NetworkACLID *string `validate:"required,ne="` + + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Filters the collection to rules with the specified direction. + Direction *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListNetworkACLRulesOptions.Direction property. +// Filters the collection to rules with the specified direction. 
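+// A minimal usage sketch, assuming a configured *VpcClassicV1 client named
+// `vpcService` and a list method with the usual generated signature (both
+// are assumptions, not part of this file); error handling elided:
+//
+//   options := vpcService.NewListNetworkACLRulesOptions(networkACLID)
+//   options.SetDirection(ListNetworkACLRulesOptionsDirectionInboundConst)
+//   options.SetLimit(50)
+//   rules, _, err := vpcService.ListNetworkACLRules(options)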
+const ( + ListNetworkACLRulesOptionsDirectionInboundConst = "inbound" + ListNetworkACLRulesOptionsDirectionOutboundConst = "outbound" +) + +// NewListNetworkACLRulesOptions : Instantiate ListNetworkACLRulesOptions +func (*VpcClassicV1) NewListNetworkACLRulesOptions(networkACLID string) *ListNetworkACLRulesOptions { + return &ListNetworkACLRulesOptions{ + NetworkACLID: core.StringPtr(networkACLID), + } +} + +// SetNetworkACLID : Allow user to set NetworkACLID +func (options *ListNetworkACLRulesOptions) SetNetworkACLID(networkACLID string) *ListNetworkACLRulesOptions { + options.NetworkACLID = core.StringPtr(networkACLID) + return options +} + +// SetStart : Allow user to set Start +func (options *ListNetworkACLRulesOptions) SetStart(start string) *ListNetworkACLRulesOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListNetworkACLRulesOptions) SetLimit(limit int64) *ListNetworkACLRulesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetDirection : Allow user to set Direction +func (options *ListNetworkACLRulesOptions) SetDirection(direction string) *ListNetworkACLRulesOptions { + options.Direction = core.StringPtr(direction) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListNetworkACLRulesOptions) SetHeaders(param map[string]string) *ListNetworkACLRulesOptions { + options.Headers = param + return options +} + +// ListNetworkAclsOptions : The ListNetworkAcls options. +type ListNetworkAclsOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListNetworkAclsOptions : Instantiate ListNetworkAclsOptions +func (*VpcClassicV1) NewListNetworkAclsOptions() *ListNetworkAclsOptions { + return &ListNetworkAclsOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListNetworkAclsOptions) SetStart(start string) *ListNetworkAclsOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListNetworkAclsOptions) SetLimit(limit int64) *ListNetworkAclsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListNetworkAclsOptions) SetHeaders(param map[string]string) *ListNetworkAclsOptions { + options.Headers = param + return options +} + +// ListOperatingSystemsOptions : The ListOperatingSystems options. +type ListOperatingSystemsOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. 
+ Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListOperatingSystemsOptions : Instantiate ListOperatingSystemsOptions +func (*VpcClassicV1) NewListOperatingSystemsOptions() *ListOperatingSystemsOptions { + return &ListOperatingSystemsOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListOperatingSystemsOptions) SetStart(start string) *ListOperatingSystemsOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListOperatingSystemsOptions) SetLimit(limit int64) *ListOperatingSystemsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListOperatingSystemsOptions) SetHeaders(param map[string]string) *ListOperatingSystemsOptions { + options.Headers = param + return options +} + +// ListPublicGatewaysOptions : The ListPublicGateways options. +type ListPublicGatewaysOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListPublicGatewaysOptions : Instantiate ListPublicGatewaysOptions +func (*VpcClassicV1) NewListPublicGatewaysOptions() *ListPublicGatewaysOptions { + return &ListPublicGatewaysOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListPublicGatewaysOptions) SetStart(start string) *ListPublicGatewaysOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListPublicGatewaysOptions) SetLimit(limit int64) *ListPublicGatewaysOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListPublicGatewaysOptions) SetHeaders(param map[string]string) *ListPublicGatewaysOptions { + options.Headers = param + return options +} + +// ListRegionZonesOptions : The ListRegionZones options. +type ListRegionZonesOptions struct { + // The region name. + RegionName *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListRegionZonesOptions : Instantiate ListRegionZonesOptions +func (*VpcClassicV1) NewListRegionZonesOptions(regionName string) *ListRegionZonesOptions { + return &ListRegionZonesOptions{ + RegionName: core.StringPtr(regionName), + } +} + +// SetRegionName : Allow user to set RegionName +func (options *ListRegionZonesOptions) SetRegionName(regionName string) *ListRegionZonesOptions { + options.RegionName = core.StringPtr(regionName) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListRegionZonesOptions) SetHeaders(param map[string]string) *ListRegionZonesOptions { + options.Headers = param + return options +} + +// ListRegionsOptions : The ListRegions options. 
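+// A minimal usage sketch, assuming a configured `vpcService` client; the
+// header name shown is illustrative only:
+//
+//   options := vpcService.NewListRegionsOptions()
+//   options.SetHeaders(map[string]string{"X-Correlation-Id": "example"})
+//   regions, _, err := vpcService.ListRegions(options)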
+type ListRegionsOptions struct { + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListRegionsOptions : Instantiate ListRegionsOptions +func (*VpcClassicV1) NewListRegionsOptions() *ListRegionsOptions { + return &ListRegionsOptions{} +} + +// SetHeaders : Allow user to set Headers +func (options *ListRegionsOptions) SetHeaders(param map[string]string) *ListRegionsOptions { + options.Headers = param + return options +} + +// ListSecurityGroupNetworkInterfacesOptions : The ListSecurityGroupNetworkInterfaces options. +type ListSecurityGroupNetworkInterfacesOptions struct { + // The security group identifier. + SecurityGroupID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListSecurityGroupNetworkInterfacesOptions : Instantiate ListSecurityGroupNetworkInterfacesOptions +func (*VpcClassicV1) NewListSecurityGroupNetworkInterfacesOptions(securityGroupID string) *ListSecurityGroupNetworkInterfacesOptions { + return &ListSecurityGroupNetworkInterfacesOptions{ + SecurityGroupID: core.StringPtr(securityGroupID), + } +} + +// SetSecurityGroupID : Allow user to set SecurityGroupID +func (options *ListSecurityGroupNetworkInterfacesOptions) SetSecurityGroupID(securityGroupID string) *ListSecurityGroupNetworkInterfacesOptions { + options.SecurityGroupID = core.StringPtr(securityGroupID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListSecurityGroupNetworkInterfacesOptions) SetHeaders(param map[string]string) *ListSecurityGroupNetworkInterfacesOptions { + options.Headers = param + return options +} + +// ListSecurityGroupRulesOptions : The ListSecurityGroupRules options. +type ListSecurityGroupRulesOptions struct { + // The security group identifier. + SecurityGroupID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListSecurityGroupRulesOptions : Instantiate ListSecurityGroupRulesOptions +func (*VpcClassicV1) NewListSecurityGroupRulesOptions(securityGroupID string) *ListSecurityGroupRulesOptions { + return &ListSecurityGroupRulesOptions{ + SecurityGroupID: core.StringPtr(securityGroupID), + } +} + +// SetSecurityGroupID : Allow user to set SecurityGroupID +func (options *ListSecurityGroupRulesOptions) SetSecurityGroupID(securityGroupID string) *ListSecurityGroupRulesOptions { + options.SecurityGroupID = core.StringPtr(securityGroupID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListSecurityGroupRulesOptions) SetHeaders(param map[string]string) *ListSecurityGroupRulesOptions { + options.Headers = param + return options +} + +// ListSecurityGroupsOptions : The ListSecurityGroups options. +type ListSecurityGroupsOptions struct { + // Filters the collection to resources in the VPC with the specified identifier. + VPCID *string + + // Filters the collection to resources in the VPC with the specified CRN. + VPCCRN *string + + // Filters the collection to resources in the VPC with the exact specified name. 
+ VPCName *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListSecurityGroupsOptions : Instantiate ListSecurityGroupsOptions +func (*VpcClassicV1) NewListSecurityGroupsOptions() *ListSecurityGroupsOptions { + return &ListSecurityGroupsOptions{} +} + +// SetVPCID : Allow user to set VPCID +func (options *ListSecurityGroupsOptions) SetVPCID(vpcID string) *ListSecurityGroupsOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetVPCCRN : Allow user to set VPCCRN +func (options *ListSecurityGroupsOptions) SetVPCCRN(vpcCRN string) *ListSecurityGroupsOptions { + options.VPCCRN = core.StringPtr(vpcCRN) + return options +} + +// SetVPCName : Allow user to set VPCName +func (options *ListSecurityGroupsOptions) SetVPCName(vpcName string) *ListSecurityGroupsOptions { + options.VPCName = core.StringPtr(vpcName) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListSecurityGroupsOptions) SetHeaders(param map[string]string) *ListSecurityGroupsOptions { + options.Headers = param + return options +} + +// ListSubnetsOptions : The ListSubnets options. +type ListSubnetsOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListSubnetsOptions : Instantiate ListSubnetsOptions +func (*VpcClassicV1) NewListSubnetsOptions() *ListSubnetsOptions { + return &ListSubnetsOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListSubnetsOptions) SetStart(start string) *ListSubnetsOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListSubnetsOptions) SetLimit(limit int64) *ListSubnetsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListSubnetsOptions) SetHeaders(param map[string]string) *ListSubnetsOptions { + options.Headers = param + return options +} + +// ListVolumeProfilesOptions : The ListVolumeProfiles options. +type ListVolumeProfilesOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListVolumeProfilesOptions : Instantiate ListVolumeProfilesOptions +func (*VpcClassicV1) NewListVolumeProfilesOptions() *ListVolumeProfilesOptions { + return &ListVolumeProfilesOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListVolumeProfilesOptions) SetStart(start string) *ListVolumeProfilesOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListVolumeProfilesOptions) SetLimit(limit int64) *ListVolumeProfilesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListVolumeProfilesOptions) SetHeaders(param map[string]string) *ListVolumeProfilesOptions { + options.Headers = param + return options +} + +// ListVolumesOptions : The ListVolumes options. +type ListVolumesOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. 
+ Limit *int64 + + // Filters the collection to resources with the exact specified name. + Name *string + + // Filters the collection to resources in the zone with the exact specified name. + ZoneName *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListVolumesOptions : Instantiate ListVolumesOptions +func (*VpcClassicV1) NewListVolumesOptions() *ListVolumesOptions { + return &ListVolumesOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListVolumesOptions) SetStart(start string) *ListVolumesOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListVolumesOptions) SetLimit(limit int64) *ListVolumesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetName : Allow user to set Name +func (options *ListVolumesOptions) SetName(name string) *ListVolumesOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetZoneName : Allow user to set ZoneName +func (options *ListVolumesOptions) SetZoneName(zoneName string) *ListVolumesOptions { + options.ZoneName = core.StringPtr(zoneName) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListVolumesOptions) SetHeaders(param map[string]string) *ListVolumesOptions { + options.Headers = param + return options +} + +// ListVPCAddressPrefixesOptions : The ListVPCAddressPrefixes options. +type ListVPCAddressPrefixesOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListVPCAddressPrefixesOptions : Instantiate ListVPCAddressPrefixesOptions +func (*VpcClassicV1) NewListVPCAddressPrefixesOptions(vpcID string) *ListVPCAddressPrefixesOptions { + return &ListVPCAddressPrefixesOptions{ + VPCID: core.StringPtr(vpcID), + } +} + +// SetVPCID : Allow user to set VPCID +func (options *ListVPCAddressPrefixesOptions) SetVPCID(vpcID string) *ListVPCAddressPrefixesOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetStart : Allow user to set Start +func (options *ListVPCAddressPrefixesOptions) SetStart(start string) *ListVPCAddressPrefixesOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListVPCAddressPrefixesOptions) SetLimit(limit int64) *ListVPCAddressPrefixesOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListVPCAddressPrefixesOptions) SetHeaders(param map[string]string) *ListVPCAddressPrefixesOptions { + options.Headers = param + return options +} + +// ListVPCRoutesOptions : The ListVPCRoutes options. +type ListVPCRoutesOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // Filters the collection to resources in the zone with the exact specified name. 
+	ZoneName *string
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListVPCRoutesOptions : Instantiate ListVPCRoutesOptions
+func (*VpcClassicV1) NewListVPCRoutesOptions(vpcID string) *ListVPCRoutesOptions {
+	return &ListVPCRoutesOptions{
+		VPCID: core.StringPtr(vpcID),
+	}
+}
+
+// SetVPCID : Allow user to set VPCID
+func (options *ListVPCRoutesOptions) SetVPCID(vpcID string) *ListVPCRoutesOptions {
+	options.VPCID = core.StringPtr(vpcID)
+	return options
+}
+
+// SetZoneName : Allow user to set ZoneName
+func (options *ListVPCRoutesOptions) SetZoneName(zoneName string) *ListVPCRoutesOptions {
+	options.ZoneName = core.StringPtr(zoneName)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListVPCRoutesOptions) SetHeaders(param map[string]string) *ListVPCRoutesOptions {
+	options.Headers = param
+	return options
+}
+
+// ListVpcsOptions : The ListVpcs options.
+type ListVpcsOptions struct {
+	// A server-supplied token determining what resource to start the page on.
+	Start *string
+
+	// The number of resources to return on a page.
+	Limit *int64
+
+	// Filters the collection by the supplied `classic_access` value. If `true`, only Classic Access VPCs will be
+	// returned. If `false`, only VPCs without Classic Access will be returned.
+	ClassicAccess *bool
+
+	// Allows users to set headers on API requests
+	Headers map[string]string
+}
+
+// NewListVpcsOptions : Instantiate ListVpcsOptions
+func (*VpcClassicV1) NewListVpcsOptions() *ListVpcsOptions {
+	return &ListVpcsOptions{}
+}
+
+// SetStart : Allow user to set Start
+func (options *ListVpcsOptions) SetStart(start string) *ListVpcsOptions {
+	options.Start = core.StringPtr(start)
+	return options
+}
+
+// SetLimit : Allow user to set Limit
+func (options *ListVpcsOptions) SetLimit(limit int64) *ListVpcsOptions {
+	options.Limit = core.Int64Ptr(limit)
+	return options
+}
+
+// SetClassicAccess : Allow user to set ClassicAccess
+func (options *ListVpcsOptions) SetClassicAccess(classicAccess bool) *ListVpcsOptions {
+	options.ClassicAccess = core.BoolPtr(classicAccess)
+	return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *ListVpcsOptions) SetHeaders(param map[string]string) *ListVpcsOptions {
+	options.Headers = param
+	return options
+}
+
+// ListVPNGatewayConnectionLocalCIDRsOptions : The ListVPNGatewayConnectionLocalCIDRs options.
+type ListVPNGatewayConnectionLocalCIDRsOptions struct {
+	// The VPN gateway identifier.
+	VPNGatewayID *string `validate:"required,ne="`
+
+	// The VPN gateway connection identifier.
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListVPNGatewayConnectionLocalCIDRsOptions : Instantiate ListVPNGatewayConnectionLocalCIDRsOptions +func (*VpcClassicV1) NewListVPNGatewayConnectionLocalCIDRsOptions(vpnGatewayID string, id string) *ListVPNGatewayConnectionLocalCIDRsOptions { + return &ListVPNGatewayConnectionLocalCIDRsOptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *ListVPNGatewayConnectionLocalCIDRsOptions) SetVPNGatewayID(vpnGatewayID string) *ListVPNGatewayConnectionLocalCIDRsOptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *ListVPNGatewayConnectionLocalCIDRsOptions) SetID(id string) *ListVPNGatewayConnectionLocalCIDRsOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListVPNGatewayConnectionLocalCIDRsOptions) SetHeaders(param map[string]string) *ListVPNGatewayConnectionLocalCIDRsOptions { + options.Headers = param + return options +} + +// ListVPNGatewayConnectionPeerCIDRsOptions : The ListVPNGatewayConnectionPeerCIDRs options. +type ListVPNGatewayConnectionPeerCIDRsOptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListVPNGatewayConnectionPeerCIDRsOptions : Instantiate ListVPNGatewayConnectionPeerCIDRsOptions +func (*VpcClassicV1) NewListVPNGatewayConnectionPeerCIDRsOptions(vpnGatewayID string, id string) *ListVPNGatewayConnectionPeerCIDRsOptions { + return &ListVPNGatewayConnectionPeerCIDRsOptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *ListVPNGatewayConnectionPeerCIDRsOptions) SetVPNGatewayID(vpnGatewayID string) *ListVPNGatewayConnectionPeerCIDRsOptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *ListVPNGatewayConnectionPeerCIDRsOptions) SetID(id string) *ListVPNGatewayConnectionPeerCIDRsOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListVPNGatewayConnectionPeerCIDRsOptions) SetHeaders(param map[string]string) *ListVPNGatewayConnectionPeerCIDRsOptions { + options.Headers = param + return options +} + +// ListVPNGatewayConnectionsOptions : The ListVPNGatewayConnections options. +type ListVPNGatewayConnectionsOptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // Filters the collection to VPN gateway connections with the specified status. 
+ Status *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListVPNGatewayConnectionsOptions : Instantiate ListVPNGatewayConnectionsOptions +func (*VpcClassicV1) NewListVPNGatewayConnectionsOptions(vpnGatewayID string) *ListVPNGatewayConnectionsOptions { + return &ListVPNGatewayConnectionsOptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *ListVPNGatewayConnectionsOptions) SetVPNGatewayID(vpnGatewayID string) *ListVPNGatewayConnectionsOptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetStatus : Allow user to set Status +func (options *ListVPNGatewayConnectionsOptions) SetStatus(status string) *ListVPNGatewayConnectionsOptions { + options.Status = core.StringPtr(status) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListVPNGatewayConnectionsOptions) SetHeaders(param map[string]string) *ListVPNGatewayConnectionsOptions { + options.Headers = param + return options +} + +// ListVPNGatewaysOptions : The ListVPNGateways options. +type ListVPNGatewaysOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 + + // Filters the collection to resources within one of the resource groups identified in a comma-separated list of + // resource group identifiers. + ResourceGroupID *string + + // Filters the collection to VPN gateways with the specified mode. + Mode *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListVPNGatewaysOptions.Mode property. +// Filters the collection to VPN gateways with the specified mode. +const ( + ListVPNGatewaysOptionsModePolicyConst = "policy" + ListVPNGatewaysOptionsModeRouteConst = "route" +) + +// NewListVPNGatewaysOptions : Instantiate ListVPNGatewaysOptions +func (*VpcClassicV1) NewListVPNGatewaysOptions() *ListVPNGatewaysOptions { + return &ListVPNGatewaysOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListVPNGatewaysOptions) SetStart(start string) *ListVPNGatewaysOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListVPNGatewaysOptions) SetLimit(limit int64) *ListVPNGatewaysOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetResourceGroupID : Allow user to set ResourceGroupID +func (options *ListVPNGatewaysOptions) SetResourceGroupID(resourceGroupID string) *ListVPNGatewaysOptions { + options.ResourceGroupID = core.StringPtr(resourceGroupID) + return options +} + +// SetMode : Allow user to set Mode +func (options *ListVPNGatewaysOptions) SetMode(mode string) *ListVPNGatewaysOptions { + options.Mode = core.StringPtr(mode) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListVPNGatewaysOptions) SetHeaders(param map[string]string) *ListVPNGatewaysOptions { + options.Headers = param + return options +} + +// LoadBalancer : LoadBalancer struct +type LoadBalancer struct { + // The date and time that this load balancer was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The load balancer's CRN. + CRN *string `json:"crn" validate:"required"` + + // Fully qualified domain name assigned to this load balancer. 
+ Hostname *string `json:"hostname" validate:"required"` + + // The load balancer's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this load balancer. + ID *string `json:"id" validate:"required"` + + // The type of this load balancer, public or private. + IsPublic *bool `json:"is_public" validate:"required"` + + // The listeners of this load balancer. + Listeners []LoadBalancerListenerReference `json:"listeners" validate:"required"` + + // The logging configuration for this load balancer. + Logging *LoadBalancerLogging `json:"logging" validate:"required"` + + // The unique user-defined name for this load balancer. + Name *string `json:"name" validate:"required"` + + // The operating status of this load balancer. + OperatingStatus *string `json:"operating_status" validate:"required"` + + // The pools of this load balancer. + Pools []LoadBalancerPoolReference `json:"pools" validate:"required"` + + // The private IP addresses assigned to this load balancer. + PrivateIps []IP `json:"private_ips" validate:"required"` + + // The provisioning status of this load balancer. + ProvisioningStatus *string `json:"provisioning_status" validate:"required"` + + // The public IP addresses assigned to this load balancer. + // + // Applicable only for public load balancers. + PublicIps []IP `json:"public_ips" validate:"required"` + + // The resource group for this load balancer. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The subnets this load balancer is part of. + Subnets []SubnetReference `json:"subnets" validate:"required"` +} + +// Constants associated with the LoadBalancer.OperatingStatus property. +// The operating status of this load balancer. +const ( + LoadBalancerOperatingStatusOfflineConst = "offline" + LoadBalancerOperatingStatusOnlineConst = "online" +) + +// Constants associated with the LoadBalancer.ProvisioningStatus property. +// The provisioning status of this load balancer. +const ( + LoadBalancerProvisioningStatusActiveConst = "active" + LoadBalancerProvisioningStatusCreatePendingConst = "create_pending" + LoadBalancerProvisioningStatusDeletePendingConst = "delete_pending" + LoadBalancerProvisioningStatusFailedConst = "failed" + LoadBalancerProvisioningStatusMaintenancePendingConst = "maintenance_pending" + LoadBalancerProvisioningStatusUpdatePendingConst = "update_pending" +) + +// UnmarshalLoadBalancer unmarshals an instance of LoadBalancer from the specified map of raw messages. 
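+// The unmarshaller fills a new LoadBalancer field by field and then copies it
+// into `result` via reflection, so `result` must be a **LoadBalancer. A
+// minimal sketch of direct use (the SDK normally calls this internally;
+// `rawMap` is an assumed map[string]json.RawMessage):
+//
+//   var lb *LoadBalancer
+//   err := UnmarshalLoadBalancer(rawMap, &lb)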
+func UnmarshalLoadBalancer(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancer) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "hostname", &obj.Hostname) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "is_public", &obj.IsPublic) + if err != nil { + return + } + err = core.UnmarshalModel(m, "listeners", &obj.Listeners, UnmarshalLoadBalancerListenerReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "logging", &obj.Logging, UnmarshalLoadBalancerLogging) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "operating_status", &obj.OperatingStatus) + if err != nil { + return + } + err = core.UnmarshalModel(m, "pools", &obj.Pools, UnmarshalLoadBalancerPoolReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "private_ips", &obj.PrivateIps, UnmarshalIP) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "provisioning_status", &obj.ProvisioningStatus) + if err != nil { + return + } + err = core.UnmarshalModel(m, "public_ips", &obj.PublicIps, UnmarshalIP) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnets", &obj.Subnets, UnmarshalSubnetReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerCollection : LoadBalancerCollection struct +type LoadBalancerCollection struct { + // Collection of load balancers. + LoadBalancers []LoadBalancer `json:"load_balancers" validate:"required"` +} + +// UnmarshalLoadBalancerCollection unmarshals an instance of LoadBalancerCollection from the specified map of raw messages. +func UnmarshalLoadBalancerCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerCollection) + err = core.UnmarshalModel(m, "load_balancers", &obj.LoadBalancers, UnmarshalLoadBalancer) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListener : LoadBalancerListener struct +type LoadBalancerListener struct { + // If set to `true`, this listener will accept and forward PROXY protocol information. Supported by load balancers in + // the `application` family (otherwise always `false`). + AcceptProxyProtocol *bool `json:"accept_proxy_protocol" validate:"required"` + + // The certificate instance used for SSL termination. It is applicable only to `https` + // protocol. + CertificateInstance *CertificateInstanceReference `json:"certificate_instance,omitempty"` + + // The connection limit of the listener. + ConnectionLimit *int64 `json:"connection_limit,omitempty"` + + // The date and time that this listener was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The default pool associated with the listener. + DefaultPool *LoadBalancerPoolReference `json:"default_pool,omitempty"` + + // The listener's canonical URL. 
+ Href *string `json:"href" validate:"required"` + + // The unique identifier for this load balancer listener. + ID *string `json:"id" validate:"required"` + + // An array of policies for this listener. + Policies []LoadBalancerListenerPolicyReference `json:"policies,omitempty"` + + // The listener port number. Each listener in the load balancer must have a unique + // `port` and `protocol` combination. + Port *int64 `json:"port" validate:"required"` + + // The listener protocol. + Protocol *string `json:"protocol" validate:"required"` + + // The provisioning status of this listener. + ProvisioningStatus *string `json:"provisioning_status" validate:"required"` +} + +// Constants associated with the LoadBalancerListener.Protocol property. +// The listener protocol. +const ( + LoadBalancerListenerProtocolHTTPConst = "http" + LoadBalancerListenerProtocolHTTPSConst = "https" + LoadBalancerListenerProtocolTCPConst = "tcp" +) + +// Constants associated with the LoadBalancerListener.ProvisioningStatus property. +// The provisioning status of this listener. +const ( + LoadBalancerListenerProvisioningStatusActiveConst = "active" + LoadBalancerListenerProvisioningStatusCreatePendingConst = "create_pending" + LoadBalancerListenerProvisioningStatusDeletePendingConst = "delete_pending" + LoadBalancerListenerProvisioningStatusFailedConst = "failed" + LoadBalancerListenerProvisioningStatusMaintenancePendingConst = "maintenance_pending" + LoadBalancerListenerProvisioningStatusUpdatePendingConst = "update_pending" +) + +// UnmarshalLoadBalancerListener unmarshals an instance of LoadBalancerListener from the specified map of raw messages. +func UnmarshalLoadBalancerListener(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListener) + err = core.UnmarshalPrimitive(m, "accept_proxy_protocol", &obj.AcceptProxyProtocol) + if err != nil { + return + } + err = core.UnmarshalModel(m, "certificate_instance", &obj.CertificateInstance, UnmarshalCertificateInstanceReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "connection_limit", &obj.ConnectionLimit) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "default_pool", &obj.DefaultPool, UnmarshalLoadBalancerPoolReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "policies", &obj.Policies, UnmarshalLoadBalancerListenerPolicyReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "provisioning_status", &obj.ProvisioningStatus) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerCollection : LoadBalancerListenerCollection struct +type LoadBalancerListenerCollection struct { + // Collection of listeners. + Listeners []LoadBalancerListener `json:"listeners" validate:"required"` +} + +// UnmarshalLoadBalancerListenerCollection unmarshals an instance of LoadBalancerListenerCollection from the specified map of raw messages. 
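+// It delegates each element of `listeners` to UnmarshalLoadBalancerListener
+// through core.UnmarshalModel; the SDK invokes it while decoding a
+// ListLoadBalancerListeners response, so direct calls are rarely needed.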
+func UnmarshalLoadBalancerListenerCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerCollection) + err = core.UnmarshalModel(m, "listeners", &obj.Listeners, UnmarshalLoadBalancerListener) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPatch : LoadBalancerListenerPatch struct +type LoadBalancerListenerPatch struct { + // If set to `true`, this listener will accept and forward PROXY protocol information. Supported by load balancers in + // the `application` family (otherwise always `false`). + AcceptProxyProtocol *bool `json:"accept_proxy_protocol,omitempty"` + + // The certificate instance used for SSL termination. It is applicable only to `https` + // protocol. + CertificateInstance CertificateInstanceIdentityIntf `json:"certificate_instance,omitempty"` + + // The connection limit of the listener. + ConnectionLimit *int64 `json:"connection_limit,omitempty"` + + // The default pool associated with the listener. The specified pool must: + // + // - Belong to this load balancer + // - Have the same `protocol` as this listener + // - Not already be the default pool for another listener. + DefaultPool LoadBalancerPoolIdentityIntf `json:"default_pool,omitempty"` + + // The listener port number. Each listener in the load balancer must have a unique + // `port` and `protocol` combination. + Port *int64 `json:"port,omitempty"` + + // The listener protocol. + Protocol *string `json:"protocol,omitempty"` +} + +// Constants associated with the LoadBalancerListenerPatch.Protocol property. +// The listener protocol. +const ( + LoadBalancerListenerPatchProtocolHTTPConst = "http" + LoadBalancerListenerPatchProtocolHTTPSConst = "https" + LoadBalancerListenerPatchProtocolTCPConst = "tcp" +) + +// UnmarshalLoadBalancerListenerPatch unmarshals an instance of LoadBalancerListenerPatch from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPatch) + err = core.UnmarshalPrimitive(m, "accept_proxy_protocol", &obj.AcceptProxyProtocol) + if err != nil { + return + } + err = core.UnmarshalModel(m, "certificate_instance", &obj.CertificateInstance, UnmarshalCertificateInstanceIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "connection_limit", &obj.ConnectionLimit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "default_pool", &obj.DefaultPool, UnmarshalLoadBalancerPoolIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the LoadBalancerListenerPatch +func (loadBalancerListenerPatch *LoadBalancerListenerPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(loadBalancerListenerPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// LoadBalancerListenerPolicy : LoadBalancerListenerPolicy struct +type LoadBalancerListenerPolicy struct { + // The policy action. + Action *string `json:"action" validate:"required"` + + // The date and time that this policy was created. 
+ CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The listener policy's canonical URL. + Href *string `json:"href" validate:"required"` + + // The policy's unique identifier. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this policy. + Name *string `json:"name" validate:"required"` + + // Priority of the policy. Lower value indicates higher priority. + Priority *int64 `json:"priority" validate:"required"` + + // The provisioning status of this policy. + ProvisioningStatus *string `json:"provisioning_status" validate:"required"` + + // The rules of this policy. + Rules []LoadBalancerListenerPolicyRuleReference `json:"rules" validate:"required"` + + // `LoadBalancerPoolReference` is in the response if `action` is `forward`. + // `LoadBalancerListenerPolicyRedirectURL` is in the response if `action` is `redirect`. + Target LoadBalancerListenerPolicyTargetIntf `json:"target,omitempty"` +} + +// Constants associated with the LoadBalancerListenerPolicy.Action property. +// The policy action. +const ( + LoadBalancerListenerPolicyActionForwardConst = "forward" + LoadBalancerListenerPolicyActionRedirectConst = "redirect" + LoadBalancerListenerPolicyActionRejectConst = "reject" +) + +// Constants associated with the LoadBalancerListenerPolicy.ProvisioningStatus property. +// The provisioning status of this policy. +const ( + LoadBalancerListenerPolicyProvisioningStatusActiveConst = "active" + LoadBalancerListenerPolicyProvisioningStatusCreatePendingConst = "create_pending" + LoadBalancerListenerPolicyProvisioningStatusDeletePendingConst = "delete_pending" + LoadBalancerListenerPolicyProvisioningStatusFailedConst = "failed" + LoadBalancerListenerPolicyProvisioningStatusMaintenancePendingConst = "maintenance_pending" + LoadBalancerListenerPolicyProvisioningStatusUpdatePendingConst = "update_pending" +) + +// UnmarshalLoadBalancerListenerPolicy unmarshals an instance of LoadBalancerListenerPolicy from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicy(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicy) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "provisioning_status", &obj.ProvisioningStatus) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalLoadBalancerListenerPolicyRuleReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalLoadBalancerListenerPolicyTarget) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyCollection : LoadBalancerListenerPolicyCollection struct +type LoadBalancerListenerPolicyCollection struct { + // Collection of policies. + Policies []LoadBalancerListenerPolicy `json:"policies" validate:"required"` +} + +// UnmarshalLoadBalancerListenerPolicyCollection unmarshals an instance of LoadBalancerListenerPolicyCollection from the specified map of raw messages. 
+func UnmarshalLoadBalancerListenerPolicyCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyCollection) + err = core.UnmarshalModel(m, "policies", &obj.Policies, UnmarshalLoadBalancerListenerPolicy) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyPatch : LoadBalancerListenerPolicyPatch struct +type LoadBalancerListenerPolicyPatch struct { + // The user-defined name for this policy. Names must be unique within the load balancer listener the policy resides in. + Name *string `json:"name,omitempty"` + + // Priority of the policy. Lower value indicates higher priority. + Priority *int64 `json:"priority,omitempty"` + + // When `action` is `forward`, `LoadBalancerPoolIdentity` specifies which pool the load + // balancer forwards the traffic to. When `action` is `redirect`, + // `LoadBalancerListenerPolicyRedirectURLPatch` specifies the url and http + // status code used in the redirect response. + Target LoadBalancerListenerPolicyTargetPatchIntf `json:"target,omitempty"` +} + +// UnmarshalLoadBalancerListenerPolicyPatch unmarshals an instance of LoadBalancerListenerPolicyPatch from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalLoadBalancerListenerPolicyTargetPatch) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the LoadBalancerListenerPolicyPatch +func (loadBalancerListenerPolicyPatch *LoadBalancerListenerPolicyPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(loadBalancerListenerPolicyPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// LoadBalancerListenerPolicyPrototype : LoadBalancerListenerPolicyPrototype struct +type LoadBalancerListenerPolicyPrototype struct { + // The policy action. + Action *string `json:"action" validate:"required"` + + // The user-defined name for this policy. Names must be unique within the load balancer listener the policy resides in. + Name *string `json:"name,omitempty"` + + // Priority of the policy. Lower value indicates higher priority. + Priority *int64 `json:"priority" validate:"required"` + + // An array of rules for this policy. + Rules []LoadBalancerListenerPolicyRulePrototype `json:"rules,omitempty"` + + // When `action` is `forward`, `LoadBalancerPoolIdentity` is required to specify which + // pool the load balancer forwards the traffic to. When `action` is `redirect`, + // `LoadBalancerListenerPolicyRedirectURLPrototype` is required to specify the url and + // http status code used in the redirect response. + Target LoadBalancerListenerPolicyTargetPrototypeIntf `json:"target,omitempty"` +} + +// Constants associated with the LoadBalancerListenerPolicyPrototype.Action property. +// The policy action. 
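+// A minimal construction sketch (hedged: per the doc comment above, the
+// `forward` action also requires a pool identity target, elided here along
+// with error handling):
+//
+//   policy, err := vpcService.NewLoadBalancerListenerPolicyPrototype(
+//       LoadBalancerListenerPolicyPrototypeActionForwardConst, 5)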
+const ( + LoadBalancerListenerPolicyPrototypeActionForwardConst = "forward" + LoadBalancerListenerPolicyPrototypeActionRedirectConst = "redirect" + LoadBalancerListenerPolicyPrototypeActionRejectConst = "reject" +) + +// NewLoadBalancerListenerPolicyPrototype : Instantiate LoadBalancerListenerPolicyPrototype (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerListenerPolicyPrototype(action string, priority int64) (model *LoadBalancerListenerPolicyPrototype, err error) { + model = &LoadBalancerListenerPolicyPrototype{ + Action: core.StringPtr(action), + Priority: core.Int64Ptr(priority), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalLoadBalancerListenerPolicyPrototype unmarshals an instance of LoadBalancerListenerPolicyPrototype from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyPrototype) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "priority", &obj.Priority) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalLoadBalancerListenerPolicyRulePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalLoadBalancerListenerPolicyTargetPrototype) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyReference : LoadBalancerListenerPolicyReference struct +type LoadBalancerListenerPolicyReference struct { + // The listener policy's canonical URL. + Href *string `json:"href" validate:"required"` + + // The policy's unique identifier. + ID *string `json:"id" validate:"required"` +} + +// UnmarshalLoadBalancerListenerPolicyReference unmarshals an instance of LoadBalancerListenerPolicyReference from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyRule : LoadBalancerListenerPolicyRule struct +type LoadBalancerListenerPolicyRule struct { + // The condition of the rule. + Condition *string `json:"condition" validate:"required"` + + // The date and time that this rule was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // HTTP header field. This is only applicable to "header" rule type. + Field *string `json:"field,omitempty"` + + // The rule's canonical URL. + Href *string `json:"href" validate:"required"` + + // The rule's unique identifier. + ID *string `json:"id" validate:"required"` + + // The provisioning status of this rule. + ProvisioningStatus *string `json:"provisioning_status" validate:"required"` + + // The type of the rule. + Type *string `json:"type" validate:"required"` + + // Value to be matched for rule condition. + Value *string `json:"value" validate:"required"` +} + +// Constants associated with the LoadBalancerListenerPolicyRule.Condition property. +// The condition of the rule. 
+const ( + LoadBalancerListenerPolicyRuleConditionContainsConst = "contains" + LoadBalancerListenerPolicyRuleConditionEqualsConst = "equals" + LoadBalancerListenerPolicyRuleConditionMatchesRegexConst = "matches_regex" +) + +// Constants associated with the LoadBalancerListenerPolicyRule.ProvisioningStatus property. +// The provisioning status of this rule. +const ( + LoadBalancerListenerPolicyRuleProvisioningStatusActiveConst = "active" + LoadBalancerListenerPolicyRuleProvisioningStatusCreatePendingConst = "create_pending" + LoadBalancerListenerPolicyRuleProvisioningStatusDeletePendingConst = "delete_pending" + LoadBalancerListenerPolicyRuleProvisioningStatusFailedConst = "failed" + LoadBalancerListenerPolicyRuleProvisioningStatusMaintenancePendingConst = "maintenance_pending" + LoadBalancerListenerPolicyRuleProvisioningStatusUpdatePendingConst = "update_pending" +) + +// Constants associated with the LoadBalancerListenerPolicyRule.Type property. +// The type of the rule. +const ( + LoadBalancerListenerPolicyRuleTypeHeaderConst = "header" + LoadBalancerListenerPolicyRuleTypeHostnameConst = "hostname" + LoadBalancerListenerPolicyRuleTypePathConst = "path" +) + +// UnmarshalLoadBalancerListenerPolicyRule unmarshals an instance of LoadBalancerListenerPolicyRule from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyRule(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyRule) + err = core.UnmarshalPrimitive(m, "condition", &obj.Condition) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "field", &obj.Field) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "provisioning_status", &obj.ProvisioningStatus) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyRuleCollection : LoadBalancerListenerPolicyRuleCollection struct +type LoadBalancerListenerPolicyRuleCollection struct { + // Collection of rules. + Rules []LoadBalancerListenerPolicyRule `json:"rules" validate:"required"` +} + +// UnmarshalLoadBalancerListenerPolicyRuleCollection unmarshals an instance of LoadBalancerListenerPolicyRuleCollection from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyRuleCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyRuleCollection) + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalLoadBalancerListenerPolicyRule) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyRulePatch : LoadBalancerListenerPolicyRulePatch struct +type LoadBalancerListenerPolicyRulePatch struct { + // The condition of the rule. + Condition *string `json:"condition,omitempty"` + + // HTTP header field. This is only applicable to "header" rule type. + Field *string `json:"field,omitempty"` + + // The type of the rule. + Type *string `json:"type,omitempty"` + + // Value to be matched for rule condition. 
+ Value *string `json:"value,omitempty"` +} + +// Constants associated with the LoadBalancerListenerPolicyRulePatch.Condition property. +// The condition of the rule. +const ( + LoadBalancerListenerPolicyRulePatchConditionContainsConst = "contains" + LoadBalancerListenerPolicyRulePatchConditionEqualsConst = "equals" + LoadBalancerListenerPolicyRulePatchConditionMatchesRegexConst = "matches_regex" +) + +// Constants associated with the LoadBalancerListenerPolicyRulePatch.Type property. +// The type of the rule. +const ( + LoadBalancerListenerPolicyRulePatchTypeHeaderConst = "header" + LoadBalancerListenerPolicyRulePatchTypeHostnameConst = "hostname" + LoadBalancerListenerPolicyRulePatchTypePathConst = "path" +) + +// UnmarshalLoadBalancerListenerPolicyRulePatch unmarshals an instance of LoadBalancerListenerPolicyRulePatch from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyRulePatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyRulePatch) + err = core.UnmarshalPrimitive(m, "condition", &obj.Condition) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "field", &obj.Field) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the LoadBalancerListenerPolicyRulePatch +func (loadBalancerListenerPolicyRulePatch *LoadBalancerListenerPolicyRulePatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(loadBalancerListenerPolicyRulePatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// LoadBalancerListenerPolicyRulePrototype : LoadBalancerListenerPolicyRulePrototype struct +type LoadBalancerListenerPolicyRulePrototype struct { + // The condition of the rule. + Condition *string `json:"condition" validate:"required"` + + // HTTP header field. This is only applicable to "header" rule type. + Field *string `json:"field,omitempty"` + + // The type of the rule. + Type *string `json:"type" validate:"required"` + + // Value to be matched for rule condition. + Value *string `json:"value" validate:"required"` +} + +// Constants associated with the LoadBalancerListenerPolicyRulePrototype.Condition property. +// The condition of the rule. +const ( + LoadBalancerListenerPolicyRulePrototypeConditionContainsConst = "contains" + LoadBalancerListenerPolicyRulePrototypeConditionEqualsConst = "equals" + LoadBalancerListenerPolicyRulePrototypeConditionMatchesRegexConst = "matches_regex" +) + +// Constants associated with the LoadBalancerListenerPolicyRulePrototype.Type property. +// The type of the rule. 
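+// A minimal construction sketch matching request paths that contain "/api"
+// (the condition, type, and value shown are illustrative):
+//
+//   rule, err := vpcService.NewLoadBalancerListenerPolicyRulePrototype(
+//       LoadBalancerListenerPolicyRulePrototypeConditionContainsConst,
+//       LoadBalancerListenerPolicyRulePrototypeTypePathConst,
+//       "/api")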
+const ( + LoadBalancerListenerPolicyRulePrototypeTypeHeaderConst = "header" + LoadBalancerListenerPolicyRulePrototypeTypeHostnameConst = "hostname" + LoadBalancerListenerPolicyRulePrototypeTypePathConst = "path" +) + +// NewLoadBalancerListenerPolicyRulePrototype : Instantiate LoadBalancerListenerPolicyRulePrototype (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerListenerPolicyRulePrototype(condition string, typeVar string, value string) (model *LoadBalancerListenerPolicyRulePrototype, err error) { + model = &LoadBalancerListenerPolicyRulePrototype{ + Condition: core.StringPtr(condition), + Type: core.StringPtr(typeVar), + Value: core.StringPtr(value), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalLoadBalancerListenerPolicyRulePrototype unmarshals an instance of LoadBalancerListenerPolicyRulePrototype from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyRulePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyRulePrototype) + err = core.UnmarshalPrimitive(m, "condition", &obj.Condition) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "field", &obj.Field) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyRuleReference : LoadBalancerListenerPolicyRuleReference struct +type LoadBalancerListenerPolicyRuleReference struct { + // The rule's canonical URL. + Href *string `json:"href" validate:"required"` + + // The rule's unique identifier. + ID *string `json:"id" validate:"required"` +} + +// UnmarshalLoadBalancerListenerPolicyRuleReference unmarshals an instance of LoadBalancerListenerPolicyRuleReference from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyRuleReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyRuleReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTarget : `LoadBalancerPoolReference` is in the response if `action` is `forward`. +// `LoadBalancerListenerPolicyRedirectURL` is in the response if `action` is `redirect`. +// Models which "extend" this model: +// - LoadBalancerListenerPolicyTargetLoadBalancerPoolReference +// - LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL +type LoadBalancerListenerPolicyTarget struct { + // The pool's canonical URL. + Href *string `json:"href,omitempty"` + + // The unique identifier for this load balancer pool. + ID *string `json:"id,omitempty"` + + // The user-defined name for this load balancer pool. + Name *string `json:"name,omitempty"` + + // The http status code in the redirect response. + HTTPStatusCode *int64 `json:"http_status_code,omitempty"` + + // The redirect target URL. 
+ URL *string `json:"url,omitempty"` +} + +func (*LoadBalancerListenerPolicyTarget) isaLoadBalancerListenerPolicyTarget() bool { + return true +} + +type LoadBalancerListenerPolicyTargetIntf interface { + isaLoadBalancerListenerPolicyTarget() bool +} + +// UnmarshalLoadBalancerListenerPolicyTarget unmarshals an instance of LoadBalancerListenerPolicyTarget from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTarget(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTarget) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "http_status_code", &obj.HTTPStatusCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPatch : When `action` is `forward`, `LoadBalancerPoolIdentity` specifies which pool the load balancer forwards the traffic +// to. When `action` is `redirect`, +// `LoadBalancerListenerPolicyRedirectURLPatch` specifies the url and http status code used in the redirect response. +// Models which "extend" this model: +// - LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity +// - LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch +type LoadBalancerListenerPolicyTargetPatch struct { + // The unique identifier for this load balancer pool. + ID *string `json:"id,omitempty"` + + // The pool's canonical URL. + Href *string `json:"href,omitempty"` + + // The http status code in the redirect response. + HTTPStatusCode *int64 `json:"http_status_code,omitempty"` + + // The redirect target URL. + URL *string `json:"url,omitempty"` +} + +func (*LoadBalancerListenerPolicyTargetPatch) isaLoadBalancerListenerPolicyTargetPatch() bool { + return true +} + +type LoadBalancerListenerPolicyTargetPatchIntf interface { + isaLoadBalancerListenerPolicyTargetPatch() bool +} + +// UnmarshalLoadBalancerListenerPolicyTargetPatch unmarshals an instance of LoadBalancerListenerPolicyTargetPatch from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTargetPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPatch) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "http_status_code", &obj.HTTPStatusCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPrototype : When `action` is `forward`, `LoadBalancerPoolIdentity` is required to specify which pool the load balancer forwards +// the traffic to. When `action` is `redirect`, +// `LoadBalancerListenerPolicyRedirectURLPrototype` is required to specify the url and http status code used in the +// redirect response. 
+// Models which "extend" this model: +// - LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity +// - LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype +type LoadBalancerListenerPolicyTargetPrototype struct { + // The unique identifier for this load balancer pool. + ID *string `json:"id,omitempty"` + + // The pool's canonical URL. + Href *string `json:"href,omitempty"` + + // The http status code in the redirect response. + HTTPStatusCode *int64 `json:"http_status_code,omitempty"` + + // The redirect target URL. + URL *string `json:"url,omitempty"` +} + +func (*LoadBalancerListenerPolicyTargetPrototype) isaLoadBalancerListenerPolicyTargetPrototype() bool { + return true +} + +type LoadBalancerListenerPolicyTargetPrototypeIntf interface { + isaLoadBalancerListenerPolicyTargetPrototype() bool +} + +// UnmarshalLoadBalancerListenerPolicyTargetPrototype unmarshals an instance of LoadBalancerListenerPolicyTargetPrototype from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTargetPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPrototype) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "http_status_code", &obj.HTTPStatusCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPrototypeLoadBalancerContext : LoadBalancerListenerPrototypeLoadBalancerContext struct +type LoadBalancerListenerPrototypeLoadBalancerContext struct { + // If set to `true`, this listener will accept and forward PROXY protocol information. Supported by load balancers in + // the `application` family (otherwise always `false`). + AcceptProxyProtocol *bool `json:"accept_proxy_protocol,omitempty"` + + // The connection limit of the listener. + ConnectionLimit *int64 `json:"connection_limit,omitempty"` + + // The default pool associated with the listener. + DefaultPool *LoadBalancerPoolIdentityByName `json:"default_pool,omitempty"` + + // The listener port number. Each listener in the load balancer must have a unique + // `port` and `protocol` combination. + Port *int64 `json:"port" validate:"required"` + + // The listener protocol. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the LoadBalancerListenerPrototypeLoadBalancerContext.Protocol property. +// The listener protocol. 
+const ( + LoadBalancerListenerPrototypeLoadBalancerContextProtocolHTTPConst = "http" + LoadBalancerListenerPrototypeLoadBalancerContextProtocolHTTPSConst = "https" + LoadBalancerListenerPrototypeLoadBalancerContextProtocolTCPConst = "tcp" +) + +// NewLoadBalancerListenerPrototypeLoadBalancerContext : Instantiate LoadBalancerListenerPrototypeLoadBalancerContext (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerListenerPrototypeLoadBalancerContext(port int64, protocol string) (model *LoadBalancerListenerPrototypeLoadBalancerContext, err error) { + model = &LoadBalancerListenerPrototypeLoadBalancerContext{ + Port: core.Int64Ptr(port), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalLoadBalancerListenerPrototypeLoadBalancerContext unmarshals an instance of LoadBalancerListenerPrototypeLoadBalancerContext from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPrototypeLoadBalancerContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPrototypeLoadBalancerContext) + err = core.UnmarshalPrimitive(m, "accept_proxy_protocol", &obj.AcceptProxyProtocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "connection_limit", &obj.ConnectionLimit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "default_pool", &obj.DefaultPool, UnmarshalLoadBalancerPoolIdentityByName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerReference : LoadBalancerListenerReference struct +type LoadBalancerListenerReference struct { + // The listener's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this load balancer listener. + ID *string `json:"id" validate:"required"` +} + +// UnmarshalLoadBalancerListenerReference unmarshals an instance of LoadBalancerListenerReference from the specified map of raw messages. +func UnmarshalLoadBalancerListenerReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerLogging : The logging configuration for this load balancer. +type LoadBalancerLogging struct { + // The datapath logging configuration for this load balancer. + Datapath *LoadBalancerLoggingDatapath `json:"datapath,omitempty"` +} + +// UnmarshalLoadBalancerLogging unmarshals an instance of LoadBalancerLogging from the specified map of raw messages. +func UnmarshalLoadBalancerLogging(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerLogging) + err = core.UnmarshalModel(m, "datapath", &obj.Datapath, UnmarshalLoadBalancerLoggingDatapath) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerLoggingDatapath : The datapath logging configuration for this load balancer. +type LoadBalancerLoggingDatapath struct { + // If set to `true`, datapath logging is active for this load balancer. 
+ Active *bool `json:"active" validate:"required"` +} + +// NewLoadBalancerLoggingDatapath : Instantiate LoadBalancerLoggingDatapath (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerLoggingDatapath(active bool) (model *LoadBalancerLoggingDatapath, err error) { + model = &LoadBalancerLoggingDatapath{ + Active: core.BoolPtr(active), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalLoadBalancerLoggingDatapath unmarshals an instance of LoadBalancerLoggingDatapath from the specified map of raw messages. +func UnmarshalLoadBalancerLoggingDatapath(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerLoggingDatapath) + err = core.UnmarshalPrimitive(m, "active", &obj.Active) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPatch : LoadBalancerPatch struct +type LoadBalancerPatch struct { + // The logging configuration to use for this load balancer. + // + // To activate logging, the load balancer profile must support the specified logging type. + Logging *LoadBalancerLogging `json:"logging,omitempty"` + + // The unique user-defined name for this load balancer. + Name *string `json:"name,omitempty"` +} + +// UnmarshalLoadBalancerPatch unmarshals an instance of LoadBalancerPatch from the specified map of raw messages. +func UnmarshalLoadBalancerPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPatch) + err = core.UnmarshalModel(m, "logging", &obj.Logging, UnmarshalLoadBalancerLogging) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the LoadBalancerPatch +func (loadBalancerPatch *LoadBalancerPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(loadBalancerPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// LoadBalancerPool : LoadBalancerPool struct +type LoadBalancerPool struct { + // The load balancing algorithm. + Algorithm *string `json:"algorithm" validate:"required"` + + // The date and time that this pool was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The health monitor of this pool. + HealthMonitor *LoadBalancerPoolHealthMonitor `json:"health_monitor" validate:"required"` + + // The pool's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this load balancer pool. + ID *string `json:"id" validate:"required"` + + // The backend server members of the pool. + Members []LoadBalancerPoolMemberReference `json:"members,omitempty"` + + // The user-defined name for this load balancer pool. + Name *string `json:"name" validate:"required"` + + // The protocol used for this load balancer pool. + // + // The enumerated values for this property are expected to expand in the future. When processing this property, check + // for and log unknown values. Optionally halt processing and surface the error, or bypass the pool on which the + // unexpected property value was encountered. + Protocol *string `json:"protocol" validate:"required"` + + // The provisioning status of this pool. 
+	ProvisioningStatus *string `json:"provisioning_status" validate:"required"`
+
+	// The PROXY protocol setting for this pool:
+	// - `v1`: Enabled with version 1 (human-readable header format)
+	// - `v2`: Enabled with version 2 (binary header format)
+	// - `disabled`: Disabled
+	//
+	// Supported by load balancers in the `application` family (otherwise always `disabled`).
+	ProxyProtocol *string `json:"proxy_protocol" validate:"required"`
+
+	// The session persistence of this pool.
+	SessionPersistence *LoadBalancerPoolSessionPersistence `json:"session_persistence,omitempty"`
+}
+
+// Constants associated with the LoadBalancerPool.Algorithm property.
+// The load balancing algorithm.
+const (
+	LoadBalancerPoolAlgorithmLeastConnectionsConst = "least_connections"
+	LoadBalancerPoolAlgorithmRoundRobinConst = "round_robin"
+	LoadBalancerPoolAlgorithmWeightedRoundRobinConst = "weighted_round_robin"
+)
+
+// Constants associated with the LoadBalancerPool.Protocol property.
+// The protocol used for this load balancer pool.
+//
+// The enumerated values for this property are expected to expand in the future. When processing this property, check
+// for and log unknown values. Optionally halt processing and surface the error, or bypass the pool on which the
+// unexpected property value was encountered.
+const (
+	LoadBalancerPoolProtocolHTTPConst = "http"
+	LoadBalancerPoolProtocolHTTPSConst = "https"
+	LoadBalancerPoolProtocolTCPConst = "tcp"
+)
+
+// Constants associated with the LoadBalancerPool.ProvisioningStatus property.
+// The provisioning status of this pool.
+const (
+	LoadBalancerPoolProvisioningStatusActiveConst = "active"
+	LoadBalancerPoolProvisioningStatusCreatePendingConst = "create_pending"
+	LoadBalancerPoolProvisioningStatusDeletePendingConst = "delete_pending"
+	LoadBalancerPoolProvisioningStatusFailedConst = "failed"
+	LoadBalancerPoolProvisioningStatusMaintenancePendingConst = "maintenance_pending"
+	LoadBalancerPoolProvisioningStatusUpdatePendingConst = "update_pending"
+)
+
+// Constants associated with the LoadBalancerPool.ProxyProtocol property.
+// The PROXY protocol setting for this pool:
+// - `v1`: Enabled with version 1 (human-readable header format)
+// - `v2`: Enabled with version 2 (binary header format)
+// - `disabled`: Disabled
+//
+// Supported by load balancers in the `application` family (otherwise always `disabled`).
+const (
+	LoadBalancerPoolProxyProtocolDisabledConst = "disabled"
+	LoadBalancerPoolProxyProtocolV1Const = "v1"
+	LoadBalancerPoolProxyProtocolV2Const = "v2"
+)
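+
+// Editor's illustrative sketch, not part of the generated SDK: the property
+// docs above say the enumerated protocol values are expected to expand, and
+// that consumers should check for and log unknown values. poolProtocolKnown
+// is a hypothetical helper showing one way a caller might guard before
+// acting on a pool.
+func poolProtocolKnown(pool *LoadBalancerPool) bool {
+	if pool == nil || pool.Protocol == nil {
+		return false
+	}
+	switch *pool.Protocol {
+	case LoadBalancerPoolProtocolHTTPConst, LoadBalancerPoolProtocolHTTPSConst, LoadBalancerPoolProtocolTCPConst:
+		return true
+	default:
+		// Unknown value: the caller can log it and either halt processing or
+		// bypass this pool, as the property docs suggest.
+		return false
+	}
+}
+
+// UnmarshalLoadBalancerPool unmarshals an instance of LoadBalancerPool from the specified map of raw messages.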
+func UnmarshalLoadBalancerPool(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LoadBalancerPool)
+	err = core.UnmarshalPrimitive(m, "algorithm", &obj.Algorithm)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "health_monitor", &obj.HealthMonitor, UnmarshalLoadBalancerPoolHealthMonitor)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "members", &obj.Members, UnmarshalLoadBalancerPoolMemberReference)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "provisioning_status", &obj.ProvisioningStatus)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "proxy_protocol", &obj.ProxyProtocol)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "session_persistence", &obj.SessionPersistence, UnmarshalLoadBalancerPoolSessionPersistence)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LoadBalancerPoolCollection : LoadBalancerPoolCollection struct
+type LoadBalancerPoolCollection struct {
+	// Collection of pools.
+	Pools []LoadBalancerPool `json:"pools" validate:"required"`
+}
+
+// UnmarshalLoadBalancerPoolCollection unmarshals an instance of LoadBalancerPoolCollection from the specified map of raw messages.
+func UnmarshalLoadBalancerPoolCollection(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LoadBalancerPoolCollection)
+	err = core.UnmarshalModel(m, "pools", &obj.Pools, UnmarshalLoadBalancerPool)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LoadBalancerPoolHealthMonitor : LoadBalancerPoolHealthMonitor struct
+type LoadBalancerPoolHealthMonitor struct {
+	// The health check interval in seconds. The interval must be greater than the timeout value.
+	Delay *int64 `json:"delay" validate:"required"`
+
+	// The health check max retries.
+	MaxRetries *int64 `json:"max_retries" validate:"required"`
+
+	// The health check port number. If specified, this overrides the ports specified in the server member resources.
+	Port *int64 `json:"port,omitempty"`
+
+	// The health check timeout in seconds.
+	Timeout *int64 `json:"timeout" validate:"required"`
+
+	// The protocol type of this load balancer pool health monitor.
+	//
+	// The enumerated values for this property are expected to expand in the future. When processing this property, check
+	// for and log unknown values. Optionally halt processing and surface the error, or bypass the health monitor on which
+	// the unexpected property value was encountered.
+	Type *string `json:"type" validate:"required"`
+
+	// The health check URL path. Applicable only if the health monitor `type` is `http` or
+	// `https`. This value must be in the format of an [origin-form request
+	// target](https://tools.ietf.org/html/rfc7230#section-5.3.1).
+	URLPath *string `json:"url_path,omitempty"`
+}
+
+// Constants associated with the LoadBalancerPoolHealthMonitor.Type property.
+// The protocol type of this load balancer pool health monitor.
+//
+// The enumerated values for this property are expected to expand in the future. When processing this property, check
+// for and log unknown values. Optionally halt processing and surface the error, or bypass the health monitor on which
+// the unexpected property value was encountered.
+const (
+	LoadBalancerPoolHealthMonitorTypeHTTPConst = "http"
+	LoadBalancerPoolHealthMonitorTypeHTTPSConst = "https"
+	LoadBalancerPoolHealthMonitorTypeTCPConst = "tcp"
+)
+
+// UnmarshalLoadBalancerPoolHealthMonitor unmarshals an instance of LoadBalancerPoolHealthMonitor from the specified map of raw messages.
+func UnmarshalLoadBalancerPoolHealthMonitor(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LoadBalancerPoolHealthMonitor)
+	err = core.UnmarshalPrimitive(m, "delay", &obj.Delay)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "max_retries", &obj.MaxRetries)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "port", &obj.Port)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "timeout", &obj.Timeout)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "url_path", &obj.URLPath)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LoadBalancerPoolHealthMonitorPatch : LoadBalancerPoolHealthMonitorPatch struct
+type LoadBalancerPoolHealthMonitorPatch struct {
+	// The health check interval in seconds. The interval must be greater than the timeout value.
+	Delay *int64 `json:"delay" validate:"required"`
+
+	// The health check max retries.
+	MaxRetries *int64 `json:"max_retries" validate:"required"`
+
+	// The health check port number. If specified, this overrides the ports specified in the server member resources.
+	// Specify `null` to remove an existing port value.
+	Port *int64 `json:"port,omitempty"`
+
+	// The health check timeout in seconds.
+	Timeout *int64 `json:"timeout" validate:"required"`
+
+	// The protocol type of this load balancer pool health monitor.
+	Type *string `json:"type" validate:"required"`
+
+	// The health check URL path. Applicable only if the health monitor `type` is `http` or
+	// `https`. This value must be in the format of an [origin-form request
+	// target](https://tools.ietf.org/html/rfc7230#section-5.3.1).
+	URLPath *string `json:"url_path,omitempty"`
+}
+
+// Constants associated with the LoadBalancerPoolHealthMonitorPatch.Type property.
+// The protocol type of this load balancer pool health monitor.
+const (
+	LoadBalancerPoolHealthMonitorPatchTypeHTTPConst = "http"
+	LoadBalancerPoolHealthMonitorPatchTypeHTTPSConst = "https"
+	LoadBalancerPoolHealthMonitorPatchTypeTCPConst = "tcp"
+)
+
+// NewLoadBalancerPoolHealthMonitorPatch : Instantiate LoadBalancerPoolHealthMonitorPatch (Generic Model Constructor)
+func (*VpcClassicV1) NewLoadBalancerPoolHealthMonitorPatch(delay int64, maxRetries int64, timeout int64, typeVar string) (model *LoadBalancerPoolHealthMonitorPatch, err error) {
+	model = &LoadBalancerPoolHealthMonitorPatch{
+		Delay: core.Int64Ptr(delay),
+		MaxRetries: core.Int64Ptr(maxRetries),
+		Timeout: core.Int64Ptr(timeout),
+		Type: core.StringPtr(typeVar),
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
+
+// UnmarshalLoadBalancerPoolHealthMonitorPatch unmarshals an instance of LoadBalancerPoolHealthMonitorPatch from the specified map of raw messages.
+func UnmarshalLoadBalancerPoolHealthMonitorPatch(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LoadBalancerPoolHealthMonitorPatch)
+	err = core.UnmarshalPrimitive(m, "delay", &obj.Delay)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "max_retries", &obj.MaxRetries)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "port", &obj.Port)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "timeout", &obj.Timeout)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "url_path", &obj.URLPath)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LoadBalancerPoolHealthMonitorPrototype : LoadBalancerPoolHealthMonitorPrototype struct
+type LoadBalancerPoolHealthMonitorPrototype struct {
+	// The health check interval in seconds. The interval must be greater than the timeout value.
+	Delay *int64 `json:"delay" validate:"required"`
+
+	// The health check max retries.
+	MaxRetries *int64 `json:"max_retries" validate:"required"`
+
+	// The health check port number. If specified, this overrides the ports specified in the server member resources.
+	Port *int64 `json:"port,omitempty"`
+
+	// The health check timeout in seconds.
+	Timeout *int64 `json:"timeout" validate:"required"`
+
+	// The protocol type of this load balancer pool health monitor.
+	Type *string `json:"type" validate:"required"`
+
+	// The health check URL path. Applicable only if the health monitor `type` is `http` or
+	// `https`. This value must be in the format of an [origin-form request
+	// target](https://tools.ietf.org/html/rfc7230#section-5.3.1).
+	URLPath *string `json:"url_path,omitempty"`
+}
+
+// Constants associated with the LoadBalancerPoolHealthMonitorPrototype.Type property.
+// The protocol type of this load balancer pool health monitor.
+const (
+	LoadBalancerPoolHealthMonitorPrototypeTypeHTTPConst = "http"
+	LoadBalancerPoolHealthMonitorPrototypeTypeHTTPSConst = "https"
+	LoadBalancerPoolHealthMonitorPrototypeTypeTCPConst = "tcp"
+)
+
+// NewLoadBalancerPoolHealthMonitorPrototype : Instantiate LoadBalancerPoolHealthMonitorPrototype (Generic Model Constructor)
+func (*VpcClassicV1) NewLoadBalancerPoolHealthMonitorPrototype(delay int64, maxRetries int64, timeout int64, typeVar string) (model *LoadBalancerPoolHealthMonitorPrototype, err error) {
+	model = &LoadBalancerPoolHealthMonitorPrototype{
+		Delay: core.Int64Ptr(delay),
+		MaxRetries: core.Int64Ptr(maxRetries),
+		Timeout: core.Int64Ptr(timeout),
+		Type: core.StringPtr(typeVar),
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
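+
+// Editor's illustrative sketch, not part of the generated SDK: constructing a
+// health monitor prototype with the generic model constructor above. The
+// field docs require the delay (interval) to be greater than the timeout;
+// the values here are arbitrary examples.
+func exampleHealthMonitorPrototype(client *VpcClassicV1) (*LoadBalancerPoolHealthMonitorPrototype, error) {
+	monitor, err := client.NewLoadBalancerPoolHealthMonitorPrototype(
+		5, // delay: run a health check every 5 seconds
+		2, // maxRetries
+		2, // timeout: must be less than the delay
+		LoadBalancerPoolHealthMonitorPrototypeTypeHTTPConst,
+	)
+	if err != nil {
+		return nil, err
+	}
+	// URLPath applies only when the monitor type is http or https.
+	monitor.URLPath = core.StringPtr("/healthz")
+	return monitor, nil
+}
+
+// UnmarshalLoadBalancerPoolHealthMonitorPrototype unmarshals an instance of LoadBalancerPoolHealthMonitorPrototype from the specified map of raw messages.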
+func UnmarshalLoadBalancerPoolHealthMonitorPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolHealthMonitorPrototype) + err = core.UnmarshalPrimitive(m, "delay", &obj.Delay) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "max_retries", &obj.MaxRetries) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "timeout", &obj.Timeout) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url_path", &obj.URLPath) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolIdentity : Identifies a load balancer pool by a unique property. +// Models which "extend" this model: +// - LoadBalancerPoolIdentityByID +// - LoadBalancerPoolIdentityByHref +type LoadBalancerPoolIdentity struct { + // The unique identifier for this load balancer pool. + ID *string `json:"id,omitempty"` + + // The pool's canonical URL. + Href *string `json:"href,omitempty"` +} + +func (*LoadBalancerPoolIdentity) isaLoadBalancerPoolIdentity() bool { + return true +} + +type LoadBalancerPoolIdentityIntf interface { + isaLoadBalancerPoolIdentity() bool +} + +// UnmarshalLoadBalancerPoolIdentity unmarshals an instance of LoadBalancerPoolIdentity from the specified map of raw messages. +func UnmarshalLoadBalancerPoolIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolIdentityByName : LoadBalancerPoolIdentityByName struct +type LoadBalancerPoolIdentityByName struct { + // The user-defined name for this load balancer pool. + Name *string `json:"name" validate:"required"` +} + +// NewLoadBalancerPoolIdentityByName : Instantiate LoadBalancerPoolIdentityByName (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerPoolIdentityByName(name string) (model *LoadBalancerPoolIdentityByName, err error) { + model = &LoadBalancerPoolIdentityByName{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalLoadBalancerPoolIdentityByName unmarshals an instance of LoadBalancerPoolIdentityByName from the specified map of raw messages. +func UnmarshalLoadBalancerPoolIdentityByName(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolIdentityByName) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolMember : LoadBalancerPoolMember struct +type LoadBalancerPoolMember struct { + // The date and time that this member was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // Health of the server member in the pool. + Health *string `json:"health" validate:"required"` + + // The member's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this load balancer pool member. + ID *string `json:"id" validate:"required"` + + // The port number of the application running in the server member. 
+ Port *int64 `json:"port" validate:"required"` + + // The provisioning status of this member. + ProvisioningStatus *string `json:"provisioning_status" validate:"required"` + + // The pool member target. + Target LoadBalancerPoolMemberTargetIntf `json:"target" validate:"required"` + + // Weight of the server member. Applicable only if the pool algorithm is + // `weighted_round_robin`. + Weight *int64 `json:"weight,omitempty"` +} + +// Constants associated with the LoadBalancerPoolMember.Health property. +// Health of the server member in the pool. +const ( + LoadBalancerPoolMemberHealthFaultedConst = "faulted" + LoadBalancerPoolMemberHealthOkConst = "ok" + LoadBalancerPoolMemberHealthUnknownConst = "unknown" +) + +// Constants associated with the LoadBalancerPoolMember.ProvisioningStatus property. +// The provisioning status of this member. +const ( + LoadBalancerPoolMemberProvisioningStatusActiveConst = "active" + LoadBalancerPoolMemberProvisioningStatusCreatePendingConst = "create_pending" + LoadBalancerPoolMemberProvisioningStatusDeletePendingConst = "delete_pending" + LoadBalancerPoolMemberProvisioningStatusFailedConst = "failed" + LoadBalancerPoolMemberProvisioningStatusMaintenancePendingConst = "maintenance_pending" + LoadBalancerPoolMemberProvisioningStatusUpdatePendingConst = "update_pending" +) + +// UnmarshalLoadBalancerPoolMember unmarshals an instance of LoadBalancerPoolMember from the specified map of raw messages. +func UnmarshalLoadBalancerPoolMember(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolMember) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "health", &obj.Health) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port", &obj.Port) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "provisioning_status", &obj.ProvisioningStatus) + if err != nil { + return + } + err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalLoadBalancerPoolMemberTarget) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "weight", &obj.Weight) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolMemberCollection : LoadBalancerPoolMemberCollection struct +type LoadBalancerPoolMemberCollection struct { + // Collection of members. + Members []LoadBalancerPoolMember `json:"members" validate:"required"` +} + +// UnmarshalLoadBalancerPoolMemberCollection unmarshals an instance of LoadBalancerPoolMemberCollection from the specified map of raw messages. +func UnmarshalLoadBalancerPoolMemberCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolMemberCollection) + err = core.UnmarshalModel(m, "members", &obj.Members, UnmarshalLoadBalancerPoolMember) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolMemberPatch : LoadBalancerPoolMemberPatch struct +type LoadBalancerPoolMemberPatch struct { + // The port number of the application running in the server member. + Port *int64 `json:"port,omitempty"` + + // The pool member target. + Target LoadBalancerPoolMemberTargetPrototypeIntf `json:"target,omitempty"` + + // Weight of the server member. 
Applicable only if the pool algorithm is
+	// `weighted_round_robin`.
+	Weight *int64 `json:"weight,omitempty"`
+}
+
+// UnmarshalLoadBalancerPoolMemberPatch unmarshals an instance of LoadBalancerPoolMemberPatch from the specified map of raw messages.
+func UnmarshalLoadBalancerPoolMemberPatch(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LoadBalancerPoolMemberPatch)
+	err = core.UnmarshalPrimitive(m, "port", &obj.Port)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalLoadBalancerPoolMemberTargetPrototype)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "weight", &obj.Weight)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// AsPatch returns a generic map representation of the LoadBalancerPoolMemberPatch
+func (loadBalancerPoolMemberPatch *LoadBalancerPoolMemberPatch) AsPatch() (patch map[string]interface{}, err error) {
+	var jsonData []byte
+	jsonData, err = json.Marshal(loadBalancerPoolMemberPatch)
+	if err == nil {
+		err = json.Unmarshal(jsonData, &patch)
+	}
+	return
+}
+
+// LoadBalancerPoolMemberPrototype : LoadBalancerPoolMemberPrototype struct
+type LoadBalancerPoolMemberPrototype struct {
+	// The port number of the application running in the server member.
+	Port *int64 `json:"port" validate:"required"`
+
+	// The pool member target.
+	Target LoadBalancerPoolMemberTargetPrototypeIntf `json:"target" validate:"required"`
+
+	// Weight of the server member. Applicable only if the pool algorithm is
+	// `weighted_round_robin`.
+	Weight *int64 `json:"weight,omitempty"`
+}
+
+// NewLoadBalancerPoolMemberPrototype : Instantiate LoadBalancerPoolMemberPrototype (Generic Model Constructor)
+func (*VpcClassicV1) NewLoadBalancerPoolMemberPrototype(port int64, target LoadBalancerPoolMemberTargetPrototypeIntf) (model *LoadBalancerPoolMemberPrototype, err error) {
+	model = &LoadBalancerPoolMemberPrototype{
+		Port: core.Int64Ptr(port),
+		Target: target,
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
+
+// UnmarshalLoadBalancerPoolMemberPrototype unmarshals an instance of LoadBalancerPoolMemberPrototype from the specified map of raw messages.
+func UnmarshalLoadBalancerPoolMemberPrototype(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LoadBalancerPoolMemberPrototype)
+	err = core.UnmarshalPrimitive(m, "port", &obj.Port)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalLoadBalancerPoolMemberTargetPrototype)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "weight", &obj.Weight)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LoadBalancerPoolMemberReference : LoadBalancerPoolMemberReference struct
+type LoadBalancerPoolMemberReference struct {
+	// The member's canonical URL.
+	Href *string `json:"href" validate:"required"`
+
+	// The unique identifier for this load balancer pool member.
+	ID *string `json:"id" validate:"required"`
+}
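+
+// Editor's illustrative sketch, not part of the generated SDK: building a pool
+// member prototype that targets a private IP address. The address and port
+// are hypothetical; Weight is only honored when the pool algorithm is
+// `weighted_round_robin`.
+func examplePoolMemberPrototype(client *VpcClassicV1) (*LoadBalancerPoolMemberPrototype, error) {
+	member, err := client.NewLoadBalancerPoolMemberPrototype(
+		8080,
+		&LoadBalancerPoolMemberTargetPrototype{Address: core.StringPtr("10.240.0.4")},
+	)
+	if err != nil {
+		return nil, err
+	}
+	member.Weight = core.Int64Ptr(50)
+	return member, nil
+}
+
+// UnmarshalLoadBalancerPoolMemberReference unmarshals an instance of LoadBalancerPoolMemberReference from the specified map of raw messages.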
+func UnmarshalLoadBalancerPoolMemberReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolMemberReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolMemberTarget : The pool member target. +// Models which "extend" this model: +// - LoadBalancerPoolMemberTargetIP +type LoadBalancerPoolMemberTarget struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address,omitempty"` +} + +func (*LoadBalancerPoolMemberTarget) isaLoadBalancerPoolMemberTarget() bool { + return true +} + +type LoadBalancerPoolMemberTargetIntf interface { + isaLoadBalancerPoolMemberTarget() bool +} + +// UnmarshalLoadBalancerPoolMemberTarget unmarshals an instance of LoadBalancerPoolMemberTarget from the specified map of raw messages. +func UnmarshalLoadBalancerPoolMemberTarget(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolMemberTarget) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolMemberTargetPrototype : The pool member target. +// Models which "extend" this model: +// - LoadBalancerPoolMemberTargetPrototypeIP +type LoadBalancerPoolMemberTargetPrototype struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address,omitempty"` +} + +func (*LoadBalancerPoolMemberTargetPrototype) isaLoadBalancerPoolMemberTargetPrototype() bool { + return true +} + +type LoadBalancerPoolMemberTargetPrototypeIntf interface { + isaLoadBalancerPoolMemberTargetPrototype() bool +} + +// UnmarshalLoadBalancerPoolMemberTargetPrototype unmarshals an instance of LoadBalancerPoolMemberTargetPrototype from the specified map of raw messages. +func UnmarshalLoadBalancerPoolMemberTargetPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolMemberTargetPrototype) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolPatch : LoadBalancerPoolPatch struct +type LoadBalancerPoolPatch struct { + // The load balancing algorithm. + Algorithm *string `json:"algorithm,omitempty"` + + // The health monitor of this pool. + HealthMonitor *LoadBalancerPoolHealthMonitorPatch `json:"health_monitor,omitempty"` + + // The user-defined name for this load balancer pool. + Name *string `json:"name,omitempty"` + + // The protocol used for this load balancer pool. + // + // The enumerated values for this property are expected to expand in the future. When processing this property, check + // for and log unknown values. 
Optionally halt processing and surface the error, or bypass the pool on which the
+	// unexpected property value was encountered.
+	Protocol *string `json:"protocol,omitempty"`
+
+	// The PROXY protocol setting for this pool:
+	// - `v1`: Enabled with version 1 (human-readable header format)
+	// - `v2`: Enabled with version 2 (binary header format)
+	// - `disabled`: Disabled
+	//
+	// Supported by load balancers in the `application` family (otherwise always `disabled`).
+	ProxyProtocol *string `json:"proxy_protocol,omitempty"`
+
+	// The session persistence of this pool.
+	SessionPersistence *LoadBalancerPoolSessionPersistencePatch `json:"session_persistence,omitempty"`
+}
+
+// Constants associated with the LoadBalancerPoolPatch.Algorithm property.
+// The load balancing algorithm.
+const (
+	LoadBalancerPoolPatchAlgorithmLeastConnectionsConst = "least_connections"
+	LoadBalancerPoolPatchAlgorithmRoundRobinConst = "round_robin"
+	LoadBalancerPoolPatchAlgorithmWeightedRoundRobinConst = "weighted_round_robin"
+)
+
+// Constants associated with the LoadBalancerPoolPatch.Protocol property.
+// The protocol used for this load balancer pool.
+//
+// The enumerated values for this property are expected to expand in the future. When processing this property, check
+// for and log unknown values. Optionally halt processing and surface the error, or bypass the pool on which the
+// unexpected property value was encountered.
+const (
+	LoadBalancerPoolPatchProtocolHTTPConst = "http"
+	LoadBalancerPoolPatchProtocolHTTPSConst = "https"
+	LoadBalancerPoolPatchProtocolTCPConst = "tcp"
+)
+
+// Constants associated with the LoadBalancerPoolPatch.ProxyProtocol property.
+// The PROXY protocol setting for this pool:
+// - `v1`: Enabled with version 1 (human-readable header format)
+// - `v2`: Enabled with version 2 (binary header format)
+// - `disabled`: Disabled
+//
+// Supported by load balancers in the `application` family (otherwise always `disabled`).
+const (
+	LoadBalancerPoolPatchProxyProtocolDisabledConst = "disabled"
+	LoadBalancerPoolPatchProxyProtocolV1Const = "v1"
+	LoadBalancerPoolPatchProxyProtocolV2Const = "v2"
+)
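+
+// Editor's illustrative sketch, not part of the generated SDK: a pool patch
+// that switches the algorithm and enables PROXY protocol v2, converted with
+// AsPatch into the generic map expected by update operations. Fields left
+// unset are omitted from the resulting patch.
+func examplePoolPatch() (map[string]interface{}, error) {
+	poolPatch := &LoadBalancerPoolPatch{
+		Algorithm: core.StringPtr(LoadBalancerPoolPatchAlgorithmWeightedRoundRobinConst),
+		ProxyProtocol: core.StringPtr(LoadBalancerPoolPatchProxyProtocolV2Const),
+	}
+	return poolPatch.AsPatch()
+}
+
+// UnmarshalLoadBalancerPoolPatch unmarshals an instance of LoadBalancerPoolPatch from the specified map of raw messages.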
+func UnmarshalLoadBalancerPoolPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolPatch) + err = core.UnmarshalPrimitive(m, "algorithm", &obj.Algorithm) + if err != nil { + return + } + err = core.UnmarshalModel(m, "health_monitor", &obj.HealthMonitor, UnmarshalLoadBalancerPoolHealthMonitorPatch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "proxy_protocol", &obj.ProxyProtocol) + if err != nil { + return + } + err = core.UnmarshalModel(m, "session_persistence", &obj.SessionPersistence, UnmarshalLoadBalancerPoolSessionPersistencePatch) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the LoadBalancerPoolPatch +func (loadBalancerPoolPatch *LoadBalancerPoolPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(loadBalancerPoolPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// LoadBalancerPoolPrototype : LoadBalancerPoolPrototype struct +type LoadBalancerPoolPrototype struct { + // The load balancing algorithm. + Algorithm *string `json:"algorithm" validate:"required"` + + // The health monitor of this pool. + HealthMonitor *LoadBalancerPoolHealthMonitorPrototype `json:"health_monitor" validate:"required"` + + // The members for this load balancer pool. For load balancers in the `network` family, the same `port` and `target` + // tuple cannot be shared by a pool member of any other load balancer in the same VPC. + Members []LoadBalancerPoolMemberPrototype `json:"members,omitempty"` + + // The user-defined name for this load balancer pool. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string `json:"name,omitempty"` + + // The protocol used for this load balancer pool. Load balancers in the `network` family support `tcp`. Load balancers + // in the `application` family support `tcp`, `http`, and + // `https`. + Protocol *string `json:"protocol" validate:"required"` + + // The PROXY protocol setting for this pool: + // - `v1`: Enabled with version 1 (human-readable header format) + // - `v2`: Enabled with version 2 (binary header format) + // - `disabled`: Disabled + // + // Supported by load balancers in the `application` family (otherwise always `disabled`). + ProxyProtocol *string `json:"proxy_protocol,omitempty"` + + // The session persistence of this pool. + SessionPersistence *LoadBalancerPoolSessionPersistencePrototype `json:"session_persistence,omitempty"` +} + +// Constants associated with the LoadBalancerPoolPrototype.Algorithm property. +// The load balancing algorithm. +const ( + LoadBalancerPoolPrototypeAlgorithmLeastConnectionsConst = "least_connections" + LoadBalancerPoolPrototypeAlgorithmRoundRobinConst = "round_robin" + LoadBalancerPoolPrototypeAlgorithmWeightedRoundRobinConst = "weighted_round_robin" +) + +// Constants associated with the LoadBalancerPoolPrototype.Protocol property. +// The protocol used for this load balancer pool. Load balancers in the `network` family support `tcp`. Load balancers +// in the `application` family support `tcp`, `http`, and +// `https`. 
+const (
+	LoadBalancerPoolPrototypeProtocolHTTPConst = "http"
+	LoadBalancerPoolPrototypeProtocolHTTPSConst = "https"
+	LoadBalancerPoolPrototypeProtocolTCPConst = "tcp"
+)
+
+// Constants associated with the LoadBalancerPoolPrototype.ProxyProtocol property.
+// The PROXY protocol setting for this pool:
+// - `v1`: Enabled with version 1 (human-readable header format)
+// - `v2`: Enabled with version 2 (binary header format)
+// - `disabled`: Disabled
+//
+// Supported by load balancers in the `application` family (otherwise always `disabled`).
+const (
+	LoadBalancerPoolPrototypeProxyProtocolDisabledConst = "disabled"
+	LoadBalancerPoolPrototypeProxyProtocolV1Const = "v1"
+	LoadBalancerPoolPrototypeProxyProtocolV2Const = "v2"
+)
+
+// NewLoadBalancerPoolPrototype : Instantiate LoadBalancerPoolPrototype (Generic Model Constructor)
+func (*VpcClassicV1) NewLoadBalancerPoolPrototype(algorithm string, healthMonitor *LoadBalancerPoolHealthMonitorPrototype, protocol string) (model *LoadBalancerPoolPrototype, err error) {
+	model = &LoadBalancerPoolPrototype{
+		Algorithm: core.StringPtr(algorithm),
+		HealthMonitor: healthMonitor,
+		Protocol: core.StringPtr(protocol),
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
+
+// UnmarshalLoadBalancerPoolPrototype unmarshals an instance of LoadBalancerPoolPrototype from the specified map of raw messages.
+func UnmarshalLoadBalancerPoolPrototype(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LoadBalancerPoolPrototype)
+	err = core.UnmarshalPrimitive(m, "algorithm", &obj.Algorithm)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "health_monitor", &obj.HealthMonitor, UnmarshalLoadBalancerPoolHealthMonitorPrototype)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "members", &obj.Members, UnmarshalLoadBalancerPoolMemberPrototype)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "proxy_protocol", &obj.ProxyProtocol)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "session_persistence", &obj.SessionPersistence, UnmarshalLoadBalancerPoolSessionPersistencePrototype)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LoadBalancerPoolReference : LoadBalancerPoolReference struct
+type LoadBalancerPoolReference struct {
+	// The pool's canonical URL.
+	Href *string `json:"href" validate:"required"`
+
+	// The unique identifier for this load balancer pool.
+	ID *string `json:"id" validate:"required"`
+
+	// The user-defined name for this load balancer pool.
+	Name *string `json:"name" validate:"required"`
+}
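+
+// Editor's illustrative sketch, not part of the generated SDK: assembling a
+// complete pool prototype from the generic model constructors above. The
+// name and health check values are hypothetical.
+func examplePoolPrototype(client *VpcClassicV1) (*LoadBalancerPoolPrototype, error) {
+	monitor, err := client.NewLoadBalancerPoolHealthMonitorPrototype(5, 2, 2, LoadBalancerPoolHealthMonitorPrototypeTypeTCPConst)
+	if err != nil {
+		return nil, err
+	}
+	pool, err := client.NewLoadBalancerPoolPrototype(
+		LoadBalancerPoolPrototypeAlgorithmRoundRobinConst,
+		monitor,
+		LoadBalancerPoolPrototypeProtocolTCPConst,
+	)
+	if err != nil {
+		return nil, err
+	}
+	// Name is optional; if unset, the service generates one.
+	pool.Name = core.StringPtr("backend-pool")
+	return pool, nil
+}
+
+// UnmarshalLoadBalancerPoolReference unmarshals an instance of LoadBalancerPoolReference from the specified map of raw messages.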
+func UnmarshalLoadBalancerPoolReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolSessionPersistence : LoadBalancerPoolSessionPersistence struct +type LoadBalancerPoolSessionPersistence struct { + // The session persistence type. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the LoadBalancerPoolSessionPersistence.Type property. +// The session persistence type. +const ( + LoadBalancerPoolSessionPersistenceTypeSourceIPConst = "source_ip" +) + +// UnmarshalLoadBalancerPoolSessionPersistence unmarshals an instance of LoadBalancerPoolSessionPersistence from the specified map of raw messages. +func UnmarshalLoadBalancerPoolSessionPersistence(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolSessionPersistence) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolSessionPersistencePatch : LoadBalancerPoolSessionPersistencePatch struct +type LoadBalancerPoolSessionPersistencePatch struct { + // The session persistence type. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the LoadBalancerPoolSessionPersistencePatch.Type property. +// The session persistence type. +const ( + LoadBalancerPoolSessionPersistencePatchTypeSourceIPConst = "source_ip" +) + +// NewLoadBalancerPoolSessionPersistencePatch : Instantiate LoadBalancerPoolSessionPersistencePatch (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerPoolSessionPersistencePatch(typeVar string) (model *LoadBalancerPoolSessionPersistencePatch, err error) { + model = &LoadBalancerPoolSessionPersistencePatch{ + Type: core.StringPtr(typeVar), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalLoadBalancerPoolSessionPersistencePatch unmarshals an instance of LoadBalancerPoolSessionPersistencePatch from the specified map of raw messages. +func UnmarshalLoadBalancerPoolSessionPersistencePatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolSessionPersistencePatch) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolSessionPersistencePrototype : LoadBalancerPoolSessionPersistencePrototype struct +type LoadBalancerPoolSessionPersistencePrototype struct { + // The session persistence type. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the LoadBalancerPoolSessionPersistencePrototype.Type property. +// The session persistence type. 
+const (
+	LoadBalancerPoolSessionPersistencePrototypeTypeSourceIPConst = "source_ip"
+)
+
+// NewLoadBalancerPoolSessionPersistencePrototype : Instantiate LoadBalancerPoolSessionPersistencePrototype (Generic Model Constructor)
+func (*VpcClassicV1) NewLoadBalancerPoolSessionPersistencePrototype(typeVar string) (model *LoadBalancerPoolSessionPersistencePrototype, err error) {
+	model = &LoadBalancerPoolSessionPersistencePrototype{
+		Type: core.StringPtr(typeVar),
+	}
+	err = core.ValidateStruct(model, "required parameters")
+	return
+}
+
+// UnmarshalLoadBalancerPoolSessionPersistencePrototype unmarshals an instance of LoadBalancerPoolSessionPersistencePrototype from the specified map of raw messages.
+func UnmarshalLoadBalancerPoolSessionPersistencePrototype(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LoadBalancerPoolSessionPersistencePrototype)
+	err = core.UnmarshalPrimitive(m, "type", &obj.Type)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// LoadBalancerStatistics : LoadBalancerStatistics struct
+type LoadBalancerStatistics struct {
+	// The number of active connections on this load balancer.
+	ActiveConnections *int64 `json:"active_connections" validate:"required"`
+
+	// The current connection rate of this load balancer, in connections per second.
+	ConnectionRate *float32 `json:"connection_rate" validate:"required"`
+
+	// The total amount of data (in bytes) processed by this load balancer in the current calendar month.
+	DataProcessedThisMonth *int64 `json:"data_processed_this_month" validate:"required"`
+
+	// The current throughput of this load balancer, in Mbps.
+	Throughput *float32 `json:"throughput" validate:"required"`
+}
+
+// UnmarshalLoadBalancerStatistics unmarshals an instance of LoadBalancerStatistics from the specified map of raw messages.
+func UnmarshalLoadBalancerStatistics(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(LoadBalancerStatistics)
+	err = core.UnmarshalPrimitive(m, "active_connections", &obj.ActiveConnections)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "connection_rate", &obj.ConnectionRate)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "data_processed_this_month", &obj.DataProcessedThisMonth)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "throughput", &obj.Throughput)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// NetworkACL : NetworkACL struct
+type NetworkACL struct {
+	// The date and time that the network ACL was created.
+	CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"`
+
+	// The URL for this network ACL.
+	Href *string `json:"href" validate:"required"`
+
+	// The unique identifier for this network ACL.
+	ID *string `json:"id" validate:"required"`
+
+	// The user-defined name for this network ACL.
+	Name *string `json:"name" validate:"required"`
+
+	// The ordered rules for this network ACL. If no rules exist, all traffic will be allowed.
+	Rules []NetworkACLRuleItemIntf `json:"rules" validate:"required"`
+
+	// The subnets to which this network ACL is attached.
+	Subnets []SubnetReference `json:"subnets" validate:"required"`
+}
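+
+// Editor's illustrative sketch, not part of the generated SDK: the Unmarshal*
+// helpers in this file consume a map of raw JSON messages, as produced by
+// decoding a response body. Note that the result argument must be a pointer
+// to a pointer, since the helper replaces the pointed-to value.
+func exampleUnmarshalStatistics(body []byte) (*LoadBalancerStatistics, error) {
+	var raw map[string]json.RawMessage
+	if err := json.Unmarshal(body, &raw); err != nil {
+		return nil, err
+	}
+	var stats *LoadBalancerStatistics
+	if err := UnmarshalLoadBalancerStatistics(raw, &stats); err != nil {
+		return nil, err
+	}
+	return stats, nil
+}
+
+// UnmarshalNetworkACL unmarshals an instance of NetworkACL from the specified map of raw messages.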
+func UnmarshalNetworkACL(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACL) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalNetworkACLRuleItem) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnets", &obj.Subnets, UnmarshalSubnetReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLCollection : NetworkACLCollection struct +type NetworkACLCollection struct { + // A link to the first page of resources. + First *NetworkACLCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // Collection of network ACLs. + NetworkAcls []NetworkACL `json:"network_acls" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *NetworkACLCollectionNext `json:"next,omitempty"` +} + +// UnmarshalNetworkACLCollection unmarshals an instance of NetworkACLCollection from the specified map of raw messages. +func UnmarshalNetworkACLCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalNetworkACLCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_acls", &obj.NetworkAcls, UnmarshalNetworkACL) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalNetworkACLCollectionNext) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLCollectionFirst : A link to the first page of resources. +type NetworkACLCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalNetworkACLCollectionFirst unmarshals an instance of NetworkACLCollectionFirst from the specified map of raw messages. +func UnmarshalNetworkACLCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type NetworkACLCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalNetworkACLCollectionNext unmarshals an instance of NetworkACLCollectionNext from the specified map of raw messages. +func UnmarshalNetworkACLCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLIdentity : Identifies a network ACL by a unique property. 
+// Models which "extend" this model: +// - NetworkACLIdentityByID +// - NetworkACLIdentityByHref +type NetworkACLIdentity struct { + // The unique identifier for this network ACL. + ID *string `json:"id,omitempty"` + + // The URL for this network ACL. + Href *string `json:"href,omitempty"` +} + +func (*NetworkACLIdentity) isaNetworkACLIdentity() bool { + return true +} + +type NetworkACLIdentityIntf interface { + isaNetworkACLIdentity() bool +} + +// UnmarshalNetworkACLIdentity unmarshals an instance of NetworkACLIdentity from the specified map of raw messages. +func UnmarshalNetworkACLIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLPatch : NetworkACLPatch struct +type NetworkACLPatch struct { + // The unique user-defined name for this network ACL. + Name *string `json:"name,omitempty"` +} + +// UnmarshalNetworkACLPatch unmarshals an instance of NetworkACLPatch from the specified map of raw messages. +func UnmarshalNetworkACLPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the NetworkACLPatch +func (networkACLPatch *NetworkACLPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(networkACLPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// NetworkACLPrototype : NetworkACLPrototype struct +// Models which "extend" this model: +// - NetworkACLPrototypeNetworkACLByRules +// - NetworkACLPrototypeNetworkACLBySourceNetworkACL +type NetworkACLPrototype struct { + // The unique user-defined name for this network ACL. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string `json:"name,omitempty"` + + // Array of prototype objects for rules to create along with this network ACL. If unspecified, rules will be created to + // allow all traffic. + Rules []NetworkACLRulePrototypeNetworkACLContextIntf `json:"rules,omitempty"` + + // Network ACL to copy rules from. + SourceNetworkACL NetworkACLIdentityIntf `json:"source_network_acl,omitempty"` +} + +func (*NetworkACLPrototype) isaNetworkACLPrototype() bool { + return true +} + +type NetworkACLPrototypeIntf interface { + isaNetworkACLPrototype() bool +} + +// UnmarshalNetworkACLPrototype unmarshals an instance of NetworkACLPrototype from the specified map of raw messages. +func UnmarshalNetworkACLPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLPrototype) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalNetworkACLRulePrototypeNetworkACLContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_network_acl", &obj.SourceNetworkACL, UnmarshalNetworkACLIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLReference : NetworkACLReference struct +type NetworkACLReference struct { + // The URL for this network ACL. 
+ Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this network ACL. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalNetworkACLReference unmarshals an instance of NetworkACLReference from the specified map of raw messages. +func UnmarshalNetworkACLReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRule : NetworkACLRule struct +// Models which "extend" this model: +// - NetworkACLRuleNetworkACLRuleProtocolTcpudp +// - NetworkACLRuleNetworkACLRuleProtocolIcmp +// - NetworkACLRuleNetworkACLRuleProtocolAll +type NetworkACLRule struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The rule that this rule is immediately before. If absent, this is the last rule. + Before *NetworkACLRuleReference `json:"before,omitempty"` + + // The date and time that the rule was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The destination CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` + + // The IP version for this rule. + IPVersion *string `json:"ip_version" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name" validate:"required"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The source CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The inclusive upper bound of TCP/UDP destination port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP destination port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The inclusive upper bound of TCP/UDP source port range. + SourcePortMax *int64 `json:"source_port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP source port range. + SourcePortMin *int64 `json:"source_port_min,omitempty"` + + // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also + // specified. + Code *int64 `json:"code,omitempty"` + + // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the NetworkACLRule.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRuleActionAllowConst = "allow" + NetworkACLRuleActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRule.Direction property. 
+// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRuleDirectionInboundConst = "inbound" + NetworkACLRuleDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRule.IPVersion property. +// The IP version for this rule. +const ( + NetworkACLRuleIPVersionIpv4Const = "ipv4" + NetworkACLRuleIPVersionIpv6Const = "ipv6" +) + +func (*NetworkACLRule) isaNetworkACLRule() bool { + return true +} + +type NetworkACLRuleIntf interface { + isaNetworkACLRule() bool +} + +// UnmarshalNetworkACLRule unmarshals an instance of NetworkACLRule from the specified map of raw messages. +func UnmarshalNetworkACLRule(m map[string]json.RawMessage, result interface{}) (err error) { + // Retrieve discriminator value to determine correct "subclass". + var discValue string + err = core.UnmarshalPrimitive(m, "protocol", &discValue) + if err != nil { + err = fmt.Errorf("error unmarshalling discriminator property 'protocol': %s", err.Error()) + return + } + if discValue == "" { + err = fmt.Errorf("required discriminator property 'protocol' not found in JSON object") + return + } + if discValue == "all" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRuleNetworkACLRuleProtocolAll) + } else if discValue == "icmp" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRuleNetworkACLRuleProtocolIcmp) + } else if discValue == "tcp" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRuleNetworkACLRuleProtocolTcpudp) + } else if discValue == "udp" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRuleNetworkACLRuleProtocolTcpudp) + } else { + err = fmt.Errorf("unrecognized value for discriminator property 'protocol': %s", discValue) + } + return +} + +// NetworkACLRuleBeforePatch : The rule to move this rule immediately before. Specify `null` to move this rule after all existing rules. +// Models which "extend" this model: +// - NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID +// - NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref +type NetworkACLRuleBeforePatch struct { + // The unique identifier for this network ACL rule. + ID *string `json:"id,omitempty"` + + // The URL for this network ACL rule. + Href *string `json:"href,omitempty"` +} + +func (*NetworkACLRuleBeforePatch) isaNetworkACLRuleBeforePatch() bool { + return true +} + +type NetworkACLRuleBeforePatchIntf interface { + isaNetworkACLRuleBeforePatch() bool +} + +// UnmarshalNetworkACLRuleBeforePatch unmarshals an instance of NetworkACLRuleBeforePatch from the specified map of raw messages. +func UnmarshalNetworkACLRuleBeforePatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleBeforePatch) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleBeforePrototype : The rule to insert this rule immediately before. If omitted, this rule will be inserted after all existing rules. +// Models which "extend" this model: +// - NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID +// - NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref +type NetworkACLRuleBeforePrototype struct { + // The unique identifier for this network ACL rule. + ID *string `json:"id,omitempty"` + + // The URL for this network ACL rule. 
+ Href *string `json:"href,omitempty"` +} + +func (*NetworkACLRuleBeforePrototype) isaNetworkACLRuleBeforePrototype() bool { + return true +} + +type NetworkACLRuleBeforePrototypeIntf interface { + isaNetworkACLRuleBeforePrototype() bool +} + +// UnmarshalNetworkACLRuleBeforePrototype unmarshals an instance of NetworkACLRuleBeforePrototype from the specified map of raw messages. +func UnmarshalNetworkACLRuleBeforePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleBeforePrototype) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleCollection : NetworkACLRuleCollection struct +type NetworkACLRuleCollection struct { + // A link to the first page of resources. + First *NetworkACLRuleCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *NetworkACLRuleCollectionNext `json:"next,omitempty"` + + // Ordered collection of network ACL rules. + Rules []NetworkACLRuleItemIntf `json:"rules" validate:"required"` +} + +// UnmarshalNetworkACLRuleCollection unmarshals an instance of NetworkACLRuleCollection from the specified map of raw messages. +func UnmarshalNetworkACLRuleCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalNetworkACLRuleCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalNetworkACLRuleCollectionNext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalNetworkACLRuleItem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleCollectionFirst : A link to the first page of resources. +type NetworkACLRuleCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalNetworkACLRuleCollectionFirst unmarshals an instance of NetworkACLRuleCollectionFirst from the specified map of raw messages. +func UnmarshalNetworkACLRuleCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type NetworkACLRuleCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalNetworkACLRuleCollectionNext unmarshals an instance of NetworkACLRuleCollectionNext from the specified map of raw messages. 
+func UnmarshalNetworkACLRuleCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleItem : NetworkACLRuleItem struct +// Models which "extend" this model: +// - NetworkACLRuleItemNetworkACLRuleProtocolTcpudp +// - NetworkACLRuleItemNetworkACLRuleProtocolIcmp +// - NetworkACLRuleItemNetworkACLRuleProtocolAll +type NetworkACLRuleItem struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The rule that this rule is immediately before. In a rule collection, this always + // refers to the next item in the collection. If absent, this is the last rule. + Before *NetworkACLRuleReference `json:"before,omitempty"` + + // The date and time that the rule was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The destination CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` + + // The IP version for this rule. + IPVersion *string `json:"ip_version" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name" validate:"required"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The source CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The inclusive upper bound of TCP/UDP destination port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP destination port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The inclusive upper bound of TCP/UDP source port range. + SourcePortMax *int64 `json:"source_port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP source port range. + SourcePortMin *int64 `json:"source_port_min,omitempty"` + + // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also + // specified. + Code *int64 `json:"code,omitempty"` + + // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the NetworkACLRuleItem.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRuleItemActionAllowConst = "allow" + NetworkACLRuleItemActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRuleItem.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRuleItemDirectionInboundConst = "inbound" + NetworkACLRuleItemDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRuleItem.IPVersion property. +// The IP version for this rule. 
+const ( + NetworkACLRuleItemIPVersionIpv4Const = "ipv4" + NetworkACLRuleItemIPVersionIpv6Const = "ipv6" +) + +func (*NetworkACLRuleItem) isaNetworkACLRuleItem() bool { + return true +} + +type NetworkACLRuleItemIntf interface { + isaNetworkACLRuleItem() bool +} + +// UnmarshalNetworkACLRuleItem unmarshals an instance of NetworkACLRuleItem from the specified map of raw messages. +func UnmarshalNetworkACLRuleItem(m map[string]json.RawMessage, result interface{}) (err error) { + // Retrieve discriminator value to determine correct "subclass". + var discValue string + err = core.UnmarshalPrimitive(m, "protocol", &discValue) + if err != nil { + err = fmt.Errorf("error unmarshalling discriminator property 'protocol': %s", err.Error()) + return + } + if discValue == "" { + err = fmt.Errorf("required discriminator property 'protocol' not found in JSON object") + return + } + if discValue == "all" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolAll) + } else if discValue == "icmp" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolIcmp) + } else if discValue == "tcp" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolTcpudp) + } else if discValue == "udp" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolTcpudp) + } else { + err = fmt.Errorf("unrecognized value for discriminator property 'protocol': %s", discValue) + } + return +} + +// NetworkACLRulePatch : NetworkACLRulePatch struct +// Models which "extend" this model: +// - NetworkACLRulePatchNetworkACLRuleProtocolTcpudp +// - NetworkACLRulePatchNetworkACLRuleProtocolIcmp +// - NetworkACLRulePatchNetworkACLRuleProtocolAll +type NetworkACLRulePatch struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action,omitempty"` + + // The rule to move this rule immediately before. Specify `null` to move this rule after + // all existing rules. + Before NetworkACLRuleBeforePatchIntf `json:"before,omitempty"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination,omitempty"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction,omitempty"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. + Name *string `json:"name,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source,omitempty"` + + // The inclusive upper bound of TCP/UDP destination port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP destination port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The inclusive upper bound of TCP/UDP source port range. + SourcePortMax *int64 `json:"source_port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP source port range. + SourcePortMin *int64 `json:"source_port_min,omitempty"` + + // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also + // specified. + Code *int64 `json:"code,omitempty"` + + // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule. 
+ Type *int64 `json:"type,omitempty"`
+}
+
+// Constants associated with the NetworkACLRulePatch.Action property.
+// Whether to allow or deny matching traffic.
+const (
+ NetworkACLRulePatchActionAllowConst = "allow"
+ NetworkACLRulePatchActionDenyConst = "deny"
+)
+
+// Constants associated with the NetworkACLRulePatch.Direction property.
+// Whether the traffic to be matched is `inbound` or `outbound`.
+const (
+ NetworkACLRulePatchDirectionInboundConst = "inbound"
+ NetworkACLRulePatchDirectionOutboundConst = "outbound"
+)
+
+func (*NetworkACLRulePatch) isaNetworkACLRulePatch() bool {
+ return true
+}
+
+type NetworkACLRulePatchIntf interface {
+ isaNetworkACLRulePatch() bool
+}
+
+// UnmarshalNetworkACLRulePatch unmarshals an instance of NetworkACLRulePatch from the specified map of raw messages.
+func UnmarshalNetworkACLRulePatch(m map[string]json.RawMessage, result interface{}) (err error) {
+ // Retrieve discriminator value to determine correct "subclass".
+ var discValue string
+ err = core.UnmarshalPrimitive(m, "protocol", &discValue)
+ if err != nil {
+ err = fmt.Errorf("error unmarshalling discriminator property 'protocol': %s", err.Error())
+ return
+ }
+ if discValue == "" {
+ err = fmt.Errorf("required discriminator property 'protocol' not found in JSON object")
+ return
+ }
+ if discValue == "all" {
+ err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolAll)
+ } else if discValue == "icmp" {
+ err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolIcmp)
+ } else if discValue == "tcp" {
+ err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolTcpudp)
+ } else if discValue == "udp" {
+ err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolTcpudp)
+ } else {
+ err = fmt.Errorf("unrecognized value for discriminator property 'protocol': %s", discValue)
+ }
+ return
+}
+
+// AsPatch returns a generic map representation of the NetworkACLRulePatch
+func (networkACLRulePatch *NetworkACLRulePatch) AsPatch() (patch map[string]interface{}, err error) {
+ var jsonData []byte
+ jsonData, err = json.Marshal(networkACLRulePatch)
+ if err == nil {
+ err = json.Unmarshal(jsonData, &patch)
+ }
+ return
+}
+
+// NetworkACLRulePrototype : NetworkACLRulePrototype struct
+// Models which "extend" this model:
+// - NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp
+// - NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp
+// - NetworkACLRulePrototypeNetworkACLRuleProtocolAll
+type NetworkACLRulePrototype struct {
+ // Whether to allow or deny matching traffic.
+ Action *string `json:"action" validate:"required"`
+
+ // The rule to insert this rule immediately before. If omitted, this rule will be
+ // inserted after all existing rules.
+ Before NetworkACLRuleBeforePrototypeIntf `json:"before,omitempty"`
+
+ // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses.
+ Destination *string `json:"destination" validate:"required"`
+
+ // Whether the traffic to be matched is `inbound` or `outbound`.
+ Direction *string `json:"direction" validate:"required"`
+
+ // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If
+ // unspecified, the name will be a hyphenated list of randomly-selected words.
+ Name *string `json:"name,omitempty"`
+
+ // The protocol to enforce.
+ Protocol *string `json:"protocol" validate:"required"`
+
+ // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses.
+ Source *string `json:"source" validate:"required"`
+
+ // The inclusive upper bound of TCP/UDP destination port range.
+ PortMax *int64 `json:"port_max,omitempty"`
+
+ // The inclusive lower bound of TCP/UDP destination port range.
+ PortMin *int64 `json:"port_min,omitempty"`
+
+ // The inclusive upper bound of TCP/UDP source port range.
+ SourcePortMax *int64 `json:"source_port_max,omitempty"`
+
+ // The inclusive lower bound of TCP/UDP source port range.
+ SourcePortMin *int64 `json:"source_port_min,omitempty"`
+
+ // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also
+ // specified.
+ Code *int64 `json:"code,omitempty"`
+
+ // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule.
+ Type *int64 `json:"type,omitempty"`
+}
+
+// Constants associated with the NetworkACLRulePrototype.Action property.
+// Whether to allow or deny matching traffic.
+const (
+ NetworkACLRulePrototypeActionAllowConst = "allow"
+ NetworkACLRulePrototypeActionDenyConst = "deny"
+)
+
+// Constants associated with the NetworkACLRulePrototype.Direction property.
+// Whether the traffic to be matched is `inbound` or `outbound`.
+const (
+ NetworkACLRulePrototypeDirectionInboundConst = "inbound"
+ NetworkACLRulePrototypeDirectionOutboundConst = "outbound"
+)
+
+func (*NetworkACLRulePrototype) isaNetworkACLRulePrototype() bool {
+ return true
+}
+
+type NetworkACLRulePrototypeIntf interface {
+ isaNetworkACLRulePrototype() bool
+}
+
+// UnmarshalNetworkACLRulePrototype unmarshals an instance of NetworkACLRulePrototype from the specified map of raw messages.
+func UnmarshalNetworkACLRulePrototype(m map[string]json.RawMessage, result interface{}) (err error) {
+ // Retrieve discriminator value to determine correct "subclass".
+ var discValue string
+ err = core.UnmarshalPrimitive(m, "protocol", &discValue)
+ if err != nil {
+ err = fmt.Errorf("error unmarshalling discriminator property 'protocol': %s", err.Error())
+ return
+ }
+ if discValue == "" {
+ err = fmt.Errorf("required discriminator property 'protocol' not found in JSON object")
+ return
+ }
+ if discValue == "all" {
+ err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolAll)
+ } else if discValue == "icmp" {
+ err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolIcmp)
+ } else if discValue == "tcp" {
+ err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp)
+ } else if discValue == "udp" {
+ err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp)
+ } else {
+ err = fmt.Errorf("unrecognized value for discriminator property 'protocol': %s", discValue)
+ }
+ return
+}
+
+// NetworkACLRulePrototypeNetworkACLContext : NetworkACLRulePrototypeNetworkACLContext struct
+// Models which "extend" this model:
+// - NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp
+// - NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp
+// - NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll
+type NetworkACLRulePrototypeNetworkACLContext struct {
+ // Whether to allow or deny matching traffic.
+ Action *string `json:"action" validate:"required"`
+
+ // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses.
+ Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The inclusive upper bound of TCP/UDP destination port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP destination port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The inclusive upper bound of TCP/UDP source port range. + SourcePortMax *int64 `json:"source_port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP source port range. + SourcePortMin *int64 `json:"source_port_min,omitempty"` + + // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also + // specified. + Code *int64 `json:"code,omitempty"` + + // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContext.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePrototypeNetworkACLContextActionAllowConst = "allow" + NetworkACLRulePrototypeNetworkACLContextActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContext.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRulePrototypeNetworkACLContextDirectionInboundConst = "inbound" + NetworkACLRulePrototypeNetworkACLContextDirectionOutboundConst = "outbound" +) + +func (*NetworkACLRulePrototypeNetworkACLContext) isaNetworkACLRulePrototypeNetworkACLContext() bool { + return true +} + +type NetworkACLRulePrototypeNetworkACLContextIntf interface { + isaNetworkACLRulePrototypeNetworkACLContext() bool +} + +// UnmarshalNetworkACLRulePrototypeNetworkACLContext unmarshals an instance of NetworkACLRulePrototypeNetworkACLContext from the specified map of raw messages. +func UnmarshalNetworkACLRulePrototypeNetworkACLContext(m map[string]json.RawMessage, result interface{}) (err error) { + // Retrieve discriminator value to determine correct "subclass". 
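+ // NOTE: "protocol" is the discriminator; "tcp" and "udp" both map to the
+ // shared Tcpudp variant. An illustrative payload such as
+ //   {"action":"allow","direction":"inbound","protocol":"tcp",
+ //    "source":"0.0.0.0/0","destination":"0.0.0.0/0"}
+ // would be routed to the Tcpudp unmarshaller below.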
+ var discValue string + err = core.UnmarshalPrimitive(m, "protocol", &discValue) + if err != nil { + err = fmt.Errorf("error unmarshalling discriminator property 'protocol': %s", err.Error()) + return + } + if discValue == "" { + err = fmt.Errorf("required discriminator property 'protocol' not found in JSON object") + return + } + if discValue == "all" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll) + } else if discValue == "icmp" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp) + } else if discValue == "tcp" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp) + } else if discValue == "udp" { + err = core.UnmarshalModel(m, "", result, UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp) + } else { + err = fmt.Errorf("unrecognized value for discriminator property 'protocol': %s", discValue) + } + return +} + +// NetworkACLRuleReference : NetworkACLRuleReference struct +type NetworkACLRuleReference struct { + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this network ACL rule. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalNetworkACLRuleReference unmarshals an instance of NetworkACLRuleReference from the specified map of raw messages. +func UnmarshalNetworkACLRuleReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkInterface : NetworkInterface struct +type NetworkInterface struct { + // The date and time that the network interface was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // Array of references to floating IPs associated with this network interface. + FloatingIps []FloatingIPReference `json:"floating_ips,omitempty"` + + // The URL for this network interface. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network interface. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this network interface. + Name *string `json:"name" validate:"required"` + + // The network interface port speed in Mbps. + PortSpeed *int64 `json:"port_speed" validate:"required"` + + // The primary IPv4 address. + PrimaryIpv4Address *string `json:"primary_ipv4_address" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // Collection of security groups. + SecurityGroups []SecurityGroupReference `json:"security_groups" validate:"required"` + + // The status of the network interface. + // + // Due to a known issue, the value for this property may be `ACTIVE` instead of + // `available`. + Status *string `json:"status" validate:"required"` + + // The associated subnet. + Subnet *SubnetReference `json:"subnet" validate:"required"` + + // The type of this network interface as it relates to an instance. 
+ Type *string `json:"type" validate:"required"` +} + +// Constants associated with the NetworkInterface.ResourceType property. +// The resource type. +const ( + NetworkInterfaceResourceTypeNetworkInterfaceConst = "network_interface" +) + +// Constants associated with the NetworkInterface.Status property. +// The status of the network interface. +// +// Due to a known issue, the value for this property may be `ACTIVE` instead of +// `available`. +const ( + NetworkInterfaceStatusActiveConst = "active" + NetworkInterfaceStatusAvailableConst = "available" + NetworkInterfaceStatusDeletingConst = "deleting" + NetworkInterfaceStatusFailedConst = "failed" + NetworkInterfaceStatusPendingConst = "pending" +) + +// Constants associated with the NetworkInterface.Type property. +// The type of this network interface as it relates to an instance. +const ( + NetworkInterfaceTypePrimaryConst = "primary" + NetworkInterfaceTypeSecondaryConst = "secondary" +) + +// UnmarshalNetworkInterface unmarshals an instance of NetworkInterface from the specified map of raw messages. +func UnmarshalNetworkInterface(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkInterface) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "floating_ips", &obj.FloatingIps, UnmarshalFloatingIPReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_speed", &obj.PortSpeed) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_ipv4_address", &obj.PrimaryIpv4Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalModel(m, "security_groups", &obj.SecurityGroups, UnmarshalSecurityGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnet", &obj.Subnet, UnmarshalSubnetReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkInterfaceCollection : NetworkInterfaceCollection struct +type NetworkInterfaceCollection struct { + // Collection of network interfaces. + NetworkInterfaces []NetworkInterface `json:"network_interfaces" validate:"required"` +} + +// UnmarshalNetworkInterfaceCollection unmarshals an instance of NetworkInterfaceCollection from the specified map of raw messages. +func UnmarshalNetworkInterfaceCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkInterfaceCollection) + err = core.UnmarshalModel(m, "network_interfaces", &obj.NetworkInterfaces, UnmarshalNetworkInterface) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkInterfaceInstanceContextReference : NetworkInterfaceInstanceContextReference struct +type NetworkInterfaceInstanceContextReference struct { + // The URL for this network interface. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network interface. 
+ ID *string `json:"id" validate:"required"` + + // The user-defined name for this network interface. + Name *string `json:"name" validate:"required"` + + // The primary IPv4 address. + PrimaryIpv4Address *string `json:"primary_ipv4_address" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The associated subnet. + Subnet *SubnetReference `json:"subnet" validate:"required"` +} + +// Constants associated with the NetworkInterfaceInstanceContextReference.ResourceType property. +// The resource type. +const ( + NetworkInterfaceInstanceContextReferenceResourceTypeNetworkInterfaceConst = "network_interface" +) + +// UnmarshalNetworkInterfaceInstanceContextReference unmarshals an instance of NetworkInterfaceInstanceContextReference from the specified map of raw messages. +func UnmarshalNetworkInterfaceInstanceContextReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkInterfaceInstanceContextReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_ipv4_address", &obj.PrimaryIpv4Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnet", &obj.Subnet, UnmarshalSubnetReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkInterfacePrototype : NetworkInterfacePrototype struct +type NetworkInterfacePrototype struct { + // The user-defined name for this network interface. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string `json:"name,omitempty"` + + // The primary IPv4 address. If specified, it must be an available address on the network interface's subnet. If + // unspecified, an available address on the subnet will be automatically selected. + PrimaryIpv4Address *string `json:"primary_ipv4_address,omitempty"` + + // Collection of security groups. + SecurityGroups []SecurityGroupIdentityIntf `json:"security_groups,omitempty"` + + // The associated subnet. + Subnet SubnetIdentityIntf `json:"subnet" validate:"required"` +} + +// NewNetworkInterfacePrototype : Instantiate NetworkInterfacePrototype (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkInterfacePrototype(subnet SubnetIdentityIntf) (model *NetworkInterfacePrototype, err error) { + model = &NetworkInterfacePrototype{ + Subnet: subnet, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalNetworkInterfacePrototype unmarshals an instance of NetworkInterfacePrototype from the specified map of raw messages. 
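+// NOTE: a short usage sketch (illustrative only, not generated code) for the
+// NewNetworkInterfacePrototype constructor above; the subnet ID is a
+// placeholder, and SubnetIdentityByID is assumed from this package:
+//
+//   subnet := &SubnetIdentityByID{ID: core.StringPtr("SUBNET_ID")}
+//   nicPrototype, err := vpcService.NewNetworkInterfacePrototype(subnet)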
+func UnmarshalNetworkInterfacePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkInterfacePrototype) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_ipv4_address", &obj.PrimaryIpv4Address) + if err != nil { + return + } + err = core.UnmarshalModel(m, "security_groups", &obj.SecurityGroups, UnmarshalSecurityGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnet", &obj.Subnet, UnmarshalSubnetIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkInterfaceReference : NetworkInterfaceReference struct +type NetworkInterfaceReference struct { + // The URL for this network interface. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network interface. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this network interface. + Name *string `json:"name" validate:"required"` + + // The primary IPv4 address. + PrimaryIpv4Address *string `json:"primary_ipv4_address" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the NetworkInterfaceReference.ResourceType property. +// The resource type. +const ( + NetworkInterfaceReferenceResourceTypeNetworkInterfaceConst = "network_interface" +) + +// UnmarshalNetworkInterfaceReference unmarshals an instance of NetworkInterfaceReference from the specified map of raw messages. +func UnmarshalNetworkInterfaceReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkInterfaceReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_ipv4_address", &obj.PrimaryIpv4Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkInterfaceUnpaginatedCollection : NetworkInterfaceUnpaginatedCollection struct +type NetworkInterfaceUnpaginatedCollection struct { + // Collection of network interfaces. + NetworkInterfaces []NetworkInterface `json:"network_interfaces" validate:"required"` +} + +// UnmarshalNetworkInterfaceUnpaginatedCollection unmarshals an instance of NetworkInterfaceUnpaginatedCollection from the specified map of raw messages. +func UnmarshalNetworkInterfaceUnpaginatedCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkInterfaceUnpaginatedCollection) + err = core.UnmarshalModel(m, "network_interfaces", &obj.NetworkInterfaces, UnmarshalNetworkInterface) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OperatingSystem : OperatingSystem struct +type OperatingSystem struct { + // The operating system architecture. + Architecture *string `json:"architecture" validate:"required"` + + // A unique, display-friendly name for the operating system. + DisplayName *string `json:"display_name" validate:"required"` + + // The name of the software family this operating system belongs to. 
+ Family *string `json:"family" validate:"required"` + + // The URL for this operating system. + Href *string `json:"href" validate:"required"` + + // The globally unique name for this operating system. + Name *string `json:"name" validate:"required"` + + // The vendor of the operating system. + Vendor *string `json:"vendor" validate:"required"` + + // The major release version of this operating system. + Version *string `json:"version" validate:"required"` +} + +// UnmarshalOperatingSystem unmarshals an instance of OperatingSystem from the specified map of raw messages. +func UnmarshalOperatingSystem(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OperatingSystem) + err = core.UnmarshalPrimitive(m, "architecture", &obj.Architecture) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "display_name", &obj.DisplayName) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "family", &obj.Family) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "vendor", &obj.Vendor) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "version", &obj.Version) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OperatingSystemCollection : OperatingSystemCollection struct +type OperatingSystemCollection struct { + // A link to the first page of resources. + First *OperatingSystemCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *OperatingSystemCollectionNext `json:"next,omitempty"` + + // Collection of operating systems. + OperatingSystems []OperatingSystem `json:"operating_systems" validate:"required"` +} + +// UnmarshalOperatingSystemCollection unmarshals an instance of OperatingSystemCollection from the specified map of raw messages. +func UnmarshalOperatingSystemCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OperatingSystemCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalOperatingSystemCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalOperatingSystemCollectionNext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "operating_systems", &obj.OperatingSystems, UnmarshalOperatingSystem) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OperatingSystemCollectionFirst : A link to the first page of resources. +type OperatingSystemCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalOperatingSystemCollectionFirst unmarshals an instance of OperatingSystemCollectionFirst from the specified map of raw messages. 
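+// NOTE: the First/Next hrefs on the collection above form the generated
+// pagination contract. An illustrative sketch of walking pages; extracting a
+// `start` token from Next.Href is an assumption about how the service encodes
+// continuation:
+//
+//   page, _, err := vpcService.ListOperatingSystems(options)
+//   for err == nil && page.Next != nil {
+//       // parse the start token out of *page.Next.Href, set it on options,
+//       // and issue the next ListOperatingSystems call
+//   }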
+func UnmarshalOperatingSystemCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(OperatingSystemCollectionFirst)
+ err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// OperatingSystemCollectionNext : A link to the next page of resources. This property is present for all pages except the last page.
+type OperatingSystemCollectionNext struct {
+ // The URL for a page of resources.
+ Href *string `json:"href" validate:"required"`
+}
+
+// UnmarshalOperatingSystemCollectionNext unmarshals an instance of OperatingSystemCollectionNext from the specified map of raw messages.
+func UnmarshalOperatingSystemCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(OperatingSystemCollectionNext)
+ err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// OperatingSystemIdentity : Identifies an operating system by a unique property.
+// Models which "extend" this model:
+// - OperatingSystemIdentityByName
+// - OperatingSystemIdentityByHref
+type OperatingSystemIdentity struct {
+ // The globally unique name for this operating system.
+ Name *string `json:"name,omitempty"`
+
+ // The URL for this operating system.
+ Href *string `json:"href,omitempty"`
+}
+
+func (*OperatingSystemIdentity) isaOperatingSystemIdentity() bool {
+ return true
+}
+
+type OperatingSystemIdentityIntf interface {
+ isaOperatingSystemIdentity() bool
+}
+
+// UnmarshalOperatingSystemIdentity unmarshals an instance of OperatingSystemIdentity from the specified map of raw messages.
+func UnmarshalOperatingSystemIdentity(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(OperatingSystemIdentity)
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// PublicGateway : PublicGateway struct
+type PublicGateway struct {
+ // The date and time that the public gateway was created.
+ CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"`
+
+ // The CRN for this public gateway.
+ CRN *string `json:"crn" validate:"required"`
+
+ // Reference to the floating IP which is bound to this public gateway.
+ FloatingIP *PublicGatewayFloatingIP `json:"floating_ip" validate:"required"`
+
+ // The URL for this public gateway.
+ Href *string `json:"href" validate:"required"`
+
+ // The unique identifier for this public gateway.
+ ID *string `json:"id" validate:"required"`
+
+ // The user-defined name for this public gateway.
+ Name *string `json:"name" validate:"required"`
+
+ // The resource type.
+ ResourceType *string `json:"resource_type" validate:"required"`
+
+ // The status of this public gateway.
+ Status *string `json:"status" validate:"required"`
+
+ // The VPC this public gateway serves.
+ VPC *VPCReference `json:"vpc" validate:"required"`
+
+ // The zone where this public gateway lives.
+ Zone *ZoneReference `json:"zone" validate:"required"`
+}
+
+// Constants associated with the PublicGateway.ResourceType property.
+// The resource type.
+const (
+ PublicGatewayResourceTypePublicGatewayConst = "public_gateway"
+)
+
+// Constants associated with the PublicGateway.Status property.
+// The status of this public gateway.
+const ( + PublicGatewayStatusAvailableConst = "available" + PublicGatewayStatusDeletingConst = "deleting" + PublicGatewayStatusFailedConst = "failed" + PublicGatewayStatusPendingConst = "pending" +) + +// UnmarshalPublicGateway unmarshals an instance of PublicGateway from the specified map of raw messages. +func UnmarshalPublicGateway(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGateway) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalModel(m, "floating_ip", &obj.FloatingIP, UnmarshalPublicGatewayFloatingIP) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayCollection : PublicGatewayCollection struct +type PublicGatewayCollection struct { + // A link to the first page of resources. + First *PublicGatewayCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *PublicGatewayCollectionNext `json:"next,omitempty"` + + // Collection of public gateways. + PublicGateways []PublicGateway `json:"public_gateways" validate:"required"` +} + +// UnmarshalPublicGatewayCollection unmarshals an instance of PublicGatewayCollection from the specified map of raw messages. +func UnmarshalPublicGatewayCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalPublicGatewayCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalPublicGatewayCollectionNext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "public_gateways", &obj.PublicGateways, UnmarshalPublicGateway) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayCollectionFirst : A link to the first page of resources. +type PublicGatewayCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalPublicGatewayCollectionFirst unmarshals an instance of PublicGatewayCollectionFirst from the specified map of raw messages. 
+func UnmarshalPublicGatewayCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type PublicGatewayCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalPublicGatewayCollectionNext unmarshals an instance of PublicGatewayCollectionNext from the specified map of raw messages. +func UnmarshalPublicGatewayCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayFloatingIP : Reference to the floating IP which is bound to this public gateway. +type PublicGatewayFloatingIP struct { + // The globally unique IP address. + Address *string `json:"address" validate:"required"` + + // The CRN for this floating IP. + CRN *string `json:"crn" validate:"required"` + + // The URL for this floating IP. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this floating IP. + ID *string `json:"id" validate:"required"` + + // The unique user-defined name for this floating IP. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalPublicGatewayFloatingIP unmarshals an instance of PublicGatewayFloatingIP from the specified map of raw messages. +func UnmarshalPublicGatewayFloatingIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayFloatingIP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayFloatingIPPrototype : PublicGatewayFloatingIPPrototype struct +// Models which "extend" this model: +// - PublicGatewayFloatingIPPrototypeFloatingIPIdentity +// - PublicGatewayFloatingIPPrototypeFloatingIPPrototypeTargetContext +type PublicGatewayFloatingIPPrototype struct { + // The unique identifier for this floating IP. + ID *string `json:"id,omitempty"` + + // The CRN for this floating IP. + CRN *string `json:"crn,omitempty"` + + // The URL for this floating IP. + Href *string `json:"href,omitempty"` + + // The globally unique IP address. + Address *string `json:"address,omitempty"` + + // The unique user-defined name for this floating IP. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string `json:"name,omitempty"` +} + +func (*PublicGatewayFloatingIPPrototype) isaPublicGatewayFloatingIPPrototype() bool { + return true +} + +type PublicGatewayFloatingIPPrototypeIntf interface { + isaPublicGatewayFloatingIPPrototype() bool +} + +// UnmarshalPublicGatewayFloatingIPPrototype unmarshals an instance of PublicGatewayFloatingIPPrototype from the specified map of raw messages. 
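+// One struct carries both variants: identity fields (ID, CRN, or Href) reuse an
+// existing floating IP, while the prototype variant provisions a new one with an
+// optional Name. A sketch using only the fields declared above (existingFipID is
+// a placeholder):
+//
+//	reuse := &PublicGatewayFloatingIPPrototype{ID: core.StringPtr(existingFipID)}
+//	create := &PublicGatewayFloatingIPPrototype{Name: core.StringPtr("my-gateway-fip")}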
+func UnmarshalPublicGatewayFloatingIPPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayFloatingIPPrototype) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayIdentity : Identifies a public gateway by a unique property. +// Models which "extend" this model: +// - PublicGatewayIdentityByID +// - PublicGatewayIdentityByCRN +// - PublicGatewayIdentityByHref +type PublicGatewayIdentity struct { + // The unique identifier for this public gateway. + ID *string `json:"id,omitempty"` + + // The CRN for this public gateway. + CRN *string `json:"crn,omitempty"` + + // The URL for this public gateway. + Href *string `json:"href,omitempty"` +} + +func (*PublicGatewayIdentity) isaPublicGatewayIdentity() bool { + return true +} + +type PublicGatewayIdentityIntf interface { + isaPublicGatewayIdentity() bool +} + +// UnmarshalPublicGatewayIdentity unmarshals an instance of PublicGatewayIdentity from the specified map of raw messages. +func UnmarshalPublicGatewayIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayPatch : PublicGatewayPatch struct +type PublicGatewayPatch struct { + // The user-defined name for this public gateway. Names must be unique within the VPC the public gateway resides in. + Name *string `json:"name,omitempty"` +} + +// UnmarshalPublicGatewayPatch unmarshals an instance of PublicGatewayPatch from the specified map of raw messages. +func UnmarshalPublicGatewayPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the PublicGatewayPatch +func (publicGatewayPatch *PublicGatewayPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(publicGatewayPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// PublicGatewayReference : PublicGatewayReference struct +type PublicGatewayReference struct { + // The CRN for this public gateway. + CRN *string `json:"crn" validate:"required"` + + // The URL for this public gateway. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this public gateway. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this public gateway. + Name *string `json:"name" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the PublicGatewayReference.ResourceType property. 
+// The resource type. +const ( + PublicGatewayReferenceResourceTypePublicGatewayConst = "public_gateway" +) + +// UnmarshalPublicGatewayReference unmarshals an instance of PublicGatewayReference from the specified map of raw messages. +func UnmarshalPublicGatewayReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Region : Region struct +type Region struct { + // The API endpoint for this region. + Endpoint *string `json:"endpoint" validate:"required"` + + // The URL for this region. + Href *string `json:"href" validate:"required"` + + // The globally unique name for this region. + Name *string `json:"name" validate:"required"` + + // The availability status of this region. + Status *string `json:"status" validate:"required"` +} + +// Constants associated with the Region.Status property. +// The availability status of this region. +const ( + RegionStatusAvailableConst = "available" + RegionStatusUnavailableConst = "unavailable" +) + +// UnmarshalRegion unmarshals an instance of Region from the specified map of raw messages. +func UnmarshalRegion(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Region) + err = core.UnmarshalPrimitive(m, "endpoint", &obj.Endpoint) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RegionCollection : RegionCollection struct +type RegionCollection struct { + // Collection of regions. + Regions []Region `json:"regions" validate:"required"` +} + +// UnmarshalRegionCollection unmarshals an instance of RegionCollection from the specified map of raw messages. +func UnmarshalRegionCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RegionCollection) + err = core.UnmarshalModel(m, "regions", &obj.Regions, UnmarshalRegion) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RegionReference : RegionReference struct +type RegionReference struct { + // The URL for this region. + Href *string `json:"href" validate:"required"` + + // The globally unique name for this region. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalRegionReference unmarshals an instance of RegionReference from the specified map of raw messages. 
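+// An editor sketch of how a region's Endpoint might be consumed (assuming this
+// SDK's GetRegion call; "us-south" is a placeholder):
+//
+//	region, _, err := vpcService.GetRegion(vpcService.NewGetRegionOptions("us-south"))
+//	if err == nil && *region.Status == RegionStatusAvailableConst {
+//		fmt.Println(*region.Endpoint) // base URL serving this region
+//	}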
+func UnmarshalRegionReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RegionReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RemoveInstanceNetworkInterfaceFloatingIPOptions : The RemoveInstanceNetworkInterfaceFloatingIP options. +type RemoveInstanceNetworkInterfaceFloatingIPOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The network interface identifier. + NetworkInterfaceID *string `validate:"required,ne="` + + // The floating IP identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewRemoveInstanceNetworkInterfaceFloatingIPOptions : Instantiate RemoveInstanceNetworkInterfaceFloatingIPOptions +func (*VpcClassicV1) NewRemoveInstanceNetworkInterfaceFloatingIPOptions(instanceID string, networkInterfaceID string, id string) *RemoveInstanceNetworkInterfaceFloatingIPOptions { + return &RemoveInstanceNetworkInterfaceFloatingIPOptions{ + InstanceID: core.StringPtr(instanceID), + NetworkInterfaceID: core.StringPtr(networkInterfaceID), + ID: core.StringPtr(id), + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *RemoveInstanceNetworkInterfaceFloatingIPOptions) SetInstanceID(instanceID string) *RemoveInstanceNetworkInterfaceFloatingIPOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetNetworkInterfaceID : Allow user to set NetworkInterfaceID +func (options *RemoveInstanceNetworkInterfaceFloatingIPOptions) SetNetworkInterfaceID(networkInterfaceID string) *RemoveInstanceNetworkInterfaceFloatingIPOptions { + options.NetworkInterfaceID = core.StringPtr(networkInterfaceID) + return options +} + +// SetID : Allow user to set ID +func (options *RemoveInstanceNetworkInterfaceFloatingIPOptions) SetID(id string) *RemoveInstanceNetworkInterfaceFloatingIPOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *RemoveInstanceNetworkInterfaceFloatingIPOptions) SetHeaders(param map[string]string) *RemoveInstanceNetworkInterfaceFloatingIPOptions { + options.Headers = param + return options +} + +// RemoveSecurityGroupNetworkInterfaceOptions : The RemoveSecurityGroupNetworkInterface options. +type RemoveSecurityGroupNetworkInterfaceOptions struct { + // The security group identifier. + SecurityGroupID *string `validate:"required,ne="` + + // The network interface identifier. 
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewRemoveSecurityGroupNetworkInterfaceOptions : Instantiate RemoveSecurityGroupNetworkInterfaceOptions +func (*VpcClassicV1) NewRemoveSecurityGroupNetworkInterfaceOptions(securityGroupID string, id string) *RemoveSecurityGroupNetworkInterfaceOptions { + return &RemoveSecurityGroupNetworkInterfaceOptions{ + SecurityGroupID: core.StringPtr(securityGroupID), + ID: core.StringPtr(id), + } +} + +// SetSecurityGroupID : Allow user to set SecurityGroupID +func (options *RemoveSecurityGroupNetworkInterfaceOptions) SetSecurityGroupID(securityGroupID string) *RemoveSecurityGroupNetworkInterfaceOptions { + options.SecurityGroupID = core.StringPtr(securityGroupID) + return options +} + +// SetID : Allow user to set ID +func (options *RemoveSecurityGroupNetworkInterfaceOptions) SetID(id string) *RemoveSecurityGroupNetworkInterfaceOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *RemoveSecurityGroupNetworkInterfaceOptions) SetHeaders(param map[string]string) *RemoveSecurityGroupNetworkInterfaceOptions { + options.Headers = param + return options +} + +// RemoveVPNGatewayConnectionLocalCIDROptions : The RemoveVPNGatewayConnectionLocalCIDR options. +type RemoveVPNGatewayConnectionLocalCIDROptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // The address prefix part of the CIDR. + CIDRPrefix *string `validate:"required,ne="` + + // The prefix length part of the CIDR. + PrefixLength *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewRemoveVPNGatewayConnectionLocalCIDROptions : Instantiate RemoveVPNGatewayConnectionLocalCIDROptions +func (*VpcClassicV1) NewRemoveVPNGatewayConnectionLocalCIDROptions(vpnGatewayID string, id string, cidrPrefix string, prefixLength string) *RemoveVPNGatewayConnectionLocalCIDROptions { + return &RemoveVPNGatewayConnectionLocalCIDROptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + CIDRPrefix: core.StringPtr(cidrPrefix), + PrefixLength: core.StringPtr(prefixLength), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *RemoveVPNGatewayConnectionLocalCIDROptions) SetVPNGatewayID(vpnGatewayID string) *RemoveVPNGatewayConnectionLocalCIDROptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *RemoveVPNGatewayConnectionLocalCIDROptions) SetID(id string) *RemoveVPNGatewayConnectionLocalCIDROptions { + options.ID = core.StringPtr(id) + return options +} + +// SetCIDRPrefix : Allow user to set CIDRPrefix +func (options *RemoveVPNGatewayConnectionLocalCIDROptions) SetCIDRPrefix(cidrPrefix string) *RemoveVPNGatewayConnectionLocalCIDROptions { + options.CIDRPrefix = core.StringPtr(cidrPrefix) + return options +} + +// SetPrefixLength : Allow user to set PrefixLength +func (options *RemoveVPNGatewayConnectionLocalCIDROptions) SetPrefixLength(prefixLength string) *RemoveVPNGatewayConnectionLocalCIDROptions { + options.PrefixLength = core.StringPtr(prefixLength) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *RemoveVPNGatewayConnectionLocalCIDROptions) SetHeaders(param map[string]string) 
*RemoveVPNGatewayConnectionLocalCIDROptions { + options.Headers = param + return options +} + +// RemoveVPNGatewayConnectionPeerCIDROptions : The RemoveVPNGatewayConnectionPeerCIDR options. +type RemoveVPNGatewayConnectionPeerCIDROptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // The address prefix part of the CIDR. + CIDRPrefix *string `validate:"required,ne="` + + // The prefix length part of the CIDR. + PrefixLength *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewRemoveVPNGatewayConnectionPeerCIDROptions : Instantiate RemoveVPNGatewayConnectionPeerCIDROptions +func (*VpcClassicV1) NewRemoveVPNGatewayConnectionPeerCIDROptions(vpnGatewayID string, id string, cidrPrefix string, prefixLength string) *RemoveVPNGatewayConnectionPeerCIDROptions { + return &RemoveVPNGatewayConnectionPeerCIDROptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + CIDRPrefix: core.StringPtr(cidrPrefix), + PrefixLength: core.StringPtr(prefixLength), + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *RemoveVPNGatewayConnectionPeerCIDROptions) SetVPNGatewayID(vpnGatewayID string) *RemoveVPNGatewayConnectionPeerCIDROptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *RemoveVPNGatewayConnectionPeerCIDROptions) SetID(id string) *RemoveVPNGatewayConnectionPeerCIDROptions { + options.ID = core.StringPtr(id) + return options +} + +// SetCIDRPrefix : Allow user to set CIDRPrefix +func (options *RemoveVPNGatewayConnectionPeerCIDROptions) SetCIDRPrefix(cidrPrefix string) *RemoveVPNGatewayConnectionPeerCIDROptions { + options.CIDRPrefix = core.StringPtr(cidrPrefix) + return options +} + +// SetPrefixLength : Allow user to set PrefixLength +func (options *RemoveVPNGatewayConnectionPeerCIDROptions) SetPrefixLength(prefixLength string) *RemoveVPNGatewayConnectionPeerCIDROptions { + options.PrefixLength = core.StringPtr(prefixLength) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *RemoveVPNGatewayConnectionPeerCIDROptions) SetHeaders(param map[string]string) *RemoveVPNGatewayConnectionPeerCIDROptions { + options.Headers = param + return options +} + +// ReplaceLoadBalancerPoolMembersOptions : The ReplaceLoadBalancerPoolMembers options. +type ReplaceLoadBalancerPoolMembersOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The pool identifier. + PoolID *string `validate:"required,ne="` + + // Array of pool member prototype objects. 
+ Members []LoadBalancerPoolMemberPrototype `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewReplaceLoadBalancerPoolMembersOptions : Instantiate ReplaceLoadBalancerPoolMembersOptions +func (*VpcClassicV1) NewReplaceLoadBalancerPoolMembersOptions(loadBalancerID string, poolID string, members []LoadBalancerPoolMemberPrototype) *ReplaceLoadBalancerPoolMembersOptions { + return &ReplaceLoadBalancerPoolMembersOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + PoolID: core.StringPtr(poolID), + Members: members, + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *ReplaceLoadBalancerPoolMembersOptions) SetLoadBalancerID(loadBalancerID string) *ReplaceLoadBalancerPoolMembersOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetPoolID : Allow user to set PoolID +func (options *ReplaceLoadBalancerPoolMembersOptions) SetPoolID(poolID string) *ReplaceLoadBalancerPoolMembersOptions { + options.PoolID = core.StringPtr(poolID) + return options +} + +// SetMembers : Allow user to set Members +func (options *ReplaceLoadBalancerPoolMembersOptions) SetMembers(members []LoadBalancerPoolMemberPrototype) *ReplaceLoadBalancerPoolMembersOptions { + options.Members = members + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceLoadBalancerPoolMembersOptions) SetHeaders(param map[string]string) *ReplaceLoadBalancerPoolMembersOptions { + options.Headers = param + return options +} + +// ReplaceSubnetNetworkACLOptions : The ReplaceSubnetNetworkACL options. +type ReplaceSubnetNetworkACLOptions struct { + // The subnet identifier. + ID *string `validate:"required,ne="` + + // The network ACL identity. + NetworkACLIdentity NetworkACLIdentityIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewReplaceSubnetNetworkACLOptions : Instantiate ReplaceSubnetNetworkACLOptions +func (*VpcClassicV1) NewReplaceSubnetNetworkACLOptions(id string, networkACLIdentity NetworkACLIdentityIntf) *ReplaceSubnetNetworkACLOptions { + return &ReplaceSubnetNetworkACLOptions{ + ID: core.StringPtr(id), + NetworkACLIdentity: networkACLIdentity, + } +} + +// SetID : Allow user to set ID +func (options *ReplaceSubnetNetworkACLOptions) SetID(id string) *ReplaceSubnetNetworkACLOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetNetworkACLIdentity : Allow user to set NetworkACLIdentity +func (options *ReplaceSubnetNetworkACLOptions) SetNetworkACLIdentity(networkACLIdentity NetworkACLIdentityIntf) *ReplaceSubnetNetworkACLOptions { + options.NetworkACLIdentity = networkACLIdentity + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ReplaceSubnetNetworkACLOptions) SetHeaders(param map[string]string) *ReplaceSubnetNetworkACLOptions { + options.Headers = param + return options +} + +// ResourceGroupIdentity : The resource group to use. If unspecified, the account's [default resource +// group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. +// Models which "extend" this model: +// - ResourceGroupIdentityByID +type ResourceGroupIdentity struct { + // The unique identifier for this resource group. 
+ ID *string `json:"id,omitempty"` +} + +func (*ResourceGroupIdentity) isaResourceGroupIdentity() bool { + return true +} + +type ResourceGroupIdentityIntf interface { + isaResourceGroupIdentity() bool +} + +// UnmarshalResourceGroupIdentity unmarshals an instance of ResourceGroupIdentity from the specified map of raw messages. +func UnmarshalResourceGroupIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceGroupIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceGroupReference : ResourceGroupReference struct +type ResourceGroupReference struct { + // The URL for this resource group. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this resource group. + ID *string `json:"id" validate:"required"` +} + +// UnmarshalResourceGroupReference unmarshals an instance of ResourceGroupReference from the specified map of raw messages. +func UnmarshalResourceGroupReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceGroupReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Route : Route struct +type Route struct { + // The date and time that the route was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The destination of the route. + Destination *string `json:"destination" validate:"required"` + + // The URL for this route. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this route. + ID *string `json:"id" validate:"required"` + + // The lifecycle state of the route. + LifecycleState *string `json:"lifecycle_state" validate:"required"` + + // The user-defined name for this route. + Name *string `json:"name" validate:"required"` + + // The next hop that packets will be delivered to. + NextHop RouteNextHopIntf `json:"next_hop" validate:"required"` + + // The zone the route applies to. (Traffic from subnets in this zone will be + // subject to this route.). + Zone *ZoneReference `json:"zone" validate:"required"` +} + +// Constants associated with the Route.LifecycleState property. +// The lifecycle state of the route. +const ( + RouteLifecycleStateDeletedConst = "deleted" + RouteLifecycleStateDeletingConst = "deleting" + RouteLifecycleStateFailedConst = "failed" + RouteLifecycleStatePendingConst = "pending" + RouteLifecycleStateStableConst = "stable" + RouteLifecycleStateSuspendedConst = "suspended" + RouteLifecycleStateUpdatingConst = "updating" + RouteLifecycleStateWaitingConst = "waiting" +) + +// UnmarshalRoute unmarshals an instance of Route from the specified map of raw messages. 
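+// All of these Unmarshal* helpers share one calling convention: decode the raw
+// JSON into a map[string]json.RawMessage, then pass it through core.UnmarshalModel
+// (the same pattern the discriminator functions in this file use). A minimal
+// editor sketch, where responseBody is a placeholder for raw JSON bytes:
+//
+//	var m map[string]json.RawMessage
+//	if err := json.Unmarshal(responseBody, &m); err == nil {
+//		var route *Route
+//		err = core.UnmarshalModel(m, "", &route, UnmarshalRoute)
+//	}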
+func UnmarshalRoute(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Route) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "lifecycle_state", &obj.LifecycleState) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next_hop", &obj.NextHop, UnmarshalRouteNextHop) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RouteCollection : RouteCollection struct +type RouteCollection struct { + // Collection of routes. + Routes []Route `json:"routes" validate:"required"` +} + +// UnmarshalRouteCollection unmarshals an instance of RouteCollection from the specified map of raw messages. +func UnmarshalRouteCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RouteCollection) + err = core.UnmarshalModel(m, "routes", &obj.Routes, UnmarshalRoute) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RouteNextHop : RouteNextHop struct +// Models which "extend" this model: +// - RouteNextHopIP +type RouteNextHop struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address,omitempty"` +} + +func (*RouteNextHop) isaRouteNextHop() bool { + return true +} + +type RouteNextHopIntf interface { + isaRouteNextHop() bool +} + +// UnmarshalRouteNextHop unmarshals an instance of RouteNextHop from the specified map of raw messages. +func UnmarshalRouteNextHop(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RouteNextHop) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RouteNextHopPrototype : The next hop packets will be routed to. +// Models which "extend" this model: +// - RouteNextHopPrototypeRouteNextHopIP +type RouteNextHopPrototype struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address,omitempty"` +} + +func (*RouteNextHopPrototype) isaRouteNextHopPrototype() bool { + return true +} + +type RouteNextHopPrototypeIntf interface { + isaRouteNextHopPrototype() bool +} + +// UnmarshalRouteNextHopPrototype unmarshals an instance of RouteNextHopPrototype from the specified map of raw messages. 
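+// A route's next hop is supplied as one of these prototypes at creation time. An
+// editor sketch (assuming this SDK's CreateVPCRoute call; the exact constructor
+// signature, IDs, and addresses are placeholders):
+//
+//	nextHop := &RouteNextHopPrototype{Address: core.StringPtr("192.168.3.4")}
+//	opts := vpcService.NewCreateVPCRouteOptions(vpcID, "10.1.0.0/24", nextHop, zoneIdentity)
+//	route, _, err := vpcService.CreateVPCRoute(opts)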
+func UnmarshalRouteNextHopPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RouteNextHopPrototype) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RoutePatch : RoutePatch struct +type RoutePatch struct { + // The user-defined name for this route. Names must be unique within the VPC routing table the route resides in. + Name *string `json:"name,omitempty"` +} + +// UnmarshalRoutePatch unmarshals an instance of RoutePatch from the specified map of raw messages. +func UnmarshalRoutePatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RoutePatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the RoutePatch +func (routePatch *RoutePatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(routePatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// SecurityGroup : SecurityGroup struct +type SecurityGroup struct { + // The date and time that this security group was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The security group's CRN. + CRN *string `json:"crn" validate:"required"` + + // The security group's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this security group. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this security group. Security group names must be unique, within the scope of an account. + Name *string `json:"name" validate:"required"` + + // Array of references to network interfaces. + NetworkInterfaces []NetworkInterfaceReference `json:"network_interfaces" validate:"required"` + + // The resource group for this security group. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // Array of rules for this security group. If no rules exist, all traffic will be denied. + Rules []SecurityGroupRuleIntf `json:"rules" validate:"required"` + + // The VPC this security group is a part of. + VPC *VPCReference `json:"vpc" validate:"required"` +} + +// UnmarshalSecurityGroup unmarshals an instance of SecurityGroup from the specified map of raw messages. 
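+// Rules are surfaced through the SecurityGroupRuleIntf interface; callers recover
+// the protocol-specific shape with a type switch over the variant types named in
+// the SecurityGroupRule model comment below. An editor sketch:
+//
+//	for _, r := range securityGroup.Rules {
+//		switch rule := r.(type) {
+//		case *SecurityGroupRuleSecurityGroupRuleProtocolTcpudp:
+//			fmt.Println(*rule.Protocol, *rule.PortMin, *rule.PortMax)
+//		case *SecurityGroupRuleSecurityGroupRuleProtocolIcmp:
+//			fmt.Println("icmp", rule.Type, rule.Code)
+//		case *SecurityGroupRuleSecurityGroupRuleProtocolAll:
+//			fmt.Println("all protocols")
+//		}
+//	}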
+func UnmarshalSecurityGroup(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroup) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_interfaces", &obj.NetworkInterfaces, UnmarshalNetworkInterfaceReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalSecurityGroupRule) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupCollection : SecurityGroupCollection struct +type SecurityGroupCollection struct { + // Collection of security groups. + SecurityGroups []SecurityGroup `json:"security_groups" validate:"required"` +} + +// UnmarshalSecurityGroupCollection unmarshals an instance of SecurityGroupCollection from the specified map of raw messages. +func UnmarshalSecurityGroupCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupCollection) + err = core.UnmarshalModel(m, "security_groups", &obj.SecurityGroups, UnmarshalSecurityGroup) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupIdentity : Identifies a security group by a unique property. +// Models which "extend" this model: +// - SecurityGroupIdentityByID +// - SecurityGroupIdentityByCRN +// - SecurityGroupIdentityByHref +type SecurityGroupIdentity struct { + // The unique identifier for this security group. + ID *string `json:"id,omitempty"` + + // The security group's CRN. + CRN *string `json:"crn,omitempty"` + + // The security group's canonical URL. + Href *string `json:"href,omitempty"` +} + +func (*SecurityGroupIdentity) isaSecurityGroupIdentity() bool { + return true +} + +type SecurityGroupIdentityIntf interface { + isaSecurityGroupIdentity() bool +} + +// UnmarshalSecurityGroupIdentity unmarshals an instance of SecurityGroupIdentity from the specified map of raw messages. +func UnmarshalSecurityGroupIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupPatch : SecurityGroupPatch struct +type SecurityGroupPatch struct { + // The user-defined name for this security group. Security group names must be unique, within the scope of an account. + Name *string `json:"name,omitempty"` +} + +// UnmarshalSecurityGroupPatch unmarshals an instance of SecurityGroupPatch from the specified map of raw messages. 
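+// Patch models flow to the service as generic maps via AsPatch. An editor sketch
+// (assuming this SDK's UpdateSecurityGroup call; sgID is a placeholder):
+//
+//	patch := &SecurityGroupPatch{Name: core.StringPtr("renamed-sg")}
+//	patchMap, _ := patch.AsPatch()
+//	opts := vpcService.NewUpdateSecurityGroupOptions(sgID, patchMap)
+//	sg, _, err := vpcService.UpdateSecurityGroup(opts)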
+func UnmarshalSecurityGroupPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the SecurityGroupPatch +func (securityGroupPatch *SecurityGroupPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(securityGroupPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// SecurityGroupReference : SecurityGroupReference struct +type SecurityGroupReference struct { + // The security group's CRN. + CRN *string `json:"crn" validate:"required"` + + // The security group's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this security group. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this security group. Security group names must be unique, within the scope of an account. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalSecurityGroupReference unmarshals an instance of SecurityGroupReference from the specified map of raw messages. +func UnmarshalSecurityGroupReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRule : SecurityGroupRule struct +// Models which "extend" this model: +// - SecurityGroupRuleSecurityGroupRuleProtocolAll +// - SecurityGroupRuleSecurityGroupRuleProtocolIcmp +// - SecurityGroupRuleSecurityGroupRuleProtocolTcpudp +type SecurityGroupRule struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The unique identifier for this security group rule. + ID *string `json:"id" validate:"required"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol,omitempty"` + + // The IP addresses or security groups from which this rule allows traffic (or to + // which, for outbound rules). Can be specified as an IP address, a CIDR block, or a + // security group. If omitted, then traffic is allowed from any source (or to any + // source, for outbound rules). + Remote SecurityGroupRuleRemoteIntf `json:"remote,omitempty"` + + // The ICMP traffic code to allow. + Code *int64 `json:"code,omitempty"` + + // The ICMP traffic type to allow. + Type *int64 `json:"type,omitempty"` + + // The inclusive upper bound of TCP/UDP port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP port range. 
+ PortMin *int64 `json:"port_min,omitempty"` +} + +// Constants associated with the SecurityGroupRule.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRuleDirectionInboundConst = "inbound" + SecurityGroupRuleDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRule.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRuleIPVersionIpv4Const = "ipv4" +) + +func (*SecurityGroupRule) isaSecurityGroupRule() bool { + return true +} + +type SecurityGroupRuleIntf interface { + isaSecurityGroupRule() bool +} + +// UnmarshalSecurityGroupRule unmarshals an instance of SecurityGroupRule from the specified map of raw messages. +func UnmarshalSecurityGroupRule(m map[string]json.RawMessage, result interface{}) (err error) { + // Retrieve discriminator value to determine correct "subclass". + var discValue string + err = core.UnmarshalPrimitive(m, "protocol", &discValue) + if err != nil { + err = fmt.Errorf("error unmarshalling discriminator property 'protocol': %s", err.Error()) + return + } + if discValue == "" { + err = fmt.Errorf("required discriminator property 'protocol' not found in JSON object") + return + } + if discValue == "all" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolAll) + } else if discValue == "icmp" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolIcmp) + } else if discValue == "tcp" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + } else if discValue == "udp" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + } else { + err = fmt.Errorf("unrecognized value for discriminator property 'protocol': %s", discValue) + } + return +} + +// SecurityGroupRuleCollection : Collection of rules in a security group. +type SecurityGroupRuleCollection struct { + // Array of rules. + Rules []SecurityGroupRuleIntf `json:"rules" validate:"required"` +} + +// UnmarshalSecurityGroupRuleCollection unmarshals an instance of SecurityGroupRuleCollection from the specified map of raw messages. +func UnmarshalSecurityGroupRuleCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleCollection) + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalSecurityGroupRule) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRulePatch : SecurityGroupRulePatch struct +// Models which "extend" this model: +// - SecurityGroupRulePatchSecurityGroupRuleProtocolAll +// - SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp +// - SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp +type SecurityGroupRulePatch struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction,omitempty"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. 
Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol,omitempty"` + + // The IP addresses or security groups from which this rule will allow traffic (or to + // which, for outbound rules). Can be specified as an IP address, a CIDR block, or a + // security group. A CIDR block of `0.0.0.0/0` will allow traffic from any source (or to + // any source, for outbound rules). + Remote SecurityGroupRuleRemotePatchIntf `json:"remote,omitempty"` + + // The ICMP traffic code to allow. + Code *int64 `json:"code,omitempty"` + + // The ICMP traffic type to allow. + Type *int64 `json:"type,omitempty"` + + // The inclusive upper bound of TCP/UDP port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP port range. + PortMin *int64 `json:"port_min,omitempty"` +} + +// Constants associated with the SecurityGroupRulePatch.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRulePatchDirectionInboundConst = "inbound" + SecurityGroupRulePatchDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRulePatch.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRulePatchIPVersionIpv4Const = "ipv4" +) + +func (*SecurityGroupRulePatch) isaSecurityGroupRulePatch() bool { + return true +} + +type SecurityGroupRulePatchIntf interface { + isaSecurityGroupRulePatch() bool +} + +// UnmarshalSecurityGroupRulePatch unmarshals an instance of SecurityGroupRulePatch from the specified map of raw messages. +func UnmarshalSecurityGroupRulePatch(m map[string]json.RawMessage, result interface{}) (err error) { + // Retrieve discriminator value to determine correct "subclass". 
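+	// The wire format has no separate type tag, so `protocol` doubles as the
+	// discriminator. Note that "tcp" and "udp" both map to the single Tcpudp
+	// variant below: the two protocols share a shape (port_min/port_max) and
+	// differ only in the value of the `protocol` property itself.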
+ var discValue string + err = core.UnmarshalPrimitive(m, "protocol", &discValue) + if err != nil { + err = fmt.Errorf("error unmarshalling discriminator property 'protocol': %s", err.Error()) + return + } + if discValue == "" { + err = fmt.Errorf("required discriminator property 'protocol' not found in JSON object") + return + } + if discValue == "all" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolAll) + } else if discValue == "icmp" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolIcmp) + } else if discValue == "tcp" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp) + } else if discValue == "udp" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp) + } else { + err = fmt.Errorf("unrecognized value for discriminator property 'protocol': %s", discValue) + } + return +} + +// AsPatch returns a generic map representation of the SecurityGroupRulePatch +func (securityGroupRulePatch *SecurityGroupRulePatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(securityGroupRulePatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// SecurityGroupRulePrototype : SecurityGroupRulePrototype struct +// Models which "extend" this model: +// - SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll +// - SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp +// - SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp +type SecurityGroupRulePrototype struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol,omitempty"` + + // The IP addresses or security groups from which this rule will allow traffic (or to + // which, for outbound rules). Can be specified as an IP address, a CIDR block, or a + // security group. If omitted, then traffic will be allowed from any source (or to any + // source, for outbound rules). + Remote SecurityGroupRuleRemotePrototypeIntf `json:"remote,omitempty"` + + // The ICMP traffic code to allow. + Code *int64 `json:"code,omitempty"` + + // The ICMP traffic type to allow. + Type *int64 `json:"type,omitempty"` + + // The inclusive upper bound of TCP/UDP port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP port range. + PortMin *int64 `json:"port_min,omitempty"` +} + +// Constants associated with the SecurityGroupRulePrototype.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRulePrototypeDirectionInboundConst = "inbound" + SecurityGroupRulePrototypeDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRulePrototype.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. 
Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRulePrototypeIPVersionIpv4Const = "ipv4" +) + +func (*SecurityGroupRulePrototype) isaSecurityGroupRulePrototype() bool { + return true +} + +type SecurityGroupRulePrototypeIntf interface { + isaSecurityGroupRulePrototype() bool +} + +// UnmarshalSecurityGroupRulePrototype unmarshals an instance of SecurityGroupRulePrototype from the specified map of raw messages. +func UnmarshalSecurityGroupRulePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + // Retrieve discriminator value to determine correct "subclass". + var discValue string + err = core.UnmarshalPrimitive(m, "protocol", &discValue) + if err != nil { + err = fmt.Errorf("error unmarshalling discriminator property 'protocol': %s", err.Error()) + return + } + if discValue == "" { + err = fmt.Errorf("required discriminator property 'protocol' not found in JSON object") + return + } + if discValue == "all" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolAll) + } else if discValue == "icmp" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp) + } else if discValue == "tcp" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp) + } else if discValue == "udp" { + err = core.UnmarshalModel(m, "", result, UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp) + } else { + err = fmt.Errorf("unrecognized value for discriminator property 'protocol': %s", discValue) + } + return +} + +// SecurityGroupRuleRemote : The IP addresses or security groups from which this rule allows traffic (or to which, for outbound rules). Can be +// specified as an IP address, a CIDR block, or a security group. If omitted, then traffic is allowed from any source +// (or to any source, for outbound rules). +// Models which "extend" this model: +// - SecurityGroupRuleRemoteIP +// - SecurityGroupRuleRemoteCIDR +// - SecurityGroupRuleRemoteSecurityGroupReference +type SecurityGroupRuleRemote struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address,omitempty"` + + // The CIDR block. This property may add support for IPv6 CIDR blocks in the future. When processing a value in this + // property, verify that the CIDR block is in an expected format. If it is not, log an error. Optionally halt + // processing and surface the error, or bypass the resource on which the unexpected CIDR block format was encountered. + CIDRBlock *string `json:"cidr_block,omitempty"` + + // The security group's CRN. + CRN *string `json:"crn,omitempty"` + + // The security group's canonical URL. + Href *string `json:"href,omitempty"` + + // The unique identifier for this security group. + ID *string `json:"id,omitempty"` + + // The user-defined name for this security group. Security group names must be unique, within the scope of an account. 
+ Name *string `json:"name,omitempty"` +} + +func (*SecurityGroupRuleRemote) isaSecurityGroupRuleRemote() bool { + return true +} + +type SecurityGroupRuleRemoteIntf interface { + isaSecurityGroupRuleRemote() bool +} + +// UnmarshalSecurityGroupRuleRemote unmarshals an instance of SecurityGroupRuleRemote from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemote(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemote) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cidr_block", &obj.CIDRBlock) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePatch : The IP addresses or security groups from which this rule will allow traffic (or to which, for outbound rules). Can be +// specified as an IP address, a CIDR block, or a security group. A CIDR block of `0.0.0.0/0` will allow traffic from +// any source (or to any source, for outbound rules). +// Models which "extend" this model: +// - SecurityGroupRuleRemotePatchIP +// - SecurityGroupRuleRemotePatchCIDR +// - SecurityGroupRuleRemotePatchSecurityGroupIdentity +type SecurityGroupRuleRemotePatch struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address,omitempty"` + + // The CIDR block. This property may add support for IPv6 CIDR blocks in the future. When processing a value in this + // property, verify that the CIDR block is in an expected format. If it is not, log an error. Optionally halt + // processing and surface the error, or bypass the resource on which the unexpected CIDR block format was encountered. + CIDRBlock *string `json:"cidr_block,omitempty"` + + // The unique identifier for this security group. + ID *string `json:"id,omitempty"` + + // The security group's CRN. + CRN *string `json:"crn,omitempty"` + + // The security group's canonical URL. + Href *string `json:"href,omitempty"` +} + +func (*SecurityGroupRuleRemotePatch) isaSecurityGroupRuleRemotePatch() bool { + return true +} + +type SecurityGroupRuleRemotePatchIntf interface { + isaSecurityGroupRuleRemotePatch() bool +} + +// UnmarshalSecurityGroupRuleRemotePatch unmarshals an instance of SecurityGroupRuleRemotePatch from the specified map of raw messages. 
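+// Exactly one variant should be populated: Address for a single IP, CIDRBlock for
+// a range, or ID/CRN/Href to name another security group. A sketch using only the
+// fields declared above (remoteSgID is a placeholder):
+//
+//	byIP := &SecurityGroupRuleRemotePatch{Address: core.StringPtr("192.168.3.4")}
+//	byCIDR := &SecurityGroupRuleRemotePatch{CIDRBlock: core.StringPtr("10.0.0.0/24")}
+//	bySG := &SecurityGroupRuleRemotePatch{ID: core.StringPtr(remoteSgID)}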
+func UnmarshalSecurityGroupRuleRemotePatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePatch) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cidr_block", &obj.CIDRBlock) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePrototype : The IP addresses or security groups from which this rule will allow traffic (or to which, for outbound rules). Can be +// specified as an IP address, a CIDR block, or a security group. If omitted, then traffic will be allowed from any +// source (or to any source, for outbound rules). +// Models which "extend" this model: +// - SecurityGroupRuleRemotePrototypeIP +// - SecurityGroupRuleRemotePrototypeCIDR +// - SecurityGroupRuleRemotePrototypeSecurityGroupIdentity +type SecurityGroupRuleRemotePrototype struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address,omitempty"` + + // The CIDR block. This property may add support for IPv6 CIDR blocks in the future. When processing a value in this + // property, verify that the CIDR block is in an expected format. If it is not, log an error. Optionally halt + // processing and surface the error, or bypass the resource on which the unexpected CIDR block format was encountered. + CIDRBlock *string `json:"cidr_block,omitempty"` + + // The unique identifier for this security group. + ID *string `json:"id,omitempty"` + + // The security group's CRN. + CRN *string `json:"crn,omitempty"` + + // The security group's canonical URL. + Href *string `json:"href,omitempty"` +} + +func (*SecurityGroupRuleRemotePrototype) isaSecurityGroupRuleRemotePrototype() bool { + return true +} + +type SecurityGroupRuleRemotePrototypeIntf interface { + isaSecurityGroupRuleRemotePrototype() bool +} + +// UnmarshalSecurityGroupRuleRemotePrototype unmarshals an instance of SecurityGroupRuleRemotePrototype from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePrototype) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cidr_block", &obj.CIDRBlock) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SetSubnetPublicGatewayOptions : The SetSubnetPublicGateway options. +type SetSubnetPublicGatewayOptions struct { + // The subnet identifier. + ID *string `validate:"required,ne="` + + // The public gateway identity. 
+ PublicGatewayIdentity PublicGatewayIdentityIntf `validate:"required"`
+
+ // Allows users to set headers on API requests
+ Headers map[string]string
+}
+
+// NewSetSubnetPublicGatewayOptions : Instantiate SetSubnetPublicGatewayOptions
+func (*VpcClassicV1) NewSetSubnetPublicGatewayOptions(id string, publicGatewayIdentity PublicGatewayIdentityIntf) *SetSubnetPublicGatewayOptions {
+ return &SetSubnetPublicGatewayOptions{
+ ID: core.StringPtr(id),
+ PublicGatewayIdentity: publicGatewayIdentity,
+ }
+}
+
+// SetID : Allow user to set ID
+func (options *SetSubnetPublicGatewayOptions) SetID(id string) *SetSubnetPublicGatewayOptions {
+ options.ID = core.StringPtr(id)
+ return options
+}
+
+// SetPublicGatewayIdentity : Allow user to set PublicGatewayIdentity
+func (options *SetSubnetPublicGatewayOptions) SetPublicGatewayIdentity(publicGatewayIdentity PublicGatewayIdentityIntf) *SetSubnetPublicGatewayOptions {
+ options.PublicGatewayIdentity = publicGatewayIdentity
+ return options
+}
+
+// SetHeaders : Allow user to set Headers
+func (options *SetSubnetPublicGatewayOptions) SetHeaders(param map[string]string) *SetSubnetPublicGatewayOptions {
+ options.Headers = param
+ return options
+}
+
+// Subnet : Subnet struct
+type Subnet struct {
+ // The number of IPv4 addresses in this subnet that are not in-use, and have not been reserved by the user or the
+ // provider.
+ AvailableIpv4AddressCount *int64 `json:"available_ipv4_address_count" validate:"required"`
+
+ // The date and time that the subnet was created.
+ CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"`
+
+ // The CRN for this subnet.
+ CRN *string `json:"crn" validate:"required"`
+
+ // The URL for this subnet.
+ Href *string `json:"href" validate:"required"`
+
+ // The unique identifier for this subnet.
+ ID *string `json:"id" validate:"required"`
+
+ // The IPv4 range of the subnet, expressed in CIDR format.
+ Ipv4CIDRBlock *string `json:"ipv4_cidr_block" validate:"required"`
+
+ // The user-defined name for this subnet.
+ Name *string `json:"name" validate:"required"`
+
+ // The network ACL for this subnet.
+ NetworkACL *NetworkACLReference `json:"network_acl" validate:"required"`
+
+ // The public gateway to handle internet bound traffic for this subnet.
+ PublicGateway *PublicGatewayReference `json:"public_gateway,omitempty"`
+
+ // The status of the subnet.
+ Status *string `json:"status" validate:"required"`
+
+ // The total number of IPv4 addresses in this subnet.
+ //
+ // Note: This is calculated as 2^(32 − prefix length). For example, the prefix length `/24` gives:
+ // 2^(32 − 24) = 2^8 = 256 addresses.
+ TotalIpv4AddressCount *int64 `json:"total_ipv4_address_count" validate:"required"`
+
+ // The VPC this subnet is a part of.
+ VPC *VPCReference `json:"vpc" validate:"required"`
+
+ // The zone this subnet resides in.
+ Zone *ZoneReference `json:"zone" validate:"required"`
+}
+
+// Constants associated with the Subnet.Status property.
+// The status of the subnet.
+const (
+ SubnetStatusAvailableConst = "available"
+ SubnetStatusDeletingConst = "deleting"
+ SubnetStatusFailedConst = "failed"
+ SubnetStatusPendingConst = "pending"
+)
+
+// UnmarshalSubnet unmarshals an instance of Subnet from the specified map of raw messages.
+func UnmarshalSubnet(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(Subnet)
+ err = core.UnmarshalPrimitive(m, "available_ipv4_address_count", &obj.AvailableIpv4AddressCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "crn", &obj.CRN)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "href", &obj.Href)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "id", &obj.ID)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "ipv4_cidr_block", &obj.Ipv4CIDRBlock)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "network_acl", &obj.NetworkACL, UnmarshalNetworkACLReference)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "public_gateway", &obj.PublicGateway, UnmarshalPublicGatewayReference)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "status", &obj.Status)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "total_ipv4_address_count", &obj.TotalIpv4AddressCount)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCReference)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneReference)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// SubnetCollection : SubnetCollection struct
+type SubnetCollection struct {
+ // A link to the first page of resources.
+ First *SubnetCollectionFirst `json:"first" validate:"required"`
+
+ // The maximum number of resources that can be returned by the request.
+ Limit *int64 `json:"limit" validate:"required"`
+
+ // A link to the next page of resources. This property is present for all pages
+ // except the last page.
+ Next *SubnetCollectionNext `json:"next,omitempty"`
+
+ // Collection of subnets.
+ Subnets []Subnet `json:"subnets" validate:"required"`
+}
+
+// UnmarshalSubnetCollection unmarshals an instance of SubnetCollection from the specified map of raw messages.
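+//
+// A minimal pagination sketch, assuming the ListSubnets operation defined
+// earlier in this file (the service variable and `start` token handling are
+// hypothetical): keep fetching pages while the collection reports a Next link.
+//
+//   listOptions := vpcClassicService.NewListSubnetsOptions()
+//   for {
+//       collection, _, err := vpcClassicService.ListSubnets(listOptions)
+//       if err != nil {
+//           break
+//       }
+//       // ... consume collection.Subnets ...
+//       if collection.Next == nil {
+//           break
+//       }
+//       // derive the `start` query value from collection.Next.Href and set
+//       // it on listOptions before the next iteration
+//   }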
+func UnmarshalSubnetCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalSubnetCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalSubnetCollectionNext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnets", &obj.Subnets, UnmarshalSubnet) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetCollectionFirst : A link to the first page of resources. +type SubnetCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalSubnetCollectionFirst unmarshals an instance of SubnetCollectionFirst from the specified map of raw messages. +func UnmarshalSubnetCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type SubnetCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalSubnetCollectionNext unmarshals an instance of SubnetCollectionNext from the specified map of raw messages. +func UnmarshalSubnetCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetIdentity : Identifies a subnet by a unique property. +// Models which "extend" this model: +// - SubnetIdentityByID +// - SubnetIdentityByCRN +// - SubnetIdentityByHref +type SubnetIdentity struct { + // The unique identifier for this subnet. + ID *string `json:"id,omitempty"` + + // The CRN for this subnet. + CRN *string `json:"crn,omitempty"` + + // The URL for this subnet. + Href *string `json:"href,omitempty"` +} + +func (*SubnetIdentity) isaSubnetIdentity() bool { + return true +} + +type SubnetIdentityIntf interface { + isaSubnetIdentity() bool +} + +// UnmarshalSubnetIdentity unmarshals an instance of SubnetIdentity from the specified map of raw messages. +func UnmarshalSubnetIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetPatch : SubnetPatch struct +type SubnetPatch struct { + // The user-defined name for this subnet. Names must be unique within the VPC the subnet resides in. + Name *string `json:"name,omitempty"` + + // The network ACL to use for this subnet. + NetworkACL NetworkACLIdentityIntf `json:"network_acl,omitempty"` + + // The public gateway to handle internet bound traffic for this subnet. 
+ PublicGateway PublicGatewayIdentityIntf `json:"public_gateway,omitempty"` +} + +// UnmarshalSubnetPatch unmarshals an instance of SubnetPatch from the specified map of raw messages. +func UnmarshalSubnetPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_acl", &obj.NetworkACL, UnmarshalNetworkACLIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "public_gateway", &obj.PublicGateway, UnmarshalPublicGatewayIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the SubnetPatch +func (subnetPatch *SubnetPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(subnetPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// SubnetPrototype : SubnetPrototype struct +// Models which "extend" this model: +// - SubnetPrototypeSubnetByTotalCount +// - SubnetPrototypeSubnetByCIDR +type SubnetPrototype struct { + // The user-defined name for this subnet. Names must be unique within the VPC the subnet resides in. If unspecified, + // the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The network ACL to use for this subnet. + NetworkACL NetworkACLIdentityIntf `json:"network_acl,omitempty"` + + // The public gateway to handle internet bound traffic for this subnet. + PublicGateway PublicGatewayIdentityIntf `json:"public_gateway,omitempty"` + + // The VPC the subnet is to be a part of. + VPC VPCIdentityIntf `json:"vpc" validate:"required"` + + // The total number of IPv4 addresses required. Must be a power of 2. The VPC must have a default address prefix in the + // specified zone, and that prefix must have a free CIDR range with at least this number of addresses. + TotalIpv4AddressCount *int64 `json:"total_ipv4_address_count,omitempty"` + + // The zone the subnet is to reside in. + Zone ZoneIdentityIntf `json:"zone,omitempty"` + + // The IPv4 range of the subnet, expressed in CIDR format. The prefix length of the subnet's CIDR must be between `/8` + // (16,777,216 addresses) and `/29` (8 addresses). The IPv4 range of the subnet's CIDR must fall within an existing + // address prefix in the VPC. The subnet will be created in the zone of the address prefix that contains the IPv4 CIDR. + // If zone is specified, it must match the zone of the address prefix that contains the subnet's IPv4 CIDR. + Ipv4CIDRBlock *string `json:"ipv4_cidr_block,omitempty"` +} + +func (*SubnetPrototype) isaSubnetPrototype() bool { + return true +} + +type SubnetPrototypeIntf interface { + isaSubnetPrototype() bool +} + +// UnmarshalSubnetPrototype unmarshals an instance of SubnetPrototype from the specified map of raw messages. 
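+//
+// A sketch of the intended patch flow (identifiers are hypothetical): build a
+// typed SubnetPatch, convert it with AsPatch, and hand the generic map to the
+// UpdateSubnet options defined later in this file.
+//
+//   subnetPatchModel := &SubnetPatch{Name: core.StringPtr("renamed-subnet")}
+//   patch, err := subnetPatchModel.AsPatch()
+//   if err == nil {
+//       options := vpcClassicService.NewUpdateSubnetOptions(subnetID, patch)
+//       _, _, err = vpcClassicService.UpdateSubnet(options)
+//   }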
+func UnmarshalSubnetPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetPrototype) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_acl", &obj.NetworkACL, UnmarshalNetworkACLIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "public_gateway", &obj.PublicGateway, UnmarshalPublicGatewayIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_ipv4_address_count", &obj.TotalIpv4AddressCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ipv4_cidr_block", &obj.Ipv4CIDRBlock) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetReference : SubnetReference struct +type SubnetReference struct { + // The CRN for this subnet. + CRN *string `json:"crn" validate:"required"` + + // The URL for this subnet. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this subnet. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this subnet. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalSubnetReference unmarshals an instance of SubnetReference from the specified map of raw messages. +func UnmarshalSubnetReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// UnsetSubnetPublicGatewayOptions : The UnsetSubnetPublicGateway options. +type UnsetSubnetPublicGatewayOptions struct { + // The subnet identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUnsetSubnetPublicGatewayOptions : Instantiate UnsetSubnetPublicGatewayOptions +func (*VpcClassicV1) NewUnsetSubnetPublicGatewayOptions(id string) *UnsetSubnetPublicGatewayOptions { + return &UnsetSubnetPublicGatewayOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *UnsetSubnetPublicGatewayOptions) SetID(id string) *UnsetSubnetPublicGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UnsetSubnetPublicGatewayOptions) SetHeaders(param map[string]string) *UnsetSubnetPublicGatewayOptions { + options.Headers = param + return options +} + +// UpdateFloatingIPOptions : The UpdateFloatingIP options. +type UpdateFloatingIPOptions struct { + // The floating IP identifier. + ID *string `validate:"required,ne="` + + // The floating IP patch. 
+ FloatingIPPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateFloatingIPOptions : Instantiate UpdateFloatingIPOptions +func (*VpcClassicV1) NewUpdateFloatingIPOptions(id string, floatingIPPatch map[string]interface{}) *UpdateFloatingIPOptions { + return &UpdateFloatingIPOptions{ + ID: core.StringPtr(id), + FloatingIPPatch: floatingIPPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateFloatingIPOptions) SetID(id string) *UpdateFloatingIPOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetFloatingIPPatch : Allow user to set FloatingIPPatch +func (options *UpdateFloatingIPOptions) SetFloatingIPPatch(floatingIPPatch map[string]interface{}) *UpdateFloatingIPOptions { + options.FloatingIPPatch = floatingIPPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateFloatingIPOptions) SetHeaders(param map[string]string) *UpdateFloatingIPOptions { + options.Headers = param + return options +} + +// UpdateIkePolicyOptions : The UpdateIkePolicy options. +type UpdateIkePolicyOptions struct { + // The IKE policy identifier. + ID *string `validate:"required,ne="` + + // The IKE policy patch. + IkePolicyPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateIkePolicyOptions : Instantiate UpdateIkePolicyOptions +func (*VpcClassicV1) NewUpdateIkePolicyOptions(id string, ikePolicyPatch map[string]interface{}) *UpdateIkePolicyOptions { + return &UpdateIkePolicyOptions{ + ID: core.StringPtr(id), + IkePolicyPatch: ikePolicyPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateIkePolicyOptions) SetID(id string) *UpdateIkePolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetIkePolicyPatch : Allow user to set IkePolicyPatch +func (options *UpdateIkePolicyOptions) SetIkePolicyPatch(ikePolicyPatch map[string]interface{}) *UpdateIkePolicyOptions { + options.IkePolicyPatch = ikePolicyPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateIkePolicyOptions) SetHeaders(param map[string]string) *UpdateIkePolicyOptions { + options.Headers = param + return options +} + +// UpdateImageOptions : The UpdateImage options. +type UpdateImageOptions struct { + // The image identifier. + ID *string `validate:"required,ne="` + + // The image patch. + ImagePatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateImageOptions : Instantiate UpdateImageOptions +func (*VpcClassicV1) NewUpdateImageOptions(id string, imagePatch map[string]interface{}) *UpdateImageOptions { + return &UpdateImageOptions{ + ID: core.StringPtr(id), + ImagePatch: imagePatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateImageOptions) SetID(id string) *UpdateImageOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetImagePatch : Allow user to set ImagePatch +func (options *UpdateImageOptions) SetImagePatch(imagePatch map[string]interface{}) *UpdateImageOptions { + options.ImagePatch = imagePatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateImageOptions) SetHeaders(param map[string]string) *UpdateImageOptions { + options.Headers = param + return options +} + +// UpdateInstanceOptions : The UpdateInstance options. 
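+//
+// Every *Options type in this file follows the same fluent builder pattern; a
+// sketch (hypothetical ID and patch map) using the image options defined
+// above:
+//
+//   options := vpcClassicService.NewUpdateImageOptions(imageID, imagePatch).
+//       SetHeaders(map[string]string{"X-Correlation-Id": "example"})
+//   image, response, err := vpcClassicService.UpdateImage(options)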
+type UpdateInstanceOptions struct { + // The instance identifier. + ID *string `validate:"required,ne="` + + // The instance patch. + InstancePatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateInstanceOptions : Instantiate UpdateInstanceOptions +func (*VpcClassicV1) NewUpdateInstanceOptions(id string, instancePatch map[string]interface{}) *UpdateInstanceOptions { + return &UpdateInstanceOptions{ + ID: core.StringPtr(id), + InstancePatch: instancePatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateInstanceOptions) SetID(id string) *UpdateInstanceOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetInstancePatch : Allow user to set InstancePatch +func (options *UpdateInstanceOptions) SetInstancePatch(instancePatch map[string]interface{}) *UpdateInstanceOptions { + options.InstancePatch = instancePatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateInstanceOptions) SetHeaders(param map[string]string) *UpdateInstanceOptions { + options.Headers = param + return options +} + +// UpdateInstanceVolumeAttachmentOptions : The UpdateInstanceVolumeAttachment options. +type UpdateInstanceVolumeAttachmentOptions struct { + // The instance identifier. + InstanceID *string `validate:"required,ne="` + + // The volume attachment identifier. + ID *string `validate:"required,ne="` + + // The volume attachment patch. + VolumeAttachmentPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateInstanceVolumeAttachmentOptions : Instantiate UpdateInstanceVolumeAttachmentOptions +func (*VpcClassicV1) NewUpdateInstanceVolumeAttachmentOptions(instanceID string, id string, volumeAttachmentPatch map[string]interface{}) *UpdateInstanceVolumeAttachmentOptions { + return &UpdateInstanceVolumeAttachmentOptions{ + InstanceID: core.StringPtr(instanceID), + ID: core.StringPtr(id), + VolumeAttachmentPatch: volumeAttachmentPatch, + } +} + +// SetInstanceID : Allow user to set InstanceID +func (options *UpdateInstanceVolumeAttachmentOptions) SetInstanceID(instanceID string) *UpdateInstanceVolumeAttachmentOptions { + options.InstanceID = core.StringPtr(instanceID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateInstanceVolumeAttachmentOptions) SetID(id string) *UpdateInstanceVolumeAttachmentOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetVolumeAttachmentPatch : Allow user to set VolumeAttachmentPatch +func (options *UpdateInstanceVolumeAttachmentOptions) SetVolumeAttachmentPatch(volumeAttachmentPatch map[string]interface{}) *UpdateInstanceVolumeAttachmentOptions { + options.VolumeAttachmentPatch = volumeAttachmentPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateInstanceVolumeAttachmentOptions) SetHeaders(param map[string]string) *UpdateInstanceVolumeAttachmentOptions { + options.Headers = param + return options +} + +// UpdateIpsecPolicyOptions : The UpdateIpsecPolicy options. +type UpdateIpsecPolicyOptions struct { + // The IPsec policy identifier. + ID *string `validate:"required,ne="` + + // The IPsec policy patch. 
+ IPsecPolicyPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateIpsecPolicyOptions : Instantiate UpdateIpsecPolicyOptions +func (*VpcClassicV1) NewUpdateIpsecPolicyOptions(id string, iPsecPolicyPatch map[string]interface{}) *UpdateIpsecPolicyOptions { + return &UpdateIpsecPolicyOptions{ + ID: core.StringPtr(id), + IPsecPolicyPatch: iPsecPolicyPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateIpsecPolicyOptions) SetID(id string) *UpdateIpsecPolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetIPsecPolicyPatch : Allow user to set IPsecPolicyPatch +func (options *UpdateIpsecPolicyOptions) SetIPsecPolicyPatch(iPsecPolicyPatch map[string]interface{}) *UpdateIpsecPolicyOptions { + options.IPsecPolicyPatch = iPsecPolicyPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateIpsecPolicyOptions) SetHeaders(param map[string]string) *UpdateIpsecPolicyOptions { + options.Headers = param + return options +} + +// UpdateKeyOptions : The UpdateKey options. +type UpdateKeyOptions struct { + // The key identifier. + ID *string `validate:"required,ne="` + + // The key patch. + KeyPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateKeyOptions : Instantiate UpdateKeyOptions +func (*VpcClassicV1) NewUpdateKeyOptions(id string, keyPatch map[string]interface{}) *UpdateKeyOptions { + return &UpdateKeyOptions{ + ID: core.StringPtr(id), + KeyPatch: keyPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateKeyOptions) SetID(id string) *UpdateKeyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetKeyPatch : Allow user to set KeyPatch +func (options *UpdateKeyOptions) SetKeyPatch(keyPatch map[string]interface{}) *UpdateKeyOptions { + options.KeyPatch = keyPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateKeyOptions) SetHeaders(param map[string]string) *UpdateKeyOptions { + options.Headers = param + return options +} + +// UpdateLoadBalancerListenerOptions : The UpdateLoadBalancerListener options. +type UpdateLoadBalancerListenerOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ID *string `validate:"required,ne="` + + // The load balancer listener patch. 
+ LoadBalancerListenerPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateLoadBalancerListenerOptions : Instantiate UpdateLoadBalancerListenerOptions +func (*VpcClassicV1) NewUpdateLoadBalancerListenerOptions(loadBalancerID string, id string, loadBalancerListenerPatch map[string]interface{}) *UpdateLoadBalancerListenerOptions { + return &UpdateLoadBalancerListenerOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ID: core.StringPtr(id), + LoadBalancerListenerPatch: loadBalancerListenerPatch, + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *UpdateLoadBalancerListenerOptions) SetLoadBalancerID(loadBalancerID string) *UpdateLoadBalancerListenerOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateLoadBalancerListenerOptions) SetID(id string) *UpdateLoadBalancerListenerOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetLoadBalancerListenerPatch : Allow user to set LoadBalancerListenerPatch +func (options *UpdateLoadBalancerListenerOptions) SetLoadBalancerListenerPatch(loadBalancerListenerPatch map[string]interface{}) *UpdateLoadBalancerListenerOptions { + options.LoadBalancerListenerPatch = loadBalancerListenerPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateLoadBalancerListenerOptions) SetHeaders(param map[string]string) *UpdateLoadBalancerListenerOptions { + options.Headers = param + return options +} + +// UpdateLoadBalancerListenerPolicyOptions : The UpdateLoadBalancerListenerPolicy options. +type UpdateLoadBalancerListenerPolicyOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // The policy identifier. + ID *string `validate:"required,ne="` + + // The listener policy patch. 
+ LoadBalancerListenerPolicyPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateLoadBalancerListenerPolicyOptions : Instantiate UpdateLoadBalancerListenerPolicyOptions +func (*VpcClassicV1) NewUpdateLoadBalancerListenerPolicyOptions(loadBalancerID string, listenerID string, id string, loadBalancerListenerPolicyPatch map[string]interface{}) *UpdateLoadBalancerListenerPolicyOptions { + return &UpdateLoadBalancerListenerPolicyOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + ID: core.StringPtr(id), + LoadBalancerListenerPolicyPatch: loadBalancerListenerPolicyPatch, + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *UpdateLoadBalancerListenerPolicyOptions) SetLoadBalancerID(loadBalancerID string) *UpdateLoadBalancerListenerPolicyOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *UpdateLoadBalancerListenerPolicyOptions) SetListenerID(listenerID string) *UpdateLoadBalancerListenerPolicyOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateLoadBalancerListenerPolicyOptions) SetID(id string) *UpdateLoadBalancerListenerPolicyOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetLoadBalancerListenerPolicyPatch : Allow user to set LoadBalancerListenerPolicyPatch +func (options *UpdateLoadBalancerListenerPolicyOptions) SetLoadBalancerListenerPolicyPatch(loadBalancerListenerPolicyPatch map[string]interface{}) *UpdateLoadBalancerListenerPolicyOptions { + options.LoadBalancerListenerPolicyPatch = loadBalancerListenerPolicyPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateLoadBalancerListenerPolicyOptions) SetHeaders(param map[string]string) *UpdateLoadBalancerListenerPolicyOptions { + options.Headers = param + return options +} + +// UpdateLoadBalancerListenerPolicyRuleOptions : The UpdateLoadBalancerListenerPolicyRule options. +type UpdateLoadBalancerListenerPolicyRuleOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The listener identifier. + ListenerID *string `validate:"required,ne="` + + // The policy identifier. + PolicyID *string `validate:"required,ne="` + + // The rule identifier. + ID *string `validate:"required,ne="` + + // The listener policy rule patch. 
+ LoadBalancerListenerPolicyRulePatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateLoadBalancerListenerPolicyRuleOptions : Instantiate UpdateLoadBalancerListenerPolicyRuleOptions +func (*VpcClassicV1) NewUpdateLoadBalancerListenerPolicyRuleOptions(loadBalancerID string, listenerID string, policyID string, id string, loadBalancerListenerPolicyRulePatch map[string]interface{}) *UpdateLoadBalancerListenerPolicyRuleOptions { + return &UpdateLoadBalancerListenerPolicyRuleOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ListenerID: core.StringPtr(listenerID), + PolicyID: core.StringPtr(policyID), + ID: core.StringPtr(id), + LoadBalancerListenerPolicyRulePatch: loadBalancerListenerPolicyRulePatch, + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *UpdateLoadBalancerListenerPolicyRuleOptions) SetLoadBalancerID(loadBalancerID string) *UpdateLoadBalancerListenerPolicyRuleOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetListenerID : Allow user to set ListenerID +func (options *UpdateLoadBalancerListenerPolicyRuleOptions) SetListenerID(listenerID string) *UpdateLoadBalancerListenerPolicyRuleOptions { + options.ListenerID = core.StringPtr(listenerID) + return options +} + +// SetPolicyID : Allow user to set PolicyID +func (options *UpdateLoadBalancerListenerPolicyRuleOptions) SetPolicyID(policyID string) *UpdateLoadBalancerListenerPolicyRuleOptions { + options.PolicyID = core.StringPtr(policyID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateLoadBalancerListenerPolicyRuleOptions) SetID(id string) *UpdateLoadBalancerListenerPolicyRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetLoadBalancerListenerPolicyRulePatch : Allow user to set LoadBalancerListenerPolicyRulePatch +func (options *UpdateLoadBalancerListenerPolicyRuleOptions) SetLoadBalancerListenerPolicyRulePatch(loadBalancerListenerPolicyRulePatch map[string]interface{}) *UpdateLoadBalancerListenerPolicyRuleOptions { + options.LoadBalancerListenerPolicyRulePatch = loadBalancerListenerPolicyRulePatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateLoadBalancerListenerPolicyRuleOptions) SetHeaders(param map[string]string) *UpdateLoadBalancerListenerPolicyRuleOptions { + options.Headers = param + return options +} + +// UpdateLoadBalancerOptions : The UpdateLoadBalancer options. +type UpdateLoadBalancerOptions struct { + // The load balancer identifier. + ID *string `validate:"required,ne="` + + // The load balancer patch. 
+ LoadBalancerPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateLoadBalancerOptions : Instantiate UpdateLoadBalancerOptions +func (*VpcClassicV1) NewUpdateLoadBalancerOptions(id string, loadBalancerPatch map[string]interface{}) *UpdateLoadBalancerOptions { + return &UpdateLoadBalancerOptions{ + ID: core.StringPtr(id), + LoadBalancerPatch: loadBalancerPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateLoadBalancerOptions) SetID(id string) *UpdateLoadBalancerOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetLoadBalancerPatch : Allow user to set LoadBalancerPatch +func (options *UpdateLoadBalancerOptions) SetLoadBalancerPatch(loadBalancerPatch map[string]interface{}) *UpdateLoadBalancerOptions { + options.LoadBalancerPatch = loadBalancerPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateLoadBalancerOptions) SetHeaders(param map[string]string) *UpdateLoadBalancerOptions { + options.Headers = param + return options +} + +// UpdateLoadBalancerPoolMemberOptions : The UpdateLoadBalancerPoolMember options. +type UpdateLoadBalancerPoolMemberOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The pool identifier. + PoolID *string `validate:"required,ne="` + + // The member identifier. + ID *string `validate:"required,ne="` + + // The load balancer pool member patch. + LoadBalancerPoolMemberPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateLoadBalancerPoolMemberOptions : Instantiate UpdateLoadBalancerPoolMemberOptions +func (*VpcClassicV1) NewUpdateLoadBalancerPoolMemberOptions(loadBalancerID string, poolID string, id string, loadBalancerPoolMemberPatch map[string]interface{}) *UpdateLoadBalancerPoolMemberOptions { + return &UpdateLoadBalancerPoolMemberOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + PoolID: core.StringPtr(poolID), + ID: core.StringPtr(id), + LoadBalancerPoolMemberPatch: loadBalancerPoolMemberPatch, + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *UpdateLoadBalancerPoolMemberOptions) SetLoadBalancerID(loadBalancerID string) *UpdateLoadBalancerPoolMemberOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetPoolID : Allow user to set PoolID +func (options *UpdateLoadBalancerPoolMemberOptions) SetPoolID(poolID string) *UpdateLoadBalancerPoolMemberOptions { + options.PoolID = core.StringPtr(poolID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateLoadBalancerPoolMemberOptions) SetID(id string) *UpdateLoadBalancerPoolMemberOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetLoadBalancerPoolMemberPatch : Allow user to set LoadBalancerPoolMemberPatch +func (options *UpdateLoadBalancerPoolMemberOptions) SetLoadBalancerPoolMemberPatch(loadBalancerPoolMemberPatch map[string]interface{}) *UpdateLoadBalancerPoolMemberOptions { + options.LoadBalancerPoolMemberPatch = loadBalancerPoolMemberPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateLoadBalancerPoolMemberOptions) SetHeaders(param map[string]string) *UpdateLoadBalancerPoolMemberOptions { + options.Headers = param + return options +} + +// UpdateLoadBalancerPoolOptions : The UpdateLoadBalancerPool options. 
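+//
+// Nested load balancer resources require each parent identifier in path
+// order; a sketch (hypothetical IDs and patch map) for the pool member update
+// defined above:
+//
+//   options := vpcClassicService.NewUpdateLoadBalancerPoolMemberOptions(
+//       lbID, poolID, memberID, memberPatch)
+//   member, _, err := vpcClassicService.UpdateLoadBalancerPoolMember(options)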
+type UpdateLoadBalancerPoolOptions struct { + // The load balancer identifier. + LoadBalancerID *string `validate:"required,ne="` + + // The pool identifier. + ID *string `validate:"required,ne="` + + // The load balancer pool patch. + LoadBalancerPoolPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateLoadBalancerPoolOptions : Instantiate UpdateLoadBalancerPoolOptions +func (*VpcClassicV1) NewUpdateLoadBalancerPoolOptions(loadBalancerID string, id string, loadBalancerPoolPatch map[string]interface{}) *UpdateLoadBalancerPoolOptions { + return &UpdateLoadBalancerPoolOptions{ + LoadBalancerID: core.StringPtr(loadBalancerID), + ID: core.StringPtr(id), + LoadBalancerPoolPatch: loadBalancerPoolPatch, + } +} + +// SetLoadBalancerID : Allow user to set LoadBalancerID +func (options *UpdateLoadBalancerPoolOptions) SetLoadBalancerID(loadBalancerID string) *UpdateLoadBalancerPoolOptions { + options.LoadBalancerID = core.StringPtr(loadBalancerID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateLoadBalancerPoolOptions) SetID(id string) *UpdateLoadBalancerPoolOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetLoadBalancerPoolPatch : Allow user to set LoadBalancerPoolPatch +func (options *UpdateLoadBalancerPoolOptions) SetLoadBalancerPoolPatch(loadBalancerPoolPatch map[string]interface{}) *UpdateLoadBalancerPoolOptions { + options.LoadBalancerPoolPatch = loadBalancerPoolPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateLoadBalancerPoolOptions) SetHeaders(param map[string]string) *UpdateLoadBalancerPoolOptions { + options.Headers = param + return options +} + +// UpdateNetworkACLOptions : The UpdateNetworkACL options. +type UpdateNetworkACLOptions struct { + // The network ACL identifier. + ID *string `validate:"required,ne="` + + // The network ACL patch. + NetworkACLPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateNetworkACLOptions : Instantiate UpdateNetworkACLOptions +func (*VpcClassicV1) NewUpdateNetworkACLOptions(id string, networkACLPatch map[string]interface{}) *UpdateNetworkACLOptions { + return &UpdateNetworkACLOptions{ + ID: core.StringPtr(id), + NetworkACLPatch: networkACLPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateNetworkACLOptions) SetID(id string) *UpdateNetworkACLOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetNetworkACLPatch : Allow user to set NetworkACLPatch +func (options *UpdateNetworkACLOptions) SetNetworkACLPatch(networkACLPatch map[string]interface{}) *UpdateNetworkACLOptions { + options.NetworkACLPatch = networkACLPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateNetworkACLOptions) SetHeaders(param map[string]string) *UpdateNetworkACLOptions { + options.Headers = param + return options +} + +// UpdateNetworkACLRuleOptions : The UpdateNetworkACLRule options. +type UpdateNetworkACLRuleOptions struct { + // The network ACL identifier. + NetworkACLID *string `validate:"required,ne="` + + // The rule identifier. + ID *string `validate:"required,ne="` + + // The network ACL rule patch. 
+ NetworkACLRulePatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateNetworkACLRuleOptions : Instantiate UpdateNetworkACLRuleOptions +func (*VpcClassicV1) NewUpdateNetworkACLRuleOptions(networkACLID string, id string, networkACLRulePatch map[string]interface{}) *UpdateNetworkACLRuleOptions { + return &UpdateNetworkACLRuleOptions{ + NetworkACLID: core.StringPtr(networkACLID), + ID: core.StringPtr(id), + NetworkACLRulePatch: networkACLRulePatch, + } +} + +// SetNetworkACLID : Allow user to set NetworkACLID +func (options *UpdateNetworkACLRuleOptions) SetNetworkACLID(networkACLID string) *UpdateNetworkACLRuleOptions { + options.NetworkACLID = core.StringPtr(networkACLID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateNetworkACLRuleOptions) SetID(id string) *UpdateNetworkACLRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetNetworkACLRulePatch : Allow user to set NetworkACLRulePatch +func (options *UpdateNetworkACLRuleOptions) SetNetworkACLRulePatch(networkACLRulePatch map[string]interface{}) *UpdateNetworkACLRuleOptions { + options.NetworkACLRulePatch = networkACLRulePatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateNetworkACLRuleOptions) SetHeaders(param map[string]string) *UpdateNetworkACLRuleOptions { + options.Headers = param + return options +} + +// UpdatePublicGatewayOptions : The UpdatePublicGateway options. +type UpdatePublicGatewayOptions struct { + // The public gateway identifier. + ID *string `validate:"required,ne="` + + // The public gateway patch. + PublicGatewayPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdatePublicGatewayOptions : Instantiate UpdatePublicGatewayOptions +func (*VpcClassicV1) NewUpdatePublicGatewayOptions(id string, publicGatewayPatch map[string]interface{}) *UpdatePublicGatewayOptions { + return &UpdatePublicGatewayOptions{ + ID: core.StringPtr(id), + PublicGatewayPatch: publicGatewayPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdatePublicGatewayOptions) SetID(id string) *UpdatePublicGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetPublicGatewayPatch : Allow user to set PublicGatewayPatch +func (options *UpdatePublicGatewayOptions) SetPublicGatewayPatch(publicGatewayPatch map[string]interface{}) *UpdatePublicGatewayOptions { + options.PublicGatewayPatch = publicGatewayPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdatePublicGatewayOptions) SetHeaders(param map[string]string) *UpdatePublicGatewayOptions { + options.Headers = param + return options +} + +// UpdateSecurityGroupOptions : The UpdateSecurityGroup options. +type UpdateSecurityGroupOptions struct { + // The security group identifier. + ID *string `validate:"required,ne="` + + // The security group patch. 
+ SecurityGroupPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateSecurityGroupOptions : Instantiate UpdateSecurityGroupOptions +func (*VpcClassicV1) NewUpdateSecurityGroupOptions(id string, securityGroupPatch map[string]interface{}) *UpdateSecurityGroupOptions { + return &UpdateSecurityGroupOptions{ + ID: core.StringPtr(id), + SecurityGroupPatch: securityGroupPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateSecurityGroupOptions) SetID(id string) *UpdateSecurityGroupOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetSecurityGroupPatch : Allow user to set SecurityGroupPatch +func (options *UpdateSecurityGroupOptions) SetSecurityGroupPatch(securityGroupPatch map[string]interface{}) *UpdateSecurityGroupOptions { + options.SecurityGroupPatch = securityGroupPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSecurityGroupOptions) SetHeaders(param map[string]string) *UpdateSecurityGroupOptions { + options.Headers = param + return options +} + +// UpdateSecurityGroupRuleOptions : The UpdateSecurityGroupRule options. +type UpdateSecurityGroupRuleOptions struct { + // The security group identifier. + SecurityGroupID *string `validate:"required,ne="` + + // The rule identifier. + ID *string `validate:"required,ne="` + + // The security group rule patch. + SecurityGroupRulePatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateSecurityGroupRuleOptions : Instantiate UpdateSecurityGroupRuleOptions +func (*VpcClassicV1) NewUpdateSecurityGroupRuleOptions(securityGroupID string, id string, securityGroupRulePatch map[string]interface{}) *UpdateSecurityGroupRuleOptions { + return &UpdateSecurityGroupRuleOptions{ + SecurityGroupID: core.StringPtr(securityGroupID), + ID: core.StringPtr(id), + SecurityGroupRulePatch: securityGroupRulePatch, + } +} + +// SetSecurityGroupID : Allow user to set SecurityGroupID +func (options *UpdateSecurityGroupRuleOptions) SetSecurityGroupID(securityGroupID string) *UpdateSecurityGroupRuleOptions { + options.SecurityGroupID = core.StringPtr(securityGroupID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateSecurityGroupRuleOptions) SetID(id string) *UpdateSecurityGroupRuleOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetSecurityGroupRulePatch : Allow user to set SecurityGroupRulePatch +func (options *UpdateSecurityGroupRuleOptions) SetSecurityGroupRulePatch(securityGroupRulePatch map[string]interface{}) *UpdateSecurityGroupRuleOptions { + options.SecurityGroupRulePatch = securityGroupRulePatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSecurityGroupRuleOptions) SetHeaders(param map[string]string) *UpdateSecurityGroupRuleOptions { + options.Headers = param + return options +} + +// UpdateSubnetOptions : The UpdateSubnet options. +type UpdateSubnetOptions struct { + // The subnet identifier. + ID *string `validate:"required,ne="` + + // The subnet patch. 
+ SubnetPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateSubnetOptions : Instantiate UpdateSubnetOptions +func (*VpcClassicV1) NewUpdateSubnetOptions(id string, subnetPatch map[string]interface{}) *UpdateSubnetOptions { + return &UpdateSubnetOptions{ + ID: core.StringPtr(id), + SubnetPatch: subnetPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateSubnetOptions) SetID(id string) *UpdateSubnetOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetSubnetPatch : Allow user to set SubnetPatch +func (options *UpdateSubnetOptions) SetSubnetPatch(subnetPatch map[string]interface{}) *UpdateSubnetOptions { + options.SubnetPatch = subnetPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSubnetOptions) SetHeaders(param map[string]string) *UpdateSubnetOptions { + options.Headers = param + return options +} + +// UpdateVolumeOptions : The UpdateVolume options. +type UpdateVolumeOptions struct { + // The volume identifier. + ID *string `validate:"required,ne="` + + // The volume patch. + VolumePatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateVolumeOptions : Instantiate UpdateVolumeOptions +func (*VpcClassicV1) NewUpdateVolumeOptions(id string, volumePatch map[string]interface{}) *UpdateVolumeOptions { + return &UpdateVolumeOptions{ + ID: core.StringPtr(id), + VolumePatch: volumePatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateVolumeOptions) SetID(id string) *UpdateVolumeOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetVolumePatch : Allow user to set VolumePatch +func (options *UpdateVolumeOptions) SetVolumePatch(volumePatch map[string]interface{}) *UpdateVolumeOptions { + options.VolumePatch = volumePatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateVolumeOptions) SetHeaders(param map[string]string) *UpdateVolumeOptions { + options.Headers = param + return options +} + +// UpdateVPCAddressPrefixOptions : The UpdateVPCAddressPrefix options. +type UpdateVPCAddressPrefixOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // The prefix identifier. + ID *string `validate:"required,ne="` + + // The prefix patch. 
+ AddressPrefixPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateVPCAddressPrefixOptions : Instantiate UpdateVPCAddressPrefixOptions +func (*VpcClassicV1) NewUpdateVPCAddressPrefixOptions(vpcID string, id string, addressPrefixPatch map[string]interface{}) *UpdateVPCAddressPrefixOptions { + return &UpdateVPCAddressPrefixOptions{ + VPCID: core.StringPtr(vpcID), + ID: core.StringPtr(id), + AddressPrefixPatch: addressPrefixPatch, + } +} + +// SetVPCID : Allow user to set VPCID +func (options *UpdateVPCAddressPrefixOptions) SetVPCID(vpcID string) *UpdateVPCAddressPrefixOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateVPCAddressPrefixOptions) SetID(id string) *UpdateVPCAddressPrefixOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetAddressPrefixPatch : Allow user to set AddressPrefixPatch +func (options *UpdateVPCAddressPrefixOptions) SetAddressPrefixPatch(addressPrefixPatch map[string]interface{}) *UpdateVPCAddressPrefixOptions { + options.AddressPrefixPatch = addressPrefixPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateVPCAddressPrefixOptions) SetHeaders(param map[string]string) *UpdateVPCAddressPrefixOptions { + options.Headers = param + return options +} + +// UpdateVPCOptions : The UpdateVPC options. +type UpdateVPCOptions struct { + // The VPC identifier. + ID *string `validate:"required,ne="` + + // The VPC patch. + VPCPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateVPCOptions : Instantiate UpdateVPCOptions +func (*VpcClassicV1) NewUpdateVPCOptions(id string, vpcPatch map[string]interface{}) *UpdateVPCOptions { + return &UpdateVPCOptions{ + ID: core.StringPtr(id), + VPCPatch: vpcPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateVPCOptions) SetID(id string) *UpdateVPCOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetVPCPatch : Allow user to set VPCPatch +func (options *UpdateVPCOptions) SetVPCPatch(vpcPatch map[string]interface{}) *UpdateVPCOptions { + options.VPCPatch = vpcPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateVPCOptions) SetHeaders(param map[string]string) *UpdateVPCOptions { + options.Headers = param + return options +} + +// UpdateVPCRouteOptions : The UpdateVPCRoute options. +type UpdateVPCRouteOptions struct { + // The VPC identifier. + VPCID *string `validate:"required,ne="` + + // The route identifier. + ID *string `validate:"required,ne="` + + // The route patch. 
+ RoutePatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateVPCRouteOptions : Instantiate UpdateVPCRouteOptions +func (*VpcClassicV1) NewUpdateVPCRouteOptions(vpcID string, id string, routePatch map[string]interface{}) *UpdateVPCRouteOptions { + return &UpdateVPCRouteOptions{ + VPCID: core.StringPtr(vpcID), + ID: core.StringPtr(id), + RoutePatch: routePatch, + } +} + +// SetVPCID : Allow user to set VPCID +func (options *UpdateVPCRouteOptions) SetVPCID(vpcID string) *UpdateVPCRouteOptions { + options.VPCID = core.StringPtr(vpcID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateVPCRouteOptions) SetID(id string) *UpdateVPCRouteOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetRoutePatch : Allow user to set RoutePatch +func (options *UpdateVPCRouteOptions) SetRoutePatch(routePatch map[string]interface{}) *UpdateVPCRouteOptions { + options.RoutePatch = routePatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateVPCRouteOptions) SetHeaders(param map[string]string) *UpdateVPCRouteOptions { + options.Headers = param + return options +} + +// UpdateVPNGatewayConnectionOptions : The UpdateVPNGatewayConnection options. +type UpdateVPNGatewayConnectionOptions struct { + // The VPN gateway identifier. + VPNGatewayID *string `validate:"required,ne="` + + // The VPN gateway connection identifier. + ID *string `validate:"required,ne="` + + // The VPN gateway connection patch. + VPNGatewayConnectionPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateVPNGatewayConnectionOptions : Instantiate UpdateVPNGatewayConnectionOptions +func (*VpcClassicV1) NewUpdateVPNGatewayConnectionOptions(vpnGatewayID string, id string, vpnGatewayConnectionPatch map[string]interface{}) *UpdateVPNGatewayConnectionOptions { + return &UpdateVPNGatewayConnectionOptions{ + VPNGatewayID: core.StringPtr(vpnGatewayID), + ID: core.StringPtr(id), + VPNGatewayConnectionPatch: vpnGatewayConnectionPatch, + } +} + +// SetVPNGatewayID : Allow user to set VPNGatewayID +func (options *UpdateVPNGatewayConnectionOptions) SetVPNGatewayID(vpnGatewayID string) *UpdateVPNGatewayConnectionOptions { + options.VPNGatewayID = core.StringPtr(vpnGatewayID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateVPNGatewayConnectionOptions) SetID(id string) *UpdateVPNGatewayConnectionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetVPNGatewayConnectionPatch : Allow user to set VPNGatewayConnectionPatch +func (options *UpdateVPNGatewayConnectionOptions) SetVPNGatewayConnectionPatch(vpnGatewayConnectionPatch map[string]interface{}) *UpdateVPNGatewayConnectionOptions { + options.VPNGatewayConnectionPatch = vpnGatewayConnectionPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateVPNGatewayConnectionOptions) SetHeaders(param map[string]string) *UpdateVPNGatewayConnectionOptions { + options.Headers = param + return options +} + +// UpdateVPNGatewayOptions : The UpdateVPNGateway options. +type UpdateVPNGatewayOptions struct { + // The VPN gateway identifier. + ID *string `validate:"required,ne="` + + // The VPN gateway patch. 
+ VPNGatewayPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateVPNGatewayOptions : Instantiate UpdateVPNGatewayOptions +func (*VpcClassicV1) NewUpdateVPNGatewayOptions(id string, vpnGatewayPatch map[string]interface{}) *UpdateVPNGatewayOptions { + return &UpdateVPNGatewayOptions{ + ID: core.StringPtr(id), + VPNGatewayPatch: vpnGatewayPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateVPNGatewayOptions) SetID(id string) *UpdateVPNGatewayOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetVPNGatewayPatch : Allow user to set VPNGatewayPatch +func (options *UpdateVPNGatewayOptions) SetVPNGatewayPatch(vpnGatewayPatch map[string]interface{}) *UpdateVPNGatewayOptions { + options.VPNGatewayPatch = vpnGatewayPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateVPNGatewayOptions) SetHeaders(param map[string]string) *UpdateVPNGatewayOptions { + options.Headers = param + return options +} + +// VPC : VPC struct +type VPC struct { + // Indicates whether this VPC is connected to Classic Infrastructure. If true, this VPC's resources have private + // network connectivity to the account's Classic Infrastructure resources. Only one VPC, per region, may be connected + // in this way. This value is set at creation and subsequently immutable. + ClassicAccess *bool `json:"classic_access" validate:"required"` + + // The date and time that the VPC was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN for this VPC. + CRN *string `json:"crn" validate:"required"` + + // Array of CSE ([Cloud Service Endpoint](https://cloud.ibm.com/docs/resources?topic=resources-service-endpoints)) + // source IP addresses for the VPC. The VPC will have one CSE source IP address per zone. + CseSourceIps []VpccseSourceIP `json:"cse_source_ips,omitempty"` + + // The default network ACL to use for subnets created in this VPC. + DefaultNetworkACL *NetworkACLReference `json:"default_network_acl" validate:"required"` + + // The default security group to use for network interfaces created in this VPC. + DefaultSecurityGroup *SecurityGroupReference `json:"default_security_group" validate:"required"` + + // The URL for this VPC. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this VPC. + ID *string `json:"id" validate:"required"` + + // The unique user-defined name for this VPC. + Name *string `json:"name" validate:"required"` + + // The resource group for this VPC. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The status of this VPC. + Status *string `json:"status" validate:"required"` +} + +// Constants associated with the VPC.Status property. +// The status of this VPC. +const ( + VPCStatusAvailableConst = "available" + VPCStatusDeletingConst = "deleting" + VPCStatusFailedConst = "failed" + VPCStatusPendingConst = "pending" +) + +// UnmarshalVPC unmarshals an instance of VPC from the specified map of raw messages. 
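+//
+// The status constants above permit simple state checks on a previously
+// retrieved *VPC value (retrieval itself is assumed):
+//
+//   if vpc.Status != nil && *vpc.Status == VPCStatusAvailableConst {
+//       // the VPC is ready for dependent resources
+//   }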
+func UnmarshalVPC(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPC) + err = core.UnmarshalPrimitive(m, "classic_access", &obj.ClassicAccess) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalModel(m, "cse_source_ips", &obj.CseSourceIps, UnmarshalVpccseSourceIP) + if err != nil { + return + } + err = core.UnmarshalModel(m, "default_network_acl", &obj.DefaultNetworkACL, UnmarshalNetworkACLReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "default_security_group", &obj.DefaultSecurityGroup, UnmarshalSecurityGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VpccseSourceIP : VpccseSourceIP struct +type VpccseSourceIP struct { + // The Cloud Service Endpoint source IP address for this zone. + IP *IP `json:"ip" validate:"required"` + + // The zone this Cloud Service Endpoint source IP belongs to. + Zone *ZoneReference `json:"zone" validate:"required"` +} + +// UnmarshalVpccseSourceIP unmarshals an instance of VpccseSourceIP from the specified map of raw messages. +func UnmarshalVpccseSourceIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VpccseSourceIP) + err = core.UnmarshalModel(m, "ip", &obj.IP, UnmarshalIP) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPCCollection : VPCCollection struct +type VPCCollection struct { + // A link to the first page of resources. + First *VPCCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *VPCCollectionNext `json:"next,omitempty"` + + // Collection of VPCs. + Vpcs []VPC `json:"vpcs" validate:"required"` +} + +// UnmarshalVPCCollection unmarshals an instance of VPCCollection from the specified map of raw messages. +func UnmarshalVPCCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPCCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalVPCCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalVPCCollectionNext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpcs", &obj.Vpcs, UnmarshalVPC) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPCCollectionFirst : A link to the first page of resources. +type VPCCollectionFirst struct { + // The URL for a page of resources. 
+ Href *string `json:"href" validate:"required"` +} + +// UnmarshalVPCCollectionFirst unmarshals an instance of VPCCollectionFirst from the specified map of raw messages. +func UnmarshalVPCCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPCCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPCCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type VPCCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalVPCCollectionNext unmarshals an instance of VPCCollectionNext from the specified map of raw messages. +func UnmarshalVPCCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPCCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPCIdentity : Identifies a VPC by a unique property. +// Models which "extend" this model: +// - VPCIdentityByID +// - VPCIdentityByCRN +// - VPCIdentityByHref +type VPCIdentity struct { + // The unique identifier for this VPC. + ID *string `json:"id,omitempty"` + + // The CRN for this VPC. + CRN *string `json:"crn,omitempty"` + + // The URL for this VPC. + Href *string `json:"href,omitempty"` +} + +func (*VPCIdentity) isaVPCIdentity() bool { + return true +} + +type VPCIdentityIntf interface { + isaVPCIdentity() bool +} + +// UnmarshalVPCIdentity unmarshals an instance of VPCIdentity from the specified map of raw messages. +func UnmarshalVPCIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPCIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPCPatch : VPCPatch struct +type VPCPatch struct { + // The unique user-defined name for this VPC. + Name *string `json:"name,omitempty"` +} + +// UnmarshalVPCPatch unmarshals an instance of VPCPatch from the specified map of raw messages. +func UnmarshalVPCPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPCPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the VPCPatch +func (vpcPatch *VPCPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(vpcPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// VPCReference : VPCReference struct +type VPCReference struct { + // The CRN for this VPC. + CRN *string `json:"crn" validate:"required"` + + // The URL for this VPC. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this VPC. + ID *string `json:"id" validate:"required"` + + // The unique user-defined name for this VPC. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalVPCReference unmarshals an instance of VPCReference from the specified map of raw messages. 
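The `AsPatch` helpers (`VPCPatch.AsPatch` above, and the VPN gateway and volume equivalents later in this file) exist to produce the `map[string]interface{}` that the `Update*Options` builders require, such as `NewUpdateVPNGatewayOptions` at the top of this section. A minimal sketch, assuming an authenticated `service *vpcclassicv1.VpcClassicV1`, the `core` pointer helpers from IBM's go-sdk-core, and an `UpdateVPNGateway` method following the SDK's usual (result, response, error) convention:

```go
// renameVPNGateway builds a typed patch, flattens it to the generic map the
// options struct requires, and hands it to the builder.
func renameVPNGateway(service *vpcclassicv1.VpcClassicV1, gatewayID string) error {
	patch := &vpcclassicv1.VPNGatewayPatch{
		Name: core.StringPtr("renamed-gateway"),
	}
	patchMap, err := patch.AsPatch() // defined further down in this file
	if err != nil {
		return err
	}
	opts := service.NewUpdateVPNGatewayOptions(gatewayID, patchMap)
	// Optional extra request headers, e.g. for request tracing.
	opts.SetHeaders(map[string]string{"X-Correlation-Id": "debug-1234"})
	_, _, err = service.UpdateVPNGateway(opts)
	return err
}
```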
+func UnmarshalVPCReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPCReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGateway : VPNGateway struct +// Models which "extend" this model: +// - VPNGatewayPolicyMode +type VPNGateway struct { + // Collection of references to VPN gateway connections. + Connections []VPNGatewayConnectionReference `json:"connections" validate:"required"` + + // The date and time that this VPN gateway was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The VPN gateway's CRN. + CRN *string `json:"crn" validate:"required"` + + // The VPN gateway's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this VPN gateway. + ID *string `json:"id" validate:"required"` + + // Collection of VPN gateway members. + Members []VPNGatewayMember `json:"members" validate:"required"` + + // The user-defined name for this VPN gateway. + Name *string `json:"name" validate:"required"` + + // The resource group for this VPN gateway. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The status of the VPN gateway. + Status *string `json:"status" validate:"required"` + + Subnet *SubnetReference `json:"subnet" validate:"required"` + + // Policy mode VPN gateway. + Mode *string `json:"mode,omitempty"` +} + +// Constants associated with the VPNGateway.ResourceType property. +// The resource type. +const ( + VPNGatewayResourceTypeVPNGatewayConst = "vpn_gateway" +) + +// Constants associated with the VPNGateway.Status property. +// The status of the VPN gateway. +const ( + VPNGatewayStatusAvailableConst = "available" + VPNGatewayStatusDeletingConst = "deleting" + VPNGatewayStatusFailedConst = "failed" + VPNGatewayStatusPendingConst = "pending" +) + +// Constants associated with the VPNGateway.Mode property. +// Policy mode VPN gateway. +const ( + VPNGatewayModePolicyConst = "policy" +) + +func (*VPNGateway) isaVPNGateway() bool { + return true +} + +type VPNGatewayIntf interface { + isaVPNGateway() bool +} + +// UnmarshalVPNGateway unmarshals an instance of VPNGateway from the specified map of raw messages. 
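`VPNGateway` uses the SDK's discriminated-model pattern: APIs traffic in the `VPNGatewayIntf` interface, while `UnmarshalVPNGateway` below always constructs the base `*VPNGateway`, so a type assertion on the base struct recovers the fields. A sketch of that handling, using the status constants defined above (assumes the standard library `log` package):

```go
// describeVPNGateway type-asserts the interface back to the concrete model
// and switches on the documented status values.
func describeVPNGateway(g vpcclassicv1.VPNGatewayIntf) {
	gw, ok := g.(*vpcclassicv1.VPNGateway)
	if !ok {
		log.Printf("unexpected VPN gateway variant: %T", g)
		return
	}
	switch *gw.Status {
	case vpcclassicv1.VPNGatewayStatusAvailableConst:
		log.Printf("gateway %s is ready", *gw.Name)
	case vpcclassicv1.VPNGatewayStatusFailedConst:
		log.Printf("gateway %s failed", *gw.Name)
	default:
		log.Printf("gateway %s is %s", *gw.Name, *gw.Status)
	}
}
```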
+func UnmarshalVPNGateway(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGateway) + err = core.UnmarshalModel(m, "connections", &obj.Connections, UnmarshalVPNGatewayConnectionReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "members", &obj.Members, UnmarshalVPNGatewayMember) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnet", &obj.Subnet, UnmarshalSubnetReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayCollection : VPNGatewayCollection struct +type VPNGatewayCollection struct { + // A link to the first page of resources. + First *VPNGatewayCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *VPNGatewayCollectionNext `json:"next,omitempty"` + + // The total number of resources across all pages. + TotalCount *int64 `json:"total_count" validate:"required"` + + // Collection of VPN gateways. + VPNGateways []VPNGatewayIntf `json:"vpn_gateways" validate:"required"` +} + +// UnmarshalVPNGatewayCollection unmarshals an instance of VPNGatewayCollection from the specified map of raw messages. +func UnmarshalVPNGatewayCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalVPNGatewayCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalVPNGatewayCollectionNext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpn_gateways", &obj.VPNGateways, UnmarshalVPNGateway) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayCollectionFirst : A link to the first page of resources. +type VPNGatewayCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalVPNGatewayCollectionFirst unmarshals an instance of VPNGatewayCollectionFirst from the specified map of raw messages. 
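The `first`/`next` links on `VPNGatewayCollection` drive pagination: `next` is absent on the last page, and each `next.href` carries a `start` token to feed back into the list options. A sketch of walking all pages, assuming `net/url`, the same authenticated `service`, and list-method and `SetStart` names following the generated convention (reusing the `describeVPNGateway` helper sketched above):

```go
// listAllVPNGateways follows the collection's `next` link until it is absent.
func listAllVPNGateways(service *vpcclassicv1.VpcClassicV1) error {
	opts := service.NewListVPNGatewaysOptions()
	for {
		page, _, err := service.ListVPNGateways(opts)
		if err != nil {
			return err
		}
		for _, g := range page.VPNGateways {
			describeVPNGateway(g)
		}
		if page.Next == nil {
			return nil // last page: `next` is absent
		}
		next, err := url.Parse(*page.Next.Href)
		if err != nil {
			return err
		}
		// Feed the next page's `start` token back into the options.
		opts.SetStart(next.Query().Get("start"))
	}
}
```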
+func UnmarshalVPNGatewayCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type VPNGatewayCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalVPNGatewayCollectionNext unmarshals an instance of VPNGatewayCollectionNext from the specified map of raw messages. +func UnmarshalVPNGatewayCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayConnection : VPNGatewayConnection struct +// Models which "extend" this model: +// - VPNGatewayConnectionPolicyMode +type VPNGatewayConnection struct { + // If set to false, the VPN gateway connection is shut down. + AdminStateUp *bool `json:"admin_state_up" validate:"required"` + + // The authentication mode. Only `psk` is currently supported. + AuthenticationMode *string `json:"authentication_mode" validate:"required"` + + // The date and time that this VPN gateway connection was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The Dead Peer Detection settings. + DeadPeerDetection *VPNGatewayConnectionDpd `json:"dead_peer_detection" validate:"required"` + + // The VPN connection's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this VPN gateway connection. + ID *string `json:"id" validate:"required"` + + // Optional IKE policy configuration. The absence of a policy indicates autonegotiation. + IkePolicy *IkePolicyReference `json:"ike_policy,omitempty"` + + // Optional IPsec policy configuration. The absence of a policy indicates + // autonegotiation. + IpsecPolicy *IPsecPolicyReference `json:"ipsec_policy,omitempty"` + + // The mode of the VPN gateway. + Mode *string `json:"mode" validate:"required"` + + // The user-defined name for this VPN gateway connection. + Name *string `json:"name" validate:"required"` + + // The IP address of the peer VPN gateway. + PeerAddress *string `json:"peer_address" validate:"required"` + + // The preshared key. + Psk *string `json:"psk" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The status of a VPN gateway connection. + Status *string `json:"status" validate:"required"` + + // A collection of local CIDRs for this resource. + LocalCIDRs []string `json:"local_cidrs,omitempty"` + + // A collection of peer CIDRs for this resource. + PeerCIDRs []string `json:"peer_cidrs,omitempty"` +} + +// Constants associated with the VPNGatewayConnection.AuthenticationMode property. +// The authentication mode. Only `psk` is currently supported. +const ( + VPNGatewayConnectionAuthenticationModePskConst = "psk" +) + +// Constants associated with the VPNGatewayConnection.Mode property. +// The mode of the VPN gateway. +const ( + VPNGatewayConnectionModePolicyConst = "policy" + VPNGatewayConnectionModeRouteConst = "route" +) + +// Constants associated with the VPNGatewayConnection.ResourceType property. +// The resource type. 
+const ( + VPNGatewayConnectionResourceTypeVPNGatewayConnectionConst = "vpn_gateway_connection" +) + +// Constants associated with the VPNGatewayConnection.Status property. +// The status of a VPN gateway connection. +const ( + VPNGatewayConnectionStatusDownConst = "down" + VPNGatewayConnectionStatusUpConst = "up" +) + +func (*VPNGatewayConnection) isaVPNGatewayConnection() bool { + return true +} + +type VPNGatewayConnectionIntf interface { + isaVPNGatewayConnection() bool +} + +// UnmarshalVPNGatewayConnection unmarshals an instance of VPNGatewayConnection from the specified map of raw messages. +func UnmarshalVPNGatewayConnection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayConnection) + err = core.UnmarshalPrimitive(m, "admin_state_up", &obj.AdminStateUp) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "authentication_mode", &obj.AuthenticationMode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "dead_peer_detection", &obj.DeadPeerDetection, UnmarshalVPNGatewayConnectionDpd) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ike_policy", &obj.IkePolicy, UnmarshalIkePolicyReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ipsec_policy", &obj.IpsecPolicy, UnmarshalIPsecPolicyReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "peer_address", &obj.PeerAddress) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "psk", &obj.Psk) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "local_cidrs", &obj.LocalCIDRs) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "peer_cidrs", &obj.PeerCIDRs) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayConnectionCollection : Collection of VPN gateway connections in a VPN gateway. +type VPNGatewayConnectionCollection struct { + // Array of VPN gateway connections. + Connections []VPNGatewayConnectionIntf `json:"connections" validate:"required"` +} + +// UnmarshalVPNGatewayConnectionCollection unmarshals an instance of VPNGatewayConnectionCollection from the specified map of raw messages. +func UnmarshalVPNGatewayConnectionCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayConnectionCollection) + err = core.UnmarshalModel(m, "connections", &obj.Connections, UnmarshalVPNGatewayConnection) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayConnectionDpd : The Dead Peer Detection settings. +type VPNGatewayConnectionDpd struct { + // Dead Peer Detection actions. + Action *string `json:"action" validate:"required"` + + // Dead Peer Detection interval in seconds. + Interval *int64 `json:"interval" validate:"required"` + + // Dead Peer Detection timeout in seconds. Must be at least the interval. 
+ Timeout *int64 `json:"timeout" validate:"required"` +} + +// Constants associated with the VPNGatewayConnectionDpd.Action property. +// Dead Peer Detection actions. +const ( + VPNGatewayConnectionDpdActionClearConst = "clear" + VPNGatewayConnectionDpdActionHoldConst = "hold" + VPNGatewayConnectionDpdActionNoneConst = "none" + VPNGatewayConnectionDpdActionRestartConst = "restart" +) + +// UnmarshalVPNGatewayConnectionDpd unmarshals an instance of VPNGatewayConnectionDpd from the specified map of raw messages. +func UnmarshalVPNGatewayConnectionDpd(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayConnectionDpd) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "interval", &obj.Interval) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "timeout", &obj.Timeout) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayConnectionDpdPrototype : The Dead Peer Detection settings. +type VPNGatewayConnectionDpdPrototype struct { + // Dead Peer Detection actions. + Action *string `json:"action,omitempty"` + + // Dead Peer Detection interval in seconds. + Interval *int64 `json:"interval,omitempty"` + + // Dead Peer Detection timeout in seconds. Must be at least the interval. + Timeout *int64 `json:"timeout,omitempty"` +} + +// Constants associated with the VPNGatewayConnectionDpdPrototype.Action property. +// Dead Peer Detection actions. +const ( + VPNGatewayConnectionDpdPrototypeActionClearConst = "clear" + VPNGatewayConnectionDpdPrototypeActionHoldConst = "hold" + VPNGatewayConnectionDpdPrototypeActionNoneConst = "none" + VPNGatewayConnectionDpdPrototypeActionRestartConst = "restart" +) + +// UnmarshalVPNGatewayConnectionDpdPrototype unmarshals an instance of VPNGatewayConnectionDpdPrototype from the specified map of raw messages. +func UnmarshalVPNGatewayConnectionDpdPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayConnectionDpdPrototype) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "interval", &obj.Interval) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "timeout", &obj.Timeout) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayConnectionLocalCIDRs : VPNGatewayConnectionLocalCIDRs struct +type VPNGatewayConnectionLocalCIDRs struct { + // A collection of local CIDRs for this resource. + LocalCIDRs []string `json:"local_cidrs,omitempty"` +} + +// UnmarshalVPNGatewayConnectionLocalCIDRs unmarshals an instance of VPNGatewayConnectionLocalCIDRs from the specified map of raw messages. +func UnmarshalVPNGatewayConnectionLocalCIDRs(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayConnectionLocalCIDRs) + err = core.UnmarshalPrimitive(m, "local_cidrs", &obj.LocalCIDRs) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayConnectionPatch : VPNGatewayConnectionPatch struct +type VPNGatewayConnectionPatch struct { + // If set to false, the VPN gateway connection is shut down. + AdminStateUp *bool `json:"admin_state_up,omitempty"` + + // The Dead Peer Detection settings. + DeadPeerDetection *VPNGatewayConnectionDpdPrototype `json:"dead_peer_detection,omitempty"` + + // Optional IKE policy configuration. 
+	// The absence of a policy indicates autonegotiation.
+	IkePolicy IkePolicyIdentityIntf `json:"ike_policy,omitempty"`
+
+	// Optional IPsec policy configuration. The absence of a policy indicates
+	// autonegotiation.
+	IpsecPolicy IPsecPolicyIdentityIntf `json:"ipsec_policy,omitempty"`
+
+	// The user-defined name for this VPN gateway connection.
+	Name *string `json:"name,omitempty"`
+
+	// The IP address of the peer VPN gateway.
+	PeerAddress *string `json:"peer_address,omitempty"`
+
+	// The preshared key.
+	Psk *string `json:"psk,omitempty"`
+}
+
+// UnmarshalVPNGatewayConnectionPatch unmarshals an instance of VPNGatewayConnectionPatch from the specified map of raw messages.
+func UnmarshalVPNGatewayConnectionPatch(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(VPNGatewayConnectionPatch)
+	err = core.UnmarshalPrimitive(m, "admin_state_up", &obj.AdminStateUp)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "dead_peer_detection", &obj.DeadPeerDetection, UnmarshalVPNGatewayConnectionDpdPrototype)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "ike_policy", &obj.IkePolicy, UnmarshalIkePolicyIdentity)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalModel(m, "ipsec_policy", &obj.IpsecPolicy, UnmarshalIPsecPolicyIdentity)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "peer_address", &obj.PeerAddress)
+	if err != nil {
+		return
+	}
+	err = core.UnmarshalPrimitive(m, "psk", &obj.Psk)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// AsPatch returns a generic map representation of the VPNGatewayConnectionPatch
+func (vpnGatewayConnectionPatch *VPNGatewayConnectionPatch) AsPatch() (patch map[string]interface{}, err error) {
+	var jsonData []byte
+	jsonData, err = json.Marshal(vpnGatewayConnectionPatch)
+	if err == nil {
+		err = json.Unmarshal(jsonData, &patch)
+	}
+	return
+}
+
+// VPNGatewayConnectionPeerCIDRs : VPNGatewayConnectionPeerCIDRs struct
+type VPNGatewayConnectionPeerCIDRs struct {
+	// A collection of peer CIDRs for this resource.
+	PeerCIDRs []string `json:"peer_cidrs,omitempty"`
+}
+
+// UnmarshalVPNGatewayConnectionPeerCIDRs unmarshals an instance of VPNGatewayConnectionPeerCIDRs from the specified map of raw messages.
+func UnmarshalVPNGatewayConnectionPeerCIDRs(m map[string]json.RawMessage, result interface{}) (err error) {
+	obj := new(VPNGatewayConnectionPeerCIDRs)
+	err = core.UnmarshalPrimitive(m, "peer_cidrs", &obj.PeerCIDRs)
+	if err != nil {
+		return
+	}
+	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+	return
+}
+
+// VPNGatewayConnectionPrototype : VPNGatewayConnectionPrototype struct
+// Models which "extend" this model:
+// - VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype
+type VPNGatewayConnectionPrototype struct {
+	// If set to false, the VPN gateway connection is shut down.
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+
+	// The Dead Peer Detection settings.
+	DeadPeerDetection *VPNGatewayConnectionDpdPrototype `json:"dead_peer_detection,omitempty"`
+
+	// Optional IKE policy configuration. The absence of a policy indicates autonegotiation.
+	IkePolicy IkePolicyIdentityIntf `json:"ike_policy,omitempty"`
+
+	// Optional IPsec policy configuration. The absence of a policy indicates
+	// autonegotiation.
+ IpsecPolicy IPsecPolicyIdentityIntf `json:"ipsec_policy,omitempty"` + + // The user-defined name for this VPN gateway connection. + Name *string `json:"name,omitempty"` + + // The IP address of the peer VPN gateway. + PeerAddress *string `json:"peer_address" validate:"required"` + + // The preshared key. + Psk *string `json:"psk" validate:"required"` + + // A collection of local CIDRs for this resource. + LocalCIDRs []string `json:"local_cidrs,omitempty"` + + // A collection of peer CIDRs for this resource. + PeerCIDRs []string `json:"peer_cidrs,omitempty"` +} + +func (*VPNGatewayConnectionPrototype) isaVPNGatewayConnectionPrototype() bool { + return true +} + +type VPNGatewayConnectionPrototypeIntf interface { + isaVPNGatewayConnectionPrototype() bool +} + +// UnmarshalVPNGatewayConnectionPrototype unmarshals an instance of VPNGatewayConnectionPrototype from the specified map of raw messages. +func UnmarshalVPNGatewayConnectionPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayConnectionPrototype) + err = core.UnmarshalPrimitive(m, "admin_state_up", &obj.AdminStateUp) + if err != nil { + return + } + err = core.UnmarshalModel(m, "dead_peer_detection", &obj.DeadPeerDetection, UnmarshalVPNGatewayConnectionDpdPrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ike_policy", &obj.IkePolicy, UnmarshalIkePolicyIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ipsec_policy", &obj.IpsecPolicy, UnmarshalIPsecPolicyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "peer_address", &obj.PeerAddress) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "psk", &obj.Psk) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "local_cidrs", &obj.LocalCIDRs) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "peer_cidrs", &obj.PeerCIDRs) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayConnectionReference : VPNGatewayConnectionReference struct +type VPNGatewayConnectionReference struct { + // The VPN connection's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this VPN gateway connection. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this VPN connection. + Name *string `json:"name" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the VPNGatewayConnectionReference.ResourceType property. +// The resource type. +const ( + VPNGatewayConnectionReferenceResourceTypeVPNGatewayConnectionConst = "vpn_gateway_connection" +) + +// UnmarshalVPNGatewayConnectionReference unmarshals an instance of VPNGatewayConnectionReference from the specified map of raw messages. 
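Putting the prototype and DPD models together: a sketch of a policy-mode connection definition. All addresses, CIDRs, and the key are placeholders, and per the field docs above the DPD `Timeout` must be at least the `Interval`:

```go
// A policy-mode VPN connection prototype; PeerAddress and Psk are the only
// required fields, the rest tighten the tunnel's behavior.
proto := &vpcclassicv1.VPNGatewayConnectionPrototype{
	Name:        core.StringPtr("tunnel-to-onprem"),
	PeerAddress: core.StringPtr("203.0.113.10"),
	Psk:         core.StringPtr("example-preshared-key"),
	DeadPeerDetection: &vpcclassicv1.VPNGatewayConnectionDpdPrototype{
		Action:   core.StringPtr(vpcclassicv1.VPNGatewayConnectionDpdPrototypeActionRestartConst),
		Interval: core.Int64Ptr(30),
		Timeout:  core.Int64Ptr(120), // must be at least Interval
	},
	LocalCIDRs: []string{"10.240.0.0/24"},
	PeerCIDRs:  []string{"192.168.1.0/24"},
}
// `proto` would then be passed to the service's connection-create call
// (CreateVPNGatewayConnection in the generated naming convention).
```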
+func UnmarshalVPNGatewayConnectionReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayConnectionReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayMember : VPNGatewayMember struct +type VPNGatewayMember struct { + // The public IP address assigned to the VPN gateway member. + PublicIP *IP `json:"public_ip" validate:"required"` + + // The high availability role assigned to the VPN gateway member. + Role *string `json:"role" validate:"required"` + + // The status of the VPN gateway member. + Status *string `json:"status" validate:"required"` +} + +// Constants associated with the VPNGatewayMember.Role property. +// The high availability role assigned to the VPN gateway member. +const ( + VPNGatewayMemberRoleActiveConst = "active" + VPNGatewayMemberRoleStandbyConst = "standby" +) + +// Constants associated with the VPNGatewayMember.Status property. +// The status of the VPN gateway member. +const ( + VPNGatewayMemberStatusAvailableConst = "available" + VPNGatewayMemberStatusDeletingConst = "deleting" + VPNGatewayMemberStatusFailedConst = "failed" + VPNGatewayMemberStatusPendingConst = "pending" +) + +// UnmarshalVPNGatewayMember unmarshals an instance of VPNGatewayMember from the specified map of raw messages. +func UnmarshalVPNGatewayMember(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayMember) + err = core.UnmarshalModel(m, "public_ip", &obj.PublicIP, UnmarshalIP) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "role", &obj.Role) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayPatch : VPNGatewayPatch struct +type VPNGatewayPatch struct { + // The user-defined name for this VPN gateway. + Name *string `json:"name,omitempty"` +} + +// UnmarshalVPNGatewayPatch unmarshals an instance of VPNGatewayPatch from the specified map of raw messages. +func UnmarshalVPNGatewayPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the VPNGatewayPatch +func (vpnGatewayPatch *VPNGatewayPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(vpnGatewayPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// VPNGatewayPrototype : VPNGatewayPrototype struct +// Models which "extend" this model: +// - VPNGatewayPrototypeVPNGatewayPolicyModePrototype +type VPNGatewayPrototype struct { + // The user-defined name for this VPN gateway. + Name *string `json:"name,omitempty"` + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. 
+ ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + // Identifies a subnet by a unique property. + Subnet SubnetIdentityIntf `json:"subnet" validate:"required"` + + // Policy mode VPN gateway. + Mode *string `json:"mode,omitempty"` +} + +// Constants associated with the VPNGatewayPrototype.Mode property. +// Policy mode VPN gateway. +const ( + VPNGatewayPrototypeModePolicyConst = "policy" +) + +func (*VPNGatewayPrototype) isaVPNGatewayPrototype() bool { + return true +} + +type VPNGatewayPrototypeIntf interface { + isaVPNGatewayPrototype() bool +} + +// UnmarshalVPNGatewayPrototype unmarshals an instance of VPNGatewayPrototype from the specified map of raw messages. +func UnmarshalVPNGatewayPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayPrototype) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnet", &obj.Subnet, UnmarshalSubnetIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Volume : Volume struct +type Volume struct { + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity" validate:"required"` + + // The date and time that the volume was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN for this volume. + CRN *string `json:"crn" validate:"required"` + + // The type of encryption used on the volume. + Encryption *string `json:"encryption" validate:"required"` + + // A reference to the root key used to wrap the data encryption key for the volume. + // + // This property will be present for volumes with an `encryption` type of + // `user_managed`. + EncryptionKey *EncryptionKeyReference `json:"encryption_key,omitempty"` + + // The URL for this volume. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this volume. + ID *string `json:"id" validate:"required"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops" validate:"required"` + + // The unique user-defined name for this volume. + Name *string `json:"name" validate:"required"` + + // The profile this volume uses. + Profile *VolumeProfileReference `json:"profile" validate:"required"` + + // The resource group for this volume. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The status of the volume. + // + // The enumerated values for this property will expand in the future. When processing this property, check for and log + // unknown values. Optionally halt processing and surface the error, or bypass the volume on which the unexpected + // property value was encountered. + Status *string `json:"status" validate:"required"` + + // The collection of volume attachments attaching instances to the volume. + VolumeAttachments []VolumeAttachmentReferenceVolumeContext `json:"volume_attachments" validate:"required"` + + // The zone this volume resides in. + Zone *ZoneReference `json:"zone" validate:"required"` +} + +// Constants associated with the Volume.Encryption property. +// The type of encryption used on the volume. 
+const ( + VolumeEncryptionProviderManagedConst = "provider_managed" + VolumeEncryptionUserManagedConst = "user_managed" +) + +// Constants associated with the Volume.Status property. +// The status of the volume. +// +// The enumerated values for this property will expand in the future. When processing this property, check for and log +// unknown values. Optionally halt processing and surface the error, or bypass the volume on which the unexpected +// property value was encountered. +const ( + VolumeStatusAvailableConst = "available" + VolumeStatusFailedConst = "failed" + VolumeStatusPendingConst = "pending" + VolumeStatusPendingDeletionConst = "pending_deletion" + VolumeStatusUnusableConst = "unusable" +) + +// UnmarshalVolume unmarshals an instance of Volume from the specified map of raw messages. +func UnmarshalVolume(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Volume) + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "encryption", &obj.Encryption) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume_attachments", &obj.VolumeAttachments, UnmarshalVolumeAttachmentReferenceVolumeContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachment : VolumeAttachment struct +type VolumeAttachment struct { + // The date and time that the volume was attached. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // If set to true, when deleting the instance the volume will also be deleted. + DeleteVolumeOnInstanceDelete *bool `json:"delete_volume_on_instance_delete,omitempty"` + + // Information about how the volume is exposed to the instance operating system. + // + // This property may be absent if the volume attachment's `status` is not `attached`. + Device *VolumeAttachmentDevice `json:"device,omitempty"` + + // The URL for this volume attachment. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this volume attachment. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this volume attachment. + Name *string `json:"name" validate:"required"` + + // The status of this volume attachment. + Status *string `json:"status" validate:"required"` + + // The type of volume attachment. 
+ Type *string `json:"type" validate:"required"` + + // The attached volume. + Volume *VolumeReference `json:"volume" validate:"required"` +} + +// Constants associated with the VolumeAttachment.Status property. +// The status of this volume attachment. +const ( + VolumeAttachmentStatusAttachedConst = "attached" + VolumeAttachmentStatusAttachingConst = "attaching" + VolumeAttachmentStatusDeletingConst = "deleting" + VolumeAttachmentStatusDetachingConst = "detaching" +) + +// Constants associated with the VolumeAttachment.Type property. +// The type of volume attachment. +const ( + VolumeAttachmentTypeBootConst = "boot" + VolumeAttachmentTypeDataConst = "data" +) + +// UnmarshalVolumeAttachment unmarshals an instance of VolumeAttachment from the specified map of raw messages. +func UnmarshalVolumeAttachment(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachment) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "delete_volume_on_instance_delete", &obj.DeleteVolumeOnInstanceDelete) + if err != nil { + return + } + err = core.UnmarshalModel(m, "device", &obj.Device, UnmarshalVolumeAttachmentDevice) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume", &obj.Volume, UnmarshalVolumeReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentCollection : VolumeAttachmentCollection struct +type VolumeAttachmentCollection struct { + // Collection of volume attachments. + VolumeAttachments []VolumeAttachment `json:"volume_attachments" validate:"required"` +} + +// UnmarshalVolumeAttachmentCollection unmarshals an instance of VolumeAttachmentCollection from the specified map of raw messages. +func UnmarshalVolumeAttachmentCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentCollection) + err = core.UnmarshalModel(m, "volume_attachments", &obj.VolumeAttachments, UnmarshalVolumeAttachment) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentDevice : VolumeAttachmentDevice struct +type VolumeAttachmentDevice struct { + // A unique identifier for the device which is exposed to the instance operating system. + ID *string `json:"id,omitempty"` +} + +// UnmarshalVolumeAttachmentDevice unmarshals an instance of VolumeAttachmentDevice from the specified map of raw messages. +func UnmarshalVolumeAttachmentDevice(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentDevice) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentPatch : VolumeAttachmentPatch struct +type VolumeAttachmentPatch struct { + // If set to true, when deleting the instance the volume will also be deleted. 
+ DeleteVolumeOnInstanceDelete *bool `json:"delete_volume_on_instance_delete,omitempty"` + + // The user-defined name for this volume attachment. + Name *string `json:"name,omitempty"` +} + +// UnmarshalVolumeAttachmentPatch unmarshals an instance of VolumeAttachmentPatch from the specified map of raw messages. +func UnmarshalVolumeAttachmentPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPatch) + err = core.UnmarshalPrimitive(m, "delete_volume_on_instance_delete", &obj.DeleteVolumeOnInstanceDelete) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the VolumeAttachmentPatch +func (volumeAttachmentPatch *VolumeAttachmentPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(volumeAttachmentPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// VolumeAttachmentPrototypeInstanceByImageContext : VolumeAttachmentPrototypeInstanceByImageContext struct +type VolumeAttachmentPrototypeInstanceByImageContext struct { + // If set to true, when deleting the instance the volume will also be deleted. + DeleteVolumeOnInstanceDelete *bool `json:"delete_volume_on_instance_delete,omitempty"` + + // The user-defined name for this volume attachment. + Name *string `json:"name,omitempty"` + + // A prototype object for a new volume. + Volume *VolumePrototypeInstanceByImageContext `json:"volume" validate:"required"` +} + +// NewVolumeAttachmentPrototypeInstanceByImageContext : Instantiate VolumeAttachmentPrototypeInstanceByImageContext (Generic Model Constructor) +func (*VpcClassicV1) NewVolumeAttachmentPrototypeInstanceByImageContext(volume *VolumePrototypeInstanceByImageContext) (model *VolumeAttachmentPrototypeInstanceByImageContext, err error) { + model = &VolumeAttachmentPrototypeInstanceByImageContext{ + Volume: volume, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalVolumeAttachmentPrototypeInstanceByImageContext unmarshals an instance of VolumeAttachmentPrototypeInstanceByImageContext from the specified map of raw messages. +func UnmarshalVolumeAttachmentPrototypeInstanceByImageContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeInstanceByImageContext) + err = core.UnmarshalPrimitive(m, "delete_volume_on_instance_delete", &obj.DeleteVolumeOnInstanceDelete) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume", &obj.Volume, UnmarshalVolumePrototypeInstanceByImageContext) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentPrototypeInstanceContext : VolumeAttachmentPrototypeInstanceContext struct +type VolumeAttachmentPrototypeInstanceContext struct { + // If set to true, when deleting the instance the volume will also be deleted. + DeleteVolumeOnInstanceDelete *bool `json:"delete_volume_on_instance_delete,omitempty"` + + // The user-defined name for this volume attachment. + Name *string `json:"name,omitempty"` + + // The identity of the volume to attach to the instance, or a prototype object for a new + // volume. 
+ Volume VolumeAttachmentVolumePrototypeInstanceContextIntf `json:"volume" validate:"required"` +} + +// NewVolumeAttachmentPrototypeInstanceContext : Instantiate VolumeAttachmentPrototypeInstanceContext (Generic Model Constructor) +func (*VpcClassicV1) NewVolumeAttachmentPrototypeInstanceContext(volume VolumeAttachmentVolumePrototypeInstanceContextIntf) (model *VolumeAttachmentPrototypeInstanceContext, err error) { + model = &VolumeAttachmentPrototypeInstanceContext{ + Volume: volume, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalVolumeAttachmentPrototypeInstanceContext unmarshals an instance of VolumeAttachmentPrototypeInstanceContext from the specified map of raw messages. +func UnmarshalVolumeAttachmentPrototypeInstanceContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeInstanceContext) + err = core.UnmarshalPrimitive(m, "delete_volume_on_instance_delete", &obj.DeleteVolumeOnInstanceDelete) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume", &obj.Volume, UnmarshalVolumeAttachmentVolumePrototypeInstanceContext) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentReferenceInstanceContext : VolumeAttachmentReferenceInstanceContext struct +type VolumeAttachmentReferenceInstanceContext struct { + // Information about how the volume is exposed to the instance operating system. + // + // This property may be absent if the volume attachment's `status` is not `attached`. + Device *VolumeAttachmentDevice `json:"device,omitempty"` + + // The URL for this volume attachment. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this volume attachment. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this volume attachment. + Name *string `json:"name" validate:"required"` + + // The attached volume. + Volume *VolumeReference `json:"volume" validate:"required"` +} + +// UnmarshalVolumeAttachmentReferenceInstanceContext unmarshals an instance of VolumeAttachmentReferenceInstanceContext from the specified map of raw messages. +func UnmarshalVolumeAttachmentReferenceInstanceContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentReferenceInstanceContext) + err = core.UnmarshalModel(m, "device", &obj.Device, UnmarshalVolumeAttachmentDevice) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume", &obj.Volume, UnmarshalVolumeReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentReferenceVolumeContext : VolumeAttachmentReferenceVolumeContext struct +type VolumeAttachmentReferenceVolumeContext struct { + // Information about how the volume is exposed to the instance operating system. + // + // This property may be absent if the volume attachment's `status` is not `attached`. + Device *VolumeAttachmentDevice `json:"device,omitempty"` + + // The URL for this volume attachment. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this volume attachment. 
+ ID *string `json:"id" validate:"required"` + + // The attached instance. + Instance *InstanceReference `json:"instance" validate:"required"` + + // The user-defined name for this volume attachment. + Name *string `json:"name" validate:"required"` + + // The type of volume attachment. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the VolumeAttachmentReferenceVolumeContext.Type property. +// The type of volume attachment. +const ( + VolumeAttachmentReferenceVolumeContextTypeBootConst = "boot" + VolumeAttachmentReferenceVolumeContextTypeDataConst = "data" +) + +// UnmarshalVolumeAttachmentReferenceVolumeContext unmarshals an instance of VolumeAttachmentReferenceVolumeContext from the specified map of raw messages. +func UnmarshalVolumeAttachmentReferenceVolumeContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentReferenceVolumeContext) + err = core.UnmarshalModel(m, "device", &obj.Device, UnmarshalVolumeAttachmentDevice) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "instance", &obj.Instance, UnmarshalInstanceReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentVolumePrototypeInstanceContext : The identity of the volume to attach to the instance, or a prototype object for a new volume. +// Models which "extend" this model: +// - VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext +type VolumeAttachmentVolumePrototypeInstanceContext struct { + // The identity of the root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile,omitempty"` + + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity,omitempty"` +} + +func (*VolumeAttachmentVolumePrototypeInstanceContext) isaVolumeAttachmentVolumePrototypeInstanceContext() bool { + return true +} + +type VolumeAttachmentVolumePrototypeInstanceContextIntf interface { + isaVolumeAttachmentVolumePrototypeInstanceContext() bool +} + +// UnmarshalVolumeAttachmentVolumePrototypeInstanceContext unmarshals an instance of VolumeAttachmentVolumePrototypeInstanceContext from the specified map of raw messages. 
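A sketch of the generic-model constructor above in use: describe a new data volume with the in-context prototype, wrap it in an attachment, and mark it for deletion with the instance. The profile name is illustrative; only the constructor and `core` helpers are taken from the code shown here:

```go
// newDataVolumeAttachment builds an attachment prototype for a fresh 100 GB
// data volume, created alongside the instance it attaches to.
func newDataVolumeAttachment(service *vpcclassicv1.VpcClassicV1) (*vpcclassicv1.VolumeAttachmentPrototypeInstanceContext, error) {
	volume := &vpcclassicv1.VolumeAttachmentVolumePrototypeInstanceContext{
		Name:     core.StringPtr("data-volume"),
		Capacity: core.Int64Ptr(100), // gigabytes
		Profile: &vpcclassicv1.VolumeProfileIdentity{
			Name: core.StringPtr("general-purpose"), // illustrative profile name
		},
	}
	attachment, err := service.NewVolumeAttachmentPrototypeInstanceContext(volume)
	if err != nil {
		return nil, err
	}
	attachment.DeleteVolumeOnInstanceDelete = core.BoolPtr(true)
	return attachment, nil
}
```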
+func UnmarshalVolumeAttachmentVolumePrototypeInstanceContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentVolumePrototypeInstanceContext) + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeCollection : VolumeCollection struct +type VolumeCollection struct { + // A link to the first page of resources. + First *VolumeCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *VolumeCollectionNext `json:"next,omitempty"` + + // Collection of volumes. + Volumes []Volume `json:"volumes" validate:"required"` +} + +// UnmarshalVolumeCollection unmarshals an instance of VolumeCollection from the specified map of raw messages. +func UnmarshalVolumeCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalVolumeCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalVolumeCollectionNext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volumes", &obj.Volumes, UnmarshalVolume) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeCollectionFirst : A link to the first page of resources. +type VolumeCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalVolumeCollectionFirst unmarshals an instance of VolumeCollectionFirst from the specified map of raw messages. +func UnmarshalVolumeCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type VolumeCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalVolumeCollectionNext unmarshals an instance of VolumeCollectionNext from the specified map of raw messages. +func UnmarshalVolumeCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeIdentity : Identifies a volume by a unique property. 
+// Models which "extend" this model: +// - VolumeIdentityByID +// - VolumeIdentityByCRN +// - VolumeIdentityByHref +type VolumeIdentity struct { + // The unique identifier for this volume. + ID *string `json:"id,omitempty"` + + // The CRN for this volume. + CRN *string `json:"crn,omitempty"` + + // The URL for this volume. + Href *string `json:"href,omitempty"` +} + +func (*VolumeIdentity) isaVolumeIdentity() bool { + return true +} + +type VolumeIdentityIntf interface { + isaVolumeIdentity() bool +} + +// UnmarshalVolumeIdentity unmarshals an instance of VolumeIdentity from the specified map of raw messages. +func UnmarshalVolumeIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumePatch : VolumePatch struct +type VolumePatch struct { + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` +} + +// UnmarshalVolumePatch unmarshals an instance of VolumePatch from the specified map of raw messages. +func UnmarshalVolumePatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumePatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the VolumePatch +func (volumePatch *VolumePatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(volumePatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// VolumeProfile : VolumeProfile struct +type VolumeProfile struct { + // The product family this volume profile belongs to. + Family *string `json:"family,omitempty"` + + // The URL for this volume profile. + Href *string `json:"href" validate:"required"` + + // The globally unique name for this volume profile. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalVolumeProfile unmarshals an instance of VolumeProfile from the specified map of raw messages. +func UnmarshalVolumeProfile(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeProfile) + err = core.UnmarshalPrimitive(m, "family", &obj.Family) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeProfileCollection : VolumeProfileCollection struct +type VolumeProfileCollection struct { + // A link to the first page of resources. + First *VolumeProfileCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *VolumeProfileCollectionNext `json:"next,omitempty"` + + // Collection of volume profiles. + Profiles []VolumeProfile `json:"profiles" validate:"required"` + + // The total number of resources across all pages. 
+ TotalCount *int64 `json:"total_count" validate:"required"` +} + +// UnmarshalVolumeProfileCollection unmarshals an instance of VolumeProfileCollection from the specified map of raw messages. +func UnmarshalVolumeProfileCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeProfileCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalVolumeProfileCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalVolumeProfileCollectionNext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profiles", &obj.Profiles, UnmarshalVolumeProfile) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeProfileCollectionFirst : A link to the first page of resources. +type VolumeProfileCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalVolumeProfileCollectionFirst unmarshals an instance of VolumeProfileCollectionFirst from the specified map of raw messages. +func UnmarshalVolumeProfileCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeProfileCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeProfileCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type VolumeProfileCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalVolumeProfileCollectionNext unmarshals an instance of VolumeProfileCollectionNext from the specified map of raw messages. +func UnmarshalVolumeProfileCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeProfileCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeProfileIdentity : Identifies a volume profile by a unique property. +// Models which "extend" this model: +// - VolumeProfileIdentityByName +// - VolumeProfileIdentityByHref +type VolumeProfileIdentity struct { + // The globally unique name for this volume profile. + Name *string `json:"name,omitempty"` + + // The URL for this volume profile. + Href *string `json:"href,omitempty"` +} + +func (*VolumeProfileIdentity) isaVolumeProfileIdentity() bool { + return true +} + +type VolumeProfileIdentityIntf interface { + isaVolumeProfileIdentity() bool +} + +// UnmarshalVolumeProfileIdentity unmarshals an instance of VolumeProfileIdentity from the specified map of raw messages. +func UnmarshalVolumeProfileIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeProfileIdentity) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeProfileReference : VolumeProfileReference struct +type VolumeProfileReference struct { + // The URL for this volume profile. 
+ Href *string `json:"href" validate:"required"` + + // The globally unique name for this volume profile. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalVolumeProfileReference unmarshals an instance of VolumeProfileReference from the specified map of raw messages. +func UnmarshalVolumeProfileReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeProfileReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumePrototype : VolumePrototype struct +// Models which "extend" this model: +// - VolumePrototypeVolumeByCapacity +type VolumePrototype struct { + // The identity of the root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + // The location of the volume. + Zone ZoneIdentityIntf `json:"zone" validate:"required"` + + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity,omitempty"` +} + +func (*VolumePrototype) isaVolumePrototype() bool { + return true +} + +type VolumePrototypeIntf interface { + isaVolumePrototype() bool +} + +// UnmarshalVolumePrototype unmarshals an instance of VolumePrototype from the specified map of raw messages. +func UnmarshalVolumePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumePrototype) + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumePrototypeInstanceByImageContext : VolumePrototypeInstanceByImageContext struct +type VolumePrototypeInstanceByImageContext struct { + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. 
+ Capacity *int64 `json:"capacity,omitempty"` + + // The identity of the root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` +} + +// NewVolumePrototypeInstanceByImageContext : Instantiate VolumePrototypeInstanceByImageContext (Generic Model Constructor) +func (*VpcClassicV1) NewVolumePrototypeInstanceByImageContext(profile VolumeProfileIdentityIntf) (model *VolumePrototypeInstanceByImageContext, err error) { + model = &VolumePrototypeInstanceByImageContext{ + Profile: profile, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalVolumePrototypeInstanceByImageContext unmarshals an instance of VolumePrototypeInstanceByImageContext from the specified map of raw messages. +func UnmarshalVolumePrototypeInstanceByImageContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumePrototypeInstanceByImageContext) + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeReference : VolumeReference struct +type VolumeReference struct { + // The CRN for this volume. + CRN *string `json:"crn" validate:"required"` + + // The URL for this volume. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this volume. + ID *string `json:"id" validate:"required"` + + // The unique user-defined name for this volume. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalVolumeReference unmarshals an instance of VolumeReference from the specified map of raw messages. +func UnmarshalVolumeReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// Zone : Zone struct +type Zone struct { + // The URL for this zone. + Href *string `json:"href" validate:"required"` + + // The globally unique name for this zone. + Name *string `json:"name" validate:"required"` + + // The region this zone belongs to. + Region *RegionReference `json:"region" validate:"required"` + + // The availability status of this zone. + Status *string `json:"status" validate:"required"` +} + +// Constants associated with the Zone.Status property. +// The availability status of this zone. 
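+// A typical availability check compares against these constants; a minimal
+// sketch (zone is an assumed *Zone with a non-nil Status):
+//
+//	if *zone.Status == ZoneStatusAvailableConst {
+//		// safe to place resources in this zone
+//	}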
+const ( + ZoneStatusAvailableConst = "available" + ZoneStatusImpairedConst = "impaired" + ZoneStatusUnavailableConst = "unavailable" +) + +// UnmarshalZone unmarshals an instance of Zone from the specified map of raw messages. +func UnmarshalZone(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Zone) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "region", &obj.Region, UnmarshalRegionReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZoneCollection : ZoneCollection struct +type ZoneCollection struct { + // Collection of zones. + Zones []Zone `json:"zones" validate:"required"` +} + +// UnmarshalZoneCollection unmarshals an instance of ZoneCollection from the specified map of raw messages. +func UnmarshalZoneCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZoneCollection) + err = core.UnmarshalModel(m, "zones", &obj.Zones, UnmarshalZone) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZoneIdentity : Identifies a zone by a unique property. +// Models which "extend" this model: +// - ZoneIdentityByName +// - ZoneIdentityByHref +type ZoneIdentity struct { + // The globally unique name for this zone. + Name *string `json:"name,omitempty"` + + // The URL for this zone. + Href *string `json:"href,omitempty"` +} + +func (*ZoneIdentity) isaZoneIdentity() bool { + return true +} + +type ZoneIdentityIntf interface { + isaZoneIdentity() bool +} + +// UnmarshalZoneIdentity unmarshals an instance of ZoneIdentity from the specified map of raw messages. +func UnmarshalZoneIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZoneIdentity) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZoneReference : ZoneReference struct +type ZoneReference struct { + // The URL for this zone. + Href *string `json:"href" validate:"required"` + + // The globally unique name for this zone. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalZoneReference unmarshals an instance of ZoneReference from the specified map of raw messages. +func UnmarshalZoneReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZoneReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// CertificateInstanceIdentityByCRN : CertificateInstanceIdentityByCRN struct +// This model "extends" CertificateInstanceIdentity +type CertificateInstanceIdentityByCRN struct { + // The CRN for this certificate instance. 
+ CRN *string `json:"crn" validate:"required"` +} + +// NewCertificateInstanceIdentityByCRN : Instantiate CertificateInstanceIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewCertificateInstanceIdentityByCRN(crn string) (model *CertificateInstanceIdentityByCRN, err error) { + model = &CertificateInstanceIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*CertificateInstanceIdentityByCRN) isaCertificateInstanceIdentity() bool { + return true +} + +// UnmarshalCertificateInstanceIdentityByCRN unmarshals an instance of CertificateInstanceIdentityByCRN from the specified map of raw messages. +func UnmarshalCertificateInstanceIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(CertificateInstanceIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// EncryptionKeyIdentityByCRN : EncryptionKeyIdentityByCRN struct +// This model "extends" EncryptionKeyIdentity +type EncryptionKeyIdentityByCRN struct { + // The CRN of the [Key Protect Root + // Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto + // Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource. + CRN *string `json:"crn" validate:"required"` +} + +// NewEncryptionKeyIdentityByCRN : Instantiate EncryptionKeyIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewEncryptionKeyIdentityByCRN(crn string) (model *EncryptionKeyIdentityByCRN, err error) { + model = &EncryptionKeyIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*EncryptionKeyIdentityByCRN) isaEncryptionKeyIdentity() bool { + return true +} + +// UnmarshalEncryptionKeyIdentityByCRN unmarshals an instance of EncryptionKeyIdentityByCRN from the specified map of raw messages. +func UnmarshalEncryptionKeyIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(EncryptionKeyIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref : FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref struct +// This model "extends" FloatingIPByTargetNetworkInterfaceIdentity +type FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref struct { + // The URL for this network interface. 
+ Href *string `json:"href" validate:"required"` +} + +// NewFloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref : Instantiate FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewFloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref(href string) (model *FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref, err error) { + model = &FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref) isaFloatingIPByTargetNetworkInterfaceIdentity() bool { + return true +} + +// UnmarshalFloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref unmarshals an instance of FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref from the specified map of raw messages. +func UnmarshalFloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID : FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID struct +// This model "extends" FloatingIPByTargetNetworkInterfaceIdentity +type FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID struct { + // The unique identifier for this network interface. + ID *string `json:"id" validate:"required"` +} + +// NewFloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID : Instantiate FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewFloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID(id string) (model *FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID, err error) { + model = &FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID) isaFloatingIPByTargetNetworkInterfaceIdentity() bool { + return true +} + +// UnmarshalFloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID unmarshals an instance of FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID from the specified map of raw messages. +func UnmarshalFloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPByTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref : FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref struct +// This model "extends" FloatingIPPatchTargetNetworkInterfaceIdentity +type FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref struct { + // The URL for this network interface. 
+ Href *string `json:"href" validate:"required"` +} + +// NewFloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref : Instantiate FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewFloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref(href string) (model *FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref, err error) { + model = &FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref) isaFloatingIPPatchTargetNetworkInterfaceIdentity() bool { + return true +} + +// UnmarshalFloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref unmarshals an instance of FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref from the specified map of raw messages. +func UnmarshalFloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID : FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID struct +// This model "extends" FloatingIPPatchTargetNetworkInterfaceIdentity +type FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID struct { + // The unique identifier for this network interface. + ID *string `json:"id" validate:"required"` +} + +// NewFloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID : Instantiate FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewFloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID(id string) (model *FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID, err error) { + model = &FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID) isaFloatingIPPatchTargetNetworkInterfaceIdentity() bool { + return true +} + +// UnmarshalFloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID unmarshals an instance of FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID from the specified map of raw messages. +func UnmarshalFloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPPatchTargetNetworkInterfaceIdentityNetworkInterfaceIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPPrototypeFloatingIPByTarget : FloatingIPPrototypeFloatingIPByTarget struct +// This model "extends" FloatingIPPrototype +type FloatingIPPrototypeFloatingIPByTarget struct { + // The unique user-defined name for this floating IP. 
If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string `json:"name,omitempty"` + + // The network interface this floating IP is to be bound to. + Target FloatingIPByTargetNetworkInterfaceIdentityIntf `json:"target" validate:"required"` +} + +// NewFloatingIPPrototypeFloatingIPByTarget : Instantiate FloatingIPPrototypeFloatingIPByTarget (Generic Model Constructor) +func (*VpcClassicV1) NewFloatingIPPrototypeFloatingIPByTarget(target FloatingIPByTargetNetworkInterfaceIdentityIntf) (model *FloatingIPPrototypeFloatingIPByTarget, err error) { + model = &FloatingIPPrototypeFloatingIPByTarget{ + Target: target, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*FloatingIPPrototypeFloatingIPByTarget) isaFloatingIPPrototype() bool { + return true +} + +// UnmarshalFloatingIPPrototypeFloatingIPByTarget unmarshals an instance of FloatingIPPrototypeFloatingIPByTarget from the specified map of raw messages. +func UnmarshalFloatingIPPrototypeFloatingIPByTarget(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPPrototypeFloatingIPByTarget) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "target", &obj.Target, UnmarshalFloatingIPByTargetNetworkInterfaceIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPPrototypeFloatingIPByZone : FloatingIPPrototypeFloatingIPByZone struct +// This model "extends" FloatingIPPrototype +type FloatingIPPrototypeFloatingIPByZone struct { + // The unique user-defined name for this floating IP. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string `json:"name,omitempty"` + + // The identity of the zone to provision a floating IP in. + Zone ZoneIdentityIntf `json:"zone" validate:"required"` +} + +// NewFloatingIPPrototypeFloatingIPByZone : Instantiate FloatingIPPrototypeFloatingIPByZone (Generic Model Constructor) +func (*VpcClassicV1) NewFloatingIPPrototypeFloatingIPByZone(zone ZoneIdentityIntf) (model *FloatingIPPrototypeFloatingIPByZone, err error) { + model = &FloatingIPPrototypeFloatingIPByZone{ + Zone: zone, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*FloatingIPPrototypeFloatingIPByZone) isaFloatingIPPrototype() bool { + return true +} + +// UnmarshalFloatingIPPrototypeFloatingIPByZone unmarshals an instance of FloatingIPPrototypeFloatingIPByZone from the specified map of raw messages. +func UnmarshalFloatingIPPrototypeFloatingIPByZone(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPPrototypeFloatingIPByZone) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPTargetNetworkInterfaceReference : FloatingIPTargetNetworkInterfaceReference struct +// This model "extends" FloatingIPTarget +type FloatingIPTargetNetworkInterfaceReference struct { + // The URL for this network interface. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network interface. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this network interface. + Name *string `json:"name" validate:"required"` + + // The primary IPv4 address. 
+ PrimaryIpv4Address *string `json:"primary_ipv4_address" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the FloatingIPTargetNetworkInterfaceReference.ResourceType property. +// The resource type. +const ( + FloatingIPTargetNetworkInterfaceReferenceResourceTypeNetworkInterfaceConst = "network_interface" +) + +func (*FloatingIPTargetNetworkInterfaceReference) isaFloatingIPTarget() bool { + return true +} + +// UnmarshalFloatingIPTargetNetworkInterfaceReference unmarshals an instance of FloatingIPTargetNetworkInterfaceReference from the specified map of raw messages. +func UnmarshalFloatingIPTargetNetworkInterfaceReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPTargetNetworkInterfaceReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "primary_ipv4_address", &obj.PrimaryIpv4Address) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// FloatingIPTargetPublicGatewayReference : FloatingIPTargetPublicGatewayReference struct +// This model "extends" FloatingIPTarget +type FloatingIPTargetPublicGatewayReference struct { + // The CRN for this public gateway. + CRN *string `json:"crn" validate:"required"` + + // The URL for this public gateway. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this public gateway. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this public gateway. + Name *string `json:"name" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the FloatingIPTargetPublicGatewayReference.ResourceType property. +// The resource type. +const ( + FloatingIPTargetPublicGatewayReferenceResourceTypePublicGatewayConst = "public_gateway" +) + +func (*FloatingIPTargetPublicGatewayReference) isaFloatingIPTarget() bool { + return true +} + +// UnmarshalFloatingIPTargetPublicGatewayReference unmarshals an instance of FloatingIPTargetPublicGatewayReference from the specified map of raw messages. +func UnmarshalFloatingIPTargetPublicGatewayReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(FloatingIPTargetPublicGatewayReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IkePolicyIdentityByHref : IkePolicyIdentityByHref struct +// This model "extends" IkePolicyIdentity +type IkePolicyIdentityByHref struct { + // The IKE policy's canonical URL. 
+ Href *string `json:"href" validate:"required"` +} + +// NewIkePolicyIdentityByHref : Instantiate IkePolicyIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewIkePolicyIdentityByHref(href string) (model *IkePolicyIdentityByHref, err error) { + model = &IkePolicyIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*IkePolicyIdentityByHref) isaIkePolicyIdentity() bool { + return true +} + +// UnmarshalIkePolicyIdentityByHref unmarshals an instance of IkePolicyIdentityByHref from the specified map of raw messages. +func UnmarshalIkePolicyIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IkePolicyIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IkePolicyIdentityByID : IkePolicyIdentityByID struct +// This model "extends" IkePolicyIdentity +type IkePolicyIdentityByID struct { + // The unique identifier for this IKE policy. + ID *string `json:"id" validate:"required"` +} + +// NewIkePolicyIdentityByID : Instantiate IkePolicyIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewIkePolicyIdentityByID(id string) (model *IkePolicyIdentityByID, err error) { + model = &IkePolicyIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*IkePolicyIdentityByID) isaIkePolicyIdentity() bool { + return true +} + +// UnmarshalIkePolicyIdentityByID unmarshals an instance of IkePolicyIdentityByID from the specified map of raw messages. +func UnmarshalIkePolicyIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IkePolicyIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IPsecPolicyIdentityByHref : IPsecPolicyIdentityByHref struct +// This model "extends" IPsecPolicyIdentity +type IPsecPolicyIdentityByHref struct { + // The IPsec policy's canonical URL. + Href *string `json:"href" validate:"required"` +} + +// NewIPsecPolicyIdentityByHref : Instantiate IPsecPolicyIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewIPsecPolicyIdentityByHref(href string) (model *IPsecPolicyIdentityByHref, err error) { + model = &IPsecPolicyIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*IPsecPolicyIdentityByHref) isaIPsecPolicyIdentity() bool { + return true +} + +// UnmarshalIPsecPolicyIdentityByHref unmarshals an instance of IPsecPolicyIdentityByHref from the specified map of raw messages. +func UnmarshalIPsecPolicyIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IPsecPolicyIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// IPsecPolicyIdentityByID : IPsecPolicyIdentityByID struct +// This model "extends" IPsecPolicyIdentity +type IPsecPolicyIdentityByID struct { + // The unique identifier for this IPsec policy. 
+ ID *string `json:"id" validate:"required"` +} + +// NewIPsecPolicyIdentityByID : Instantiate IPsecPolicyIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewIPsecPolicyIdentityByID(id string) (model *IPsecPolicyIdentityByID, err error) { + model = &IPsecPolicyIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*IPsecPolicyIdentityByID) isaIPsecPolicyIdentity() bool { + return true +} + +// UnmarshalIPsecPolicyIdentityByID unmarshals an instance of IPsecPolicyIdentityByID from the specified map of raw messages. +func UnmarshalIPsecPolicyIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(IPsecPolicyIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageIdentityByCRN : ImageIdentityByCRN struct +// This model "extends" ImageIdentity +type ImageIdentityByCRN struct { + // The CRN for this image. + CRN *string `json:"crn" validate:"required"` +} + +// NewImageIdentityByCRN : Instantiate ImageIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewImageIdentityByCRN(crn string) (model *ImageIdentityByCRN, err error) { + model = &ImageIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ImageIdentityByCRN) isaImageIdentity() bool { + return true +} + +// UnmarshalImageIdentityByCRN unmarshals an instance of ImageIdentityByCRN from the specified map of raw messages. +func UnmarshalImageIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageIdentityByHref : ImageIdentityByHref struct +// This model "extends" ImageIdentity +type ImageIdentityByHref struct { + // The URL for this image. + Href *string `json:"href" validate:"required"` +} + +// NewImageIdentityByHref : Instantiate ImageIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewImageIdentityByHref(href string) (model *ImageIdentityByHref, err error) { + model = &ImageIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ImageIdentityByHref) isaImageIdentity() bool { + return true +} + +// UnmarshalImageIdentityByHref unmarshals an instance of ImageIdentityByHref from the specified map of raw messages. +func UnmarshalImageIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImageIdentityByID : ImageIdentityByID struct +// This model "extends" ImageIdentity +type ImageIdentityByID struct { + // The unique identifier for this image. 
+ ID *string `json:"id" validate:"required"` +} + +// NewImageIdentityByID : Instantiate ImageIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewImageIdentityByID(id string) (model *ImageIdentityByID, err error) { + model = &ImageIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ImageIdentityByID) isaImageIdentity() bool { + return true +} + +// UnmarshalImageIdentityByID unmarshals an instance of ImageIdentityByID from the specified map of raw messages. +func UnmarshalImageIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImageIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ImagePrototypeImageByFile : ImagePrototypeImageByFile struct +// This model "extends" ImagePrototype +type ImagePrototypeImageByFile struct { + // The unique user-defined name for this image. Names starting with "ibm-" are not allowed. If unspecified, the name + // will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + // The file from which to create the image. + File *ImageFilePrototype `json:"file" validate:"required"` + + // The identity of the [supported operating + // system](https://cloud.ibm.com/apidocs/vpc#list-operating-systems) included in + // this image. + OperatingSystem OperatingSystemIdentityIntf `json:"operating_system" validate:"required"` +} + +// NewImagePrototypeImageByFile : Instantiate ImagePrototypeImageByFile (Generic Model Constructor) +func (*VpcClassicV1) NewImagePrototypeImageByFile(file *ImageFilePrototype, operatingSystem OperatingSystemIdentityIntf) (model *ImagePrototypeImageByFile, err error) { + model = &ImagePrototypeImageByFile{ + File: file, + OperatingSystem: operatingSystem, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ImagePrototypeImageByFile) isaImagePrototype() bool { + return true +} + +// UnmarshalImagePrototypeImageByFile unmarshals an instance of ImagePrototypeImageByFile from the specified map of raw messages. +func UnmarshalImagePrototypeImageByFile(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImagePrototypeImageByFile) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "file", &obj.File, UnmarshalImageFilePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "operating_system", &obj.OperatingSystem, UnmarshalOperatingSystemIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileBandwidthDependent : The total bandwidth shared across the network interfaces of an instance with this profile depends on its +// configuration. +// This model "extends" InstanceProfileBandwidth +type InstanceProfileBandwidthDependent struct { + // The type for this profile field. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the InstanceProfileBandwidthDependent.Type property. +// The type for this profile field. 
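+// The type value discriminates which concrete variant a response carries, so
+// consumers can branch with a type switch; a minimal sketch (bandwidth is an
+// assumed InstanceProfileBandwidthIntf value, e.g. taken from a profile):
+//
+//	switch bw := bandwidth.(type) {
+//	case *InstanceProfileBandwidthFixed:
+//		fmt.Println("fixed:", *bw.Value)
+//	case *InstanceProfileBandwidthRange:
+//		fmt.Println("range:", *bw.Min, "-", *bw.Max)
+//	}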
+const ( + InstanceProfileBandwidthDependentTypeDependentConst = "dependent" +) + +func (*InstanceProfileBandwidthDependent) isaInstanceProfileBandwidth() bool { + return true +} + +// UnmarshalInstanceProfileBandwidthDependent unmarshals an instance of InstanceProfileBandwidthDependent from the specified map of raw messages. +func UnmarshalInstanceProfileBandwidthDependent(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileBandwidthDependent) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileBandwidthEnum : The permitted total bandwidth values (in megabits per second) shared across the network interfaces of an instance +// with this profile. +// This model "extends" InstanceProfileBandwidth +type InstanceProfileBandwidthEnum struct { + // The default value for this profile field. + Default *int64 `json:"default" validate:"required"` + + // The type for this profile field. + Type *string `json:"type" validate:"required"` + + // The permitted values for this profile field. + Values []int64 `json:"values" validate:"required"` +} + +// Constants associated with the InstanceProfileBandwidthEnum.Type property. +// The type for this profile field. +const ( + InstanceProfileBandwidthEnumTypeEnumConst = "enum" +) + +func (*InstanceProfileBandwidthEnum) isaInstanceProfileBandwidth() bool { + return true +} + +// UnmarshalInstanceProfileBandwidthEnum unmarshals an instance of InstanceProfileBandwidthEnum from the specified map of raw messages. +func UnmarshalInstanceProfileBandwidthEnum(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileBandwidthEnum) + err = core.UnmarshalPrimitive(m, "default", &obj.Default) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "values", &obj.Values) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileBandwidthFixed : The total bandwidth (in megabits per second) shared across the network interfaces of an instance with this profile. +// This model "extends" InstanceProfileBandwidth +type InstanceProfileBandwidthFixed struct { + // The type for this profile field. + Type *string `json:"type" validate:"required"` + + // The value for this profile field. + Value *int64 `json:"value" validate:"required"` +} + +// Constants associated with the InstanceProfileBandwidthFixed.Type property. +// The type for this profile field. +const ( + InstanceProfileBandwidthFixedTypeFixedConst = "fixed" +) + +func (*InstanceProfileBandwidthFixed) isaInstanceProfileBandwidth() bool { + return true +} + +// UnmarshalInstanceProfileBandwidthFixed unmarshals an instance of InstanceProfileBandwidthFixed from the specified map of raw messages. +func UnmarshalInstanceProfileBandwidthFixed(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileBandwidthFixed) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileBandwidthRange : The permitted total bandwidth range (in megabits per second) shared across the network interfaces of an instance with +// this profile. 
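+// Reading `step` as the increment between permitted values, a permitted value v
+// satisfies min <= v <= max with v - min a multiple of step; e.g. (illustrative
+// numbers only) min 1000, max 3000 and step 1000 permits 1000, 2000 and 3000.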
+// This model "extends" InstanceProfileBandwidth +type InstanceProfileBandwidthRange struct { + // The default value for this profile field. + Default *int64 `json:"default" validate:"required"` + + // The maximum value for this profile field. + Max *int64 `json:"max" validate:"required"` + + // The minimum value for this profile field. + Min *int64 `json:"min" validate:"required"` + + // The increment step value for this profile field. + Step *int64 `json:"step" validate:"required"` + + // The type for this profile field. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the InstanceProfileBandwidthRange.Type property. +// The type for this profile field. +const ( + InstanceProfileBandwidthRangeTypeRangeConst = "range" +) + +func (*InstanceProfileBandwidthRange) isaInstanceProfileBandwidth() bool { + return true +} + +// UnmarshalInstanceProfileBandwidthRange unmarshals an instance of InstanceProfileBandwidthRange from the specified map of raw messages. +func UnmarshalInstanceProfileBandwidthRange(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileBandwidthRange) + err = core.UnmarshalPrimitive(m, "default", &obj.Default) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "max", &obj.Max) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "min", &obj.Min) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "step", &obj.Step) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileIdentityByCRN : InstanceProfileIdentityByCRN struct +// This model "extends" InstanceProfileIdentity +type InstanceProfileIdentityByCRN struct { + // The CRN for this virtual server instance profile. + CRN *string `json:"crn" validate:"required"` +} + +// NewInstanceProfileIdentityByCRN : Instantiate InstanceProfileIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewInstanceProfileIdentityByCRN(crn string) (model *InstanceProfileIdentityByCRN, err error) { + model = &InstanceProfileIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceProfileIdentityByCRN) isaInstanceProfileIdentity() bool { + return true +} + +// UnmarshalInstanceProfileIdentityByCRN unmarshals an instance of InstanceProfileIdentityByCRN from the specified map of raw messages. +func UnmarshalInstanceProfileIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileIdentityByHref : InstanceProfileIdentityByHref struct +// This model "extends" InstanceProfileIdentity +type InstanceProfileIdentityByHref struct { + // The URL for this virtual server instance profile. 
+ Href *string `json:"href" validate:"required"` +} + +// NewInstanceProfileIdentityByHref : Instantiate InstanceProfileIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewInstanceProfileIdentityByHref(href string) (model *InstanceProfileIdentityByHref, err error) { + model = &InstanceProfileIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceProfileIdentityByHref) isaInstanceProfileIdentity() bool { + return true +} + +// UnmarshalInstanceProfileIdentityByHref unmarshals an instance of InstanceProfileIdentityByHref from the specified map of raw messages. +func UnmarshalInstanceProfileIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileIdentityByName : InstanceProfileIdentityByName struct +// This model "extends" InstanceProfileIdentity +type InstanceProfileIdentityByName struct { + // The globally unique name for this virtual server instance profile. + Name *string `json:"name" validate:"required"` +} + +// NewInstanceProfileIdentityByName : Instantiate InstanceProfileIdentityByName (Generic Model Constructor) +func (*VpcClassicV1) NewInstanceProfileIdentityByName(name string) (model *InstanceProfileIdentityByName, err error) { + model = &InstanceProfileIdentityByName{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceProfileIdentityByName) isaInstanceProfileIdentity() bool { + return true +} + +// UnmarshalInstanceProfileIdentityByName unmarshals an instance of InstanceProfileIdentityByName from the specified map of raw messages. +func UnmarshalInstanceProfileIdentityByName(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfileIdentityByName) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfilePortSpeedDependent : The port speed of each network interface of an instance with this profile depends on its configuration. +// This model "extends" InstanceProfilePortSpeed +type InstanceProfilePortSpeedDependent struct { + // The type for this profile field. + Type *string `json:"type" validate:"required"` +} + +// Constants associated with the InstanceProfilePortSpeedDependent.Type property. +// The type for this profile field. +const ( + InstanceProfilePortSpeedDependentTypeDependentConst = "dependent" +) + +func (*InstanceProfilePortSpeedDependent) isaInstanceProfilePortSpeed() bool { + return true +} + +// UnmarshalInstanceProfilePortSpeedDependent unmarshals an instance of InstanceProfilePortSpeedDependent from the specified map of raw messages. +func UnmarshalInstanceProfilePortSpeedDependent(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfilePortSpeedDependent) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfilePortSpeedFixed : The maximum speed (in megabits per second) of each network interface of an instance with this profile. 
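+// A minimal sketch of reading the speed (portSpeed is an assumed
+// InstanceProfilePortSpeedIntf value; the assertion is illustrative):
+//
+//	if fixed, ok := portSpeed.(*InstanceProfilePortSpeedFixed); ok {
+//		mbps := *fixed.Value // megabits per second
+//		_ = mbps
+//	}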
+// This model "extends" InstanceProfilePortSpeed +type InstanceProfilePortSpeedFixed struct { + // The type for this profile field. + Type *string `json:"type" validate:"required"` + + // The value for this profile field. + Value *int64 `json:"value" validate:"required"` +} + +// Constants associated with the InstanceProfilePortSpeedFixed.Type property. +// The type for this profile field. +const ( + InstanceProfilePortSpeedFixedTypeFixedConst = "fixed" +) + +func (*InstanceProfilePortSpeedFixed) isaInstanceProfilePortSpeed() bool { + return true +} + +// UnmarshalInstanceProfilePortSpeedFixed unmarshals an instance of InstanceProfilePortSpeedFixed from the specified map of raw messages. +func UnmarshalInstanceProfilePortSpeedFixed(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceProfilePortSpeedFixed) + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "value", &obj.Value) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePrototypeInstanceByImage : InstancePrototypeInstanceByImage struct +// This model "extends" InstancePrototype +type InstancePrototypeInstanceByImage struct { + // The public SSH keys for the administrative user of the virtual server instance. Up to 10 keys may be provided; if no + // keys are provided the instance will be inaccessible unless the image used provides another means of access. For + // Windows instances, one of the keys will be used to encrypt the administrator password. + // + // Keys will be made available to the virtual server instance as cloud-init vendor data. For cloud-init enabled images, + // these keys will also be added as SSH authorized keys for the administrative user. + Keys []KeyIdentityIntf `json:"keys,omitempty"` + + // The user-defined name for this virtual server instance (and default system hostname). If unspecified, the name will + // be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // Collection of additional network interfaces to create for the virtual server instance. + NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + + // The profile to use for this virtual server instance. + Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` + + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + // User data to be made available when setting up the virtual server instance. + UserData *string `json:"user_data,omitempty"` + + // Collection of volume attachments. + VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` + + // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the + // instance's network interfaces. + VPC VPCIdentityIntf `json:"vpc,omitempty"` + + // The boot volume attachment for the virtual server instance. + BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` + + // The identity of the image to use when provisioning the virtual server instance. + Image ImageIdentityIntf `json:"image" validate:"required"` + + // Primary network interface. + PrimaryNetworkInterface *NetworkInterfacePrototype `json:"primary_network_interface" validate:"required"` + + // The identity of the zone to provision the virtual server instance in. 
+ Zone ZoneIdentityIntf `json:"zone" validate:"required"` +} + +// NewInstancePrototypeInstanceByImage : Instantiate InstancePrototypeInstanceByImage (Generic Model Constructor) +func (*VpcClassicV1) NewInstancePrototypeInstanceByImage(image ImageIdentityIntf, primaryNetworkInterface *NetworkInterfacePrototype, zone ZoneIdentityIntf) (model *InstancePrototypeInstanceByImage, err error) { + model = &InstancePrototypeInstanceByImage{ + Image: image, + PrimaryNetworkInterface: primaryNetworkInterface, + Zone: zone, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstancePrototypeInstanceByImage) isaInstancePrototype() bool { + return true +} + +// UnmarshalInstancePrototypeInstanceByImage unmarshals an instance of InstancePrototypeInstanceByImage from the specified map of raw messages. +func UnmarshalInstancePrototypeInstanceByImage(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePrototypeInstanceByImage) + err = core.UnmarshalModel(m, "keys", &obj.Keys, UnmarshalKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_interfaces", &obj.NetworkInterfaces, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "user_data", &obj.UserData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume_attachments", &obj.VolumeAttachments, UnmarshalVolumeAttachmentPrototypeInstanceContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "boot_volume_attachment", &obj.BootVolumeAttachment, UnmarshalVolumeAttachmentPrototypeInstanceByImageContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "image", &obj.Image, UnmarshalImageIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "primary_network_interface", &obj.PrimaryNetworkInterface, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyIdentityByCRN : KeyIdentityByCRN struct +// This model "extends" KeyIdentity +type KeyIdentityByCRN struct { + // The CRN for this key. + CRN *string `json:"crn" validate:"required"` +} + +// NewKeyIdentityByCRN : Instantiate KeyIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewKeyIdentityByCRN(crn string) (model *KeyIdentityByCRN, err error) { + model = &KeyIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*KeyIdentityByCRN) isaKeyIdentity() bool { + return true +} + +// UnmarshalKeyIdentityByCRN unmarshals an instance of KeyIdentityByCRN from the specified map of raw messages. 
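+// Like the other model unmarshalers in this package it is normally passed as a
+// callback to core.UnmarshalModel rather than called directly; a minimal sketch
+// (rawMap is an assumed map[string]json.RawMessage containing a "key" property):
+//
+//	var key *KeyIdentityByCRN
+//	err := core.UnmarshalModel(rawMap, "key", &key, UnmarshalKeyIdentityByCRN)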
+func UnmarshalKeyIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyIdentityByHref : KeyIdentityByHref struct +// This model "extends" KeyIdentity +type KeyIdentityByHref struct { + // The URL for this key. + Href *string `json:"href" validate:"required"` +} + +// NewKeyIdentityByHref : Instantiate KeyIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewKeyIdentityByHref(href string) (model *KeyIdentityByHref, err error) { + model = &KeyIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*KeyIdentityByHref) isaKeyIdentity() bool { + return true +} + +// UnmarshalKeyIdentityByHref unmarshals an instance of KeyIdentityByHref from the specified map of raw messages. +func UnmarshalKeyIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyIdentityByID : KeyIdentityByID struct +// This model "extends" KeyIdentity +type KeyIdentityByID struct { + // The unique identifier for this key. + ID *string `json:"id" validate:"required"` +} + +// NewKeyIdentityByID : Instantiate KeyIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewKeyIdentityByID(id string) (model *KeyIdentityByID, err error) { + model = &KeyIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*KeyIdentityByID) isaKeyIdentity() bool { + return true +} + +// UnmarshalKeyIdentityByID unmarshals an instance of KeyIdentityByID from the specified map of raw messages. +func UnmarshalKeyIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyIdentityKeyIdentityByFingerprint : KeyIdentityKeyIdentityByFingerprint struct +// This model "extends" KeyIdentity +type KeyIdentityKeyIdentityByFingerprint struct { + // The fingerprint for this key. The value is returned base64-encoded and prefixed with the hash algorithm (always + // `SHA256`). + Fingerprint *string `json:"fingerprint" validate:"required"` +} + +// NewKeyIdentityKeyIdentityByFingerprint : Instantiate KeyIdentityKeyIdentityByFingerprint (Generic Model Constructor) +func (*VpcClassicV1) NewKeyIdentityKeyIdentityByFingerprint(fingerprint string) (model *KeyIdentityKeyIdentityByFingerprint, err error) { + model = &KeyIdentityKeyIdentityByFingerprint{ + Fingerprint: core.StringPtr(fingerprint), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*KeyIdentityKeyIdentityByFingerprint) isaKeyIdentity() bool { + return true +} + +// UnmarshalKeyIdentityKeyIdentityByFingerprint unmarshals an instance of KeyIdentityKeyIdentityByFingerprint from the specified map of raw messages. 
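+//
+// Illustrative sketch (vpcService, prototype, and the fingerprint are
+// placeholders): any of the KeyIdentity variants (by CRN, href, ID, or
+// fingerprint) satisfies KeyIdentityIntf and may be supplied wherever a key
+// identity is expected, such as InstancePrototypeInstanceByImage.Keys:
+//
+//	key, err := vpcService.NewKeyIdentityKeyIdentityByFingerprint("SHA256:yxavE4CIOL2NlsqcurRO3xGjkP6m/0ZxU/zzhlLOHQ0")
+//	if err == nil {
+//		prototype.Keys = []KeyIdentityIntf{key}
+//	}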
+func UnmarshalKeyIdentityKeyIdentityByFingerprint(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyIdentityKeyIdentityByFingerprint) + err = core.UnmarshalPrimitive(m, "fingerprint", &obj.Fingerprint) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyReferenceInstanceInitializationContextKeyIdentityByFingerprint : KeyReferenceInstanceInitializationContextKeyIdentityByFingerprint struct +// This model "extends" KeyReferenceInstanceInitializationContext +type KeyReferenceInstanceInitializationContextKeyIdentityByFingerprint struct { + // The fingerprint for this key. The value is returned base64-encoded and prefixed with the hash algorithm (always + // `SHA256`). + Fingerprint *string `json:"fingerprint" validate:"required"` +} + +func (*KeyReferenceInstanceInitializationContextKeyIdentityByFingerprint) isaKeyReferenceInstanceInitializationContext() bool { + return true +} + +// UnmarshalKeyReferenceInstanceInitializationContextKeyIdentityByFingerprint unmarshals an instance of KeyReferenceInstanceInitializationContextKeyIdentityByFingerprint from the specified map of raw messages. +func UnmarshalKeyReferenceInstanceInitializationContextKeyIdentityByFingerprint(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyReferenceInstanceInitializationContextKeyIdentityByFingerprint) + err = core.UnmarshalPrimitive(m, "fingerprint", &obj.Fingerprint) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// KeyReferenceInstanceInitializationContextKeyReference : KeyReferenceInstanceInitializationContextKeyReference struct +// This model "extends" KeyReferenceInstanceInitializationContext +type KeyReferenceInstanceInitializationContextKeyReference struct { + // The CRN for this key. + CRN *string `json:"crn" validate:"required"` + + // The fingerprint for this key. The value is returned base64-encoded and prefixed with the hash algorithm (always + // `SHA256`). + Fingerprint *string `json:"fingerprint" validate:"required"` + + // The URL for this key. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this key. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this key. + Name *string `json:"name" validate:"required"` +} + +func (*KeyReferenceInstanceInitializationContextKeyReference) isaKeyReferenceInstanceInitializationContext() bool { + return true +} + +// UnmarshalKeyReferenceInstanceInitializationContextKeyReference unmarshals an instance of KeyReferenceInstanceInitializationContextKeyReference from the specified map of raw messages. 
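+//
+// Illustrative note (ref is a placeholder): unlike the request-side
+// KeyIdentity models above, this is a response-side reference in which the
+// service populates every field; the fields are pointers, so nil-check before
+// dereferencing:
+//
+//	if ref.Name != nil {
+//		fmt.Println(*ref.Name)
+//	}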
+func UnmarshalKeyReferenceInstanceInitializationContextKeyReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(KeyReferenceInstanceInitializationContextKeyReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "fingerprint", &obj.Fingerprint) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch : LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch struct +// This model "extends" LoadBalancerListenerPolicyTargetPatch +type LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch struct { + // The http status code in the redirect response. + HTTPStatusCode *int64 `json:"http_status_code,omitempty"` + + // The redirect target URL. + URL *string `json:"url,omitempty"` +} + +func (*LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch) isaLoadBalancerListenerPolicyTargetPatch() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch unmarshals an instance of LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPatchLoadBalancerListenerPolicyRedirectURLPatch) + err = core.UnmarshalPrimitive(m, "http_status_code", &obj.HTTPStatusCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity : Identifies a load balancer pool by a unique property. +// Models which "extend" this model: +// - LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID +// - LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref +// This model "extends" LoadBalancerListenerPolicyTargetPatch +type LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity struct { + // The unique identifier for this load balancer pool. + ID *string `json:"id,omitempty"` + + // The pool's canonical URL. + Href *string `json:"href,omitempty"` +} + +func (*LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity) isaLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity() bool { + return true +} + +type LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityIntf interface { + LoadBalancerListenerPolicyTargetPatchIntf + isaLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity() bool +} + +func (*LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity) isaLoadBalancerListenerPolicyTargetPatch() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity unmarshals an instance of LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity from the specified map of raw messages. 
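+//
+// Illustrative sketch (the identifier is a placeholder): a listener policy
+// target patch is a one-of, either a redirect URL patch or a pool identity;
+// to point a policy at a pool by ID:
+//
+//	target := &LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity{
+//		ID: core.StringPtr("70294e14-4e61-11e8-bcf4-0242ac110004"),
+//	}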
+func UnmarshalLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype : LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype struct +// This model "extends" LoadBalancerListenerPolicyTargetPrototype +type LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype struct { + // The http status code in the redirect response. + HTTPStatusCode *int64 `json:"http_status_code" validate:"required"` + + // The redirect target URL. + URL *string `json:"url" validate:"required"` +} + +// NewLoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype : Instantiate LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype(httpStatusCode int64, url string) (model *LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype, err error) { + model = &LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype{ + HTTPStatusCode: core.Int64Ptr(httpStatusCode), + URL: core.StringPtr(url), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype) isaLoadBalancerListenerPolicyTargetPrototype() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype unmarshals an instance of LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPrototypeLoadBalancerListenerPolicyRedirectURLPrototype) + err = core.UnmarshalPrimitive(m, "http_status_code", &obj.HTTPStatusCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity : Identifies a load balancer pool by a unique property. +// Models which "extend" this model: +// - LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID +// - LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref +// This model "extends" LoadBalancerListenerPolicyTargetPrototype +type LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity struct { + // The unique identifier for this load balancer pool. + ID *string `json:"id,omitempty"` + + // The pool's canonical URL. 
+ Href *string `json:"href,omitempty"` +} + +func (*LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity) isaLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity() bool { + return true +} + +type LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityIntf interface { + LoadBalancerListenerPolicyTargetPrototypeIntf + isaLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity() bool +} + +func (*LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity) isaLoadBalancerListenerPolicyTargetPrototype() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity unmarshals an instance of LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL : LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL struct +// This model "extends" LoadBalancerListenerPolicyTarget +type LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL struct { + // The http status code in the redirect response. + HTTPStatusCode *int64 `json:"http_status_code" validate:"required"` + + // The redirect target URL. + URL *string `json:"url" validate:"required"` +} + +func (*LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL) isaLoadBalancerListenerPolicyTarget() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL unmarshals an instance of LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetLoadBalancerListenerPolicyRedirectURL) + err = core.UnmarshalPrimitive(m, "http_status_code", &obj.HTTPStatusCode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "url", &obj.URL) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetLoadBalancerPoolReference : LoadBalancerListenerPolicyTargetLoadBalancerPoolReference struct +// This model "extends" LoadBalancerListenerPolicyTarget +type LoadBalancerListenerPolicyTargetLoadBalancerPoolReference struct { + // The pool's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this load balancer pool. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this load balancer pool. + Name *string `json:"name" validate:"required"` +} + +func (*LoadBalancerListenerPolicyTargetLoadBalancerPoolReference) isaLoadBalancerListenerPolicyTarget() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetLoadBalancerPoolReference unmarshals an instance of LoadBalancerListenerPolicyTargetLoadBalancerPoolReference from the specified map of raw messages. 
+func UnmarshalLoadBalancerListenerPolicyTargetLoadBalancerPoolReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetLoadBalancerPoolReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolIdentityByHref : LoadBalancerPoolIdentityByHref struct +// This model "extends" LoadBalancerPoolIdentity +type LoadBalancerPoolIdentityByHref struct { + // The pool's canonical URL. + Href *string `json:"href" validate:"required"` +} + +// NewLoadBalancerPoolIdentityByHref : Instantiate LoadBalancerPoolIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerPoolIdentityByHref(href string) (model *LoadBalancerPoolIdentityByHref, err error) { + model = &LoadBalancerPoolIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*LoadBalancerPoolIdentityByHref) isaLoadBalancerPoolIdentity() bool { + return true +} + +// UnmarshalLoadBalancerPoolIdentityByHref unmarshals an instance of LoadBalancerPoolIdentityByHref from the specified map of raw messages. +func UnmarshalLoadBalancerPoolIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolIdentityByID : LoadBalancerPoolIdentityByID struct +// This model "extends" LoadBalancerPoolIdentity +type LoadBalancerPoolIdentityByID struct { + // The unique identifier for this load balancer pool. + ID *string `json:"id" validate:"required"` +} + +// NewLoadBalancerPoolIdentityByID : Instantiate LoadBalancerPoolIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerPoolIdentityByID(id string) (model *LoadBalancerPoolIdentityByID, err error) { + model = &LoadBalancerPoolIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*LoadBalancerPoolIdentityByID) isaLoadBalancerPoolIdentity() bool { + return true +} + +// UnmarshalLoadBalancerPoolIdentityByID unmarshals an instance of LoadBalancerPoolIdentityByID from the specified map of raw messages. +func UnmarshalLoadBalancerPoolIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolMemberTargetPrototypeIP : LoadBalancerPoolMemberTargetPrototypeIP struct +// This model "extends" LoadBalancerPoolMemberTargetPrototype +type LoadBalancerPoolMemberTargetPrototypeIP struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. 
+ Address *string `json:"address" validate:"required"` +} + +// NewLoadBalancerPoolMemberTargetPrototypeIP : Instantiate LoadBalancerPoolMemberTargetPrototypeIP (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerPoolMemberTargetPrototypeIP(address string) (model *LoadBalancerPoolMemberTargetPrototypeIP, err error) { + model = &LoadBalancerPoolMemberTargetPrototypeIP{ + Address: core.StringPtr(address), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*LoadBalancerPoolMemberTargetPrototypeIP) isaLoadBalancerPoolMemberTargetPrototype() bool { + return true +} + +// UnmarshalLoadBalancerPoolMemberTargetPrototypeIP unmarshals an instance of LoadBalancerPoolMemberTargetPrototypeIP from the specified map of raw messages. +func UnmarshalLoadBalancerPoolMemberTargetPrototypeIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolMemberTargetPrototypeIP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerPoolMemberTargetIP : LoadBalancerPoolMemberTargetIP struct +// This model "extends" LoadBalancerPoolMemberTarget +type LoadBalancerPoolMemberTargetIP struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address" validate:"required"` +} + +func (*LoadBalancerPoolMemberTargetIP) isaLoadBalancerPoolMemberTarget() bool { + return true +} + +// UnmarshalLoadBalancerPoolMemberTargetIP unmarshals an instance of LoadBalancerPoolMemberTargetIP from the specified map of raw messages. +func UnmarshalLoadBalancerPoolMemberTargetIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerPoolMemberTargetIP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLIdentityByHref : NetworkACLIdentityByHref struct +// This model "extends" NetworkACLIdentity +type NetworkACLIdentityByHref struct { + // The URL for this network ACL. + Href *string `json:"href" validate:"required"` +} + +// NewNetworkACLIdentityByHref : Instantiate NetworkACLIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLIdentityByHref(href string) (model *NetworkACLIdentityByHref, err error) { + model = &NetworkACLIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLIdentityByHref) isaNetworkACLIdentity() bool { + return true +} + +// UnmarshalNetworkACLIdentityByHref unmarshals an instance of NetworkACLIdentityByHref from the specified map of raw messages. +func UnmarshalNetworkACLIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLIdentityByID : NetworkACLIdentityByID struct +// This model "extends" NetworkACLIdentity +type NetworkACLIdentityByID struct { + // The unique identifier for this network ACL. 
+ ID *string `json:"id" validate:"required"` +} + +// NewNetworkACLIdentityByID : Instantiate NetworkACLIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLIdentityByID(id string) (model *NetworkACLIdentityByID, err error) { + model = &NetworkACLIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLIdentityByID) isaNetworkACLIdentity() bool { + return true +} + +// UnmarshalNetworkACLIdentityByID unmarshals an instance of NetworkACLIdentityByID from the specified map of raw messages. +func UnmarshalNetworkACLIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLPrototypeNetworkACLByRules : NetworkACLPrototypeNetworkACLByRules struct +// This model "extends" NetworkACLPrototype +type NetworkACLPrototypeNetworkACLByRules struct { + // The unique user-defined name for this network ACL. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string `json:"name,omitempty"` + + // Array of prototype objects for rules to create along with this network ACL. If unspecified, rules will be created to + // allow all traffic. + Rules []NetworkACLRulePrototypeNetworkACLContextIntf `json:"rules,omitempty"` +} + +func (*NetworkACLPrototypeNetworkACLByRules) isaNetworkACLPrototype() bool { + return true +} + +// UnmarshalNetworkACLPrototypeNetworkACLByRules unmarshals an instance of NetworkACLPrototypeNetworkACLByRules from the specified map of raw messages. +func UnmarshalNetworkACLPrototypeNetworkACLByRules(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLPrototypeNetworkACLByRules) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "rules", &obj.Rules, UnmarshalNetworkACLRulePrototypeNetworkACLContext) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLPrototypeNetworkACLBySourceNetworkACL : NetworkACLPrototypeNetworkACLBySourceNetworkACL struct +// This model "extends" NetworkACLPrototype +type NetworkACLPrototypeNetworkACLBySourceNetworkACL struct { + // The unique user-defined name for this network ACL. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string `json:"name,omitempty"` + + // Network ACL to copy rules from. + SourceNetworkACL NetworkACLIdentityIntf `json:"source_network_acl" validate:"required"` +} + +// NewNetworkACLPrototypeNetworkACLBySourceNetworkACL : Instantiate NetworkACLPrototypeNetworkACLBySourceNetworkACL (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLPrototypeNetworkACLBySourceNetworkACL(sourceNetworkACL NetworkACLIdentityIntf) (model *NetworkACLPrototypeNetworkACLBySourceNetworkACL, err error) { + model = &NetworkACLPrototypeNetworkACLBySourceNetworkACL{ + SourceNetworkACL: sourceNetworkACL, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLPrototypeNetworkACLBySourceNetworkACL) isaNetworkACLPrototype() bool { + return true +} + +// UnmarshalNetworkACLPrototypeNetworkACLBySourceNetworkACL unmarshals an instance of NetworkACLPrototypeNetworkACLBySourceNetworkACL from the specified map of raw messages. 
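+//
+// Illustrative sketch (vpcService and the ID are placeholders): building a
+// prototype that copies its rules from an existing ACL, using the
+// constructors above:
+//
+//	source, _ := vpcService.NewNetworkACLIdentityByID("a4e28308-8ee7-46ab-8108-9f881f22bdbf")
+//	prototype, _ := vpcService.NewNetworkACLPrototypeNetworkACLBySourceNetworkACL(source)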
+func UnmarshalNetworkACLPrototypeNetworkACLBySourceNetworkACL(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLPrototypeNetworkACLBySourceNetworkACL) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_network_acl", &obj.SourceNetworkACL, UnmarshalNetworkACLIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref : NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref struct +// This model "extends" NetworkACLRuleBeforePatch +type NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref struct { + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` +} + +// NewNetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref : Instantiate NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref(href string) (model *NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref, err error) { + model = &NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref) isaNetworkACLRuleBeforePatch() bool { + return true +} + +// UnmarshalNetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref unmarshals an instance of NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref from the specified map of raw messages. +func UnmarshalNetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleBeforePatchNetworkACLRuleIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID : NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID struct +// This model "extends" NetworkACLRuleBeforePatch +type NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID struct { + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` +} + +// NewNetworkACLRuleBeforePatchNetworkACLRuleIdentityByID : Instantiate NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRuleBeforePatchNetworkACLRuleIdentityByID(id string) (model *NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID, err error) { + model = &NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID) isaNetworkACLRuleBeforePatch() bool { + return true +} + +// UnmarshalNetworkACLRuleBeforePatchNetworkACLRuleIdentityByID unmarshals an instance of NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID from the specified map of raw messages. 
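+//
+// Illustrative note: `before` repositions a rule within the ACL's ordered
+// rule collection, placing the patched rule immediately before the referenced
+// one. A sketch (vpcService and the ID are placeholders):
+//
+//	before, _ := vpcService.NewNetworkACLRuleBeforePatchNetworkACLRuleIdentityByID("8daca77a-4980-4d33-8f3e-7038797be8f9")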
+func UnmarshalNetworkACLRuleBeforePatchNetworkACLRuleIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleBeforePatchNetworkACLRuleIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref : NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref struct +// This model "extends" NetworkACLRuleBeforePrototype +type NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref struct { + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` +} + +// NewNetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref : Instantiate NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref(href string) (model *NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref, err error) { + model = &NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref) isaNetworkACLRuleBeforePrototype() bool { + return true +} + +// UnmarshalNetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref unmarshals an instance of NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref from the specified map of raw messages. +func UnmarshalNetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID : NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID struct +// This model "extends" NetworkACLRuleBeforePrototype +type NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID struct { + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` +} + +// NewNetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID : Instantiate NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID(id string) (model *NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID, err error) { + model = &NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID) isaNetworkACLRuleBeforePrototype() bool { + return true +} + +// UnmarshalNetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID unmarshals an instance of NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID from the specified map of raw messages. 
+func UnmarshalNetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleBeforePrototypeNetworkACLRuleIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleItemNetworkACLRuleProtocolAll : NetworkACLRuleItemNetworkACLRuleProtocolAll struct +// This model "extends" NetworkACLRuleItem +type NetworkACLRuleItemNetworkACLRuleProtocolAll struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The rule that this rule is immediately before. In a rule collection, this always refers to the next item in the + // collection. If absent, this is the last rule. + Before *NetworkACLRuleReference `json:"before,omitempty"` + + // The date and time that the rule was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The destination CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` + + // The IP version for this rule. + IPVersion *string `json:"ip_version" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name" validate:"required"` + + // The source CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolAll.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolAllActionAllowConst = "allow" + NetworkACLRuleItemNetworkACLRuleProtocolAllActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolAll.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolAllDirectionInboundConst = "inbound" + NetworkACLRuleItemNetworkACLRuleProtocolAllDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolAll.IPVersion property. +// The IP version for this rule. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolAllIPVersionIpv4Const = "ipv4" + NetworkACLRuleItemNetworkACLRuleProtocolAllIPVersionIpv6Const = "ipv6" +) + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolAll.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolAllProtocolAllConst = "all" +) + +func (*NetworkACLRuleItemNetworkACLRuleProtocolAll) isaNetworkACLRuleItem() bool { + return true +} + +// UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolAll unmarshals an instance of NetworkACLRuleItemNetworkACLRuleProtocolAll from the specified map of raw messages. 
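+//
+// Illustrative note: NetworkACLRuleItem has three variants, selected by the
+// `protocol` property (`all`, `icmp`, or `tcp`/`udp`); the parent
+// UnmarshalNetworkACLRuleItem is assumed (elsewhere in this file) to dispatch
+// on that discriminator when decoding a rule collection.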
+func UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolAll(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleItemNetworkACLRuleProtocolAll) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleItemNetworkACLRuleProtocolIcmp : NetworkACLRuleItemNetworkACLRuleProtocolIcmp struct +// This model "extends" NetworkACLRuleItem +type NetworkACLRuleItemNetworkACLRuleProtocolIcmp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The rule that this rule is immediately before. In a rule collection, this always refers to the next item in the + // collection. If absent, this is the last rule. + Before *NetworkACLRuleReference `json:"before,omitempty"` + + // The date and time that the rule was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The destination CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` + + // The IP version for this rule. + IPVersion *string `json:"ip_version" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name" validate:"required"` + + // The source CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also + // specified. + Code *int64 `json:"code,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolIcmp.Action property. +// Whether to allow or deny matching traffic. 
+const ( + NetworkACLRuleItemNetworkACLRuleProtocolIcmpActionAllowConst = "allow" + NetworkACLRuleItemNetworkACLRuleProtocolIcmpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolIcmp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolIcmpDirectionInboundConst = "inbound" + NetworkACLRuleItemNetworkACLRuleProtocolIcmpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolIcmp.IPVersion property. +// The IP version for this rule. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolIcmpIPVersionIpv4Const = "ipv4" + NetworkACLRuleItemNetworkACLRuleProtocolIcmpIPVersionIpv6Const = "ipv6" +) + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolIcmp.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolIcmpProtocolIcmpConst = "icmp" +) + +func (*NetworkACLRuleItemNetworkACLRuleProtocolIcmp) isaNetworkACLRuleItem() bool { + return true +} + +// UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolIcmp unmarshals an instance of NetworkACLRuleItemNetworkACLRuleProtocolIcmp from the specified map of raw messages. +func UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolIcmp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleItemNetworkACLRuleProtocolIcmp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleItemNetworkACLRuleProtocolTcpudp : NetworkACLRuleItemNetworkACLRuleProtocolTcpudp struct +// This model "extends" NetworkACLRuleItem +type NetworkACLRuleItemNetworkACLRuleProtocolTcpudp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The rule that this rule is immediately before. In a rule collection, this always refers to the next item in the + // collection. If absent, this is the last rule. + Before *NetworkACLRuleReference `json:"before,omitempty"` + + // The date and time that the rule was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The destination CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. 
+ Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` + + // The IP version for this rule. + IPVersion *string `json:"ip_version" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name" validate:"required"` + + // The source CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The inclusive upper bound of TCP/UDP destination port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP destination port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The inclusive upper bound of TCP/UDP source port range. + SourcePortMax *int64 `json:"source_port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP source port range. + SourcePortMin *int64 `json:"source_port_min,omitempty"` +} + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolTcpudp.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolTcpudpActionAllowConst = "allow" + NetworkACLRuleItemNetworkACLRuleProtocolTcpudpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolTcpudp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolTcpudpDirectionInboundConst = "inbound" + NetworkACLRuleItemNetworkACLRuleProtocolTcpudpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolTcpudp.IPVersion property. +// The IP version for this rule. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolTcpudpIPVersionIpv4Const = "ipv4" + NetworkACLRuleItemNetworkACLRuleProtocolTcpudpIPVersionIpv6Const = "ipv6" +) + +// Constants associated with the NetworkACLRuleItemNetworkACLRuleProtocolTcpudp.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRuleItemNetworkACLRuleProtocolTcpudpProtocolTCPConst = "tcp" + NetworkACLRuleItemNetworkACLRuleProtocolTcpudpProtocolUDPConst = "udp" +) + +func (*NetworkACLRuleItemNetworkACLRuleProtocolTcpudp) isaNetworkACLRuleItem() bool { + return true +} + +// UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolTcpudp unmarshals an instance of NetworkACLRuleItemNetworkACLRuleProtocolTcpudp from the specified map of raw messages. 
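+//
+// Illustrative note: in this variant the port bounds are inclusive, so a rule
+// matching only destination port 443 carries port_min = 443 and port_max = 443;
+// a prototype that omits the bounds typically matches the whole 1-65535 range.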
+func UnmarshalNetworkACLRuleItemNetworkACLRuleProtocolTcpudp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleItemNetworkACLRuleProtocolTcpudp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_max", &obj.PortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_min", &obj.PortMin) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_max", &obj.SourcePortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_min", &obj.SourcePortMin) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRulePatchNetworkACLRuleProtocolAll : NetworkACLRulePatchNetworkACLRuleProtocolAll struct +// This model "extends" NetworkACLRulePatch +type NetworkACLRulePatchNetworkACLRuleProtocolAll struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action,omitempty"` + + Before NetworkACLRuleBeforePatchIntf `json:"before,omitempty"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination,omitempty"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction,omitempty"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. + Name *string `json:"name,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the NetworkACLRulePatchNetworkACLRuleProtocolAll.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePatchNetworkACLRuleProtocolAllActionAllowConst = "allow" + NetworkACLRulePatchNetworkACLRuleProtocolAllActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePatchNetworkACLRuleProtocolAll.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRulePatchNetworkACLRuleProtocolAllDirectionInboundConst = "inbound" + NetworkACLRulePatchNetworkACLRuleProtocolAllDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRulePatchNetworkACLRuleProtocolAll.Protocol property. +// The protocol to enforce. 
+const ( + NetworkACLRulePatchNetworkACLRuleProtocolAllProtocolAllConst = "all" +) + +// NewNetworkACLRulePatchNetworkACLRuleProtocolAll : Instantiate NetworkACLRulePatchNetworkACLRuleProtocolAll (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRulePatchNetworkACLRuleProtocolAll(protocol string) (model *NetworkACLRulePatchNetworkACLRuleProtocolAll, err error) { + model = &NetworkACLRulePatchNetworkACLRuleProtocolAll{ + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRulePatchNetworkACLRuleProtocolAll) isaNetworkACLRulePatch() bool { + return true +} + +// UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolAll unmarshals an instance of NetworkACLRulePatchNetworkACLRuleProtocolAll from the specified map of raw messages. +func UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolAll(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRulePatchNetworkACLRuleProtocolAll) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleBeforePatch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRulePatchNetworkACLRuleProtocolIcmp : NetworkACLRulePatchNetworkACLRuleProtocolIcmp struct +// This model "extends" NetworkACLRulePatch +type NetworkACLRulePatchNetworkACLRuleProtocolIcmp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action,omitempty"` + + Before NetworkACLRuleBeforePatchIntf `json:"before,omitempty"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination,omitempty"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction,omitempty"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. + Name *string `json:"name,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source,omitempty"` + + // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also + // specified. + Code *int64 `json:"code,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the NetworkACLRulePatchNetworkACLRuleProtocolIcmp.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePatchNetworkACLRuleProtocolIcmpActionAllowConst = "allow" + NetworkACLRulePatchNetworkACLRuleProtocolIcmpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePatchNetworkACLRuleProtocolIcmp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. 
+const ( + NetworkACLRulePatchNetworkACLRuleProtocolIcmpDirectionInboundConst = "inbound" + NetworkACLRulePatchNetworkACLRuleProtocolIcmpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRulePatchNetworkACLRuleProtocolIcmp.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRulePatchNetworkACLRuleProtocolIcmpProtocolIcmpConst = "icmp" +) + +// NewNetworkACLRulePatchNetworkACLRuleProtocolIcmp : Instantiate NetworkACLRulePatchNetworkACLRuleProtocolIcmp (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRulePatchNetworkACLRuleProtocolIcmp(protocol string) (model *NetworkACLRulePatchNetworkACLRuleProtocolIcmp, err error) { + model = &NetworkACLRulePatchNetworkACLRuleProtocolIcmp{ + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRulePatchNetworkACLRuleProtocolIcmp) isaNetworkACLRulePatch() bool { + return true +} + +// UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolIcmp unmarshals an instance of NetworkACLRulePatchNetworkACLRuleProtocolIcmp from the specified map of raw messages. +func UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolIcmp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRulePatchNetworkACLRuleProtocolIcmp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleBeforePatch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRulePatchNetworkACLRuleProtocolTcpudp : NetworkACLRulePatchNetworkACLRuleProtocolTcpudp struct +// This model "extends" NetworkACLRulePatch +type NetworkACLRulePatchNetworkACLRuleProtocolTcpudp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action,omitempty"` + + Before NetworkACLRuleBeforePatchIntf `json:"before,omitempty"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination,omitempty"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction,omitempty"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. + Name *string `json:"name,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source,omitempty"` + + // The inclusive upper bound of TCP/UDP destination port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP destination port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The inclusive upper bound of TCP/UDP source port range. 
+ SourcePortMax *int64 `json:"source_port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP source port range. + SourcePortMin *int64 `json:"source_port_min,omitempty"` +} + +// Constants associated with the NetworkACLRulePatchNetworkACLRuleProtocolTcpudp.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePatchNetworkACLRuleProtocolTcpudpActionAllowConst = "allow" + NetworkACLRulePatchNetworkACLRuleProtocolTcpudpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePatchNetworkACLRuleProtocolTcpudp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRulePatchNetworkACLRuleProtocolTcpudpDirectionInboundConst = "inbound" + NetworkACLRulePatchNetworkACLRuleProtocolTcpudpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRulePatchNetworkACLRuleProtocolTcpudp.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRulePatchNetworkACLRuleProtocolTcpudpProtocolTCPConst = "tcp" + NetworkACLRulePatchNetworkACLRuleProtocolTcpudpProtocolUDPConst = "udp" +) + +// NewNetworkACLRulePatchNetworkACLRuleProtocolTcpudp : Instantiate NetworkACLRulePatchNetworkACLRuleProtocolTcpudp (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRulePatchNetworkACLRuleProtocolTcpudp(protocol string) (model *NetworkACLRulePatchNetworkACLRuleProtocolTcpudp, err error) { + model = &NetworkACLRulePatchNetworkACLRuleProtocolTcpudp{ + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRulePatchNetworkACLRuleProtocolTcpudp) isaNetworkACLRulePatch() bool { + return true +} + +// UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolTcpudp unmarshals an instance of NetworkACLRulePatchNetworkACLRuleProtocolTcpudp from the specified map of raw messages. 
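+//
+// Illustrative sketch of how this model is typically populated on the request
+// side (hypothetical usage, not part of the generated API surface);
+// `classicService` is a placeholder *VpcClassicV1 client:
+//
+//   patch, err := classicService.NewNetworkACLRulePatchNetworkACLRuleProtocolTcpudp("tcp")
+//   if err == nil {
+//       patch.PortMin = core.Int64Ptr(443)
+//       patch.PortMax = core.Int64Ptr(443)
+//   }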
+func UnmarshalNetworkACLRulePatchNetworkACLRuleProtocolTcpudp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRulePatchNetworkACLRuleProtocolTcpudp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleBeforePatch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_max", &obj.PortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_min", &obj.PortMin) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_max", &obj.SourcePortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_min", &obj.SourcePortMin) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll : NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll struct +// This model "extends" NetworkACLRulePrototypeNetworkACLContext +type NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAllActionAllowConst = "allow" + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAllActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAllDirectionInboundConst = "inbound" + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAllDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll.Protocol property. +// The protocol to enforce. 
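+//
+// For example (hypothetical usage), this constant is the value expected by the
+// generic constructor when a rule should match every protocol:
+//
+//   rule, err := classicService.NewNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll(
+//       "allow", "0.0.0.0/0", "outbound", "0.0.0.0/0",
+//       NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAllProtocolAllConst)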
+const ( + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAllProtocolAllConst = "all" +) + +// NewNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll : Instantiate NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll(action string, destination string, direction string, source string, protocol string) (model *NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll, err error) { + model = &NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll{ + Action: core.StringPtr(action), + Destination: core.StringPtr(destination), + Direction: core.StringPtr(direction), + Source: core.StringPtr(source), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll) isaNetworkACLRulePrototypeNetworkACLContext() bool { + return true +} + +// UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll unmarshals an instance of NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll from the specified map of raw messages. +func UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolAll) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp : NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp struct +// This model "extends" NetworkACLRulePrototypeNetworkACLContext +type NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also + // specified. + Code *int64 `json:"code,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule. 
+ Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmpActionAllowConst = "allow" + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmpDirectionInboundConst = "inbound" + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmpProtocolIcmpConst = "icmp" +) + +// NewNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp : Instantiate NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp(action string, destination string, direction string, source string, protocol string) (model *NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp, err error) { + model = &NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp{ + Action: core.StringPtr(action), + Destination: core.StringPtr(destination), + Direction: core.StringPtr(direction), + Source: core.StringPtr(source), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp) isaNetworkACLRulePrototypeNetworkACLContext() bool { + return true +} + +// UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp unmarshals an instance of NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp from the specified map of raw messages. 
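+//
+// Note on the calling convention (inferred from the reflection logic below):
+// `result` must be a pointer to a pointer to the concrete model; the function
+// allocates the struct and stores its address through `result`. Hypothetical
+// direct invocation, given `rawMap` of type map[string]json.RawMessage:
+//
+//   var icmpRule *NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp
+//   err := UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp(rawMap, &icmpRule)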
+func UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolIcmp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp : NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp struct +// This model "extends" NetworkACLRulePrototypeNetworkACLContext +type NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The inclusive upper bound of TCP/UDP destination port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP destination port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The inclusive upper bound of TCP/UDP source port range. + SourcePortMax *int64 `json:"source_port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP source port range. + SourcePortMin *int64 `json:"source_port_min,omitempty"` +} + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudpActionAllowConst = "allow" + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. 
+const ( + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudpDirectionInboundConst = "inbound" + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudpProtocolTCPConst = "tcp" + NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudpProtocolUDPConst = "udp" +) + +// NewNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp : Instantiate NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp(action string, destination string, direction string, source string, protocol string) (model *NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp, err error) { + model = &NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp{ + Action: core.StringPtr(action), + Destination: core.StringPtr(destination), + Direction: core.StringPtr(direction), + Source: core.StringPtr(source), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp) isaNetworkACLRulePrototypeNetworkACLContext() bool { + return true +} + +// UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp unmarshals an instance of NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp from the specified map of raw messages. +func UnmarshalNetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRulePrototypeNetworkACLContextNetworkACLRuleProtocolTcpudp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_max", &obj.PortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_min", &obj.PortMin) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_max", &obj.SourcePortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_min", &obj.SourcePortMin) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRulePrototypeNetworkACLRuleProtocolAll : NetworkACLRulePrototypeNetworkACLRuleProtocolAll struct +// This model "extends" NetworkACLRulePrototype +type NetworkACLRulePrototypeNetworkACLRuleProtocolAll struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + Before NetworkACLRuleBeforePrototypeIntf `json:"before,omitempty"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. 
+ Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the NetworkACLRulePrototypeNetworkACLRuleProtocolAll.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePrototypeNetworkACLRuleProtocolAllActionAllowConst = "allow" + NetworkACLRulePrototypeNetworkACLRuleProtocolAllActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLRuleProtocolAll.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRulePrototypeNetworkACLRuleProtocolAllDirectionInboundConst = "inbound" + NetworkACLRulePrototypeNetworkACLRuleProtocolAllDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLRuleProtocolAll.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRulePrototypeNetworkACLRuleProtocolAllProtocolAllConst = "all" +) + +// NewNetworkACLRulePrototypeNetworkACLRuleProtocolAll : Instantiate NetworkACLRulePrototypeNetworkACLRuleProtocolAll (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRulePrototypeNetworkACLRuleProtocolAll(action string, destination string, direction string, source string, protocol string) (model *NetworkACLRulePrototypeNetworkACLRuleProtocolAll, err error) { + model = &NetworkACLRulePrototypeNetworkACLRuleProtocolAll{ + Action: core.StringPtr(action), + Destination: core.StringPtr(destination), + Direction: core.StringPtr(direction), + Source: core.StringPtr(source), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRulePrototypeNetworkACLRuleProtocolAll) isaNetworkACLRulePrototype() bool { + return true +} + +// UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolAll unmarshals an instance of NetworkACLRulePrototypeNetworkACLRuleProtocolAll from the specified map of raw messages. 
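+//
+// Hypothetical end-to-end sketch; the options helper and service call names
+// are assumed from the SDK's usual options pattern:
+//
+//   proto, _ := classicService.NewNetworkACLRulePrototypeNetworkACLRuleProtocolAll(
+//       "deny", "0.0.0.0/0", "inbound", "0.0.0.0/0", "all")
+//   opts := classicService.NewCreateNetworkACLRuleOptions(networkACLID, proto)
+//   rule, response, err := classicService.CreateNetworkACLRule(opts)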
+func UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolAll(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRulePrototypeNetworkACLRuleProtocolAll) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleBeforePrototype) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp : NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp struct +// This model "extends" NetworkACLRulePrototype +type NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + Before NetworkACLRuleBeforePrototypeIntf `json:"before,omitempty"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also + // specified. + Code *int64 `json:"code,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePrototypeNetworkACLRuleProtocolIcmpActionAllowConst = "allow" + NetworkACLRulePrototypeNetworkACLRuleProtocolIcmpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRulePrototypeNetworkACLRuleProtocolIcmpDirectionInboundConst = "inbound" + NetworkACLRulePrototypeNetworkACLRuleProtocolIcmpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp.Protocol property. +// The protocol to enforce. 
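+//
+// For example (hypothetical usage), an ICMP echo-request rule pins `type` 8
+// and leaves `code` unset so that all codes match:
+//
+//   icmpRule, err := classicService.NewNetworkACLRulePrototypeNetworkACLRuleProtocolIcmp(
+//       "allow", "0.0.0.0/0", "inbound", "0.0.0.0/0",
+//       NetworkACLRulePrototypeNetworkACLRuleProtocolIcmpProtocolIcmpConst)
+//   if err == nil {
+//       icmpRule.Type = core.Int64Ptr(8)
+//   }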
+const ( + NetworkACLRulePrototypeNetworkACLRuleProtocolIcmpProtocolIcmpConst = "icmp" +) + +// NewNetworkACLRulePrototypeNetworkACLRuleProtocolIcmp : Instantiate NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRulePrototypeNetworkACLRuleProtocolIcmp(action string, destination string, direction string, source string, protocol string) (model *NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp, err error) { + model = &NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp{ + Action: core.StringPtr(action), + Destination: core.StringPtr(destination), + Direction: core.StringPtr(direction), + Source: core.StringPtr(source), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp) isaNetworkACLRulePrototype() bool { + return true +} + +// UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolIcmp unmarshals an instance of NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp from the specified map of raw messages. +func UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolIcmp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRulePrototypeNetworkACLRuleProtocolIcmp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleBeforePrototype) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp : NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp struct +// This model "extends" NetworkACLRulePrototype +type NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + Before NetworkACLRuleBeforePrototypeIntf `json:"before,omitempty"` + + // The destination IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The source IP address or CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The inclusive upper bound of TCP/UDP destination port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP destination port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The protocol to enforce. 
+ Protocol *string `json:"protocol" validate:"required"` + + // The inclusive upper bound of TCP/UDP source port range. + SourcePortMax *int64 `json:"source_port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP source port range. + SourcePortMin *int64 `json:"source_port_min,omitempty"` +} + +// Constants associated with the NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudpActionAllowConst = "allow" + NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudpDirectionInboundConst = "inbound" + NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudpProtocolTCPConst = "tcp" + NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudpProtocolUDPConst = "udp" +) + +// NewNetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp : Instantiate NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp (Generic Model Constructor) +func (*VpcClassicV1) NewNetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp(action string, destination string, direction string, source string, protocol string) (model *NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp, err error) { + model = &NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp{ + Action: core.StringPtr(action), + Destination: core.StringPtr(destination), + Direction: core.StringPtr(direction), + Source: core.StringPtr(source), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp) isaNetworkACLRulePrototype() bool { + return true +} + +// UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp unmarshals an instance of NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp from the specified map of raw messages. 
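+//
+// Illustrative sketch (hypothetical usage): a TCP rule restricted to the
+// destination port range 30000-32767.
+//
+//   tcpRule, err := classicService.NewNetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp(
+//       "allow", "10.0.0.0/24", "inbound", "0.0.0.0/0", "tcp")
+//   if err == nil {
+//       tcpRule.PortMin = core.Int64Ptr(30000)
+//       tcpRule.PortMax = core.Int64Ptr(32767)
+//   }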
+func UnmarshalNetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRulePrototypeNetworkACLRuleProtocolTcpudp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleBeforePrototype) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_max", &obj.PortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_min", &obj.PortMin) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_max", &obj.SourcePortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_min", &obj.SourcePortMin) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleNetworkACLRuleProtocolAll : NetworkACLRuleNetworkACLRuleProtocolAll struct +// This model "extends" NetworkACLRule +type NetworkACLRuleNetworkACLRuleProtocolAll struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The rule that this rule is immediately before. If absent, this is the last rule. + Before *NetworkACLRuleReference `json:"before,omitempty"` + + // The date and time that the rule was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The destination CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` + + // The IP version for this rule. + IPVersion *string `json:"ip_version" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name" validate:"required"` + + // The source CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolAll.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRuleNetworkACLRuleProtocolAllActionAllowConst = "allow" + NetworkACLRuleNetworkACLRuleProtocolAllActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolAll.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. 
+const ( + NetworkACLRuleNetworkACLRuleProtocolAllDirectionInboundConst = "inbound" + NetworkACLRuleNetworkACLRuleProtocolAllDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolAll.IPVersion property. +// The IP version for this rule. +const ( + NetworkACLRuleNetworkACLRuleProtocolAllIPVersionIpv4Const = "ipv4" + NetworkACLRuleNetworkACLRuleProtocolAllIPVersionIpv6Const = "ipv6" +) + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolAll.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRuleNetworkACLRuleProtocolAllProtocolAllConst = "all" +) + +func (*NetworkACLRuleNetworkACLRuleProtocolAll) isaNetworkACLRule() bool { + return true +} + +// UnmarshalNetworkACLRuleNetworkACLRuleProtocolAll unmarshals an instance of NetworkACLRuleNetworkACLRuleProtocolAll from the specified map of raw messages. +func UnmarshalNetworkACLRuleNetworkACLRuleProtocolAll(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleNetworkACLRuleProtocolAll) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleNetworkACLRuleProtocolIcmp : NetworkACLRuleNetworkACLRuleProtocolIcmp struct +// This model "extends" NetworkACLRule +type NetworkACLRuleNetworkACLRuleProtocolIcmp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The rule that this rule is immediately before. If absent, this is the last rule. + Before *NetworkACLRuleReference `json:"before,omitempty"` + + // The date and time that the rule was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The destination CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` + + // The IP version for this rule. + IPVersion *string `json:"ip_version" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. 
+ Name *string `json:"name" validate:"required"` + + // The source CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The ICMP traffic code to allow. If unspecified, all codes are allowed. This can only be specified if type is also + // specified. + Code *int64 `json:"code,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The ICMP traffic type to allow. If unspecified, all types are allowed by this rule. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolIcmp.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRuleNetworkACLRuleProtocolIcmpActionAllowConst = "allow" + NetworkACLRuleNetworkACLRuleProtocolIcmpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolIcmp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRuleNetworkACLRuleProtocolIcmpDirectionInboundConst = "inbound" + NetworkACLRuleNetworkACLRuleProtocolIcmpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolIcmp.IPVersion property. +// The IP version for this rule. +const ( + NetworkACLRuleNetworkACLRuleProtocolIcmpIPVersionIpv4Const = "ipv4" + NetworkACLRuleNetworkACLRuleProtocolIcmpIPVersionIpv6Const = "ipv6" +) + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolIcmp.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRuleNetworkACLRuleProtocolIcmpProtocolIcmpConst = "icmp" +) + +func (*NetworkACLRuleNetworkACLRuleProtocolIcmp) isaNetworkACLRule() bool { + return true +} + +// UnmarshalNetworkACLRuleNetworkACLRuleProtocolIcmp unmarshals an instance of NetworkACLRuleNetworkACLRuleProtocolIcmp from the specified map of raw messages. 
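+//
+// Hypothetical consumer-side sketch: rules returned by the service surface as
+// the NetworkACLRuleIntf interface (name assumed from the generator's naming
+// convention) and can be narrowed with a type switch:
+//
+//   switch r := ruleIntf.(type) {
+//   case *NetworkACLRuleNetworkACLRuleProtocolIcmp:
+//       _ = r.Type // ICMP selectors live on the icmp variant
+//   case *NetworkACLRuleNetworkACLRuleProtocolTcpudp:
+//       _ = r.PortMax // port bounds live on the tcp/udp variant
+//   case *NetworkACLRuleNetworkACLRuleProtocolAll:
+//       // matches every protocol
+//   }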
+func UnmarshalNetworkACLRuleNetworkACLRuleProtocolIcmp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleNetworkACLRuleProtocolIcmp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// NetworkACLRuleNetworkACLRuleProtocolTcpudp : NetworkACLRuleNetworkACLRuleProtocolTcpudp struct +// This model "extends" NetworkACLRule +type NetworkACLRuleNetworkACLRuleProtocolTcpudp struct { + // Whether to allow or deny matching traffic. + Action *string `json:"action" validate:"required"` + + // The rule that this rule is immediately before. If absent, this is the last rule. + Before *NetworkACLRuleReference `json:"before,omitempty"` + + // The date and time that the rule was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The destination CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Destination *string `json:"destination" validate:"required"` + + // Whether the traffic to be matched is `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The URL for this network ACL rule. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this network ACL rule. + ID *string `json:"id" validate:"required"` + + // The IP version for this rule. + IPVersion *string `json:"ip_version" validate:"required"` + + // The user-defined name for this rule. Names must be unique within the network ACL the rule resides in. If + // unspecified, the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name" validate:"required"` + + // The source CIDR block. The CIDR block `0.0.0.0/0` applies to all addresses. + Source *string `json:"source" validate:"required"` + + // The inclusive upper bound of TCP/UDP destination port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP destination port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The inclusive upper bound of TCP/UDP source port range. + SourcePortMax *int64 `json:"source_port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP source port range. 
+ SourcePortMin *int64 `json:"source_port_min,omitempty"` +} + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolTcpudp.Action property. +// Whether to allow or deny matching traffic. +const ( + NetworkACLRuleNetworkACLRuleProtocolTcpudpActionAllowConst = "allow" + NetworkACLRuleNetworkACLRuleProtocolTcpudpActionDenyConst = "deny" +) + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolTcpudp.Direction property. +// Whether the traffic to be matched is `inbound` or `outbound`. +const ( + NetworkACLRuleNetworkACLRuleProtocolTcpudpDirectionInboundConst = "inbound" + NetworkACLRuleNetworkACLRuleProtocolTcpudpDirectionOutboundConst = "outbound" +) + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolTcpudp.IPVersion property. +// The IP version for this rule. +const ( + NetworkACLRuleNetworkACLRuleProtocolTcpudpIPVersionIpv4Const = "ipv4" + NetworkACLRuleNetworkACLRuleProtocolTcpudpIPVersionIpv6Const = "ipv6" +) + +// Constants associated with the NetworkACLRuleNetworkACLRuleProtocolTcpudp.Protocol property. +// The protocol to enforce. +const ( + NetworkACLRuleNetworkACLRuleProtocolTcpudpProtocolTCPConst = "tcp" + NetworkACLRuleNetworkACLRuleProtocolTcpudpProtocolUDPConst = "udp" +) + +func (*NetworkACLRuleNetworkACLRuleProtocolTcpudp) isaNetworkACLRule() bool { + return true +} + +// UnmarshalNetworkACLRuleNetworkACLRuleProtocolTcpudp unmarshals an instance of NetworkACLRuleNetworkACLRuleProtocolTcpudp from the specified map of raw messages. +func UnmarshalNetworkACLRuleNetworkACLRuleProtocolTcpudp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(NetworkACLRuleNetworkACLRuleProtocolTcpudp) + err = core.UnmarshalPrimitive(m, "action", &obj.Action) + if err != nil { + return + } + err = core.UnmarshalModel(m, "before", &obj.Before, UnmarshalNetworkACLRuleReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "destination", &obj.Destination) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source", &obj.Source) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_max", &obj.PortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_min", &obj.PortMin) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_max", &obj.SourcePortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "source_port_min", &obj.SourcePortMin) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OperatingSystemIdentityByHref : OperatingSystemIdentityByHref struct +// This model "extends" OperatingSystemIdentity +type OperatingSystemIdentityByHref struct { + // The URL for this operating system. 
+ Href *string `json:"href" validate:"required"` +} + +// NewOperatingSystemIdentityByHref : Instantiate OperatingSystemIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewOperatingSystemIdentityByHref(href string) (model *OperatingSystemIdentityByHref, err error) { + model = &OperatingSystemIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*OperatingSystemIdentityByHref) isaOperatingSystemIdentity() bool { + return true +} + +// UnmarshalOperatingSystemIdentityByHref unmarshals an instance of OperatingSystemIdentityByHref from the specified map of raw messages. +func UnmarshalOperatingSystemIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OperatingSystemIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// OperatingSystemIdentityByName : OperatingSystemIdentityByName struct +// This model "extends" OperatingSystemIdentity +type OperatingSystemIdentityByName struct { + // The globally unique name for this operating system. + Name *string `json:"name" validate:"required"` +} + +// NewOperatingSystemIdentityByName : Instantiate OperatingSystemIdentityByName (Generic Model Constructor) +func (*VpcClassicV1) NewOperatingSystemIdentityByName(name string) (model *OperatingSystemIdentityByName, err error) { + model = &OperatingSystemIdentityByName{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*OperatingSystemIdentityByName) isaOperatingSystemIdentity() bool { + return true +} + +// UnmarshalOperatingSystemIdentityByName unmarshals an instance of OperatingSystemIdentityByName from the specified map of raw messages. +func UnmarshalOperatingSystemIdentityByName(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OperatingSystemIdentityByName) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayFloatingIPPrototypeFloatingIPIdentity : Identifies a floating IP by a unique property. +// Models which "extend" this model: +// - PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID +// - PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN +// - PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref +// - PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress +// This model "extends" PublicGatewayFloatingIPPrototype +type PublicGatewayFloatingIPPrototypeFloatingIPIdentity struct { + // The unique identifier for this floating IP. + ID *string `json:"id,omitempty"` + + // The CRN for this floating IP. + CRN *string `json:"crn,omitempty"` + + // The URL for this floating IP. + Href *string `json:"href,omitempty"` + + // The globally unique IP address. 
+ Address *string `json:"address,omitempty"` +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentity) isaPublicGatewayFloatingIPPrototypeFloatingIPIdentity() bool { + return true +} + +type PublicGatewayFloatingIPPrototypeFloatingIPIdentityIntf interface { + PublicGatewayFloatingIPPrototypeIntf + isaPublicGatewayFloatingIPPrototypeFloatingIPIdentity() bool +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentity) isaPublicGatewayFloatingIPPrototype() bool { + return true +} + +// UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentity unmarshals an instance of PublicGatewayFloatingIPPrototypeFloatingIPIdentity from the specified map of raw messages. +func UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayFloatingIPPrototypeFloatingIPIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayFloatingIPPrototypeFloatingIPPrototypeTargetContext : PublicGatewayFloatingIPPrototypeFloatingIPPrototypeTargetContext struct +// This model "extends" PublicGatewayFloatingIPPrototype +type PublicGatewayFloatingIPPrototypeFloatingIPPrototypeTargetContext struct { + // The unique user-defined name for this floating IP. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string `json:"name,omitempty"` +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPPrototypeTargetContext) isaPublicGatewayFloatingIPPrototype() bool { + return true +} + +// UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPPrototypeTargetContext unmarshals an instance of PublicGatewayFloatingIPPrototypeFloatingIPPrototypeTargetContext from the specified map of raw messages. +func UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPPrototypeTargetContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayFloatingIPPrototypeFloatingIPPrototypeTargetContext) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayIdentityByCRN : PublicGatewayIdentityByCRN struct +// This model "extends" PublicGatewayIdentity +type PublicGatewayIdentityByCRN struct { + // The CRN for this public gateway. + CRN *string `json:"crn" validate:"required"` +} + +// NewPublicGatewayIdentityByCRN : Instantiate PublicGatewayIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewPublicGatewayIdentityByCRN(crn string) (model *PublicGatewayIdentityByCRN, err error) { + model = &PublicGatewayIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PublicGatewayIdentityByCRN) isaPublicGatewayIdentity() bool { + return true +} + +// UnmarshalPublicGatewayIdentityByCRN unmarshals an instance of PublicGatewayIdentityByCRN from the specified map of raw messages. 
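+//
+// Illustrative sketch (hypothetical usage): any of the ByID/ByCRN/ByHref
+// variants can stand in wherever a public gateway identity is accepted, so
+// callers pick whichever unique property they already hold:
+//
+//   gwIdentity, err := classicService.NewPublicGatewayIdentityByID(publicGatewayID)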
+func UnmarshalPublicGatewayIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayIdentityByHref : PublicGatewayIdentityByHref struct +// This model "extends" PublicGatewayIdentity +type PublicGatewayIdentityByHref struct { + // The URL for this public gateway. + Href *string `json:"href" validate:"required"` +} + +// NewPublicGatewayIdentityByHref : Instantiate PublicGatewayIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewPublicGatewayIdentityByHref(href string) (model *PublicGatewayIdentityByHref, err error) { + model = &PublicGatewayIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PublicGatewayIdentityByHref) isaPublicGatewayIdentity() bool { + return true +} + +// UnmarshalPublicGatewayIdentityByHref unmarshals an instance of PublicGatewayIdentityByHref from the specified map of raw messages. +func UnmarshalPublicGatewayIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayIdentityByID : PublicGatewayIdentityByID struct +// This model "extends" PublicGatewayIdentity +type PublicGatewayIdentityByID struct { + // The unique identifier for this public gateway. + ID *string `json:"id" validate:"required"` +} + +// NewPublicGatewayIdentityByID : Instantiate PublicGatewayIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewPublicGatewayIdentityByID(id string) (model *PublicGatewayIdentityByID, err error) { + model = &PublicGatewayIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PublicGatewayIdentityByID) isaPublicGatewayIdentity() bool { + return true +} + +// UnmarshalPublicGatewayIdentityByID unmarshals an instance of PublicGatewayIdentityByID from the specified map of raw messages. +func UnmarshalPublicGatewayIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ResourceGroupIdentityByID : ResourceGroupIdentityByID struct +// This model "extends" ResourceGroupIdentity +type ResourceGroupIdentityByID struct { + // The unique identifier for this resource group. + ID *string `json:"id" validate:"required"` +} + +// NewResourceGroupIdentityByID : Instantiate ResourceGroupIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewResourceGroupIdentityByID(id string) (model *ResourceGroupIdentityByID, err error) { + model = &ResourceGroupIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ResourceGroupIdentityByID) isaResourceGroupIdentity() bool { + return true +} + +// UnmarshalResourceGroupIdentityByID unmarshals an instance of ResourceGroupIdentityByID from the specified map of raw messages. 
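+//
+// Hypothetical usage: resource-group scoping is expressed by wrapping the
+// group's ID in this identity model before attaching it to a create request.
+//
+//   rg, err := classicService.NewResourceGroupIdentityByID(resourceGroupID)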
+func UnmarshalResourceGroupIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ResourceGroupIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RouteNextHopIP : RouteNextHopIP struct +// This model "extends" RouteNextHop +type RouteNextHopIP struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address" validate:"required"` +} + +func (*RouteNextHopIP) isaRouteNextHop() bool { + return true +} + +// UnmarshalRouteNextHopIP unmarshals an instance of RouteNextHopIP from the specified map of raw messages. +func UnmarshalRouteNextHopIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RouteNextHopIP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// RouteNextHopPrototypeRouteNextHopIP : The IP address of the next hop to which to route packets. +// This model "extends" RouteNextHopPrototype +type RouteNextHopPrototypeRouteNextHopIP struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address" validate:"required"` +} + +// NewRouteNextHopPrototypeRouteNextHopIP : Instantiate RouteNextHopPrototypeRouteNextHopIP (Generic Model Constructor) +func (*VpcClassicV1) NewRouteNextHopPrototypeRouteNextHopIP(address string) (model *RouteNextHopPrototypeRouteNextHopIP, err error) { + model = &RouteNextHopPrototypeRouteNextHopIP{ + Address: core.StringPtr(address), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*RouteNextHopPrototypeRouteNextHopIP) isaRouteNextHopPrototype() bool { + return true +} + +// UnmarshalRouteNextHopPrototypeRouteNextHopIP unmarshals an instance of RouteNextHopPrototypeRouteNextHopIP from the specified map of raw messages. +func UnmarshalRouteNextHopPrototypeRouteNextHopIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(RouteNextHopPrototypeRouteNextHopIP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupIdentityByCRN : SecurityGroupIdentityByCRN struct +// This model "extends" SecurityGroupIdentity +type SecurityGroupIdentityByCRN struct { + // The security group's CRN. 
+ CRN *string `json:"crn" validate:"required"` +} + +// NewSecurityGroupIdentityByCRN : Instantiate SecurityGroupIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupIdentityByCRN(crn string) (model *SecurityGroupIdentityByCRN, err error) { + model = &SecurityGroupIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupIdentityByCRN) isaSecurityGroupIdentity() bool { + return true +} + +// UnmarshalSecurityGroupIdentityByCRN unmarshals an instance of SecurityGroupIdentityByCRN from the specified map of raw messages. +func UnmarshalSecurityGroupIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupIdentityByHref : SecurityGroupIdentityByHref struct +// This model "extends" SecurityGroupIdentity +type SecurityGroupIdentityByHref struct { + // The security group's canonical URL. + Href *string `json:"href" validate:"required"` +} + +// NewSecurityGroupIdentityByHref : Instantiate SecurityGroupIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupIdentityByHref(href string) (model *SecurityGroupIdentityByHref, err error) { + model = &SecurityGroupIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupIdentityByHref) isaSecurityGroupIdentity() bool { + return true +} + +// UnmarshalSecurityGroupIdentityByHref unmarshals an instance of SecurityGroupIdentityByHref from the specified map of raw messages. +func UnmarshalSecurityGroupIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupIdentityByID : SecurityGroupIdentityByID struct +// This model "extends" SecurityGroupIdentity +type SecurityGroupIdentityByID struct { + // The unique identifier for this security group. + ID *string `json:"id" validate:"required"` +} + +// NewSecurityGroupIdentityByID : Instantiate SecurityGroupIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupIdentityByID(id string) (model *SecurityGroupIdentityByID, err error) { + model = &SecurityGroupIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupIdentityByID) isaSecurityGroupIdentity() bool { + return true +} + +// UnmarshalSecurityGroupIdentityByID unmarshals an instance of SecurityGroupIdentityByID from the specified map of raw messages. +func UnmarshalSecurityGroupIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRulePatchSecurityGroupRuleProtocolAll : When `protocol` is `all`, then it's invalid to specify `port_min`, `port_max`, `type` or +// `code`. 
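+// For illustration (editor's sketch, not generated code), a patch that relaxes a
+// rule to match every protocol could be built with this model's constructor,
+// where vpcService is an assumed *VpcClassicV1 client:
+//
+//   patch, err := vpcService.NewSecurityGroupRulePatchSecurityGroupRuleProtocolAll("all")
+//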
+// This model "extends" SecurityGroupRulePatch +type SecurityGroupRulePatchSecurityGroupRuleProtocolAll struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction,omitempty"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + Remote SecurityGroupRuleRemotePatchIntf `json:"remote,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the SecurityGroupRulePatchSecurityGroupRuleProtocolAll.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRulePatchSecurityGroupRuleProtocolAllDirectionInboundConst = "inbound" + SecurityGroupRulePatchSecurityGroupRuleProtocolAllDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRulePatchSecurityGroupRuleProtocolAll.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRulePatchSecurityGroupRuleProtocolAllIPVersionIpv4Const = "ipv4" +) + +// Constants associated with the SecurityGroupRulePatchSecurityGroupRuleProtocolAll.Protocol property. +// The protocol to enforce. +const ( + SecurityGroupRulePatchSecurityGroupRuleProtocolAllProtocolAllConst = "all" +) + +// NewSecurityGroupRulePatchSecurityGroupRuleProtocolAll : Instantiate SecurityGroupRulePatchSecurityGroupRuleProtocolAll (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRulePatchSecurityGroupRuleProtocolAll(protocol string) (model *SecurityGroupRulePatchSecurityGroupRuleProtocolAll, err error) { + model = &SecurityGroupRulePatchSecurityGroupRuleProtocolAll{ + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRulePatchSecurityGroupRuleProtocolAll) isaSecurityGroupRulePatch() bool { + return true +} + +// UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolAll unmarshals an instance of SecurityGroupRulePatchSecurityGroupRuleProtocolAll from the specified map of raw messages. +func UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolAll(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRulePatchSecurityGroupRuleProtocolAll) + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "remote", &obj.Remote, UnmarshalSecurityGroupRuleRemotePatch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp : When `protocol` is `icmp`, then the rule may also contain fields to specify an ICMP `type` and `code`. Field `code` +// may only be specified if `type` is also specified. 
If type is not specified, then traffic is allowed for all types +// and codes. If type is specified and code is not specified, then traffic is allowed with the specified type for all +// codes. +// This model "extends" SecurityGroupRulePatch +type SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction,omitempty"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + Remote SecurityGroupRuleRemotePatchIntf `json:"remote,omitempty"` + + // The ICMP traffic code to allow. + Code *int64 `json:"code,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The ICMP traffic type to allow. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRulePatchSecurityGroupRuleProtocolIcmpDirectionInboundConst = "inbound" + SecurityGroupRulePatchSecurityGroupRuleProtocolIcmpDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRulePatchSecurityGroupRuleProtocolIcmpIPVersionIpv4Const = "ipv4" +) + +// Constants associated with the SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp.Protocol property. +// The protocol to enforce. +const ( + SecurityGroupRulePatchSecurityGroupRuleProtocolIcmpProtocolIcmpConst = "icmp" +) + +// NewSecurityGroupRulePatchSecurityGroupRuleProtocolIcmp : Instantiate SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRulePatchSecurityGroupRuleProtocolIcmp(protocol string) (model *SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp, err error) { + model = &SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp{ + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp) isaSecurityGroupRulePatch() bool { + return true +} + +// UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolIcmp unmarshals an instance of SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp from the specified map of raw messages. 
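+// Editor's illustrative sketch (not generated code): per the semantics above, a
+// patch allowing only ICMP echo requests would set Type to 8 (echo request) and
+// leave Code unset, which permits all codes for that type:
+//
+//   patch, err := vpcService.NewSecurityGroupRulePatchSecurityGroupRuleProtocolIcmp("icmp")
+//   if err == nil {
+//       patch.Type = core.Int64Ptr(8)
+//   }
+//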
+func UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolIcmp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRulePatchSecurityGroupRuleProtocolIcmp) + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "remote", &obj.Remote, UnmarshalSecurityGroupRuleRemotePatch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp : If `protocol` is either `tcp` or `udp`, then the rule may also contain `port_min` and +// `port_max`. Either both should be set, or neither. When neither is set then traffic is allowed on all ports. For a +// single port, set both to the same value. +// This model "extends" SecurityGroupRulePatch +type SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction,omitempty"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + Remote SecurityGroupRuleRemotePatchIntf `json:"remote,omitempty"` + + // The inclusive upper bound of TCP/UDP port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudpDirectionInboundConst = "inbound" + SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudpDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudpIPVersionIpv4Const = "ipv4" +) + +// Constants associated with the SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp.Protocol property. +// The protocol to enforce. 
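+// Editor's illustrative sketch (not generated code): per the port semantics
+// above, narrowing an existing TCP rule to a single port sets port_min and
+// port_max to the same value:
+//
+//   patch, err := vpcService.NewSecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp("tcp")
+//   if err == nil {
+//       patch.PortMin = core.Int64Ptr(22)
+//       patch.PortMax = core.Int64Ptr(22)
+//   }
+//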
+const ( + SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudpProtocolTCPConst = "tcp" + SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudpProtocolUDPConst = "udp" +) + +// NewSecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp : Instantiate SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp(protocol string) (model *SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp, err error) { + model = &SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp{ + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp) isaSecurityGroupRulePatch() bool { + return true +} + +// UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp unmarshals an instance of SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp from the specified map of raw messages. +func UnmarshalSecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRulePatchSecurityGroupRuleProtocolTcpudp) + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "remote", &obj.Remote, UnmarshalSecurityGroupRuleRemotePatch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_max", &obj.PortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_min", &obj.PortMin) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll : When `protocol` is `all`, then it's invalid to specify `port_min`, `port_max`, `type` or +// `code`. +// This model "extends" SecurityGroupRulePrototype +type SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + Remote SecurityGroupRuleRemotePrototypeIntf `json:"remote,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRulePrototypeSecurityGroupRuleProtocolAllDirectionInboundConst = "inbound" + SecurityGroupRulePrototypeSecurityGroupRuleProtocolAllDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. 
Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRulePrototypeSecurityGroupRuleProtocolAllIPVersionIpv4Const = "ipv4" +) + +// Constants associated with the SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll.Protocol property. +// The protocol to enforce. +const ( + SecurityGroupRulePrototypeSecurityGroupRuleProtocolAllProtocolAllConst = "all" +) + +// NewSecurityGroupRulePrototypeSecurityGroupRuleProtocolAll : Instantiate SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRulePrototypeSecurityGroupRuleProtocolAll(direction string, protocol string) (model *SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll, err error) { + model = &SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll{ + Direction: core.StringPtr(direction), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll) isaSecurityGroupRulePrototype() bool { + return true +} + +// UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolAll unmarshals an instance of SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll from the specified map of raw messages. +func UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolAll(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll) + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "remote", &obj.Remote, UnmarshalSecurityGroupRuleRemotePrototype) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp : When `protocol` is `icmp`, then the rule may also contain fields to specify an ICMP `type` and `code`. Field `code` +// may only be specified if `type` is also specified. If type is not specified, then traffic is allowed for all types +// and codes. If type is specified and code is not specified, then traffic is allowed with the specified type for all +// codes. +// This model "extends" SecurityGroupRulePrototype +type SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + Remote SecurityGroupRuleRemotePrototypeIntf `json:"remote,omitempty"` + + // The ICMP traffic code to allow. + Code *int64 `json:"code,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The ICMP traffic type to allow. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp.Direction property. 
+// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmpDirectionInboundConst = "inbound" + SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmpDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmpIPVersionIpv4Const = "ipv4" +) + +// Constants associated with the SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp.Protocol property. +// The protocol to enforce. +const ( + SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmpProtocolIcmpConst = "icmp" +) + +// NewSecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp : Instantiate SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp(direction string, protocol string) (model *SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp, err error) { + model = &SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp{ + Direction: core.StringPtr(direction), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp) isaSecurityGroupRulePrototype() bool { + return true +} + +// UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp unmarshals an instance of SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp from the specified map of raw messages. +func UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp) + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "remote", &obj.Remote, UnmarshalSecurityGroupRuleRemotePrototype) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp : If `protocol` is either `tcp` or `udp`, then the rule may also contain `port_min` and +// `port_max`. Either both should be set, or neither. When neither is set then traffic is allowed on all ports. For a +// single port, set both to the same value. +// This model "extends" SecurityGroupRulePrototype +type SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. 
Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + Remote SecurityGroupRuleRemotePrototypeIntf `json:"remote,omitempty"` + + // The inclusive upper bound of TCP/UDP port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudpDirectionInboundConst = "inbound" + SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudpDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudpIPVersionIpv4Const = "ipv4" +) + +// Constants associated with the SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp.Protocol property. +// The protocol to enforce. +const ( + SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudpProtocolTCPConst = "tcp" + SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudpProtocolUDPConst = "udp" +) + +// NewSecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp : Instantiate SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp(direction string, protocol string) (model *SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp, err error) { + model = &SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp{ + Direction: core.StringPtr(direction), + Protocol: core.StringPtr(protocol), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp) isaSecurityGroupRulePrototype() bool { + return true +} + +// UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp unmarshals an instance of SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp from the specified map of raw messages. 
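+// Editor's illustrative sketch (not generated code): a new inbound rule allowing
+// HTTPS only would pin both port bounds to 443:
+//
+//   rule, err := vpcService.NewSecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp("inbound", "tcp")
+//   if err == nil {
+//       rule.PortMin = core.Int64Ptr(443)
+//       rule.PortMax = core.Int64Ptr(443)
+//   }
+//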
+func UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp) + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "remote", &obj.Remote, UnmarshalSecurityGroupRuleRemotePrototype) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_max", &obj.PortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_min", &obj.PortMin) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePatchCIDR : SecurityGroupRuleRemotePatchCIDR struct +// This model "extends" SecurityGroupRuleRemotePatch +type SecurityGroupRuleRemotePatchCIDR struct { + // The CIDR block. This property may add support for IPv6 CIDR blocks in the future. When processing a value in this + // property, verify that the CIDR block is in an expected format. If it is not, log an error. Optionally halt + // processing and surface the error, or bypass the resource on which the unexpected CIDR block format was encountered. + CIDRBlock *string `json:"cidr_block" validate:"required"` +} + +// NewSecurityGroupRuleRemotePatchCIDR : Instantiate SecurityGroupRuleRemotePatchCIDR (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePatchCIDR(cidrBlock string) (model *SecurityGroupRuleRemotePatchCIDR, err error) { + model = &SecurityGroupRuleRemotePatchCIDR{ + CIDRBlock: core.StringPtr(cidrBlock), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePatchCIDR) isaSecurityGroupRuleRemotePatch() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePatchCIDR unmarshals an instance of SecurityGroupRuleRemotePatchCIDR from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePatchCIDR(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePatchCIDR) + err = core.UnmarshalPrimitive(m, "cidr_block", &obj.CIDRBlock) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePatchIP : SecurityGroupRuleRemotePatchIP struct +// This model "extends" SecurityGroupRuleRemotePatch +type SecurityGroupRuleRemotePatchIP struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. 
+ Address *string `json:"address" validate:"required"` +} + +// NewSecurityGroupRuleRemotePatchIP : Instantiate SecurityGroupRuleRemotePatchIP (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePatchIP(address string) (model *SecurityGroupRuleRemotePatchIP, err error) { + model = &SecurityGroupRuleRemotePatchIP{ + Address: core.StringPtr(address), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePatchIP) isaSecurityGroupRuleRemotePatch() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePatchIP unmarshals an instance of SecurityGroupRuleRemotePatchIP from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePatchIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePatchIP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePatchSecurityGroupIdentity : Identifies a security group by a unique property. +// Models which "extend" this model: +// - SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID +// - SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN +// - SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref +// This model "extends" SecurityGroupRuleRemotePatch +type SecurityGroupRuleRemotePatchSecurityGroupIdentity struct { + // The unique identifier for this security group. + ID *string `json:"id,omitempty"` + + // The security group's CRN. + CRN *string `json:"crn,omitempty"` + + // The security group's canonical URL. + Href *string `json:"href,omitempty"` +} + +func (*SecurityGroupRuleRemotePatchSecurityGroupIdentity) isaSecurityGroupRuleRemotePatchSecurityGroupIdentity() bool { + return true +} + +type SecurityGroupRuleRemotePatchSecurityGroupIdentityIntf interface { + SecurityGroupRuleRemotePatchIntf + isaSecurityGroupRuleRemotePatchSecurityGroupIdentity() bool +} + +func (*SecurityGroupRuleRemotePatchSecurityGroupIdentity) isaSecurityGroupRuleRemotePatch() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePatchSecurityGroupIdentity unmarshals an instance of SecurityGroupRuleRemotePatchSecurityGroupIdentity from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePatchSecurityGroupIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePatchSecurityGroupIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePrototypeCIDR : SecurityGroupRuleRemotePrototypeCIDR struct +// This model "extends" SecurityGroupRuleRemotePrototype +type SecurityGroupRuleRemotePrototypeCIDR struct { + // The CIDR block. This property may add support for IPv6 CIDR blocks in the future. When processing a value in this + // property, verify that the CIDR block is in an expected format. If it is not, log an error. Optionally halt + // processing and surface the error, or bypass the resource on which the unexpected CIDR block format was encountered. 
+ CIDRBlock *string `json:"cidr_block" validate:"required"` +} + +// NewSecurityGroupRuleRemotePrototypeCIDR : Instantiate SecurityGroupRuleRemotePrototypeCIDR (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePrototypeCIDR(cidrBlock string) (model *SecurityGroupRuleRemotePrototypeCIDR, err error) { + model = &SecurityGroupRuleRemotePrototypeCIDR{ + CIDRBlock: core.StringPtr(cidrBlock), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePrototypeCIDR) isaSecurityGroupRuleRemotePrototype() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePrototypeCIDR unmarshals an instance of SecurityGroupRuleRemotePrototypeCIDR from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePrototypeCIDR(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePrototypeCIDR) + err = core.UnmarshalPrimitive(m, "cidr_block", &obj.CIDRBlock) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePrototypeIP : SecurityGroupRuleRemotePrototypeIP struct +// This model "extends" SecurityGroupRuleRemotePrototype +type SecurityGroupRuleRemotePrototypeIP struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address" validate:"required"` +} + +// NewSecurityGroupRuleRemotePrototypeIP : Instantiate SecurityGroupRuleRemotePrototypeIP (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePrototypeIP(address string) (model *SecurityGroupRuleRemotePrototypeIP, err error) { + model = &SecurityGroupRuleRemotePrototypeIP{ + Address: core.StringPtr(address), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePrototypeIP) isaSecurityGroupRuleRemotePrototype() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePrototypeIP unmarshals an instance of SecurityGroupRuleRemotePrototypeIP from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePrototypeIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePrototypeIP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePrototypeSecurityGroupIdentity : Identifies a security group by a unique property. +// Models which "extend" this model: +// - SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID +// - SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN +// - SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref +// This model "extends" SecurityGroupRuleRemotePrototype +type SecurityGroupRuleRemotePrototypeSecurityGroupIdentity struct { + // The unique identifier for this security group. + ID *string `json:"id,omitempty"` + + // The security group's CRN. + CRN *string `json:"crn,omitempty"` + + // The security group's canonical URL. 
+ Href *string `json:"href,omitempty"` +} + +func (*SecurityGroupRuleRemotePrototypeSecurityGroupIdentity) isaSecurityGroupRuleRemotePrototypeSecurityGroupIdentity() bool { + return true +} + +type SecurityGroupRuleRemotePrototypeSecurityGroupIdentityIntf interface { + SecurityGroupRuleRemotePrototypeIntf + isaSecurityGroupRuleRemotePrototypeSecurityGroupIdentity() bool +} + +func (*SecurityGroupRuleRemotePrototypeSecurityGroupIdentity) isaSecurityGroupRuleRemotePrototype() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePrototypeSecurityGroupIdentity unmarshals an instance of SecurityGroupRuleRemotePrototypeSecurityGroupIdentity from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePrototypeSecurityGroupIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePrototypeSecurityGroupIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemoteCIDR : SecurityGroupRuleRemoteCIDR struct +// This model "extends" SecurityGroupRuleRemote +type SecurityGroupRuleRemoteCIDR struct { + // The CIDR block. This property may add support for IPv6 CIDR blocks in the future. When processing a value in this + // property, verify that the CIDR block is in an expected format. If it is not, log an error. Optionally halt + // processing and surface the error, or bypass the resource on which the unexpected CIDR block format was encountered. + CIDRBlock *string `json:"cidr_block" validate:"required"` +} + +func (*SecurityGroupRuleRemoteCIDR) isaSecurityGroupRuleRemote() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemoteCIDR unmarshals an instance of SecurityGroupRuleRemoteCIDR from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemoteCIDR(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemoteCIDR) + err = core.UnmarshalPrimitive(m, "cidr_block", &obj.CIDRBlock) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemoteIP : SecurityGroupRuleRemoteIP struct +// This model "extends" SecurityGroupRuleRemote +type SecurityGroupRuleRemoteIP struct { + // The IP address. This property may add support for IPv6 addresses in the future. When processing a value in this + // property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing + // and surface the error, or bypass the resource on which the unexpected IP address format was encountered. + Address *string `json:"address" validate:"required"` +} + +func (*SecurityGroupRuleRemoteIP) isaSecurityGroupRuleRemote() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemoteIP unmarshals an instance of SecurityGroupRuleRemoteIP from the specified map of raw messages. 
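+// Editor's illustrative sketch (not generated code): the RemotePrototype variants
+// above choose what a rule matches against. For example, restricting a rule to a
+// peer subnet by CIDR block:
+//
+//   remote, err := vpcService.NewSecurityGroupRuleRemotePrototypeCIDR("192.168.3.0/24")
+//   if err == nil {
+//       rule.Remote = remote
+//   }
+//
+// where rule is an assumed SecurityGroupRulePrototype variant such as the TCP/UDP
+// model shown earlier.
+//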
+func UnmarshalSecurityGroupRuleRemoteIP(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemoteIP) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemoteSecurityGroupReference : SecurityGroupRuleRemoteSecurityGroupReference struct +// This model "extends" SecurityGroupRuleRemote +type SecurityGroupRuleRemoteSecurityGroupReference struct { + // The security group's CRN. + CRN *string `json:"crn" validate:"required"` + + // The security group's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this security group. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this security group. Security group names must be unique, within the scope of an account. + Name *string `json:"name" validate:"required"` +} + +func (*SecurityGroupRuleRemoteSecurityGroupReference) isaSecurityGroupRuleRemote() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemoteSecurityGroupReference unmarshals an instance of SecurityGroupRuleRemoteSecurityGroupReference from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemoteSecurityGroupReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemoteSecurityGroupReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleSecurityGroupRuleProtocolAll : When `protocol` is `all`, then it's invalid to specify `port_min`, `port_max`, `type` or +// `code`. +// This model "extends" SecurityGroupRule +type SecurityGroupRuleSecurityGroupRuleProtocolAll struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The unique identifier for this security group rule. + ID *string `json:"id" validate:"required"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + Remote SecurityGroupRuleRemoteIntf `json:"remote,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the SecurityGroupRuleSecurityGroupRuleProtocolAll.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRuleSecurityGroupRuleProtocolAllDirectionInboundConst = "inbound" + SecurityGroupRuleSecurityGroupRuleProtocolAllDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRuleSecurityGroupRuleProtocolAll.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. 
Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRuleSecurityGroupRuleProtocolAllIPVersionIpv4Const = "ipv4" +) + +// Constants associated with the SecurityGroupRuleSecurityGroupRuleProtocolAll.Protocol property. +// The protocol to enforce. +const ( + SecurityGroupRuleSecurityGroupRuleProtocolAllProtocolAllConst = "all" +) + +func (*SecurityGroupRuleSecurityGroupRuleProtocolAll) isaSecurityGroupRule() bool { + return true +} + +// UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolAll unmarshals an instance of SecurityGroupRuleSecurityGroupRuleProtocolAll from the specified map of raw messages. +func UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolAll(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleSecurityGroupRuleProtocolAll) + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "remote", &obj.Remote, UnmarshalSecurityGroupRuleRemote) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleSecurityGroupRuleProtocolIcmp : When `protocol` is `icmp`, then the rule may also contain fields to specify an ICMP `type` and `code`. Field `code` +// may only be specified if `type` is also specified. If type is not specified, then traffic is allowed for all types +// and codes. If type is specified and code is not specified, then traffic is allowed with the specified type for all +// codes. +// This model "extends" SecurityGroupRule +type SecurityGroupRuleSecurityGroupRuleProtocolIcmp struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The unique identifier for this security group rule. + ID *string `json:"id" validate:"required"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + Remote SecurityGroupRuleRemoteIntf `json:"remote,omitempty"` + + // The ICMP traffic code to allow. + Code *int64 `json:"code,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` + + // The ICMP traffic type to allow. + Type *int64 `json:"type,omitempty"` +} + +// Constants associated with the SecurityGroupRuleSecurityGroupRuleProtocolIcmp.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRuleSecurityGroupRuleProtocolIcmpDirectionInboundConst = "inbound" + SecurityGroupRuleSecurityGroupRuleProtocolIcmpDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRuleSecurityGroupRuleProtocolIcmp.IPVersion property. +// The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. 
Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRuleSecurityGroupRuleProtocolIcmpIPVersionIpv4Const = "ipv4" +) + +// Constants associated with the SecurityGroupRuleSecurityGroupRuleProtocolIcmp.Protocol property. +// The protocol to enforce. +const ( + SecurityGroupRuleSecurityGroupRuleProtocolIcmpProtocolIcmpConst = "icmp" +) + +func (*SecurityGroupRuleSecurityGroupRuleProtocolIcmp) isaSecurityGroupRule() bool { + return true +} + +// UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolIcmp unmarshals an instance of SecurityGroupRuleSecurityGroupRuleProtocolIcmp from the specified map of raw messages. +func UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolIcmp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleSecurityGroupRuleProtocolIcmp) + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "remote", &obj.Remote, UnmarshalSecurityGroupRuleRemote) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "type", &obj.Type) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleSecurityGroupRuleProtocolTcpudp : If `protocol` is either `tcp` or `udp`, then the rule may also contain `port_min` and +// `port_max`. Either both should be set, or neither. When neither is set then traffic is allowed on all ports. For a +// single port, set both to the same value. +// This model "extends" SecurityGroupRule +type SecurityGroupRuleSecurityGroupRuleProtocolTcpudp struct { + // The direction of traffic to enforce, either `inbound` or `outbound`. + Direction *string `json:"direction" validate:"required"` + + // The unique identifier for this security group rule. + ID *string `json:"id" validate:"required"` + + // The IP version to enforce. The format of `remote.address` or `remote.cidr_block` must match this field, if they are + // used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network + // interfaces) in that group matching this IP version. + IPVersion *string `json:"ip_version,omitempty"` + + Remote SecurityGroupRuleRemoteIntf `json:"remote,omitempty"` + + // The inclusive upper bound of TCP/UDP port range. + PortMax *int64 `json:"port_max,omitempty"` + + // The inclusive lower bound of TCP/UDP port range. + PortMin *int64 `json:"port_min,omitempty"` + + // The protocol to enforce. + Protocol *string `json:"protocol" validate:"required"` +} + +// Constants associated with the SecurityGroupRuleSecurityGroupRuleProtocolTcpudp.Direction property. +// The direction of traffic to enforce, either `inbound` or `outbound`. +const ( + SecurityGroupRuleSecurityGroupRuleProtocolTcpudpDirectionInboundConst = "inbound" + SecurityGroupRuleSecurityGroupRuleProtocolTcpudpDirectionOutboundConst = "outbound" +) + +// Constants associated with the SecurityGroupRuleSecurityGroupRuleProtocolTcpudp.IPVersion property. +// The IP version to enforce. 
The format of `remote.address` or `remote.cidr_block` must match this field, if they are +// used. Alternatively, if `remote` references a security group, then this rule only applies to IP addresses (network +// interfaces) in that group matching this IP version. +const ( + SecurityGroupRuleSecurityGroupRuleProtocolTcpudpIPVersionIpv4Const = "ipv4" +) + +// Constants associated with the SecurityGroupRuleSecurityGroupRuleProtocolTcpudp.Protocol property. +// The protocol to enforce. +const ( + SecurityGroupRuleSecurityGroupRuleProtocolTcpudpProtocolTCPConst = "tcp" + SecurityGroupRuleSecurityGroupRuleProtocolTcpudpProtocolUDPConst = "udp" +) + +func (*SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) isaSecurityGroupRule() bool { + return true +} + +// UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolTcpudp unmarshals an instance of SecurityGroupRuleSecurityGroupRuleProtocolTcpudp from the specified map of raw messages. +func UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolTcpudp(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) + err = core.UnmarshalPrimitive(m, "direction", &obj.Direction) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ip_version", &obj.IPVersion) + if err != nil { + return + } + err = core.UnmarshalModel(m, "remote", &obj.Remote, UnmarshalSecurityGroupRuleRemote) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_max", &obj.PortMax) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "port_min", &obj.PortMin) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "protocol", &obj.Protocol) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetIdentityByCRN : SubnetIdentityByCRN struct +// This model "extends" SubnetIdentity +type SubnetIdentityByCRN struct { + // The CRN for this subnet. + CRN *string `json:"crn" validate:"required"` +} + +// NewSubnetIdentityByCRN : Instantiate SubnetIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewSubnetIdentityByCRN(crn string) (model *SubnetIdentityByCRN, err error) { + model = &SubnetIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SubnetIdentityByCRN) isaSubnetIdentity() bool { + return true +} + +// UnmarshalSubnetIdentityByCRN unmarshals an instance of SubnetIdentityByCRN from the specified map of raw messages. +func UnmarshalSubnetIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetIdentityByHref : SubnetIdentityByHref struct +// This model "extends" SubnetIdentity +type SubnetIdentityByHref struct { + // The URL for this subnet. 
+ Href *string `json:"href" validate:"required"` +} + +// NewSubnetIdentityByHref : Instantiate SubnetIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewSubnetIdentityByHref(href string) (model *SubnetIdentityByHref, err error) { + model = &SubnetIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SubnetIdentityByHref) isaSubnetIdentity() bool { + return true +} + +// UnmarshalSubnetIdentityByHref unmarshals an instance of SubnetIdentityByHref from the specified map of raw messages. +func UnmarshalSubnetIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetIdentityByID : SubnetIdentityByID struct +// This model "extends" SubnetIdentity +type SubnetIdentityByID struct { + // The unique identifier for this subnet. + ID *string `json:"id" validate:"required"` +} + +// NewSubnetIdentityByID : Instantiate SubnetIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewSubnetIdentityByID(id string) (model *SubnetIdentityByID, err error) { + model = &SubnetIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SubnetIdentityByID) isaSubnetIdentity() bool { + return true +} + +// UnmarshalSubnetIdentityByID unmarshals an instance of SubnetIdentityByID from the specified map of raw messages. +func UnmarshalSubnetIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetPrototypeSubnetByCIDR : SubnetPrototypeSubnetByCIDR struct +// This model "extends" SubnetPrototype +type SubnetPrototypeSubnetByCIDR struct { + // The user-defined name for this subnet. Names must be unique within the VPC the subnet resides in. If unspecified, + // the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The network ACL to use for this subnet. If unspecified, the default network ACL for the VPC is used. + NetworkACL NetworkACLIdentityIntf `json:"network_acl,omitempty"` + + // The public gateway to handle internet bound traffic for this subnet. + PublicGateway PublicGatewayIdentityIntf `json:"public_gateway,omitempty"` + + // The VPC the subnet is to be a part of. + VPC VPCIdentityIntf `json:"vpc" validate:"required"` + + // The IPv4 range of the subnet, expressed in CIDR format. The prefix length of the subnet's CIDR must be between `/8` + // (16,777,216 addresses) and `/29` (8 addresses). The IPv4 range of the subnet's CIDR must fall within an existing + // address prefix in the VPC. The subnet will be created in the zone of the address prefix that contains the IPv4 CIDR. + // If zone is specified, it must match the zone of the address prefix that contains the subnet's IPv4 CIDR. + Ipv4CIDRBlock *string `json:"ipv4_cidr_block" validate:"required"` + + // The zone the subnet is to reside in. 
+ Zone ZoneIdentityIntf `json:"zone,omitempty"` +} + +// NewSubnetPrototypeSubnetByCIDR : Instantiate SubnetPrototypeSubnetByCIDR (Generic Model Constructor) +func (*VpcClassicV1) NewSubnetPrototypeSubnetByCIDR(vpc VPCIdentityIntf, ipv4CIDRBlock string) (model *SubnetPrototypeSubnetByCIDR, err error) { + model = &SubnetPrototypeSubnetByCIDR{ + VPC: vpc, + Ipv4CIDRBlock: core.StringPtr(ipv4CIDRBlock), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SubnetPrototypeSubnetByCIDR) isaSubnetPrototype() bool { + return true +} + +// UnmarshalSubnetPrototypeSubnetByCIDR unmarshals an instance of SubnetPrototypeSubnetByCIDR from the specified map of raw messages. +func UnmarshalSubnetPrototypeSubnetByCIDR(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetPrototypeSubnetByCIDR) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_acl", &obj.NetworkACL, UnmarshalNetworkACLIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "public_gateway", &obj.PublicGateway, UnmarshalPublicGatewayIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "ipv4_cidr_block", &obj.Ipv4CIDRBlock) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SubnetPrototypeSubnetByTotalCount : SubnetPrototypeSubnetByTotalCount struct +// This model "extends" SubnetPrototype +type SubnetPrototypeSubnetByTotalCount struct { + // The user-defined name for this subnet. Names must be unique within the VPC the subnet resides in. If unspecified, + // the name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The network ACL to use for this subnet. If unspecified, the default network ACL for the VPC is used. + NetworkACL NetworkACLIdentityIntf `json:"network_acl,omitempty"` + + // The public gateway to handle internet bound traffic for this subnet. + PublicGateway PublicGatewayIdentityIntf `json:"public_gateway,omitempty"` + + // The VPC the subnet is to be a part of. + VPC VPCIdentityIntf `json:"vpc" validate:"required"` + + // The total number of IPv4 addresses required. Must be a power of 2. The VPC must have a default address prefix in the + // specified zone, and that prefix must have a free CIDR range with at least this number of addresses. + TotalIpv4AddressCount *int64 `json:"total_ipv4_address_count" validate:"required"` + + // The zone the subnet is to reside in. 
+ Zone ZoneIdentityIntf `json:"zone" validate:"required"` +} + +// NewSubnetPrototypeSubnetByTotalCount : Instantiate SubnetPrototypeSubnetByTotalCount (Generic Model Constructor) +func (*VpcClassicV1) NewSubnetPrototypeSubnetByTotalCount(vpc VPCIdentityIntf, totalIpv4AddressCount int64, zone ZoneIdentityIntf) (model *SubnetPrototypeSubnetByTotalCount, err error) { + model = &SubnetPrototypeSubnetByTotalCount{ + VPC: vpc, + TotalIpv4AddressCount: core.Int64Ptr(totalIpv4AddressCount), + Zone: zone, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SubnetPrototypeSubnetByTotalCount) isaSubnetPrototype() bool { + return true +} + +// UnmarshalSubnetPrototypeSubnetByTotalCount unmarshals an instance of SubnetPrototypeSubnetByTotalCount from the specified map of raw messages. +func UnmarshalSubnetPrototypeSubnetByTotalCount(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SubnetPrototypeSubnetByTotalCount) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_acl", &obj.NetworkACL, UnmarshalNetworkACLIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "public_gateway", &obj.PublicGateway, UnmarshalPublicGatewayIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_ipv4_address_count", &obj.TotalIpv4AddressCount) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPCIdentityByCRN : VPCIdentityByCRN struct +// This model "extends" VPCIdentity +type VPCIdentityByCRN struct { + // The CRN for this VPC. + CRN *string `json:"crn" validate:"required"` +} + +// NewVPCIdentityByCRN : Instantiate VPCIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewVPCIdentityByCRN(crn string) (model *VPCIdentityByCRN, err error) { + model = &VPCIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VPCIdentityByCRN) isaVPCIdentity() bool { + return true +} + +// UnmarshalVPCIdentityByCRN unmarshals an instance of VPCIdentityByCRN from the specified map of raw messages. +func UnmarshalVPCIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPCIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPCIdentityByHref : VPCIdentityByHref struct +// This model "extends" VPCIdentity +type VPCIdentityByHref struct { + // The URL for this VPC. + Href *string `json:"href" validate:"required"` +} + +// NewVPCIdentityByHref : Instantiate VPCIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewVPCIdentityByHref(href string) (model *VPCIdentityByHref, err error) { + model = &VPCIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VPCIdentityByHref) isaVPCIdentity() bool { + return true +} + +// UnmarshalVPCIdentityByHref unmarshals an instance of VPCIdentityByHref from the specified map of raw messages. 
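+
+// Illustrative sketch, not part of the generated SDK: the identity models
+// above are meant to be composed into prototypes via these constructors.
+// Assuming a configured service client named `vpcService` and placeholder
+// identifiers, a subnet prototype could be built as:
+//
+//   vpc, _ := vpcService.NewVPCIdentityByID("vpc-1234")
+//   zone, _ := vpcService.NewZoneIdentityByName("us-south-1")
+//   // the total address count must be a power of 2, per the field doc above
+//   prototype, _ := vpcService.NewSubnetPrototypeSubnetByTotalCount(vpc, 256, zone)
+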
+func UnmarshalVPCIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPCIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPCIdentityByID : VPCIdentityByID struct +// This model "extends" VPCIdentity +type VPCIdentityByID struct { + // The unique identifier for this VPC. + ID *string `json:"id" validate:"required"` +} + +// NewVPCIdentityByID : Instantiate VPCIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewVPCIdentityByID(id string) (model *VPCIdentityByID, err error) { + model = &VPCIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VPCIdentityByID) isaVPCIdentity() bool { + return true +} + +// UnmarshalVPCIdentityByID unmarshals an instance of VPCIdentityByID from the specified map of raw messages. +func UnmarshalVPCIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPCIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayConnectionPolicyMode : VPNGatewayConnectionPolicyMode struct +// This model "extends" VPNGatewayConnection +type VPNGatewayConnectionPolicyMode struct { + // If set to false, the VPN gateway connection is shut down. + AdminStateUp *bool `json:"admin_state_up" validate:"required"` + + // The authentication mode. Only `psk` is currently supported. + AuthenticationMode *string `json:"authentication_mode" validate:"required"` + + // The date and time that this VPN gateway connection was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + DeadPeerDetection *VPNGatewayConnectionDpd `json:"dead_peer_detection" validate:"required"` + + // The VPN connection's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this VPN gateway connection. + ID *string `json:"id" validate:"required"` + + // Optional IKE policy configuration. The absence of a policy indicates autonegotiation. + IkePolicy *IkePolicyReference `json:"ike_policy,omitempty"` + + // Optional IPsec policy configuration. The absence of a policy indicates autonegotiation. + IpsecPolicy *IPsecPolicyReference `json:"ipsec_policy,omitempty"` + + // The mode of the VPN gateway. + Mode *string `json:"mode" validate:"required"` + + // The user-defined name for this VPN gateway connection. + Name *string `json:"name" validate:"required"` + + // The IP address of the peer VPN gateway. + PeerAddress *string `json:"peer_address" validate:"required"` + + // The preshared key. + Psk *string `json:"psk" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The status of a VPN gateway connection. + Status *string `json:"status" validate:"required"` + + // A collection of local CIDRs for this resource. + LocalCIDRs []string `json:"local_cidrs" validate:"required"` + + // A collection of peer CIDRs for this resource. + PeerCIDRs []string `json:"peer_cidrs" validate:"required"` +} + +// Constants associated with the VPNGatewayConnectionPolicyMode.AuthenticationMode property. +// The authentication mode. Only `psk` is currently supported. 
+const ( + VPNGatewayConnectionPolicyModeAuthenticationModePskConst = "psk" +) + +// Constants associated with the VPNGatewayConnectionPolicyMode.Mode property. +// The mode of the VPN gateway. +const ( + VPNGatewayConnectionPolicyModeModePolicyConst = "policy" + VPNGatewayConnectionPolicyModeModeRouteConst = "route" +) + +// Constants associated with the VPNGatewayConnectionPolicyMode.ResourceType property. +// The resource type. +const ( + VPNGatewayConnectionPolicyModeResourceTypeVPNGatewayConnectionConst = "vpn_gateway_connection" +) + +// Constants associated with the VPNGatewayConnectionPolicyMode.Status property. +// The status of a VPN gateway connection. +const ( + VPNGatewayConnectionPolicyModeStatusDownConst = "down" + VPNGatewayConnectionPolicyModeStatusUpConst = "up" +) + +func (*VPNGatewayConnectionPolicyMode) isaVPNGatewayConnection() bool { + return true +} + +// UnmarshalVPNGatewayConnectionPolicyMode unmarshals an instance of VPNGatewayConnectionPolicyMode from the specified map of raw messages. +func UnmarshalVPNGatewayConnectionPolicyMode(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayConnectionPolicyMode) + err = core.UnmarshalPrimitive(m, "admin_state_up", &obj.AdminStateUp) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "authentication_mode", &obj.AuthenticationMode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "dead_peer_detection", &obj.DeadPeerDetection, UnmarshalVPNGatewayConnectionDpd) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ike_policy", &obj.IkePolicy, UnmarshalIkePolicyReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ipsec_policy", &obj.IpsecPolicy, UnmarshalIPsecPolicyReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "peer_address", &obj.PeerAddress) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "psk", &obj.Psk) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "local_cidrs", &obj.LocalCIDRs) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "peer_cidrs", &obj.PeerCIDRs) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype : VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype struct +// This model "extends" VPNGatewayConnectionPrototype +type VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype struct { + // If set to false, the VPN gateway connection is shut down. + AdminStateUp *bool `json:"admin_state_up,omitempty"` + + DeadPeerDetection *VPNGatewayConnectionDpdPrototype `json:"dead_peer_detection,omitempty"` + + // Optional IKE policy configuration. The absence of a policy indicates autonegotiation. 
+ IkePolicy IkePolicyIdentityIntf `json:"ike_policy,omitempty"` + + // Optional IPsec policy configuration. The absence of a policy indicates autonegotiation. + IpsecPolicy IPsecPolicyIdentityIntf `json:"ipsec_policy,omitempty"` + + // The user-defined name for this VPN gateway connection. + Name *string `json:"name,omitempty"` + + // The IP address of the peer VPN gateway. + PeerAddress *string `json:"peer_address" validate:"required"` + + // The preshared key. + Psk *string `json:"psk" validate:"required"` + + // A collection of local CIDRs for this resource. + LocalCIDRs []string `json:"local_cidrs" validate:"required"` + + // A collection of peer CIDRs for this resource. + PeerCIDRs []string `json:"peer_cidrs" validate:"required"` +} + +// NewVPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype : Instantiate VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype (Generic Model Constructor) +func (*VpcClassicV1) NewVPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype(peerAddress string, psk string, localCIDRs []string, peerCIDRs []string) (model *VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype, err error) { + model = &VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype{ + PeerAddress: core.StringPtr(peerAddress), + Psk: core.StringPtr(psk), + LocalCIDRs: localCIDRs, + PeerCIDRs: peerCIDRs, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype) isaVPNGatewayConnectionPrototype() bool { + return true +} + +// UnmarshalVPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype unmarshals an instance of VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype from the specified map of raw messages. +func UnmarshalVPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype) + err = core.UnmarshalPrimitive(m, "admin_state_up", &obj.AdminStateUp) + if err != nil { + return + } + err = core.UnmarshalModel(m, "dead_peer_detection", &obj.DeadPeerDetection, UnmarshalVPNGatewayConnectionDpdPrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ike_policy", &obj.IkePolicy, UnmarshalIkePolicyIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "ipsec_policy", &obj.IpsecPolicy, UnmarshalIPsecPolicyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "peer_address", &obj.PeerAddress) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "psk", &obj.Psk) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "local_cidrs", &obj.LocalCIDRs) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "peer_cidrs", &obj.PeerCIDRs) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayPolicyMode : VPNGatewayPolicyMode struct +// This model "extends" VPNGateway +type VPNGatewayPolicyMode struct { + // Collection of references to VPN gateway connections. + Connections []VPNGatewayConnectionReference `json:"connections" validate:"required"` + + // The date and time that this VPN gateway was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The VPN gateway's CRN. 
+ CRN *string `json:"crn" validate:"required"` + + // The VPN gateway's canonical URL. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this VPN gateway. + ID *string `json:"id" validate:"required"` + + // Collection of VPN gateway members. + Members []VPNGatewayMember `json:"members" validate:"required"` + + // The user-defined name for this VPN gateway. + Name *string `json:"name" validate:"required"` + + // The resource group for this VPN gateway. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The status of the VPN gateway. + Status *string `json:"status" validate:"required"` + + Subnet *SubnetReference `json:"subnet" validate:"required"` + + // Policy mode VPN gateway. + Mode *string `json:"mode" validate:"required"` +} + +// Constants associated with the VPNGatewayPolicyMode.ResourceType property. +// The resource type. +const ( + VPNGatewayPolicyModeResourceTypeVPNGatewayConst = "vpn_gateway" +) + +// Constants associated with the VPNGatewayPolicyMode.Status property. +// The status of the VPN gateway. +const ( + VPNGatewayPolicyModeStatusAvailableConst = "available" + VPNGatewayPolicyModeStatusDeletingConst = "deleting" + VPNGatewayPolicyModeStatusFailedConst = "failed" + VPNGatewayPolicyModeStatusPendingConst = "pending" +) + +// Constants associated with the VPNGatewayPolicyMode.Mode property. +// Policy mode VPN gateway. +const ( + VPNGatewayPolicyModeModePolicyConst = "policy" +) + +func (*VPNGatewayPolicyMode) isaVPNGateway() bool { + return true +} + +// UnmarshalVPNGatewayPolicyMode unmarshals an instance of VPNGatewayPolicyMode from the specified map of raw messages. +func UnmarshalVPNGatewayPolicyMode(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayPolicyMode) + err = core.UnmarshalModel(m, "connections", &obj.Connections, UnmarshalVPNGatewayConnectionReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "members", &obj.Members, UnmarshalVPNGatewayMember) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnet", &obj.Subnet, UnmarshalSubnetReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VPNGatewayPrototypeVPNGatewayPolicyModePrototype : VPNGatewayPrototypeVPNGatewayPolicyModePrototype struct +// This model "extends" VPNGatewayPrototype +type VPNGatewayPrototypeVPNGatewayPolicyModePrototype struct { + // The user-defined name for this VPN gateway. 
+ Name *string `json:"name,omitempty"` + + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + Subnet SubnetIdentityIntf `json:"subnet" validate:"required"` + + // Policy mode VPN gateway. + Mode *string `json:"mode,omitempty"` +} + +// Constants associated with the VPNGatewayPrototypeVPNGatewayPolicyModePrototype.Mode property. +// Policy mode VPN gateway. +const ( + VPNGatewayPrototypeVPNGatewayPolicyModePrototypeModePolicyConst = "policy" +) + +func (*VPNGatewayPrototypeVPNGatewayPolicyModePrototype) isaVPNGatewayPrototype() bool { + return true +} + +// UnmarshalVPNGatewayPrototypeVPNGatewayPolicyModePrototype unmarshals an instance of VPNGatewayPrototypeVPNGatewayPolicyModePrototype from the specified map of raw messages. +func UnmarshalVPNGatewayPrototypeVPNGatewayPolicyModePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VPNGatewayPrototypeVPNGatewayPolicyModePrototype) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "subnet", &obj.Subnet, UnmarshalSubnetIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "mode", &obj.Mode) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext : VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext struct +// Models which "extend" this model: +// - VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity +// This model "extends" VolumeAttachmentVolumePrototypeInstanceContext +type VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext struct { + // The identity of the root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` + + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity,omitempty"` +} + +func (*VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext) isaVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext() bool { + return true +} + +type VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextIntf interface { + VolumeAttachmentVolumePrototypeInstanceContextIntf + isaVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext() bool +} + +func (*VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext) isaVolumeAttachmentVolumePrototypeInstanceContext() bool { + return true +} + +// UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext unmarshals an instance of VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext from the specified map of raw messages. 
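+
+// Illustrative sketch, not part of the generated SDK: the `isa...` methods
+// above implement a discriminated union. A concrete variant such as the
+// ...VolumeByCapacity model (defined later in this file) satisfies both the
+// inner and outer `Intf` interfaces, so one constructed value can be passed
+// wherever a VolumeAttachmentVolumePrototypeInstanceContextIntf is expected.
+// Assuming a service client named `vpcService` and placeholder values:
+//
+//   profile, _ := vpcService.NewVolumeProfileIdentityByName("general-purpose")
+//   vol, _ := vpcService.NewVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity(profile, 100)
+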
+func UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext) + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeIdentityByCRN : VolumeIdentityByCRN struct +// This model "extends" VolumeIdentity +type VolumeIdentityByCRN struct { + // The CRN for this volume. + CRN *string `json:"crn" validate:"required"` +} + +// NewVolumeIdentityByCRN : Instantiate VolumeIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewVolumeIdentityByCRN(crn string) (model *VolumeIdentityByCRN, err error) { + model = &VolumeIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeIdentityByCRN) isaVolumeIdentity() bool { + return true +} + +// UnmarshalVolumeIdentityByCRN unmarshals an instance of VolumeIdentityByCRN from the specified map of raw messages. +func UnmarshalVolumeIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeIdentityByHref : VolumeIdentityByHref struct +// This model "extends" VolumeIdentity +type VolumeIdentityByHref struct { + // The URL for this volume. + Href *string `json:"href" validate:"required"` +} + +// NewVolumeIdentityByHref : Instantiate VolumeIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewVolumeIdentityByHref(href string) (model *VolumeIdentityByHref, err error) { + model = &VolumeIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeIdentityByHref) isaVolumeIdentity() bool { + return true +} + +// UnmarshalVolumeIdentityByHref unmarshals an instance of VolumeIdentityByHref from the specified map of raw messages. +func UnmarshalVolumeIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeIdentityByID : VolumeIdentityByID struct +// This model "extends" VolumeIdentity +type VolumeIdentityByID struct { + // The unique identifier for this volume. 
+ ID *string `json:"id" validate:"required"` +} + +// NewVolumeIdentityByID : Instantiate VolumeIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewVolumeIdentityByID(id string) (model *VolumeIdentityByID, err error) { + model = &VolumeIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeIdentityByID) isaVolumeIdentity() bool { + return true +} + +// UnmarshalVolumeIdentityByID unmarshals an instance of VolumeIdentityByID from the specified map of raw messages. +func UnmarshalVolumeIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeProfileIdentityByHref : VolumeProfileIdentityByHref struct +// This model "extends" VolumeProfileIdentity +type VolumeProfileIdentityByHref struct { + // The URL for this volume profile. + Href *string `json:"href" validate:"required"` +} + +// NewVolumeProfileIdentityByHref : Instantiate VolumeProfileIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewVolumeProfileIdentityByHref(href string) (model *VolumeProfileIdentityByHref, err error) { + model = &VolumeProfileIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeProfileIdentityByHref) isaVolumeProfileIdentity() bool { + return true +} + +// UnmarshalVolumeProfileIdentityByHref unmarshals an instance of VolumeProfileIdentityByHref from the specified map of raw messages. +func UnmarshalVolumeProfileIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeProfileIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeProfileIdentityByName : VolumeProfileIdentityByName struct +// This model "extends" VolumeProfileIdentity +type VolumeProfileIdentityByName struct { + // The globally unique name for this volume profile. + Name *string `json:"name" validate:"required"` +} + +// NewVolumeProfileIdentityByName : Instantiate VolumeProfileIdentityByName (Generic Model Constructor) +func (*VpcClassicV1) NewVolumeProfileIdentityByName(name string) (model *VolumeProfileIdentityByName, err error) { + model = &VolumeProfileIdentityByName{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeProfileIdentityByName) isaVolumeProfileIdentity() bool { + return true +} + +// UnmarshalVolumeProfileIdentityByName unmarshals an instance of VolumeProfileIdentityByName from the specified map of raw messages. +func UnmarshalVolumeProfileIdentityByName(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeProfileIdentityByName) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumePrototypeVolumeByCapacity : VolumePrototypeVolumeByCapacity struct +// This model "extends" VolumePrototype +type VolumePrototypeVolumeByCapacity struct { + // The identity of the root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. 
+ EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` + + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + // The location of the volume. + Zone ZoneIdentityIntf `json:"zone" validate:"required"` + + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity" validate:"required"` +} + +// NewVolumePrototypeVolumeByCapacity : Instantiate VolumePrototypeVolumeByCapacity (Generic Model Constructor) +func (*VpcClassicV1) NewVolumePrototypeVolumeByCapacity(profile VolumeProfileIdentityIntf, zone ZoneIdentityIntf, capacity int64) (model *VolumePrototypeVolumeByCapacity, err error) { + model = &VolumePrototypeVolumeByCapacity{ + Profile: profile, + Zone: zone, + Capacity: core.Int64Ptr(capacity), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumePrototypeVolumeByCapacity) isaVolumePrototype() bool { + return true +} + +// UnmarshalVolumePrototypeVolumeByCapacity unmarshals an instance of VolumePrototypeVolumeByCapacity from the specified map of raw messages. +func UnmarshalVolumePrototypeVolumeByCapacity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumePrototypeVolumeByCapacity) + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZoneIdentityByHref : ZoneIdentityByHref struct +// This model "extends" ZoneIdentity +type ZoneIdentityByHref struct { + // The URL for this zone. + Href *string `json:"href" validate:"required"` +} + +// NewZoneIdentityByHref : Instantiate ZoneIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewZoneIdentityByHref(href string) (model *ZoneIdentityByHref, err error) { + model = &ZoneIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ZoneIdentityByHref) isaZoneIdentity() bool { + return true +} + +// UnmarshalZoneIdentityByHref unmarshals an instance of ZoneIdentityByHref from the specified map of raw messages. 
+func UnmarshalZoneIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZoneIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// ZoneIdentityByName : ZoneIdentityByName struct +// This model "extends" ZoneIdentity +type ZoneIdentityByName struct { + // The globally unique name for this zone. + Name *string `json:"name" validate:"required"` +} + +// NewZoneIdentityByName : Instantiate ZoneIdentityByName (Generic Model Constructor) +func (*VpcClassicV1) NewZoneIdentityByName(name string) (model *ZoneIdentityByName, err error) { + model = &ZoneIdentityByName{ + Name: core.StringPtr(name), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ZoneIdentityByName) isaZoneIdentity() bool { + return true +} + +// UnmarshalZoneIdentityByName unmarshals an instance of ZoneIdentityByName from the specified map of raw messages. +func UnmarshalZoneIdentityByName(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ZoneIdentityByName) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref : LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref struct +// This model "extends" LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity +type LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref struct { + // The pool's canonical URL. + Href *string `json:"href" validate:"required"` +} + +// NewLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref : Instantiate LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref(href string) (model *LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref, err error) { + model = &LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref) isaLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity() bool { + return true +} + +func (*LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref) isaLoadBalancerListenerPolicyTargetPatch() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref unmarshals an instance of LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref from the specified map of raw messages. 
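+
+// Illustrative sketch, not part of the generated SDK: the Unmarshal helpers
+// in this file all follow the same pattern, and their `result` argument must
+// be a pointer to a pointer (or to an interface value) so that the final
+// reflect-based Set succeeds. For example, with UnmarshalZoneIdentityByName
+// defined above:
+//
+//   raw := map[string]json.RawMessage{"name": json.RawMessage(`"us-south-1"`)}
+//   var zone *ZoneIdentityByName
+//   err := UnmarshalZoneIdentityByName(raw, &zone)
+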
+func UnmarshalLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID : LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID struct +// This model "extends" LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity +type LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID struct { + // The unique identifier for this load balancer pool. + ID *string `json:"id" validate:"required"` +} + +// NewLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID : Instantiate LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID(id string) (model *LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID, err error) { + model = &LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID) isaLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity() bool { + return true +} + +func (*LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID) isaLoadBalancerListenerPolicyTargetPatch() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID unmarshals an instance of LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref : LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref struct +// This model "extends" LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity +type LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref struct { + // The pool's canonical URL. 
+ Href *string `json:"href" validate:"required"` +} + +// NewLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref : Instantiate LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref(href string) (model *LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref, err error) { + model = &LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref) isaLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity() bool { + return true +} + +func (*LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref) isaLoadBalancerListenerPolicyTargetPrototype() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref unmarshals an instance of LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID : LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID struct +// This model "extends" LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity +type LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID struct { + // The unique identifier for this load balancer pool. 
+ ID *string `json:"id" validate:"required"` +} + +// NewLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID : Instantiate LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID(id string) (model *LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID, err error) { + model = &LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID) isaLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentity() bool { + return true +} + +func (*LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID) isaLoadBalancerListenerPolicyTargetPrototype() bool { + return true +} + +// UnmarshalLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID unmarshals an instance of LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID from the specified map of raw messages. +func UnmarshalLoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerListenerPolicyTargetPrototypeLoadBalancerPoolIdentityLoadBalancerPoolIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress : PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress struct +// This model "extends" PublicGatewayFloatingIPPrototypeFloatingIPIdentity +type PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress struct { + // The globally unique IP address. + Address *string `json:"address" validate:"required"` +} + +// NewPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress : Instantiate PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress (Generic Model Constructor) +func (*VpcClassicV1) NewPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress(address string) (model *PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress, err error) { + model = &PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress{ + Address: core.StringPtr(address), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress) isaPublicGatewayFloatingIPPrototypeFloatingIPIdentity() bool { + return true +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress) isaPublicGatewayFloatingIPPrototype() bool { + return true +} + +// UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress unmarshals an instance of PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress from the specified map of raw messages. 
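+
+// Illustrative sketch, not part of the generated SDK: because the floating IP
+// identity model above implements both
+// isaPublicGatewayFloatingIPPrototypeFloatingIPIdentity and
+// isaPublicGatewayFloatingIPPrototype, a single constructed value is accepted
+// wherever either union interface is expected. With a placeholder address and
+// an assumed service client named `vpcService`:
+//
+//   fip, _ := vpcService.NewPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress("203.0.113.10")
+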
+func UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByAddress) + err = core.UnmarshalPrimitive(m, "address", &obj.Address) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN : PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN struct +// This model "extends" PublicGatewayFloatingIPPrototypeFloatingIPIdentity +type PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN struct { + // The CRN for this floating IP. + CRN *string `json:"crn" validate:"required"` +} + +// NewPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN : Instantiate PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN(crn string) (model *PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN, err error) { + model = &PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN) isaPublicGatewayFloatingIPPrototypeFloatingIPIdentity() bool { + return true +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN) isaPublicGatewayFloatingIPPrototype() bool { + return true +} + +// UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN unmarshals an instance of PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN from the specified map of raw messages. +func UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref : PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref struct +// This model "extends" PublicGatewayFloatingIPPrototypeFloatingIPIdentity +type PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref struct { + // The URL for this floating IP. 
+ Href *string `json:"href" validate:"required"` +} + +// NewPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref : Instantiate PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref(href string) (model *PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref, err error) { + model = &PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref) isaPublicGatewayFloatingIPPrototypeFloatingIPIdentity() bool { + return true +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref) isaPublicGatewayFloatingIPPrototype() bool { + return true +} + +// UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref unmarshals an instance of PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref from the specified map of raw messages. +func UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID : PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID struct +// This model "extends" PublicGatewayFloatingIPPrototypeFloatingIPIdentity +type PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID struct { + // The unique identifier for this floating IP. + ID *string `json:"id" validate:"required"` +} + +// NewPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID : Instantiate PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID(id string) (model *PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID, err error) { + model = &PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID) isaPublicGatewayFloatingIPPrototypeFloatingIPIdentity() bool { + return true +} + +func (*PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID) isaPublicGatewayFloatingIPPrototype() bool { + return true +} + +// UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID unmarshals an instance of PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID from the specified map of raw messages. 
+func UnmarshalPublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(PublicGatewayFloatingIPPrototypeFloatingIPIdentityFloatingIPIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN : SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN struct +// This model "extends" SecurityGroupRuleRemotePatchSecurityGroupIdentity +type SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN struct { + // The security group's CRN. + CRN *string `json:"crn" validate:"required"` +} + +// NewSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN : Instantiate SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN(crn string) (model *SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN, err error) { + model = &SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN) isaSecurityGroupRuleRemotePatchSecurityGroupIdentity() bool { + return true +} + +func (*SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN) isaSecurityGroupRuleRemotePatch() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN unmarshals an instance of SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref : SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref struct +// This model "extends" SecurityGroupRuleRemotePatchSecurityGroupIdentity +type SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref struct { + // The security group's canonical URL. 
+ Href *string `json:"href" validate:"required"` +} + +// NewSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref : Instantiate SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref(href string) (model *SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref, err error) { + model = &SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref) isaSecurityGroupRuleRemotePatchSecurityGroupIdentity() bool { + return true +} + +func (*SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref) isaSecurityGroupRuleRemotePatch() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref unmarshals an instance of SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID : SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID struct +// This model "extends" SecurityGroupRuleRemotePatchSecurityGroupIdentity +type SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID struct { + // The unique identifier for this security group. + ID *string `json:"id" validate:"required"` +} + +// NewSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID : Instantiate SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID(id string) (model *SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID, err error) { + model = &SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID) isaSecurityGroupRuleRemotePatchSecurityGroupIdentity() bool { + return true +} + +func (*SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID) isaSecurityGroupRuleRemotePatch() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID unmarshals an instance of SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID from the specified map of raw messages. 
+func UnmarshalSecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePatchSecurityGroupIdentitySecurityGroupIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN : SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN struct +// This model "extends" SecurityGroupRuleRemotePrototypeSecurityGroupIdentity +type SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN struct { + // The security group's CRN. + CRN *string `json:"crn" validate:"required"` +} + +// NewSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN : Instantiate SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN(crn string) (model *SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN, err error) { + model = &SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN) isaSecurityGroupRuleRemotePrototypeSecurityGroupIdentity() bool { + return true +} + +func (*SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN) isaSecurityGroupRuleRemotePrototype() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN unmarshals an instance of SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref : SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref struct +// This model "extends" SecurityGroupRuleRemotePrototypeSecurityGroupIdentity +type SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref struct { + // The security group's canonical URL. 
+ Href *string `json:"href" validate:"required"` +} + +// NewSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref : Instantiate SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref(href string) (model *SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref, err error) { + model = &SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref) isaSecurityGroupRuleRemotePrototypeSecurityGroupIdentity() bool { + return true +} + +func (*SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref) isaSecurityGroupRuleRemotePrototype() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref unmarshals an instance of SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref from the specified map of raw messages. +func UnmarshalSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID : SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID struct +// This model "extends" SecurityGroupRuleRemotePrototypeSecurityGroupIdentity +type SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID struct { + // The unique identifier for this security group. + ID *string `json:"id" validate:"required"` +} + +// NewSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID : Instantiate SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID (Generic Model Constructor) +func (*VpcClassicV1) NewSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID(id string) (model *SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID, err error) { + model = &SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID) isaSecurityGroupRuleRemotePrototypeSecurityGroupIdentity() bool { + return true +} + +func (*SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID) isaSecurityGroupRuleRemotePrototype() bool { + return true +} + +// UnmarshalSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID unmarshals an instance of SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID from the specified map of raw messages. 
+func UnmarshalSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroupIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity : VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity struct +// This model "extends" VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext +type VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity struct { + // The identity of the root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` + + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity" validate:"required"` +} + +// NewVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity : Instantiate VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity (Generic Model Constructor) +func (*VpcClassicV1) NewVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity(profile VolumeProfileIdentityIntf, capacity int64) (model *VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity, err error) { + model = &VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity{ + Profile: profile, + Capacity: core.Int64Ptr(capacity), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity) isaVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext() bool { + return true +} + +func (*VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity) isaVolumeAttachmentVolumePrototypeInstanceContext() bool { + return true +} + +// UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity unmarshals an instance of VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity from the specified map of raw messages. 
+func UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity) + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} diff --git a/vendor/github.com/IBM/vpc-go-sdk/vpcv1/vpc_v1.go b/vendor/github.com/IBM/vpc-go-sdk/vpcv1/vpc_v1.go index 5b5b30be5d4..ed61f7a4c4b 100644 --- a/vendor/github.com/IBM/vpc-go-sdk/vpcv1/vpc_v1.go +++ b/vendor/github.com/IBM/vpc-go-sdk/vpcv1/vpc_v1.go @@ -15,7 +15,7 @@ */ /* - * IBM OpenAPI SDK Code Generator Version: 3.28.0-55613c9e-20210220-164656 + * IBM OpenAPI SDK Code Generator Version: 3.32.0-4c6a3129-20210514-210323 */ // Package vpcv1 : Operations and models for the VpcV1 service @@ -37,7 +37,7 @@ import ( // VpcV1 : The IBM Cloud Virtual Private Cloud (VPC) API can be used to programmatically provision and manage // infrastructure resources, including virtual server instances, subnets, volumes, and load balancers. // -// Version: 2021-03-30 +// Version: 2021-06-08 type VpcV1 struct { Service *core.BaseService @@ -121,7 +121,7 @@ func NewVpcV1(options *VpcV1Options) (service *VpcV1, err error) { } if options.Version == nil { - options.Version = core.StringPtr("2021-03-30") + options.Version = core.StringPtr("2021-06-08") } service = &VpcV1{ @@ -242,11 +242,13 @@ func (vpc *VpcV1) ListVpcsWithContext(ctx context.Context, listVpcsOptions *List if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPCCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPCCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -315,11 +317,13 @@ func (vpc *VpcV1) CreateVPCWithContext(ctx context.Context, createVPCOptions *Cr if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPC) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPC) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -430,11 +434,13 @@ func (vpc *VpcV1) GetVPCWithContext(ctx context.Context, getVPCOptions *GetVPCOp if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPC) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPC) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -497,11 +503,13 @@ func (vpc *VpcV1) UpdateVPCWithContext(ctx context.Context, updateVPCOptions *Up if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPC) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", 
&result, UnmarshalVPC) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -559,11 +567,13 @@ func (vpc *VpcV1) GetVPCDefaultNetworkACLWithContext(ctx context.Context, getVPC if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDefaultNetworkACL) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDefaultNetworkACL) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -622,11 +632,13 @@ func (vpc *VpcV1) GetVPCDefaultRoutingTableWithContext(ctx context.Context, getV if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDefaultRoutingTable) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDefaultRoutingTable) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -684,11 +696,13 @@ func (vpc *VpcV1) GetVPCDefaultSecurityGroupWithContext(ctx context.Context, get if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDefaultSecurityGroup) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDefaultSecurityGroup) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -751,11 +765,13 @@ func (vpc *VpcV1) ListVPCAddressPrefixesWithContext(ctx context.Context, listVPC if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefixCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefixCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -832,11 +848,13 @@ func (vpc *VpcV1) CreateVPCAddressPrefixWithContext(ctx context.Context, createV if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -947,11 +965,13 @@ func (vpc *VpcV1) GetVPCAddressPrefixWithContext(ctx context.Context, getVPCAddr if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1016,11 +1036,13 @@ func (vpc *VpcV1) UpdateVPCAddressPrefixWithContext(ctx context.Context, updateV if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalAddressPrefix) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1088,11 +1110,13 @@ func (vpc *VpcV1) ListVPCRoutesWithContext(ctx context.Context, listVPCRoutesOpt if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRouteCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRouteCollection) + if 
err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1173,11 +1197,13 @@ func (vpc *VpcV1) CreateVPCRouteWithContext(ctx context.Context, createVPCRouteO if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1287,11 +1313,13 @@ func (vpc *VpcV1) GetVPCRouteWithContext(ctx context.Context, getVPCRouteOptions if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1356,11 +1384,13 @@ func (vpc *VpcV1) UpdateVPCRouteWithContext(ctx context.Context, updateVPCRouteO if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1429,11 +1459,13 @@ func (vpc *VpcV1) ListVPCRoutingTablesWithContext(ctx context.Context, listVPCRo if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTableCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTableCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1514,11 +1546,13 @@ func (vpc *VpcV1) CreateVPCRoutingTableWithContext(ctx context.Context, createVP if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1629,11 +1663,13 @@ func (vpc *VpcV1) GetVPCRoutingTableWithContext(ctx context.Context, getVPCRouti if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1698,11 +1734,13 @@ func (vpc *VpcV1) UpdateVPCRoutingTableWithContext(ctx context.Context, updateVP if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1769,11 +1807,13 @@ func (vpc *VpcV1) ListVPCRoutingTableRoutesWithContext(ctx context.Context, list if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRouteCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRouteCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1854,11 
+1894,13 @@ func (vpc *VpcV1) CreateVPCRoutingTableRouteWithContext(ctx context.Context, cre if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -1970,11 +2012,13 @@ func (vpc *VpcV1) GetVPCRoutingTableRouteWithContext(ctx context.Context, getVPC if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2040,11 +2084,13 @@ func (vpc *VpcV1) UpdateVPCRoutingTableRouteWithContext(ctx context.Context, upd if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoute) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2109,11 +2155,13 @@ func (vpc *VpcV1) ListSubnetsWithContext(ctx context.Context, listSubnetsOptions if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnetCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnetCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2174,11 +2222,13 @@ func (vpc *VpcV1) CreateSubnetWithContext(ctx context.Context, createSubnetOptio if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnet) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnet) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2289,11 +2339,13 @@ func (vpc *VpcV1) GetSubnetWithContext(ctx context.Context, getSubnetOptions *Ge if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnet) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnet) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2357,11 +2409,13 @@ func (vpc *VpcV1) UpdateSubnetWithContext(ctx context.Context, updateSubnetOptio if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnet) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSubnet) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2418,11 +2472,13 @@ func (vpc *VpcV1) GetSubnetNetworkACLWithContext(ctx context.Context, getSubnetN if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2486,11 +2542,13 @@ func (vpc *VpcV1) ReplaceSubnetNetworkACLWithContext(ctx context.Context, replac if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", 
&result, UnmarshalNetworkACL) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2598,11 +2656,13 @@ func (vpc *VpcV1) GetSubnetPublicGatewayWithContext(ctx context.Context, getSubn if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2666,11 +2726,13 @@ func (vpc *VpcV1) SetSubnetPublicGatewayWithContext(ctx context.Context, setSubn if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2727,11 +2789,13 @@ func (vpc *VpcV1) GetSubnetRoutingTableWithContext(ctx context.Context, getSubne if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2798,11 +2862,13 @@ func (vpc *VpcV1) ReplaceSubnetRoutingTableWithContext(ctx context.Context, repl if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRoutingTable) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2868,11 +2934,13 @@ func (vpc *VpcV1) ListSubnetReservedIpsWithContext(ctx context.Context, listSubn if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIPCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIPCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -2945,11 +3013,13 @@ func (vpc *VpcV1) CreateSubnetReservedIPWithContext(ctx context.Context, createS if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3059,11 +3129,13 @@ func (vpc *VpcV1) GetSubnetReservedIPWithContext(ctx context.Context, getSubnetR if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3128,11 +3200,13 @@ func (vpc *VpcV1) UpdateSubnetReservedIPWithContext(ctx context.Context, updateS if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) - if err != nil { - return + if rawResponse != nil { 
+ err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3200,19 +3274,22 @@ func (vpc *VpcV1) ListImagesWithContext(ctx context.Context, listImagesOptions * if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImageCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } // CreateImage : Create an image // This request creates a new image from an image prototype object. The prototype object is structured in the same way -// as a retrieved image, and contains the information necessary to create the new image. A URL to the image file on -// object storage must be provided. +// as a retrieved image, and contains the information necessary to create the new image. If an image is being imported, +// a URL to the image file on object storage must be specified. If an image is being created from an existing volume, +// that volume must be specified. func (vpc *VpcV1) CreateImage(createImageOptions *CreateImageOptions) (result *Image, response *core.DetailedResponse, err error) { return vpc.CreateImageWithContext(context.Background(), createImageOptions) } @@ -3265,18 +3342,21 @@ func (vpc *VpcV1) CreateImageWithContext(ctx context.Context, createImageOptions if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImage) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImage) + if err != nil { + return + } + response.Result = result } - response.Result = result return } // DeleteImage : Delete an image -// This request deletes an image. This operation cannot be reversed. System-provided images are not allowed to be -// deleted. An image with a `status` of `pending`, `tentative`, or `deleting` cannot be deleted. +// This request deletes an image. This operation cannot be reversed. A system-provided image is not allowed to be +// deleted. Additionally, an image cannot be deleted if it has a +// `status` of `pending`, `tentative`, or `deleting`. func (vpc *VpcV1) DeleteImage(deleteImageOptions *DeleteImageOptions) (response *core.DetailedResponse, err error) { return vpc.DeleteImageWithContext(context.Background(), deleteImageOptions) } @@ -3378,18 +3458,20 @@ func (vpc *VpcV1) GetImageWithContext(ctx context.Context, getImageOptions *GetI if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImage) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImage) + if err != nil { + return + } + response.Result = result } - response.Result = result return } // UpdateImage : Update an image // This request updates an image with the information in a provided image patch. The image patch object is structured in -// the same way as a retrieved image and contains only the information to be updated. System-provided images are not +// the same way as a retrieved image and contains only the information to be updated. A system-provided image is not // allowed to be updated. An image with a `status` of `deleting` cannot be updated. 
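[Editor's illustration, not part of the vendored diff] To make the merge-patch semantics described above concrete, here is a minimal sketch of renaming an image with this SDK snapshot. It assumes an already-authenticated *vpcv1.VpcV1 client; ImagePatch, its AsPatch helper, and NewUpdateImageOptions follow the SDK's generated naming, though the go-sdk-core module version pinned in a given vendor tree may differ.

// Assumed imports:
//   "github.com/IBM/go-sdk-core/v5/core"   // core module version may differ per vendor tree
//   "github.com/IBM/vpc-go-sdk/vpcv1"
func renameImage(vpcService *vpcv1.VpcV1, imageID, newName string) (*vpcv1.Image, error) {
	// Build a patch carrying only the field to change; AsPatch serializes just
	// the fields that were set, matching the merge-patch behavior noted above.
	patchModel := &vpcv1.ImagePatch{Name: core.StringPtr(newName)}
	imagePatch, err := patchModel.AsPatch()
	if err != nil {
		return nil, err
	}
	options := vpcService.NewUpdateImageOptions(imageID, imagePatch)
	image, _, err := vpcService.UpdateImage(options)
	return image, err
}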
func (vpc *VpcV1) UpdateImage(updateImageOptions *UpdateImageOptions) (result *Image, response *core.DetailedResponse, err error) { return vpc.UpdateImageWithContext(context.Background(), updateImageOptions) @@ -3447,11 +3529,13 @@ func (vpc *VpcV1) UpdateImageWithContext(ctx context.Context, updateImageOptions if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImage) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalImage) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3506,11 +3590,13 @@ func (vpc *VpcV1) ListOperatingSystemsWithContext(ctx context.Context, listOpera if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOperatingSystemCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOperatingSystemCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3567,11 +3653,13 @@ func (vpc *VpcV1) GetOperatingSystemWithContext(ctx context.Context, getOperatin if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOperatingSystem) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalOperatingSystem) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3624,11 +3712,13 @@ func (vpc *VpcV1) ListKeysWithContext(ctx context.Context, listKeysOptions *List if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKeyCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKeyCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3702,11 +3792,13 @@ func (vpc *VpcV1) CreateKeyWithContext(ctx context.Context, createKeyOptions *Cr if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKey) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKey) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3814,11 +3906,13 @@ func (vpc *VpcV1) GetKeyWithContext(ctx context.Context, getKeyOptions *GetKeyOp if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKey) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKey) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3881,11 +3975,13 @@ func (vpc *VpcV1) UpdateKeyWithContext(ctx context.Context, updateKeyOptions *Up if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKey) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalKey) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3935,11 +4031,13 @@ func (vpc *VpcV1) ListInstanceProfilesWithContext(ctx context.Context, listInsta if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceProfileCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, 
UnmarshalInstanceProfileCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -3996,11 +4094,13 @@ func (vpc *VpcV1) GetInstanceProfileWithContext(ctx context.Context, getInstance if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceProfile) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceProfile) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4049,11 +4149,13 @@ func (vpc *VpcV1) ListInstanceTemplatesWithContext(ctx context.Context, listInst if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceTemplateCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceTemplateCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4112,11 +4214,13 @@ func (vpc *VpcV1) CreateInstanceTemplateWithContext(ctx context.Context, createI if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceTemplate) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceTemplate) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4224,11 +4328,13 @@ func (vpc *VpcV1) GetInstanceTemplateWithContext(ctx context.Context, getInstanc if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceTemplate) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceTemplate) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4293,11 +4399,13 @@ func (vpc *VpcV1) UpdateInstanceTemplateWithContext(ctx context.Context, updateI if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceTemplate) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceTemplate) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4367,11 +4475,13 @@ func (vpc *VpcV1) ListInstancesWithContext(ctx context.Context, listInstancesOpt if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4432,11 +4542,13 @@ func (vpc *VpcV1) CreateInstanceWithContext(ctx context.Context, createInstanceO if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstance) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstance) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4546,11 +4658,13 @@ func (vpc *VpcV1) GetInstanceWithContext(ctx context.Context, getInstanceOptions if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstance) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstance) 
+ if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4614,11 +4728,13 @@ func (vpc *VpcV1) UpdateInstanceWithContext(ctx context.Context, updateInstanceO if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstance) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstance) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4676,11 +4792,13 @@ func (vpc *VpcV1) GetInstanceInitializationWithContext(ctx context.Context, getI if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceInitialization) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceInitialization) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4751,11 +4869,13 @@ func (vpc *VpcV1) CreateInstanceActionWithContext(ctx context.Context, createIns if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceAction) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceAction) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4828,11 +4948,13 @@ func (vpc *VpcV1) CreateInstanceConsoleAccessTokenWithContext(ctx context.Contex if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceConsoleAccessToken) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceConsoleAccessToken) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4891,11 +5013,13 @@ func (vpc *VpcV1) ListInstanceDisksWithContext(ctx context.Context, listInstance if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceDiskCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceDiskCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -4953,11 +5077,13 @@ func (vpc *VpcV1) GetInstanceDiskWithContext(ctx context.Context, getInstanceDis if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceDisk) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceDisk) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5021,11 +5147,13 @@ func (vpc *VpcV1) UpdateInstanceDiskWithContext(ctx context.Context, updateInsta if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceDisk) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceDisk) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5085,11 +5213,13 @@ func (vpc *VpcV1) ListInstanceNetworkInterfacesWithContext(ctx context.Context, if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterfaceUnpaginatedCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, 
UnmarshalNetworkInterfaceUnpaginatedCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5171,11 +5301,13 @@ func (vpc *VpcV1) CreateInstanceNetworkInterfaceWithContext(ctx context.Context, if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5287,19 +5419,21 @@ func (vpc *VpcV1) GetInstanceNetworkInterfaceWithContext(ctx context.Context, ge if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface) + if err != nil { + return + } + response.Result = result } - response.Result = result return } // UpdateInstanceNetworkInterface : Update a network interface -// This request updates a network interface with the information in a provided network interface patch. The network -// interface patch object is structured in the same way as a retrieved network interface and can contain an updated name -// and/or port speed. +// This request updates a network interface with the information provided in a network interface patch object. The +// network interface patch object is structured in the same way as a retrieved network interface and needs to contain +// only the information to be updated. func (vpc *VpcV1) UpdateInstanceNetworkInterface(updateInstanceNetworkInterfaceOptions *UpdateInstanceNetworkInterfaceOptions) (result *NetworkInterface, response *core.DetailedResponse, err error) { return vpc.UpdateInstanceNetworkInterfaceWithContext(context.Background(), updateInstanceNetworkInterfaceOptions) } @@ -5357,11 +5491,13 @@ func (vpc *VpcV1) UpdateInstanceNetworkInterfaceWithContext(ctx context.Context, if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5419,11 +5555,13 @@ func (vpc *VpcV1) ListInstanceNetworkInterfaceFloatingIpsWithContext(ctx context if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIPUnpaginatedCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIPUnpaginatedCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5536,11 +5674,13 @@ func (vpc *VpcV1) GetInstanceNetworkInterfaceFloatingIPWithContext(ctx context.C if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5601,11 +5741,13 @@ func (vpc *VpcV1) AddInstanceNetworkInterfaceFloatingIPWithContext(ctx context.C if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP) - if err != nil { - return + if rawResponse != nil { + err = 
core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5664,11 +5806,13 @@ func (vpc *VpcV1) ListInstanceVolumeAttachmentsWithContext(ctx context.Context, if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachmentCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachmentCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5743,11 +5887,13 @@ func (vpc *VpcV1) CreateInstanceVolumeAttachmentWithContext(ctx context.Context, if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachment) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachment) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5857,19 +6003,21 @@ func (vpc *VpcV1) GetInstanceVolumeAttachmentWithContext(ctx context.Context, ge if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachment) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachment) + if err != nil { + return + } + response.Result = result } - response.Result = result return } // UpdateInstanceVolumeAttachment : Update a volume attachment -// This request updates a volume attachment with the information in a provided volume attachment patch. The volume -// attachment patch object is structured in the same way as a retrieved volume attachment and can contain an updated -// name. +// This request updates a volume attachment with the information provided in a volume attachment patch object. The +// volume attachment patch object is structured in the same way as a retrieved volume attachment and needs to contain +// only the information to be updated. 
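[Editor's illustration, not part of the vendored diff] The "needs to contain only the information to be updated" behavior above can be seen in a small hedged sketch that toggles auto-delete on a volume attachment; it assumes the generated VolumeAttachmentPatch model and options constructor keep the names shown in this snapshot, and the same imports as the earlier image sketch.

func setAttachmentAutoDelete(vpcService *vpcv1.VpcV1, instanceID, attachmentID string, autoDelete bool) error {
	// Only DeleteVolumeOnInstanceDelete is set, so only that field is sent in the PATCH body.
	patchModel := &vpcv1.VolumeAttachmentPatch{
		DeleteVolumeOnInstanceDelete: core.BoolPtr(autoDelete),
	}
	patch, err := patchModel.AsPatch()
	if err != nil {
		return err
	}
	options := vpcService.NewUpdateInstanceVolumeAttachmentOptions(instanceID, attachmentID, patch)
	_, _, err = vpcService.UpdateInstanceVolumeAttachment(options)
	return err
}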
func (vpc *VpcV1) UpdateInstanceVolumeAttachment(updateInstanceVolumeAttachmentOptions *UpdateInstanceVolumeAttachmentOptions) (result *VolumeAttachment, response *core.DetailedResponse, err error) { return vpc.UpdateInstanceVolumeAttachmentWithContext(context.Background(), updateInstanceVolumeAttachmentOptions) } @@ -5927,11 +6075,13 @@ func (vpc *VpcV1) UpdateInstanceVolumeAttachmentWithContext(ctx context.Context, if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachment) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeAttachment) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -5986,11 +6136,13 @@ func (vpc *VpcV1) ListInstanceGroupsWithContext(ctx context.Context, listInstanc if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -6074,11 +6226,13 @@ func (vpc *VpcV1) CreateInstanceGroupWithContext(ctx context.Context, createInst if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroup) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroup) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -6187,11 +6341,13 @@ func (vpc *VpcV1) GetInstanceGroupWithContext(ctx context.Context, getInstanceGr if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroup) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroup) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -6255,11 +6411,13 @@ func (vpc *VpcV1) UpdateInstanceGroupWithContext(ctx context.Context, updateInst if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroup) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroup) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -6373,11 +6531,13 @@ func (vpc *VpcV1) ListInstanceGroupManagersWithContext(ctx context.Context, list if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -6440,11 +6600,13 @@ func (vpc *VpcV1) CreateInstanceGroupManagerWithContext(ctx context.Context, cre if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManager) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManager) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -6554,11 +6716,13 @@ func (vpc *VpcV1) GetInstanceGroupManagerWithContext(ctx context.Context, getIns if err != nil 
{ return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManager) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManager) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -6622,11 +6786,342 @@ func (vpc *VpcV1) UpdateInstanceGroupManagerWithContext(ctx context.Context, upd if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManager) + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManager) + if err != nil { + return + } + response.Result = result + } + + return +} + +// ListInstanceGroupManagerActions : List all actions for an instance group manager +// This request lists all instance group actions for an instance group manager. +func (vpc *VpcV1) ListInstanceGroupManagerActions(listInstanceGroupManagerActionsOptions *ListInstanceGroupManagerActionsOptions) (result *InstanceGroupManagerActionsCollection, response *core.DetailedResponse, err error) { + return vpc.ListInstanceGroupManagerActionsWithContext(context.Background(), listInstanceGroupManagerActionsOptions) +} + +// ListInstanceGroupManagerActionsWithContext is an alternate form of the ListInstanceGroupManagerActions method which supports a Context parameter +func (vpc *VpcV1) ListInstanceGroupManagerActionsWithContext(ctx context.Context, listInstanceGroupManagerActionsOptions *ListInstanceGroupManagerActionsOptions) (result *InstanceGroupManagerActionsCollection, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(listInstanceGroupManagerActionsOptions, "listInstanceGroupManagerActionsOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(listInstanceGroupManagerActionsOptions, "listInstanceGroupManagerActionsOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_group_id": *listInstanceGroupManagerActionsOptions.InstanceGroupID, + "instance_group_manager_id": *listInstanceGroupManagerActionsOptions.InstanceGroupManagerID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpc.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/instance_groups/{instance_group_id}/managers/{instance_group_manager_id}/actions`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range listInstanceGroupManagerActionsOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc", "V1", "ListInstanceGroupManagerActions") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpc.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpc.generation)) + if listInstanceGroupManagerActionsOptions.Start != nil { + builder.AddQuery("start", fmt.Sprint(*listInstanceGroupManagerActionsOptions.Start)) + } + if listInstanceGroupManagerActionsOptions.Limit != nil { + builder.AddQuery("limit", fmt.Sprint(*listInstanceGroupManagerActionsOptions.Limit)) + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpc.Service.Request(request, &rawResponse) if err != nil { return } - response.Result = result + if rawResponse 
!= nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerActionsCollection) + if err != nil { + return + } + response.Result = result + } + + return +} + +// CreateInstanceGroupManagerAction : Create an instance group manager action +// This request creates a new instance group manager action. +func (vpc *VpcV1) CreateInstanceGroupManagerAction(createInstanceGroupManagerActionOptions *CreateInstanceGroupManagerActionOptions) (result InstanceGroupManagerActionIntf, response *core.DetailedResponse, err error) { + return vpc.CreateInstanceGroupManagerActionWithContext(context.Background(), createInstanceGroupManagerActionOptions) +} + +// CreateInstanceGroupManagerActionWithContext is an alternate form of the CreateInstanceGroupManagerAction method which supports a Context parameter +func (vpc *VpcV1) CreateInstanceGroupManagerActionWithContext(ctx context.Context, createInstanceGroupManagerActionOptions *CreateInstanceGroupManagerActionOptions) (result InstanceGroupManagerActionIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(createInstanceGroupManagerActionOptions, "createInstanceGroupManagerActionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(createInstanceGroupManagerActionOptions, "createInstanceGroupManagerActionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_group_id": *createInstanceGroupManagerActionOptions.InstanceGroupID, + "instance_group_manager_id": *createInstanceGroupManagerActionOptions.InstanceGroupManagerID, + } + + builder := core.NewRequestBuilder(core.POST) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpc.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/instance_groups/{instance_group_id}/managers/{instance_group_manager_id}/actions`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range createInstanceGroupManagerActionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc", "V1", "CreateInstanceGroupManagerAction") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + builder.AddHeader("Content-Type", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpc.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpc.generation)) + + _, err = builder.SetBodyContentJSON(createInstanceGroupManagerActionOptions.InstanceGroupManagerActionPrototype) + if err != nil { + return + } + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpc.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerAction) + if err != nil { + return + } + response.Result = result + } + + return +} + +// DeleteInstanceGroupManagerAction : Delete specified instance group manager action +// This request deletes an instance group manager action. This operation cannot be reversed. 
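[Editor's illustration, not part of the vendored diff] A short usage sketch for this new delete endpoint, whose signature and path parameters are shown in the hunk below; the NewDeleteInstanceGroupManagerActionOptions constructor name is assumed to follow the SDK's uniform generated pattern.

func deleteManagerAction(vpcService *vpcv1.VpcV1, groupID, managerID, actionID string) error {
	// The three identifiers mirror the instance_group_id, instance_group_manager_id,
	// and id path parameters resolved by the operation.
	options := vpcService.NewDeleteInstanceGroupManagerActionOptions(groupID, managerID, actionID)
	// Delete returns no body; err == nil with a 2xx DetailedResponse means success.
	response, err := vpcService.DeleteInstanceGroupManagerAction(options)
	if err != nil {
		return err
	}
	_ = response.StatusCode // e.g. 204 No Content
	return nil
}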
+func (vpc *VpcV1) DeleteInstanceGroupManagerAction(deleteInstanceGroupManagerActionOptions *DeleteInstanceGroupManagerActionOptions) (response *core.DetailedResponse, err error) { + return vpc.DeleteInstanceGroupManagerActionWithContext(context.Background(), deleteInstanceGroupManagerActionOptions) +} + +// DeleteInstanceGroupManagerActionWithContext is an alternate form of the DeleteInstanceGroupManagerAction method which supports a Context parameter +func (vpc *VpcV1) DeleteInstanceGroupManagerActionWithContext(ctx context.Context, deleteInstanceGroupManagerActionOptions *DeleteInstanceGroupManagerActionOptions) (response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(deleteInstanceGroupManagerActionOptions, "deleteInstanceGroupManagerActionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(deleteInstanceGroupManagerActionOptions, "deleteInstanceGroupManagerActionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_group_id": *deleteInstanceGroupManagerActionOptions.InstanceGroupID, + "instance_group_manager_id": *deleteInstanceGroupManagerActionOptions.InstanceGroupManagerID, + "id": *deleteInstanceGroupManagerActionOptions.ID, + } + + builder := core.NewRequestBuilder(core.DELETE) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpc.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/instance_groups/{instance_group_id}/managers/{instance_group_manager_id}/actions/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range deleteInstanceGroupManagerActionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc", "V1", "DeleteInstanceGroupManagerAction") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + + builder.AddQuery("version", fmt.Sprint(*vpc.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpc.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + response, err = vpc.Service.Request(request, nil) + + return +} + +// GetInstanceGroupManagerAction : Retrieve specified instance group manager action +// This request retrieves a single instance group manager action specified by identifier in the URL. 
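[Editor's illustration, not part of the vendored diff] Because the getter below returns the InstanceGroupManagerActionIntf interface rather than a concrete struct, callers typically narrow the result after the call. A speculative sketch, assuming the base InstanceGroupManagerAction model (the one referenced by UnmarshalInstanceGroupManagerAction above) is the concrete type produced and exposes ID and Status fields; "fmt" is also imported.

func describeManagerAction(vpcService *vpcv1.VpcV1, groupID, managerID, actionID string) error {
	options := vpcService.NewGetInstanceGroupManagerActionOptions(groupID, managerID, actionID)
	result, _, err := vpcService.GetInstanceGroupManagerAction(options)
	if err != nil {
		return err
	}
	// Narrow the interface to the generated base model to reach its fields
	// (field names here are assumptions about the generated model).
	if action, ok := result.(*vpcv1.InstanceGroupManagerAction); ok {
		fmt.Printf("action %s status: %s\n", *action.ID, *action.Status)
	}
	return nil
}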
+func (vpc *VpcV1) GetInstanceGroupManagerAction(getInstanceGroupManagerActionOptions *GetInstanceGroupManagerActionOptions) (result InstanceGroupManagerActionIntf, response *core.DetailedResponse, err error) { + return vpc.GetInstanceGroupManagerActionWithContext(context.Background(), getInstanceGroupManagerActionOptions) +} + +// GetInstanceGroupManagerActionWithContext is an alternate form of the GetInstanceGroupManagerAction method which supports a Context parameter +func (vpc *VpcV1) GetInstanceGroupManagerActionWithContext(ctx context.Context, getInstanceGroupManagerActionOptions *GetInstanceGroupManagerActionOptions) (result InstanceGroupManagerActionIntf, response *core.DetailedResponse, err error) { + err = core.ValidateNotNil(getInstanceGroupManagerActionOptions, "getInstanceGroupManagerActionOptions cannot be nil") + if err != nil { + return + } + err = core.ValidateStruct(getInstanceGroupManagerActionOptions, "getInstanceGroupManagerActionOptions") + if err != nil { + return + } + + pathParamsMap := map[string]string{ + "instance_group_id": *getInstanceGroupManagerActionOptions.InstanceGroupID, + "instance_group_manager_id": *getInstanceGroupManagerActionOptions.InstanceGroupManagerID, + "id": *getInstanceGroupManagerActionOptions.ID, + } + + builder := core.NewRequestBuilder(core.GET) + builder = builder.WithContext(ctx) + builder.EnableGzipCompression = vpc.GetEnableGzipCompression() + _, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/instance_groups/{instance_group_id}/managers/{instance_group_manager_id}/actions/{id}`, pathParamsMap) + if err != nil { + return + } + + for headerName, headerValue := range getInstanceGroupManagerActionOptions.Headers { + builder.AddHeader(headerName, headerValue) + } + + sdkHeaders := common.GetSdkHeaders("vpc", "V1", "GetInstanceGroupManagerAction") + for headerName, headerValue := range sdkHeaders { + builder.AddHeader(headerName, headerValue) + } + builder.AddHeader("Accept", "application/json") + + builder.AddQuery("version", fmt.Sprint(*vpc.Version)) + builder.AddQuery("generation", fmt.Sprint(*vpc.generation)) + + request, err := builder.Build() + if err != nil { + return + } + + var rawResponse map[string]json.RawMessage + response, err = vpc.Service.Request(request, &rawResponse) + if err != nil { + return + } + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerAction) + if err != nil { + return + } + response.Result = result + } + + return +} + +// UpdateInstanceGroupManagerAction : Update specified instance group manager action +// This request updates an instance group manager action. 
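[Editor's illustration, not part of the vendored diff] A matching sketch for the new PATCH endpoint below, again hedged: it assumes the generated InstanceGroupManagerActionPatch model exposes a Name field and the usual AsPatch helper, consistent with the other patch models in this snapshot.

func renameManagerAction(vpcService *vpcv1.VpcV1, groupID, managerID, actionID, newName string) error {
	// Name is an assumed field on the generated patch model.
	patchModel := &vpcv1.InstanceGroupManagerActionPatch{Name: core.StringPtr(newName)}
	patch, err := patchModel.AsPatch()
	if err != nil {
		return err
	}
	options := vpcService.NewUpdateInstanceGroupManagerActionOptions(groupID, managerID, actionID, patch)
	_, _, err = vpcService.UpdateInstanceGroupManagerAction(options)
	return err
}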
+
+// UpdateInstanceGroupManagerAction : Update specified instance group manager action
+// This request updates an instance group manager action.
+func (vpc *VpcV1) UpdateInstanceGroupManagerAction(updateInstanceGroupManagerActionOptions *UpdateInstanceGroupManagerActionOptions) (result InstanceGroupManagerActionIntf, response *core.DetailedResponse, err error) {
+	return vpc.UpdateInstanceGroupManagerActionWithContext(context.Background(), updateInstanceGroupManagerActionOptions)
+}
+
+// UpdateInstanceGroupManagerActionWithContext is an alternate form of the UpdateInstanceGroupManagerAction method which supports a Context parameter
+func (vpc *VpcV1) UpdateInstanceGroupManagerActionWithContext(ctx context.Context, updateInstanceGroupManagerActionOptions *UpdateInstanceGroupManagerActionOptions) (result InstanceGroupManagerActionIntf, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(updateInstanceGroupManagerActionOptions, "updateInstanceGroupManagerActionOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(updateInstanceGroupManagerActionOptions, "updateInstanceGroupManagerActionOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"instance_group_id":         *updateInstanceGroupManagerActionOptions.InstanceGroupID,
+		"instance_group_manager_id": *updateInstanceGroupManagerActionOptions.InstanceGroupManagerID,
+		"id":                        *updateInstanceGroupManagerActionOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.PATCH)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpc.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/instance_groups/{instance_group_id}/managers/{instance_group_manager_id}/actions/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range updateInstanceGroupManagerActionOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc", "V1", "UpdateInstanceGroupManagerAction")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/merge-patch+json")
+
+	builder.AddQuery("version", fmt.Sprint(*vpc.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpc.generation))
+
+	_, err = builder.SetBodyContentJSON(updateInstanceGroupManagerActionOptions.InstanceGroupManagerActionPatch)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = vpc.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerAction)
+		if err != nil {
+			return
+		}
+		response.Result = result
+	}
+
+	return
+}
@@ -6690,11 +7185,13 @@ func (vpc *VpcV1) ListInstanceGroupManagerPoliciesWithContext(ctx context.Contex
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerPolicyCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerPolicyCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -6758,11 +7255,13 @@ func (vpc *VpcV1) CreateInstanceGroupManagerPolicyWithContext(ctx context.Contex
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerPolicy)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerPolicy)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -6874,11 +7373,13 @@ func (vpc *VpcV1) GetInstanceGroupManagerPolicyWithContext(ctx context.Context,
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerPolicy)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerPolicy)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -6943,11 +7444,13 @@ func (vpc *VpcV1) UpdateInstanceGroupManagerPolicyWithContext(ctx context.Contex
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerPolicy)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupManagerPolicy)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7062,11 +7565,13 @@ func (vpc *VpcV1) ListInstanceGroupMembershipsWithContext(ctx context.Context, l
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupMembershipCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupMembershipCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7177,11 +7682,13 @@ func (vpc *VpcV1) GetInstanceGroupMembershipWithContext(ctx context.Context, get
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupMembership)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupMembership)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7245,11 +7752,13 @@ func (vpc *VpcV1) UpdateInstanceGroupMembershipWithContext(ctx context.Context,
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupMembership)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalInstanceGroupMembership)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7311,11 +7820,13 @@ func (vpc *VpcV1) ListDedicatedHostGroupsWithContext(ctx context.Context, listDe
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostGroupCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostGroupCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7386,11 +7897,13 @@ func (vpc *VpcV1) CreateDedicatedHostGroupWithContext(ctx context.Context, creat
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostGroup)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostGroup)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7498,11 +8011,13 @@ func (vpc *VpcV1) GetDedicatedHostGroupWithContext(ctx context.Context, getDedic
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostGroup)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostGroup)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
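Reviewer note, not part of the diff: the hunks above and below all make the same change — core.UnmarshalModel is now reached only when rawResponse is non-nil, so a 2xx response with an empty body is, apparently, returned as-is instead of tripping the unmarshaller. A distilled before/after sketch; UnmarshalX is a placeholder for whichever model unmarshaller each method uses:

	// Before (unguarded): a nil rawResponse still flowed into the unmarshaller.
	//	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalX)
	// After (guarded): an empty body leaves result nil and the DetailedResponse untouched.
	if rawResponse != nil {
		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalX) // UnmarshalX: placeholder
		if err != nil {
			return
		}
		response.Result = result
	}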
@@ -7567,11 +8082,13 @@ func (vpc *VpcV1) UpdateDedicatedHostGroupWithContext(ctx context.Context, updat
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostGroup)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostGroup)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7627,11 +8144,13 @@ func (vpc *VpcV1) ListDedicatedHostProfilesWithContext(ctx context.Context, list
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostProfileCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostProfileCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7688,11 +8207,13 @@ func (vpc *VpcV1) GetDedicatedHostProfileWithContext(ctx context.Context, getDed
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostProfile)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostProfile)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7756,11 +8277,13 @@ func (vpc *VpcV1) ListDedicatedHostsWithContext(ctx context.Context, listDedicat
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7819,11 +8342,13 @@ func (vpc *VpcV1) CreateDedicatedHostWithContext(ctx context.Context, createDedi
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHost)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHost)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7882,11 +8407,13 @@ func (vpc *VpcV1) ListDedicatedHostDisksWithContext(ctx context.Context, listDed
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostDiskCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostDiskCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -7944,11 +8471,13 @@ func (vpc *VpcV1) GetDedicatedHostDiskWithContext(ctx context.Context, getDedica
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostDisk)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostDisk)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8012,11 +8541,13 @@ func (vpc *VpcV1) UpdateDedicatedHostDiskWithContext(ctx context.Context, update
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostDisk)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHostDisk)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8124,11 +8655,13 @@ func (vpc *VpcV1) GetDedicatedHostWithContext(ctx context.Context, getDedicatedH
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHost)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHost)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8193,11 +8726,13 @@ func (vpc *VpcV1) UpdateDedicatedHostWithContext(ctx context.Context, updateDedi
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHost)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDedicatedHost)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8253,11 +8788,13 @@ func (vpc *VpcV1) ListVolumeProfilesWithContext(ctx context.Context, listVolumeP
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeProfileCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeProfileCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8314,11 +8851,13 @@ func (vpc *VpcV1) GetVolumeProfileWithContext(ctx context.Context, getVolumeProf
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeProfile)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeProfile)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8380,11 +8919,13 @@ func (vpc *VpcV1) ListVolumesWithContext(ctx context.Context, listVolumesOptions
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolumeCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8444,11 +8985,13 @@ func (vpc *VpcV1) CreateVolumeWithContext(ctx context.Context, createVolumeOptio
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolume)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolume)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8557,11 +9100,13 @@ func (vpc *VpcV1) GetVolumeWithContext(ctx context.Context, getVolumeOptions *Ge
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolume)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolume)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8625,11 +9170,403 @@ func (vpc *VpcV1) UpdateVolumeWithContext(ctx context.Context, updateVolumeOptio
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolume)
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVolume)
+		if err != nil {
+			return
+		}
+		response.Result = result
+	}
+
+	return
+}
+
+// DeleteSnapshots : Delete a filtered collection of snapshots
+// This request deletes all snapshots created from a specific source volume.
+func (vpc *VpcV1) DeleteSnapshots(deleteSnapshotsOptions *DeleteSnapshotsOptions) (response *core.DetailedResponse, err error) {
+	return vpc.DeleteSnapshotsWithContext(context.Background(), deleteSnapshotsOptions)
+}
+
+// DeleteSnapshotsWithContext is an alternate form of the DeleteSnapshots method which supports a Context parameter
+func (vpc *VpcV1) DeleteSnapshotsWithContext(ctx context.Context, deleteSnapshotsOptions *DeleteSnapshotsOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteSnapshotsOptions, "deleteSnapshotsOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteSnapshotsOptions, "deleteSnapshotsOptions")
+	if err != nil {
+		return
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpc.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/snapshots`, nil)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteSnapshotsOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc", "V1", "DeleteSnapshots")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	builder.AddQuery("version", fmt.Sprint(*vpc.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpc.generation))
+	builder.AddQuery("source_volume.id", fmt.Sprint(*deleteSnapshotsOptions.SourceVolumeID))
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = vpc.Service.Request(request, nil)
+
+	return
+}
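Reviewer note, not part of the diff: DeleteSnapshots filters server-side on source_volume.id, so a caller supplies only the volume ID. A sketch under the same vpcService assumption:

	// Hypothetical sketch: remove every snapshot taken from one volume.
	func deleteVolumeSnapshots(vpcService *vpcv1.VpcV1, sourceVolumeID string) error {
		opts := vpcService.NewDeleteSnapshotsOptions(sourceVolumeID)
		_, err := vpcService.DeleteSnapshots(opts)
		return err
	}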
+
+// ListSnapshots : List all snapshots
+// This request lists all snapshots in the region. A snapshot preserves the data of a volume at the time the snapshot is
+// created.
+func (vpc *VpcV1) ListSnapshots(listSnapshotsOptions *ListSnapshotsOptions) (result *SnapshotCollection, response *core.DetailedResponse, err error) {
+	return vpc.ListSnapshotsWithContext(context.Background(), listSnapshotsOptions)
+}
+
+// ListSnapshotsWithContext is an alternate form of the ListSnapshots method which supports a Context parameter
+func (vpc *VpcV1) ListSnapshotsWithContext(ctx context.Context, listSnapshotsOptions *ListSnapshotsOptions) (result *SnapshotCollection, response *core.DetailedResponse, err error) {
+	err = core.ValidateStruct(listSnapshotsOptions, "listSnapshotsOptions")
+	if err != nil {
+		return
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpc.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/snapshots`, nil)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range listSnapshotsOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc", "V1", "ListSnapshots")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	builder.AddQuery("version", fmt.Sprint(*vpc.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpc.generation))
+	if listSnapshotsOptions.Start != nil {
+		builder.AddQuery("start", fmt.Sprint(*listSnapshotsOptions.Start))
+	}
+	if listSnapshotsOptions.Limit != nil {
+		builder.AddQuery("limit", fmt.Sprint(*listSnapshotsOptions.Limit))
+	}
+	if listSnapshotsOptions.ResourceGroupID != nil {
+		builder.AddQuery("resource_group.id", fmt.Sprint(*listSnapshotsOptions.ResourceGroupID))
+	}
+	if listSnapshotsOptions.Name != nil {
+		builder.AddQuery("name", fmt.Sprint(*listSnapshotsOptions.Name))
+	}
+	if listSnapshotsOptions.SourceVolumeID != nil {
+		builder.AddQuery("source_volume.id", fmt.Sprint(*listSnapshotsOptions.SourceVolumeID))
+	}
+	if listSnapshotsOptions.SourceVolumeCRN != nil {
+		builder.AddQuery("source_volume.crn", fmt.Sprint(*listSnapshotsOptions.SourceVolumeCRN))
+	}
+	if listSnapshotsOptions.SourceImageID != nil {
+		builder.AddQuery("source_image.id", fmt.Sprint(*listSnapshotsOptions.SourceImageID))
+	}
+	if listSnapshotsOptions.SourceImageCRN != nil {
+		builder.AddQuery("source_image.crn", fmt.Sprint(*listSnapshotsOptions.SourceImageCRN))
+	}
+	if listSnapshotsOptions.Sort != nil {
+		builder.AddQuery("sort", fmt.Sprint(*listSnapshotsOptions.Sort))
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = vpc.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSnapshotCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
+	}
+
+	return
+}
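Reviewer note, not part of the diff: the listing supports server-side filters plus start/limit paging. A first-page sketch, assuming the conventional Set* setters exist on the generated options struct:

	// Hypothetical sketch: first page of snapshots for one source volume.
	func listVolumeSnapshots(vpcService *vpcv1.VpcV1, sourceVolumeID string) error {
		opts := vpcService.NewListSnapshotsOptions()
		opts.SetLimit(50)
		opts.SetSourceVolumeID(sourceVolumeID)
		page, _, err := vpcService.ListSnapshots(opts)
		if err != nil {
			return err
		}
		for _, snap := range page.Snapshots {
			fmt.Println(*snap.ID, *snap.Name)
		}
		// page.Next, when set, carries the `start` token for the following page.
		return nil
	}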
+
+// CreateSnapshot : Create a snapshot
+// This request creates a new snapshot from a snapshot prototype object. The prototype object is structured in the same
+// way as a retrieved snapshot, and contains the information necessary to provision the new snapshot.
+func (vpc *VpcV1) CreateSnapshot(createSnapshotOptions *CreateSnapshotOptions) (result *Snapshot, response *core.DetailedResponse, err error) {
+	return vpc.CreateSnapshotWithContext(context.Background(), createSnapshotOptions)
+}
+
+// CreateSnapshotWithContext is an alternate form of the CreateSnapshot method which supports a Context parameter
+func (vpc *VpcV1) CreateSnapshotWithContext(ctx context.Context, createSnapshotOptions *CreateSnapshotOptions) (result *Snapshot, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(createSnapshotOptions, "createSnapshotOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(createSnapshotOptions, "createSnapshotOptions")
+	if err != nil {
+		return
+	}
+
+	builder := core.NewRequestBuilder(core.POST)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpc.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/snapshots`, nil)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range createSnapshotOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc", "V1", "CreateSnapshot")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/json")
+
+	builder.AddQuery("version", fmt.Sprint(*vpc.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpc.generation))
+
+	body := make(map[string]interface{})
+	if createSnapshotOptions.SourceVolume != nil {
+		body["source_volume"] = createSnapshotOptions.SourceVolume
+	}
+	if createSnapshotOptions.Name != nil {
+		body["name"] = createSnapshotOptions.Name
+	}
+	if createSnapshotOptions.ResourceGroup != nil {
+		body["resource_group"] = createSnapshotOptions.ResourceGroup
+	}
+	_, err = builder.SetBodyContentJSON(body)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = vpc.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSnapshot)
+		if err != nil {
+			return
+		}
+		response.Result = result
+	}
+
+	return
+}
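Reviewer note, not part of the diff: the prototype needs only a source-volume identity; name and resource group are optional. A sketch — the constructor signature (taking the required VolumeIdentityIntf) and the VolumeIdentityByID type follow this SDK's usual patterns but are assumptions here:

	// Hypothetical sketch: snapshot an existing volume by ID.
	func snapshotVolume(vpcService *vpcv1.VpcV1, volumeID string) error {
		name := "bootstrap-volume-snap"
		opts := vpcService.NewCreateSnapshotOptions(&vpcv1.VolumeIdentityByID{ID: &volumeID})
		opts.SetName(name)
		snapshot, _, err := vpcService.CreateSnapshot(opts)
		if err != nil {
			return err
		}
		fmt.Println(*snapshot.ID)
		return nil
	}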
+
+// DeleteSnapshot : Delete a snapshot
+// This request deletes a snapshot. This operation cannot be reversed.
+func (vpc *VpcV1) DeleteSnapshot(deleteSnapshotOptions *DeleteSnapshotOptions) (response *core.DetailedResponse, err error) {
+	return vpc.DeleteSnapshotWithContext(context.Background(), deleteSnapshotOptions)
+}
+
+// DeleteSnapshotWithContext is an alternate form of the DeleteSnapshot method which supports a Context parameter
+func (vpc *VpcV1) DeleteSnapshotWithContext(ctx context.Context, deleteSnapshotOptions *DeleteSnapshotOptions) (response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(deleteSnapshotOptions, "deleteSnapshotOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(deleteSnapshotOptions, "deleteSnapshotOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *deleteSnapshotOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.DELETE)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpc.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/snapshots/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range deleteSnapshotOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc", "V1", "DeleteSnapshot")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	builder.AddQuery("version", fmt.Sprint(*vpc.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpc.generation))
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	response, err = vpc.Service.Request(request, nil)
+
+	return
+}
+
+// GetSnapshot : Retrieve a snapshot
+// This request retrieves a single snapshot specified by the identifier in the URL.
+func (vpc *VpcV1) GetSnapshot(getSnapshotOptions *GetSnapshotOptions) (result *Snapshot, response *core.DetailedResponse, err error) {
+	return vpc.GetSnapshotWithContext(context.Background(), getSnapshotOptions)
+}
+
+// GetSnapshotWithContext is an alternate form of the GetSnapshot method which supports a Context parameter
+func (vpc *VpcV1) GetSnapshotWithContext(ctx context.Context, getSnapshotOptions *GetSnapshotOptions) (result *Snapshot, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(getSnapshotOptions, "getSnapshotOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(getSnapshotOptions, "getSnapshotOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *getSnapshotOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.GET)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpc.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/snapshots/{id}`, pathParamsMap)
+	if err != nil {
+		return
+	}
+
+	for headerName, headerValue := range getSnapshotOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc", "V1", "GetSnapshot")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+
+	builder.AddQuery("version", fmt.Sprint(*vpc.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpc.generation))
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = vpc.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSnapshot)
+		if err != nil {
+			return
+		}
+		response.Result = result
+	}
+
+	return
+}
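Reviewer note, not part of the diff: UpdateSnapshot below consumes a JSON merge-patch body, which in this SDK family is usually produced from a *Patch struct via AsPatch. A rename sketch, with the options-constructor signature assumed:

	// Hypothetical sketch: rename a snapshot via a merge-patch body.
	func renameSnapshot(vpcService *vpcv1.VpcV1, snapshotID, newName string) error {
		patch, err := (&vpcv1.SnapshotPatch{Name: &newName}).AsPatch()
		if err != nil {
			return err
		}
		opts := vpcService.NewUpdateSnapshotOptions(snapshotID, patch)
		_, _, err = vpcService.UpdateSnapshot(opts)
		return err
	}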
+
+// UpdateSnapshot : Update a snapshot
+// This request updates a snapshot's name.
+func (vpc *VpcV1) UpdateSnapshot(updateSnapshotOptions *UpdateSnapshotOptions) (result *Snapshot, response *core.DetailedResponse, err error) {
+	return vpc.UpdateSnapshotWithContext(context.Background(), updateSnapshotOptions)
+}
+
+// UpdateSnapshotWithContext is an alternate form of the UpdateSnapshot method which supports a Context parameter
+func (vpc *VpcV1) UpdateSnapshotWithContext(ctx context.Context, updateSnapshotOptions *UpdateSnapshotOptions) (result *Snapshot, response *core.DetailedResponse, err error) {
+	err = core.ValidateNotNil(updateSnapshotOptions, "updateSnapshotOptions cannot be nil")
+	if err != nil {
+		return
+	}
+	err = core.ValidateStruct(updateSnapshotOptions, "updateSnapshotOptions")
+	if err != nil {
+		return
+	}
+
+	pathParamsMap := map[string]string{
+		"id": *updateSnapshotOptions.ID,
+	}
+
+	builder := core.NewRequestBuilder(core.PATCH)
+	builder = builder.WithContext(ctx)
+	builder.EnableGzipCompression = vpc.GetEnableGzipCompression()
+	_, err = builder.ResolveRequestURL(vpc.Service.Options.URL, `/snapshots/{id}`, pathParamsMap)
 	if err != nil {
 		return
 	}
-	response.Result = result
+
+	for headerName, headerValue := range updateSnapshotOptions.Headers {
+		builder.AddHeader(headerName, headerValue)
+	}
+
+	sdkHeaders := common.GetSdkHeaders("vpc", "V1", "UpdateSnapshot")
+	for headerName, headerValue := range sdkHeaders {
+		builder.AddHeader(headerName, headerValue)
+	}
+	builder.AddHeader("Accept", "application/json")
+	builder.AddHeader("Content-Type", "application/merge-patch+json")
+
+	builder.AddQuery("version", fmt.Sprint(*vpc.Version))
+	builder.AddQuery("generation", fmt.Sprint(*vpc.generation))
+
+	_, err = builder.SetBodyContentJSON(updateSnapshotOptions.SnapshotPatch)
+	if err != nil {
+		return
+	}
+
+	request, err := builder.Build()
+	if err != nil {
+		return
+	}
+
+	var rawResponse map[string]json.RawMessage
+	response, err = vpc.Service.Request(request, &rawResponse)
+	if err != nil {
+		return
+	}
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSnapshot)
+		if err != nil {
+			return
+		}
+		response.Result = result
+	}
 
 	return
 }
@@ -8682,11 +9619,13 @@ func (vpc *VpcV1) ListRegionsWithContext(ctx context.Context, listRegionsOptions
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRegionCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRegionCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8743,11 +9682,13 @@ func (vpc *VpcV1) GetRegionWithContext(ctx context.Context, getRegionOptions *Ge
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRegion)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalRegion)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8805,11 +9746,13 @@ func (vpc *VpcV1) ListRegionZonesWithContext(ctx context.Context, listRegionZone
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZoneCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZoneCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8867,11 +9810,13 @@ func (vpc *VpcV1) GetRegionZoneWithContext(ctx context.Context, getRegionZoneOpt
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZone)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalZone)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -8931,11 +9876,13 @@ func (vpc *VpcV1) ListPublicGatewaysWithContext(ctx context.Context, listPublicG
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGatewayCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGatewayCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9015,11 +9962,13 @@ func (vpc *VpcV1) CreatePublicGatewayWithContext(ctx context.Context, createPubl
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9129,11 +10078,13 @@ func (vpc *VpcV1) GetPublicGatewayWithContext(ctx context.Context, getPublicGate
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9196,11 +10147,13 @@ func (vpc *VpcV1) UpdatePublicGatewayWithContext(ctx context.Context, updatePubl
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalPublicGateway)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9259,11 +10212,13 @@ func (vpc *VpcV1) ListFloatingIpsWithContext(ctx context.Context, listFloatingIp
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIPCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIPCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9322,11 +10277,13 @@ func (vpc *VpcV1) CreateFloatingIPWithContext(ctx context.Context, createFloatin
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9435,11 +10392,13 @@ func (vpc *VpcV1) GetFloatingIPWithContext(ctx context.Context, getFloatingIPOpt
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9502,11 +10461,13 @@ func (vpc *VpcV1) UpdateFloatingIPWithContext(ctx context.Context, updateFloatin
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFloatingIP)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9566,11 +10527,13 @@ func (vpc *VpcV1) ListNetworkAclsWithContext(ctx context.Context, listNetworkAcl
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9628,11 +10591,13 @@ func (vpc *VpcV1) CreateNetworkACLWithContext(ctx context.Context, createNetwork
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9741,11 +10706,13 @@ func (vpc *VpcV1) GetNetworkACLWithContext(ctx context.Context, getNetworkACLOpt
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9808,11 +10775,13 @@ func (vpc *VpcV1) UpdateNetworkACLWithContext(ctx context.Context, updateNetwork
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACL)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9879,11 +10848,13 @@ func (vpc *VpcV1) ListNetworkACLRulesWithContext(ctx context.Context, listNetwor
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRuleCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRuleCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -9947,11 +10918,13 @@ func (vpc *VpcV1) CreateNetworkACLRuleWithContext(ctx context.Context, createNet
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRule)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRule)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10061,11 +11034,13 @@ func (vpc *VpcV1) GetNetworkACLRuleWithContext(ctx context.Context, getNetworkAC
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRule)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRule)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10130,11 +11105,13 @@ func (vpc *VpcV1) UpdateNetworkACLRuleWithContext(ctx context.Context, updateNet
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRule)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkACLRule)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10204,11 +11181,13 @@ func (vpc *VpcV1) ListSecurityGroupsWithContext(ctx context.Context, listSecurit
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10284,11 +11263,13 @@ func (vpc *VpcV1) CreateSecurityGroupWithContext(ctx context.Context, createSecu
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroup)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroup)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10398,11 +11379,13 @@ func (vpc *VpcV1) GetSecurityGroupWithContext(ctx context.Context, getSecurityGr
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroup)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroup)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10467,11 +11450,13 @@ func (vpc *VpcV1) UpdateSecurityGroupWithContext(ctx context.Context, updateSecu
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroup)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroup)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10535,11 +11520,13 @@ func (vpc *VpcV1) ListSecurityGroupNetworkInterfacesWithContext(ctx context.Cont
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterfaceCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterfaceCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10653,11 +11640,13 @@ func (vpc *VpcV1) GetSecurityGroupNetworkInterfaceWithContext(ctx context.Contex
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10717,11 +11706,13 @@ func (vpc *VpcV1) AddSecurityGroupNetworkInterfaceWithContext(ctx context.Contex
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalNetworkInterface)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10780,11 +11771,13 @@ func (vpc *VpcV1) ListSecurityGroupRulesWithContext(ctx context.Context, listSec
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRuleCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRuleCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10852,11 +11845,13 @@ func (vpc *VpcV1) CreateSecurityGroupRuleWithContext(ctx context.Context, create
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRule)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRule)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -10967,11 +11962,13 @@ func (vpc *VpcV1) GetSecurityGroupRuleWithContext(ctx context.Context, getSecuri
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRule)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRule)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11037,11 +12034,13 @@ func (vpc *VpcV1) UpdateSecurityGroupRuleWithContext(ctx context.Context, update
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRule)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupRule)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11105,11 +12104,13 @@ func (vpc *VpcV1) ListSecurityGroupTargetsWithContext(ctx context.Context, listS
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupTargetCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupTargetCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11227,11 +12228,13 @@ func (vpc *VpcV1) GetSecurityGroupTargetWithContext(ctx context.Context, getSecu
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupTargetReference)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupTargetReference)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11295,11 +12298,13 @@ func (vpc *VpcV1) CreateSecurityGroupTargetBindingWithContext(ctx context.Contex
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupTargetReference)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalSecurityGroupTargetReference)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11354,11 +12359,13 @@ func (vpc *VpcV1) ListIkePoliciesWithContext(ctx context.Context, listIkePolicie
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicyCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicyCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11439,11 +12446,13 @@ func (vpc *VpcV1) CreateIkePolicyWithContext(ctx context.Context, createIkePolic
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicy)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicy)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11551,11 +12560,13 @@ func (vpc *VpcV1) GetIkePolicyWithContext(ctx context.Context, getIkePolicyOptio
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicy)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicy)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11618,11 +12629,13 @@ func (vpc *VpcV1) UpdateIkePolicyWithContext(ctx context.Context, updateIkePolic
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicy)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIkePolicy)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11679,11 +12692,13 @@ func (vpc *VpcV1) ListIkePolicyConnectionsWithContext(ctx context.Context, listI
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11738,11 +12753,13 @@ func (vpc *VpcV1) ListIpsecPoliciesWithContext(ctx context.Context, listIpsecPol
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicyCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicyCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11820,11 +12837,13 @@ func (vpc *VpcV1) CreateIpsecPolicyWithContext(ctx context.Context, createIpsecP
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicy)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicy)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11932,11 +12951,13 @@ func (vpc *VpcV1) GetIpsecPolicyWithContext(ctx context.Context, getIpsecPolicyO
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicy)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicy)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -11999,11 +13020,13 @@ func (vpc *VpcV1) UpdateIpsecPolicyWithContext(ctx context.Context, updateIpsecP
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicy)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalIPsecPolicy)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12060,11 +13083,13 @@ func (vpc *VpcV1) ListIpsecPolicyConnectionsWithContext(ctx context.Context, lis
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12125,11 +13150,13 @@ func (vpc *VpcV1) ListVPNGatewaysWithContext(ctx context.Context, listVPNGateway
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12188,11 +13215,13 @@ func (vpc *VpcV1) CreateVPNGatewayWithContext(ctx context.Context, createVPNGate
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGateway)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGateway)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12301,11 +13330,13 @@ func (vpc *VpcV1) GetVPNGatewayWithContext(ctx context.Context, getVPNGatewayOpt
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGateway)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGateway)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12368,11 +13399,13 @@ func (vpc *VpcV1) UpdateVPNGatewayWithContext(ctx context.Context, updateVPNGate
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGateway)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGateway)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12432,11 +13465,13 @@ func (vpc *VpcV1) ListVPNGatewayConnectionsWithContext(ctx context.Context, list
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12499,11 +13534,13 @@ func (vpc *VpcV1) CreateVPNGatewayConnectionWithContext(ctx context.Context, cre
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12613,11 +13650,13 @@ func (vpc *VpcV1) GetVPNGatewayConnectionWithContext(ctx context.Context, getVPN
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12681,11 +13720,13 @@ func (vpc *VpcV1) UpdateVPNGatewayConnectionWithContext(ctx context.Context, upd
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12743,11 +13784,13 @@ func (vpc *VpcV1) ListVPNGatewayConnectionLocalCIDRsWithContext(ctx context.Cont
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionLocalCIDRs)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionLocalCIDRs)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -12968,11 +14011,13 @@ func (vpc *VpcV1) ListVPNGatewayConnectionPeerCIDRsWithContext(ctx context.Conte
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionPeerCIDRs)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalVPNGatewayConnectionPeerCIDRs)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13191,11 +14236,13 @@ func (vpc *VpcV1) ListLoadBalancerProfilesWithContext(ctx context.Context, listL
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerProfileCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerProfileCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13252,11 +14299,13 @@ func (vpc *VpcV1) GetLoadBalancerProfileWithContext(ctx context.Context, getLoad
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerProfile)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerProfile)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13294,6 +14343,12 @@ func (vpc *VpcV1) ListLoadBalancersWithContext(ctx context.Context, listLoadBala
 	builder.AddQuery("version", fmt.Sprint(*vpc.Version))
 	builder.AddQuery("generation", fmt.Sprint(*vpc.generation))
+	if listLoadBalancersOptions.Start != nil {
+		builder.AddQuery("start", fmt.Sprint(*listLoadBalancersOptions.Start))
+	}
+	if listLoadBalancersOptions.Limit != nil {
+		builder.AddQuery("limit", fmt.Sprint(*listLoadBalancersOptions.Limit))
+	}
 
 	request, err := builder.Build()
 	if err != nil {
@@ -13305,11 +14360,13 @@ func (vpc *VpcV1) ListLoadBalancersWithContext(ctx context.Context, listLoadBala
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13396,11 +14453,13 @@ func (vpc *VpcV1) CreateLoadBalancerWithContext(ctx context.Context, createLoadB
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
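Reviewer note, not part of the diff: the @@ -13294 hunk above teaches ListLoadBalancers the same start/limit paging used by the snapshot listing. A paging-loop sketch — it assumes the regenerated LoadBalancerCollection exposes a Next.Href link (as SnapshotCollection does) and that Set* setters exist for the new options; both are unverified here:

	// Hypothetical sketch; needs "fmt", "net/url" and the vpcv1 package.
	func listAllLoadBalancers(vpcService *vpcv1.VpcV1) error {
		opts := vpcService.NewListLoadBalancersOptions()
		opts.SetLimit(50)
		for {
			page, _, err := vpcService.ListLoadBalancers(opts)
			if err != nil {
				return err
			}
			for _, lb := range page.LoadBalancers {
				fmt.Println(*lb.Name)
			}
			if page.Next == nil || page.Next.Href == nil {
				return nil
			}
			next, err := url.Parse(*page.Next.Href)
			if err != nil {
				return err
			}
			opts.SetStart(next.Query().Get("start")) // carry the start token forward
		}
	}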
@@ -13508,11 +14567,13 @@ func (vpc *VpcV1) GetLoadBalancerWithContext(ctx context.Context, getLoadBalance
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13575,11 +14636,13 @@ func (vpc *VpcV1) UpdateLoadBalancerWithContext(ctx context.Context, updateLoadB
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancer)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13636,11 +14699,13 @@ func (vpc *VpcV1) GetLoadBalancerStatisticsWithContext(ctx context.Context, getL
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerStatistics)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerStatistics)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13697,11 +14762,13 @@ func (vpc *VpcV1) ListLoadBalancerListenersWithContext(ctx context.Context, list
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13786,11 +14853,13 @@ func (vpc *VpcV1) CreateLoadBalancerListenerWithContext(ctx context.Context, cre
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListener)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListener)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13900,11 +14969,13 @@ func (vpc *VpcV1) GetLoadBalancerListenerWithContext(ctx context.Context, getLoa
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListener)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListener)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -13968,11 +15039,13 @@ func (vpc *VpcV1) UpdateLoadBalancerListenerWithContext(ctx context.Context, upd
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListener)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListener)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
@@ -14030,11 +15103,13 @@ func (vpc *VpcV1) ListLoadBalancerListenerPoliciesWithContext(ctx context.Contex
 	if err != nil {
 		return
 	}
-	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyCollection)
-	if err != nil {
-		return
+	if rawResponse != nil {
+		err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyCollection)
+		if err != nil {
+			return
+		}
+		response.Result = result
 	}
-	response.Result = result
 
 	return
 }
&result, UnmarshalLoadBalancerListenerPolicyCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14114,11 +15189,13 @@ func (vpc *VpcV1) CreateLoadBalancerListenerPolicyWithContext(ctx context.Contex if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicy) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicy) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14230,11 +15307,13 @@ func (vpc *VpcV1) GetLoadBalancerListenerPolicyWithContext(ctx context.Context, if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicy) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicy) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14299,11 +15378,13 @@ func (vpc *VpcV1) UpdateLoadBalancerListenerPolicyWithContext(ctx context.Contex if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicy) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicy) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14362,11 +15443,13 @@ func (vpc *VpcV1) ListLoadBalancerListenerPolicyRulesWithContext(ctx context.Con if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRuleCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRuleCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14444,11 +15527,13 @@ func (vpc *VpcV1) CreateLoadBalancerListenerPolicyRuleWithContext(ctx context.Co if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRule) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRule) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14562,11 +15647,13 @@ func (vpc *VpcV1) GetLoadBalancerListenerPolicyRuleWithContext(ctx context.Conte if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRule) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRule) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14632,11 +15719,13 @@ func (vpc *VpcV1) UpdateLoadBalancerListenerPolicyRuleWithContext(ctx context.Co if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRule) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerListenerPolicyRule) + if err != nil { + return + } + response.Result = 
result } - response.Result = result return } @@ -14693,11 +15782,13 @@ func (vpc *VpcV1) ListLoadBalancerPoolsWithContext(ctx context.Context, listLoad if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14782,11 +15873,13 @@ func (vpc *VpcV1) CreateLoadBalancerPoolWithContext(ctx context.Context, createL if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPool) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPool) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14897,11 +15990,13 @@ func (vpc *VpcV1) GetLoadBalancerPoolWithContext(ctx context.Context, getLoadBal if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPool) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPool) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -14965,11 +16060,13 @@ func (vpc *VpcV1) UpdateLoadBalancerPoolWithContext(ctx context.Context, updateL if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPool) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPool) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15027,11 +16124,13 @@ func (vpc *VpcV1) ListLoadBalancerPoolMembersWithContext(ctx context.Context, li if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMemberCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMemberCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15105,11 +16204,13 @@ func (vpc *VpcV1) CreateLoadBalancerPoolMemberWithContext(ctx context.Context, c if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMember) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMember) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15178,11 +16279,13 @@ func (vpc *VpcV1) ReplaceLoadBalancerPoolMembersWithContext(ctx context.Context, if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMemberCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMemberCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15294,11 +16397,13 @@ func (vpc *VpcV1) GetLoadBalancerPoolMemberWithContext(ctx context.Context, getL if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMember) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, 
"", &result, UnmarshalLoadBalancerPoolMember) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15363,11 +16468,13 @@ func (vpc *VpcV1) UpdateLoadBalancerPoolMemberWithContext(ctx context.Context, u if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMember) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalLoadBalancerPoolMember) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15429,11 +16536,13 @@ func (vpc *VpcV1) ListEndpointGatewaysWithContext(ctx context.Context, listEndpo if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEndpointGatewayCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEndpointGatewayCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15509,11 +16618,13 @@ func (vpc *VpcV1) CreateEndpointGatewayWithContext(ctx context.Context, createEn if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEndpointGateway) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEndpointGateway) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15579,11 +16690,13 @@ func (vpc *VpcV1) ListEndpointGatewayIpsWithContext(ctx context.Context, listEnd if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIPCollectionEndpointGatewayContext) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIPCollectionEndpointGatewayContext) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15695,11 +16808,13 @@ func (vpc *VpcV1) GetEndpointGatewayIPWithContext(ctx context.Context, getEndpoi if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15760,11 +16875,13 @@ func (vpc *VpcV1) AddEndpointGatewayIPWithContext(ctx context.Context, addEndpoi if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalReservedIP) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15875,11 +16992,13 @@ func (vpc *VpcV1) GetEndpointGatewayWithContext(ctx context.Context, getEndpoint if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEndpointGateway) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEndpointGateway) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -15942,11 +17061,13 @@ func (vpc *VpcV1) UpdateEndpointGatewayWithContext(ctx context.Context, updateEn if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEndpointGateway) - if err != nil { - return + if rawResponse 
!= nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalEndpointGateway) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -16023,11 +17144,13 @@ func (vpc *VpcV1) ListFlowLogCollectorsWithContext(ctx context.Context, listFlow if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFlowLogCollectorCollection) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFlowLogCollectorCollection) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -16104,11 +17227,13 @@ func (vpc *VpcV1) CreateFlowLogCollectorWithContext(ctx context.Context, createF if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFlowLogCollector) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFlowLogCollector) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -16217,11 +17342,13 @@ func (vpc *VpcV1) GetFlowLogCollectorWithContext(ctx context.Context, getFlowLog if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFlowLogCollector) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFlowLogCollector) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -16286,11 +17413,13 @@ func (vpc *VpcV1) UpdateFlowLogCollectorWithContext(ctx context.Context, updateF if err != nil { return } - err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFlowLogCollector) - if err != nil { - return + if rawResponse != nil { + err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalFlowLogCollector) + if err != nil { + return + } + response.Result = result } - response.Result = result return } @@ -17031,7 +18160,7 @@ type CreateEndpointGatewayOptions struct { // The VPC this endpoint gateway will serve. VPC VPCIdentityIntf `validate:"required"` - // An array of reserved IPs to bind to this endpoint gateway. At most one reserved IP per zone is allowed. + // The reserved IPs to bind to this endpoint gateway. At most one reserved IP per zone is allowed. Ips []EndpointGatewayReservedIPIntf // The user-defined name for this endpoint gateway. If unspecified, the name will be a hyphenated list of @@ -17431,6 +18560,54 @@ func (options *CreateInstanceConsoleAccessTokenOptions) SetHeaders(param map[str return options } +// CreateInstanceGroupManagerActionOptions : The CreateInstanceGroupManagerAction options. +type CreateInstanceGroupManagerActionOptions struct { + // The instance group identifier. + InstanceGroupID *string `validate:"required,ne="` + + // The instance group manager identifier. + InstanceGroupManagerID *string `validate:"required,ne="` + + // The instance group manager action prototype object. 
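The recurring change in the response-handling hunks above and throughout this file is mechanical: the unmarshal step is now wrapped in `if rawResponse != nil`, so `core.UnmarshalModel` only runs when the service actually returned a body, and `response.Result` is only set on success. A minimal, self-contained sketch of the before/after behavior, using `encoding/json` as a stand-in for the SDK core's unmarshal helper (the `fetchLoadBalancer` function and its types are illustrative, not SDK code):

package main

import (
    "encoding/json"
    "fmt"
)

// loadBalancer stands in for the SDK's LoadBalancer model.
type loadBalancer struct {
    Name string `json:"name"`
}

// detailedResponse mirrors just enough of core.DetailedResponse for the sketch.
type detailedResponse struct {
    StatusCode int
    Result     interface{}
}

// fetchLoadBalancer (hypothetical) applies the patched pattern: when rawBody is
// nil (e.g. a 204 No Content reply), unmarshalling is skipped entirely instead
// of being attempted against a missing body.
func fetchLoadBalancer(rawBody []byte, statusCode int) (result *loadBalancer, response *detailedResponse, err error) {
    response = &detailedResponse{StatusCode: statusCode}
    if rawBody != nil { // the guard added throughout this diff
        result = &loadBalancer{}
        if err = json.Unmarshal(rawBody, result); err != nil {
            return
        }
        response.Result = result
    }
    return
}

func main() {
    // Normal case: a JSON body is present and gets unmarshalled.
    lb, resp, err := fetchLoadBalancer([]byte(`{"name":"my-lb"}`), 200)
    fmt.Println(lb.Name, resp.StatusCode, err) // my-lb 200 <nil>

    // No-content case: previously this would have fed a missing body into the unmarshaller.
    lb, resp, err = fetchLoadBalancer(nil, 204)
    fmt.Println(lb == nil, resp.Result, err) // true <nil> <nil>
}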
+ InstanceGroupManagerActionPrototype InstanceGroupManagerActionPrototypeIntf `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateInstanceGroupManagerActionOptions : Instantiate CreateInstanceGroupManagerActionOptions +func (*VpcV1) NewCreateInstanceGroupManagerActionOptions(instanceGroupID string, instanceGroupManagerID string, instanceGroupManagerActionPrototype InstanceGroupManagerActionPrototypeIntf) *CreateInstanceGroupManagerActionOptions { + return &CreateInstanceGroupManagerActionOptions{ + InstanceGroupID: core.StringPtr(instanceGroupID), + InstanceGroupManagerID: core.StringPtr(instanceGroupManagerID), + InstanceGroupManagerActionPrototype: instanceGroupManagerActionPrototype, + } +} + +// SetInstanceGroupID : Allow user to set InstanceGroupID +func (options *CreateInstanceGroupManagerActionOptions) SetInstanceGroupID(instanceGroupID string) *CreateInstanceGroupManagerActionOptions { + options.InstanceGroupID = core.StringPtr(instanceGroupID) + return options +} + +// SetInstanceGroupManagerID : Allow user to set InstanceGroupManagerID +func (options *CreateInstanceGroupManagerActionOptions) SetInstanceGroupManagerID(instanceGroupManagerID string) *CreateInstanceGroupManagerActionOptions { + options.InstanceGroupManagerID = core.StringPtr(instanceGroupManagerID) + return options +} + +// SetInstanceGroupManagerActionPrototype : Allow user to set InstanceGroupManagerActionPrototype +func (options *CreateInstanceGroupManagerActionOptions) SetInstanceGroupManagerActionPrototype(instanceGroupManagerActionPrototype InstanceGroupManagerActionPrototypeIntf) *CreateInstanceGroupManagerActionOptions { + options.InstanceGroupManagerActionPrototype = instanceGroupManagerActionPrototype + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateInstanceGroupManagerActionOptions) SetHeaders(param map[string]string) *CreateInstanceGroupManagerActionOptions { + options.Headers = param + return options +} + // CreateInstanceGroupManagerOptions : The CreateInstanceGroupManager options. type CreateInstanceGroupManagerOptions struct { // The instance group identifier. @@ -17522,7 +18699,7 @@ type CreateInstanceGroupOptions struct { // Instance template to use when creating new instances. InstanceTemplate InstanceTemplateIdentityIntf `validate:"required"` - // Array of identities to subnets to use when creating new instances. + // The subnets to use when creating new instances. Subnets []SubnetIdentityIntf `validate:"required"` // Required if specifying a load balancer pool only. Used by the instance group when scaling up instances to supply the @@ -17753,8 +18930,8 @@ type CreateInstanceVolumeAttachmentOptions struct { // The instance identifier. InstanceID *string `validate:"required,ne="` - // The identity of the volume to attach to the instance. - Volume VolumeIdentityIntf `validate:"required"` + // An existing volume to attach to the instance, or a prototype object for a new volume. + Volume VolumeAttachmentPrototypeVolumeIntf `validate:"required"` // If set to true, when deleting the instance the volume will also be deleted. 
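The new scheduled-action options above follow the SDK's usual constructor-plus-setters shape. A hedged usage sketch: the module paths, the `CreateInstanceGroupManagerAction` method (implied by the options type), and the `MembershipCount` field on the group prototype are assumptions based on the surrounding generated code; the prototype's `Name` and `RunAt` fields appear later in this diff, and all identifiers are placeholders.

package main

import (
    "fmt"
    "time"

    "github.com/IBM/go-sdk-core/v4/core"
    "github.com/IBM/vpc-go-sdk/vpcv1"
    "github.com/go-openapi/strfmt"
)

func main() {
    vpc, err := vpcv1.NewVpcV1(&vpcv1.VpcV1Options{
        Authenticator: &core.IamAuthenticator{ApiKey: "my-api-key"}, // placeholder
    })
    if err != nil {
        panic(err)
    }

    // A one-shot scheduled action: resize the group to 5 members tomorrow.
    runAt := strfmt.DateTime(time.Now().Add(24 * time.Hour))
    prototype := &vpcv1.InstanceGroupManagerActionPrototype{
        Name:  core.StringPtr("scale-up-tomorrow"),
        RunAt: &runAt,
        // Group prototype fields assumed to mirror the Patch variant shown later in this diff.
        Group: &vpcv1.InstanceGroupManagerScheduledActionGroupPrototype{
            MembershipCount: core.Int64Ptr(5),
        },
    }

    options := vpc.NewCreateInstanceGroupManagerActionOptions(
        "instance-group-id", "manager-id", prototype) // placeholder identifiers

    action, response, err := vpc.CreateInstanceGroupManagerAction(options) // method name assumed from the options type
    if err != nil {
        panic(err)
    }
    fmt.Println(response.StatusCode, action)
}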
DeleteVolumeOnInstanceDelete *bool @@ -17768,7 +18945,7 @@ type CreateInstanceVolumeAttachmentOptions struct { } // NewCreateInstanceVolumeAttachmentOptions : Instantiate CreateInstanceVolumeAttachmentOptions -func (*VpcV1) NewCreateInstanceVolumeAttachmentOptions(instanceID string, volume VolumeIdentityIntf) *CreateInstanceVolumeAttachmentOptions { +func (*VpcV1) NewCreateInstanceVolumeAttachmentOptions(instanceID string, volume VolumeAttachmentPrototypeVolumeIntf) *CreateInstanceVolumeAttachmentOptions { return &CreateInstanceVolumeAttachmentOptions{ InstanceID: core.StringPtr(instanceID), Volume: volume, @@ -17782,7 +18959,7 @@ func (options *CreateInstanceVolumeAttachmentOptions) SetInstanceID(instanceID s } // SetVolume : Allow user to set Volume -func (options *CreateInstanceVolumeAttachmentOptions) SetVolume(volume VolumeIdentityIntf) *CreateInstanceVolumeAttachmentOptions { +func (options *CreateInstanceVolumeAttachmentOptions) SetVolume(volume VolumeAttachmentPrototypeVolumeIntf) *CreateInstanceVolumeAttachmentOptions { options.Volume = volume return options } @@ -18004,7 +19181,7 @@ type CreateLoadBalancerListenerOptions struct { // - Not already be the default pool for another listener. DefaultPool LoadBalancerPoolIdentityIntf - // An array of policies for this listener. + // The policy prototype objects for this listener. Policies []LoadBalancerListenerPolicyPrototype // Allows users to set headers on API requests @@ -18101,7 +19278,7 @@ type CreateLoadBalancerListenerPolicyOptions struct { // The user-defined name for this policy. Names must be unique within the load balancer listener the policy resides in. Name *string - // An array of rules for this policy. + // The rule prototype objects for this policy. Rules []LoadBalancerListenerPolicyRulePrototype // When `action` is `forward`, `LoadBalancerPoolIdentity` is required to specify which @@ -18754,8 +19931,8 @@ type CreateSecurityGroupOptions struct { // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. ResourceGroup ResourceGroupIdentityIntf - // Array of rule prototype objects for rules to be created for this security group. If unspecified, no rules will be - // created, resulting in all traffic being denied. + // The prototype objects for rules to be created for this security group. If unspecified, no rules will be created, + // resulting in all traffic being denied. Rules []SecurityGroupRulePrototypeIntf // Allows users to set headers on API requests @@ -18875,6 +20052,54 @@ func (options *CreateSecurityGroupTargetBindingOptions) SetHeaders(param map[str return options } +// CreateSnapshotOptions : The CreateSnapshot options. +type CreateSnapshotOptions struct { + // The volume to snapshot. + SourceVolume VolumeIdentityIntf `validate:"required"` + + // The unique user-defined name for this snapshot. If unspecified, the name will be a hyphenated list of + // randomly-selected words. + Name *string + + // The resource group to use. If unspecified, the account's [default resource + // group](https://cloud.ibm.com/apidocs/resource-manager#introduction) is used. 
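The `CreateInstanceVolumeAttachmentOptions` change above widens the `Volume` field from an existing-volume identity to the union `VolumeAttachmentPrototypeVolumeIntf`, so callers can now create a fresh volume at attach time instead of only referencing one that already exists. A self-contained sketch of the marker-interface idiom the generated code uses for such unions (type names are simplified stand-ins, not the SDK's):

package main

import "fmt"

// volumePrototypeVolume is the union marker: anything that can appear in the
// Volume field of a volume-attachment prototype implements it.
type volumePrototypeVolume interface {
    isaVolumePrototypeVolume() bool
}

// volumeIdentityByID references an existing volume (the only form accepted
// before this patch).
type volumeIdentityByID struct{ ID string }

func (*volumeIdentityByID) isaVolumePrototypeVolume() bool { return true }

// volumePrototype describes a new volume to create at attach time (the form
// this patch adds).
type volumePrototype struct {
    Name     string
    Capacity int64 // GB
    Profile  string
}

func (*volumePrototype) isaVolumePrototypeVolume() bool { return true }

// attach dispatches on the concrete member of the union, the way the service
// side distinguishes an identity from a prototype.
func attach(v volumePrototypeVolume) {
    switch vol := v.(type) {
    case *volumeIdentityByID:
        fmt.Println("attaching existing volume", vol.ID)
    case *volumePrototype:
        fmt.Printf("creating and attaching %s (%d GB, %s)\n", vol.Name, vol.Capacity, vol.Profile)
    }
}

func main() {
    attach(&volumeIdentityByID{ID: "volume-id"})
    attach(&volumePrototype{Name: "data", Capacity: 100, Profile: "general-purpose"})
}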
+ ResourceGroup ResourceGroupIdentityIntf + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewCreateSnapshotOptions : Instantiate CreateSnapshotOptions +func (*VpcV1) NewCreateSnapshotOptions(sourceVolume VolumeIdentityIntf) *CreateSnapshotOptions { + return &CreateSnapshotOptions{ + SourceVolume: sourceVolume, + } +} + +// SetSourceVolume : Allow user to set SourceVolume +func (options *CreateSnapshotOptions) SetSourceVolume(sourceVolume VolumeIdentityIntf) *CreateSnapshotOptions { + options.SourceVolume = sourceVolume + return options +} + +// SetName : Allow user to set Name +func (options *CreateSnapshotOptions) SetName(name string) *CreateSnapshotOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetResourceGroup : Allow user to set ResourceGroup +func (options *CreateSnapshotOptions) SetResourceGroup(resourceGroup ResourceGroupIdentityIntf) *CreateSnapshotOptions { + options.ResourceGroup = resourceGroup + return options +} + +// SetHeaders : Allow user to set Headers +func (options *CreateSnapshotOptions) SetHeaders(param map[string]string) *CreateSnapshotOptions { + options.Headers = param + return options +} + // CreateSubnetOptions : The CreateSubnet options. type CreateSubnetOptions struct { // The subnet prototype object. @@ -19272,8 +20497,8 @@ type CreateVPCRoutingTableOptions struct { // connection, the packet will be dropped. RouteVPCZoneIngress *bool - // Array of route prototype objects for routes to create for this routing table. If unspecified, the routing table will - // be created with no routes. + // The prototype objects for routes to create for this routing table. If unspecified, the routing table will be created + // with no routes. Routes []RoutePrototype // Allows users to set headers on API requests @@ -19533,7 +20758,7 @@ type DedicatedHost struct { // If set to true, instances can be placed on this dedicated host. InstancePlacementEnabled *bool `json:"instance_placement_enabled" validate:"required"` - // Array of instances that are allocated to this dedicated host. + // The instances that are allocated to this dedicated host. Instances []InstanceReference `json:"instances" validate:"required"` // The lifecycle state of the dedicated host resource. @@ -19568,7 +20793,7 @@ type DedicatedHost struct { // the unexpected property value was encountered. State *string `json:"state" validate:"required"` - // Array of instance profiles that can be used by instances placed on this dedicated host. + // The instance profiles usable by instances placed on this dedicated host. SupportedInstanceProfiles []InstanceProfileReference `json:"supported_instance_profiles" validate:"required"` // The total VCPU of the dedicated host. @@ -19581,7 +20806,6 @@ type DedicatedHost struct { // Constants associated with the DedicatedHost.LifecycleState property. // The lifecycle state of the dedicated host resource. const ( - DedicatedHostLifecycleStateDeletedConst = "deleted" DedicatedHostLifecycleStateDeletingConst = "deleting" DedicatedHostLifecycleStateFailedConst = "failed" DedicatedHostLifecycleStatePendingConst = "pending" @@ -19841,7 +21065,6 @@ const ( // Constants associated with the DedicatedHostDisk.LifecycleState property. // The lifecycle state of this dedicated host disk. 
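With `CreateSnapshotOptions` completed above, creating a snapshot is a short call over a volume identity. A hedged usage sketch (module paths and the `CreateSnapshot` method name are inferred from the options type; the `Snapshot` result fields are assumed, and identifiers are placeholders):

package main

import (
    "fmt"

    "github.com/IBM/go-sdk-core/v4/core"
    "github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
    vpc, err := vpcv1.NewVpcV1(&vpcv1.VpcV1Options{
        Authenticator: &core.IamAuthenticator{ApiKey: "my-api-key"}, // placeholder
    })
    if err != nil {
        panic(err)
    }

    // Snapshot an existing volume by its identifier. Name is optional and,
    // per the doc comment above, defaults to hyphenated random words.
    options := vpc.NewCreateSnapshotOptions(
        &vpcv1.VolumeIdentityByID{ID: core.StringPtr("volume-id")}, // placeholder
    ).SetName("before-upgrade")

    snapshot, response, err := vpc.CreateSnapshot(options) // method name assumed from the options type
    if err != nil {
        panic(err)
    }
    fmt.Println(response.StatusCode, *snapshot.Name)
}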
const ( - DedicatedHostDiskLifecycleStateDeletedConst = "deleted" DedicatedHostDiskLifecycleStateDeletingConst = "deleting" DedicatedHostDiskLifecycleStateFailedConst = "failed" DedicatedHostDiskLifecycleStatePendingConst = "pending" @@ -20000,7 +21223,7 @@ type DedicatedHostGroup struct { // The type of resource referenced. ResourceType *string `json:"resource_type" validate:"required"` - // Array of instance profiles that can be used by instances placed on this dedicated host group. + // The instance profiles usable by instances placed on this dedicated host group. SupportedInstanceProfiles []InstanceProfileReference `json:"supported_instance_profiles" validate:"required"` // The zone this dedicated host group resides in. @@ -20390,7 +21613,7 @@ type DedicatedHostProfile struct { SocketCount DedicatedHostProfileSocketIntf `json:"socket_count" validate:"required"` - // Array of instance profiles that can be used by instances placed on dedicated hosts with this profile. + // The instance profiles usable by instances placed on dedicated hosts with this profile. SupportedInstanceProfiles []InstanceProfileReference `json:"supported_instance_profiles" validate:"required"` VcpuArchitecture *DedicatedHostProfileVcpuArchitecture `json:"vcpu_architecture" validate:"required"` @@ -21322,7 +22545,6 @@ type DefaultRoutingTable struct { // Constants associated with the DefaultRoutingTable.LifecycleState property. // The lifecycle state of the routing table. const ( - DefaultRoutingTableLifecycleStateDeletedConst = "deleted" DefaultRoutingTableLifecycleStateDeletingConst = "deleting" DefaultRoutingTableLifecycleStateFailedConst = "failed" DefaultRoutingTableLifecycleStatePendingConst = "pending" @@ -21414,7 +22636,7 @@ type DefaultSecurityGroup struct { // The resource group for this security group. ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` - // Array of rules for the default security group for a VPC. Defaults to allowing all outbound traffic, and allowing all + // The rules for the default security group for a VPC. Defaults to allowing all outbound traffic, and allowing all // inbound traffic from other interfaces in the VPC's default security group. Rules in the default security group may // be changed, added or removed. Rules []SecurityGroupRuleIntf `json:"rules" validate:"required"` @@ -21686,6 +22908,54 @@ func (options *DeleteInstanceGroupLoadBalancerOptions) SetHeaders(param map[stri return options } +// DeleteInstanceGroupManagerActionOptions : The DeleteInstanceGroupManagerAction options. +type DeleteInstanceGroupManagerActionOptions struct { + // The instance group identifier. + InstanceGroupID *string `validate:"required,ne="` + + // The instance group manager identifier. + InstanceGroupManagerID *string `validate:"required,ne="` + + // The instance group manager action identifier. 
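Here and in several nearby hunks (DedicatedHost and DedicatedHostDisk above, DefaultRoutingTable here, EndpointGateway and FlowLogCollector below), the `deleted` value is dropped from the lifecycle-state constant sets. The doc comments already warn that these enumerations can change, so clients should read lifecycle states tolerantly rather than exhaustively. A self-contained sketch of that advice:

package main

import "log"

// handleLifecycleState demonstrates the tolerant-reader approach the doc
// comments recommend: known states are handled, unknown ones are logged and
// skipped rather than treated as fatal.
func handleLifecycleState(resourceName, state string) {
    switch state {
    case "stable":
        log.Printf("%s is ready", resourceName)
    case "pending", "updating", "waiting", "suspended":
        log.Printf("%s is transitioning (%s); retry later", resourceName, state)
    case "deleting", "failed":
        log.Printf("%s is unusable (%s)", resourceName, state)
    default:
        // A value this client does not know about, e.g. one added (or removed,
        // like "deleted" in this diff) after the client was generated.
        log.Printf("%s reported unknown lifecycle state %q; skipping", resourceName, state)
    }
}

func main() {
    handleLifecycleState("dedicated-host-1", "stable")
    handleLifecycleState("dedicated-host-2", "deleted") // no longer enumerated
}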
+ ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteInstanceGroupManagerActionOptions : Instantiate DeleteInstanceGroupManagerActionOptions +func (*VpcV1) NewDeleteInstanceGroupManagerActionOptions(instanceGroupID string, instanceGroupManagerID string, id string) *DeleteInstanceGroupManagerActionOptions { + return &DeleteInstanceGroupManagerActionOptions{ + InstanceGroupID: core.StringPtr(instanceGroupID), + InstanceGroupManagerID: core.StringPtr(instanceGroupManagerID), + ID: core.StringPtr(id), + } +} + +// SetInstanceGroupID : Allow user to set InstanceGroupID +func (options *DeleteInstanceGroupManagerActionOptions) SetInstanceGroupID(instanceGroupID string) *DeleteInstanceGroupManagerActionOptions { + options.InstanceGroupID = core.StringPtr(instanceGroupID) + return options +} + +// SetInstanceGroupManagerID : Allow user to set InstanceGroupManagerID +func (options *DeleteInstanceGroupManagerActionOptions) SetInstanceGroupManagerID(instanceGroupManagerID string) *DeleteInstanceGroupManagerActionOptions { + options.InstanceGroupManagerID = core.StringPtr(instanceGroupManagerID) + return options +} + +// SetID : Allow user to set ID +func (options *DeleteInstanceGroupManagerActionOptions) SetID(id string) *DeleteInstanceGroupManagerActionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteInstanceGroupManagerActionOptions) SetHeaders(param map[string]string) *DeleteInstanceGroupManagerActionOptions { + options.Headers = param + return options +} + // DeleteInstanceGroupManagerOptions : The DeleteInstanceGroupManager options. type DeleteInstanceGroupManagerOptions struct { // The instance group identifier. @@ -22510,6 +23780,62 @@ func (options *DeleteSecurityGroupTargetBindingOptions) SetHeaders(param map[str return options } +// DeleteSnapshotOptions : The DeleteSnapshot options. +type DeleteSnapshotOptions struct { + // The snapshot identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSnapshotOptions : Instantiate DeleteSnapshotOptions +func (*VpcV1) NewDeleteSnapshotOptions(id string) *DeleteSnapshotOptions { + return &DeleteSnapshotOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *DeleteSnapshotOptions) SetID(id string) *DeleteSnapshotOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSnapshotOptions) SetHeaders(param map[string]string) *DeleteSnapshotOptions { + options.Headers = param + return options +} + +// DeleteSnapshotsOptions : The DeleteSnapshots options. +type DeleteSnapshotsOptions struct { + // Filters the collection to resources with the source volume with the specified identifier. 
+ SourceVolumeID *string `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewDeleteSnapshotsOptions : Instantiate DeleteSnapshotsOptions +func (*VpcV1) NewDeleteSnapshotsOptions(sourceVolumeID string) *DeleteSnapshotsOptions { + return &DeleteSnapshotsOptions{ + SourceVolumeID: core.StringPtr(sourceVolumeID), + } +} + +// SetSourceVolumeID : Allow user to set SourceVolumeID +func (options *DeleteSnapshotsOptions) SetSourceVolumeID(sourceVolumeID string) *DeleteSnapshotsOptions { + options.SourceVolumeID = core.StringPtr(sourceVolumeID) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *DeleteSnapshotsOptions) SetHeaders(param map[string]string) *DeleteSnapshotsOptions { + options.Headers = param + return options +} + // DeleteSubnetOptions : The DeleteSubnet options. type DeleteSubnetOptions struct { // The subnet identifier. @@ -22931,7 +24257,7 @@ type EndpointGateway struct { // The unique identifier for this endpoint gateway. ID *string `json:"id" validate:"required"` - // Collection of reserved IPs bound to an endpoint gateway. + // The reserved IPs bound to this endpoint gateway. Ips []ReservedIPReference `json:"ips" validate:"required"` // The lifecycle state of the endpoint gateway. @@ -22949,7 +24275,7 @@ type EndpointGateway struct { // The fully qualified domain name for the target service. ServiceEndpoint *string `json:"service_endpoint,omitempty"` - // Collection of fully qualified domain names for the target service. + // The fully qualified domain names for the target service. ServiceEndpoints []string `json:"service_endpoints" validate:"required"` // The target for this endpoint gateway. @@ -22977,7 +24303,6 @@ const ( // Constants associated with the EndpointGateway.LifecycleState property. // The lifecycle state of the endpoint gateway. const ( - EndpointGatewayLifecycleStateDeletedConst = "deleted" EndpointGatewayLifecycleStateDeletingConst = "deleting" EndpointGatewayLifecycleStateFailedConst = "failed" EndpointGatewayLifecycleStatePendingConst = "pending" @@ -23182,9 +24507,9 @@ func UnmarshalEndpointGatewayReferenceDeleted(m map[string]json.RawMessage, resu return } -// EndpointGatewayReservedIP : A reserved IP to bind to the endpoint gateway. This can be an existing reserved IP, or a prototype used to allocate a -// reserved IP. The reserved IP will be bound to the endpoint gateway to function as a virtual private endpoint for the -// service. +// EndpointGatewayReservedIP : A reserved IP to bind to the endpoint gateway. This can be specified using an existing reserved IP, or a prototype +// object for a new reserved IP. The reserved IP will be bound to the endpoint gateway to function as a virtual private +// endpoint for the service. // Models which "extend" this model: // - EndpointGatewayReservedIPReservedIPIdentity // - EndpointGatewayReservedIPReservedIPPrototypeTargetContext @@ -23882,7 +25207,6 @@ type FlowLogCollector struct { // Constants associated with the FlowLogCollector.LifecycleState property. // The lifecycle state of the flow log collector. 
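`DeleteSnapshotsOptions`, completed above, is unusual among the delete options in that its required value is a filter rather than a resource identifier: one call deletes every snapshot taken from the given source volume. A hedged usage sketch (module paths and the `DeleteSnapshots` method name are inferred from the options type; the identifier is a placeholder):

package main

import (
    "fmt"

    "github.com/IBM/go-sdk-core/v4/core"
    "github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
    vpc, err := vpcv1.NewVpcV1(&vpcv1.VpcV1Options{
        Authenticator: &core.IamAuthenticator{ApiKey: "my-api-key"}, // placeholder
    })
    if err != nil {
        panic(err)
    }

    // Remove all snapshots taken from one volume in a single request.
    options := vpc.NewDeleteSnapshotsOptions("volume-id") // placeholder
    response, err := vpc.DeleteSnapshots(options)         // method name assumed from the options type
    if err != nil {
        panic(err)
    }
    fmt.Println(response.StatusCode) // expected 204 on success
}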
const ( - FlowLogCollectorLifecycleStateDeletedConst = "deleted" FlowLogCollectorLifecycleStateDeletingConst = "deleting" FlowLogCollectorLifecycleStateFailedConst = "failed" FlowLogCollectorLifecycleStatePendingConst = "pending" @@ -24520,6 +25844,54 @@ func (options *GetInstanceDiskOptions) SetHeaders(param map[string]string) *GetI return options } +// GetInstanceGroupManagerActionOptions : The GetInstanceGroupManagerAction options. +type GetInstanceGroupManagerActionOptions struct { + // The instance group identifier. + InstanceGroupID *string `validate:"required,ne="` + + // The instance group manager identifier. + InstanceGroupManagerID *string `validate:"required,ne="` + + // The instance group manager action identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetInstanceGroupManagerActionOptions : Instantiate GetInstanceGroupManagerActionOptions +func (*VpcV1) NewGetInstanceGroupManagerActionOptions(instanceGroupID string, instanceGroupManagerID string, id string) *GetInstanceGroupManagerActionOptions { + return &GetInstanceGroupManagerActionOptions{ + InstanceGroupID: core.StringPtr(instanceGroupID), + InstanceGroupManagerID: core.StringPtr(instanceGroupManagerID), + ID: core.StringPtr(id), + } +} + +// SetInstanceGroupID : Allow user to set InstanceGroupID +func (options *GetInstanceGroupManagerActionOptions) SetInstanceGroupID(instanceGroupID string) *GetInstanceGroupManagerActionOptions { + options.InstanceGroupID = core.StringPtr(instanceGroupID) + return options +} + +// SetInstanceGroupManagerID : Allow user to set InstanceGroupManagerID +func (options *GetInstanceGroupManagerActionOptions) SetInstanceGroupManagerID(instanceGroupManagerID string) *GetInstanceGroupManagerActionOptions { + options.InstanceGroupManagerID = core.StringPtr(instanceGroupManagerID) + return options +} + +// SetID : Allow user to set ID +func (options *GetInstanceGroupManagerActionOptions) SetID(id string) *GetInstanceGroupManagerActionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetInstanceGroupManagerActionOptions) SetHeaders(param map[string]string) *GetInstanceGroupManagerActionOptions { + options.Headers = param + return options +} + // GetInstanceGroupManagerOptions : The GetInstanceGroupManager options. type GetInstanceGroupManagerOptions struct { // The instance group identifier. @@ -25608,6 +26980,34 @@ func (options *GetSecurityGroupTargetOptions) SetHeaders(param map[string]string return options } +// GetSnapshotOptions : The GetSnapshot options. +type GetSnapshotOptions struct { + // The snapshot identifier. + ID *string `validate:"required,ne="` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewGetSnapshotOptions : Instantiate GetSnapshotOptions +func (*VpcV1) NewGetSnapshotOptions(id string) *GetSnapshotOptions { + return &GetSnapshotOptions{ + ID: core.StringPtr(id), + } +} + +// SetID : Allow user to set ID +func (options *GetSnapshotOptions) SetID(id string) *GetSnapshotOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *GetSnapshotOptions) SetHeaders(param map[string]string) *GetSnapshotOptions { + options.Headers = param + return options +} + // GetSubnetNetworkACLOptions : The GetSubnetNetworkACL options. type GetSubnetNetworkACLOptions struct { // The subnet identifier. 
@@ -26159,7 +27559,7 @@ type IkePolicy struct { // The authentication algorithm. AuthenticationAlgorithm *string `json:"authentication_algorithm" validate:"required"` - // Collection of references to VPN gateway connections that use this IKE policy. + // The VPN gateway connections that use this IKE policy. Connections []VPNGatewayConnectionReference `json:"connections" validate:"required"` // The date and time that this IKE policy was created. @@ -26520,7 +27920,7 @@ type IPsecPolicy struct { // The authentication algorithm. AuthenticationAlgorithm *string `json:"authentication_algorithm" validate:"required"` - // Collection of references to VPN gateway connections that use this IPsec policy. + // The VPN gateway connections that use this IPsec policy. Connections []VPNGatewayConnectionReference `json:"connections" validate:"required"` // The date and time that this IPsec policy was created. @@ -27021,6 +28421,11 @@ type Image struct { // The resource group for this image. ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + // The volume used to create this image (this may be + // [deleted](https://cloud.ibm.com/apidocs/vpc#deleted-resources)). + // If absent, this image was not created from a volume. + SourceVolume *VolumeReference `json:"source_volume,omitempty"` + // The status of this image // - available: image can be used (provisionable) // - deleting: image is being deleted, and can no longer be used to provision new @@ -27037,7 +28442,7 @@ type Image struct { // unexpected property value was encountered. Status *string `json:"status" validate:"required"` - // Array of reasons for the current status (if any). + // The reasons for the current status (if any). // // The enumerated reason code values for this property will expand in the future. When processing this property, check // for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the @@ -27134,6 +28539,10 @@ func UnmarshalImage(m map[string]json.RawMessage, result interface{}) (err error if err != nil { return } + err = core.UnmarshalModel(m, "source_volume", &obj.SourceVolume, UnmarshalVolumeReference) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "status", &obj.Status) if err != nil { return @@ -27341,7 +28750,7 @@ func UnmarshalImageIdentity(m map[string]json.RawMessage, result interface{}) (e // ImagePatch : ImagePatch struct type ImagePatch struct { - // The unique user-defined name for this image. Names starting with "ibm-" are not allowed. + // The unique user-defined name for this image. Names starting with `ibm-` are not allowed. Name *string `json:"name,omitempty"` } @@ -27369,8 +28778,9 @@ func (imagePatch *ImagePatch) AsPatch() (patch map[string]interface{}, err error // ImagePrototype : ImagePrototype struct // Models which "extend" this model: // - ImagePrototypeImageByFile +// - ImagePrototypeImageBySourceVolume type ImagePrototype struct { - // The unique user-defined name for this image. Names starting with "ibm-" are not allowed. If unspecified, the name + // The unique user-defined name for this image. Names starting with `ibm-` are not allowed. If unspecified, the name // will be a hyphenated list of randomly-selected words. Name *string `json:"name,omitempty"` @@ -27387,10 +28797,10 @@ type ImagePrototype struct { // If this property is not provided, the imported image is treated as unencrypted. 
EncryptedDataKey *string `json:"encrypted_data_key,omitempty"` - // The identity of the root key that was used to wrap the data key (which is ultimately - // represented as `encrypted_data_key`). Additionally, the root key will be used to encrypt - // volumes created from this image (unless an alternate `encryption_key` is provided at - // volume creation). + // The root key that was used to wrap the data key (which is ultimately represented as + // `encrypted_data_key`). Additionally, the root key will be used to encrypt volumes + // created from this image (unless an alternate `encryption_key` is provided at volume + // creation). // // If this property is not provided, the imported image is treated as unencrypted. EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` @@ -27398,10 +28808,18 @@ type ImagePrototype struct { // The file from which to create the image. File *ImageFilePrototype `json:"file,omitempty"` - // The identity of the [supported operating - // system](https://cloud.ibm.com/apidocs/vpc#list-operating-systems) included in - // this image. + // The [supported operating + // system](https://cloud.ibm.com/apidocs/vpc#list-operating-systems) included in this + // image. OperatingSystem OperatingSystemIdentityIntf `json:"operating_system,omitempty"` + + // The volume from which to create the image. The specified volume must: + // - Originate from an image, which will be used to populate this image's + // operating system information. + // - Not be `active` or `busy`. + // + // During image creation, the specified volume may briefly become `busy`. + SourceVolume VolumeIdentityIntf `json:"source_volume,omitempty"` } func (*ImagePrototype) isaImagePrototype() bool { @@ -27439,6 +28857,10 @@ func UnmarshalImagePrototype(m map[string]json.RawMessage, result interface{}) ( if err != nil { return } + err = core.UnmarshalModel(m, "source_volume", &obj.SourceVolume, UnmarshalVolumeIdentity) + if err != nil { + return + } reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) return } @@ -27522,8 +28944,10 @@ type ImageStatusReason struct { // Constants associated with the ImageStatusReason.Code property. // A snake case string succinctly identifying the status reason. const ( - ImageStatusReasonCodeEncryptionKeyDeletedConst = "encryption_key_deleted" - ImageStatusReasonCodeEncryptionKeyDisabledConst = "encryption_key_disabled" + ImageStatusReasonCodeEncryptionKeyDeletedConst = "encryption_key_deleted" + ImageStatusReasonCodeEncryptionKeyDisabledConst = "encryption_key_disabled" + ImageStatusReasonCodeImageRequestInProgressConst = "image_request_in_progress" + ImageStatusReasonCodeImageRequestQueuedConst = "image_request_queued" ) // UnmarshalImageStatusReason unmarshals an instance of ImageStatusReason from the specified map of raw messages. @@ -27559,7 +28983,7 @@ type Instance struct { // The CRN for this virtual server instance. CRN *string `json:"crn" validate:"required"` - // Collection of the instance's disks. + // The instance disks for this virtual server instance. Disks []InstanceDisk `json:"disks" validate:"required"` // The virtual server instance GPU configuration. @@ -27580,7 +29004,7 @@ type Instance struct { // The user-defined name for this virtual server instance (and default system hostname). Name *string `json:"name" validate:"required"` - // Collection of the virtual server instance's network interfaces, including the primary network interface. 
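The `source_volume` additions above enable image capture from a volume: `ImagePrototype` now accepts a `SourceVolume` instead of a `File`, subject to the constraints listed (the volume must originate from an image, must not be `active` or `busy`, and may briefly become `busy` during capture). A hedged usage sketch (module paths, the `CreateImage` method and its options constructor, and the result fields are assumptions based on the surrounding generated code; identifiers are placeholders):

package main

import (
    "fmt"

    "github.com/IBM/go-sdk-core/v4/core"
    "github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
    vpc, err := vpcv1.NewVpcV1(&vpcv1.VpcV1Options{
        Authenticator: &core.IamAuthenticator{ApiKey: "my-api-key"}, // placeholder
    })
    if err != nil {
        panic(err)
    }

    // The source volume must itself originate from an image so that the new
    // image's operating system information can be populated from it.
    prototype := &vpcv1.ImagePrototype{
        Name:         core.StringPtr("my-custom-image"),
        SourceVolume: &vpcv1.VolumeIdentityByID{ID: core.StringPtr("volume-id")}, // placeholder
    }

    image, response, err := vpc.CreateImage(vpc.NewCreateImageOptions(prototype)) // method and constructor assumed
    if err != nil {
        panic(err)
    }
    // While the request is processed, Image.StatusReasons may carry the new
    // image_request_queued / image_request_in_progress codes shown above.
    fmt.Println(response.StatusCode, *image.Status)
}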
+ // The network interfaces for this virtual server instance, including the primary network interface. NetworkInterfaces []NetworkInterfaceInstanceContextReference `json:"network_interfaces" validate:"required"` // Primary network interface. @@ -27592,13 +29016,23 @@ type Instance struct { // The resource group for this instance. ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + // Indicates whether the state of the virtual server instance permits a start request. + Startable *bool `json:"startable" validate:"required"` + // The status of the virtual server instance. Status *string `json:"status" validate:"required"` + // The reasons for the current status (if any). + // + // The enumerated reason code values for this property will expand in the future. When processing this property, check + // for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the + // unexpected reason code was encountered. + StatusReasons []InstanceStatusReason `json:"status_reasons" validate:"required"` + // The virtual server instance VCPU configuration. Vcpu *InstanceVcpu `json:"vcpu" validate:"required"` - // Collection of the virtual server instance's volume attachments, including the boot volume attachment. + // The volume attachments for this virtual server instance, including the boot volume attachment. VolumeAttachments []VolumeAttachmentReferenceInstanceContext `json:"volume_attachments" validate:"required"` // The VPC this virtual server instance resides in. @@ -27687,10 +29121,18 @@ func UnmarshalInstance(m map[string]json.RawMessage, result interface{}) (err er if err != nil { return } + err = core.UnmarshalPrimitive(m, "startable", &obj.Startable) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "status", &obj.Status) if err != nil { return } + err = core.UnmarshalModel(m, "status_reasons", &obj.StatusReasons, UnmarshalInstanceStatusReason) + if err != nil { + return + } err = core.UnmarshalModel(m, "vcpu", &obj.Vcpu, UnmarshalInstanceVcpu) if err != nil { return @@ -28192,7 +29634,7 @@ type InstanceGroup struct { // pool created. LoadBalancerPool *LoadBalancerPoolReference `json:"load_balancer_pool,omitempty"` - // Array of references to managers for the instance group. + // The managers for the instance group. Managers []InstanceGroupManagerReference `json:"managers" validate:"required"` // The number of instances in the instance group. @@ -28211,9 +29653,12 @@ type InstanceGroup struct { // - `unhealthy`: Group is unable to reach `membership_count` instances. Status *string `json:"status" validate:"required"` - // Array of references to subnets to use when creating new instances. + // The subnets to use when creating new instances. Subnets []SubnetReference `json:"subnets" validate:"required"` + // The date and time that the instance group was updated. + UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` + // The VPC the instance group resides in. 
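The new `Startable` and `StatusReasons` properties on `Instance`, added above, let callers decide whether a start request can succeed and, if not, why. A hedged usage sketch (`GetInstance` and its options constructor exist in this SDK; the `Code` and `Message` fields on `InstanceStatusReason` are assumed to mirror the other status-reason models; the identifier is a placeholder):

package main

import (
    "log"

    "github.com/IBM/go-sdk-core/v4/core"
    "github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
    vpc, err := vpcv1.NewVpcV1(&vpcv1.VpcV1Options{
        Authenticator: &core.IamAuthenticator{ApiKey: "my-api-key"}, // placeholder
    })
    if err != nil {
        log.Fatal(err)
    }

    instance, _, err := vpc.GetInstance(vpc.NewGetInstanceOptions("instance-id")) // placeholder
    if err != nil {
        log.Fatal(err)
    }

    if instance.Startable != nil && *instance.Startable {
        log.Println("instance can be started")
        return
    }
    // Surface the machine-readable reasons; per the doc comment, unknown
    // reason codes should be logged rather than treated as fatal.
    for _, reason := range instance.StatusReasons {
        log.Printf("not startable: code=%s message=%s", *reason.Code, *reason.Message)
    }
}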
VPC *VPCReference `json:"vpc" validate:"required"` } @@ -28287,6 +29732,10 @@ func UnmarshalInstanceGroup(m map[string]json.RawMessage, result interface{}) (e if err != nil { return } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCReference) if err != nil { return @@ -28378,7 +29827,11 @@ func UnmarshalInstanceGroupCollectionNext(m map[string]json.RawMessage, result i // InstanceGroupManager : InstanceGroupManager struct // Models which "extend" this model: // - InstanceGroupManagerAutoScale +// - InstanceGroupManagerScheduled type InstanceGroupManager struct { + // The date and time that the instance group manager was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + // The URL for this instance group manager. Href *string `json:"href" validate:"required"` @@ -28391,6 +29844,9 @@ type InstanceGroupManager struct { // The user-defined name for this instance group manager. Names must be unique within the instance group. Name *string `json:"name" validate:"required"` + // The date and time that the instance group manager was updated. + UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` + // The time window in seconds to aggregate metrics prior to evaluation. AggregationWindow *int64 `json:"aggregation_window,omitempty"` @@ -28408,6 +29864,9 @@ type InstanceGroupManager struct { // The policies of the instance group manager. Policies []InstanceGroupManagerPolicyReference `json:"policies,omitempty"` + + // The actions of the instance group manager. + Actions []InstanceGroupManagerActionReference `json:"actions,omitempty"` } // Constants associated with the InstanceGroupManager.ManagerType property. @@ -28427,6 +29886,10 @@ type InstanceGroupManagerIntf interface { // UnmarshalInstanceGroupManager unmarshals an instance of InstanceGroupManager from the specified map of raw messages. func UnmarshalInstanceGroupManager(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(InstanceGroupManager) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "href", &obj.Href) if err != nil { return @@ -28443,6 +29906,10 @@ func UnmarshalInstanceGroupManager(m map[string]json.RawMessage, result interfac if err != nil { return } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "aggregation_window", &obj.AggregationWindow) if err != nil { return @@ -28467,6 +29934,472 @@ func UnmarshalInstanceGroupManager(m map[string]json.RawMessage, result interfac if err != nil { return } + err = core.UnmarshalModel(m, "actions", &obj.Actions, UnmarshalInstanceGroupManagerActionReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerAction : InstanceGroupManagerAction struct +// Models which "extend" this model: +// - InstanceGroupManagerActionScheduledAction +type InstanceGroupManagerAction struct { + // If set to `true`, this scheduled action will be automatically deleted after it has finished and the + // `auto_delete_timeout` time has passed. + AutoDelete *bool `json:"auto_delete" validate:"required"` + + // Amount of time in hours that are required to pass before the scheduled action will be automatically deleted once it + // has finished. If this value is 0, the action will be deleted on completion. 
+ AutoDeleteTimeout *int64 `json:"auto_delete_timeout" validate:"required"` + + // The date and time that the instance group manager action was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The URL for this instance group manager action. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this instance group manager action. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The status of the instance group action + // - `active`: Action is ready to be run + // - `completed`: Action was completed successfully + // - `failed`: Action could not be completed successfully + // - `incompatible`: Action parameters are not compatible with the group or manager + // - `omitted`: Action was not applied because this action's manager was disabled. + Status *string `json:"status" validate:"required"` + + // The date and time that the instance group manager action was modified. + UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` + + // The type of action for the instance group. + ActionType *string `json:"action_type,omitempty"` + + // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min + // period. + CronSpec *string `json:"cron_spec,omitempty"` + + // The date and time the scheduled action was last applied. If empty the action has never been applied. + LastAppliedAt *strfmt.DateTime `json:"last_applied_at,omitempty"` + + // The date and time the scheduled action will next run. If empty the system is currently calculating the next run + // time. + NextRunAt *strfmt.DateTime `json:"next_run_at,omitempty"` + + Group *InstanceGroupManagerScheduledActionGroup `json:"group,omitempty"` + + Manager InstanceGroupManagerScheduledActionManagerIntf `json:"manager,omitempty"` +} + +// Constants associated with the InstanceGroupManagerAction.ResourceType property. +// The resource type. +const ( + InstanceGroupManagerActionResourceTypeInstanceGroupManagerActionConst = "instance_group_manager_action" +) + +// Constants associated with the InstanceGroupManagerAction.Status property. +// The status of the instance group action +// - `active`: Action is ready to be run +// - `completed`: Action was completed successfully +// - `failed`: Action could not be completed successfully +// - `incompatible`: Action parameters are not compatible with the group or manager +// - `omitted`: Action was not applied because this action's manager was disabled. +const ( + InstanceGroupManagerActionStatusActiveConst = "active" + InstanceGroupManagerActionStatusCompletedConst = "completed" + InstanceGroupManagerActionStatusFailedConst = "failed" + InstanceGroupManagerActionStatusIncompatibleConst = "incompatible" + InstanceGroupManagerActionStatusOmittedConst = "omitted" +) + +// Constants associated with the InstanceGroupManagerAction.ActionType property. +// The type of action for the instance group. 
+const ( + InstanceGroupManagerActionActionTypeScheduledConst = "scheduled" +) + +func (*InstanceGroupManagerAction) isaInstanceGroupManagerAction() bool { + return true +} + +type InstanceGroupManagerActionIntf interface { + isaInstanceGroupManagerAction() bool +} + +// UnmarshalInstanceGroupManagerAction unmarshals an instance of InstanceGroupManagerAction from the specified map of raw messages. +func UnmarshalInstanceGroupManagerAction(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerAction) + err = core.UnmarshalPrimitive(m, "auto_delete", &obj.AutoDelete) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "auto_delete_timeout", &obj.AutoDeleteTimeout) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "action_type", &obj.ActionType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_applied_at", &obj.LastAppliedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next_run_at", &obj.NextRunAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerScheduledActionGroup) + if err != nil { + return + } + err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerScheduledActionManager) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionGroupPatch : InstanceGroupManagerActionGroupPatch struct +type InstanceGroupManagerActionGroupPatch struct { + // The number of members the instance group should have at the scheduled time. + MembershipCount *int64 `json:"membership_count,omitempty"` +} + +// UnmarshalInstanceGroupManagerActionGroupPatch unmarshals an instance of InstanceGroupManagerActionGroupPatch from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionGroupPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionGroupPatch) + err = core.UnmarshalPrimitive(m, "membership_count", &obj.MembershipCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionManagerPatch : InstanceGroupManagerActionManagerPatch struct +type InstanceGroupManagerActionManagerPatch struct { + // The maximum number of members the instance group should have at the scheduled time. + MaxMembershipCount *int64 `json:"max_membership_count,omitempty"` + + // The minimum number of members the instance group should have at the scheduled time. + MinMembershipCount *int64 `json:"min_membership_count,omitempty"` +} + +// UnmarshalInstanceGroupManagerActionManagerPatch unmarshals an instance of InstanceGroupManagerActionManagerPatch from the specified map of raw messages. 
+func UnmarshalInstanceGroupManagerActionManagerPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionManagerPatch) + err = core.UnmarshalPrimitive(m, "max_membership_count", &obj.MaxMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "min_membership_count", &obj.MinMembershipCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionPatch : InstanceGroupManagerActionPatch struct +type InstanceGroupManagerActionPatch struct { + // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min + // period. + CronSpec *string `json:"cron_spec,omitempty"` + + Group *InstanceGroupManagerActionGroupPatch `json:"group,omitempty"` + + Manager *InstanceGroupManagerActionManagerPatch `json:"manager,omitempty"` + + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name,omitempty"` + + // The date and time the scheduled action will run. + RunAt *strfmt.DateTime `json:"run_at,omitempty"` +} + +// UnmarshalInstanceGroupManagerActionPatch unmarshals an instance of InstanceGroupManagerActionPatch from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionPatch) + err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerActionGroupPatch) + if err != nil { + return + } + err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerActionManagerPatch) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "run_at", &obj.RunAt) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the InstanceGroupManagerActionPatch +func (instanceGroupManagerActionPatch *InstanceGroupManagerActionPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(instanceGroupManagerActionPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// InstanceGroupManagerActionPrototype : InstanceGroupManagerActionPrototype struct +// Models which "extend" this model: +// - InstanceGroupManagerActionPrototypeScheduledActionPrototype +type InstanceGroupManagerActionPrototype struct { + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name,omitempty"` + + // The date and time the scheduled action will run. + RunAt *strfmt.DateTime `json:"run_at,omitempty"` + + Group *InstanceGroupManagerScheduledActionGroupPrototype `json:"group,omitempty"` + + Manager InstanceGroupManagerScheduledActionManagerPrototypeIntf `json:"manager,omitempty"` + + // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min + // period. 
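`AsPatch` above is the SDK's standard trick for building a JSON merge patch: marshal the typed patch struct (whose fields are all pointers with `omitempty`) and unmarshal the bytes into a generic map, so only explicitly set fields survive. A self-contained reproduction of that round trip (the `actionPatch` type is a simplified stand-in):

package main

import (
    "encoding/json"
    "fmt"
)

// actionPatch mirrors the shape of InstanceGroupManagerActionPatch: pointer
// fields plus omitempty, so unset fields vanish from the serialized form.
type actionPatch struct {
    CronSpec *string `json:"cron_spec,omitempty"`
    Name     *string `json:"name,omitempty"`
}

// asPatch reproduces the generated AsPatch implementation: marshal, then
// unmarshal into a generic map.
func (p *actionPatch) asPatch() (map[string]interface{}, error) {
    data, err := json.Marshal(p)
    if err != nil {
        return nil, err
    }
    var patch map[string]interface{}
    err = json.Unmarshal(data, &patch)
    return patch, err
}

func main() {
    name := "run-hourly"
    patch, err := (&actionPatch{Name: &name}).asPatch()
    if err != nil {
        panic(err)
    }
    fmt.Println(patch) // map[name:run-hourly] — cron_spec is omitted, not nulled
}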
+ CronSpec *string `json:"cron_spec,omitempty"` +} + +func (*InstanceGroupManagerActionPrototype) isaInstanceGroupManagerActionPrototype() bool { + return true +} + +type InstanceGroupManagerActionPrototypeIntf interface { + isaInstanceGroupManagerActionPrototype() bool +} + +// UnmarshalInstanceGroupManagerActionPrototype unmarshals an instance of InstanceGroupManagerActionPrototype from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionPrototype) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "run_at", &obj.RunAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerScheduledActionGroupPrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerScheduledActionManagerPrototype) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionReference : InstanceGroupManagerActionReference struct +type InstanceGroupManagerActionReference struct { + // If present, this property indicates the referenced resource has been deleted and provides + // some supplementary information. + Deleted *InstanceGroupManagerActionReferenceDeleted `json:"deleted,omitempty"` + + // The URL for this instance group manager action. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this instance group manager action. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the InstanceGroupManagerActionReference.ResourceType property. +// The resource type. +const ( + InstanceGroupManagerActionReferenceResourceTypeInstanceGroupManagerActionConst = "instance_group_manager_action" +) + +// UnmarshalInstanceGroupManagerActionReference unmarshals an instance of InstanceGroupManagerActionReference from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionReference) + err = core.UnmarshalModel(m, "deleted", &obj.Deleted, UnmarshalInstanceGroupManagerActionReferenceDeleted) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionReferenceDeleted : If present, this property indicates the referenced resource has been deleted and provides some supplementary +// information. +type InstanceGroupManagerActionReferenceDeleted struct { + // Link to documentation about deleted resources. 
+ MoreInfo *string `json:"more_info" validate:"required"` +} + +// UnmarshalInstanceGroupManagerActionReferenceDeleted unmarshals an instance of InstanceGroupManagerActionReferenceDeleted from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionReferenceDeleted(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionReferenceDeleted) + err = core.UnmarshalPrimitive(m, "more_info", &obj.MoreInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionsCollection : InstanceGroupManagerActionsCollection struct +type InstanceGroupManagerActionsCollection struct { + // Collection of instance group manager actions. + Actions []InstanceGroupManagerActionIntf `json:"actions" validate:"required"` + + // A link to the first page of resources. + First *InstanceGroupManagerActionsCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *InstanceGroupManagerActionsCollectionNext `json:"next,omitempty"` + + // The total number of resources across all pages. + TotalCount *int64 `json:"total_count" validate:"required"` +} + +// UnmarshalInstanceGroupManagerActionsCollection unmarshals an instance of InstanceGroupManagerActionsCollection from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionsCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionsCollection) + err = core.UnmarshalModel(m, "actions", &obj.Actions, UnmarshalInstanceGroupManagerAction) + if err != nil { + return + } + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalInstanceGroupManagerActionsCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalInstanceGroupManagerActionsCollectionNext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionsCollectionFirst : A link to the first page of resources. +type InstanceGroupManagerActionsCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalInstanceGroupManagerActionsCollectionFirst unmarshals an instance of InstanceGroupManagerActionsCollectionFirst from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionsCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionsCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionsCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type InstanceGroupManagerActionsCollectionNext struct { + // The URL for a page of resources. 
+ Href *string `json:"href" validate:"required"` +} + +// UnmarshalInstanceGroupManagerActionsCollectionNext unmarshals an instance of InstanceGroupManagerActionsCollectionNext from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionsCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionsCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) return } @@ -28617,6 +30550,9 @@ func (instanceGroupManagerPatch *InstanceGroupManagerPatch) AsPatch() (patch map // Models which "extend" this model: // - InstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy type InstanceGroupManagerPolicy struct { + // The date and time that the instance group manager policy was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + // The URL for this instance group manager policy. Href *string `json:"href" validate:"required"` @@ -28627,6 +30563,9 @@ type InstanceGroupManagerPolicy struct { // manager. Name *string `json:"name" validate:"required"` + // The date and time that the instance group manager policy was updated. + UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` + // The type of metric to be evaluated. MetricType *string `json:"metric_type,omitempty"` @@ -28663,6 +30602,10 @@ type InstanceGroupManagerPolicyIntf interface { // UnmarshalInstanceGroupManagerPolicy unmarshals an instance of InstanceGroupManagerPolicy from the specified map of raw messages. func UnmarshalInstanceGroupManagerPolicy(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(InstanceGroupManagerPolicy) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "href", &obj.Href) if err != nil { return @@ -28675,6 +30618,10 @@ func UnmarshalInstanceGroupManagerPolicy(m map[string]json.RawMessage, result in if err != nil { return } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "metric_type", &obj.MetricType) if err != nil { return @@ -28947,6 +30894,7 @@ func UnmarshalInstanceGroupManagerPolicyReferenceDeleted(m map[string]json.RawMe // InstanceGroupManagerPrototype : InstanceGroupManagerPrototype struct // Models which "extend" this model: // - InstanceGroupManagerPrototypeInstanceGroupManagerAutoScalePrototype +// - InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype type InstanceGroupManagerPrototype struct { // If set to `true`, this manager will control the instance group. ManagementEnabled *bool `json:"management_enabled,omitempty"` @@ -29076,8 +31024,165 @@ func UnmarshalInstanceGroupManagerReferenceDeleted(m map[string]json.RawMessage, return } +// InstanceGroupManagerScheduledActionGroup : InstanceGroupManagerScheduledActionGroup struct +type InstanceGroupManagerScheduledActionGroup struct { + // The number of members the instance group should have at the scheduled time. + MembershipCount *int64 `json:"membership_count" validate:"required"` +} + +// UnmarshalInstanceGroupManagerScheduledActionGroup unmarshals an instance of InstanceGroupManagerScheduledActionGroup from the specified map of raw messages. 
+func UnmarshalInstanceGroupManagerScheduledActionGroup(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerScheduledActionGroup) + err = core.UnmarshalPrimitive(m, "membership_count", &obj.MembershipCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerScheduledActionGroupPrototype : InstanceGroupManagerScheduledActionGroupPrototype struct +type InstanceGroupManagerScheduledActionGroupPrototype struct { + // The number of members the instance group should have at the scheduled time. + MembershipCount *int64 `json:"membership_count" validate:"required"` +} + +// NewInstanceGroupManagerScheduledActionGroupPrototype : Instantiate InstanceGroupManagerScheduledActionGroupPrototype (Generic Model Constructor) +func (*VpcV1) NewInstanceGroupManagerScheduledActionGroupPrototype(membershipCount int64) (model *InstanceGroupManagerScheduledActionGroupPrototype, err error) { + model = &InstanceGroupManagerScheduledActionGroupPrototype{ + MembershipCount: core.Int64Ptr(membershipCount), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalInstanceGroupManagerScheduledActionGroupPrototype unmarshals an instance of InstanceGroupManagerScheduledActionGroupPrototype from the specified map of raw messages. +func UnmarshalInstanceGroupManagerScheduledActionGroupPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerScheduledActionGroupPrototype) + err = core.UnmarshalPrimitive(m, "membership_count", &obj.MembershipCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerScheduledActionManager : InstanceGroupManagerScheduledActionManager struct +// Models which "extend" this model: +// - InstanceGroupManagerScheduledActionManagerAutoScale +type InstanceGroupManagerScheduledActionManager struct { + // If present, this property indicates the referenced resource has been deleted and provides + // some supplementary information. + Deleted *InstanceGroupManagerReferenceDeleted `json:"deleted,omitempty"` + + // The URL for this instance group manager. + Href *string `json:"href,omitempty"` + + // The unique identifier for this instance group manager. + ID *string `json:"id,omitempty"` + + // The user-defined name for this instance group manager. Names must be unique within the instance group. + Name *string `json:"name,omitempty"` + + // The maximum number of members the instance group should have at the scheduled time. + MaxMembershipCount *int64 `json:"max_membership_count,omitempty"` + + // The minimum number of members the instance group should have at the scheduled time. + MinMembershipCount *int64 `json:"min_membership_count,omitempty"` +} + +func (*InstanceGroupManagerScheduledActionManager) isaInstanceGroupManagerScheduledActionManager() bool { + return true +} + +type InstanceGroupManagerScheduledActionManagerIntf interface { + isaInstanceGroupManagerScheduledActionManager() bool +} + +// UnmarshalInstanceGroupManagerScheduledActionManager unmarshals an instance of InstanceGroupManagerScheduledActionManager from the specified map of raw messages. 
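// NOTE (editor): hedged sketch showing the generic-model constructor above in
// use. NewInstanceGroupManagerScheduledActionGroupPrototype validates required
// fields via core.ValidateStruct; attaching the prototypes to a create-action
// call is assumed to happen elsewhere in this SDK (not in this hunk), and the
// go-sdk-core major version in the import path is an assumption.
package snippets

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/vpc-go-sdk/vpcv1"
)

func buildScheduledResize(service *vpcv1.VpcV1) error {
	// Resize the instance group to 5 members at the scheduled time.
	group, err := service.NewInstanceGroupManagerScheduledActionGroupPrototype(5)
	if err != nil {
		return err
	}
	// Alternatively, adjust the autoscaler's bounds instead of the group size.
	manager := &vpcv1.InstanceGroupManagerScheduledActionManagerPrototype{
		MinMembershipCount: core.Int64Ptr(2),
		MaxMembershipCount: core.Int64Ptr(10),
	}
	fmt.Println(group, manager)
	return nil
}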
+func UnmarshalInstanceGroupManagerScheduledActionManager(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerScheduledActionManager) + err = core.UnmarshalModel(m, "deleted", &obj.Deleted, UnmarshalInstanceGroupManagerReferenceDeleted) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "max_membership_count", &obj.MaxMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "min_membership_count", &obj.MinMembershipCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerScheduledActionManagerPrototype : InstanceGroupManagerScheduledActionManagerPrototype struct +// Models which "extend" this model: +// - InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype +type InstanceGroupManagerScheduledActionManagerPrototype struct { + // The maximum number of members the instance group should have at the scheduled time. + MaxMembershipCount *int64 `json:"max_membership_count,omitempty"` + + // The minimum number of members the instance group should have at the scheduled time. + MinMembershipCount *int64 `json:"min_membership_count,omitempty"` + + // The unique identifier for this instance group manager. + ID *string `json:"id,omitempty"` + + // The URL for this instance group manager. + Href *string `json:"href,omitempty"` +} + +func (*InstanceGroupManagerScheduledActionManagerPrototype) isaInstanceGroupManagerScheduledActionManagerPrototype() bool { + return true +} + +type InstanceGroupManagerScheduledActionManagerPrototypeIntf interface { + isaInstanceGroupManagerScheduledActionManagerPrototype() bool +} + +// UnmarshalInstanceGroupManagerScheduledActionManagerPrototype unmarshals an instance of InstanceGroupManagerScheduledActionManagerPrototype from the specified map of raw messages. +func UnmarshalInstanceGroupManagerScheduledActionManagerPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerScheduledActionManagerPrototype) + err = core.UnmarshalPrimitive(m, "max_membership_count", &obj.MaxMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "min_membership_count", &obj.MinMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // InstanceGroupMembership : InstanceGroupMembership struct type InstanceGroupMembership struct { + // The date and time that the instance group manager policy was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + // If set to true, when deleting the membership the instance will also be deleted. DeleteInstanceOnMembershipDelete *bool `json:"delete_instance_on_membership_delete" validate:"required"` @@ -29103,6 +31208,9 @@ type InstanceGroupMembership struct { // - `pending`: Membership is waiting for dependent resources // - `unhealthy`: Membership has unhealthy dependent resources. Status *string `json:"status" validate:"required"` + + // The date and time that the instance group membership was updated. 
+ UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` } // Constants associated with the InstanceGroupMembership.Status property. @@ -29123,6 +31231,10 @@ const ( // UnmarshalInstanceGroupMembership unmarshals an instance of InstanceGroupMembership from the specified map of raw messages. func UnmarshalInstanceGroupMembership(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(InstanceGroupMembership) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "delete_instance_on_membership_delete", &obj.DeleteInstanceOnMembershipDelete) if err != nil { return @@ -29155,6 +31267,10 @@ func UnmarshalInstanceGroupMembership(m map[string]json.RawMessage, result inter if err != nil { return } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) return } @@ -29291,7 +31407,7 @@ type InstanceGroupPatch struct { // The user-defined name for this instance group. Name *string `json:"name,omitempty"` - // Array of identities to subnets to use when creating new instances. + // The subnets to use when creating new instances. Subnets []SubnetIdentityIntf `json:"subnets,omitempty"` } @@ -29406,7 +31522,7 @@ func UnmarshalInstanceGroupReferenceDeleted(m map[string]json.RawMessage, result // InstanceInitialization : InstanceInitialization struct type InstanceInitialization struct { - // Collection of references to public SSH keys used at instance initialization. + // The public SSH keys used at instance initialization. Keys []KeyReferenceInstanceInitializationContextIntf `json:"keys" validate:"required"` Password *InstanceInitializationPassword `json:"password,omitempty"` @@ -29432,7 +31548,7 @@ type InstanceInitializationPassword struct { // The administrator password at initialization, encrypted using `encryption_key`, and returned base64-encoded. EncryptedPassword *[]byte `json:"encrypted_password" validate:"required"` - // The reference to the public SSH key used to encrypt the administrator password. + // The public SSH key used to encrypt the administrator password. EncryptionKey KeyReferenceInstanceInitializationContextIntf `json:"encryption_key" validate:"required"` } @@ -29535,6 +31651,48 @@ func UnmarshalInstancePatchProfile(m map[string]json.RawMessage, result interfac return } +// InstancePlacementTargetPrototype : InstancePlacementTargetPrototype struct +// Models which "extend" this model: +// - InstancePlacementTargetPrototypeDedicatedHostIdentity +// - InstancePlacementTargetPrototypeDedicatedHostGroupIdentity +type InstancePlacementTargetPrototype struct { + // The unique identifier for this dedicated host. + ID *string `json:"id,omitempty"` + + // The CRN for this dedicated host. + CRN *string `json:"crn,omitempty"` + + // The URL for this dedicated host. + Href *string `json:"href,omitempty"` +} + +func (*InstancePlacementTargetPrototype) isaInstancePlacementTargetPrototype() bool { + return true +} + +type InstancePlacementTargetPrototypeIntf interface { + isaInstancePlacementTargetPrototype() bool +} + +// UnmarshalInstancePlacementTargetPrototype unmarshals an instance of InstancePlacementTargetPrototype from the specified map of raw messages. 
+func UnmarshalInstancePlacementTargetPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePlacementTargetPrototype) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // InstanceProfile : InstanceProfile struct type InstanceProfile struct { Bandwidth InstanceProfileBandwidthIntf `json:"bandwidth" validate:"required"` @@ -30284,6 +32442,7 @@ func UnmarshalInstanceProfileVcpuArchitecture(m map[string]json.RawMessage, resu // InstancePrototype : InstancePrototype struct // Models which "extend" this model: // - InstancePrototypeInstanceByImage +// - InstancePrototypeInstanceByVolume // - InstancePrototypeInstanceBySourceTemplate type InstancePrototype struct { // The public SSH keys for the administrative user of the virtual server instance. Up to 10 keys may be provided; if no @@ -30298,9 +32457,12 @@ type InstancePrototype struct { // name will be a hyphenated list of randomly-selected words. Name *string `json:"name,omitempty"` - // Collection of additional network interfaces to create for the virtual server instance. + // The additional network interfaces to create for the virtual server instance. NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + // The profile to use for this virtual server instance. Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` @@ -30311,7 +32473,7 @@ type InstancePrototype struct { // User data to be made available when setting up the virtual server instance. UserData *string `json:"user_data,omitempty"` - // Collection of volume attachments. + // The volume attachments for this virtual server instance. VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` // The VPC the virtual server instance is to be a part of. If provided, must match the @@ -30321,7 +32483,7 @@ type InstancePrototype struct { // The boot volume attachment for the virtual server instance. BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` - // The identity of the image to use when provisioning the virtual server instance. + // The image to use when provisioning the virtual server instance. Image ImageIdentityIntf `json:"image,omitempty"` // Primary network interface. @@ -30357,6 +32519,10 @@ func UnmarshalInstancePrototype(m map[string]json.RawMessage, result interface{} if err != nil { return } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) if err != nil { return @@ -30465,9 +32631,53 @@ func UnmarshalInstanceReferenceDeleted(m map[string]json.RawMessage, result inte return } +// InstanceStatusReason : InstanceStatusReason struct +type InstanceStatusReason struct { + // A snake case string succinctly identifying the status reason. + Code *string `json:"code" validate:"required"` + + // An explanation of the status reason. 
+ Message *string `json:"message" validate:"required"` + + // Link to documentation about this status reason. + MoreInfo *string `json:"more_info,omitempty"` +} + +// Constants associated with the InstanceStatusReason.Code property. +// A snake case string succinctly identifying the status reason. +const ( + InstanceStatusReasonCodeCannotStartConst = "cannot_start" + InstanceStatusReasonCodeCannotStartCapacityConst = "cannot_start_capacity" + InstanceStatusReasonCodeCannotStartComputeConst = "cannot_start_compute" + InstanceStatusReasonCodeCannotStartIPAddressConst = "cannot_start_ip_address" + InstanceStatusReasonCodeCannotStartNetworkConst = "cannot_start_network" + InstanceStatusReasonCodeCannotStartStorageConst = "cannot_start_storage" + InstanceStatusReasonCodeEncryptionKeyDeletedConst = "encryption_key_deleted" +) + +// UnmarshalInstanceStatusReason unmarshals an instance of InstanceStatusReason from the specified map of raw messages. +func UnmarshalInstanceStatusReason(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceStatusReason) + err = core.UnmarshalPrimitive(m, "code", &obj.Code) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "message", &obj.Message) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "more_info", &obj.MoreInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // InstanceTemplate : InstanceTemplate struct // Models which "extend" this model: // - InstanceTemplateInstanceByImage +// - InstanceTemplateInstanceByVolume // - InstanceTemplateInstanceBySourceTemplate type InstanceTemplate struct { // The date and time that the instance template was created. @@ -30493,9 +32703,12 @@ type InstanceTemplate struct { // The unique user-defined name for this instance template. Name *string `json:"name" validate:"required"` - // Collection of additional network interfaces to create for the virtual server instance. + // The additional network interfaces to create for the virtual server instance. NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + // The profile to use for this virtual server instance. Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` @@ -30505,7 +32718,7 @@ type InstanceTemplate struct { // User data to be made available when setting up the virtual server instance. UserData *string `json:"user_data,omitempty"` - // Collection of volume attachments. + // The volume attachments for this virtual server instance. VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` // The VPC the virtual server instance is to be a part of. If provided, must match the @@ -30515,7 +32728,7 @@ type InstanceTemplate struct { // The boot volume attachment for the virtual server instance. BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` - // The identity of the image to use when provisioning the virtual server instance. + // The image to use when provisioning the virtual server instance. Image ImageIdentityIntf `json:"image,omitempty"` // Primary network interface. 
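// NOTE (editor): hedged sketch for the new `placement_target` support added in
// the hunks above. A dedicated host (or dedicated host group) is referenced by
// one of its unique properties, here by ID. The concrete subtypes named in the
// model comments (InstancePlacementTargetPrototypeDedicatedHostIdentity, ...)
// extend the parent model; populating the parent struct directly is for
// illustration only. Zone, VPC, keys, and the primary network interface are
// elided, and the go-sdk-core import version is an assumption.
package snippets

import (
	"github.com/IBM/go-sdk-core/v4/core"
	"github.com/IBM/vpc-go-sdk/vpcv1"
)

func prototypeWithPlacement(imageID, profileName, dedicatedHostID string) *vpcv1.InstancePrototype {
	return &vpcv1.InstancePrototype{
		Image:   &vpcv1.ImageIdentity{ID: core.StringPtr(imageID)},
		Profile: &vpcv1.InstanceProfileIdentity{Name: core.StringPtr(profileName)},
		// Restrict scheduling of the instance to the given dedicated host.
		PlacementTarget: &vpcv1.InstancePlacementTargetPrototype{
			ID: core.StringPtr(dedicatedHostID),
		},
	}
}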
@@ -30567,6 +32780,10 @@ func UnmarshalInstanceTemplate(m map[string]json.RawMessage, result interface{}) if err != nil { return } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) if err != nil { return @@ -30764,6 +32981,7 @@ func (instanceTemplatePatch *InstanceTemplatePatch) AsPatch() (patch map[string] // InstanceTemplatePrototype : InstanceTemplatePrototype struct // Models which "extend" this model: // - InstanceTemplatePrototypeInstanceByImage +// - InstanceTemplatePrototypeInstanceByVolume // - InstanceTemplatePrototypeInstanceBySourceTemplate type InstanceTemplatePrototype struct { // The public SSH keys for the administrative user of the virtual server instance. Up to 10 keys may be provided; if no @@ -30778,9 +32996,12 @@ type InstanceTemplatePrototype struct { // name will be a hyphenated list of randomly-selected words. Name *string `json:"name,omitempty"` - // Collection of additional network interfaces to create for the virtual server instance. + // The additional network interfaces to create for the virtual server instance. NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + // The profile to use for this virtual server instance. Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` @@ -30791,7 +33012,7 @@ type InstanceTemplatePrototype struct { // User data to be made available when setting up the virtual server instance. UserData *string `json:"user_data,omitempty"` - // Collection of volume attachments. + // The volume attachments for this virtual server instance. VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` // The VPC the virtual server instance is to be a part of. If provided, must match the @@ -30801,7 +33022,7 @@ type InstanceTemplatePrototype struct { // The boot volume attachment for the virtual server instance. BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` - // The identity of the image to use when provisioning the virtual server instance. + // The image to use when provisioning the virtual server instance. Image ImageIdentityIntf `json:"image,omitempty"` // Primary network interface. @@ -30837,6 +33058,10 @@ func UnmarshalInstanceTemplatePrototype(m map[string]json.RawMessage, result int if err != nil { return } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) if err != nil { return @@ -31853,6 +34078,62 @@ func (options *ListInstanceDisksOptions) SetHeaders(param map[string]string) *Li return options } +// ListInstanceGroupManagerActionsOptions : The ListInstanceGroupManagerActions options. +type ListInstanceGroupManagerActionsOptions struct { + // The instance group identifier. + InstanceGroupID *string `validate:"required,ne="` + + // The instance group manager identifier. + InstanceGroupManagerID *string `validate:"required,ne="` + + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. 
+ Limit *int64 + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewListInstanceGroupManagerActionsOptions : Instantiate ListInstanceGroupManagerActionsOptions +func (*VpcV1) NewListInstanceGroupManagerActionsOptions(instanceGroupID string, instanceGroupManagerID string) *ListInstanceGroupManagerActionsOptions { + return &ListInstanceGroupManagerActionsOptions{ + InstanceGroupID: core.StringPtr(instanceGroupID), + InstanceGroupManagerID: core.StringPtr(instanceGroupManagerID), + } +} + +// SetInstanceGroupID : Allow user to set InstanceGroupID +func (options *ListInstanceGroupManagerActionsOptions) SetInstanceGroupID(instanceGroupID string) *ListInstanceGroupManagerActionsOptions { + options.InstanceGroupID = core.StringPtr(instanceGroupID) + return options +} + +// SetInstanceGroupManagerID : Allow user to set InstanceGroupManagerID +func (options *ListInstanceGroupManagerActionsOptions) SetInstanceGroupManagerID(instanceGroupManagerID string) *ListInstanceGroupManagerActionsOptions { + options.InstanceGroupManagerID = core.StringPtr(instanceGroupManagerID) + return options +} + +// SetStart : Allow user to set Start +func (options *ListInstanceGroupManagerActionsOptions) SetStart(start string) *ListInstanceGroupManagerActionsOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListInstanceGroupManagerActionsOptions) SetLimit(limit int64) *ListInstanceGroupManagerActionsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListInstanceGroupManagerActionsOptions) SetHeaders(param map[string]string) *ListInstanceGroupManagerActionsOptions { + options.Headers = param + return options +} + // ListInstanceGroupManagerPoliciesOptions : The ListInstanceGroupManagerPolicies options. type ListInstanceGroupManagerPoliciesOptions struct { // The instance group identifier. @@ -32554,6 +34835,11 @@ func (options *ListLoadBalancerProfilesOptions) SetHeaders(param map[string]stri // ListLoadBalancersOptions : The ListLoadBalancers options. type ListLoadBalancersOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. + Limit *int64 // Allows users to set headers on API requests Headers map[string]string @@ -32564,6 +34850,18 @@ func (*VpcV1) NewListLoadBalancersOptions() *ListLoadBalancersOptions { return &ListLoadBalancersOptions{} } +// SetStart : Allow user to set Start +func (options *ListLoadBalancersOptions) SetStart(start string) *ListLoadBalancersOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListLoadBalancersOptions) SetLimit(limit int64) *ListLoadBalancersOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + // SetHeaders : Allow user to set Headers func (options *ListLoadBalancersOptions) SetHeaders(param map[string]string) *ListLoadBalancersOptions { options.Headers = param @@ -32995,6 +35293,124 @@ func (options *ListSecurityGroupsOptions) SetHeaders(param map[string]string) *L return options } +// ListSnapshotsOptions : The ListSnapshots options. +type ListSnapshotsOptions struct { + // A server-supplied token determining what resource to start the page on. + Start *string + + // The number of resources to return on a page. 
+ Limit *int64 + + // Filters the collection to resources within one of the resource groups identified in a comma-separated list of + // resource group identifiers. + ResourceGroupID *string + + // Filters the collection to resources with the exact specified name. + Name *string + + // Filters the collection to resources with the source volume with the specified identifier. + SourceVolumeID *string + + // Filters the collection to resources with the source volume with the specified CRN. + SourceVolumeCRN *string + + // Filters the collection to resources with the source image with the specified identifier. + // + // This parameter also supports the values `null` and `not:null` which filter the collection to resources which have no + // source image or any existent source image, respectively. + SourceImageID *string + + // Filters the collection to resources with the source volume with the specified CRN. + // + // This parameter also supports the values `null` and `not:null` which filter the collection to resources which have no + // source image or any existent source image, respectively. + SourceImageCRN *string + + // Sorts the returned collection by the specified field name in ascending order. A `-` may be prepended to the field + // name to sort in descending order. For example, the value + // `-created_at` sorts the collection by the `created_at` field in descending order, and the value `name` sorts it by + // the `name` field in ascending order. + Sort *string + + // Allows users to set headers on API requests + Headers map[string]string +} + +// Constants associated with the ListSnapshotsOptions.Sort property. +// Sorts the returned collection by the specified field name in ascending order. A `-` may be prepended to the field +// name to sort in descending order. For example, the value +// `-created_at` sorts the collection by the `created_at` field in descending order, and the value `name` sorts it by +// the `name` field in ascending order. 
+const ( + ListSnapshotsOptionsSortCreatedAtConst = "created_at" + ListSnapshotsOptionsSortNameConst = "name" +) + +// NewListSnapshotsOptions : Instantiate ListSnapshotsOptions +func (*VpcV1) NewListSnapshotsOptions() *ListSnapshotsOptions { + return &ListSnapshotsOptions{} +} + +// SetStart : Allow user to set Start +func (options *ListSnapshotsOptions) SetStart(start string) *ListSnapshotsOptions { + options.Start = core.StringPtr(start) + return options +} + +// SetLimit : Allow user to set Limit +func (options *ListSnapshotsOptions) SetLimit(limit int64) *ListSnapshotsOptions { + options.Limit = core.Int64Ptr(limit) + return options +} + +// SetResourceGroupID : Allow user to set ResourceGroupID +func (options *ListSnapshotsOptions) SetResourceGroupID(resourceGroupID string) *ListSnapshotsOptions { + options.ResourceGroupID = core.StringPtr(resourceGroupID) + return options +} + +// SetName : Allow user to set Name +func (options *ListSnapshotsOptions) SetName(name string) *ListSnapshotsOptions { + options.Name = core.StringPtr(name) + return options +} + +// SetSourceVolumeID : Allow user to set SourceVolumeID +func (options *ListSnapshotsOptions) SetSourceVolumeID(sourceVolumeID string) *ListSnapshotsOptions { + options.SourceVolumeID = core.StringPtr(sourceVolumeID) + return options +} + +// SetSourceVolumeCRN : Allow user to set SourceVolumeCRN +func (options *ListSnapshotsOptions) SetSourceVolumeCRN(sourceVolumeCRN string) *ListSnapshotsOptions { + options.SourceVolumeCRN = core.StringPtr(sourceVolumeCRN) + return options +} + +// SetSourceImageID : Allow user to set SourceImageID +func (options *ListSnapshotsOptions) SetSourceImageID(sourceImageID string) *ListSnapshotsOptions { + options.SourceImageID = core.StringPtr(sourceImageID) + return options +} + +// SetSourceImageCRN : Allow user to set SourceImageCRN +func (options *ListSnapshotsOptions) SetSourceImageCRN(sourceImageCRN string) *ListSnapshotsOptions { + options.SourceImageCRN = core.StringPtr(sourceImageCRN) + return options +} + +// SetSort : Allow user to set Sort +func (options *ListSnapshotsOptions) SetSort(sort string) *ListSnapshotsOptions { + options.Sort = core.StringPtr(sort) + return options +} + +// SetHeaders : Allow user to set Headers +func (options *ListSnapshotsOptions) SetHeaders(param map[string]string) *ListSnapshotsOptions { + options.Headers = param + return options +} + // ListSubnetReservedIpsOptions : The ListSubnetReservedIps options. type ListSubnetReservedIpsOptions struct { // The subnet identifier. @@ -33825,17 +36241,80 @@ func UnmarshalLoadBalancer(m map[string]json.RawMessage, result interface{}) (er // LoadBalancerCollection : LoadBalancerCollection struct type LoadBalancerCollection struct { + // A link to the first page of resources. + First *LoadBalancerCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + // Collection of load balancers. LoadBalancers []LoadBalancer `json:"load_balancers" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *LoadBalancerCollectionNext `json:"next,omitempty"` + + // The total number of resources across all pages. + TotalCount *int64 `json:"total_count" validate:"required"` } // UnmarshalLoadBalancerCollection unmarshals an instance of LoadBalancerCollection from the specified map of raw messages. 
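// NOTE (editor): hedged sketch of consuming the load balancer pagination surface
// added above: set a page size, then follow `next.href` by extracting its `start`
// token until the last page (which omits `next`). The ListLoadBalancers method is
// part of this SDK but does not appear in this hunk; its (result, response, error)
// shape follows the SDK's usual convention.
package snippets

import (
	"net/url"

	"github.com/IBM/vpc-go-sdk/vpcv1"
)

func allLoadBalancers(service *vpcv1.VpcV1) ([]vpcv1.LoadBalancer, error) {
	opts := service.NewListLoadBalancersOptions().SetLimit(50)
	var all []vpcv1.LoadBalancer
	for {
		page, _, err := service.ListLoadBalancers(opts)
		if err != nil {
			return nil, err
		}
		all = append(all, page.LoadBalancers...)
		if page.Next == nil { // `next` is absent on the last page
			return all, nil
		}
		// The next-page URL carries the server-supplied `start` token.
		u, err := url.Parse(*page.Next.Href)
		if err != nil {
			return nil, err
		}
		opts.SetStart(u.Query().Get("start"))
	}
}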
func UnmarshalLoadBalancerCollection(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(LoadBalancerCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalLoadBalancerCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } err = core.UnmarshalModel(m, "load_balancers", &obj.LoadBalancers, UnmarshalLoadBalancer) if err != nil { return } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalLoadBalancerCollectionNext) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerCollectionFirst : A link to the first page of resources. +type LoadBalancerCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalLoadBalancerCollectionFirst unmarshals an instance of LoadBalancerCollectionFirst from the specified map of raw messages. +func UnmarshalLoadBalancerCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// LoadBalancerCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type LoadBalancerCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalLoadBalancerCollectionNext unmarshals an instance of LoadBalancerCollectionNext from the specified map of raw messages. +func UnmarshalLoadBalancerCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(LoadBalancerCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) return } @@ -33908,7 +36387,7 @@ type LoadBalancerListener struct { // The unique identifier for this load balancer listener. ID *string `json:"id" validate:"required"` - // An array of policies for this listener. + // The policies for this listener. Policies []LoadBalancerListenerPolicyReference `json:"policies,omitempty"` // The listener port number. Each listener in the load balancer must have a unique @@ -34117,7 +36596,7 @@ type LoadBalancerListenerPolicy struct { // The provisioning status of this policy. ProvisioningStatus *string `json:"provisioning_status" validate:"required"` - // The rules of this policy. + // The rules for this policy. Rules []LoadBalancerListenerPolicyRuleReference `json:"rules" validate:"required"` // `LoadBalancerPoolReference` is in the response if `action` is `forward`. @@ -34259,7 +36738,7 @@ type LoadBalancerListenerPolicyPrototype struct { // Priority of the policy. Lower value indicates higher priority. Priority *int64 `json:"priority" validate:"required"` - // An array of rules for this policy. + // The rule prototype objects for this policy. 
Rules []LoadBalancerListenerPolicyRulePrototype `json:"rules,omitempty"` // When `action` is `forward`, `LoadBalancerPoolIdentity` is required to specify which @@ -36145,12 +38624,14 @@ func UnmarshalLoadBalancerPoolReferenceDeleted(m map[string]json.RawMessage, res // LoadBalancerPoolSessionPersistence : LoadBalancerPoolSessionPersistence struct type LoadBalancerPoolSessionPersistence struct { - // The session persistence type. + // The session persistence type. The `http_cookie` and `app_cookie` types are applicable only to the `http` and `https` + // protocols. Type *string `json:"type" validate:"required"` } // Constants associated with the LoadBalancerPoolSessionPersistence.Type property. -// The session persistence type. +// The session persistence type. The `http_cookie` and `app_cookie` types are applicable only to the `http` and `https` +// protocols. const ( LoadBalancerPoolSessionPersistenceTypeSourceIPConst = "source_ip" ) @@ -36166,14 +38647,16 @@ func UnmarshalLoadBalancerPoolSessionPersistence(m map[string]json.RawMessage, r return } -// LoadBalancerPoolSessionPersistencePatch : LoadBalancerPoolSessionPersistencePatch struct +// LoadBalancerPoolSessionPersistencePatch : The session persistence configuration. Specify `null` to remove any existing session persistence configuration. type LoadBalancerPoolSessionPersistencePatch struct { - // The session persistence type. + // The session persistence type. The `http_cookie` and `app_cookie` types are applicable only to the `http` and `https` + // protocols. Type *string `json:"type" validate:"required"` } // Constants associated with the LoadBalancerPoolSessionPersistencePatch.Type property. -// The session persistence type. +// The session persistence type. The `http_cookie` and `app_cookie` types are applicable only to the `http` and `https` +// protocols. const ( LoadBalancerPoolSessionPersistencePatchTypeSourceIPConst = "source_ip" ) @@ -36200,12 +38683,14 @@ func UnmarshalLoadBalancerPoolSessionPersistencePatch(m map[string]json.RawMessa // LoadBalancerPoolSessionPersistencePrototype : LoadBalancerPoolSessionPersistencePrototype struct type LoadBalancerPoolSessionPersistencePrototype struct { - // The session persistence type. + // The session persistence type. The `http_cookie` and `app_cookie` types are applicable only to the `http` and `https` + // protocols. Type *string `json:"type" validate:"required"` } // Constants associated with the LoadBalancerPoolSessionPersistencePrototype.Type property. -// The session persistence type. +// The session persistence type. The `http_cookie` and `app_cookie` types are applicable only to the `http` and `https` +// protocols. const ( LoadBalancerPoolSessionPersistencePrototypeTypeSourceIPConst = "source_ip" ) @@ -36786,8 +39271,8 @@ type NetworkACLPrototype struct { // The VPC this network ACL is to be a part of. VPC VPCIdentityIntf `json:"vpc" validate:"required"` - // Array of prototype objects for rules to create along with this network ACL. If unspecified, no rules will be - // created, resulting in all traffic being denied. + // The prototype objects for rules to create along with this network ACL. If unspecified, no rules will be created, + // resulting in all traffic being denied. Rules []NetworkACLRulePrototypeNetworkACLContextIntf `json:"rules,omitempty"` // Network ACL to copy rules from. @@ -37653,7 +40138,7 @@ type NetworkInterface struct { // The date and time that the network interface was created. 
CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` - // Array of references to floating IPs associated with this network interface. + // The floating IPs associated with this network interface. FloatingIps []FloatingIPReference `json:"floating_ips,omitempty"` // The URL for this network interface. @@ -38310,6 +40795,30 @@ func UnmarshalOperatingSystemIdentity(m map[string]json.RawMessage, result inter return } +// OperatingSystemReference : OperatingSystemReference struct +type OperatingSystemReference struct { + // The URL for this operating system. + Href *string `json:"href" validate:"required"` + + // The globally unique name for this operating system. + Name *string `json:"name" validate:"required"` +} + +// UnmarshalOperatingSystemReference unmarshals an instance of OperatingSystemReference from the specified map of raw messages. +func UnmarshalOperatingSystemReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(OperatingSystemReference) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // PublicGateway : PublicGateway struct type PublicGateway struct { // The date and time that the public gateway was created. @@ -38318,7 +40827,7 @@ type PublicGateway struct { // The CRN for this public gateway. CRN *string `json:"crn" validate:"required"` - // Reference to the floating IP which is bound to this public gateway. + // The floating IP bound to this public gateway. FloatingIP *PublicGatewayFloatingIP `json:"floating_ip" validate:"required"` // The URL for this public gateway. @@ -38492,7 +41001,7 @@ func UnmarshalPublicGatewayCollectionNext(m map[string]json.RawMessage, result i return } -// PublicGatewayFloatingIP : Reference to the floating IP which is bound to this public gateway. +// PublicGatewayFloatingIP : The floating IP bound to this public gateway. type PublicGatewayFloatingIP struct { // The globally unique IP address. Address *string `json:"address" validate:"required"` @@ -39091,7 +41600,7 @@ type ReplaceLoadBalancerPoolMembersOptions struct { // The pool identifier. PoolID *string `validate:"required,ne="` - // Array of pool member prototype objects. + // The member prototype objects for this pool. Members []LoadBalancerPoolMemberPrototype `validate:"required"` // Allows users to set headers on API requests @@ -39772,7 +42281,6 @@ type Route struct { // Constants associated with the Route.LifecycleState property. // The lifecycle state of the route. const ( - RouteLifecycleStateDeletedConst = "deleted" RouteLifecycleStateDeletingConst = "deleting" RouteLifecycleStateFailedConst = "failed" RouteLifecycleStatePendingConst = "pending" @@ -40240,7 +42748,6 @@ type RoutingTable struct { // Constants associated with the RoutingTable.LifecycleState property. // The lifecycle state of the routing table. const ( - RoutingTableLifecycleStateDeletedConst = "deleted" RoutingTableLifecycleStateDeletingConst = "deleting" RoutingTableLifecycleStateFailedConst = "failed" RoutingTableLifecycleStatePendingConst = "pending" @@ -40589,16 +43096,16 @@ type SecurityGroup struct { // The user-defined name for this security group. Names must be unique within the VPC the security group resides in. Name *string `json:"name" validate:"required"` - // Array of references to network interfaces. + // The network interfaces for this security group. 
NetworkInterfaces []NetworkInterfaceReference `json:"network_interfaces" validate:"required"` // The resource group for this security group. ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` - // Array of rules for this security group. If no rules exist, all traffic will be denied. + // The rules for this security group. If no rules exist, all traffic will be denied. Rules []SecurityGroupRuleIntf `json:"rules" validate:"required"` - // Array of references to targets. + // The targets for this security group. Targets []SecurityGroupTargetReferenceIntf `json:"targets" validate:"required"` // The VPC this security group is a part of. @@ -40977,7 +43484,7 @@ func UnmarshalSecurityGroupRuleCollection(m map[string]json.RawMessage, result i // SecurityGroupRulePatch : SecurityGroupRulePatch struct type SecurityGroupRulePatch struct { - // The ICMP traffic code to allow. + // The ICMP traffic code to allow. Specify `null` to remove an existing ICMP traffic code value. Code *int64 `json:"code,omitempty"` // The direction of traffic to enforce, either `inbound` or `outbound`. @@ -40988,10 +43495,12 @@ type SecurityGroupRulePatch struct { // interfaces) in that group matching this IP version. IPVersion *string `json:"ip_version,omitempty"` - // The inclusive upper bound of TCP/UDP port range. + // The inclusive upper bound of the protocol port range. Specify `null` to clear an existing upper bound. If a lower + // bound has been set, the upper bound must also be set, and must not be smaller. PortMax *int64 `json:"port_max,omitempty"` - // The inclusive lower bound of TCP/UDP port range. + // The inclusive lower bound of the protocol port range. Specify `null` to clear an existing lower bound. If an upper + // bound has been set, the lower bound must also be set, and must not be larger. PortMin *int64 `json:"port_min,omitempty"` // The IP addresses or security groups from which this rule will allow traffic (or to @@ -41000,7 +43509,7 @@ type SecurityGroupRulePatch struct { // any source, for outbound rules). Remote SecurityGroupRuleRemotePatchIntf `json:"remote,omitempty"` - // The ICMP traffic type to allow. + // The ICMP traffic type to allow. Specify `null` to remove an existing ICMP traffic type value. Type *int64 `json:"type,omitempty"` } @@ -41366,7 +43875,7 @@ type SecurityGroupTargetCollection struct { // except the last page. Next *SecurityGroupTargetCollectionNext `json:"next,omitempty"` - // Collection of security group target references. + // Collection of targets for this security group. Targets []SecurityGroupTargetReferenceIntf `json:"targets" validate:"required"` // The total number of resources across all pages. @@ -41544,6 +44053,393 @@ func (options *SetSubnetPublicGatewayOptions) SetHeaders(param map[string]string return options } +// Snapshot : Snapshot struct +type Snapshot struct { + // Indicates if a boot volume attachment can be created with a volume created from this snapshot. + Bootable *bool `json:"bootable" validate:"required"` + + // The date and time that this snapshot was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN for this snapshot. + CRN *string `json:"crn" validate:"required"` + + // Indicates whether this snapshot can be deleted. This value will not be `true` if any other snapshots depend on it. + Deletable *bool `json:"deletable" validate:"required"` + + // The type of encryption used on the source volume. 
+ Encryption *string `json:"encryption" validate:"required"` + + // The root key used to wrap the data encryption key for the source volume. + // + // This property will be present for volumes with an `encryption` type of + // `user_managed`. + EncryptionKey *EncryptionKeyReference `json:"encryption_key,omitempty"` + + // The URL for this snapshot. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this snapshot. + ID *string `json:"id" validate:"required"` + + // The lifecycle state of this snapshot. + LifecycleState *string `json:"lifecycle_state" validate:"required"` + + // The minimum capacity of a volume created from this snapshot. When a snapshot is created, this will be set to the + // capacity of the `source_volume`. + MinimumCapacity *int64 `json:"minimum_capacity" validate:"required"` + + // The user-defined name for this snapshot. + Name *string `json:"name" validate:"required"` + + // The operating system included in this image. + OperatingSystem *OperatingSystem `json:"operating_system,omitempty"` + + // The resource group for this snapshot. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The size of this snapshot rounded up to the next gigabyte. + Size *int64 `json:"size" validate:"required"` + + // If present, the image from which the data on this volume was most directly + // provisioned. + SourceImage *ImageReference `json:"source_image,omitempty"` + + // The source volume this snapshot was created from (may be + // [deleted](https://cloud.ibm.com/apidocs/vpc#deleted-resources)). + SourceVolume *VolumeReference `json:"source_volume" validate:"required"` +} + +// Constants associated with the Snapshot.Encryption property. +// The type of encryption used on the source volume. +const ( + SnapshotEncryptionProviderManagedConst = "provider_managed" + SnapshotEncryptionUserManagedConst = "user_managed" +) + +// Constants associated with the Snapshot.LifecycleState property. +// The lifecycle state of this snapshot. +const ( + SnapshotLifecycleStateDeletingConst = "deleting" + SnapshotLifecycleStateFailedConst = "failed" + SnapshotLifecycleStatePendingConst = "pending" + SnapshotLifecycleStateStableConst = "stable" + SnapshotLifecycleStateSuspendedConst = "suspended" + SnapshotLifecycleStateUpdatingConst = "updating" + SnapshotLifecycleStateWaitingConst = "waiting" +) + +// Constants associated with the Snapshot.ResourceType property. +// The resource type. +const ( + SnapshotResourceTypeSnapshotConst = "snapshot" +) + +// UnmarshalSnapshot unmarshals an instance of Snapshot from the specified map of raw messages. 
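// NOTE (editor): hedged sketch using the Snapshot model and constants defined
// above: a snapshot is safe to delete only when `deletable` is true and its
// lifecycle state is stable. The delete call itself is defined elsewhere in
// this SDK and elided here.
package snippets

import "github.com/IBM/vpc-go-sdk/vpcv1"

func canDeleteSnapshot(s *vpcv1.Snapshot) bool {
	// Deletable is false while other snapshots still depend on this one.
	return s.Deletable != nil && *s.Deletable &&
		s.LifecycleState != nil &&
		*s.LifecycleState == vpcv1.SnapshotLifecycleStateStableConst
}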
+func UnmarshalSnapshot(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(Snapshot) + err = core.UnmarshalPrimitive(m, "bootable", &obj.Bootable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "deletable", &obj.Deletable) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "encryption", &obj.Encryption) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "lifecycle_state", &obj.LifecycleState) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "minimum_capacity", &obj.MinimumCapacity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "operating_system", &obj.OperatingSystem, UnmarshalOperatingSystem) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "size", &obj.Size) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_image", &obj.SourceImage, UnmarshalImageReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_volume", &obj.SourceVolume, UnmarshalVolumeReference) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SnapshotCollection : SnapshotCollection struct +type SnapshotCollection struct { + // A link to the first page of resources. + First *SnapshotCollectionFirst `json:"first" validate:"required"` + + // The maximum number of resources that can be returned by the request. + Limit *int64 `json:"limit" validate:"required"` + + // A link to the next page of resources. This property is present for all pages + // except the last page. + Next *SnapshotCollectionNext `json:"next,omitempty"` + + // Collection of snapshots. + Snapshots []Snapshot `json:"snapshots" validate:"required"` + + // The total number of resources across all pages. + TotalCount *int64 `json:"total_count" validate:"required"` +} + +// UnmarshalSnapshotCollection unmarshals an instance of SnapshotCollection from the specified map of raw messages. 
+func UnmarshalSnapshotCollection(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotCollection) + err = core.UnmarshalModel(m, "first", &obj.First, UnmarshalSnapshotCollectionFirst) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "limit", &obj.Limit) + if err != nil { + return + } + err = core.UnmarshalModel(m, "next", &obj.Next, UnmarshalSnapshotCollectionNext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "snapshots", &obj.Snapshots, UnmarshalSnapshot) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "total_count", &obj.TotalCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SnapshotCollectionFirst : A link to the first page of resources. +type SnapshotCollectionFirst struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalSnapshotCollectionFirst unmarshals an instance of SnapshotCollectionFirst from the specified map of raw messages. +func UnmarshalSnapshotCollectionFirst(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotCollectionFirst) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SnapshotCollectionNext : A link to the next page of resources. This property is present for all pages except the last page. +type SnapshotCollectionNext struct { + // The URL for a page of resources. + Href *string `json:"href" validate:"required"` +} + +// UnmarshalSnapshotCollectionNext unmarshals an instance of SnapshotCollectionNext from the specified map of raw messages. +func UnmarshalSnapshotCollectionNext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotCollectionNext) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SnapshotIdentity : Identifies a snapshot by a unique property. +// Models which "extend" this model: +// - SnapshotIdentityByID +// - SnapshotIdentityByCRN +// - SnapshotIdentityByHref +type SnapshotIdentity struct { + // The unique identifier for this snapshot. + ID *string `json:"id,omitempty"` + + // The CRN for this snapshot. + CRN *string `json:"crn,omitempty"` + + // The URL for this snapshot. + Href *string `json:"href,omitempty"` +} + +func (*SnapshotIdentity) isaSnapshotIdentity() bool { + return true +} + +type SnapshotIdentityIntf interface { + isaSnapshotIdentity() bool +} + +// UnmarshalSnapshotIdentity unmarshals an instance of SnapshotIdentity from the specified map of raw messages. +func UnmarshalSnapshotIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SnapshotPatch : SnapshotPatch struct +type SnapshotPatch struct { + // The user-defined name for this snapshot. + Name *string `json:"name,omitempty"` +} + +// UnmarshalSnapshotPatch unmarshals an instance of SnapshotPatch from the specified map of raw messages. 
+func UnmarshalSnapshotPatch(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotPatch) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// AsPatch returns a generic map representation of the SnapshotPatch +func (snapshotPatch *SnapshotPatch) AsPatch() (patch map[string]interface{}, err error) { + var jsonData []byte + jsonData, err = json.Marshal(snapshotPatch) + if err == nil { + err = json.Unmarshal(jsonData, &patch) + } + return +} + +// SnapshotReference : SnapshotReference struct +type SnapshotReference struct { + // The CRN for this snapshot. + CRN *string `json:"crn" validate:"required"` + + // If present, this property indicates the referenced resource has been deleted and provides + // some supplementary information. + Deleted *SnapshotReferenceDeleted `json:"deleted,omitempty"` + + // The URL for this snapshot. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this snapshot. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this snapshot. + Name *string `json:"name" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` +} + +// Constants associated with the SnapshotReference.ResourceType property. +// The resource type. +const ( + SnapshotReferenceResourceTypeSnapshotConst = "snapshot" +) + +// UnmarshalSnapshotReference unmarshals an instance of SnapshotReference from the specified map of raw messages. +func UnmarshalSnapshotReference(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotReference) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalModel(m, "deleted", &obj.Deleted, UnmarshalSnapshotReferenceDeleted) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SnapshotReferenceDeleted : If present, this property indicates the referenced resource has been deleted and provides some supplementary +// information. +type SnapshotReferenceDeleted struct { + // Link to documentation about deleted resources. + MoreInfo *string `json:"more_info" validate:"required"` +} + +// UnmarshalSnapshotReferenceDeleted unmarshals an instance of SnapshotReferenceDeleted from the specified map of raw messages. +func UnmarshalSnapshotReferenceDeleted(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotReferenceDeleted) + err = core.UnmarshalPrimitive(m, "more_info", &obj.MoreInfo) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // Subnet : Subnet struct type Subnet struct { // The number of IPv4 addresses in this subnet that are not in-use, and have not been reserved by the user or the @@ -42417,6 +45313,64 @@ func (options *UpdateInstanceDiskOptions) SetHeaders(param map[string]string) *U return options } +// UpdateInstanceGroupManagerActionOptions : The UpdateInstanceGroupManagerAction options. 
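+//
+// A usage sketch (editorial): the options are normally built with the
+// constructor and fluent setters defined below; `vpcService` is an assumed,
+// already-authenticated *VpcV1 client and the IDs are placeholders. The
+// patch map is typically produced by an AsPatch helper on the corresponding
+// patch model elsewhere in this file.
+//
+//	actionPatch := map[string]interface{}{"name": "my-scheduled-action"}
+//	options := vpcService.NewUpdateInstanceGroupManagerActionOptions(
+//		instanceGroupID, managerID, actionID, actionPatch)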
+type UpdateInstanceGroupManagerActionOptions struct { + // The instance group identifier. + InstanceGroupID *string `validate:"required,ne="` + + // The instance group manager identifier. + InstanceGroupManagerID *string `validate:"required,ne="` + + // The instance group manager action identifier. + ID *string `validate:"required,ne="` + + // The instance group manager action patch. + InstanceGroupManagerActionPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateInstanceGroupManagerActionOptions : Instantiate UpdateInstanceGroupManagerActionOptions +func (*VpcV1) NewUpdateInstanceGroupManagerActionOptions(instanceGroupID string, instanceGroupManagerID string, id string, instanceGroupManagerActionPatch map[string]interface{}) *UpdateInstanceGroupManagerActionOptions { + return &UpdateInstanceGroupManagerActionOptions{ + InstanceGroupID: core.StringPtr(instanceGroupID), + InstanceGroupManagerID: core.StringPtr(instanceGroupManagerID), + ID: core.StringPtr(id), + InstanceGroupManagerActionPatch: instanceGroupManagerActionPatch, + } +} + +// SetInstanceGroupID : Allow user to set InstanceGroupID +func (options *UpdateInstanceGroupManagerActionOptions) SetInstanceGroupID(instanceGroupID string) *UpdateInstanceGroupManagerActionOptions { + options.InstanceGroupID = core.StringPtr(instanceGroupID) + return options +} + +// SetInstanceGroupManagerID : Allow user to set InstanceGroupManagerID +func (options *UpdateInstanceGroupManagerActionOptions) SetInstanceGroupManagerID(instanceGroupManagerID string) *UpdateInstanceGroupManagerActionOptions { + options.InstanceGroupManagerID = core.StringPtr(instanceGroupManagerID) + return options +} + +// SetID : Allow user to set ID +func (options *UpdateInstanceGroupManagerActionOptions) SetID(id string) *UpdateInstanceGroupManagerActionOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetInstanceGroupManagerActionPatch : Allow user to set InstanceGroupManagerActionPatch +func (options *UpdateInstanceGroupManagerActionOptions) SetInstanceGroupManagerActionPatch(instanceGroupManagerActionPatch map[string]interface{}) *UpdateInstanceGroupManagerActionOptions { + options.InstanceGroupManagerActionPatch = instanceGroupManagerActionPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateInstanceGroupManagerActionOptions) SetHeaders(param map[string]string) *UpdateInstanceGroupManagerActionOptions { + options.Headers = param + return options +} + // UpdateInstanceGroupManagerOptions : The UpdateInstanceGroupManager options. type UpdateInstanceGroupManagerOptions struct { // The instance group identifier. @@ -43385,6 +46339,44 @@ func (options *UpdateSecurityGroupRuleOptions) SetHeaders(param map[string]strin return options } +// UpdateSnapshotOptions : The UpdateSnapshot options. +type UpdateSnapshotOptions struct { + // The snapshot identifier. + ID *string `validate:"required,ne="` + + // The snapshot patch. 
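+ //
+ // A construction sketch (editorial): this map is typically produced from
+ // the SnapshotPatch model defined earlier in this file, for example:
+ //
+ //	namePatch := &SnapshotPatch{Name: core.StringPtr("my-snapshot")}
+ //	patchMap, err := namePatch.AsPatch()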
+ SnapshotPatch map[string]interface{} `validate:"required"` + + // Allows users to set headers on API requests + Headers map[string]string +} + +// NewUpdateSnapshotOptions : Instantiate UpdateSnapshotOptions +func (*VpcV1) NewUpdateSnapshotOptions(id string, snapshotPatch map[string]interface{}) *UpdateSnapshotOptions { + return &UpdateSnapshotOptions{ + ID: core.StringPtr(id), + SnapshotPatch: snapshotPatch, + } +} + +// SetID : Allow user to set ID +func (options *UpdateSnapshotOptions) SetID(id string) *UpdateSnapshotOptions { + options.ID = core.StringPtr(id) + return options +} + +// SetSnapshotPatch : Allow user to set SnapshotPatch +func (options *UpdateSnapshotOptions) SetSnapshotPatch(snapshotPatch map[string]interface{}) *UpdateSnapshotOptions { + options.SnapshotPatch = snapshotPatch + return options +} + +// SetHeaders : Allow user to set Headers +func (options *UpdateSnapshotOptions) SetHeaders(param map[string]string) *UpdateSnapshotOptions { + options.Headers = param + return options +} + // UpdateSubnetOptions : The UpdateSubnet options. type UpdateSubnetOptions struct { // The subnet identifier. @@ -43872,8 +46864,8 @@ type VPC struct { // The CRN for this VPC. CRN *string `json:"crn" validate:"required"` - // Array of CSE ([Cloud Service Endpoint](https://cloud.ibm.com/docs/resources?topic=resources-service-endpoints)) - // source IP addresses for the VPC. The VPC will have one CSE source IP address per zone. + // The CSE ([Cloud Service Endpoint](https://cloud.ibm.com/docs/resources?topic=resources-service-endpoints)) source IP + // addresses for the VPC. The VPC will have one CSE source IP address per zone. CseSourceIps []VpccseSourceIP `json:"cse_source_ips,omitempty"` // The default network ACL to use for subnets created in this VPC. @@ -44208,7 +47200,7 @@ func UnmarshalVPCReferenceDeleted(m map[string]json.RawMessage, result interface // - VPNGatewayRouteMode // - VPNGatewayPolicyMode type VPNGateway struct { - // Collection of references to VPN gateway connections. + // Connections for this VPN gateway. Connections []VPNGatewayConnectionReference `json:"connections" validate:"required"` // The date and time that this VPN gateway was created. @@ -44462,10 +47454,10 @@ type VPNGatewayConnection struct { // The VPN tunnel configuration for this VPN gateway connection (in static route mode). Tunnels []VPNGatewayConnectionStaticRouteModeTunnel `json:"tunnels,omitempty"` - // A collection of local CIDRs for this resource. + // The local CIDRs for this resource. LocalCIDRs []string `json:"local_cidrs,omitempty"` - // A collection of peer CIDRs for this resource. + // The peer CIDRs for this resource. PeerCIDRs []string `json:"peer_cidrs,omitempty"` } @@ -44687,7 +47679,7 @@ func UnmarshalVPNGatewayConnectionDpdPrototype(m map[string]json.RawMessage, res // VPNGatewayConnectionLocalCIDRs : VPNGatewayConnectionLocalCIDRs struct type VPNGatewayConnectionLocalCIDRs struct { - // A collection of local CIDRs for this resource. + // The local CIDRs for this resource. LocalCIDRs []string `json:"local_cidrs,omitempty"` } @@ -44797,7 +47789,7 @@ func (vpnGatewayConnectionPatch *VPNGatewayConnectionPatch) AsPatch() (patch map // VPNGatewayConnectionPeerCIDRs : VPNGatewayConnectionPeerCIDRs struct type VPNGatewayConnectionPeerCIDRs struct { - // A collection of peer CIDRs for this resource. + // The peer CIDRs for this resource. 
PeerCIDRs []string `json:"peer_cidrs,omitempty"` } @@ -44842,10 +47834,10 @@ type VPNGatewayConnectionPrototype struct { // Routing protocols are disabled for this VPN gateway connection. RoutingProtocol *string `json:"routing_protocol,omitempty"` - // A collection of local CIDRs for this resource. + // The local CIDRs for this resource. LocalCIDRs []string `json:"local_cidrs,omitempty"` - // A collection of peer CIDRs for this resource. + // The peer CIDRs for this resource. PeerCIDRs []string `json:"peer_cidrs,omitempty"` } @@ -45152,6 +48144,13 @@ func UnmarshalVPNGatewayPrototype(m map[string]json.RawMessage, result interface // Volume : Volume struct type Volume struct { + // Indicates whether a running virtual server instance has an attachment to this volume. + Active *bool `json:"active" validate:"required"` + + // Indicates whether this volume is performing an operation that must be serialized. If an operation specifies that it + // requires serialization, the operation will fail unless this property is `false`. + Busy *bool `json:"busy" validate:"required"` + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating // volumes may expand in the future. Capacity *int64 `json:"capacity" validate:"required"` @@ -45165,7 +48164,7 @@ type Volume struct { // The type of encryption used on the volume. Encryption *string `json:"encryption" validate:"required"` - // A reference to the root key used to wrap the data encryption key for the volume. + // The root key used to wrap the data encryption key for the volume. // // This property will be present for volumes with an `encryption` type of // `user_managed`. @@ -45183,12 +48182,24 @@ type Volume struct { // The unique user-defined name for this volume. Name *string `json:"name" validate:"required"` + // The operating system associated with this volume. If absent, this volume was not + // created from an image, or the image did not include an operating system. + OperatingSystem *OperatingSystemReference `json:"operating_system,omitempty"` + // The profile this volume uses. Profile *VolumeProfileReference `json:"profile" validate:"required"` // The resource group for this volume. ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + // The image from which this volume was created (this may be + // [deleted](https://cloud.ibm.com/apidocs/vpc#deleted-resources)). + // If absent, this volume was not created from an image. + SourceImage *ImageReference `json:"source_image,omitempty"` + + // The snapshot from which this volume was cloned. + SourceSnapshot *SnapshotReference `json:"source_snapshot,omitempty"` + // The status of the volume. // // The enumerated values for this property will expand in the future. When processing this property, check for and log @@ -45196,14 +48207,14 @@ type Volume struct { // property value was encountered. Status *string `json:"status" validate:"required"` - // Array of reasons for the current status (if any). + // The reasons for the current status (if any). // // The enumerated reason code values for this property will expand in the future. When processing this property, check // for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the // unexpected reason code was encountered. StatusReasons []VolumeStatusReason `json:"status_reasons" validate:"required"` - // The collection of volume attachments attaching instances to the volume. 
+ // The volume attachments for this volume. VolumeAttachments []VolumeAttachmentReferenceVolumeContext `json:"volume_attachments" validate:"required"` // The zone this volume resides in. @@ -45234,6 +48245,14 @@ const ( // UnmarshalVolume unmarshals an instance of Volume from the specified map of raw messages. func UnmarshalVolume(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(Volume) + err = core.UnmarshalPrimitive(m, "active", &obj.Active) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "busy", &obj.Busy) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) if err != nil { return @@ -45270,6 +48289,10 @@ func UnmarshalVolume(m map[string]json.RawMessage, result interface{}) (err erro if err != nil { return } + err = core.UnmarshalModel(m, "operating_system", &obj.OperatingSystem, UnmarshalOperatingSystemReference) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileReference) if err != nil { return @@ -45278,6 +48301,14 @@ func UnmarshalVolume(m map[string]json.RawMessage, result interface{}) (err erro if err != nil { return } + err = core.UnmarshalModel(m, "source_image", &obj.SourceImage, UnmarshalImageReference) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_snapshot", &obj.SourceSnapshot, UnmarshalSnapshotReference) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "status", &obj.Status) if err != nil { return @@ -45497,6 +48528,46 @@ func UnmarshalVolumeAttachmentPrototypeInstanceByImageContext(m map[string]json. return } +// VolumeAttachmentPrototypeInstanceByVolumeContext : VolumeAttachmentPrototypeInstanceByVolumeContext struct +type VolumeAttachmentPrototypeInstanceByVolumeContext struct { + // If set to true, when deleting the instance the volume will also be deleted. + DeleteVolumeOnInstanceDelete *bool `json:"delete_volume_on_instance_delete,omitempty"` + + // The user-defined name for this volume attachment. + Name *string `json:"name,omitempty"` + + // An existing volume to attach to the instance, or a prototype object for a new volume. + Volume VolumeAttachmentVolumePrototypeInstanceByVolumeContextIntf `json:"volume" validate:"required"` +} + +// NewVolumeAttachmentPrototypeInstanceByVolumeContext : Instantiate VolumeAttachmentPrototypeInstanceByVolumeContext (Generic Model Constructor) +func (*VpcV1) NewVolumeAttachmentPrototypeInstanceByVolumeContext(volume VolumeAttachmentVolumePrototypeInstanceByVolumeContextIntf) (model *VolumeAttachmentPrototypeInstanceByVolumeContext, err error) { + model = &VolumeAttachmentPrototypeInstanceByVolumeContext{ + Volume: volume, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +// UnmarshalVolumeAttachmentPrototypeInstanceByVolumeContext unmarshals an instance of VolumeAttachmentPrototypeInstanceByVolumeContext from the specified map of raw messages. 
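+//
+// A construction sketch (editorial): provisioning an instance by volume with
+// a boot volume cloned from a snapshot; `snapshotID` is a placeholder, and
+// the base identity models are used for brevity.
+//
+//	attachment, err := vpcService.NewVolumeAttachmentPrototypeInstanceByVolumeContext(
+//		&VolumeAttachmentVolumePrototypeInstanceByVolumeContext{
+//			Name:           core.StringPtr("boot-volume"),
+//			Profile:        &VolumeProfileIdentity{Name: core.StringPtr("general-purpose")},
+//			SourceSnapshot: &SnapshotIdentity{ID: core.StringPtr(snapshotID)},
+//		})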
+func UnmarshalVolumeAttachmentPrototypeInstanceByVolumeContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeInstanceByVolumeContext) + err = core.UnmarshalPrimitive(m, "delete_volume_on_instance_delete", &obj.DeleteVolumeOnInstanceDelete) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume", &obj.Volume, UnmarshalVolumeAttachmentVolumePrototypeInstanceByVolumeContext) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // VolumeAttachmentPrototypeInstanceContext : VolumeAttachmentPrototypeInstanceContext struct type VolumeAttachmentPrototypeInstanceContext struct { // If set to true, when deleting the instance the volume will also be deleted. @@ -45505,8 +48576,7 @@ type VolumeAttachmentPrototypeInstanceContext struct { // The user-defined name for this volume attachment. Name *string `json:"name,omitempty"` - // The identity of the volume to attach to the instance, or a prototype object for a new - // volume. + // An existing volume to attach to the instance, or a prototype object for a new volume. Volume VolumeAttachmentVolumePrototypeInstanceContextIntf `json:"volume" validate:"required"` } @@ -45538,6 +48608,94 @@ func UnmarshalVolumeAttachmentPrototypeInstanceContext(m map[string]json.RawMess return } +// VolumeAttachmentPrototypeVolume : An existing volume to attach to the instance, or a prototype object for a new volume. +// Models which "extend" this model: +// - VolumeAttachmentPrototypeVolumeVolumeIdentity +// - VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext +type VolumeAttachmentPrototypeVolume struct { + // The unique identifier for this volume. + ID *string `json:"id,omitempty"` + + // The CRN for this volume. + CRN *string `json:"crn,omitempty"` + + // The URL for this volume. + Href *string `json:"href,omitempty"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile,omitempty"` + + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity,omitempty"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The snapshot from which to clone the volume. + SourceSnapshot SnapshotIdentityIntf `json:"source_snapshot,omitempty"` +} + +func (*VolumeAttachmentPrototypeVolume) isaVolumeAttachmentPrototypeVolume() bool { + return true +} + +type VolumeAttachmentPrototypeVolumeIntf interface { + isaVolumeAttachmentPrototypeVolume() bool +} + +// UnmarshalVolumeAttachmentPrototypeVolume unmarshals an instance of VolumeAttachmentPrototypeVolume from the specified map of raw messages. 
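+//
+// An editorial sketch of the model's two uses: identify an existing volume,
+// or describe a new one; the same struct serves both, with the unused fields
+// left nil. Identifiers are placeholders.
+//
+//	// attach an existing volume by ID
+//	existing := &VolumeAttachmentPrototypeVolume{ID: core.StringPtr(volumeID)}
+//
+//	// or create a new volume cloned from a snapshot
+//	fromSnapshot := &VolumeAttachmentPrototypeVolume{
+//		Profile:        &VolumeProfileIdentity{Name: core.StringPtr("general-purpose")},
+//		SourceSnapshot: &SnapshotIdentity{ID: core.StringPtr(snapshotID)},
+//	}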
+func UnmarshalVolumeAttachmentPrototypeVolume(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeVolume) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_snapshot", &obj.SourceSnapshot, UnmarshalSnapshotIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // VolumeAttachmentReferenceInstanceContext : VolumeAttachmentReferenceInstanceContext struct type VolumeAttachmentReferenceInstanceContext struct { // If present, this property indicates the referenced resource has been deleted and provides @@ -45705,7 +48863,72 @@ func UnmarshalVolumeAttachmentReferenceVolumeContextDeleted(m map[string]json.Ra return } -// VolumeAttachmentVolumePrototypeInstanceContext : The identity of the volume to attach to the instance, or a prototype object for a new volume. +// VolumeAttachmentVolumePrototypeInstanceByVolumeContext : An existing volume to attach to the instance, or a prototype object for a new volume. +// Models which "extend" this model: +// - VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext +type VolumeAttachmentVolumePrototypeInstanceByVolumeContext struct { + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity,omitempty"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the snapshot's `encryption_key` will be used. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile,omitempty"` + + // The snapshot from which to clone the volume. + SourceSnapshot SnapshotIdentityIntf `json:"source_snapshot,omitempty"` +} + +func (*VolumeAttachmentVolumePrototypeInstanceByVolumeContext) isaVolumeAttachmentVolumePrototypeInstanceByVolumeContext() bool { + return true +} + +type VolumeAttachmentVolumePrototypeInstanceByVolumeContextIntf interface { + isaVolumeAttachmentVolumePrototypeInstanceByVolumeContext() bool +} + +// UnmarshalVolumeAttachmentVolumePrototypeInstanceByVolumeContext unmarshals an instance of VolumeAttachmentVolumePrototypeInstanceByVolumeContext from the specified map of raw messages. 
+func UnmarshalVolumeAttachmentVolumePrototypeInstanceByVolumeContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentVolumePrototypeInstanceByVolumeContext) + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_snapshot", &obj.SourceSnapshot, UnmarshalSnapshotIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentVolumePrototypeInstanceContext : An existing volume to attach to the instance, or a prototype object for a new volume. // Models which "extend" this model: // - VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentity // - VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext @@ -45719,12 +48942,6 @@ type VolumeAttachmentVolumePrototypeInstanceContext struct { // The URL for this volume. Href *string `json:"href,omitempty"` - // The identity of the root key to use to wrap the data encryption key for the volume. - // - // If this property is not provided, the `encryption` type for the volume will be - // `provider_managed`. - EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` - // The bandwidth for the volume. Iops *int64 `json:"iops,omitempty"` @@ -45737,6 +48954,15 @@ type VolumeAttachmentVolumePrototypeInstanceContext struct { // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating // volumes may expand in the future. Capacity *int64 `json:"capacity,omitempty"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The snapshot from which to clone the volume. 
+ SourceSnapshot SnapshotIdentityIntf `json:"source_snapshot,omitempty"` } func (*VolumeAttachmentVolumePrototypeInstanceContext) isaVolumeAttachmentVolumePrototypeInstanceContext() bool { @@ -45762,10 +48988,6 @@ func UnmarshalVolumeAttachmentVolumePrototypeInstanceContext(m map[string]json.R if err != nil { return } - err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) - if err != nil { - return - } err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) if err != nil { return @@ -45782,6 +49004,14 @@ func UnmarshalVolumeAttachmentVolumePrototypeInstanceContext(m map[string]json.R if err != nil { return } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_snapshot", &obj.SourceSnapshot, UnmarshalSnapshotIdentity) + if err != nil { + return + } reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) return } @@ -46103,12 +49333,6 @@ func UnmarshalVolumeProfileReference(m map[string]json.RawMessage, result interf // Models which "extend" this model: // - VolumePrototypeVolumeByCapacity type VolumePrototype struct { - // The identity of the root key to use to wrap the data encryption key for the volume. - // - // If this property is not provided, the `encryption` type for the volume will be - // `provider_managed`. - EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` - // The bandwidth for the volume. Iops *int64 `json:"iops,omitempty"` @@ -46128,6 +49352,12 @@ type VolumePrototype struct { // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating // volumes may expand in the future. Capacity *int64 `json:"capacity,omitempty"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` } func (*VolumePrototype) isaVolumePrototype() bool { @@ -46141,10 +49371,6 @@ type VolumePrototypeIntf interface { // UnmarshalVolumePrototype unmarshals an instance of VolumePrototype from the specified map of raw messages. func UnmarshalVolumePrototype(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(VolumePrototype) - err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) - if err != nil { - return - } err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) if err != nil { return @@ -46169,6 +49395,10 @@ func UnmarshalVolumePrototype(m map[string]json.RawMessage, result interface{}) if err != nil { return } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) return } @@ -46179,7 +49409,7 @@ type VolumePrototypeInstanceByImageContext struct { // volumes may expand in the future. Capacity *int64 `json:"capacity,omitempty"` - // The identity of the root key to use to wrap the data encryption key for the volume. + // The root key to use to wrap the data encryption key for the volume. // // If this property is not provided but the image is encrypted, the image's // `encryption_key` will be used. 
Otherwise, the `encryption` type for the @@ -48529,7 +51759,7 @@ func UnmarshalImageIdentityByID(m map[string]json.RawMessage, result interface{} // ImagePrototypeImageByFile : ImagePrototypeImageByFile struct // This model "extends" ImagePrototype type ImagePrototypeImageByFile struct { - // The unique user-defined name for this image. Names starting with "ibm-" are not allowed. If unspecified, the name + // The unique user-defined name for this image. Names starting with `ibm-` are not allowed. If unspecified, the name // will be a hyphenated list of randomly-selected words. Name *string `json:"name,omitempty"` @@ -48544,10 +51774,10 @@ type ImagePrototypeImageByFile struct { // If this property is not provided, the imported image is treated as unencrypted. EncryptedDataKey *string `json:"encrypted_data_key,omitempty"` - // The identity of the root key that was used to wrap the data key (which is ultimately - // represented as `encrypted_data_key`). Additionally, the root key will be used to encrypt - // volumes created from this image (unless an alternate `encryption_key` is provided at - // volume creation). + // The root key that was used to wrap the data key (which is ultimately represented as + // `encrypted_data_key`). Additionally, the root key will be used to encrypt volumes + // created from this image (unless an alternate `encryption_key` is provided at volume + // creation). // // If this property is not provided, the imported image is treated as unencrypted. EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` @@ -48555,9 +51785,9 @@ type ImagePrototypeImageByFile struct { // The file from which to create the image. File *ImageFilePrototype `json:"file" validate:"required"` - // The identity of the [supported operating - // system](https://cloud.ibm.com/apidocs/vpc#list-operating-systems) included in - // this image. + // The [supported operating + // system](https://cloud.ibm.com/apidocs/vpc#list-operating-systems) included in this + // image. OperatingSystem OperatingSystemIdentityIntf `json:"operating_system" validate:"required"` } @@ -48606,9 +51836,300 @@ func UnmarshalImagePrototypeImageByFile(m map[string]json.RawMessage, result int return } +// ImagePrototypeImageBySourceVolume : ImagePrototypeImageBySourceVolume struct +// This model "extends" ImagePrototype +type ImagePrototypeImageBySourceVolume struct { + // The unique user-defined name for this image. Names starting with `ibm-` are not allowed. If unspecified, the name + // will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + // The root key used to wrap the system-generated data encryption key for the image. + // + // If this property is not provided, the root key from `source_volume` will be used. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The volume from which to create the image. The specified volume must: + // - Originate from an image, which will be used to populate this image's + // operating system information. + // - Not be `active` or `busy`. + // + // During image creation, the specified volume may briefly become `busy`. 
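+ //
+ // A construction sketch (editorial), using the generic constructor defined
+ // below; `VolumeIdentityByID` follows this package's identity-model naming
+ // and `volumeID` is a placeholder:
+ //
+ //	imageProto, err := vpcService.NewImagePrototypeImageBySourceVolume(
+ //		&VolumeIdentityByID{ID: core.StringPtr(volumeID)})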
+ SourceVolume VolumeIdentityIntf `json:"source_volume" validate:"required"` +} + +// NewImagePrototypeImageBySourceVolume : Instantiate ImagePrototypeImageBySourceVolume (Generic Model Constructor) +func (*VpcV1) NewImagePrototypeImageBySourceVolume(sourceVolume VolumeIdentityIntf) (model *ImagePrototypeImageBySourceVolume, err error) { + model = &ImagePrototypeImageBySourceVolume{ + SourceVolume: sourceVolume, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*ImagePrototypeImageBySourceVolume) isaImagePrototype() bool { + return true +} + +// UnmarshalImagePrototypeImageBySourceVolume unmarshals an instance of ImagePrototypeImageBySourceVolume from the specified map of raw messages. +func UnmarshalImagePrototypeImageBySourceVolume(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(ImagePrototypeImageBySourceVolume) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_volume", &obj.SourceVolume, UnmarshalVolumeIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionPrototypeScheduledActionPrototype : InstanceGroupManagerActionPrototypeScheduledActionPrototype struct +// Models which "extend" this model: +// - InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt +// - InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec +// This model "extends" InstanceGroupManagerActionPrototype +type InstanceGroupManagerActionPrototypeScheduledActionPrototype struct { + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name,omitempty"` + + // The date and time the scheduled action will run. + RunAt *strfmt.DateTime `json:"run_at,omitempty"` + + Group *InstanceGroupManagerScheduledActionGroupPrototype `json:"group,omitempty"` + + Manager InstanceGroupManagerScheduledActionManagerPrototypeIntf `json:"manager,omitempty"` + + // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min + // period. + CronSpec *string `json:"cron_spec,omitempty"` +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototype) isaInstanceGroupManagerActionPrototypeScheduledActionPrototype() bool { + return true +} + +type InstanceGroupManagerActionPrototypeScheduledActionPrototypeIntf interface { + InstanceGroupManagerActionPrototypeIntf + isaInstanceGroupManagerActionPrototypeScheduledActionPrototype() bool +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototype) isaInstanceGroupManagerActionPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototype unmarshals an instance of InstanceGroupManagerActionPrototypeScheduledActionPrototype from the specified map of raw messages. 
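+//
+// A construction sketch (editorial): a one-shot action sets `run_at`, a
+// recurring one sets `cron_spec`, and the desired size rides along in
+// `group`. `MembershipCount` on the group prototype follows this package's
+// conventions and is assumed here; all values are placeholders.
+//
+//	runAt := strfmt.DateTime(time.Now().Add(24 * time.Hour))
+//	proto := &InstanceGroupManagerActionPrototypeScheduledActionPrototype{
+//		Name:  core.StringPtr("scale-up-tomorrow"),
+//		RunAt: &runAt,
+//		Group: &InstanceGroupManagerScheduledActionGroupPrototype{
+//			MembershipCount: core.Int64Ptr(10),
+//		},
+//	}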
+func UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototype(m map[string]json.RawMessage, result interface{}) (err error) {
+ obj := new(InstanceGroupManagerActionPrototypeScheduledActionPrototype)
+ err = core.UnmarshalPrimitive(m, "name", &obj.Name)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "run_at", &obj.RunAt)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerScheduledActionGroupPrototype)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerScheduledActionManagerPrototype)
+ if err != nil {
+ return
+ }
+ err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec)
+ if err != nil {
+ return
+ }
+ reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
+ return
+}
+
+// InstanceGroupManagerActionScheduledAction : InstanceGroupManagerActionScheduledAction struct
+// Models which "extend" this model:
+// - InstanceGroupManagerActionScheduledActionGroupTarget
+// - InstanceGroupManagerActionScheduledActionManagerTarget
+// This model "extends" InstanceGroupManagerAction
+type InstanceGroupManagerActionScheduledAction struct {
+ // If set to `true`, this scheduled action will be automatically deleted after it has finished and the
+ // `auto_delete_timeout` time has passed.
+ AutoDelete *bool `json:"auto_delete" validate:"required"`
+
+ // The amount of time in hours that must pass before the scheduled action is automatically deleted once it has
+ // finished. If this value is 0, the action will be deleted on completion.
+ AutoDeleteTimeout *int64 `json:"auto_delete_timeout" validate:"required"`
+
+ // The date and time that the instance group manager action was created.
+ CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"`
+
+ // The URL for this instance group manager action.
+ Href *string `json:"href" validate:"required"`
+
+ // The unique identifier for this instance group manager action.
+ ID *string `json:"id" validate:"required"`
+
+ // The user-defined name for this instance group manager action. Names must be unique within the instance group
+ // manager.
+ Name *string `json:"name" validate:"required"`
+
+ // The resource type.
+ ResourceType *string `json:"resource_type" validate:"required"`
+
+ // The status of the instance group action
+ // - `active`: Action is ready to be run
+ // - `completed`: Action was completed successfully
+ // - `failed`: Action could not be completed successfully
+ // - `incompatible`: Action parameters are not compatible with the group or manager
+ // - `omitted`: Action was not applied because this action's manager was disabled.
+ Status *string `json:"status" validate:"required"`
+
+ // The date and time that the instance group manager action was modified.
+ UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"`
+
+ // The type of action for the instance group.
+ ActionType *string `json:"action_type" validate:"required"`
+
+ // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min
+ // period.
+ CronSpec *string `json:"cron_spec,omitempty"`
+
+ // The date and time the scheduled action was last applied. If absent, the action has never been applied.
+ LastAppliedAt *strfmt.DateTime `json:"last_applied_at,omitempty"`
+
+ // The date and time the scheduled action will next run. If absent, the system is currently calculating the next run
+ // time.
+ NextRunAt *strfmt.DateTime `json:"next_run_at,omitempty"` + + Group *InstanceGroupManagerScheduledActionGroup `json:"group,omitempty"` + + Manager InstanceGroupManagerScheduledActionManagerIntf `json:"manager,omitempty"` +} + +// Constants associated with the InstanceGroupManagerActionScheduledAction.ResourceType property. +// The resource type. +const ( + InstanceGroupManagerActionScheduledActionResourceTypeInstanceGroupManagerActionConst = "instance_group_manager_action" +) + +// Constants associated with the InstanceGroupManagerActionScheduledAction.Status property. +// The status of the instance group action +// - `active`: Action is ready to be run +// - `completed`: Action was completed successfully +// - `failed`: Action could not be completed successfully +// - `incompatible`: Action parameters are not compatible with the group or manager +// - `omitted`: Action was not applied because this action's manager was disabled. +const ( + InstanceGroupManagerActionScheduledActionStatusActiveConst = "active" + InstanceGroupManagerActionScheduledActionStatusCompletedConst = "completed" + InstanceGroupManagerActionScheduledActionStatusFailedConst = "failed" + InstanceGroupManagerActionScheduledActionStatusIncompatibleConst = "incompatible" + InstanceGroupManagerActionScheduledActionStatusOmittedConst = "omitted" +) + +// Constants associated with the InstanceGroupManagerActionScheduledAction.ActionType property. +// The type of action for the instance group. +const ( + InstanceGroupManagerActionScheduledActionActionTypeScheduledConst = "scheduled" +) + +func (*InstanceGroupManagerActionScheduledAction) isaInstanceGroupManagerActionScheduledAction() bool { + return true +} + +type InstanceGroupManagerActionScheduledActionIntf interface { + InstanceGroupManagerActionIntf + isaInstanceGroupManagerActionScheduledAction() bool +} + +func (*InstanceGroupManagerActionScheduledAction) isaInstanceGroupManagerAction() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionScheduledAction unmarshals an instance of InstanceGroupManagerActionScheduledAction from the specified map of raw messages. 
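+//
+// A consumption sketch (editorial): compare `Status` against the constants
+// defined above rather than raw strings; `action` is an assumed
+// *InstanceGroupManagerActionScheduledAction.
+//
+//	switch *action.Status {
+//	case InstanceGroupManagerActionScheduledActionStatusFailedConst:
+//		// surface the failure
+//	case InstanceGroupManagerActionScheduledActionStatusOmittedConst:
+//		// the action's manager was disabled when it came due
+//	}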
+func UnmarshalInstanceGroupManagerActionScheduledAction(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionScheduledAction) + err = core.UnmarshalPrimitive(m, "auto_delete", &obj.AutoDelete) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "auto_delete_timeout", &obj.AutoDeleteTimeout) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "action_type", &obj.ActionType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_applied_at", &obj.LastAppliedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next_run_at", &obj.NextRunAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerScheduledActionGroup) + if err != nil { + return + } + err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerScheduledActionManager) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // InstanceGroupManagerAutoScale : InstanceGroupManagerAutoScale struct // This model "extends" InstanceGroupManager type InstanceGroupManagerAutoScale struct { + // The date and time that the instance group manager was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + // The URL for this instance group manager. Href *string `json:"href" validate:"required"` @@ -48621,6 +52142,9 @@ type InstanceGroupManagerAutoScale struct { // The user-defined name for this instance group manager. Names must be unique within the instance group. Name *string `json:"name" validate:"required"` + // The date and time that the instance group manager was updated. + UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` + // The time window in seconds to aggregate metrics prior to evaluation. AggregationWindow *int64 `json:"aggregation_window" validate:"required"` @@ -48653,6 +52177,10 @@ func (*InstanceGroupManagerAutoScale) isaInstanceGroupManager() bool { // UnmarshalInstanceGroupManagerAutoScale unmarshals an instance of InstanceGroupManagerAutoScale from the specified map of raw messages. 
func UnmarshalInstanceGroupManagerAutoScale(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(InstanceGroupManagerAutoScale) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "href", &obj.Href) if err != nil { return @@ -48669,6 +52197,10 @@ func UnmarshalInstanceGroupManagerAutoScale(m map[string]json.RawMessage, result if err != nil { return } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "aggregation_window", &obj.AggregationWindow) if err != nil { return @@ -48770,6 +52302,9 @@ func UnmarshalInstanceGroupManagerPolicyPrototypeInstanceGroupManagerTargetPolic // InstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy : InstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy struct // This model "extends" InstanceGroupManagerPolicy type InstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy struct { + // The date and time that the instance group manager policy was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + // The URL for this instance group manager policy. Href *string `json:"href" validate:"required"` @@ -48780,6 +52315,9 @@ type InstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy struct { // manager. Name *string `json:"name" validate:"required"` + // The date and time that the instance group manager policy was updated. + UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` + // The type of metric to be evaluated. MetricType *string `json:"metric_type" validate:"required"` @@ -48812,6 +52350,10 @@ func (*InstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy) isaInstanceGr // UnmarshalInstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy unmarshals an instance of InstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy from the specified map of raw messages. func UnmarshalInstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(InstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "href", &obj.Href) if err != nil { return @@ -48824,6 +52366,10 @@ func UnmarshalInstanceGroupManagerPolicyInstanceGroupManagerTargetPolicy(m map[s if err != nil { return } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } err = core.UnmarshalPrimitive(m, "metric_type", &obj.MetricType) if err != nil { return @@ -48920,6 +52466,249 @@ func UnmarshalInstanceGroupManagerPrototypeInstanceGroupManagerAutoScalePrototyp return } +// InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype : InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype struct +// This model "extends" InstanceGroupManagerPrototype +type InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype struct { + // If set to `true`, this manager will control the instance group. + ManagementEnabled *bool `json:"management_enabled,omitempty"` + + // The user-defined name for this instance group manager. Names must be unique within the instance group. + Name *string `json:"name,omitempty"` + + // The type of instance group manager. 
+ ManagerType *string `json:"manager_type" validate:"required"` +} + +// Constants associated with the InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype.ManagerType property. +// The type of instance group manager. +const ( + InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototypeManagerTypeScheduledConst = "scheduled" +) + +// NewInstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype : Instantiate InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype (Generic Model Constructor) +func (*VpcV1) NewInstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype(managerType string) (model *InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype, err error) { + model = &InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype{ + ManagerType: core.StringPtr(managerType), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype) isaInstanceGroupManagerPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype unmarshals an instance of InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype from the specified map of raw messages. +func UnmarshalInstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype) + err = core.UnmarshalPrimitive(m, "management_enabled", &obj.ManagementEnabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "manager_type", &obj.ManagerType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerScheduled : InstanceGroupManagerScheduled struct +// This model "extends" InstanceGroupManager +type InstanceGroupManagerScheduled struct { + // The date and time that the instance group manager was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The URL for this instance group manager. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this instance group manager. + ID *string `json:"id" validate:"required"` + + // If set to `true`, this manager will control the instance group. + ManagementEnabled *bool `json:"management_enabled" validate:"required"` + + // The user-defined name for this instance group manager. Names must be unique within the instance group. + Name *string `json:"name" validate:"required"` + + // The date and time that the instance group manager was updated. + UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` + + // The actions of the instance group manager. + Actions []InstanceGroupManagerActionReference `json:"actions" validate:"required"` + + // The type of instance group manager. + ManagerType *string `json:"manager_type" validate:"required"` +} + +// Constants associated with the InstanceGroupManagerScheduled.ManagerType property. +// The type of instance group manager. +const ( + InstanceGroupManagerScheduledManagerTypeScheduledConst = "scheduled" +) + +func (*InstanceGroupManagerScheduled) isaInstanceGroupManager() bool { + return true +} + +// UnmarshalInstanceGroupManagerScheduled unmarshals an instance of InstanceGroupManagerScheduled from the specified map of raw messages. 
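+//
+// A creation sketch (editorial): a manager of this type is requested with the
+// scheduled prototype defined above; "scheduled" is its only valid
+// `manager_type`, and passing the prototype to `CreateInstanceGroupManager`
+// is assumed from this package's conventions.
+//
+//	managerProto, err := vpcService.NewInstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototype(
+//		InstanceGroupManagerPrototypeInstanceGroupManagerScheduledPrototypeManagerTypeScheduledConst)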
+func UnmarshalInstanceGroupManagerScheduled(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerScheduled) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "management_enabled", &obj.ManagementEnabled) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "actions", &obj.Actions, UnmarshalInstanceGroupManagerActionReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "manager_type", &obj.ManagerType) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerScheduledActionManagerAutoScale : InstanceGroupManagerScheduledActionManagerAutoScale struct +// This model "extends" InstanceGroupManagerScheduledActionManager +type InstanceGroupManagerScheduledActionManagerAutoScale struct { + // If present, this property indicates the referenced resource has been deleted and provides + // some supplementary information. + Deleted *InstanceGroupManagerReferenceDeleted `json:"deleted,omitempty"` + + // The URL for this instance group manager. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this instance group manager. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this instance group manager. Names must be unique within the instance group. + Name *string `json:"name" validate:"required"` + + // The maximum number of members the instance group should have at the scheduled time. + MaxMembershipCount *int64 `json:"max_membership_count,omitempty"` + + // The minimum number of members the instance group should have at the scheduled time. + MinMembershipCount *int64 `json:"min_membership_count,omitempty"` +} + +func (*InstanceGroupManagerScheduledActionManagerAutoScale) isaInstanceGroupManagerScheduledActionManager() bool { + return true +} + +// UnmarshalInstanceGroupManagerScheduledActionManagerAutoScale unmarshals an instance of InstanceGroupManagerScheduledActionManagerAutoScale from the specified map of raw messages. +func UnmarshalInstanceGroupManagerScheduledActionManagerAutoScale(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerScheduledActionManagerAutoScale) + err = core.UnmarshalModel(m, "deleted", &obj.Deleted, UnmarshalInstanceGroupManagerReferenceDeleted) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "max_membership_count", &obj.MaxMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "min_membership_count", &obj.MinMembershipCount) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype : The auto scale manager to update and the property or properties to be updated. 
Exactly one of `id` or `href` must be +// provided in addition to at least one of `min_membership_count` and +// `max_membership_count`. +// Models which "extend" this model: +// - InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID +// - InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref +// This model "extends" InstanceGroupManagerScheduledActionManagerPrototype +type InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype struct { + // The maximum number of members the instance group should have at the scheduled time. + MaxMembershipCount *int64 `json:"max_membership_count,omitempty"` + + // The minimum number of members the instance group should have at the scheduled time. + MinMembershipCount *int64 `json:"min_membership_count,omitempty"` + + // The unique identifier for this instance group manager. + ID *string `json:"id,omitempty"` + + // The URL for this instance group manager. + Href *string `json:"href,omitempty"` +} + +func (*InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype) isaInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype() bool { + return true +} + +type InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeIntf interface { + InstanceGroupManagerScheduledActionManagerPrototypeIntf + isaInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype() bool +} + +func (*InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype) isaInstanceGroupManagerScheduledActionManagerPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype unmarshals an instance of InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype from the specified map of raw messages. +func UnmarshalInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype) + err = core.UnmarshalPrimitive(m, "max_membership_count", &obj.MaxMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "min_membership_count", &obj.MinMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // InstancePatchProfileInstanceProfileIdentityByHref : InstancePatchProfileInstanceProfileIdentityByHref struct // This model "extends" InstancePatchProfile type InstancePatchProfileInstanceProfileIdentityByHref struct { @@ -48982,8 +52771,106 @@ func UnmarshalInstancePatchProfileInstanceProfileIdentityByName(m map[string]jso return } -// InstanceProfileBandwidthDependent : The total bandwidth shared across the network interfaces of an instance with this profile depends on its -// configuration. +// InstancePlacementTargetPrototypeDedicatedHostGroupIdentity : Identifies a dedicated host group by a unique property. 
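+// A minimal sketch of supplying such an identity as an instance prototype's
+// `placement_target` (the ByID subtype and its `ID` field are taken from the
+// listing below; the identifier value and the `prototype` variable are
+// placeholders):
+//
+//	prototype.PlacementTarget = &InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID{
+//		ID: core.StringPtr("dedicated-host-group-id"),
+//	}
+//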
+// Models which "extend" this model: +// - InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID +// - InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN +// - InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref +// This model "extends" InstancePlacementTargetPrototype +type InstancePlacementTargetPrototypeDedicatedHostGroupIdentity struct { + // The unique identifier for this dedicated host group. + ID *string `json:"id,omitempty"` + + // The CRN for this dedicated host group. + CRN *string `json:"crn,omitempty"` + + // The URL for this dedicated host group. + Href *string `json:"href,omitempty"` +} + +func (*InstancePlacementTargetPrototypeDedicatedHostGroupIdentity) isaInstancePlacementTargetPrototypeDedicatedHostGroupIdentity() bool { + return true +} + +type InstancePlacementTargetPrototypeDedicatedHostGroupIdentityIntf interface { + InstancePlacementTargetPrototypeIntf + isaInstancePlacementTargetPrototypeDedicatedHostGroupIdentity() bool +} + +func (*InstancePlacementTargetPrototypeDedicatedHostGroupIdentity) isaInstancePlacementTargetPrototype() bool { + return true +} + +// UnmarshalInstancePlacementTargetPrototypeDedicatedHostGroupIdentity unmarshals an instance of InstancePlacementTargetPrototypeDedicatedHostGroupIdentity from the specified map of raw messages. +func UnmarshalInstancePlacementTargetPrototypeDedicatedHostGroupIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePlacementTargetPrototypeDedicatedHostGroupIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePlacementTargetPrototypeDedicatedHostIdentity : Identifies a dedicated host by a unique property. +// Models which "extend" this model: +// - InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID +// - InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN +// - InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref +// This model "extends" InstancePlacementTargetPrototype +type InstancePlacementTargetPrototypeDedicatedHostIdentity struct { + // The unique identifier for this dedicated host. + ID *string `json:"id,omitempty"` + + // The CRN for this dedicated host. + CRN *string `json:"crn,omitempty"` + + // The URL for this dedicated host. + Href *string `json:"href,omitempty"` +} + +func (*InstancePlacementTargetPrototypeDedicatedHostIdentity) isaInstancePlacementTargetPrototypeDedicatedHostIdentity() bool { + return true +} + +type InstancePlacementTargetPrototypeDedicatedHostIdentityIntf interface { + InstancePlacementTargetPrototypeIntf + isaInstancePlacementTargetPrototypeDedicatedHostIdentity() bool +} + +func (*InstancePlacementTargetPrototypeDedicatedHostIdentity) isaInstancePlacementTargetPrototype() bool { + return true +} + +// UnmarshalInstancePlacementTargetPrototypeDedicatedHostIdentity unmarshals an instance of InstancePlacementTargetPrototypeDedicatedHostIdentity from the specified map of raw messages. 
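+//
+// On the request side, a specific dedicated host is referenced through one of
+// the subtypes listed above; a minimal sketch (the CRN value and the
+// `prototype` variable are placeholders):
+//
+//	prototype.PlacementTarget = &InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN{
+//		CRN: core.StringPtr("crn-of-dedicated-host"),
+//	}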
+func UnmarshalInstancePlacementTargetPrototypeDedicatedHostIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePlacementTargetPrototypeDedicatedHostIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceProfileBandwidthDependent : The total bandwidth shared across the network interfaces and storage volumes of an instance with this profile depends +// on its configuration. // This model "extends" InstanceProfileBandwidth type InstanceProfileBandwidthDependent struct { // The type for this profile field. @@ -49054,7 +52941,8 @@ func UnmarshalInstanceProfileBandwidthEnum(m map[string]json.RawMessage, result return } -// InstanceProfileBandwidthFixed : The total bandwidth (in megabits per second) shared across the network interfaces of an instance with this profile. +// InstanceProfileBandwidthFixed : The total bandwidth (in megabits per second) shared across the network interfaces and storage volumes of an instance +// with this profile. // This model "extends" InstanceProfileBandwidth type InstanceProfileBandwidthFixed struct { // The type for this profile field. @@ -49089,8 +52977,8 @@ func UnmarshalInstanceProfileBandwidthFixed(m map[string]json.RawMessage, result return } -// InstanceProfileBandwidthRange : The permitted total bandwidth range (in megabits per second) shared across the network interfaces of an instance with -// this profile. +// InstanceProfileBandwidthRange : The permitted total bandwidth range (in megabits per second) shared across the network interfaces and storage volumes +// of an instance with this profile. // This model "extends" InstanceProfileBandwidth type InstanceProfileBandwidthRange struct { // The default value for this profile field. @@ -49931,9 +53819,12 @@ type InstancePrototypeInstanceByImage struct { // name will be a hyphenated list of randomly-selected words. Name *string `json:"name,omitempty"` - // Collection of additional network interfaces to create for the virtual server instance. + // The additional network interfaces to create for the virtual server instance. NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + // The profile to use for this virtual server instance. Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` @@ -49942,7 +53833,7 @@ type InstancePrototypeInstanceByImage struct { // User data to be made available when setting up the virtual server instance. UserData *string `json:"user_data,omitempty"` - // Collection of volume attachments. + // The volume attachments for this virtual server instance. VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the @@ -49952,7 +53843,7 @@ type InstancePrototypeInstanceByImage struct { // The boot volume attachment for the virtual server instance. 
BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` - // The identity of the image to use when provisioning the virtual server instance. + // The image to use when provisioning the virtual server instance. Image ImageIdentityIntf `json:"image" validate:"required"` // Primary network interface. @@ -49992,6 +53883,10 @@ func UnmarshalInstancePrototypeInstanceByImage(m map[string]json.RawMessage, res if err != nil { return } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) if err != nil { return @@ -50047,9 +53942,12 @@ type InstancePrototypeInstanceBySourceTemplate struct { // name will be a hyphenated list of randomly-selected words. Name *string `json:"name,omitempty"` - // Collection of additional network interfaces to create for the virtual server instance. + // The additional network interfaces to create for the virtual server instance. NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + // The profile to use for this virtual server instance. Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` @@ -50058,7 +53956,7 @@ type InstancePrototypeInstanceBySourceTemplate struct { // User data to be made available when setting up the virtual server instance. UserData *string `json:"user_data,omitempty"` - // Collection of volume attachments. + // The volume attachments for this virtual server instance. VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the @@ -50068,7 +53966,7 @@ type InstancePrototypeInstanceBySourceTemplate struct { // The boot volume attachment for the virtual server instance. BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` - // The identity of the image to use when provisioning the virtual server instance. + // The image to use when provisioning the virtual server instance. Image ImageIdentityIntf `json:"image,omitempty"` // Primary network interface. @@ -50109,6 +54007,10 @@ func UnmarshalInstancePrototypeInstanceBySourceTemplate(m map[string]json.RawMes if err != nil { return } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) if err != nil { return @@ -50153,6 +54055,122 @@ func UnmarshalInstancePrototypeInstanceBySourceTemplate(m map[string]json.RawMes return } +// InstancePrototypeInstanceByVolume : InstancePrototypeInstanceByVolume struct +// This model "extends" InstancePrototype +type InstancePrototypeInstanceByVolume struct { + // The public SSH keys for the administrative user of the virtual server instance. Up to 10 keys may be provided; if no + // keys are provided the instance will be inaccessible unless the image used provides another means of access. For + // Windows instances, one of the keys will be used to encrypt the administrator password. 
+ // + // Keys will be made available to the virtual server instance as cloud-init vendor data. For cloud-init enabled images, + // these keys will also be added as SSH authorized keys for the administrative user. + Keys []KeyIdentityIntf `json:"keys,omitempty"` + + // The unique user-defined name for this virtual server instance (and default system hostname). If unspecified, the + // name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` + + // The additional network interfaces to create for the virtual server instance. + NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + + // The profile to use for this virtual server instance. + Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` + + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` + + // User data to be made available when setting up the virtual server instance. + UserData *string `json:"user_data,omitempty"` + + // The volume attachments for this virtual server instance. + VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` + + // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the + // instance's network interfaces. + VPC VPCIdentityIntf `json:"vpc,omitempty"` + + // The boot volume attachment for the virtual server instance. + BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByVolumeContext `json:"boot_volume_attachment" validate:"required"` + + // Primary network interface. + PrimaryNetworkInterface *NetworkInterfacePrototype `json:"primary_network_interface" validate:"required"` + + // The zone this virtual server instance will reside in. + Zone ZoneIdentityIntf `json:"zone" validate:"required"` +} + +// NewInstancePrototypeInstanceByVolume : Instantiate InstancePrototypeInstanceByVolume (Generic Model Constructor) +func (*VpcV1) NewInstancePrototypeInstanceByVolume(bootVolumeAttachment *VolumeAttachmentPrototypeInstanceByVolumeContext, primaryNetworkInterface *NetworkInterfacePrototype, zone ZoneIdentityIntf) (model *InstancePrototypeInstanceByVolume, err error) { + model = &InstancePrototypeInstanceByVolume{ + BootVolumeAttachment: bootVolumeAttachment, + PrimaryNetworkInterface: primaryNetworkInterface, + Zone: zone, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstancePrototypeInstanceByVolume) isaInstancePrototype() bool { + return true +} + +// UnmarshalInstancePrototypeInstanceByVolume unmarshals an instance of InstancePrototypeInstanceByVolume from the specified map of raw messages. 
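+//
+// Prototypes of this type are normally built with
+// NewInstancePrototypeInstanceByVolume rather than unmarshalled by hand. A
+// minimal construction sketch, assuming `vpcService` is an initialized *VpcV1
+// and that VolumeAttachmentPrototypeInstanceByVolumeContext carries the boot
+// volume in a `Volume` field (identifiers are placeholders):
+//
+//	bootVol, _ := vpcService.NewVolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext(
+//		&VolumeProfileIdentityByName{Name: core.StringPtr("general-purpose")},
+//		&SnapshotIdentityByID{ID: core.StringPtr("snapshot-id")},
+//	)
+//	boot := &VolumeAttachmentPrototypeInstanceByVolumeContext{Volume: bootVol}
+//	nic := &NetworkInterfacePrototype{Subnet: &SubnetIdentityByID{ID: core.StringPtr("subnet-id")}}
+//	zone := &ZoneIdentityByName{Name: core.StringPtr("us-south-1")}
+//	proto, err := vpcService.NewInstancePrototypeInstanceByVolume(boot, nic, zone)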
+func UnmarshalInstancePrototypeInstanceByVolume(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePrototypeInstanceByVolume) + err = core.UnmarshalModel(m, "keys", &obj.Keys, UnmarshalKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_interfaces", &obj.NetworkInterfaces, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "user_data", &obj.UserData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume_attachments", &obj.VolumeAttachments, UnmarshalVolumeAttachmentPrototypeInstanceContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "boot_volume_attachment", &obj.BootVolumeAttachment, UnmarshalVolumeAttachmentPrototypeInstanceByVolumeContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "primary_network_interface", &obj.PrimaryNetworkInterface, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // InstanceTemplateIdentityByCRN : InstanceTemplateIdentityByCRN struct // This model "extends" InstanceTemplateIdentity type InstanceTemplateIdentityByCRN struct { @@ -50261,9 +54279,12 @@ type InstanceTemplatePrototypeInstanceByImage struct { // name will be a hyphenated list of randomly-selected words. Name *string `json:"name,omitempty"` - // Collection of additional network interfaces to create for the virtual server instance. + // The additional network interfaces to create for the virtual server instance. NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + // The profile to use for this virtual server instance. Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` @@ -50272,7 +54293,7 @@ type InstanceTemplatePrototypeInstanceByImage struct { // User data to be made available when setting up the virtual server instance. UserData *string `json:"user_data,omitempty"` - // Collection of volume attachments. + // The volume attachments for this virtual server instance. VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the @@ -50282,7 +54303,7 @@ type InstanceTemplatePrototypeInstanceByImage struct { // The boot volume attachment for the virtual server instance. BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` - // The identity of the image to use when provisioning the virtual server instance. 
+ // The image to use when provisioning the virtual server instance. Image ImageIdentityIntf `json:"image" validate:"required"` // Primary network interface. @@ -50322,6 +54343,10 @@ func UnmarshalInstanceTemplatePrototypeInstanceByImage(m map[string]json.RawMess if err != nil { return } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) if err != nil { return @@ -50377,9 +54402,12 @@ type InstanceTemplatePrototypeInstanceBySourceTemplate struct { // name will be a hyphenated list of randomly-selected words. Name *string `json:"name,omitempty"` - // Collection of additional network interfaces to create for the virtual server instance. + // The additional network interfaces to create for the virtual server instance. NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + // The profile to use for this virtual server instance. Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` @@ -50388,7 +54416,7 @@ type InstanceTemplatePrototypeInstanceBySourceTemplate struct { // User data to be made available when setting up the virtual server instance. UserData *string `json:"user_data,omitempty"` - // Collection of volume attachments. + // The volume attachments for this virtual server instance. VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the @@ -50398,7 +54426,7 @@ type InstanceTemplatePrototypeInstanceBySourceTemplate struct { // The boot volume attachment for the virtual server instance. BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` - // The identity of the image to use when provisioning the virtual server instance. + // The image to use when provisioning the virtual server instance. Image ImageIdentityIntf `json:"image,omitempty"` // Primary network interface. @@ -50439,6 +54467,10 @@ func UnmarshalInstanceTemplatePrototypeInstanceBySourceTemplate(m map[string]jso if err != nil { return } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) if err != nil { return @@ -50483,21 +54515,9 @@ func UnmarshalInstanceTemplatePrototypeInstanceBySourceTemplate(m map[string]jso return } -// InstanceTemplateInstanceByImage : InstanceTemplateInstanceByImage struct -// This model "extends" InstanceTemplate -type InstanceTemplateInstanceByImage struct { - // The date and time that the instance template was created. - CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` - - // The CRN for this instance template. - CRN *string `json:"crn" validate:"required"` - - // The URL for this instance template. - Href *string `json:"href" validate:"required"` - - // The unique identifier for this instance template. 
- ID *string `json:"id" validate:"required"` - +// InstanceTemplatePrototypeInstanceByVolume : InstanceTemplatePrototypeInstanceByVolume struct +// This model "extends" InstanceTemplatePrototype +type InstanceTemplatePrototypeInstanceByVolume struct { // The public SSH keys for the administrative user of the virtual server instance. Up to 10 keys may be provided; if no // keys are provided the instance will be inaccessible unless the image used provides another means of access. For // Windows instances, one of the keys will be used to encrypt the administrator password. @@ -50506,22 +54526,25 @@ type InstanceTemplateInstanceByImage struct { // these keys will also be added as SSH authorized keys for the administrative user. Keys []KeyIdentityIntf `json:"keys,omitempty"` - // The unique user-defined name for this instance template. - Name *string `json:"name" validate:"required"` + // The unique user-defined name for this virtual server instance (and default system hostname). If unspecified, the + // name will be a hyphenated list of randomly-selected words. + Name *string `json:"name,omitempty"` - // Collection of additional network interfaces to create for the virtual server instance. + // The additional network interfaces to create for the virtual server instance. NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + // The profile to use for this virtual server instance. Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` - // The resource group for this instance template. - ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + ResourceGroup ResourceGroupIdentityIntf `json:"resource_group,omitempty"` // User data to be made available when setting up the virtual server instance. UserData *string `json:"user_data,omitempty"` - // Collection of volume attachments. + // The volume attachments for this virtual server instance. VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the @@ -50529,10 +54552,7 @@ type InstanceTemplateInstanceByImage struct { VPC VPCIdentityIntf `json:"vpc,omitempty"` // The boot volume attachment for the virtual server instance. - BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` - - // The identity of the image to use when provisioning the virtual server instance. - Image ImageIdentityIntf `json:"image" validate:"required"` + BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByVolumeContext `json:"boot_volume_attachment" validate:"required"` // Primary network interface. 
PrimaryNetworkInterface *NetworkInterfacePrototype `json:"primary_network_interface" validate:"required"` @@ -50541,29 +54561,24 @@ type InstanceTemplateInstanceByImage struct { Zone ZoneIdentityIntf `json:"zone" validate:"required"` } -func (*InstanceTemplateInstanceByImage) isaInstanceTemplate() bool { +// NewInstanceTemplatePrototypeInstanceByVolume : Instantiate InstanceTemplatePrototypeInstanceByVolume (Generic Model Constructor) +func (*VpcV1) NewInstanceTemplatePrototypeInstanceByVolume(bootVolumeAttachment *VolumeAttachmentPrototypeInstanceByVolumeContext, primaryNetworkInterface *NetworkInterfacePrototype, zone ZoneIdentityIntf) (model *InstanceTemplatePrototypeInstanceByVolume, err error) { + model = &InstanceTemplatePrototypeInstanceByVolume{ + BootVolumeAttachment: bootVolumeAttachment, + PrimaryNetworkInterface: primaryNetworkInterface, + Zone: zone, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceTemplatePrototypeInstanceByVolume) isaInstanceTemplatePrototype() bool { return true } -// UnmarshalInstanceTemplateInstanceByImage unmarshals an instance of InstanceTemplateInstanceByImage from the specified map of raw messages. -func UnmarshalInstanceTemplateInstanceByImage(m map[string]json.RawMessage, result interface{}) (err error) { - obj := new(InstanceTemplateInstanceByImage) - err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) - if err != nil { - return - } - err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) - if err != nil { - return - } - err = core.UnmarshalPrimitive(m, "href", &obj.Href) - if err != nil { - return - } - err = core.UnmarshalPrimitive(m, "id", &obj.ID) - if err != nil { - return - } +// UnmarshalInstanceTemplatePrototypeInstanceByVolume unmarshals an instance of InstanceTemplatePrototypeInstanceByVolume from the specified map of raw messages. 
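+//
+// A sketch of turning this prototype into a template, assuming the
+// CreateInstanceTemplate operation and its options constructor from elsewhere
+// in this package (`boot`, `nic` and `zone` are built as for
+// InstancePrototypeInstanceByVolume):
+//
+//	tmplProto, _ := vpcService.NewInstanceTemplatePrototypeInstanceByVolume(boot, nic, zone)
+//	options := vpcService.NewCreateInstanceTemplateOptions(tmplProto)
+//	template, response, err := vpcService.CreateInstanceTemplate(options)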
+func UnmarshalInstanceTemplatePrototypeInstanceByVolume(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceTemplatePrototypeInstanceByVolume) err = core.UnmarshalModel(m, "keys", &obj.Keys, UnmarshalKeyIdentity) if err != nil { return @@ -50576,11 +54591,15 @@ func UnmarshalInstanceTemplateInstanceByImage(m map[string]json.RawMessage, resu if err != nil { return } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) if err != nil { return } - err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupIdentity) if err != nil { return } @@ -50596,11 +54615,7 @@ func UnmarshalInstanceTemplateInstanceByImage(m map[string]json.RawMessage, resu if err != nil { return } - err = core.UnmarshalModel(m, "boot_volume_attachment", &obj.BootVolumeAttachment, UnmarshalVolumeAttachmentPrototypeInstanceByImageContext) - if err != nil { - return - } - err = core.UnmarshalModel(m, "image", &obj.Image, UnmarshalImageIdentity) + err = core.UnmarshalModel(m, "boot_volume_attachment", &obj.BootVolumeAttachment, UnmarshalVolumeAttachmentPrototypeInstanceByVolumeContext) if err != nil { return } @@ -50616,9 +54631,9 @@ func UnmarshalInstanceTemplateInstanceByImage(m map[string]json.RawMessage, resu return } -// InstanceTemplateInstanceBySourceTemplate : InstanceTemplateInstanceBySourceTemplate struct +// InstanceTemplateInstanceByImage : InstanceTemplateInstanceByImage struct // This model "extends" InstanceTemplate -type InstanceTemplateInstanceBySourceTemplate struct { +type InstanceTemplateInstanceByImage struct { // The date and time that the instance template was created. CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` @@ -50642,9 +54657,12 @@ type InstanceTemplateInstanceBySourceTemplate struct { // The unique user-defined name for this instance template. Name *string `json:"name" validate:"required"` - // Collection of additional network interfaces to create for the virtual server instance. + // The additional network interfaces to create for the virtual server instance. NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + // The profile to use for this virtual server instance. Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` @@ -50654,7 +54672,7 @@ type InstanceTemplateInstanceBySourceTemplate struct { // User data to be made available when setting up the virtual server instance. UserData *string `json:"user_data,omitempty"` - // Collection of volume attachments. + // The volume attachments for this virtual server instance. VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the @@ -50664,7 +54682,147 @@ type InstanceTemplateInstanceBySourceTemplate struct { // The boot volume attachment for the virtual server instance. 
BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` - // The identity of the image to use when provisioning the virtual server instance. + // The image to use when provisioning the virtual server instance. + Image ImageIdentityIntf `json:"image" validate:"required"` + + // Primary network interface. + PrimaryNetworkInterface *NetworkInterfacePrototype `json:"primary_network_interface" validate:"required"` + + // The zone this virtual server instance will reside in. + Zone ZoneIdentityIntf `json:"zone" validate:"required"` +} + +func (*InstanceTemplateInstanceByImage) isaInstanceTemplate() bool { + return true +} + +// UnmarshalInstanceTemplateInstanceByImage unmarshals an instance of InstanceTemplateInstanceByImage from the specified map of raw messages. +func UnmarshalInstanceTemplateInstanceByImage(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceTemplateInstanceByImage) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "keys", &obj.Keys, UnmarshalKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_interfaces", &obj.NetworkInterfaces, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "user_data", &obj.UserData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume_attachments", &obj.VolumeAttachments, UnmarshalVolumeAttachmentPrototypeInstanceContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "boot_volume_attachment", &obj.BootVolumeAttachment, UnmarshalVolumeAttachmentPrototypeInstanceByImageContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "image", &obj.Image, UnmarshalImageIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "primary_network_interface", &obj.PrimaryNetworkInterface, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceTemplateInstanceBySourceTemplate : InstanceTemplateInstanceBySourceTemplate struct +// This model "extends" InstanceTemplate +type InstanceTemplateInstanceBySourceTemplate struct { + // The date and time that the instance template was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN for this instance template. + CRN *string `json:"crn" validate:"required"` + + // The URL for this instance template. 
+ Href *string `json:"href" validate:"required"` + + // The unique identifier for this instance template. + ID *string `json:"id" validate:"required"` + + // The public SSH keys for the administrative user of the virtual server instance. Up to 10 keys may be provided; if no + // keys are provided the instance will be inaccessible unless the image used provides another means of access. For + // Windows instances, one of the keys will be used to encrypt the administrator password. + // + // Keys will be made available to the virtual server instance as cloud-init vendor data. For cloud-init enabled images, + // these keys will also be added as SSH authorized keys for the administrative user. + Keys []KeyIdentityIntf `json:"keys,omitempty"` + + // The unique user-defined name for this instance template. + Name *string `json:"name" validate:"required"` + + // The additional network interfaces to create for the virtual server instance. + NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + + // The profile to use for this virtual server instance. + Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` + + // The resource group for this instance template. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // User data to be made available when setting up the virtual server instance. + UserData *string `json:"user_data,omitempty"` + + // The volume attachments for this virtual server instance. + VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` + + // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the + // instance's network interfaces. + VPC VPCIdentityIntf `json:"vpc,omitempty"` + + // The boot volume attachment for the virtual server instance. + BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByImageContext `json:"boot_volume_attachment,omitempty"` + + // The image to use when provisioning the virtual server instance. Image ImageIdentityIntf `json:"image,omitempty"` // Primary network interface. @@ -50712,6 +54870,10 @@ func UnmarshalInstanceTemplateInstanceBySourceTemplate(m map[string]json.RawMess if err != nil { return } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) if err != nil { return @@ -50756,6 +54918,139 @@ func UnmarshalInstanceTemplateInstanceBySourceTemplate(m map[string]json.RawMess return } +// InstanceTemplateInstanceByVolume : InstanceTemplateInstanceByVolume struct +// This model "extends" InstanceTemplate +type InstanceTemplateInstanceByVolume struct { + // The date and time that the instance template was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The CRN for this instance template. + CRN *string `json:"crn" validate:"required"` + + // The URL for this instance template. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this instance template. + ID *string `json:"id" validate:"required"` + + // The public SSH keys for the administrative user of the virtual server instance. 
Up to 10 keys may be provided; if no + // keys are provided the instance will be inaccessible unless the image used provides another means of access. For + // Windows instances, one of the keys will be used to encrypt the administrator password. + // + // Keys will be made available to the virtual server instance as cloud-init vendor data. For cloud-init enabled images, + // these keys will also be added as SSH authorized keys for the administrative user. + Keys []KeyIdentityIntf `json:"keys,omitempty"` + + // The unique user-defined name for this instance template. + Name *string `json:"name" validate:"required"` + + // The additional network interfaces to create for the virtual server instance. + NetworkInterfaces []NetworkInterfacePrototype `json:"network_interfaces,omitempty"` + + // The placement restrictions to use for the virtual server instance. + PlacementTarget InstancePlacementTargetPrototypeIntf `json:"placement_target,omitempty"` + + // The profile to use for this virtual server instance. + Profile InstanceProfileIdentityIntf `json:"profile,omitempty"` + + // The resource group for this instance template. + ResourceGroup *ResourceGroupReference `json:"resource_group" validate:"required"` + + // User data to be made available when setting up the virtual server instance. + UserData *string `json:"user_data,omitempty"` + + // The volume attachments for this virtual server instance. + VolumeAttachments []VolumeAttachmentPrototypeInstanceContext `json:"volume_attachments,omitempty"` + + // The VPC the virtual server instance is to be a part of. If provided, must match the VPC tied to the subnets of the + // instance's network interfaces. + VPC VPCIdentityIntf `json:"vpc,omitempty"` + + // The boot volume attachment for the virtual server instance. + BootVolumeAttachment *VolumeAttachmentPrototypeInstanceByVolumeContext `json:"boot_volume_attachment" validate:"required"` + + // Primary network interface. + PrimaryNetworkInterface *NetworkInterfacePrototype `json:"primary_network_interface" validate:"required"` + + // The zone this virtual server instance will reside in. + Zone ZoneIdentityIntf `json:"zone" validate:"required"` +} + +func (*InstanceTemplateInstanceByVolume) isaInstanceTemplate() bool { + return true +} + +// UnmarshalInstanceTemplateInstanceByVolume unmarshals an instance of InstanceTemplateInstanceByVolume from the specified map of raw messages. 
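+//
+// Templates are returned by the API behind the InstanceTemplateIntf
+// interface, so recovering this variant takes a type assertion; a minimal
+// sketch, assuming GetInstanceTemplate returns InstanceTemplateIntf:
+//
+//	template, _, err := vpcService.GetInstanceTemplate(options)
+//	if err == nil {
+//		if byVolume, ok := template.(*InstanceTemplateInstanceByVolume); ok {
+//			// BootVolumeAttachment is always present on this variant.
+//			_ = byVolume.BootVolumeAttachment
+//		}
+//	}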
+func UnmarshalInstanceTemplateInstanceByVolume(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceTemplateInstanceByVolume) + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalModel(m, "keys", &obj.Keys, UnmarshalKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "network_interfaces", &obj.NetworkInterfaces, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "placement_target", &obj.PlacementTarget, UnmarshalInstancePlacementTargetPrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalInstanceProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "resource_group", &obj.ResourceGroup, UnmarshalResourceGroupReference) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "user_data", &obj.UserData) + if err != nil { + return + } + err = core.UnmarshalModel(m, "volume_attachments", &obj.VolumeAttachments, UnmarshalVolumeAttachmentPrototypeInstanceContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "vpc", &obj.VPC, UnmarshalVPCIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "boot_volume_attachment", &obj.BootVolumeAttachment, UnmarshalVolumeAttachmentPrototypeInstanceByVolumeContext) + if err != nil { + return + } + err = core.UnmarshalModel(m, "primary_network_interface", &obj.PrimaryNetworkInterface, UnmarshalNetworkInterfacePrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "zone", &obj.Zone, UnmarshalZoneIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // KeyIdentityByCRN : KeyIdentityByCRN struct // This model "extends" KeyIdentity type KeyIdentityByCRN struct { @@ -51728,8 +56023,8 @@ type NetworkACLPrototypeNetworkACLByRules struct { // The VPC this network ACL is to be a part of. VPC VPCIdentityIntf `json:"vpc" validate:"required"` - // Array of prototype objects for rules to create along with this network ACL. If unspecified, no rules will be - // created, resulting in all traffic being denied. + // The prototype objects for rules to create along with this network ACL. If unspecified, no rules will be created, + // resulting in all traffic being denied. Rules []NetworkACLRulePrototypeNetworkACLContextIntf `json:"rules,omitempty"` } @@ -54266,7 +58561,7 @@ func UnmarshalSecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp(m map[stri } // SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp : If `protocol` is either `tcp` or `udp`, then the rule may also contain `port_min` and -// `port_max`. Either both should be set, or neither. When neither is set then traffic is allowed on all ports. For a +// `port_max`. Either both must be set, or neither. When neither is set then traffic is allowed on all ports. For a // single port, set both to the same value. 
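+//
+// A minimal sketch of a rule allowing inbound TCP on port 443 only, assuming
+// the Go field names Direction, Protocol, PortMin and PortMax for the JSON
+// properties described above:
+//
+//	rule := &SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp{
+//		Direction: core.StringPtr("inbound"),
+//		Protocol:  core.StringPtr("tcp"),
+//		PortMin:   core.Int64Ptr(443),
+//		PortMax:   core.Int64Ptr(443),
+//	}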
// This model "extends" SecurityGroupRulePrototype type SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp struct { @@ -54863,7 +59158,7 @@ func UnmarshalSecurityGroupRuleSecurityGroupRuleProtocolIcmp(m map[string]json.R } // SecurityGroupRuleSecurityGroupRuleProtocolTcpudp : If `protocol` is either `tcp` or `udp`, then the rule may also contain `port_min` and -// `port_max`. Either both should be set, or neither. When neither is set then traffic is allowed on all ports. For a +// `port_max`. Either both must be set, or neither. When neither is set then traffic is allowed on all ports. For a // single port, set both to the same value. // This model "extends" SecurityGroupRule type SecurityGroupRuleSecurityGroupRuleProtocolTcpudp struct { @@ -55066,6 +59361,99 @@ func UnmarshalSecurityGroupTargetReferenceNetworkInterfaceReferenceTargetContext return } +// SnapshotIdentityByCRN : SnapshotIdentityByCRN struct +// This model "extends" SnapshotIdentity +type SnapshotIdentityByCRN struct { + // The CRN for this snapshot. + CRN *string `json:"crn" validate:"required"` +} + +// NewSnapshotIdentityByCRN : Instantiate SnapshotIdentityByCRN (Generic Model Constructor) +func (*VpcV1) NewSnapshotIdentityByCRN(crn string) (model *SnapshotIdentityByCRN, err error) { + model = &SnapshotIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SnapshotIdentityByCRN) isaSnapshotIdentity() bool { + return true +} + +// UnmarshalSnapshotIdentityByCRN unmarshals an instance of SnapshotIdentityByCRN from the specified map of raw messages. +func UnmarshalSnapshotIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SnapshotIdentityByHref : SnapshotIdentityByHref struct +// This model "extends" SnapshotIdentity +type SnapshotIdentityByHref struct { + // The URL for this snapshot. + Href *string `json:"href" validate:"required"` +} + +// NewSnapshotIdentityByHref : Instantiate SnapshotIdentityByHref (Generic Model Constructor) +func (*VpcV1) NewSnapshotIdentityByHref(href string) (model *SnapshotIdentityByHref, err error) { + model = &SnapshotIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SnapshotIdentityByHref) isaSnapshotIdentity() bool { + return true +} + +// UnmarshalSnapshotIdentityByHref unmarshals an instance of SnapshotIdentityByHref from the specified map of raw messages. +func UnmarshalSnapshotIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// SnapshotIdentityByID : SnapshotIdentityByID struct +// This model "extends" SnapshotIdentity +type SnapshotIdentityByID struct { + // The unique identifier for this snapshot. 
+ ID *string `json:"id" validate:"required"` +} + +// NewSnapshotIdentityByID : Instantiate SnapshotIdentityByID (Generic Model Constructor) +func (*VpcV1) NewSnapshotIdentityByID(id string) (model *SnapshotIdentityByID, err error) { + model = &SnapshotIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*SnapshotIdentityByID) isaSnapshotIdentity() bool { + return true +} + +// UnmarshalSnapshotIdentityByID unmarshals an instance of SnapshotIdentityByID from the specified map of raw messages. +func UnmarshalSnapshotIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(SnapshotIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // SubnetIdentityByCRN : SubnetIdentityByCRN struct // This model "extends" SubnetIdentity type SubnetIdentityByCRN struct { @@ -55569,10 +59957,10 @@ type VPNGatewayConnectionPolicyMode struct { // The status of a VPN gateway connection. Status *string `json:"status" validate:"required"` - // A collection of local CIDRs for this resource. + // The local CIDRs for this resource. LocalCIDRs []string `json:"local_cidrs" validate:"required"` - // A collection of peer CIDRs for this resource. + // The peer CIDRs for this resource. PeerCIDRs []string `json:"peer_cidrs" validate:"required"` } @@ -55700,10 +60088,10 @@ type VPNGatewayConnectionPrototypeVPNGatewayConnectionPolicyModePrototype struct // The preshared key. Psk *string `json:"psk" validate:"required"` - // A collection of local CIDRs for this resource. + // The local CIDRs for this resource. LocalCIDRs []string `json:"local_cidrs" validate:"required"` - // A collection of peer CIDRs for this resource. + // The peer CIDRs for this resource. PeerCIDRs []string `json:"peer_cidrs" validate:"required"` } @@ -56003,7 +60391,7 @@ func UnmarshalVPNGatewayConnectionStaticRouteMode(m map[string]json.RawMessage, // VPNGatewayPolicyMode : VPNGatewayPolicyMode struct // This model "extends" VPNGateway type VPNGatewayPolicyMode struct { - // Collection of references to VPN gateway connections. + // Connections for this VPN gateway. Connections []VPNGatewayConnectionReference `json:"connections" validate:"required"` // The date and time that this VPN gateway was created. @@ -56216,7 +60604,7 @@ func UnmarshalVPNGatewayPrototypeVPNGatewayRouteModePrototype(m map[string]json. // VPNGatewayRouteMode : VPNGatewayRouteMode struct // This model "extends" VPNGateway type VPNGatewayRouteMode struct { - // Collection of references to VPN gateway connections. + // Connections for this VPN gateway. Connections []VPNGatewayConnectionReference `json:"connections" validate:"required"` // The date and time that this VPN gateway was created. @@ -56332,6 +60720,198 @@ func UnmarshalVPNGatewayRouteMode(m map[string]json.RawMessage, result interface return } +// VolumeAttachmentPrototypeVolumeVolumeIdentity : Identifies a volume by a unique property. +// Models which "extend" this model: +// - VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID +// - VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN +// - VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref +// This model "extends" VolumeAttachmentPrototypeVolume +type VolumeAttachmentPrototypeVolumeVolumeIdentity struct { + // The unique identifier for this volume. + ID *string `json:"id,omitempty"` + + // The CRN for this volume. 
+ CRN *string `json:"crn,omitempty"` + + // The URL for this volume. + Href *string `json:"href,omitempty"` +} + +func (*VolumeAttachmentPrototypeVolumeVolumeIdentity) isaVolumeAttachmentPrototypeVolumeVolumeIdentity() bool { + return true +} + +type VolumeAttachmentPrototypeVolumeVolumeIdentityIntf interface { + VolumeAttachmentPrototypeVolumeIntf + isaVolumeAttachmentPrototypeVolumeVolumeIdentity() bool +} + +func (*VolumeAttachmentPrototypeVolumeVolumeIdentity) isaVolumeAttachmentPrototypeVolume() bool { + return true +} + +// UnmarshalVolumeAttachmentPrototypeVolumeVolumeIdentity unmarshals an instance of VolumeAttachmentPrototypeVolumeVolumeIdentity from the specified map of raw messages. +func UnmarshalVolumeAttachmentPrototypeVolumeVolumeIdentity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeVolumeVolumeIdentity) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext : VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext struct +// Models which "extend" this model: +// - VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity +// - VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot +// This model "extends" VolumeAttachmentPrototypeVolume +type VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext struct { + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` + + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity,omitempty"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The snapshot from which to clone the volume. + SourceSnapshot SnapshotIdentityIntf `json:"source_snapshot,omitempty"` +} + +func (*VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext) isaVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext() bool { + return true +} + +type VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextIntf interface { + VolumeAttachmentPrototypeVolumeIntf + isaVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext() bool +} + +func (*VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext) isaVolumeAttachmentPrototypeVolume() bool { + return true +} + +// UnmarshalVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext unmarshals an instance of VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext from the specified map of raw messages. 
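+//
+// On the request side this model can be filled in directly to describe a new
+// data volume for an attachment; a minimal by-capacity sketch (values are
+// illustrative, and VolumeProfileIdentityByName is assumed to take a `Name`):
+//
+//	vol := &VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext{
+//		Name:     core.StringPtr("data-volume-1"),
+//		Profile:  &VolumeProfileIdentityByName{Name: core.StringPtr("general-purpose")},
+//		Capacity: core.Int64Ptr(100),
+//	}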
+func UnmarshalVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext) + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_snapshot", &obj.SourceSnapshot, UnmarshalSnapshotIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext : VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext struct +// This model "extends" VolumeAttachmentVolumePrototypeInstanceByVolumeContext +type VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext struct { + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity,omitempty"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the snapshot's `encryption_key` will be used. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` + + // The snapshot from which to clone the volume. + SourceSnapshot SnapshotIdentityIntf `json:"source_snapshot" validate:"required"` +} + +// NewVolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext : Instantiate VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext (Generic Model Constructor) +func (*VpcV1) NewVolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext(profile VolumeProfileIdentityIntf, sourceSnapshot SnapshotIdentityIntf) (model *VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext, err error) { + model = &VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext{ + Profile: profile, + SourceSnapshot: sourceSnapshot, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext) isaVolumeAttachmentVolumePrototypeInstanceByVolumeContext() bool { + return true +} + +// UnmarshalVolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext unmarshals an instance of VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext from the specified map of raw messages. 
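+//
+// A minimal construction sketch using the generic model constructor above
+// (identifiers are placeholders, and `vpcService` is an initialized *VpcV1):
+//
+//	snap, _ := vpcService.NewSnapshotIdentityByID("snapshot-id")
+//	profile := &VolumeProfileIdentityByName{Name: core.StringPtr("general-purpose")}
+//	bootVol, err := vpcService.NewVolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext(profile, snap)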
+func UnmarshalVolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentVolumePrototypeInstanceByVolumeContextVolumePrototypeInstanceByVolumeContext) + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_snapshot", &obj.SourceSnapshot, UnmarshalSnapshotIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentity : Identifies a volume by a unique property. // Models which "extend" this model: // - VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentityVolumeIdentityByID @@ -56384,14 +60964,9 @@ func UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumeIdentity(m map // VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext : VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext struct // Models which "extend" this model: // - VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity +// - VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot // This model "extends" VolumeAttachmentVolumePrototypeInstanceContext type VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext struct { - // The identity of the root key to use to wrap the data encryption key for the volume. - // - // If this property is not provided, the `encryption` type for the volume will be - // `provider_managed`. - EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` - // The bandwidth for the volume. Iops *int64 `json:"iops,omitempty"` @@ -56404,6 +60979,15 @@ type VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContex // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating // volumes may expand in the future. Capacity *int64 `json:"capacity,omitempty"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The snapshot from which to clone the volume. + SourceSnapshot SnapshotIdentityIntf `json:"source_snapshot,omitempty"` } func (*VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext) isaVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext() bool { @@ -56422,10 +61006,6 @@ func (*VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceCont // UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext unmarshals an instance of VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext from the specified map of raw messages. 
func UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext) - err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) - if err != nil { - return - } err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) if err != nil { return @@ -56442,6 +61022,14 @@ func UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInsta if err != nil { return } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_snapshot", &obj.SourceSnapshot, UnmarshalSnapshotIdentity) + if err != nil { + return + } reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) return } @@ -56604,12 +61192,6 @@ func UnmarshalVolumeProfileIdentityByName(m map[string]json.RawMessage, result i // VolumePrototypeVolumeByCapacity : VolumePrototypeVolumeByCapacity struct // This model "extends" VolumePrototype type VolumePrototypeVolumeByCapacity struct { - // The identity of the root key to use to wrap the data encryption key for the volume. - // - // If this property is not provided, the `encryption` type for the volume will be - // `provider_managed`. - EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` - // The bandwidth for the volume. Iops *int64 `json:"iops,omitempty"` @@ -56627,6 +61209,12 @@ type VolumePrototypeVolumeByCapacity struct { // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating // volumes may expand in the future. Capacity *int64 `json:"capacity" validate:"required"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` } // NewVolumePrototypeVolumeByCapacity : Instantiate VolumePrototypeVolumeByCapacity (Generic Model Constructor) @@ -56647,10 +61235,6 @@ func (*VolumePrototypeVolumeByCapacity) isaVolumePrototype() bool { // UnmarshalVolumePrototypeVolumeByCapacity unmarshals an instance of VolumePrototypeVolumeByCapacity from the specified map of raw messages. 
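[Illustrative usage, not part of the vendored diff] The hunks above only reorder `encryption_key` within these volume prototypes and add `source_snapshot`; the wire format is otherwise unchanged because the JSON tags are untouched. A sketch of filling `VolumePrototypeVolumeByCapacity`, assuming a `NewEncryptionKeyIdentityByCRN` constructor exists for the `EncryptionKeyIdentityIntf` field and eliding other required volume fields (such as the zone) for brevity; all identifiers are placeholders.

package main

import (
	"fmt"

	"github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
	svc := &vpcv1.VpcV1{}

	profile, _ := svc.NewVolumeProfileIdentityByName("10iops-tier")
	// Assumed constructor; the CRN is a placeholder root key.
	key, _ := svc.NewEncryptionKeyIdentityByCRN("crn:v1:bluemix:public:kms:us-south:a/acct::key:placeholder")

	name := "example-volume"
	capacity := int64(100)
	volume := &vpcv1.VolumePrototypeVolumeByCapacity{
		Name:          &name,
		Profile:       profile,
		Capacity:      &capacity,
		EncryptionKey: key, // omit to fall back to provider_managed encryption
	}
	fmt.Printf("volume prototype: %+v\n", volume)
}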
func UnmarshalVolumePrototypeVolumeByCapacity(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(VolumePrototypeVolumeByCapacity) - err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) - if err != nil { - return - } err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) if err != nil { return @@ -56675,6 +61259,10 @@ func UnmarshalVolumePrototypeVolumeByCapacity(m map[string]json.RawMessage, resu if err != nil { return } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) return } @@ -57297,6 +61885,737 @@ func UnmarshalFlowLogCollectorTargetPrototypeVPCIdentityVPCIdentityByID(m map[st return } +// InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec : InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec struct +// Models which "extend" this model: +// - InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup +// - InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager +// This model "extends" InstanceGroupManagerActionPrototypeScheduledActionPrototype +type InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec struct { + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name,omitempty"` + + // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min + // period. + CronSpec *string `json:"cron_spec,omitempty"` + + Group *InstanceGroupManagerScheduledActionGroupPrototype `json:"group,omitempty"` + + Manager InstanceGroupManagerScheduledActionManagerPrototypeIntf `json:"manager,omitempty"` +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec) isaInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec() bool { + return true +} + +type InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecIntf interface { + InstanceGroupManagerActionPrototypeScheduledActionPrototypeIntf + isaInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec() bool +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec) isaInstanceGroupManagerActionPrototypeScheduledActionPrototype() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec) isaInstanceGroupManagerActionPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec unmarshals an instance of InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec from the specified map of raw messages. 
+func UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerScheduledActionGroupPrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerScheduledActionManagerPrototype) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt : InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt struct +// Models which "extend" this model: +// - InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup +// - InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager +// This model "extends" InstanceGroupManagerActionPrototypeScheduledActionPrototype +type InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt struct { + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name,omitempty"` + + // The date and time the scheduled action will run. + RunAt *strfmt.DateTime `json:"run_at,omitempty"` + + Group *InstanceGroupManagerScheduledActionGroupPrototype `json:"group,omitempty"` + + Manager InstanceGroupManagerScheduledActionManagerPrototypeIntf `json:"manager,omitempty"` +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt) isaInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt() bool { + return true +} + +type InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtIntf interface { + InstanceGroupManagerActionPrototypeScheduledActionPrototypeIntf + isaInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt() bool +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt) isaInstanceGroupManagerActionPrototypeScheduledActionPrototype() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt) isaInstanceGroupManagerActionPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt unmarshals an instance of InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt from the specified map of raw messages. 
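[Illustrative usage, not part of the vendored diff] The two prototypes above are the request-side halves of the new scheduled actions: a recurring cron schedule or a one-shot `run_at` timestamp, each targeting either the group or a manager. A sketch of a recurring group resize, assuming the `ByCronSpecByGroup` subtype listed above exposes the parent's `Name`/`CronSpec`/`Group` fields and that `InstanceGroupManagerScheduledActionGroupPrototype` carries the target `membership_count`.

package main

import (
	"fmt"

	"github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
	name := "scale-up-weekdays"
	cronSpec := "0 8 * * 1-5" // 08:00, Monday through Friday
	membershipCount := int64(10)

	action := &vpcv1.InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup{
		Name:     &name,
		CronSpec: &cronSpec,
		Group: &vpcv1.InstanceGroupManagerScheduledActionGroupPrototype{
			MembershipCount: &membershipCount, // assumed field name
		},
	}
	fmt.Printf("scheduled action prototype: %+v\n", action)
}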
+func UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "run_at", &obj.RunAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerScheduledActionGroupPrototype) + if err != nil { + return + } + err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerScheduledActionManagerPrototype) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionScheduledActionGroupTarget : InstanceGroupManagerActionScheduledActionGroupTarget struct +// This model "extends" InstanceGroupManagerActionScheduledAction +type InstanceGroupManagerActionScheduledActionGroupTarget struct { + // If set to `true`, this scheduled action will be automatically deleted after it has finished and the + // `auto_delete_timeout` time has passed. + AutoDelete *bool `json:"auto_delete" validate:"required"` + + // Amount of time in hours that are required to pass before the scheduled action will be automatically deleted once it + // has finished. If this value is 0, the action will be deleted on completion. + AutoDeleteTimeout *int64 `json:"auto_delete_timeout" validate:"required"` + + // The date and time that the instance group manager action was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The URL for this instance group manager action. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this instance group manager action. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The status of the instance group action + // - `active`: Action is ready to be run + // - `completed`: Action was completed successfully + // - `failed`: Action could not be completed successfully + // - `incompatible`: Action parameters are not compatible with the group or manager + // - `omitted`: Action was not applied because this action's manager was disabled. + Status *string `json:"status" validate:"required"` + + // The date and time that the instance group manager action was modified. + UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` + + // The type of action for the instance group. + ActionType *string `json:"action_type" validate:"required"` + + // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min + // period. + CronSpec *string `json:"cron_spec,omitempty"` + + // The date and time the scheduled action was last applied. If empty the action has never been applied. + LastAppliedAt *strfmt.DateTime `json:"last_applied_at,omitempty"` + + // The date and time the scheduled action will next run. If empty the system is currently calculating the next run + // time. 
+ NextRunAt *strfmt.DateTime `json:"next_run_at,omitempty"` + + Group *InstanceGroupManagerScheduledActionGroup `json:"group" validate:"required"` +} + +// Constants associated with the InstanceGroupManagerActionScheduledActionGroupTarget.ResourceType property. +// The resource type. +const ( + InstanceGroupManagerActionScheduledActionGroupTargetResourceTypeInstanceGroupManagerActionConst = "instance_group_manager_action" +) + +// Constants associated with the InstanceGroupManagerActionScheduledActionGroupTarget.Status property. +// The status of the instance group action +// - `active`: Action is ready to be run +// - `completed`: Action was completed successfully +// - `failed`: Action could not be completed successfully +// - `incompatible`: Action parameters are not compatible with the group or manager +// - `omitted`: Action was not applied because this action's manager was disabled. +const ( + InstanceGroupManagerActionScheduledActionGroupTargetStatusActiveConst = "active" + InstanceGroupManagerActionScheduledActionGroupTargetStatusCompletedConst = "completed" + InstanceGroupManagerActionScheduledActionGroupTargetStatusFailedConst = "failed" + InstanceGroupManagerActionScheduledActionGroupTargetStatusIncompatibleConst = "incompatible" + InstanceGroupManagerActionScheduledActionGroupTargetStatusOmittedConst = "omitted" +) + +// Constants associated with the InstanceGroupManagerActionScheduledActionGroupTarget.ActionType property. +// The type of action for the instance group. +const ( + InstanceGroupManagerActionScheduledActionGroupTargetActionTypeScheduledConst = "scheduled" +) + +func (*InstanceGroupManagerActionScheduledActionGroupTarget) isaInstanceGroupManagerActionScheduledAction() bool { + return true +} + +func (*InstanceGroupManagerActionScheduledActionGroupTarget) isaInstanceGroupManagerAction() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionScheduledActionGroupTarget unmarshals an instance of InstanceGroupManagerActionScheduledActionGroupTarget from the specified map of raw messages. 
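[Illustrative usage, not part of the vendored diff] The generated unmarshalers in this file all follow one calling convention: the caller splits a JSON body into a map[string]json.RawMessage and passes a pointer to the typed pointer that should receive the result (the `reflect.ValueOf(result).Elem().Set(...)` line is what fills it in). A self-contained sketch against the group-target action above; the JSON body is an abbreviated placeholder, and only the keys present get decoded.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
	// Abbreviated response body; real responses carry all required fields.
	raw := []byte(`{"action_type":"scheduled","status":"completed","cron_spec":"*/30 * * * *"}`)

	var m map[string]json.RawMessage
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	// The generated unmarshalers take a pointer to the typed pointer.
	var action *vpcv1.InstanceGroupManagerActionScheduledActionGroupTarget
	if err := vpcv1.UnmarshalInstanceGroupManagerActionScheduledActionGroupTarget(m, &action); err != nil {
		panic(err)
	}
	fmt.Println("status:", *action.Status)
}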
+func UnmarshalInstanceGroupManagerActionScheduledActionGroupTarget(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionScheduledActionGroupTarget) + err = core.UnmarshalPrimitive(m, "auto_delete", &obj.AutoDelete) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "auto_delete_timeout", &obj.AutoDeleteTimeout) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "action_type", &obj.ActionType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_applied_at", &obj.LastAppliedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next_run_at", &obj.NextRunAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerScheduledActionGroup) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionScheduledActionManagerTarget : InstanceGroupManagerActionScheduledActionManagerTarget struct +// This model "extends" InstanceGroupManagerActionScheduledAction +type InstanceGroupManagerActionScheduledActionManagerTarget struct { + // If set to `true`, this scheduled action will be automatically deleted after it has finished and the + // `auto_delete_timeout` time has passed. + AutoDelete *bool `json:"auto_delete" validate:"required"` + + // Amount of time in hours that are required to pass before the scheduled action will be automatically deleted once it + // has finished. If this value is 0, the action will be deleted on completion. + AutoDeleteTimeout *int64 `json:"auto_delete_timeout" validate:"required"` + + // The date and time that the instance group manager action was created. + CreatedAt *strfmt.DateTime `json:"created_at" validate:"required"` + + // The URL for this instance group manager action. + Href *string `json:"href" validate:"required"` + + // The unique identifier for this instance group manager action. + ID *string `json:"id" validate:"required"` + + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name" validate:"required"` + + // The resource type. + ResourceType *string `json:"resource_type" validate:"required"` + + // The status of the instance group action + // - `active`: Action is ready to be run + // - `completed`: Action was completed successfully + // - `failed`: Action could not be completed successfully + // - `incompatible`: Action parameters are not compatible with the group or manager + // - `omitted`: Action was not applied because this action's manager was disabled. + Status *string `json:"status" validate:"required"` + + // The date and time that the instance group manager action was modified. 
+ UpdatedAt *strfmt.DateTime `json:"updated_at" validate:"required"` + + // The type of action for the instance group. + ActionType *string `json:"action_type" validate:"required"` + + // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min + // period. + CronSpec *string `json:"cron_spec,omitempty"` + + // The date and time the scheduled action was last applied. If empty the action has never been applied. + LastAppliedAt *strfmt.DateTime `json:"last_applied_at,omitempty"` + + // The date and time the scheduled action will next run. If empty the system is currently calculating the next run + // time. + NextRunAt *strfmt.DateTime `json:"next_run_at,omitempty"` + + Manager InstanceGroupManagerScheduledActionManagerIntf `json:"manager" validate:"required"` +} + +// Constants associated with the InstanceGroupManagerActionScheduledActionManagerTarget.ResourceType property. +// The resource type. +const ( + InstanceGroupManagerActionScheduledActionManagerTargetResourceTypeInstanceGroupManagerActionConst = "instance_group_manager_action" +) + +// Constants associated with the InstanceGroupManagerActionScheduledActionManagerTarget.Status property. +// The status of the instance group action +// - `active`: Action is ready to be run +// - `completed`: Action was completed successfully +// - `failed`: Action could not be completed successfully +// - `incompatible`: Action parameters are not compatible with the group or manager +// - `omitted`: Action was not applied because this action's manager was disabled. +const ( + InstanceGroupManagerActionScheduledActionManagerTargetStatusActiveConst = "active" + InstanceGroupManagerActionScheduledActionManagerTargetStatusCompletedConst = "completed" + InstanceGroupManagerActionScheduledActionManagerTargetStatusFailedConst = "failed" + InstanceGroupManagerActionScheduledActionManagerTargetStatusIncompatibleConst = "incompatible" + InstanceGroupManagerActionScheduledActionManagerTargetStatusOmittedConst = "omitted" +) + +// Constants associated with the InstanceGroupManagerActionScheduledActionManagerTarget.ActionType property. +// The type of action for the instance group. +const ( + InstanceGroupManagerActionScheduledActionManagerTargetActionTypeScheduledConst = "scheduled" +) + +func (*InstanceGroupManagerActionScheduledActionManagerTarget) isaInstanceGroupManagerActionScheduledAction() bool { + return true +} + +func (*InstanceGroupManagerActionScheduledActionManagerTarget) isaInstanceGroupManagerAction() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionScheduledActionManagerTarget unmarshals an instance of InstanceGroupManagerActionScheduledActionManagerTarget from the specified map of raw messages. 
+func UnmarshalInstanceGroupManagerActionScheduledActionManagerTarget(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionScheduledActionManagerTarget) + err = core.UnmarshalPrimitive(m, "auto_delete", &obj.AutoDelete) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "auto_delete_timeout", &obj.AutoDeleteTimeout) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "created_at", &obj.CreatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "resource_type", &obj.ResourceType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "status", &obj.Status) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "updated_at", &obj.UpdatedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "action_type", &obj.ActionType) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "last_applied_at", &obj.LastAppliedAt) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "next_run_at", &obj.NextRunAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerScheduledActionManager) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref : InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref struct +// This model "extends" InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype +type InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref struct { + // The maximum number of members the instance group should have at the scheduled time. + MaxMembershipCount *int64 `json:"max_membership_count,omitempty"` + + // The minimum number of members the instance group should have at the scheduled time. + MinMembershipCount *int64 `json:"min_membership_count,omitempty"` + + // The URL for this instance group manager. 
+ Href *string `json:"href" validate:"required"` +} + +// NewInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref : Instantiate InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref (Generic Model Constructor) +func (*VpcV1) NewInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref(href string) (model *InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref, err error) { + model = &InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref) isaInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype() bool { + return true +} + +func (*InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref) isaInstanceGroupManagerScheduledActionManagerPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref unmarshals an instance of InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref from the specified map of raw messages. +func UnmarshalInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByHref) + err = core.UnmarshalPrimitive(m, "max_membership_count", &obj.MaxMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "min_membership_count", &obj.MinMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID : InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID struct +// This model "extends" InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype +type InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID struct { + // The maximum number of members the instance group should have at the scheduled time. + MaxMembershipCount *int64 `json:"max_membership_count,omitempty"` + + // The minimum number of members the instance group should have at the scheduled time. + MinMembershipCount *int64 `json:"min_membership_count,omitempty"` + + // The unique identifier for this instance group manager. 
+ ID *string `json:"id" validate:"required"` +} + +// NewInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID : Instantiate InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID (Generic Model Constructor) +func (*VpcV1) NewInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID(id string) (model *InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID, err error) { + model = &InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID) isaInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototype() bool { + return true +} + +func (*InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID) isaInstanceGroupManagerScheduledActionManagerPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID unmarshals an instance of InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID from the specified map of raw messages. +func UnmarshalInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID) + err = core.UnmarshalPrimitive(m, "max_membership_count", &obj.MaxMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "min_membership_count", &obj.MinMembershipCount) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN : InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN struct +// This model "extends" InstancePlacementTargetPrototypeDedicatedHostGroupIdentity +type InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN struct { + // The CRN for this dedicated host group. 
+ CRN *string `json:"crn" validate:"required"` +} + +// NewInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN : Instantiate InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN (Generic Model Constructor) +func (*VpcV1) NewInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN(crn string) (model *InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN, err error) { + model = &InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN) isaInstancePlacementTargetPrototypeDedicatedHostGroupIdentity() bool { + return true +} + +func (*InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN) isaInstancePlacementTargetPrototype() bool { + return true +} + +// UnmarshalInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN unmarshals an instance of InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN from the specified map of raw messages. +func UnmarshalInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref : InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref struct +// This model "extends" InstancePlacementTargetPrototypeDedicatedHostGroupIdentity +type InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref struct { + // The URL for this dedicated host group. + Href *string `json:"href" validate:"required"` +} + +// NewInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref : Instantiate InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref (Generic Model Constructor) +func (*VpcV1) NewInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref(href string) (model *InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref, err error) { + model = &InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref) isaInstancePlacementTargetPrototypeDedicatedHostGroupIdentity() bool { + return true +} + +func (*InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref) isaInstancePlacementTargetPrototype() bool { + return true +} + +// UnmarshalInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref unmarshals an instance of InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref from the specified map of raw messages. 
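[Illustrative usage, not part of the vendored diff] The auto-scale manager prototypes above let a scheduled action retune a manager's membership bounds at the scheduled time. A sketch pairing the by-ID variant with a one-shot `run_at` action, assuming the `ByRunAtByManager` subtype listed earlier exposes `RunAt` and `Manager`; the manager ID is a placeholder.

package main

import (
	"fmt"
	"time"

	"github.com/IBM/vpc-go-sdk/vpcv1"
	"github.com/go-openapi/strfmt"
)

func main() {
	svc := &vpcv1.VpcV1{}

	manager, err := svc.NewInstanceGroupManagerScheduledActionManagerPrototypeAutoScalePrototypeByID("placeholder-manager-id")
	if err != nil {
		panic(err)
	}
	minCount, maxCount := int64(2), int64(8)
	manager.MinMembershipCount = &minCount
	manager.MaxMembershipCount = &maxCount

	// One-shot action tomorrow; assumed subtype fields per the parent model above.
	runAt := strfmt.DateTime(time.Now().Add(24 * time.Hour))
	action := &vpcv1.InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager{
		RunAt:   &runAt,
		Manager: manager,
	}
	fmt.Printf("one-shot scheduled action: %+v\n", action)
}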
+func UnmarshalInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID : InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID struct +// This model "extends" InstancePlacementTargetPrototypeDedicatedHostGroupIdentity +type InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID struct { + // The unique identifier for this dedicated host group. + ID *string `json:"id" validate:"required"` +} + +// NewInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID : Instantiate InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID (Generic Model Constructor) +func (*VpcV1) NewInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID(id string) (model *InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID, err error) { + model = &InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID) isaInstancePlacementTargetPrototypeDedicatedHostGroupIdentity() bool { + return true +} + +func (*InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID) isaInstancePlacementTargetPrototype() bool { + return true +} + +// UnmarshalInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID unmarshals an instance of InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID from the specified map of raw messages. +func UnmarshalInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN : InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN struct +// This model "extends" InstancePlacementTargetPrototypeDedicatedHostIdentity +type InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN struct { + // The CRN for this dedicated host. 
+ CRN *string `json:"crn" validate:"required"` +} + +// NewInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN : Instantiate InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN (Generic Model Constructor) +func (*VpcV1) NewInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN(crn string) (model *InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN, err error) { + model = &InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN) isaInstancePlacementTargetPrototypeDedicatedHostIdentity() bool { + return true +} + +func (*InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN) isaInstancePlacementTargetPrototype() bool { + return true +} + +// UnmarshalInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN unmarshals an instance of InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN from the specified map of raw messages. +func UnmarshalInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref : InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref struct +// This model "extends" InstancePlacementTargetPrototypeDedicatedHostIdentity +type InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref struct { + // The URL for this dedicated host. + Href *string `json:"href" validate:"required"` +} + +// NewInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref : Instantiate InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref (Generic Model Constructor) +func (*VpcV1) NewInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref(href string) (model *InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref, err error) { + model = &InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref) isaInstancePlacementTargetPrototypeDedicatedHostIdentity() bool { + return true +} + +func (*InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref) isaInstancePlacementTargetPrototype() bool { + return true +} + +// UnmarshalInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref unmarshals an instance of InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref from the specified map of raw messages. 
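[Illustrative usage, not part of the vendored diff] The placement-target identities above (dedicated host group and dedicated host, each by CRN, href, or ID) all reduce to a single `InstancePlacementTargetPrototypeIntf` value. A sketch of pinning an instance to a dedicated host group by ID; the ID is a placeholder, and the resulting value would be set as the instance prototype's placement target when creating the instance.

package main

import (
	"fmt"

	"github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
	svc := &vpcv1.VpcV1{}

	// Placeholder ID; the ByCRN and ByHref variants above work the same way.
	target, err := svc.NewInstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID("0787-placeholder-host-group-id")
	if err != nil {
		panic(err)
	}

	// The isa methods above make this satisfy InstancePlacementTargetPrototypeIntf.
	var placement vpcv1.InstancePlacementTargetPrototypeIntf = target
	fmt.Printf("placement target: %+v\n", placement)
}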
+func UnmarshalInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID : InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID struct +// This model "extends" InstancePlacementTargetPrototypeDedicatedHostIdentity +type InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID struct { + // The unique identifier for this dedicated host. + ID *string `json:"id" validate:"required"` +} + +// NewInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID : Instantiate InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID (Generic Model Constructor) +func (*VpcV1) NewInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID(id string) (model *InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID, err error) { + model = &InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID) isaInstancePlacementTargetPrototypeDedicatedHostIdentity() bool { + return true +} + +func (*InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID) isaInstancePlacementTargetPrototype() bool { + return true +} + +// UnmarshalInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID unmarshals an instance of InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID from the specified map of raw messages. +func UnmarshalInstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstancePlacementTargetPrototypeDedicatedHostIdentityDedicatedHostIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref : LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref struct // This model "extends" LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentity type LoadBalancerListenerPolicyTargetPatchLoadBalancerPoolIdentityLoadBalancerPoolIdentityByHref struct { @@ -58067,6 +63386,257 @@ func UnmarshalSecurityGroupRuleRemotePrototypeSecurityGroupIdentitySecurityGroup return } +// VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN : VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN struct +// This model "extends" VolumeAttachmentPrototypeVolumeVolumeIdentity +type VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN struct { + // The CRN for this volume. 
+ CRN *string `json:"crn" validate:"required"` +} + +// NewVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN : Instantiate VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN (Generic Model Constructor) +func (*VpcV1) NewVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN(crn string) (model *VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN, err error) { + model = &VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN{ + CRN: core.StringPtr(crn), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN) isaVolumeAttachmentPrototypeVolumeVolumeIdentity() bool { + return true +} + +func (*VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN) isaVolumeAttachmentPrototypeVolume() bool { + return true +} + +// UnmarshalVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN unmarshals an instance of VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN from the specified map of raw messages. +func UnmarshalVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN) + err = core.UnmarshalPrimitive(m, "crn", &obj.CRN) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref : VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref struct +// This model "extends" VolumeAttachmentPrototypeVolumeVolumeIdentity +type VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref struct { + // The URL for this volume. + Href *string `json:"href" validate:"required"` +} + +// NewVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref : Instantiate VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref (Generic Model Constructor) +func (*VpcV1) NewVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref(href string) (model *VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref, err error) { + model = &VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref{ + Href: core.StringPtr(href), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref) isaVolumeAttachmentPrototypeVolumeVolumeIdentity() bool { + return true +} + +func (*VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref) isaVolumeAttachmentPrototypeVolume() bool { + return true +} + +// UnmarshalVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref unmarshals an instance of VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref from the specified map of raw messages. 
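[Illustrative usage, not part of the vendored diff] These volume identities let an attachment reference an existing volume instead of provisioning one. A sketch attaching by CRN, assuming the parent `VolumeAttachmentPrototype` model these identities plug into exposes `Name`, `DeleteVolumeOnInstanceDelete`, and the required `Volume` field; the CRN is a placeholder.

package main

import (
	"fmt"

	"github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
	svc := &vpcv1.VpcV1{}

	volume, err := svc.NewVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByCRN(
		"crn:v1:bluemix:public:is:us-south-1:a/placeholder::volume:r006-placeholder")
	if err != nil {
		panic(err)
	}

	// Assumed parent model fields; see the lead-in note above.
	name := "data-disk-1"
	deleteWithInstance := true
	attachment := &vpcv1.VolumeAttachmentPrototype{
		Name:                         &name,
		DeleteVolumeOnInstanceDelete: &deleteWithInstance,
		Volume:                       volume,
	}
	fmt.Printf("volume attachment prototype: %+v\n", attachment)
}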
+func UnmarshalVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByHref) + err = core.UnmarshalPrimitive(m, "href", &obj.Href) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID : VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID struct +// This model "extends" VolumeAttachmentPrototypeVolumeVolumeIdentity +type VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID struct { + // The unique identifier for this volume. + ID *string `json:"id" validate:"required"` +} + +// NewVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID : Instantiate VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID (Generic Model Constructor) +func (*VpcV1) NewVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID(id string) (model *VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID, err error) { + model = &VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID{ + ID: core.StringPtr(id), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID) isaVolumeAttachmentPrototypeVolumeVolumeIdentity() bool { + return true +} + +func (*VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID) isaVolumeAttachmentPrototypeVolume() bool { + return true +} + +// UnmarshalVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID unmarshals an instance of VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID from the specified map of raw messages. +func UnmarshalVolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeVolumeVolumeIdentityVolumeIdentityByID) + err = core.UnmarshalPrimitive(m, "id", &obj.ID) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity : VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity struct +// This model "extends" VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext +type VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity struct { + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` + + // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating + // volumes may expand in the future. + Capacity *int64 `json:"capacity" validate:"required"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. 
+ EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` +} + +// NewVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity : Instantiate VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity (Generic Model Constructor) +func (*VpcV1) NewVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity(profile VolumeProfileIdentityIntf, capacity int64) (model *VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity, err error) { + model = &VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity{ + Profile: profile, + Capacity: core.Int64Ptr(capacity), + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity) isaVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext() bool { + return true +} + +func (*VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity) isaVolumeAttachmentPrototypeVolume() bool { + return true +} + +// UnmarshalVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity unmarshals an instance of VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity from the specified map of raw messages. +func UnmarshalVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity) + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot : VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot struct +// This model "extends" VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext +type VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot struct { + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` + + // The capacity of the volume in gigabytes. + // + // If this property is not provided, the `minimum_capacity` for the snapshot will be used as the capacity for the + // volume. + // + // The specified minimum and maximum capacity values for creating or updating volumes may expand in the future. 
+ Capacity *int64 `json:"capacity,omitempty"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the snapshot's `encryption_key` will be used. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The snapshot from which to clone the volume. + SourceSnapshot SnapshotIdentityIntf `json:"source_snapshot" validate:"required"` +} + +// NewVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot : Instantiate VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot (Generic Model Constructor) +func (*VpcV1) NewVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot(profile VolumeProfileIdentityIntf, sourceSnapshot SnapshotIdentityIntf) (model *VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot, err error) { + model = &VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot{ + Profile: profile, + SourceSnapshot: sourceSnapshot, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot) isaVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContext() bool { + return true +} + +func (*VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot) isaVolumeAttachmentPrototypeVolume() bool { + return true +} + +// UnmarshalVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot unmarshals an instance of VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot from the specified map of raw messages. 
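[Illustrative usage, not part of the vendored diff] The by-source-snapshot variant above clones a data volume at attach time: `source_snapshot` is required, while `capacity` may be omitted to inherit the snapshot's `minimum_capacity`. A sketch using the constructor shown above, assuming `NewSnapshotIdentityByID` from the same release; identifiers are placeholders.

package main

import (
	"fmt"

	"github.com/IBM/vpc-go-sdk/vpcv1"
)

func main() {
	svc := &vpcv1.VpcV1{}

	profile, _ := svc.NewVolumeProfileIdentityByName("general-purpose")
	snapshot, _ := svc.NewSnapshotIdentityByID("r006-placeholder-snapshot-id") // assumed constructor

	volume, err := svc.NewVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot(profile, snapshot)
	if err != nil {
		panic(err)
	}

	// Optional: grow beyond the snapshot's minimum_capacity; omit to inherit it.
	capacity := int64(250)
	volume.Capacity = &capacity
	fmt.Printf("volume-by-source-snapshot prototype: %+v\n", volume)
}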
+func UnmarshalVolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentPrototypeVolumeVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot) + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_snapshot", &obj.SourceSnapshot, UnmarshalSnapshotIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + // VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentityVolumeIdentityByCRN : VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentityVolumeIdentityByCRN struct // This model "extends" VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentity type VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentityVolumeIdentityByCRN struct { @@ -58175,12 +63745,6 @@ func UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumeIdentityVolume // VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity : VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity struct // This model "extends" VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext type VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity struct { - // The identity of the root key to use to wrap the data encryption key for the volume. - // - // If this property is not provided, the `encryption` type for the volume will be - // `provider_managed`. - EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` - // The bandwidth for the volume. Iops *int64 `json:"iops,omitempty"` @@ -58193,6 +63757,12 @@ type VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContex // The capacity of the volume in gigabytes. The specified minimum and maximum capacity values for creating or updating // volumes may expand in the future. Capacity *int64 `json:"capacity" validate:"required"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the `encryption` type for the volume will be + // `provider_managed`. 
+ EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` } // NewVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity : Instantiate VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity (Generic Model Constructor) @@ -58216,10 +63786,80 @@ func (*VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceCont // UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity unmarshals an instance of VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity from the specified map of raw messages. func UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity(m map[string]json.RawMessage, result interface{}) (err error) { obj := new(VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeByCapacity) + err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalModel(m, "profile", &obj.Profile, UnmarshalVolumeProfileIdentity) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "capacity", &obj.Capacity) + if err != nil { + return + } err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) if err != nil { return } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot : VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot struct +// This model "extends" VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext +type VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot struct { + // The bandwidth for the volume. + Iops *int64 `json:"iops,omitempty"` + + // The unique user-defined name for this volume. + Name *string `json:"name,omitempty"` + + // The profile to use for this volume. + Profile VolumeProfileIdentityIntf `json:"profile" validate:"required"` + + // The capacity of the volume in gigabytes. + // + // If this property is not provided, the `minimum_capacity` for the snapshot will be used as the capacity for the + // volume. + // + // The specified minimum and maximum capacity values for creating or updating volumes may expand in the future. + Capacity *int64 `json:"capacity,omitempty"` + + // The root key to use to wrap the data encryption key for the volume. + // + // If this property is not provided, the snapshot's `encryption_key` will be used. + EncryptionKey EncryptionKeyIdentityIntf `json:"encryption_key,omitempty"` + + // The snapshot from which to clone the volume. 
+ SourceSnapshot SnapshotIdentityIntf `json:"source_snapshot" validate:"required"` +} + +// NewVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot : Instantiate VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot (Generic Model Constructor) +func (*VpcV1) NewVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot(profile VolumeProfileIdentityIntf, sourceSnapshot SnapshotIdentityIntf) (model *VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot, err error) { + model = &VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot{ + Profile: profile, + SourceSnapshot: sourceSnapshot, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot) isaVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContext() bool { + return true +} + +func (*VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot) isaVolumeAttachmentVolumePrototypeInstanceContext() bool { + return true +} + +// UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot unmarshals an instance of VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot from the specified map of raw messages. +func UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(VolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumePrototypeInstanceContextVolumeBySourceSnapshot) err = core.UnmarshalPrimitive(m, "iops", &obj.Iops) if err != nil { return @@ -58236,6 +63876,228 @@ func UnmarshalVolumeAttachmentVolumePrototypeInstanceContextVolumePrototypeInsta if err != nil { return } + err = core.UnmarshalModel(m, "encryption_key", &obj.EncryptionKey, UnmarshalEncryptionKeyIdentity) + if err != nil { + return + } + err = core.UnmarshalModel(m, "source_snapshot", &obj.SourceSnapshot, UnmarshalSnapshotIdentity) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup : InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup struct +// This model "extends" InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec +type InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup struct { + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name,omitempty"` + + // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min + // period. 
+ CronSpec *string `json:"cron_spec,omitempty"` + + Group *InstanceGroupManagerScheduledActionGroupPrototype `json:"group" validate:"required"` +} + +// NewInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup : Instantiate InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup (Generic Model Constructor) +func (*VpcV1) NewInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup(group *InstanceGroupManagerScheduledActionGroupPrototype) (model *InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup, err error) { + model = &InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup{ + Group: group, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup) isaInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup) isaInstanceGroupManagerActionPrototypeScheduledActionPrototype() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup) isaInstanceGroupManagerActionPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup unmarshals an instance of InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByGroup) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerScheduledActionGroupPrototype) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager : InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager struct +// This model "extends" InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec +type InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager struct { + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name,omitempty"` + + // The cron specification for a recurring scheduled action. Actions can be applied a maximum of one time within a 5 min + // period. 
+ CronSpec *string `json:"cron_spec,omitempty"` + + Manager InstanceGroupManagerScheduledActionManagerPrototypeIntf `json:"manager" validate:"required"` +} + +// NewInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager : Instantiate InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager (Generic Model Constructor) +func (*VpcV1) NewInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager(manager InstanceGroupManagerScheduledActionManagerPrototypeIntf) (model *InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager, err error) { + model = &InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager{ + Manager: manager, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager) isaInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpec() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager) isaInstanceGroupManagerActionPrototypeScheduledActionPrototype() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager) isaInstanceGroupManagerActionPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager unmarshals an instance of InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionPrototypeScheduledActionPrototypeByCronSpecByManager) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "cron_spec", &obj.CronSpec) + if err != nil { + return + } + err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerScheduledActionManagerPrototype) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup : InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup struct +// This model "extends" InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt +type InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup struct { + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name,omitempty"` + + // The date and time the scheduled action will run. 
+ RunAt *strfmt.DateTime `json:"run_at,omitempty"` + + Group *InstanceGroupManagerScheduledActionGroupPrototype `json:"group" validate:"required"` +} + +// NewInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup : Instantiate InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup (Generic Model Constructor) +func (*VpcV1) NewInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup(group *InstanceGroupManagerScheduledActionGroupPrototype) (model *InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup, err error) { + model = &InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup{ + Group: group, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup) isaInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup) isaInstanceGroupManagerActionPrototypeScheduledActionPrototype() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup) isaInstanceGroupManagerActionPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup unmarshals an instance of InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByGroup) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "run_at", &obj.RunAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "group", &obj.Group, UnmarshalInstanceGroupManagerScheduledActionGroupPrototype) + if err != nil { + return + } + reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) + return +} + +// InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager : InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager struct +// This model "extends" InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt +type InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager struct { + // The user-defined name for this instance group manager action. Names must be unique within the instance group + // manager. + Name *string `json:"name,omitempty"` + + // The date and time the scheduled action will run. 
+ RunAt *strfmt.DateTime `json:"run_at,omitempty"` + + Manager InstanceGroupManagerScheduledActionManagerPrototypeIntf `json:"manager" validate:"required"` +} + +// NewInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager : Instantiate InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager (Generic Model Constructor) +func (*VpcV1) NewInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager(manager InstanceGroupManagerScheduledActionManagerPrototypeIntf) (model *InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager, err error) { + model = &InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager{ + Manager: manager, + } + err = core.ValidateStruct(model, "required parameters") + return +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager) isaInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAt() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager) isaInstanceGroupManagerActionPrototypeScheduledActionPrototype() bool { + return true +} + +func (*InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager) isaInstanceGroupManagerActionPrototype() bool { + return true +} + +// UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager unmarshals an instance of InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager from the specified map of raw messages. +func UnmarshalInstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager(m map[string]json.RawMessage, result interface{}) (err error) { + obj := new(InstanceGroupManagerActionPrototypeScheduledActionPrototypeByRunAtByManager) + err = core.UnmarshalPrimitive(m, "name", &obj.Name) + if err != nil { + return + } + err = core.UnmarshalPrimitive(m, "run_at", &obj.RunAt) + if err != nil { + return + } + err = core.UnmarshalModel(m, "manager", &obj.Manager, UnmarshalInstanceGroupManagerScheduledActionManagerPrototype) + if err != nil { + return + } reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj)) return } diff --git a/vendor/github.com/ScaleFT/sshkeys/.gitignore b/vendor/github.com/ScaleFT/sshkeys/.gitignore new file mode 100644 index 00000000000..61ead86667c --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/.gitignore @@ -0,0 +1 @@ +/vendor diff --git a/vendor/github.com/ScaleFT/sshkeys/.travis.yml b/vendor/github.com/ScaleFT/sshkeys/.travis.yml new file mode 100644 index 00000000000..f6d94274ccb --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/.travis.yml @@ -0,0 +1,14 @@ +language: go + +sudo: false + +go_import_path: github.com/ScaleFT/sshkeys + +go: + - "1.13.x" + +env: + - GO111MODULE=on + +script: + - go test -v ./... 
\ No newline at end of file diff --git a/vendor/github.com/ScaleFT/sshkeys/CODE_OF_CONDUCT.md b/vendor/github.com/ScaleFT/sshkeys/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..dae63bbccb0 --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at [opensource@scaleft.com](mailto:opensource@scaleft.com). All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/ScaleFT/sshkeys/CONTRIBUTING.md b/vendor/github.com/ScaleFT/sshkeys/CONTRIBUTING.md new file mode 100644 index 00000000000..35b7a648b78 --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/CONTRIBUTING.md @@ -0,0 +1,42 @@ +# How to Contribute + +ScaleFT's projects are [Apache 2.0 licensed](LICENSE) and accept contributions +via GitHub pull requests. This document outlines some of the conventions on +development workflow, contact points, community conduct and other resources +to make it easier to get your contribution accepted. + +# Code of Conduct + +This project adheres to the Contributor Covenant [code of conduct](CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. + +Please report unacceptable behavior to [opensource@scaleft.com](mailto:opensource@scaleft.com). + +# Reporting Security Issues + +ScaleFT takes security seriously. If you discover a security issue, +please bring it to our attention right away! + +Please DO NOT file a public issue or pull request, +[instead send your report privately to the ScaleFT Security Team](https://www.scaleft.com/company/security/), +reachable at [security@scaleft.com](mailto:security@scaleft.com). + +Security reports are greatly appreciated and we will publicly thank you for them. + +# Getting Started + +- Fork the repository on GitHub +- Read the [README](README.md) for build and test instructions +- Play with the project, submit bugs, submit patches! + +# Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units, rebasing later is ok too! +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! diff --git a/vendor/github.com/ScaleFT/sshkeys/LICENSE b/vendor/github.com/ScaleFT/sshkeys/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ScaleFT/sshkeys/NOTICE b/vendor/github.com/ScaleFT/sshkeys/NOTICE new file mode 100644 index 00000000000..21302db9552 --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/NOTICE @@ -0,0 +1,10 @@ +sshkeys +Copyright 2017 ScaleFT, Inc + +This product includes software developed at ScaleFT, Inc. +(https://www.scaleft.com/). 
+ +Portions of this software are derived from +https://github.com/golang/crypto/blob/master/ssh/keys.go + +Copyright (c) 2009 The Go Authors. All rights reserved. diff --git a/vendor/github.com/ScaleFT/sshkeys/README.md b/vendor/github.com/ScaleFT/sshkeys/README.md new file mode 100644 index 00000000000..89d8c21db65 --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/README.md @@ -0,0 +1,14 @@ +# sshkeys + +[![GoDoc](https://godoc.org/github.com/ScaleFT/sshkeys?status.svg)](https://godoc.org/github.com/ScaleFT/sshkeys) +[![Build Status](https://travis-ci.org/ScaleFT/sshkeys.svg?branch=master)](https://travis-ci.org/ScaleFT/sshkeys) + +`sshkeys` provides utilities for parsing and marshalling cryptographic keys used for SSH, in both cleartext and encrypted formats. + +[ssh.ParseRawPrivateKey](https://godoc.org/golang.org/x/crypto/ssh#ParseRawPrivateKey) only supports parsing a subset of the formats `sshkeys` supports, does not support parsing encrypted private keys, and does not support marshalling. + +## Supported Formats + +* OpenSSH's [PROTOCOL.key](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key) for RSA and ED25519 keys. +* OpenSSH version >= 7.6 using aes256-ctr encryption +* "Classic" PEM containing RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys. diff --git a/vendor/github.com/ScaleFT/sshkeys/go.mod b/vendor/github.com/ScaleFT/sshkeys/go.mod new file mode 100644 index 00000000000..3e1a6233577 --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/go.mod @@ -0,0 +1,10 @@ +module github.com/ScaleFT/sshkeys + +go 1.13 + +require ( + github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a + github.com/stretchr/testify v1.5.1 + golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 + golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c // indirect +) diff --git a/vendor/github.com/ScaleFT/sshkeys/go.sum b/vendor/github.com/ScaleFT/sshkeys/go.sum new file mode 100644 index 00000000000..a96f1e98a7a --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/go.sum @@ -0,0 +1,23 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a h1:saTgr5tMLFnmy/yg3qDTft4rE5DY2uJ/cCxCe3q0XTU= +github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a/go.mod h1:Bw9BbhOJVNR+t0jCqx2GC6zv0TGBsShs56Y3gfSCvl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c h1:jceGD5YNJGgGMkJz79agzOln1K9TaZUjv5ird16qniQ= +golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ScaleFT/sshkeys/marshal.go b/vendor/github.com/ScaleFT/sshkeys/marshal.go new file mode 100644 index 00000000000..9737e182c44 --- /dev/null +++ b/vendor/github.com/ScaleFT/sshkeys/marshal.go @@ -0,0 +1,291 @@ +package sshkeys + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/asn1" + "encoding/pem" + "fmt" + "math/big" + mrand "math/rand" + + "github.com/dchest/bcrypt_pbkdf" + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh" +) + +const keySizeAES256 = 32 + +// Format of private key to use when Marshaling. +type Format int + +const ( + // FormatOpenSSHv1 encodes a private key using OpenSSH's PROTOCOL.key format: https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key + FormatOpenSSHv1 Format = iota + // FormatClassicPEM encodes private keys in PEM, with a key-specific encoding, as used by OpenSSH. + FormatClassicPEM +) + +// MarshalOptions provides the Marshal function format and encryption options. +type MarshalOptions struct { + // Passphrase to encrypt private key with, if nil, the key will not be encrypted. + Passphrase []byte + // Format to encode the private key in. + Format Format +} + +// Marshal converts a private key into an optionally encrypted format. 
+func Marshal(pk interface{}, opts *MarshalOptions) ([]byte, error) { + switch opts.Format { + case FormatOpenSSHv1: + return marshalOpenssh(pk, opts) + case FormatClassicPEM: + return marshalPem(pk, opts) + default: + return nil, fmt.Errorf("sshkeys: invalid format %d", opts.Format) + } +} + +func marshalPem(pk interface{}, opts *MarshalOptions) ([]byte, error) { + var err error + var plain []byte + var pemType string + + switch key := pk.(type) { + case *rsa.PrivateKey: + pemType = "RSA PRIVATE KEY" + plain = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + pemType = "EC PRIVATE KEY" + plain, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + case *dsa.PrivateKey: + pemType = "DSA PRIVATE KEY" + plain, err = marshalDSAPrivateKey(key) + if err != nil { + return nil, err + } + case *ed25519.PrivateKey: + return nil, fmt.Errorf("sshkeys: ed25519 keys must be marshaled with FormatOpenSSHv1") + default: + return nil, fmt.Errorf("sshkeys: unsupported key type %T", pk) + } + + if len(opts.Passphrase) > 0 { + block, err := x509.EncryptPEMBlock(rand.Reader, pemType, plain, opts.Passphrase, x509.PEMCipherAES128) + if err != nil { + return nil, err + } + return pem.EncodeToMemory(block), nil + } + + return pem.EncodeToMemory(&pem.Block{ + Type: pemType, + Bytes: plain, + }), nil +} + +type dsaOpenssl struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int +} + +// https://github.com/golang/crypto/blob/master/ssh/keys.go#L793-L804 +func marshalDSAPrivateKey(pk *dsa.PrivateKey) ([]byte, error) { + k := dsaOpenssl{ + Version: 0, + P: pk.P, + Q: pk.Q, + G: pk.G, + Pub: pk.Y, + Priv: pk.X, + } + + return asn1.Marshal(k) +} + +const opensshv1Magic = "openssh-key-v1" + +type opensshHeader struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey string + PrivKeyBlock string +} + +type opensshKey struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` +} + +type opensshRsa struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + Pad []byte `ssh:"rest"` +} + +type opensshED25519 struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` +} + +func padBytes(data []byte, blocksize int) []byte { + if blocksize != 0 { + var i byte + for i = byte(1); len(data)%blocksize != 0; i++ { + data = append(data, i&0xFF) + } + } + return data +} + +func marshalOpenssh(pk interface{}, opts *MarshalOptions) ([]byte, error) { + var blocksize int + var keylen int + + out := opensshHeader{ + CipherName: "none", + KdfName: "none", + KdfOpts: "", + NumKeys: 1, + PubKey: "", + } + + if len(opts.Passphrase) > 0 { + out.CipherName = "aes256-cbc" + out.KdfName = "bcrypt" + keylen = keySizeAES256 + blocksize = aes.BlockSize + } + + check := mrand.Uint32() + pk1 := opensshKey{ + Check1: check, + Check2: check, + } + + switch key := pk.(type) { + case *rsa.PrivateKey: + k := &opensshRsa{ + N: key.N, + E: big.NewInt(int64(key.E)), + D: key.D, + Iqmp: key.Precomputed.Qinv, + P: key.Primes[0], + Q: key.Primes[1], + Comment: "", + } + + data := ssh.Marshal(k) + pk1.Keytype = ssh.KeyAlgoRSA + pk1.Rest = data + publicKey, err := ssh.NewPublicKey(&key.PublicKey) + if err != nil { + return nil, err + } + out.PubKey = string(publicKey.Marshal()) + + case ed25519.PrivateKey: + k := opensshED25519{ + Pub: key.Public().(ed25519.PublicKey), + Priv: key, + } + data := ssh.Marshal(k) + pk1.Keytype = ssh.KeyAlgoED25519 + pk1.Rest = data + + 
publicKey, err := ssh.NewPublicKey(key.Public())
+		if err != nil {
+			return nil, err
+		}
+		out.PubKey = string(publicKey.Marshal())
+	case *ed25519.PrivateKey:
+		k := opensshED25519{
+			Pub:  key.Public().(ed25519.PublicKey),
+			Priv: *key,
+		}
+		data := ssh.Marshal(k)
+		pk1.Keytype = ssh.KeyAlgoED25519
+		pk1.Rest = data
+
+		publicKey, err := ssh.NewPublicKey(key.Public())
+		if err != nil {
+			return nil, err
+		}
+		out.PubKey = string(publicKey.Marshal())
+	default:
+		return nil, fmt.Errorf("sshkeys: unsupported key type %T", pk)
+	}
+
+	if len(opts.Passphrase) > 0 {
+		rounds := 16
+		ivlen := blocksize
+		salt := make([]byte, blocksize)
+		_, err := rand.Read(salt)
+		if err != nil {
+			return nil, err
+		}
+
+		kdfdata, err := bcrypt_pbkdf.Key(opts.Passphrase, salt, rounds, keylen+ivlen)
+		if err != nil {
+			return nil, err
+		}
+		iv := kdfdata[keylen : ivlen+keylen]
+		aeskey := kdfdata[0:keylen]
+
+		block, err := aes.NewCipher(aeskey)
+		if err != nil {
+			return nil, err
+		}
+
+		pkblock := padBytes(ssh.Marshal(pk1), blocksize)
+
+		cbc := cipher.NewCBCEncrypter(block, iv)
+		cbc.CryptBlocks(pkblock, pkblock)
+
+		out.PrivKeyBlock = string(pkblock)
+
+		var opts struct {
+			Salt   []byte
+			Rounds uint32
+		}
+
+		opts.Salt = salt
+		opts.Rounds = uint32(rounds)
+
+		out.KdfOpts = string(ssh.Marshal(&opts))
+	} else {
+		out.PrivKeyBlock = string(ssh.Marshal(pk1))
+	}
+
+	outBytes := []byte(opensshv1Magic)
+	outBytes = append(outBytes, 0)
+	outBytes = append(outBytes, ssh.Marshal(out)...)
+	block := &pem.Block{
+		Type:  "OPENSSH PRIVATE KEY",
+		Bytes: outBytes,
+	}
+	return pem.EncodeToMemory(block), nil
+}
diff --git a/vendor/github.com/ScaleFT/sshkeys/parse.go b/vendor/github.com/ScaleFT/sshkeys/parse.go
new file mode 100644
index 00000000000..3c87c5e8a61
--- /dev/null
+++ b/vendor/github.com/ScaleFT/sshkeys/parse.go
@@ -0,0 +1,40 @@
+// Portions of this file are based on https://github.com/golang/crypto/blob/master/ssh/keys.go
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sshkeys
+
+import (
+	"crypto/x509"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// ErrIncorrectPassword is returned when the supplied passphrase was not correct for an encrypted private key.
+var ErrIncorrectPassword = x509.IncorrectPasswordError
+
+// ParseEncryptedPrivateKey returns a Signer from an encrypted private key. It supports
+// the same keys as ParseEncryptedRawPrivateKey.
+func ParseEncryptedPrivateKey(data []byte, passphrase []byte) (ssh.Signer, error) {
+	key, err := ParseEncryptedRawPrivateKey(data, passphrase)
+	if err != nil {
+		return nil, err
+	}
+
+	return ssh.NewSignerFromKey(key)
+}
+
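Taken together, the two halves of this small library form a round trip: `Marshal` above encrypts and encodes a private key, and `ParseEncryptedPrivateKey` above recovers a usable `ssh.Signer` from the result. A minimal sketch of that round trip, assuming the package is imported as `github.com/ScaleFT/sshkeys`; the key size and passphrase are arbitrary illustrative values, not anything mandated by this patch:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/ScaleFT/sshkeys"
)

func main() {
	// Generate a throwaway RSA key; 2048 bits is an arbitrary choice.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	passphrase := []byte("correct horse battery staple") // illustrative only

	// Encrypt and encode the key in OpenSSH's PROTOCOL.key format
	// (aes256-cbc with a bcrypt KDF, per marshalOpenssh above).
	pemBytes, err := sshkeys.Marshal(key, &sshkeys.MarshalOptions{
		Format:     sshkeys.FormatOpenSSHv1,
		Passphrase: passphrase,
	})
	if err != nil {
		panic(err)
	}

	// Round-trip: recover an ssh.Signer from the encrypted PEM block.
	signer, err := sshkeys.ParseEncryptedPrivateKey(pemBytes, passphrase)
	if err != nil {
		panic(err)
	}
	fmt.Println(signer.PublicKey().Type()) // expected: "ssh-rsa"
}
```

When a passphrase is supplied, the parse path simply defers to `ssh.ParseRawPrivateKeyWithPassphrase`, so the accepted formats are ultimately those of `golang.org/x/crypto/ssh`:

+// ParseEncryptedRawPrivateKey returns a private key from an encrypted private key. It
+// supports RSA (PKCS#1 or OpenSSH), DSA (OpenSSL), and ECDSA private keys.
+//
+// ErrIncorrectPassword will be returned if the supplied passphrase is wrong, but for
+// some formats, such as RSA in PKCS#1, detecting a wrong passphrase is difficult, so
+// other parse errors may be returned instead.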
+func ParseEncryptedRawPrivateKey(data []byte, passphrase []byte) (interface{}, error) { + if passphrase == nil { + return ssh.ParseRawPrivateKey(data) + } + return ssh.ParseRawPrivateKeyWithPassphrase(data, passphrase) +} diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore new file mode 100644 index 00000000000..2c9adc20b31 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.gitignore @@ -0,0 +1,29 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.test + +# Folders +_obj +_test +.vagrant + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +coverage.txt +profile.out + +simplest-uncommitted-msg-0.1-jar-with-dependencies.jar diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml new file mode 100644 index 00000000000..ce2b5230d61 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.golangci.yml @@ -0,0 +1,77 @@ +run: + timeout: 5m + deadline: 10m + +linters-settings: + govet: + check-shadowing: false + golint: + min-confidence: 0 + gocyclo: + min-complexity: 99 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 3 + misspell: + locale: US + goimports: + local-prefixes: github.com/Shopify/sarama + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - wrapperFunc + - ifElseChain + funlen: + lines: 300 + statements: 300 + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + # - dupl + - errcheck + - funlen + # - gocritic + - gocyclo + - gofmt + - goimports + # - golint + - gosec + # - gosimple + - govet + # - ineffassign + - interfacer + # - misspell + # - nakedret + # - scopelint + # - staticcheck + - structcheck + # - stylecheck + - typecheck + - unconvert + - unused + - varcheck + - whitespace + # - goconst + - gochecknoinits + +issues: + exclude: + - consider giving a name to these results + - include an explanation for nolint directive + - Potential Integer overflow made by strconv.Atoi result conversion to int16/32 + - Use of weak random number generator + - TLS MinVersion too low diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md new file mode 100644 index 00000000000..db928328f96 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -0,0 +1,1005 @@ +# Changelog + +#### Unreleased + +#### Version 1.27.2 (2020-10-21) + +# Improvements + +#1750 - @krantideep95 Adds missing mock responses for mocking consumer group + +# Fixes + +#1817 - reverts #1785 - Add private method to Client interface to prevent implementation + +#### Version 1.27.1 (2020-10-07) + +# Improvements + +#1775 - @d1egoaz - Adds a Producer Interceptor example +#1781 - @justin-chen - Refresh brokers given list of seed brokers +#1784 - @justin-chen - Add randomize seed broker method +#1790 - @d1egoaz - remove example binary +#1798 - @bai - Test against Go 1.15 +#1785 - @justin-chen - Add private method to Client interface to prevent implementation +#1802 - @uvw - Support Go 1.13 error unwrapping + +# Fixes + +#1791 - @stanislavkozlovski - bump default version to 1.0.0 + +#### Version 1.27.0 (2020-08-11) + +# Improvements + +#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration +#1695 - @KJTsanaktsidis - Use docker-compose to run 
the functional tests
+#1699 - @wclaeys - Consumer group support for manually committing offsets
+#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
+#1726 - @d1egoaz - Include zstd on the functional tests
+#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
+#1738 - @varun06 - fixed variable names that are named the same as some std lib package names
+#1741 - @varun06 - updated zstd dependency to latest v1.10.10
+#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
+#1763 - @alrs - remove deprecated tls options from test
+#1769 - @bai - Add support for Kafka 2.6.0
+
+# Fixes
+
+#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+#1744 - @alrs - Fix isBalanced Function Signature
+
+#### Version 1.26.4 (2020-05-19)
+
+# Fixes
+
+- #1701 - @d1egoaz - Set server name only for the current broker
+- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
+
+#### Version 1.26.3 (2020-05-07)
+
+# Fixes
+
+- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
+
+#### Version 1.26.2 (2020-05-06)
+
+# ⚠️ Known Issues
+
+This release has been marked as not ready for production and may be unstable; please use v1.26.4.
+
+# Improvements
+
+- #1560 - @iyacontrol - add sync pool for gzip 1-9
+- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
+- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs
+- #1632 - @bai - Add support for Go 1.14
+- #1640 - @random-dwi - Feature/fix list partition reassignments
+- #1646 - @mimaison - Add DescribeLogDirs to admin client
+- #1667 - @bai - Add support for kafka 2.5.0
+
+# Fixes
+
+- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
+- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
+- #1602 - @d1egoaz - adds a note about consumer groups Consume method
+- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
+- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
+- #1614 - @alrs - produce_response.go: Remove Unused Functions
+- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
+- #1639 - @agriffaut - Handle errors with no message but error code
+- #1643 - @kzinglzy - fix `config.net.keepalive`
+- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
+- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
+- #1650 - @lavoiesl - Return the response error in heartbeatLoop
+- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
+- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
+ +#### Version 1.26.1 (2020-02-04) + +Improvements: +- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) +- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595)) +- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573)) +- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592)) + +Bug Fixes: +- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) +- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) + +#### Version 1.26.0 (2020-01-24) + +New Features: +- Enable zstd compression + ([1574](https://github.com/Shopify/sarama/pull/1574), + [1582](https://github.com/Shopify/sarama/pull/1582)) +- Support headers in tools kafka-console-producer + ([1549](https://github.com/Shopify/sarama/pull/1549)) + +Improvements: +- Add SASL AuthIdentity to SASL frames (authzid) + ([1585](https://github.com/Shopify/sarama/pull/1585)). + +Bug Fixes: +- Sending messages with ZStd compression enabled fails in multiple ways + ([1252](https://github.com/Shopify/sarama/issues/1252)). +- Use the broker for any admin on BrokerConfig + ([1571](https://github.com/Shopify/sarama/pull/1571)). +- Set DescribeConfigRequest Version field + ([1576](https://github.com/Shopify/sarama/pull/1576)). +- ConsumerGroup flooding logs with client/metadata update req + ([1578](https://github.com/Shopify/sarama/pull/1578)). +- MetadataRequest version in DescribeCluster + ([1580](https://github.com/Shopify/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/Shopify/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/Shopify/sarama/pull/1582)). +- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/Shopify/sarama/pull/1586)). + +#### Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/Shopify/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/Shopify/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/Shopify/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/Shopify/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/Shopify/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/Shopify/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/Shopify/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/Shopify/sarama/pull/1545)). + +#### Version 1.24.1 (2019-10-31) + +New Features: +- Add DescribeLogDirs Request/Response pair + ([1520](https://github.com/Shopify/sarama/pull/1520)). + +Bug Fixes: +- Fix ClusterAdmin returning invalid controller ID on DescribeCluster + ([1518](https://github.com/Shopify/sarama/pull/1518)). +- Fix issue with consumergroup not rebalancing when new partition is added + ([1525](https://github.com/Shopify/sarama/pull/1525)). +- Ensure consistent use of read/write deadlines + ([1529](https://github.com/Shopify/sarama/pull/1529)). 
+ +#### Version 1.24.0 (2019-10-09) + +New Features: +- Add sticky partition assignor + ([1416](https://github.com/Shopify/sarama/pull/1416)). +- Switch from cgo zstd package to pure Go implementation + ([1477](https://github.com/Shopify/sarama/pull/1477)). + +Improvements: +- Allow creating ClusterAdmin from client + ([1415](https://github.com/Shopify/sarama/pull/1415)). +- Set KafkaVersion in ListAcls method + ([1452](https://github.com/Shopify/sarama/pull/1452)). +- Set request version in CreateACL ClusterAdmin method + ([1458](https://github.com/Shopify/sarama/pull/1458)). +- Set request version in DeleteACL ClusterAdmin method + ([1461](https://github.com/Shopify/sarama/pull/1461)). +- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest + ([1464](https://github.com/Shopify/sarama/pull/1464)). +- Remove direct usage of gofork + ([1465](https://github.com/Shopify/sarama/pull/1465)). +- Add support for Go 1.13 + ([1478](https://github.com/Shopify/sarama/pull/1478)). +- Improve behavior of NewMockListAclsResponse + ([1481](https://github.com/Shopify/sarama/pull/1481)). + +Bug Fixes: +- Fix race condition in consumergroup example + ([1434](https://github.com/Shopify/sarama/pull/1434)). +- Fix brokerProducer goroutine leak + ([1442](https://github.com/Shopify/sarama/pull/1442)). +- Use released version of lz4 library + ([1469](https://github.com/Shopify/sarama/pull/1469)). +- Set correct version in MockDeleteTopicsResponse + ([1484](https://github.com/Shopify/sarama/pull/1484)). +- Fix CLI help message typo + ([1494](https://github.com/Shopify/sarama/pull/1494)). + +Known Issues: +- Please **don't** use Zstd, as it doesn't work right now. + See https://github.com/Shopify/sarama/issues/1252 + +#### Version 1.23.1 (2019-07-22) + +Bug Fixes: +- Fix fetch delete bug record + ([1425](https://github.com/Shopify/sarama/pull/1425)). +- Handle SASL/OAUTHBEARER token rejection + ([1428](https://github.com/Shopify/sarama/pull/1428)). + +#### Version 1.23.0 (2019-07-02) + +New Features: +- Add support for Kafka 2.3.0 + ([1418](https://github.com/Shopify/sarama/pull/1418)). +- Add support for ListConsumerGroupOffsets v2 + ([1374](https://github.com/Shopify/sarama/pull/1374)). +- Add support for DeleteConsumerGroup + ([1417](https://github.com/Shopify/sarama/pull/1417)). +- Add support for SASLVersion configuration + ([1410](https://github.com/Shopify/sarama/pull/1410)). +- Add kerberos support + ([1366](https://github.com/Shopify/sarama/pull/1366)). + +Improvements: +- Improve sasl_scram_client example + ([1406](https://github.com/Shopify/sarama/pull/1406)). +- Fix shutdown and race-condition in consumer-group example + ([1404](https://github.com/Shopify/sarama/pull/1404)). +- Add support for error codes 77—81 + ([1397](https://github.com/Shopify/sarama/pull/1397)). +- Pool internal objects allocated per message + ([1385](https://github.com/Shopify/sarama/pull/1385)). +- Reduce packet decoder allocations + ([1373](https://github.com/Shopify/sarama/pull/1373)). +- Support timeout when fetching metadata + ([1359](https://github.com/Shopify/sarama/pull/1359)). + +Bug Fixes: +- Fix fetch size integer overflow + ([1376](https://github.com/Shopify/sarama/pull/1376)). +- Handle and log throttled FetchResponses + ([1383](https://github.com/Shopify/sarama/pull/1383)). +- Refactor misspelled word Resouce to Resource + ([1368](https://github.com/Shopify/sarama/pull/1368)). 
+ +#### Version 1.22.1 (2019-04-29) + +Improvements: +- Use zstd 1.3.8 + ([1350](https://github.com/Shopify/sarama/pull/1350)). +- Add support for SaslHandshakeRequest v1 + ([1354](https://github.com/Shopify/sarama/pull/1354)). + +Bug Fixes: +- Fix V5 MetadataRequest nullable topics array + ([1353](https://github.com/Shopify/sarama/pull/1353)). +- Use a different SCRAM client for each broker connection + ([1349](https://github.com/Shopify/sarama/pull/1349)). +- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 + ([1344](https://github.com/Shopify/sarama/pull/1344)). + +#### Version 1.22.0 (2019-04-09) + +New Features: +- Add Offline Replicas Operation to Client + ([1318](https://github.com/Shopify/sarama/pull/1318)). +- Allow using proxy when connecting to broker + ([1326](https://github.com/Shopify/sarama/pull/1326)). +- Implement ReadCommitted + ([1307](https://github.com/Shopify/sarama/pull/1307)). +- Add support for Kafka 2.2.0 + ([1331](https://github.com/Shopify/sarama/pull/1331)). +- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms + ([1295](https://github.com/Shopify/sarama/pull/1295)). + +Improvements: +- Unregister all broker metrics on broker stop + ([1232](https://github.com/Shopify/sarama/pull/1232)). +- Add SCRAM authentication example + ([1303](https://github.com/Shopify/sarama/pull/1303)). +- Add consumergroup examples + ([1304](https://github.com/Shopify/sarama/pull/1304)). +- Expose consumer batch size metric + ([1296](https://github.com/Shopify/sarama/pull/1296)). +- Add TLS options to console producer and consumer + ([1300](https://github.com/Shopify/sarama/pull/1300)). +- Reduce client close bookkeeping + ([1297](https://github.com/Shopify/sarama/pull/1297)). +- Satisfy error interface in create responses + ([1154](https://github.com/Shopify/sarama/pull/1154)). +- Please lint gods + ([1346](https://github.com/Shopify/sarama/pull/1346)). + +Bug Fixes: +- Fix multi consumer group instance crash + ([1338](https://github.com/Shopify/sarama/pull/1338)). +- Update lz4 to latest version + ([1347](https://github.com/Shopify/sarama/pull/1347)). +- Retry ErrNotCoordinatorForConsumer in new consumergroup session + ([1231](https://github.com/Shopify/sarama/pull/1231)). +- Fix cleanup error handler + ([1332](https://github.com/Shopify/sarama/pull/1332)). +- Fix race condition in PartitionConsumer + ([1156](https://github.com/Shopify/sarama/pull/1156)). + +#### Version 1.21.0 (2019-02-24) + +New Features: +- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest + ([1236](https://github.com/Shopify/sarama/pull/1236)). +- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests + ([1178](https://github.com/Shopify/sarama/pull/1178)). +- Implement SASL/OAUTHBEARER + ([1240](https://github.com/Shopify/sarama/pull/1240)). + +Improvements: +- Add Go mod support + ([1282](https://github.com/Shopify/sarama/pull/1282)). +- Add error codes 73-76 + ([1239](https://github.com/Shopify/sarama/pull/1239)). +- Add retry backoff function + ([1160](https://github.com/Shopify/sarama/pull/1160)). +- Maintain metadata in the producer even when retries are disabled + ([1189](https://github.com/Shopify/sarama/pull/1189)). +- Include ReplicaAssignment in ListTopics + ([1274](https://github.com/Shopify/sarama/pull/1274)). +- Add producer performance tool + ([1222](https://github.com/Shopify/sarama/pull/1222)). +- Add support for LogAppend timestamps + ([1258](https://github.com/Shopify/sarama/pull/1258)).
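+
+The retry backoff function from 1160 in the improvements above replaces the fixed `Producer.Retry.Backoff` duration with a callback. A minimal sketch of an exponential policy (the signature matches the 1.21 config; the constants are arbitrary):
+
+```go
+package example
+
+import (
+	"time"
+
+	"github.com/Shopify/sarama"
+)
+
+func newProducerConfig() *sarama.Config {
+	config := sarama.NewConfig()
+	// Called with the current retry count and the configured maximum;
+	// this yields 100ms, 200ms, 400ms, and so on.
+	config.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
+		return time.Duration(100<<retries) * time.Millisecond
+	}
+	return config
+}
+```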
+ +Bug Fixes: +- Fix potential deadlock when a heartbeat request fails + ([1286](https://github.com/Shopify/sarama/pull/1286)). +- Fix consuming compacted topic + ([1227](https://github.com/Shopify/sarama/pull/1227)). +- Set correct Kafka version for DescribeConfigsRequest v1 + ([1277](https://github.com/Shopify/sarama/pull/1277)). +- Update kafka test version + ([1273](https://github.com/Shopify/sarama/pull/1273)). + +#### Version 1.20.1 (2019-01-10) + +New Features: +- Add optional replica id in offset request + ([1100](https://github.com/Shopify/sarama/pull/1100)). + +Improvements: +- Implement DescribeConfigs Request + Response v1 & v2 + ([1230](https://github.com/Shopify/sarama/pull/1230)). +- Reuse compression objects + ([1185](https://github.com/Shopify/sarama/pull/1185)). +- Switch from png to svg for GoDoc link in README + ([1243](https://github.com/Shopify/sarama/pull/1243)). +- Fix typo in deprecation notice for FetchResponseBlock.Records + ([1242](https://github.com/Shopify/sarama/pull/1242)). +- Fix typos in consumer metadata response file + ([1244](https://github.com/Shopify/sarama/pull/1244)). + +Bug Fixes: +- Revert to individual msg retries for non-idempotent + ([1203](https://github.com/Shopify/sarama/pull/1203)). +- Respect MaxMessageBytes limit for uncompressed messages + ([1141](https://github.com/Shopify/sarama/pull/1141)). + +#### Version 1.20.0 (2018-12-10) + +New Features: + - Add support for zstd compression + ([#1170](https://github.com/Shopify/sarama/pull/1170)). + - Add support for Idempotent Producer + ([#1152](https://github.com/Shopify/sarama/pull/1152)). + - Add support for Kafka 2.1.0 + ([#1229](https://github.com/Shopify/sarama/pull/1229)). + - Add support for OffsetCommit request/response pairs versions v1 to v5 + ([#1201](https://github.com/Shopify/sarama/pull/1201)). + - Add support for OffsetFetch request/response pair up to version v5 + ([#1198](https://github.com/Shopify/sarama/pull/1198)). + +Improvements: + - Export broker's Rack setting + ([#1173](https://github.com/Shopify/sarama/pull/1173)). + - Always use latest patch version of Go on CI + ([#1202](https://github.com/Shopify/sarama/pull/1202)). + - Add error codes 61 to 72 + ([#1195](https://github.com/Shopify/sarama/pull/1195)). + +Bug Fixes: + - Fix build without cgo + ([#1182](https://github.com/Shopify/sarama/pull/1182)). + - Fix go vet suggestion in consumer group file + ([#1209](https://github.com/Shopify/sarama/pull/1209)). + - Fix typos in code and comments + ([#1228](https://github.com/Shopify/sarama/pull/1228)). + +#### Version 1.19.0 (2018-09-27) + +New Features: + - Implement a higher-level consumer group + ([#1099](https://github.com/Shopify/sarama/pull/1099)). + +Improvements: + - Add support for Go 1.11 + ([#1176](https://github.com/Shopify/sarama/pull/1176)). + +Bug Fixes: + - Fix encoding of `MetadataResponse` with version 2 and higher + ([#1174](https://github.com/Shopify/sarama/pull/1174)). + - Fix race condition in mock async producer + ([#1174](https://github.com/Shopify/sarama/pull/1174)). + +#### Version 1.18.0 (2018-09-07) + +New Features: + - Make `Partitioner.RequiresConsistency` vary per-message + ([#1112](https://github.com/Shopify/sarama/pull/1112)). + - Add customizable partitioner + ([#1118](https://github.com/Shopify/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`, + `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` + ([#1055](https://github.com/Shopify/sarama/pull/1055)). + +Improvements: + - Add support for Kafka 2.0.0 + ([#1149](https://github.com/Shopify/sarama/pull/1149)). + - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts + ([#1123](https://github.com/Shopify/sarama/pull/1123)). + - Simpler offset management + ([#1127](https://github.com/Shopify/sarama/pull/1127)). + +Bug Fixes: + - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka + ([#1110](https://github.com/Shopify/sarama/pull/1110)). + - Fix consumer block when response did not contain all the + expected topic/partition blocks + ([#1086](https://github.com/Shopify/sarama/pull/1086)). + - Fix consumer block when response contains only control messages + ([#1115](https://github.com/Shopify/sarama/pull/1115)). + - Add timeout config for ClusterAdmin requests + ([#1142](https://github.com/Shopify/sarama/pull/1142)). + - Add version check when producing message with headers + ([#1117](https://github.com/Shopify/sarama/pull/1117)). + - Fix `MetadataRequest` for empty list of topics + ([#1132](https://github.com/Shopify/sarama/pull/1132)). + - Fix producer topic metadata on-demand fetch when topic error happens in metadata response + ([#1125](https://github.com/Shopify/sarama/pull/1125)). + +#### Version 1.17.0 (2018-05-30) + +New Features: + - Add support for gzip compression levels + ([#1044](https://github.com/Shopify/sarama/pull/1044)). + - Add support for Metadata request/response pairs versions v1 to v5 + ([#1047](https://github.com/Shopify/sarama/pull/1047), + [#1069](https://github.com/Shopify/sarama/pull/1069)). + - Add versioning to JoinGroup request/response pairs + ([#1098](https://github.com/Shopify/sarama/pull/1098)). + - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs + ([#1065](https://github.com/Shopify/sarama/pull/1065), + [#1096](https://github.com/Shopify/sarama/pull/1096), + [#1027](https://github.com/Shopify/sarama/pull/1027)). + - Add `Controller()` method to Client interface + ([#1063](https://github.com/Shopify/sarama/pull/1063)). + +Improvements: + - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp + ([#1010](https://github.com/Shopify/sarama/pull/1010)). + - Expose missing protocol parts: `msgSet` and `recordBatch` + ([#1049](https://github.com/Shopify/sarama/pull/1049)). + - Add support for v1 DeleteTopics Request + ([#1052](https://github.com/Shopify/sarama/pull/1052)). + - Add support for Go 1.10 + ([#1064](https://github.com/Shopify/sarama/pull/1064)). + - Claim support for Kafka 1.1.0 + ([#1073](https://github.com/Shopify/sarama/pull/1073)). + +Bug Fixes: + - Fix FindCoordinatorResponse.encode to allow nil Coordinator + ([#1050](https://github.com/Shopify/sarama/pull/1050), + [#1051](https://github.com/Shopify/sarama/pull/1051)). + - Clear all metadata when we have the latest topic info + ([#1033](https://github.com/Shopify/sarama/pull/1033)). + - Make `PartitionConsumer.Close` idempotent + ([#1092](https://github.com/Shopify/sarama/pull/1092)). + +#### Version 1.16.0 (2018-02-12) + +New Features: + - Add support for the Create/Delete Topics request/response pairs + ([#1007](https://github.com/Shopify/sarama/pull/1007), + [#1008](https://github.com/Shopify/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs + ([#1009](https://github.com/Shopify/sarama/pull/1009)). + - Add support for the five transaction-related request/response pairs + ([#1016](https://github.com/Shopify/sarama/pull/1016)). + +Improvements: + - Permit setting version on mock producer responses + ([#999](https://github.com/Shopify/sarama/pull/999)). + - Add `NewMockBrokerListener` helper for testing TLS connections + ([#1019](https://github.com/Shopify/sarama/pull/1019)). + - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB + which results in much higher throughput in most cases + ([#1024](https://github.com/Shopify/sarama/pull/1024)). + - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to + reduce CPU and memory usage when processing many partitions + ([#1028](https://github.com/Shopify/sarama/pull/1028)). + - Assign relative offsets to messages in the producer to save the brokers a + recompression pass + ([#1002](https://github.com/Shopify/sarama/pull/1002), + [#1015](https://github.com/Shopify/sarama/pull/1015)). + +Bug Fixes: + - Fix producing uncompressed batches with the new protocol format + ([#1032](https://github.com/Shopify/sarama/issues/1032)). + - Fix consuming compacted topics with the new protocol format + ([#1005](https://github.com/Shopify/sarama/issues/1005)). + - Fix consuming topics with a mix of protocol formats + ([#1021](https://github.com/Shopify/sarama/issues/1021)). + - Fix consuming when the broker includes multiple batches in a single response + ([#1022](https://github.com/Shopify/sarama/issues/1022)). + - Fix detection of `PartialTrailingMessage` when the partial message was + truncated before the magic value indicating its version + ([#1030](https://github.com/Shopify/sarama/pull/1030)). + - Fix expectation-checking in the mock of `SyncProducer.SendMessages` + ([#1035](https://github.com/Shopify/sarama/pull/1035)). + +#### Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/Shopify/sarama/pull/984)). + - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/Shopify/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/Shopify/sarama/pull/985)). + +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/Shopify/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/Shopify/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/Shopify/sarama/pull/990)). + - Fix producing with multiple headers + ([#996](https://github.com/Shopify/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/Shopify/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/Shopify/sarama/pull/991)). + +#### Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. + Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. 
Part of + ([#901](https://github.com/Shopify/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/Shopify/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/Shopify/sarama/pull/975)). + +#### Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/Shopify/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/Shopify/sarama/pull/939)). + - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/Shopify/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/Shopify/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/Shopify/sarama/issues/885)). + - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/Shopify/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/Shopify/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/Shopify/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/Shopify/sarama/pull/940)). + +#### Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). + - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/Shopify/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/Shopify/sarama/pull/837), + [#841](https://github.com/Shopify/sarama/pull/841)). + +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/Shopify/sarama/pull/859)). + +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/Shopify/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/Shopify/sarama/issues/812)). + - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/Shopify/sarama/pull/859)). + +#### Version 1.11.0 (2016-12-20) + +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. 
Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/Shopify/sarama/pull/701), + [#746](https://github.com/Shopify/sarama/pull/746), + [#766](https://github.com/Shopify/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/Shopify/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/Shopify/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/Shopify/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/Shopify/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/Shopify/sarama/pull/756)). + - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/Shopify/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/Shopify/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/Shopify/sarama/pull/795)). + +#### Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/Shopify/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/Shopify/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. Go's timers are finicky + ([#730](https://github.com/Shopify/sarama/pull/730), + [#733](https://github.com/Shopify/sarama/pull/733), + [#734](https://github.com/Shopify/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/Shopify/sarama/pull/735)). + +#### Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. + +_Also:_ The offset-manager's behaviour has been changed to match the upstream +java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and +[#713](https://github.com/Shopify/sarama/pull/713)). If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages. + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/Shopify/sarama/pull/672), + [#678](https://github.com/Shopify/sarama/pull/678), + [#681](https://github.com/Shopify/sarama/pull/681), and others). + - Support for configuring the target Kafka version + ([#676](https://github.com/Shopify/sarama/pull/676)). + - Batch producing support in the SyncProducer + ([#677](https://github.com/Shopify/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/Shopify/sarama/pull/667)). 
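+
+Taken together, the 1.11 and 1.10 notes above mean a minimal SyncProducer setup now looks like this (a sketch; the broker address is a placeholder and the version constant is just one valid choice):
+
+```go
+package example
+
+import "github.com/Shopify/sarama"
+
+func newSyncProducer() (sarama.SyncProducer, error) {
+	config := sarama.NewConfig()
+	// Since 1.10: declare the Kafka version you run against, otherwise
+	// the default of 0.8.2 applies and newer features fail with an error.
+	config.Version = sarama.V0_10_0_0
+	// Since 1.11: required for the SyncProducer to work correctly.
+	config.Producer.Return.Successes = true
+
+	return sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+}
+```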
+ +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/Shopify/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/Shopify/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/Shopify/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/Shopify/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/Shopify/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/Shopify/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/Shopify/sarama/pull/706)). + - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/Shopify/sarama/pull/709)). + +#### Version 1.9.0 (2016-05-16) + +New Features: + - Add support for custom offset manager retention durations + ([#602](https://github.com/Shopify/sarama/pull/602)). + - Publish low-level mocks to enable testing of third-party producer/consumer + implementations ([#570](https://github.com/Shopify/sarama/pull/570)). + - Declare support for Golang 1.6 + ([#611](https://github.com/Shopify/sarama/pull/611)). + - Support for SASL plain-text auth + ([#648](https://github.com/Shopify/sarama/pull/648)). + +Improvements: + - Simplified broker locking scheme slightly + ([#604](https://github.com/Shopify/sarama/pull/604)). + - Documentation cleanup + ([#605](https://github.com/Shopify/sarama/pull/605), + [#621](https://github.com/Shopify/sarama/pull/621), + [#654](https://github.com/Shopify/sarama/pull/654)). + +Bug Fixes: + - Fix race condition shutting down the OffsetManager + ([#658](https://github.com/Shopify/sarama/pull/658)). + +#### Version 1.8.0 (2016-02-01) + +New Features: + - Full support for Kafka 0.9: + - All protocol messages and fields + ([#586](https://github.com/Shopify/sarama/pull/586), + [#588](https://github.com/Shopify/sarama/pull/588), + [#590](https://github.com/Shopify/sarama/pull/590)). + - Verified that TLS support works + ([#581](https://github.com/Shopify/sarama/pull/581)). + - Fixed the OffsetManager compatibility + ([#585](https://github.com/Shopify/sarama/pull/585)). + +Improvements: + - Optimize for fewer system calls when reading from the network + ([#584](https://github.com/Shopify/sarama/pull/584)). + - Automatically retry `InvalidMessage` errors to match upstream behaviour + ([#589](https://github.com/Shopify/sarama/pull/589)). + +#### Version 1.7.0 (2015-12-11) + +New Features: + - Preliminary support for Kafka 0.9 + ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several + caveats: + - Protocol-layer support is mostly in place + ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 + renamed some messages and fields, which we did not in order to preserve API + compatibility. + - The producer and consumer work against 0.9, but the offset manager does + not ([#573](https://github.com/Shopify/sarama/pull/573)). + - TLS support may or may not work + ([#581](https://github.com/Shopify/sarama/pull/581)). 
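+
+For reference, the TLS support mentioned in the caveat above is driven by the `Net.TLS` settings; a minimal sketch (the `tls.Config` contents depend entirely on your deployment):
+
+```go
+package example
+
+import (
+	"crypto/tls"
+
+	"github.com/Shopify/sarama"
+)
+
+func newTLSConfig() *sarama.Config {
+	config := sarama.NewConfig()
+	config.Net.TLS.Enable = true
+	config.Net.TLS.Config = &tls.Config{
+		// Populate RootCAs and Certificates as your brokers require.
+	}
+	return config
+}
+```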
+ +Improvements: + - Don't wait for request timeouts on dead brokers, greatly speeding recovery + when the TCP connection is left hanging + ([#548](https://github.com/Shopify/sarama/pull/548)). + - Refactored part of the producer. The new version provides a much more elegant + solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also + slightly more efficient, and much more precise in calculating batch sizes + when compression is used + ([#549](https://github.com/Shopify/sarama/pull/549), + [#550](https://github.com/Shopify/sarama/pull/550), + [#551](https://github.com/Shopify/sarama/pull/551)). + +Bug Fixes: + - Fix race condition in consumer test mock + ([#553](https://github.com/Shopify/sarama/pull/553)). + +#### Version 1.6.1 (2015-09-25) + +Bug Fixes: + - Fix panic that could occur if a user-supplied message value failed to encode + ([#449](https://github.com/Shopify/sarama/pull/449)). + +#### Version 1.6.0 (2015-09-04) + +New Features: + - Implementation of a consumer offset manager using the APIs introduced in + Kafka 0.8.2. The API is designed mainly for integration into a future + high-level consumer, not for direct use, although it is *possible* to use it + directly. + ([#461](https://github.com/Shopify/sarama/pull/461)). + +Improvements: + - CRC32 calculation is much faster on machines with SSE4.2 instructions, + removing a major hotspot from most profiles + ([#255](https://github.com/Shopify/sarama/pull/255)). + +Bug Fixes: + - Make protocol decoding more robust against some malformed packets generated + by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), + [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways + ([#528](https://github.com/Shopify/sarama/pull/528)). + - Fix a potential race condition panic in the consumer on shutdown + ([#529](https://github.com/Shopify/sarama/pull/529)). + +#### Version 1.5.0 (2015-08-17) + +New Features: + - TLS-encrypted network connections are now supported. This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/Shopify/sarama/pull/154)). + +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/Shopify/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/Shopify/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/Shopify/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/Shopify/sarama/pull/475)). + +#### Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/Shopify/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). + +#### Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/Shopify/sarama/pull/456)). 
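+
+The offset manager introduced in 1.6.0 above can be used directly even though it mainly targets a higher-level consumer; a sketch of the low-level flow (group, topic, and partition values are placeholders):
+
+```go
+package example
+
+import "github.com/Shopify/sarama"
+
+func commitExample(client sarama.Client, lastConsumed int64) error {
+	om, err := sarama.NewOffsetManagerFromClient("example-group", client)
+	if err != nil {
+		return err
+	}
+	defer om.Close()
+
+	pom, err := om.ManagePartition("example-topic", 0)
+	if err != nil {
+		return err
+	}
+	defer pom.Close()
+
+	// Per the 1.10 note above: commit one *greater* than the last
+	// consumed message offset.
+	pom.MarkOffset(lastConsumed+1, "")
+	return nil
+}
+```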
+ +#### Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/Shopify/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/Shopify/sarama/pull/450), + [#451](https://github.com/Shopify/sarama/pull/451)). + +#### Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/Shopify/sarama/pull/431)). + - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/Shopify/sarama/pull/439), + [#442](https://github.com/Shopify/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/Shopify/sarama/pull/429)). + - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/Shopify/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). + - The consumer now handles messages for different partitions asynchronously, + making it much more resilient to specific user code ordering + ([#325](https://github.com/Shopify/sarama/pull/325)). + +#### Version 1.3.0 (2015-04-16) + +New Features: + - The client now tracks consumer group coordinators using + ConsumerMetadataRequests similar to how it tracks partition leadership using + regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). + This adds two methods to the client API: + - `Coordinator(consumerGroup string) (*Broker, error)` + - `RefreshCoordinator(consumerGroup string) error` + +Improvements: + - ConsumerMetadataResponses now automatically create a Broker object out of the + ID/address/port combination for the Coordinator; accessing the fields + individually has been deprecated + ([#413](https://github.com/Shopify/sarama/pull/413)). + - Much improved handling of `OffsetOutOfRange` errors in the consumer. + Consumers will fail to start if the provided offset is out of range + ([#418](https://github.com/Shopify/sarama/pull/418)) + and they will automatically shut down if the offset falls out of range + ([#424](https://github.com/Shopify/sarama/pull/424)). + - Small performance improvement in encoding and decoding protocol messages + ([#427](https://github.com/Shopify/sarama/pull/427)). + +Bug Fixes: + - Fix a rare race condition in the client's background metadata refresher if + it happens to be activated while the client is being closed + ([#422](https://github.com/Shopify/sarama/pull/422)). + +#### Version 1.2.0 (2015-04-07) + +Improvements: + - The producer's behaviour when `Flush.Frequency` is set is now more intuitive + ([#389](https://github.com/Shopify/sarama/pull/389)). 
+ - The producer is now somewhat more memory-efficient during and after retrying + messages due to an improved queue implementation + ([#396](https://github.com/Shopify/sarama/pull/396)). + - The consumer produces much more useful logging output when leadership + changes ([#385](https://github.com/Shopify/sarama/pull/385)). + - The client's `GetOffset` method will now automatically refresh metadata and + retry once in the event of stale information or similar + ([#394](https://github.com/Shopify/sarama/pull/394)). + - Broker connections now have support for using TCP keepalives + ([#407](https://github.com/Shopify/sarama/issues/407)). + +Bug Fixes: + - The OffsetCommitRequest message now correctly implements all three possible + API versions ([#390](https://github.com/Shopify/sarama/pull/390), + [#400](https://github.com/Shopify/sarama/pull/400)). + +#### Version 1.1.0 (2015-03-20) + +Improvements: + - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly + broken topics don't choke throughput + ([#373](https://github.com/Shopify/sarama/pull/373)). + +Bug Fixes: + - Fix the producer's internal reference counting in certain unusual scenarios + ([#367](https://github.com/Shopify/sarama/pull/367)). + - Fix the consumer's internal reference counting in certain unusual scenarios + ([#369](https://github.com/Shopify/sarama/pull/369)). + - Fix a condition where the producer's internal control messages could have + gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). + - Fix an issue where invalid partition lists would be cached when asking for + metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)). + + +#### Version 1.0.0 (2015-03-17) + +Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: + +- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. +- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. +- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. +- For most use cases, it is no longer necessary to open a `Client`; this will be done for you. +- All the configuration values have been unified in the `Config` struct. +- Much improved test suite. diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE new file mode 100644 index 00000000000..d2bf4352f4c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Shopify + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile new file mode 100644 index 00000000000..a05863480fd --- /dev/null +++ b/vendor/github.com/Shopify/sarama/Makefile @@ -0,0 +1,31 @@ +default: fmt get update test lint + +GO := GO111MODULE=on GOPRIVATE=github.com/linkedin GOSUMDB=off go +GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG) +GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic + +FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go') +TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') + +get: + $(GO) get ./... + $(GO) mod verify + $(GO) mod tidy + +update: + $(GO) get -u -v all + $(GO) mod verify + $(GO) mod tidy + +fmt: + gofmt -s -l -w $(FILES) $(TESTS) + +lint: + GOFLAGS="-tags=functional" golangci-lint run + +test: + $(GOTEST) ./... + +.PHONY: test_functional +test_functional: + $(GOTEST) -tags=functional ./... diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md new file mode 100644 index 00000000000..38d39695b74 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/README.md @@ -0,0 +1,36 @@ +# sarama + +[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama) +[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) +[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) + +Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). + +## Getting started + +- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama). +- Mocks for testing are available in the [mocks](./mocks) subpackage. +- The [examples](./examples) directory contains more elaborate example applications. +- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. + +You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). + +## Compatibility and API stability + +Sarama provides a "2 releases + 2 months" compatibility guarantee: we support +the two latest stable releases of Kafka and Go, and we provide a two month +grace period for older releases. This means we currently officially support +Go 1.13 through 1.14, and Kafka 2.4 through 2.6, although older releases are +still likely to work. + +Sarama follows semantic versioning and provides API stability via the gopkg.in service. +You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +A changelog is available [here](CHANGELOG.md). + +## Contributing + +- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). 
+- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. +- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. +- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. +- If you have any questions, just ask! diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile new file mode 100644 index 00000000000..07d7ffb8ff4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/Vagrantfile @@ -0,0 +1,14 @@ +# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB +MEMORY = 3072 + +Vagrant.configure("2") do |config| + config.vm.box = "ubuntu/bionic64" + + config.vm.provision :shell, path: "vagrant/provision.sh" + + config.vm.network "private_network", ip: "192.168.100.67" + + config.vm.provider "virtualbox" do |v| + v.memory = MEMORY + end +end diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go new file mode 100644 index 00000000000..50b689d1dfe --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_bindings.go @@ -0,0 +1,138 @@ +package sarama + +//Resource holds information about acl resource type +type Resource struct { + ResourceType AclResourceType + ResourceName string + ResourcePatternType AclResourcePatternType +} + +func (r *Resource) encode(pe packetEncoder, version int16) error { + pe.putInt8(int8(r.ResourceType)) + + if err := pe.putString(r.ResourceName); err != nil { + return err + } + + if version == 1 { + if r.ResourcePatternType == AclPatternUnknown { + Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead") + r.ResourcePatternType = AclPatternLiteral + } + pe.putInt8(int8(r.ResourcePatternType)) + } + + return nil +} + +func (r *Resource) decode(pd packetDecoder, version int16) (err error) { + resourceType, err := pd.getInt8() + if err != nil { + return err + } + r.ResourceType = AclResourceType(resourceType) + + if r.ResourceName, err = pd.getString(); err != nil { + return err + } + if version == 1 { + pattern, err := pd.getInt8() + if err != nil { + return err + } + r.ResourcePatternType = AclResourcePatternType(pattern) + } + + return nil +} + +//Acl holds information about acl type +type Acl struct { + Principal string + Host string + Operation AclOperation + PermissionType AclPermissionType +} + +func (a *Acl) encode(pe packetEncoder) error { + if err := pe.putString(a.Principal); err != nil { + return err + } + + if err := pe.putString(a.Host); err != nil { + return err + } + + pe.putInt8(int8(a.Operation)) + pe.putInt8(int8(a.PermissionType)) + + return nil +} + +func (a *Acl) decode(pd packetDecoder, version int16) (err error) { + if a.Principal, err = pd.getString(); err != nil { + return err + } + + if a.Host, err = pd.getString(); err != nil { + return err + } + + operation, err := pd.getInt8() + if err != nil { + return err + } + a.Operation = AclOperation(operation) + + permissionType, err := pd.getInt8() + if err != nil { + return err + } + a.PermissionType = AclPermissionType(permissionType) + + return nil +} + +//ResourceAcls is an acl resource type +type ResourceAcls struct { + Resource + Acls []*Acl +} + +func (r *ResourceAcls) encode(pe packetEncoder, version int16) error { + if err := r.Resource.encode(pe, version); err != nil { + return err + } + + if err := 
pe.putArrayLength(len(r.Acls)); err != nil { + return err + } + for _, acl := range r.Acls { + if err := acl.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *ResourceAcls) decode(pd packetDecoder, version int16) error { + if err := r.Resource.decode(pd, version); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Acls = make([]*Acl, n) + for i := 0; i < n; i++ { + r.Acls[i] = new(Acl) + if err := r.Acls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go new file mode 100644 index 00000000000..6d8a70e1a20 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_create_request.go @@ -0,0 +1,89 @@ +package sarama + +//CreateAclsRequest is an acl creation request +type CreateAclsRequest struct { + Version int16 + AclCreations []*AclCreation +} + +func (c *CreateAclsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.AclCreations)); err != nil { + return err + } + + for _, aclCreation := range c.AclCreations { + if err := aclCreation.encode(pe, c.Version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) { + c.Version = version + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.AclCreations = make([]*AclCreation, n) + + for i := 0; i < n; i++ { + c.AclCreations[i] = new(AclCreation) + if err := c.AclCreations[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsRequest) key() int16 { + return 30 +} + +func (c *CreateAclsRequest) version() int16 { + return c.Version +} + +func (c *CreateAclsRequest) headerVersion() int16 { + return 1 +} + +func (c *CreateAclsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +//AclCreation is a wrapper around Resource and Acl type +type AclCreation struct { + Resource + Acl +} + +func (a *AclCreation) encode(pe packetEncoder, version int16) error { + if err := a.Resource.encode(pe, version); err != nil { + return err + } + if err := a.Acl.encode(pe); err != nil { + return err + } + + return nil +} + +func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) { + if err := a.Resource.decode(pd, version); err != nil { + return err + } + if err := a.Acl.decode(pd, version); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go new file mode 100644 index 00000000000..14b1b9e13f3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_create_response.go @@ -0,0 +1,94 @@ +package sarama + +import "time" + +//CreateAclsResponse is a an acl response creation type +type CreateAclsResponse struct { + ThrottleTime time.Duration + AclCreationResponses []*AclCreationResponse +} + +func (c *CreateAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil { + return err + } + + for _, aclCreationResponse := range c.AclCreationResponses { + if err := aclCreationResponse.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := 
pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.AclCreationResponses = make([]*AclCreationResponse, n) + for i := 0; i < n; i++ { + c.AclCreationResponses[i] = new(AclCreationResponse) + if err := c.AclCreationResponses[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsResponse) key() int16 { + return 30 +} + +func (c *CreateAclsResponse) version() int16 { + return 0 +} + +func (c *CreateAclsResponse) headerVersion() int16 { + return 0 +} + +func (c *CreateAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +//AclCreationResponse is an acl creation response type +type AclCreationResponse struct { + Err KError + ErrMsg *string +} + +func (a *AclCreationResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(a.Err)) + + if err := pe.putNullableString(a.ErrMsg); err != nil { + return err + } + + return nil +} + +func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + a.Err = KError(kerr) + + if a.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go new file mode 100644 index 00000000000..4152522598d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go @@ -0,0 +1,62 @@ +package sarama + +//DeleteAclsRequest is a delete acl request +type DeleteAclsRequest struct { + Version int + Filters []*AclFilter +} + +func (d *DeleteAclsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Filters)); err != nil { + return err + } + + for _, filter := range d.Filters { + filter.Version = d.Version + if err := filter.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) { + d.Version = int(version) + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.Filters = make([]*AclFilter, n) + for i := 0; i < n; i++ { + d.Filters[i] = new(AclFilter) + d.Filters[i].Version = int(version) + if err := d.Filters[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) key() int16 { + return 31 +} + +func (d *DeleteAclsRequest) version() int16 { + return int16(d.Version) +} + +func (c *DeleteAclsRequest) headerVersion() int16 { + return 1 +} + +func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go new file mode 100644 index 00000000000..cb630882673 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go @@ -0,0 +1,163 @@ +package sarama + +import "time" + +//DeleteAclsResponse is a delete acl response +type DeleteAclsResponse struct { + Version int16 + ThrottleTime time.Duration + FilterResponses []*FilterResponse +} + +func (d *DeleteAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(d.FilterResponses)); err != nil { + return err + } + + for _, filterResponse := range d.FilterResponses { + if err := filterResponse.encode(pe, d.Version); 
err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + d.FilterResponses = make([]*FilterResponse, n) + + for i := 0; i < n; i++ { + d.FilterResponses[i] = new(FilterResponse) + if err := d.FilterResponses[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsResponse) key() int16 { + return 31 +} + +func (d *DeleteAclsResponse) version() int16 { + return d.Version +} + +func (d *DeleteAclsResponse) headerVersion() int16 { + return 0 +} + +func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +//FilterResponse is a filter response type +type FilterResponse struct { + Err KError + ErrMsg *string + MatchingAcls []*MatchingAcl +} + +func (f *FilterResponse) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(f.Err)) + if err := pe.putNullableString(f.ErrMsg); err != nil { + return err + } + + if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil { + return err + } + for _, matchingAcl := range f.MatchingAcls { + if err := matchingAcl.encode(pe, version); err != nil { + return err + } + } + + return nil +} + +func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + f.Err = KError(kerr) + + if f.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + f.MatchingAcls = make([]*MatchingAcl, n) + for i := 0; i < n; i++ { + f.MatchingAcls[i] = new(MatchingAcl) + if err := f.MatchingAcls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +//MatchingAcl is a matching acl type +type MatchingAcl struct { + Err KError + ErrMsg *string + Resource + Acl +} + +func (m *MatchingAcl) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(m.Err)) + if err := pe.putNullableString(m.ErrMsg); err != nil { + return err + } + + if err := m.Resource.encode(pe, version); err != nil { + return err + } + + if err := m.Acl.encode(pe); err != nil { + return err + } + + return nil +} + +func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + m.Err = KError(kerr) + + if m.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + if err := m.Resource.decode(pd, version); err != nil { + return err + } + + if err := m.Acl.decode(pd, version); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go new file mode 100644 index 00000000000..29841a5ce33 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go @@ -0,0 +1,39 @@ +package sarama + +//DescribeAclsRequest is a secribe acl request type +type DescribeAclsRequest struct { + Version int + AclFilter +} + +func (d *DescribeAclsRequest) encode(pe packetEncoder) error { + d.AclFilter.Version = d.Version + return d.AclFilter.encode(pe) +} + +func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) { + d.Version = int(version) + d.AclFilter.Version = int(version) + return d.AclFilter.decode(pd, version) +} + +func (d 
*DescribeAclsRequest) key() int16 { + return 29 +} + +func (d *DescribeAclsRequest) version() int16 { + return int16(d.Version) +} + +func (d *DescribeAclsRequest) headerVersion() int16 { + return 1 +} + +func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go new file mode 100644 index 00000000000..c43408b244d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go @@ -0,0 +1,91 @@ +package sarama + +import "time" + +//DescribeAclsResponse is a describe acl response type +type DescribeAclsResponse struct { + Version int16 + ThrottleTime time.Duration + Err KError + ErrMsg *string + ResourceAcls []*ResourceAcls +} + +func (d *DescribeAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(d.Err)) + + if err := pe.putNullableString(d.ErrMsg); err != nil { + return err + } + + if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil { + return err + } + + for _, resourceAcl := range d.ResourceAcls { + if err := resourceAcl.encode(pe, d.Version); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + d.Err = KError(kerr) + + errmsg, err := pd.getString() + if err != nil { + return err + } + if errmsg != "" { + d.ErrMsg = &errmsg + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + d.ResourceAcls = make([]*ResourceAcls, n) + + for i := 0; i < n; i++ { + d.ResourceAcls[i] = new(ResourceAcls) + if err := d.ResourceAcls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) key() int16 { + return 29 +} + +func (d *DescribeAclsResponse) version() int16 { + return d.Version +} + +func (d *DescribeAclsResponse) headerVersion() int16 { + return 0 +} + +func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go new file mode 100644 index 00000000000..fad55587535 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_filter.go @@ -0,0 +1,78 @@ +package sarama + +type AclFilter struct { + Version int + ResourceType AclResourceType + ResourceName *string + ResourcePatternTypeFilter AclResourcePatternType + Principal *string + Host *string + Operation AclOperation + PermissionType AclPermissionType +} + +func (a *AclFilter) encode(pe packetEncoder) error { + pe.putInt8(int8(a.ResourceType)) + if err := pe.putNullableString(a.ResourceName); err != nil { + return err + } + + if a.Version == 1 { + pe.putInt8(int8(a.ResourcePatternTypeFilter)) + } + + if err := pe.putNullableString(a.Principal); err != nil { + return err + } + if err := pe.putNullableString(a.Host); err != nil { + return err + } + pe.putInt8(int8(a.Operation)) + pe.putInt8(int8(a.PermissionType)) + + return nil +} + +func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) { + resourceType, err := pd.getInt8() + if err != nil { + return err + } + 
a.ResourceType = AclResourceType(resourceType) + + if a.ResourceName, err = pd.getNullableString(); err != nil { + return err + } + + if a.Version == 1 { + pattern, err := pd.getInt8() + + if err != nil { + return err + } + + a.ResourcePatternTypeFilter = AclResourcePatternType(pattern) + } + + if a.Principal, err = pd.getNullableString(); err != nil { + return err + } + + if a.Host, err = pd.getNullableString(); err != nil { + return err + } + + operation, err := pd.getInt8() + if err != nil { + return err + } + a.Operation = AclOperation(operation) + + permissionType, err := pd.getInt8() + if err != nil { + return err + } + a.PermissionType = AclPermissionType(permissionType) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go new file mode 100644 index 00000000000..c10ad7b9032 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_types.go @@ -0,0 +1,55 @@ +package sarama + +type ( + AclOperation int + + AclPermissionType int + + AclResourceType int + + AclResourcePatternType int +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java +const ( + AclOperationUnknown AclOperation = iota + AclOperationAny + AclOperationAll + AclOperationRead + AclOperationWrite + AclOperationCreate + AclOperationDelete + AclOperationAlter + AclOperationDescribe + AclOperationClusterAction + AclOperationDescribeConfigs + AclOperationAlterConfigs + AclOperationIdempotentWrite +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java +const ( + AclPermissionUnknown AclPermissionType = iota + AclPermissionAny + AclPermissionDeny + AclPermissionAllow +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java +const ( + AclResourceUnknown AclResourceType = iota + AclResourceAny + AclResourceTopic + AclResourceGroup + AclResourceCluster + AclResourceTransactionalID +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java +const ( + AclPatternUnknown AclResourcePatternType = iota + AclPatternAny + AclPatternMatch + AclPatternLiteral + AclPatternPrefixed +) diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go new file mode 100644 index 00000000000..95586f9a1f8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go @@ -0,0 +1,57 @@ +package sarama + +//AddOffsetsToTxnRequest adds offsets to a transaction request +type AddOffsetsToTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + GroupID string +} + +func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + if err := pe.putString(a.GroupID); err != nil { + return err + } + + return nil +} + +func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.GroupID, err = pd.getString(); err != nil { + return err + } + return nil +} + +func (a *AddOffsetsToTxnRequest) key() 
int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 00000000000..bdb18441993
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,49 @@
+package sarama
+
+import (
+	"time"
+)
+
+//AddOffsetsToTxnResponse is a response type for adding offsets to txns
+type AddOffsetsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(a.Err))
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 00000000000..6289f451480
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,81 @@
+package sarama
+
+//AddPartitionsToTxnRequest is an add partitions to txn request type
+type AddPartitionsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt64(a.ProducerID)
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+		return err
+	}
+	for topic, partitions := range a.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.TopicPartitions = make(map[string][]int32)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		a.TopicPartitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+	return
V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go new file mode 100644 index 00000000000..73b73b07f84 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go @@ -0,0 +1,114 @@ +package sarama + +import ( + "time" +) + +//AddPartitionsToTxnResponse is a partition errors to transaction type +type AddPartitionsToTxnResponse struct { + ThrottleTime time.Duration + Errors map[string][]*PartitionError +} + +func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(a.Errors)); err != nil { + return err + } + + for topic, e := range a.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Errors = make(map[string][]*PartitionError) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Errors[topic] = make([]*PartitionError, m) + + for j := 0; j < m; j++ { + a.Errors[topic][j] = new(PartitionError) + if err := a.Errors[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *AddPartitionsToTxnResponse) key() int16 { + return 24 +} + +func (a *AddPartitionsToTxnResponse) version() int16 { + return 0 +} + +func (a *AddPartitionsToTxnResponse) headerVersion() int16 { + return 0 +} + +func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +//PartitionError is a partition error type +type PartitionError struct { + Partition int32 + Err KError +} + +func (p *PartitionError) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt16(int16(p.Err)) + return nil +} + +func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + p.Err = KError(kerr) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go new file mode 100644 index 00000000000..9dea0255f10 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/admin.go @@ -0,0 +1,934 @@ +package sarama + +import ( + "errors" + "fmt" + "math/rand" + "strconv" + "sync" + "time" +) + +// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics, +// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0. +// Methods with stricter requirements will specify the minimum broker version required. +// You MUST call Close() on a client to avoid leaks +type ClusterAdmin interface { + // Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher. + // It may take several seconds after CreateTopic returns success for all the brokers + // to become aware that the topic has been created. 
During this time, listTopics
+	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
+	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
+
+	// List the topics available in the cluster with the default options.
+	ListTopics() (map[string]TopicDetail, error)
+
+	// Describe some topics in the cluster.
+	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
+
+	// Delete a topic. It may take several seconds after DeleteTopic returns success
+	// for all the brokers to become aware that the topic is gone.
+	// During this time, listTopics may continue to return information about the deleted topic.
+	// If delete.topic.enable is false on the brokers, deleteTopic will mark
+	// the topic for deletion, but not actually delete it.
+	// This operation is supported by brokers with version 0.10.1.0 or higher.
+	DeleteTopic(topic string) error
+
+	// Increase the number of partitions of the topic according to the given count and assignment.
+	// If partitions are increased for a topic that has a key, the partition logic or ordering of
+	// the messages will be affected. It may take several seconds after this method returns
+	// success for all the brokers to become aware that the partitions have been created.
+	// During this time, ClusterAdmin#describeTopics may not return information about the
+	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
+	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
+
+	// Alter the replica assignment for partitions.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	AlterPartitionReassignments(topic string, assignment [][]int32) error
+
+	// Provides info on ongoing partition replica reassignments.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
+
+	// Delete records whose offset is smaller than the given offset of the corresponding partition.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
+
+	// Get the configuration for the specified resources.
+	// The returned configuration includes default values, and entries where Default
+	// is true can be used to distinguish them from user-supplied values.
+	// Config entries where ReadOnly is true cannot be updated.
+	// The value of config entries where Sensitive is true is always nil so
+	// sensitive information is not disclosed.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
+
+	// Update the configuration for the specified resources with the default options.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	// Topic is currently the only resource type whose configs can be updated.
+	// Updates are not transactional, so they may succeed for some resources while
+	// failing for others. The configs for a particular resource are updated atomically.
+	AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
+
+	// Creates access control lists (ACLs) which are bound to specific resources.
+ // This operation is not transactional so it may succeed for some ACLs while fail for others. + // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but + // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher. + CreateACL(resource Resource, acl Acl) error + + // Lists access control lists (ACLs) according to the supplied filter. + // it may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls + // This operation is supported by brokers with version 0.11.0.0 or higher. + ListAcls(filter AclFilter) ([]ResourceAcls, error) + + // Deletes access control lists (ACLs) according to the supplied filters. + // This operation is not transactional so it may succeed for some ACLs while fail for others. + // This operation is supported by brokers with version 0.11.0.0 or higher. + DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) + + // List the consumer groups available in the cluster. + ListConsumerGroups() (map[string]string, error) + + // Describe the given consumer groups. + DescribeConsumerGroups(groups []string) ([]*GroupDescription, error) + + // List the consumer group offsets available in the cluster. + ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) + + // Delete a consumer group. + DeleteConsumerGroup(group string) error + + // Get information about the nodes in the cluster + DescribeCluster() (brokers []*Broker, controllerID int32, err error) + + // Get information about all log directories on the given set of brokers + DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error) + + // Close shuts down the admin and closes underlying client. + Close() error +} + +type clusterAdmin struct { + client Client + conf *Config +} + +// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration. +func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) { + client, err := NewClient(addrs, conf) + if err != nil { + return nil, err + } + return NewClusterAdminFromClient(client) +} + +// NewClusterAdminFromClient creates a new ClusterAdmin using the given client. +// Note that underlying client will also be closed on admin's Close() call. 
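As a usage sketch for the ClusterAdmin API declared above: the broker address, topic name, and Kafka version below are illustrative placeholders, and error handling is compressed to log.Fatal; this is not part of the vendored code.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// The admin client gates request versions on this; set it to the real broker version.
	cfg.Version = sarama.V2_0_0_0

	// "localhost:9092" and "example-topic" are placeholder values.
	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	// Close() also closes the client that NewClusterAdmin created internally.
	defer admin.Close()

	if err := admin.CreateTopic("example-topic", &sarama.TopicDetail{
		NumPartitions:     3,
		ReplicationFactor: 1,
	}, false); err != nil {
		log.Fatal(err)
	}
}
```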
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) { + //make sure we can retrieve the controller + _, err := client.Controller() + if err != nil { + return nil, err + } + + ca := &clusterAdmin{ + client: client, + conf: client.Config(), + } + return ca, nil +} + +func (ca *clusterAdmin) Close() error { + return ca.client.Close() +} + +func (ca *clusterAdmin) Controller() (*Broker, error) { + return ca.client.Controller() +} + +func (ca *clusterAdmin) refreshController() (*Broker, error) { + return ca.client.RefreshController() +} + +// isErrNoController returns `true` if the given error type unwraps to an +// `ErrNotController` response from Kafka +func isErrNoController(err error) bool { + switch e := err.(type) { + case *TopicError: + return e.Err == ErrNotController + case *TopicPartitionError: + return e.Err == ErrNotController + case KError: + return e == ErrNotController + } + return false +} + +// retryOnError will repeatedly call the given (error-returning) func in the +// case that its response is non-nil and retriable (as determined by the +// provided retriable func) up to the maximum number of tries permitted by +// the admin client configuration +func (ca *clusterAdmin) retryOnError(retriable func(error) bool, fn func() error) error { + var err error + for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { + err = fn() + if err == nil || !retriable(err) { + return err + } + Logger.Printf( + "admin/request retrying after %dms... (%d attempts remaining)\n", + ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + time.Sleep(ca.conf.Admin.Retry.Backoff) + continue + } + return err +} + +func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { + if topic == "" { + return ErrInvalidTopic + } + + if detail == nil { + return errors.New("you must specify topic details") + } + + topicDetails := make(map[string]*TopicDetail) + topicDetails[topic] = detail + + request := &CreateTopicsRequest{ + TopicDetails: topicDetails, + ValidateOnly: validateOnly, + Timeout: ca.conf.Admin.Timeout, + } + + if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } + if ca.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 2 + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.CreateTopics(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicErrors[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { + controller, err := ca.Controller() + if err != nil { + return nil, err + } + + request := &MetadataRequest{ + Topics: topics, + AllowAutoTopicCreation: false, + } + + if ca.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 5 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + } + + response, err := controller.GetMetadata(request) + if err != nil { + return nil, err + } + return response.Topics, nil +} + +func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { + controller, err := ca.Controller() + if err != nil { + return nil, int32(0), err + } + + request := &MetadataRequest{ + Topics: []string{}, + } + + if ca.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 1 
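The retryOnError/isErrNoController pair above is the admin client's generic bounded-retry loop around controller-bound requests. A minimal standalone sketch of the same shape, for orientation only (the names and package here are illustrative, not sarama API):

```go
package retry

import "time"

// retryOnError calls fn up to max times, sleeping backoff between attempts,
// for as long as the returned error is considered retriable.
func retryOnError(max int, backoff time.Duration, retriable func(error) bool, fn func() error) error {
	var err error
	for attempt := 0; attempt < max; attempt++ {
		if err = fn(); err == nil || !retriable(err) {
			return err
		}
		time.Sleep(backoff)
	}
	return err
}
```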
+ } + + response, err := controller.GetMetadata(request) + if err != nil { + return nil, int32(0), err + } + + return response.Brokers, response.ControllerID, nil +} + +func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) { + brokers := ca.client.Brokers() + for _, b := range brokers { + if b.ID() == id { + return b, nil + } + } + return nil, fmt.Errorf("could not find broker id %d", id) +} + +func (ca *clusterAdmin) findAnyBroker() (*Broker, error) { + brokers := ca.client.Brokers() + if len(brokers) > 0 { + index := rand.Intn(len(brokers)) + return brokers[index], nil + } + return nil, errors.New("no available broker") +} + +func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { + // In order to build TopicDetails we need to first get the list of all + // topics using a MetadataRequest and then get their configs using a + // DescribeConfigsRequest request. To avoid sending many requests to the + // broker, we use a single DescribeConfigsRequest. + + // Send the all-topic MetadataRequest + b, err := ca.findAnyBroker() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + metadataReq := &MetadataRequest{} + metadataResp, err := b.GetMetadata(metadataReq) + if err != nil { + return nil, err + } + + topicsDetailsMap := make(map[string]TopicDetail) + + var describeConfigsResources []*ConfigResource + + for _, topic := range metadataResp.Topics { + topicDetails := TopicDetail{ + NumPartitions: int32(len(topic.Partitions)), + } + if len(topic.Partitions) > 0 { + topicDetails.ReplicaAssignment = map[int32][]int32{} + for _, partition := range topic.Partitions { + topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas + } + topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas)) + } + topicsDetailsMap[topic.Name] = topicDetails + + // we populate the resources we want to describe from the MetadataResponse + topicResource := ConfigResource{ + Type: TopicResource, + Name: topic.Name, + } + describeConfigsResources = append(describeConfigsResources, &topicResource) + } + + // Send the DescribeConfigsRequest + describeConfigsReq := &DescribeConfigsRequest{ + Resources: describeConfigsResources, + } + + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + describeConfigsReq.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + describeConfigsReq.Version = 2 + } + + describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq) + if err != nil { + return nil, err + } + + for _, resource := range describeConfigsResp.Resources { + topicDetails := topicsDetailsMap[resource.Name] + topicDetails.ConfigEntries = make(map[string]*string) + + for _, entry := range resource.Configs { + // only include non-default non-sensitive config + // (don't actually think topic config will ever be sensitive) + if entry.Default || entry.Sensitive { + continue + } + topicDetails.ConfigEntries[entry.Name] = &entry.Value + } + + topicsDetailsMap[resource.Name] = topicDetails + } + + return topicsDetailsMap, nil +} + +func (ca *clusterAdmin) DeleteTopic(topic string) error { + if topic == "" { + return ErrInvalidTopic + } + + request := &DeleteTopicsRequest{ + Topics: []string{topic}, + Timeout: ca.conf.Admin.Timeout, + } + + if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.DeleteTopics(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicErrorCodes[topic] + if !ok { + 
return ErrIncompleteResponse + } + + if topicErr != ErrNoError { + if topicErr == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error { + if topic == "" { + return ErrInvalidTopic + } + + topicPartitions := make(map[string]*TopicPartition) + topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment} + + request := &CreatePartitionsRequest{ + TopicPartitions: topicPartitions, + Timeout: ca.conf.Admin.Timeout, + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.CreatePartitions(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicPartitionErrors[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error { + if topic == "" { + return ErrInvalidTopic + } + + request := &AlterPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), + } + + for i := 0; i < len(assignment); i++ { + request.AddBlock(topic, int32(i), assignment[i]) + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + errs := make([]error, 0) + + rsp, err := b.AlterPartitionReassignments(request) + + if err != nil { + errs = append(errs, err) + } else { + if rsp.ErrorCode > 0 { + errs = append(errs, errors.New(rsp.ErrorCode.Error())) + } + + for topic, topicErrors := range rsp.Errors { + for partition, partitionError := range topicErrors { + if partitionError.errorCode != ErrNoError { + errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error()) + errs = append(errs, errors.New(errStr)) + } + } + } + } + + if len(errs) > 0 { + return ErrReassignPartitions{MultiError{&errs}} + } + + return nil + }) +} + +func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) { + if topic == "" { + return nil, ErrInvalidTopic + } + + request := &ListPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), + } + + request.AddBlock(topic, partitions) + + b, err := ca.Controller() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + rsp, err := b.ListPartitionReassignments(request) + + if err == nil && rsp != nil { + return rsp.TopicStatus, nil + } else { + return nil, err + } +} + +func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error { + if topic == "" { + return ErrInvalidTopic + } + partitionPerBroker := make(map[*Broker][]int32) + for partition := range partitionOffsets { + broker, err := ca.client.Leader(topic, partition) + if err != nil { + return err + } + if _, ok := partitionPerBroker[broker]; ok { + partitionPerBroker[broker] = append(partitionPerBroker[broker], partition) + } else { + partitionPerBroker[broker] = []int32{partition} + } + } + errs := make([]error, 0) + for broker, partitions := range partitionPerBroker { + topics := make(map[string]*DeleteRecordsRequestTopic) + recordsToDelete := make(map[int32]int64) + for _, p := range partitions { + recordsToDelete[p] = 
partitionOffsets[p] + } + topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete} + request := &DeleteRecordsRequest{ + Topics: topics, + Timeout: ca.conf.Admin.Timeout, + } + + rsp, err := broker.DeleteRecords(request) + if err != nil { + errs = append(errs, err) + } else { + deleteRecordsResponseTopic, ok := rsp.Topics[topic] + if !ok { + errs = append(errs, ErrIncompleteResponse) + } else { + for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions { + if deleteRecordsResponsePartition.Err != ErrNoError { + errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error())) + } + } + } + } + } + if len(errs) > 0 { + return ErrDeleteRecords{MultiError{&errs}} + } + //todo since we are dealing with couple of partitions it would be good if we return slice of errors + //for each partition instead of one error + return nil +} + +// Returns a bool indicating whether the resource request needs to go to a +// specific broker +func dependsOnSpecificNode(resource ConfigResource) bool { + return (resource.Type == BrokerResource && resource.Name != "") || + resource.Type == BrokerLoggerResource +} + +func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) { + var entries []ConfigEntry + var resources []*ConfigResource + resources = append(resources, &resource) + + request := &DescribeConfigsRequest{ + Resources: resources, + } + + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + request.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 2 + } + + var ( + b *Broker + err error + ) + + // DescribeConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(resource) { + id, _ := strconv.Atoi(resource.Name) + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } + if err != nil { + return nil, err + } + + _ = b.Open(ca.client.Config()) + rsp, err := b.DescribeConfigs(request) + if err != nil { + return nil, err + } + + for _, rspResource := range rsp.Resources { + if rspResource.Name == resource.Name { + if rspResource.ErrorMsg != "" { + return nil, errors.New(rspResource.ErrorMsg) + } + if rspResource.ErrorCode != 0 { + return nil, KError(rspResource.ErrorCode) + } + for _, cfgEntry := range rspResource.Configs { + entries = append(entries, *cfgEntry) + } + } + } + return entries, nil +} + +func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error { + var resources []*AlterConfigsResource + resources = append(resources, &AlterConfigsResource{ + Type: resourceType, + Name: name, + ConfigEntries: entries, + }) + + request := &AlterConfigsRequest{ + Resources: resources, + ValidateOnly: validateOnly, + } + + var ( + b *Broker + err error + ) + + // AlterConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { + id, _ := strconv.Atoi(name) + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } + if err != nil { + return err + } + + _ = b.Open(ca.client.Config()) + rsp, err := b.AlterConfigs(request) + if err != nil { + return err + } + + for _, rspResource := range rsp.Resources { + if rspResource.Name == name { + if rspResource.ErrorMsg != "" { + return errors.New(rspResource.ErrorMsg) + } + if rspResource.ErrorCode != 0 { + return KError(rspResource.ErrorCode) + } + } + } + return nil +} + +func (ca *clusterAdmin) CreateACL(resource Resource, 
acl Acl) error { + var acls []*AclCreation + acls = append(acls, &AclCreation{resource, acl}) + request := &CreateAclsRequest{AclCreations: acls} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return err + } + + _, err = b.CreateAcls(request) + return err +} + +func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) { + request := &DescribeAclsRequest{AclFilter: filter} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.DescribeAcls(request) + if err != nil { + return nil, err + } + + var lAcls []ResourceAcls + for _, rAcl := range rsp.ResourceAcls { + lAcls = append(lAcls, *rAcl) + } + return lAcls, nil +} + +func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) { + var filters []*AclFilter + filters = append(filters, &filter) + request := &DeleteAclsRequest{Filters: filters} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.DeleteAcls(request) + if err != nil { + return nil, err + } + + var mAcls []MatchingAcl + for _, fr := range rsp.FilterResponses { + for _, mACL := range fr.MatchingAcls { + mAcls = append(mAcls, *mACL) + } + } + return mAcls, nil +} + +func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) { + groupsPerBroker := make(map[*Broker][]string) + + for _, group := range groups { + controller, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + groupsPerBroker[controller] = append(groupsPerBroker[controller], group) + } + + for broker, brokerGroups := range groupsPerBroker { + response, err := broker.DescribeGroups(&DescribeGroupsRequest{ + Groups: brokerGroups, + }) + if err != nil { + return nil, err + } + + result = append(result, response.Groups...) 
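To make the ACL methods above concrete, a hedged sketch that grants a principal read access to one topic and then lists the matching bindings. The principal, host, and topic name are placeholders, and the ClusterAdmin is assumed to be constructed as in the earlier sketch:

```go
package example

import "github.com/Shopify/sarama"

func grantAndListRead(admin sarama.ClusterAdmin) ([]sarama.ResourceAcls, error) {
	resource := sarama.Resource{
		ResourceType:        sarama.AclResourceTopic,
		ResourceName:        "example-topic", // placeholder topic
		ResourcePatternType: sarama.AclPatternLiteral,
	}
	acl := sarama.Acl{
		Principal:      "User:alice", // placeholder principal
		Host:           "*",
		Operation:      sarama.AclOperationRead,
		PermissionType: sarama.AclPermissionAllow,
	}
	if err := admin.CreateACL(resource, acl); err != nil {
		return nil, err
	}

	// Filters take nullable fields; nil Principal/Host match anything.
	name := "example-topic"
	return admin.ListAcls(sarama.AclFilter{
		ResourceType:              sarama.AclResourceTopic,
		ResourceName:              &name,
		ResourcePatternTypeFilter: sarama.AclPatternLiteral,
		Operation:                 sarama.AclOperationAny,
		PermissionType:            sarama.AclPermissionAny,
	})
}
```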
+ } + return result, nil +} + +func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) { + allGroups = make(map[string]string) + + // Query brokers in parallel, since we have to query *all* brokers + brokers := ca.client.Brokers() + groupMaps := make(chan map[string]string, len(brokers)) + errChan := make(chan error, len(brokers)) + wg := sync.WaitGroup{} + + for _, b := range brokers { + wg.Add(1) + go func(b *Broker, conf *Config) { + defer wg.Done() + _ = b.Open(conf) // Ensure that broker is opened + + response, err := b.ListGroups(&ListGroupsRequest{}) + if err != nil { + errChan <- err + return + } + + groups := make(map[string]string) + for group, typ := range response.Groups { + groups[group] = typ + } + + groupMaps <- groups + }(b, ca.conf) + } + + wg.Wait() + close(groupMaps) + close(errChan) + + for groupMap := range groupMaps { + for group, protocolType := range groupMap { + allGroups[group] = protocolType + } + } + + // Intentionally return only the first error for simplicity + err = <-errChan + return +} + +func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + + request := &OffsetFetchRequest{ + ConsumerGroup: group, + partitions: topicPartitions, + } + + if ca.conf.Version.IsAtLeast(V0_10_2_0) { + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_8_2_2) { + request.Version = 1 + } + + return coordinator.FetchOffset(request) +} + +func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + request := &DeleteGroupsRequest{ + Groups: []string{group}, + } + + resp, err := coordinator.DeleteGroups(request) + if err != nil { + return err + } + + groupErr, ok := resp.GroupErrorCodes[group] + if !ok { + return ErrIncompleteResponse + } + + if groupErr != ErrNoError { + return groupErr + } + + return nil +} + +func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) { + allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata) + + // Query brokers in parallel, since we may have to query multiple brokers + logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds)) + errChan := make(chan error, len(brokerIds)) + wg := sync.WaitGroup{} + + for _, b := range brokerIds { + wg.Add(1) + broker, err := ca.findBroker(b) + if err != nil { + Logger.Printf("Unable to find broker with ID = %v\n", b) + continue + } + go func(b *Broker, conf *Config) { + defer wg.Done() + _ = b.Open(conf) // Ensure that broker is opened + + response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{}) + if err != nil { + errChan <- err + return + } + logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata) + logDirs[b.ID()] = response.LogDirs + logDirsMaps <- logDirs + }(broker, ca.conf) + } + + wg.Wait() + close(logDirsMaps) + close(errChan) + + for logDirsMap := range logDirsMaps { + for id, logDirs := range logDirsMap { + allLogDirs[id] = logDirs + } + } + + // Intentionally return only the first error for simplicity + err = <-errChan + return +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go new file mode 100644 index 00000000000..c88bb604a43 --- /dev/null +++ 
b/vendor/github.com/Shopify/sarama/alter_configs_request.go @@ -0,0 +1,126 @@ +package sarama + +//AlterConfigsRequest is an alter config request type +type AlterConfigsRequest struct { + Resources []*AlterConfigsResource + ValidateOnly bool +} + +//AlterConfigsResource is an alter config resource type +type AlterConfigsResource struct { + Type ConfigResourceType + Name string + ConfigEntries map[string]*string +} + +func (a *AlterConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(a.Resources)); err != nil { + return err + } + + for _, r := range a.Resources { + if err := r.encode(pe); err != nil { + return err + } + } + + pe.putBool(a.ValidateOnly) + return nil +} + +func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { + resourceCount, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Resources = make([]*AlterConfigsResource, resourceCount) + for i := range a.Resources { + r := &AlterConfigsResource{} + err = r.decode(pd, version) + if err != nil { + return err + } + a.Resources[i] = r + } + + validateOnly, err := pd.getBool() + if err != nil { + return err + } + + a.ValidateOnly = validateOnly + + return nil +} + +func (a *AlterConfigsResource) encode(pe packetEncoder) error { + pe.putInt8(int8(a.Type)) + + if err := pe.putString(a.Name); err != nil { + return err + } + + if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range a.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error { + t, err := pd.getInt8() + if err != nil { + return err + } + a.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + a.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + a.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + return err +} + +func (a *AlterConfigsRequest) key() int16 { + return 33 +} + +func (a *AlterConfigsRequest) version() int16 { + return 0 +} + +func (a *AlterConfigsRequest) headerVersion() int16 { + return 1 +} + +func (a *AlterConfigsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go new file mode 100644 index 00000000000..3266f927406 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go @@ -0,0 +1,101 @@ +package sarama + +import "time" + +//AlterConfigsResponse is a response type for alter config +type AlterConfigsResponse struct { + ThrottleTime time.Duration + Resources []*AlterConfigsResourceResponse +} + +//AlterConfigsResourceResponse is a response type for alter config resource +type AlterConfigsResourceResponse struct { + ErrorCode int16 + ErrorMsg string + Type ConfigResourceType + Name string +} + +func (a *AlterConfigsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(a.Resources)); err != nil { + return err + } + + for i := range a.Resources { + pe.putInt16(a.Resources[i].ErrorCode) + err := 
pe.putString(a.Resources[i].ErrorMsg)
+		if err != nil {
+			return err
+		}
+		pe.putInt8(int8(a.Resources[i].Type))
+		err = pe.putString(a.Resources[i].Name)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
+
+		errCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+		a.Resources[i].ErrorCode = errCode
+
+		e, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		a.Resources[i].ErrorMsg = e
+
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		a.Resources[i].Type = ConfigResourceType(t)
+
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		a.Resources[i].Name = name
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResponse) key() int16 {
+	return 33
+}
+
+func (a *AlterConfigsResponse) version() int16 {
+	return 0
+}
+
+func (a *AlterConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
new file mode 100644
index 00000000000..f0a2f9dd59b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
@@ -0,0 +1,130 @@
+package sarama
+
+type alterPartitionReassignmentsBlock struct {
+	replicas []int32
+}
+
+func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
+	if err := pe.putNullableCompactInt32Array(b.replicas); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
+	if b.replicas, err = pd.getCompactInt32Array(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type AlterPartitionReassignmentsRequest struct {
+	TimeoutMs int32
+	blocks    map[string]map[int32]*alterPartitionReassignmentsBlock
+	Version   int16
+}
+
+func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+	pe.putInt32(r.TimeoutMs)
+
+	pe.putCompactArrayLength(len(r.blocks))
+
+	for topic, partitions := range r.blocks {
+		if err := pe.putCompactString(topic); err != nil {
+			return err
+		}
+		pe.putCompactArrayLength(len(partitions))
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.TimeoutMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	topicCount, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getCompactString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getCompactArrayLength()
+			if err != nil {
+				return err
+			}
+			r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+			for j := 0; j <
partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsBlock{} + if err := block.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = block + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *AlterPartitionReassignmentsRequest) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) + } + + r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas} +} diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go new file mode 100644 index 00000000000..b3f9a15fe7f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go @@ -0,0 +1,157 @@ +package sarama + +type alterPartitionReassignmentsErrorBlock struct { + errorCode KError + errorMessage *string +} + +func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error { + pe.putInt16(int16(b.errorCode)) + if err := pe.putNullableCompactString(b.errorMessage); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) { + errorCode, err := pd.getInt16() + if err != nil { + return err + } + b.errorCode = KError(errorCode) + b.errorMessage, err = pd.getCompactNullableString() + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return err +} + +type AlterPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock +} + +func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock) + r.Errors[topic] = partitions + } + + partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message} +} + +func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.Errors)) + for topic, partitions := range r.Errors { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + 
return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numTopics > 0 { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsErrorBlock{} + if err := block.decode(pd); err != nil { + return err + } + + r.Errors[topic][partition] = block + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *AlterPartitionReassignmentsResponse) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go new file mode 100644 index 00000000000..d67c5e1e538 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -0,0 +1,29 @@ +package sarama + +//ApiVersionsRequest ... 
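The AlterPartitionReassignments messages above are "flexible version" (KIP-482) messages: strings and arrays are length-prefixed with an unsigned varint of length plus one, and each structure ends with a tagged-field block. A standalone sketch of the compact-string layout, illustrative only and not sarama's internal encoder:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// putCompactString appends a KIP-482 compact string: an unsigned varint
// holding len(s)+1, followed by the raw bytes (a length byte of 0 encodes
// a null string).
func putCompactString(buf []byte, s string) []byte {
	var scratch [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(scratch[:], uint64(len(s)+1))
	buf = append(buf, scratch[:n]...)
	return append(buf, s...)
}

func main() {
	// "topic-a" has length 7, so the prefix byte is 0x08.
	fmt.Printf("% x\n", putCompactString(nil, "topic-a")) // 08 74 6f 70 69 63 2d 61
}
```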
+type ApiVersionsRequest struct { +} + +func (a *ApiVersionsRequest) encode(pe packetEncoder) error { + return nil +} + +func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (a *ApiVersionsRequest) key() int16 { + return 18 +} + +func (a *ApiVersionsRequest) version() int16 { + return 0 +} + +func (a *ApiVersionsRequest) headerVersion() int16 { + return 1 +} + +func (a *ApiVersionsRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go new file mode 100644 index 00000000000..d09e8d9e153 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -0,0 +1,93 @@ +package sarama + +//ApiVersionsResponseBlock is an api version response block type +type ApiVersionsResponseBlock struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { + pe.putInt16(b.ApiKey) + pe.putInt16(b.MinVersion) + pe.putInt16(b.MaxVersion) + return nil +} + +func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { + var err error + + if b.ApiKey, err = pd.getInt16(); err != nil { + return err + } + + if b.MinVersion, err = pd.getInt16(); err != nil { + return err + } + + if b.MaxVersion, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +//ApiVersionsResponse is an api version response type +type ApiVersionsResponse struct { + Err KError + ApiVersions []*ApiVersionsResponseBlock +} + +func (r *ApiVersionsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { + return err + } + for _, apiVersion := range r.ApiVersions { + if err := apiVersion.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) + for i := 0; i < numBlocks; i++ { + block := new(ApiVersionsResponseBlock) + if err := block.decode(pd); err != nil { + return err + } + r.ApiVersions[i] = block + } + + return nil +} + +func (r *ApiVersionsResponse) key() int16 { + return 18 +} + +func (r *ApiVersionsResponse) version() int16 { + return 0 +} + +func (a *ApiVersionsResponse) headerVersion() int16 { + return 0 +} + +func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go new file mode 100644 index 00000000000..209fd2d34ec --- /dev/null +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -0,0 +1,1162 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "sync" + "time" + + "github.com/eapache/go-resiliency/breaker" + "github.com/eapache/queue" +) + +// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages +// to the correct broker for the provided topic-partition, refreshing metadata as appropriate, +// and parses responses for errors. You must read from the Errors() channel or the +// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid +// leaks: it will not be garbage-collected automatically when it passes out of +// scope. 
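Tying the ApiVersions pair above together: a client typically sends the empty request once per connection to learn which version range the broker supports for each API. A hedged sketch against a single broker (the address passed in is a placeholder for a real bootstrap address):

```go
package example

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func printApiVersions(addr string) error {
	broker := sarama.NewBroker(addr) // e.g. "localhost:9092" (placeholder)
	if err := broker.Open(sarama.NewConfig()); err != nil {
		return err
	}
	defer broker.Close()

	resp, err := broker.ApiVersions(&sarama.ApiVersionsRequest{})
	if err != nil {
		return err
	}
	for _, block := range resp.ApiVersions {
		fmt.Printf("api %d: v%d..v%d\n", block.ApiKey, block.MinVersion, block.MaxVersion)
	}
	return nil
}
```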
+type AsyncProducer interface { + + // AsyncClose triggers a shutdown of the producer. The shutdown has completed + // when both the Errors and Successes channels have been closed. When calling + // AsyncClose, you *must* continue to read from those channels in order to + // drain the results of any messages in flight. + AsyncClose() + + // Close shuts down the producer and waits for any buffered messages to be + // flushed. You must call this function before a producer object passes out of + // scope, as it may otherwise leak memory. You must call this before calling + // Close on the underlying client. + Close() error + + // Input is the input channel for the user to write messages to that they + // wish to send. + Input() chan<- *ProducerMessage + + // Successes is the success output channel back to the user when Return.Successes is + // enabled. If Return.Successes is true, you MUST read from this channel or the + // Producer will deadlock. It is suggested that you send and read messages + // together in a single select statement. + Successes() <-chan *ProducerMessage + + // Errors is the error output channel back to the user. You MUST read from this + // channel or the Producer will deadlock when the channel is full. Alternatively, + // you can set Producer.Return.Errors in your config to false, which prevents + // errors to be returned. + Errors() <-chan *ProducerError +} + +// transactionManager keeps the state necessary to ensure idempotent production +type transactionManager struct { + producerID int64 + producerEpoch int16 + sequenceNumbers map[string]int32 + mutex sync.Mutex +} + +const ( + noProducerID = -1 + noProducerEpoch = -1 +) + +func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) { + key := fmt.Sprintf("%s-%d", topic, partition) + t.mutex.Lock() + defer t.mutex.Unlock() + sequence := t.sequenceNumbers[key] + t.sequenceNumbers[key] = sequence + 1 + return sequence, t.producerEpoch +} + +func (t *transactionManager) bumpEpoch() { + t.mutex.Lock() + defer t.mutex.Unlock() + t.producerEpoch++ + for k := range t.sequenceNumbers { + t.sequenceNumbers[k] = 0 + } +} + +func (t *transactionManager) getProducerID() (int64, int16) { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.producerID, t.producerEpoch +} + +func newTransactionManager(conf *Config, client Client) (*transactionManager, error) { + txnmgr := &transactionManager{ + producerID: noProducerID, + producerEpoch: noProducerEpoch, + } + + if conf.Producer.Idempotent { + initProducerIDResponse, err := client.InitProducerID() + if err != nil { + return nil, err + } + txnmgr.producerID = initProducerIDResponse.ProducerID + txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch + txnmgr.sequenceNumbers = make(map[string]int32) + txnmgr.mutex = sync.Mutex{} + + Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch) + } + + return txnmgr, nil +} + +type asyncProducer struct { + client Client + conf *Config + + errors chan *ProducerError + input, successes, retries chan *ProducerMessage + inFlight sync.WaitGroup + + brokers map[*Broker]*brokerProducer + brokerRefs map[*brokerProducer]int + brokerLock sync.Mutex + + txnmgr *transactionManager +} + +// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. 
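Given the contract spelled out above (Errors must be drained, and the producer must be closed before its client), a minimal produce-and-flush sketch. The topic is a placeholder, and Close is relied on to flush buffered messages and surface any accumulated ProducerErrors:

```go
package example

import "github.com/Shopify/sarama"

func produceOnce(addrs []string) error {
	producer, err := sarama.NewAsyncProducer(addrs, sarama.NewConfig())
	if err != nil {
		return err
	}

	producer.Input() <- &sarama.ProducerMessage{
		Topic: "example-topic", // placeholder topic
		Value: sarama.StringEncoder("hello"),
	}

	// Close flushes anything still buffered and drains the Errors channel
	// internally, returning ProducerErrors (if any) instead of deadlocking.
	return producer.Close()
}
```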
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { + client, err := NewClient(addrs, conf) + if err != nil { + return nil, err + } + return newAsyncProducer(client) +} + +// NewAsyncProducerFromClient creates a new Producer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this producer. +func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { + // For clients passed in by the client, ensure we don't + // call Close() on it. + cli := &nopCloserClient{client} + return newAsyncProducer(cli) +} + +func newAsyncProducer(client Client) (AsyncProducer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + txnmgr, err := newTransactionManager(client.Config(), client) + if err != nil { + return nil, err + } + + p := &asyncProducer{ + client: client, + conf: client.Config(), + errors: make(chan *ProducerError), + input: make(chan *ProducerMessage), + successes: make(chan *ProducerMessage), + retries: make(chan *ProducerMessage), + brokers: make(map[*Broker]*brokerProducer), + brokerRefs: make(map[*brokerProducer]int), + txnmgr: txnmgr, + } + + // launch our singleton dispatchers + go withRecover(p.dispatcher) + go withRecover(p.retryHandler) + + return p, nil +} + +type flagSet int8 + +const ( + syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer + fin // final message from partitionProducer to brokerProducer and back + shutdown // start the shutdown process +) + +// ProducerMessage is the collection of elements passed to the Producer in order to send a message. +type ProducerMessage struct { + Topic string // The Kafka topic for this message. + // The partitioning key for this message. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Key Encoder + // The actual message to store in Kafka. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Value Encoder + + // The headers are key-value pairs that are transparently passed + // by Kafka between producers and consumers. + Headers []RecordHeader + + // This field is used to hold arbitrary data you wish to include so it + // will be available when receiving on the Successes and Errors channels. + // Sarama completely ignores this field and is only to be used for + // pass-through data. + Metadata interface{} + + // Below this point are filled in by the producer as the message is processed + + // Offset is the offset of the message stored on the broker. This is only + // guaranteed to be defined if the message was successfully delivered and + // RequiredAcks is not NoResponse. + Offset int64 + // Partition is the partition that the message was sent to. This is only + // guaranteed to be defined if the message was successfully delivered. + Partition int32 + // Timestamp can vary in behaviour depending on broker configuration, being + // in either one of the CreateTime or LogAppendTime modes (default CreateTime), + // and requiring version at least 0.10.0. + // + // When configured to CreateTime, the timestamp is specified by the producer + // either by explicitly setting this field, or when the message is added + // to a produce set. + // + // When configured to LogAppendTime, the timestamp assigned to the message + // by the broker. This is only guaranteed to be defined if the message was + // successfully delivered and RequiredAcks is not NoResponse. 
+ Timestamp time.Time + + retries int + flags flagSet + expectation chan *ProducerError + sequenceNumber int32 + producerEpoch int16 + hasSequence bool +} + +const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. + +func (m *ProducerMessage) byteSize(version int) int { + var size int + if version >= 2 { + size = maximumRecordOverhead + for _, h := range m.Headers { + size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32 + } + } else { + size = producerMessageOverhead + } + if m.Key != nil { + size += m.Key.Length() + } + if m.Value != nil { + size += m.Value.Length() + } + return size +} + +func (m *ProducerMessage) clear() { + m.flags = 0 + m.retries = 0 + m.sequenceNumber = 0 + m.producerEpoch = 0 + m.hasSequence = false +} + +// ProducerError is the type of error generated when the producer fails to deliver a message. +// It contains the original ProducerMessage as well as the actual error value. +type ProducerError struct { + Msg *ProducerMessage + Err error +} + +func (pe ProducerError) Error() string { + return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) +} + +func (pe ProducerError) Unwrap() error { + return pe.Err +} + +// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. +// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel +// when closing a producer. +type ProducerErrors []*ProducerError + +func (pe ProducerErrors) Error() string { + return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) +} + +func (p *asyncProducer) Errors() <-chan *ProducerError { + return p.errors +} + +func (p *asyncProducer) Successes() <-chan *ProducerMessage { + return p.successes +} + +func (p *asyncProducer) Input() chan<- *ProducerMessage { + return p.input +} + +func (p *asyncProducer) Close() error { + p.AsyncClose() + + if p.conf.Producer.Return.Successes { + go withRecover(func() { + for range p.successes { + } + }) + } + + var errors ProducerErrors + if p.conf.Producer.Return.Errors { + for event := range p.errors { + errors = append(errors, event) + } + } else { + <-p.errors + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (p *asyncProducer) AsyncClose() { + go withRecover(p.shutdown) +} + +// singleton +// dispatches messages by topic +func (p *asyncProducer) dispatcher() { + handlers := make(map[string]chan<- *ProducerMessage) + shuttingDown := false + + for msg := range p.input { + if msg == nil { + Logger.Println("Something tried to send a nil message, it was ignored.") + continue + } + + if msg.flags&shutdown != 0 { + shuttingDown = true + p.inFlight.Done() + continue + } else if msg.retries == 0 { + if shuttingDown { + // we can't just call returnError here because that decrements the wait group, + // which hasn't been incremented yet for this message, and shouldn't be + pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + continue + } + p.inFlight.Add(1) + } + + for _, interceptor := range p.conf.Producer.Interceptors { + msg.safelyApplyInterceptor(interceptor) + } + + version := 1 + if p.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } else if msg.Headers != nil { + p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11")) + continue + } + if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { + p.returnError(msg, 
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+	return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+	return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+	return p.input
+}
+
+func (p *asyncProducer) Close() error {
+	p.AsyncClose()
+
+	if p.conf.Producer.Return.Successes {
+		go withRecover(func() {
+			for range p.successes {
+			}
+		})
+	}
+
+	var errors ProducerErrors
+	if p.conf.Producer.Return.Errors {
+		for event := range p.errors {
+			errors = append(errors, event)
+		}
+	} else {
+		<-p.errors
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+	go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+	handlers := make(map[string]chan<- *ProducerMessage)
+	shuttingDown := false
+
+	for msg := range p.input {
+		if msg == nil {
+			Logger.Println("Something tried to send a nil message, it was ignored.")
+			continue
+		}
+
+		if msg.flags&shutdown != 0 {
+			shuttingDown = true
+			p.inFlight.Done()
+			continue
+		} else if msg.retries == 0 {
+			if shuttingDown {
+				// we can't just call returnError here because that decrements the wait group,
+				// which hasn't been incremented yet for this message, and shouldn't be
+				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+				if p.conf.Producer.Return.Errors {
+					p.errors <- pErr
+				} else {
+					Logger.Println(pErr)
+				}
+				continue
+			}
+			p.inFlight.Add(1)
+		}
+
+		for _, interceptor := range p.conf.Producer.Interceptors {
+			msg.safelyApplyInterceptor(interceptor)
+		}
+
+		version := 1
+		if p.conf.Version.IsAtLeast(V0_11_0_0) {
+			version = 2
+		} else if msg.Headers != nil {
+			p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
+			continue
+		}
+		if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
+			p.returnError(msg, ErrMessageSizeTooLarge)
+			continue
+		}
+
+		handler := handlers[msg.Topic]
+		if handler == nil {
+			handler = p.newTopicProducer(msg.Topic)
+			handlers[msg.Topic] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range handlers {
+		close(handler)
+	}
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+	parent *asyncProducer
+	topic  string
+	input  <-chan *ProducerMessage
+
+	breaker     *breaker.Breaker
+	handlers    map[int32]chan<- *ProducerMessage
+	partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	tp := &topicProducer{
+		parent:      p,
+		topic:       topic,
+		input:       input,
+		breaker:     breaker.New(3, 1, 10*time.Second),
+		handlers:    make(map[int32]chan<- *ProducerMessage),
+		partitioner: p.conf.Producer.Partitioner(topic),
+	}
+	go withRecover(tp.dispatch)
+	return input
+}
+
+func (tp *topicProducer) dispatch() {
+	for msg := range tp.input {
+		if msg.retries == 0 {
+			if err := tp.partitionMessage(msg); err != nil {
+				tp.parent.returnError(msg, err)
+				continue
+			}
+		}
+
+		handler := tp.handlers[msg.Partition]
+		if handler == nil {
+			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+			tp.handlers[msg.Partition] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range tp.handlers {
+		close(handler)
+	}
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+	var partitions []int32
+
+	err := tp.breaker.Run(func() (err error) {
+		requiresConsistency := false
+		if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
+			requiresConsistency = ep.MessageRequiresConsistency(msg)
+		} else {
+			requiresConsistency = tp.partitioner.RequiresConsistency()
+		}
+
+		if requiresConsistency {
+			partitions, err = tp.parent.client.Partitions(msg.Topic)
+		} else {
+			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+		}
+		return
+	})
+
+	if err != nil {
+		return err
+	}
+
+	numPartitions := int32(len(partitions))
+
+	if numPartitions == 0 {
+		return ErrLeaderNotAvailable
+	}
+
+	choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+	if err != nil {
+		return err
+	} else if choice < 0 || choice >= numPartitions {
+		return ErrInvalidPartition
+	}
+
+	msg.Partition = partitions[choice]
+
+	return nil
+}
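+
+// partitionMessage defers entirely to the configured Partitioner, so routing
+// can be controlled from Config. A sketch using the built-in constructors
+// (which one fits is application-dependent; the topic and partition values
+// below are made up):
+//
+//	config := NewConfig()
+//	config.Producer.Partitioner = NewManualPartitioner // honour msg.Partition as set by the caller
+//	// or: config.Producer.Partitioner = NewHashPartitioner // derive the partition from msg.Key
+//
+//	producer.Input() <- &ProducerMessage{Topic: "events", Partition: 3, Value: StringEncoder("pinned")}
+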
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+	parent    *asyncProducer
+	topic     string
+	partition int32
+	input     <-chan *ProducerMessage
+
+	leader         *Broker
+	breaker        *breaker.Breaker
+	brokerProducer *brokerProducer
+
+	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
+	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
+	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+	// therefore whether our buffer is complete and safe to flush)
+	highWatermark int
+	retryState    []partitionRetryState
+}
+
+type partitionRetryState struct {
+	buf          []*ProducerMessage
+	expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	pp := &partitionProducer{
+		parent:    p,
+		topic:     topic,
+		partition: partition,
+		input:     input,
+
+		breaker:    breaker.New(3, 1, 10*time.Second),
+		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+	}
+	go withRecover(pp.dispatch)
+	return input
+}
+
+func (pp *partitionProducer) backoff(retries int) {
+	var backoff time.Duration
+	if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
+		maxRetries := pp.parent.conf.Producer.Retry.Max
+		backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
+	} else {
+		backoff = pp.parent.conf.Producer.Retry.Backoff
+	}
+	if backoff > 0 {
+		time.Sleep(backoff)
+	}
+}
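+
+// backoff consults Retry.BackoffFunc before falling back to the fixed
+// Retry.Backoff duration. A sketch of a capped exponential policy (the base
+// and cap values are arbitrary, not recommendations):
+//
+//	config.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
+//		d := 100 * time.Millisecond << uint(retries)
+//		if d > 10*time.Second {
+//			d = 10 * time.Second
+//		}
+//		return d
+//	}
+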
+func (pp *partitionProducer) dispatch() {
+	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+	// on the first message
+	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+	if pp.leader != nil {
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+	}
+
+	defer func() {
+		if pp.brokerProducer != nil {
+			pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+		}
+	}()
+
+	for msg := range pp.input {
+		if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
+			select {
+			case <-pp.brokerProducer.abandoned:
+				// a message on the abandoned channel means that our current broker selection is out of date
+				Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+				pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+				pp.brokerProducer = nil
+				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+			default:
+				// producer connection is still open.
+			}
+		}
+
+		if msg.retries > pp.highWatermark {
+			// a new, higher, retry level; handle it and then back off
+			pp.newHighWatermark(msg.retries)
+			pp.backoff(msg.retries)
+		} else if pp.highWatermark > 0 {
+			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+			if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+				if msg.flags&fin == fin {
+					pp.retryState[msg.retries].expectChaser = false
+					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				} else {
+					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+				}
+				continue
+			} else if msg.flags&fin == fin {
+				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+				// meaning this retry level is done and we can go down (at least) one level and flush that
+				pp.retryState[pp.highWatermark].expectChaser = false
+				pp.flushRetryBuffers()
+				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				continue
+			}
+		}
+
+		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+		// without breaking any of our ordering guarantees
+
+		if pp.brokerProducer == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnError(msg, err)
+				pp.backoff(msg.retries)
+				continue
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		// Now that we know we have a broker to actually try and send this message to, generate the sequence
+		// number for it.
+		// All messages being retried (sent or not) have already had their retry count updated.
+		// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
+		if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
+			msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+			msg.hasSequence = true
+		}
+
+		pp.brokerProducer.input <- msg
+	}
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+	pp.highWatermark = hwm
+
+	// send off a fin so that we know when everything "in between" has made it
+	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+	pp.retryState[pp.highWatermark].expectChaser = true
+	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+	pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+	// a new HWM means that our current broker selection is out of date
+	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+	pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+	pp.brokerProducer = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+	for {
+		pp.highWatermark--
+
+		if pp.brokerProducer == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+				goto flushDone
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		for _, msg := range pp.retryState[pp.highWatermark].buf {
+			pp.brokerProducer.input <- msg
+		}
+
+	flushDone:
+		pp.retryState[pp.highWatermark].buf = nil
+		if pp.retryState[pp.highWatermark].expectChaser {
+			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+			break
+		} else if pp.highWatermark == 0 {
+			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+			break
+		}
+	}
+}
+
+func (pp *partitionProducer) updateLeader() error {
+	return pp.breaker.Run(func() (err error) {
+		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+			return err
+		}
+
+		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+			return err
+		}
+
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+		return nil
+	})
+}
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
+	var (
+		input     = make(chan *ProducerMessage)
+		bridge    = make(chan *produceSet)
+		responses = make(chan *brokerProducerResponse)
+	)
+
+	bp := &brokerProducer{
+		parent:         p,
+		broker:         broker,
+		input:          input,
+		output:         bridge,
+		responses:      responses,
+		stopchan:       make(chan struct{}),
+		buffer:         newProduceSet(p),
+		currentRetries: make(map[string]map[int32]error),
+	}
+	go withRecover(bp.run)
+
+	// minimal bridge to make the network response `select`able
+	go withRecover(func() {
+		for set := range bridge {
+			request := set.buildRequest()
+
+			response, err := broker.Produce(request)
+
+			responses <- &brokerProducerResponse{
+				set: set,
+				err: err,
+				res: response,
+			}
+		}
+		close(responses)
+	})
+
+	if p.conf.Producer.Retry.Max <= 0 {
+		bp.abandoned = make(chan struct{})
+	}
+
+	return bp
+}
+
+type brokerProducerResponse struct {
+	set *produceSet
+	err error
+	res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+	parent *asyncProducer
+	broker *Broker
+
+	input     chan *ProducerMessage
+	output    chan<- *produceSet
+	responses <-chan *brokerProducerResponse
+	abandoned chan struct{}
+	stopchan  chan struct{}
+
+	buffer     *produceSet
+	timer      <-chan time.Time
+	timerFired bool
+
+	closing        error
+	currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+	var output chan<- *produceSet
+	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+	for {
+		select {
+		case msg, ok := <-bp.input:
+			if !ok {
+				Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
+				bp.shutdown()
+				return
+			}
+
+			if msg == nil {
+				continue
+			}
+
+			if msg.flags&syn == syn {
+				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+					bp.broker.ID(), msg.Topic, msg.Partition)
+				if bp.currentRetries[msg.Topic] == nil {
+					bp.currentRetries[msg.Topic] = make(map[int32]error)
+				}
+				bp.currentRetries[msg.Topic][msg.Partition] = nil
+				bp.parent.inFlight.Done()
+				continue
+			}
+
+			if reason := bp.needsRetry(msg); reason != nil {
+				bp.parent.retryMessage(msg, reason)
+
+				if bp.closing == nil && msg.flags&fin == fin {
+					// we were retrying this partition but we can start processing again
+					delete(bp.currentRetries[msg.Topic], msg.Partition)
+					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+						bp.broker.ID(), msg.Topic, msg.Partition)
+				}
+
+				continue
+			}
+
+			if bp.buffer.wouldOverflow(msg) {
+				Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, false); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
+
+			if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
+				// The epoch was reset, need to roll the buffer over
+				Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, true); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
+			if err := bp.buffer.add(msg); err != nil {
+				bp.parent.returnError(msg, err)
+				continue
+			}
+
+			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+				bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
+			}
+		case <-bp.timer:
+			bp.timerFired = true
+		case output <- bp.buffer:
+			bp.rollOver()
+		case response, ok := <-bp.responses:
+			if ok {
+				bp.handleResponse(response)
+			}
+		case <-bp.stopchan:
+			Logger.Printf(
+				"producer/broker/%d run loop asked to stop\n", bp.broker.ID())
+			return
+		}
+
+		if bp.timerFired || bp.buffer.readyToFlush() {
+			output = bp.output
+		} else {
+			output = nil
+		}
+	}
+}
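+
+// The run loop above only enables its output case once the flush timer has
+// fired or the buffer reports readyToFlush, both of which are driven by the
+// flush settings in Config. A sketch of batching-oriented values (the numbers
+// are illustrative, not recommendations):
+//
+//	config.Producer.Flush.Frequency = 100 * time.Millisecond // flush at least this often
+//	config.Producer.Flush.Messages = 1000                    // ...or once this many messages buffer up
+//	config.Producer.Flush.MaxMessages = 5000                 // hard cap per request
+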
Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) +} + +func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { + if bp.closing != nil { + return bp.closing + } + + return bp.currentRetries[msg.Topic][msg.Partition] +} + +func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error { + for { + select { + case response := <-bp.responses: + bp.handleResponse(response) + // handling a response can change our state, so re-check some things + if reason := bp.needsRetry(msg); reason != nil { + return reason + } else if !bp.buffer.wouldOverflow(msg) && !forceRollover { + return nil + } + case bp.output <- bp.buffer: + bp.rollOver() + return nil + } + } +} + +func (bp *brokerProducer) rollOver() { + bp.timer = nil + bp.timerFired = false + bp.buffer = newProduceSet(bp.parent) +} + +func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { + if response.err != nil { + bp.handleError(response.set, response.err) + } else { + bp.handleSuccess(response.set, response.res) + } + + if bp.buffer.empty() { + bp.rollOver() // this can happen if the response invalidated our buffer + } +} + +func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { + // we iterate through the blocks in the request set, not the response, so that we notice + // if the response is missing a block completely + var retryTopics []string + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + if response == nil { + // this only happens when RequiredAcks is NoResponse, so we have to assume success + bp.parent.returnSuccesses(pSet.msgs) + return + } + + block := response.GetBlock(topic, partition) + if block == nil { + bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse) + return + } + + switch block.Err { + // Success + case ErrNoError: + if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { + for _, msg := range pSet.msgs { + msg.Timestamp = block.Timestamp + } + } + for i, msg := range pSet.msgs { + msg.Offset = block.Offset + int64(i) + } + bp.parent.returnSuccesses(pSet.msgs) + // Duplicate + case ErrDuplicateSequenceNumber: + bp.parent.returnSuccesses(pSet.msgs) + // Retriable errors + case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + if bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + bp.parent.returnErrors(pSet.msgs, block.Err) + } else { + retryTopics = append(retryTopics, topic) + } + // Other non-retriable errors + default: + if bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + } + bp.parent.returnErrors(pSet.msgs, block.Err) + } + }) + + if len(retryTopics) > 0 { + if bp.parent.conf.Producer.Idempotent { + err := bp.parent.client.RefreshMetadata(retryTopics...) 
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+	// we iterate through the blocks in the request set, not the response, so that we notice
+	// if the response is missing a block completely
+	var retryTopics []string
+	sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+		if response == nil {
+			// this only happens when RequiredAcks is NoResponse, so we have to assume success
+			bp.parent.returnSuccesses(pSet.msgs)
+			return
+		}
+
+		block := response.GetBlock(topic, partition)
+		if block == nil {
+			bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
+			return
+		}
+
+		switch block.Err {
+		// Success
+		case ErrNoError:
+			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+				for _, msg := range pSet.msgs {
+					msg.Timestamp = block.Timestamp
+				}
+			}
+			for i, msg := range pSet.msgs {
+				msg.Offset = block.Offset + int64(i)
+			}
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Duplicate
+		case ErrDuplicateSequenceNumber:
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Retriable errors
+		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+			if bp.parent.conf.Producer.Retry.Max <= 0 {
+				bp.parent.abandonBrokerConnection(bp.broker)
+				bp.parent.returnErrors(pSet.msgs, block.Err)
+			} else {
+				retryTopics = append(retryTopics, topic)
+			}
+		// Other non-retriable errors
+		default:
+			if bp.parent.conf.Producer.Retry.Max <= 0 {
+				bp.parent.abandonBrokerConnection(bp.broker)
+			}
+			bp.parent.returnErrors(pSet.msgs, block.Err)
+		}
+	})
+
+	if len(retryTopics) > 0 {
+		if bp.parent.conf.Producer.Idempotent {
+			err := bp.parent.client.RefreshMetadata(retryTopics...)
+			if err != nil {
+				Logger.Printf("Failed refreshing metadata because of %v\n", err)
+			}
+		}
+
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			block := response.GetBlock(topic, partition)
+			if block == nil {
+				// handled in the previous "eachPartition" loop
+				return
+			}
+
+			switch block.Err {
+			case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+				ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+				Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+					bp.broker.ID(), topic, partition, block.Err)
+				if bp.currentRetries[topic] == nil {
+					bp.currentRetries[topic] = make(map[int32]error)
+				}
+				bp.currentRetries[topic][partition] = block.Err
+				if bp.parent.conf.Producer.Idempotent {
+					go bp.parent.retryBatch(topic, partition, pSet, block.Err)
+				} else {
+					bp.parent.retryMessages(pSet.msgs, block.Err)
+				}
+				// dropping the following messages has the side effect of incrementing their retry count
+				bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+			}
+		})
+	}
+}
+
+func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
+	Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
+	produceSet := newProduceSet(p)
+	produceSet.msgs[topic] = make(map[int32]*partitionSet)
+	produceSet.msgs[topic][partition] = pSet
+	produceSet.bufferBytes += pSet.bufferBytes
+	produceSet.bufferCount += len(pSet.msgs)
+	for _, msg := range pSet.msgs {
+		if msg.retries >= p.conf.Producer.Retry.Max {
+			p.returnError(msg, kerr)
+			return
+		}
+		msg.retries++
+	}
+
+	// it's expected that a metadata refresh has been requested prior to calling retryBatch
+	leader, err := p.client.Leader(topic, partition)
+	if err != nil {
+		Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
+		for _, msg := range pSet.msgs {
+			p.returnError(msg, kerr)
+		}
+		return
+	}
+	bp := p.getBrokerProducer(leader)
+	bp.output <- produceSet
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+	switch err.(type) {
+	case PacketEncodingError:
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.returnErrors(pSet.msgs, err)
+		})
+	default:
+		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+		bp.parent.abandonBrokerConnection(bp.broker)
+		_ = bp.broker.Close()
+		bp.closing = err
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.rollOver()
+	}
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+	var msg *ProducerMessage
+	buf := queue.New()
+
+	for {
+		if buf.Length() == 0 {
+			msg = <-p.retries
+		} else {
+			select {
+			case msg = <-p.retries:
+			case p.input <- buf.Peek().(*ProducerMessage):
+				buf.Remove()
+				continue
+			}
+		}
+
+		if msg == nil {
+			return
+		}
+
+		buf.Add(msg)
+	}
+}
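+
+// retryBatch is only used on the idempotent path. Enabling idempotence takes
+// a few coordinated settings; a sketch of the combination this producer
+// expects (values shown are the usual pairing, not a tuning recommendation):
+//
+//	config := NewConfig()
+//	config.Version = V0_11_0_0                 // idempotence needs at least Kafka 0.11
+//	config.Producer.Idempotent = true
+//	config.Producer.RequiredAcks = WaitForAll
+//	config.Net.MaxOpenRequests = 1             // preserve ordering guarantees
+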
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+	Logger.Println("Producer shutting down.")
+	p.inFlight.Add(1)
+	p.input <- &ProducerMessage{flags: shutdown}
+
+	p.inFlight.Wait()
+
+	err := p.client.Close()
+	if err != nil {
+		Logger.Println("producer/shutdown failed to close the embedded client:", err)
+	}
+
+	close(p.input)
+	close(p.retries)
+	close(p.errors)
+	close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
+	// will never see a message with this number, so we can never continue the sequence.
+	if msg.hasSequence {
+		Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
+		p.txnmgr.bumpEpoch()
+	}
+	msg.clear()
+	pErr := &ProducerError{Msg: msg, Err: err}
+	if p.conf.Producer.Return.Errors {
+		p.errors <- pErr
+	} else {
+		Logger.Println(pErr)
+	}
+	p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.returnError(msg, err)
+	}
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+	for _, msg := range batch {
+		if p.conf.Producer.Return.Successes {
+			msg.clear()
+			p.successes <- msg
+		}
+		p.inFlight.Done()
+	}
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+	if msg.retries >= p.conf.Producer.Retry.Max {
+		p.returnError(msg, err)
+	} else {
+		msg.retries++
+		p.retries <- msg
+	}
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.retryMessage(msg, err)
+	}
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bp := p.brokers[broker]
+
+	if bp == nil {
+		bp = p.newBrokerProducer(broker)
+		p.brokers[broker] = bp
+		p.brokerRefs[bp] = 0
+	}
+
+	p.brokerRefs[bp]++
+
+	return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	p.brokerRefs[bp]--
+	if p.brokerRefs[bp] == 0 {
+		close(bp.input)
+		delete(p.brokerRefs, bp)
+
+		if p.brokers[broker] == bp {
+			delete(p.brokers, broker)
+		}
+	}
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bc, ok := p.brokers[broker]
+	if ok && bc.abandoned != nil {
+		close(bc.abandoned)
+	}
+
+	delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
new file mode 100644
index 00000000000..0ce7fea1f61
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/balance_strategy.go
@@ -0,0 +1,1061 @@
+package sarama
+
+import (
+	"container/heap"
+	"math"
+	"sort"
+	"strings"
+)
+
+const (
+	// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
+	RangeBalanceStrategyName = "range"
+
+	// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
+	RoundRobinBalanceStrategyName = "roundrobin"
+
+	// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
+	StickyBalanceStrategyName = "sticky"
+
+	defaultGeneration = -1
+)
+
+// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
+// It contains an allocation of topic/partitions by memberID in the form of
+// a `memberID -> topic -> partitions` map.
+type BalanceStrategyPlan map[string]map[string][]int32
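+
+// A concrete plan for two members sharing one six-partition topic might look
+// like the following literal (member and topic names are made up):
+//
+//	plan := BalanceStrategyPlan{
+//		"member-1": {"events": []int32{0, 1, 2}},
+//		"member-2": {"events": []int32{3, 4, 5}},
+//	}
+//	plan.Add("member-2", "logs", 0) // appends under the existing member entry
+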
+// Add assigns a topic with a number of partitions to a member.
+func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
+	if len(partitions) == 0 {
+		return
+	}
+	if _, ok := p[memberID]; !ok {
+		p[memberID] = make(map[string][]int32, 1)
+	}
+	p[memberID][topic] = append(p[memberID][topic], partitions...)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategy is used to balance topics and partitions
+// across members of a consumer group
+type BalanceStrategy interface {
+	// Name uniquely identifies the strategy.
+	Name() string
+
+	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
+	// and returns a distribution plan.
+	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+
+	// AssignmentData returns the serialized assignment data for the specified
+	// memberID
+	AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
+// Example with one topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 1, 2]}
+//   M2: {T: [3, 4, 5]}
+var BalanceStrategyRange = &balanceStrategy{
+	name: RangeBalanceStrategyName,
+	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+		step := float64(len(partitions)) / float64(len(memberIDs))
+
+		for i, memberID := range memberIDs {
+			pos := float64(i)
+			min := int(math.Floor(pos*step + 0.5))
+			max := int(math.Floor((pos+1)*step + 0.5))
+			plan.Add(memberID, topic, partitions[min:max]...)
+		}
+	},
+}
+
+// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 2, 4]}
+//   M2: {T: [1, 3, 5]}
+var BalanceStrategyRoundRobin = &balanceStrategy{
+	name: RoundRobinBalanceStrategyName,
+	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+		for i, part := range partitions {
+			memberID := memberIDs[i%len(memberIDs)]
+			plan.Add(memberID, topic, part)
+		}
+	},
+}
+
+// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
+// while maintaining a balanced partition distribution.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 2, 4]}
+//   M2: {T: [1, 3, 5]}
+//
+// On reassignment with an additional consumer, you might get an assignment plan like:
+//   M1: {T: [0, 2]}
+//   M2: {T: [1, 3]}
+//   M3: {T: [4, 5]}
+var BalanceStrategySticky = &stickyBalanceStrategy{}
+
+// --------------------------------------------------------------------
+
+type balanceStrategy struct {
+	name   string
+	coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
+}
+
+// Name implements BalanceStrategy.
+func (s *balanceStrategy) Name() string { return s.name }
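+
+// These strategies are selected through the consumer group configuration. A
+// sketch, plus the range arithmetic worked for six partitions over two members
+// (step = 6/2 = 3, so member i takes partitions [3i, 3i+3)):
+//
+//	config := NewConfig()
+//	config.Consumer.Group.Rebalance.Strategy = BalanceStrategyRoundRobin
+//	// BalanceStrategyRange (the default) with 6 partitions and members M1, M2:
+//	//   M1: min = floor(0*3 + 0.5) = 0, max = floor(1*3 + 0.5) = 3 -> [0 1 2]
+//	//   M2: min = floor(1*3 + 0.5) = 3, max = floor(2*3 + 0.5) = 6 -> [3 4 5]
+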
+// Plan implements BalanceStrategy.
+func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// Build members by topic map
+	mbt := make(map[string][]string)
+	for memberID, meta := range members {
+		for _, topic := range meta.Topics {
+			mbt[topic] = append(mbt[topic], memberID)
+		}
+	}
+
+	// Sort members for each topic
+	for topic, memberIDs := range mbt {
+		sort.Sort(&balanceStrategySortable{
+			topic:     topic,
+			memberIDs: memberIDs,
+		})
+	}
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(members))
+	for topic, memberIDs := range mbt {
+		s.coreFn(plan, memberIDs, topic, topics[topic])
+	}
+	return plan, nil
+}
+
+// AssignmentData: simple strategies do not require any shared assignment data.
+func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil
+}
+
+type balanceStrategySortable struct {
+	topic     string
+	memberIDs []string
+}
+
+func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
+func (p balanceStrategySortable) Swap(i, j int) {
+	p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
+}
+func (p balanceStrategySortable) Less(i, j int) bool {
+	return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
+}
+
+// balanceStrategyHashValue computes a 32-bit FNV-1a hash over the concatenated strings.
+func balanceStrategyHashValue(vv ...string) uint32 {
+	h := uint32(2166136261)
+	for _, s := range vv {
+		for _, c := range s {
+			h ^= uint32(c)
+			h *= 16777619
+		}
+	}
+	return h
+}
+type stickyBalanceStrategy struct {
+	movements partitionMovements
+}
+
+// Name implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
+
+// Plan implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// track partition movements during generation of the partition assignment plan
+	s.movements = partitionMovements{
+		Movements:                 make(map[topicPartitionAssignment]consumerPair),
+		PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
+	}
+
+	// prepopulate the current assignment state from userdata on the consumer group members
+	currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
+	if err != nil {
+		return nil, err
+	}
+
+	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+	isFreshAssignment := false
+	if len(currentAssignment) == 0 {
+		isFreshAssignment = true
+	}
+
+	// create a mapping of all current topic partitions and the consumers that can be assigned to them
+	partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
+		}
+	}
+
+	// create a mapping of all consumers to all potential topic partitions that can be assigned to them
+	// also, populate the mapping of partitions to potential consumers
+	consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
+	for memberID, meta := range members {
+		consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
+		for _, topicSubscription := range meta.Topics {
+			// only evaluate topic subscriptions that are present in the supplied topics map
+			if _, found := topics[topicSubscription]; found {
+				for _, partition := range topics[topicSubscription] {
+					topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
+					consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
+					partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
+				}
+			}
+		}
+
+		// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
+		if _, exists := currentAssignment[memberID]; !exists {
+			currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
+		}
+	}
+
+	// create a mapping of each partition to its current consumer, where possible
+	currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
+	unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unvisitedPartitions[partition] = true
+	}
+	var unassignedPartitions []topicPartitionAssignment
+	for memberID, partitions := range currentAssignment {
+		var keepPartitions []topicPartitionAssignment
+		for _, partition := range partitions {
+			// If this partition no longer exists at all, likely due to the
+			// topic being deleted, we remove the partition from the member.
+			if _, exists := partition2AllPotentialConsumers[partition]; !exists {
+				continue
+			}
+			delete(unvisitedPartitions, partition)
+			currentPartitionConsumers[partition] = memberID
+
+			if !strsContains(members[memberID].Topics, partition.Topic) {
+				unassignedPartitions = append(unassignedPartitions, partition)
+				continue
+			}
+			keepPartitions = append(keepPartitions, partition)
+		}
+		currentAssignment[memberID] = keepPartitions
+	}
+	for unvisited := range unvisitedPartitions {
+		unassignedPartitions = append(unassignedPartitions, unvisited)
+	}
+	// sort the topic partitions in order of priority for reassignment
+	sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
+
+	// at this point we have preserved all valid topic partition to consumer assignments and removed
+	// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
+	// to consumers so that the topic partition assignments are as balanced as possible.
+
+	// an ascending sorted set of consumers based on how many topic partitions are already assigned to them
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(currentAssignment))
+	for memberID, assignments := range currentAssignment {
+		if len(assignments) == 0 {
+			plan[memberID] = make(map[string][]int32)
+		} else {
+			for _, assignment := range assignments {
+				plan.Add(memberID, assignment.Topic, assignment.Partition)
+			}
+		}
+	}
+	return plan, nil
+}
+
+// AssignmentData serializes the set of topics currently assigned to the
+// specified member as part of the supplied balance plan
+func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return encode(&StickyAssignorUserDataV1{
+		Topics:     topics,
+		Generation: generationID,
+	}, nil)
+}
+
+func strsContains(s []string, value string) bool {
+	for _, entry := range s {
+		if entry == value {
+			return true
+		}
+	}
+	return false
+}
+
+// Balance assignments across consumers for maximum fairness and stickiness.
+func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
+	initializing := false
+	if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 {
+		initializing = true
+	}
+
+	// assign all unassigned partitions
+	for _, partition := range unassignedPartitions {
+		// skip if there is no potential consumer for the partition
+		if len(partition2AllPotentialConsumers[partition]) == 0 {
+			continue
+		}
+		sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
+	}
+
+	// narrow down the reassignment scope to only those partitions that can actually be reassigned
+	for partition := range partition2AllPotentialConsumers {
+		if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
+		}
+	}
+
+	// narrow down the reassignment scope to only those consumers that are subject to reassignment
+	fixedAssignments := make(map[string][]topicPartitionAssignment)
+	for memberID := range consumer2AllPotentialPartitions {
+		if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
+			fixedAssignments[memberID] = currentAssignment[memberID]
+			delete(currentAssignment, memberID)
+			sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+		}
+	}
+
+	// create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
+	preBalanceAssignment := deepCopyAssignment(currentAssignment)
+	preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer))
+	for k, v := range currentPartitionConsumer {
+		preBalancePartitionConsumers[k] = v
+	}
+
+	reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
+
+	// if we are not preserving existing assignments and we have made changes to the current assignment
+	// make sure we are getting a more balanced assignment; otherwise, revert to previous assignment
+	if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
+		currentAssignment = deepCopyAssignment(preBalanceAssignment)
+		currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers))
+		for k, v := range preBalancePartitionConsumers {
+			currentPartitionConsumer[k] = v
+		}
+	}
+
+	// add the fixed assignments (those that could not change) back
+	for consumer, assignments := range fixedAssignments {
+		currentAssignment[consumer] = assignments
+	}
+}
+
+// Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs.
+// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
+// Lower balance score indicates a more balanced assignment.
+func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
+	consumer2AssignmentSize := make(map[string]int, len(assignment))
+	for memberID, partitions := range assignment {
+		consumer2AssignmentSize[memberID] = len(partitions)
+	}
+
+	var score float64
+	for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
+		delete(consumer2AssignmentSize, memberID)
+		for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
+			score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
+		}
+	}
+	return int(score)
+}
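+
+// Worked example for getBalanceScore: with assignment sizes {A: 3, B: 1, C: 2}
+// the pairwise differences are |3-1| + |3-2| + |1-2| = 2 + 1 + 1 = 4, while the
+// perfectly even {A: 2, B: 2, C: 2} scores 0:
+//
+//	score := getBalanceScore(map[string][]topicPartitionAssignment{
+//		"A": make([]topicPartitionAssignment, 3),
+//		"B": make([]topicPartitionAssignment, 1),
+//		"C": make([]topicPartitionAssignment, 2),
+//	}) // score == 4
+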
+// Determine whether the current assignment plan is balanced.
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	min := len(currentAssignment[sortedCurrentSubscriptions[0]])
+	max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
+	if min >= max-1 {
+		// if the minimum and maximum numbers of partitions assigned to consumers differ by at most one, return true
+		return true
+	}
+
+	// create a mapping from partitions to the consumer assigned to them
+	allPartitions := make(map[topicPartitionAssignment]string)
+	for memberID, partitions := range currentAssignment {
+		for _, partition := range partitions {
+			if _, exists := allPartitions[partition]; exists {
+				Logger.Printf("Topic %s Partition %d is assigned to more than one consumer", partition.Topic, partition.Partition)
+			}
+			allPartitions[partition] = memberID
+		}
+	}
+
+	// for each consumer that does not have all the topic partitions it can get, make sure none of the topic
+	// partitions it could but did not get can be moved to it (because that would break the balance)
+	for _, memberID := range sortedCurrentSubscriptions {
+		consumerPartitions := currentAssignment[memberID]
+		consumerPartitionCount := len(consumerPartitions)
+
+		// skip if this consumer already has all the topic partitions it can get
+		if consumerPartitionCount == len(allSubscriptions[memberID]) {
+			continue
+		}
+
+		// otherwise make sure it cannot get any more
+		potentialTopicPartitions := allSubscriptions[memberID]
+		for _, partition := range potentialTopicPartitions {
+			if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
+				otherConsumer := allPartitions[partition]
+				otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
+				if consumerPartitionCount < otherConsumerPartitionCount {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+// Reassign all topic partitions that need reassignment until balanced.
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
+	reassignmentPerformed := false
+	modified := false
+
+	// repeat reassignment until no partition can be moved to improve the balance
+	for {
+		modified = false
+		// reassign all reassignable partitions (starting from the partition with least potential consumers and if needed)
+		// until the full list is processed or a balance is achieved
+		for _, partition := range reassignablePartitions {
+			if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
+				break
+			}
+
+			// the partition must have at least two consumers
+			if len(partition2AllPotentialConsumers[partition]) <= 1 {
+				Logger.Printf("Expected more than one potential consumer for topic %s partition %d", partition.Topic, partition.Partition)
+			}
+
+			// the partition must have a consumer
+			consumer := currentPartitionConsumer[partition]
+			if consumer == "" {
+				Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
+			}
+
+			if _, exists := prevAssignment[partition]; exists {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
+					reassignmentPerformed = true
+					modified = true
+					continue
+				}
+			}
+
+			// check if a better-suited consumer exists for the partition; if so, reassign it
+			for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
+					reassignmentPerformed = true
+					modified = true
+					break
+				}
+			}
+		}
+		if !modified {
+			return reassignmentPerformed
+		}
+	}
+}
+// Identify a new consumer for a topic partition and reassign it.
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
+	for _, anotherConsumer := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
+			return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
+		}
+	}
+	return sortedCurrentSubscriptions
+}
+
+// Reassign a specific partition to a new consumer.
+func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
+	consumer := currentPartitionConsumer[partition]
+	// find the correct partition movement considering the stickiness requirement
+	partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
+	return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
+}
+
+// Track the movement of a topic partition after assignment.
+func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	oldConsumer := currentPartitionConsumer[partition]
+	s.movements.movePartition(partition, oldConsumer, newConsumer)
+
+	currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
+	currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
+	currentPartitionConsumer[partition] = newConsumer
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Determine whether a specific consumer should be considered for topic partition assignment.
+func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	currentPartitions := currentAssignment[memberID]
+	currentAssignmentSize := len(currentPartitions)
+	maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
+	if currentAssignmentSize > maxAssignmentSize {
+		Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
+	}
+	if currentAssignmentSize < maxAssignmentSize {
+		// if a consumer is not assigned all its potential partitions it is subject to reassignment
+		return true
+	}
+	for _, partition := range currentPartitions {
+		if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			return true
+		}
+	}
+	return false
+}
+// Only consider reassigning those topic partitions that have two or more potential consumers.
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	return len(partition2AllPotentialConsumers[partition]) >= 2
+}
+
+// The assignment should improve the overall balance of the partition assignments to consumers.
+func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	for _, memberID := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
+			currentAssignment[memberID] = append(currentAssignment[memberID], partition)
+			currentPartitionConsumer[partition] = memberID
+			break
+		}
+	}
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
+func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
+	userDataV1 := &StickyAssignorUserDataV1{}
+	if err := decode(userDataBytes, userDataV1); err != nil {
+		userDataV0 := &StickyAssignorUserDataV0{}
+		if err := decode(userDataBytes, userDataV0); err != nil {
+			return nil, err
+		}
+		return userDataV0, nil
+	}
+	return userDataV1, nil
+}
+
+// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
+// to those topic partitions currently reported by the Kafka cluster.
+func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
+	assignments := deepCopyAssignment(currentAssignment)
+	for memberID, partitions := range assignments {
+		// perform in-place filtering
+		i := 0
+		for _, partition := range partitions {
+			if _, exists := partition2AllPotentialConsumers[partition]; exists {
+				partitions[i] = partition
+				i++
+			}
+		}
+		assignments[memberID] = partitions[:i]
+	}
+	return assignments
+}
+
+func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
+	for i, assignment := range assignments {
+		if assignment == topic {
+			return append(assignments[:i], assignments[i+1:]...)
+		}
+	}
+	return assignments
+}
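+
+// Note that removeTopicPartitionFromMemberAssignments removes in place: the
+// append shifts later elements left within the same backing array, so callers
+// must use the returned slice. A tiny sketch (the values are arbitrary):
+//
+//	s := []topicPartitionAssignment{{Topic: "t", Partition: 0}, {Topic: "t", Partition: 1}}
+//	s = removeTopicPartitionFromMemberAssignments(s, topicPartitionAssignment{Topic: "t", Partition: 0})
+//	// s is now [{t 1}]
+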
+func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
+	for _, assignment := range assignments {
+		if assignment == topic {
+			return true
+		}
+	}
+	return false
+}
+
+func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
+	unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unassignedPartitions[partition] = true
+	}
+
+	sortedPartitions := make([]topicPartitionAssignment, 0)
+	if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
+		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics)
+		// then we just need to simply list partitions in a round robin fashion (from consumers with
+		// most assigned partitions to those with least)
+		assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
+
+		// use priority-queue to evaluate consumer group members in descending-order based on
+		// the number of topic partition assignments (i.e. consumers with most assignments first)
+		pq := make(assignmentPriorityQueue, len(assignments))
+		i := 0
+		for consumerID, consumerAssignments := range assignments {
+			pq[i] = &consumerGroupMember{
+				id:          consumerID,
+				assignments: consumerAssignments,
+			}
+			i++
+		}
+		heap.Init(&pq)
+
+		for {
+			// loop until no consumer-group members remain
+			if pq.Len() == 0 {
+				break
+			}
+			member := pq[0]
+
+			// partitions that were assigned to a different consumer last time
+			var prevPartitionIndex int
+			for i, partition := range member.assignments {
+				if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
+					prevPartitionIndex = i
+					break
+				}
+			}
+
+			if len(member.assignments) > 0 {
+				partition := member.assignments[prevPartitionIndex]
+				sortedPartitions = append(sortedPartitions, partition)
+				delete(unassignedPartitions, partition)
+				if prevPartitionIndex == 0 {
+					member.assignments = member.assignments[1:]
+				} else {
+					member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
+				}
+				heap.Fix(&pq, 0)
+			} else {
+				heap.Pop(&pq)
+			}
+		}
+
+		for partition := range unassignedPartitions {
+			sortedPartitions = append(sortedPartitions, partition)
+		}
+	} else {
+		// an ascending sorted set of topic partitions based on how many consumers can potentially use them
+		sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
+	}
+	return sortedPartitions
+}
+func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
+	// sort the members by the number of partition assignments in ascending order
+	sortedMemberIDs := make([]string, 0, len(assignments))
+	for memberID := range assignments {
+		sortedMemberIDs = append(sortedMemberIDs, memberID)
+	}
+	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
+		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
+		if ret == 0 {
+			return sortedMemberIDs[i] < sortedMemberIDs[j]
+		}
+		return len(assignments[sortedMemberIDs[i]]) < len(assignments[sortedMemberIDs[j]])
+	})
+	return sortedMemberIDs
+}
+
+func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
+	// sort the partitions by the number of potential consumers in ascending order, breaking ties by topic then partition
+	sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
+	i := 0
+	for partition := range partition2AllPotentialConsumers {
+		sortedPartionIDs[i] = partition
+		i++
+	}
+	sort.Slice(sortedPartionIDs, func(i, j int) bool {
+		if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) {
+			ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic)
+			if ret == 0 {
+				return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition
+			}
+			return ret < 0
+		}
+		return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]])
+	})
+	return sortedPartionIDs
+}
+
+func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
+	m := make(map[string][]topicPartitionAssignment, len(assignment))
+	for memberID, subscriptions := range assignment {
+		m[memberID] = append(subscriptions[:0:0], subscriptions...)
+	}
+	return m
+}
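+
+// deepCopyAssignment relies on the full slice expression subscriptions[:0:0],
+// which yields a slice with zero length and zero capacity; appending to it
+// therefore always allocates a fresh backing array, producing an independent
+// copy. An equivalent, more verbose form:
+//
+//	dst := make([]topicPartitionAssignment, len(src))
+//	copy(dst, src)
+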
+func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
+	curMembers := make(map[string]int)
+	for _, cur := range partition2AllPotentialConsumers {
+		if len(curMembers) == 0 {
+			for _, curMembersElem := range cur {
+				curMembers[curMembersElem]++
+			}
+			continue
+		}
+
+		if len(curMembers) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[string]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curMembers {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+
+	curPartitions := make(map[topicPartitionAssignment]int)
+	for _, cur := range consumer2AllPotentialPartitions {
+		if len(curPartitions) == 0 {
+			for _, curPartitionElem := range cur {
+				curPartitions[curPartitionElem]++
+			}
+			continue
+		}
+
+		if len(curPartitions) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[topicPartitionAssignment]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curPartitions {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// We need to process subscriptions' user data with each consumer's reported generation in mind;
+// higher generations overwrite lower generations in case of a conflict.
+// Note that a conflict could exist only if the user data is for different generations.
+func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
+	currentAssignment := make(map[string][]topicPartitionAssignment)
+	prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
+
+	// for each partition we create a sorted map of its consumers by generation
+	sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
+	for memberID, meta := range members {
+		consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, partition := range consumerUserData.partitions() {
+			if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
+				if consumerUserData.hasGeneration() {
+					if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
+						// the same partition is assigned to two consumers during the same rebalance;
+						// log a warning and skip this record
+						Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
+						continue
+					} else {
+						consumers[consumerUserData.generation()] = memberID
+					}
+				} else {
+					consumers[defaultGeneration] = memberID
+				}
+			} else {
+				generation := defaultGeneration
+				if consumerUserData.hasGeneration() {
+					generation = consumerUserData.generation()
+				}
+				sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
+			}
+		}
+	}
+	// prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition;
+	// current and previous consumers are the last two consumers of each partition in the above sorted map
+	for partition, consumers := range sortedPartitionConsumersByGeneration {
+		// sort consumers by generation in decreasing order
+		var generations []int
+		for generation := range consumers {
+			generations = append(generations, generation)
+		}
+		sort.Sort(sort.Reverse(sort.IntSlice(generations)))
+
+		consumer := consumers[generations[0]]
+		if _, exists := currentAssignment[consumer]; !exists {
+			currentAssignment[consumer] = []topicPartitionAssignment{partition}
+		} else {
+			currentAssignment[consumer] = append(currentAssignment[consumer], partition)
+		}
+
+		// check for previous assignment, if any
+		if len(generations) > 1 {
+			prevAssignment[partition] = consumerGenerationPair{
+				MemberID:   consumers[generations[1]],
+				Generation: generations[1],
+			}
+		}
+	}
+	return currentAssignment, prevAssignment, nil
+}
+
+type consumerGenerationPair struct {
+	MemberID   string
+	Generation int
+}
+
+// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
+type consumerPair struct {
+	SrcMemberID string
+	DstMemberID string
+}
+type partitionMovements struct { + PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool + Movements map[topicPartitionAssignment]consumerPair +} + +func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair { + pair := p.Movements[partition] + delete(p.Movements, partition) + + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + delete(partitionMovementsForThisTopic[pair], partition) + if len(partitionMovementsForThisTopic[pair]) == 0 { + delete(partitionMovementsForThisTopic, pair) + } + if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 { + delete(p.PartitionMovementsByTopic, partition.Topic) + } + return pair +} + +func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) { + p.Movements[partition] = pair + if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { + p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool) + } + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + if _, exists := partitionMovementsForThisTopic[pair]; !exists { + partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool) + } + partitionMovementsForThisTopic[pair][partition] = true +} + +func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) { + pair := consumerPair{ + SrcMemberID: oldConsumer, + DstMemberID: newConsumer, + } + if _, exists := p.Movements[partition]; exists { + // this partition has previously moved + existingPair := p.removeMovementRecordOfPartition(partition) + if existingPair.DstMemberID != oldConsumer { + Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer) + } + if existingPair.SrcMemberID != newConsumer { + // the partition is not moving back to its previous consumer + p.addPartitionMovementRecord(partition, consumerPair{ + SrcMemberID: existingPair.SrcMemberID, + DstMemberID: newConsumer, + }) + } + } else { + p.addPartitionMovementRecord(partition, pair) + } +} + +func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment { + if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { + return partition + } + if _, exists := p.Movements[partition]; exists { + // this partition has previously moved + if oldConsumer != p.Movements[partition].DstMemberID { + Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer) + } + oldConsumer = p.Movements[partition].SrcMemberID + } + + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + reversePair := consumerPair{ + SrcMemberID: newConsumer, + DstMemberID: oldConsumer, + } + if _, exists := partitionMovementsForThisTopic[reversePair]; !exists { + return partition + } + var reversePairPartition topicPartitionAssignment + for otherPartition := range partitionMovementsForThisTopic[reversePair] { + reversePairPartition = otherPartition + } + return reversePairPartition +} + +func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) { + if src == dst { + return currentPath, false + } + if len(pairs) == 0 { + return currentPath, false + } + for _, pair := range pairs { + if 
src == pair.SrcMemberID && dst == pair.DstMemberID {
+			currentPath = append(currentPath, src, dst)
+			return currentPath, true
+		}
+	}
+
+	for _, pair := range pairs {
+		if pair.SrcMemberID == src {
+			// create a deep copy of the pairs, excluding the current pair
+			reducedSet := make([]consumerPair, len(pairs)-1)
+			i := 0
+			for _, p := range pairs {
+				if p != pair {
+					reducedSet[i] = p
+					i++
+				}
+			}
+
+			currentPath = append(currentPath, pair.SrcMemberID)
+			return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
+		}
+	}
+	return currentPath, false
+}
+
+func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
+	superCycle := make([]string, len(cycle)-1)
+	for i := 0; i < len(cycle)-1; i++ {
+		superCycle[i] = cycle[i]
+	}
+	superCycle = append(superCycle, cycle...)
+	for _, foundCycle := range cycles {
+		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+	cycles := make([][]string, 0)
+	for _, pair := range pairs {
+		// create a deep copy of the pairs, excluding the current pair
+		reducedPairs := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, p := range pairs {
+			if p != pair {
+				reducedPairs[i] = p
+				i++
+			}
+		}
+		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+			if !p.in(path, cycles) {
+				cycles = append(cycles, path)
+				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+			}
+		}
+	}
+
+	// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
+	// the odds of finding a cycle among more than two consumers seem to be so low (according to various randomized
+	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
+	for _, cycle := range cycles {
+		if len(cycle) == 3 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) isSticky() bool {
+	for topic, movements := range p.PartitionMovementsByTopic {
+		movementPairs := make([]consumerPair, len(movements))
+		i := 0
+		for pair := range movements {
+			movementPairs[i] = pair
+			i++
+		}
+		if p.hasCycles(movementPairs) {
+			Logger.Printf("Stickiness is violated for topic %s", topic)
+			Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
+			return false
+		}
+	}
+	return true
+}
+
+func indexOfSubList(source []string, target []string) int {
+	targetSize := len(target)
+	maxCandidate := len(source) - targetSize
+nextCand:
+	for candidate := 0; candidate <= maxCandidate; candidate++ {
+		j := candidate
+		for i := 0; i < targetSize; i++ {
+			if target[i] != source[j] {
+				// Element mismatch, try the next candidate
+				continue nextCand
+			}
+			j++
+		}
+		// All elements of the candidate matched the target
+		return candidate
+	}
+	return -1
+}
+
+type consumerGroupMember struct {
+	id          string
+	assignments []topicPartitionAssignment
+}
+
+// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
+// in descending order (most assignments to least assignments).
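+//
+// It implements heap.Interface, so callers are expected to drive it through
+// container/heap. A minimal hypothetical sketch (the members are placeholders,
+// not names from this file):
+//
+//	pq := assignmentPriorityQueue{memberA, memberB}
+//	heap.Init(&pq)
+//	busiest := heap.Pop(&pq).(*consumerGroupMember) // most assignments first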
+type assignmentPriorityQueue []*consumerGroupMember
+
+func (pq assignmentPriorityQueue) Len() int { return len(pq) }
+
+func (pq assignmentPriorityQueue) Less(i, j int) bool {
+	// order assignment priority queue in descending order using assignment-count/member-id
+	if len(pq[i].assignments) == len(pq[j].assignments) {
+		return strings.Compare(pq[i].id, pq[j].id) > 0
+	}
+	return len(pq[i].assignments) > len(pq[j].assignments)
+}
+
+func (pq assignmentPriorityQueue) Swap(i, j int) {
+	pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *assignmentPriorityQueue) Push(x interface{}) {
+	member := x.(*consumerGroupMember)
+	*pq = append(*pq, member)
+}
+
+func (pq *assignmentPriorityQueue) Pop() interface{} {
+	old := *pq
+	n := len(old)
+	member := old[n-1]
+	*pq = old[0 : n-1]
+	return member
+}
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
new file mode 100644
index 00000000000..5858a23c076
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -0,0 +1,1441 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+	conf *Config
+	rack *string
+
+	id            int32
+	addr          string
+	correlationID int32
+	conn          net.Conn
+	connErr       error
+	lock          sync.Mutex
+	opened        int32
+	responses     chan responsePromise
+	done          chan bool
+
+	registeredMetrics []string
+
+	incomingByteRate       metrics.Meter
+	requestRate            metrics.Meter
+	requestSize            metrics.Histogram
+	requestLatency         metrics.Histogram
+	outgoingByteRate       metrics.Meter
+	responseRate           metrics.Meter
+	responseSize           metrics.Histogram
+	requestsInFlight       metrics.Counter
+	brokerIncomingByteRate metrics.Meter
+	brokerRequestRate      metrics.Meter
+	brokerRequestSize      metrics.Histogram
+	brokerRequestLatency   metrics.Histogram
+	brokerOutgoingByteRate metrics.Meter
+	brokerResponseRate     metrics.Meter
+	brokerResponseSize     metrics.Histogram
+	brokerRequestsInFlight metrics.Counter
+
+	kerberosAuthenticator GSSAPIKerberosAuth
+}
+
+// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
+type SASLMechanism string
+
+const (
+	// SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
+	SASLTypeOAuth = "OAUTHBEARER"
+	// SASLTypePlaintext represents the SASL/PLAIN mechanism
+	SASLTypePlaintext = "PLAIN"
+	// SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
+	SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
+	// SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
+	SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
+	// SASLTypeGSSAPI represents the GSSAPI mechanism.
+	SASLTypeGSSAPI = "GSSAPI"
+	// SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
+	// server negotiate SASL auth using opaque packets.
+	SASLHandshakeV0 = int16(0)
+	// SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
+	// server negotiate SASL by wrapping tokens with Kafka protocol headers.
+	SASLHandshakeV1 = int16(1)
+	// SASLExtKeyAuth is the reserved extension key name sent as part of the
+	// SASL/OAUTHBEARER initial client response
+	SASLExtKeyAuth = "auth"
+)
+
+// AccessToken contains an access token used to authenticate a
+// SASL/OAUTHBEARER client along with associated metadata.
+type AccessToken struct {
+	// Token is the access token payload.
+	Token string
+	// Extensions is an optional map of arbitrary key-value pairs that can be
+	// sent with the SASL/OAUTHBEARER initial client response. These values are
+	// ignored by the SASL server if they are unexpected. This feature is only
+	// supported by Kafka >= 2.1.0.
+	Extensions map[string]string
+}
+
+// AccessTokenProvider is the interface that encapsulates how implementors
+// can generate access tokens for Kafka broker authentication.
+type AccessTokenProvider interface {
+	// Token returns an access token. The implementation should ensure token
+	// reuse so that multiple calls at connect time do not create multiple
+	// tokens. The implementation should also periodically refresh the token in
+	// order to guarantee that each call returns an unexpired token. This
+	// method should not block indefinitely--a timeout error should be returned
+	// after a short period of inactivity so that the broker connection logic
+	// can log debugging information and retry.
+	Token() (*AccessToken, error)
+}
+
+// SCRAMClient is an interface to a SCRAM
+// client implementation.
+type SCRAMClient interface {
+	// Begin prepares the client for the SCRAM exchange
+	// with the server with a user name and a password
+	Begin(userName, password, authzID string) error
+	// Step steps client through the SCRAM exchange. It is
+	// called repeatedly until it errors or `Done` returns true.
+	Step(challenge string) (response string, err error)
+	// Done should return true when the SCRAM conversation
+	// is over.
+	Done() bool
+}
+
+type responsePromise struct {
+	requestTime   time.Time
+	correlationID int32
+	headerVersion int16
+	packets       chan []byte
+	errors        chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+	return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
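+//
+// A minimal hypothetical usage sketch (the address is a placeholder):
+//
+//	broker := NewBroker("localhost:9092")
+//	if err := broker.Open(nil); err != nil { // nil falls back to NewConfig()
+//		return err
+//	}
+//	connected, err := broker.Connected() // blocks until the dial attempt settles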
+func (b *Broker) Open(conf *Config) error { + if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { + return ErrAlreadyConnected + } + + if conf == nil { + conf = NewConfig() + } + + err := conf.Validate() + if err != nil { + return err + } + + b.lock.Lock() + + go withRecover(func() { + defer b.lock.Unlock() + + dialer := conf.getDialer() + b.conn, b.connErr = dialer.Dial("tcp", b.addr) + if b.connErr != nil { + Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + if conf.Net.TLS.Enable { + b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config)) + } + + b.conn = newBufConn(b.conn) + b.conf = conf + + // Create or reuse the global metrics shared between brokers + b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry) + b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry) + b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry) + b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry) + b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) + b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) + b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) + b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry) + // Do not gather metrics for seeded broker (only used during bootstrap) because they share + // the same id (-1) and are already exposed through the global metrics above + if b.id >= 0 { + b.registerMetrics() + } + + if conf.Net.SASL.Enable { + b.connErr = b.authenticateViaSASL() + + if b.connErr != nil { + err = b.conn.Close() + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + } + + b.done = make(chan bool) + b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) + + if b.id >= 0 { + Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) + } else { + Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) + } + go withRecover(b.responseReceiver) + }) + + return nil +} + +// Connected returns true if the broker is connected and false otherwise. If the broker is not +// connected but it had tried to connect, the error from that connection attempt is also returned. +func (b *Broker) Connected() (bool, error) { + b.lock.Lock() + defer b.lock.Unlock() + + return b.conn != nil, b.connErr +} + +//Close closes the broker resources +func (b *Broker) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + return ErrNotConnected + } + + close(b.responses) + <-b.done + + err := b.conn.Close() + + b.conn = nil + b.connErr = nil + b.done = nil + b.responses = nil + + b.unregisterMetrics() + + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + + atomic.StoreInt32(&b.opened, 0) + + return err +} + +// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. +func (b *Broker) ID() int32 { + return b.id +} + +// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. 
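+// The value is in host:port form; when it comes from metadata it is assembled
+// with net.JoinHostPort (see decode below).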
+func (b *Broker) Addr() string {
+	return b.addr
+}
+
+// Rack returns the broker's rack as retrieved from Kafka's metadata or the
+// empty string if it is not known. The returned value corresponds to the
+// broker's broker.rack configuration setting. Requires protocol version to be
+// at least v0.10.0.0.
+func (b *Broker) Rack() string {
+	if b.rack == nil {
+		return ""
+	}
+	return *b.rack
+}
+
+//GetMetadata sends a metadata request and returns a metadata response or error
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+	response := new(MetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+	response := new(ConsumerMetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//FindCoordinator sends a find coordinator request and returns a response or error
+func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+	response := new(FindCoordinatorResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//GetAvailableOffsets returns an offset response or error
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+	response := new(OffsetResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//Produce returns a produce response or error
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+	var (
+		response *ProduceResponse
+		err      error
+	)
+
+	if request.RequiredAcks == NoResponse {
+		err = b.sendAndReceive(request, nil)
+	} else {
+		response = new(ProduceResponse)
+		err = b.sendAndReceive(request, response)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//Fetch returns a FetchResponse or error
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+	response := new(FetchResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//CommitOffset returns an offset commit response or error
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+	response := new(OffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//FetchOffset returns an offset fetch response or error
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+	response := new(OffsetFetchResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//JoinGroup returns a join group response or error
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+	response := new(JoinGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//SyncGroup returns a sync group response or error
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+	response := new(SyncGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//LeaveGroup returns a leave group response or error
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+	response := new(LeaveGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//Heartbeat returns a heartbeat response or error
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+	response := new(HeartbeatResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//ListGroups returns a list group response or error
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+	response := new(ListGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DescribeGroups returns a describe groups response or error
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+	response := new(DescribeGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//ApiVersions returns an api version response or error
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+	response := new(ApiVersionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//CreateTopics sends a create topic request and returns a create topic response
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	response := new(CreateTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DeleteTopics sends a delete topic request and returns a delete topic response
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+	response := new(DeleteTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//CreatePartitions sends a create partition request and returns a create
+//partitions response or error
+func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+	response := new(CreatePartitionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//AlterPartitionReassignments sends an alter partition reassignments request and
+//returns an alter partition reassignments response
+func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
+	response := new(AlterPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//ListPartitionReassignments sends a list partition reassignments request and
+//returns a list partition reassignments response
+func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
+	response := new(ListPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DeleteRecords sends a request to delete records and returns a delete records
+//response or error
+func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
+	response := new(DeleteRecordsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DescribeAcls sends a describe acl request and returns a response or error
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+	response := new(DescribeAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//CreateAcls sends a create acl request and returns a response or error
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+	response := new(CreateAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DeleteAcls sends a delete acl request and returns a response or error
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+	response := new(DeleteAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//InitProducerID sends an init producer request and returns a response or error
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+	response := new(InitProducerIDResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//AddPartitionsToTxn sends a request to add partitions to a txn and returns
+//a response or error
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+	response := new(AddPartitionsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//AddOffsetsToTxn sends a request to add offsets to txn and returns a response
+//or error
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+	response := new(AddOffsetsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//EndTxn sends a request to end txn and returns a response or error
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+	response := new(EndTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//TxnOffsetCommit sends a request to commit transaction offsets and returns
+//a response or error
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+	response := new(TxnOffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DescribeConfigs sends a request to describe config and returns a response or
+//error
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	response := new(DescribeConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//AlterConfigs sends a request to alter config and returns a response or error
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+	response := 
new(AlterConfigsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//DeleteGroups sends a request to delete groups and returns a response or error +func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) { + response := new(DeleteGroupsResponse) + + if err := b.sendAndReceive(request, response); err != nil { + return nil, err + } + + return response, nil +} + +//DescribeLogDirs sends a request to get the broker's log dir paths and sizes +func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) { + response := new(DescribeLogDirsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +// readFull ensures the conn ReadDeadline has been setup before making a +// call to io.ReadFull +func (b *Broker) readFull(buf []byte) (n int, err error) { + if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil { + return 0, err + } + + return io.ReadFull(b.conn, buf) +} + +// write ensures the conn WriteDeadline has been setup before making a +// call to conn.Write +func (b *Broker) write(buf []byte) (n int, err error) { + if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil { + return 0, err + } + + return b.conn.Write(buf) +} + +func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + if b.connErr != nil { + return nil, b.connErr + } + return nil, ErrNotConnected + } + + if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { + return nil, ErrUnsupportedVersion + } + + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return nil, err + } + + requestTime := time.Now() + // Will be decremented in responseReceiver (except error or request with NoResponse) + b.addRequestInFlightMetrics(1) + bytes, err := b.write(buf) + b.updateOutgoingCommunicationMetrics(bytes) + if err != nil { + b.addRequestInFlightMetrics(-1) + return nil, err + } + b.correlationID++ + + if !promiseResponse { + // Record request latency without the response + b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime)) + return nil, nil + } + + promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)} + b.responses <- promise + + return &promise, nil +} + +func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { + responseHeaderVersion := int16(-1) + if res != nil { + responseHeaderVersion = res.headerVersion() + } + + promise, err := b.send(req, res != nil, responseHeaderVersion) + if err != nil { + return err + } + + if promise == nil { + return nil + } + + select { + case buf := <-promise.packets: + return versionedDecode(buf, res, req.version()) + case err = <-promise.errors: + return err + } +} + +func (b *Broker) decode(pd packetDecoder, version int16) (err error) { + b.id, err = pd.getInt32() + if err != nil { + return err + } + + host, err := pd.getString() + if err != nil { + return err + } + + port, err := pd.getInt32() + if err != nil { + return err + } + + if version >= 1 { + b.rack, err = pd.getNullableString() + if err != nil { + return err + } + } + + b.addr = net.JoinHostPort(host, fmt.Sprint(port)) + if _, _, err := 
net.SplitHostPort(b.addr); err != nil { + return err + } + + return nil +} + +func (b *Broker) encode(pe packetEncoder, version int16) (err error) { + host, portstr, err := net.SplitHostPort(b.addr) + if err != nil { + return err + } + + port, err := strconv.Atoi(portstr) + if err != nil { + return err + } + + pe.putInt32(b.id) + + err = pe.putString(host) + if err != nil { + return err + } + + pe.putInt32(int32(port)) + + if version >= 1 { + err = pe.putNullableString(b.rack) + if err != nil { + return err + } + } + + return nil +} + +func (b *Broker) responseReceiver() { + var dead error + + for response := range b.responses { + if dead != nil { + // This was previously incremented in send() and + // we are not calling updateIncomingCommunicationMetrics() + b.addRequestInFlightMetrics(-1) + response.errors <- dead + continue + } + + var headerLength = getHeaderLength(response.headerVersion) + header := make([]byte, headerLength) + + bytesReadHeader, err := b.readFull(header) + requestLatency := time.Since(response.requestTime) + if err != nil { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + dead = err + response.errors <- err + continue + } + + decodedHeader := responseHeader{} + err = versionedDecode(header, &decodedHeader, response.headerVersion) + if err != nil { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + dead = err + response.errors <- err + continue + } + if decodedHeader.correlationID != response.correlationID { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + // TODO if decoded ID < cur ID, discard until we catch up + // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response + dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} + response.errors <- dead + continue + } + + buf := make([]byte, decodedHeader.length-int32(headerLength)+4) + bytesReadBody, err := b.readFull(buf) + b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) + if err != nil { + dead = err + response.errors <- err + continue + } + + response.packets <- buf + } + close(b.done) +} + +func getHeaderLength(headerVersion int16) int8 { + if headerVersion < 1 { + return 8 + } else { + // header contains additional tagged field length (0), we don't support actual tags yet. 
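+		// i.e. 4 bytes length field + 4 bytes correlation id + 1 byte empty tagged-field buffer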
+ return 9 + } +} + +func (b *Broker) authenticateViaSASL() error { + switch b.conf.Net.SASL.Mechanism { + case SASLTypeOAuth: + return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider) + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + return b.sendAndReceiveSASLSCRAMv1() + case SASLTypeGSSAPI: + return b.sendAndReceiveKerberos() + default: + return b.sendAndReceiveSASLPlainAuth() + } +} + +func (b *Broker) sendAndReceiveKerberos() error { + b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI + if b.kerberosAuthenticator.NewKerberosClientFunc == nil { + b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient + } + return b.kerberosAuthenticator.Authorize(b) +} + +func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error { + rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version} + + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return err + } + + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytes, err := b.write(buf) + b.updateOutgoingCommunicationMetrics(bytes) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) + return err + } + b.correlationID++ + + header := make([]byte, 8) // response header + _, err = b.readFull(header) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) + return err + } + + length := binary.BigEndian.Uint32(header[:4]) + payload := make([]byte, length-4) + n, err := b.readFull(payload) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) + return err + } + + b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) + res := &SaslHandshakeResponse{} + + err = versionedDecode(payload, res, 0) + if err != nil { + Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) + return err + } + + if res.Err != ErrNoError { + Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) + return res.Err + } + + Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms) + return nil +} + +// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43). +// Kafka 1.x.x onward added a SaslAuthenticate request/response message which +// wraps the SASL flow in the Kafka protocol, which allows for returning +// meaningful errors on authentication failure. +// +// In SASL Plain, Kafka expects the auth header to be in the following format +// Message format (from https://tools.ietf.org/html/rfc4616): +// +// message = [authzid] UTF8NUL authcid UTF8NUL passwd +// authcid = 1*SAFE ; MUST accept up to 255 octets +// authzid = 1*SAFE ; MUST accept up to 255 octets +// passwd = 1*SAFE ; MUST accept up to 255 octets +// UTF8NUL = %x00 ; UTF-8 encoded NUL character +// +// SAFE = UTF1 / UTF2 / UTF3 / UTF4 +// ;; any UTF-8 encoded Unicode character except NUL +// +// With SASL v0 handshake and auth then: +// When credentials are valid, Kafka returns a 4 byte array of null characters. +// When credentials are invalid, Kafka closes the connection. 
+// +// With SASL v1 handshake and auth then: +// When credentials are invalid, Kafka replies with a SaslAuthenticate response +// containing an error code and message detailing the authentication failure. +func (b *Broker) sendAndReceiveSASLPlainAuth() error { + // default to V0 to allow for backward compatibility when SASL is enabled + // but not the handshake + if b.conf.Net.SASL.Handshake { + handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) + if handshakeErr != nil { + Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + return handshakeErr + } + } + + if b.conf.Net.SASL.Version == SASLHandshakeV1 { + return b.sendAndReceiveV1SASLPlainAuth() + } + return b.sendAndReceiveV0SASLPlainAuth() +} + +// sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol +func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { + length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) + authBytes := make([]byte, length+4) //4 byte length header + auth data + binary.BigEndian.PutUint32(authBytes, uint32(length)) + copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password) + + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytesWritten, err := b.write(authBytes) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + header := make([]byte, 4) + n, err := b.readFull(header) + b.updateIncomingCommunicationMetrics(n, time.Since(requestTime)) + // If the credentials are valid, we would get a 4 byte response filled with null characters. 
+	// Otherwise, the broker closes the connection and we get an EOF
+	if err != nil {
+		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+	return nil
+}
+
+// sendAndReceiveV1SASLPlainAuth flows the v1 sasl authentication using the kafka protocol
+func (b *Broker) sendAndReceiveV1SASLPlainAuth() error {
+	correlationID := b.correlationID
+
+	requestTime := time.Now()
+
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	b.correlationID++
+
+	bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID)
+	b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime))
+
+	// With v1 SASL we get an error message set in the response that we can return
+	if err != nil {
+		Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
+// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
+func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error {
+	if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil {
+		return err
+	}
+
+	token, err := provider.Token()
+	if err != nil {
+		return err
+	}
+
+	message, err := buildClientFirstMessage(token)
+	if err != nil {
+		return err
+	}
+
+	challenged, err := b.sendClientMessage(message)
+	if err != nil {
+		return err
+	}
+
+	if challenged {
+		// Abort the token exchange. The broker returns the failure code.
+		// Per RFC 7628 the abort message is a single 0x01 control character,
+		// so use an interpreted string literal rather than a raw one.
+		_, err = b.sendClientMessage([]byte("\x01"))
+	}
+
+	return err
+}
+
+// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true
+// if the broker responds with a challenge, in which case the token is
+// rejected.
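+//
+// For reference, the message it sends on the happy path is the RFC 7628
+// client response produced by buildClientFirstMessage below, of the shape
+//
+//	n,,\x01auth=Bearer <token>[\x01key=value...]\x01\x01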
+func (b *Broker) sendClientMessage(message []byte) (bool, error) { + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + correlationID := b.correlationID + + bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + return false, err + } + + b.correlationID++ + + res := &SaslAuthenticateResponse{} + bytesRead, err := b.receiveSASLServerResponse(res, correlationID) + + requestLatency := time.Since(requestTime) + b.updateIncomingCommunicationMetrics(bytesRead, requestLatency) + + isChallenge := len(res.SaslAuthBytes) > 0 + + if isChallenge && err != nil { + Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes) + } + + return isChallenge, err +} + +func (b *Broker) sendAndReceiveSASLSCRAMv1() error { + if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil { + return err + } + + scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc() + if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { + return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error()) + } + + msg, err := scramClient.Step("") + if err != nil { + return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) + } + + for !scramClient.Done() { + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + correlationID := b.correlationID + bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg)) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.correlationID++ + challenge, err := b.receiveSaslAuthenticateResponse(correlationID) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime)) + msg, err = scramClient.Step(string(challenge)) + if err != nil { + Logger.Println("SASL authentication failed", err) + return err + } + } + + Logger.Println("SASL authentication succeeded") + return nil +} + +func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) { + rb := &SaslAuthenticateRequest{msg} + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) { + buf := make([]byte, responseLengthSize+correlationIDSize) + _, err := b.readFull(buf) + if err != nil { + return nil, err + } + + header := responseHeader{} + err = versionedDecode(buf, &header, 0) + if err != nil { + return nil, err + } + + if header.correlationID != correlationID { + return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + _, err = b.readFull(buf) + if err != nil { + return nil, err + } + + res := &SaslAuthenticateResponse{} + if err := 
versionedDecode(buf, res, 0); err != nil { + return nil, err + } + if res.Err != ErrNoError { + return nil, res.Err + } + return res.SaslAuthBytes, nil +} + +// Build SASL/OAUTHBEARER initial client response as described by RFC-7628 +// https://tools.ietf.org/html/rfc7628 +func buildClientFirstMessage(token *AccessToken) ([]byte, error) { + var ext string + + if token.Extensions != nil && len(token.Extensions) > 0 { + if _, ok := token.Extensions[SASLExtKeyAuth]; ok { + return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth) + } + ext = "\x01" + mapToString(token.Extensions, "=", "\x01") + } + + resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext)) + + return resp, nil +} + +// mapToString returns a list of key-value pairs ordered by key. +// keyValSep separates the key from the value. elemSep separates each pair. +func mapToString(extensions map[string]string, keyValSep string, elemSep string) string { + buf := make([]string, 0, len(extensions)) + + for k, v := range extensions { + buf = append(buf, k+keyValSep+v) + } + + sort.Strings(buf) + + return strings.Join(buf, elemSep) +} + +func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) { + authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) + rb := &SaslAuthenticateRequest{authBytes} + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) { + rb := &SaslAuthenticateRequest{initialResp} + + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) { + buf := make([]byte, responseLengthSize+correlationIDSize) + bytesRead, err := b.readFull(buf) + if err != nil { + return bytesRead, err + } + + header := responseHeader{} + err = versionedDecode(buf, &header, 0) + if err != nil { + return bytesRead, err + } + + if header.correlationID != correlationID { + return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + c, err := b.readFull(buf) + bytesRead += c + if err != nil { + return bytesRead, err + } + + if err := versionedDecode(buf, res, 0); err != nil { + return bytesRead, err + } + + if res.Err != ErrNoError { + return bytesRead, res.Err + } + + return bytesRead, nil +} + +func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { + b.updateRequestLatencyAndInFlightMetrics(requestLatency) + b.responseRate.Mark(1) + + if b.brokerResponseRate != nil { + b.brokerResponseRate.Mark(1) + } + + responseSize := int64(bytes) + b.incomingByteRate.Mark(responseSize) + if b.brokerIncomingByteRate != nil { + b.brokerIncomingByteRate.Mark(responseSize) + } + + b.responseSize.Update(responseSize) + if b.brokerResponseSize != nil { + b.brokerResponseSize.Update(responseSize) + } +} + +func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) { + requestLatencyInMs := int64(requestLatency / time.Millisecond) + 
b.requestLatency.Update(requestLatencyInMs) + + if b.brokerRequestLatency != nil { + b.brokerRequestLatency.Update(requestLatencyInMs) + } + + b.addRequestInFlightMetrics(-1) +} + +func (b *Broker) addRequestInFlightMetrics(i int64) { + b.requestsInFlight.Inc(i) + if b.brokerRequestsInFlight != nil { + b.brokerRequestsInFlight.Inc(i) + } +} + +func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { + b.requestRate.Mark(1) + if b.brokerRequestRate != nil { + b.brokerRequestRate.Mark(1) + } + + requestSize := int64(bytes) + b.outgoingByteRate.Mark(requestSize) + if b.brokerOutgoingByteRate != nil { + b.brokerOutgoingByteRate.Mark(requestSize) + } + + b.requestSize.Update(requestSize) + if b.brokerRequestSize != nil { + b.brokerRequestSize.Update(requestSize) + } +} + +func (b *Broker) registerMetrics() { + b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate") + b.brokerRequestRate = b.registerMeter("request-rate") + b.brokerRequestSize = b.registerHistogram("request-size") + b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms") + b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate") + b.brokerResponseRate = b.registerMeter("response-rate") + b.brokerResponseSize = b.registerHistogram("response-size") + b.brokerRequestsInFlight = b.registerCounter("requests-in-flight") +} + +func (b *Broker) unregisterMetrics() { + for _, name := range b.registeredMetrics { + b.conf.MetricRegistry.Unregister(name) + } + b.registeredMetrics = nil +} + +func (b *Broker) registerMeter(name string) metrics.Meter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry) +} + +func (b *Broker) registerHistogram(name string) metrics.Histogram { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry) +} + +func (b *Broker) registerCounter(name string) metrics.Counter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry) +} + +func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config { + if cfg == nil { + cfg = &tls.Config{} + } + if cfg.ServerName != "" { + return cfg + } + + c := cfg.Clone() + sn, _, err := net.SplitHostPort(addr) + if err != nil { + Logger.Println(fmt.Errorf("failed to get ServerName from addr %w", err)) + } + c.ServerName = sn + return c +} diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go new file mode 100644 index 00000000000..f0073382241 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/client.go @@ -0,0 +1,1100 @@ +package sarama + +import ( + "math/rand" + "sort" + "sync" + "time" +) + +// Client is a generic Kafka client. It manages connections to one or more Kafka brokers. +// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected +// automatically when it passes out of scope. It is safe to share a client amongst many +// users, however Kafka will process requests from a single client strictly in serial, +// so it is generally more efficient to use the default one client per producer/consumer. +type Client interface { + // Config returns the Config struct of the client. This struct should not be + // altered after it has been created. 
+ Config() *Config + + // Controller returns the cluster controller broker. It will return a + // locally cached value if it's available. You can call RefreshController + // to update the cached value. Requires Kafka 0.10 or higher. + Controller() (*Broker, error) + + // RefreshController retrieves the cluster controller from fresh metadata + // and stores it in the local cache. Requires Kafka 0.10 or higher. + RefreshController() (*Broker, error) + + // Brokers returns the current set of active brokers as retrieved from cluster metadata. + Brokers() []*Broker + + // Topics returns the set of available topics as retrieved from cluster metadata. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. + Partitions(topic string) ([]int32, error) + + // WritablePartitions returns the sorted list of all writable partition IDs for + // the given topic, where "writable" means "having a valid leader accepting + // writes". + WritablePartitions(topic string) ([]int32, error) + + // Leader returns the broker object that is the leader of the current + // topic/partition, as determined by querying the cluster metadata. + Leader(topic string, partitionID int32) (*Broker, error) + + // Replicas returns the set of all replica IDs for the given partition. + Replicas(topic string, partitionID int32) ([]int32, error) + + // InSyncReplicas returns the set of all in-sync replica IDs for the given + // partition. In-sync replicas are replicas which are fully caught up with + // the partition leader. + InSyncReplicas(topic string, partitionID int32) ([]int32, error) + + // OfflineReplicas returns the set of all offline replica IDs for the given + // partition. Offline replicas are replicas which are offline + OfflineReplicas(topic string, partitionID int32) ([]int32, error) + + // RefreshBrokers takes a list of addresses to be used as seed brokers. + // Existing broker connections are closed and the updated list of seed brokers + // will be used for the next metadata fetch. + RefreshBrokers(addrs []string) error + + // RefreshMetadata takes a list of topics and queries the cluster to refresh the + // available metadata for those topics. If no topics are provided, it will refresh + // metadata for all topics. + RefreshMetadata(topics ...string) error + + // GetOffset queries the cluster to get the most recent available offset at the + // given time (in milliseconds) on the topic/partition combination. + // Time should be OffsetOldest for the earliest available offset, + // OffsetNewest for the offset of the message that will be produced next, or a time. + GetOffset(topic string, partitionID int32, time int64) (int64, error) + + // Coordinator returns the coordinating broker for a consumer group. It will + // return a locally cached value if it's available. You can call + // RefreshCoordinator to update the cached value. This function only works on + // Kafka 0.8.2 and higher. + Coordinator(consumerGroup string) (*Broker, error) + + // RefreshCoordinator retrieves the coordinator for a consumer group and stores it + // in local cache. This function only works on Kafka 0.8.2 and higher. + RefreshCoordinator(consumerGroup string) error + + // InitProducerID retrieves information required for Idempotent Producer + InitProducerID() (*InitProducerIDResponse, error) + + // Close shuts down all broker connections managed by this client. It is required + // to call this function before a client object passes out of scope, as it will + // otherwise leak memory. 
You must close any Producers or Consumers using a client + // before you close the client. + Close() error + + // Closed returns true if the client has already had Close called on it + Closed() bool +} + +const ( + // OffsetNewest stands for the log head offset, i.e. the offset that will be + // assigned to the next message that will be produced to the partition. You + // can send this to a client's GetOffset method to get this offset, or when + // calling ConsumePartition to start consuming new messages. + OffsetNewest int64 = -1 + // OffsetOldest stands for the oldest offset available on the broker for a + // partition. You can send this to a client's GetOffset method to get this + // offset, or when calling ConsumePartition to start consuming from the + // oldest offset that is still available on the broker. + OffsetOldest int64 = -2 +) + +type client struct { + conf *Config + closer, closed chan none // for shutting down background metadata updater + + // the broker addresses given to us through the constructor are not guaranteed to be returned in + // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) + // so we store them separately + seedBrokers []*Broker + deadSeeds []*Broker + + controllerID int32 // cluster controller broker id + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + metadataTopics map[string]none // topics that need to collect metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + + // If the number of partitions is large, we can get some churn calling cachedPartitions, + // so the result is cached. It is important to update this value whenever metadata is changed + cachedPartitionsResults map[string][maxPartitionIndex][]int32 + + lock sync.RWMutex // protects access to the maps that hold cluster state. +} + +// NewClient creates a new Client. It connects to one of the given broker addresses +// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot +// be retrieved from any of the given broker addresses, the client is not created. 
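+//
+// A minimal hypothetical usage sketch (the seed address is a placeholder):
+//
+//	client, err := NewClient([]string{"kafka-0:9092"}, NewConfig())
+//	if err != nil {
+//		return err
+//	}
+//	defer client.Close()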
+func NewClient(addrs []string, conf *Config) (Client, error) { + Logger.Println("Initializing new client") + + if conf == nil { + conf = NewConfig() + } + + if err := conf.Validate(); err != nil { + return nil, err + } + + if len(addrs) < 1 { + return nil, ConfigurationError("You must provide at least one broker address") + } + + client := &client{ + conf: conf, + closer: make(chan none), + closed: make(chan none), + brokers: make(map[int32]*Broker), + metadata: make(map[string]map[int32]*PartitionMetadata), + metadataTopics: make(map[string]none), + cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), + coordinators: make(map[string]int32), + } + + client.randomizeSeedBrokers(addrs) + + if conf.Metadata.Full { + // do an initial fetch of all cluster metadata by specifying an empty list of topics + err := client.RefreshMetadata() + switch err { + case nil: + break + case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: + // indicates that maybe part of the cluster is down, but is not fatal to creating the client + Logger.Println(err) + default: + close(client.closed) // we haven't started the background updater yet, so we have to do this manually + _ = client.Close() + return nil, err + } + } + go withRecover(client.backgroundMetadataUpdater) + + Logger.Println("Successfully initialized new client") + + return client, nil +} + +func (client *client) Config() *Config { + return client.conf +} + +func (client *client) Brokers() []*Broker { + client.lock.RLock() + defer client.lock.RUnlock() + brokers := make([]*Broker, 0, len(client.brokers)) + for _, broker := range client.brokers { + brokers = append(brokers, broker) + } + return brokers +} + +func (client *client) InitProducerID() (*InitProducerIDResponse, error) { + var err error + for broker := client.any(); broker != nil; broker = client.any() { + req := &InitProducerIDRequest{} + + response, err := broker.InitProducerID(req) + switch err.(type) { + case nil: + return response, nil + default: + // some error, remove that broker and try again + Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err) + _ = broker.Close() + client.deregisterBroker(broker) + } + } + return nil, err +} + +func (client *client) Close() error { + if client.Closed() { + // Chances are this is being called from a defer() and the error will go unobserved + // so we go ahead and log the event in this case. 
+ Logger.Printf("Close() called on already closed client") + return ErrClosedClient + } + + // shutdown and wait for the background thread before we take the lock, to avoid races + close(client.closer) + <-client.closed + + client.lock.Lock() + defer client.lock.Unlock() + Logger.Println("Closing Client") + + for _, broker := range client.brokers { + safeAsyncClose(broker) + } + + for _, broker := range client.seedBrokers { + safeAsyncClose(broker) + } + + client.brokers = nil + client.metadata = nil + client.metadataTopics = nil + + return nil +} + +func (client *client) Closed() bool { + client.lock.RLock() + defer client.lock.RUnlock() + + return client.brokers == nil +} + +func (client *client) Topics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadata)) + for topic := range client.metadata { + ret = append(ret, topic) + } + + return ret, nil +} + +func (client *client) MetadataTopics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadataTopics)) + for topic := range client.metadataTopics { + ret = append(ret, topic) + } + + return ret, nil +} + +func (client *client) Partitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, allPartitions) + + if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, allPartitions) + } + + // no partitions found after refresh metadata + if len(partitions) == 0 { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) WritablePartitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, writablePartitions) + + // len==0 catches when it's nil (no such topic) and the odd case when every single + // partition is undergoing leader election simultaneously. Callers have to be able to handle + // this function returning an empty slice (which is a valid return value) but catching it + // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers + // a metadata refresh as a nicety so callers can just try again and don't have to manually + // trigger a refresh (otherwise they'd just keep getting a stale cached copy). 
+	if len(partitions) == 0 {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, writablePartitions)
+	}
+
+	if partitions == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.Replicas), metadata.Err
+	}
+	return dupInt32Slice(metadata.Replicas), nil
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.Isr), metadata.Err
+	}
+	return dupInt32Slice(metadata.Isr), nil
+}
+
+func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
+	}
+	return dupInt32Slice(metadata.OfflineReplicas), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	leader, err := client.cachedLeader(topic, partitionID)
+
+	if leader == nil {
+		err = client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		leader, err = client.cachedLeader(topic, partitionID)
+	}
+
+	return leader, err
+}
+
+func (client *client) RefreshBrokers(addrs []string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	for _, broker := range client.brokers {
+		_ = broker.Close()
+		delete(client.brokers, broker.ID())
+	}
+
+	client.seedBrokers = nil
+	client.deadSeeds = nil
+
+	client.randomizeSeedBrokers(addrs)
+
+	return nil
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	// Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
+	// error. This handles the case by returning an error instead of sending it
+	// off to Kafka.
+	// See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+	for _, topic := range topics {
+		if len(topic) == 0 {
+			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+		}
+	}
+
+	deadline := time.Time{}
+	if client.conf.Metadata.Timeout > 0 {
+		deadline = time.Now().Add(client.conf.Metadata.Timeout)
+	}
+	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+	if client.Closed() {
+		return -1, ErrClosedClient
+	}
+
+	offset, err := client.getOffset(topic, partitionID, time)
+
+	if err != nil {
+		if err := client.RefreshMetadata(topic); err != nil {
+			return -1, err
+		}
+		return client.getOffset(topic, partitionID, time)
+	}
+
+	return offset, err
+}
+
+func (client *client) Controller() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	if !client.conf.Version.IsAtLeast(V0_10_0_0) {
+		return nil, ErrUnsupportedVersion
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		if err := client.refreshMetadata(); err != nil {
+			return nil, err
+		}
+		controller = client.cachedController()
+	}
+
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
+// deregisterController removes the cached controllerID
+func (client *client) deregisterController() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	delete(client.brokers, client.controllerID)
+}
+
+// RefreshController retrieves the cluster controller from fresh metadata
+// and stores it in the local cache. Requires Kafka 0.10 or higher.
+func (client *client) RefreshController() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.deregisterController()
+
+	if err := client.refreshMetadata(); err != nil {
+		return nil, err
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	coordinator := client.cachedCoordinator(consumerGroup)
+
+	if coordinator == nil {
+		if err := client.RefreshCoordinator(consumerGroup); err != nil {
+			return nil, err
+		}
+		coordinator = client.cachedCoordinator(consumerGroup)
+	}
+
+	if coordinator == nil {
+		return nil, ErrConsumerCoordinatorNotAvailable
+	}
+
+	_ = coordinator.Open(client.conf)
+	return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+	if err != nil {
+		return err
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	client.registerBroker(response.Coordinator)
+	client.coordinators[consumerGroup] = response.Coordinator.ID()
+	return nil
+}
+
+// private broker management helpers
+
+func (client *client) randomizeSeedBrokers(addrs []string) {
+	random := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for _, index := range random.Perm(len(addrs)) {
+		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+	}
+}
+
+func (client *client) updateBroker(brokers []*Broker) {
+	var currentBroker = make(map[int32]*Broker, len(brokers))
+
+	for _, broker := range brokers {
+		currentBroker[broker.ID()] = broker
+		if client.brokers[broker.ID()] == nil { // add new broker
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+		} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
+			safeAsyncClose(client.brokers[broker.ID()])
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+
+	for id, broker := range client.brokers {
+		if _, exist := currentBroker[id]; !exist { // remove old broker
+			safeAsyncClose(broker)
+			delete(client.brokers, id)
+			Logger.Printf("client/brokers removed stale broker #%d at %s", broker.ID(), broker.Addr())
+		}
+	}
+}
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If a broker with the same ID but a different address is already registered,
+// it is replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+	if client.brokers == nil {
+		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
+		return
+	}
+
+	if client.brokers[broker.ID()] == nil {
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+		safeAsyncClose(client.brokers[broker.ID()])
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+	}
+}
+
+// deregisterBroker removes a broker from the seedBrokers list, and if it's
+// not a seed broker, removes it from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+		client.deadSeeds = append(client.deadSeeds, broker)
+		client.seedBrokers = client.seedBrokers[1:]
+	} else {
+		// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
+		// but we really shouldn't have to; once that loop is made better this case can be
+		// removed, and the function generally can be renamed from `deregisterBroker` to
+		// `nextSeedBroker` or something
+		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+		delete(client.brokers, broker.ID())
+	}
+}
+
+func (client *client) resurrectDeadBrokers() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+	client.deadSeeds = nil
+}
+
+func (client *client) any() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	if len(client.seedBrokers) > 0 {
+		_ = client.seedBrokers[0].Open(client.conf)
+		return client.seedBrokers[0]
+	}
+
+	// not guaranteed to be random *or* deterministic
+	for _, broker := range client.brokers {
+		_ = broker.Open(client.conf)
+		return broker
+	}
+
+	return nil
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+	allPartitions partitionType = iota
+	writablePartitions
+	// If you add any more types, update the partition cache in updateMetadata()
+
+	// Ensure this is the last partition type value
+	maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		return partitions[partitionID]
+	}
+
+	return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions, exists := client.cachedPartitionsResults[topic]
+
+	if !exists {
+		return nil
+	}
+	return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+	partitions := client.metadata[topic]
+
+	if partitions == nil {
+		return nil
+	}
+
+	ret := make([]int32, 0, len(partitions))
+	for _, partition := range partitions {
+		if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
+			continue
+		}
+		ret = append(ret, partition.ID)
+	}
+
+	sort.Sort(int32Slice(ret))
+	return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		metadata, ok := partitions[partitionID]
+		if ok {
+			if metadata.Err == ErrLeaderNotAvailable {
+				return nil, ErrLeaderNotAvailable
+			}
+			b := client.brokers[metadata.Leader]
+			if b == nil {
+				return nil, ErrLeaderNotAvailable
+			}
+			_ = b.Open(client.conf)
+			return b, nil
+		}
+	}
+
+	return nil, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
+	broker, err := client.Leader(topic, partitionID)
+	if err != nil {
+		return -1, err
+	}
+
+	request := &OffsetRequest{}
+	if client.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 1
+	}
+	request.AddBlock(topic, partitionID, time, 1)
+
+	response, err := broker.GetAvailableOffsets(request)
+	if err != nil {
+		_ = broker.Close()
+		return -1, err
+	}
+
+	block := response.GetBlock(topic, partitionID)
+	if block == nil {
+		_ = broker.Close()
+		return -1, ErrIncompleteResponse
+	}
+	if block.Err != ErrNoError {
+		return -1, block.Err
+	}
+	if len(block.Offsets) != 1 {
+		return -1, ErrOffsetOutOfRange
+	}
+
+	return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+	defer close(client.closed)
+
+	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+		return
+	}
+
+	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			if err := client.refreshMetadata(); err != nil {
+				Logger.Println("Client background metadata update:", err)
+			}
+		case <-client.closer:
+			return
+		}
+	}
+}
+
+func (client *client) refreshMetadata() error {
+	var topics []string
+
+	if !client.conf.Metadata.Full {
+		if specificTopics, err := client.MetadataTopics(); err != nil {
+			return err
+		} else if len(specificTopics) == 0 {
+			return ErrNoTopicsToUpdateMetadata
+		} else {
+			topics = specificTopics
+		}
+	}
+
+	if err := client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
+	pastDeadline := func(backoff time.Duration) bool {
+		if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
+			// we are past the deadline
+			return true
+		}
+		return false
+	}
+	retry := func(err error) error {
+		if attemptsRemaining > 0 {
+			backoff := client.computeBackoff(attemptsRemaining)
+			if pastDeadline(backoff) {
+				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
+				return err
+			}
+			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+			if backoff > 0 {
+				time.Sleep(backoff)
+			}
+			return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
+		}
+		return err
+	}
+
+	broker := client.any()
+	for ; broker != nil && !pastDeadline(0); broker = client.any() {
+		allowAutoTopicCreation := true
+		if len(topics) > 0 {
+			Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+		} else {
+			allowAutoTopicCreation = false
+			Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+		}
+
+		req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
+		if client.conf.Version.IsAtLeast(V1_0_0_0) {
+			req.Version = 5
+		} else if client.conf.Version.IsAtLeast(V0_10_0_0) {
+			req.Version = 1
+		}
+		response, err := broker.GetMetadata(req)
+		switch err.(type) {
+		case nil:
+			allKnownMetaData := len(topics) == 0
+			// valid response, use it
+			shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
+			if shouldRetry {
+				Logger.Println("client/metadata found some partitions to be leaderless")
+				return retry(err) // note: err can be nil
+			}
+			return err
+
+		case PacketEncodingError:
+			// didn't even send, return the error
+			return err
+
+		case KError:
+			// if SASL auth error return as this _should_ be a non retryable err for all brokers
+			if err.(KError) == ErrSASLAuthenticationFailed {
+				Logger.Println("client/metadata failed SASL authentication")
+				return err
+			}
+
+			if err.(KError) == ErrTopicAuthorizationFailed {
+				Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
+				return err
+			}
+			// else remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+
+		default:
+			// some other error, remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		}
+	}
+
+	if broker != nil {
+		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+		return retry(ErrOutOfBrokers)
+	}
+
+	Logger.Println("client/metadata no available broker to send metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
+
+// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
+func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+	if client.Closed() {
+		return
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	// For all the brokers we received:
+	//   - if it is a new ID, save it
+	//   - if it is an existing ID, but the address we have is stale, discard the old one and save it
+	//   - if a broker is no longer present in the response, remove the old one
+	//   - otherwise ignore it, replacing our existing one would just bounce the connection
+	client.updateBroker(data.Brokers)
+
+	client.controllerID = data.ControllerID
+
+	if allKnownMetaData {
+		client.metadata = make(map[string]map[int32]*PartitionMetadata)
+		client.metadataTopics = make(map[string]none)
+		client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
+	}
+	for _, topic := range data.Topics {
+		// topics must be added to `metadataTopics` first, so that all requested
+		// topics are recorded and remain trackable for the periodic metadata
+		// refresh.
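+		// For example, after RefreshMetadata("a", "b") both names are recorded
+		// here even when "b" comes back with ErrUnknownTopicOrPartition below,
+		// so the background updater keeps asking for "b" until it exists.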
+		if _, exists := client.metadataTopics[topic.Name]; !exists {
+			client.metadataTopics[topic.Name] = none{}
+		}
+		delete(client.metadata, topic.Name)
+		delete(client.cachedPartitionsResults, topic.Name)
+
+		switch topic.Err {
+		case ErrNoError:
+			// no-op
+		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+			err = topic.Err
+			continue
+		case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+			err = topic.Err
+			retry = true
+			continue
+		case ErrLeaderNotAvailable: // retry, but store partial partition results
+			retry = true
+		default: // don't retry, don't store partial results
+			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+			err = topic.Err
+			continue
+		}
+
+		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+		for _, partition := range topic.Partitions {
+			client.metadata[topic.Name][partition.ID] = partition
+			if partition.Err == ErrLeaderNotAvailable {
+				retry = true
+			}
+		}
+
+		var partitionCache [maxPartitionIndex][]int32
+		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+		client.cachedPartitionsResults[topic.Name] = partitionCache
+	}
+
+	return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+		return client.brokers[coordinatorID]
+	}
+	return nil
+}
+
+func (client *client) cachedController() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	return client.brokers[client.controllerID]
+}
+
+func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
+	if client.conf.Metadata.Retry.BackoffFunc != nil {
+		maxRetries := client.conf.Metadata.Retry.Max
+		retries := maxRetries - attemptsRemaining
+		return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
+	}
+	return client.conf.Metadata.Retry.Backoff
+}
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
+	retry := func(err error) (*FindCoordinatorResponse, error) {
+		if attemptsRemaining > 0 {
+			backoff := client.computeBackoff(attemptsRemaining)
+			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+			time.Sleep(backoff)
+			return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+		}
+		return nil, err
+	}
+
+	for broker := client.any(); broker != nil; broker = client.any() {
+		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
+
+		request := new(FindCoordinatorRequest)
+		request.CoordinatorKey = consumerGroup
+		request.CoordinatorType = CoordinatorGroup
+
+		response, err := broker.FindCoordinator(request)
+
+		if err != nil {
+			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+			switch err.(type) {
+			case PacketEncodingError:
+				return nil, err
+			default:
+				_ = broker.Close()
+				client.deregisterBroker(broker)
+				continue
+			}
+		}
+
+		switch response.Err {
+		case ErrNoError:
+			Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+			return response, nil
+
+		case ErrConsumerCoordinatorNotAvailable:
+			Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+			// This is very ugly, but this scenario will only happen once per cluster.
+			// The __consumer_offsets topic only has to be created one time.
+			// The number of partitions is not configurable, but partition 0 should always exist.
+			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+				time.Sleep(2 * time.Second)
+			}
+
+			return retry(ErrConsumerCoordinatorNotAvailable)
+		case ErrGroupAuthorizationFailed:
+			Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
+			return retry(ErrGroupAuthorizationFailed)
+
+		default:
+			return nil, response.Err
+		}
+	}
+
+	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
+
+// nopCloserClient embeds an existing Client, but disables
+// the Close method (yet all other methods pass
+// through unchanged). This is for use in larger structs
+// where it is undesirable to close the client that was
+// passed in by the caller.
+type nopCloserClient struct {
+	Client
+}
+
+// Close intercepts and purposely does not call the underlying
+// client's Close() method.
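+//
+// A sketch of the intended use (the component constructor and variable
+// names here are hypothetical):
+//
+//	admin := newAdminFromClient(nopCloserClient{Client: callerOwnedClient})
+//	defer admin.Close() // closes admin's own resources, not the caller's client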
+func (ncc *nopCloserClient) Close() error {
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go
new file mode 100644
index 00000000000..12cd7c3d510
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/compress.go
@@ -0,0 +1,194 @@
+package sarama
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"sync"
+
+	snappy "github.com/eapache/go-xerial-snappy"
+	"github.com/pierrec/lz4"
+)
+
+var (
+	lz4WriterPool = sync.Pool{
+		New: func() interface{} {
+			return lz4.NewWriter(nil)
+		},
+	}
+
+	gzipWriterPool = sync.Pool{
+		New: func() interface{} {
+			return gzip.NewWriter(nil)
+		},
+	}
+	gzipWriterPoolForCompressionLevel1 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 1)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel2 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 2)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel3 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 3)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel4 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 4)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel5 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 5)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel6 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 6)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel7 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 7)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel8 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 8)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel9 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 9)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+)
+
+func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
+	switch cc {
+	case CompressionNone:
+		return data, nil
+	case CompressionGZIP:
+		var (
+			err    error
+			buf    bytes.Buffer
+			writer *gzip.Writer
+		)
+
+		switch level {
+		case CompressionLevelDefault:
+			writer = gzipWriterPool.Get().(*gzip.Writer)
+			defer gzipWriterPool.Put(writer)
+			writer.Reset(&buf)
+		case 1:
+			writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel1.Put(writer)
+			writer.Reset(&buf)
+		case 2:
+			writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel2.Put(writer)
+			writer.Reset(&buf)
+		case 3:
+			writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel3.Put(writer)
+			writer.Reset(&buf)
+		case 4:
+			writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel4.Put(writer)
+			writer.Reset(&buf)
+		case 5:
+			writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel5.Put(writer)
+			writer.Reset(&buf)
+		case 6:
+			writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel6.Put(writer)
+			writer.Reset(&buf)
+		case 7:
+			writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel7.Put(writer)
+			writer.Reset(&buf)
+		case 8:
+			writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel8.Put(writer)
+			writer.Reset(&buf)
+		case 9:
+			writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel9.Put(writer)
+			writer.Reset(&buf)
+		default:
+			writer, err = gzip.NewWriterLevel(&buf, level)
+			if err != nil {
+				return nil, err
+			}
+		}
+		if _, err := writer.Write(data); err != nil {
+			return nil, err
+		}
+		if err := writer.Close(); err != nil {
+			return nil, err
+		}
+		return buf.Bytes(), nil
+	case CompressionSnappy:
+		return snappy.Encode(data), nil
+	case CompressionLZ4:
+		writer := lz4WriterPool.Get().(*lz4.Writer)
+		defer lz4WriterPool.Put(writer)
+
+		var buf bytes.Buffer
+		writer.Reset(&buf)
+
+		if _, err := writer.Write(data); err != nil {
+			return nil, err
+		}
+		if err := writer.Close(); err != nil {
+			return nil, err
+		}
+		return buf.Bytes(), nil
+	case CompressionZSTD:
+		return zstdCompress(nil, data)
+	default:
+		return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 00000000000..43e739cad95
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,765 @@
+package sarama
+
+import (
+	"compress/gzip"
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"regexp"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+	"golang.org/x/net/proxy"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
+	Admin struct {
+		Retry struct {
+			// The total number of times to retry sending (retriable) admin requests (default 5).
+			// Similar to the `retries` setting of the JVM AdminClientConfig.
+			Max int
+			// Backoff time between retries of a failed request (default 100ms)
+			Backoff time.Duration
+		}
+		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
+		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
+		Timeout time.Duration
+	}
+
+	// Net is the namespace for network-level properties used by the Broker, and
+	// shared by the Client/Producer/Consumer.
+	Net struct {
+		// How many outstanding requests a connection is allowed to have before
+		// sending on it blocks (default 5).
+		MaxOpenRequests int
+
+		// All three of the below configurations are similar to the
+		// `socket.timeout.ms` setting in JVM kafka. All of them default
+		// to 30 seconds.
+		DialTimeout  time.Duration // How long to wait for the initial connection.
+		ReadTimeout  time.Duration // How long to wait for a response.
+		WriteTimeout time.Duration // How long to wait for a transmit.
+
+		TLS struct {
+			// Whether or not to use TLS when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The TLS configuration to use for secure connections if
+			// enabled (defaults to nil).
+			Config *tls.Config
+		}
+
+		// SASL based authentication with broker.
+		// While there are multiple SASL authentication methods, the current
+		// implementation supports PLAIN, SCRAM, OAUTHBEARER and GSSAPI.
+		SASL struct {
+			// Whether or not to use SASL authentication when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// SASLMechanism is the name of the enabled SASL mechanism.
+			// Possible values: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512,
+			// OAUTHBEARER, GSSAPI (defaults to PLAIN).
+			Mechanism SASLMechanism
+			// Version is the SASL Protocol Version to use
+			// Kafka > 1.x should use V1, except on Azure EventHub which uses V0
+			Version int16
+			// Whether or not to send the Kafka SASL handshake first if enabled
+			// (defaults to true). You should only set this to false if you're using
+			// a non-Kafka SASL proxy.
+			Handshake bool
+			// AuthIdentity is an (optional) authorization identity (authzid) to
+			// use for SASL/PLAIN authentication (if different from User) when
+			// an authenticated user is permitted to act as the presented
+			// alternative user. See RFC4616 for details.
+			AuthIdentity string
+			// User is the authentication identity (authcid) to present for
+			// SASL/PLAIN or SASL/SCRAM authentication
+			User string
+			// Password for SASL/PLAIN authentication
+			Password string
+			// authz id used for SASL/SCRAM authentication
+			SCRAMAuthzID string
+			// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
+			// client used to perform the SCRAM exchange with the server.
+			SCRAMClientGeneratorFunc func() SCRAMClient
+			// TokenProvider is a user-defined callback for generating
+			// access tokens for SASL/OAUTHBEARER auth. See the
+			// AccessTokenProvider interface docs for proper implementation
+			// guidelines.
+			TokenProvider AccessTokenProvider
+
+			GSSAPI GSSAPIConfig
+		}
+
+		// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
+		// If zero or positive, keep-alives are enabled.
+		// If negative, keep-alives are disabled.
+		KeepAlive time.Duration
+
+		// LocalAddr is the local address to use when dialing an
+		// address. The address must be of a compatible type for the
+		// network being dialed.
+		// If nil, a local address is automatically chosen.
+		LocalAddr net.Addr
+
+		Proxy struct {
+			// Whether or not to use a proxy when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The proxy dialer to use when proxying is enabled (defaults to nil).
+			Dialer proxy.Dialer
+		}
+	}
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata struct {
+		Retry struct {
+			// The total number of times to retry a metadata request when the
+			// cluster is in the middle of a leader election (default 3).
+			Max int
+			// How long to wait for leader election to occur before retrying
+			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries, maxRetries int) time.Duration
+		}
+		// How frequently to refresh the cluster metadata in the background.
+		// Defaults to 10 minutes. Set to 0 to disable. Similar to
+		// `topic.metadata.refresh.interval.ms` in the JVM version.
+		RefreshFrequency time.Duration
+
+		// Whether to maintain a full set of metadata for all topics, or just
+		// the minimal set that has been necessary so far.
+		// The full set is simpler and usually more convenient, but can take up
+		// a substantial amount of memory if you have many topics and partitions.
+		// Defaults to true.
+		Full bool
+
+		// How long to wait for a successful metadata response.
+		// Disabled by default which means a metadata request against an unreachable
+		// cluster (all brokers are unreachable or unresponsive) can take up to
+		// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
+		// to fail.
+		Timeout time.Duration
+	}
+
+	// Producer is the namespace for configuration related to producing messages,
+	// used by the Producer.
+	Producer struct {
+		// The maximum permitted size of a message (defaults to 1000000). Should be
+		// set equal to or smaller than the broker's `message.max.bytes`.
+		MaxMessageBytes int
+		// The level of acknowledgement reliability needed from the broker (defaults
+		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+		// JVM producer.
+		RequiredAcks RequiredAcks
+		// The maximum duration the broker will wait for the receipt of the number of
+		// RequiredAcks (defaults to 10 seconds). This is only relevant when
+		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
+		// millisecond resolution, nanoseconds will be truncated. Equivalent to
+		// the JVM producer's `request.timeout.ms` setting.
+		Timeout time.Duration
+		// The type of compression to use on messages (defaults to no compression).
+		// Similar to `compression.codec` setting of the JVM producer.
+		Compression CompressionCodec
+		// The level of compression to use on messages. The meaning depends
+		// on the actual compression type used and defaults to default compression
+		// level for the codec.
+		CompressionLevel int
+		// Generates partitioners for choosing the partition to send messages to
+		// (defaults to hashing the message key). Similar to the `partitioner.class`
+		// setting for the JVM producer.
+		Partitioner PartitionerConstructor
+		// If enabled, the producer will ensure that exactly one copy of each message is
+		// written.
+		Idempotent bool
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from the respective channels to prevent deadlock. If,
+		// however, this config is used to create a `SyncProducer`, both must be set
+		// to true and you shall not read from the channels since the producer does
+		// this internally.
+		Return struct {
+			// If enabled, successfully delivered messages will be returned on the
+			// Successes channel (default disabled).
+			Successes bool
+
+			// If enabled, messages that failed to deliver will be returned on the
+			// Errors channel, including error (default enabled).
+			Errors bool
+		}
+
+		// The following config options control how often messages are batched up and
+		// sent to the broker. By default, messages are sent as fast as possible, and
+		// all messages received while the current batch is in-flight are placed
+		// into the subsequent batch.
+		Flush struct {
+			// The best-effort number of bytes needed to trigger a flush. Use the
+			// global sarama.MaxRequestSize to set a hard upper limit.
+			Bytes int
+			// The best-effort number of messages needed to trigger a flush. Use
+			// `MaxMessages` to set a hard upper limit.
+			Messages int
+			// The best-effort frequency of flushes. Equivalent to
+			// `queue.buffering.max.ms` setting of JVM producer.
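+			// For example (illustrative values; set these before the producer
+			// is constructed):
+			//
+			//	conf.Producer.Flush.Frequency = 500 * time.Millisecond
+			//	conf.Producer.Flush.Messages = 100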
+			Frequency time.Duration
+			// The maximum number of messages the producer will send in a single
+			// broker request. Defaults to 0 for unlimited. Similar to
+			// `queue.buffering.max.messages` in the JVM producer.
+			MaxMessages int
+		}
+
+		Retry struct {
+			// The total number of times to retry sending a message (default 3).
+			// Similar to the `message.send.max.retries` setting of the JVM producer.
+			Max int
+			// How long to wait for the cluster to settle between retries
+			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
+			// JVM producer.
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries, maxRetries int) time.Duration
+		}
+
+		// Interceptors to be called when the producer dispatcher reads the
+		// message for the first time. Interceptors allow you to intercept and
+		// possibly mutate a message before it is published to the Kafka
+		// cluster. The *ProducerMessage modified by the first interceptor's
+		// OnSend() is passed to the second interceptor's OnSend(), and so on in
+		// the interceptor chain.
+		Interceptors []ProducerInterceptor
+	}
+
+	// Consumer is the namespace for configuration related to consuming messages,
+	// used by the Consumer.
+	Consumer struct {
+
+		// Group is the namespace for configuring consumer group.
+		Group struct {
+			Session struct {
+				// The timeout used to detect consumer failures when using Kafka's group management facility.
+				// The consumer sends periodic heartbeats to indicate its liveness to the broker.
+				// If no heartbeats are received by the broker before the expiration of this session timeout,
+				// then the broker will remove this consumer from the group and initiate a rebalance.
+				// Note that the value must be in the allowable range as configured in the broker configuration
+				// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
+				Timeout time.Duration
+			}
+			Heartbeat struct {
+				// The expected time between heartbeats to the consumer coordinator when using Kafka's group
+				// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
+				// to facilitate rebalancing when new consumers join or leave the group.
+				// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
+				// higher than 1/3 of that value.
+				// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
+				Interval time.Duration
+			}
+			Rebalance struct {
+				// Strategy for allocating topic partitions to members (default BalanceStrategyRange)
+				Strategy BalanceStrategy
+				// The maximum allowed time for each worker to join the group once a rebalance has begun.
+				// This is basically a limit on the amount of time needed for all tasks to flush any pending
+				// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
+				// the group, which will cause offset commit failures (default 60s).
+				Timeout time.Duration
+
+				Retry struct {
+					// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
+					// the load to assign partitions to each consumer. If the set of consumers changes while
+					// this assignment is taking place the rebalance will fail and retry. This setting controls
+					// the maximum number of attempts before giving up (default 4).
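+					// For example, with the defaults (Max = 4, Backoff = 2s) a
+					// failed rebalance is retried up to four times, two seconds
+					// apart, before the error is surfaced to the caller.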
+					Max int
+					// Backoff time between retries during rebalance (default 2s)
+					Backoff time.Duration
+				}
+			}
+			Member struct {
+				// Custom metadata to include when joining the group. The user data for all joined members
+				// can be retrieved by sending a DescribeGroupRequest to the broker that is the
+				// coordinator for the group.
+				UserData []byte
+			}
+		}
+
+		Retry struct {
+			// How long to wait after failing to read from a partition before
+			// trying again (default 2s).
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries int) time.Duration
+		}
+
+		// Fetch is the namespace for controlling how many bytes are retrieved by any
+		// given request.
+		Fetch struct {
+			// The minimum number of message bytes to fetch in a request - the broker
+			// will wait until at least this many are available. The default is 1,
+			// as 0 causes the consumer to spin when no messages are available.
+			// Equivalent to the JVM's `fetch.min.bytes`.
+			Min int32
+			// The default number of message bytes to fetch from the broker in each
+			// request (default 1MB). This should be larger than the majority of
+			// your messages, or else the consumer will spend a lot of time
+			// negotiating sizes and not actually consuming. Similar to the JVM's
+			// `fetch.message.max.bytes`.
+			Default int32
+			// The maximum number of message bytes to fetch from the broker in a
+			// single request. Messages larger than this will return
+			// ErrMessageTooLarge and will not be consumable, so you must be sure
+			// this is at least as large as your largest message. Defaults to 0
+			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+			// global `sarama.MaxResponseSize` still applies.
+			Max int32
+		}
+		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
+		// bytes to become available before it returns fewer than that anyways. The
+		// default is 250ms, since 0 causes the consumer to spin when no events are
+		// available. 100-500ms is a reasonable range for most cases. Kafka only
+		// supports precision up to milliseconds; nanoseconds will be truncated.
+		// Equivalent to the JVM's `fetch.wait.max.ms`.
+		MaxWaitTime time.Duration
+
+		// The maximum amount of time the consumer expects a message to take to
+		// process for the user. If writing to the Messages channel takes longer
+		// than this, that partition will stop fetching more messages until it
+		// can proceed again.
+		// Note that, since the Messages channel is buffered, the actual grace time is
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+		// If a message is not written to the Messages channel between two ticks
+		// of the expiryTicker then a timeout is detected.
+		// Using a ticker instead of a timer to detect timeouts should typically
+		// result in many fewer calls to Timer functions which may result in a
+		// significant performance improvement if many messages are being sent
+		// and timeouts are infrequent.
+		// The disadvantage of using a ticker instead of a timer is that
+		// timeouts will be less accurate. That is, the effective timeout could
+		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+		// between two messages being sent may not be recognized as a timeout.
+		MaxProcessingTime time.Duration
+
+		// Return specifies what channels will be populated.
+		// If they are set to true, you must read from them to prevent deadlock.
+		Return struct {
+			// If enabled, any errors that occurred while consuming are returned on
+			// the Errors channel (default disabled).
+			Errors bool
+		}
+
+		// Offsets specifies configuration for how and when to commit consumed
+		// offsets. This currently requires the manual use of an OffsetManager
+		// but will eventually be automated.
+		Offsets struct {
+			// Deprecated: CommitInterval exists for historical compatibility
+			// and should not be used. Please use Consumer.Offsets.AutoCommit
+			CommitInterval time.Duration
+
+			// AutoCommit specifies configuration for committing updated offsets
+			// automatically.
+			AutoCommit struct {
+				// Whether or not to auto-commit updated offsets back to the broker.
+				// (default enabled).
+				Enable bool
+
+				// How frequently to commit updated offsets. Ineffective unless
+				// auto-commit is enabled (default 1s)
+				Interval time.Duration
+			}
+
+			// The initial offset to use if no offset was previously committed.
+			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+			Initial int64
+
+			// The retention duration for committed offsets. If zero, disabled
+			// (in which case the `offsets.retention.minutes` option on the
+			// broker will be used). Kafka only supports precision up to
+			// milliseconds; nanoseconds will be truncated. Requires Kafka
+			// broker version 0.9.0 or later.
+			// (default is 0: disabled).
+			Retention time.Duration
+
+			Retry struct {
+				// The total number of times to retry failing commit
+				// requests during OffsetManager shutdown (default 3).
+				Max int
+			}
+		}
+
+		// IsolationLevel supports two modes:
+		//   - use `ReadUncommitted` (default) to consume and return all messages in message channel
+		//   - use `ReadCommitted` to hide messages that are part of an aborted transaction
+		IsolationLevel IsolationLevel
+
+		// Interceptors to be called just before the record is sent to the
+		// messages channel. Interceptors allow you to intercept and possibly
+		// mutate a message before it is returned to the client. The
+		// *ConsumerMessage modified by the first interceptor's OnConsume() is
+		// passed to the second interceptor's OnConsume(), and so on in the
+		// interceptor chain.
+		Interceptors []ConsumerInterceptor
+	}
+
+	// A user-provided string sent with every request to the brokers for logging,
+	// debugging, and auditing purposes. Defaults to "sarama", but you should
+	// probably set it to something specific to your application.
+	ClientID string
+	// A rack identifier for this client. This can be any string value which
+	// indicates where this client is physically located.
+	// It corresponds with the broker config 'broker.rack'
+	RackID string
+	// The number of events to buffer in internal and external channels. This
+	// permits the producer and consumer to continue processing some messages
+	// in the background while user code is working, greatly improving throughput.
+	// Defaults to 256.
+	ChannelBufferSize int
+	// The version of Kafka that Sarama will assume it is running against.
+	// Defaults to the oldest supported stable version. Since Kafka provides
+	// backwards-compatibility, setting it to a version older than you have
+	// will not break anything, although it may prevent you from using the
+	// latest features. Setting it to a version greater than you are actually
+	// running may lead to random breakage.
+	Version KafkaVersion
+	// The registry to define metrics into.
+	// Defaults to a local registry.
+	// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+	// prior to starting Sarama.
+	// See Examples on how to use the metrics registry
+	MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+	c := &Config{}
+
+	c.Admin.Retry.Max = 5
+	c.Admin.Retry.Backoff = 100 * time.Millisecond
+	c.Admin.Timeout = 3 * time.Second
+
+	c.Net.MaxOpenRequests = 5
+	c.Net.DialTimeout = 30 * time.Second
+	c.Net.ReadTimeout = 30 * time.Second
+	c.Net.WriteTimeout = 30 * time.Second
+	c.Net.SASL.Handshake = true
+	c.Net.SASL.Version = SASLHandshakeV0
+
+	c.Metadata.Retry.Max = 3
+	c.Metadata.Retry.Backoff = 250 * time.Millisecond
+	c.Metadata.RefreshFrequency = 10 * time.Minute
+	c.Metadata.Full = true
+
+	c.Producer.MaxMessageBytes = 1000000
+	c.Producer.RequiredAcks = WaitForLocal
+	c.Producer.Timeout = 10 * time.Second
+	c.Producer.Partitioner = NewHashPartitioner
+	c.Producer.Retry.Max = 3
+	c.Producer.Retry.Backoff = 100 * time.Millisecond
+	c.Producer.Return.Errors = true
+	c.Producer.CompressionLevel = CompressionLevelDefault
+
+	c.Consumer.Fetch.Min = 1
+	c.Consumer.Fetch.Default = 1024 * 1024
+	c.Consumer.Retry.Backoff = 2 * time.Second
+	c.Consumer.MaxWaitTime = 250 * time.Millisecond
+	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+	c.Consumer.Return.Errors = false
+	c.Consumer.Offsets.AutoCommit.Enable = true
+	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
+	c.Consumer.Offsets.Initial = OffsetNewest
+	c.Consumer.Offsets.Retry.Max = 3
+
+	c.Consumer.Group.Session.Timeout = 10 * time.Second
+	c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
+	c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
+	c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
+	c.Consumer.Group.Rebalance.Retry.Max = 4
+	c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
+
+	c.ClientID = defaultClientID
+	c.ChannelBufferSize = 256
+	c.Version = DefaultVersion
+	c.MetricRegistry = metrics.NewRegistry()
+
+	return c
+}
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
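+//
+// A typical call site looks like the following sketch (Validate is also run
+// automatically by NewClient):
+//
+//	conf := NewConfig()
+//	conf.Producer.RequiredAcks = WaitForAll
+//	if err := conf.Validate(); err != nil {
+//		panic(err)
+//	}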
+func (c *Config) Validate() error {
+	// some configuration values should be warned on but not fail completely, do those first
+	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
+		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+	}
+	if !c.Net.SASL.Enable {
+		if c.Net.SASL.User != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+		}
+		if c.Net.SASL.Password != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+		}
+	}
+	if c.Producer.RequiredAcks > 1 {
+		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+	}
+	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+	}
+	if c.Producer.Timeout%time.Millisecond != 0 {
+		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+	}
+	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+	}
+	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.ClientID == defaultClientID {
+		Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+	}
+
+	// validate Net values
+	switch {
+	case c.Net.MaxOpenRequests <= 0:
+		return ConfigurationError("Net.MaxOpenRequests must be > 0")
+	case c.Net.DialTimeout <= 0:
+		return ConfigurationError("Net.DialTimeout must be > 0")
+	case c.Net.ReadTimeout <= 0:
+		return ConfigurationError("Net.ReadTimeout must be > 0")
+	case c.Net.WriteTimeout <= 0:
+		return ConfigurationError("Net.WriteTimeout must be > 0")
+	case c.Net.SASL.Enable:
+		if c.Net.SASL.Mechanism == "" {
+			c.Net.SASL.Mechanism = SASLTypePlaintext
+		}
+
+		switch c.Net.SASL.Mechanism {
+		case SASLTypePlaintext:
+			if c.Net.SASL.User == "" {
+				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.Password == "" {
+				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+			}
+		case SASLTypeOAuth:
+			if c.Net.SASL.TokenProvider == nil {
+				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
+			}
+		case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+			if c.Net.SASL.User == "" {
+				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.Password == "" {
+				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
+				return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
+			}
+		case SASLTypeGSSAPI:
+			if c.Net.SASL.GSSAPI.ServiceName == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
+			}
+
+			if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
+				if c.Net.SASL.GSSAPI.Password == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
+						"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
+				}
+			} else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
+				if c.Net.SASL.GSSAPI.KeyTabPath == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
+						" and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
+				}
+			} else {
+				return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
+			}
+			if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Username == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Realm == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
+			}
+		default:
Possible values are `%s`, `%s`, `%s`, `%s` and `%s`", + SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI) + return ConfigurationError(msg) + } + } + + // validate the Admin values + switch { + case c.Admin.Timeout <= 0: + return ConfigurationError("Admin.Timeout must be > 0") + } + + // validate the Metadata values + switch { + case c.Metadata.Retry.Max < 0: + return ConfigurationError("Metadata.Retry.Max must be >= 0") + case c.Metadata.Retry.Backoff < 0: + return ConfigurationError("Metadata.Retry.Backoff must be >= 0") + case c.Metadata.RefreshFrequency < 0: + return ConfigurationError("Metadata.RefreshFrequency must be >= 0") + } + + // validate the Producer values + switch { + case c.Producer.MaxMessageBytes <= 0: + return ConfigurationError("Producer.MaxMessageBytes must be > 0") + case c.Producer.RequiredAcks < -1: + return ConfigurationError("Producer.RequiredAcks must be >= -1") + case c.Producer.Timeout <= 0: + return ConfigurationError("Producer.Timeout must be > 0") + case c.Producer.Partitioner == nil: + return ConfigurationError("Producer.Partitioner must not be nil") + case c.Producer.Flush.Bytes < 0: + return ConfigurationError("Producer.Flush.Bytes must be >= 0") + case c.Producer.Flush.Messages < 0: + return ConfigurationError("Producer.Flush.Messages must be >= 0") + case c.Producer.Flush.Frequency < 0: + return ConfigurationError("Producer.Flush.Frequency must be >= 0") + case c.Producer.Flush.MaxMessages < 0: + return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") + case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: + return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") + case c.Producer.Retry.Max < 0: + return ConfigurationError("Producer.Retry.Max must be >= 0") + case c.Producer.Retry.Backoff < 0: + return ConfigurationError("Producer.Retry.Backoff must be >= 0") + } + + if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) { + return ConfigurationError("lz4 compression requires Version >= V0_10_0_0") + } + + if c.Producer.Compression == CompressionGZIP { + if c.Producer.CompressionLevel != CompressionLevelDefault { + if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil { + return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err)) + } + } + } + + if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) { + return ConfigurationError("zstd compression requires Version >= V2_1_0_0") + } + + if c.Producer.Idempotent { + if !c.Version.IsAtLeast(V0_11_0_0) { + return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0") + } + if c.Producer.Retry.Max == 0 { + return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1") + } + if c.Producer.RequiredAcks != WaitForAll { + return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll") + } + if c.Net.MaxOpenRequests > 1 { + return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1") + } + } + + // validate the Consumer values + switch { + case c.Consumer.Fetch.Min <= 0: + return ConfigurationError("Consumer.Fetch.Min must be > 0") + case c.Consumer.Fetch.Default <= 0: + return ConfigurationError("Consumer.Fetch.Default must be > 0") + case c.Consumer.Fetch.Max < 0: + return ConfigurationError("Consumer.Fetch.Max must be >= 0") + case 
c.Consumer.MaxWaitTime < 1*time.Millisecond: + return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") + case c.Consumer.MaxProcessingTime <= 0: + return ConfigurationError("Consumer.MaxProcessingTime must be > 0") + case c.Consumer.Retry.Backoff < 0: + return ConfigurationError("Consumer.Retry.Backoff must be >= 0") + case c.Consumer.Offsets.AutoCommit.Interval <= 0: + return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0") + case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: + return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") + case c.Consumer.Offsets.Retry.Max < 0: + return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0") + case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted: + return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted") + } + + if c.Consumer.Offsets.CommitInterval != 0 { + Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" + + " and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored") + } + + // validate IsolationLevel + if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) { + return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0") + } + + // validate the Consumer Group values + switch { + case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond: + return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms") + case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond: + return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms") + case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout: + return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout") + case c.Consumer.Group.Rebalance.Strategy == nil: + return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty") + case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond: + return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms") + case c.Consumer.Group.Rebalance.Retry.Max < 0: + return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0") + case c.Consumer.Group.Rebalance.Retry.Backoff < 0: + return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0") + } + + // validate misc shared values + switch { + case c.ChannelBufferSize < 0: + return ConfigurationError("ChannelBufferSize must be >= 0") + case !validID.MatchString(c.ClientID): + return ConfigurationError("ClientID is invalid") + } + + return nil +} + +func (c *Config) getDialer() proxy.Dialer { + if c.Net.Proxy.Enable { + Logger.Printf("using proxy %s", c.Net.Proxy.Dialer) + return c.Net.Proxy.Dialer + } else { + return &net.Dialer{ + Timeout: c.Net.DialTimeout, + KeepAlive: c.Net.KeepAlive, + LocalAddr: c.Net.LocalAddr, + } + } +} diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go new file mode 100644 index 00000000000..bef1053aaed --- /dev/null +++ b/vendor/github.com/Shopify/sarama/config_resource_type.go @@ -0,0 +1,18 @@ +package sarama + +// ConfigResourceType is a type for resources that have configs. 
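+// Its values mirror the byte codes of Kafka's ConfigResource.Type enum (see the
+// link below), and identify the kind of resource (topic, broker, ...) whose
+// configuration is being described or altered.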
+type ConfigResourceType int8
+
+// Taken from:
+// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55
+
+const (
+	// UnknownResource constant type
+	UnknownResource ConfigResourceType = 0
+	// TopicResource constant type
+	TopicResource ConfigResourceType = 2
+	// BrokerResource constant type
+	BrokerResource ConfigResourceType = 4
+	// BrokerLoggerResource constant type
+	BrokerLoggerResource ConfigResourceType = 8
+)
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
new file mode 100644
index 00000000000..fbdbff23ba5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -0,0 +1,918 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+	Headers        []*RecordHeader // only set if kafka is version 0.11+
+	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
+	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
+
+	Key, Value []byte
+	Topic      string
+	Partition  int32
+	Offset     int64
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+	Topic     string
+	Partition int32
+	Err       error
+}
+
+func (ce ConsumerError) Error() string {
+	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+func (ce ConsumerError) Unwrap() error {
+	return ce.Err
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+type Consumer interface {
+	// Topics returns the set of available topics as retrieved from the cluster
+	// metadata. This method is the same as Client.Topics(), and is provided for
+	// convenience.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	// This method is the same as Client.Partitions(), and is provided for convenience.
+	Partitions(topic string) ([]int32, error)
+
+	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
+	// the given offset. It will return an error if this Consumer is already consuming
+	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+	// or OffsetOldest.
+	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+	// HighWaterMarks returns the current high water marks for each topic and partition.
+	// Consistency between partitions is not guaranteed since high water marks are updated separately.
+	HighWaterMarks() map[string]map[int32]int64
+
+	// Close shuts down the consumer. It must be called after all child
+	// PartitionConsumers have already been closed.
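+	//
+	// A typical lifecycle, for illustration only (error handling elided; passing a
+	// nil config uses the defaults):
+	//
+	//	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
+	//	pc, err := consumer.ConsumePartition("example-topic", 0, OffsetNewest)
+	//	// ... read from pc.Messages() ...
+	//	_ = pc.Close()
+	//	_ = consumer.Close()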
+ Close() error +} + +type consumer struct { + conf *Config + children map[string]map[int32]*partitionConsumer + brokerConsumers map[*Broker]*brokerConsumer + client Client + lock sync.Mutex +} + +// NewConsumer creates a new consumer using the given broker addresses and configuration. +func NewConsumer(addrs []string, config *Config) (Consumer, error) { + client, err := NewClient(addrs, config) + if err != nil { + return nil, err + } + return newConsumer(client) +} + +// NewConsumerFromClient creates a new consumer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this consumer. +func NewConsumerFromClient(client Client) (Consumer, error) { + // For clients passed in by the client, ensure we don't + // call Close() on it. + cli := &nopCloserClient{client} + return newConsumer(cli) +} + +func newConsumer(client Client) (Consumer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + c := &consumer{ + client: client, + conf: client.Config(), + children: make(map[string]map[int32]*partitionConsumer), + brokerConsumers: make(map[*Broker]*brokerConsumer), + } + + return c, nil +} + +func (c *consumer) Close() error { + return c.client.Close() +} + +func (c *consumer) Topics() ([]string, error) { + return c.client.Topics() +} + +func (c *consumer) Partitions(topic string) ([]int32, error) { + return c.client.Partitions(topic) +} + +func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { + child := &partitionConsumer{ + consumer: c, + conf: c.conf, + topic: topic, + partition: partition, + messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), + errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), + feeder: make(chan *FetchResponse, 1), + trigger: make(chan none, 1), + dying: make(chan none), + fetchSize: c.conf.Consumer.Fetch.Default, + } + + if err := child.chooseStartingOffset(offset); err != nil { + return nil, err + } + + var leader *Broker + var err error + if leader, err = c.client.Leader(child.topic, child.partition); err != nil { + return nil, err + } + + if err := c.addChild(child); err != nil { + return nil, err + } + + go withRecover(child.dispatcher) + go withRecover(child.responseFeeder) + + child.broker = c.refBrokerConsumer(leader) + child.broker.input <- child + + return child, nil +} + +func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { + c.lock.Lock() + defer c.lock.Unlock() + + hwms := make(map[string]map[int32]int64) + for topic, p := range c.children { + hwm := make(map[int32]int64, len(p)) + for partition, pc := range p { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + +func (c *consumer) addChild(child *partitionConsumer) error { + c.lock.Lock() + defer c.lock.Unlock() + + topicChildren := c.children[child.topic] + if topicChildren == nil { + topicChildren = make(map[int32]*partitionConsumer) + c.children[child.topic] = topicChildren + } + + if topicChildren[child.partition] != nil { + return ConfigurationError("That topic/partition is already being consumed") + } + + topicChildren[child.partition] = child + return nil +} + +func (c *consumer) removeChild(child *partitionConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.children[child.topic], child.partition) +} + +func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { + c.lock.Lock() + defer 
c.lock.Unlock() + + bc := c.brokerConsumers[broker] + if bc == nil { + bc = c.newBrokerConsumer(broker) + c.brokerConsumers[broker] = bc + } + + bc.refs++ + + return bc +} + +func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + brokerWorker.refs-- + + if brokerWorker.refs == 0 { + close(brokerWorker.input) + if c.brokerConsumers[brokerWorker.broker] == brokerWorker { + delete(c.brokerConsumers, brokerWorker.broker) + } + } +} + +func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.brokerConsumers, brokerWorker.broker) +} + +// PartitionConsumer + +// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or +// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out +// of scope. +// +// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range +// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported +// as out of range by the brokers. In this case you should decide what you want to do (try a different offset, +// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. +// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set +// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement +// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. +// +// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of +// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process +// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call +// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will +// also drain the Messages channel, harvest all errors & return them once cleanup has completed. +type PartitionConsumer interface { + // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you + // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this + // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call + // this before calling Close on the underlying client. + AsyncClose() + + // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain + // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service + // the Messages channel when this function is called, you will be competing with Close for messages; consider + // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes + // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client. + Close() error + + // Messages returns the read channel for the messages that are returned by + // the broker. 
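+	//
+	// A minimal consume loop, for illustration only:
+	//
+	//	for msg := range pc.Messages() {
+	//		fmt.Printf("offset %d: %s\n", msg.Offset, msg.Value)
+	//	}
+	//
+	// Calling AsyncClose (or Close) makes this loop terminate, since the channel
+	// is closed during teardown.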
+ Messages() <-chan *ConsumerMessage + + // Errors returns a read channel of errors that occurred during consuming, if + // enabled. By default, errors are logged and not returned over this channel. + // If you want to implement any custom error handling, set your config's + // Consumer.Return.Errors setting to true, and read from this channel. + Errors() <-chan *ConsumerError + + // HighWaterMarkOffset returns the high water mark offset of the partition, + // i.e. the offset that will be used for the next message that will be produced. + // You can use this to determine how far behind the processing is. + HighWaterMarkOffset() int64 +} + +type partitionConsumer struct { + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + + consumer *consumer + conf *Config + broker *brokerConsumer + messages chan *ConsumerMessage + errors chan *ConsumerError + feeder chan *FetchResponse + + trigger, dying chan none + closeOnce sync.Once + topic string + partition int32 + responseResult error + fetchSize int32 + offset int64 + retries int32 +} + +var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing + +func (child *partitionConsumer) sendError(err error) { + cErr := &ConsumerError{ + Topic: child.topic, + Partition: child.partition, + Err: err, + } + + if child.conf.Consumer.Return.Errors { + child.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (child *partitionConsumer) computeBackoff() time.Duration { + if child.conf.Consumer.Retry.BackoffFunc != nil { + retries := atomic.AddInt32(&child.retries, 1) + return child.conf.Consumer.Retry.BackoffFunc(int(retries)) + } + return child.conf.Consumer.Retry.Backoff +} + +func (child *partitionConsumer) dispatcher() { + for range child.trigger { + select { + case <-child.dying: + close(child.trigger) + case <-time.After(child.computeBackoff()): + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + child.broker = nil + } + + Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) + if err := child.dispatch(); err != nil { + child.sendError(err) + child.trigger <- none{} + } + } + } + + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + } + child.consumer.removeChild(child) + close(child.feeder) +} + +func (child *partitionConsumer) dispatch() error { + if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { + return err + } + + var leader *Broker + var err error + if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { + return err + } + + child.broker = child.consumer.refBrokerConsumer(leader) + + child.broker.input <- child + + return nil +} + +func (child *partitionConsumer) chooseStartingOffset(offset int64) error { + newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) + if err != nil { + return err + } + oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) + if err != nil { + return err + } + + switch { + case offset == OffsetNewest: + child.offset = newestOffset + case offset == OffsetOldest: + child.offset = oldestOffset + case offset >= oldestOffset && offset <= newestOffset: + child.offset = offset + default: + return ErrOffsetOutOfRange + } + + return nil +} + +func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { + return child.messages +} + +func (child *partitionConsumer) Errors() <-chan 
*ConsumerError { + return child.errors +} + +func (child *partitionConsumer) AsyncClose() { + // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes + // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and + // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will + // also just close itself) + child.closeOnce.Do(func() { + close(child.dying) + }) +} + +func (child *partitionConsumer) Close() error { + child.AsyncClose() + + var consumerErrors ConsumerErrors + for err := range child.errors { + consumerErrors = append(consumerErrors, err) + } + + if len(consumerErrors) > 0 { + return consumerErrors + } + return nil +} + +func (child *partitionConsumer) HighWaterMarkOffset() int64 { + return atomic.LoadInt64(&child.highWaterMarkOffset) +} + +func (child *partitionConsumer) responseFeeder() { + var msgs []*ConsumerMessage + expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) + firstAttempt := true + +feederLoop: + for response := range child.feeder { + msgs, child.responseResult = child.parseResponse(response) + + if child.responseResult == nil { + atomic.StoreInt32(&child.retries, 0) + } + + for i, msg := range msgs { + for _, interceptor := range child.conf.Consumer.Interceptors { + msg.safelyApplyInterceptor(interceptor) + } + messageSelect: + select { + case <-child.dying: + child.broker.acks.Done() + continue feederLoop + case child.messages <- msg: + firstAttempt = true + case <-expiryTicker.C: + if !firstAttempt { + child.responseResult = errTimedOut + child.broker.acks.Done() + remainingLoop: + for _, msg = range msgs[i:] { + select { + case child.messages <- msg: + case <-child.dying: + break remainingLoop + } + } + child.broker.input <- child + continue feederLoop + } else { + // current message has not been sent, return to select + // statement + firstAttempt = false + goto messageSelect + } + } + } + + child.broker.acks.Done() + } + + expiryTicker.Stop() + close(child.messages) + close(child.errors) +} + +func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + for _, msgBlock := range msgSet.Messages { + for _, msg := range msgBlock.Messages() { + offset := msg.Offset + timestamp := msg.Msg.Timestamp + if msg.Msg.Version >= 1 { + baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset + offset += baseOffset + if msg.Msg.LogAppendTime { + timestamp = msgBlock.Msg.Timestamp + } + } + if offset < child.offset { + continue + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: offset, + Timestamp: timestamp, + BlockTimestamp: msgBlock.Msg.Timestamp, + }) + child.offset = offset + 1 + } + } + if len(messages) == 0 { + child.offset++ + } + return messages, nil +} + +func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { + messages := make([]*ConsumerMessage, 0, len(batch.Records)) + + for _, rec := range batch.Records { + offset := batch.FirstOffset + rec.OffsetDelta + if offset < child.offset { + continue + } + timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta) + if batch.LogAppendTime { + timestamp = batch.MaxTimestamp + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: rec.Key, + Value: rec.Value, + Offset: 
offset,
+			Timestamp: timestamp,
+			Headers:   rec.Headers,
+		})
+		child.offset = offset + 1
+	}
+	if len(messages) == 0 {
+		child.offset++
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+	var (
+		metricRegistry          = child.conf.MetricRegistry
+		consumerBatchSizeMetric metrics.Histogram
+	)
+
+	if metricRegistry != nil {
+		consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
+	}
+
+	// If the request was throttled and empty, we log and return without error
+	if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
+		Logger.Printf(
+			"consumer/broker/%d FetchResponse throttled %v\n",
+			child.broker.broker.ID(), response.ThrottleTime)
+		return nil, nil
+	}
+
+	block := response.GetBlock(child.topic, child.partition)
+	if block == nil {
+		return nil, ErrIncompleteResponse
+	}
+
+	if block.Err != ErrNoError {
+		return nil, block.Err
+	}
+
+	nRecs, err := block.numRecords()
+	if err != nil {
+		return nil, err
+	}
+
+	consumerBatchSizeMetric.Update(int64(nRecs))
+
+	if nRecs == 0 {
+		partialTrailingMessage, err := block.isPartial()
+		if err != nil {
+			return nil, err
+		}
+		// We got no messages. If we got a trailing one then we need to ask for more data.
+		// Otherwise we just poll again and wait for one to be produced...
+		if partialTrailingMessage {
+			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+				// we can't ask for more data, we've hit the configured limit
+				child.sendError(ErrMessageTooLarge)
+				child.offset++ // skip this one so we can keep processing future messages
+			} else {
+				child.fetchSize *= 2
+				// check int32 overflow
+				if child.fetchSize < 0 {
+					child.fetchSize = math.MaxInt32
+				}
+				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+					child.fetchSize = child.conf.Consumer.Fetch.Max
+				}
+			}
+		}
+
+		return nil, nil
+	}
+
+	// we got messages, reset our fetch size in case it was increased for a previous request
+	child.fetchSize = child.conf.Consumer.Fetch.Default
+	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+	// abortedProducerIDs contains the producer IDs whose messages should be ignored as uncommitted:
+	// - a producer ID is added when the partitionConsumer reaches the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
+	// - a producer ID is removed when the partitionConsumer reaches an aborted controlRecord, meaning the aborted transaction for this producer is over
+	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
+	abortedTransactions := block.getAbortedTransactions()
+
+	var messages []*ConsumerMessage
+	for _, records := range block.RecordsSet {
+		switch records.recordsType {
+		case legacyRecords:
+			messageSetMessages, err := child.parseMessages(records.MsgSet)
+			if err != nil {
+				return nil, err
+			}
+
+			messages = append(messages, messageSetMessages...)
+		case defaultRecords:
+			// Consume any remaining aborted transactions up to the last offset of the current batch
+			for _, txn := range abortedTransactions {
+				if txn.FirstOffset > records.RecordBatch.LastOffset() {
+					break
+				}
+				abortedProducerIDs[txn.ProducerID] = struct{}{}
+				// Pop the aborted transaction so that we never add it again
+				abortedTransactions = abortedTransactions[1:]
+			}
+
+			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
+			if err != nil {
+				return nil, err
+			}
+
+			// Parse and commit offset but do not expose messages that are:
+			// - control records
+			// - part of an aborted transaction when set to `ReadCommitted`
+
+			// control record
+			isControl, err := records.isControl()
+			if err != nil {
+				// It is unclear why this continue-on-error existed to begin with.
+				// The safe bet is to ignore control messages under ReadUncommitted
+				// and to fail on them under ReadCommitted.
+				if child.conf.Consumer.IsolationLevel == ReadCommitted {
+					return nil, err
+				}
+				continue
+			}
+			if isControl {
+				controlRecord, err := records.getControlRecord()
+				if err != nil {
+					return nil, err
+				}
+
+				if controlRecord.Type == ControlRecordAbort {
+					delete(abortedProducerIDs, records.RecordBatch.ProducerID)
+				}
+				continue
+			}
+
+			// filter aborted transactions
+			if child.conf.Consumer.IsolationLevel == ReadCommitted {
+				_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
+				if records.RecordBatch.IsTransactional && isAborted {
+					continue
+				}
+			}
+
+			messages = append(messages, recordBatchMessages...)
+		default:
+			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
+		}
+	}
+
+	return messages, nil
+}
+
+type brokerConsumer struct {
+	consumer         *consumer
+	broker           *Broker
+	input            chan *partitionConsumer
+	newSubscriptions chan []*partitionConsumer
+	subscriptions    map[*partitionConsumer]none
+	wait             chan none
+	acks             sync.WaitGroup
+	refs             int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+	bc := &brokerConsumer{
+		consumer:         c,
+		broker:           broker,
+		input:            make(chan *partitionConsumer),
+		newSubscriptions: make(chan []*partitionConsumer),
+		wait:             make(chan none),
+		subscriptions:    make(map[*partitionConsumer]none),
+		refs:             0,
+	}
+
+	go withRecover(bc.subscriptionManager)
+	go withRecover(bc.subscriptionConsumer)
+
+	return bc
+}
+
+// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+// so the main goroutine can block waiting for work if it has none.
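+//
+// Condensed, for illustration only (the real loop is below):
+//
+//	select {
+//	case sub := <-input:             // accept new work at any time
+//		buffer = append(buffer, sub)
+//	case newSubscriptions <- buffer: // hand the whole batch to the worker
+//		buffer = nil
+//	case wait <- none{}:             // or let an idle worker block until work arrives
+//	}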
+func (bc *brokerConsumer) subscriptionManager() {
+	var buffer []*partitionConsumer
+
+	for {
+		if len(buffer) > 0 {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- buffer:
+				buffer = nil
+			case bc.wait <- none{}:
+			}
+		} else {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- nil:
+			}
+		}
+	}
+
+done:
+	close(bc.wait)
+	if len(buffer) > 0 {
+		bc.newSubscriptions <- buffer
+	}
+	close(bc.newSubscriptions)
+}
+
+// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
+func (bc *brokerConsumer) subscriptionConsumer() {
+	<-bc.wait // wait for our first piece of work
+
+	for newSubscriptions := range bc.newSubscriptions {
+		bc.updateSubscriptions(newSubscriptions)
+
+		if len(bc.subscriptions) == 0 {
+			// We're about to be shut down or we're about to receive more subscriptions.
+			// Either way, the signal just hasn't propagated to our goroutine yet.
+			<-bc.wait
+			continue
+		}
+
+		response, err := bc.fetchNewMessages()
+
+		if err != nil {
+			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+			bc.abort(err)
+			return
+		}
+
+		bc.acks.Add(len(bc.subscriptions))
+		for child := range bc.subscriptions {
+			child.feeder <- response
+		}
+		bc.acks.Wait()
+		bc.handleResponses()
+	}
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+	for _, child := range newSubscriptions {
+		bc.subscriptions[child] = none{}
+		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+	}
+
+	for child := range bc.subscriptions {
+		select {
+		case <-child.dying:
+			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		default:
+			// no-op
+		}
+	}
+}
+
+// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+func (bc *brokerConsumer) handleResponses() {
+	for child := range bc.subscriptions {
+		result := child.responseResult
+		child.responseResult = nil
+
+		switch result {
+		case nil:
+			// no-op
+		case errTimedOut:
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+				bc.broker.ID(), child.topic, child.partition)
+			delete(bc.subscriptions, child)
+		case ErrOffsetOutOfRange:
+			// there's no point in retrying this; it will just fail the same way again,
+			// so shut it down and force the user to choose what to do
+			child.sendError(result)
+			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
+			// not an error, but does need redispatching
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		default:
+			// unknown error; tell the user and try redispatching
+			child.sendError(result)
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		}
+	}
+}
+
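+// Illustrative only (editor's sketch, not part of sarama): most of the errors
+// routed through sendError above surface on the PartitionConsumer's Errors
+// channel when Consumer.Return.Errors is enabled. A minimal drain, assuming a
+// PartitionConsumer `pc` created from a config with that flag set:
+//
+//	config := NewConfig()
+//	config.Consumer.Return.Errors = true
+//	// ... create the consumer and pc with this config ...
+//	go func() {
+//		for consumerErr := range pc.Errors() {
+//			log.Printf("consume %s/%d: %v", consumerErr.Topic, consumerErr.Partition, consumerErr.Err)
+//		}
+//	}()
+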
+func (bc *brokerConsumer) abort(err error) {
+	bc.consumer.abandonBrokerConsumer(bc)
+	_ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+	for child := range bc.subscriptions {
+		child.sendError(err)
+		child.trigger <- none{}
+	}
+
+	for newSubscriptions := range bc.newSubscriptions {
+		if len(newSubscriptions) == 0 {
+			<-bc.wait
+			continue
+		}
+		for _, child := range newSubscriptions {
+			child.sendError(err)
+			child.trigger <- none{}
+		}
+	}
+}
+
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+	request := &FetchRequest{
+		MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
+		MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
+		request.Version = 1
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+		request.Version = 2
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 3
+		request.MaxBytes = MaxResponseSize
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 4
+		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 7
+		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
+		// and the epoch to -1 tells the broker not to generate a session ID that
+		// we're just going to ignore anyway.
+		request.SessionID = 0
+		request.SessionEpoch = -1
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
+		request.Version = 10
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
+		request.Version = 11
+		request.RackID = bc.consumer.conf.RackID
+	}
+
+	for child := range bc.subscriptions {
+		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+	}
+
+	return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
new file mode 100644
index 00000000000..fcc5792ea53
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group.go
@@ -0,0 +1,876 @@
+package sarama
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
+var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
+
+// ConsumerGroup is responsible for dividing up processing of topics and partitions
+// over a collection of processes (the members of the consumer group).
+type ConsumerGroup interface {
+	// Consume joins a cluster of consumers for a given list of topics and
+	// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
+	//
+	// The life-cycle of a session is represented by the following steps:
+	//
+	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
+	// and are assigned their "fair share" of partitions, aka 'claims'.
+	// 2. Before processing starts, the handler's Setup() hook is called to notify the user
+	// of the claims and allow any necessary preparation or alteration of state.
+	// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
+	// in a separate goroutine, which requires it to be thread-safe. Any state must be carefully protected
+	// from concurrent reads/writes.
+	// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
+	// parent context is cancelled or when a server-side rebalance cycle is initiated.
+	// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
+	// to allow the user to perform any final tasks before a rebalance.
+	// 6. Finally, marked offsets are committed one last time before claims are released.
+	//
+	// Please note that once a rebalance is triggered, sessions must be completed within
+	// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
+	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
+	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
+	// commit failures.
+	//
+	// This method should be called inside an infinite loop; when a
+	// server-side rebalance happens, the consumer session needs to be
+	// recreated to get the new claims.
+	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
+
+	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
+	// By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan error
+
+	// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
+	// this function before the object passes out of scope, as it will otherwise leak memory.
+	Close() error
+}
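+
+// Illustrative only (editor's sketch, not part of sarama): driving Consume in a
+// loop with a minimal handler. `ctx` is a caller-provided context and
+// `exampleHandler` is hypothetical; see ConsumerGroupHandler below for the hook
+// semantics.
+//
+//	config := NewConfig()
+//	config.Version = V0_10_2_0 // consumer groups require at least this version
+//	group, err := NewConsumerGroup([]string{"localhost:9092"}, "example-group", config)
+//	if err != nil { ... }
+//	defer group.Close()
+//	for {
+//		// Consume blocks for the lifetime of one session and returns when a
+//		// rebalance starts, so it must be called again to join the next session.
+//		if err := group.Consume(ctx, []string{"example-topic"}, exampleHandler); err != nil { ... }
+//		if ctx.Err() != nil {
+//			return // context cancelled: stop rejoining
+//		}
+//	}
+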
+type consumerGroup struct {
+	client Client
+
+	config   *Config
+	consumer Consumer
+	groupID  string
+	memberID string
+	errors   chan error
+
+	lock      sync.Mutex
+	closed    chan none
+	closeOnce sync.Once
+
+	userData []byte
+}
+
+// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
+func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := newConsumerGroup(groupID, client)
+	if err != nil {
+		_ = client.Close()
+	}
+	return c, err
+}
+
+// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+// PLEASE NOTE: consumer groups can only re-use but not share clients.
+func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newConsumerGroup(groupID, cli)
+}
+
+func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
+	config := client.Config()
+	if !config.Version.IsAtLeast(V0_10_2_0) {
+		return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
+	}
+
+	consumer, err := NewConsumerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+
+	return &consumerGroup{
+		client:   client,
+		consumer: consumer,
+		config:   config,
+		groupID:  groupID,
+		errors:   make(chan error, config.ChannelBufferSize),
+		closed:   make(chan none),
+	}, nil
+}
+
+// Errors implements ConsumerGroup.
+func (c *consumerGroup) Errors() <-chan error { return c.errors }
+
+// Close implements ConsumerGroup.
+func (c *consumerGroup) Close() (err error) {
+	c.closeOnce.Do(func() {
+		close(c.closed)
+
+		// leave group
+		if e := c.leave(); e != nil {
+			err = e
+		}
+
+		// drain errors
+		go func() {
+			close(c.errors)
+		}()
+		for e := range c.errors {
+			err = e
+		}
+
+		if e := c.client.Close(); e != nil {
+			err = e
+		}
+	})
+	return
+}
+
+// Consume implements ConsumerGroup.
+func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
+	// Ensure group is not closed
+	select {
+	case <-c.closed:
+		return ErrClosedConsumerGroup
+	default:
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Quick exit when no topics are provided
+	if len(topics) == 0 {
+		return fmt.Errorf("no topics provided")
+	}
+
+	// Refresh metadata for requested topics
+	if err := c.client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	// Init session
+	sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
+	if err == ErrClosedClient {
+		return ErrClosedConsumerGroup
+	} else if err != nil {
+		return err
+	}
+
+	// Periodically check whether the number of partitions for any consumed topic
+	// has changed, and trigger a rebalance if it has. Running this in a goroutine
+	// avoids spawning more than one checker when Consume is called again.
+	go c.loopCheckPartitionNumbers(topics, sess)
+
+	// Wait for session exit signal
+	<-sess.ctx.Done()
+
+	// Gracefully release session claims
+	return sess.release(true)
+}
+
+func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
+	select {
+	case <-c.closed:
+		return nil, ErrClosedConsumerGroup
+	case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+	}
+
+	if refreshCoordinator {
+		err := c.client.RefreshCoordinator(c.groupID)
+		if err != nil {
+			return c.retryNewSession(ctx, topics, handler, retries, true)
+		}
+	}
+
+	return c.newSession(ctx, topics, handler, retries-1)
+}
+
+func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		if retries <= 0 {
+			return nil, err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	}
+
+	// Join consumer group
+	join, err := c.joinGroupRequest(coordinator, topics)
+	if err != nil {
+		_ = coordinator.Close()
+		return nil, err
+	}
+	switch join.Err {
+	case ErrNoError:
+		c.memberID = join.MemberId
+	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
+		if retries <= 0 {
+			return nil, join.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	case ErrRebalanceInProgress: // retry after backoff
+		if retries <= 0 {
+			return nil, join.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, false)
+	default:
+		return nil, join.Err
+	}
+
+	// Prepare distribution plan if we joined as the leader
+	var plan BalanceStrategyPlan
+	if join.LeaderId == join.MemberId {
+		members, err := join.GetMembers()
+		if err != nil {
+			return nil, err
+		}
+
+		plan, err = c.balance(members)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Sync consumer group
+	groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
+	if
err != nil { + _ = coordinator.Close() + return nil, err + } + switch groupRequest.Err { + case ErrNoError: + case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately + c.memberID = "" + return c.newSession(ctx, topics, handler, retries) + case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh + if retries <= 0 { + return nil, groupRequest.Err + } + + return c.retryNewSession(ctx, topics, handler, retries, true) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, groupRequest.Err + } + + return c.retryNewSession(ctx, topics, handler, retries, false) + default: + return nil, groupRequest.Err + } + + // Retrieve and sort claims + var claims map[string][]int32 + if len(groupRequest.MemberAssignment) > 0 { + members, err := groupRequest.GetMemberAssignment() + if err != nil { + return nil, err + } + claims = members.Topics + c.userData = members.UserData + + for _, partitions := range claims { + sort.Sort(int32Slice(partitions)) + } + } + + return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler) +} + +func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) { + req := &JoinGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond), + ProtocolType: "consumer", + } + if c.config.Version.IsAtLeast(V0_10_1_0) { + req.Version = 1 + req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) + } + + // use static user-data if configured, otherwise use consumer-group userdata from the last sync + userData := c.config.Consumer.Group.Member.UserData + if len(userData) == 0 { + userData = c.userData + } + meta := &ConsumerGroupMemberMetadata{ + Topics: topics, + UserData: userData, + } + strategy := c.config.Consumer.Group.Rebalance.Strategy + if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { + return nil, err + } + + return coordinator.JoinGroup(req) +} + +func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) { + req := &SyncGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + GenerationId: generationID, + } + strategy := c.config.Consumer.Group.Rebalance.Strategy + for memberID, topics := range plan { + assignment := &ConsumerGroupMemberAssignment{Topics: topics} + userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) + if err != nil { + return nil, err + } + assignment.UserData = userDataBytes + if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil { + return nil, err + } + } + return coordinator.SyncGroup(req) +} + +func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) { + req := &HeartbeatRequest{ + GroupId: c.groupID, + MemberId: memberID, + GenerationId: generationID, + } + + return coordinator.Heartbeat(req) +} + +func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { + topics := make(map[string][]int32) + for _, meta := range members { + for _, topic := range meta.Topics { + topics[topic] = nil + } + } + + for topic := range topics { + partitions, err := c.client.Partitions(topic) + if err != nil { + return nil, err + } + topics[topic] = partitions + } + + strategy := c.config.Consumer.Group.Rebalance.Strategy + return 
strategy.Plan(members, topics)
+}
+
+// leave leaves the cluster; it is called by Close.
+func (c *consumerGroup) leave() error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.memberID == "" {
+		return nil
+	}
+
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		return err
+	}
+
+	resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
+		GroupId:  c.groupID,
+		MemberId: c.memberID,
+	})
+	if err != nil {
+		_ = coordinator.Close()
+		return err
+	}
+
+	// Unset memberID
+	c.memberID = ""
+
+	// Check response
+	switch resp.Err {
+	case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
+		return nil
+	default:
+		return resp.Err
+	}
+}
+
+func (c *consumerGroup) handleError(err error, topic string, partition int32) {
+	if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
+		err = &ConsumerError{
+			Topic:     topic,
+			Partition: partition,
+			Err:       err,
+		}
+	}
+
+	if !c.config.Consumer.Return.Errors {
+		Logger.Println(err)
+		return
+	}
+
+	select {
+	case <-c.closed:
+		// consumer is closed
+		return
+	default:
+	}
+
+	select {
+	case c.errors <- err:
+	default:
+		// no error listener
+	}
+}
+
+func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
+	pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
+	defer session.cancel()
+	defer pause.Stop()
+	var oldTopicToPartitionNum map[string]int
+	var err error
+	if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
+		return
+	}
+	for {
+		if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
+			return
+		} else {
+			for topic, num := range oldTopicToPartitionNum {
+				if newTopicToPartitionNum[topic] != num {
+					return // trigger the end of the session on exit
+				}
+			}
+		}
+		select {
+		case <-pause.C:
+		case <-session.ctx.Done():
+			Logger.Printf("loop check partition number coroutine will exit, topics %s", topics)
+			// the session was closed elsewhere, so exit
+			return
+		case <-c.closed:
+			return
+		}
+	}
+}
+
+func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
+	topicToPartitionNum := make(map[string]int, len(topics))
+	for _, topic := range topics {
+		if partitionNum, err := c.client.Partitions(topic); err != nil {
+			Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
+			return nil, err
+		} else {
+			topicToPartitionNum[topic] = len(partitionNum)
+		}
+	}
+	return topicToPartitionNum, nil
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupSession represents a consumer group member session.
+type ConsumerGroupSession interface {
+	// Claims returns information about the claimed partitions by topic.
+	Claims() map[string][]int32
+
+	// MemberID returns the cluster member ID.
+	MemberID() string
+
+	// GenerationID returns the current generation ID.
+	GenerationID() int32
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
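+	//
+	// For example (illustrative only), inside ConsumeClaim:
+	//
+	//	for msg := range claim.Messages() {
+	//		process(msg) // hypothetical processing step
+	//		sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")
+	//	}
+	//
+	// which is exactly what the MarkMessage helper does for you.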
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(topic string, partition int32, offset int64, metadata string)
+
+	// Commit commits the offset to the backend.
+	//
+	// Note: calling Commit performs a blocking synchronous operation.
+	Commit()
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, where MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(topic string, partition int32, offset int64, metadata string)
+
+	// MarkMessage marks a message as consumed.
+	MarkMessage(msg *ConsumerMessage, metadata string)
+
+	// Context returns the session context.
+	Context() context.Context
+}
+
+type consumerGroupSession struct {
+	parent       *consumerGroup
+	memberID     string
+	generationID int32
+	handler      ConsumerGroupHandler
+
+	claims  map[string][]int32
+	offsets *offsetManager
+	ctx     context.Context
+	cancel  func()
+
+	waitGroup       sync.WaitGroup
+	releaseOnce     sync.Once
+	hbDying, hbDead chan none
+}
+
+func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
+	// init offset manager
+	offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
+	if err != nil {
+		return nil, err
+	}
+
+	// init context
+	ctx, cancel := context.WithCancel(ctx)
+
+	// init session
+	sess := &consumerGroupSession{
+		parent:       parent,
+		memberID:     memberID,
+		generationID: generationID,
+		handler:      handler,
+		offsets:      offsets,
+		claims:       claims,
+		ctx:          ctx,
+		cancel:       cancel,
+		hbDying:      make(chan none),
+		hbDead:       make(chan none),
+	}
+
+	// start heartbeat loop
+	go sess.heartbeatLoop()
+
+	// create a POM for each claim
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			pom, err := offsets.ManagePartition(topic, partition)
+			if err != nil {
+				_ = sess.release(false)
+				return nil, err
+			}
+
+			// handle POM errors
+			go func(topic string, partition int32) {
+				for err := range pom.Errors() {
+					sess.parent.handleError(err, topic, partition)
+				}
+			}(topic, partition)
+		}
+	}
+
+	// perform setup
+	if err := handler.Setup(sess); err != nil {
+		_ = sess.release(true)
+		return nil, err
+	}
+
+	// start consuming
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			sess.waitGroup.Add(1)
+
+			go func(topic string, partition int32) {
+				defer sess.waitGroup.Done()
+
+				// cancel the session as soon as the first
+				// goroutine exits
+				defer sess.cancel()
+
+				// consume a single topic/partition, blocking
+				sess.consume(topic, partition)
+			}(topic, partition)
+		}
+	}
+	return sess, nil
+}
+
+func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
+func (s *consumerGroupSession) MemberID() string           { return s.memberID }
+func (s *consumerGroupSession) GenerationID() int32        { return s.generationID }
+
+func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom :=
s.offsets.findPOM(topic, partition); pom != nil { + pom.MarkOffset(offset, metadata) + } +} + +func (s *consumerGroupSession) Commit() { + s.offsets.Commit() +} + +func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + if pom := s.offsets.findPOM(topic, partition); pom != nil { + pom.ResetOffset(offset, metadata) + } +} + +func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) { + s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata) +} + +func (s *consumerGroupSession) Context() context.Context { + return s.ctx +} + +func (s *consumerGroupSession) consume(topic string, partition int32) { + // quick exit if rebalance is due + select { + case <-s.ctx.Done(): + return + case <-s.parent.closed: + return + default: + } + + // get next offset + offset := s.parent.config.Consumer.Offsets.Initial + if pom := s.offsets.findPOM(topic, partition); pom != nil { + offset, _ = pom.NextOffset() + } + + // create new claim + claim, err := newConsumerGroupClaim(s, topic, partition, offset) + if err != nil { + s.parent.handleError(err, topic, partition) + return + } + + // handle errors + go func() { + for err := range claim.Errors() { + s.parent.handleError(err, topic, partition) + } + }() + + // trigger close when session is done + go func() { + select { + case <-s.ctx.Done(): + case <-s.parent.closed: + } + claim.AsyncClose() + }() + + // start processing + if err := s.handler.ConsumeClaim(s, claim); err != nil { + s.parent.handleError(err, topic, partition) + } + + // ensure consumer is closed & drained + claim.AsyncClose() + for _, err := range claim.waitClosed() { + s.parent.handleError(err, topic, partition) + } +} + +func (s *consumerGroupSession) release(withCleanup bool) (err error) { + // signal release, stop heartbeat + s.cancel() + + // wait for consumers to exit + s.waitGroup.Wait() + + // perform release + s.releaseOnce.Do(func() { + if withCleanup { + if e := s.handler.Cleanup(s); e != nil { + s.parent.handleError(e, "", -1) + err = e + } + } + + if e := s.offsets.Close(); e != nil { + err = e + } + + close(s.hbDying) + <-s.hbDead + }) + + return +} + +func (s *consumerGroupSession) heartbeatLoop() { + defer close(s.hbDead) + defer s.cancel() // trigger the end of the session on exit + + pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval) + defer pause.Stop() + + retries := s.parent.config.Metadata.Retry.Max + for { + coordinator, err := s.parent.client.Coordinator(s.parent.groupID) + if err != nil { + if retries <= 0 { + s.parent.handleError(err, "", -1) + return + } + + select { + case <-s.hbDying: + return + case <-time.After(s.parent.config.Metadata.Retry.Backoff): + retries-- + } + continue + } + + resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID) + if err != nil { + _ = coordinator.Close() + + if retries <= 0 { + s.parent.handleError(err, "", -1) + return + } + + retries-- + continue + } + + switch resp.Err { + case ErrNoError: + retries = s.parent.config.Metadata.Retry.Max + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration: + return + default: + s.parent.handleError(resp.Err, "", -1) + return + } + + select { + case <-pause.C: + case <-s.hbDying: + return + } + } +} + +// -------------------------------------------------------------------- + +// ConsumerGroupHandler instances are used to handle individual topic/partition claims. 
+// It also provides hooks for your consumer group session life-cycle and allows you to
+// trigger logic before or after the consume loop(s).
+//
+// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently,
+// so ensure that all state is safely protected against race conditions.
+type ConsumerGroupHandler interface {
+	// Setup is run at the beginning of a new session, before ConsumeClaim.
+	Setup(ConsumerGroupSession) error
+
+	// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
+	// but before the offsets are committed for the very last time.
+	Cleanup(ConsumerGroupSession) error
+
+	// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
+	// Once the Messages() channel is closed, the Handler must finish its processing
+	// loop and exit.
+	ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
+}
+
+// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
+type ConsumerGroupClaim interface {
+	// Topic returns the consumed topic name.
+	Topic() string
+
+	// Partition returns the consumed partition.
+	Partition() int32
+
+	// InitialOffset returns the initial offset that was used as a starting point for this claim.
+	InitialOffset() int64
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker. The messages channel will be closed when a new rebalance cycle
+	// is due. You must finish processing and mark offsets within
+	// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
+	// re-assigned to another group member.
+	Messages() <-chan *ConsumerMessage
+}
+
+type consumerGroupClaim struct {
+	topic     string
+	partition int32
+	offset    int64
+	PartitionConsumer
+}
+
+func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
+	pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	if err == ErrOffsetOutOfRange {
+		offset = sess.parent.config.Consumer.Offsets.Initial
+		pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		for err := range pcm.Errors() {
+			sess.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	return &consumerGroupClaim{
+		topic:             topic,
+		partition:         partition,
+		offset:            offset,
+		PartitionConsumer: pcm,
+	}, nil
+}
+
+func (c *consumerGroupClaim) Topic() string        { return c.topic }
+func (c *consumerGroupClaim) Partition() int32     { return c.partition }
+func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
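+
+// A minimal handler sketch (exampleHandler is illustrative and not part of
+// this package): ConsumeClaim ranges over the claim's messages and marks each
+// one, leaving the actual commit to the session's offset manager.
+//
+//	type exampleHandler struct{}
+//
+//	func (exampleHandler) Setup(ConsumerGroupSession) error   { return nil }
+//	func (exampleHandler) Cleanup(ConsumerGroupSession) error { return nil }
+//
+//	func (exampleHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error {
+//		for msg := range claim.Messages() {
+//			// process msg.Value, then mark the message as consumed
+//			sess.MarkMessage(msg, "")
+//		}
+//		return nil
+//	}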
+
+// waitClosed drains any remaining messages and errors, ensuring the claim is fully closed.
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
+	go func() {
+		for range c.Messages() {
+		}
+	}()
+
+	for err := range c.Errors() {
+		errs = append(errs, err)
+	}
+	return
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
new file mode 100644
index 00000000000..2d02cc386f8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -0,0 +1,96 @@
+package sarama
+
+// ConsumerGroupMemberMetadata holds the metadata of a consumer group member
+type ConsumerGroupMemberMetadata struct {
+	Version  int16
+	Topics   []string
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putStringArray(m.Topics); err != nil {
+		return err
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	if m.Topics, err = pd.getStringArray(); err != nil {
+		return
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+// ConsumerGroupMemberAssignment holds the partition assignment for a consumer group member
+type ConsumerGroupMemberAssignment struct {
+	Version  int16
+	Topics   map[string][]int32
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
new file mode 100644
index 00000000000..e5ebdaef5ba
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -0,0 +1,38 @@
+package sarama
+
+// ConsumerMetadataRequest is used to look up the coordinator of a consumer group
+type ConsumerMetadataRequest struct {
+	ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+	tmp := new(FindCoordinatorRequest)
+	tmp.CoordinatorKey = r.ConsumerGroup
+	tmp.CoordinatorType = CoordinatorGroup
+	return tmp.encode(pe)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorRequest)
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+	r.ConsumerGroup = tmp.CoordinatorKey
+	return nil
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+	return 10
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 00000000000..1b5d00d2203
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,82 @@
+package sarama
+
+import (
+	"net"
+	"strconv"
+)
+
+// ConsumerMetadataResponse holds the response to a ConsumerMetadataRequest
+type ConsumerMetadataResponse struct {
+	Err             KError
+	Coordinator     *Broker
+	CoordinatorID   int32  // deprecated: use Coordinator.ID()
+	CoordinatorHost string // deprecated: use Coordinator.Addr()
+	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorResponse)
+
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+
+	r.Err = tmp.Err
+
+	r.Coordinator = tmp.Coordinator
+	if tmp.Coordinator == nil {
+		return nil
+	}
+
+	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+	// backwards compatibility
+	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+	if err != nil {
+		return err
+	}
+	port, err := strconv.ParseInt(portstr, 10, 32)
+	if err != nil {
+		return err
+	}
+	r.CoordinatorID = r.Coordinator.ID()
+	r.CoordinatorHost = host
+	r.CoordinatorPort = int32(port)
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+	if r.Coordinator == nil {
+		r.Coordinator = new(Broker)
+		r.Coordinator.id = r.CoordinatorID
+		r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
+	}
+
+	tmp := &FindCoordinatorResponse{
+		Version:     0,
+		Err:         r.Err,
+		Coordinator: r.Coordinator,
+	}
+
+	if err := tmp.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+	return 10
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go
new file mode 100644
index 00000000000..9b75ab53b3c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/control_record.go
@@ -0,0 +1,72 @@
+package sarama
+
+// ControlRecordType indicates the type of a control record
+type ControlRecordType int
+
+const (
+	// ControlRecordAbort is a control record for a transaction abort
+	ControlRecordAbort ControlRecordType = iota
+	// ControlRecordCommit is a control record for a transaction commit
+	ControlRecordCommit
+	// ControlRecordUnknown is a control record of unknown type
+	ControlRecordUnknown
+)
+
+// Control records are returned as records by fetchRequest.
+// However, unlike "normal" records, they mean nothing application-wise;
+// they only serve internal logic for supporting transactions.
+type ControlRecord struct {
+	Version          int16
+	CoordinatorEpoch int32
+	Type             ControlRecordType
+}
+
+func (cr *ControlRecord) decode(key, value packetDecoder) error {
+	var err error
+	cr.Version, err = value.getInt16()
+	if err != nil {
+		return err
+	}
+
+	cr.CoordinatorEpoch, err = value.getInt32()
+	if err != nil {
+		return err
+	}
+
+	// There is a version for the value part AND the key part.
+	// It is unclear whether the two are supposed to match; either way,
+	// all these versions can only be 0 for now.
+	cr.Version, err = key.getInt16()
+	if err != nil {
+		return err
+	}
+
+	recordType, err := key.getInt16()
+	if err != nil {
+		return err
+	}
+
+	switch recordType {
+	case 0:
+		cr.Type = ControlRecordAbort
+	case 1:
+		cr.Type = ControlRecordCommit
+	default:
+		// from the Java implementation:
+		// UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
+		cr.Type = ControlRecordUnknown
+	}
+	return nil
+}
+
+func (cr *ControlRecord) encode(key, value packetEncoder) {
+	value.putInt16(cr.Version)
+	value.putInt32(cr.CoordinatorEpoch)
+	key.putInt16(cr.Version)
+
+	switch cr.Type {
+	case ControlRecordAbort:
+		key.putInt16(0)
+	case ControlRecordCommit:
+		key.putInt16(1)
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
new file mode 100644
index 00000000000..38189a3cdf1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -0,0 +1,86 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"sync"
+)
+
+type crcPolynomial int8
+
+const (
+	crcIEEE crcPolynomial = iota
+	crcCastagnoli
+)
+
+var crc32FieldPool = sync.Pool{}
+
+func acquireCrc32Field(polynomial crcPolynomial) *crc32Field {
+	val := crc32FieldPool.Get()
+	if val != nil {
+		c := val.(*crc32Field)
+		c.polynomial = polynomial
+		return c
+	}
+	return newCRC32Field(polynomial)
+}
+
+func releaseCrc32Field(c *crc32Field) {
+	crc32FieldPool.Put(c)
+}
+
+var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
+type crc32Field struct {
+	startOffset int
+	polynomial  crcPolynomial
+}
+
+func (c *crc32Field) saveOffset(in int) {
+	c.startOffset = in
+}
+
+func (c *crc32Field) reserveLength() int {
+	return 4
+}
+
+func newCRC32Field(polynomial crcPolynomial) *crc32Field {
+	return &crc32Field{polynomial: polynomial}
+}
+
+func (c *crc32Field) run(curOffset int, buf []byte) error {
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
+	return nil
+}
+
+func (c *crc32Field) check(curOffset int, buf []byte) error {
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
+
+	expected := binary.BigEndian.Uint32(buf[c.startOffset:])
+	if crc != expected {
+		return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
+	}
+
+	return nil
+}
+
+func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
+	var tab *crc32.Table
+	switch c.polynomial {
+	case crcIEEE:
+		tab = crc32.IEEETable
+	case crcCastagnoli:
+		tab = castagnoliTable
+	default:
+		return 0, PacketDecodingError{"invalid CRC type"}
+	}
+	return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
+}
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
new file mode 100644
index 00000000000..46fb0440249
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsRequest struct {
+	TopicPartitions map[string]*TopicPartition
+	Timeout         time.Duration
+	ValidateOnly    bool
+}
+
+func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
+		return err
+	}
+
+	for 
topic, partition := range c.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := partition.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + pe.putBool(c.ValidateOnly) + + return nil +} + +func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + c.TopicPartitions = make(map[string]*TopicPartition, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitions[topic] = new(TopicPartition) + if err := c.TopicPartitions[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if c.ValidateOnly, err = pd.getBool(); err != nil { + return err + } + + return nil +} + +func (r *CreatePartitionsRequest) key() int16 { + return 37 +} + +func (r *CreatePartitionsRequest) version() int16 { + return 0 +} + +func (r *CreatePartitionsRequest) headerVersion() int16 { + return 1 +} + +func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartition struct { + Count int32 + Assignment [][]int32 +} + +func (t *TopicPartition) encode(pe packetEncoder) error { + pe.putInt32(t.Count) + + if len(t.Assignment) == 0 { + pe.putInt32(-1) + return nil + } + + if err := pe.putArrayLength(len(t.Assignment)); err != nil { + return err + } + + for _, assign := range t.Assignment { + if err := pe.putInt32Array(assign); err != nil { + return err + } + } + + return nil +} + +func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) { + if t.Count, err = pd.getInt32(); err != nil { + return err + } + + n, err := pd.getInt32() + if err != nil { + return err + } + if n <= 0 { + return nil + } + t.Assignment = make([][]int32, n) + + for i := 0; i < int(n); i++ { + if t.Assignment[i], err = pd.getInt32Array(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go new file mode 100644 index 00000000000..12ce78857bc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -0,0 +1,109 @@ +package sarama + +import ( + "fmt" + "time" +) + +type CreatePartitionsResponse struct { + ThrottleTime time.Duration + TopicPartitionErrors map[string]*TopicPartitionError +} + +func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { + return err + } + + for topic, partitionError := range c.TopicPartitionErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := partitionError.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitionErrors[topic] = new(TopicPartitionError) + if err := 
c.TopicPartitionErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (r *CreatePartitionsResponse) key() int16 { + return 37 +} + +func (r *CreatePartitionsResponse) version() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) headerVersion() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartitionError struct { + Err KError + ErrMsg *string +} + +func (t *TopicPartitionError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + +func (t *TopicPartitionError) encode(pe packetEncoder) error { + pe.putInt16(int16(t.Err)) + + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + + return nil +} + +func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kerr) + + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go new file mode 100644 index 00000000000..287acd069b6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_request.go @@ -0,0 +1,178 @@ +package sarama + +import ( + "time" +) + +type CreateTopicsRequest struct { + Version int16 + + TopicDetails map[string]*TopicDetail + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreateTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { + return err + } + for topic, detail := range c.TopicDetails { + if err := pe.putString(topic); err != nil { + return err + } + if err := detail.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + if c.Version >= 1 { + pe.putBool(c.ValidateOnly) + } + + return nil +} + +func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicDetails = make(map[string]*TopicDetail, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicDetails[topic] = new(TopicDetail) + if err = c.TopicDetails[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if version >= 1 { + c.ValidateOnly, err = pd.getBool() + if err != nil { + return err + } + + c.Version = version + } + + return nil +} + +func (c *CreateTopicsRequest) key() int16 { + return 19 +} + +func (c *CreateTopicsRequest) version() int16 { + return c.Version +} + +func (r *CreateTopicsRequest) headerVersion() int16 { + return 1 +} + +func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicDetail struct { + NumPartitions int32 + ReplicationFactor int16 + ReplicaAssignment map[int32][]int32 + ConfigEntries map[string]*string +} + +func (t *TopicDetail) encode(pe packetEncoder) error { + pe.putInt32(t.NumPartitions) + pe.putInt16(t.ReplicationFactor) + + if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil { + return err + } + for partition, assignment := range t.ReplicaAssignment { + pe.putInt32(partition) + if 
err := pe.putInt32Array(assignment); err != nil { + return err + } + } + + if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range t.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) { + if t.NumPartitions, err = pd.getInt32(); err != nil { + return err + } + if t.ReplicationFactor, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ReplicaAssignment = make(map[int32][]int32, n) + for i := 0; i < n; i++ { + replica, err := pd.getInt32() + if err != nil { + return err + } + if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil { + return err + } + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go new file mode 100644 index 00000000000..7e1448a6692 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -0,0 +1,127 @@ +package sarama + +import ( + "fmt" + "time" +) + +type CreateTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrors map[string]*TopicError +} + +func (c *CreateTopicsResponse) encode(pe packetEncoder) error { + if c.Version >= 2 { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(c.TopicErrors)); err != nil { + return err + } + for topic, topicError := range c.TopicErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := topicError.encode(pe, c.Version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + c.Version = version + + if version >= 2 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicErrors = make(map[string]*TopicError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicErrors[topic] = new(TopicError) + if err := c.TopicErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) key() int16 { + return 19 +} + +func (c *CreateTopicsResponse) version() int16 { + return c.Version +} + +func (c *CreateTopicsResponse) headerVersion() int16 { + return 0 +} + +func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicError struct { + Err KError + ErrMsg *string +} + +func (t *TopicError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + +func (t *TopicError) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(t.Err)) + + if version >= 1 { + if err 
:= pe.putNullableString(t.ErrMsg); err != nil { + return err + } + } + + return nil +} + +func (t *TopicError) decode(pd packetDecoder, version int16) (err error) { + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + if version >= 1 { + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go new file mode 100644 index 00000000000..e4dc3c185a6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/decompress.go @@ -0,0 +1,63 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var ( + err error + reader *gzip.Reader + readerIntf = gzipReaderPool.Get() + ) + if readerIntf != nil { + reader = readerIntf.(*gzip.Reader) + } else { + reader, err = gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + } + + defer gzipReaderPool.Put(reader) + + if err := reader.Reset(bytes.NewReader(data)); err != nil { + return nil, err + } + + return ioutil.ReadAll(reader) + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader := lz4ReaderPool.Get().(*lz4.Reader) + defer lz4ReaderPool.Put(reader) + + reader.Reset(bytes.NewReader(data)) + return ioutil.ReadAll(reader) + case CompressionZSTD: + return zstdDecompress(nil, data) + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go new file mode 100644 index 00000000000..4ac8bbee4cb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go @@ -0,0 +1,34 @@ +package sarama + +type DeleteGroupsRequest struct { + Groups []string +} + +func (r *DeleteGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DeleteGroupsRequest) key() int16 { + return 42 +} + +func (r *DeleteGroupsRequest) version() int16 { + return 0 +} + +func (r *DeleteGroupsRequest) headerVersion() int16 { + return 1 +} + +func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { + return V1_1_0_0 +} + +func (r *DeleteGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go new file mode 100644 index 00000000000..5e7b1ed3681 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go @@ -0,0 +1,74 @@ +package sarama + +import ( + "time" +) + +type DeleteGroupsResponse struct { + ThrottleTime time.Duration + GroupErrorCodes map[string]KError +} + +func (r *DeleteGroupsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil { + return err + } + for groupID, errorCode := range r.GroupErrorCodes { + if err 
:= pe.putString(groupID); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupErrorCodes = make(map[string]KError, n) + for i := 0; i < n; i++ { + groupID, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + r.GroupErrorCodes[groupID] = KError(errorCode) + } + + return nil +} + +func (r *DeleteGroupsResponse) key() int16 { + return 42 +} + +func (r *DeleteGroupsResponse) version() int16 { + return 0 +} + +func (r *DeleteGroupsResponse) headerVersion() int16 { + return 0 +} + +func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { + return V1_1_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go new file mode 100644 index 00000000000..dc106b17d62 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_request.go @@ -0,0 +1,130 @@ +package sarama + +import ( + "sort" + "time" +) + +// request message format is: +// [topic] timeout(int32) +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) offset(int64) + +type DeleteRecordsRequest struct { + Topics map[string]*DeleteRecordsRequestTopic + Timeout time.Duration +} + +func (d *DeleteRecordsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + keys := make([]string, 0, len(d.Topics)) + for topic := range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsRequestTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsRequestTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (d *DeleteRecordsRequest) key() int16 { + return 21 +} + +func (d *DeleteRecordsRequest) version() int16 { + return 0 +} + +func (d *DeleteRecordsRequest) headerVersion() int16 { + return 1 +} + +func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsRequestTopic struct { + PartitionOffsets map[int32]int64 // partition => offset +} + +func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil { + return err + } + keys := make([]int32, 0, len(t.PartitionOffsets)) + for partition := range t.PartitionOffsets { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + 
pe.putInt64(t.PartitionOffsets[partition]) + } + return nil +} + +func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.PartitionOffsets = make(map[int32]int64, n) + for i := 0; i < n; i++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + offset, err := pd.getInt64() + if err != nil { + return err + } + t.PartitionOffsets[partition] = offset + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go new file mode 100644 index 00000000000..d530b4c7e91 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_response.go @@ -0,0 +1,162 @@ +package sarama + +import ( + "sort" + "time" +) + +// response message format is: +// throttleMs(int32) [topic] +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) low_watermark(int64) error_code(int16) + +type DeleteRecordsResponse struct { + Version int16 + ThrottleTime time.Duration + Topics map[string]*DeleteRecordsResponseTopic +} + +func (d *DeleteRecordsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + keys := make([]string, 0, len(d.Topics)) + for topic := range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + return nil +} + +func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error { + d.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsResponseTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsResponseTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + return nil +} + +func (d *DeleteRecordsResponse) key() int16 { + return 21 +} + +func (d *DeleteRecordsResponse) version() int16 { + return 0 +} + +func (d *DeleteRecordsResponse) headerVersion() int16 { + return 0 +} + +func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsResponseTopic struct { + Partitions map[int32]*DeleteRecordsResponsePartition +} + +func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.Partitions)); err != nil { + return err + } + keys := make([]int32, 0, len(t.Partitions)) + for partition := range t.Partitions { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + if err := t.Partitions[partition].encode(pe); err != nil { + return err + } + } + return nil +} + +func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n) + for i := 0; i < n; i++ { + partition, err := pd.getInt32() + if err != nil { + 
return err + } + details := new(DeleteRecordsResponsePartition) + if err = details.decode(pd, version); err != nil { + return err + } + t.Partitions[partition] = details + } + } + + return nil +} + +type DeleteRecordsResponsePartition struct { + LowWatermark int64 + Err KError +} + +func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error { + pe.putInt64(t.LowWatermark) + pe.putInt16(int16(t.Err)) + return nil +} + +func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error { + lowWatermark, err := pd.getInt64() + if err != nil { + return err + } + t.LowWatermark = lowWatermark + + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go new file mode 100644 index 00000000000..ba6780a8e39 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go @@ -0,0 +1,52 @@ +package sarama + +import "time" + +type DeleteTopicsRequest struct { + Version int16 + Topics []string + Timeout time.Duration +} + +func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putStringArray(d.Topics); err != nil { + return err + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + if d.Topics, err = pd.getStringArray(); err != nil { + return err + } + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + d.Version = version + return nil +} + +func (d *DeleteTopicsRequest) key() int16 { + return 20 +} + +func (d *DeleteTopicsRequest) version() int16 { + return d.Version +} + +func (d *DeleteTopicsRequest) headerVersion() int16 { + return 1 +} + +func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go new file mode 100644 index 00000000000..733961a89a0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go @@ -0,0 +1,82 @@ +package sarama + +import "time" + +type DeleteTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrorCodes map[string]KError +} + +func (d *DeleteTopicsResponse) encode(pe packetEncoder) error { + if d.Version >= 1 { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil { + return err + } + for topic, errorCode := range d.TopicErrorCodes { + if err := pe.putString(topic); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + d.Version = version + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.TopicErrorCodes = make(map[string]KError, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + d.TopicErrorCodes[topic] = KError(errorCode) + } + + return nil +} + +func (d *DeleteTopicsResponse) key() int16 { + 
return 20 +} + +func (d *DeleteTopicsResponse) version() int16 { + return d.Version +} + +func (d *DeleteTopicsResponse) headerVersion() int16 { + return 0 +} + +func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go new file mode 100644 index 00000000000..d0c73528081 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -0,0 +1,116 @@ +package sarama + +type DescribeConfigsRequest struct { + Version int16 + Resources []*ConfigResource + IncludeSynonyms bool +} + +type ConfigResource struct { + Type ConfigResourceType + Name string + ConfigNames []string +} + +func (r *DescribeConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + pe.putInt8(int8(c.Type)) + if err := pe.putString(c.Name); err != nil { + return err + } + + if len(c.ConfigNames) == 0 { + pe.putInt32(-1) + continue + } + if err := pe.putStringArray(c.ConfigNames); err != nil { + return err + } + } + + if r.Version >= 1 { + pe.putBool(r.IncludeSynonyms) + } + + return nil +} + +func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ConfigResource, n) + + for i := 0; i < n; i++ { + r.Resources[i] = &ConfigResource{} + t, err := pd.getInt8() + if err != nil { + return err + } + r.Resources[i].Type = ConfigResourceType(t) + name, err := pd.getString() + if err != nil { + return err + } + r.Resources[i].Name = name + + confLength, err := pd.getArrayLength() + + if err != nil { + return err + } + + if confLength == -1 { + continue + } + + cfnames := make([]string, confLength) + for i := 0; i < confLength; i++ { + s, err := pd.getString() + if err != nil { + return err + } + cfnames[i] = s + } + r.Resources[i].ConfigNames = cfnames + } + r.Version = version + if r.Version >= 1 { + b, err := pd.getBool() + if err != nil { + return err + } + r.IncludeSynonyms = b + } + + return nil +} + +func (r *DescribeConfigsRequest) key() int16 { + return 32 +} + +func (r *DescribeConfigsRequest) version() int16 { + return r.Version +} + +func (r *DescribeConfigsRequest) headerVersion() int16 { + return 1 +} + +func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go new file mode 100644 index 00000000000..063ae911259 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -0,0 +1,327 @@ +package sarama + +import ( + "fmt" + "time" +) + +type ConfigSource int8 + +func (s ConfigSource) String() string { + switch s { + case SourceUnknown: + return "Unknown" + case SourceTopic: + return "Topic" + case SourceDynamicBroker: + return "DynamicBroker" + case SourceDynamicDefaultBroker: + return "DynamicDefaultBroker" + case SourceStaticBroker: + return "StaticBroker" + case SourceDefault: + return "Default" + } + return fmt.Sprintf("Source Invalid: %d", int(s)) +} + +const ( + SourceUnknown ConfigSource = iota + SourceTopic + SourceDynamicBroker + SourceDynamicDefaultBroker + 
SourceStaticBroker + SourceDefault +) + +type DescribeConfigsResponse struct { + Version int16 + ThrottleTime time.Duration + Resources []*ResourceResponse +} + +type ResourceResponse struct { + ErrorCode int16 + ErrorMsg string + Type ConfigResourceType + Name string + Configs []*ConfigEntry +} + +type ConfigEntry struct { + Name string + Value string + ReadOnly bool + Default bool + Source ConfigSource + Sensitive bool + Synonyms []*ConfigSynonym +} + +type ConfigSynonym struct { + ConfigName string + ConfigValue string + Source ConfigSource +} + +func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + if err = pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + if err = c.encode(pe, r.Version); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ResourceResponse, n) + for i := 0; i < n; i++ { + rr := &ResourceResponse{} + if err := rr.decode(pd, version); err != nil { + return err + } + r.Resources[i] = rr + } + + return nil +} + +func (r *DescribeConfigsResponse) key() int16 { + return 32 +} + +func (r *DescribeConfigsResponse) version() int16 { + return r.Version +} + +func (r *DescribeConfigsResponse) headerVersion() int16 { + return 0 +} + +func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V1_0_0_0 + case 2: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(r.ErrorCode) + + if err = pe.putString(r.ErrorMsg); err != nil { + return err + } + + pe.putInt8(int8(r.Type)) + + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putArrayLength(len(r.Configs)); err != nil { + return err + } + + for _, c := range r.Configs { + if err = c.encode(pe, version); err != nil { + return err + } + } + return nil +} + +func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) { + ec, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = ec + + em, err := pd.getString() + if err != nil { + return err + } + r.ErrorMsg = em + + t, err := pd.getInt8() + if err != nil { + return err + } + r.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + r.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Configs = make([]*ConfigEntry, n) + for i := 0; i < n; i++ { + c := &ConfigEntry{} + if err := c.decode(pd, version); err != nil { + return err + } + r.Configs[i] = c + } + return nil +} + +func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) { + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putString(r.Value); err != nil { + return err + } + + pe.putBool(r.ReadOnly) + + if version <= 0 { + pe.putBool(r.Default) + pe.putBool(r.Sensitive) + } else { + pe.putInt8(int8(r.Source)) + pe.putBool(r.Sensitive) + + if err := pe.putArrayLength(len(r.Synonyms)); err != nil { + return err + } + for _, c := range r.Synonyms { + if err = c.encode(pe, version); err != nil { + return err + } + } + } + + return nil +} 
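+
+// A minimal usage sketch: callers normally obtain these entries through
+// ClusterAdmin.DescribeConfig rather than decoding a DescribeConfigsResponse
+// by hand. The broker address and topic name below are illustrative values.
+//
+//	cfg := NewConfig()
+//	cfg.Version = V1_1_0_0 // assumes brokers on Kafka 1.1.0 or newer
+//	admin, err := NewClusterAdmin([]string{"localhost:9092"}, cfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer admin.Close()
+//
+//	entries, err := admin.DescribeConfig(ConfigResource{
+//		Type: TopicResource,
+//		Name: "my-topic",
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, entry := range entries {
+//		fmt.Printf("%s = %s (source: %s)\n", entry.Name, entry.Value, entry.Source)
+//	}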
+
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
+	if version == 0 {
+		r.Source = SourceUnknown
+	}
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Value = value
+
+	read, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.ReadOnly = read
+
+	if version == 0 {
+		defaultB, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.Default = defaultB
+		if defaultB {
+			r.Source = SourceDefault
+		}
+	} else {
+		source, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Source = ConfigSource(source)
+		r.Default = r.Source == SourceDefault
+	}
+
+	sensitive, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.Sensitive = sensitive
+
+	if version > 0 {
+		n, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Synonyms = make([]*ConfigSynonym, n)
+
+		for i := 0; i < n; i++ {
+			s := &ConfigSynonym{}
+			if err := s.decode(pd, version); err != nil {
+				return err
+			}
+			r.Synonyms[i] = s
+		}
+	}
+	return nil
+}
+
+func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
+	err = pe.putString(c.ConfigName)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putString(c.ConfigValue)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(c.Source))
+
+	return nil
+}
+
+func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigName = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigValue = value
+
+	source, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	c.Source = ConfigSource(source)
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
new file mode 100644
index 00000000000..f8962da58fc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -0,0 +1,34 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+	Groups []string
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+	return pe.putStringArray(r.Groups)
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Groups, err = pd.getStringArray()
+	return
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+	return 15
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *DescribeGroupsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
new file mode 100644
index 00000000000..bc242e4217d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -0,0 +1,191 @@
+package sarama
+
+type DescribeGroupsResponse struct {
+	Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+
+	for _, groupDescription := range r.Groups {
+		if err := groupDescription.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version 
int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Groups = make([]*GroupDescription, n) + for i := 0; i < n; i++ { + r.Groups[i] = new(GroupDescription) + if err := r.Groups[i].decode(pd); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) key() int16 { + return 15 +} + +func (r *DescribeGroupsResponse) version() int16 { + return 0 +} + +func (r *DescribeGroupsResponse) headerVersion() int16 { + return 0 +} + +func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +type GroupDescription struct { + Err KError + GroupId string + State string + ProtocolType string + Protocol string + Members map[string]*GroupMemberDescription +} + +func (gd *GroupDescription) encode(pe packetEncoder) error { + pe.putInt16(int16(gd.Err)) + + if err := pe.putString(gd.GroupId); err != nil { + return err + } + if err := pe.putString(gd.State); err != nil { + return err + } + if err := pe.putString(gd.ProtocolType); err != nil { + return err + } + if err := pe.putString(gd.Protocol); err != nil { + return err + } + + if err := pe.putArrayLength(len(gd.Members)); err != nil { + return err + } + + for memberId, groupMemberDescription := range gd.Members { + if err := pe.putString(memberId); err != nil { + return err + } + if err := groupMemberDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (gd *GroupDescription) decode(pd packetDecoder) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + gd.Err = KError(kerr) + + if gd.GroupId, err = pd.getString(); err != nil { + return + } + if gd.State, err = pd.getString(); err != nil { + return + } + if gd.ProtocolType, err = pd.getString(); err != nil { + return + } + if gd.Protocol, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + gd.Members = make(map[string]*GroupMemberDescription) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + gd.Members[memberId] = new(GroupMemberDescription) + if err := gd.Members[memberId].decode(pd); err != nil { + return err + } + } + + return nil +} + +type GroupMemberDescription struct { + ClientId string + ClientHost string + MemberMetadata []byte + MemberAssignment []byte +} + +func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { + if err := pe.putString(gmd.ClientId); err != nil { + return err + } + if err := pe.putString(gmd.ClientHost); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberMetadata); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberAssignment); err != nil { + return err + } + + return nil +} + +func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { + if gmd.ClientId, err = pd.getString(); err != nil { + return + } + if gmd.ClientHost, err = pd.getString(); err != nil { + return + } + if gmd.MemberMetadata, err = pd.getBytes(); err != nil { + return + } + if gmd.MemberAssignment, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(gmd.MemberAssignment, assignment) + return assignment, err +} + +func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { + metadata := new(ConsumerGroupMemberMetadata) + err := 
decode(gmd.MemberMetadata, metadata) + return metadata, err +} diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go new file mode 100644 index 00000000000..c0bf04e04e2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go @@ -0,0 +1,87 @@ +package sarama + +// DescribeLogDirsRequest is a describe request to get partitions' log size +type DescribeLogDirsRequest struct { + // Version 0 and 1 are equal + // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. + Version int16 + + // If this is an empty array, all topics will be queried + DescribeTopics []DescribeLogDirsRequestTopic +} + +// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic +type DescribeLogDirsRequestTopic struct { + Topic string + PartitionIDs []int32 +} + +func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error { + length := len(r.DescribeTopics) + if length == 0 { + // In order to query all topics we must send null + length = -1 + } + + if err := pe.putArrayLength(length); err != nil { + return err + } + + for _, d := range r.DescribeTopics { + if err := pe.putString(d.Topic); err != nil { + return err + } + + if err := pe.putInt32Array(d.PartitionIDs); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == -1 { + n = 0 + } + + topics := make([]DescribeLogDirsRequestTopic, n) + for i := 0; i < n; i++ { + topics[i] = DescribeLogDirsRequestTopic{} + + topic, err := pd.getString() + if err != nil { + return err + } + topics[i].Topic = topic + + pIDs, err := pd.getInt32Array() + if err != nil { + return err + } + topics[i].PartitionIDs = pIDs + } + r.DescribeTopics = topics + + return nil +} + +func (r *DescribeLogDirsRequest) key() int16 { + return 35 +} + +func (r *DescribeLogDirsRequest) version() int16 { + return r.Version +} + +func (r *DescribeLogDirsRequest) headerVersion() int16 { + return 1 +} + +func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go new file mode 100644 index 00000000000..411da38ad20 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go @@ -0,0 +1,229 @@ +package sarama + +import "time" + +type DescribeLogDirsResponse struct { + ThrottleTime time.Duration + + // Version 0 and 1 are equal + // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. 
+ Version int16 + + LogDirs []DescribeLogDirsResponseDirMetadata +} + +func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.LogDirs)); err != nil { + return err + } + + for _, dir := range r.LogDirs { + if err := dir.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + // Decode array of DescribeLogDirsResponseDirMetadata + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n) + for i := 0; i < n; i++ { + dir := DescribeLogDirsResponseDirMetadata{} + if err := dir.decode(pd, version); err != nil { + return err + } + r.LogDirs[i] = dir + } + + return nil +} + +func (r *DescribeLogDirsResponse) key() int16 { + return 35 +} + +func (r *DescribeLogDirsResponse) version() int16 { + return r.Version +} + +func (r *DescribeLogDirsResponse) headerVersion() int16 { + return 0 +} + +func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type DescribeLogDirsResponseDirMetadata struct { + ErrorCode KError + + // The absolute log directory path + Path string + Topics []DescribeLogDirsResponseTopic +} + +func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error { + pe.putInt16(int16(r.ErrorCode)) + + if err := pe.putString(r.Path); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Topics)); err != nil { + return err + } + for _, topic := range r.Topics { + if err := topic.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error { + errCode, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = KError(errCode) + + path, err := pd.getString() + if err != nil { + return err + } + r.Path = path + + // Decode array of DescribeLogDirsResponseTopic + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Topics = make([]DescribeLogDirsResponseTopic, n) + for i := 0; i < n; i++ { + t := DescribeLogDirsResponseTopic{} + + if err := t.decode(pd, version); err != nil { + return err + } + + r.Topics[i] = t + } + + return nil +} + +// DescribeLogDirsResponseTopic contains a topic's partitions descriptions +type DescribeLogDirsResponseTopic struct { + Topic string + Partitions []DescribeLogDirsResponsePartition +} + +func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error { + if err := pe.putString(r.Topic); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Partitions)); err != nil { + return err + } + for _, partition := range r.Partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error { + t, err := pd.getString() + if err != nil { + return err + } + r.Topic = t + + n, err := pd.getArrayLength() + if err != nil { + return err + } + r.Partitions = make([]DescribeLogDirsResponsePartition, n) + for i := 0; i < n; i++ { + p := DescribeLogDirsResponsePartition{} + if err := p.decode(pd, version); err != nil { + return err + } + r.Partitions[i] = p + } + + return nil +} + +// DescribeLogDirsResponsePartition describes a partition's log directory 
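+//
+// As a rough illustration (resp stands for an already decoded
+// *DescribeLogDirsResponse; the variable is hypothetical), the total log size
+// per directory can be derived by walking the parent response:
+//
+//	totals := make(map[string]int64)
+//	for _, dir := range resp.LogDirs {
+//		for _, topic := range dir.Topics {
+//			for _, partition := range topic.Partitions {
+//				totals[dir.Path] += partition.Size
+//			}
+//		}
+//	}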
+type DescribeLogDirsResponsePartition struct { + PartitionID int32 + + // The size of the log segments of the partition in bytes. + Size int64 + + // The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or + // current replica's LEO (if it is the future log for the partition) + OffsetLag int64 + + // True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of + // the replica in the future. + IsTemporary bool +} + +func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error { + pe.putInt32(r.PartitionID) + pe.putInt64(r.Size) + pe.putInt64(r.OffsetLag) + pe.putBool(r.IsTemporary) + + return nil +} + +func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error { + pID, err := pd.getInt32() + if err != nil { + return err + } + r.PartitionID = pID + + size, err := pd.getInt64() + if err != nil { + return err + } + r.Size = size + + lag, err := pd.getInt64() + if err != nil { + return err + } + r.OffsetLag = lag + + isTemp, err := pd.getBool() + if err != nil { + return err + } + r.IsTemporary = isTemp + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml new file mode 100644 index 00000000000..5003542d48b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -0,0 +1,10 @@ +name: sarama + +up: + - go: + version: '1.15.2' + +commands: + test: + run: make test + desc: 'run unit tests' diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml new file mode 100644 index 00000000000..25593fd3b71 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/docker-compose.yml @@ -0,0 +1,134 @@ +version: '3.7' +services: + zookeeper-1: + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '1' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + zookeeper-2: + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '2' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + zookeeper-3: + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '3' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + kafka-1: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 
'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '1' + KAFKA_BROKER_RACK: '1' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-2: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '2' + KAFKA_BROKER_RACK: '2' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-3: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '3' + KAFKA_BROKER_RACK: '3' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-4: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '4' + KAFKA_BROKER_RACK: '4' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-5: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' 
+ KAFKA_BROKER_ID: '5' + KAFKA_BROKER_RACK: '5' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + toxiproxy: + image: 'shopify/toxiproxy:2.1.4' + ports: + # The tests themselves actually start the proxies on these ports + - '29091:29091' + - '29092:29092' + - '29093:29093' + - '29094:29094' + - '29095:29095' + # This is the toxiproxy API port + - '8474:8474' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go new file mode 100644 index 00000000000..025bad61f06 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -0,0 +1,94 @@ +package sarama + +import ( + "fmt" + + "github.com/rcrowley/go-metrics" +) + +// Encoder is the interface that wraps the basic Encode method. +// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. +type encoder interface { + encode(pe packetEncoder) error +} + +type encoderWithHeader interface { + encoder + headerVersion() int16 +} + +// Encode takes an Encoder and turns it into bytes while potentially recording metrics. +func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { + if e == nil { + return nil, nil + } + + var prepEnc prepEncoder + var realEnc realEncoder + + err := e.encode(&prepEnc) + if err != nil { + return nil, err + } + + if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { + return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} + } + + realEnc.raw = make([]byte, prepEnc.length) + realEnc.registry = metricRegistry + err = e.encode(&realEnc) + if err != nil { + return nil, err + } + + return realEnc.raw, nil +} + +// Decoder is the interface that wraps the basic Decode method. +// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. +type decoder interface { + decode(pd packetDecoder) error +} + +type versionedDecoder interface { + decode(pd packetDecoder, version int16) error +} + +// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, +// interpreted using Kafka's encoding rules.
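(Aside: the encode helper above is a two-pass design. A prepEncoder pass walks the value once to compute and validate the total length, then a realEncoder pass writes into a single buffer allocated at exactly that size, so nothing reallocates mid-encode. Below is a minimal standalone sketch of the same size-then-write pattern; the payload type and its field layout are invented for illustration and are not sarama's internal API.)

package main

import (
	"encoding/binary"
	"fmt"
)

// payload is a stand-in for a request object: pass 1 reports its size,
// pass 2 writes itself into a caller-provided, pre-sized buffer.
type payload struct {
	id   int32
	name string
}

func (p payload) size() int {
	return 4 + 2 + len(p.name) // int32 id + int16 length prefix + name bytes
}

func (p payload) write(buf []byte) {
	binary.BigEndian.PutUint32(buf[0:4], uint32(p.id))
	binary.BigEndian.PutUint16(buf[4:6], uint16(len(p.name)))
	copy(buf[6:], p.name)
}

func encodePayload(p payload) []byte {
	buf := make([]byte, p.size()) // pass 1: measure, allocate once
	p.write(buf)                  // pass 2: fill the exact-size buffer
	return buf
}

func main() {
	fmt.Printf("% x\n", encodePayload(payload{id: 7, name: "demo"}))
}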
+func decode(buf []byte, in decoder) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} + +func versionedDecode(buf []byte, in versionedDecoder, version int16) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper, version) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go new file mode 100644 index 00000000000..6635425ddd6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_request.go @@ -0,0 +1,54 @@ +package sarama + +type EndTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + TransactionResult bool +} + +func (a *EndTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + pe.putBool(a.TransactionResult) + + return nil +} + +func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.TransactionResult, err = pd.getBool(); err != nil { + return err + } + return nil +} + +func (a *EndTxnRequest) key() int16 { + return 26 +} + +func (a *EndTxnRequest) version() int16 { + return 0 +} + +func (r *EndTxnRequest) headerVersion() int16 { + return 1 +} + +func (a *EndTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go new file mode 100644 index 00000000000..763976726cc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -0,0 +1,48 @@ +package sarama + +import ( + "time" +) + +type EndTxnResponse struct { + ThrottleTime time.Duration + Err KError +} + +func (e *EndTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(e.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(e.Err)) + return nil +} + +func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + e.Err = KError(kerr) + + return nil +} + +func (e *EndTxnResponse) key() int16 { + return 26 +} + +func (e *EndTxnResponse) version() int16 { + return 0 +} + +func (r *EndTxnResponse) headerVersion() int16 { + return 0 +} + +func (e *EndTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go new file mode 100644 index 00000000000..ca621b09268 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -0,0 +1,385 @@ +package sarama + +import ( + "errors" + "fmt" +) + +// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored +// or otherwise failed to respond.
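(Aside: the errors declared below are fixed sentinel values, so callers can match them with errors.Is or direct comparison instead of inspecting strings. A hedged sketch follows; the broker address is a placeholder, while NewClient and NewConfig are the package's standard constructors.)

package main

import (
	"errors"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Any call that exhausts every broker can surface ErrOutOfBrokers.
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if errors.Is(err, sarama.ErrOutOfBrokers) {
		log.Fatal("no reachable brokers; check the cluster address")
	}
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}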
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") + +// ErrClosedClient is the error returned when a method is called on a client that has been closed. +var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") + +// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does +// not contain the expected information. +var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") + +// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index +// (meaning one outside of the range [0...numPartitions-1]). +var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") + +// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. +var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") + +// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. +var ErrNotConnected = errors.New("kafka: broker not connected") + +// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected +// when requesting messages, since as an optimization the server is allowed to return a partial message at the end +// of the message set. +var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") + +// ErrShuttingDown is returned when a producer receives a message during shutdown. +var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down") + +// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max. +var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") + +// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing +// a RecordBatch. +var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch") + +// ErrControllerNotAvailable is returned when the server didn't give a correct controller id. Maybe the kafka server's +// version is lower than 0.10.0.0. +var ErrControllerNotAvailable = errors.New("kafka: controller is not available") + +// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update +// the metadata. +var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata") + +// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, +// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. +type PacketEncodingError struct { + Info string +} + +func (err PacketEncodingError) Error() string { + return fmt.Sprintf("kafka: error encoding packet: %s", err.Info) +} + +// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. +// This can be a bad CRC or length field, or any other invalid value. +type PacketDecodingError struct { + Info string +} + +func (err PacketDecodingError) Error() string { + return fmt.Sprintf("kafka: error decoding packet: %s", err.Info) +} + +// ConfigurationError is the type of error returned from a constructor (e.g.
NewClient, or NewConsumer) +// when the specified configuration is invalid. +type ConfigurationError string + +func (err ConfigurationError) Error() string { + return "kafka: invalid configuration (" + string(err) + ")" +} + +// KError is the type of error that can be returned directly by the Kafka broker. +// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes +type KError int16 + +// MultiError is used to contain multiple errors. +type MultiError struct { + Errors *[]error +} + +func (mErr MultiError) Error() string { + var errString = "" + for _, err := range *mErr.Errors { + errString += err.Error() + "," + } + return errString } ... wait
KError = 55 + ErrKafkaStorageError KError = 56 + ErrLogDirNotFound KError = 57 + ErrSASLAuthenticationFailed KError = 58 + ErrUnknownProducerID KError = 59 + ErrReassignmentInProgress KError = 60 + ErrDelegationTokenAuthDisabled KError = 61 + ErrDelegationTokenNotFound KError = 62 + ErrDelegationTokenOwnerMismatch KError = 63 + ErrDelegationTokenRequestNotAllowed KError = 64 + ErrDelegationTokenAuthorizationFailed KError = 65 + ErrDelegationTokenExpired KError = 66 + ErrInvalidPrincipalType KError = 67 + ErrNonEmptyGroup KError = 68 + ErrGroupIDNotFound KError = 69 + ErrFetchSessionIDNotFound KError = 70 + ErrInvalidFetchSessionEpoch KError = 71 + ErrListenerNotFound KError = 72 + ErrTopicDeletionDisabled KError = 73 + ErrFencedLeaderEpoch KError = 74 + ErrUnknownLeaderEpoch KError = 75 + ErrUnsupportedCompressionType KError = 76 + ErrStaleBrokerEpoch KError = 77 + ErrOffsetNotAvailable KError = 78 + ErrMemberIdRequired KError = 79 + ErrPreferredLeaderNotAvailable KError = 80 + ErrGroupMaxSizeReached KError = 81 + ErrFencedInstancedId KError = 82 +) + +func (err KError) Error() string { + // Error messages stolen/adapted from + // https://kafka.apache.org/protocol#protocol_error_codes + switch err { + case ErrNoError: + return "kafka server: Not an error, why are you printing me?" + case ErrUnknown: + return "kafka server: Unexpected (unknown?) server error." + case ErrOffsetOutOfRange: + return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." + case ErrInvalidMessage: + return "kafka server: Message contents does not match its CRC." + case ErrUnknownTopicOrPartition: + return "kafka server: Request was for a topic or partition that does not exist on this broker." + case ErrInvalidMessageSize: + return "kafka server: The message has a negative size." + case ErrLeaderNotAvailable: + return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." + case ErrNotLeaderForPartition: + return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." + case ErrRequestTimedOut: + return "kafka server: Request exceeded the user-specified time limit in the request." + case ErrBrokerNotAvailable: + return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" + case ErrReplicaNotAvailable: + return "kafka server: Replica information not available, one or more brokers are down." + case ErrMessageSizeTooLarge: + return "kafka server: Message was too large, server rejected it to avoid allocation error." + case ErrStaleControllerEpochCode: + return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." + case ErrOffsetMetadataTooLarge: + return "kafka server: Specified a string larger than the configured maximum for offset metadata." + case ErrNetworkException: + return "kafka server: The server disconnected before a response was received." + case ErrOffsetsLoadInProgress: + return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." + case ErrConsumerCoordinatorNotAvailable: + return "kafka server: Offset's topic has not yet been created." + case ErrNotCoordinatorForConsumer: + return "kafka server: Request was for a consumer group that is not coordinated by this broker." 
+ case ErrInvalidTopic: + return "kafka server: The request attempted to perform an operation on an invalid topic." + case ErrMessageSetSizeTooLarge: + return "kafka server: The request included a message batch larger than the configured segment size on the server." + case ErrNotEnoughReplicas: + return "kafka server: Messages are rejected since there are fewer in-sync replicas than required." + case ErrNotEnoughReplicasAfterAppend: + return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required." + case ErrInvalidRequiredAcks: + return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)." + case ErrIllegalGeneration: + return "kafka server: The provided generation id is not the current generation." + case ErrInconsistentGroupProtocol: + return "kafka server: The provided group protocol type is incompatible with the other members." + case ErrInvalidGroupId: + return "kafka server: The provided group id was empty." + case ErrUnknownMemberId: + return "kafka server: The provided member is not known in the current generation." + case ErrInvalidSessionTimeout: + return "kafka server: The provided session timeout is outside the allowed range." + case ErrRebalanceInProgress: + return "kafka server: A rebalance for the group is in progress. Please re-join the group." + case ErrInvalidCommitOffsetSize: + return "kafka server: The provided commit metadata was too large." + case ErrTopicAuthorizationFailed: + return "kafka server: The client is not authorized to access this topic." + case ErrGroupAuthorizationFailed: + return "kafka server: The client is not authorized to access this group." + case ErrClusterAuthorizationFailed: + return "kafka server: The client is not authorized to send this request type." + case ErrInvalidTimestamp: + return "kafka server: The timestamp of the message is out of acceptable range." + case ErrUnsupportedSASLMechanism: + return "kafka server: The broker does not support the requested SASL mechanism." + case ErrIllegalSASLState: + return "kafka server: Request is not valid given the current SASL state." + case ErrUnsupportedVersion: + return "kafka server: The version of the API is not supported." + case ErrTopicAlreadyExists: + return "kafka server: Topic with this name already exists." + case ErrInvalidPartitions: + return "kafka server: Number of partitions is invalid." + case ErrInvalidReplicationFactor: + return "kafka server: Replication-factor is invalid." + case ErrInvalidReplicaAssignment: + return "kafka server: Replica assignment is invalid." + case ErrInvalidConfig: + return "kafka server: Configuration is invalid." + case ErrNotController: + return "kafka server: This is not the correct controller for this cluster." + case ErrInvalidRequest: + return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." + case ErrUnsupportedForMessageFormat: + return "kafka server: The requested operation is not supported by the message format version." + case ErrPolicyViolation: + return "kafka server: Request parameters do not satisfy the configured policy." + case ErrOutOfOrderSequenceNumber: + return "kafka server: The broker received an out of order sequence number." + case ErrDuplicateSequenceNumber: + return "kafka server: The broker received a duplicate sequence number."
+ case ErrInvalidProducerEpoch: + return "kafka server: Producer attempted an operation with an old epoch." + case ErrInvalidTxnState: + return "kafka server: The producer attempted a transactional operation in an invalid state." + case ErrInvalidProducerIDMapping: + return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." + case ErrInvalidTransactionTimeout: + return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." + case ErrConcurrentTransactions: + return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." + case ErrTransactionCoordinatorFenced: + return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." + case ErrTransactionalIDAuthorizationFailed: + return "kafka server: Transactional ID authorization failed." + case ErrSecurityDisabled: + return "kafka server: Security features are disabled." + case ErrOperationNotAttempted: + return "kafka server: The broker did not attempt to execute this operation." + case ErrKafkaStorageError: + return "kafka server: Disk error when trying to access log file on the disk." + case ErrLogDirNotFound: + return "kafka server: The specified log directory is not found in the broker config." + case ErrSASLAuthenticationFailed: + return "kafka server: SASL Authentication failed." + case ErrUnknownProducerID: + return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." + case ErrReassignmentInProgress: + return "kafka server: A partition reassignment is in progress." + case ErrDelegationTokenAuthDisabled: + return "kafka server: Delegation Token feature is not enabled." + case ErrDelegationTokenNotFound: + return "kafka server: Delegation Token is not found on server." + case ErrDelegationTokenOwnerMismatch: + return "kafka server: Specified Principal is not valid Owner/Renewer." + case ErrDelegationTokenRequestNotAllowed: + return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels." + case ErrDelegationTokenAuthorizationFailed: + return "kafka server: Delegation Token authorization failed." + case ErrDelegationTokenExpired: + return "kafka server: Delegation Token is expired." + case ErrInvalidPrincipalType: + return "kafka server: Supplied principalType is not supported." + case ErrNonEmptyGroup: + return "kafka server: The group is not empty." + case ErrGroupIDNotFound: + return "kafka server: The group id does not exist." + case ErrFetchSessionIDNotFound: + return "kafka server: The fetch session ID was not found." + case ErrInvalidFetchSessionEpoch: + return "kafka server: The fetch session epoch is invalid." + case ErrListenerNotFound: + return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed." + case ErrTopicDeletionDisabled: + return "kafka server: Topic deletion is disabled." + case ErrFencedLeaderEpoch: + return "kafka server: The leader epoch in the request is older than the epoch on the broker." + case ErrUnknownLeaderEpoch: + return "kafka server: The leader epoch in the request is newer than the epoch on the broker." 
+ case ErrUnsupportedCompressionType: + return "kafka server: The requesting client does not support the compression type of the given partition." + case ErrStaleBrokerEpoch: + return "kafka server: Broker epoch has changed." + case ErrOffsetNotAvailable: + return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing." + case ErrMemberIdRequired: + return "kafka server: The group member needs to have a valid member id before actually entering a consumer group." + case ErrPreferredLeaderNotAvailable: + return "kafka server: The preferred leader was not available." + case ErrGroupMaxSizeReached: + return "kafka server: The consumer group has reached its configured maximum number of members." + case ErrFencedInstancedId: + return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id." + } + + return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) +} diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go new file mode 100644 index 00000000000..f893aeff7d5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -0,0 +1,295 @@ +package sarama + +type fetchRequestBlock struct { + Version int16 + currentLeaderEpoch int32 + fetchOffset int64 + logStartOffset int64 + maxBytes int32 +} + +func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error { + b.Version = version + if b.Version >= 9 { + pe.putInt32(b.currentLeaderEpoch) + } + pe.putInt64(b.fetchOffset) + if b.Version >= 5 { + pe.putInt64(b.logStartOffset) + } + pe.putInt32(b.maxBytes) + return nil +} + +func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) { + b.Version = version + if b.Version >= 9 { + if b.currentLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } + if b.fetchOffset, err = pd.getInt64(); err != nil { + return err + } + if b.Version >= 5 { + if b.logStartOffset, err = pd.getInt64(); err != nil { + return err + } + } + if b.maxBytes, err = pd.getInt32(); err != nil { + return err + } + return nil +} + +// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See +// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that.
The KIP is at +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes +type FetchRequest struct { + MaxWaitTime int32 + MinBytes int32 + MaxBytes int32 + Version int16 + Isolation IsolationLevel + SessionID int32 + SessionEpoch int32 + blocks map[string]map[int32]*fetchRequestBlock + forgotten map[string][]int32 + RackID string +} + +type IsolationLevel int8 + +const ( + ReadUncommitted IsolationLevel = iota + ReadCommitted +) + +func (r *FetchRequest) encode(pe packetEncoder) (err error) { + pe.putInt32(-1) // replica ID is always -1 for clients + pe.putInt32(r.MaxWaitTime) + pe.putInt32(r.MinBytes) + if r.Version >= 3 { + pe.putInt32(r.MaxBytes) + } + if r.Version >= 4 { + pe.putInt8(int8(r.Isolation)) + } + if r.Version >= 7 { + pe.putInt32(r.SessionID) + pe.putInt32(r.SessionEpoch) + } + err = pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, blocks := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(blocks)) + if err != nil { + return err + } + for partition, block := range blocks { + pe.putInt32(partition) + err = block.encode(pe, r.Version) + if err != nil { + return err + } + } + } + if r.Version >= 7 { + err = pe.putArrayLength(len(r.forgotten)) + if err != nil { + return err + } + for topic, partitions := range r.forgotten { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for _, partition := range partitions { + pe.putInt32(partition) + } + } + } + if r.Version >= 11 { + err = pe.putString(r.RackID) + if err != nil { + return err + } + } + + return nil +} + +func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if _, err = pd.getInt32(); err != nil { + return err + } + if r.MaxWaitTime, err = pd.getInt32(); err != nil { + return err + } + if r.MinBytes, err = pd.getInt32(); err != nil { + return err + } + if r.Version >= 3 { + if r.MaxBytes, err = pd.getInt32(); err != nil { + return err + } + } + if r.Version >= 4 { + isolation, err := pd.getInt8() + if err != nil { + return err + } + r.Isolation = IsolationLevel(isolation) + } + if r.Version >= 7 { + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + r.SessionEpoch, err = pd.getInt32() + if err != nil { + return err + } + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + fetchBlock := &fetchRequestBlock{} + if err = fetchBlock.decode(pd, r.Version); err != nil { + return err + } + r.blocks[topic][partition] = fetchBlock + } + } + + if r.Version >= 7 { + forgottenCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten = make(map[string][]int32) + for i := 0; i < forgottenCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten[topic] = make([]int32, partitionCount) + + for j := 0; j < partitionCount; j++ { + partition, err := 
pd.getInt32() + if err != nil { + return err + } + r.forgotten[topic][j] = partition + } + } + } + + if r.Version >= 11 { + r.RackID, err = pd.getString() + if err != nil { + return err + } + } + + return nil +} + +func (r *FetchRequest) key() int16 { + return 1 +} + +func (r *FetchRequest) version() int16 { + return r.Version +} + +func (r *FetchRequest) headerVersion() int16 { + return 1 +} + +func (r *FetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 0: + return MinVersion + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4, 5: + return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 + default: + return MaxVersion + } +} + +func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + } + + if r.Version >= 7 && r.forgotten == nil { + r.forgotten = make(map[string][]int32) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + } + + tmp := new(fetchRequestBlock) + tmp.Version = r.Version + tmp.maxBytes = maxBytes + tmp.fetchOffset = fetchOffset + if r.Version >= 9 { + tmp.currentLeaderEpoch = int32(-1) + } + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go new file mode 100644 index 00000000000..ca6d78832cf --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -0,0 +1,546 @@ +package sarama + +import ( + "sort" + "time" +) + +type AbortedTransaction struct { + ProducerID int64 + FirstOffset int64 +} + +func (t *AbortedTransaction) decode(pd packetDecoder) (err error) { + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if t.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + return nil +} + +func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { + pe.putInt64(t.ProducerID) + pe.putInt64(t.FirstOffset) + + return nil +} + +type FetchResponseBlock struct { + Err KError + HighWaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + AbortedTransactions []*AbortedTransaction + PreferredReadReplica int32 + Records *Records // deprecated: use FetchResponseBlock.RecordsSet + RecordsSet []*Records + Partial bool +} + +func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.HighWaterMarkOffset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 4 { + b.LastStableOffset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 5 { + b.LogStartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + + numTransact, err := pd.getArrayLength() + if err != nil { + return err + } + + if numTransact >= 0 { + b.AbortedTransactions = make([]*AbortedTransaction, numTransact) + } + + for i := 0; i < numTransact; i++ { + transact := new(AbortedTransaction) + if err = transact.decode(pd); err != nil { + return err + } + b.AbortedTransactions[i] = transact + } + } + + if version >= 11 { + b.PreferredReadReplica, err = pd.getInt32() + if err != nil { + return err + } + } + + recordsSize, err := pd.getInt32() + if err != nil { + return err + } + + recordsDecoder, err := pd.getSubset(int(recordsSize)) + if err != nil { + return 
err + } + + b.RecordsSet = []*Records{} + + for recordsDecoder.remaining() > 0 { + records := &Records{} + if err := records.decode(recordsDecoder); err != nil { + // If we have decoded at least one record set, this is not an error + if err == ErrInsufficientData { + if len(b.RecordsSet) == 0 { + b.Partial = true + } + break + } + return err + } + + partial, err := records.isPartial() + if err != nil { + return err + } + + n, err := records.numRecords() + if err != nil { + return err + } + + if n > 0 || (partial && len(b.RecordsSet) == 0) { + b.RecordsSet = append(b.RecordsSet, records) + + if b.Records == nil { + b.Records = records + } + } + + overflow, err := records.isOverflow() + if err != nil { + return err + } + + if partial || overflow { + break + } + } + + return nil +} + +func (b *FetchResponseBlock) numRecords() (int, error) { + sum := 0 + + for _, records := range b.RecordsSet { + count, err := records.numRecords() + if err != nil { + return 0, err + } + + sum += count + } + + return sum, nil +} + +func (b *FetchResponseBlock) isPartial() (bool, error) { + if b.Partial { + return true, nil + } + + if len(b.RecordsSet) == 1 { + return b.RecordsSet[0].isPartial() + } + + return false, nil +} + +func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + + pe.putInt64(b.HighWaterMarkOffset) + + if version >= 4 { + pe.putInt64(b.LastStableOffset) + + if version >= 5 { + pe.putInt64(b.LogStartOffset) + } + + if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { + return err + } + for _, transact := range b.AbortedTransactions { + if err = transact.encode(pe); err != nil { + return err + } + } + } + + if version >= 11 { + pe.putInt32(b.PreferredReadReplica) + } + + pe.push(&lengthField{}) + for _, records := range b.RecordsSet { + err = records.encode(pe) + if err != nil { + return err + } + } + return pe.pop() +} + +func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction { + // I can't find any doc that guarantees the field `fetchResponse.AbortedTransactions` is ordered + // plus the Java implementation uses a PriorityQueue based on `FirstOffset`.
I guess we have to order it ourselves + at := b.AbortedTransactions + sort.Slice( + at, + func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset }, + ) + return at +} + +type FetchResponse struct { + Blocks map[string]map[int32]*FetchResponseBlock + ThrottleTime time.Duration + ErrorCode int16 + SessionID int32 + Version int16 + LogAppendTime bool + Timestamp time.Time +} + +func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.Version >= 1 { + throttle, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttle) * time.Millisecond + } + + if r.Version >= 7 { + r.ErrorCode, err = pd.getInt16() + if err != nil { + return err + } + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + } + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(FetchResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *FetchResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + + if r.Version >= 7 { + pe.putInt16(r.ErrorCode) + pe.putInt32(r.SessionID) + } + + err = pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + + for id, block := range partitions { + pe.putInt32(id) + err = block.encode(pe, r.Version) + if err != nil { + return err + } + } + } + return nil +} + +func (r *FetchResponse) key() int16 { + return 1 +} + +func (r *FetchResponse) version() int16 { + return r.Version +} + +func (r *FetchResponse) headerVersion() int16 { + return 0 +} + +func (r *FetchResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 0: + return MinVersion + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4, 5: + return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 + default: + return MaxVersion + } +} + +func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *FetchResponse) AddError(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + partitions, ok := r.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + r.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + frb.Err = err +} + +func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } +
partitions, ok := r.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + r.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + + return frb +} + +func encodeKV(key, value Encoder) ([]byte, []byte) { + var kb []byte + var vb []byte + if key != nil { + kb, _ = key.Encode() + } + if value != nil { + vb, _ = value.Encode() + } + + return kb, vb +} + +func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + if r.LogAppendTime { + timestamp = r.Timestamp + } + msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version} + msgBlock := &MessageBlock{Msg: msg, Offset: offset} + if len(frb.RecordsSet) == 0 { + records := newLegacyRecords(&MessageSet{}) + frb.RecordsSet = []*Records{&records} + } + set := frb.RecordsSet[0].MsgSet + set.Messages = append(set.Messages, msgBlock) +} + +func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + if len(frb.RecordsSet) == 0 { + records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) + frb.RecordsSet = []*Records{&records} + } + batch := frb.RecordsSet[0].RecordBatch + rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) +} + +// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp +// But instead of appending 1 record to a batch, it appends a new batch containing 1 record to the fetchResponse +// Since transactions are handled at the batch level (the whole batch is either committed or aborted), use this to test transactions +func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + + records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) + batch := &RecordBatch{ + Version: 2, + LogAppendTime: r.LogAppendTime, + FirstTimestamp: timestamp, + MaxTimestamp: r.Timestamp, + FirstOffset: offset, + LastOffsetDelta: 0, + ProducerID: producerID, + IsTransactional: isTransactional, + } + rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) + records.RecordBatch = batch + + frb.RecordsSet = append(frb.RecordsSet, &records) +} + +func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + + // batch + batch := &RecordBatch{ + Version: 2, + LogAppendTime: r.LogAppendTime, + FirstTimestamp: timestamp, + MaxTimestamp: r.Timestamp, + FirstOffset: offset, + LastOffsetDelta: 0, + ProducerID: producerID, + IsTransactional: true, + Control: true, + } + + // records + records := newDefaultRecords(nil) + records.RecordBatch = batch + + // record + crAbort := ControlRecord{ + Version: 0, + Type: recordType, + } +
crKey := &realEncoder{raw: make([]byte, 4)} + crValue := &realEncoder{raw: make([]byte, 6)} + crAbort.encode(crKey, crValue) + rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) + + frb.RecordsSet = append(frb.RecordsSet, &records) +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0) +} + +func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { + r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{}) +} + +func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) { + r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{}) +} + +func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) { + // define controlRecord key and value + r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{}) +} + +func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { + frb := r.getOrCreateBlock(topic, partition) + if len(frb.RecordsSet) == 0 { + records := newDefaultRecords(&RecordBatch{Version: 2}) + frb.RecordsSet = []*Records{&records} + } + batch := frb.RecordsSet[0].RecordBatch + batch.LastOffsetDelta = offset +} + +func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + frb.LastStableOffset = offset +} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go new file mode 100644 index 00000000000..597bcbf786f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go @@ -0,0 +1,65 @@ +package sarama + +type CoordinatorType int8 + +const ( + CoordinatorGroup CoordinatorType = iota + CoordinatorTransaction +) + +type FindCoordinatorRequest struct { + Version int16 + CoordinatorKey string + CoordinatorType CoordinatorType +} + +func (f *FindCoordinatorRequest) encode(pe packetEncoder) error { + if err := pe.putString(f.CoordinatorKey); err != nil { + return err + } + + if f.Version >= 1 { + pe.putInt8(int8(f.CoordinatorType)) + } + + return nil +} + +func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) { + if f.CoordinatorKey, err = pd.getString(); err != nil { + return err + } + + if version >= 1 { + f.Version = version + coordinatorType, err := pd.getInt8() + if err != nil { + return err + } + + f.CoordinatorType = CoordinatorType(coordinatorType) + } + + return nil +} + +func (f *FindCoordinatorRequest) key() int16 { + return 10 +} + +func (f *FindCoordinatorRequest) version() int16 { + return f.Version +} + +func (r *FindCoordinatorRequest) headerVersion() int16 { + return 1 +} + +func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go new file mode 100644 index 00000000000..83a648ad4ae --- /dev/null +++ 
b/vendor/github.com/Shopify/sarama/find_coordinator_response.go @@ -0,0 +1,96 @@ +package sarama + +import ( + "time" +) + +var NoNode = &Broker{id: -1, addr: ":-1"} + +type FindCoordinatorResponse struct { + Version int16 + ThrottleTime time.Duration + Err KError + ErrMsg *string + Coordinator *Broker +} + +func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + f.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + f.Err = KError(tmp) + + if version >= 1 { + if f.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + coordinator := new(Broker) + // The version is hardcoded to 0, as version 1 of the Broker-decode + // contains the rack-field which is not present in the FindCoordinatorResponse. + if err := coordinator.decode(pd, 0); err != nil { + return err + } + if coordinator.addr == ":0" { + return nil + } + f.Coordinator = coordinator + + return nil +} + +func (f *FindCoordinatorResponse) encode(pe packetEncoder) error { + if f.Version >= 1 { + pe.putInt32(int32(f.ThrottleTime / time.Millisecond)) + } + + pe.putInt16(int16(f.Err)) + + if f.Version >= 1 { + if err := pe.putNullableString(f.ErrMsg); err != nil { + return err + } + } + + coordinator := f.Coordinator + if coordinator == nil { + coordinator = NoNode + } + if err := coordinator.encode(pe, 0); err != nil { + return err + } + return nil +} + +func (f *FindCoordinatorResponse) key() int16 { + return 10 +} + +func (f *FindCoordinatorResponse) version() int16 { + return f.Version +} + +func (r *FindCoordinatorResponse) headerVersion() int16 { + return 0 +} + +func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod new file mode 100644 index 00000000000..1392d610d4e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/go.mod @@ -0,0 +1,35 @@ +module github.com/Shopify/sarama + +go 1.13 + +require ( + github.com/Shopify/toxiproxy v2.1.4+incompatible + github.com/davecgh/go-spew v1.1.1 + github.com/eapache/go-resiliency v1.2.0 + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 + github.com/eapache/queue v1.1.0 + github.com/fortytw2/leaktest v1.3.0 + github.com/frankban/quicktest v1.10.2 // indirect + github.com/golang/snappy v0.0.1 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/jcmturner/gofork v1.0.0 + github.com/klauspost/compress v1.11.0 + github.com/kr/text v0.2.0 // indirect + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/pierrec/lz4 v2.5.2+incompatible + github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 + github.com/stretchr/testify v1.6.1 + github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c + github.com/xdg/stringprep v1.0.0 // indirect + golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect + golang.org/x/net v0.0.0-20200904194848-62affa334b73 + golang.org/x/text v0.3.3 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect + gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect + gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect + gopkg.in/jcmturner/goidentity.v3 v3.0.0 // 
indirect + gopkg.in/jcmturner/gokrb5.v7 v7.5.0 + gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect +) diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum new file mode 100644 index 00000000000..dce6814213c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/go.sum @@ -0,0 +1,87 @@ +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= +github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= +github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= 
+github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go new file mode 100644 index 00000000000..1993fc08fc5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go @@ -0,0 +1,258 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "io" + "strings" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/asn1tools" + "gopkg.in/jcmturner/gokrb5.v7/gssapi" + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +const ( + TOK_ID_KRB_AP_REQ = 256 + GSS_API_GENERIC_TAG = 0x60 + KRB5_USER_AUTH = 1 + KRB5_KEYTAB_AUTH = 2 + GSS_API_INITIAL = 1 + GSS_API_VERIFY = 2 + GSS_API_FINISH = 3 +) + +type GSSAPIConfig struct { + AuthType int + KeyTabPath string + KerberosConfigPath string + ServiceName string + Username string + Password string + Realm string + DisablePAFXFAST bool +} + +type GSSAPIKerberosAuth struct { + Config *GSSAPIConfig + ticket messages.Ticket + encKey types.EncryptionKey + NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error) + step int +} + +type KerberosClient interface { + Login() error + GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) + Domain() string + CName() types.PrincipalName + Destroy() +} + +/* +* +* Appends length in big endian before payload, and send it to kafka +* + */ + +func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) { + length := len(payload) + finalPackage := make([]byte, length+4) //4 byte length header + payload + copy(finalPackage[4:], payload) + binary.BigEndian.PutUint32(finalPackage, uint32(length)) + bytes, err := broker.conn.Write(finalPackage) + if err != nil { + return bytes, err + } + return bytes, nil +} + +/* +* +* Read length (4 bytes) and 
then read the payload +* + */ + +func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) { + bytesRead := 0 + lengthInBytes := make([]byte, 4) + bytes, err := io.ReadFull(broker.conn, lengthInBytes) + if err != nil { + return nil, bytesRead, err + } + bytesRead += bytes + payloadLength := binary.BigEndian.Uint32(lengthInBytes) + payloadBytes := make([]byte, payloadLength) // buffer for the payload + bytes, err = io.ReadFull(broker.conn, payloadBytes) // read the payload + if err != nil { + return payloadBytes, bytesRead, err + } + bytesRead += bytes + return payloadBytes, bytesRead, nil +} + +func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte { + a := make([]byte, 24) + flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf} + binary.LittleEndian.PutUint32(a[:4], 16) + for _, i := range flags { + f := binary.LittleEndian.Uint32(a[20:24]) + f |= uint32(i) + binary.LittleEndian.PutUint32(a[20:24], f) + } + return a +} + +/* +* +* Construct Kerberos AP_REQ package, conforming to RFC-4120 +* https://tools.ietf.org/html/rfc4120#page-84 +* + */ +func (krbAuth *GSSAPIKerberosAuth) createKrb5Token( + domain string, cname types.PrincipalName, + ticket messages.Ticket, + sessionKey types.EncryptionKey) ([]byte, error) { + auth, err := types.NewAuthenticator(domain, cname) + if err != nil { + return nil, err + } + auth.Cksum = types.Checksum{ + CksumType: chksumtype.GSSAPI, + Checksum: krbAuth.newAuthenticatorChecksum(), + } + APReq, err := messages.NewAPReq( + ticket, + sessionKey, + auth, + ) + if err != nil { + return nil, err + } + aprBytes := make([]byte, 2) + binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ) + tb, err := APReq.Marshal() + if err != nil { + return nil, err + } + aprBytes = append(aprBytes, tb...) + return aprBytes, nil +} + +/* +* +* Append the GSS-API header to the payload, conforming to RFC-2743 +* Section 3.1, Mechanism-Independent Token Format +* +* https://tools.ietf.org/html/rfc2743#page-81 +* +* GSSAPIHeader + <specific mechanism payload> +* + */ +func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) { + oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5)) + if err != nil { + return nil, err + } + tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload)) + GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...) + GSSHeader = append(GSSHeader, oidBytes...) + GSSPackage := append(GSSHeader, payload...) + return GSSPackage, nil +} + +func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) { + switch krbAuth.step { + case GSS_API_INITIAL: + aprBytes, err := krbAuth.createKrb5Token( + kerberosClient.Domain(), + kerberosClient.CName(), + krbAuth.ticket, + krbAuth.encKey) + if err != nil { + return nil, err + } + krbAuth.step = GSS_API_VERIFY + return krbAuth.appendGSSAPIHeader(aprBytes) + case GSS_API_VERIFY: + wrapTokenReq := gssapi.WrapToken{} + if err := wrapTokenReq.Unmarshal(bytes, true); err != nil { + return nil, err + } + // Validate response.
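+ // The broker's reply is a GSS-API WrapToken. Verify checks the token's + // checksum against the session key (acceptor seal key usage); the token's + // payload is then wrapped into a new initiator token and sent back as the + // final handshake message.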
+ isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL) + if !isValid { + return nil, err + } + + wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey) + if err != nil { + return nil, err + } + krbAuth.step = GSS_API_FINISH + return wrapTokenResponse.Marshal() + } + return nil, nil +} + +/* This does the handshake for authorization */ +func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error { + kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config) + if err != nil { + Logger.Printf("Kerberos client error: %s", err) + return err + } + + err = kerberosClient.Login() + if err != nil { + Logger.Printf("Kerberos client error: %s", err) + return err + } + // Construct SPN using serviceName and host + // SPN format: <SERVICE>/<FQDN> + + host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part + spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host) + + ticket, encKey, err := kerberosClient.GetServiceTicket(spn) + + if err != nil { + Logger.Printf("Error getting Kerberos service ticket: %s", err) + return err + } + krbAuth.ticket = ticket + krbAuth.encKey = encKey + krbAuth.step = GSS_API_INITIAL + var receivedBytes []byte = nil + defer kerberosClient.Destroy() + for { + packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + requestTime := time.Now() + bytesWritten, err := krbAuth.writePackage(broker, packBytes) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + broker.updateOutgoingCommunicationMetrics(bytesWritten) + if krbAuth.step == GSS_API_VERIFY { + bytesRead := 0 + receivedBytes, bytesRead, err = krbAuth.readPackage(broker) + requestLatency := time.Since(requestTime) + broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + } else if krbAuth.step == GSS_API_FINISH { + return nil + } + } +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go new file mode 100644 index 00000000000..e9d9af19110 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -0,0 +1,51 @@ +package sarama + +type HeartbeatRequest struct { + GroupId string + GenerationId int32 + MemberId string +} + +func (r *HeartbeatRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *HeartbeatRequest) key() int16 { + return 12 +} + +func (r *HeartbeatRequest) version() int16 { + return 0 +} + +func (r *HeartbeatRequest) headerVersion() int16 { + return 1 +} + +func (r *HeartbeatRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go new file mode 100644 index 00000000000..577ab72e574 --- 
/dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -0,0 +1,36 @@ +package sarama + +type HeartbeatResponse struct { + Err KError +} + +func (r *HeartbeatResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *HeartbeatResponse) key() int16 { + return 12 +} + +func (r *HeartbeatResponse) version() int16 { + return 0 +} + +func (r *HeartbeatResponse) headerVersion() int16 { + return 0 +} + +func (r *HeartbeatResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go new file mode 100644 index 00000000000..689444397d6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -0,0 +1,47 @@ +package sarama + +import "time" + +type InitProducerIDRequest struct { + TransactionalID *string + TransactionTimeout time.Duration +} + +func (i *InitProducerIDRequest) encode(pe packetEncoder) error { + if err := pe.putNullableString(i.TransactionalID); err != nil { + return err + } + pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) + + return nil +} + +func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { + if i.TransactionalID, err = pd.getNullableString(); err != nil { + return err + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + i.TransactionTimeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (i *InitProducerIDRequest) key() int16 { + return 22 +} + +func (i *InitProducerIDRequest) version() int16 { + return 0 +} + +func (i *InitProducerIDRequest) headerVersion() int16 { + return 1 +} + +func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go new file mode 100644 index 00000000000..3e1242bf622 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -0,0 +1,59 @@ +package sarama + +import "time" + +type InitProducerIDResponse struct { + ThrottleTime time.Duration + Err KError + ProducerID int64 + ProducerEpoch int16 +} + +func (i *InitProducerIDResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(i.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(i.Err)) + pe.putInt64(i.ProducerID) + pe.putInt16(i.ProducerEpoch) + + return nil +} + +func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + i.Err = KError(kerr) + + if i.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if i.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +func (i *InitProducerIDResponse) key() int16 { + return 22 +} + +func (i *InitProducerIDResponse) version() int16 { + return 0 +} + +func (i *InitProducerIDResponse) headerVersion() int16 { + return 0 +} + +func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/interceptors.go 
b/vendor/github.com/Shopify/sarama/interceptors.go new file mode 100644 index 00000000000..d0d33e526f8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/interceptors.go @@ -0,0 +1,43 @@ +package sarama + +// ProducerInterceptor allows you to intercept (and possibly mutate) the records +// received by the producer before they are published to the Kafka cluster. +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation +type ProducerInterceptor interface { + + // OnSend is called when the producer message is intercepted. Please avoid + // modifying the message until it's safe to do so, as this is _not_ a copy + // of the message. + OnSend(*ProducerMessage) +} + +// ConsumerInterceptor allows you to intercept (and possibly mutate) the records +// received by the consumer before they are sent to the messages channel. +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation +type ConsumerInterceptor interface { + + // OnConsume is called when the consumed message is intercepted. Please + // avoid modifying the message until it's safe to do so, as this is _not_ a + // copy of the message. + OnConsume(*ConsumerMessage) +} + +func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) { + defer func() { + if r := recover(); r != nil { + Logger.Printf("Error when calling producer interceptor: %s, %v\n", interceptor, r) + } + }() + + interceptor.OnSend(msg) +} + +func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) { + defer func() { + if r := recover(); r != nil { + Logger.Printf("Error when calling consumer interceptor: %s, %v\n", interceptor, r) + } + }() + + interceptor.OnConsume(msg) +} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go new file mode 100644 index 00000000000..3734e82e406 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -0,0 +1,167 @@ +package sarama + +type GroupProtocol struct { + Name string + Metadata []byte +} + +func (p *GroupProtocol) decode(pd packetDecoder) (err error) { + p.Name, err = pd.getString() + if err != nil { + return err + } + p.Metadata, err = pd.getBytes() + return err +} + +func (p *GroupProtocol) encode(pe packetEncoder) (err error) { + if err := pe.putString(p.Name); err != nil { + return err + } + if err := pe.putBytes(p.Metadata); err != nil { + return err + } + return nil +} + +type JoinGroupRequest struct { + Version int16 + GroupId string + SessionTimeout int32 + RebalanceTimeout int32 + MemberId string + ProtocolType string + GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols + OrderedGroupProtocols []*GroupProtocol +} + +func (r *JoinGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + pe.putInt32(r.SessionTimeout) + if r.Version >= 1 { + pe.putInt32(r.RebalanceTimeout) + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + if err := pe.putString(r.ProtocolType); err != nil { + return err + } + + if len(r.GroupProtocols) > 0 { + if len(r.OrderedGroupProtocols) > 0 { + return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"} + } + + if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { + return err + } + for name, metadata := range
r.GroupProtocols { + if err := pe.putString(name); err != nil { + return err + } + if err := pe.putBytes(metadata); err != nil { + return err + } + } + } else { + if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { + return err + } + for _, protocol := range r.OrderedGroupProtocols { + if err := protocol.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.GroupId, err = pd.getString(); err != nil { + return + } + + if r.SessionTimeout, err = pd.getInt32(); err != nil { + return + } + + if version >= 1 { + if r.RebalanceTimeout, err = pd.getInt32(); err != nil { + return err + } + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + if r.ProtocolType, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupProtocols = make(map[string][]byte) + for i := 0; i < n; i++ { + protocol := &GroupProtocol{} + if err := protocol.decode(pd); err != nil { + return err + } + r.GroupProtocols[protocol.Name] = protocol.Metadata + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) + } + + return nil +} + +func (r *JoinGroupRequest) key() int16 { + return 11 +} + +func (r *JoinGroupRequest) version() int16 { + return r.Version +} + +func (r *JoinGroupRequest) headerVersion() int16 { + return 1 +} + +func (r *JoinGroupRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 + } +} + +func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ + Name: name, + Metadata: metadata, + }) +} + +func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { + bin, err := encode(metadata, nil) + if err != nil { + return err + } + + r.AddGroupProtocol(name, bin) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go new file mode 100644 index 00000000000..54b0a45c28e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -0,0 +1,139 @@ +package sarama + +type JoinGroupResponse struct { + Version int16 + ThrottleTime int32 + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members map[string][]byte +} + +func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { + members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) + for id, bin := range r.Members { + meta := new(ConsumerGroupMemberMetadata) + if err := decode(bin, meta); err != nil { + return nil, err + } + members[id] = *meta + } + return members, nil +} + +func (r *JoinGroupResponse) encode(pe packetEncoder) error { + if r.Version >= 2 { + pe.putInt32(r.ThrottleTime) + } + pe.putInt16(int16(r.Err)) + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.GroupProtocol); err != nil { + return err + } + if err := pe.putString(r.LeaderId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + + for memberId, memberMetadata := range r.Members { + if err := pe.putString(memberId); err != nil { + return err + } + + if err := 
pe.putBytes(memberMetadata); err != nil { + return err + } + } + + return nil +} + +func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 2 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return + } + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + + if r.GroupProtocol, err = pd.getString(); err != nil { + return + } + + if r.LeaderId, err = pd.getString(); err != nil { + return + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Members = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + memberMetadata, err := pd.getBytes() + if err != nil { + return err + } + + r.Members[memberId] = memberMetadata + } + + return nil +} + +func (r *JoinGroupResponse) key() int16 { + return 11 +} + +func (r *JoinGroupResponse) version() int16 { + return r.Version +} + +func (r *JoinGroupResponse) headerVersion() int16 { + return 0 +} + +func (r *JoinGroupResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go new file mode 100644 index 00000000000..ebc11417988 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/kerberos_client.go @@ -0,0 +1,46 @@ +package sarama + +import ( + krb5client "gopkg.in/jcmturner/gokrb5.v7/client" + krb5config "gopkg.in/jcmturner/gokrb5.v7/config" + "gopkg.in/jcmturner/gokrb5.v7/keytab" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +type KerberosGoKrb5Client struct { + krb5client.Client +} + +func (c *KerberosGoKrb5Client) Domain() string { + return c.Credentials.Domain() +} + +func (c *KerberosGoKrb5Client) CName() types.PrincipalName { + return c.Credentials.CName() +} + +// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens. +// It uses a pure Go Kerberos 5 implementation (RFC 4120 and RFC 4121), built on +// the gokrb5 library, which also provides some GSS-API capabilities.
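+// +// A minimal usage sketch (hedged: the krb5.conf path, principal, password, and +// realm below are placeholder values, not part of this library): +// +// cfg := &GSSAPIConfig{ +// AuthType: KRB5_USER_AUTH, +// KerberosConfigPath: "/etc/krb5.conf", +// Username: "client", +// Password: "secret", +// Realm: "EXAMPLE.COM", +// } +// client, err := NewKerberosClient(cfg) +// if err != nil { +// // handle error +// } +// defer client.Destroy()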
+func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { + cfg, err := krb5config.Load(config.KerberosConfigPath) + if err != nil { + return nil, err + } + return createClient(config, cfg) +} + +func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { + var client *krb5client.Client + if config.AuthType == KRB5_KEYTAB_AUTH { + kt, err := keytab.Load(config.KeyTabPath) + if err != nil { + return nil, err + } + client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + } else { + client = krb5client.NewClientWithPassword(config.Username, + config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + } + return &KerberosGoKrb5Client{*client}, nil +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go new file mode 100644 index 00000000000..d7789b68dbe --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -0,0 +1,44 @@ +package sarama + +type LeaveGroupRequest struct { + GroupId string + MemberId string +} + +func (r *LeaveGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *LeaveGroupRequest) key() int16 { + return 13 +} + +func (r *LeaveGroupRequest) version() int16 { + return 0 +} + +func (r *LeaveGroupRequest) headerVersion() int16 { + return 1 +} + +func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go new file mode 100644 index 00000000000..25f8d5eb36b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -0,0 +1,36 @@ +package sarama + +type LeaveGroupResponse struct { + Err KError +} + +func (r *LeaveGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *LeaveGroupResponse) key() int16 { + return 13 +} + +func (r *LeaveGroupResponse) version() int16 { + return 0 +} + +func (r *LeaveGroupResponse) headerVersion() int16 { + return 0 +} + +func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go new file mode 100644 index 00000000000..7d864f6bf97 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/length_field.go @@ -0,0 +1,99 @@ +package sarama + +import ( + "encoding/binary" + "sync" +) + +// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. 
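+// +// On encode, push reserves 4 bytes at the current offset (saveOffset and reserveLength) and the matching pop back-fills them, via run, with the number of bytes written in between; on decode, check verifies that exactly the advertised number of bytes was consumed.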
+type lengthField struct { + startOffset int + length int32 +} + +var lengthFieldPool = sync.Pool{} + +func acquireLengthField() *lengthField { + val := lengthFieldPool.Get() + if val != nil { + return val.(*lengthField) + } + return &lengthField{} +} + +func releaseLengthField(m *lengthField) { + lengthFieldPool.Put(m) +} + +func (l *lengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getInt32() + if err != nil { + return err + } + if l.length > int32(pd.remaining()) { + return ErrInsufficientData + } + return nil +} + +func (l *lengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *lengthField) reserveLength() int { + return 4 +} + +func (l *lengthField) run(curOffset int, buf []byte) error { + binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) + return nil +} + +func (l *lengthField) check(curOffset int, buf []byte) error { + if int32(curOffset-l.startOffset-4) != l.length { + return PacketDecodingError{"length field invalid"} + } + + return nil +} + +type varintLengthField struct { + startOffset int + length int64 +} + +func (l *varintLengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getVarint() + return err +} + +func (l *varintLengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *varintLengthField) adjustLength(currOffset int) int { + oldFieldSize := l.reserveLength() + l.length = int64(currOffset - l.startOffset - oldFieldSize) + + return l.reserveLength() - oldFieldSize +} + +func (l *varintLengthField) reserveLength() int { + var tmp [binary.MaxVarintLen64]byte + return binary.PutVarint(tmp[:], l.length) +} + +func (l *varintLengthField) run(curOffset int, buf []byte) error { + binary.PutVarint(buf[l.startOffset:], l.length) + return nil +} + +func (l *varintLengthField) check(curOffset int, buf []byte) error { + if int64(curOffset-l.startOffset-l.reserveLength()) != l.length { + return PacketDecodingError{"length field invalid"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go new file mode 100644 index 00000000000..ed44cc27e36 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -0,0 +1,28 @@ +package sarama + +type ListGroupsRequest struct { +} + +func (r *ListGroupsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ListGroupsRequest) key() int16 { + return 16 +} + +func (r *ListGroupsRequest) version() int16 { + return 0 +} + +func (r *ListGroupsRequest) headerVersion() int16 { + return 1 +} + +func (r *ListGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go new file mode 100644 index 00000000000..777bae7e63e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_response.go @@ -0,0 +1,73 @@ +package sarama + +type ListGroupsResponse struct { + Err KError + Groups map[string]string +} + +func (r *ListGroupsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + for groupId, protocolType := range r.Groups { + if err := pe.putString(groupId); err != nil { + return err + } + if err := pe.putString(protocolType); err != nil { + return err + } + } + + return nil +} + 
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Groups = make(map[string]string) + for i := 0; i < n; i++ { + groupId, err := pd.getString() + if err != nil { + return err + } + protocolType, err := pd.getString() + if err != nil { + return err + } + + r.Groups[groupId] = protocolType + } + + return nil +} + +func (r *ListGroupsResponse) key() int16 { + return 16 +} + +func (r *ListGroupsResponse) version() int16 { + return 0 +} + +func (r *ListGroupsResponse) headerVersion() int16 { + return 0 +} + +func (r *ListGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go new file mode 100644 index 00000000000..c1ffa9ba02b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go @@ -0,0 +1,98 @@ +package sarama + +type ListPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string][]int32 + Version int16 +} + +func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + + if err := pe.putCompactInt32Array(partitions); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string][]int32) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make([]int32, partitionCount) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + r.blocks[topic][j] = partition + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *ListPartitionReassignmentsRequest) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) { + if r.blocks == nil { + r.blocks = make(map[string][]int32) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = partitionIDs + } +} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go new file mode 100644 index 00000000000..4baa6a08e83 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go @@ -0,0 +1,169 @@ +package sarama + +type 
PartitionReplicaReassignmentsStatus struct { + Replicas []int32 + AddingReplicas []int32 + RemovingReplicas []int32 +} + +func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error { + if err := pe.putCompactInt32Array(b.Replicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) { + if b.Replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return err +} + +type ListPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus +} + +func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) { + if r.TopicStatus == nil { + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus) + } + partitions := r.TopicStatus[topic] + if partitions == nil { + partitions = make(map[int32]*PartitionReplicaReassignmentsStatus) + r.TopicStatus[topic] = partitions + } + + partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas} +} + +func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.TopicStatus)) + for topic, partitions := range r.TopicStatus { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + + block := &PartitionReplicaReassignmentsStatus{} + if err := block.decode(pd); err != nil { + return err + } + 
r.TopicStatus[topic][partition] = block + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *ListPartitionReassignmentsResponse) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go new file mode 100644 index 00000000000..e48566b37cd --- /dev/null +++ b/vendor/github.com/Shopify/sarama/message.go @@ -0,0 +1,174 @@ +package sarama + +import ( + "fmt" + "time" +) + +const ( + //CompressionNone no compression + CompressionNone CompressionCodec = iota + //CompressionGZIP compression using GZIP + CompressionGZIP + //CompressionSnappy compression using snappy + CompressionSnappy + //CompressionLZ4 compression using LZ4 + CompressionLZ4 + //CompressionZSTD compression using ZSTD + CompressionZSTD + + // The lowest 3 bits contain the compression codec used for the message + compressionCodecMask int8 = 0x07 + + // Bit 3 set for "LogAppend" timestamps + timestampTypeMask = 0x08 + + // CompressionLevelDefault is the constant to use in CompressionLevel + // to have the default compression level for any codec. The value is picked + // that we don't use any existing compression levels. + CompressionLevelDefault = -1000 +) + +// CompressionCodec represents the various compression codecs recognized by Kafka in messages. +type CompressionCodec int8 + +func (cc CompressionCodec) String() string { + return []string{ + "none", + "gzip", + "snappy", + "lz4", + "zstd", + }[int(cc)] +} + +//Message is a kafka message type +type Message struct { + Codec CompressionCodec // codec used to compress the message contents + CompressionLevel int // compression level + LogAppendTime bool // the used timestamp is LogAppendTime + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + Version int8 // v1 requires Kafka 0.10 + Timestamp time.Time // the timestamp of the message (version 1+ only) + + compressedCache []byte + compressedSize int // used for computing the compression ratio metrics +} + +func (m *Message) encode(pe packetEncoder) error { + pe.push(newCRC32Field(crcIEEE)) + + pe.putInt8(m.Version) + + attributes := int8(m.Codec) & compressionCodecMask + if m.LogAppendTime { + attributes |= timestampTypeMask + } + pe.putInt8(attributes) + + if m.Version >= 1 { + if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { + return err + } + } + + err := pe.putBytes(m.Key) + if err != nil { + return err + } + + var payload []byte + + if m.compressedCache != nil { + payload = m.compressedCache + m.compressedCache = nil + } else if m.Value != nil { + payload, err = compress(m.Codec, m.CompressionLevel, m.Value) + if err != nil { + return err + } + m.compressedCache = payload + // Keep in mind the compressed payload size for metric gathering + m.compressedSize = len(payload) + } + + if err = pe.putBytes(payload); err != nil { + return err + } + + return pe.pop() +} + +func (m *Message) decode(pd packetDecoder) (err error) { + crc32Decoder := acquireCrc32Field(crcIEEE) + defer releaseCrc32Field(crc32Decoder) + + err = pd.push(crc32Decoder) + if err != 
nil { + return err + } + + m.Version, err = pd.getInt8() + if err != nil { + return err + } + + if m.Version > 1 { + return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)} + } + + attribute, err := pd.getInt8() + if err != nil { + return err + } + m.Codec = CompressionCodec(attribute & compressionCodecMask) + m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask + + if m.Version == 1 { + if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { + return err + } + } + + m.Key, err = pd.getBytes() + if err != nil { + return err + } + + m.Value, err = pd.getBytes() + if err != nil { + return err + } + + // Required for deep equal assertion during tests but might be useful + // for future metrics about the compression ratio in fetch requests + m.compressedSize = len(m.Value) + + switch m.Codec { + case CompressionNone: + // nothing to do + default: + if m.Value == nil { + break + } + + m.Value, err = decompress(m.Codec, m.Value) + if err != nil { + return err + } + if err := m.decodeSet(); err != nil { + return err + } + } + + return pd.pop() +} + +// decodes a message set from a previously encoded bulk-message +func (m *Message) decodeSet() (err error) { + pd := realDecoder{raw: m.Value} + m.Set = &MessageSet{} + return m.Set.decode(&pd) +} diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go new file mode 100644 index 00000000000..6523ec2f74d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/message_set.go @@ -0,0 +1,111 @@ +package sarama + +type MessageBlock struct { + Offset int64 + Msg *Message +} + +// Messages is a convenience helper which returns either all the +// messages that are wrapped in this block, or the block itself +// when it wraps no nested message set +func (msb *MessageBlock) Messages() []*MessageBlock { + if msb.Msg.Set != nil { + return msb.Msg.Set.Messages + } + return []*MessageBlock{msb} +} + +func (msb *MessageBlock) encode(pe packetEncoder) error { + pe.putInt64(msb.Offset) + pe.push(&lengthField{}) + err := msb.Msg.encode(pe) + if err != nil { + return err + } + return pe.pop() +} + +func (msb *MessageBlock) decode(pd packetDecoder) (err error) { + if msb.Offset, err = pd.getInt64(); err != nil { + return err + } + + lengthDecoder := acquireLengthField() + defer releaseLengthField(lengthDecoder) + + if err = pd.push(lengthDecoder); err != nil { + return err + } + + msb.Msg = new(Message) + if err = msb.Msg.decode(pd); err != nil { + return err + } + + if err = pd.pop(); err != nil { + return err + } + + return nil +} + +type MessageSet struct { + PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock + OverflowMessage bool // whether the set on the wire contained an overflow message + Messages []*MessageBlock +} + +func (ms *MessageSet) encode(pe packetEncoder) error { + for i := range ms.Messages { + err := ms.Messages[i].encode(pe) + if err != nil { + return err + } + } + return nil +} + +func (ms *MessageSet) decode(pd packetDecoder) (err error) { + ms.Messages = nil + + for pd.remaining() > 0 { + magic, err := magicValue(pd) + if err != nil { + if err == ErrInsufficientData { + ms.PartialTrailingMessage = true + return nil + } + return err + } + + if magic > 1 { + return nil + } + + msb := new(MessageBlock) + err = msb.decode(pd) + switch err { + case nil: + ms.Messages = append(ms.Messages, msb) + case ErrInsufficientData: + // As an optimization the server is allowed to return a partial message at the + // end of the message set. Clients should handle this case.
So we just ignore such things. + if msb.Offset == -1 { + // This is an overflow message caused by chunked down conversion + ms.OverflowMessage = true + } else { + ms.PartialTrailingMessage = true + } + return nil + default: + return err + } + } + + return nil +} + +func (ms *MessageSet) addMessage(msg *Message) { + block := new(MessageBlock) + block.Msg = msg + ms.Messages = append(ms.Messages, block) +} diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go new file mode 100644 index 00000000000..e835f5a9c8a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -0,0 +1,85 @@ +package sarama + +type MetadataRequest struct { + Version int16 + Topics []string + AllowAutoTopicCreation bool +} + +func (r *MetadataRequest) encode(pe packetEncoder) error { + if r.Version < 0 || r.Version > 5 { + return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} + } + if r.Version == 0 || len(r.Topics) > 0 { + err := pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + + for i := range r.Topics { + err = pe.putString(r.Topics[i]) + if err != nil { + return err + } + } + } else { + pe.putInt32(-1) + } + if r.Version > 3 { + pe.putBool(r.AllowAutoTopicCreation) + } + return nil +} + +func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + size, err := pd.getInt32() + if err != nil { + return err + } + if size > 0 { + r.Topics = make([]string, size) + for i := range r.Topics { + topic, err := pd.getString() + if err != nil { + return err + } + r.Topics[i] = topic + } + } + if r.Version > 3 { + autoCreation, err := pd.getBool() + if err != nil { + return err + } + r.AllowAutoTopicCreation = autoCreation + } + return nil +} + +func (r *MetadataRequest) key() int16 { + return 3 +} + +func (r *MetadataRequest) version() int16 { + return r.Version +} + +func (r *MetadataRequest) headerVersion() int16 { + return 1 +} + +func (r *MetadataRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_0_0 + case 2: + return V0_10_1_0 + case 3, 4: + return V0_11_0_0 + case 5: + return V1_0_0_0 + default: + return MinVersion + } +} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go new file mode 100644 index 00000000000..0bb8702cc37 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -0,0 +1,325 @@ +package sarama + +type PartitionMetadata struct { + Err KError + ID int32 + Leader int32 + Replicas []int32 + Isr []int32 + OfflineReplicas []int32 +} + +func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + pm.Err = KError(tmp) + + pm.ID, err = pd.getInt32() + if err != nil { + return err + } + + pm.Leader, err = pd.getInt32() + if err != nil { + return err + } + + pm.Replicas, err = pd.getInt32Array() + if err != nil { + return err + } + + pm.Isr, err = pd.getInt32Array() + if err != nil { + return err + } + + if version >= 5 { + pm.OfflineReplicas, err = pd.getInt32Array() + if err != nil { + return err + } + } + + return nil +} + +func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(pm.Err)) + pe.putInt32(pm.ID) + pe.putInt32(pm.Leader) + + err = pe.putInt32Array(pm.Replicas) + if err != nil { + return err + } + + err = pe.putInt32Array(pm.Isr) + if err != nil { + return err + } + + if 
version >= 5 { + err = pe.putInt32Array(pm.OfflineReplicas) + if err != nil { + return err + } + } + + return nil +} + +type TopicMetadata struct { + Err KError + Name string + IsInternal bool // Only valid for Version >= 1 + Partitions []*PartitionMetadata +} + +func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + tm.Err = KError(tmp) + + tm.Name, err = pd.getString() + if err != nil { + return err + } + + if version >= 1 { + tm.IsInternal, err = pd.getBool() + if err != nil { + return err + } + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + tm.Partitions = make([]*PartitionMetadata, n) + for i := 0; i < n; i++ { + tm.Partitions[i] = new(PartitionMetadata) + err = tm.Partitions[i].decode(pd, version) + if err != nil { + return err + } + } + + return nil +} + +func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(tm.Err)) + + err = pe.putString(tm.Name) + if err != nil { + return err + } + + if version >= 1 { + pe.putBool(tm.IsInternal) + } + + err = pe.putArrayLength(len(tm.Partitions)) + if err != nil { + return err + } + + for _, pm := range tm.Partitions { + err = pm.encode(pe, version) + if err != nil { + return err + } + } + + return nil +} + +type MetadataResponse struct { + Version int16 + ThrottleTimeMs int32 + Brokers []*Broker + ClusterID *string + ControllerID int32 + Topics []*TopicMetadata +} + +func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Brokers = make([]*Broker, n) + for i := 0; i < n; i++ { + r.Brokers[i] = new(Broker) + err = r.Brokers[i].decode(pd, version) + if err != nil { + return err + } + } + + if version >= 2 { + r.ClusterID, err = pd.getNullableString() + if err != nil { + return err + } + } + + if version >= 1 { + r.ControllerID, err = pd.getInt32() + if err != nil { + return err + } + } else { + r.ControllerID = -1 + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + r.Topics = make([]*TopicMetadata, n) + for i := 0; i < n; i++ { + r.Topics[i] = new(TopicMetadata) + err = r.Topics[i].decode(pd, version) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + + err := pe.putArrayLength(len(r.Brokers)) + if err != nil { + return err + } + for _, broker := range r.Brokers { + err = broker.encode(pe, r.Version) + if err != nil { + return err + } + } + + if r.Version >= 2 { + err := pe.putNullableString(r.ClusterID) + if err != nil { + return err + } + } + + if r.Version >= 1 { + pe.putInt32(r.ControllerID) + } + + err = pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + for _, tm := range r.Topics { + err = tm.encode(pe, r.Version) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) key() int16 { + return 3 +} + +func (r *MetadataResponse) version() int16 { + return r.Version +} + +func (r *MetadataResponse) headerVersion() int16 { + return 0 +} + +func (r *MetadataResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_0_0 + case 2: + return V0_10_1_0 + case 3, 4: + return V0_11_0_0 + case 5: + return V1_0_0_0 + default: + return MinVersion + } 
+} + +// testing API + +func (r *MetadataResponse) AddBroker(addr string, id int32) { + r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) +} + +func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { + var tmatch *TopicMetadata + + for _, tm := range r.Topics { + if tm.Name == topic { + tmatch = tm + goto foundTopic + } + } + + tmatch = new(TopicMetadata) + tmatch.Name = topic + r.Topics = append(r.Topics, tmatch) + +foundTopic: + + tmatch.Err = err + return tmatch +} + +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) { + tmatch := r.AddTopic(topic, ErrNoError) + var pmatch *PartitionMetadata + + for _, pm := range tmatch.Partitions { + if pm.ID == partition { + pmatch = pm + goto foundPartition + } + } + + pmatch = new(PartitionMetadata) + pmatch.ID = partition + tmatch.Partitions = append(tmatch.Partitions, pmatch) + +foundPartition: + + pmatch.Leader = brokerID + pmatch.Replicas = replicas + pmatch.Isr = isr + pmatch.OfflineReplicas = offline + pmatch.Err = err +} diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go new file mode 100644 index 00000000000..90e5a87f497 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -0,0 +1,43 @@ +package sarama + +import ( + "fmt" + "strings" + + "github.com/rcrowley/go-metrics" +) + +// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library: +// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution, +// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements. +// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38 +const ( + metricsReservoirSize = 1028 + metricsAlphaFactor = 0.015 +) + +func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram { + return r.GetOrRegister(name, func() metrics.Histogram { + return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor)) + }).(metrics.Histogram) +} + +func getMetricNameForBroker(name string, broker *Broker) string { + // Use broker id like the Java client as it does not contain '.' or ':' characters that + // can be interpreted as special character by monitoring tool (e.g. Graphite) + return fmt.Sprintf(name+"-for-broker-%d", broker.ID()) +} + +func getMetricNameForTopic(name string, topic string) string { + // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy + // cf. 
KAFKA-1902 and KAFKA-2337
+	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 00000000000..ff5a68ae7fe
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,415 @@
+package sarama
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+const (
+	expectationTimeout = 500 * time.Millisecond
+)
+
+type GSSApiHandlerFunc func([]byte) []byte
+
+type requestHandlerFunc func(req *request) (res encoderWithHeader)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, and makes it the
+// test writer's responsibility to program MockBroker behaviour that is correct
+// according to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from those connections and returns responses programmed by the
+// SetHandlerByMap function. If a MockBroker receives a request that it has no
+// programmed response for, then it returns nothing and the request times out.
+//
+// A set of MockResponse builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockResponses of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes; the server does that automatically as a convenience.
+type MockBroker struct {
+	brokerID      int32
+	port          int32
+	closing       chan none
+	stopper       chan none
+	expectations  chan encoderWithHeader
+	listener      net.Listener
+	t             TestReporter
+	latency       time.Duration
+	handler       requestHandlerFunc
+	notifier      RequestNotifierFunc
+	history       []RequestResponse
+	lock          sync.Mutex
+	gssApiHandler GSSApiHandlerFunc
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+	Request  protocolBody
+	Response encoder
+}
+
+// SetLatency makes the broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+	b.latency = latency
+}
+
+// SetHandlerByMap defines the mapping of Request types to MockResponses.
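+//
+// For example, a test might program the broker as in the following sketch
+// (an editorial illustration, not part of the upstream source; t is assumed
+// to be a *testing.T and "my-topic" an arbitrary topic name):
+//
+//	broker := NewMockBroker(t, 1)
+//	defer broker.Close()
+//	broker.SetHandlerByMap(map[string]MockResponse{
+//		"MetadataRequest": NewMockMetadataResponse(t).
+//			SetBroker(broker.Addr(), broker.BrokerID()).
+//			SetLeader("my-topic", 0, broker.BrokerID()),
+//	})
+//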
+// When a request is received by the broker, it looks up the request type in
+// the map and uses the found MockResponse instance to generate an appropriate
+// reply. If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+	b.setHandler(func(req *request) (res encoderWithHeader) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		mockResponse := handlerMap[reqTypeName]
+		if mockResponse == nil {
+			return nil
+		}
+		return mockResponse.For(req.body)
+	})
+}
+
+// SetNotifier sets a function that will get invoked whenever a request has
+// been processed successfully and will provide the number of bytes read and
+// written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+	b.lock.Lock()
+	b.notifier = notifier
+	b.lock.Unlock()
+}
+
+// BrokerID returns the broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+	return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+	b.lock.Lock()
+	history := make([]RequestResponse, len(b.history))
+	copy(history, b.history)
+	b.lock.Unlock()
+	return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+	return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
:". +func (b *MockBroker) Addr() string { + return b.listener.Addr().String() +} + +// Close terminates the broker blocking until it stops internal goroutines and +// releases all resources. +func (b *MockBroker) Close() { + close(b.expectations) + if len(b.expectations) > 0 { + buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID())) + for e := range b.expectations { + _, _ = buf.WriteString(spew.Sdump(e)) + } + b.t.Error(buf.String()) + } + close(b.closing) + <-b.stopper +} + +// setHandler sets the specified function as the request handler. Whenever +// a mock broker reads a request from the wire it passes the request to the +// function and sends back whatever the handler function returns. +func (b *MockBroker) setHandler(handler requestHandlerFunc) { + b.lock.Lock() + b.handler = handler + b.lock.Unlock() +} + +func (b *MockBroker) serverLoop() { + defer close(b.stopper) + var err error + var conn net.Conn + + go func() { + <-b.closing + err := b.listener.Close() + if err != nil { + b.t.Error(err) + } + }() + + wg := &sync.WaitGroup{} + i := 0 + for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { + wg.Add(1) + go b.handleRequests(conn, i, wg) + i++ + } + wg.Wait() + Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) +} + +func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) { + b.gssApiHandler = handler +} + +func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) { + var ( + bytesRead int + lengthBytes = make([]byte, 4) + ) + + if _, err := io.ReadFull(r, lengthBytes); err != nil { + return nil, err + } + + bytesRead += len(lengthBytes) + length := int32(binary.BigEndian.Uint32(lengthBytes)) + + if length <= 4 || length > MaxRequestSize { + return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} + } + + encodedReq := make([]byte, length) + if _, err := io.ReadFull(r, encodedReq); err != nil { + return nil, err + } + + bytesRead += len(encodedReq) + + fullBytes := append(lengthBytes, encodedReq...) 
+
+	return fullBytes, nil
+}
+
+func (b *MockBroker) isGSSAPI(buffer []byte) bool {
+	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
+}
+
+func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	defer func() {
+		_ = conn.Close()
+	}()
+	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+	var err error
+
+	abort := make(chan none)
+	defer close(abort)
+	go func() {
+		select {
+		case <-b.closing:
+			_ = conn.Close()
+		case <-abort:
+		}
+	}()
+
+	var bytesWritten int
+	var bytesRead int
+	for {
+		buffer, err := b.readToBytes(conn)
+		if err != nil {
+			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
+			b.serverError(err)
+			break
+		}
+
+		bytesWritten = 0
+		if !b.isGSSAPI(buffer) {
+			req, br, err := decodeRequest(bytes.NewReader(buffer))
+			bytesRead = br
+			if err != nil {
+				Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+				b.serverError(err)
+				break
+			}
+
+			if b.latency > 0 {
+				time.Sleep(b.latency)
+			}
+
+			b.lock.Lock()
+			res := b.handler(req)
+			b.history = append(b.history, RequestResponse{req.body, res})
+			b.lock.Unlock()
+
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+				continue
+			}
+			Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+
+			encodedRes, err := encode(res, nil)
+			if err != nil {
+				b.serverError(err)
+				break
+			}
+			if len(encodedRes) == 0 {
+				b.lock.Lock()
+				if b.notifier != nil {
+					b.notifier(bytesRead, 0)
+				}
+				b.lock.Unlock()
+				continue
+			}
+
+			resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
+			if _, err = conn.Write(resHeader); err != nil {
+				b.serverError(err)
+				break
+			}
+			if _, err = conn.Write(encodedRes); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(resHeader) + len(encodedRes)
+		} else {
+			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
+			// History is not supported for this kind of request, as it is only used to
+			// test the GSSAPI authentication mechanism.
+			b.lock.Lock()
+			res := b.gssApiHandler(buffer)
+			b.lock.Unlock()
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
+				continue
+			}
+			if _, err = conn.Write(res); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(res)
+		}
+
+		b.lock.Lock()
+		if b.notifier != nil {
+			b.notifier(bytesRead, bytesWritten)
+		}
+		b.lock.Unlock()
+	}
+	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
+	headerLength := uint32(8)
+
+	if headerVersion >= 1 {
+		headerLength = 9
+	}
+
+	resHeader := make([]byte, headerLength)
+	binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
+	binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
+
+	if headerVersion >= 1 {
+		binary.PutUvarint(resHeader[8:], 0)
+	}
+
+	return resHeader
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
+	select {
+	case res, ok := <-b.expectations:
+		if !ok {
+			return nil
+		}
+		return res
+	case <-time.After(expectationTimeout):
+		return nil
+	}
+}
+
+func (b *MockBroker) serverError(err error) {
+	isConnectionClosedError := false
+	if _, ok := err.(*net.OpError); ok {
+		isConnectionClosedError = true
+	} else if err == io.EOF {
+		isConnectionClosedError = true
+	} else if err.Error() == "use of closed network connection" {
+		isConnectionClosedError = true
+	}
+
+	if isConnectionClosedError {
+		return
+	}
+
+	b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and a channel of responses to use. If an error occurs it is
+// simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+	return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
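+//
+// A usage sketch (editorial illustration, not upstream code; t and the
+// loopback listen address are assumed):
+//
+//	ln, err := net.Listen("tcp", "127.0.0.1:0")
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	broker := NewMockBrokerListener(t, 1, ln)
+//	defer broker.Close()
+//	broker.Returns(new(MetadataResponse))
+//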
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker { + var err error + + broker := &MockBroker{ + closing: make(chan none), + stopper: make(chan none), + t: t, + brokerID: brokerID, + expectations: make(chan encoderWithHeader, 512), + listener: listener, + } + broker.handler = broker.defaultRequestHandler + + Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) + _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + tmp, err := strconv.ParseInt(portStr, 10, 32) + if err != nil { + t.Fatal(err) + } + broker.port = int32(tmp) + + go broker.serverLoop() + + return broker +} + +func (b *MockBroker) Returns(e encoderWithHeader) { + b.expectations <- e +} diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go new file mode 100644 index 00000000000..d36649d8ace --- /dev/null +++ b/vendor/github.com/Shopify/sarama/mockkerberos.go @@ -0,0 +1,123 @@ +package sarama + +import ( + "encoding/binary" + "encoding/hex" + + "gopkg.in/jcmturner/gokrb5.v7/credentials" + "gopkg.in/jcmturner/gokrb5.v7/gssapi" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +type KafkaGSSAPIHandler struct { + client *MockKerberosClient + badResponse bool + badKeyChecksum bool +} + +func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte { + // Default payload used for verify + err := h.client.Login() // Mock client construct keys when login + if err != nil { + return nil + } + if h.badResponse { // Returns trash + return []byte{0x00, 0x00, 0x00, 0x01, 0xAD} + } + + var pack = gssapi.WrapToken{ + Flags: KRB5_USER_AUTH, + EC: 12, + RRC: 0, + SndSeqNum: 3398292281, + Payload: []byte{0x11, 0x00}, // 1100 + } + // Compute checksum + if h.badKeyChecksum { + pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} + } else { + err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL) + if err != nil { + return nil + } + } + + packBytes, err := pack.Marshal() + if err != nil { + return nil + } + lenBytes := len(packBytes) + response := make([]byte, lenBytes+4) + copy(response[4:], packBytes) + binary.BigEndian.PutUint32(response, uint32(lenBytes)) + return response +} + +type MockKerberosClient struct { + asRepBytes string + ASRep messages.ASRep + credentials *credentials.Credentials + mockError error + errorStage string +} + +func (c *MockKerberosClient) Login() error { + if c.errorStage == "login" && c.mockError != nil { + return c.mockError + } + c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" + + "558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" + + "4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" + + "7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" + + "d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" + + "549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" + + "2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" + + 
"7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" + + "997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" + + "482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" + + "03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" + + "331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" + + "aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" + + "da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" + + "eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885" + apRepBytes, err := hex.DecodeString(c.asRepBytes) + if err != nil { + return err + } + err = c.ASRep.Unmarshal(apRepBytes) + if err != nil { + return err + } + c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty") + _, err = c.ASRep.DecryptEncPart(c.credentials) + if err != nil { + return err + } + return nil +} + +func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) { + if c.errorStage == "service_ticket" && c.mockError != nil { + return messages.Ticket{}, types.EncryptionKey{}, c.mockError + } + return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil +} + +func (c *MockKerberosClient) Domain() string { + return "EXAMPLE.COM" +} +func (c *MockKerberosClient) CName() types.PrincipalName { + var p = types.PrincipalName{ + NameType: KRB5_USER_AUTH, + NameString: []string{ + "kafka", + "kafka", + }, + } + return p +} +func (c *MockKerberosClient) Destroy() { + // Do nothing. +} diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go new file mode 100644 index 00000000000..3df1ee0a72a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -0,0 +1,1269 @@ +package sarama + +import ( + "fmt" + "strings" +) + +// TestReporter has methods matching go's testing.T to avoid importing +// `testing` in the main part of the library. +type TestReporter interface { + Error(...interface{}) + Errorf(string, ...interface{}) + Fatal(...interface{}) + Fatalf(string, ...interface{}) +} + +// MockResponse is a response builder interface it defines one method that +// allows generating a response based on a request body. MockResponses are used +// to program behavior of MockBroker in tests. +type MockResponse interface { + For(reqBody versionedDecoder) (res encoderWithHeader) +} + +// MockWrapper is a mock response builder that returns a particular concrete +// response regardless of the actual request passed to the `For` method. +type MockWrapper struct { + res encoderWithHeader +} + +func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) { + return mw.res +} + +func NewMockWrapper(res encoderWithHeader) *MockWrapper { + return &MockWrapper{res: res} +} + +// MockSequence is a mock response builder that is created from a sequence of +// concrete responses. Every time when a `MockBroker` calls its `For` method +// the next response from the sequence is returned. When the end of the +// sequence is reached the last element from the sequence is returned. 
+type MockSequence struct { + responses []MockResponse +} + +func NewMockSequence(responses ...interface{}) *MockSequence { + ms := &MockSequence{} + ms.responses = make([]MockResponse, len(responses)) + for i, res := range responses { + switch res := res.(type) { + case MockResponse: + ms.responses[i] = res + case encoderWithHeader: + ms.responses[i] = NewMockWrapper(res) + default: + panic(fmt.Sprintf("Unexpected response type: %T", res)) + } + } + return ms +} + +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) { + res = mc.responses[0].For(reqBody) + if len(mc.responses) > 1 { + mc.responses = mc.responses[1:] + } + return res +} + +type MockListGroupsResponse struct { + groups map[string]string + t TestReporter +} + +func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { + return &MockListGroupsResponse{ + groups: make(map[string]string), + t: t, + } +} + +func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + request := reqBody.(*ListGroupsRequest) + _ = request + response := &ListGroupsResponse{ + Groups: m.groups, + } + return response +} + +func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse { + m.groups[groupID] = protocolType + return m +} + +type MockDescribeGroupsResponse struct { + groups map[string]*GroupDescription + t TestReporter +} + +func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse { + return &MockDescribeGroupsResponse{ + t: t, + groups: make(map[string]*GroupDescription), + } +} + +func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse { + m.groups[groupID] = description + return m +} + +func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + request := reqBody.(*DescribeGroupsRequest) + + response := &DescribeGroupsResponse{} + for _, requestedGroup := range request.Groups { + if group, ok := m.groups[requestedGroup]; ok { + response.Groups = append(response.Groups, group) + } else { + // Mimic real kafka - if a group doesn't exist, return + // an entry with state "Dead" + response.Groups = append(response.Groups, &GroupDescription{ + GroupId: requestedGroup, + State: "Dead", + }) + } + } + + return response +} + +// MockMetadataResponse is a `MetadataResponse` builder. 
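+//
+// A minimal editorial sketch of programming it (not upstream code; the broker
+// address, broker ID, and topic name are assumed):
+//
+//	NewMockMetadataResponse(t).
+//		SetBroker("localhost:9092", 1).
+//		SetController(1).
+//		SetLeader("my-topic", 0, 1)
+//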
+type MockMetadataResponse struct { + controllerID int32 + leaders map[string]map[int32]int32 + brokers map[string]int32 + t TestReporter +} + +func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { + return &MockMetadataResponse{ + leaders: make(map[string]map[int32]int32), + brokers: make(map[string]int32), + t: t, + } +} + +func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { + partitions := mmr.leaders[topic] + if partitions == nil { + partitions = make(map[int32]int32) + mmr.leaders[topic] = partitions + } + partitions[partition] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { + mmr.brokers[addr] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse { + mmr.controllerID = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { + metadataRequest := reqBody.(*MetadataRequest) + metadataResponse := &MetadataResponse{ + Version: metadataRequest.version(), + ControllerID: mmr.controllerID, + } + for addr, brokerID := range mmr.brokers { + metadataResponse.AddBroker(addr, brokerID) + } + + // Generate set of replicas + var replicas []int32 + var offlineReplicas []int32 + for _, brokerID := range mmr.brokers { + replicas = append(replicas, brokerID) + } + + if len(metadataRequest.Topics) == 0 { + for topic, partitions := range mmr.leaders { + for partition, brokerID := range partitions { + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) + } + } + return metadataResponse + } + for _, topic := range metadataRequest.Topics { + for partition, brokerID := range mmr.leaders[topic] { + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) + } + } + return metadataResponse +} + +// MockOffsetResponse is an `OffsetResponse` builder. 
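+//
+// An editorial sketch of seeding offsets (not upstream code; the topic name is
+// assumed, and OffsetOldest/OffsetNewest are Sarama's standard sentinel time
+// values, assumed to be available in this package):
+//
+//	NewMockOffsetResponse(t).
+//		SetOffset("my-topic", 0, OffsetOldest, 0).
+//		SetOffset("my-topic", 0, OffsetNewest, 42)
+//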
+type MockOffsetResponse struct { + offsets map[string]map[int32]map[int64]int64 + t TestReporter + version int16 +} + +func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { + return &MockOffsetResponse{ + offsets: make(map[string]map[int32]map[int64]int64), + t: t, + } +} + +func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { + mor.version = version + return mor +} + +func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { + partitions := mor.offsets[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]int64) + mor.offsets[topic] = partitions + } + times := partitions[partition] + if times == nil { + times = make(map[int64]int64) + partitions[partition] = times + } + times[time] = offset + return mor +} + +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { + offsetRequest := reqBody.(*OffsetRequest) + offsetResponse := &OffsetResponse{Version: mor.version} + for topic, partitions := range offsetRequest.blocks { + for partition, block := range partitions { + offset := mor.getOffset(topic, partition, block.time) + offsetResponse.AddTopicPartition(topic, partition, offset) + } + } + return offsetResponse +} + +func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { + partitions := mor.offsets[topic] + if partitions == nil { + mor.t.Errorf("missing topic: %s", topic) + } + times := partitions[partition] + if times == nil { + mor.t.Errorf("missing partition: %d", partition) + } + offset, ok := times[time] + if !ok { + mor.t.Errorf("missing time: %d", time) + } + return offset +} + +// MockFetchResponse is a `FetchResponse` builder. +type MockFetchResponse struct { + messages map[string]map[int32]map[int64]Encoder + highWaterMarks map[string]map[int32]int64 + t TestReporter + batchSize int + version int16 +} + +func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { + return &MockFetchResponse{ + messages: make(map[string]map[int32]map[int64]Encoder), + highWaterMarks: make(map[string]map[int32]int64), + t: t, + batchSize: batchSize, + } +} + +func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { + mfr.version = version + return mfr +} + +func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { + partitions := mfr.messages[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]Encoder) + mfr.messages[topic] = partitions + } + messages := partitions[partition] + if messages == nil { + messages = make(map[int64]Encoder) + partitions[partition] = messages + } + messages[offset] = msg + return mfr +} + +func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + partitions = make(map[int32]int64) + mfr.highWaterMarks[topic] = partitions + } + partitions[partition] = offset + return mfr +} + +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { + fetchRequest := reqBody.(*FetchRequest) + res := &FetchResponse{ + Version: mfr.version, + } + for topic, partitions := range fetchRequest.blocks { + for partition, block := range partitions { + initialOffset := block.fetchOffset + offset := initialOffset + maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) + for i := 0; i < mfr.batchSize && offset < maxOffset; { + msg := 
mfr.getMessage(topic, partition, offset) + if msg != nil { + res.AddMessage(topic, partition, nil, msg, offset) + i++ + } + offset++ + } + fb := res.GetBlock(topic, partition) + if fb == nil { + res.AddError(topic, partition, ErrNoError) + fb = res.GetBlock(topic, partition) + } + fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) + } + } + return res +} + +func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { + partitions := mfr.messages[topic] + if partitions == nil { + return nil + } + messages := partitions[partition] + if messages == nil { + return nil + } + return messages[offset] +} + +func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { + partitions := mfr.messages[topic] + if partitions == nil { + return 0 + } + messages := partitions[partition] + if messages == nil { + return 0 + } + return len(messages) +} + +func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + return 0 + } + return partitions[partition] +} + +// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. +type MockConsumerMetadataResponse struct { + coordinators map[string]interface{} + t TestReporter +} + +func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { + return &MockConsumerMetadataResponse{ + coordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { + mr.coordinators[group] = broker + return mr +} + +func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { + mr.coordinators[group] = kerror + return mr +} + +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ConsumerMetadataRequest) + group := req.ConsumerGroup + res := &ConsumerMetadataResponse{} + v := mr.coordinators[group] + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder. 
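+//
+// An editorial sketch (not upstream code; the group and transaction names are
+// assumed, broker is an assumed *MockBroker, and
+// ErrConsumerCoordinatorNotAvailable is Sarama's standard KError for a missing
+// coordinator):
+//
+//	NewMockFindCoordinatorResponse(t).
+//		SetCoordinator(CoordinatorGroup, "my-group", broker).
+//		SetError(CoordinatorTransaction, "my-txn", ErrConsumerCoordinatorNotAvailable)
+//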
+type MockFindCoordinatorResponse struct { + groupCoordinators map[string]interface{} + transCoordinators map[string]interface{} + t TestReporter +} + +func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse { + return &MockFindCoordinatorResponse{ + groupCoordinators: make(map[string]interface{}), + transCoordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = broker + case CoordinatorTransaction: + mr.transCoordinators[group] = broker + } + return mr +} + +func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = kerror + case CoordinatorTransaction: + mr.transCoordinators[group] = kerror + } + return mr +} + +func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*FindCoordinatorRequest) + res := &FindCoordinatorResponse{} + var v interface{} + switch req.CoordinatorType { + case CoordinatorGroup: + v = mr.groupCoordinators[req.CoordinatorKey] + case CoordinatorTransaction: + v = mr.transCoordinators[req.CoordinatorKey] + } + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// MockOffsetCommitResponse is a `OffsetCommitResponse` builder. +type MockOffsetCommitResponse struct { + errors map[string]map[string]map[int32]KError + t TestReporter +} + +func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { + return &MockOffsetCommitResponse{t: t} +} + +func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[string]map[int32]KError) + } + topics := mr.errors[group] + if topics == nil { + topics = make(map[string]map[int32]KError) + mr.errors[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]KError) + topics[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*OffsetCommitRequest) + group := req.ConsumerGroup + res := &OffsetCommitResponse{} + for topic, partitions := range req.blocks { + for partition := range partitions { + res.AddError(topic, partition, mr.getError(group, topic, partition)) + } + } + return res +} + +func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { + topics := mr.errors[group] + if topics == nil { + return ErrNoError + } + partitions := topics[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockProduceResponse is a `ProduceResponse` builder. 
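+//
+// An editorial sketch (not upstream code; the topic name is assumed, and
+// ErrNotLeaderForPartition is Sarama's standard KError for a stale leader):
+//
+//	NewMockProduceResponse(t).
+//		SetVersion(2).
+//		SetError("my-topic", 0, ErrNotLeaderForPartition)
+//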
+type MockProduceResponse struct { + version int16 + errors map[string]map[int32]KError + t TestReporter +} + +func NewMockProduceResponse(t TestReporter) *MockProduceResponse { + return &MockProduceResponse{t: t} +} + +func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse { + mr.version = version + return mr +} + +func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[int32]KError) + } + partitions := mr.errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + mr.errors[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ProduceRequest) + res := &ProduceResponse{ + Version: mr.version, + } + for topic, partitions := range req.records { + for partition := range partitions { + res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) + } + } + return res +} + +func (mr *MockProduceResponse) getError(topic string, partition int32) KError { + partitions := mr.errors[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockOffsetFetchResponse is a `OffsetFetchResponse` builder. +type MockOffsetFetchResponse struct { + offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock + error KError + t TestReporter +} + +func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse { + return &MockOffsetFetchResponse{t: t} +} + +func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse { + if mr.offsets == nil { + mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) + } + topics := mr.offsets[group] + if topics == nil { + topics = make(map[string]map[int32]*OffsetFetchResponseBlock) + mr.offsets[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + topics[topic] = partitions + } + partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror} + return mr +} + +func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse { + mr.error = kerror + return mr +} + +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*OffsetFetchRequest) + group := req.ConsumerGroup + res := &OffsetFetchResponse{Version: req.Version} + + for topic, partitions := range mr.offsets[group] { + for partition, block := range partitions { + res.AddBlock(topic, partition, block) + } + } + + if res.Version >= 2 { + res.Err = mr.error + } + return res +} + +type MockCreateTopicsResponse struct { + t TestReporter +} + +func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse { + return &MockCreateTopicsResponse{t: t} +} + +func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreateTopicsRequest) + res := &CreateTopicsResponse{ + Version: req.Version, + } + res.TopicErrors = make(map[string]*TopicError) + + for topic := range req.TopicDetails { + if res.Version >= 1 && strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create topic with reserved prefix" + res.TopicErrors[topic] = &TopicError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + 
} + res.TopicErrors[topic] = &TopicError{Err: ErrNoError} + } + return res +} + +type MockDeleteTopicsResponse struct { + t TestReporter +} + +func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { + return &MockDeleteTopicsResponse{t: t} +} + +func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteTopicsRequest) + res := &DeleteTopicsResponse{} + res.TopicErrorCodes = make(map[string]KError) + + for _, topic := range req.Topics { + res.TopicErrorCodes[topic] = ErrNoError + } + res.Version = req.Version + return res +} + +type MockCreatePartitionsResponse struct { + t TestReporter +} + +func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse { + return &MockCreatePartitionsResponse{t: t} +} + +func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreatePartitionsRequest) + res := &CreatePartitionsResponse{} + res.TopicPartitionErrors = make(map[string]*TopicPartitionError) + + for topic := range req.TopicPartitions { + if strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create partition on topic with reserved prefix" + res.TopicPartitionErrors[topic] = &TopicPartitionError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + } + res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError} + } + return res +} + +type MockAlterPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse { + return &MockAlterPartitionReassignmentsResponse{t: t} +} + +func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterPartitionReassignmentsRequest) + _ = req + res := &AlterPartitionReassignmentsResponse{} + return res +} + +type MockListPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse { + return &MockListPartitionReassignmentsResponse{t: t} +} + +func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ListPartitionReassignmentsRequest) + _ = req + res := &ListPartitionReassignmentsResponse{} + + for topic, partitions := range req.blocks { + for _, partition := range partitions { + res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2}) + } + } + + return res +} + +type MockDeleteRecordsResponse struct { + t TestReporter +} + +func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { + return &MockDeleteRecordsResponse{t: t} +} + +func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteRecordsRequest) + res := &DeleteRecordsResponse{} + res.Topics = make(map[string]*DeleteRecordsResponseTopic) + + for topic, deleteRecordRequestTopic := range req.Topics { + partitions := make(map[int32]*DeleteRecordsResponsePartition) + for partition := range deleteRecordRequestTopic.PartitionOffsets { + partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError} + } + res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions} + } + return res +} + +type MockDescribeConfigsResponse struct { + t TestReporter +} + +func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse { + return &MockDescribeConfigsResponse{t: t} +} + +func (mr 
*MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + includeSynonyms := req.Version > 0 + includeSource := req.Version > 0 + + for _, r := range req.Resources { + var configEntries []*ConfigEntry + switch r.Type { + case BrokerResource: + configEntries = append(configEntries, + &ConfigEntry{ + Name: "min.insync.replicas", + Value: "2", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case BrokerLoggerResource: + configEntries = append(configEntries, + &ConfigEntry{ + Name: "kafka.controller.KafkaController", + Value: "DEBUG", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case TopicResource: + maxMessageBytes := &ConfigEntry{Name: "max.message.bytes", + Value: "1000000", + ReadOnly: false, + Default: !includeSource, + Sensitive: false, + } + if includeSource { + maxMessageBytes.Source = SourceDefault + } + if includeSynonyms { + maxMessageBytes.Synonyms = []*ConfigSynonym{ + { + ConfigName: "max.message.bytes", + ConfigValue: "500000", + }, + } + } + retentionMs := &ConfigEntry{Name: "retention.ms", + Value: "5000", + ReadOnly: false, + Default: false, + Sensitive: false, + } + if includeSynonyms { + retentionMs.Synonyms = []*ConfigSynonym{ + { + ConfigName: "log.retention.ms", + ConfigValue: "2500", + }, + } + } + password := &ConfigEntry{Name: "password", + Value: "12345", + ReadOnly: false, + Default: false, + Sensitive: true, + } + configEntries = append( + configEntries, maxMessageBytes, retentionMs, password) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + } + } + return res +} + +type MockDescribeConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode { + return &MockDescribeConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + +type MockAlterConfigsResponse struct { + t TestReporter +} + +func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { + return &MockAlterConfigsResponse{t: t} +} + +func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name, + Type: r.Type, + ErrorMsg: "", + }) + } + return res +} + +type MockAlterConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode { + return &MockAlterConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources { + 
res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + +type MockCreateAclsResponse struct { + t TestReporter +} + +func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { + return &MockCreateAclsResponse{t: t} +} + +func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreateAclsRequest) + res := &CreateAclsResponse{} + + for range req.AclCreations { + res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError}) + } + return res +} + +type MockListAclsResponse struct { + t TestReporter +} + +func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { + return &MockListAclsResponse{t: t} +} + +func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeAclsRequest) + res := &DescribeAclsResponse{} + res.Err = ErrNoError + acl := &ResourceAcls{} + if req.ResourceName != nil { + acl.Resource.ResourceName = *req.ResourceName + } + acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter + acl.Resource.ResourceType = req.ResourceType + + host := "*" + if req.Host != nil { + host = *req.Host + } + + principal := "User:test" + if req.Principal != nil { + principal = *req.Principal + } + + permissionType := req.PermissionType + if permissionType == AclPermissionAny { + permissionType = AclPermissionAllow + } + + acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal}) + res.ResourceAcls = append(res.ResourceAcls, acl) + res.Version = int16(req.Version) + return res +} + +type MockSaslAuthenticateResponse struct { + t TestReporter + kerror KError + saslAuthBytes []byte +} + +func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse { + return &MockSaslAuthenticateResponse{t: t} +} + +func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { + res := &SaslAuthenticateResponse{} + res.Err = msar.kerror + res.SaslAuthBytes = msar.saslAuthBytes + return res +} + +func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse { + msar.kerror = kerror + return msar +} + +func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse { + msar.saslAuthBytes = saslAuthBytes + return msar +} + +type MockDeleteAclsResponse struct { + t TestReporter +} + +type MockSaslHandshakeResponse struct { + enabledMechanisms []string + kerror KError + t TestReporter +} + +func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { + return &MockSaslHandshakeResponse{t: t} +} + +func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader { + res := &SaslHandshakeResponse{} + res.Err = mshr.kerror + res.EnabledMechanisms = mshr.enabledMechanisms + return res +} + +func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse { + mshr.kerror = kerror + return mshr +} + +func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse { + mshr.enabledMechanisms = enabledMechanisms + return mshr +} + +func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse { + return &MockDeleteAclsResponse{t: t} +} + +func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := 
reqBody.(*DeleteAclsRequest) + res := &DeleteAclsResponse{} + + for range req.Filters { + response := &FilterResponse{Err: ErrNoError} + response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError}) + res.FilterResponses = append(res.FilterResponses, response) + } + res.Version = int16(req.Version) + return res +} + +type MockDeleteGroupsResponse struct { + deletedGroups []string +} + +func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse { + return &MockDeleteGroupsResponse{} +} + +func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse { + m.deletedGroups = groups + return m +} + +func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DeleteGroupsResponse{ + GroupErrorCodes: map[string]KError{}, + } + for _, group := range m.deletedGroups { + resp.GroupErrorCodes[group] = ErrNoError + } + return resp +} + +type MockJoinGroupResponse struct { + t TestReporter + + ThrottleTime int32 + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members map[string][]byte +} + +func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse { + return &MockJoinGroupResponse{ + t: t, + Members: make(map[string][]byte), + } +} + +func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*JoinGroupRequest) + resp := &JoinGroupResponse{ + Version: req.Version, + ThrottleTime: m.ThrottleTime, + Err: m.Err, + GenerationId: m.GenerationId, + GroupProtocol: m.GroupProtocol, + LeaderId: m.LeaderId, + MemberId: m.MemberId, + Members: m.Members, + } + return resp +} + +func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse { + m.ThrottleTime = t + return m +} + +func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse { + m.Err = kerr + return m +} + +func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse { + m.GenerationId = id + return m +} + +func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse { + m.GroupProtocol = proto + return m +} + +func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse { + m.LeaderId = id + return m +} + +func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse { + m.MemberId = id + return m +} + +func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse { + bin, err := encode(meta, nil) + if err != nil { + panic(fmt.Sprintf("error encoding member metadata: %v", err)) + } + m.Members[id] = bin + return m +} + +type MockLeaveGroupResponse struct { + t TestReporter + + Err KError +} + +func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse { + return &MockLeaveGroupResponse{t: t} +} + +func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &LeaveGroupResponse{ + Err: m.Err, + } + return resp +} + +func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse { + m.Err = kerr + return m +} + +type MockSyncGroupResponse struct { + t TestReporter + + Err KError + MemberAssignment []byte +} + +func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse { + return &MockSyncGroupResponse{t: t} +} + +func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &SyncGroupResponse{ + Err: m.Err, + MemberAssignment: m.MemberAssignment, + } + return resp +} + +func (m 
*MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
+	bin, err := encode(assignment, nil)
+	if err != nil {
+		panic(fmt.Sprintf("error encoding member assignment: %v", err))
+	}
+	m.MemberAssignment = bin
+	return m
+}
+
+type MockHeartbeatResponse struct {
+	t TestReporter
+
+	Err KError
+}
+
+func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
+	return &MockHeartbeatResponse{t: t}
+}
+
+func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	// Propagate the error configured via SetError; leaving it out would make
+	// SetError a no-op.
+	resp := &HeartbeatResponse{Err: m.Err}
+	return resp
+}
+
+func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
+	m.Err = kerr
+	return m
+}
+
+type MockDescribeLogDirsResponse struct {
+	t       TestReporter
+	logDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
+	return &MockDescribeLogDirsResponse{t: t}
+}
+
+func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
+	var topics []DescribeLogDirsResponseTopic
+	for topic := range topicPartitions {
+		var partitions []DescribeLogDirsResponsePartition
+		for i := 0; i < topicPartitions[topic]; i++ {
+			partitions = append(partitions, DescribeLogDirsResponsePartition{
+				PartitionID: int32(i),
+				IsTemporary: false,
+				OffsetLag:   int64(0),
+				Size:        int64(1234),
+			})
+		}
+		topics = append(topics, DescribeLogDirsResponseTopic{
+			Topic:      topic,
+			Partitions: partitions,
+		})
+	}
+	logDir := DescribeLogDirsResponseDirMetadata{
+		ErrorCode: ErrNoError,
+		Path:      logDirPath,
+		Topics:    topics,
+	}
+	m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
+	return m
+}
+
+func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	resp := &DescribeLogDirsResponse{
+		LogDirs: m.logDirs,
+	}
+	return resp
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 00000000000..9931cade512
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,214 @@
+package sarama
+
+import "errors"
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
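+//
+// An editorial sketch of a manually-managed commit (not upstream code; the
+// group, topic, partition, and offset values are assumed):
+//
+//	req := &OffsetCommitRequest{Version: 1, ConsumerGroup: "my-group"}
+//	req.ConsumerGroupGeneration = GroupGenerationUndefined
+//	req.AddBlock("my-topic", 0, 42, ReceiveTime, "")
+//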
+const GroupGenerationUndefined = -1 + +type offsetCommitRequestBlock struct { + offset int64 + timestamp int64 + metadata string +} + +func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(b.offset) + if version == 1 { + pe.putInt64(b.timestamp) + } else if b.timestamp != 0 { + Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") + } + + return pe.putString(b.metadata) +} + +func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.offset, err = pd.getInt64(); err != nil { + return err + } + if version == 1 { + if b.timestamp, err = pd.getInt64(); err != nil { + return err + } + } + b.metadata, err = pd.getString() + return err +} + +type OffsetCommitRequest struct { + ConsumerGroup string + ConsumerGroupGeneration int32 // v1 or later + ConsumerID string // v1 or later + RetentionTime int64 // v2 or later + + // Version can be: + // - 0 (kafka 0.8.1 and later) + // - 1 (kafka 0.8.2 and later) + // - 2 (kafka 0.9.0 and later) + // - 3 (kafka 0.11.0 and later) + // - 4 (kafka 2.0.0 and later) + Version int16 + blocks map[string]map[int32]*offsetCommitRequestBlock +} + +func (r *OffsetCommitRequest) encode(pe packetEncoder) error { + if r.Version < 0 || r.Version > 4 { + return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} + } + + if err := pe.putString(r.ConsumerGroup); err != nil { + return err + } + + if r.Version >= 1 { + pe.putInt32(r.ConsumerGroupGeneration) + if err := pe.putString(r.ConsumerID); err != nil { + return err + } + } else { + if r.ConsumerGroupGeneration != 0 { + Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored") + } + if r.ConsumerID != "" { + Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored") + } + } + + if r.Version >= 2 { + pe.putInt64(r.RetentionTime) + } else if r.RetentionTime != 0 { + Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") + } + + if err := pe.putArrayLength(len(r.blocks)); err != nil { + return err + } + for topic, partitions := range r.blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + + if r.Version >= 1 { + if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil { + return err + } + if r.ConsumerID, err = pd.getString(); err != nil { + return err + } + } + + if r.Version >= 2 { + if r.RetentionTime, err = pd.getInt64(); err != nil { + return err + } + } + + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block 
:= &offsetCommitRequestBlock{} + if err := block.decode(pd, r.Version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetCommitRequest) key() int16 { + return 8 +} + +func (r *OffsetCommitRequest) version() int16 { + return r.Version +} + +func (r *OffsetCommitRequest) headerVersion() int16 { + return 1 +} + +func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + default: + return MinVersion + } +} + +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + } + + r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} +} + +func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { + partitions := r.blocks[topic] + if partitions == nil { + return 0, "", errors.New("no such offset") + } + block := partitions[partitionID] + if block == nil { + return 0, "", errors.New("no such offset") + } + return block.offset, block.metadata, nil +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go new file mode 100644 index 00000000000..342260ef599 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -0,0 +1,114 @@ +package sarama + +type OffsetCommitResponse struct { + Version int16 + ThrottleTimeMs int32 + Errors map[string]map[int32]KError +} + +func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]KError) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + r.Errors[topic] = partitions + } + partitions[partition] = kerror +} + +func (r *OffsetCommitResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + if err := pe.putArrayLength(len(r.Errors)); err != nil { + return err + } + for topic, partitions := range r.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, kerror := range partitions { + pe.putInt32(partition) + pe.putInt16(int16(kerror)) + } + } + return nil +} + +func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Errors = make(map[string]map[int32]KError, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numErrors, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Errors[name] = make(map[int32]KError, numErrors) + + for j := 0; j < numErrors; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Errors[name][id] = KError(tmp) + } + } + + return nil +} + +func (r *OffsetCommitResponse) key() int16 { + return 8 +} + +func (r *OffsetCommitResponse) version() 
int16 { + return r.Version +} + +func (r *OffsetCommitResponse) headerVersion() int16 { + return 0 +} + +func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + default: + return MinVersion + } +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go new file mode 100644 index 00000000000..51e9faa3f73 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -0,0 +1,104 @@ +package sarama + +type OffsetFetchRequest struct { + Version int16 + ConsumerGroup string + partitions map[string][]int32 +} + +func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 5 { + return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} + } + + if err = pe.putString(r.ConsumerGroup); err != nil { + return err + } + + if r.Version >= 2 && r.partitions == nil { + pe.putInt32(-1) + } else { + if err = pe.putArrayLength(len(r.partitions)); err != nil { + return err + } + for topic, partitions := range r.partitions { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putInt32Array(partitions); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + if (partitionCount == 0 && version < 2) || partitionCount < 0 { + return nil + } + r.partitions = make(map[string][]int32) + for i := 0; i < partitionCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitions, err := pd.getInt32Array() + if err != nil { + return err + } + r.partitions[topic] = partitions + } + return nil +} + +func (r *OffsetFetchRequest) key() int16 { + return 9 +} + +func (r *OffsetFetchRequest) version() int16 { + return r.Version +} + +func (r *OffsetFetchRequest) headerVersion() int16 { + return 1 +} + +func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_10_2_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + case 5: + return V2_1_0_0 + default: + return MinVersion + } +} + +func (r *OffsetFetchRequest) ZeroPartitions() { + if r.partitions == nil && r.Version >= 2 { + r.partitions = make(map[string][]int32) + } +} + +func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { + if r.partitions == nil { + r.partitions = make(map[string][]int32) + } + + r.partitions[topic] = append(r.partitions[topic], partitionID) +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go new file mode 100644 index 00000000000..9c64e0708d1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -0,0 +1,201 @@ +package sarama + +type OffsetFetchResponseBlock struct { + Offset int64 + LeaderEpoch int32 + Metadata string + Err KError +} + +func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 5 { + b.LeaderEpoch, err = pd.getInt32() + if err != nil { + return err + } + } + + b.Metadata, err = pd.getString() + if err != nil { 
+ return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + return nil +} + +func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt64(b.Offset) + + if version >= 5 { + pe.putInt32(b.LeaderEpoch) + } + + err = pe.putString(b.Metadata) + if err != nil { + return err + } + + pe.putInt16(int16(b.Err)) + + return nil +} + +type OffsetFetchResponse struct { + Version int16 + ThrottleTimeMs int32 + Blocks map[string]map[int32]*OffsetFetchResponseBlock + Err KError +} + +func (r *OffsetFetchResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + + if err := pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + for topic, partitions := range r.Blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe, r.Version); err != nil { + return err + } + } + } + if r.Version >= 2 { + pe.putInt16(int16(r.Err)) + } + return nil +} + +func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + if numTopics > 0 { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + if numBlocks == 0 { + r.Blocks[name] = nil + continue + } + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetFetchResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + } + + if version >= 2 { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + } + + return nil +} + +func (r *OffsetFetchResponse) key() int16 { + return 9 +} + +func (r *OffsetFetchResponse) version() int16 { + return r.Version +} + +func (r *OffsetFetchResponse) headerVersion() int16 { + return 0 +} + +func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_10_2_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + case 5: + return V2_1_0_0 + default: + return MinVersion + } +} + +func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) + } + partitions := r.Blocks[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + r.Blocks[topic] = partitions + } + partitions[partition] = block +} diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go new file mode 100644 index 00000000000..b4fea8226ad --- /dev/null +++ 
b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -0,0 +1,591 @@ +package sarama + +import ( + "sync" + "time" +) + +// Offset Manager + +// OffsetManager uses Kafka to store and fetch consumed partition offsets. +type OffsetManager interface { + // ManagePartition creates a PartitionOffsetManager on the given topic/partition. + // It will return an error if this OffsetManager is already managing the given + // topic/partition. + ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) + + // Close stops the OffsetManager from managing offsets. It is required to call + // this function before an OffsetManager object passes out of scope, as it + // will otherwise leak memory. You must call this after all the + // PartitionOffsetManagers are closed. + Close() error + + // Commit commits the offsets. This method can be used if AutoCommit.Enable is + // set to false. + Commit() +} + +type offsetManager struct { + client Client + conf *Config + group string + ticker *time.Ticker + + memberID string + generation int32 + + broker *Broker + brokerLock sync.RWMutex + + poms map[string]map[int32]*partitionOffsetManager + pomsLock sync.RWMutex + + closeOnce sync.Once + closing chan none + closed chan none +} + +// NewOffsetManagerFromClient creates a new OffsetManager from the given client. +// It is still necessary to call Close() on the underlying client when finished with the partition manager. +func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { + return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client) +} + +func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + conf := client.Config() + om := &offsetManager{ + client: client, + conf: conf, + group: group, + poms: make(map[string]map[int32]*partitionOffsetManager), + + memberID: memberID, + generation: generation, + + closing: make(chan none), + closed: make(chan none), + } + if conf.Consumer.Offsets.AutoCommit.Enable { + om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval) + go withRecover(om.mainLoop) + } + + return om, nil +} + +func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) { + pom, err := om.newPartitionOffsetManager(topic, partition) + if err != nil { + return nil, err + } + + om.pomsLock.Lock() + defer om.pomsLock.Unlock() + + topicManagers := om.poms[topic] + if topicManagers == nil { + topicManagers = make(map[int32]*partitionOffsetManager) + om.poms[topic] = topicManagers + } + + if topicManagers[partition] != nil { + return nil, ConfigurationError("That topic/partition is already being managed") + } + + topicManagers[partition] = pom + return pom, nil +} + +func (om *offsetManager) Close() error { + om.closeOnce.Do(func() { + // exit the mainLoop + close(om.closing) + if om.conf.Consumer.Offsets.AutoCommit.Enable { + <-om.closed + } + + // mark all POMs as closed + om.asyncClosePOMs() + + // flush one last time + for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ { + om.flushToBroker() + if om.releasePOMs(false) == 0 { + break + } + } + + om.releasePOMs(true) + om.brokerLock.Lock() + om.broker = nil + om.brokerLock.Unlock() + }) + return nil +} + +func (om *offsetManager) computeBackoff(retries int) time.Duration { + if 
om.conf.Metadata.Retry.BackoffFunc != nil { + return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max) + } else { + return om.conf.Metadata.Retry.Backoff + } +} + +func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) { + broker, err := om.coordinator() + if err != nil { + if retries <= 0 { + return 0, "", err + } + return om.fetchInitialOffset(topic, partition, retries-1) + } + + req := new(OffsetFetchRequest) + req.Version = 1 + req.ConsumerGroup = om.group + req.AddPartition(topic, partition) + + resp, err := broker.FetchOffset(req) + if err != nil { + if retries <= 0 { + return 0, "", err + } + om.releaseCoordinator(broker) + return om.fetchInitialOffset(topic, partition, retries-1) + } + + block := resp.GetBlock(topic, partition) + if block == nil { + return 0, "", ErrIncompleteResponse + } + + switch block.Err { + case ErrNoError: + return block.Offset, block.Metadata, nil + case ErrNotCoordinatorForConsumer: + if retries <= 0 { + return 0, "", block.Err + } + om.releaseCoordinator(broker) + return om.fetchInitialOffset(topic, partition, retries-1) + case ErrOffsetsLoadInProgress: + if retries <= 0 { + return 0, "", block.Err + } + backoff := om.computeBackoff(retries) + select { + case <-om.closing: + return 0, "", block.Err + case <-time.After(backoff): + } + return om.fetchInitialOffset(topic, partition, retries-1) + default: + return 0, "", block.Err + } +} + +func (om *offsetManager) coordinator() (*Broker, error) { + om.brokerLock.RLock() + broker := om.broker + om.brokerLock.RUnlock() + + if broker != nil { + return broker, nil + } + + om.brokerLock.Lock() + defer om.brokerLock.Unlock() + + if broker := om.broker; broker != nil { + return broker, nil + } + + if err := om.client.RefreshCoordinator(om.group); err != nil { + return nil, err + } + + broker, err := om.client.Coordinator(om.group) + if err != nil { + return nil, err + } + + om.broker = broker + return broker, nil +} + +func (om *offsetManager) releaseCoordinator(b *Broker) { + om.brokerLock.Lock() + if om.broker == b { + om.broker = nil + } + om.brokerLock.Unlock() +} + +func (om *offsetManager) mainLoop() { + defer om.ticker.Stop() + defer close(om.closed) + + for { + select { + case <-om.ticker.C: + om.Commit() + case <-om.closing: + return + } + } +} + +func (om *offsetManager) Commit() { + om.flushToBroker() + om.releasePOMs(false) +} + +func (om *offsetManager) flushToBroker() { + req := om.constructRequest() + if req == nil { + return + } + + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + resp, err := broker.CommitOffset(req) + if err != nil { + om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() + return + } + + om.handleResponse(broker, req, resp) +} + +func (om *offsetManager) constructRequest() *OffsetCommitRequest { + var r *OffsetCommitRequest + var perPartitionTimestamp int64 + if om.conf.Consumer.Offsets.Retention == 0 { + perPartitionTimestamp = ReceiveTime + r = &OffsetCommitRequest{ + Version: 1, + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + } + } else { + r = &OffsetCommitRequest{ + Version: 2, + RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond), + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + } + } + + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { 
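+ // snapshot each POM under its lock; only dirty offsets (marked or
+ // reset since the last successful commit) are added to the request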
+ pom.lock.Lock() + if pom.dirty { + r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata) + } + pom.lock.Unlock() + } + } + + if len(r.blocks) > 0 { + return r + } + + return nil +} + +func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil { + continue + } + + var err KError + var ok bool + + if resp.Errors[pom.topic] == nil { + pom.handleError(ErrIncompleteResponse) + continue + } + if err, ok = resp.Errors[pom.topic][pom.partition]; !ok { + pom.handleError(ErrIncompleteResponse) + continue + } + + switch err { + case ErrNoError: + block := req.blocks[pom.topic][pom.partition] + pom.updateCommitted(block.offset, block.metadata) + case ErrNotLeaderForPartition, ErrLeaderNotAvailable, + ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: + // not a critical error, we just need to redispatch + om.releaseCoordinator(broker) + case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: + // nothing we can do about this, just tell the user and carry on + pom.handleError(err) + case ErrOffsetsLoadInProgress: + // nothing wrong but we didn't commit, we'll get it next time round + case ErrUnknownTopicOrPartition: + // let the user know *and* try redispatching - if topic-auto-create is + // enabled, redispatching should trigger a metadata req and create the + // topic; if not then re-dispatching won't help, but we've let the user + // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + fallthrough + default: + // dunno, tell the user and try redispatching + pom.handleError(err) + om.releaseCoordinator(broker) + } + } + } +} + +func (om *offsetManager) handleError(err error) { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.handleError(err) + } + } +} + +func (om *offsetManager) asyncClosePOMs() { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.AsyncClose() + } + } +} + +// Releases/removes closed POMs once they are clean (or when forced) +func (om *offsetManager) releasePOMs(force bool) (remaining int) { + om.pomsLock.Lock() + defer om.pomsLock.Unlock() + + for topic, topicManagers := range om.poms { + for partition, pom := range topicManagers { + pom.lock.Lock() + releaseDue := pom.done && (force || !pom.dirty) + pom.lock.Unlock() + + if releaseDue { + pom.release() + + delete(om.poms[topic], partition) + if len(om.poms[topic]) == 0 { + delete(om.poms, topic) + } + } + } + remaining += len(om.poms[topic]) + } + return +} + +func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + if partitions, ok := om.poms[topic]; ok { + if pom, ok := partitions[partition]; ok { + return pom + } + } + return nil +} + +// Partition Offset Manager + +// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() +// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes +// out of scope. 
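+//
+// A minimal, illustrative consume-and-commit sketch (the client, group, topic
+// and message names here are placeholders, not part of this API):
+//
+//	om, err := NewOffsetManagerFromClient("example-group", client)
+//	if err != nil {
+//		// handle error
+//	}
+//	pom, err := om.ManagePartition("example-topic", 0)
+//	if err != nil {
+//		// handle error
+//	}
+//	offset, _ := pom.NextOffset() // resume position for a partition consumer
+//	// ... consume a message msg starting at offset ...
+//	pom.MarkOffset(msg.Offset+1, "") // mark the offset of the *next* message to read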
+type PartitionOffsetManager interface {
+ // NextOffset returns the next offset that should be consumed for the managed
+ // partition, accompanied by metadata which can be used to reconstruct the state
+ // of the partition consumer when it resumes. NextOffset() will return
+ // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+ // was committed for this partition yet.
+ NextOffset() (int64, string)
+
+ // MarkOffset marks the provided offset, alongside a metadata string
+ // that represents the state of the partition consumer at that point in time. The
+ // metadata string can be used by another consumer to restore that state, so it
+ // can resume consumption.
+ //
+ // To follow upstream conventions, you are expected to mark the offset of the
+ // next message to read, not the last message read. Thus, when calling `MarkOffset`
+ // you should typically add one to the offset of the last consumed message.
+ //
+ // Note: calling MarkOffset does not necessarily commit the offset to the backend
+ // store immediately for efficiency reasons, and it may never be committed if
+ // your application crashes. This means that you may end up processing the same
+ // message twice, and your processing should ideally be idempotent.
+ MarkOffset(offset int64, metadata string)
+
+ // ResetOffset resets to the provided offset, alongside a metadata string that
+ // represents the state of the partition consumer at that point in time. Reset
+ // acts as a counterpart to MarkOffset, the difference being that it allows
+ // resetting an offset to an earlier or smaller value, where MarkOffset only
+ // allows incrementing the offset. See MarkOffset for more details.
+ ResetOffset(offset int64, metadata string)
+
+ // Errors returns a read channel of errors that occur during offset management, if
+ // enabled. By default, errors are logged and not returned over this channel. If
+ // you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+ // return immediately, after which you should wait until the 'errors' channel has
+ // been drained and closed. It is required to call this function, or Close, before
+ // a consumer object passes out of scope, as it will otherwise leak memory. You
+ // must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionOffsetManager from managing offsets. It is required to
+ // call this function (or AsyncClose) before a PartitionOffsetManager object
+ // passes out of scope, as it will otherwise leak memory. You must call this
+ // before calling Close on the underlying client.
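+ //
+ // A hedged sketch of the shutdown order the comments above imply
+ // (`pom`, `om` and `client` are illustrative names):
+ //
+ //	_ = pom.Close()    // or AsyncClose() followed by draining Errors()
+ //	_ = om.Close()     // close the OffsetManager once all POMs are closed
+ //	_ = client.Close() // and finally the underlying Client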
+ Close() error +} + +type partitionOffsetManager struct { + parent *offsetManager + topic string + partition int32 + + lock sync.Mutex + offset int64 + metadata string + dirty bool + done bool + + releaseOnce sync.Once + errors chan *ConsumerError +} + +func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { + offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max) + if err != nil { + return nil, err + } + + return &partitionOffsetManager{ + parent: om, + topic: topic, + partition: partition, + errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), + offset: offset, + metadata: metadata, + }, nil +} + +func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { + return pom.errors +} + +func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset > pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + +func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset <= pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + +func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset == offset && pom.metadata == metadata { + pom.dirty = false + } +} + +func (pom *partitionOffsetManager) NextOffset() (int64, string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset >= 0 { + return pom.offset, pom.metadata + } + + return pom.parent.conf.Consumer.Offsets.Initial, "" +} + +func (pom *partitionOffsetManager) AsyncClose() { + pom.lock.Lock() + pom.done = true + pom.lock.Unlock() +} + +func (pom *partitionOffsetManager) Close() error { + pom.AsyncClose() + + var errors ConsumerErrors + for err := range pom.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (pom *partitionOffsetManager) handleError(err error) { + cErr := &ConsumerError{ + Topic: pom.topic, + Partition: pom.partition, + Err: err, + } + + if pom.parent.conf.Consumer.Return.Errors { + pom.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (pom *partitionOffsetManager) release() { + pom.releaseOnce.Do(func() { + close(pom.errors) + }) +} diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go new file mode 100644 index 00000000000..c0b3305f661 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_request.go @@ -0,0 +1,160 @@ +package sarama + +type offsetRequestBlock struct { + time int64 + maxOffsets int32 // Only used in version 0 +} + +func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(b.time) + if version == 0 { + pe.putInt32(b.maxOffsets) + } + + return nil +} + +func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.time, err = pd.getInt64(); err != nil { + return err + } + if version == 0 { + if b.maxOffsets, err = pd.getInt32(); err != nil { + return err + } + } + return nil +} + +type OffsetRequest struct { + Version int16 + replicaID int32 + isReplicaIDSet bool + blocks map[string]map[int32]*offsetRequestBlock +} + +func (r *OffsetRequest) encode(pe packetEncoder) error { + if r.isReplicaIDSet { + pe.putInt32(r.replicaID) + } else { + // default replica ID is always -1 for clients + 
pe.putInt32(-1) + } + + err := pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, partitions := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + replicaID, err := pd.getInt32() + if err != nil { + return err + } + if replicaID >= 0 { + r.SetReplicaID(replicaID) + } + + blockCount, err := pd.getArrayLength() + if err != nil { + return err + } + if blockCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + for i := 0; i < blockCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetRequestBlock{} + if err := block.decode(pd, version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetRequest) key() int16 { + return 2 +} + +func (r *OffsetRequest) version() int16 { + return r.Version +} + +func (r *OffsetRequest) headerVersion() int16 { + return 1 +} + +func (r *OffsetRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return MinVersion + } +} + +func (r *OffsetRequest) SetReplicaID(id int32) { + r.replicaID = id + r.isReplicaIDSet = true +} + +func (r *OffsetRequest) ReplicaID() int32 { + if r.isReplicaIDSet { + return r.replicaID + } + return -1 +} + +func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + } + + tmp := new(offsetRequestBlock) + tmp.time = time + if r.Version == 0 { + tmp.maxOffsets = maxOffsets + } + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go new file mode 100644 index 00000000000..ead3ebbcc2c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -0,0 +1,178 @@ +package sarama + +type OffsetResponseBlock struct { + Err KError + Offsets []int64 // Version 0 + Offset int64 // Version 1 + Timestamp int64 // Version 1 +} + +func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + if version == 0 { + b.Offsets, err = pd.getInt64Array() + + return err + } + + b.Timestamp, err = pd.getInt64() + if err != nil { + return err + } + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + // For backwards compatibility put the offset in the offsets array too + b.Offsets = []int64{b.Offset} + + return nil +} + +func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + + if version == 0 { + return pe.putInt64Array(b.Offsets) + } + + pe.putInt64(b.Timestamp) + pe.putInt64(b.Offset) + + return nil +} + +type OffsetResponse struct { + 
Version int16 + Blocks map[string]map[int32]*OffsetResponseBlock +} + +func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +/* +// [0 0 0 1 ntopics +0 8 109 121 95 116 111 112 105 99 topic +0 0 0 1 npartitions +0 0 0 0 id +0 0 + +0 0 0 1 0 0 0 0 +0 1 1 1 0 0 0 1 +0 8 109 121 95 116 111 112 +105 99 0 0 0 1 0 0 +0 0 0 0 0 0 0 1 +0 0 0 0 0 1 1 1] + +*/ +func (r *OffsetResponse) encode(pe packetEncoder) (err error) { + if err = pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + + for topic, partitions := range r.Blocks { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, r.version()); err != nil { + return err + } + } + } + + return nil +} + +func (r *OffsetResponse) key() int16 { + return 2 +} + +func (r *OffsetResponse) version() int16 { + return r.Version +} + +func (r *OffsetResponse) headerVersion() int16 { + return 0 +} + +func (r *OffsetResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return MinVersion + } +} + +// testing API + +func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*OffsetResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} +} diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go new file mode 100644 index 00000000000..ed00ba350b5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -0,0 +1,67 @@ +package sarama + +// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. +// Types implementing Decoder only need to worry about calling methods like GetString, +// not about how a string is represented in Kafka. 
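+//
+// As a rough illustration of the encoding rules these helpers hide (a sketch,
+// not the actual implementation): a protocol string is an int16 big-endian
+// length followed by that many bytes, with -1 denoting a null string:
+//
+//	n := int16(binary.BigEndian.Uint16(buf))
+//	if n == -1 {
+//		// null string
+//	}
+//	s := string(buf[2 : 2+n])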
+type packetDecoder interface {
+ // Primitives
+ getInt8() (int8, error)
+ getInt16() (int16, error)
+ getInt32() (int32, error)
+ getInt64() (int64, error)
+ getVarint() (int64, error)
+ getUVarint() (uint64, error)
+ getArrayLength() (int, error)
+ getCompactArrayLength() (int, error)
+ getBool() (bool, error)
+ getEmptyTaggedFieldArray() (int, error)
+
+ // Collections
+ getBytes() ([]byte, error)
+ getVarintBytes() ([]byte, error)
+ getRawBytes(length int) ([]byte, error)
+ getString() (string, error)
+ getNullableString() (*string, error)
+ getCompactString() (string, error)
+ getCompactNullableString() (*string, error)
+ getCompactInt32Array() ([]int32, error)
+ getInt32Array() ([]int32, error)
+ getInt64Array() ([]int64, error)
+ getStringArray() ([]string, error)
+
+ // Subsets
+ remaining() int
+ getSubset(length int) (packetDecoder, error)
+ peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+ peekInt8(offset int) (int8, error) // similar to peek, but just one byte
+
+ // Stacks, see PushDecoder
+ push(in pushDecoder) error
+ pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+ // Saves the offset into the input buffer as the location to actually read the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the input of this decoder (eg 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and check the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+ // of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+ check(curOffset int, buf []byte) error
+}
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of a
+// field is unknown until its value has been decoded (for instance varint encoded length
+// fields).
+// During push, the dynamicPushDecoder.decode() method is called instead of reserveLength().
+type dynamicPushDecoder interface {
+ pushDecoder
+ decoder
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 00000000000..50c735c0445
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,72 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
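+//
+// The push/pop pair below is how length and checksum fields are produced; a
+// typical (illustrative) encode flow, as seen for example in the produce
+// request encoder, looks like:
+//
+//	pe.push(&lengthField{}) // reserve room for a length prefix
+//	// ... encode the payload whose size is not yet known ...
+//	pe.pop()                // back-fill the reserved length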
+type packetEncoder interface {
+ // Primitives
+ putInt8(in int8)
+ putInt16(in int16)
+ putInt32(in int32)
+ putInt64(in int64)
+ putVarint(in int64)
+ putUVarint(in uint64)
+ putCompactArrayLength(in int)
+ putArrayLength(in int) error
+ putBool(in bool)
+
+ // Collections
+ putBytes(in []byte) error
+ putVarintBytes(in []byte) error
+ putRawBytes(in []byte) error
+ putCompactString(in string) error
+ putNullableCompactString(in *string) error
+ putString(in string) error
+ putNullableString(in *string) error
+ putStringArray(in []string) error
+ putCompactInt32Array(in []int32) error
+ putNullableCompactInt32Array(in []int32) error
+ putInt32Array(in []int32) error
+ putInt64Array(in []int64) error
+ putEmptyTaggedFieldArray()
+
+ // Provide the current offset to record the batch size metric
+ offset() int
+
+ // Stacks, see PushEncoder
+ push(in pushEncoder)
+ pop() error
+
+ // To record metrics when provided
+ metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+ // Saves the offset into the input buffer as the location to actually write the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and write the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+ // of data to the saved offset, based on the data between the saved offset and curOffset.
+ run(curOffset int, buf []byte) error
+}
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of a
+// field is unknown until its value has been computed (for instance varint encoded length
+// fields).
+type dynamicPushEncoder interface {
+ pushEncoder
+
+ // Called during pop() to adjust the length of the field.
+ // It should return the difference in bytes between the last computed length and the current length.
+ adjustLength(currOffset int) int
+}
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 00000000000..6a708e729ee
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,217 @@
+package sarama
+
+import (
+ "hash"
+ "hash/fnv"
+ "math/rand"
+ "time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+ // Partition takes a message and partition count and chooses a partition
+ Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+ // RequiresConsistency indicates to the user of the partitioner whether the
+ // mapping of key->partition is consistent or not. Specifically, if a
+ // partitioner requires consistency then it must be allowed to choose from all
+ // partitions (even ones known to be unavailable), and its choice must be
+ // respected by the caller. The obvious example is the HashPartitioner.
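+ //
+ // A sketch of how a caller might honour this contract (illustrative,
+ // not the exact producer implementation):
+ //
+ //	if partitioner.RequiresConsistency() {
+ //		partitions, err = client.Partitions(topic) // all partitions
+ //	} else {
+ //		partitions, err = client.WritablePartitions(topic) // only available ones
+ //	}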
+ RequiresConsistency() bool
+}
+
+// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
+// in order to allow more flexibility than is originally allowed by the
+// RequiresConsistency method in the Partitioner interface. This allows
+// partitioners to require consistency sometimes, but not all times. It's useful
+// for, e.g., the HashPartitioner, which does not require consistency if the
+// message key is nil.
+type DynamicConsistencyPartitioner interface {
+ Partitioner
+
+ // MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
+ // but takes in the message being partitioned so that the partitioner can
+ // make a per-message determination.
+ MessageRequiresConsistency(message *ProducerMessage) bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// HashPartitionerOption lets you modify default values of the partitioner
+type HashPartitionerOption func(*hashPartitioner)
+
+// WithAbsFirst means that the partitioner handles absolute values
+// in the same way as the reference Java implementation
+func WithAbsFirst() HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ hp.referenceAbs = true
+ }
+}
+
+// WithCustomHashFunction lets you specify what hash function to use for the partitioning
+func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ hp.hasher = hasher()
+ }
+}
+
+// WithCustomFallbackPartitioner lets you specify what HashPartitioner should be used in case a Distribution Key is empty
+func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ // use the provided fallback partitioner, not hp itself, which
+ // would recurse indefinitely when the message key is nil
+ hp.random = randomHP
+ }
+}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+ return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+type randomPartitioner struct {
+ generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+ p := new(randomPartitioner)
+ p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+ return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type roundRobinPartitioner struct {
+ partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
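+//
+// For example (a hedged sketch; the constructor is assigned, not called):
+//
+//	cfg := NewConfig()
+//	cfg.Producer.Partitioner = NewRoundRobinPartitioner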
+func NewRoundRobinPartitioner(topic string) Partitioner { + return &roundRobinPartitioner{} +} + +func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if p.partition >= numPartitions { + p.partition = 0 + } + ret := p.partition + p.partition++ + return ret, nil +} + +func (p *roundRobinPartitioner) RequiresConsistency() bool { + return false +} + +type hashPartitioner struct { + random Partitioner + hasher hash.Hash32 + referenceAbs bool +} + +// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. +// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that +// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance. +func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = hasher() + p.referenceAbs = false + return p + } +} + +// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options +func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = false + for _, option := range options { + option(p) + } + return p + } +} + +// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a +// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, +// modulus the number of partitions. This ensures that messages with the same key always end up on the +// same partition. +func NewHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = false + return p +} + +// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values +// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do +// that but it had a mistake and now there are people depending on both behaviours. This will +// all go away on the next major version bump. 
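+//
+// To make the difference concrete (illustrative arithmetic): for a key whose
+// FNV-1a hash is 0x80000000 (int32 -2147483648) and 3 partitions, the
+// reference behaviour yields (hash & 0x7fffffff) % 3 == 0, while the legacy
+// Sarama behaviour yields abs(hash % 3) == 2.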
+func NewReferenceHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = true + return p +} + +func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if message.Key == nil { + return p.random.Partition(message, numPartitions) + } + bytes, err := message.Key.Encode() + if err != nil { + return -1, err + } + p.hasher.Reset() + _, err = p.hasher.Write(bytes) + if err != nil { + return -1, err + } + var partition int32 + // Turns out we were doing our absolute value in a subtly different way from the upstream + // implementation, but now we need to maintain backwards compat for people who started using + // the old version; if referenceAbs is set we are compatible with the reference java client + // but not past Sarama versions + if p.referenceAbs { + partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions + } else { + partition = int32(p.hasher.Sum32()) % numPartitions + if partition < 0 { + partition = -partition + } + } + return partition, nil +} + +func (p *hashPartitioner) RequiresConsistency() bool { + return true +} + +func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool { + return message.Key != nil +} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go new file mode 100644 index 00000000000..827542c5030 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -0,0 +1,202 @@ +package sarama + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + + "github.com/rcrowley/go-metrics" +) + +type prepEncoder struct { + stack []pushEncoder + length int +} + +// primitives + +func (pe *prepEncoder) putInt8(in int8) { + pe.length++ +} + +func (pe *prepEncoder) putInt16(in int16) { + pe.length += 2 +} + +func (pe *prepEncoder) putInt32(in int32) { + pe.length += 4 +} + +func (pe *prepEncoder) putInt64(in int64) { + pe.length += 8 +} + +func (pe *prepEncoder) putVarint(in int64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutVarint(buf[:], in) +} + +func (pe *prepEncoder) putUVarint(in uint64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutUvarint(buf[:], in) +} + +func (pe *prepEncoder) putArrayLength(in int) error { + if in > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} + } + pe.length += 4 + return nil +} + +func (pe *prepEncoder) putCompactArrayLength(in int) { + pe.putUVarint(uint64(in + 1)) +} + +func (pe *prepEncoder) putBool(in bool) { + pe.length++ +} + +// arrays + +func (pe *prepEncoder) putBytes(in []byte) error { + pe.length += 4 + if in == nil { + return nil + } + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putVarintBytes(in []byte) error { + if in == nil { + pe.putVarint(-1) + return nil + } + pe.putVarint(int64(len(in))) + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putCompactString(in string) error { + pe.putCompactArrayLength(len(in)) + return pe.putRawBytes([]byte(in)) +} + +func (pe *prepEncoder) putNullableCompactString(in *string) error { + if in == nil { + pe.putUVarint(0) + return nil + } else { + return pe.putCompactString(*in) + } +} + +func (pe *prepEncoder) putRawBytes(in []byte) error { + if len(in) > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putNullableString(in *string) error { + if 
in == nil { + pe.length += 2 + return nil + } + return pe.putString(*in) +} + +func (pe *prepEncoder) putString(in string) error { + pe.length += 2 + if len(in) > math.MaxInt16 { + return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putStringArray(in []string) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, str := range in { + if err := pe.putString(str); err != nil { + return err + } + } + + return nil +} + +func (pe *prepEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + pe.putUVarint(0) + return nil + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putInt32Array(in []int32) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putInt64Array(in []int64) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 8 * len(in) + return nil +} + +func (pe *prepEncoder) putEmptyTaggedFieldArray() { + pe.putUVarint(0) +} + +func (pe *prepEncoder) offset() int { + return pe.length +} + +// stackable + +func (pe *prepEncoder) push(in pushEncoder) { + in.saveOffset(pe.length) + pe.length += in.reserveLength() + pe.stack = append(pe.stack, in) +} + +func (pe *prepEncoder) pop() error { + in := pe.stack[len(pe.stack)-1] + pe.stack = pe.stack[:len(pe.stack)-1] + if dpe, ok := in.(dynamicPushEncoder); ok { + pe.length += dpe.adjustLength(pe.length) + } + + return nil +} + +// we do not record metrics during the prep encoder pass +func (pe *prepEncoder) metricRegistry() metrics.Registry { + return nil +} diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go new file mode 100644 index 00000000000..0034651e254 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_request.go @@ -0,0 +1,258 @@ +package sarama + +import "github.com/rcrowley/go-metrics" + +// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements +// it must see before responding. Any of the constants defined here are valid. On broker versions +// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many +// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced +// by setting the `min.isr` value in the brokers configuration). +type RequiredAcks int16 + +const ( + // NoResponse doesn't send any response, the TCP ACK is all you get. + NoResponse RequiredAcks = 0 + // WaitForLocal waits for only the local commit to succeed before responding. + WaitForLocal RequiredAcks = 1 + // WaitForAll waits for all in-sync replicas to commit before responding. + // The minimum number of in-sync replicas is configured on the broker via + // the `min.insync.replicas` configuration key. 
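+ //
+ // For example (hedged sketch): to require full ISR acknowledgement,
+ //
+ //	cfg := NewConfig()
+ //	cfg.Producer.RequiredAcks = WaitForAll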
+ WaitForAll RequiredAcks = -1 +) + +type ProduceRequest struct { + TransactionalID *string + RequiredAcks RequiredAcks + Timeout int32 + Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11 + records map[string]map[int32]Records +} + +func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + var topicRecordCount int64 + for _, messageBlock := range msgSet.Messages { + // Is this a fake "message" wrapping real messages? + if messageBlock.Msg.Set != nil { + topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) + } else { + // A single uncompressed message + topicRecordCount++ + } + // Better be safe than sorry when computing the compression ratio + if messageBlock.Msg.compressedSize != 0 { + compressionRatio := float64(len(messageBlock.Msg.Value)) / + float64(messageBlock.Msg.compressedSize) + // Histogram do not support decimal values, let's multiple it by 100 for better precision + intCompressionRatio := int64(100 * compressionRatio) + compressionRatioMetric.Update(intCompressionRatio) + topicCompressionRatioMetric.Update(intCompressionRatio) + } + } + return topicRecordCount +} + +func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + if recordBatch.compressedRecords != nil { + compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) + compressionRatioMetric.Update(compressionRatio) + topicCompressionRatioMetric.Update(compressionRatio) + } + + return int64(len(recordBatch.Records)) +} + +func (r *ProduceRequest) encode(pe packetEncoder) error { + if r.Version >= 3 { + if err := pe.putNullableString(r.TransactionalID); err != nil { + return err + } + } + pe.putInt16(int16(r.RequiredAcks)) + pe.putInt32(r.Timeout) + metricRegistry := pe.metricRegistry() + var batchSizeMetric metrics.Histogram + var compressionRatioMetric metrics.Histogram + if metricRegistry != nil { + batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry) + compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry) + } + totalRecordCount := int64(0) + + err := pe.putArrayLength(len(r.records)) + if err != nil { + return err + } + + for topic, partitions := range r.records { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + topicRecordCount := int64(0) + var topicCompressionRatioMetric metrics.Histogram + if metricRegistry != nil { + topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry) + } + for id, records := range partitions { + startOffset := pe.offset() + pe.putInt32(id) + pe.push(&lengthField{}) + err = records.encode(pe) + if err != nil { + return err + } + err = pe.pop() + if err != nil { + return err + } + if metricRegistry != nil { + if r.Version >= 3 { + topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric) + } else { + topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric) + } + batchSize := int64(pe.offset() - startOffset) + batchSizeMetric.Update(batchSize) + getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize) + } + } + if topicRecordCount > 0 { + getOrRegisterTopicMeter("record-send-rate", topic, 
metricRegistry).Mark(topicRecordCount) + getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount) + totalRecordCount += topicRecordCount + } + } + if totalRecordCount > 0 { + metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount) + getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount) + } + + return nil +} + +func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + if version >= 3 { + id, err := pd.getNullableString() + if err != nil { + return err + } + r.TransactionalID = id + } + requiredAcks, err := pd.getInt16() + if err != nil { + return err + } + r.RequiredAcks = RequiredAcks(requiredAcks) + if r.Timeout, err = pd.getInt32(); err != nil { + return err + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + + r.records = make(map[string]map[int32]Records) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.records[topic] = make(map[int32]Records) + + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + size, err := pd.getInt32() + if err != nil { + return err + } + recordsDecoder, err := pd.getSubset(int(size)) + if err != nil { + return err + } + var records Records + if err := records.decode(recordsDecoder); err != nil { + return err + } + r.records[topic][partition] = records + } + } + + return nil +} + +func (r *ProduceRequest) key() int16 { + return 0 +} + +func (r *ProduceRequest) version() int16 { + return r.Version +} + +func (r *ProduceRequest) headerVersion() int16 { + return 1 +} + +func (r *ProduceRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_11_0_0 + case 7: + return V2_1_0_0 + default: + return MinVersion + } +} + +func (r *ProduceRequest) ensureRecords(topic string, partition int32) { + if r.records == nil { + r.records = make(map[string]map[int32]Records) + } + + if r.records[topic] == nil { + r.records[topic] = make(map[int32]Records) + } +} + +func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { + r.ensureRecords(topic, partition) + set := r.records[topic][partition].MsgSet + + if set == nil { + set = new(MessageSet) + r.records[topic][partition] = newLegacyRecords(set) + } + + set.addMessage(msg) +} + +func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newLegacyRecords(set) +} + +func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newDefaultRecords(batch) +} diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go new file mode 100644 index 00000000000..edf978790c9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -0,0 +1,212 @@ +package sarama + +import ( + "fmt" + "time" +) + +// Protocol, http://kafka.apache.org/protocol.html +// v1 +// v2 = v3 = v4 +// v5 = v6 = v7 +// Produce Response (Version: 7) => [responses] throttle_time_ms +// responses => topic [partition_responses] +// topic => STRING +// partition_responses => partition error_code base_offset 
log_append_time log_start_offset +// partition => INT32 +// error_code => INT16 +// base_offset => INT64 +// log_append_time => INT64 +// log_start_offset => INT64 +// throttle_time_ms => INT32 + +// partition_responses in protocol +type ProduceResponseBlock struct { + Err KError // v0, error_code + Offset int64 // v0, base_offset + Timestamp time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime` + StartOffset int64 // v5, log_start_offset +} + +func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 2 { + if millis, err := pd.getInt64(); err != nil { + return err + } else if millis != -1 { + b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + } + + if version >= 5 { + b.StartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + + return nil +} + +func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + pe.putInt64(b.Offset) + + if version >= 2 { + timestamp := int64(-1) + if !b.Timestamp.Before(time.Unix(0, 0)) { + timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) + } else if !b.Timestamp.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)} + } + pe.putInt64(timestamp) + } + + if version >= 5 { + pe.putInt64(b.StartOffset) + } + + return nil +} + +type ProduceResponse struct { + Blocks map[string]map[int32]*ProduceResponseBlock // v0, responses + Version int16 + ThrottleTime time.Duration // v1, throttle_time_ms +} + +func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(ProduceResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + if r.Version >= 1 { + millis, err := pd.getInt32() + if err != nil { + return err + } + + r.ThrottleTime = time.Duration(millis) * time.Millisecond + } + + return nil +} + +func (r *ProduceResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for id, prb := range partitions { + pe.putInt32(id) + err = prb.encode(pe, r.Version) + if err != nil { + return err + } + } + } + + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + return nil +} + +func (r *ProduceResponse) key() int16 { + return 0 +} + +func (r *ProduceResponse) version() int16 { + return r.Version +} + +func (r *ProduceResponse) headerVersion() int16 { + return 0 +} + +func (r *ProduceResponse) requiredVersion() KafkaVersion { + return MinVersion +} + +func (r *ProduceResponse) GetBlock(topic string, partition int32) 
*ProduceResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +// Testing API + +func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*ProduceResponseBlock) + r.Blocks[topic] = byTopic + } + block := &ProduceResponseBlock{ + Err: err, + } + if r.Version >= 2 { + block.Timestamp = time.Now() + } + byTopic[partition] = block +} diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go new file mode 100644 index 00000000000..9c70f818006 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -0,0 +1,273 @@ +package sarama + +import ( + "encoding/binary" + "errors" + "time" +) + +type partitionSet struct { + msgs []*ProducerMessage + recordsToSend Records + bufferBytes int +} + +type produceSet struct { + parent *asyncProducer + msgs map[string]map[int32]*partitionSet + producerID int64 + producerEpoch int16 + + bufferBytes int + bufferCount int +} + +func newProduceSet(parent *asyncProducer) *produceSet { + pid, epoch := parent.txnmgr.getProducerID() + return &produceSet{ + msgs: make(map[string]map[int32]*partitionSet), + parent: parent, + producerID: pid, + producerEpoch: epoch, + } +} + +func (ps *produceSet) add(msg *ProducerMessage) error { + var err error + var key, val []byte + + if msg.Key != nil { + if key, err = msg.Key.Encode(); err != nil { + return err + } + } + + if msg.Value != nil { + if val, err = msg.Value.Encode(); err != nil { + return err + } + } + + timestamp := msg.Timestamp + if timestamp.IsZero() { + timestamp = time.Now() + } + timestamp = timestamp.Truncate(time.Millisecond) + + partitions := ps.msgs[msg.Topic] + if partitions == nil { + partitions = make(map[int32]*partitionSet) + ps.msgs[msg.Topic] = partitions + } + + var size int + + set := partitions[msg.Partition] + if set == nil { + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + batch := &RecordBatch{ + FirstTimestamp: timestamp, + Version: 2, + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + ProducerID: ps.producerID, + ProducerEpoch: ps.producerEpoch, + } + if ps.parent.conf.Producer.Idempotent { + batch.FirstSequence = msg.sequenceNumber + } + set = &partitionSet{recordsToSend: newDefaultRecords(batch)} + size = recordBatchOverhead + } else { + set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} + } + partitions[msg.Partition] = set + } + + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence { + return errors.New("assertion failed: message out of sequence added to a batch") + } + } + + // Past this point we can't return an error, because we've already added the message to the set. 
+ set.msgs = append(set.msgs, msg) + + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + // We are being conservative here to avoid having to prep encode the record + size += maximumRecordOverhead + rec := &Record{ + Key: key, + Value: val, + TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp), + } + size += len(key) + len(val) + if len(msg.Headers) > 0 { + rec.Headers = make([]*RecordHeader, len(msg.Headers)) + for i := range msg.Headers { + rec.Headers[i] = &msg.Headers[i] + size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 + } + } + set.recordsToSend.RecordBatch.addRecord(rec) + } else { + msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + msgToSend.Timestamp = timestamp + msgToSend.Version = 1 + } + set.recordsToSend.MsgSet.addMessage(msgToSend) + size = producerMessageOverhead + len(key) + len(val) + } + + set.bufferBytes += size + ps.bufferBytes += size + ps.bufferCount++ + + return nil +} + +func (ps *produceSet) buildRequest() *ProduceRequest { + req := &ProduceRequest{ + RequiredAcks: ps.parent.conf.Producer.RequiredAcks, + Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 2 + } + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + req.Version = 3 + } + + if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { + req.Version = 7 + } + + for topic, partitionSets := range ps.msgs { + for partition, set := range partitionSets { + if req.Version >= 3 { + // If the API version we're hitting is 3 or greater, we need to calculate + // offsets for each record in the batch relative to FirstOffset. + // Additionally, we must set LastOffsetDelta to the value of the last offset + // in the batch. Since the OffsetDelta of the first record is 0, we know that the + // final record of any batch will have an offset of (# of records in batch) - 1. + // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets + // under the RecordBatch section for details.) + rb := set.recordsToSend.RecordBatch + if len(rb.Records) > 0 { + rb.LastOffsetDelta = int32(len(rb.Records) - 1) + for i, record := range rb.Records { + record.OffsetDelta = int64(i) + } + } + req.AddBatch(topic, partition, rb) + continue + } + if ps.parent.conf.Producer.Compression == CompressionNone { + req.AddSet(topic, partition, set.recordsToSend.MsgSet) + } else { + // When compression is enabled, the entire set for each partition is compressed + // and sent as the payload of a single fake "message" with the appropriate codec + // set and no key. When the server sees a message with a compression codec, it + // decompresses the payload and treats the result as its message set. + + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + // If our version is 0.10 or later, assign relative offsets + // to the inner messages. This lets the broker avoid + // recompressing the message set. + // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets + // for details on relative offsets.) + for i, msg := range set.recordsToSend.MsgSet.Messages { + msg.Offset = int64(i) + } + } + payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry) + if err != nil { + Logger.Println(err) // if this happens, it's basically our fault. 
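+ // An encode failure here means the MessageSet assembled above is internally inconsistent; that is a bug in this code path rather than a recoverable runtime condition, hence the panic below.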
+ panic(err) + } + compMsg := &Message{ + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + Key: nil, + Value: payload, + Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + compMsg.Version = 1 + compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp + } + req.AddMessage(topic, partition, compMsg) + } + } + } + + return req +} + +func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) { + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + cb(topic, partition, set) + } + } +} + +func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { + if ps.msgs[topic] == nil { + return nil + } + set := ps.msgs[topic][partition] + if set == nil { + return nil + } + ps.bufferBytes -= set.bufferBytes + ps.bufferCount -= len(set.msgs) + delete(ps.msgs[topic], partition) + return set.msgs +} + +func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { + version := 1 + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } + + switch { + // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. + case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): + return true + // Would we overflow the size-limit of a message-batch for this partition? + case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: + return true + // Would we overflow simply in number of messages? + case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: + return true + default: + return false + } +} + +func (ps *produceSet) readyToFlush() bool { + switch { + // If we don't have any messages, nothing else matters + case ps.empty(): + return false + // If all three config values are 0, we always flush as-fast-as-possible + case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: + return true + // If we've passed the message trigger-point + case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: + return true + // If we've passed the byte trigger-point + case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: + return true + default: + return false + } +} + +func (ps *produceSet) empty() bool { + return ps.bufferCount == 0 +} diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go new file mode 100644 index 00000000000..8ac576db2a0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -0,0 +1,426 @@ +package sarama + +import ( + "encoding/binary" + "math" +) + +var errInvalidArrayLength = PacketDecodingError{"invalid array length"} +var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} +var errInvalidStringLength = PacketDecodingError{"invalid string length"} +var errVarintOverflow = PacketDecodingError{"varint overflow"} +var errUVarintOverflow = PacketDecodingError{"uvarint overflow"} +var errInvalidBool = PacketDecodingError{"invalid bool"} +var errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields 
are not supported yet"} + +type realDecoder struct { + raw []byte + off int + stack []pushDecoder +} + +// primitives + +func (rd *realDecoder) getInt8() (int8, error) { + if rd.remaining() < 1 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int8(rd.raw[rd.off]) + rd.off++ + return tmp, nil +} + +func (rd *realDecoder) getInt16() (int16, error) { + if rd.remaining() < 2 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) + rd.off += 2 + return tmp, nil +} + +func (rd *realDecoder) getInt32() (int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + return tmp, nil +} + +func (rd *realDecoder) getInt64() (int64, error) { + if rd.remaining() < 8 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + return tmp, nil +} + +func (rd *realDecoder) getVarint() (int64, error) { + tmp, n := binary.Varint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + if n < 0 { + rd.off -= n + return -1, errVarintOverflow + } + rd.off += n + return tmp, nil +} + +func (rd *realDecoder) getUVarint() (uint64, error) { + tmp, n := binary.Uvarint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + if n < 0 { + rd.off -= n + return 0, errUVarintOverflow + } + + rd.off += n + return tmp, nil +} + +func (rd *realDecoder) getArrayLength() (int, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) + rd.off += 4 + if tmp > rd.remaining() { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } else if tmp > 2*math.MaxUint16 { + return -1, errInvalidArrayLength + } + return tmp, nil +} + +func (rd *realDecoder) getCompactArrayLength() (int, error) { + n, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if n == 0 { + return 0, nil + } + + return int(n) - 1, nil +} + +func (rd *realDecoder) getBool() (bool, error) { + b, err := rd.getInt8() + if err != nil || b == 0 { + return false, err + } + if b != 1 { + return false, errInvalidBool + } + return true, nil +} + +func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) { + tagCount, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if tagCount != 0 { + return 0, errUnsupportedTaggedFields + } + + return 0, nil +} + +// collections + +func (rd *realDecoder) getBytes() ([]byte, error) { + tmp, err := rd.getInt32() + if err != nil { + return nil, err + } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} + +func (rd *realDecoder) getVarintBytes() ([]byte, error) { + tmp, err := rd.getVarint() + if err != nil { + return nil, err + } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} + +func (rd *realDecoder) getStringLength() (int, error) { + length, err := rd.getInt16() + if err != nil { + return 0, err + } + + n := int(length) + + switch { + case n < -1: + return 0, errInvalidStringLength + case n > rd.remaining(): + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + return n, nil +} + +func (rd *realDecoder) getString() (string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return "", err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return tmpStr, nil +} 
+ +func (rd *realDecoder) getNullableString() (*string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return &tmpStr, err +} + +func (rd *realDecoder) getCompactString() (string, error) { + n, err := rd.getUVarint() + if err != nil { + return "", err + } + + var length = int(n - 1) + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return tmpStr, nil +} + +func (rd *realDecoder) getCompactNullableString() (*string, error) { + n, err := rd.getUVarint() + + if err != nil { + return nil, err + } + + var length = int(n - 1) + + if length < 0 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return &tmpStr, err +} + +func (rd *realDecoder) getCompactInt32Array() ([]int32, error) { + n, err := rd.getUVarint() + if err != nil { + return nil, err + } + + if n == 0 { + return nil, nil + } + + arrayLength := int(n) - 1 + + ret := make([]int32, arrayLength) + + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + +func (rd *realDecoder) getInt32Array() ([]int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 4*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int32, n) + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + +func (rd *realDecoder) getInt64Array() ([]int64, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 8*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int64, n) + for i := range ret { + ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + } + return ret, nil +} + +func (rd *realDecoder) getStringArray() ([]string, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]string, n) + for i := range ret { + str, err := rd.getString() + if err != nil { + return nil, err + } + + ret[i] = str + } + return ret, nil +} + +// subsets + +func (rd *realDecoder) remaining() int { + return len(rd.raw) - rd.off +} + +func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + buf, err := rd.getRawBytes(length) + if err != nil { + return nil, err + } + return &realDecoder{raw: buf}, nil +} + +func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { + if length < 0 { + return nil, errInvalidByteSliceLength + } else if length > rd.remaining() { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + start := rd.off + rd.off += length + return rd.raw[start:rd.off], nil +} + +func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { + if rd.remaining() < offset+length { + return nil, ErrInsufficientData + } + off := rd.off + offset + return &realDecoder{raw: rd.raw[off : off+length]}, nil +} 
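The compact variants above (getCompactArrayLength, getCompactString and friends) come from Kafka's flexible protocol versions, which encode length+1 as an unsigned varint so that 0 can represent null and 1 an empty collection. A small standalone illustration of that convention, using only the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode the compact length of a 3-element array: length+1 as a uvarint.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 3+1)

	// Decode it the way getCompactArrayLength does: 0 would signal null.
	raw, _ := binary.Uvarint(buf[:n])
	if raw == 0 {
		fmt.Println("null array")
		return
	}
	fmt.Println("length:", raw-1) // Output: length: 3
}
```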
+ +func (rd *realDecoder) peekInt8(offset int) (int8, error) { + const byteLen = 1 + if rd.remaining() < offset+byteLen { + return -1, ErrInsufficientData + } + return int8(rd.raw[rd.off+offset]), nil +} + +// stacks + +func (rd *realDecoder) push(in pushDecoder) error { + in.saveOffset(rd.off) + + var reserve int + if dpd, ok := in.(dynamicPushDecoder); ok { + if err := dpd.decode(rd); err != nil { + return err + } + } else { + reserve = in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } + } + + rd.stack = append(rd.stack, in) + + rd.off += reserve + + return nil +} + +func (rd *realDecoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := rd.stack[len(rd.stack)-1] + rd.stack = rd.stack[:len(rd.stack)-1] + + return in.check(rd.off, rd.raw) +} diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go new file mode 100644 index 00000000000..ba073f7d38a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -0,0 +1,208 @@ +package sarama + +import ( + "encoding/binary" + "errors" + + "github.com/rcrowley/go-metrics" +) + +type realEncoder struct { + raw []byte + off int + stack []pushEncoder + registry metrics.Registry +} + +// primitives + +func (re *realEncoder) putInt8(in int8) { + re.raw[re.off] = byte(in) + re.off++ +} + +func (re *realEncoder) putInt16(in int16) { + binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) + re.off += 2 +} + +func (re *realEncoder) putInt32(in int32) { + binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) + re.off += 4 +} + +func (re *realEncoder) putInt64(in int64) { + binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) + re.off += 8 +} + +func (re *realEncoder) putVarint(in int64) { + re.off += binary.PutVarint(re.raw[re.off:], in) +} + +func (re *realEncoder) putUVarint(in uint64) { + re.off += binary.PutUvarint(re.raw[re.off:], in) +} + +func (re *realEncoder) putArrayLength(in int) error { + re.putInt32(int32(in)) + return nil +} + +func (re *realEncoder) putCompactArrayLength(in int) { + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(in + 1)) +} + +func (re *realEncoder) putBool(in bool) { + if in { + re.putInt8(1) + return + } + re.putInt8(0) +} + +// collection + +func (re *realEncoder) putRawBytes(in []byte) error { + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putBytes(in []byte) error { + if in == nil { + re.putInt32(-1) + return nil + } + re.putInt32(int32(len(in))) + return re.putRawBytes(in) +} + +func (re *realEncoder) putVarintBytes(in []byte) error { + if in == nil { + re.putVarint(-1) + return nil + } + re.putVarint(int64(len(in))) + return re.putRawBytes(in) +} + +func (re *realEncoder) putCompactString(in string) error { + re.putCompactArrayLength(len(in)) + return re.putRawBytes([]byte(in)) +} + +func (re *realEncoder) putNullableCompactString(in *string) error { + if in == nil { + re.putInt8(0) + return nil + } + return re.putCompactString(*in) +} + +func (re *realEncoder) putString(in string) error { + re.putInt16(int16(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putNullableString(in *string) error { + if in == nil { + re.putInt16(-1) + return nil + } + return re.putString(*in) +} + +func (re *realEncoder) putStringArray(in []string) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, val := range in { + 
if err := re.putString(val); err != nil { + return err + } + } + + return nil +} + +func (re *realEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + re.putUVarint(0) + return nil + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putInt32Array(in []int32) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putInt64Array(in []int64) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt64(val) + } + return nil +} + +func (re *realEncoder) putEmptyTaggedFieldArray() { + re.putUVarint(0) +} + +func (re *realEncoder) offset() int { + return re.off +} + +// stacks + +func (re *realEncoder) push(in pushEncoder) { + in.saveOffset(re.off) + re.off += in.reserveLength() + re.stack = append(re.stack, in) +} + +func (re *realEncoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := re.stack[len(re.stack)-1] + re.stack = re.stack[:len(re.stack)-1] + + return in.run(re.off, re.raw) +} + +// we do record metrics during the real encoder pass +func (re *realEncoder) metricRegistry() metrics.Registry { + return re.registry +} diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go new file mode 100644 index 00000000000..cdccfe32269 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record.go @@ -0,0 +1,116 @@ +package sarama + +import ( + "encoding/binary" + "time" +) + +const ( + isTransactionalMask = 0x10 + controlMask = 0x20 + maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 +) + +//RecordHeader stores key and value for a record header +type RecordHeader struct { + Key []byte + Value []byte +} + +func (h *RecordHeader) encode(pe packetEncoder) error { + if err := pe.putVarintBytes(h.Key); err != nil { + return err + } + return pe.putVarintBytes(h.Value) +} + +func (h *RecordHeader) decode(pd packetDecoder) (err error) { + if h.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if h.Value, err = pd.getVarintBytes(); err != nil { + return err + } + return nil +} + +//Record is kafka record type +type Record struct { + Headers []*RecordHeader + + Attributes int8 + TimestampDelta time.Duration + OffsetDelta int64 + Key []byte + Value []byte + length varintLengthField +} + +func (r *Record) encode(pe packetEncoder) error { + pe.push(&r.length) + pe.putInt8(r.Attributes) + pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) + pe.putVarint(r.OffsetDelta) + if err := pe.putVarintBytes(r.Key); err != nil { + return err + } + if err := pe.putVarintBytes(r.Value); err != nil { + return err + } + pe.putVarint(int64(len(r.Headers))) + + for _, h := range r.Headers { + if err := h.encode(pe); err != nil { + return err + } + } + + return pe.pop() +} + +func (r *Record) decode(pd packetDecoder) (err error) { + if err = pd.push(&r.length); err != nil { + return err + } + + if r.Attributes, err = pd.getInt8(); err != nil { + return err + } + + timestamp, err := 
pd.getVarint() + if err != nil { + return err + } + r.TimestampDelta = time.Duration(timestamp) * time.Millisecond + + if r.OffsetDelta, err = pd.getVarint(); err != nil { + return err + } + + if r.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if r.Value, err = pd.getVarintBytes(); err != nil { + return err + } + + numHeaders, err := pd.getVarint() + if err != nil { + return err + } + + if numHeaders >= 0 { + r.Headers = make([]*RecordHeader, numHeaders) + } + for i := int64(0); i < numHeaders; i++ { + hdr := new(RecordHeader) + if err := hdr.decode(pd); err != nil { + return err + } + r.Headers[i] = hdr + } + + return pd.pop() +} diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go new file mode 100644 index 00000000000..c653763eca8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -0,0 +1,225 @@ +package sarama + +import ( + "fmt" + "time" +) + +const recordBatchOverhead = 49 + +type recordsArray []*Record + +func (e recordsArray) encode(pe packetEncoder) error { + for _, r := range e { + if err := r.encode(pe); err != nil { + return err + } + } + return nil +} + +func (e recordsArray) decode(pd packetDecoder) error { + for i := range e { + rec := &Record{} + if err := rec.decode(pd); err != nil { + return err + } + e[i] = rec + } + return nil +} + +type RecordBatch struct { + FirstOffset int64 + PartitionLeaderEpoch int32 + Version int8 + Codec CompressionCodec + CompressionLevel int + Control bool + LogAppendTime bool + LastOffsetDelta int32 + FirstTimestamp time.Time + MaxTimestamp time.Time + ProducerID int64 + ProducerEpoch int16 + FirstSequence int32 + Records []*Record + PartialTrailingRecord bool + IsTransactional bool + + compressedRecords []byte + recordsLen int // uncompressed records size +} + +func (b *RecordBatch) LastOffset() int64 { + return b.FirstOffset + int64(b.LastOffsetDelta) +} + +func (b *RecordBatch) encode(pe packetEncoder) error { + if b.Version != 2 { + return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)} + } + pe.putInt64(b.FirstOffset) + pe.push(&lengthField{}) + pe.putInt32(b.PartitionLeaderEpoch) + pe.putInt8(b.Version) + pe.push(newCRC32Field(crcCastagnoli)) + pe.putInt16(b.computeAttributes()) + pe.putInt32(b.LastOffsetDelta) + + if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil { + return err + } + + if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil { + return err + } + + pe.putInt64(b.ProducerID) + pe.putInt16(b.ProducerEpoch) + pe.putInt32(b.FirstSequence) + + if err := pe.putArrayLength(len(b.Records)); err != nil { + return err + } + + if b.compressedRecords == nil { + if err := b.encodeRecords(pe); err != nil { + return err + } + } + if err := pe.putRawBytes(b.compressedRecords); err != nil { + return err + } + + if err := pe.pop(); err != nil { + return err + } + return pe.pop() +} + +func (b *RecordBatch) decode(pd packetDecoder) (err error) { + if b.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + batchLen, err := pd.getInt32() + if err != nil { + return err + } + + if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + + if b.Version, err = pd.getInt8(); err != nil { + return err + } + + crc32Decoder := acquireCrc32Field(crcCastagnoli) + defer releaseCrc32Field(crc32Decoder) + + if err = pd.push(crc32Decoder); err != nil { + return err + } + + attributes, err := pd.getInt16() + if err != nil { + return err + } + b.Codec = 
CompressionCodec(int8(attributes) & compressionCodecMask) + b.Control = attributes&controlMask == controlMask + b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask + b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask + + if b.LastOffsetDelta, err = pd.getInt32(); err != nil { + return err + } + + if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil { + return err + } + + if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil { + return err + } + + if b.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if b.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + if b.FirstSequence, err = pd.getInt32(); err != nil { + return err + } + + numRecs, err := pd.getArrayLength() + if err != nil { + return err + } + if numRecs >= 0 { + b.Records = make([]*Record, numRecs) + } + + bufSize := int(batchLen) - recordBatchOverhead + recBuffer, err := pd.getRawBytes(bufSize) + if err != nil { + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err + } + + if err = pd.pop(); err != nil { + return err + } + + recBuffer, err = decompress(b.Codec, recBuffer) + if err != nil { + return err + } + + b.recordsLen = len(recBuffer) + err = decode(recBuffer, recordsArray(b.Records)) + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err +} + +func (b *RecordBatch) encodeRecords(pe packetEncoder) error { + var raw []byte + var err error + if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil { + return err + } + b.recordsLen = len(raw) + + b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw) + return err +} + +func (b *RecordBatch) computeAttributes() int16 { + attr := int16(b.Codec) & int16(compressionCodecMask) + if b.Control { + attr |= controlMask + } + if b.LogAppendTime { + attr |= timestampTypeMask + } + if b.IsTransactional { + attr |= isTransactionalMask + } + return attr +} + +func (b *RecordBatch) addRecord(r *Record) { + b.Records = append(b.Records, r) +} diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go new file mode 100644 index 00000000000..f4c5e95f1de --- /dev/null +++ b/vendor/github.com/Shopify/sarama/records.go @@ -0,0 +1,203 @@ +package sarama + +import "fmt" + +const ( + unknownRecords = iota + legacyRecords + defaultRecords + + magicOffset = 16 +) + +// Records implements a union type containing either a RecordBatch or a legacy MessageSet. +type Records struct { + recordsType int + MsgSet *MessageSet + RecordBatch *RecordBatch +} + +func newLegacyRecords(msgSet *MessageSet) Records { + return Records{recordsType: legacyRecords, MsgSet: msgSet} +} + +func newDefaultRecords(batch *RecordBatch) Records { + return Records{recordsType: defaultRecords, RecordBatch: batch} +} + +// setTypeFromFields sets type of Records depending on which of MsgSet or RecordBatch is not nil. +// The first return value indicates whether both fields are nil (and the type is not set). +// If both fields are not nil, it returns an error. 
+func (r *Records) setTypeFromFields() (bool, error) { + if r.MsgSet == nil && r.RecordBatch == nil { + return true, nil + } + if r.MsgSet != nil && r.RecordBatch != nil { + return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown") + } + r.recordsType = defaultRecords + if r.MsgSet != nil { + r.recordsType = legacyRecords + } + return false, nil +} + +func (r *Records) encode(pe packetEncoder) error { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return err + } + } + + switch r.recordsType { + case legacyRecords: + if r.MsgSet == nil { + return nil + } + return r.MsgSet.encode(pe) + case defaultRecords: + if r.RecordBatch == nil { + return nil + } + return r.RecordBatch.encode(pe) + } + + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) setTypeFromMagic(pd packetDecoder) error { + magic, err := magicValue(pd) + if err != nil { + return err + } + + r.recordsType = defaultRecords + if magic < 2 { + r.recordsType = legacyRecords + } + + return nil +} + +func (r *Records) decode(pd packetDecoder) error { + if r.recordsType == unknownRecords { + if err := r.setTypeFromMagic(pd); err != nil { + return err + } + } + + switch r.recordsType { + case legacyRecords: + r.MsgSet = &MessageSet{} + return r.MsgSet.decode(pd) + case defaultRecords: + r.RecordBatch = &RecordBatch{} + return r.RecordBatch.decode(pd) + } + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) numRecords() (int, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return 0, err + } + } + + switch r.recordsType { + case legacyRecords: + if r.MsgSet == nil { + return 0, nil + } + return len(r.MsgSet.Messages), nil + case defaultRecords: + if r.RecordBatch == nil { + return 0, nil + } + return len(r.RecordBatch.Records), nil + } + return 0, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isPartial() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.MsgSet == nil { + return false, nil + } + return r.MsgSet.PartialTrailingMessage, nil + case defaultRecords: + if r.RecordBatch == nil { + return false, nil + } + return r.RecordBatch.PartialTrailingRecord, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isControl() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case legacyRecords: + return false, nil + case defaultRecords: + if r.RecordBatch == nil { + return false, nil + } + return r.RecordBatch.Control, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isOverflow() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.MsgSet == nil { + return false, nil + } + return r.MsgSet.OverflowMessage, nil + case defaultRecords: + return false, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func magicValue(pd packetDecoder) (int8, error) { + return 
pd.peekInt8(magicOffset) +} + +func (r *Records) getControlRecord() (ControlRecord, error) { + if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 { + return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty") + } + + firstRecord := r.RecordBatch.Records[0] + controlRecord := ControlRecord{} + err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value}) + if err != nil { + return ControlRecord{}, err + } + + return controlRecord, nil +} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go new file mode 100644 index 00000000000..dcfd3946c81 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/request.go @@ -0,0 +1,191 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "io" +) + +type protocolBody interface { + encoder + versionedDecoder + key() int16 + version() int16 + headerVersion() int16 + requiredVersion() KafkaVersion +} + +type request struct { + correlationID int32 + clientID string + body protocolBody +} + +func (r *request) encode(pe packetEncoder) error { + pe.push(&lengthField{}) + pe.putInt16(r.body.key()) + pe.putInt16(r.body.version()) + pe.putInt32(r.correlationID) + + if r.body.headerVersion() >= 1 { + err := pe.putString(r.clientID) + if err != nil { + return err + } + } + + if r.body.headerVersion() >= 2 { + // we don't use tag headers at the moment so we just put an array length of 0 + pe.putUVarint(0) + } + + err := r.body.encode(pe) + if err != nil { + return err + } + + return pe.pop() +} + +func (r *request) decode(pd packetDecoder) (err error) { + key, err := pd.getInt16() + if err != nil { + return err + } + + version, err := pd.getInt16() + if err != nil { + return err + } + + r.correlationID, err = pd.getInt32() + if err != nil { + return err + } + + r.clientID, err = pd.getString() + if err != nil { + return err + } + + r.body = allocateBody(key, version) + if r.body == nil { + return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} + } + + if r.body.headerVersion() >= 2 { + // tagged field + _, err = pd.getUVarint() + if err != nil { + return err + } + } + + return r.body.decode(pd, version) +} + +func decodeRequest(r io.Reader) (*request, int, error) { + var ( + bytesRead int + lengthBytes = make([]byte, 4) + ) + + if _, err := io.ReadFull(r, lengthBytes); err != nil { + return nil, bytesRead, err + } + + bytesRead += len(lengthBytes) + length := int32(binary.BigEndian.Uint32(lengthBytes)) + + if length <= 4 || length > MaxRequestSize { + return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} + } + + encodedReq := make([]byte, length) + if _, err := io.ReadFull(r, encodedReq); err != nil { + return nil, bytesRead, err + } + + bytesRead += len(encodedReq) + + req := &request{} + if err := decode(encodedReq, req); err != nil { + return nil, bytesRead, err + } + + return req, bytesRead, nil +} + +func allocateBody(key, version int16) protocolBody { + switch key { + case 0: + return &ProduceRequest{} + case 1: + return &FetchRequest{Version: version} + case 2: + return &OffsetRequest{Version: version} + case 3: + return &MetadataRequest{} + case 8: + return &OffsetCommitRequest{Version: version} + case 9: + return &OffsetFetchRequest{} + case 10: + return &FindCoordinatorRequest{} + case 11: + return &JoinGroupRequest{} + case 12: + return &HeartbeatRequest{} + case 13: + return &LeaveGroupRequest{} + case 14: + return &SyncGroupRequest{} + case 15: + return 
&DescribeGroupsRequest{} + case 16: + return &ListGroupsRequest{} + case 17: + return &SaslHandshakeRequest{} + case 18: + return &ApiVersionsRequest{} + case 19: + return &CreateTopicsRequest{} + case 20: + return &DeleteTopicsRequest{} + case 21: + return &DeleteRecordsRequest{} + case 22: + return &InitProducerIDRequest{} + case 24: + return &AddPartitionsToTxnRequest{} + case 25: + return &AddOffsetsToTxnRequest{} + case 26: + return &EndTxnRequest{} + case 28: + return &TxnOffsetCommitRequest{} + case 29: + return &DescribeAclsRequest{} + case 30: + return &CreateAclsRequest{} + case 31: + return &DeleteAclsRequest{} + case 32: + return &DescribeConfigsRequest{} + case 33: + return &AlterConfigsRequest{} + case 35: + return &DescribeLogDirsRequest{} + case 36: + return &SaslAuthenticateRequest{} + case 37: + return &CreatePartitionsRequest{} + case 42: + return &DeleteGroupsRequest{} + case 45: + return &AlterPartitionReassignmentsRequest{} + case 46: + return &ListPartitionReassignmentsRequest{} + } + return nil +} diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go new file mode 100644 index 00000000000..5dffb75be65 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/response_header.go @@ -0,0 +1,31 @@ +package sarama + +import "fmt" + +const responseLengthSize = 4 +const correlationIDSize = 4 + +type responseHeader struct { + length int32 + correlationID int32 +} + +func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) { + r.length, err = pd.getInt32() + if err != nil { + return err + } + if r.length <= 4 || r.length > MaxResponseSize { + return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)} + } + + r.correlationID, err = pd.getInt32() + + if version >= 1 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + return err +} diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go new file mode 100644 index 00000000000..48f362d287e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -0,0 +1,110 @@ +/* +Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level +API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level +API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. + +To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel +and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. +The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be +useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees +depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the +SyncProducer can still sometimes be lost. + +To consume messages, use Consumer or Consumer-Group API. + +For lower-level needs, the Broker and Request/Response objects permit precise control over each connection +and message sent on the wire; the Client provides higher-level metadata management that is shared between +the producers and the consumer. 
The Request/Response objects and properties are mostly undocumented, as they line up +exactly with the protocol fields documented by Kafka at +https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + +Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry. + +Broker related metrics: + + +----------------------------------------------+------------+---------------------------------------------------------------+ + | Name | Type | Description | + +----------------------------------------------+------------+---------------------------------------------------------------+ + | incoming-byte-rate | meter | Bytes/second read off all brokers | + | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | + | outgoing-byte-rate | meter | Bytes/second written off all brokers | + | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | + | request-rate | meter | Requests/second sent to all brokers | + | request-rate-for-broker- | meter | Requests/second sent to a given broker | + | request-size | histogram | Distribution of the request size in bytes for all brokers | + | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | + | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | + | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | + | response-rate | meter | Responses/second received from all brokers | + | response-rate-for-broker- | meter | Responses/second received from a given broker | + | response-size | histogram | Distribution of the response size in bytes for all brokers | + | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + | requests-in-flight | counter | The current number of in-flight requests awaiting a response | + | | | for all brokers | + | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | + | | | for a given broker | + +----------------------------------------------+------------+---------------------------------------------------------------+ + +Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. 
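As an illustrative sketch of reading these metrics back (assuming the rcrowley/go-metrics interfaces; this exact usage is not documented above), the registry configured on a client can be queried directly:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	cfg := sarama.NewConfig()
	// ... run a producer or consumer built from cfg ...

	// "request-rate" is a meter in the table above; Rate1() is its one-minute EWMA.
	if meter, ok := cfg.MetricRegistry.Get("request-rate").(metrics.Meter); ok {
		fmt.Println("requests/sec:", meter.Rate1())
	}
}
```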
+ +Producer related metrics: + + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | Name | Type | Description | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics | + | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic | + | record-send-rate | meter | Records/second sent to all topics | + | record-send-rate-for-topic- | meter | Records/second sent to a given topic | + | records-per-request | histogram | Distribution of the number of records sent per request for all topics | + | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic | + | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics | + | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + +Consumer related metrics: + + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | Name | Type | Description | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | consumer-batch-size | histogram | Distribution of the number of messages in a batch | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + +*/ +package sarama + +import ( + "io/ioutil" + "log" +) + +var ( + // Logger is the instance of a StdLogger interface that Sarama writes connection + // management events to. By default it is set to discard all log messages via ioutil.Discard, + // but you can set it to redirect wherever you want. + Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) + + // PanicHandler is called for recovering from panics spawned internally to the library (and thus + // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. + PanicHandler func(interface{}) + + // MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying + // to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned + // with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt + // to process. + MaxRequestSize int32 = 100 * 1024 * 1024 + + // MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If + // a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to + // protect the client from running out of memory. Please note that brokers do not have any natural limit on + // the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers + // (see https://issues.apache.org/jira/browse/KAFKA-2063). 
+ MaxResponseSize int32 = 100 * 1024 * 1024 +) + +// StdLogger is used to log error messages. +type StdLogger interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go new file mode 100644 index 00000000000..90504df6f52 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go @@ -0,0 +1,33 @@ +package sarama + +type SaslAuthenticateRequest struct { + SaslAuthBytes []byte +} + +// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API +const APIKeySASLAuth = 36 + +func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error { + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) { + r.SaslAuthBytes, err = pd.getBytes() + return err +} + +func (r *SaslAuthenticateRequest) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateRequest) version() int16 { + return 0 +} + +func (r *SaslAuthenticateRequest) headerVersion() int16 { + return 1 +} + +func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go new file mode 100644 index 00000000000..3ef57b5afad --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go @@ -0,0 +1,48 @@ +package sarama + +type SaslAuthenticateResponse struct { + Err KError + ErrorMessage *string + SaslAuthBytes []byte +} + +func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putNullableString(r.ErrorMessage); err != nil { + return err + } + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.ErrorMessage, err = pd.getNullableString(); err != nil { + return err + } + + r.SaslAuthBytes, err = pd.getBytes() + + return err +} + +func (r *SaslAuthenticateResponse) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateResponse) version() int16 { + return 0 +} + +func (r *SaslAuthenticateResponse) headerVersion() int16 { + return 0 +} + +func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go new file mode 100644 index 00000000000..74dc3072f48 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go @@ -0,0 +1,38 @@ +package sarama + +type SaslHandshakeRequest struct { + Mechanism string + Version int16 +} + +func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.Mechanism); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) { + if r.Mechanism, err = pd.getString(); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) key() int16 { + return 17 +} + +func (r *SaslHandshakeRequest) version() int16 { + return r.Version +} + +func (r *SaslHandshakeRequest) headerVersion() int16 { + return 1 +} + +func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git 
a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go new file mode 100644 index 00000000000..69dfc3178ec --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go @@ -0,0 +1,42 @@ +package sarama + +type SaslHandshakeResponse struct { + Err KError + EnabledMechanisms []string +} + +func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putStringArray(r.EnabledMechanisms) +} + +func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeResponse) key() int16 { + return 17 +} + +func (r *SaslHandshakeResponse) version() int16 { + return 0 +} + +func (r *SaslHandshakeResponse) headerVersion() int16 { + return 0 +} + +func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go new file mode 100644 index 00000000000..bb0c82c349a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go @@ -0,0 +1,124 @@ +package sarama + +type topicPartitionAssignment struct { + Topic string + Partition int32 +} + +type StickyAssignorUserData interface { + partitions() []topicPartitionAssignment + hasGeneration() bool + generation() int +} + +//StickyAssignorUserDataV0 holds topic partition information for an assignment +type StickyAssignorUserDataV0 struct { + Topics map[string][]int32 + + topicPartitions []topicPartitionAssignment +} + +func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + return nil +} + +func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) { + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + m.topicPartitions = populateTopicPartitions(m.Topics) + return nil +} + +func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions } +func (m *StickyAssignorUserDataV0) hasGeneration() bool { return false } +func (m *StickyAssignorUserDataV0) generation() int { return defaultGeneration } + +//StickyAssignorUserDataV1 holds topic partition information for an assignment +type StickyAssignorUserDataV1 struct { + Topics map[string][]int32 + Generation int32 + + topicPartitions []topicPartitionAssignment +} + +func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + pe.putInt32(m.Generation) + return nil +} + +func (m *StickyAssignorUserDataV1) decode(pd packetDecoder) 
(err error) { + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + + m.Generation, err = pd.getInt32() + if err != nil { + return err + } + m.topicPartitions = populateTopicPartitions(m.Topics) + return nil +} + +func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions } +func (m *StickyAssignorUserDataV1) hasGeneration() bool { return true } +func (m *StickyAssignorUserDataV1) generation() int { return int(m.Generation) } + +func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment { + topicPartitions := make([]topicPartitionAssignment, 0) + for topic, partitions := range topics { + for _, partition := range partitions { + topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition}) + } + } + return topicPartitions +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go new file mode 100644 index 00000000000..ac6ecb13e04 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -0,0 +1,104 @@ +package sarama + +type SyncGroupRequest struct { + GroupId string + GenerationId int32 + MemberId string + GroupAssignments map[string][]byte +} + +func (r *SyncGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { + return err + } + for memberId, memberAssignment := range r.GroupAssignments { + if err := pe.putString(memberId); err != nil { + return err + } + if err := pe.putBytes(memberAssignment); err != nil { + return err + } + } + + return nil +} + +func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupAssignments = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + memberAssignment, err := pd.getBytes() + if err != nil { + return err + } + + r.GroupAssignments[memberId] = memberAssignment + } + + return nil +} + +func (r *SyncGroupRequest) key() int16 { + return 14 +} + +func (r *SyncGroupRequest) version() int16 { + return 0 +} + +func (r *SyncGroupRequest) headerVersion() int16 { + return 1 +} + +func (r *SyncGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { + if r.GroupAssignments == nil { + r.GroupAssignments = make(map[string][]byte) + } + + r.GroupAssignments[memberId] = memberAssignment +} + +func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { + bin, err := encode(memberAssignment, nil) + if err != nil { + return err + } + + r.AddGroupAssignment(memberId, bin) + return nil +} diff --git 
a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go new file mode 100644 index 00000000000..af019c42f97 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -0,0 +1,45 @@
+package sarama
+
+type SyncGroupResponse struct {
+	Err              KError
+	MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(r.MemberAssignment, assignment)
+	return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return pe.putBytes(r.MemberAssignment)
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	r.MemberAssignment, err = pd.getBytes()
+	return
+}
+
+func (r *SyncGroupResponse) key() int16 {
+	return 14
+}
+
+func (r *SyncGroupResponse) version() int16 {
+	return 0
+}
+
+func (r *SyncGroupResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go new file mode 100644 index 00000000000..021c5a01032 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -0,0 +1,149 @@
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+	// SendMessage produces a given message, and returns only when it either has
+	// succeeded or failed to produce. It will return the partition and the offset
+	// of the produced message, or an error if the message failed to produce.
+	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+	// SendMessages produces a given set of messages, and returns only when all
+	// messages in the set have either succeeded or failed. Note that messages
+	// can succeed and fail individually; if some succeed and some fail,
+	// SendMessages will return an error.
+	SendMessages(msgs []*ProducerMessage) error
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+}
+
+type syncProducer struct {
+	producer *asyncProducer
+	wg       sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
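+// A minimal usage sketch (editor's illustration, not upstream documentation);
+// the broker address and topic name are assumptions:
+//
+//	config := NewConfig()
+//	config.Producer.Return.Successes = true // required by the SyncProducer
+//	producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer producer.Close()
+//
+//	partition, offset, err := producer.SendMessage(&ProducerMessage{
+//		Topic: "my-topic",
+//		Value: StringEncoder("hello"),
+//	})
+//	// partition and offset report where the message landed
+//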
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { + if config == nil { + config = NewConfig() + config.Producer.Return.Successes = true + } + + if err := verifyProducerConfig(config); err != nil { + return nil, err + } + + p, err := NewAsyncProducer(addrs, config) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this producer. +func NewSyncProducerFromClient(client Client) (SyncProducer, error) { + if err := verifyProducerConfig(client.Config()); err != nil { + return nil, err + } + + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { + sp := &syncProducer{producer: p} + + sp.wg.Add(2) + go withRecover(sp.handleSuccesses) + go withRecover(sp.handleErrors) + + return sp +} + +func verifyProducerConfig(config *Config) error { + if !config.Producer.Return.Errors { + return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer") + } + if !config.Producer.Return.Successes { + return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer") + } + return nil +} + +func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { + expectation := make(chan *ProducerError, 1) + msg.expectation = expectation + sp.producer.Input() <- msg + + if err := <-expectation; err != nil { + return -1, -1, err.Err + } + + return msg.Partition, msg.Offset, nil +} + +func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { + expectations := make(chan chan *ProducerError, len(msgs)) + go func() { + for _, msg := range msgs { + expectation := make(chan *ProducerError, 1) + msg.expectation = expectation + sp.producer.Input() <- msg + expectations <- expectation + } + close(expectations) + }() + + var errors ProducerErrors + for expectation := range expectations { + if err := <-expectation; err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (sp *syncProducer) handleSuccesses() { + defer sp.wg.Done() + for msg := range sp.producer.Successes() { + expectation := msg.expectation + expectation <- nil + } +} + +func (sp *syncProducer) handleErrors() { + defer sp.wg.Done() + for err := range sp.producer.Errors() { + expectation := err.Msg.expectation + expectation <- err + } +} + +func (sp *syncProducer) Close() error { + sp.producer.AsyncClose() + sp.wg.Wait() + return nil +} diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go new file mode 100644 index 00000000000..372278d0bfa --- /dev/null +++ b/vendor/github.com/Shopify/sarama/timestamp.go @@ -0,0 +1,40 @@ +package sarama + +import ( + "fmt" + "time" +) + +type Timestamp struct { + *time.Time +} + +func (t Timestamp) encode(pe packetEncoder) error { + timestamp := int64(-1) + + if !t.Before(time.Unix(0, 0)) { + timestamp = t.UnixNano() / int64(time.Millisecond) + } else if !t.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)} + } + + pe.putInt64(timestamp) + return nil +} + +func (t Timestamp) decode(pd packetDecoder) error { + millis, err := pd.getInt64() + if err != nil { + 
return err + } + + // negative timestamps are invalid, in these cases we should return + // a zero time + timestamp := time.Time{} + if millis >= 0 { + timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + + *t.Time = timestamp + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go new file mode 100644 index 00000000000..c4043a33520 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go @@ -0,0 +1,130 @@ +package sarama + +type TxnOffsetCommitRequest struct { + TransactionalID string + GroupID string + ProducerID int64 + ProducerEpoch int16 + Topics map[string][]*PartitionOffsetMetadata +} + +func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { + if err := pe.putString(t.TransactionalID); err != nil { + return err + } + if err := pe.putString(t.GroupID); err != nil { + return err + } + pe.putInt64(t.ProducerID) + pe.putInt16(t.ProducerEpoch) + + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + for topic, partitions := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for _, partition := range partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + if t.TransactionalID, err = pd.getString(); err != nil { + return err + } + if t.GroupID, err = pd.getString(); err != nil { + return err + } + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if t.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionOffsetMetadata) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionOffsetMetadata, m) + + for j := 0; j < m; j++ { + partitionOffsetMetadata := new(PartitionOffsetMetadata) + if err := partitionOffsetMetadata.decode(pd, version); err != nil { + return err + } + t.Topics[topic][j] = partitionOffsetMetadata + } + } + + return nil +} + +func (a *TxnOffsetCommitRequest) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitRequest) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitRequest) headerVersion() int16 { + return 1 +} + +func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type PartitionOffsetMetadata struct { + Partition int32 + Offset int64 + Metadata *string +} + +func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt64(p.Offset) + if err := pe.putNullableString(p.Metadata); err != nil { + return err + } + + return nil +} + +func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + if p.Offset, err = pd.getInt64(); err != nil { + return err + } + if p.Metadata, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go new file mode 100644 index 00000000000..94d8029dace --- /dev/null +++ 
b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go @@ -0,0 +1,87 @@ +package sarama + +import ( + "time" +) + +type TxnOffsetCommitResponse struct { + ThrottleTime time.Duration + Topics map[string][]*PartitionError +} + +func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(t.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + + for topic, e := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionError) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionError, m) + + for j := 0; j < m; j++ { + t.Topics[topic][j] = new(PartitionError) + if err := t.Topics[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *TxnOffsetCommitResponse) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitResponse) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitResponse) headerVersion() int16 { + return 0 +} + +func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go new file mode 100644 index 00000000000..3e9dfd7add7 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -0,0 +1,230 @@ +package sarama + +import ( + "bufio" + "fmt" + "net" + "regexp" +) + +type none struct{} + +// make []int32 sortable so we can sort partition numbers +type int32Slice []int32 + +func (slice int32Slice) Len() int { + return len(slice) +} + +func (slice int32Slice) Less(i, j int) bool { + return slice[i] < slice[j] +} + +func (slice int32Slice) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func dupInt32Slice(input []int32) []int32 { + ret := make([]int32, 0, len(input)) + ret = append(ret, input...) + return ret +} + +func withRecover(fn func()) { + defer func() { + handler := PanicHandler + if handler != nil { + if err := recover(); err != nil { + handler(err) + } + } + }() + + fn() +} + +func safeAsyncClose(b *Broker) { + tmp := b // local var prevents clobbering in goroutine + go withRecover(func() { + if connected, _ := tmp.Connected(); connected { + if err := tmp.Close(); err != nil { + Logger.Println("Error closing broker", tmp.ID(), ":", err) + } + } + }) +} + +// Encoder is a simple interface for any type that can be encoded as an array of bytes +// in order to be sent as the key or value of a Kafka message. Length() is provided as an +// optimization, and must return the same as len() on the result of Encode(). 
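+//
+// A hedged sketch of a custom implementation (editor's addition, not part of
+// sarama); it assumes encoding/json is imported and accepts a second
+// json.Marshal call in Length() for the sake of brevity:
+//
+//	type jsonEncoder struct{ v interface{} }
+//
+//	func (e jsonEncoder) Encode() ([]byte, error) { return json.Marshal(e.v) }
+//
+//	func (e jsonEncoder) Length() int {
+//		b, _ := json.Marshal(e.v) // must agree with len() of Encode()'s result
+//		return len(b)
+//	}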
+type Encoder interface {
+	Encode() ([]byte, error)
+	Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+	return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+	return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+	return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+	return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+	net.Conn
+	buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+	return &bufConn{
+		Conn: conn,
+		buf:  bufio.NewReader(conn),
+	}
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+	return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+	// it's a struct rather than just typing the array directly to make it opaque and stop people
+	// generating their own arbitrary versions
+	version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+	return KafkaVersion{
+		version: [4]uint{major, minor, veryMinor, patch},
+	}
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+// V1.IsAtLeast(V2) // false
+// V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+	for i := range v.version {
+		if v.version[i] > other.version[i] {
+			return true
+		} else if v.version[i] < other.version[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Effective constants defining the supported kafka versions.
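+// For example (editor's illustration, using the helpers declared below):
+//
+//	v, err := ParseKafkaVersion("2.4.0")
+//	if err == nil && v.IsAtLeast(V0_11_0_0) {
+//		// the broker understands the v2 (record batch) message format
+//	}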
+var ( + V0_8_2_0 = newKafkaVersion(0, 8, 2, 0) + V0_8_2_1 = newKafkaVersion(0, 8, 2, 1) + V0_8_2_2 = newKafkaVersion(0, 8, 2, 2) + V0_9_0_0 = newKafkaVersion(0, 9, 0, 0) + V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) + V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) + V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) + V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) + V0_10_1_1 = newKafkaVersion(0, 10, 1, 1) + V0_10_2_0 = newKafkaVersion(0, 10, 2, 0) + V0_10_2_1 = newKafkaVersion(0, 10, 2, 1) + V0_11_0_0 = newKafkaVersion(0, 11, 0, 0) + V0_11_0_1 = newKafkaVersion(0, 11, 0, 1) + V0_11_0_2 = newKafkaVersion(0, 11, 0, 2) + V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) + V1_1_0_0 = newKafkaVersion(1, 1, 0, 0) + V1_1_1_0 = newKafkaVersion(1, 1, 1, 0) + V2_0_0_0 = newKafkaVersion(2, 0, 0, 0) + V2_0_1_0 = newKafkaVersion(2, 0, 1, 0) + V2_1_0_0 = newKafkaVersion(2, 1, 0, 0) + V2_2_0_0 = newKafkaVersion(2, 2, 0, 0) + V2_3_0_0 = newKafkaVersion(2, 3, 0, 0) + V2_4_0_0 = newKafkaVersion(2, 4, 0, 0) + V2_5_0_0 = newKafkaVersion(2, 5, 0, 0) + V2_6_0_0 = newKafkaVersion(2, 6, 0, 0) + + SupportedVersions = []KafkaVersion{ + V0_8_2_0, + V0_8_2_1, + V0_8_2_2, + V0_9_0_0, + V0_9_0_1, + V0_10_0_0, + V0_10_0_1, + V0_10_1_0, + V0_10_1_1, + V0_10_2_0, + V0_10_2_1, + V0_11_0_0, + V0_11_0_1, + V0_11_0_2, + V1_0_0_0, + V1_1_0_0, + V1_1_1_0, + V2_0_0_0, + V2_0_1_0, + V2_1_0_0, + V2_2_0_0, + V2_3_0_0, + V2_4_0_0, + V2_5_0_0, + V2_6_0_0, + } + MinVersion = V0_8_2_0 + MaxVersion = V2_6_0_0 + DefaultVersion = V1_0_0_0 +) + +//ParseKafkaVersion parses and returns kafka version or error from a string +func ParseKafkaVersion(s string) (KafkaVersion, error) { + if len(s) < 5 { + return DefaultVersion, fmt.Errorf("invalid version `%s`", s) + } + var major, minor, veryMinor, patch uint + var err error + if s[0] == '0' { + err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch}) + } else { + err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor}) + } + if err != nil { + return DefaultVersion, err + } + return newKafkaVersion(major, minor, veryMinor, patch), nil +} + +func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error { + if !regexp.MustCompile(pattern).MatchString(s) { + return fmt.Errorf("invalid version `%s`", s) + } + _, err := fmt.Sscanf(s, format, v[0], v[1], v[2]) + return err +} + +func (v KafkaVersion) String() string { + if v.version[0] == 0 { + return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3]) + } + + return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2]) +} diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go new file mode 100644 index 00000000000..7c9951acc8a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/zstd.go @@ -0,0 +1,28 @@ +package sarama + +import ( + "sync" + + "github.com/klauspost/compress/zstd" +) + +var ( + zstdDec *zstd.Decoder + zstdEnc *zstd.Encoder + + zstdEncOnce, zstdDecOnce sync.Once +) + +func zstdDecompress(dst, src []byte) ([]byte, error) { + zstdDecOnce.Do(func() { + zstdDec, _ = zstd.NewReader(nil) + }) + return zstdDec.DecodeAll(src, dst) +} + +func zstdCompress(dst, src []byte) ([]byte, error) { + zstdEncOnce.Do(func() { + zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true)) + }) + return zstdEnc.EncodeAll(src, dst), nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/LICENSE.txt b/vendor/github.com/apache/openwhisk-client-go/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- 
/dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/apache/openwhisk-client-go/NOTICE.txt b/vendor/github.com/apache/openwhisk-client-go/NOTICE.txt new file mode 100644 index 00000000000..9b24b2e75f9 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/NOTICE.txt @@ -0,0 +1,5 @@ +Apache OpenWhisk Client Go +Copyright 2016-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/action.go b/vendor/github.com/apache/openwhisk-client-go/whisk/action.go new file mode 100644 index 00000000000..9935871d6cf --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/action.go @@ -0,0 +1,315 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package whisk + +import ( + "errors" + "fmt" + "github.com/apache/openwhisk-client-go/wski18n" + "net/http" + "net/url" + "strings" +) + +type ActionService struct { + client ClientInterface +} + +type Action struct { + Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Exec *Exec `json:"exec,omitempty"` + Annotations KeyValueArr `json:"annotations,omitempty"` + Parameters KeyValueArr `json:"parameters,omitempty"` + Limits *Limits `json:"limits,omitempty"` + Error string `json:"error,omitempty"` + Code int `json:"code,omitempty"` + Publish *bool `json:"publish,omitempty"` + Updated int64 `json:"updated,omitempty"` +} + +type Exec struct { + Kind string `json:"kind,omitempty"` + Code *string `json:"code,omitempty"` + Image string `json:"image,omitempty"` + Init string `json:"init,omitempty"` + Main string `json:"main,omitempty"` + Components []string `json:"components,omitempty"` // List of fully qualified actions + Binary *bool `json:"binary,omitempty"` +} + +type ActionListOptions struct { + Limit int `url:"limit"` + Skip int `url:"skip"` + Docs bool `url:"docs,omitempty"` +} + +// Compare(sortable) compares action to sortable for the purpose of sorting. +// REQUIRED: sortable must also be of type Action. +// ***Method of type Sortable*** +func (action Action) Compare(sortable Sortable) bool { + // Sorts alphabetically by NAMESPACE -> PACKAGE_NAME -> ACTION_NAME, with + // actions under default package at the top. 
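+	// For example (editor's note): an action in the default package, such as
+	// /guest/hello, sorts ahead of a packaged action such as /guest/utils/echo,
+	// regardless of alphabetical order.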
+	var actionString string
+	var compareString string
+	actionToCompare := sortable.(Action)
+
+	actionString = strings.ToLower(fmt.Sprintf("%s%s", action.Namespace, action.Name))
+	compareString = strings.ToLower(fmt.Sprintf("%s%s", actionToCompare.Namespace,
+		actionToCompare.Name))
+	if strings.Contains(action.Namespace, "/") && !strings.Contains(actionToCompare.Namespace, "/") {
+		return false
+	} else if !strings.Contains(action.Namespace, "/") && strings.Contains(actionToCompare.Namespace, "/") {
+		return true
+	} else if strings.Contains(action.Namespace, "/") && strings.Contains(actionToCompare.Namespace, "/") {
+		return actionString < compareString
+	} else {
+		return action.Name < actionToCompare.Name
+	}
+}
+
+// ToHeaderString() returns the header for a list of actions
+func (action Action) ToHeaderString() string {
+	return fmt.Sprintf("%s\n", "actions")
+}
+
+// ToSummaryRowString() returns a compound string of required parameters for printing
+// from CLI command `wsk action list`.
+// ***Method of type Sortable***
+func (action Action) ToSummaryRowString() string {
+	var kind string
+	publishState := wski18n.T("private")
+
+	for i := range action.Annotations {
+		if action.Annotations[i].Key == "exec" {
+			kind = action.Annotations[i].Value.(string)
+			break
+		}
+	}
+	return fmt.Sprintf("%-70s %s %s\n", fmt.Sprintf("/%s/%s", action.Namespace, action.Name), publishState, kind)
+}
+
+/*
+Determines if an action is a web action by examining the action's annotations. A value of true is returned if the
+action's annotations contain a "web-export" key whose associated value is the boolean true. Otherwise, false
+is returned.
+*/
+func (action Action) WebAction() (webExportValue bool) {
+	webExport := action.Annotations.GetValue("web-export")
+	webExportValue, _ = webExport.(bool)
+
+	Debug(DbgInfo, "Web export value is '%t'\n", webExportValue)
+
+	return webExportValue
+}
+
+/*
+Returns the URL of an action as a string. A valid API host, path and version must be passed. The name of the package
+containing the action must be passed as well; pass an empty string if the action is not packaged.
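+
+Illustrative results (editor's addition; host, namespace and names are assumptions,
+with a base URL of https://my.apihost.net/api and API version "v1"):
+
+	web action:        https://my.apihost.net/api/v1/web/guest/default/hello
+	unpackaged action: https://my.apihost.net/api/v1/namespaces/guest/actions/hello
+	packaged action:   https://my.apihost.net/api/v1/namespaces/guest/actions/util/hello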
+*/
+func (action Action) ActionURL(apiHost string, apiPath string, apiVersion string, pkg string) (string, error) {
+	baseURL, err := GetURLBase(apiHost, apiPath)
+	if err != nil {
+		Debug(DbgError, "GetURLBase(%s, %s) failed: %s\n", apiHost, apiPath, err)
+		return "", err
+	}
+	webActionPath := "%s/%s/web/%s/%s/%s"
+	actionPath := "%s/%s/namespaces/%s/actions/%s"
+	packagedActionPath := actionPath + "/%s"
+	namespace := strings.Split(action.Namespace, "/")[0]
+	namespace = strings.Replace(url.QueryEscape(namespace), "+", "%20", -1)
+	name := strings.Replace(url.QueryEscape(action.Name), "+", "%20", -1)
+	pkg = strings.Replace(url.QueryEscape(pkg), "+", "%20", -1)
+
+	var actionURL string
+	if action.WebAction() {
+		if len(pkg) == 0 {
+			pkg = "default"
+		}
+
+		actionURL = fmt.Sprintf(webActionPath, baseURL, apiVersion, namespace, pkg, name)
+		Debug(DbgInfo, "Web action URL: %s\n", actionURL)
+	} else {
+		if len(pkg) == 0 {
+			actionURL = fmt.Sprintf(actionPath, baseURL, apiVersion, namespace, name)
+			Debug(DbgInfo, "Action URL: %s\n", actionURL)
+		} else {
+			actionURL = fmt.Sprintf(packagedActionPath, baseURL, apiVersion, namespace, pkg, name)
+			Debug(DbgInfo, "Packaged action URL: %s\n", actionURL)
+		}
+	}
+
+	return actionURL, nil
+}
+
+////////////////////
+// Action Methods //
+////////////////////
+
+func (s *ActionService) List(packageName string, options *ActionListOptions) ([]Action, *http.Response, error) {
+	var route string
+	var actions []Action
+
+	if len(packageName) > 0 {
+		// Encode resource name as a path (with no query params) before inserting it into the URI
+		// This way any '?' chars in the name won't be treated as the beginning of the query params
+		packageName = (&url.URL{Path: packageName}).String()
+		route = fmt.Sprintf("actions/%s/", packageName)
+	} else {
+		route = "actions"
+	}
+
+	routeUrl, err := addRouteOptions(route, options)
+	if err != nil {
+		Debug(DbgError, "addRouteOptions(%s, %#v) error: '%s'\n", route, options, err)
+		errMsg := wski18n.T("Unable to add route options '{{.options}}'",
+			map[string]interface{}{"options": options})
+		whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG,
+			NO_DISPLAY_USAGE)
+		return nil, nil, whiskErr
+	}
+	Debug(DbgInfo, "Action list route with options: %s\n", route)
+
+	req, err := s.client.NewRequestUrl("GET", routeUrl, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired)
+	if err != nil {
+		Debug(DbgError, "http.NewRequestUrl(GET, %s, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired) error: '%s'\n", routeUrl, err)
+		errMsg := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}",
+			map[string]interface{}{"route": routeUrl, "err": err})
+		whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG,
+			NO_DISPLAY_USAGE)
+		return nil, nil, whiskErr
+	}
+
+	resp, err := s.client.Do(req, &actions, ExitWithSuccessOnTimeout)
+	if err != nil {
+		Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err)
+		return nil, resp, err
+	}
+
+	return actions, resp, err
+}
+
+func (s *ActionService) Insert(action *Action, overwrite bool) (*Action, *http.Response, error) {
+	// Encode resource name as a path (with no query params) before inserting it into the URI
+	// This way any '?'
chars in the name won't be treated as the beginning of the query params + actionName := (&url.URL{Path: action.Name}).String() + route := fmt.Sprintf("actions/%s?overwrite=%t", actionName, overwrite) + Debug(DbgInfo, "Action insert route: %s\n", route) + + req, err := s.client.NewRequest("PUT", route, action, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(PUT, %s, %#v) error: '%s'\n", route, action, err) + errMsg := wski18n.T("Unable to create HTTP request for PUT '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, nil, whiskErr + } + + a := new(Action) + resp, err := s.client.Do(req, &a, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return a, resp, nil +} + +func (s *ActionService) Get(actionName string, fetchCode bool) (*Action, *http.Response, error) { + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' chars in the name won't be treated as the beginning of the query params + actionName = (&url.URL{Path: actionName}).String() + route := fmt.Sprintf("actions/%s?code=%t", actionName, fetchCode) + + req, err := s.client.NewRequest("GET", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(GET, %s, nil) error: '%s'\n", route, err) + errMsg := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, nil, whiskErr + } + + a := new(Action) + resp, err := s.client.Do(req, &a, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return a, resp, nil +} + +func (s *ActionService) Delete(actionName string) (*http.Response, error) { + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' chars in the name won't be treated as the beginning of the query params + actionName = (&url.URL{Path: actionName}).String() + route := fmt.Sprintf("actions/%s", actionName) + Debug(DbgInfo, "HTTP route: %s\n", route) + + req, err := s.client.NewRequest("DELETE", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(DELETE, %s, nil) error: '%s'\n", route, err) + errMsg := wski18n.T("Unable to create HTTP request for DELETE '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, whiskErr + } + + a := new(Action) + resp, err := s.client.Do(req, a, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return resp, err + } + + return resp, nil +} + +func (s *ActionService) Invoke(actionName string, payload interface{}, blocking bool, result bool) (map[string]interface{}, *http.Response, error) { + var res map[string]interface{} + + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' 
chars in the name won't be treated as the beginning of the query params + actionName = (&url.URL{Path: actionName}).String() + route := fmt.Sprintf("actions/%s?blocking=%t&result=%t", actionName, blocking, result) + Debug(DbgInfo, "HTTP route: %s\n", route) + + req, err := s.client.NewRequest("POST", route, payload, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(POST, %s, %#v) error: '%s'\n", route, payload, err) + errMsg := wski18n.T("Unable to create HTTP request for POST '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, nil, whiskErr + } + + resp, err := s.client.Do(req, &res, blocking) + + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return res, resp, err + } + + return res, resp, nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/activation.go b/vendor/github.com/apache/openwhisk-client-go/whisk/activation.go new file mode 100644 index 00000000000..0204dbaea2c --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/activation.go @@ -0,0 +1,278 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package whisk
+
+import (
+	"errors"
+	"fmt"
+	"github.com/apache/openwhisk-client-go/wski18n"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+type ActivationService struct {
+	client *Client
+}
+
+type Activation struct {
+	Namespace    string `json:"namespace"`
+	Name         string `json:"name"`
+	Version      string `json:"version"`
+	Subject      string `json:"subject"`
+	ActivationID string `json:"activationId"`
+	Cause        string `json:"cause,omitempty"`
+	Start        int64  `json:"start"`    // When action started (in milliseconds since January 1, 1970 UTC)
+	End          int64  `json:"end"`      // Since a 0 is a valid value from server, don't omit
+	Duration     int64  `json:"duration"` // Only available for actions
+	StatusCode   int    `json:"statusCode"`
+	Response     `json:"response"`
+	Logs         []string    `json:"logs"`
+	Annotations  KeyValueArr `json:"annotations"`
+	Publish      *bool       `json:"publish,omitempty"`
+}
+
+type ActivationFilteredRow struct {
+	Row       Activation
+	HeaderFmt string
+	RowFmt    string
+}
+
+type Response struct {
+	Status     string  `json:"status"`
+	StatusCode int     `json:"statusCode"`
+	Success    bool    `json:"success"`
+	Result     *Result `json:"result,omitempty"`
+}
+
+type Result map[string]interface{}
+
+type ActivationListOptions struct {
+	Name  string `url:"name,omitempty"`
+	Limit int    `url:"limit"`
+	Skip  int    `url:"skip"`
+	Since int64  `url:"since,omitempty"`
+	Upto  int64  `url:"upto,omitempty"`
+	Docs  bool   `url:"docs,omitempty"`
+}
+
+// MWD - This structure may no longer be needed as the log format is now a string and not JSON
+type Log struct {
+	Log    string `json:"log,omitempty"`
+	Stream string `json:"stream,omitempty"`
+	Time   string `json:"time,omitempty"`
+}
+
+// Status codes to descriptions
+var StatusCodes = []string{"success", "application error", "developer error", "internal error"}
+
+// Compare(sortable) compares activation to sortable for the purpose of sorting.
+// REQUIRED: sortable must also be of type Activation.
+// ***Method of type Sortable***
+// ***Currently, no method of sorting defined***
+func (activation Activation) Compare(sortable Sortable) bool {
+	return true
+}
+
+// Compare(sortable) compares activation to sortable for the purpose of sorting.
+// REQUIRED: sortable must also be of type ActivationFilteredRow.
+// ***Method of type Sortable***
+// ***Currently, no method of sorting defined***
+func (activation ActivationFilteredRow) Compare(sortable Sortable) bool {
+	return true
+}
+
+// ToHeaderString() returns the header for a list of activations
+func (activation ActivationFilteredRow) ToHeaderString() string {
+	return fmt.Sprintf(activation.HeaderFmt, "Datetime", "Activation ID", "Kind", "Start", "Duration", "Status", "Entity")
+}
+
+// TruncateStr() returns the string, truncated with "..." in the middle if it exceeds the specified length
+func TruncateStr(str string, maxlen int) string {
+	if len(str) <= maxlen {
+		return str
+	} else {
+		mid := maxlen / 2
+		upp := len(str) - mid + 3
+		if maxlen%2 != 0 {
+			mid++
+		}
+		return str[0:mid] + "..." + str[upp:]
+	}
+}
+
+// ToSummaryRowString() returns a compound string of required parameters for printing
+// from CLI command `wsk activation list`.
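+// An illustrative row (editor's example; every value below is made up):
+//
+//	2019-03-06 12:34:56 0123456789abcdef nodejs:10 warm 1.234s success guest/hello:0.0.1
+//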
+// ***Method of type Sortable*** +func (activation ActivationFilteredRow) ToSummaryRowString() string { + s := time.Unix(0, activation.Row.Start*1000000) + e := time.Unix(0, activation.Row.End*1000000) + + var duration = e.Sub(s) + var kind interface{} = activation.Row.Annotations.GetValue("kind") + var initTime interface{} = activation.Row.Annotations.GetValue("initTime") + var status = StatusCodes[0] // assume success + var start = "warm" // assume warm + var fqn = TruncateStr(activation.Row.Namespace, 20) + "/" + TruncateStr(activation.Row.Name, 30) + ":" + TruncateStr(activation.Row.Version, 20) + + if activation.Row.Duration == 0 { + duration = s.Sub(s) + } + if kind == nil { + kind = "unknown" + } + if activation.Row.StatusCode > 0 && activation.Row.StatusCode < len(StatusCodes) { + status = StatusCodes[activation.Row.StatusCode] + } + if initTime != nil { + start = "cold" + } + + return fmt.Sprintf( + activation.RowFmt+strconv.Itoa(len(fqn))+"s\n", + s.Year(), s.Month(), s.Day(), s.Hour(), s.Minute(), s.Second(), + activation.Row.ActivationID, + kind.(string), + start, + duration, + status, + fqn) +} + +func (s *ActivationService) List(options *ActivationListOptions) ([]Activation, *http.Response, error) { + // TODO :: for some reason /activations only works with "_" as namespace + s.client.Namespace = "_" + route := "activations" + routeUrl, err := addRouteOptions(route, options) + if err != nil { + Debug(DbgError, "addRouteOptions(%s, %#v) error: '%s'\n", route, options, err) + errStr := wski18n.T("Unable to append options '{{.options}}' to URL route '{{.route}}': {{.err}}", + map[string]interface{}{"options": fmt.Sprintf("%#v", options), "route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + req, err := s.client.NewRequestUrl("GET", routeUrl, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired) + if err != nil { + Debug(DbgError, "http.NewRequestUrl(GET, %s, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired) error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + Debug(DbgInfo, "Sending HTTP request - URL '%s'; req %#v\n", req.URL.String(), req) + + var activations []Activation + resp, err := s.client.Do(req, &activations, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return activations, resp, nil +} + +func (s *ActivationService) Get(activationID string) (*Activation, *http.Response, error) { + // TODO :: for some reason /activations/:id only works with "_" as namespace + s.client.Namespace = "_" + + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' 
chars in the name won't be treated as the beginning of the query params + activationID = (&url.URL{Path: activationID}).String() + route := fmt.Sprintf("activations/%s", activationID) + + req, err := s.client.NewRequest("GET", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(GET, %s) error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + Debug(DbgInfo, "Sending HTTP request - URL '%s'; req %#v\n", req.URL.String(), req) + + a := new(Activation) + resp, err := s.client.Do(req, &a, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return a, resp, nil +} + +func (s *ActivationService) Logs(activationID string) (*Activation, *http.Response, error) { + // TODO :: for some reason /activations/:id/logs only works with "_" as namespace + s.client.Namespace = "_" + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' chars in the name won't be treated as the beginning of the query params + activationID = (&url.URL{Path: activationID}).String() + route := fmt.Sprintf("activations/%s/logs", activationID) + + req, err := s.client.NewRequest("GET", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(GET, %s) error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + Debug(DbgInfo, "Sending HTTP request - URL '%s'; req %#v\n", req.URL.String(), req) + + activation := new(Activation) + resp, err := s.client.Do(req, &activation, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return activation, resp, nil +} + +func (s *ActivationService) Result(activationID string) (*Response, *http.Response, error) { + // TODO :: for some reason /activations only works with "_" as namespace + s.client.Namespace = "_" + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' 
chars in the name won't be treated as the beginning of the query params + activationID = (&url.URL{Path: activationID}).String() + route := fmt.Sprintf("activations/%s/result", activationID) + + req, err := s.client.NewRequest("GET", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(GET, %s) error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + Debug(DbgInfo, "Sending HTTP request - URL '%s'; req %#v\n", req.URL.String(), req) + + r := new(Response) + resp, err := s.client.Do(req, &r, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return r, resp, nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/api.go b/vendor/github.com/apache/openwhisk-client-go/whisk/api.go new file mode 100644 index 00000000000..51ed0d58f22 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/api.go @@ -0,0 +1,548 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package whisk + +import ( + "errors" + "fmt" + "github.com/apache/openwhisk-client-go/wski18n" + "net/http" + "strings" +) + +type ApiService struct { + client *Client +} + +// wsk api create : Request, Response +type ApiCreateRequest struct { + ApiDoc *Api `json:"apidoc,omitempty"` +} +type ApiCreateRequestOptions ApiOptions +type ApiCreateResponse RetApi + +// wsk api list : Request, Response +type ApiListRequest struct { +} +type ApiListRequestOptions struct { + ApiOptions + Limit int `url:"limit"` + Skip int `url:"skip"` + Docs bool `url:"docs,omitempty"` +} +type ApiListResponse RetApiArray + +// wsk api get : Request, Response +type ApiGetRequest struct { + Api +} +type ApiGetRequestOptions ApiOptions +type ApiGetResponse RetApiArray + +// wsk api delete : Request, Response +type ApiDeleteRequest struct { + Api +} +type ApiDeleteRequestOptions ApiOptions +type ApiDeleteResponse struct{} + +type Api struct { + Namespace string `json:"namespace,omitempty"` + ApiName string `json:"apiName,omitempty"` + GatewayBasePath string `json:"gatewayBasePath,omitempty"` + GatewayRelPath string `json:"gatewayPath,omitempty"` + GatewayMethod string `json:"gatewayMethod,omitempty"` + Id string `json:"id,omitempty"` + GatewayFullPath string `json:"gatewayFullPath,omitempty"` + Swagger string `json:"swagger,omitempty"` + Action *ApiAction `json:"action,omitempty"` + PathParameters []ApiParameter `json:"pathParameters,omitempty"` +} + +type ApiParameter struct { + Name string `json:"name"` + In string `json:"in"` + Description string `json:"description,omitempty"` + Required bool `json:"required,omitempty"` + Type string `json:"type,omitempty"` + Format string `json:"format,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` + Items map[string]interface{} `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum int `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum int `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength int `json:"maxLength,omitempty"` + MinLength int `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems int `json:"maxItems,omitempty"` + MinItems int `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf int `json:"multipleOf,omitempty"` + Enum interface{} `json:"enum,omitempty"` + Ref string `json:"$ref,omitempty"` +} + +type ApiAction struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + BackendMethod string `json:"backendMethod,omitempty"` + BackendUrl string `json:"backendUrl,omitempty"` + Auth string `json:"authkey,omitempty"` + SecureKey interface{} `json:"secureKey,omitempty"` +} + +type ApiOptions struct { + ActionName string `url:"action,omitempty"` + ApiBasePath string `url:"basepath,omitempty"` + ApiRelPath string `url:"relpath,omitempty"` + ApiVerb string `url:"operation,omitempty"` + ApiName string `url:"apiname,omitempty"` + SpaceGuid string `url:"spaceguid,omitempty"` + AccessToken string `url:"accesstoken,omitempty"` + ResponseType string `url:"responsetype,omitempty"` +} + +type ApiUserAuth struct { + SpaceGuid string `json:"spaceguid,omitempty"` + AccessToken string `json:"accesstoken,omitempty"` +} + +type RetApiArray struct { + Apis []ApiItem `json:"apis,omitempty"` +} + +type ApiItem struct { + ApiId string `json:"id,omitempty"` + QueryKey string 
`json:"key,omitempty"` + ApiValue *RetApi `json:"value,omitempty"` +} + +type RetApi struct { + Namespace string `json:"namespace"` + BaseUrl string `json:"gwApiUrl"` + Activated bool `json:"gwApiActivated"` + TenantId string `json:"tenantId"` + Swagger *ApiSwagger `json:"apidoc,omitempty"` +} + +type ApiSwagger struct { + SwaggerName string `json:"swagger,omitempty"` + BasePath string `json:"basePath,omitempty"` + Info *ApiSwaggerInfo `json:"info,omitempty"` + Paths map[string]*ApiSwaggerPath `json:"paths,omitempty"` + SecurityDef interface{} `json:"securityDefinitions,omitempty"` + Security interface{} `json:"security,omitempty"` + XConfig interface{} `json:"x-ibm-configuration,omitempty"` + XRateLimit interface{} `json:"x-ibm-rate-limit,omitempty"` +} + +type ApiSwaggerPath struct { + Get *ApiSwaggerOperation `json:"get,omitempty"` + Put *ApiSwaggerOperation `json:"put,omitempty"` + Post *ApiSwaggerOperation `json:"post,omitempty"` + Delete *ApiSwaggerOperation `json:"delete,omitempty"` + Options *ApiSwaggerOperation `json:"options,omitempty"` + Head *ApiSwaggerOperation `json:"head,omitempty"` + Patch *ApiSwaggerOperation `json:"patch,omitempty"` + Parameters []ApiParameter `json:"parameters,omitempty"` +} + +func (asp *ApiSwaggerPath) MakeOperationMap() map[string]*ApiSwaggerOperation { + var opMap map[string]*ApiSwaggerOperation = make(map[string]*ApiSwaggerOperation) + if asp.Get != nil { + opMap["get"] = asp.Get + } + if asp.Put != nil { + opMap["put"] = asp.Put + } + if asp.Post != nil { + opMap["post"] = asp.Post + } + if asp.Delete != nil { + opMap["delete"] = asp.Delete + } + if asp.Options != nil { + opMap["options"] = asp.Options + } + if asp.Head != nil { + opMap["head"] = asp.Head + } + if asp.Patch != nil { + opMap["patch"] = asp.Patch + } + return opMap +} + +type ApiSwaggerInfo struct { + Title string `json:"title,omitempty"` + Version string `json:"version,omitempty"` +} + +type ApiSwaggerOperation struct { + OperationId string `json:"operationId"` + Parameters []ApiParameter `json:"parameters,omitempty"` + Responses interface{} `json:"responses"` + XOpenWhisk *ApiSwaggerOpXOpenWhisk `json:"x-openwhisk,omitempty"` +} + +type ApiSwaggerOpXOpenWhisk struct { + ActionName string `json:"action"` + Namespace string `json:"namespace"` + Package string `json:"package"` + ApiUrl string `json:"url"` +} + +// Used for printing individual APIs in non-truncated form +type ApiFilteredList struct { + ActionName string + ApiName string + BasePath string + RelPath string + Verb string + Url string +} + +// Used for printing individual APIs in truncated form +type ApiFilteredRow struct { + ActionName string + ApiName string + BasePath string + RelPath string + Verb string + Url string + FmtString string +} + +var ApiVerbs map[string]bool = map[string]bool{ + "GET": true, + "PUT": true, + "POST": true, + "DELETE": true, + "PATCH": true, + "HEAD": true, + "OPTIONS": true, +} + +const ( + Overwrite = true + DoNotOverwrite = false +) + +///////////////// +// Api Methods // +///////////////// + +// Compare(sortable) compares api to sortable for the purpose of sorting. +// REQUIRED: sortable must also be of type ApiFilteredList. 
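+//
+// For illustration only (hypothetical values, not from this repository):
+// entries sort by the lower-cased concatenation of BasePath, RelPath and Verb,
+// so, for example,
+//
+//    a := ApiFilteredList{BasePath: "/demo", RelPath: "/x", Verb: "get"}
+//    b := ApiFilteredList{BasePath: "/demo", RelPath: "/x", Verb: "post"}
+//
+// a.Compare(b) returns true because "get" sorts before "post".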
+// ***Method of type Sortable*** +func (api ApiFilteredList) Compare(sortable Sortable) bool { + // Sorts alphabetically by [BASE_PATH | API_NAME] -> REL_PATH -> API_VERB + apiToCompare := sortable.(ApiFilteredList) + var apiString string + var compareString string + + apiString = strings.ToLower(fmt.Sprintf("%s%s%s", api.BasePath, api.RelPath, + api.Verb)) + compareString = strings.ToLower(fmt.Sprintf("%s%s%s", apiToCompare.BasePath, + apiToCompare.RelPath, apiToCompare.Verb)) + + return apiString < compareString +} + +// ToHeaderString() returns the header for a list of apis +func (api ApiFilteredList) ToHeaderString() string { + return "" +} + +// ToSummaryRowString() returns a compound string of required parameters for printing +// from CLI command `wsk api list` or `wsk api-experimental list`. +// ***Method of type Sortable*** +func (api ApiFilteredList) ToSummaryRowString() string { + return fmt.Sprintf("%s %s %s %s %s %s", + fmt.Sprintf("%s: %s\n", wski18n.T("Action"), api.ActionName), + fmt.Sprintf(" %s: %s\n", wski18n.T("API Name"), api.ApiName), + fmt.Sprintf(" %s: %s\n", wski18n.T("Base path"), api.BasePath), + fmt.Sprintf(" %s: %s\n", wski18n.T("Path"), api.RelPath), + fmt.Sprintf(" %s: %s\n", wski18n.T("Verb"), api.Verb), + fmt.Sprintf(" %s: %s\n", wski18n.T("URL"), api.Url)) +} + +// Compare(sortable) compares api to sortable for the purpose of sorting. +// REQUIRED: sortable must also be of type ApiFilteredRow. +// ***Method of type Sortable*** +func (api ApiFilteredRow) Compare(sortable Sortable) bool { + // Sorts alphabetically by [BASE_PATH | API_NAME] -> REL_PATH -> API_VERB + var apiString string + var compareString string + apiToCompare := sortable.(ApiFilteredRow) + + apiString = strings.ToLower(fmt.Sprintf("%s%s%s", api.BasePath, api.RelPath, + api.Verb)) + compareString = strings.ToLower(fmt.Sprintf("%s%s%s", apiToCompare.BasePath, + apiToCompare.RelPath, apiToCompare.Verb)) + + return apiString < compareString +} + +// ToHeaderString() returns the header for a list of apis +func (api ApiFilteredRow) ToHeaderString() string { + return fmt.Sprintf("%s", fmt.Sprintf(api.FmtString, "Action", "Verb", "API Name", "URL")) +} + +// ToSummaryRowString() returns a compound string of required parameters for printing +// from CLI command `wsk api list -f` or `wsk api-experimental list -f`. 
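+//
+// For illustration only (hypothetical values, not from this repository): with
+// a format string such as "%-20s %-6s %-20s %s\n",
+//
+//    row := ApiFilteredRow{ActionName: "hello", Verb: "get", ApiName: "demo",
+//        Url: "https://host/demo/hello", FmtString: "%-20s %-6s %-20s %s\n"}
+//
+// row.ToSummaryRowString() yields one fixed-width table line, and
+// row.ToHeaderString() yields the matching "Action Verb API Name URL" header.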
+// ***Method of type Sortable*** +func (api ApiFilteredRow) ToSummaryRowString() string { + return fmt.Sprintf(api.FmtString, api.ActionName, api.Verb, api.ApiName, api.Url) +} + +func (s *ApiService) List(apiListOptions *ApiListRequestOptions) (*ApiListResponse, *http.Response, error) { + route := "web/whisk.system/apimgmt/getApi.http" + + routeUrl, err := addRouteOptions(route, apiListOptions) + if err != nil { + Debug(DbgError, "addRouteOptions(%s, %#v) error: '%s'\n", route, apiListOptions, err) + errMsg := wski18n.T("Unable to add route options '{{.options}}'", + map[string]interface{}{"options": apiListOptions}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, nil, whiskErr + } + Debug(DbgInfo, "Api GET/list route with api options: %s\n", routeUrl) + + req, err := s.client.NewRequestUrl("GET", routeUrl, nil, DoNotIncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired) + if err != nil { + Debug(DbgError, "http.NewRequestUrl(GET, %s, nil, DoNotIncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson) error: '%s'\n", routeUrl, err) + errMsg := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}", + map[string]interface{}{"route": routeUrl, "err": err}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, nil, whiskErr + } + + apiArray := new(ApiListResponse) + resp, err := s.client.Do(req, &apiArray, ExitWithErrorOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + err = validateApiListResponse(apiArray) + if err != nil { + Debug(DbgError, "Not a valid ApiListReponse object\n") + return nil, resp, err + } + + return apiArray, resp, err +} + +func (s *ApiService) Insert(api *ApiCreateRequest, options *ApiCreateRequestOptions, overwrite bool) (*ApiCreateResponse, *http.Response, error) { + route := "web/whisk.system/apimgmt/createApi.http" + Debug(DbgInfo, "Api PUT route: %s\n", route) + + routeUrl, err := addRouteOptions(route, options) + if err != nil { + Debug(DbgError, "addRouteOptions(%s, %#v) error: '%s'\n", route, options, err) + errMsg := wski18n.T("Unable to add route options '{{.options}}'", + map[string]interface{}{"options": options}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, nil, whiskErr + } + Debug(DbgError, "Api create route with options: %s\n", routeUrl) + + req, err := s.client.NewRequestUrl("POST", routeUrl, api, DoNotIncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired) + if err != nil { + Debug(DbgError, "http.NewRequestUrl(POST, %s, nil, DoNotIncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson) error: '%s'\n", route, err) + errMsg := wski18n.T("Unable to create HTTP request for POST '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, nil, whiskErr + } + + retApi := new(ApiCreateResponse) + resp, err := s.client.Do(req, &retApi, ExitWithErrorOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + err = validateApiSwaggerResponse(retApi.Swagger) + if err != nil { + 
Debug(DbgError, "Not a valid API creation response\n") + return nil, resp, err + } + + return retApi, resp, nil +} + +func (s *ApiService) Get(api *ApiGetRequest, options *ApiGetRequestOptions) (*ApiGetResponse, *http.Response, error) { + route := "web/whisk.system/apimgmt/getApi.http" + Debug(DbgInfo, "Api GET route: %s\n", route) + + routeUrl, err := addRouteOptions(route, options) + if err != nil { + Debug(DbgError, "addRouteOptions(%s, %#v) error: '%s'\n", route, options, err) + errMsg := wski18n.T("Unable to add route options '{{.options}}'", + map[string]interface{}{"options": options}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, nil, whiskErr + } + Debug(DbgError, "Api get route with options: %s\n", routeUrl) + + req, err := s.client.NewRequestUrl("GET", routeUrl, nil, DoNotIncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired) + if err != nil { + Debug(DbgError, "http.NewRequestUrl(GET, %s, nil, DoNotIncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson) error: '%s'\n", route, err) + errMsg := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, nil, whiskErr + } + + retApi := new(ApiGetResponse) + resp, err := s.client.Do(req, &retApi, ExitWithErrorOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return retApi, resp, nil +} + +func (s *ApiService) Delete(api *ApiDeleteRequest, options *ApiDeleteRequestOptions) (*http.Response, error) { + route := "web/whisk.system/apimgmt/deleteApi.http" + Debug(DbgInfo, "Api DELETE route: %s\n", route) + + routeUrl, err := addRouteOptions(route, options) + if err != nil { + Debug(DbgError, "addRouteOptions(%s, %#v) error: '%s'\n", route, options, err) + errMsg := wski18n.T("Unable to add route options '{{.options}}'", + map[string]interface{}{"options": options}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, whiskErr + } + Debug(DbgError, "Api DELETE route with options: %s\n", routeUrl) + + req, err := s.client.NewRequestUrl("DELETE", routeUrl, nil, DoNotIncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired) + if err != nil { + Debug(DbgError, "http.NewRequestUrl(DELETE, %s, nil, DoNotIncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson) error: '%s'\n", route, err) + errMsg := wski18n.T("Unable to create HTTP request for DELETE '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + whiskErr := MakeWskErrorFromWskError(errors.New(errMsg), err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, + NO_DISPLAY_USAGE) + return nil, whiskErr + } + + retApi := new(ApiDeleteResponse) + resp, err := s.client.Do(req, &retApi, ExitWithErrorOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return resp, err + } + + return nil, nil +} + +func validateApiListResponse(apiList *ApiListResponse) error { + for i := 0; i < len(apiList.Apis); i++ { + if apiList.Apis[i].ApiValue == nil { + Debug(DbgError, "validateApiResponse: No value stanza in api %v\n", apiList.Apis[i]) + errMsg := wski18n.T("Internal error. 
Missing value stanza in API configuration response") + whiskErr := MakeWskError(errors.New(errMsg), EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, NO_DISPLAY_USAGE) + return whiskErr + } + err := validateApiSwaggerResponse(apiList.Apis[i].ApiValue.Swagger) + if err != nil { + Debug(DbgError, "validateApiListResponse: Invalid Api: %v\n", apiList.Apis[i]) + return err + } + } + return nil +} + +func validateApiSwaggerResponse(swagger *ApiSwagger) error { + if swagger == nil { + Debug(DbgError, "validateApiSwaggerResponse: No apidoc stanza in api\n") + errMsg := wski18n.T("Internal error. Missing apidoc stanza in API configuration") + whiskErr := MakeWskError(errors.New(errMsg), EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, NO_DISPLAY_USAGE) + return whiskErr + } + for path := range swagger.Paths { + err := validateApiPath(swagger.Paths[path]) + if err != nil { + Debug(DbgError, "validateApiResponse: Invalid Api Path object: %v\n", swagger.Paths[path]) + return err + } + } + + return nil +} + +func validateApiPath(path *ApiSwaggerPath) error { + for op, opv := range path.MakeOperationMap() { + err := validateApiOperation(op, opv) + if err != nil { + Debug(DbgError, "validateApiPath: Invalid Api operation object: %v\n", opv) + return err + } + } + return nil +} + +func validateApiOperation(opName string, op *ApiSwaggerOperation) error { + if op.XOpenWhisk != nil && len(op.OperationId) == 0 { + Debug(DbgError, "validateApiOperation: No operationId field in operation %v\n", op) + errMsg := wski18n.T("Missing operationId field in API configuration for operation {{.op}}", + map[string]interface{}{"op": opName}) + whiskErr := MakeWskError(errors.New(errMsg), EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, NO_DISPLAY_USAGE) + return whiskErr + } + + if op.XOpenWhisk != nil && len(op.XOpenWhisk.Namespace) == 0 { + Debug(DbgError, "validateApiOperation: no x-openwhisk.namespace stanza in operation %v\n", op) + errMsg := wski18n.T("Missing x-openwhisk.namespace field in API configuration for operation {{.op}}", + map[string]interface{}{"op": opName}) + whiskErr := MakeWskError(errors.New(errMsg), EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, NO_DISPLAY_USAGE) + return whiskErr + } + + // Note: The op.XOpenWhisk.Package field can have a value of "", so don't enforce a value + + if op.XOpenWhisk != nil && len(op.XOpenWhisk.ActionName) == 0 { + Debug(DbgError, "validateApiOperation: no x-openwhisk.action stanza in operation %v\n", op) + errMsg := wski18n.T("Missing x-openwhisk.action field in API configuration for operation {{.op}}", + map[string]interface{}{"op": opName}) + whiskErr := MakeWskError(errors.New(errMsg), EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, NO_DISPLAY_USAGE) + return whiskErr + } + if op.XOpenWhisk != nil && len(op.XOpenWhisk.ApiUrl) == 0 { + Debug(DbgError, "validateApiOperation: no x-openwhisk.url stanza in operation %v\n", op) + errMsg := wski18n.T("Missing x-openwhisk.url field in API configuration for operation {{.op}}", + map[string]interface{}{"op": opName}) + whiskErr := MakeWskError(errors.New(errMsg), EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, NO_DISPLAY_USAGE) + return whiskErr + } + return nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/client.go b/vendor/github.com/apache/openwhisk-client-go/whisk/client.go new file mode 100644 index 00000000000..9ea01bcc150 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/client.go @@ -0,0 +1,914 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package whisk + +import ( + "bytes" + "crypto/tls" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "reflect" + "regexp" + "runtime" + "strings" + "time" + + "github.com/apache/openwhisk-client-go/wski18n" +) + +const ( + AuthRequired = true + NoAuth = false + IncludeNamespaceInUrl = true + DoNotIncludeNamespaceInUrl = false + AppendOpenWhiskPathPrefix = true + DoNotAppendOpenWhiskPathPrefix = false + EncodeBodyAsJson = "json" + EncodeBodyAsFormData = "formdata" + ProcessTimeOut = true + DoNotProcessTimeOut = false + ExitWithErrorOnTimeout = true + ExitWithSuccessOnTimeout = false + DEFAULT_HTTP_TIMEOUT = 30 +) + +type ClientInterface interface { + NewRequestUrl(method string, urlRelResource *url.URL, body interface{}, includeNamespaceInUrl bool, appendOpenWhiskPath bool, encodeBodyAs string, useAuthentication bool) (*http.Request, error) + NewRequest(method, urlStr string, body interface{}, includeNamespaceInUrl bool) (*http.Request, error) + Do(req *http.Request, v interface{}, ExitWithErrorOnTimeout bool, secretToObfuscate ...ObfuscateSet) (*http.Response, error) +} + +type TriggerServiceInterface interface { + List(options *TriggerListOptions) ([]Trigger, *http.Response, error) + Insert(trigger *Trigger, overwrite bool) (*Trigger, *http.Response, error) + Get(triggerName string) (*Trigger, *http.Response, error) + Delete(triggerName string) (*Trigger, *http.Response, error) + Fire(triggerName string, payload interface{}) (*Trigger, *http.Response, error) +} + +type Client struct { + client *http.Client + *Config + Transport *http.Transport + + Sdks *SdkService + Triggers TriggerServiceInterface + Actions *ActionService + Rules *RuleService + Activations *ActivationService + Packages *PackageService + Namespaces *NamespaceService + Info *InfoService + Apis *ApiService +} + +type Config struct { + Namespace string // NOTE :: Default is "_" + Cert string + Key string + AuthToken string + Host string + BaseURL *url.URL // NOTE :: Default is "openwhisk.ng.bluemix.net" + Version string + Verbose bool + Debug bool // For detailed tracing + Insecure bool + UserAgent string + ApigwAccessToken string + ApigwTenantId string + AdditionalHeaders http.Header +} + +type ObfuscateSet struct { + Regex string + Replacement string +} + +var DefaultObfuscateArr = []ObfuscateSet{ + { + Regex: "\"[Pp]assword\":\\s*\".*\"", + Replacement: `"password": "******"`, + }, +} + +// NewClient creates a new whisk client with the provided http client and whisk configuration. +// +// A new http.Transport will be created when client cert or TLS insecure options are set. +// If one uses a custom transport and wants to keep it intact, opt out of the TLS-related +// fields in configInput and construct the TLS configuration in the custom transport.
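+//
+// A minimal usage sketch (illustrative only; the host and key below are
+// placeholders, not values from this repository):
+//
+//    config := &whisk.Config{
+//        Host:      "openwhisk.example.com",
+//        AuthToken: "user:key",
+//    }
+//    client, err := whisk.NewClient(nil, config) // nil selects a default http.Client
+//    if err != nil {
+//        // handle configuration error
+//    }
+//    namespaces, _, err := client.Namespaces.List()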
+func NewClient(httpClient *http.Client, configInput *Config) (*Client, error) { + + var config *Config + if configInput == nil { + defaultConfig, err := GetDefaultConfig() + if err != nil { + return nil, err + } else { + config = defaultConfig + } + } else { + config = configInput + } + + if httpClient == nil { + httpClient = &http.Client{ + Timeout: time.Second * DEFAULT_HTTP_TIMEOUT, + } + } + + var err error + var errStr = "" + if len(config.Host) == 0 { + errStr = wski18n.T("Unable to create request URL, because OpenWhisk API host is missing") + } else if config.BaseURL == nil { + config.BaseURL, err = GetUrlBase(config.Host) + if err != nil { + Debug(DbgError, "Unable to create request URL, because the api host %s is invalid: %s\n", config.Host, err) + errStr = wski18n.T("Unable to create request URL, because the api host '{{.host}}' is invalid: {{.err}}", + map[string]interface{}{"host": config.Host, "err": err}) + } + } + + if len(errStr) != 0 { + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + + if len(config.Namespace) == 0 { + config.Namespace = "_" + } + + if len(config.Version) == 0 { + config.Version = "v1" + } + + if len(config.UserAgent) == 0 { + config.UserAgent = "OpenWhisk-Go-Client " + runtime.GOOS + " " + runtime.GOARCH + } + + c := &Client{ + client: httpClient, + Config: config, + } + + c.Sdks = &SdkService{client: c} + c.Triggers = &TriggerService{client: c} + c.Actions = &ActionService{client: c} + c.Rules = &RuleService{client: c} + c.Activations = &ActivationService{client: c} + c.Packages = &PackageService{client: c} + c.Namespaces = &NamespaceService{client: c} + c.Info = &InfoService{client: c} + c.Apis = &ApiService{client: c} + + werr := c.LoadX509KeyPair() + if werr != nil { + return nil, werr + } + + return c, nil +} + +func (c *Client) LoadX509KeyPair() error { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.Config.Insecure, + } + + if c.Config.Cert != "" && c.Config.Key != "" { + if cert, err := ReadX509KeyPair(c.Config.Cert, c.Config.Key); err == nil { + tlsConfig.Certificates = []tls.Certificate{cert} + } else { + errStr := wski18n.T("Unable to load the X509 key pair due to the following reason: {{.err}}", + map[string]interface{}{"err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return werr + } + } else if !c.Config.Insecure { + if c.Config.Cert == "" { + warningStr := "The Cert file is not configured. Please configure the missing Cert file, if there is a security issue accessing the service.\n" + Debug(DbgWarn, warningStr) + if c.Config.Key != "" { + errStr := wski18n.T("The Cert file is not configured. Please configure the missing Cert file.\n") + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return werr + } + } + if c.Config.Key == "" { + warningStr := "The Key file is not configured. Please configure the missing Key file, if there is a security issue accessing the service.\n" + Debug(DbgWarn, warningStr) + if c.Config.Cert != "" { + errStr := wski18n.T("The Key file is not configured. 
Please configure the missing Key file.\n") + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return werr + } + } + } + + // Only replace the existing transport when a custom TLS configuration is needed + if tlsConfig.InsecureSkipVerify || tlsConfig.Certificates != nil { + if c.client.Transport != nil { + warningStr := "The provided http.Transport is replaced to match the TLS configuration. Custom transport cannot coexist with nondefault TLS configuration" + Debug(DbgWarn, warningStr) + } + // Use the defaultTransport as the transport basis to maintain proxy support + c.client.Transport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: tlsConfig, + } + } + + return nil +} + +var ReadX509KeyPair = func(certFile, keyFile string) (tls.Certificate, error) { + return tls.LoadX509KeyPair(certFile, keyFile) +} + +/////////////////////////////// +// Request/Utility Functions // +/////////////////////////////// + +func (c *Client) NewRequest(method, urlStr string, body interface{}, includeNamespaceInUrl bool) (*http.Request, error) { + if includeNamespaceInUrl { + if c.Config.Namespace != "" { + urlStr = fmt.Sprintf("%s/namespaces/%s/%s", c.Config.Version, c.Config.Namespace, urlStr) + } else { + urlStr = fmt.Sprintf("%s/namespaces", c.Config.Version) + } + } else { + urlStr = fmt.Sprintf("%s/%s", c.Config.Version, urlStr) + } + + urlStr = fmt.Sprintf("%s/%s", c.BaseURL.String(), urlStr) + u, err := url.Parse(urlStr) + if err != nil { + Debug(DbgError, "url.Parse(%s) error: %s\n", urlStr, err) + errStr := wski18n.T("Invalid request URL '{{.url}}': {{.err}}", + map[string]interface{}{"url": urlStr, "err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + + var buf io.ReadWriter + if body != nil { + buf = new(bytes.Buffer) + encoder := json.NewEncoder(buf) + encoder.SetEscapeHTML(false) + err := encoder.Encode(body) + + if err != nil { + Debug(DbgError, "json.Encode(%#v) error: %s\n", body, err) + errStr := wski18n.T("Error encoding request body: {{.err}}", map[string]interface{}{"err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + } + + req, err := http.NewRequest(method, u.String(), buf) + if err != nil { + Debug(DbgError, "http.NewRequest(%v, %s, buf) error: %s\n", method, u.String(), err) + errStr := wski18n.T("Error initializing request: {{.err}}", map[string]interface{}{"err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + if req.Body != nil { + req.Header.Add("Content-Type", "application/json") + } + + err = c.addAuthHeader(req, AuthRequired) + if err != nil { + Debug(DbgError, "addAuthHeader() error: %s\n", err) + errStr := wski18n.T("Unable to add the HTTP authentication header: {{.err}}", + map[string]interface{}{"err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + + req.Header.Add("User-Agent", c.Config.UserAgent) + + for key := range c.Config.AdditionalHeaders { + req.Header.Add(key, c.Config.AdditionalHeaders.Get(key)) + 
} + + return req, nil +} + +func (c *Client) addAuthHeader(req *http.Request, authRequired bool) error { + // Allow for authorization override via Additional Headers + authHeaderValue := c.Config.AdditionalHeaders.Get("Authorization") + if authHeaderValue != "" { + Debug(DbgInfo, "Using additional header authorization\n") + } else if c.Config.AuthToken != "" { + encodedAuthToken := base64.StdEncoding.EncodeToString([]byte(c.Config.AuthToken)) + req.Header.Add("Authorization", fmt.Sprintf("Basic %s", encodedAuthToken)) + Debug(DbgInfo, "Adding basic auth header; using authkey\n") + } else { + if authRequired { + Debug(DbgError, "The required authorization key is not configured - neither set as a property nor set via the --auth CLI argument\n") + errStr := wski18n.T("Authorization key is not configured (--auth is required)") + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_USAGE, DISPLAY_MSG, DISPLAY_USAGE) + return werr + } + } + return nil +} + +// BodyTruncator limits the size of the Req/Resp Body for --verbose ONLY. +// It returns the truncated Req/Resp Body, a reloaded io.ReadCloser and any errors. +func BodyTruncator(body io.ReadCloser) (string, io.ReadCloser, error) { + limit := 1000 // 1000 byte limit, anything over is truncated + + data, err := ioutil.ReadAll(body) + if err != nil { + Verbose("ioutil.ReadAll(req.Body) error: %s\n", err) + werr := MakeWskError(err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, NO_DISPLAY_USAGE) + return "", body, werr + } + + reload := ioutil.NopCloser(bytes.NewBuffer(data)) + + if len(data) > limit { + Verbose("Body exceeds %d bytes and will be truncated\n", limit) + newData := string(data)[:limit] + "..." + return string(newData), reload, nil + } + + return string(data), reload, nil +} + +// Do sends an API request and returns the API response. The API response is +// JSON decoded and stored in the value pointed to by v, or returned as an +// error if an API error has occurred. If v implements the io.Writer +// interface, the raw response body will be written to v, without attempting to +// first decode it. +func (c *Client) Do(req *http.Request, v interface{}, ExitWithErrorOnTimeout bool, secretToObfuscate ...ObfuscateSet) (*http.Response, error) { + var err error + var data []byte + secrets := append(DefaultObfuscateArr, secretToObfuscate...) + + req, err = PrintRequestInfo(req, secrets...) + // Propagate any error raised while printing the request (behavior kept from previous code) + if err != nil { + return nil, err + } + + // Issue the request to the Whisk server endpoint + resp, err := c.client.Do(req) + if err != nil { + Debug(DbgError, "HTTP Do() [req %s] error: %s\n", req.URL.String(), err) + werr := MakeWskError(err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + + resp, data, err = PrintResponseInfo(resp, secrets...) + if err != nil { + return resp, err + } + + // With the HTTP response status code and the HTTP body contents, + // the possible response scenarios are: + // + // 0. HTTP Success + Body indicating a whisk failure result + // 1. HTTP Success + Valid body matching request expectations + // 2. HTTP Success + No body expected + // 3. HTTP Success + Body does NOT match request expectations + // 4. HTTP Failure + No body + // 5. HTTP Failure + Body matching error format expectation + // 6. HTTP Failure + Body NOT matching error format expectation + + // Handle 4.
HTTP Failure + No body + // If this happens, just return no data and an error + if !IsHttpRespSuccess(resp) && data == nil { + Debug(DbgError, "HTTP failure %d + no body\n", resp.StatusCode) + werr := MakeWskError(errors.New(wski18n.T("Command failed due to an HTTP failure")), resp.StatusCode-256, + DISPLAY_MSG, NO_DISPLAY_USAGE) + return resp, werr + } + + // Handle 5. HTTP Failure + Body matching error format expectation, or body matching a whisk.error() response + // Handle 6. HTTP Failure + Body NOT matching error format expectation + if !IsHttpRespSuccess(resp) && data != nil { + return parseErrorResponse(resp, data, v) + } + + // Handle 0. HTTP Success + Body indicating a whisk failure result + // NOTE: Need to ignore activation records sent in response to `wsk activation get NNN` as + // these will report the same original error, giving the appearance that the command failed. + // Need to ignore `wsk action invoke NNN --result` too; otherwise an action whose result is something like + // '{"response": {"key": "value"}}' would cause such a command to return an error. + if IsHttpRespSuccess(resp) && // HTTP Status == 200 + data != nil && // HTTP response body exists + v != nil && + !strings.Contains(reflect.TypeOf(v).String(), "Activation") && // Request is not `wsk activation get` + !(req.URL.Query().Get("result") == "true") && // Request is not `wsk action invoke NNN --result` + !IsResponseResultSuccess(data) { // HTTP response body has Whisk error result + Debug(DbgInfo, "Got successful HTTP; but activation response reports an error\n") + return parseErrorResponse(resp, data, v) + } + + // Handle 2. HTTP Success + No body expected + if IsHttpRespSuccess(resp) && v == nil { + Debug(DbgInfo, "No interface provided; no HTTP response body expected\n") + return resp, nil + } + + // Handle 1. HTTP Success + Valid body matching request expectations + // Handle 3. HTTP Success + Body does NOT match request expectations + if IsHttpRespSuccess(resp) && v != nil { + + // If a timeout occurs, a 202 HTTP status code is returned; if the caller wishes to handle such an event, return + // an error corresponding to the timeout + if ExitWithErrorOnTimeout && resp.StatusCode == EXIT_CODE_TIMED_OUT { + errMsg := wski18n.T("Request accepted, but processing not completed yet.") + err = MakeWskError(errors.New(errMsg), EXIT_CODE_TIMED_OUT, NO_DISPLAY_MSG, NO_DISPLAY_USAGE, + NO_MSG_DISPLAYED, NO_DISPLAY_PREFIX, NO_APPLICATION_ERR, TIMED_OUT) + } + + return parseSuccessResponse(resp, data, v), err + } + + // We should never get here, but just in case return failure to keep the compiler happy + werr := MakeWskError(errors.New(wski18n.T("Command failed due to an internal failure")), EXIT_CODE_ERR_GENERAL, + DISPLAY_MSG, NO_DISPLAY_USAGE) + return resp, werr +} + +func PrintRequestInfo(req *http.Request, secretToObfuscate ...ObfuscateSet) (*http.Request, error) { + var truncatedBody string + var err error + if IsVerbose() { + fmt.Println("REQUEST:") + fmt.Printf("[%s]\t%s\n", req.Method, req.URL) + + if len(req.Header) > 0 { + fmt.Println("Req Headers") + PrintJSON(req.Header) + } + + if req.Body != nil { + fmt.Println("Req Body") + // Since we're emptying out the reader, which is the req.Body, we have to reset it, + // but create some copies for our debug messages.
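+            // (ioutil.ReadAll drains req.Body, so a fresh reader must be installed
+            // below via ioutil.NopCloser before the request is actually sent.)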
+ buffer, _ := ioutil.ReadAll(req.Body) + obfuscatedRequest := ObfuscateText(string(buffer), secretToObfuscate) + req.Body = ioutil.NopCloser(bytes.NewBuffer(buffer)) + + if !IsDebug() { + if truncatedBody, req.Body, err = BodyTruncator(ioutil.NopCloser(bytes.NewBuffer(buffer))); err != nil { + return nil, err + } + fmt.Println(ObfuscateText(truncatedBody, secretToObfuscate)) + } else { + fmt.Println(obfuscatedRequest) + } + Debug(DbgInfo, "Req Body (ASCII quoted string):\n%+q\n", obfuscatedRequest) + } + } + return req, nil +} + +func PrintResponseInfo(resp *http.Response, secretToObfuscate ...ObfuscateSet) (*http.Response, []byte, error) { + var truncatedBody string + // Don't "defer resp.Body.Close()" here because the body is reloaded to allow caller to + // do custom body parsing, such as handling per-route error responses. + Verbose("RESPONSE:") + Verbose("Got response with code %d\n", resp.StatusCode) + + if IsVerbose() && len(resp.Header) > 0 { + fmt.Println("Resp Headers") + PrintJSON(resp.Header) + } + + // Read the response body + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + Debug(DbgError, "ioutil.ReadAll(resp.Body) error: %s\n", err) + werr := MakeWskError(err, EXIT_CODE_ERR_NETWORK, DISPLAY_MSG, NO_DISPLAY_USAGE) + resp.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + return resp, data, werr + } + + // Reload the response body to allow caller access to the body; otherwise, + // the caller will have an empty body to read + resp.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + + Verbose("Response body size is %d bytes\n", len(data)) + + if !IsDebug() { + if truncatedBody, resp.Body, err = BodyTruncator(ioutil.NopCloser(bytes.NewBuffer(data))); err != nil { + return nil, data, err + } + Verbose("Response body received:\n%s\n", ObfuscateText(truncatedBody, secretToObfuscate)) + } else { + obfuscatedResponse := ObfuscateText(string(data), secretToObfuscate) + Verbose("Response body received:\n%s\n", obfuscatedResponse) + Debug(DbgInfo, "Response body received (ASCII quoted string):\n%+q\n", obfuscatedResponse) + } + return resp, data, err +} + +func ObfuscateText(text string, replacements []ObfuscateSet) string { + obfuscated := text + for _, oSet := range replacements { + r, _ := regexp.Compile(oSet.Regex) + obfuscated = r.ReplaceAllString(obfuscated, oSet.Replacement) + } + return obfuscated +} + +func parseErrorResponse(resp *http.Response, data []byte, v interface{}) (*http.Response, error) { + Debug(DbgInfo, "HTTP failure %d + body\n", resp.StatusCode) + + // Determine if an application error was received (#5) + buf := bytes.NewBuffer(data) + d := json.NewDecoder(buf) + d.UseNumber() + + errorResponse := &ErrorResponse{Response: resp} + err := d.Decode(&errorResponse) + + // Determine if error is an application error or an error generated by API + if err == nil { + if errorResponse.Code == nil /*&& errorResponse.ErrMsg != nil */ && resp.StatusCode == 502 { + return parseApplicationError(resp, data, v) + } else if errorResponse.Code != nil && errorResponse.ErrMsg != nil { + Debug(DbgInfo, "HTTP failure %d; server error %s\n", resp.StatusCode, errorResponse) + werr := MakeWskError(errorResponse, resp.StatusCode-256, DISPLAY_MSG, NO_DISPLAY_USAGE) + return resp, werr + } + } + + // Body contents are unknown (#6) + Debug(DbgError, "HTTP response with unexpected body failed due to contents parsing error: '%v'\n", err) + errMsg := wski18n.T("The connection failed, or timed out.
(HTTP status code {{.code}})", + map[string]interface{}{"code": resp.StatusCode}) + whiskErr := MakeWskError(errors.New(errMsg), resp.StatusCode-256, DISPLAY_MSG, NO_DISPLAY_USAGE) + return resp, whiskErr +} + +func parseApplicationError(resp *http.Response, data []byte, v interface{}) (*http.Response, error) { + Debug(DbgInfo, "Parsing application error\n") + + whiskErrorResponse := &WhiskErrorResponse{} + err := json.Unmarshal(data, whiskErrorResponse) + + // Handle application errors that occur when --result option is false (#5) + if err == nil && whiskErrorResponse != nil && whiskErrorResponse.Response != nil && whiskErrorResponse.Response.Status != nil { + Debug(DbgInfo, "Detected response status `%s` that a whisk.error(\"%#v\") was returned\n", + *whiskErrorResponse.Response.Status, whiskErrorResponse.Response.Result) + + errStr := getApplicationErrorMessage(whiskErrorResponse.Response.Result) + Debug(DbgInfo, "Application error received: %s\n", errStr) + + errMsg := wski18n.T("The following application error was received: {{.err}}", + map[string]interface{}{"err": errStr}) + whiskErr := MakeWskError(errors.New(errMsg), resp.StatusCode-256, NO_DISPLAY_MSG, NO_DISPLAY_USAGE, + NO_MSG_DISPLAYED, DISPLAY_PREFIX, APPLICATION_ERR) + return parseSuccessResponse(resp, data, v), whiskErr + } + + appErrResult := &AppErrorResult{} + err = json.Unmarshal(data, appErrResult) + + // Handle application errors that occur with blocking invocations when --result option is true (#5) + if err == nil && appErrResult.Error != nil { + Debug(DbgInfo, "Error code is null, blocking with result invocation error has occurred\n") + errStr := getApplicationErrorMessage(*appErrResult.Error) + Debug(DbgInfo, "Application error received: %s\n", errStr) + + whiskErr := MakeWskError(errors.New(errStr), resp.StatusCode-256, NO_DISPLAY_MSG, NO_DISPLAY_USAGE, + NO_MSG_DISPLAYED, DISPLAY_PREFIX, APPLICATION_ERR) + return parseSuccessResponse(resp, data, v), whiskErr + } + + // Body contents are unknown (#6) + Debug(DbgError, "HTTP response with unexpected body failed due to contents parsing error: '%v'\n", err) + errMsg := wski18n.T("The connection failed, or timed out. 
(HTTP status code {{.code}})", + map[string]interface{}{"code": resp.StatusCode}) + whiskErr := MakeWskError(errors.New(errMsg), resp.StatusCode-256, DISPLAY_MSG, NO_DISPLAY_USAGE) + return resp, whiskErr +} + +func getApplicationErrorMessage(errResp interface{}) string { + var errStr string + + // Handle error results that look like: + // + // { + // "error": { + // "error": "An error string", + // "message": "An error message", + // "another-message": "Another error message" + // } + // } + // Returns "An error string; An error message; Another error message" + // + // OR + // { + // "error": "An error string" + // } + // Returns "An error string" + // + // OR + // { + // "error": { + // "custom-err": { + // "error": "An error string", + // "message": "An error message" + // } + // } + // } + // Returns "{"error": { "custom-err": { "error": "An error string", "message": "An error message" } } }" + + errMapIntf, errMapIntfOk := errResp.(map[string]interface{}) + if !errMapIntfOk { + errStr = fmt.Sprintf("%v", errResp) + } else { + // Check if the "error" field exists in the response JSON + errObjIntf, errObjIntfOk := errMapIntf["error"] + if !errObjIntfOk { + errStr = fmt.Sprintf("%v", errMapIntf) + } else { + // Check if the "error" field value is a JSON object + errObj, errObjOk := errObjIntf.(map[string]interface{}) + if !errObjOk { + // The "error" field value is not JSON; check if it's a string + errorStr, errorStrOk := errObjIntf.(string) + if !errorStrOk { + errStr = fmt.Sprintf("%v", errObjIntf) + } else { + errStr = errorStr + } + } else { + Debug(DbgInfo, "Application failure error json: %+v\n", errObj) + + // Concatenate all string field values into a single error string + msgSeparator := "" + for _, val := range errObj { + valStr, valStrOk := val.(string) + if valStrOk { + errStr = errStr + msgSeparator + valStr + msgSeparator = "; " + } + } + + // If no top level string fields exist, return the entire error object + // Return a nice JSON string if possible; otherwise let Go try its best + if len(errStr) == 0 { + jsonBytes, err := json.Marshal(errObj) + if err != nil { + errStr = fmt.Sprintf("%v", errObj) + } else { + errStr = string(jsonBytes) + } + } + } + } + } + + return errStr +} + +func parseSuccessResponse(resp *http.Response, data []byte, v interface{}) *http.Response { + Debug(DbgInfo, "Parsing HTTP response into struct type: %s\n", reflect.TypeOf(v)) + + dc := json.NewDecoder(strings.NewReader(string(data))) + dc.UseNumber() + err := dc.Decode(v) + + // If the decode was successful, return the response without error (#1).
Otherwise, the decode did not work, so the + // server response was unexpected (#3) + if err == nil { + Debug(DbgInfo, "Successful parse of HTTP response into struct type: %s\n", reflect.TypeOf(v)) + return resp + } else { + Debug(DbgWarn, "Unsuccessful parse of HTTP response into struct type: %s; parse error '%v'\n", reflect.TypeOf(v), err) + Debug(DbgWarn, "Request was successful, so ignoring the following unexpected response body that could not be parsed: %s\n", data) + return resp + } +} + +//////////// +// Errors // +//////////// + +// For containing the server response body when an error message is returned +// Here's an example error response body with HTTP status code == 400 +// { +// "error": "namespace contains invalid characters", +// "code": "1422870" +// } +type ErrorResponse struct { + Response *http.Response // HTTP response that caused this error + ErrMsg *interface{} `json:"error"` // error message string + Code *interface{} `json:"code"` // validation error code (tid) +} + +type AppErrorResult struct { + Error *interface{} `json:"error"` +} + +type WhiskErrorResponse struct { + Response *WhiskResponse `json:"response"` +} + +type WhiskResponse struct { + Result map[string]interface{} `json:"result"` + Success bool `json:"success"` + Status *interface{} `json:"status"` +} + +type WhiskResult struct { + // Error *WhiskError `json:"error"` // whisk.error() and whisk.reject({msg:}) result in two different kinds of 'error' JSON objects +} + +type WhiskError struct { + Msg *string `json:"msg"` +} + +func (r ErrorResponse) Error() string { + return wski18n.T("{{.msg}} (code {{.code}})", + map[string]interface{}{"msg": fmt.Sprintf("%v", *r.ErrMsg), "code": r.Code}) +} + +//////////////////////////// +// Basic Client Functions // +//////////////////////////// + +func IsHttpRespSuccess(r *http.Response) bool { + return r.StatusCode >= 200 && r.StatusCode <= 299 +} + +func IsResponseResultSuccess(data []byte) bool { + errResp := new(WhiskErrorResponse) + err := json.Unmarshal(data, &errResp) + + if errResp.Response != nil { + return errResp.Response.Success + } else if err != nil { //failed to parse WhiskErrorResponse + Debug(DbgWarn, "IsResponseResultSuccess: failed to parse response result: %v\n", err) + } + + return true +} + +// +// Create a HTTP request object using URL stored in url.URL object +// Arguments: +// method - HTTP verb (i.e. "GET", "PUT", etc) +// urlRelResource - *url.URL structure representing the relative resource URL, including query params +// body - optional. Object whose contents will be JSON encoded and placed in HTTP request body +// includeNamespaceInUrl - when true "/namespaces/NAMESPACE" is included in the final URL; otherwise not included. +// appendOpenWhiskPath - when true, the OpenWhisk URL format is generated +// encodeBodyAs - specifies body encoding (json or form data) +// useAuthentication - when true, the basic Authorization is included with the configured authkey as the value +func (c *Client) NewRequestUrl( + method string, + urlRelResource *url.URL, + body interface{}, + includeNamespaceInUrl bool, + appendOpenWhiskPath bool, + encodeBodyAs string, + useAuthentication bool) (*http.Request, error) { + var requestUrl *url.URL + var err error + + if appendOpenWhiskPath { + var urlVerNamespaceStr string + var verPathEncoded = (&url.URL{Path: c.Config.Version}).String() + + if includeNamespaceInUrl { + if c.Config.Namespace != "" { + // Encode path parts before inserting them into the URI so that any '?' 
is correctly encoded + // as part of the path and not the start of the query params + verNamespaceEncoded := (&url.URL{Path: c.Config.Namespace}).String() + urlVerNamespaceStr = fmt.Sprintf("%s/namespaces/%s", verPathEncoded, verNamespaceEncoded) + } else { + urlVerNamespaceStr = fmt.Sprintf("%s/namespaces", verPathEncoded) + } + } else { + urlVerNamespaceStr = fmt.Sprintf("%s", verPathEncoded) + } + + // Assemble the complete URL: base + version + [namespace] + resource_relative_path + Debug(DbgInfo, "basepath: %s, version/namespace path: %s, resource path: %s\n", c.BaseURL.String(), urlVerNamespaceStr, urlRelResource.String()) + urlStr := fmt.Sprintf("%s/%s/%s", c.BaseURL.String(), urlVerNamespaceStr, urlRelResource.String()) + requestUrl, err = url.Parse(urlStr) + if err != nil { + Debug(DbgError, "url.Parse(%s) error: %s\n", urlStr, err) + errStr := wski18n.T("Invalid request URL '{{.url}}': {{.err}}", + map[string]interface{}{"url": urlStr, "err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + } else { + Debug(DbgInfo, "basepath: %s, resource path: %s\n", c.BaseURL.String(), urlRelResource.String()) + urlStr := fmt.Sprintf("%s/%s", c.BaseURL.String(), urlRelResource.String()) + requestUrl, err = url.Parse(urlStr) + if err != nil { + Debug(DbgError, "url.Parse(%s) error: %s\n", urlStr, err) + errStr := wski18n.T("Invalid request URL '{{.url}}': {{.err}}", + map[string]interface{}{"url": urlStr, "err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + } + + var buf io.ReadWriter + if body != nil { + if encodeBodyAs == EncodeBodyAsJson { + buf = new(bytes.Buffer) + encoder := json.NewEncoder(buf) + encoder.SetEscapeHTML(false) + err := encoder.Encode(body) + + if err != nil { + Debug(DbgError, "json.Encode(%#v) error: %s\n", body, err) + errStr := wski18n.T("Error encoding request body: {{.err}}", + map[string]interface{}{"err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + } else if encodeBodyAs == EncodeBodyAsFormData { + if values, ok := body.(url.Values); ok { + buf = bytes.NewBufferString(values.Encode()) + } else { + Debug(DbgError, "Invalid form data body: %v\n", body) + errStr := wski18n.T("Internal error. Form data encoding failure") + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + } else { + Debug(DbgError, "Invalid body encode type: %s\n", encodeBodyAs) + errStr := wski18n.T("Internal error. 
Invalid encoding type '{{.encodetype}}'", + map[string]interface{}{"encodetype": encodeBodyAs}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + } + + req, err := http.NewRequest(method, requestUrl.String(), buf) + if err != nil { + Debug(DbgError, "http.NewRequest(%v, %s, buf) error: %s\n", method, requestUrl.String(), err) + errStr := wski18n.T("Error initializing request: {{.err}}", map[string]interface{}{"err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + if req.Body != nil && encodeBodyAs == EncodeBodyAsJson { + req.Header.Add("Content-Type", "application/json") + } + if req.Body != nil && encodeBodyAs == EncodeBodyAsFormData { + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + } + + if useAuthentication { + err = c.addAuthHeader(req, AuthRequired) + if err != nil { + Debug(DbgError, "addAuthHeader() error: %s\n", err) + errStr := wski18n.T("Unable to add the HTTP authentication header: {{.err}}", + map[string]interface{}{"err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + } else { + Debug(DbgInfo, "No auth header required\n") + } + + req.Header.Add("User-Agent", c.Config.UserAgent) + + for key := range c.Config.AdditionalHeaders { + req.Header.Add(key, c.Config.AdditionalHeaders.Get(key)) + } + + return req, nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/info.go b/vendor/github.com/apache/openwhisk-client-go/whisk/info.go new file mode 100644 index 00000000000..9cbac6a01e2 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/info.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package whisk + +import ( + "errors" + "fmt" + "github.com/apache/openwhisk-client-go/wski18n" + "net/http" + "net/url" +) + +type Info struct { + Whisk string `json:"whisk,omitempty"` + Version string `json:"version,omitempty"` + Build string `json:"build,omitempty"` + BuildNo string `json:"buildno,omitempty"` +} + +type InfoService struct { + client *Client +} + +func (s *InfoService) Get() (*Info, *http.Response, error) { + // make a request to c.BaseURL / v1 + urlStr := fmt.Sprintf("%s/%s", s.client.BaseURL.String(), s.client.Config.Version) + u, err := url.Parse(urlStr) + if err != nil { + Debug(DbgError, "url.Parse(%s) error: %s\n", urlStr, err) + errStr := wski18n.T("Unable to URL parse '{{.version}}': {{.err}}", + map[string]interface{}{"version": urlStr, "err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + Debug(DbgError, "http.NewRequest(GET, %s) error: %s\n", u.String(), err) + errStr := wski18n.T("Unable to create HTTP request for GET '{{.url}}': {{.err}}", + map[string]interface{}{"url": u.String(), "err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + Debug(DbgInfo, "Sending HTTP URL '%s'; req %#v\n", req.URL.String(), req) + info := new(Info) + resp, err := s.client.Do(req, &info, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, nil, err + } + + return info, resp, nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/namespace.go b/vendor/github.com/apache/openwhisk-client-go/whisk/namespace.go new file mode 100644 index 00000000000..cad09c81af6 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/namespace.go @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package whisk + +import ( + "errors" + "fmt" + "github.com/apache/openwhisk-client-go/wski18n" + "net/http" + "strings" +) + +type Namespace struct { + Name string `json:"name"` +} + +type NamespaceService struct { + client *Client +} + +// Compare(sortable) compares namespace to sortable for the purpose of sorting. +// REQUIRED: sortable must also be of type Namespace. 
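+//
+// For illustration only (hypothetical values): the comparison is
+// case-insensitive, so Namespace{Name: "Alpha"}.Compare(Namespace{Name: "beta"})
+// returns true.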
+// ***Method of type Sortable*** +func (namespace Namespace) Compare(sortable Sortable) bool { + // Sorts alphabetically + namespaceToCompare := sortable.(Namespace) + var namespaceString string + var compareString string + + namespaceString = strings.ToLower(namespace.Name) + compareString = strings.ToLower(namespaceToCompare.Name) + + return namespaceString < compareString +} + +// ToHeaderString() returns the header for a list of namespaces +func (namespace Namespace) ToHeaderString() string { + return fmt.Sprintf("%s\n", "namespaces") +} + +// ToSummaryRowString() returns a compound string of required parameters for printing +// from CLI command `wsk namespace list`. +// ***Method of type Sortable*** +func (namespace Namespace) ToSummaryRowString() string { + return fmt.Sprintf("%s\n", namespace.Name) +} + +// Get a list of available namespaces +func (s *NamespaceService) List() ([]Namespace, *http.Response, error) { + // make a request to c.BaseURL / namespaces + + // Create the request against the namespaces resource + s.client.Config.Namespace = "" + route := "" + req, err := s.client.NewRequest("GET", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "s.client.NewRequest(GET) error: %s\n", err) + errStr := wski18n.T("Unable to create HTTP request for GET: {{.err}}", + map[string]interface{}{"err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + var namespaceNames []string + resp, err := s.client.Do(req, &namespaceNames, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + var namespaces []Namespace + for _, nsName := range namespaceNames { + ns := Namespace{ + Name: nsName, + } + namespaces = append(namespaces, ns) + } + + Debug(DbgInfo, "Returning []namespaces: %#v\n", namespaces) + return namespaces, resp, nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/package.go b/vendor/github.com/apache/openwhisk-client-go/whisk/package.go new file mode 100644 index 00000000000..a1381261400 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/package.go @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package whisk + +import ( + "errors" + "fmt" + "github.com/apache/openwhisk-client-go/wski18n" + "net/http" + "net/url" + "strings" +) + +type PackageService struct { + client *Client +} + +type PackageInterface interface { + GetName() string +} + +// Use this struct to represent the package/binding sent from the Whisk server +// NOTE: Binding was formerly a bool; the server now returns it as a namespace/name struct (see Binding below)
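+//
+// For illustration only (hypothetical payload, not from this repository): a
+// server response such as
+//
+//    {"namespace": "guest", "name": "utils", "version": "0.0.1",
+//     "binding": {"namespace": "whisk.system", "name": "utils"}}
+//
+// decodes into a Package whose Binding pointer is non-nil, which is how a
+// binding is distinguished from a plain package.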
+type Package struct { + Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Publish *bool `json:"publish,omitempty"` + Annotations KeyValueArr `json:"annotations,omitempty"` + Parameters KeyValueArr `json:"parameters,omitempty"` + Binding *Binding `json:"binding,omitempty"` + Actions []Action `json:"actions,omitempty"` + Feeds []Action `json:"feeds,omitempty"` + Updated int64 `json:"updated,omitempty"` +} + +func (p *Package) GetName() string { + return p.Name +} + +// Use this struct when creating a binding +// Publish is NOT optional; Binding is a namespace/name object, not a bool +type BindingPackage struct { + Namespace string `json:"-"` + Name string `json:"-"` + Version string `json:"version,omitempty"` + Publish *bool `json:"publish,omitempty"` + Annotations KeyValueArr `json:"annotations,omitempty"` + Parameters KeyValueArr `json:"parameters,omitempty"` + Binding `json:"binding"` +} + +func (p *BindingPackage) GetName() string { + return p.Name +} + +type Binding struct { + Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` +} + +type BindingUpdates struct { + Added []string `json:"added,omitempty"` + Updated []string `json:"updated,omitempty"` + Deleted []string `json:"deleted,omitempty"` +} + +type PackageListOptions struct { + Public bool `url:"public,omitempty"` + Limit int `url:"limit"` + Skip int `url:"skip"` + Since int `url:"since,omitempty"` + Docs bool `url:"docs,omitempty"` +} + +// Compare(sortable) compares xPackage to sortable for the purpose of sorting. +// REQUIRED: sortable must also be of type Package. +// ***Method of type Sortable*** +func (xPackage Package) Compare(sortable Sortable) bool { + // Sorts alphabetically by NAMESPACE -> PACKAGE_NAME + packageToCompare := sortable.(Package) + + var packageString string + var compareString string + + packageString = strings.ToLower(fmt.Sprintf("%s%s", xPackage.Namespace, + xPackage.Name)) + compareString = strings.ToLower(fmt.Sprintf("%s%s", packageToCompare.Namespace, + packageToCompare.Name)) + + return packageString < compareString +} + +// ToHeaderString() returns the header for a list of actions +func (pkg Package) ToHeaderString() string { + return fmt.Sprintf("%s\n", "packages") +} + +// ToSummaryRowString() returns a compound string of required parameters for printing +// from CLI command `wsk package list`. 
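+//
+// For illustration only (hypothetical values): a shared package prints its
+// fully qualified name padded to 70 characters, followed by its publish state,
+//
+//    shared := true
+//    p := Package{Namespace: "guest", Name: "utils", Publish: &shared}
+//    fmt.Print(p.ToSummaryRowString()) // "/guest/utils ... shared"
+//
+// while a nil or false Publish prints "private".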
+// ***Method of type Printable***
+func (xPackage Package) ToSummaryRowString() string {
+    publishState := wski18n.T("private")
+
+    if xPackage.Publish != nil && *xPackage.Publish {
+        publishState = wski18n.T("shared")
+    }
+
+    return fmt.Sprintf("%-70s %s\n", fmt.Sprintf("/%s/%s", xPackage.Namespace,
+        xPackage.Name), publishState)
+}
+
+func (s *PackageService) List(options *PackageListOptions) ([]Package, *http.Response, error) {
+    route := "packages"
+    routeUrl, err := addRouteOptions(route, options)
+    if err != nil {
+        Debug(DbgError, "addRouteOptions(%s, %#v) error: '%s'\n", route, options, err)
+        errStr := wski18n.T("Unable to build request URL: {{.err}}", map[string]interface{}{"err": err})
+        werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, nil, werr
+    }
+
+    req, err := s.client.NewRequestUrl("GET", routeUrl, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired)
+    if err != nil {
+        Debug(DbgError, "http.NewRequestUrl(GET, %s, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired); error: '%s'\n", route, err)
+        errStr := wski18n.T("Unable to create GET HTTP request for '{{.route}}': {{.err}}",
+            map[string]interface{}{"route": route, "err": err})
+        werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, nil, werr
+    }
+
+    var packages []Package
+    resp, err := s.client.Do(req, &packages, ExitWithSuccessOnTimeout)
+    if err != nil {
+        Debug(DbgError, "s.client.Do() error - HTTP req %s; error '%s'\n", req.URL.String(), err)
+        return nil, resp, err
+    }
+
+    return packages, resp, err
+}
+
+func (s *PackageService) Get(packageName string) (*Package, *http.Response, error) {
+    // Encode resource name as a path (with no query params) before inserting it into the URI
+    // This way any '?' chars in the name won't be treated as the beginning of the query params
+    packageName = (&url.URL{Path: packageName}).String()
+    route := fmt.Sprintf("packages/%s", packageName)
+
+    req, err := s.client.NewRequest("GET", route, nil, IncludeNamespaceInUrl)
+    if err != nil {
+        Debug(DbgError, "http.NewRequest(GET, %s); error: '%s'\n", route, err)
+        errStr := wski18n.T("Unable to create GET HTTP request for '{{.route}}': {{.err}}",
+            map[string]interface{}{"route": route, "err": err})
+        werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, nil, werr
+    }
+
+    p := new(Package)
+    resp, err := s.client.Do(req, &p, ExitWithSuccessOnTimeout)
+    if err != nil {
+        Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err)
+        return nil, resp, err
+    }
+
+    return p, resp, nil
+}
+
+func (s *PackageService) Insert(x_package PackageInterface, overwrite bool) (*Package, *http.Response, error) {
+    // Encode resource name as a path (with no query params) before inserting it into the URI
+    // This way any '?'
chars in the name won't be treated as the beginning of the query params + packageName := (&url.URL{Path: x_package.GetName()}).String() + route := fmt.Sprintf("packages/%s?overwrite=%t", packageName, overwrite) + + req, err := s.client.NewRequest("PUT", route, x_package, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(PUT, %s); error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create PUT HTTP request for '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + p := new(Package) + resp, err := s.client.Do(req, &p, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return p, resp, nil +} + +func (s *PackageService) Delete(packageName string) (*http.Response, error) { + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' chars in the name won't be treated as the beginning of the query params + packageName = (&url.URL{Path: packageName}).String() + route := fmt.Sprintf("packages/%s", packageName) + + req, err := s.client.NewRequest("DELETE", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(DELETE, %s); error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create DELETE HTTP request for '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + + resp, err := s.client.Do(req, nil, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return resp, err + } + + return resp, nil +} + +func (s *PackageService) Refresh() (*BindingUpdates, *http.Response, error) { + route := "packages/refresh" + + req, err := s.client.NewRequest("POST", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(POST, %s); error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create POST HTTP request for '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + updates := &BindingUpdates{} + resp, err := s.client.Do(req, updates, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return updates, resp, nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/rule.go b/vendor/github.com/apache/openwhisk-client-go/whisk/rule.go new file mode 100644 index 00000000000..e52d41f559e --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/rule.go @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package whisk
+
+import (
+    "errors"
+    "fmt"
+    "github.com/apache/openwhisk-client-go/wski18n"
+    "net/http"
+    "net/url"
+    "strings"
+)
+
+type RuleService struct {
+    client *Client
+}
+
+type Rule struct {
+    Namespace   string      `json:"namespace,omitempty"`
+    Name        string      `json:"name,omitempty"`
+    Version     string      `json:"version,omitempty"`
+    Annotations KeyValueArr `json:"annotations,omitempty"`
+    Status      string      `json:"status"`
+    Trigger     interface{} `json:"trigger"`
+    Action      interface{} `json:"action"`
+    Publish     *bool       `json:"publish,omitempty"`
+    Updated     int64       `json:"updated,omitempty"`
+}
+
+type RuleListOptions struct {
+    Limit int  `url:"limit"`
+    Skip  int  `url:"skip"`
+    Docs  bool `url:"docs,omitempty"`
+}
+
+// Compare(sortable) compares rule to sortable for the purpose of sorting.
+// REQUIRED: sortable must also be of type Rule.
+// ***Method of type Sortable***
+func (rule Rule) Compare(sortable Sortable) bool {
+    // Sorts alphabetically by NAMESPACE -> RULE_NAME
+    ruleToCompare := sortable.(Rule)
+    var ruleString string
+    var compareString string
+
+    ruleString = strings.ToLower(fmt.Sprintf("%s%s", rule.Namespace, rule.Name))
+    compareString = strings.ToLower(fmt.Sprintf("%s%s", ruleToCompare.Namespace,
+        ruleToCompare.Name))
+
+    return ruleString < compareString
+}
+
+// ToHeaderString() returns the header for a list of rules
+func (rule Rule) ToHeaderString() string {
+    return fmt.Sprintf("%s\n", "rules")
+}
+
+// ToSummaryRowString() returns a compound string of required parameters for printing
+// from CLI command `wsk rule list`.
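+// As an illustration (names invented, column widths approximate), an active
+// rule might render as:
+//   /guest/echo-rule                                                       private              active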
+// ***Method of type Printable***
+func (rule Rule) ToSummaryRowString() string {
+    publishState := wski18n.T("private")
+
+    return fmt.Sprintf("%-70s %-20s %s\n", fmt.Sprintf("/%s/%s", rule.Namespace,
+        rule.Name), publishState, rule.Status)
+}
+
+func (s *RuleService) List(options *RuleListOptions) ([]Rule, *http.Response, error) {
+    route := "rules"
+    routeUrl, err := addRouteOptions(route, options)
+    if err != nil {
+        Debug(DbgError, "addRouteOptions(%s, %#v) error: '%s'\n", route, options, err)
+        errStr := wski18n.T("Unable to append options '{{.options}}' to URL route '{{.route}}': {{.err}}",
+            map[string]interface{}{"options": fmt.Sprintf("%#v", options), "route": route, "err": err})
+        werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, nil, werr
+    }
+
+    req, err := s.client.NewRequestUrl("GET", routeUrl, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired)
+    if err != nil {
+        Debug(DbgError, "http.NewRequestUrl(GET, %s, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired); error: '%s'\n", route, err)
+        errStr := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}",
+            map[string]interface{}{"route": route, "err": err})
+        werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, nil, werr
+    }
+
+    var rules []Rule
+    resp, err := s.client.Do(req, &rules, ExitWithSuccessOnTimeout)
+    if err != nil {
+        Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err)
+        return nil, resp, err
+    }
+
+    return rules, resp, err
+}
+
+func (s *RuleService) Insert(rule *Rule, overwrite bool) (*Rule, *http.Response, error) {
+    // Encode resource name as a path (with no query params) before inserting it into the URI
+    // This way any '?' chars in the name won't be treated as the beginning of the query params
+    ruleName := (&url.URL{Path: rule.Name}).String()
+    route := fmt.Sprintf("rules/%s?overwrite=%t", ruleName, overwrite)
+
+    routeUrl, err := url.Parse(route)
+    if err != nil {
+        Debug(DbgError, "url.Parse(%s) error: %s\n", route, err)
+        errStr := wski18n.T("Invalid request URL '{{.url}}': {{.err}}",
+            map[string]interface{}{"url": route, "err": err})
+        werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, nil, werr
+    }
+
+    req, err := s.client.NewRequestUrl("PUT", routeUrl, rule, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired)
+    if err != nil {
+        Debug(DbgError, "http.NewRequestUrl(PUT, %s, %+v, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired); error: '%s'\n", routeUrl, rule, err)
+        errStr := wski18n.T("Unable to create HTTP request for PUT '{{.route}}': {{.err}}",
+            map[string]interface{}{"route": routeUrl, "err": err})
+        werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, nil, werr
+    }
+
+    r := new(Rule)
+    resp, err := s.client.Do(req, &r, ExitWithSuccessOnTimeout)
+    if err != nil {
+        Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err)
+        return nil, resp, err
+    }
+
+    return r, resp, nil
+}
+
+func (s *RuleService) Get(ruleName string) (*Rule, *http.Response, error) {
+    // Encode resource name as a path (with no query params) before inserting it into the URI
+    // This way any '?'
chars in the name won't be treated as the beginning of the query params + ruleName = (&url.URL{Path: ruleName}).String() + route := fmt.Sprintf("rules/%s", ruleName) + + req, err := s.client.NewRequest("GET", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(GET, %s); error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + r := new(Rule) + resp, err := s.client.Do(req, &r, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return r, resp, nil +} + +func (s *RuleService) Delete(ruleName string) (*http.Response, error) { + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' chars in the name won't be treated as the beginning of the query params + ruleName = (&url.URL{Path: ruleName}).String() + route := fmt.Sprintf("rules/%s", ruleName) + + req, err := s.client.NewRequest("DELETE", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(DELETE, %s); error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for DELETE '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + + resp, err := s.client.Do(req, nil, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return resp, err + } + + return resp, nil +} + +func (s *RuleService) SetState(ruleName string, state string) (*Rule, *http.Response, error) { + state = strings.ToLower(state) + if state != "active" && state != "inactive" { + errStr := wski18n.T("Internal error. Invalid state option '{{.state}}'. Valid options are \"active\" and \"inactive\".", + map[string]interface{}{"state": state}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, DISPLAY_USAGE) + return nil, nil, werr + } + + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' 
chars in the name won't be treated as the beginning of the query params + ruleName = (&url.URL{Path: ruleName}).String() + route := fmt.Sprintf("rules/%s", ruleName) + + ruleState := &Rule{Status: state} + + req, err := s.client.NewRequest("POST", route, ruleState, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(POST, %s); error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for POST '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + r := new(Rule) + resp, err := s.client.Do(req, &r, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return r, resp, nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/sdk.go b/vendor/github.com/apache/openwhisk-client-go/whisk/sdk.go new file mode 100644 index 00000000000..4d735de1102 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/sdk.go @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package whisk + +import ( + "errors" + "fmt" + "github.com/apache/openwhisk-client-go/wski18n" + "net/http" +) + +type SdkService struct { + client *Client +} + +// Structure for SDK request responses +type Sdk struct { + // TODO :: Add SDK fields +} + +type SdkRequest struct { + // TODO :: Add SDK +} + +// Install artifact {component = docker || swift || iOS} +func (s *SdkService) Install(relFileUrl string) (*http.Response, error) { + baseURL := s.client.Config.BaseURL + // Remove everything but the scheme, host, and port + baseURL.Path, baseURL.RawQuery, baseURL.Fragment = "", "", "" + + urlStr := fmt.Sprintf("%s/%s", baseURL, relFileUrl) + + req, err := http.NewRequest("GET", urlStr, nil) + if err != nil { + Debug(DbgError, "http.NewRequest(GET, %s, nil) error: %s\n", urlStr, err) + errStr := wski18n.T("Unable to create HTTP request for GET '{{.url}}': {{.err}}", + map[string]interface{}{"url": urlStr, "err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, werr + } + + if IsVerbose() { + fmt.Println("REQUEST:") + fmt.Printf("[%s]\t%s\n", req.Method, req.URL) + if len(req.Header) > 0 { + fmt.Println("Req Headers") + PrintJSON(req.Header) + } + if req.Body != nil { + fmt.Println("Req Body") + fmt.Println(req.Body) + } + } + + // Directly use the HTTP client, not the Whisk CLI client, so that the response body is left alone + resp, err := s.client.client.Do(req) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return resp, err + } + + return resp, nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/shared.go b/vendor/github.com/apache/openwhisk-client-go/whisk/shared.go new file mode 100644 index 00000000000..954b2a4542b --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/shared.go @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package whisk + +import ( + "encoding/json" + "strings" +) + +type KeyValue struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +type KeyValueArr []KeyValue + +/* +Retrieves a value associated with a given key from a KeyValueArr. A key of type string must be passed to the method. +An interface will be returned containing the found value. If a key could not be found, a nil value will be returned. 
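+
+For illustration only (the key and value here are invented, not from real data):
+
+    arr := KeyValueArr{KeyValue{Key: "kind", Value: "nodejs:10"}}
+    kind := arr.GetValue("kind") // interface{} holding "nodejs:10"
+    missing := arr.GetValue("na") // nil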
+*/ +func (keyValueArr KeyValueArr) GetValue(key string) (res interface{}) { + for i := 0; i < len(keyValueArr); i++ { + if keyValueArr[i].Key == key { + res = keyValueArr[i].Value + break + } + } + + Debug(DbgInfo, "Got value '%v' for key '%s' from '%v'\n", res, key, keyValueArr) + + return res +} + +func (keyValueArr KeyValueArr) FindKeyValue(key string) int { + for i := 0; i < len(keyValueArr); i++ { + if strings.ToLower(keyValueArr[i].Key) == strings.ToLower(key) { + return i + } + } + + return -1 +} + +/* + * Adds the specified KeyValue to the key value array. If the KeyValue's key + * is already in the array, that entry is updated with the KeyValue's value. + * + * Returns a new key value array with the update + */ +func (keyValueArr KeyValueArr) AddOrReplace(kv *KeyValue) KeyValueArr { + var replaced = false + for i := 0; i < len(keyValueArr); i++ { + if strings.ToLower(keyValueArr[i].Key) == strings.ToLower(kv.Key) { + keyValueArr[i].Value = kv.Value + replaced = true + } + } + if !replaced { + return append(keyValueArr, *kv) + } + return keyValueArr +} + +/* +Appends items from appKeyValueArr to keyValueArr if the appKeyValueArr item does not exist in keyValueArr. +*/ +func (keyValueArr KeyValueArr) AppendKeyValueArr(appKeyValueArr KeyValueArr) KeyValueArr { + for i := 0; i < len(appKeyValueArr); i++ { + if KeyValueArr.FindKeyValue(keyValueArr, appKeyValueArr[i].Key) == -1 { + keyValueArr = append(keyValueArr, appKeyValueArr[i]) + } + } + + return keyValueArr +} + +type Annotations []map[string]interface{} + +type Parameters *json.RawMessage + +type Limits struct { + Timeout *int `json:"timeout,omitempty"` + Memory *int `json:"memory,omitempty"` + Logsize *int `json:"logs,omitempty"` + Concurrency *int `json:"concurrency,omitempty"` +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/start.go b/vendor/github.com/apache/openwhisk-client-go/whisk/start.go new file mode 100644 index 00000000000..c5f29a80bf3 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/start.go @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package whisk
+
+import (
+    "bufio"
+    "encoding/json"
+    "fmt"
+    "io"
+    "log"
+    "os"
+)
+
+// ActionFunction is the signature of an action in OpenWhisk
+type ActionFunction func(event json.RawMessage) (json.RawMessage, error)
+
+// actual implementation of a read-eval-print-loop
+func repl(fn ActionFunction, in io.Reader, out io.Writer) {
+    // read loop
+    reader := bufio.NewReader(in)
+    for {
+        event, err := reader.ReadBytes('\n')
+        if err != nil {
+            break
+        }
+        result, err := fn(event)
+        if err != nil {
+            fmt.Fprintf(out, "{ error: %q}\n", err.Error())
+            continue
+        }
+        fmt.Fprintln(out, string(result))
+    }
+}
+
+// Start will start a loop reading from stdin and writing to fd3.
+// This is expected to be used for implementing Go actions.
+func Start(fn ActionFunction) {
+    out := os.NewFile(3, "pipe")
+    defer out.Close()
+    repl(fn, os.Stdin, out)
+}
+
+// StartWithArgs will execute the function once for each arg.
+// If there are no args it will start a read-eval-print loop on the function.
+// Expected to be used as the starting point for implementing Go actions,
+// as in whisk.StartWithArgs(function, os.Args[1:]):
+// if the command line carried one parameter, the function is invoked once;
+// otherwise the function is started in a read-eval-print loop.
+func StartWithArgs(action ActionFunction, args []string) {
+    // handle command line argument
+    if len(args) > 0 {
+        for _, arg := range args {
+            log.Println(arg)
+            result, err := action([]byte(arg))
+            if err == nil {
+                fmt.Println(string(result))
+            } else {
+                log.Println(err)
+            }
+        }
+        return
+    }
+    Start(action)
+}
diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/trace.go b/vendor/github.com/apache/openwhisk-client-go/whisk/trace.go
new file mode 100644
index 00000000000..86dc76dcbf1
--- /dev/null
+++ b/vendor/github.com/apache/openwhisk-client-go/whisk/trace.go
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package whisk
+
+import (
+    "fmt"
+    "os"
+    "runtime"
+    "strings"
+)
+
+type DebugLevel string
+
+const (
+    DbgInfo  DebugLevel = "Inf"
+    DbgWarn  DebugLevel = "Wrn"
+    DbgError DebugLevel = "Err"
+    DbgFatal DebugLevel = "Ftl"
+)
+
+const MaxNameLen int = 25
+
+var isVerbose bool
+var isDebug bool
+
+func init() {
+    if len(os.Getenv("WSK_CLI_DEBUG")) > 0 { // Useful for tracing init() code, before params are parsed
+        SetDebug(true)
+    }
+}
+
+func SetDebug(b bool) {
+    isDebug = b
+}
+
+func SetVerbose(b bool) {
+    isVerbose = b
+}
+
+func IsVerbose() bool {
+    return isVerbose || isDebug
+}
+func IsDebug() bool {
+    return isDebug
+}
+
+/* Function for tracing debug level messages to stdout
+   Output format:
+     [file-or-function-name]:line-#:[DebugLevel] The formatted message without any appended \n
+*/
+func Debug(dl DebugLevel, msgFormat string, args ...interface{}) {
+    if isDebug {
+        pc, file, line, _ := runtime.Caller(1)
+        fcn := runtime.FuncForPC(pc)
+        msg := fmt.Sprintf(msgFormat, args...)
+        fcnName := fcn.Name()
+
+        // Cobra command Run/RunE functions are anonymous, so the function name is unfriendly;
+        // use the file name instead
+        if strings.Contains(fcnName, "commands.glob.") || strings.Contains(fcnName, "whisk.glob.") {
+            fcnName = file
+        }
+
+        // Only interested in the trailing function/file name characters
+        if len(fcnName) > MaxNameLen {
+            fcnName = fcnName[len(fcnName)-MaxNameLen:]
+        }
+        fmt.Printf("[%-25s]:%03d:[%3s] %v", fcnName, line, dl, msg)
+    }
+}
+
+/* Function for tracing verbose level messages to stdout
+   Output format:
+     The formatted message without any appended newline characters
+*/
+func Verbose(msgFormat string, args ...interface{}) {
+    if IsVerbose() {
+        msg := fmt.Sprintf(msgFormat, args...)
+        fmt.Printf("%v", msg)
+    }
+}
diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/trigger.go b/vendor/github.com/apache/openwhisk-client-go/whisk/trigger.go
new file mode 100644
index 00000000000..bda970dd20e
--- /dev/null
+++ b/vendor/github.com/apache/openwhisk-client-go/whisk/trigger.go
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package whisk
+
+import (
+    "errors"
+    "fmt"
+    "github.com/apache/openwhisk-client-go/wski18n"
+    "net/http"
+    "net/url"
+    "strings"
+)
+
+type TriggerService struct {
+    client ClientInterface
+}
+
+type Trigger struct {
+    Namespace    string                 `json:"namespace,omitempty"`
+    Name         string                 `json:"name,omitempty"`
+    Version      string                 `json:"version,omitempty"`
+    ActivationId string                 `json:"activationId,omitempty"`
+    Annotations  KeyValueArr            `json:"annotations,omitempty"`
+    Parameters   KeyValueArr            `json:"parameters,omitempty"`
+    Limits       *Limits                `json:"limits,omitempty"`
+    Publish      *bool                  `json:"publish,omitempty"`
+    Rules        map[string]interface{} `json:"rules,omitempty"`
+    Updated      int64                  `json:"updated,omitempty"`
+}
+
+type TriggerListOptions struct {
+    Limit int  `url:"limit"`
+    Skip  int  `url:"skip"`
+    Docs  bool `url:"docs,omitempty"`
+}
+
+// Compare(sortable) compares trigger to sortable for the purpose of sorting.
+// REQUIRED: sortable must also be of type Trigger.
+// ***Method of type Sortable***
+func (trigger Trigger) Compare(sortable Sortable) bool {
+    // Sorts alphabetically by NAMESPACE -> TRIGGER_NAME
+    triggerToCompare := sortable.(Trigger)
+    var triggerString string
+    var compareString string
+
+    triggerString = strings.ToLower(fmt.Sprintf("%s%s", trigger.Namespace,
+        trigger.Name))
+    compareString = strings.ToLower(fmt.Sprintf("%s%s", triggerToCompare.Namespace,
+        triggerToCompare.Name))
+
+    return triggerString < compareString
+}
+
+// ToHeaderString() returns the header for a list of triggers
+func (trigger Trigger) ToHeaderString() string {
+    return fmt.Sprintf("%s\n", "triggers")
+}
+
+// ToSummaryRowString() returns a compound string of required parameters for printing
+// from CLI command `wsk trigger list`.
+// ***Method of type Printable***
+func (trigger Trigger) ToSummaryRowString() string {
+    publishState := wski18n.T("private")
+
+    return fmt.Sprintf("%-70s %s\n", fmt.Sprintf("/%s/%s", trigger.Namespace,
+        trigger.Name), publishState)
+}
+
+func (s *TriggerService) List(options *TriggerListOptions) ([]Trigger, *http.Response, error) {
+    route := "triggers"
+    routeUrl, err := addRouteOptions(route, options)
+    if err != nil {
+        Debug(DbgError, "addRouteOptions(%s, %#v) error: '%s'\n", route, options, err)
+        errStr := wski18n.T("Unable to append options '{{.options}}' to URL route '{{.route}}': {{.err}}",
+            map[string]interface{}{"options": fmt.Sprintf("%#v", options), "route": route, "err": err})
+        werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, nil, werr
+    }
+
+    req, err := s.client.NewRequestUrl("GET", routeUrl, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired)
+    if err != nil {
+        Debug(DbgError, "http.NewRequestUrl(GET, %s, nil, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired); error: '%s'\n", route, err)
+        errStr := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}",
+            map[string]interface{}{"route": route, "err": err})
+        werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, nil, werr
+    }
+
+    var triggers []Trigger
+    resp, err := s.client.Do(req, &triggers, ExitWithSuccessOnTimeout)
+    if err != nil {
+        Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err)
+        return nil, resp, err
+    }
+
+    return triggers, resp, nil
+}
+
+func (s *TriggerService) Insert(trigger *Trigger, overwrite bool) (*Trigger, *http.Response, error)
{ + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' chars in the name won't be treated as the beginning of the query params + triggerName := (&url.URL{Path: trigger.Name}).String() + route := fmt.Sprintf("triggers/%s?overwrite=%t", triggerName, overwrite) + + routeUrl, err := url.Parse(route) + if err != nil { + Debug(DbgError, "url.Parse(%s) error: %s\n", route, err) + errStr := wski18n.T("Invalid request URL '{{.url}}': {{.err}}", + map[string]interface{}{"url": route, "err": err}) + werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + req, err := s.client.NewRequestUrl("PUT", routeUrl, trigger, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired) + if err != nil { + Debug(DbgError, "http.NewRequestUrl(PUT, %s, %+v, IncludeNamespaceInUrl, AppendOpenWhiskPathPrefix, EncodeBodyAsJson, AuthRequired); error: '%s'\n", routeUrl, trigger, err) + errStr := wski18n.T("Unable to create HTTP request for PUT '{{.route}}': {{.err}}", + map[string]interface{}{"route": routeUrl, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + t := new(Trigger) + resp, err := s.client.Do(req, &t, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return t, resp, nil + +} + +func (s *TriggerService) Get(triggerName string) (*Trigger, *http.Response, error) { + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' chars in the name won't be treated as the beginning of the query params + triggerName = (&url.URL{Path: triggerName}).String() + route := fmt.Sprintf("triggers/%s", triggerName) + + req, err := s.client.NewRequest("GET", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(GET, %s); error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for GET '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + t := new(Trigger) + resp, err := s.client.Do(req, &t, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return t, resp, nil + +} + +func (s *TriggerService) Delete(triggerName string) (*Trigger, *http.Response, error) { + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' 
chars in the name won't be treated as the beginning of the query params + triggerName = (&url.URL{Path: triggerName}).String() + route := fmt.Sprintf("triggers/%s", triggerName) + + req, err := s.client.NewRequest("DELETE", route, nil, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, "http.NewRequest(DELETE, %s); error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for DELETE '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + t := new(Trigger) + resp, err := s.client.Do(req, &t, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return t, resp, nil +} + +func (s *TriggerService) Fire(triggerName string, payload interface{}) (*Trigger, *http.Response, error) { + // Encode resource name as a path (with no query params) before inserting it into the URI + // This way any '?' chars in the name won't be treated as the beginning of the query params + triggerName = (&url.URL{Path: triggerName}).String() + route := fmt.Sprintf("triggers/%s", triggerName) + + req, err := s.client.NewRequest("POST", route, payload, IncludeNamespaceInUrl) + if err != nil { + Debug(DbgError, " http.NewRequest(POST, %s); error: '%s'\n", route, err) + errStr := wski18n.T("Unable to create HTTP request for POST '{{.route}}': {{.err}}", + map[string]interface{}{"route": route, "err": err}) + werr := MakeWskErrorFromWskError(errors.New(errStr), err, EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE) + return nil, nil, werr + } + + t := new(Trigger) + resp, err := s.client.Do(req, &t, ExitWithSuccessOnTimeout) + if err != nil { + Debug(DbgError, "s.client.Do() error - HTTP req %s; error: '%s'\n", req.URL.String(), err) + return nil, resp, err + } + + return t, resp, nil +} diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/util.go b/vendor/github.com/apache/openwhisk-client-go/whisk/util.go new file mode 100644 index 00000000000..683c6803b47 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/whisk/util.go @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package whisk + +import ( + "errors" + "fmt" + "net/url" + "reflect" + "strings" + + "github.com/apache/openwhisk-client-go/wski18n" + "github.com/fatih/color" + "github.com/google/go-querystring/query" + "github.com/hokaccha/go-prettyjson" +) + +// Sortable items are anything that needs to be sorted for listing purposes. 
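+//
+// As an illustrative sketch (not part of this package's API), a []Sortable can
+// be ordered with the standard library, e.g.:
+//
+//    sort.Slice(items, func(i, j int) bool { return items[i].Compare(items[j]) })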
+type Sortable interface {
+    // Compare(sortable) compares two Sortables and returns true
+    // if the item on which Compare is called sorts before toBeCompared.
+    // Sorts alphabetically by default; other parameters to sort by can be
+    // passed via sortByName.
+    Compare(toBeCompared Sortable) bool
+}
+
+// Printable items are anything that needs to be printed for listing purposes.
+type Printable interface {
+    ToHeaderString() string     // Prints header information of a Printable
+    ToSummaryRowString() string // Prints summary info of one Printable
+}
+
+// addRouteOptions adds the parameters in options as URL query parameters to route.
+// options must be a struct whose fields may contain "url" tags.
+func addRouteOptions(route string, options interface{}) (*url.URL, error) {
+    Debug(DbgInfo, "Adding options %+v to route '%s'\n", options, route)
+    u, err := url.Parse(route)
+    if err != nil {
+        Debug(DbgError, "url.Parse(%s) error: %s\n", route, err)
+        errStr := wski18n.T("Unable to parse URL '{{.route}}': {{.err}}",
+            map[string]interface{}{"route": route, "err": err})
+        werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, werr
+    }
+
+    v := reflect.ValueOf(options)
+    if v.Kind() == reflect.Ptr && v.IsNil() {
+        return u, nil
+    }
+
+    qs, err := query.Values(options)
+    if err != nil {
+        Debug(DbgError, "query.Values(%#v) error: %s\n", options, err)
+        errStr := wski18n.T("Unable to process URL query options '{{.options}}': {{.err}}",
+            map[string]interface{}{"options": fmt.Sprintf("%#v", options), "err": err})
+        werr := MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, NO_DISPLAY_USAGE)
+        return nil, werr
+    }
+
+    u.RawQuery = qs.Encode()
+    Debug(DbgInfo, "Returning route options '%s' from input struct %+v\n", u.String(), options)
+    return u, nil
+}
+
+func PrintJSON(v interface{}) {
+    output, _ := prettyjson.Marshal(v)
+    fmt.Fprintln(color.Output, string(output))
+}
+
+func GetURLBase(host string, path string) (*url.URL, error) {
+    if len(host) == 0 {
+        errMsg := wski18n.T("An API host must be provided.\n")
+        whiskErr := MakeWskError(errors.New(errMsg), EXIT_CODE_ERR_GENERAL,
+            DISPLAY_MSG, DISPLAY_USAGE)
+        return nil, whiskErr
+    }
+
+    if !strings.HasPrefix(host, "http") {
+        host = "https://" + host
+    }
+
+    urlBase := fmt.Sprintf("%s%s", host, path)
+    url, err := url.Parse(urlBase)
+
+    if len(url.Scheme) == 0 || len(url.Host) == 0 {
+        urlBase = fmt.Sprintf("https://%s%s", host, path)
+        url, err = url.Parse(urlBase)
+    }
+
+    return url, err
+}
diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/wskerror.go b/vendor/github.com/apache/openwhisk-client-go/whisk/wskerror.go
new file mode 100644
index 00000000000..f88fdaef9ed
--- /dev/null
+++ b/vendor/github.com/apache/openwhisk-client-go/whisk/wskerror.go
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package whisk
+
+const EXIT_CODE_ERR_GENERAL int = 1
+const EXIT_CODE_ERR_USAGE int = 2
+const EXIT_CODE_ERR_NETWORK int = 3
+const EXIT_CODE_ERR_HTTP_RESP int = 4
+const NOT_ALLOWED int = 149
+const EXIT_CODE_TIMED_OUT int = 202
+const EXIT_CODE_NOT_FOUND int = 148
+
+const DISPLAY_MSG bool = true
+const NO_DISPLAY_MSG bool = false
+const DISPLAY_USAGE bool = true
+const NO_DISPLAY_USAGE bool = false
+const NO_MSG_DISPLAYED bool = false
+const DISPLAY_PREFIX bool = true
+const NO_DISPLAY_PREFIX bool = false
+const APPLICATION_ERR bool = true
+const NO_APPLICATION_ERR bool = false
+const TIMED_OUT bool = true
+
+type WskError struct {
+    RootErr          error // Parent error
+    ExitCode         int   // Error code to be returned to the OS
+    DisplayMsg       bool  // When true, the error message should be displayed to console
+    MsgDisplayed     bool  // When true, the error message has already been displayed, don't display it again
+    DisplayUsage     bool  // When true, the CLI usage should be displayed before exiting
+    DisplayPrefix    bool  // When true, the CLI will prefix an error message with "error: "
+    ApplicationError bool  // When true, the error is a result of an application failure
+    TimedOut         bool  // When true, the error is a result of a timeout
+}
+
+/*
+Returns the error message contained inside a WskError. An error prefix may or may not be included,
+depending on the WskError's setting for DisplayPrefix.
+*/
+func (whiskError WskError) Error() string {
+    return whiskError.RootErr.Error()
+}
+
+/*
+Instantiate a WskError structure
+Parameters:
+    error - RootErr. Object implementing the error interface
+    int - ExitCode. Error code to be returned to the OS
+    bool - DisplayMsg. If true, the error message should be displayed on the console
+    bool - DisplayUsage. If true, the command usage syntax/help should be displayed on the console
+    bool - MsgDisplayed. If true, the error message has already been displayed on the console
+    bool - DisplayPrefix. If true, the error message will be prefixed with "error: "
+    bool - ApplicationError. If true, the error is the result of an application failure
+    bool - TimedOut. If true, the error is the result of a timeout
+*/
+func MakeWskError(err error, exitCode int, flags ...bool) (resWhiskError *WskError) {
+    resWhiskError = &WskError{
+        RootErr:          err,
+        ExitCode:         exitCode,
+        DisplayMsg:       false,
+        DisplayUsage:     false,
+        MsgDisplayed:     false,
+        DisplayPrefix:    true,
+        ApplicationError: false,
+        TimedOut:         false,
+    }
+
+    if len(flags) > 0 {
+        resWhiskError.DisplayMsg = flags[0]
+    }
+    if len(flags) > 1 {
+        resWhiskError.DisplayUsage = flags[1]
+    }
+    if len(flags) > 2 {
+        resWhiskError.MsgDisplayed = flags[2]
+    }
+    if len(flags) > 3 {
+        resWhiskError.DisplayPrefix = flags[3]
+    }
+    if len(flags) > 4 {
+        resWhiskError.ApplicationError = flags[4]
+    }
+    if len(flags) > 5 {
+        resWhiskError.TimedOut = flags[5]
+    }
+
+    return resWhiskError
+}
+
+/*
+Instantiate a WskError structure
+Parameters:
+    error - RootErr. Object implementing the error interface
+    WskError - WskError being wrapped. Its exit code will be used as this WskError's exit code. Ignored if nil
+    int - ExitCode. Used if the error object is nil or if the error object is not a WskError
+    bool - DisplayMsg. If true, the error message should be displayed on the console
+    bool - DisplayUsage. If true, the command usage syntax/help should be displayed on the console
+    bool - MsgDisplayed.
If true, the error message has been displayed on the console
+    bool - ApplicationError. If true, the error is a result of an application error
+    bool - TimedOut. If true, the error resulted from a timeout
+*/
+func MakeWskErrorFromWskError(baseError error, whiskError error, exitCode int, flags ...bool) (resWhiskError *WskError) {
+
+    // Get the exit code, and flags from the existing Whisk error
+    if whiskError != nil {
+
+        // Ensure the Whisk error is a pointer
+        switch errorType := whiskError.(type) {
+        case *WskError:
+            resWhiskError = errorType
+        case WskError:
+            resWhiskError = &errorType
+        }
+
+        if resWhiskError != nil {
+            exitCode, flags = getWhiskErrorProperties(resWhiskError, flags...)
+        }
+    }
+
+    return MakeWskError(baseError, exitCode, flags...)
+}
+
+/*
+Returns the settings from a WskError. Values returned will include ExitCode, DisplayMsg, DisplayUsage, MsgDisplayed,
+DisplayPrefix, ApplicationError, TimedOut.
+
+Parameters:
+    whiskError - WskError to examine.
+    flags - Boolean values that may override the WskError object's values for DisplayMsg, DisplayUsage,
+        MsgDisplayed, ApplicationError, TimedOut.
+*/
+func getWhiskErrorProperties(whiskError *WskError, flags ...bool) (int, []bool) {
+    if len(flags) > 0 {
+        flags[0] = whiskError.DisplayMsg
+    } else {
+        flags = append(flags, whiskError.DisplayMsg)
+    }
+
+    if len(flags) > 1 {
+        flags[1] = whiskError.DisplayUsage || flags[1]
+    } else {
+        flags = append(flags, whiskError.DisplayUsage)
+    }
+
+    if len(flags) > 2 {
+        flags[2] = whiskError.MsgDisplayed || flags[2]
+    } else {
+        flags = append(flags, whiskError.MsgDisplayed)
+    }
+
+    if len(flags) > 3 {
+        flags[3] = whiskError.DisplayPrefix || flags[3]
+    } else {
+        flags = append(flags, whiskError.DisplayPrefix)
+    }
+
+    if len(flags) > 4 {
+        flags[4] = whiskError.ApplicationError || flags[4]
+    } else {
+        flags = append(flags, whiskError.ApplicationError)
+    }
+
+    if len(flags) > 5 {
+        flags[5] = whiskError.TimedOut || flags[5]
+    } else {
+        flags = append(flags, whiskError.TimedOut)
+    }
+
+    return whiskError.ExitCode, flags
+}
diff --git a/vendor/github.com/apache/openwhisk-client-go/whisk/wskprops.go b/vendor/github.com/apache/openwhisk-client-go/whisk/wskprops.go
new file mode 100644
index 00000000000..22e301faec5
--- /dev/null
+++ b/vendor/github.com/apache/openwhisk-client-go/whisk/wskprops.go
@@ -0,0 +1,335 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package whisk + +import ( + "bufio" + "errors" + "fmt" + "github.com/apache/openwhisk-client-go/wski18n" + "io/ioutil" + "net/url" + "os" + "strings" +) + +const ( + OPENWHISK_HOME = "OPENWHISK_HOME" + HOMEPATH = "HOME" + DEFAULT_LOCAL_CONFIG = ".wskprops" + OPENWHISK_PROPERTIES = "whisk.properties" + TEST_AUTH_FILE = "testing.auth" + OPENWHISK_PRO = "whisk.api.host.proto" + OPENWHISK_PORT = "whisk.api.host.port" + OPENWHISK_HOST = "whisk.api.host.name" + DEFAULT_VERSION = "v1" + DEFAULT_NAMESPACE = "_" + + APIGW_ACCESS_TOKEN = "APIGW_ACCESS_TOKEN" + APIGW_TENANT_ID = "APIGW_TENANT_ID" + APIHOST = "APIHOST" + APIVERSION = "APIVERSION" + AUTH = "AUTH" + CERT = "CERT" + KEY = "KEY" + NAMESPACE = "NAMESPACE" + + DEFAULT_SOURCE = "wsk props" + WSKPROP = "wsk props" + WHISK_PROPERTY = "whisk.properties" +) + +type Wskprops struct { + APIGWSpaceSuid string + APIGWTenantId string + APIHost string + Apiversion string + AuthAPIGWKey string + AuthKey string + Cert string + Key string + Namespace string + Source string +} + +func GetUrlBase(host string) (*url.URL, error) { + urlBase := fmt.Sprintf("%s/api", host) + url, err := url.Parse(urlBase) + + if err != nil || len(url.Scheme) == 0 || len(url.Host) == 0 { + urlBase = fmt.Sprintf("https://%s/api", host) + url, err = url.Parse(urlBase) + } + + return url, err +} + +func convertWskpropsToConfig(dep *Wskprops) *Config { + var config Config + config.Host = dep.APIHost + if len(config.Host) != 0 { + v, err := GetUrlBase(config.Host) + if err == nil { + config.BaseURL = v + } + } + config.Namespace = dep.Namespace + config.Cert = dep.Cert + config.Key = dep.Key + config.AuthToken = dep.AuthKey + + config.Version = dep.Apiversion + config.Verbose = false + config.Debug = false + config.Insecure = true + + return &config +} + +func GetDefaultConfigFromProperties(pi Properties) (*Config, error) { + var config *Config + dep, e := GetDefaultWskProp(pi) + config = convertWskpropsToConfig(dep) + return config, e +} + +func GetConfigFromWhiskProperties(pi Properties) (*Config, error) { + var config *Config + dep, e := GetWskPropFromWhiskProperty(pi) + config = convertWskpropsToConfig(dep) + return config, e +} + +func GetConfigFromWskprops(pi Properties, path string) (*Config, error) { + var config *Config + dep, e := GetWskPropFromWskprops(pi, path) + config = convertWskpropsToConfig(dep) + return config, e +} + +var GetDefaultWskProp = func(pi Properties) (*Wskprops, error) { + var dep *Wskprops + dep = pi.GetPropsFromWskprops("") + error := ValidateWskprops(dep) + if error != nil { + dep_whisk := pi.GetPropsFromWhiskProperties() + error_whisk := ValidateWskprops(dep_whisk) + if error_whisk != nil { + return dep, error + } else { + return dep_whisk, error_whisk + } + } + return dep, error +} + +var GetWskPropFromWskprops = func(pi Properties, path string) (*Wskprops, error) { + var dep *Wskprops + dep = pi.GetPropsFromWskprops(path) + error := ValidateWskprops(dep) + return dep, error +} + +var GetWskPropFromWhiskProperty = func(pi Properties) (*Wskprops, error) { + var dep *Wskprops + dep = pi.GetPropsFromWhiskProperties() + error := ValidateWskprops(dep) + return dep, error +} + +type Properties interface { + GetPropsFromWskprops(string) *Wskprops + GetPropsFromWhiskProperties() *Wskprops +} + +type PropertiesImp struct { + OsPackage OSPackage +} + +func (pi PropertiesImp) GetPropsFromWskprops(path string) *Wskprops { + dep := GetDefaultWskprops(WSKPROP) + + var wskpropsPath string + if path != "" { + wskpropsPath = path + } else { + wskpropsPath = 
pi.OsPackage.Getenv(HOMEPATH, "") + "/" + DEFAULT_LOCAL_CONFIG
+    }
+    results, err := ReadProps(wskpropsPath)
+
+    if err == nil {
+
+        dep.APIHost = GetValue(results, APIHOST, dep.APIHost)
+
+        dep.AuthKey = GetValue(results, AUTH, dep.AuthKey)
+        dep.Namespace = GetValue(results, NAMESPACE, dep.Namespace)
+        dep.AuthAPIGWKey = GetValue(results, APIGW_ACCESS_TOKEN, dep.AuthAPIGWKey)
+        dep.APIGWTenantId = GetValue(results, APIGW_TENANT_ID, dep.APIGWTenantId)
+        if len(dep.AuthKey) > 0 {
+            dep.APIGWSpaceSuid = strings.Split(dep.AuthKey, ":")[0]
+        }
+        dep.Apiversion = GetValue(results, APIVERSION, dep.Apiversion)
+        dep.Key = GetValue(results, KEY, dep.Key)
+        dep.Cert = GetValue(results, CERT, dep.Cert)
+    }
+
+    return dep
+}
+
+func (pi PropertiesImp) GetPropsFromWhiskProperties() *Wskprops {
+    dep := GetDefaultWskprops(WHISK_PROPERTY)
+    path := pi.OsPackage.Getenv(OPENWHISK_HOME, "") + "/" + OPENWHISK_PROPERTIES
+    results, err := ReadProps(path)
+
+    if err == nil {
+        // TODO Determine why a hardcoded "testing.auth" file is used here; is this only for unit tests, and is it documented?
+        authPath := GetValue(results, TEST_AUTH_FILE, "")
+        b, err := ioutil.ReadFile(authPath)
+        if err == nil {
+            dep.AuthKey = strings.TrimSpace(string(b))
+        }
+        dep.APIHost = GetValue(results, OPENWHISK_HOST, "")
+        dep.Namespace = DEFAULT_NAMESPACE
+        if len(dep.AuthKey) > 0 {
+            dep.APIGWSpaceSuid = strings.Split(dep.AuthKey, ":")[0]
+        }
+    }
+    return dep
+}
+
+var ValidateWskprops = func(wskprops *Wskprops) error {
+    // At least two fields, APIHost and AuthKey, are mandatory for a valid Wskprops.
+    errStr := ""
+    if len(wskprops.APIHost) == 0 {
+        if wskprops.Source == WHISK_PROPERTY {
+            errStr = wski18n.T("OpenWhisk API host is missing (Please configure WHISK_APIHOST in .wskprops under the system HOME directory.)")
+        } else {
+            errStr = wski18n.T("OpenWhisk API host is missing (Please configure whisk.api.host.proto, whisk.api.host.name and whisk.api.host.port in whisk.properties under the OPENWHISK_HOME directory.)")
+        }
+        return MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, DISPLAY_USAGE)
+    } else {
+        if len(wskprops.AuthKey) == 0 {
+            if wskprops.Source == WHISK_PROPERTY {
+                errStr = wski18n.T("Authentication key is missing (Please configure AUTH in .wskprops under the system HOME directory.)")
+            } else {
+                errStr = wski18n.T("Authentication key is missing (Please configure testing.auth as the path of the authentication key file in whisk.properties under the OPENWHISK_HOME directory.)")
+            }
+            return MakeWskError(errors.New(errStr), EXIT_CODE_ERR_GENERAL, DISPLAY_MSG, DISPLAY_USAGE)
+        } else {
+            return nil
+        }
+    }
+}
+
+type OSPackage interface {
+    Getenv(key string, defaultValue string) string
+}
+
+type OSPackageImp struct{}
+
+func (osPackage OSPackageImp) Getenv(key string, defaultValue string) string {
+    value := os.Getenv(key)
+    if len(value) == 0 {
+        return defaultValue
+    }
+    return value
+}
+
+func GetDefaultConfig() (*Config, error) {
+    pi := PropertiesImp{
+        OsPackage: OSPackageImp{},
+    }
+    return GetDefaultConfigFromProperties(pi)
+}
+
+func GetWhiskPropertiesConfig() (*Config, error) {
+    pi := PropertiesImp{
+        OsPackage: OSPackageImp{},
+    }
+    return GetConfigFromWhiskProperties(pi)
+}
+
+func GetProperties() Properties {
+    return PropertiesImp{
+        OsPackage: OSPackageImp{},
+    }
+}
+
+func GetWskpropsConfig(path string) (*Config, error) {
+    pi := GetProperties()
+    return GetConfigFromWskprops(pi, path)
+}
+
+func GetDefaultWskprops(source string) *Wskprops {
+    if len(source) == 0 {
+        source =
DEFAULT_SOURCE + } + + dep := Wskprops{ + APIHost: "", + AuthKey: "", + Namespace: DEFAULT_NAMESPACE, + AuthAPIGWKey: "", + APIGWTenantId: "", + APIGWSpaceSuid: "", + Apiversion: DEFAULT_VERSION, + Key: "", + Cert: "", + Source: source, + } + return &dep +} + +func GetValue(StoredValues map[string]string, key string, defaultvalue string) string { + if val, ok := StoredValues[key]; ok { + return val + } else { + return defaultvalue + } +} + +func ReadProps(path string) (map[string]string, error) { + + props := map[string]string{} + + file, err := os.Open(path) + if err != nil { + return props, err + } + defer file.Close() + + lines := []string{} + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + props = map[string]string{} + for _, line := range lines { + kv := strings.Split(line, "=") + if len(kv) != 2 { + continue + } + key := strings.TrimSpace(kv[0]) + value := strings.TrimSpace(kv[1]) + props[key] = value + } + + return props, nil + +} diff --git a/vendor/github.com/apache/openwhisk-client-go/wski18n/detection.go b/vendor/github.com/apache/openwhisk-client-go/wski18n/detection.go new file mode 100644 index 00000000000..c3866ddd345 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/wski18n/detection.go @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wski18n + +import "github.com/cloudfoundry/jibber_jabber" + +type Detector interface { + DetectLocale() string + DetectLanguage() string +} + +type JibberJabberDetector struct{} + +func (d *JibberJabberDetector) DetectLocale() string { + userLocale, err := jibber_jabber.DetectIETF() + if err != nil { + userLocale = "" + } + return userLocale +} + +func (d *JibberJabberDetector) DetectLanguage() string { + lang, err := jibber_jabber.DetectLanguage() + if err != nil { + lang = "" + } + return lang +} diff --git a/vendor/github.com/apache/openwhisk-client-go/wski18n/i18n.go b/vendor/github.com/apache/openwhisk-client-go/wski18n/i18n.go new file mode 100644 index 00000000000..02a25d3ead1 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/wski18n/i18n.go @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wski18n + +import ( + "path/filepath" + "strings" + + goi18n "github.com/nicksnyder/go-i18n/i18n" +) + +const ( + DEFAULT_LOCALE = "en_US" +) + +var SUPPORTED_LOCALES = []string{ + "de_DE", + "en_US", + "es_ES", + "fr_FR", + "it_IT", + "ja_JA", + "ko_KR", + "pt_BR", + "zh_Hans", + "zh_Hant", +} + +var resourcePath = filepath.Join("wski18n", "resources") + +func GetResourcePath() string { + return resourcePath +} + +func SetResourcePath(path string) { + resourcePath = path +} + +var T goi18n.TranslateFunc +var curLocale string + +func init() { + curLocale = Init(new(JibberJabberDetector)) +} + +func CurLocale() string { + return curLocale +} + +func Locale(detector Detector) string { + + // Use default locale until strings are translated + /*sysLocale := normalize(detector.DetectLocale()) + if isSupported(sysLocale) { + return sysLocale + } + + locale := defaultLocaleForLang(detector.DetectLanguage()) + if locale != "" { + return locale + }*/ + + return DEFAULT_LOCALE +} + +func Init(detector Detector) string { + l := Locale(detector) + InitWithLocale(l) + return l +} + +func InitWithLocale(locale string) { + err := loadFromAsset(locale) + if err != nil { + panic(err) + } + T = goi18n.MustTfunc(locale) +} + +func loadFromAsset(locale string) (err error) { + assetName := locale + ".all.json" + assetKey := filepath.Join(resourcePath, assetName) + bytes, err := Asset(assetKey) + if err != nil { + return + } + err = goi18n.ParseTranslationFileBytes(assetName, bytes) + return +} + +func normalize(locale string) string { + locale = strings.ToLower(strings.Replace(locale, "-", "_", 1)) + for _, l := range SUPPORTED_LOCALES { + if strings.EqualFold(locale, l) { + return l + } + } + switch locale { + case "zh_cn", "zh_sg": + return "zh_Hans" + case "zh_hk", "zh_tw": + return "zh_Hant" + } + return locale +} + +func isSupported(locale string) bool { + for _, l := range SUPPORTED_LOCALES { + if strings.EqualFold(locale, l) { + return true + } + } + return false +} + +func defaultLocaleForLang(lang string) string { + if lang != "" { + lang = strings.ToLower(lang) + for _, l := range SUPPORTED_LOCALES { + if lang == LangOfLocale(l) { + return l + } + } + } + return "" +} + +func LangOfLocale(locale string) string { + if len(locale) < 2 { + return "" + } + return locale[0:2] +} diff --git a/vendor/github.com/apache/openwhisk-client-go/wski18n/i18n_resources.go b/vendor/github.com/apache/openwhisk-client-go/wski18n/i18n_resources.go new file mode 100644 index 00000000000..aca3ea60c84 --- /dev/null +++ b/vendor/github.com/apache/openwhisk-client-go/wski18n/i18n_resources.go @@ -0,0 +1,463 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Code generated by go-bindata.
+// sources:
+// wski18n/resources/de_DE.all.json
+// wski18n/resources/en_US.all.json
+// wski18n/resources/es_ES.all.json
+// wski18n/resources/fr_FR.all.json
+// wski18n/resources/it_IT.all.json
+// wski18n/resources/ja_JA.all.json
+// wski18n/resources/ko_KR.all.json
+// wski18n/resources/pt_BR.all.json
+// wski18n/resources/zh_Hans.all.json
+// wski18n/resources/zh_Hant.all.json
+// DO NOT EDIT!
+
+package wski18n
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+func bindataRead(data []byte, name string) ([]byte, error) {
+	gz, err := gzip.NewReader(bytes.NewBuffer(data))
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, gz)
+	clErr := gz.Close()
+
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+	if clErr != nil {
+		return nil, clErr
+	}
+
+	return buf.Bytes(), nil
+}
+
+type asset struct {
+	bytes []byte
+	info  os.FileInfo
+}
+
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+func (fi bindataFileInfo) IsDir() bool {
+	return false
+}
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+var _wski18nResourcesDe_deAllJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00")
+
+func wski18nResourcesDe_deAllJsonBytes() ([]byte, error) {
+	return bindataRead(
+		_wski18nResourcesDe_deAllJson,
+		"wski18n/resources/de_DE.all.json",
+	)
+}
+
+func wski18nResourcesDe_deAllJson() (*asset, error) {
+	bytes, err := wski18nResourcesDe_deAllJsonBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "wski18n/resources/de_DE.all.json", size: 0, mode: os.FileMode(420), modTime: time.Unix(1510603210, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _wski18nResourcesEn_usAllJson = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x58\x5f\x6f\xdb\x36\x10\x7f\xf7\xa7\x38\xf8\xc5\x19\xe0\x0a\x7b\xd9\xc3\xba\xa7\xa0\x33\xe6\xa0\x5d\x63\xac\xce\x3a\x60\x19\x06\x46\x3c\x5b\x87\xc8\xa4\x7a\xa4\x9c\xb9\x86\xbe\xfb\x40\xca\x72\xd2\xc4\xb4\xfe\x58\x49\xf3\x64\x81\xe6\xfd\xee\xc7\xbb\xe3\xfd\xe1\xdf\x03\x80\xed\x00\x00\x60\x48\x72\xf8\x16\x86\x57\x4a\xdc\xa4\x08\x56\x83\x90\x12\x58\xe7\x16\x41\x67\x96\xb4\x32\x30\xda\x6e\xa3\xdd\x77\x51\x8c\x86\xe3\x52\xce\xb2\x50\x26\x15\x6e\xb9\x06\xe0\x2d\x3c\x04\x18\x0e\x00\x8a\x71\x58\x7f\xcc\x28\x2c\xc2\x74\x3e\x9f\x01\xe3\x97\x1c\x8d\x85\x85\x66\x98\x5d\xcd\x3d\x13\x0f\x5d\x14\x23\x8f\x8a\xcc\x45\x51\xcb\xa8\x03\x64\x47\x92\xbf\x4d\x7a\x27\x79\x04\xb2\x23\xc9\x5f\x27\x1f\x26\xf3\x49\xdf\x3c\x8f\xa3\x76\x75\xfa\xe5\xa7\xfe\xbd\x7e\x04\xb3\x86\xa6\xc8\x32\x54\x32\x70\x31\xdc\x86\xab\x3f\x3e\xec\x62\xbf\x23\xe9\xd3\x35\x34\xb3\x74\x65\x10\x07\xe7\x80\x72\x4e\x3b\x59\xb7\x16\xe7\x20\x9d\x0b\xb5\x16\x29\xc9\xae\x2c\x1a\x8b\x1f\x54\x3e\x61\xd6\x0c\xa8\x62\x2d\x49\x2d\xf7\x20\x37\x5a\x6e\x6a\x35\x37\x93\x3d\xa2\x96\x14\x59\x12\x29\x7d\x7d\x20\xde\x50\x6b\x8d\x68\x5d\xe8\x4a\x09\x36\xd9\x5d\x07\x91\xdb\x04\x95\xa5\xd8\xab\x80\x04\x85\x44\x6e\x13\xa5\x6d\xc0\x0e\x12\x3b\xcf\x6d\xa2\x99\xbe\x96\x32\xb7\xb8\x01\x32\xa0\xb4\x85\x58\xab\x05\x2d\x73\x46\x09\x67\x6f\xde\x38\x6c\xf7\x8f\x3b\x2e\x31\xca\x1f\x02\xd4\x3a\xc3\x1d\x26\xa7\xe0\x7c\x76\x01\x89\x36\x16\x56\xb9\xf3\x2f\x42\xc6\x7a\x4d\x12\x65\x74\xad\x42\x1c\x6a\xa4\x1a\x38\xe8\xe5\xeb\xee\x3b\xbd\x5a\x09\x25\x61\x21\x28\x45\x09\x32\x2f\xa1\x54\xe9\x5a\xb7\x9a\x33\x06\x54\x37\x93\x3d\xa8\xf6\xa3\x06\x52\x16\x79\x21\xe2\x7b\x23\xfd\x02\x4a\x57\xe9\xda\x64\x5a\x19\xf4\x17\x0b\xf0\xbf\x0c\x63\x8b\x32\x40\xa3\x1b\x56\x3b\x6b\x78\x05\x4a\xa4\x5d\x2d\xf2\x44\xfe\xa0\xfa\x79\x82\xb0\xd0\x69\xaa\xef\xdc\x15\x17\x59\x96\x56\x97\x0a\x7d\x06\xb8\x13\x2e\x74\x63\xa4\x35\xca\xda\xdb\xda\x11\xec\xf5\xe5\xeb\x57\x9b\xc3\xee\xb1\xdc\x81\x32\xc1\xa6\x2c\xca\x6b\x64\x43\x5a\xb5\xab\xa7\x0d\x20\x4e\xec\x49\xbb\x96\xf8\xe6\x80\xdd\x09\xf6\xc3\xaa\x39\x95\x9b\x9c\xd2\x6f\xe2\xb1\x05\x81\x63\xb2\xcd\x2c\xe0\x0c\xf8\x84\xff\x69\x5d\x6e\x1b\xc8\x66\x24\xdd\x70\xd4\x33\xc9\x36\x90\xcd\x48\xee\x86\x8e\x9e\x79\xb6\x44\x6d\x68\x4f\x37\x76\xf4\x6d\xd0\x36\x98\x81\xcc\xbe\x2b\x4c\xbe\x28\x44\x50\xa5\x6a\x63\xc5\xbe\x91\xf0\x80\x7e\xa1\x28\x46\x11\xfc\xe9\x37\x54\x4d\x8a\x60\x84\xeb\xa1\x88\x2d\xad\xf1\x7a\x08\xae\xfa\x5d\x0f\x49\x55\x0b\x51\xb0\x24\x3c\xb7\xde\x1a\xaf\x94\xb9\xb6\xaa\x44\x1d\x5c\x50\x0b\x50\x47\x80\x75\x8c\xc6\x78\x84\x2f\x39\xf2\x26\xd0\xf7\xb5\xa1\xd4\x1e\xf2\x20\xc9\xed\x36\x5a\x99\x65\x51\xc0\x59\xac\x25\xba\xcd\xee\xb7\x28\x42\xdd\x77\x78\x7f\xb0\xcd\x89\xb5\x52\x18\x7b\x2f\x97\xcd\xd2\x18\x34\x83\xa5\x15\x4a\xd0\xb9\x8d\xe0\xcc\x87\xb5\xf3\x7e\x6e\xa0\x19\x8d\xd3\x71\x5b\xcf\xce\x63\xb8\xc1\x58\xe4\x06\xe1\x32\x43\xf5\x39\x21\x73\x7b\x3f\x04\x90\x81\x15\x19\x43\x6a\xd9\x61\x9a\x6e\x8a\x7c\x02\x65\xd7\xfd\x88\x8c\x4a\x48\x17\x21\xee\xa3\x28\x46\x0e\x9e\xca\xfb\x78\xd2\x83\x40\x57\x4d\x07\x8f\x74\xd4\x0a\x70\x36\x4b\x51\x18\xbc\x1f\xf7\xe0\xf3\xf4\xe2\xd3\xfb\x7f\xcf\x67\x17\x53\x97\x23\x49\x41\x74\x67\x6e\x33\xd6\x99\x81\x5c\x49\x64\xcf\xc9\x6c\x8c\xc5\x15\x4c\x2f\x7f\x9f\x80\x24\xc6\xd8\x6a\xde\x44\xa1\xf8\x7a\x51\x0a\xbd\x18\xe1\xce\xed\x8d\x44\x46\xde\xe0\x51\xc6\xda\xea\xf1\xe3\x55\x25\x56\xe8\xf3\xe7\xe3\xdd\x9a\xad\x63\x5d\x2e\x3b\xde\xc8\x96\xf0\x21\xf9\xcb\xd9\xe4\x63\x79\xca\x67\x32\xe1\x77\x3c\x40\xf0\xd9\xe2\xc1\x9c\xb0\x7b\x68\x08\xf3\x3f\xbf\x9a\x4f\xfb\x09\xbe\x97\xd0\xdc\xcb\x91\x2d\x1
a\x4b\x6a\x19\xf9\xb7\x16\x61\xbc\xc2\x4c\xd8\x04\xf4\xa2\xcc\x03\x4f\xf1\x16\x94\x62\xdf\x91\xf6\xea\x69\xd7\x24\xee\x54\x8b\x72\x3e\xfd\xeb\xa7\x1f\x7f\xf6\xea\x32\x41\x5c\x3d\x29\xd8\x6f\x86\x7b\x46\x61\xb4\x6a\x91\xab\x4f\x02\x0f\xd6\xf4\x77\xc8\x76\x67\x94\xc7\x8f\x6f\x11\x3c\x35\x78\x82\x7b\x77\xec\x25\xc3\xaf\x6b\x3d\x2a\x08\x1e\xe0\xfd\xde\xa9\x2d\xe1\x2b\xc1\xe3\xf4\x7b\x81\x77\xe4\x07\xff\x0c\xfe\x0f\x00\x00\xff\xff\xab\x99\xa5\xc1\xc1\x1b\x00\x00") + +func wski18nResourcesEn_usAllJsonBytes() ([]byte, error) { + return bindataRead( + _wski18nResourcesEn_usAllJson, + "wski18n/resources/en_US.all.json", + ) +} + +func wski18nResourcesEn_usAllJson() (*asset, error) { + bytes, err := wski18nResourcesEn_usAllJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "wski18n/resources/en_US.all.json", size: 7105, mode: os.FileMode(420), modTime: time.Unix(1510603813, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _wski18nResourcesEs_esAllJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func wski18nResourcesEs_esAllJsonBytes() ([]byte, error) { + return bindataRead( + _wski18nResourcesEs_esAllJson, + "wski18n/resources/es_ES.all.json", + ) +} + +func wski18nResourcesEs_esAllJson() (*asset, error) { + bytes, err := wski18nResourcesEs_esAllJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "wski18n/resources/es_ES.all.json", size: 0, mode: os.FileMode(420), modTime: time.Unix(1510603210, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _wski18nResourcesFr_frAllJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func wski18nResourcesFr_frAllJsonBytes() ([]byte, error) { + return bindataRead( + _wski18nResourcesFr_frAllJson, + "wski18n/resources/fr_FR.all.json", + ) +} + +func wski18nResourcesFr_frAllJson() (*asset, error) { + bytes, err := wski18nResourcesFr_frAllJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "wski18n/resources/fr_FR.all.json", size: 0, mode: os.FileMode(420), modTime: time.Unix(1510603210, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _wski18nResourcesIt_itAllJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func wski18nResourcesIt_itAllJsonBytes() ([]byte, error) { + return bindataRead( + _wski18nResourcesIt_itAllJson, + "wski18n/resources/it_IT.all.json", + ) +} + +func wski18nResourcesIt_itAllJson() (*asset, error) { + bytes, err := wski18nResourcesIt_itAllJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "wski18n/resources/it_IT.all.json", size: 0, mode: os.FileMode(420), modTime: time.Unix(1510603210, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _wski18nResourcesJa_jaAllJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func wski18nResourcesJa_jaAllJsonBytes() ([]byte, error) { + return bindataRead( + _wski18nResourcesJa_jaAllJson, + "wski18n/resources/ja_JA.all.json", + ) +} + +func wski18nResourcesJa_jaAllJson() (*asset, error) { + bytes, err := wski18nResourcesJa_jaAllJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "wski18n/resources/ja_JA.all.json", size: 0, mode: os.FileMode(420), modTime: time.Unix(1510603210, 0)} + a := &asset{bytes: bytes, info: 
info} + return a, nil +} + +var _wski18nResourcesKo_krAllJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func wski18nResourcesKo_krAllJsonBytes() ([]byte, error) { + return bindataRead( + _wski18nResourcesKo_krAllJson, + "wski18n/resources/ko_KR.all.json", + ) +} + +func wski18nResourcesKo_krAllJson() (*asset, error) { + bytes, err := wski18nResourcesKo_krAllJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "wski18n/resources/ko_KR.all.json", size: 0, mode: os.FileMode(420), modTime: time.Unix(1510603210, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _wski18nResourcesPt_brAllJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func wski18nResourcesPt_brAllJsonBytes() ([]byte, error) { + return bindataRead( + _wski18nResourcesPt_brAllJson, + "wski18n/resources/pt_BR.all.json", + ) +} + +func wski18nResourcesPt_brAllJson() (*asset, error) { + bytes, err := wski18nResourcesPt_brAllJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "wski18n/resources/pt_BR.all.json", size: 0, mode: os.FileMode(420), modTime: time.Unix(1510603210, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _wski18nResourcesZh_hansAllJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func wski18nResourcesZh_hansAllJsonBytes() ([]byte, error) { + return bindataRead( + _wski18nResourcesZh_hansAllJson, + "wski18n/resources/zh_Hans.all.json", + ) +} + +func wski18nResourcesZh_hansAllJson() (*asset, error) { + bytes, err := wski18nResourcesZh_hansAllJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "wski18n/resources/zh_Hans.all.json", size: 0, mode: os.FileMode(420), modTime: time.Unix(1510603210, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _wski18nResourcesZh_hantAllJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func wski18nResourcesZh_hantAllJsonBytes() ([]byte, error) { + return bindataRead( + _wski18nResourcesZh_hantAllJson, + "wski18n/resources/zh_Hant.all.json", + ) +} + +func wski18nResourcesZh_hantAllJson() (*asset, error) { + bytes, err := wski18nResourcesZh_hantAllJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "wski18n/resources/zh_Hant.all.json", size: 0, mode: os.FileMode(420), modTime: time.Unix(1510603210, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. 
+// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "wski18n/resources/de_DE.all.json": wski18nResourcesDe_deAllJson, + "wski18n/resources/en_US.all.json": wski18nResourcesEn_usAllJson, + "wski18n/resources/es_ES.all.json": wski18nResourcesEs_esAllJson, + "wski18n/resources/fr_FR.all.json": wski18nResourcesFr_frAllJson, + "wski18n/resources/it_IT.all.json": wski18nResourcesIt_itAllJson, + "wski18n/resources/ja_JA.all.json": wski18nResourcesJa_jaAllJson, + "wski18n/resources/ko_KR.all.json": wski18nResourcesKo_krAllJson, + "wski18n/resources/pt_BR.all.json": wski18nResourcesPt_brAllJson, + "wski18n/resources/zh_Hans.all.json": wski18nResourcesZh_hansAllJson, + "wski18n/resources/zh_Hant.all.json": wski18nResourcesZh_hantAllJson, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "wski18n": {nil, map[string]*bintree{ + "resources": {nil, map[string]*bintree{ + "de_DE.all.json": {wski18nResourcesDe_deAllJson, map[string]*bintree{}}, + "en_US.all.json": {wski18nResourcesEn_usAllJson, map[string]*bintree{}}, + "es_ES.all.json": {wski18nResourcesEs_esAllJson, map[string]*bintree{}}, + "fr_FR.all.json": {wski18nResourcesFr_frAllJson, map[string]*bintree{}}, + "it_IT.all.json": {wski18nResourcesIt_itAllJson, map[string]*bintree{}}, + "ja_JA.all.json": {wski18nResourcesJa_jaAllJson, map[string]*bintree{}}, + "ko_KR.all.json": {wski18nResourcesKo_krAllJson, map[string]*bintree{}}, + "pt_BR.all.json": {wski18nResourcesPt_brAllJson, map[string]*bintree{}}, + "zh_Hans.all.json": {wski18nResourcesZh_hansAllJson, map[string]*bintree{}}, + "zh_Hant.all.json": {wski18nResourcesZh_hantAllJson, map[string]*bintree{}}, + }}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md index 39121ea8e37..2c3fc35eb64 100644 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ b/vendor/github.com/asaskevich/govalidator/README.md @@ -177,6 +177,7 @@ func IsPrintableASCII(str string) bool func IsRFC3339(str string) bool func IsRFC3339WithoutZone(str string) bool func IsRGBcolor(str string) bool +func IsRegex(str string) bool func IsRequestURI(rawurl string) bool func IsRequestURL(rawurl string) bool func IsRipeMD128(str string) bool @@ -203,6 +204,7 @@ func IsUUID(str string) bool func IsUUIDv3(str string) bool func IsUUIDv4(str string) bool func IsUUIDv5(str string) bool +func IsULID(str string) bool func IsUnixTime(str string) bool func IsUpperCase(str string) bool func IsVariableWidth(str string) bool @@ -382,6 +384,7 @@ Here is a list of available validators for struct fields (validator - used funct "rfc3339WithoutZone": IsRFC3339WithoutZone, "ISO3166Alpha2": IsISO3166Alpha2, "ISO3166Alpha3": IsISO3166Alpha3, +"ulid": IsULID, ``` Validators with parameters diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go index 106ed94f80a..bafc3765ea1 100644 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ b/vendor/github.com/asaskevich/govalidator/patterns.go @@ -42,6 +42,8 @@ const ( SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` UnixPath string = `^(/[^/\x00]*)+/?$` + WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$` Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" tagName string = "valid" hasLowerCase string = ".*[[:lower:]]" @@ -50,6 +52,7 @@ const ( hasWhitespaceOnly string = "^[[:space:]]+$" IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" IMSI string = "^\\d{14,15}$" + E164 string = `^\+?[1-9]\d{1,14}$` ) // Used by IsFilePath func @@ -97,6 +100,8 @@ var ( rxSSN = regexp.MustCompile(SSN) rxWinPath = regexp.MustCompile(WinPath) rxUnixPath = regexp.MustCompile(UnixPath) + rxARWinPath = regexp.MustCompile(WinARPath) + rxARUnixPath = regexp.MustCompile(UnixARPath) rxSemver = regexp.MustCompile(Semver) rxHasLowerCase = regexp.MustCompile(hasLowerCase) rxHasUpperCase = regexp.MustCompile(hasUpperCase) @@ -104,4 +109,5 @@ var ( rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) rxIMEI = regexp.MustCompile(IMEI) rxIMSI = regexp.MustCompile(IMSI) + rxE164 = regexp.MustCompile(E164) ) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go index 54218bf05a2..c573abb51af 100644 --- a/vendor/github.com/asaskevich/govalidator/types.go +++ b/vendor/github.com/asaskevich/govalidator/types.go @@ -165,6 +165,7 @@ var TagMap = map[string]Validator{ "ISO3166Alpha3": IsISO3166Alpha3, "ISO4217": IsISO4217, "IMEI": IsIMEI, + "ulid": IsULID, } // ISO3166Entry stores country codes diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go index 5c918fc4bc7..46ecfc84a4c 100644 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ b/vendor/github.com/asaskevich/govalidator/validator.go 
@@ -361,9 +361,96 @@ func IsUUID(str string) bool {
 	return rxUUID.MatchString(str)
 }
 
+// Byte to index table for O(1) lookups when unmarshaling.
+// We use 0xFF as sentinel value for invalid indexes.
+var ulidDec = [...]byte{
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
+	0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
+	0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
+	0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
+	0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
+	0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
+	0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
+	0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+}
+
+// ulidEncodedSize is the length of a text encoded ULID.
+const ulidEncodedSize = 26
+
+// IsULID checks if the string is a ULID.
+//
+// Implementation adapted from:
+// https://github.com/oklog/ulid (Apache-2.0 License)
+//
+func IsULID(str string) bool {
+	// Check if a base32 encoded ULID is the right length.
+	if len(str) != ulidEncodedSize {
+		return false
+	}
+
+	// Check if all the characters in a base32 encoded ULID are part of the
+	// expected base32 character set.
+	if ulidDec[str[0]] == 0xFF ||
+		ulidDec[str[1]] == 0xFF ||
+		ulidDec[str[2]] == 0xFF ||
+		ulidDec[str[3]] == 0xFF ||
+		ulidDec[str[4]] == 0xFF ||
+		ulidDec[str[5]] == 0xFF ||
+		ulidDec[str[6]] == 0xFF ||
+		ulidDec[str[7]] == 0xFF ||
+		ulidDec[str[8]] == 0xFF ||
+		ulidDec[str[9]] == 0xFF ||
+		ulidDec[str[10]] == 0xFF ||
+		ulidDec[str[11]] == 0xFF ||
+		ulidDec[str[12]] == 0xFF ||
+		ulidDec[str[13]] == 0xFF ||
+		ulidDec[str[14]] == 0xFF ||
+		ulidDec[str[15]] == 0xFF ||
+		ulidDec[str[16]] == 0xFF ||
+		ulidDec[str[17]] == 0xFF ||
+		ulidDec[str[18]] == 0xFF ||
+		ulidDec[str[19]] == 0xFF ||
+		ulidDec[str[20]] == 0xFF ||
+		ulidDec[str[21]] == 0xFF ||
+		ulidDec[str[22]] == 0xFF ||
+		ulidDec[str[23]] == 0xFF ||
+		ulidDec[str[24]] == 0xFF ||
+		ulidDec[str[25]] == 0xFF {
+		return false
+	}
+
+	// Check if the first character in a base32 encoded ULID will overflow. This
+	// happens because the base32 representation encodes 130 bits, while the
+	// ULID is only 128 bits.
+	//
+	// See https://github.com/oklog/ulid/issues/9 for details.
+	if str[0] > '7' {
+		return false
+	}
+	return true
+}
+
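For context, a brief hypothetical illustration of the validator added above (input values made up for the example; `govalidator` is the package being patched):

```go
// All three calls exercise the checks implemented in IsULID.
govalidator.IsULID("01G65Z755AFWAKHE12NY0CQ9FH") // true: 26 valid Crockford base32 chars
govalidator.IsULID("01G65Z755AFWAKHE12NY0CQ9F")  // false: only 25 characters
govalidator.IsULID("81G65Z755AFWAKHE12NY0CQ9FH") // false: leading '8' overflows 128 bits
```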
 // IsCreditCard checks if the string is a credit card.
 func IsCreditCard(str string) bool {
-	sanitized := notNumberRegexp.ReplaceAllString(str, "")
+	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
 	if !rxCreditCard.MatchString(sanitized) {
 		return false
 	}
@@ -509,6 +596,27 @@ func IsFilePath(str string) (bool, int) {
 	return false, Unknown
 }
 
+// IsWinFilePath checks both relative and absolute paths in Windows
+func IsWinFilePath(str string) bool {
+	if rxARWinPath.MatchString(str) {
+		// check the Windows path limit, see:
+		// http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+		if len(str[3:]) > 32767 {
+			return false
+		}
+		return true
+	}
+	return false
+}
+
+// IsUnixFilePath checks both relative and absolute paths in Unix
+func IsUnixFilePath(str string) bool {
+	if rxARUnixPath.MatchString(str) {
+		return true
+	}
+	return false
+}
+
 // IsDataURI checks if a string is base64 encoded data URI such as an image
 func IsDataURI(str string) bool {
 	dataURI := strings.Split(str, ",")
@@ -586,11 +694,13 @@ func IsHash(str string, algorithm string) bool {
 		len = "40"
 	} else if algo == "tiger192" {
 		len = "48"
-	} else if algo == "sha256" {
+	} else if algo == "sha3-224" {
+		len = "56"
+	} else if algo == "sha256" || algo == "sha3-256" {
 		len = "64"
-	} else if algo == "sha384" {
+	} else if algo == "sha384" || algo == "sha3-384" {
 		len = "96"
-	} else if algo == "sha512" {
+	} else if algo == "sha512" || algo == "sha3-512" {
 		len = "128"
 	} else {
 		return false
@@ -599,6 +709,26 @@ func IsHash(str string, algorithm string) bool {
 	return Matches(str, "^[a-f0-9]{"+len+"}$")
 }
 
+// IsSHA3224 checks if a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")`
+func IsSHA3224(str string) bool {
+	return IsHash(str, "sha3-224")
+}
+
+// IsSHA3256 checks if a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")`
+func IsSHA3256(str string) bool {
+	return IsHash(str, "sha3-256")
+}
+
+// IsSHA3384 checks if a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")`
+func IsSHA3384(str string) bool {
+	return IsHash(str, "sha3-384")
+}
+
+// IsSHA3512 checks if a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")`
+func IsSHA3512(str string) bool {
+	return IsHash(str, "sha3-512")
+}
+
 // IsSHA512 checks is a string is a SHA512 hash. Alias for `IsHash(str, "sha512")`
 func IsSHA512(str string) bool {
 	return IsHash(str, "sha512")
@@ -819,6 +949,14 @@ func IsRsaPublicKey(str string, keylen int) bool {
 	return bitlen == int(keylen)
 }
 
+// IsRegex checks if a given string is a valid regex with RE2 syntax
+func IsRegex(str string) bool {
+	if _, err := regexp.Compile(str); err == nil {
+		return true
+	}
+	return false
+}
+
 func toJSONName(tag string) string {
 	if tag == "" {
 		return ""
@@ -1625,3 +1763,7 @@ func (sv stringValues) Len() int           { return len(sv) }
 func (sv stringValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
 func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
 func (sv stringValues) get(i int) string   { return sv[i].String() }
+
+func IsE164(str string) bool {
+	return rxE164.MatchString(str)
+}
diff --git a/vendor/github.com/cloudfoundry/jibber_jabber/.travis.yml b/vendor/github.com/cloudfoundry/jibber_jabber/.travis.yml
new file mode 100644
index 00000000000..b19c2e53535
--- /dev/null
+++ b/vendor/github.com/cloudfoundry/jibber_jabber/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+go:
+  - 1.2
+before_install:
+- go get github.com/onsi/ginkgo/...
+- go get github.com/onsi/gomega/...
+- go install github.com/onsi/ginkgo/ginkgo
+script: PATH=$PATH:$HOME/gopath/bin ginkgo -r .
+branches: + only: + - master diff --git a/vendor/github.com/cloudfoundry/jibber_jabber/LICENSE b/vendor/github.com/cloudfoundry/jibber_jabber/LICENSE new file mode 100644 index 00000000000..915b208920b --- /dev/null +++ b/vendor/github.com/cloudfoundry/jibber_jabber/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright 2014 Pivotal + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/cloudfoundry/jibber_jabber/README.md b/vendor/github.com/cloudfoundry/jibber_jabber/README.md
new file mode 100644
index 00000000000..d696eb6b6d0
--- /dev/null
+++ b/vendor/github.com/cloudfoundry/jibber_jabber/README.md
@@ -0,0 +1,44 @@
+# Jibber Jabber [![Build Status](https://travis-ci.org/cloudfoundry/jibber_jabber.svg?branch=master)](https://travis-ci.org/cloudfoundry/jibber_jabber)
+Jibber Jabber is a Go library that detects an operating system's current language.
+
+### OS Support
+
+OSX and Linux via the `LC_ALL` and `LANG` environment variables. These are standard variables that are used in ALL versions of UNIX for language detection.
+
+Windows via [GetUserDefaultLocaleName](http://msdn.microsoft.com/en-us/library/windows/desktop/dd318136.aspx) and [GetSystemDefaultLocaleName](http://msdn.microsoft.com/en-us/library/windows/desktop/dd318122.aspx) system calls. These calls are supported in Windows Vista and up.
+
+# Usage
+Add the following line to your go `import`:
+
+```
+	"github.com/cloudfoundry/jibber_jabber"
+```
+
+### DetectIETF
+`DetectIETF` will return the current locale as a string. The format of the locale will be the [ISO 639](http://en.wikipedia.org/wiki/ISO_639) two-letter language code, a DASH, then an [ISO 3166](http://en.wikipedia.org/wiki/ISO_3166-1) two-letter country code.
+
+```
+	userLocale, err := jibber_jabber.DetectIETF()
+	println("Locale:", userLocale)
+```
+
+### DetectLanguage
+`DetectLanguage` will return the current language as a string. The format will be the [ISO 639](http://en.wikipedia.org/wiki/ISO_639) two-letter language code.
+
+```
+	userLanguage, err := jibber_jabber.DetectLanguage()
+	println("Language:", userLanguage)
+```
+
+### DetectTerritory
+`DetectTerritory` will return the current locale territory as a string. The format will be the [ISO 3166](http://en.wikipedia.org/wiki/ISO_3166-1) two-letter country code.
+
+```
+	localeTerritory, err := jibber_jabber.DetectTerritory()
+	println("Territory:", localeTerritory)
+```
+
+### Errors
+All the Detect commands will return an error if they are unable to read the Locale from the system.
+
+For Windows, additional error information is provided due to the nature of the system call being used.
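Putting the calls together, here is a small hedged sketch of a fallback pattern a caller might use (illustrative only, not from the library's documentation):

```go
package main

import (
	"fmt"

	"github.com/cloudfoundry/jibber_jabber"
)

func main() {
	// Prefer the full IETF locale; fall back to the bare language code
	// when the territory cannot be detected.
	locale, err := jibber_jabber.DetectIETF()
	if err != nil || locale == "" {
		locale, _ = jibber_jabber.DetectLanguage()
	}
	fmt.Println("Locale:", locale)
}
```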
diff --git a/vendor/github.com/cloudfoundry/jibber_jabber/jibber_jabber.go b/vendor/github.com/cloudfoundry/jibber_jabber/jibber_jabber.go new file mode 100644 index 00000000000..45d288ea87a --- /dev/null +++ b/vendor/github.com/cloudfoundry/jibber_jabber/jibber_jabber.go @@ -0,0 +1,22 @@ +package jibber_jabber + +import ( + "strings" +) + +const ( + COULD_NOT_DETECT_PACKAGE_ERROR_MESSAGE = "Could not detect Language" +) + +func splitLocale(locale string) (string, string) { + formattedLocale := strings.Split(locale, ".")[0] + formattedLocale = strings.Replace(formattedLocale, "-", "_", -1) + + pieces := strings.Split(formattedLocale, "_") + language := pieces[0] + territory := "" + if len(pieces) > 1 { + territory = strings.Split(formattedLocale, "_")[1] + } + return language, territory +} diff --git a/vendor/github.com/cloudfoundry/jibber_jabber/jibber_jabber_unix.go b/vendor/github.com/cloudfoundry/jibber_jabber/jibber_jabber_unix.go new file mode 100644 index 00000000000..374d7617630 --- /dev/null +++ b/vendor/github.com/cloudfoundry/jibber_jabber/jibber_jabber_unix.go @@ -0,0 +1,57 @@ +// +build darwin freebsd linux netbsd openbsd + +package jibber_jabber + +import ( + "errors" + "os" + "strings" +) + +func getLangFromEnv() (locale string) { + locale = os.Getenv("LC_ALL") + if locale == "" { + locale = os.Getenv("LANG") + } + return +} + +func getUnixLocale() (unix_locale string, err error) { + unix_locale = getLangFromEnv() + if unix_locale == "" { + err = errors.New(COULD_NOT_DETECT_PACKAGE_ERROR_MESSAGE) + } + + return +} + +func DetectIETF() (locale string, err error) { + unix_locale, err := getUnixLocale() + if err == nil { + language, territory := splitLocale(unix_locale) + locale = language + if territory != "" { + locale = strings.Join([]string{language, territory}, "-") + } + } + + return +} + +func DetectLanguage() (language string, err error) { + unix_locale, err := getUnixLocale() + if err == nil { + language, _ = splitLocale(unix_locale) + } + + return +} + +func DetectTerritory() (territory string, err error) { + unix_locale, err := getUnixLocale() + if err == nil { + _, territory = splitLocale(unix_locale) + } + + return +} diff --git a/vendor/github.com/cloudfoundry/jibber_jabber/jibber_jabber_windows.go b/vendor/github.com/cloudfoundry/jibber_jabber/jibber_jabber_windows.go new file mode 100644 index 00000000000..1acd96c38b1 --- /dev/null +++ b/vendor/github.com/cloudfoundry/jibber_jabber/jibber_jabber_windows.go @@ -0,0 +1,114 @@ +// +build windows + +package jibber_jabber + +import ( + "errors" + "syscall" + "unsafe" +) + +const LOCALE_NAME_MAX_LENGTH uint32 = 85 + +var SUPPORTED_LOCALES = map[uintptr]string{ + 0x0407: "de-DE", + 0x0409: "en-US", + 0x0c0a: "es-ES", //or is it 0x040a + 0x040c: "fr-FR", + 0x0410: "it-IT", + 0x0411: "ja-JA", + 0x0412: "ko_KR", + 0x0416: "pt-BR", + //0x0419: "ru_RU", - Will add support for Russian when nicksnyder/go-i18n supports Russian + 0x0804: "zh-CN", + 0x0c04: "zh-HK", + 0x0404: "zh-TW", +} + +func getWindowsLocaleFrom(sysCall string) (locale string, err error) { + buffer := make([]uint16, LOCALE_NAME_MAX_LENGTH) + + dll := syscall.MustLoadDLL("kernel32") + proc := dll.MustFindProc(sysCall) + r, _, dllError := proc.Call(uintptr(unsafe.Pointer(&buffer[0])), uintptr(LOCALE_NAME_MAX_LENGTH)) + if r == 0 { + err = errors.New(COULD_NOT_DETECT_PACKAGE_ERROR_MESSAGE + ":\n" + dllError.Error()) + return + } + + locale = syscall.UTF16ToString(buffer) + + return +} + +func getAllWindowsLocaleFrom(sysCall string) (string, error) { + dll, err 
:= syscall.LoadDLL("kernel32") + if err != nil { + return "", errors.New("Could not find kernel32 dll") + } + + proc, err := dll.FindProc(sysCall) + if err != nil { + return "", err + } + + locale, _, dllError := proc.Call() + if locale == 0 { + return "", errors.New(COULD_NOT_DETECT_PACKAGE_ERROR_MESSAGE + ":\n" + dllError.Error()) + } + + return SUPPORTED_LOCALES[locale], nil +} + +func getWindowsLocale() (locale string, err error) { + dll, err := syscall.LoadDLL("kernel32") + if err != nil { + return "", errors.New("Could not find kernel32 dll") + } + + proc, err := dll.FindProc("GetVersion") + if err != nil { + return "", err + } + + v, _, _ := proc.Call() + windowsVersion := byte(v) + isVistaOrGreater := (windowsVersion >= 6) + + if isVistaOrGreater { + locale, err = getWindowsLocaleFrom("GetUserDefaultLocaleName") + if err != nil { + locale, err = getWindowsLocaleFrom("GetSystemDefaultLocaleName") + } + } else if !isVistaOrGreater { + locale, err = getAllWindowsLocaleFrom("GetUserDefaultLCID") + if err != nil { + locale, err = getAllWindowsLocaleFrom("GetSystemDefaultLCID") + } + } else { + panic(v) + } + return +} +func DetectIETF() (locale string, err error) { + locale, err = getWindowsLocale() + return +} + +func DetectLanguage() (language string, err error) { + windows_locale, err := getWindowsLocale() + if err == nil { + language, _ = splitLocale(windows_locale) + } + + return +} + +func DetectTerritory() (territory string, err error) { + windows_locale, err := getWindowsLocale() + if err == nil { + _, territory = splitLocale(windows_locale) + } + + return +} diff --git a/vendor/github.com/dchest/bcrypt_pbkdf/LICENSE b/vendor/github.com/dchest/bcrypt_pbkdf/LICENSE new file mode 100644 index 00000000000..b99c5e3b989 --- /dev/null +++ b/vendor/github.com/dchest/bcrypt_pbkdf/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 Dmitry Chestnykh +Copyright (c) 2010 The Go Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dchest/bcrypt_pbkdf/README b/vendor/github.com/dchest/bcrypt_pbkdf/README new file mode 100644 index 00000000000..8ce68743213 --- /dev/null +++ b/vendor/github.com/dchest/bcrypt_pbkdf/README @@ -0,0 +1,21 @@ +Go implementation of bcrypt_pbkdf(3) from OpenBSD +(a variant of PBKDF2 with bcrypt-based PRF). 
+
+
+USAGE
+
+    func Key(password, salt []byte, rounds, keyLen int) ([]byte, error)
+
+
+    Key derives a key from the password, salt and rounds count, returning a
+    []byte of length keyLen that can be used as a cryptographic key.
+
+    Remember to get a good random salt of at least 16 bytes. Using a higher
+    rounds count will increase the cost of an exhaustive search but will also
+    make derivation proportionally slower.
+
+
+REFERENCES
+
+* http://www.tedunangst.com/flak/post/bcrypt-pbkdf
+* http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c
diff --git a/vendor/github.com/dchest/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/github.com/dchest/bcrypt_pbkdf/bcrypt_pbkdf.go
new file mode 100644
index 00000000000..60bba1c3653
--- /dev/null
+++ b/vendor/github.com/dchest/bcrypt_pbkdf/bcrypt_pbkdf.go
@@ -0,0 +1,97 @@
+// Copyright 2014 Dmitry Chestnykh. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypt_pbkdf implements password-based key derivation function based
+// on bcrypt compatible with bcrypt_pbkdf(3) from OpenBSD.
+package bcrypt_pbkdf
+
+import (
+	"crypto/sha512"
+	"errors"
+
+	// NOTE! Requires blowfish package version from Aug 1, 2014 or later.
+	// Will produce incorrect results if the package is older.
+	// See commit message for details: http://goo.gl/wx6g8O
+	"golang.org/x/crypto/blowfish"
+)
+
+// Key derives a key from the password, salt and rounds count, returning a
+// []byte of length keyLen that can be used as a cryptographic key.
+//
+// Remember to get a good random salt of at least 16 bytes. Using a higher
+// rounds count will increase the cost of an exhaustive search but will also
+// make derivation proportionally slower.
+func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) {
+	if rounds < 1 {
+		return nil, errors.New("bcrypt_pbkdf: number of rounds is too small")
+	}
+	if len(password) == 0 {
+		return nil, errors.New("bcrypt_pbkdf: empty password")
+	}
+	if len(salt) == 0 || len(salt) > 1<<20 {
+		return nil, errors.New("bcrypt_pbkdf: bad salt length")
+	}
+	if keyLen > 1024 {
+		return nil, errors.New("bcrypt_pbkdf: keyLen is too large")
+	}
+	var shapass, shasalt [sha512.Size]byte
+	var out, tmp [32]byte
+	var cnt [4]byte
+
+	numBlocks := (keyLen + len(out) - 1) / len(out)
+	key := make([]byte, numBlocks*len(out))
+
+	h := sha512.New()
+	h.Write(password)
+	h.Sum(shapass[:0])
+
+	for block := 1; block <= numBlocks; block++ {
+		h.Reset()
+		h.Write(salt)
+		cnt[0] = byte(block >> 24)
+		cnt[1] = byte(block >> 16)
+		cnt[2] = byte(block >> 8)
+		cnt[3] = byte(block)
+		h.Write(cnt[:])
+		bcryptHash(tmp[:], shapass[:], h.Sum(shasalt[:0]))
+		copy(out[:], tmp[:])
+
+		for i := 2; i <= rounds; i++ {
+			h.Reset()
+			h.Write(tmp[:])
+			bcryptHash(tmp[:], shapass[:], h.Sum(shasalt[:0]))
+			for j := 0; j < len(out); j++ {
+				out[j] ^= tmp[j]
+			}
+		}
+
+		for i, v := range out {
+			key[i*numBlocks+(block-1)] = v
+		}
+	}
+	return key[:keyLen], nil
+}
+
+var magic = []byte("OxychromaticBlowfishSwatDynamite")
+
+func bcryptHash(out, shapass, shasalt []byte) {
+	c, err := blowfish.NewSaltedCipher(shapass, shasalt)
+	if err != nil {
+		panic(err)
+	}
+	for i := 0; i < 64; i++ {
+		blowfish.ExpandKey(shasalt, c)
+		blowfish.ExpandKey(shapass, c)
+	}
+	copy(out[:], magic)
+	for i := 0; i < 32; i += 8 {
+		for j := 0; j < 64; j++ {
+			c.Encrypt(out[i:i+8], out[i:i+8])
+		}
+	}
+	// Swap bytes due to different endianness.
+	for i := 0; i < 32; i += 4 {
+		out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3]
+	}
+}
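A brief usage sketch for the function above (hypothetical caller; the passphrase, rounds count, and key length are illustrative values, with the salt generated per the README's advice):

```go
// Derive a 32-byte key from a passphrase, OpenBSD bcrypt_pbkdf style.
salt := make([]byte, 16)
if _, err := rand.Read(salt); err != nil { // crypto/rand
	panic(err)
}
key, err := bcrypt_pbkdf.Key([]byte("correct horse battery staple"), salt, 16, 32)
if err != nil {
	panic(err)
}
_ = key // use as a cipher key, e.g. for AES-256
```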
diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore
new file mode 100644
index 00000000000..80bed650ec0
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml
new file mode 100644
index 00000000000..1027f56cd94
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+script:
+    - go vet ./...
+    - go test -v ./...
+
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - 1.7
+  - tip
diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE
new file mode 100644
index 00000000000..df83a9c2f01
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2012 Dave Grijalva
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
new file mode 100644
index 00000000000..7fc1f793cbc
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
@@ -0,0 +1,97 @@
+## Migration Guide from v2 -> v3
+
+Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
+
+### `Token.Claims` is now an interface type
+
+The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
+
+`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
+
+The old example for parsing a token looked like this...
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is now directly mapped to...
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
+
+```go
+	type MyCustomClaims struct {
+		User string
+		*StandardClaims
+	}
+
+	if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
+		claims := token.Claims.(*MyCustomClaims)
+		fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
+	}
+```
+
+### `ParseFromRequest` has been moved
+
+To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
+
+`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
+
+This simple parsing example:
+
+```go
+	if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is directly mapped to:
+
+```go
+	if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+There are several concrete `Extractor` types provided for your convenience:
+
+* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
+* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
+* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument.
+* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header.
+
+
+### RSA signing methods no longer accept `[]byte` keys
+
+Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
+
+To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
+
+```go
+	func keyLookupFunc(token *jwt.Token) (interface{}, error) {
+		// Don't forget to validate the alg is what you expect:
+		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
+		}
+
+		// Look up key
+		key, err := lookupPublicKey(token.Header["kid"])
+		if err != nil {
+			return nil, err
+		}
+
+		// Unpack key from PEM encoded PKCS8
+		return jwt.ParseRSAPublicKeyFromPEM(key)
+	}
+```
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md
new file mode 100644
index 00000000000..d358d881b8d
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -0,0 +1,100 @@
+# jwt-go
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly and will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in crypto/elliptic. The recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method and key were used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
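+
+As a minimal sketch of that round trip (an illustrative example: `mySigningKey` is a placeholder secret, and most error handling is elided):
+
+```go
+	mySigningKey := []byte("secret")
+
+	// Create and sign a token with a couple of claims.
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+		"user": "alice",
+		"exp":  time.Now().Add(time.Hour).Unix(),
+	})
+	signed, _ := token.SignedString(mySigningKey)
+
+	// Parse it back, validating the alg before handing over the key.
+	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
+		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+		}
+		return mySigningKey, nil
+	})
+	if err == nil && parsed.Valid {
+		// token is authentic and its time-based claims check out
+	}
+```
+
+Checking the method type inside the key function is the `alg` validation step called out in the security notice above.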
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing with respect to semantic versioning.
+
+**BREAKING CHANGES:**
+* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter.
Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
+* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
new file mode 100644
index 00000000000..6370298313a
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
@@ -0,0 +1,118 @@
+## `jwt-go` Version History
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+ +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. 
The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+  * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+  * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+  * `KeyFunc` now returns `interface{}` instead of `[]byte`
+  * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+  * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodHS256`
+  * Added public package global `SigningMethodHS384`
+  * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodRS256`
+  * Added public package global `SigningMethodRS384`
+  * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation.
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 00000000000..f0228f02e03 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	return verifyNbf(c.NotBefore, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud string, cmp string, required bool) bool {
+	if aud == "" {
+		return !required
+	}
+	if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
+		return true
+	} else {
+		return false
+	}
+}
+
+func verifyExp(exp int64, now int64, required bool) bool {
+	if exp == 0 {
+		return !required
+	}
+	return now <= exp
+}
+
+func verifyIat(iat int64, now int64, required bool) bool {
+	if iat == 0 {
+		return !required
+	}
+	return now >= iat
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+	if iss == "" {
+		return !required
+	}
+	if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
+		return true
+	} else {
+		return false
+	}
+}
+
+func verifyNbf(nbf int64, now int64, required bool) bool {
+	if nbf == 0 {
+		return !required
+	}
+	return now >= nbf
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go
new file mode 100644
index 00000000000..a86dc1a3b34
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
new file mode 100644
index 00000000000..f977381240e
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
@@ -0,0 +1,148 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"errors"
+	"math/big"
+)
+
+var (
+	// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+	ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// Implements the ECDSA family of signing methods
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+	Name      string
+	Hash      crypto.Hash
+	KeySize   int
+	CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+	SigningMethodES256 *SigningMethodECDSA
+	SigningMethodES384 *SigningMethodECDSA
+	SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+	// ES256
+	SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+	RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+		return SigningMethodES256
+	})
+
+	// ES384
+	SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+	RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+		return SigningMethodES384
+	})
+
+	// ES512
+	SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+	RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+		return SigningMethodES512
+	})
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+	return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	// Get the key
+	var ecdsaKey *ecdsa.PublicKey
+	switch k := key.(type) {
+	case *ecdsa.PublicKey:
+		ecdsaKey = k
+	default:
+		return ErrInvalidKeyType
+	}
+
+	if len(sig) != 2*m.KeySize {
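+		// JWS encodes an ECDSA signature as the fixed-width concatenation
+		// r||s (KeySize bytes each), so any other length is malformed.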
+		return ErrECDSAVerification
+	}
+
+	r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+	s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
+		return nil
+	} else {
+		return ErrECDSAVerification
+	}
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+	// Get the key
+	var ecdsaKey *ecdsa.PrivateKey
+	switch k := key.(type) {
+	case *ecdsa.PrivateKey:
+		ecdsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return r, s
+	if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+		curveBits := ecdsaKey.Curve.Params().BitSize
+
+		if m.CurveBits != curveBits {
+			return "", ErrInvalidKey
+		}
+
+		keyBytes := curveBits / 8
+		if curveBits%8 > 0 {
+			keyBytes += 1
+		}
+
+		// We serialize the outputs (r and s) into big-endian byte arrays and pad
+		// them with zeros on the left to make sure the sizes work out. Both arrays
+		// must be keyBytes long, and the output must be 2*keyBytes long.
+		rBytes := r.Bytes()
+		rBytesPadded := make([]byte, keyBytes)
+		copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+		sBytes := s.Bytes()
+		sBytesPadded := make([]byte, keyBytes)
+		copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+		out := append(rBytesPadded, sBytesPadded...)
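+		// out is now the fixed-width r||s concatenation that JWS expects
+		// for ECDSA, mirroring the length check in Verify above.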
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 00000000000..d19624b7264 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 00000000000..1c93024aad2 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... 
constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 00000000000..addbe5d4018 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. 
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go new file mode 100644 index 00000000000..291213c460d --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
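+// Callers that need any of these claims to be mandatory can instead call the
+// individual Verify* methods with req set to true.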
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 00000000000..f04d189d067 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 00000000000..d6901d9adb5 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
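+// The key's type must match what the token's signing method expects
+// ([]byte for HMAC, *rsa.PublicKey for RSA, and so on).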
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
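+//
+// The returned parts slice holds the three raw, still base64url-encoded
+// segments of the token.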
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 00000000000..e4caf1ca4a1 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. 
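+// A PEM encoded public key can be converted to that type with
+// ParseRSAPublicKeyFromPEM (see rsa_utils.go).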
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 00000000000..10ee9db8a4e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + 
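	// Hash the signing input (the header.payload string) before
	// checking the PSS signature against it.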
hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 00000000000..a5ababf956c --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, 
ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 00000000000..ed1f212b21e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 00000000000..d637e0867c6 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. 
Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE new file mode 100644 index 00000000000..698a3f51397 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md new file mode 100644 index 00000000000..2d1b3d93225 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md @@ -0,0 +1,34 @@ +circuit-breaker +=============== + +[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) +[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +The circuit-breaker resiliency pattern for golang. + +Creating a breaker takes three parameters: +- error threshold (for opening the breaker) +- success threshold (for closing the breaker) +- timeout (how long to keep the breaker open) + +```go +b := breaker.New(3, 1, 5*time.Second) + +for { + result := b.Run(func() error { + // communicate with some external service and + // return an error if the communication failed + return nil + }) + + switch result { + case nil: + // success! + case breaker.ErrBreakerOpen: + // our function wasn't run because the breaker was open + default: + // some other error + } +} +``` diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go new file mode 100644 index 00000000000..f88ca7248b0 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go @@ -0,0 +1,161 @@ +// Package breaker implements the circuit-breaker resiliency pattern for Go. +package breaker + +import ( + "errors" + "sync" + "sync/atomic" + "time" +) + +// ErrBreakerOpen is the error returned from Run() when the function is not executed +// because the breaker is currently open. +var ErrBreakerOpen = errors.New("circuit breaker is open") + +const ( + closed uint32 = iota + open + halfOpen +) + +// Breaker implements the circuit-breaker resiliency pattern +type Breaker struct { + errorThreshold, successThreshold int + timeout time.Duration + + lock sync.Mutex + state uint32 + errors, successes int + lastError time.Time +} + +// New constructs a new circuit-breaker that starts closed. +// From closed, the breaker opens if "errorThreshold" errors are seen +// without an error-free period of at least "timeout". From open, the +// breaker half-closes after "timeout". From half-open, the breaker closes +// after "successThreshold" consecutive successes, or opens on a single error. +func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker { + return &Breaker{ + errorThreshold: errorThreshold, + successThreshold: successThreshold, + timeout: timeout, + } +} + +// Run will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function and pass along its return +// value. It is safe to call Run concurrently on the same Breaker. +func (b *Breaker) Run(work func() error) error { + state := atomic.LoadUint32(&b.state) + + if state == open { + return ErrBreakerOpen + } + + return b.doWork(state, work) +} + +// Go will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function in a separate goroutine. +// If the function is run, Go will return nil immediately, and will *not* return +// the return value of the function. 
It is safe to call Go concurrently on the +// same Breaker. +func (b *Breaker) Go(work func() error) error { + state := atomic.LoadUint32(&b.state) + + if state == open { + return ErrBreakerOpen + } + + // errcheck complains about ignoring the error return value, but + // that's on purpose; if you want an error from a goroutine you have to + // get it over a channel or something + go b.doWork(state, work) + + return nil +} + +func (b *Breaker) doWork(state uint32, work func() error) error { + var panicValue interface{} + + result := func() error { + defer func() { + panicValue = recover() + }() + return work() + }() + + if result == nil && panicValue == nil && state == closed { + // short-circuit the normal, success path without contending + // on the lock + return nil + } + + // oh well, I guess we have to contend on the lock + b.processResult(result, panicValue) + + if panicValue != nil { + // as close as Go lets us come to a "rethrow" although unfortunately + // we lose the original panicing location + panic(panicValue) + } + + return result +} + +func (b *Breaker) processResult(result error, panicValue interface{}) { + b.lock.Lock() + defer b.lock.Unlock() + + if result == nil && panicValue == nil { + if b.state == halfOpen { + b.successes++ + if b.successes == b.successThreshold { + b.closeBreaker() + } + } + } else { + if b.errors > 0 { + expiry := b.lastError.Add(b.timeout) + if time.Now().After(expiry) { + b.errors = 0 + } + } + + switch b.state { + case closed: + b.errors++ + if b.errors == b.errorThreshold { + b.openBreaker() + } else { + b.lastError = time.Now() + } + case halfOpen: + b.openBreaker() + } + } +} + +func (b *Breaker) openBreaker() { + b.changeState(open) + go b.timer() +} + +func (b *Breaker) closeBreaker() { + b.changeState(closed) +} + +func (b *Breaker) timer() { + time.Sleep(b.timeout) + + b.lock.Lock() + defer b.lock.Unlock() + + b.changeState(halfOpen) +} + +func (b *Breaker) changeState(newState uint32) { + b.errors = 0 + b.successes = 0 + atomic.StoreUint32(&b.state, newState) +} diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/hashicorp/hil/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml similarity index 53% rename from vendor/github.com/hashicorp/hil/.travis.yml rename to vendor/github.com/eapache/go-xerial-snappy/.travis.yml index a785444222c..d6cf4f1fa1b 100644 --- a/vendor/github.com/hashicorp/hil/.travis.yml +++ b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml @@ -1,3 +1,7 @@ -sudo: false language: go -go: 1.7 + +go: +- 1.5.4 +- 1.6.1 + +sudo: false diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE new file mode 100644 index 00000000000..5bf3688d9e4 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the 
"Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md new file mode 100644 index 00000000000..3f2695c7282 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/README.md @@ -0,0 +1,13 @@ +# go-xerial-snappy + +[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy) + +Xerial-compatible Snappy framing support for golang. + +Packages using Xerial for snappy encoding use a framing format incompatible with +basically everything else in existence. This package wraps Go's built-in snappy +package to support it. + +Apps that use this format include Apache Kafka (see +https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for +details). diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go new file mode 100644 index 00000000000..6a46f4784e1 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go @@ -0,0 +1,16 @@ +// +build gofuzz + +package snappy + +func Fuzz(data []byte) int { + decode, err := Decode(data) + if decode == nil && err == nil { + panic("nil error with nil result") + } + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go new file mode 100644 index 00000000000..ea8f7afeb33 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go @@ -0,0 +1,131 @@ +package snappy + +import ( + "bytes" + "encoding/binary" + "errors" + + master "github.com/golang/snappy" +) + +const ( + sizeOffset = 16 + sizeBytes = 4 +) + +var ( + xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0} + + // This is xerial version 1 and minimally compatible with version 1 + xerialVersionInfo = []byte{0, 0, 0, 1, 0, 0, 0, 1} + + // ErrMalformed is returned by the decoder when the xerial framing + // is malformed + ErrMalformed = errors.New("malformed xerial framing") +) + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +// Encode encodes data as snappy with no framing header. +func Encode(src []byte) []byte { + return master.Encode(nil, src) +} + +// EncodeStream *appends* to the specified 'dst' the compressed +// 'src' in xerial framing format. If 'dst' does not have enough +// capacity, then a new slice will be allocated. If 'dst' has +// non-zero length, then if *must* have been built using this function. +func EncodeStream(dst, src []byte) []byte { + if len(dst) == 0 { + dst = append(dst, xerialHeader...) 
+		dst = append(dst, xerialVersionInfo...)
+	}
+
+	// Snappy encode in blocks of maximum 32KB
+	var (
+		max       = len(src)
+		blockSize = 32 * 1024
+		pos       = 0
+		chunk     []byte
+	)
+
+	for pos < max {
+		newPos := min(pos + blockSize, max)
+		chunk = master.Encode(chunk[:cap(chunk)], src[pos:newPos])
+
+		// First encode the compressed size (big-endian)
+		// Put* panics if the buffer is too small, so pad 4 bytes first
+		origLen := len(dst)
+		dst = append(dst, dst[0:4]...)
+		binary.BigEndian.PutUint32(dst[origLen:], uint32(len(chunk)))
+
+		// And now the compressed data
+		dst = append(dst, chunk...)
+		pos = newPos
+	}
+	return dst
+}
+
+// Decode decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format.
+func Decode(src []byte) ([]byte, error) {
+	return DecodeInto(nil, src)
+}
+
+// DecodeInto decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format into the specified `dst`.
+// It is assumed that the entirety of `dst` including all capacity is available
+// for use by this function. If `dst` is nil *or* insufficiently large to hold
+// the decoded `src`, new space will be allocated.
+func DecodeInto(dst, src []byte) ([]byte, error) {
+	var max = len(src)
+	if max < len(xerialHeader) {
+		return nil, ErrMalformed
+	}
+
+	if !bytes.Equal(src[:8], xerialHeader) {
+		return master.Decode(dst[:cap(dst)], src)
+	}
+
+	if max < sizeOffset+sizeBytes {
+		return nil, ErrMalformed
+	}
+
+	if dst == nil {
+		dst = make([]byte, 0, len(src))
+	}
+
+	dst = dst[:0]
+	var (
+		pos   = sizeOffset
+		chunk []byte
+		err   error
+	)
+
+	for pos+sizeBytes <= max {
+		size := int(binary.BigEndian.Uint32(src[pos : pos+sizeBytes]))
+		pos += sizeBytes
+
+		nextPos := pos + size
+		// On architectures where int is 32 bits wide, size + pos could
+		// overflow, so we need to check the low bound as well as the
+		// high
+		if nextPos < pos || nextPos > max {
+			return nil, ErrMalformed
+		}
+
+		chunk, err = master.Decode(chunk[:cap(chunk)], src[pos:nextPos])
+
+		if err != nil {
+			return nil, err
+		}
+		pos = nextPos
+		dst = append(dst, chunk...)
+ } + return dst, nil +} diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore new file mode 100644 index 00000000000..836562412fe --- /dev/null +++ b/vendor/github.com/eapache/queue/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml new file mode 100644 index 00000000000..235a40a493f --- /dev/null +++ b/vendor/github.com/eapache/queue/.travis.yml @@ -0,0 +1,7 @@ +language: go +sudo: false + +go: + - 1.2 + - 1.3 + - 1.4 diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE new file mode 100644 index 00000000000..d5f36dbcaaf --- /dev/null +++ b/vendor/github.com/eapache/queue/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md new file mode 100644 index 00000000000..8e782335cd7 --- /dev/null +++ b/vendor/github.com/eapache/queue/README.md @@ -0,0 +1,16 @@ +Queue +===== + +[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) +[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is in part because it is *not* thread-safe. + +Follows semantic versioning using https://gopkg.in/ - import from +[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) +for guaranteed API stability. 
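A minimal round-trip sketch for the go-xerial-snappy package vendored above: `EncodeStream` produces the xerial-framed format, and `Decode` transparently handles both framed and unframed input. The sample payload is illustrative only.

```go
package main

import (
	"bytes"
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	src := []byte("hello, kafka") // hypothetical payload

	// EncodeStream appends the xerial header plus size-prefixed snappy
	// blocks to dst (nil here, so a fresh slice is allocated).
	framed := snappy.EncodeStream(nil, src)

	// Decode detects the xerial header and reassembles the blocks.
	out, err := snappy.Decode(framed)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, out)) // true
}
```

And a usage sketch for the queue package whose README precedes and whose implementation (queue.go) follows; per the README, the ring buffer is deliberately not thread-safe, so callers must provide their own synchronization when sharing a queue across goroutines.

```go
package main

import (
	"fmt"

	"github.com/eapache/queue"
)

func main() {
	q := queue.New()
	q.Add("a")
	q.Add("b")

	fmt.Println(q.Length()) // 2
	fmt.Println(q.Peek())   // a (does not remove)
	fmt.Println(q.Remove()) // a
	fmt.Println(q.Get(-1))  // b (negative index counts from the tail)
}
```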
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go new file mode 100644 index 00000000000..71d1acdf27b --- /dev/null +++ b/vendor/github.com/eapache/queue/queue.go @@ -0,0 +1,102 @@ +/* +Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. +*/ +package queue + +// minQueueLen is smallest capacity that queue may have. +// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). +const minQueueLen = 16 + +// Queue represents a single instance of the queue data structure. +type Queue struct { + buf []interface{} + head, tail, count int +} + +// New constructs and returns a new Queue. +func New() *Queue { + return &Queue{ + buf: make([]interface{}, minQueueLen), + } +} + +// Length returns the number of elements currently stored in the queue. +func (q *Queue) Length() int { + return q.count +} + +// resizes the queue to fit exactly twice its current contents +// this can result in shrinking if the queue is less than half-full +func (q *Queue) resize() { + newBuf := make([]interface{}, q.count<<1) + + if q.tail > q.head { + copy(newBuf, q.buf[q.head:q.tail]) + } else { + n := copy(newBuf, q.buf[q.head:]) + copy(newBuf[n:], q.buf[:q.tail]) + } + + q.head = 0 + q.tail = q.count + q.buf = newBuf +} + +// Add puts an element on the end of the queue. +func (q *Queue) Add(elem interface{}) { + if q.count == len(q.buf) { + q.resize() + } + + q.buf[q.tail] = elem + // bitwise modulus + q.tail = (q.tail + 1) & (len(q.buf) - 1) + q.count++ +} + +// Peek returns the element at the head of the queue. This call panics +// if the queue is empty. +func (q *Queue) Peek() interface{} { + if q.count <= 0 { + panic("queue: Peek() called on empty queue") + } + return q.buf[q.head] +} + +// Get returns the element at index i in the queue. If the index is +// invalid, the call will panic. This method accepts both positive and +// negative index values. Index 0 refers to the first element, and +// index -1 refers to the last. +func (q *Queue) Get(i int) interface{} { + // If indexing backwards, convert to positive index. + if i < 0 { + i += q.count + } + if i < 0 || i >= q.count { + panic("queue: Get() called with index out of range") + } + // bitwise modulus + return q.buf[(q.head+i)&(len(q.buf)-1)] +} + +// Remove removes and returns the element from the front of the queue. If the +// queue is empty, the call will panic. +func (q *Queue) Remove() interface{} { + if q.count <= 0 { + panic("queue: Remove() called on empty queue") + } + ret := q.buf[q.head] + q.buf[q.head] = nil + // bitwise modulus + q.head = (q.head + 1) & (len(q.buf) - 1) + q.count-- + // Resize down if buffer 1/4 full. 
+ if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { + q.resize() + } + return ret +} diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml new file mode 100644 index 00000000000..841c4281e23 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.codecov.yml @@ -0,0 +1,5 @@ +coverage: + status: + patch: + default: + target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore new file mode 100644 index 00000000000..87c3bd3e66e --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +coverage.txt +*.cov +.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml new file mode 100644 index 00000000000..76af8ab1c87 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 40 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - gochecknoinits + # scopelint is useful, but also reports false positives + # that unfortunately can't be disabled. So we disable the + # linter rather than changing code that works. + # see: https://github.com/kyoh86/scopelint/issues/4 + - scopelint diff --git a/vendor/github.com/go-openapi/analysis/.travis.yml b/vendor/github.com/go-openapi/analysis/.travis.yml new file mode 100644 index 00000000000..7ecf865c21c --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md new file mode 100644 index 00000000000..efafdf8fd32 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -0,0 +1,9 @@ +# OpenAPI initiative analysis [![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) [![Build status](https://ci.appveyor.com/api/projects/status/x377t5o9ennm847o/branch/master?svg=true)](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master) [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/analysis.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) + + +A foundational library to analyze an OAI specification document for easier reasoning about the content. diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go new file mode 100644 index 00000000000..4d98718c4e6 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/analyzer.go @@ -0,0 +1,970 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + slashpath "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +type referenceAnalysis struct { + schemas map[string]spec.Ref + responses map[string]spec.Ref + parameters map[string]spec.Ref + items map[string]spec.Ref + headerItems map[string]spec.Ref + parameterItems map[string]spec.Ref + allRefs map[string]spec.Ref + pathItems map[string]spec.Ref +} + +func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { + r.allRefs["#"+key] = ref +} + +func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { + r.items["#"+key] = items.Ref + r.addRef(key, items.Ref) + if location == "header" { + // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas + // and $ref are not supported here. However it is possible to analyze this. 
+ r.headerItems["#"+key] = items.Ref + } else { + r.parameterItems["#"+key] = items.Ref + } +} + +func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { + r.schemas["#"+key] = ref.Schema.Ref + r.addRef(key, ref.Schema.Ref) +} + +func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { + r.responses["#"+key] = resp.Ref + r.addRef(key, resp.Ref) +} + +func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { + r.parameters["#"+key] = param.Ref + r.addRef(key, param.Ref) +} + +func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { + r.pathItems["#"+key] = pathItem.Ref + r.addRef(key, pathItem.Ref) +} + +type patternAnalysis struct { + parameters map[string]string + headers map[string]string + items map[string]string + schemas map[string]string + allPatterns map[string]string +} + +func (p *patternAnalysis) addPattern(key, pattern string) { + p.allPatterns["#"+key] = pattern +} + +func (p *patternAnalysis) addParameterPattern(key, pattern string) { + p.parameters["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addHeaderPattern(key, pattern string) { + p.headers["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addItemsPattern(key, pattern string) { + p.items["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addSchemaPattern(key, pattern string) { + p.schemas["#"+key] = pattern + p.addPattern(key, pattern) +} + +type enumAnalysis struct { + parameters map[string][]interface{} + headers map[string][]interface{} + items map[string][]interface{} + schemas map[string][]interface{} + allEnums map[string][]interface{} +} + +func (p *enumAnalysis) addEnum(key string, enum []interface{}) { + p.allEnums["#"+key] = enum +} + +func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { + p.parameters["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { + p.headers["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { + p.items["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { + p.schemas["#"+key] = enum + p.addEnum(key, enum) +} + +// New takes a swagger spec object and returns an analyzed spec document. +// The analyzed document contains a number of indices that make it easier to +// reason about semantics of a swagger specification for use in code generation +// or validation etc. +func New(doc *spec.Swagger) *Spec { + a := &Spec{ + spec: doc, + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + a.reset() + a.initialize() + return a +} + +// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry +// with a bunch of utility methods to act on the information in the spec. 
+type Spec struct { + spec *spec.Swagger + consumes map[string]struct{} + produces map[string]struct{} + authSchemes map[string]struct{} + operations map[string]map[string]*spec.Operation + references referenceAnalysis + patterns patternAnalysis + enums enumAnalysis + allSchemas map[string]SchemaRef + allOfs map[string]SchemaRef +} + +func (s *Spec) reset() { + s.consumes = make(map[string]struct{}, 150) + s.produces = make(map[string]struct{}, 150) + s.authSchemes = make(map[string]struct{}, 150) + s.operations = make(map[string]map[string]*spec.Operation, 150) + s.allSchemas = make(map[string]SchemaRef, 150) + s.allOfs = make(map[string]SchemaRef, 150) + s.references.schemas = make(map[string]spec.Ref, 150) + s.references.pathItems = make(map[string]spec.Ref, 150) + s.references.responses = make(map[string]spec.Ref, 150) + s.references.parameters = make(map[string]spec.Ref, 150) + s.references.items = make(map[string]spec.Ref, 150) + s.references.headerItems = make(map[string]spec.Ref, 150) + s.references.parameterItems = make(map[string]spec.Ref, 150) + s.references.allRefs = make(map[string]spec.Ref, 150) + s.patterns.parameters = make(map[string]string, 150) + s.patterns.headers = make(map[string]string, 150) + s.patterns.items = make(map[string]string, 150) + s.patterns.schemas = make(map[string]string, 150) + s.patterns.allPatterns = make(map[string]string, 150) + s.enums.parameters = make(map[string][]interface{}, 150) + s.enums.headers = make(map[string][]interface{}, 150) + s.enums.items = make(map[string][]interface{}, 150) + s.enums.schemas = make(map[string][]interface{}, 150) + s.enums.allEnums = make(map[string][]interface{}, 150) +} + +func (s *Spec) reload() { + s.reset() + s.initialize() +} + +func (s *Spec) initialize() { + for _, c := range s.spec.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range s.spec.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range s.spec.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + for path, pathItem := range s.AllPaths() { + s.analyzeOperations(path, &pathItem) + } + + for name, parameter := range s.spec.Parameters { + refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) + if parameter.Items != nil { + s.analyzeItems("items", parameter.Items, refPref, "parameter") + } + if parameter.In == "body" && parameter.Schema != nil { + s.analyzeSchema("schema", *parameter.Schema, refPref) + } + if parameter.Pattern != "" { + s.patterns.addParameterPattern(refPref, parameter.Pattern) + } + if len(parameter.Enum) > 0 { + s.enums.addParameterEnum(refPref, parameter.Enum) + } + } + + for name, response := range s.spec.Responses { + refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) + for k, v := range response.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + if v.Items != nil { + s.analyzeItems("items", v.Items, hRefPref, "header") + } + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + if response.Schema != nil { + s.analyzeSchema("schema", *response.Schema, refPref) + } + } + + for name, schema := range s.spec.Definitions { + s.analyzeSchema(name, schema, "/definitions") + } + // TODO: after analyzing all things and flattening schemas etc + // resolve all the collected references to their final representations + // best put in a separate method because this could get expensive +} + +func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { + // TODO: 
resolve refs here? + // Currently, operations declared via pathItem $ref are known only after expansion + op := pi + if pi.Ref.String() != "" { + key := slashpath.Join("/paths", jsonpointer.Escape(path)) + s.references.addPathItemRef(key, pi) + } + s.analyzeOperation("GET", path, op.Get) + s.analyzeOperation("PUT", path, op.Put) + s.analyzeOperation("POST", path, op.Post) + s.analyzeOperation("PATCH", path, op.Patch) + s.analyzeOperation("DELETE", path, op.Delete) + s.analyzeOperation("HEAD", path, op.Head) + s.analyzeOperation("OPTIONS", path, op.Options) + for i, param := range op.Parameters { + refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + if param.Items != nil { + s.analyzeItems("items", param.Items, refPref, "parameter") + } + if param.Schema != nil { + s.analyzeSchema("schema", *param.Schema, refPref) + } + } +} + +func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { + if items == nil { + return + } + refPref := slashpath.Join(prefix, name) + s.analyzeItems(name, items.Items, refPref, location) + if items.Ref.String() != "" { + s.references.addItemsRef(refPref, items, location) + } + if items.Pattern != "" { + s.patterns.addItemsPattern(refPref, items.Pattern) + } + if len(items.Enum) > 0 { + s.enums.addItemsEnum(refPref, items.Enum) + } +} + +func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { + if op == nil { + return + } + + for _, c := range op.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range op.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range op.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + if _, ok := s.operations[method]; !ok { + s.operations[method] = make(map[string]*spec.Operation) + } + s.operations[method][path] = op + prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) + for i, param := range op.Parameters { + refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + s.analyzeItems("items", param.Items, refPref, "parameter") + if param.In == "body" && param.Schema != nil { + s.analyzeSchema("schema", *param.Schema, refPref) + } + } + if op.Responses != nil { + if op.Responses.Default != nil { + refPref := slashpath.Join(prefix, "responses", "default") + if op.Responses.Default.Ref.String() != "" { + s.references.addResponseRef(refPref, op.Responses.Default) + } + for k, v := range op.Responses.Default.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + } + if op.Responses.Default.Schema != nil { + s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref) + } + } + for k, res := range op.Responses.StatusCodeResponses { + refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, &res) + } + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, 
"headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + if res.Schema != nil { + s.analyzeSchema("schema", *res.Schema, refPref) + } + } + } +} + +func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) { + refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) + schRef := SchemaRef{ + Name: name, + Schema: &schema, + Ref: spec.MustCreateRef("#" + refURI), + TopLevel: prefix == "/definitions", + } + + s.allSchemas["#"+refURI] = schRef + + if schema.Ref.String() != "" { + s.references.addSchemaRef(refURI, schRef) + } + if schema.Pattern != "" { + s.patterns.addSchemaPattern(refURI, schema.Pattern) + } + if len(schema.Enum) > 0 { + s.enums.addSchemaEnum(refURI, schema.Enum) + } + + for k, v := range schema.Definitions { + s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions")) + } + for k, v := range schema.Properties { + s.analyzeSchema(k, v, slashpath.Join(refURI, "properties")) + } + for k, v := range schema.PatternProperties { + // NOTE: swagger 2.0 does not support PatternProperties. + // However it is possible to analyze this in a schema + s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties")) + } + for i, v := range schema.AllOf { + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) + } + if len(schema.AllOf) > 0 { + s.allOfs["#"+refURI] = schRef + } + for i, v := range schema.AnyOf { + // NOTE: swagger 2.0 does not support anyOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) + } + for i, v := range schema.OneOf { + // NOTE: swagger 2.0 does not support oneOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) + } + if schema.Not != nil { + // NOTE: swagger 2.0 does not support "not" constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema("not", *schema.Not, refURI) + } + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI) + } + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: swagger 2.0 does not support AdditionalItems. 
+ // However it is possible to analyze this in a schema + s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI) + } + if schema.Items != nil { + if schema.Items.Schema != nil { + s.analyzeSchema("items", *schema.Items.Schema, refURI) + } + for i, sch := range schema.Items.Schemas { + s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) + } + } +} + +// SecurityRequirement is a representation of a security requirement for an operation +type SecurityRequirement struct { + Name string + Scopes []string +} + +// SecurityRequirementsFor gets the security requirements for the operation +func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { + if s.spec.Security == nil && operation.Security == nil { + return nil + } + + schemes := s.spec.Security + if operation.Security != nil { + schemes = operation.Security + } + + result := [][]SecurityRequirement{} + for _, scheme := range schemes { + if len(scheme) == 0 { + // append a zero object for anonymous + result = append(result, []SecurityRequirement{{}}) + continue + } + var reqs []SecurityRequirement + for k, v := range scheme { + if v == nil { + v = []string{} + } + reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) + } + result = append(result, reqs) + } + return result +} + +// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { + result := make(map[string]spec.SecurityScheme) + + for _, v := range requirements { + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + return result +} + +// SecurityDefinitionsFor gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { + requirements := s.SecurityRequirementsFor(operation) + if len(requirements) == 0 { + return nil + } + + result := make(map[string]spec.SecurityScheme) + for _, reqs := range requirements { + for _, v := range reqs { + if v.Name == "" { + // optional requirement + continue + } + if _, ok := result[v.Name]; ok { + // duplicate requirement + continue + } + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + } + return result +} + +// ConsumesFor gets the mediatypes for the operation +func (s *Spec) ConsumesFor(operation *spec.Operation) []string { + + if len(operation.Consumes) == 0 { + cons := make(map[string]struct{}, len(s.spec.Consumes)) + for _, k := range s.spec.Consumes { + cons[k] = struct{}{} + } + return s.structMapKeys(cons) + } + + cons := make(map[string]struct{}, len(operation.Consumes)) + for _, c := range operation.Consumes { + cons[c] = struct{}{} + } + return s.structMapKeys(cons) +} + +// ProducesFor gets the mediatypes for the operation +func (s *Spec) ProducesFor(operation *spec.Operation) []string { + if len(operation.Produces) == 0 { + prod := make(map[string]struct{}, len(s.spec.Produces)) + for _, k := range s.spec.Produces { + prod[k] = struct{}{} + } + return s.structMapKeys(prod) + } + + prod := make(map[string]struct{}, len(operation.Produces)) + for _, c := range operation.Produces { + prod[c] = struct{}{} + } + return s.structMapKeys(prod) +} + +func mapKeyFromParam(param *spec.Parameter) string { + return fmt.Sprintf("%s#%s", param.In, 
fieldNameFromParam(param)) +} + +func fieldNameFromParam(param *spec.Parameter) string { + // TODO: this should be x-go-name + if nm, ok := param.Extensions.GetString("go-name"); ok { + return nm + } + return swag.ToGoName(param.Name) +} + +// ErrorOnParamFunc is a callback function to be invoked +// whenever an error is encountered while resolving references +// on parameters. +// +// This function takes as input the spec.Parameter which triggered the +// error and the error itself. +// +// If the callback function returns false, the calling function should bail. +// +// If it returns true, the calling function should continue evaluating parameters. +// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). +type ErrorOnParamFunc func(spec.Parameter, error) bool + +func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { + for _, param := range parameters { + pr := param + if pr.Ref.String() != "" { + obj, _, err := pr.Ref.GetPointer().Get(s.spec) + if err != nil { + if callmeOnError != nil { + if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { + continue + } + break + } else { + panic(fmt.Sprintf("invalid reference: %q", pr.Ref.String())) + } + } + if objAsParam, ok := obj.(spec.Parameter); ok { + pr = objAsParam + } else { + if callmeOnError != nil { + if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { + continue + } + break + } else { + panic(fmt.Sprintf("resolved reference is not a parameter: %q", pr.Ref.String())) + } + } + } + res[mapKeyFromParam(&pr)] = pr + } +} + +// ParametersFor the specified operation id. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParametersFor(operationID string) []spec.Parameter { + return s.SafeParametersFor(operationID, nil) +} + +// SafeParametersFor the specified operation id. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { + gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { + bag := make(map[string]spec.Parameter) + s.paramsAsMap(pi.Parameters, bag, callmeOnError) + s.paramsAsMap(op.Parameters, bag, callmeOnError) + + var res []spec.Parameter + for _, v := range bag { + res = append(res, v) + } + return res + } + for _, pi := range s.spec.Paths.Paths { + if pi.Get != nil && pi.Get.ID == operationID { + return gatherParams(&pi, pi.Get) + } + if pi.Head != nil && pi.Head.ID == operationID { + return gatherParams(&pi, pi.Head) + } + if pi.Options != nil && pi.Options.ID == operationID { + return gatherParams(&pi, pi.Options) + } + if pi.Post != nil && pi.Post.ID == operationID { + return gatherParams(&pi, pi.Post) + } + if pi.Patch != nil && pi.Patch.ID == operationID { + return gatherParams(&pi, pi.Patch) + } + if pi.Put != nil && pi.Put.ID == operationID { + return gatherParams(&pi, pi.Put) + } + if pi.Delete != nil && pi.Delete.ID == operationID { + return gatherParams(&pi, pi.Delete) + } + } + return nil +} + +// ParamsFor the specified method and path. 
+// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
+// apply for the method and path.
+//
+// Assumes parameters properly resolve references if any and that
+// such references actually resolve to a parameter object.
+// Otherwise, panics.
+func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
+	return s.SafeParamsFor(method, path, nil)
+}
+
+// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
+// apply for the method and path.
+//
+// Does not assume parameters properly resolve references or that
+// such references actually resolve to a parameter object.
+//
+// Upon error, invokes an ErrorOnParamFunc callback with the erroneous
+// parameters. If the callback is set to nil, panics upon errors.
+func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter {
+	res := make(map[string]spec.Parameter)
+	if pi, ok := s.spec.Paths.Paths[path]; ok {
+		s.paramsAsMap(pi.Parameters, res, callmeOnError)
+		s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError)
+	}
+	return res
+}
+
+// OperationForName gets the operation for the given id
+func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
+	for method, pathItem := range s.operations {
+		for path, op := range pathItem {
+			if operationID == op.ID {
+				return method, path, op, true
+			}
+		}
+	}
+	return "", "", nil, false
+}
+
+// OperationFor the given method and path
+func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
+	if mp, ok := s.operations[strings.ToUpper(method)]; ok {
+		op, fn := mp[path]
+		return op, fn
+	}
+	return nil, false
+}
+
+// Operations gathers all the operations specified in the spec document
+func (s *Spec) Operations() map[string]map[string]*spec.Operation {
+	return s.operations
+}
+
+func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
+	if len(mp) == 0 {
+		return nil
+	}
+
+	result := make([]string, 0, len(mp))
+	for k := range mp {
+		result = append(result, k)
+	}
+	return result
+}
+
+// AllPaths returns all the paths in the swagger spec
+func (s *Spec) AllPaths() map[string]spec.PathItem {
+	if s.spec == nil || s.spec.Paths == nil {
+		return nil
+	}
+	return s.spec.Paths.Paths
+}
+
+// OperationIDs gets all the operation ids based on method and path
+func (s *Spec) OperationIDs() []string {
+	if len(s.operations) == 0 {
+		return nil
+	}
+	result := make([]string, 0, len(s.operations))
+	for method, v := range s.operations {
+		for p, o := range v {
+			if o.ID != "" {
+				result = append(result, o.ID)
+			} else {
+				result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
+			}
+		}
+	}
+	return result
+}
+
+// OperationMethodPaths gets all the "method path" couples for the operations in the spec
+func (s *Spec) OperationMethodPaths() []string {
+	if len(s.operations) == 0 {
+		return nil
+	}
+	result := make([]string, 0, len(s.operations))
+	for method, v := range s.operations {
+		for p := range v {
+			result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
+		}
+	}
+	return result
+}
+
+// RequiredConsumes gets all the distinct consumes that are specified in the specification document
+func (s *Spec) RequiredConsumes() []string {
+	return s.structMapKeys(s.consumes)
+}
+
+// RequiredProduces gets all the distinct produces that are specified in the specification document
+func (s *Spec) RequiredProduces() []string {
+	return s.structMapKeys(s.produces)
+}
+
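// Illustration only (not part of the vendored source): typical read-only queries
// against an analyzed document; the variable "an" (a *Spec) is hypothetical.
//
//	if op, ok := an.OperationFor("get", "/pets"); ok {
//		fmt.Println(an.ConsumesFor(op), an.ProducesFor(op))
//	}
//	fmt.Println(an.OperationIDs())     // ids, or "METHOD path" when an id is missing
//	fmt.Println(an.RequiredConsumes()) // distinct media types consumed by the document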
+// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
+func (s *Spec) RequiredSecuritySchemes() []string {
+	return s.structMapKeys(s.authSchemes)
+}
+
+// SchemaRef is a reference to a schema
+type SchemaRef struct {
+	Name     string
+	Ref      spec.Ref
+	Schema   *spec.Schema
+	TopLevel bool
+}
+
+// SchemasWithAllOf returns schema references to all schemas that are defined
+// with an allOf key
+func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
+	for _, v := range s.allOfs {
+		result = append(result, v)
+	}
+	return
+}
+
+// AllDefinitions returns schema references for all the definitions that were discovered
+func (s *Spec) AllDefinitions() (result []SchemaRef) {
+	for _, v := range s.allSchemas {
+		result = append(result, v)
+	}
+	return
+}
+
+// AllDefinitionReferences returns json refs for all the discovered schemas
+func (s *Spec) AllDefinitionReferences() (result []string) {
+	for _, v := range s.references.schemas {
+		result = append(result, v.String())
+	}
+	return
+}
+
+// AllParameterReferences returns json refs for all the discovered parameters
+func (s *Spec) AllParameterReferences() (result []string) {
+	for _, v := range s.references.parameters {
+		result = append(result, v.String())
+	}
+	return
+}
+
+// AllResponseReferences returns json refs for all the discovered responses
+func (s *Spec) AllResponseReferences() (result []string) {
+	for _, v := range s.references.responses {
+		result = append(result, v.String())
+	}
+	return
+}
+
+// AllPathItemReferences returns the references for all the path items
+func (s *Spec) AllPathItemReferences() (result []string) {
+	for _, v := range s.references.pathItems {
+		result = append(result, v.String())
+	}
+	return
+}
+
+// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers).
+//
+// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid
+// Swagger 2.0 spec.
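//
// Illustration only: a sketch of the corresponding sanity check, assuming a
// previously built analyzer "an":
//
//	if refs := an.AllItemsReferences(); len(refs) > 0 {
//		log.Printf("spec is not valid Swagger 2.0: %d $ref found in simple schema items", len(refs))
//	}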
+func (s *Spec) AllItemsReferences() (result []string) { + for _, v := range s.references.items { + result = append(result, v.String()) + } + return +} + +// AllReferences returns all the references found in the document, with possible duplicates +func (s *Spec) AllReferences() (result []string) { + for _, v := range s.references.allRefs { + result = append(result, v.String()) + } + return +} + +// AllRefs returns all the unique references found in the document +func (s *Spec) AllRefs() (result []spec.Ref) { + set := make(map[string]struct{}) + for _, v := range s.references.allRefs { + a := v.String() + if a == "" { + continue + } + if _, ok := set[a]; !ok { + set[a] = struct{}{} + result = append(result, v) + } + } + return +} + +func cloneStringMap(source map[string]string) map[string]string { + res := make(map[string]string, len(source)) + for k, v := range source { + res[k] = v + } + return res +} + +func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { + res := make(map[string][]interface{}, len(source)) + for k, v := range source { + res[k] = v + } + return res +} + +// ParameterPatterns returns all the patterns found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterPatterns() map[string]string { + return cloneStringMap(s.patterns.parameters) +} + +// HeaderPatterns returns all the patterns found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderPatterns() map[string]string { + return cloneStringMap(s.patterns.headers) +} + +// ItemsPatterns returns all the patterns found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsPatterns() map[string]string { + return cloneStringMap(s.patterns.items) +} + +// SchemaPatterns returns all the patterns found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaPatterns() map[string]string { + return cloneStringMap(s.patterns.schemas) +} + +// AllPatterns returns all the patterns found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllPatterns() map[string]string { + return cloneStringMap(s.patterns.allPatterns) +} + +// ParameterEnums returns all the enums found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.parameters) +} + +// HeaderEnums returns all the enums found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.headers) +} + +// ItemsEnums returns all the enums found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.items) +} + +// SchemaEnums returns all the enums found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.schemas) +} + +// AllEnums returns all the enums found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.allEnums) +} diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml new file mode 100644 index 00000000000..3239d74416a --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/appveyor.yml @@ -0,0 +1,33 @@ +version: "0.1.{build}" + 
+clone_folder: C:\go-openapi\analysis +shallow_clone: true # for startup speed +pull_requests: + do_not_increment_build_number: true + +#skip_tags: true +#skip_branch_with_pr: true + +# appveyor.yml +build: off + +environment: + GOPATH: c:\gopath + +stack: go 1.12 + +test_script: + - go test -v -timeout 20m ./... +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off + +notifications: + - provider: Slack + incoming_webhook: https://hooks.slack.com/services/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 + auth_token: + secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= + channel: bots + on_build_success: false + on_build_failure: true + on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go new file mode 100644 index 00000000000..84cc4e54cb7 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/debug.go @@ -0,0 +1,47 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + // Debug is true when the SWAGGER_DEBUG env var is not empty. + // It enables a more verbose logging of the spec analyzer. + Debug = os.Getenv("SWAGGER_DEBUG") != "" + // analysisLogger is a debug logger for this package + analysisLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + analysisLogger = log.New(os.Stdout, "analysis:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + analysisLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go new file mode 100644 index 00000000000..d5294c0950b --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/doc.go @@ -0,0 +1,43 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package analysis provides methods to work with a Swagger specification document from
+package go-openapi/spec.
+
+Analyzing a specification
+
+An analyzed specification object (type Spec) provides methods to work with a swagger definition.
+
+Flattening or expanding a specification
+
+Flattening a specification bundles all remote $ref in the main spec document.
+Depending on flattening options, additional preprocessing may take place:
+  - full flattening: replacing all inline complex constructs by a named entry in #/definitions
+  - expand: replace all $ref's in the document by their expanded content
+
+Merging several specifications
+
+Mixing in several specifications merges all Swagger constructs and warns about any conflicts found.
+
+Fixing a specification
+
+Unmarshalling a specification with golang's json unmarshalling may lead to
+unwanted results on fields that are present but empty.
+
+Analyzing a Swagger schema
+
+Swagger schemas are analyzed to determine their complexity and qualify their content.
+*/
+package analysis
diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go
new file mode 100644
index 00000000000..bfe014ca51a
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/fixer.go
@@ -0,0 +1,76 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import "github.com/go-openapi/spec"
+
+// FixEmptyResponseDescriptions replaces empty ("") response
+// descriptions in the input with "(empty)" to ensure that the
+// resulting Swagger stays valid. The problem appears to arise
+// from reading in valid specs that have an explicit response
+// description of "" (valid, since response.description is required), but
+// due to zero values being omitted upon re-serialization (omitempty) we
+// lose them unless we put some characters in there.
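//
// Illustration only, a sketch of the intended effect on a single response:
//
//	rsp := spec.NewResponse() // Description is ""
//	FixEmptyDesc(rsp)         // Description is now "(empty)"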
+func FixEmptyResponseDescriptions(s *spec.Swagger) { + if s.Paths != nil { + for _, v := range s.Paths.Paths { + if v.Get != nil { + FixEmptyDescs(v.Get.Responses) + } + if v.Put != nil { + FixEmptyDescs(v.Put.Responses) + } + if v.Post != nil { + FixEmptyDescs(v.Post.Responses) + } + if v.Delete != nil { + FixEmptyDescs(v.Delete.Responses) + } + if v.Options != nil { + FixEmptyDescs(v.Options.Responses) + } + if v.Head != nil { + FixEmptyDescs(v.Head.Responses) + } + if v.Patch != nil { + FixEmptyDescs(v.Patch.Responses) + } + } + } + for k, v := range s.Responses { + FixEmptyDesc(&v) + s.Responses[k] = v + } +} + +// FixEmptyDescs adds "(empty)" as the description for any Response in +// the given Responses object that doesn't already have one. +func FixEmptyDescs(rs *spec.Responses) { + FixEmptyDesc(rs.Default) + for k, v := range rs.StatusCodeResponses { + FixEmptyDesc(&v) + rs.StatusCodeResponses[k] = v + } +} + +// FixEmptyDesc adds "(empty)" as the description to the given +// Response object if it doesn't already have one and isn't a +// ref. No-op on nil input. +func FixEmptyDesc(rs *spec.Response) { + if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { + return + } + rs.Description = "(empty)" +} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go new file mode 100644 index 00000000000..ae1eef5d197 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -0,0 +1,1732 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + "log" + "net/http" + "net/url" + "os" + slashpath "path" + "path/filepath" + "sort" + "strings" + + "strconv" + + "github.com/go-openapi/analysis/internal" + "github.com/go-openapi/jsonpointer" + swspec "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// FlattenOpts configuration for flattening a swagger specification. +type FlattenOpts struct { + Spec *Spec // The analyzed spec to work with + flattenContext *context // Internal context to track flattening activity + + BasePath string + + // Flattening options + Expand bool // If Expand is true, we skip flattening the spec and expand it instead + Minimal bool + Verbose bool + RemoveUnused bool + + /* Extra keys */ + _ struct{} // require keys +} + +// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. 
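//
// Illustration only: flattening expands everything but schemas, while the Expand
// option expands schemas too (sketch):
//
//	f.ExpandOpts(true)  // SkipSchemas: true, used when flattening
//	f.ExpandOpts(false) // SkipSchemas: false, used when expanding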
+func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *swspec.ExpandOptions {
+	return &swspec.ExpandOptions{RelativeBase: f.BasePath, SkipSchemas: skipSchemas}
+}
+
+// Swagger gets the swagger specification for this flatten operation
+func (f *FlattenOpts) Swagger() *swspec.Swagger {
+	return f.Spec.spec
+}
+
+// newRef stores information about refs created during the flattening process
+type newRef struct {
+	key      string
+	newName  string
+	path     string
+	isOAIGen bool
+	resolved bool
+	schema   *swspec.Schema
+	parents  []string
+}
+
+// context stores intermediary results from flatten
+type context struct {
+	newRefs  map[string]*newRef
+	warnings []string
+	resolved map[string]string
+}
+
+func newContext() *context {
+	return &context{
+		newRefs:  make(map[string]*newRef, 150),
+		warnings: make([]string, 0),
+		resolved: make(map[string]string, 50),
+	}
+}
+
+// Flatten an analyzed spec and produce a self-contained spec bundle.
+//
+// There is a minimal and a full flattening mode.
+//
+// Minimally flattening a spec means:
+// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left
+// unscathed)
+// - Importing external (http, file) references so they become internal to the document
+// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers
+// like "$ref": "#/definitions/myObject/allOfs/1")
+//
+// A minimally flattened spec thus guarantees the following properties:
+// - all $refs point to a local definition (i.e. '#/definitions/...')
+// - definitions are unique
+//
+// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they
+// represent a complex schema or express commonality in the spec.
+// Otherwise, they are simply expanded.
+//
+// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger.
+//
+// Fully flattening a spec means:
+// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion.
+//
+// By complex, we mean every JSON object with some properties.
+// Arrays, when they do not define a tuple,
+// or empty objects with or without additionalProperties, are not considered complex and remain inline.
+//
+// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions
+// have been created.
+//
+// Available flattening options:
+// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched
+// - Expand: expand all $ref's in the document (has no effect if Minimal is set to true)
+// - Verbose: croaks about name conflicts detected
+// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening
+//
+// NOTE: expansion removes all $ref save circular $ref, which remain in place
+//
+// TODO: additional options
+// - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parent has set an
+//  x-go-name extension
+// - LiftAllOfs:
+//     - limit the flattening of allOf members when simple objects
+//     - merge allOf with validation only
+//     - merge allOf with extensions only
+//     - ...
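//
// Illustration only: a typical minimal flatten call, assuming "doc" is a previously
// loaded *swspec.Swagger:
//
//	an := New(doc)
//	err := Flatten(FlattenOpts{Spec: an, BasePath: "swagger.yml", Minimal: true})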
+// +func Flatten(opts FlattenOpts) error { + // Make sure opts.BasePath is an absolute path + if !filepath.IsAbs(opts.BasePath) { + cwd, _ := os.Getwd() + opts.BasePath = filepath.Join(cwd, opts.BasePath) + } + // make sure drive letter on windows is normalized to lower case + u, _ := url.Parse(opts.BasePath) + opts.BasePath = u.String() + + opts.flattenContext = newContext() + + // recursively expand responses, parameters, path items and items in simple schemas. + // This simplifies the spec and leaves $ref only into schema objects. + if err := swspec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { + return err + } + + // strip current file from $ref's, so we can recognize them as proper definitions + // In particular, this works around for issue go-openapi/spec#76: leading absolute file in $ref is stripped + if err := normalizeRef(&opts); err != nil { + return err + } + + if opts.RemoveUnused { + // optionally removes shared parameters and responses already expanded (now unused) + // default parameters (i.e. under paths) remain. + opts.Swagger().Parameters = nil + opts.Swagger().Responses = nil + } + + opts.Spec.reload() // re-analyze + + // at this point there are no references left but in schemas + + for imported := false; !imported; { + // iteratively import remote references until none left. + // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") + var err error + if imported, err = importExternalReferences(&opts); err != nil { + return err + } + opts.Spec.reload() // re-analyze + } + + if !opts.Minimal && !opts.Expand { + // full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) + if err := nameInlinedSchemas(&opts); err != nil { + return err + } + + opts.Spec.reload() // re-analyze + } + + // rewrite JSON pointers other than $ref to named definitions + // and attempt to resolve conflicting names whenever possible. + if err := stripPointersAndOAIGen(&opts); err != nil { + return err + } + + if opts.RemoveUnused { + // remove unused definitions + expected := make(map[string]struct{}) + for k := range opts.Swagger().Definitions { + expected[slashpath.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} + } + for _, k := range opts.Spec.AllDefinitionReferences() { + delete(expected, k) + } + for k := range expected { + debugLog("removing unused definition %s", slashpath.Base(k)) + if opts.Verbose { + log.Printf("info: removing unused definition: %s", slashpath.Base(k)) + } + delete(opts.Swagger().Definitions, slashpath.Base(k)) + } + opts.Spec.reload() // re-analyze + } + + // TODO: simplify known schema patterns to flat objects with properties + // examples: + // - lift simple allOf object, + // - empty allOf with validation only or extensions only + // - rework allOf arrays + // - rework allOf additionalProperties + + if opts.Verbose { + // issue notifications + croak(&opts) + } + return nil +} + +// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
+//
+// Complex means the schema is not any of:
+// - a simple type (primitive)
+// - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
+// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
+// generate a definition)
+func isAnalyzedAsComplex(asch *AnalyzedSchema) bool {
+	if !asch.IsSimpleSchema && !asch.IsArray && !asch.IsMap {
+		return true
+	}
+	return false
+}
+
+// nameInlinedSchemas replaces every complex inline construct by a named definition.
+func nameInlinedSchemas(opts *FlattenOpts) error {
+	debugLog("nameInlinedSchemas")
+	namer := &inlineSchemaNamer{
+		Spec:           opts.Swagger(),
+		Operations:     opRefsByRef(gatherOperations(opts.Spec, nil)),
+		flattenContext: opts.flattenContext,
+		opts:           opts,
+	}
+	depthFirst := sortDepthFirst(opts.Spec.allSchemas)
+	for _, key := range depthFirst {
+		sch := opts.Spec.allSchemas[key]
+		if sch.Schema != nil && sch.Schema.Ref.String() == "" && !sch.TopLevel { // inline schema
+			asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath})
+			if err != nil {
+				return fmt.Errorf("schema analysis [%s]: %v", key, err)
+			}
+
+			if isAnalyzedAsComplex(asch) { // move complex schemas to definitions
+				if err := namer.Name(key, sch.Schema, asch); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+var depthGroupOrder = []string{
+	"sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition",
+}
+
+func sortDepthFirst(data map[string]SchemaRef) []string {
+	// group by category (shared params, op param, statuscode response, default response, definitions)
+	// sort groups internally by number of parts in the key and lexical names
+	// flatten groups into a single list of keys
+	sorted := make([]string, 0, len(data))
+	grouped := make(map[string]keys, len(data))
+	for k := range data {
+		split := keyParts(k)
+		var pk string
+		if split.IsSharedOperationParam() {
+			pk = "sharedOpParam"
+		}
+		if split.IsOperationParam() {
+			pk = "opParam"
+		}
+		if split.IsStatusCodeResponse() {
+			pk = "codeResponse"
+		}
+		if split.IsDefaultResponse() {
+			pk = "defaultResponse"
+		}
+		if split.IsDefinition() {
+			pk = "definition"
+		}
+		if split.IsSharedParam() {
+			pk = "sharedParam"
+		}
+		if split.IsSharedResponse() {
+			pk = "sharedResponse"
+		}
+		grouped[pk] = append(grouped[pk], key{Segments: len(split), Key: k})
+	}
+
+	for _, pk := range depthGroupOrder {
+		res := grouped[pk]
+		sort.Sort(res)
+		for _, v := range res {
+			sorted = append(sorted, v.Key)
+		}
+	}
+	return sorted
+}
+
+type key struct {
+	Segments int
+	Key      string
+}
+type keys []key
+
+func (k keys) Len() int      { return len(k) }
+func (k keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
+func (k keys) Less(i, j int) bool {
+	return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key)
+}
+
+type inlineSchemaNamer struct {
+	Spec           *swspec.Swagger
+	Operations     map[string]opRef
+	flattenContext *context
+	opts           *FlattenOpts
+}
+
+func opRefsByRef(oprefs map[string]opRef) map[string]opRef {
+	result := make(map[string]opRef, len(oprefs))
+	for _, v := range oprefs {
+		result[v.Ref.String()] = v
+	}
+	return result
+}
+
+func (isn *inlineSchemaNamer) Name(key string, schema *swspec.Schema, aschema *AnalyzedSchema) error {
+	debugLog("naming inlined schema at %s", key)
+
+	parts := keyParts(key)
+	for _, name := range namesFromKey(parts, aschema, isn.Operations) {
+		if name != "" {
+			// create unique name
+			newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name))
+
+			// clone schema
+			sch, err := cloneSchema(schema)
+			if err != nil {
+				return err
+			}
+
+			// replace values on schema
+			if err := rewriteSchemaToRef(isn.Spec, key,
+				swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil {
+				return fmt.Errorf("error while creating definition %q from inline schema: %v", newName, err)
+			}
+
+			// rewrite any dependent $ref pointing to this place,
+			// when not already pointing to a top-level definition.
+			//
+			// NOTE: this is important if such referrers use arbitrary JSON pointers.
+			an := New(isn.Spec)
+			for k, v := range an.references.allRefs {
+				r, _, erd := deepestRef(isn.opts, v)
+				if erd != nil {
+					return fmt.Errorf("at %s, %v", k, erd)
+				}
+				if r.String() == key ||
+					(r.String() == slashpath.Join(definitionsPath, newName) &&
+						slashpath.Dir(v.String()) != definitionsPath) {
+					debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String())
+
+					// rewrite $ref to the new target
+					if err := updateRef(isn.Spec, k,
+						swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil {
+						return err
+					}
+				}
+			}
+
+			// NOTE: this extension is currently not used by go-swagger (provided for information only)
+			sch.AddExtension("x-go-gen-location", genLocation(parts))
+
+			// save cloned schema to definitions
+			saveSchema(isn.Spec, newName, sch)
+
+			// keep track of created refs
+			if isn.flattenContext != nil {
+				debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen)
+				resolved := false
+				if _, ok := isn.flattenContext.newRefs[key]; ok {
+					resolved = isn.flattenContext.newRefs[key].resolved
+				}
+				isn.flattenContext.newRefs[key] = &newRef{
+					key:      key,
+					newName:  newName,
+					path:     slashpath.Join(definitionsPath, newName),
+					isOAIGen: isOAIGen,
+					resolved: resolved,
+					schema:   sch,
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// genLocation indicates from which section of the specification (models or operations) a definition has been created.
+//
+// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided
+// for information only.
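//
// Illustration only: a definition lifted from an operation body ends up carrying
//
//	"x-go-gen-location": "operations"
//
// while one created under #/definitions carries "models".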
+func genLocation(parts splitKey) string { + if parts.IsOperation() { + return "operations" + } + if parts.IsDefinition() { + return "models" + } + return "" +} + +// uniqifyName yields a unique name for a definition +func uniqifyName(definitions swspec.Definitions, name string) (string, bool) { + isOAIGen := false + if name == "" { + name = "oaiGen" + isOAIGen = true + } + if len(definitions) == 0 { + return name, isOAIGen + } + + unq := true + for k := range definitions { + if strings.EqualFold(k, name) { + unq = false + break + } + } + + if unq { + return name, isOAIGen + } + + name += "OAIGen" + isOAIGen = true + var idx int + unique := name + _, known := definitions[unique] + for known { + idx++ + unique = fmt.Sprintf("%s%d", name, idx) + _, known = definitions[unique] + } + return unique, isOAIGen +} + +func namesFromKey(parts splitKey, aschema *AnalyzedSchema, operations map[string]opRef) []string { + var baseNames [][]string + var startIndex int + if parts.IsOperation() { + // params + if parts.IsOperationParam() || parts.IsSharedOperationParam() { + piref := parts.PathItemRef() + if piref.String() != "" && parts.IsOperationParam() { + if op, ok := operations[piref.String()]; ok { + startIndex = 5 + baseNames = append(baseNames, []string{op.ID, "params", "body"}) + } + } else if parts.IsSharedOperationParam() { + pref := parts.PathRef() + for k, v := range operations { + if strings.HasPrefix(k, pref.String()) { + startIndex = 4 + baseNames = append(baseNames, []string{v.ID, "params", "body"}) + } + } + } + } + // responses + if parts.IsOperationResponse() { + piref := parts.PathItemRef() + if piref.String() != "" { + if op, ok := operations[piref.String()]; ok { + startIndex = 6 + baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) + } + } + } + } + + // definitions + if parts.IsDefinition() { + nm := parts.DefinitionName() + if nm != "" { + startIndex = 2 + baseNames = append(baseNames, []string{parts.DefinitionName()}) + } + } + + var result []string + for _, segments := range baseNames { + nm := parts.BuildName(segments, startIndex, aschema) + if nm != "" { + result = append(result, nm) + } + } + sort.Strings(result) + return result +} + +const ( + paths = "paths" + responses = "responses" + parameters = "parameters" + definitions = "definitions" + definitionsPath = "#/definitions" +) + +var ( + ignoredKeys map[string]struct{} + validMethods map[string]struct{} +) + +func init() { + ignoredKeys = map[string]struct{}{ + "schema": {}, + "properties": {}, + "not": {}, + "anyOf": {}, + "oneOf": {}, + } + + validMethods = map[string]struct{}{ + "GET": {}, + "HEAD": {}, + "OPTIONS": {}, + "PATCH": {}, + "POST": {}, + "PUT": {}, + "DELETE": {}, + } +} + +type splitKey []string + +func (s splitKey) IsDefinition() bool { + return len(s) > 1 && s[0] == definitions +} + +func (s splitKey) DefinitionName() string { + if !s.IsDefinition() { + return "" + } + return s[1] +} + +func (s splitKey) isKeyName(i int) bool { + if i <= 0 { + return false + } + count := 0 + for idx := i - 1; idx > 0; idx-- { + if s[idx] != "properties" { + break + } + count++ + } + + return count%2 != 0 +} + +func (s splitKey) BuildName(segments []string, startIndex int, aschema *AnalyzedSchema) string { + for i, part := range s[startIndex:] { + if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { + if part == "items" || part == "additionalItems" { + if aschema.IsTuple || aschema.IsTupleWithExtra { + segments = append(segments, "tuple") + } else { + segments = 
append(segments, "items") + } + if part == "additionalItems" { + segments = append(segments, part) + } + continue + } + segments = append(segments, part) + } + } + return strings.Join(segments, " ") +} + +func (s splitKey) IsOperation() bool { + return len(s) > 1 && s[0] == paths +} + +func (s splitKey) IsSharedOperationParam() bool { + return len(s) > 2 && s[0] == paths && s[2] == parameters +} + +func (s splitKey) IsSharedParam() bool { + return len(s) > 1 && s[0] == parameters +} + +func (s splitKey) IsOperationParam() bool { + return len(s) > 3 && s[0] == paths && s[3] == parameters +} + +func (s splitKey) IsOperationResponse() bool { + return len(s) > 3 && s[0] == paths && s[3] == responses +} + +func (s splitKey) IsSharedResponse() bool { + return len(s) > 1 && s[0] == responses +} + +func (s splitKey) IsDefaultResponse() bool { + return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" +} + +func (s splitKey) IsStatusCodeResponse() bool { + isInt := func() bool { + _, err := strconv.Atoi(s[4]) + return err == nil + } + return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() +} + +func (s splitKey) ResponseName() string { + if s.IsStatusCodeResponse() { + code, _ := strconv.Atoi(s[4]) + return http.StatusText(code) + } + if s.IsDefaultResponse() { + return "Default" + } + return "" +} + +func (s splitKey) PathItemRef() swspec.Ref { + if len(s) < 3 { + return swspec.Ref{} + } + pth, method := s[1], s[2] + if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { + return swspec.Ref{} + } + return swspec.MustCreateRef("#" + slashpath.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) +} + +func (s splitKey) PathRef() swspec.Ref { + if !s.IsOperation() { + return swspec.Ref{} + } + return swspec.MustCreateRef("#" + slashpath.Join("/", paths, jsonpointer.Escape(s[1]))) +} + +func keyParts(key string) splitKey { + var res []string + for _, part := range strings.Split(key[1:], "/") { + if part != "" { + res = append(res, jsonpointer.Unescape(part)) + } + } + return res +} + +func rewriteSchemaToRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { + debugLog("rewriting schema to ref for %s with %s", key, ref.String()) + _, value, err := getPointerFromKey(spec, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *swspec.Schema: + return rewriteParentRef(spec, key, ref) + + case swspec.Schema: + return rewriteParentRef(spec, key, ref) + + case *swspec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + } + + case *swspec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + } + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +func rewriteParentRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { + parent, entry, pvalue, err := getParentFromKey(spec, key) + if err != nil { + return err + } + + debugLog("rewriting holder for %T", pvalue) + switch container := pvalue.(type) { + case swspec.Response: + if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { + return err + } + + case *swspec.Response: + container.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case *swspec.Responses: + statusCode, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", key[1:], err) + } + resp := 
container.StatusCodeResponses[statusCode] + resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container.StatusCodeResponses[statusCode] = resp + + case map[string]swspec.Response: + resp := container[entry] + resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container[entry] = resp + + case swspec.Parameter: + if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { + return err + } + + case map[string]swspec.Parameter: + param := container[entry] + param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container[entry] = param + + case []swspec.Parameter: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", key[1:], err) + } + param := container[idx] + param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container[idx] = param + + case swspec.Definitions: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case map[string]swspec.Schema: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case []swspec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", key[1:], err) + } + container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case *swspec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", key[1:], err) + } + container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) + } + return nil +} + +func cloneSchema(schema *swspec.Schema) (*swspec.Schema, error) { + var sch swspec.Schema + if err := swag.FromDynamicJSON(schema, &sch); err != nil { + return nil, fmt.Errorf("cannot clone schema: %v", err) + } + return &sch, nil +} + +// importExternalReferences iteratively digs remote references and imports them into the main schema. +// +// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. +// +// This returns true when no more remote references can be found. 
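//
// Illustration only: a remote "./common.yml#/definitions/Error" is imported as
// "#/definitions/error" (the json-ified base name); if that name is already taken
// by a different schema, uniqifyName turns it into "errorOAIGen".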
+func importExternalReferences(opts *FlattenOpts) (bool, error) { + debugLog("importExternalReferences") + + groupedRefs := reverseIndexForSchemaRefs(opts) + sortedRefStr := make([]string, 0, len(groupedRefs)) + if opts.flattenContext == nil { + opts.flattenContext = newContext() + } + + // sort $ref resolution to ensure deterministic name conflict resolution + for refStr := range groupedRefs { + sortedRefStr = append(sortedRefStr, refStr) + } + sort.Strings(sortedRefStr) + + complete := true + + for _, refStr := range sortedRefStr { + entry := groupedRefs[refStr] + if entry.Ref.HasFragmentOnly { + continue + } + complete = false + var isOAIGen bool + + newName := opts.flattenContext.resolved[refStr] + if newName != "" { + // rewrite ref with already resolved external ref (useful for cyclical refs): + // rewrite external refs to local ones + debugLog("resolving known ref [%s] to %s", refStr, newName) + for _, key := range entry.Keys { + if err := updateRef(opts.Swagger(), key, + swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { + return false, err + } + } + } else { + // resolve schemas + debugLog("resolving schema from remote $ref [%s]", refStr) + sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) + if err != nil { + return false, fmt.Errorf("could not resolve schema: %v", err) + } + + // at this stage only $ref analysis matters + partialAnalyzer := &Spec{ + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + partialAnalyzer.reset() + partialAnalyzer.analyzeSchema("", *sch, "/") + + // now rewrite those refs with rebase + for key, ref := range partialAnalyzer.references.allRefs { + if err := updateRef(sch, key, swspec.MustCreateRef(rebaseRef(entry.Ref.String(), ref.String()))); err != nil { + return false, fmt.Errorf("failed to rewrite ref for key %q at %s: %v", key, entry.Ref.String(), err) + } + } + + // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name + newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) + debugLog("new name for [%s]: %s - with name conflict:%t", + strings.Join(entry.Keys, ", "), newName, isOAIGen) + + opts.flattenContext.resolved[refStr] = newName + + // rewrite the external refs to local ones + for _, key := range entry.Keys { + if err := updateRef(opts.Swagger(), key, + swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { + return false, err + } + + // keep track of created refs + resolved := false + if _, ok := opts.flattenContext.newRefs[key]; ok { + resolved = opts.flattenContext.newRefs[key].resolved + } + opts.flattenContext.newRefs[key] = &newRef{ + key: key, + newName: newName, + path: slashpath.Join(definitionsPath, newName), + isOAIGen: isOAIGen, + resolved: resolved, + schema: sch, + } + } + + // add the resolved schema to the definitions + saveSchema(opts.Swagger(), newName, sch) + } + } + // maintains ref index entries + for k := range opts.flattenContext.newRefs { + r := opts.flattenContext.newRefs[k] + + // update tracking with resolved schemas + if r.schema.Ref.String() != "" { + ref := swspec.MustCreateRef(r.path) + sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) + if err != nil { + return false, fmt.Errorf("could not resolve schema: %v", err) + } + r.schema = sch + } + // update tracking with renamed keys: got a cascade of refs + if r.path != k { + renamed := *r + renamed.key = r.path + 
opts.flattenContext.newRefs[renamed.path] = &renamed + + // indirect ref + r.newName = slashpath.Base(k) + r.schema = swspec.RefSchema(r.path) + r.path = k + r.isOAIGen = strings.Contains(k, "OAIGen") + } + } + + return complete, nil +} + +type refRevIdx struct { + Ref swspec.Ref + Keys []string +} + +// rebaseRef rebase a remote ref relative to a base ref. +// +// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). +// +// NOTE(windows): +// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// * "/ in paths may appear as escape sequences +func rebaseRef(baseRef string, ref string) string { + debugLog("rebasing ref: %s onto %s", ref, baseRef) + baseRef, _ = url.PathUnescape(baseRef) + ref, _ = url.PathUnescape(ref) + if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") { + return ref + } + + parts := strings.Split(ref, "#") + + baseParts := strings.Split(baseRef, "#") + baseURL, _ := url.Parse(baseParts[0]) + if strings.HasPrefix(ref, "#") { + if baseURL.Host == "" { + return strings.Join([]string{baseParts[0], parts[1]}, "#") + } + return strings.Join([]string{baseParts[0], parts[1]}, "#") + } + + refURL, _ := url.Parse(parts[0]) + if refURL.Host != "" || filepath.IsAbs(parts[0]) { + // not rebasing an absolute path + return ref + } + + // there is a relative path + var basePath string + if baseURL.Host != "" { + // when there is a host, standard URI rules apply (with "/") + baseURL.Path = slashpath.Dir(baseURL.Path) + baseURL.Path = slashpath.Join(baseURL.Path, "/"+parts[0]) + return baseURL.String() + } + + // this is a local relative path + // basePart[0] and parts[0] are local filesystem directories/files + basePath = filepath.Dir(baseParts[0]) + relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0]) + if len(parts) > 1 { + return strings.Join([]string{relPath, parts[1]}, "#") + } + return relPath +} + +// normalizePath renders absolute path on remote file refs +// +// NOTE(windows): +// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// * "/ in paths may appear as escape sequences +func normalizePath(ref swspec.Ref, opts *FlattenOpts) (normalizedPath string) { + uri, _ := url.PathUnescape(ref.String()) + if ref.HasFragmentOnly || filepath.IsAbs(uri) { + normalizedPath = uri + return + } + + refURL, _ := url.Parse(uri) + if refURL.Host != "" { + normalizedPath = uri + return + } + + parts := strings.Split(uri, "#") + // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage + parts[0] = filepath.Join(filepath.Dir(opts.BasePath), parts[0]) + normalizedPath = strings.Join(parts, "#") + return +} + +func reverseIndexForSchemaRefs(opts *FlattenOpts) map[string]refRevIdx { + collected := make(map[string]refRevIdx) + for key, schRef := range opts.Spec.references.schemas { + // normalize paths before sorting, + // so we get together keys in same external file + normalizedPath := normalizePath(schRef, opts) + if entry, ok := collected[normalizedPath]; ok { + entry.Keys = append(entry.Keys, key) + collected[normalizedPath] = entry + } else { + collected[normalizedPath] = refRevIdx{ + Ref: schRef, + Keys: []string{key}, + } + } + } + return collected +} + +func nameFromRef(ref swspec.Ref) string { + u := ref.GetURL() + if u.Fragment != "" { + return swag.ToJSONName(slashpath.Base(u.Fragment)) + } + if u.Path != "" { + bn := slashpath.Base(u.Path) + if bn != "" && bn != "/" { + ext := 
slashpath.Ext(bn) + if ext != "" { + return swag.ToJSONName(bn[:len(bn)-len(ext)]) + } + return swag.ToJSONName(bn) + } + } + return swag.ToJSONName(strings.Replace(u.Host, ".", " ", -1)) +} + +func saveSchema(spec *swspec.Swagger, name string, schema *swspec.Schema) { + if schema == nil { + return + } + if spec.Definitions == nil { + spec.Definitions = make(map[string]swspec.Schema, 150) + } + spec.Definitions[name] = *schema +} + +// getPointerFromKey retrieves the content of the JSON pointer "key" +func getPointerFromKey(spec interface{}, key string) (string, interface{}, error) { + switch spec.(type) { + case *swspec.Schema: + case *swspec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + if key == "#/" { + return "", spec, nil + } + // unescape chars in key, e.g. "{}" from path params + pth, _ := internal.PathUnescape(key[1:]) + ptr, err := jsonpointer.New(pth) + if err != nil { + return "", nil, err + } + + value, _, err := ptr.Get(spec) + if err != nil { + debugLog("error when getting key: %s with path: %s", key, pth) + return "", nil, err + } + return pth, value, nil +} + +// getParentFromKey retrieves the container of the JSON pointer "key" +func getParentFromKey(spec interface{}, key string) (string, string, interface{}, error) { + switch spec.(type) { + case *swspec.Schema: + case *swspec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + // unescape chars in key, e.g. "{}" from path params + pth, _ := internal.PathUnescape(key[1:]) + + parent, entry := slashpath.Dir(pth), slashpath.Base(pth) + debugLog("getting schema holder at: %s, with entry: %s", parent, entry) + + pptr, err := jsonpointer.New(parent) + if err != nil { + return "", "", nil, err + } + pvalue, _, err := pptr.Get(spec) + if err != nil { + return "", "", nil, fmt.Errorf("can't get parent for %s: %v", parent, err) + } + return parent, entry, pvalue, nil +} + +// updateRef replaces a ref by another one +func updateRef(spec interface{}, key string, ref swspec.Ref) error { + switch spec.(type) { + case *swspec.Schema: + case *swspec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + debugLog("updating ref for %s with %s", key, ref.String()) + pth, value, err := getPointerFromKey(spec, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *swspec.Schema: + refable.Ref = ref + case *swspec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case *swspec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case swspec.Schema: + debugLog("rewriting holder for %T", refable) + _, entry, pvalue, erp := getParentFromKey(spec, key) + if erp != nil { + return err + } + switch container := pvalue.(type) { + case swspec.Definitions: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case map[string]swspec.Schema: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case []swspec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case *swspec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + // NOTE: can't have case 
*swspec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled container type at %s: %T", key, value) + } + + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +// updateRefWithSchema replaces a ref with a schema (i.e. re-inline schema) +func updateRefWithSchema(spec *swspec.Swagger, key string, sch *swspec.Schema) error { + debugLog("updating ref for %s with schema", key) + pth, value, err := getPointerFromKey(spec, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *swspec.Schema: + *refable = *sch + case swspec.Schema: + _, entry, pvalue, erp := getParentFromKey(spec, key) + if erp != nil { + return err + } + switch container := pvalue.(type) { + case swspec.Definitions: + container[entry] = *sch + + case map[string]swspec.Schema: + container[entry] = *sch + + case []swspec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + container[idx] = *sch + + case *swspec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + container.Schemas[idx] = *sch + + // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value) + } + case *swspec.SchemaOrArray: + *refable.Schema = *sch + // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema + case *swspec.SchemaOrBool: + *refable.Schema = *sch + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +func containsString(names []string, name string) bool { + for _, nm := range names { + if nm == name { + return true + } + } + return false +} + +type opRef struct { + Method string + Path string + Key string + ID string + Op *swspec.Operation + Ref swspec.Ref +} + +type opRefs []opRef + +func (o opRefs) Len() int { return len(o) } +func (o opRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o opRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } + +func gatherOperations(specDoc *Spec, operationIDs []string) map[string]opRef { + var oprefs opRefs + + for method, pathItem := range specDoc.Operations() { + for pth, operation := range pathItem { + vv := *operation + oprefs = append(oprefs, opRef{ + Key: swag.ToGoName(strings.ToLower(method) + " " + pth), + Method: method, + Path: pth, + ID: vv.ID, + Op: &vv, + Ref: swspec.MustCreateRef("#" + slashpath.Join("/paths", jsonpointer.Escape(pth), method)), + }) + } + } + + sort.Sort(oprefs) + + operations := make(map[string]opRef) + for _, opr := range oprefs { + nm := opr.ID + if nm == "" { + nm = opr.Key + } + + oo, found := operations[nm] + if found && oo.Method != opr.Method && oo.Path != opr.Path { + nm = opr.Key + } + if len(operationIDs) == 0 || containsString(operationIDs, opr.ID) || containsString(operationIDs, nm) { + opr.ID = nm + opr.Op.ID = nm + operations[nm] = opr + } + } + return operations +} + +// stripPointersAndOAIGen removes anonymous JSON pointers from spec and chain with name conflicts handler. +// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible. 
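//
// Illustration only: after this stage a pointer such as
//
//	"$ref": "#/definitions/myObject/properties/inner"
//
// no longer appears: it has either been promoted to a named definition or expanded in place.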
+func stripPointersAndOAIGen(opts *FlattenOpts) error { + // name all JSON pointers to anonymous documents + if err := namePointers(opts); err != nil { + return err + } + + // remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts) + hasIntroducedPointerOrInline, ers := stripOAIGen(opts) + if ers != nil { + return ers + } + + // iterate as pointer or OAIGen resolution may introduce inline schemas or pointers + for hasIntroducedPointerOrInline { + if !opts.Minimal { + opts.Spec.reload() // re-analyze + if err := nameInlinedSchemas(opts); err != nil { + return err + } + } + + if err := namePointers(opts); err != nil { + return err + } + + // restrip + if hasIntroducedPointerOrInline, ers = stripOAIGen(opts); ers != nil { + return ers + } + + opts.Spec.reload() // re-analyze + } + return nil +} + +// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. +// +// A dedupe is deemed unnecessary whenever: +// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) +// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to +// the first parent. +// +// This function returns a true bool whenever it re-inlined a complex schema, so the caller may chose to iterate +// pointer and name resolution again. +func stripOAIGen(opts *FlattenOpts) (bool, error) { + debugLog("stripOAIGen") + replacedWithComplex := false + + // figure out referers of OAIGen definitions + for _, r := range opts.flattenContext.newRefs { + if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping) + continue + } + for k, v := range opts.Spec.references.allRefs { + if r.path != v.String() { + continue + } + found := false + for _, p := range r.parents { + if p == k { + found = true + break + } + } + if !found { + r.parents = append(r.parents, k) + } + } + } + + for k := range opts.flattenContext.newRefs { + r := opts.flattenContext.newRefs[k] + //debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", + // k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) + if r.isOAIGen && len(r.parents) >= 1 { + pr := r.parents + sort.Strings(pr) + + // rewrite first parent schema in lexicographical order + debugLog("rewrite first parent in lex order %s with schema", pr[0]) + if err := updateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil { + return false, err + } + if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen { + // update parent in ref index entry + debugLog("update parent entry: %s", pr[0]) + pa.schema = r.schema + pa.resolved = false + replacedWithComplex = true + } + + // rewrite other parents to point to first parent + if len(pr) > 1 { + for _, p := range pr[1:] { + replacingRef := swspec.MustCreateRef(pr[0]) + + // set complex when replacing ref is an anonymous jsonpointer: further processing may be required + replacedWithComplex = replacedWithComplex || + slashpath.Dir(replacingRef.String()) != definitionsPath + debugLog("rewrite parent with ref: %s", replacingRef.String()) + + // NOTE: it is possible at this stage to introduce json pointers (to non-definitions places). + // Those are stripped later on. 
+// stripOAIGen strips unnecessary OAIGen constructs from the spec, initially created to dedupe flattened definitions.
+//
+// A dedupe is deemed unnecessary whenever:
+//  - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining)
+//  - there is a conflict with multiple parents: merge OAIGen into the first parent, then rewrite the other parents
+//    to point to the first parent.
+//
+// This function returns true whenever it re-inlined a complex schema, so the caller may choose to iterate
+// pointer and name resolution again.
+func stripOAIGen(opts *FlattenOpts) (bool, error) {
+	debugLog("stripOAIGen")
+	replacedWithComplex := false
+
+	// figure out referrers of OAIGen definitions
+	for _, r := range opts.flattenContext.newRefs {
+		if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping)
+			continue
+		}
+		for k, v := range opts.Spec.references.allRefs {
+			if r.path != v.String() {
+				continue
+			}
+			found := false
+			for _, p := range r.parents {
+				if p == k {
+					found = true
+					break
+				}
+			}
+			if !found {
+				r.parents = append(r.parents, k)
+			}
+		}
+	}
+
+	for k := range opts.flattenContext.newRefs {
+		r := opts.flattenContext.newRefs[k]
+		//debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s",
+		//	k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String())
+		if r.isOAIGen && len(r.parents) >= 1 {
+			pr := r.parents
+			sort.Strings(pr)
+
+			// rewrite first parent schema in lexicographical order
+			debugLog("rewrite first parent in lex order %s with schema", pr[0])
+			if err := updateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil {
+				return false, err
+			}
+			if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen {
+				// update parent in ref index entry
+				debugLog("update parent entry: %s", pr[0])
+				pa.schema = r.schema
+				pa.resolved = false
+				replacedWithComplex = true
+			}
+
+			// rewrite other parents to point to first parent
+			if len(pr) > 1 {
+				for _, p := range pr[1:] {
+					replacingRef := swspec.MustCreateRef(pr[0])
+
+					// set complex when replacing ref is an anonymous jsonpointer: further processing may be required
+					replacedWithComplex = replacedWithComplex ||
+						slashpath.Dir(replacingRef.String()) != definitionsPath
+					debugLog("rewrite parent with ref: %s", replacingRef.String())
+
+					// NOTE: it is possible at this stage to introduce json pointers (to non-definitions places).
+					// Those are stripped later on.
+					if err := updateRef(opts.Swagger(), p, replacingRef); err != nil {
+						return false, err
+					}
+
+					if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen {
+						// update parent in ref index
+						debugLog("update parent entry: %s", p)
+						pa.schema = r.schema
+						pa.resolved = false
+						replacedWithComplex = true
+					}
+				}
+			}
+
+			// remove OAIGen definition
+			debugLog("removing definition %s", slashpath.Base(r.path))
+			delete(opts.Swagger().Definitions, slashpath.Base(r.path))
+
+			// propagate changes in ref index for keys which have this one as a parent
+			for kk, value := range opts.flattenContext.newRefs {
+				if kk == k || !value.isOAIGen || value.resolved {
+					continue
+				}
+				found := false
+				newParents := make([]string, 0, len(value.parents))
+				for _, parent := range value.parents {
+					switch {
+					case parent == r.path:
+						found = true
+						parent = pr[0]
+					case strings.HasPrefix(parent, r.path+"/"):
+						found = true
+						parent = slashpath.Join(pr[0], strings.TrimPrefix(parent, r.path))
+					}
+					newParents = append(newParents, parent)
+				}
+				if found {
+					value.parents = newParents
+				}
+			}
+
+			// mark naming conflict as resolved
+			debugLog("marking naming conflict resolved for key: %s", r.key)
+			opts.flattenContext.newRefs[r.key].isOAIGen = false
+			opts.flattenContext.newRefs[r.key].resolved = true
+
+			// determine if the previous substitution did inline a complex schema
+			if r.schema != nil && r.schema.Ref.String() == "" { // inline schema
+				asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath})
+				if err != nil {
+					return false, err
+				}
+				debugLog("re-inlined schema: parent: %s, %t", pr[0], isAnalyzedAsComplex(asch))
+				replacedWithComplex = replacedWithComplex ||
+					!(slashpath.Dir(pr[0]) == definitionsPath) && isAnalyzedAsComplex(asch)
+			}
+		}
+	}
+
+	debugLog("replacedWithComplex: %t", replacedWithComplex)
+	opts.Spec.reload() // re-analyze
+	return replacedWithComplex, nil
+}
+
+// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting
+// from flattening a spec
+func croak(opts *FlattenOpts) {
+	reported := make(map[string]bool, len(opts.flattenContext.newRefs))
+	for _, v := range opts.Spec.references.allRefs {
+		// warns about duplicate handling
+		for _, r := range opts.flattenContext.newRefs {
+			if r.isOAIGen && r.path == v.String() {
+				reported[r.newName] = true
+			}
+		}
+	}
+	for k := range reported {
+		log.Printf("warning: duplicate flattened definition name resolved as %s", k)
+	}
+	// warns about possible type mismatches
+	uniqueMsg := make(map[string]bool)
+	for _, msg := range opts.flattenContext.warnings {
+		if _, ok := uniqueMsg[msg]; ok {
+			continue
+		}
+		log.Printf("warning: %s", msg)
+		uniqueMsg[msg] = true
+	}
+}
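croak's deduplication is worth noting: warnings are keyed in a set before logging, so a construct referenced from many places is reported once. The same pattern in isolation (the sample messages are invented):

```go
package main

import "log"

func main() {
	warnings := []string{
		"found $ref \"#/responses/ok\" (response) interpreted as schema",
		"found $ref \"#/responses/ok\" (response) interpreted as schema",
		"found $ref \"#/parameters/id\" (parameter) interpreted as schema",
	}
	// Dedupe before logging, exactly as croak does with its uniqueMsg set.
	seen := make(map[string]bool, len(warnings))
	for _, msg := range warnings {
		if seen[msg] {
			continue
		}
		seen[msg] = true
		log.Printf("warning: %s", msg)
	}
}
```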
+// namePointers replaces all JSON pointers to anonymous documents by a $ref to new named definitions.
+//
+// This is carried out depth-first. Pointers to $refs which are top-level definitions are replaced by the $ref itself.
+// Pointers to simple types are expanded, unless they express commonality (i.e. several such $refs are used).
+func namePointers(opts *FlattenOpts) error {
+	debugLog("name pointers")
+	refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
+	for k, ref := range opts.Spec.references.allRefs {
+		if slashpath.Dir(ref.String()) == definitionsPath {
+			// this is a ref to a top-level definition: ok
+			continue
+		}
+		replacingRef, sch, erd := deepestRef(opts, ref)
+		if erd != nil {
+			return fmt.Errorf("at %s, %v", k, erd)
+		}
+		debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String())
+		refsToReplace[k] = SchemaRef{
+			Name:     k,            // caller
+			Ref:      replacingRef, // callee
+			Schema:   sch,
+			TopLevel: slashpath.Dir(replacingRef.String()) == definitionsPath,
+		}
+	}
+	depthFirst := sortDepthFirst(refsToReplace)
+	namer := &inlineSchemaNamer{
+		Spec:           opts.Swagger(),
+		Operations:     opRefsByRef(gatherOperations(opts.Spec, nil)),
+		flattenContext: opts.flattenContext,
+		opts:           opts,
+	}
+
+	for _, key := range depthFirst {
+		v := refsToReplace[key]
+		// update current replacement, which may have been updated by previous changes of deeper elements
+		replacingRef, sch, erd := deepestRef(opts, v.Ref)
+		if erd != nil {
+			return fmt.Errorf("at %s, %v", key, erd)
+		}
+		v.Ref = replacingRef
+		v.Schema = sch
+		v.TopLevel = slashpath.Dir(replacingRef.String()) == definitionsPath
+		debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String())
+
+		if v.TopLevel {
+			debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String())
+			// if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref
+			if err := updateRef(opts.Swagger(), key, v.Ref); err != nil {
+				return err
+			}
+		} else {
+			// this is a JSON pointer to an anonymous document (internal or external):
+			// create a definition for this schema when:
+			// - it is a complex schema
+			// - or it is pointed to by more than one $ref (i.e.
expresses commonality) + // otherwise, expand the pointer (single reference to a simple type) + // + // The named definition for this follows the target's key, not the caller's + debugLog("namePointers at %s for %s", key, v.Ref.String()) + + // qualify the expanded schema + /* + if key == "#/paths/~1some~1where~1{id}/get/parameters/1/items" { + // DEBUG + //func getPointerFromKey(spec interface{}, key string) (string, interface{}, error) { + k, res, err := getPointerFromKey(namer.Spec, key) + debugLog("k = %s, res=%#v, err=%v", k, res, err) + } + */ + asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if ers != nil { + return fmt.Errorf("schema analysis [%s]: %v", key, ers) + } + callers := make([]string, 0, 64) + + debugLog("looking for callers") + an := New(opts.Swagger()) + for k, w := range an.references.allRefs { + r, _, erd := deepestRef(opts, w) + if erd != nil { + return fmt.Errorf("at %s, %v", key, erd) + } + if r.String() == v.Ref.String() { + callers = append(callers, k) + } + } + debugLog("callers for %s: %d", v.Ref.String(), len(callers)) + if len(callers) == 0 { + // has already been updated and resolved + continue + } + + parts := keyParts(v.Ref.String()) + debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) + // identifying edge case when the namer did nothing because we point to a non-schema object + // no definition is created and we expand the $ref for all callers + if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { + debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) + if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { + return err + } + + // regular case: we named the $ref as a definition, and we move all callers to this new $ref + for _, caller := range callers { + if caller != key { + // move $ref for next to resolve + debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) + c := refsToReplace[caller] + c.Ref = v.Ref + refsToReplace[caller] = c + } + } + } else { + debugLog("expand JSON pointer for key=%s", key) + if err := updateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { + return err + } + // NOTE: there is no other caller to update + } + } + } + opts.Spec.reload() // re-analyze + return nil +} + +// deepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. +// - if no definition is found, returns the deepest ref. +// - pointers to external files are expanded +// +// NOTE: all external $ref's are assumed to be already expanded at this stage. 
+func deepestRef(opts *FlattenOpts, ref swspec.Ref) (swspec.Ref, *swspec.Schema, error) {
+	if !ref.HasFragmentOnly {
+		// we found an external $ref, which is odd:
+		// do nothing on external $refs
+		return ref, nil, nil
+	}
+	currentRef := ref
+	visited := make(map[string]bool, 64)
+DOWNREF:
+	for currentRef.String() != "" {
+		if slashpath.Dir(currentRef.String()) == definitionsPath {
+			// this is a top-level definition: stop here and return this ref
+			return currentRef, nil, nil
+		}
+		if _, beenThere := visited[currentRef.String()]; beenThere {
+			return swspec.Ref{}, nil,
+				fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String())
+		}
+		visited[currentRef.String()] = true
+		value, _, err := currentRef.GetPointer().Get(opts.Swagger())
+		if err != nil {
+			return swspec.Ref{}, nil, err
+		}
+		switch refable := value.(type) {
+		case *swspec.Schema:
+			if refable.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = refable.Ref
+
+		case swspec.Schema:
+			if refable.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = refable.Ref
+
+		case *swspec.SchemaOrArray:
+			if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = refable.Schema.Ref
+
+		case *swspec.SchemaOrBool:
+			if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = refable.Schema.Ref
+
+		case swspec.Response:
+			// a pointer points to a schema initially marshalled in the responses section...
+			// Attempt to convert this to a schema. If this fails, the spec is invalid
+			asJSON, _ := refable.MarshalJSON()
+			var asSchema swspec.Schema
+			err := asSchema.UnmarshalJSON(asJSON)
+			if err != nil {
+				return swspec.Ref{}, nil,
+					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T",
+						currentRef.String(), value)
+			}
+			opts.flattenContext.warnings = append(opts.flattenContext.warnings,
+				fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
+
+			if asSchema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = asSchema.Ref
+
+		case swspec.Parameter:
+			// a pointer points to a schema initially marshalled in the parameters section...
+			// Attempt to convert this to a schema. If this fails, the spec is invalid
+			asJSON, _ := refable.MarshalJSON()
+			var asSchema swspec.Schema
+			err := asSchema.UnmarshalJSON(asJSON)
+			if err != nil {
+				return swspec.Ref{}, nil,
+					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T",
+						currentRef.String(), value)
+			}
+			opts.flattenContext.warnings = append(opts.flattenContext.warnings,
+				fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
+
+			if asSchema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = asSchema.Ref
+
+		default:
+			return swspec.Ref{}, nil,
+				fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T",
+					currentRef.String(), value)
+		}
+	}
+	// assess what schema we're ending with
+	sch, erv := swspec.ResolveRefWithBase(opts.Swagger(), &currentRef, opts.ExpandOpts(false))
+	if erv != nil {
+		return swspec.Ref{}, nil, erv
+	}
+	if sch == nil {
+		return swspec.Ref{}, nil, fmt.Errorf("no schema found at %s", currentRef.String())
+	}
+	return currentRef, sch, nil
+}
+
+// normalizeRef strips the current file from any $ref.
This works around issue go-openapi/spec#76: +// leading absolute file in $ref is stripped +func normalizeRef(opts *FlattenOpts) error { + debugLog("normalizeRef") + opts.Spec.reload() // re-analyze + for k, w := range opts.Spec.references.allRefs { + if strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS + // strip base path from definition + debugLog("stripping absolute path for: %s", w.String()) + if err := updateRef(opts.Swagger(), k, + swspec.MustCreateRef(slashpath.Join(definitionsPath, slashpath.Base(w.String())))); err != nil { + return err + } + } + } + opts.Spec.reload() // re-analyze + return nil +} diff --git a/vendor/github.com/go-openapi/analysis/go.mod b/vendor/github.com/go-openapi/analysis/go.mod new file mode 100644 index 00000000000..6c8e58577cf --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/go.mod @@ -0,0 +1,13 @@ +module github.com/go-openapi/analysis + +require ( + github.com/go-openapi/jsonpointer v0.19.3 + github.com/go-openapi/loads v0.19.0 + github.com/go-openapi/spec v0.19.3 + github.com/go-openapi/strfmt v0.19.3 + github.com/go-openapi/swag v0.19.5 + github.com/stretchr/testify v1.3.0 + go.mongodb.org/mongo-driver v1.1.1 // indirect +) + +go 1.13 diff --git a/vendor/github.com/go-openapi/analysis/go.sum b/vendor/github.com/go-openapi/analysis/go.sum new file mode 100644 index 00000000000..8e8b5f9bc47 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/go.sum @@ -0,0 +1,97 @@ +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= 
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.19.0 h1:wCOBNscACI8L93tt5tvB2zOMkJ098XCw3fP0BY2ybDA= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.2 h1:clPGfBnJohokno0e+d7hs6Yocrzjlgz6EsQSDncCRnE= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 
h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-openapi/analysis/internal/post_go18.go b/vendor/github.com/go-openapi/analysis/internal/post_go18.go
new file mode 100644
index 00000000000..f96f55c0873
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/post_go18.go
@@ -0,0 +1,29 @@
+// +build go1.8
+
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "net/url"
+
+// PathUnescape provides url.PathUnescape(), with seamless
+// go version support for pre-go1.8
+//
+// TODO: this function is currently defined in go-openapi/swag,
+// but unexported. We might choose to export it, or simply phase
+// out pre-go1.8 support.
+func PathUnescape(path string) (string, error) {
+	return url.PathUnescape(path)
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/pre_go18.go b/vendor/github.com/go-openapi/analysis/internal/pre_go18.go
new file mode 100644
index 00000000000..4cc64418220
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/pre_go18.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "net/url"
+
+// PathUnescape provides url.PathUnescape(), with seamless
+// go version support for pre-go1.8
+//
+// TODO: this function is currently defined in go-openapi/swag,
+// but unexported. We might choose to export it, or simply phase
+// out pre-go1.8 support.
+func PathUnescape(path string) (string, error) {
+	return url.QueryUnescape(path)
+}
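The pre-go1.8 fallback above is only an approximation: url.QueryUnescape treats "+" as an encoded space, while url.PathUnescape (go1.8+) keeps it literal. A quick demonstration of the divergence:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Both decode %2F, but only QueryUnescape turns "+" into a space.
	p, _ := url.PathUnescape("a%2Fb+c")
	q, _ := url.QueryUnescape("a%2Fb+c")
	fmt.Println(p) // a/b+c
	fmt.Println(q) // a/b c
}
```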
diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go
new file mode 100644
index 00000000000..625c46f8f9f
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/mixin.go
@@ -0,0 +1,425 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/go-openapi/spec"
+)
+
+// Mixin modifies the primary swagger spec by adding the paths and
+// definitions from the mixin specs. Top level parameters and
+// responses from the mixins are also carried over. Operation id
+// collisions are avoided by appending "Mixin<N>", but only if
+// needed.
+//
+// The following parts of primary are subject to merge, filling empty details:
+//  - Info
+//  - BasePath
+//  - Host
+//  - ExternalDocs
+//
+// Consider calling FixEmptyResponseDescriptions() on the modified primary
+// if you read them from storage and they are valid to start with.
+//
+// Entries in "paths", "definitions", "parameters" and "responses" are
+// added to the primary in the order of the given mixins. If the entry
+// already exists in primary it is skipped with a warning message.
+//
+// The count of skipped entries (from collisions) is returned so any
+// deviation from the number expected can flag a warning in your build
+// scripts. Carefully review the collisions before accepting them;
+// consider renaming things if possible.
+//
+// No key normalization takes place (paths, type defs,
+// etc). Ensure they are canonical if your downstream tools do
+// key normalization of any form.
+//
+// Merging schemes (http, https), and consumers/producers does not account for
+// collisions.
+func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
+	skipped := make([]string, 0, len(mixins))
+	opIds := getOpIds(primary)
+	initPrimary(primary)
+
+	for i, m := range mixins {
+		skipped = append(skipped, mergeSwaggerProps(primary, m)...)
+
+		skipped = append(skipped, mergeConsumes(primary, m)...)
+
+		skipped = append(skipped, mergeProduces(primary, m)...)
+
+		skipped = append(skipped, mergeTags(primary, m)...)
+
+		skipped = append(skipped, mergeSchemes(primary, m)...)
+
+		skipped = append(skipped, mergeSecurityDefinitions(primary, m)...)
+
+		skipped = append(skipped, mergeSecurityRequirements(primary, m)...)
+
+		skipped = append(skipped, mergeDefinitions(primary, m)...)
+
+		// merging paths requires a map of operationIDs to work with
+		skipped = append(skipped, mergePaths(primary, m, opIds, i)...)
+
+		skipped = append(skipped, mergeParameters(primary, m)...)
+
+		skipped = append(skipped, mergeResponses(primary, m)...)
+	}
+	return skipped
+}
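A minimal usage sketch for Mixin, built only on the exported go-openapi/spec types (the definition names are invented): a colliding definition is skipped and reported, a new one is merged into the primary.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	primary := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Definitions: spec.Definitions{"Pet": spec.Schema{}},
	}}
	mixin := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Definitions: spec.Definitions{"Pet": spec.Schema{}, "Owner": spec.Schema{}},
	}}
	// "Pet" collides with the primary and is skipped; "Owner" is merged.
	skipped := analysis.Mixin(primary, mixin)
	fmt.Println(len(skipped), len(primary.Definitions)) // 1 2
}
```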
+// getOpIds extracts all the paths.<path>.operationIds from the given
+// spec and returns them as the keys in a map with 'true' values.
+func getOpIds(s *spec.Swagger) map[string]bool {
+	rv := make(map[string]bool)
+	if s.Paths == nil {
+		return rv
+	}
+	for _, v := range s.Paths.Paths {
+		piops := pathItemOps(v)
+		for _, op := range piops {
+			rv[op.ID] = true
+		}
+	}
+	return rv
+}
+
+func pathItemOps(p spec.PathItem) []*spec.Operation {
+	var rv []*spec.Operation
+	rv = appendOp(rv, p.Get)
+	rv = appendOp(rv, p.Put)
+	rv = appendOp(rv, p.Post)
+	rv = appendOp(rv, p.Delete)
+	rv = appendOp(rv, p.Head)
+	rv = appendOp(rv, p.Patch)
+	return rv
+}
+
+func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation {
+	if op == nil {
+		return ops
+	}
+	return append(ops, op)
+}
+
+func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.SecurityDefinitions {
+		if _, exists := primary.SecurityDefinitions[k]; exists {
+			warn := fmt.Sprintf(
+				"SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+			continue
+		}
+		primary.SecurityDefinitions[k] = v
+	}
+	return
+}
+
+func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for _, v := range m.Security {
+		found := false
+		for _, vv := range primary.Security {
+			if reflect.DeepEqual(v, vv) {
+				found = true
+				break
+			}
+		}
+		if found {
+			warn := fmt.Sprintf(
+				"Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v)
+			skipped = append(skipped, warn)
+			continue
+		}
+		primary.Security = append(primary.Security, v)
+	}
+	return
+}
+
+func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.Definitions {
+		// assume name collisions represent IDENTICAL type. careful.
+		if _, exists := primary.Definitions[k]; exists {
+			warn := fmt.Sprintf(
+				"definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+			continue
+		}
+		primary.Definitions[k] = v
+	}
+	return
+}
+
+func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) {
+	if m.Paths != nil {
+		for k, v := range m.Paths.Paths {
+			if _, exists := primary.Paths.Paths[k]; exists {
+				warn := fmt.Sprintf(
+					"paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+				skipped = append(skipped, warn)
+				continue
+			}
+
+			// Swagger requires that operationIds be
+			// unique within a spec. If we find a
+			// collision we append "Mixin0" to the
+			// operationId we are adding, where 0 is mixin
+			// index. We assume that operationIds within
+			// all the provided specs are already unique.
+			piops := pathItemOps(v)
+			for _, piop := range piops {
+				if opIds[piop.ID] {
+					piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
+				}
+				opIds[piop.ID] = true
+			}
+			primary.Paths.Paths[k] = v
+		}
+	}
+	return
+}
+
+func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.Parameters {
+		// could try to rename on conflict but would
+		// have to fix $refs in the mixin.
Complain + // for now + if _, exists := primary.Parameters[k]; exists { + warn := fmt.Sprintf( + "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + continue + } + primary.Parameters[k] = v + } + return +} + +func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Responses { + // could try to rename on conflict but would + // have to fix $refs in the mixin. Complain + // for now + if _, exists := primary.Responses[k]; exists { + warn := fmt.Sprintf( + "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + continue + } + primary.Responses[k] = v + } + return skipped +} + +func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Consumes { + found := false + for _, vv := range primary.Consumes { + if v == vv { + found = true + break + } + } + if found { + // no warning here: we just skip it + continue + } + primary.Consumes = append(primary.Consumes, v) + } + return []string{} +} + +func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Produces { + found := false + for _, vv := range primary.Produces { + if v == vv { + found = true + break + } + } + if found { + // no warning here: we just skip it + continue + } + primary.Produces = append(primary.Produces, v) + } + return []string{} +} + +func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for _, v := range m.Tags { + found := false + for _, vv := range primary.Tags { + if v.Name == vv.Name { + found = true + break + } + } + if found { + warn := fmt.Sprintf( + "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", v.Name) + skipped = append(skipped, warn) + continue + } + primary.Tags = append(primary.Tags, v) + } + return +} + +func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Schemes { + found := false + for _, vv := range primary.Schemes { + if v == vv { + found = true + break + } + } + if found { + // no warning here: we just skip it + continue + } + primary.Schemes = append(primary.Schemes, v) + } + return []string{} +} + +func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { + var skipped []string + primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) + + // merging details in swagger top properties + if primary.Host == "" { + primary.Host = m.Host + } + if primary.BasePath == "" { + primary.BasePath = m.BasePath + } + if primary.Info == nil { + primary.Info = m.Info + } else if m.Info != nil { + var sk []string + primary.Info.Extensions, sk = mergeExtensions(primary.Info.Extensions, m.Info.Extensions) + skipped = append(skipped, sk...) 
+		if primary.Info.Description == "" {
+			primary.Info.Description = m.Info.Description
+		}
+		if primary.Info.Title == "" {
+			primary.Info.Title = m.Info.Title
+		}
+		if primary.Info.TermsOfService == "" {
+			primary.Info.TermsOfService = m.Info.TermsOfService
+		}
+		if primary.Info.Version == "" {
+			primary.Info.Version = m.Info.Version
+		}
+
+		if primary.Info.Contact == nil {
+			primary.Info.Contact = m.Info.Contact
+		} else if m.Info.Contact != nil {
+			if primary.Info.Contact.Name == "" {
+				primary.Info.Contact.Name = m.Info.Contact.Name
+			}
+			if primary.Info.Contact.URL == "" {
+				primary.Info.Contact.URL = m.Info.Contact.URL
+			}
+			if primary.Info.Contact.Email == "" {
+				primary.Info.Contact.Email = m.Info.Contact.Email
+			}
+		}
+
+		if primary.Info.License == nil {
+			primary.Info.License = m.Info.License
+		} else if m.Info.License != nil {
+			if primary.Info.License.Name == "" {
+				primary.Info.License.Name = m.Info.License.Name
+			}
+			if primary.Info.License.URL == "" {
+				primary.Info.License.URL = m.Info.License.URL
+			}
+		}
+
+	}
+	if primary.ExternalDocs == nil {
+		primary.ExternalDocs = m.ExternalDocs
+	} else if m.ExternalDocs != nil {
+		if primary.ExternalDocs.Description == "" {
+			primary.ExternalDocs.Description = m.ExternalDocs.Description
+		}
+		if primary.ExternalDocs.URL == "" {
+			primary.ExternalDocs.URL = m.ExternalDocs.URL
+		}
+	}
+	return skipped
+}
+
+func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) {
+	if primary == nil {
+		result = m
+		return
+	}
+	if m == nil {
+		result = primary
+		return
+	}
+	result = primary
+	for k, v := range m {
+		if _, found := primary[k]; found {
+			skipped = append(skipped, k)
+			continue
+		}
+		primary[k] = v
+	}
+	return
+}
+
+func initPrimary(primary *spec.Swagger) {
+	if primary.SecurityDefinitions == nil {
+		primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme)
+	}
+	if primary.Security == nil {
+		primary.Security = make([]map[string][]string, 0, 10)
+	}
+	if primary.Produces == nil {
+		primary.Produces = make([]string, 0, 10)
+	}
+	if primary.Consumes == nil {
+		primary.Consumes = make([]string, 0, 10)
+	}
+	if primary.Tags == nil {
+		primary.Tags = make([]spec.Tag, 0, 10)
+	}
+	if primary.Schemes == nil {
+		primary.Schemes = make([]string, 0, 10)
+	}
+	if primary.Paths == nil {
+		primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)}
+	}
+	if primary.Paths.Paths == nil {
+		primary.Paths.Paths = make(map[string]spec.PathItem)
+	}
+	if primary.Definitions == nil {
+		primary.Definitions = make(spec.Definitions)
+	}
+	if primary.Parameters == nil {
+		primary.Parameters = make(map[string]spec.Parameter)
+	}
+	if primary.Responses == nil {
+		primary.Responses = make(map[string]spec.Response)
+	}
+}
diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go
new file mode 100644
index 00000000000..398c7806394
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/schema.go
@@ -0,0 +1,234 @@
+package analysis
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+)
+
+// SchemaOpts configures the schema analyzer
+type SchemaOpts struct {
+	Schema   *spec.Schema
+	Root     interface{}
+	BasePath string
+	_        struct{}
+}
+
+// Schema analysis, will classify the schema according to known
+// patterns.
+func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { + if opts.Schema == nil { + return nil, fmt.Errorf("no schema to analyze") + } + + a := &AnalyzedSchema{ + schema: opts.Schema, + root: opts.Root, + basePath: opts.BasePath, + } + + a.initializeFlags() + a.inferKnownType() + a.inferEnum() + a.inferBaseType() + + if err := a.inferMap(); err != nil { + return nil, err + } + if err := a.inferArray(); err != nil { + return nil, err + } + + a.inferTuple() + + if err := a.inferFromRef(); err != nil { + return nil, err + } + + a.inferSimpleSchema() + return a, nil +} + +// AnalyzedSchema indicates what the schema represents +type AnalyzedSchema struct { + schema *spec.Schema + root interface{} + basePath string + + hasProps bool + hasAllOf bool + hasItems bool + hasAdditionalProps bool + hasAdditionalItems bool + hasRef bool + + IsKnownType bool + IsSimpleSchema bool + IsArray bool + IsSimpleArray bool + IsMap bool + IsSimpleMap bool + IsExtendedObject bool + IsTuple bool + IsTupleWithExtra bool + IsBaseType bool + IsEnum bool +} + +// Inherits copies value fields from other onto this schema +func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { + if other == nil { + return + } + a.hasProps = other.hasProps + a.hasAllOf = other.hasAllOf + a.hasItems = other.hasItems + a.hasAdditionalItems = other.hasAdditionalItems + a.hasAdditionalProps = other.hasAdditionalProps + a.hasRef = other.hasRef + + a.IsKnownType = other.IsKnownType + a.IsSimpleSchema = other.IsSimpleSchema + a.IsArray = other.IsArray + a.IsSimpleArray = other.IsSimpleArray + a.IsMap = other.IsMap + a.IsSimpleMap = other.IsSimpleMap + a.IsExtendedObject = other.IsExtendedObject + a.IsTuple = other.IsTuple + a.IsTupleWithExtra = other.IsTupleWithExtra + a.IsBaseType = other.IsBaseType + a.IsEnum = other.IsEnum +} + +func (a *AnalyzedSchema) inferFromRef() error { + if a.hasRef { + sch := new(spec.Schema) + sch.Ref = a.schema.Ref + err := spec.ExpandSchema(sch, a.root, nil) + if err != nil { + return err + } + rsch, err := Schema(SchemaOpts{ + Schema: sch, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + // NOTE(fredbi): currently the only cause for errors is + // unresolved ref. Since spec.ExpandSchema() expands the + // schema recursively, there is no chance to get there, + // until we add more causes for error in this schema analysis. 
+			return err
+		}
+		a.inherits(rsch)
+	}
+	return nil
+}
+
+func (a *AnalyzedSchema) inferSimpleSchema() {
+	a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap
+}
+
+func (a *AnalyzedSchema) inferKnownType() {
+	tpe := a.schema.Type
+	format := a.schema.Format
+	a.IsKnownType = tpe.Contains("boolean") ||
+		tpe.Contains("integer") ||
+		tpe.Contains("number") ||
+		tpe.Contains("string") ||
+		(format != "" && strfmt.Default.ContainsName(format)) ||
+		(a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems)
+}
+
+func (a *AnalyzedSchema) inferMap() error {
+	if a.isObjectType() {
+		hasExtra := a.hasProps || a.hasAllOf
+		a.IsMap = a.hasAdditionalProps && !hasExtra
+		a.IsExtendedObject = a.hasAdditionalProps && hasExtra
+		if a.IsMap {
+			if a.schema.AdditionalProperties.Schema != nil {
+				msch, err := Schema(SchemaOpts{
+					Schema:   a.schema.AdditionalProperties.Schema,
+					Root:     a.root,
+					BasePath: a.basePath,
+				})
+				if err != nil {
+					return err
+				}
+				a.IsSimpleMap = msch.IsSimpleSchema
+			} else if a.schema.AdditionalProperties.Allows {
+				a.IsSimpleMap = true
+			}
+		}
+	}
+	return nil
+}
+
+func (a *AnalyzedSchema) inferArray() error {
+	// an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple
+	// (yes, even if the Items array contains only one element).
+	// arrays in JSON schema may be unrestricted (i.e. no Items specified).
+	// Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays.
+	//
+	// NOTE: the spec package misses the distinction between:
+	// items: [] and items: {}, so we consider both arrays here.
+	a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil)
+	if a.IsArray && a.hasItems {
+		if a.schema.Items.Schema != nil {
+			itsch, err := Schema(SchemaOpts{
+				Schema:   a.schema.Items.Schema,
+				Root:     a.root,
+				BasePath: a.basePath,
+			})
+			if err != nil {
+				return err
+			}
+			a.IsSimpleArray = itsch.IsSimpleSchema
+		}
+	}
+	if a.IsArray && !a.hasItems {
+		a.IsSimpleArray = true
+	}
+	return nil
+}
+
+func (a *AnalyzedSchema) inferTuple() {
+	tuple := a.hasItems && a.schema.Items.Schemas != nil
+	a.IsTuple = tuple && !a.hasAdditionalItems
+	a.IsTupleWithExtra = tuple && a.hasAdditionalItems
+}
+
+func (a *AnalyzedSchema) inferBaseType() {
+	if a.isObjectType() {
+		a.IsBaseType = a.schema.Discriminator != ""
+	}
+}
+
+func (a *AnalyzedSchema) inferEnum() {
+	a.IsEnum = len(a.schema.Enum) > 0
+}
+
+func (a *AnalyzedSchema) initializeFlags() {
+	a.hasProps = len(a.schema.Properties) > 0
+	a.hasAllOf = len(a.schema.AllOf) > 0
+	a.hasRef = a.schema.Ref.String() != ""
+
+	a.hasItems = a.schema.Items != nil &&
+		(a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0)
+
+	a.hasAdditionalProps = a.schema.AdditionalProperties != nil &&
+		(a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows)
+
+	a.hasAdditionalItems = a.schema.AdditionalItems != nil &&
+		(a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows)
+}
+
+func (a *AnalyzedSchema) isObjectType() bool {
+	return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object"))
+}
+
+func (a *AnalyzedSchema) isArrayType() bool {
+	return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array"))
+}
diff --git a/vendor/github.com/go-openapi/errors/.travis.yml b/vendor/github.com/go-openapi/errors/.travis.yml
index 57631a0b204..ba8a6d5918f 100644
--- 
a/vendor/github.com/go-openapi/errors/.travis.yml +++ b/vendor/github.com/go-openapi/errors/.travis.yml @@ -1,13 +1,12 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- 1.13.x -- 1.14.x -arch: - - amd64 - - ppc64le +- 1.11.x +- 1.12.x install: - GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on language: go notifications: slack: diff --git a/vendor/github.com/go-openapi/errors/go.mod b/vendor/github.com/go-openapi/errors/go.mod index d7a9030ab65..084143001f0 100644 --- a/vendor/github.com/go-openapi/errors/go.mod +++ b/vendor/github.com/go-openapi/errors/go.mod @@ -1,12 +1,6 @@ module github.com/go-openapi/errors -go 1.14 - require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect - github.com/stretchr/testify v1.6.1 - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect - gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c // indirect + github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/testify v1.3.0 ) diff --git a/vendor/github.com/go-openapi/errors/go.sum b/vendor/github.com/go-openapi/errors/go.sum index ecd91c75b1d..e7314e279fb 100644 --- a/vendor/github.com/go-openapi/errors/go.sum +++ b/vendor/github.com/go-openapi/errors/go.sum @@ -1,26 +1,9 @@ -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go index 0f96ce2094e..1bae87302a6 100644 --- a/vendor/github.com/go-openapi/errors/parsing.go +++ b/vendor/github.com/go-openapi/errors/parsing.go @@ -16,7 +16,7 @@ package errors import "fmt" -// ParseError represents a parsing error +// ParseError respresents a parsing error type ParseError struct { code int32 Name string diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go index f4a7d4ac239..14fb2c5f116 100644 --- a/vendor/github.com/go-openapi/errors/schema.go +++ b/vendor/github.com/go-openapi/errors/schema.go @@ -25,7 +25,6 @@ const ( typeFailWithData = "%s in %s must be of type %s: %q" typeFailWithError = "%s in %s must be of type %s, because: %s" requiredFail = "%s in %s is required" - readOnlyFail = "%s in %s is readOnly" tooLongMessage = "%s in %s should be at most %d chars long" tooShortMessage = "%s in %s should be at least %d chars long" patternFail = "%s in %s should match '%s'" @@ -42,7 +41,6 @@ const ( typeFailWithDataNoIn = "%s must be of type %s: %q" typeFailWithErrorNoIn = "%s must be of type %s, because: %s" requiredFailNoIn = "%s is required" - readOnlyFailNoIn = "%s is readOnly" tooLongMessageNoIn = "%s should be at most %d chars long" tooShortMessageNoIn = "%s should be at least %d chars long" patternFailNoIn = "%s should match '%s'" @@ -93,7 +91,6 @@ const ( UnallowedPropertyCode FailedAllPatternPropsCode MultipleOfMustBePositiveCode - ReadOnlyFailCode ) // CompositeError is an error that groups several errors together @@ -271,7 +268,7 @@ func DuplicateItems(name, in string) *Validation { } // TooManyItems error for when an array contains too many items -func TooManyItems(name, in string, max int64, value interface{}) *Validation { +func TooManyItems(name, in string, max int64) *Validation { msg := fmt.Sprintf(maxItemsFail, name, in, max) if in == "" { msg = fmt.Sprintf(maxItemsFailNoIn, name, max) @@ -281,13 +278,12 @@ func TooManyItems(name, in string, max int64, value interface{}) *Validation { code: MaxItemsFailCode, Name: name, In: in, - Value: value, message: msg, } } // TooFewItems error for when an array contains too few items -func TooFewItems(name, in string, min int64, value interface{}) *Validation { +func TooFewItems(name, in string, min int64) *Validation { msg := fmt.Sprintf(minItemsFail, name, in, min) if in == "" { msg = fmt.Sprintf(minItemsFailNoIn, name, min) @@ -296,13 +292,12 @@ func TooFewItems(name, in string, min int64, value interface{}) *Validation { code: MinItemsFailCode, Name: name, In: in, - Value: value, message: msg, } } -// ExceedsMaximumInt error for when maximum validation fails -func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation { +// ExceedsMaximumInt error for when maxinum validation fails +func ExceedsMaximumInt(name, in string, max int64, exclusive bool) *Validation { var 
message string if in == "" { m := maxIncFailNoIn @@ -321,13 +316,13 @@ func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interfa code: MaxFailCode, Name: name, In: in, - Value: value, + Value: max, message: message, } } -// ExceedsMaximumUint error for when maximum validation fails -func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation { +// ExceedsMaximumUint error for when maxinum validation fails +func ExceedsMaximumUint(name, in string, max uint64, exclusive bool) *Validation { var message string if in == "" { m := maxIncFailNoIn @@ -346,13 +341,13 @@ func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value inter code: MaxFailCode, Name: name, In: in, - Value: value, + Value: max, message: message, } } -// ExceedsMaximum error for when maximum validation fails -func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation { +// ExceedsMaximum error for when maxinum validation fails +func ExceedsMaximum(name, in string, max float64, exclusive bool) *Validation { var message string if in == "" { m := maxIncFailNoIn @@ -371,13 +366,13 @@ func ExceedsMaximum(name, in string, max float64, exclusive bool, value interfac code: MaxFailCode, Name: name, In: in, - Value: value, + Value: max, message: message, } } -// ExceedsMinimumInt error for when minimum validation fails -func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation { +// ExceedsMinimumInt error for when maxinum validation fails +func ExceedsMinimumInt(name, in string, min int64, exclusive bool) *Validation { var message string if in == "" { m := minIncFailNoIn @@ -396,13 +391,13 @@ func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interfa code: MinFailCode, Name: name, In: in, - Value: value, + Value: min, message: message, } } -// ExceedsMinimumUint error for when minimum validation fails -func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation { +// ExceedsMinimumUint error for when maxinum validation fails +func ExceedsMinimumUint(name, in string, min uint64, exclusive bool) *Validation { var message string if in == "" { m := minIncFailNoIn @@ -421,13 +416,13 @@ func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value inter code: MinFailCode, Name: name, In: in, - Value: value, + Value: min, message: message, } } -// ExceedsMinimum error for when minimum validation fails -func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation { +// ExceedsMinimum error for when maxinum validation fails +func ExceedsMinimum(name, in string, min float64, exclusive bool) *Validation { var message string if in == "" { m := minIncFailNoIn @@ -446,13 +441,13 @@ func ExceedsMinimum(name, in string, min float64, exclusive bool, value interfac code: MinFailCode, Name: name, In: in, - Value: value, + Value: min, message: message, } } // NotMultipleOf error for when multiple of validation fails -func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { +func NotMultipleOf(name, in string, multiple interface{}) *Validation { var msg string if in == "" { msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) @@ -463,7 +458,7 @@ func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { code: MultipleOfFailCode, Name: name, In: in, - Value: value, + Value: multiple, message: msg, } } @@ -488,7 +483,7 @@ func EnumFail(name, in string, 
value interface{}, values []interface{}) *Validat } // Required error for when a value is missing -func Required(name, in string, value interface{}) *Validation { +func Required(name, in string) *Validation { var msg string if in == "" { msg = fmt.Sprintf(requiredFailNoIn, name) @@ -499,30 +494,12 @@ func Required(name, in string, value interface{}) *Validation { code: RequiredFailCode, Name: name, In: in, - Value: value, - message: msg, - } -} - -// ReadOnly error for when a value is present in request -func ReadOnly(name, in string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(readOnlyFailNoIn, name) - } else { - msg = fmt.Sprintf(readOnlyFail, name, in) - } - return &Validation{ - code: ReadOnlyFailCode, - Name: name, - In: in, - Value: value, message: msg, } } // TooLong error for when a string is too long -func TooLong(name, in string, max int64, value interface{}) *Validation { +func TooLong(name, in string, max int64) *Validation { var msg string if in == "" { msg = fmt.Sprintf(tooLongMessageNoIn, name, max) @@ -533,13 +510,12 @@ func TooLong(name, in string, max int64, value interface{}) *Validation { code: TooLongFailCode, Name: name, In: in, - Value: value, message: msg, } } // TooShort error for when a string is too short -func TooShort(name, in string, min int64, value interface{}) *Validation { +func TooShort(name, in string, min int64) *Validation { var msg string if in == "" { msg = fmt.Sprintf(tooShortMessageNoIn, name, min) @@ -551,14 +527,13 @@ func TooShort(name, in string, min int64, value interface{}) *Validation { code: TooShortFailCode, Name: name, In: in, - Value: value, message: msg, } } // FailedPattern error for when a string fails a regex pattern match // the pattern that is returned is the ECMA syntax version of the pattern not the golang version. 
-func FailedPattern(name, in, pattern string, value interface{}) *Validation { +func FailedPattern(name, in, pattern string) *Validation { var msg string if in == "" { msg = fmt.Sprintf(patternFailNoIn, name, pattern) @@ -570,7 +545,6 @@ func FailedPattern(name, in, pattern string, value interface{}) *Validation { code: PatternFailCode, Name: name, In: in, - Value: value, message: msg, } } diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore new file mode 100644 index 00000000000..e4f15f17bfc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +coverage.out +profile.cov +profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml new file mode 100644 index 00000000000..1932914e6d1 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -0,0 +1,22 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - gochecknoinits diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml new file mode 100644 index 00000000000..8a7e05d911c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md new file mode 100644 index 00000000000..071cf69ab97 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/README.md @@ -0,0 +1,7 @@ +# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/loads.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) + +Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go new file mode 100644 index 00000000000..3046da4cef3 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/doc.go @@ -0,0 +1,21 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package loads provides document loading methods for swagger (OAI) specifications. + +It is used by other go-openapi packages to load and run analysis on local or remote spec documents. 
+ +*/ +package loads diff --git a/vendor/github.com/go-openapi/loads/go.mod b/vendor/github.com/go-openapi/loads/go.mod new file mode 100644 index 00000000000..8cf62326f6c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/go.mod @@ -0,0 +1,11 @@ +module github.com/go-openapi/loads + +require ( + github.com/go-openapi/analysis v0.19.5 + github.com/go-openapi/spec v0.19.3 + github.com/go-openapi/swag v0.19.5 + github.com/stretchr/testify v1.3.0 + gopkg.in/yaml.v2 v2.2.4 +) + +go 1.13 diff --git a/vendor/github.com/go-openapi/loads/go.sum b/vendor/github.com/go-openapi/loads/go.sum new file mode 100644 index 00000000000..6eebff99d9c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/go.sum @@ -0,0 +1,98 @@ +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.3 
h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go new file mode 100644 index 00000000000..e4b4a3cf763 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -0,0 +1,298 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loads + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + "net/url" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// JSONDoc loads a json document from either a file or a remote url +func JSONDoc(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// DocLoader represents a doc loader type +type DocLoader func(string) (json.RawMessage, error) + +// DocMatcher represents a predicate to check if a loader matches +type DocMatcher func(string) bool + +var ( + loaders *loader + defaultLoader *loader +) + +func init() { + defaultLoader = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} + loaders = defaultLoader + spec.PathLoader = loaders.Fn + AddLoader(swag.YAMLMatcher, swag.YAMLDoc) + + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) + //gob.Register(spec.Refable{}) +} + +// AddLoader for a document +func AddLoader(predicate DocMatcher, load DocLoader) { + prev := loaders + loaders = &loader{ + Match: predicate, + Fn: load, + Next: prev, + } + spec.PathLoader = loaders.Fn +} + +type loader struct { + Fn DocLoader + Match DocMatcher + Next *loader +} + +// JSONSpec loads a spec from a json document +func JSONSpec(path string) (*Document, error) { + data, err := JSONDoc(path) + if err != nil { + return nil, err + } + // convert to json + return Analyzed(data, "") +} + +// Document represents a swagger spec document +type Document struct { + // specAnalyzer + Analyzer *analysis.Spec + spec *spec.Swagger + specFilePath string + origSpec *spec.Swagger + schema *spec.Schema + raw json.RawMessage +} + +// Embedded returns a Document based on embedded specs. 
No analysis is required +func Embedded(orig, flat json.RawMessage) (*Document, error) { + var origSpec, flatSpec spec.Swagger + if err := json.Unmarshal(orig, &origSpec); err != nil { + return nil, err + } + if err := json.Unmarshal(flat, &flatSpec); err != nil { + return nil, err + } + return &Document{ + raw: orig, + origSpec: &origSpec, + spec: &flatSpec, + }, nil +} + +// Spec loads a new spec document +func Spec(path string) (*Document, error) { + specURL, err := url.Parse(path) + if err != nil { + return nil, err + } + var lastErr error + for l := loaders.Next; l != nil; l = l.Next { + if loaders.Match(specURL.Path) { + b, err2 := loaders.Fn(path) + if err2 != nil { + lastErr = err2 + continue + } + doc, err3 := Analyzed(b, "") + if err3 != nil { + return nil, err3 + } + if doc != nil { + doc.specFilePath = path + } + return doc, nil + } + } + if lastErr != nil { + return nil, lastErr + } + b, err := defaultLoader.Fn(path) + if err != nil { + return nil, err + } + + document, err := Analyzed(b, "") + if document != nil { + document.specFilePath = path + } + + return document, err +} + +// Analyzed creates a new analyzed spec document +func Analyzed(data json.RawMessage, version string) (*Document, error) { + if version == "" { + version = "2.0" + } + if version != "2.0" { + return nil, fmt.Errorf("spec version %q is not supported", version) + } + + raw := data + trimmed := bytes.TrimSpace(data) + if len(trimmed) > 0 { + if trimmed[0] != '{' && trimmed[0] != '[' { + yml, err := swag.BytesToYAMLDoc(trimmed) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + d, err := swag.YAMLToJSON(yml) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + raw = d + } + } + + swspec := new(spec.Swagger) + if err := json.Unmarshal(raw, swspec); err != nil { + return nil, err + } + + origsqspec, err := cloneSpec(swspec) + if err != nil { + return nil, err + } + + d := &Document{ + Analyzer: analysis.New(swspec), + schema: spec.MustLoadSwagger20Schema(), + spec: swspec, + raw: raw, + origSpec: origsqspec, + } + return d, nil +} + +// Expanded expands the ref fields in the spec document and returns a new spec document +func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { + swspec := new(spec.Swagger) + if err := json.Unmarshal(d.raw, swspec); err != nil { + return nil, err + } + + var expandOptions *spec.ExpandOptions + if len(options) > 0 { + expandOptions = options[0] + } else { + expandOptions = &spec.ExpandOptions{ + RelativeBase: d.specFilePath, + } + } + + if err := spec.ExpandSpec(swspec, expandOptions); err != nil { + return nil, err + } + + dd := &Document{ + Analyzer: analysis.New(swspec), + spec: swspec, + specFilePath: d.specFilePath, + schema: spec.MustLoadSwagger20Schema(), + raw: d.raw, + origSpec: d.origSpec, + } + return dd, nil +} + +// BasePath the base path for this spec +func (d *Document) BasePath() string { + return d.spec.BasePath +} + +// Version returns the version of this spec +func (d *Document) Version() string { + return d.spec.Swagger +} + +// Schema returns the swagger 2.0 schema +func (d *Document) Schema() *spec.Schema { + return d.schema +} + +// Spec returns the swagger spec object model +func (d *Document) Spec() *spec.Swagger { + return d.spec +} + +// Host returns the host for the API +func (d *Document) Host() string { + return d.spec.Host +} + +// Raw returns the raw swagger spec as json bytes +func (d *Document) Raw() json.RawMessage { + return d.raw +} + +// OrigSpec yields the original spec +func (d 
*Document) OrigSpec() *spec.Swagger { + return d.origSpec +} + +// ResetDefinitions gives a shallow copy with the models reset +func (d *Document) ResetDefinitions() *Document { + defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) + for k, v := range d.origSpec.Definitions { + defs[k] = v + } + + d.spec.Definitions = defs + return d +} + +// Pristine creates a new pristine document instance based on the input data +func (d *Document) Pristine() *Document { + dd, _ := Analyzed(d.Raw(), d.Version()) + return dd +} + +// SpecFilePath returns the file path of the spec if one is defined +func (d *Document) SpecFilePath() string { + return d.specFilePath +} + +func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return nil, err + } + + var dst spec.Swagger + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return nil, err + } + return &dst, nil +} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore new file mode 100644 index 00000000000..fea8b84eca9 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/vendor/github.com/go-openapi/runtime/.travis.yml b/vendor/github.com/go-openapi/runtime/.travis.yml new file mode 100644 index 00000000000..2fc7b58ff20 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: EmObnQuM9Mw8J9vpFaKKHqSMN4Wsr/A9+v7ewAD5cEhA0T1P4m7MbJMiJOhxUhj/X+BFh2DamW+P2lT8mybj5wg8wnkQ2BteKA8Tawi6f9PRw2NRheO8tAi8o/npLnlmet0kc93mn+oLuqHw36w4+j5mkOl2FghkfGiUVhwrhkCP7KXQN+3TU87e+/HzQumlJ3nsE+6terVxkH3PmaUTsS5ONaODZfuxFpfb7RsoEl3skHf6d+tr+1nViLxxly7558Nc33C+W1mr0qiEvMLZ+kJ/CpGWBJ6CUJM3jm6hNe2eMuIPwEK2hxZob8c7n22VPap4K6a0bBRoydoDXaba+2sD7Ym6ivDO/DVyL44VeBBLyIiIBylDGQdZH+6SoWm90Qe/i7tnY/T5Ao5igT8f3cfQY1c3EsTfqmlDfrhmACBmwSlgkdVBLTprHL63JMY24LWmh4jhxsmMRZhCL4dze8su1w6pLN/pD1pGHtKYCEVbdTmaM3PblNRFf12XB7qosmQsgUndH4Vq3bTbU0s1pKjeDhRyLvFzvR0TBbo0pDLEoF1A/i5GVFWa7yLZNUDudQERRh7qv/xBl2excIaQ1sV4DSVm7bAE9l6Kp+yeHQJW2uN6Y3X8wu9gB9nv9l5HBze7wh8KE6PyWAOLYYqZg9/sAtsv/2GcQqXcKFF1zcA= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
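
Aside: for orientation, here is a hedged sketch of how the vendored go-openapi/loads package added above is typically used, based only on the API visible in spec.go: `loads.Spec` accepts a local path or remote URL (YAML or JSON, chosen by the registered loader chain), and `Expanded` returns a new Document with `$ref` fields resolved. The `./swagger.yml` path is a placeholder:

package main

import (
	"log"

	"github.com/go-openapi/loads"
)

func main() {
	// Spec walks the loader chain (YAML matcher first, JSON as the default)
	doc, err := loads.Spec("./swagger.yml") // placeholder path; remote URLs work too
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("swagger %s, base path %q, host %q", doc.Version(), doc.BasePath(), doc.Host())

	// Expanded resolves all $ref pointers into a self-contained document
	expanded, err := doc.Expanded()
	if err != nil {
		log.Fatal(err)
	}
	_ = expanded.Spec() // *spec.Swagger object model, ready for analysis
}

Note that `Pristine()` re-parses the raw bytes when a caller needs an unmutated copy, and `ResetDefinitions()` restores the model map from the original spec via a shallow copy, matching the gob-based `cloneSpec` deep copy taken in `Analyzed`.
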
diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md new file mode 100644 index 00000000000..5b1ec649454 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/README.md @@ -0,0 +1,7 @@ +# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) + +# golang Open-API toolkit - runtime + +The runtime component for use in codegeneration or as untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go new file mode 100644 index 00000000000..4459025b927 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/bytestream.go @@ -0,0 +1,155 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +func defaultCloser() error { return nil } + +type byteStreamOpt func(opts *byteStreamOpts) + +// ClosesStream when the bytestream consumer or producer is finished +func ClosesStream(opts *byteStreamOpts) { + opts.Close = true +} + +type byteStreamOpts struct { + Close bool +} + +// ByteStreamConsumer creates a consmer for byte streams, +// takes a Writer/BinaryUnmarshaler interface or binary slice by reference, +// and reads from the provided reader +func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("ByteStreamConsumer requires a reader") // early exit + } + + close := defaultCloser + if vals.Close { + if cl, ok := reader.(io.Closer); ok { + close = cl.Close + } + } + defer close() + + if wrtr, ok := data.(io.Writer); ok { + _, err := io.Copy(wrtr, reader) + return err + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + if bu, ok := data.(encoding.BinaryUnmarshaler); ok { + return bu.UnmarshalBinary(b) + } + + if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + v.SetBytes(b) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", + data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") + }) +} + +// ByteStreamProducer creates a producer for byte streams, +// takes a Reader/BinaryMarshaler interface or binary slice, +// and writes to a writer (essentially a pipe) +func ByteStreamProducer(opts ...byteStreamOpt) Producer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("ByteStreamProducer requires a writer") // early exit + } + close := defaultCloser + if vals.Close { + if cl, ok := writer.(io.Closer); ok { + close = cl.Close + } + } + defer close() + + if rc, ok := data.(io.ReadCloser); ok { + defer rc.Close() + } + + if rdr, ok := data.(io.Reader); ok { + _, err := io.Copy(writer, rdr) + return err + } + + if bm, ok := data.(encoding.BinaryMarshaler); ok { + bytes, err := bm.MarshalBinary() + if err != nil { + return err + } + + _, err = writer.Write(bytes) + return err + } + + if data != nil { + if e, ok := data.(error); ok { + _, err := writer.Write([]byte(e.Error())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + _, err := writer.Write(v.Bytes()) + return err + } + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", + data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") + }) +} diff --git a/vendor/github.com/go-openapi/runtime/client/auth_info.go b/vendor/github.com/go-openapi/runtime/client/auth_info.go new file mode 100644 index 00000000000..bbe1479c35d --- /dev/null +++ 
b/vendor/github.com/go-openapi/runtime/client/auth_info.go @@ -0,0 +1,61 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "encoding/base64" + + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +// PassThroughAuth never manipulates the request +var PassThroughAuth runtime.ClientAuthInfoWriter + +func init() { + PassThroughAuth = runtime.ClientAuthInfoWriterFunc(func(_ runtime.ClientRequest, _ strfmt.Registry) error { return nil }) +} + +// BasicAuth provides a basic auth info writer +func BasicAuth(username, password string) runtime.ClientAuthInfoWriter { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + encoded := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + return r.SetHeaderParam("Authorization", "Basic "+encoded) + }) +} + +// APIKeyAuth provides an API key auth info writer +func APIKeyAuth(name, in, value string) runtime.ClientAuthInfoWriter { + if in == "query" { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + return r.SetQueryParam(name, value) + }) + } + + if in == "header" { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + return r.SetHeaderParam(name, value) + }) + } + return nil +} + +// BearerToken provides a header based oauth2 bearer access token auth info writer +func BearerToken(token string) runtime.ClientAuthInfoWriter { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + return r.SetHeaderParam("Authorization", "Bearer "+token) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive.go b/vendor/github.com/go-openapi/runtime/client/keepalive.go new file mode 100644 index 00000000000..f83254515ba --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/keepalive.go @@ -0,0 +1,53 @@ +package client + +import ( + "io" + "io/ioutil" + "net/http" + "sync/atomic" +) + +// KeepAliveTransport drains the remaining body from a response +// so that go will reuse the TCP connections. +// This is not enabled by default because there are servers where +// the response never gets closed and that would make the code hang forever. +// So instead it's provided as a http client middleware that can be used to override +// any request. 
+func KeepAliveTransport(rt http.RoundTripper) http.RoundTripper { + return &keepAliveTransport{wrapped: rt} +} + +type keepAliveTransport struct { + wrapped http.RoundTripper +} + +func (k *keepAliveTransport) RoundTrip(r *http.Request) (*http.Response, error) { + resp, err := k.wrapped.RoundTrip(r) + if err != nil { + return resp, err + } + resp.Body = &drainingReadCloser{rdr: resp.Body} + return resp, nil +} + +type drainingReadCloser struct { + rdr io.ReadCloser + seenEOF uint32 +} + +func (d *drainingReadCloser) Read(p []byte) (n int, err error) { + n, err = d.rdr.Read(p) + if err == io.EOF || n == 0 { + atomic.StoreUint32(&d.seenEOF, 1) + } + return +} + +func (d *drainingReadCloser) Close() error { + // drain buffer + if atomic.LoadUint32(&d.seenEOF) != 1 { + //#nosec + io.Copy(ioutil.Discard, d.rdr) + } + return d.rdr.Close() +} diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go new file mode 100644 index 00000000000..b7ea8b811b5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/request.go @@ -0,0 +1,431 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +// NewRequest creates a new swagger http client request +func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) (*request, error) { + return &request{ + pathPattern: pathPattern, + method: method, + writer: writer, + header: make(http.Header), + query: make(url.Values), + timeout: DefaultTimeout, + getBody: getRequestBuffer, + }, nil +} + +// Request represents a swagger client request. +// +// This Request struct converts to a HTTP request. +// There might be others that convert to other transports. +// There is no error checking here, it is assumed to be used after a spec has been validated. +// so impossible combinations should not arise (hopefully). +// +// The main purpose of this struct is to hide the machinery of adding params to a transport request. +// The generated code only implements what is necessary to turn a param into a valid value for these methods. 
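+//
+// A hedged sketch (editor's illustration) of the writer side; the parameter
+// names are hypothetical:
+//
+//	writer := runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, _ strfmt.Registry) error {
+//		if err := req.SetQueryParam("limit", "10"); err != nil {
+//			return err
+//		}
+//		return req.SetHeaderParam("X-Request-ID", "abc-123")
+//	})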
+type request struct { + pathPattern string + method string + writer runtime.ClientRequestWriter + + pathParams map[string]string + header http.Header + query url.Values + formFields url.Values + fileFields map[string][]runtime.NamedReadCloser + payload interface{} + timeout time.Duration + buf *bytes.Buffer + + getBody func(r *request) []byte +} + +var ( + // ensure interface compliance + _ runtime.ClientRequest = new(request) +) + +func (r *request) isMultipart(mediaType string) bool { + if len(r.fileFields) > 0 { + return true + } + + return runtime.MultipartFormMime == mediaType +} + +// BuildHTTP creates a new http request based on the data from the params +func (r *request) BuildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry) (*http.Request, error) { + return r.buildHTTP(mediaType, basePath, producers, registry, nil) +} +func escapeQuotes(s string) string { + return strings.NewReplacer("\\", "\\\\", `"`, "\\\"").Replace(s) +} +func (r *request) buildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry, auth runtime.ClientAuthInfoWriter) (*http.Request, error) { + // build the data + if err := r.writer.WriteToRequest(r, registry); err != nil { + return nil, err + } + + // Our body must be an io.Reader. + // When we create the http.Request, if we pass it a + // bytes.Buffer then it will wrap it in an io.ReadCloser + // and set the content length automatically. + var body io.Reader + var pr *io.PipeReader + var pw *io.PipeWriter + + r.buf = bytes.NewBuffer(nil) + if r.payload != nil || len(r.formFields) > 0 || len(r.fileFields) > 0 { + body = r.buf + if r.isMultipart(mediaType) { + pr, pw = io.Pipe() + body = pr + } + } + + // check if this is a form type request + if len(r.formFields) > 0 || len(r.fileFields) > 0 { + if !r.isMultipart(mediaType) { + r.header.Set(runtime.HeaderContentType, mediaType) + formString := r.formFields.Encode() + r.buf.WriteString(formString) + goto DoneChoosingBodySource + } + + mp := multipart.NewWriter(pw) + r.header.Set(runtime.HeaderContentType, mangleContentType(mediaType, mp.Boundary())) + + go func() { + defer func() { + mp.Close() + pw.Close() + }() + + for fn, v := range r.formFields { + for _, vi := range v { + if err := mp.WriteField(fn, vi); err != nil { + pw.CloseWithError(err) + log.Println(err) + } + } + } + + defer func() { + for _, ff := range r.fileFields { + for _, ffi := range ff { + ffi.Close() + } + } + }() + for fn, f := range r.fileFields { + for _, fi := range f { + buf := bytes.NewBuffer([]byte{}) + + // Need to read the data so that we can detect the content type + _, err := io.Copy(buf, fi) + if err != nil { + _ = pw.CloseWithError(err) + log.Println(err) + } + fileBytes := buf.Bytes() + fileContentType := http.DetectContentType(fileBytes) + + newFi := runtime.NamedReader(fi.Name(), buf) + + // Create the MIME headers for the new part + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + escapeQuotes(fn), escapeQuotes(filepath.Base(fi.Name())))) + h.Set("Content-Type", fileContentType) + + wrtr, err := mp.CreatePart(h) + if err != nil { + pw.CloseWithError(err) + log.Println(err) + } else if _, err := io.Copy(wrtr, newFi); err != nil { + pw.CloseWithError(err) + log.Println(err) + } + } + } + }() + + goto DoneChoosingBodySource + } + + // if there is payload, use the producer to write the payload, and then + // set the header to the content-type appropriate for the payload 
produced + if r.payload != nil { + // TODO: infer most appropriate content type based on the producer used, + // and the `consumers` section of the spec/operation + r.header.Set(runtime.HeaderContentType, mediaType) + if rdr, ok := r.payload.(io.ReadCloser); ok { + body = rdr + goto DoneChoosingBodySource + } + + if rdr, ok := r.payload.(io.Reader); ok { + body = rdr + goto DoneChoosingBodySource + } + + producer := producers[mediaType] + if err := producer.Produce(r.buf, r.payload); err != nil { + return nil, err + } + } + +DoneChoosingBodySource: + + if runtime.CanHaveBody(r.method) && body == nil && r.header.Get(runtime.HeaderContentType) == "" { + r.header.Set(runtime.HeaderContentType, mediaType) + } + + if auth != nil { + // If we're not using r.buf as our http.Request's body, + // either the payload is an io.Reader or io.ReadCloser, + // or we're doing a multipart form/file. + // + // In those cases, if the AuthenticateRequest call asks for the body, + // we must read it into a buffer and provide that, then use that buffer + // as the body of our http.Request. + // + // This is done in-line with the GetBody() request rather than ahead + // of time, because there's no way to know if the AuthenticateRequest + // will even ask for the body of the request. + // + // If for some reason the copy fails, there's no way to return that + // error to the GetBody() call, so return it afterwards. + // + // An error from the copy action is prioritized over any error + // from the AuthenticateRequest call, because the mis-read + // body may have interfered with the auth. + // + var copyErr error + if buf, ok := body.(*bytes.Buffer); body != nil && (!ok || buf != r.buf) { + var copied bool + r.getBody = func(r *request) []byte { + if copied { + return getRequestBuffer(r) + } + + defer func() { + copied = true + }() + + if _, copyErr = io.Copy(r.buf, body); copyErr != nil { + return nil + } + + if closer, ok := body.(io.ReadCloser); ok { + if copyErr = closer.Close(); copyErr != nil { + return nil + } + } + + body = r.buf + return getRequestBuffer(r) + } + } + + authErr := auth.AuthenticateRequest(r, registry) + + if copyErr != nil { + return nil, fmt.Errorf("error retrieving the response body: %v", copyErr) + } + + if authErr != nil { + return nil, authErr + } + } + + // create http request + var reinstateSlash bool + if r.pathPattern != "" && r.pathPattern != "/" && r.pathPattern[len(r.pathPattern)-1] == '/' { + reinstateSlash = true + } + urlPath := path.Join(basePath, r.pathPattern) + for k, v := range r.pathParams { + urlPath = strings.Replace(urlPath, "{"+k+"}", url.PathEscape(v), -1) + } + if reinstateSlash { + urlPath = urlPath + "/" + } + + req, err := http.NewRequest(r.method, urlPath, body) + if err != nil { + return nil, err + } + + req.URL.RawQuery = r.query.Encode() + req.Header = r.header + + return req, nil +} + +func mangleContentType(mediaType, boundary string) string { + if strings.ToLower(mediaType) == runtime.URLencodedFormMime { + return fmt.Sprintf("%s; boundary=%s", mediaType, boundary) + } + return "multipart/form-data; boundary=" + boundary +} + +func (r *request) GetMethod() string { + return r.method +} + +func (r *request) GetPath() string { + path := r.pathPattern + for k, v := range r.pathParams { + path = strings.Replace(path, "{"+k+"}", v, -1) + } + return path +} + +func (r *request) GetBody() []byte { + return r.getBody(r) +} + +func getRequestBuffer(r *request) []byte { + if r.buf == nil { + return nil + } + return r.buf.Bytes() +} + +// SetHeaderParam adds a 
header param to the request
+// when there is only 1 value provided for the varargs, it will set it.
+// when there are several values provided for the varargs it will add it (no overriding)
+func (r *request) SetHeaderParam(name string, values ...string) error {
+	if r.header == nil {
+		r.header = make(http.Header)
+	}
+	r.header[http.CanonicalHeaderKey(name)] = values
+	return nil
+}
+
+// GetHeaderParams returns all the headers currently set for the request
+func (r *request) GetHeaderParams() http.Header {
+	return r.header
+}
+
+// SetQueryParam adds a query param to the request
+// when there is only 1 value provided for the varargs, it will set it.
+// when there are several values provided for the varargs it will add it (no overriding)
+func (r *request) SetQueryParam(name string, values ...string) error {
+	if r.query == nil {
+		r.query = make(url.Values)
+	}
+	r.query[name] = values
+	return nil
+}
+
+// GetQueryParams returns a copy of all query params currently set for the request
+func (r *request) GetQueryParams() url.Values {
+	var result = make(url.Values)
+	for key, value := range r.query {
+		result[key] = append([]string{}, value...)
+	}
+	return result
+}
+
+// SetFormParam adds a form param to the request
+// when there is only 1 value provided for the varargs, it will set it.
+// when there are several values provided for the varargs it will add it (no overriding)
+func (r *request) SetFormParam(name string, values ...string) error {
+	if r.formFields == nil {
+		r.formFields = make(url.Values)
+	}
+	r.formFields[name] = values
+	return nil
+}
+
+// SetPathParam adds a path param to the request
+func (r *request) SetPathParam(name string, value string) error {
+	if r.pathParams == nil {
+		r.pathParams = make(map[string]string)
+	}
+
+	r.pathParams[name] = value
+	return nil
+}
+
+// SetFileParam adds a file param to the request
+func (r *request) SetFileParam(name string, files ...runtime.NamedReadCloser) error {
+	for _, file := range files {
+		if actualFile, ok := file.(*os.File); ok {
+			fi, err := os.Stat(actualFile.Name())
+			if err != nil {
+				return err
+			}
+			if fi.IsDir() {
+				return fmt.Errorf("%q is a directory, only files are supported", file.Name())
+			}
+		}
+	}
+
+	if r.fileFields == nil {
+		r.fileFields = make(map[string][]runtime.NamedReadCloser)
+	}
+	if r.formFields == nil {
+		r.formFields = make(url.Values)
+	}
+
+	r.fileFields[name] = files
+	return nil
+}
+
+func (r *request) GetFileParam() map[string][]runtime.NamedReadCloser {
+	return r.fileFields
+}
+
+// SetBodyParam sets a body parameter on the request.
+// This does not yet serialize the object; that happens as late as possible.
+func (r *request) SetBodyParam(payload interface{}) error {
+	r.payload = payload
+	return nil
+}
+
+func (r *request) GetBodyParam() interface{} {
+	return r.payload
+}
+
+// SetTimeout sets the timeout for a request
+func (r *request) SetTimeout(timeout time.Duration) error {
+	r.timeout = timeout
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/client/response.go b/vendor/github.com/go-openapi/runtime/client/response.go
new file mode 100644
index 00000000000..bd238588b7d
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client/response.go
@@ -0,0 +1,44 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "io" + "net/http" + + "github.com/go-openapi/runtime" +) + +var _ runtime.ClientResponse = response{} + +type response struct { + resp *http.Response +} + +func (r response) Code() int { + return r.resp.StatusCode +} + +func (r response) Message() string { + return r.resp.Status +} + +func (r response) GetHeader(name string) string { + return r.resp.Header.Get(name) +} + +func (r response) Body() io.ReadCloser { + return r.resp.Body +} diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go new file mode 100644 index 00000000000..00ce53d675e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/runtime.go @@ -0,0 +1,481 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "mime" + "net/http" + "net/http/httputil" + "strings" + "sync" + "time" + + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/runtime/middleware" +) + +// TLSClientOptions to configure client authentication with mutual TLS +type TLSClientOptions struct { + // Certificate is the path to a PEM-encoded certificate to be used for + // client authentication. If set then Key must also be set. + Certificate string + + // LoadedCertificate is the certificate to be used for client authentication. + // This field is ignored if Certificate is set. If this field is set, LoadedKey + // is also required. + LoadedCertificate *x509.Certificate + + // Key is the path to an unencrypted PEM-encoded private key for client + // authentication. This field is required if Certificate is set. + Key string + + // LoadedKey is the key for client authentication. This field is required if + // LoadedCertificate is set. + LoadedKey crypto.PrivateKey + + // CA is a path to a PEM-encoded certificate that specifies the root certificate + // to use when validating the TLS certificate presented by the server. If this field + // (and LoadedCA) is not set, the system certificate pool is used. This field is ignored if LoadedCA + // is set. + CA string + + // LoadedCA specifies the root certificate to use when validating the server's TLS certificate. + // If this field (and CA) is not set, the system certificate pool is used. 
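+	//
+	// A hedged sketch (editor's illustration) of populating this field from
+	// PEM bytes held in a hypothetical caPEM variable:
+	//
+	//	block, _ := pem.Decode(caPEM)
+	//	loadedCA, err := x509.ParseCertificate(block.Bytes)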
+	LoadedCA *x509.Certificate
+
+	// LoadedCAPool specifies a pool of RootCAs to use when validating the server's TLS certificate.
+	// If set, it will be combined with the other loaded certificates (see LoadedCA and CA).
+	// If neither LoadedCA nor CA is set, the provided pool will override the system
+	// certificate pool.
+	// The caller must not use the supplied pool after calling TLSClientAuth.
+	LoadedCAPool *x509.CertPool
+
+	// ServerName specifies the hostname to use when verifying the server certificate.
+	// If this field is set then InsecureSkipVerify will be ignored and treated as
+	// false.
+	ServerName string
+
+	// InsecureSkipVerify controls whether the certificate chain and hostname presented
+	// by the server are validated. If true, any certificate is accepted.
+	InsecureSkipVerify bool
+
+	// VerifyPeerCertificate, if not nil, is called after normal
+	// certificate verification. It receives the raw ASN.1 certificates
+	// provided by the peer and also any verified chains that normal processing found.
+	// If it returns a non-nil error, the handshake is aborted and that error results.
+	//
+	// If normal verification fails then the handshake will abort before
+	// considering this callback. If normal verification is disabled by
+	// setting InsecureSkipVerify then this callback will be considered but
+	// the verifiedChains argument will always be nil.
+	VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
+
+	// SessionTicketsDisabled may be set to true to disable session ticket and
+	// PSK (resumption) support. Note that on clients, session ticket support is
+	// also disabled if ClientSessionCache is nil.
+	SessionTicketsDisabled bool
+
+	// ClientSessionCache is a cache of ClientSessionState entries for TLS
+	// session resumption. It is only used by clients.
+	ClientSessionCache tls.ClientSessionCache
+
+	// Prevents callers using unkeyed fields.
+ _ struct{} +} + +// TLSClientAuth creates a tls.Config for mutual auth +func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) { + // create client tls config + cfg := &tls.Config{} + + // load client cert if specified + if opts.Certificate != "" { + cert, err := tls.LoadX509KeyPair(opts.Certificate, opts.Key) + if err != nil { + return nil, fmt.Errorf("tls client cert: %v", err) + } + cfg.Certificates = []tls.Certificate{cert} + } else if opts.LoadedCertificate != nil { + block := pem.Block{Type: "CERTIFICATE", Bytes: opts.LoadedCertificate.Raw} + certPem := pem.EncodeToMemory(&block) + + var keyBytes []byte + switch k := opts.LoadedKey.(type) { + case *rsa.PrivateKey: + keyBytes = x509.MarshalPKCS1PrivateKey(k) + case *ecdsa.PrivateKey: + var err error + keyBytes, err = x509.MarshalECPrivateKey(k) + if err != nil { + return nil, fmt.Errorf("tls client priv key: %v", err) + } + default: + return nil, fmt.Errorf("tls client priv key: unsupported key type") + } + + block = pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes} + keyPem := pem.EncodeToMemory(&block) + + cert, err := tls.X509KeyPair(certPem, keyPem) + if err != nil { + return nil, fmt.Errorf("tls client cert: %v", err) + } + cfg.Certificates = []tls.Certificate{cert} + } + + cfg.InsecureSkipVerify = opts.InsecureSkipVerify + + cfg.VerifyPeerCertificate = opts.VerifyPeerCertificate + cfg.SessionTicketsDisabled = opts.SessionTicketsDisabled + cfg.ClientSessionCache = opts.ClientSessionCache + + // When no CA certificate is provided, default to the system cert pool + // that way when a request is made to a server known by the system trust store, + // the name is still verified + if opts.LoadedCA != nil { + caCertPool := basePool(opts.LoadedCAPool) + caCertPool.AddCert(opts.LoadedCA) + cfg.RootCAs = caCertPool + } else if opts.CA != "" { + // load ca cert + caCert, err := ioutil.ReadFile(opts.CA) + if err != nil { + return nil, fmt.Errorf("tls client ca: %v", err) + } + caCertPool := basePool(opts.LoadedCAPool) + caCertPool.AppendCertsFromPEM(caCert) + cfg.RootCAs = caCertPool + } else if opts.LoadedCAPool != nil { + cfg.RootCAs = opts.LoadedCAPool + } + + // apply servername overrride + if opts.ServerName != "" { + cfg.InsecureSkipVerify = false + cfg.ServerName = opts.ServerName + } + + cfg.BuildNameToCertificate() + + return cfg, nil +} + +func basePool(pool *x509.CertPool) *x509.CertPool { + if pool == nil { + return x509.NewCertPool() + } + return pool +} + +// TLSTransport creates a http client transport suitable for mutual tls auth +func TLSTransport(opts TLSClientOptions) (http.RoundTripper, error) { + cfg, err := TLSClientAuth(opts) + if err != nil { + return nil, err + } + + return &http.Transport{TLSClientConfig: cfg}, nil +} + +// TLSClient creates a http.Client for mutual auth +func TLSClient(opts TLSClientOptions) (*http.Client, error) { + transport, err := TLSTransport(opts) + if err != nil { + return nil, err + } + return &http.Client{Transport: transport}, nil +} + +// DefaultTimeout the default request timeout +var DefaultTimeout = 30 * time.Second + +// Runtime represents an API client that uses the transport +// to make http requests based on a swagger specification. 
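+//
+// A hedged construction sketch (editor's illustration; host, base path and
+// token are placeholders):
+//
+//	rt := New("api.example.com", "/v1", []string{"https"})
+//	rt.DefaultAuthentication = BearerToken("my-token")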
+type Runtime struct { + DefaultMediaType string + DefaultAuthentication runtime.ClientAuthInfoWriter + Consumers map[string]runtime.Consumer + Producers map[string]runtime.Producer + + Transport http.RoundTripper + Jar http.CookieJar + //Spec *spec.Document + Host string + BasePath string + Formats strfmt.Registry + Context context.Context + + Debug bool + logger logger.Logger + + clientOnce *sync.Once + client *http.Client + schemes []string +} + +// New creates a new default runtime for a swagger api runtime.Client +func New(host, basePath string, schemes []string) *Runtime { + var rt Runtime + rt.DefaultMediaType = runtime.JSONMime + + // TODO: actually infer this stuff from the spec + rt.Consumers = map[string]runtime.Consumer{ + runtime.JSONMime: runtime.JSONConsumer(), + runtime.XMLMime: runtime.XMLConsumer(), + runtime.TextMime: runtime.TextConsumer(), + runtime.HTMLMime: runtime.TextConsumer(), + runtime.CSVMime: runtime.CSVConsumer(), + runtime.DefaultMime: runtime.ByteStreamConsumer(), + } + rt.Producers = map[string]runtime.Producer{ + runtime.JSONMime: runtime.JSONProducer(), + runtime.XMLMime: runtime.XMLProducer(), + runtime.TextMime: runtime.TextProducer(), + runtime.HTMLMime: runtime.TextProducer(), + runtime.CSVMime: runtime.CSVProducer(), + runtime.DefaultMime: runtime.ByteStreamProducer(), + } + rt.Transport = http.DefaultTransport + rt.Jar = nil + rt.Host = host + rt.BasePath = basePath + rt.Context = context.Background() + rt.clientOnce = new(sync.Once) + if !strings.HasPrefix(rt.BasePath, "/") { + rt.BasePath = "/" + rt.BasePath + } + + rt.Debug = logger.DebugEnabled() + rt.logger = logger.StandardLogger{} + + if len(schemes) > 0 { + rt.schemes = schemes + } + return &rt +} + +// NewWithClient allows you to create a new transport with a configured http.Client +func NewWithClient(host, basePath string, schemes []string, client *http.Client) *Runtime { + rt := New(host, basePath, schemes) + if client != nil { + rt.clientOnce.Do(func() { + rt.client = client + }) + } + return rt +} + +func (r *Runtime) pickScheme(schemes []string) string { + if v := r.selectScheme(r.schemes); v != "" { + return v + } + if v := r.selectScheme(schemes); v != "" { + return v + } + return "http" +} + +func (r *Runtime) selectScheme(schemes []string) string { + schLen := len(schemes) + if schLen == 0 { + return "" + } + + scheme := schemes[0] + // prefer https, but skip when not possible + if scheme != "https" && schLen > 1 { + for _, sch := range schemes { + if sch == "https" { + scheme = sch + break + } + } + } + return scheme +} +func transportOrDefault(left, right http.RoundTripper) http.RoundTripper { + if left == nil { + return right + } + return left +} + +// EnableConnectionReuse drains the remaining body from a response +// so that go will reuse the TCP connections. +// +// This is not enabled by default because there are servers where +// the response never gets closed and that would make the code hang forever. +// So instead it's provided as a http client middleware that can be used to override +// any request. 
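+//
+// Hedged usage sketch (editor's illustration): callers opt in explicitly
+// after constructing the runtime:
+//
+//	rt := New("api.example.com", "/v1", []string{"https"})
+//	rt.EnableConnectionReuse()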
+func (r *Runtime) EnableConnectionReuse() { + if r.client == nil { + r.Transport = KeepAliveTransport( + transportOrDefault(r.Transport, http.DefaultTransport), + ) + return + } + + r.client.Transport = KeepAliveTransport( + transportOrDefault(r.client.Transport, + transportOrDefault(r.Transport, http.DefaultTransport), + ), + ) +} + +// Submit a request and when there is a body on success it will turn that into the result +// all other things are turned into an api error for swagger which retains the status code +func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error) { + params, readResponse, auth := operation.Params, operation.Reader, operation.AuthInfo + + request, err := newRequest(operation.Method, operation.PathPattern, params) + if err != nil { + return nil, err + } + + var accept []string + accept = append(accept, operation.ProducesMediaTypes...) + if err = request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil { + return nil, err + } + + if auth == nil && r.DefaultAuthentication != nil { + auth = r.DefaultAuthentication + } + //if auth != nil { + // if err := auth.AuthenticateRequest(request, r.Formats); err != nil { + // return nil, err + // } + //} + + // TODO: pick appropriate media type + cmt := r.DefaultMediaType + for _, mediaType := range operation.ConsumesMediaTypes { + // Pick first non-empty media type + if mediaType != "" { + cmt = mediaType + break + } + } + + if _, ok := r.Producers[cmt]; !ok && cmt != runtime.MultipartFormMime && cmt != runtime.URLencodedFormMime { + return nil, fmt.Errorf("none of producers: %v registered. try %s", r.Producers, cmt) + } + + req, err := request.buildHTTP(cmt, r.BasePath, r.Producers, r.Formats, auth) + if err != nil { + return nil, err + } + req.URL.Scheme = r.pickScheme(operation.Schemes) + req.URL.Host = r.Host + req.Host = r.Host + + r.clientOnce.Do(func() { + r.client = &http.Client{ + Transport: r.Transport, + Jar: r.Jar, + } + }) + + if r.Debug { + b, err2 := httputil.DumpRequestOut(req, true) + if err2 != nil { + return nil, err2 + } + r.logger.Debugf("%s\n", string(b)) + } + + var hasTimeout bool + pctx := operation.Context + if pctx == nil { + pctx = r.Context + } else { + hasTimeout = true + } + if pctx == nil { + pctx = context.Background() + } + var ctx context.Context + var cancel context.CancelFunc + if hasTimeout { + ctx, cancel = context.WithCancel(pctx) + } else { + ctx, cancel = context.WithTimeout(pctx, request.timeout) + } + defer cancel() + + client := operation.Client + if client == nil { + client = r.client + } + req = req.WithContext(ctx) + res, err := client.Do(req) // make requests, by default follows 10 redirects before failing + if err != nil { + return nil, err + } + defer res.Body.Close() + + if r.Debug { + b, err2 := httputil.DumpResponse(res, true) + if err2 != nil { + return nil, err2 + } + r.logger.Debugf("%s\n", string(b)) + } + + ct := res.Header.Get(runtime.HeaderContentType) + if ct == "" { // this should really really never occur + ct = r.DefaultMediaType + } + + mt, _, err := mime.ParseMediaType(ct) + if err != nil { + return nil, fmt.Errorf("parse content type: %s", err) + } + + cons, ok := r.Consumers[mt] + if !ok { + if cons, ok = r.Consumers["*/*"]; !ok { + // scream about not knowing what to do + return nil, fmt.Errorf("no consumer: %q", ct) + } + } + return readResponse.ReadResponse(response{res}, cons) +} + +// SetDebug changes the debug flag. +// It ensures that client and middlewares have the set debug level. 
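+//
+// When enabled, Submit dumps each request and response via httputil (see
+// above). Hedged example (editor's illustration): rt.SetDebug(true).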
+func (r *Runtime) SetDebug(debug bool) { + r.Debug = debug + middleware.Debug = debug +} + +// SetLogger changes the logger stream. +// It ensures that client and middlewares use the same logger. +func (r *Runtime) SetLogger(logger logger.Logger) { + r.logger = logger + middleware.Logger = logger +} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go new file mode 100644 index 00000000000..c6c97d9a7c3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_auth_info.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "github.com/go-openapi/strfmt" + +// A ClientAuthInfoWriterFunc converts a function to a request writer interface +type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error + +// AuthenticateRequest adds authentication data to the request +func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// A ClientAuthInfoWriter implementor knows how to write authentication info to a request +type ClientAuthInfoWriter interface { + AuthenticateRequest(ClientRequest, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go new file mode 100644 index 00000000000..fa21eacf330 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_operation.go @@ -0,0 +1,41 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "context" + "net/http" +) + +// ClientOperation represents the context for a swagger operation to be submitted to the transport +type ClientOperation struct { + ID string + Method string + PathPattern string + ProducesMediaTypes []string + ConsumesMediaTypes []string + Schemes []string + AuthInfo ClientAuthInfoWriter + Params ClientRequestWriter + Reader ClientResponseReader + Context context.Context + Client *http.Client +} + +// A ClientTransport implementor knows how to submit Request objects to some destination +type ClientTransport interface { + //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) + Submit(*ClientOperation) (interface{}, error) +} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go new file mode 100644 index 00000000000..6215e0a1c1f --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/go-openapi/strfmt" +) + +// ClientRequestWriterFunc converts a function to a request writer interface +type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error + +// WriteToRequest adds data to the request +func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// ClientRequestWriter is an interface for things that know how to write to a request +type ClientRequestWriter interface { + WriteToRequest(ClientRequest, strfmt.Registry) error +} + +// ClientRequest is an interface for things that know how to +// add information to a swagger client request +type ClientRequest interface { + SetHeaderParam(string, ...string) error + + GetHeaderParams() http.Header + + SetQueryParam(string, ...string) error + + SetFormParam(string, ...string) error + + SetPathParam(string, string) error + + GetQueryParams() url.Values + + SetFileParam(string, ...NamedReadCloser) error + + SetBodyParam(interface{}) error + + SetTimeout(time.Duration) error + + GetMethod() string + + GetPath() string + + GetBody() []byte + + GetBodyParam() interface{} + + GetFileParam() map[string][]NamedReadCloser +} + +// NamedReadCloser represents a named ReadCloser interface +type NamedReadCloser interface { + io.ReadCloser + Name() string +} + +// NamedReader creates a NamedReadCloser for use as file upload +func NamedReader(name string, rdr io.Reader) NamedReadCloser { + rc, ok := rdr.(io.ReadCloser) + if !ok { + rc = ioutil.NopCloser(rdr) + } + return &namedReadCloser{ + name: name, + cr: rc, + } +} + +type namedReadCloser struct { + name string + cr io.ReadCloser +} + +func (n *namedReadCloser) Close() error { + return n.cr.Close() +} +func (n *namedReadCloser) Read(p []byte) (int, error) { + return n.cr.Read(p) +} +func (n *namedReadCloser) Name() string { + return 
n.name
+}
diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go
new file mode 100644
index 00000000000..729e18b2283
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client_response.go
@@ -0,0 +1,63 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+	"fmt"
+	"io"
+)
+
+// A ClientResponse represents a client response.
+// This bridges between responses obtained from different transports.
+type ClientResponse interface {
+	Code() int
+	Message() string
+	GetHeader(string) string
+	Body() io.ReadCloser
+}
+
+// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation
+type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error)
+
+// ReadResponse reads the response
+func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) {
+	return read(resp, consumer)
+}
+
+// A ClientResponseReader is an interface for things that want to read a response.
+// An application of this is to create structs from response values
+type ClientResponseReader interface {
+	ReadResponse(ClientResponse, Consumer) (interface{}, error)
+}
+
+// NewAPIError creates a new API error
+func NewAPIError(opName string, payload interface{}, code int) *APIError {
+	return &APIError{
+		OperationName: opName,
+		Response:      payload,
+		Code:          code,
+	}
+}
+
+// APIError wraps an error model and captures the status code
+type APIError struct {
+	OperationName string
+	Response      interface{}
+	Code          int
+}
+
+func (a *APIError) Error() string {
+	return fmt.Sprintf("%s (status %d): %+v ", a.OperationName, a.Code, a.Response)
+}
diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go
new file mode 100644
index 00000000000..a4de897adcd
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/constants.go
@@ -0,0 +1,47 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+const (
+	// HeaderContentType represents an HTTP content-type header; its value is supposed to be a MIME type
+	HeaderContentType = "Content-Type"
+
+	// HeaderTransferEncoding represents an HTTP transfer-encoding header.
+ HeaderTransferEncoding = "Transfer-Encoding" + + // HeaderAccept the Accept header + HeaderAccept = "Accept" + + charsetKey = "charset" + + // DefaultMime the default fallback mime type + DefaultMime = "application/octet-stream" + // JSONMime the json mime type + JSONMime = "application/json" + // YAMLMime the yaml mime type + YAMLMime = "application/x-yaml" + // XMLMime the xml mime type + XMLMime = "application/xml" + // TextMime the text mime type + TextMime = "text/plain" + // HTMLMime the html mime type + HTMLMime = "text/html" + // CSVMime the csv mime type + CSVMime = "text/csv" + // MultipartFormMime the multipart form mime type + MultipartFormMime = "multipart/form-data" + // URLencodedFormMime the url encoded form mime type + URLencodedFormMime = "application/x-www-form-urlencoded" +) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go new file mode 100644 index 00000000000..d807bd915b4 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/csv.go @@ -0,0 +1,77 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding/csv" + "errors" + "io" +) + +// CSVConsumer creates a new CSV consumer +func CSVConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("CSVConsumer requires a reader") + } + + csvReader := csv.NewReader(reader) + writer, ok := data.(io.Writer) + if !ok { + return errors.New("data type must be io.Writer") + } + csvWriter := csv.NewWriter(writer) + records, err := csvReader.ReadAll() + if err != nil { + return err + } + for _, r := range records { + if err := csvWriter.Write(r); err != nil { + return err + } + } + csvWriter.Flush() + return nil + }) +} + +// CSVProducer creates a new CSV producer +func CSVProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("CSVProducer requires a writer") + } + + dataBytes, ok := data.([]byte) + if !ok { + return errors.New("data type must be byte array") + } + + csvReader := csv.NewReader(bytes.NewBuffer(dataBytes)) + records, err := csvReader.ReadAll() + if err != nil { + return err + } + csvWriter := csv.NewWriter(writer) + for _, r := range records { + if err := csvWriter.Write(r); err != nil { + return err + } + } + csvWriter.Flush() + return nil + }) +} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go new file mode 100644 index 00000000000..0d390cfd64c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/discard.go @@ -0,0 +1,9 @@ +package runtime + +import "io" + +// DiscardConsumer does absolutely nothing, it's a black hole. +var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) + +// DiscardProducer does absolutely nothing, it's a black hole. 
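+//
+// Hedged note (editor's illustration): either can be registered where a
+// payload must be ignored, e.g. rt.Consumers["*/*"] = runtime.DiscardConsumer.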
+var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go new file mode 100644 index 00000000000..85971c18c4b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "mime/multipart" + +// File represents an uploaded file. +type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/runtime/go.mod b/vendor/github.com/go-openapi/runtime/go.mod new file mode 100644 index 00000000000..f5f9d16be9e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/go.mod @@ -0,0 +1,16 @@ +module github.com/go-openapi/runtime + +require ( + github.com/docker/go-units v0.4.0 + github.com/go-openapi/analysis v0.19.5 + github.com/go-openapi/errors v0.19.2 + github.com/go-openapi/loads v0.19.3 + github.com/go-openapi/spec v0.19.3 + github.com/go-openapi/strfmt v0.19.3 + github.com/go-openapi/swag v0.19.5 + github.com/go-openapi/validate v0.19.3 + github.com/stretchr/testify v1.4.0 + gopkg.in/yaml.v2 v2.2.4 +) + +go 1.13 diff --git a/vendor/github.com/go-openapi/runtime/go.sum b/vendor/github.com/go-openapi/runtime/go.sum new file mode 100644 index 00000000000..c24bb9865fd --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/go.sum @@ -0,0 +1,149 @@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2 h1:ophLETFestFZHk3ji7niPEL4d466QjW+0Tdg5VyDq7E= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2 h1:rf5ArTHmIJxyV5Oiks+Su0mUens1+AjpkPoWr5xFRcI= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3 h1:jwIoahqCmaA5OBoc/B+1+Mu2L0Gr8xYQnbeyQEo/7b0= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= 
+github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2 h1:ky5l57HjyVRrsJfd2+Ro5Z9PjGuKbsmftwyMtk8H7js= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3 h1:PAH/2DylwWcIU1s0Y7k3yNmeAgWOcKrNE2Q7Ww/kCg4= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= 
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe h1:W/GaMY0y69G4cFlmsC6B9sbuo2fP8OFP1ABjt4kPz+w= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53 h1:kcXqo9vE6fsZY5X5Rd7R1l7fTgnWaDCVmln65REefiE= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 
h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go new file mode 100644 index 00000000000..4d111db4fec --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/headers.go @@ -0,0 +1,45 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "mime" + "net/http" + + "github.com/go-openapi/errors" +) + +// ContentType parses a content type header +func ContentType(headers http.Header) (string, string, error) { + ct := headers.Get(HeaderContentType) + orig := ct + if ct == "" { + ct = DefaultMime + } + if ct == "" { + return "", "", nil + } + + mt, opts, err := mime.ParseMediaType(ct) + if err != nil { + return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) + } + + if cs, ok := opts[charsetKey]; ok { + return mt, cs, nil + } + + return mt, "", nil +} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go new file mode 100644 index 00000000000..65de0aa44b9 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/interfaces.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "io" + "net/http" + + "github.com/go-openapi/strfmt" +) + +// OperationHandlerFunc an adapter for a function to the OperationHandler interface +type OperationHandlerFunc func(interface{}) (interface{}, error) + +// Handle implements the operation handler interface +func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { + return s(data) +} + +// OperationHandler a handler for a swagger operation +type OperationHandler interface { + Handle(interface{}) (interface{}, error) +} + +// ConsumerFunc represents a function that can be used as a consumer +type ConsumerFunc func(io.Reader, interface{}) error + +// Consume consumes the reader into the data parameter +func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { + return fn(reader, data) +} + +// Consumer implementations know how to bind the values on the provided interface to +// data provided by the request body +type Consumer interface { + // Consume performs the binding of request values + Consume(io.Reader, interface{}) error +} + +// ProducerFunc represents a function that can be used as a producer +type ProducerFunc func(io.Writer, interface{}) error + +// Produce produces the response for the provided data +func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { + return f(writer, data) +} + +// Producer implementations know how to turn the provided interface into a valid +// HTTP response +type Producer interface { + // Produce writes to the http response + Produce(io.Writer, interface{}) error +} + +// AuthenticatorFunc turns a function into an authenticator +type AuthenticatorFunc func(interface{}) (bool, interface{}, error) + +// Authenticate authenticates the request with the provided data +func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { + return f(params) +} + +// Authenticator represents an authentication strategy +// implementations of Authenticator know how to authenticate the +// request data and translate that into a valid principal object or an error +type Authenticator interface { 
+ Authenticate(interface{}) (bool, interface{}, error) +} + +// AuthorizerFunc turns a function into an authorizer +type AuthorizerFunc func(*http.Request, interface{}) error + +// Authorize authorizes the processing of the request for the principal +func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { + return f(r, principal) +} + +// Authorizer represents an authorization strategy +// implementations of Authorizer know how to authorize the principal object +// using the request data and returns error if unauthorized +type Authorizer interface { + Authorize(*http.Request, interface{}) error +} + +// Validatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. +// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the validations obtained from the spec +type Validatable interface { + Validate(strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go new file mode 100644 index 00000000000..5a690559cc5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/json.go @@ -0,0 +1,38 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONConsumer creates a new JSON consumer +func JSONConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := json.NewDecoder(reader) + dec.UseNumber() // preserve number formats + return dec.Decode(data) + }) +} + +// JSONProducer creates a new JSON producer +func JSONProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := json.NewEncoder(writer) + enc.SetEscapeHTML(false) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/logger/logger.go b/vendor/github.com/go-openapi/runtime/logger/logger.go new file mode 100644 index 00000000000..6f4debcc145 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/logger/logger.go @@ -0,0 +1,20 @@ +package logger + +import "os" + +type Logger interface { + Printf(format string, args ...interface{}) + Debugf(format string, args ...interface{}) +} + +func DebugEnabled() bool { + d := os.Getenv("SWAGGER_DEBUG") + if d != "" && d != "false" && d != "0" { + return true + } + d = os.Getenv("DEBUG") + if d != "" && d != "false" && d != "0" { + return true + } + return false +} diff --git a/vendor/github.com/go-openapi/runtime/logger/standard.go b/vendor/github.com/go-openapi/runtime/logger/standard.go new file mode 100644 index 00000000000..f7e67ebb9e7 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/logger/standard.go @@ -0,0 +1,22 @@ +package logger + +import ( + "fmt" + "os" +) + +type StandardLogger struct{} + +func (StandardLogger) Printf(format string, args ...interface{}) { + if len(format) == 0 || format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(os.Stderr, format, args...) +} + +func (StandardLogger) Debugf(format string, args ...interface{}) { + if len(format) == 0 || format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(os.Stderr, format, args...) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go new file mode 100644 index 00000000000..0ff9e39a888 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/context.go @@ -0,0 +1,592 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + stdContext "context" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/runtime/middleware/untyped" + "github.com/go-openapi/runtime/security" +) + +// Debug when true turns on verbose logging +var Debug = logger.DebugEnabled() +var Logger logger.Logger = logger.StandardLogger{} + +func debugLog(format string, args ...interface{}) { + if Debug { + Logger.Printf(format, args...) 
+ } +} + +// A Builder can create middlewares +type Builder func(http.Handler) http.Handler + +// PassthroughBuilder returns the handler, aka the builder identity function +func PassthroughBuilder(handler http.Handler) http.Handler { return handler } + +// RequestBinder is an interface for types to implement +// when they want to be able to bind from a request +type RequestBinder interface { + BindRequest(*http.Request, *MatchedRoute) error +} + +// Responder is an interface for types to implement +// when they want to be considered for writing HTTP responses +type Responder interface { + WriteResponse(http.ResponseWriter, runtime.Producer) +} + +// ResponderFunc wraps a func as a Responder interface +type ResponderFunc func(http.ResponseWriter, runtime.Producer) + +// WriteResponse writes to the response +func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) { + fn(rw, pr) +} + +// Context is a type safe wrapper around an untyped request context +// used throughout to store request context with the standard context attached +// to the http.Request +type Context struct { + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + router Router +} + +type routableUntypedAPI struct { + api *untyped.API + hlock *sync.Mutex + handlers map[string]map[string]http.Handler + defaultConsumes string + defaultProduces string +} + +func newRoutableUntypedAPI(spec *loads.Document, api *untyped.API, context *Context) *routableUntypedAPI { + var handlers map[string]map[string]http.Handler + if spec == nil || api == nil { + return nil + } + analyzer := analysis.New(spec.Spec()) + for method, hls := range analyzer.Operations() { + um := strings.ToUpper(method) + for path, op := range hls { + schemes := analyzer.SecurityRequirementsFor(op) + + if oh, ok := api.OperationHandlerFor(method, path); ok { + if handlers == nil { + handlers = make(map[string]map[string]http.Handler) + } + if b, ok := handlers[um]; !ok || b == nil { + handlers[um] = make(map[string]http.Handler) + } + + var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // lookup route info in the context + route, rCtx, _ := context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + + // bind and validate the request using reflection + var bound interface{} + var validation error + bound, r, validation = context.BindAndValidate(r, route) + if validation != nil { + context.Respond(w, r, route.Produces, route, validation) + return + } + + // actually handle the request + result, err := oh.Handle(bound) + if err != nil { + // respond with failure + context.Respond(w, r, route.Produces, route, err) + return + } + + // respond with success + context.Respond(w, r, route.Produces, route, result) + }) + + if len(schemes) > 0 { + handler = newSecureAPI(context, handler) + } + handlers[um][path] = handler + } + } + } + + return &routableUntypedAPI{ + api: api, + hlock: new(sync.Mutex), + handlers: handlers, + defaultProduces: api.DefaultProduces, + defaultConsumes: api.DefaultConsumes, + } +} + +func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool) { + r.hlock.Lock() + paths, ok := r.handlers[strings.ToUpper(method)] + if !ok { + r.hlock.Unlock() + return nil, false + } + handler, ok := paths[path] + r.hlock.Unlock() + return handler, ok +} +func (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) { + return r.api.ServeError +} +func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) 
map[string]runtime.Consumer { + return r.api.ConsumersFor(mediaTypes) +} +func (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + return r.api.ProducersFor(mediaTypes) +} +func (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + return r.api.AuthenticatorsFor(schemes) +} +func (r *routableUntypedAPI) Authorizer() runtime.Authorizer { + return r.api.Authorizer() +} +func (r *routableUntypedAPI) Formats() strfmt.Registry { + return r.api.Formats() +} + +func (r *routableUntypedAPI) DefaultProduces() string { + return r.defaultProduces +} + +func (r *routableUntypedAPI) DefaultConsumes() string { + return r.defaultConsumes +} + +// NewRoutableContext creates a new context for a routable API +func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context { + var an *analysis.Spec + if spec != nil { + an = analysis.New(spec.Spec()) + } + ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes} + return ctx +} + +// NewContext creates a new context wrapper +func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context { + var an *analysis.Spec + if spec != nil { + an = analysis.New(spec.Spec()) + } + ctx := &Context{spec: spec, analyzer: an} + ctx.api = newRoutableUntypedAPI(spec, api, ctx) + ctx.router = routes + return ctx +} + +// Serve serves the specified spec with the specified api registrations as a http.Handler +func Serve(spec *loads.Document, api *untyped.API) http.Handler { + return ServeWithBuilder(spec, api, PassthroughBuilder) +} + +// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated +// by the Builder +func ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler { + context := NewContext(spec, api, nil) + return context.APIHandler(builder) +} + +type contextKey int8 + +const ( + _ contextKey = iota + ctxContentType + ctxResponseFormat + ctxMatchedRoute + ctxBoundParams + ctxSecurityPrincipal + ctxSecurityScopes +) + +// MatchedRouteFrom request context value. +func MatchedRouteFrom(req *http.Request) *MatchedRoute { + mr := req.Context().Value(ctxMatchedRoute) + if mr == nil { + return nil + } + if res, ok := mr.(*MatchedRoute); ok { + return res + } + return nil +} + +// SecurityPrincipalFrom request context value. +func SecurityPrincipalFrom(req *http.Request) interface{} { + return req.Context().Value(ctxSecurityPrincipal) +} + +// SecurityScopesFrom request context value. 
+func SecurityScopesFrom(req *http.Request) []string {
+	rs := req.Context().Value(ctxSecurityScopes)
+	if res, ok := rs.([]string); ok {
+		return res
+	}
+	return nil
+}
+
+type contentTypeValue struct {
+	MediaType string
+	Charset   string
+}
+
+// BasePath returns the base path for this API
+func (c *Context) BasePath() string {
+	return c.spec.BasePath()
+}
+
+// RequiredProduces returns the accepted content types for responses
+func (c *Context) RequiredProduces() []string {
+	return c.analyzer.RequiredProduces()
+}
+
+// BindValidRequest binds a params object to a request but only when the request is valid;
+// if the request is not valid an error will be returned
+func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error {
+	var res []error
+	var requestContentType string
+
+	// check and validate content type, select consumer
+	if runtime.HasBody(request) {
+		ct, _, err := runtime.ContentType(request.Header)
+		if err != nil {
+			res = append(res, err)
+		} else {
+			if err := validateContentType(route.Consumes, ct); err != nil {
+				res = append(res, err)
+			}
+			if len(res) == 0 {
+				cons, ok := route.Consumers[ct]
+				if !ok {
+					res = append(res, errors.New(500, "no consumer registered for %s", ct))
+				} else {
+					route.Consumer = cons
+					requestContentType = ct
+				}
+			}
+		}
+	}
+
+	// check and validate the response format
+	if len(res) == 0 {
+		if str := NegotiateContentType(request, route.Produces, requestContentType); str == "" {
+			res = append(res, errors.InvalidResponseFormat(request.Header.Get(runtime.HeaderAccept), route.Produces))
+		}
+	}
+
+	// now bind the request with the provided binder
+	// it's assumed the binder will also validate the request and return an error if the
+	// request is invalid
+	if binder != nil && len(res) == 0 {
+		if err := binder.BindRequest(request, route); err != nil {
+			return err
+		}
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// ContentType gets the parsed value of a content type.
+// Returns the media type, its charset and a shallow copy of the request
+// when its context doesn't contain the content type value; otherwise it returns
+// the same request.
+// Returns any error that runtime.ContentType may return.
+func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxContentType).(*contentTypeValue); ok { + return v.MediaType, v.Charset, request, nil + } + + mt, cs, err := runtime.ContentType(request.Header) + if err != nil { + return "", "", nil, err + } + rCtx = stdContext.WithValue(rCtx, ctxContentType, &contentTypeValue{mt, cs}) + return mt, cs, request.WithContext(rCtx), nil +} + +// LookupRoute looks a route up and returns true when it is found +func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) { + if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok { + return route, ok + } + return nil, false +} + +// RouteInfo tries to match a route for this request +// Returns the matched route, a shallow copy of the request if its context +// contains the matched router, otherwise the same request, and a bool to +// indicate if it the request matches one of the routes, if it doesn't +// then it returns false and nil for the other two return values +func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxMatchedRoute).(*MatchedRoute); ok { + return v, request, ok + } + + if route, ok := c.LookupRoute(request); ok { + rCtx = stdContext.WithValue(rCtx, ctxMatchedRoute, route) + return route, request.WithContext(rCtx), ok + } + + return nil, nil, false +} + +// ResponseFormat negotiates the response content type +// Returns the response format and a shallow copy of the request if its context +// doesn't contain the response format, otherwise the same request +func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) { + var rCtx = r.Context() + + if v, ok := rCtx.Value(ctxResponseFormat).(string); ok { + debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v) + return v, r + } + + format := NegotiateContentType(r, offers, "") + if format != "" { + debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format) + r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format)) + } + debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format) + return format, r +} + +// AllowedMethods gets the allowed methods for the path of this request +func (c *Context) AllowedMethods(request *http.Request) []string { + return c.router.OtherMethods(request.Method, request.URL.EscapedPath()) +} + +// ResetAuth removes the current principal from the request context +func (c *Context) ResetAuth(request *http.Request) *http.Request { + rctx := request.Context() + rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil) + rctx = stdContext.WithValue(rctx, ctxSecurityScopes, nil) + return request.WithContext(rctx) +} + +// Authorize authorizes the request +// Returns the principal object and a shallow copy of the request when its +// context doesn't contain the principal, otherwise the same request or an error +// (the last) if one of the authenticators returns one or an Unauthenticated error +func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, *http.Request, error) { + if route == nil || !route.HasAuth() { + return nil, nil, nil + } + + var rCtx = request.Context() + if v := rCtx.Value(ctxSecurityPrincipal); v != nil { + return v, request, nil + } + + applies, usr, err := route.Authenticators.Authenticate(request, route) + if !applies || err != 
nil || !route.Authenticators.AllowsAnonymous() && usr == nil { + if err != nil { + return nil, nil, err + } + return nil, nil, errors.Unauthenticated("invalid credentials") + } + if route.Authorizer != nil { + if err := route.Authorizer.Authorize(request, usr); err != nil { + return nil, nil, errors.New(http.StatusForbidden, err.Error()) + } + } + + rCtx = request.Context() + + rCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr) + rCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes()) + return usr, request.WithContext(rCtx), nil +} + +// BindAndValidate binds and validates the request +// Returns the validation map and a shallow copy of the request when its context +// doesn't contain the validation, otherwise it returns the same request or an +// CompositeValidationError error +func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, *http.Request, error) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok { + debugLog("got cached validation (valid: %t)", len(v.result) == 0) + if len(v.result) > 0 { + return v.bound, request, errors.CompositeValidationError(v.result...) + } + return v.bound, request, nil + } + result := validateRequest(c, request, matched) + rCtx = stdContext.WithValue(rCtx, ctxBoundParams, result) + request = request.WithContext(rCtx) + if len(result.result) > 0 { + return result.bound, request, errors.CompositeValidationError(result.result...) + } + debugLog("no validation errors found") + return result.bound, request, nil +} + +// NotFound the default not found responder for when no route has been matched yet +func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) { + c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found")) +} + +// Respond renders the response after doing some content negotiation +func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) { + debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces) + offers := []string{} + for _, mt := range produces { + if mt != c.api.DefaultProduces() { + offers = append(offers, mt) + } + } + // the default producer is last so more specific producers take precedence + offers = append(offers, c.api.DefaultProduces()) + debugLog("offers: %v", offers) + + var format string + format, r = c.ResponseFormat(r, offers) + rw.Header().Set(runtime.HeaderContentType, format) + + if resp, ok := data.(Responder); ok { + producers := route.Producers + prod, ok := producers[format] + if !ok { + prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) + pr, ok := prods[c.api.DefaultProduces()] + if !ok { + panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) + } + prod = pr + } + resp.WriteResponse(rw, prod) + return + } + + if err, ok := data.(error); ok { + if format == "" { + rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime) + } + + if realm := security.FailedBasicAuth(r); realm != "" { + rw.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", realm)) + } + + if route == nil || route.Operation == nil { + c.api.ServeErrorFor("")(rw, r, err) + return + } + c.api.ServeErrorFor(route.Operation.ID)(rw, r, err) + return + } + + if route == nil || route.Operation == nil { + rw.WriteHeader(200) + if r.Method == "HEAD" { + return + } + producers := c.api.ProducersFor(normalizeOffers(offers)) + prod, ok := 
producers[format]
+		if !ok {
+			panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
+		}
+		if err := prod.Produce(rw, data); err != nil {
+			panic(err) // let the recovery middleware deal with this
+		}
+		return
+	}
+
+	if _, code, ok := route.Operation.SuccessResponse(); ok {
+		rw.WriteHeader(code)
+		if code == 204 || r.Method == "HEAD" {
+			return
+		}
+
+		producers := route.Producers
+		prod, ok := producers[format]
+		if !ok {
+			prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
+			pr, ok := prods[c.api.DefaultProduces()]
+			if !ok {
+				panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
+			}
+			prod = pr
+		}
+		if err := prod.Produce(rw, data); err != nil {
+			panic(err) // let the recovery middleware deal with this
+		}
+		return
+	}
+
+	c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response"))
+}
+
+// APIHandler returns a handler to serve the API; this includes the swagger spec, the router and the contract defined in the swagger spec
+func (c *Context) APIHandler(builder Builder) http.Handler {
+	b := builder
+	if b == nil {
+		b = PassthroughBuilder
+	}
+
+	var title string
+	sp := c.spec.Spec()
+	if sp != nil && sp.Info != nil && sp.Info.Title != "" {
+		title = sp.Info.Title
+	}
+
+	redocOpts := RedocOpts{
+		BasePath: c.BasePath(),
+		Title:    title,
+	}
+
+	return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)))
+}
+
+// RoutesHandler returns a handler to serve the API: just the routes and the contract defined in the swagger spec
+func (c *Context) RoutesHandler(builder Builder) http.Handler {
+	b := builder
+	if b == nil {
+		b = PassthroughBuilder
+	}
+	return NewRouter(c, b(NewOperationExecutor(c)))
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE
new file mode 100644
index 00000000000..e65039ad84c
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014 Naoya Inada <naoina@kuune.org>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
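Reviewer note: the vendored `Responder`/`Producer` pieces above (`ResponderFunc`, `JSONProducer`, and the producer selection done in `Context.Respond`) compose as in the following minimal sketch. This is illustrative only and not part of the diff; the `pingResponder` name and the standalone `main` are assumptions, and the only library APIs used are the ones vendored here:

```go
package main

import (
	"net/http"

	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware"
)

// pingResponder builds a Responder that emits a fixed JSON payload.
// Serialization is delegated to whatever Producer the caller supplies.
func pingResponder() middleware.Responder {
	return middleware.ResponderFunc(func(rw http.ResponseWriter, pr runtime.Producer) {
		rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime)
		rw.WriteHeader(http.StatusOK)
		_ = pr.Produce(rw, map[string]string{"status": "ok"})
	})
}

func main() {
	// Outside a generated API there is no content negotiation, so the JSON
	// producer is paired with the responder directly; Context.Respond would
	// normally pick the producer from the negotiated response format.
	http.HandleFunc("/ping", func(rw http.ResponseWriter, r *http.Request) {
		pingResponder().WriteResponse(rw, runtime.JSONProducer())
	})
	_ = http.ListenAndServe(":8080", nil)
}
```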
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md
new file mode 100644
index 00000000000..30109e17d5e
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md
@@ -0,0 +1,180 @@
+# Denco [![Build Status](https://travis-ci.org/naoina/denco.png?branch=master)](https://travis-ci.org/naoina/denco)
+
+The fast and flexible HTTP request router for [Go](http://golang.org).
+
+Denco is based on the Double-Array implementation of [Kocha-urlrouter](https://github.com/naoina/kocha-urlrouter).
+However, Denco has been optimized and extended with additional features.
+
+## Features
+
+* Fast (See [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark))
+* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`)
+* Small (but sufficient) URL router API
+* HTTP request multiplexer like `http.ServeMux`
+
+## Installation
+
+    go get -u github.com/go-openapi/runtime/middleware/denco
+
+## Using as HTTP request multiplexer
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+
+	"github.com/go-openapi/runtime/middleware/denco"
+)
+
+func Index(w http.ResponseWriter, r *http.Request, params denco.Params) {
+	fmt.Fprintf(w, "Welcome to Denco!\n")
+}
+
+func User(w http.ResponseWriter, r *http.Request, params denco.Params) {
+	fmt.Fprintf(w, "Hello %s!\n", params.Get("name"))
+}
+
+func main() {
+	mux := denco.NewMux()
+	handler, err := mux.Build([]denco.Handler{
+		mux.GET("/", Index),
+		mux.GET("/user/:name", User),
+		mux.POST("/user/:name", User),
+	})
+	if err != nil {
+		panic(err)
+	}
+	log.Fatal(http.ListenAndServe(":8080", handler))
+}
+```
+
+## Using as URL router
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/runtime/middleware/denco"
+)
+
+type route struct {
+	name string
+}
+
+func main() {
+	router := denco.New()
+	router.Build([]denco.Record{
+		{"/", &route{"root"}},
+		{"/user/:id", &route{"user"}},
+		{"/user/:name/:id", &route{"username"}},
+		{"/static/*filepath", &route{"static"}},
+	})
+
+	data, params, found := router.Lookup("/")
+	// print `&main.route{name:"root"}, denco.Params(nil), true`.
+	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+
+	data, params, found = router.Lookup("/user/hoge")
+	// print `&main.route{name:"user"}, denco.Params{denco.Param{Name:"id", Value:"hoge"}}, true`.
+	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+
+	data, params, found = router.Lookup("/user/hoge/7")
+	// print `&main.route{name:"username"}, denco.Params{denco.Param{Name:"name", Value:"hoge"}, denco.Param{Name:"id", Value:"7"}}, true`.
+	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+
+	data, params, found = router.Lookup("/static/path/to/file")
+	// print `&main.route{name:"static"}, denco.Params{denco.Param{Name:"filepath", Value:"path/to/file"}}, true`.
+	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+}
+```
+
+See [Godoc](http://godoc.org/github.com/go-openapi/runtime/middleware/denco) for more details.
+
+## Getting the value of a path parameter
+
+You can get the value of a path parameter in two ways:
+
+1. Using the [`denco.Params.Get`](http://godoc.org/github.com/go-openapi/runtime/middleware/denco#Params.Get) method
+2. Finding it in a loop
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/runtime/middleware/denco"
+)
+
+func main() {
+	router := denco.New()
+	if err := router.Build([]denco.Record{
+		{"/user/:name/:id", "route1"},
+	}); err != nil {
+		panic(err)
+	}
+
+	// 1. Using denco.Params.Get method.
+	_, params, _ := router.Lookup("/user/alice/1")
+	name := params.Get("name")
+	if name != "" {
+		fmt.Printf("Hello %s.\n", name) // prints "Hello alice.".
+	}
+
+	// 2. Finding it in a loop.
+	for _, param := range params {
+		if param.Name == "name" {
+			fmt.Printf("Hello %s.\n", param.Value) // prints "Hello alice.".
+		}
+	}
+}
+```
+
+## URL patterns
+
+Denco's route matching strategy is "most nearly matching".
+
+When the routes `/:name` and `/alice` have been built, the URI `/alice` matches the route `/alice`, not `/:name`,
+because `/alice` is a more exact match than `/:name`.
+
+As a longer example, when the routes below have been built:
+
+```
+/user/alice
+/user/:name
+/user/:name/:id
+/user/alice/:id
+/user/:id/bob
+```
+
+The resulting matches are:
+
+```
+/user/alice     => "/user/alice" (no match with "/user/:name")
+/user/bob       => "/user/:name"
+/user/naoina/1  => "/user/:name/:id"
+/user/alice/1   => "/user/alice/:id" (no match with "/user/:name/:id")
+/user/1/bob     => "/user/:id/bob" (no match with "/user/:name/:id")
+/user/alice/bob => "/user/alice/:id" (no match with "/user/:name/:id" and "/user/:id/bob")
+```
+
+## Limitations
+
+Denco has the following limitations:
+
+* The number of param records (such as `/:name`) must be less than 2^22
+* The number of elements in the internal slice must be less than 2^22
+
+## Benchmarks
+
+    cd $GOPATH/github.com/go-openapi/runtime/middleware/denco
+    go test -bench . -benchmem
+
+## License
+
+Denco is licensed under the MIT License.
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
new file mode 100644
index 00000000000..ecacc31ff68
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
@@ -0,0 +1,456 @@
+// Package denco provides a fast URL router.
+package denco
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+const (
+	// ParamCharacter is the special character for a path parameter.
+	ParamCharacter = ':'
+
+	// WildcardCharacter is the special character for a wildcard path parameter.
+	WildcardCharacter = '*'
+
+	// TerminationCharacter is the special character that marks the end of a path.
+	TerminationCharacter = '#'
+
+	// SeparatorCharacter separates path segments.
+	SeparatorCharacter = '/'
+
+	// MaxSize is the maximum size of records and of the internal slice.
+	MaxSize = (1 << 22) - 1
+)
+
+// Router represents a URL router.
+type Router struct {
+	// SizeHint is the maximum number of path parameters expected in the records passed to Build.
+	// SizeHint is used to determine the capacity of the memory to allocate.
+	// By default, SizeHint is derived from the records given to Build.
+	SizeHint int
+
+	static map[string]interface{}
+	param  *doubleArray
+}
+
+// New returns a new Router.
+func New() *Router {
+	return &Router{
+		SizeHint: -1,
+		static:   make(map[string]interface{}),
+		param:    newDoubleArray(),
+	}
+}
+
+// Lookup returns the data and path parameters that are associated with path.
+// params is a slice of Param, arranged in the order in which the parameters appeared.
+// e.g. when the built routing path is "/path/to/:id/:name" and the given path is "/path/to/1/alice", the params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}].
+func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) {
+	if data, found := rt.static[path]; found {
+		return data, nil, true
+	}
+	if len(rt.param.node) == 1 {
+		return nil, nil, false
+	}
+	nd, params, found := rt.param.lookup(path, make([]Param, 0, rt.SizeHint), 1)
+	if !found {
+		return nil, nil, false
+	}
+	for i := 0; i < len(params); i++ {
+		params[i].Name = nd.paramNames[i]
+	}
+	return nd.data, params, true
+}
+
+// Build builds the URL router from records.
+func (rt *Router) Build(records []Record) error {
+	statics, params := makeRecords(records)
+	if len(params) > MaxSize {
+		return fmt.Errorf("denco: too many records")
+	}
+	if rt.SizeHint < 0 {
+		rt.SizeHint = 0
+		for _, p := range params {
+			size := 0
+			for _, k := range p.Key {
+				if k == ParamCharacter || k == WildcardCharacter {
+					size++
+				}
+			}
+			if size > rt.SizeHint {
+				rt.SizeHint = size
+			}
+		}
+	}
+	for _, r := range statics {
+		rt.static[r.Key] = r.Value
+	}
+	if err := rt.param.build(params, 1, 0, make(map[int]struct{})); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Param represents the name and value of a path parameter.
+type Param struct {
+	Name  string
+	Value string
+}
+
+// Params represents a list of path parameters.
+type Params []Param
+
+// Get gets the first value associated with the given name.
+// If there are no values associated with the key, Get returns "".
+func (ps Params) Get(name string) string {
+	for _, p := range ps {
+		if p.Name == name {
+			return p.Value
+		}
+	}
+	return ""
+}
+
+type doubleArray struct {
+	bc   []baseCheck
+	node []*node
+}
+
+func newDoubleArray() *doubleArray {
+	return &doubleArray{
+		bc:   []baseCheck{0},
+		node: []*node{nil}, // The start index is adjusted to 1 because 0 is used as a mark of a non-existent node.
+	}
+}
+
+// baseCheck contains BASE, CHECK and Extra flags.
+// From the top, 22 bits of BASE, 2 bits of Extra flags and 8 bits of CHECK.
+//
+//  BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
+// |----------------------|--|--------|
+// 32                    10  8        0
+type baseCheck uint32
+
+func (bc baseCheck) Base() int {
+	return int(bc >> 10)
+}
+
+func (bc *baseCheck) SetBase(base int) {
+	*bc |= baseCheck(base) << 10
+}
+
+func (bc baseCheck) Check() byte {
+	return byte(bc)
+}
+
+func (bc *baseCheck) SetCheck(check byte) {
+	*bc |= baseCheck(check)
+}
+
+func (bc baseCheck) IsEmpty() bool {
+	return bc&0xfffffcff == 0
+}
+
+func (bc baseCheck) IsSingleParam() bool {
+	return bc&paramTypeSingle == paramTypeSingle
+}
+
+func (bc baseCheck) IsWildcardParam() bool {
+	return bc&paramTypeWildcard == paramTypeWildcard
+}
+
+func (bc baseCheck) IsAnyParam() bool {
+	return bc&paramTypeAny != 0
+}
+
+func (bc *baseCheck) SetSingleParam() {
+	*bc |= (1 << 8)
+}
+
+func (bc *baseCheck) SetWildcardParam() {
+	*bc |= (1 << 9)
+}
+
+const (
+	paramTypeSingle   = 0x0100
+	paramTypeWildcard = 0x0200
+	paramTypeAny      = 0x0300
+)
+
+func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Param, bool) {
+	indices := make([]uint64, 0, 1)
+	for i := 0; i < len(path); i++ {
+		if da.bc[idx].IsAnyParam() {
+			indices = append(indices, (uint64(i)<<32)|(uint64(idx)&0xffffffff))
+		}
+		c := path[i]
+		if idx = nextIndex(da.bc[idx].Base(), c); idx >= len(da.bc) || da.bc[idx].Check() != c {
+			goto BACKTRACKING
+		}
+	}
+	if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
+		return da.node[da.bc[next].Base()], params, true
+	}
+BACKTRACKING:
+	for j := len(indices) - 1; j >= 0; j-- {
+		i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff)
+		if da.bc[idx].IsSingleParam() {
+			idx := nextIndex(da.bc[idx].Base(), ParamCharacter)
+			if idx >= len(da.bc) {
+				break
+			}
+			next := NextSeparator(path, i)
+			params := append(params, Param{Value: path[i:next]})
+			if nd, params, found := da.lookup(path[next:], params, idx); found {
+				return nd, params, true
+			}
+		}
+		if da.bc[idx].IsWildcardParam() {
+			idx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
+			params := append(params, Param{Value: path[i:]})
+			return da.node[da.bc[idx].Base()], params, true
+		}
+	}
+	return nil, nil, false
+}
+
+// build builds a double-array from records.
+func (da *doubleArray) build(srcs []*record, idx, depth int, usedBase map[int]struct{}) error { + sort.Stable(recordSlice(srcs)) + base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase) + if err != nil { + return err + } + if leaf != nil { + nd, err := makeNode(leaf) + if err != nil { + return err + } + da.bc[idx].SetBase(len(da.node)) + da.node = append(da.node, nd) + } + for _, sib := range siblings { + da.setCheck(nextIndex(base, sib.c), sib.c) + } + for _, sib := range siblings { + records := srcs[sib.start:sib.end] + switch sib.c { + case ParamCharacter: + for _, r := range records { + next := NextSeparator(r.Key, depth+1) + name := r.Key[depth+1 : next] + r.paramNames = append(r.paramNames, name) + r.Key = r.Key[next:] + } + da.bc[idx].SetSingleParam() + if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { + return err + } + case WildcardCharacter: + r := records[0] + name := r.Key[depth+1 : len(r.Key)-1] + r.paramNames = append(r.paramNames, name) + r.Key = "" + da.bc[idx].SetWildcardParam() + if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { + return err + } + default: + if err := da.build(records, nextIndex(base, sib.c), depth+1, usedBase); err != nil { + return err + } + } + } + return nil +} + +// setBase sets BASE. +func (da *doubleArray) setBase(i, base int) { + da.bc[i].SetBase(base) +} + +// setCheck sets CHECK. +func (da *doubleArray) setCheck(i int, check byte) { + da.bc[i].SetCheck(check) +} + +// findEmptyIndex returns an index of unused BASE/CHECK node. +func (da *doubleArray) findEmptyIndex(start int) int { + i := start + for ; i < len(da.bc); i++ { + if da.bc[i].IsEmpty() { + break + } + } + return i +} + +// findBase returns good BASE. +func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) { + for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) { + base = nextIndex(idx, firstChar) + if _, used := usedBase[base]; used { + continue + } + i := 0 + for ; i < len(siblings); i++ { + next := nextIndex(base, siblings[i].c) + if len(da.bc) <= next { + da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...) + } + if !da.bc[next].IsEmpty() { + break + } + } + if i == len(siblings) { + break + } + } + usedBase[base] = struct{}{} + return base +} + +func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) { + siblings, leaf, err = makeSiblings(records, depth) + if err != nil { + return -1, nil, nil, err + } + if len(siblings) < 1 { + return -1, nil, leaf, nil + } + base = da.findBase(siblings, idx, usedBase) + if base > MaxSize { + return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice") + } + da.setBase(idx, base) + return base, siblings, leaf, err +} + +// node represents a node of Double-Array. +type node struct { + data interface{} + + // Names of path parameters. + paramNames []string +} + +// makeNode returns a new node from record. +func makeNode(r *record) (*node, error) { + dups := make(map[string]bool) + for _, name := range r.paramNames { + if dups[name] { + return nil, fmt.Errorf("denco: path parameter `%v' is duplicated in the key `%v'", name, r.Key) + } + dups[name] = true + } + return &node{data: r.Value, paramNames: r.paramNames}, nil +} + +// sibling represents an intermediate data of build for Double-Array. +type sibling struct { + // An index of start of duplicated characters. 
+ start int + + // An index of end of duplicated characters. + end int + + // A character of sibling. + c byte +} + +// nextIndex returns a next index of array of BASE/CHECK. +func nextIndex(base int, c byte) int { + return base ^ int(c) +} + +// makeSiblings returns slice of sibling. +func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, err error) { + var ( + pc byte + n int + ) + for i, r := range records { + if len(r.Key) <= depth { + leaf = r + continue + } + c := r.Key[depth] + switch { + case pc < c: + sib = append(sib, sibling{start: i, c: c}) + case pc == c: + continue + default: + return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted") + } + if n > 0 { + sib[n-1].end = i + } + pc = c + n++ + } + if n == 0 { + return nil, leaf, nil + } + sib[n-1].end = len(records) + return sib, leaf, nil +} + +// Record represents a record data for router construction. +type Record struct { + // Key for router construction. + Key string + + // Result value for Key. + Value interface{} +} + +// NewRecord returns a new Record. +func NewRecord(key string, value interface{}) Record { + return Record{ + Key: key, + Value: value, + } +} + +// record represents a record that use to build the Double-Array. +type record struct { + Record + paramNames []string +} + +// makeRecords returns the records that use to build Double-Arrays. +func makeRecords(srcs []Record) (statics, params []*record) { + termChar := string(TerminationCharacter) + paramPrefix := string(SeparatorCharacter) + string(ParamCharacter) + wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter) + for _, r := range srcs { + if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) { + r.Key += termChar + params = append(params, &record{Record: r}) + } else { + statics = append(statics, &record{Record: r}) + } + } + return statics, params +} + +// recordSlice represents a slice of Record for sort and implements the sort.Interface. +type recordSlice []*record + +// Len implements the sort.Interface.Len. +func (rs recordSlice) Len() int { + return len(rs) +} + +// Less implements the sort.Interface.Less. +func (rs recordSlice) Less(i, j int) bool { + return rs[i].Key < rs[j].Key +} + +// Swap implements the sort.Interface.Swap. +func (rs recordSlice) Swap(i, j int) { + rs[i], rs[j] = rs[j], rs[i] +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go new file mode 100644 index 00000000000..0886713c181 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go @@ -0,0 +1,106 @@ +package denco + +import ( + "net/http" +) + +// Mux represents a multiplexer for HTTP request. +type Mux struct{} + +// NewMux returns a new Mux. +func NewMux() *Mux { + return &Mux{} +} + +// GET is shorthand of Mux.Handler("GET", path, handler). +func (m *Mux) GET(path string, handler HandlerFunc) Handler { + return m.Handler("GET", path, handler) +} + +// POST is shorthand of Mux.Handler("POST", path, handler). +func (m *Mux) POST(path string, handler HandlerFunc) Handler { + return m.Handler("POST", path, handler) +} + +// PUT is shorthand of Mux.Handler("PUT", path, handler). +func (m *Mux) PUT(path string, handler HandlerFunc) Handler { + return m.Handler("PUT", path, handler) +} + +// HEAD is shorthand of Mux.Handler("HEAD", path, handler). 
+func (m *Mux) HEAD(path string, handler HandlerFunc) Handler { + return m.Handler("HEAD", path, handler) +} + +// Handler returns a handler for HTTP method. +func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler { + return Handler{ + Method: method, + Path: path, + Func: handler, + } +} + +// Build builds a http.Handler. +func (m *Mux) Build(handlers []Handler) (http.Handler, error) { + recordMap := make(map[string][]Record) + for _, h := range handlers { + recordMap[h.Method] = append(recordMap[h.Method], NewRecord(h.Path, h.Func)) + } + mux := newServeMux() + for m, records := range recordMap { + router := New() + if err := router.Build(records); err != nil { + return nil, err + } + mux.routers[m] = router + } + return mux, nil +} + +// Handler represents a handler of HTTP request. +type Handler struct { + // Method is an HTTP method. + Method string + + // Path is a routing path for handler. + Path string + + // Func is a function of handler of HTTP request. + Func HandlerFunc +} + +// The HandlerFunc type is aliased to type of handler function. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params) + +type serveMux struct { + routers map[string]*Router +} + +func newServeMux() *serveMux { + return &serveMux{ + routers: make(map[string]*Router), + } +} + +// ServeHTTP implements http.Handler interface. +func (mux *serveMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + handler, params := mux.handler(r.Method, r.URL.Path) + handler(w, r, params) +} + +func (mux *serveMux) handler(method, path string) (HandlerFunc, []Param) { + if router, found := mux.routers[method]; found { + if handler, params, found := router.Lookup(path); found { + return handler.(HandlerFunc), params + } + } + return NotFound, nil +} + +// NotFound replies to the request with an HTTP 404 not found error. +// NotFound is called when unknown HTTP method or a handler not found. +// If you want to use the your own NotFound handler, please overwrite this variable. +var NotFound = func(w http.ResponseWriter, r *http.Request, _ Params) { + http.NotFound(w, r) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go new file mode 100644 index 00000000000..edc1f6ab80a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go @@ -0,0 +1,12 @@ +package denco + +// NextSeparator returns an index of next separator in path. +func NextSeparator(path string, start int) int { + for start < len(path) { + if c := path[start]; c == '/' || c == TerminationCharacter { + break + } + start++ + } + return start +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/doc.go b/vendor/github.com/go-openapi/runtime/middleware/doc.go new file mode 100644 index 00000000000..eaf90606ac3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/doc.go @@ -0,0 +1,62 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+/*Package middleware provides the library with helper functions for serving swagger APIs.
+
+Pseudo middleware handler
+
+	import (
+		"net/http"
+
+		"github.com/go-openapi/errors"
+	)
+
+	func newCompleteMiddleware(ctx *Context) http.Handler {
+		return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+			// use context to lookup routes
+			if matched, ok := ctx.RouteInfo(r); ok {
+
+				if matched.NeedsAuth() {
+					if _, err := ctx.Authorize(r, matched); err != nil {
+						ctx.Respond(rw, r, matched.Produces, matched, err)
+						return
+					}
+				}
+
+				bound, validation := ctx.BindAndValidate(r, matched)
+				if validation != nil {
+					ctx.Respond(rw, r, matched.Produces, matched, validation)
+					return
+				}
+
+				result, err := matched.Handler.Handle(bound)
+				if err != nil {
+					ctx.Respond(rw, r, matched.Produces, matched, err)
+					return
+				}
+
+				ctx.Respond(rw, r, matched.Produces, matched, result)
+				return
+			}
+
+			// Not found, check if it exists in the other methods first
+			if others := ctx.AllowedMethods(r); len(others) > 0 {
+				ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
+				return
+			}
+			ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
+		})
+	}
+*/
+package middleware
diff --git a/vendor/github.com/go-openapi/runtime/middleware/go18.go b/vendor/github.com/go-openapi/runtime/middleware/go18.go
new file mode 100644
index 00000000000..75c762c0948
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/go18.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package middleware
+
+import "net/url"
+
+func pathUnescape(path string) (string, error) {
+	return url.PathUnescape(path)
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
new file mode 100644
index 00000000000..3e342258bca
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
@@ -0,0 +1,326 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// this file was taken from the github.com/golang/gddo repository
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// Copy returns a shallow copy of the header.
+func Copy(header http.Header) http.Header {
+	h := make(http.Header)
+	for k, vs := range header {
+		h[k] = vs
+	}
+	return h
+}
+
+var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC}
+
+// ParseTime parses the header as time.
The zero value is returned if the +// header is not present or there is an error parsing the +// header. +func ParseTime(header http.Header, key string) time.Time { + if s := header.Get(key); s != "" { + for _, layout := range timeLayouts { + if t, err := time.Parse(layout, s); err == nil { + return t.UTC() + } + } + } + return time.Time{} +} + +// ParseList parses a comma separated list of values. Commas are ignored in +// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is +// trimmed. +func ParseList(header http.Header, key string) []string { + var result []string + for _, s := range header[http.CanonicalHeaderKey(key)] { + begin := 0 + end := 0 + escape := false + quote := false + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + end = i + 1 + case quote: + switch b { + case '\\': + escape = true + case '"': + quote = false + } + end = i + 1 + case b == '"': + quote = true + end = i + 1 + case octetTypes[b]&isSpace != 0: + if begin == end { + begin = i + 1 + end = begin + } + case b == ',': + if begin < end { + result = append(result, s[begin:end]) + } + begin = i + 1 + end = begin + default: + end = i + 1 + } + } + if begin < end { + result = append(result, s[begin:end]) + } + } + return result +} + +// ParseValueAndParams parses a comma separated list of values with optional +// semicolon separated name-value pairs. Content-Type and Content-Disposition +// headers are in this format. +func ParseValueAndParams(header http.Header, key string) (string, map[string]string) { + return parseValueAndParams(header.Get(key)) +} + +func parseValueAndParams(s string) (value string, params map[string]string) { + params = make(map[string]string) + value, s = expectTokenSlash(s) + if value == "" { + return + } + value = strings.ToLower(value) + s = skipSpace(s) + for strings.HasPrefix(s, ";") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +// AcceptSpec ... +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept2 ... +func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) { + for _, en := range ParseList(header, key) { + v, p := parseValueAndParams(en) + var spec AcceptSpec + spec.Value = v + spec.Q = 1.0 + if p != nil { + if q, ok := p["q"]; ok { + spec.Q, _ = expectQuality(q) + } + } + if spec.Q < 0.0 { + continue + } + specs = append(specs, spec) + } + + return +} + +// ParseAccept parses Accept* headers. 
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + for !strings.HasPrefix(s, "q=") && s != "" && !strings.HasPrefix(s, ",") { + s = skipSpace(s[1:]) + } + if strings.HasPrefix(s, "q=") { + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go new file mode 100644 index 00000000000..a9b6f27d3d3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go @@ -0,0 +1,98 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// this file was taken from the github.com/golang/gddo repository + +package middleware + +import ( + "net/http" + "strings" + + "github.com/go-openapi/runtime/middleware/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. 
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+	bestOffer := "identity"
+	bestQ := -1.0
+	specs := header.ParseAccept(r.Header, "Accept-Encoding")
+	for _, offer := range offers {
+		for _, spec := range specs {
+			if spec.Q > bestQ &&
+				(spec.Value == "*" || spec.Value == offer) {
+				bestQ = spec.Q
+				bestOffer = offer
+			}
+		}
+	}
+	if bestQ == 0 {
+		bestOffer = ""
+	}
+	return bestOffer
+}
+
+// NegotiateContentType returns the best offered content type for the request's
+// Accept header. If two offers match with equal weight, then the more specific
+// offer is preferred. For example, text/* trumps */*. If two offers match
+// with equal weight and specificity, then the offer earlier in the list is
+// preferred. If no offers match, then defaultOffer is returned.
+func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string {
+	bestOffer := defaultOffer
+	bestQ := -1.0
+	bestWild := 3
+	specs := header.ParseAccept(r.Header, "Accept")
+	for _, rawOffer := range offers {
+		offer := normalizeOffer(rawOffer)
+		// No Accept header: just return the first offer.
+		if len(specs) == 0 {
+			return rawOffer
+		}
+		for _, spec := range specs {
+			switch {
+			case spec.Q == 0.0:
+				// ignore
+			case spec.Q < bestQ:
+				// the best match so far has a higher weight; skip this spec
+			case spec.Value == "*/*":
+				if spec.Q > bestQ || bestWild > 2 {
+					bestQ = spec.Q
+					bestWild = 2
+					bestOffer = rawOffer
+				}
+			case strings.HasSuffix(spec.Value, "/*"):
+				if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) &&
+					(spec.Q > bestQ || bestWild > 1) {
+					bestQ = spec.Q
+					bestWild = 1
+					bestOffer = rawOffer
+				}
+			default:
+				if spec.Value == offer &&
+					(spec.Q > bestQ || bestWild > 0) {
+					bestQ = spec.Q
+					bestWild = 0
+					bestOffer = rawOffer
+				}
+			}
+		}
+	}
+	return bestOffer
+}
+
+func normalizeOffers(orig []string) (norm []string) {
+	for _, o := range orig {
+		norm = append(norm, normalizeOffer(o))
+	}
+	return
+}
+
+func normalizeOffer(orig string) string {
+	return strings.SplitN(orig, ";", 2)[0]
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go
new file mode 100644
index 00000000000..466f553db48
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go
@@ -0,0 +1,48 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
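As a usage illustration, here is a minimal sketch (not part of the vendored diff; the route, header value, and offers are invented) of how the negotiation helpers above behave. An exact offer outweighs a `text/*` wildcard carrying a lower quality:

package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	r, _ := http.NewRequest("GET", "/pets", nil)
	// text/* is acceptable at q=0.5; application/json defaults to q=1.0.
	r.Header.Set("Accept", "text/*;q=0.5, application/json")

	best := middleware.NegotiateContentType(
		r,
		[]string{"text/plain", "application/json"},
		"application/json", // defaultOffer, returned when nothing matches
	)
	fmt.Println(best) // application/json: the exact match wins on weight
}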
+ +package middleware + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +type errorResp struct { + code int + response interface{} + headers http.Header +} + +func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + for k, v := range e.headers { + for _, val := range v { + rw.Header().Add(k, val) + } + } + if e.code > 0 { + rw.WriteHeader(e.code) + } else { + rw.WriteHeader(http.StatusInternalServerError) + } + if err := producer.Produce(rw, e.response); err != nil { + panic(err) + } +} + +// NotImplemented the error response when the response is not implemented +func NotImplemented(message string) Responder { + return &errorResp{http.StatusNotImplemented, message, make(http.Header)} +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/operation.go b/vendor/github.com/go-openapi/runtime/middleware/operation.go new file mode 100644 index 00000000000..1175a63cf29 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/operation.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import "net/http" + +// NewOperationExecutor creates a context aware middleware that handles the operations after routing +func NewOperationExecutor(ctx *Context) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + // use context to lookup routes + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + + route.Handler.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go new file mode 100644 index 00000000000..2088605f98f --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/parameter.go @@ -0,0 +1,481 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
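A hedged sketch (not part of the diff; the listen address and message are assumptions) of using the NotImplemented responder above as a stand-in handler until an operation is wired up:

package main

import (
	"log"
	"net/http"

	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware"
)

func main() {
	// Answer every request with the 501 responder defined in
	// not_implemented.go, serialized by the stock JSON producer.
	h := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		middleware.NotImplemented("operation not wired up yet").
			WriteResponse(rw, runtime.JSONProducer())
	})
	log.Fatal(http.ListenAndServe(":8080", h)) // assumed port
}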
+
+package middleware
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+	"strconv"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+
+	"github.com/go-openapi/runtime"
+)
+
+const defaultMaxMemory = 32 << 20
+
+var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+
+func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder {
+	binder := new(untypedParamBinder)
+	binder.Name = param.Name
+	binder.parameter = &param
+	binder.formats = formats
+	if param.In != "body" {
+		binder.validator = validate.NewParamValidator(&param, formats)
+	} else {
+		binder.validator = validate.NewSchemaValidator(param.Schema, spec, param.Name, formats)
+	}
+
+	return binder
+}
+
+type untypedParamBinder struct {
+	parameter *spec.Parameter
+	formats   strfmt.Registry
+	Name      string
+	validator validate.EntityValidator
+}
+
+func (p *untypedParamBinder) Type() reflect.Type {
+	return p.typeForSchema(p.parameter.Type, p.parameter.Format, p.parameter.Items)
+}
+
+func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items) reflect.Type {
+	switch tpe {
+	case "boolean":
+		return reflect.TypeOf(true)
+
+	case "string":
+		if tt, ok := p.formats.GetType(format); ok {
+			return tt
+		}
+		return reflect.TypeOf("")
+
+	case "integer":
+		switch format {
+		case "int8":
+			return reflect.TypeOf(int8(0))
+		case "int16":
+			return reflect.TypeOf(int16(0))
+		case "int32":
+			return reflect.TypeOf(int32(0))
+		case "int64":
+			return reflect.TypeOf(int64(0))
+		default:
+			return reflect.TypeOf(int64(0))
+		}
+
+	case "number":
+		switch format {
+		case "float":
+			return reflect.TypeOf(float32(0))
+		case "double":
+			return reflect.TypeOf(float64(0))
+		}
+
+	case "array":
+		if items == nil {
+			return nil
+		}
+		itemsType := p.typeForSchema(items.Type, items.Format, items.Items)
+		if itemsType == nil {
+			return nil
+		}
+		return reflect.MakeSlice(reflect.SliceOf(itemsType), 0, 0).Type()
+
+	case "file":
+		return reflect.TypeOf(&runtime.File{}).Elem()
+
+	case "object":
+		return reflect.TypeOf(map[string]interface{}{})
+	}
+	return nil
+}
+
+func (p *untypedParamBinder) allowsMulti() bool {
+	return p.parameter.In == "query" || p.parameter.In == "formData"
+}
+
+func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) {
+	name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type
+	if tpe == "array" {
+		if cf == "multi" {
+			if !p.allowsMulti() {
+				return nil, false, false, errors.InvalidCollectionFormat(name, in, cf)
+			}
+			vv, hasKey, _ := values.GetOK(name)
+			return vv, false, hasKey, nil
+		}
+
+		v, hk, hv := values.GetOK(name)
+		if !hv {
+			return nil, false, hk, nil
+		}
+		d, c, e := p.readFormattedSliceFieldValue(v[len(v)-1], target)
+		return d, c, hk, e
+	}
+
+	vv, hk, _ := values.GetOK(name)
+	return vv, false, hk, nil
+}
+
+func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, target reflect.Value) error {
+	// fmt.Println("binding", p.name, "as", p.Type())
+	switch p.parameter.In {
+	case "query":
+		data, custom, hasKey, err := p.readValue(runtime.Values(request.URL.Query()), target)
+		if err != nil {
+			return err
+		}
+		if custom {
+			return nil
+		}
+
+		return p.bindValue(data, hasKey, target)
+
+	case "header":
+		data, custom, hasKey, err :=
p.readValue(runtime.Values(request.Header), target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "path": + data, custom, hasKey, err := p.readValue(routeParams, target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "formData": + var err error + var mt string + + mt, _, e := runtime.ContentType(request.Header) + if e != nil { + // because of the interface conversion go thinks the error is not nil + // so we first check for nil and then set the err var if it's not nil + err = e + } + + if err != nil { + return errors.InvalidContentType("", []string{"multipart/form-data", "application/x-www-form-urlencoded"}) + } + + if mt != "multipart/form-data" && mt != "application/x-www-form-urlencoded" { + return errors.InvalidContentType(mt, []string{"multipart/form-data", "application/x-www-form-urlencoded"}) + } + + if mt == "multipart/form-data" { + if err = request.ParseMultipartForm(defaultMaxMemory); err != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", err) + } + } + + if err = request.ParseForm(); err != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", err) + } + + if p.parameter.Type == "file" { + file, header, ffErr := request.FormFile(p.parameter.Name) + if ffErr != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", ffErr) + } + target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header})) + return nil + } + + if request.MultipartForm != nil { + data, custom, hasKey, rvErr := p.readValue(runtime.Values(request.MultipartForm.Value), target) + if rvErr != nil { + return rvErr + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + } + data, custom, hasKey, err := p.readValue(runtime.Values(request.PostForm), target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "body": + newValue := reflect.New(target.Type()) + if !runtime.HasBody(request) { + if p.parameter.Default != nil { + target.Set(reflect.ValueOf(p.parameter.Default)) + } + + return nil + } + if err := consumer.Consume(request.Body, newValue.Interface()); err != nil { + if err == io.EOF && p.parameter.Default != nil { + target.Set(reflect.ValueOf(p.parameter.Default)) + return nil + } + tpe := p.parameter.Type + if p.parameter.Format != "" { + tpe = p.parameter.Format + } + return errors.InvalidType(p.Name, p.parameter.In, tpe, nil) + } + target.Set(reflect.Indirect(newValue)) + return nil + default: + return errors.New(500, fmt.Sprintf("invalid parameter location %q", p.parameter.In)) + } +} + +func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error { + if p.parameter.Type == "array" { + return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey) + } + var d string + if len(data) > 0 { + d = data[len(data)-1] + } + return p.setFieldValue(target, p.parameter.Default, d, hasKey) +} + +func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error { + tpe := p.parameter.Type + if p.parameter.Format != "" { + tpe = p.parameter.Format + } + + if (!hasKey || (!p.parameter.AllowEmptyValue && data == "")) && p.parameter.Required && p.parameter.Default == nil { + return errors.Required(p.Name, p.parameter.In) + } + + ok, err := p.tryUnmarshaler(target, defaultValue, data) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, 
data) + } + if ok { + return nil + } + + defVal := reflect.Zero(target.Type()) + if defaultValue != nil { + defVal = reflect.ValueOf(defaultValue) + } + + if tpe == "byte" { + if data == "" { + if target.CanSet() { + target.SetBytes(defVal.Bytes()) + } + return nil + } + + b, err := base64.StdEncoding.DecodeString(data) + if err != nil { + b, err = base64.URLEncoding.DecodeString(data) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + } + if target.CanSet() { + target.SetBytes(b) + } + return nil + } + + switch target.Kind() { + case reflect.Bool: + if data == "" { + if target.CanSet() { + target.SetBool(defVal.Bool()) + } + return nil + } + b, err := swag.ConvertBool(data) + if err != nil { + return err + } + if target.CanSet() { + target.SetBool(b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeOf(int64(0))) + target.SetInt(rd.Int()) + } + return nil + } + i, err := strconv.ParseInt(data, 10, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowInt(i) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetInt(i) + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeOf(uint64(0))) + target.SetUint(rd.Uint()) + } + return nil + } + u, err := strconv.ParseUint(data, 10, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowUint(u) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetUint(u) + } + + case reflect.Float32, reflect.Float64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeOf(float64(0))) + target.SetFloat(rd.Float()) + } + return nil + } + f, err := strconv.ParseFloat(data, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowFloat(f) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetFloat(f) + } + + case reflect.String: + value := data + if value == "" { + value = defVal.String() + } + // validate string + if target.CanSet() { + target.SetString(value) + } + + case reflect.Ptr: + if data == "" && defVal.Kind() == reflect.Ptr { + if target.CanSet() { + target.Set(defVal) + } + return nil + } + newVal := reflect.New(target.Type().Elem()) + if err := p.setFieldValue(reflect.Indirect(newVal), defVal, data, hasKey); err != nil { + return err + } + if target.CanSet() { + target.Set(newVal) + } + + default: + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + return nil +} + +func (p *untypedParamBinder) tryUnmarshaler(target reflect.Value, defaultValue interface{}, data string) (bool, error) { + if !target.CanSet() { + return false, nil + } + // When a type implements encoding.TextUnmarshaler we'll use that instead of reflecting some more + if reflect.PtrTo(target.Type()).Implements(textUnmarshalType) { + if defaultValue != nil && len(data) == 0 { + target.Set(reflect.ValueOf(defaultValue)) + return true, nil + } + value := reflect.New(target.Type()) + if err := value.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(data)); err != nil { + return true, err + } + target.Set(reflect.Indirect(value)) + return true, nil + } + return false, nil +} + +func (p 
*untypedParamBinder) readFormattedSliceFieldValue(data string, target reflect.Value) ([]string, bool, error) { + ok, err := p.tryUnmarshaler(target, p.parameter.Default, data) + if err != nil { + return nil, true, err + } + if ok { + return nil, true, nil + } + + return swag.SplitByFormat(data, p.parameter.CollectionFormat), false, nil +} + +func (p *untypedParamBinder) setSliceFieldValue(target reflect.Value, defaultValue interface{}, data []string, hasKey bool) error { + sz := len(data) + if (!hasKey || (!p.parameter.AllowEmptyValue && (sz == 0 || (sz == 1 && data[0] == "")))) && p.parameter.Required && defaultValue == nil { + return errors.Required(p.Name, p.parameter.In) + } + + defVal := reflect.Zero(target.Type()) + if defaultValue != nil { + defVal = reflect.ValueOf(defaultValue) + } + + if !target.CanSet() { + return nil + } + if sz == 0 { + target.Set(defVal) + return nil + } + + value := reflect.MakeSlice(reflect.SliceOf(target.Type().Elem()), sz, sz) + + for i := 0; i < sz; i++ { + if err := p.setFieldValue(value.Index(i), nil, data[i], hasKey); err != nil { + return err + } + } + + target.Set(value) + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go b/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go new file mode 100644 index 00000000000..03385251e19 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go @@ -0,0 +1,9 @@ +// +build !go1.8 + +package middleware + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.QueryUnescape(path) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go new file mode 100644 index 00000000000..019c854295b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/redoc.go @@ -0,0 +1,103 @@ +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// RedocOpts configures the Redoc middlewares +type RedocOpts struct { + // BasePath for the UI path, defaults to: / + BasePath string + // Path combines with BasePath for the full UI path, defaults to: docs + Path string + // SpecURL the url to find the spec for + SpecURL string + // RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js + RedocURL string + // Title for the documentation site, default to: API documentation + Title string +} + +// EnsureDefaults in case some options are missing +func (r *RedocOpts) EnsureDefaults() { + if r.BasePath == "" { + r.BasePath = "/" + } + if r.Path == "" { + r.Path = "docs" + } + if r.SpecURL == "" { + r.SpecURL = "/swagger.json" + } + if r.RedocURL == "" { + r.RedocURL = redocLatest + } + if r.Title == "" { + r.Title = "API documentation" + } +} + +// Redoc creates a middleware to serve a documentation site for a swagger spec. +// This allows for altering the spec before starting the http listener. 
+//
+func Redoc(opts RedocOpts, next http.Handler) http.Handler {
+	opts.EnsureDefaults()
+
+	pth := path.Join(opts.BasePath, opts.Path)
+	tmpl := template.Must(template.New("redoc").Parse(redocTemplate))
+
+	buf := bytes.NewBuffer(nil)
+	_ = tmpl.Execute(buf, opts)
+	b := buf.Bytes()
+
+	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == pth {
+			rw.Header().Set("Content-Type", "text/html; charset=utf-8")
+			rw.WriteHeader(http.StatusOK)
+
+			_, _ = rw.Write(b)
+			return
+		}
+
+		if next == nil {
+			rw.Header().Set("Content-Type", "text/plain")
+			rw.WriteHeader(http.StatusNotFound)
+			_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
+			return
+		}
+		next.ServeHTTP(rw, r)
+	})
+}
+
+const (
+	redocLatest   = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js"
+	redocTemplate = `<!DOCTYPE html>
+<html>
+  <head>
+    <title>{{ .Title }}</title>
+    <!-- needed for adaptive design -->
+    <meta charset="utf-8"/>
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+
+    <style>
+      body {
+        margin: 0;
+        padding: 0;
+      }
+    </style>
+  </head>
+  <body>
+    <redoc spec-url='{{ .SpecURL }}'></redoc>
+    <script src="{{ .RedocURL }}"> </script>
+  </body>
+</html>
+`
+)
diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go
new file mode 100644
index 00000000000..4e1c0ab58b9
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/request.go
@@ -0,0 +1,104 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+	"net/http"
+	"reflect"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/runtime"
+)
+
+// untypedRequestBinder binds and validates the data from an HTTP request
+type untypedRequestBinder struct {
+	Spec         *spec.Swagger
+	Parameters   map[string]spec.Parameter
+	Formats      strfmt.Registry
+	paramBinders map[string]*untypedParamBinder
+}
+
+// newUntypedRequestBinder creates a new binder for reading a request.
+func newUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedRequestBinder {
+	binders := make(map[string]*untypedParamBinder)
+	for fieldName, param := range parameters {
+		binders[fieldName] = newUntypedParamBinder(param, spec, formats)
+	}
+	return &untypedRequestBinder{
+		Parameters:   parameters,
+		paramBinders: binders,
+		Spec:         spec,
+		Formats:      formats,
+	}
+}
+
+// Bind performs the data binding and validation
+func (o *untypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data interface{}) error {
+	val := reflect.Indirect(reflect.ValueOf(data))
+	isMap := val.Kind() == reflect.Map
+	var result []error
+	debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
+	for fieldName, param := range o.Parameters {
+		binder := o.paramBinders[fieldName]
+		debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
+		var target reflect.Value
+		if !isMap {
+			binder.Name = fieldName
+			target = val.FieldByName(fieldName)
+		}
+
+		if isMap {
+			tpe := binder.Type()
+			if tpe == nil {
+				if param.Schema.Type.Contains("array") {
+					tpe = reflect.TypeOf([]interface{}{})
+				} else {
+					tpe = reflect.TypeOf(map[string]interface{}{})
+				}
+			}
+			target = reflect.Indirect(reflect.New(tpe))
+		}
+
+		if !target.IsValid() {
+			result = append(result, errors.New(500, "parameter name %q is an unknown field", binder.Name))
+			continue
+		}
+
+		if err := binder.Bind(request, routeParams, consumer, target); err != nil {
+			result = append(result, err)
+			continue
+		}
+
+		if binder.validator != nil {
+			rr := binder.validator.Validate(target.Interface())
+			if rr != nil && rr.HasErrors() {
+				result = append(result, rr.AsError())
+			}
+		}
+
+		if isMap {
+			val.SetMapIndex(reflect.ValueOf(param.Name), target)
+		}
+	}
+
+	if len(result) > 0 {
+		return errors.CompositeValidationError(result...)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go
new file mode 100644
index 00000000000..6d797e82c98
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/router.go
@@ -0,0 +1,478 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+	"fmt"
+	"net/http"
+	fpath "path"
+	"regexp"
+	"strings"
+
+	"github.com/go-openapi/runtime/security"
+
+	"github.com/go-openapi/analysis"
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/loads"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/runtime/middleware/denco"
+)
+
+// RouteParam is an object to capture route params in a framework-agnostic way.
+// implementations of the muxer should use these route params to communicate with the +// swagger framework +type RouteParam struct { + Name string + Value string +} + +// RouteParams the collection of route params +type RouteParams []RouteParam + +// Get gets the value for the route param for the specified key +func (r RouteParams) Get(name string) string { + vv, _, _ := r.GetOK(name) + if len(vv) > 0 { + return vv[len(vv)-1] + } + return "" +} + +// GetOK gets the value but also returns booleans to indicate if a key or value +// is present. This aids in validation and satisfies an interface in use there +// +// The returned values are: data, has key, has value +func (r RouteParams) GetOK(name string) ([]string, bool, bool) { + for _, p := range r { + if p.Name == name { + return []string{p.Value}, true, p.Value != "" + } + } + return nil, false, false +} + +// NewRouter creates a new context aware router middleware +func NewRouter(ctx *Context, next http.Handler) http.Handler { + if ctx.router == nil { + ctx.router = DefaultRouter(ctx.spec, ctx.api) + } + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if _, rCtx, ok := ctx.RouteInfo(r); ok { + next.ServeHTTP(rw, rCtx) + return + } + + // Not found, check if it exists in the other methods first + if others := ctx.AllowedMethods(r); len(others) > 0 { + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) + return + } + + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) + }) +} + +// RoutableAPI represents an interface for things that can serve +// as a provider of implementations for the swagger router +type RoutableAPI interface { + HandlerFor(string, string) (http.Handler, bool) + ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error) + ConsumersFor([]string) map[string]runtime.Consumer + ProducersFor([]string) map[string]runtime.Producer + AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator + Authorizer() runtime.Authorizer + Formats() strfmt.Registry + DefaultProduces() string + DefaultConsumes() string +} + +// Router represents a swagger aware router +type Router interface { + Lookup(method, path string) (*MatchedRoute, bool) + OtherMethods(method, path string) []string +} + +type defaultRouteBuilder struct { + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + records map[string][]denco.Record +} + +type defaultRouter struct { + spec *loads.Document + routers map[string]*denco.Router +} + +func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder { + return &defaultRouteBuilder{ + spec: spec, + analyzer: analysis.New(spec.Spec()), + api: api, + records: make(map[string][]denco.Record), + } +} + +// DefaultRouter creates a default implemenation of the router +func DefaultRouter(spec *loads.Document, api RoutableAPI) Router { + builder := newDefaultRouteBuilder(spec, api) + if spec != nil { + for method, paths := range builder.analyzer.Operations() { + for path, operation := range paths { + fp := fpath.Join(spec.BasePath(), path) + debugLog("adding route %s %s %q", method, fp, operation.ID) + builder.AddRoute(method, fp, operation) + } + } + } + return builder.Build() +} + +// RouteAuthenticator is an authenticator that can compose several authenticators together. +// It also knows when it contains an authenticator that allows for anonymous pass through. 
+// Contains a group of 1 or more authenticators that have a logical AND relationship +type RouteAuthenticator struct { + Authenticator map[string]runtime.Authenticator + Schemes []string + Scopes map[string][]string + allScopes []string + commonScopes []string + allowAnonymous bool +} + +func (ra *RouteAuthenticator) AllowsAnonymous() bool { + return ra.allowAnonymous +} + +// AllScopes returns a list of unique scopes that is the combination +// of all the scopes in the requirements +func (ra *RouteAuthenticator) AllScopes() []string { + return ra.allScopes +} + +// CommonScopes returns a list of unique scopes that are common in all the +// scopes in the requirements +func (ra *RouteAuthenticator) CommonScopes() []string { + return ra.commonScopes +} + +// Authenticate Authenticator interface implementation +func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { + if ra.allowAnonymous { + route.Authenticator = ra + return true, nil, nil + } + // iterate in proper order + var lastResult interface{} + for _, scheme := range ra.Schemes { + if authenticator, ok := ra.Authenticator[scheme]; ok { + applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{ + Request: req, + RequiredScopes: ra.Scopes[scheme], + }) + if !applies { + return false, nil, nil + } + if err != nil { + route.Authenticator = ra + return true, nil, err + } + lastResult = princ + } + } + route.Authenticator = ra + return true, lastResult, nil +} + +func stringSliceUnion(slices ...[]string) []string { + unique := make(map[string]struct{}) + var result []string + for _, slice := range slices { + for _, entry := range slice { + if _, ok := unique[entry]; ok { + continue + } + unique[entry] = struct{}{} + result = append(result, entry) + } + } + return result +} + +func stringSliceIntersection(slices ...[]string) []string { + unique := make(map[string]int) + var intersection []string + + total := len(slices) + var emptyCnt int + for _, slice := range slices { + if len(slice) == 0 { + emptyCnt++ + continue + } + + for _, entry := range slice { + unique[entry]++ + if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices + intersection = append(intersection, entry) + } + } + } + + return intersection +} + +// RouteAuthenticators represents a group of authenticators that represent a logical OR +type RouteAuthenticators []RouteAuthenticator + +// AllowsAnonymous returns true when there is an authenticator that means optional auth +func (ras RouteAuthenticators) AllowsAnonymous() bool { + for _, ra := range ras { + if ra.AllowsAnonymous() { + return true + } + } + return false +} + +// Authenticate method implemention so this collection can be used as authenticator +func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { + var lastError error + var allowsAnon bool + var anonAuth RouteAuthenticator + + for _, ra := range ras { + if ra.AllowsAnonymous() { + anonAuth = ra + allowsAnon = true + continue + } + applies, usr, err := ra.Authenticate(req, route) + if !applies || err != nil || usr == nil { + if err != nil { + lastError = err + } + continue + } + return applies, usr, nil + } + + if allowsAnon && lastError == nil { + route.Authenticator = &anonAuth + return true, nil, lastError + } + return lastError != nil, nil, lastError +} + +type routeEntry struct { + PathPattern string + BasePath string + Operation *spec.Operation + Consumes []string + Consumers 
map[string]runtime.Consumer + Produces []string + Producers map[string]runtime.Producer + Parameters map[string]spec.Parameter + Handler http.Handler + Formats strfmt.Registry + Binder *untypedRequestBinder + Authenticators RouteAuthenticators + Authorizer runtime.Authorizer +} + +// MatchedRoute represents the route that was matched in this request +type MatchedRoute struct { + routeEntry + Params RouteParams + Consumer runtime.Consumer + Producer runtime.Producer + Authenticator *RouteAuthenticator +} + +// HasAuth returns true when the route has a security requirement defined +func (m *MatchedRoute) HasAuth() bool { + return len(m.Authenticators) > 0 +} + +// NeedsAuth returns true when the request still +// needs to perform authentication +func (m *MatchedRoute) NeedsAuth() bool { + return m.HasAuth() && m.Authenticator == nil +} + +func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { + mth := strings.ToUpper(method) + debugLog("looking up route for %s %s", method, path) + if Debug { + if len(d.routers) == 0 { + debugLog("there are no known routers") + } + for meth := range d.routers { + debugLog("got a router for %s", meth) + } + } + if router, ok := d.routers[mth]; ok { + if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { + if entry, ok := m.(*routeEntry); ok { + debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) + var params RouteParams + for _, p := range rp { + v, err := pathUnescape(p.Value) + if err != nil { + debugLog("failed to escape %q: %v", p.Value, err) + v = p.Value + } + // a workaround to handle fragment/composing parameters until they are supported in denco router + // check if this parameter is a fragment within a path segment + if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + 2; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' { + // extract fragment parameters + ep := strings.Split(entry.PathPattern[xpos:], "/")[0] + pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil) + for i, pname := range pnames { + params = append(params, RouteParam{Name: pname, Value: pvalues[i]}) + } + } else { + // use the parameter directly + params = append(params, RouteParam{Name: p.Name, Value: v}) + } + } + return &MatchedRoute{routeEntry: *entry, Params: params}, true + } + } else { + debugLog("couldn't find a route by path for %s %s", method, path) + } + } else { + debugLog("couldn't find a route by method for %s %s", method, path) + } + return nil, false +} + +func (d *defaultRouter) OtherMethods(method, path string) []string { + mn := strings.ToUpper(method) + var methods []string + for k, v := range d.routers { + if k != mn { + if _, _, ok := v.Lookup(fpath.Clean(path)); ok { + methods = append(methods, k) + continue + } + } + } + return methods +} + +// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco +var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) + +func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { + pleft := strings.Index(pattern, "{") + names = append(names, name) + if pleft < 0 { + if strings.HasSuffix(value, pattern) { + values = append(values, value[:len(value)-len(pattern)]) + } else { + values = append(values, "") + } + } else { + toskip := pattern[:pleft] + pright := strings.Index(pattern, "}") + vright := strings.Index(value, toskip) + if vright >= 0 { + values = 
append(values, value[:vright]) + } else { + values = append(values, "") + value = "" + } + return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values) + } + return names, values +} + +func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) { + mn := strings.ToUpper(method) + + bp := fpath.Clean(d.spec.BasePath()) + if len(bp) > 0 && bp[len(bp)-1] == '/' { + bp = bp[:len(bp)-1] + } + + debugLog("operation: %#v", *operation) + if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { + consumes := d.analyzer.ConsumesFor(operation) + produces := d.analyzer.ProducesFor(operation) + parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp)) + + record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ + BasePath: bp, + PathPattern: path, + Operation: operation, + Handler: handler, + Consumes: consumes, + Produces: produces, + Consumers: d.api.ConsumersFor(normalizeOffers(consumes)), + Producers: d.api.ProducersFor(normalizeOffers(produces)), + Parameters: parameters, + Formats: d.api.Formats(), + Binder: newUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()), + Authenticators: d.buildAuthenticators(operation), + Authorizer: d.api.Authorizer(), + }) + d.records[mn] = append(d.records[mn], record) + } +} + +func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { + requirements := d.analyzer.SecurityRequirementsFor(operation) + var auths []RouteAuthenticator + for _, reqs := range requirements { + var schemes []string + scopes := make(map[string][]string, len(reqs)) + var scopeSlices [][]string + for _, req := range reqs { + schemes = append(schemes, req.Name) + scopes[req.Name] = req.Scopes + scopeSlices = append(scopeSlices, req.Scopes) + } + + definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs) + authenticators := d.api.AuthenticatorsFor(definitions) + auths = append(auths, RouteAuthenticator{ + Authenticator: authenticators, + Schemes: schemes, + Scopes: scopes, + allScopes: stringSliceUnion(scopeSlices...), + commonScopes: stringSliceIntersection(scopeSlices...), + allowAnonymous: len(reqs) == 1 && reqs[0].Name == "", + }) + } + return auths +} + +func (d *defaultRouteBuilder) Build() *defaultRouter { + routers := make(map[string]*denco.Router) + for method, records := range d.records { + router := denco.New() + _ = router.Build(records) + routers[method] = router + } + return &defaultRouter{ + spec: d.spec, + routers: routers, + } +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/security.go b/vendor/github.com/go-openapi/runtime/middleware/security.go new file mode 100644 index 00000000000..2b061caefcb --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/security.go @@ -0,0 +1,39 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
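To make the route-building step above concrete, here is a small standalone sketch (the paths are invented for illustration) of the pathConverter rewrite that AddRoute applies before handing records to denco. Note that any suffix following a {param} inside a segment is swallowed by the `:$1` replacement, which is exactly the case the decodeCompositParams workaround recovers at lookup time:

package main

import (
	"fmt"
	"regexp"
)

// same pattern as pathConverter in router.go above
var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`)

func main() {
	// simple case: one swagger placeholder per path segment
	fmt.Println(pathConverter.ReplaceAllString("/v2/pets/{petId}", ":$1"))
	// -> /v2/pets/:petId

	// composite segment: the ".json" suffix is dropped by the rewrite,
	// so Lookup has to split it back out of the matched value
	fmt.Println(pathConverter.ReplaceAllString("/v2/files/{name}.json", ":$1"))
	// -> /v2/files/:name
}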
+ +package middleware + +import "net/http" + +func newSecureAPI(ctx *Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + if route != nil && !route.NeedsAuth() { + next.ServeHTTP(rw, r) + return + } + + _, rCtx, err := ctx.Authorize(r, route) + if err != nil { + ctx.Respond(rw, r, route.Produces, route, err) + return + } + r = rCtx + + next.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go new file mode 100644 index 00000000000..f0291429806 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/spec.go @@ -0,0 +1,48 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "net/http" + "path" +) + +// Spec creates a middleware to serve a swagger spec. +// This allows for altering the spec before starting the http listener. +// This can be useful if you want to serve the swagger spec from another path than /swagger.json +// +func Spec(basePath string, b []byte, next http.Handler) http.Handler { + if basePath == "" { + basePath = "/" + } + pth := path.Join(basePath, "swagger.json") + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if r.URL.Path == pth { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + //#nosec + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusNotFound) + return + } + next.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go new file mode 100644 index 00000000000..39a85f7d9e8 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go @@ -0,0 +1,286 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
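A minimal usage sketch (not part of the diff; the base path, port, and inline spec are assumptions) for the Spec middleware above: it short-circuits requests for <basePath>/swagger.json and delegates everything else to the next handler:

package main

import (
	"log"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	doc := []byte(`{"swagger":"2.0","info":{"title":"minimal","version":"1.0"},"paths":{}}`)
	next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		http.NotFound(rw, r) // fallback for non-spec paths
	})
	// /api/swagger.json serves doc; other paths fall through to next.
	log.Fatal(http.ListenAndServe(":8080", middleware.Spec("/api", doc, next)))
}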
+ +package untyped + +import ( + "fmt" + "net/http" + "sort" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +// NewAPI creates the default untyped API +func NewAPI(spec *loads.Document) *API { + var an *analysis.Spec + if spec != nil && spec.Spec() != nil { + an = analysis.New(spec.Spec()) + } + api := &API{ + spec: spec, + analyzer: an, + consumers: make(map[string]runtime.Consumer, 10), + producers: make(map[string]runtime.Producer, 10), + authenticators: make(map[string]runtime.Authenticator), + operations: make(map[string]map[string]runtime.OperationHandler), + ServeError: errors.ServeError, + Models: make(map[string]func() interface{}), + formats: strfmt.NewFormats(), + } + return api.WithJSONDefaults() +} + +// API represents an untyped mux for a swagger spec +type API struct { + spec *loads.Document + analyzer *analysis.Spec + DefaultProduces string + DefaultConsumes string + consumers map[string]runtime.Consumer + producers map[string]runtime.Producer + authenticators map[string]runtime.Authenticator + authorizer runtime.Authorizer + operations map[string]map[string]runtime.OperationHandler + ServeError func(http.ResponseWriter, *http.Request, error) + Models map[string]func() interface{} + formats strfmt.Registry +} + +// WithJSONDefaults loads the json defaults for this api +func (d *API) WithJSONDefaults() *API { + d.DefaultConsumes = runtime.JSONMime + d.DefaultProduces = runtime.JSONMime + d.consumers[runtime.JSONMime] = runtime.JSONConsumer() + d.producers[runtime.JSONMime] = runtime.JSONProducer() + return d +} + +// WithoutJSONDefaults clears the json defaults for this api +func (d *API) WithoutJSONDefaults() *API { + d.DefaultConsumes = "" + d.DefaultProduces = "" + delete(d.consumers, runtime.JSONMime) + delete(d.producers, runtime.JSONMime) + return d +} + +// Formats returns the registered string formats +func (d *API) Formats() strfmt.Registry { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + return d.formats +} + +// RegisterFormat registers a custom format validator +func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + d.formats.Add(name, format, validator) +} + +// RegisterAuth registers an auth handler in this api +func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { + if d.authenticators == nil { + d.authenticators = make(map[string]runtime.Authenticator) + } + d.authenticators[scheme] = handler +} + +// RegisterAuthorizer registers an authorizer handler in this api +func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { + d.authorizer = handler +} + +// RegisterConsumer registers a consumer for a media type. 
+func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { + if d.consumers == nil { + d.consumers = make(map[string]runtime.Consumer, 10) + } + d.consumers[strings.ToLower(mediaType)] = handler +} + +// RegisterProducer registers a producer for a media type +func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { + if d.producers == nil { + d.producers = make(map[string]runtime.Producer, 10) + } + d.producers[strings.ToLower(mediaType)] = handler +} + +// RegisterOperation registers an operation handler for an operation name +func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { + if d.operations == nil { + d.operations = make(map[string]map[string]runtime.OperationHandler, 30) + } + um := strings.ToUpper(method) + if b, ok := d.operations[um]; !ok || b == nil { + d.operations[um] = make(map[string]runtime.OperationHandler) + } + d.operations[um][path] = handler +} + +// OperationHandlerFor returns the operation handler for the specified id if it can be found +func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { + if d.operations == nil { + return nil, false + } + if pi, ok := d.operations[strings.ToUpper(method)]; ok { + h, ok := pi[path] + return h, ok + } + return nil, false +} + +// ConsumersFor gets the consumers for the specified media types +func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + result := make(map[string]runtime.Consumer) + for _, mt := range mediaTypes { + if consumer, ok := d.consumers[mt]; ok { + result[mt] = consumer + } + } + return result +} + +// ProducersFor gets the producers for the specified media types +func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + result := make(map[string]runtime.Producer) + for _, mt := range mediaTypes { + if producer, ok := d.producers[mt]; ok { + result[mt] = producer + } + } + return result +} + +// AuthenticatorsFor gets the authenticators for the specified security schemes +func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + result := make(map[string]runtime.Authenticator) + for k := range schemes { + if a, ok := d.authenticators[k]; ok { + result[k] = a + } + } + return result +} + +// Authorizer returns the registered authorizer +func (d *API) Authorizer() runtime.Authorizer { + return d.authorizer +} + +// Validate validates this API for any missing items +func (d *API) Validate() error { + return d.validate() +} + +// validateWith validates the registrations in this API against the provided spec analyzer +func (d *API) validate() error { + var consumes []string + for k := range d.consumers { + consumes = append(consumes, k) + } + + var produces []string + for k := range d.producers { + produces = append(produces, k) + } + + var authenticators []string + for k := range d.authenticators { + authenticators = append(authenticators, k) + } + + var operations []string + for m, v := range d.operations { + for p := range v { + operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) + } + } + + var definedAuths []string + for k := range d.spec.Spec().SecurityDefinitions { + definedAuths = append(definedAuths, k) + } + + if err := d.verify("consumes", consumes, d.analyzer.RequiredConsumes()); err != nil { + return err + } + if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil { + return err + } + if err := d.verify("operation", operations, 
d.analyzer.OperationMethodPaths()); err != nil { + return err + } + + requiredAuths := d.analyzer.RequiredSecuritySchemes() + if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil { + return err + } + if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil { + return err + } + return nil +} + +func (d *API) verify(name string, registrations []string, expectations []string) error { + sort.Strings(registrations) + sort.Strings(expectations) + + expected := map[string]struct{}{} + seen := map[string]struct{}{} + + for _, v := range expectations { + expected[v] = struct{}{} + } + + var unspecified []string + for _, v := range registrations { + seen[v] = struct{}{} + if _, ok := expected[v]; !ok { + unspecified = append(unspecified, v) + } + } + + for k := range seen { + delete(expected, k) + } + + var unregistered []string + for k := range expected { + unregistered = append(unregistered, k) + } + sort.Strings(unspecified) + sort.Strings(unregistered) + + if len(unregistered) > 0 || len(unspecified) > 0 { + return &errors.APIVerificationFailed{ + Section: name, + MissingSpecification: unspecified, + MissingRegistration: unregistered, + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go new file mode 100644 index 00000000000..9505c66c8c1 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/validation.go @@ -0,0 +1,123 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
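A hedged sketch of the registration-and-validation flow above, assuming a hypothetical ./swagger.json whose spec requires only JSON and a single GET /ping operation. Validate returns an APIVerificationFailed error whenever registrations and spec requirements diverge in either direction:

package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware/untyped"
)

func main() {
	doc, err := loads.Spec("./swagger.json") // hypothetical spec file
	if err != nil {
		log.Fatal(err)
	}
	api := untyped.NewAPI(doc) // JSON consumer/producer come pre-registered
	api.RegisterOperation("get", "/ping", runtime.OperationHandlerFunc(
		func(params interface{}) (interface{}, error) {
			return map[string]string{"status": "ok"}, nil
		}))
	if err := api.Validate(); err != nil {
		log.Fatal(err) // e.g. an unregistered consumer or auth scheme
	}
}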
+ +package middleware + +import ( + "mime" + "net/http" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + + "github.com/go-openapi/runtime" +) + +type validation struct { + context *Context + result []error + request *http.Request + route *MatchedRoute + bound map[string]interface{} +} + +// ContentType validates the content type of a request +func validateContentType(allowed []string, actual string) error { + debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", ")) + if len(allowed) == 0 { + return nil + } + mt, _, err := mime.ParseMediaType(actual) + if err != nil { + return errors.InvalidContentType(actual, allowed) + } + if swag.ContainsStringsCI(allowed, mt) { + return nil + } + if swag.ContainsStringsCI(allowed, "*/*") { + return nil + } + parts := strings.Split(actual, "/") + if len(parts) == 2 && swag.ContainsStringsCI(allowed, parts[0]+"/*") { + return nil + } + return errors.InvalidContentType(actual, allowed) +} + +func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation { + debugLog("validating request %s %s", request.Method, request.URL.EscapedPath()) + validate := &validation{ + context: ctx, + request: request, + route: route, + bound: make(map[string]interface{}), + } + + validate.contentType() + if len(validate.result) == 0 { + validate.responseFormat() + } + if len(validate.result) == 0 { + validate.parameters() + } + + return validate +} + +func (v *validation) parameters() { + debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) + if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil { + if result.Error() == "validation failure list" { + for _, e := range result.(*errors.Validation).Value.([]interface{}) { + v.result = append(v.result, e.(error)) + } + return + } + v.result = append(v.result, result) + } +} + +func (v *validation) contentType() { + if len(v.result) == 0 && runtime.HasBody(v.request) { + debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) + ct, _, req, err := v.context.ContentType(v.request) + if err != nil { + v.result = append(v.result, err) + } else { + v.request = req + } + + if len(v.result) == 0 { + if err := validateContentType(v.route.Consumes, ct); err != nil { + v.result = append(v.result, err) + } + } + if ct != "" && v.route.Consumer == nil { + cons, ok := v.route.Consumers[ct] + if !ok { + v.result = append(v.result, errors.New(500, "no consumer registered for %s", ct)) + } else { + v.route.Consumer = cons + } + } + } +} + +func (v *validation) responseFormat() { + if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" { + v.request = rCtx + v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces)) + } +} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go new file mode 100644 index 00000000000..9e51b42b59d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -0,0 +1,139 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bufio" + "io" + "net/http" + "strings" + + "github.com/go-openapi/swag" +) + +// CanHaveBody returns true if this method can have a body +func CanHaveBody(method string) bool { + mn := strings.ToUpper(method) + return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" +} + +// IsSafe returns true if this is a request with a safe method +func IsSafe(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn == "GET" || mn == "HEAD" +} + +// AllowsBody returns true if the request allows for a body +func AllowsBody(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn != "HEAD" +} + +// HasBody returns true if this method needs a content-type +func HasBody(r *http.Request) bool { + // happy case: we have a content length set + if r.ContentLength > 0 { + return true + } + + if r.Header.Get(http.CanonicalHeaderKey("content-length")) != "" { + // in this case, no Transfer-Encoding should be present + // we have a header set but it was explicitly set to 0, so we assume no body + return false + } + + rdr := newPeekingReader(r.Body) + r.Body = rdr + return rdr.HasContent() +} + +func newPeekingReader(r io.ReadCloser) *peekingReader { + if r == nil { + return nil + } + return &peekingReader{ + underlying: bufio.NewReader(r), + orig: r, + } +} + +type peekingReader struct { + underlying interface { + Buffered() int + Peek(int) ([]byte, error) + Read([]byte) (int, error) + } + orig io.ReadCloser +} + +func (p *peekingReader) HasContent() bool { + if p == nil { + return false + } + if p.underlying.Buffered() > 0 { + return true + } + b, err := p.underlying.Peek(1) + if err != nil { + return false + } + return len(b) > 0 +} + +func (p *peekingReader) Read(d []byte) (int, error) { + if p == nil { + return 0, io.EOF + } + return p.underlying.Read(d) +} + +func (p *peekingReader) Close() error { + p.underlying = nil + if p.orig != nil { + return p.orig.Close() + } + return nil +} + +// JSONRequest creates a new http request with json headers set +func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + req.Header.Add(HeaderContentType, JSONMime) + req.Header.Add(HeaderAccept, JSONMime) + return req, nil +} + +// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) +type Gettable interface { + GetOK(string) ([]string, bool, bool) +} + +// ReadSingleValue reads a single value from the source +func ReadSingleValue(values Gettable, name string) string { + vv, _, hv := values.GetOK(name) + if hv { + return vv[len(vv)-1] + } + return "" +} + +// ReadCollectionValue reads a collection value from a string data source +func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { + v := ReadSingleValue(values, name) + return swag.SplitByFormat(v, collectionFormat) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go new file mode 100644 index 
00000000000..476d26c3e77 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go @@ -0,0 +1,276 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package security + +import ( + "context" + "net/http" + "strings" + + "github.com/go-openapi/errors" + + "github.com/go-openapi/runtime" +) + +const ( + query = "query" + header = "header" +) + +// HttpAuthenticator is a function that authenticates a HTTP request +func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { + if request, ok := params.(*http.Request); ok { + return handler(request) + } + if scoped, ok := params.(*ScopedAuthRequest); ok { + return handler(scoped.Request) + } + return false, nil, nil + }) +} + +// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes +func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, interface{}, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { + if request, ok := params.(*ScopedAuthRequest); ok { + return handler(request) + } + return false, nil, nil + }) +} + +// UserPassAuthentication authentication function +type UserPassAuthentication func(string, string) (interface{}, error) + +// UserPassAuthenticationCtx authentication function with context.Context +type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, interface{}, error) + +// TokenAuthentication authentication function +type TokenAuthentication func(string) (interface{}, error) + +// TokenAuthenticationCtx authentication function with context.Context +type TokenAuthenticationCtx func(context.Context, string) (context.Context, interface{}, error) + +// ScopedTokenAuthentication authentication function +type ScopedTokenAuthentication func(string, []string) (interface{}, error) + +// ScopedTokenAuthenticationCtx authentication function with context.Context +type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, interface{}, error) + +var DefaultRealmName = "API" + +type secCtxKey uint8 + +const ( + failedBasicAuth secCtxKey = iota + oauth2SchemeName +) + +func FailedBasicAuth(r *http.Request) string { + return FailedBasicAuthCtx(r.Context()) +} + +func FailedBasicAuthCtx(ctx context.Context) string { + v, ok := ctx.Value(failedBasicAuth).(string) + if !ok { + return "" + } + return v +} + +func OAuth2SchemeName(r *http.Request) string { + return OAuth2SchemeNameCtx(r.Context()) +} + +func OAuth2SchemeNameCtx(ctx context.Context) string { + v, ok := ctx.Value(oauth2SchemeName).(string) + if !ok { + return "" + } + return v +} + +// BasicAuth creates a basic auth authenticator with the provided authentication function +func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { + return BasicAuthRealm(DefaultRealmName, 
authenticate) +} + +// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name +func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + if usr, pass, ok := r.BasicAuth(); ok { + p, err := authenticate(usr, pass) + if err != nil { + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + } + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context +func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { + return BasicAuthRealmCtx(DefaultRealmName, authenticate) +} + +// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context +func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + if usr, pass, ok := r.BasicAuth(); ok { + ctx, p, err := authenticate(r.Context(), usr, pass) + if err != nil { + ctx = context.WithValue(ctx, failedBasicAuth, realm) + } + *r = *r.WithContext(ctx) + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// APIKeyAuth creates an authenticator that uses a token for authorization. +// This token can be obtained from either a header or a query string +func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + p, err := authenticate(token) + return true, p, err + }) +} + +// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. 
+// This token can be obtained from either a header or a query string +func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + ctx, p, err := authenticate(r.Context(), token) + *r = *r.WithContext(ctx) + return true, p, err + }) +} + +// ScopedAuthRequest contains both a http request and the required scopes for a particular operation +type ScopedAuthRequest struct { + Request *http.Request + RequiredScopes []string +} + +// BearerAuth for use with oauth2 flows +func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { + var token string + hdr := r.Request.Header.Get("Authorization") + if strings.HasPrefix(hdr, prefix) { + token = strings.TrimPrefix(hdr, prefix) + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get("access_token") + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue("access_token") + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + *r.Request = *r.Request.WithContext(rctx) + p, err := authenticate(token, r.RequiredScopes) + return true, p, err + }) +} + +// BearerAuthCtx for use with oauth2 flows with support for context.Context. +func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { + var token string + hdr := r.Request.Header.Get("Authorization") + if strings.HasPrefix(hdr, prefix) { + token = strings.TrimPrefix(hdr, prefix) + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get("access_token") + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue("access_token") + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + ctx, p, err := authenticate(rctx, token, r.RequiredScopes) + *r.Request = *r.Request.WithContext(ctx) + return true, p, err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go new file mode 100644 index 00000000000..00c1a4d6a4c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authorizer.go @@ -0,0 +1,27 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package security + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// Authorized provides a default implementation of the Authorizer interface where all +// requests are authorized (successful) +func Authorized() runtime.Authorizer { + return runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil }) +} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go new file mode 100644 index 00000000000..3b011a0bff1 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/statuses.go @@ -0,0 +1,90 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +// Statuses lists the most common HTTP status codes to default message +// taken from https://httpstatuses.com/ +var Statuses = map[int]string{ + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 103: "Checkpoint", + 122: "URI too long", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Request Processed", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Request Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental Controls", + 451: "Wrong Exchange Server", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth Limit Exceeded", + 510: "Not 
Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go new file mode 100644 index 00000000000..c7fd04c3c5c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/text.go @@ -0,0 +1,117 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +// TextConsumer creates a new text consumer +func TextConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("TextConsumer requires a reader") // early exit + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + // If the buffer is empty, no need to unmarshal it, which causes a panic. + if len(b) == 0 { + data = "" + return nil + } + + if tu, ok := data.(encoding.TextUnmarshaler); ok { + err := tu.UnmarshalText(b) + if err != nil { + return fmt.Errorf("text consumer: %v", err) + } + + return nil + } + + t := reflect.TypeOf(data) + if data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t.Elem().Kind() == reflect.String { + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", + data, data, "can be resolved by supporting TextUnmarshaler interface") + }) +} + +// TextProducer creates a new text producer +func TextProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("TextProducer requires a writer") // early exit + } + + if data == nil { + return errors.New("no data given to produce text from") + } + + if tm, ok := data.(encoding.TextMarshaler); ok { + txt, err := tm.MarshalText() + if err != nil { + return fmt.Errorf("text producer: %v", err) + } + _, err = writer.Write(txt) + return err + } + + if str, ok := data.(error); ok { + _, err := writer.Write([]byte(str.Error())) + return err + } + + if str, ok := data.(fmt.Stringer); ok { + _, err := writer.Write([]byte(str.String())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + if v.Kind() != reflect.String { + return fmt.Errorf("%T is not a supported type by the TextProducer", data) + } + + _, err := writer.Write([]byte(v.String())) + return err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go new file mode 100644 index 00000000000..11f5732af4e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/values.go @@ -0,0 +1,19 @@ +package runtime + +// 
Values typically represent parameters on a http request. +type Values map[string][]string + +// GetOK returns the values collection for the given key. +// When the key is present in the map it will return true for hasKey. +// When the value is not empty it will return true for hasValue. +func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { + value, hasKey = v[key] + if !hasKey { + return + } + if len(value) == 0 { + return + } + hasValue = true + return +} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go new file mode 100644 index 00000000000..821c7393dfb --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/xml.go @@ -0,0 +1,36 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/xml" + "io" +) + +// XMLConsumer creates a new XML consumer +func XMLConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := xml.NewDecoder(reader) + return dec.Decode(data) + }) +} + +// XMLProducer creates a new XML producer +func XMLProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := xml.NewEncoder(writer) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/validate/.editorconfig b/vendor/github.com/go-openapi/validate/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/validate/.gitignore b/vendor/github.com/go-openapi/validate/.gitignore new file mode 100644 index 00000000000..fea8b84eca9 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml new file mode 100644 index 00000000000..dc8178c809d --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.golangci.yml @@ -0,0 +1,28 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 50 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - lll + - godox + - gocognit + - whitespace + - wsl + - funlen + - 
gochecknoglobals + - gochecknoinits + - scopelint diff --git a/vendor/github.com/go-openapi/validate/.travis.yml b/vendor/github.com/go-openapi/validate/.travis.yml new file mode 100644 index 00000000000..db0bb78f5d4 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.travis.yml @@ -0,0 +1,17 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.13.x +- 1.14.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +language: go +notifications: + slack: + secure: EmObnQuM9Mw8J9vpFaKKHqSMN4Wsr/A9+v7ewAD5cEhA0T1P4m7MbJMiJOhxUhj/X+BFh2DamW+P2lT8mybj5wg8wnkQ2BteKA8Tawi6f9PRw2NRheO8tAi8o/npLnlmet0kc93mn+oLuqHw36w4+j5mkOl2FghkfGiUVhwrhkCP7KXQN+3TU87e+/HzQumlJ3nsE+6terVxkH3PmaUTsS5ONaODZfuxFpfb7RsoEl3skHf6d+tr+1nViLxxly7558Nc33C+W1mr0qiEvMLZ+kJ/CpGWBJ6CUJM3jm6hNe2eMuIPwEK2hxZob8c7n22VPap4K6a0bBRoydoDXaba+2sD7Ym6ivDO/DVyL44VeBBLyIiIBylDGQdZH+6SoWm90Qe/i7tnY/T5Ao5igT8f3cfQY1c3EsTfqmlDfrhmACBmwSlgkdVBLTprHL63JMY24LWmh4jhxsmMRZhCL4dze8su1w6pLN/pD1pGHtKYCEVbdTmaM3PblNRFf12XB7qosmQsgUndH4Vq3bTbU0s1pKjeDhRyLvFzvR0TBbo0pDLEoF1A/i5GVFWa7yLZNUDudQERRh7qv/xBl2excIaQ1sV4DSVm7bAE9l6Kp+yeHQJW2uN6Y3X8wu9gB9nv9l5HBze7wh8KE6PyWAOLYYqZg9/sAtsv/2GcQqXcKFF1zcA= +script: +- gotestsum -f short-verbose -- -race ./... +- gotestsum -f short-verbose -- -timeout=20m -coverprofile=coverage.txt -covermode=atomic -args -enable-long ./... +- gotestsum -f short-verbose -- -timeout=30m -args -enable-go-swagger ./... +- go get -u github.com/go-openapi/runtime@master +- gotestsum -f short-verbose -- -timeout=30m github.com/go-openapi/runtime/... diff --git a/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/validate/LICENSE b/vendor/github.com/go-openapi/validate/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md new file mode 100644 index 00000000000..08fb352bcf3 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/README.md @@ -0,0 +1,6 @@ +# Validation helpers [![Build Status](https://travis-ci.org/go-openapi/validate.svg?branch=master)](https://travis-ci.org/go-openapi/validate) [![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/validate?status.svg)](http://godoc.org/github.com/go-openapi/validate) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/validate.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/validate)](https://goreportcard.com/report/github.com/go-openapi/validate) diff --git a/vendor/github.com/go-openapi/validate/debug.go b/vendor/github.com/go-openapi/validate/debug.go new file mode 100644 index 00000000000..8815fd93597 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/debug.go @@ -0,0 +1,47 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + // Debug is true when the SWAGGER_DEBUG env var is not empty. + // It enables a more verbose logging of validators. + Debug = os.Getenv("SWAGGER_DEBUG") != "" + // validateLogger is a debug logger for this package + validateLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + validateLogger = log.New(os.Stdout, "validate:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + validateLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go new file mode 100644 index 00000000000..26d54b7c23b --- /dev/null +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -0,0 +1,281 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "fmt" + "strings" + + "github.com/go-openapi/spec" +) + +// defaultValidator validates default values in a spec. +// According to Swagger spec, default values MUST validate their schema. +type defaultValidator struct { + SpecValidator *SpecValidator + visitedSchemas map[string]bool +} + +// resetVisited resets the internal state of visited schemas +func (d *defaultValidator) resetVisited() { + d.visitedSchemas = map[string]bool{} +} + +func isVisited(path string, visitedSchemas map[string]bool) bool { + found := visitedSchemas[path] + if !found { + // search for overlapping paths + frags := strings.Split(path, ".") + if len(frags) < 2 { + // shortcut exit on smaller paths + return found + } + last := len(frags) - 1 + var currentFragStr, parent string + for i := range frags { + if i == 0 { + currentFragStr = frags[last] + } else { + currentFragStr = strings.Join([]string{frags[last-i], currentFragStr}, ".") + } + if i < last { + parent = strings.Join(frags[0:last-i], ".") + } else { + parent = "" + } + if strings.HasSuffix(parent, currentFragStr) { + found = true + break + } + } + } + return found +} + +// beingVisited asserts a schema is being visited +func (d *defaultValidator) beingVisited(path string) { + d.visitedSchemas[path] = true +} + +// isVisited tells if a path has already been visited +func (d *defaultValidator) isVisited(path string) bool { + return isVisited(path, d.visitedSchemas) +} + +// Validate validates the default values declared in the swagger spec +func (d *defaultValidator) Validate() (errs *Result) { + errs = new(Result) + if d == nil || d.SpecValidator == nil { + return errs + } + d.resetVisited() + errs.Merge(d.validateDefaultValueValidAgainstSchema()) // error - + return errs +} + +func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { + // every default value that is specified must validate against the schema for that property + // headers, items, parameters, schema + + res := new(Result) + s := d.SpecValidator + + for method, pathItem := range s.analyzer.Operations() { + for path, op := range pathItem { + // parameters + for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + if param.Default != nil && param.Required { + res.AddWarnings(requiredHasDefaultMsg(param.Name, param.In)) + } + + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + + // Check simple parameters first + // default values provided must validate against their inline definition (no explicit schema) + if param.Default != nil && param.Schema == nil { + // check param default value is valid + red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Default) + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } + } + + // Recursively follows Items and Schemas + if param.Items != nil { + red := d.validateDefaultValueItemsAgainstSchema(param.Name, param.In, ¶m, param.Items) + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } + } + + if param.Schema != nil { + // Validate default value against schema + red := d.validateDefaultValueSchemaAgainstSchema(param.Name, param.In, param.Schema) + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } + } + } + 
+ if op.Responses != nil { + if op.Responses.Default != nil { + // Same constraint on default Response + res.Merge(d.validateDefaultInResponse(op.Responses.Default, jsonDefault, path, 0, op.ID)) + } + // Same constraint on regular Responses + if op.Responses.StatusCodeResponses != nil { // Safeguard + for code, r := range op.Responses.StatusCodeResponses { + res.Merge(d.validateDefaultInResponse(&r, "response", path, code, op.ID)) + } + } + } else if op.ID != "" { + // Empty op.ID means there is no meaningful operation: no need to report a specific message + res.AddErrors(noValidResponseMsg(op.ID)) + } + } + } + if s.spec.Spec().Definitions != nil { // Safeguard + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + for nm, sch := range s.spec.Spec().Definitions { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) + } + } + return res +} + +func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, responseType, path string, responseCode int, operationID string) *Result { + s := d.SpecValidator + + response, res := responseHelp.expandResponseRef(resp, path, s) + if !res.IsValid() { + return res + } + + responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) + + // nolint: dupl + if response.Headers != nil { // Safeguard + for nm, h := range response.Headers { + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + + if h.Default != nil { + red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Default) + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) + res.Merge(red) + } + } + + // Headers have inline definition, like params + if h.Items != nil { + red := d.validateDefaultValueItemsAgainstSchema(nm, "header", &h, h.Items) + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) + res.Merge(red) + } + } + + if _, err := compileRegexp(h.Pattern); err != nil { + res.AddErrors(invalidPatternInHeaderMsg(operationID, nm, responseName, h.Pattern, err)) + } + + // Headers don't have schema + } + } + if response.Schema != nil { + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + + red := d.validateDefaultValueSchemaAgainstSchema(responseCodeAsStr, "response", response.Schema) + if red.HasErrorsOrWarnings() { + // Additional message to make sure the context of the error is not lost + res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName)) + res.Merge(red) + } + } + return res +} + +func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in string, schema *spec.Schema) *Result { + if schema == nil || d.isVisited(path) { + // Avoids recursing if we are already done with that check + return nil + } + d.beingVisited(path) + res := new(Result) + s := d.SpecValidator + + if schema.Default != nil { + res.Merge(NewSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Default)) + } + if schema.Items != nil { + if schema.Items.Schema != nil { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".items.default", in, schema.Items.Schema)) + } + // Multiple schemas in items + if schema.Items.Schemas != nil { // Safeguard + for i, sch := range schema.Items.Schemas { + 
res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.items[%d].default", path, i), in, &sch)) + } + } + } + if _, err := compileRegexp(schema.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, schema.Pattern)) + } + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well) + res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema)) + } + for propName, prop := range schema.Properties { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) + } + for propName, prop := range schema.PatternProperties { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) + } + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema)) + } + if schema.AllOf != nil { + for i, aoSch := range schema.AllOf { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.allOf[%d]", path, i), in, &aoSch)) + } + } + return res +} + +// TODO: Temporary duplicated code. Need to refactor with examples +// nolint: dupl +func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { + res := new(Result) + s := d.SpecValidator + if items != nil { + if items.Default != nil { + res.Merge(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Default)) + } + if items.Items != nil { + res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items)) + } + if _, err := compileRegexp(items.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, items.Pattern)) + } + } + return res +} diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go new file mode 100644 index 00000000000..f5ca9a5d580 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/doc.go @@ -0,0 +1,85 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package validate provides methods to validate a swagger specification, +as well as tools to validate data against their schema. + +This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference +can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. + +Validating a specification + +Validates a spec document (from JSON or YAML) against the JSON schema for swagger, +then checks a number of extra rules that can't be expressed in JSON schema. 
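+
+For illustration only (an editorial addition, not part of the upstream file), a
+minimal program wiring these entry points together, assuming a swagger.json
+document on disk and the companion go-openapi/loads and go-openapi/strfmt
+packages:
+
+    package main
+
+    import (
+        "log"
+
+        "github.com/go-openapi/loads"
+        "github.com/go-openapi/strfmt"
+        "github.com/go-openapi/validate"
+    )
+
+    func main() {
+        // load (and expand) the spec document from disk
+        doc, err := loads.Spec("swagger.json")
+        if err != nil {
+            log.Fatal(err)
+        }
+        // check it against the swagger 2.0 JSON schema, then the extra rules listed below
+        if err := validate.Spec(doc, strfmt.Default); err != nil {
+            log.Fatalf("spec is invalid: %v", err)
+        }
+        log.Println("spec is valid")
+    }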
+ +Entry points: + - Spec() + - NewSpecValidator() + - SpecValidator.Validate() + +Reported as errors: + [x] definition can't declare a property that's already defined by one of its ancestors + [x] definition's ancestor can't be a descendant of the same model + [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method + [x] each security reference should contain only unique scopes + [x] each security scope in a security definition should be unique + [x] parameters in path must be unique + [x] each path parameter must correspond to a parameter placeholder and vice versa + [x] each referenceable definition must have references + [x] each definition property listed in the required array must be defined in the properties of the model + [x] each parameter should have a unique `name` and `type` combination + [x] each operation should have only 1 parameter of type body + [x] each reference must point to a valid object + [x] every default value that is specified must validate against the schema for that property + [x] items property is required for all schemas/definitions of type `array` + [x] path parameters must be declared a required + [x] headers must not contain $ref + [x] schema and property examples provided must validate against their respective object's schema + [x] examples provided must validate their schema + +Reported as warnings: + [x] path parameters should not contain any of [{,},\w] + [x] empty path + [x] unused definitions + [x] unsupported validation of examples on non-JSON media types + [x] examples in response without schema + [x] readOnly properties should not be required + +Validating a schema + +The schema validation toolkit validates data against JSON-schema-draft 04 schema. + +It is tested against the full json-schema-testing-suite (https://github.com/json-schema-org/JSON-Schema-Test-Suite), +except for the optional part (bignum, ECMA regexp, ...). + +It supports the complete JSON-schema vocabulary, including keywords not supported by Swagger (e.g. additionalItems, ...) + +Entry points: + - AgainstSchema() + - ... + +Known limitations + +With the current version of this package, the following aspects of swagger are not yet supported: + [ ] errors and warnings are not reported with key/line number in spec + [ ] default values and examples on responses only support application/json producer type + [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values + [ ] rules for collectionFormat are not implemented + [ ] no validation rule for polymorphism support (discriminator) [not done here] + [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid + [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 + +*/ +package validate diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go new file mode 100644 index 00000000000..13035917513 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -0,0 +1,270 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "fmt" + + "github.com/go-openapi/spec" +) + +// ExampleValidator validates example values defined in a spec +type exampleValidator struct { + SpecValidator *SpecValidator + visitedSchemas map[string]bool +} + +// resetVisited resets the internal state of visited schemas +func (ex *exampleValidator) resetVisited() { + ex.visitedSchemas = map[string]bool{} +} + +// beingVisited asserts a schema is being visited +func (ex *exampleValidator) beingVisited(path string) { + ex.visitedSchemas[path] = true +} + +// isVisited tells if a path has already been visited +func (ex *exampleValidator) isVisited(path string) bool { + return isVisited(path, ex.visitedSchemas) +} + +// Validate validates the example values declared in the swagger spec +// Example values MUST conform to their schema. +// +// With Swagger 2.0, examples are supported in: +// - schemas +// - individual property +// - responses +// +func (ex *exampleValidator) Validate() (errs *Result) { + errs = new(Result) + if ex == nil || ex.SpecValidator == nil { + return errs + } + ex.resetVisited() + errs.Merge(ex.validateExampleValueValidAgainstSchema()) // error - + + return errs +} + +func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { + // every example value that is specified must validate against the schema for that property + // in: schemas, properties, object, items + // not in: headers, parameters without schema + + res := new(Result) + s := ex.SpecValidator + + for method, pathItem := range s.analyzer.Operations() { + for path, op := range pathItem { + // parameters + for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + + // As of swagger 2.0, Examples are not supported in simple parameters + // However, it looks like it is supported by go-openapi + + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + + // Check simple parameters first + // default values provided must validate against their inline definition (no explicit schema) + if param.Example != nil && param.Schema == nil { + // check param default value is valid + red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Example) + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) + res.MergeAsWarnings(red) + } + } + + // Recursively follows Items and Schemas + if param.Items != nil { + red := ex.validateExampleValueItemsAgainstSchema(param.Name, param.In, ¶m, param.Items) + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } + } + + if param.Schema != nil { + // Validate example value against schema + red := ex.validateExampleValueSchemaAgainstSchema(param.Name, param.In, param.Schema) + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } + } + } + + if op.Responses != nil { + if op.Responses.Default != nil { + // Same constraint on default Response + res.Merge(ex.validateExampleInResponse(op.Responses.Default, 
jsonDefault, path, 0, op.ID)) + } + // Same constraint on regular Responses + if op.Responses.StatusCodeResponses != nil { // Safeguard + for code, r := range op.Responses.StatusCodeResponses { + res.Merge(ex.validateExampleInResponse(&r, "response", path, code, op.ID)) + } + } + } else if op.ID != "" { + // Empty op.ID means there is no meaningful operation: no need to report a specific message + res.AddErrors(noValidResponseMsg(op.ID)) + } + } + } + if s.spec.Spec().Definitions != nil { // Safeguard + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + for nm, sch := range s.spec.Spec().Definitions { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) + } + } + return res +} + +func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, responseType, path string, responseCode int, operationID string) *Result { + s := ex.SpecValidator + + response, res := responseHelp.expandResponseRef(resp, path, s) + if !res.IsValid() { // Safeguard + return res + } + + responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) + + // nolint: dupl + if response.Headers != nil { // Safeguard + for nm, h := range response.Headers { + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + + if h.Example != nil { + red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Example) + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) + res.MergeAsWarnings(red) + } + } + + // Headers have inline definition, like params + if h.Items != nil { + red := ex.validateExampleValueItemsAgainstSchema(nm, "header", &h, h.Items) + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) + res.MergeAsWarnings(red) + } + } + + if _, err := compileRegexp(h.Pattern); err != nil { + res.AddErrors(invalidPatternInHeaderMsg(operationID, nm, responseName, h.Pattern, err)) + } + + // Headers don't have schema + } + } + if response.Schema != nil { + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + + red := ex.validateExampleValueSchemaAgainstSchema(responseCodeAsStr, "response", response.Schema) + if red.HasErrorsOrWarnings() { + // Additional message to make sure the context of the error is not lost + res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName)) + res.Merge(red) + } + } + + if response.Examples != nil { + if response.Schema != nil { + if example, ok := response.Examples["application/json"]; ok { + res.MergeAsWarnings(NewSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, SwaggerSchema(true)).Validate(example)) + } else { + // TODO: validate other media types too + res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName)) + } + } else { + res.AddWarnings(examplesWithoutSchemaMsg(operationID, responseName)) + } + } + return res +} + +func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in string, schema *spec.Schema) *Result { + if schema == nil || ex.isVisited(path) { + // Avoids recursing if we are already done with that check + return nil + } + ex.beingVisited(path) + s := ex.SpecValidator + res := new(Result) + + if schema.Example != nil { + res.MergeAsWarnings(NewSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, 
SwaggerSchema(true)).Validate(schema.Example)) + } + if schema.Items != nil { + if schema.Items.Schema != nil { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".items.example", in, schema.Items.Schema)) + } + // Multiple schemas in items + if schema.Items.Schemas != nil { // Safeguard + for i, sch := range schema.Items.Schemas { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.items[%d].example", path, i), in, &sch)) + } + } + } + if _, err := compileRegexp(schema.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, schema.Pattern)) + } + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well) + res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema)) + } + for propName, prop := range schema.Properties { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) + } + for propName, prop := range schema.PatternProperties { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) + } + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema)) + } + if schema.AllOf != nil { + for i, aoSch := range schema.AllOf { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.allOf[%d]", path, i), in, &aoSch)) + } + } + return res +} + +// TODO: Temporary duplicated code. Need to refactor with examples +// nolint: dupl +func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { + res := new(Result) + s := ex.SpecValidator + if items != nil { + if items.Example != nil { + res.MergeAsWarnings(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Example)) + } + if items.Items != nil { + res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items)) + } + if _, err := compileRegexp(items.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, items.Pattern)) + } + } + return res +} diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go new file mode 100644 index 00000000000..0ad996cbbc2 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/formats.go @@ -0,0 +1,69 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package validate + +import ( + "reflect" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type formatValidator struct { + Format string + Path string + In string + KnownFormats strfmt.Registry +} + +func (f *formatValidator) SetPath(path string) { + f.Path = path +} + +func (f *formatValidator) Applies(source interface{}, kind reflect.Kind) bool { + doit := func() bool { + if source == nil { + return false + } + switch source := source.(type) { + case *spec.Items: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Parameter: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Schema: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Header: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + } + return false + } + r := doit() + debugLog("format validator for %q applies %t for %T (kind: %v)\n", f.Path, r, source, kind) + return r +} + +func (f *formatValidator) Validate(val interface{}) *Result { + result := new(Result) + debugLog("validating \"%v\" against format: %s", val, f.Format) + + if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil { + result.AddErrors(err) + } + + if result.HasErrors() { + return result + } + return nil +} diff --git a/vendor/github.com/go-openapi/validate/go.mod b/vendor/github.com/go-openapi/validate/go.mod new file mode 100644 index 00000000000..2d59ddbafab --- /dev/null +++ b/vendor/github.com/go-openapi/validate/go.mod @@ -0,0 +1,20 @@ +module github.com/go-openapi/validate + +require ( + github.com/go-openapi/analysis v0.19.5 + github.com/go-openapi/errors v0.19.2 + github.com/go-openapi/jsonpointer v0.19.3 + github.com/go-openapi/jsonreference v0.19.3 // indirect + github.com/go-openapi/loads v0.19.4 + github.com/go-openapi/runtime v0.19.4 + github.com/go-openapi/spec v0.19.3 + github.com/go-openapi/strfmt v0.19.3 + github.com/go-openapi/swag v0.19.5 + github.com/mailru/easyjson v0.7.0 // indirect + github.com/stretchr/testify v1.4.0 + github.com/vektah/gqlparser v1.1.2 + go.mongodb.org/mongo-driver v1.1.2 // indirect + gopkg.in/yaml.v2 v2.2.4 +) + +go 1.13 diff --git a/vendor/github.com/go-openapi/validate/go.sum b/vendor/github.com/go-openapi/validate/go.sum new file mode 100644 index 00000000000..0e9c75c1f2a --- /dev/null +++ b/vendor/github.com/go-openapi/validate/go.sum @@ -0,0 +1,156 @@ +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/agnivade/levenshtein v1.0.1 h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= 
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2 h1:ophLETFestFZHk3ji7niPEL4d466QjW+0Tdg5VyDq7E= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0 h1:wCOBNscACI8L93tt5tvB2zOMkJ098XCw3fP0BY2ybDA= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2 
h1:rf5ArTHmIJxyV5Oiks+Su0mUens1+AjpkPoWr5xFRcI= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3 h1:jwIoahqCmaA5OBoc/B+1+Mu2L0Gr8xYQnbeyQEo/7b0= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0 h1:sU6pp4dSV2sGlNKKyHxZzi1m1kG4WnYtWcJ+HYbygjE= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/vektah/gqlparser v1.1.2 h1:ZsyLGn7/7jDNI+y4SEhI4yAxRChlv15pUHMjijT+e68= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto 
v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191014212845-da9a3fd4c582 h1:p9xBe/w/OzkeYVKm234g55gMdD1nSIooTir5kV11kfA= +golang.org/x/net v0.0.0-20191014212845-da9a3fd4c582/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go new file mode 100644 index 00000000000..4b77a000474 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -0,0 +1,324 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// 
you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+// TODO: define this as package validate/internal
+// This must be done while keeping CI intact with all tests and test coverage
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+)
+
+const (
+ swaggerBody = "body"
+ swaggerExample = "example"
+ swaggerExamples = "examples"
+)
+
+const (
+ objectType = "object"
+ arrayType = "array"
+ stringType = "string"
+ integerType = "integer"
+ numberType = "number"
+ booleanType = "boolean"
+ fileType = "file"
+ nullType = "null"
+)
+
+const (
+ jsonProperties = "properties"
+ jsonItems = "items"
+ jsonType = "type"
+ //jsonSchema = "schema"
+ jsonDefault = "default"
+)
+
+const (
+ stringFormatDate = "date"
+ stringFormatDateTime = "date-time"
+ stringFormatPassword = "password"
+ stringFormatByte = "byte"
+ //stringFormatBinary = "binary"
+ stringFormatCreditCard = "creditcard"
+ stringFormatDuration = "duration"
+ stringFormatEmail = "email"
+ stringFormatHexColor = "hexcolor"
+ stringFormatHostname = "hostname"
+ stringFormatIPv4 = "ipv4"
+ stringFormatIPv6 = "ipv6"
+ stringFormatISBN = "isbn"
+ stringFormatISBN10 = "isbn10"
+ stringFormatISBN13 = "isbn13"
+ stringFormatMAC = "mac"
+ stringFormatBSONObjectID = "bsonobjectid"
+ stringFormatRGBColor = "rgbcolor"
+ stringFormatSSN = "ssn"
+ stringFormatURI = "uri"
+ stringFormatUUID = "uuid"
+ stringFormatUUID3 = "uuid3"
+ stringFormatUUID4 = "uuid4"
+ stringFormatUUID5 = "uuid5"
+
+ integerFormatInt32 = "int32"
+ integerFormatInt64 = "int64"
+ integerFormatUInt32 = "uint32"
+ integerFormatUInt64 = "uint64"
+
+ numberFormatFloat32 = "float32"
+ numberFormatFloat64 = "float64"
+ numberFormatFloat = "float"
+ numberFormatDouble = "double"
+)
+
+// Helpers available at the package level
+var (
+ pathHelp *pathHelper
+ valueHelp *valueHelper
+ errorHelp *errorHelper
+ paramHelp *paramHelper
+ responseHelp *responseHelper
+)
+
+type errorHelper struct {
+ // A collection of unexported helpers for error construction
+}
+
+func (h *errorHelper) sErr(err errors.Error) *Result {
+ // Builds a Result from standard errors.Error
+ return &Result{Errors: []error{err}}
+}
+
+func (h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result {
+ // Provides more context on error messages
+ // reported by the jsonpointer package by altering the passed Result
+ if err != nil {
+ res.AddErrors(cannotResolveRefMsg(fromPath, ref, err))
+ }
+ return res
+}
+
+type pathHelper struct {
+ // A collection of unexported helpers for path validation
+}
+
+func (h *pathHelper) stripParametersInPath(path string) string {
+ // Returns a path stripped from all path parameters, with multiple or trailing slashes removed.
+ //
+ // Stripping is performed on a slash-separated basis, e.g. '/a{/b}' remains a{/b} and not /a.
+ // - Trailing "/" makes a difference, e.g. /a/ !~ /a (ex: canary/bitbucket.org/swagger.json)
+ // - presence or absence of a parameter makes a difference, e.g.
/a/{log} !~ /a/ (ex: canary/kubernetes/swagger.json)
+
+ // Regexp to extract parameters from path, with surrounding {}.
+ // NOTE: important non-greedy modifier
+ rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`)
+ strippedSegments := []string{}
+
+ for _, segment := range strings.Split(path, "/") {
+ strippedSegments = append(strippedSegments, rexParsePathParam.ReplaceAllString(segment, "X"))
+ }
+ return strings.Join(strippedSegments, "/")
+}
+
+func (h *pathHelper) extractPathParams(path string) (params []string) {
+ // Extracts all params from a path, with surrounding "{}"
+ rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`)
+
+ for _, segment := range strings.Split(path, "/") {
+ for _, v := range rexParsePathParam.FindAllStringSubmatch(segment, -1) {
+ params = append(params, v...)
+ }
+ }
+ return
+}
+
+type valueHelper struct {
+ // A collection of unexported helpers for value validation
+}
+
+func (h *valueHelper) asInt64(val interface{}) int64 {
+ // Number conversion function for int64, without error checking
+ // (implements an implicit type upgrade).
+ v := reflect.ValueOf(val)
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return int64(v.Uint())
+ case reflect.Float32, reflect.Float64:
+ return int64(v.Float())
+ default:
+ //panic("Non numeric value in asInt64()")
+ return 0
+ }
+}
+
+func (h *valueHelper) asUint64(val interface{}) uint64 {
+ // Number conversion function for uint64, without error checking
+ // (implements an implicit type upgrade).
+ v := reflect.ValueOf(val)
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return uint64(v.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return v.Uint()
+ case reflect.Float32, reflect.Float64:
+ return uint64(v.Float())
+ default:
+ //panic("Non numeric value in asUint64()")
+ return 0
+ }
+}
+
+// Same for floats
+func (h *valueHelper) asFloat64(val interface{}) float64 {
+ // Number conversion function for float64, without error checking
+ // (implements an implicit type upgrade).
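+ // For instance, asFloat64(uint64(7)), asFloat64(7) and asFloat64(7.0) all
+ // yield float64(7); non-numeric input falls through to the default branch
+ // below and returns 0.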
+ v := reflect.ValueOf(val)
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return float64(v.Uint())
+ case reflect.Float32, reflect.Float64:
+ return v.Float()
+ default:
+ //panic("Non numeric value in asFloat64()")
+ return 0
+ }
+}
+
+type paramHelper struct {
+ // A collection of unexported helpers for parameters resolution
+}
+
+func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, res *Result, s *SpecValidator) (params []spec.Parameter) {
+ operation, ok := s.analyzer.OperationFor(method, path)
+ if ok {
+ // expand parameters first if necessary
+ resolvedParams := []spec.Parameter{}
+ for _, ppr := range operation.Parameters {
+ resolvedParam, red := h.resolveParam(path, method, operationID, &ppr, s)
+ res.Merge(red)
+ if resolvedParam != nil {
+ resolvedParams = append(resolvedParams, *resolvedParam)
+ }
+ }
+ // remove params with invalid expansion from slice
+ operation.Parameters = resolvedParams
+
+ for _, ppr := range s.analyzer.SafeParamsFor(method, path,
+ func(p spec.Parameter, err error) bool {
+ // since params have already been expanded, there are few causes for error
+ res.AddErrors(someParametersBrokenMsg(path, method, operationID))
+ // original error from analyzer
+ res.AddErrors(err)
+ return true
+ }) {
+ params = append(params, ppr)
+ }
+ }
+ return
+}
+
+func (h *paramHelper) resolveParam(path, method, operationID string, param *spec.Parameter, s *SpecValidator) (*spec.Parameter, *Result) {
+ // Ensure parameter is expanded
+ var err error
+ res := new(Result)
+ isRef := param.Ref.String() != ""
+ if s.spec.SpecFilePath() == "" {
+ err = spec.ExpandParameterWithRoot(param, s.spec.Spec(), nil)
+ } else {
+ err = spec.ExpandParameter(param, s.spec.SpecFilePath())
+
+ }
+ if err != nil { // Safeguard
+ // NOTE: we may enter here when the whole parameter is an unresolved $ref
+ refPath := strings.Join([]string{"\"" + path + "\"", method}, ".")
+ errorHelp.addPointerError(res, err, param.Ref.String(), refPath)
+ return nil, res
+ }
+ res.Merge(h.checkExpandedParam(param, param.Name, param.In, operationID, isRef))
+ return param, res
+}
+
+func (h *paramHelper) checkExpandedParam(pr *spec.Parameter, path, in, operation string, isRef bool) *Result {
+ // Secure parameter structure after $ref resolution
+ res := new(Result)
+ simpleZero := spec.SimpleSchema{}
+ // Try to explain why... best guess
+ switch {
+ case pr.In == swaggerBody && (pr.SimpleSchema != simpleZero && pr.SimpleSchema.Type != objectType):
+ if isRef {
+ // Most likely, a $ref with a sibling is an unwanted situation: in itself this is a warning...
+ // but we detect it because of the following error: + // schema took over Parameter for an unexplained reason + res.AddWarnings(refShouldNotHaveSiblingsMsg(path, operation)) + } + res.AddErrors(invalidParameterDefinitionMsg(path, in, operation)) + case pr.In != swaggerBody && pr.Schema != nil: + if isRef { + res.AddWarnings(refShouldNotHaveSiblingsMsg(path, operation)) + } + res.AddErrors(invalidParameterDefinitionAsSchemaMsg(path, in, operation)) + case (pr.In == swaggerBody && pr.Schema == nil) || (pr.In != swaggerBody && pr.SimpleSchema == simpleZero): + // Other unexpected mishaps + res.AddErrors(invalidParameterDefinitionMsg(path, in, operation)) + } + return res +} + +type responseHelper struct { + // A collection of unexported helpers for response resolution +} + +func (r *responseHelper) expandResponseRef( + response *spec.Response, + path string, s *SpecValidator) (*spec.Response, *Result) { + // Ensure response is expanded + var err error + res := new(Result) + if s.spec.SpecFilePath() == "" { + // there is no physical document to resolve $ref in response + err = spec.ExpandResponseWithRoot(response, s.spec.Spec(), nil) + } else { + err = spec.ExpandResponse(response, s.spec.SpecFilePath()) + } + if err != nil { // Safeguard + // NOTE: we may enter here when the whole response is an unresolved $ref. + errorHelp.addPointerError(res, err, response.Ref.String(), path) + return nil, res + } + return response, res +} + +func (r *responseHelper) responseMsgVariants( + responseType string, + responseCode int) (responseName, responseCodeAsStr string) { + // Path variants for messages + if responseType == jsonDefault { + responseCodeAsStr = jsonDefault + responseName = "default response" + } else { + responseCodeAsStr = strconv.Itoa(responseCode) + responseName = "response " + responseCodeAsStr + } + return +} diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go new file mode 100644 index 00000000000..aea85b300d6 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/object_validator.go @@ -0,0 +1,279 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package validate + +import ( + "reflect" + "regexp" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type objectValidator struct { + Path string + In string + MaxProperties *int64 + MinProperties *int64 + Required []string + Properties map[string]spec.Schema + AdditionalProperties *spec.SchemaOrBool + PatternProperties map[string]spec.Schema + Root interface{} + KnownFormats strfmt.Registry + Options SchemaValidatorOptions +} + +func (o *objectValidator) SetPath(path string) { + o.Path = path +} + +func (o *objectValidator) Applies(source interface{}, kind reflect.Kind) bool { + // TODO: this should also work for structs + // there is a problem in the type validator where it will be unhappy about null values + // so that requires more testing + r := reflect.TypeOf(source) == specSchemaType && (kind == reflect.Map || kind == reflect.Struct) + debugLog("object validator for %q applies %t for %T (kind: %v)\n", o.Path, r, source, kind) + return r +} + +func (o *objectValidator) isProperties() bool { + p := strings.Split(o.Path, ".") + return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties +} + +func (o *objectValidator) isDefault() bool { + p := strings.Split(o.Path, ".") + return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault +} + +func (o *objectValidator) isExample() bool { + p := strings.Split(o.Path, ".") + return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample +} + +func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]interface{}) { + // for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly. + // with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type). 
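+ // For example, a schema fragment {"type": "array"} with no "items" key is
+ // reported below with a Required("items") error, while a fragment such as
+ // {"type": "array", "items": {"type": "string"}} passes this check.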
+ if t, typeFound := val[jsonType]; typeFound { + if tpe, ok := t.(string); ok && tpe == arrayType { + if _, itemsKeyFound := val[jsonItems]; !itemsKeyFound { + res.AddErrors(errors.Required(jsonItems, o.Path)) + } + } + } +} + +func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]interface{}) { + if !o.isProperties() && !o.isDefault() && !o.isExample() { + if _, itemsKeyFound := val[jsonItems]; itemsKeyFound { + t, typeFound := val[jsonType] + if typeFound { + if tpe, ok := t.(string); !ok || tpe != arrayType { + res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil)) + } + } else { + // there is no type + res.AddErrors(errors.Required(jsonType, o.Path)) + } + } + } +} + +func (o *objectValidator) precheck(res *Result, val map[string]interface{}) { + if o.Options.EnableArrayMustHaveItemsCheck { + o.checkArrayMustHaveItems(res, val) + } + if o.Options.EnableObjectArrayTypeCheck { + o.checkItemsMustBeTypeArray(res, val) + } +} + +func (o *objectValidator) Validate(data interface{}) *Result { + val := data.(map[string]interface{}) + // TODO: guard against nil data + numKeys := int64(len(val)) + + if o.MinProperties != nil && numKeys < *o.MinProperties { + return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties)) + } + if o.MaxProperties != nil && numKeys > *o.MaxProperties { + return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties)) + } + + res := new(Result) + + o.precheck(res, val) + + // check validity of field names + if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows { + // Case: additionalProperties: false + for k := range val { + _, regularProperty := o.Properties[k] + matched := false + + for pk := range o.PatternProperties { + if matches, _ := regexp.MatchString(pk, k); matches { + matched = true + break + } + } + + if !regularProperty && k != "$schema" && k != "id" && !matched { + // Special properties "$schema" and "id" are ignored + res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k)) + + // BUG(fredbi): This section should move to a part dedicated to spec validation as + // it will conflict with regular schemas where a property "headers" is defined. + + // + // Croaks a more explicit message on top of the standard one + // on some recognized cases. + // + // NOTE: edge cases with invalid type assertion are simply ignored here. + // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered + // by higher level callers (the IMPORTANT! tag will be eventually + // removed). 
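+ // For example (hypothetical spec fragment), a response declaring
+ // "headers": {"X-Rate-Limit": {"$ref": "#/definitions/rateLimit"}}
+ // is croaked on below, since $ref is not allowed in header objects.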
+ if k == "headers" && val[k] != nil { + // $ref is forbidden in header + if headers, mapOk := val[k].(map[string]interface{}); mapOk { + for headerKey, headerBody := range headers { + if headerBody != nil { + if headerSchema, mapOfMapOk := headerBody.(map[string]interface{}); mapOfMapOk { + if _, found := headerSchema["$ref"]; found { + var msg string + if refString, stringOk := headerSchema["$ref"].(string); stringOk { + msg = strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "") + } + res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg)) + } + } + } + } + } + /* + case "$ref": + if val[k] != nil { + // TODO: check context of that ref: warn about siblings, check against invalid context + } + */ + } + } + } + } else { + // Cases: no additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> } + for key, value := range val { + _, regularProperty := o.Properties[key] + + // Validates property against "patternProperties" if applicable + // BUG(fredbi): succeededOnce is always false + + // NOTE: how about regular properties which do not match patternProperties? + matched, succeededOnce, _ := o.validatePatternProperty(key, value, res) + + if !(regularProperty || matched || succeededOnce) { + + // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator + if o.AdditionalProperties != nil && o.AdditionalProperties.Schema != nil { + // AdditionalProperties as Schema + r := NewSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value) + res.mergeForField(data.(map[string]interface{}), key, r) + } else if regularProperty && !(matched || succeededOnce) { + // TODO: this is dead code since regularProperty=false here + res.AddErrors(errors.FailedAllPatternProperties(o.Path, o.In, key)) + } + } + } + // Valid cases: additionalProperties: true or undefined + } + + createdFromDefaults := map[string]bool{} + + // Property types: + // - regular Property + for pName := range o.Properties { + pSchema := o.Properties[pName] // one instance per iteration + rName := pName + if o.Path != "" { + rName = o.Path + "." + pName + } + + // Recursively validates each property against its schema + if v, ok := val[pName]; ok { + r := NewSchemaValidator(&pSchema, o.Root, rName, o.KnownFormats, o.Options.Options()...).Validate(v) + res.mergeForField(data.(map[string]interface{}), pName, r) + } else if pSchema.Default != nil { + // If a default value is defined, creates the property from defaults + // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does. 
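+ // Record the property as created from its default, so that the
+ // required-properties check further below does not flag it as missing.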
+ createdFromDefaults[pName] = true
+ res.addPropertySchemata(data.(map[string]interface{}), pName, &pSchema)
+ }
+ }
+
+ // Check required properties
+ if len(o.Required) > 0 {
+ for _, k := range o.Required {
+ if _, ok := val[k]; !ok && !createdFromDefaults[k] {
+ res.AddErrors(errors.Required(o.Path+"."+k, o.In))
+ continue
+ }
+ }
+ }
+
+ // Check patternProperties
+ // TODO: it looks like we have done that twice in many cases
+ for key, value := range val {
+ _, regularProperty := o.Properties[key]
+ matched, _ /*succeededOnce*/, patterns := o.validatePatternProperty(key, value, res)
+ if !regularProperty && (matched /*|| succeededOnce*/) {
+ for _, pName := range patterns {
+ if v, ok := o.PatternProperties[pName]; ok {
+ r := NewSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value)
+ res.mergeForField(data.(map[string]interface{}), key, r)
+ }
+ }
+ }
+ }
+ return res
+}
+
+// TODO: succeededOnce is not used anywhere
+func (o *objectValidator) validatePatternProperty(key string, value interface{}, result *Result) (bool, bool, []string) {
+ matched := false
+ succeededOnce := false
+ var patterns []string
+
+ for k, schema := range o.PatternProperties {
+ sch := schema
+ if match, _ := regexp.MatchString(k, key); match {
+ patterns = append(patterns, k)
+ matched = true
+ validator := NewSchemaValidator(&sch, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...)
+
+ res := validator.Validate(value)
+ result.Merge(res)
+ }
+ }
+
+ // BUG(fredbi): can't get to here. Should remove dead code (commented out).
+
+ //if succeededOnce {
+ // result.Inc()
+ //}
+
+ return matched, succeededOnce, patterns
+} diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go new file mode 100644 index 00000000000..deeec2f2ecc --- /dev/null +++ b/vendor/github.com/go-openapi/validate/options.go @@ -0,0 +1,43 @@ +// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import "sync"
+
+// Opts specifies validation options for a SpecValidator.
+//
+// NOTE: other options might be needed, for example a go-swagger specific mode.
+type Opts struct {
+ ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid
+}
+
+var (
+ defaultOpts = Opts{ContinueOnErrors: false} // default is to stop validation on errors
+ defaultOptsMutex = &sync.Mutex{}
+)
+
+// SetContinueOnErrors sets global default behavior regarding spec validation errors reporting.
+//
+// For extended error reporting, you most likely want to set it to true.
+// For faster validation, it's better to give up early when a spec is detected as invalid: set it to false (this is the default).
+//
+// Setting this mode does NOT affect the validation status.
+//
+// NOTE: this method affects global defaults. It is not suitable for concurrent usage.
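+//
+// A minimal usage sketch (assuming a spec document doc loaded with
+// go-openapi/loads and the default strfmt registry):
+//
+//	SetContinueOnErrors(true)
+//	err := Spec(doc, strfmt.Default)
+//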
+func SetContinueOnErrors(c bool) {
+ defaultOptsMutex.Lock()
+ defer defaultOptsMutex.Unlock()
+ defaultOpts.ContinueOnErrors = c
+} diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go new file mode 100644 index 00000000000..8f5f935e5d1 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/result.go @@ -0,0 +1,486 @@ +// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+)
+
+// Result represents a validation result set, composed of
+// errors and warnings.
+//
+// It is used to keep track of all detected errors and warnings during
+// the validation of a specification.
+//
+// MatchCount is used to determine
+// which errors are relevant in the case of AnyOf, OneOf
+// schema validation. Results from the validation branch
+// with most matches get eventually selected.
+//
+// TODO: keep path of key originating the error
+type Result struct {
+ Errors []error
+ Warnings []error
+ MatchCount int
+
+ // the object data
+ data interface{}
+
+ // Schemata for the root object
+ rootObjectSchemata schemata
+ // Schemata for object fields
+ fieldSchemata []fieldSchemata
+ // Schemata for slice items
+ itemSchemata []itemSchemata
+
+ cachedFieldSchemta map[FieldKey][]*spec.Schema
+ cachedItemSchemata map[ItemKey][]*spec.Schema
+}
+
+// FieldKey is a pair of an object and a field, usable as a key for a map.
+type FieldKey struct {
+ object reflect.Value // actually a map[string]interface{}, but the latter cannot be a key
+ field string
+}
+
+// ItemKey is a pair of a slice and an index, usable as a key for a map.
+type ItemKey struct {
+ slice reflect.Value // actually a []interface{}, but the latter cannot be a key
+ index int
+}
+
+// NewFieldKey returns a pair of an object and field usable as a key of a map.
+func NewFieldKey(obj map[string]interface{}, field string) FieldKey {
+ return FieldKey{object: reflect.ValueOf(obj), field: field}
+}
+
+// Object returns the underlying object of this key.
+func (fk *FieldKey) Object() map[string]interface{} {
+ return fk.object.Interface().(map[string]interface{})
+}
+
+// Field returns the underlying field of this key.
+func (fk *FieldKey) Field() string {
+ return fk.field
+}
+
+// NewItemKey returns a pair of a slice and index usable as a key of a map.
+func NewItemKey(slice interface{}, i int) ItemKey {
+ return ItemKey{slice: reflect.ValueOf(slice), index: i}
+}
+
+// Slice returns the underlying slice of this key.
+func (ik *ItemKey) Slice() []interface{} {
+ return ik.slice.Interface().([]interface{})
+}
+
+// Index returns the underlying index of this key.
+func (ik *ItemKey) Index() int {
+ return ik.index
+}
+
+type fieldSchemata struct {
+ obj map[string]interface{}
+ field string
+ schemata schemata
+}
+
+type itemSchemata struct {
+ slice reflect.Value
+ index int
+ schemata schemata
+}
+
+// Merge merges this result with the other one(s), preserving match counts etc.
+func (r *Result) Merge(others ...*Result) *Result {
+ for _, other := range others {
+ if other == nil {
+ continue
+ }
+ r.mergeWithoutRootSchemata(other)
+ r.rootObjectSchemata.Append(other.rootObjectSchemata)
+ }
+ return r
+}
+
+// Data returns the original data object used for validation. Mutating this renders
+// the result invalid.
+func (r *Result) Data() interface{} {
+ return r.data
+}
+
+// RootObjectSchemata returns the schemata which apply to the root object.
+func (r *Result) RootObjectSchemata() []*spec.Schema {
+ return r.rootObjectSchemata.Slice()
+}
+
+// FieldSchemata returns the schemata which apply to fields in objects.
+// nolint: dupl
+func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema {
+ if r.cachedFieldSchemta != nil {
+ return r.cachedFieldSchemta
+ }
+
+ ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata))
+ for _, fs := range r.fieldSchemata {
+ key := NewFieldKey(fs.obj, fs.field)
+ if fs.schemata.one != nil {
+ ret[key] = append(ret[key], fs.schemata.one)
+ } else if len(fs.schemata.multiple) > 0 {
+ ret[key] = append(ret[key], fs.schemata.multiple...)
+ }
+ }
+ r.cachedFieldSchemta = ret
+ return ret
+}
+
+// ItemSchemata returns the schemata which apply to items in slices.
+// nolint: dupl
+func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema {
+ if r.cachedItemSchemata != nil {
+ return r.cachedItemSchemata
+ }
+
+ ret := make(map[ItemKey][]*spec.Schema, len(r.itemSchemata))
+ for _, ss := range r.itemSchemata {
+ key := NewItemKey(ss.slice, ss.index)
+ if ss.schemata.one != nil {
+ ret[key] = append(ret[key], ss.schemata.one)
+ } else if len(ss.schemata.multiple) > 0 {
+ ret[key] = append(ret[key], ss.schemata.multiple...)
+ }
+ }
+ r.cachedItemSchemata = ret
+ return ret
+}
+
+func (r *Result) resetCaches() {
+ r.cachedFieldSchemta = nil
+ r.cachedItemSchemata = nil
+}
+
+// mergeForField merges other into r, assigning other's root schemata to the given object and field name.
+// nolint: unparam
+func (r *Result) mergeForField(obj map[string]interface{}, field string, other *Result) *Result {
+ if other == nil {
+ return r
+ }
+ r.mergeWithoutRootSchemata(other)
+
+ if other.rootObjectSchemata.Len() > 0 {
+ if r.fieldSchemata == nil {
+ r.fieldSchemata = make([]fieldSchemata, 0, len(obj))
+ }
+ r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{
+ obj: obj,
+ field: field,
+ schemata: other.rootObjectSchemata,
+ })
+ }
+
+ return r
+}
+
+// mergeForSlice merges other into r, assigning other's root schemata to the given slice and index.
+// nolint: unparam
+func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Result {
+ if other == nil {
+ return r
+ }
+ r.mergeWithoutRootSchemata(other)
+
+ if other.rootObjectSchemata.Len() > 0 {
+ if r.itemSchemata == nil {
+ r.itemSchemata = make([]itemSchemata, 0, slice.Len())
+ }
+ r.itemSchemata = append(r.itemSchemata, itemSchemata{
+ slice: slice,
+ index: i,
+ schemata: other.rootObjectSchemata,
+ })
+ }
+
+ return r
+}
+
+// addRootObjectSchemata adds the given schemata for the root object of the result.
+// The slice schemata might be reused. I.e. do not modify it after being added to a result.
+func (r *Result) addRootObjectSchemata(s *spec.Schema) {
+ r.rootObjectSchemata.Append(schemata{one: s})
+}
+
+// addPropertySchemata adds the given schemata for the object and field.
+// The slice schemata might be reused. I.e. do not modify it after being added to a result.
+func (r *Result) addPropertySchemata(obj map[string]interface{}, fld string, schema *spec.Schema) {
+ if r.fieldSchemata == nil {
+ r.fieldSchemata = make([]fieldSchemata, 0, len(obj))
+ }
+ r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: schema}})
+}
+
+/*
+// addSliceSchemata adds the given schemata for the slice and index.
+// The slice schemata might be reused. I.e. do not modify it after being added to a result.
+func (r *Result) addSliceSchemata(slice reflect.Value, i int, schema *spec.Schema) {
+ if r.itemSchemata == nil {
+ r.itemSchemata = make([]itemSchemata, 0, slice.Len())
+ }
+ r.itemSchemata = append(r.itemSchemata, itemSchemata{slice: slice, index: i, schemata: schemata{one: schema}})
+}
+*/
+
+// mergeWithoutRootSchemata merges other into r, ignoring the rootObject schemata.
+func (r *Result) mergeWithoutRootSchemata(other *Result) {
+ r.resetCaches()
+ r.AddErrors(other.Errors...)
+ r.AddWarnings(other.Warnings...)
+ r.MatchCount += other.MatchCount
+
+ if other.fieldSchemata != nil {
+ if r.fieldSchemata == nil {
+ r.fieldSchemata = other.fieldSchemata
+ } else {
+ r.fieldSchemata = append(r.fieldSchemata, other.fieldSchemata...)
+ }
+ }
+
+ if other.itemSchemata != nil {
+ if r.itemSchemata == nil {
+ r.itemSchemata = other.itemSchemata
+ } else {
+ r.itemSchemata = append(r.itemSchemata, other.itemSchemata...)
+ }
+ }
+}
+
+// MergeAsErrors merges this result with the other one(s), preserving match counts etc.
+//
+// Warnings from input are merged as Errors in the returned merged Result.
+func (r *Result) MergeAsErrors(others ...*Result) *Result {
+ for _, other := range others {
+ if other != nil {
+ r.resetCaches()
+ r.AddErrors(other.Errors...)
+ r.AddErrors(other.Warnings...)
+ r.MatchCount += other.MatchCount
+ }
+ }
+ return r
+}
+
+// MergeAsWarnings merges this result with the other one(s), preserving match counts etc.
+//
+// Errors from input are merged as Warnings in the returned merged Result.
+func (r *Result) MergeAsWarnings(others ...*Result) *Result {
+ for _, other := range others {
+ if other != nil {
+ r.resetCaches()
+ r.AddWarnings(other.Errors...)
+ r.AddWarnings(other.Warnings...)
+ r.MatchCount += other.MatchCount
+ }
+ }
+ return r
+}
+
+// AddErrors adds errors to this validation result (if not already reported).
+//
+// Since the same check may be passed several times while exploring the
+// spec structure (via $ref, ...) reported messages are kept
+// unique.
+func (r *Result) AddErrors(errors ...error) {
+ for _, e := range errors {
+ found := false
+ if e != nil {
+ for _, isReported := range r.Errors {
+ if e.Error() == isReported.Error() {
+ found = true
+ break
+ }
+ }
+ if !found {
+ r.Errors = append(r.Errors, e)
+ }
+ }
+ }
+}
+
+// AddWarnings adds warnings to this validation result (if not already reported).
+func (r *Result) AddWarnings(warnings ...error) {
+ for _, e := range warnings {
+ found := false
+ if e != nil {
+ for _, isReported := range r.Warnings {
+ if e.Error() == isReported.Error() {
+ found = true
+ break
+ }
+ }
+ if !found {
+ r.Warnings = append(r.Warnings, e)
+ }
+ }
+ }
+}
+
+func (r *Result) keepRelevantErrors() *Result {
+ // TODO: this one is going to disappear...
+ // keepRelevantErrors strips a result from standard errors and keeps
+ // the ones which are supposedly more accurate.
+ //
+ // The original result remains unaffected (creates a new instance of Result).
+ // This method is used to work around the "matchCount" filter which would otherwise
+ // strip our result from some accurate error reporting from lower level validators.
+ //
+ // NOTE: this implementation with a placeholder (IMPORTANT!) is neither clean nor
+ // very efficient. On the other hand, relying on go-openapi/errors to manipulate
+ // codes would require changing a lot here. So, for the moment, let's go with
+ // placeholders.
+ strippedErrors := []error{}
+ for _, e := range r.Errors {
+ if strings.HasPrefix(e.Error(), "IMPORTANT!") {
+ strippedErrors = append(strippedErrors, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
+ }
+ }
+ strippedWarnings := []error{}
+ for _, e := range r.Warnings {
+ if strings.HasPrefix(e.Error(), "IMPORTANT!") {
+ strippedWarnings = append(strippedWarnings, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
+ }
+ }
+ strippedResult := new(Result)
+ strippedResult.Errors = strippedErrors
+ strippedResult.Warnings = strippedWarnings
+ return strippedResult
+}
+
+// IsValid returns true when this result is valid.
+//
+// Returns true on a nil *Result.
+func (r *Result) IsValid() bool {
+ if r == nil {
+ return true
+ }
+ return len(r.Errors) == 0
+}
+
+// HasErrors returns true when this result is invalid.
+//
+// Returns false on a nil *Result.
+func (r *Result) HasErrors() bool {
+ if r == nil {
+ return false
+ }
+ return !r.IsValid()
+}
+
+// HasWarnings returns true when this result contains warnings.
+//
+// Returns false on a nil *Result.
+func (r *Result) HasWarnings() bool {
+ if r == nil {
+ return false
+ }
+ return len(r.Warnings) > 0
+}
+
+// HasErrorsOrWarnings returns true when this result contains
+// either errors or warnings.
+//
+// Returns false on a nil *Result.
+func (r *Result) HasErrorsOrWarnings() bool {
+ if r == nil {
+ return false
+ }
+ return len(r.Errors) > 0 || len(r.Warnings) > 0
+}
+
+// Inc increments the match count
+func (r *Result) Inc() {
+ r.MatchCount++
+}
+
+// AsError renders this result as an error interface
+//
+// TODO: reporting / pretty print with path ordered and indented
+func (r *Result) AsError() error {
+ if r.IsValid() {
+ return nil
+ }
+ return errors.CompositeValidationError(r.Errors...)
+}
+
+// schemata is an arbitrary number of schemata. It makes a distinction between zero,
+// one and many schemata to avoid slice allocations.
+type schemata struct {
+ // one is set if there is exactly one schema. In that case multiple must be nil.
+ one *spec.Schema
+ // multiple is an arbitrary number of schemas. If it is set, one must be nil.
+ multiple []*spec.Schema
+}
+
+func (s *schemata) Len() int {
+ if s.one != nil {
+ return 1
+ }
+ return len(s.multiple)
+}
+
+func (s *schemata) Slice() []*spec.Schema {
+ if s == nil {
+ return nil
+ }
+ if s.one != nil {
+ return []*spec.Schema{s.one}
+ }
+ return s.multiple
+}
+
+// appendSchemata appends the schemata in other to s. It mutates s in-place.
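+// For example, appending schemata{one: b} to schemata{one: a} leaves
+// s.multiple == []*spec.Schema{a, b} and s.one == nil.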
+func (s *schemata) Append(other schemata) { + if other.one == nil && len(other.multiple) == 0 { + return + } + if s.one == nil && len(s.multiple) == 0 { + *s = other + return + } + + if s.one != nil { + if other.one != nil { + s.multiple = []*spec.Schema{s.one, other.one} + } else { + t := make([]*spec.Schema, 0, 1+len(other.multiple)) + s.multiple = append(append(t, s.one), other.multiple...) + } + s.one = nil + } else { + if other.one != nil { + s.multiple = append(s.multiple, other.one) + } else { + if cap(s.multiple) >= len(s.multiple)+len(other.multiple) { + s.multiple = append(s.multiple, other.multiple...) + } else { + t := make([]*spec.Schema, 0, len(s.multiple)+len(other.multiple)) + s.multiple = append(append(t, s.multiple...), other.multiple...) + } + } + } +} diff --git a/vendor/github.com/go-openapi/validate/rexp.go b/vendor/github.com/go-openapi/validate/rexp.go new file mode 100644 index 00000000000..5a0824395c7 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/rexp.go @@ -0,0 +1,71 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + re "regexp" + "sync" + "sync/atomic" +) + +// Cache for compiled regular expressions +var ( + cacheMutex = &sync.Mutex{} + reDict = atomic.Value{} //map[string]*re.Regexp +) + +func compileRegexp(pattern string) (*re.Regexp, error) { + if cache, ok := reDict.Load().(map[string]*re.Regexp); ok { + if r := cache[pattern]; r != nil { + return r, nil + } + } + + r, err := re.Compile(pattern) + if err != nil { + return nil, err + } + cacheRegexp(r) + return r, nil +} + +func mustCompileRegexp(pattern string) *re.Regexp { + if cache, ok := reDict.Load().(map[string]*re.Regexp); ok { + if r := cache[pattern]; r != nil { + return r + } + } + + r := re.MustCompile(pattern) + cacheRegexp(r) + return r +} + +func cacheRegexp(r *re.Regexp) { + cacheMutex.Lock() + defer cacheMutex.Unlock() + + if cache, ok := reDict.Load().(map[string]*re.Regexp); !ok || cache[r.String()] == nil { + newCache := map[string]*re.Regexp{ + r.String(): r, + } + + for k, v := range cache { + newCache[k] = v + } + + reDict.Store(newCache) + } +} diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go new file mode 100644 index 00000000000..55454b85227 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -0,0 +1,260 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
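+
+// Usage sketch for this file's entry point (illustrative, not part of the
+// upstream source). AgainstSchema performs a one-off validation of raw data
+// against a parsed schema, assuming the default strfmt registry:
+//
+//	if err := AgainstSchema(schema, map[string]interface{}{"name": 1}, strfmt.Default); err != nil {
+//		// err flattens every validation message into a composite error
+//	}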
+
+package validate
+
+import (
+ "encoding/json"
+ "reflect"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+var (
+ specSchemaType = reflect.TypeOf(&spec.Schema{})
+ specParameterType = reflect.TypeOf(&spec.Parameter{})
+ specHeaderType = reflect.TypeOf(&spec.Header{})
+ //specItemsType = reflect.TypeOf(&spec.Items{})
+)
+
+// SchemaValidator validates data against a JSON schema
+type SchemaValidator struct {
+ Path string
+ in string
+ Schema *spec.Schema
+ validators []valueValidator
+ Root interface{}
+ KnownFormats strfmt.Registry
+ Options SchemaValidatorOptions
+}
+
+// AgainstSchema validates the specified data against the provided schema, using a registry of supported formats.
+//
+// When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example.
+func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registry, options ...Option) error {
+ res := NewSchemaValidator(schema, nil, "", formats, options...).Validate(data)
+ if res.HasErrors() {
+ return errors.CompositeValidationError(res.Errors...)
+ }
+ return nil
+}
+
+// NewSchemaValidator creates a new schema validator.
+//
+// Panics if the provided schema is invalid.
+func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...Option) *SchemaValidator {
+ if schema == nil {
+ return nil
+ }
+
+ if rootSchema == nil {
+ rootSchema = schema
+ }
+
+ if schema.ID != "" || schema.Ref.String() != "" || schema.Ref.IsRoot() {
+ err := spec.ExpandSchema(schema, rootSchema, nil)
+ if err != nil {
+ msg := invalidSchemaProvidedMsg(err).Error()
+ panic(msg)
+ }
+ }
+ s := SchemaValidator{
+ Path: root,
+ in: "body",
+ Schema: schema,
+ Root: rootSchema,
+ KnownFormats: formats,
+ Options: SchemaValidatorOptions{}}
+ for _, o := range options {
+ o(&s.Options)
+ }
+ s.validators = []valueValidator{
+ s.typeValidator(),
+ s.schemaPropsValidator(),
+ s.stringValidator(),
+ s.formatValidator(),
+ s.numberValidator(),
+ s.sliceValidator(),
+ s.commonValidator(),
+ s.objectValidator(),
+ }
+ return &s
+}
+
+// SetPath sets the path for this schema validator
+func (s *SchemaValidator) SetPath(path string) {
+ s.Path = path
+}
+
+// Applies returns true when this schema validator applies
+func (s *SchemaValidator) Applies(source interface{}, kind reflect.Kind) bool {
+ _, ok := source.(*spec.Schema)
+ return ok
+}
+
+// Validate validates the data against the schema
+func (s *SchemaValidator) Validate(data interface{}) *Result {
+ result := &Result{data: data}
+ if s == nil {
+ return result
+ }
+ if s.Schema != nil {
+ result.addRootObjectSchemata(s.Schema)
+ }
+
+ if data == nil {
+ result.Merge(s.validators[0].Validate(data)) // type validator
+ result.Merge(s.validators[6].Validate(data)) // common validator
+ return result
+ }
+
+ tpe := reflect.TypeOf(data)
+ kind := tpe.Kind()
+ for kind == reflect.Ptr {
+ tpe = tpe.Elem()
+ kind = tpe.Kind()
+ }
+ d := data
+
+ if kind == reflect.Struct {
+ // NOTE: since reflect retrieves the true nature of types, all strfmt
+ // types passed here (e.g. strfmt.Datetime) are converted to strings,
+ // and structs are systematically converted to map[string]interface{}.
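+ // For instance (illustrative, assuming encoding/json marshalling semantics):
+ //
+ //	type T struct {
+ //		Name string `json:"name"`
+ //	}
+ //	// swag.ToDynamicJSON(T{Name: "x"}) yields map[string]interface{}{"name": "x"}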
+ d = swag.ToDynamicJSON(data) + } + + // TODO: this part should be handed over to type validator + // Handle special case of json.Number data (number marshalled as string) + isnumber := s.Schema.Type.Contains(numberType) || s.Schema.Type.Contains(integerType) + if num, ok := data.(json.Number); ok && isnumber { + if s.Schema.Type.Contains(integerType) { // avoid lossy conversion + in, erri := num.Int64() + if erri != nil { + result.AddErrors(invalidTypeConversionMsg(s.Path, erri)) + result.Inc() + return result + } + d = in + } else { + nf, errf := num.Float64() + if errf != nil { + result.AddErrors(invalidTypeConversionMsg(s.Path, errf)) + result.Inc() + return result + } + d = nf + } + + tpe = reflect.TypeOf(d) + kind = tpe.Kind() + } + + for _, v := range s.validators { + if !v.Applies(s.Schema, kind) { + debugLog("%T does not apply for %v", v, kind) + continue + } + + err := v.Validate(d) + result.Merge(err) + result.Inc() + } + result.Inc() + + return result +} + +func (s *SchemaValidator) typeValidator() valueValidator { + return &typeValidator{Type: s.Schema.Type, Nullable: s.Schema.Nullable, Format: s.Schema.Format, In: s.in, Path: s.Path} +} + +func (s *SchemaValidator) commonValidator() valueValidator { + return &basicCommonValidator{ + Path: s.Path, + In: s.in, + Enum: s.Schema.Enum, + } +} + +func (s *SchemaValidator) sliceValidator() valueValidator { + return &schemaSliceValidator{ + Path: s.Path, + In: s.in, + MaxItems: s.Schema.MaxItems, + MinItems: s.Schema.MinItems, + UniqueItems: s.Schema.UniqueItems, + AdditionalItems: s.Schema.AdditionalItems, + Items: s.Schema.Items, + Root: s.Root, + KnownFormats: s.KnownFormats, + Options: s.Options, + } +} + +func (s *SchemaValidator) numberValidator() valueValidator { + return &numberValidator{ + Path: s.Path, + In: s.in, + Default: s.Schema.Default, + MultipleOf: s.Schema.MultipleOf, + Maximum: s.Schema.Maximum, + ExclusiveMaximum: s.Schema.ExclusiveMaximum, + Minimum: s.Schema.Minimum, + ExclusiveMinimum: s.Schema.ExclusiveMinimum, + } +} + +func (s *SchemaValidator) stringValidator() valueValidator { + return &stringValidator{ + Path: s.Path, + In: s.in, + MaxLength: s.Schema.MaxLength, + MinLength: s.Schema.MinLength, + Pattern: s.Schema.Pattern, + } +} + +func (s *SchemaValidator) formatValidator() valueValidator { + return &formatValidator{ + Path: s.Path, + In: s.in, + Format: s.Schema.Format, + KnownFormats: s.KnownFormats, + } +} + +func (s *SchemaValidator) schemaPropsValidator() valueValidator { + sch := s.Schema + return newSchemaPropsValidator(s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, s.Options.Options()...) 
+} + +func (s *SchemaValidator) objectValidator() valueValidator { + return &objectValidator{ + Path: s.Path, + In: s.in, + MaxProperties: s.Schema.MaxProperties, + MinProperties: s.Schema.MinProperties, + Required: s.Schema.Required, + Properties: s.Schema.Properties, + AdditionalProperties: s.Schema.AdditionalProperties, + PatternProperties: s.Schema.PatternProperties, + Root: s.Root, + KnownFormats: s.KnownFormats, + Options: s.Options, + } +} diff --git a/vendor/github.com/go-openapi/validate/schema_messages.go b/vendor/github.com/go-openapi/validate/schema_messages.go new file mode 100644 index 00000000000..786e2e3554e --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema_messages.go @@ -0,0 +1,78 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "github.com/go-openapi/errors" +) + +// Error messages related to schema validation and returned as results. +const ( + // ArrayDoesNotAllowAdditionalItemsError when an additionalItems construct is not verified by the array values provided. + // + // TODO: should move to package go-openapi/errors + ArrayDoesNotAllowAdditionalItemsError = "array doesn't allow for additional items" + + // HasDependencyError indicates that a dependencies construct was not verified + HasDependencyError = "%q has a dependency on %s" + + // InvalidSchemaProvidedError indicates that the schema provided to validate a value cannot be properly compiled + InvalidSchemaProvidedError = "Invalid schema provided to SchemaValidator: %v" + + // InvalidTypeConversionError indicates that a numerical conversion for the given type could not be carried on + InvalidTypeConversionError = "invalid type conversion in %s: %v " + + // MustValidateAtLeastOneSchemaError indicates that in a AnyOf construct, none of the schema constraints specified were verified + MustValidateAtLeastOneSchemaError = "%q must validate at least one schema (anyOf)" + + // MustValidateOnlyOneSchemaError indicates that in a OneOf construct, either none of the schema constraints specified were verified, or several were + MustValidateOnlyOneSchemaError = "%q must validate one and only one schema (oneOf). 
%s" + + // MustValidateAllSchemasError indicates that in a AllOf construct, at least one of the schema constraints specified were not verified + // + // TODO: punctuation in message + MustValidateAllSchemasError = "%q must validate all the schemas (allOf)%s" + + // MustNotValidateSchemaError indicates that in a Not construct, the schema constraint specified was verified + MustNotValidateSchemaError = "%q must not validate the schema (not)" +) + +// Warning messages related to schema validation and returned as results +const () + +func invalidSchemaProvidedMsg(err error) errors.Error { + return errors.New(InternalErrorCode, InvalidSchemaProvidedError, err) +} +func invalidTypeConversionMsg(path string, err error) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidTypeConversionError, path, err) +} +func mustValidateOnlyOneSchemaMsg(path, additionalMsg string) errors.Error { + return errors.New(errors.CompositeErrorCode, MustValidateOnlyOneSchemaError, path, additionalMsg) +} +func mustValidateAtLeastOneSchemaMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, MustValidateAtLeastOneSchemaError, path) +} +func mustValidateAllSchemasMsg(path, additionalMsg string) errors.Error { + return errors.New(errors.CompositeErrorCode, MustValidateAllSchemasError, path, additionalMsg) +} +func mustNotValidatechemaMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, MustNotValidateSchemaError, path) +} +func hasADependencyMsg(path, depkey string) errors.Error { + return errors.New(errors.CompositeErrorCode, HasDependencyError, path, depkey) +} +func arrayDoesNotAllowAdditionalItemsMsg() errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayDoesNotAllowAdditionalItemsError) +} diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go new file mode 100644 index 00000000000..4b4879de8b1 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema_option.go @@ -0,0 +1,54 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package validate + +// SchemaValidatorOptions defines optional rules for schema validation +type SchemaValidatorOptions struct { + EnableObjectArrayTypeCheck bool + EnableArrayMustHaveItemsCheck bool +} + +// Option sets optional rules for schema validation +type Option func(*SchemaValidatorOptions) + +// EnableObjectArrayTypeCheck activates the swagger rule: an items must be in type: array +func EnableObjectArrayTypeCheck(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.EnableObjectArrayTypeCheck = enable + } +} + +// EnableArrayMustHaveItemsCheck activates the swagger rule: an array must have items defined +func EnableArrayMustHaveItemsCheck(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.EnableArrayMustHaveItemsCheck = enable + } +} + +// SwaggerSchema activates swagger schema validation rules +func SwaggerSchema(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.EnableObjectArrayTypeCheck = enable + svo.EnableArrayMustHaveItemsCheck = enable + } +} + +// Options returns current options +func (svo SchemaValidatorOptions) Options() []Option { + return []Option{ + EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck), + EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck), + } +} diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go new file mode 100644 index 00000000000..5643c783cdd --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema_props.go @@ -0,0 +1,240 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package validate + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type schemaPropsValidator struct { + Path string + In string + AllOf []spec.Schema + OneOf []spec.Schema + AnyOf []spec.Schema + Not *spec.Schema + Dependencies spec.Dependencies + anyOfValidators []SchemaValidator + allOfValidators []SchemaValidator + oneOfValidators []SchemaValidator + notValidator *SchemaValidator + Root interface{} + KnownFormats strfmt.Registry + Options SchemaValidatorOptions +} + +func (s *schemaPropsValidator) SetPath(path string) { + s.Path = path +} + +func newSchemaPropsValidator(path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, options ...Option) *schemaPropsValidator { + anyValidators := make([]SchemaValidator, 0, len(anyOf)) + for _, v := range anyOf { + v := v + anyValidators = append(anyValidators, *NewSchemaValidator(&v, root, path, formats, options...)) + } + allValidators := make([]SchemaValidator, 0, len(allOf)) + for _, v := range allOf { + v := v + allValidators = append(allValidators, *NewSchemaValidator(&v, root, path, formats, options...)) + } + oneValidators := make([]SchemaValidator, 0, len(oneOf)) + for _, v := range oneOf { + v := v + oneValidators = append(oneValidators, *NewSchemaValidator(&v, root, path, formats, options...)) + } + + var notValidator *SchemaValidator + if not != nil { + notValidator = NewSchemaValidator(not, root, path, formats, options...) + } + + schOptions := &SchemaValidatorOptions{} + for _, o := range options { + o(schOptions) + } + return &schemaPropsValidator{ + Path: path, + In: in, + AllOf: allOf, + OneOf: oneOf, + AnyOf: anyOf, + Not: not, + Dependencies: deps, + anyOfValidators: anyValidators, + allOfValidators: allValidators, + oneOfValidators: oneValidators, + notValidator: notValidator, + Root: root, + KnownFormats: formats, + Options: *schOptions, + } +} + +func (s *schemaPropsValidator) Applies(source interface{}, kind reflect.Kind) bool { + r := reflect.TypeOf(source) == specSchemaType + debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind) + return r +} + +func (s *schemaPropsValidator) Validate(data interface{}) *Result { + mainResult := new(Result) + + // Intermediary error results + + // IMPORTANT! messages from underlying validators + keepResultAnyOf := new(Result) + keepResultOneOf := new(Result) + keepResultAllOf := new(Result) + + // Validates at least one in anyOf schemas + var firstSuccess *Result + if len(s.anyOfValidators) > 0 { + var bestFailures *Result + succeededOnce := false + for _, anyOfSchema := range s.anyOfValidators { + result := anyOfSchema.Validate(data) + // We keep inner IMPORTANT! 
errors no matter what MatchCount tells us + keepResultAnyOf.Merge(result.keepRelevantErrors()) + if result.IsValid() { + bestFailures = nil + succeededOnce = true + if firstSuccess == nil { + firstSuccess = result + } + keepResultAnyOf = new(Result) + break + } + // MatchCount is used to select errors from the schema with most positive checks + if bestFailures == nil || result.MatchCount > bestFailures.MatchCount { + bestFailures = result + } + } + + if !succeededOnce { + mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path)) + } + if bestFailures != nil { + mainResult.Merge(bestFailures) + } else if firstSuccess != nil { + mainResult.Merge(firstSuccess) + } + } + + // Validates exactly one in oneOf schemas + if len(s.oneOfValidators) > 0 { + var bestFailures *Result + var firstSuccess *Result + validated := 0 + + for _, oneOfSchema := range s.oneOfValidators { + result := oneOfSchema.Validate(data) + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultOneOf.Merge(result.keepRelevantErrors()) + if result.IsValid() { + validated++ + bestFailures = nil + if firstSuccess == nil { + firstSuccess = result + } + keepResultOneOf = new(Result) + continue + } + // MatchCount is used to select errors from the schema with most positive checks + if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) { + bestFailures = result + } + } + + if validated != 1 { + additionalMsg := "" + if validated == 0 { + additionalMsg = "Found none valid" + } else { + additionalMsg = fmt.Sprintf("Found %d valid alternatives", validated) + } + + mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, additionalMsg)) + if bestFailures != nil { + mainResult.Merge(bestFailures) + } + } else if firstSuccess != nil { + mainResult.Merge(firstSuccess) + } + } + + // Validates all of allOf schemas + if len(s.allOfValidators) > 0 { + validated := 0 + + for _, allOfSchema := range s.allOfValidators { + result := allOfSchema.Validate(data) + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultAllOf.Merge(result.keepRelevantErrors()) + //keepResultAllOf.Merge(result) + if result.IsValid() { + validated++ + } + mainResult.Merge(result) + } + + if validated != len(s.allOfValidators) { + additionalMsg := "" + if validated == 0 { + additionalMsg = ". None validated" + } + + mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, additionalMsg)) + } + } + + if s.notValidator != nil { + result := s.notValidator.Validate(data) + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + if result.IsValid() { + mainResult.AddErrors(mustNotValidatechemaMsg(s.Path)) + } + } + + if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map { + val := data.(map[string]interface{}) + for key := range val { + if dep, ok := s.Dependencies[key]; ok { + + if dep.Schema != nil { + mainResult.Merge(NewSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options.Options()...).Validate(data)) + continue + } + + if len(dep.Property) > 0 { + for _, depKey := range dep.Property { + if _, ok := val[depKey]; !ok { + mainResult.AddErrors(hasADependencyMsg(s.Path, depKey)) + } + } + } + } + } + } + + mainResult.Inc() + // In the end we retain best failures for schema validation + // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!). 
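+ // Illustrative recap (not upstream text): for a oneOf with two alternatives
+ // matched by neither value, mainResult carries the generic
+ // mustValidateOnlyOneSchemaMsg plus the failures of the alternative with the
+ // highest MatchCount, while keepResultOneOf re-injects any IMPORTANT!-tagged
+ // messages that the MatchCount filter would otherwise discard.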
+ return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf) +} diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go new file mode 100644 index 00000000000..aa429f5184e --- /dev/null +++ b/vendor/github.com/go-openapi/validate/slice_validator.go @@ -0,0 +1,105 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type schemaSliceValidator struct { + Path string + In string + MaxItems *int64 + MinItems *int64 + UniqueItems bool + AdditionalItems *spec.SchemaOrBool + Items *spec.SchemaOrArray + Root interface{} + KnownFormats strfmt.Registry + Options SchemaValidatorOptions +} + +func (s *schemaSliceValidator) SetPath(path string) { + s.Path = path +} + +func (s *schemaSliceValidator) Applies(source interface{}, kind reflect.Kind) bool { + _, ok := source.(*spec.Schema) + r := ok && kind == reflect.Slice + return r +} + +func (s *schemaSliceValidator) Validate(data interface{}) *Result { + result := new(Result) + if data == nil { + return result + } + val := reflect.ValueOf(data) + size := val.Len() + + if s.Items != nil && s.Items.Schema != nil { + validator := NewSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options.Options()...) + for i := 0; i < size; i++ { + validator.SetPath(fmt.Sprintf("%s.%d", s.Path, i)) + value := val.Index(i) + result.mergeForSlice(val, i, validator.Validate(value.Interface())) + } + } + + itemsSize := 0 + if s.Items != nil && len(s.Items.Schemas) > 0 { + itemsSize = len(s.Items.Schemas) + for i := 0; i < itemsSize; i++ { + validator := NewSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...) + if val.Len() <= i { + break + } + result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) + } + } + if s.AdditionalItems != nil && itemsSize < size { + if s.Items != nil && len(s.Items.Schemas) > 0 && !s.AdditionalItems.Allows { + result.AddErrors(arrayDoesNotAllowAdditionalItemsMsg()) + } + if s.AdditionalItems.Schema != nil { + for i := itemsSize; i < size-itemsSize+1; i++ { + validator := NewSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...) 
+ result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) + } + } + } + + if s.MinItems != nil { + if err := MinItems(s.Path, s.In, int64(size), *s.MinItems); err != nil { + result.AddErrors(err) + } + } + if s.MaxItems != nil { + if err := MaxItems(s.Path, s.In, int64(size), *s.MaxItems); err != nil { + result.AddErrors(err) + } + } + if s.UniqueItems { + if err := UniqueItems(s.Path, s.In, val.Interface()); err != nil { + result.AddErrors(err) + } + } + result.Inc() + return result +} diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go new file mode 100644 index 00000000000..f30dd79e383 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -0,0 +1,795 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// Spec validates an OpenAPI 2.0 specification document. +// +// Returns an error flattening in a single standard error, all validation messages. +// +// - TODO: $ref should not have siblings +// - TODO: make sure documentation reflects all checks and warnings +// - TODO: check on discriminators +// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) +// - TODO: full list of unresolved refs +// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples +// - TODO: option to determine if we validate for go-swagger or in a more general context +// - TODO: check on required properties to support anyOf, allOf, oneOf +// +// NOTE: SecurityScopes are maps: no need to check uniqueness +// +func Spec(doc *loads.Document, formats strfmt.Registry) error { + errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc) + if errs.HasErrors() { + return errors.CompositeValidationError(errs.Errors...) 
+ } + return nil +} + +// SpecValidator validates a swagger 2.0 spec +type SpecValidator struct { + schema *spec.Schema // swagger 2.0 schema + spec *loads.Document + analyzer *analysis.Spec + expanded *loads.Document + KnownFormats strfmt.Registry + Options Opts // validation options +} + +// NewSpecValidator creates a new swagger spec validator instance +func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator { + return &SpecValidator{ + schema: schema, + KnownFormats: formats, + Options: defaultOpts, + } +} + +// Validate validates the swagger spec +func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { + var sd *loads.Document + errs, warnings := new(Result), new(Result) + + if v, ok := data.(*loads.Document); ok { + sd = v + } + if sd == nil { + errs.AddErrors(invalidDocumentMsg()) + return errs, warnings // no point in continuing + } + s.spec = sd + s.analyzer = analysis.New(sd.Spec()) + + // Swagger schema validator + schv := NewSchemaValidator(s.schema, nil, "", s.KnownFormats, SwaggerSchema(true)) + var obj interface{} + + // Raw spec unmarshalling errors + if err := json.Unmarshal(sd.Raw(), &obj); err != nil { + // NOTE: under normal conditions, the *load.Document has been already unmarshalled + // So this one is just a paranoid check on the behavior of the spec package + panic(InvalidDocumentError) + } + + defer func() { + // errs holds all errors and warnings, + // warnings only warnings + errs.MergeAsWarnings(warnings) + warnings.AddErrors(errs.Warnings...) + }() + + errs.Merge(schv.Validate(obj)) // error - + // There may be a point in continuing to try and determine more accurate errors + if !s.Options.ContinueOnErrors && errs.HasErrors() { + return errs, warnings // no point in continuing + } + + errs.Merge(s.validateReferencesValid()) // error - + // There may be a point in continuing to try and determine more accurate errors + if !s.Options.ContinueOnErrors && errs.HasErrors() { + return errs, warnings // no point in continuing + } + + errs.Merge(s.validateDuplicateOperationIDs()) + errs.Merge(s.validateDuplicatePropertyNames()) // error - + errs.Merge(s.validateParameters()) // error - + errs.Merge(s.validateItems()) // error - + + // Properties in required definition MUST validate their schema + // Properties SHOULD NOT be declared as both required and readOnly (warning) + errs.Merge(s.validateRequiredDefinitions()) // error and warning + + // There may be a point in continuing to try and determine more accurate errors + if !s.Options.ContinueOnErrors && errs.HasErrors() { + return errs, warnings // no point in continuing + } + + // Values provided as default MUST validate their schema + df := &defaultValidator{SpecValidator: s} + errs.Merge(df.Validate()) + + // Values provided as examples MUST validate their schema + // Value provided as examples in a response without schema generate a warning + // Known limitations: examples in responses for mime type not application/json are ignored (warning) + ex := &exampleValidator{SpecValidator: s} + errs.Merge(ex.Validate()) + + errs.Merge(s.validateNonEmptyPathParamNames()) + + //errs.Merge(s.validateRefNoSibling()) // warning only + errs.Merge(s.validateReferenced()) // warning only + + return errs, warnings +} + +func (s *SpecValidator) validateNonEmptyPathParamNames() *Result { + res := new(Result) + if s.spec.Spec().Paths == nil { + // There is no Paths object: error + res.AddErrors(noValidPathMsg()) + } else { + if s.spec.Spec().Paths.Paths == nil { + // Paths may be empty: 
warning + res.AddWarnings(noValidPathMsg()) + } else { + for k := range s.spec.Spec().Paths.Paths { + if strings.Contains(k, "{}") { + res.AddErrors(emptyPathParameterMsg(k)) + } + } + } + } + return res +} + +func (s *SpecValidator) validateDuplicateOperationIDs() *Result { + // OperationID, if specified, must be unique across the board + var analyzer *analysis.Spec + if s.expanded != nil { + // $ref are valid: we can analyze operations on an expanded spec + analyzer = analysis.New(s.expanded.Spec()) + } else { + // fallback on possible incomplete picture because of previous errors + analyzer = s.analyzer + } + res := new(Result) + known := make(map[string]int) + for _, v := range analyzer.OperationIDs() { + if v != "" { + known[v]++ + } + } + for k, v := range known { + if v > 1 { + res.AddErrors(nonUniqueOperationIDMsg(k, v)) + } + } + return res +} + +type dupProp struct { + Name string + Definition string +} + +func (s *SpecValidator) validateDuplicatePropertyNames() *Result { + // definition can't declare a property that's already defined by one of its ancestors + res := new(Result) + for k, sch := range s.spec.Spec().Definitions { + if len(sch.AllOf) == 0 { + continue + } + + knownanc := map[string]struct{}{ + "#/definitions/" + k: {}, + } + + ancs, rec := s.validateCircularAncestry(k, sch, knownanc) + if rec != nil && (rec.HasErrors() || !rec.HasWarnings()) { + res.Merge(rec) + } + if len(ancs) > 0 { + res.AddErrors(circularAncestryDefinitionMsg(k, ancs)) + return res + } + + knowns := make(map[string]struct{}) + dups, rep := s.validateSchemaPropertyNames(k, sch, knowns) + if rep != nil && (rep.HasErrors() || rep.HasWarnings()) { + res.Merge(rep) + } + if len(dups) > 0 { + var pns []string + for _, v := range dups { + pns = append(pns, v.Definition+"."+v.Name) + } + res.AddErrors(duplicatePropertiesMsg(k, pns)) + } + + } + return res +} + +func (s *SpecValidator) resolveRef(ref *spec.Ref) (*spec.Schema, error) { + if s.spec.SpecFilePath() != "" { + return spec.ResolveRefWithBase(s.spec.Spec(), ref, &spec.ExpandOptions{RelativeBase: s.spec.SpecFilePath()}) + } + // NOTE: it looks like with the new spec resolver, this code is now unrecheable + return spec.ResolveRef(s.spec.Spec(), ref) +} + +func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, knowns map[string]struct{}) ([]dupProp, *Result) { + var dups []dupProp + + schn := nm + schc := &sch + res := new(Result) + + for schc.Ref.String() != "" { + // gather property names + reso, err := s.resolveRef(&schc.Ref) + if err != nil { + errorHelp.addPointerError(res, err, schc.Ref.String(), nm) + return dups, res + } + schc = reso + schn = sch.Ref.String() + } + + if len(schc.AllOf) > 0 { + for _, chld := range schc.AllOf { + dup, rep := s.validateSchemaPropertyNames(schn, chld, knowns) + if rep != nil && (rep.HasErrors() || rep.HasWarnings()) { + res.Merge(rep) + } + dups = append(dups, dup...) + } + return dups, res + } + + for k := range schc.Properties { + _, ok := knowns[k] + if ok { + dups = append(dups, dupProp{Name: k, Definition: schn}) + } else { + knowns[k] = struct{}{} + } + } + + return dups, res +} + +func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) { + res := new(Result) + + if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. 
We should not be able to actually get there + return nil, res + } + var ancs []string + + schn := nm + schc := &sch + + for schc.Ref.String() != "" { + reso, err := s.resolveRef(&schc.Ref) + if err != nil { + errorHelp.addPointerError(res, err, schc.Ref.String(), nm) + return ancs, res + } + schc = reso + schn = sch.Ref.String() + } + + if schn != nm && schn != "" { + if _, ok := knowns[schn]; ok { + ancs = append(ancs, schn) + } + knowns[schn] = struct{}{} + + if len(ancs) > 0 { + return ancs, res + } + } + + if len(schc.AllOf) > 0 { + for _, chld := range schc.AllOf { + if chld.Ref.String() != "" || len(chld.AllOf) > 0 { + anc, rec := s.validateCircularAncestry(schn, chld, knowns) + if rec != nil && (rec.HasErrors() || !rec.HasWarnings()) { + res.Merge(rec) + } + ancs = append(ancs, anc...) + if len(ancs) > 0 { + return ancs, res + } + } + } + } + return ancs, res +} + +func (s *SpecValidator) validateItems() *Result { + // validate parameter, items, schema and response objects for presence of item if type is array + res := new(Result) + + for method, pi := range s.analyzer.Operations() { + for path, op := range pi { + for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + + if param.TypeName() == arrayType && param.ItemsTypeName() == "" { + res.AddErrors(arrayInParamRequiresItemsMsg(param.Name, op.ID)) + continue + } + if param.In != swaggerBody { + if param.Items != nil { + items := param.Items + for items.TypeName() == arrayType { + if items.ItemsTypeName() == "" { + res.AddErrors(arrayInParamRequiresItemsMsg(param.Name, op.ID)) + break + } + items = items.Items + } + } + } else { + // In: body + if param.Schema != nil { + res.Merge(s.validateSchemaItems(*param.Schema, fmt.Sprintf("body param %q", param.Name), op.ID)) + } + } + } + + var responses []spec.Response + if op.Responses != nil { + if op.Responses.Default != nil { + responses = append(responses, *op.Responses.Default) + } + if op.Responses.StatusCodeResponses != nil { + for _, v := range op.Responses.StatusCodeResponses { + responses = append(responses, v) + } + } + } + + for _, resp := range responses { + // Response headers with array + for hn, hv := range resp.Headers { + if hv.TypeName() == arrayType && hv.ItemsTypeName() == "" { + res.AddErrors(arrayInHeaderRequiresItemsMsg(hn, op.ID)) + } + } + if resp.Schema != nil { + res.Merge(s.validateSchemaItems(*resp.Schema, "response body", op.ID)) + } + } + } + } + return res +} + +// Verifies constraints on array type +func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result { + res := new(Result) + if !schema.Type.Contains(arrayType) { + return res + } + + if schema.Items == nil || schema.Items.Len() == 0 { + res.AddErrors(arrayRequiresItemsMsg(prefix, opID)) + return res + } + + if schema.Items.Schema != nil { + schema = *schema.Items.Schema + if _, err := compileRegexp(schema.Pattern); err != nil { + res.AddErrors(invalidItemsPatternMsg(prefix, opID, schema.Pattern)) + } + + res.Merge(s.validateSchemaItems(schema, prefix, opID)) + } + return res +} + +func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result { + // Each defined operation path parameters must correspond to a named element in the API's path pattern. + // (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.) 
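+ // Illustrative inputs (assumed, not upstream text): for path /pets/{petId},
+ // fromPath is ["{petId}"] and fromOperation lists the declared in:path names,
+ // e.g. ["petId"]. A mismatch in either direction is reported: "{petId}" with
+ // no matching declaration yields noParameterInPathMsg, and a declared "id"
+ // absent from the path pattern yields pathParamNotInPathMsg.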
+ res := new(Result) + for _, l := range fromPath { + var matched bool + for _, r := range fromOperation { + if l == "{"+r+"}" { + matched = true + break + } + } + if !matched { + res.AddErrors(noParameterInPathMsg(l)) + } + } + + for _, p := range fromOperation { + var matched bool + for _, r := range fromPath { + if "{"+p+"}" == r { + matched = true + break + } + } + if !matched { + res.AddErrors(pathParamNotInPathMsg(path, p)) + } + } + + return res +} + +func (s *SpecValidator) validateReferenced() *Result { + var res Result + res.MergeAsWarnings(s.validateReferencedParameters()) + res.MergeAsWarnings(s.validateReferencedResponses()) + res.MergeAsWarnings(s.validateReferencedDefinitions()) + return &res +} + +// nolint: dupl +func (s *SpecValidator) validateReferencedParameters() *Result { + // Each referenceable definition should have references. + params := s.spec.Spec().Parameters + if len(params) == 0 { + return nil + } + + expected := make(map[string]struct{}) + for k := range params { + expected["#/parameters/"+jsonpointer.Escape(k)] = struct{}{} + } + for _, k := range s.analyzer.AllParameterReferences() { + delete(expected, k) + } + + if len(expected) == 0 { + return nil + } + result := new(Result) + for k := range expected { + result.AddWarnings(unusedParamMsg(k)) + } + return result +} + +// nolint: dupl +func (s *SpecValidator) validateReferencedResponses() *Result { + // Each referenceable definition should have references. + responses := s.spec.Spec().Responses + if len(responses) == 0 { + return nil + } + + expected := make(map[string]struct{}) + for k := range responses { + expected["#/responses/"+jsonpointer.Escape(k)] = struct{}{} + } + for _, k := range s.analyzer.AllResponseReferences() { + delete(expected, k) + } + + if len(expected) == 0 { + return nil + } + result := new(Result) + for k := range expected { + result.AddWarnings(unusedResponseMsg(k)) + } + return result +} + +// nolint: dupl +func (s *SpecValidator) validateReferencedDefinitions() *Result { + // Each referenceable definition must have references. 
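+ // Sketch of the approach (illustrative): seed a set with every
+ // "#/definitions/<name>" JSON pointer, delete each pointer the analyzer saw
+ // behind a $ref, and emit an unusedDefinitionMsg warning per survivor.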
+ defs := s.spec.Spec().Definitions + if len(defs) == 0 { + return nil + } + + expected := make(map[string]struct{}) + for k := range defs { + expected["#/definitions/"+jsonpointer.Escape(k)] = struct{}{} + } + for _, k := range s.analyzer.AllDefinitionReferences() { + delete(expected, k) + } + + if len(expected) == 0 { + return nil + } + + result := new(Result) + for k := range expected { + result.AddWarnings(unusedDefinitionMsg(k)) + } + return result +} + +func (s *SpecValidator) validateRequiredDefinitions() *Result { + // Each property listed in the required array must be defined in the properties of the model + res := new(Result) + +DEFINITIONS: + for d, schema := range s.spec.Spec().Definitions { + if schema.Required != nil { // Safeguard + for _, pn := range schema.Required { + red := s.validateRequiredProperties(pn, d, &schema) + res.Merge(red) + if !red.IsValid() && !s.Options.ContinueOnErrors { + break DEFINITIONS // there is an error, let's stop that bleeding + } + } + } + } + return res +} + +func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result { + // Takes care of recursive property definitions, which may be nested in additionalProperties schemas + res := new(Result) + propertyMatch := false + patternMatch := false + additionalPropertiesMatch := false + isReadOnly := false + + // Regular properties + if _, ok := v.Properties[path]; ok { + propertyMatch = true + isReadOnly = v.Properties[path].ReadOnly + } + + // NOTE: patternProperties are not supported in swagger. Even though, we continue validation here + // We check all defined patterns: if one regexp is invalid, croaks an error + for pp, pv := range v.PatternProperties { + re, err := compileRegexp(pp) + if err != nil { + res.AddErrors(invalidPatternMsg(pp, in)) + } else if re.MatchString(path) { + patternMatch = true + if !propertyMatch { + isReadOnly = pv.ReadOnly + } + } + } + + if !(propertyMatch || patternMatch) { + if v.AdditionalProperties != nil { + if v.AdditionalProperties.Allows && v.AdditionalProperties.Schema == nil { + additionalPropertiesMatch = true + } else if v.AdditionalProperties.Schema != nil { + // additionalProperties as schema are upported in swagger + // recursively validates additionalProperties schema + // TODO : anyOf, allOf, oneOf like in schemaPropsValidator + red := s.validateRequiredProperties(path, in, v.AdditionalProperties.Schema) + if red.IsValid() { + additionalPropertiesMatch = true + if !propertyMatch && !patternMatch { + isReadOnly = v.AdditionalProperties.Schema.ReadOnly + } + } + res.Merge(red) + } + } + } + + if !(propertyMatch || patternMatch || additionalPropertiesMatch) { + res.AddErrors(requiredButNotDefinedMsg(path, in)) + } + + if isReadOnly { + res.AddWarnings(readOnlyAndRequiredMsg(in, path)) + } + return res +} + +func (s *SpecValidator) validateParameters() *Result { + // - for each method, path is unique, regardless of path parameters + // e.g. 
GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are + // considered duplicate paths + // - each parameter should have a unique `name` and `type` combination + // - each operation should have only 1 parameter of type body + // - there must be at most 1 parameter in body + // - parameters with pattern property must specify valid patterns + // - $ref in parameters must resolve + // - path param must be required + res := new(Result) + rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`) + for method, pi := range s.analyzer.Operations() { + methodPaths := make(map[string]map[string]string) + for path, op := range pi { + pathToAdd := pathHelp.stripParametersInPath(path) + + // Warn on garbled path afer param stripping + if rexGarbledPathSegment.MatchString(pathToAdd) { + res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd)) + } + + // Check uniqueness of stripped paths + if _, found := methodPaths[method][pathToAdd]; found { + + // Sort names for stable, testable output + if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { + res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) + } else { + res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path)) + } + } else { + if _, found := methodPaths[method]; !found { + methodPaths[method] = map[string]string{} + } + methodPaths[method][pathToAdd] = path //Original non stripped path + + } + + var bodyParams []string + var paramNames []string + var hasForm, hasBody bool + + // Check parameters names uniqueness for operation + // TODO: should be done after param expansion + res.Merge(s.checkUniqueParams(path, method, op)) + + for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + // Validate pattern regexp for parameters with a Pattern property + if _, err := compileRegexp(pr.Pattern); err != nil { + res.AddErrors(invalidPatternInParamMsg(op.ID, pr.Name, pr.Pattern)) + } + + // There must be at most one parameter in body: list them all + if pr.In == swaggerBody { + bodyParams = append(bodyParams, fmt.Sprintf("%q", pr.Name)) + hasBody = true + } + + if pr.In == "path" { + paramNames = append(paramNames, pr.Name) + // Path declared in path must have the required: true property + if !pr.Required { + res.AddErrors(pathParamRequiredMsg(op.ID, pr.Name)) + } + } + + if pr.In == "formData" { + hasForm = true + } + + if !(pr.Type == numberType || pr.Type == integerType) && + (pr.Maximum != nil || pr.Minimum != nil || pr.MultipleOf != nil) { + // A non-numeric parameter has validation keywords for numeric instances (number and integer) + res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type)) + } + + if !(pr.Type == stringType) && + // A non-string parameter has validation keywords for strings + (pr.MaxLength != nil || pr.MinLength != nil || pr.Pattern != "") { + res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type)) + } + + if !(pr.Type == arrayType) && + // A non-array parameter has validation keywords for arrays + (pr.MaxItems != nil || pr.MinItems != nil || pr.UniqueItems) { + res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type)) + } + } + + // In:formData and In:body are mutually exclusive + if hasBody && hasForm { + res.AddErrors(bothFormDataAndBodyMsg(op.ID)) + } + // There must be at most one body param + // Accurately report situations when more than 1 body param is declared (possibly unnamed) + if len(bodyParams) > 1 { + sort.Strings(bodyParams) + res.AddErrors(multipleBodyParamMsg(op.ID, bodyParams)) + } + + // Check 
uniqueness of parameters in path + paramsInPath := pathHelp.extractPathParams(path) + for i, p := range paramsInPath { + for j, q := range paramsInPath { + if p == q && i > j { + res.AddErrors(pathParamNotUniqueMsg(path, p, q)) + break + } + } + } + + // Warns about possible malformed params in path + rexGarbledParam := mustCompileRegexp(`{.*[{}\s]+.*}`) + for _, p := range paramsInPath { + if rexGarbledParam.MatchString(p) { + res.AddWarnings(pathParamGarbledMsg(path, p)) + } + } + + // Match params from path vs params from params section + res.Merge(s.validatePathParamPresence(path, paramsInPath, paramNames)) + } + } + return res +} + +func (s *SpecValidator) validateReferencesValid() *Result { + // each reference must point to a valid object + res := new(Result) + for _, r := range s.analyzer.AllRefs() { + if !r.IsValidURI(s.spec.SpecFilePath()) { // Safeguard - spec should always yield a valid URI + res.AddErrors(invalidRefMsg(r.String())) + } + } + if !res.HasErrors() { + // NOTE: with default settings, loads.Document.Expanded() + // stops on first error. Anyhow, the expand option to continue + // on errors fails to report errors at all. + exp, err := s.spec.Expanded() + if err != nil { + res.AddErrors(unresolvedReferencesMsg(err)) + } + s.expanded = exp + } + return res +} + +func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operation) *Result { + // Check for duplicate parameters declaration in param section. + // Each parameter should have a unique `name` and `type` combination + // NOTE: this could be factorized in analysis (when constructing the params map) + // However, there are some issues with such a factorization: + // - analysis does not seem to fully expand params + // - param keys may be altered by x-go-name + res := new(Result) + pnames := make(map[string]struct{}) + + if op.Parameters != nil { // Safeguard + for _, ppr := range op.Parameters { + var ok bool + pr, red := paramHelp.resolveParam(path, method, op.ID, &ppr, s) + res.Merge(red) + + if pr != nil && pr.Name != "" { // params with empty name does no participate the check + key := fmt.Sprintf("%s#%s", pr.In, pr.Name) + + if _, ok = pnames[key]; ok { + res.AddErrors(duplicateParamNameMsg(pr.In, pr.Name, op.ID)) + } + pnames[key] = struct{}{} + } + } + } + return res +} + +// SetContinueOnErrors sets the ContinueOnErrors option for this validator. +func (s *SpecValidator) SetContinueOnErrors(c bool) { + s.Options.ContinueOnErrors = c +} diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go new file mode 100644 index 00000000000..1a5892aee4d --- /dev/null +++ b/vendor/github.com/go-openapi/validate/spec_messages.go @@ -0,0 +1,360 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "net/http" + + "github.com/go-openapi/errors" +) + +// Error messages related to spec validation and returned as results. +const ( + // ArrayRequiresItemsError ... 
+ ArrayRequiresItemsError = "%s for %q is a collection without an element type (array requires items definition)"
+
+ // ArrayInParamRequiresItemsError ...
+ ArrayInParamRequiresItemsError = "param %q for %q is a collection without an element type (array requires items definition)"
+
+ // ArrayInHeaderRequiresItemsError ...
+ ArrayInHeaderRequiresItemsError = "header %q for %q is a collection without an element type (array requires items definition)"
+
+ // BothFormDataAndBodyError indicates that an operation specifies both a body and a formData parameter, which is forbidden
+ BothFormDataAndBodyError = "operation %q has both formData and body parameters. Only one such In: type may be used for a given operation"
+
+ // CannotResolveReferenceError when a $ref could not be resolved
+ CannotResolveReferenceError = "could not resolve reference in %s to $ref %s: %v"
+
+ // CircularAncestryDefinitionError ...
+ CircularAncestryDefinitionError = "definition %q has circular ancestry: %v"
+
+ // DefaultValueDoesNotValidateError results from an invalid default value provided
+ DefaultValueDoesNotValidateError = "default value for %s in %s does not validate its schema"
+
+ // DefaultValueItemsDoesNotValidateError results from an invalid default value provided for Items
+ DefaultValueItemsDoesNotValidateError = "default value for %s.items in %s does not validate its schema"
+
+ // DefaultValueHeaderDoesNotValidateError results from an invalid default value provided in header
+ DefaultValueHeaderDoesNotValidateError = "in operation %q, default value in header %s for %s does not validate its schema"
+
+ // DefaultValueHeaderItemsDoesNotValidateError results from an invalid default value provided in header.items
+ DefaultValueHeaderItemsDoesNotValidateError = "in operation %q, default value in header.items %s for %s does not validate its schema"
+
+ // DefaultValueInDoesNotValidateError ...
+ DefaultValueInDoesNotValidateError = "in operation %q, default value in %s does not validate its schema"
+
+ // DuplicateParamNameError ...
+ DuplicateParamNameError = "duplicate parameter name %q for %q in operation %q"
+
+ // DuplicatePropertiesError ...
+ DuplicatePropertiesError = "definition %q contains duplicate properties: %v"
+
+ // ExampleValueDoesNotValidateError results from an invalid example value provided
+ ExampleValueDoesNotValidateError = "example value for %s in %s does not validate its schema"
+
+ // ExampleValueItemsDoesNotValidateError results from an invalid example value provided for Items
+ ExampleValueItemsDoesNotValidateError = "example value for %s.items in %s does not validate its schema"
+
+ // ExampleValueHeaderDoesNotValidateError results from an invalid example value provided in header
+ ExampleValueHeaderDoesNotValidateError = "in operation %q, example value in header %s for %s does not validate its schema"
+
+ // ExampleValueHeaderItemsDoesNotValidateError results from an invalid example value provided in header.items
+ ExampleValueHeaderItemsDoesNotValidateError = "in operation %q, example value in header.items %s for %s does not validate its schema"
+
+ // ExampleValueInDoesNotValidateError ...
+ ExampleValueInDoesNotValidateError = "in operation %q, example value in %s does not validate its schema"
+
+ // EmptyPathParameterError means that a path parameter was found empty (e.g. "{}")
+ EmptyPathParameterError = "%q contains an empty path parameter"
+
+ // InvalidDocumentError states that spec validation only processes spec.Document objects
+ InvalidDocumentError = "spec validator can only validate spec.Document objects"
+
+ // InvalidItemsPatternError indicates an Items definition with invalid pattern
+ InvalidItemsPatternError = "%s for %q has invalid items pattern: %q"
+
+ // InvalidParameterDefinitionError indicates an error detected on a parameter definition
+ InvalidParameterDefinitionError = "invalid definition for parameter %s in %s in operation %q"
+
+ // InvalidParameterDefinitionAsSchemaError indicates an error detected on a parameter definition, which was mistaken for a schema definition.
+ // Most likely, this situation is encountered whenever a $ref has been added as a sibling of the parameter definition.
+ InvalidParameterDefinitionAsSchemaError = "invalid definition as Schema for parameter %s in %s in operation %q"
+
+ // InvalidPatternError ...
+ InvalidPatternError = "pattern %q is invalid in %s"
+
+ // InvalidPatternInError indicates an invalid pattern in a schema or items definition
+ InvalidPatternInError = "%s in %s has invalid pattern: %q"
+
+ // InvalidPatternInHeaderError indicates a header definition with an invalid pattern
+ InvalidPatternInHeaderError = "in operation %q, header %s for %s has invalid pattern %q: %v"
+
+ // InvalidPatternInParamError ...
+ InvalidPatternInParamError = "operation %q has invalid pattern in param %q: %q"
+
+ // InvalidReferenceError indicates that a $ref property could not be resolved
+ InvalidReferenceError = "invalid ref %q"
+
+ // InvalidResponseDefinitionAsSchemaError indicates an error detected on a response definition, which was mistaken for a schema definition.
+ // Most likely, this situation is encountered whenever a $ref has been added as a sibling of the response definition.
+ InvalidResponseDefinitionAsSchemaError = "invalid definition as Schema for response %s in %s"
+
+ // MultipleBodyParamError indicates that an operation specifies multiple parameters with in: body
+ MultipleBodyParamError = "operation %q has more than 1 body param: %v"
+
+ // NonUniqueOperationIDError indicates that the same operationId has been specified several times
+ NonUniqueOperationIDError = "%q is defined %d times"
+
+ // NoParameterInPathError indicates that a path parameter has no matching parameter definition
+ NoParameterInPathError = "path param %q has no parameter definition"
+
+ // NoValidPathErrorOrWarning indicates that no single path could be validated. If Paths is empty, this message is only a warning.
+ NoValidPathErrorOrWarning = "spec has no valid path defined"
+
+ // NoValidResponseError indicates that no valid response description could be found for an operation
+ NoValidResponseError = "operation %q has no valid response"
+
+ // PathOverlapError ...
+ PathOverlapError = "path %s overlaps with %s"
+
+ // PathParamNotInPathError indicates that a parameter specified with in: path was not found in the path specification
+ PathParamNotInPathError = "path param %q is not present in path %q"
+
+ // PathParamNotUniqueError ...
+ PathParamNotUniqueError = "params in path %q must be unique: %q conflicts with %q"
+
+ // PathParamRequiredError ...
+ PathParamRequiredError = "in operation %q, path param %q must be declared as required"
+
+ // RefNotAllowedInHeaderError indicates a $ref was found in a header definition, which is not allowed by Swagger
+ RefNotAllowedInHeaderError = "IMPORTANT! In %q: $ref is not allowed in headers. In context for header %q%s"
+
+ // RequiredButNotDefinedError ...
+ RequiredButNotDefinedError = "%q is present in required but not defined as a property in definition %q"
+
+ // SomeParametersBrokenError indicates that some parameters could not be resolved, which might result in only partial checks being carried out
+ SomeParametersBrokenError = "some parameter definitions are broken in %q.%s. Cannot carry on full checks on parameters for operation %s"
+
+ // UnresolvedReferencesError indicates that at least one $ref could not be resolved
+ UnresolvedReferencesError = "some references could not be resolved in spec. First found: %v"
+)
+
+// Warning messages related to spec validation and returned as results
+const (
+ // ExamplesWithoutSchemaWarning indicates that examples are provided for a response, but no schema to validate the example against
+ ExamplesWithoutSchemaWarning = "Examples provided without schema in operation %q, %s"
+
+ // ExamplesMimeNotSupportedWarning indicates that examples are provided with a mime type other than application/json, which
+ // the validator does not support yet
+ ExamplesMimeNotSupportedWarning = "No validation attempt for examples for media types other than application/json, in operation %q, %s"
+
+ // PathParamGarbledWarning ...
+ PathParamGarbledWarning = "in path %q, param %q contains {,} or white space. Albeit not strictly illegal, this is probably not what you want"
+
+ // ParamValidationTypeMismatch indicates that a parameter has validation which does not match its type
+ ParamValidationTypeMismatch = "validation keywords of parameter %q in path %q don't match its type %s"
+
+ // PathStrippedParamGarbledWarning ...
+ PathStrippedParamGarbledWarning = "path stripped from path parameters %s contains {,} or white space. This is probably not what you want."
+
+ // ReadOnlyAndRequiredWarning ...
+ ReadOnlyAndRequiredWarning = "Required property %s in %q should not be marked as both required and readOnly"
+
+ // RefShouldNotHaveSiblingsWarning indicates that a $ref was found with a sibling definition. This results in the $ref taking over its siblings,
+ // which is most likely not wanted.
+ RefShouldNotHaveSiblingsWarning = "$ref property should have no sibling in %q.%s"
+
+ // RequiredHasDefaultWarning indicates that a required parameter property should not have a default
+ RequiredHasDefaultWarning = "%s in %s has a default value and is required as a parameter"
+
+ // UnusedDefinitionWarning ...
+ UnusedDefinitionWarning = "definition %q is not used anywhere"
+
+ // UnusedParamWarning ...
+ UnusedParamWarning = "parameter %q is not used anywhere"
+
+ // UnusedResponseWarning ...
+ UnusedResponseWarning = "response %q is not used anywhere"
+)
+
+// Additional error codes
+const (
+ // InternalErrorCode reports an internal technical error
+ InternalErrorCode = http.StatusInternalServerError
+ // NotFoundErrorCode indicates that a resource (e.g.
a $ref) could not be found + NotFoundErrorCode = http.StatusNotFound +) + +func invalidDocumentMsg() errors.Error { + return errors.New(InternalErrorCode, InvalidDocumentError) +} +func invalidRefMsg(path string) errors.Error { + return errors.New(NotFoundErrorCode, InvalidReferenceError, path) +} +func unresolvedReferencesMsg(err error) errors.Error { + return errors.New(errors.CompositeErrorCode, UnresolvedReferencesError, err) +} +func noValidPathMsg() errors.Error { + return errors.New(errors.CompositeErrorCode, NoValidPathErrorOrWarning) +} +func emptyPathParameterMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, EmptyPathParameterError, path) +} +func nonUniqueOperationIDMsg(path string, i int) errors.Error { + return errors.New(errors.CompositeErrorCode, NonUniqueOperationIDError, path, i) +} +func circularAncestryDefinitionMsg(path string, args interface{}) errors.Error { + return errors.New(errors.CompositeErrorCode, CircularAncestryDefinitionError, path, args) +} +func duplicatePropertiesMsg(path string, args interface{}) errors.Error { + return errors.New(errors.CompositeErrorCode, DuplicatePropertiesError, path, args) +} +func pathParamNotInPathMsg(path, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamNotInPathError, param, path) +} +func arrayRequiresItemsMsg(path, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayRequiresItemsError, path, operation) +} +func arrayInParamRequiresItemsMsg(path, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayInParamRequiresItemsError, path, operation) +} +func arrayInHeaderRequiresItemsMsg(path, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayInHeaderRequiresItemsError, path, operation) +} +func invalidItemsPatternMsg(path, operation, pattern string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidItemsPatternError, path, operation, pattern) +} +func invalidPatternMsg(pattern, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternError, pattern, path) +} +func requiredButNotDefinedMsg(path, definition string) errors.Error { + return errors.New(errors.CompositeErrorCode, RequiredButNotDefinedError, path, definition) +} +func pathParamGarbledMsg(path, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamGarbledWarning, path, param) +} +func pathStrippedParamGarbledMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathStrippedParamGarbledWarning, path) +} +func pathOverlapMsg(path, arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathOverlapError, path, arg) +} +func invalidPatternInParamMsg(operation, param, pattern string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternInParamError, operation, param, pattern) +} +func pathParamRequiredMsg(operation, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamRequiredError, operation, param) +} +func bothFormDataAndBodyMsg(operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, BothFormDataAndBodyError, operation) +} +func multipleBodyParamMsg(operation string, args interface{}) errors.Error { + return errors.New(errors.CompositeErrorCode, MultipleBodyParamError, operation, args) +} +func pathParamNotUniqueMsg(path, param, arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, 
PathParamNotUniqueError, path, param, arg) +} +func duplicateParamNameMsg(path, param, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, DuplicateParamNameError, param, path, operation) +} +func unusedParamMsg(arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, UnusedParamWarning, arg) +} +func unusedDefinitionMsg(arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, UnusedDefinitionWarning, arg) +} +func unusedResponseMsg(arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, UnusedResponseWarning, arg) +} +func readOnlyAndRequiredMsg(path, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, ReadOnlyAndRequiredWarning, param, path) +} +func noParameterInPathMsg(param string) errors.Error { + return errors.New(errors.CompositeErrorCode, NoParameterInPathError, param) +} +func requiredHasDefaultMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, RequiredHasDefaultWarning, param, path) +} +func defaultValueDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueDoesNotValidateError, param, path) +} +func defaultValueItemsDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueItemsDoesNotValidateError, param, path) +} +func noValidResponseMsg(operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, NoValidResponseError, operation) +} +func defaultValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueHeaderDoesNotValidateError, operation, header, path) +} +func defaultValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueHeaderItemsDoesNotValidateError, operation, header, path) +} +func invalidPatternInHeaderMsg(operation, header, path, pattern string, args interface{}) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternInHeaderError, operation, header, path, pattern, args) +} +func invalidPatternInMsg(path, in, pattern string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternInError, path, in, pattern) +} +func defaultValueInDoesNotValidateMsg(operation, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueInDoesNotValidateError, operation, path) +} +func exampleValueDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueDoesNotValidateError, param, path) +} +func exampleValueItemsDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueItemsDoesNotValidateError, param, path) +} +func exampleValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueHeaderDoesNotValidateError, operation, header, path) +} +func exampleValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueHeaderItemsDoesNotValidateError, operation, header, path) +} +func exampleValueInDoesNotValidateMsg(operation, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueInDoesNotValidateError, operation, path) +} +func examplesWithoutSchemaMsg(operation, response string) errors.Error { + return 
errors.New(errors.CompositeErrorCode, ExamplesWithoutSchemaWarning, operation, response) +} +func examplesMimeNotSupportedMsg(operation, response string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExamplesMimeNotSupportedWarning, operation, response) +} +func refNotAllowedInHeaderMsg(path, header, ref string) errors.Error { + return errors.New(errors.CompositeErrorCode, RefNotAllowedInHeaderError, path, header, ref) +} +func cannotResolveRefMsg(path, ref string, err error) errors.Error { + return errors.New(errors.CompositeErrorCode, CannotResolveReferenceError, path, ref, err) +} +func invalidParameterDefinitionMsg(path, method, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionError, path, method, operationID) +} +func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionAsSchemaError, path, method, operationID) +} +func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { + return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ) +} + +// disabled +//func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { +// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) +//} +func someParametersBrokenMsg(path, method, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID) +} +func refShouldNotHaveSiblingsMsg(path, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, RefShouldNotHaveSiblingsWarning, operationID, path) +} diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go new file mode 100644 index 00000000000..f55140d1e5b --- /dev/null +++ b/vendor/github.com/go-openapi/validate/type.go @@ -0,0 +1,178 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "reflect" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +type typeValidator struct { + Type spec.StringOrArray + Nullable bool + Format string + In string + Path string +} + +func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) { + // internal type to JSON type with swagger 2.0 format (with go-openapi/strfmt extensions), + // see https://github.com/go-openapi/strfmt/blob/master/README.md + // TODO: this switch really is some sort of reverse lookup for formats. It should be provided by strfmt. 
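+ // For illustration (not an exhaustive list), a few of the pairs this reverse lookup yields:
+ //   strfmt.Date -> (stringType, stringFormatDate)
+ //   []byte      -> (stringType, stringFormatByte)
+ //   a bool      -> (booleanType, "")
+ //   a slice     -> (arrayType, "")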
+ switch data.(type) { + case []byte, strfmt.Base64, *strfmt.Base64: + return stringType, stringFormatByte + case strfmt.CreditCard, *strfmt.CreditCard: + return stringType, stringFormatCreditCard + case strfmt.Date, *strfmt.Date: + return stringType, stringFormatDate + case strfmt.DateTime, *strfmt.DateTime: + return stringType, stringFormatDateTime + case strfmt.Duration, *strfmt.Duration: + return stringType, stringFormatDuration + case runtime.File, *runtime.File: + return fileType, "" + case strfmt.Email, *strfmt.Email: + return stringType, stringFormatEmail + case strfmt.HexColor, *strfmt.HexColor: + return stringType, stringFormatHexColor + case strfmt.Hostname, *strfmt.Hostname: + return stringType, stringFormatHostname + case strfmt.IPv4, *strfmt.IPv4: + return stringType, stringFormatIPv4 + case strfmt.IPv6, *strfmt.IPv6: + return stringType, stringFormatIPv6 + case strfmt.ISBN, *strfmt.ISBN: + return stringType, stringFormatISBN + case strfmt.ISBN10, *strfmt.ISBN10: + return stringType, stringFormatISBN10 + case strfmt.ISBN13, *strfmt.ISBN13: + return stringType, stringFormatISBN13 + case strfmt.MAC, *strfmt.MAC: + return stringType, stringFormatMAC + case strfmt.ObjectId, *strfmt.ObjectId: + return stringType, stringFormatBSONObjectID + case strfmt.Password, *strfmt.Password: + return stringType, stringFormatPassword + case strfmt.RGBColor, *strfmt.RGBColor: + return stringType, stringFormatRGBColor + case strfmt.SSN, *strfmt.SSN: + return stringType, stringFormatSSN + case strfmt.URI, *strfmt.URI: + return stringType, stringFormatURI + case strfmt.UUID, *strfmt.UUID: + return stringType, stringFormatUUID + case strfmt.UUID3, *strfmt.UUID3: + return stringType, stringFormatUUID3 + case strfmt.UUID4, *strfmt.UUID4: + return stringType, stringFormatUUID4 + case strfmt.UUID5, *strfmt.UUID5: + return stringType, stringFormatUUID5 + // TODO: missing binary (io.ReadCloser) + // TODO: missing json.Number + default: + val := reflect.ValueOf(data) + tpe := val.Type() + switch tpe.Kind() { + case reflect.Bool: + return booleanType, "" + case reflect.String: + return stringType, "" + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: + // NOTE: that is the spec. With go-openapi, is that not uint32 for unsigned integers? + return integerType, integerFormatInt32 + case reflect.Int, reflect.Int64, reflect.Uint, reflect.Uint64: + return integerType, integerFormatInt64 + case reflect.Float32: + // NOTE: is that not numberFormatFloat? + return numberType, numberFormatFloat32 + case reflect.Float64: + // NOTE: is that not "double"? + return numberType, numberFormatFloat64 + // NOTE: go arrays (reflect.Array) are not supported (fixed length) + case reflect.Slice: + return arrayType, "" + case reflect.Map, reflect.Struct: + return objectType, "" + case reflect.Interface: + // What to do here? 
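+ // (likely unreachable in practice: reflect.ValueOf returns the concrete
+ // dynamic type of data, so kind Interface is not expected at this point)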
+ panic("dunno what to do here") + case reflect.Ptr: + return t.schemaInfoForType(reflect.Indirect(val).Interface()) + } + } + return "", "" +} + +func (t *typeValidator) SetPath(path string) { + t.Path = path +} + +func (t *typeValidator) Applies(source interface{}, kind reflect.Kind) bool { + // typeValidator applies to Schema, Parameter and Header objects + stpe := reflect.TypeOf(source) + r := (len(t.Type) > 0 || t.Format != "") && (stpe == specSchemaType || stpe == specParameterType || stpe == specHeaderType) + debugLog("type validator for %q applies %t for %T (kind: %v)\n", t.Path, r, source, kind) + return r +} + +func (t *typeValidator) Validate(data interface{}) *Result { + result := new(Result) + result.Inc() + if data == nil || reflect.DeepEqual(reflect.Zero(reflect.TypeOf(data)), reflect.ValueOf(data)) { + // nil or zero value for the passed structure require Type: null + if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType)) + } + return result + } + + // check if the type matches, should be used in every validator chain as first item + val := reflect.Indirect(reflect.ValueOf(data)) + kind := val.Kind() + + // infer schema type (JSON) and format from passed data type + schType, format := t.schemaInfoForType(data) + + debugLog("path: %s, schType: %s, format: %s, expType: %s, expFmt: %s, kind: %s", t.Path, schType, format, t.Type, t.Format, val.Kind().String()) + + // check numerical types + // TODO: check unsigned ints + // TODO: check json.Number (see schema.go) + isLowerInt := t.Format == integerFormatInt64 && format == integerFormatInt32 + isLowerFloat := t.Format == numberFormatFloat64 && format == numberFormatFloat32 + isFloatInt := schType == numberType && swag.IsFloat64AJSONInteger(val.Float()) && t.Type.Contains(integerType) + isIntFloat := schType == integerType && t.Type.Contains(numberType) + + if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !(t.Type.Contains(schType) || format == t.Format || isFloatInt || isIntFloat || isLowerInt || isLowerFloat) { + // TODO: test case + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format)) + } + + if !(t.Type.Contains(numberType) || t.Type.Contains(integerType)) && t.Format != "" && (kind == reflect.String || kind == reflect.Slice) { + return result + } + + if !(t.Type.Contains(schType) || isFloatInt || isIntFloat) { + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType)) + } + return result +} diff --git a/vendor/github.com/go-openapi/validate/update-fixtures.sh b/vendor/github.com/go-openapi/validate/update-fixtures.sh new file mode 100644 index 00000000000..21b06e2b09a --- /dev/null +++ b/vendor/github.com/go-openapi/validate/update-fixtures.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -eu -o pipefail +dir=$(git rev-parse --show-toplevel) +scratch=$(mktemp -d -t tmp.XXXXXXXXXX) + +function finish { + rm -rf "$scratch" +} +trap finish EXIT SIGHUP SIGINT SIGTERM + +cd "$scratch" +git clone https://github.com/json-schema-org/JSON-Schema-Test-Suite Suite +cp -r Suite/tests/draft4/* "$dir/fixtures/jsonschema_suite" +cp -a Suite/remotes "$dir/fixtures/jsonschema_suite" diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go new file mode 100644 index 00000000000..2acb839db8e --- /dev/null +++ 
b/vendor/github.com/go-openapi/validate/validator.go @@ -0,0 +1,646 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// An EntityValidator is an interface for things that can validate entities +type EntityValidator interface { + Validate(interface{}) *Result +} + +type valueValidator interface { + SetPath(path string) + Applies(interface{}, reflect.Kind) bool + Validate(interface{}) *Result +} + +type itemsValidator struct { + items *spec.Items + root interface{} + path string + in string + validators []valueValidator + KnownFormats strfmt.Registry +} + +func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry) *itemsValidator { + iv := &itemsValidator{path: path, in: in, items: items, root: root, KnownFormats: formats} + iv.validators = []valueValidator{ + &typeValidator{ + Type: spec.StringOrArray([]string{items.Type}), + Nullable: items.Nullable, + Format: items.Format, + In: in, + Path: path, + }, + iv.stringValidator(), + iv.formatValidator(), + iv.numberValidator(), + iv.sliceValidator(), + iv.commonValidator(), + } + return iv +} + +func (i *itemsValidator) Validate(index int, data interface{}) *Result { + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + mainResult := new(Result) + path := fmt.Sprintf("%s.%d", i.path, index) + + for _, validator := range i.validators { + validator.SetPath(path) + if validator.Applies(i.root, kind) { + result := validator.Validate(data) + mainResult.Merge(result) + mainResult.Inc() + if result != nil && result.HasErrors() { + return mainResult + } + } + } + return mainResult +} + +func (i *itemsValidator) commonValidator() valueValidator { + return &basicCommonValidator{ + In: i.in, + Default: i.items.Default, + Enum: i.items.Enum, + } +} + +func (i *itemsValidator) sliceValidator() valueValidator { + return &basicSliceValidator{ + In: i.in, + Default: i.items.Default, + MaxItems: i.items.MaxItems, + MinItems: i.items.MinItems, + UniqueItems: i.items.UniqueItems, + Source: i.root, + Items: i.items.Items, + KnownFormats: i.KnownFormats, + } +} + +func (i *itemsValidator) numberValidator() valueValidator { + return &numberValidator{ + In: i.in, + Default: i.items.Default, + MultipleOf: i.items.MultipleOf, + Maximum: i.items.Maximum, + ExclusiveMaximum: i.items.ExclusiveMaximum, + Minimum: i.items.Minimum, + ExclusiveMinimum: i.items.ExclusiveMinimum, + Type: i.items.Type, + Format: i.items.Format, + } +} + +func (i *itemsValidator) stringValidator() valueValidator { + return &stringValidator{ + In: i.in, + Default: i.items.Default, + MaxLength: i.items.MaxLength, + MinLength: i.items.MinLength, + Pattern: i.items.Pattern, + AllowEmptyValue: false, + } +} + +func (i *itemsValidator) formatValidator() valueValidator { + return &formatValidator{ + In: i.in, + //Default: i.items.Default, + Format: 
i.items.Format, + KnownFormats: i.KnownFormats, + } +} + +type basicCommonValidator struct { + Path string + In string + Default interface{} + Enum []interface{} +} + +func (b *basicCommonValidator) SetPath(path string) { + b.Path = path +} + +func (b *basicCommonValidator) Applies(source interface{}, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Schema, *spec.Header: + return true + } + return false +} + +func (b *basicCommonValidator) Validate(data interface{}) (res *Result) { + if len(b.Enum) > 0 { + for _, enumValue := range b.Enum { + actualType := reflect.TypeOf(enumValue) + if actualType != nil { // Safeguard + expectedValue := reflect.ValueOf(data) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { + return nil + } + } + } + } + return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum)) + } + return nil +} + +// A HeaderValidator has very limited subset of validations to apply +type HeaderValidator struct { + name string + header *spec.Header + validators []valueValidator + KnownFormats strfmt.Registry +} + +// NewHeaderValidator creates a new header validator object +func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry) *HeaderValidator { + p := &HeaderValidator{name: name, header: header, KnownFormats: formats} + p.validators = []valueValidator{ + &typeValidator{ + Type: spec.StringOrArray([]string{header.Type}), + Nullable: header.Nullable, + Format: header.Format, + In: "header", + Path: name, + }, + p.stringValidator(), + p.formatValidator(), + p.numberValidator(), + p.sliceValidator(), + p.commonValidator(), + } + return p +} + +// Validate the value of the header against its schema +func (p *HeaderValidator) Validate(data interface{}) *Result { + result := new(Result) + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + + for _, validator := range p.validators { + if validator.Applies(p.header, kind) { + if err := validator.Validate(data); err != nil { + result.Merge(err) + if err.HasErrors() { + return result + } + } + } + } + return nil +} + +func (p *HeaderValidator) commonValidator() valueValidator { + return &basicCommonValidator{ + Path: p.name, + In: "response", + Default: p.header.Default, + Enum: p.header.Enum, + } +} + +func (p *HeaderValidator) sliceValidator() valueValidator { + return &basicSliceValidator{ + Path: p.name, + In: "response", + Default: p.header.Default, + MaxItems: p.header.MaxItems, + MinItems: p.header.MinItems, + UniqueItems: p.header.UniqueItems, + Items: p.header.Items, + Source: p.header, + KnownFormats: p.KnownFormats, + } +} + +func (p *HeaderValidator) numberValidator() valueValidator { + return &numberValidator{ + Path: p.name, + In: "response", + Default: p.header.Default, + MultipleOf: p.header.MultipleOf, + Maximum: p.header.Maximum, + ExclusiveMaximum: p.header.ExclusiveMaximum, + Minimum: p.header.Minimum, + ExclusiveMinimum: p.header.ExclusiveMinimum, + Type: p.header.Type, + Format: p.header.Format, + } +} + +func (p *HeaderValidator) stringValidator() valueValidator { + return &stringValidator{ + Path: p.name, + In: "response", + Default: p.header.Default, + Required: true, + MaxLength: p.header.MaxLength, + MinLength: p.header.MinLength, + Pattern: p.header.Pattern, + AllowEmptyValue: false, + } +} + +func (p *HeaderValidator) formatValidator() valueValidator { + return &formatValidator{ + Path: p.name, + In: "response", + //Default: 
p.header.Default, + Format: p.header.Format, + KnownFormats: p.KnownFormats, + } +} + +// A ParamValidator has very limited subset of validations to apply +type ParamValidator struct { + param *spec.Parameter + validators []valueValidator + KnownFormats strfmt.Registry +} + +// NewParamValidator creates a new param validator object +func NewParamValidator(param *spec.Parameter, formats strfmt.Registry) *ParamValidator { + p := &ParamValidator{param: param, KnownFormats: formats} + p.validators = []valueValidator{ + &typeValidator{ + Type: spec.StringOrArray([]string{param.Type}), + Nullable: param.Nullable, + Format: param.Format, + In: param.In, + Path: param.Name, + }, + p.stringValidator(), + p.formatValidator(), + p.numberValidator(), + p.sliceValidator(), + p.commonValidator(), + } + return p +} + +// Validate the data against the description of the parameter +func (p *ParamValidator) Validate(data interface{}) *Result { + result := new(Result) + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + + // TODO: validate type + for _, validator := range p.validators { + if validator.Applies(p.param, kind) { + if err := validator.Validate(data); err != nil { + result.Merge(err) + if err.HasErrors() { + return result + } + } + } + } + return nil +} + +func (p *ParamValidator) commonValidator() valueValidator { + return &basicCommonValidator{ + Path: p.param.Name, + In: p.param.In, + Default: p.param.Default, + Enum: p.param.Enum, + } +} + +func (p *ParamValidator) sliceValidator() valueValidator { + return &basicSliceValidator{ + Path: p.param.Name, + In: p.param.In, + Default: p.param.Default, + MaxItems: p.param.MaxItems, + MinItems: p.param.MinItems, + UniqueItems: p.param.UniqueItems, + Items: p.param.Items, + Source: p.param, + KnownFormats: p.KnownFormats, + } +} + +func (p *ParamValidator) numberValidator() valueValidator { + return &numberValidator{ + Path: p.param.Name, + In: p.param.In, + Default: p.param.Default, + MultipleOf: p.param.MultipleOf, + Maximum: p.param.Maximum, + ExclusiveMaximum: p.param.ExclusiveMaximum, + Minimum: p.param.Minimum, + ExclusiveMinimum: p.param.ExclusiveMinimum, + Type: p.param.Type, + Format: p.param.Format, + } +} + +func (p *ParamValidator) stringValidator() valueValidator { + return &stringValidator{ + Path: p.param.Name, + In: p.param.In, + Default: p.param.Default, + AllowEmptyValue: p.param.AllowEmptyValue, + Required: p.param.Required, + MaxLength: p.param.MaxLength, + MinLength: p.param.MinLength, + Pattern: p.param.Pattern, + } +} + +func (p *ParamValidator) formatValidator() valueValidator { + return &formatValidator{ + Path: p.param.Name, + In: p.param.In, + //Default: p.param.Default, + Format: p.param.Format, + KnownFormats: p.KnownFormats, + } +} + +type basicSliceValidator struct { + Path string + In string + Default interface{} + MaxItems *int64 + MinItems *int64 + UniqueItems bool + Items *spec.Items + Source interface{} + itemsValidator *itemsValidator + KnownFormats strfmt.Registry +} + +func (s *basicSliceValidator) SetPath(path string) { + s.Path = path +} + +func (s *basicSliceValidator) Applies(source interface{}, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Items, *spec.Header: + return kind == reflect.Slice + } + return false +} + +func (s *basicSliceValidator) Validate(data interface{}) *Result { + val := reflect.ValueOf(data) + + size := int64(val.Len()) + if s.MinItems != nil { + if err := MinItems(s.Path, s.In, size, *s.MinItems); err != nil { + return errorHelp.sErr(err) + } + } + + if 
s.MaxItems != nil { + if err := MaxItems(s.Path, s.In, size, *s.MaxItems); err != nil { + return errorHelp.sErr(err) + } + } + + if s.UniqueItems { + if err := UniqueItems(s.Path, s.In, data); err != nil { + return errorHelp.sErr(err) + } + } + + if s.itemsValidator == nil && s.Items != nil { + s.itemsValidator = newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats) + } + + if s.itemsValidator != nil { + for i := 0; i < int(size); i++ { + ele := val.Index(i) + if err := s.itemsValidator.Validate(i, ele.Interface()); err != nil && err.HasErrors() { + return err + } + } + } + return nil +} + +/* unused +func (s *basicSliceValidator) hasDuplicates(value reflect.Value, size int) bool { + dict := make(map[interface{}]struct{}) + for i := 0; i < size; i++ { + ele := value.Index(i) + if _, ok := dict[ele.Interface()]; ok { + return true + } + dict[ele.Interface()] = struct{}{} + } + return false +} +*/ + +type numberValidator struct { + Path string + In string + Default interface{} + MultipleOf *float64 + Maximum *float64 + ExclusiveMaximum bool + Minimum *float64 + ExclusiveMinimum bool + // Allows for more accurate behavior regarding integers + Type string + Format string +} + +func (n *numberValidator) SetPath(path string) { + n.Path = path +} + +func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: + isInt := kind >= reflect.Int && kind <= reflect.Uint64 + isFloat := kind == reflect.Float32 || kind == reflect.Float64 + r := isInt || isFloat + debugLog("schema props validator for %q applies %t for %T (kind: %v) isInt=%t, isFloat=%t\n", n.Path, r, source, kind, isInt, isFloat) + return r + } + debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", n.Path, false, source, kind) + return false +} + +// Validate provides a validator for generic JSON numbers, +// +// By default, numbers are internally represented as float64. +// Formats float, or float32 may alter this behavior by mapping to float32. +// A special validation process is followed for integers, with optional "format": +// this is an attempt to provide a validation with native types. +// +// NOTE: since the constraint specified (boundary, multipleOf) is unmarshalled +// as float64, loss of information remains possible (e.g. on very large integers). +// +// Since this value directly comes from the unmarshalling, it is not possible +// at this stage of processing to check further and guarantee the correctness of such values. +// +// Normally, the JSON Number.MAX_SAFE_INTEGER (resp. Number.MIN_SAFE_INTEGER) +// would check we do not get such a loss. +// +// If this is the case, replace AddErrors() by AddWarnings() and IsValid() by !HasWarnings(). +// +// TODO: consider replacing boundary check errors by simple warnings. +// +// TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) +func (n *numberValidator) Validate(val interface{}) *Result { + res := new(Result) + + resMultiple := new(Result) + resMinimum := new(Result) + resMaximum := new(Result) + + // Used only to attempt to validate constraint on value, + // even though value or constraint specified do not match type and format + data := valueHelp.asFloat64(val) + + // Is the provided value within the range of the specified numeric type and format? 
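+ // For example, a value of 3000000000 declared with type "integer" and format "int32"
+ // fails this check, since it exceeds math.MaxInt32 (2147483647).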
+ res.AddErrors(IsValueValidAgainstRange(val, n.Type, n.Format, "Checked", n.Path)) + + // nolint: dupl + if n.MultipleOf != nil { + // Is the constraint specifier within the range of the specific numeric type and format? + resMultiple.AddErrors(IsValueValidAgainstRange(*n.MultipleOf, n.Type, n.Format, "MultipleOf", n.Path)) + if resMultiple.IsValid() { + // Constraint validated with compatible types + if err := MultipleOfNativeType(n.Path, n.In, val, *n.MultipleOf); err != nil { + resMultiple.Merge(errorHelp.sErr(err)) + } + } else { + // Constraint nevertheless validated, converted as general number + if err := MultipleOf(n.Path, n.In, data, *n.MultipleOf); err != nil { + resMultiple.Merge(errorHelp.sErr(err)) + } + } + } + + // nolint: dupl + if n.Maximum != nil { + // Is the constraint specifier within the range of the specific numeric type and format? + resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path)) + if resMaximum.IsValid() { + // Constraint validated with compatible types + if err := MaximumNativeType(n.Path, n.In, val, *n.Maximum, n.ExclusiveMaximum); err != nil { + resMaximum.Merge(errorHelp.sErr(err)) + } + } else { + // Constraint nevertheless validated, converted as general number + if err := Maximum(n.Path, n.In, data, *n.Maximum, n.ExclusiveMaximum); err != nil { + resMaximum.Merge(errorHelp.sErr(err)) + } + } + } + + // nolint: dupl + if n.Minimum != nil { + // Is the constraint specifier within the range of the specific numeric type and format? + resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path)) + if resMinimum.IsValid() { + // Constraint validated with compatible types + if err := MinimumNativeType(n.Path, n.In, val, *n.Minimum, n.ExclusiveMinimum); err != nil { + resMinimum.Merge(errorHelp.sErr(err)) + } + } else { + // Constraint nevertheless validated, converted as general number + if err := Minimum(n.Path, n.In, data, *n.Minimum, n.ExclusiveMinimum); err != nil { + resMinimum.Merge(errorHelp.sErr(err)) + } + } + } + res.Merge(resMultiple, resMinimum, resMaximum) + res.Inc() + return res +} + +type stringValidator struct { + Default interface{} + Required bool + AllowEmptyValue bool + MaxLength *int64 + MinLength *int64 + Pattern string + Path string + In string +} + +func (s *stringValidator) SetPath(path string) { + s.Path = path +} + +func (s *stringValidator) Applies(source interface{}, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: + r := kind == reflect.String + debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind) + return r + } + debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, false, source, kind) + return false +} + +func (s *stringValidator) Validate(val interface{}) *Result { + data, ok := val.(string) + if !ok { + return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val)) + } + + if s.Required && !s.AllowEmptyValue && (s.Default == nil || s.Default == "") { + if err := RequiredString(s.Path, s.In, data); err != nil { + return errorHelp.sErr(err) + } + } + + if s.MaxLength != nil { + if err := MaxLength(s.Path, s.In, data, *s.MaxLength); err != nil { + return errorHelp.sErr(err) + } + } + + if s.MinLength != nil { + if err := MinLength(s.Path, s.In, data, *s.MinLength); err != nil { + return errorHelp.sErr(err) + } + } + + if s.Pattern != "" { + if err := Pattern(s.Path, s.In, data, s.Pattern); err != 
nil { + return errorHelp.sErr(err) + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go new file mode 100644 index 00000000000..1a9371d1e59 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/values.go @@ -0,0 +1,424 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validate + +import ( + "fmt" + "reflect" + "strings" + "unicode/utf8" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Enum validates if the data is a member of the enum +func Enum(path, in string, data interface{}, enum interface{}) *errors.Validation { + return EnumCase(path, in, data, enum, true) +} + +// EnumCase validates if the data is a member of the enum and may respect case-sensitivity for strings +func EnumCase(path, in string, data interface{}, enum interface{}, caseSensitive bool) *errors.Validation { + val := reflect.ValueOf(enum) + if val.Kind() != reflect.Slice { + return nil + } + + dataString := convertEnumCaseStringKind(data, caseSensitive) + var values []interface{} + for i := 0; i < val.Len(); i++ { + ele := val.Index(i) + enumValue := ele.Interface() + if data != nil { + if reflect.DeepEqual(data, enumValue) { + return nil + } + enumString := convertEnumCaseStringKind(enumValue, caseSensitive) + if dataString != nil && enumString != nil && strings.EqualFold(*dataString, *enumString) { + return nil + } + actualType := reflect.TypeOf(enumValue) + if actualType == nil { // Safeguard. 
Frankly, I don't know how we may get a nil + continue + } + expectedValue := reflect.ValueOf(data) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { + return nil + } + } + } + values = append(values, enumValue) + } + return errors.EnumFail(path, in, data, values) +} + +// convertEnumCaseStringKind converts interface if it is kind of string and case insensitivity is set +func convertEnumCaseStringKind(value interface{}, caseSensitive bool) *string { + if caseSensitive { + return nil + } + + val := reflect.ValueOf(value) + if val.Kind() != reflect.String { + return nil + } + + str := fmt.Sprintf("%v", value) + return &str +} + +// MinItems validates that there are at least n items in a slice +func MinItems(path, in string, size, min int64) *errors.Validation { + if size < min { + return errors.TooFewItems(path, in, min) + } + return nil +} + +// MaxItems validates that there are at most n items in a slice +func MaxItems(path, in string, size, max int64) *errors.Validation { + if size > max { + return errors.TooManyItems(path, in, max) + } + return nil +} + +// UniqueItems validates that the provided slice has unique elements +func UniqueItems(path, in string, data interface{}) *errors.Validation { + val := reflect.ValueOf(data) + if val.Kind() != reflect.Slice { + return nil + } + var unique []interface{} + for i := 0; i < val.Len(); i++ { + v := val.Index(i).Interface() + for _, u := range unique { + if reflect.DeepEqual(v, u) { + return errors.DuplicateItems(path, in) + } + } + unique = append(unique, v) + } + return nil +} + +// MinLength validates a string for minimum length +func MinLength(path, in, data string, minLength int64) *errors.Validation { + strLen := int64(utf8.RuneCount([]byte(data))) + if strLen < minLength { + return errors.TooShort(path, in, minLength) + } + return nil +} + +// MaxLength validates a string for maximum length +func MaxLength(path, in, data string, maxLength int64) *errors.Validation { + strLen := int64(utf8.RuneCount([]byte(data))) + if strLen > maxLength { + return errors.TooLong(path, in, maxLength) + } + return nil +} + +// Required validates an interface for requiredness +func Required(path, in string, data interface{}) *errors.Validation { + val := reflect.ValueOf(data) + if val.IsValid() { + if reflect.DeepEqual(reflect.Zero(val.Type()).Interface(), val.Interface()) { + return errors.Required(path, in) + } + return nil + } + return errors.Required(path, in) +} + +// RequiredString validates a string for requiredness +func RequiredString(path, in, data string) *errors.Validation { + if data == "" { + return errors.Required(path, in) + } + return nil +} + +// RequiredNumber validates a number for requiredness +func RequiredNumber(path, in string, data float64) *errors.Validation { + if data == 0 { + return errors.Required(path, in) + } + return nil +} + +// Pattern validates a string against a regular expression +func Pattern(path, in, data, pattern string) *errors.Validation { + re, err := compileRegexp(pattern) + if err != nil { + return errors.FailedPattern(path, in, fmt.Sprintf("%s, but pattern is invalid: %s", pattern, err.Error())) + } + if !re.MatchString(data) { + return errors.FailedPattern(path, in, pattern) + } + return nil +} + +// MaximumInt validates if a number is smaller than a given maximum +func MaximumInt(path, in string, data, max int64, exclusive bool) *errors.Validation { + 
if (!exclusive && data > max) || (exclusive && data >= max) {
+ return errors.ExceedsMaximumInt(path, in, max, exclusive)
+ }
+ return nil
+}
+
+// MaximumUint validates if a number is smaller than a given maximum
+func MaximumUint(path, in string, data, max uint64, exclusive bool) *errors.Validation {
+ if (!exclusive && data > max) || (exclusive && data >= max) {
+ return errors.ExceedsMaximumUint(path, in, max, exclusive)
+ }
+ return nil
+}
+
+// Maximum validates if a number is smaller than a given maximum
+func Maximum(path, in string, data, max float64, exclusive bool) *errors.Validation {
+ if (!exclusive && data > max) || (exclusive && data >= max) {
+ return errors.ExceedsMaximum(path, in, max, exclusive)
+ }
+ return nil
+}
+
+// Minimum validates if a number is larger than a given minimum
+func Minimum(path, in string, data, min float64, exclusive bool) *errors.Validation {
+ if (!exclusive && data < min) || (exclusive && data <= min) {
+ return errors.ExceedsMinimum(path, in, min, exclusive)
+ }
+ return nil
+}
+
+// MinimumInt validates if a number is larger than a given minimum
+func MinimumInt(path, in string, data, min int64, exclusive bool) *errors.Validation {
+ if (!exclusive && data < min) || (exclusive && data <= min) {
+ return errors.ExceedsMinimumInt(path, in, min, exclusive)
+ }
+ return nil
+}
+
+// MinimumUint validates if a number is larger than a given minimum
+func MinimumUint(path, in string, data, min uint64, exclusive bool) *errors.Validation {
+ if (!exclusive && data < min) || (exclusive && data <= min) {
+ return errors.ExceedsMinimumUint(path, in, min, exclusive)
+ }
+ return nil
+}
+
+// MultipleOf validates if the provided number is a multiple of the factor
+func MultipleOf(path, in string, data, factor float64) *errors.Validation {
+ // multipleOf factor must be positive
+ if factor < 0 {
+ return errors.MultipleOfMustBePositive(path, in, factor)
+ }
+ var mult float64
+ if factor < 1 {
+ mult = 1 / factor * data
+ } else {
+ mult = data / factor
+ }
+ if !swag.IsFloat64AJSONInteger(mult) {
+ return errors.NotMultipleOf(path, in, factor)
+ }
+ return nil
+}
+
+// MultipleOfInt validates if the provided integer is a multiple of the factor
+func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation {
+ // multipleOf factor must be positive
+ if factor < 0 {
+ return errors.MultipleOfMustBePositive(path, in, factor)
+ }
+ mult := data / factor
+ if mult*factor != data {
+ return errors.NotMultipleOf(path, in, factor)
+ }
+ return nil
+}
+
+// MultipleOfUint validates if the provided unsigned integer is a multiple of the factor
+func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation {
+ mult := data / factor
+ if mult*factor != data {
+ return errors.NotMultipleOf(path, in, factor)
+ }
+ return nil
+}
+
+// FormatOf validates if a string matches a format in the format registry
+func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.Validation {
+ if registry == nil {
+ registry = strfmt.Default
+ }
+ if ok := registry.ContainsName(format); !ok {
+ return errors.InvalidTypeName(format)
+ }
+ if ok := registry.Validates(format, data); !ok {
+ return errors.InvalidType(path, in, format, data)
+ }
+ return nil
+}
+
+// MaximumNativeType provides native type constraint validation as a facade
+// to various numeric types versions of Maximum constraint check.
+//
+// Assumes that any possible loss conversion during conversion has been
+// checked beforehand.
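+//
+// For example (a sketch with hypothetical arguments):
+//   MaximumNativeType("rate", "query", int64(12), 10, false)
+// compares natively as int64 and yields an ExceedsMaximumInt error, while a
+// float64 value falls through to the generic Maximum comparison.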
+// +// NOTE: currently, the max value is marshalled as a float64, no matter what, +// which means there may be a loss during conversions (e.g. for very large integers) +// +// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +func MaximumNativeType(path, in string, val interface{}, max float64, exclusive bool) *errors.Validation { + kind := reflect.ValueOf(val).Type().Kind() + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value := valueHelp.asInt64(val) + return MaximumInt(path, in, value, int64(max), exclusive) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value := valueHelp.asUint64(val) + if max < 0 { + return errors.ExceedsMaximum(path, in, max, exclusive) + } + return MaximumUint(path, in, value, uint64(max), exclusive) + case reflect.Float32, reflect.Float64: + fallthrough + default: + value := valueHelp.asFloat64(val) + return Maximum(path, in, value, max, exclusive) + } +} + +// MinimumNativeType provides native type constraint validation as a facade +// to various numeric types versions of Minimum constraint check. +// +// Assumes that any possible loss conversion during conversion has been +// checked beforehand. +// +// NOTE: currently, the min value is marshalled as a float64, no matter what, +// which means there may be a loss during conversions (e.g. for very large integers) +// +// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +func MinimumNativeType(path, in string, val interface{}, min float64, exclusive bool) *errors.Validation { + kind := reflect.ValueOf(val).Type().Kind() + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value := valueHelp.asInt64(val) + return MinimumInt(path, in, value, int64(min), exclusive) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value := valueHelp.asUint64(val) + if min < 0 { + return nil + } + return MinimumUint(path, in, value, uint64(min), exclusive) + case reflect.Float32, reflect.Float64: + fallthrough + default: + value := valueHelp.asFloat64(val) + return Minimum(path, in, value, min, exclusive) + } +} + +// MultipleOfNativeType provides native type constraint validation as a facade +// to various numeric types version of MultipleOf constraint check. +// +// Assumes that any possible loss conversion during conversion has been +// checked beforehand. +// +// NOTE: currently, the multipleOf factor is marshalled as a float64, no matter what, +// which means there may be a loss during conversions (e.g. 
for very large integers) +// +// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +func MultipleOfNativeType(path, in string, val interface{}, multipleOf float64) *errors.Validation { + kind := reflect.ValueOf(val).Type().Kind() + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value := valueHelp.asInt64(val) + return MultipleOfInt(path, in, value, int64(multipleOf)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value := valueHelp.asUint64(val) + return MultipleOfUint(path, in, value, uint64(multipleOf)) + case reflect.Float32, reflect.Float64: + fallthrough + default: + value := valueHelp.asFloat64(val) + return MultipleOf(path, in, value, multipleOf) + } +} + +// IsValueValidAgainstRange checks that a numeric value is compatible with +// the range defined by Type and Format, that is, may be converted without loss. +// +// NOTE: this check is about type capacity and not formal verification such as: 1.0 != 1L +func IsValueValidAgainstRange(val interface{}, typeName, format, prefix, path string) error { + kind := reflect.ValueOf(val).Type().Kind() + + // What is the string representation of val + stringRep := "" + switch kind { + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + stringRep = swag.FormatUint64(valueHelp.asUint64(val)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + stringRep = swag.FormatInt64(valueHelp.asInt64(val)) + case reflect.Float32, reflect.Float64: + stringRep = swag.FormatFloat64(valueHelp.asFloat64(val)) + default: + return fmt.Errorf("%s value number range checking called with invalid (non numeric) val type in %s", prefix, path) + } + + var errVal error + + switch typeName { + case integerType: + switch format { + case integerFormatInt32: + _, errVal = swag.ConvertInt32(stringRep) + case integerFormatUInt32: + _, errVal = swag.ConvertUint32(stringRep) + case integerFormatUInt64: + _, errVal = swag.ConvertUint64(stringRep) + case integerFormatInt64: + fallthrough + default: + _, errVal = swag.ConvertInt64(stringRep) + } + case numberType: + fallthrough + default: + switch format { + case numberFormatFloat, numberFormatFloat32: + _, errVal = swag.ConvertFloat32(stringRep) + case numberFormatDouble, numberFormatFloat64: + fallthrough + default: + // No check can be performed here since + // no number beyond float64 is supported + } + } + if errVal != nil { // We don't report the actual errVal from strconv + if format != "" { + errVal = fmt.Errorf("%s value must be of type %s with format %s in %s", prefix, typeName, format, path) + } else { + errVal = fmt.Errorf("%s value must be of type %s (default format) in %s", prefix, typeName, path) + } + } + return errVal +} diff --git a/vendor/github.com/gobuffalo/flect/azure-pipelines.yml b/vendor/github.com/gobuffalo/flect/azure-pipelines.yml deleted file mode 100644 index 417e2c57921..00000000000 --- a/vendor/github.com/gobuffalo/flect/azure-pipelines.yml +++ /dev/null @@ -1,71 +0,0 @@ -variables: - GOBIN: "$(GOPATH)/bin" # Go binaries path - GOPATH: "$(system.defaultWorkingDirectory)/gopath" # Go workspace path - modulePath: "$(GOPATH)/src/github.com/$(build.repository.name)" # Path to the module"s code - -jobs: -- job: Windows - pool: - vmImage: "vs2017-win2016" - strategy: - matrix: - go 1.10: - go_version: "1.10" - go 1.11 (on): - go_version: "1.11.5" - GO111MODULE: "on" - go 1.11 (off): - go_version: "1.11.5" - GO111MODULE: "off" 
- go 1.12 (on): - go_version: "1.12" - GO111MODULE: "on" - go 1.12 (off): - go_version: "1.12" - GO111MODULE: "off" - steps: - - template: azure-tests.yml - -- job: macOS - pool: - vmImage: "macOS-10.13" - strategy: - matrix: - go 1.10: - go_version: "1.10" - go 1.11 (on): - go_version: "1.11.5" - GO111MODULE: "on" - go 1.11 (off): - go_version: "1.11.5" - GO111MODULE: "off" - go 1.12 (on): - go_version: "1.12" - GO111MODULE: "on" - go 1.12 (off): - go_version: "1.12" - GO111MODULE: "off" - steps: - - template: azure-tests.yml - -- job: Linux - pool: - vmImage: "ubuntu-16.04" - strategy: - matrix: - go 1.10: - go_version: "1.10" - go 1.11 (on): - go_version: "1.11.5" - GO111MODULE: "on" - go 1.11 (off): - go_version: "1.11.5" - GO111MODULE: "off" - go 1.12 (on): - go_version: "1.12" - GO111MODULE: "on" - go 1.12 (off): - go_version: "1.12" - GO111MODULE: "off" - steps: - - template: azure-tests.yml diff --git a/vendor/github.com/gobuffalo/flect/azure-tests.yml b/vendor/github.com/gobuffalo/flect/azure-tests.yml deleted file mode 100644 index eea5822fad5..00000000000 --- a/vendor/github.com/gobuffalo/flect/azure-tests.yml +++ /dev/null @@ -1,19 +0,0 @@ -steps: - - task: GoTool@0 - inputs: - version: $(go_version) - - task: Bash@3 - inputs: - targetType: inline - script: | - mkdir -p "$(GOBIN)" - mkdir -p "$(GOPATH)/pkg" - mkdir -p "$(modulePath)" - shopt -s extglob - mv !(gopath) "$(modulePath)" - displayName: "Setup Go Workspace" - - script: | - go get -t -v ./... - go test -race ./... - workingDirectory: "$(modulePath)" - displayName: "Tests" diff --git a/vendor/github.com/gobuffalo/flect/capitalize.go b/vendor/github.com/gobuffalo/flect/capitalize.go index 42ecc166cb3..78334fc0f91 100644 --- a/vendor/github.com/gobuffalo/flect/capitalize.go +++ b/vendor/github.com/gobuffalo/flect/capitalize.go @@ -15,13 +15,10 @@ func Capitalize(s string) string { // bob dylan = Bob dylan // widget_id = Widget_id func (i Ident) Capitalize() Ident { - var x string if len(i.Parts) == 0 { return New("") } - x = string(unicode.ToTitle(rune(i.Original[0]))) - if len(i.Original) > 1 { - x += i.Original[1:] - } - return New(x) + runes := []rune(i.Original) + runes[0] = unicode.ToTitle(runes[0]) + return New(string(runes)) } diff --git a/vendor/github.com/gobuffalo/flect/go.mod b/vendor/github.com/gobuffalo/flect/go.mod index cd02d074b93..7c8d049abaa 100644 --- a/vendor/github.com/gobuffalo/flect/go.mod +++ b/vendor/github.com/gobuffalo/flect/go.mod @@ -1,8 +1,5 @@ module github.com/gobuffalo/flect -go 1.12 +go 1.13 -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/stretchr/testify v1.3.0 -) +require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/gobuffalo/flect/go.sum b/vendor/github.com/gobuffalo/flect/go.sum index 4f76e62c1f3..8fdee5854f1 100644 --- a/vendor/github.com/gobuffalo/flect/go.sum +++ b/vendor/github.com/gobuffalo/flect/go.sum @@ -1,9 +1,11 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gobuffalo/flect/humanize.go b/vendor/github.com/gobuffalo/flect/humanize.go index ded09397039..6a0b75af7bf 100644 --- a/vendor/github.com/gobuffalo/flect/humanize.go +++ b/vendor/github.com/gobuffalo/flect/humanize.go @@ -7,11 +7,11 @@ import ( // Humanize returns first letter of sentence capitalized. // Common acronyms are capitalized as well. // Other capital letters in string are left as provided. -// employee_salary = Employee salary -// employee_id = employee ID -// employee_mobile_number = Employee mobile number -// first_Name = First Name -// firstName = First Name +// employee_salary = Employee salary +// employee_id = employee ID +// employee_mobile_number = Employee mobile number +// first_Name = First Name +// firstName = First Name func Humanize(s string) string { return New(s).Humanize().String() } diff --git a/vendor/github.com/gobuffalo/flect/ident.go b/vendor/github.com/gobuffalo/flect/ident.go index 78b51d45767..9189e9a39b6 100644 --- a/vendor/github.com/gobuffalo/flect/ident.go +++ b/vendor/github.com/gobuffalo/flect/ident.go @@ -38,9 +38,9 @@ func toParts(s string) []string { return []string{strings.ToUpper(s)} } var prev rune - var x string + var x strings.Builder + x.Grow(len(s)) for _, c := range s { - cs := string(c) // fmt.Println("### cs ->", cs) // fmt.Println("### unicode.IsControl(c) ->", unicode.IsControl(c)) // fmt.Println("### unicode.IsDigit(c) ->", unicode.IsDigit(c)) @@ -58,35 +58,38 @@ func toParts(s string) []string { } if isSpace(c) { - parts = xappend(parts, x) - x = cs + parts = xappend(parts, x.String()) + x.Reset() + x.WriteRune(c) prev = c continue } if unicode.IsUpper(c) && !unicode.IsUpper(prev) { - parts = xappend(parts, x) - x = cs + parts = xappend(parts, x.String()) + x.Reset() + x.WriteRune(c) prev = c continue } - if unicode.IsUpper(c) && baseAcronyms[strings.ToUpper(x)] { - parts = xappend(parts, x) - x = cs + if unicode.IsUpper(c) && baseAcronyms[strings.ToUpper(x.String())] { + parts = xappend(parts, x.String()) + x.Reset() + x.WriteRune(c) prev = c continue } if unicode.IsLetter(c) || unicode.IsDigit(c) || unicode.IsPunct(c) || c == '`' { prev = c - x += cs + x.WriteRune(c) continue } - parts = xappend(parts, x) - x = "" + parts = xappend(parts, x.String()) + x.Reset() prev = c } - parts = xappend(parts, x) + parts = xappend(parts, x.String()) return parts } @@ -94,6 +97,19 @@ func toParts(s string) []string { var _ encoding.TextUnmarshaler = &Ident{} var _ encoding.TextMarshaler = &Ident{} +// LastPart returns the last part/word of the original string +func (i *Ident) LastPart() string { + if len(i.Parts) == 0 { + return "" + } + return i.Parts[len(i.Parts)-1] +} + +// ReplaceSuffix creates a new Ident with the original suffix replaced by new +func (i Ident) ReplaceSuffix(orig, new string) Ident { + 
return New(strings.TrimSuffix(i.Original, orig) + new)
+}
+
 //UnmarshalText unmarshalls byte array into the Ident
 func (i *Ident) UnmarshalText(data []byte) error {
 	(*i) = New(string(data))
diff --git a/vendor/github.com/gobuffalo/flect/plural_rules.go b/vendor/github.com/gobuffalo/flect/plural_rules.go
index 86fca8c5f5a..8cd3ba72e7a 100644
--- a/vendor/github.com/gobuffalo/flect/plural_rules.go
+++ b/vendor/github.com/gobuffalo/flect/plural_rules.go
@@ -40,6 +40,7 @@ var singleToPlural = map[string]string{
 	"bus":         "buses",
 	"campus":      "campuses",
 	"caucus":      "caucuses",
+	"child":       "children",
 	"château":     "châteaux",
 	"circus":      "circuses",
 	"codex":       "codices",
@@ -48,7 +49,6 @@ var singleToPlural = map[string]string{
 	"crisis":      "crises",
 	"curriculum":  "curriculums",
 	"datum":       "data",
-	"dear":        "dear",
 	"deer":        "deer",
 	"diagnosis":   "diagnoses",
 	"die":         "dice",
@@ -105,11 +105,13 @@ var singleToPlural = map[string]string{
 	"prognosis":   "prognoses",
 	"prometheus":  "prometheuses",
 	"quiz":        "quizzes",
+	"quota":       "quotas",
 	"radius":      "radiuses",
 	"referendum":  "referendums",
 	"ress":        "resses",
 	"rice":        "rice",
 	"salmon":      "salmon",
+	"sex":         "sexes",
 	"series":      "series",
 	"sheep":       "sheep",
 	"shoe":        "shoes",
@@ -120,6 +122,7 @@ var singleToPlural = map[string]string{
 	"swine":       "swine",
 	"syllabus":    "syllabi",
 	"symposium":   "symposiums",
+	"synapse":     "synapses",
 	"synopsis":    "synopses",
 	"tableau":     "tableaus",
 	"testis":      "testes",
@@ -128,12 +131,14 @@ var singleToPlural = map[string]string{
 	"tooth":       "teeth",
 	"trout":       "trout",
 	"tuna":        "tuna",
+	"vedalia":     "vedalias",
 	"vertebra":    "vertebrae",
 	"vertix":      "vertices",
 	"vita":        "vitae",
 	"vortex":      "vortices",
 	"wharf":       "wharves",
 	"wife":        "wives",
+	"woman":       "women",
 	"wolf":        "wolves",
 	"you":         "you",
 }
@@ -160,7 +165,6 @@ var singularToPluralSuffixList = []singularToPluralSuffix{
 	{"randum", "randa"},
 	{"actus", "acti"},
 	{"adium", "adia"},
-	{"alias", "aliases"},
 	{"basis", "basis"},
 	{"child", "children"},
 	{"chive", "chives"},
@@ -168,6 +172,7 @@ var singularToPluralSuffixList = []singularToPluralSuffix{
 	{"hello", "hellos"},
 	{"jeans", "jeans"},
 	{"louse", "lice"},
+	{"media", "media"},
 	{"mouse", "mice"},
 	{"movie", "movies"},
 	{"oasis", "oasis"},
@@ -251,12 +256,10 @@ var singularToPluralSuffixList = []singularToPluralSuffix{
 	{"io", "ios"},
 	{"jy", "jies"},
 	{"ky", "kies"},
-	{"ld", "ldren"},
 	{"lf", "lves"},
 	{"ly", "lies"},
 	{"my", "mies"},
 	{"ny", "nies"},
-	{"ox", "oxen"},
 	{"py", "pies"},
 	{"qy", "qies"},
 	{"rf", "rves"},
diff --git a/vendor/github.com/gobuffalo/flect/pluralize.go b/vendor/github.com/gobuffalo/flect/pluralize.go
index 1b9d43e4620..e265f84e91d 100644
--- a/vendor/github.com/gobuffalo/flect/pluralize.go
+++ b/vendor/github.com/gobuffalo/flect/pluralize.go
@@ -15,12 +15,22 @@ func Pluralize(s string) string {
 	return New(s).Pluralize().String()
 }
 
+// PluralizeWithSize will pluralize a string taking a number into account.
+// PluralizeWithSize("user", 1) = user +// PluralizeWithSize("user", 2) = users +func PluralizeWithSize(s string, i int) string { + if i == 1 || i == -1 { + return New(s).Singularize().String() + } + return New(s).Pluralize().String() +} + // Pluralize returns a plural version of the string // user = users // person = people // datum = data func (i Ident) Pluralize() Ident { - s := i.Original + s := i.LastPart() if len(s) == 0 { return New("") } @@ -33,11 +43,11 @@ func (i Ident) Pluralize() Ident { return i } if p, ok := singleToPlural[ls]; ok { - return New(p) + return i.ReplaceSuffix(s, p) } for _, r := range pluralRules { if strings.HasSuffix(ls, r.suffix) { - return New(r.fn(s)) + return i.ReplaceSuffix(s, r.fn(s)) } } diff --git a/vendor/github.com/gobuffalo/flect/singularize.go b/vendor/github.com/gobuffalo/flect/singularize.go index a0f8545ef2a..1ed4995053e 100644 --- a/vendor/github.com/gobuffalo/flect/singularize.go +++ b/vendor/github.com/gobuffalo/flect/singularize.go @@ -15,6 +15,16 @@ func Singularize(s string) string { return New(s).Singularize().String() } +// SingularizeWithSize will singular a string taking a number number into account. +// SingularizeWithSize("user", 1) = user +// SingularizeWithSize("user", 2) = users +func SingularizeWithSize(s string, i int) string { + if i == 1 || i == -1 { + return New(s).Singularize().String() + } + return New(s).Pluralize().String() +} + // Singularize returns a singular version of the string // users = user // data = datum diff --git a/vendor/github.com/gobuffalo/flect/underscore.go b/vendor/github.com/gobuffalo/flect/underscore.go index b92488aa00b..e1466d99b9f 100644 --- a/vendor/github.com/gobuffalo/flect/underscore.go +++ b/vendor/github.com/gobuffalo/flect/underscore.go @@ -18,16 +18,17 @@ func Underscore(s string) string { // Nice to see you! = nice_to_see_you // widgetID = widget_id func (i Ident) Underscore() Ident { - var out []string + out := make([]string, 0, len(i.Parts)) for _, part := range i.Parts { - var x string + var x strings.Builder + x.Grow(len(part)) for _, c := range part { if unicode.IsLetter(c) || unicode.IsDigit(c) { - x += string(c) + x.WriteRune(c) } } - if x != "" { - out = append(out, x) + if x.Len() > 0 { + out = append(out, x.String()) } } return New(strings.ToLower(strings.Join(out, "_"))) diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE new file mode 100644 index 00000000000..ae121a1e46d --- /dev/null +++ b/vendor/github.com/google/go-querystring/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Google. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go new file mode 100644 index 00000000000..91198f819a7 --- /dev/null +++ b/vendor/github.com/google/go-querystring/query/encode.go @@ -0,0 +1,357 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package query implements encoding of structs into URL query parameters. +// +// As a simple example: +// +// type Options struct { +// Query string `url:"q"` +// ShowAll bool `url:"all"` +// Page int `url:"page"` +// } +// +// opt := Options{ "foo", true, 2 } +// v, _ := query.Values(opt) +// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" +// +// The exact mapping between Go values and url.Values is described in the +// documentation for the Values() function. +package query + +import ( + "bytes" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +var timeType = reflect.TypeOf(time.Time{}) + +var encoderType = reflect.TypeOf(new(Encoder)).Elem() + +// Encoder is an interface implemented by any type that wishes to encode +// itself into URL values in a non-standard way. +type Encoder interface { + EncodeValues(key string, v *url.Values) error +} + +// Values returns the url.Values encoding of v. +// +// Values expects to be passed a struct, and traverses it recursively using the +// following encoding rules. +// +// Each exported struct field is encoded as a URL parameter unless +// +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option +// +// The empty values are false, 0, any nil pointer or interface value, any array +// slice, map, or string of length zero, and any type (such as time.Time) that +// returns true for IsZero(). +// +// The URL parameter name defaults to the struct field name but can be +// specified in the struct field's tag value. The "url" key in the struct +// field's tag value is the key name, followed by an optional comma and +// options. For example: +// +// // Field is ignored by this package. +// Field int `url:"-"` +// +// // Field appears as URL parameter "myName". +// Field int `url:"myName"` +// +// // Field appears as URL parameter "myName" and the field is omitted if +// // its value is empty +// Field int `url:"myName,omitempty"` +// +// // Field appears as URL parameter "Field" (the default), but the field +// // is skipped if empty. Note the leading comma. +// Field int `url:",omitempty"` +// +// For encoding individual field values, the following type-dependent rules +// apply: +// +// Boolean values default to encoding as the strings "true" or "false". +// Including the "int" option signals that the field should be encoded as the +// strings "1" or "0". +// +// time.Time values default to encoding as RFC3339 timestamps. Including the +// "unix" option signals that the field should be encoded as a Unix time (see +// time.Unix()). 
The "unixmilli" and "unixnano" options will encode the number +// of milliseconds and nanoseconds, respectively, since January 1, 1970 (see +// time.UnixNano()). Including the "layout" struct tag (separate from the +// "url" tag) will use the value of the "layout" tag as a layout passed to +// time.Format. For example: +// +// // Encode a time.Time as YYYY-MM-DD +// Field time.Time `layout:"2006-01-02"` +// +// Slice and Array values default to encoding as multiple URL values of the +// same name. Including the "comma" option signals that the field should be +// encoded as a single comma-delimited value. Including the "space" option +// similarly encodes the value as a single space-delimited string. Including +// the "semicolon" option will encode the value as a semicolon-delimited string. +// Including the "brackets" option signals that the multiple URL values should +// have "[]" appended to the value name. "numbered" will append a number to +// the end of each incidence of the value name, example: +// name0=value0&name1=value1, etc. Including the "del" struct tag (separate +// from the "url" tag) will use the value of the "del" tag as the delimiter. +// For example: +// +// // Encode a slice of bools as ints ("1" for true, "0" for false), +// // separated by exclamation points "!". +// Field []bool `url:",int" del:"!"` +// +// Anonymous struct fields are usually encoded as if their inner exported +// fields were fields in the outer struct, subject to the standard Go +// visibility rules. An anonymous struct field with a name given in its URL +// tag is treated as having that name, rather than being anonymous. +// +// Non-nil pointer values are encoded as the value pointed to. +// +// Nested structs are encoded including parent fields in value names for +// scoping. e.g: +// +// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" +// +// All other values are encoded using their default string representation. +// +// Multiple fields that encode to the same URL parameter name will be included +// as multiple URL values of the same name. +func Values(v interface{}) (url.Values, error) { + values := make(url.Values) + val := reflect.ValueOf(v) + for val.Kind() == reflect.Ptr { + if val.IsNil() { + return values, nil + } + val = val.Elem() + } + + if v == nil { + return values, nil + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) + } + + err := reflectValue(values, val, "") + return values, err +} + +// reflectValue populates the values parameter from the struct fields in val. +// Embedded structs are followed recursively (using the rules defined in the +// Values function documentation) breadth-first. 
+func reflectValue(values url.Values, val reflect.Value, scope string) error { + var embedded []reflect.Value + + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get("url") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + + if name == "" { + if sf.Anonymous { + v := reflect.Indirect(sv) + if v.IsValid() && v.Kind() == reflect.Struct { + // save embedded struct for later processing + embedded = append(embedded, v) + continue + } + } + + name = sf.Name + } + + if scope != "" { + name = scope + "[" + name + "]" + } + + if opts.Contains("omitempty") && isEmptyValue(sv) { + continue + } + + if sv.Type().Implements(encoderType) { + // if sv is a nil pointer and the custom encoder is defined on a non-pointer + // method receiver, set sv to the zero value of the underlying type + if !reflect.Indirect(sv).IsValid() && sv.Type().Elem().Implements(encoderType) { + sv = reflect.New(sv.Type().Elem()) + } + + m := sv.Interface().(Encoder) + if err := m.EncodeValues(name, &values); err != nil { + return err + } + continue + } + + // recursively dereference pointers. break on nil pointers + for sv.Kind() == reflect.Ptr { + if sv.IsNil() { + break + } + sv = sv.Elem() + } + + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + var del string + if opts.Contains("comma") { + del = "," + } else if opts.Contains("space") { + del = " " + } else if opts.Contains("semicolon") { + del = ";" + } else if opts.Contains("brackets") { + name = name + "[]" + } else { + del = sf.Tag.Get("del") + } + + if del != "" { + s := new(bytes.Buffer) + first := true + for i := 0; i < sv.Len(); i++ { + if first { + first = false + } else { + s.WriteString(del) + } + s.WriteString(valueString(sv.Index(i), opts, sf)) + } + values.Add(name, s.String()) + } else { + for i := 0; i < sv.Len(); i++ { + k := name + if opts.Contains("numbered") { + k = fmt.Sprintf("%s%d", name, i) + } + values.Add(k, valueString(sv.Index(i), opts, sf)) + } + } + continue + } + + if sv.Type() == timeType { + values.Add(name, valueString(sv, opts, sf)) + continue + } + + if sv.Kind() == reflect.Struct { + if err := reflectValue(values, sv, name); err != nil { + return err + } + continue + } + + values.Add(name, valueString(sv, opts, sf)) + } + + for _, f := range embedded { + if err := reflectValue(values, f, scope); err != nil { + return err + } + } + + return nil +} + +// valueString returns the string representation of a value. +func valueString(v reflect.Value, opts tagOptions, sf reflect.StructField) string { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + + if v.Kind() == reflect.Bool && opts.Contains("int") { + if v.Bool() { + return "1" + } + return "0" + } + + if v.Type() == timeType { + t := v.Interface().(time.Time) + if opts.Contains("unix") { + return strconv.FormatInt(t.Unix(), 10) + } + if opts.Contains("unixmilli") { + return strconv.FormatInt((t.UnixNano() / 1e6), 10) + } + if opts.Contains("unixnano") { + return strconv.FormatInt(t.UnixNano(), 10) + } + if layout := sf.Tag.Get("layout"); layout != "" { + return t.Format(layout) + } + return t.Format(time.RFC3339) + } + + return fmt.Sprint(v.Interface()) +} + +// isEmptyValue checks if a value should be considered empty for the purposes +// of omitting fields with the "omitempty" option. 
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + type zeroable interface { + IsZero() bool + } + + if z, ok := v.Interface().(zeroable); ok { + return z.IsZero() + } + + return false +} + +// tagOptions is the string following a comma in a struct field's "url" tag, or +// the empty string. It does not include the leading comma. +type tagOptions []string + +// parseTag splits a struct field's url tag into its name and comma-separated +// options. +func parseTag(tag string) (string, tagOptions) { + s := strings.Split(tag, ",") + return s[0], s[1:] +} + +// Contains checks whether the tagOptions contains the specified option. +func (o tagOptions) Contains(option string) bool { + for _, s := range o { + if s == option { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md new file mode 100644 index 00000000000..dbae7f7be9c --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -0,0 +1,25 @@ +# 1.3.0 (March 31, 2021) + +Please note that CHANGELOG.md does not exist in the source code prior to this release. + +FEATURES: + - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) + +# 1.2.1 (June 17, 2020) + +BUG FIXES: + - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) + +# 1.2.0 (April 23, 2019) + +FEATURES: + - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) + +# 1.1.0 (Jan 07, 2019) + +FEATURES: + - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) + +# 1.0.0 (August 24, 2018) + +Initial release. diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index 09703e8e6ff..8068834ec84 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -278,6 +278,14 @@ func comparePrereleases(v string, other string) int { return 0 } +// Core returns a new version constructed from only the MAJOR.MINOR.PATCH +// segments of the version, without prerelease or metadata. +func (v *Version) Core() *Version { + segments := v.Segments64() + segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2]) + return Must(NewVersion(segmentsOnly)) +} + // Equal tests if two versions are equal. 
func (v *Version) Equal(o *Version) bool { if v == nil || o == nil { diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md index 97d2292af7b..ca9e1a4999b 100644 --- a/vendor/github.com/hashicorp/hil/README.md +++ b/vendor/github.com/hashicorp/hil/README.md @@ -1,6 +1,6 @@ # HIL -[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil) +[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://circleci.com/gh/hashicorp/hil/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/hil/tree/master) HIL (HashiCorp Interpolation Language) is a lightweight embedded language used primarily for configuration interpolation. The goal of HIL is to make a simple diff --git a/vendor/github.com/hashicorp/hil/appveyor.yml b/vendor/github.com/hashicorp/hil/appveyor.yml deleted file mode 100644 index feaf7a34e22..00000000000 --- a/vendor/github.com/hashicorp/hil/appveyor.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "build-{branch}-{build}" -image: Visual Studio 2015 -clone_folder: c:\gopath\src\github.com\hashicorp\hil -environment: - GOPATH: c:\gopath -init: - - git config --global core.autocrlf true -install: -- cmd: >- - echo %Path% - - go version - - go env - - go get -d -v -t ./... -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hokaccha/go-prettyjson/.gitignore b/vendor/github.com/hokaccha/go-prettyjson/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/hokaccha/go-prettyjson/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/hokaccha/go-prettyjson/README.md b/vendor/github.com/hokaccha/go-prettyjson/README.md new file mode 100644 index 00000000000..0aa36e4006c --- /dev/null +++ b/vendor/github.com/hokaccha/go-prettyjson/README.md @@ -0,0 +1,26 @@ +# prettyjson + +JSON pretty print for Golang. + +## Example + +```go +v := map[string]interface{}{ + "str": "foo", + "num": 100, + "bool": false, + "null": nil, + "array": []string{"foo", "bar", "baz"}, + "map": map[string]interface{}{ + "foo": "bar", + }, +} +s, _ := prettyjson.Marshal(v) +fmt.Println(string(s)) +``` + +![Output](http://i.imgur.com/cUFj5os.png) + +## License + +MIT diff --git a/vendor/github.com/hokaccha/go-prettyjson/prettyjson.go b/vendor/github.com/hokaccha/go-prettyjson/prettyjson.go new file mode 100644 index 00000000000..ccd9f2412ec --- /dev/null +++ b/vendor/github.com/hokaccha/go-prettyjson/prettyjson.go @@ -0,0 +1,184 @@ +// Package prettyjson provides JSON pretty print. +package prettyjson + +import ( + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/fatih/color" +) + +// Formatter is a struct to format JSON data. `color` is github.com/fatih/color: https://github.com/fatih/color +type Formatter struct { + // JSON key color. Default is `color.New(color.FgBlue, color.Bold)`. + KeyColor *color.Color + + // JSON string value color. Default is `color.New(color.FgGreen, color.Bold)`. + StringColor *color.Color + + // JSON boolean value color. 
Default is `color.New(color.FgYellow, color.Bold)`. + BoolColor *color.Color + + // JSON number value color. Default is `color.New(color.FgCyan, color.Bold)`. + NumberColor *color.Color + + // JSON null value color. Default is `color.New(color.FgBlack, color.Bold)`. + NullColor *color.Color + + // Max length of JSON string value. When the value is 1 and over, string is truncated to length of the value. Default is 0 (not truncated). + StringMaxLength int + + // Boolean to disable color. Default is false. + DisabledColor bool + + // Indent space number. Default is 2. + Indent int +} + +// NewFormatter returns a new formatter with following default values. +func NewFormatter() *Formatter { + return &Formatter{ + KeyColor: color.New(color.FgBlue, color.Bold), + StringColor: color.New(color.FgGreen, color.Bold), + BoolColor: color.New(color.FgYellow, color.Bold), + NumberColor: color.New(color.FgCyan, color.Bold), + NullColor: color.New(color.FgBlack, color.Bold), + StringMaxLength: 0, + DisabledColor: false, + Indent: 2, + } +} + +// Marshals and formats JSON data. +func (f *Formatter) Marshal(v interface{}) ([]byte, error) { + data, err := json.Marshal(v) + + if err != nil { + return nil, err + } + + return f.Format(data) +} + +// Formats JSON string. +func (f *Formatter) Format(data []byte) ([]byte, error) { + var v interface{} + err := json.Unmarshal(data, &v) + + if err != nil { + return nil, err + } + + s := f.pretty(v, 1) + + return []byte(s), nil +} + +func (f *Formatter) sprintfColor(c *color.Color, format string, args ...interface{}) string { + if f.DisabledColor || c == nil { + return fmt.Sprintf(format, args...) + } else { + return c.SprintfFunc()(format, args...) + } +} + +func (f *Formatter) sprintColor(c *color.Color, s string) string { + if f.DisabledColor || c == nil { + return fmt.Sprint(s) + } else { + return c.SprintFunc()(s) + } +} + +func (f *Formatter) pretty(v interface{}, depth int) string { + switch val := v.(type) { + case string: + return f.processString(val) + case float64: + return f.sprintColor(f.NumberColor, strconv.FormatFloat(val, 'f', -1, 64)) + case bool: + return f.sprintColor(f.BoolColor, strconv.FormatBool(val)) + case nil: + return f.sprintColor(f.NullColor, "null") + case map[string]interface{}: + return f.processMap(val, depth) + case []interface{}: + return f.processArray(val, depth) + } + + return "" +} + +func (f *Formatter) processString(s string) string { + r := []rune(s) + + if f.StringMaxLength != 0 && len(r) >= f.StringMaxLength { + s = string(r[0:f.StringMaxLength]) + "..." 
+ } + + b, _ := json.Marshal(s) + + return f.sprintColor(f.StringColor, string(b)) +} + +func (f *Formatter) processMap(m map[string]interface{}, depth int) string { + currentIndent := f.generateIndent(depth - 1) + nextIndent := f.generateIndent(depth) + rows := []string{} + keys := []string{} + + if len(m) == 0 { + return "{}" + } + + for key, _ := range m { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + val := m[key] + k := f.sprintfColor(f.KeyColor, `"%s"`, key) + v := f.pretty(val, depth+1) + row := fmt.Sprintf("%s%s: %s", nextIndent, k, v) + rows = append(rows, row) + } + + return fmt.Sprintf("{\n%s\n%s}", strings.Join(rows, ",\n"), currentIndent) +} + +func (f *Formatter) processArray(a []interface{}, depth int) string { + currentIndent := f.generateIndent(depth - 1) + nextIndent := f.generateIndent(depth) + rows := []string{} + + if len(a) == 0 { + return "[]" + } + + for _, val := range a { + c := f.pretty(val, depth+1) + row := nextIndent + c + rows = append(rows, row) + } + + return fmt.Sprintf("[\n%s\n%s]", strings.Join(rows, ",\n"), currentIndent) +} + +func (f *Formatter) generateIndent(depth int) string { + return strings.Join(make([]string, f.Indent*depth+1), " ") +} + +// Marshal JSON data with default options. +func Marshal(v interface{}) ([]byte, error) { + return NewFormatter().Marshal(v) +} + +// Format JSON string with default options. +func Format(data []byte) ([]byte, error) { + return NewFormatter().Format(data) +} diff --git a/vendor/github.com/jcmturner/gofork/LICENSE b/vendor/github.com/jcmturner/gofork/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
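For orientation, a minimal sketch of how the Formatter options in the vendored prettyjson package above compose; the values are illustrative only:

```go
package main

import (
	"fmt"

	prettyjson "github.com/hokaccha/go-prettyjson"
)

func main() {
	f := prettyjson.NewFormatter()
	f.DisabledColor = true // plain output without ANSI color escapes
	f.Indent = 4           // four-space indentation instead of the default two
	f.StringMaxLength = 32 // truncate long string values with a trailing "..."

	out, err := f.Marshal(map[string]interface{}{
		"name":  "bootstrap",
		"ready": true,
		"count": 3,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```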
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md new file mode 100644 index 00000000000..66a2a8cca71 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md @@ -0,0 +1,5 @@ +This is a temporary repository that will be removed when the issues below are fixed in the core golang code. + +## Issues +* [encoding/asn1: cannot marshal into a GeneralString](https://github.com/golang/go/issues/18832) +* [encoding/asn1: cannot marshal into slice of strings and pass stringtype parameter tags to members](https://github.com/golang/go/issues/18834) \ No newline at end of file diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go new file mode 100644 index 00000000000..f1bb7671795 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go @@ -0,0 +1,1003 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 implements parsing of DER-encoded ASN.1 data structures, +// as defined in ITU-T Rec X.690. +// +// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,'' +// http://luca.ntop.org/Teaching/Appunti/asn1.html. +package asn1 + +// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc +// are different encoding formats for those objects. Here, we'll be dealing +// with DER, the Distinguished Encoding Rules. DER is used in X.509 because +// it's fast to parse and, unlike BER, has a unique encoding for every object. +// When calculating hashes over objects, it's important that the resulting +// bytes be the same at both ends and DER removes this margin of error. +// +// ASN.1 is very complex and this package doesn't attempt to implement +// everything by any means. + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "strconv" + "time" + "unicode/utf8" +) + +// A StructuralError suggests that the ASN.1 data is valid, but the Go type +// which is receiving it doesn't match. +type StructuralError struct { + Msg string +} + +func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg } + +// A SyntaxError suggests that the ASN.1 data is invalid. +type SyntaxError struct { + Msg string +} + +func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg } + +// We start by dealing with each of the primitive types in turn. + +// BOOLEAN + +func parseBool(bytes []byte) (ret bool, err error) { + if len(bytes) != 1 { + err = SyntaxError{"invalid boolean"} + return + } + + // DER demands that "If the encoding represents the boolean value TRUE, + // its single contents octet shall have all eight bits set to one." + // Thus only 0 and 255 are valid encoded values. + switch bytes[0] { + case 0: + ret = false + case 0xff: + ret = true + default: + err = SyntaxError{"invalid boolean"} + } + + return +} + +// INTEGER + +// checkInteger returns nil if the given bytes are a valid DER-encoded +// INTEGER and an error otherwise. 
+func checkInteger(bytes []byte) error {
+	if len(bytes) == 0 {
+		return StructuralError{"empty integer"}
+	}
+	if len(bytes) == 1 {
+		return nil
+	}
+	if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
+		return StructuralError{"integer not minimally-encoded"}
+	}
+	return nil
+}
+
+// parseInt64 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt64(bytes []byte) (ret int64, err error) {
+	err = checkInteger(bytes)
+	if err != nil {
+		return
+	}
+	if len(bytes) > 8 {
+		// We'll overflow an int64 in this case.
+		err = StructuralError{"integer too large"}
+		return
+	}
+	for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+		ret <<= 8
+		ret |= int64(bytes[bytesRead])
+	}
+
+	// Shift up and down in order to sign extend the result.
+	ret <<= 64 - uint8(len(bytes))*8
+	ret >>= 64 - uint8(len(bytes))*8
+	return
+}
+
+// parseInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseInt32(bytes []byte) (int32, error) {
+	if err := checkInteger(bytes); err != nil {
+		return 0, err
+	}
+	ret64, err := parseInt64(bytes)
+	if err != nil {
+		return 0, err
+	}
+	if ret64 != int64(int32(ret64)) {
+		return 0, StructuralError{"integer too large"}
+	}
+	return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte) (*big.Int, error) {
+	if err := checkInteger(bytes); err != nil {
+		return nil, err
+	}
+	ret := new(big.Int)
+	if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+		// This is a negative number.
+		notBytes := make([]byte, len(bytes))
+		for i := range notBytes {
+			notBytes[i] = ^bytes[i]
+		}
+		ret.SetBytes(notBytes)
+		ret.Add(ret, bigOne)
+		ret.Neg(ret)
+		return ret, nil
+	}
+	ret.SetBytes(bytes)
+	return ret, nil
+}
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+	Bytes     []byte // bits packed into bytes.
+	BitLength int    // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns false.
+func (b BitString) At(i int) int {
+	if i < 0 || i >= b.BitLength {
+		return 0
+	}
+	x := i / 8
+	y := 7 - uint(i%8)
+	return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
+func (b BitString) RightAlign() []byte {
+	shift := uint(8 - (b.BitLength % 8))
+	if shift == 8 || len(b.Bytes) == 0 {
+		return b.Bytes
+	}
+
+	a := make([]byte, len(b.Bytes))
+	a[0] = b.Bytes[0] >> shift
+	for i := 1; i < len(b.Bytes); i++ {
+		a[i] = b.Bytes[i-1] << (8 - shift)
+		a[i] |= b.Bytes[i] >> shift
+	}
+
+	return a
+}
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte) (ret BitString, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length BIT STRING"}
+		return
+	}
+	paddingBits := int(bytes[0])
+	if paddingBits > 7 ||
+		len(bytes) == 1 && paddingBits > 0 ||
+		bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+		err = SyntaxError{"invalid padding bits in BIT STRING"}
+		return
+	}
+	ret.BitLength = (len(bytes)-1)*8 - paddingBits
+	ret.Bytes = bytes[1:]
+	return
+}
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+	if len(oi) != len(other) {
+		return false
+	}
+	for i := 0; i < len(oi); i++ {
+		if oi[i] != other[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (oi ObjectIdentifier) String() string {
+	var s string
+
+	for i, v := range oi {
+		if i > 0 {
+			s += "."
+		}
+		s += strconv.Itoa(v)
+	}
+
+	return s
+}
+
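A tiny sketch of the BitString accessors above; the bit pattern is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

func main() {
	// 0xA0 is 1010 0000 in binary; with BitLength 3 only the top three
	// bits (1, 0, 1) are valid and the remaining five are padding.
	bs := asn1.BitString{Bytes: []byte{0xA0}, BitLength: 3}
	fmt.Println(bs.At(0), bs.At(1), bs.At(2)) // 1 0 1
	fmt.Println(bs.At(10))                    // 0: out-of-range reads return 0
	fmt.Printf("%x\n", bs.RightAlign())       // 05: bits shifted to the low end
}
```

+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.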
+func parseObjectIdentifier(bytes []byte) (s []int, err error) { + if len(bytes) == 0 { + err = SyntaxError{"zero length OBJECT IDENTIFIER"} + return + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + s = make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + v, offset, err := parseBase128Int(bytes, 0) + if err != nil { + return + } + if v < 80 { + s[0] = v / 40 + s[1] = v % 40 + } else { + s[0] = 2 + s[1] = v - 80 + } + + i := 2 + for ; offset < len(bytes); i++ { + v, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + s[i] = v + } + s = s[0:i] + return +} + +// ENUMERATED + +// An Enumerated is represented as a plain int. +type Enumerated int + +// FLAG + +// A Flag accepts any data and is set to true if present. +type Flag bool + +// parseBase128Int parses a base-128 encoded int from the given offset in the +// given byte slice. It returns the value and the new offset. +func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) { + offset = initOffset + for shifted := 0; offset < len(bytes); shifted++ { + if shifted == 4 { + err = StructuralError{"base 128 integer too large"} + return + } + ret <<= 7 + b := bytes[offset] + ret |= int(b & 0x7f) + offset++ + if b&0x80 == 0 { + return + } + } + err = SyntaxError{"truncated base 128 integer"} + return +} + +// UTCTime + +func parseUTCTime(bytes []byte) (ret time.Time, err error) { + s := string(bytes) + + formatStr := "0601021504Z0700" + ret, err = time.Parse(formatStr, s) + if err != nil { + formatStr = "060102150405Z0700" + ret, err = time.Parse(formatStr, s) + } + if err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + return + } + + if ret.Year() >= 2050 { + // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + ret = ret.AddDate(-100, 0, 0) + } + + return +} + +// parseGeneralizedTime parses the GeneralizedTime from the given byte slice +// and returns the resulting time. +func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { + const formatStr = "20060102150405Z0700" + s := string(bytes) + + if ret, err = time.Parse(formatStr, s); err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + } + + return +} + +// PrintableString + +// parsePrintableString parses a ASN.1 PrintableString from the given byte +// array and returns it. +func parsePrintableString(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if !isPrintable(b) { + err = SyntaxError{"PrintableString contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +func isPrintable(b byte) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' 
|| + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + b == '*' +} + +// IA5String + +// parseIA5String parses a ASN.1 IA5String (ASCII string) from the given +// byte slice and returns it. +func parseIA5String(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if b >= utf8.RuneSelf { + err = SyntaxError{"IA5String contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// T61String + +// parseT61String parses a ASN.1 T61String (8-bit clean string) from the given +// byte slice and returns it. +func parseT61String(bytes []byte) (ret string, err error) { + return string(bytes), nil +} + +// UTF8String + +// parseUTF8String parses a ASN.1 UTF8String (raw UTF-8) from the given byte +// array and returns it. +func parseUTF8String(bytes []byte) (ret string, err error) { + if !utf8.Valid(bytes) { + return "", errors.New("asn1: invalid UTF-8 string") + } + return string(bytes), nil +} + +// A RawValue represents an undecoded ASN.1 object. +type RawValue struct { + Class, Tag int + IsCompound bool + Bytes []byte + FullBytes []byte // includes the tag and length +} + +// RawContent is used to signal that the undecoded, DER data needs to be +// preserved for a struct. To use it, the first field of the struct must have +// this type. It's an error for any of the other fields to have this type. +type RawContent []byte + +// Tagging + +// parseTagAndLength parses an ASN.1 tag and length pair from the given offset +// into a byte slice. It returns the parsed data and the new offset. SET and +// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we +// don't distinguish between ordered and unordered objects in this code. +func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) { + offset = initOffset + // parseTagAndLength should not be called without at least a single + // byte to read. Thus this check is for robustness: + if offset >= len(bytes) { + err = errors.New("asn1: internal error in parseTagAndLength") + return + } + b := bytes[offset] + offset++ + ret.class = int(b >> 6) + ret.isCompound = b&0x20 == 0x20 + ret.tag = int(b & 0x1f) + + // If the bottom five bits are set, then the tag number is actually base 128 + // encoded afterwards + if ret.tag == 0x1f { + ret.tag, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + // Tags should be encoded in minimal form. + if ret.tag < 0x1f { + err = SyntaxError{"non-minimal tag"} + return + } + } + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if b&0x80 == 0 { + // The length is encoded in the bottom 7 bits. + ret.length = int(b & 0x7f) + } else { + // Bottom 7 bits give the number of length bytes to follow. + numBytes := int(b & 0x7f) + if numBytes == 0 { + err = SyntaxError{"indefinite length found (not DER)"} + return + } + ret.length = 0 + for i := 0; i < numBytes; i++ { + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if ret.length >= 1<<23 { + // We can't shift ret.length up without + // overflowing. + err = StructuralError{"length too large"} + return + } + ret.length <<= 8 + ret.length |= int(b) + if ret.length == 0 { + // DER requires that lengths be minimal. 
+ err = StructuralError{"superfluous leading zeros in length"} + return + } + } + // Short lengths must be encoded in short form. + if ret.length < 0x80 { + err = StructuralError{"non-minimal length"} + return + } + } + + return +} + +// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse +// a number of ASN.1 values from the given byte slice and returns them as a +// slice of Go values of the given type. +func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) { + expectedTag, compoundType, ok := getUniversalType(elemType) + if !ok { + err = StructuralError{"unknown Go type for slice"} + return + } + + // First we iterate over the input and count the number of elements, + // checking that the types are correct in each case. + numElements := 0 + for offset := 0; offset < len(bytes); { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: + // We pretend that various other string types are + // PRINTABLE STRINGs so that a sequence of them can be + // parsed into a []string. + t.tag = TagPrintableString + case TagGeneralizedTime, TagUTCTime: + // Likewise, both time types are treated the same. + t.tag = TagUTCTime + } + + if t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag { + err = StructuralError{"sequence tag mismatch"} + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"truncated sequence"} + return + } + offset += t.length + numElements++ + } + ret = reflect.MakeSlice(sliceType, numElements, numElements) + params := fieldParameters{} + offset := 0 + for i := 0; i < numElements; i++ { + offset, err = parseField(ret.Index(i), bytes, offset, params) + if err != nil { + return + } + } + return +} + +var ( + bitStringType = reflect.TypeOf(BitString{}) + objectIdentifierType = reflect.TypeOf(ObjectIdentifier{}) + enumeratedType = reflect.TypeOf(Enumerated(0)) + flagType = reflect.TypeOf(Flag(false)) + timeType = reflect.TypeOf(time.Time{}) + rawValueType = reflect.TypeOf(RawValue{}) + rawContentsType = reflect.TypeOf(RawContent(nil)) + bigIntType = reflect.TypeOf(new(big.Int)) +) + +// invalidLength returns true iff offset + length > sliceLength, or if the +// addition would overflow. +func invalidLength(offset, length, sliceLength int) bool { + return offset+length < offset || offset+length > sliceLength +} + +// parseField is the main parsing function. Given a byte slice and an offset +// into the array, it will try to parse a suitable ASN.1 value out and store it +// in the given Value. +func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { + offset = initOffset + fieldType := v.Type() + + // If we have run out of data, it may be that there are optional elements at the end. + if offset == len(bytes) { + if !setDefaultValue(v, params) { + err = SyntaxError{"sequence truncated"} + } + return + } + + // Deal with raw values. 
+ if fieldType == rawValueType { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]} + offset += t.length + v.Set(reflect.ValueOf(result)) + return + } + + // Deal with the ANY type. + if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + var result interface{} + if !t.isCompound && t.class == ClassUniversal { + innerBytes := bytes[offset : offset+t.length] + switch t.tag { + case TagPrintableString: + result, err = parsePrintableString(innerBytes) + case TagIA5String: + result, err = parseIA5String(innerBytes) + // jtasn1 addition of following case + case TagGeneralString: + result, err = parseIA5String(innerBytes) + case TagT61String: + result, err = parseT61String(innerBytes) + case TagUTF8String: + result, err = parseUTF8String(innerBytes) + case TagInteger: + result, err = parseInt64(innerBytes) + case TagBitString: + result, err = parseBitString(innerBytes) + case TagOID: + result, err = parseObjectIdentifier(innerBytes) + case TagUTCTime: + result, err = parseUTCTime(innerBytes) + case TagGeneralizedTime: + result, err = parseGeneralizedTime(innerBytes) + case TagOctetString: + result = innerBytes + default: + // If we don't know how to handle the type, we just leave Value as nil. + } + } + offset += t.length + if err != nil { + return + } + if result != nil { + v.Set(reflect.ValueOf(result)) + } + return + } + universalTag, compoundType, ok1 := getUniversalType(fieldType) + if !ok1 { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)} + return + } + + t, offset, err := parseTagAndLength(bytes, offset) + if err != nil { + return + } + if params.explicit { + expectedClass := ClassContextSpecific + if params.application { + expectedClass = ClassApplication + } + if offset == len(bytes) { + err = StructuralError{"explicit tag has no child"} + return + } + if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { + if t.length > 0 { + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + } else { + if fieldType != flagType { + err = StructuralError{"zero length explicit tag was not an asn1.Flag"} + return + } + v.SetBool(true) + return + } + } else { + // The tags didn't match, it might be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{"explicitly tagged member didn't match"} + } + return + } + } + + // Special case for strings: all the ASN.1 string types map to the Go + // type string. getUniversalType returns the tag for PrintableString + // when it sees a string, so if we see a different string type on the + // wire, we change the universal type to match. + if universalTag == TagPrintableString { + if t.class == ClassUniversal { + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: + universalTag = t.tag + } + } else if params.stringType != 0 { + universalTag = params.stringType + } + } + + // Special case for time: UTCTime and GeneralizedTime both map to the + // Go type time.Time. 
+ if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal { + universalTag = TagGeneralizedTime + } + + if params.set { + universalTag = TagSet + } + + expectedClass := ClassUniversal + expectedTag := universalTag + + if !params.explicit && params.tag != nil { + expectedClass = ClassContextSpecific + expectedTag = *params.tag + } + + if !params.explicit && params.application && params.tag != nil { + expectedClass = ClassApplication + expectedTag = *params.tag + } + + // We have unwrapped any explicit tagging at this point. + if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType { + // Tags don't match. Again, it could be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)} + } + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + innerBytes := bytes[offset : offset+t.length] + offset += t.length + + // We deal with the structures defined in this package first. + switch fieldType { + case objectIdentifierType: + newSlice, err1 := parseObjectIdentifier(innerBytes) + v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice))) + if err1 == nil { + reflect.Copy(v, reflect.ValueOf(newSlice)) + } + err = err1 + return + case bitStringType: + bs, err1 := parseBitString(innerBytes) + if err1 == nil { + v.Set(reflect.ValueOf(bs)) + } + err = err1 + return + case timeType: + var time time.Time + var err1 error + if universalTag == TagUTCTime { + time, err1 = parseUTCTime(innerBytes) + } else { + time, err1 = parseGeneralizedTime(innerBytes) + } + if err1 == nil { + v.Set(reflect.ValueOf(time)) + } + err = err1 + return + case enumeratedType: + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + v.SetInt(int64(parsedInt)) + } + err = err1 + return + case flagType: + v.SetBool(true) + return + case bigIntType: + parsedInt, err1 := parseBigInt(innerBytes) + if err1 == nil { + v.Set(reflect.ValueOf(parsedInt)) + } + err = err1 + return + } + switch val := v; val.Kind() { + case reflect.Bool: + parsedBool, err1 := parseBool(innerBytes) + if err1 == nil { + val.SetBool(parsedBool) + } + err = err1 + return + case reflect.Int, reflect.Int32, reflect.Int64: + if val.Type().Size() == 4 { + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + val.SetInt(int64(parsedInt)) + } + err = err1 + } else { + parsedInt, err1 := parseInt64(innerBytes) + if err1 == nil { + val.SetInt(parsedInt) + } + err = err1 + } + return + // TODO(dfc) Add support for the remaining integer types + case reflect.Struct: + structType := fieldType + + if structType.NumField() > 0 && + structType.Field(0).Type == rawContentsType { + bytes := bytes[initOffset:offset] + val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) + } + + innerOffset := 0 + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + if i == 0 && field.Type == rawContentsType { + continue + } + innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1"))) + if err != nil { + return + } + } + // We allow extra bytes at the end of the SEQUENCE because + // adding elements to the end has been used in X.509 as the + // version numbers have increased. 
+ return + case reflect.Slice: + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) + reflect.Copy(val, reflect.ValueOf(innerBytes)) + return + } + newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem()) + if err1 == nil { + val.Set(newSlice) + } + err = err1 + return + case reflect.String: + var v string + switch universalTag { + case TagPrintableString: + v, err = parsePrintableString(innerBytes) + case TagIA5String: + v, err = parseIA5String(innerBytes) + case TagT61String: + v, err = parseT61String(innerBytes) + case TagUTF8String: + v, err = parseUTF8String(innerBytes) + case TagGeneralString: + // GeneralString is specified in ISO-2022/ECMA-35, + // A brief review suggests that it includes structures + // that allow the encoding to change midstring and + // such. We give up and pass it as an 8-bit string. + v, err = parseT61String(innerBytes) + default: + err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)} + } + if err == nil { + val.SetString(v) + } + return + } + err = StructuralError{"unsupported: " + v.Type().String()} + return +} + +// canHaveDefaultValue reports whether k is a Kind that we will set a default +// value for. (A signed integer, essentially.) +func canHaveDefaultValue(k reflect.Kind) bool { + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + } + + return false +} + +// setDefaultValue is used to install a default value, from a tag string, into +// a Value. It is successful if the field was optional, even if a default value +// wasn't provided or it failed to install it into the Value. +func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { + if !params.optional { + return + } + ok = true + if params.defaultValue == nil { + return + } + if canHaveDefaultValue(v.Kind()) { + v.SetInt(*params.defaultValue) + } + return +} + +// Unmarshal parses the DER-encoded ASN.1 data structure b +// and uses the reflect package to fill in an arbitrary value pointed at by val. +// Because Unmarshal uses the reflect package, the structs +// being written to must use upper case field names. +// +// An ASN.1 INTEGER can be written to an int, int32, int64, +// or *big.Int (from the math/big package). +// If the encoded value does not fit in the Go type, +// Unmarshal returns a parse error. +// +// An ASN.1 BIT STRING can be written to a BitString. +// +// An ASN.1 OCTET STRING can be written to a []byte. +// +// An ASN.1 OBJECT IDENTIFIER can be written to an +// ObjectIdentifier. +// +// An ASN.1 ENUMERATED can be written to an Enumerated. +// +// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time. +// +// An ASN.1 PrintableString or IA5String can be written to a string. +// +// Any of the above ASN.1 values can be written to an interface{}. +// The value stored in the interface has the corresponding Go type. +// For integers, that type is int64. +// +// An ASN.1 SEQUENCE OF x or SET OF x can be written +// to a slice if an x can be written to the slice's element type. +// +// An ASN.1 SEQUENCE or SET can be written to a struct +// if each of the elements in the sequence can be +// written to the corresponding element in the struct. 
+// +// The following tags on struct fields have special meaning to Unmarshal: +// +// application specifies that a APPLICATION tag is used +// default:x sets the default value for optional integer fields +// explicit specifies that an additional, explicit tag wraps the implicit one +// optional marks the field as ASN.1 OPTIONAL +// set causes a SET, rather than a SEQUENCE type to be expected +// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC +// +// If the type of the first field of a structure is RawContent then the raw +// ASN1 contents of the struct will be stored in it. +// +// If the type name of a slice element ends with "SET" then it's treated as if +// the "set" tag was set on it. This can be used with nested slices where a +// struct tag cannot be given. +// +// Other ASN.1 types are not supported; if it encounters them, +// Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. +func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, parseFieldParameters(params)) + if err != nil { + return nil, err + } + return b[offset:], nil +} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go new file mode 100644 index 00000000000..7a9da49f396 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go @@ -0,0 +1,173 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "reflect" + "strconv" + "strings" +) + +// ASN.1 objects have metadata preceding them: +// the tag: the type of the object +// a flag denoting if this object is compound or not +// the class type: the namespace of the tag +// the length of the object, in bytes + +// Here are some standard tags and classes + +// ASN.1 tags represent the type of the following object. +const ( + TagBoolean = 1 + TagInteger = 2 + TagBitString = 3 + TagOctetString = 4 + TagOID = 6 + TagEnum = 10 + TagUTF8String = 12 + TagSequence = 16 + TagSet = 17 + TagPrintableString = 19 + TagT61String = 20 + TagIA5String = 22 + TagUTCTime = 23 + TagGeneralizedTime = 24 + TagGeneralString = 27 +) + +// ASN.1 class types represent the namespace of the tag. +const ( + ClassUniversal = 0 + ClassApplication = 1 + ClassContextSpecific = 2 + ClassPrivate = 3 +) + +type tagAndLength struct { + class, tag, length int + isCompound bool +} + +// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead +// of" and "in addition to". When not specified, every primitive type has a +// default tag in the UNIVERSAL class. +// +// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1 +// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT +// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another. +// +// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an +// /additional/ tag would wrap the default tag. This explicit tag will have the +// compound flag set. +// +// (This is used in order to remove ambiguity with optional elements.) 
+// +// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we +// don't support that here. We support a single layer of EXPLICIT or IMPLICIT +// tagging with tag strings on the fields of a structure. + +// fieldParameters is the parsed representation of tag string from a structure field. +type fieldParameters struct { + optional bool // true iff the field is OPTIONAL + explicit bool // true iff an EXPLICIT tag is in use. + application bool // true iff an APPLICATION tag is in use. + defaultValue *int64 // a default value for INTEGER typed fields (maybe nil). + tag *int // the EXPLICIT or IMPLICIT tag (maybe nil). + stringType int // the string tag to use when marshaling. + timeType int // the time tag to use when marshaling. + set bool // true iff this should be encoded as a SET + omitEmpty bool // true iff this should be omitted if empty when marshaling. + + // Invariants: + // if explicit is set, tag is non-nil. +} + +// Given a tag string with the format specified in the package comment, +// parseFieldParameters will parse it into a fieldParameters structure, +// ignoring unknown parts of the string. +func parseFieldParameters(str string) (ret fieldParameters) { + for _, part := range strings.Split(str, ",") { + switch { + case part == "optional": + ret.optional = true + case part == "explicit": + ret.explicit = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "generalized": + ret.timeType = TagGeneralizedTime + case part == "utc": + ret.timeType = TagUTCTime + case part == "ia5": + ret.stringType = TagIA5String + // jtasn1 case below added + case part == "generalstring": + ret.stringType = TagGeneralString + case part == "printable": + ret.stringType = TagPrintableString + case part == "utf8": + ret.stringType = TagUTF8String + case strings.HasPrefix(part, "default:"): + i, err := strconv.ParseInt(part[8:], 10, 64) + if err == nil { + ret.defaultValue = new(int64) + *ret.defaultValue = i + } + case strings.HasPrefix(part, "tag:"): + i, err := strconv.Atoi(part[4:]) + if err == nil { + ret.tag = new(int) + *ret.tag = i + } + case part == "set": + ret.set = true + case part == "application": + ret.application = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "omitempty": + ret.omitEmpty = true + } + } + return +} + +// Given a reflected Go type, getUniversalType returns the default tag number +// and expected compound flag. 
+func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) { + switch t { + case objectIdentifierType: + return TagOID, false, true + case bitStringType: + return TagBitString, false, true + case timeType: + return TagUTCTime, false, true + case enumeratedType: + return TagEnum, false, true + case bigIntType: + return TagInteger, false, true + } + switch t.Kind() { + case reflect.Bool: + return TagBoolean, false, true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return TagInteger, false, true + case reflect.Struct: + return TagSequence, true, true + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return TagOctetString, false, true + } + if strings.HasSuffix(t.Name(), "SET") { + return TagSet, true, true + } + return TagSequence, true, true + case reflect.String: + return TagPrintableString, false, true + } + return 0, false, false +} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go new file mode 100644 index 00000000000..f52eee9d261 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go @@ -0,0 +1,659 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "time" + "unicode/utf8" +) + +// A forkableWriter is an in-memory buffer that can be +// 'forked' to create new forkableWriters that bracket the +// original. After +// pre, post := w.fork() +// the overall sequence of bytes represented is logically w+pre+post. +type forkableWriter struct { + *bytes.Buffer + pre, post *forkableWriter +} + +func newForkableWriter() *forkableWriter { + return &forkableWriter{new(bytes.Buffer), nil, nil} +} + +func (f *forkableWriter) fork() (pre, post *forkableWriter) { + if f.pre != nil || f.post != nil { + panic("have already forked") + } + f.pre = newForkableWriter() + f.post = newForkableWriter() + return f.pre, f.post +} + +func (f *forkableWriter) Len() (l int) { + l += f.Buffer.Len() + if f.pre != nil { + l += f.pre.Len() + } + if f.post != nil { + l += f.post.Len() + } + return +} + +func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) { + n, err = out.Write(f.Bytes()) + if err != nil { + return + } + + var nn int + + if f.pre != nil { + nn, err = f.pre.writeTo(out) + n += nn + if err != nil { + return + } + } + + if f.post != nil { + nn, err = f.post.writeTo(out) + n += nn + } + return +} + +func marshalBase128Int(out *forkableWriter, n int64) (err error) { + if n == 0 { + err = out.WriteByte(0) + return + } + + l := 0 + for i := n; i > 0; i >>= 7 { + l++ + } + + for i := l - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + err = out.WriteByte(o) + if err != nil { + return + } + } + + return nil +} + +func marshalInt64(out *forkableWriter, i int64) (err error) { + n := int64Length(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func int64Length(i int64) (numBytes int) { + numBytes = 1 + + for i > 127 { + numBytes++ + i >>= 8 + } + + for i < -128 { + numBytes++ + i >>= 8 + } + + return +} + +func marshalBigInt(out *forkableWriter, n *big.Int) (err error) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. 
If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + err = out.WriteByte(0xff) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } else if n.Sign() == 0 { + // Zero is written as a single 0 zero rather than no bytes. + err = out.WriteByte(0x00) + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with 0x00 in order to stop it + // looking like a negative number. + err = out.WriteByte(0) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } + return +} + +func marshalLength(out *forkableWriter, i int) (err error) { + n := lengthLength(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func lengthLength(i int) (numBytes int) { + numBytes = 1 + for i > 255 { + numBytes++ + i >>= 8 + } + return +} + +func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) { + b := uint8(t.class) << 6 + if t.isCompound { + b |= 0x20 + } + if t.tag >= 31 { + b |= 0x1f + err = out.WriteByte(b) + if err != nil { + return + } + err = marshalBase128Int(out, int64(t.tag)) + if err != nil { + return + } + } else { + b |= uint8(t.tag) + err = out.WriteByte(b) + if err != nil { + return + } + } + + if t.length >= 128 { + l := lengthLength(t.length) + err = out.WriteByte(0x80 | byte(l)) + if err != nil { + return + } + err = marshalLength(out, t.length) + if err != nil { + return + } + } else { + err = out.WriteByte(byte(t.length)) + if err != nil { + return + } + } + + return nil +} + +func marshalBitString(out *forkableWriter, b BitString) (err error) { + paddingBits := byte((8 - b.BitLength%8) % 8) + err = out.WriteByte(paddingBits) + if err != nil { + return + } + _, err = out.Write(b.Bytes) + return +} + +func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) { + if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { + return StructuralError{"invalid object identifier"} + } + + err = marshalBase128Int(out, int64(oid[0]*40+oid[1])) + if err != nil { + return + } + for i := 2; i < len(oid); i++ { + err = marshalBase128Int(out, int64(oid[i])) + if err != nil { + return + } + } + + return +} + +func marshalPrintableString(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if !isPrintable(c) { + return StructuralError{"PrintableString contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalIA5String(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if c > 127 { + return StructuralError{"IA5String contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalUTF8String(out *forkableWriter, s string) (err error) { + _, err = out.Write([]byte(s)) + return +} + +func marshalTwoDigits(out *forkableWriter, v int) (err error) { + err = out.WriteByte(byte('0' + (v/10)%10)) + if err != nil { + return + } + return out.WriteByte(byte('0' + v%10)) +} + +func marshalFourDigits(out *forkableWriter, v int) (err error) { + var bytes [4]byte + for i := range bytes { + bytes[3-i] = '0' + byte(v%10) + v /= 10 + } + _, err = out.Write(bytes[:]) + return +} + +func outsideUTCRange(t time.Time) bool { + year := t.Year() + return year < 1950 || year 
>= 2050 +} + +func marshalUTCTime(out *forkableWriter, t time.Time) (err error) { + year := t.Year() + + switch { + case 1950 <= year && year < 2000: + err = marshalTwoDigits(out, year-1900) + case 2000 <= year && year < 2050: + err = marshalTwoDigits(out, year-2000) + default: + return StructuralError{"cannot represent time as UTCTime"} + } + if err != nil { + return + } + + return marshalTimeCommon(out, t) +} + +func marshalGeneralizedTime(out *forkableWriter, t time.Time) (err error) { + year := t.Year() + if year < 0 || year > 9999 { + return StructuralError{"cannot represent time as GeneralizedTime"} + } + if err = marshalFourDigits(out, year); err != nil { + return + } + + return marshalTimeCommon(out, t) +} + +func marshalTimeCommon(out *forkableWriter, t time.Time) (err error) { + _, month, day := t.Date() + + err = marshalTwoDigits(out, int(month)) + if err != nil { + return + } + + err = marshalTwoDigits(out, day) + if err != nil { + return + } + + hour, min, sec := t.Clock() + + err = marshalTwoDigits(out, hour) + if err != nil { + return + } + + err = marshalTwoDigits(out, min) + if err != nil { + return + } + + err = marshalTwoDigits(out, sec) + if err != nil { + return + } + + _, offset := t.Zone() + + switch { + case offset/60 == 0: + err = out.WriteByte('Z') + return + case offset > 0: + err = out.WriteByte('+') + case offset < 0: + err = out.WriteByte('-') + } + + if err != nil { + return + } + + offsetMinutes := offset / 60 + if offsetMinutes < 0 { + offsetMinutes = -offsetMinutes + } + + err = marshalTwoDigits(out, offsetMinutes/60) + if err != nil { + return + } + + err = marshalTwoDigits(out, offsetMinutes%60) + return +} + +func stripTagAndLength(in []byte) []byte { + _, offset, err := parseTagAndLength(in, 0) + if err != nil { + return in + } + return in[offset:] +} + +func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) { + switch value.Type() { + case flagType: + return nil + case timeType: + t := value.Interface().(time.Time) + if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { + return marshalGeneralizedTime(out, t) + } else { + return marshalUTCTime(out, t) + } + case bitStringType: + return marshalBitString(out, value.Interface().(BitString)) + case objectIdentifierType: + return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier)) + case bigIntType: + return marshalBigInt(out, value.Interface().(*big.Int)) + } + + switch v := value; v.Kind() { + case reflect.Bool: + if v.Bool() { + return out.WriteByte(255) + } else { + return out.WriteByte(0) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return marshalInt64(out, v.Int()) + case reflect.Struct: + t := v.Type() + + startingField := 0 + + // If the first element of the structure is a non-empty + // RawContents, then we don't bother serializing the rest. 
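+		// (Editor's illustrative note, not upstream code: this branch fires for
+		// caller types shaped like the following, with hypothetical names.)
+		//
+		//	type signedData struct {
+		//		Raw    RawContent // keeps the original DER of the whole SEQUENCE
+		//		Serial int
+		//	}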
+ if t.NumField() > 0 && t.Field(0).Type == rawContentsType { + s := v.Field(0) + if s.Len() > 0 { + bytes := make([]byte, s.Len()) + for i := 0; i < s.Len(); i++ { + bytes[i] = uint8(s.Index(i).Uint()) + } + /* The RawContents will contain the tag and + * length fields but we'll also be writing + * those ourselves, so we strip them out of + * bytes */ + _, err = out.Write(stripTagAndLength(bytes)) + return + } else { + startingField = 1 + } + } + + for i := startingField; i < t.NumField(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1"))) + if err != nil { + return + } + } + return + case reflect.Slice: + sliceType := v.Type() + if sliceType.Elem().Kind() == reflect.Uint8 { + bytes := make([]byte, v.Len()) + for i := 0; i < v.Len(); i++ { + bytes[i] = uint8(v.Index(i).Uint()) + } + _, err = out.Write(bytes) + return + } + + // jtasn1 Pass on the tags to the members but need to unset explicit switch and implicit value + //var fp fieldParameters + params.explicit = false + params.tag = nil + for i := 0; i < v.Len(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Index(i), params) + if err != nil { + return + } + } + return + case reflect.String: + switch params.stringType { + case TagIA5String: + return marshalIA5String(out, v.String()) + case TagPrintableString: + return marshalPrintableString(out, v.String()) + default: + return marshalUTF8String(out, v.String()) + } + } + + return StructuralError{"unknown Go type"} +} + +func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) { + if !v.IsValid() { + return fmt.Errorf("asn1: cannot marshal nil value") + } + // If the field is an interface{} then recurse into it. + if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 { + return marshalField(out, v.Elem(), params) + } + + if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty { + return + } + + if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) { + defaultValue := reflect.New(v.Type()).Elem() + defaultValue.SetInt(*params.defaultValue) + + if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) { + return + } + } + + // If no default value is given then the zero value for the type is + // assumed to be the default value. This isn't obviously the correct + // behaviour, but it's what Go has traditionally done. 
+ if params.optional && params.defaultValue == nil { + if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { + return + } + } + + if v.Type() == rawValueType { + rv := v.Interface().(RawValue) + if len(rv.FullBytes) != 0 { + _, err = out.Write(rv.FullBytes) + } else { + err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}) + if err != nil { + return + } + _, err = out.Write(rv.Bytes) + } + return + } + + tag, isCompound, ok := getUniversalType(v.Type()) + if !ok { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())} + return + } + class := ClassUniversal + + if params.timeType != 0 && tag != TagUTCTime { + return StructuralError{"explicit time type given to non-time member"} + } + + // jtasn1 updated to allow slices of strings + if params.stringType != 0 && !(tag == TagPrintableString || (v.Kind() == reflect.Slice && tag == 16 && v.Type().Elem().Kind() == reflect.String)) { + return StructuralError{"explicit string type given to non-string member"} + } + + switch tag { + case TagPrintableString: + if params.stringType == 0 { + // This is a string without an explicit string type. We'll use + // a PrintableString if the character set in the string is + // sufficiently limited, otherwise we'll use a UTF8String. + for _, r := range v.String() { + if r >= utf8.RuneSelf || !isPrintable(byte(r)) { + if !utf8.ValidString(v.String()) { + return errors.New("asn1: string not valid UTF-8") + } + tag = TagUTF8String + break + } + } + } else { + tag = params.stringType + } + case TagUTCTime: + if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) { + tag = TagGeneralizedTime + } + } + + if params.set { + if tag != TagSequence { + return StructuralError{"non sequence tagged as set"} + } + tag = TagSet + } + + tags, body := out.fork() + + err = marshalBody(body, v, params) + if err != nil { + return + } + + bodyLen := body.Len() + + var explicitTag *forkableWriter + if params.explicit { + explicitTag, tags = tags.fork() + } + + if !params.explicit && params.tag != nil { + // implicit tag. + tag = *params.tag + class = ClassContextSpecific + } + + err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound}) + if err != nil { + return + } + + if params.explicit { + err = marshalTagAndLength(explicitTag, tagAndLength{ + class: ClassContextSpecific, + tag: *params.tag, + length: bodyLen + tags.Len(), + isCompound: true, + }) + } + + return err +} + +// Marshal returns the ASN.1 encoding of val. +// +// In addition to the struct tags recognised by Unmarshal, the following can be +// used: +// +// ia5: causes strings to be marshaled as ASN.1, IA5 strings +// omitempty: causes empty slices to be skipped +// printable: causes strings to be marshaled as ASN.1, PrintableString strings. +// utf8: causes strings to be marshaled as ASN.1, UTF8 strings +func Marshal(val interface{}) ([]byte, error) { + var out bytes.Buffer + v := reflect.ValueOf(val) + f := newForkableWriter() + err := marshalField(f, v, fieldParameters{}) + if err != nil { + return nil, err + } + _, err = f.writeTo(&out) + return out.Bytes(), err +} diff --git a/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 00000000000..75d418763db --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,98 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + return Key64(password, salt, int64(iter), int64(keyLen), h) +} + +// Key64 derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. Key64 uses +// int64 for the iteration count and key length to allow larger values. +// The key is derived based on the method described as PBKDF2 with the HMAC +// variant using the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key64(password, salt []byte, iter, keyLen int64, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := int64(prf.Size()) + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := int64(1); block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter
+		// U_1 = PRF(password, salt || uint(i))
+		prf.Reset()
+		prf.Write(salt)
+		buf[0] = byte(block >> 24)
+		buf[1] = byte(block >> 16)
+		buf[2] = byte(block >> 8)
+		buf[3] = byte(block)
+		prf.Write(buf[:4])
+		dk = prf.Sum(dk)
+		T := dk[int64(len(dk))-hashLen:]
+		copy(U, T)
+
+		// U_n = PRF(password, U_(n-1))
+		for n := int64(2); n <= iter; n++ {
+			prf.Reset()
+			prf.Write(U)
+			U = U[:0]
+			U = prf.Sum(U)
+			for x := range U {
+				T[x] ^= U[x]
+			}
+		}
+	}
+	return dk[:keyLen]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml
index 730c7fa51be..c56f37c0c94 100644
--- a/vendor/github.com/jmespath/go-jmespath/.travis.yml
+++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml
@@ -12,6 +12,17 @@ go:
   - 1.11.x
   - 1.12.x
   - 1.13.x
+  - 1.14.x
+  - 1.15.x
+  - tip
 
-install: go get -v -t ./...
-script: make test
+allow_failures:
+  - go: tip
+
+script: make build
+
+matrix:
+  include:
+    - language: go
+      go: 1.15.x
+      script: make test
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
index a828d2848f0..fb38ec2760e 100644
--- a/vendor/github.com/jmespath/go-jmespath/Makefile
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -1,6 +1,8 @@
 
 CMD = jpgo
 
+SRC_PKGS=./ ./cmd/... ./fuzz/...
+
 help:
 	@echo "Please use \`make <target>' where <target> is one of"
 	@echo "  test                  to run all the tests"
@@ -9,21 +11,22 @@ help:
 
 
 generate:
-	go generate ./...
+	go generate ${SRC_PKGS}
 
 build:
 	rm -f $(CMD)
-	go build ./...
+	go build ${SRC_PKGS}
 	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
 	mv cmd/$(CMD)/$(CMD) .
 
-test:
-	go test -v ./...
+test: test-internal-testify
+	echo "making tests ${SRC_PKGS}"
+	go test -v ${SRC_PKGS}
 
 check:
-	go vet ./...
-	@echo "golint ./..."
-	@lint=`golint ./...`; \
+	go vet ${SRC_PKGS}
+	@echo "golint ${SRC_PKGS}"
+	@lint=`golint ${SRC_PKGS}`; \
 	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
 	echo "$$lint"; \
 	if [ "$$lint" != "" ]; then exit 1; fi
@@ -42,3 +45,7 @@ bench:
 
 pprof-cpu:
 	go tool pprof ./go-jmespath.test ./cpu.out
+
+test-internal-testify:
+	cd internal/testify && go test ./...
+ diff --git a/vendor/github.com/jmespath/go-jmespath/go.mod b/vendor/github.com/jmespath/go-jmespath/go.mod index aa1e3f1c9f7..4d448e88b06 100644 --- a/vendor/github.com/jmespath/go-jmespath/go.mod +++ b/vendor/github.com/jmespath/go-jmespath/go.mod @@ -2,4 +2,4 @@ module github.com/jmespath/go-jmespath go 1.14 -require github.com/stretchr/testify v1.5.1 +require github.com/jmespath/go-jmespath/internal/testify v1.5.1 diff --git a/vendor/github.com/jmespath/go-jmespath/go.sum b/vendor/github.com/jmespath/go-jmespath/go.sum index 331fa69822d..d2db411e585 100644 --- a/vendor/github.com/jmespath/go-jmespath/go.sum +++ b/vendor/github.com/jmespath/go-jmespath/go.sum @@ -1,11 +1,11 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000000..1eb75ef68e4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 00000000000..ea7324da671
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders,
+but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low level interface that allows to compress single independent blocks.
+
+Each block is separate, and there is no built in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                 |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                           |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                    |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated |
+| `(error)`           | An internal error occurred.                                                 |
+
+As can be seen above there are errors that will be returned even under normal operation so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same
+object can be used for both.
+
+Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
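Editor's note: the following is a minimal usage sketch of the round trip described above; it is not part of the upstream README, and the sample input and variable names are invented. Only the documented entry points (`Compress`, `Decompress`, `Scratch`, `ErrIncompressible`, `ErrUseRLE`) are assumed.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := bytes.Repeat([]byte("abcd"), 1024) // highly repetitive, should compress

	var s fse.Scratch
	comp, err := fse.Compress(in, &s)
	switch err {
	case fse.ErrIncompressible, fse.ErrUseRLE:
		// Expected outcomes under normal operation: store the input raw (or as RLE).
		return
	case nil:
		// comp aliases s.Out and stays valid only until the Scratch is reused.
	default:
		panic(err)
	}

	// Reusing the same Scratch for decompression: clear Out first,
	// since we are still holding on to comp.
	s.Out = nil
	decomp, err := fse.Decompress(comp, &s)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(in, decomp)) // no built-in integrity check; verify yourself
}
```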
+ +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 00000000000..f65eb3909cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. 
+func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000000..43e463611b1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. 
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000000..abade2d6052 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. 
+// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000000..b69237c9b8f --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,684 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error {
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+	)
+	if cap(s.Out) < maxHeaderSize {
+		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+	}
+	outP := uint(0)
+	out := s.Out[:maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return errors.New("internal error: remaining<1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += (bitCount + 7) / 8
+
+	if uint16(charnum) > s.symbolLen {
+		return errors.New("internal error: charnum > s.symbolLen")
+	}
+	s.Out = out[:outP]
+	return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaFindState int32
+	deltaNbBits    uint32
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < int(tableSize) {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = uint32(total >> tableLog) + lowOne = uint32((total * 3) >> (tableLog + 1)) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = uint32((total * 3) / (toDistribute * 2)) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance(bitCount >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
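+// decTable needs 1 << actualTableLog entries, while the symbol and state
+// tables are fixed at 256 entries and re-used across blocks.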
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < int(tableSize) { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = uint16(in.getBits(tableLog)) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000000..535cbadfdea --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
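+	// Callers typically treat this as a signal to store the block
+	// uncompressed rather than as a hard failure.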
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+	// Private
+	count    [maxSymbolValue + 1]uint32
+	norm     [maxSymbolValue + 1]int16
+	br       byteReader
+	bits     bitReader
+	bw       bitWriter
+	ct       cTable      // Compression tables.
+	decTable []decSymbol // Decompression table.
+	maxCount int         // count of the most probable symbol
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// DecompressLimit limits the maximum decoded size acceptable.
+	// If > 0 decompression will stop when approximately this many bytes
+	// have been decoded.
+	// If 0, the maximum size will be 2GB.
+	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // a symbol may be emitted with zero bits (prob > 50%).
+	clearCount     bool   // clear count
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
+}
+
+// Histogram allows the caller to populate the histogram and skip that step in
+// the compression; it also allows inspecting the histogram after compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = 255
+	}
+	if s.TableLog == 0 {
+		s.TableLog = defaultTablelog
+	}
+	if s.TableLog > maxTableLog {
+		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	s.br.init(in)
+	if s.DecompressLimit == 0 {
+		// Max size 2GB.
+		s.DecompressLimit = (2 << 30) - 1
+	}
+
+	return s, nil
+}
+
+// tableStep returns the next table index.
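+// The step (tableSize*5/8 + 3) is odd for all valid table sizes and therefore
+// co-prime with the power-of-two table size, so repeatedly stepping by it
+// modulo the size visits every table position exactly once.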
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 00000000000..b3d262958f8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 00000000000..e12da4db2fd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,87 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operation on multiple ALUs
+(Arithmetic Logic Units) to achieve extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+ * Mar 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low-level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `nil`               | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used; if you are still using the previous
+output, you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
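+
+A minimal sketch of a full round trip (the sample data here is illustrative only;
+as noted above, real callers must track block sizes themselves):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("abcdefgh"), 512) // compressible sample input
+
+	var enc huff0.Scratch
+	comp, _, err := huff0.Compress1X(in, &enc)
+	switch err {
+	case huff0.ErrIncompressible, huff0.ErrUseRLE:
+		log.Println("store the block raw or as RLE instead")
+		return
+	case nil:
+	default:
+		log.Fatal(err)
+	}
+
+	// ReadTable consumes the table header and returns the data part.
+	var dec huff0.Scratch
+	_, data, err := huff0.ReadTable(comp, &dec)
+	if err != nil {
+		log.Fatal(err)
+	}
+	dec.MaxDecodedSize = len(in) // the size we tracked out of band
+	out, err := dec.Decompress1X(data)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(bytes.Equal(out, in)) // true
+}
+```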
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Note, however, that this information is *not* stored in the output block; it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) functions.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested; it will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 00000000000..a4979e8868a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,329 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
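+// The stream is written head-first but read tail-first: the encoder closes a
+// stream with a final set bit (see bitWriter.close), and init locates the
+// highest set bit of the last byte to skip that padding before decoding starts.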
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBit32(uint32(v)))
+	return nil
+}
+
+// peekBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) peekBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+
+	// 2 bounds checks.
+	v := b.in[b.off-4 : b.off]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+func (b *bitReader) advance(n uint8) {
+	b.bitsRead += n
+}
+
+// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+// bitReaderBytes reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderBytes) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.advance(8 - uint8(highBit32(uint32(v))))
+	return nil
+}
+
+// peekByteFast requires that at least one byte is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. 
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderShifted) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000000..6bce4e87d4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,210 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + return + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + b.bitContainer >>= 1 << 3 + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + b.bitContainer >>= 2 << 3 + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + b.bitContainer >>= 3 << 3 + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + b.bitContainer >>= 4 << 3 + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + b.bitContainer >>= 5 << 3 + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + b.bitContainer >>= 6 << 3 + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + b.bitContainer >>= 7 << 3 + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + b.bitContainer = 0 + b.nBits = 0 + return + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. 
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
new file mode 100644
index 00000000000..50bcdf6ea99
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream b by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	v3 := int32(b.b[b.off+3])
+	v2 := int32(b.b[b.off+2])
+	v1 := int32(b.b[b.off+1])
+	v0 := int32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	v3 := uint32(b.b[b.off+3])
+	v2 := uint32(b.b[b.off+2])
+	v1 := uint32(b.b[b.off+1])
+	v0 := uint32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 00000000000..f9ed5f8306e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,657 @@
+package huff0
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similarly to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	if false {
+		// TODO: compress4Xp only slightly faster.
+ const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
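+// When a previous table exists, it also reports whether every symbol seen has
+// a non-zero code length in prevTable, which determines if that table can be
+// re-used for this block.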
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := int16(startNode) + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
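+	// Entry 0 of the overlay acts as a sentinel with a maximal count (set
+	// below as the "fake entry, strong barrier"), so the parent-building loop
+	// can compare against index -1 without extra bounds handling.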
+	huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+	for huffNode[nonNullRank].count == 0 {
+		nonNullRank--
+	}
+
+	lowS := int16(nonNullRank)
+	nodeRoot := nodeNb + lowS - 1
+	lowN := nodeNb
+	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
+	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+	nodeNb++
+	lowS -= 2
+	for n := nodeNb; n <= nodeRoot; n++ {
+		huffNode[n].count = 1 << 30
+	}
+	// fake entry, strong barrier
+	huffNode0[0].count = 1 << 31
+
+	// create parents
+	for nodeNb <= nodeRoot {
+		var n1, n2 int16
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n1 = lowS
+			lowS--
+		} else {
+			n1 = lowN
+			lowN++
+		}
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n2 = lowS
+			lowS--
+		} else {
+			n2 = lowN
+			lowN++
+		}
+
+		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
+		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+		nodeNb++
+	}
+
+	// distribute weights (unlimited tree height)
+	huffNode[nodeRoot].nbBits = 0
+	for n := nodeRoot - 1; n >= startNode; n-- {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	for n := uint16(0); n <= nonNullRank; n++ {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+	maxNbBits := s.actualTableLog
+
+	// fill result into tree (val, nbBits)
+	if maxNbBits > tableLogMax {
+		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+	}
+	var nbPerRank [tableLogMax + 1]uint16
+	var valPerRank [16]uint16
+	for _, v := range huffNode[:nonNullRank+1] {
+		nbPerRank[v.nbBits]++
+	}
+	// determine starting value per rank
+	{
+		min := uint16(0)
+		for n := maxNbBits; n > 0; n-- {
+			// get starting value within each rank
+			valPerRank[n] = min
+			min += nbPerRank[n]
+			min >>= 1
+		}
+	}
+
+	// push nbBits per symbol, symbol order
+	for _, v := range huffNode[:nonNullRank+1] {
+		s.cTable[v.symbol].nBits = v.nbBits
+	}
+
+	// assign value within rank, symbol order
+	t := s.cTable[:s.symbolLen]
+	for n, val := range t {
+		nbits := val.nBits & 15
+		v := valPerRank[nbits]
+		t[n].val = v
+		valPerRank[nbits] = v + 1
+	}
+
+	return nil
+}
+
+// huffSort will sort symbols, decreasing order.
+func (s *Scratch) huffSort() {
+	type rankPos struct {
+		base    uint32
+		current uint32
+	}
+
+	// Clear nodes
+	nodes := s.nodes[:huffNodesLen+1]
+	s.nodes = nodes
+	nodes = nodes[1 : huffNodesLen+1]
+
+	// Sort into buckets based on length of symbol count.
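+	// Buckets are indexed by highBit32(count+1), i.e. by the magnitude of the
+	// count; the insertion pass below keeps each bucket ordered by decreasing
+	// count, so the result is a full descending sort.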
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } + return +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := uint8(maxNbBits) + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000000..41703bba4d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1164 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle + double []dEntryDouble +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// double-symbols decoding +type dEntryDouble struct { + seq uint16 + nBits uint8 + len uint8 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
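+// The table is stored as up to 256 4-bit weights: a header byte >= 128 means
+// the weights follow uncompressed (two per byte), while a smaller header byte
+// gives the size of an FSE-compressed weight stream.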
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(in) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + 
return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + shift := (8 - d.actualTableLog) & 7 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. 
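+	// With actualTableLog == 8 every peeked byte is already a full table
+	// index, so the shift below is the constant 0 and can be folded away by
+	// the compiler; that is the only difference from decompress1X8Bit.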
+ var buf [256]byte + var off uint8 + + const shift = 0 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
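+	// Stream layout: a 6-byte jump table gives the byte lengths of the first
+	// three streams as little-endian uint16s (the fourth stream runs to the
+	// end of src), and each stream decodes into its own quarter of dst,
+	// dstEvery bytes apart.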
+ const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := br.off*8 + uint(64-br.bitsRead) + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
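+			// Each table entry packs the decoded symbol in the high byte and
+			// the number of bits consumed in the low byte, so a single lookup
+			// yields both the output byte and the reader advance.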
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (8 - d.actualTableLog) & 7 + + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = 
single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := int(br.off*8) + int(64-br.bitsRead) + for bitsLeft > 0 { + if br.finished() { + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[br.peekByteFast()>>shift].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= int(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
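+// This variant is chosen when actualTableLog is exactly 8, so the per-symbol
+// shift is the constant 0 and disappears at compile time.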
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 0 + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + 
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+			off = 0
+			out = out[bufoff:]
+			decoded += 256
+			// There must at least be 3 buffers left.
+			if len(out) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[:off])
+		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		for bitsLeft > 0 {
+			if br.finished() {
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()>>shift].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= int(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
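+		// A code of enc.nBits bits leaves ub == tablelog-enc.nBits free low
+		// bits, so the same (symbol, length) pair must appear at all 1<<ub
+		// table indexes that share the encoder's top bits; verify each one.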
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 00000000000..7ec2022b650
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,273 @@
+// Package huff0 provides fast huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is maximum input size for a single block uncompressed.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if the input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned when the decoded output exceeds MaxDecodedSize.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+// ReusePolicy controls if and how a previously generated table may be reused.
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust must allow reuse and produce smaller output.
+	ReusePolicyMust
+)
+
+// Scratch holds buffers and state that are reused between compression and
+// decompression calls; a Scratch must not be used concurrently.
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for next Compression/Decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	br byteReader
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy.
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that must at least be achieved;
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then at least be (input size >> WantLogLess).
+	// If WantLogLess == 0 any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+	prevTableLog   uint8  // Tablelog for previous table
+	prevTable      cTable // Table used for previous compression.
+	cTable         cTable // compression table
+	dt             dTable // decompression table
+	nodes          []nodeElt
+	tmpOut         [4][]byte
+	fse            *fse.Scratch
+	huffWeight     [maxSymbolValue + 1]byte
+}
+
+// TransferCTable will transfer the previously used compression table.
+func (s *Scratch) TransferCTable(src *Scratch) {
+	if cap(s.prevTable) < len(src.prevTable) {
+		s.prevTable = make(cTable, 0, maxSymbolValue+1)
+	}
+	s.prevTable = s.prevTable[:len(src.prevTable)]
+	copy(s.prevTable, src.prevTable)
+	s.prevTableLog = src.prevTableLog
+}
+
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if len(in) > BlockSizeMax {
+		return nil, ErrTooBig
+	}
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = maxSymbolValue
+	}
+	if s.TableLog == 0 {
+		s.TableLog = tableLogDefault
+	}
+	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+		return nil, fmt.Errorf("invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	s.Out = s.Out[:0]
+
+	s.OutTable = nil
+	s.OutData = nil
+	if cap(s.nodes) < huffNodesLen+1 {
+		s.nodes = make([]nodeElt, 0, huffNodesLen+1)
+	}
+	s.nodes = s.nodes[:0]
+	if s.fse == nil {
+		s.fse = &fse.Scratch{}
+	}
+	s.br.init(in)
+
+	return s, nil
+}
+
+type cTable []cTableEntry
+
+func (c cTable) write(s *Scratch) error {
+	var (
+		// precomputed conversion table
+		bitsToWeight [tableLogMax + 1]byte
+		huffLog      = s.actualTableLog
+		// last weight is not saved.
+		maxSymbolValue = uint8(s.symbolLen - 1)
+		huffWeight     = s.huffWeight[:256]
+	)
+	const (
+		maxFSETableLog = 6
+	)
+	// convert to weight
+	bitsToWeight[0] = 0
+	for n := uint8(1); n < huffLog+1; n++ {
+		bitsToWeight[n] = huffLog + 1 - n
+	}
+
+	// Acquire histogram for FSE.
+	hist := s.fse.Histogram()
+	hist = hist[:256]
+	for i := range hist[:16] {
+		hist[i] = 0
+	}
+	for n := uint8(0); n < maxSymbolValue; n++ {
+		v := bitsToWeight[c[n].nBits] & 15
+		huffWeight[n] = v
+		hist[v]++
+	}
+
+	// FSE compress if feasible.
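+	// Header byte convention (mirrored by ReadTable): a value below 128 is
+	// the byte length of an FSE-compressed weight stream; 128|x means x+1
+	// weights follow uncompressed, packed as two 4-bit values per byte.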
+ if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore new file mode 100644 index 00000000000..042091d9b3b --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS new file mode 100644 index 00000000000..bcfa19520af --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS new file mode 100644 index 00000000000..931ae31606f --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. 
For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE new file mode 100644 index 00000000000..6050c10f4c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README new file mode 100644 index 00000000000..cea12879a0e --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. 
+ +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 00000000000..72efb0353dd --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. 
+ ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
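+			// Every chunk starts with a 4-byte header: a 1-byte type and a
+			// 3-byte little-endian length. For data chunks the body is a
+			// 4-byte CRC of the decoded bytes followed by the payload; e.g.
+			// an uncompressed chunk carrying "abcde" has length 9 (4+5).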
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go new file mode 100644 index 00000000000..fcd192b849e --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s new file mode 100644 index 00000000000..1c66e37234d --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s @@ -0,0 +1,482 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
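+	// MOVOU is an unaligned 128-bit SSE move, so this single load/store pair
+	// covers any literal of length 1..16.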
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + CMPQ SI, R13 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
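+	// Each pass doubles offset, so starting from offset >= 1 at most three
+	// copies run before offset reaches 8.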
+ // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go new file mode 100644 index 00000000000..94a96c5d7b8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 00000000000..8d393e904bb --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
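+ // For example (illustrative values, not taken from this file): a 300-byte
+ // input yields the header bytes 0xac 0x02, since a uvarint stores 7 bits
+ // per byte, low group first, and sets the high bit on all but the last
+ // byte: 300 = 0x2c | 0x02<<7.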
+ d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
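+ // In other words, the worst-case item blowup adds at most one extra
+ // output byte for every six input bytes, i.e. n/6 for n input bytes.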
+ // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
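+ // (Any error from write is recorded in w.err, which is checked
+ // once this loop exits, so it is safe to discard here.)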
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go new file mode 100644 index 00000000000..150d91bc8be --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. 
+// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s new file mode 100644 index 00000000000..adfd979fe27 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. 
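+ // (0xfe is 63<<2 | tagCopy2, i.e. a tagCopy2 op whose length field
+ // encodes 63+1 = 64, matching emitCopy in encode_other.go.)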
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. 
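+// (The offsets 56 through 112 above are eight 8-byte spill slots; they are
+// the "extra 64 bytes" in the frame-size arithmetic below.)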
+// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
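+ // (Registers are not preserved across the CALL below, so DI is saved
+ // to its spill slot beforehand and restored afterwards.)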
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/snappy/encode_other.go
new file mode 100644
index 00000000000..dbcae905e6e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+  uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+  dst[0] = uint8(n)<<2 | tagLiteral
+  i = 1
+ case n < 1<<8:
+  dst[0] = 60<<2 | tagLiteral
+  dst[1] = uint8(n)
+  i = 2
+ default:
+  dst[0] = 61<<2 | tagLiteral
+  dst[1] = uint8(n)
+  dst[2] = uint8(n >> 8)
+  i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+  // Emit a length 64 copy, encoded as 3 bytes.
+  dst[i+0] = 63<<2 | tagCopy2
+  dst[i+1] = uint8(offset)
+  dst[i+2] = uint8(offset >> 8)
+  i += 3
+  length -= 64
+ }
+ if length > 64 {
+  // Emit a length 60 copy, encoded as 3 bytes.
+  dst[i+0] = 59<<2 | tagCopy2
+  dst[i+1] = uint8(offset)
+  dst[i+2] = uint8(offset >> 8)
+  i += 3
+  length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+  // Emit the remaining copy, encoded as 3 bytes.
+  dst[i+0] = uint8(length-1)<<2 | tagCopy2
+  dst[i+1] = uint8(offset)
+  dst[i+2] = uint8(offset >> 8)
+  return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
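+ // (tagCopy1 layout, per the format description in snappy.go: offset
+ // bits 8-10 in the top three bits of the first byte, length-4 in bits
+ // 2-4, the tag in the low two bits, and offset bits 0-7 in the second
+ // byte.)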
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd new file mode 100644 index 00000000000..d24eb4b47c3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/runbench.cmd @@ -0,0 +1,2 @@ +del old.txt +go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 00000000000..74a36689e87 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. 
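+//
+// A minimal block-format round trip looks like this (sketch):
+//
+//	encoded := snappy.Encode(nil, data)
+//	decoded, err := snappy.Decode(nil, encoded)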
+// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000000..ea3e5108270 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,406 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. 
+It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder.
+A high performance compression algorithm is implemented, currently focused on speed.
+
+This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
+
+This package is pure Go and without use of "unsafe".
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
+
+
+## Compressor
+
+### Status:
+
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively
+used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
+
+For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
+
+The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
+The compression ratio is around stdlib's level 3, but usually 3x as fast.
+
+Compared to cgo zstd, the speed is around level 3 (default), but compression is slightly worse, between levels 1 and 2.
+
+
+### Usage
+
+An Encoder can be used for either compressing a stream via the
+`io.WriteCloser` interface supported by the Encoder or as multiple independent
+tasks via the `EncodeAll` function.
+Smaller encodes are encouraged to use the EncodeAll function.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+    w, err := NewWriter(out)
+    if err != nil {
+        return err
+    }
+    _, err = io.Copy(w, in)
+    if err != nil {
+        w.Close()
+        return err
+    }
+    return w.Close()
+}
+```
+
+Now you can encode by writing data to `w`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. 
When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently, but each call will only run on a single goroutine.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks, you should take care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+    return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some examples comparing speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second. 
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73101992 643 313.87 +silesia.tar zskp 2 211947520 67504318 969 208.38 +silesia.tar zskp 3 211947520 65177448 1899 106.44 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1654 122.21 +silesia.tar gzkp 1 211947520 80369488 1168 173.06 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 235022249 3088 590.30 +gob-stream zskp 2 1911399616 205669791 3786 481.34 +gob-stream zskp 3 1911399616 185792019 9324 195.48 +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream gzstd 1 1911399616 357382641 10251 177.82 +gob-stream gzkp 1 1911399616 362156523 5695 320.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343848582 3609 264.18 +enwik9 zskp 2 1000000000 317276632 5746 165.97 +enwik9 zskp 3 1000000000 294540704 11725 81.34 +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 gzstd 1 1000000000 382578136 9604 99.30 +enwik9 gzkp 1 1000000000 383825945 6544 145.73 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file out level insize outsize millis mb/s
+github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
+github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
+github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54
+github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
+github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
+github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
+github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
+github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file out level insize outsize millis mb/s
+rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
+rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
+rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75
+rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
+rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
+rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
+rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
+rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file out level insize outsize millis mb/s
+nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
+nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
+nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53
+nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
+nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
+nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
+nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
+
+### Usage
+
+The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
+Both are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+    d, err := zstd.NewReader(in)
+    if err != nil {
+        return err
+    }
+    defer d.Close()
+
+    // Copy content...
+    _, err = io.Copy(out, d)
+    return err
+}
+```
+
+It is important to call the "Close" function when you no longer need the Reader, in order to stop its running goroutines.
+See "Allocation-less operation" below.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil)
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder. 
+func Decompress(src []byte) ([]byte, error) {
+    return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+A dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use the `Reset(r io.Reader) error` function to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made.
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder operates as follows:
+
+* One goroutine reads input and splits the input to several block decoders.
+* A number of decoders will decode blocks.
+* A goroutine coordinates these blocks and sends history from one to the next.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency. 
+ +In practice this means that concurrency is often limited to utilizing about 2 cores effectively. + + +### Benchmarks + +These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). + +The first two are streaming decodes and the last are smaller inputs. + +``` +BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op +BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op + +BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op +BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op + +Concurrent performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op + +BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2020, but this may be out of date. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. 
+ +For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 00000000000..85445853715 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) int { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return int(v) +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. 
+func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000000..303ae90f944 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,169 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 32 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000000..4733ea876a1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,739 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/huff0"
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type blockType uint8
+
+//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex
+
+const (
+	blockTypeRaw blockType = iota
+	blockTypeRLE
+	blockTypeCompressed
+	blockTypeReserved
+)
+
+type literalsBlockType uint8
+
+const (
+	literalsBlockRaw literalsBlockType = iota
+	literalsBlockRLE
+	literalsBlockCompressed
+	literalsBlockTreeless
+)
+
+const (
+	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
+	maxCompressedBlockSize = 128 << 10
+
+	// Maximum possible block size (all Raw+Uncompressed).
+	maxBlockSize = (1 << 21) - 1
+
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
+	maxCompressedLiteralSize = 1 << 18
+	maxRLELiteralSize        = 1 << 20
+	maxMatchLen              = 131074
+	maxSequences             = 0x7f00 + 0xffff
+
+	// We support slightly less than the reference decoder to be able to
+	// use ints on 32 bit archs.
+	maxOffsetBits = 30
+)
+
+var (
+	huffDecoderPool = sync.Pool{New: func() interface{} {
+		return &huff0.Scratch{}
+	}}
+
+	fseDecoderPool = sync.Pool{New: func() interface{} {
+		return &fseDecoder{}
+	}}
+)
+
+type blockDec struct {
+	// Raw source data of the block.
+	data        []byte
+	dataStorage []byte
+
+	// Destination of the decoded data.
+	dst []byte
+
+	// Buffer for literals data.
+	literalBuf []byte
+
+	// Window size of the block.
+	WindowSize uint64
+
+	history     chan *history
+	input       chan struct{}
+	result      chan decodeOutput
+	sequenceBuf []seq
+	err         error
+	decWG       sync.WaitGroup
+
+	// Frame to use for singlethreaded decoding.
+	// Should not be used by the decoder itself since parent may be another frame.
+	localFrame *frameDec
+
+	// Block is RLE, this is the size.
+	RLESize uint32
+	tmp     [4]byte
+
+	Type blockType
+
+	// Is this the last block of a frame?
+	Last bool
+
+	// Use less memory
+	lowMem bool
+}
+
+func (b *blockDec) String() string {
+	if b == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+	b := blockDec{
+		lowMem:  lowMem,
+		result:  make(chan decodeOutput, 1),
+		input:   make(chan struct{}, 1),
+		history: make(chan *history, 1),
+	}
+	b.decWG.Add(1)
+	go b.startDecoder()
+	return &b
+}
+
+// reset will reset the block.
+// Input must be a start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+	b.WindowSize = windowSize
+	tmp := br.readSmall(3)
+	if tmp == nil {
+		if debug {
+			println("Reading block header:", io.ErrUnexpectedEOF)
+		}
+		return io.ErrUnexpectedEOF
+	}
+	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+	b.Last = bh&1 != 0
+	b.Type = blockType((bh >> 1) & 3)
+	// find size.
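+	// The block header is 3 bytes, little endian: bit 0 = Last_Block,
+	// bits 1-2 = Block_Type, bits 3-23 = Block_Size.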
+	cSize := int(bh >> 3)
+	maxSize := maxBlockSize
+	switch b.Type {
+	case blockTypeReserved:
+		return ErrReservedBlockType
+	case blockTypeRLE:
+		b.RLESize = uint32(cSize)
+		if b.lowMem {
+			maxSize = cSize
+		}
+		cSize = 1
+	case blockTypeCompressed:
+		if debug {
+			println("Data size on stream:", cSize)
+		}
+		b.RLESize = 0
+		maxSize = maxCompressedBlockSize
+		if windowSize < maxCompressedBlockSize && b.lowMem {
+			maxSize = int(windowSize)
+		}
+		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+			if debug {
+				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrCompressedSizeTooBig
+		}
+	case blockTypeRaw:
+		b.RLESize = 0
+		// We do not need a destination for raw blocks.
+		maxSize = -1
+	default:
+		panic("Invalid block type")
+	}
+
+	// Read block data.
+	if cap(b.dataStorage) < cSize {
+		if b.lowMem {
+			b.dataStorage = make([]byte, 0, cSize)
+		} else {
+			b.dataStorage = make([]byte, 0, maxBlockSize)
+		}
+	}
+	if cap(b.dst) <= maxSize {
+		b.dst = make([]byte, 0, maxSize+1)
+	}
+	var err error
+	b.data, err = br.readBig(cSize, b.dataStorage)
+	if err != nil {
+		if debug {
+			println("Reading block:", err, "(", cSize, ")", len(b.data))
+			printf("%T", br)
+		}
+		return err
+	}
+	return nil
+}
+
+// sendErr will make the decoder return the provided error on this frame.
+func (b *blockDec) sendErr(err error) {
+	b.Last = true
+	b.Type = blockTypeReserved
+	b.err = err
+	b.input <- struct{}{}
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
+func (b *blockDec) Close() {
+	close(b.input)
+	close(b.history)
+	close(b.result)
+	b.decWG.Wait()
+}
+
+// startDecoder will decode blocks as they are sent on the input channel.
+// This will separate output and history.
+func (b *blockDec) startDecoder() {
+	defer b.decWG.Done()
+	for range b.input {
+		//println("blockDec: Got block input")
+		switch b.Type {
+		case blockTypeRLE:
+			if cap(b.dst) < int(b.RLESize) {
+				if b.lowMem {
+					b.dst = make([]byte, b.RLESize)
+				} else {
+					b.dst = make([]byte, maxBlockSize)
+				}
+			}
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst[:b.RLESize],
+				err: nil,
+			}
+			v := b.data[0]
+			for i := range o.b {
+				o.b[i] = v
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeRaw:
+			o := decodeOutput{
+				d:   b,
+				b:   b.data,
+				err: nil,
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeCompressed:
+			b.dst = b.dst[:0]
+			err := b.decodeCompressed(nil)
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst,
+				err: err,
+			}
+			if debug {
+				println("Decompressed to", len(b.dst), "bytes, error:", err)
+			}
+			b.result <- o
+		case blockTypeReserved:
+			// Used for returning errors.
+			<-b.history
+			b.result <- decodeOutput{
+				d:   b,
+				b:   nil,
+				err: b.err,
+			}
+		default:
+			panic("Invalid block type")
+		}
+		if debug {
+			println("blockDec: Finished block")
+		}
+	}
+}
+
+// decodeBuf will decode the block synchronously using the provided history.
+// The history will not be fetched from the channel.
+func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + b.dst = hist.b + hist.b = nil + err := b.decodeCompressed(hist) + if debug { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + hist.b = b.dst + b.dst = saved + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +// decodeCompressed will start decompressing a block. +// If no history is supplied the decoder will decodeAsync as much as possible +// before fetching from blockDec.history +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + delayedHistory := hist == nil + + if delayedHistory { + // We must always grab history. + defer func() { + if hist == nil { + <-b.history + } + }() + } + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return ErrBlockTooSmall + } + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
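+			// The two sizes share a little-endian field after the 4 header
+			// bits: the low 10 bits are Regenerated_Size and the next 10
+			// bits are Compressed_Size, as extracted below.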
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debug { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + var literals []byte + var huff *huff0.Scratch + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize) + } else { + if litRegenSize > maxCompressedLiteralSize { + // Exceptional + b.literalBuf = make([]byte, litRegenSize) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) + + } + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debug { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debug { + printf("Found %d compressed literals\n", litCompSize) + } + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + huff = huffDecoderPool.Get().(*huff0.Scratch) + var err error + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + if huff == nil { + huff = &huff0.Scratch{} + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return err + } + // Use our out buffer. 
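+		// Compressed literals are one or four interleaved Huffman streams;
+		// four streams decode with Decompress4X, one with Decompress1X.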
+ if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + if debug { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + seqHeader := in[0] + nSeqs := 0 + switch { + case seqHeader == 0: + in = in[1:] + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + // Allocate sequences + if cap(b.sequenceBuf) < nSeqs { + if b.lowMem { + b.sequenceBuf = make([]seq, nSeqs) + } else { + // Allocate max + b.sequenceBuf = make([]seq, nSeqs, maxSequences) + } + } else { + // Reuse buffer + b.sequenceBuf = b.sequenceBuf[:nSeqs] + } + var seqs = &sequenceDecs{} + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debug { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debug { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + dec := fseDecoderPool.Get().(*fseDecoder) + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + dec.setRLE(symb) + seq.fse = dec + if debug { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + dec := fseDecoderPool.Get().(*fseDecoder) + err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + seq.fse = dec + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + + // Wait for history. + // All time spent after this is critical since it is strictly sequential. + if hist == nil { + hist = <-b.history + if hist.error { + return ErrDecoderClosed + } + } + + // Decode treeless literal block. + if litType == literalsBlockTreeless { + // TODO: We could send the history early WITHOUT the stream history. + // This would allow decoding treeless literials before the byte history is available. 
+	//   Silesia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
+	//   So not much obvious gain here.
+
+	if hist.huffTree == nil {
+		return errors.New("literal block was treeless, but no history was defined")
+	}
+	// Ensure we have space to store it.
+	if cap(b.literalBuf) < litRegenSize {
+		if b.lowMem {
+			b.literalBuf = make([]byte, 0, litRegenSize)
+		} else {
+			b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+		}
+	}
+	var err error
+	// Use our out buffer.
+	huff = hist.huffTree
+	if fourStreams {
+		literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+	} else {
+		literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+	}
+	// Make sure we don't leak our literals buffer
+	if err != nil {
+		println("decompressing literals:", err)
+		return err
+	}
+	if len(literals) != litRegenSize {
+		return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+	}
+	} else {
+		if hist.huffTree != nil && huff != nil {
+			if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
+				huffDecoderPool.Put(hist.huffTree)
+			}
+			hist.huffTree = nil
+		}
+	}
+	if huff != nil {
+		hist.huffTree = huff
+	}
+	if debug {
+		println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+	}
+
+	if nSeqs == 0 {
+		// Decompressed content is defined entirely as Literals Section content.
+		b.dst = append(b.dst, literals...)
+		if delayedHistory {
+			hist.append(literals)
+		}
+		return nil
+	}
+
+	seqs, err := seqs.mergeHistory(&hist.decoders)
+	if err != nil {
+		return err
+	}
+	if debug {
+		println("History merged ok")
+	}
+	br := &bitReader{}
+	if err := br.init(in); err != nil {
+		return err
+	}
+
+	// TODO: Investigate if sending history without decoders is faster.
+	//   This would allow the sequences to be decoded async and only have to construct stream history.
+	//   If only recent offsets were not transferred, this would be an obvious win.
+	//   Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+
+	hbytes := hist.b
+	if len(hbytes) > hist.windowSize {
+		hbytes = hbytes[len(hbytes)-hist.windowSize:]
+		// We do not need history any more.
+		if hist.dict != nil {
+			hist.dict.content = nil
+		}
+	}
+
+	if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
+		println("initializing sequences:", err)
+		return err
+	}
+
+	err = seqs.decode(nSeqs, br, hbytes)
+	if err != nil {
+		return err
+	}
+	if !br.finished() {
+		return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
+	}
+
+	err = br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	if len(b.data) > maxCompressedBlockSize {
+		return fmt.Errorf("compressed block size too large (%d)", len(b.data))
+	}
+	// Set output and release references.
+	b.dst = seqs.out
+	seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+
+	if !delayedHistory {
+		// If we don't have delayed history, no need to update.
+		hist.recentOffsets = seqs.prevOffset
+		return nil
+	}
+	if b.Last {
+		// if last block we don't care about history.
+ println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } + hist.append(b.dst) + hist.recentOffsets = seqs.prevOffset + if debug { + println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") + } + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000000..083fbb502f4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,854 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + last bool + + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if cap(b.literals) < maxCompressedLiteralSize { + b.literals = make([]byte, 0, maxCompressedLiteralSize) + } + const defSeqs = 200 + b.literals = b.literals[:0] + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. 
+func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debug { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debug { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. 
+func (h literalsHeader) size() int {
+	return int(h >> 60)
+}
+
+func (h literalsHeader) String() string {
+	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%x, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
+}
+
+// pushOffsets will push the recent offsets to the backup store.
+func (b *blockEnc) pushOffsets() {
+	b.prevRecentOffsets = b.recentOffsets
+}
+
+// popOffsets will restore the recent offsets from the backup store.
+func (b *blockEnc) popOffsets() {
+	b.recentOffsets = b.prevRecentOffsets
+}
+
+// matchOffset will adjust recent offsets and return the adjusted one,
+// if it matches a previous offset.
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
+	// Check if offset is one of the recent offsets.
+	// Adjusts the output offset accordingly.
+	// Gives a tiny bit of compression, typically around 1%.
+	if true {
+		if lits > 0 {
+			switch offset {
+			case b.recentOffsets[0]:
+				offset = 1
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		} else {
+			switch offset {
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 1
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[0] - 1:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		}
+	} else {
+		offset += 3
+	}
+	return offset
+}
+
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRaw(a []byte) {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(a)))
+	bh.setType(blockTypeRaw)
+	b.output = bh.appendTo(b.output[:0])
+	b.output = append(b.output, a...)
+	if debug {
+		println("Adding RAW block, length", len(a), "last:", b.last)
+	}
+}
+
+// encodeRawTo can be used to append a raw representation of the supplied bytes to dst.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debug {
+		println("Adding RAW block, length", len(src), "last:", b.last)
+	}
+	return dst
+}
+
+// encodeLits can be used if the block is only litLen.
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(lits)))
+
+	// Don't compress extremely small blocks
+	if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
+		if debug {
+			println("Adding RAW block, length", len(lits), "last:", b.last)
+		}
+		bh.setType(blockTypeRaw)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, lits...)
+		return nil
+	}
+
+	var (
+		out            []byte
+		reUsed, single bool
+		err            error
+	)
+	if b.dictLitEnc != nil {
+		b.litEnc.TransferCTable(b.dictLitEnc)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		b.dictLitEnc = nil
+	}
+	if len(lits) >= 1024 {
+		// Use 4 Streams.
+ out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debug { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debug { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + default: + return err + case nil: + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debug { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram()[:256] + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. 
+ out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debug { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debug { + println("Adding literals RLE") + } + default: + if debug { + println("Adding literals ERROR:", err) + } + return err + case nil: + // Compressed litLen... + if reUsed { + if debug { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debug { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debug { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debug { + println("Adding literals compressed") + } + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debug { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debug { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debug { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debug { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debug { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debug { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debug { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debug { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { + // No need to flush (common) + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is 8 for all. 
+ of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // We checked that all can stay within 32 bits + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } else { + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is below 8 for each. + of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // ml+ll = max 32 bits total + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } + ml.flush(mlEnc.actualTableLog) + of.flush(ofEnc.actualTableLog) + ll.flush(llEnc.actualTableLog) + err = wr.close() + if err != nil { + return err + } + b.output = wr.out + + if len(b.output)-3-bhOffset >= b.size { + // Maybe even add a bigger margin. + b.litEnc.Reuse = huff0.ReusePolicyNone + return errIncompressible + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debug { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram()[:256] + ofH := b.coders.ofEnc.Histogram()[:256] + mlH := b.coders.mlEnc.Histogram()[:256] + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i, seq := range b.sequences { + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + b.sequences[i] = seq + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 00000000000..01a01e486e1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. 
+ +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000000..658ef78380e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,127 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" + "io/ioutil" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns nil if no more input is available. + readSmall(n int) []byte + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. 
+ readByte() (byte, error) + + // Skip n bytes. + skipN(n int) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil + } + r := bb[:n] + *b = bb[n:] + return r +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) remain() []byte { + return *b +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, nil + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int) error { + bb := *b + if len(bb) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if n2 != n { + if debug { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil + } + return r.tmp[:n] +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int) error { + n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + if n2 != int64(n) { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000000..2c4fca17fa1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,88 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
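The byteReader below assembles little-endian integers with explicit shifts so the hot paths avoid per-byte bounds checks. As a sanity check for reviewers, a small self-contained sketch (not part of this diff) confirming the shift form matches the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04}
	// The same assembly the Uint32/Int32 helpers perform:
	// v0 | v1<<8 | v2<<16 | v3<<24, little endian.
	manual := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
	fmt.Println(manual == binary.LittleEndian.Uint32(b)) // true (0x04030201)
}
```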
+func (b byteReader) Uint32() uint32 {
+ if r := b.remain(); r < 4 {
+ // Very rare
+ v := uint32(0)
+ for i := 1; i <= r; i++ {
+ v = (v << 8) | uint32(b.b[len(b.b)-i])
+ }
+ return v
+ }
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
+ v3 := uint32(b2[3])
+ v2 := uint32(b2[2])
+ v1 := uint32(b2[1])
+ v0 := uint32(b2[0])
+ return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint32NC returns a little endian uint32 starting at current offset.
+// The caller must ensure that there are at least 4 bytes left.
+func (b byteReader) Uint32NC() uint32 {
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
+ v3 := uint32(b2[3])
+ v2 := uint32(b2[2])
+ v1 := uint32(b2[1])
+ v0 := uint32(b2[0])
+ return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+ return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+ return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
new file mode 100644
index 00000000000..d78be6d4236
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -0,0 +1,546 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+// Decoder provides decoding of zstandard streams.
+// The decoder has been designed to operate without allocations after a warmup.
+// This means that you should store the decoder for best performance.
+// To re-use a stream decoder, use Reset(r io.Reader) to switch to another stream.
+// A decoder can safely be re-used even if the previous stream failed.
+// To release the resources, you must call the Close() function on a decoder.
+type Decoder struct {
+ o decoderOptions
+
+ // Unreferenced decoders, ready for use.
+ decoders chan *blockDec
+
+ // Streams ready to be decoded.
+ stream chan decodeStream
+
+ // Current read position used for Reader functionality.
+ current decoderState
+
+ // Custom dictionaries.
+ // Always uses copies.
+ dicts map[uint32]dict
+
+ // streamWg is the waitgroup for all streams
+ streamWg sync.WaitGroup
+}
+
+// decoderState is used for maintaining state when the decoder
+// is used for streaming.
+type decoderState struct {
+ // current block being written to stream.
+ decodeOutput
+
+ // output in order to be written to stream.
+ output chan decodeOutput
+
+ // cancel remaining output.
+ cancel chan struct{}
+
+ flushed bool
+}
+
+var (
+ // Check the interfaces we want to support.
+ _ = io.WriterTo(&Decoder{})
+ _ = io.Reader(&Decoder{})
+)
+
+// NewReader creates a new decoder.
+// A nil Reader can be provided in which case Reset can be used to start a decode.
+//
+// A Decoder can be used in two modes:
+//
+// 1) As a stream, or
+// 2) For stateless decoding using DecodeAll.
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
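Since the godoc above covers both modes, here is a hedged usage sketch of the streaming mode (reviewer illustration, not part of this diff); the options used are the ones defined in decoder_options.go later in this change:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Streaming mode: wrap any io.Reader and read decompressed bytes.
	dec, err := zstd.NewReader(os.Stdin,
		zstd.WithDecoderConcurrency(4),   // cap the block-decoder goroutines
		zstd.WithDecoderMaxMemory(1<<30)) // guard against hostile frames
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close() // releases the pooled block decoders

	if _, err := io.Copy(os.Stdout, dec); err != nil {
		log.Fatal(err)
	}
}
```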
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+ initPredefined()
+ var d Decoder
+ d.o.setDefault()
+ for _, o := range opts {
+ err := o(&d.o)
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.current.output = make(chan decodeOutput, d.o.concurrent)
+ d.current.flushed = true
+
+ // Transfer option dicts.
+ d.dicts = make(map[uint32]dict, len(d.o.dicts))
+ for _, dc := range d.o.dicts {
+ d.dicts[dc.id] = dc
+ }
+ d.o.dicts = nil
+
+ // Create decoders
+ d.decoders = make(chan *blockDec, d.o.concurrent)
+ for i := 0; i < d.o.concurrent; i++ {
+ dec := newBlockDec(d.o.lowMem)
+ dec.localFrame = newFrameDec(d.o)
+ d.decoders <- dec
+ }
+
+ if r == nil {
+ return &d, nil
+ }
+ return &d, d.Reset(r)
+}
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes written and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+ if d.stream == nil {
+ return 0, errors.New("no input has been initialized")
+ }
+ var n int
+ for {
+ if len(d.current.b) > 0 {
+ filled := copy(p, d.current.b)
+ p = p[filled:]
+ d.current.b = d.current.b[filled:]
+ n += filled
+ }
+ if len(p) == 0 {
+ break
+ }
+ if len(d.current.b) == 0 {
+ // We have an error and no more data
+ if d.current.err != nil {
+ break
+ }
+ if !d.nextBlock(n == 0) {
+ return n, nil
+ }
+ }
+ }
+ if len(d.current.b) > 0 {
+ if debug {
+ println("returning", n, "still bytes left:", len(d.current.b))
+ }
+ // Only return error at end of block
+ return n, nil
+ }
+ if d.current.err != nil {
+ d.drainOutput()
+ }
+ if debug {
+ println("returning", n, d.current.err, len(d.decoders))
+ }
+ return n, d.current.err
+}
+
+// Reset will reset the decoder to the supplied stream after the current has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+func (d *Decoder) Reset(r io.Reader) error {
+ if d.current.err == ErrDecoderClosed {
+ return d.current.err
+ }
+ if r == nil {
+ return errors.New("nil Reader sent as input")
+ }
+
+ if d.stream == nil {
+ d.stream = make(chan decodeStream, 1)
+ d.streamWg.Add(1)
+ go d.startStreamDecoder(d.stream)
+ }
+
+ d.drainOutput()
+
+ // If the input is a *bytes.Buffer and < 1MB, do sync decoding anyway.
+ if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 {
+ if debug {
+ println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+ }
+ b := bb.Bytes()
+ var dst []byte
+ if cap(d.current.b) > 0 {
+ dst = d.current.b
+ }
+
+ dst, err := d.DecodeAll(b, dst[:0])
+ if err == nil {
+ err = io.EOF
+ }
+ d.current.b = dst
+ d.current.err = err
+ d.current.flushed = true
+ if debug {
+ println("sync decode to", len(dst), "bytes, err:", err)
+ }
+ return nil
+ }
+
+ // Remove current block.
+ d.current.decodeOutput = decodeOutput{}
+ d.current.err = nil
+ d.current.cancel = make(chan struct{})
+ d.current.flushed = false
+ d.current.d = nil
+
+ d.stream <- decodeStream{
+ r: r,
+ output: d.current.output,
+ cancel: d.current.cancel,
+ }
+ return nil
+}
+
+// drainOutput will drain the output until errEndOfStream is sent.
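Reset is the re-use path the Decoder godoc recommends. A minimal sketch (assumed usage, not from this diff, assuming the io and zstd imports) of serving many streams with one Decoder:

```go
// decompressAll reuses a single Decoder across several streams via Reset,
// avoiding the allocations a fresh NewReader per stream would cause.
func decompressAll(streams []io.Reader, dst io.Writer) error {
	dec, err := zstd.NewReader(nil) // nil Reader: each stream starts with Reset
	if err != nil {
		return err
	}
	defer dec.Close()
	for _, r := range streams {
		if err := dec.Reset(r); err != nil {
			return err
		}
		if _, err := io.Copy(dst, dec); err != nil {
			return err
		}
	}
	return nil
}
```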
+func (d *Decoder) drainOutput() {
+ if d.current.cancel != nil {
+ println("cancelling current")
+ close(d.current.cancel)
+ d.current.cancel = nil
+ }
+ if d.current.d != nil {
+ if debug {
+ printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
+ }
+ d.decoders <- d.current.d
+ d.current.d = nil
+ d.current.b = nil
+ }
+ if d.current.output == nil || d.current.flushed {
+ println("current already flushed")
+ return
+ }
+ for {
+ select {
+ case v := <-d.current.output:
+ if v.d != nil {
+ if debug {
+ printf("re-adding decoder %p", v.d)
+ }
+ d.decoders <- v.d
+ }
+ if v.err == errEndOfStream {
+ println("current flushed")
+ d.current.flushed = true
+ return
+ }
+ }
+ }
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
+ if d.stream == nil {
+ return 0, errors.New("no input has been initialized")
+ }
+ var n int64
+ for {
+ if len(d.current.b) > 0 {
+ n2, err2 := w.Write(d.current.b)
+ n += int64(n2)
+ if err2 != nil && d.current.err == nil {
+ d.current.err = err2
+ break
+ }
+ }
+ if d.current.err != nil {
+ break
+ }
+ d.nextBlock(true)
+ }
+ err := d.current.err
+ if err != nil {
+ d.drainOutput()
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ return n, err
+}
+
+// DecodeAll allows stateless decoding of a blob of bytes.
+// Output will be appended to dst, so if the destination size is known
+// you can pre-allocate the destination slice to avoid allocations.
+// DecodeAll can be used concurrently.
+// The Decoder concurrency limits will be respected.
+func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
+ if d.current.err == ErrDecoderClosed {
+ return dst, ErrDecoderClosed
+ }
+
+ // Grab a block decoder and frame decoder.
+ block := <-d.decoders
+ frame := block.localFrame
+ defer func() {
+ if debug {
+ printf("re-adding decoder: %p", block)
+ }
+ frame.rawInput = nil
+ frame.bBuf = nil
+ d.decoders <- block
+ }()
+ frame.bBuf = input
+
+ for {
+ frame.history.reset()
+ err := frame.reset(&frame.bBuf)
+ if err == io.EOF {
+ if debug {
+ println("frame reset return EOF")
+ }
+ return dst, nil
+ }
+ if frame.DictionaryID != nil {
+ dict, ok := d.dicts[*frame.DictionaryID]
+ if !ok {
+ return nil, ErrUnknownDictionary
+ }
+ frame.history.setDict(&dict)
+ }
+ if err != nil {
+ return dst, err
+ }
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+ return dst, ErrDecoderSizeExceeded
+ }
+ if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+ // Never preallocate more than 1 GB up front.
+ if uint64(cap(dst)) < frame.FrameContentSize {
+ dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+ copy(dst2, dst)
+ dst = dst2
+ }
+ }
+ if cap(dst) == 0 {
+ // Allocate window size * 2 by default if nothing is provided and we didn't get frame content size.
+ size := frame.WindowSize * 2
+ // Cap to 1 MB.
+ if size > 1<<20 {
+ size = 1 << 20
+ }
+ dst = make([]byte, 0, size)
+ }
+
+ dst, err = frame.runDecoder(dst, block)
+ if err != nil {
+ return dst, err
+ }
+ if len(frame.bBuf) == 0 {
+ if debug {
+ println("frame dbuf empty")
+ }
+ break
+ }
+ }
+ return dst, nil
+}
+
+// nextBlock returns the next block.
+// If an error occurs d.err will be set.
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
+ if d.current.d != nil {
+ if debug {
+ printf("re-adding current decoder %p", d.current.d)
+ }
+ d.decoders <- d.current.d
+ d.current.d = nil
+ }
+ if d.current.err != nil {
+ // Keep error state.
+ return blocking
+ }
+
+ if blocking {
+ d.current.decodeOutput = <-d.current.output
+ } else {
+ select {
+ case d.current.decodeOutput = <-d.current.output:
+ default:
+ return false
+ }
+ }
+ if debug {
+ println("got", len(d.current.b), "bytes, error:", d.current.err)
+ }
+ return true
+}
+
+// Close will release all resources.
+// It is NOT possible to reuse the decoder after this.
+func (d *Decoder) Close() {
+ if d.current.err == ErrDecoderClosed {
+ return
+ }
+ d.drainOutput()
+ if d.stream != nil {
+ close(d.stream)
+ d.streamWg.Wait()
+ d.stream = nil
+ }
+ if d.decoders != nil {
+ close(d.decoders)
+ for dec := range d.decoders {
+ dec.Close()
+ }
+ d.decoders = nil
+ }
+ if d.current.d != nil {
+ d.current.d.Close()
+ d.current.d = nil
+ }
+ d.current.err = ErrDecoderClosed
+}
+
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+ return closeWrapper{d: d}
+}
+
+// closeWrapper wraps a Decoder to expose it as an io.ReadCloser.
+type closeWrapper struct {
+ d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+ return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+ return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+ c.d.Close()
+ return nil
+}
+
+type decodeOutput struct {
+ d *blockDec
+ b []byte
+ err error
+}
+
+type decodeStream struct {
+ r io.Reader
+
+ // Blocks ready to be written to output.
+ output chan decodeOutput
+
+ // cancel reading from the input
+ cancel chan struct{}
+}
+
+// errEndOfStream indicates that everything from the stream was read.
+var errEndOfStream = errors.New("end-of-stream")
+
+// Create Decoder:
+// Spawn n block decoders. These accept tasks to decode a block.
+// Create goroutine that handles stream processing, this will send history to decoders as they are available.
+// Decoders update the history as they decode.
+// When a block is returned:
+// a) history is sent to the next decoder,
+// b) content written to CRC.
+// c) return data to WRITER.
+// d) wait for next block to return data.
+// Once written, the decoders are handed back to the frame decoder for re-use.
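The pipeline comment above describes the streaming side; DecodeAll (earlier in this file) is the stateless side. A hedged sketch (not part of this diff) of the buffer-reuse pattern its godoc suggests:

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	var buf []byte // reused between calls; DecodeAll appends to dst
	for _, frame := range [][]byte{ /* compressed frames */ } {
		buf, err = dec.DecodeAll(frame, buf[:0])
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("decoded", len(buf), "bytes")
	}
}
```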
+func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { + defer d.streamWg.Done() + frame := newFrameDec(d.o) + for stream := range inStream { + if debug { + println("got new stream") + } + br := readerWrapper{r: stream.r} + decodeStream: + for { + frame.history.reset() + err := frame.reset(&br) + if debug && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err != nil { + stream.output <- decodeOutput{ + err: err, + } + break + } + if debug { + println("starting frame decoder") + } + + // This goroutine will forward history between frames. + frame.frameDone.Add(1) + frame.initAsync() + + go frame.startDecoder(stream.output) + decodeFrame: + // Go through all blocks of the frame. + for { + dec := <-d.decoders + select { + case <-stream.cancel: + if !frame.sendErr(dec, io.EOF) { + // To not let the decoder dangle, send it back. + stream.output <- decodeOutput{d: dec} + } + break decodeStream + default: + } + err := frame.next(dec) + switch err { + case io.EOF: + // End of current frame, no error + println("EOF on next block") + break decodeFrame + case nil: + continue + default: + println("block decoder returned", err) + break decodeStream + } + } + // All blocks have started decoding, check if there are more frames. + println("waiting for done") + frame.frameDone.Wait() + println("done waiting...") + } + frame.frameDone.Wait() + println("Sending EOS") + stream.output <- decodeOutput{err: errEndOfStream} + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000000..284d384492b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,84 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + dicts []dict +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + } + o.maxDecodedSize = 1 << 63 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n <= 0 { + return fmt.Errorf("Concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. 
+// For streaming operations, the maximum window size is capped at 1<<30 bytes.
+// Maximum and default is 1 << 63 bytes.
+func WithDecoderMaxMemory(n uint64) DOption {
+ return func(o *decoderOptions) error {
+ if n == 0 {
+ return errors.New("WithDecoderMaxMemory must be at least 1")
+ }
+ if n > 1<<63 {
+ return fmt.Errorf("WithDecoderMaxMemory must be less than 1 << 63")
+ }
+ o.maxDecodedSize = n
+ return nil
+ }
+}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+// If several dictionaries with the same ID are provided, the last one will be used.
+func WithDecoderDicts(dicts ...[]byte) DOption {
+ return func(o *decoderOptions) error {
+ for _, b := range dicts {
+ d, err := loadDict(b)
+ if err != nil {
+ return err
+ }
+ o.dicts = append(o.dicts, *d)
+ }
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
new file mode 100644
index 00000000000..fa25a18d864
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -0,0 +1,122 @@
+package zstd
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/klauspost/compress/huff0"
+)
+
+type dict struct {
+ id uint32
+
+ litEnc *huff0.Scratch
+ llDec, ofDec, mlDec sequenceDec
+ //llEnc, ofEnc, mlEnc []*fseEncoder
+ offsets [3]int
+ content []byte
+}
+
+var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+
+// ID returns the dictionary id or 0 if d is nil.
+func (d *dict) ID() uint32 {
+ if d == nil {
+ return 0
+ }
+ return d.id
+}
+
+// DictContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) DictContentSize() int {
+ if d == nil {
+ return 0
+ }
+ return len(d.content)
+}
+
+// Load a dictionary as described in
+// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+func loadDict(b []byte) (*dict, error) {
+ // Check static field size.
+ if len(b) <= 8+(3*4) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ d := dict{
+ llDec: sequenceDec{fse: &fseDecoder{}},
+ ofDec: sequenceDec{fse: &fseDecoder{}},
+ mlDec: sequenceDec{fse: &fseDecoder{}},
+ }
+ if !bytes.Equal(b[:4], dictMagic[:]) {
+ return nil, ErrMagicMismatch
+ }
+ d.id = binary.LittleEndian.Uint32(b[4:8])
+ if d.id == 0 {
+ return nil, errors.New("dictionaries cannot have ID 0")
+ }
+
+ // Read literal table
+ var err error
+ d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
+ if err != nil {
+ return nil, err
+ }
+ d.litEnc.Reuse = huff0.ReusePolicyMust
+
+ br := byteReader{
+ b: b,
+ off: 0,
+ }
+ readDec := func(i tableIndex, dec *fseDecoder) error {
+ if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil {
+ return err
+ }
+ if br.overread() {
+ return io.ErrUnexpectedEOF
+ }
+ err = dec.transform(symbolTableX[i])
+ if err != nil {
+ println("Transform table error:", err)
+ return err
+ }
+ if debug {
+ println("Read table ok", "symbolLen:", dec.symbolLen)
+ }
+ // Set decoders as predefined so they aren't reused.
+ dec.preDefined = true
+ return nil
+ }
+
+ if err := readDec(tableOffsets, d.ofDec.fse); err != nil {
+ return nil, err
+ }
+ if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil {
+ return nil, err
+ }
+ if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil {
+ return nil, err
+ }
+ if br.remain() < 12 {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ d.offsets[0] = int(br.Uint32())
+ br.advance(4)
+ d.offsets[1] = int(br.Uint32())
+ br.advance(4)
+ d.offsets[2] = int(br.Uint32())
+ br.advance(4)
+ if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 {
+ return nil, errors.New("invalid offset in dictionary")
+ }
+ d.content = make([]byte, br.remain())
+ copy(d.content, br.unread())
+ if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) {
+ return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets)
+ }
+
+ return &d, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
new file mode 100644
index 00000000000..b1b7c6e6a72
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -0,0 +1,155 @@
+package zstd
+
+import (
+ "fmt"
+ "math/bits"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type fastBase struct {
+ // cur is the offset at the start of hist
+ cur int32
+ // maximum offset. Should be at least 2x block size.
+ maxMatchOff int32
+ hist []byte
+ crc *xxhash.Digest
+ tmp [8]byte
+ blk *blockEnc
+ lastDictID uint32
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastBase) CRC() *xxhash.Digest {
+ return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastBase) AppendCRC(dst []byte) []byte {
+ crc := e.crc.Sum(e.tmp[:0])
+ dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+ return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or a window size small enough to contain the input size, if > 0.
+func (e *fastBase) WindowSize(size int) int32 {
+ if size > 0 && size < int(e.maxMatchOff) {
+ b := int32(1) << uint(bits.Len(uint(size)))
+ // Keep minimum window.
+ if b < 1024 {
+ b = 1024
+ }
+ return b
+ }
+ return e.maxMatchOff
+}
+
+// Block returns the current block.
+func (e *fastBase) Block() *blockEnc {
+ return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+ if debugAsserts && e.cur > bufferReset {
+ panic(fmt.Sprintf("e.cur (%d) > buffer reset (%d)", e.cur, bufferReset))
+ }
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ l := e.maxMatchOff * 2
+ // Make it at least 1MB.
+ if l < 1<<20 {
+ l = 1 << 20
+ }
+ e.hist = make([]byte, 0, l)
+ } else {
+ if cap(e.hist) < int(e.maxMatchOff*2) {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - e.maxMatchOff
+ copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:e.maxMatchOff]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+ enc.reset(e.blk)
+ e.blk = enc
+}
+
+func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+ if debugAsserts {
+ if s < 0 {
+ err := fmt.Sprintf("s (%d) < 0", s)
+ panic(err)
+ }
+ if t < 0 {
+ err := fmt.Sprintf("t (%d) < 0", t)
+ panic(err)
+ }
+ if s-t > e.maxMatchOff {
+ err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+ panic(err)
+ }
+ if len(src)-int(s) > maxCompressedBlockSize {
+ panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+ }
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+ if e.blk == nil {
+ e.blk = &blockEnc{}
+ e.blk.init()
+ } else {
+ e.blk.reset(nil)
+ }
+ e.blk.initNewEncode()
+ if e.crc == nil {
+ e.crc = xxhash.New()
+ } else {
+ e.crc.Reset()
+ }
+ if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() {
+ l := e.maxMatchOff*2 + int32(d.DictContentSize())
+ // Make it at least 1MB.
+ if l < 1<<20 {
+ l = 1 << 20
+ }
+ e.hist = make([]byte, 0, l)
+ }
+ // We offset current position so everything will be out of reach.
+ // If above reset line, history will be purged.
+ if e.cur < bufferReset {
+ e.cur += e.maxMatchOff + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+ if d != nil {
+ // Set offsets (currently not used)
+ for i, off := range d.offsets {
+ e.blk.recentOffsets[i] = uint32(off)
+ e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+ }
+ // Transfer litenc.
+ e.blk.dictLitEnc = d.litEnc
+ e.hist = append(e.hist, d.content...)
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
new file mode 100644
index 00000000000..94a5343d00e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -0,0 +1,595 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+ betterLongTableBits = 19 // Bits used in the long match table
+ betterLongTableSize = 1 << betterLongTableBits // Size of the table
+
+ // Note: Increasing the short table bits or making the hash shorter
+ // can actually lead to compression degradation since it will 'steal' more from the
+ // long match table and match offsets are quite big.
+ // This greatly depends on the type of input.
+ betterShortTableBits = 13 // Bits used in the short match table
+ betterShortTableSize = 1 << betterShortTableBits // Size of the table
+)
+
+type prevEntry struct {
+ offset int32
+ prev int32
+}
+
+// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type betterFastEncoder struct {
+ fastBase
+ table [betterShortTableSize]tableEntry
+ longTable [betterLongTableSize]prevEntry
+ dictTable []tableEntry
+ dictLongTable []prevEntry
+}
+
+// Encode improves compression...
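The comment above describes the length-2 chain kept in the long table. A toy illustration (reviewer sketch, not the encoder's code) of the insert that makes the displaced head available as `.prev`:

```go
package main

import "fmt"

// prevEntry mirrors the type above: each long-table slot remembers the
// previous offset that hashed there, giving a chain of length 2.
type prevEntry struct{ offset, prev int32 }

// insertLong performs the same update as
// e.longTable[h] = prevEntry{offset: off, prev: candidate.offset}:
// the displaced head becomes .prev, so lookups can try both candidates
// and keep the longer match.
func insertLong(tbl []prevEntry, h uint32, off int32) {
	tbl[h] = prevEntry{offset: off, prev: tbl[h].offset}
}

func main() {
	tbl := make([]prevEntry, 16)
	insertLong(tbl, 3, 100)
	insertLong(tbl, 3, 200)
	fmt.Println(tbl[3]) // {200 100}: both recent positions stay reachable
}
```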
+func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, betterLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. 
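The loop above stores offsets with `seq.offset = uint32(s-t) + 3` for explicit distances and `seq.offset = 1` for a repeat. A hedged sketch of that convention (the real decode side has extra litLen==0 special cases, ignored here):

```go
package main

import "fmt"

// resolveOffset sketches the encoder's convention: stored values 1-3
// name a recent ("repeat") offset, anything larger is the real match
// distance plus 3.
func resolveOffset(stored uint32, recent [3]uint32) uint32 {
	if stored > 3 {
		return stored - 3 // explicit distance, as in seq.offset = (s-t)+3
	}
	return recent[stored-1] // repeat offset 1, 2 or 3
}

func main() {
	recent := [3]uint32{17, 42, 99}
	fmt.Println(resolveOffset(1, recent))  // 17: repeat offset 1
	fmt.Println(resolveOffset(20, recent)) // 17: explicit distance
}
```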
+func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ e.Encode(blk, src)
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff; i < end; i += 4 {
+ const hashLog = betterShortTableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hash5(cv, hashLog) // 0 -> 4
+ nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5
+ nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6
+ nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = tableEntry{
+ val: uint32(cv >> 16),
+ offset: i + 2,
+ }
+ e.dictTable[nextHash3] = tableEntry{
+ val: uint32(cv >> 24),
+ offset: i + 3,
+ }
+ }
+ e.lastDictID = d.id
+ }
+
+ // Init or copy dict long table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]prevEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ h := hash8(cv, betterLongTableBits)
+ e.dictLongTable[h] = prevEntry{
+ offset: e.maxMatchOff,
+ prev: e.dictLongTable[h].offset,
+ }
+
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ off := 8 // First to read
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[off]) << 56)
+ h := hash8(cv, betterLongTableBits)
+ e.dictLongTable[h] = prevEntry{
+ offset: i,
+ prev: e.dictLongTable[h].offset,
+ }
+ off++
+ }
+ }
+ e.lastDictID = d.id
+ }
+ // Reset table to initial state
+ copy(e.longTable[:], e.dictLongTable)
+
+ e.cur = e.maxMatchOff
+ // Reset table to initial state
+ copy(e.table[:], e.dictTable)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 00000000000..19eebf66e50
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,713 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+ dFastLongTableBits = 17 // Bits used in the long match table
+ dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+ dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+
+ dFastShortTableBits = tableBits // Bits used in the short match table
+ dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+ dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+)
+
+type doubleFastEncoder struct {
+ fastEncoder
+ longTable [dFastLongTableSize]tableEntry
+ dictLongTable []tableEntry
+}
+
+// Encode mimics functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv1>>8, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
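+ // Anything after the final sequence is carried as extra literals.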
+ blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + } + // Reset table to initial state + e.cur = e.maxMatchOff + copy(e.longTable[:], e.dictLongTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 00000000000..0b301df4390 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,661 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "math/bits" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry + dictTable []tableEntry +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches, any repeat offset we use
+ // is guaranteed to have been produced within this block.
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hash6(cv, hashLog)
+ nextHash2 := hash6(cv>>8, hashLog)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ var length int32
+ // length = 4 + e.matchlen(s+6, repIndex+4, src)
+ {
+ a := src[s+6:]
+ b := src[repIndex+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ length = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ sMin := s - e.maxMatchOff
+ if sMin < 0 {
+ sMin = 0
+ }
+ for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + 2
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ coffset0 := s - (candidate.offset - e.cur)
+ coffset1 := s - (candidate2.offset - e.cur) + 1
+ if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+ // found a regular match
+ t = candidate.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ break
+ }
+
+ if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+ // found a regular match
+ t = candidate2.offset - e.cur
+ s++
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ break
+ }
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ // A 4-byte match has been found. We'll later see if more than 4 bytes.
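+ // Rotate the recent offsets: the previous offset1 becomes offset2 for later repeat checks.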
+ offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debug { + if len(src) > maxBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. 
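+ // Each iteration hashes both cv and cv>>8 and stores entries for s and s+1, so skipping 2 bytes loses no candidates.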
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches, any repeat offset we use
+ // is guaranteed to have been produced within this block.
+
+ for {
+ nextHash := hash6(cv, hashLog)
+ nextHash2 := hash6(cv>>8, hashLog)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ // length := 4 + e.matchlen(s+6, repIndex+4, src)
+ // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
+ var length int32
+ {
+ a := src[s+6:]
+ b := src[repIndex+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ length = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
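+ // (A sequence with zero literals changes how repeat offsets are numbered in the format.)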
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + //l := e.matchlenNoHist(s+4, t+4, src) + 4 + // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlenNoHist(s+4, o2+4, src) + // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. 
+ nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 3 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hash6(cv, hashLog) // 0 -> 5 + nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6 + nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + } + } + e.lastDictID = d.id + } + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 00000000000..f5759211dac --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,570 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
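As context for the API being vendored here, a minimal usage sketch (illustrative only, not part of the diff; it assumes the import path shown in the file headers):

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Streaming: NewWriter + Write + Close produce one frame.
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello zstd streaming")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flushes the last block and the CRC
		panic(err)
	}
	fmt.Println("stream frame bytes:", buf.Len())

	// One-shot: EncodeAll appends a complete frame to dst (nil here).
	frame := enc.EncodeAll([]byte("hello zstd block"), nil)
	fmt.Println("one-shot frame bytes:", len(frame))
}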
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + if s.writing == nil { + s.writing = &blockEnc{} + s.writing.init() + } + s.writing.initNewEncode() + s.filling = s.filling[:0] + s.current = s.current[:0] + s.previous = s.previous[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.writeErr = nil +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) + p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. 
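+ // Only one block encode may run at a time per stream; s.wg tracks it.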
+ s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: 0, + WindowSize: uint32(s.encoder.WindowSize(0)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.wg.Add(1) + go func(src []byte) { + if debug { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debug { + println("Using ReadFrom") + } + + // Flush any current writes. 
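+ // ReadFrom reuses e.state.filling as its read buffer, dispatching one block at a time.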
+ if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debug { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + default: + if debug { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + case nil: + } + if len(src) > 0 { + if debug { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below 1MB. 
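+ // Single segment omits the window descriptor, but the decoder must allocate the full content size up front.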
+ single := len(src) < 1<<20 && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(len(src))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= maxCompressedBlockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, src) + case nil: + dst = blk.output + default: + panic(err) + } + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) + default: + panic(err) + } + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 00000000000..5792061635e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,281 @@ +package zstd + +import ( + "errors" + "fmt" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + // use less ram: true for now, but may change. 
+ concurrent: runtime.GOMAXPROCS(0),
+ crc: true,
+ single: nil,
+ blockSize: 1 << 16,
+ windowSize: 8 << 20,
+ level: SpeedDefault,
+ }
+}
+
+// encoder returns an encoder with the selected options.
+func (o encoderOptions) encoder() encoder {
+ switch o.level {
+ case SpeedDefault:
+ return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}}
+ case SpeedBetterCompression:
+ return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}
+ case SpeedFastest:
+ return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}
+ }
+ panic("unknown compression level")
+}
+
+// WithEncoderCRC will add CRC value to output.
+// Output will be 4 bytes larger.
+func WithEncoderCRC(b bool) EOption {
+ return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithEncoderConcurrency(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("concurrency must be at least 1")
+ }
+ o.concurrent = n
+ return nil
+ }
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level.
+func WithWindowSize(n int) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case n < MinWindowSize:
+ return fmt.Errorf("window size must be at least %d", MinWindowSize)
+ case n > MaxWindowSize:
+ return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+ case (n & (n - 1)) != 0:
+ return errors.New("window size must be a power of 2")
+ }
+
+ o.windowSize = n
+ o.customWindow = true
+ if o.blockSize > o.windowSize {
+ o.blockSize = o.windowSize
+ }
+ return nil
+ }
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("padding must be at least 1")
+ }
+ // No need to waste our time.
+ if n == 1 {
+ o.pad = 0
+ }
+ if n > 1<<30 {
+ return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+ }
+ o.pad = n
+ return nil
+ }
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+ speedNotSet EncoderLevel = iota
+
+ // SpeedFastest will choose the fastest reasonable compression.
+ // This is roughly equivalent to the fastest Zstandard mode.
+ SpeedFastest
+
+ // SpeedDefault is the default "pretty fast" compression option.
+ // This is roughly equivalent to the default Zstandard mode (level 3).
+ SpeedDefault
+
+ // SpeedBetterCompression will yield better compression than the default.
+ // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+ // If you use this level, be aware that CPU usage may increase in future versions.
+ SpeedBetterCompression
+
+ // speedLast should be kept as the last actual compression option.
+ // This is not for external usage, but is used to keep track of the valid options.
+ speedLast
+
+ // SpeedBestCompression will choose the best available compression option.
+ // For now this is not implemented.
+ SpeedBestCompression = SpeedBetterCompression
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+ for l := EncoderLevel(speedNotSet + 1); l < speedLast; l++ {
+ if strings.EqualFold(s, l.String()) {
+ return true, l
+ }
+ }
+ return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return an encoder level that closest matches the compression
+// ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+ switch {
+ case level < 3:
+ return SpeedFastest
+ case level >= 3 && level < 6:
+ return SpeedDefault
+ case level > 5:
+ return SpeedBetterCompression
+ }
+ return SpeedDefault
+}
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+ switch e {
+ case SpeedFastest:
+ return "fastest"
+ case SpeedDefault:
+ return "default"
+ case SpeedBetterCompression:
+ return "better"
+ default:
+ return "invalid"
+ }
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case l <= speedNotSet || l >= speedLast:
+ return fmt.Errorf("unknown encoder level")
+ }
+ o.level = l
+ if !o.customWindow {
+ switch o.level {
+ case SpeedFastest:
+ o.windowSize = 4 << 20
+ case SpeedDefault:
+ o.windowSize = 8 << 20
+ case SpeedBetterCompression:
+ o.windowSize = 16 << 20
+ }
+ }
+ if !o.customALEntropy {
+ o.allLitEntropy = l > SpeedFastest
+ }
+
+ return nil
+ }
+}
+
+// WithZeroFrames will encode 0 length input as full frames.
+// This can be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.fullZero = b
+ return nil
+ }
+}
+
+// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
+// Disabling this will skip incompressible data faster, but compression is lost for
+// inputs with no matches but a skewed character distribution.
+// Default value depends on the compression level selected.
+func WithAllLitEntropyCompression(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.customALEntropy = true
+ o.allLitEntropy = b
+ return nil
+ }
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches, but unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.noEntropy = b
+ return nil
+ }
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, the Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment of a size equal to or larger than the size of your content.
+// In order to preserve the decoder from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.single = &b
+ return nil
+ }
+}
+
+// WithEncoderDict allows registering a dictionary that will be used for the encode.
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+func WithEncoderDict(dict []byte) EOption {
+ return func(o *encoderOptions) error {
+ d, err := loadDict(dict)
+ if err != nil {
+ return err
+ }
+ o.dict = d
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 00000000000..fc4a566d39a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,494 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "hash"
+ "io"
+ "sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+ o decoderOptions
+ crc hash.Hash64
+ offset int64
+
+ WindowSize uint64
+
+ // maxWindowSize is the maximum window size to support.
+ // should never be bigger than max-int.
+ maxWindowSize uint64
+
+ // In order queue of blocks being decoded.
+ decoding chan *blockDec
+
+ // Frame history passed between blocks
+ history history
+
+ rawInput byteBuffer
+
+ // Byte buffer that can be reused for small input blocks.
+ bBuf byteBuf
+
+ FrameContentSize uint64
+ frameDone sync.WaitGroup
+
+ DictionaryID *uint32
+ HasCheckSum bool
+ SingleSegment bool
+
+ // asyncRunning indicates whether the async routine processes input on 'decoding'.
+ asyncRunningMu sync.Mutex
+ asyncRunning bool
+}
+
+const (
+ // The minimum Window_Size is 1 KB.
+ MinWindowSize = 1 << 10
+ MaxWindowSize = 1 << 29
+)
+
+var (
+ frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
+ skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+ d := frameDec{
+ o: o,
+ maxWindowSize: MaxWindowSize,
+ }
+ if d.maxWindowSize > o.maxDecodedSize {
+ d.maxWindowSize = o.maxDecodedSize
+ }
+ return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
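The Window_Descriptor math implemented in reset below packs a 5-bit exponent and a 3-bit mantissa into one byte; a standalone worked sketch (windowSize here is a hypothetical helper, not package API):

package main

import "fmt"

// windowSize mirrors the Window_Descriptor decoding in frameDec.reset:
// a 5-bit exponent (offset by 10) and a 3-bit mantissa in 1/8th steps.
func windowSize(wd byte) uint64 {
	windowLog := 10 + uint(wd>>3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&7)
	return windowBase + windowAdd
}

func main() {
	fmt.Println(windowSize(0x00)) // 1024: the 1 KB minimum
	fmt.Println(windowSize(0x09)) // 2304: 2 KB base plus one 1/8th step
}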
+func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var b []byte + for { + b = br.readSmall(4) + if b == nil { + return io.EOF + } + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if debug { + println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) + } + // Break if not skippable frame. + break + } + // Read size to skip + b = br.readSmall(4) + if b == nil { + println("Reading Frame Size EOF") + return io.ErrUnexpectedEOF + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err := br.skipN(int(n)) + if err != nil { + if debug { + println("Reading discarded frame", err) + } + return err + } + } + if !bytes.Equal(b, frameMagic) { + println("Got magic numbers: ", b, "want:", frameMagic) + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + println("Reading Frame_Header_Descriptor", err) + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("Reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + println("Reading Window_Descriptor", err) + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + b = br.readSmall(int(size)) + if b == nil { + if debug { + println("Reading Dictionary_ID", io.ErrUnexpectedEOF) + } + return io.ErrUnexpectedEOF + } + var id uint32 + switch size { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debug { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = 0 + if fcsSize > 0 { + b := br.readSmall(fcsSize) + if b == nil { + println("Reading Frame content", io.ErrUnexpectedEOF) + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
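+ // e.g. a stored value of 0 decodes to a content size of 256.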
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debug { + println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + } + } + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + } + + if d.WindowSize > d.maxWindowSize { + printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) + return ErrWindowSizeExceeded + } + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + println("got window size: ", d.WindowSize) + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if d.o.lowMem && d.history.windowSize < maxBlockSize { + d.history.maxSize = d.history.windowSize * 2 + } else { + d.history.maxSize = d.history.windowSize + maxBlockSize + } + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debug { + printf("decoding new block %p:%p", block, block.data) + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + d.sendErr(block, err) + return err + } + block.input <- struct{}{} + if debug { + println("next block:", block) + } + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return nil + } + if block.Last { + // We indicate the frame is done by sending io.EOF + d.decoding <- block + return io.EOF + } + d.decoding <- block + return nil +} + +// sendEOF will queue an error block on the frame. +// This will cause the frame decoder to return when it encounters the block. +// Returns true if the decoder was added. +func (d *frameDec) sendErr(block *blockDec, err error) bool { + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return false + } + + println("sending error", err.Error()) + block.sendErr(err) + d.decoding <- block + return true +} + +// checkCRC will check the checksum if the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. 
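+ // Only the low 32 bits of the xxhash64 digest are stored, little-endian.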
+ tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + // We can overwrite upper tmp now + want := d.rawInput.readSmall(4) + if want == nil { + println("CRC missing?") + return io.ErrUnexpectedEOF + } + + if !bytes.Equal(tmp[:], want) { + if debug { + println("CRC Check Failed:", tmp[:], "!=", want) + } + return ErrCRCMismatch + } + if debug { + println("CRC ok", tmp[:]) + } + return nil +} + +func (d *frameDec) initAsync() { + if !d.o.lowMem && !d.SingleSegment { + // set max extra size history to 10MB. + d.history.maxSize = d.history.windowSize + maxBlockSize*5 + } + // re-alloc if more than one extra block size. + if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.history.b) < d.history.maxSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.decoding) < d.o.concurrent { + d.decoding = make(chan *blockDec, d.o.concurrent) + } + if debug { + h := d.history + printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) + } + d.asyncRunningMu.Lock() + d.asyncRunning = true + d.asyncRunningMu.Unlock() +} + +// startDecoder will start decoding blocks and write them to the writer. +// The decoder will stop as soon as an error occurs or at end of frame. +// When the frame has finished decoding the *bufio.Reader +// containing the remaining input will be sent on frameDec.frameDone. +func (d *frameDec) startDecoder(output chan decodeOutput) { + written := int64(0) + + defer func() { + d.asyncRunningMu.Lock() + d.asyncRunning = false + d.asyncRunningMu.Unlock() + + // Drain the currently decoding. + d.history.error = true + flushdone: + for { + select { + case b := <-d.decoding: + b.history <- &d.history + output <- <-b.result + default: + break flushdone + } + } + println("frame decoder done, signalling done") + d.frameDone.Done() + }() + // Get decoder for first block. + block := <-d.decoding + block.history <- &d.history + for { + var next *blockDec + // Get result + r := <-block.result + if r.err != nil { + println("Result contained error", r.err) + output <- r + return + } + if debug { + println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) + d.offset += int64(len(r.b)) + } + if !block.Last { + // Send history to next block + select { + case next = <-d.decoding: + if debug { + println("Sending ", len(d.history.b), "bytes as history") + } + next.history <- &d.history + default: + // Wait until we have sent the block, so + // other decoders can potentially get the decoder. + next = nil + } + } + + // Add checksum, async to decoding. + if d.HasCheckSum { + n, err := d.crc.Write(r.b) + if err != nil { + r.err = err + if n != len(r.b) { + r.err = io.ErrShortWrite + } + output <- r + return + } + } + written += int64(len(r.b)) + if d.SingleSegment && uint64(written) > d.FrameContentSize { + println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) + r.err = ErrFrameSizeExceeded + output <- r + return + } + if block.Last { + r.err = d.checkCRC() + output <- r + return + } + output <- r + if next == nil { + // There was no decoder available, we wait for one now that we have sent to the writer. + if debug { + println("Sending ", len(d.history.b), " bytes as history") + } + next = <-d.decoding + next.history <- &d.history + } + block = next + } +} + +// runDecoder will create a sync decoder that will decode a block of data. 
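+// It is the synchronous counterpart to startDecoder above; output is appended directly to dst.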
+func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
+ saved := d.history.b
+
+ // We use the history for output to avoid copying it.
+ d.history.b = dst
+ // Store input length, so we only check new data.
+ crcStart := len(dst)
+ var err error
+ for {
+ err = dec.reset(d.rawInput, d.WindowSize)
+ if err != nil {
+ break
+ }
+ if debug {
+ println("next block:", dec)
+ }
+ err = dec.decodeBuf(&d.history)
+ if err != nil || dec.Last {
+ break
+ }
+ if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+ err = ErrFrameSizeExceeded
+ break
+ }
+ }
+ dst = d.history.b
+ if err == nil {
+ if d.HasCheckSum {
+ var n int
+ n, err = d.crc.Write(dst[crcStart:])
+ if err == nil {
+ if n != len(dst)-crcStart {
+ err = io.ErrShortWrite
+ } else {
+ err = d.checkCRC()
+ }
+ }
+ }
+ }
+ d.history.b = saved
+ return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
new file mode 100644
index 00000000000..4ef7f5a3e3d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -0,0 +1,137 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+)
+
+type frameHeader struct {
+ ContentSize uint64
+ WindowSize uint32
+ SingleSegment bool
+ Checksum bool
+ DictID uint32
+}
+
+const maxHeaderSize = 14
+
+func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+ dst = append(dst, frameMagic...)
+ var fhd uint8
+ if f.Checksum {
+ fhd |= 1 << 2
+ }
+ if f.SingleSegment {
+ fhd |= 1 << 5
+ }
+
+ var dictIDContent []byte
+ if f.DictID > 0 {
+ var tmp [4]byte
+ if f.DictID < 256 {
+ fhd |= 1
+ tmp[0] = uint8(f.DictID)
+ dictIDContent = tmp[:1]
+ } else if f.DictID < 1<<16 {
+ fhd |= 2
+ binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+ dictIDContent = tmp[:2]
+ } else {
+ fhd |= 3
+ binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+ dictIDContent = tmp[:4]
+ }
+ }
+ var fcs uint8
+ if f.ContentSize >= 256 {
+ fcs++
+ }
+ if f.ContentSize >= 65536+256 {
+ fcs++
+ }
+ if f.ContentSize >= 0xffffffff {
+ fcs++
+ }
+
+ fhd |= fcs << 6
+
+ dst = append(dst, fhd)
+ if !f.SingleSegment {
+ const winLogMin = 10
+ windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+ dst = append(dst, uint8(windowLog))
+ }
+ if f.DictID > 0 {
+ dst = append(dst, dictIDContent...)
+ }
+ switch fcs {
+ case 0:
+ if f.SingleSegment {
+ dst = append(dst, uint8(f.ContentSize))
+ }
+ // Unless SingleSegment is set, frame sizes < 256 are not stored.
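+ // Sizes in [256, 65791] are stored as 16-bit values minus 256 (see the matching +256 in framedec.go).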
+	case 1:
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst, nil
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to be added to written
+// so that it becomes divisible by wantMultiple.
+// The returned value is either 0 or larger than skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of 'total' bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+	}
+	if int64(total) > math.MaxUint32 {
+		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+	}
+	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+	f := uint32(total - skippableFrameHeader)
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 00000000000..e6d3d49b39c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,385 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 9
+)
+
+const (
+	/*!MEMORY_USAGE :
+	 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+	 * Increasing memory usage improves compression ratio
+	 * Reduced memory usage can improve speed, due to cache effect
+	 * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+	maxMemoryUsage = tablelogAbsoluteMax + 2
+
+	maxTableLog    = maxMemoryUsage - 2
+	maxTablesize   = 1 << maxTableLog
+	maxTableMask   = (1 << maxTableLog) - 1
+	minTablelog    = 5
+	maxSymbolValue = 255
+)
+
+// fseDecoder provides temporary storage for decompression.
+type fseDecoder struct {
+	dt             [maxTablesize]decSymbol // Decompression table.
+	symbolLen      uint16                  // Length of active part of the symbol table.
+	actualTableLog uint8                   // Selected tablelog.
+	maxBits        uint8                   // Maximum number of additional bits
+
+	// used for table creation to avoid allocations.
+	stateTable [256]uint16
+	norm       [maxSymbolValue + 1]int16
+	preDefined bool
+}
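
The spread step used to lay out symbols in both the decoding and encoding tables is the function defined just below: since (tableSize>>1)+(tableSize>>3)+3 is always odd, it is co-prime with the power-of-two table size, so repeated stepping visits every slot exactly once before returning to zero. A minimal standalone sketch of that property (values are illustrative):

```go
package main

import "fmt"

func main() {
	const tableSize = 32
	step := (tableSize >> 1) + (tableSize >> 3) + 3 // 16 + 4 + 3 = 23, always odd
	seen := make(map[int]bool, tableSize)
	pos := 0
	for i := 0; i < tableSize; i++ {
		seen[pos] = true
		pos = (pos + step) & (tableSize - 1)
	}
	// An odd step is co-prime with a power-of-two size, so all slots are
	// visited exactly once and the walk ends back at position 0.
	fmt.Println(len(seen) == tableSize, pos == 0) // true true
}
```

+// tableStep returns the next table index.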
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
+	var (
+		charnum   uint16
+		previous0 bool
+	)
+	if b.remain() < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32NC()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		println("Invalid tablelog:", nbBits)
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 && charnum <= maxSymbol {
+		if previous0 {
+			//println("prev0")
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				//println("24 x 0")
+				n0 += 24
+				if r := b.remain(); r > 5 {
+					b.advance(2)
+					// The check above should make sure we can read 32 bits
+					bitStream = b.Uint32NC() >> bitCount
+				} else {
+					// end of bit stream
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			//printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			//println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
+			for charnum < n0 {
+				s.norm[uint8(charnum)] = 0
+				charnum++
+			}
+
+			if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				// The check above should make sure we can read 32 bits
+				bitStream = b.Uint32NC() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*threshold - 1) - remaining
+		var count int32
+
+		if int32(bitStream)&(threshold-1) < max {
+			count = int32(bitStream) & (threshold - 1)
+			if debugAsserts && nbBits < 1 {
+				panic("nbBits underflow")
+			}
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		// extra accuracy
+		count--
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+			// The check above should make sure we can read 32 bits
+			bitStream = b.Uint32NC() >> (bitCount & 31)
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			bitStream = b.Uint32() >> (bitCount & 31)
+		}
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	// println(s.norm[:s.symbolLen], s.symbolLen)
+	return s.buildDtable()
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+func (d decSymbol) baseline() uint32 {
+	return uint32(d >> 32)
+}
+
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
+
+func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
+	*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setBaseline(baseline uint32) {
+	const mask = 0xffffffff
+	*d = (*d & mask) | decSymbol(baseline)<<32
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) {
+	s.actualTableLog = 0
+	s.maxBits = symbol.addBits()
+	s.dt[0] = symbol
+}
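
Packing all four fields of a table entry into a single uint64, as above, keeps an entry in one register and makes copies cheap. A standalone round-trip sketch of the same bit layout (field values are arbitrary):

```go
package main

import "fmt"

func main() {
	const (
		nbits    = uint8(6)
		addBits  = uint8(4)
		newState = uint16(512)
		baseline = uint32(131072)
	)
	// Same layout as decSymbol: bits 0-7 nbBits, 8-15 addBits,
	// 16-31 newState, 32-63 baseline.
	v := uint64(nbits) | uint64(addBits)<<8 | uint64(newState)<<16 | uint64(baseline)<<32
	fmt.Println(uint8(v) == nbits)         // true
	fmt.Println(uint8(v>>8) == addBits)    // true
	fmt.Println(uint16(v>>16) == newState) // true
	fmt.Println(uint32(v>>32) == baseline) // true
}
```

+// buildDtable will build the decoding table.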
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// next returns the current symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (s *fseState) next(br *bitReader) { + lowBits := uint16(br.getBits(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (s *fseState) finished(br *bitReader) bool { + return br.finished() && s.state.nbBits() > 0 +} + +// final returns the current state symbol without decoding the next. +func (s *fseState) final() (int, uint8) { + return s.state.baselineInt(), s.state.addBits() +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. 
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
+	lowBits := uint16(br.getBitsFast(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+	return s.state.baseline(), s.state.addBits()
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 00000000000..aa9eba88b80
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,726 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+)
+
+const (
+	// For encoding we only support tablelogs up to maxEncTableLog (8).
+	maxEncTableLog    = 8
+	maxEncTablesize   = 1 << maxTableLog
+	maxEncTableMask   = (1 << maxTableLog) - 1
+	minEncTablelog    = 5
+	maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// fseEncoder provides temporary storage for compression.
+type fseEncoder struct {
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	ct             cTable // Compression tables.
+	maxCount       int    // count of the most probable symbol
+	zeroBits       bool   // a symbol may be encoded with 0 bits (some symbol has prob > 50%).
+	clearCount     bool   // clear count
+	useRLE         bool   // This encoder is for RLE
+	preDefined     bool   // This encoder is predefined.
+	reUsed         bool   // Set to know when the encoder has been reused.
+	rleVal         uint8  // RLE Symbol
+	maxBits        uint8  // Maximum output bits after transform.
+
+	// TODO: Technically zstd should be fine with 64 bytes.
+	count [256]uint32
+	norm  [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaNbBits    uint32
+	deltaFindState int16
+	outBits        uint8
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram returns the histogram so the caller can populate it and skip
+// that step during compression; it also allows inspecting the histogram
+// once compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *fseEncoder) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for compression.
+func (s *fseEncoder) prepare() (*fseEncoder, error) {
+	if s == nil {
+		s = &fseEncoder{}
+	}
+	s.useRLE = false
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	return s, nil
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < int(tableSize) {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [256]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for ui, v := range s.norm[:s.symbolLen] {
+			symbol := byte(ui)
+			if v > largeLimit {
+				s.zeroBits = true
+			}
+			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+				tableSymbol[position] = symbol
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					position = (position + step) & tableMask
+				} /* Low proba area */
+			}
+		}
+
+		// Check if we have gone through all positions
+		if position != 0 {
+			return errors.New("position!=0")
+		}
+	}
+
+	// Build table
+	table := s.ct.stateTable
+	{
+		tsi := int(tableSize)
+		for u, v := range tableSymbol {
+			// TableU16 : sorted by symbol order; gives next state value
+			table[cumul[v]] = uint16(tsi + u)
+			cumul[v]++
+		}
+	}
+
+	// Build Symbol Transformation Table
+	{
+		total := int16(0)
+		symbolTT := s.ct.symbolTT[:s.symbolLen]
+		tableLog := s.actualTableLog
+		tl := (uint32(tableLog) << 16) - (1 << tableLog)
+		for i, v := range s.norm[:s.symbolLen] {
+			switch v {
+			case 0:
+			case -1, 1:
+				symbolTT[i].deltaNbBits = tl
+				symbolTT[i].deltaFindState = int16(total - 1)
+				total++
+			default:
+				maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
+				minStatePlus := uint32(v) << maxBitsOut
+				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+				symbolTT[i].deltaFindState = int16(total - v)
+				total += v
+			}
+		}
+		if total != int16(tableSize) {
+			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+		}
+	}
+	return nil
+}
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+func (s *fseEncoder) setRLE(val byte) {
+	s.allocCtable()
+	s.actualTableLog = 0
+	s.ct.stateTable = s.ct.stateTable[:1]
+	s.ct.symbolTT[val] = symbolTransform{
+		deltaFindState: 0,
+		deltaNbBits:    0,
+	}
+	if debug {
+		println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
+	}
+	s.rleVal = val
+	s.useRLE = true
+}
+
+// setBits will set output bits for the transform.
+// If nil is provided, the number of bits is equal to the index.
+func (s *fseEncoder) setBits(transform []byte) {
+	if s.reUsed || s.preDefined {
+		return
+	}
+	if s.useRLE {
+		if transform == nil {
+			s.ct.symbolTT[s.rleVal].outBits = s.rleVal
+			s.maxBits = s.rleVal
+			return
+		}
+		s.maxBits = transform[s.rleVal]
+		s.ct.symbolTT[s.rleVal].outBits = s.maxBits
+		return
+	}
+	if transform == nil {
+		for i := range s.ct.symbolTT[:s.symbolLen] {
+			s.ct.symbolTT[i].outBits = uint8(i)
+		}
+		s.maxBits = uint8(s.symbolLen - 1)
+		return
+	}
+	s.maxBits = 0
+	for i, v := range transform[:s.symbolLen] {
+		s.ct.symbolTT[i].outBits = v
+		if v > s.maxBits {
+			// We could assume bits always going up, but we play safe.
+			s.maxBits = v
+		}
+	}
+}
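
normalizeCount, defined next, rescales the raw symbol histogram so the normalized counts sum to exactly 1<<tableLog. A deliberately simplified sketch of that invariant (the real code below works in fixed point, handles low-probability symbols specially, and falls back to normalizeCount2):

```go
package main

import "fmt"

func main() {
	counts := []int{7, 2, 1} // raw histogram, total = 10
	const tableLog = 5
	tableSize := 1 << tableLog // 32
	total := 0
	for _, c := range counts {
		total += c
	}
	norm := make([]int, len(counts))
	sum, largest := 0, 0
	for i, c := range counts {
		norm[i] = c * tableSize / total // truncating scale: 22, 6, 3
		sum += norm[i]
		if norm[i] > norm[largest] {
			largest = i
		}
	}
	// Hand any rounding shortfall to the most probable symbol.
	norm[largest] += tableSize - sum
	fmt.Println(norm) // [23 6 3], which sums to exactly 32
}
```

+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
+// If successful, compression tables will also be made ready.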
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = uint32(total >> tableLog) + lowOne = uint32((total * 3) >> (tableLog + 1)) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = uint32((total * 3) / (toDistribute * 2)) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> 
vStepLog)
+				weight = sEnd - sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+	tableLog := uint8(maxEncTableLog)
+	minBitsSrc := highBit(uint32(length)) + 1
+	minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+	minBits := uint8(minBitsSymbols)
+	if minBitsSrc < minBitsSymbols {
+		minBits = uint8(minBitsSrc)
+	}
+
+	maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minEncTablelog {
+		tableLog = minEncTablelog
+	}
+	if tableLog > maxEncTableLog {
+		tableLog = maxEncTableLog
+	}
+	s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined || s.reUsed {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return nil, errors.New("internal error: remaining < 1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	if outP+2 > len(out) {
+		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+	}
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += int((bitCount + 7) / 8)
+
+	if charnum > s.symbolLen {
+		return nil, errors.New("internal error: charnum > s.symbolLen")
+	}
+	return out[:outP], nil
+}
+
+// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits).
+// note 1: assume symbolValue is valid (<= maxSymbolValue)
+// note 2: if freq[symbolValue]==0, returns a fake cost of tableLog+1 bits
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+	threshold := (minNbBits + 1) << 16
+	if debugAsserts {
+		if !(s.actualTableLog < 16) {
+			panic("!s.actualTableLog < 16")
+		}
+		// ensure enough room for renormalization double shift
+		if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+			panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+		}
+	}
+	tableSize := uint32(1) << s.actualTableLog
+	deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+	// linear interpolation (very approximate)
+	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+	bitMultiplier := uint32(1) << accuracyLog
+	if debugAsserts {
+		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+		}
+		if normalizedDeltaFromThreshold > bitMultiplier {
+			panic("normalizedDeltaFromThreshold > bitMultiplier")
+		}
+	}
+	return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// Returns the cost in bits of encoding the distribution in count using ctable.
+// Histogram should only be up to the last non-zero symbol.
+// Returns math.MaxUint32 if ctable cannot represent all the symbols in count.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+	if int(s.symbolLen) < len(hist) {
+		// More symbols than we have.
+		return math.MaxUint32
+	}
+	if s.useRLE {
+		// We will never reuse RLE encoders.
+		return math.MaxUint32
+	}
+	const kAccuracyLog = 8
+	badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+	var cost uint32
+	for i, v := range hist {
+		if v == 0 {
+			continue
+		}
+		if s.norm[i] == 0 {
+			return math.MaxUint32
+		}
+		bitCost := s.bitCost(uint8(i), kAccuracyLog)
+		if bitCost > badCost {
+			return math.MaxUint32
+		}
+		cost += v * bitCost
+	}
+	return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not an exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+	if s.preDefined {
+		return 0
+	}
+	if s.useRLE {
+		return 8
+	}
+	return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+	bw         *bitWriter
+	stateTable []uint16
+	state      uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
+	c.bw = bw
+	c.stateTable = ct.stateTable
+	if len(c.stateTable) == 1 {
+		// RLE
+		c.stateTable[0] = uint16(0)
+		c.state = 0
+		return
+	}
+	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+	im := int32((nbBitsOut << 16) - first.deltaNbBits)
+	lu := (im >> nbBitsOut) + int32(first.deltaFindState)
+	c.state = c.stateTable[lu]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
+	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+	c.bw.flush32()
+	c.bw.addBits16NC(c.state, tableLog)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
new file mode 100644
index 00000000000..6c17dc17f4f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -0,0 +1,158 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math"
+	"sync"
+)
+
+var (
+	// fsePredef are the predefined fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredef [3]fseDecoder
+
+	// fsePredefEnc are the predefined encoder based on fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredefEnc [3]fseEncoder
+
+	// symbolTableX contain the transformations needed for each type as defined in
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	symbolTableX [3][]baseOffset
+
+	// maxTableSymbol is the biggest supported symbol for each table type
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol}
+
+	// bitTables is the bits table for each table.
+	bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]}
+)
+
+type tableIndex uint8
+
+const (
+	// indexes for fsePredef and symbolTableX
+	tableLiteralLengths tableIndex = 0
+	tableOffsets        tableIndex = 1
+	tableMatchLengths   tableIndex = 2
+
+	maxLiteralLengthSymbol = 35
+	maxOffsetLengthSymbol  = 30
+	maxMatchLengthSymbol   = 52
+)
+
+// baseOffset is used for calculating transformations.
+type baseOffset struct {
+	baseLine uint32
+	addBits  uint8
+}
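
Each baseOffset entry records the first value a code represents and how many extra bits follow it in the bitstream; fillBase, defined next, derives consecutive baselines by accumulating base += 1<<bits. A standalone sketch using the first literal-length codes above 15 (the same bit widths passed to fillBase further below):

```go
package main

import "fmt"

type baseOffset struct {
	baseLine uint32
	addBits  uint8
}

func main() {
	base := uint32(16) // literal-length code 16 starts at value 16
	bits := []uint8{1, 1, 1, 1, 2, 2}
	out := make([]baseOffset, len(bits))
	for i, b := range bits {
		out[i] = baseOffset{baseLine: base, addBits: b}
		base += 1 << b // the next code starts right after this code's range
	}
	fmt.Println(out)
	// [{16 1} {18 1} {20 1} {22 1} {24 2} {28 2}]
	// e.g. the entry {24 2} decodes values 24..27 from 2 extra bits.
}
```

+// fillBase will precalculate base offsets with the given bit distributions.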
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic(fmt.Sprintf("invalid decoding table, base overflows int32")) + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 00000000000..4a752067fc9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,77 @@ +// Copyright 2019+ Klaus Post. 
All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+	prime3bytes = 506832829
+	prime4bytes = 2654435761
+	prime5bytes = 889523592379
+	prime6bytes = 227718039650203
+	prime7bytes = 58295818150454627
+	prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+// hashLen returns a hash of the lowest l bytes of u to fit in a hash table with h bits.
+// l must be >=4 and <=8. Any other value will return hash for 4 bytes.
+// h should always be <32.
+// Preferably h and l should be a constant.
+// FIXME: This does NOT get resolved when 'mls' is a constant,
+// so this cannot be used.
+func hashLen(u uint64, hashLog, mls uint8) uint32 {
+	switch mls {
+	case 5:
+		return hash5(u, hashLog)
+	case 6:
+		return hash6(u, hashLog)
+	case 7:
+		return hash7(u, hashLog)
+	case 8:
+		return hash8(u, hashLog)
+	default:
+		return hash4x64(u, hashLog)
+	}
+}
+
+// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash3(u uint32, h uint8) uint32 {
+	return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
+}
+
+// hash4 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4(u uint32, h uint8) uint32 {
+	return (u * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4x64(u uint64, h uint8) uint32 {
+	return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash5(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+	return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
new file mode 100644
index 00000000000..f783e32d251
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -0,0 +1,89 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"github.com/klauspost/compress/huff0"
+)
+
+// history contains the information transferred between blocks.
+type history struct {
+	b             []byte
+	huffTree      *huff0.Scratch
+	recentOffsets [3]int
+	decoders      sequenceDecs
+	windowSize    int
+	maxSize       int
+	error         bool
+	dict          *dict
+}
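
The b buffer above holds the decompression window: append (further below) keeps at most windowSize trailing bytes, discarding the oldest. A simplified standalone equivalent of that policy (the vendored version additionally relies on pre-allocated capacity to avoid reallocation):

```go
package main

import "fmt"

// appendWindow mirrors the history.append policy: keep only the last windowSize bytes.
func appendWindow(hist, b []byte, windowSize int) []byte {
	if len(b) >= windowSize {
		// New data alone fills the window; keep just its tail.
		return append(hist[:0], b[len(b)-windowSize:]...)
	}
	hist = append(hist, b...)
	if len(hist) > windowSize {
		// Shift down so only windowSize bytes remain.
		copy(hist, hist[len(hist)-windowSize:])
		hist = hist[:windowSize]
	}
	return hist
}

func main() {
	h := []byte("abcdefgh")
	h = appendWindow(h, []byte("ij"), 8)
	fmt.Println(string(h)) // "cdefghij": the two oldest bytes were dropped
}
```

+// reset will reset the history to initial state of a frame.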
+// The history must already have been initialized to the desired size.
+func (h *history) reset() {
+	h.b = h.b[:0]
+	h.error = false
+	h.recentOffsets = [3]int{1, 4, 8}
+	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	h.decoders = sequenceDecs{}
+	if h.huffTree != nil {
+		if h.dict == nil || h.dict.litEnc != h.huffTree {
+			huffDecoderPool.Put(h.huffTree)
+		}
+	}
+	h.huffTree = nil
+	h.dict = nil
+	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) setDict(dict *dict) {
+	if dict == nil {
+		return
+	}
+	h.dict = dict
+	h.decoders.litLengths = dict.llDec
+	h.decoders.offsets = dict.ofDec
+	h.decoders.matchLengths = dict.mlDec
+	h.recentOffsets = dict.offsets
+	h.huffTree = dict.litEnc
+}
+
+// append appends b to the history, keeping at most windowSize bytes.
+// It assumes the buffer has been allocated with enough extra space,
+// so no reallocation takes place.
+func (h *history) append(b []byte) {
+	if len(b) >= h.windowSize {
+		// Discard all history by simply overwriting
+		h.b = h.b[:h.windowSize]
+		copy(h.b, b[len(b)-h.windowSize:])
+		return
+	}
+
+	// If there is space, append it.
+	if len(b) < cap(h.b)-len(h.b) {
+		h.b = append(h.b, b...)
+		return
+	}
+
+	// Move data down so we only have window size left.
+	// We know we have less than window size in b at this point.
+	discard := len(b) + len(h.b) - h.windowSize
+	copy(h.b, h.b[discard:])
+	h.b = h.b[:h.windowSize]
+	copy(h.b[h.windowSize-len(b):], b)
+}
+
+// appendKeep appends bytes to the history without ever discarding anything.
+func (h *history) appendKeep(b []byte) {
+	h.b = append(h.b, b...)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
new file mode 100644
index 00000000000..24b53065f40
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
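
The vendored xxhash copy added below lives under an internal path, so it cannot be imported directly; the upstream module exposes the same API described in the README that follows. A minimal usage sketch against upstream (assuming the v2 module path):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))

	// Streaming via Digest, which implements hash.Hash64.
	d := xxhash.New()
	d.Write([]byte("hel"))
	d.Write([]byte("lo"))
	fmt.Println(d.Sum64() == xxhash.Sum64([]byte("hello"))) // true
}
```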
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000000..69aa3bb587c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,58 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000000..426b9cac786 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,238 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. 
+type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go new file mode 100644 index 00000000000..35318d7c46c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(*Digest, []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000000..2c9c5357a14 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. 
+ MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ arg1_base+8(FP), CX + MOVQ arg1_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ arg+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ arg1_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000000..4a5a821603e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000000..6f3b0cb1026 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 00000000000..b5c8ef13329 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,485 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. + llCode, mlCode, ofCode uint8 +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. 
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all 3 decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	hist         []byte
+	dict         []byte
+	literals     []byte
+	out          []byte
+	windowSize   int
+	maxBits      uint8
+}
+
+// initialize all 3 decoders from the stream input.
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+	if err := s.litLengths.init(br); err != nil {
+		return errors.New("litLengths:" + err.Error())
+	}
+	if err := s.offsets.init(br); err != nil {
+		return errors.New("offsets:" + err.Error())
+	}
+	if err := s.matchLengths.init(br); err != nil {
+		return errors.New("matchLengths:" + err.Error())
+	}
+	s.literals = literals
+	s.hist = hist.b
+	s.prevOffset = hist.recentOffsets
+	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
+	s.windowSize = hist.windowSize
+	s.out = out
+	s.dict = nil
+	if hist.dict != nil {
+		s.dict = hist.dict.content
+	}
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+	startSize := len(s.out)
+	// Grab full sizes tables, to avoid bounds checks.
+	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+	for i := seqs - 1; i >= 0; i-- {
+		if br.overread() {
+			printf("reading sequence %d, exceeded available data\n", seqs-i)
+			return io.ErrUnexpectedEOF
+		}
+		var ll, mo, ml int
+		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+			// inlined function:
+			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+			// Final will not read from stream.
+			var llB, mlB, moB uint8
+			ll, llB = llState.final()
+			ml, mlB = mlState.final()
+			mo, moB = ofState.final()
+
+			// extra bits are stored in reverse order.
+			br.fillFast()
+			mo += br.getBits(moB)
+			if s.maxBits > 32 {
+				br.fillFast()
+			}
+			ml += br.getBits(mlB)
+			ll += br.getBits(llB)
+
+			if moB > 1 {
+				s.prevOffset[2] = s.prevOffset[1]
+				s.prevOffset[1] = s.prevOffset[0]
+				s.prevOffset[0] = mo
+			} else {
+				// mo = s.adjustOffset(mo, ll, moB)
+				// Inlined for rather big speedup
+				if ll == 0 {
+					// There is an exception though, when current sequence's literals_length = 0.
+					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+					mo++
+				}
+
+				if mo == 0 {
+					mo = s.prevOffset[0]
+				} else {
+					var temp int
+					if mo == 3 {
+						temp = s.prevOffset[0] - 1
+					} else {
+						temp = s.prevOffset[mo]
+					}
+
+					if temp == 0 {
+						// 0 is not valid; input is corrupted; force offset to 1
+						println("temp was 0")
+						temp = 1
+					}
+
+					if mo != 1 {
+						s.prevOffset[2] = s.prevOffset[1]
+					}
+					s.prevOffset[1] = s.prevOffset[0]
+					s.prevOffset[0] = temp
+					mo = temp
+				}
+			}
+			br.fillFast()
+		} else {
+			ll, mo, ml = s.next(br, llState, mlState, ofState)
+			br.fill()
+		}
+
+		if debugSequences {
+			println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+		}
+
+		if ll > len(s.literals) {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
+		}
+		size := ll + ml + len(s.out)
+		if size-startSize > maxBlockSize {
+			return fmt.Errorf("output (%d) bigger than max block size", size)
+		}
+		if size > cap(s.out) {
+			// Not enough size, will be extremely rarely triggered,
+			// but could be if destination slice is too small for sync operations.
+			// We add maxBlockSize to the capacity.
+			s.out = append(s.out, make([]byte, maxBlockSize)...)
+			s.out = s.out[:len(s.out)-maxBlockSize]
+		}
+		if ml > maxMatchLen {
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+		}
+
+		// Add literals
+		s.out = append(s.out, s.literals[:ll]...)
+		s.literals = s.literals[ll:]
+		out := s.out
+
+		if mo == 0 && ml > 0 {
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+		}
+
+		if mo > len(s.out)+len(hist) || mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+			}
+			end := dictO + ml
+			if end > len(s.dict) {
+				out = append(out, s.dict[dictO:]...)
+				mo -= len(s.dict) - dictO
+				ml -= len(s.dict) - dictO
+			} else {
+				out = append(out, s.dict[dictO:end]...)
+				mo = 0
+				ml = 0
+			}
+		}
+
+		// Copy from history.
+		// TODO: Blocks without history could be made to ignore this completely.
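+		// A worked example of the split copy below (illustrative numbers):
+		// with len(hist) == 100, len(s.out) == 10, mo == 15 and ml == 8,
+		// the match starts v == 5 bytes inside history, so 5 bytes are
+		// copied from s.hist and the remaining 3 from the current output.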
+ if v := mo - len(s.out); v > 0 { + // v is the start position in history from end. + start := len(s.hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, s.hist[start:]...) + mo -= v + ml -= v + } else { + out = append(out, s.hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(s.out) - mo + if ml <= len(s.out)-start { + // No overlap + out = append(out, s.out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + s.out = out + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Add final literals + s.out = append(s.out, s.literals...) + return nil +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) update(br *bitReader) { + // Max 8 bits + s.litLengths.state.next(br) + // Max 9 bits + s.matchLengths.state.next(br) + // Max 8 bits + s.offsets.state.next(br) +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) updateAlt(br *bitReader) { + // Update all 3 states at once. Approx 20% faster. + a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + + nBits := a.nbBits() + b.nbBits() + c.nbBits() + if nBits == 0 { + s.litLengths.state.state = s.litLengths.state.dt[a.newState()] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] + s.offsets.state.state = s.offsets.state.dt[c.newState()] + return + } + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) + s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] + + lowBits = uint16(bits >> (c.nbBits() & 31)) + lowBits &= bitMask[b.nbBits()&15] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] + + lowBits = uint16(bits) & bitMask[c.nbBits()&15] + s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] +} + +// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. +func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
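+	// That is, the encoder wrote the literal-length bits first and the
+	// offset bits last, so they are consumed here in the opposite order.
+	// fillFast only guarantees 32 bits, so a second refill is needed when
+	// the three codes' extra bits can exceed 32 in total (s.maxBits > 32).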
+ br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + return + } + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + return + } + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + return +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} + +// mergeHistory will merge history. 
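+// A stream that set the repeat flag keeps the table already carried in hist;
+// otherwise the newly read table replaces the historic one, and the replaced
+// decoder (unless predefined) is returned to fseDecoderPool for reuse.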
+func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
+	for i := uint(0); i < 3; i++ {
+		var sNew, sHist *sequenceDec
+		switch i {
+		default:
+			// same as "case 0":
+			sNew = &s.litLengths
+			sHist = &hist.litLengths
+		case 1:
+			sNew = &s.offsets
+			sHist = &hist.offsets
+		case 2:
+			sNew = &s.matchLengths
+			sHist = &hist.matchLengths
+		}
+		if sNew.repeat {
+			if sHist.fse == nil {
+				return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
+			}
+			continue
+		}
+		if sNew.fse == nil {
+			return nil, fmt.Errorf("sequence stream %d, no fse found", i)
+		}
+		if sHist.fse != nil && !sHist.fse.preDefined {
+			fseDecoderPool.Put(sHist.fse)
+		}
+		sHist.fse = sNew.fse
+	}
+	return hist, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
new file mode 100644
index 00000000000..36bcc3cc02e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -0,0 +1,115 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "math/bits"
+
+type seqCoders struct {
+	llEnc, ofEnc, mlEnc    *fseEncoder
+	llPrev, ofPrev, mlPrev *fseEncoder
+}
+
+// swap coders with another (block).
+func (s *seqCoders) swap(other *seqCoders) {
+	*s, *other = *other, *s
+}
+
+// setPrev will update the previous encoders to the actually used ones
+// and make sure a fresh one is in the main slot.
+func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) {
+	compareSwap := func(used *fseEncoder, current, prev **fseEncoder) {
+		// We used the new one; move current to history and reuse the previous history.
+		if *current == used {
+			*prev, *current = *current, *prev
+			c := *current
+			p := *prev
+			c.reUsed = false
+			p.reUsed = true
+			return
+		}
+		if used == *prev {
+			return
+		}
+		// Ensure we cannot reuse by accident
+		prevEnc := *prev
+		prevEnc.symbolLen = 0
+		return
+	}
+	compareSwap(ll, &s.llEnc, &s.llPrev)
+	compareSwap(ml, &s.mlEnc, &s.mlPrev)
+	compareSwap(of, &s.ofEnc, &s.ofPrev)
+}
+
+func highBit(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
+
+var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7,
+	8, 9, 10, 11, 12, 13, 14, 15,
+	16, 16, 17, 17, 18, 18, 19, 19,
+	20, 20, 20, 20, 21, 21, 21, 21,
+	22, 22, 22, 22, 22, 22, 22, 22,
+	23, 23, 23, 23, 23, 23, 23, 23,
+	24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24}
+
+// Up to 6 bits
+const maxLLCode = 35
+
+// llBitsTable translates from ll code to number of bits.
+var llBitsTable = [maxLLCode + 1]byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 2, 2, 3, 3,
+	4, 6, 7, 8, 9, 10, 11, 12,
+	13, 14, 15, 16}
+
+// llCode returns the code that represents the literal length requested.
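+// As a worked example: litLength 18 is looked up directly and maps to code 17
+// (llBitsTable[17] == 1 extra bit), while litLength 100 takes the highBit
+// path: highBit(100) == 6, so the code is 6+19 == 25 with llBitsTable[25] == 6
+// extra bits.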
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000000..841fd95acce --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,436 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/snappy" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+	ErrSnappyTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrSnappyUnsupported reports that the input isn't supported.
+	ErrSnappyUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// SnappyConverter can read Snappy-compressed streams and convert them to zstd.
+// Conversion is done by converting the stream directly from Snappy without intermediate
+// full decoding.
+// Therefore the compression ratio is much less than what can be done by a full decompression
+// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without
+// any errors being generated.
+// No CRC value is being generated and not all CRC values of the Snappy stream are checked.
+// However, it provides really fast recompression of Snappy streams.
+// The converter can be reused to avoid allocations, even after errors.
+type SnappyConverter struct {
+	r     io.Reader
+	err   error
+	buf   []byte
+	block *blockEnc
+}
+
+// Convert the Snappy stream supplied in 'in' and write the Zstandard stream to 'w'.
+// If any error is detected on the Snappy stream it is returned.
+// The number of bytes written is returned.
+func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+	initPredefined()
+	r.err = nil
+	r.r = in
+	if r.block == nil {
+		r.block = &blockEnc{}
+		r.block.init()
+	}
+	r.block.initNewEncode()
+	if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize {
+		r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize)
+	}
+	r.block.litEnc.Reuse = huff0.ReusePolicyNone
+	var written int64
+	var readHeader bool
+	{
+		var header []byte
+		var n int
+		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+
+		n, r.err = w.Write(header)
+		if r.err != nil {
+			return written, r.err
+		}
+		written += int64(n)
+	}
+
+	for {
+		if !r.readFull(r.buf[:4], true) {
+			// Add empty last block
+			r.block.reset(nil)
+			r.block.last = true
+			err := r.block.encodeLits(r.block.literals, false)
+			if err != nil {
+				return written, err
+			}
+			n, err := w.Write(r.block.output)
+			if err != nil {
+				return written, err
+			}
+			written += int64(n)
+
+			return written, r.err
+		}
+		chunkType := r.buf[0]
+		if !readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				println("chunkType != chunkTypeStreamIdentifier", chunkType)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			println("chunkLen > len(r.buf)", chunkType)
+			r.err = ErrSnappyUnsupported
+			return written, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
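+			// Chunk layout recap (per the framing spec linked above): each
+			// chunk is a 1-byte type plus a 3-byte little-endian length
+			// (parsed into chunkType/chunkLen above); a compressed-data
+			// chunk body is a 4-byte CRC-32C of the uncompressed data
+			// followed by the Snappy-compressed block itself.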
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + println("snappy.Decode:", err) + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debug { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debug { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000000..0807719c8b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,144 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// Reset the buffer offset when reaching this. +const bufferReset = math.MaxInt32 - MaxWindowSize + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. + ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. 
+ ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + // For the time being dictionaries are not supported. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") +) + +func println(a ...interface{}) { + if debug { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug { + log.Printf(format, a...) + } +} + +// matchLenFast does matching, but will not match the last up to 7 bytes. +func matchLenFast(a, b []byte) int { + endI := len(a) & (math.MaxInt32 - 7) + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + bits.TrailingZeros64(diff)>>3 + } + } + return endI +} + +// matchLen returns the maximum length. +// a must be the shortest of the two. +// The function also returns whether all bytes matched. +func matchLen(a, b []byte) int { + b = b[:len(a)] + for i := 0; i < len(a)-7; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + (bits.TrailingZeros64(diff) >> 3) + } + } + + checked := (len(a) >> 3) << 3 + a = a[checked:] + b = b[checked:] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} + +func load3232(b []byte, i int32) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func load64(b []byte, i int) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
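+	// The re-slices below are the usual bounds-check-elimination idiom:
+	// after b = b[i:]; b = b[:8] the compiler can prove indexes 0..7 are
+	// in range. A rough equivalent (sketch; binary is not imported here):
+	//
+	//	return binary.LittleEndian.Uint64(b[i : i+8])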
+	b = b[i:]
+	b = b[:8]
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
diff --git a/vendor/github.com/leodido/go-urn/.travis.yml b/vendor/github.com/leodido/go-urn/.travis.yml
index e56cf7cc066..21f348d65a3 100644
--- a/vendor/github.com/leodido/go-urn/.travis.yml
+++ b/vendor/github.com/leodido/go-urn/.travis.yml
@@ -1,11 +1,9 @@
 language: go
 
 go:
-  - 1.9.x
-  - 1.10.x
-  - 1.11.x
-  - 1.12.x
   - 1.13.x
+  - 1.14.x
+  - 1.15.x
   - tip
 
 before_install:
diff --git a/vendor/github.com/leodido/go-urn/go.mod b/vendor/github.com/leodido/go-urn/go.mod
index 65bc1caf290..98cf196df30 100644
--- a/vendor/github.com/leodido/go-urn/go.mod
+++ b/vendor/github.com/leodido/go-urn/go.mod
@@ -2,4 +2,4 @@ module github.com/leodido/go-urn
 
 go 1.13
 
-require github.com/stretchr/testify v1.4.0
+require github.com/stretchr/testify v1.6.1
diff --git a/vendor/github.com/leodido/go-urn/go.sum b/vendor/github.com/leodido/go-urn/go.sum
index 8fdee5854f1..afe7890c9a1 100644
--- a/vendor/github.com/leodido/go-urn/go.sum
+++ b/vendor/github.com/leodido/go-urn/go.sum
@@ -3,9 +3,9 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/leodido/go-urn/urn.go b/vendor/github.com/leodido/go-urn/urn.go
index b903b7b3cd5..d51a6c915be 100644
--- a/vendor/github.com/leodido/go-urn/urn.go
+++ b/vendor/github.com/leodido/go-urn/urn.go
@@ -1,9 +1,13 @@
 package urn
 
 import (
+	"encoding/json"
+	"fmt"
 	"strings"
 )
 
+const errInvalidURN = "invalid URN: %s"
+
 // URN represents an Uniform Resource Name.
 //
 // The general form represented is:
@@ -61,3 +65,22 @@ func Parse(u []byte) (*URN, bool) {
 
 	return urn, true
 }
+
+// MarshalJSON marshals the URN to JSON string form (e.g. `"urn:oid:1.2.3.4"`).
+func (u URN) MarshalJSON() ([]byte, error) {
+	return json.Marshal(u.String())
+}
+
+// UnmarshalJSON unmarshals a URN from JSON string form (e.g. `"urn:oid:1.2.3.4"`).
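+// A round-trip sketch (hypothetical values):
+//
+//	var u URN
+//	_ = json.Unmarshal([]byte(`"urn:oid:1.2.3.4"`), &u) // u.ID == "oid", u.SS == "1.2.3.4"
+//	out, _ := json.Marshal(u)                           // `"urn:oid:1.2.3.4"`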
+func (u *URN) UnmarshalJSON(bytes []byte) error {
+	var str string
+	if err := json.Unmarshal(bytes, &str); err != nil {
+		return err
+	}
+	if value, ok := Parse([]byte(str)); !ok {
+		return fmt.Errorf(errInvalidURN, str)
+	} else {
+		*u = *value
+	}
+	return nil
+}
\ No newline at end of file
diff --git a/vendor/github.com/minsikl/netscaler-nitro-go/client/client.go b/vendor/github.com/minsikl/netscaler-nitro-go/client/client.go
new file mode 100644
index 00000000000..37fe211ebe8
--- /dev/null
+++ b/vendor/github.com/minsikl/netscaler-nitro-go/client/client.go
@@ -0,0 +1,242 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"github.com/minsikl/netscaler-nitro-go/datatypes"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"reflect"
+	"strings"
+)
+
+// NitroClient is a client for the NetScaler Nitro REST API.
+type NitroClient struct {
+	Protocol  string
+	IpAddress string
+	Mode      string
+	User      string
+	Password  string
+	Debug     bool
+}
+
+func (n *NitroClient) Add(req interface{}, options ...string) error {
+	resource, err := getResourceStringByObject(req)
+	if err != nil {
+		return err
+	}
+	reqJson, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+	requestQuery := resource + getOptions(options)
+	responseBody, _, err := HTTPRequest(n, requestQuery, "POST", reqJson)
+	if err != nil {
+		return fmt.Errorf("Error in POST '%s'", err.Error())
+	}
+	if len(responseBody) > 0 {
+		res := datatypes.BaseRes{}
+		err = json.Unmarshal(responseBody, &res)
+		if err != nil {
+			return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+		}
+		if res.Severity != nil && *res.Severity == "ERROR" {
+			return fmt.Errorf("Error in POST : Errorcode '%d' Message '%s' Severity '%s'\r\n", *res.Errorcode, *res.Message, *res.Severity)
+		}
+	}
+	return nil
+}
+
+func (n *NitroClient) Update(req interface{}, options ...string) error {
+	resource, err := getResourceStringByObject(req)
+	if err != nil {
+		return err
+	}
+	reqJson, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+	requestQuery := resource + getOptions(options)
+	responseBody, _, err := HTTPRequest(n, requestQuery, "PUT", reqJson)
+	if err != nil {
+		return fmt.Errorf("Error in PUT '%s'", err.Error())
+	}
+	if len(responseBody) > 0 {
+		res := datatypes.BaseRes{}
+		err = json.Unmarshal(responseBody, &res)
+		if err != nil {
+			return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+		}
+		if res.Severity != nil && *res.Severity == "ERROR" {
+			return fmt.Errorf("Error in PUT : Errorcode '%d' Message '%s' Severity '%s'\r\n", *res.Errorcode, *res.Message, *res.Severity)
+		}
+	}
+	return nil
+}
+
+func (n *NitroClient) Get(res interface{}, resourceName string, options ...string) error {
+	resource, err := getResourceStringByObject(res)
+	if err != nil {
+		return err
+	}
+
+	requestQuery := resource + "/" + resourceName + getOptions(options)
+	responseBody, _, err := HTTPRequest(n, requestQuery, "GET", nil)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(responseBody, res)
+	if err != nil {
+		return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+	}
+	resMessage := datatypes.BaseRes{}
+	err = json.Unmarshal(responseBody, &resMessage)
+	if err != nil {
+		return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+	}
+	if resMessage.Severity != nil && *resMessage.Severity == "ERROR" {
+		return fmt.Errorf("Error in GET : Errorcode '%d' Message '%s' Severity '%s'\r\n", *resMessage.Errorcode, *resMessage.Message, *resMessage.Severity)
+	}
+
+	return nil
+}
+
+func (n *NitroClient) Delete(req interface{}, resourceName string, options ...string) error {
+	resource, err := getResourceStringByObject(req)
+	if err != nil {
+		return err
+	}
+
+	requestQuery := resource + "/" + resourceName + getOptions(options)
+	responseBody, _, err := HTTPRequest(n, requestQuery, "DELETE", nil)
+	if err != nil {
+		return err
+	}
+	resMessage := datatypes.BaseRes{}
+	err = json.Unmarshal(responseBody, &resMessage)
+	if err != nil {
+		return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+	}
+	if resMessage.Severity != nil && *resMessage.Severity == "ERROR" {
+		return fmt.Errorf("Error in DELETE : Errorcode '%d' Message '%s' Severity '%s'\r\n", *resMessage.Errorcode, *resMessage.Message, *resMessage.Severity)
+	}
+
+	return nil
+}
+
+func (n *NitroClient) Enable(req interface{}, enable bool) error {
+	resource, err := getResourceStringByObject(req)
+	if err != nil {
+		return err
+	}
+	reqJson, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+	log.Printf("%s", reqJson)
+	action := "/?action=enable"
+	if !enable {
+		action = "/?action=disable"
+	}
+	query := resource + action
+	log.Println("QUERY : " + query)
+	responseBody, _, err := HTTPRequest(n, query, "POST", reqJson)
+	if err != nil {
+		return fmt.Errorf("Error in POST '%s' for Enable", err.Error())
+	}
+	if len(responseBody) > 0 {
+		res := datatypes.BaseRes{}
+		err = json.Unmarshal(responseBody, &res)
+		if err != nil {
+			return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+		}
+		if res.Severity != nil && *res.Severity == "ERROR" {
+			return fmt.Errorf("Error in POST : Errorcode '%d' Message '%s' Severity '%s'\r\n", *res.Errorcode, *res.Message, *res.Severity)
+		}
+	}
+	return nil
+}
+
+func NewNitroClient(protocol string, ipAddress string, mode string, user string, password string, debug bool) *NitroClient {
+	nClient := NitroClient{
+		Protocol:  protocol,
+		IpAddress: ipAddress,
+		Mode:      mode,
+		User:      user,
+		Password:  password,
+		Debug:     debug,
+	}
+	return &nClient
+}
+
+func HTTPRequest(nClient *NitroClient, requestQuery string, requestType string, requestBody []byte) ([]byte, int, error) {
+
+	// Create a request
+	Url := nClient.Protocol + "://" + nClient.IpAddress + "/nitro/v1/" + nClient.Mode + "/" + requestQuery
+	requestBodyBuffer := bytes.NewBuffer(requestBody)
+	req, err := http.NewRequest(requestType, Url, requestBodyBuffer)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("X-NITRO-USER", nClient.User)
+	req.Header.Set("X-NITRO-PASS", nClient.Password)
+
+	if nClient.Debug {
+		log.Println("[DEBUG] Nitro Request Path: ", requestType, req.URL)
+		log.Println("[DEBUG] Nitro Request Parameters: ", requestBodyBuffer.String())
+	}
+
+	// Execute http request
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	defer resp.Body.Close()
+
+	responseBody, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, resp.StatusCode, err
+	}
+
+	if nClient.Debug {
+		log.Println("[DEBUG] Nitro Response: ", string(responseBody))
+	}
+	return responseBody, resp.StatusCode, nil
+}
+
+func getResourceStringByObject(obj interface{}) (string, error) {
+	resourceType := reflect.TypeOf(obj).Elem().Name()
+	if len(resourceType) < 4 || (!strings.Contains(resourceType, "Req") && !strings.Contains(resourceType, "Res")) {
+		return "", fmt.Errorf("Unable to get resource name from '%s'", resourceType)
+	}
+	resourceName := resourceType[:len(resourceType)-3]
+	resourceBytes := make([]byte, 0)
+	for index, character := range []byte(resourceName) {
+		if index > 0 && character < 97 {
+			resourceBytes = append(resourceBytes, []byte("_"+string(character+32))...)
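+			// Worked example: "LbvserverServiceBindingReq" drops the
+			// trailing "Req" and each later uppercase letter (ASCII < 97)
+			// becomes "_" plus its lowercase form (byte+32), yielding
+			// "lbvserver_service_binding".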
+ } else if character < 97 { + resourceBytes = append(resourceBytes, character + 32) + } else { + resourceBytes = append(resourceBytes, character) + } + } + return string(resourceBytes), nil +} + +func getOptions(options []string) string { + res := "" + if len(options) > 0 { + for index, option := range options { + if index == 0 { + res = "?" + option + } else { + res = res + "&" + option + } + } + } + return res +} \ No newline at end of file diff --git a/vendor/github.com/minsikl/netscaler-nitro-go/datatypes/datatypes.go b/vendor/github.com/minsikl/netscaler-nitro-go/datatypes/datatypes.go new file mode 100644 index 00000000000..e56b0285f86 --- /dev/null +++ b/vendor/github.com/minsikl/netscaler-nitro-go/datatypes/datatypes.go @@ -0,0 +1,206 @@ +package datatypes + +const ( + CONFIG = "config" + STAT = "stat" + +) + +// Base +type BaseRes struct { + Errorcode *int `json:"errorcode,omitempty"` + Message *string `json:"message,omitempty"` + Severity *string `json:"severity,omitempty"` +} + +// service +type Service struct { + Name *string `json:"name,omitempty"` + Ip *string `json:"ip,omitempty"` + Ipaddress *string `json:"ipaddress,omitempty"` + ServiceType *string `json:"servicetype,omitempty"` + Port *int `json:"port,omitempty"` + Weight *int `json:"weight,omitempty"` + Maxclient *string `json:"maxclient,omitempty"` + Usip *string `json:"usip,omitempty"` +} + +type ServiceReq struct { + Service *Service `json:"service,omitempty"` +} + +type ServiceRes struct { + BaseRes + Service []Service `json:"service,omitempty"` +} + +// lbvserver +type Lbvserver struct { + Name *string `json:"name,omitempty"` + ServiceType *string `json:"servicetype,omitempty"` + Port *int `json:"port,omitempty"` + Lbmethod *string `json:"lbmethod,omitempty"` + Ipv46 *string `json:"ipv46,omitempty"` + Persistencetype *string `json:"persistencetype,omitempty"` +} + +type LbvserverReq struct { + Lbvserver *Lbvserver `json:"lbvserver,omitempty"` +} + +type LbvserverRes struct { + BaseRes + Lbvserver []Lbvserver `json:"lbvserver,omitempty"` +} + +//lbvserver_service_binding +type LbvserverServiceBinding struct { + Name *string `json:"name,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` +} + +type LbvserverServiceBindingReq struct { + LbvserverServiceBinding *LbvserverServiceBinding `json:"lbvserver_service_binding,omitempty"` +} + +type LbvserverServiceBindingRes struct { + BaseRes + LbvserverServiceBinding []LbvserverServiceBinding `json:"lbvserver_service_binding,omitempty"` +} + +// systemfile +type Systemfile struct { + Filename *string `json:"filename,omitempty"` + Filelocation *string `json:"filelocation,omitempty"` + Filecontent *string `json:"filecontent,omitempty"` + Fileencoding *string `json:"fileencoding,omitempty"` +} + +type SystemfileReq struct { + Systemfile *Systemfile `json:"systemfile,omitempty"` +} + +type SystemfileRes struct { + BaseRes + Systemfile []Systemfile `json:"systemfile,omitempty"` +} + +// nsfeature +type Nsfeature struct { + Feature []string `json:"feature"` +} + +type NsfeatureReq struct { + Nsfeature *Nsfeature `json:"nsfeature,omitempty"` +} + +type NsfeatureRes struct { + BaseRes + Nsfeature []Nsfeature `json:"nsfeature,omitempty"` +} + +// sslcertkey +type Sslcertkey struct { + Certkey *string `json:"certkey,omitempty"` + Cert *string `json:"cert,omitempty"` + Key *string `json:"key,omitempty"` +} + +type SslcertkeyReq struct { + Sslcertkey *Sslcertkey `json:"sslcertkey,omitempty"` +} + +type SslcertkeyRes struct { + BaseRes + Sslcertkey []Sslcertkey 
`json:"sslcertkey,omitempty"` +} + +// sslvserver_sslcertkey_binding +type SslvserverSslcertkeyBinding struct { + Vservername *string `json:"vservername,omitempty"` + Certkeyname *string `json:"certkeyname,omitempty"` +} + +type SslvserverSslcertkeyBindingReq struct { + SslvserverSslcertkeyBinding *SslvserverSslcertkeyBinding `json:"sslvserver_sslcertkey_binding,omitempty"` +} + +type SslvserverSslcertkeyBindingRes struct { + BaseRes + SslvserverSslcertkeyBinding []SslvserverSslcertkeyBinding `json:"sslvserver_sslcertkey_binding,omitempty"` +} + +// systemuser +type Systemuser struct { + Username *string `json:"username,omitempty"` + Password *string `json:"password,omitempty"` +} + +type SystemuserReq struct { + Systemuser *Systemuser `json:"systemuser,omitempty"` +} + +type SystemuserRes struct { + BaseRes + Systemuser []Systemuser `json:"systemuser,omitempty"` +} + +// hanode +type Hanode struct { + Id *string `json:"id,omitempty"` + Ipaddress *string `json:"ipaddress,omitempty"` + Hastatus *string `json:"hastatus,omitempty"` +} + +type HanodeReq struct { + Hanode *Hanode `json:"hanode,omitempty"` +} + +type HanodeRes struct { + BaseRes + Hanode []Hanode `json:"hanode,omitempty"` +} + +// nsrpcnode +type Nsrpcnode struct { + Ipaddress *string `json:"ipaddress,omitempty"` + Password *string `json:"password,omitempty"` +} + +type NsrpcnodeReq struct { + Nsrpcnode *Nsrpcnode `json:"nsrpcnode,omitempty"` +} + +type NsrpcnodeRes struct { + BaseRes + Nsrpcnode []Nsrpcnode `json:"nsrpcnode,omitempty"` +} + +// hafiles +type Hafiles struct { + Mode []string `json:"mode,omitempty"` +} + +type HafilesReq struct { + Hafiles *Hafiles `json:"hafiles,omitempty"` +} + +type HafilesRes struct { + BaseRes + Hafiles []Hafiles `json:"hafiles,omitempty"` +} + +//service_lbmonitor_binding +type ServiceLbmonitorBinding struct { + Name *string `json:"name,omitempty"` + MonitorName *string `json:"monitor_name,omitempty"` +} + +type ServiceLbmonitorBindingReq struct { + ServiceLbmonitorBinding *ServiceLbmonitorBinding `json:"service_lbmonitor_binding,omitempty"` +} + +type ServiceLbmonitorBindingRes struct { + BaseRes + ServiceLbmonitorBinding []ServiceLbmonitorBinding `json:"service_lbmonitor_binding,omitempty"` +} diff --git a/vendor/github.com/minsikl/netscaler-nitro-go/op/helpers.go b/vendor/github.com/minsikl/netscaler-nitro-go/op/helpers.go new file mode 100644 index 00000000000..5183f7df647 --- /dev/null +++ b/vendor/github.com/minsikl/netscaler-nitro-go/op/helpers.go @@ -0,0 +1,18 @@ +package op + +// Convenience functions for returning pointers to values + +// Int returns a pointer to the int value provided +func Int(v int) *int { + return &v +} + +// String returns a pointer to the string value provided +func String(v string) *string { + return &v +} + +// Bool returns a pointer to the bool value provided +func Bool(v bool) *bool { + return &v +} diff --git a/vendor/github.com/nicksnyder/go-i18n/LICENSE b/vendor/github.com/nicksnyder/go-i18n/LICENSE new file mode 100644 index 00000000000..609cce7976c --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Nick Snyder https://github.com/nicksnyder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom 
the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go b/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go new file mode 100644 index 00000000000..a2291362fef --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go @@ -0,0 +1,444 @@ +// Package bundle manages translations for multiple languages. +package bundle + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "sync" + "unicode" + + "github.com/nicksnyder/go-i18n/i18n/language" + "github.com/nicksnyder/go-i18n/i18n/translation" + toml "github.com/pelletier/go-toml" + "gopkg.in/yaml.v2" +) + +// TranslateFunc is a copy of i18n.TranslateFunc to avoid a circular dependency. +type TranslateFunc func(translationID string, args ...interface{}) string + +// Bundle stores the translations for multiple languages. +type Bundle struct { + // The primary translations for a language tag and translation id. + translations map[string]map[string]translation.Translation + + // Translations that can be used when an exact language match is not possible. + fallbackTranslations map[string]map[string]translation.Translation + + sync.RWMutex +} + +// New returns an empty bundle. +func New() *Bundle { + return &Bundle{ + translations: make(map[string]map[string]translation.Translation), + fallbackTranslations: make(map[string]map[string]translation.Translation), + } +} + +// MustLoadTranslationFile is similar to LoadTranslationFile +// except it panics if an error happens. +func (b *Bundle) MustLoadTranslationFile(filename string) { + if err := b.LoadTranslationFile(filename); err != nil { + panic(err) + } +} + +// LoadTranslationFile loads the translations from filename into memory. +// +// The language that the translations are associated with is parsed from the filename (e.g. en-US.json). +// +// Generally you should load translation files once during your program's initialization. +func (b *Bundle) LoadTranslationFile(filename string) error { + buf, err := ioutil.ReadFile(filename) + if err != nil { + return err + } + return b.ParseTranslationFileBytes(filename, buf) +} + +// ParseTranslationFileBytes is similar to LoadTranslationFile except it parses the bytes in buf. +// +// It is useful for parsing translation files embedded with go-bindata. +func (b *Bundle) ParseTranslationFileBytes(filename string, buf []byte) error { + basename := filepath.Base(filename) + langs := language.Parse(basename) + switch l := len(langs); { + case l == 0: + return fmt.Errorf("no language found in %q", basename) + case l > 1: + return fmt.Errorf("multiple languages found in filename %q: %v; expected one", basename, langs) + } + translations, err := parseTranslations(filename, buf) + if err != nil { + return err + } + b.AddTranslation(langs[0], translations...) 
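+	// Typical wiring, as a sketch (illustrative file name and id):
+	//
+	//	b := bundle.New()
+	//	b.MustLoadTranslationFile("en-US.all.json")
+	//	T := b.MustTfunc("en-US")
+	//	_ = T("program_greeting")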
+ return nil +} + +func parseTranslations(filename string, buf []byte) ([]translation.Translation, error) { + if len(buf) == 0 { + return []translation.Translation{}, nil + } + + ext := filepath.Ext(filename) + + // `github.com/pelletier/go-toml` lacks an Unmarshal function, + // so we should parse TOML separately. + if ext == ".toml" { + tree, err := toml.LoadReader(bytes.NewReader(buf)) + if err != nil { + return nil, err + } + + m := make(map[string]map[string]interface{}) + for k, v := range tree.ToMap() { + m[k] = v.(map[string]interface{}) + } + + return parseFlatFormat(m) + } + + // Then parse other formats. + if isStandardFormat(ext, buf) { + var standardFormat []map[string]interface{} + if err := unmarshal(ext, buf, &standardFormat); err != nil { + return nil, fmt.Errorf("failed to unmarshal %v: %v", filename, err) + } + return parseStandardFormat(standardFormat) + } else { + var flatFormat map[string]map[string]interface{} + if err := unmarshal(ext, buf, &flatFormat); err != nil { + return nil, fmt.Errorf("failed to unmarshal %v: %v", filename, err) + } + return parseFlatFormat(flatFormat) + } +} + +func isStandardFormat(ext string, buf []byte) bool { + buf = deleteLeadingComments(ext, buf) + firstRune := rune(buf[0]) + return (ext == ".json" && firstRune == '[') || (ext == ".yaml" && firstRune == '-') +} + +// deleteLeadingComments deletes leading newlines and comments in buf. +// It only works for ext == ".yaml". +func deleteLeadingComments(ext string, buf []byte) []byte { + if ext != ".yaml" { + return buf + } + + for { + buf = bytes.TrimLeftFunc(buf, unicode.IsSpace) + if buf[0] == '#' { + buf = deleteLine(buf) + } else { + break + } + } + + return buf +} + +func deleteLine(buf []byte) []byte { + index := bytes.IndexRune(buf, '\n') + if index == -1 { // If there is only one line without newline ... + return nil // ... delete it and return nothing. + } + if index == len(buf)-1 { // If there is only one line with newline ... + return nil // ... do the same as above. + } + return buf[index+1:] +} + +// unmarshal finds an appropriate unmarshal function for ext +// (extension of filename) and unmarshals buf to out. out must be a pointer. +func unmarshal(ext string, buf []byte, out interface{}) error { + switch ext { + case ".json": + return json.Unmarshal(buf, out) + case ".yaml": + return yaml.Unmarshal(buf, out) + } + + return fmt.Errorf("unsupported file extension %v", ext) +} + +func parseStandardFormat(data []map[string]interface{}) ([]translation.Translation, error) { + translations := make([]translation.Translation, 0, len(data)) + for i, translationData := range data { + t, err := translation.NewTranslation(translationData) + if err != nil { + return nil, fmt.Errorf("unable to parse translation #%d because %s\n%v", i, err, translationData) + } + translations = append(translations, t) + } + return translations, nil +} + +// parseFlatFormat just converts data from flat format to standard format +// and passes it to parseStandardFormat. +// +// Flat format logic: +// key of data must be a string and data[key] must be always map[string]interface{}, +// but if there is only "other" key in it then it is non-plural, else plural. 
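+//
+// For example (hypothetical file content), the flat form
+//
+//	{"program_greeting": {"other": "Hello world"}}
+//
+// is converted to the standard form
+//
+//	[{"id": "program_greeting", "translation": "Hello world"}]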
+func parseFlatFormat(data map[string]map[string]interface{}) ([]translation.Translation, error) {
+	var standardFormatData []map[string]interface{}
+	for id, translationData := range data {
+		dataObject := make(map[string]interface{})
+		dataObject["id"] = id
+		if len(translationData) == 1 { // non-plural form
+			_, otherExists := translationData["other"]
+			if otherExists {
+				dataObject["translation"] = translationData["other"]
+			}
+		} else { // plural form
+			dataObject["translation"] = translationData
+		}
+
+		standardFormatData = append(standardFormatData, dataObject)
+	}
+
+	return parseStandardFormat(standardFormatData)
+}
+
+// AddTranslation adds translations for a language.
+//
+// It is useful if your translations are in a format not supported by LoadTranslationFile.
+func (b *Bundle) AddTranslation(lang *language.Language, translations ...translation.Translation) {
+	b.Lock()
+	defer b.Unlock()
+	if b.translations[lang.Tag] == nil {
+		b.translations[lang.Tag] = make(map[string]translation.Translation, len(translations))
+	}
+	currentTranslations := b.translations[lang.Tag]
+	for _, newTranslation := range translations {
+		if currentTranslation := currentTranslations[newTranslation.ID()]; currentTranslation != nil {
+			currentTranslations[newTranslation.ID()] = currentTranslation.Merge(newTranslation)
+		} else {
+			currentTranslations[newTranslation.ID()] = newTranslation
+		}
+	}
+
+	// lang can provide translations for less specific language tags.
+	for _, tag := range lang.MatchingTags() {
+		b.fallbackTranslations[tag] = currentTranslations
+	}
+}
+
+// Translations returns all translations in the bundle.
+func (b *Bundle) Translations() map[string]map[string]translation.Translation {
+	t := make(map[string]map[string]translation.Translation)
+	b.RLock()
+	for tag, translations := range b.translations {
+		t[tag] = make(map[string]translation.Translation)
+		for id, translation := range translations {
+			t[tag][id] = translation
+		}
+	}
+	b.RUnlock()
+	return t
+}
+
+// LanguageTags returns the tags of all languages that have been added.
+func (b *Bundle) LanguageTags() []string {
+	var tags []string
+	b.RLock()
+	for k := range b.translations {
+		tags = append(tags, k)
+	}
+	b.RUnlock()
+	return tags
+}
+
+// LanguageTranslationIDs returns the ids of all translations that have been added for a given language.
+func (b *Bundle) LanguageTranslationIDs(languageTag string) []string {
+	var ids []string
+	b.RLock()
+	for id := range b.translations[languageTag] {
+		ids = append(ids, id)
+	}
+	b.RUnlock()
+	return ids
+}
+
+// MustTfunc is similar to Tfunc
+// except it panics if an error happens.
+func (b *Bundle) MustTfunc(pref string, prefs ...string) TranslateFunc {
+	tfunc, err := b.Tfunc(pref, prefs...)
+	if err != nil {
+		panic(err)
+	}
+	return tfunc
+}
+
+// MustTfuncAndLanguage is similar to TfuncAndLanguage
+// except it panics if an error happens.
+func (b *Bundle) MustTfuncAndLanguage(pref string, prefs ...string) (TranslateFunc, *language.Language) {
+	tfunc, language, err := b.TfuncAndLanguage(pref, prefs...)
+	if err != nil {
+		panic(err)
+	}
+	return tfunc, language
+}
+
+// Tfunc is similar to TfuncAndLanguage except it doesn't return the Language.
+func (b *Bundle) Tfunc(pref string, prefs ...string) (TranslateFunc, error) {
+	tfunc, _, err := b.TfuncAndLanguage(pref, prefs...)
+	return tfunc, err
+}
+
+// TfuncAndLanguage returns a TranslateFunc for the first Language that
+// has a non-zero number of translations in the bundle.
+//
+// The returned Language matches the first language preference that could be satisfied,
+// but this may not strictly match the language of the translations used to satisfy that preference.
+//
+// For example, the user may request "zh". If there are no translations for "zh" but there are translations
+// for "zh-cn", then the translations for "zh-cn" will be used but the returned Language will be "zh".
+//
+// It can parse languages from Accept-Language headers (RFC 2616),
+// but it assumes weights are monotonically decreasing.
+func (b *Bundle) TfuncAndLanguage(pref string, prefs ...string) (TranslateFunc, *language.Language, error) {
+	lang := b.supportedLanguage(pref, prefs...)
+	var err error
+	if lang == nil {
+		err = fmt.Errorf("no supported languages found %#v", append(prefs, pref))
+	}
+	return func(translationID string, args ...interface{}) string {
+		return b.translate(lang, translationID, args...)
+	}, lang, err
+}
+
+// supportedLanguage returns the first language which
+// has a non-zero number of translations in the bundle.
+func (b *Bundle) supportedLanguage(pref string, prefs ...string) *language.Language {
+	lang := b.translatedLanguage(pref)
+	if lang == nil {
+		for _, pref := range prefs {
+			lang = b.translatedLanguage(pref)
+			if lang != nil {
+				break
+			}
+		}
+	}
+	return lang
+}
+
+func (b *Bundle) translatedLanguage(src string) *language.Language {
+	langs := language.Parse(src)
+	b.RLock()
+	defer b.RUnlock()
+	for _, lang := range langs {
+		if len(b.translations[lang.Tag]) > 0 ||
+			len(b.fallbackTranslations[lang.Tag]) > 0 {
+			return lang
+		}
+	}
+	return nil
+}
+
+func (b *Bundle) translate(lang *language.Language, translationID string, args ...interface{}) string {
+	if lang == nil {
+		return translationID
+	}
+
+	translation := b.translation(lang, translationID)
+	if translation == nil {
+		return translationID
+	}
+
+	var data interface{}
+	var count interface{}
+	if argc := len(args); argc > 0 {
+		if isNumber(args[0]) {
+			count = args[0]
+			if argc > 1 {
+				data = args[1]
+			}
+		} else {
+			data = args[0]
+		}
+	}
+
+	if count != nil {
+		if data == nil {
+			data = map[string]interface{}{"Count": count}
+		} else {
+			dataMap := toMap(data)
+			dataMap["Count"] = count
+			data = dataMap
+		}
+	} else {
+		dataMap := toMap(data)
+		if c, ok := dataMap["Count"]; ok {
+			count = c
+		}
+	}
+
+	p, _ := lang.Plural(count)
+	template := translation.Template(p)
+	if template == nil {
+		return translationID
+	}
+
+	s := template.Execute(data)
+	if s == "" {
+		return translationID
+	}
+	return s
+}
+
+func (b *Bundle) translation(lang *language.Language, translationID string) translation.Translation {
+	b.RLock()
+	defer b.RUnlock()
+	translations := b.translations[lang.Tag]
+	if translations == nil {
+		translations = b.fallbackTranslations[lang.Tag]
+		if translations == nil {
+			return nil
+		}
+	}
+	return translations[translationID]
+}
+
+func isNumber(n interface{}) bool {
+	switch n.(type) {
+	case int, int8, int16, int32, int64, string:
+		return true
+	}
+	return false
+}
+
+func toMap(input interface{}) map[string]interface{} {
+	if data, ok := input.(map[string]interface{}); ok {
+		return data
+	}
+	v := reflect.ValueOf(input)
+	switch v.Kind() {
+	case reflect.Ptr:
+		return toMap(v.Elem().Interface())
+	case reflect.Struct:
+		return structToMap(v)
+	default:
+		return nil
+	}
+}
+
+// structToMap converts the top level of a struct to a map[string]interface{}.
+// Code inspired by github.com/fatih/structs.
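+//
+// For example (hypothetical type), struct{ Name string; age int }{"Bob", 30}
+// yields map[string]interface{}{"Name": "Bob"}: the unexported field "age" is skipped.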
+func structToMap(v reflect.Value) map[string]interface{} {
+	out := make(map[string]interface{})
+	t := v.Type()
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if field.PkgPath != "" {
+			// unexported field. skip.
+			continue
+		}
+		out[field.Name] = v.FieldByName(field.Name).Interface()
+	}
+	return out
+}
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/i18n.go b/vendor/github.com/nicksnyder/go-i18n/i18n/i18n.go
new file mode 100644
index 00000000000..c478ff6ea17
--- /dev/null
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/i18n.go
@@ -0,0 +1,158 @@
+// Package i18n supports string translations with variable substitution and CLDR pluralization.
+// It is intended to be used in conjunction with the goi18n command, although that is not strictly required.
+//
+// Initialization
+//
+// Your Go program should load translations during its initialization.
+//     i18n.MustLoadTranslationFile("path/to/fr-FR.all.json")
+// If your translations are in a file format not supported by (Must)?LoadTranslationFile,
+// then you can use the AddTranslation function to manually add translations.
+//
+// Fetching a translation
+//
+// Use Tfunc or MustTfunc to fetch a TranslateFunc that will return the translated string for a specific language.
+//     func handleRequest(w http.ResponseWriter, r *http.Request) {
+//         cookieLang, _ := r.Cookie("lang")
+//         acceptLang := r.Header.Get("Accept-Language")
+//         defaultLang := "en-US" // known valid language
+//         T, _ := i18n.Tfunc(cookieLang.Value, acceptLang, defaultLang)
+//         fmt.Println(T("Hello world"))
+//     }
+//
+// Usually it is a good idea to identify strings by a generic id rather than the English translation,
+// but the rest of this documentation will continue to use the English translation for readability.
+//     T("Hello world")     // ok
+//     T("programGreeting") // better!
+//
+// Variables
+//
+// TranslateFunc supports strings that have variables using the text/template syntax.
+//     T("Hello {{.Person}}", map[string]interface{}{
+//         "Person": "Bob",
+//     })
+//
+// Pluralization
+//
+// TranslateFunc supports the pluralization of strings using the CLDR pluralization rules defined here:
+// http://www.unicode.org/cldr/charts/latest/supplemental/language_plural_rules.html
+//     T("You have {{.Count}} unread emails.", 2)
+//     T("I am {{.Count}} meters tall.", "1.7")
+//
+// Plural strings may also have variables.
+//     T("{{.Person}} has {{.Count}} unread emails", 2, map[string]interface{}{
+//         "Person": "Bob",
+//     })
+//
+// Sentences with multiple plural components can be supported with nesting.
+//     T("{{.Person}} has {{.Count}} unread emails in the past {{.Timeframe}}.", 3, map[string]interface{}{
+//         "Person":    "Bob",
+//         "Timeframe": T("{{.Count}} days", 2),
+//     })
+//
+// Templates
+//
+// You can use the .Funcs() method of a text/template or html/template to register a TranslateFunc
+// for usage inside that template.
+package i18n
+
+import (
+	"github.com/nicksnyder/go-i18n/i18n/bundle"
+	"github.com/nicksnyder/go-i18n/i18n/language"
+	"github.com/nicksnyder/go-i18n/i18n/translation"
+)
+
+// TranslateFunc returns the translation of the string identified by translationID.
+//
+// If there is no translation for translationID, then the translationID itself is returned.
+// This makes it easy to identify missing translations in your app.
+//
+// If translationID is a non-plural form, then the first variadic argument may be a map[string]interface{}
+// or struct that contains template data.
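+// For example (taken from the package documentation above):
+//     T("Hello {{.Person}}", map[string]interface{}{"Person": "Bob"})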
+//
+// If translationID is a plural form, the function accepts two parameter signatures:
+// 1. T(count int, data struct{})
+//    The first variadic argument must be an integer type
+//    (int, int8, int16, int32, int64) or a float formatted as a string (e.g. "123.45").
+//    The second variadic argument may be a map[string]interface{} or struct{} that contains template data.
+// 2. T(data struct{})
+//    data must be a struct{} or map[string]interface{} that contains a Count field and the template data;
+//    the Count field must be an integer type (int, int8, int16, int32, int64)
+//    or a float formatted as a string (e.g. "123.45").
+type TranslateFunc func(translationID string, args ...interface{}) string
+
+// IdentityTfunc returns a TranslateFunc that always returns the translationID passed to it.
+//
+// It is a useful placeholder when parsing a text/template or html/template
+// before the actual Tfunc is available.
+func IdentityTfunc() TranslateFunc {
+	return func(translationID string, args ...interface{}) string {
+		return translationID
+	}
+}
+
+var defaultBundle = bundle.New()
+
+// MustLoadTranslationFile is similar to LoadTranslationFile
+// except it panics if an error happens.
+func MustLoadTranslationFile(filename string) {
+	defaultBundle.MustLoadTranslationFile(filename)
+}
+
+// LoadTranslationFile loads the translations from filename into memory.
+//
+// The language that the translations are associated with is parsed from the filename (e.g. en-US.json).
+//
+// Generally you should load translation files once during your program's initialization.
+func LoadTranslationFile(filename string) error {
+	return defaultBundle.LoadTranslationFile(filename)
+}
+
+// ParseTranslationFileBytes is similar to LoadTranslationFile except it parses the bytes in buf.
+//
+// It is useful for parsing translation files embedded with go-bindata.
+func ParseTranslationFileBytes(filename string, buf []byte) error {
+	return defaultBundle.ParseTranslationFileBytes(filename, buf)
+}
+
+// AddTranslation adds translations for a language.
+//
+// It is useful if your translations are in a format not supported by LoadTranslationFile.
+func AddTranslation(lang *language.Language, translations ...translation.Translation) {
+	defaultBundle.AddTranslation(lang, translations...)
+}
+
+// LanguageTags returns the tags of all languages that have been added.
+func LanguageTags() []string {
+	return defaultBundle.LanguageTags()
+}
+
+// LanguageTranslationIDs returns the ids of all translations that have been added for a given language.
+func LanguageTranslationIDs(languageTag string) []string {
+	return defaultBundle.LanguageTranslationIDs(languageTag)
+}
+
+// MustTfunc is similar to Tfunc except it panics if an error happens.
+func MustTfunc(languageSource string, languageSources ...string) TranslateFunc {
+	return TranslateFunc(defaultBundle.MustTfunc(languageSource, languageSources...))
+}
+
+// Tfunc returns a TranslateFunc that will be bound to the first language which
+// has a non-zero number of translations.
+//
+// It can parse languages from Accept-Language headers (RFC 2616).
+func Tfunc(languageSource string, languageSources ...string) (TranslateFunc, error) {
+	tfunc, err := defaultBundle.Tfunc(languageSource, languageSources...)
+	return TranslateFunc(tfunc), err
+}
+
+// MustTfuncAndLanguage is similar to TfuncAndLanguage except it panics if an error happens.
+func MustTfuncAndLanguage(languageSource string, languageSources ...string) (TranslateFunc, *language.Language) {
+	tfunc, lang := defaultBundle.MustTfuncAndLanguage(languageSource, languageSources...)
+	return TranslateFunc(tfunc), lang
+}
+
+// TfuncAndLanguage is similar to Tfunc except it also returns the language which TranslateFunc is bound to.
+func TfuncAndLanguage(languageSource string, languageSources ...string) (TranslateFunc, *language.Language, error) {
+	tfunc, lang, err := defaultBundle.TfuncAndLanguage(languageSource, languageSources...)
+	return TranslateFunc(tfunc), lang, err
+}
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go
new file mode 100644
index 00000000000..b045a275196
--- /dev/null
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go
@@ -0,0 +1,99 @@
+// Package language defines languages that implement CLDR pluralization.
+package language
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Language is a written human language.
+type Language struct {
+	// Tag uniquely identifies the language as defined by RFC 5646.
+	//
+	// Most language tags are a two character language code (ISO 639-1)
+	// optionally followed by a dash and a two character country code (ISO 3166-1).
+	// (e.g. en, pt-br)
+	Tag string
+	*PluralSpec
+}
+
+func (l *Language) String() string {
+	return l.Tag
+}
+
+// MatchingTags returns the set of language tags that map to this Language.
+// e.g. "zh-hans-cn" yields {"zh", "zh-hans", "zh-hans-cn"}
+// BUG: This should be computed once and stored as a field on Language for efficiency,
+// but this would require changing how Languages are constructed.
+func (l *Language) MatchingTags() []string {
+	parts := strings.Split(l.Tag, "-")
+	var prefix, matches []string
+	for _, part := range parts {
+		prefix = append(prefix, part)
+		match := strings.Join(prefix, "-")
+		matches = append(matches, match)
+	}
+	return matches
+}
+
+// Parse returns a slice of supported languages found in src or nil if none are found.
+// It can parse language tags and Accept-Language headers.
+func Parse(src string) []*Language {
+	var langs []*Language
+	start := 0
+	for end, chr := range src {
+		switch chr {
+		case ',', ';', '.':
+			tag := strings.TrimSpace(src[start:end])
+			if spec := GetPluralSpec(tag); spec != nil {
+				langs = append(langs, &Language{NormalizeTag(tag), spec})
+			}
+			start = end + 1
+		}
+	}
+	if start > 0 {
+		tag := strings.TrimSpace(src[start:])
+		if spec := GetPluralSpec(tag); spec != nil {
+			langs = append(langs, &Language{NormalizeTag(tag), spec})
+		}
+		return dedupe(langs)
+	}
+	if spec := GetPluralSpec(src); spec != nil {
+		langs = append(langs, &Language{NormalizeTag(src), spec})
+	}
+	return langs
+}
+
+func dedupe(langs []*Language) []*Language {
+	found := make(map[string]struct{}, len(langs))
+	deduped := make([]*Language, 0, len(langs))
+	for _, lang := range langs {
+		if _, ok := found[lang.Tag]; !ok {
+			found[lang.Tag] = struct{}{}
+			deduped = append(deduped, lang)
+		}
+	}
+	return deduped
+}
+
+// MustParse is similar to Parse except it panics instead of returning nil when no languages are found.
+func MustParse(src string) []*Language {
+	langs := Parse(src)
+	if len(langs) == 0 {
+		panic(fmt.Errorf("unable to parse language from %q", src))
+	}
+	return langs
+}
+
+// Add adds support for a new language.
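+//
+// A hedged usage sketch (hypothetical "xx" tag and rule; a real spec would
+// follow the CLDR rules for that language):
+//     language.Add(&language.Language{
+//         Tag: "xx",
+//         PluralSpec: &language.PluralSpec{
+//             Plurals: map[language.Plural]struct{}{language.One: {}, language.Other: {}},
+//             PluralFunc: func(ops *language.Operands) language.Plural {
+//                 if ops.NequalsAny(1) {
+//                     return language.One
+//                 }
+//                 return language.Other
+//             },
+//         },
+//     })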
+func Add(l *Language) {
+	tag := NormalizeTag(l.Tag)
+	pluralSpecs[tag] = l.PluralSpec
+}
+
+// NormalizeTag returns a language tag with all lower-case characters
+// and dashes "-" instead of underscores "_".
+func NormalizeTag(tag string) string {
+	tag = strings.ToLower(tag)
+	return strings.Replace(tag, "_", "-", -1)
+}
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go
new file mode 100644
index 00000000000..49ee7dc7c19
--- /dev/null
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go
@@ -0,0 +1,119 @@
+package language
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// http://unicode.org/reports/tr35/tr35-numbers.html#Operands
+type Operands struct {
+	N float64 // absolute value of the source number (integer and decimals)
+	I int64   // integer digits of n
+	V int64   // number of visible fraction digits in n, with trailing zeros
+	W int64   // number of visible fraction digits in n, without trailing zeros
+	F int64   // visible fractional digits in n, with trailing zeros
+	T int64   // visible fractional digits in n, without trailing zeros
+}
+
+// NequalsAny returns true if o represents an integer equal to any of the arguments.
+func (o *Operands) NequalsAny(any ...int64) bool {
+	for _, i := range any {
+		if o.I == i && o.T == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// NmodEqualsAny returns true if o represents an integer equal to any of the arguments modulo mod.
+func (o *Operands) NmodEqualsAny(mod int64, any ...int64) bool {
+	modI := o.I % mod
+	for _, i := range any {
+		if modI == i && o.T == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// NinRange returns true if o represents an integer in the closed interval [from, to].
+func (o *Operands) NinRange(from, to int64) bool {
+	return o.T == 0 && from <= o.I && o.I <= to
+}
+
+// NmodInRange returns true if o represents an integer in the closed interval [from, to] modulo mod.
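+// For example, for the integer 103 (a hypothetical value, so T == 0),
+// NmodInRange(10, 2, 4) is true because 103 % 10 = 3.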
+func (o *Operands) NmodInRange(mod, from, to int64) bool { + modI := o.I % mod + return o.T == 0 && from <= modI && modI <= to +} + +func newOperands(v interface{}) (*Operands, error) { + switch v := v.(type) { + case int: + return newOperandsInt64(int64(v)), nil + case int8: + return newOperandsInt64(int64(v)), nil + case int16: + return newOperandsInt64(int64(v)), nil + case int32: + return newOperandsInt64(int64(v)), nil + case int64: + return newOperandsInt64(v), nil + case string: + return newOperandsString(v) + case float32, float64: + return nil, fmt.Errorf("floats should be formatted into a string") + default: + return nil, fmt.Errorf("invalid type %T; expected integer or string", v) + } +} + +func newOperandsInt64(i int64) *Operands { + if i < 0 { + i = -i + } + return &Operands{float64(i), i, 0, 0, 0, 0} +} + +func newOperandsString(s string) (*Operands, error) { + if s[0] == '-' { + s = s[1:] + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, err + } + ops := &Operands{N: n} + parts := strings.SplitN(s, ".", 2) + ops.I, err = strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return nil, err + } + if len(parts) == 1 { + return ops, nil + } + fraction := parts[1] + ops.V = int64(len(fraction)) + for i := ops.V - 1; i >= 0; i-- { + if fraction[i] != '0' { + ops.W = i + 1 + break + } + } + if ops.V > 0 { + f, err := strconv.ParseInt(fraction, 10, 0) + if err != nil { + return nil, err + } + ops.F = f + } + if ops.W > 0 { + t, err := strconv.ParseInt(fraction[:ops.W], 10, 0) + if err != nil { + return nil, err + } + ops.T = t + } + return ops, nil +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/plural.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/plural.go new file mode 100644 index 00000000000..1f3ea5c69b3 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/plural.go @@ -0,0 +1,40 @@ +package language + +import ( + "fmt" +) + +// Plural represents a language pluralization form as defined here: +// http://cldr.unicode.org/index/cldr-spec/plural-rules +type Plural string + +// All defined plural categories. +const ( + Invalid Plural = "invalid" + Zero = "zero" + One = "one" + Two = "two" + Few = "few" + Many = "many" + Other = "other" +) + +// NewPlural returns src as a Plural +// or Invalid and a non-nil error if src is not a valid Plural. +func NewPlural(src string) (Plural, error) { + switch src { + case "zero": + return Zero, nil + case "one": + return One, nil + case "two": + return Two, nil + case "few": + return Few, nil + case "many": + return Many, nil + case "other": + return Other, nil + } + return Invalid, fmt.Errorf("invalid plural category %s", src) +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go new file mode 100644 index 00000000000..fc31e8807d9 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go @@ -0,0 +1,75 @@ +package language + +import "strings" + +// PluralSpec defines the CLDR plural rules for a language. 
+// http://www.unicode.org/cldr/charts/latest/supplemental/language_plural_rules.html +// http://unicode.org/reports/tr35/tr35-numbers.html#Operands +type PluralSpec struct { + Plurals map[Plural]struct{} + PluralFunc func(*Operands) Plural +} + +var pluralSpecs = make(map[string]*PluralSpec) + +func normalizePluralSpecID(id string) string { + id = strings.Replace(id, "_", "-", -1) + id = strings.ToLower(id) + return id +} + +// RegisterPluralSpec registers a new plural spec for the language ids. +func RegisterPluralSpec(ids []string, ps *PluralSpec) { + for _, id := range ids { + id = normalizePluralSpecID(id) + pluralSpecs[id] = ps + } +} + +// Plural returns the plural category for number as defined by +// the language's CLDR plural rules. +func (ps *PluralSpec) Plural(number interface{}) (Plural, error) { + ops, err := newOperands(number) + if err != nil { + return Invalid, err + } + return ps.PluralFunc(ops), nil +} + +// GetPluralSpec returns the PluralSpec that matches the longest prefix of tag. +// It returns nil if no PluralSpec matches tag. +func GetPluralSpec(tag string) *PluralSpec { + tag = NormalizeTag(tag) + subtag := tag + for { + if spec := pluralSpecs[subtag]; spec != nil { + return spec + } + end := strings.LastIndex(subtag, "-") + if end == -1 { + return nil + } + subtag = subtag[:end] + } +} + +func newPluralSet(plurals ...Plural) map[Plural]struct{} { + set := make(map[Plural]struct{}, len(plurals)) + for _, plural := range plurals { + set[plural] = struct{}{} + } + return set +} + +func intInRange(i, from, to int64) bool { + return from <= i && i <= to +} + +func intEqualsAny(i int64, any ...int64) bool { + for _, a := range any { + if i == a { + return true + } + } + return false +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go new file mode 100644 index 00000000000..0268bb92c01 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go @@ -0,0 +1,557 @@ +package language + +// This file is generated by i18n/language/codegen/generate.sh + +func init() { + + RegisterPluralSpec([]string{"bm", "bo", "dz", "id", "ig", "ii", "in", "ja", "jbo", "jv", "jw", "kde", "kea", "km", "ko", "lkt", "lo", "ms", "my", "nqo", "root", "sah", "ses", "sg", "th", "to", "vi", "wo", "yo", "yue", "zh"}, &PluralSpec{ + Plurals: newPluralSet(Other), + PluralFunc: func(ops *Operands) Plural { + return Other + }, + }) + RegisterPluralSpec([]string{"am", "as", "bn", "fa", "gu", "hi", "kn", "mr", "zu"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // i = 0 or n = 1 + if intEqualsAny(ops.I, 0) || + ops.NequalsAny(1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"ff", "fr", "hy", "kab"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // i = 0,1 + if intEqualsAny(ops.I, 0, 1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"pt"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // i = 0..1 + if intInRange(ops.I, 0, 1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"ast", "ca", "de", "en", "et", "fi", "fy", "gl", "it", "ji", "nl", "sv", "sw", "ur", "yi"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + 
return Other + }, + }) + RegisterPluralSpec([]string{"si"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 0,1 or i = 0 and f = 1 + if ops.NequalsAny(0, 1) || + intEqualsAny(ops.I, 0) && intEqualsAny(ops.F, 1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"ak", "bh", "guw", "ln", "mg", "nso", "pa", "ti", "wa"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 0..1 + if ops.NinRange(0, 1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"tzm"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 0..1 or n = 11..99 + if ops.NinRange(0, 1) || + ops.NinRange(11, 99) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"af", "asa", "az", "bem", "bez", "bg", "brx", "ce", "cgg", "chr", "ckb", "dv", "ee", "el", "eo", "es", "eu", "fo", "fur", "gsw", "ha", "haw", "hu", "jgo", "jmc", "ka", "kaj", "kcg", "kk", "kkj", "kl", "ks", "ksb", "ku", "ky", "lb", "lg", "mas", "mgo", "ml", "mn", "nah", "nb", "nd", "ne", "nn", "nnh", "no", "nr", "ny", "nyn", "om", "or", "os", "pap", "ps", "rm", "rof", "rwk", "saq", "sdh", "seh", "sn", "so", "sq", "ss", "ssy", "st", "syr", "ta", "te", "teo", "tig", "tk", "tn", "tr", "ts", "ug", "uz", "ve", "vo", "vun", "wae", "xh", "xog"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 1 + if ops.NequalsAny(1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"da"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 1 or t != 0 and i = 0,1 + if ops.NequalsAny(1) || + !intEqualsAny(ops.T, 0) && intEqualsAny(ops.I, 0, 1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"is"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // t = 0 and i % 10 = 1 and i % 100 != 11 or t != 0 + if intEqualsAny(ops.T, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) || + !intEqualsAny(ops.T, 0) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"mk"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // v = 0 and i % 10 = 1 or f % 10 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) || + intEqualsAny(ops.F%10, 1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"fil", "tl"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { + // v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I, 1, 2, 3) || + intEqualsAny(ops.V, 0) && !intEqualsAny(ops.I%10, 4, 6, 9) || + !intEqualsAny(ops.V, 0) && !intEqualsAny(ops.F%10, 4, 6, 9) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"lv", "prg"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Other), + PluralFunc: func(ops *Operands) Plural { + // n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 + if ops.NmodEqualsAny(10, 0) || + ops.NmodInRange(100, 11, 19) || + intEqualsAny(ops.V, 2) && intInRange(ops.F%100, 11, 19) { + return Zero + } + // n % 10 = 1 and n % 100 != 11 or v = 2 and f % 10 = 1 and f % 100 != 11 or v != 2 and f % 10 = 1 + if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11) || + intEqualsAny(ops.V, 2) && intEqualsAny(ops.F%10, 1) && 
!intEqualsAny(ops.F%100, 11) || + !intEqualsAny(ops.V, 2) && intEqualsAny(ops.F%10, 1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"lag"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 0 + if ops.NequalsAny(0) { + return Zero + } + // i = 0,1 and n != 0 + if intEqualsAny(ops.I, 0, 1) && !ops.NequalsAny(0) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"ksh"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 0 + if ops.NequalsAny(0) { + return Zero + } + // n = 1 + if ops.NequalsAny(1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"iu", "kw", "naq", "se", "sma", "smi", "smj", "smn", "sms"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 2 + if ops.NequalsAny(2) { + return Two + } + return Other + }, + }) + RegisterPluralSpec([]string{"shi"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Other), + PluralFunc: func(ops *Operands) Plural { + // i = 0 or n = 1 + if intEqualsAny(ops.I, 0) || + ops.NequalsAny(1) { + return One + } + // n = 2..10 + if ops.NinRange(2, 10) { + return Few + } + return Other + }, + }) + RegisterPluralSpec([]string{"mo", "ro"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Other), + PluralFunc: func(ops *Operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // v != 0 or n = 0 or n != 1 and n % 100 = 1..19 + if !intEqualsAny(ops.V, 0) || + ops.NequalsAny(0) || + !ops.NequalsAny(1) && ops.NmodInRange(100, 1, 19) { + return Few + } + return Other + }, + }) + RegisterPluralSpec([]string{"bs", "hr", "sh", "sr"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Other), + PluralFunc: func(ops *Operands) Plural { + // v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) || + intEqualsAny(ops.F%10, 1) && !intEqualsAny(ops.F%100, 11) { + return One + } + // v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 2, 4) && !intInRange(ops.I%100, 12, 14) || + intInRange(ops.F%10, 2, 4) && !intInRange(ops.F%100, 12, 14) { + return Few + } + return Other + }, + }) + RegisterPluralSpec([]string{"gd"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 1,11 + if ops.NequalsAny(1, 11) { + return One + } + // n = 2,12 + if ops.NequalsAny(2, 12) { + return Two + } + // n = 3..10,13..19 + if ops.NinRange(3, 10) || ops.NinRange(13, 19) { + return Few + } + return Other + }, + }) + RegisterPluralSpec([]string{"sl"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Other), + PluralFunc: func(ops *Operands) Plural { + // v = 0 and i % 100 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) { + return One + } + // v = 0 and i % 100 = 2 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 2) { + return Two + } + // v = 0 and i % 100 = 3..4 or v != 0 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 3, 4) || + !intEqualsAny(ops.V, 0) { + return Few + } + return Other + }, + }) + RegisterPluralSpec([]string{"dsb", "hsb"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Other), + PluralFunc: func(ops *Operands) Plural { + // v = 0 and i % 100 = 1 or f % 100 = 
1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) || + intEqualsAny(ops.F%100, 1) { + return One + } + // v = 0 and i % 100 = 2 or f % 100 = 2 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 2) || + intEqualsAny(ops.F%100, 2) { + return Two + } + // v = 0 and i % 100 = 3..4 or f % 100 = 3..4 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 3, 4) || + intInRange(ops.F%100, 3, 4) { + return Few + } + return Other + }, + }) + RegisterPluralSpec([]string{"he", "iw"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // i = 2 and v = 0 + if intEqualsAny(ops.I, 2) && intEqualsAny(ops.V, 0) { + return Two + } + // v = 0 and n != 0..10 and n % 10 = 0 + if intEqualsAny(ops.V, 0) && !ops.NinRange(0, 10) && ops.NmodEqualsAny(10, 0) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"cs", "sk"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // i = 2..4 and v = 0 + if intInRange(ops.I, 2, 4) && intEqualsAny(ops.V, 0) { + return Few + } + // v != 0 + if !intEqualsAny(ops.V, 0) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"pl"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // v = 0 and i % 10 = 2..4 and i % 100 != 12..14 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 2, 4) && !intInRange(ops.I%100, 12, 14) { + return Few + } + // v = 0 and i != 1 and i % 10 = 0..1 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 12..14 + if intEqualsAny(ops.V, 0) && !intEqualsAny(ops.I, 1) && intInRange(ops.I%10, 0, 1) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 5, 9) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 12, 14) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"be"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // n % 10 = 1 and n % 100 != 11 + if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11) { + return One + } + // n % 10 = 2..4 and n % 100 != 12..14 + if ops.NmodInRange(10, 2, 4) && !ops.NmodInRange(100, 12, 14) { + return Few + } + // n % 10 = 0 or n % 10 = 5..9 or n % 100 = 11..14 + if ops.NmodEqualsAny(10, 0) || + ops.NmodInRange(10, 5, 9) || + ops.NmodInRange(100, 11, 14) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"lt"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // n % 10 = 1 and n % 100 != 11..19 + if ops.NmodEqualsAny(10, 1) && !ops.NmodInRange(100, 11, 19) { + return One + } + // n % 10 = 2..9 and n % 100 != 11..19 + if ops.NmodInRange(10, 2, 9) && !ops.NmodInRange(100, 11, 19) { + return Few + } + // f != 0 + if !intEqualsAny(ops.F, 0) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"mt"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 0 or n % 100 = 2..10 + if ops.NequalsAny(0) || + ops.NmodInRange(100, 2, 10) { + return Few + } + // n % 100 = 11..19 + if ops.NmodInRange(100, 11, 19) { + return Many + } + return Other + }, + 
}) + RegisterPluralSpec([]string{"ru", "uk"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // v = 0 and i % 10 = 1 and i % 100 != 11 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) { + return One + } + // v = 0 and i % 10 = 2..4 and i % 100 != 12..14 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 2, 4) && !intInRange(ops.I%100, 12, 14) { + return Few + } + // v = 0 and i % 10 = 0 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 11..14 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 0) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 5, 9) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 11, 14) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"br"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // n % 10 = 1 and n % 100 != 11,71,91 + if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11, 71, 91) { + return One + } + // n % 10 = 2 and n % 100 != 12,72,92 + if ops.NmodEqualsAny(10, 2) && !ops.NmodEqualsAny(100, 12, 72, 92) { + return Two + } + // n % 10 = 3..4,9 and n % 100 != 10..19,70..79,90..99 + if (ops.NmodInRange(10, 3, 4) || ops.NmodEqualsAny(10, 9)) && !(ops.NmodInRange(100, 10, 19) || ops.NmodInRange(100, 70, 79) || ops.NmodInRange(100, 90, 99)) { + return Few + } + // n != 0 and n % 1000000 = 0 + if !ops.NequalsAny(0) && ops.NmodEqualsAny(1000000, 0) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"ga"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 2 + if ops.NequalsAny(2) { + return Two + } + // n = 3..6 + if ops.NinRange(3, 6) { + return Few + } + // n = 7..10 + if ops.NinRange(7, 10) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"gv"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // v = 0 and i % 10 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) { + return One + } + // v = 0 and i % 10 = 2 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 2) { + return Two + } + // v = 0 and i % 100 = 0,20,40,60,80 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 0, 20, 40, 60, 80) { + return Few + } + // v != 0 + if !intEqualsAny(ops.V, 0) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"ar", "ars"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Two, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 0 + if ops.NequalsAny(0) { + return Zero + } + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 2 + if ops.NequalsAny(2) { + return Two + } + // n % 100 = 3..10 + if ops.NmodInRange(100, 3, 10) { + return Few + } + // n % 100 = 11..99 + if ops.NmodInRange(100, 11, 99) { + return Many + } + return Other + }, + }) + RegisterPluralSpec([]string{"cy"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Two, Few, Many, Other), + PluralFunc: func(ops *Operands) Plural { + // n = 0 + if ops.NequalsAny(0) { + return Zero + } + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 2 + if ops.NequalsAny(2) { + return Two + } + // n = 3 + if ops.NequalsAny(3) { + return Few + } + // n = 6 + if ops.NequalsAny(6) { + return Many + } + return Other + }, + }) +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go 
b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go new file mode 100644 index 00000000000..17c32609ceb --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go @@ -0,0 +1,82 @@ +package translation + +import ( + "github.com/nicksnyder/go-i18n/i18n/language" +) + +type pluralTranslation struct { + id string + templates map[language.Plural]*template +} + +func (pt *pluralTranslation) MarshalInterface() interface{} { + return map[string]interface{}{ + "id": pt.id, + "translation": pt.templates, + } +} + +func (pt *pluralTranslation) MarshalFlatInterface() interface{} { + return pt.templates +} + +func (pt *pluralTranslation) ID() string { + return pt.id +} + +func (pt *pluralTranslation) Template(pc language.Plural) *template { + return pt.templates[pc] +} + +func (pt *pluralTranslation) UntranslatedCopy() Translation { + return &pluralTranslation{pt.id, make(map[language.Plural]*template)} +} + +func (pt *pluralTranslation) Normalize(l *language.Language) Translation { + // Delete plural categories that don't belong to this language. + for pc := range pt.templates { + if _, ok := l.Plurals[pc]; !ok { + delete(pt.templates, pc) + } + } + // Create map entries for missing valid categories. + for pc := range l.Plurals { + if _, ok := pt.templates[pc]; !ok { + pt.templates[pc] = mustNewTemplate("") + } + } + return pt +} + +func (pt *pluralTranslation) Backfill(src Translation) Translation { + for pc, t := range pt.templates { + if (t == nil || t.src == "") && src != nil { + pt.templates[pc] = src.Template(language.Other) + } + } + return pt +} + +func (pt *pluralTranslation) Merge(t Translation) Translation { + other, ok := t.(*pluralTranslation) + if !ok || pt.ID() != t.ID() { + return t + } + for pluralCategory, template := range other.templates { + if template != nil && template.src != "" { + pt.templates[pluralCategory] = template + } + } + return pt +} + +func (pt *pluralTranslation) Incomplete(l *language.Language) bool { + for pc := range l.Plurals { + if t := pt.templates[pc]; t == nil || t.src == "" { + return true + } + } + return false +} + +var _ = Translation(&pluralTranslation{}) diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go new file mode 100644 index 00000000000..a76c8c941ac --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go @@ -0,0 +1,61 @@ +package translation + +import ( + "github.com/nicksnyder/go-i18n/i18n/language" +) + +type singleTranslation struct { + id string + template *template +} + +func (st *singleTranslation) MarshalInterface() interface{} { + return map[string]interface{}{ + "id": st.id, + "translation": st.template, + } +} + +func (st *singleTranslation) MarshalFlatInterface() interface{} { + return map[string]interface{}{"other": st.template} +} + +func (st *singleTranslation) ID() string { + return st.id +} + +func (st *singleTranslation) Template(pc language.Plural) *template { + return st.template +} + +func (st *singleTranslation) UntranslatedCopy() Translation { + return &singleTranslation{st.id, mustNewTemplate("")} +} + +func (st *singleTranslation) Normalize(language *language.Language) Translation { + return st +} + +func (st *singleTranslation) Backfill(src Translation) Translation { + if (st.template == nil || st.template.src == "") && src != nil { + st.template = src.Template(language.Other) + } + return st +} + +func (st 
*singleTranslation) Merge(t Translation) Translation { + other, ok := t.(*singleTranslation) + if !ok || st.ID() != t.ID() { + return t + } + if other.template != nil && other.template.src != "" { + st.template = other.template + } + return st +} + +func (st *singleTranslation) Incomplete(l *language.Language) bool { + return st.template == nil || st.template.src == "" +} + +var _ = Translation(&singleTranslation{}) diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/template.go b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/template.go new file mode 100644 index 00000000000..3310150c098 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/template.go @@ -0,0 +1,65 @@ +package translation + +import ( + "bytes" + "encoding" + "strings" + gotemplate "text/template" +) + +type template struct { + tmpl *gotemplate.Template + src string +} + +func newTemplate(src string) (*template, error) { + if src == "" { + return new(template), nil + } + + var tmpl template + err := tmpl.parseTemplate(src) + return &tmpl, err +} + +func mustNewTemplate(src string) *template { + t, err := newTemplate(src) + if err != nil { + panic(err) + } + return t +} + +func (t *template) String() string { + return t.src +} + +func (t *template) Execute(args interface{}) string { + if t.tmpl == nil { + return t.src + } + var buf bytes.Buffer + if err := t.tmpl.Execute(&buf, args); err != nil { + return err.Error() + } + return buf.String() +} + +func (t *template) MarshalText() ([]byte, error) { + return []byte(t.src), nil +} + +func (t *template) UnmarshalText(src []byte) error { + return t.parseTemplate(string(src)) +} + +func (t *template) parseTemplate(src string) (err error) { + t.src = src + if strings.Contains(src, "{{") { + t.tmpl, err = gotemplate.New(src).Parse(src) + } + return +} + +var _ = encoding.TextMarshaler(&template{}) +var _ = encoding.TextUnmarshaler(&template{}) diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/translation.go b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/translation.go new file mode 100644 index 00000000000..197514623f7 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/translation.go @@ -0,0 +1,84 @@ +// Package translation defines the interface for a translation. +package translation + +import ( + "fmt" + + "github.com/nicksnyder/go-i18n/i18n/language" +) + +// Translation is the interface that represents a translated string. +type Translation interface { + // MarshalInterface returns the object that should be used + // to serialize the translation. + MarshalInterface() interface{} + MarshalFlatInterface() interface{} + ID() string + Template(language.Plural) *template + UntranslatedCopy() Translation + Normalize(language *language.Language) Translation + Backfill(src Translation) Translation + Merge(Translation) Translation + Incomplete(l *language.Language) bool +} + +// SortableByID implements sort.Interface for a slice of translations. +type SortableByID []Translation + +func (a SortableByID) Len() int { return len(a) } +func (a SortableByID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a SortableByID) Less(i, j int) bool { return a[i].ID() < a[j].ID() } + +// NewTranslation reflects on data to create a new Translation. +// +// data["id"] must be a string and data["translation"] must be either a string +// for a non-plural translation or a map[string]interface{} for a plural translation. 
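+//
+// For example (hypothetical data):
+//     NewTranslation(map[string]interface{}{
+//         "id":          "d_days",
+//         "translation": map[string]interface{}{"one": "{{.Count}} day", "other": "{{.Count}} days"},
+//     })
+// returns a plural Translation with templates for the "one" and "other" categories.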
+func NewTranslation(data map[string]interface{}) (Translation, error) { + id, ok := data["id"].(string) + if !ok { + return nil, fmt.Errorf(`missing "id" key`) + } + var pluralObject map[string]interface{} + switch translation := data["translation"].(type) { + case string: + tmpl, err := newTemplate(translation) + if err != nil { + return nil, err + } + return &singleTranslation{id, tmpl}, nil + case map[interface{}]interface{}: + // The YAML parser uses interface{} keys so we first convert them to string keys. + pluralObject = make(map[string]interface{}) + for k, v := range translation { + kStr, ok := k.(string) + if !ok { + return nil, fmt.Errorf(`invalid plural category type %T; expected string`, k) + } + pluralObject[kStr] = v + } + case map[string]interface{}: + pluralObject = translation + case nil: + return nil, fmt.Errorf(`missing "translation" key`) + default: + return nil, fmt.Errorf(`unsupported type for "translation" key %T`, translation) + } + + templates := make(map[language.Plural]*template, len(pluralObject)) + for k, v := range pluralObject { + pc, err := language.NewPlural(k) + if err != nil { + return nil, err + } + str, ok := v.(string) + if !ok { + return nil, fmt.Errorf(`plural category "%s" has value of type %T; expected string`, pc, v) + } + tmpl, err := newTemplate(str) + if err != nil { + return nil, err + } + templates[pc] = tmpl + } + return &pluralTranslation{id, templates}, nil +} diff --git a/vendor/github.com/openshift/cluster-api-provider-ibmcloud/LICENSE b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/doc.go b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/doc.go new file mode 100644 index 00000000000..b014978163c --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+// Package v1beta1 contains API Schema definitions for the ibmcloudprovider v1beta1 API group
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider
+// +k8s:defaulter-gen=TypeMeta
+// +groupName=ibmcloudproviderconfig.openshift.io
+package v1beta1
diff --git a/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/ibmcloudproviderconfig_types.go b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/ibmcloudproviderconfig_types.go
new file mode 100644
index 00000000000..e6c09980681
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/ibmcloudproviderconfig_types.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IBMCloudMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field
+// for an IBM Cloud VPC virtual machine. It is used by the IBM Cloud machine actuator to create a single Machine.
+// +k8s:openapi-gen=true
+type IBMCloudMachineProviderSpec struct {
+	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+	// vpc type Instance struct
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// VPC name where the instance will be created
+	VPC string `json:"vpc"`
+
+	// Actuator will apply these tags to the virtual server instance, if they are not already
+	// present, in addition to the default tags applied by the actuator
+	Tags []TagSpecs `json:"tags,omitempty"`
+
+	// TODO: Add labels to the virtual server
+	// Labels map[string]string `json:"labels,omitempty"`
+
+	// Image is the ID of the custom OS image in VPC
+	// Example: rhcos-4-4-7 (Image name)
+	Image string `json:"image"`
+
+	// Profile indicates the flavor of instance.
+	// Example: bx2-8x32 (8 vCPUs, 32 GB RAM)
+	Profile string `json:"profile"`
+
+	// Region of the virtual machine
+	Region string `json:"region"`
+
+	// Zone where the virtual server instance will be created
+	Zone string `json:"zone"`
+
+	// ResourceGroup of the VPC
+	ResourceGroup string `json:"resourceGroup"`
+
+	// PrimaryNetworkInterface is required to specify the subnet
+	PrimaryNetworkInterface NetworkInterface `json:"primaryNetworkInterface"`
+
+	// TODO: Probably not needed for the worker machines
+	// SSHKeys are the SSH public keys that will be used to access the virtual server instance
+	// SSHKeys []*string `json:"sshKeys,omitempty"`
+
+	// UserDataSecret holds a reference to a secret that contains instance Ignition data (user data)
+	UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret"`
+
+	// CredentialsSecret is a reference to the secret with IBM Cloud credentials.
+	CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret"`
+}
+
+// NetworkInterface holds the network interface configuration for the instance
+type NetworkInterface struct {
+	// Subnet name of the network interface
+	Subnet string `json:"subnet"`
+	// SecurityGroups holds a list of security group names
+	SecurityGroups []string `json:"securityGroups"`
+}
+
+// TagSpecs is the name:value pair for a tag
+type TagSpecs struct {
+	// Name and Value of the tag
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
+
+// TODO: want to configure Disk/Block Device Mapping for VPC instances
+
+// // IBMCloudMetadata describes metadata for IBM Cloud.
+// type IBMCloudMetadata struct {
+// 	Key   string  `json:"key"`
+// 	Value *string `json:"value"`
+// }
+
+// TODO: IBMCloudLoadBalancerReference - register an instance with the LoadBalancer
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+func init() {
+	SchemeBuilder.Register(&IBMCloudMachineProviderSpec{})
+}
diff --git a/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/ibmcloudproviderstatus_types.go b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/ibmcloudproviderstatus_types.go
new file mode 100644
index 00000000000..aacb8b4d5f6
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/ibmcloudproviderstatus_types.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IBMCloudMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains IBM Cloud-specific status information.
+type IBMCloudMachineProviderStatus struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// InstanceID is the instance ID of the machine created in IBM Cloud
+	// +optional
+	InstanceID *string `json:"instanceId,omitempty"`
+
+	// InstanceState is the state of the IBM Cloud instance for this machine
+	// +optional
+	InstanceState *string `json:"instanceState,omitempty"`
+
+	// Conditions is a set of conditions associated with the Machine to indicate
+	// errors or other status
+	Conditions []IBMCloudMachineProviderCondition `json:"conditions,omitempty"`
+}
+
+// IBMCloudMachineProviderConditionType is a valid value for IBMCloudMachineProviderCondition.Type
+type IBMCloudMachineProviderConditionType string
+
+// Valid conditions for an IBM Cloud machine instance
+const (
+	// MachineCreated indicates whether the machine has been created or not. If not,
+	// it should include a reason and message for the failure.
+	MachineCreated IBMCloudMachineProviderConditionType = "MachineCreated"
+)
+
+// IBMCloudMachineProviderConditionReason is the reason for the condition's last transition.
+type IBMCloudMachineProviderConditionReason string
+
+const (
+	// MachineCreationSucceeded indicates machine creation success.
+	MachineCreationSucceeded IBMCloudMachineProviderConditionReason = "MachineCreationSucceeded"
+	// MachineCreationFailed indicates machine creation failure.
+	MachineCreationFailed IBMCloudMachineProviderConditionReason = "MachineCreationFailed"
+)
+
+// IBMCloudMachineProviderCondition is a condition in an IBMCloudMachineProviderStatus.
+type IBMCloudMachineProviderCondition struct {
+	// Type is the type of the condition.
+	Type IBMCloudMachineProviderConditionType `json:"type"`
+	// Status is the status of the condition.
+	Status corev1.ConditionStatus `json:"status"`
+	// LastProbeTime is the last time we probed the condition.
+	// +optional
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+	// LastTransitionTime is the last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
+	// +optional
+	Reason IBMCloudMachineProviderConditionReason `json:"reason,omitempty"`
+	// Message is a human-readable message indicating details about the last transition.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+func init() {
+	SchemeBuilder.Register(&IBMCloudMachineProviderStatus{})
+}
diff --git a/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/register.go b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/register.go
new file mode 100644
index 00000000000..7cf6a038adb
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/register.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta1 contains API Schema definitions for the ibmcloudproviderconfig v1beta1 API group
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider
+// +k8s:defaulter-gen=TypeMeta
+// +groupName=ibmcloudproviderconfig.openshift.io
+package v1beta1
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	klog "k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+	yaml "sigs.k8s.io/yaml"
+)
+
+var (
+	// SchemeGroupVersion is the group version used to register these objects
+	SchemeGroupVersion = schema.GroupVersion{Group: "ibmcloudproviderconfig.openshift.io", Version: "v1beta1"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+)
+
+// RawExtensionFromProviderSpec marshals the machine provider spec.
+func RawExtensionFromProviderSpec(spec *IBMCloudMachineProviderSpec) (*runtime.RawExtension, error) {
+	if spec == nil {
+		return &runtime.RawExtension{}, nil
+	}
+
+	var rawBytes []byte
+	var err error
+	if rawBytes, err = json.Marshal(spec); err != nil {
+		return nil, fmt.Errorf("error marshalling providerSpec: %v", err)
+	}
+
+	return &runtime.RawExtension{
+		Raw: rawBytes,
+	}, nil
+}
+
+// RawExtensionFromProviderStatus marshals the provider status.
+func RawExtensionFromProviderStatus(status *IBMCloudMachineProviderStatus) (*runtime.RawExtension, error) {
+	if status == nil {
+		return &runtime.RawExtension{}, nil
+	}
+
+	var rawBytes []byte
+	var err error
+	if rawBytes, err = json.Marshal(status); err != nil {
+		return nil, fmt.Errorf("error marshalling providerStatus: %v", err)
+	}
+
+	return &runtime.RawExtension{
+		Raw: rawBytes,
+	}, nil
+}
+
+// ProviderSpecFromRawExtension unmarshals the JSON-encoded raw extension into an IBMCloudMachineProviderSpec type.
+func ProviderSpecFromRawExtension(rawExtension *runtime.RawExtension) (*IBMCloudMachineProviderSpec, error) {
+	if rawExtension == nil {
+		return &IBMCloudMachineProviderSpec{}, nil
+	}
+
+	spec := new(IBMCloudMachineProviderSpec)
+	if err := yaml.Unmarshal(rawExtension.Raw, &spec); err != nil {
+		return nil, fmt.Errorf("error unmarshalling providerSpec: %v", err)
+	}
+
+	klog.V(5).Infof("Got provider Spec from raw extension: %+v", spec)
+	return spec, nil
+}
+
+// ProviderStatusFromRawExtension unmarshals the JSON-encoded raw extension into an IBMCloudMachineProviderStatus type.
+func ProviderStatusFromRawExtension(rawExtension *runtime.RawExtension) (*IBMCloudMachineProviderStatus, error) {
+	if rawExtension == nil {
+		return &IBMCloudMachineProviderStatus{}, nil
+	}
+
+	providerStatus := new(IBMCloudMachineProviderStatus)
+	if err := yaml.Unmarshal(rawExtension.Raw, providerStatus); err != nil {
+		return nil, fmt.Errorf("error unmarshalling providerStatus: %v", err)
+	}
+
+	klog.V(5).Infof("Got provider Status from raw extension: %+v", providerStatus)
+	return providerStatus, nil
+}
diff --git a/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..2c3a14cdc85
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,160 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudMachineProviderCondition) DeepCopyInto(out *IBMCloudMachineProviderCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudMachineProviderCondition. +func (in *IBMCloudMachineProviderCondition) DeepCopy() *IBMCloudMachineProviderCondition { + if in == nil { + return nil + } + out := new(IBMCloudMachineProviderCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudMachineProviderSpec) DeepCopyInto(out *IBMCloudMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagSpecs, len(*in)) + copy(*out, *in) + } + in.PrimaryNetworkInterface.DeepCopyInto(&out.PrimaryNetworkInterface) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudMachineProviderSpec. +func (in *IBMCloudMachineProviderSpec) DeepCopy() *IBMCloudMachineProviderSpec { + if in == nil { + return nil + } + out := new(IBMCloudMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBMCloudMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudMachineProviderStatus) DeepCopyInto(out *IBMCloudMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]IBMCloudMachineProviderCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudMachineProviderStatus. +func (in *IBMCloudMachineProviderStatus) DeepCopy() *IBMCloudMachineProviderStatus { + if in == nil { + return nil + } + out := new(IBMCloudMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IBMCloudMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterface) DeepCopyInto(out *NetworkInterface) { + *out = *in + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterface. +func (in *NetworkInterface) DeepCopy() *NetworkInterface { + if in == nil { + return nil + } + out := new(NetworkInterface) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagSpecs) DeepCopyInto(out *TagSpecs) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagSpecs. +func (in *TagSpecs) DeepCopy() *TagSpecs { + if in == nil { + return nil + } + out := new(TagSpecs) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/library-go/pkg/route/routeapihelpers/routeapihelpers.go b/vendor/github.com/openshift/library-go/pkg/route/routeapihelpers/routeapihelpers.go index 4f108144af9..8f9d8b1b694 100644 --- a/vendor/github.com/openshift/library-go/pkg/route/routeapihelpers/routeapihelpers.go +++ b/vendor/github.com/openshift/library-go/pkg/route/routeapihelpers/routeapihelpers.go @@ -7,6 +7,8 @@ import ( routev1 "github.com/openshift/api/route/v1" corev1 "k8s.io/api/core/v1" + kvalidation "k8s.io/apimachinery/pkg/util/validation" + field "k8s.io/apimachinery/pkg/util/validation/field" ) // IngressURI calculates an admitted ingress URI. @@ -42,3 +44,30 @@ func IngressURI(route *routev1.Route, host string) (*url.URL, *routev1.RouteIngr } return nil, nil, fmt.Errorf("no ingress for host %s in route %s in namespace %s", host, route.ObjectMeta.Name, route.ObjectMeta.Namespace) } + +// ValidateHost checks that a route's host name satisfies DNS requirements, with +// the assumption that the caller has already checked for an empty host name. +// Unless the allowNonCompliant annotation is set to true, host name must have +// at least two labels, with each label no more than 63 characters from the set of +// alphanumeric characters, '-' or '.', and must start and end with an alphanumeric +// character. A trailing dot is allowed. The total host name length must be no more +// than 253 characters. +// If allowNonCompliant is set to true, it uses a smaller set of conditions from +// IsDNS1123Subdomain, e.g. character set as described above, and total host name +// length must be no more than 253 characters. 
+func ValidateHost(host string, allowNonCompliant string, hostPath *field.Path) field.ErrorList {
+	result := field.ErrorList{}
+
+	if allowNonCompliant == "true" {
+		errs := kvalidation.IsDNS1123Subdomain(host)
+		if len(errs) != 0 {
+			result = append(result, field.Invalid(hostPath, host, fmt.Sprintf("host must conform to DNS naming conventions: %v", errs)))
+		}
+	} else {
+		errs := kvalidation.IsFullyQualifiedDomainName(hostPath, host)
+		if len(errs) != 0 {
+			result = append(result, field.Invalid(hostPath, host, fmt.Sprintf("host must conform to DNS 1123 naming conventions: %v", errs)))
+		}
+	}
+	return result
+}
diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_consts.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_consts.go
index c840522d21c..9b579de3fb3 100644
--- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_consts.go
+++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_consts.go
@@ -17,7 +17,6 @@ limitations under the License.
 package v1beta1
 
 // Conditions and condition Reasons for the MachineHealthCheck object
-
 const (
 	// RemediationAllowedCondition is set on MachineHealthChecks to show the status of whether the MachineHealthCheck is
 	// allowed to remediate any Machines or whether it is blocked from remediating any further.
@@ -26,4 +25,33 @@ const (
 	// TooManyUnhealthy is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked
 	// from making any further remediations.
 	TooManyUnhealthyReason = "TooManyUnhealthy"
+
+	// ExternalRemediationTemplateAvailable is set on machinehealthchecks when the MachineHealthCheck controller uses external remediation.
+	// ExternalRemediationTemplateAvailable is set to false if the external remediation template is not found.
+	ExternalRemediationTemplateAvailable ConditionType = "ExternalRemediationTemplateAvailable"
+
+	// ExternalRemediationTemplateNotFound is the reason used when a machine health check fails to find the external remediation template.
+	ExternalRemediationTemplateNotFound = "ExternalRemediationTemplateNotFound"
+
+	// ExternalRemediationRequestAvailable is set on machinehealthchecks when the MachineHealthCheck controller uses external remediation.
+	// ExternalRemediationRequestAvailable is set to false if creating the external remediation request fails.
+	ExternalRemediationRequestAvailable ConditionType = "ExternalRemediationRequestAvailable"
+
+	// ExternalRemediationRequestCreationFailed is the reason used when a machine health check fails to create the external remediation request.
+	ExternalRemediationRequestCreationFailed = "ExternalRemediationRequestCreationFailed"
+)
+
+const (
+	// InstanceExistsCondition is set on the Machine to show whether a virtual machine has been created by the cloud provider.
+	InstanceExistsCondition ConditionType = "InstanceExists"
+
+	// ErrorCheckingProviderReason is the reason used when the exists operation fails.
+	// This would normally be because we cannot contact the provider.
+	ErrorCheckingProviderReason = "ErrorCheckingProvider"
+
+	// InstanceMissingReason is the reason used when the machine was provisioned, but the instance has gone missing.
+	InstanceMissingReason = "InstanceMissing"
+
+	// InstanceNotCreatedReason is the reason used when the machine has not yet been provisioned.
+ InstanceNotCreatedReason = "InstanceNotCreated" ) diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/consts.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/consts.go index 363716809a5..e5f81aee82f 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/consts.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/consts.go @@ -16,10 +16,6 @@ limitations under the License. package v1beta1 -// Constants aren't automatically generated for unversioned packages. -// Instead share the same constant for all versioned packages -type MachineStatusError string - const ( // Represents that the combination of configuration in the MachineSpec // is not supported by this cluster. This is not a transient error, but @@ -28,20 +24,6 @@ const ( // Example: the ProviderSpec specifies an instance type that doesn't exist, InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration" - // This indicates that the MachineSpec has been updated in a way that - // is not supported for reconciliation on this cluster. The spec may be - // completely valid from a configuration standpoint, but the controller - // does not support changing the real world state to match the new - // spec. - // - // Example: the responsible controller is not capable of changing the - // container runtime from docker to rkt. - UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange" - - // This generally refers to exceeding one's quota in a cloud provider, - // or running out of physical machines in an on-premise environment. - InsufficientResourcesMachineError MachineStatusError = "InsufficientResources" - // There was an error while trying to create a Node to match this // Machine. This may indicate a transient problem that will be fixed // automatically with time, such as a service outage, or a terminal @@ -66,56 +48,17 @@ const ( // Example: cannot resolve EC2 IP address. DeleteMachineError MachineStatusError = "DeleteError" - // This error indicates that the machine did not join the cluster - // as a new node within the expected timeframe after instance - // creation at the provider succeeded - // - // Example use case: A controller that deletes Machines which do - // not result in a Node joining the cluster within a given timeout - // and that are managed by a MachineSet - JoinClusterTimeoutMachineError = "JoinClusterTimeoutError" -) - -type ClusterStatusError string - -const ( - // InvalidConfigurationClusterError indicates that the cluster - // configuration is invalid. - InvalidConfigurationClusterError ClusterStatusError = "InvalidConfiguration" - - // UnsupportedChangeClusterError indicates that the cluster - // spec has been updated in an unsupported way. That cannot be - // reconciled. - UnsupportedChangeClusterError ClusterStatusError = "UnsupportedChange" - - // CreateClusterError indicates that an error was encountered - // when trying to create the cluster. - CreateClusterError ClusterStatusError = "CreateError" - - // UpdateClusterError indicates that an error was encountered - // when trying to update the cluster. - UpdateClusterError ClusterStatusError = "UpdateError" + // TemplateClonedFromGroupKindAnnotation is the infrastructure machine annotation that stores the group-kind of the infrastructure template resource + // that was cloned for the machine. This annotation is set only during cloning a template. Older/adopted machines will not have this annotation. 
+ TemplateClonedFromGroupKindAnnotation = "machine.openshift.io/cloned-from-groupkind" - // DeleteClusterError indicates that an error was encountered - // when trying to delete the cluster. - DeleteClusterError ClusterStatusError = "DeleteError" + // TemplateClonedFromNameAnnotation is the infrastructure machine annotation that stores the name of the infrastructure template resource + // that was cloned for the machine. This annotation is set only during cloning a template. Older/adopted machines will not have this annotation. + TemplateClonedFromNameAnnotation = "machine.openshift.io/cloned-from-name" ) -type MachineSetStatusError string - -const ( - // Represents that the combination of configuration in the MachineTemplateSpec - // is not supported by this cluster. This is not a transient error, but - // indicates a state that must be fixed before progress can be made. - // - // Example: the ProviderSpec specifies an instance type that doesn't exist. - InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration" -) - -type MachineDeploymentStrategyType string +// Constants aren't automatically generated for unversioned packages. +// Instead share the same constant for all versioned packages +type MachineStatusError string -const ( - // Replace the old MachineSet by new one using rolling update - // i.e. gradually scale down the old MachineSet and scale up the new one. - RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" -) +type MachineSetStatusError string diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go index 6fa53ba670c..a38c8909cfd 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go @@ -59,6 +59,14 @@ type Machine struct { Status MachineStatus `json:"status,omitempty"` } +func (m *Machine) GetConditions() Conditions { + return m.Status.Conditions +} + +func (m *Machine) SetConditions(conditions Conditions) { + m.Status.Conditions = conditions +} + // MachineSpec defines the desired state of Machine type MachineSpec struct { // ObjectMeta will autopopulate the Node created. Use this to @@ -165,6 +173,9 @@ type MachineStatus struct { // One of: Failed, Provisioning, Provisioned, Running, Deleting // +optional Phase *string `json:"phase,omitempty"` + + // Conditions defines the current state of the Machine + Conditions Conditions `json:"conditions,omitempty"` } // LastOperation represents the detail of the last performed operation on the MachineObject. 
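The RawExtension helpers in the vendored register.go above are the seam between the generic Machine API and the IBM Cloud types: Machine.Spec.ProviderSpec carries the provider spec as an opaque runtime.RawExtension, and the actuator decodes it back during reconciliation. Below is a minimal round-trip sketch against the signatures shown above; every resource name in it (VPC, subnet, security group, secrets) is illustrative, not a real cluster value:

```go
// Round-trip sketch: embed an IBM Cloud provider spec in a RawExtension and
// decode it back, the way a Machine's providerSpec field is handled.
package main

import (
	"fmt"

	ibmcloudv1 "github.com/openshift/cluster-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1beta1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// All names below are hypothetical placeholders.
	spec := &ibmcloudv1.IBMCloudMachineProviderSpec{
		VPC:           "example-vpc",
		Image:         "example-rhcos-image",
		Profile:       "bx2-8x32",
		Region:        "us-south",
		Zone:          "us-south-1",
		ResourceGroup: "example-resource-group",
		PrimaryNetworkInterface: ibmcloudv1.NetworkInterface{
			Subnet:         "example-subnet",
			SecurityGroups: []string{"example-security-group"},
		},
		UserDataSecret:    &corev1.LocalObjectReference{Name: "worker-user-data"},
		CredentialsSecret: &corev1.LocalObjectReference{Name: "ibmcloud-credentials"},
	}

	// Marshal the spec into the RawExtension that Machine.Spec.ProviderSpec carries.
	raw, err := ibmcloudv1.RawExtensionFromProviderSpec(spec)
	if err != nil {
		panic(err)
	}

	// Decode it back, as the machine actuator would during reconciliation.
	decoded, err := ibmcloudv1.ProviderSpecFromRawExtension(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Profile, decoded.Zone) // bx2-8x32 us-south-1
}
```

RawExtensionFromProviderStatus and ProviderStatusFromRawExtension work the same way for the Machine.Status.ProviderStatus side.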
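Similarly, ValidateHost in the vendored routeapihelpers.go above switches between two validation modes on the allowNonCompliant string. A small sketch of both paths, assuming only the documented behavior of the upstream IsDNS1123Subdomain and IsFullyQualifiedDomainName validators; the host value is made up:

```go
// Sketch of the two ValidateHost modes; the host name is illustrative.
package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/route/routeapihelpers"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	hostPath := field.NewPath("spec", "host")

	// Default (strict) mode: the host must be a fully qualified domain
	// name with at least two labels, so a single label is rejected.
	if errs := routeapihelpers.ValidateHost("myroute", "false", hostPath); len(errs) != 0 {
		fmt.Println("strict:", errs.ToAggregate())
	}

	// allowNonCompliant mode: only the DNS-1123 subdomain rules apply,
	// so the same single label passes.
	if errs := routeapihelpers.ValidateHost("myroute", "true", hostPath); len(errs) == 0 {
		fmt.Println("relaxed: accepted")
	}
}
```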
diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go index a5299328bce..4257b9cecc4 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go @@ -93,10 +93,9 @@ const ( defaultGCPCredentialsSecret = "gcp-cloud-credentials" defaultGCPDiskSizeGb = 128 defaultGCPDiskType = "pd-standard" - // https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.6/46.82.202007212240-0/x86_64/meta.json - // https://github.com/openshift/installer/pull/3808 - // https://github.com/openshift/installer/blob/d75bf7ad98124b901ae7e22b5595e0392ed6ea3c/data/data/rhcos.json - defaultGCPDiskImage = "projects/rhcos-cloud/global/images/rhcos-46-82-202007212240-0-gcp-x86-64" + // https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.8/48.83.202103122318-0/x86_64/meta.json + // https://github.com/openshift/installer/blob/796a99049d3b7489b6c08ec5bd7c7983731afbcf/data/data/rhcos.json#L90-L94 + defaultGCPDiskImage = "projects/rhcos-cloud/global/images/rhcos-48-83-202103221318-0-gcp-x86-64" // vSphere Defaults defaultVSphereCredentialsSecret = "vsphere-cloud-credentials" @@ -224,27 +223,18 @@ type machineDefaulterHandler struct { } // NewValidator returns a new machineValidatorHandler. -func NewMachineValidator() (*machineValidatorHandler, error) { +func NewMachineValidator(client client.Client) (*machineValidatorHandler, error) { infra, err := getInfra() if err != nil { return nil, err } - cfg, err := ctrl.GetConfig() - if err != nil { - return nil, err - } - c, err := client.New(cfg, client.Options{}) - if err != nil { - return nil, fmt.Errorf("failed to build kubernetes client: %v", err) - } - dns, err := getDNS() if err != nil { return nil, err } - return createMachineValidator(infra, c, dns), nil + return createMachineValidator(infra, client, dns), nil } func createMachineValidator(infra *osconfigv1.Infrastructure, client client.Client, dns *osconfigv1.DNS) *machineValidatorHandler { @@ -351,7 +341,7 @@ func MachineValidatingWebhook() admissionregistrationv1.ValidatingWebhook { Port: pointer.Int32Ptr(defaultWebhookServicePort), } return admissionregistrationv1.ValidatingWebhook{ - AdmissionReviewVersions: []string{"v1beta1"}, + AdmissionReviewVersions: []string{"v1"}, Name: "validation.machine.machine.openshift.io", FailurePolicy: &webhookFailurePolicy, SideEffects: &webhookSideEffects, @@ -383,7 +373,7 @@ func MachineSetValidatingWebhook() admissionregistrationv1.ValidatingWebhook { Port: pointer.Int32Ptr(defaultWebhookServicePort), } return admissionregistrationv1.ValidatingWebhook{ - AdmissionReviewVersions: []string{"v1beta1"}, + AdmissionReviewVersions: []string{"v1"}, Name: "validation.machineset.machine.openshift.io", FailurePolicy: &webhookFailurePolicy, SideEffects: &webhookSideEffects, @@ -436,7 +426,7 @@ func MachineMutatingWebhook() admissionregistrationv1.MutatingWebhook { Port: pointer.Int32Ptr(defaultWebhookServicePort), } return admissionregistrationv1.MutatingWebhook{ - AdmissionReviewVersions: []string{"v1beta1"}, + AdmissionReviewVersions: []string{"v1"}, Name: "default.machine.machine.openshift.io", FailurePolicy: &webhookFailurePolicy, SideEffects: &webhookSideEffects, @@ -467,7 +457,7 @@ func MachineSetMutatingWebhook() admissionregistrationv1.MutatingWebhook { Port: 
pointer.Int32Ptr(defaultWebhookServicePort),
 	}
 	return admissionregistrationv1.MutatingWebhook{
-		AdmissionReviewVersions: []string{"v1beta1"},
+		AdmissionReviewVersions: []string{"v1"},
 		Name:                    "default.machineset.machine.openshift.io",
 		FailurePolicy:           &webhookFailurePolicy,
 		SideEffects:             &webhookSideEffects,
@@ -664,6 +654,11 @@ func validateAWS(m *Machine, config *admissionConfig) (bool, []string, utilerror
 			"providerSpec.subnet: No subnet has been provided. Instances may be created in an unexpected subnet and may not join the cluster.",
 		)
 	}
+
+	if providerSpec.IAMInstanceProfile == nil {
+		warnings = append(warnings, "providerSpec.iamInstanceProfile: no IAM instance profile provided: nodes may be unable to join the cluster")
+	}
+
 	// TODO(alberto): Validate providerSpec.BlockDevices.
 	// https://github.com/openshift/cluster-api-provider-aws/pull/299#discussion_r433920532
diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go
index 6324e0f7452..28bfe29c35f 100644
--- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go
+++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go
@@ -80,6 +80,15 @@ type MachineHealthCheckSpec struct {
 	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
 	// +kubebuilder:validation:Type:=string
 	NodeStartupTimeout metav1.Duration `json:"nodeStartupTimeout,omitempty"`
+
+	// RemediationTemplate is a reference to a remediation template
+	// provided by an infrastructure provider.
+	//
+	// This field is completely optional. When filled, the MachineHealthCheck controller
+	// creates a new object from the template referenced and hands off remediation of the machine to
+	// a controller that lives outside of the Machine API Operator.
+	// +optional
+	RemediationTemplate *corev1.ObjectReference `json:"remediationTemplate,omitempty"`
 }
 
 // UnhealthyCondition represents a Node condition type and value with a timeout
diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go
index 8b6bda1e6f2..7d71ed54133 100644
--- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go
+++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go
@@ -3,14 +3,12 @@
 import (
 	"context"
 	"encoding/json"
-	"fmt"
 	"net/http"
 
 	osconfigv1 "github.com/openshift/api/config/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/klog/v2"
-	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 )
@@ -30,27 +28,18 @@ type machineSetDefaulterHandler struct {
 }
 
 // NewMachineSetValidator returns a new machineSetValidatorHandler.
-func NewMachineSetValidator() (*machineSetValidatorHandler, error) {
+func NewMachineSetValidator(client client.Client) (*machineSetValidatorHandler, error) {
 	infra, err := getInfra()
 	if err != nil {
 		return nil, err
 	}
 
-	cfg, err := ctrl.GetConfig()
-	if err != nil {
-		return nil, err
-	}
-	c, err := client.New(cfg, client.Options{})
-	if err != nil {
-		return nil, fmt.Errorf("failed to build kubernetes client: %v", err)
-	}
-
 	dns, err := getDNS()
 	if err != nil {
 		return nil, err
 	}
 
-	return createMachineSetValidator(infra, c, dns), nil
+	return createMachineSetValidator(infra, client, dns), nil
 }
 
 func createMachineSetValidator(infra *osconfigv1.Infrastructure, client client.Client, dns *osconfigv1.DNS) *machineSetValidatorHandler {
diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go
index 059e3e04dac..7b85b787509 100644
--- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go
@@ -200,6 +200,11 @@ func (in *MachineHealthCheckSpec) DeepCopyInto(out *MachineHealthCheckSpec) {
 		**out = **in
 	}
 	out.NodeStartupTimeout = in.NodeStartupTimeout
+	if in.RemediationTemplate != nil {
+		in, out := &in.RemediationTemplate, &out.RemediationTemplate
+		*out = new(corev1.ObjectReference)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckSpec.
@@ -453,6 +458,13 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) {
 		*out = new(string)
 		**out = **in
 	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make(Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus.
diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore
new file mode 100644
index 00000000000..7b5883475df
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/.dockerignore
@@ -0,0 +1,2 @@
+cmd/tomll/tomll
+cmd/tomljson/tomljson
diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore
new file mode 100644
index 00000000000..e6ba63a5c5c
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/.gitignore
@@ -0,0 +1,5 @@
+test_program/test_program_bin
+fuzz/
+cmd/tomll/tomll
+cmd/tomljson/tomljson
+cmd/tomltestgen/tomltestgen
diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
new file mode 100644
index 00000000000..405c911c903
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
@@ -0,0 +1,132 @@
+## Contributing
+
+Thank you for your interest in go-toml! We appreciate you considering
+contributing to go-toml!
+
+The main goal of the project is to provide an easy-to-use TOML
+implementation for Go that gets the job done and gets out of your way –
+dealing with TOML is probably not the central piece of your project.
+
+As the single maintainer of go-toml, time is scarce. All help, big or
+small, is more than welcome!
+
+### Ask questions
+
+Any question you may have, somebody else might have it too. Always feel
+free to ask them on the [issues tracker][issues-tracker]. 
We will try to
+answer them as clearly and quickly as possible, time permitting.
+
+Asking questions also helps us identify areas where the documentation needs
+improvement, or new features that weren't envisioned before. Sometimes, a
+seemingly innocent question leads to the fix of a bug. Don't hesitate and
+ask away!
+
+### Improve the documentation
+
+The best way to share your knowledge and experience with go-toml is to
+improve the documentation. Fix a typo, clarify an interface, add an
+example, anything goes!
+
+The documentation is present in the [README][readme] and throughout the
+source code. On release, it gets updated on [GoDoc][godoc]. To make a
+change to the documentation, create a pull request with your proposed
+changes. For simple changes like that, the easiest way to go is probably
+the "Fork this project and edit the file" button on Github, displayed at
+the top right of the file. Unless it's a trivial change (for example a
+typo), provide a little bit of context in your pull request description or
+commit message.
+
+### Report a bug
+
+Found a bug! Sorry to hear that :(. Help us and others track it down and
+fix it by reporting it. [File a new bug report][bug-report] on the [issues
+tracker][issues-tracker]. The template should provide enough guidance on
+what to include. When in doubt: add more details! By reducing ambiguity and
+providing more information, it decreases back and forth and saves everyone
+time.
+
+### Code changes
+
+Want to contribute a patch? Very happy to hear that!
+
+First, some high-level rules:
+
+* A short proposal with some POC code is better than a lengthy piece of
+  text with no code. Code speaks louder than words.
+* No backward-incompatible patch will be accepted unless discussed.
+  Sometimes it's hard, and Go's lack of versioning by default does not
+  help, but we try not to break people's programs unless we absolutely have
+  to.
+* If you are writing a new feature or extending an existing one, make sure
+  to write some documentation.
+* Bug fixes need to be accompanied with regression tests.
+* New code needs to be tested.
+* Your commit messages need to explain why the change is needed, even if
+  already included in the PR description.
+
+It does sound like a lot, but those best practices are here to save time
+overall and continuously improve the quality of the project, which is
+something everyone benefits from.
+
+#### Get started
+
+The fairly standard code contribution process looks like this:
+
+1. [Fork the project][fork].
+2. Make your changes, commit on any branch you like.
+3. [Open up a pull request][pull-request]
+4. Review, potentially with requests for changes.
+5. Merge. You're in!
+
+Feel free to ask for help! You can create draft pull requests to gather
+some early feedback!
+
+#### Run the tests
+
+You can run tests for go-toml using Go's test tool: `go test ./...`.
+When creating a pull request, all tests will be run on Linux on a few Go
+versions (Travis CI), and on Windows using the latest Go version
+(AppVeyor).
+
+#### Style
+
+Try to look around and follow the same format and structure as the rest of
+the code. We enforce using `go fmt` on the whole code base.
+
+---
+
+### Maintainers-only
+
+#### Merge pull request
+
+Checklist:
+
+* Passing CI.
+* Does not introduce backward-incompatible changes (unless discussed).
+* Has relevant doc changes.
+* Has relevant unit tests.
+
+1. Merge using "squash and merge".
+2. Make sure to edit the commit message to keep all the useful information
+   nice and clean.
+3. 
Make sure the commit title is clear and contains the PR number (#123). + +#### New release + +1. Go to [releases][releases]. Click on "X commits to master since this + release". +2. Make note of all the changes. Look for backward incompatible changes, + new features, and bug fixes. +3. Pick the new version using the above and semver. +4. Create a [new release][new-release]. +5. Follow the same format as [1.1.0][release-110]. + +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[godoc]: https://godoc.org/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[releases]: https://github.com/pelletier/go-toml/releases +[new-release]: https://github.com/pelletier/go-toml/releases/new +[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile new file mode 100644 index 00000000000..fffdb016668 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1.12-alpine3.9 as builder +WORKDIR /go/src/github.com/pelletier/go-toml +COPY . . +ENV CGO_ENABLED=0 +ENV GOOS=linux +RUN go install ./... + +FROM scratch +COPY --from=builder /go/bin/tomll /usr/bin/tomll +COPY --from=builder /go/bin/tomljson /usr/bin/tomljson +COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE new file mode 100644 index 00000000000..583bdae6282 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile new file mode 100644 index 00000000000..9e4503aea65 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Makefile @@ -0,0 +1,29 @@ +export CGO_ENABLED=0 +go := go +go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) +go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) + +out.tools := tomll tomljson jsontoml +out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) +sources := $(wildcard **/*.go) + + +.PHONY: +tools: $(out.tools) + +$(out.tools): $(sources) + GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ + +.PHONY: +dist: $(out.dist) + +$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % + if [ "$(go.goos)" = "windows" ]; then \ + tar -cJf $@ $^.exe; \ + else \ + tar -cJf $@ $^; \ + fi + +.PHONY: +clean: + rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..041cdc4a2f1 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,5 @@ +**Issue:** add link to pelletier/go-toml issue here + +Explanation of what this pull request does. + +More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md new file mode 100644 index 00000000000..6831deb5bd1 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -0,0 +1,151 @@ +# go-toml + +Go library for the [TOML](https://github.com/mojombo/toml) format. + +This library supports TOML version +[v1.0.0-rc.1](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v1.0.0-rc.1.md) + +[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml) +[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) +[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) +[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) +[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) + +## Features + +Go-toml provides the following features for using data parsed from TOML documents: + +* Load TOML documents from files and string data +* Easily navigate TOML structure using Tree +* Marshaling and unmarshaling to and from data structures +* Line & column position data for all parsed elements +* [Query support similar to JSON-Path](query/) +* Syntax errors contain line and column numbers + +## Import + +```go +import "github.com/pelletier/go-toml" +``` + +## Usage example + +Read a TOML document: + +```go +config, _ := toml.Load(` +[postgres] +user = "pelletier" +password = "mypassword"`) +// retrieve data directly +user := config.Get("postgres.user").(string) + +// or using an intermediate object +postgresConfig := 
config.Get("postgres").(*toml.Tree) +password := postgresConfig.Get("password").(string) +``` + +Or use Unmarshal: + +```go +type Postgres struct { + User string + Password string +} +type Config struct { + Postgres Postgres +} + +doc := []byte(` +[Postgres] +User = "pelletier" +Password = "mypassword"`) + +config := Config{} +toml.Unmarshal(doc, &config) +fmt.Println("user=", config.Postgres.User) +``` + +Or use a query: + +```go +// use a query to gather elements without walking the tree +q, _ := query.Compile("$..[user,password]") +results := q.Execute(config) +for ii, item := range results.Values() { + fmt.Printf("Query result %d: %v\n", ii, item) +} +``` + +## Documentation + +The documentation and additional examples are available at +[godoc.org](http://godoc.org/github.com/pelletier/go-toml). + +## Tools + +Go-toml provides two handy command line tools: + +* `tomll`: Reads TOML files and lints them. + + ``` + go install github.com/pelletier/go-toml/cmd/tomll + tomll --help + ``` +* `tomljson`: Reads a TOML file and outputs its JSON representation. + + ``` + go install github.com/pelletier/go-toml/cmd/tomljson + tomljson --help + ``` + + * `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + go install github.com/pelletier/go-toml/cmd/jsontoml + jsontoml --help + ``` + +### Docker image + +Those tools are also availble as a Docker image from +[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to +use `tomljson`: + +``` +docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml +``` + +Only master (`latest`) and tagged versions are published to dockerhub. You +can build your own image as usual: + +``` +docker build -t go-toml . +``` + +## Contribute + +Feel free to report bugs and patches using GitHub's pull requests system on +[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be +much appreciated! + +### Run tests + +`go test ./...` + +### Fuzzing + +The script `./fuzz.sh` is available to +run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. + +## Versioning + +Go-toml follows [Semantic Versioning](http://semver.org/). The supported version +of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of +this document. The last two major versions of Go are supported +(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). + +## License + +The MIT License (MIT). Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml new file mode 100644 index 00000000000..242b5b5403b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml @@ -0,0 +1,230 @@ +trigger: +- master + +stages: +- stage: fuzzit + displayName: "Run Fuzzit" + dependsOn: [] + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: submit + displayName: "Submit" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.14" + inputs: + version: "1.14" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . 
${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: fuzzing + FUZZIT_API_KEY: $(FUZZIT_API_KEY) + +- stage: run_checks + displayName: "Check" + dependsOn: [] + jobs: + - job: fmt + displayName: "fmt" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.14" + inputs: + version: "1.14" + - task: Go@0 + displayName: "go fmt ./..." + inputs: + command: 'custom' + customCommand: 'fmt' + arguments: './...' + - job: coverage + displayName: "coverage" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.14" + inputs: + version: "1.14" + - task: Go@0 + displayName: "Generate coverage" + inputs: + command: 'test' + arguments: "-race -coverprofile=coverage.txt -covermode=atomic" + - task: Bash@3 + inputs: + targetType: 'inline' + script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' + env: + CODECOV_TOKEN: $(CODECOV_TOKEN) + - job: benchmark + displayName: "benchmark" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.14" + inputs: + version: "1.14" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - task: Bash@3 + inputs: + filePath: './benchmark.sh' + arguments: "master $(Build.Repository.Uri)" + + - job: fuzzing + displayName: "fuzzing" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.14" + inputs: + version: "1.14" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: local-regression + + - job: go_unit_tests + displayName: "unit tests" + strategy: + matrix: + linux 1.14: + goVersion: '1.14' + imageName: 'ubuntu-latest' + mac 1.14: + goVersion: '1.14' + imageName: 'macOS-latest' + windows 1.14: + goVersion: '1.14' + imageName: 'windows-latest' + linux 1.13: + goVersion: '1.13' + imageName: 'ubuntu-latest' + mac 1.13: + goVersion: '1.13' + imageName: 'macOS-latest' + windows 1.13: + goVersion: '1.13' + imageName: 'windows-latest' + pool: + vmImage: $(imageName) + steps: + - task: GoTool@0 + displayName: "Install Go $(goVersion)" + inputs: + version: $(goVersion) + - task: Go@0 + displayName: "go test ./..." + inputs: + command: 'test' + arguments: './...' 
+- stage: build_binaries + displayName: "Build binaries" + dependsOn: run_checks + jobs: + - job: build_binary + displayName: "Build binary" + strategy: + matrix: + linux_amd64: + GOOS: linux + GOARCH: amd64 + darwin_amd64: + GOOS: darwin + GOARCH: amd64 + windows_amd64: + GOOS: windows + GOARCH: amd64 + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go" + inputs: + version: 1.14 + - task: Bash@3 + inputs: + targetType: inline + script: "make dist" + env: + go.goos: $(GOOS) + go.goarch: $(GOARCH) + - task: CopyFiles@2 + inputs: + sourceFolder: '$(Build.SourcesDirectory)' + contents: '*.tar.xz' + TargetFolder: '$(Build.ArtifactStagingDirectory)' + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: binaries +- stage: build_binaries_manifest + displayName: "Build binaries manifest" + dependsOn: build_binaries + jobs: + - job: build_manifest + displayName: "Build binaries manifest" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: 'current' + downloadType: 'single' + artifactName: 'binaries' + downloadPath: '$(Build.SourcesDirectory)' + - task: Bash@3 + inputs: + targetType: inline + script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: manifest + +- stage: build_docker_image + displayName: "Build Docker image" + dependsOn: run_checks + jobs: + - job: build + displayName: "Build" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + command: 'build' + Dockerfile: 'Dockerfile' + buildContext: '.' + addPipelineData: false + +- stage: publish_docker_image + displayName: "Publish Docker image" + dependsOn: build_docker_image + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: publish + displayName: "Publish" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + containerRegistry: 'DockerHub' + repository: 'pelletier/go-toml' + command: 'buildAndPush' + Dockerfile: 'Dockerfile' + buildContext: '.' 
+ tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.json b/vendor/github.com/pelletier/go-toml/benchmark.json new file mode 100644 index 00000000000..86f99c6a877 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.json @@ -0,0 +1,164 @@ +{ + "array": { + "key1": [ + 1, + 2, + 3 + ], + "key2": [ + "red", + "yellow", + "green" + ], + "key3": [ + [ + 1, + 2 + ], + [ + 3, + 4, + 5 + ] + ], + "key4": [ + [ + 1, + 2 + ], + [ + "a", + "b", + "c" + ] + ], + "key5": [ + 1, + 2, + 3 + ], + "key6": [ + 1, + 2 + ] + }, + "boolean": { + "False": false, + "True": true + }, + "datetime": { + "key1": "1979-05-27T07:32:00Z", + "key2": "1979-05-27T00:32:00-07:00", + "key3": "1979-05-27T00:32:00.999999-07:00" + }, + "float": { + "both": { + "key": 6.626e-34 + }, + "exponent": { + "key1": 5e+22, + "key2": 1000000, + "key3": -0.02 + }, + "fractional": { + "key1": 1, + "key2": 3.1415, + "key3": -0.01 + }, + "underscores": { + "key1": 9224617.445991227, + "key2": 1e+100 + } + }, + "fruit": [{ + "name": "apple", + "physical": { + "color": "red", + "shape": "round" + }, + "variety": [{ + "name": "red delicious" + }, + { + "name": "granny smith" + } + ] + }, + { + "name": "banana", + "variety": [{ + "name": "plantain" + }] + } + ], + "integer": { + "key1": 99, + "key2": 42, + "key3": 0, + "key4": -17, + "underscores": { + "key1": 1000, + "key2": 5349221, + "key3": 12345 + } + }, + "products": [{ + "name": "Hammer", + "sku": 738594937 + }, + {}, + { + "color": "gray", + "name": "Nail", + "sku": 284758393 + } + ], + "string": { + "basic": { + "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF." + }, + "literal": { + "multiline": { + "lines": "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n", + "regex2": "I [dw]on't need \\d{2} apples" + }, + "quoted": "Tom \"Dubs\" Preston-Werner", + "regex": "\u003c\\i\\c*\\s*\u003e", + "winpath": "C:\\Users\\nodejs\\templates", + "winpath2": "\\\\ServerX\\admin$\\system32\\" + }, + "multiline": { + "continued": { + "key1": "The quick brown fox jumps over the lazy dog.", + "key2": "The quick brown fox jumps over the lazy dog.", + "key3": "The quick brown fox jumps over the lazy dog." + }, + "key1": "One\nTwo", + "key2": "One\nTwo", + "key3": "One\nTwo" + } + }, + "table": { + "inline": { + "name": { + "first": "Tom", + "last": "Preston-Werner" + }, + "point": { + "x": 1, + "y": 2 + } + }, + "key": "value", + "subtable": { + "key": "another value" + } + }, + "x": { + "y": { + "z": { + "w": {} + } + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh new file mode 100644 index 00000000000..7914fff49c9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -ex + +reference_ref=${1:-master} +reference_git=${2:-.} + +if ! `hash benchstat 2>/dev/null`; then + echo "Installing benchstat" + go get golang.org/x/perf/cmd/benchstat +fi + +tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` +ref_tempdir="${tempdir}/ref" +ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" +local_benchmark="`pwd`/benchmark-local.txt" + +echo "=== ${reference_ref} (${ref_tempdir})" +git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null +pushd ${ref_tempdir} >/dev/null +git checkout ${reference_ref} >/dev/null 2>/dev/null +go test -bench=. -benchmem | tee ${ref_benchmark} +popd >/dev/null + +echo "" +echo "=== local" +go test -bench=. 
-benchmem | tee ${local_benchmark} + +echo "" +echo "=== diff" +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/benchmark.toml b/vendor/github.com/pelletier/go-toml/benchmark.toml new file mode 100644 index 00000000000..dfd77e09622 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.toml @@ -0,0 +1,244 @@ +################################################################################ +## Comment + +# Speak your mind with the hash symbol. They go from the symbol to the end of +# the line. + + +################################################################################ +## Table + +# Tables (also known as hash tables or dictionaries) are collections of +# key/value pairs. They appear in square brackets on a line by themselves. + +[table] + +key = "value" # Yeah, you can do this. + +# Nested tables are denoted by table names with dots in them. Name your tables +# whatever crap you please, just don't use #, ., [ or ]. + +[table.subtable] + +key = "another value" + +# You don't need to specify all the super-tables if you don't want to. TOML +# knows how to do it for you. + +# [x] you +# [x.y] don't +# [x.y.z] need these +[x.y.z.w] # for this to work + + +################################################################################ +## Inline Table + +# Inline tables provide a more compact syntax for expressing tables. They are +# especially useful for grouped data that can otherwise quickly become verbose. +# Inline tables are enclosed in curly braces `{` and `}`. No newlines are +# allowed between the curly braces unless they are valid within a value. + +[table.inline] + +name = { first = "Tom", last = "Preston-Werner" } +point = { x = 1, y = 2 } + + +################################################################################ +## String + +# There are four ways to express strings: basic, multi-line basic, literal, and +# multi-line literal. All strings must contain only valid UTF-8 characters. + +[string.basic] + +basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF." + +[string.multiline] + +# The following strings are byte-for-byte equivalent: +key1 = "One\nTwo" +key2 = """One\nTwo""" +key3 = """ +One +Two""" + +[string.multiline.continued] + +# The following strings are byte-for-byte equivalent: +key1 = "The quick brown fox jumps over the lazy dog." + +key2 = """ +The quick brown \ + + + fox jumps over \ + the lazy dog.""" + +key3 = """\ + The quick brown \ + fox jumps over \ + the lazy dog.\ + """ + +[string.literal] + +# What you see is what you get. +winpath = 'C:\Users\nodejs\templates' +winpath2 = '\\ServerX\admin$\system32\' +quoted = 'Tom "Dubs" Preston-Werner' +regex = '<\i\c*\s*>' + + +[string.literal.multiline] + +regex2 = '''I [dw]on't need \d{2} apples''' +lines = ''' +The first newline is +trimmed in raw strings. + All other whitespace + is preserved. +''' + + +################################################################################ +## Integer + +# Integers are whole numbers. Positive numbers may be prefixed with a plus sign. +# Negative numbers are prefixed with a minus sign. + +[integer] + +key1 = +99 +key2 = 42 +key3 = 0 +key4 = -17 + +[integer.underscores] + +# For large numbers, you may use underscores to enhance readability. Each +# underscore must be surrounded by at least one digit. 
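+# For example, 1_000 below denotes the same integer value as 1000.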
+key1 = 1_000 +key2 = 5_349_221 +key3 = 1_2_3_4_5 # valid but inadvisable + + +################################################################################ +## Float + +# A float consists of an integer part (which may be prefixed with a plus or +# minus sign) followed by a fractional part and/or an exponent part. + +[float.fractional] + +key1 = +1.0 +key2 = 3.1415 +key3 = -0.01 + +[float.exponent] + +key1 = 5e+22 +key2 = 1e6 +key3 = -2E-2 + +[float.both] + +key = 6.626e-34 + +[float.underscores] + +key1 = 9_224_617.445_991_228_313 +key2 = 1e1_00 + + +################################################################################ +## Boolean + +# Booleans are just the tokens you're used to. Always lowercase. + +[boolean] + +True = true +False = false + + +################################################################################ +## Datetime + +# Datetimes are RFC 3339 dates. + +[datetime] + +key1 = 1979-05-27T07:32:00Z +key2 = 1979-05-27T00:32:00-07:00 +key3 = 1979-05-27T00:32:00.999999-07:00 + + +################################################################################ +## Array + +# Arrays are square brackets with other primitives inside. Whitespace is +# ignored. Elements are separated by commas. Data types may not be mixed. + +[array] + +key1 = [ 1, 2, 3 ] +key2 = [ "red", "yellow", "green" ] +key3 = [ [ 1, 2 ], [3, 4, 5] ] +#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok + +# Arrays can also be multiline. So in addition to ignoring whitespace, arrays +# also ignore newlines between the brackets. Terminating commas are ok before +# the closing bracket. + +key5 = [ + 1, 2, 3 +] +key6 = [ + 1, + 2, # this is ok +] + + +################################################################################ +## Array of Tables + +# These can be expressed by using a table name in double brackets. Each table +# with the same double bracketed name will be an element in the array. The +# tables are inserted in the order encountered. + +[[products]] + +name = "Hammer" +sku = 738594937 + +[[products]] + +[[products]] + +name = "Nail" +sku = 284758393 +color = "gray" + + +# You can create nested arrays of tables as well. 
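+# Inside an array element, single brackets ([fruit.physical]) define a
+# sub-table of that element, while [[fruit.variety]] defines a nested
+# array of tables.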
+ +[[fruit]] + name = "apple" + + [fruit.physical] + color = "red" + shape = "round" + + [[fruit.variety]] + name = "red delicious" + + [[fruit.variety]] + name = "granny smith" + +[[fruit]] + name = "banana" + + [[fruit.variety]] + name = "plantain" diff --git a/vendor/github.com/pelletier/go-toml/benchmark.yml b/vendor/github.com/pelletier/go-toml/benchmark.yml new file mode 100644 index 00000000000..0bd19f08a69 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.yml @@ -0,0 +1,121 @@ +--- +array: + key1: + - 1 + - 2 + - 3 + key2: + - red + - yellow + - green + key3: + - - 1 + - 2 + - - 3 + - 4 + - 5 + key4: + - - 1 + - 2 + - - a + - b + - c + key5: + - 1 + - 2 + - 3 + key6: + - 1 + - 2 +boolean: + 'False': false + 'True': true +datetime: + key1: '1979-05-27T07:32:00Z' + key2: '1979-05-27T00:32:00-07:00' + key3: '1979-05-27T00:32:00.999999-07:00' +float: + both: + key: 6.626e-34 + exponent: + key1: 5.0e+22 + key2: 1000000 + key3: -0.02 + fractional: + key1: 1 + key2: 3.1415 + key3: -0.01 + underscores: + key1: 9224617.445991227 + key2: 1.0e+100 +fruit: +- name: apple + physical: + color: red + shape: round + variety: + - name: red delicious + - name: granny smith +- name: banana + variety: + - name: plantain +integer: + key1: 99 + key2: 42 + key3: 0 + key4: -17 + underscores: + key1: 1000 + key2: 5349221 + key3: 12345 +products: +- name: Hammer + sku: 738594937 +- {} +- color: gray + name: Nail + sku: 284758393 +string: + basic: + basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF." + literal: + multiline: + lines: | + The first newline is + trimmed in raw strings. + All other whitespace + is preserved. + regex2: I [dw]on't need \d{2} apples + quoted: Tom "Dubs" Preston-Werner + regex: "<\\i\\c*\\s*>" + winpath: C:\Users\nodejs\templates + winpath2: "\\\\ServerX\\admin$\\system32\\" + multiline: + continued: + key1: The quick brown fox jumps over the lazy dog. + key2: The quick brown fox jumps over the lazy dog. + key3: The quick brown fox jumps over the lazy dog. + key1: |- + One + Two + key2: |- + One + Two + key3: |- + One + Two +table: + inline: + name: + first: Tom + last: Preston-Werner + point: + x: 1 + y: 2 + key: value + subtable: + key: another value +x: + y: + z: + w: {} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go new file mode 100644 index 00000000000..a1406a32b38 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -0,0 +1,23 @@ +// Package toml is a TOML parser and manipulation library. +// +// This version supports the specification as described in +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md +// +// Marshaling +// +// Go-toml can marshal and unmarshal TOML documents from and to data +// structures. +// +// TOML document as a tree +// +// Go-toml can operate on a TOML document as a tree. Use one of the Load* +// functions to parse TOML data and obtain a Tree instance, then one of its +// methods to manipulate the tree. +// +// JSONPath-like queries +// +// The package github.com/pelletier/go-toml/query implements a system +// similar to JSONPath to quickly retrieve elements of a TOML document using a +// single expression. See the package documentation for more information. 
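+//
+// For example, a minimal sketch of encoding a value with Marshal (the
+// Config type here is hypothetical):
+//
+//   type Config struct {
+//       Postgres struct{ User, Password string }
+//   }
+//
+//   out, err := toml.Marshal(Config{})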
+// +package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml new file mode 100644 index 00000000000..780d9c68f2d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml new file mode 100644 index 00000000000..f45bf88b8f6 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go new file mode 100644 index 00000000000..14570c8d357 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package toml + +func Fuzz(data []byte) int { + tree, err := LoadBytes(data) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + str, err := tree.ToTomlString() + if err != nil { + if str != "" { + panic(`str must be "" if there is an error`) + } + panic(err) + } + + tree, err = Load(str) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh new file mode 100644 index 00000000000..3204b4c4463 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.sh @@ -0,0 +1,15 @@ +#! /bin/sh +set -eu + +go get github.com/dvyukov/go-fuzz/go-fuzz +go get github.com/dvyukov/go-fuzz/go-fuzz-build + +if [ ! 
-e toml-fuzz.zip ]; then + go-fuzz-build github.com/pelletier/go-toml +fi + +rm -fr fuzz +mkdir -p fuzz/corpus +cp *.toml fuzz/corpus + +go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/fuzzit.sh b/vendor/github.com/pelletier/go-toml/fuzzit.sh new file mode 100644 index 00000000000..b575a6081f0 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzzit.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -xe + +# go-fuzz doesn't support modules yet, so ensure we do everything +# in the old style GOPATH way +export GO111MODULE="off" + +# install go-fuzz +go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build + +# target name can only contain lower-case letters (a-z), digits (0-9) and a dash (-) +# to add another target, make sure to create it with `fuzzit create target` +# before using `fuzzit create job` +TARGET=toml-fuzzer + +go-fuzz-build -libfuzzer -o ${TARGET}.a github.com/pelletier/go-toml +clang -fsanitize=fuzzer ${TARGET}.a -o ${TARGET} + +# install fuzzit for talking to fuzzit.dev service +# or latest version: +# https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzzit_Linux_x86_64 +wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64 +chmod a+x fuzzit + +# TODO: change kkowalczyk to go-toml and create toml-fuzzer target there +./fuzzit create job --type $TYPE go-toml/${TARGET} ${TARGET} diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod new file mode 100644 index 00000000000..c7faa6b3e11 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/go.mod @@ -0,0 +1,9 @@ +module github.com/pelletier/go-toml + +go 1.12 + +require ( + github.com/BurntSushi/toml v0.3.1 + github.com/davecgh/go-spew v1.1.1 + gopkg.in/yaml.v2 v2.3.0 +) diff --git a/vendor/github.com/pelletier/go-toml/go.sum b/vendor/github.com/pelletier/go-toml/go.sum new file mode 100644 index 00000000000..6f356470d7c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/go.sum @@ -0,0 +1,19 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go new file mode 100644 index 00000000000..e091500b246 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -0,0 +1,112 @@ +// Parsing keys handling both bare and quoted keys. + +package toml + +import ( + "errors" + "fmt" +) + +// Convert the bare key group string to an array. +// The input supports double quotation and single quotation, +// but escape sequences are not supported. Lexers must unescape them beforehand. +func parseKey(key string) ([]string, error) { + runes := []rune(key) + var groups []string + + if len(key) == 0 { + return nil, errors.New("empty key") + } + + idx := 0 + for idx < len(runes) { + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip leading whitespace + } + if idx >= len(runes) { + break + } + r := runes[idx] + if isValidBareChar(r) { + // parse bare key + startIdx := idx + endIdx := -1 + idx++ + for idx < len(runes) { + r = runes[idx] + if isValidBareChar(r) { + idx++ + } else if r == '.' { + endIdx = idx + break + } else if isSpace(r) { + endIdx = idx + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip trailing whitespace + } + if idx < len(runes) && runes[idx] != '.' { + return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) + } + break + } else { + return nil, fmt.Errorf("invalid bare key character: %c", r) + } + } + if endIdx == -1 { + endIdx = idx + } + groups = append(groups, string(runes[startIdx:endIdx])) + } else if r == '\'' { + // parse single quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed single-quoted key") + } + r = runes[idx] + if r == '\'' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '"' { + // parse double quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed double-quoted key") + } + r = runes[idx] + if r == '"' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '.' { + idx++ + if idx >= len(runes) { + return nil, fmt.Errorf("unexpected end of key") + } + r = runes[idx] + if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { + return nil, fmt.Errorf("expecting key part after dot") + } + } else { + return nil, fmt.Errorf("invalid key character: %c", r) + } + } + if len(groups) == 0 { + return nil, fmt.Errorf("empty key") + } + return groups, nil +} + +func isValidBareChar(r rune) bool { + return isAlphanumeric(r) || r == '-' || isDigit(r) +} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go new file mode 100644 index 00000000000..425e847a7aa --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -0,0 +1,801 @@ +// TOML lexer. 
+// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +var dateRegexp *regexp.Regexp + +// Define state functions +type tomlLexStateFn func() tomlLexStateFn + +// Define lexer +type tomlLexer struct { + inputIdx int + input []rune // Textual source + currentTokenStart int + currentTokenStop int + tokens []token + brackets []rune + line int + col int + endbufferLine int + endbufferCol int +} + +// Basic read operations on input + +func (l *tomlLexer) read() rune { + r := l.peek() + if r == '\n' { + l.endbufferLine++ + l.endbufferCol = 1 + } else { + l.endbufferCol++ + } + l.inputIdx++ + return r +} + +func (l *tomlLexer) next() rune { + r := l.read() + + if r != eof { + l.currentTokenStop++ + } + return r +} + +func (l *tomlLexer) ignore() { + l.currentTokenStart = l.currentTokenStop + l.line = l.endbufferLine + l.col = l.endbufferCol +} + +func (l *tomlLexer) skip() { + l.next() + l.ignore() +} + +func (l *tomlLexer) fastForward(n int) { + for i := 0; i < n; i++ { + l.next() + } +} + +func (l *tomlLexer) emitWithValue(t tokenType, value string) { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + }) + l.ignore() +} + +func (l *tomlLexer) emit(t tokenType) { + l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) +} + +func (l *tomlLexer) peek() rune { + if l.inputIdx >= len(l.input) { + return eof + } + return l.input[l.inputIdx] +} + +func (l *tomlLexer) peekString(size int) string { + maxIdx := len(l.input) + upperIdx := l.inputIdx + size // FIXME: potential overflow + if upperIdx > maxIdx { + upperIdx = maxIdx + } + return string(l.input[l.inputIdx:upperIdx]) +} + +func (l *tomlLexer) follow(next string) bool { + return next == l.peekString(len(next)) +} + +// Error management + +func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + }) + return nil +} + +// State functions + +func (l *tomlLexer) lexVoid() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '}': // after '{' + return l.lexRightCurlyBrace + case '[': + return l.lexTableKey + case '#': + return l.lexComment(l.lexVoid) + case '=': + return l.lexEqual + case '\r': + fallthrough + case '\n': + l.skip() + continue + } + + if isSpace(next) { + l.skip() + } + + if isKeyStartChar(next) { + return l.lexKey + } + + if next == eof { + l.next() + break + } + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexRvalue() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '.': + return l.errorf("cannot start float with a dot") + case '=': + return l.lexEqual + case '[': + return l.lexLeftBracket + case ']': + return l.lexRightBracket + case '{': + return l.lexLeftCurlyBrace + case '}': + return l.lexRightCurlyBrace + case '#': + return l.lexComment(l.lexRvalue) + case '"': + return l.lexString + case '\'': + return l.lexLiteralString + case ',': + return l.lexComma + case '\r': + fallthrough + case '\n': + l.skip() + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { + return l.lexRvalue + } + return l.lexVoid + } + + if l.follow("true") { + return l.lexTrue + } + + if l.follow("false") { + return l.lexFalse + } + + if l.follow("inf") { + return l.lexInf + } + + if l.follow("nan") { + return l.lexNan + } + 
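+		// Not a keyword: skip whitespace, then try to match a date/time
+		// (via dateRegexp) or a number; anything else is a lexing error.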
+ if isSpace(next) { + l.skip() + continue + } + + if next == eof { + l.next() + break + } + + possibleDate := l.peekString(35) + dateSubmatches := dateRegexp.FindStringSubmatch(possibleDate) + if dateSubmatches != nil && dateSubmatches[0] != "" { + l.fastForward(len(dateSubmatches[0])) + if dateSubmatches[2] == "" { // no timezone information => local date + return l.lexLocalDate + } + return l.lexDate + } + + if next == '+' || next == '-' || isDigit(next) { + return l.lexNumber + } + + return l.errorf("no value can start with %c", next) + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenLeftCurlyBrace) + l.brackets = append(l.brackets, '{') + return l.lexVoid +} + +func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenRightCurlyBrace) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { + return l.errorf("cannot have '}' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +func (l *tomlLexer) lexDate() tomlLexStateFn { + l.emit(tokenDate) + return l.lexRvalue +} + +func (l *tomlLexer) lexLocalDate() tomlLexStateFn { + l.emit(tokenLocalDate) + return l.lexRvalue +} + +func (l *tomlLexer) lexTrue() tomlLexStateFn { + l.fastForward(4) + l.emit(tokenTrue) + return l.lexRvalue +} + +func (l *tomlLexer) lexFalse() tomlLexStateFn { + l.fastForward(5) + l.emit(tokenFalse) + return l.lexRvalue +} + +func (l *tomlLexer) lexInf() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenInf) + return l.lexRvalue +} + +func (l *tomlLexer) lexNan() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenNan) + return l.lexRvalue +} + +func (l *tomlLexer) lexEqual() tomlLexStateFn { + l.next() + l.emit(tokenEqual) + return l.lexRvalue +} + +func (l *tomlLexer) lexComma() tomlLexStateFn { + l.next() + l.emit(tokenComma) + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { + return l.lexVoid + } + return l.lexRvalue +} + +// Parse the key and emits its value without escape sequences. +// bare keys, basic string keys and literal string keys are supported. +func (l *tomlLexer) lexKey() tomlLexStateFn { + growingString := "" + + for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { + if r == '"' { + l.next() + str, err := l.lexStringAsString(`"`, false, true) + if err != nil { + return l.errorf(err.Error()) + } + growingString += "\"" + str + "\"" + l.next() + continue + } else if r == '\'' { + l.next() + str, err := l.lexLiteralStringAsString(`'`, false) + if err != nil { + return l.errorf(err.Error()) + } + growingString += "'" + str + "'" + l.next() + continue + } else if r == '\n' { + return l.errorf("keys cannot contain new lines") + } else if isSpace(r) { + str := " " + // skip trailing whitespace + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str += string(r) + l.next() + } + // break loop if not a dot + if r != '.' { + break + } + str += "." + // skip trailing whitespace after dot + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str += string(r) + l.next() + } + growingString += str + continue + } else if r == '.' 
{ + // skip + } else if !isValidBareChar(r) { + return l.errorf("keys cannot contain %c character", r) + } + growingString += string(r) + l.next() + } + l.emitWithValue(tokenKey, growingString) + return l.lexVoid +} + +func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { + return func() tomlLexStateFn { + for next := l.peek(); next != '\n' && next != eof; next = l.peek() { + if next == '\r' && l.follow("\r\n") { + break + } + l.next() + } + l.ignore() + return previousState + } +} + +func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { + l.next() + l.emit(tokenLeftBracket) + l.brackets = append(l.brackets, '[') + return l.lexRvalue +} + +func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { + growingString := "" + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + // find end of string + for { + if l.follow(terminator) { + return growingString, nil + } + + next := l.peek() + if next == eof { + break + } + growingString += string(l.next()) + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexLiteralString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := "'" + discardLeadingNewLine := false + if l.follow("''") { + l.skip() + l.skip() + terminator = "'''" + discardLeadingNewLine = true + } + + str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +// Lex a string and return the results as a string. +// Terminator is the substring indicating the end of the token. +// The resulting string does not include the terminator. 
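+// Escape sequences are decoded as the string is read; unescaped control
+// characters (other than tab) are rejected as errors.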
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { + growingString := "" + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + for { + if l.follow(terminator) { + return growingString, nil + } + + if l.follow("\\") { + l.next() + switch l.peek() { + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + // skip all whitespace chars following backslash + for strings.ContainsRune("\r\n\t ", l.peek()) { + l.next() + } + case '"': + growingString += "\"" + l.next() + case 'n': + growingString += "\n" + l.next() + case 'b': + growingString += "\b" + l.next() + case 'f': + growingString += "\f" + l.next() + case '/': + growingString += "/" + l.next() + case 't': + growingString += "\t" + l.next() + case 'r': + growingString += "\r" + l.next() + case '\\': + growingString += "\\" + l.next() + case 'u': + l.next() + code := "" + for i := 0; i < 4; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code = code + string(c) + } + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return "", errors.New("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + case 'U': + l.next() + code := "" + for i := 0; i < 8; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code = code + string(c) + } + intcode, err := strconv.ParseInt(code, 16, 64) + if err != nil { + return "", errors.New("invalid unicode escape: \\U" + code) + } + growingString += string(rune(intcode)) + default: + return "", errors.New("invalid escape sequence: \\" + string(l.peek())) + } + } else { + r := l.peek() + + if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { + return "", fmt.Errorf("unescaped control character %U", r) + } + l.next() + growingString += string(r) + } + + if l.peek() == eof { + break + } + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := `"` + discardLeadingNewLine := false + acceptNewLines := false + if l.follow(`""`) { + l.skip() + l.skip() + terminator = `"""` + discardLeadingNewLine = true + acceptNewLines = true + } + + str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +func (l *tomlLexer) lexTableKey() tomlLexStateFn { + l.next() + + if l.peek() == '[' { + // token '[[' signifies an array of tables + l.next() + l.emit(tokenDoubleLeftBracket) + return l.lexInsideTableArrayKey + } + // vanilla table key + l.emit(tokenLeftBracket) + return l.lexInsideTableKey +} + +// Parse the key till "]]", but only bare keys are supported +func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroupArray) + } + l.next() + if l.peek() != ']' { + break + } + l.next() + l.emit(tokenDoubleRightBracket) + return l.lexVoid + case '[': + return l.errorf("table array key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table array key") +} + +// Parse the key till "]" 
but only bare keys are supported +func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroup) + } + l.next() + l.emit(tokenRightBracket) + return l.lexVoid + case '[': + return l.errorf("table key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table key") +} + +func (l *tomlLexer) lexRightBracket() tomlLexStateFn { + l.next() + l.emit(tokenRightBracket) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { + return l.errorf("cannot have ']' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +type validRuneFn func(r rune) bool + +func isValidHexRune(r rune) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r rune) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r rune) bool { + return r == '0' || r == '1' || r == '_' +} + +func (l *tomlLexer) lexNumber() tomlLexStateFn { + r := l.peek() + + if r == '0' { + follow := l.peekString(2) + if len(follow) == 2 { + var isValidRune validRuneFn + switch follow[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { + return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) + } + } + + if isValidRune != nil { + l.next() + l.next() + digitSeen := false + for { + next := l.peek() + if !isValidRune(next) { + break + } + digitSeen = true + l.next() + } + + if !digitSeen { + return l.errorf("number needs at least one digit") + } + + l.emit(tokenInteger) + + return l.lexRvalue + } + } + } + + if r == '+' || r == '-' { + l.next() + if l.follow("inf") { + return l.lexInf + } + if l.follow("nan") { + return l.lexNan + } + } + + pointSeen := false + expSeen := false + digitSeen := false + for { + next := l.peek() + if next == '.' { + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + l.next() + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if next == 'e' || next == 'E' { + expSeen = true + l.next() + r := l.peek() + if r == '+' || r == '-' { + l.next() + } + } else if isDigit(next) { + digitSeen = true + l.next() + } else if next == '_' { + l.next() + } else { + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen || expSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexRvalue +} + +func (l *tomlLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } +} + +func init() { + // Regexp for all date/time formats supported by TOML. 
+ // Group 1: nano precision + // Group 2: timezone + // + // /!\ also matches the empty string + // + // Example matches: + //1979-05-27T07:32:00Z + //1979-05-27T00:32:00-07:00 + //1979-05-27T00:32:00.999999-07:00 + //1979-05-27 07:32:00Z + //1979-05-27 00:32:00-07:00 + //1979-05-27 00:32:00.999999-07:00 + //1979-05-27T07:32:00 + //1979-05-27T00:32:00.999999 + //1979-05-27 07:32:00 + //1979-05-27 00:32:00.999999 + //1979-05-27 + //07:32:00 + //00:32:00.999999 + dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`) +} + +// Entry point +func lexToml(inputBytes []byte) []token { + runes := bytes.Runes(inputBytes) + l := &tomlLexer{ + input: runes, + tokens: make([]token, 0, 256), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go new file mode 100644 index 00000000000..a2149e9663a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/localtime.go @@ -0,0 +1,281 @@ +// Implementation of TOML's local date/time. +// Copied over from https://github.com/googleapis/google-cloud-go/blob/master/civil/civil.go +// to avoid pulling all the Google dependencies. +// +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package toml + +import ( + "fmt" + "time" +) + +// A LocalDate represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type LocalDate struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. +} + +// LocalDateOf returns the LocalDate in which a time occurs in that time's location. +func LocalDateOf(t time.Time) LocalDate { + var d LocalDate + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseLocalDate(s string) (LocalDate, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return LocalDate{}, err + } + return LocalDateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d LocalDate) IsValid() bool { + return LocalDateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. 
+//
+// In is always consistent with time.Date, even when time.Date returns a time
+// on a different day. For example, if loc is America/Indiana/Vincennes, then both
+//     time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
+// and
+//     LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc)
+// return 23:00:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (d LocalDate) In(loc *time.Location) time.Time {
+	return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
+}
+
+// AddDays returns the date that is n days in the future.
+// n can also be negative to go into the past.
+func (d LocalDate) AddDays(n int) LocalDate {
+	return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n))
+}
+
+// DaysSince returns the signed number of days between the date and s, not including the end day.
+// This is the inverse operation to AddDays.
+func (d LocalDate) DaysSince(s LocalDate) (days int) {
+	// We convert to Unix time so we do not have to worry about leap seconds:
+	// Unix time increases by exactly 86400 seconds per day.
+	deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
+	return int(deltaUnix / 86400)
+}
+
+// Before reports whether d1 occurs before d2.
+func (d1 LocalDate) Before(d2 LocalDate) bool {
+	if d1.Year != d2.Year {
+		return d1.Year < d2.Year
+	}
+	if d1.Month != d2.Month {
+		return d1.Month < d2.Month
+	}
+	return d1.Day < d2.Day
+}
+
+// After reports whether d1 occurs after d2.
+func (d1 LocalDate) After(d2 LocalDate) bool {
+	return d2.Before(d1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of d.String().
+func (d LocalDate) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The date is expected to be a string in a format accepted by ParseLocalDate.
+func (d *LocalDate) UnmarshalText(data []byte) error {
+	var err error
+	*d, err = ParseLocalDate(string(data))
+	return err
+}
+
+// A LocalTime represents a time with nanosecond precision.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+//
+// This type exists to represent the TIME type in storage-based APIs like BigQuery.
+// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type.
+type LocalTime struct {
+	Hour       int // The hour of the day in 24-hour format; range [0-23]
+	Minute     int // The minute of the hour; range [0-59]
+	Second     int // The second of the minute; range [0-59]
+	Nanosecond int // The nanosecond of the second; range [0-999999999]
+}
+
+// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs
+// in that time's location. It ignores the date.
+func LocalTimeOf(t time.Time) LocalTime {
+	var tm LocalTime
+	tm.Hour, tm.Minute, tm.Second = t.Clock()
+	tm.Nanosecond = t.Nanosecond()
+	return tm
+}
+
+// ParseLocalTime parses a string and returns the time value it represents.
+// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After
+// the HH:MM:SS part of the string, an optional fractional part may appear,
+// consisting of a decimal point followed by one to nine decimal digits.
+func ParseLocalTime(s string) (LocalTime, error) {
+	t, err := time.Parse("15:04:05.999999999", s)
+	if err != nil {
+		return LocalTime{}, err
+	}
+	return LocalTimeOf(t), nil
+}
+
+// String returns the time in the format described in ParseLocalTime. If Nanosecond
+// is zero, no fractional part will be generated. Otherwise, the result will
+// end with a fractional part consisting of a decimal point and nine digits.
+func (t LocalTime) String() string {
+	s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
+	if t.Nanosecond == 0 {
+		return s
+	}
+	return s + fmt.Sprintf(".%09d", t.Nanosecond)
+}
+
+// IsValid reports whether the time is valid.
+func (t LocalTime) IsValid() bool {
+	// Construct a non-zero time.
+	tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
+	return LocalTimeOf(tm) == t
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of t.String().
+func (t LocalTime) MarshalText() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The time is expected to be a string in a format accepted by ParseLocalTime.
+func (t *LocalTime) UnmarshalText(data []byte) error {
+	var err error
+	*t, err = ParseLocalTime(string(data))
+	return err
+}
+
+// A LocalDateTime represents a date and time.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+type LocalDateTime struct {
+	Date LocalDate
+	Time LocalTime
+}
+
+// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub.
+
+// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
+func LocalDateTimeOf(t time.Time) LocalDateTime {
+	return LocalDateTime{
+		Date: LocalDateOf(t),
+		Time: LocalTimeOf(t),
+	}
+}
+
+// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
+// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
+// the time offset but includes an optional fractional time, as described in
+// ParseLocalTime. Informally, the accepted format is
+//     YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
+// where the 'T' may be a lower-case 't'.
+func ParseLocalDateTime(s string) (LocalDateTime, error) {
+	t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
+	if err != nil {
+		t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
+		if err != nil {
+			return LocalDateTime{}, err
+		}
+	}
+	return LocalDateTimeOf(t), nil
+}
+
+// String returns the datetime in the format described in ParseLocalDateTime.
+func (dt LocalDateTime) String() string {
+	return dt.Date.String() + "T" + dt.Time.String()
+}
+
+// IsValid reports whether the datetime is valid.
+func (dt LocalDateTime) IsValid() bool {
+	return dt.Date.IsValid() && dt.Time.IsValid()
+}
+
+// In returns the time corresponding to the LocalDateTime in the given location.
+//
+// If the time is missing or ambiguous at the location, In returns the same
+// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
+// both
+//     time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
+// and
+//     LocalDateTime{
+//         LocalDate{Year: 1955, Month: time.May, Day: 1},
+//         LocalTime{Minute: 30}}.In(loc)
+// return 23:30:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (dt LocalDateTime) In(loc *time.Location) time.Time {
+	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
+}
+
+// Before reports whether dt1 occurs before dt2.
+func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
+	return dt1.In(time.UTC).Before(dt2.In(time.UTC))
+}
+
+// After reports whether dt1 occurs after dt2.
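+// It is equivalent to dt2.Before(dt1).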
+func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool {
+	return dt2.Before(dt1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of dt.String().
+func (dt LocalDateTime) MarshalText() ([]byte, error) {
+	return []byte(dt.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The datetime is expected to be a string in a format accepted by ParseLocalDateTime.
+func (dt *LocalDateTime) UnmarshalText(data []byte) error {
+	var err error
+	*dt, err = ParseLocalDateTime(string(data))
+	return err
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
new file mode 100644
index 00000000000..db5a7b4f09a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -0,0 +1,1240 @@
+package toml
+
+import (
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	tagFieldName    = "toml"
+	tagFieldComment = "comment"
+	tagCommented    = "commented"
+	tagMultiline    = "multiline"
+	tagDefault      = "default"
+)
+
+type tomlOpts struct {
+	name         string
+	nameFromTag  bool
+	comment      string
+	commented    bool
+	multiline    bool
+	include      bool
+	omitempty    bool
+	defaultValue string
+}
+
+type encOpts struct {
+	quoteMapKeys            bool
+	arraysOneElementPerLine bool
+}
+
+var encOptsDefaults = encOpts{
+	quoteMapKeys: false,
+}
+
+type annotation struct {
+	tag          string
+	comment      string
+	commented    string
+	multiline    string
+	defaultValue string
+}
+
+var annotationDefault = annotation{
+	tag:          tagFieldName,
+	comment:      tagFieldComment,
+	commented:    tagCommented,
+	multiline:    tagMultiline,
+	defaultValue: tagDefault,
+}
+
+type marshalOrder int
+
+// The orders in which the Encoder can write fields to the output stream.
+const (
+	// Sort fields alphabetically.
+	OrderAlphabetical marshalOrder = iota + 1
+	// Preserve the order the fields are encountered. For example, the order of fields in
+	// a struct. 
+ OrderPreserve +) + +var timeType = reflect.TypeOf(time.Time{}) +var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var localDateType = reflect.TypeOf(LocalDate{}) +var localTimeType = reflect.TypeOf(LocalTime{}) +var localDateTimeType = reflect.TypeOf(LocalDateTime{}) + +// Check if the given marshal type maps to a Tree primitive +func isPrimitive(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isPrimitive(mtype.Elem()) + case reflect.Bool: + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Struct: + return isTimeType(mtype) + default: + return false + } +} + +func isTimeType(mtype reflect.Type) bool { + return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType +} + +// Check if the given marshal type maps to a Tree slice or array +func isTreeSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTreeSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTree(mtype.Elem()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a custom marshaler type +func isCustomMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isCustomMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a text marshaler type +func isTextMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTextMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a non-Tree slice or array +func isOtherSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return !isTreeSequence(mtype) + default: + return false + } +} + +// Check if the given marshal type maps to a Tree +func isTree(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTree(mtype.Elem()) + case reflect.Map: + return true + case reflect.Struct: + return !isPrimitive(mtype) + default: + return false + } +} + +func isCustomMarshaler(mtype reflect.Type) bool { + return mtype.Implements(marshalerType) +} + +func callCustomMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(Marshaler).MarshalTOML() +} + +func isTextMarshaler(mtype reflect.Type) bool { + return mtype.Implements(textMarshalerType) && !isTimeType(mtype) +} + +func callTextMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(encoding.TextMarshaler).MarshalText() +} + +func isCustomUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(unmarshalerType) +} + +func callCustomUnmarshaler(mval 
reflect.Value, tval interface{}) error {
+	return mval.Interface().(Unmarshaler).UnmarshalTOML(tval)
+}
+
+func isTextUnmarshaler(mtype reflect.Type) bool {
+	return mtype.Implements(textUnmarshalerType)
+}
+
+func callTextUnmarshaler(mval reflect.Value, text []byte) error {
+	return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text)
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+// Unmarshaler is the interface implemented by types that
+// can unmarshal a TOML description of themselves.
+type Unmarshaler interface {
+	UnmarshalTOML(interface{}) error
+}
+
+/*
+Marshal returns the TOML encoding of v. Behavior is similar to the Go json
+encoder, except that there is no concept of a Marshaler interface or MarshalTOML
+function for sub-structs, and currently only definite types can be marshaled
+(i.e. no `interface{}`).
+
+The following struct annotations are supported:
+
+  toml:"Field"      Overrides the field's name to output.
+  omitempty         When set, empty values and groups are not emitted.
+  comment:"comment" Emits a # comment on the same line. This supports new lines.
+  commented:"true"  Emits the value as commented.
+
+Note that pointers are automatically assigned the "omitempty" option, as TOML
+explicitly does not handle null values (saying instead the label should be
+dropped).
+
+Tree structural types and corresponding marshal types:
+
+  *Tree                            (*)struct, (*)map[string]interface{}
+  []*Tree                          (*)[](*)struct, (*)[](*)map[string]interface{}
+  []interface{} (as interface{})   (*)[]primitive, (*)[]([]interface{})
+  interface{}                      (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+  uint64     uint, uint8-uint64, pointers to same
+  int64      int, int8-int64, pointers to same
+  float64    float32, float64, pointers to same
+  string     string, pointers to same
+  bool       bool, pointers to same
+  time.Time  time.Time{}, pointers to same
+
+For additional flexibility, use the Encoder API.
+*/
+func Marshal(v interface{}) ([]byte, error) {
+	return NewEncoder(nil).marshal(v)
+}
+
+// Encoder writes TOML values to an output stream.
+type Encoder struct {
+	w io.Writer
+	encOpts
+	annotation
+	line        int
+	col         int
+	order       marshalOrder
+	promoteAnon bool
+	indentation string
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:           w,
+		encOpts:     encOptsDefaults,
+		annotation:  annotationDefault,
+		line:        0,
+		col:         1,
+		order:       OrderAlphabetical,
+		indentation: "  ",
+	}
+}
+
+// Encode writes the TOML encoding of v to the stream.
+//
+// See the documentation for Marshal for details.
+func (e *Encoder) Encode(v interface{}) error {
+	b, err := e.marshal(v)
+	if err != nil {
+		return err
+	}
+	if _, err := e.w.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+// QuoteMapKeys sets up the encoder to encode
+// maps with string type keys with quoted TOML keys.
+//
+// This relieves the character limitations on map keys.
+func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
+	e.quoteMapKeys = v
+	return e
+}
+
+// ArraysWithOneElementPerLine sets up the encoder to encode arrays
+// with more than one element on multiple lines instead of one.
+//
+// For example:
+//
+//   A = [1,2,3]
+//
+// Becomes
+//
+//   A = [
+//     1,
+//     2,
+//     3,
+//   ]
+func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
+	e.arraysOneElementPerLine = v
+	return e
+}
+
+// Order allows changing the order in which fields are written to the output stream.
+func (e *Encoder) Order(ord marshalOrder) *Encoder {
+	e.order = ord
+	return e
+}
+
+// Indentation allows changing the indentation used when marshaling.
+func (e *Encoder) Indentation(indent string) *Encoder {
+	e.indentation = indent
+	return e
+}
+
+// SetTagName allows changing the default tag "toml".
+func (e *Encoder) SetTagName(v string) *Encoder {
+	e.tag = v
+	return e
+}
+
+// SetTagComment allows changing the default tag "comment".
+func (e *Encoder) SetTagComment(v string) *Encoder {
+	e.comment = v
+	return e
+}
+
+// SetTagCommented allows changing the default tag "commented".
+func (e *Encoder) SetTagCommented(v string) *Encoder {
+	e.commented = v
+	return e
+}
+
+// SetTagMultiline allows changing the default tag "multiline".
+func (e *Encoder) SetTagMultiline(v string) *Encoder {
+	e.multiline = v
+	return e
+}
+
+// PromoteAnonymous allows changing how anonymous struct fields are marshaled.
+// Usually, they are marshaled as if the inner exported fields were fields in
+// the outer struct. However, if an anonymous struct field is given a name in
+// its TOML tag, it is treated like a regular struct field with that name,
+// rather than being anonymous.
+//
+// When anonymous promotion is enabled, all anonymous structs are promoted
+// and treated like regular struct fields.
+func (e *Encoder) PromoteAnonymous(promote bool) *Encoder {
+	e.promoteAnon = promote
+	return e
+}
+
+func (e *Encoder) marshal(v interface{}) ([]byte, error) {
+	// Check if indentation is valid
+	for _, char := range e.indentation {
+		if !isSpace(char) {
+			return []byte{}, fmt.Errorf("invalid indentation: must only contain space or tab characters")
+		}
+	}
+
+	mtype := reflect.TypeOf(v)
+	if mtype == nil {
+		return []byte{}, errors.New("nil cannot be marshaled to TOML")
+	}
+
+	switch mtype.Kind() {
+	case reflect.Struct, reflect.Map:
+	case reflect.Ptr:
+		if mtype.Elem().Kind() != reflect.Struct {
+			return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML")
+		}
+		if reflect.ValueOf(v).IsNil() {
+			return []byte{}, errors.New("nil pointer cannot be marshaled to TOML")
+		}
+	default:
+		return []byte{}, errors.New("Only a struct or map can be marshaled to TOML")
+	}
+
+	sval := reflect.ValueOf(v)
+	if isCustomMarshaler(mtype) {
+		return callCustomMarshaler(sval)
+	}
+	if isTextMarshaler(mtype) {
+		return callTextMarshaler(sval)
+	}
+	t, err := e.valueToTree(mtype, sval)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	var buf bytes.Buffer
+	_, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, false)
+
+	return buf.Bytes(), err
+}
+
+// Create next tree with a position based on Encoder.line
+func (e *Encoder) nextTree() *Tree {
+	return newTreeWithPosition(Position{Line: e.line, Col: 1})
+}
+
+// Convert given marshal struct or map value to toml tree
+func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return e.valueToTree(mtype.Elem(), mval.Elem())
+	}
+	tval := e.nextTree()
+	switch mtype.Kind() {
+	case reflect.Struct:
+		switch mval.Interface().(type) {
+		case Tree:
+			reflect.ValueOf(tval).Elem().Set(mval)
+		default:
+			for i := 0; i < mtype.NumField(); i++ {
+				mtypef, mvalf := mtype.Field(i),
mval.Field(i)
+				opts := tomlOptions(mtypef, e.annotation)
+				if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) {
+					val, err := e.valueToToml(mtypef.Type, mvalf)
+					if err != nil {
+						return nil, err
+					}
+					if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon {
+						e.appendTree(tval, tree)
+					} else {
+						tval.SetPathWithOptions([]string{opts.name}, SetOptions{
+							Comment:   opts.comment,
+							Commented: opts.commented,
+							Multiline: opts.multiline,
+						}, val)
+					}
+				}
+			}
+		}
+	case reflect.Map:
+		keys := mval.MapKeys()
+		if e.order == OrderPreserve && len(keys) > 0 {
+			// Sorting []reflect.Value is not straightforward.
+			//
+			// OrderPreserve only yields deterministic results when the
+			// map keys are strings.
+			typ := keys[0].Type()
+			kind := keys[0].Kind()
+			if kind == reflect.String {
+				ikeys := make([]string, len(keys))
+				for i := range keys {
+					ikeys[i] = keys[i].Interface().(string)
+				}
+				sort.Strings(ikeys)
+				for i := range ikeys {
+					keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ)
+				}
+			}
+		}
+		for _, key := range keys {
+			mvalf := mval.MapIndex(key)
+			if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() {
+				continue
+			}
+			val, err := e.valueToToml(mtype.Elem(), mvalf)
+			if err != nil {
+				return nil, err
+			}
+			if e.quoteMapKeys {
+				keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine)
+				if err != nil {
+					return nil, err
+				}
+				tval.SetPath([]string{keyStr}, val)
+			} else {
+				tval.SetPath([]string{key.String()}, val)
+			}
+		}
+	}
+	return tval, nil
+}
+
+// Convert given marshal slice to slice of TOML trees
+func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
+	tval := make([]*Tree, mval.Len(), mval.Len())
+	for i := 0; i < mval.Len(); i++ {
+		val, err := e.valueToTree(mtype.Elem(), mval.Index(i))
+		if err != nil {
+			return nil, err
+		}
+		tval[i] = val
+	}
+	return tval, nil
+}
+
+// Convert given marshal slice to slice of TOML values
+func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+	tval := make([]interface{}, mval.Len(), mval.Len())
+	for i := 0; i < mval.Len(); i++ {
+		val, err := e.valueToToml(mtype.Elem(), mval.Index(i))
+		if err != nil {
+			return nil, err
+		}
+		tval[i] = val
+	}
+	return tval, nil
+}
+
+// Convert given marshal value to TOML value
+func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+	e.line++
+	if mtype.Kind() == reflect.Ptr {
+		switch {
+		case isCustomMarshaler(mtype):
+			return callCustomMarshaler(mval)
+		case isTextMarshaler(mtype):
+			return callTextMarshaler(mval)
+		default:
+			return e.valueToToml(mtype.Elem(), mval.Elem())
+		}
+	}
+	if mtype.Kind() == reflect.Interface {
+		return e.valueToToml(mval.Elem().Type(), mval.Elem())
+	}
+	switch {
+	case isCustomMarshaler(mtype):
+		return callCustomMarshaler(mval)
+	case isTextMarshaler(mtype):
+		return callTextMarshaler(mval)
+	case isTree(mtype):
+		return e.valueToTree(mtype, mval)
+	case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype):
+		return e.valueToOtherSlice(mtype, mval)
+	case isTreeSequence(mtype):
+		return e.valueToTreeSlice(mtype, mval)
+	default:
+		switch mtype.Kind() {
+		case reflect.Bool:
+			return mval.Bool(), nil
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			if mtype.Kind() == reflect.Int64 && mtype ==
reflect.TypeOf(time.Duration(1)) {
+				return fmt.Sprint(mval), nil
+			}
+			return mval.Int(), nil
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return mval.Uint(), nil
+		case reflect.Float32, reflect.Float64:
+			return mval.Float(), nil
+		case reflect.String:
+			return mval.String(), nil
+		case reflect.Struct:
+			return mval.Interface(), nil
+		default:
+			return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
+		}
+	}
+}
+
+func (e *Encoder) appendTree(t, o *Tree) error {
+	for key, value := range o.values {
+		if _, ok := t.values[key]; ok {
+			continue
+		}
+		if tomlValue, ok := value.(*tomlValue); ok {
+			tomlValue.position.Col = t.position.Col
+		}
+		t.values[key] = value
+	}
+	return nil
+}
+
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
+	d := Decoder{tval: t, tagName: tagFieldName}
+	return d.unmarshal(v)
+}
+
+// Marshal returns the TOML encoding of Tree.
+// See Marshal() documentation for types mapping table.
+func (t *Tree) Marshal() ([]byte, error) {
+	var buf bytes.Buffer
+	_, err := t.WriteTo(&buf)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json encoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// The following struct annotations are supported:
+//
+//   toml:"Field"   Overrides the field's name to map to.
+//   default:"foo"  Provides a default value.
+//
+// For default values, fields of the following types are supported:
+//   * string
+//   * bool
+//   * int, int8 through int64
+//   * uint, uint8 through uint64
+//   * float32, float64
+//
+// See Marshal() documentation for types mapping table.
+func Unmarshal(data []byte, v interface{}) error {
+	t, err := LoadReader(bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	return t.Unmarshal(v)
+}
+
+// Decoder reads and decodes TOML values from an input stream.
+type Decoder struct {
+	r io.Reader
+	tval *Tree
+	encOpts
+	tagName string
+	strict bool
+	visitor visitorState
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		r:       r,
+		encOpts: encOptsDefaults,
+		tagName: tagFieldName,
+	}
+}
+
+// Decode reads a TOML-encoded value from its input
+// and unmarshals it into the value pointed at by v.
+//
+// See the documentation for Marshal for details.
+func (d *Decoder) Decode(v interface{}) error {
+	var err error
+	d.tval, err = LoadReader(d.r)
+	if err != nil {
+		return err
+	}
+	return d.unmarshal(v)
+}
+
+// SetTagName allows changing the default tag "toml".
+func (d *Decoder) SetTagName(v string) *Decoder {
+	d.tagName = v
+	return d
+}
+
+// Strict toggles strict decoding. When enabled, any fields that are found in
+// the input data and do not have a corresponding struct member cause an error.
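A minimal caller-side sketch of how the annotations and decoder options above combine; the Service type, its fields, and the roundTrip helper are illustrative only, not part of this package:

	package example

	import (
		"bytes"

		toml "github.com/pelletier/go-toml"
	)

	type Service struct {
		Host string `toml:"host" comment:"public hostname"`
		Port int    `toml:"port" default:"8080"`
	}

	func roundTrip() error {
		// Marshal emits host and port, with "# public hostname" above host.
		b, err := toml.Marshal(Service{Host: "example.com", Port: 8080})
		if err != nil {
			return err
		}

		// Unmarshal fills the struct back in; had the input omitted the
		// port key, the default:"8080" tag would apply instead.
		var s Service
		if err := toml.Unmarshal(b, &s); err != nil {
			return err
		}

		// Strict decoding reports keys in the input that have no
		// corresponding struct field as an error.
		return toml.NewDecoder(bytes.NewReader(b)).Strict(true).Decode(&s)
	}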
+func (d *Decoder) Strict(strict bool) *Decoder { + d.strict = strict + return d +} + +func (d *Decoder) unmarshal(v interface{}) error { + mtype := reflect.TypeOf(v) + if mtype == nil { + return errors.New("nil cannot be unmarshaled from TOML") + } + if mtype.Kind() != reflect.Ptr { + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + elem := mtype.Elem() + + switch elem.Kind() { + case reflect.Struct, reflect.Map: + default: + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + if reflect.ValueOf(v).IsNil() { + return errors.New("nil pointer cannot be unmarshaled from TOML") + } + + vv := reflect.ValueOf(v).Elem() + + if d.strict { + d.visitor = newVisitorState(d.tval) + } + + sval, err := d.valueFromTree(elem, d.tval, &vv) + if err != nil { + return err + } + if err := d.visitor.validate(); err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil +} + +// Convert toml tree to marshal struct or map, using marshal type. When mval1 +// is non-nil, merge fields into the given value instead of allocating a new one. +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + // Check if pointer to value implements the Unmarshaler interface. + if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { + d.visitor.visitAll() + + if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil + } + + var mval reflect.Value + switch mtype.Kind() { + case reflect.Struct: + if mval1 != nil { + mval = *mval1 + } else { + mval = reflect.New(mtype).Elem() + } + + switch mval.Interface().(type) { + case Tree: + mval.Set(reflect.ValueOf(tval).Elem()) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + an := annotation{tag: d.tagName} + opts := tomlOptions(mtypef, an) + if !opts.include { + continue + } + baseKey := opts.name + keysToTry := []string{ + baseKey, + strings.ToLower(baseKey), + strings.ToTitle(baseKey), + strings.ToLower(string(baseKey[0])) + baseKey[1:], + } + + found := false + if tval != nil { + for _, key := range keysToTry { + exists := tval.HasPath([]string{key}) + if !exists { + continue + } + + d.visitor.push(key) + val := tval.GetPath([]string{key}) + fval := mval.Field(i) + mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.Field(i).Set(mvalf) + found = true + d.visitor.pop() + break + } + } + + if !found && opts.defaultValue != "" { + mvalf := mval.Field(i) + var val interface{} + var err error + switch mvalf.Kind() { + case reflect.String: + val = opts.defaultValue + case reflect.Bool: + val, err = strconv.ParseBool(opts.defaultValue) + case reflect.Uint: + val, err = strconv.ParseUint(opts.defaultValue, 10, 0) + case reflect.Uint8: + val, err = strconv.ParseUint(opts.defaultValue, 10, 8) + case reflect.Uint16: + val, err = strconv.ParseUint(opts.defaultValue, 10, 16) + case reflect.Uint32: + val, err = strconv.ParseUint(opts.defaultValue, 10, 32) + case reflect.Uint64: + val, err = strconv.ParseUint(opts.defaultValue, 10, 64) + case reflect.Int: + val, err = strconv.ParseInt(opts.defaultValue, 10, 0) + case reflect.Int8: + val, err = strconv.ParseInt(opts.defaultValue, 10, 8) + case 
reflect.Int16: + val, err = strconv.ParseInt(opts.defaultValue, 10, 16) + case reflect.Int32: + val, err = strconv.ParseInt(opts.defaultValue, 10, 32) + case reflect.Int64: + val, err = strconv.ParseInt(opts.defaultValue, 10, 64) + case reflect.Float32: + val, err = strconv.ParseFloat(opts.defaultValue, 32) + case reflect.Float64: + val, err = strconv.ParseFloat(opts.defaultValue, 64) + default: + return mvalf, fmt.Errorf("unsupported field type for default option") + } + + if err != nil { + return mvalf, err + } + mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) + } + + // save the old behavior above and try to check structs + if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { + tmpTval := tval + if !mtypef.Anonymous { + tmpTval = nil + } + fval := mval.Field(i) + v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) + if err != nil { + return v, err + } + mval.Field(i).Set(v) + } + } + } + case reflect.Map: + mval = reflect.MakeMap(mtype) + for _, key := range tval.Keys() { + d.visitor.push(key) + // TODO: path splits key + val := tval.GetPath([]string{key}) + mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) + d.visitor.pop() + } + } + return mval, nil +} + +// Convert toml value to marshal struct/map slice, using marshal type +func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + d.visitor.push(strconv.Itoa(i)) + val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + d.visitor.pop() + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val := reflect.ValueOf(tval) + length := val.Len() + + mval, err := makeSliceOrArray(mtype, length) + if err != nil { + return mval, err + } + + for i := 0; i < length; i++ { + val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Create a new slice or a new array with specified length +func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { + var mval reflect.Value + switch mtype.Kind() { + case reflect.Slice: + mval = reflect.MakeSlice(mtype, tLength, tLength) + case reflect.Array: + mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() + if tLength > mtype.Len() { + return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) + } + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type. When mval1 is non-nil +// and the given type is a struct value, merge fields into it. 
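The default-value switch above covers string, bool, every integer width, and both float widths. A small sketch of the resulting behavior, assuming the illustrative Limits type below and `toml` as the imported package name:

	type Limits struct {
		Retries int     `toml:"retries" default:"3"`
		Ratio   float64 `toml:"ratio" default:"0.5"`
		Debug   bool    `toml:"debug" default:"true"`
	}

	// Decoding an empty document finds none of the keys, so every field
	// falls back to its default tag: Retries=3, Ratio=0.5, Debug=true.
	var l Limits
	err := toml.Unmarshal([]byte(""), &l)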
+func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + switch t := tval.(type) { + case *Tree: + var mval11 *reflect.Value + if mtype.Kind() == reflect.Struct { + mval11 = mval1 + } + + if isTree(mtype) { + return d.valueFromTree(mtype, t, mval11) + } + + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) + } else { + return d.valueFromToml(mval1.Elem().Type(), t, nil) + } + } + + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) + case []*Tree: + if isTreeSequence(mtype) { + return d.valueFromTreeSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) + case []interface{}: + d.visitor.visit() + if isOtherSequence(mtype) { + return d.valueFromOtherSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + default: + d.visitor.visit() + // Check if pointer to value implements the encoding.TextUnmarshaler. + if mvalPtr := reflect.New(mtype); isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { + if err := d.unmarshalText(tval, mvalPtr); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) + } + return mvalPtr.Elem(), nil + } + + switch mtype.Kind() { + case reflect.Bool, reflect.Struct: + val := reflect.ValueOf(tval) + + switch val.Type() { + case localDateType: + localDate := val.Interface().(LocalDate) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil + } + case localDateTimeType: + localDateTime := val.Interface().(LocalDateTime) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date( + localDateTime.Date.Year, + localDateTime.Date.Month, + localDateTime.Date.Day, + localDateTime.Time.Hour, + localDateTime.Time.Minute, + localDateTime.Time.Second, + localDateTime.Time.Nanosecond, + time.Local)), nil + } + } + + // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.String: + val := reflect.ValueOf(tval) + // stupidly, int64 is convertible to string. So special case this. 
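+			// (A reflect conversion from int64 to string yields the UTF-8
+			// encoding of that code point, e.g. int64(65) becomes "A",
+			// not "65", so letting it through would silently corrupt data.)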
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val := reflect.ValueOf(tval) + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { + d, err := time.ParseDuration(val.String()) + if err != nil { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err) + } + return reflect.ValueOf(d), nil + } + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Float32, reflect.Float64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Interface: + if mval1 == nil || mval1.IsNil() { + return reflect.ValueOf(tval), nil + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + case reflect.Slice, reflect.Array: + if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { + return d.valueFromOtherSliceI(mtype, t) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + default: + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + } + } +} + +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + var melem *reflect.Value + + if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { + elem := mval1.Elem() + melem = &elem + } + + val, err := d.valueFromToml(mtype.Elem(), tval, melem) + if err != nil { + return reflect.ValueOf(nil), err + } + mval := reflect.New(mtype.Elem()) + mval.Elem().Set(val) + return mval, nil +} + +func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { + var buf bytes.Buffer + fmt.Fprint(&buf, tval) + return callTextUnmarshaler(mval, buf.Bytes()) +} 
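Because every Encoder option above returns the receiver, options compose by chaining. A minimal caller-side sketch; the render helper and cfg value are illustrative only:

	package example

	import (
		"bytes"

		toml "github.com/pelletier/go-toml"
	)

	func render(cfg map[string]interface{}) (string, error) {
		var buf bytes.Buffer
		enc := toml.NewEncoder(&buf).
			Order(toml.OrderPreserve).         // keep source order instead of alphabetical
			Indentation("  ").                 // must contain only spaces and/or tabs
			ArraysWithOneElementPerLine(true). // multi-element arrays get one line per item
			QuoteMapKeys(true)                 // quote keys that bare-key syntax cannot express
		if err := enc.Encode(cfg); err != nil {
			return "", err
		}
		return buf.String(), nil
	}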
+ +func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { + tag := vf.Tag.Get(an.tag) + parse := strings.Split(tag, ",") + var comment string + if c := vf.Tag.Get(an.comment); c != "" { + comment = c + } + commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) + multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) + defaultValue := vf.Tag.Get(tagDefault) + result := tomlOpts{ + name: vf.Name, + nameFromTag: false, + comment: comment, + commented: commented, + multiline: multiline, + include: true, + omitempty: false, + defaultValue: defaultValue, + } + if parse[0] != "" { + if parse[0] == "-" && len(parse) == 1 { + result.include = false + } else { + result.name = strings.Trim(parse[0], " ") + result.nameFromTag = true + } + } + if vf.PkgPath != "" { + result.include = false + } + if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { + result.omitempty = true + } + if vf.Type.Kind() == reflect.Ptr { + result.omitempty = true + } + return result +} + +func isZero(val reflect.Value) bool { + switch val.Type().Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + return val.Len() == 0 + default: + return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) + } +} + +func formatError(err error, pos Position) error { + if err.Error()[0] == '(' { // Error already contains position information + return err + } + return fmt.Errorf("%s: %s", pos, err) +} + +// visitorState keeps track of which keys were unmarshaled. +type visitorState struct { + tree *Tree + path []string + keys map[string]struct{} + active bool +} + +func newVisitorState(tree *Tree) visitorState { + path, result := []string{}, map[string]struct{}{} + insertKeys(path, result, tree) + return visitorState{ + tree: tree, + path: path[:0], + keys: result, + active: true, + } +} + +func (s *visitorState) push(key string) { + if s.active { + s.path = append(s.path, key) + } +} + +func (s *visitorState) pop() { + if s.active { + s.path = s.path[:len(s.path)-1] + } +} + +func (s *visitorState) visit() { + if s.active { + delete(s.keys, strings.Join(s.path, ".")) + } +} + +func (s *visitorState) visitAll() { + if s.active { + for k := range s.keys { + if strings.HasPrefix(k, strings.Join(s.path, ".")) { + delete(s.keys, k) + } + } + } +} + +func (s *visitorState) validate() error { + if !s.active { + return nil + } + undecoded := make([]string, 0, len(s.keys)) + for key := range s.keys { + undecoded = append(undecoded, key) + } + sort.Strings(undecoded) + if len(undecoded) > 0 { + return fmt.Errorf("undecoded keys: %q", undecoded) + } + return nil +} + +func insertKeys(path []string, m map[string]struct{}, tree *Tree) { + for k, v := range tree.values { + switch node := v.(type) { + case []*Tree: + for i, item := range node { + insertKeys(append(path, k, strconv.Itoa(i)), m, item) + } + case *Tree: + insertKeys(append(path, k), m, node) + case *tomlValue: + m[strings.Join(append(path, k), ".")] = struct{}{} + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml new file mode 100644 index 00000000000..792b72ed721 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic_lists] + floats = [12.3,45.6,78.9] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + ints = [8001,8001,8002] + uints = [5002,5003] + strings = ["One","Two","Three"] + +[[subdocptrs]] + 
name = "Second" + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.second] + name = "Second" + + [subdoc.first] + name = "First" + +[basic] + uint = 5001 + bool = true + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + date = 1979-05-27T07:32:00Z + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml new file mode 100644 index 00000000000..ba5e110bf04 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic] + bool = true + date = 1979-05-27T07:32:00Z + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + uint = 5001 + +[basic_lists] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + floats = [12.3,45.6,78.9] + ints = [8001,8001,8002] + strings = ["One","Two","Three"] + uints = [5002,5003] + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.first] + name = "First" + + [subdoc.second] + name = "Second" + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" + +[[subdocptrs]] + name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go new file mode 100644 index 00000000000..7bf40bbdc7e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -0,0 +1,493 @@ +// TOML Parser. + +package toml + +import ( + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +type tomlParser struct { + flowIdx int + flow []token + tree *Tree + currentTable []string + seenTableKeys []string +} + +type tomlParserStateFn func() tomlParserStateFn + +// Formats and panics an error message based on a token +func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *tomlParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *tomlParser) peek() *token { + if p.flowIdx >= len(p.flow) { + return nil + } + return &p.flow[p.flowIdx] +} + +func (p *tomlParser) assume(typ tokenType) { + tok := p.getToken() + if tok == nil { + p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) + } + if tok.typ != typ { + p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) + } +} + +func (p *tomlParser) getToken() *token { + tok := p.peek() + if tok == nil { + return nil + } + p.flowIdx++ + return tok +} + +func (p *tomlParser) parseStart() tomlParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenDoubleLeftBracket: + return p.parseGroupArray + case tokenLeftBracket: + return p.parseGroup + case tokenKey: + return p.parseAssign + case tokenEOF: + return nil + case tokenError: + p.raiseError(tok, "parsing error: %s", tok.String()) + default: + p.raiseError(tok, "unexpected token %s", tok.typ) + } + return nil +} + +func (p *tomlParser) parseGroupArray() tomlParserStateFn { + startToken := p.getToken() // discard the [[ + key := p.getToken() + if key.typ != tokenKeyGroupArray { + p.raiseError(key, "unexpected token %s, was expecting a table array key", key) + } + + // get or create table array element at the indicated part in the path + keys, err := parseKey(key.val) + if err != nil { + 
p.raiseError(key, "invalid table array key: %s", err) + } + p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries + destTree := p.tree.GetPath(keys) + var array []*Tree + if destTree == nil { + array = make([]*Tree, 0) + } else if target, ok := destTree.([]*Tree); ok && target != nil { + array = destTree.([]*Tree) + } else { + p.raiseError(key, "key %s is already assigned and not of type table array", key) + } + p.currentTable = keys + + // add a new tree to the end of the table array + newTree := newTree() + newTree.position = startToken.Position + array = append(array, newTree) + p.tree.SetPath(p.currentTable, array) + + // remove all keys that were children of this table array + prefix := key.val + "." + found := false + for ii := 0; ii < len(p.seenTableKeys); { + tableKey := p.seenTableKeys[ii] + if strings.HasPrefix(tableKey, prefix) { + p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) + } else { + found = (tableKey == key.val) + ii++ + } + } + + // keep this key name from use by other kinds of assignments + if !found { + p.seenTableKeys = append(p.seenTableKeys, key.val) + } + + // move to next parser state + p.assume(tokenDoubleRightBracket) + return p.parseStart +} + +func (p *tomlParser) parseGroup() tomlParserStateFn { + startToken := p.getToken() // discard the [ + key := p.getToken() + if key.typ != tokenKeyGroup { + p.raiseError(key, "unexpected token %s, was expecting a table key", key) + } + for _, item := range p.seenTableKeys { + if item == key.val { + p.raiseError(key, "duplicated tables") + } + } + + p.seenTableKeys = append(p.seenTableKeys, key.val) + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + if err := p.tree.createSubTree(keys, startToken.Position); err != nil { + p.raiseError(key, "%s", err) + } + destTree := p.tree.GetPath(keys) + if target, ok := destTree.(*Tree); ok && target != nil && target.inline { + p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", + strings.Join(keys, ".")) + } + p.assume(tokenRightBracket) + p.currentTable = keys + return p.parseStart +} + +func (p *tomlParser) parseAssign() tomlParserStateFn { + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err.Error()) + } + + value := p.parseRvalue() + var tableKey []string + if len(p.currentTable) > 0 { + tableKey = p.currentTable + } else { + tableKey = []string{} + } + + prefixKey := parsedKey[0 : len(parsedKey)-1] + tableKey = append(tableKey, prefixKey...) 
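+	// e.g. with currentTable [x, y] (from a preceding [x.y] header) and the
+	// assignment a.b.c = 1, tableKey is now [x, y, a, b]; the final key
+	// part c is assigned into that table below.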
+ + // find the table to assign, looking out for arrays of tables + var targetNode *Tree + switch node := p.tree.GetPath(tableKey).(type) { + case []*Tree: + targetNode = node[len(node)-1] + case *Tree: + targetNode = node + case nil: + // create intermediate + if err := p.tree.createSubTree(tableKey, key.Position); err != nil { + p.raiseError(key, "could not create intermediate group: %s", err) + } + targetNode = p.tree.GetPath(tableKey).(*Tree) + default: + p.raiseError(key, "Unknown table type for path: %s", + strings.Join(tableKey, ".")) + } + + if targetNode.inline { + p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", + strings.Join(tableKey, ".")) + } + + // assign value to the found table + keyVal := parsedKey[len(parsedKey)-1] + localKey := []string{keyVal} + finalKey := append(tableKey, keyVal) + if targetNode.GetPath(localKey) != nil { + p.raiseError(key, "The following key was defined twice: %s", + strings.Join(finalKey, ".")) + } + var toInsert interface{} + + switch value.(type) { + case *Tree, []*Tree: + toInsert = value + default: + toInsert = &tomlValue{value: value, position: key.Position} + } + targetNode.values[keyVal] = toInsert + return p.parseStart +} + +var numberUnderscoreInvalidRegexp *regexp.Regexp +var hexNumberUnderscoreInvalidRegexp *regexp.Regexp + +func numberContainsInvalidUnderscore(value string) error { + if numberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in number") + } + return nil +} + +func hexNumberContainsInvalidUnderscore(value string) error { + if hexNumberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in hex number") + } + return nil +} + +func cleanupNumberToken(value string) string { + cleanedVal := strings.Replace(value, "_", "", -1) + return cleanedVal +} + +func (p *tomlParser) parseRvalue() interface{} { + tok := p.getToken() + if tok == nil || tok.typ == tokenEOF { + p.raiseError(tok, "expecting a value") + } + + switch tok.typ { + case tokenString: + return tok.val + case tokenTrue: + return true + case tokenFalse: + return false + case tokenInf: + if tok.val[0] == '-' { + return math.Inf(-1) + } + return math.Inf(1) + case tokenNan: + return math.NaN() + case tokenInteger: + cleanedVal := cleanupNumberToken(tok.val) + var err error + var val int64 + if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { + switch cleanedVal[1] { + case 'x': + err = hexNumberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) + case 'o': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) + case 'b': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) + default: + panic("invalid base") // the lexer should catch this first + } + } else { + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal, 10, 64) + } + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenFloat: + err := numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + cleanedVal := cleanupNumberToken(tok.val) + val, err := strconv.ParseFloat(cleanedVal, 64) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenDate: + 
layout := time.RFC3339Nano + if !strings.Contains(tok.val, "T") { + layout = strings.Replace(layout, "T", " ", 1) + } + val, err := time.ParseInLocation(layout, tok.val, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalDate: + v := strings.Replace(tok.val, " ", "T", -1) + isDateTime := false + isTime := false + for _, c := range v { + if c == 'T' || c == 't' { + isDateTime = true + break + } + if c == ':' { + isTime = true + break + } + } + + var val interface{} + var err error + + if isDateTime { + val, err = ParseLocalDateTime(v) + } else if isTime { + val, err = ParseLocalTime(v) + } else { + val, err = ParseLocalDate(v) + } + + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLeftBracket: + return p.parseArray() + case tokenLeftCurlyBrace: + return p.parseInlineTable() + case tokenEqual: + p.raiseError(tok, "cannot have multiple equals for the same key") + case tokenError: + p.raiseError(tok, "%s", tok) + } + + p.raiseError(tok, "never reached") + + return nil +} + +func tokenIsComma(t *token) bool { + return t != nil && t.typ == tokenComma +} + +func (p *tomlParser) parseInlineTable() *Tree { + tree := newTree() + var previous *token +Loop: + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated inline table") + } + switch follow.typ { + case tokenRightCurlyBrace: + p.getToken() + break Loop + case tokenKey, tokenInteger, tokenString: + if !tokenIsComma(previous) && previous != nil { + p.raiseError(follow, "comma expected between fields in inline table") + } + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err) + } + + value := p.parseRvalue() + tree.SetPath(parsedKey, value) + case tokenComma: + if tokenIsComma(previous) { + p.raiseError(follow, "need field between two commas in inline table") + } + p.getToken() + default: + p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) + } + previous = follow + } + if tokenIsComma(previous) { + p.raiseError(previous, "trailing comma at the end of inline table") + } + tree.inline = true + return tree +} + +func (p *tomlParser) parseArray() interface{} { + var array []interface{} + arrayType := reflect.TypeOf(newTree()) + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ == tokenRightBracket { + p.getToken() + break + } + val := p.parseRvalue() + if reflect.TypeOf(val) != arrayType { + arrayType = nil + } + array = append(array, val) + follow = p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ != tokenRightBracket && follow.typ != tokenComma { + p.raiseError(follow, "missing comma") + } + if follow.typ == tokenComma { + p.getToken() + } + } + + // if the array is a mixed-type array or its length is 0, + // don't convert it to a table array + if len(array) <= 0 { + arrayType = nil + } + // An array of Trees is actually an array of inline + // tables, which is a shorthand for a table array. If the + // array was not converted from []interface{} to []*Tree, + // the two notations would not be equivalent. 
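+	// e.g. points = [{x = 1}, {x = 2}] must behave exactly like two
+	// consecutive [[points]] tables, so an array whose elements are all
+	// *Tree is converted below into the []*Tree form used by table arrays.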
+	if arrayType == reflect.TypeOf(newTree()) {
+		tomlArray := make([]*Tree, len(array))
+		for i, v := range array {
+			tomlArray[i] = v.(*Tree)
+		}
+		return tomlArray
+	}
+	return array
+}
+
+func parseToml(flow []token) *Tree {
+	result := newTree()
+	result.position = Position{1, 1}
+	parser := &tomlParser{
+		flowIdx:       0,
+		flow:          flow,
+		tree:          result,
+		currentTable:  make([]string, 0),
+		seenTableKeys: make([]string, 0),
+	}
+	parser.run()
+	return result
+}
+
+func init() {
+	numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`)
+	hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`)
+}
diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go
new file mode 100644
index 00000000000..c17bff87baa
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/position.go
@@ -0,0 +1,29 @@
+// Position support for go-toml
+
+package toml
+
+import (
+	"fmt"
+)
+
+// Position of a document element within a TOML document.
+//
+// Line and Col are both 1-indexed positions for the element's line number and
+// column number, respectively. Values of zero or less will cause Invalid()
+// to return true.
+type Position struct {
+	Line int // line within the document
+	Col  int // column within the line
+}
+
+// String representation of the position.
+// Displays 1-indexed line and column numbers.
+func (p Position) String() string {
+	return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
+}
+
+// Invalid reports whether the position is invalid,
+// i.e. whether Line or Col is zero or negative.
+func (p Position) Invalid() bool {
+	return p.Line <= 0 || p.Col <= 0
+}
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
new file mode 100644
index 00000000000..6af4ec46bcf
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -0,0 +1,134 @@
+package toml
+
+import "fmt"
+
+// Define tokens
+type tokenType int
+
+const (
+	eof = -(iota + 1)
+)
+
+const (
+	tokenError tokenType = iota
+	tokenEOF
+	tokenComment
+	tokenKey
+	tokenString
+	tokenInteger
+	tokenTrue
+	tokenFalse
+	tokenFloat
+	tokenInf
+	tokenNan
+	tokenEqual
+	tokenLeftBracket
+	tokenRightBracket
+	tokenLeftCurlyBrace
+	tokenRightCurlyBrace
+	tokenLeftParen
+	tokenRightParen
+	tokenDoubleLeftBracket
+	tokenDoubleRightBracket
+	tokenDate
+	tokenLocalDate
+	tokenKeyGroup
+	tokenKeyGroupArray
+	tokenComma
+	tokenColon
+	tokenDollar
+	tokenStar
+	tokenQuestion
+	tokenDot
+	tokenDotDot
+	tokenEOL
+)
+
+// Token names are indexed by tokenType, so the order here must match the
+// constant declarations above.
+var tokenTypeNames = []string{
+	"Error",
+	"EOF",
+	"Comment",
+	"Key",
+	"String",
+	"Integer",
+	"True",
+	"False",
+	"Float",
+	"Inf",
+	"NaN",
+	"=",
+	"[",
+	"]",
+	"{",
+	"}",
+	"(",
+	")",
+	"[[",
+	"]]",
+	"Date",
+	"LocalDate",
+	"KeyGroup",
+	"KeyGroupArray",
+	",",
+	":",
+	"$",
+	"*",
+	"?",
+	".",
+	"..",
+	"EOL",
+}
+
+type token struct {
+	Position
+	typ tokenType
+	val string
+}
+
+func (tt tokenType) String() string {
+	idx := int(tt)
+	if idx < len(tokenTypeNames) {
+		return tokenTypeNames[idx]
+	}
+	return "Unknown"
+}
+
+func (t token) String() string {
+	switch t.typ {
+	case tokenEOF:
+		return "EOF"
+	case tokenError:
+		return t.val
+	}
+
+	return fmt.Sprintf("%q", t.val)
+}
+
+func isSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+func isAlphanumeric(r rune) bool {
+	return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_'
+}
+
+func isKeyChar(r rune) bool {
+	// Keys start with the first character that isn't whitespace or [ and end
+	// with the last
non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') +} + +func isDigit(r rune) bool { + return '0' <= r && r <= '9' +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go new file mode 100644 index 00000000000..d323c39bce9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -0,0 +1,399 @@ +package toml + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" +) + +type tomlValue struct { + value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list + comment string + commented bool + multiline bool + position Position +} + +// Tree is the result of the parsing of a TOML file. +type Tree struct { + values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree + comment string + commented bool + inline bool + position Position +} + +func newTree() *Tree { + return newTreeWithPosition(Position{}) +} + +func newTreeWithPosition(pos Position) *Tree { + return &Tree{ + values: make(map[string]interface{}), + position: pos, + } +} + +// TreeFromMap initializes a new Tree object using the given map. +func TreeFromMap(m map[string]interface{}) (*Tree, error) { + result, err := toTree(m) + if err != nil { + return nil, err + } + return result.(*Tree), nil +} + +// Position returns the position of the tree. +func (t *Tree) Position() Position { + return t.position +} + +// Has returns a boolean indicating if the given key exists. +func (t *Tree) Has(key string) bool { + if key == "" { + return false + } + return t.HasPath(strings.Split(key, ".")) +} + +// HasPath returns true if the given path of keys exists, false otherwise. +func (t *Tree) HasPath(keys []string) bool { + return t.GetPath(keys) != nil +} + +// Keys returns the keys of the toplevel tree (does not recurse). +func (t *Tree) Keys() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + return keys +} + +// Get the value at key in the Tree. +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// If you need to retrieve non-bare keys, use GetPath. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) Get(key string) interface{} { + if key == "" { + return t + } + return t.GetPath(strings.Split(key, ".")) +} + +// GetPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. 
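A caller-side sketch of the two lookup styles; the document contents and the hostOf helper are illustrative, with `toml` as the imported package name:

	func hostOf(doc string) (string, error) {
		tree, err := toml.Load(doc)
		if err != nil {
			return "", err
		}

		// Dotted-path lookup; each dot separates one level.
		host, _ := tree.Get("server.host").(string)

		// Equivalent explicit form; needed when a key itself contains a dot.
		same, _ := tree.GetPath([]string{"server", "host"}).(string)
		_ = same

		return host, nil
	}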
+func (t *Tree) GetPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.value + default: + return node + } +} + +// GetPosition returns the position of the given key. +func (t *Tree) GetPosition(key string) Position { + if key == "" { + return t.position + } + return t.GetPositionPath(strings.Split(key, ".")) +} + +// GetPositionPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetPositionPath(keys []string) Position { + if len(keys) == 0 { + return t.position + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return Position{0, 0} + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + subtree = node[len(node)-1] + default: + return Position{0, 0} + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.position + case *Tree: + return node.position + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + return node[len(node)-1].position + default: + return Position{0, 0} + } +} + +// GetDefault works like Get but with a default value +func (t *Tree) GetDefault(key string, def interface{}) interface{} { + val := t.Get(key) + if val == nil { + return def + } + return val +} + +// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. +// The default values within the struct are valid default options. +type SetOptions struct { + Comment string + Commented bool + Multiline bool +} + +// SetWithOptions is the same as Set, but allows you to provide formatting +// instructions to the key, that will be used by Marshal(). +func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { + t.SetPathWithOptions(strings.Split(key, "."), opts, value) +} + +// SetPathWithOptions is the same as SetPath, but allows you to provide +// formatting instructions to the key, that will be reused by Marshal(). 
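SetPathWithOptions in practice, sketched from the caller's side (keys and values illustrative):

	tree.SetPathWithOptions(
		[]string{"server", "timeout"},
		toml.SetOptions{Comment: "seconds", Commented: false, Multiline: false},
		int64(30),
	)

A later Marshal() of the tree then renders a "# seconds" comment for the server.timeout key.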
+func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { + subtree := t + for i, intermediateKey := range keys[:len(keys)-1] { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + subtree.values[intermediateKey] = nextTree // add new element here + } + switch node := nextTree.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + // create element if it does not exist + subtree.values[intermediateKey] = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) + } + subtree = node[len(node)-1] + } + } + + var toInsert interface{} + + switch v := value.(type) { + case *Tree: + v.comment = opts.Comment + v.commented = opts.Commented + toInsert = value + case []*Tree: + for i := range v { + v[i].commented = opts.Commented + } + toInsert = value + case *tomlValue: + v.comment = opts.Comment + toInsert = v + default: + toInsert = &tomlValue{value: value, + comment: opts.Comment, + commented: opts.Commented, + multiline: opts.Multiline, + position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} + } + + subtree.values[keys[len(keys)-1]] = toInsert +} + +// Set an element in the tree. +// Key is a dot-separated path (e.g. a.b.c). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) Set(key string, value interface{}) { + t.SetWithComment(key, "", false, value) +} + +// SetWithComment is the same as Set, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { + t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) +} + +// SetPath sets an element in the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) SetPath(keys []string, value interface{}) { + t.SetPathWithComment(keys, "", false, value) +} + +// SetPathWithComment is the same as SetPath, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { + t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) +} + +// Delete removes a key from the tree. +// Key is a dot-separated path (e.g. a.b.c). +func (t *Tree) Delete(key string) error { + keys, err := parseKey(key) + if err != nil { + return err + } + return t.DeletePath(keys) +} + +// DeletePath removes a key from the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +func (t *Tree) DeletePath(keys []string) error { + keyLen := len(keys) + if keyLen == 1 { + delete(t.values, keys[0]) + return nil + } + tree := t.GetPath(keys[:keyLen-1]) + item := keys[keyLen-1] + switch node := tree.(type) { + case *Tree: + delete(node.values, item) + return nil + } + return errors.New("no such key to delete") +} + +// createSubTree takes a tree and a key and create the necessary intermediate +// subtrees to create a subtree at that point. In-place. +// +// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] +// and tree[a][b][c] +// +// Returns nil on success, error object on failure +func (t *Tree) createSubTree(keys []string, pos Position) error { + subtree := t + for i, intermediateKey := range keys { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + tree.position = pos + tree.inline = subtree.inline + subtree.values[intermediateKey] = tree + nextTree = tree + } + + switch node := nextTree.(type) { + case []*Tree: + subtree = node[len(node)-1] + case *Tree: + subtree = node + default: + return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", + strings.Join(keys, "."), intermediateKey, nextTree, nextTree) + } + } + return nil +} + +// LoadBytes creates a Tree from a []byte. +func LoadBytes(b []byte) (tree *Tree, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = errors.New(r.(string)) + } + }() + + if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { + b = b[4:] + } else if len(b) >= 3 && hasUTF8BOM3(b) { + b = b[3:] + } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { + b = b[2:] + } + + tree = parseToml(lexToml(b)) + return +} + +func hasUTF16BigEndianBOM2(b []byte) bool { + return b[0] == 0xFE && b[1] == 0xFF +} + +func hasUTF16LittleEndianBOM2(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE +} + +func hasUTF8BOM3(b []byte) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +func hasUTF32BigEndianBOM4(b []byte) bool { + return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF +} + +func hasUTF32LittleEndianBOM4(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 +} + +// LoadReader creates a Tree from any io.Reader. +func LoadReader(reader io.Reader) (tree *Tree, err error) { + inputBytes, err := ioutil.ReadAll(reader) + if err != nil { + return + } + tree, err = LoadBytes(inputBytes) + return +} + +// Load creates a Tree from a string. +func Load(content string) (tree *Tree, err error) { + return LoadBytes([]byte(content)) +} + +// LoadFile creates a Tree from a file. +func LoadFile(path string) (tree *Tree, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return LoadReader(file) +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go new file mode 100644 index 00000000000..79610e9b340 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -0,0 +1,142 @@ +package toml + +import ( + "fmt" + "reflect" + "time" +) + +var kindToType = [reflect.String + 1]reflect.Type{ + reflect.Bool: reflect.TypeOf(true), + reflect.String: reflect.TypeOf(""), + reflect.Float32: reflect.TypeOf(float64(1)), + reflect.Float64: reflect.TypeOf(float64(1)), + reflect.Int: reflect.TypeOf(int64(1)), + reflect.Int8: reflect.TypeOf(int64(1)), + reflect.Int16: reflect.TypeOf(int64(1)), + reflect.Int32: reflect.TypeOf(int64(1)), + reflect.Int64: reflect.TypeOf(int64(1)), + reflect.Uint: reflect.TypeOf(uint64(1)), + reflect.Uint8: reflect.TypeOf(uint64(1)), + reflect.Uint16: reflect.TypeOf(uint64(1)), + reflect.Uint32: reflect.TypeOf(uint64(1)), + reflect.Uint64: reflect.TypeOf(uint64(1)), +} + +// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
+// supported values: +// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 +func typeFor(k reflect.Kind) reflect.Type { + if k > 0 && int(k) < len(kindToType) { + return kindToType[k] + } + return nil +} + +func simpleValueCoercion(object interface{}) (interface{}, error) { + switch original := object.(type) { + case string, bool, int64, uint64, float64, time.Time: + return original, nil + case int: + return int64(original), nil + case int8: + return int64(original), nil + case int16: + return int64(original), nil + case int32: + return int64(original), nil + case uint: + return uint64(original), nil + case uint8: + return uint64(original), nil + case uint16: + return uint64(original), nil + case uint32: + return uint64(original), nil + case float32: + return float64(original), nil + case fmt.Stringer: + return original.String(), nil + default: + return nil, fmt.Errorf("cannot convert type %T to Tree", object) + } +} + +func sliceToTree(object interface{}) (interface{}, error) { + // arrays are a bit tricky, since they can represent either a + // collection of simple values, which is represented by one + // *tomlValue, or an array of tables, which is represented by an + // array of *Tree. + + // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice + value := reflect.ValueOf(object) + insideType := value.Type().Elem() + length := value.Len() + if length > 0 { + insideType = reflect.ValueOf(value.Index(0).Interface()).Type() + } + if insideType.Kind() == reflect.Map { + // this is considered as an array of tables + tablesArray := make([]*Tree, 0, length) + for i := 0; i < length; i++ { + table := value.Index(i) + tree, err := toTree(table.Interface()) + if err != nil { + return nil, err + } + tablesArray = append(tablesArray, tree.(*Tree)) + } + return tablesArray, nil + } + + sliceType := typeFor(insideType.Kind()) + if sliceType == nil { + sliceType = insideType + } + + arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) + + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil +} + +func toTree(object interface{}) (interface{}, error) { + value := reflect.ValueOf(object) + + if value.Kind() == reflect.Map { + values := map[string]interface{}{} + keys := value.MapKeys() + for _, key := range keys { + if key.Kind() != reflect.String { + if _, ok := key.Interface().(string); !ok { + return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) + } + } + + v := value.MapIndex(key) + newValue, err := toTree(v.Interface()) + if err != nil { + return nil, err + } + values[key.String()] = newValue + } + return &Tree{values: values, position: Position{}}, nil + } + + if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { + return sliceToTree(object) + } + + simpleValue, err := simpleValueCoercion(object) + if err != nil { + return nil, err + } + return &tomlValue{value: simpleValue, position: Position{}}, nil +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go new file mode 100644 index 00000000000..2d6487ede4a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -0,0 +1,517 
@@ +package toml + +import ( + "bytes" + "fmt" + "io" + "math" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type valueComplexity int + +const ( + valueSimple valueComplexity = iota + 1 + valueComplex +) + +type sortNode struct { + key string + complexity valueComplexity +} + +// Encodes a string to a TOML-compliant multi-line string value +// This function is a clone of the existing encodeTomlString function, except that whitespace characters +// are preserved. Quotation marks and backslashes are also not escaped. +func encodeMultilineTomlString(value string, commented string) string { + var b bytes.Buffer + adjacentQuoteCount := 0 + + b.WriteString(commented) + for i, rr := range value { + if rr != '"' { + adjacentQuoteCount = 0 + } else { + adjacentQuoteCount++ + } + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString("\t") + case '\n': + b.WriteString("\n" + commented) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString("\r") + case '"': + if adjacentQuoteCount >= 3 || i == len(value)-1 { + adjacentQuoteCount = 0 + b.WriteString(`\"`) + } else { + b.WriteString(`"`) + } + case '\\': + b.WriteString(`\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +// Encodes a string to a TOML-compliant string value +func encodeTomlString(value string) string { + var b bytes.Buffer + + for _, rr := range value { + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString(`\t`) + case '\n': + b.WriteString(`\n`) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString(`\r`) + case '"': + b.WriteString(`\"`) + case '\\': + b.WriteString(`\\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +func tomlTreeStringRepresentation(t *Tree, ord marshalOrder) (string, error) { + var orderedVals []sortNode + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + var values []string + for _, node := range orderedVals { + k := node.key + v := t.values[k] + + repr, err := tomlValueStringRepresentation(v, "", "", ord, false) + if err != nil { + return "", err + } + values = append(values, quoteKeyIfNeeded(k)+" = "+repr) + } + return "{ " + strings.Join(values, ", ") + " }", nil +} + +func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord marshalOrder, arraysOneElementPerLine bool) (string, error) { + // this interface check is added to dereference the change made in the writeTo function. + // That change was made to allow this function to see formatting options. 
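+	// If v does not wrap a *tomlValue, fall back to an empty wrapper below so formatting options such as multiline read as their zero values.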
+ tv, ok := v.(*tomlValue) + if ok { + v = tv.value + } else { + tv = &tomlValue{} + } + + switch value := v.(type) { + case uint64: + return strconv.FormatUint(value, 10), nil + case int64: + return strconv.FormatInt(value, 10), nil + case float64: + // Default bit length is full 64 + bits := 64 + // Float panics if nan is used + if !math.IsNaN(value) { + // if 32 bit accuracy is enough to exactly show, use 32 + _, acc := big.NewFloat(value).Float32() + if acc == big.Exact { + bits = 32 + } + } + if math.Trunc(value) == value { + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil + } + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil + case string: + if tv.multiline { + return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil + } + return "\"" + encodeTomlString(value) + "\"", nil + case []byte: + b, _ := v.([]byte) + return tomlValueStringRepresentation(string(b), commented, indent, ord, arraysOneElementPerLine) + case bool: + if value { + return "true", nil + } + return "false", nil + case time.Time: + return value.Format(time.RFC3339), nil + case LocalDate: + return value.String(), nil + case LocalDateTime: + return value.String(), nil + case LocalTime: + return value.String(), nil + case *Tree: + return tomlTreeStringRepresentation(value, ord) + case nil: + return "", nil + } + + rv := reflect.ValueOf(v) + + if rv.Kind() == reflect.Slice { + var values []string + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return "", err + } + values = append(values, itemRepr) + } + if arraysOneElementPerLine && len(values) > 1 { + stringBuffer := bytes.Buffer{} + valueIndent := indent + ` ` // TODO: move that to a shared encoder state + + stringBuffer.WriteString("[\n") + + for _, value := range values { + stringBuffer.WriteString(valueIndent) + stringBuffer.WriteString(commented + value) + stringBuffer.WriteString(`,`) + stringBuffer.WriteString("\n") + } + + stringBuffer.WriteString(indent + commented + "]") + + return stringBuffer.String(), nil + } + return "[" + strings.Join(values, ", ") + "]", nil + } + return "", fmt.Errorf("unsupported value type %T: %v", v, v) +} + +func getTreeArrayLine(trees []*Tree) (line int) { + // get lowest line number that is not 0 + for _, tv := range trees { + if tv.position.Line < line || line == 0 { + line = tv.position.Line + } + } + return +} + +func sortByLines(t *Tree) (vals []sortNode) { + var ( + line int + lines []int + tv *Tree + tom *tomlValue + node sortNode + ) + vals = make([]sortNode, 0) + m := make(map[int]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree: + tv = v.(*Tree) + line = tv.position.Line + node = sortNode{key: k, complexity: valueComplex} + case []*Tree: + line = getTreeArrayLine(v.([]*Tree)) + node = sortNode{key: k, complexity: valueComplex} + default: + tom = v.(*tomlValue) + line = tom.position.Line + node = sortNode{key: k, complexity: valueSimple} + } + lines = append(lines, line) + vals = append(vals, node) + m[line] = node + } + sort.Ints(lines) + + for i, line := range lines { + vals[i] = m[line] + } + + return vals +} + +func sortAlphabetical(t *Tree) (vals []sortNode) { + var ( + node sortNode + simpVals []string + compVals []string + ) + vals = make([]sortNode, 0) + m := make(map[string]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree, 
[]*Tree: + node = sortNode{key: k, complexity: valueComplex} + compVals = append(compVals, node.key) + default: + node = sortNode{key: k, complexity: valueSimple} + simpVals = append(simpVals, node.key) + } + vals = append(vals, node) + m[node.key] = node + } + + // Simples first to match previous implementation + sort.Strings(simpVals) + i := 0 + for _, key := range simpVals { + vals[i] = m[key] + i++ + } + + sort.Strings(compVals) + for _, key := range compVals { + vals[i] = m[key] + i++ + } + + return vals +} + +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false) +} + +func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder, indentString string, parentCommented bool) (int64, error) { + var orderedVals []sortNode + + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + for _, node := range orderedVals { + switch node.complexity { + case valueComplex: + k := node.key + v := t.values[k] + + combinedKey := quoteKeyIfNeeded(k) + if keyspace != "" { + combinedKey = keyspace + "." + combinedKey + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + var commented string + if parentCommented || t.commented || tv.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || tv.commented) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + var commented string + if parentCommented || t.commented || subTree.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || subTree.commented) + if err != nil { + return bytesCount, err + } + } + } + default: // Simple + k := node.key + v, ok := t.values[k].(*tomlValue) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + + var commented string + if parentCommented || t.commented || v.commented { + commented = "# " + } + repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if 
strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + quotedKey := quoteKeyIfNeeded(k) + writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + } + } + + return bytesCount, nil +} + +// quoteKeyIfNeeded quotes a key if it does not fit the bare key format (A-Za-z0-9_-). +// Quoted keys use the same rules as strings. +func quoteKeyIfNeeded(k string) string { + // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain + // keys that have already been quoted. + // not an ideal situation, but good enough of a stop gap. + if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { + return k + } + isBare := true + for _, r := range k { + if !isValidBareChar(r) { + isBare = false + break + } + } + if isBare { + return k + } + return quoteKey(k) +} + +func quoteKey(k string) string { + return "\"" + encodeTomlString(k) + "\"" +} + +func writeStrings(w io.Writer, s ...string) (int, error) { + var n int + for i := range s { + b, err := io.WriteString(w, s[i]) + n += b + if err != nil { + return n, err + } + } + return n, nil +} + +// WriteTo encodes the Tree as TOML and writes it to the writer w. +// Returns the number of bytes written in case of success, or an error if anything happened. +func (t *Tree) WriteTo(w io.Writer) (int64, error) { + return t.writeTo(w, "", "", 0, false) +} + +// ToTomlString generates a human-readable representation of the current tree. +// Output spans multiple lines, and is suitable for ingest by a TOML parser. +// If the conversion cannot be performed, ToTomlString returns a non-nil error. +func (t *Tree) ToTomlString() (string, error) { + b, err := t.Marshal() + if err != nil { + return "", err + } + return string(b), nil +} + +// String generates a human-readable representation of the current tree. +// Alias of ToTomlString. Present to implement the fmt.Stringer interface. +func (t *Tree) String() string { + result, _ := t.ToTomlString() + return result +} + +// ToMap recursively generates a representation of the tree using Go built-in structures. +// The following types are used: +// +// * bool +// * float64 +// * int64 +// * string +// * uint64 +// * time.Time +// * map[string]interface{} (where interface{} is any of this list) +// * []interface{} (where interface{} is any of this list) +func (t *Tree) ToMap() map[string]interface{} { + result := map[string]interface{}{} + + for k, v := range t.values { + switch node := v.(type) { + case []*Tree: + var array []interface{} + for _, item := range node { + array = append(array, item.ToMap()) + } + result[k] = array + case *Tree: + result[k] = node.ToMap() + case *tomlValue: + result[k] = node.value + } + } + return result +} diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md index 00899fb7e04..4ee388e81bf 100644 --- a/vendor/github.com/pierrec/lz4/README.md +++ b/vendor/github.com/pierrec/lz4/README.md @@ -83,23 +83,8 @@ Contributions are very welcome for bug fixing, performance improvements...!
## Contributors -Thanks to all contributors so far: - -- [@klauspost](https://github.com/klauspost) -- [@heidawei](https://github.com/heidawei) -- [@x4m](https://github.com/x4m) -- [@Zariel](https://github.com/Zariel) -- [@edwingeng](https://github.com/edwingeng) -- [@danielmoy-google](https://github.com/danielmoy-google) -- [@honda-tatsuya](https://github.com/honda-tatsuya) -- [@h8liu](https://github.com/h8liu) -- [@sbinet](https://github.com/sbinet) -- [@fingon](https://github.com/fingon) -- [@emfree](https://github.com/emfree) -- [@lhemala](https://github.com/lhemala) -- [@connor4312](https://github.com/connor4312) -- [@oov](https://github.com/oov) -- [@arya](https://github.com/arya) -- [@ikkeps](https://github.com/ikkeps) - -Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder +Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! + +Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. + +Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go index 5755cda2460..664d9be580d 100644 --- a/vendor/github.com/pierrec/lz4/block.go +++ b/vendor/github.com/pierrec/lz4/block.go @@ -2,8 +2,8 @@ package lz4 import ( "encoding/binary" - "fmt" "math/bits" + "sync" ) // blockHash hashes the lower 6 bytes into a value < htSize. @@ -35,24 +35,31 @@ func UncompressBlock(src, dst []byte) (int, error) { // CompressBlock compresses the source buffer into the destination one. // This is the fast version of LZ4 compression and also the default one. -// The size of hashTable must be at least 64Kb. // -// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. +// The argument hashTable is scratch space for a hash table used by the +// compressor. If provided, it should have length at least 1<<16. If it is +// shorter (or nil), CompressBlock allocates its own hash table. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. // // An error is returned if the destination buffer is too small. -func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { +func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { defer recoverBlock(&err) + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. - // This significantly speeds up incompressible data and usually has very small impact on compresssion. + // This significantly speeds up incompressible data and usually has very small impact on compression. // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) const adaptSkipLog = 7 - sn, dn := len(src)-mfLimit, len(dst) - if sn <= 0 || dn == 0 { - return 0, nil - } if len(hashTable) < htSize { - return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize) + htIface := htPool.Get() + defer htPool.Put(htIface) + hashTable = (*(htIface).(*[htSize]int))[:] } // Prove to the compiler the table has at least htSize elements. // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. 
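(Editorial note: the following usage sketch is not part of the vendored diff. It illustrates the revised CompressBlock contract documented above, assuming the v2 import path `github.com/pierrec/lz4`: sizing `dst` with `CompressBlockBound` means a zero return can only accompany an error, never "incompressible", and passing a `nil` hash table makes CompressBlock borrow scratch space from its internal pool.)

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	src := []byte("hello hello hello hello hello hello hello")

	// Sized with CompressBlockBound, so a 0 return can no longer mean
	// "incompressible"; it would only accompany an error.
	dst := make([]byte, lz4.CompressBlockBound(len(src)))

	// nil hash table: CompressBlock borrows one from its internal pool.
	n, err := lz4.CompressBlock(src, dst, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d bytes to %d\n", len(src), n)

	// Round-trip to check the block decodes back to the original.
	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decompressed %d bytes: %q\n", m, out[:m])
}
```

Callers that compress many blocks from a single goroutine can still pass their own `[]int` of length 1<<16 to skip the pool round-trip.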
@@ -60,7 +67,11 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { // si: Current position of the search. // anchor: Position of the current literals. - var si, anchor int + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } // Fast scan strategy: the hash table only stores the last 4 bytes sequences. for si < sn { @@ -124,7 +135,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { si, mLen = si+mLen, si+minMatch // Find the longest match by looking by batches of 8 bytes. - for si < sn { + for si+8 < sn { x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) if x == 0 { si += 8 @@ -184,7 +195,8 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { hashTable[h] = si - 2 } - if anchor == 0 { +lastLiterals: + if isNotCompressible && anchor == 0 { // Incompressible. return 0, nil } @@ -205,7 +217,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { di++ // Write the last literals. - if di >= anchor { + if isNotCompressible && di >= anchor { // Incompressible. return 0, nil } @@ -213,6 +225,13 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { return di, nil } +// Pool of hash tables for CompressBlock. +var htPool = sync.Pool{ + New: func() interface{} { + return new([htSize]int) + }, +} + // blockHash hashes 4 bytes into a value < winSize. func blockHashHC(x uint32) uint32 { const hasher uint32 = 2654435761 // Knuth multiplicative hash. @@ -224,22 +243,24 @@ func blockHashHC(x uint32) uint32 { // // CompressBlockHC compression ratio is better than CompressBlock but it is also slower. // -// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. // // An error is returned if the destination buffer is too small. -func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { +func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { defer recoverBlock(&err) + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. - // This significantly speeds up incompressible data and usually has very small impact on compresssion. + // This significantly speeds up incompressible data and usually has very small impact on compression. // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) const adaptSkipLog = 7 - sn, dn := len(src)-mfLimit, len(dst) - if sn <= 0 || dn == 0 { - return 0, nil - } - var si int + var si, di, anchor int // hashTable: stores the last position found for a given hash // chainTable: stores previous positions for a given hash @@ -249,7 +270,11 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { depth = winSize } - anchor := si + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + for si < sn { // Hash the next 4 bytes (sequence). match := binary.LittleEndian.Uint32(src[si:]) @@ -356,12 +381,13 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { } } - if anchor == 0 { + if isNotCompressible && anchor == 0 { // Incompressible. return 0, nil } // Last literals. 
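+	// Reached directly via goto when the input is too short to contain any match.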
+lastLiterals: lLen := len(src) - anchor if lLen < 0xF { dst[di] = byte(lLen << 4) @@ -378,7 +404,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { di++ // Write the last literals. - if di >= anchor { + if isNotCompressible && di >= anchor { // Incompressible. return 0, nil } diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go index 002519f3e7f..919888edf7d 100644 --- a/vendor/github.com/pierrec/lz4/decode_other.go +++ b/vendor/github.com/pierrec/lz4/decode_other.go @@ -19,7 +19,7 @@ func decodeBlock(dst, src []byte) (ret int) { // Literals. if lLen := b >> 4; lLen > 0 { switch { - case lLen < 0xF && di+18 < len(dst) && si+16 < len(src): + case lLen < 0xF && si+16 < len(src): // Shortcut 1 // if we have enough room in src and dst, and the literals length // is small enough (0..14) then copy all 16 bytes, even if not all @@ -34,7 +34,13 @@ func decodeBlock(dst, src []byte) (ret int) { mLen += 4 if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { i := di - offset - copy(dst[di:], dst[i:i+18]) + end := i + 18 + if end > len(dst) { + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + end = len(dst) + } + copy(dst[di:], dst[i:end]) si += 2 di += mLen continue diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go index 3e278945ec8..1c45d1813ce 100644 --- a/vendor/github.com/pierrec/lz4/errors.go +++ b/vendor/github.com/pierrec/lz4/errors.go @@ -15,6 +15,8 @@ var ( ErrInvalid = errors.New("lz4: bad magic number") // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. ErrBlockDependency = errors.New("lz4: block dependency not supported") + // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. + ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent") ) func recoverBlock(e *error) { diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go index cdbf9611f48..6c73539a343 100644 --- a/vendor/github.com/pierrec/lz4/lz4.go +++ b/vendor/github.com/pierrec/lz4/lz4.go @@ -10,6 +10,10 @@ // package lz4 +import "math/bits" + +import "sync" + const ( // Extension is the LZ4 frame file name extension Extension = ".lz4" @@ -34,28 +38,67 @@ const ( hashLog = 16 htSize = 1 << hashLog - mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. + mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. ) // map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. const ( - blockSize64K = 64 << 10 - blockSize256K = 256 << 10 - blockSize1M = 1 << 20 - blockSize4M = 4 << 20 + blockSize64K = 1 << (16 + 2*iota) + blockSize256K + blockSize1M + blockSize4M ) var ( + // Keep a pool of buffers for each valid block size. + bsMapValue = [...]*sync.Pool{ + newBufferPool(2 * blockSize64K), + newBufferPool(2 * blockSize256K), + newBufferPool(2 * blockSize1M), + newBufferPool(2 * blockSize4M), + } - bsMapID = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M} - bsMapValue = map[int]byte{blockSize64K: 4, blockSize256K: 5, blockSize1M: 6, blockSize4M: 7} ) +// newBufferPool returns a pool for buffers of the given size. +func newBufferPool(size int) *sync.Pool { + return &sync.Pool{ + New: func() interface{} { + return make([]byte, size) + }, + } +} + +// getBuffer fetches a buffer of the given size from its pool.
+func getBuffer(size int) []byte { + idx := blockSizeValueToIndex(size) - 4 + return bsMapValue[idx].Get().([]byte) +} + +// putBuffer returns a buffer to its pool. +func putBuffer(size int, buf []byte) { + if cap(buf) > 0 { + idx := blockSizeValueToIndex(size) - 4 + bsMapValue[idx].Put(buf[:cap(buf)]) + } +} +func blockSizeIndexToValue(i byte) int { + return 1 << (16 + 2*uint(i)) +} +func isValidBlockSize(size int) bool { + const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M + + return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1 +} +func blockSizeValueToIndex(size int) byte { + return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) +} + // Header describes the various flags that can be set on a Writer or obtained from a Reader. // The default values match those of the LZ4 frame format definition // (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). // // NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. -// It is the caller responsibility to check them if necessary. +// It is the caller's responsibility to check them if necessary. type Header struct { BlockChecksum bool // Compressed blocks checksum flag. NoChecksum bool // Frame checksum flag. @@ -64,3 +107,7 @@ type Header struct { CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). done bool // Header processed flag (Read or Write and checked). } + +func (h *Header) Reset() { + h.done = false +} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go index 90e8efe2eb0..87dd72bd0db 100644 --- a/vendor/github.com/pierrec/lz4/reader.go +++ b/vendor/github.com/pierrec/lz4/reader.go @@ -25,6 +25,8 @@ type Reader struct { data []byte // Uncompressed data. idx int // Index of unread bytes into data. checksum xxh32.XXHZero // Frame hash. + skip int64 // Bytes to skip before next read. + dpos int64 // Position in dest } // NewReader returns a new LZ4 frame decoder. @@ -86,10 +88,10 @@ func (z *Reader) readHeader(first bool) error { z.NoChecksum = b>>2&1 == 0 bmsID := buf[1] >> 4 & 0x7 - bSize, ok := bsMapID[bmsID] - if !ok { + if bmsID < 4 || bmsID > 7 { return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) } + bSize := blockSizeIndexToValue(bmsID - 4) z.BlockMaxSize = bSize // Allocate the compressed/uncompressed buffers. @@ -275,8 +277,20 @@ func (z *Reader) Read(buf []byte) (int, error) { z.idx = 0 } + if z.skip > int64(len(z.data[z.idx:])) { + z.skip -= int64(len(z.data[z.idx:])) + z.dpos += int64(len(z.data[z.idx:])) + z.idx = len(z.data) + return 0, nil + } + + z.idx += int(z.skip) + z.dpos += z.skip + z.skip = 0 + n := copy(buf, z.data[z.idx:]) z.idx += n + z.dpos += int64(n) if debugFlag { debug("copied %d bytes to input", n) } @@ -284,6 +298,20 @@ func (z *Reader) Read(buf []byte) (int, error) { return n, nil } +// Seek implements io.Seeker, but supports seeking forward from the current +// position only. Any other seek will return an error. Allows skipping output +// bytes which aren't needed, which in some scenarios is faster than reading +// and discarding them. +// Note this may cause future calls to Read() to read 0 bytes if all of the +// data they would have returned is skipped. 
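+// For example, Seek(n, io.SeekCurrent) followed by Read lets a caller discard
+// the next n bytes of decompressed output without copying them.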
+func (z *Reader) Seek(offset int64, whence int) (int64, error) { + if offset < 0 || whence != io.SeekCurrent { + return z.dpos + z.skip, ErrUnsupportedSeek + } + z.skip += offset + return z.dpos + z.skip, nil +} + // Reset discards the Reader's state and makes it equivalent to the // result of its original state from NewReader, but reading from r instead. // This permits reusing a Reader rather than allocating a new one. diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go index 804a68cc258..324f1386b8a 100644 --- a/vendor/github.com/pierrec/lz4/writer.go +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -3,11 +3,18 @@ package lz4 import ( "encoding/binary" "fmt" - "io" - "github.com/pierrec/lz4/internal/xxh32" + "io" + "runtime" ) +// zResult contains the results of compressing a block. +type zResult struct { + size uint32 // Block header + data []byte // Compressed data + checksum uint32 // Data checksum +} + // Writer implements the LZ4 frame encoder. type Writer struct { Header @@ -18,10 +25,13 @@ type Writer struct { buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes dst io.Writer // Destination. checksum xxh32.XXHZero // Frame checksum. - zdata []byte // Compressed data. - data []byte // Data to be compressed. + data []byte // Data to be compressed + buffer for compressed data. idx int // Index into data. hashtable [winSize]int // Hash table used in CompressBlock(). + + // For concurrency. + c chan chan zResult // Channel for block compression goroutines and writer goroutine. + err error // Any error encountered while writing to the underlying destination. } // NewWriter returns a new LZ4 frame encoder. @@ -29,28 +39,92 @@ type Writer struct { // The supplied Header is checked at the first Write. // It is ok to change it before the first Write but then not until a Reset() is performed. func NewWriter(dst io.Writer) *Writer { - return &Writer{dst: dst} + z := new(Writer) + z.Reset(dst) + return z +} + +// WithConcurrency sets the number of concurrent go routines used for compression. +// A negative value sets the concurrency to GOMAXPROCS. +func (z *Writer) WithConcurrency(n int) *Writer { + switch { + case n == 0 || n == 1: + z.c = nil + return z + case n < 0: + n = runtime.GOMAXPROCS(0) + } + z.c = make(chan chan zResult, n) + // Writer goroutine managing concurrent block compression goroutines. + go func() { + // Process next block compression item. + for c := range z.c { + // Read the next compressed block result. + // Waiting here ensures that the blocks are output in the order they were sent. + // The incoming channel is always closed as it indicates to the caller that + // the block has been processed. + res := <-c + n := len(res.data) + if n == 0 { + // Notify the block compression routine that we are done with its result. + // This is used when a sentinel block is sent to terminate the compression. + close(c) + return + } + // Write the block. + if err := z.writeUint32(res.size); err != nil && z.err == nil { + z.err = err + } + if _, err := z.dst.Write(res.data); err != nil && z.err == nil { + z.err = err + } + if z.BlockChecksum { + if err := z.writeUint32(res.checksum); err != nil && z.err == nil { + z.err = err + } + } + if isCompressed := res.size&compressedBlockFlag == 0; isCompressed { + // It is now safe to release the buffer as no longer in use by any goroutine. 
+ putBuffer(cap(res.data), res.data) + } + if h := z.OnBlockDone; h != nil { + h(n) + } + close(c) + } + }() + return z +} + +// newBuffers fetches a pooled buffer whose size matches the one in Header. +// The first half holds the data to be compressed; the second half receives the compressed output. +func (z *Writer) newBuffers() { + bSize := z.Header.BlockMaxSize + buf := getBuffer(bSize) + z.data = buf[:bSize] // Uncompressed buffer is the first half. +} + +// freeBuffers puts the writer's buffers back to the pool. +func (z *Writer) freeBuffers() { + // Put the buffer back into the pool, if any. + putBuffer(z.Header.BlockMaxSize, z.data) + z.data = nil } // writeHeader builds and writes the header (magic+header) to the underlying io.Writer. func (z *Writer) writeHeader() error { // Default to 4Mb if BlockMaxSize is not set. if z.Header.BlockMaxSize == 0 { - z.Header.BlockMaxSize = bsMapID[7] + z.Header.BlockMaxSize = blockSize4M } // The only option that needs to be validated. bSize := z.Header.BlockMaxSize - bSizeID, ok := bsMapValue[bSize] - if !ok { + if !isValidBlockSize(z.Header.BlockMaxSize) { return fmt.Errorf("lz4: invalid block max size: %d", bSize) } // Allocate the compressed/uncompressed buffers. // The compressed buffer cannot exceed the uncompressed one. - if n := 2 * bSize; cap(z.zdata) < n { - z.zdata = make([]byte, n, n) - } - z.data = z.zdata[:bSize] - z.zdata = z.zdata[:cap(z.zdata)][bSize:] + z.newBuffers() z.idx = 0 // Size is optional. @@ -70,7 +144,7 @@ func (z *Writer) writeHeader() error { flg |= 1 << 2 } buf[4] = flg - buf[5] = bSizeID << 4 + buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 // Current buffer size: magic(4) + flags(1) + block max size (1). n := 6 @@ -150,28 +224,34 @@ func (z *Writer) Write(buf []byte) (int, error) { // compressBlock compresses a block. func (z *Writer) compressBlock(data []byte) error { if !z.NoChecksum { - z.checksum.Write(data) + _, _ = z.checksum.Write(data) } + if z.c != nil { + c := make(chan zResult) + z.c <- c // Send now to guarantee order + go writerCompressBlock(c, z.Header, data) + return nil + } + + zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] // The compressed block size cannot exceed the input's. var zn int - var err error if level := z.Header.CompressionLevel; level != 0 { - zn, err = CompressBlockHC(data, z.zdata, level) + zn, _ = CompressBlockHC(data, zdata, level) } else { - zn, err = CompressBlock(data, z.zdata, z.hashtable[:]) + zn, _ = CompressBlock(data, zdata, z.hashtable[:]) } - var zdata []byte var bLen uint32 if debugFlag { debug("block compression %d => %d", len(data), zn) } - if err == nil && zn > 0 && zn < len(data) { + if zn > 0 && zn < len(data) { // Compressible and compressed size smaller than uncompressed: ok! bLen = uint32(zn) - zdata = z.zdata[:zn] + zdata = zdata[:zn] } else { // Uncompressed block. bLen = uint32(len(data)) | compressedBlockFlag @@ -218,13 +298,35 @@ func (z *Writer) Flush() error { return nil } - if err := z.compressBlock(z.data[:z.idx]); err != nil { - return err - } + data := z.data[:z.idx] z.idx = 0 + if z.c == nil { + return z.compressBlock(data) + } + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + c := make(chan zResult) + z.c <- c + writerCompressBlock(c, z.Header, data) return nil } +func (z *Writer) close() error { + if z.c == nil { + return nil + } + // Send a sentinel block (no data to compress) to terminate the writer main goroutine. + c := make(chan zResult) + z.c <- c + c <- zResult{} + // Wait for the main goroutine to complete. 
+ <-c + // At this point the main goroutine has shut down or is about to return. + z.c = nil + return z.err +} + // Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. func (z *Writer) Close() error { if !z.Header.done { @@ -235,6 +337,10 @@ func (z *Writer) Close() error { if err := z.Flush(); err != nil { return err } + if err := z.close(); err != nil { + return err + } + z.freeBuffers() if debugFlag { debug("writing last empty block") @@ -256,12 +362,15 @@ func (z *Writer) Close() error { // initial state from NewWriter, but instead writing to w. // No access to the underlying io.Writer is performed. func (z *Writer) Reset(w io.Writer) { - z.Header = Header{} + n := cap(z.c) + _ = z.close() + z.freeBuffers() + z.Header.Reset() z.dst = w z.checksum.Reset() - z.zdata = z.zdata[:0] - z.data = z.data[:0] z.idx = 0 + z.err = nil + z.WithConcurrency(n) } // writeUint32 writes a uint32 to the underlying writer. @@ -271,3 +380,29 @@ func (z *Writer) writeUint32(x uint32) error { _, err := z.dst.Write(buf) return err } + +// writerCompressBlock compresses data into a pooled buffer and writes its result +// out to the input channel. +func writerCompressBlock(c chan zResult, header Header, data []byte) { + zdata := getBuffer(header.BlockMaxSize) + // The compressed block size cannot exceed the input's. + var zn int + if level := header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + var hashTable [winSize]int + zn, _ = CompressBlock(data, zdata, hashTable[:]) + } + var res zResult + if zn > 0 && zn < len(data) { + res.size = uint32(zn) + res.data = zdata[:zn] + } else { + res.size = uint32(len(data)) | compressedBlockFlag + res.data = data + } + if header.BlockChecksum { + res.checksum = xxh32.ChecksumZero(res.data) + } + c <- res +} diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore new file mode 100644 index 00000000000..83c8f82374a --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/.gitignore @@ -0,0 +1,9 @@ +*.[68] +*.a +*.out +*.swp +_obj +_testmain.go +cmd/metrics-bench/metrics-bench +cmd/metrics-example/metrics-example +cmd/never-read/never-read diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml new file mode 100644 index 00000000000..409a5b631c3 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml @@ -0,0 +1,22 @@ +language: go + +go: + - "1.3" + - "1.4" + - "1.5" + - "1.6" + - "1.7" + - "1.8" + - "1.9" + - "1.10" + - "1.11" + - "1.12" + - "1.13" + - "1.14" + +script: + - ./validate.sh + +# this should give us faster builds according to +# http://docs.travis-ci.com/user/migrating-from-legacy/ +sudo: false diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE new file mode 100644 index 00000000000..363fa9ee77b --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/LICENSE @@ -0,0 +1,29 @@ +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md new file mode 100644 index 00000000000..27ddfee8b89 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/README.md @@ -0,0 +1,171 @@ +go-metrics +========== + +![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master) + +Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>. + +Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>. + +Usage +----- + +Create and update metrics: + +```go +c := metrics.NewCounter() +metrics.Register("foo", c) +c.Inc(47) + +g := metrics.NewGauge() +metrics.Register("bar", g) +g.Update(47) + +r := NewRegistry() +g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() }) + +s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) +h := metrics.NewHistogram(s) +metrics.Register("baz", h) +h.Update(47) + +m := metrics.NewMeter() +metrics.Register("quux", m) +m.Mark(47) + +t := metrics.NewTimer() +metrics.Register("bang", t) +t.Time(func() {}) +t.Update(47) +``` + +Register() is not threadsafe. 
For threadsafe metric registration use +GetOrRegister: + +```go +t := metrics.GetOrRegisterTimer("account.create.latency", nil) +t.Time(func() {}) +t.Update(47) +``` + +**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will +leak memory: + +```go +// Will call Stop() on the Meter to allow for garbage collection +metrics.Unregister("quux") +// Or similarly for a Timer that embeds a Meter +metrics.Unregister("bang") +``` + +Periodically log every metric in human-readable form to standard error: + +```go +go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) +``` + +Periodically log every metric in slightly-more-parseable form to syslog: + +```go +w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") +go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) +``` + +Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite): + +```go + +import "github.com/cyberdelia/go-metrics-graphite" + +addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") +go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) +``` + +Periodically emit every metric into InfluxDB: + +**NOTE:** this has been pulled out of the library due to constant fluctuations +in the InfluxDB API. In fact, all client libraries are on their way out. see +issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and +[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details. + +```go +import "github.com/vrischmann/go-metrics-influxdb" + +go influxdb.InfluxDB(metrics.DefaultRegistry, + 10e9, + "127.0.0.1:8086", + "database-name", + "username", + "password" +) +``` + +Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato): + +**Note**: the client included with this repository under the `librato` package +has been deprecated and moved to the repository linked above. + +```go +import "github.com/mihasya/go-metrics-librato" + +go librato.Librato(metrics.DefaultRegistry, + 10e9, // interval + "example@example.com", // account owner email address + "token", // Librato API token + "hostname", // source + []float64{0.95}, // percentiles to send + time.Millisecond, // time unit +) +``` + +Periodically emit every metric to StatHat: + +```go +import "github.com/rcrowley/go-metrics/stathat" + +go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") +``` + +Maintain all metrics along with expvars at `/debug/metrics`: + +This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/) +but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars +as well as all your go-metrics. 
+ + +```go +import "github.com/rcrowley/go-metrics/exp" + +exp.Exp(metrics.DefaultRegistry) +``` + +Installation +------------ + +```sh +go get github.com/rcrowley/go-metrics +``` + +StatHat support additionally requires their Go client: + +```sh +go get github.com/stathat/go +``` + +Publishing Metrics +------------------ + +Clients are available for the following destinations: + +* AppOptics - https://github.com/ysamlan/go-metrics-appoptics +* Librato - https://github.com/mihasya/go-metrics-librato +* Graphite - https://github.com/cyberdelia/go-metrics-graphite +* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb +* Ganglia - https://github.com/appscode/metlia +* Prometheus - https://github.com/deathowl/go-metrics-prometheus +* DataDog - https://github.com/syntaqx/go-metrics-datadog +* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx +* Honeycomb - https://github.com/getspine/go-metrics-honeycomb +* Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront +* Open-Falcon - https://github.com/g4zhuj/go-metrics-falcon +* AWS CloudWatch - [https://github.com/savaki/cloudmetrics](https://github.com/savaki/cloudmetrics) diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go new file mode 100644 index 00000000000..bb7b039cb57 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/counter.go @@ -0,0 +1,112 @@ +package metrics + +import "sync/atomic" + +// Counters hold an int64 value that can be incremented and decremented. +type Counter interface { + Clear() + Count() int64 + Dec(int64) + Inc(int64) + Snapshot() Counter +} + +// GetOrRegisterCounter returns an existing Counter or constructs and registers +// a new StandardCounter. +func GetOrRegisterCounter(name string, r Registry) Counter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewCounter).(Counter) +} + +// NewCounter constructs a new StandardCounter. +func NewCounter() Counter { + if UseNilMetrics { + return NilCounter{} + } + return &StandardCounter{0} +} + +// NewRegisteredCounter constructs and registers a new StandardCounter. +func NewRegisteredCounter(name string, r Registry) Counter { + c := NewCounter() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// CounterSnapshot is a read-only copy of another Counter. +type CounterSnapshot int64 + +// Clear panics. +func (CounterSnapshot) Clear() { + panic("Clear called on a CounterSnapshot") +} + +// Count returns the count at the time the snapshot was taken. +func (c CounterSnapshot) Count() int64 { return int64(c) } + +// Dec panics. +func (CounterSnapshot) Dec(int64) { + panic("Dec called on a CounterSnapshot") +} + +// Inc panics. +func (CounterSnapshot) Inc(int64) { + panic("Inc called on a CounterSnapshot") +} + +// Snapshot returns the snapshot. +func (c CounterSnapshot) Snapshot() Counter { return c } + +// NilCounter is a no-op Counter. +type NilCounter struct{} + +// Clear is a no-op. +func (NilCounter) Clear() {} + +// Count is a no-op. +func (NilCounter) Count() int64 { return 0 } + +// Dec is a no-op. +func (NilCounter) Dec(i int64) {} + +// Inc is a no-op. +func (NilCounter) Inc(i int64) {} + +// Snapshot is a no-op. +func (NilCounter) Snapshot() Counter { return NilCounter{} } + +// StandardCounter is the standard implementation of a Counter and uses the +// sync/atomic package to manage a single int64 value. +type StandardCounter struct { + count int64 +} + +// Clear sets the counter to zero. 
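+// The zeroing is an atomic store, so Clear is safe for concurrent use with Inc and Dec.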
+func (c *StandardCounter) Clear() { + atomic.StoreInt64(&c.count, 0) +} + +// Count returns the current count. +func (c *StandardCounter) Count() int64 { + return atomic.LoadInt64(&c.count) +} + +// Dec decrements the counter by the given amount. +func (c *StandardCounter) Dec(i int64) { + atomic.AddInt64(&c.count, -i) +} + +// Inc increments the counter by the given amount. +func (c *StandardCounter) Inc(i int64) { + atomic.AddInt64(&c.count, i) +} + +// Snapshot returns a read-only copy of the counter. +func (c *StandardCounter) Snapshot() Counter { + return CounterSnapshot(c.Count()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go new file mode 100644 index 00000000000..179e5aae729 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/debug.go @@ -0,0 +1,80 @@ +package metrics + +import ( + "runtime/debug" + "sync" + "time" +) + +var ( + debugMetrics struct { + GCStats struct { + LastGC Gauge + NumGC Gauge + Pause Histogram + //PauseQuantiles Histogram + PauseTotal Gauge + } + ReadGCStats Timer + } + gcStats debug.GCStats + registerDebugMetricsOnce = sync.Once{} +) + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called as a goroutine. +func CaptureDebugGCStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureDebugGCStatsOnce(r) + } +} + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called in a background goroutine. +// Giving a registry which has not been given to RegisterDebugGCStats will +// panic. +// +// Be careful (but much less so) with this because debug.ReadGCStats calls +// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world +// operation, isn't something you want to be doing all the time. +func CaptureDebugGCStatsOnce(r Registry) { + lastGC := gcStats.LastGC + t := time.Now() + debug.ReadGCStats(&gcStats) + debugMetrics.ReadGCStats.UpdateSince(t) + + debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) + debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) + if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { + debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) + } + //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) + debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) +} + +// Register metrics for the Go garbage collector statistics exported in +// debug.GCStats. The metrics are named by their fully-qualified Go symbols, +// i.e. debug.GCStats.PauseTotal. 
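+// Registration is guarded by a sync.Once, so only the first call has any effect; later calls are no-ops.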
+func RegisterDebugGCStats(r Registry) { + registerDebugMetricsOnce.Do(func() { + debugMetrics.GCStats.LastGC = NewGauge() + debugMetrics.GCStats.NumGC = NewGauge() + debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) + //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) + debugMetrics.GCStats.PauseTotal = NewGauge() + debugMetrics.ReadGCStats = NewTimer() + + r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) + r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) + r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) + //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) + r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) + r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) + }) +} + +// Allocate an initial slice for gcStats.Pause to avoid allocations during +// normal operation. +func init() { + gcStats.Pause = make([]time.Duration, 11) +} diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go new file mode 100644 index 00000000000..a8183dd7e21 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/ewma.go @@ -0,0 +1,138 @@ +package metrics + +import ( + "math" + "sync" + "sync/atomic" +) + +// EWMAs continuously calculate an exponentially-weighted moving average +// based on an outside source of clock ticks. +type EWMA interface { + Rate() float64 + Snapshot() EWMA + Tick() + Update(int64) +} + +// NewEWMA constructs a new EWMA with the given alpha. +func NewEWMA(alpha float64) EWMA { + if UseNilMetrics { + return NilEWMA{} + } + return &StandardEWMA{alpha: alpha} +} + +// NewEWMA1 constructs a new EWMA for a one-minute moving average. +func NewEWMA1() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/1)) +} + +// NewEWMA5 constructs a new EWMA for a five-minute moving average. +func NewEWMA5() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/5)) +} + +// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. +func NewEWMA15() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/15)) +} + +// EWMASnapshot is a read-only copy of another EWMA. +type EWMASnapshot float64 + +// Rate returns the rate of events per second at the time the snapshot was +// taken. +func (a EWMASnapshot) Rate() float64 { return float64(a) } + +// Snapshot returns the snapshot. +func (a EWMASnapshot) Snapshot() EWMA { return a } + +// Tick panics. +func (EWMASnapshot) Tick() { + panic("Tick called on an EWMASnapshot") +} + +// Update panics. +func (EWMASnapshot) Update(int64) { + panic("Update called on an EWMASnapshot") +} + +// NilEWMA is a no-op EWMA. +type NilEWMA struct{} + +// Rate is a no-op. +func (NilEWMA) Rate() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } + +// Tick is a no-op. +func (NilEWMA) Tick() {} + +// Update is a no-op. +func (NilEWMA) Update(n int64) {} + +// StandardEWMA is the standard implementation of an EWMA and tracks the number +// of uncounted events and processes them on each tick. It uses the +// sync/atomic package to manage uncounted events. +type StandardEWMA struct { + uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment + alpha float64 + rate uint64 + init uint32 + mutex sync.Mutex +} + +// Rate returns the moving average rate of events per second. 
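+// The rate is stored internally as events per nanosecond and is scaled by 1e9 when read.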
+func (a *StandardEWMA) Rate() float64 { + currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9) + return currentRate +} + +// Snapshot returns a read-only copy of the EWMA. +func (a *StandardEWMA) Snapshot() EWMA { + return EWMASnapshot(a.Rate()) +} + +// Tick ticks the clock to update the moving average. It assumes it is called +// every five seconds. +func (a *StandardEWMA) Tick() { + // Optimization to avoid mutex locking in the hot-path. + if atomic.LoadUint32(&a.init) == 1 { + a.updateRate(a.fetchInstantRate()) + } else { + // Slow-path: this is only needed on the first Tick() and preserves transactional updating + // of init and rate in the else block. The first conditional is needed below because + // a different thread could have set a.init = 1 between the time of the first atomic load and when + // the lock was acquired. + a.mutex.Lock() + if atomic.LoadUint32(&a.init) == 1 { + // The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section + // but again, this section is only invoked on the first successful Tick() operation. + a.updateRate(a.fetchInstantRate()) + } else { + atomic.StoreUint32(&a.init, 1) + atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate())) + } + a.mutex.Unlock() + } +} + +func (a *StandardEWMA) fetchInstantRate() float64 { + count := atomic.LoadInt64(&a.uncounted) + atomic.AddInt64(&a.uncounted, -count) + instantRate := float64(count) / float64(5e9) + return instantRate +} + +func (a *StandardEWMA) updateRate(instantRate float64) { + currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) + currentRate += a.alpha * (instantRate - currentRate) + atomic.StoreUint64(&a.rate, math.Float64bits(currentRate)) +} + +// Update adds n uncounted events. +func (a *StandardEWMA) Update(n int64) { + atomic.AddInt64(&a.uncounted, n) +} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go new file mode 100644 index 00000000000..cb57a93889f --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/gauge.go @@ -0,0 +1,120 @@ +package metrics + +import "sync/atomic" + +// Gauges hold an int64 value that can be set arbitrarily. +type Gauge interface { + Snapshot() Gauge + Update(int64) + Value() int64 +} + +// GetOrRegisterGauge returns an existing Gauge or constructs and registers a +// new StandardGauge. +func GetOrRegisterGauge(name string, r Registry) Gauge { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewGauge).(Gauge) +} + +// NewGauge constructs a new StandardGauge. +func NewGauge() Gauge { + if UseNilMetrics { + return NilGauge{} + } + return &StandardGauge{0} +} + +// NewRegisteredGauge constructs and registers a new StandardGauge. +func NewRegisteredGauge(name string, r Registry) Gauge { + c := NewGauge() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewFunctionalGauge constructs a new FunctionalGauge. +func NewFunctionalGauge(f func() int64) Gauge { + if UseNilMetrics { + return NilGauge{} + } + return &FunctionalGauge{value: f} +} + +// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge. +func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge { + c := NewFunctionalGauge(f) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// GaugeSnapshot is a read-only copy of another Gauge. +type GaugeSnapshot int64 + +// Snapshot returns the snapshot. 
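+// A GaugeSnapshot is an immutable int64, so it simply returns itself.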
+func (g GaugeSnapshot) Snapshot() Gauge { return g } + +// Update panics. +func (GaugeSnapshot) Update(int64) { + panic("Update called on a GaugeSnapshot") +} + +// Value returns the value at the time the snapshot was taken. +func (g GaugeSnapshot) Value() int64 { return int64(g) } + +// NilGauge is a no-op Gauge. +type NilGauge struct{} + +// Snapshot is a no-op. +func (NilGauge) Snapshot() Gauge { return NilGauge{} } + +// Update is a no-op. +func (NilGauge) Update(v int64) {} + +// Value is a no-op. +func (NilGauge) Value() int64 { return 0 } + +// StandardGauge is the standard implementation of a Gauge and uses the +// sync/atomic package to manage a single int64 value. +type StandardGauge struct { + value int64 +} + +// Snapshot returns a read-only copy of the gauge. +func (g *StandardGauge) Snapshot() Gauge { + return GaugeSnapshot(g.Value()) +} + +// Update updates the gauge's value. +func (g *StandardGauge) Update(v int64) { + atomic.StoreInt64(&g.value, v) +} + +// Value returns the gauge's current value. +func (g *StandardGauge) Value() int64 { + return atomic.LoadInt64(&g.value) +} + +// FunctionalGauge returns the value from a given function. +type FunctionalGauge struct { + value func() int64 +} + +// Value returns the gauge's current value. +func (g FunctionalGauge) Value() int64 { + return g.value() +} + +// Snapshot returns the snapshot. +func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) } + +// Update panics. +func (FunctionalGauge) Update(int64) { + panic("Update called on a FunctionalGauge") +} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go new file mode 100644 index 00000000000..3962e6db09a --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go @@ -0,0 +1,125 @@ +package metrics + +import ( + "math" + "sync/atomic" +) + +// GaugeFloat64s hold a float64 value that can be set arbitrarily. +type GaugeFloat64 interface { + Snapshot() GaugeFloat64 + Update(float64) + Value() float64 +} + +// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a +// new StandardGaugeFloat64. +func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) +} + +// NewGaugeFloat64 constructs a new StandardGaugeFloat64. +func NewGaugeFloat64() GaugeFloat64 { + if UseNilMetrics { + return NilGaugeFloat64{} + } + return &StandardGaugeFloat64{ + value: 0.0, + } +} + +// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. +func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { + c := NewGaugeFloat64() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64. +func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 { + if UseNilMetrics { + return NilGaugeFloat64{} + } + return &FunctionalGaugeFloat64{value: f} +} + +// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64. +func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 { + c := NewFunctionalGaugeFloat64(f) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. +type GaugeFloat64Snapshot float64 + +// Snapshot returns the snapshot. 
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 00000000000..3962e6db09a
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,125 @@
+package metrics
+
+import (
+ "math"
+ "sync/atomic"
+)
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+ Snapshot() GaugeFloat64
+ Update(float64)
+ Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &StandardGaugeFloat64{
+ value: 0.0,
+ }
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ c := NewGaugeFloat64()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+ c := NewFunctionalGaugeFloat64(f)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+ panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// the sync/atomic package to manage a single float64 value, stored as its
+// IEEE-754 bit pattern in a uint64.
+type StandardGaugeFloat64 struct {
+ value uint64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+ return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+ atomic.StoreUint64(&g.value, math.Float64bits(v))
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+ return math.Float64frombits(atomic.LoadUint64(&g.value))
+}
+
+// FunctionalGaugeFloat64 returns the value from a given function.
+type FunctionalGaugeFloat64 struct {
+ value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+ return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+ panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 00000000000..abd0a7d2918
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter.
+type GraphiteConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+ Percentiles []float64 // Percentiles to export from timers and histograms
+}
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ GraphiteWithConfig(GraphiteConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+ })
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+ log.Printf("WARNING: This go-metrics client has been DEPRECATED! 
It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + for _ = range time.Tick(c.FlushInterval) { + if err := graphite(&c); nil != err { + log.Println(err) + } + } +} + +// GraphiteOnce performs a single submission to Graphite, returning a +// non-nil error on failed connections. This can be used in a loop +// similar to GraphiteWithConfig for custom error handling. +func GraphiteOnce(c GraphiteConfig) error { + log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + return graphite(&c) +} + +func graphite(c *GraphiteConfig) error { + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + case Gauge: + fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) + case GaugeFloat64: + fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go 
b/vendor/github.com/rcrowley/go-metrics/healthcheck.go new file mode 100644 index 00000000000..445131caee5 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go @@ -0,0 +1,61 @@ +package metrics + +// Healthchecks hold an error value describing an arbitrary up/down status. +type Healthcheck interface { + Check() + Error() error + Healthy() + Unhealthy(error) +} + +// NewHealthcheck constructs a new Healthcheck which will use the given +// function to update its status. +func NewHealthcheck(f func(Healthcheck)) Healthcheck { + if UseNilMetrics { + return NilHealthcheck{} + } + return &StandardHealthcheck{nil, f} +} + +// NilHealthcheck is a no-op. +type NilHealthcheck struct{} + +// Check is a no-op. +func (NilHealthcheck) Check() {} + +// Error is a no-op. +func (NilHealthcheck) Error() error { return nil } + +// Healthy is a no-op. +func (NilHealthcheck) Healthy() {} + +// Unhealthy is a no-op. +func (NilHealthcheck) Unhealthy(error) {} + +// StandardHealthcheck is the standard implementation of a Healthcheck and +// stores the status and a function to call to update the status. +type StandardHealthcheck struct { + err error + f func(Healthcheck) +} + +// Check runs the healthcheck function to update the healthcheck's status. +func (h *StandardHealthcheck) Check() { + h.f(h) +} + +// Error returns the healthcheck's status, which will be nil if it is healthy. +func (h *StandardHealthcheck) Error() error { + return h.err +} + +// Healthy marks the healthcheck as healthy. +func (h *StandardHealthcheck) Healthy() { + h.err = nil +} + +// Unhealthy marks the healthcheck as unhealthy. The error is stored and +// may be retrieved by the Error method. +func (h *StandardHealthcheck) Unhealthy(err error) { + h.err = err +} diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go new file mode 100644 index 00000000000..dbc837fe4d9 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/histogram.go @@ -0,0 +1,202 @@ +package metrics + +// Histograms calculate distribution statistics from a series of int64 values. +type Histogram interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Sample() Sample + Snapshot() Histogram + StdDev() float64 + Sum() int64 + Update(int64) + Variance() float64 +} + +// GetOrRegisterHistogram returns an existing Histogram or constructs and +// registers a new StandardHistogram. +func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) +} + +// NewHistogram constructs a new StandardHistogram from a Sample. +func NewHistogram(s Sample) Histogram { + if UseNilMetrics { + return NilHistogram{} + } + return &StandardHistogram{sample: s} +} + +// NewRegisteredHistogram constructs and registers a new StandardHistogram from +// a Sample. +func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { + c := NewHistogram(s) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// HistogramSnapshot is a read-only copy of another Histogram. +type HistogramSnapshot struct { + sample *SampleSnapshot +} + +// Clear panics. +func (*HistogramSnapshot) Clear() { + panic("Clear called on a HistogramSnapshot") +} + +// Count returns the number of samples recorded at the time the snapshot was +// taken. 
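Reviewer note on healthcheck.go above: a healthcheck just pairs a stored error with a checker callback that flips it via Healthy()/Unhealthy(). A minimal usage sketch (the dbUp flag stands in for a real connectivity probe):

```go
package main

import (
	"errors"
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	dbUp := true // stand-in for a real probe, e.g. a ping with a deadline
	hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
		if dbUp {
			h.Healthy()
		} else {
			h.Unhealthy(errors.New("database unreachable"))
		}
	})
	hc.Check()               // runs the callback
	fmt.Println(hc.Error())  // <nil> while healthy
}
```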
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample at the time the snapshot +// was taken. +func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the sample +// at the time the snapshot was taken. +func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *HistogramSnapshot) Sample() Sample { return h.sample } + +// Snapshot returns the snapshot. +func (h *HistogramSnapshot) Snapshot() Histogram { return h } + +// StdDev returns the standard deviation of the values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample at the time the snapshot was taken. +func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } + +// Update panics. +func (*HistogramSnapshot) Update(int64) { + panic("Update called on a HistogramSnapshot") +} + +// Variance returns the variance of inputs at the time the snapshot was taken. +func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } + +// NilHistogram is a no-op Histogram. +type NilHistogram struct{} + +// Clear is a no-op. +func (NilHistogram) Clear() {} + +// Count is a no-op. +func (NilHistogram) Count() int64 { return 0 } + +// Max is a no-op. +func (NilHistogram) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilHistogram) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilHistogram) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilHistogram) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilHistogram) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Sample is a no-op. +func (NilHistogram) Sample() Sample { return NilSample{} } + +// Snapshot is a no-op. +func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } + +// StdDev is a no-op. +func (NilHistogram) StdDev() float64 { return 0.0 } + +// Sum is a no-op. +func (NilHistogram) Sum() int64 { return 0 } + +// Update is a no-op. +func (NilHistogram) Update(v int64) {} + +// Variance is a no-op. +func (NilHistogram) Variance() float64 { return 0.0 } + +// StandardHistogram is the standard implementation of a Histogram and uses a +// Sample to bound its memory use. +type StandardHistogram struct { + sample Sample +} + +// Clear clears the histogram and its sample. +func (h *StandardHistogram) Clear() { h.sample.Clear() } + +// Count returns the number of samples recorded since the histogram was last +// cleared. +func (h *StandardHistogram) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample. +func (h *StandardHistogram) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample. 
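Reviewer note: the snapshot types above exist so multi-stat reads are consistent. Take one Snapshot() and compute everything from it, rather than querying the live histogram repeatedly while other goroutines update it. Sketch (sample size and alpha mirror the defaults used elsewhere in this vendor drop):

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	h := metrics.NewRegisteredHistogram("latency.us", nil, metrics.NewExpDecaySample(1028, 0.015))
	for i := int64(1); i <= 1000; i++ {
		h.Update(i)
	}
	snap := h.Snapshot() // one consistent view of the sample
	ps := snap.Percentiles([]float64{0.5, 0.95, 0.99})
	fmt.Println(snap.Count(), snap.Mean(), ps)
}
```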
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample. +func (h *StandardHistogram) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of the values in the sample. +func (h *StandardHistogram) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (h *StandardHistogram) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *StandardHistogram) Sample() Sample { return h.sample } + +// Snapshot returns a read-only copy of the histogram. +func (h *StandardHistogram) Snapshot() Histogram { + return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} +} + +// StdDev returns the standard deviation of the values in the sample. +func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample. +func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } + +// Update samples a new value. +func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } + +// Variance returns the variance of the values in the sample. +func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go new file mode 100644 index 00000000000..174b9477e92 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/json.go @@ -0,0 +1,31 @@ +package metrics + +import ( + "encoding/json" + "io" + "time" +) + +// MarshalJSON returns a byte slice containing a JSON representation of all +// the metrics in the Registry. +func (r *StandardRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(r.GetAll()) +} + +// WriteJSON writes metrics from the given registry periodically to the +// specified io.Writer as JSON. +func WriteJSON(r Registry, d time.Duration, w io.Writer) { + for _ = range time.Tick(d) { + WriteJSONOnce(r, w) + } +} + +// WriteJSONOnce writes metrics from the given registry to the specified +// io.Writer as JSON. +func WriteJSONOnce(r Registry, w io.Writer) { + json.NewEncoder(w).Encode(r) +} + +func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(p.GetAll()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go new file mode 100644 index 00000000000..2614a0a33eb --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/log.go @@ -0,0 +1,100 @@ +package metrics + +import ( + "time" +) + +type Logger interface { + Printf(format string, v ...interface{}) +} + +// Log outputs each metric in the given registry periodically using the given logger. +func Log(r Registry, freq time.Duration, l Logger) { + LogScaled(r, freq, time.Nanosecond, l) +} + +// LogOnCue outputs each metric in the given registry on demand through the channel +// using the given logger +func LogOnCue(r Registry, ch chan interface{}, l Logger) { + LogScaledOnCue(r, ch, time.Nanosecond, l) +} + +// LogScaled outputs each metric in the given registry periodically using the given +// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. 
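Reviewer note: json.go and log.go above share one shape: a blocking loop around time.Tick driving a one-shot writer. Typical wiring runs them in background goroutines. A sketch (intervals are arbitrary; GetOrRegisterCounter comes from counter.go, which is not in this hunk but follows the same pattern as the gauge and histogram helpers):

```go
package main

import (
	"log"
	"os"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.GetOrRegisterCounter("requests", r).Inc(1) // something to report

	// Both exporters block, so run them off the main goroutine.
	go metrics.WriteJSON(r, 10*time.Second, os.Stdout)
	go metrics.Log(r, 30*time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))

	time.Sleep(time.Minute)
}
```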
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { + ch := make(chan interface{}) + go func(channel chan interface{}) { + for _ = range time.Tick(freq) { + channel <- struct{}{} + } + }(ch) + LogScaledOnCue(r, ch, scale, l) +} + +// LogScaledOnCue outputs each metric in the given registry on demand through the channel +// using the given logger. Print timings in `scale` units (eg time.Millisecond) rather +// than nanos. +func LogScaledOnCue(r Registry, ch chan interface{}, scale time.Duration, l Logger) { + du := float64(scale) + duSuffix := scale.String()[1:] + + for _ = range ch { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + l.Printf("counter %s\n", name) + l.Printf(" count: %9d\n", metric.Count()) + case Gauge: + l.Printf("gauge %s\n", name) + l.Printf(" value: %9d\n", metric.Value()) + case GaugeFloat64: + l.Printf("gauge %s\n", name) + l.Printf(" value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + l.Printf("healthcheck %s\n", name) + l.Printf(" error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("histogram %s\n", name) + l.Printf(" count: %9d\n", h.Count()) + l.Printf(" min: %9d\n", h.Min()) + l.Printf(" max: %9d\n", h.Max()) + l.Printf(" mean: %12.2f\n", h.Mean()) + l.Printf(" stddev: %12.2f\n", h.StdDev()) + l.Printf(" median: %12.2f\n", ps[0]) + l.Printf(" 75%%: %12.2f\n", ps[1]) + l.Printf(" 95%%: %12.2f\n", ps[2]) + l.Printf(" 99%%: %12.2f\n", ps[3]) + l.Printf(" 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + l.Printf("meter %s\n", name) + l.Printf(" count: %9d\n", m.Count()) + l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) + l.Printf(" mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("timer %s\n", name) + l.Printf(" count: %9d\n", t.Count()) + l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) + l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix) + l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) + l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) + l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) + l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) + l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) + l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) + l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) + l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) + l.Printf(" mean rate: %12.2f\n", t.RateMean()) + } + }) + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md new file mode 100644 index 00000000000..47454f54b64 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/memory.md @@ -0,0 +1,285 @@ +Memory usage +============ + +(Highly unscientific.) 
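Reviewer note on LogScaledOnCue above: the cue channel lets callers trigger dumps on demand instead of on a timer. A sketch that dumps on SIGUSR1 (the signal choice is illustrative and Unix-only):

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	cue := make(chan interface{})
	go metrics.LogScaledOnCue(metrics.DefaultRegistry, cue, time.Millisecond,
		log.New(os.Stderr, "metrics: ", 0))

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGUSR1)
	for range sigs {
		cue <- struct{}{} // dump all metrics on each SIGUSR1
	}
}
```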
+ +Command used to gather static memory usage: + +```sh +grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" +``` + +Program used to gather baseline memory usage: + +```go +package main + +import "time" + +func main() { + time.Sleep(600e9) +} +``` + +Baseline +-------- + +``` +VmPeak: 42604 kB +VmSize: 42604 kB +VmLck: 0 kB +VmHWM: 1120 kB +VmRSS: 1120 kB +VmData: 35460 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 36 kB +VmSwap: 0 kB +``` + +Program used to gather metric memory usage (with other metrics being similar): + +```go +package main + +import ( + "fmt" + "metrics" + "time" +) + +func main() { + fmt.Sprintf("foo") + metrics.NewRegistry() + time.Sleep(600e9) +} +``` + +1000 counters registered +------------------------ + +``` +VmPeak: 44016 kB +VmSize: 44016 kB +VmLck: 0 kB +VmHWM: 1928 kB +VmRSS: 1928 kB +VmData: 36868 kB +VmStk: 136 kB +VmExe: 1024 kB +VmLib: 1848 kB +VmPTE: 40 kB +VmSwap: 0 kB +``` + +**1.412 kB virtual, TODO 0.808 kB resident per counter.** + +100000 counters registered +-------------------------- + +``` +VmPeak: 55024 kB +VmSize: 55024 kB +VmLck: 0 kB +VmHWM: 12440 kB +VmRSS: 12440 kB +VmData: 47876 kB +VmStk: 136 kB +VmExe: 1024 kB +VmLib: 1848 kB +VmPTE: 64 kB +VmSwap: 0 kB +``` + +**0.1242 kB virtual, 0.1132 kB resident per counter.** + +1000 gauges registered +---------------------- + +``` +VmPeak: 44012 kB +VmSize: 44012 kB +VmLck: 0 kB +VmHWM: 1928 kB +VmRSS: 1928 kB +VmData: 36868 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 40 kB +VmSwap: 0 kB +``` + +**1.408 kB virtual, 0.808 kB resident per counter.** + +100000 gauges registered +------------------------ + +``` +VmPeak: 55020 kB +VmSize: 55020 kB +VmLck: 0 kB +VmHWM: 12432 kB +VmRSS: 12432 kB +VmData: 47876 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 60 kB +VmSwap: 0 kB +``` + +**0.12416 kB virtual, 0.11312 resident per gauge.** + +1000 histograms with a uniform sample size of 1028 +-------------------------------------------------- + +``` +VmPeak: 72272 kB +VmSize: 72272 kB +VmLck: 0 kB +VmHWM: 16204 kB +VmRSS: 16204 kB +VmData: 65100 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 80 kB +VmSwap: 0 kB +``` + +**29.668 kB virtual, TODO 15.084 resident per histogram.** + +10000 histograms with a uniform sample size of 1028 +--------------------------------------------------- + +``` +VmPeak: 256912 kB +VmSize: 256912 kB +VmLck: 0 kB +VmHWM: 146204 kB +VmRSS: 146204 kB +VmData: 249740 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 448 kB +VmSwap: 0 kB +``` + +**21.4308 kB virtual, 14.5084 kB resident per histogram.** + +50000 histograms with a uniform sample size of 1028 +--------------------------------------------------- + +``` +VmPeak: 908112 kB +VmSize: 908112 kB +VmLck: 0 kB +VmHWM: 645832 kB +VmRSS: 645588 kB +VmData: 900940 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 1716 kB +VmSwap: 1544 kB +``` + +**17.31016 kB virtual, 12.88936 kB resident per histogram.** + +1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 +------------------------------------------------------------------------------------- + +``` +VmPeak: 62480 kB +VmSize: 62480 kB +VmLck: 0 kB +VmHWM: 11572 kB +VmRSS: 11572 kB +VmData: 55308 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 64 kB +VmSwap: 0 kB +``` + +**19.876 kB virtual, 10.452 kB resident per histogram.** + +10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 153296 kB
+VmSize: 153296 kB
+VmLck: 0 kB
+VmHWM: 101176 kB
+VmRSS: 101176 kB
+VmData: 146124 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 240 kB
+VmSwap: 0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 557264 kB
+VmSize: 557264 kB
+VmLck: 0 kB
+VmHWM: 501056 kB
+VmRSS: 501056 kB
+VmData: 550092 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1032 kB
+VmSwap: 0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak: 74504 kB
+VmSize: 74504 kB
+VmLck: 0 kB
+VmHWM: 24124 kB
+VmRSS: 24124 kB
+VmData: 67340 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 92 kB
+VmSwap: 0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak: 278920 kB
+VmSize: 278920 kB
+VmLck: 0 kB
+VmHWM: 227300 kB
+VmRSS: 227300 kB
+VmData: 271756 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 488 kB
+VmSwap: 0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 00000000000..223669bcb29
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,251 @@
+package metrics
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+ Count() int64
+ Mark(int64)
+ Rate1() float64
+ Rate5() float64
+ Rate15() float64
+ RateMean() float64
+ Snapshot() Meter
+ Stop()
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
+func NewMeter() Meter {
+ if UseNilMetrics {
+ return NilMeter{}
+ }
+ m := newStandardMeter()
+ arbiter.Lock()
+ defer arbiter.Unlock()
+ arbiter.meters[m] = struct{}{}
+ if !arbiter.started {
+ arbiter.started = true
+ go arbiter.tick()
+ }
+ return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and launches a
+// goroutine.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredMeter(name string, r Registry) Meter {
+ c := NewMeter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+ count int64
+ rate1, rate5, rate15, rateMean uint64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+ panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// Stop is a no-op.
+func (m *MeterSnapshot) Stop() {}
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// Stop is a no-op.
+func (NilMeter) Stop() {}
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+ snapshot *MeterSnapshot
+ a1, a5, a15 EWMA
+ startTime time.Time
+ stopped uint32
+}
+
+func newStandardMeter() *StandardMeter {
+ return &StandardMeter{
+ snapshot: &MeterSnapshot{},
+ a1: NewEWMA1(),
+ a5: NewEWMA5(),
+ a15: NewEWMA15(),
+ startTime: time.Now(),
+ }
+}
+
+// Stop stops the meter. Mark() becomes a no-op once the meter has been stopped.
+func (m *StandardMeter) Stop() {
+ if atomic.CompareAndSwapUint32(&m.stopped, 0, 1) {
+ arbiter.Lock()
+ delete(arbiter.meters, m)
+ arbiter.Unlock()
+ }
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+ return atomic.LoadInt64(&m.snapshot.count)
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+ if atomic.LoadUint32(&m.stopped) == 1 {
+ return
+ }
+
+ atomic.AddInt64(&m.snapshot.count, n)
+
+ m.a1.Update(n)
+ m.a5.Update(n)
+ m.a15.Update(n)
+ m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+ return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+ return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+ return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+ return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
+}
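Reviewer note: unlike the other metric types, meters are held by a global ticking arbiter, so an unstopped meter is never garbage-collected. A usage sketch (metric name is arbitrary; the sleep just lets the 5s arbiter tick at least once):

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	m := metrics.NewRegisteredMeter("requests", nil) // nil => DefaultRegistry
	for i := 0; i < 10; i++ {
		m.Mark(1)
	}
	time.Sleep(5 * time.Second)
	fmt.Println(m.Count(), m.Rate1())

	// Unregister both removes the meter from the registry and, because
	// StandardMeter implements Stoppable, stops its ticking (see registry.go below).
	metrics.DefaultRegistry.Unregister("requests")
}
```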
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+ copiedSnapshot := MeterSnapshot{
+ count: atomic.LoadInt64(&m.snapshot.count),
+ rate1: atomic.LoadUint64(&m.snapshot.rate1),
+ rate5: atomic.LoadUint64(&m.snapshot.rate5),
+ rate15: atomic.LoadUint64(&m.snapshot.rate15),
+ rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
+ }
+ return &copiedSnapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+ rate1 := math.Float64bits(m.a1.Rate())
+ rate5 := math.Float64bits(m.a5.Rate())
+ rate15 := math.Float64bits(m.a15.Rate())
+ rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
+
+ atomic.StoreUint64(&m.snapshot.rate1, rate1)
+ atomic.StoreUint64(&m.snapshot.rate5, rate5)
+ atomic.StoreUint64(&m.snapshot.rate15, rate15)
+ atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
+}
+
+func (m *StandardMeter) tick() {
+ m.a1.Tick()
+ m.a5.Tick()
+ m.a15.Tick()
+ m.updateSnapshot()
+}
+
+// meterArbiter ticks meters every 5s from a single goroutine.
+// Meters are referenced in a set so they can be stopped later.
+type meterArbiter struct {
+ sync.RWMutex
+ started bool
+ meters map[*StandardMeter]struct{}
+ ticker *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
+
+// Ticks meters on the scheduled interval.
+func (ma *meterArbiter) tick() {
+ for {
+ select {
+ case <-ma.ticker.C:
+ ma.tickMeters()
+ }
+ }
+}
+
+func (ma *meterArbiter) tickMeters() {
+ ma.RLock()
+ defer ma.RUnlock()
+ for meter := range ma.meters {
+ meter.tick()
+ }
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 00000000000..b97a49ed123
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics. If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 00000000000..266b6c93d21
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "strings"
+ "time"
+)
+
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter.
+type OpenTSDBConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ OpenTSDBWithConfig(OpenTSDBConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ })
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes an OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) { + for _ = range time.Tick(c.FlushInterval) { + if err := openTSDB(&c); nil != err { + log.Println(err) + } + } +} + +func getShortHostname() string { + if shortHostName == "" { + host, _ := os.Hostname() + if index := strings.Index(host, "."); index > 0 { + shortHostName = host[:index] + } else { + shortHostName = host + } + } + return shortHostName +} + +func openTSDB(c *OpenTSDBConfig) error { + shortHostname := getShortHostname() + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) + case Gauge: + fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case GaugeFloat64: + fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) + fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname) + fmt.Fprintf(w, 
"put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go new file mode 100644 index 00000000000..a8e67228a45 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/registry.go @@ -0,0 +1,373 @@ +package metrics + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// DuplicateMetric is the error returned by Registry.Register when a metric +// already exists. If you mean to Register that metric you must first +// Unregister the existing metric. +type DuplicateMetric string + +func (err DuplicateMetric) Error() string { + return fmt.Sprintf("duplicate metric: %s", string(err)) +} + +// A Registry holds references to a set of metrics by name and can iterate +// over them, calling callback functions provided by the user. +// +// This is an interface so as to encourage other structs to implement +// the Registry API as appropriate. +type Registry interface { + + // Call the given function for each registered metric. + Each(func(string, interface{})) + + // Get the metric by the given name or nil if none is registered. + Get(string) interface{} + + // GetAll metrics in the Registry. + GetAll() map[string]map[string]interface{} + + // Gets an existing metric or registers the given one. + // The interface can be the metric to register if not found in registry, + // or a function returning the metric for lazy instantiation. + GetOrRegister(string, interface{}) interface{} + + // Register the given metric under the given name. + Register(string, interface{}) error + + // Run all registered healthchecks. + RunHealthchecks() + + // Unregister the metric with the given name. + Unregister(string) + + // Unregister all metrics. (Mostly for testing.) + UnregisterAll() +} + +// The standard implementation of a Registry is a mutex-protected map +// of names to metrics. +type StandardRegistry struct { + metrics map[string]interface{} + mutex sync.RWMutex +} + +// Create a new registry. +func NewRegistry() Registry { + return &StandardRegistry{metrics: make(map[string]interface{})} +} + +// Call the given function for each registered metric. +func (r *StandardRegistry) Each(f func(string, interface{})) { + metrics := r.registered() + for i := range metrics { + kv := &metrics[i] + f(kv.name, kv.value) + } +} + +// Get the metric by the given name or nil if none is registered. +func (r *StandardRegistry) Get(name string) interface{} { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.metrics[name] +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. 
+// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { + // access the read lock first which should be re-entrant + r.mutex.RLock() + metric, ok := r.metrics[name] + r.mutex.RUnlock() + if ok { + return metric + } + + // only take the write lock if we'll be modifying the metrics map + r.mutex.Lock() + defer r.mutex.Unlock() + if metric, ok := r.metrics[name]; ok { + return metric + } + if v := reflect.ValueOf(i); v.Kind() == reflect.Func { + i = v.Call(nil)[0].Interface() + } + r.register(name, i) + return i +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func (r *StandardRegistry) Register(name string, i interface{}) error { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.register(name, i) +} + +// Run all registered healthchecks. +func (r *StandardRegistry) RunHealthchecks() { + r.mutex.RLock() + defer r.mutex.RUnlock() + for _, i := range r.metrics { + if h, ok := i.(Healthcheck); ok { + h.Check() + } + } +} + +// GetAll metrics in the Registry +func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { + data := make(map[string]map[string]interface{}) + r.Each(func(name string, i interface{}) { + values := make(map[string]interface{}) + switch metric := i.(type) { + case Counter: + values["count"] = metric.Count() + case Gauge: + values["value"] = metric.Value() + case GaugeFloat64: + values["value"] = metric.Value() + case Healthcheck: + values["error"] = nil + metric.Check() + if err := metric.Error(); nil != err { + values["error"] = metric.Error().Error() + } + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = h.Count() + values["min"] = h.Min() + values["max"] = h.Max() + values["mean"] = h.Mean() + values["stddev"] = h.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + case Meter: + m := metric.Snapshot() + values["count"] = m.Count() + values["1m.rate"] = m.Rate1() + values["5m.rate"] = m.Rate5() + values["15m.rate"] = m.Rate15() + values["mean.rate"] = m.RateMean() + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = t.Count() + values["min"] = t.Min() + values["max"] = t.Max() + values["mean"] = t.Mean() + values["stddev"] = t.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + values["1m.rate"] = t.Rate1() + values["5m.rate"] = t.Rate5() + values["15m.rate"] = t.Rate15() + values["mean.rate"] = t.RateMean() + } + data[name] = values + }) + return data +} + +// Unregister the metric with the given name. +func (r *StandardRegistry) Unregister(name string) { + r.mutex.Lock() + defer r.mutex.Unlock() + r.stop(name) + delete(r.metrics, name) +} + +// Unregister all metrics. (Mostly for testing.) 
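Reviewer note: the double-checked locking in GetOrRegister above is why callers should prefer passing a constructor function rather than an already-built metric. The function is only invoked when the name is actually missing (contrast GetOrRegisterGaugeFloat64 earlier in this diff, which constructs eagerly). Sketch:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	// Lazy: NewMeter only runs (and only spawns its ticker) if "hits" is absent.
	m := r.GetOrRegister("hits", metrics.NewMeter).(metrics.Meter)
	m.Mark(1)

	// A second call returns the same instance; the constructor is not re-run.
	same := r.GetOrRegister("hits", metrics.NewMeter).(metrics.Meter)
	fmt.Println(same.Count()) // 1
}
```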
+func (r *StandardRegistry) UnregisterAll() { + r.mutex.Lock() + defer r.mutex.Unlock() + for name, _ := range r.metrics { + r.stop(name) + delete(r.metrics, name) + } +} + +func (r *StandardRegistry) register(name string, i interface{}) error { + if _, ok := r.metrics[name]; ok { + return DuplicateMetric(name) + } + switch i.(type) { + case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: + r.metrics[name] = i + } + return nil +} + +type metricKV struct { + name string + value interface{} +} + +func (r *StandardRegistry) registered() []metricKV { + r.mutex.RLock() + defer r.mutex.RUnlock() + metrics := make([]metricKV, 0, len(r.metrics)) + for name, i := range r.metrics { + metrics = append(metrics, metricKV{ + name: name, + value: i, + }) + } + return metrics +} + +func (r *StandardRegistry) stop(name string) { + if i, ok := r.metrics[name]; ok { + if s, ok := i.(Stoppable); ok { + s.Stop() + } + } +} + +// Stoppable defines the metrics which has to be stopped. +type Stoppable interface { + Stop() +} + +type PrefixedRegistry struct { + underlying Registry + prefix string +} + +func NewPrefixedRegistry(prefix string) Registry { + return &PrefixedRegistry{ + underlying: NewRegistry(), + prefix: prefix, + } +} + +func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { + return &PrefixedRegistry{ + underlying: parent, + prefix: prefix, + } +} + +// Call the given function for each registered metric. +func (r *PrefixedRegistry) Each(fn func(string, interface{})) { + wrappedFn := func(prefix string) func(string, interface{}) { + return func(name string, iface interface{}) { + if strings.HasPrefix(name, prefix) { + fn(name, iface) + } else { + return + } + } + } + + baseRegistry, prefix := findPrefix(r, "") + baseRegistry.Each(wrappedFn(prefix)) +} + +func findPrefix(registry Registry, prefix string) (Registry, string) { + switch r := registry.(type) { + case *PrefixedRegistry: + return findPrefix(r.underlying, r.prefix+prefix) + case *StandardRegistry: + return r, prefix + } + return nil, "" +} + +// Get the metric by the given name or nil if none is registered. +func (r *PrefixedRegistry) Get(name string) interface{} { + realName := r.prefix + name + return r.underlying.Get(realName) +} + +// Gets an existing metric or registers the given one. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { + realName := r.prefix + name + return r.underlying.GetOrRegister(realName, metric) +} + +// Register the given metric under the given name. The name will be prefixed. +func (r *PrefixedRegistry) Register(name string, metric interface{}) error { + realName := r.prefix + name + return r.underlying.Register(realName, metric) +} + +// Run all registered healthchecks. +func (r *PrefixedRegistry) RunHealthchecks() { + r.underlying.RunHealthchecks() +} + +// GetAll metrics in the Registry +func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { + return r.underlying.GetAll() +} + +// Unregister the metric with the given name. The name will be prefixed. +func (r *PrefixedRegistry) Unregister(name string) { + realName := r.prefix + name + r.underlying.Unregister(realName) +} + +// Unregister all metrics. (Mostly for testing.) 
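Reviewer note: PrefixedRegistry only rewrites names on the way in; metrics still live in the underlying StandardRegistry under their full names, and Each walks the base registry filtering by the accumulated prefix (see findPrefix above). Sketch:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	root := metrics.NewRegistry()
	api := metrics.NewPrefixedChildRegistry(root, "api.")

	metrics.GetOrRegisterCounter("requests", api).Inc(1)

	// The metric is stored in the underlying registry under the full name.
	fmt.Println(root.Get("api.requests") != nil) // true

	api.Each(func(name string, _ interface{}) {
		fmt.Println(name) // "api.requests" -- the full, prefixed name
	})
}
```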
+func (r *PrefixedRegistry) UnregisterAll() { + r.underlying.UnregisterAll() +} + +var DefaultRegistry Registry = NewRegistry() + +// Call the given function for each registered metric. +func Each(f func(string, interface{})) { + DefaultRegistry.Each(f) +} + +// Get the metric by the given name or nil if none is registered. +func Get(name string) interface{} { + return DefaultRegistry.Get(name) +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +func GetOrRegister(name string, i interface{}) interface{} { + return DefaultRegistry.GetOrRegister(name, i) +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func Register(name string, i interface{}) error { + return DefaultRegistry.Register(name, i) +} + +// Register the given metric under the given name. Panics if a metric by the +// given name is already registered. +func MustRegister(name string, i interface{}) { + if err := Register(name, i); err != nil { + panic(err) + } +} + +// Run all registered healthchecks. +func RunHealthchecks() { + DefaultRegistry.RunHealthchecks() +} + +// Unregister the metric with the given name. +func Unregister(name string) { + DefaultRegistry.Unregister(name) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go new file mode 100644 index 00000000000..4047ab3d373 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime.go @@ -0,0 +1,216 @@ +package metrics + +import ( + "runtime" + "runtime/pprof" + "sync" + "time" +) + +var ( + memStats runtime.MemStats + runtimeMetrics struct { + MemStats struct { + Alloc Gauge + BuckHashSys Gauge + DebugGC Gauge + EnableGC Gauge + Frees Gauge + HeapAlloc Gauge + HeapIdle Gauge + HeapInuse Gauge + HeapObjects Gauge + HeapReleased Gauge + HeapSys Gauge + LastGC Gauge + Lookups Gauge + Mallocs Gauge + MCacheInuse Gauge + MCacheSys Gauge + MSpanInuse Gauge + MSpanSys Gauge + NextGC Gauge + NumGC Gauge + GCCPUFraction GaugeFloat64 + PauseNs Histogram + PauseTotalNs Gauge + StackInuse Gauge + StackSys Gauge + Sys Gauge + TotalAlloc Gauge + } + NumCgoCall Gauge + NumGoroutine Gauge + NumThread Gauge + ReadMemStats Timer + } + frees uint64 + lookups uint64 + mallocs uint64 + numGC uint32 + numCgoCalls int64 + + threadCreateProfile = pprof.Lookup("threadcreate") + registerRuntimeMetricsOnce = sync.Once{} +) + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called as a goroutine. +func CaptureRuntimeMemStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureRuntimeMemStatsOnce(r) + } +} + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called in a background +// goroutine. Giving a registry which has not been given to +// RegisterRuntimeMemStats will panic. +// +// Be very careful with this because runtime.ReadMemStats calls the C +// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() +// and that last one does what it says on the tin. +func CaptureRuntimeMemStatsOnce(r Registry) { + t := time.Now() + runtime.ReadMemStats(&memStats) // This takes 50-200us. 
+ runtimeMetrics.ReadMemStats.UpdateSince(t) + + runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) + runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) + if memStats.DebugGC { + runtimeMetrics.MemStats.DebugGC.Update(1) + } else { + runtimeMetrics.MemStats.DebugGC.Update(0) + } + if memStats.EnableGC { + runtimeMetrics.MemStats.EnableGC.Update(1) + } else { + runtimeMetrics.MemStats.EnableGC.Update(0) + } + + runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) + runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) + runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) + runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) + runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) + runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) + runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) + runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) + runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) + runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) + runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) + runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) + runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) + runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) + runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) + runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) + runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) + + // + i := numGC % uint32(len(memStats.PauseNs)) + ii := memStats.NumGC % uint32(len(memStats.PauseNs)) + if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { + for i = 0; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } else { + if i > ii { + for ; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + i = 0 + } + for ; i < ii; i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } + frees = memStats.Frees + lookups = memStats.Lookups + mallocs = memStats.Mallocs + numGC = memStats.NumGC + + runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) + runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) + runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) + runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) + runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) + + currentNumCgoCalls := numCgoCall() + runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) + numCgoCalls = currentNumCgoCalls + + runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) + + runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) +} + +// Register runtimeMetrics for the Go runtime statistics exported in runtime and +// specifically runtime.MemStats. The runtimeMetrics are named by their +// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
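Reviewer note: the intended wiring is register-then-capture. RegisterRuntimeMemStats below installs the gauges exactly once (guarded by sync.Once), and CaptureRuntimeMemStats is the blocking sampler. Sketch (the interval is illustrative; per the warning above, ReadMemStats briefly stops the world, so keep it coarse):

```go
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.RegisterRuntimeMemStats(r)

	// Blocking: sample runtime.MemStats every 5s in the background.
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)

	time.Sleep(30 * time.Second)
}
```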
+func RegisterRuntimeMemStats(r Registry) { + registerRuntimeMetricsOnce.Do(func() { + runtimeMetrics.MemStats.Alloc = NewGauge() + runtimeMetrics.MemStats.BuckHashSys = NewGauge() + runtimeMetrics.MemStats.DebugGC = NewGauge() + runtimeMetrics.MemStats.EnableGC = NewGauge() + runtimeMetrics.MemStats.Frees = NewGauge() + runtimeMetrics.MemStats.HeapAlloc = NewGauge() + runtimeMetrics.MemStats.HeapIdle = NewGauge() + runtimeMetrics.MemStats.HeapInuse = NewGauge() + runtimeMetrics.MemStats.HeapObjects = NewGauge() + runtimeMetrics.MemStats.HeapReleased = NewGauge() + runtimeMetrics.MemStats.HeapSys = NewGauge() + runtimeMetrics.MemStats.LastGC = NewGauge() + runtimeMetrics.MemStats.Lookups = NewGauge() + runtimeMetrics.MemStats.Mallocs = NewGauge() + runtimeMetrics.MemStats.MCacheInuse = NewGauge() + runtimeMetrics.MemStats.MCacheSys = NewGauge() + runtimeMetrics.MemStats.MSpanInuse = NewGauge() + runtimeMetrics.MemStats.MSpanSys = NewGauge() + runtimeMetrics.MemStats.NextGC = NewGauge() + runtimeMetrics.MemStats.NumGC = NewGauge() + runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() + runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) + runtimeMetrics.MemStats.PauseTotalNs = NewGauge() + runtimeMetrics.MemStats.StackInuse = NewGauge() + runtimeMetrics.MemStats.StackSys = NewGauge() + runtimeMetrics.MemStats.Sys = NewGauge() + runtimeMetrics.MemStats.TotalAlloc = NewGauge() + runtimeMetrics.NumCgoCall = NewGauge() + runtimeMetrics.NumGoroutine = NewGauge() + runtimeMetrics.NumThread = NewGauge() + runtimeMetrics.ReadMemStats = NewTimer() + + r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) + r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) + r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) + r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) + r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) + r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) + r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) + r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) + r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) + r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) + r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) + r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) + r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) + r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) + r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) + r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) + r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) + r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) + r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) + r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) + r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) + r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) + r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) + r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) + r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) + 
r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) + r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) + r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) + r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) + r.Register("runtime.NumThread", runtimeMetrics.NumThread) + r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) + }) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go new file mode 100644 index 00000000000..e3391f4e89f --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go @@ -0,0 +1,10 @@ +// +build cgo +// +build !appengine + +package metrics + +import "runtime" + +func numCgoCall() int64 { + return runtime.NumCgoCall() +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go new file mode 100644 index 00000000000..ca12c05bac7 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go @@ -0,0 +1,9 @@ +// +build go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return memStats.GCCPUFraction +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go new file mode 100644 index 00000000000..616a3b4751b --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go @@ -0,0 +1,7 @@ +// +build !cgo appengine + +package metrics + +func numCgoCall() int64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go new file mode 100644 index 00000000000..be96aa6f1be --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go @@ -0,0 +1,9 @@ +// +build !go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go new file mode 100644 index 00000000000..fecee5ef68b --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/sample.go @@ -0,0 +1,616 @@ +package metrics + +import ( + "math" + "math/rand" + "sort" + "sync" + "time" +) + +const rescaleThreshold = time.Hour + +// Samples maintain a statistically-significant selection of values from +// a stream. +type Sample interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Size() int + Snapshot() Sample + StdDev() float64 + Sum() int64 + Update(int64) + Values() []int64 + Variance() float64 +} + +// ExpDecaySample is an exponentially-decaying sample using a forward-decaying +// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time +// Decay Model for Streaming Systems". +// +// +type ExpDecaySample struct { + alpha float64 + count int64 + mutex sync.Mutex + reservoirSize int + t0, t1 time.Time + values *expDecaySampleHeap +} + +// NewExpDecaySample constructs a new exponentially-decaying sample with the +// given reservoir size and alpha. 
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample { + if UseNilMetrics { + return NilSample{} + } + s := &ExpDecaySample{ + alpha: alpha, + reservoirSize: reservoirSize, + t0: time.Now(), + values: newExpDecaySampleHeap(reservoirSize), + } + s.t1 = s.t0.Add(rescaleThreshold) + return s +} + +// Clear clears all samples. +func (s *ExpDecaySample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.t0 = time.Now() + s.t1 = s.t0.Add(rescaleThreshold) + s.values.Clear() +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *ExpDecaySample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *ExpDecaySample) Max() int64 { + return SampleMax(s.Values()) +} + +// Mean returns the mean of the values in the sample. +func (s *ExpDecaySample) Mean() float64 { + return SampleMean(s.Values()) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *ExpDecaySample) Min() int64 { + return SampleMin(s.Values()) +} + +// Percentile returns an arbitrary percentile of values in the sample. +func (s *ExpDecaySample) Percentile(p float64) float64 { + return SamplePercentile(s.Values(), p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.Values(), ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *ExpDecaySample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.values.Size() +} + +// Snapshot returns a read-only copy of the sample. +func (s *ExpDecaySample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *ExpDecaySample) StdDev() float64 { + return SampleStdDev(s.Values()) +} + +// Sum returns the sum of the values in the sample. +func (s *ExpDecaySample) Sum() int64 { + return SampleSum(s.Values()) +} + +// Update samples a new value. +func (s *ExpDecaySample) Update(v int64) { + s.update(time.Now(), v) +} + +// Values returns a copy of the values in the sample. +func (s *ExpDecaySample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return values +} + +// Variance returns the variance of the values in the sample. +func (s *ExpDecaySample) Variance() float64 { + return SampleVariance(s.Values()) +} + +// update samples a new value at a particular timestamp. This is a method all +// its own to facilitate testing. 
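+// Each incoming value is pushed with priority k = exp(alpha*(t-t0)) / u,
+// where u is a uniform random draw; once t passes the rescale deadline t1,
+// the landmark t0 is advanced and every stored key is multiplied by
+// exp(-alpha*(t0'-t0)) so that old and new priorities remain comparable.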
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if s.values.Size() == s.reservoirSize {
+		s.values.Pop()
+	}
+	s.values.Push(expDecaySample{
+		k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+		v: v,
+	})
+	if t.After(s.t1) {
+		values := s.values.Values()
+		t0 := s.t0
+		s.values.Clear()
+		s.t0 = t
+		s.t1 = s.t0.Add(rescaleThreshold)
+		for _, v := range values {
+			v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+			s.values.Push(v)
+		}
+	}
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var max int64 = math.MinInt64
+	for _, v := range values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var min int64 = math.MaxInt64
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+	}
+	return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+	return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+	scores := make([]float64, len(ps))
+	size := len(values)
+	if size > 0 {
+		sort.Sort(values)
+		for i, p := range ps {
+			pos := p * float64(size+1)
+			if pos < 1.0 {
+				scores[i] = float64(values[0])
+			} else if pos >= float64(size) {
+				scores[i] = float64(values[size-1])
+			} else {
+				lower := float64(values[int(pos)-1])
+				upper := float64(values[int(pos)])
+				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+			}
+		}
+	}
+	return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+	count  int64
+	values []int64
+}
+
+// NewSampleSnapshot constructs a SampleSnapshot from the given count and
+// values.
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+	return &SampleSnapshot{
+		count:  count,
+		values: values,
+	}
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() { + panic("Clear called on a SampleSnapshot") +} + +// Count returns the count of inputs at the time the snapshot was taken. +func (s *SampleSnapshot) Count() int64 { return s.count } + +// Max returns the maximal value at the time the snapshot was taken. +func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) } + +// Mean returns the mean value at the time the snapshot was taken. +func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) } + +// Min returns the minimal value at the time the snapshot was taken. +func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) } + +// Percentile returns an arbitrary percentile of values at the time the +// snapshot was taken. +func (s *SampleSnapshot) Percentile(p float64) float64 { + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values at the time +// the snapshot was taken. +func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample at the time the snapshot was taken. +func (s *SampleSnapshot) Size() int { return len(s.values) } + +// Snapshot returns the snapshot. +func (s *SampleSnapshot) Snapshot() Sample { return s } + +// StdDev returns the standard deviation of values at the time the snapshot was +// taken. +func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } + +// Sum returns the sum of values at the time the snapshot was taken. +func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } + +// Update panics. +func (*SampleSnapshot) Update(int64) { + panic("Update called on a SampleSnapshot") +} + +// Values returns a copy of the values in the sample. +func (s *SampleSnapshot) Values() []int64 { + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of values at the time the snapshot was taken. +func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } + +// SampleStdDev returns the standard deviation of the slice of int64. +func SampleStdDev(values []int64) float64 { + return math.Sqrt(SampleVariance(values)) +} + +// SampleSum returns the sum of the slice of int64. +func SampleSum(values []int64) int64 { + var sum int64 + for _, v := range values { + sum += v + } + return sum +} + +// SampleVariance returns the variance of the slice of int64. +func SampleVariance(values []int64) float64 { + if 0 == len(values) { + return 0.0 + } + m := SampleMean(values) + var sum float64 + for _, v := range values { + d := float64(v) - m + sum += d * d + } + return sum / float64(len(values)) +} + +// A uniform sample using Vitter's Algorithm R. +// +// +type UniformSample struct { + count int64 + mutex sync.Mutex + reservoirSize int + values []int64 +} + +// NewUniformSample constructs a new uniform sample with the given reservoir +// size. +func NewUniformSample(reservoirSize int) Sample { + if UseNilMetrics { + return NilSample{} + } + return &UniformSample{ + reservoirSize: reservoirSize, + values: make([]int64, 0, reservoirSize), + } +} + +// Clear clears all samples. +func (s *UniformSample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.values = make([]int64, 0, s.reservoirSize) +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. 
+func (s *UniformSample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *UniformSample) Max() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMax(s.values) +} + +// Mean returns the mean of the values in the sample. +func (s *UniformSample) Mean() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMean(s.values) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *UniformSample) Min() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMin(s.values) +} + +// Percentile returns an arbitrary percentile of values in the sample. +func (s *UniformSample) Percentile(p float64) float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *UniformSample) Percentiles(ps []float64) []float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *UniformSample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return len(s.values) +} + +// Snapshot returns a read-only copy of the sample. +func (s *UniformSample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *UniformSample) StdDev() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleStdDev(s.values) +} + +// Sum returns the sum of the values in the sample. +func (s *UniformSample) Sum() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleSum(s.values) +} + +// Update samples a new value. +func (s *UniformSample) Update(v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if len(s.values) < s.reservoirSize { + s.values = append(s.values, v) + } else { + r := rand.Int63n(s.count) + if r < int64(len(s.values)) { + s.values[int(r)] = v + } + } +} + +// Values returns a copy of the values in the sample. +func (s *UniformSample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of the values in the sample. +func (s *UniformSample) Variance() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleVariance(s.values) +} + +// expDecaySample represents an individual sample in a heap. +type expDecaySample struct { + k float64 + v int64 +} + +func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { + return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} +} + +// expDecaySampleHeap is a min-heap of expDecaySamples. 
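+// Samples are ordered by their priority key k, so Pop removes the sample
+// with the smallest key; that is how the lowest-priority value is evicted
+// once the reservoir is full.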
+// The internal implementation is copied from the standard library's container/heap +type expDecaySampleHeap struct { + s []expDecaySample +} + +func (h *expDecaySampleHeap) Clear() { + h.s = h.s[:0] +} + +func (h *expDecaySampleHeap) Push(s expDecaySample) { + n := len(h.s) + h.s = h.s[0 : n+1] + h.s[n] = s + h.up(n) +} + +func (h *expDecaySampleHeap) Pop() expDecaySample { + n := len(h.s) - 1 + h.s[0], h.s[n] = h.s[n], h.s[0] + h.down(0, n) + + n = len(h.s) + s := h.s[n-1] + h.s = h.s[0 : n-1] + return s +} + +func (h *expDecaySampleHeap) Size() int { + return len(h.s) +} + +func (h *expDecaySampleHeap) Values() []expDecaySample { + return h.s +} + +func (h *expDecaySampleHeap) up(j int) { + for { + i := (j - 1) / 2 // parent + if i == j || !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + j = i + } +} + +func (h *expDecaySampleHeap) down(i, n int) { + for { + j1 := 2*i + 1 + if j1 >= n || j1 < 0 { // j1 < 0 after int overflow + break + } + j := j1 // left child + if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { + j = j2 // = 2*i + 2 // right child + } + if !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + i = j + } +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go new file mode 100644 index 00000000000..693f190855c --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/syslog.go @@ -0,0 +1,78 @@ +// +build !windows + +package metrics + +import ( + "fmt" + "log/syslog" + "time" +) + +// Output each metric in the given registry to syslog periodically using +// the given syslogger. 
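+//
+// A hedged usage sketch (the tag "myapp" and the one-minute interval are
+// arbitrary illustrations, not requirements of this package):
+//
+//	w, err := syslog.New(syslog.LOG_INFO, "myapp")
+//	if err == nil {
+//		go Syslog(DefaultRegistry, 60*time.Second, w)
+//	}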
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) { + for _ = range time.Tick(d) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) + case Gauge: + w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) + case GaugeFloat64: + w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) + case Healthcheck: + metric.Check() + w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", + name, + h.Count(), + h.Min(), + h.Max(), + h.Mean(), + h.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + )) + case Meter: + m := metric.Snapshot() + w.Info(fmt.Sprintf( + "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", + name, + m.Count(), + m.Rate1(), + m.Rate5(), + m.Rate15(), + m.RateMean(), + )) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", + name, + t.Count(), + t.Min(), + t.Max(), + t.Mean(), + t.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + t.Rate1(), + t.Rate5(), + t.Rate15(), + t.RateMean(), + )) + } + }) + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go new file mode 100644 index 00000000000..d6ec4c6260f --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/timer.go @@ -0,0 +1,329 @@ +package metrics + +import ( + "sync" + "time" +) + +// Timers capture the duration and rate of events. +type Timer interface { + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Rate1() float64 + Rate5() float64 + Rate15() float64 + RateMean() float64 + Snapshot() Timer + StdDev() float64 + Stop() + Sum() int64 + Time(func()) + Update(time.Duration) + UpdateSince(time.Time) + Variance() float64 +} + +// GetOrRegisterTimer returns an existing Timer or constructs and registers a +// new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func GetOrRegisterTimer(name string, r Registry) Timer { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewTimer).(Timer) +} + +// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. +// Be sure to call Stop() once the timer is of no use to allow for garbage collection. +func NewCustomTimer(h Histogram, m Meter) Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: h, + meter: m, + } +} + +// NewRegisteredTimer constructs and registers a new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func NewRegisteredTimer(name string, r Registry) Timer { + c := NewTimer() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewTimer constructs a new StandardTimer using an exponentially-decaying +// sample with the same reservoir size and alpha as UNIX load averages. 
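+//
+// Typical use (an illustrative sketch; "db.query" is a made-up metric name)
+// is to time function executions against the default registry:
+//
+//	t := GetOrRegisterTimer("db.query", nil)
+//	t.Time(func() { /* work being measured */ })
+//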
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection. +func NewTimer() Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), + meter: NewMeter(), + } +} + +// NilTimer is a no-op Timer. +type NilTimer struct { + h Histogram + m Meter +} + +// Count is a no-op. +func (NilTimer) Count() int64 { return 0 } + +// Max is a no-op. +func (NilTimer) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilTimer) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilTimer) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilTimer) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilTimer) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Rate1 is a no-op. +func (NilTimer) Rate1() float64 { return 0.0 } + +// Rate5 is a no-op. +func (NilTimer) Rate5() float64 { return 0.0 } + +// Rate15 is a no-op. +func (NilTimer) Rate15() float64 { return 0.0 } + +// RateMean is a no-op. +func (NilTimer) RateMean() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilTimer) Snapshot() Timer { return NilTimer{} } + +// StdDev is a no-op. +func (NilTimer) StdDev() float64 { return 0.0 } + +// Stop is a no-op. +func (NilTimer) Stop() {} + +// Sum is a no-op. +func (NilTimer) Sum() int64 { return 0 } + +// Time is a no-op. +func (NilTimer) Time(func()) {} + +// Update is a no-op. +func (NilTimer) Update(time.Duration) {} + +// UpdateSince is a no-op. +func (NilTimer) UpdateSince(time.Time) {} + +// Variance is a no-op. +func (NilTimer) Variance() float64 { return 0.0 } + +// StandardTimer is the standard implementation of a Timer and uses a Histogram +// and Meter. +type StandardTimer struct { + histogram Histogram + meter Meter + mutex sync.Mutex +} + +// Count returns the number of events recorded. +func (t *StandardTimer) Count() int64 { + return t.histogram.Count() +} + +// Max returns the maximum value in the sample. +func (t *StandardTimer) Max() int64 { + return t.histogram.Max() +} + +// Mean returns the mean of the values in the sample. +func (t *StandardTimer) Mean() float64 { + return t.histogram.Mean() +} + +// Min returns the minimum value in the sample. +func (t *StandardTimer) Min() int64 { + return t.histogram.Min() +} + +// Percentile returns an arbitrary percentile of the values in the sample. +func (t *StandardTimer) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (t *StandardTimer) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second. +func (t *StandardTimer) Rate1() float64 { + return t.meter.Rate1() +} + +// Rate5 returns the five-minute moving average rate of events per second. +func (t *StandardTimer) Rate5() float64 { + return t.meter.Rate5() +} + +// Rate15 returns the fifteen-minute moving average rate of events per second. +func (t *StandardTimer) Rate15() float64 { + return t.meter.Rate15() +} + +// RateMean returns the meter's mean rate of events per second. +func (t *StandardTimer) RateMean() float64 { + return t.meter.RateMean() +} + +// Snapshot returns a read-only copy of the timer. 
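+//
+// Reading several statistics from one snapshot keeps them mutually
+// consistent (an illustrative sketch):
+//
+//	snap := t.Snapshot()
+//	count, p99 := snap.Count(), snap.Percentile(0.99)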
+func (t *StandardTimer) Snapshot() Timer { + t.mutex.Lock() + defer t.mutex.Unlock() + return &TimerSnapshot{ + histogram: t.histogram.Snapshot().(*HistogramSnapshot), + meter: t.meter.Snapshot().(*MeterSnapshot), + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (t *StandardTimer) StdDev() float64 { + return t.histogram.StdDev() +} + +// Stop stops the meter. +func (t *StandardTimer) Stop() { + t.meter.Stop() +} + +// Sum returns the sum in the sample. +func (t *StandardTimer) Sum() int64 { + return t.histogram.Sum() +} + +// Record the duration of the execution of the given function. +func (t *StandardTimer) Time(f func()) { + ts := time.Now() + f() + t.Update(time.Since(ts)) +} + +// Record the duration of an event. +func (t *StandardTimer) Update(d time.Duration) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(d)) + t.meter.Mark(1) +} + +// Record the duration of an event that started at a time and ends now. +func (t *StandardTimer) UpdateSince(ts time.Time) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(time.Since(ts))) + t.meter.Mark(1) +} + +// Variance returns the variance of the values in the sample. +func (t *StandardTimer) Variance() float64 { + return t.histogram.Variance() +} + +// TimerSnapshot is a read-only copy of another Timer. +type TimerSnapshot struct { + histogram *HistogramSnapshot + meter *MeterSnapshot +} + +// Count returns the number of events recorded at the time the snapshot was +// taken. +func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } + +// Max returns the maximum value at the time the snapshot was taken. +func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } + +// Mean returns the mean value at the time the snapshot was taken. +func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } + +// Min returns the minimum value at the time the snapshot was taken. +func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } + +// Percentile returns an arbitrary percentile of sampled values at the time the +// snapshot was taken. +func (t *TimerSnapshot) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of sampled values at +// the time the snapshot was taken. +func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second at the +// time the snapshot was taken. +func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } + +// Rate5 returns the five-minute moving average rate of events per second at +// the time the snapshot was taken. +func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } + +// Rate15 returns the fifteen-minute moving average rate of events per second +// at the time the snapshot was taken. +func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } + +// RateMean returns the meter's mean rate of events per second at the time the +// snapshot was taken. +func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } + +// Snapshot returns the snapshot. +func (t *TimerSnapshot) Snapshot() Timer { return t } + +// StdDev returns the standard deviation of the values at the time the snapshot +// was taken. +func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } + +// Stop is a no-op. 
+func (t *TimerSnapshot) Stop() {}
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+	panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+	panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+	panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100644
index 00000000000..c4ae91e642d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
+
+# run the tests for the root package
+go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 00000000000..091e971d2e6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to
+// the given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+	for _ = range time.Tick(d) {
+		WriteOnce(r, w)
+	}
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
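+//
+// A one-shot dump to standard output (a sketch; the caller is assumed to
+// import os):
+//
+//	WriteOnce(DefaultRegistry, os.Stdout)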
+func WriteOnce(r Registry, w io.Writer) { + var namedMetrics namedMetricSlice + r.Each(func(name string, i interface{}) { + namedMetrics = append(namedMetrics, namedMetric{name, i}) + }) + + sort.Sort(namedMetrics) + for _, namedMetric := range namedMetrics { + switch metric := namedMetric.m.(type) { + case Counter: + fmt.Fprintf(w, "counter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", metric.Count()) + case Gauge: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %9d\n", metric.Value()) + case GaugeFloat64: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) + fmt.Fprintf(w, " error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "histogram %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", h.Count()) + fmt.Fprintf(w, " min: %9d\n", h.Min()) + fmt.Fprintf(w, " max: %9d\n", h.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "meter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", m.Count()) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "timer %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", t.Count()) + fmt.Fprintf(w, " min: %9d\n", t.Min()) + fmt.Fprintf(w, " max: %9d\n", t.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) + } + } +} + +type namedMetric struct { + name string + m interface{} +} + +// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. +type namedMetricSlice []namedMetric + +func (nms namedMetricSlice) Len() int { return len(nms) } + +func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } + +func (nms namedMetricSlice) Less(i, j int) bool { + return nms[i].name < nms[j].name +} diff --git a/vendor/github.com/softlayer/softlayer-go/LICENSE b/vendor/github.com/softlayer/softlayer-go/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/softlayer/softlayer-go/config/config.go b/vendor/github.com/softlayer/softlayer-go/config/config.go new file mode 100644 index 00000000000..36d5348e9d1 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/config/config.go @@ -0,0 +1,145 @@ +/** + * // This file is borrowed from https://github.com/vaughan0/go-ini/blob/master/ini.go + * // which is distributed under the MIT license (https://github.com/vaughan0/go-ini/blob/master/LICENSE). + * + * Copyright (c) 2013 Vaughan Newton + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the + * following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +// Package config provides functions for parsing INI configuration files. +package config + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" +) + +var ( + sectionRegex = regexp.MustCompile(`^\[(.*)\]$`) + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) +) + +// ErrSyntax is returned when there is a syntax error in an INI file. +type ErrSyntax struct { + Line int + Source string // The contents of the erroneous line, without leading or trailing whitespace +} + +func (e ErrSyntax) Error() string { + return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source) +} + +// A File represents a parsed INI file. +type File map[string]Section + +// A Section represents a single section of an INI file. +type Section map[string]string + +// Returns a named Section. A Section will be created if one does not already exist for the given name. +func (f File) Section(name string) Section { + section := f[name] + if section == nil { + section = make(Section) + f[name] = section + } + return section +} + +// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup. +func (f File) Get(section, key string) (value string, ok bool) { + if s := f[section]; s != nil { + value, ok = s[key] + } + return +} + +// Loads INI data from a reader and stores the data in the File. +func (f File) Load(in io.Reader) error { + bufin, ok := in.(*bufio.Reader) + if !ok { + bufin = bufio.NewReader(in) + } + return parseFile(bufin, f) +} + +// Loads INI data from a named file and stores the data in the File. 
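+//
+// A hedged usage sketch (the path, section, and key below are invented for
+// illustration):
+//
+//	cfg := make(File)
+//	if err := cfg.LoadFile("/etc/example.ini"); err == nil {
+//		user, _ := cfg.Get("softlayer", "username")
+//		_ = user
+//	}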
+func (f File) LoadFile(file string) (err error) { + in, err := os.Open(file) + if err != nil { + return + } + defer in.Close() + return f.Load(in) +} + +func parseFile(in *bufio.Reader, file File) (err error) { + section := "" + lineNum := 0 + for done := false; !done; { + var line string + if line, err = in.ReadString('\n'); err != nil { + if err == io.EOF { + done = true + } else { + return + } + } + lineNum++ + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val := groups[1], groups[2] + key, val = strings.TrimSpace(key), strings.TrimSpace(val) + file.Section(section)[key] = val + } else if groups := sectionRegex.FindStringSubmatch(line); groups != nil { + name := strings.TrimSpace(groups[1]) + section = name + // Create the section if it does not exist + file.Section(section) + } else { + return ErrSyntax{Line: lineNum, Source: line} + } + + } + return nil +} + +// Loads and returns a File from a reader. +func Load(in io.Reader) (File, error) { + file := make(File) + err := file.Load(in) + return file, err +} + +// Loads and returns an INI File from a file on disk. +func LoadFile(filename string) (File, error) { + file := make(File) + err := file.LoadFile(filename) + return file, err +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go b/vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go new file mode 100644 index 00000000000..8cc82eb3254 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go @@ -0,0 +1,32 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Abuse_Lockdown_Resource struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + InvoiceItem *Billing_Invoice_Item `json:"invoiceItem,omitempty" xmlrpc:"invoiceItem,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/account.go b/vendor/github.com/softlayer/softlayer-go/datatypes/account.go new file mode 100644 index 00000000000..f06fdc3972a --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/account.go @@ -0,0 +1,3020 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// The SoftLayer_Account data type contains general information relating to a single SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers is assigned to the account only and not to users belonging to the account. The SoftLayer_Account data type contains a number of relational properties that are used by the SoftLayer customer portal to quickly present a variety of account related services to its users.
+//
+// SoftLayer customers are unable to change their company account information in the portal or the API. If you need to change this information, please open a sales ticket in our customer portal and our account management staff will assist you.
+type Account struct {
+	Entity
+
+	// An email address that is responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to this address.
+	AbuseEmail *string `json:"abuseEmail,omitempty" xmlrpc:"abuseEmail,omitempty"`
+
+	// A count of email addresses that are responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to these addresses.
+	AbuseEmailCount *uint `json:"abuseEmailCount,omitempty" xmlrpc:"abuseEmailCount,omitempty"`
+
+	// Email addresses that are responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to these addresses.
+	AbuseEmails []Account_AbuseEmail `json:"abuseEmails,omitempty" xmlrpc:"abuseEmails,omitempty"`
+
+	// A count of the account contacts on an account.
+	AccountContactCount *uint `json:"accountContactCount,omitempty" xmlrpc:"accountContactCount,omitempty"`
+
+	// The account contacts on an account.
+	AccountContacts []Account_Contact `json:"accountContacts,omitempty" xmlrpc:"accountContacts,omitempty"`
+
+	// A count of the account software licenses owned by an account
+	AccountLicenseCount *uint `json:"accountLicenseCount,omitempty" xmlrpc:"accountLicenseCount,omitempty"`
+
+	// The account software licenses owned by an account
+	AccountLicenses []Software_AccountLicense `json:"accountLicenses,omitempty" xmlrpc:"accountLicenses,omitempty"`
+
+	// A count of
+	AccountLinkCount *uint `json:"accountLinkCount,omitempty" xmlrpc:"accountLinkCount,omitempty"`
+
+	// no documentation yet
+	AccountLinks []Account_Link `json:"accountLinks,omitempty" xmlrpc:"accountLinks,omitempty"`
+
+	// A flag indicating that the account has a managed resource.
+	AccountManagedResourcesFlag *bool `json:"accountManagedResourcesFlag,omitempty" xmlrpc:"accountManagedResourcesFlag,omitempty"`
+
+	// An account's status presented in a more detailed data type.
+	AccountStatus *Account_Status `json:"accountStatus,omitempty" xmlrpc:"accountStatus,omitempty"`
+
+	// A number reflecting the state of an account.
+	AccountStatusId *int `json:"accountStatusId,omitempty" xmlrpc:"accountStatusId,omitempty"`
+
+	// The billing item associated with an account's monthly discount.
+ ActiveAccountDiscountBillingItem *Billing_Item `json:"activeAccountDiscountBillingItem,omitempty" xmlrpc:"activeAccountDiscountBillingItem,omitempty"` + + // A count of the active account software licenses owned by an account + ActiveAccountLicenseCount *uint `json:"activeAccountLicenseCount,omitempty" xmlrpc:"activeAccountLicenseCount,omitempty"` + + // The active account software licenses owned by an account + ActiveAccountLicenses []Software_AccountLicense `json:"activeAccountLicenses,omitempty" xmlrpc:"activeAccountLicenses,omitempty"` + + // A count of the active address(es) that belong to an account. + ActiveAddressCount *uint `json:"activeAddressCount,omitempty" xmlrpc:"activeAddressCount,omitempty"` + + // The active address(es) that belong to an account. + ActiveAddresses []Account_Address `json:"activeAddresses,omitempty" xmlrpc:"activeAddresses,omitempty"` + + // A count of all active agreements for an account + ActiveAgreementCount *uint `json:"activeAgreementCount,omitempty" xmlrpc:"activeAgreementCount,omitempty"` + + // All active agreements for an account + ActiveAgreements []Account_Agreement `json:"activeAgreements,omitempty" xmlrpc:"activeAgreements,omitempty"` + + // A count of all billing agreements for an account + ActiveBillingAgreementCount *uint `json:"activeBillingAgreementCount,omitempty" xmlrpc:"activeBillingAgreementCount,omitempty"` + + // All billing agreements for an account + ActiveBillingAgreements []Account_Agreement `json:"activeBillingAgreements,omitempty" xmlrpc:"activeBillingAgreements,omitempty"` + + // no documentation yet + ActiveCatalystEnrollment *Catalyst_Enrollment `json:"activeCatalystEnrollment,omitempty" xmlrpc:"activeCatalystEnrollment,omitempty"` + + // A count of the account's active top level colocation containers. + ActiveColocationContainerCount *uint `json:"activeColocationContainerCount,omitempty" xmlrpc:"activeColocationContainerCount,omitempty"` + + // The account's active top level colocation containers. + ActiveColocationContainers []Billing_Item `json:"activeColocationContainers,omitempty" xmlrpc:"activeColocationContainers,omitempty"` + + // [Deprecated] Please use SoftLayer_Account::activeFlexibleCreditEnrollments. + ActiveFlexibleCreditEnrollment *FlexibleCredit_Enrollment `json:"activeFlexibleCreditEnrollment,omitempty" xmlrpc:"activeFlexibleCreditEnrollment,omitempty"` + + // A count of + ActiveFlexibleCreditEnrollmentCount *uint `json:"activeFlexibleCreditEnrollmentCount,omitempty" xmlrpc:"activeFlexibleCreditEnrollmentCount,omitempty"` + + // no documentation yet + ActiveFlexibleCreditEnrollments []FlexibleCredit_Enrollment `json:"activeFlexibleCreditEnrollments,omitempty" xmlrpc:"activeFlexibleCreditEnrollments,omitempty"` + + // A count of + ActiveNotificationSubscriberCount *uint `json:"activeNotificationSubscriberCount,omitempty" xmlrpc:"activeNotificationSubscriberCount,omitempty"` + + // no documentation yet + ActiveNotificationSubscribers []Notification_Subscriber `json:"activeNotificationSubscribers,omitempty" xmlrpc:"activeNotificationSubscribers,omitempty"` + + // A count of an account's non-expired quotes. + ActiveQuoteCount *uint `json:"activeQuoteCount,omitempty" xmlrpc:"activeQuoteCount,omitempty"` + + // An account's non-expired quotes. 
+ ActiveQuotes []Billing_Order_Quote `json:"activeQuotes,omitempty" xmlrpc:"activeQuotes,omitempty"` + + // A count of active reserved capacity agreements for an account + ActiveReservedCapacityAgreementCount *uint `json:"activeReservedCapacityAgreementCount,omitempty" xmlrpc:"activeReservedCapacityAgreementCount,omitempty"` + + // Active reserved capacity agreements for an account + ActiveReservedCapacityAgreements []Account_Agreement `json:"activeReservedCapacityAgreements,omitempty" xmlrpc:"activeReservedCapacityAgreements,omitempty"` + + // A count of the virtual software licenses controlled by an account + ActiveVirtualLicenseCount *uint `json:"activeVirtualLicenseCount,omitempty" xmlrpc:"activeVirtualLicenseCount,omitempty"` + + // The virtual software licenses controlled by an account + ActiveVirtualLicenses []Software_VirtualLicense `json:"activeVirtualLicenses,omitempty" xmlrpc:"activeVirtualLicenses,omitempty"` + + // A count of an account's associated load balancers. + AdcLoadBalancerCount *uint `json:"adcLoadBalancerCount,omitempty" xmlrpc:"adcLoadBalancerCount,omitempty"` + + // An account's associated load balancers. + AdcLoadBalancers []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"adcLoadBalancers,omitempty" xmlrpc:"adcLoadBalancers,omitempty"` + + // The first line of the mailing address belonging to an account. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The second line of the mailing address belonging to an account. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // A count of all the address(es) that belong to an account. + AddressCount *uint `json:"addressCount,omitempty" xmlrpc:"addressCount,omitempty"` + + // All the address(es) that belong to an account. + Addresses []Account_Address `json:"addresses,omitempty" xmlrpc:"addresses,omitempty"` + + // An affiliate identifier associated with the customer account. + AffiliateId *string `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // The billing items that will be on an account's next invoice. + AllBillingItems []Billing_Item `json:"allBillingItems,omitempty" xmlrpc:"allBillingItems,omitempty"` + + // A count of the billing items that will be on an account's next invoice. + AllCommissionBillingItemCount *uint `json:"allCommissionBillingItemCount,omitempty" xmlrpc:"allCommissionBillingItemCount,omitempty"` + + // The billing items that will be on an account's next invoice. + AllCommissionBillingItems []Billing_Item `json:"allCommissionBillingItems,omitempty" xmlrpc:"allCommissionBillingItems,omitempty"` + + // A count of the billing items that will be on an account's next invoice. + AllRecurringTopLevelBillingItemCount *uint `json:"allRecurringTopLevelBillingItemCount,omitempty" xmlrpc:"allRecurringTopLevelBillingItemCount,omitempty"` + + // The billing items that will be on an account's next invoice. + AllRecurringTopLevelBillingItems []Billing_Item `json:"allRecurringTopLevelBillingItems,omitempty" xmlrpc:"allRecurringTopLevelBillingItems,omitempty"` + + // The billing items that will be on an account's next invoice. Does not consider associated items. + AllRecurringTopLevelBillingItemsUnfiltered []Billing_Item `json:"allRecurringTopLevelBillingItemsUnfiltered,omitempty" xmlrpc:"allRecurringTopLevelBillingItemsUnfiltered,omitempty"` + + // A count of the billing items that will be on an account's next invoice. Does not consider associated items. 
+ AllRecurringTopLevelBillingItemsUnfilteredCount *uint `json:"allRecurringTopLevelBillingItemsUnfilteredCount,omitempty" xmlrpc:"allRecurringTopLevelBillingItemsUnfilteredCount,omitempty"` + + // A count of the billing items that will be on an account's next invoice. + AllSubnetBillingItemCount *uint `json:"allSubnetBillingItemCount,omitempty" xmlrpc:"allSubnetBillingItemCount,omitempty"` + + // The billing items that will be on an account's next invoice. + AllSubnetBillingItems []Billing_Item `json:"allSubnetBillingItems,omitempty" xmlrpc:"allSubnetBillingItems,omitempty"` + + // A count of all billing items of an account. + AllTopLevelBillingItemCount *uint `json:"allTopLevelBillingItemCount,omitempty" xmlrpc:"allTopLevelBillingItemCount,omitempty"` + + // All billing items of an account. + AllTopLevelBillingItems []Billing_Item `json:"allTopLevelBillingItems,omitempty" xmlrpc:"allTopLevelBillingItems,omitempty"` + + // The billing items that will be on an account's next invoice. Does not consider associated items. + AllTopLevelBillingItemsUnfiltered []Billing_Item `json:"allTopLevelBillingItemsUnfiltered,omitempty" xmlrpc:"allTopLevelBillingItemsUnfiltered,omitempty"` + + // A count of the billing items that will be on an account's next invoice. Does not consider associated items. + AllTopLevelBillingItemsUnfilteredCount *uint `json:"allTopLevelBillingItemsUnfilteredCount,omitempty" xmlrpc:"allTopLevelBillingItemsUnfilteredCount,omitempty"` + + // Indicates whether this account is allowed to silently migrate to use IBMid Authentication. + AllowIbmIdSilentMigrationFlag *bool `json:"allowIbmIdSilentMigrationFlag,omitempty" xmlrpc:"allowIbmIdSilentMigrationFlag,omitempty"` + + // The number of PPTP VPN users allowed on an account. + AllowedPptpVpnQuantity *int `json:"allowedPptpVpnQuantity,omitempty" xmlrpc:"allowedPptpVpnQuantity,omitempty"` + + // Flag indicating if this account can be linked with Bluemix. + AllowsBluemixAccountLinkingFlag *bool `json:"allowsBluemixAccountLinkingFlag,omitempty" xmlrpc:"allowsBluemixAccountLinkingFlag,omitempty"` + + // A secondary phone number assigned to an account. + AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"` + + // A count of an account's associated application delivery controller records. + ApplicationDeliveryControllerCount *uint `json:"applicationDeliveryControllerCount,omitempty" xmlrpc:"applicationDeliveryControllerCount,omitempty"` + + // An account's associated application delivery controller records. + ApplicationDeliveryControllers []Network_Application_Delivery_Controller `json:"applicationDeliveryControllers,omitempty" xmlrpc:"applicationDeliveryControllers,omitempty"` + + // A count of the account attribute values for a SoftLayer customer account. + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // The account attribute values for a SoftLayer customer account. + Attributes []Account_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // A count of the public network VLANs assigned to an account. + AvailablePublicNetworkVlanCount *uint `json:"availablePublicNetworkVlanCount,omitempty" xmlrpc:"availablePublicNetworkVlanCount,omitempty"` + + // The public network VLANs assigned to an account. + AvailablePublicNetworkVlans []Network_Vlan `json:"availablePublicNetworkVlans,omitempty" xmlrpc:"availablePublicNetworkVlans,omitempty"` + + // The account balance of a SoftLayer customer account. 
An account's balance is the amount of money owed to SoftLayer by the account holder, returned as a floating point number with two decimal places, measured in US Dollars ($USD). A negative account balance means the account holder has overpaid and is owed money by SoftLayer. + Balance *Float64 `json:"balance,omitempty" xmlrpc:"balance,omitempty"` + + // A count of the bandwidth allotments for an account. + BandwidthAllotmentCount *uint `json:"bandwidthAllotmentCount,omitempty" xmlrpc:"bandwidthAllotmentCount,omitempty"` + + // The bandwidth allotments for an account. + BandwidthAllotments []Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotments,omitempty" xmlrpc:"bandwidthAllotments,omitempty"` + + // The bandwidth allotments for an account currently over allocation. + BandwidthAllotmentsOverAllocation []Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotmentsOverAllocation,omitempty" xmlrpc:"bandwidthAllotmentsOverAllocation,omitempty"` + + // A count of the bandwidth allotments for an account currently over allocation. + BandwidthAllotmentsOverAllocationCount *uint `json:"bandwidthAllotmentsOverAllocationCount,omitempty" xmlrpc:"bandwidthAllotmentsOverAllocationCount,omitempty"` + + // The bandwidth allotments for an account projected to go over allocation. + BandwidthAllotmentsProjectedOverAllocation []Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotmentsProjectedOverAllocation,omitempty" xmlrpc:"bandwidthAllotmentsProjectedOverAllocation,omitempty"` + + // A count of the bandwidth allotments for an account projected to go over allocation. + BandwidthAllotmentsProjectedOverAllocationCount *uint `json:"bandwidthAllotmentsProjectedOverAllocationCount,omitempty" xmlrpc:"bandwidthAllotmentsProjectedOverAllocationCount,omitempty"` + + // A count of an account's associated bare metal server objects. + BareMetalInstanceCount *uint `json:"bareMetalInstanceCount,omitempty" xmlrpc:"bareMetalInstanceCount,omitempty"` + + // An account's associated bare metal server objects. + BareMetalInstances []Hardware `json:"bareMetalInstances,omitempty" xmlrpc:"bareMetalInstances,omitempty"` + + // A count of all billing agreements for an account + BillingAgreementCount *uint `json:"billingAgreementCount,omitempty" xmlrpc:"billingAgreementCount,omitempty"` + + // All billing agreements for an account + BillingAgreements []Account_Agreement `json:"billingAgreements,omitempty" xmlrpc:"billingAgreements,omitempty"` + + // An account's billing information. + BillingInfo *Billing_Info `json:"billingInfo,omitempty" xmlrpc:"billingInfo,omitempty"` + + // A count of private template group objects (parent and children) and the shared template group objects (parent only) for an account. + BlockDeviceTemplateGroupCount *uint `json:"blockDeviceTemplateGroupCount,omitempty" xmlrpc:"blockDeviceTemplateGroupCount,omitempty"` + + // Private template group objects (parent and children) and the shared template group objects (parent only) for an account. + BlockDeviceTemplateGroups []Virtual_Guest_Block_Device_Template_Group `json:"blockDeviceTemplateGroups,omitempty" xmlrpc:"blockDeviceTemplateGroups,omitempty"` + + // The Bluemix account link associated with this SoftLayer account, if one exists. + BluemixAccountLink *Account_Link_Bluemix `json:"bluemixAccountLink,omitempty" xmlrpc:"bluemixAccountLink,omitempty"` + + // Returns true if this account is linked to IBM Bluemix, false if not. 
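+
+// Every field on this type is a pointer so that an unset value can be told
+// apart from a zero value; each read should be guarded by a nil check (or by
+// the helpers in this library's sl package). A sketch using the Balance
+// field documented above:
+//
+//	if account.Balance != nil {
+//		fmt.Printf("balance owed: $%.2f USD\n", *account.Balance)
+//	}
+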
+ BluemixLinkedFlag *bool `json:"bluemixLinkedFlag,omitempty" xmlrpc:"bluemixLinkedFlag,omitempty"` + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // no documentation yet + BrandAccountFlag *bool `json:"brandAccountFlag,omitempty" xmlrpc:"brandAccountFlag,omitempty"` + + // The Brand tied to an account. + BrandId *int `json:"brandId,omitempty" xmlrpc:"brandId,omitempty"` + + // The brand keyName. + BrandKeyName *string `json:"brandKeyName,omitempty" xmlrpc:"brandKeyName,omitempty"` + + // The Business Partner details for the account. Country Enterprise Code, Channel, Segment, Reseller Level. + BusinessPartner *Account_Business_Partner `json:"businessPartner,omitempty" xmlrpc:"businessPartner,omitempty"` + + // [DEPRECATED] All accounts may order VLANs. + CanOrderAdditionalVlansFlag *bool `json:"canOrderAdditionalVlansFlag,omitempty" xmlrpc:"canOrderAdditionalVlansFlag,omitempty"` + + // A count of an account's active carts. + CartCount *uint `json:"cartCount,omitempty" xmlrpc:"cartCount,omitempty"` + + // An account's active carts. + Carts []Billing_Order_Quote `json:"carts,omitempty" xmlrpc:"carts,omitempty"` + + // A count of + CatalystEnrollmentCount *uint `json:"catalystEnrollmentCount,omitempty" xmlrpc:"catalystEnrollmentCount,omitempty"` + + // no documentation yet + CatalystEnrollments []Catalyst_Enrollment `json:"catalystEnrollments,omitempty" xmlrpc:"catalystEnrollments,omitempty"` + + // The city of the mailing address belonging to an account. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Whether an account is exempt from taxes on their invoices. + ClaimedTaxExemptTxFlag *bool `json:"claimedTaxExemptTxFlag,omitempty" xmlrpc:"claimedTaxExemptTxFlag,omitempty"` + + // A count of all closed tickets associated with an account. + ClosedTicketCount *uint `json:"closedTicketCount,omitempty" xmlrpc:"closedTicketCount,omitempty"` + + // All closed tickets associated with an account. + ClosedTickets []Ticket `json:"closedTickets,omitempty" xmlrpc:"closedTickets,omitempty"` + + // The company name associated with an account. + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // A two-letter abbreviation of the country in the mailing address belonging to an account. + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The date an account was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of datacenters which contain subnets that the account has access to route. + DatacentersWithSubnetAllocationCount *uint `json:"datacentersWithSubnetAllocationCount,omitempty" xmlrpc:"datacentersWithSubnetAllocationCount,omitempty"` + + // Datacenters which contain subnets that the account has access to route. + DatacentersWithSubnetAllocations []Location `json:"datacentersWithSubnetAllocations,omitempty" xmlrpc:"datacentersWithSubnetAllocations,omitempty"` + + // A count of an account's associated virtual dedicated host objects. + DedicatedHostCount *uint `json:"dedicatedHostCount,omitempty" xmlrpc:"dedicatedHostCount,omitempty"` + + // An account's associated virtual dedicated host objects. + DedicatedHosts []Virtual_DedicatedHost `json:"dedicatedHosts,omitempty" xmlrpc:"dedicatedHosts,omitempty"` + + // Device Fingerprint Identifier - Used internally and can safely be ignored. 
+ DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // A flag indicating whether payments are processed for this account. + DisablePaymentProcessingFlag *bool `json:"disablePaymentProcessingFlag,omitempty" xmlrpc:"disablePaymentProcessingFlag,omitempty"` + + // A count of the SoftLayer employees that an account is assigned to. + DisplaySupportRepresentativeAssignmentCount *uint `json:"displaySupportRepresentativeAssignmentCount,omitempty" xmlrpc:"displaySupportRepresentativeAssignmentCount,omitempty"` + + // The SoftLayer employees that an account is assigned to. + DisplaySupportRepresentativeAssignments []Account_Attachment_Employee `json:"displaySupportRepresentativeAssignments,omitempty" xmlrpc:"displaySupportRepresentativeAssignments,omitempty"` + + // A count of the DNS domains associated with an account. + DomainCount *uint `json:"domainCount,omitempty" xmlrpc:"domainCount,omitempty"` + + // A count of + DomainRegistrationCount *uint `json:"domainRegistrationCount,omitempty" xmlrpc:"domainRegistrationCount,omitempty"` + + // no documentation yet + DomainRegistrations []Dns_Domain_Registration `json:"domainRegistrations,omitempty" xmlrpc:"domainRegistrations,omitempty"` + + // The DNS domains associated with an account. + Domains []Dns_Domain `json:"domains,omitempty" xmlrpc:"domains,omitempty"` + + // A count of the DNS domains associated with an account that were not created as a result of a secondary DNS zone transfer. + DomainsWithoutSecondaryDnsRecordCount *uint `json:"domainsWithoutSecondaryDnsRecordCount,omitempty" xmlrpc:"domainsWithoutSecondaryDnsRecordCount,omitempty"` + + // The DNS domains associated with an account that were not created as a result of a secondary DNS zone transfer. + DomainsWithoutSecondaryDnsRecords []Dns_Domain `json:"domainsWithoutSecondaryDnsRecords,omitempty" xmlrpc:"domainsWithoutSecondaryDnsRecords,omitempty"` + + // A general email address assigned to an account. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Boolean flag dictating whether or not this account has the EU Supported flag. This flag indicates that this account uses IBM Cloud services to process EU citizens' personal data. + EuSupportedFlag *bool `json:"euSupportedFlag,omitempty" xmlrpc:"euSupportedFlag,omitempty"` + + // The total capacity of Legacy EVault Volumes on an account, in GB. + EvaultCapacityGB *uint `json:"evaultCapacityGB,omitempty" xmlrpc:"evaultCapacityGB,omitempty"` + + // A count of an account's master EVault user. This is only used when an account has EVault service. + EvaultMasterUserCount *uint `json:"evaultMasterUserCount,omitempty" xmlrpc:"evaultMasterUserCount,omitempty"` + + // An account's master EVault user. This is only used when an account has EVault service. + EvaultMasterUsers []Account_Password `json:"evaultMasterUsers,omitempty" xmlrpc:"evaultMasterUsers,omitempty"` + + // An account's associated EVault storage volumes. + EvaultNetworkStorage []Network_Storage `json:"evaultNetworkStorage,omitempty" xmlrpc:"evaultNetworkStorage,omitempty"` + + // A count of an account's associated EVault storage volumes. + EvaultNetworkStorageCount *uint `json:"evaultNetworkStorageCount,omitempty" xmlrpc:"evaultNetworkStorageCount,omitempty"` + + // A count of stored security certificates that are expired (i.e.
SSL) + ExpiredSecurityCertificateCount *uint `json:"expiredSecurityCertificateCount,omitempty" xmlrpc:"expiredSecurityCertificateCount,omitempty"` + + // Stored security certificates that are expired (i.e. SSL) + ExpiredSecurityCertificates []Security_Certificate `json:"expiredSecurityCertificates,omitempty" xmlrpc:"expiredSecurityCertificates,omitempty"` + + // A count of logs of who entered a colocation area which is assigned to this account, or when a user under this account enters a datacenter. + FacilityLogCount *uint `json:"facilityLogCount,omitempty" xmlrpc:"facilityLogCount,omitempty"` + + // Logs of who entered a colocation area which is assigned to this account, or when a user under this account enters a datacenter. + FacilityLogs []User_Access_Facility_Log `json:"facilityLogs,omitempty" xmlrpc:"facilityLogs,omitempty"` + + // A fax phone number assigned to an account. + FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"` + + // no documentation yet + FileBlockBetaAccessFlag *bool `json:"fileBlockBetaAccessFlag,omitempty" xmlrpc:"fileBlockBetaAccessFlag,omitempty"` + + // Each customer account is listed under a single individual. This is that individual's first name. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // A count of all of the account's current and former Flexible Credit enrollments. + FlexibleCreditEnrollmentCount *uint `json:"flexibleCreditEnrollmentCount,omitempty" xmlrpc:"flexibleCreditEnrollmentCount,omitempty"` + + // All of the account's current and former Flexible Credit enrollments. + FlexibleCreditEnrollments []FlexibleCredit_Enrollment `json:"flexibleCreditEnrollments,omitempty" xmlrpc:"flexibleCreditEnrollments,omitempty"` + + // Timestamp representing the point in time when an account is required to link with PaaS. + ForcePaasAccountLinkDate *string `json:"forcePaasAccountLinkDate,omitempty" xmlrpc:"forcePaasAccountLinkDate,omitempty"` + + // A count of + GlobalIpRecordCount *uint `json:"globalIpRecordCount,omitempty" xmlrpc:"globalIpRecordCount,omitempty"` + + // no documentation yet + GlobalIpRecords []Network_Subnet_IpAddress_Global `json:"globalIpRecords,omitempty" xmlrpc:"globalIpRecords,omitempty"` + + // A count of + GlobalIpv4RecordCount *uint `json:"globalIpv4RecordCount,omitempty" xmlrpc:"globalIpv4RecordCount,omitempty"` + + // no documentation yet + GlobalIpv4Records []Network_Subnet_IpAddress_Global `json:"globalIpv4Records,omitempty" xmlrpc:"globalIpv4Records,omitempty"` + + // A count of + GlobalIpv6RecordCount *uint `json:"globalIpv6RecordCount,omitempty" xmlrpc:"globalIpv6RecordCount,omitempty"` + + // no documentation yet + GlobalIpv6Records []Network_Subnet_IpAddress_Global `json:"globalIpv6Records,omitempty" xmlrpc:"globalIpv6Records,omitempty"` + + // A count of [Deprecated] The global load balancer accounts for a SoftLayer customer account. + GlobalLoadBalancerAccountCount *uint `json:"globalLoadBalancerAccountCount,omitempty" xmlrpc:"globalLoadBalancerAccountCount,omitempty"` + + // [Deprecated] The global load balancer accounts for a SoftLayer customer account. + GlobalLoadBalancerAccounts []Network_LoadBalancer_Global_Account `json:"globalLoadBalancerAccounts,omitempty" xmlrpc:"globalLoadBalancerAccounts,omitempty"` + + // An account's associated hardware objects. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of an account's associated hardware objects.
+ HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // An account's associated hardware objects currently over bandwidth allocation. + HardwareOverBandwidthAllocation []Hardware `json:"hardwareOverBandwidthAllocation,omitempty" xmlrpc:"hardwareOverBandwidthAllocation,omitempty"` + + // A count of an account's associated hardware objects currently over bandwidth allocation. + HardwareOverBandwidthAllocationCount *uint `json:"hardwareOverBandwidthAllocationCount,omitempty" xmlrpc:"hardwareOverBandwidthAllocationCount,omitempty"` + + // An account's associated hardware objects projected to go over bandwidth allocation. + HardwareProjectedOverBandwidthAllocation []Hardware `json:"hardwareProjectedOverBandwidthAllocation,omitempty" xmlrpc:"hardwareProjectedOverBandwidthAllocation,omitempty"` + + // A count of an account's associated hardware objects projected to go over bandwidth allocation. + HardwareProjectedOverBandwidthAllocationCount *uint `json:"hardwareProjectedOverBandwidthAllocationCount,omitempty" xmlrpc:"hardwareProjectedOverBandwidthAllocationCount,omitempty"` + + // All hardware associated with an account that has the cPanel web hosting control panel installed. + HardwareWithCpanel []Hardware `json:"hardwareWithCpanel,omitempty" xmlrpc:"hardwareWithCpanel,omitempty"` + + // A count of all hardware associated with an account that has the cPanel web hosting control panel installed. + HardwareWithCpanelCount *uint `json:"hardwareWithCpanelCount,omitempty" xmlrpc:"hardwareWithCpanelCount,omitempty"` + + // All hardware associated with an account that has the Helm web hosting control panel installed. + HardwareWithHelm []Hardware `json:"hardwareWithHelm,omitempty" xmlrpc:"hardwareWithHelm,omitempty"` + + // A count of all hardware associated with an account that has the Helm web hosting control panel installed. + HardwareWithHelmCount *uint `json:"hardwareWithHelmCount,omitempty" xmlrpc:"hardwareWithHelmCount,omitempty"` + + // All hardware associated with an account that has McAfee Secure software components. + HardwareWithMcafee []Hardware `json:"hardwareWithMcafee,omitempty" xmlrpc:"hardwareWithMcafee,omitempty"` + + // All hardware associated with an account that has McAfee Secure AntiVirus for Redhat software components. + HardwareWithMcafeeAntivirusRedhat []Hardware `json:"hardwareWithMcafeeAntivirusRedhat,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusRedhat,omitempty"` + + // A count of all hardware associated with an account that has McAfee Secure AntiVirus for Redhat software components. + HardwareWithMcafeeAntivirusRedhatCount *uint `json:"hardwareWithMcafeeAntivirusRedhatCount,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusRedhatCount,omitempty"` + + // A count of all hardware associated with an account that has McAfee Secure AntiVirus for Windows software components. + HardwareWithMcafeeAntivirusWindowCount *uint `json:"hardwareWithMcafeeAntivirusWindowCount,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusWindowCount,omitempty"` + + // All hardware associated with an account that has McAfee Secure AntiVirus for Windows software components. + HardwareWithMcafeeAntivirusWindows []Hardware `json:"hardwareWithMcafeeAntivirusWindows,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusWindows,omitempty"` + + // A count of all hardware associated with an account that has McAfee Secure software components. 
+ HardwareWithMcafeeCount *uint `json:"hardwareWithMcafeeCount,omitempty" xmlrpc:"hardwareWithMcafeeCount,omitempty"` + + // All hardware associated with an account that has McAfee Secure Intrusion Detection System software components. + HardwareWithMcafeeIntrusionDetectionSystem []Hardware `json:"hardwareWithMcafeeIntrusionDetectionSystem,omitempty" xmlrpc:"hardwareWithMcafeeIntrusionDetectionSystem,omitempty"` + + // A count of all hardware associated with an account that has McAfee Secure Intrusion Detection System software components. + HardwareWithMcafeeIntrusionDetectionSystemCount *uint `json:"hardwareWithMcafeeIntrusionDetectionSystemCount,omitempty" xmlrpc:"hardwareWithMcafeeIntrusionDetectionSystemCount,omitempty"` + + // All hardware associated with an account that has the Plesk web hosting control panel installed. + HardwareWithPlesk []Hardware `json:"hardwareWithPlesk,omitempty" xmlrpc:"hardwareWithPlesk,omitempty"` + + // A count of all hardware associated with an account that has the Plesk web hosting control panel installed. + HardwareWithPleskCount *uint `json:"hardwareWithPleskCount,omitempty" xmlrpc:"hardwareWithPleskCount,omitempty"` + + // All hardware associated with an account that has the QuantaStor storage system installed. + HardwareWithQuantastor []Hardware `json:"hardwareWithQuantastor,omitempty" xmlrpc:"hardwareWithQuantastor,omitempty"` + + // A count of all hardware associated with an account that has the QuantaStor storage system installed. + HardwareWithQuantastorCount *uint `json:"hardwareWithQuantastorCount,omitempty" xmlrpc:"hardwareWithQuantastorCount,omitempty"` + + // All hardware associated with an account that has the Urchin web traffic analytics package installed. + HardwareWithUrchin []Hardware `json:"hardwareWithUrchin,omitempty" xmlrpc:"hardwareWithUrchin,omitempty"` + + // A count of all hardware associated with an account that has the Urchin web traffic analytics package installed. + HardwareWithUrchinCount *uint `json:"hardwareWithUrchinCount,omitempty" xmlrpc:"hardwareWithUrchinCount,omitempty"` + + // A count of all hardware associated with an account that is running a version of the Microsoft Windows operating system. + HardwareWithWindowCount *uint `json:"hardwareWithWindowCount,omitempty" xmlrpc:"hardwareWithWindowCount,omitempty"` + + // All hardware associated with an account that is running a version of the Microsoft Windows operating system. + HardwareWithWindows []Hardware `json:"hardwareWithWindows,omitempty" xmlrpc:"hardwareWithWindows,omitempty"` + + // Return 1 if one of the account's hardware has the EVault Bare Metal Server Restore Plugin otherwise 0. + HasEvaultBareMetalRestorePluginFlag *bool `json:"hasEvaultBareMetalRestorePluginFlag,omitempty" xmlrpc:"hasEvaultBareMetalRestorePluginFlag,omitempty"` + + // Return 1 if one of the account's hardware has an installation of Idera Server Backup otherwise 0. + HasIderaBareMetalRestorePluginFlag *bool `json:"hasIderaBareMetalRestorePluginFlag,omitempty" xmlrpc:"hasIderaBareMetalRestorePluginFlag,omitempty"` + + // The number of orders in a PENDING status for a SoftLayer customer account. + HasPendingOrder *uint `json:"hasPendingOrder,omitempty" xmlrpc:"hasPendingOrder,omitempty"` + + // Return 1 if one of the account's hardware has an installation of R1Soft CDP otherwise 0. 
+ HasR1softBareMetalRestorePluginFlag *bool `json:"hasR1softBareMetalRestorePluginFlag,omitempty" xmlrpc:"hasR1softBareMetalRestorePluginFlag,omitempty"` + + // A count of an account's associated hourly bare metal server objects. + HourlyBareMetalInstanceCount *uint `json:"hourlyBareMetalInstanceCount,omitempty" xmlrpc:"hourlyBareMetalInstanceCount,omitempty"` + + // An account's associated hourly bare metal server objects. + HourlyBareMetalInstances []Hardware `json:"hourlyBareMetalInstances,omitempty" xmlrpc:"hourlyBareMetalInstances,omitempty"` + + // A count of hourly service billing items that will be on an account's next invoice. + HourlyServiceBillingItemCount *uint `json:"hourlyServiceBillingItemCount,omitempty" xmlrpc:"hourlyServiceBillingItemCount,omitempty"` + + // Hourly service billing items that will be on an account's next invoice. + HourlyServiceBillingItems []Billing_Item `json:"hourlyServiceBillingItems,omitempty" xmlrpc:"hourlyServiceBillingItems,omitempty"` + + // A count of an account's associated hourly virtual guest objects. + HourlyVirtualGuestCount *uint `json:"hourlyVirtualGuestCount,omitempty" xmlrpc:"hourlyVirtualGuestCount,omitempty"` + + // An account's associated hourly virtual guest objects. + HourlyVirtualGuests []Virtual_Guest `json:"hourlyVirtualGuests,omitempty" xmlrpc:"hourlyVirtualGuests,omitempty"` + + // An account's associated Virtual Storage volumes. + HubNetworkStorage []Network_Storage `json:"hubNetworkStorage,omitempty" xmlrpc:"hubNetworkStorage,omitempty"` + + // A count of an account's associated Virtual Storage volumes. + HubNetworkStorageCount *uint `json:"hubNetworkStorageCount,omitempty" xmlrpc:"hubNetworkStorageCount,omitempty"` + + // Unique identifier for a customer used throughout IBM. + IbmCustomerNumber *string `json:"ibmCustomerNumber,omitempty" xmlrpc:"ibmCustomerNumber,omitempty"` + + // Indicates whether this account requires IBMid authentication. + IbmIdAuthenticationRequiredFlag *bool `json:"ibmIdAuthenticationRequiredFlag,omitempty" xmlrpc:"ibmIdAuthenticationRequiredFlag,omitempty"` + + // This key is deprecated and should not be used. + IbmIdMigrationExpirationTimestamp *string `json:"ibmIdMigrationExpirationTimestamp,omitempty" xmlrpc:"ibmIdMigrationExpirationTimestamp,omitempty"` + + // A customer account's internal identifier. Account numbers are typically preceded by the string "SL" in the customer portal. Every SoftLayer account has at least one portal user whose username follows the "SL" + account number naming scheme. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // An in progress request to switch billing systems. + InProgressExternalAccountSetup *Account_External_Setup `json:"inProgressExternalAccountSetup,omitempty" xmlrpc:"inProgressExternalAccountSetup,omitempty"` + + // A count of + InternalNoteCount *uint `json:"internalNoteCount,omitempty" xmlrpc:"internalNoteCount,omitempty"` + + // no documentation yet + InternalNotes []Account_Note `json:"internalNotes,omitempty" xmlrpc:"internalNotes,omitempty"` + + // A count of an account's associated billing invoices. + InvoiceCount *uint `json:"invoiceCount,omitempty" xmlrpc:"invoiceCount,omitempty"` + + // An account's associated billing invoices. 
+ Invoices []Billing_Invoice `json:"invoices,omitempty" xmlrpc:"invoices,omitempty"` + + // A count of + IpAddressCount *uint `json:"ipAddressCount,omitempty" xmlrpc:"ipAddressCount,omitempty"` + + // no documentation yet + IpAddresses []Network_Subnet_IpAddress `json:"ipAddresses,omitempty" xmlrpc:"ipAddresses,omitempty"` + + // A flag indicating if an account belongs to a reseller or not. + IsReseller *int `json:"isReseller,omitempty" xmlrpc:"isReseller,omitempty"` + + // no documentation yet + IscsiIsolationDisabled *bool `json:"iscsiIsolationDisabled,omitempty" xmlrpc:"iscsiIsolationDisabled,omitempty"` + + // An account's associated iSCSI storage volumes. + IscsiNetworkStorage []Network_Storage `json:"iscsiNetworkStorage,omitempty" xmlrpc:"iscsiNetworkStorage,omitempty"` + + // A count of an account's associated iSCSI storage volumes. + IscsiNetworkStorageCount *uint `json:"iscsiNetworkStorageCount,omitempty" xmlrpc:"iscsiNetworkStorageCount,omitempty"` + + // The most recently canceled billing item. + LastCanceledBillingItem *Billing_Item `json:"lastCanceledBillingItem,omitempty" xmlrpc:"lastCanceledBillingItem,omitempty"` + + // The most recent cancelled server billing item. + LastCancelledServerBillingItem *Billing_Item `json:"lastCancelledServerBillingItem,omitempty" xmlrpc:"lastCancelledServerBillingItem,omitempty"` + + // A count of the five most recently closed abuse tickets associated with an account. + LastFiveClosedAbuseTicketCount *uint `json:"lastFiveClosedAbuseTicketCount,omitempty" xmlrpc:"lastFiveClosedAbuseTicketCount,omitempty"` + + // The five most recently closed abuse tickets associated with an account. + LastFiveClosedAbuseTickets []Ticket `json:"lastFiveClosedAbuseTickets,omitempty" xmlrpc:"lastFiveClosedAbuseTickets,omitempty"` + + // A count of the five most recently closed accounting tickets associated with an account. + LastFiveClosedAccountingTicketCount *uint `json:"lastFiveClosedAccountingTicketCount,omitempty" xmlrpc:"lastFiveClosedAccountingTicketCount,omitempty"` + + // The five most recently closed accounting tickets associated with an account. + LastFiveClosedAccountingTickets []Ticket `json:"lastFiveClosedAccountingTickets,omitempty" xmlrpc:"lastFiveClosedAccountingTickets,omitempty"` + + // A count of the five most recently closed tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. + LastFiveClosedOtherTicketCount *uint `json:"lastFiveClosedOtherTicketCount,omitempty" xmlrpc:"lastFiveClosedOtherTicketCount,omitempty"` + + // The five most recently closed tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. + LastFiveClosedOtherTickets []Ticket `json:"lastFiveClosedOtherTickets,omitempty" xmlrpc:"lastFiveClosedOtherTickets,omitempty"` + + // A count of the five most recently closed sales tickets associated with an account. + LastFiveClosedSalesTicketCount *uint `json:"lastFiveClosedSalesTicketCount,omitempty" xmlrpc:"lastFiveClosedSalesTicketCount,omitempty"` + + // The five most recently closed sales tickets associated with an account. + LastFiveClosedSalesTickets []Ticket `json:"lastFiveClosedSalesTickets,omitempty" xmlrpc:"lastFiveClosedSalesTickets,omitempty"` + + // A count of the five most recently closed support tickets associated with an account. 
+ LastFiveClosedSupportTicketCount *uint `json:"lastFiveClosedSupportTicketCount,omitempty" xmlrpc:"lastFiveClosedSupportTicketCount,omitempty"` + + // The five most recently closed support tickets associated with an account. + LastFiveClosedSupportTickets []Ticket `json:"lastFiveClosedSupportTickets,omitempty" xmlrpc:"lastFiveClosedSupportTickets,omitempty"` + + // A count of the five most recently closed tickets associated with an account. + LastFiveClosedTicketCount *uint `json:"lastFiveClosedTicketCount,omitempty" xmlrpc:"lastFiveClosedTicketCount,omitempty"` + + // The five most recently closed tickets associated with an account. + LastFiveClosedTickets []Ticket `json:"lastFiveClosedTickets,omitempty" xmlrpc:"lastFiveClosedTickets,omitempty"` + + // Each customer account is listed under a single individual. This is that individual's last name. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Whether an account has late fee protection. + LateFeeProtectionFlag *bool `json:"lateFeeProtectionFlag,omitempty" xmlrpc:"lateFeeProtectionFlag,omitempty"` + + // An account's most recent billing date. + LatestBillDate *Time `json:"latestBillDate,omitempty" xmlrpc:"latestBillDate,omitempty"` + + // An account's latest recurring invoice. + LatestRecurringInvoice *Billing_Invoice `json:"latestRecurringInvoice,omitempty" xmlrpc:"latestRecurringInvoice,omitempty"` + + // An account's latest recurring pending invoice. + LatestRecurringPendingInvoice *Billing_Invoice `json:"latestRecurringPendingInvoice,omitempty" xmlrpc:"latestRecurringPendingInvoice,omitempty"` + + // A count of the legacy bandwidth allotments for an account. + LegacyBandwidthAllotmentCount *uint `json:"legacyBandwidthAllotmentCount,omitempty" xmlrpc:"legacyBandwidthAllotmentCount,omitempty"` + + // The legacy bandwidth allotments for an account. + LegacyBandwidthAllotments []Network_Bandwidth_Version1_Allotment `json:"legacyBandwidthAllotments,omitempty" xmlrpc:"legacyBandwidthAllotments,omitempty"` + + // The total capacity of Legacy iSCSI Volumes on an account, in GB. + LegacyIscsiCapacityGB *uint `json:"legacyIscsiCapacityGB,omitempty" xmlrpc:"legacyIscsiCapacityGB,omitempty"` + + // A count of an account's associated load balancers. + LoadBalancerCount *uint `json:"loadBalancerCount,omitempty" xmlrpc:"loadBalancerCount,omitempty"` + + // An account's associated load balancers. + LoadBalancers []Network_LoadBalancer_VirtualIpAddress `json:"loadBalancers,omitempty" xmlrpc:"loadBalancers,omitempty"` + + // The total capacity of Legacy lockbox Volumes on an account, in GB. + LockboxCapacityGB *uint `json:"lockboxCapacityGB,omitempty" xmlrpc:"lockboxCapacityGB,omitempty"` + + // An account's associated Lockbox storage volumes. + LockboxNetworkStorage []Network_Storage `json:"lockboxNetworkStorage,omitempty" xmlrpc:"lockboxNetworkStorage,omitempty"` + + // A count of an account's associated Lockbox storage volumes. + LockboxNetworkStorageCount *uint `json:"lockboxNetworkStorageCount,omitempty" xmlrpc:"lockboxNetworkStorageCount,omitempty"` + + // no documentation yet + ManualPaymentsUnderReview []Billing_Payment_Card_ManualPayment `json:"manualPaymentsUnderReview,omitempty" xmlrpc:"manualPaymentsUnderReview,omitempty"` + + // A count of + ManualPaymentsUnderReviewCount *uint `json:"manualPaymentsUnderReviewCount,omitempty" xmlrpc:"manualPaymentsUnderReviewCount,omitempty"` + + // An account's master user. 
+ MasterUser *User_Customer `json:"masterUser,omitempty" xmlrpc:"masterUser,omitempty"` + + // A count of an account's media transfer service requests. + MediaDataTransferRequestCount *uint `json:"mediaDataTransferRequestCount,omitempty" xmlrpc:"mediaDataTransferRequestCount,omitempty"` + + // An account's media transfer service requests. + MediaDataTransferRequests []Account_Media_Data_Transfer_Request `json:"mediaDataTransferRequests,omitempty" xmlrpc:"mediaDataTransferRequests,omitempty"` + + // Flag indicating whether this account is restricted to the IBM Cloud portal. + MigratedToIbmCloudPortalFlag *bool `json:"migratedToIbmCloudPortalFlag,omitempty" xmlrpc:"migratedToIbmCloudPortalFlag,omitempty"` + + // The date an account was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A count of an account's associated monthly bare metal server objects. + MonthlyBareMetalInstanceCount *uint `json:"monthlyBareMetalInstanceCount,omitempty" xmlrpc:"monthlyBareMetalInstanceCount,omitempty"` + + // An account's associated monthly bare metal server objects. + MonthlyBareMetalInstances []Hardware `json:"monthlyBareMetalInstances,omitempty" xmlrpc:"monthlyBareMetalInstances,omitempty"` + + // A count of an account's associated monthly virtual guest objects. + MonthlyVirtualGuestCount *uint `json:"monthlyVirtualGuestCount,omitempty" xmlrpc:"monthlyVirtualGuestCount,omitempty"` + + // An account's associated monthly virtual guest objects. + MonthlyVirtualGuests []Virtual_Guest `json:"monthlyVirtualGuests,omitempty" xmlrpc:"monthlyVirtualGuests,omitempty"` + + // An account's associated NAS storage volumes. + NasNetworkStorage []Network_Storage `json:"nasNetworkStorage,omitempty" xmlrpc:"nasNetworkStorage,omitempty"` + + // A count of an account's associated NAS storage volumes. + NasNetworkStorageCount *uint `json:"nasNetworkStorageCount,omitempty" xmlrpc:"nasNetworkStorageCount,omitempty"` + + // [Deprecated] Whether or not this account can define their own networks. + NetworkCreationFlag *bool `json:"networkCreationFlag,omitempty" xmlrpc:"networkCreationFlag,omitempty"` + + // A count of all network gateway devices on this account. + NetworkGatewayCount *uint `json:"networkGatewayCount,omitempty" xmlrpc:"networkGatewayCount,omitempty"` + + // All network gateway devices on this account. + NetworkGateways []Network_Gateway `json:"networkGateways,omitempty" xmlrpc:"networkGateways,omitempty"` + + // An account's associated network hardware. + NetworkHardware []Hardware `json:"networkHardware,omitempty" xmlrpc:"networkHardware,omitempty"` + + // A count of an account's associated network hardware. + NetworkHardwareCount *uint `json:"networkHardwareCount,omitempty" xmlrpc:"networkHardwareCount,omitempty"` + + // A count of + NetworkMessageDeliveryAccountCount *uint `json:"networkMessageDeliveryAccountCount,omitempty" xmlrpc:"networkMessageDeliveryAccountCount,omitempty"` + + // no documentation yet + NetworkMessageDeliveryAccounts []Network_Message_Delivery `json:"networkMessageDeliveryAccounts,omitempty" xmlrpc:"networkMessageDeliveryAccounts,omitempty"` + + // Hardware which is currently experiencing a service failure. + NetworkMonitorDownHardware []Hardware `json:"networkMonitorDownHardware,omitempty" xmlrpc:"networkMonitorDownHardware,omitempty"` + + // A count of hardware which is currently experiencing a service failure. 
+ NetworkMonitorDownHardwareCount *uint `json:"networkMonitorDownHardwareCount,omitempty" xmlrpc:"networkMonitorDownHardwareCount,omitempty"` + + // A count of virtual guests which are currently experiencing a service failure. + NetworkMonitorDownVirtualGuestCount *uint `json:"networkMonitorDownVirtualGuestCount,omitempty" xmlrpc:"networkMonitorDownVirtualGuestCount,omitempty"` + + // Virtual guests which are currently experiencing a service failure. + NetworkMonitorDownVirtualGuests []Virtual_Guest `json:"networkMonitorDownVirtualGuests,omitempty" xmlrpc:"networkMonitorDownVirtualGuests,omitempty"` + + // Hardware which is currently recovering from a service failure. + NetworkMonitorRecoveringHardware []Hardware `json:"networkMonitorRecoveringHardware,omitempty" xmlrpc:"networkMonitorRecoveringHardware,omitempty"` + + // A count of hardware which is currently recovering from a service failure. + NetworkMonitorRecoveringHardwareCount *uint `json:"networkMonitorRecoveringHardwareCount,omitempty" xmlrpc:"networkMonitorRecoveringHardwareCount,omitempty"` + + // A count of virtual guests which are currently recovering from a service failure. + NetworkMonitorRecoveringVirtualGuestCount *uint `json:"networkMonitorRecoveringVirtualGuestCount,omitempty" xmlrpc:"networkMonitorRecoveringVirtualGuestCount,omitempty"` + + // Virtual guests which are currently recovering from a service failure. + NetworkMonitorRecoveringVirtualGuests []Virtual_Guest `json:"networkMonitorRecoveringVirtualGuests,omitempty" xmlrpc:"networkMonitorRecoveringVirtualGuests,omitempty"` + + // Hardware which is currently online. + NetworkMonitorUpHardware []Hardware `json:"networkMonitorUpHardware,omitempty" xmlrpc:"networkMonitorUpHardware,omitempty"` + + // A count of hardware which is currently online. + NetworkMonitorUpHardwareCount *uint `json:"networkMonitorUpHardwareCount,omitempty" xmlrpc:"networkMonitorUpHardwareCount,omitempty"` + + // A count of virtual guests which are currently online. + NetworkMonitorUpVirtualGuestCount *uint `json:"networkMonitorUpVirtualGuestCount,omitempty" xmlrpc:"networkMonitorUpVirtualGuestCount,omitempty"` + + // Virtual guests which are currently online. + NetworkMonitorUpVirtualGuests []Virtual_Guest `json:"networkMonitorUpVirtualGuests,omitempty" xmlrpc:"networkMonitorUpVirtualGuests,omitempty"` + + // An account's associated storage volumes. This includes Lockbox, NAS, EVault, and iSCSI volumes. + NetworkStorage []Network_Storage `json:"networkStorage,omitempty" xmlrpc:"networkStorage,omitempty"` + + // A count of an account's associated storage volumes. This includes Lockbox, NAS, EVault, and iSCSI volumes. + NetworkStorageCount *uint `json:"networkStorageCount,omitempty" xmlrpc:"networkStorageCount,omitempty"` + + // A count of an account's Network Storage groups. + NetworkStorageGroupCount *uint `json:"networkStorageGroupCount,omitempty" xmlrpc:"networkStorageGroupCount,omitempty"` + + // An account's Network Storage groups. + NetworkStorageGroups []Network_Storage_Group `json:"networkStorageGroups,omitempty" xmlrpc:"networkStorageGroups,omitempty"` + + // A count of IPSec network tunnels for an account. + NetworkTunnelContextCount *uint `json:"networkTunnelContextCount,omitempty" xmlrpc:"networkTunnelContextCount,omitempty"` + + // IPSec network tunnels for an account. + NetworkTunnelContexts []Network_Tunnel_Module_Context `json:"networkTunnelContexts,omitempty" xmlrpc:"networkTunnelContexts,omitempty"` + + // A count of all network VLANs assigned to an account.
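+
+// The relational properties and their "...Count" companions above are not
+// part of the base record; they are typically populated only when requested,
+// for example through an object mask. A sketch, assuming the services
+// package (mask entries follow the json property names):
+//
+//	account, err := services.GetAccountService(sess).
+//		Mask("mask[hardwareCount,networkVlanCount]").
+//		GetObject()
+//	if err == nil && account.NetworkVlanCount != nil {
+//		fmt.Println("VLANs on account:", *account.NetworkVlanCount)
+//	}
+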
+ NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"` + + // Whether or not an account has automatic private VLAN spanning enabled. + NetworkVlanSpan *Account_Network_Vlan_Span `json:"networkVlanSpan,omitempty" xmlrpc:"networkVlanSpan,omitempty"` + + // All network VLANs assigned to an account. + NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"` + + // A count of DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers for the next billing cycle. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date. + NextBillingPublicAllotmentHardwareBandwidthDetailCount *uint `json:"nextBillingPublicAllotmentHardwareBandwidthDetailCount,omitempty" xmlrpc:"nextBillingPublicAllotmentHardwareBandwidthDetailCount,omitempty"` + + // DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers for the next billing cycle. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date. + NextBillingPublicAllotmentHardwareBandwidthDetails []Network_Bandwidth_Version1_Allotment `json:"nextBillingPublicAllotmentHardwareBandwidthDetails,omitempty" xmlrpc:"nextBillingPublicAllotmentHardwareBandwidthDetails,omitempty"` + + // The pre-tax total amount exempt from incubator credit for the account's next invoice. This field is now deprecated and will soon be removed. Please update all references to use nextInvoiceTotalAmount instead. + NextInvoiceIncubatorExemptTotal *Float64 `json:"nextInvoiceIncubatorExemptTotal,omitempty" xmlrpc:"nextInvoiceIncubatorExemptTotal,omitempty"` + + // The total recurring charge amount of an account's next invoice eligible for account discount measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. + NextInvoiceRecurringAmountEligibleForAccountDiscount *Float64 `json:"nextInvoiceRecurringAmountEligibleForAccountDiscount,omitempty" xmlrpc:"nextInvoiceRecurringAmountEligibleForAccountDiscount,omitempty"` + + // A count of the billing items that will be on an account's next invoice. + NextInvoiceTopLevelBillingItemCount *uint `json:"nextInvoiceTopLevelBillingItemCount,omitempty" xmlrpc:"nextInvoiceTopLevelBillingItemCount,omitempty"` + + // The billing items that will be on an account's next invoice. + NextInvoiceTopLevelBillingItems []Billing_Item `json:"nextInvoiceTopLevelBillingItems,omitempty" xmlrpc:"nextInvoiceTopLevelBillingItems,omitempty"` + + // The pre-tax total amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. + NextInvoiceTotalAmount *Float64 `json:"nextInvoiceTotalAmount,omitempty" xmlrpc:"nextInvoiceTotalAmount,omitempty"` + + // The total one-time charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. + NextInvoiceTotalOneTimeAmount *Float64 `json:"nextInvoiceTotalOneTimeAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeAmount,omitempty"` + + // The total one-time tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
+ NextInvoiceTotalOneTimeTaxAmount *Float64 `json:"nextInvoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeTaxAmount,omitempty"` + + // The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. + NextInvoiceTotalRecurringAmount *Float64 `json:"nextInvoiceTotalRecurringAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringAmount,omitempty"` + + // The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. + NextInvoiceTotalRecurringAmountBeforeAccountDiscount *Float64 `json:"nextInvoiceTotalRecurringAmountBeforeAccountDiscount,omitempty" xmlrpc:"nextInvoiceTotalRecurringAmountBeforeAccountDiscount,omitempty"` + + // The total recurring tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. + NextInvoiceTotalRecurringTaxAmount *Float64 `json:"nextInvoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringTaxAmount,omitempty"` + + // The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. + NextInvoiceTotalTaxableRecurringAmount *Float64 `json:"nextInvoiceTotalTaxableRecurringAmount,omitempty" xmlrpc:"nextInvoiceTotalTaxableRecurringAmount,omitempty"` + + // A count of + NotificationSubscriberCount *uint `json:"notificationSubscriberCount,omitempty" xmlrpc:"notificationSubscriberCount,omitempty"` + + // no documentation yet + NotificationSubscribers []Notification_Subscriber `json:"notificationSubscribers,omitempty" xmlrpc:"notificationSubscribers,omitempty"` + + // An office phone number assigned to an account. + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // A count of the open abuse tickets associated with an account. + OpenAbuseTicketCount *uint `json:"openAbuseTicketCount,omitempty" xmlrpc:"openAbuseTicketCount,omitempty"` + + // The open abuse tickets associated with an account. + OpenAbuseTickets []Ticket `json:"openAbuseTickets,omitempty" xmlrpc:"openAbuseTickets,omitempty"` + + // A count of the open accounting tickets associated with an account. + OpenAccountingTicketCount *uint `json:"openAccountingTicketCount,omitempty" xmlrpc:"openAccountingTicketCount,omitempty"` + + // The open accounting tickets associated with an account. + OpenAccountingTickets []Ticket `json:"openAccountingTickets,omitempty" xmlrpc:"openAccountingTickets,omitempty"` + + // A count of the open billing tickets associated with an account. + OpenBillingTicketCount *uint `json:"openBillingTicketCount,omitempty" xmlrpc:"openBillingTicketCount,omitempty"` + + // The open billing tickets associated with an account. + OpenBillingTickets []Ticket `json:"openBillingTickets,omitempty" xmlrpc:"openBillingTickets,omitempty"` + + // A count of an open ticket requesting cancellation of this server, if one exists. + OpenCancellationRequestCount *uint `json:"openCancellationRequestCount,omitempty" xmlrpc:"openCancellationRequestCount,omitempty"` + + // An open ticket requesting cancellation of this server, if one exists. 
+ OpenCancellationRequests []Billing_Item_Cancellation_Request `json:"openCancellationRequests,omitempty" xmlrpc:"openCancellationRequests,omitempty"` + + // A count of the open tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. + OpenOtherTicketCount *uint `json:"openOtherTicketCount,omitempty" xmlrpc:"openOtherTicketCount,omitempty"` + + // The open tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. + OpenOtherTickets []Ticket `json:"openOtherTickets,omitempty" xmlrpc:"openOtherTickets,omitempty"` + + // A count of an account's recurring invoices. + OpenRecurringInvoiceCount *uint `json:"openRecurringInvoiceCount,omitempty" xmlrpc:"openRecurringInvoiceCount,omitempty"` + + // An account's recurring invoices. + OpenRecurringInvoices []Billing_Invoice `json:"openRecurringInvoices,omitempty" xmlrpc:"openRecurringInvoices,omitempty"` + + // A count of the open sales tickets associated with an account. + OpenSalesTicketCount *uint `json:"openSalesTicketCount,omitempty" xmlrpc:"openSalesTicketCount,omitempty"` + + // The open sales tickets associated with an account. + OpenSalesTickets []Ticket `json:"openSalesTickets,omitempty" xmlrpc:"openSalesTickets,omitempty"` + + // A count of + OpenStackAccountLinkCount *uint `json:"openStackAccountLinkCount,omitempty" xmlrpc:"openStackAccountLinkCount,omitempty"` + + // no documentation yet + OpenStackAccountLinks []Account_Link `json:"openStackAccountLinks,omitempty" xmlrpc:"openStackAccountLinks,omitempty"` + + // An account's associated Openstack related Object Storage accounts. + OpenStackObjectStorage []Network_Storage `json:"openStackObjectStorage,omitempty" xmlrpc:"openStackObjectStorage,omitempty"` + + // A count of an account's associated Openstack related Object Storage accounts. + OpenStackObjectStorageCount *uint `json:"openStackObjectStorageCount,omitempty" xmlrpc:"openStackObjectStorageCount,omitempty"` + + // A count of the open support tickets associated with an account. + OpenSupportTicketCount *uint `json:"openSupportTicketCount,omitempty" xmlrpc:"openSupportTicketCount,omitempty"` + + // The open support tickets associated with an account. + OpenSupportTickets []Ticket `json:"openSupportTickets,omitempty" xmlrpc:"openSupportTickets,omitempty"` + + // A count of all open tickets associated with an account. + OpenTicketCount *uint `json:"openTicketCount,omitempty" xmlrpc:"openTicketCount,omitempty"` + + // All open tickets associated with an account. + OpenTickets []Ticket `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"` + + // All open tickets associated with an account last edited by an employee. + OpenTicketsWaitingOnCustomer []Ticket `json:"openTicketsWaitingOnCustomer,omitempty" xmlrpc:"openTicketsWaitingOnCustomer,omitempty"` + + // A count of all open tickets associated with an account last edited by an employee. + OpenTicketsWaitingOnCustomerCount *uint `json:"openTicketsWaitingOnCustomerCount,omitempty" xmlrpc:"openTicketsWaitingOnCustomerCount,omitempty"` + + // A count of an account's associated billing orders excluding upgrades. + OrderCount *uint `json:"orderCount,omitempty" xmlrpc:"orderCount,omitempty"` + + // An account's associated billing orders excluding upgrades. + Orders []Billing_Order `json:"orders,omitempty" xmlrpc:"orders,omitempty"` + + // A count of the billing items that have no parent billing item. These are items that don't necessarily belong to a single server. 
+ OrphanBillingItemCount *uint `json:"orphanBillingItemCount,omitempty" xmlrpc:"orphanBillingItemCount,omitempty"` + + // The billing items that have no parent billing item. These are items that don't necessarily belong to a single server. + OrphanBillingItems []Billing_Item `json:"orphanBillingItems,omitempty" xmlrpc:"orphanBillingItems,omitempty"` + + // A count of + OwnedBrandCount *uint `json:"ownedBrandCount,omitempty" xmlrpc:"ownedBrandCount,omitempty"` + + // no documentation yet + OwnedBrands []Brand `json:"ownedBrands,omitempty" xmlrpc:"ownedBrands,omitempty"` + + // A count of + OwnedHardwareGenericComponentModelCount *uint `json:"ownedHardwareGenericComponentModelCount,omitempty" xmlrpc:"ownedHardwareGenericComponentModelCount,omitempty"` + + // no documentation yet + OwnedHardwareGenericComponentModels []Hardware_Component_Model_Generic `json:"ownedHardwareGenericComponentModels,omitempty" xmlrpc:"ownedHardwareGenericComponentModels,omitempty"` + + // A count of + PaymentProcessorCount *uint `json:"paymentProcessorCount,omitempty" xmlrpc:"paymentProcessorCount,omitempty"` + + // no documentation yet + PaymentProcessors []Billing_Payment_Processor `json:"paymentProcessors,omitempty" xmlrpc:"paymentProcessors,omitempty"` + + // A count of + PendingEventCount *uint `json:"pendingEventCount,omitempty" xmlrpc:"pendingEventCount,omitempty"` + + // no documentation yet + PendingEvents []Notification_Occurrence_Event `json:"pendingEvents,omitempty" xmlrpc:"pendingEvents,omitempty"` + + // An account's latest open (pending) invoice. + PendingInvoice *Billing_Invoice `json:"pendingInvoice,omitempty" xmlrpc:"pendingInvoice,omitempty"` + + // A count of a list of top-level invoice items that are on an account's currently pending invoice. + PendingInvoiceTopLevelItemCount *uint `json:"pendingInvoiceTopLevelItemCount,omitempty" xmlrpc:"pendingInvoiceTopLevelItemCount,omitempty"` + + // A list of top-level invoice items that are on an account's currently pending invoice. + PendingInvoiceTopLevelItems []Billing_Invoice_Item `json:"pendingInvoiceTopLevelItems,omitempty" xmlrpc:"pendingInvoiceTopLevelItems,omitempty"` + + // The total amount of an account's pending invoice, if one exists. + PendingInvoiceTotalAmount *Float64 `json:"pendingInvoiceTotalAmount,omitempty" xmlrpc:"pendingInvoiceTotalAmount,omitempty"` + + // The total one-time charges for an account's pending invoice, if one exists. In other words, it is the sum of one-time charges, setup fees, and labor fees. It does not include taxes. + PendingInvoiceTotalOneTimeAmount *Float64 `json:"pendingInvoiceTotalOneTimeAmount,omitempty" xmlrpc:"pendingInvoiceTotalOneTimeAmount,omitempty"` + + // The sum of all the taxes related to one time charges for an account's pending invoice, if one exists. + PendingInvoiceTotalOneTimeTaxAmount *Float64 `json:"pendingInvoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"pendingInvoiceTotalOneTimeTaxAmount,omitempty"` + + // The total recurring amount of an account's pending invoice, if one exists. + PendingInvoiceTotalRecurringAmount *Float64 `json:"pendingInvoiceTotalRecurringAmount,omitempty" xmlrpc:"pendingInvoiceTotalRecurringAmount,omitempty"` + + // The total amount of the recurring taxes on an account's pending invoice, if one exists. + PendingInvoiceTotalRecurringTaxAmount *Float64 `json:"pendingInvoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"pendingInvoiceTotalRecurringTaxAmount,omitempty"` + + // A count of an account's permission groups. 
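+
+// Because every field is a pointer tagged with omitempty, only fields that
+// are explicitly set survive JSON serialization, which keeps request
+// payloads sparse. A sketch using the sl helper package from this library
+// (the field values are illustrative):
+//
+//	acct := Account{
+//		City:        sl.String("Dallas"),
+//		CompanyName: sl.String("Example Co"),
+//	}
+//	b, _ := json.Marshal(acct)
+//	// string(b) == `{"city":"Dallas","companyName":"Example Co"}`
+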
+ PermissionGroupCount *uint `json:"permissionGroupCount,omitempty" xmlrpc:"permissionGroupCount,omitempty"`
+
+ // An account's permission groups.
+ PermissionGroups []User_Permission_Group `json:"permissionGroups,omitempty" xmlrpc:"permissionGroups,omitempty"`
+
+ // A count of an account's user roles.
+ PermissionRoleCount *uint `json:"permissionRoleCount,omitempty" xmlrpc:"permissionRoleCount,omitempty"`
+
+ // An account's user roles.
+ PermissionRoles []User_Permission_Role `json:"permissionRoles,omitempty" xmlrpc:"permissionRoles,omitempty"`
+
+ // A count of an account's associated virtual placement groups.
+ PlacementGroupCount *uint `json:"placementGroupCount,omitempty" xmlrpc:"placementGroupCount,omitempty"`
+
+ // An account's associated virtual placement groups.
+ PlacementGroups []Virtual_PlacementGroup `json:"placementGroups,omitempty" xmlrpc:"placementGroups,omitempty"`
+
+ // A count of an account's portable storage volumes.
+ PortableStorageVolumeCount *uint `json:"portableStorageVolumeCount,omitempty" xmlrpc:"portableStorageVolumeCount,omitempty"`
+
+ // no documentation yet
+ PortableStorageVolumes []Virtual_Disk_Image `json:"portableStorageVolumes,omitempty" xmlrpc:"portableStorageVolumes,omitempty"`
+
+ // A count of customer specified URIs that are downloaded onto a newly provisioned or reloaded server. If the URI is sent over https it will be executed directly on the server.
+ PostProvisioningHookCount *uint `json:"postProvisioningHookCount,omitempty" xmlrpc:"postProvisioningHookCount,omitempty"`
+
+ // Customer specified URIs that are downloaded onto a newly provisioned or reloaded server. If the URI is sent over https it will be executed directly on the server.
+ PostProvisioningHooks []Provisioning_Hook `json:"postProvisioningHooks,omitempty" xmlrpc:"postProvisioningHooks,omitempty"`
+
+ // The postal code of the mailing address belonging to an account.
+ PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+ // (Deprecated) Boolean flag dictating whether or not this account supports PPTP VPN Access.
+ PptpVpnAllowedFlag *bool `json:"pptpVpnAllowedFlag,omitempty" xmlrpc:"pptpVpnAllowedFlag,omitempty"`
+
+ // A count of an account's associated portal users with PPTP VPN access. (Deprecated)
+ PptpVpnUserCount *uint `json:"pptpVpnUserCount,omitempty" xmlrpc:"pptpVpnUserCount,omitempty"`
+
+ // An account's associated portal users with PPTP VPN access. (Deprecated)
+ PptpVpnUsers []User_Customer `json:"pptpVpnUsers,omitempty" xmlrpc:"pptpVpnUsers,omitempty"`
+
+ // The total recurring amount for an account's previous revenue.
+ PreviousRecurringRevenue *Float64 `json:"previousRecurringRevenue,omitempty" xmlrpc:"previousRecurringRevenue,omitempty"`
+
+ // A count of the item price that an account is restricted to.
+ PriceRestrictionCount *uint `json:"priceRestrictionCount,omitempty" xmlrpc:"priceRestrictionCount,omitempty"`
+
+ // The item price that an account is restricted to.
+ PriceRestrictions []Product_Item_Price_Account_Restriction `json:"priceRestrictions,omitempty" xmlrpc:"priceRestrictions,omitempty"`
+
+ // A count of all priority one tickets associated with an account.
+ PriorityOneTicketCount *uint `json:"priorityOneTicketCount,omitempty" xmlrpc:"priorityOneTicketCount,omitempty"`
+
+ // All priority one tickets associated with an account.
+ PriorityOneTickets []Ticket `json:"priorityOneTickets,omitempty" xmlrpc:"priorityOneTickets,omitempty"`
+
+ // A count of DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The private inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ PrivateAllotmentHardwareBandwidthDetailCount *uint `json:"privateAllotmentHardwareBandwidthDetailCount,omitempty" xmlrpc:"privateAllotmentHardwareBandwidthDetailCount,omitempty"`
+
+ // DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The private inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ PrivateAllotmentHardwareBandwidthDetails []Network_Bandwidth_Version1_Allotment `json:"privateAllotmentHardwareBandwidthDetails,omitempty" xmlrpc:"privateAllotmentHardwareBandwidthDetails,omitempty"`
+
+ // A count of private and shared template group objects (parent only) for an account.
+ PrivateBlockDeviceTemplateGroupCount *uint `json:"privateBlockDeviceTemplateGroupCount,omitempty" xmlrpc:"privateBlockDeviceTemplateGroupCount,omitempty"`
+
+ // Private and shared template group objects (parent only) for an account.
+ PrivateBlockDeviceTemplateGroups []Virtual_Guest_Block_Device_Template_Group `json:"privateBlockDeviceTemplateGroups,omitempty" xmlrpc:"privateBlockDeviceTemplateGroups,omitempty"`
+
+ // A count of an account's private IP addresses.
+ PrivateIpAddressCount *uint `json:"privateIpAddressCount,omitempty" xmlrpc:"privateIpAddressCount,omitempty"`
+
+ // no documentation yet
+ PrivateIpAddresses []Network_Subnet_IpAddress `json:"privateIpAddresses,omitempty" xmlrpc:"privateIpAddresses,omitempty"`
+
+ // A count of the private network VLANs assigned to an account.
+ PrivateNetworkVlanCount *uint `json:"privateNetworkVlanCount,omitempty" xmlrpc:"privateNetworkVlanCount,omitempty"`
+
+ // The private network VLANs assigned to an account.
+ PrivateNetworkVlans []Network_Vlan `json:"privateNetworkVlans,omitempty" xmlrpc:"privateNetworkVlans,omitempty"`
+
+ // A count of all private subnets associated with an account.
+ PrivateSubnetCount *uint `json:"privateSubnetCount,omitempty" xmlrpc:"privateSubnetCount,omitempty"`
+
+ // All private subnets associated with an account.
+ PrivateSubnets []Network_Subnet `json:"privateSubnets,omitempty" xmlrpc:"privateSubnets,omitempty"`
+
+ // Boolean flag indicating whether or not this account is a Proof of Concept account.
+ ProofOfConceptAccountFlag *bool `json:"proofOfConceptAccountFlag,omitempty" xmlrpc:"proofOfConceptAccountFlag,omitempty"`
+
+ // A count of DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ PublicAllotmentHardwareBandwidthDetailCount *uint `json:"publicAllotmentHardwareBandwidthDetailCount,omitempty" xmlrpc:"publicAllotmentHardwareBandwidthDetailCount,omitempty"`
+
+ // DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ PublicAllotmentHardwareBandwidthDetails []Network_Bandwidth_Version1_Allotment `json:"publicAllotmentHardwareBandwidthDetails,omitempty" xmlrpc:"publicAllotmentHardwareBandwidthDetails,omitempty"`
+
+ // A count of an account's public IP addresses.
+ PublicIpAddressCount *uint `json:"publicIpAddressCount,omitempty" xmlrpc:"publicIpAddressCount,omitempty"`
+
+ // no documentation yet
+ PublicIpAddresses []Network_Subnet_IpAddress `json:"publicIpAddresses,omitempty" xmlrpc:"publicIpAddresses,omitempty"`
+
+ // A count of the public network VLANs assigned to an account.
+ PublicNetworkVlanCount *uint `json:"publicNetworkVlanCount,omitempty" xmlrpc:"publicNetworkVlanCount,omitempty"`
+
+ // The public network VLANs assigned to an account.
+ PublicNetworkVlans []Network_Vlan `json:"publicNetworkVlans,omitempty" xmlrpc:"publicNetworkVlans,omitempty"`
+
+ // A count of all public network subnets associated with an account.
+ PublicSubnetCount *uint `json:"publicSubnetCount,omitempty" xmlrpc:"publicSubnetCount,omitempty"`
+
+ // All public network subnets associated with an account.
+ PublicSubnets []Network_Subnet `json:"publicSubnets,omitempty" xmlrpc:"publicSubnets,omitempty"`
+
+ // A count of an account's quotes.
+ QuoteCount *uint `json:"quoteCount,omitempty" xmlrpc:"quoteCount,omitempty"`
+
+ // An account's quotes.
+ Quotes []Billing_Order_Quote `json:"quotes,omitempty" xmlrpc:"quotes,omitempty"`
+
+ // A count of an account's recent events.
+ RecentEventCount *uint `json:"recentEventCount,omitempty" xmlrpc:"recentEventCount,omitempty"`
+
+ // no documentation yet
+ RecentEvents []Notification_Occurrence_Event `json:"recentEvents,omitempty" xmlrpc:"recentEvents,omitempty"`
+
+ // The Referral Partner for this account, if any.
+ ReferralPartner *Account `json:"referralPartner,omitempty" xmlrpc:"referralPartner,omitempty"`
+
+ // A count of the accounts this referral partner has referred, if this account is a referral partner.
+ ReferredAccountCount *uint `json:"referredAccountCount,omitempty" xmlrpc:"referredAccountCount,omitempty"`
+
+ // If this account is a referral partner, the accounts this referral partner has referred.
+ ReferredAccounts []Account `json:"referredAccounts,omitempty" xmlrpc:"referredAccounts,omitempty"`
+
+ // A count of an account's regulated workloads.
+ RegulatedWorkloadCount *uint `json:"regulatedWorkloadCount,omitempty" xmlrpc:"regulatedWorkloadCount,omitempty"`
+
+ // no documentation yet
+ RegulatedWorkloads []Legal_RegulatedWorkload `json:"regulatedWorkloads,omitempty" xmlrpc:"regulatedWorkloads,omitempty"`
+
+ // A count of remote management command requests for an account.
+ RemoteManagementCommandRequestCount *uint `json:"remoteManagementCommandRequestCount,omitempty" xmlrpc:"remoteManagementCommandRequestCount,omitempty"`
+
+ // Remote management command requests for an account.
+ RemoteManagementCommandRequests []Hardware_Component_RemoteManagement_Command_Request `json:"remoteManagementCommandRequests,omitempty" xmlrpc:"remoteManagementCommandRequests,omitempty"`
+
+ // A count of the Replication events for all Network Storage volumes on an account.
+ ReplicationEventCount *uint `json:"replicationEventCount,omitempty" xmlrpc:"replicationEventCount,omitempty"`
+
+ // The Replication events for all Network Storage volumes on an account.
+ ReplicationEvents []Network_Storage_Event `json:"replicationEvents,omitempty" xmlrpc:"replicationEvents,omitempty"`
+
+ // Indicates whether newly created users under this account will be associated with IBMid via an email requiring a response, or not.
+ RequireSilentIBMidUserCreation *bool `json:"requireSilentIBMidUserCreation,omitempty" xmlrpc:"requireSilentIBMidUserCreation,omitempty"`
+
+ // The Reseller level of the account.
+ ResellerLevel *int `json:"resellerLevel,omitempty" xmlrpc:"resellerLevel,omitempty"`
+
+ // A count of all reserved capacity agreements for an account.
+ ReservedCapacityAgreementCount *uint `json:"reservedCapacityAgreementCount,omitempty" xmlrpc:"reservedCapacityAgreementCount,omitempty"`
+
+ // All reserved capacity agreements for an account.
+ ReservedCapacityAgreements []Account_Agreement `json:"reservedCapacityAgreements,omitempty" xmlrpc:"reservedCapacityAgreements,omitempty"`
+
+ // A count of the reserved capacity groups owned by this account.
+ ReservedCapacityGroupCount *uint `json:"reservedCapacityGroupCount,omitempty" xmlrpc:"reservedCapacityGroupCount,omitempty"`
+
+ // The reserved capacity groups owned by this account.
+ ReservedCapacityGroups []Virtual_ReservedCapacityGroup `json:"reservedCapacityGroups,omitempty" xmlrpc:"reservedCapacityGroups,omitempty"`
+
+ // A count of an account's associated top-level resource groups.
+ ResourceGroupCount *uint `json:"resourceGroupCount,omitempty" xmlrpc:"resourceGroupCount,omitempty"`
+
+ // An account's associated top-level resource groups.
+ ResourceGroups []Resource_Group `json:"resourceGroups,omitempty" xmlrpc:"resourceGroups,omitempty"`
+
+ // A count of all routers that an account's VLANs reside on.
+ RouterCount *uint `json:"routerCount,omitempty" xmlrpc:"routerCount,omitempty"`
+
+ // All routers that an account's VLANs reside on.
+ Routers []Hardware `json:"routers,omitempty" xmlrpc:"routers,omitempty"`
+
+ // DEPRECATED
+ RwhoisData []Network_Subnet_Rwhois_Data `json:"rwhoisData,omitempty" xmlrpc:"rwhoisData,omitempty"`
+
+ // A count of an account's rwhois data (DEPRECATED).
+ RwhoisDataCount *uint `json:"rwhoisDataCount,omitempty" xmlrpc:"rwhoisDataCount,omitempty"`
+
+ // The SAML configuration for this account.
+ SamlAuthentication *Account_Authentication_Saml `json:"samlAuthentication,omitempty" xmlrpc:"samlAuthentication,omitempty"`
+
+ // A count of all scale groups on this account.
+ ScaleGroupCount *uint `json:"scaleGroupCount,omitempty" xmlrpc:"scaleGroupCount,omitempty"`
+
+ // All scale groups on this account.
+ ScaleGroups []Scale_Group `json:"scaleGroups,omitempty" xmlrpc:"scaleGroups,omitempty"`
+
+ // A count of the secondary DNS records for a SoftLayer customer account.
+ SecondaryDomainCount *uint `json:"secondaryDomainCount,omitempty" xmlrpc:"secondaryDomainCount,omitempty"`
+
+ // The secondary DNS records for a SoftLayer customer account.
+ SecondaryDomains []Dns_Secondary `json:"secondaryDomains,omitempty" xmlrpc:"secondaryDomains,omitempty"`
+
+ // A count of stored security certificates (i.e. SSL).
+ SecurityCertificateCount *uint `json:"securityCertificateCount,omitempty" xmlrpc:"securityCertificateCount,omitempty"`
+
+ // Stored security certificates (i.e. SSL).
+ SecurityCertificates []Security_Certificate `json:"securityCertificates,omitempty" xmlrpc:"securityCertificates,omitempty"`
+
+ // A count of the security groups belonging to this account.
+ SecurityGroupCount *uint `json:"securityGroupCount,omitempty" xmlrpc:"securityGroupCount,omitempty"`
+
+ // The security groups belonging to this account.
+ SecurityGroups []Network_SecurityGroup `json:"securityGroups,omitempty" xmlrpc:"securityGroups,omitempty"` + + // no documentation yet + SecurityLevel *Security_Level `json:"securityLevel,omitempty" xmlrpc:"securityLevel,omitempty"` + + // A count of an account's vulnerability scan requests. + SecurityScanRequestCount *uint `json:"securityScanRequestCount,omitempty" xmlrpc:"securityScanRequestCount,omitempty"` + + // An account's vulnerability scan requests. + SecurityScanRequests []Network_Security_Scanner_Request `json:"securityScanRequests,omitempty" xmlrpc:"securityScanRequests,omitempty"` + + // A count of the service billing items that will be on an account's next invoice. + ServiceBillingItemCount *uint `json:"serviceBillingItemCount,omitempty" xmlrpc:"serviceBillingItemCount,omitempty"` + + // The service billing items that will be on an account's next invoice. + ServiceBillingItems []Billing_Item `json:"serviceBillingItems,omitempty" xmlrpc:"serviceBillingItems,omitempty"` + + // A count of shipments that belong to the customer's account. + ShipmentCount *uint `json:"shipmentCount,omitempty" xmlrpc:"shipmentCount,omitempty"` + + // Shipments that belong to the customer's account. + Shipments []Account_Shipment `json:"shipments,omitempty" xmlrpc:"shipments,omitempty"` + + // A count of customer specified SSH keys that can be implemented onto a newly provisioned or reloaded server. + SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"` + + // Customer specified SSH keys that can be implemented onto a newly provisioned or reloaded server. + SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"` + + // A count of an account's associated portal users with SSL VPN access. + SslVpnUserCount *uint `json:"sslVpnUserCount,omitempty" xmlrpc:"sslVpnUserCount,omitempty"` + + // An account's associated portal users with SSL VPN access. + SslVpnUsers []User_Customer `json:"sslVpnUsers,omitempty" xmlrpc:"sslVpnUsers,omitempty"` + + // A count of an account's virtual guest objects that are hosted on a user provisioned hypervisor. + StandardPoolVirtualGuestCount *uint `json:"standardPoolVirtualGuestCount,omitempty" xmlrpc:"standardPoolVirtualGuestCount,omitempty"` + + // An account's virtual guest objects that are hosted on a user provisioned hypervisor. + StandardPoolVirtualGuests []Virtual_Guest `json:"standardPoolVirtualGuests,omitempty" xmlrpc:"standardPoolVirtualGuests,omitempty"` + + // A two-letter abbreviation of the state in the mailing address belonging to an account. If an account does not reside in a province then this is typically blank. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // The date of an account's last status change. + StatusDate *Time `json:"statusDate,omitempty" xmlrpc:"statusDate,omitempty"` + + // A count of all network subnets associated with an account. 
+ SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"`
+
+ // A count of an account's subnet registrations.
+ SubnetRegistrationCount *uint `json:"subnetRegistrationCount,omitempty" xmlrpc:"subnetRegistrationCount,omitempty"`
+
+ // A count of an account's subnet registration details.
+ SubnetRegistrationDetailCount *uint `json:"subnetRegistrationDetailCount,omitempty" xmlrpc:"subnetRegistrationDetailCount,omitempty"`
+
+ // no documentation yet
+ SubnetRegistrationDetails []Account_Regional_Registry_Detail `json:"subnetRegistrationDetails,omitempty" xmlrpc:"subnetRegistrationDetails,omitempty"`
+
+ // no documentation yet
+ SubnetRegistrations []Network_Subnet_Registration `json:"subnetRegistrations,omitempty" xmlrpc:"subnetRegistrations,omitempty"`
+
+ // All network subnets associated with an account.
+ Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"`
+
+ // A count of the SoftLayer employees that an account is assigned to.
+ SupportRepresentativeCount *uint `json:"supportRepresentativeCount,omitempty" xmlrpc:"supportRepresentativeCount,omitempty"`
+
+ // The SoftLayer employees that an account is assigned to.
+ SupportRepresentatives []User_Employee `json:"supportRepresentatives,omitempty" xmlrpc:"supportRepresentatives,omitempty"`
+
+ // A count of the active support subscriptions for this account.
+ SupportSubscriptionCount *uint `json:"supportSubscriptionCount,omitempty" xmlrpc:"supportSubscriptionCount,omitempty"`
+
+ // The active support subscriptions for this account.
+ SupportSubscriptions []Billing_Item `json:"supportSubscriptions,omitempty" xmlrpc:"supportSubscriptions,omitempty"`
+
+ // no documentation yet
+ SupportTier *string `json:"supportTier,omitempty" xmlrpc:"supportTier,omitempty"`
+
+ // A flag indicating to suppress invoices.
+ SuppressInvoicesFlag *bool `json:"suppressInvoicesFlag,omitempty" xmlrpc:"suppressInvoicesFlag,omitempty"`
+
+ // A count of an account's tags.
+ TagCount *uint `json:"tagCount,omitempty" xmlrpc:"tagCount,omitempty"`
+
+ // no documentation yet
+ Tags []Tag `json:"tags,omitempty" xmlrpc:"tags,omitempty"`
+
+ // A count of an account's associated tickets.
+ TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"`
+
+ // An account's associated tickets.
+ Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"`
+
+ // Tickets closed within the last 72 hours or last 10 tickets, whichever is less, associated with an account.
+ TicketsClosedInTheLastThreeDays []Ticket `json:"ticketsClosedInTheLastThreeDays,omitempty" xmlrpc:"ticketsClosedInTheLastThreeDays,omitempty"`
+
+ // A count of tickets closed within the last 72 hours or last 10 tickets, whichever is less, associated with an account.
+ TicketsClosedInTheLastThreeDaysCount *uint `json:"ticketsClosedInTheLastThreeDaysCount,omitempty" xmlrpc:"ticketsClosedInTheLastThreeDaysCount,omitempty"`
+
+ // Tickets closed today associated with an account.
+ TicketsClosedToday []Ticket `json:"ticketsClosedToday,omitempty" xmlrpc:"ticketsClosedToday,omitempty"`
+
+ // A count of tickets closed today associated with an account.
+ TicketsClosedTodayCount *uint `json:"ticketsClosedTodayCount,omitempty" xmlrpc:"ticketsClosedTodayCount,omitempty"`
+
+ // A count of an account's associated Transcode accounts.
+ TranscodeAccountCount *uint `json:"transcodeAccountCount,omitempty" xmlrpc:"transcodeAccountCount,omitempty"`
+
+ // An account's associated Transcode accounts.
+ TranscodeAccounts []Network_Media_Transcode_Account `json:"transcodeAccounts,omitempty" xmlrpc:"transcodeAccounts,omitempty"`
+
+ // A count of an account's associated upgrade requests.
+ UpgradeRequestCount *uint `json:"upgradeRequestCount,omitempty" xmlrpc:"upgradeRequestCount,omitempty"`
+
+ // An account's associated upgrade requests.
+ UpgradeRequests []Product_Upgrade_Request `json:"upgradeRequests,omitempty" xmlrpc:"upgradeRequests,omitempty"`
+
+ // A count of an account's portal users.
+ UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"`
+
+ // An account's portal users.
+ Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"`
+
+ // A count of stored security certificates that are not expired (i.e. SSL).
+ ValidSecurityCertificateCount *uint `json:"validSecurityCertificateCount,omitempty" xmlrpc:"validSecurityCertificateCount,omitempty"`
+
+ // Stored security certificates that are not expired (i.e. SSL).
+ ValidSecurityCertificates []Security_Certificate `json:"validSecurityCertificates,omitempty" xmlrpc:"validSecurityCertificates,omitempty"`
+
+ // Returns 0 if VPN updates are currently in progress on this account, otherwise 1.
+ VdrUpdatesInProgressFlag *bool `json:"vdrUpdatesInProgressFlag,omitempty" xmlrpc:"vdrUpdatesInProgressFlag,omitempty"`
+
+ // A count of the bandwidth pooling for this account.
+ VirtualDedicatedRackCount *uint `json:"virtualDedicatedRackCount,omitempty" xmlrpc:"virtualDedicatedRackCount,omitempty"`
+
+ // The bandwidth pooling for this account.
+ VirtualDedicatedRacks []Network_Bandwidth_Version1_Allotment `json:"virtualDedicatedRacks,omitempty" xmlrpc:"virtualDedicatedRacks,omitempty"`
+
+ // A count of an account's associated virtual server virtual disk images.
+ VirtualDiskImageCount *uint `json:"virtualDiskImageCount,omitempty" xmlrpc:"virtualDiskImageCount,omitempty"`
+
+ // An account's associated virtual server virtual disk images.
+ VirtualDiskImages []Virtual_Disk_Image `json:"virtualDiskImages,omitempty" xmlrpc:"virtualDiskImages,omitempty"`
+
+ // A count of an account's associated virtual guest objects.
+ VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"`
+
+ // An account's associated virtual guest objects.
+ VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"`
+
+ // An account's associated virtual guest objects currently over bandwidth allocation.
+ VirtualGuestsOverBandwidthAllocation []Virtual_Guest `json:"virtualGuestsOverBandwidthAllocation,omitempty" xmlrpc:"virtualGuestsOverBandwidthAllocation,omitempty"`
+
+ // A count of an account's associated virtual guest objects currently over bandwidth allocation.
+ VirtualGuestsOverBandwidthAllocationCount *uint `json:"virtualGuestsOverBandwidthAllocationCount,omitempty" xmlrpc:"virtualGuestsOverBandwidthAllocationCount,omitempty"`
+
+ // An account's associated virtual guest objects projected to be over bandwidth allocation.
+ VirtualGuestsProjectedOverBandwidthAllocation []Virtual_Guest `json:"virtualGuestsProjectedOverBandwidthAllocation,omitempty" xmlrpc:"virtualGuestsProjectedOverBandwidthAllocation,omitempty"`
+
+ // A count of an account's associated virtual guest objects projected to be over bandwidth allocation.
+ VirtualGuestsProjectedOverBandwidthAllocationCount *uint `json:"virtualGuestsProjectedOverBandwidthAllocationCount,omitempty" xmlrpc:"virtualGuestsProjectedOverBandwidthAllocationCount,omitempty"`
+
+ // All virtual guests associated with an account that have the cPanel web hosting control panel installed.
+ VirtualGuestsWithCpanel []Virtual_Guest `json:"virtualGuestsWithCpanel,omitempty" xmlrpc:"virtualGuestsWithCpanel,omitempty"`
+
+ // A count of all virtual guests associated with an account that have the cPanel web hosting control panel installed.
+ VirtualGuestsWithCpanelCount *uint `json:"virtualGuestsWithCpanelCount,omitempty" xmlrpc:"virtualGuestsWithCpanelCount,omitempty"`
+
+ // All virtual guests associated with an account that have McAfee Secure software components.
+ VirtualGuestsWithMcafee []Virtual_Guest `json:"virtualGuestsWithMcafee,omitempty" xmlrpc:"virtualGuestsWithMcafee,omitempty"`
+
+ // All virtual guests associated with an account that have McAfee Secure AntiVirus for Redhat software components.
+ VirtualGuestsWithMcafeeAntivirusRedhat []Virtual_Guest `json:"virtualGuestsWithMcafeeAntivirusRedhat,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusRedhat,omitempty"`
+
+ // A count of all virtual guests associated with an account that have McAfee Secure AntiVirus for Redhat software components.
+ VirtualGuestsWithMcafeeAntivirusRedhatCount *uint `json:"virtualGuestsWithMcafeeAntivirusRedhatCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusRedhatCount,omitempty"`
+
+ // A count of all virtual guests associated with an account that have McAfee Secure AntiVirus for Windows software components.
+ VirtualGuestsWithMcafeeAntivirusWindowCount *uint `json:"virtualGuestsWithMcafeeAntivirusWindowCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusWindowCount,omitempty"`
+
+ // All virtual guests associated with an account that have McAfee Secure AntiVirus for Windows software components.
+ VirtualGuestsWithMcafeeAntivirusWindows []Virtual_Guest `json:"virtualGuestsWithMcafeeAntivirusWindows,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusWindows,omitempty"`
+
+ // A count of all virtual guests associated with an account that have McAfee Secure software components.
+ VirtualGuestsWithMcafeeCount *uint `json:"virtualGuestsWithMcafeeCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeCount,omitempty"`
+
+ // All virtual guests associated with an account that have McAfee Secure Intrusion Detection System software components.
+ VirtualGuestsWithMcafeeIntrusionDetectionSystem []Virtual_Guest `json:"virtualGuestsWithMcafeeIntrusionDetectionSystem,omitempty" xmlrpc:"virtualGuestsWithMcafeeIntrusionDetectionSystem,omitempty"`
+
+ // A count of all virtual guests associated with an account that have McAfee Secure Intrusion Detection System software components.
+ VirtualGuestsWithMcafeeIntrusionDetectionSystemCount *uint `json:"virtualGuestsWithMcafeeIntrusionDetectionSystemCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeIntrusionDetectionSystemCount,omitempty"`
+
+ // All virtual guests associated with an account that have the Plesk web hosting control panel installed.
+ VirtualGuestsWithPlesk []Virtual_Guest `json:"virtualGuestsWithPlesk,omitempty" xmlrpc:"virtualGuestsWithPlesk,omitempty"`
+
+ // A count of all virtual guests associated with an account that have the Plesk web hosting control panel installed.
+ VirtualGuestsWithPleskCount *uint `json:"virtualGuestsWithPleskCount,omitempty" xmlrpc:"virtualGuestsWithPleskCount,omitempty"`
+
+ // All virtual guests associated with an account that have the QuantaStor storage system installed.
+ VirtualGuestsWithQuantastor []Virtual_Guest `json:"virtualGuestsWithQuantastor,omitempty" xmlrpc:"virtualGuestsWithQuantastor,omitempty"`
+
+ // A count of all virtual guests associated with an account that have the QuantaStor storage system installed.
+ VirtualGuestsWithQuantastorCount *uint `json:"virtualGuestsWithQuantastorCount,omitempty" xmlrpc:"virtualGuestsWithQuantastorCount,omitempty"`
+
+ // All virtual guests associated with an account that have the Urchin web traffic analytics package installed.
+ VirtualGuestsWithUrchin []Virtual_Guest `json:"virtualGuestsWithUrchin,omitempty" xmlrpc:"virtualGuestsWithUrchin,omitempty"`
+
+ // A count of all virtual guests associated with an account that have the Urchin web traffic analytics package installed.
+ VirtualGuestsWithUrchinCount *uint `json:"virtualGuestsWithUrchinCount,omitempty" xmlrpc:"virtualGuestsWithUrchinCount,omitempty"`
+
+ // The bandwidth pooling for this account.
+ VirtualPrivateRack *Network_Bandwidth_Version1_Allotment `json:"virtualPrivateRack,omitempty" xmlrpc:"virtualPrivateRack,omitempty"`
+
+ // An account's associated virtual server archived storage repositories.
+ VirtualStorageArchiveRepositories []Virtual_Storage_Repository `json:"virtualStorageArchiveRepositories,omitempty" xmlrpc:"virtualStorageArchiveRepositories,omitempty"`
+
+ // A count of an account's associated virtual server archived storage repositories.
+ VirtualStorageArchiveRepositoryCount *uint `json:"virtualStorageArchiveRepositoryCount,omitempty" xmlrpc:"virtualStorageArchiveRepositoryCount,omitempty"`
+
+ // An account's associated virtual server public storage repositories.
+ VirtualStoragePublicRepositories []Virtual_Storage_Repository `json:"virtualStoragePublicRepositories,omitempty" xmlrpc:"virtualStoragePublicRepositories,omitempty"`
+
+ // A count of an account's associated virtual server public storage repositories.
+ VirtualStoragePublicRepositoryCount *uint `json:"virtualStoragePublicRepositoryCount,omitempty" xmlrpc:"virtualStoragePublicRepositoryCount,omitempty"`
+
+ // A count of an account's associated VPC configured virtual guest objects.
+ VpcVirtualGuestCount *uint `json:"vpcVirtualGuestCount,omitempty" xmlrpc:"vpcVirtualGuestCount,omitempty"`
+
+ // An account's associated VPC configured virtual guest objects.
+ VpcVirtualGuests []Virtual_Guest `json:"vpcVirtualGuests,omitempty" xmlrpc:"vpcVirtualGuests,omitempty"`
+}
+
+// An unfortunate facet of the hosting business is the necessity of dealing with legal and network abuse inquiries. As these types of inquiries frequently contain sensitive information, SoftLayer keeps a separate account contact email address for direct contact about legal and abuse matters, modeled by the SoftLayer_Account_AbuseEmail data type. SoftLayer will typically email an account's abuse email addresses in these types of cases, and an email is automatically sent to an account's abuse email addresses when a legal or abuse ticket is created or updated.
+type Account_AbuseEmail struct {
+ Entity
+
+ // The account associated with an abuse email address.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
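+
+ // Editor's note (illustrative sketch, not generated documentation): since
+ // both struct tags mirror the API's lowerCamel property names, decoding a
+ // JSON payload into this type is direct; the payload below is hypothetical.
+ //
+ //	var abuse Account_AbuseEmail
+ //	_ = json.Unmarshal([]byte(`{"email":"abuse@example.com"}`), &abuse)
+ //	// abuse.Email now points at "abuse@example.com"; Account remains nil.
+
+ // A valid email address.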
+ Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` +} + +// The SoftLayer_Account_Address data type contains information on an address associated with a SoftLayer account. +type Account_Address struct { + Entity + + // The account to which this address belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Line 1 of the address (normally the street address). + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Line 2 of the address. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // The city of the address. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // The contact name (person, office) of the address. + ContactName *string `json:"contactName,omitempty" xmlrpc:"contactName,omitempty"` + + // The country of the address. + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The customer user who created this address. + CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"` + + // The description of the address. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The unique id of the address. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Flag to show whether the address is active. + IsActive *int `json:"isActive,omitempty" xmlrpc:"isActive,omitempty"` + + // The location of this address. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The location id of the address. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The employee who last modified this address. + ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"` + + // The customer user who last modified this address. + ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"` + + // The postal (zip) code of the address. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // The state of the address. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // An account address' type. + Type *Account_Address_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Account_Address_Type struct { + Entity + + // DEPRECATED + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// This service allows for a unique identifier to be associated to an existing customer account. +type Account_Affiliation struct { + Entity + + // The account that an affiliation belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A customer account's internal identifier. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // An affiliate identifier associated with the customer account. + AffiliateId *string `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // The date an account affiliation was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A customer affiliation internal identifier. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The date an account affiliation was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+}
+
+// no documentation yet
+type Account_Agreement struct {
+ Entity
+
+ // no documentation yet
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The type of agreement.
+ AgreementType *Account_Agreement_Type `json:"agreementType,omitempty" xmlrpc:"agreementType,omitempty"`
+
+ // The type of agreement identifier.
+ AgreementTypeId *int `json:"agreementTypeId,omitempty" xmlrpc:"agreementTypeId,omitempty"`
+
+ // A count of the files attached to an agreement.
+ AttachedBillingAgreementFileCount *uint `json:"attachedBillingAgreementFileCount,omitempty" xmlrpc:"attachedBillingAgreementFileCount,omitempty"`
+
+ // The files attached to an agreement.
+ AttachedBillingAgreementFiles []Account_MasterServiceAgreement `json:"attachedBillingAgreementFiles,omitempty" xmlrpc:"attachedBillingAgreementFiles,omitempty"`
+
+ // no documentation yet
+ AutoRenew *int `json:"autoRenew,omitempty" xmlrpc:"autoRenew,omitempty"`
+
+ // A count of the billing items associated with an agreement.
+ BillingItemCount *uint `json:"billingItemCount,omitempty" xmlrpc:"billingItemCount,omitempty"`
+
+ // The billing items associated with an agreement.
+ BillingItems []Billing_Item `json:"billingItems,omitempty" xmlrpc:"billingItems,omitempty"`
+
+ // no documentation yet
+ CancellationFee *int `json:"cancellationFee,omitempty" xmlrpc:"cancellationFee,omitempty"`
+
+ // The date an agreement was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The duration in months of an agreement.
+ DurationMonths *int `json:"durationMonths,omitempty" xmlrpc:"durationMonths,omitempty"`
+
+ // The end date of an agreement.
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+ // An agreement's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The effective start date of an agreement.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // The status of the agreement.
+ Status *Account_Agreement_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // The status identifier for an agreement.
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // The title of an agreement.
+ Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+ // A count of the top level billing items associated with an agreement.
+ TopLevelBillingItemCount *uint `json:"topLevelBillingItemCount,omitempty" xmlrpc:"topLevelBillingItemCount,omitempty"`
+
+ // The top level billing items associated with an agreement.
+ TopLevelBillingItems []Billing_Item `json:"topLevelBillingItems,omitempty" xmlrpc:"topLevelBillingItems,omitempty"`
+}
+
+// no documentation yet
+type Account_Agreement_Status struct {
+ Entity
+
+ // The name of the agreement status.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Account_Agreement_Type struct {
+ Entity
+
+ // The name of the agreement type.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// A SoftLayer_Account_Attachment_Employee models an assignment of a single [[SoftLayer_User_Employee|employee]] with a single [[SoftLayer_Account|account]].
+type Account_Attachment_Employee struct {
+ Entity
+
+ // An [[SoftLayer_Account|account]] that is assigned to an [[SoftLayer_User_Employee|employee]].
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // An [[SoftLayer_User_Employee|employee]] that is assigned to an [[SoftLayer_Account|account]].
+ Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"`
+
+ // The role of an [[SoftLayer_User_Employee|employee]] that is assigned to an [[SoftLayer_Account|account]].
+ EmployeeRole *Account_Attachment_Employee_Role `json:"employeeRole,omitempty" xmlrpc:"employeeRole,omitempty"`
+
+ // Role identifier.
+ RoleId *int `json:"roleId,omitempty" xmlrpc:"roleId,omitempty"`
+}
+
+// no documentation yet
+type Account_Attachment_Employee_Role struct {
+ Entity
+
+ // no documentation yet
+ Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Many SoftLayer customer accounts have individual attributes assigned to them that describe features or special features for that account, such as special pricing, account statuses, and ordering instructions. The SoftLayer_Account_Attribute data type contains information relating to a single SoftLayer_Account attribute.
+type Account_Attribute struct {
+ Entity
+
+ // The SoftLayer customer account that has an attribute.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The type of attribute assigned to a SoftLayer customer account.
+ AccountAttributeType *Account_Attribute_Type `json:"accountAttributeType,omitempty" xmlrpc:"accountAttributeType,omitempty"`
+
+ // The internal identifier of the type of attribute that a SoftLayer customer account attribute belongs to.
+ AccountAttributeTypeId *int `json:"accountAttributeTypeId,omitempty" xmlrpc:"accountAttributeTypeId,omitempty"`
+
+ // The internal identifier of the SoftLayer customer account that is assigned an account attribute.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // A SoftLayer customer account attribute's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A SoftLayer account attribute's value.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// SoftLayer_Account_Attribute_Type models the type of attribute that can be assigned to a SoftLayer customer account.
+type Account_Attribute_Type struct {
+ Entity
+
+ // A brief description of a SoftLayer account attribute type.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // A SoftLayer account attribute type's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A SoftLayer account attribute type's key name. This is typically a shorter version of an attribute type's name.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // A SoftLayer account attribute type's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Account authentication has many different settings that can be set. This class allows the customer or employee to set these settings.
+type Account_Authentication_Attribute struct {
+ Entity
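+
+ // Editor's note (illustrative sketch, not generated documentation): an
+ // account attribute pairs a type (with a stable KeyName) with a string
+ // Value, so a lookup over a fetched []Account_Attribute might read, with
+ // every pointer guarded before dereferencing:
+ //
+ //	for _, attr := range attrs {
+ //		if attr.AccountAttributeType != nil && attr.AccountAttributeType.KeyName != nil && attr.Value != nil {
+ //			fmt.Printf("%s = %s\n", *attr.AccountAttributeType.KeyName, *attr.Value)
+ //		}
+ //	}
+
+ // The SoftLayer customer account.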
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The internal identifier of the SoftLayer customer account that is assigned an account authentication attribute.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The SoftLayer account authentication that has an attribute.
+ AuthenticationRecord *Account_Authentication_Saml `json:"authenticationRecord,omitempty" xmlrpc:"authenticationRecord,omitempty"`
+
+ // A SoftLayer account authentication attribute's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The type of attribute assigned to a SoftLayer account authentication.
+ Type *Account_Authentication_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // The internal identifier of the type of attribute that a SoftLayer account authentication attribute belongs to.
+ TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+ // A SoftLayer account authentication attribute's value.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// SoftLayer_Account_Authentication_Attribute_Type models the type of attribute that can be assigned to a SoftLayer customer account authentication.
+type Account_Authentication_Attribute_Type struct {
+ Entity
+
+ // A brief description of a SoftLayer account authentication attribute type.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // A SoftLayer account authentication attribute type's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A SoftLayer account authentication attribute type's key name. This is typically a shorter version of an attribute type's name.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // A SoftLayer account authentication attribute type's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // An example of what you can put in as your value.
+ ValueExample *string `json:"valueExample,omitempty" xmlrpc:"valueExample,omitempty"`
+}
+
+// no documentation yet
+type Account_Authentication_OpenIdConnect_Option struct {
+ Entity
+
+ // no documentation yet
+ Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"`
+
+ // no documentation yet
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// no documentation yet
+type Account_Authentication_OpenIdConnect_RegistrationInformation struct {
+ Entity
+
+ // no documentation yet
+ ExistingBlueIdFlag *bool `json:"existingBlueIdFlag,omitempty" xmlrpc:"existingBlueIdFlag,omitempty"`
+
+ // no documentation yet
+ FederatedEmailDomainFlag *bool `json:"federatedEmailDomainFlag,omitempty" xmlrpc:"federatedEmailDomainFlag,omitempty"`
+
+ // no documentation yet
+ User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+}
+
+// no documentation yet
+type Account_Authentication_Saml struct {
+ Entity
+
+ // The account associated with this SAML configuration.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The SAML account id.
+ AccountId *string `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // A count of the SAML attribute values for a SoftLayer customer account.
+ AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+ // The SAML attribute values for a SoftLayer customer account.
+ Attributes []Account_Authentication_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+ // The identity provider x509 certificate.
+ Certificate *string `json:"certificate,omitempty" xmlrpc:"certificate,omitempty"`
+
+ // The identity provider x509 certificate fingerprint.
+ CertificateFingerprint *string `json:"certificateFingerprint,omitempty" xmlrpc:"certificateFingerprint,omitempty"`
+
+ // The identity provider entity ID.
+ EntityId *string `json:"entityId,omitempty" xmlrpc:"entityId,omitempty"`
+
+ // The SAML internal identifying number.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The service provider x509 certificate.
+ ServiceProviderCertificate *string `json:"serviceProviderCertificate,omitempty" xmlrpc:"serviceProviderCertificate,omitempty"`
+
+ // The service provider entity ID.
+ ServiceProviderEntityId *string `json:"serviceProviderEntityId,omitempty" xmlrpc:"serviceProviderEntityId,omitempty"`
+
+ // The service provider public key.
+ ServiceProviderPublicKey *string `json:"serviceProviderPublicKey,omitempty" xmlrpc:"serviceProviderPublicKey,omitempty"`
+
+ // The service provider single logout encoding.
+ ServiceProviderSingleLogoutEncoding *string `json:"serviceProviderSingleLogoutEncoding,omitempty" xmlrpc:"serviceProviderSingleLogoutEncoding,omitempty"`
+
+ // The service provider single logout address.
+ ServiceProviderSingleLogoutUrl *string `json:"serviceProviderSingleLogoutUrl,omitempty" xmlrpc:"serviceProviderSingleLogoutUrl,omitempty"`
+
+ // The service provider single sign on encoding.
+ ServiceProviderSingleSignOnEncoding *string `json:"serviceProviderSingleSignOnEncoding,omitempty" xmlrpc:"serviceProviderSingleSignOnEncoding,omitempty"`
+
+ // The service provider single sign on address.
+ ServiceProviderSingleSignOnUrl *string `json:"serviceProviderSingleSignOnUrl,omitempty" xmlrpc:"serviceProviderSingleSignOnUrl,omitempty"`
+
+ // The identity provider single logout encoding.
+ SingleLogoutEncoding *string `json:"singleLogoutEncoding,omitempty" xmlrpc:"singleLogoutEncoding,omitempty"`
+
+ // The identity provider single logout address.
+ SingleLogoutUrl *string `json:"singleLogoutUrl,omitempty" xmlrpc:"singleLogoutUrl,omitempty"`
+
+ // The identity provider single sign on encoding.
+ SingleSignOnEncoding *string `json:"singleSignOnEncoding,omitempty" xmlrpc:"singleSignOnEncoding,omitempty"`
+
+ // The identity provider single sign on address.
+ SingleSignOnUrl *string `json:"singleSignOnUrl,omitempty" xmlrpc:"singleSignOnUrl,omitempty"`
+}
+
+// Contains business partner details associated with an account. Country Enterprise Identifier (CEID), Channel ID, Segment ID and Reseller Level.
+type Account_Business_Partner struct {
+ Entity
+
+ // Account associated with the business partner data.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // Channel indicator used to categorize business partner revenue.
+ Channel *Business_Partner_Channel `json:"channel,omitempty" xmlrpc:"channel,omitempty"`
+
+ // Account business partner channel identifier.
+ ChannelId *int `json:"channelId,omitempty" xmlrpc:"channelId,omitempty"`
+
+ // Account business partner country enterprise code.
+ CountryEnterpriseCode *string `json:"countryEnterpriseCode,omitempty" xmlrpc:"countryEnterpriseCode,omitempty"`
+
+ // Reseller level of an account business partner.
+ ResellerLevel *int `json:"resellerLevel,omitempty" xmlrpc:"resellerLevel,omitempty"`
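+
+ // Editor's note (illustrative sketch, not generated documentation): in the
+ // Account_Authentication_Saml type above, both the identity-provider and the
+ // service-provider sides of the SAML exchange are described by string
+ // pointers, so inspection code should guard each one before dereferencing:
+ //
+ //	if saml.SingleSignOnUrl != nil {
+ //		fmt.Println("IdP SSO endpoint:", *saml.SingleSignOnUrl)
+ //	}
+ //	if saml.ServiceProviderSingleLogoutUrl != nil {
+ //		fmt.Println("SP SLO endpoint:", *saml.ServiceProviderSingleLogoutUrl)
+ //	}
+
+ // Segment indicator used to categorize business partner revenue.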
+ Segment *Business_Partner_Segment `json:"segment,omitempty" xmlrpc:"segment,omitempty"` + + // Account business partner segment identifier + SegmentId *int `json:"segmentId,omitempty" xmlrpc:"segmentId,omitempty"` +} + +// no documentation yet +type Account_Classification_Group_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// no documentation yet +type Account_Contact struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + JobTitle *string `json:"jobTitle,omitempty" xmlrpc:"jobTitle,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + ProfileName *string `json:"profileName,omitempty" xmlrpc:"profileName,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Type *Account_Contact_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // no documentation yet + Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"` +} + +// no documentation yet +type Account_Contact_Type struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type 
Account_External_Setup struct { + Entity + + // The SoftLayer customer account the request belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The currency requested after the billing switch. + CurrencyId *int `json:"currencyId,omitempty" xmlrpc:"currencyId,omitempty"` + + // The unique identifier for this setup request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The external system that will handle billing. + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // The status of the account setup request. + StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"` + + // no documentation yet + TypeCode *string `json:"typeCode,omitempty" xmlrpc:"typeCode,omitempty"` + + // The transaction information related to verifying the customer credit card. + VerifyCardTransaction *Billing_Payment_Card_Transaction `json:"verifyCardTransaction,omitempty" xmlrpc:"verifyCardTransaction,omitempty"` + + // The related credit card transaction record for card verification. + VerifyCardTransactionId *int `json:"verifyCardTransactionId,omitempty" xmlrpc:"verifyCardTransactionId,omitempty"` +} + +// no documentation yet +type Account_Historical_Report struct { + Entity +} + +// no documentation yet +type Account_Internal_Ibm struct { + Entity +} + +// no documentation yet +type Account_Link struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DestinationAccountAlphanumericId *string `json:"destinationAccountAlphanumericId,omitempty" xmlrpc:"destinationAccountAlphanumericId,omitempty"` + + // no documentation yet + DestinationAccountId *int `json:"destinationAccountId,omitempty" xmlrpc:"destinationAccountId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // no documentation yet + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` +} + +// no documentation yet +type Account_Link_Bluemix struct { + Account_Link +} + +// no documentation yet +type Account_Link_OpenStack struct { + Account_Link + + // Pseudonym for destinationAccountAlphanumericId + DomainId *string `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"` +} + +// OpenStack domain creation details +type Account_Link_OpenStack_DomainCreationDetails struct { + Entity + + // Id for the domain this user was added to. + DomainId *string `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"` + + // Id for the user given the Cloud Admin role for this domain. + UserId *string `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // Name for the user given the Cloud Admin role for this domain. 
+ UserName *string `json:"userName,omitempty" xmlrpc:"userName,omitempty"` +} + +// Details required for OpenStack link request +type Account_Link_OpenStack_LinkRequest struct { + Entity + + // Optional password + DesiredPassword *string `json:"desiredPassword,omitempty" xmlrpc:"desiredPassword,omitempty"` + + // Optional projectName + DesiredProjectName *string `json:"desiredProjectName,omitempty" xmlrpc:"desiredProjectName,omitempty"` + + // Required username + DesiredUsername *string `json:"desiredUsername,omitempty" xmlrpc:"desiredUsername,omitempty"` +} + +// OpenStack project creation details +type Account_Link_OpenStack_ProjectCreationDetails struct { + Entity + + // Id for the domain this project was added to. + DomainId *string `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"` + + // Id for this project. + ProjectId *string `json:"projectId,omitempty" xmlrpc:"projectId,omitempty"` + + // Name for this project. + ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"` + + // Id for the user given the Project Admin role for this project. + UserId *string `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // Name for the user given the Project Admin role for this project. + UserName *string `json:"userName,omitempty" xmlrpc:"userName,omitempty"` +} + +// OpenStack project details +type Account_Link_OpenStack_ProjectDetails struct { + Entity + + // Id for this project. + ProjectId *string `json:"projectId,omitempty" xmlrpc:"projectId,omitempty"` + + // Name for this project. + ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"` +} + +// no documentation yet +type Account_Link_Vendor struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Account_Lockdown_Request data type holds information on API requests from brand customers. +type Account_Lockdown_Request struct { + Entity + + // Account ID associated with this lockdown request. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Type of request. + Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"` + + // Timestamp when the lockdown request was initially made. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // ID of this lockdown request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Timestamp when the lockdown request was modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Status of the lockdown request denoting whether it's been completed. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Account_MasterServiceAgreement struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Guid *string `json:"guid,omitempty" xmlrpc:"guid,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Account_Media data type contains information on a single piece of media associated with a Data Transfer Service request. 
+type Account_Media struct { + Entity + + // The account to which the media belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The customer user who created the media object. + CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"` + + // The datacenter where the media resides. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // The description of the media. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The unique id of the media. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The employee who last modified the media. + ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"` + + // The customer user who last modified the media. + ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"` + + // The request to which the media belongs. + Request *Account_Media_Data_Transfer_Request `json:"request,omitempty" xmlrpc:"request,omitempty"` + + // The request id of the media. + RequestId *int `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"` + + // The manufacturer's serial number of the media. + SerialNumber *string `json:"serialNumber,omitempty" xmlrpc:"serialNumber,omitempty"` + + // The media's type. + Type *Account_Media_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The type id of the media. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // A guest's associated EVault network storage service account. + Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"` +} + +// The SoftLayer_Account_Media_Data_Transfer_Request data type contains information on a single Data Transfer Service request. Creation of these requests is limited to SoftLayer customers through the SoftLayer Customer Portal. +type Account_Media_Data_Transfer_Request struct { + Entity + + // The account to which the request belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account id of the request. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of the active tickets that are attached to the data transfer request. + ActiveTicketCount *uint `json:"activeTicketCount,omitempty" xmlrpc:"activeTicketCount,omitempty"` + + // The active tickets that are attached to the data transfer request. + ActiveTickets []Ticket `json:"activeTickets,omitempty" xmlrpc:"activeTickets,omitempty"` + + // The billing item for the original request. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The customer user who created the request. + CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"` + + // The create user id of the request. + CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"` + + // The end date of the request. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The unique id of the request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The media of the request. + Media *Account_Media `json:"media,omitempty" xmlrpc:"media,omitempty"` + + // The employee who last modified the request. + ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"` + + // The customer user who last modified the request. 
+	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
+
+	// The modify user id of the request.
+	ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"`
+
+	// A count of the shipments of the request.
+	ShipmentCount *uint `json:"shipmentCount,omitempty" xmlrpc:"shipmentCount,omitempty"`
+
+	// The shipments of the request.
+	Shipments []Account_Shipment `json:"shipments,omitempty" xmlrpc:"shipments,omitempty"`
+
+	// The start date of the request.
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+	// The status of the request.
+	Status *Account_Media_Data_Transfer_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// The status id of the request.
+	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+	// A count of all tickets that are attached to the data transfer request.
+	TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"`
+
+	// All tickets that are attached to the data transfer request.
+	Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"`
+}
+
+// The SoftLayer_Account_Media_Data_Transfer_Request_Status data type contains general information relating to the statuses to which a Data Transfer Request may be set.
+type Account_Media_Data_Transfer_Request_Status struct {
+	Entity
+
+	// The description of the request status.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique id of the request status.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The unique keyname of the request status.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The name of the request status.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Account_Media_Type data type contains general information relating to the different types of media devices that SoftLayer currently supports, as part of the Data Transfer Request Service. Such devices as USB hard drives and flash drives, as well as optical media such as CD and DVD, are currently supported.
+type Account_Media_Type struct {
+	Entity
+
+	// The description of the media type.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique id of the media type.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The unique keyname of the media type.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The name of the media type.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Account_Network_Vlan_Span data type exposes the setting which controls the automatic spanning of private VLANs attached to a given customer's account.
+type Account_Network_Vlan_Span struct {
+	Entity
+
+	// The SoftLayer customer account associated with a VLAN.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// Flag indicating whether the customer wishes to have all private network VLANs associated with the account automatically joined [0 or 1]
+	EnabledFlag *bool `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"`
+
+	// The unique internal identifier of the SoftLayer_Account_Network_Vlan_Span object.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Timestamp of the last time the ACL for this account was applied.
+	LastAppliedDate *Time `json:"lastAppliedDate,omitempty" xmlrpc:"lastAppliedDate,omitempty"`
+
+	// Timestamp of the last time the subnet hash was verified for this VLAN span record.
+	LastVerifiedDate *Time `json:"lastVerifiedDate,omitempty" xmlrpc:"lastVerifiedDate,omitempty"`
+
+	// Timestamp of the last edit of the record.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+}
+
+// no documentation yet
+type Account_Note struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// no documentation yet
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
+
+	// no documentation yet
+	NoteHistory []Account_Note_History `json:"noteHistory,omitempty" xmlrpc:"noteHistory,omitempty"`
+
+	// A count of
+	NoteHistoryCount *uint `json:"noteHistoryCount,omitempty" xmlrpc:"noteHistoryCount,omitempty"`
+
+	// no documentation yet
+	NoteTypeId *int `json:"noteTypeId,omitempty" xmlrpc:"noteTypeId,omitempty"`
+
+	// no documentation yet
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// no documentation yet
+type Account_Note_History struct {
+	Entity
+
+	// no documentation yet
+	AccountNote *Account_Note `json:"accountNote,omitempty" xmlrpc:"accountNote,omitempty"`
+
+	// no documentation yet
+	AccountNoteId *int `json:"accountNoteId,omitempty" xmlrpc:"accountNoteId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
+
+	// no documentation yet
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// no documentation yet
+type Account_Partner_Referral_Prospect struct {
+	User_Customer_Prospect
+
+	// no documentation yet
+	CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
+
+	// no documentation yet
+	EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"`
+
+	// no documentation yet
+	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+}
+
+// The SoftLayer_Account_Password data type contains the usernames, passwords, and notes for services that may be required by external applications, such as the Webcc interface for the EVault Storage service.
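+//
+// Because unset fields stay nil, reads are typically guarded. An illustrative sketch only (pw stands in for a value returned by a service call; the library's sl.Get helper can likewise supply a default for a nil field):
+//
+//	if pw.Username != nil && pw.Password != nil {
+//		fmt.Printf("%s / %s\n", *pw.Username, *pw.Password)
+//	}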
+type Account_Password struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The SoftLayer customer account id that a username/password combination is associated with.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// A username/password combination's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A simple description of a username/password combination. These notes don't affect portal functionality.
+	Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+	// The password portion of a username/password combination.
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// The service that an account/password combination is tied to.
+	Type *Account_Password_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// An identifier relating to a username/password combination's associated service.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+	// The username portion of a username/password combination.
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// Every username and password combination associated with a SoftLayer customer account belongs to a service that SoftLayer provides. The relationship between a username/password and its service is provided by the SoftLayer_Account_Password_Type data type. Each username/password belongs to a single service type.
+type Account_Password_Type struct {
+	Entity
+
+	// A description of the use for the account username/password combination.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+}
+
+// no documentation yet
+type Account_PersonalData_RemoveRequestReview struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// no documentation yet
+	ApprovedFlag *Account_PersonalData_RemoveRequestReview `json:"approvedFlag,omitempty" xmlrpc:"approvedFlag,omitempty"`
+}
+
+// no documentation yet
+type Account_ProofOfConcept struct {
+	Entity
+}
+
+// This class represents a Proof of Concept account approver.
+type Account_ProofOfConcept_Approver struct {
+	Entity
+
+	// Approval slot of the approver.
+	ApprovalOrder *int `json:"approvalOrder,omitempty" xmlrpc:"approvalOrder,omitempty"`
+
+	// Internal identifier.
+	BluepagesUid *string `json:"bluepagesUid,omitempty" xmlrpc:"bluepagesUid,omitempty"`
+
+	// Email of the approver.
+	Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+	// First name of the approver.
+	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+	// Internal identifier of a Proof of Concept account approver.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Last name of the approver.
+	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+	// SoftLayer_Account_ProofOfConcept_Approver_Region identifier of the approver.
+	RegionKeyName *string `json:"regionKeyName,omitempty" xmlrpc:"regionKeyName,omitempty"`
+
+	// no documentation yet
+	Role *Account_ProofOfConcept_Approver_Role `json:"role,omitempty" xmlrpc:"role,omitempty"`
+
+	// SoftLayer_Account_ProofOfConcept_Approver_Role identifier of the approver.
+	RoleId *int `json:"roleId,omitempty" xmlrpc:"roleId,omitempty"`
+
+	// no documentation yet
+	Type *Account_ProofOfConcept_Approver_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// SoftLayer_Account_ProofOfConcept_Approver_Type identifier of the approver.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+}
+
+// This class represents a Proof of Concept account approver role. The current roles are Primary and Backup approvers.
+type Account_ProofOfConcept_Approver_Role struct {
+	Entity
+
+	// Description of a Proof of Concept account approver role.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// Internal identifier of a Proof of Concept account approver role.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Key name of a Proof of Concept account approver role.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// Name of a Proof of Concept account approver role.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// This class represents a Proof of Concept account approver type.
+type Account_ProofOfConcept_Approver_Type struct {
+	Entity
+
+	// A count of
+	ApproverCount *uint `json:"approverCount,omitempty" xmlrpc:"approverCount,omitempty"`
+
+	// no documentation yet
+	Approvers []Account_ProofOfConcept_Approver `json:"approvers,omitempty" xmlrpc:"approvers,omitempty"`
+
+	// Description for a Proof of Concept account approver type.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// Internal identifier of a Proof of Concept account approver type.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Key name for a Proof of Concept account approver type.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// Name of a Proof of Concept account approver type.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// A [SoftLayer_Account_ProofOfConcept_Campaign_Code] provides a `code` and an optional `description`.
+type Account_ProofOfConcept_Campaign_Code struct {
+	Entity
+
+	// no documentation yet
+	Code *string `json:"code,omitempty" xmlrpc:"code,omitempty"`
+
+	// no documentation yet
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+}
+
+// no documentation yet
+type Account_ProofOfConcept_Funding_Type struct {
+	Entity
+
+	// A count of
+	ApproverCount *uint `json:"approverCount,omitempty" xmlrpc:"approverCount,omitempty"`
+
+	// A count of
+	ApproverTypeCount *uint `json:"approverTypeCount,omitempty" xmlrpc:"approverTypeCount,omitempty"`
+
+	// no documentation yet
+	ApproverTypes []Account_ProofOfConcept_Approver_Type `json:"approverTypes,omitempty" xmlrpc:"approverTypes,omitempty"`
+
+	// no documentation yet
+	Approvers []Account_ProofOfConcept_Approver `json:"approvers,omitempty" xmlrpc:"approvers,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+}
+
+//
+//
+//
+//
+//
+type Account_Regional_Registry_Detail struct {
+	Entity
+
+	// The account that this detail object belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The detail object's associated [[SoftLayer_Account|account]] id + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The date and time the detail object was created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of references to the [[SoftLayer_Network_Subnet_Registration|registration objects]] that consume this detail object. + DetailCount *uint `json:"detailCount,omitempty" xmlrpc:"detailCount,omitempty"` + + // The associated type of this detail object. + DetailType *Account_Regional_Registry_Detail_Type `json:"detailType,omitempty" xmlrpc:"detailType,omitempty"` + + // The detail object's associated [[SoftLayer_Account_Regional_Registry_Detail_Type|type]] id + DetailTypeId *int `json:"detailTypeId,omitempty" xmlrpc:"detailTypeId,omitempty"` + + // References to the [[SoftLayer_Network_Subnet_Registration|registration objects]] that consume this detail object. + Details []Network_Subnet_Registration_Details `json:"details,omitempty" xmlrpc:"details,omitempty"` + + // Unique ID of the detail object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date and time the detail object was last modified + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The individual properties that define this detail object's values. + Properties []Account_Regional_Registry_Detail_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of the individual properties that define this detail object's values. + PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // The associated RWhois handle of this detail object. Used only when detailed reassignments are necessary. + RegionalInternetRegistryHandle *Account_Rwhois_Handle `json:"regionalInternetRegistryHandle,omitempty" xmlrpc:"regionalInternetRegistryHandle,omitempty"` + + // The detail object's associated [[SoftLayer_Account_Rwhois_Handle|RIR handle]] id + RegionalInternetRegistryHandleId *int `json:"regionalInternetRegistryHandleId,omitempty" xmlrpc:"regionalInternetRegistryHandleId,omitempty"` +} + +// Subnet registration properties are used to define various attributes of the [[SoftLayer_Account_Regional_Registry_Detail|detail objects]]. These properties are defined by the [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] objects, which describe the available value formats. 
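+//
+// As a hypothetical illustration (not part of the generated API): the property type referenced below carries a regular expression in its ValueExpression field, so a caller might validate a value with Go's regexp package (RE2, which covers common cases but is not full PCRE):
+//
+//	re, err := regexp.Compile(*prop.PropertyType.ValueExpression)
+//	if err == nil && prop.Value != nil && re.MatchString(*prop.Value) {
+//		// the value satisfies its declared format
+//	}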
+type Account_Regional_Registry_Detail_Property struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The [[SoftLayer_Account_Regional_Registry_Detail]] object this property belongs to + Detail *Account_Regional_Registry_Detail `json:"detail,omitempty" xmlrpc:"detail,omitempty"` + + // Unique ID of the property object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] object this property belongs to + PropertyType *Account_Regional_Registry_Detail_Property_Type `json:"propertyType,omitempty" xmlrpc:"propertyType,omitempty"` + + // The numeric ID of the related [[SoftLayer_Account_Regional_Registry_Detail_Property_Type|property type object]] + PropertyTypeId *int `json:"propertyTypeId,omitempty" xmlrpc:"propertyTypeId,omitempty"` + + // The numeric ID of the related [[SoftLayer_Account_Regional_Registry_Detail|detail object]] + RegistrationDetailId *int `json:"registrationDetailId,omitempty" xmlrpc:"registrationDetailId,omitempty"` + + // When multiple properties exist for a property type, defines the position in the sequence of those properties + SequencePosition *int `json:"sequencePosition,omitempty" xmlrpc:"sequencePosition,omitempty"` + + // The value of the property + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Subnet Registration Detail Property Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail_Property]] object. These types use [http://php.net/pcre.pattern.php Perl-Compatible Regular Expressions] to validate the value of a property object. +type Account_Regional_Registry_Detail_Property_Type struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Unique numeric ID of the property type object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Code-friendly string name of the property type + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Human-readable name of the property type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A Perl-compatible regular expression used to describe the valid format of the property + ValueExpression *string `json:"valueExpression,omitempty" xmlrpc:"valueExpression,omitempty"` +} + +// Subnet Registration Detail Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail]] object. +// +// The standard values for these objects are as follows:
+// * NETWORK - The detail object represents the information for a [[SoftLayer_Network_Subnet|subnet]]
+// * NETWORK6 - The detail object represents the information for an [[SoftLayer_Network_Subnet_Version6|IPv6 subnet]]
+// * PERSON - The detail object represents the information for a customer with the RIR
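+//
+// As a hypothetical illustration, a consumer might branch on the KeyName field defined below:
+//
+//	switch *detailType.KeyName {
+//	case "NETWORK", "NETWORK6":
+//		// subnet-style details
+//	case "PERSON":
+//		// RIR contact details
+//	}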
+type Account_Regional_Registry_Detail_Type struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Unique numeric ID of the detail type object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Code-friendly string name of the detail type + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Human-readable name of the detail type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Account_Regional_Registry_Detail_Version4_Person_Default data type contains general information relating to a single SoftLayer RIR account. RIR account information in this type such as names, addresses, and phone numbers are assigned to the registry only and not to users belonging to the account. +type Account_Regional_Registry_Detail_Version4_Person_Default struct { + Account_Regional_Registry_Detail +} + +// no documentation yet +type Account_Reports_Request struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A request's corresponding external contact, if one exists. + AccountContact *Account_Contact `json:"accountContact,omitempty" xmlrpc:"accountContact,omitempty"` + + // no documentation yet + AccountContactId *int `json:"accountContactId,omitempty" xmlrpc:"accountContactId,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + ComplianceReportTypeId *string `json:"complianceReportTypeId,omitempty" xmlrpc:"complianceReportTypeId,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + EmployeeRecordId *int `json:"employeeRecordId,omitempty" xmlrpc:"employeeRecordId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Nda *string `json:"nda,omitempty" xmlrpc:"nda,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + Report *string `json:"report,omitempty" xmlrpc:"report,omitempty"` + + // Type of the report customer is requesting for. + ReportType *Compliance_Report_Type `json:"reportType,omitempty" xmlrpc:"reportType,omitempty"` + + // no documentation yet + RequestKey *string `json:"requestKey,omitempty" xmlrpc:"requestKey,omitempty"` + + // A request's corresponding requestor contact, if one exists. + RequestorContact *Account_Contact `json:"requestorContact,omitempty" xmlrpc:"requestorContact,omitempty"` + + // no documentation yet + RequestorContactId *int `json:"requestorContactId,omitempty" xmlrpc:"requestorContactId,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // no documentation yet + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // no documentation yet + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` + + // The customer user that initiated a report request. 
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+	// no documentation yet
+	UsrRecordId *int `json:"usrRecordId,omitempty" xmlrpc:"usrRecordId,omitempty"`
+}
+
+// Provides a means of tracking handle identifiers at the various regional internet registries (RIRs). These objects are used by the [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]] objects to identify a customer or organization when a subnet is registered.
+type Account_Rwhois_Handle struct {
+	Entity
+
+	// The account that this handle belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The handle object's associated [[SoftLayer_Account|account]] id
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The handle object's unique identifier as assigned by the RIR.
+	Handle *string `json:"handle,omitempty" xmlrpc:"handle,omitempty"`
+
+	// Unique ID of the handle object
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+}
+
+// The SoftLayer_Account_Shipment data type contains information relating to a shipment. Basic information such as addresses, the shipment courier, and any tracking information for a shipment is accessible with this data type.
+type Account_Shipment struct {
+	Entity
+
+	// The account to which the shipment belongs.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The account id of the shipment.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The courier handling the shipment.
+	Courier *Auxiliary_Shipping_Courier `json:"courier,omitempty" xmlrpc:"courier,omitempty"`
+
+	// The courier id of the shipment.
+	CourierId *int `json:"courierId,omitempty" xmlrpc:"courierId,omitempty"`
+
+	// The courier name of the shipment.
+	CourierName *string `json:"courierName,omitempty" xmlrpc:"courierName,omitempty"`
+
+	// The employee who created the shipment.
+	CreateEmployee *User_Employee `json:"createEmployee,omitempty" xmlrpc:"createEmployee,omitempty"`
+
+	// The customer user who created the shipment.
+	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
+
+	// The create user id of the shipment.
+	CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"`
+
+	// no documentation yet
+	Currency *Billing_Currency `json:"currency,omitempty" xmlrpc:"currency,omitempty"`
+
+	// The address at which the shipment is received.
+	DestinationAddress *Account_Address `json:"destinationAddress,omitempty" xmlrpc:"destinationAddress,omitempty"`
+
+	// The destination address id of the shipment.
+	DestinationAddressId *int `json:"destinationAddressId,omitempty" xmlrpc:"destinationAddressId,omitempty"`
+
+	// The destination date of the shipment.
+	DestinationDate *Time `json:"destinationDate,omitempty" xmlrpc:"destinationDate,omitempty"`
+
+	// The unique id of the shipment.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The one master tracking data for the shipment.
+	MasterTrackingData *Account_Shipment_Tracking_Data `json:"masterTrackingData,omitempty" xmlrpc:"masterTrackingData,omitempty"`
+
+	// The employee who last modified the shipment.
+	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
+
+	// The customer user who last modified the shipment.
+	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
+
+	// The modify user id of the shipment.
+	ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"`
+
+	// The shipment note (special handling instructions).
+	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
+
+	// The address from which the shipment is sent.
+	OriginationAddress *Account_Address `json:"originationAddress,omitempty" xmlrpc:"originationAddress,omitempty"`
+
+	// The origination address id of the shipment.
+	OriginationAddressId *int `json:"originationAddressId,omitempty" xmlrpc:"originationAddressId,omitempty"`
+
+	// The origination date of the shipment.
+	OriginationDate *Time `json:"originationDate,omitempty" xmlrpc:"originationDate,omitempty"`
+
+	// A count of the items in the shipment.
+	ShipmentItemCount *uint `json:"shipmentItemCount,omitempty" xmlrpc:"shipmentItemCount,omitempty"`
+
+	// The items in the shipment.
+	ShipmentItems []Account_Shipment_Item `json:"shipmentItems,omitempty" xmlrpc:"shipmentItems,omitempty"`
+
+	// The status of the shipment.
+	Status *Account_Shipment_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// The status id of the shipment.
+	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+	// All tracking data for the shipment and packages.
+	TrackingData []Account_Shipment_Tracking_Data `json:"trackingData,omitempty" xmlrpc:"trackingData,omitempty"`
+
+	// A count of all tracking data for the shipment and packages.
+	TrackingDataCount *uint `json:"trackingDataCount,omitempty" xmlrpc:"trackingDataCount,omitempty"`
+
+	// The type of shipment (e.g. for Data Transfer Service or Colocation Service).
+	Type *Account_Shipment_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The type id of the shipment.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+	// The address at which the shipment is received.
+	ViaAddress *Account_Address `json:"viaAddress,omitempty" xmlrpc:"viaAddress,omitempty"`
+
+	// The via address id of the shipment.
+	ViaAddressId *int `json:"viaAddressId,omitempty" xmlrpc:"viaAddressId,omitempty"`
+}
+
+// The SoftLayer_Account_Shipment_Item data type contains information relating to a shipment's item. Basic information such as addresses, the shipment courier, and any tracking information for a shipment is accessible with this data type.
+type Account_Shipment_Item struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The description of the shipping item.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique id of the shipping item.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The package id of the shipping item.
+	PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+	// The shipment to which this item belongs.
+	Shipment *Account_Shipment `json:"shipment,omitempty" xmlrpc:"shipment,omitempty"`
+
+	// The shipment id of the shipping item.
+	ShipmentId *int `json:"shipmentId,omitempty" xmlrpc:"shipmentId,omitempty"`
+
+	// The item id of the shipping item.
+	ShipmentItemId *int `json:"shipmentItemId,omitempty" xmlrpc:"shipmentItemId,omitempty"`
+
+	// The type of this shipment item.
+	ShipmentItemType *Account_Shipment_Item_Type `json:"shipmentItemType,omitempty" xmlrpc:"shipmentItemType,omitempty"`
+
+	// The item type id of the shipping item.
+	ShipmentItemTypeId *int `json:"shipmentItemTypeId,omitempty" xmlrpc:"shipmentItemTypeId,omitempty"`
+}
+
+// no documentation yet
+type Account_Shipment_Item_Type struct {
+	Entity
+
+	// DEPRECATED
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Account_Shipment_Resource_Type struct {
+	Entity
+}
+
+// no documentation yet
+type Account_Shipment_Status struct {
+	Entity
+
+	// DEPRECATED
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Account_Shipment_Tracking_Data data type contains information on a single piece of tracking information pertaining to a shipment. This tracking information contains the tracking numbers by which the shipment may be tracked through the shipping courier.
+type Account_Shipment_Tracking_Data struct {
+	Entity
+
+	// The employee who created the tracking datum.
+	CreateEmployee *User_Employee `json:"createEmployee,omitempty" xmlrpc:"createEmployee,omitempty"`
+
+	// The customer user who created the tracking datum.
+	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
+
+	// The create user id of the tracking data.
+	CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"`
+
+	// The unique id of the tracking data.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The employee who last modified the tracking datum.
+	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
+
+	// The customer user who last modified the tracking datum.
+	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
+
+	// The user id of the tracking data.
+	ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"`
+
+	// The package id of the tracking data.
+	PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+	// The sequence of the tracking data.
+	Sequence *int `json:"sequence,omitempty" xmlrpc:"sequence,omitempty"`
+
+	// The shipment of the tracking datum.
+	Shipment *Account_Shipment `json:"shipment,omitempty" xmlrpc:"shipment,omitempty"`
+
+	// The shipment id of the tracking data.
+	ShipmentId *int `json:"shipmentId,omitempty" xmlrpc:"shipmentId,omitempty"`
+
+	// The tracking data (tracking number/reference number).
+	TrackingData *string `json:"trackingData,omitempty" xmlrpc:"trackingData,omitempty"`
+}
+
+// no documentation yet
+type Account_Shipment_Type struct {
+	Entity
+
+	// DEPRECATED
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Account_Status struct {
+	Entity
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go b/vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go
new file mode 100644
index 00000000000..84f03a52e97
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go
@@ -0,0 +1,342 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// no documentation yet
+type Auxiliary_Marketing_Event struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	EnabledFlag *int `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"`
+
+	// no documentation yet
+	EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+	// no documentation yet
+	Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+	// no documentation yet
+	Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+	// no documentation yet
+	Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Network_Status struct {
+	Entity
+}
+
+// A SoftLayer_Auxiliary_Notification_Emergency data object represents a notification event being broadcast to the SoftLayer customer base. It is used to provide information regarding outages or current known issues.
+type Auxiliary_Notification_Emergency struct {
+	Entity
+
+	// The date this event was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The device (if any) affected by this event.
+	Device *string `json:"device,omitempty" xmlrpc:"device,omitempty"`
+
+	// The duration of this event.
+	Duration *string `json:"duration,omitempty" xmlrpc:"duration,omitempty"`
+
+	// The unique identifier of this event.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The location affected by this event.
+	Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+	// A message describing this event.
+	Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+	// The last date this event was modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The service(s) (if any) affected by this event.
+	ServicesAffected *string `json:"servicesAffected,omitempty" xmlrpc:"servicesAffected,omitempty"`
+
+	// The signature of the SoftLayer employee department associated with this notification.
+	Signature *Auxiliary_Notification_Emergency_Signature `json:"signature,omitempty" xmlrpc:"signature,omitempty"`
+
+	// The date this event will start.
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+	// The status of this notification.
+	Status *Auxiliary_Notification_Emergency_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// Current status record for this event.
+	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+}
+
+// Every SoftLayer_Auxiliary_Notification_Emergency has a signatureId that references a SoftLayer_Auxiliary_Notification_Emergency_Signature data type. The signature is the user or group responsible for the current event.
+type Auxiliary_Notification_Emergency_Signature struct {
+	Entity
+
+	// The name or signature for the current Emergency Notification.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Every SoftLayer_Auxiliary_Notification_Emergency has a statusId that references a SoftLayer_Auxiliary_Notification_Emergency_Status data type. The status is used to determine the current state of the event.
+type Auxiliary_Notification_Emergency_Status struct {
+	Entity
+
+	// A name describing the status of the current Emergency Notification.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release struct {
+	Entity
+
+	// no documentation yet
+	About []Auxiliary_Press_Release_About_Press_Release `json:"about,omitempty" xmlrpc:"about,omitempty"`
+
+	// A count of
+	AboutCount *uint `json:"aboutCount,omitempty" xmlrpc:"aboutCount,omitempty"`
+
+	// A count of
+	ContactCount *uint `json:"contactCount,omitempty" xmlrpc:"contactCount,omitempty"`
+
+	// no documentation yet
+	Contacts []Auxiliary_Press_Release_Contact_Press_Release `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"`
+
+	// A press release's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A count of
+	MediaPartnerCount *uint `json:"mediaPartnerCount,omitempty" xmlrpc:"mediaPartnerCount,omitempty"`
+
+	// no documentation yet
+	MediaPartners []Auxiliary_Press_Release_Media_Partner_Press_Release `json:"mediaPartners,omitempty" xmlrpc:"mediaPartners,omitempty"`
+
+	// no documentation yet
+	PressReleaseContent *Auxiliary_Press_Release_Content `json:"pressReleaseContent,omitempty" xmlrpc:"pressReleaseContent,omitempty"`
+
+	// The date a press release was published.
+	PublishDate *Time `json:"publishDate,omitempty" xmlrpc:"publishDate,omitempty"`
+
+	// A press release's location.
+	ReleaseLocation *string `json:"releaseLocation,omitempty" xmlrpc:"releaseLocation,omitempty"`
+
+	// A press release's sub-title.
+	SubTitle *string `json:"subTitle,omitempty" xmlrpc:"subTitle,omitempty"`
+
+	// A press release's title.
+	Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+	// Whether or not a press release is highlighted on the SoftLayer Website.
+	WebsiteHighlightFlag *bool `json:"websiteHighlightFlag,omitempty" xmlrpc:"websiteHighlightFlag,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_About struct {
+	Entity
+
+	// A press release about's content.
+	Content *string `json:"content,omitempty" xmlrpc:"content,omitempty"`
+
+	// A press release about's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A press release about's title.
+	Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_About_Press_Release struct {
+	Entity
+
+	// A count of
+	AboutParagraphCount *uint `json:"aboutParagraphCount,omitempty" xmlrpc:"aboutParagraphCount,omitempty"`
+
+	// no documentation yet
+	AboutParagraphs []Auxiliary_Press_Release_About `json:"aboutParagraphs,omitempty" xmlrpc:"aboutParagraphs,omitempty"`
+
+	// A press release about cross-reference's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A press release about's internal identifier.
+	PressReleaseAboutId *int `json:"pressReleaseAboutId,omitempty" xmlrpc:"pressReleaseAboutId,omitempty"`
+
+	// A count of
+	PressReleaseCount *uint `json:"pressReleaseCount,omitempty" xmlrpc:"pressReleaseCount,omitempty"`
+
+	// A press release internal identifier.
+	PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"`
+
+	// no documentation yet
+	PressReleases []Auxiliary_Press_Release `json:"pressReleases,omitempty" xmlrpc:"pressReleases,omitempty"`
+
+	// The number that associates an about paragraph with a press release, used for sorting.
+	SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Contact struct {
+	Entity
+
+	// A press release contact's email address.
+	Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+	// A press release contact's first name.
+	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+	// A press release contact's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A press release contact's last name.
+	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+	// A press release contact's phone number.
+	Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"`
+
+	// A press release contact's professional title.
+	ProfessionalTitle *string `json:"professionalTitle,omitempty" xmlrpc:"professionalTitle,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Contact_Press_Release struct {
+	Entity
+
+	// A count of
+	ContactCount *uint `json:"contactCount,omitempty" xmlrpc:"contactCount,omitempty"`
+
+	// no documentation yet
+	Contacts []Auxiliary_Press_Release_Contact `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"`
+
+	// A press release contact cross-reference's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A press release contact's internal identifier.
+	PressReleaseContactId *int `json:"pressReleaseContactId,omitempty" xmlrpc:"pressReleaseContactId,omitempty"`
+
+	// A count of
+	PressReleaseCount *uint `json:"pressReleaseCount,omitempty" xmlrpc:"pressReleaseCount,omitempty"`
+
+	// A press release internal identifier.
+	PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"`
+
+	// no documentation yet
+	PressReleases []Auxiliary_Press_Release `json:"pressReleases,omitempty" xmlrpc:"pressReleases,omitempty"`
+
+	// The number that associates a contact with a press release, used for sorting.
+	SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Content struct {
+	Entity
+
+	// the id of a single press release
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// the press release id that the content belongs to
+	PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"`
+
+	// the content of a press release
+	Text *string `json:"text,omitempty" xmlrpc:"text,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Media_Partner struct {
+	Entity
+
+	// A press release media partner's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A press release media partner's name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Media_Partner_Press_Release struct {
+	Entity
+
+	// A press release media partner cross-reference's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A count of
+	MediaPartnerCount *uint `json:"mediaPartnerCount,omitempty" xmlrpc:"mediaPartnerCount,omitempty"`
+
+	// A press release media partner's internal identifier.
+	MediaPartnerId *int `json:"mediaPartnerId,omitempty" xmlrpc:"mediaPartnerId,omitempty"`
+
+	// no documentation yet
+	MediaPartners []Auxiliary_Press_Release_Media_Partner `json:"mediaPartners,omitempty" xmlrpc:"mediaPartners,omitempty"`
+
+	// A count of
+	PressReleaseCount *uint `json:"pressReleaseCount,omitempty" xmlrpc:"pressReleaseCount,omitempty"`
+
+	// A press release internal identifier.
+	PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"`
+
+	// no documentation yet
+	PressReleases []Auxiliary_Press_Release `json:"pressReleases,omitempty" xmlrpc:"pressReleases,omitempty"`
+}
+
+// The SoftLayer_Auxiliary_Shipping_Courier data type contains general information relating to the different (major) couriers that SoftLayer may use for shipping.
+type Auxiliary_Shipping_Courier struct {
+	Entity
+
+	// The unique id of the shipping courier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The unique keyname of the shipping courier.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The name of the shipping courier.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The URL of the shipping courier's website.
+ Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"` +} + +// no documentation yet +type Auxiliary_Shipping_Courier_Type struct { + Entity + + // no documentation yet + Courier []Auxiliary_Shipping_Courier `json:"courier,omitempty" xmlrpc:"courier,omitempty"` + + // A count of + CourierCount *uint `json:"courierCount,omitempty" xmlrpc:"courierCount,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/billing.go b/vendor/github.com/softlayer/softlayer-go/datatypes/billing.go new file mode 100644 index 00000000000..ba791ac7db6 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/billing.go @@ -0,0 +1,2696 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Billing_Currency struct { + Entity + + // The current exchange rate + CurrentExchangeRate *Billing_Currency_ExchangeRate `json:"currentExchangeRate,omitempty" xmlrpc:"currentExchangeRate,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Billing_Currency_Country data type maps what currencies are valid for specific countries. US Dollars are valid from any country, but other currencies are only available to customers in certain countries. +type Billing_Currency_Country struct { + Entity + + // A unique identifier for the related country. + CountryId *int `json:"countryId,omitempty" xmlrpc:"countryId,omitempty"` + + // A unique identifier for the related currency. + CurrencyId *int `json:"currencyId,omitempty" xmlrpc:"currencyId,omitempty"` + + // A unique identifier for a map between a country and currency. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The country currency locale. + Locale *string `json:"locale,omitempty" xmlrpc:"locale,omitempty"` +} + +// no documentation yet +type Billing_Currency_ExchangeRate struct { + Entity + + // no documentation yet + EffectiveDate *Time `json:"effectiveDate,omitempty" xmlrpc:"effectiveDate,omitempty"` + + // no documentation yet + ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"` + + // no documentation yet + FundingCurrency *Billing_Currency `json:"fundingCurrency,omitempty" xmlrpc:"fundingCurrency,omitempty"` + + // The id of the exchange rate record. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LocalCurrency *Billing_Currency `json:"localCurrency,omitempty" xmlrpc:"localCurrency,omitempty"` + + // no documentation yet + Rate *Float64 `json:"rate,omitempty" xmlrpc:"rate,omitempty"` +} + +// Every SoftLayer customer account has billing specific information which is kept in the SoftLayer_Billing_Info data type. This information is used by the SoftLayer accounting group when sending invoices and making billing inquiries. +type Billing_Info struct { + Entity + + // The SoftLayer customer account associated with this billing information. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A SoftLayer account's identifier. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AchInformation []Billing_Info_Ach `json:"achInformation,omitempty" xmlrpc:"achInformation,omitempty"` + + // A count of + AchInformationCount *uint `json:"achInformationCount,omitempty" xmlrpc:"achInformationCount,omitempty"` + + // The day of the month that a SoftLayer customer is billed. + AnniversaryDayOfMonth *int `json:"anniversaryDayOfMonth,omitempty" xmlrpc:"anniversaryDayOfMonth,omitempty"` + + // This value doesn't persist to this object. It's used as part of the account creation process only; + CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"` + + // the expiration month of the credit card on file + CardExpirationMonth *int `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // the expiration year of the credit card on file + CardExpirationYear *int `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // no documentation yet + CardNickname *string `json:"cardNickname,omitempty" xmlrpc:"cardNickname,omitempty"` + + // the type of the credit card on file + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // This value doesn't persist to this object. It's used as part of the account creation process only. + CardVerificationNumber *string `json:"cardVerificationNumber,omitempty" xmlrpc:"cardVerificationNumber,omitempty"` + + // The date a customer's billing information was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Currency to be used by this customer account. + Currency *Billing_Currency `json:"currency,omitempty" xmlrpc:"currency,omitempty"` + + // Information related to an account's current and previous billing cycles. + CurrentBillingCycle *Billing_Info_Cycle `json:"currentBillingCycle,omitempty" xmlrpc:"currentBillingCycle,omitempty"` + + // A SoftLayer customer's billing information identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date on which an account was last billed. + LastBillDate *Time `json:"lastBillDate,omitempty" xmlrpc:"lastBillDate,omitempty"` + + // The last four digits of the credit card currently on the account. This is the only portion of the card that we store. For Paypal customers, this value will be empty. + LastFourPaymentCardDigits *int `json:"lastFourPaymentCardDigits,omitempty" xmlrpc:"lastFourPaymentCardDigits,omitempty"` + + // The date of the last payment received by SoftLayer from the account holder. + LastPaymentDate *Time `json:"lastPaymentDate,omitempty" xmlrpc:"lastPaymentDate,omitempty"` + + // The date a customer's billing information was last modified. 
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The date on which an account will be billed next. + NextBillDate *Time `json:"nextBillDate,omitempty" xmlrpc:"nextBillDate,omitempty"` + + // The payment terms for an account. + PaymentTerms *int `json:"paymentTerms,omitempty" xmlrpc:"paymentTerms,omitempty"` + + // The percentage discount received on all one-time charges on a customer's monthly bill. + PercentDiscountOnetime *int `json:"percentDiscountOnetime,omitempty" xmlrpc:"percentDiscountOnetime,omitempty"` + + // The percentage discount received on all recurring charges on a customer's monthly bill. + PercentDiscountRecurring *int `json:"percentDiscountRecurring,omitempty" xmlrpc:"percentDiscountRecurring,omitempty"` + + // The total recurring fee amount for servers that are in the spare pool status. + SparePoolAmount *int `json:"sparePoolAmount,omitempty" xmlrpc:"sparePoolAmount,omitempty"` + + // no documentation yet + TaxCertificateId *string `json:"taxCertificateId,omitempty" xmlrpc:"taxCertificateId,omitempty"` + + // no documentation yet + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// no documentation yet +type Billing_Info_Ach struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AccountNumber *string `json:"accountNumber,omitempty" xmlrpc:"accountNumber,omitempty"` + + // no documentation yet + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // no documentation yet + BankTransitNumber *string `json:"bankTransitNumber,omitempty" xmlrpc:"bankTransitNumber,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // no documentation yet + Postalcode *string `json:"postalcode,omitempty" xmlrpc:"postalcode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // no documentation yet + Street1 *string `json:"street1,omitempty" xmlrpc:"street1,omitempty"` + + // no documentation yet + Street2 *string `json:"street2,omitempty" xmlrpc:"street2,omitempty"` + + // no documentation yet + VerifiedDate *Time `json:"verifiedDate,omitempty" xmlrpc:"verifiedDate,omitempty"` +} + +// The SoftLayer_Billing_Info_Cycle data type models basic information concerning a SoftLayer account's previous and current billing cycles. The information in this class is only populated for SoftLayer customers who are billed monthly. +type Billing_Info_Cycle struct { + Entity + + // The account that a current billing cycle is associated with. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The ending date of an account's current billing cycle. 
+ CurrentCycleEndDate *Time `json:"currentCycleEndDate,omitempty" xmlrpc:"currentCycleEndDate,omitempty"`
+
+ // The starting date of an account's current billing cycle.
+ CurrentCycleStartDate *Time `json:"currentCycleStartDate,omitempty" xmlrpc:"currentCycleStartDate,omitempty"`
+
+ // The start date of an account's next billing cycle.
+ NextCycleStartDate *Time `json:"nextCycleStartDate,omitempty" xmlrpc:"nextCycleStartDate,omitempty"`
+
+ // The ending date of an account's previous billing cycle.
+ PreviousCycleEndDate *Time `json:"previousCycleEndDate,omitempty" xmlrpc:"previousCycleEndDate,omitempty"`
+
+ // The starting date of an account's previous billing cycle.
+ PreviousCycleStartDate *Time `json:"previousCycleStartDate,omitempty" xmlrpc:"previousCycleStartDate,omitempty"`
+}
+
+// The SoftLayer_Billing_Invoice data type contains general information relating to an individual invoice applied to a SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers is taken from the account's contact information at the time the invoice is generated.
+type Billing_Invoice struct {
+ Entity
+
+ // The account that an invoice belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The SoftLayer customer account that an invoice belongs to.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The first line of an address belonging to an account at the time an invoice is created.
+ Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
+
+ // The second line of an address belonging to an account at the time an invoice is created.
+ Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
+
+ // This is the amount of this invoice.
+ Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+ // no documentation yet
+ BrandAtInvoiceCreation *Brand `json:"brandAtInvoiceCreation,omitempty" xmlrpc:"brandAtInvoiceCreation,omitempty"`
+
+ // The city portion of an address belonging to an account at the time an invoice is created.
+ City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
+
+ // Whether an account was exempt from taxes on its invoices at the time an invoice is created.
+ ClaimedTaxExemptTxFlag *bool `json:"claimedTaxExemptTxFlag,omitempty" xmlrpc:"claimedTaxExemptTxFlag,omitempty"`
+
+ // The date an invoice was closed. Open invoices have a null closed date.
+ ClosedDate *Time `json:"closedDate,omitempty" xmlrpc:"closedDate,omitempty"`
+
+ // The company name belonging to an account at the time an invoice is created.
+ CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
+
+ // A two-letter abbreviation of the country portion of an address belonging to an account at the time an invoice is created.
+ Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
+
+ // The date an invoice was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A flag that will reflect whether the detailed version of the PDF has been generated.
+ DetailedPdfGeneratedFlag *bool `json:"detailedPdfGeneratedFlag,omitempty" xmlrpc:"detailedPdfGeneratedFlag,omitempty"`
+
+ // no documentation yet
+ DocumentsGeneratedFlag *bool `json:"documentsGeneratedFlag,omitempty" xmlrpc:"documentsGeneratedFlag,omitempty"`
+
+ // The email address belonging to an account at the time an invoice is created.
+ Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+ // A SoftLayer account's balance at the time an invoice is closed. This value is measured in US Dollar ($USD) currency.
+ EndingBalance *Float64 `json:"endingBalance,omitempty" xmlrpc:"endingBalance,omitempty"`
+
+ // The fax telephone number belonging to an account at the time an invoice is created.
+ FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"`
+
+ // The first name of the account holder at the time an invoice is created.
+ FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+ // An invoice's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of a list of top-level invoice items that are on the currently pending invoice.
+ InvoiceTopLevelItemCount *uint `json:"invoiceTopLevelItemCount,omitempty" xmlrpc:"invoiceTopLevelItemCount,omitempty"`
+
+ // A list of top-level invoice items that are on the currently pending invoice.
+ InvoiceTopLevelItems []Billing_Invoice_Item `json:"invoiceTopLevelItems,omitempty" xmlrpc:"invoiceTopLevelItems,omitempty"`
+
+ // The total amount of this invoice.
+ InvoiceTotalAmount *Float64 `json:"invoiceTotalAmount,omitempty" xmlrpc:"invoiceTotalAmount,omitempty"`
+
+ // The total one-time charges for this invoice. This is the sum of one-time charges + setup fees + labor fees. This does not include taxes.
+ InvoiceTotalOneTimeAmount *Float64 `json:"invoiceTotalOneTimeAmount,omitempty" xmlrpc:"invoiceTotalOneTimeAmount,omitempty"`
+
+ // A sum of all the taxes related to one-time charges for this invoice.
+ InvoiceTotalOneTimeTaxAmount *Float64 `json:"invoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"invoiceTotalOneTimeTaxAmount,omitempty"`
+
+ // The total amount of this invoice. This does not include taxes.
+ InvoiceTotalPreTaxAmount *Float64 `json:"invoiceTotalPreTaxAmount,omitempty" xmlrpc:"invoiceTotalPreTaxAmount,omitempty"`
+
+ // The total recurring amount of this invoice. This amount does not include taxes or one-time charges.
+ InvoiceTotalRecurringAmount *Float64 `json:"invoiceTotalRecurringAmount,omitempty" xmlrpc:"invoiceTotalRecurringAmount,omitempty"`
+
+ // The total amount of the recurring taxes on this invoice.
+ InvoiceTotalRecurringTaxAmount *Float64 `json:"invoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"invoiceTotalRecurringTaxAmount,omitempty"`
+
+ // A count of the items that belong to this invoice.
+ ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"`
+
+ // The items that belong to this invoice.
+ Items []Billing_Invoice_Item `json:"items,omitempty" xmlrpc:"items,omitempty"`
+
+ // The last name of the account holder at the time an invoice is created.
+ LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+ // Exchange rate used for billing this invoice.
+ LocalCurrencyExchangeRate *Billing_Currency_ExchangeRate `json:"localCurrencyExchangeRate,omitempty" xmlrpc:"localCurrencyExchangeRate,omitempty"`
+
+ // The date an invoice was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The telephone number belonging to an account at the time an invoice is created.
+ OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"`
+
+ // This is the total payment made on this invoice.
+ Payment *Float64 `json:"payment,omitempty" xmlrpc:"payment,omitempty"`
+
+ // A count of the payments for the invoice.
+ PaymentCount *uint `json:"paymentCount,omitempty" xmlrpc:"paymentCount,omitempty"`
+
+ // The payments for the invoice.
+ Payments []Billing_Invoice_Receivable_Payment `json:"payments,omitempty" xmlrpc:"payments,omitempty"`
+
+ // The postal code portion of an address belonging to an account at the time an invoice is created.
+ PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+ // no documentation yet
+ PurchaseOrderNumber *string `json:"purchaseOrderNumber,omitempty" xmlrpc:"purchaseOrderNumber,omitempty"`
+
+ // This is the seller's tax registration.
+ SellerRegistration *string `json:"sellerRegistration,omitempty" xmlrpc:"sellerRegistration,omitempty"`
+
+ // A SoftLayer account's balance at the time an invoice is created. This value is measured in US Dollar ($USD) currency.
+ StartingBalance *Float64 `json:"startingBalance,omitempty" xmlrpc:"startingBalance,omitempty"`
+
+ // A two-letter abbreviation of the state portion of an address belonging to an account at the time an invoice is created. If the account that the invoice was generated for resides outside a province, then this is set to "other".
+ State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+
+ // An invoice's status. The "OPEN" status means SoftLayer has not yet received payment for this invoice. The "CLOSED" status means that SoftLayer has received payment and closed the invoice. The "CLOSED_FAILED" status code means SoftLayer closed the invoice without receiving a payment. Invoices are usually set to CLOSED_FAILED status in cases where customer accounts are terminated for non-payment.
+ StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
+
+ // This is the tax information that applies to tax auditing. This is the official tax record for this invoice.
+ TaxInfo *Billing_Invoice_Tax_Info `json:"taxInfo,omitempty" xmlrpc:"taxInfo,omitempty"`
+
+ // This is the set of tax information for any tax calculation for this invoice. Note that not all of these are necessarily official, so use the taxInfo key to get the final information.
+ TaxInfoHistory []Billing_Invoice_Tax_Info `json:"taxInfoHistory,omitempty" xmlrpc:"taxInfoHistory,omitempty"`
+
+ // A count of this is the set of tax information for any tax calculation for this invoice. Note that not all of these are necessarily official, so use the taxInfo key to get the final information.
+ TaxInfoHistoryCount *uint `json:"taxInfoHistoryCount,omitempty" xmlrpc:"taxInfoHistoryCount,omitempty"`
+
+ // This is a message explaining the tax treatment for this invoice.
+ TaxMessage *string `json:"taxMessage,omitempty" xmlrpc:"taxMessage,omitempty"`
+
+ // no documentation yet
+ TaxStatusId *int `json:"taxStatusId,omitempty" xmlrpc:"taxStatusId,omitempty"`
+
+ // This is the strategy used to calculate tax on this invoice.
+ TaxType *Billing_Invoice_Tax_Type `json:"taxType,omitempty" xmlrpc:"taxType,omitempty"`
+
+ // no documentation yet
+ TaxTypeId *int `json:"taxTypeId,omitempty" xmlrpc:"taxTypeId,omitempty"`
+
+ // An invoice's type. SoftLayer invoices and service credits are differentiated by their type. The "NEW" type code signifies an invoice for new service. A SoftLayer customer's first invoice has the NEW type code. "RECURRING" invoices are generated on a SoftLayer customer's anniversary billing date for monthly services. "ONE-TIME-CHARGE" invoices are generated when one-time charges are applied to an account. "CREDIT" invoices are generated whenever SoftLayer applies a credit against an account's balance.
There are two special types of service credits. "REFUND" type credits are applied against a customer's account balance along with the receivables on their account. "MANUAL_PAYMENT_CREDIT" invoice credits are generated whenever a customer makes an unscheduled payment. + TypeCode *string `json:"typeCode,omitempty" xmlrpc:"typeCode,omitempty"` +} + +// Each billing invoice item makes up a record within an invoice. This provides you with a detailed record of everything related to an invoice item. When you are billed, our system takes active billing items and creates an invoice. These invoice items are a copy of your active billing items, and make up the contents of your invoice. +type Billing_Invoice_Item struct { + Entity + + // An Invoice Item's associated child invoice items. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + AssociatedChildren []Billing_Invoice_Item `json:"associatedChildren,omitempty" xmlrpc:"associatedChildren,omitempty"` + + // A count of an Invoice Item's associated child invoice items. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + AssociatedChildrenCount *uint `json:"associatedChildrenCount,omitempty" xmlrpc:"associatedChildrenCount,omitempty"` + + // An Invoice Item's associated invoice item. If this is populated, it means this is an orphaned invoice item, but logically belongs to the associated invoice item. + AssociatedInvoiceItem *Billing_Invoice_Item `json:"associatedInvoiceItem,omitempty" xmlrpc:"associatedInvoiceItem,omitempty"` + + // The associated invoice Item ID. + AssociatedInvoiceItemId *int `json:"associatedInvoiceItemId,omitempty" xmlrpc:"associatedInvoiceItemId,omitempty"` + + // An Invoice Item's billing item, from which this item was generated. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The billing item from which this invoice item was generated. + BillingItemId *int `json:"billingItemId,omitempty" xmlrpc:"billingItemId,omitempty"` + + // This invoice item's "item category". + Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // The item category of the invoice item being invoiced. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // An Invoice Item's child invoice items. Only parent invoice items have children. For instance, a server invoice item will have children. + Children []Billing_Invoice_Item `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of an Invoice Item's child invoice items. Only parent invoice items have children. For instance, a server invoice item will have children. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // The date the invoice item was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The item description for this invoice item. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The domain name of the invoiced item. This is only used on invoice items whose category is "server". + DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"` + + // An Invoice Item's associated child invoice items, excluding some items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. 
+ FilteredAssociatedChildren []Billing_Invoice_Item `json:"filteredAssociatedChildren,omitempty" xmlrpc:"filteredAssociatedChildren,omitempty"`
+
+ // A count of an Invoice Item's associated child invoice items, excluding some items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children.
+ FilteredAssociatedChildrenCount *uint `json:"filteredAssociatedChildrenCount,omitempty" xmlrpc:"filteredAssociatedChildrenCount,omitempty"`
+
+ // The host name of the invoiced item. This is only used on invoice items whose category is "server".
+ HostName *string `json:"hostName,omitempty" xmlrpc:"hostName,omitempty"`
+
+ // A flag indicating whether this invoice item is billed on an hourly basis.
+ HourlyFlag *bool `json:"hourlyFlag,omitempty" xmlrpc:"hourlyFlag,omitempty"`
+
+ // The hourly recurring fee of the invoice item represented by a floating point decimal in US Dollars ($USD).
+ HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"`
+
+ // The ID of the invoice item.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The invoice to which this item belongs.
+ Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"`
+
+ // The invoice to which this invoice item belongs.
+ InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"`
+
+ // An invoice item's labor fee total after taxes. This does not include any child invoice items.
+ LaborAfterTaxAmount *Float64 `json:"laborAfterTaxAmount,omitempty" xmlrpc:"laborAfterTaxAmount,omitempty"`
+
+ // This is also a one-time fee of a special type.
+ LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"`
+
+ // The tax rate at which the labor fee is taxed.
+ LaborFeeTaxRate *Float64 `json:"laborFeeTaxRate,omitempty" xmlrpc:"laborFeeTaxRate,omitempty"`
+
+ // An invoice item's labor tax amount. This does not include any child invoice items.
+ LaborTaxAmount *Float64 `json:"laborTaxAmount,omitempty" xmlrpc:"laborTaxAmount,omitempty"`
+
+ // An invoice item's location, if one exists.
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // An Invoice Item's associated child invoice items, excluding ALL items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children.
+ NonZeroAssociatedChildren []Billing_Invoice_Item `json:"nonZeroAssociatedChildren,omitempty" xmlrpc:"nonZeroAssociatedChildren,omitempty"`
+
+ // A count of an Invoice Item's associated child invoice items, excluding ALL items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children.
+ NonZeroAssociatedChildrenCount *uint `json:"nonZeroAssociatedChildrenCount,omitempty" xmlrpc:"nonZeroAssociatedChildrenCount,omitempty"`
+
+ // A note to help describe more about the item. This normally holds usernames, or some other bit of extra information.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // An invoice item's one-time fee total after taxes. This does not include any child invoice items.
+ OneTimeAfterTaxAmount *Float64 `json:"oneTimeAfterTaxAmount,omitempty" xmlrpc:"oneTimeAfterTaxAmount,omitempty"`
+
+ // If there are any one-time charges assessed, they will show up here, represented by a floating point decimal in US Dollars ($USD).
+ OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"`
+
+ // The rate at which the one-time fee is taxed.
+ OneTimeFeeTaxRate *Float64 `json:"oneTimeFeeTaxRate,omitempty" xmlrpc:"oneTimeFeeTaxRate,omitempty"`
+
+ // An invoice item's one-time tax amount. This does not include any child invoice items.
+ OneTimeTaxAmount *Float64 `json:"oneTimeTaxAmount,omitempty" xmlrpc:"oneTimeTaxAmount,omitempty"`
+
+ // Every item tied to a server should have a parent invoice item, which is the server line item. This is how we associate items with a server.
+ Parent *Billing_Invoice_Item `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+ // The parent invoice item, usually the server invoice item.
+ ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+
+ // The entry in the product catalog that an invoice item is based upon.
+ Product *Product_Item `json:"product,omitempty" xmlrpc:"product,omitempty"`
+
+ // The entry in the product catalog that an invoice item is based upon.
+ ProductItemId *int `json:"productItemId,omitempty" xmlrpc:"productItemId,omitempty"`
+
+ // An invoice item's recurring fee total after taxes. This does not include any child invoice items.
+ RecurringAfterTaxAmount *Float64 `json:"recurringAfterTaxAmount,omitempty" xmlrpc:"recurringAfterTaxAmount,omitempty"`
+
+ // The recurring fee of the invoice item represented by a floating point decimal in US Dollars ($USD).
+ RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"`
+
+ // The rate at which the recurring fee is taxed.
+ RecurringFeeTaxRate *Float64 `json:"recurringFeeTaxRate,omitempty" xmlrpc:"recurringFeeTaxRate,omitempty"`
+
+ // An invoice item's recurring tax amount. This does not include any child invoice items.
+ RecurringTaxAmount *Float64 `json:"recurringTaxAmount,omitempty" xmlrpc:"recurringTaxAmount,omitempty"`
+
+ // A unique identifier for a SoftLayer Service that is associated with an invoice item.
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+
+ // The service provider for the invoice item.
+ ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"`
+
+ // An invoice item's setup fee total after taxes. This does not include any child invoice items.
+ SetupAfterTaxAmount *Float64 `json:"setupAfterTaxAmount,omitempty" xmlrpc:"setupAfterTaxAmount,omitempty"`
+
+ // If there were any setup fees, they will show up here. These are normally one-time fees.
+ SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"`
+
+ // The number of months the setup fee is being deferred.
+ SetupFeeDeferralMonths *int `json:"setupFeeDeferralMonths,omitempty" xmlrpc:"setupFeeDeferralMonths,omitempty"`
+
+ // The tax rate at which the setup fee is taxed.
+ SetupFeeTaxRate *Float64 `json:"setupFeeTaxRate,omitempty" xmlrpc:"setupFeeTaxRate,omitempty"`
+
+ // An invoice item's setup tax amount. This does not include any child invoice items.
+ SetupTaxAmount *Float64 `json:"setupTaxAmount,omitempty" xmlrpc:"setupTaxAmount,omitempty"`
+
+ // A string representing the name of the parent-level product group of an invoice item.
+ TopLevelProductGroupName *string `json:"topLevelProductGroupName,omitempty" xmlrpc:"topLevelProductGroupName,omitempty"`
+
+ // An invoice item's total, including any child invoice items if they exist.
+ TotalOneTimeAmount *Float64 `json:"totalOneTimeAmount,omitempty" xmlrpc:"totalOneTimeAmount,omitempty"`
+
+ // An invoice item's total, including any child invoice items if they exist.
+ TotalOneTimeTaxAmount *Float64 `json:"totalOneTimeTaxAmount,omitempty" xmlrpc:"totalOneTimeTaxAmount,omitempty"`
+
+ // An invoice item's total, including any child invoice items if they exist.
+ TotalRecurringAmount *Float64 `json:"totalRecurringAmount,omitempty" xmlrpc:"totalRecurringAmount,omitempty"`
+
+ // A Billing Item's total, including any child billing items if they exist.
+ TotalRecurringTaxAmount *Float64 `json:"totalRecurringTaxAmount,omitempty" xmlrpc:"totalRecurringTaxAmount,omitempty"`
+
+ // A flag indicating whether this invoice item is for a usage charge.
+ UsageChargeFlag *bool `json:"usageChargeFlag,omitempty" xmlrpc:"usageChargeFlag,omitempty"`
+}
+
+// The SoftLayer_Billing_Invoice_Item_Hardware data type contains a "resource". This resource is a link to the hardware tied to a SoftLayer_Billing_Item whose category code is "server".
+type Billing_Invoice_Item_Hardware struct {
+ Billing_Invoice_Item
+
+ // The resource for a server invoice item.
+ Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// Information about the tax rates that apply to a particular invoice item.
+type Billing_Invoice_Item_Tax_Info struct {
+ Entity
+
+ // The date and time the tax information was recorded.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The invoice description with special information about the invoice.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The tax rate that can be multiplied by the subtotal to get the tax amount.
+ EffectiveTaxRate *Float64 `json:"effectiveTaxRate,omitempty" xmlrpc:"effectiveTaxRate,omitempty"`
+
+ // The amount that is exempt from tax.
+ ExemptAmount *Float64 `json:"exemptAmount,omitempty" xmlrpc:"exemptAmount,omitempty"`
+
+ // The type of fee being tracked for this particular set of tax information.
+ FeeProperty *string `json:"feeProperty,omitempty" xmlrpc:"feeProperty,omitempty"`
+
+ // An invoice item's tax information internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ InvoiceItem *Billing_Invoice_Item `json:"invoiceItem,omitempty" xmlrpc:"invoiceItem,omitempty"`
+
+ // A reference to the related invoice item.
+ InvoiceItemId *int `json:"invoiceItemId,omitempty" xmlrpc:"invoiceItemId,omitempty"`
+
+ // no documentation yet
+ InvoiceTaxInfo *Billing_Invoice_Tax_Info `json:"invoiceTaxInfo,omitempty" xmlrpc:"invoiceTaxInfo,omitempty"`
+
+ // A reference to the tax information for the parent invoice.
+ InvoiceTaxInfoId *int `json:"invoiceTaxInfoId,omitempty" xmlrpc:"invoiceTaxInfoId,omitempty"`
+
+ // The date and time the tax information was modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The amount that is exempt from tax.
+ NonTaxableBasis *Float64 `json:"nonTaxableBasis,omitempty" xmlrpc:"nonTaxableBasis,omitempty"`
+
+ // A flag to indicate whether this is the official record for this invoice item.
+ ReportedFlag *bool `json:"reportedFlag,omitempty" xmlrpc:"reportedFlag,omitempty"`
+
+ // The registration that the seller will use to report the invoice.
+ SellerRegistration *string `json:"sellerRegistration,omitempty" xmlrpc:"sellerRegistration,omitempty"`
+
+ // The tax amount associated with this line item.
+ TaxAmount *Float64 `json:"taxAmount,omitempty" xmlrpc:"taxAmount,omitempty"`
+
+ // The tax amount (converted to the 'to' currency) associated with this line item.
+ TaxAmountToCurrency *Float64 `json:"taxAmountToCurrency,omitempty" xmlrpc:"taxAmountToCurrency,omitempty"`
+
+ // The tax rate used. Note that this might apply to only part of the line item.
+ TaxRate *Float64 `json:"taxRate,omitempty" xmlrpc:"taxRate,omitempty"`
+
+ // The amount that is subject to tax.
+ TaxableBasis *Float64 `json:"taxableBasis,omitempty" xmlrpc:"taxableBasis,omitempty"`
+
+ // This is the currency the invoice will be converted to.
+ ToCurrency *Billing_Currency `json:"toCurrency,omitempty" xmlrpc:"toCurrency,omitempty"`
+
+ // The currency code that the invoice is being converted to.
+ ToCurrencyId *int `json:"toCurrencyId,omitempty" xmlrpc:"toCurrencyId,omitempty"`
+}
+
+// no documentation yet
+type Billing_Invoice_Next struct {
+ Entity
+}
+
+// The SoftLayer_Billing_Invoice_Receivable_Payment data type contains general information relating to payments made against invoices.
+type Billing_Invoice_Receivable_Payment struct {
+ Entity
+
+ // no documentation yet
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The amount of the payment.
+ Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+ // The date of the payment.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ CreditCardLastFourDigits *int `json:"creditCardLastFourDigits,omitempty" xmlrpc:"creditCardLastFourDigits,omitempty"`
+
+ // no documentation yet
+ CreditCardRequestId *string `json:"creditCardRequestId,omitempty" xmlrpc:"creditCardRequestId,omitempty"`
+
+ // no documentation yet
+ CreditCardTransaction *Billing_Payment_Card_Transaction `json:"creditCardTransaction,omitempty" xmlrpc:"creditCardTransaction,omitempty"`
+
+ // no documentation yet
+ ExchangeRate *Billing_Currency_ExchangeRate `json:"exchangeRate,omitempty" xmlrpc:"exchangeRate,omitempty"`
+
+ // no documentation yet
+ Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"`
+
+ // The invoice that the payment is for.
+ InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"`
+
+ // no documentation yet
+ PaypalTransaction *Billing_Payment_PayPal_Transaction `json:"paypalTransaction,omitempty" xmlrpc:"paypalTransaction,omitempty"`
+
+ // The type of payment.
+ TypeCode *string `json:"typeCode,omitempty" xmlrpc:"typeCode,omitempty"`
+}
+
+// Invoice tax information contains top-level information about the taxes recorded for a particular invoice.
+type Billing_Invoice_Tax_Info struct {
+ Entity
+
+ // The date and time this tax information was recorded.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // This is the currency used for the invoice.
+ Currency *Billing_Currency `json:"currency,omitempty" xmlrpc:"currency,omitempty"`
+
+ // The currency code that the invoice should be recorded in.
+ CurrencyId *int `json:"currencyId,omitempty" xmlrpc:"currencyId,omitempty"`
+
+ // This is the functional currency used for the invoice.
+ FunctionalCurrency *Billing_Currency `json:"functionalCurrency,omitempty" xmlrpc:"functionalCurrency,omitempty"`
+
+ // The internal identifier for this invoice tax information.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // This is the related invoice for this tax-related information.
+ Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"`
+
+ // A reference to the related invoice.
+ InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"`
+
+ // A count of this is the collection of tax information for each of the related invoice items.
+ ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"`
+
+ // This is the tax information on the invoice item that includes currency details.
+ ItemWithCurrencyInfo *Billing_Invoice_Item_Tax_Info `json:"itemWithCurrencyInfo,omitempty" xmlrpc:"itemWithCurrencyInfo,omitempty"`
+
+ // This is the collection of tax information for each of the related invoice items.
+ Items []Billing_Invoice_Item_Tax_Info `json:"items,omitempty" xmlrpc:"items,omitempty"`
+
+ // The date and time this tax information was updated.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A flag to indicate whether the invoice will be auditable.
+ ReportedFlag *bool `json:"reportedFlag,omitempty" xmlrpc:"reportedFlag,omitempty"`
+
+ // This is the total tax amount (converted to the 'to' currency) for the invoice.
+ TotalTaxAmountToCurrency *Float64 `json:"totalTaxAmountToCurrency,omitempty" xmlrpc:"totalTaxAmountToCurrency,omitempty"`
+}
+
+// The invoice tax status data type models a single status or state that an invoice can reflect in regard to an integration with a third-party tax calculation service.
+type Billing_Invoice_Tax_Status struct {
+ Entity
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The invoice tax type data type models a single strategy for handling tax calculations.
+type Billing_Invoice_Tax_Type struct {
+ Entity
+
+ // A tax type's internal identifier. Each type of tax calculation strategy has a unique ID value.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A unique string that identifies each strategy and is guaranteed to be stable over time.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // A human-readable label for each tax strategy.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Item data type. Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. SoftLayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own.
+//
+// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPUs, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing items with a null parent item are top-level items with no associated parent.
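+//
+// An illustrative sketch, not part of the generated bindings: once the
+// "activeChildren" relation has been fetched, a billing item tree can be
+// walked recursively. The helper name and the nil checks below are assumptions
+// about typical caller code, not upstream API:
+//
+//	func sumRecurring(item datatypes.Billing_Item) float64 {
+//		total := 0.0
+//		if item.RecurringFee != nil {
+//			// RecurringFee is a *Float64; dereference and convert to float64
+//			total += float64(*item.RecurringFee)
+//		}
+//		for _, child := range item.ActiveChildren {
+//			total += sumRecurring(child)
+//		}
+//		return total
+//	}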
+type Billing_Item struct { + Entity + + // The account that a billing item belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + ActiveAgreement *Account_Agreement `json:"activeAgreement,omitempty" xmlrpc:"activeAgreement,omitempty"` + + // A flag indicating that the billing item is under an active agreement. + ActiveAgreementFlag *Account_Agreement `json:"activeAgreementFlag,omitempty" xmlrpc:"activeAgreementFlag,omitempty"` + + // A billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item. + ActiveAssociatedChildren []Billing_Item `json:"activeAssociatedChildren,omitempty" xmlrpc:"activeAssociatedChildren,omitempty"` + + // A count of a billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item. + ActiveAssociatedChildrenCount *uint `json:"activeAssociatedChildrenCount,omitempty" xmlrpc:"activeAssociatedChildrenCount,omitempty"` + + // A count of + ActiveAssociatedGuestDiskBillingItemCount *uint `json:"activeAssociatedGuestDiskBillingItemCount,omitempty" xmlrpc:"activeAssociatedGuestDiskBillingItemCount,omitempty"` + + // no documentation yet + ActiveAssociatedGuestDiskBillingItems []Billing_Item `json:"activeAssociatedGuestDiskBillingItems,omitempty" xmlrpc:"activeAssociatedGuestDiskBillingItems,omitempty"` + + // A count of a Billing Item's active bundled billing items. + ActiveBundledItemCount *uint `json:"activeBundledItemCount,omitempty" xmlrpc:"activeBundledItemCount,omitempty"` + + // A Billing Item's active bundled billing items. + ActiveBundledItems []Billing_Item `json:"activeBundledItems,omitempty" xmlrpc:"activeBundledItems,omitempty"` + + // A service cancellation request item that corresponds to the billing item. + ActiveCancellationItem *Billing_Item_Cancellation_Request_Item `json:"activeCancellationItem,omitempty" xmlrpc:"activeCancellationItem,omitempty"` + + // A Billing Item's active child billing items. + ActiveChildren []Billing_Item `json:"activeChildren,omitempty" xmlrpc:"activeChildren,omitempty"` + + // A count of a Billing Item's active child billing items. + ActiveChildrenCount *uint `json:"activeChildrenCount,omitempty" xmlrpc:"activeChildrenCount,omitempty"` + + // no documentation yet + ActiveFlag *bool `json:"activeFlag,omitempty" xmlrpc:"activeFlag,omitempty"` + + // A count of + ActiveSparePoolAssociatedGuestDiskBillingItemCount *uint `json:"activeSparePoolAssociatedGuestDiskBillingItemCount,omitempty" xmlrpc:"activeSparePoolAssociatedGuestDiskBillingItemCount,omitempty"` + + // no documentation yet + ActiveSparePoolAssociatedGuestDiskBillingItems []Billing_Item `json:"activeSparePoolAssociatedGuestDiskBillingItems,omitempty" xmlrpc:"activeSparePoolAssociatedGuestDiskBillingItems,omitempty"` + + // A count of a Billing Item's spare pool bundled billing items. + ActiveSparePoolBundledItemCount *uint `json:"activeSparePoolBundledItemCount,omitempty" xmlrpc:"activeSparePoolBundledItemCount,omitempty"` + + // A Billing Item's spare pool bundled billing items. + ActiveSparePoolBundledItems []Billing_Item `json:"activeSparePoolBundledItems,omitempty" xmlrpc:"activeSparePoolBundledItems,omitempty"` + + // Flag to check if a billing item can be cancelled. 1 = yes. 0 = no. + AllowCancellationFlag *int `json:"allowCancellationFlag,omitempty" xmlrpc:"allowCancellationFlag,omitempty"` + + // A billing item's associated parent. 
This is to be used for billing items that are "floating", and therefore are not child items of any parent billing item. To associate an item with another, populate this with the SoftLayer_Billing_Item ID of that associated parent item.
+ AssociatedBillingItem *Billing_Item `json:"associatedBillingItem,omitempty" xmlrpc:"associatedBillingItem,omitempty"`
+
+ // A history of billing items which a billing item has been associated with.
+ AssociatedBillingItemHistory []Billing_Item_Association_History `json:"associatedBillingItemHistory,omitempty" xmlrpc:"associatedBillingItemHistory,omitempty"`
+
+ // A count of a history of billing items which a billing item has been associated with.
+ AssociatedBillingItemHistoryCount *uint `json:"associatedBillingItemHistoryCount,omitempty" xmlrpc:"associatedBillingItemHistoryCount,omitempty"`
+
+ // This is sometimes populated for orphan billing items that are not attached to servers. Billing items like secondary portable IP addresses fit into this category. A user may set an association by calling [[SoftLayer_Billing_Item::setAssociationId]]. This will cause this orphan item to appear under its associated server billing item on future invoices. You may only attach orphaned billing items to server billing items without cancellation dates set.
+ AssociatedBillingItemId *string `json:"associatedBillingItemId,omitempty" xmlrpc:"associatedBillingItemId,omitempty"`
+
+ // A Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item.
+ AssociatedChildren []Billing_Item `json:"associatedChildren,omitempty" xmlrpc:"associatedChildren,omitempty"`
+
+ // A count of a Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item.
+ AssociatedChildrenCount *uint `json:"associatedChildrenCount,omitempty" xmlrpc:"associatedChildrenCount,omitempty"`
+
+ // A billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set.
+ AssociatedParent []Billing_Item `json:"associatedParent,omitempty" xmlrpc:"associatedParent,omitempty"`
+
+ // A count of a billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set.
+ AssociatedParentCount *uint `json:"associatedParentCount,omitempty" xmlrpc:"associatedParentCount,omitempty"`
+
+ // A count of
+ AvailableMatchingVlanCount *uint `json:"availableMatchingVlanCount,omitempty" xmlrpc:"availableMatchingVlanCount,omitempty"`
+
+ // no documentation yet
+ AvailableMatchingVlans []Network_Vlan `json:"availableMatchingVlans,omitempty" xmlrpc:"availableMatchingVlans,omitempty"`
+
+ // The bandwidth allocation for a billing item.
+ BandwidthAllocation *Network_Bandwidth_Version1_Allocation `json:"bandwidthAllocation,omitempty" xmlrpc:"bandwidthAllocation,omitempty"`
+
+ // A billing item's recurring child items that have been billed at least once and are scheduled to be billed in the future.
+ BillableChildren []Billing_Item `json:"billableChildren,omitempty" xmlrpc:"billableChildren,omitempty"`
+
+ // A count of a billing item's recurring child items that have been billed at least once and are scheduled to be billed in the future.
+ BillableChildrenCount *uint `json:"billableChildrenCount,omitempty" xmlrpc:"billableChildrenCount,omitempty"`
+
+ // A count of a Billing Item's bundled billing items
+ BundleItemCount *uint `json:"bundleItemCount,omitempty" xmlrpc:"bundleItemCount,omitempty"`
+
+ // A Billing Item's bundled billing items
+ BundleItems []Product_Item_Bundles `json:"bundleItems,omitempty" xmlrpc:"bundleItems,omitempty"`
+
+ // A count of a Billing Item's bundled billing items
+ BundledItemCount *uint `json:"bundledItemCount,omitempty" xmlrpc:"bundledItemCount,omitempty"`
+
+ // A Billing Item's bundled billing items
+ BundledItems []Billing_Item `json:"bundledItems,omitempty" xmlrpc:"bundledItems,omitempty"`
+
+ // A Billing Item's canceled child billing items.
+ CanceledChildren []Billing_Item `json:"canceledChildren,omitempty" xmlrpc:"canceledChildren,omitempty"`
+
+ // A count of a Billing Item's canceled child billing items.
+ CanceledChildrenCount *uint `json:"canceledChildrenCount,omitempty" xmlrpc:"canceledChildrenCount,omitempty"`
+
+ // A billing item's cancellation date. A billing item with a cancellation date in the past is not charged on your SoftLayer invoice. Cancellation dates in the future indicate the current billing item is active, but will be cancelled and not charged for in the future. A billing item with a null cancellation date is also considered an active billing item and is charged once every billing cycle.
+ CancellationDate *Time `json:"cancellationDate,omitempty" xmlrpc:"cancellationDate,omitempty"`
+
+ // The billing item's cancellation reason.
+ CancellationReason *Billing_Item_Cancellation_Reason `json:"cancellationReason,omitempty" xmlrpc:"cancellationReason,omitempty"`
+
+ // A count of this will return any cancellation requests that are associated with this billing item.
+ CancellationRequestCount *uint `json:"cancellationRequestCount,omitempty" xmlrpc:"cancellationRequestCount,omitempty"`
+
+ // This will return any cancellation requests that are associated with this billing item.
+ CancellationRequests []Billing_Item_Cancellation_Request `json:"cancellationRequests,omitempty" xmlrpc:"cancellationRequests,omitempty"`
+
+ // The item category to which the billing item's item belongs.
+ Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+ // The category code of this billing item. It is used to tell us the difference between a primary disk and a secondary disk, for instance.
+ CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"`
+
+ // A Billing Item's child billing items
+ Children []Billing_Item `json:"children,omitempty" xmlrpc:"children,omitempty"`
+
+ // A count of a Billing Item's child billing items
+ ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"`
+
+ // A Billing Item's child billing items that are covered by an active agreement.
+ ChildrenWithActiveAgreement []Billing_Item `json:"childrenWithActiveAgreement,omitempty" xmlrpc:"childrenWithActiveAgreement,omitempty"`
+
+ // A count of a Billing Item's child billing items that are covered by an active agreement.
+ ChildrenWithActiveAgreementCount *uint `json:"childrenWithActiveAgreementCount,omitempty" xmlrpc:"childrenWithActiveAgreementCount,omitempty"`
+
+ // The date the billing item was created. You can see this date on the invoice.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // This is the total charge for this billing item. It is calculated based on the hourlyRecurringFee * hoursUsed.
+ CurrentHourlyCharge *string `json:"currentHourlyCharge,omitempty" xmlrpc:"currentHourlyCharge,omitempty"`
+
+ // The last time this billing item was charged.
+ CycleStartDate *Time `json:"cycleStartDate,omitempty" xmlrpc:"cycleStartDate,omitempty"`
+
+ // A brief description of a billing item.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The domain name is provided for server billing items.
+ DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"`
+
+ // A count of for product items which have a downgrade path defined, this will return those product items.
+ DowngradeItemCount *uint `json:"downgradeItemCount,omitempty" xmlrpc:"downgradeItemCount,omitempty"`
+
+ // For product items which have a downgrade path defined, this will return those product items.
+ DowngradeItems []Product_Item `json:"downgradeItems,omitempty" xmlrpc:"downgradeItems,omitempty"`
+
+ // A Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee.
+ FilteredNextInvoiceChildren []Billing_Item `json:"filteredNextInvoiceChildren,omitempty" xmlrpc:"filteredNextInvoiceChildren,omitempty"`
+
+ // A count of a Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee.
+ FilteredNextInvoiceChildrenCount *uint `json:"filteredNextInvoiceChildrenCount,omitempty" xmlrpc:"filteredNextInvoiceChildrenCount,omitempty"`
+
+ // The hostname is provided for server billing items.
+ HostName *string `json:"hostName,omitempty" xmlrpc:"hostName,omitempty"`
+
+ // A flag that will reflect whether this billing item is billed on an hourly basis or not.
+ HourlyFlag *bool `json:"hourlyFlag,omitempty" xmlrpc:"hourlyFlag,omitempty"`
+
+ // The amount of money charged per hour for a billing item, if applicable. hourlyRecurringFee is measured in US Dollars ($USD).
+ HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"`
+
+ // This is the number of hours the hourly billing item has been in use this billing period. For virtual servers, this means running, paused or stopped.
+ HoursUsed *string `json:"hoursUsed,omitempty" xmlrpc:"hoursUsed,omitempty"`
+
+ // The unique identifier for this billing item.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The invoice item associated with this billing item.
+ InvoiceItem *Billing_Invoice_Item `json:"invoiceItem,omitempty" xmlrpc:"invoiceItem,omitempty"`
+
+ // A count of all invoice items associated with the billing item.
+ InvoiceItemCount *uint `json:"invoiceItemCount,omitempty" xmlrpc:"invoiceItemCount,omitempty"`
+
+ // All invoice items associated with the billing item.
+ InvoiceItems []Billing_Invoice_Item `json:"invoiceItems,omitempty" xmlrpc:"invoiceItems,omitempty"`
+
+ // The entry in the SoftLayer product catalog that a billing item is based upon.
+ Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+ // The labor fee, if any. This is a one-time charge.
+ LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"`
+
+ // The rate at which labor fees are taxed if you are a taxable customer.
+ LaborFeeTaxRate *Float64 `json:"laborFeeTaxRate,omitempty" xmlrpc:"laborFeeTaxRate,omitempty"`
+
+ // The last time this billing item was charged.
+ LastBillDate *Time `json:"lastBillDate,omitempty" xmlrpc:"lastBillDate,omitempty"`
+
+ // The location of the billing item. Some billing items have physical properties such as the server itself.
For items such as these, we provide location information.
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // The date that a billing item was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The date on which your account will be charged for this billing item.
+ NextBillDate *Time `json:"nextBillDate,omitempty" xmlrpc:"nextBillDate,omitempty"`
+
+ // A Billing Item's child billing items and associated items
+ NextInvoiceChildren []Billing_Item `json:"nextInvoiceChildren,omitempty" xmlrpc:"nextInvoiceChildren,omitempty"`
+
+ // A count of a Billing Item's child billing items and associated items
+ NextInvoiceChildrenCount *uint `json:"nextInvoiceChildrenCount,omitempty" xmlrpc:"nextInvoiceChildrenCount,omitempty"`
+
+ // A Billing Item's total, including any child billing items if they exist.
+ NextInvoiceTotalOneTimeAmount *Float64 `json:"nextInvoiceTotalOneTimeAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeAmount,omitempty"`
+
+ // A Billing Item's total, including any child billing items if they exist.
+ NextInvoiceTotalOneTimeTaxAmount *Float64 `json:"nextInvoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeTaxAmount,omitempty"`
+
+ // A Billing Item's total, including any child billing items and associated billing items if they exist.
+ NextInvoiceTotalRecurringAmount *Float64 `json:"nextInvoiceTotalRecurringAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringAmount,omitempty"`
+
+ // This is deprecated and will always be zero. Because tax is calculated in real-time, previewing the next recurring invoice is pre-tax only.
+ NextInvoiceTotalRecurringTaxAmount *Float64 `json:"nextInvoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringTaxAmount,omitempty"`
+
+ // A Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee.
+ NonZeroNextInvoiceChildren []Billing_Item `json:"nonZeroNextInvoiceChildren,omitempty" xmlrpc:"nonZeroNextInvoiceChildren,omitempty"`
+
+ // A count of a Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee.
+ NonZeroNextInvoiceChildrenCount *uint `json:"nonZeroNextInvoiceChildrenCount,omitempty" xmlrpc:"nonZeroNextInvoiceChildrenCount,omitempty"`
+
+ // Extra information provided to help you identify this billing item. This is often a username or something to help identify items that customers have more than one of.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // The amount of money charged as a one-time charge for a billing item, if applicable. oneTimeFee is measured in US Dollars ($USD).
+ OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"`
+
+ // The rate at which one-time fees are taxed if you are a taxable customer.
+ OneTimeFeeTaxRate *Float64 `json:"oneTimeFeeTaxRate,omitempty" xmlrpc:"oneTimeFeeTaxRate,omitempty"`
+
+ // A billing item's original order item. Simply a reference to the original order from which this billing item was created.
+ OrderItem *Billing_Order_Item `json:"orderItem,omitempty" xmlrpc:"orderItem,omitempty"`
+
+ // The SoftLayer_Billing_Order_Item ID. This is a reference to the original order item from which this billing item was originally created.
+ OrderItemId *int `json:"orderItemId,omitempty" xmlrpc:"orderItemId,omitempty"`
+
+ // The original physical location for this billing item. It may differ from the current location.
+ OriginalLocation *Location `json:"originalLocation,omitempty" xmlrpc:"originalLocation,omitempty"`
+
+ // The package under which this billing item was sold. A Package is the general grouping of products as seen on our order forms.
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // A billing item's parent item. If a billing item has no parent item then this value is null.
+ Parent *Billing_Item `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+ // The unique identifier of the parent of this billing item.
+ ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+
+ // A billing item's parent item. If a billing item has no parent item then this value is null.
+ ParentVirtualGuestBillingItem *Billing_Item_Virtual_Guest `json:"parentVirtualGuestBillingItem,omitempty" xmlrpc:"parentVirtualGuestBillingItem,omitempty"`
+
+ // This flag indicates whether a billing item is scheduled to be canceled or not.
+ PendingCancellationFlag *bool `json:"pendingCancellationFlag,omitempty" xmlrpc:"pendingCancellationFlag,omitempty"`
+
+ // The new order item that will replace this billing item.
+ PendingOrderItem *Billing_Order_Item `json:"pendingOrderItem,omitempty" xmlrpc:"pendingOrderItem,omitempty"`
+
+ // The provisioning transaction for this billing item.
+ ProvisionTransaction *Provisioning_Version1_Transaction `json:"provisionTransaction,omitempty" xmlrpc:"provisionTransaction,omitempty"`
+
+ // The amount of money charged per month for a billing item, if applicable. recurringFee is measured in US Dollars ($USD).
+ RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"`
+
+ // The rate at which recurring fees are taxed if you are a taxable customer.
+ RecurringFeeTaxRate *Float64 `json:"recurringFeeTaxRate,omitempty" xmlrpc:"recurringFeeTaxRate,omitempty"`
+
+ // The number of months in which the recurring fees will be incurred.
+ RecurringMonths *int `json:"recurringMonths,omitempty" xmlrpc:"recurringMonths,omitempty"`
+
+ // This is the service provider for this billing item.
+ ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"`
+
+ // The setup fee, if any. This is a one-time charge.
+ SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"`
+
+ // The rate at which setup fees are taxed if you are a taxable customer.
+ SetupFeeTaxRate *Float64 `json:"setupFeeTaxRate,omitempty" xmlrpc:"setupFeeTaxRate,omitempty"`
+
+ // A friendly description of the software component.
+ SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"`
+
+ // Billing items whose product item has an upgrade path defined in our system will return the next product item in the upgrade path.
+ UpgradeItem *Product_Item `json:"upgradeItem,omitempty" xmlrpc:"upgradeItem,omitempty"`
+
+ // A count of billing items whose product item has an upgrade path defined in our system will return all the product items in the upgrade path.
+ UpgradeItemCount *uint `json:"upgradeItemCount,omitempty" xmlrpc:"upgradeItemCount,omitempty"`
+
+ // Billing items whose product item has an upgrade path defined in our system will return all the product items in the upgrade path.
+ UpgradeItems []Product_Item `json:"upgradeItems,omitempty" xmlrpc:"upgradeItems,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Account_Media_Data_Transfer_Request data type contains general information relating to a single SoftLayer billing item for a data transfer request.
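+//
+// Like the other specialized billing item types below, it embeds Billing_Item
+// and adds a typed link to the billed resource. Because every generated field
+// is a pointer, nil-checking before dereferencing is the usual pattern (an
+// illustrative sketch of caller code, not upstream documentation):
+//
+//	if item.Resource != nil {
+//		// the billed Account_Media_Data_Transfer_Request
+//		request := *item.Resource
+//		_ = request
+//	}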
+type Billing_Item_Account_Media_Data_Transfer_Request struct {
+ Billing_Item
+
+ // The data transfer request to which the billing item points.
+ Resource *Account_Media_Data_Transfer_Request `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Association_History type keeps a record of which server billing items an "orphan" item has been associated with. Orphan billing items are billable items for secondary portable services (such as secondary subnets and StorageLayer accounts) that are not associated with a server and appear at the bottom of a SoftLayer invoice. The [[SoftLayer_Billing_Item::setAssociationId]] method allows you to associate these kinds of items with servers, making them appear as a child item of the server on your invoice. A SoftLayer_Billing_Item_Association_History record is created every time one of these associations is set.
+type Billing_Item_Association_History struct {
+ Entity
+
+ // The server billing item that an orphaned billing item was associated with.
+ AssociatedBillingItem *Billing_Item `json:"associatedBillingItem,omitempty" xmlrpc:"associatedBillingItem,omitempty"`
+
+ // The internal identifier of the server billing item that an orphaned billing item was associated with.
+ AssociatedBillingItemId *int `json:"associatedBillingItemId,omitempty" xmlrpc:"associatedBillingItemId,omitempty"`
+
+ // The billing item that was associated with a server billing item.
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // The internal identifier of the billing item that was associated with a server billing item.
+ BillingItemId *int `json:"billingItemId,omitempty" xmlrpc:"billingItemId,omitempty"`
+
+ // The date that a billing item association was last changed.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A billing item association history's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Cancellation_Reason data type contains cancellation reasons.
+type Billing_Item_Cancellation_Reason struct {
+ Entity
+
+ // A cancel reason category internal identifier.
+ BillingCancelReasonCategoryId *int `json:"billingCancelReasonCategoryId,omitempty" xmlrpc:"billingCancelReasonCategoryId,omitempty"`
+
+ // A billing cancellation reason category.
+ BillingCancellationReasonCategory *Billing_Item_Cancellation_Reason_Category `json:"billingCancellationReasonCategory,omitempty" xmlrpc:"billingCancellationReasonCategory,omitempty"`
+
+ // A count of the corresponding billing items having the specific cancellation reason.
+ BillingItemCount *uint `json:"billingItemCount,omitempty" xmlrpc:"billingItemCount,omitempty"`
+
+ // The corresponding billing items having the specific cancellation reason.
+ BillingItems []Billing_Item `json:"billingItems,omitempty" xmlrpc:"billingItems,omitempty"`
+
+ // A reason internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A standardized reason internal identifier.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // The description of the reason.
+ Reason *string `json:"reason,omitempty" xmlrpc:"reason,omitempty"`
+
+ // no documentation yet
+ TranslatedReason *string `json:"translatedReason,omitempty" xmlrpc:"translatedReason,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Cancellation_Reason_Category data type contains cancellation reason categories.
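+//
+// A minimal, illustrative sketch of listing the nested reasons, assuming the
+// "billingCancellationReasons" relation was requested via an object mask (the
+// loop below is typical caller code, not upstream API):
+//
+//	for _, reason := range category.BillingCancellationReasons {
+//		if reason.Reason != nil {
+//			fmt.Println(*reason.Reason)
+//		}
+//	}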
+type Billing_Item_Cancellation_Reason_Category struct {
+ Entity
+
+ // A count of the corresponding billing cancellation reasons having the specific billing cancellation reason category.
+ BillingCancellationReasonCount *uint `json:"billingCancellationReasonCount,omitempty" xmlrpc:"billingCancellationReasonCount,omitempty"`
+
+ // The corresponding billing cancellation reasons having the specific billing cancellation reason category.
+ BillingCancellationReasons []Billing_Item_Cancellation_Reason `json:"billingCancellationReasons,omitempty" xmlrpc:"billingCancellationReasons,omitempty"`
+
+ // A category internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The description of the category.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Cancellation_Request data type is used to cancel service billing items.
+type Billing_Item_Cancellation_Request struct {
+ Entity
+
+ // The SoftLayer account that a service cancellation request belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The internal identifier of the customer account that a service cancellation record belongs to.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The internal identifier of the billing cancellation reason associated with this request.
+ BillingCancelReasonId *int `json:"billingCancelReasonId,omitempty" xmlrpc:"billingCancelReasonId,omitempty"`
+
+ // The date that a cancellation request was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A cancellation record's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of a collection of service cancellation items.
+ ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"`
+
+ // A collection of service cancellation items.
+ Items []Billing_Item_Cancellation_Request_Item `json:"items,omitempty" xmlrpc:"items,omitempty"`
+
+ // The last modified date.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // Brief cancellation note.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // The status of a service cancellation request.
+ Status *Billing_Item_Cancellation_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // An internal identifier of the service cancellation status that this request is associated with. When a service cancellation is submitted, it will be in "Pending" status until the SoftLayer Sales team reviews it. The status of a cancellation request will be updated to "Approved" or "Voided" by SoftLayer Sales.
+ //
+ // It will be updated to "Complete" when all services are reclaimed.
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // The ticket that is associated with the service cancellation request.
+ Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+ // An internal identifier of the ticket that is associated with a service cancellation request. When a service cancellation is submitted, a support ticket will be created. This ticket contains the details of your service cancellation, and the SoftLayer Sales team will use it to further communicate with you.
+ TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+
+ // The user that initiated a service cancellation request.
+ User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Cancellation_Request_Item data type contains a billing item for cancellation.
+type Billing_Item_Cancellation_Request_Item struct {
+	Entity
+
+	// The billing item for cancellation.
+	BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// The internal identifier of a billing item.
+	BillingItemId *int `json:"billingItemId,omitempty" xmlrpc:"billingItemId,omitempty"`
+
+	// The service cancellation request that a cancellation item belongs to.
+	CancellationRequest *Billing_Item_Cancellation_Request `json:"cancellationRequest,omitempty" xmlrpc:"cancellationRequest,omitempty"`
+
+	// A cancellation request's internal identifier.
+	CancellationRequestId *int `json:"cancellationRequestId,omitempty" xmlrpc:"cancellationRequestId,omitempty"`
+
+	// A cancellation request item's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// This flag indicates whether a billing item should be canceled immediately. Set this flag to true when creating a cancellation request.
+	ImmediateCancellationFlag *bool `json:"immediateCancellationFlag,omitempty" xmlrpc:"immediateCancellationFlag,omitempty"`
+
+	// The scheduled cancellation date.
+	ScheduledCancellationDate *Time `json:"scheduledCancellationDate,omitempty" xmlrpc:"scheduledCancellationDate,omitempty"`
+
+	// The reclaim status of a service.
+	ServiceReclaimStatusCode *string `json:"serviceReclaimStatusCode,omitempty" xmlrpc:"serviceReclaimStatusCode,omitempty"`
+}
+
+// SoftLayer_Billing_Item_Cancellation_Request_Status data type represents the status of a service cancellation request.
+type Billing_Item_Cancellation_Request_Status struct {
+	Entity
+
+	// The short description of a cancellation request status.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The internal identifier of a cancellation request status.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The status key name.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The status name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Ctc_Account data type contains general information relating to a single SoftLayer billing item for a CTC client account creation.
+type Billing_Item_Ctc_Account struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Gateway_Appliance_Cluster data type contains general information relating to a single SoftLayer billing item for a gateway appliance cluster.
+type Billing_Item_Gateway_Appliance_Cluster struct {
+	Billing_Item
+
+	// The resource for a resource group billing item.
+	Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Hardware data type contains general information relating to a single SoftLayer billing item for hardware.
+type Billing_Item_Hardware struct {
+	Billing_Item
+
+	// The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+	BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+	// A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+	BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+	// The raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+	// A count of the raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"`
+
+	// The total private inbound bandwidth for this hardware for the current billing cycle.
+	BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"`
+
+	// The total private outbound bandwidth for this hardware for the current billing cycle.
+	BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"`
+
+	// The total private bandwidth for this hardware for the current billing cycle.
+	BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"`
+
+	// The raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"`
+
+	// A count of the raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsageCount *uint `json:"billingCyclePublicBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePublicBandwidthUsageCount,omitempty"`
+
+	// The total public inbound bandwidth for this hardware for the current billing cycle.
+	BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"`
+
+	// The total public outbound bandwidth for this hardware for the current billing cycle.
+	BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"`
+
+	// The total public bandwidth for this hardware for the current billing cycle.
+	BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"`
+
+	// A lockbox account associated with a server.
+	LockboxNetworkStorage *Billing_Item_Network_Storage `json:"lockboxNetworkStorage,omitempty" xmlrpc:"lockboxNetworkStorage,omitempty"`
+
+	// A count of the monitoring billing items associated with this billing item.
+	MonitoringBillingItemCount *uint `json:"monitoringBillingItemCount,omitempty" xmlrpc:"monitoringBillingItemCount,omitempty"`
+
+	// no documentation yet
+	MonitoringBillingItems []Billing_Item `json:"monitoringBillingItems,omitempty" xmlrpc:"monitoringBillingItems,omitempty"`
+
+	// The resource for a server billing item.
+	Resource *Hardware_Server `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a server billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Hardware_Colocation data type contains general information relating to a single SoftLayer billing item for colocated hardware.
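+//
+// Like most specialized billing item types in this file, the struct below only
+// embeds its parent type, so the parent fields are promoted and can be set or
+// read directly. A sketch with hypothetical values (sl.Int and sl.String are
+// the pointer helpers from this library's sl package):
+//
+//	item := datatypes.Billing_Item_Hardware_Colocation{}
+//	item.Id = sl.Int(112233)            // promoted from the embedded Billing_Item
+//	item.Notes = sl.String("colo cage") // promoted from the embedded Billing_Item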
+type Billing_Item_Hardware_Colocation struct {
+	Billing_Item_Hardware
+}
+
+// The SoftLayer_Billing_Item_Hardware_Component data type contains general information relating to a single SoftLayer billing item for hardware components.
+type Billing_Item_Hardware_Component struct {
+	Billing_Item
+
+	// The hardware component that this billing item points to.
+	Resource []Hardware_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// A count of the hardware component that this billing item points to.
+	ResourceCount *uint `json:"resourceCount,omitempty" xmlrpc:"resourceCount,omitempty"`
+
+	// The resource (unique identifier) for a hardware component billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Hardware_Security_Module data type contains general information relating to a single SoftLayer billing item for a hardware security module.
+type Billing_Item_Hardware_Security_Module struct {
+	Billing_Item_Hardware
+}
+
+// The SoftLayer_Billing_Item_Hardware_Server data type contains billing information about a bare metal server and its relationship to a particular customer account.
+type Billing_Item_Hardware_Server struct {
+	Billing_Item_Hardware
+}
+
+// The SoftLayer_Billing_Item_Network_Application_Delivery_Controller data type describes the billing item related to a NetScaler VPX.
+type Billing_Item_Network_Application_Delivery_Controller struct {
+	Billing_Item
+
+	// The bandwidth allotment detail for a billing item.
+	BandwidthAllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"bandwidthAllotmentDetail,omitempty" xmlrpc:"bandwidthAllotmentDetail,omitempty"`
+
+	// The network application controller that a billing item is associated with.
+	Resource *Network_Application_Delivery_Controller `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// A SoftLayer_Billing_Item_Network_Application_Delivery_Controller_LoadBalancer represents the [[SoftLayer_Billing_Item|billing item]] related to a single [[SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress|load balancer]] instance.
+type Billing_Item_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress struct {
+	Billing_Item
+
+	// The load balancer that a load balancer billing item is associated with.
+	Resource *Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Bandwidth data type contains general information relating to a single SoftLayer billing item for network bandwidth.
+type Billing_Item_Network_Bandwidth struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Network_Firewall data type contains general information relating to a single SoftLayer billing item whose item category code is 'firewall'
+type Billing_Item_Network_Firewall struct {
+	Billing_Item
+
+	// The VLAN firewall that a VLAN firewall billing item is associated with.
+	Resource *Network_Component_Firewall `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Firewall_Module_Context data type describes the billing items related to VLAN Firewalls.
+type Billing_Item_Network_Firewall_Module_Context struct {
+	Billing_Item
+
+	// The total public outbound bandwidth for this firewall for the current billing cycle.
+	BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"`
+}
+
+// A SoftLayer_Billing_Item_Network_Interconnect represents the [[SoftLayer_Billing_Item|billing item]] related to a network interconnect instance.
+type Billing_Item_Network_Interconnect struct {
+	Billing_Item
+
+	// The interconnect tenant that the billing item is associated with.
+	Resource *Network_Interconnect_Tenant `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// A SoftLayer_Billing_Item_Network_Interconnect_Routing represents the [[SoftLayer_Billing_Item|billing item]] related to a network interconnect global routing.
+type Billing_Item_Network_Interconnect_Routing struct {
+	Billing_Item
+
+	// The interconnect tenant that the billing item is associated with.
+	Resource *Network_Interconnect_Tenant `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// A SoftLayer_Billing_Item_Network_LoadBalancer represents the [[SoftLayer_Billing_Item|billing item]] related to a single [[SoftLayer_Network_LoadBalancer|load balancer]] instance.
+type Billing_Item_Network_LoadBalancer struct {
+	Billing_Item
+}
+
+// The global load balancer service has been deprecated and is no longer available.
+//
+// The SoftLayer_Billing_Item_Network_LoadBalancer_Global data type contains general information relating to a single SoftLayer billing item whose item category code is 'global_load_balancer'
+type Billing_Item_Network_LoadBalancer_Global struct {
+	Billing_Item
+
+	// The resource for a global load balancer billing item.
+	Resource *Network_LoadBalancer_Global_Account `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// A SoftLayer_Billing_Item_Network_LoadBalancer_VirtualIpAddress represents the [[SoftLayer_Billing_Item|billing item]] related to a single [[SoftLayer_Network_LoadBalancer_VirtualIpAddress|load balancer]] instance.
+type Billing_Item_Network_LoadBalancer_VirtualIpAddress struct {
+	Billing_Item
+
+	// The load balancer's virtual IP address that the billing item is associated with.
+	Resource *Network_LoadBalancer_VirtualIpAddress `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Message_Delivery data type describes the related billing item.
+type Billing_Item_Network_Message_Delivery struct {
+	Billing_Item
+
+	// The object this billing item is associated with.
+	Resource *Network_Message_Delivery `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_PerformanceStorage_Iscsi data type contains general information relating to a single SoftLayer billing item whose item category code is 'performance_storage_iscsi'
+type Billing_Item_Network_PerformanceStorage_Iscsi struct {
+	Billing_Item_Network_Storage
+}
+
+// The SoftLayer_Billing_Item_Network_PerformanceStorage_Nfs data type contains general information relating to a single SoftLayer billing item whose item category code is 'performance_storage_nfs'
+type Billing_Item_Network_PerformanceStorage_Nfs struct {
+	Billing_Item_Network_Storage
+}
+
+// The SoftLayer_Billing_Item_Network_Storage data type describes the billing items related to StorageLayer accounts.
+type Billing_Item_Network_Storage struct {
+	Billing_Item
+
+	// The StorageLayer account that a network storage billing item is associated with.
+	Resource *Network_Storage `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Storage_Hub data type models all billing items related to hub-based StorageLayer offerings, such as CloudLayer storage.
+type Billing_Item_Network_Storage_Hub struct {
+	Billing_Item_Network_Storage
+}
+
+// The SoftLayer_Billing_Item_Network_Storage_Hub_Bandwidth data type models the billing items created when a CloudLayer storage account generates a bandwidth overage charge.
+type Billing_Item_Network_Storage_Hub_Bandwidth struct {
+	Billing_Item_Network_Storage
+}
+
+// The SoftLayer_Billing_Item_Network_Subnet data type contains general information relating to a single SoftLayer billing item whose item category code is one of the following:
+// * pri_ip_address
+// * static_sec_ip_addresses (static secondary)
+// * sov_sec_ip_addresses (secondary on vlan, also known as "portable ips")
+// * sov_sec_ip_addresses_pub (sov_sec_ip_addresses public only)
+// * sov_sec_ip_addresses_priv (sov_sec_ip_addresses private only)
+// * sec_ip_addresses (old style, secondary ip addresses)
+//
+//
+// These item categories denote that the billing item has subnet information attached.
+type Billing_Item_Network_Subnet struct {
+	Billing_Item
+
+	// The resource for a subnet-related billing item.
+	Resource *Network_Subnet `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource name for a subnet billing item.
+	ResourceName *string `json:"resourceName,omitempty" xmlrpc:"resourceName,omitempty"`
+
+	// The resource (unique identifier) for a subnet billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Subnet_IpAddress_Global data type contains general information relating to a single SoftLayer billing item whose item category code is one of the following:
+// * global_ipv4
+// * global_ipv6
+//
+//
+// These item categories denote that the billing item has subnet information attached.
+type Billing_Item_Network_Subnet_IpAddress_Global struct {
+	Billing_Item_Network_Subnet
+}
+
+// The SoftLayer_Billing_Item_Network_Tunnel data type describes the billing items related to IPsec VPN network tunnels.
+type Billing_Item_Network_Tunnel struct {
+	Billing_Item
+
+	// The IPsec VPN that a network tunnel billing item is associated with.
+	Resource *Network_Tunnel_Module_Context `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Vlan data type contains general information relating to a single SoftLayer billing item whose item category code is one of the following:
+// * network_vlan
+//
+//
+// These item categories denote that the billing item has network vlan information attached.
+type Billing_Item_Network_Vlan struct {
+	Billing_Item
+
+	// The network vlan resource for this billing item.
+	Resource *Network_Vlan `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// no documentation yet
+type Billing_Item_NewCustomerSetup struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Private_Cloud data type contains general information relating to a single billing item for a private cloud.
+type Billing_Item_Private_Cloud struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component data type contains general information relating to a single SoftLayer billing item for software components.
+type Billing_Item_Software_Component struct {
+	Billing_Item
+
+	// The software component that this billing item points to.
+	Resource *Software_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a software component billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Software_Component_Analytics_Urchin data type contains general information relating to a single SoftLayer billing item for Urchin software components.
+type Billing_Item_Software_Component_Analytics_Urchin struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_ControlPanel data type contains general information relating to a single SoftLayer billing item for control panel software components.
+type Billing_Item_Software_Component_ControlPanel struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_ControlPanel_Parallels_Plesk_Billing data type contains general information relating to a single SoftLayer billing item for Parallels Plesk Billing control panel software components.
+type Billing_Item_Software_Component_ControlPanel_Parallels_Plesk_Billing struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_OperatingSystem_Addon data type contains general information relating to a single SoftLayer billing item for operating system add-on software components.
+type Billing_Item_Software_Component_OperatingSystem_Addon struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_OperatingSystem_Addon_Citrix_Essentials data type contains general information relating to a single SoftLayer billing item for Citrix Essentials software components.
+type Billing_Item_Software_Component_OperatingSystem_Addon_Citrix_Essentials struct {
+	Billing_Item_Software_Component_OperatingSystem_Addon
+
+	// The Citrix Essentials software component that a billing item is associated with.
+	Resource *Software_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Software_Component_Virtual_OperatingSystem data type contains general information relating to a single SoftLayer billing item for operating system software components on virtual machines.
+type Billing_Item_Software_Component_Virtual_OperatingSystem struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_Virtual_OperatingSystem_Microsoft data type contains general information relating to a single SoftLayer billing item for Microsoft operating system software components on virtual machines.
+type Billing_Item_Software_Component_Virtual_OperatingSystem_Microsoft struct {
+	Billing_Item_Software_Component_Virtual_OperatingSystem
+
+	// The software virtual license to which this billing item points.
+	Resource *Software_VirtualLicense `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a software virtual license billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Software_Component_Virtual_OperatingSystem_Redhat data type contains general information relating to a single SoftLayer billing item for Red Hat operating system software components on virtual machines.
+type Billing_Item_Software_Component_Virtual_OperatingSystem_Redhat struct {
+	Billing_Item_Software_Component_Virtual_OperatingSystem
+
+	// The software component to which this billing item points.
+	Resource *Software_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a software component billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Software_License data type contains general information relating to a single SoftLayer billing item for a software license.
+type Billing_Item_Software_License struct {
+	Billing_Item
+
+	// The resource for a software license billing item.
+	Resource *Software_AccountLicense `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Support data type contains general information relating to a premium support offering.
+type Billing_Item_Support struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_User_Customer_External_Binding data type describes the billing item related to an external authentication binding.
+type Billing_Item_User_Customer_External_Binding struct {
+	Billing_Item
+
+	// The external authentication binding that a billing item is associated with.
+	Resource *User_Customer_External_Binding `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// no documentation yet
+type Billing_Item_Virtual_DedicatedHost struct {
+	Billing_Item
+
+	// The resource for a virtual dedicated host billing item.
+	Resource *Virtual_DedicatedHost `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a virtual dedicated host billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// A SoftLayer_Billing_Item_Virtual_Dedicated_Rack data type models the billing information for a single bandwidth pool. Bandwidth pool members share their public bandwidth allocations, and overage charges are incurred by the pool rather than by the individual rack members. A virtual rack billing item is the parent item for all of its rack membership billing items.
+type Billing_Item_Virtual_Dedicated_Rack struct {
+	Billing_Item
+
+	// The raw bandwidth usage data for the current billing cycle. One object is returned for each network a virtual rack is attached to.
+	BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+	// A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network a virtual rack is attached to.
+	BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+	// The raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+	// A count of the raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"`
+
+	// The total private network inbound bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"`
+
+	// The total private network outbound bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"`
+
+	// The total private network bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"`
+
+	// The raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"`
+
+	// A count of the raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsageCount *uint `json:"billingCyclePublicBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePublicBandwidthUsageCount,omitempty"`
+
+	// The total public inbound bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"`
+
+	// The total public outbound bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"`
+
+	// The total public bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"`
+
+	// The virtual rack that a virtual rack billing item is associated with.
+	Resource *Network_Bandwidth_Version1_Allotment `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Virtual_Disk_Image data type contains general information relating to a single SoftLayer billing item for disk images.
+type Billing_Item_Virtual_Disk_Image struct {
+	Billing_Item
+
+	// The disk image to which the billing item points.
+	Resource *Virtual_Disk_Image `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a disk image billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Virtual_Guest data type contains general information relating to a single SoftLayer billing item for guests.
+type Billing_Item_Virtual_Guest struct {
+	Billing_Item
+
+	// The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+	BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+	// A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+	BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+	// The raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+	// A count of the raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"`
+
+	// The total private inbound bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"`
+
+	// The total private outbound bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"`
+
+	// The total private bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"`
+
+	// The raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"`
+
+	// A count of the raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsageCount *uint `json:"billingCyclePublicBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePublicBandwidthUsageCount,omitempty"`
+
+	// The total public inbound bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"`
+
+	// The total public outbound bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"`
+
+	// The total public bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"`
+
+	// A count of the monitoring billing items associated with this billing item.
+	MonitoringBillingItemCount *uint `json:"monitoringBillingItemCount,omitempty" xmlrpc:"monitoringBillingItemCount,omitempty"`
+
+	// no documentation yet
+	MonitoringBillingItems []Billing_Item `json:"monitoringBillingItems,omitempty" xmlrpc:"monitoringBillingItems,omitempty"`
+
+	// The resource for a cloud server billing item.
+	Resource *Virtual_Guest `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a server billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Virtual_Host_Usage data type contains general information relating to a single SoftLayer billing item for virtual machine peak usage.
+type Billing_Item_Virtual_Host_Usage struct {
+	Billing_Item
+
+	// The resource for a peak virtual machine usage billing item.
+	Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a server billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// no documentation yet
+type Billing_Item_Virtual_ReservedCapacity struct {
+	Billing_Item
+
+	// The resource for a reserved capacity billing item.
+	Resource *Virtual_ReservedCapacityGroup_Instance `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Workspace data type contains general information relating to a single SoftLayer billing item whose item category code is 'workspace'
+type Billing_Item_Workspace struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Order data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. Personal information in this type, such as names, addresses, and phone numbers, is taken from the account's contact information at the time the order is generated for an existing SoftLayer customer.
+type Billing_Order struct {
+	Entity
+
+	// The [[SoftLayer_Account|account]] to which an order belongs.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The account ID to which an order belongs.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"`
+
+	// A cart is similar to a quote, except that it can be continually modified by the customer and does not have locked-in prices. Not all orders will have a cart associated with them. See [[SoftLayer_Billing_Order_Cart]] for more information.
+	Cart *Billing_Order_Cart `json:"cart,omitempty" xmlrpc:"cart,omitempty"`
+
+	// A count of the [[SoftLayer_Billing_Order_Item (type)|order items]] that are core restricted
+	CoreRestrictedItemCount *uint `json:"coreRestrictedItemCount,omitempty" xmlrpc:"coreRestrictedItemCount,omitempty"`
+
+	// The [[SoftLayer_Billing_Order_Item (type)|order items]] that are core restricted
+	CoreRestrictedItems []Billing_Order_Item `json:"coreRestrictedItems,omitempty" xmlrpc:"coreRestrictedItems,omitempty"`
+
+	// The point in time at which a billing item was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A count of all credit card transactions associated with this order. If this order was not placed with a credit card, this will be empty.
+	CreditCardTransactionCount *uint `json:"creditCardTransactionCount,omitempty" xmlrpc:"creditCardTransactionCount,omitempty"`
+
+	// All credit card transactions associated with this order. If this order was not placed with a credit card, this will be empty.
+	CreditCardTransactions []Billing_Payment_Card_Transaction `json:"creditCardTransactions,omitempty" xmlrpc:"creditCardTransactions,omitempty"`
+
+	// no documentation yet
+	ExchangeRate *Billing_Currency_ExchangeRate `json:"exchangeRate,omitempty" xmlrpc:"exchangeRate,omitempty"`
+
+	// A billing order's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The SoftLayer_User_Customer id of the portal or API user who impersonated the user which submitted an order.
+	ImpersonatingUserRecordId *int `json:"impersonatingUserRecordId,omitempty" xmlrpc:"impersonatingUserRecordId,omitempty"`
+
+	// no documentation yet
+	InitialInvoice *Billing_Invoice `json:"initialInvoice,omitempty" xmlrpc:"initialInvoice,omitempty"`
+
+	// A count of the SoftLayer_Billing_Order_Item records included in an order.
+	ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"`
+
+	// The SoftLayer_Billing_Order_Item records included in an order.
+	Items []Billing_Order_Item `json:"items,omitempty" xmlrpc:"items,omitempty"`
+
+	// The last time an order was updated.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	OrderApprovalDate *Time `json:"orderApprovalDate,omitempty" xmlrpc:"orderApprovalDate,omitempty"`
+
+	// An order's non-server items total monthly fee.
+	OrderNonServerMonthlyAmount *Float64 `json:"orderNonServerMonthlyAmount,omitempty" xmlrpc:"orderNonServerMonthlyAmount,omitempty"`
+
+	// The SoftLayer_Billing_Order_Quote id of the quote that was finalized into an order.
+	OrderQuoteId *int `json:"orderQuoteId,omitempty" xmlrpc:"orderQuoteId,omitempty"`
+
+	// An order's server items total monthly fee.
+	OrderServerMonthlyAmount *Float64 `json:"orderServerMonthlyAmount,omitempty" xmlrpc:"orderServerMonthlyAmount,omitempty"`
+
+	// A count of an order's top level items. This normally includes the server line item and any non-server additional services such as NAS or ISCSI.
+	OrderTopLevelItemCount *uint `json:"orderTopLevelItemCount,omitempty" xmlrpc:"orderTopLevelItemCount,omitempty"`
+
+	// An order's top level items. This normally includes the server line item and any non-server additional services such as NAS or ISCSI.
+	OrderTopLevelItems []Billing_Order_Item `json:"orderTopLevelItems,omitempty" xmlrpc:"orderTopLevelItems,omitempty"`
+
+	// This amount represents the order's initial charge including set up fee and taxes.
+	OrderTotalAmount *Float64 `json:"orderTotalAmount,omitempty" xmlrpc:"orderTotalAmount,omitempty"`
+
+	// An order's total one time amount summing all the set up fees, the labor fees and the one time fees. Taxes will be applied for non-tax-exempt accounts. This amount represents the initial fees that will be charged.
+	OrderTotalOneTime *Float64 `json:"orderTotalOneTime,omitempty" xmlrpc:"orderTotalOneTime,omitempty"`
+
+	// An order's total one time amount. This amount represents the initial fees before tax.
+	OrderTotalOneTimeAmount *Float64 `json:"orderTotalOneTimeAmount,omitempty" xmlrpc:"orderTotalOneTimeAmount,omitempty"`
+
+	// An order's total one time tax amount. This amount represents the tax that will be applied to the total charge, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account.
+	OrderTotalOneTimeTaxAmount *Float64 `json:"orderTotalOneTimeTaxAmount,omitempty" xmlrpc:"orderTotalOneTimeTaxAmount,omitempty"`
+
+	// An order's total recurring amount. Taxes will be applied for non-tax-exempt accounts. This amount represents the fees that will be charged on a recurring (usually monthly) basis.
+	OrderTotalRecurring *Float64 `json:"orderTotalRecurring,omitempty" xmlrpc:"orderTotalRecurring,omitempty"`
+
+	// An order's total recurring amount. This amount represents the fees that will be charged on a recurring (usually monthly) basis.
+	OrderTotalRecurringAmount *Float64 `json:"orderTotalRecurringAmount,omitempty" xmlrpc:"orderTotalRecurringAmount,omitempty"`
+
+	// The total tax amount of the recurring fees, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account.
+	OrderTotalRecurringTaxAmount *Float64 `json:"orderTotalRecurringTaxAmount,omitempty" xmlrpc:"orderTotalRecurringTaxAmount,omitempty"`
+
+	// An order's total setup fee.
+	OrderTotalSetupAmount *Float64 `json:"orderTotalSetupAmount,omitempty" xmlrpc:"orderTotalSetupAmount,omitempty"`
+
+	// The type of an order. This lets you know where this order was generated from.
+	OrderType *Billing_Order_Type `json:"orderType,omitempty" xmlrpc:"orderType,omitempty"`
+
+	// The SoftLayer_Billing_Order_Type id of the order.
+	OrderTypeId *int `json:"orderTypeId,omitempty" xmlrpc:"orderTypeId,omitempty"`
+
+	// A count of all PayPal transactions associated with this order. If this order was not placed with PayPal, this will be empty.
+	PaypalTransactionCount *uint `json:"paypalTransactionCount,omitempty" xmlrpc:"paypalTransactionCount,omitempty"`
+
+	// All PayPal transactions associated with this order. If this order was not placed with PayPal, this will be empty.
+	PaypalTransactions []Billing_Payment_PayPal_Transaction `json:"paypalTransactions,omitempty" xmlrpc:"paypalTransactions,omitempty"`
+
+	// no documentation yet
+	PresaleEvent *Sales_Presale_Event `json:"presaleEvent,omitempty" xmlrpc:"presaleEvent,omitempty"`
+
+	// no documentation yet
+	PresaleEventId *int `json:"presaleEventId,omitempty" xmlrpc:"presaleEventId,omitempty"`
+
+	// Flag indicating a private cloud solution order (Deprecated)
+	PrivateCloudOrderFlag *bool `json:"privateCloudOrderFlag,omitempty" xmlrpc:"privateCloudOrderFlag,omitempty"`
+
+	// The quote of an order. This quote holds information about its expiration date, creation date, name and status. This information is tied to an order having the status 'QUOTE'
+	Quote *Billing_Order_Quote `json:"quote,omitempty" xmlrpc:"quote,omitempty"`
+
+	// The Referral Partner who referred this order. (Only necessary for new customer orders)
+	ReferralPartner *Account `json:"referralPartner,omitempty" xmlrpc:"referralPartner,omitempty"`
+
+	// The purchaser's current status, e.g. Approved, Pending_Approval.
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// This flag indicates an order is an upgrade.
+	UpgradeRequestFlag *bool `json:"upgradeRequestFlag,omitempty" xmlrpc:"upgradeRequestFlag,omitempty"`
+
+	// The SoftLayer_User_Customer object tied to an order.
+	UserRecord *User_Customer `json:"userRecord,omitempty" xmlrpc:"userRecord,omitempty"`
+
+	// The SoftLayer_User_Customer id of the portal or API user who submitted an order.
+	UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"`
+}
+
+// no documentation yet
+type Billing_Order_Cart struct {
+	Billing_Order_Quote
+}
+
+// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Order_Item data type. Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. SoftLayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own.
+//
+// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPUs, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing items with a null parent item have no associated parent and are top-level items.
+type Billing_Order_Item struct {
+	Entity
+
+	// The SoftLayer_Billing_Item tied to the order item.
+	BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// A count of the other items included with an ordered item.
+	BundledItemCount *uint `json:"bundledItemCount,omitempty" xmlrpc:"bundledItemCount,omitempty"`
+
+	// The other items included with an ordered item.
+	BundledItems []Billing_Order_Item `json:"bundledItems,omitempty" xmlrpc:"bundledItems,omitempty"`
+
+	// The item category tied to an order item.
+	Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+	// The category code for the order item.
+	CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"`
+
+	// The child order items for an order item. All server order items should have children. These children are considered a part of the server.
+	Children []Billing_Order_Item `json:"children,omitempty" xmlrpc:"children,omitempty"`
+
+	// A count of the child order items for an order item. All server order items should have children. These children are considered a part of the server.
+	ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"`
+
+	// A friendly description of the purchased item.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The domain name of the server as designated by the purchaser at the time of order placement.
+	DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"`
+
+	// A hardware's universally unique identifier.
+	GlobalIdentifier *string `json:"globalIdentifier,omitempty" xmlrpc:"globalIdentifier,omitempty"`
+
+	// The component type tied to an order item. All hardware-specific items should have a generic hardware component.
+	HardwareGenericComponent *Hardware_Component_Model_Generic `json:"hardwareGenericComponent,omitempty" xmlrpc:"hardwareGenericComponent,omitempty"`
+
+	// The hostname of the server as designated by the purchaser at the time of order placement.
+	HostName *string `json:"hostName,omitempty" xmlrpc:"hostName,omitempty"`
+
+	// The amount of money charged per hour for an order item, if applicable, and only if it was ordered this day. hourlyRecurringFee is measured in US Dollars ($USD).
+	HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The SoftLayer_Product_Item tied to an order item. The item is the actual definition of the product being sold.
+	Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+	// A count of an item's category answers.
+	ItemCategoryAnswerCount *uint `json:"itemCategoryAnswerCount,omitempty" xmlrpc:"itemCategoryAnswerCount,omitempty"`
+
+	// An item's category answers.
+	ItemCategoryAnswers []Billing_Order_Item_Category_Answer `json:"itemCategoryAnswers,omitempty" xmlrpc:"itemCategoryAnswers,omitempty"`
+
+	// The SoftLayer_Product_Item ID for this order item.
+	ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"`
+
+	// The SoftLayer_Product_Item_Price tied to an order item. The item price object describes the cost of an item.
+	ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"`
+
+	// The item price id (SoftLayer_Product_Item_Price->id) of the ordered item.
+	ItemPriceId *Float64 `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"`
+
+	// An order item's labor fee total after taxes. This does not include any child invoice items.
+	LaborAfterTaxAmount *Float64 `json:"laborAfterTaxAmount,omitempty" xmlrpc:"laborAfterTaxAmount,omitempty"`
+
+	// The labor fee, if any. This is a one time charge.
+	LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"`
+
+	// The rate at which labor fees are taxed if you are a taxable customer.
+	LaborFeeTaxRate *Float64 `json:"laborFeeTaxRate,omitempty" xmlrpc:"laborFeeTaxRate,omitempty"`
+
+	// An order item's labor tax amount. This does not include any child invoice items.
+	LaborTaxAmount *Float64 `json:"laborTaxAmount,omitempty" xmlrpc:"laborTaxAmount,omitempty"`
+
+	// The location of an ordered item. This is usually the same as the server it is being ordered with. Otherwise it describes the location of the additional service being ordered.
+	Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+	// no documentation yet
+	NextOrderChildren []Billing_Order_Item `json:"nextOrderChildren,omitempty" xmlrpc:"nextOrderChildren,omitempty"`
+
+	// A count of an order item's next-order children.
+	NextOrderChildrenCount *uint `json:"nextOrderChildrenCount,omitempty" xmlrpc:"nextOrderChildrenCount,omitempty"`
+
+	// This is only populated when an upgrade order is placed. The old billing item represents what the billing was before the upgrade happened.
+	OldBillingItem *Billing_Item `json:"oldBillingItem,omitempty" xmlrpc:"oldBillingItem,omitempty"`
+
+	// An order item's one-time fee total after taxes. This does not include any child invoice items.
+	OneTimeAfterTaxAmount *Float64 `json:"oneTimeAfterTaxAmount,omitempty" xmlrpc:"oneTimeAfterTaxAmount,omitempty"`
+
+	// The amount of money charged as a one-time charge for an order item, if applicable. oneTimeFee is measured in US Dollars ($USD).
+	OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"`
+
+	// The rate at which one time fees are taxed if you are a taxable customer.
+	OneTimeFeeTaxRate *Float64 `json:"oneTimeFeeTaxRate,omitempty" xmlrpc:"oneTimeFeeTaxRate,omitempty"`
+
+	// An order item's one-time tax amount. This does not include any child invoice items.
+	OneTimeTaxAmount *Float64 `json:"oneTimeTaxAmount,omitempty" xmlrpc:"oneTimeTaxAmount,omitempty"`
+
+	// The order to which this item belongs. The order contains all the information related to the items included in an order.
+	Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+
+	// no documentation yet
+	OrderApprovalDate *Time `json:"orderApprovalDate,omitempty" xmlrpc:"orderApprovalDate,omitempty"`
+
+	// The SoftLayer_Product_Package an order item is a part of.
+	Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+	// The parent order item for an item. Items that are associated with a server will have a parent. The parent will be the server item itself.
+	Parent *Billing_Order_Item `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+	// no documentation yet
+	ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+
+	// The SoftLayer_Product_Package_Preset related to this order item.
+	Preset *Product_Package_Preset `json:"preset,omitempty" xmlrpc:"preset,omitempty"`
+
+	// The id for the preset configuration ordered.
+	PresetId *int `json:"presetId,omitempty" xmlrpc:"presetId,omitempty"`
+
+	// no documentation yet
+	PromoCode *Product_Promotion `json:"promoCode,omitempty" xmlrpc:"promoCode,omitempty"`
+
+	// no documentation yet
+	PromoCodeId *int `json:"promoCodeId,omitempty" xmlrpc:"promoCodeId,omitempty"`
+
+	// The quantity of the ordered item in a quote.
+	Quantity *int `json:"quantity,omitempty" xmlrpc:"quantity,omitempty"`
+
+	// An order item's recurring fee total after taxes. This does not include any child invoice items.
+	RecurringAfterTaxAmount *Float64 `json:"recurringAfterTaxAmount,omitempty" xmlrpc:"recurringAfterTaxAmount,omitempty"`
+
+	// The amount of money charged per month for an order item, if applicable. recurringFee is measured in US Dollars ($USD).
+	RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"`
+
+	// An order item's recurring tax amount. This does not include any child invoice items.
+	RecurringTaxAmount *Float64 `json:"recurringTaxAmount,omitempty" xmlrpc:"recurringTaxAmount,omitempty"`
+
+	// A count of power supplies contained within this SoftLayer_Billing_Order.
+	RedundantPowerSupplyCount *uint `json:"redundantPowerSupplyCount,omitempty" xmlrpc:"redundantPowerSupplyCount,omitempty"`
+
+	// An order item's setup fee total after taxes. This does not include any child invoice items.
+	SetupAfterTaxAmount *Float64 `json:"setupAfterTaxAmount,omitempty" xmlrpc:"setupAfterTaxAmount,omitempty"`
+
+	// The setup fee, if any. This is a one time charge.
+	SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"`
+
+	// The number of months for which the setup fee is deferred.
+	SetupFeeDeferralMonths *int `json:"setupFeeDeferralMonths,omitempty" xmlrpc:"setupFeeDeferralMonths,omitempty"`
+
+	// The rate at which setup fees are taxed if you are a taxable customer.
+	SetupFeeTaxRate *Float64 `json:"setupFeeTaxRate,omitempty" xmlrpc:"setupFeeTaxRate,omitempty"`
+
+	// An order item's setup tax amount. This does not include any child invoice items.
+	SetupTaxAmount *Float64 `json:"setupTaxAmount,omitempty" xmlrpc:"setupTaxAmount,omitempty"`
+
+	// For ordered items that are software items, a full description of that software can be found with this property.
+	SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"`
+
+	// A count of the drive storage groups that are attached to this billing order item.
+	StorageGroupCount *uint `json:"storageGroupCount,omitempty" xmlrpc:"storageGroupCount,omitempty"`
+
+	// The drive storage groups that are attached to this billing order item.
+	StorageGroups []Configuration_Storage_Group_Order `json:"storageGroups,omitempty" xmlrpc:"storageGroups,omitempty"`
+
+	// The recurring fee of an ordered item. This amount represents the fees that will be charged on a recurring (usually monthly) basis.
+	TotalRecurringAmount *Float64 `json:"totalRecurringAmount,omitempty" xmlrpc:"totalRecurringAmount,omitempty"`
+
+	// The next SoftLayer_Product_Item in the upgrade path for this order item.
+	UpgradeItem *Product_Item `json:"upgradeItem,omitempty" xmlrpc:"upgradeItem,omitempty"`
+}
+
+// The SoftLayer_Billing_Order_Item_Category_Answer data type represents a single answer to an item category question.
+type Billing_Order_Item_Category_Answer struct {
+	Entity
+
+	// The answer to the question.
+	Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"`
+
+	// The date that the answer was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The billing order item that the answer is for.
+	OrderItem *Billing_Order_Item `json:"orderItem,omitempty" xmlrpc:"orderItem,omitempty"`
+
+	// The question that is being answered.
+	Question *Product_Item_Category_Question `json:"question,omitempty" xmlrpc:"question,omitempty"`
+
+	// The identifier for the question that the answer belongs to.
+	QuestionId *int `json:"questionId,omitempty" xmlrpc:"questionId,omitempty"`
+}
+
+// The SoftLayer_Billing_Order_Quote data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. Personal information in this type, such as names, addresses, and phone numbers, is taken from the account's contact information at the time the quote is generated for an existing SoftLayer customer.
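+//
+// A minimal sketch of turning a saved quote into an order through the
+// generated services package (assumes SoftLayer_Billing_Order_Quote::placeOrder,
+// an authenticated session sess, a known quoteId, and an already-prepared
+// product order container; illustrative only):
+//
+//	quoteSvc := services.GetBillingOrderQuoteService(sess).Id(quoteId)
+//	receipt, err := quoteSvc.PlaceOrder(&orderContainer)
+//	if err == nil && receipt.OrderId != nil {
+//		fmt.Println("order id:", *receipt.OrderId)
+//	}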
+type Billing_Order_Quote struct {
+	Entity
+
+	// A quote's corresponding account.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The identification number of the account record tied to the quote.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The identification number of the order record tied to the quote.
+	CompletedPurchaseDataId *int `json:"completedPurchaseDataId,omitempty" xmlrpc:"completedPurchaseDataId,omitempty"`
+
+	// Holds the date the quote record was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// Indicates whether the owner of the quote has chosen to no longer be contacted.
+	DoNotContactFlag *bool `json:"doNotContactFlag,omitempty" xmlrpc:"doNotContactFlag,omitempty"`
+
+	// This property holds the expiration date of a quote; after that date the quote is deemed expired.
+	ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"`
+
+	// The id used to identify a quote.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Holds the date the quote record was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The name given to the quote by the initiator.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// This order contains the records for which products were selected for this quote.
+	Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+
+	// These are all the orders that were created from this quote.
+	OrdersFromQuote []Billing_Order `json:"ordersFromQuote,omitempty" xmlrpc:"ordersFromQuote,omitempty"`
+
+	// A count of all the orders that were created from this quote.
+	OrdersFromQuoteCount *uint `json:"ordersFromQuoteCount,omitempty" xmlrpc:"ordersFromQuoteCount,omitempty"`
+
+	// This property holds system-generated notes. For example, if a quote is tied to an order where one of the order items has an inactive promotion code, the quote is considered invalid.
+	PublicNote *string `json:"publicNote,omitempty" xmlrpc:"publicNote,omitempty"`
+
+	// Holds the system-generated hash password for the quote.
+	QuoteKey *string `json:"quoteKey,omitempty" xmlrpc:"quoteKey,omitempty"`
+
+	// This property holds the current status of a quote: pending, expired, saved, or deleted.
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// The SoftLayer_Billing_Order_Type data type contains general information relating to all the different types of orders that exist. This data pertains only to where an order was generated from, from any of the SoftLayer websites with ordering interfaces or directly through the SoftLayer API.
+type Billing_Order_Type struct {
+	Entity
+
+	// A brief description of where a SoftLayer order originated from.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A SoftLayer order type's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A simple keyname stating where a SoftLayer order originated from.
+	Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// The SoftLayer_Billing_Payment_Card_ChangeRequest data type contains general information relating to attempted credit card information changes. This supports enablement of 3D Secure via the Cardinal Cruise implementation, which allows for credit card authentication and is currently limited to specified merchants.
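+//
+// A minimal sketch of populating a change request, including the 3D Secure
+// enrollment reference documented on the field below (hypothetical values;
+// sl.Int and sl.String are the pointer helpers from this library's sl package):
+//
+//	req := datatypes.Billing_Payment_Card_ChangeRequest{
+//		AccountId:           sl.Int(123456),
+//		CardAccountNumber:   sl.String("4111111111111111"),
+//		CardExpirationMonth: sl.String("01"),
+//		CardExpirationYear:  sl.String("2030"),
+//		PayerAuthenticationEnrollmentReferenceId: sl.String("df-reference-id"),
+//	}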
+type Billing_Payment_Card_ChangeRequest struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The account ID with which the credit card and billing information is associated.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The total amount of the attempted transaction, represented in decimal format as US Dollars ($USD).
+	Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+	// The SoftLayer_Billing_Payment_Card_Transaction tied to the authorization performed as part of this change request.
+	AuthorizedCreditCardTransaction *Billing_Payment_Card_Transaction `json:"authorizedCreditCardTransaction,omitempty" xmlrpc:"authorizedCreditCardTransaction,omitempty"`
+
+	// The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 1.
+	BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"`
+
+	// The second line in the address. Information such as suite number goes here.
+	BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"`
+
+	// The city in which a customer's account resides.
+	BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"`
+
+	// The 2-character country code for an account's address. (e.g. US)
+	BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"`
+
+	// The email address associated with a customer account.
+	BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"`
+
+	// The company name for an account.
+	BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"`
+
+	// The first name of the customer account owner.
+	BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"`
+
+	// The last name of the customer account owner.
+	BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"`
+
+	// The fax number associated with a customer account.
+	BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"`
+
+	// The phone number associated with a customer account.
+	BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"`
+
+	// The Zip or Postal Code for the billing address on an account.
+	BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"`
+
+	// The State for the account.
+	BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"`
+
+	// The SoftLayer_Billing_Payment_Card_Transaction tied to the capture of funds performed as part of this change request.
+	CaptureCreditCardTransaction *Billing_Payment_Card_Transaction `json:"captureCreditCardTransaction,omitempty" xmlrpc:"captureCreditCardTransaction,omitempty"`
+
+	// The last 4 digits of a customer's credit card.
+	CardAccountLast4 *string `json:"cardAccountLast4,omitempty" xmlrpc:"cardAccountLast4,omitempty"`
+
+	// The card number submitted in the change request.
+	CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"`
+
+	// The month (MM) in which a customer's payment card will expire.
+ CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // The year (YYYY) in which a customer's payment card will expire. + CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // no documentation yet + CardNickname *string `json:"cardNickname,omitempty" xmlrpc:"cardNickname,omitempty"` + + // The type of payment card a customer has. (i.e. Visa, MasterCard, American Express). + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // The credit card verification number submitted in the change request. + CreditCardVerificationNumber *string `json:"creditCardVerificationNumber,omitempty" xmlrpc:"creditCardVerificationNumber,omitempty"` + + // Describes the currency selected for payment + CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"` + + // Device Fingerprint Identifier - Used internally and can safely be ignored. + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // The unique identifier for a single change request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // the notes stored about a customer's payment card. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The purpose of this property is to allow enablement of 3D Secure (3DS). This is the Reference ID that corresponds to the device data for Payer Authentication. In order to properly enable 3DS, this will require implementation of Cardinal Cruise Hybrid. + // + // Please refer to https://cardinaldocs.atlassian.net/wiki/spaces/CC/pages/360668/Cardinal+Cruise+Hybrid and view section under "DFReferenceId / ReferenceId" to populate this property accordingly. + PayerAuthenticationEnrollmentReferenceId *string `json:"payerAuthenticationEnrollmentReferenceId,omitempty" xmlrpc:"payerAuthenticationEnrollmentReferenceId,omitempty"` + + // "Continue with Consumer Authentication" decoded response JWT (JSON Web Token) after successful authentication. The response is part of the implementation of Cardinal Cruise Hybrid. + // + // Please refer to https://cardinaldocs.atlassian.net/wiki/spaces/CC/pages/360668/Cardinal+Cruise+Hybrid and view section under "Continue with Consumer Authentication" to populate this property accordingly based on the CCA response. + PayerAuthenticationWebToken *string `json:"payerAuthenticationWebToken,omitempty" xmlrpc:"payerAuthenticationWebToken,omitempty"` + + // no documentation yet + PaymentRoleId *int `json:"paymentRoleId,omitempty" xmlrpc:"paymentRoleId,omitempty"` + + // The description of the type of payment sent in a change transaction. + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // A count of these are tickets tied to a credit card change request. + TicketAttachmentReferenceCount *uint `json:"ticketAttachmentReferenceCount,omitempty" xmlrpc:"ticketAttachmentReferenceCount,omitempty"` + + // These are tickets tied to a credit card change request. + TicketAttachmentReferences []Ticket_Attachment `json:"ticketAttachmentReferences,omitempty" xmlrpc:"ticketAttachmentReferences,omitempty"` + + // Unique identifier for a ticket discussing the switch between payment methods. + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` +} + +// The SoftLayer_Billing_Payment_Card_ManualPayment data type contains general information related to requesting a manual payment. 
This supports enablement of 3D Secure via Cardinal Cruise implementation that allows for credit card authentication and is currently limited to specified merchants. +type Billing_Payment_Card_ManualPayment struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account ID to which the credit card and billing information is associated with. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The total amount of the attempted transaction, represented in decimal format as US Dollars ($USD). + Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // This is the credit card transaction data tied to a credit card manual payment. + AuthorizedCreditCardTransaction *Billing_Payment_Card_Transaction `json:"authorizedCreditCardTransaction,omitempty" xmlrpc:"authorizedCreditCardTransaction,omitempty"` + + // The unique identifier of an attempted credit card transaction. + AuthorizedCreditCardTransactionId *int `json:"authorizedCreditCardTransactionId,omitempty" xmlrpc:"authorizedCreditCardTransactionId,omitempty"` + + // This is the PayPal transaction data tied to a PayPal manual payment. + AuthorizedPayPalTransaction *Billing_Payment_PayPal_Transaction `json:"authorizedPayPalTransaction,omitempty" xmlrpc:"authorizedPayPalTransaction,omitempty"` + + // The unique identifier of an attempted PayPal transaction. + AuthorizedPayPalTransactionId *int `json:"authorizedPayPalTransactionId,omitempty" xmlrpc:"authorizedPayPalTransactionId,omitempty"` + + // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 1. + BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"` + + // The second line in the address. Information such as suite number goes here. + BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"` + + // The city in which a customer's account resides. + BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"` + + // The 2-character Country code for an account's address. (i.e. US) + BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"` + + // The email address associated with a customer account. + BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"` + + // the company name for an account. + BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"` + + // The first name of the customer account owner. + BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"` + + // The last name of the customer account owner. + BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"` + + // The fax number associated with a customer account. + BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"` + + // The phone number associated with a customer account. + BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"` + + // The Zip or Postal Code for the billing address on an account. + BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"` + + // The State for the account. 
+ BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"` + + // The cancel URL is the page to which PayPal redirects if payment is not approved. + CancelUrl *string `json:"cancelUrl,omitempty" xmlrpc:"cancelUrl,omitempty"` + + // The SoftLayer_Billing_Payment_Card_Transaction tied to the capture performed as part of this manual payment. This will only exist if the manual payment was performed with a credit card. + CaptureCreditCardTransaction *Billing_Payment_Card_Transaction `json:"captureCreditCardTransaction,omitempty" xmlrpc:"captureCreditCardTransaction,omitempty"` + + // The SoftLayer_Billing_Payment_PayPal_Transaction tied to the capture performed as part of this manual payment. This will only exist if the manual payment was performed via PayPal. + CapturePayPalTransaction *Billing_Payment_PayPal_Transaction `json:"capturePayPalTransaction,omitempty" xmlrpc:"capturePayPalTransaction,omitempty"` + + // A hash value of the credit card number. + CardAccountHash *string `json:"cardAccountHash,omitempty" xmlrpc:"cardAccountHash,omitempty"` + + // The last 4 digits of a customer's credit card. + CardAccountLast4 *string `json:"cardAccountLast4,omitempty" xmlrpc:"cardAccountLast4,omitempty"` + + // The card number submitted in the change request. + CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"` + + // The month (MM) in which a customer's payment card will expire. + CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // The year (YYYY) in which a customer's payment card will expire. + CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // The method key of the type payment issued (Visa - 001, Mastercard - 002, American Express - 003, Discover - 004, PayPal - paypal). + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // The credit card verification number submitted in the change request. + CreditCardVerificationNumber *string `json:"creditCardVerificationNumber,omitempty" xmlrpc:"creditCardVerificationNumber,omitempty"` + + // Describes the currency selected for payment + CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"` + + // Device Fingerprint Identifier - Used internally and can safely be ignored. + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // The IP address from which the transaction originates. + FromIpAddress *string `json:"fromIpAddress,omitempty" xmlrpc:"fromIpAddress,omitempty"` + + // The unique identifier for a single manual payment request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Notes generated as a result of the payment request. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The purpose of this property is to allow enablement of 3D Secure (3DS). This is the Reference ID that corresponds to the device data for Payer Authentication. In order to properly enable 3DS, this will require implementation of Cardinal Cruise Hybrid. + // + // Please refer to https://cardinaldocs.atlassian.net/wiki/spaces/CC/pages/360668/Cardinal+Cruise+Hybrid and view section under "DFReferenceId / ReferenceId" to populate this property accordingly. 
+ PayerAuthenticationEnrollmentReferenceId *string `json:"payerAuthenticationEnrollmentReferenceId,omitempty" xmlrpc:"payerAuthenticationEnrollmentReferenceId,omitempty"` + + // "Continue with Consumer Authentication" decoded response JWT (JSON Web Token) after successful authentication. The response is part of the implementation of Cardinal Cruise Hybrid. + // + // Please refer to https://cardinaldocs.atlassian.net/wiki/spaces/CC/pages/360668/Cardinal+Cruise+Hybrid and view section under "Continue with Consumer Authentication" to populate this property accordingly based on the CCA response. + PayerAuthenticationWebToken *string `json:"payerAuthenticationWebToken,omitempty" xmlrpc:"payerAuthenticationWebToken,omitempty"` + + // The description of the type of payment sent in a change transaction. + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // The return URL is the page to which PayPal redirects after payment is approved. + ReturnUrl *string `json:"returnUrl,omitempty" xmlrpc:"returnUrl,omitempty"` + + // A count of these are tickets tied to a credit card manual payment. + TicketAttachmentReferenceCount *uint `json:"ticketAttachmentReferenceCount,omitempty" xmlrpc:"ticketAttachmentReferenceCount,omitempty"` + + // These are tickets tied to a credit card manual payment. + TicketAttachmentReferences []Ticket_Attachment `json:"ticketAttachmentReferences,omitempty" xmlrpc:"ticketAttachmentReferences,omitempty"` + + // Describes the type of manual payment (PAYPAL or CREDIT_CARD). + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Billing_Payment_Card_Transaction data type contains general information relating to attempted credit card transactions. +type Billing_Payment_Card_Transaction struct { + Billing_Payment_Transaction + + // The account to which a transaction belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account ID to which the credit card and billing information is associated with. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The total amount of the attempted transaction, represented in decimal format as US Dollars ($USD). + Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 1. + BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"` + + // The second line in the address. Information such as suite number goes here. + BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"` + + // The city in which a customer's account resides. + BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"` + + // The 2-character Country code for an account's address. (i.e. US) + BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"` + + // The email address associated with a customer account. + BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"` + + // the company name for an account. + BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"` + + // The first name of the customer account owner. + BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"` + + // The last name of the customer account owner. 
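To make the two Cardinal Cruise properties concrete, here is a hedged sketch of a hypothetical helper that attaches the device-data reference id and the decoded CCA JWT to a manual payment. It assumes the library's `sl.String`/`sl.Float` pointer helpers and the `datatypes`/`sl` imports shown in the earlier sketch; the card number is a standard test number and everything else is a placeholder:

```go
// buildManualPayment is hypothetical; it only shows where the 3DS artifacts
// produced by the client-side Cardinal flow are expected to land.
func buildManualPayment(dfReferenceId, ccaResponseJwt string) datatypes.Billing_Payment_Card_ManualPayment {
	return datatypes.Billing_Payment_Card_ManualPayment{
		Amount:              sl.Float(125.00),
		Type:                sl.String("CREDIT_CARD"),
		CardAccountNumber:   sl.String("4111111111111111"), // test card number
		CardExpirationMonth: sl.String("01"),
		CardExpirationYear:  sl.String("2030"),

		// DFReferenceId collected during the device-data step.
		PayerAuthenticationEnrollmentReferenceId: sl.String(dfReferenceId),
		// Decoded response JWT from "Continue with Consumer Authentication".
		PayerAuthenticationWebToken: sl.String(ccaResponseJwt),
	}
}
```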
+ BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"`
+
+ // The fax number associated with a customer account.
+ BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"`
+
+ // The phone number associated with a customer account.
+ BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"`
+
+ // The Zip or Postal Code for the billing address on an account.
+ BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"`
+
+ // The State for the account.
+ BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"`
+
+ // The last 4 digits of a customer's credit card.
+ CardAccountLast4 *int `json:"cardAccountLast4,omitempty" xmlrpc:"cardAccountLast4,omitempty"`
+
+ // The month (MM) in which a customer's payment card will expire.
+ CardExpirationMonth *int `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"`
+
+ // The year (YYYY) in which a customer's payment card will expire.
+ CardExpirationYear *int `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"`
+
+ // The type of payment issued (i.e. Visa, MasterCard, American Express).
+ CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"`
+
+ // The date that the transaction was attempted.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The unique identifier for a single credit card transaction request.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Unique identifier of the invoice to which funds will be applied.
+ InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"`
+
+ // The date that the transaction was modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+
+ // The IP address from which the transaction originates.
+ OrderFromIpAddress *string `json:"orderFromIpAddress,omitempty" xmlrpc:"orderFromIpAddress,omitempty"`
+
+ // A code used by the financial institution to refer to the requested transaction.
+ ReferenceCode *string `json:"referenceCode,omitempty" xmlrpc:"referenceCode,omitempty"`
+
+ // The unique identifier of the request submitted to the financial institution.
+ RequestId *string `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"`
+
+ // The status code returned from the financial institution.
+ ReturnStatus *int `json:"returnStatus,omitempty" xmlrpc:"returnStatus,omitempty"`
+
+ // A serialized, delimited string of the reply received from the financial institution.
+ SerializedReply *string `json:"serializedReply,omitempty" xmlrpc:"serializedReply,omitempty"`
+
+ // A serialized, delimited string of the transaction request sent to the financial institution.
+ SerializedRequest *string `json:"serializedRequest,omitempty" xmlrpc:"serializedRequest,omitempty"`
+}
+
+// The SoftLayer_Billing_Payment_PayPal_Transaction data type contains general information relating to attempted PayPal transactions.
+type Billing_Payment_PayPal_Transaction struct {
+ Billing_Payment_Transaction
+
+ // The account to which a transaction belongs.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The account ID to which the PayPal and billing information is associated with.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // City given in the address of the PayPal user. + AddressCityName *string `json:"addressCityName,omitempty" xmlrpc:"addressCityName,omitempty"` + + // Country given in the named address of the PayPal user. + AddressCountry *string `json:"addressCountry,omitempty" xmlrpc:"addressCountry,omitempty"` + + // Name given to the address provided for the PayPal user. + AddressName *string `json:"addressName,omitempty" xmlrpc:"addressName,omitempty"` + + // Postal Code of the address of the PayPal user. + AddressPostalCode *string `json:"addressPostalCode,omitempty" xmlrpc:"addressPostalCode,omitempty"` + + // State or Province in the address of the PayPal user. + AddressStateProvence *string `json:"addressStateProvence,omitempty" xmlrpc:"addressStateProvence,omitempty"` + + // PayPal defined status of the address of the PayPal user. + AddressStatus *string `json:"addressStatus,omitempty" xmlrpc:"addressStatus,omitempty"` + + // First line of the street address of the PayPal user. + AddressStreet1 *string `json:"addressStreet1,omitempty" xmlrpc:"addressStreet1,omitempty"` + + // Second line of the street address of the PayPal user. + AddressStreet2 *string `json:"addressStreet2,omitempty" xmlrpc:"addressStreet2,omitempty"` + + // Phone number provided for the PayPal user. + ContactPhone *string `json:"contactPhone,omitempty" xmlrpc:"contactPhone,omitempty"` + + // The date that the transaction was attempted. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Exchange rate imposed on the payment amount. + ExchangeRate *string `json:"exchangeRate,omitempty" xmlrpc:"exchangeRate,omitempty"` + + // PayPal fee applied to the payment. + FeeAmount *Float64 `json:"feeAmount,omitempty" xmlrpc:"feeAmount,omitempty"` + + // The total amount of the payment executed by PayPal, represented in decimal format as US Dollars ($USD). + GrossAmount *Float64 `json:"grossAmount,omitempty" xmlrpc:"grossAmount,omitempty"` + + // The unique identifier for a single PayPal transaction request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Unique identifier of the invoice to which funds will be applied. + InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"` + + // The name of the command issued to PayPal with regards to the attempted transaction. + LastPaypalCommand *string `json:"lastPaypalCommand,omitempty" xmlrpc:"lastPaypalCommand,omitempty"` + + // The date that the transaction was modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"` + + // The IP address from where the PayPal payment request originated. + OrderFromIpAddress *string `json:"orderFromIpAddress,omitempty" xmlrpc:"orderFromIpAddress,omitempty"` + + // The amount of the payment submitted through the SoftLayer interface, represented in decimal format as US Dollars ($USD). + OrderTotal *Float64 `json:"orderTotal,omitempty" xmlrpc:"orderTotal,omitempty"` + + // The PayPal user account name (email address) associated with the customer account. + Payer *string `json:"payer,omitempty" xmlrpc:"payer,omitempty"` + + // The name of the business associated with the PayPal user. + PayerBusiness *string `json:"payerBusiness,omitempty" xmlrpc:"payerBusiness,omitempty"` + + // Country given in the address of the PayPal user. 
+ PayerCountry *string `json:"payerCountry,omitempty" xmlrpc:"payerCountry,omitempty"` + + // First name of the PayPal user. + PayerFirstName *string `json:"payerFirstName,omitempty" xmlrpc:"payerFirstName,omitempty"` + + // Unique PayPal user account identifier. + PayerId *string `json:"payerId,omitempty" xmlrpc:"payerId,omitempty"` + + // Last name of the PayPal user. + PayerLastName *string `json:"payerLastName,omitempty" xmlrpc:"payerLastName,omitempty"` + + // Current PayPal status associated with the user account. + PayerStatus *string `json:"payerStatus,omitempty" xmlrpc:"payerStatus,omitempty"` + + // Date that the payment was confirmed in PayPal by the user. + PaymentDate *Time `json:"paymentDate,omitempty" xmlrpc:"paymentDate,omitempty"` + + // PayPal defined status of the attempted payment. + PaymentStatus *string `json:"paymentStatus,omitempty" xmlrpc:"paymentStatus,omitempty"` + + // PayPal defined code used to identify the type of payment. Provided in a PayPal response. + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // Reason provided by PayPal for a payment given a pending status. + PendingReason *string `json:"pendingReason,omitempty" xmlrpc:"pendingReason,omitempty"` + + // A serialized, delimited string of the reply received from PayPal. + SerializedReply *string `json:"serializedReply,omitempty" xmlrpc:"serializedReply,omitempty"` + + // A serialized, delimited string of the request submitted to PayPal. + SerializedRequest *string `json:"serializedRequest,omitempty" xmlrpc:"serializedRequest,omitempty"` + + // PayPal defined fee. + SettleAmount *Float64 `json:"settleAmount,omitempty" xmlrpc:"settleAmount,omitempty"` + + // Tax applied by PayPal to the payment amount. + TaxAmount *Float64 `json:"taxAmount,omitempty" xmlrpc:"taxAmount,omitempty"` + + // Value issued by PayPal for referencing the attempted transaction. + Token *string `json:"token,omitempty" xmlrpc:"token,omitempty"` + + // Unique transaction ID provided in a PayPal response. + TransactionId *string `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"` + + // PayPal defined code used to identify the type of transaction. Provided in a PayPal response. 
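Every one of these properties is a pointer that may be nil, for instance when the request's object mask excluded it, so reads should guard each dereference. An illustrative helper (assuming the `fmt` and `datatypes` imports from the earlier sketch):

```go
// describeTransaction guards every dereference; fields not returned by the
// API (or excluded by an object mask) stay nil.
func describeTransaction(t datatypes.Billing_Payment_PayPal_Transaction) string {
	id, status := "unknown", "unknown"
	if t.TransactionId != nil {
		id = *t.TransactionId
	}
	if t.PaymentStatus != nil {
		status = *t.PaymentStatus
	}
	gross := 0.0
	if t.GrossAmount != nil {
		gross = float64(*t.GrossAmount) // datatypes.Float64 converts to float64
	}
	return fmt.Sprintf("PayPal transaction %s: status=%s, gross=$%.2f USD", id, status, gross)
}
```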
+ TransactionType *string `json:"transactionType,omitempty" xmlrpc:"transactionType,omitempty"` +} + +// no documentation yet +type Billing_Payment_Processor struct { + Entity + + // A count of + BrandAssignmentCount *uint `json:"brandAssignmentCount,omitempty" xmlrpc:"brandAssignmentCount,omitempty"` + + // no documentation yet + BrandAssignments []Brand_Payment_Processor `json:"brandAssignments,omitempty" xmlrpc:"brandAssignments,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + OwnerAccount *Account `json:"ownerAccount,omitempty" xmlrpc:"ownerAccount,omitempty"` + + // A count of + PaymentMethodCount *uint `json:"paymentMethodCount,omitempty" xmlrpc:"paymentMethodCount,omitempty"` + + // no documentation yet + PaymentMethods []Billing_Payment_Processor_Method `json:"paymentMethods,omitempty" xmlrpc:"paymentMethods,omitempty"` + + // no documentation yet + Type *Billing_Payment_Processor_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Billing_Payment_Processor_Method struct { + Entity + + // no documentation yet + MethodKey *string `json:"methodKey,omitempty" xmlrpc:"methodKey,omitempty"` + + // no documentation yet + MultipleCurrencyFlag *bool `json:"multipleCurrencyFlag,omitempty" xmlrpc:"multipleCurrencyFlag,omitempty"` + + // no documentation yet + PaymentProcessor *Billing_Payment_Processor `json:"paymentProcessor,omitempty" xmlrpc:"paymentProcessor,omitempty"` + + // no documentation yet + PaymentType *Billing_Payment_Type `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` +} + +// no documentation yet +type Billing_Payment_Processor_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + PaymentProcessorCount *uint `json:"paymentProcessorCount,omitempty" xmlrpc:"paymentProcessorCount,omitempty"` + + // no documentation yet + PaymentProcessors []Billing_Payment_Processor `json:"paymentProcessors,omitempty" xmlrpc:"paymentProcessors,omitempty"` +} + +// Implementation for payment transactions. +type Billing_Payment_Transaction struct { + Entity +} + +// no documentation yet +type Billing_Payment_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/brand.go b/vendor/github.com/softlayer/softlayer-go/datatypes/brand.go new file mode 100644 index 00000000000..fd26d2a238b --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/brand.go @@ -0,0 +1,265 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Brand data type contains brand information relating to the single SoftLayer customer account. +// +// IBM Cloud Infrastructure customers are unable to change their brand information in the portal or the API. +type Brand struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A count of all accounts owned by the brand. + AllOwnedAccountCount *uint `json:"allOwnedAccountCount,omitempty" xmlrpc:"allOwnedAccountCount,omitempty"` + + // All accounts owned by the brand. + AllOwnedAccounts []Account `json:"allOwnedAccounts,omitempty" xmlrpc:"allOwnedAccounts,omitempty"` + + // This flag indicates if creation of accounts is allowed. + AllowAccountCreationFlag *bool `json:"allowAccountCreationFlag,omitempty" xmlrpc:"allowAccountCreationFlag,omitempty"` + + // Business Partner details for the brand. Country Enterprise Code, Channel, Segment, Reseller Level. + BusinessPartner *Brand_Business_Partner `json:"businessPartner,omitempty" xmlrpc:"businessPartner,omitempty"` + + // Flag indicating if the brand is a business partner. + BusinessPartnerFlag *bool `json:"businessPartnerFlag,omitempty" xmlrpc:"businessPartnerFlag,omitempty"` + + // The Product Catalog for the Brand + Catalog *Product_Catalog `json:"catalog,omitempty" xmlrpc:"catalog,omitempty"` + + // ID of the Catalog used by this Brand + CatalogId *int `json:"catalogId,omitempty" xmlrpc:"catalogId,omitempty"` + + // A count of the contacts for the brand. + ContactCount *uint `json:"contactCount,omitempty" xmlrpc:"contactCount,omitempty"` + + // The contacts for the brand. + Contacts []Brand_Contact `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"` + + // A count of this references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain. + CustomerCountryLocationRestrictionCount *uint `json:"customerCountryLocationRestrictionCount,omitempty" xmlrpc:"customerCountryLocationRestrictionCount,omitempty"` + + // This references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain. + CustomerCountryLocationRestrictions []Brand_Restriction_Location_CustomerCountry `json:"customerCountryLocationRestrictions,omitempty" xmlrpc:"customerCountryLocationRestrictions,omitempty"` + + // no documentation yet + Distributor *Brand `json:"distributor,omitempty" xmlrpc:"distributor,omitempty"` + + // no documentation yet + DistributorChildFlag *bool `json:"distributorChildFlag,omitempty" xmlrpc:"distributorChildFlag,omitempty"` + + // no documentation yet + DistributorFlag *string `json:"distributorFlag,omitempty" xmlrpc:"distributorFlag,omitempty"` + + // An account's associated hardware objects. 
+ Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of an account's associated hardware objects. + HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // no documentation yet + HasAgentAdvancedSupportFlag *bool `json:"hasAgentAdvancedSupportFlag,omitempty" xmlrpc:"hasAgentAdvancedSupportFlag,omitempty"` + + // no documentation yet + HasAgentSupportFlag *bool `json:"hasAgentSupportFlag,omitempty" xmlrpc:"hasAgentSupportFlag,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The brand key name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The brand long name. + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // The brand name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + OpenTicketCount *uint `json:"openTicketCount,omitempty" xmlrpc:"openTicketCount,omitempty"` + + // no documentation yet + OpenTickets []Ticket `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"` + + // A count of active accounts owned by the brand. + OwnedAccountCount *uint `json:"ownedAccountCount,omitempty" xmlrpc:"ownedAccountCount,omitempty"` + + // Active accounts owned by the brand. + OwnedAccounts []Account `json:"ownedAccounts,omitempty" xmlrpc:"ownedAccounts,omitempty"` + + // no documentation yet + SecurityLevel *Security_Level `json:"securityLevel,omitempty" xmlrpc:"securityLevel,omitempty"` + + // A count of + TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"` + + // A count of + TicketGroupCount *uint `json:"ticketGroupCount,omitempty" xmlrpc:"ticketGroupCount,omitempty"` + + // no documentation yet + TicketGroups []Ticket_Group `json:"ticketGroups,omitempty" xmlrpc:"ticketGroups,omitempty"` + + // no documentation yet + Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"` + + // A count of + UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"` + + // no documentation yet + Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"` + + // A count of an account's associated virtual guest objects. + VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"` + + // An account's associated virtual guest objects. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` +} + +// no documentation yet +type Brand_Attribute struct { + Entity + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` +} + +// Contains business partner details associated with a brand. Country Enterprise Identifier (CEID), Channel ID, Segment ID and Reseller Level. +type Brand_Business_Partner struct { + Entity + + // Brand associated with the business partner data + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // Channel indicator used to categorize business partner revenue. 
+ Channel *Business_Partner_Channel `json:"channel,omitempty" xmlrpc:"channel,omitempty"` + + // Brand business partner channel identifier + ChannelId *int `json:"channelId,omitempty" xmlrpc:"channelId,omitempty"` + + // Brand business partner country enterprise code + CountryEnterpriseCode *string `json:"countryEnterpriseCode,omitempty" xmlrpc:"countryEnterpriseCode,omitempty"` + + // Reseller level of a brand business partner + ResellerLevel *int `json:"resellerLevel,omitempty" xmlrpc:"resellerLevel,omitempty"` + + // Segment indicator used to categorize business partner revenue. + Segment *Business_Partner_Segment `json:"segment,omitempty" xmlrpc:"segment,omitempty"` + + // Brand business partner segment identifier + SegmentId *int `json:"segmentId,omitempty" xmlrpc:"segmentId,omitempty"` +} + +// SoftLayer_Brand_Contact contains the contact information for the brand such as Corporate or Support contact information +type Brand_Contact struct { + Entity + + // The contact's address 1. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The contact's address 2. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // The contact's alternate phone number. + AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"` + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // no documentation yet + BrandContactType *Brand_Contact_Type `json:"brandContactType,omitempty" xmlrpc:"brandContactType,omitempty"` + + // The contact's type identifier. + BrandContactTypeId *int `json:"brandContactTypeId,omitempty" xmlrpc:"brandContactTypeId,omitempty"` + + // The contact's city. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // The contact's country. + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The contact's email address. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // The contact's fax number. + FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"` + + // The contact's first name. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // The contact's last name. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // The contact's phone number. + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // The contact's postal code. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // The contact's state. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// SoftLayer_Brand_Contact_Type contains the contact type information for the brand contacts such as Corporate or Support contact type +type Brand_Contact_Type struct { + Entity + + // Contact type description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Contact type key name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // Contact type name. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Brand_Payment_Processor struct { + Entity + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // no documentation yet + PaymentProcessor *Billing_Payment_Processor `json:"paymentProcessor,omitempty" xmlrpc:"paymentProcessor,omitempty"` +} + +// The [[SoftLayer_Brand_Restriction_Location_CustomerCountry]] data type defines the relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on the SoftLayer US brand for customers that live in Great Britain. +type Brand_Restriction_Location_CustomerCountry struct { + Entity + + // This references the brand that has a brand-location-country restriction setup. + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // The brand associated with customer's account. + BrandId *int `json:"brandId,omitempty" xmlrpc:"brandId,omitempty"` + + // country code associated with customer's account. + CustomerCountryCode *string `json:"customerCountryCode,omitempty" xmlrpc:"customerCountryCode,omitempty"` + + // This references the datacenter that has a brand-location-country restriction setup. For example, if a datacenter is listed with a restriction for Canada, a Canadian customer may not be eligible to order services at that location. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The id for datacenter location. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/business.go b/vendor/github.com/softlayer/softlayer-go/datatypes/business.go new file mode 100644 index 00000000000..0d57545bca0 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/business.go @@ -0,0 +1,43 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// Contains business partner channel information +type Business_Partner_Channel struct { + Entity + + // Business partner channel description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Business partner channel name + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// Contains business partner segment information +type Business_Partner_Segment struct { + Entity + + // Business partner segment description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Business partner segment name + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go b/vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go new file mode 100644 index 00000000000..9134e98d46d --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go @@ -0,0 +1,234 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Catalyst_Affiliate struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + SkipCreditCardVerificationFlag *bool `json:"skipCreditCardVerificationFlag,omitempty" xmlrpc:"skipCreditCardVerificationFlag,omitempty"` +} + +// no documentation yet +type Catalyst_Company_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Catalyst_Enrollment struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Affiliate *Catalyst_Affiliate `json:"affiliate,omitempty" xmlrpc:"affiliate,omitempty"` + + // no documentation yet + AffiliateId *int `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // no documentation yet + AgreementCompleteFlag *int `json:"agreementCompleteFlag,omitempty" xmlrpc:"agreementCompleteFlag,omitempty"` + + // no documentation yet + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // no documentation yet + CompanyType *Catalyst_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // no documentation yet + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // no documentation yet + EnrollmentDate *Time `json:"enrollmentDate,omitempty" 
xmlrpc:"enrollmentDate,omitempty"` + + // no documentation yet + GraduationDate *Time `json:"graduationDate,omitempty" xmlrpc:"graduationDate,omitempty"` + + // no documentation yet + IsActiveFlag *bool `json:"isActiveFlag,omitempty" xmlrpc:"isActiveFlag,omitempty"` + + // no documentation yet + MonthlyCreditAmount *Float64 `json:"monthlyCreditAmount,omitempty" xmlrpc:"monthlyCreditAmount,omitempty"` + + // no documentation yet + Representative *User_Employee `json:"representative,omitempty" xmlrpc:"representative,omitempty"` + + // no documentation yet + RepresentativeEmployeeId *int `json:"representativeEmployeeId,omitempty" xmlrpc:"representativeEmployeeId,omitempty"` +} + +// Contains user information for Catalyst self-enrollment. +type Catalyst_Enrollment_Request struct { + Entity + + // Applicant's address + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Additional field for extended address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + Affiliate *Catalyst_Affiliate `json:"affiliate,omitempty" xmlrpc:"affiliate,omitempty"` + + // Id of the affiliate who referred applicant's + AffiliateId *int `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // no documentation yet + AgreementCompleteFlag *bool `json:"agreementCompleteFlag,omitempty" xmlrpc:"agreementCompleteFlag,omitempty"` + + // Determines whether or not to also apply to the GEP program + ApplyToGepFlag *bool `json:"applyToGepFlag,omitempty" xmlrpc:"applyToGepFlag,omitempty"` + + // no documentation yet + CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"` + + // no documentation yet + CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // no documentation yet + CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // no documentation yet + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // no documentation yet + CardVerificationNumber *string `json:"cardVerificationNumber,omitempty" xmlrpc:"cardVerificationNumber,omitempty"` + + // Applicant's city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Brief description of Startup's product and key differentiators + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // Name of the applicant's company + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + CompanyType *Catalyst_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // Id of the company type which best describes applicant's company + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // URL to the Startup's site + CompanyUrl *string `json:"companyUrl,omitempty" xmlrpc:"companyUrl,omitempty"` + + // Applicant's country code + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Index of answer chosen for how many current users question + CurrentUserChoice *int `json:"currentUserChoice,omitempty" xmlrpc:"currentUserChoice,omitempty"` + + // Id of the fingerprint + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // Applicant's email address + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Applicant's first name + FirstName *string 
`json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Index of answer chosen for how many future users question + FutureUserChoice *int `json:"futureUserChoice,omitempty" xmlrpc:"futureUserChoice,omitempty"` + + // Master user's IBMId username + IbmIdUsername *string `json:"ibmIdUsername,omitempty" xmlrpc:"ibmIdUsername,omitempty"` + + // Name of accelerator or incubator startup belongs to, if any + IncubatorName *string `json:"incubatorName,omitempty" xmlrpc:"incubatorName,omitempty"` + + // Name of the investor, if any + InvestorName *string `json:"investorName,omitempty" xmlrpc:"investorName,omitempty"` + + // Applicant's last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + MarketingConsent *Catalyst_Enrollment_Request_MarketingConsent `json:"marketingConsent,omitempty" xmlrpc:"marketingConsent,omitempty"` + + // Applicant's primary phone number + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // Whether or not the startup has been operating for more than five years + OverFiveYearsOldFlag *bool `json:"overFiveYearsOldFlag,omitempty" xmlrpc:"overFiveYearsOldFlag,omitempty"` + + // Applicant's postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // IBM referral code, if any + ReferralCode *string `json:"referralCode,omitempty" xmlrpc:"referralCode,omitempty"` + + // Whether or not the startup has over one million in annual revenue + RevenueOverOneMillionFlag *bool `json:"revenueOverOneMillionFlag,omitempty" xmlrpc:"revenueOverOneMillionFlag,omitempty"` + + // Determines whether or not to apply to the Catalyst program + SkipCatalystApplicationFlag *bool `json:"skipCatalystApplicationFlag,omitempty" xmlrpc:"skipCatalystApplicationFlag,omitempty"` + + // Applicant's state/region code + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // Applicant's vatId, if one exists + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// no documentation yet +type Catalyst_Enrollment_Request_Container_AnswerOption struct { + Entity + + // no documentation yet + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // no documentation yet + Index *int `json:"index,omitempty" xmlrpc:"index,omitempty"` +} + +// Contains user marketing consent information for Catalyst self-enrollment. +type Catalyst_Enrollment_Request_MarketingConsent struct { + Entity + + // no documentation yet + MarketingByEmailFlag *bool `json:"marketingByEmailFlag,omitempty" xmlrpc:"marketingByEmailFlag,omitempty"` + + // no documentation yet + MarketingByPostalMailFlag *bool `json:"marketingByPostalMailFlag,omitempty" xmlrpc:"marketingByPostalMailFlag,omitempty"` + + // no documentation yet + MarketingByTelephoneFlag *bool `json:"marketingByTelephoneFlag,omitempty" xmlrpc:"marketingByTelephoneFlag,omitempty"` + + // no documentation yet + Request *Catalyst_Enrollment_Request `json:"request,omitempty" xmlrpc:"request,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go b/vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go new file mode 100644 index 00000000000..0efbbf9292e --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go @@ -0,0 +1,35 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// no documentation yet
+type Compliance_Report_Type struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go b/vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go
new file mode 100644
index 00000000000..c4e9a7d52a4
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go
@@ -0,0 +1,594 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// no documentation yet
+type Configuration_Storage_Filesystem_Type struct {
+ Entity
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// This class describes the base Storage Group for a Complex Drive Configuration
+type Configuration_Storage_Group struct {
+ Entity
+
+ // A count of this storage group's ancestors
+ AncestorGroupCount *uint `json:"ancestorGroupCount,omitempty" xmlrpc:"ancestorGroupCount,omitempty"`
+
+ // This class represents a storage group's ancestors
+ AncestorGroups []Configuration_Storage_Group `json:"ancestorGroups,omitempty" xmlrpc:"ancestorGroups,omitempty"`
+
+ // This class represents a storage group's disk array type
+ ArrayType *Configuration_Storage_Group_Array_Type `json:"arrayType,omitempty" xmlrpc:"arrayType,omitempty"`
+
+ // Determines whether the storage group is able to be image captured. If it cannot be captured, the reasons will be provided.
+ CaptureEnabledFlag *Container_Hardware_CaptureEnabled `json:"captureEnabledFlag,omitempty" xmlrpc:"captureEnabledFlag,omitempty"`
+
+ // no documentation yet
+ CreateEmployee *User_Employee `json:"createEmployee,omitempty" xmlrpc:"createEmployee,omitempty"`
+
+ // A count of this storage group's descendants
+ DescendantGroupCount *uint `json:"descendantGroupCount,omitempty" xmlrpc:"descendantGroupCount,omitempty"`
+
+ // This class represents a storage group's descendants
+ DescendantGroups []Configuration_Storage_Group `json:"descendantGroups,omitempty" xmlrpc:"descendantGroups,omitempty"`
+
+ // Storage group description
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Storage group disk space
+ DiskSpace *Float64 `json:"diskSpace,omitempty" xmlrpc:"diskSpace,omitempty"`
+
+ // A count of the hard drives contained within this storage group.
+ HardDriveCount *uint `json:"hardDriveCount,omitempty" xmlrpc:"hardDriveCount,omitempty"`
+
+ // The hard drives contained within this storage group.
+ HardDrives []Hardware_Component `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"`
+
+ // Storage group type id
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Flag to indicate if the storage group is set up for LVM
+ LvmFlag *bool `json:"lvmFlag,omitempty" xmlrpc:"lvmFlag,omitempty"`
+
+ // Storage group name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // Storage group disk size units
+ Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"`
+}
+
+// Supported hardware RAID modes
+type Configuration_Storage_Group_Array_Type struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ DriveMultiplier *int `json:"driveMultiplier,omitempty" xmlrpc:"driveMultiplier,omitempty"`
+
+ // A count of
+ HardwareComponentModelCount *uint `json:"hardwareComponentModelCount,omitempty" xmlrpc:"hardwareComponentModelCount,omitempty"`
+
+ // no documentation yet
+ HardwareComponentModels []Hardware_Component_Model `json:"hardwareComponentModels,omitempty" xmlrpc:"hardwareComponentModels,omitempty"`
+
+ // no documentation yet
+ HotspareAllow *bool `json:"hotspareAllow,omitempty" xmlrpc:"hotspareAllow,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ MaximumDrives *int `json:"maximumDrives,omitempty" xmlrpc:"maximumDrives,omitempty"`
+
+ // no documentation yet
+ MinimumDrives *int `json:"minimumDrives,omitempty" xmlrpc:"minimumDrives,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
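The MinimumDrives, MaximumDrives and DriveMultiplier limits above suggest a straightforward client-side sanity check. The sketch below is a hypothetical reading of those constraints as the field names imply them, not an API call (same `datatypes` import as the earlier sketches):

```go
// validDriveCount checks a proposed drive count against an array type:
// within the min/max bounds and an even multiple of DriveMultiplier
// (for example, a RAID mode that needs drives in pairs).
func validDriveCount(t datatypes.Configuration_Storage_Group_Array_Type, drives int) bool {
	if t.MinimumDrives != nil && drives < *t.MinimumDrives {
		return false
	}
	if t.MaximumDrives != nil && drives > *t.MaximumDrives {
		return false
	}
	if t.DriveMultiplier != nil && *t.DriveMultiplier > 0 && drives % *t.DriveMultiplier != 0 {
		return false
	}
	return true
}
```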
+
+// Single storage group (array) used for a hardware server order.
+//
+// If a RAID configuration is required, this object will describe a single array that will be configured on the server. If the server requires more than one array, a storage group will need to be created for each array.
+type Configuration_Storage_Group_Order struct {
+ Entity
+
+ // no documentation yet
+ ArrayNumber *int `json:"arrayNumber,omitempty" xmlrpc:"arrayNumber,omitempty"`
+
+ // no documentation yet
+ ArraySize *Float64 `json:"arraySize,omitempty" xmlrpc:"arraySize,omitempty"`
+
+ // RAID mode for the storage group.
+ ArrayType *Configuration_Storage_Group_Array_Type `json:"arrayType,omitempty" xmlrpc:"arrayType,omitempty"`
+
+ // no documentation yet
+ ArrayTypeId *int `json:"arrayTypeId,omitempty" xmlrpc:"arrayTypeId,omitempty"`
+
+ // The order item that relates to this storage group.
+ BillingOrderItem *Billing_Order_Item `json:"billingOrderItem,omitempty" xmlrpc:"billingOrderItem,omitempty"`
+
+ // no documentation yet
+ BillingOrderItemId *int `json:"billingOrderItemId,omitempty" xmlrpc:"billingOrderItemId,omitempty"`
+
+ // no documentation yet
+ Controller *int `json:"controller,omitempty" xmlrpc:"controller,omitempty"`
+
+ // no documentation yet
+ HardDrives []int `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"`
+
+ // no documentation yet
+ HotSpareDrives []int `json:"hotSpareDrives,omitempty" xmlrpc:"hotSpareDrives,omitempty"`
+
+ // no documentation yet
+ LvmFlag *bool `json:"lvmFlag,omitempty" xmlrpc:"lvmFlag,omitempty"`
+
+ // no documentation yet
+ PartitionData *string `json:"partitionData,omitempty" xmlrpc:"partitionData,omitempty"`
+}
+
+// Single storage group (array) used in a storage group template.
+//
+// If a server configuration requires a RAID configuration, this object will describe a single array to be configured.
+type Configuration_Storage_Group_Template_Group struct {
+ Entity
+
+ // The disk controller for the array.
+ DiskControllerIndex *int `json:"diskControllerIndex,omitempty" xmlrpc:"diskControllerIndex,omitempty"`
+
+ // Flag to use all available space.
+ Grow *bool `json:"grow,omitempty" xmlrpc:"grow,omitempty"`
+
+ // Comma-delimited integers of drive indexes for the array. This can also be the string 'all' to specify all drives in the server.
+ HardDrivesString *string `json:"hardDrivesString,omitempty" xmlrpc:"hardDrivesString,omitempty"`
+
+ // Comma-delimited integers of drive indexes for hot spares on the array.
+ HotSpareDrivesString *string `json:"hotSpareDrivesString,omitempty" xmlrpc:"hotSpareDrivesString,omitempty"`
+
+ // The order of the arrays in the template.
+ OrderIndex *int `json:"orderIndex,omitempty" xmlrpc:"orderIndex,omitempty"`
+
+ // Size of the array. Must be within the limitations of the smallest drive and the RAID mode.
+ Size *Float64 `json:"size,omitempty" xmlrpc:"size,omitempty"`
+
+ // no documentation yet
+ Type *Configuration_Storage_Group_Array_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
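The two ordering shapes are easy to conflate, so here is a sketch contrasting them; every id and index is a placeholder, and `sl.Int`/`sl.Bool`/`sl.String` are the library's pointer helpers used in the earlier sketches:

```go
// An explicit array for a hardware order: drives 0 and 1 in the array,
// drive 2 as a hot spare. The array type id stands in for the desired
// RAID mode.
raidArray := datatypes.Configuration_Storage_Group_Order{
	ArrayNumber:        sl.Int(1),
	ArrayTypeId:        sl.Int(2), // placeholder RAID-mode id
	BillingOrderItemId: sl.Int(987654),
	Controller:         sl.Int(0),
	HardDrives:         []int{0, 1},
	HotSpareDrives:     []int{2},
	LvmFlag:            sl.Bool(false),
}

// The template flavor is declarative: span every drive in the server and
// grow into all remaining space.
templateGroup := datatypes.Configuration_Storage_Group_Template_Group{
	DiskControllerIndex: sl.Int(0),
	OrderIndex:          sl.Int(0),
	HardDrivesString:    sl.String("all"),
	Grow:                sl.Bool(true),
}
```
+// The SoftLayer_Configuration_Template data type contains general information of an arbitrary resource.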
+type Configuration_Template struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Internal identifier of a SoftLayer account that this configuration template belongs to + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of + ConfigurationSectionCount *uint `json:"configurationSectionCount,omitempty" xmlrpc:"configurationSectionCount,omitempty"` + + // no documentation yet + ConfigurationSections []Configuration_Template_Section `json:"configurationSections,omitempty" xmlrpc:"configurationSections,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of + DefaultValueCount *uint `json:"defaultValueCount,omitempty" xmlrpc:"defaultValueCount,omitempty"` + + // no documentation yet + DefaultValues []Configuration_Template_Section_Definition_Value `json:"defaultValues,omitempty" xmlrpc:"defaultValues,omitempty"` + + // A count of + DefinitionCount *uint `json:"definitionCount,omitempty" xmlrpc:"definitionCount,omitempty"` + + // no documentation yet + Definitions []Configuration_Template_Section_Definition `json:"definitions,omitempty" xmlrpc:"definitions,omitempty"` + + // Configuration template description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration template. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // Internal identifier of a product item that this configuration template is associated with + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // no documentation yet + LinkedSectionReferences *Configuration_Template_Section_Reference `json:"linkedSectionReferences,omitempty" xmlrpc:"linkedSectionReferences,omitempty"` + + // Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Configuration template name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Parent *Configuration_Template `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // Internal identifier of the parent configuration template + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // Internal identifier of a user that last modified this configuration template + UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"` +} + +// Configuration template attribute class contains supplementary information for a configuration template. +type Configuration_Template_Attribute struct { + Entity + + // no documentation yet + ConfigurationTemplate *Configuration_Template `json:"configurationTemplate,omitempty" xmlrpc:"configurationTemplate,omitempty"` + + // Value of a configuration template attribute + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Configuration_Template_Section data type contains information of a configuration section. +// +// Configuration can contain sub-sections. 
+type Configuration_Template_Section struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of + DefinitionCount *uint `json:"definitionCount,omitempty" xmlrpc:"definitionCount,omitempty"` + + // no documentation yet + Definitions []Configuration_Template_Section_Definition `json:"definitions,omitempty" xmlrpc:"definitions,omitempty"` + + // Configuration section description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + DisallowedDeletionFlag *bool `json:"disallowedDeletionFlag,omitempty" xmlrpc:"disallowedDeletionFlag,omitempty"` + + // Internal identifier of a configuration section. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LinkedTemplate *Configuration_Template `json:"linkedTemplate,omitempty" xmlrpc:"linkedTemplate,omitempty"` + + // Internal identifier of a sub configuration template that this section points to. Use this property if you wish to create a reference to a sub configuration template when creating a linked section. + LinkedTemplateId *string `json:"linkedTemplateId,omitempty" xmlrpc:"linkedTemplateId,omitempty"` + + // no documentation yet + LinkedTemplateReference *Configuration_Template_Section_Reference `json:"linkedTemplateReference,omitempty" xmlrpc:"linkedTemplateReference,omitempty"` + + // Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Configuration section name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Internal identifier of the parent configuration section + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // A count of + ProfileCount *uint `json:"profileCount,omitempty" xmlrpc:"profileCount,omitempty"` + + // no documentation yet + Profiles []Configuration_Template_Section_Profile `json:"profiles,omitempty" xmlrpc:"profiles,omitempty"` + + // no documentation yet + SectionType *Configuration_Template_Section_Type `json:"sectionType,omitempty" xmlrpc:"sectionType,omitempty"` + + // no documentation yet + SectionTypeName *string `json:"sectionTypeName,omitempty" xmlrpc:"sectionTypeName,omitempty"` + + // Sort order + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // A count of + SubSectionCount *uint `json:"subSectionCount,omitempty" xmlrpc:"subSectionCount,omitempty"` + + // no documentation yet + SubSections []Configuration_Template_Section `json:"subSections,omitempty" xmlrpc:"subSections,omitempty"` + + // no documentation yet + Template *Configuration_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // Internal identifier of a configuration template that this section belongs to + TemplateId *string `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` + + // Internal identifier of the configuration section type + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// Configuration section attribute class contains supplementary information for a configuration section. +type Configuration_Template_Section_Attribute struct { + Entity + + // no documentation yet + ConfigurationSection *Configuration_Template_Section `json:"configurationSection,omitempty" xmlrpc:"configurationSection,omitempty"` + + // Value of a configuration section attribute + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Configuration definition gives you details of the value that you're setting. 
+// +// If value type is defined as "Resource Specific Values", you will have to make an additional API call to retrieve your system specific values. +// +// +type Configuration_Template_Section_Definition struct { + Entity + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Configuration_Template_Section_Definition_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DefaultValue *Configuration_Template_Section_Definition_Value `json:"defaultValue,omitempty" xmlrpc:"defaultValue,omitempty"` + + // Description of a configuration definition. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Enumeration values separated by comma. + EnumerationValues *string `json:"enumerationValues,omitempty" xmlrpc:"enumerationValues,omitempty"` + + // no documentation yet + Group *Configuration_Template_Section_Definition_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // Definition group id. + GroupId *string `json:"groupId,omitempty" xmlrpc:"groupId,omitempty"` + + // Internal identifier of a configuration definition. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Maximum value of a configuration definition. + MaximumValue *string `json:"maximumValue,omitempty" xmlrpc:"maximumValue,omitempty"` + + // Minimum value of a configuration definition. + MinimumValue *string `json:"minimumValue,omitempty" xmlrpc:"minimumValue,omitempty"` + + // Last modify date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + MonitoringDataFlag *bool `json:"monitoringDataFlag,omitempty" xmlrpc:"monitoringDataFlag,omitempty"` + + // Configuration definition name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Definition path. + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // Indicates if a configuration value is required for this definition. + RequireValueFlag *int `json:"requireValueFlag,omitempty" xmlrpc:"requireValueFlag,omitempty"` + + // no documentation yet + Section *Configuration_Template_Section `json:"section,omitempty" xmlrpc:"section,omitempty"` + + // Internal identifier of a configuration section. + SectionId *int `json:"sectionId,omitempty" xmlrpc:"sectionId,omitempty"` + + // Shortened configuration definition name. + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` + + // Sort order + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // Internal identifier of a configuration definition type. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // no documentation yet + ValueType *Configuration_Template_Section_Definition_Type `json:"valueType,omitempty" xmlrpc:"valueType,omitempty"` +} + +// Configuration definition attribute class contains supplementary information for a configuration definition. 
+type Configuration_Template_Section_Definition_Attribute struct { + Entity + + // no documentation yet + AttributeType *Configuration_Template_Section_Definition_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"` + + // no documentation yet + ConfigurationDefinition *Configuration_Template_Section_Definition `json:"configurationDefinition,omitempty" xmlrpc:"configurationDefinition,omitempty"` + + // Value of a configuration definition attribute + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// SoftLayer_Configuration_Template_Attribute_Type models the type of attribute that can be assigned to a configuration definition. +type Configuration_Template_Section_Definition_Attribute_Type struct { + Entity + + // Description of a definition attribute type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Name of a definition attribute type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Configuration definition group gives you details of the definition and allows extra functionality. +// +// +type Configuration_Template_Section_Definition_Group struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Internal Description of a definition group. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a definition group. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Internal Definition group name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Parent *Configuration_Template_Section_Definition_Group `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // Sort order + SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` +} + +// SoftLayer_Configuration_Template_Section_Definition_Type further defines the value of a configuration definition. 
+type Configuration_Template_Section_Definition_Type struct {
+ Entity
+
+ // Description of a configuration value type
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Internal identifier of a configuration value type
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Name of a configuration value type
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// SoftLayer_Configuration_Section_Value is used to set the value for a configuration definition
+type Configuration_Template_Section_Definition_Value struct {
+ Entity
+
+ // Created date
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Definition *Configuration_Template_Section_Definition `json:"definition,omitempty" xmlrpc:"definition,omitempty"`
+
+ // Internal identifier of a configuration definition that this configuration value is defined by
+ DefinitionId *int `json:"definitionId,omitempty" xmlrpc:"definitionId,omitempty"`
+
+ // Internal Last modified date
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Template *Configuration_Template `json:"template,omitempty" xmlrpc:"template,omitempty"`
+
+ // Internal identifier of a configuration template that this configuration value belongs to
+ TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"`
+
+ // Internal Configuration value
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// Some configuration templates let you create unique configuration profiles.
+//
+// For example, you can create multiple configuration profiles to monitor multiple hard drives with "CPU/Memory/Disk Monitoring Agent". SoftLayer_Configuration_Template_Section_Profile helps you keep track of custom configuration profiles.
+type Configuration_Template_Section_Profile struct {
+ Entity
+
+ // Internal identifier of a monitoring agent this profile belongs to.
+ AgentId *int `json:"agentId,omitempty" xmlrpc:"agentId,omitempty"`
+
+ // no documentation yet
+ ConfigurationSection *Configuration_Template_Section `json:"configurationSection,omitempty" xmlrpc:"configurationSection,omitempty"`
+
+ // Created date
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // Internal identifier of a configuration profile.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Name of a configuration profile
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // Internal identifier of a configuration section that this profile belongs to.
+ SectionId *int `json:"sectionId,omitempty" xmlrpc:"sectionId,omitempty"`
+}
+
+// The SoftLayer_Configuration_Template_Section_Reference data type contains information of a configuration section and its associated configuration template.
+type Configuration_Template_Section_Reference struct {
+ Entity
+
+ // Created date
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // Internal identifier of a configuration section reference.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Modified date
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Section *Configuration_Template_Section `json:"section,omitempty" xmlrpc:"section,omitempty"`
+
+ // Internal identifier of a configuration section.
+ SectionId *int `json:"sectionId,omitempty" xmlrpc:"sectionId,omitempty"` + + // no documentation yet + Template *Configuration_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // Internal identifier of a configuration template. + TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` +} + +// The SoftLayer_Configuration_Template_Section_Type data type contains information of a configuration section type. +// +// Configuration can contain sub-sections. +type Configuration_Template_Section_Type struct { + Entity + + // Configuration section type description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration section type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Configuration section type name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Configuration_Template_Type data type contains configuration template type information. +type Configuration_Template_Type struct { + Entity + + // Created date. This is deprecated now. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Description of a configuration template + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration template type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of a configuration template type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/container.go b/vendor/github.com/softlayer/softlayer-go/datatypes/container.go new file mode 100644 index 00000000000..6e3d69ce4f5 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/container.go @@ -0,0 +1,6255 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// no documentation yet
+type Container_Account_Authentication_OpenIdConnect_UsernameLookupContainer struct {
+ Entity
+
+ // no documentation yet
+ Active *bool `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // no documentation yet
+ EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"`
+
+ // no documentation yet
+ Federated *bool `json:"federated,omitempty" xmlrpc:"federated,omitempty"`
+
+ // no documentation yet
+ FoundAs *string `json:"foundAs,omitempty" xmlrpc:"foundAs,omitempty"`
+
+ // no documentation yet
+ NumberOfIbmIdsWithEmailAddress *int `json:"numberOfIbmIdsWithEmailAddress,omitempty" xmlrpc:"numberOfIbmIdsWithEmailAddress,omitempty"`
+
+ // no documentation yet
+ Realm *string `json:"realm,omitempty" xmlrpc:"realm,omitempty"`
+
+ // no documentation yet
+ UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"`
+
+ // no documentation yet
+ Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// SoftLayer_Container_Account_Discount_Program models an account's enrollment in a Flexible Credit Program discount and its credit allowances.
+type Container_Account_Discount_Program struct {
+ Entity
+
+ // The credit allowance that has already been applied during the current billing cycle. If the lifetime limit has been or soon will be reached, this amount may include credit applied in previous billing cycles.
+ AppliedCredit *Float64 `json:"appliedCredit,omitempty" xmlrpc:"appliedCredit,omitempty"`
+
+ // Flag to signify whether the account is a participant in the discount program.
+ IsParticipant *bool `json:"isParticipant,omitempty" xmlrpc:"isParticipant,omitempty"`
+
+ // Credit allowance applied over the course of the entire program enrollment. For enrollments without a lifetime restriction, this property will not be populated as credit will be tracked on a purely monthly basis.
+ LifetimeAppliedCredit *Float64 `json:"lifetimeAppliedCredit,omitempty" xmlrpc:"lifetimeAppliedCredit,omitempty"`
+
+ // Credit allowance available over the course of the entire program enrollment. If null, enrollment credit is applied on a strictly monthly basis and there is no lifetime maximum. Enrollments with non-null lifetime credit will receive the lesser of the remaining monthly credit or the remaining lifetime credit.
+ LifetimeCredit *Float64 `json:"lifetimeCredit,omitempty" xmlrpc:"lifetimeCredit,omitempty"`
+
+ // Remaining credit allowance available over the remaining duration of the program enrollment. If null, enrollment credit is applied on a strictly monthly basis and there is no lifetime maximum. Enrollments with non-null remaining lifetime credit will receive the lesser of the remaining monthly credit or the remaining lifetime credit.
+ LifetimeRemainingCredit *Float64 `json:"lifetimeRemainingCredit,omitempty" xmlrpc:"lifetimeRemainingCredit,omitempty"`
+
+ // Maximum number of orders the enrolled account is allowed to have open at one time. If null, then the Flexible Credit Program does not impose an order limit.
+ MaximumActiveOrders *Float64 `json:"maximumActiveOrders,omitempty" xmlrpc:"maximumActiveOrders,omitempty"`
+
+ // The monthly credit allowance that is available at the beginning of the billing cycle.
+ MonthlyCredit *Float64 `json:"monthlyCredit,omitempty" xmlrpc:"monthlyCredit,omitempty"`
+
+ // DEPRECATED: Taxes are calculated in real time and discount amounts are shown pre-tax in all cases.
Tax values in the SoftLayer_Container_Account_Discount_Program container are now populated with the related pre-tax values. + PostTaxRemainingCredit *Float64 `json:"postTaxRemainingCredit,omitempty" xmlrpc:"postTaxRemainingCredit,omitempty"` + + // The date at which the program expires in MM/DD/YYYY format. + ProgramEndDate *Time `json:"programEndDate,omitempty" xmlrpc:"programEndDate,omitempty"` + + // Name of the Flexible Credit Program the account is enrolled in. + ProgramName *string `json:"programName,omitempty" xmlrpc:"programName,omitempty"` + + // The credit allowance that is available during the current billing cycle. If the lifetime limit has been or soon will be reached, this amount may be reduced by credit applied in previous billing cycles. + RemainingCredit *Float64 `json:"remainingCredit,omitempty" xmlrpc:"remainingCredit,omitempty"` + + // DEPRECATED: Taxes are calculated in real time and discount amounts are shown pre-tax in all cases. Tax values in the SoftLayer_Container_Account_Discount_Program container are now populated with the related pre-tax values. + RemainingCreditTax *Float64 `json:"remainingCreditTax,omitempty" xmlrpc:"remainingCreditTax,omitempty"` +} + +// no documentation yet +type Container_Account_Discount_Program_Collection struct { + Entity + + // The amount of credit that has been used by all account level enrollments in the billing cycle. + AccountLevelAppliedCredit *Float64 `json:"accountLevelAppliedCredit,omitempty" xmlrpc:"accountLevelAppliedCredit,omitempty"` + + // Account level credit allowance applied over the course of entire active program enrollments. For enrollments without a lifetime restriction, this property will not be populated as credit will be tracked on a purely monthly basis. + AccountLevelLifetimeAppliedCredit *Float64 `json:"accountLevelLifetimeAppliedCredit,omitempty" xmlrpc:"accountLevelLifetimeAppliedCredit,omitempty"` + + // The total account level credit over the course of an entire program enrollment. This value may be null, in which case the enrollment credit is applied on a monthly basis and there is no lifetime maximum. + AccountLevelLifetimeCredit *Float64 `json:"accountLevelLifetimeCredit,omitempty" xmlrpc:"accountLevelLifetimeCredit,omitempty"` + + // Remaining account level credit allowance available over the remaining duration of the program enrollments. If null, enrollment credit is applied on a strictly monthly basis and there is no lifetime maximum. Enrollments with non-null remaining lifetime credit will receive the lesser of the remaining monthly credit or the remaining lifetime credit. + AccountLevelLifetimeRemainingCredit *Float64 `json:"accountLevelLifetimeRemainingCredit,omitempty" xmlrpc:"accountLevelLifetimeRemainingCredit,omitempty"` + + // The total account level monthly credit allowance available at the beginning of a billing cycle. + AccountLevelMonthlyCredit *Float64 `json:"accountLevelMonthlyCredit,omitempty" xmlrpc:"accountLevelMonthlyCredit,omitempty"` + + // The total account level credit allowance still available during the current billing cycle. + AccountLevelRemainingCredit *Float64 `json:"accountLevelRemainingCredit,omitempty" xmlrpc:"accountLevelRemainingCredit,omitempty"` + + // The active enrollments for this account. + Enrollments []FlexibleCredit_Enrollment `json:"enrollments,omitempty" xmlrpc:"enrollments,omitempty"` + + // Indicates whether or not the account is participating in any account level Flexible Credit programs. 
+ IsAccountLevelParticipantFlag *bool `json:"isAccountLevelParticipantFlag,omitempty" xmlrpc:"isAccountLevelParticipantFlag,omitempty"`
+
+ // Indicates whether or not the account is participating in any Flexible Credit programs.
+ IsParticipantFlag *bool `json:"isParticipantFlag,omitempty" xmlrpc:"isParticipantFlag,omitempty"`
+
+ // Indicates whether or not the account is participating in any product specific level Flexible Credit programs.
+ IsProductSpecificParticipantFlag *bool `json:"isProductSpecificParticipantFlag,omitempty" xmlrpc:"isProductSpecificParticipantFlag,omitempty"`
+
+ // The amount of credit that has been used by all product specific enrollments in the billing cycle.
+ ProductSpecificAppliedCredit *Float64 `json:"productSpecificAppliedCredit,omitempty" xmlrpc:"productSpecificAppliedCredit,omitempty"`
+
+ // Product specific credit allowance applied over the course of entire active program enrollments. For enrollments without a lifetime restriction, this property will not be populated as credit will be tracked on a purely monthly basis.
+ ProductSpecificLifetimeAppliedCredit *Float64 `json:"productSpecificLifetimeAppliedCredit,omitempty" xmlrpc:"productSpecificLifetimeAppliedCredit,omitempty"`
+
+ // The total product specific credit over the course of an entire program enrollment. This value may be null, in which case the enrollment credit is applied on a monthly basis and there is no lifetime maximum.
+ ProductSpecificLifetimeCredit *Float64 `json:"productSpecificLifetimeCredit,omitempty" xmlrpc:"productSpecificLifetimeCredit,omitempty"`
+
+ // Remaining product specific level credit allowance available over the remaining duration of the program enrollments. If null, enrollment credit is applied on a strictly monthly basis and there is no lifetime maximum. Enrollments with non-null remaining lifetime credit will receive the lesser of the remaining monthly credit or the remaining lifetime credit.
+ ProductSpecificLifetimeRemainingCredit *Float64 `json:"productSpecificLifetimeRemainingCredit,omitempty" xmlrpc:"productSpecificLifetimeRemainingCredit,omitempty"`
+
+ // The total product specific monthly credit allowance available at the beginning of a billing cycle.
+ ProductSpecificMonthlyCredit *Float64 `json:"productSpecificMonthlyCredit,omitempty" xmlrpc:"productSpecificMonthlyCredit,omitempty"`
+
+ // The total product specific credit allowance still available during the current billing cycle.
+ ProductSpecificRemainingCredit *Float64 `json:"productSpecificRemainingCredit,omitempty" xmlrpc:"productSpecificRemainingCredit,omitempty"`
+
+ // The credit allowance that has already been applied during the current billing cycle from all enrollments. If the lifetime limit has been or soon will be reached, this amount may include credit applied in previous billing cycles.
+ TotalAppliedCredit *Float64 `json:"totalAppliedCredit,omitempty" xmlrpc:"totalAppliedCredit,omitempty"`
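+ // The "lesser of the remaining monthly credit or the remaining lifetime
+ // credit" rule described in the comments above can be computed directly
+ // from SoftLayer_Container_Account_Discount_Program. A minimal
+ // illustrative sketch (not part of the generated bindings), assuming the
+ // monthly remaining-credit field is populated and that Float64's
+ // underlying type is float64:
+ //
+ //	func applicableCredit(d Container_Account_Discount_Program) float64 {
+ //		monthly := float64(*d.RemainingCredit)
+ //		if d.LifetimeRemainingCredit == nil {
+ //			return monthly // no lifetime cap; credit is purely monthly
+ //		}
+ //		if lifetime := float64(*d.LifetimeRemainingCredit); lifetime < monthly {
+ //			return lifetime
+ //		}
+ //		return monthly
+ //	}
+
+ // The credit allowance that is available during the current billing cycle from all enrollments. If the lifetime limit has been or soon will be reached, this amount may be reduced by credit applied in previous billing cycles.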
+ TotalRemainingCredit *Float64 `json:"totalRemainingCredit,omitempty" xmlrpc:"totalRemainingCredit,omitempty"` +} + +// no documentation yet +type Container_Account_External_Setup_ProvisioningHoldLifted struct { + Entity + + // no documentation yet + AdditionalAttributes *Container_Account_External_Setup_ProvisioningHoldLifted_Attributes `json:"additionalAttributes,omitempty" xmlrpc:"additionalAttributes,omitempty"` + + // no documentation yet + Code *string `json:"code,omitempty" xmlrpc:"code,omitempty"` + + // no documentation yet + Error *string `json:"error,omitempty" xmlrpc:"error,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// no documentation yet +type Container_Account_External_Setup_ProvisioningHoldLifted_Attributes struct { + Entity + + // no documentation yet + BrandKeyName *string `json:"brandKeyName,omitempty" xmlrpc:"brandKeyName,omitempty"` + + // no documentation yet + SoftLayerBrandMoveDate *Time `json:"softLayerBrandMoveDate,omitempty" xmlrpc:"softLayerBrandMoveDate,omitempty"` +} + +// Models a single outbound object for a graph of given data sets. +type Container_Account_Graph_Outputs struct { + Entity + + // The count of closed tickets included in this graph. + ClosedTickets *string `json:"closedTickets,omitempty" xmlrpc:"closedTickets,omitempty"` + + // The count of completed backups included in this graph. + CompletedBackupCount *string `json:"completedBackupCount,omitempty" xmlrpc:"completedBackupCount,omitempty"` + + // The count of conflicted backups included in this graph. + ConflictBackupCount *string `json:"conflictBackupCount,omitempty" xmlrpc:"conflictBackupCount,omitempty"` + + // The maximum date included in this graph. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The count of failed backups included in this graph. + FailedBackupCount *string `json:"failedBackupCount,omitempty" xmlrpc:"failedBackupCount,omitempty"` + + // Error message encountered during graphing + GraphError *string `json:"graphError,omitempty" xmlrpc:"graphError,omitempty"` + + // The raw PNG binary data to be displayed once the graph is drawn. + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // The average of hardware uptime included in this graph. + HardwareUptime *string `json:"hardwareUptime,omitempty" xmlrpc:"hardwareUptime,omitempty"` + + // The inbound bandwidth usage shown in this graph. + InboundUsage *string `json:"inboundUsage,omitempty" xmlrpc:"inboundUsage,omitempty"` + + // The count of open tickets included in this graph. + OpenTickets *string `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"` + + // The outbound bandwidth usage shown in this graph. + OutboundUsage *string `json:"outboundUsage,omitempty" xmlrpc:"outboundUsage,omitempty"` + + // The count of tickets included in this graph. + PendingCustomerResponseTicketCount *string `json:"pendingCustomerResponseTicketCount,omitempty" xmlrpc:"pendingCustomerResponseTicketCount,omitempty"` + + // The minimum date included in this graph. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // The average of url uptime included in this graph. + UrlUptime *string `json:"urlUptime,omitempty" xmlrpc:"urlUptime,omitempty"` + + // The count of tickets included in this graph. 
+ WaitingEmployeeResponseTicketCount *string `json:"waitingEmployeeResponseTicketCount,omitempty" xmlrpc:"waitingEmployeeResponseTicketCount,omitempty"` +} + +// Historical Summary Container for account resource details +type Container_Account_Historical_Summary struct { + Entity + + // Array of server uptime detail containers + Details []Container_Account_Historical_Summary_Detail `json:"details,omitempty" xmlrpc:"details,omitempty"` + + // The maximum date included in the summary. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The minimum date included in the summary. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// Historical Summary Details Container for a resource's data +type Container_Account_Historical_Summary_Detail struct { + Entity + + // The maximum date included in the detail. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The minimum date included in the detail. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// Historical Summary Details Container for a host resource uptime +type Container_Account_Historical_Summary_Detail_Uptime struct { + Container_Account_Historical_Summary_Detail + + // The hardware for uptime details. + CloudComputingInstance *Virtual_Guest `json:"cloudComputingInstance,omitempty" xmlrpc:"cloudComputingInstance,omitempty"` + + // The data associated with a host uptime details. + Data []Metric_Tracking_Object_Data `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // The hardware for uptime details. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` +} + +// Historical Summary Container for account host's resource uptime details +type Container_Account_Historical_Summary_Uptime struct { + Container_Account_Historical_Summary +} + +// Contains data required to both request a new IaaS account for active IBM employees and review pending requests. Fields used exclusively in the review process are scrubbed of user input. +type Container_Account_Internal_Ibm_Request struct { + Entity + + // Purpose of the internal IBM account chosen from the list of available + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // If not provided, will attempt to retrieve from BluePages + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // If no address provided, will attempt to retrieve from BluePages + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // If not provided, will attempt to retrieve from BluePages + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Name of the company displayed on the IaaS account + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // If not provided, will attempt to retrieve from BluePages + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // True if the request has been denied by either the IaaS team or the + DeniedFlag *bool `json:"deniedFlag,omitempty" xmlrpc:"deniedFlag,omitempty"` + + // Department within the division which will be changed during cost recovery. + DepartmentCode *string `json:"departmentCode,omitempty" xmlrpc:"departmentCode,omitempty"` + + // Country assigned to the department for cost recovery. + DepartmentCountry *string `json:"departmentCountry,omitempty" xmlrpc:"departmentCountry,omitempty"` + + // Division code used for cost recovery. 
+ DivisionCode *string `json:"divisionCode,omitempty" xmlrpc:"divisionCode,omitempty"` + + // Account owner's IBM email address. Must be a discoverable email + EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"` + + // Applicant's first name, as provided by IBM BluePages API. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Applicant's last name, as provided by IBM BluePages API. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // APPROVED if the request has been approved by the first-line manager, + ManagerApprovalStatus *string `json:"managerApprovalStatus,omitempty" xmlrpc:"managerApprovalStatus,omitempty"` + + // True for accounts intended to be multi-tenant and false otherwise + MultiTenantFlag *bool `json:"multiTenantFlag,omitempty" xmlrpc:"multiTenantFlag,omitempty"` + + // Account owner's primary phone number. If no phone number is available + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // Bluemix PaaS 32 digit hexadecimal account id being automatically linked + PaasAccountId *string `json:"paasAccountId,omitempty" xmlrpc:"paasAccountId,omitempty"` + + // If not provided, will attempt to retrieve from BluePages + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Stated purpose of the new account this request would create + Purpose *string `json:"purpose,omitempty" xmlrpc:"purpose,omitempty"` + + // Division's security SME's email address, if available + SecuritySubjectMatterExpertEmail *string `json:"securitySubjectMatterExpertEmail,omitempty" xmlrpc:"securitySubjectMatterExpertEmail,omitempty"` + + // Division's security SME's name, if available + SecuritySubjectMatterExpertName *string `json:"securitySubjectMatterExpertName,omitempty" xmlrpc:"securitySubjectMatterExpertName,omitempty"` + + // Division's security SME's phone, if available + SecuritySubjectMatterExpertPhone *string `json:"securitySubjectMatterExpertPhone,omitempty" xmlrpc:"securitySubjectMatterExpertPhone,omitempty"` + + // If required for chosen country and not provided, will attempt + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// no documentation yet +type Container_Account_Payment_Method_CreditCard struct { + Entity + + // no documentation yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"` + + // no documentation yet + CybersourceAssignedCardType *string `json:"cybersourceAssignedCardType,omitempty" xmlrpc:"cybersourceAssignedCardType,omitempty"` + + // no documentation yet + ExpireMonth *string `json:"expireMonth,omitempty" xmlrpc:"expireMonth,omitempty"` + + // no documentation yet + ExpireYear *string `json:"expireYear,omitempty" xmlrpc:"expireYear,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastFourDigits *string `json:"lastFourDigits,omitempty" xmlrpc:"lastFourDigits,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" 
xmlrpc:"lastName,omitempty"` + + // no documentation yet + Nickname *string `json:"nickname,omitempty" xmlrpc:"nickname,omitempty"` + + // no documentation yet + PaymentMethodRoleName *string `json:"paymentMethodRoleName,omitempty" xmlrpc:"paymentMethodRoleName,omitempty"` + + // no documentation yet + PaymentTypeId *string `json:"paymentTypeId,omitempty" xmlrpc:"paymentTypeId,omitempty"` + + // no documentation yet + PaymentTypeName *string `json:"paymentTypeName,omitempty" xmlrpc:"paymentTypeName,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// no documentation yet +type Container_Account_PersonalInformation struct { + Entity + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + RequestDate *Time `json:"requestDate,omitempty" xmlrpc:"requestDate,omitempty"` + + // no documentation yet + RequestId *int `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// The customer and prospective owner of a proof of concept account established by an IBMer. 
+type Container_Account_ProofOfConcept_Contact_Customer struct { + Entity + + // Customer's address + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Customer's address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // Customer's city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Customer's ISO country code + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Customer's email address + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Customer's first name + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Customer's last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Customer's primary phone number + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // Customer's postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Customer's state + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // Customer's VAT ID + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// IBMer who is submitting a proof of concept request on behalf of a prospective customer. +type Container_Account_ProofOfConcept_Contact_Ibmer_Requester struct { + Entity + + // Customer's address + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Customer's address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + BusinessUnit *string `json:"businessUnit,omitempty" xmlrpc:"businessUnit,omitempty"` + + // Customer's city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Customer's ISO country code + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Customer's email address + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Customer's first name + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Customer's last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + OrganizationCountry *string `json:"organizationCountry,omitempty" xmlrpc:"organizationCountry,omitempty"` + + // no documentation yet + PaasAccountId *string `json:"paasAccountId,omitempty" xmlrpc:"paasAccountId,omitempty"` + + // Customer's primary phone number + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // Customer's postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Customer's state + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + SubOrganization *string `json:"subOrganization,omitempty" xmlrpc:"subOrganization,omitempty"` + + // no documentation yet + Uid *string `json:"uid,omitempty" xmlrpc:"uid,omitempty"` + + // Customer's VAT ID + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// IBMer who will assist the requester with technical aspects of configuring the proof of concept account. 
+type Container_Account_ProofOfConcept_Contact_Ibmer_Technical struct {
+ Entity
+
+ // Customer's address
+ Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
+
+ // Customer's address
+ Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
+
+ // Customer's city
+ City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
+
+ // Customer's ISO country code
+ Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
+
+ // Customer's email address
+ Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+ // Customer's first name
+ FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+ // Customer's last name
+ LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+ // Customer's primary phone number
+ Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"`
+
+ // Customer's postal code
+ PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+ // Customer's state
+ State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+
+ // no documentation yet
+ Uid *string `json:"uid,omitempty" xmlrpc:"uid,omitempty"`
+
+ // Customer's VAT ID
+ VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"`
+}
+
+// Proof of concept request using the account team funding model. Note that proof of concept account requests are available only to internal IBM employees.
+type Container_Account_ProofOfConcept_Request_AccountFunded struct {
+ Container_Account_ProofOfConcept_Request_GlobalFunded
+
+ // Billing codes for the department paying for the proof of concept account
+ CostRecoveryRequest *Container_Account_ProofOfConcept_Request_CostRecovery `json:"costRecoveryRequest,omitempty" xmlrpc:"costRecoveryRequest,omitempty"`
+}
+
+// Funding codes for the department paying for the proof of concept account.
+type Container_Account_ProofOfConcept_Request_CostRecovery struct {
+ Entity
+
+ // Internal billing system country code
+ CountryCode *string `json:"countryCode,omitempty" xmlrpc:"countryCode,omitempty"`
+
+ // Customer's Internal billing system department code
+ DepartmentCode *string `json:"departmentCode,omitempty" xmlrpc:"departmentCode,omitempty"`
+
+ // Internal billing system division code
+ DivisionCode *string `json:"divisionCode,omitempty" xmlrpc:"divisionCode,omitempty"`
+}
+
+// Proof of concept request using the global funding model. Note that proof of concept account requests are available only to internal IBM employees.
+type Container_Account_ProofOfConcept_Request_GlobalFunded struct {
+ Entity
+
+ // Dollar amount of funding requested for the proof of concept period
+ Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+ // Customer intended to take over ownership and billing of the account
+ Customer *Container_Account_ProofOfConcept_Contact_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"`
+
+ // Explanation of the purpose of the proof of concept request
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // End date for the proof of concept period
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+ // Internal opportunity system details
+ Opportunity *Container_Account_ProofOfConcept_Request_Opportunity `json:"opportunity,omitempty" xmlrpc:"opportunity,omitempty"`
+
+ // Name of the project or company; this will become the account companyName
+ ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"`
+
+ // IBM region responsible for overseeing the proof of concept account
+ RegionKeyName *string `json:"regionKeyName,omitempty" xmlrpc:"regionKeyName,omitempty"`
+
+ // IBMer requesting the proof of concept account
+ Requester *Container_Account_ProofOfConcept_Contact_Ibmer_Requester `json:"requester,omitempty" xmlrpc:"requester,omitempty"`
+
+ // Start date for the proof of concept period
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // IBMer assisting with technical aspects of account configuration
+ TechnicalContact *Container_Account_ProofOfConcept_Contact_Ibmer_Technical `json:"technicalContact,omitempty" xmlrpc:"technicalContact,omitempty"`
+}
+
+// Internal IBM opportunity codes required when applying for a Proof of Concept account.
+type Container_Account_ProofOfConcept_Request_Opportunity struct {
+ Entity
+
+ // The campaign or promotion code for this request, provided by Sales.
+ CampaignCode *string `json:"campaignCode,omitempty" xmlrpc:"campaignCode,omitempty"`
+
+ // Expected monthly revenue.
+ MonthlyRecurringRevenue *Float64 `json:"monthlyRecurringRevenue,omitempty" xmlrpc:"monthlyRecurringRevenue,omitempty"`
+
+ // Internal system identifier.
+ OpportunityNumber *string `json:"opportunityNumber,omitempty" xmlrpc:"opportunityNumber,omitempty"`
+
+ // Expected overall contract value.
+ TotalContractValue *Float64 `json:"totalContractValue,omitempty" xmlrpc:"totalContractValue,omitempty"`
+}
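+// A minimal illustrative sketch (not part of the generated bindings) of a
+// globally funded proof of concept request. Every value is a placeholder,
+// only a subset of fields is shown, and it assumes Float64's underlying
+// type is float64:
+//
+//	strPtr := func(v string) *string { return &v }
+//	amount := Float64(5000)
+//	request := Container_Account_ProofOfConcept_Request_GlobalFunded{
+//		Amount:      &amount,
+//		ProjectName: strPtr("example-poc"), // becomes the account companyName
+//		Description: strPtr("Placeholder purpose of the proof of concept."),
+//		Opportunity: &Container_Account_ProofOfConcept_Request_Opportunity{
+//			OpportunityNumber: strPtr("0000000"), // placeholder identifier
+//		},
+//	}
+
+// Full details presented to reviewers when determining whether or not to accept a proof of concept request. Note that reviewers are internal IBM employees and reviews are not exposed to external users.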
+type Container_Account_ProofOfConcept_Review struct { + Entity + + // Type of brand the account will use + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // Internal billing codes + CostRecoveryCodes *Container_Account_ProofOfConcept_Request_CostRecovery `json:"costRecoveryCodes,omitempty" xmlrpc:"costRecoveryCodes,omitempty"` + + // Customer intended to take over billing after the proof of concept period + Customer *Container_Account_ProofOfConcept_Contact_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"` + + // Describes the purpose and rationale of the request + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Expected end date of the proof of concept period + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // Dollar amount of funding requested + FundingAmount *Float64 `json:"fundingAmount,omitempty" xmlrpc:"fundingAmount,omitempty"` + + // Funding option chosen for the request + FundingType *string `json:"fundingType,omitempty" xmlrpc:"fundingType,omitempty"` + + // System id of the request + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of the integrated offering team lead reviewing the request + IotLeadName *string `json:"iotLeadName,omitempty" xmlrpc:"iotLeadName,omitempty"` + + // Name of the integrated offering team region + IotRegionName *string `json:"iotRegionName,omitempty" xmlrpc:"iotRegionName,omitempty"` + + // Name of requesting IBMer's manager + ManagerName *string `json:"managerName,omitempty" xmlrpc:"managerName,omitempty"` + + // Internal opportunity tracking information + Opportunity *Container_Account_ProofOfConcept_Request_Opportunity `json:"opportunity,omitempty" xmlrpc:"opportunity,omitempty"` + + // Project name chosen by the requesting IBMer + ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"` + + // IBMer requesting the account on behalf of a customer + Requester *Container_Account_ProofOfConcept_Contact_Ibmer_Requester `json:"requester,omitempty" xmlrpc:"requester,omitempty"` + + // Summary of request's review activity + ReviewHistory *Container_Account_ProofOfConcept_Review_History `json:"reviewHistory,omitempty" xmlrpc:"reviewHistory,omitempty"` + + // URL for the individual review + ReviewUrl *string `json:"reviewUrl,omitempty" xmlrpc:"reviewUrl,omitempty"` + + // Expected start date of the proof of concept period + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // Additional IBMer responsible for configuring the cloud capabilities + TechnicalContact *Container_Account_ProofOfConcept_Contact_Ibmer_Technical `json:"technicalContact,omitempty" xmlrpc:"technicalContact,omitempty"` +} + +// Review event within proof of concept request review period. +type Container_Account_ProofOfConcept_Review_Event struct { + Entity + + // Explanation of the event. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Reviewer's email address. + ReviewerEmail *string `json:"reviewerEmail,omitempty" xmlrpc:"reviewerEmail,omitempty"` + + // Reviewer's BluePages UID. + ReviewerUid *string `json:"reviewerUid,omitempty" xmlrpc:"reviewerUid,omitempty"` +} + +// Summary of review activity for a proof of concept request. +type Container_Account_ProofOfConcept_Review_History struct { + Entity + + // True for approved requests associated with a new account and false otherwise. 
+ AccountCreatedFlag *bool `json:"accountCreatedFlag,omitempty" xmlrpc:"accountCreatedFlag,omitempty"` + + // True for denied requests and false otherwise. + DeniedFlag *bool `json:"deniedFlag,omitempty" xmlrpc:"deniedFlag,omitempty"` + + // List of events occurring during the review. + Events []Container_Account_ProofOfConcept_Review_Event `json:"events,omitempty" xmlrpc:"events,omitempty"` + + // True for fully reviewed requests and false otherwise. + ReviewCompleteFlag *bool `json:"reviewCompleteFlag,omitempty" xmlrpc:"reviewCompleteFlag,omitempty"` +} + +// Summary presented to reviewers when determining whether or not to accept a proof of concept request. Note that reviewers are internal IBM employees and reviews are not exposed to external users. +type Container_Account_ProofOfConcept_Review_Summary struct { + Entity + + // Account's companyName + AccountName *string `json:"accountName,omitempty" xmlrpc:"accountName,omitempty"` + + // Current account owner + AccountOwnerName *string `json:"accountOwnerName,omitempty" xmlrpc:"accountOwnerName,omitempty"` + + // Dollar amount requested + Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // Date the request was submitted + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Email of the customer receiving the proof of concept account + CustomerEmail *string `json:"customerEmail,omitempty" xmlrpc:"customerEmail,omitempty"` + + // Name of the customer receiving the proof of concept account + CustomerName *string `json:"customerName,omitempty" xmlrpc:"customerName,omitempty"` + + // Request record's id + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Date of the last state change on the request + LastUpdate *Time `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"` + + // Email address of the reviewer, if any, currently reviewing the request + NextApproverEmail *string `json:"nextApproverEmail,omitempty" xmlrpc:"nextApproverEmail,omitempty"` + + // Email address of the requester + RequesterEmail *string `json:"requesterEmail,omitempty" xmlrpc:"requesterEmail,omitempty"` + + // Requesting IBMer's full name + RequesterName *string `json:"requesterName,omitempty" xmlrpc:"requesterName,omitempty"` + + // URL for the individual review + ReviewUrl *string `json:"reviewUrl,omitempty" xmlrpc:"reviewUrl,omitempty"` + + // Request's current status (Pending, Denied, or Approved) + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// Contains data related to an account after editing its information. +type Container_Account_Update_Response struct { + Entity + + // Whether or not the update was accepted and applied. + AcceptedFlag *bool `json:"acceptedFlag,omitempty" xmlrpc:"acceptedFlag,omitempty"` + + // The updated SoftLayer_Account. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // If a manual review is required, this will be populated with the SoftLayer_Ticket for that review. + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Common data type contains common information for requests to the getPortalLogin API. This is an abstract class that serves as a base that more specialized classes will derive from. For example, a request class specific to SoftLayer Native IMS Login (username and password). +type Container_Authentication_Request_Common struct { + Container_Authentication_Request_Contract + + // The answer to your security question. 
+ SecurityQuestionAnswer *string `json:"securityQuestionAnswer,omitempty" xmlrpc:"securityQuestionAnswer,omitempty"` + + // A security question you wish to answer when authenticating to the SoftLayer customer portal. This parameter isn't required if no security questions are set on your portal account or if your account is configured to not require answering a security account upon login. + SecurityQuestionId *int `json:"securityQuestionId,omitempty" xmlrpc:"securityQuestionId,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Contract provides a common set of operations for implementing classes. +type Container_Authentication_Request_Contract struct { + Entity +} + +// The SoftLayer_Container_Authentication_Request_Native data type contains information for requests to the getPortalLogin API. This class is specific to the SoftLayer Native login (username/password). The request information will be verified to ensure it is valid, and then there will be an attempt to obtain a portal login token in authenticating the user with the provided information. +type Container_Authentication_Request_Native struct { + Container_Authentication_Request_Common + + // no documentation yet + AuxiliaryClaimsMiniToken *string `json:"auxiliaryClaimsMiniToken,omitempty" xmlrpc:"auxiliaryClaimsMiniToken,omitempty"` + + // Your SoftLayer customer portal user's portal password. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The username you wish to authenticate to the SoftLayer customer portal with. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Native_External data type contains information for requests to the getPortalLogin API. This class serves as a base class for more specialized external authentication classes to the SoftLayer Native login (username/password). +type Container_Authentication_Request_Native_External struct { + Container_Authentication_Request_Native +} + +// The SoftLayer_Container_Authentication_Request_Native_External_Totp data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the native SoftLayer (username/password) login service for a portal login token, as well as submitting a request to the TOTP 2 factor authentication service. +type Container_Authentication_Request_Native_External_Totp struct { + Container_Authentication_Request_Native_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Native_External_Verisign data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the native SoftLayer (username/password) login service for a portal login token, as well as submitting a request to the Verisign 2 factor authentication service. 
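+// A minimal illustrative sketch (not part of the generated bindings):
+// because the TOTP container embeds the native request container, one value
+// carries both the username/password credentials and the one-time code.
+// All values below are placeholders:
+//
+//	strPtr := func(v string) *string { return &v }
+//	login := Container_Authentication_Request_Native_External_Totp{}
+//	login.Username = strPtr("example-user")     // promoted from the embedded Native container
+//	login.Password = strPtr("example-password")
+//	login.SecurityCode = strPtr("123456")       // current TOTP code
+
+// The SoftLayer_Container_Authentication_Request_Native_External_Verisign data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the native SoftLayer (username/password) login service for a portal login token, as well as submitting a request to the Verisign 2 factor authentication service.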
+type Container_Authentication_Request_Native_External_Verisign struct {
+ Container_Authentication_Request_Native_External
+
+ // no documentation yet
+ SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"`
+
+ // no documentation yet
+ SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"`
+
+ // no documentation yet
+ Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"`
+}
+
+// The SoftLayer_Container_Authentication_Request_OpenIdConnect data type contains information for requests to the getPortalLogin API. This class is specific to the SoftLayer Cloud Token login. The request information is verified to ensure it is valid, and an attempt is then made to authenticate the user with the provided information and obtain a portal login token.
+type Container_Authentication_Request_OpenIdConnect struct {
+ Container_Authentication_Request_Common
+
+ // no documentation yet
+ OpenIdConnectAccessToken *string `json:"openIdConnectAccessToken,omitempty" xmlrpc:"openIdConnectAccessToken,omitempty"`
+
+ // no documentation yet
+ OpenIdConnectAccountId *int `json:"openIdConnectAccountId,omitempty" xmlrpc:"openIdConnectAccountId,omitempty"`
+
+ // no documentation yet
+ OpenIdConnectProvider *string `json:"openIdConnectProvider,omitempty" xmlrpc:"openIdConnectProvider,omitempty"`
+}
+
+// The SoftLayer_Container_Authentication_Request_OpenIdConnect_External data type contains information for requests to the getPortalLogin API. This class serves as a base class for more specialized external authentication classes to the SoftLayer OpenIdConnect login service.
+type Container_Authentication_Request_OpenIdConnect_External struct {
+ Container_Authentication_Request_OpenIdConnect
+}
+
+// The SoftLayer_Container_Authentication_Request_OpenIdConnect_External_Totp data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the SoftLayer OpenIdConnect (token) login service for a portal login token, as well as submitting a request to the TOTP 2 factor authentication service.
+type Container_Authentication_Request_OpenIdConnect_External_Totp struct {
+ Container_Authentication_Request_OpenIdConnect_External
+
+ // no documentation yet
+ SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"`
+
+ // no documentation yet
+ SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"`
+
+ // no documentation yet
+ Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"`
+}
+
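getPortalLogin answers with one of the response containers defined further below (Success, LoginFailed, 2FactorAuthenticationNeeded, and so on). A minimal sketch of dispatching on the decoded response type follows; how the response is obtained and decoded into one of these containers is assumed and not shown.

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

// classify reports what a decoded getPortalLogin response means. The concrete
// response container types are defined later in this file.
func classify(resp interface{}) string {
	switch r := resp.(type) {
	case datatypes.Container_Authentication_Response_Success:
		_ = r.Token // the portal token to use for subsequent calls
		return "authenticated"
	case datatypes.Container_Authentication_Response_2FactorAuthenticationNeeded:
		return "second factor required; retry with a Totp or Verisign request container"
	case datatypes.Container_Authentication_Response_LoginFailed:
		if r.ErrorMessage != nil {
			return fmt.Sprintf("login failed: %s", *r.ErrorMessage)
		}
		return "login failed"
	default:
		return "unhandled response type"
	}
}

func main() {
	fmt.Println(classify(datatypes.Container_Authentication_Response_LoginFailed{}))
}
```

+// The SoftLayer_Container_Authentication_Request_OpenIdConnect_External_Verisign data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the SoftLayer OpenIdConnect (token) login service for a portal login token, as well as submitting a request to the Verisign 2 factor authentication service.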
+type Container_Authentication_Request_OpenIdConnect_External_Verisign struct { + Container_Authentication_Request_OpenIdConnect_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *int `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_2FactorAuthenticationNeeded data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request that is missing the appropriate 2FA information. +type Container_Authentication_Response_2FactorAuthenticationNeeded struct { + Container_Authentication_Response_Common + + // no documentation yet + AdditionalData *Container_Authentication_Response_Common `json:"additionalData,omitempty" xmlrpc:"additionalData,omitempty"` + + // no documentation yet + StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_Account data type contains account information for responses from the getPortalLogin API. +type Container_Authentication_Response_Account struct { + Entity + + // no documentation yet + AccountCompanyName *string `json:"accountCompanyName,omitempty" xmlrpc:"accountCompanyName,omitempty"` + + // no documentation yet + AccountCountry *string `json:"accountCountry,omitempty" xmlrpc:"accountCountry,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AccountStatusName *string `json:"accountStatusName,omitempty" xmlrpc:"accountStatusName,omitempty"` + + // no documentation yet + BluemixAccountId *string `json:"bluemixAccountId,omitempty" xmlrpc:"bluemixAccountId,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DefaultAccount *bool `json:"defaultAccount,omitempty" xmlrpc:"defaultAccount,omitempty"` + + // no documentation yet + IpAddressCheckRequired *bool `json:"ipAddressCheckRequired,omitempty" xmlrpc:"ipAddressCheckRequired,omitempty"` + + // no documentation yet + IsMasterUserFlag *bool `json:"isMasterUserFlag,omitempty" xmlrpc:"isMasterUserFlag,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + PhoneFactorExternalAuthenticationRequired *bool `json:"phoneFactorExternalAuthenticationRequired,omitempty" xmlrpc:"phoneFactorExternalAuthenticationRequired,omitempty"` + + // no documentation yet + SecurityQuestionRequired *bool `json:"securityQuestionRequired,omitempty" xmlrpc:"securityQuestionRequired,omitempty"` + + // no documentation yet + TotpExternalAuthenticationRequired *bool `json:"totpExternalAuthenticationRequired,omitempty" xmlrpc:"totpExternalAuthenticationRequired,omitempty"` + + // no documentation yet + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // no documentation yet + VerisignExternalAuthenticationRequired *bool `json:"verisignExternalAuthenticationRequired,omitempty" xmlrpc:"verisignExternalAuthenticationRequired,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_AccountIdMissing data type contains information for specific responses from the getPortalLogin API. 
This class is indicative of a request that is missing the account id.
+type Container_Authentication_Response_AccountIdMissing struct {
+ Container_Authentication_Response_Common
+
+ // no documentation yet
+ StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"`
+}
+
+// The SoftLayer_Container_Authentication_Response_Common data type contains common information for responses from the getPortalLogin API. This is an abstract class that serves as a base that more specialized classes will derive from. For example, a response class that is specific to a successful response from the getPortalLogin API.
+type Container_Authentication_Response_Common struct {
+ Entity
+
+ // The list of linked accounts for the authenticated SoftLayer customer portal user.
+ Accounts []Container_Authentication_Response_Account `json:"accounts,omitempty" xmlrpc:"accounts,omitempty"`
+}
+
+// The SoftLayer_Container_Authentication_Response_IpAddressRestrictionCheckNeeded data type indicates that the caller (IAM presumably) needs to do an IP address check of the logging-in user against the restricted IP list kept in BSS. We don't know the IP address of the user here (only IAM does) so we return an indicator of which user matched the username and expect IAM to come back with another login call that will include a mini-JWT token that contains an assertion that the IP address was checked.
+type Container_Authentication_Response_IpAddressRestrictionCheckNeeded struct {
+ Container_Authentication_Response_Common
+
+ // no documentation yet
+ StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"`
+}
+
+// The SoftLayer_Container_Authentication_Response_LOGIN_FAILED data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request that was unable to log in with the information provided.
+type Container_Authentication_Response_LoginFailed struct {
+ Container_Authentication_Response_Common
+
+ // no documentation yet
+ ErrorMessage *string `json:"errorMessage,omitempty" xmlrpc:"errorMessage,omitempty"`
+
+ // no documentation yet
+ StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"`
+}
+
+// The SoftLayer_Container_Authentication_Response_SUCCESS data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request that was successful in obtaining a portal login token from the getPortalLogin API.
+type Container_Authentication_Response_Success struct {
+ Container_Authentication_Response_Common
+
+ // no documentation yet
+ StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"`
+
+ // The token for interacting with the SoftLayer customer portal.
+ Token *Container_User_Authentication_Token `json:"token,omitempty" xmlrpc:"token,omitempty"`
+}
+
+// The SoftLayer_Container_Auxiliary_Network_Status_Reading data type contains information relating to an object being monitored from outside the SoftLayer network. It is primarily used to check the status of our edge routers from multiple locations around the world.
+type Container_Auxiliary_Network_Status_Reading struct {
+ Entity
+
+ // Average packet round-trip time.
+ AveragePing *Float64 `json:"averagePing,omitempty" xmlrpc:"averagePing,omitempty"`
+
+ // Number of failures since the target was last detected to be working properly.
+ Fails *int `json:"fails,omitempty" xmlrpc:"fails,omitempty"`
+
+ // Monitoring frequency in minutes.
+ Frequency *int `json:"frequency,omitempty" xmlrpc:"frequency,omitempty"`
+
+ // The target label.
+ Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"`
+
+ // Last check date and time.
+ LastCheckDate *Time `json:"lastCheckDate,omitempty" xmlrpc:"lastCheckDate,omitempty"`
+
+ // Date and time of the last problem detected.
+ LastDownDate *Time `json:"lastDownDate,omitempty" xmlrpc:"lastDownDate,omitempty"`
+
+ // The total response time in seconds calculated during the last check.
+ Latency *Float64 `json:"latency,omitempty" xmlrpc:"latency,omitempty"`
+
+ // The monitoring location name.
+ Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // Maximum packet round-trip time.
+ MaximumPing *Float64 `json:"maximumPing,omitempty" xmlrpc:"maximumPing,omitempty"`
+
+ // Minimum packet round-trip time.
+ MinimumPing *Float64 `json:"minimumPing,omitempty" xmlrpc:"minimumPing,omitempty"`
+
+ // Packet loss percentage.
+ PingLoss *Float64 `json:"pingLoss,omitempty" xmlrpc:"pingLoss,omitempty"`
+
+ // The date monitoring first began.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // Status code, one of 'UP', 'Down', or 'Test pending'.
+ StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
+
+ // The status message from the last effective check.
+ StatusMessage *string `json:"statusMessage,omitempty" xmlrpc:"statusMessage,omitempty"`
+
+ // The target object.
+ Target *string `json:"target,omitempty" xmlrpc:"target,omitempty"`
+
+ // A letter indicating the target type.
+ TargetType *string `json:"targetType,omitempty" xmlrpc:"targetType,omitempty"`
+}
+
+// SoftLayer_Container_Bandwidth_GraphInputs models a single inbound object for a given bandwidth graph.
+type Container_Bandwidth_GraphInputs struct {
+ Entity
+
+ // This is a unix timestamp that represents the stop date/time for a graph.
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+ // The front-end or back-end network uplink interface associated with this server.
+ NetworkInterfaceId *int `json:"networkInterfaceId,omitempty" xmlrpc:"networkInterfaceId,omitempty"`
+
+ // no documentation yet
+ Pod *int `json:"pod,omitempty" xmlrpc:"pod,omitempty"`
+
+ // This is a human readable name for the server or rack being graphed.
+ ServerName *string `json:"serverName,omitempty" xmlrpc:"serverName,omitempty"`
+
+ // This is a unix timestamp that represents the begin date/time for a graph.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+}
+
+// SoftLayer_Container_Bandwidth_GraphOutputs models a single outbound object for a given bandwidth graph.
+type Container_Bandwidth_GraphOutputs struct {
+ Entity
+
+ // The raw PNG binary data to be displayed once the graph is drawn.
+ GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"`
+
+ // The title that ended up being displayed as part of the graph image.
+ GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"`
+
+ // The maximum date included in this graph.
+ MaxEndDate *Time `json:"maxEndDate,omitempty" xmlrpc:"maxEndDate,omitempty"`
+
+ // The minimum date included in this graph.
+ MinStartDate *Time `json:"minStartDate,omitempty" xmlrpc:"minStartDate,omitempty"`
+}
+
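The GraphImage field above carries the raw PNG payload. A minimal sketch of persisting it follows; the writeGraph helper and the file path are illustrative only, and the Container_Bandwidth_GraphOutputs value is assumed to have come from one of the bandwidth graphing API calls.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/softlayer/softlayer-go/datatypes"
)

// writeGraph saves the raw PNG bytes of a bandwidth graph response to disk.
func writeGraph(out datatypes.Container_Bandwidth_GraphOutputs, path string) error {
	if out.GraphImage == nil {
		return fmt.Errorf("response contains no graph image")
	}
	// GraphImage is a *[]byte holding the PNG binary data.
	return os.WriteFile(path, *out.GraphImage, 0o644)
}

func main() {
	// A zero-value container stands in for a real API response here.
	if err := writeGraph(datatypes.Container_Bandwidth_GraphOutputs{}, "bandwidth.png"); err != nil {
		log.Println(err)
	}
}
```

+// SoftLayer_Container_Bandwidth_GraphOutputsExtended models an individual bandwidth graph image and certain details about that graph image.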
+type Container_Bandwidth_GraphOutputsExtended struct {
+ Entity
+
+ // The raw PNG binary data of a bandwidth graph image.
+ GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"`
+
+ // A bandwidth graph's title.
+ GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"`
+
+ // The amount of inbound traffic reported on a bandwidth graph image.
+ InBoundTotalBytes *uint `json:"inBoundTotalBytes,omitempty" xmlrpc:"inBoundTotalBytes,omitempty"`
+
+ // The ending date of the data represented in a bandwidth graph.
+ MaxEndDate *Time `json:"maxEndDate,omitempty" xmlrpc:"maxEndDate,omitempty"`
+
+ // The beginning date of the data represented in a bandwidth graph.
+ MinStartDate *Time `json:"minStartDate,omitempty" xmlrpc:"minStartDate,omitempty"`
+
+ // The amount of outbound traffic reported on a bandwidth graph image.
+ OutBoundTotalBytes *uint `json:"outBoundTotalBytes,omitempty" xmlrpc:"outBoundTotalBytes,omitempty"`
+}
+
+// SoftLayer_Container_Bandwidth_Projection models projected bandwidth use over a time range.
+type Container_Bandwidth_Projection struct {
+ Entity
+
+ // Bandwidth limit for this hardware.
+ AllowedUsage *string `json:"allowedUsage,omitempty" xmlrpc:"allowedUsage,omitempty"`
+
+ // Estimated bandwidth usage so far this billing cycle.
+ EstimatedUsage *string `json:"estimatedUsage,omitempty" xmlrpc:"estimatedUsage,omitempty"`
+
+ // Hardware ID of server to monitor.
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // Projected usage for this hardware based on previous usage this billing cycle.
+ ProjectedUsage *string `json:"projectedUsage,omitempty" xmlrpc:"projectedUsage,omitempty"`
+
+ // The text name of the server being monitored.
+ ServerName *string `json:"serverName,omitempty" xmlrpc:"serverName,omitempty"`
+
+ // The minimum date included in this range.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// When a customer uses SoftLayer_Account::getBandwidthUsage, this container is used to return their usage information in bytes +type Container_Bandwidth_Usage struct { + Entity + + // no documentation yet + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + PrivateInUsage *Float64 `json:"privateInUsage,omitempty" xmlrpc:"privateInUsage,omitempty"` + + // no documentation yet + PrivateOutUsage *Float64 `json:"privateOutUsage,omitempty" xmlrpc:"privateOutUsage,omitempty"` + + // no documentation yet + PublicInUsage *Float64 `json:"publicInUsage,omitempty" xmlrpc:"publicInUsage,omitempty"` + + // no documentation yet + PublicOutUsage *Float64 `json:"publicOutUsage,omitempty" xmlrpc:"publicOutUsage,omitempty"` + + // no documentation yet + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// no documentation yet +type Container_Billing_Currency_Country struct { + Entity + + // no documentation yet + AvailableCurrencies []Billing_Currency `json:"availableCurrencies,omitempty" xmlrpc:"availableCurrencies,omitempty"` + + // no documentation yet + Country *Locale_Country `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + CurrencyCountryLocales []Billing_Currency_Country `json:"currencyCountryLocales,omitempty" xmlrpc:"currencyCountryLocales,omitempty"` +} + +// no documentation yet +type Container_Billing_Currency_Format struct { + Entity + + // no documentation yet + Currency *string `json:"currency,omitempty" xmlrpc:"currency,omitempty"` + + // no documentation yet + Display *int `json:"display,omitempty" xmlrpc:"display,omitempty"` + + // no documentation yet + Format *string `json:"format,omitempty" xmlrpc:"format,omitempty"` + + // no documentation yet + Locale *string `json:"locale,omitempty" xmlrpc:"locale,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Position *int `json:"position,omitempty" xmlrpc:"position,omitempty"` + + // no documentation yet + Precision *int `json:"precision,omitempty" xmlrpc:"precision,omitempty"` + + // no documentation yet + Script *string `json:"script,omitempty" xmlrpc:"script,omitempty"` + + // no documentation yet + Service *string `json:"service,omitempty" xmlrpc:"service,omitempty"` + + // no documentation yet + Symbol *string `json:"symbol,omitempty" xmlrpc:"symbol,omitempty"` + + // no documentation yet + Tag *string `json:"tag,omitempty" xmlrpc:"tag,omitempty"` + + // no documentation yet + Value *Float64 `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Billing_Info_Ach struct { + Entity + + // no documentation yet + AccountNumber *string `json:"accountNumber,omitempty" xmlrpc:"accountNumber,omitempty"` + + // no documentation yet + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // no documentation yet + BankTransitNumber *string `json:"bankTransitNumber,omitempty" xmlrpc:"bankTransitNumber,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + FederalTaxId *string `json:"federalTaxId,omitempty" 
xmlrpc:"federalTaxId,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Street1 *string `json:"street1,omitempty" xmlrpc:"street1,omitempty"` + + // no documentation yet + Street2 *string `json:"street2,omitempty" xmlrpc:"street2,omitempty"` +} + +// This container is used to provide all the options for [[SoftLayer_Billing_Invoice/emailInvoices|emailInvoices]] in order to have the necessary invoices generated and links sent to the user's email. +type Container_Billing_Invoice_Email struct { + Entity + + // Excel Invoices to email + ExcelInvoiceIds []int `json:"excelInvoiceIds,omitempty" xmlrpc:"excelInvoiceIds,omitempty"` + + // PDF Invoice Details to email + PdfDetailedInvoiceIds []int `json:"pdfDetailedInvoiceIds,omitempty" xmlrpc:"pdfDetailedInvoiceIds,omitempty"` + + // PDF Invoices to email + PdfInvoiceIds []int `json:"pdfInvoiceIds,omitempty" xmlrpc:"pdfInvoiceIds,omitempty"` + + // The type of Invoices to be emailed [current|next]. If next is selected, the account id will be used. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// SoftLayer_Container_Billing_Order_Status models an order status. +type Container_Billing_Order_Status struct { + Entity + + // The description of the status. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The keyname of the status. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// Contains user information used to request a manual Catalyst enrollment. +type Container_Catalyst_ManualEnrollmentRequest struct { + Entity + + // Applicant's email address + CustomerEmail *string `json:"customerEmail,omitempty" xmlrpc:"customerEmail,omitempty"` + + // Applicant's first and last name + CustomerName *string `json:"customerName,omitempty" xmlrpc:"customerName,omitempty"` + + // Name of applicant's startup company + StartupName *string `json:"startupName,omitempty" xmlrpc:"startupName,omitempty"` + + // Flag indicating whether (true) or not (false) and applicant is + VentureAffiliationFlag *bool `json:"ventureAffiliationFlag,omitempty" xmlrpc:"ventureAffiliationFlag,omitempty"` + + // Name of the venture capital fund, if any, applicant is affiliated with + VentureFundName *string `json:"ventureFundName,omitempty" xmlrpc:"ventureFundName,omitempty"` +} + +// This container is used to hold country locale information. +type Container_Collection_Locale_CountryCode struct { + Entity + + // no documentation yet + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // no documentation yet + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` + + // no documentation yet + StateCodes []Container_Collection_Locale_StateCode `json:"stateCodes,omitempty" xmlrpc:"stateCodes,omitempty"` +} + +// This container is used to hold information regarding a state or province. 
+type Container_Collection_Locale_StateCode struct {
+ Entity
+
+ // no documentation yet
+ LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"`
+
+ // no documentation yet
+ ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"`
+}
+
+// This container is used to hold VAT information.
+type Container_Collection_Locale_VatCountryCodeAndFormat struct {
+ Entity
+
+ // no documentation yet
+ CountryCode *string `json:"countryCode,omitempty" xmlrpc:"countryCode,omitempty"`
+
+ // no documentation yet
+ Regex *string `json:"regex,omitempty" xmlrpc:"regex,omitempty"`
+}
+
+// no documentation yet
+type Container_Disk_Image_Capture_Template struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // no documentation yet
+ Summary *string `json:"summary,omitempty" xmlrpc:"summary,omitempty"`
+
+ // no documentation yet
+ Volumes []Container_Disk_Image_Capture_Template_Volume `json:"volumes,omitempty" xmlrpc:"volumes,omitempty"`
+}
+
+// no documentation yet
+type Container_Disk_Image_Capture_Template_Volume struct {
+ Entity
+
+ // A customer-provided flag to indicate that the current volume is the boot drive.
+ BootVolumeFlag *bool `json:"bootVolumeFlag,omitempty" xmlrpc:"bootVolumeFlag,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // no documentation yet
+ Partitions []Container_Disk_Image_Capture_Template_Volume_Partition `json:"partitions,omitempty" xmlrpc:"partitions,omitempty"`
+
+ // The storage group to capture.
+ StorageGroupId *int `json:"storageGroupId,omitempty" xmlrpc:"storageGroupId,omitempty"`
+}
+
+// no documentation yet
+type Container_Disk_Image_Capture_Template_Volume_Partition struct {
+ Entity
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Contact information container for domain registration
+type Container_Dns_Domain_Registration_Contact struct {
+ Entity
+
+ // The street address of the contact.
+ Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
+
+ // The second line in the address of the contact.
+ Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
+
+ // The third line in the address of the contact.
+ Address3 *string `json:"address3,omitempty" xmlrpc:"address3,omitempty"`
+
+ // The city of the contact.
+ City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
+
+ // The 2-character country code (e.g. US).
+ Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
+
+ // The email address of the contact.
+ Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+ // The fax number of the contact.
+ Fax *string `json:"fax,omitempty" xmlrpc:"fax,omitempty"`
+
+ // The first name of the contact.
+ FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+ // The last name of the contact.
+ LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+ // The organization name of the contact.
+ OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"`
+
+ // The phone number of the contact.
+ Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"`
+
+ // The postal code of the contact.
+ PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+ // The state of the contact.
+ State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+
+ // The type of contact. The following are the valid types of contacts:
+ // * admin
+ // * owner
+ // * billing
+ // * tech
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// This container data type contains extended attribute information for a domain with a country code TLD.
+type Container_Dns_Domain_Registration_ExtendedAttribute struct {
+ Entity
+
+ // Indicates if this is a child of another extended attribute.
+ ChildFlag *bool `json:"childFlag,omitempty" xmlrpc:"childFlag,omitempty"`
+
+ // The description of an extended attribute.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The name of an extended attribute.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The collection of options for an extended attribute.
+ Options []Container_Dns_Domain_Registration_ExtendedAttribute_Option `json:"options,omitempty" xmlrpc:"options,omitempty"`
+
+ // Indicates if the extended attribute is required.
+ RequiredFlag *int `json:"requiredFlag,omitempty" xmlrpc:"requiredFlag,omitempty"`
+
+ // Indicates that the value is user defined and must be supplied from outside sources.
+ UserDefinedFlag *bool `json:"userDefinedFlag,omitempty" xmlrpc:"userDefinedFlag,omitempty"`
+}
+
+// This is the data type that may need to be populated to complete registration for domains under country code TLDs.
+type Container_Dns_Domain_Registration_ExtendedAttribute_Configuration struct {
+ Entity
+
+ // The extended attribute name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The extended attribute option value.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// This container data type contains extended attribute option information for a domain with a country code TLD.
+type Container_Dns_Domain_Registration_ExtendedAttribute_Option struct {
+ Entity
+
+ // The description of an option.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Extended attributes that are required for an option.
+ RequireExtendedAttributes []Container_Dns_Domain_Registration_ExtendedAttribute_Option_Require `json:"requireExtendedAttributes,omitempty" xmlrpc:"requireExtendedAttributes,omitempty"`
+
+ // The title of an option.
+ Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+ // The value of an option.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// This container data type contains the extended attribute name that is required by an extended attribute option.
+type Container_Dns_Domain_Registration_ExtendedAttribute_Option_Require struct {
+ Entity
+
+ // The name of an extended attribute that is required by an extended attribute option.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Information container for domain registration
+type Container_Dns_Domain_Registration_Information struct {
+ Entity
+
+ // The contact information for the registered domain.
+ Contacts []Container_Dns_Domain_Registration_Contact `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"`
+
+ // The date that a domain is set to expire.
+ ExpireDate *Time `json:"expireDate,omitempty" xmlrpc:"expireDate,omitempty"`
+
+ // The list of nameservers for the domain.
+ Nameservers []Container_Dns_Domain_Registration_Nameserver `json:"nameservers,omitempty" xmlrpc:"nameservers,omitempty"`
+
+ // no documentation yet
+ RegistryCreateDate *Time `json:"registryCreateDate,omitempty" xmlrpc:"registryCreateDate,omitempty"`
+
+ // no documentation yet
+ RegistryExpireDate *Time `json:"registryExpireDate,omitempty" xmlrpc:"registryExpireDate,omitempty"`
+
+ // no documentation yet
+ RegistryUpdateDate *Time `json:"registryUpdateDate,omitempty" xmlrpc:"registryUpdateDate,omitempty"`
+}
+
+// no documentation yet
+type Container_Dns_Domain_Registration_List struct {
+ Entity
+
+ // The domain name.
+ DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"`
+
+ // Three-character language tag for the IDN domain that you're trying to register. This is only required for IDN domains.
+ EncodingType *string `json:"encodingType,omitempty" xmlrpc:"encodingType,omitempty"`
+
+ // Data required by the Registry for some country code top level domains (e.g. example.us).
+ //
+ // In order to determine if a domain requires extended attributes, use the [[SoftLayer_Dns_Domain_Registration::getExtendedAttributes|domain registration]] service.
+ ExtendedAttributeConfiguration []Container_Dns_Domain_Registration_ExtendedAttribute_Configuration `json:"extendedAttributeConfiguration,omitempty" xmlrpc:"extendedAttributeConfiguration,omitempty"`
+
+ // The length of the registration period in years. Valid values are 1 – 10.
+ RegistrationPeriod *int `json:"registrationPeriod,omitempty" xmlrpc:"registrationPeriod,omitempty"`
+}
+
+// Lookup domain container for domain registration
+type Container_Dns_Domain_Registration_Lookup struct {
+ Entity
+
+ // The list of available and taken domain names.
+ Items []Container_Dns_Domain_Registration_Lookup_Items `json:"items,omitempty" xmlrpc:"items,omitempty"`
+}
+
+// Lookup items container for domain registration
+type Container_Dns_Domain_Registration_Lookup_Items struct {
+ Entity
+
+ // The domain name.
+ DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"`
+
+ // The status of the domain name, indicating whether it is available and can be registered.
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// Nameserver container for domain registration
+type Container_Dns_Domain_Registration_Nameserver struct {
+ Entity
+
+ // The list of fully qualified names of the nameserver.
+ Nameservers []Container_Dns_Domain_Registration_Nameserver_List `json:"nameservers,omitempty" xmlrpc:"nameservers,omitempty"`
+}
+
+// Nameservers list container for domain registration
+type Container_Dns_Domain_Registration_Nameserver_List struct {
+ Entity
+
+ // The IPv4 address of the nameserver.
+ Ipv4Address *string `json:"ipv4Address,omitempty" xmlrpc:"ipv4Address,omitempty"`
+
+ // The IPv6 address of the nameserver.
+ Ipv6Address *string `json:"ipv6Address,omitempty" xmlrpc:"ipv6Address,omitempty"`
+
+ // The fully qualified name of the nameserver.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The sort order of the nameserver.
+ SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"`
+}
+
+// no documentation yet
+type Container_Dns_Domain_Registration_Registrant_Verification_StatusDetail struct {
+ Entity
+
+ // The current status of the verification.
+ Status *Dns_Domain_Registration_Registrant_Verification_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // The date when the domain will be suspended.
+ VerificationDeadlineDate *Time `json:"verificationDeadlineDate,omitempty" xmlrpc:"verificationDeadlineDate,omitempty"` +} + +// Transfer Information container for domain registration +type Container_Dns_Domain_Registration_Transfer_Information struct { + Entity + + // The reason why a domain is not transferable. + Reason *string `json:"reason,omitempty" xmlrpc:"reason,omitempty"` + + // The registrant email. + RegistrantEmail *string `json:"registrantEmail,omitempty" xmlrpc:"registrantEmail,omitempty"` + + // The status of the latest transfer on the domain. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The date and time of the most recent update to the state of the transfer. + TimeStamp *Time `json:"timeStamp,omitempty" xmlrpc:"timeStamp,omitempty"` + + // Indicates if the domain can be transferred. + Transferrable *int `json:"transferrable,omitempty" xmlrpc:"transferrable,omitempty"` +} + +// The SoftLayer_Container_Exception data type represents a SoftLayer_Exception. +type Container_Exception struct { + Entity + + // The SoftLayer_Exception class that the error is. + ExceptionClass *string `json:"exceptionClass,omitempty" xmlrpc:"exceptionClass,omitempty"` + + // The exception message. + ExceptionMessage *string `json:"exceptionMessage,omitempty" xmlrpc:"exceptionMessage,omitempty"` +} + +// no documentation yet +type Container_Graph struct { + Entity + + // base units associated with the graph. + BaseUnit *string `json:"baseUnit,omitempty" xmlrpc:"baseUnit,omitempty"` + + // Graph range end datetime. + EndDatetime *string `json:"endDatetime,omitempty" xmlrpc:"endDatetime,omitempty"` + + // The height of the graph image. + Height *int `json:"height,omitempty" xmlrpc:"height,omitempty"` + + // The graph image. + Image *[]byte `json:"image,omitempty" xmlrpc:"image,omitempty"` + + // The graph interval in seconds. + Interval *int `json:"interval,omitempty" xmlrpc:"interval,omitempty"` + + // Metric types associated with the graph. + Metrics []Container_Metric_Data_Type `json:"metrics,omitempty" xmlrpc:"metrics,omitempty"` + + // Indicator to control whether the graph data is normalized. + NormalizeFlag *[]byte `json:"normalizeFlag,omitempty" xmlrpc:"normalizeFlag,omitempty"` + + // The options used to control the graph appearance. + Options []Container_Graph_Option `json:"options,omitempty" xmlrpc:"options,omitempty"` + + // A collection of graph plots. + Plots []Container_Graph_Plot `json:"plots,omitempty" xmlrpc:"plots,omitempty"` + + // option to not return the image. + ReturnImage *bool `json:"returnImage,omitempty" xmlrpc:"returnImage,omitempty"` + + // Graph range start datetime. + StartDatetime *string `json:"startDatetime,omitempty" xmlrpc:"startDatetime,omitempty"` + + // The name of the template to use; may be null. + Template *string `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // The title of the graph image. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` + + // The width of the graph image. 
+ Width *int `json:"width,omitempty" xmlrpc:"width,omitempty"` +} + +// no documentation yet +type Container_Graph_Option struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Graph_Plot struct { + Entity + + // no documentation yet + Data []Container_Graph_Plot_Coordinate `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // no documentation yet + Metric *Container_Metric_Data_Type `json:"metric,omitempty" xmlrpc:"metric,omitempty"` + + // no documentation yet + Unit *string `json:"unit,omitempty" xmlrpc:"unit,omitempty"` +} + +// no documentation yet +type Container_Graph_Plot_Coordinate struct { + Entity + + // no documentation yet + XValue *Float64 `json:"xValue,omitempty" xmlrpc:"xValue,omitempty"` + + // no documentation yet + YValue *Float64 `json:"yValue,omitempty" xmlrpc:"yValue,omitempty"` + + // no documentation yet + ZValue *Float64 `json:"zValue,omitempty" xmlrpc:"zValue,omitempty"` +} + +// no documentation yet +type Container_Hardware_CaptureEnabled struct { + Entity + + // no documentation yet + Enabled *bool `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"` + + // no documentation yet + Reasons []string `json:"reasons,omitempty" xmlrpc:"reasons,omitempty"` +} + +// The hardware configuration container is used to provide configuration options for servers. +// +// Each configuration option will include both an itemPrice and a template. +// +// The itemPrice value will provide hourly and monthly costs (if either are applicable), and a description of the option. +// +// The template will provide a fragment of the request with the properties and values that must be sent when creating a server with the option. +// +// The [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] method returns this data structure. +// +// +type Container_Hardware_Configuration struct { + Entity + + // + //
+ // Available datacenter options. + // + // + // The datacenter.name value in the template represents which datacenter the server will be provisioned in. + //
+ Datacenters []Container_Hardware_Configuration_Option `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"` + + // + //
+ // Available fixed configuration preset options.
+ //
+ //
+ // The fixedConfigurationPreset.keyName value in the template is an identifier for a particular fixed configuration. When provided exactly as shown in the template, that fixed configuration will be used.
+ //
+ //
+ // When providing a fixedConfigurationPreset.keyName while ordering a server, the processors and hardDrives configuration options cannot be used.
+ //
+ FixedConfigurationPresets []Container_Hardware_Configuration_Option `json:"fixedConfigurationPresets,omitempty" xmlrpc:"fixedConfigurationPresets,omitempty"` + + // + //
+ // Available hard drive options. + // + // + // A server will have at least one hard drive. + // + // + // The hardDrives.capacity value in the template represents the size, in gigabytes, of the disk. + //
+ HardDrives []Container_Hardware_Configuration_Option `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"` + + // + //
+ // Available network component options. + // + // + // The networkComponent.maxSpeed value in the template represents the link speed, in megabits per second, of the network connections for a server. + //
+ NetworkComponents []Container_Hardware_Configuration_Option `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // + //
+ // Available operating system options.
+ //
+ //
+ // The operatingSystemReferenceCode value in the template is an identifier for a particular operating system. When provided exactly as shown in the template, that operating system will be used.
+ //
+ //
+ // A reference code is structured as three tokens separated by underscores. The first token represents the product, the second is the version of the product, and the third is whether the OS is 32 or 64 bit.
+ //
+ //
+ // When providing an operatingSystemReferenceCode while ordering a server, the only token required to match exactly is the product. The version token may be given as 'LATEST'; otherwise it must match exactly as well. When the bits token is not provided, 64 bits will be assumed.
+ //
+ //
+ // Providing the value of 'LATEST' for a version will select the latest release of that product for the operating system. As this may change over time, you should only use it if the exact release version is irrelevant to your applications.
+ //
+ //
+ // For Windows-based operating systems, the version will represent both the release version (2008, 2012, etc.) and the edition (Standard, Enterprise, etc.). For all other operating systems, the version will represent the major version (CentOS 6, Ubuntu 12, etc.) of that operating system; minor versions are represented in a few reference codes where they are significant.
+ //
+ OperatingSystems []Container_Hardware_Configuration_Option `json:"operatingSystems,omitempty" xmlrpc:"operatingSystems,omitempty"` + + // + //
+ // Available processor options. + // + // + // The processorCoreAmount value in the template represents the number of cores allocated to the server. + // The memoryCapacity value in the template represents the amount of memory, in gigabytes, allocated to the server. + //
+ Processors []Container_Hardware_Configuration_Option `json:"processors,omitempty" xmlrpc:"processors,omitempty"` +} + +// An option found within a [[SoftLayer_Container_Hardware_Configuration (type)]] structure. +type Container_Hardware_Configuration_Option struct { + Entity + + // + // Provides hourly and monthly costs (if either are applicable), and a description of the option. + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // + // Provides a description of a fixed configuration preset with monthly and hourly costs. + Preset *Product_Package_Preset `json:"preset,omitempty" xmlrpc:"preset,omitempty"` + + // + // Provides a fragment of the request with the properties and values that must be sent when creating a server with the option. + Template *Hardware `json:"template,omitempty" xmlrpc:"template,omitempty"` +} + +// no documentation yet +type Container_Hardware_DiskImageMap struct { + Entity + + // no documentation yet + BootFlag *int `json:"bootFlag,omitempty" xmlrpc:"bootFlag,omitempty"` + + // no documentation yet + DiskImageUUID *string `json:"diskImageUUID,omitempty" xmlrpc:"diskImageUUID,omitempty"` + + // no documentation yet + DiskSerialNumber *string `json:"diskSerialNumber,omitempty" xmlrpc:"diskSerialNumber,omitempty"` +} + +// no documentation yet +type Container_Hardware_MassUpdate struct { + Entity + + // The hardwares updated by the mass update tool + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // Errors encountered while mass updating hardwares + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // The hardwares that failed to update + SuccessFlag *string `json:"successFlag,omitempty" xmlrpc:"successFlag,omitempty"` +} + +// no documentation yet +type Container_Hardware_Pool_Details struct { + Entity + + // no documentation yet + PendingOrders *int `json:"pendingOrders,omitempty" xmlrpc:"pendingOrders,omitempty"` + + // no documentation yet + PendingTransactions *int `json:"pendingTransactions,omitempty" xmlrpc:"pendingTransactions,omitempty"` + + // no documentation yet + PoolDescription *string `json:"poolDescription,omitempty" xmlrpc:"poolDescription,omitempty"` + + // no documentation yet + PoolKeyName *string `json:"poolKeyName,omitempty" xmlrpc:"poolKeyName,omitempty"` + + // no documentation yet + PoolName *string `json:"poolName,omitempty" xmlrpc:"poolName,omitempty"` + + // no documentation yet + Routers []Container_Hardware_Pool_Details_Router `json:"routers,omitempty" xmlrpc:"routers,omitempty"` + + // no documentation yet + TotalHardware *int `json:"totalHardware,omitempty" xmlrpc:"totalHardware,omitempty"` + + // no documentation yet + TotalInventoryHardware *int `json:"totalInventoryHardware,omitempty" xmlrpc:"totalInventoryHardware,omitempty"` + + // no documentation yet + TotalProvisionedHardware *int `json:"totalProvisionedHardware,omitempty" xmlrpc:"totalProvisionedHardware,omitempty"` + + // no documentation yet + TotalTestedHardware *int `json:"totalTestedHardware,omitempty" xmlrpc:"totalTestedHardware,omitempty"` + + // no documentation yet + TotalTestingHardware *int `json:"totalTestingHardware,omitempty" xmlrpc:"totalTestingHardware,omitempty"` +} + +// no documentation yet +type Container_Hardware_Pool_Details_Router struct { + Entity + + // no documentation yet + PoolThreshold *int `json:"poolThreshold,omitempty" xmlrpc:"poolThreshold,omitempty"` + + // no documentation yet + RouterId *int `json:"routerId,omitempty" 
xmlrpc:"routerId,omitempty"` + + // no documentation yet + RouterName *string `json:"routerName,omitempty" xmlrpc:"routerName,omitempty"` + + // no documentation yet + TotalHardware *int `json:"totalHardware,omitempty" xmlrpc:"totalHardware,omitempty"` + + // no documentation yet + TotalInventoryHardware *int `json:"totalInventoryHardware,omitempty" xmlrpc:"totalInventoryHardware,omitempty"` + + // no documentation yet + TotalProvisionedHardware *int `json:"totalProvisionedHardware,omitempty" xmlrpc:"totalProvisionedHardware,omitempty"` + + // no documentation yet + TotalTestedHardware *int `json:"totalTestedHardware,omitempty" xmlrpc:"totalTestedHardware,omitempty"` + + // no documentation yet + TotalTestingHardware *int `json:"totalTestingHardware,omitempty" xmlrpc:"totalTestingHardware,omitempty"` +} + +// The SoftLayer_Container_Hardware_Server_Configuration data type contains information relating to a server's item price information, and hard drive partition information. +type Container_Hardware_Server_Configuration struct { + Entity + + // A flag indicating that the server will be moved into the spare pool after an Operating system reload. + AddToSparePoolAfterOsReload *int `json:"addToSparePoolAfterOsReload,omitempty" xmlrpc:"addToSparePoolAfterOsReload,omitempty"` + + // The customer provision uri will be used to download and execute a customer defined script on the host at the end of provisioning. + CustomProvisionScriptUri *string `json:"customProvisionScriptUri,omitempty" xmlrpc:"customProvisionScriptUri,omitempty"` + + // A flag indicating that the primary drive will be converted to a portable storage volume during an Operating System reload. + DriveRetentionFlag *bool `json:"driveRetentionFlag,omitempty" xmlrpc:"driveRetentionFlag,omitempty"` + + // A flag indicating that all data will be erased from drives during an Operating System reload. + EraseHardDrives *int `json:"eraseHardDrives,omitempty" xmlrpc:"eraseHardDrives,omitempty"` + + // The hard drive partitions that a server can be partitioned with. + HardDrives []Hardware_Component `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"` + + // An Image Template ID [[SoftLayer_Virtual_Guest_Block_Device_Template_Group]] that will be deployed to the host. If provided no item prices are required. + ImageTemplateId *int `json:"imageTemplateId,omitempty" xmlrpc:"imageTemplateId,omitempty"` + + // The item prices that a server can be configured with. + ItemPrices []Product_Item_Price `json:"itemPrices,omitempty" xmlrpc:"itemPrices,omitempty"` + + // A flag indicating that the provision should use LVM for all logical drives. + LvmFlag *int `json:"lvmFlag,omitempty" xmlrpc:"lvmFlag,omitempty"` + + // A flag indicating that the remote management cards password will be reset. + ResetIpmiPassword *int `json:"resetIpmiPassword,omitempty" xmlrpc:"resetIpmiPassword,omitempty"` + + // The token of the requesting service. Do not set. + ServiceToken *string `json:"serviceToken,omitempty" xmlrpc:"serviceToken,omitempty"` + + // IDs to SoftLayer_Security_Ssh_Key objects on the current account which will be added to the server for authentication. SSH Keys will not be added to servers with Microsoft Windows. + SshKeyIds []int `json:"sshKeyIds,omitempty" xmlrpc:"sshKeyIds,omitempty"` + + // A flag indicating that the BIOS will be updated when installing the operating system. 
+ UpgradeBios *int `json:"upgradeBios,omitempty" xmlrpc:"upgradeBios,omitempty"`
+
+ // A flag indicating that the firmware on all hard drives will be updated when installing the operating system.
+ UpgradeHardDriveFirmware *int `json:"upgradeHardDriveFirmware,omitempty" xmlrpc:"upgradeHardDriveFirmware,omitempty"`
+}
+
+// The SoftLayer_Container_Hardware_Server_Details data type contains a server's component, network, and software information.
+type Container_Hardware_Server_Details struct {
+ Entity
+
+ // The components that belong to a piece of hardware.
+ Components []Hardware_Component `json:"components,omitempty" xmlrpc:"components,omitempty"`
+
+ // The network components that belong to a piece of hardware.
+ NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"`
+
+ // The software that belongs to a piece of hardware.
+ Software []Software_Component `json:"software,omitempty" xmlrpc:"software,omitempty"`
+}
+
+// no documentation yet
+type Container_Hardware_Server_Request struct {
+ Entity
+
+ // no documentation yet
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // no documentation yet
+ Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+ // no documentation yet
+ SuccessFlag *bool `json:"successFlag,omitempty" xmlrpc:"successFlag,omitempty"`
+}
+
+// no documentation yet
+type Container_Image_StorageGroupDetails struct {
+ Entity
+
+ // no documentation yet
+ Drives []Container_Image_StorageGroupDetails_Drives `json:"drives,omitempty" xmlrpc:"drives,omitempty"`
+
+ // no documentation yet
+ StorageGroupName *string `json:"storageGroupName,omitempty" xmlrpc:"storageGroupName,omitempty"`
+
+ // no documentation yet
+ StorageGroupType *string `json:"storageGroupType,omitempty" xmlrpc:"storageGroupType,omitempty"`
+}
+
+// no documentation yet
+type Container_Image_StorageGroupDetails_Drives struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ DiskSpace *string `json:"diskSpace,omitempty" xmlrpc:"diskSpace,omitempty"`
+
+ // no documentation yet
+ Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"`
+}
+
+// SoftLayer_Container_KnowledgeLayer_QuestionAnswer models a single question and answer pair from SoftLayer's KnowledgeLayer knowledge base. SoftLayer's backend network interfaces with the KnowledgeLayer to recommend helpful articles when support tickets are created.
+type Container_KnowledgeLayer_QuestionAnswer struct {
+ Entity
+
+ // The answer to a question asked on the SoftLayer KnowledgeLayer.
+ Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"`
+
+ // The link to a question asked on the SoftLayer KnowledgeLayer.
+ Link *string `json:"link,omitempty" xmlrpc:"link,omitempty"`
+
+ // A question asked on the SoftLayer KnowledgeLayer.
+ Question *string `json:"question,omitempty" xmlrpc:"question,omitempty"` +} + +// no documentation yet +type Container_Message struct { + Entity + + // no documentation yet + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Container_Metric_Data_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + SummaryType *string `json:"summaryType,omitempty" xmlrpc:"summaryType,omitempty"` + + // no documentation yet + Unit *string `json:"unit,omitempty" xmlrpc:"unit,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Details This container is a parent class for detailing diverse metrics. +type Container_Metric_Tracking_Object_Details struct { + Entity + + // The name that best describes the metric being collected. + MetricName *string `json:"metricName,omitempty" xmlrpc:"metricName,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Summary This container is a parent class for summarizing diverse metrics. +type Container_Metric_Tracking_Object_Summary struct { + Entity + + // The name that best describes the metric being collected. + MetricName *string `json:"metricName,omitempty" xmlrpc:"metricName,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Virtual_Host_Details This container details a virtual host's metric data. +type Container_Metric_Tracking_Object_Virtual_Host_Details struct { + Container_Metric_Tracking_Object_Details + + // The day this metric was collected. + Day *Time `json:"day,omitempty" xmlrpc:"day,omitempty"` + + // The maximum number of guests hosted by this platform for the given day. + MaxInstances *int `json:"maxInstances,omitempty" xmlrpc:"maxInstances,omitempty"` + + // The maximum amount of memory utilized by this platform for the given day. + MaxMemoryUsage *int `json:"maxMemoryUsage,omitempty" xmlrpc:"maxMemoryUsage,omitempty"` + + // The mean number of guests hosted by this platform for the given day. + MeanInstances *Float64 `json:"meanInstances,omitempty" xmlrpc:"meanInstances,omitempty"` + + // The mean amount of memory utilized by this platform for the given day. + MeanMemoryUsage *Float64 `json:"meanMemoryUsage,omitempty" xmlrpc:"meanMemoryUsage,omitempty"` + + // The minimum number of guests hosted by this platform for the given day. + MinInstances *int `json:"minInstances,omitempty" xmlrpc:"minInstances,omitempty"` + + // The minimum amount of memory utilized by this platform for the given day. + MinMemoryUsage *int `json:"minMemoryUsage,omitempty" xmlrpc:"minMemoryUsage,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Virtual_Host_Summary This container summarizes a virtual host's metric data. +type Container_Metric_Tracking_Object_Virtual_Host_Summary struct { + Container_Metric_Tracking_Object_Summary + + // The average amount of memory usage thus far in this billing cycle. + AvgMemoryUsageInBillingCycle *int `json:"avgMemoryUsageInBillingCycle,omitempty" xmlrpc:"avgMemoryUsageInBillingCycle,omitempty"` + + // Current bill cycle end date. + CurrentBillCycleEnd *Time `json:"currentBillCycleEnd,omitempty" xmlrpc:"currentBillCycleEnd,omitempty"` + + // Current bill cycle start date. 
+ CurrentBillCycleStart *Time `json:"currentBillCycleStart,omitempty" xmlrpc:"currentBillCycleStart,omitempty"`
+
+ // The last count of instances this platform was hosting.
+ LastInstanceCount *int `json:"lastInstanceCount,omitempty" xmlrpc:"lastInstanceCount,omitempty"`
+
+ // The last amount of memory this platform was using.
+ LastMemoryUsageAmount *int `json:"lastMemoryUsageAmount,omitempty" xmlrpc:"lastMemoryUsageAmount,omitempty"`
+
+ // The last time this virtual host was polled for metrics.
+ LastPollTime *Time `json:"lastPollTime,omitempty" xmlrpc:"lastPollTime,omitempty"`
+
+ // The max number of instances hosted thus far in this billing cycle.
+ MaxInstanceInBillingCycle *int `json:"maxInstanceInBillingCycle,omitempty" xmlrpc:"maxInstanceInBillingCycle,omitempty"`
+
+ // Previous bill cycle end date.
+ PreviousBillCycleEnd *Time `json:"previousBillCycleEnd,omitempty" xmlrpc:"previousBillCycleEnd,omitempty"`
+
+ // Previous bill cycle start date.
+ PreviousBillCycleStart *Time `json:"previousBillCycleStart,omitempty" xmlrpc:"previousBillCycleStart,omitempty"`
+
+ // The name of this virtual hosting platform.
+ VirtualPlatformName *string `json:"virtualPlatformName,omitempty" xmlrpc:"virtualPlatformName,omitempty"`
+}
+
+// The SoftLayer_Container_Monitoring_Alarm_History data type contains information relating to SoftLayer monitoring alarm history.
+type Container_Monitoring_Alarm_History struct {
+ Entity
+
+ // Account ID that this alarm belongs to
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // DEPRECATED. ID of the monitoring agent that triggered this alarm
+ AgentId *int `json:"agentId,omitempty" xmlrpc:"agentId,omitempty"`
+
+ // Alarm ID
+ AlarmId *string `json:"alarmId,omitempty" xmlrpc:"alarmId,omitempty"`
+
+ // Time that an alarm was closed.
+ ClosedDate *Time `json:"closedDate,omitempty" xmlrpc:"closedDate,omitempty"`
+
+ // Time that an alarm was triggered.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // Alarm message
+ Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+ // DEPRECATED. Robot ID
+ RobotId *int `json:"robotId,omitempty" xmlrpc:"robotId,omitempty"`
+
+ // Severity of an alarm
+ Severity *string `json:"severity,omitempty" xmlrpc:"severity,omitempty"`
+}
+
+// This object holds authentication data to a server.
+type Container_Network_Authentication_Data struct {
+ Entity
+
+ // The name of a host
+ Host *string `json:"host,omitempty" xmlrpc:"host,omitempty"`
+
+ // The authentication password
+ Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+ // The port number
+ Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"`
+
+ // The type of network protocol. This can be ftp, ssh and so on.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // The authentication username
+ Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// SoftLayer_Container_Network_Bandwidth_Data_Summary models an interface's overall bandwidth usage during its current billing cycle.
+type Container_Network_Bandwidth_Data_Summary struct {
+ Entity
+
+ // The amount of bandwidth a server has allocated to it in its current billing period.
+ AllowedUsage *Float64 `json:"allowedUsage,omitempty" xmlrpc:"allowedUsage,omitempty"`
+
+ // The amount of bandwidth that a server has used within its current billing period.
+
+// SoftLayer_Container_Network_Bandwidth_Version1_Usage models an hourly bandwidth record.
+type Container_Network_Bandwidth_Version1_Usage struct {
+	Entity
+
+	// The amount of incoming bandwidth that a server has used within the hour of the recordedDate.
+	IncomingAmount *Float64 `json:"incomingAmount,omitempty" xmlrpc:"incomingAmount,omitempty"`
+
+	// The amount of outgoing bandwidth that a server has used within the hour of the recordedDate.
+	OutgoingAmount *Float64 `json:"outgoingAmount,omitempty" xmlrpc:"outgoingAmount,omitempty"`
+
+	// The date and time that the bandwidth was used by a piece of hardware
+	RecordedDate *Time `json:"recordedDate,omitempty" xmlrpc:"recordedDate,omitempty"`
+}
+
+// The SoftLayer_Container_Network_CdnMarketplace_Configuration_Behavior_ModifyResponseHeader data type contains information for specific responses from the modify response header API.
+type Container_Network_CdnMarketplace_Configuration_Behavior_ModifyResponseHeader struct {
+	Entity
+
+	// Specifies the delimiter to be used when indicating multiple values for a header. Valid delimiters are "," (comma), ";" (semicolon), ", " (comma and space), or "; " (semicolon and space).
+	Delimiter *string `json:"delimiter,omitempty" xmlrpc:"delimiter,omitempty"`
+
+	// The description of the modify response header.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A collection of key value pairs that specify the headers and associated values to be modified. The header name and header value must be separated by a colon (:). Example: ['header1:value1','header2:Value2']
+	Headers []string `json:"headers,omitempty" xmlrpc:"headers,omitempty"`
+
+	// The uniqueId of the modify response header to which the existing behavior belongs.
+	ModResHeaderUniqueId *string `json:"modResHeaderUniqueId,omitempty" xmlrpc:"modResHeaderUniqueId,omitempty"`
+
+	// The path, relative to the domain, that is accessed via the modify response header.
+	Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"`
+
+	// The type of the modify response header; valid values are append, delete, and overwrite. Set this to append to add a given header value to a header name set in the headerList. Set this to delete to remove a given header value from a header name set in the headerList. Set this to overwrite to match on a specified header name and replace its existing header value with a new one you specify.
+	Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The uniqueId of the mapping to which the existing behavior belongs.
+	UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"`
+}
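The "name:value" format of the Headers field above is easy to get wrong, so here is a minimal, hedged sketch of assembling such a behavior container; the sl helper package ships with this SDK, while the header names, path, and uniqueId below are placeholders:

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	// Each Headers entry pairs a header name and value separated by a
	// colon, matching the field documentation above.
	behavior := datatypes.Container_Network_CdnMarketplace_Configuration_Behavior_ModifyResponseHeader{
		Type:      sl.String("append"), // or "delete" / "overwrite"
		Delimiter: sl.String(";"),      // used when a header carries multiple values
		Headers:   []string{"Cache-Control:no-store", "X-Served-By:cdn-edge"},
		Path:      sl.String("/static/*"),
		UniqueId:  sl.String("EXAMPLE-MAPPING-ID"), // placeholder mapping uniqueId
	}
	fmt.Printf("%d header rule(s), type %s\n", len(behavior.Headers), *behavior.Type)
}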
+
+// The SoftLayer_Container_Network_CdnMarketplace_Configuration_Behavior_TokenAuth data type contains information for specific responses from the Token Authentication API.
+type Container_Network_CdnMarketplace_Configuration_Behavior_TokenAuth struct {
+	Entity
+
+	// Specifies a single character to separate access control list (ACL) fields. The default value is '!'.
+	AclDelimiter *string `json:"aclDelimiter,omitempty" xmlrpc:"aclDelimiter,omitempty"`
+
+	// Possible values '0' and '1'. If set to '1', input values are escaped before adding them to the token. Default value is '1'.
+	EscapeTokenInputs *string `json:"escapeTokenInputs,omitempty" xmlrpc:"escapeTokenInputs,omitempty"`
+
+	// Specifies the algorithm to use for the token's hash-based message authentication code (HMAC) field. Valid entries are 'SHA256', 'SHA1', or 'MD5'. The default value is 'SHA256'.
+	HmacAlgorithm *string `json:"hmacAlgorithm,omitempty" xmlrpc:"hmacAlgorithm,omitempty"`
+
+	// Possible values '0' and '1'. If set to '1', query strings are removed from a URL when computing the token's HMAC algorithm. Default value is '0'.
+	IgnoreQueryString *string `json:"ignoreQueryString,omitempty" xmlrpc:"ignoreQueryString,omitempty"`
+
+	// The token name. If this value is empty, then it is set to the default value '__token__'.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The path, relative to the domain, that is accessed via token authentication.
+	Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"`
+
+	// Specifies a single character to separate the individual token fields. The default value is '~'.
+	TokenDelimiter *string `json:"tokenDelimiter,omitempty" xmlrpc:"tokenDelimiter,omitempty"`
+
+	// The token encryption key, which specifies an even number of hex digits for the token key. An entry can be up to 64 characters in length.
+	TokenKey *string `json:"tokenKey,omitempty" xmlrpc:"tokenKey,omitempty"`
+
+	// The token transition key, which specifies an even number of hex digits for the token transition key. An entry can be up to 64 characters in length.
+	TransitionKey *string `json:"transitionKey,omitempty" xmlrpc:"transitionKey,omitempty"`
+
+	// The uniqueId of the mapping to which the existing behavior belongs.
+	UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"`
+}
+
+// no documentation yet
+type Container_Network_CdnMarketplace_Configuration_Cache_Purge struct {
+	Entity
+
+	// no documentation yet
+	Date *string `json:"date,omitempty" xmlrpc:"date,omitempty"`
+
+	// no documentation yet
+	Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"`
+
+	// no documentation yet
+	Saved *string `json:"saved,omitempty" xmlrpc:"saved,omitempty"`
+
+	// no documentation yet
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// The SoftLayer_Container_Network_CdnMarketplace_Configuration_Cache_PurgeGroup data type contains information for specific responses from the Purge Group API. Each of the Purge Group APIs returns a collection of this type.
+type Container_Network_CdnMarketplace_Configuration_Cache_PurgeGroup struct {
+	Entity
+
+	// Date on which the record was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// An identifier that is unique to the purge group.
+	GroupUniqueId *string `json:"groupUniqueId,omitempty" xmlrpc:"groupUniqueId,omitempty"`
+
+	// The Unix timestamp of the last purge.
+	LastPurgeDate *Time `json:"lastPurgeDate,omitempty" xmlrpc:"lastPurgeDate,omitempty"`
+
+	// Purge Group name. The favorite group name must be unique, but non-favorite groups do not have this limitation.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The following options are available to create a Purge Group: option 1: only purge the paths in the group, but don't save as favorite. option 2: only save the purge group as favorite, but don't purge paths. option 3: save the purge group as favorite and also purge paths.
+	Option *int `json:"option,omitempty" xmlrpc:"option,omitempty"`
+
+	// Total number of purge paths.
+	PathCount *int `json:"pathCount,omitempty" xmlrpc:"pathCount,omitempty"`
+
+	// A collection of purge paths.
+	Paths []string `json:"paths,omitempty" xmlrpc:"paths,omitempty"`
+
+	// The purge's status when the input option field is 1 or 3. Status can be SUCCESS, FAILED, or IN_PROGRESS.
+	PurgeStatus *string `json:"purgeStatus,omitempty" xmlrpc:"purgeStatus,omitempty"`
+
+	// Type of the Purge Group, currently SAVED or UNSAVED.
+	Saved *string `json:"saved,omitempty" xmlrpc:"saved,omitempty"`
+
+	// An identifier that is unique to the domain mapping.
+	UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"`
+}
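To make the three documented Option values concrete, here is a hedged sketch of building a purge-group request container; the constant names, group name, and paths are illustrative only, and the service call that would consume this container is outside this diff:

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

// Illustrative names for the Option values documented above.
const (
	purgeOnly    = 1 // purge the paths, do not save as favorite
	saveOnly     = 2 // save as favorite, do not purge
	saveAndPurge = 3 // save as favorite and purge
)

func main() {
	group := datatypes.Container_Network_CdnMarketplace_Configuration_Cache_PurgeGroup{
		Name:   sl.String("release-assets"), // favorite names must be unique
		Option: sl.Int(saveAndPurge),
		Paths:  []string{"/images/*", "/css/site.css"},
	}
	fmt.Printf("purging %d path(s) with option %d\n", len(group.Paths), *group.Option)
}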
+
+// The SoftLayer_Container_Network_CdnMarketplace_Configuration_Cache_PurgeGroupHistory data type contains information for specific responses from the Purge Group API and Purge History API.
+type Container_Network_CdnMarketplace_Configuration_Cache_PurgeGroupHistory struct {
+	Entity
+
+	// Date on which the record was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// Purge Group name. The favorite group name must be unique, but non-favorite groups do not have this limitation.
+	GroupName *string `json:"groupName,omitempty" xmlrpc:"groupName,omitempty"`
+
+	// Purge group unique ID.
+	GroupUniqueId *string `json:"groupUniqueId,omitempty" xmlrpc:"groupUniqueId,omitempty"`
+
+	// The purge's status. Status can be SUCCESS, FAILED, or IN_PROGRESS.
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// Domain mapping unique ID.
+	UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"`
+}
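Since the Status values above are the only signal a caller gets about purge progress, a small hedged sketch of interpreting them (the helper and sample values are assumptions, not part of this change):

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

// done is a hypothetical helper: it reports whether a purge history
// record has reached a terminal state, per the Status doc above.
func done(h datatypes.Container_Network_CdnMarketplace_Configuration_Cache_PurgeGroupHistory) bool {
	return h.Status != nil && (*h.Status == "SUCCESS" || *h.Status == "FAILED")
}

func main() {
	h := datatypes.Container_Network_CdnMarketplace_Configuration_Cache_PurgeGroupHistory{
		GroupName: sl.String("release-assets"),
		Status:    sl.String("IN_PROGRESS"),
	}
	fmt.Printf("group %s finished: %v\n", *h.GroupName, done(h))
}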
+
+// no documentation yet
+type Container_Network_CdnMarketplace_Configuration_Input struct {
+	Entity
+
+	// no documentation yet
+	BucketName *string `json:"bucketName,omitempty" xmlrpc:"bucketName,omitempty"`
+
+	// no documentation yet
+	CacheKeyQueryRule *string `json:"cacheKeyQueryRule,omitempty" xmlrpc:"cacheKeyQueryRule,omitempty"`
+
+	// no documentation yet
+	CertificateType *string `json:"certificateType,omitempty" xmlrpc:"certificateType,omitempty"`
+
+	// no documentation yet
+	Cname *string `json:"cname,omitempty" xmlrpc:"cname,omitempty"`
+
+	// no documentation yet
+	Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"`
+
+	// no documentation yet
+	DynamicContentAcceleration *Container_Network_CdnMarketplace_Configuration_Performance_DynamicContentAcceleration `json:"dynamicContentAcceleration,omitempty" xmlrpc:"dynamicContentAcceleration,omitempty"`
+
+	// no documentation yet
+	FileExtension *string `json:"fileExtension,omitempty" xmlrpc:"fileExtension,omitempty"`
+
+	// no documentation yet
+	GeoblockingRule *Network_CdnMarketplace_Configuration_Behavior_Geoblocking `json:"geoblockingRule,omitempty" xmlrpc:"geoblockingRule,omitempty"`
+
+	// no documentation yet
+	Header *string `json:"header,omitempty" xmlrpc:"header,omitempty"`
+
+	// no documentation yet
+	HotlinkProtection *Network_CdnMarketplace_Configuration_Behavior_HotlinkProtection `json:"hotlinkProtection,omitempty" xmlrpc:"hotlinkProtection,omitempty"`
+
+	// no documentation yet
+	HttpPort *int `json:"httpPort,omitempty" xmlrpc:"httpPort,omitempty"`
+
+	// no documentation yet
+	HttpsPort *int `json:"httpsPort,omitempty" xmlrpc:"httpsPort,omitempty"`
+
+	// Used by the following method: updateOriginPath(). This property stores the existing path of the path record to be updated; the path attribute stores the new path.
+	OldPath *string `json:"oldPath,omitempty" xmlrpc:"oldPath,omitempty"`
+
+	// no documentation yet
+	Origin *string `json:"origin,omitempty" xmlrpc:"origin,omitempty"`
+
+	// no documentation yet
+	OriginType *string `json:"originType,omitempty" xmlrpc:"originType,omitempty"`
+
+	// no documentation yet
+	Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"`
+
+	// no documentation yet
+	PerformanceConfiguration *string `json:"performanceConfiguration,omitempty" xmlrpc:"performanceConfiguration,omitempty"`
+
+	// no documentation yet
+	Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+	// no documentation yet
+	RespectHeaders *string `json:"respectHeaders,omitempty" xmlrpc:"respectHeaders,omitempty"`
+
+	// no documentation yet
+	ServeStale *string `json:"serveStale,omitempty" xmlrpc:"serveStale,omitempty"`
+
+	// no documentation yet
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// no documentation yet
+	UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"`
+
+	// no documentation yet
+	VendorName *string `json:"vendorName,omitempty" xmlrpc:"vendorName,omitempty"`
+}
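Given the OldPath/Path pairing documented above, an update request container might be populated as follows. A sketch under assumptions: the mapping ID, paths, and origin are placeholders, and the updateOriginPath service call that would consume this input is not part of this diff:

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	input := datatypes.Container_Network_CdnMarketplace_Configuration_Input{
		UniqueId: sl.String("EXAMPLE-MAPPING-ID"), // placeholder mapping uniqueId
		OldPath:  sl.String("/content/v1/*"),      // existing path record
		Path:     sl.String("/content/v2/*"),      // new path to store
		Origin:   sl.String("origin.example.com"),
	}
	fmt.Printf("moving %s -> %s\n", *input.OldPath, *input.Path)
}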
xmlrpc:"originType,omitempty"` + + // no documentation yet + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // no documentation yet + PerformanceConfiguration *string `json:"performanceConfiguration,omitempty" xmlrpc:"performanceConfiguration,omitempty"` + + // no documentation yet + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // no documentation yet + RespectHeaders *bool `json:"respectHeaders,omitempty" xmlrpc:"respectHeaders,omitempty"` + + // no documentation yet + ServeStale *bool `json:"serveStale,omitempty" xmlrpc:"serveStale,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // no documentation yet + UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"` + + // no documentation yet + VendorName *string `json:"vendorName,omitempty" xmlrpc:"vendorName,omitempty"` +} + +// no documentation yet +type Container_Network_CdnMarketplace_Configuration_Mapping_Path struct { + Entity + + // no documentation yet + BucketName *string `json:"bucketName,omitempty" xmlrpc:"bucketName,omitempty"` + + // no documentation yet + CacheKeyQueryRule *string `json:"cacheKeyQueryRule,omitempty" xmlrpc:"cacheKeyQueryRule,omitempty"` + + // no documentation yet + DynamicContentAcceleration *Container_Network_CdnMarketplace_Configuration_Performance_DynamicContentAcceleration `json:"dynamicContentAcceleration,omitempty" xmlrpc:"dynamicContentAcceleration,omitempty"` + + // no documentation yet + FileExtension *string `json:"fileExtension,omitempty" xmlrpc:"fileExtension,omitempty"` + + // no documentation yet + Header *string `json:"header,omitempty" xmlrpc:"header,omitempty"` + + // no documentation yet + HttpPort *int `json:"httpPort,omitempty" xmlrpc:"httpPort,omitempty"` + + // no documentation yet + HttpsPort *int `json:"httpsPort,omitempty" xmlrpc:"httpsPort,omitempty"` + + // no documentation yet + MappingUniqueId *string `json:"mappingUniqueId,omitempty" xmlrpc:"mappingUniqueId,omitempty"` + + // no documentation yet + Origin *string `json:"origin,omitempty" xmlrpc:"origin,omitempty"` + + // no documentation yet + OriginType *string `json:"originType,omitempty" xmlrpc:"originType,omitempty"` + + // no documentation yet + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // no documentation yet + PerformanceConfiguration *string `json:"performanceConfiguration,omitempty" xmlrpc:"performanceConfiguration,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Container_Network_CdnMarketplace_Configuration_Performance_DynamicContentAcceleration struct { + Entity + + // The detectionPath is used by CDN edge servers to find the best optimized route from edge to the origin server. The Akamai edge servers fetch the test object from the origin to know the network condition to your origin server, and then calculate the best optimized route with the network condition. The best path to origin must be known at the time a user’s request arrives at an edge server, since any in-line analysis or probing would defeat the purpose of speeding things up. + DetectionPath *string `json:"detectionPath,omitempty" xmlrpc:"detectionPath,omitempty"` + + // Serving compressed images reduces the amount of content required to load a page. This feature helps offset less robust connections, such as those formed with mobile devices. 
+	//
+	// JPG supported file extensions: .jpg, .jpeg, .jpe, .jig, .jgig, .jgi. The default is enabled.
+	MobileImageCompressionEnabled *bool `json:"mobileImageCompressionEnabled,omitempty" xmlrpc:"mobileImageCompressionEnabled,omitempty"`
+
+	// Inspects HTML responses and prefetches embedded objects in HTML files. Prefetching works on any page that includes ,